diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 1126f4f29..40ae9d3a9 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -76,6 +76,7 @@ set(HEADERS
     synchronized_wrapper.h
     telemetry.h
     thread.h
+    thread_pool.h
     thread_queue_list.h
     timer.h
     vector_math.h
diff --git a/src/common/thread_pool.h b/src/common/thread_pool.h
new file mode 100644
index 000000000..ed32a5416
--- /dev/null
+++ b/src/common/thread_pool.h
@@ -0,0 +1,124 @@
+// Copyright 2017 Citra Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <condition_variable>
+#include <cstddef>
+#include <functional>
+#include <future>
+#include <memory>
+#include <mutex>
+#include <thread>
+#include <vector>
+
+#include "common/assert.h"
+
+namespace Common {
+
+class ThreadPool {
+private:
+    explicit ThreadPool(size_t num_threads) : num_threads(num_threads), workers(num_threads) {
+        ASSERT(num_threads);
+    }
+
+public:
+    static ThreadPool& GetPool() {
+        static ThreadPool thread_pool(std::thread::hardware_concurrency());
+        return thread_pool;
+    }
+
+    template <typename F, typename... Args>
+    auto push(F&& f, Args&&... args) {
+        auto ret = workers[next_worker].push(std::forward<F>(f), std::forward<Args>(args)...);
+        next_worker = (next_worker + 1) % num_threads;
+        return ret;
+    }
+
+    size_t total_threads() const {
+        return num_threads;
+    }
+
+private:
+    template <typename T>
+    class ThreadsafeQueue {
+    private:
+        const size_t capacity;
+        std::vector<T> queue_storage;
+        std::mutex mutex;
+        std::condition_variable queue_changed;
+
+    public:
+        explicit ThreadsafeQueue(const size_t capacity) : capacity(capacity) {
+            queue_storage.reserve(capacity);
+        }
+
+        void push(const T& element) {
+            std::unique_lock<std::mutex> lock(mutex);
+            while (queue_storage.size() >= capacity) {
+                queue_changed.wait(lock);
+            }
+            queue_storage.push_back(element);
+            queue_changed.notify_one();
+        }
+
+        T Pop() {
+            std::unique_lock<std::mutex> lock(mutex);
+            while (queue_storage.empty()) {
+                queue_changed.wait(lock);
+            }
+            T element(std::move(queue_storage.back()));
+            queue_storage.pop_back();
+            queue_changed.notify_one();
+            return element;
+        }
+
+        void push(T&& element) {
+            std::unique_lock<std::mutex> lock(mutex);
+            while (queue_storage.size() >= capacity) {
+                queue_changed.wait(lock);
+            }
+            queue_storage.emplace_back(std::move(element));
+            queue_changed.notify_one();
+        }
+    };
+
+    class Worker {
+    private:
+        ThreadsafeQueue<std::function<void()>> queue;
+        std::thread thread;
+        static constexpr size_t MAX_QUEUE_CAPACITY = 50;
+
+    public:
+        Worker() : queue(MAX_QUEUE_CAPACITY), thread([this] { Loop(); }) {}
+
+        ~Worker() {
+            queue.push(nullptr); // Exit the loop
+            thread.join();
+        }
+
+        void Loop() {
+            while (true) {
+                std::function<void()> fn(queue.Pop());
+                if (!fn) // a nullptr function is the signal to exit the loop
+                    break;
+                fn();
+            }
+        }
+
+        template <typename F, typename... Args>
+        auto push(F&& f, Args&&... args) {
+            auto task = std::make_shared<std::packaged_task<decltype(f(args...))()>>(
+                std::bind(std::forward<F>(f), std::forward<Args>(args)...));
+            queue.push([task] { (*task)(); });
+            return task->get_future();
+        }
+    };
+
+    const size_t num_threads;
+    size_t next_worker = 0;
+    std::vector<Worker> workers;
+};
+
+} // namespace Common
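The pool is process-global and lazily constructed; tasks are distributed round-robin over per-worker queues, and each `push()` hands back a `std::future` for the task's result. A minimal usage sketch (standalone, not part of the diff; the function names are illustrative):

```cpp
#include <cstdio>
#include <future>
#include <vector>

#include "common/thread_pool.h"

static int SquareIt(int x) {
    return x * x;
}

static void Example() {
    auto& pool = Common::ThreadPool::GetPool();

    std::vector<std::future<int>> results;
    for (int i = 0; i < 8; ++i) {
        // push() forwards the callable and its arguments to one worker's queue
        results.emplace_back(pool.push(SquareIt, i));
    }

    for (auto& result : results) {
        std::printf("%d\n", result.get()); // blocks until the worker ran the task
    }
}
```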
diff --git a/src/video_core/command_processor.cpp b/src/video_core/command_processor.cpp
index 15f0cedf2..1d6f1b3f6 100644
--- a/src/video_core/command_processor.cpp
+++ b/src/video_core/command_processor.cpp
@@ -4,11 +4,13 @@
 #include <array>
 #include <cstddef>
+#include <future>
 #include <memory>
 #include <utility>
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "common/microprofile.h"
+#include "common/thread_pool.h"
 #include "common/vector_math.h"
 #include "core/hle/service/gsp_gpu.h"
 #include "core/hw/gpu.h"
@@ -119,228 +121,6 @@ static void WriteUniformFloatReg(ShaderRegs& config, Shader::ShaderSetup& setup,
     }
 }
 
-static void LoadDefaultVertexAttributes(u32 register_value) {
-    auto& regs = g_state.regs;
-
-    // TODO: Does actual hardware indeed keep an intermediate buffer or does
-    // it directly write the values?
-    default_attr_write_buffer[default_attr_counter++] = register_value;
-
-    // Default attributes are written in a packed format such that four float24 values are encoded
-    // in three 32-bit numbers.
-    // We write to internal memory once a full such vector is written.
-    if (default_attr_counter >= 3) {
-        default_attr_counter = 0;
-
-        auto& setup = regs.pipeline.vs_default_attributes_setup;
-
-        if (setup.index >= 16) {
-            LOG_ERROR(HW_GPU, "Invalid VS default attribute index %d", (int)setup.index);
-            return;
-        }
-
-        Math::Vec4<float24> attribute;
-
-        // NOTE: The destination component order indeed is "backwards"
-        attribute.w = float24::FromRaw(default_attr_write_buffer[0] >> 8);
-        attribute.z = float24::FromRaw(((default_attr_write_buffer[0] & 0xFF) << 16) |
-                                       ((default_attr_write_buffer[1] >> 16) & 0xFFFF));
-        attribute.y = float24::FromRaw(((default_attr_write_buffer[1] & 0xFFFF) << 8) |
-                                       ((default_attr_write_buffer[2] >> 24) & 0xFF));
-        attribute.x = float24::FromRaw(default_attr_write_buffer[2] & 0xFFFFFF);
-
-        LOG_TRACE(HW_GPU, "Set default VS attribute %x to (%f %f %f %f)", (int)setup.index,
-                  attribute.x.ToFloat32(), attribute.y.ToFloat32(), attribute.z.ToFloat32(),
-                  attribute.w.ToFloat32());
-
-        // TODO: Verify that this actually modifies the register!
-        if (setup.index < 15) {
-            g_state.input_default_attributes.attr[setup.index] = attribute;
-            setup.index++;
-        } else {
-            // Put each attribute into an immediate input buffer. When all specified immediate
-            // attributes are present, the Vertex Shader is invoked and everything is sent to
-            // the primitive assembler.
-
-            auto& immediate_input = g_state.immediate.input_vertex;
-            auto& immediate_attribute_id = g_state.immediate.current_attribute;
-
-            immediate_input.attr[immediate_attribute_id] = attribute;
-
-            if (immediate_attribute_id < regs.pipeline.max_input_attrib_index) {
-                immediate_attribute_id += 1;
-            } else {
-                MICROPROFILE_SCOPE(GPU_Drawing);
-                immediate_attribute_id = 0;
-
-                auto* shader_engine = Shader::GetEngine();
-                shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset);
-
-                // Send to vertex shader
-                if (g_debug_context)
-                    g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation,
-                                             static_cast<void*>(&immediate_input));
-                Shader::UnitState shader_unit;
-                Shader::AttributeBuffer output{};
-
-                shader_unit.LoadInput(regs.vs, immediate_input);
-                shader_engine->Run(g_state.vs, shader_unit);
-                shader_unit.WriteOutput(regs.vs, output);
-
-                // Send to geometry pipeline
-                if (g_state.immediate.reset_geometry_pipeline) {
-                    g_state.geometry_pipeline.Reconfigure();
-                    g_state.immediate.reset_geometry_pipeline = false;
-                }
-                ASSERT(!g_state.geometry_pipeline.NeedIndexInput());
-                g_state.geometry_pipeline.Setup(shader_engine);
-                g_state.geometry_pipeline.SubmitVertex(output);
-
-                // TODO: If drawing after every immediate mode triangle kills performance,
-                // change it to flush triangles whenever a drawing config register changes
-                // See: https://github.com/citra-emu/citra/pull/2866#issuecomment-327011550
-                VideoCore::g_renderer->Rasterizer()->DrawTriangles();
-                if (g_debug_context) {
-                    g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch, nullptr);
-                }
-            }
-        }
-    }
-}
-
-static void Draw(u32 command_id) {
-    MICROPROFILE_SCOPE(GPU_Drawing);
-    auto& regs = g_state.regs;
-
-#if PICA_LOG_TEV
-    DebugUtils::DumpTevStageConfig(regs.GetTevStages());
-#endif
-    if (g_debug_context)
-        g_debug_context->OnEvent(DebugContext::Event::IncomingPrimitiveBatch, nullptr);
-
-    // Processes information about internal vertex attributes to figure out how a vertex is
-    // loaded.
-    // Later, these can be compiled and cached.
-    const u32 base_address = regs.pipeline.vertex_attributes.GetPhysicalBaseAddress();
-    VertexLoader loader(regs.pipeline);
-
-    // Load vertices
-    bool is_indexed = (command_id == PICA_REG_INDEX(pipeline.trigger_draw_indexed));
-
-    const auto& index_info = regs.pipeline.index_array;
-    const u8* index_address_8 = Memory::GetPhysicalPointer(base_address + index_info.offset);
-    if (!index_address_8) {
-        LOG_CRITICAL(HW_GPU, "Invalid index_address_8 %08x", index_address_8);
-        return;
-    }
-    const u16* index_address_16 = reinterpret_cast<const u16*>(index_address_8);
-    bool index_u16 = index_info.format != 0;
-
-    PrimitiveAssembler<Shader::OutputVertex>& primitive_assembler = g_state.primitive_assembler;
-
-    if (g_debug_context && g_debug_context->recorder) {
-        for (int i = 0; i < 3; ++i) {
-            const auto texture = regs.texturing.GetTextures()[i];
-            if (!texture.enabled)
-                continue;
-
-            u8* texture_data = Memory::GetPhysicalPointer(texture.config.GetPhysicalAddress());
-            g_debug_context->recorder->MemoryAccessed(
-                texture_data, Pica::TexturingRegs::NibblesPerPixel(texture.format) *
-                                  texture.config.width / 2 * texture.config.height,
-                texture.config.GetPhysicalAddress());
-        }
-    }
-
-    DebugUtils::MemoryAccessTracker memory_accesses;
-
-    // Simple circular-replacement vertex cache
-    // The size has been tuned for optimal balance between hit-rate and the cost of lookup
-    const size_t VERTEX_CACHE_SIZE = 32;
-    std::array<u32, VERTEX_CACHE_SIZE> vertex_cache_ids;
-    std::array<Shader::AttributeBuffer, VERTEX_CACHE_SIZE> vertex_cache;
-    Shader::AttributeBuffer vs_output;
-
-    unsigned int vertex_cache_pos = 0;
-    vertex_cache_ids.fill(-1);
-
-    auto* shader_engine = Shader::GetEngine();
-    Shader::UnitState shader_unit;
-
-    shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset);
-
-    g_state.geometry_pipeline.Reconfigure();
-    g_state.geometry_pipeline.Setup(shader_engine);
-    if (g_state.geometry_pipeline.NeedIndexInput())
-        ASSERT(is_indexed);
-
-    for (unsigned int index = 0; index < regs.pipeline.num_vertices; ++index) {
-        // Indexed rendering doesn't use the start offset
-        unsigned int vertex = is_indexed
-                                  ? (index_u16 ? index_address_16[index] : index_address_8[index])
-                                  : (index + regs.pipeline.vertex_offset);
-
-        // -1 is a common special value used for primitive restart. Since it's unknown if
-        // the PICA supports it, and it would mess up the caching, guard against it here.
-        ASSERT(vertex != -1);
-
-        bool vertex_cache_hit = false;
-
-        if (is_indexed) {
-            if (g_state.geometry_pipeline.NeedIndexInput()) {
-                g_state.geometry_pipeline.SubmitIndex(vertex);
-                continue;
-            }
-
-            if (g_debug_context && Pica::g_debug_context->recorder) {
-                int size = index_u16 ? 2 : 1;
-                memory_accesses.AddAccess(base_address + index_info.offset + size * index, size);
-            }
-
-            for (unsigned int i = 0; i < VERTEX_CACHE_SIZE; ++i) {
-                if (vertex == vertex_cache_ids[i]) {
-                    vs_output = vertex_cache[i];
-                    vertex_cache_hit = true;
-                    break;
-                }
-            }
-        }
-
-        if (!vertex_cache_hit) {
-            // Initialize data for the current vertex
-            Shader::AttributeBuffer input;
-            loader.LoadVertex(base_address, index, vertex, input, memory_accesses);
-
-            // Send to vertex shader
-            if (g_debug_context)
-                g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation,
-                                         (void*)&input);
-            shader_unit.LoadInput(regs.vs, input);
-            shader_engine->Run(g_state.vs, shader_unit);
-            shader_unit.WriteOutput(regs.vs, vs_output);
-
-            if (is_indexed) {
-                vertex_cache[vertex_cache_pos] = vs_output;
-                vertex_cache_ids[vertex_cache_pos] = vertex;
-                vertex_cache_pos = (vertex_cache_pos + 1) % VERTEX_CACHE_SIZE;
-            }
-        }
-
-        // Send to geometry pipeline
-        g_state.geometry_pipeline.SubmitVertex(vs_output);
-    }
-
-    for (auto& range : memory_accesses.ranges) {
-        g_debug_context->recorder->MemoryAccessed(Memory::GetPhysicalPointer(range.first),
-                                                  range.second, range.first);
-    }
-
-    VideoCore::g_renderer->Rasterizer()->DrawTriangles();
-    if (g_debug_context) {
-        g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch, nullptr);
-    }
-}
-
 static void WritePicaReg(u32 id, u32 value, u32 mask) {
     auto& regs = g_state.regs;
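For reference, the packed default-attribute format that both the removed helper and the new inline case decode: four 24-bit raw floats are spread across three 32-bit register writes, written in "backwards" component order (W first). A self-contained round-trip of the bit layout (standalone sketch, not part of the diff; the packing expressions are illustrative, only the unpacking expressions are taken from the code above):

```cpp
#include <cstdint>
#include <cstdio>

using u32 = std::uint32_t;

int main() {
    // Four 24-bit raw values, in write order W, Z, Y, X.
    const u32 w = 0xABCDEF, z = 0x123456, y = 0xFEDCBA, x = 0x654321;

    // Pack: 4 * 24 bits = 96 bits = 3 * 32-bit register writes.
    const u32 word0 = (w << 8) | (z >> 16);
    const u32 word1 = ((z & 0xFFFF) << 16) | (y >> 8);
    const u32 word2 = ((y & 0xFF) << 24) | x;

    // Unpack exactly as the command processor does.
    const u32 w2 = word0 >> 8;
    const u32 z2 = ((word0 & 0xFF) << 16) | ((word1 >> 16) & 0xFFFF);
    const u32 y2 = ((word1 & 0xFFFF) << 8) | ((word2 >> 24) & 0xFF);
    const u32 x2 = word2 & 0xFFFFFF;

    std::printf("%d\n", w == w2 && z == z2 && y == y2 && x == x2); // prints 1
}
```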
@@ -390,9 +170,95 @@ static void WritePicaReg(u32 id, u32 value, u32 mask) {
     // Load default vertex input attributes
     case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[0], 0x233):
     case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[1], 0x234):
-    case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[2], 0x235):
-        LoadDefaultVertexAttributes(value);
+    case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[2], 0x235): {
+        // TODO: Does actual hardware indeed keep an intermediate buffer or does
+        // it directly write the values?
+        default_attr_write_buffer[default_attr_counter++] = value;
+
+        // Default attributes are written in a packed format such that four float24 values
+        // are encoded in three 32-bit numbers. We write to internal memory once a full such
+        // vector is written.
+        if (default_attr_counter >= 3) {
+            default_attr_counter = 0;
+
+            auto& setup = regs.pipeline.vs_default_attributes_setup;
+
+            if (setup.index >= 16) {
+                LOG_ERROR(HW_GPU, "Invalid VS default attribute index %d", (int)setup.index);
+                break;
+            }
+
+            Math::Vec4<float24> attribute;
+
+            // NOTE: The destination component order indeed is "backwards"
+            attribute.w = float24::FromRaw(default_attr_write_buffer[0] >> 8);
+            attribute.z = float24::FromRaw(((default_attr_write_buffer[0] & 0xFF) << 16) |
+                                           ((default_attr_write_buffer[1] >> 16) & 0xFFFF));
+            attribute.y = float24::FromRaw(((default_attr_write_buffer[1] & 0xFFFF) << 8) |
+                                           ((default_attr_write_buffer[2] >> 24) & 0xFF));
+            attribute.x = float24::FromRaw(default_attr_write_buffer[2] & 0xFFFFFF);
+
+            LOG_TRACE(HW_GPU, "Set default VS attribute %x to (%f %f %f %f)", (int)setup.index,
+                      attribute.x.ToFloat32(), attribute.y.ToFloat32(), attribute.z.ToFloat32(),
+                      attribute.w.ToFloat32());
+
+            // TODO: Verify that this actually modifies the register!
+            if (setup.index < 15) {
+                g_state.input_default_attributes.attr[setup.index] = attribute;
+                setup.index++;
+            } else {
+                // Put each attribute into an immediate input buffer. When all specified immediate
+                // attributes are present, the Vertex Shader is invoked and everything is sent to
+                // the primitive assembler.
+
+                auto& immediate_input = g_state.immediate.input_vertex;
+                auto& immediate_attribute_id = g_state.immediate.current_attribute;
+
+                immediate_input.attr[immediate_attribute_id] = attribute;
+
+                if (immediate_attribute_id < regs.pipeline.max_input_attrib_index) {
+                    immediate_attribute_id += 1;
+                } else {
+                    MICROPROFILE_SCOPE(GPU_Drawing);
+                    immediate_attribute_id = 0;
+
+                    auto* shader_engine = Shader::GetEngine();
+                    shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset);
+
+                    // Send to vertex shader
+                    if (g_debug_context)
+                        g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation,
+                                                 static_cast<void*>(&immediate_input));
+                    Shader::UnitState shader_unit;
+                    Shader::AttributeBuffer output{};
+
+                    shader_unit.LoadInput(regs.vs, immediate_input);
+                    shader_engine->Run(g_state.vs, shader_unit);
+                    shader_unit.WriteOutput(regs.vs, output);
+
+                    // Send to geometry pipeline
+                    if (g_state.immediate.reset_geometry_pipeline) {
+                        g_state.geometry_pipeline.Reconfigure();
+                        g_state.immediate.reset_geometry_pipeline = false;
+                    }
+                    ASSERT(!g_state.geometry_pipeline.NeedIndexInput());
+                    g_state.geometry_pipeline.Setup(shader_engine);
+                    g_state.geometry_pipeline.SubmitVertex(output);
+
+                    // TODO: If drawing after every immediate mode triangle kills performance,
+                    // change it to flush triangles whenever a drawing config register changes
+                    // See: https://github.com/citra-emu/citra/pull/2866#issuecomment-327011550
+                    VideoCore::g_renderer->Rasterizer()->DrawTriangles();
+                    if (g_debug_context) {
+                        g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch,
+                                                 nullptr);
+                    }
+                }
+            }
+        }
         break;
+    }
 
     case PICA_REG_INDEX(pipeline.gpu_mode):
         // This register likely just enables vertex processing and doesn't need any special handling
@@ -411,9 +277,213 @@ static void WritePicaReg(u32 id, u32 value, u32 mask) {
 
     // It seems like these trigger vertex rendering
     case PICA_REG_INDEX(pipeline.trigger_draw):
-    case PICA_REG_INDEX(pipeline.trigger_draw_indexed):
-        Draw(id);
+    case PICA_REG_INDEX(pipeline.trigger_draw_indexed): {
+        MICROPROFILE_SCOPE(GPU_Drawing);
+        const bool is_indexed = (id == PICA_REG_INDEX(pipeline.trigger_draw_indexed));
+
+#if PICA_LOG_TEV
+        DebugUtils::DumpTevStageConfig(regs.GetTevStages());
+#endif
+        if (g_debug_context)
+            g_debug_context->OnEvent(DebugContext::Event::IncomingPrimitiveBatch, nullptr);
+
+        struct CachedVertex {
+            explicit CachedVertex() : batch(0), lock{ATOMIC_FLAG_INIT} {}
+            CachedVertex(const CachedVertex& other) : CachedVertex() {}
+            union {
+                Shader::AttributeBuffer output_attr; // GS used
+                Shader::OutputVertex output_vertex;  // No GS
+            };
+            std::atomic<u32> batch;
+            std::atomic_flag lock;
+        };
+        static std::vector<CachedVertex> vs_output(0x10000);
+
+        if (!is_indexed && vs_output.size() < regs.pipeline.num_vertices)
+            vs_output.resize(regs.pipeline.num_vertices);
+
+        // Used as a means to invalidate data from the previous batch without clearing it
+        static u32 batch_id = std::numeric_limits<u32>::max();
+
+        ++batch_id;
+        if (batch_id == 0) { // reset cache when id overflows for safety
+            ++batch_id;
+            for (auto& entry : vs_output)
+                entry.batch = 0;
+        }
+
+        // Processes information about internal vertex attributes to figure out how a vertex is
+        // loaded. Later, these can be compiled and cached.
+        const u32 base_address = regs.pipeline.vertex_attributes.GetPhysicalBaseAddress();
+        VertexLoader loader(regs.pipeline);
+
+        const auto& index_info = regs.pipeline.index_array;
+        const u8* index_address_8 = Memory::GetPhysicalPointer(base_address + index_info.offset);
+        if (!index_address_8) {
+            LOG_CRITICAL(HW_GPU, "Invalid index address %08x", base_address + index_info.offset);
+            return;
+        }
+        const u16* index_address_16 = reinterpret_cast<const u16*>(index_address_8);
+        bool index_u16 = index_info.format != 0;
+
+        auto VertexIndex = [&](unsigned int index) {
+            // Indexed rendering doesn't use the start offset
+            return is_indexed ? (index_u16 ? index_address_16[index] : index_address_8[index])
+                              : (index + regs.pipeline.vertex_offset);
+        };
+
+        PrimitiveAssembler<Shader::OutputVertex>& primitive_assembler = g_state.primitive_assembler;
+
+        if (g_debug_context && g_debug_context->recorder) {
+            for (int i = 0; i < 3; ++i) {
+                const auto texture = regs.texturing.GetTextures()[i];
+                if (!texture.enabled)
+                    continue;
+
+                u8* texture_data = Memory::GetPhysicalPointer(texture.config.GetPhysicalAddress());
+                g_debug_context->recorder->MemoryAccessed(
+                    texture_data, Pica::TexturingRegs::NibblesPerPixel(texture.format) *
+                                      texture.config.width / 2 * texture.config.height,
+                    texture.config.GetPhysicalAddress());
+            }
+        }
+
+        DebugUtils::MemoryAccessTracker memory_accesses;
+
+        auto* shader_engine = Shader::GetEngine();
+
+        shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset);
+
+        const bool use_gs = regs.pipeline.use_gs == PipelineRegs::UseGS::Yes;
+
+        auto VSUnitLoop = [&](u32 thread_id, auto num_threads) {
+            constexpr bool single_thread =
+                std::is_same_v<std::integral_constant<u32, 1>, decltype(num_threads)>;
+            Shader::UnitState shader_unit;
+
+            for (unsigned int index = thread_id; index < regs.pipeline.num_vertices;
+                 index += num_threads) {
+                unsigned int vertex = VertexIndex(index);
+                auto& cached_vertex = vs_output[is_indexed ? vertex : index];
+
+                // -1 is a common special value used for primitive restart. Since it's unknown if
+                // the PICA supports it, and it would mess up the caching, guard against it here.
+                ASSERT(vertex != -1);
+
+                if (is_indexed) {
+                    if (g_debug_context && Pica::g_debug_context->recorder) {
+                        int size = index_u16 ? 2 : 1;
+                        memory_accesses.AddAccess(base_address + index_info.offset + size * index,
+                                                  size);
+                    }
+
+                    if (!single_thread) {
+                        // Try locking this vertex
+                        if (cached_vertex.lock.test_and_set(std::memory_order_acquire)) {
+                            // Another thread is processing this vertex
+                            continue;
+                        }
+                        // Vertex is not being processed and is from the correct batch
+                        else if (cached_vertex.batch.load(std::memory_order_acquire) == batch_id) {
+                            // Unlock
+                            cached_vertex.lock.clear(std::memory_order_release);
+                            continue;
+                        }
+                    } else if (cached_vertex.batch.load(std::memory_order_relaxed) == batch_id) {
+                        continue;
+                    }
+                }
+
+                Shader::AttributeBuffer attribute_buffer;
+                Shader::AttributeBuffer& output_attr =
+                    use_gs ? cached_vertex.output_attr : attribute_buffer;
+
+                // Initialize data for the current vertex
+                loader.LoadVertex(base_address, index, vertex, attribute_buffer, memory_accesses);
+
+                // Send to vertex shader
+                if (g_debug_context)
+                    g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation,
+                                             &attribute_buffer);
+                shader_unit.LoadInput(regs.vs, attribute_buffer);
+                shader_engine->Run(g_state.vs, shader_unit);
+
+                shader_unit.WriteOutput(regs.vs, output_attr);
+                if (!use_gs)
+                    cached_vertex.output_vertex =
+                        Shader::OutputVertex::FromAttributeBuffer(regs.rasterizer, output_attr);
+
+                if (!single_thread) {
+                    cached_vertex.batch.store(batch_id, std::memory_order_release);
+                    if (is_indexed) {
+                        cached_vertex.lock.clear(std::memory_order_release);
+                    }
+                } else if (is_indexed) {
+                    cached_vertex.batch.store(batch_id, std::memory_order_relaxed);
+                }
+            }
+        };
+
+        auto& thread_pool = Common::ThreadPool::GetPool();
+        std::vector<std::future<void>> futures;
+
+        constexpr unsigned int MIN_VERTICES_PER_THREAD = 10;
+        unsigned int vs_threads = regs.pipeline.num_vertices / MIN_VERTICES_PER_THREAD;
+        vs_threads = std::min(vs_threads, std::thread::hardware_concurrency() - 1);
+
+        if (!vs_threads) {
+            VSUnitLoop(0, std::integral_constant<u32, 1>{});
+        } else {
+            for (unsigned int thread_id = 0; thread_id < vs_threads; ++thread_id) {
+                futures.emplace_back(thread_pool.push(VSUnitLoop, thread_id, vs_threads));
+            }
+        }
+
+        g_state.geometry_pipeline.Reconfigure();
+        g_state.geometry_pipeline.Setup(shader_engine);
+        if (g_state.geometry_pipeline.NeedIndexInput())
+            ASSERT(is_indexed);
+
+        for (unsigned int index = 0; index < regs.pipeline.num_vertices; ++index) {
+            unsigned int vertex = VertexIndex(index);
+            auto& cached_vertex = vs_output[is_indexed ? vertex : index];
+
+            if (use_gs && is_indexed && g_state.geometry_pipeline.NeedIndexInput()) {
+                g_state.geometry_pipeline.SubmitIndex(vertex);
+                continue;
+            }
+
+            // Synchronize threads
+            if (vs_threads) {
+                while (cached_vertex.batch.load(std::memory_order_acquire) != batch_id) {
+                    std::this_thread::yield();
+                }
+            }
+
+            if (use_gs) {
+                // Send to geometry pipeline
+                g_state.geometry_pipeline.SubmitVertex(cached_vertex.output_attr);
+            } else {
+                primitive_assembler.SubmitVertex(
+                    cached_vertex.output_vertex,
+                    std::bind(&std::decay_t<decltype(*VideoCore::g_renderer->Rasterizer())>::
+                                  AddTriangle,
+                              VideoCore::g_renderer->Rasterizer(), std::placeholders::_1,
+                              std::placeholders::_2, std::placeholders::_3));
+            }
+        }
+
+        for (auto& future : futures)
+            future.get();
+
+        for (auto& range : memory_accesses.ranges) {
+            g_debug_context->recorder->MemoryAccessed(Memory::GetPhysicalPointer(range.first),
+                                                      range.second, range.first);
+        }
+
+        VideoCore::g_renderer->Rasterizer()->DrawTriangles();
+
+        if (g_debug_context) {
+            g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch, nullptr);
+        }
         break;
+    }
 
     case PICA_REG_INDEX(gs.bool_uniforms):
         WriteUniformBoolReg(g_state.gs, g_state.regs.gs.bool_uniforms.Value());
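Two things in the new trigger_draw path deserve a closer look: the `batch_id` generation counter, which invalidates all 0x10000 cache slots in O(1) instead of clearing them, and the consumer loop, which yields until a worker has stamped the slot with the current batch (release store paired with acquire load). A reduced, standalone sketch of the same pattern (illustrative names, not part of the diff):

```cpp
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <thread>
#include <vector>

struct Slot {
    int value = 0;
    std::atomic<std::uint32_t> batch{0};
};

int main() {
    std::vector<Slot> cache(1024);
    std::uint32_t batch_id = 0;

    for (int draw = 0; draw < 3; ++draw) {
        ++batch_id; // every slot with an older stamp is now stale; nothing is cleared

        std::thread producer([&] {
            for (std::size_t i = 0; i < cache.size(); ++i) {
                cache[i].value = static_cast<int>(i) + draw;
                // Publish: the store to value happens-before any load that sees this stamp
                cache[i].batch.store(batch_id, std::memory_order_release);
            }
        });

        long sum = 0;
        for (std::size_t i = 0; i < cache.size(); ++i) {
            // Consume in order, yielding until the slot belongs to this batch
            while (cache[i].batch.load(std::memory_order_acquire) != batch_id)
                std::this_thread::yield();
            sum += cache[i].value;
        }
        producer.join();
        std::printf("draw %d: sum %ld\n", draw, sum);
    }
}
```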
diff --git a/src/video_core/debug_utils/debug_utils.h b/src/video_core/debug_utils/debug_utils.h
index c1f29c527..f266bc9cd 100644
--- a/src/video_core/debug_utils/debug_utils.h
+++ b/src/video_core/debug_utils/debug_utils.h
@@ -235,6 +235,8 @@ class MemoryAccessTracker {
 public:
     /// Record a particular memory access in the list
     void AddAccess(u32 paddr, u32 size) {
+        std::lock_guard<std::mutex> lock(mutex);
+
         // Create new range or extend existing one
         ranges[paddr] = std::max(ranges[paddr], size);
 
@@ -242,6 +244,8 @@ public:
         SimplifyRanges();
     }
 
+    std::mutex mutex;
+
     /// Map of accessed ranges (mapping start address to range size)
     std::map<u32, u32> ranges;
 };
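`MemoryAccessTracker::AddAccess` is now reachable from the vertex-shader worker threads as well as the submitting thread, hence the new mutex. A minimal standalone sketch of the locked-map pattern (illustrative names, not part of the diff):

```cpp
#include <algorithm>
#include <cstdint>
#include <map>
#include <mutex>
#include <thread>

struct Tracker {
    std::mutex mutex;
    std::map<std::uint32_t, std::uint32_t> ranges;

    void AddAccess(std::uint32_t paddr, std::uint32_t size) {
        std::lock_guard<std::mutex> lock(mutex); // serialize all map mutation
        ranges[paddr] = std::max(ranges[paddr], size);
    }
};

int main() {
    Tracker tracker;
    std::thread a([&] { tracker.AddAccess(0x1000, 4); });
    std::thread b([&] { tracker.AddAccess(0x1000, 8); });
    a.join();
    b.join(); // ranges[0x1000] == 8 regardless of interleaving
}
```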
diff --git a/src/video_core/primitive_assembly.cpp b/src/video_core/primitive_assembly.cpp
index 9c3dd4cab..9ff9d097a 100644
--- a/src/video_core/primitive_assembly.cpp
+++ b/src/video_core/primitive_assembly.cpp
@@ -15,7 +15,7 @@ PrimitiveAssembler<VertexType>::PrimitiveAssembler(PipelineRegs::TriangleTopology topology)
 
 template <typename VertexType>
 void PrimitiveAssembler<VertexType>::SubmitVertex(const VertexType& vtx,
-                                                  TriangleHandler triangle_handler) {
+                                                  const TriangleHandler& triangle_handler) {
     switch (topology) {
     case PipelineRegs::TriangleTopology::List:
     case PipelineRegs::TriangleTopology::Shader:
diff --git a/src/video_core/primitive_assembly.h b/src/video_core/primitive_assembly.h
index 12de8e3b9..2ecbe6742 100644
--- a/src/video_core/primitive_assembly.h
+++ b/src/video_core/primitive_assembly.h
@@ -27,7 +27,7 @@ struct PrimitiveAssembler {
      * NOTE: We could specify the triangle handler in the constructor, but this way we can
      * keep event and handler code next to each other.
      */
-    void SubmitVertex(const VertexType& vtx, TriangleHandler triangle_handler);
+    void SubmitVertex(const VertexType& vtx, const TriangleHandler& triangle_handler);
 
     /**
      * Invert the vertex order of the next triangle. Called by geometry shader emitter.
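`TriangleHandler` is a `std::function`; taking it by value meant copying the bound rasterizer callable on every submitted vertex. Passing by const reference avoids that per-vertex copy. A standalone illustration of the difference (not part of the diff):

```cpp
#include <functional>

using Handler = std::function<void(int)>;

void ByValue(Handler h) { // constructs a new function object on every call
    h(1);
}

void ByConstRef(const Handler& h) { // binds to the caller's object, no copy
    h(1);
}

int main() {
    int hits = 0;
    Handler h = [&hits](int n) { hits += n; };
    for (int i = 0; i < 3; ++i)
        ByConstRef(h); // same handler reused without per-call copies
    ByValue(h);
    return hits == 4 ? 0 : 1;
}
```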
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index d2db44629..4ed78d363 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -29,7 +29,7 @@ MICROPROFILE_DEFINE(OpenGL_Drawing, "OpenGL", "Drawing", MP_RGB(128, 128, 192));
 MICROPROFILE_DEFINE(OpenGL_Blits, "OpenGL", "Blits", MP_RGB(100, 100, 255));
 MICROPROFILE_DEFINE(OpenGL_CacheManagement, "OpenGL", "Cache Mgmt", MP_RGB(100, 255, 100));
 
-RasterizerOpenGL::RasterizerOpenGL() : shader_dirty(true) {
+RasterizerOpenGL::RasterizerOpenGL() : shader_dirty(true), vertex_buffer_size(0) {
     // Clipping plane 0 is always enabled for PICA fixed clip plane z <= 0
     state.clip_distance[0] = true;
 
@@ -277,23 +277,23 @@ void RasterizerOpenGL::DrawTriangles() {
     state.Apply();
 
     glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
-                          color_surface != nullptr ? color_surface->texture.handle : 0, 0);
+                           color_surface != nullptr ? color_surface->texture.handle : 0, 0);
 
     if (depth_surface != nullptr) {
         if (has_stencil) {
             // attach both depth and stencil
             glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
-                                  depth_surface->texture.handle, 0);
+                                   depth_surface->texture.handle, 0);
         } else {
             // attach depth
             glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D,
-                                  depth_surface->texture.handle, 0);
+                                   depth_surface->texture.handle, 0);
             // clear stencil attachment
             glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0,
                                    0);
         }
     } else {
         // clear both depth and stencil attachment
         glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0,
-                              0);
+                               0);
     }
 
     // Sync the viewport
@@ -405,7 +405,7 @@ void RasterizerOpenGL::DrawTriangles() {
     // Sync the uniform data
     if (uniform_block_data.dirty) {
         glBufferData(GL_UNIFORM_BUFFER, sizeof(UniformData), &uniform_block_data.data,
-                    GL_STATIC_DRAW);
+                     GL_STATIC_DRAW);
         uniform_block_data.dirty = false;
     }
 
@@ -421,15 +421,18 @@ void RasterizerOpenGL::DrawTriangles() {
     state.Apply();
 
     // Draw the vertex batch
-    glBufferData(GL_ARRAY_BUFFER, vertex_batch.size() * sizeof(HardwareVertex), vertex_batch.data(),
-                 GL_STREAM_DRAW);
-    glDrawArrays(GL_TRIANGLES, 0, (GLsizei)vertex_batch.size());
+    GLsizeiptr target_size = vertex_batch.size() * sizeof(HardwareVertex);
+    if (vertex_buffer_size < target_size) {
+        vertex_buffer_size = target_size * 2;
+        glBufferData(GL_ARRAY_BUFFER, vertex_buffer_size, nullptr, GL_STREAM_DRAW);
+    }
+    glBufferSubData(GL_ARRAY_BUFFER, 0, target_size, vertex_batch.data());
+    glDrawArrays(GL_TRIANGLES, 0, static_cast<GLsizei>(vertex_batch.size()));
+    vertex_batch.clear();
 
     // Disable scissor test
     state.scissor.enabled = false;
 
-    vertex_batch.clear();
-
     // Unbind textures for potential future use as framebuffer attachments
     for (unsigned texture_index = 0; texture_index < pica_textures.size(); ++texture_index) {
         state.texture_units[texture_index].texture_2d = 0;
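Instead of re-specifying the vertex buffer with `glBufferData` on every draw, the rasterizer now tracks the allocated size, grows it geometrically (2x) only when a batch outgrows it, and streams the data into the existing storage with `glBufferSubData`. A standalone sketch of the strategy (assumes an already-initialized GL context and loader, e.g. glad; the helper name is illustrative):

```cpp
#include <cstddef>
// Assumes an OpenGL loader such as glad has been initialized and a buffer is
// bound to GL_ARRAY_BUFFER before this is called.
#include <glad/glad.h>

static GLsizeiptr buffer_size = 0;

void UploadBatch(const void* data, std::size_t bytes) {
    const GLsizeiptr target_size = static_cast<GLsizeiptr>(bytes);
    if (buffer_size < target_size) {
        // Grow by 2x so steadily increasing batch sizes don't re-allocate every frame
        buffer_size = target_size * 2;
        glBufferData(GL_ARRAY_BUFFER, buffer_size, nullptr, GL_STREAM_DRAW);
    }
    // Upload into the existing storage; no re-specification on the common path
    glBufferSubData(GL_ARRAY_BUFFER, 0, target_size, data);
}
```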
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 18808b1e4..d1f6c093d 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -284,6 +284,7 @@ private:
     std::array<SamplerInfo, 3> texture_samplers;
     OGLVertexArray vertex_array;
     OGLBuffer vertex_buffer;
+    GLsizeiptr vertex_buffer_size;
     OGLBuffer uniform_buffer;
     OGLFramebuffer framebuffer;