Mirror of https://github.com/yuzu-emu/yuzu.git (synced 2024-12-25 16:40:05 +00:00)

Merge pull request #10155 from FernandoS27/reactive-flushing-new
Y.F.C. bring back Reactive Flushing

Commit 2688fb1aa2
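In brief: this change reintroduces "reactive flushing". Instead of unconditionally flushing GPU caches on every CPU read of guest memory, the CPU side now asks the rasterizer for a page-aligned download area around the address (GPU::OnCPURead / RasterizerInterface::GetFlushArea), caches that area per host core, and only waits on the GPU when the area is not already known to be safe. The hunks below add the use_reactive_flushing setting, the RasterizerDownloadArea type, a new Preflushable page-state plane in the memory tracker, and GetFlushArea implementations for the OpenGL, Vulkan, and null rasterizers.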
@@ -62,6 +62,7 @@ void LogSettings() {
     log_setting("Renderer_AccelerateASTC", values.accelerate_astc.GetValue());
     log_setting("Renderer_AsyncASTC", values.async_astc.GetValue());
     log_setting("Renderer_UseVsync", values.vsync_mode.GetValue());
+    log_setting("Renderer_UseReactiveFlushing", values.use_reactive_flushing.GetValue());
    log_setting("Renderer_ShaderBackend", values.shader_backend.GetValue());
    log_setting("Renderer_UseAsynchronousShaders", values.use_asynchronous_shaders.GetValue());
    log_setting("Renderer_AnisotropicFilteringLevel", values.max_anisotropy.GetValue());
@@ -223,6 +224,7 @@ void RestoreGlobalState(bool is_powered_on) {
     values.nvdec_emulation.SetGlobal(true);
     values.accelerate_astc.SetGlobal(true);
     values.async_astc.SetGlobal(true);
+    values.use_reactive_flushing.SetGlobal(true);
     values.shader_backend.SetGlobal(true);
     values.use_asynchronous_shaders.SetGlobal(true);
     values.use_fast_gpu_time.SetGlobal(true);
@@ -465,6 +465,7 @@ struct Values {
     SwitchableSetting<bool> async_astc{false, "async_astc"};
     Setting<VSyncMode, true> vsync_mode{VSyncMode::FIFO, VSyncMode::Immediate,
                                         VSyncMode::FIFORelaxed, "use_vsync"};
+    SwitchableSetting<bool> use_reactive_flushing{true, "use_reactive_flushing"};
     SwitchableSetting<ShaderBackend, true> shader_backend{ShaderBackend::GLSL, ShaderBackend::GLSL,
                                                           ShaderBackend::SPIRV, "shader_backend"};
     SwitchableSetting<bool> use_asynchronous_shaders{false, "use_asynchronous_shaders"};
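The new SwitchableSetting defaults to true and, like the neighboring renderer settings, participates in per-game configuration (SetGlobal in RestoreGlobalState above) and launch logging (LogSettings).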
@@ -612,6 +612,10 @@ void System::PrepareReschedule(const u32 core_index) {
     impl->kernel.PrepareReschedule(core_index);
 }
 
+size_t System::GetCurrentHostThreadID() const {
+    return impl->kernel.GetCurrentHostThreadID();
+}
+
 PerfStatsResults System::GetAndResetPerfStats() {
     return impl->GetAndResetPerfStats();
 }
@@ -222,6 +222,8 @@ public:
     /// Prepare the core emulation for a reschedule
     void PrepareReschedule(u32 core_index);
 
+    [[nodiscard]] size_t GetCurrentHostThreadID() const;
+
     /// Gets and resets core performance statistics
     [[nodiscard]] PerfStatsResults GetAndResetPerfStats();
 
@@ -13,10 +13,12 @@
 #include "common/swap.h"
 #include "core/core.h"
 #include "core/device_memory.h"
+#include "core/hardware_properties.h"
 #include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_process.h"
 #include "core/memory.h"
 #include "video_core/gpu.h"
+#include "video_core/rasterizer_download_area.h"
 
 namespace Core::Memory {
 
@@ -243,7 +245,7 @@ struct Memory::Impl {
             [&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
                 const u8* const host_ptr) {
                 if constexpr (!UNSAFE) {
-                    system.GPU().FlushRegion(GetInteger(current_vaddr), copy_amount);
+                    HandleRasterizerDownload(GetInteger(current_vaddr), copy_amount);
                 }
                 std::memcpy(dest_buffer, host_ptr, copy_amount);
             },
@@ -334,7 +336,7 @@ struct Memory::Impl {
             },
             [&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
                 u8* const host_ptr) {
-                system.GPU().FlushRegion(GetInteger(current_vaddr), copy_amount);
+                HandleRasterizerDownload(GetInteger(current_vaddr), copy_amount);
                 WriteBlockImpl<false>(process, dest_addr, host_ptr, copy_amount);
             },
             [&](const std::size_t copy_amount) {
@@ -373,7 +375,7 @@ struct Memory::Impl {
                                  const std::size_t block_size) {
             // dc ivac: Invalidate to point of coherency
             // GPU flush -> CPU invalidate
-            system.GPU().FlushRegion(GetInteger(current_vaddr), block_size);
+            HandleRasterizerDownload(GetInteger(current_vaddr), block_size);
         };
         return PerformCacheOperation(process, dest_addr, size, on_rasterizer);
     }
@@ -462,7 +464,8 @@ struct Memory::Impl {
         }
 
         if (Settings::IsFastmemEnabled()) {
-            const bool is_read_enable = !Settings::IsGPULevelExtreme() || !cached;
+            const bool is_read_enable =
+                !Settings::values.use_reactive_flushing.GetValue() || !cached;
             system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached);
         }
 
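Note the behavioral shift here: read protection on fastmem pages previously depended on GPU accuracy being Extreme; now any region tracked by GPU caches ("cached") is made non-readable whenever reactive flushing is enabled, so a CPU read faults into the slow path and lands in HandleRasterizerDownload below, flushing only what was actually touched.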
@@ -651,7 +654,7 @@ struct Memory::Impl {
                 LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8,
                           GetInteger(vaddr));
             },
-            [&]() { system.GPU().FlushRegion(GetInteger(vaddr), sizeof(T)); });
+            [&]() { HandleRasterizerDownload(GetInteger(vaddr), sizeof(T)); });
         if (ptr) {
             std::memcpy(&result, ptr, sizeof(T));
         }
@@ -712,7 +715,19 @@ struct Memory::Impl {
         return true;
     }
 
+    void HandleRasterizerDownload(VAddr address, size_t size) {
+        const size_t core = system.GetCurrentHostThreadID();
+        auto& current_area = rasterizer_areas[core];
+        const VAddr end_address = address + size;
+        if (current_area.start_address <= address && end_address <= current_area.end_address)
+            [[likely]] {
+            return;
+        }
+        current_area = system.GPU().OnCPURead(address, size);
+    }
+
     Common::PageTable* current_page_table = nullptr;
+    std::array<VideoCore::RasterizerDownloadArea, Core::Hardware::NUM_CPU_CORES> rasterizer_areas{};
     Core::System& system;
 };
 
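For context, a minimal sketch of the per-core area cache implemented by HandleRasterizerDownload above — hypothetical names, with the GPU query stubbed out (not yuzu's API):

#include <array>
#include <cstddef>
#include <cstdint>
#include <functional>

using VAddr = std::uint64_t;

struct DownloadArea {
    VAddr start_address = 0;
    VAddr end_address = 0;
};

// One cached area per host thread/core: repeated reads inside the same area
// skip the (expensive) GPU query entirely.
template <std::size_t NumCores>
class AreaCache {
public:
    explicit AreaCache(std::function<DownloadArea(VAddr, std::size_t)> query)
        : query_{std::move(query)} {}

    void OnCpuRead(std::size_t core, VAddr address, std::size_t size) {
        DownloadArea& current = areas_[core];
        const VAddr end = address + size;
        if (current.start_address <= address && end <= current.end_address) {
            return; // fast path: already flushed/known-safe region
        }
        current = query_(address, size); // slow path: ask the GPU once
    }

private:
    std::function<DownloadArea(VAddr, std::size_t)> query_;
    std::array<DownloadArea, NumCores> areas_{};
};

int main() {
    int queries = 0;
    AreaCache<4> cache{[&](VAddr addr, std::size_t size) {
        ++queries;
        // Pretend the GPU reports a page-aligned area covering the request.
        return DownloadArea{addr & ~VAddr{0xFFF}, (addr + size + 0xFFF) & ~VAddr{0xFFF}};
    }};
    cache.OnCpuRead(0, 0x1000, 16); // misses, queries the "GPU"
    cache.OnCpuRead(0, 0x1008, 8);  // hits the cached area
    return queries == 1 ? 0 : 1;
}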
@@ -535,12 +535,12 @@ TEST_CASE("MemoryTracker: Cached write downloads") {
     memory_track->MarkRegionAsGpuModified(c + PAGE, PAGE);
     int num = 0;
     memory_track->ForEachDownloadRangeAndClear(c, WORD, [&](u64 offset, u64 size) { ++num; });
-    REQUIRE(num == 1);
+    REQUIRE(num == 0);
     num = 0;
     memory_track->ForEachUploadRange(c, WORD, [&](u64 offset, u64 size) { ++num; });
     REQUIRE(num == 0);
     REQUIRE(!memory_track->IsRegionCpuModified(c + PAGE, PAGE));
-    REQUIRE(!memory_track->IsRegionGpuModified(c + PAGE, PAGE));
+    REQUIRE(memory_track->IsRegionGpuModified(c + PAGE, PAGE));
     memory_track->FlushCachedWrites();
     REQUIRE(memory_track->IsRegionCpuModified(c + PAGE, PAGE));
     REQUIRE(!memory_track->IsRegionGpuModified(c + PAGE, PAGE));
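The updated expectations encode the new tracker semantics: a cached CPU write over a GPU-modified page no longer surfaces as an immediate download range, and the page keeps its GPU-modified state until FlushCachedWrites converts the pending write into a real CPU modification.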
@@ -18,6 +18,7 @@ namespace VideoCommon {
 enum class BufferFlagBits {
     Picked = 1 << 0,
     CachedWrites = 1 << 1,
+    PreemtiveDownload = 1 << 2,
 };
 DECLARE_ENUM_FLAG_OPERATORS(BufferFlagBits)
 
@@ -54,6 +55,10 @@ public:
         flags |= BufferFlagBits::Picked;
     }
 
+    void MarkPreemtiveDownload() noexcept {
+        flags |= BufferFlagBits::PreemtiveDownload;
+    }
+
     /// Unmark buffer as picked
     void Unpick() noexcept {
         flags &= ~BufferFlagBits::Picked;
@@ -84,6 +89,10 @@ public:
         return True(flags & BufferFlagBits::CachedWrites);
     }
 
+    bool IsPreemtiveDownload() const noexcept {
+        return True(flags & BufferFlagBits::PreemtiveDownload);
+    }
+
     /// Returns the base CPU address of the buffer
     [[nodiscard]] VAddr CpuAddr() const noexcept {
         return cpu_addr;
@@ -23,8 +23,6 @@ BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
     common_ranges.clear();
     inline_buffer_id = NULL_BUFFER_ID;
 
-    active_async_buffers = !Settings::IsGPULevelHigh();
-
     if (!runtime.CanReportMemoryUsage()) {
         minimum_memory = DEFAULT_EXPECTED_MEMORY;
         critical_memory = DEFAULT_CRITICAL_MEMORY;
@@ -75,8 +73,6 @@ void BufferCache<P>::TickFrame() {
     uniform_cache_hits[0] = 0;
     uniform_cache_shots[0] = 0;
 
-    active_async_buffers = !Settings::IsGPULevelHigh();
-
     const bool skip_preferred = hits * 256 < shots * 251;
     uniform_buffer_skip_cache_size = skip_preferred ? DEFAULT_SKIP_CACHE_SIZE : 0;
 
@@ -111,9 +107,25 @@ void BufferCache<P>::WriteMemory(VAddr cpu_addr, u64 size) {
 template <class P>
 void BufferCache<P>::CachedWriteMemory(VAddr cpu_addr, u64 size) {
     memory_tracker.CachedCpuWrite(cpu_addr, size);
-    const IntervalType add_interval{Common::AlignDown(cpu_addr, YUZU_PAGESIZE),
-                                    Common::AlignUp(cpu_addr + size, YUZU_PAGESIZE)};
-    cached_ranges.add(add_interval);
+}
+
+template <class P>
+std::optional<VideoCore::RasterizerDownloadArea> BufferCache<P>::GetFlushArea(VAddr cpu_addr,
+                                                                              u64 size) {
+    std::optional<VideoCore::RasterizerDownloadArea> area{};
+    area.emplace();
+    VAddr cpu_addr_start_aligned = Common::AlignDown(cpu_addr, Core::Memory::YUZU_PAGESIZE);
+    VAddr cpu_addr_end_aligned = Common::AlignUp(cpu_addr + size, Core::Memory::YUZU_PAGESIZE);
+    area->start_address = cpu_addr_start_aligned;
+    area->end_address = cpu_addr_end_aligned;
+    if (memory_tracker.IsRegionPreflushable(cpu_addr, size)) {
+        area->preemtive = true;
+        return area;
+    };
+    memory_tracker.MarkRegionAsPreflushable(cpu_addr_start_aligned,
+                                            cpu_addr_end_aligned - cpu_addr_start_aligned);
+    area->preemtive = !IsRegionGpuModified(cpu_addr, size);
+    return area;
 }
 
 template <class P>
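GetFlushArea always reports page-aligned bounds, so one download covers every byte a CPU access could touch on those pages; the Preflushable marking then lets a second query over the same region skip the GPU wait. A minimal, self-contained illustration of the alignment step (assuming a 4 KiB page, the value Core::Memory::YUZU_PAGESIZE takes on the Switch):

#include <cassert>
#include <cstdint>

using VAddr = std::uint64_t;
constexpr VAddr PAGE_SIZE = 0x1000; // assumed 4 KiB guest page

// Power-of-two alignment helpers, equivalent in effect to Common::AlignDown/AlignUp.
constexpr VAddr AlignDown(VAddr value, VAddr align) {
    return value & ~(align - 1);
}
constexpr VAddr AlignUp(VAddr value, VAddr align) {
    return AlignDown(value + align - 1, align);
}

int main() {
    // A 0x20-byte read at 0x1234 expands to the whole containing page.
    const VAddr start = AlignDown(0x1234, PAGE_SIZE);
    const VAddr end = AlignUp(0x1234 + 0x20, PAGE_SIZE);
    assert(start == 0x1000 && end == 0x2000);
    return 0;
}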
@@ -205,7 +217,7 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
     if (has_new_downloads) {
         memory_tracker.MarkRegionAsGpuModified(*cpu_dest_address, amount);
     }
-    std::vector<u8> tmp_buffer(amount);
+    tmp_buffer.resize(amount);
     cpu_memory.ReadBlockUnsafe(*cpu_src_address, tmp_buffer.data(), amount);
     cpu_memory.WriteBlockUnsafe(*cpu_dest_address, tmp_buffer.data(), amount);
     return true;
@@ -441,9 +453,7 @@ void BufferCache<P>::BindComputeTextureBuffer(size_t tbo_index, GPUVAddr gpu_add
 
 template <class P>
 void BufferCache<P>::FlushCachedWrites() {
-    cached_write_buffer_ids.clear();
     memory_tracker.FlushCachedWrites();
-    cached_ranges.clear();
 }
 
 template <class P>
@@ -474,9 +484,8 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
 
     if (committed_ranges.empty()) {
         if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
-            if (active_async_buffers) {
-                async_buffers.emplace_back(std::optional<Async_Buffer>{});
-            }
+            async_buffers.emplace_back(std::optional<Async_Buffer>{});
         }
         return;
     }
@@ -537,64 +546,65 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
     committed_ranges.clear();
     if (downloads.empty()) {
         if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
-            if (active_async_buffers) {
-                async_buffers.emplace_back(std::optional<Async_Buffer>{});
-            }
+            async_buffers.emplace_back(std::optional<Async_Buffer>{});
         }
         return;
     }
-    if (active_async_buffers) {
-        if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
-            auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes, true);
-            boost::container::small_vector<BufferCopy, 4> normalized_copies;
-            IntervalSet new_async_range{};
-            runtime.PreCopyBarrier();
-            for (auto& [copy, buffer_id] : downloads) {
-                copy.dst_offset += download_staging.offset;
-                const std::array copies{copy};
-                BufferCopy second_copy{copy};
-                Buffer& buffer = slot_buffers[buffer_id];
-                second_copy.src_offset = static_cast<size_t>(buffer.CpuAddr()) + copy.src_offset;
-                VAddr orig_cpu_addr = static_cast<VAddr>(second_copy.src_offset);
-                const IntervalType base_interval{orig_cpu_addr, orig_cpu_addr + copy.size};
-                async_downloads += std::make_pair(base_interval, 1);
-                runtime.CopyBuffer(download_staging.buffer, buffer, copies, false);
-                normalized_copies.push_back(second_copy);
-            }
-            runtime.PostCopyBarrier();
-            pending_downloads.emplace_back(std::move(normalized_copies));
-            async_buffers.emplace_back(download_staging);
-        } else {
-            committed_ranges.clear();
-            uncommitted_ranges.clear();
-        }
-    } else {
-        if constexpr (USE_MEMORY_MAPS) {
-            auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes);
-            runtime.PreCopyBarrier();
-            for (auto& [copy, buffer_id] : downloads) {
-                // Have in mind the staging buffer offset for the copy
-                copy.dst_offset += download_staging.offset;
-                const std::array copies{copy};
-                runtime.CopyBuffer(download_staging.buffer, slot_buffers[buffer_id], copies, false);
-            }
-            runtime.PostCopyBarrier();
-            runtime.Finish();
-            for (const auto& [copy, buffer_id] : downloads) {
-                const Buffer& buffer = slot_buffers[buffer_id];
-                const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
-                // Undo the modified offset
-                const u64 dst_offset = copy.dst_offset - download_staging.offset;
-                const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset;
-                cpu_memory.WriteBlockUnsafe(cpu_addr, read_mapped_memory, copy.size);
-            }
-        } else {
-            const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
-            for (const auto& [copy, buffer_id] : downloads) {
-                Buffer& buffer = slot_buffers[buffer_id];
-                buffer.ImmediateDownload(copy.src_offset, immediate_buffer.subspan(0, copy.size));
-                const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
-                cpu_memory.WriteBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size);
-            }
+    if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
+        auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes, true);
+        boost::container::small_vector<BufferCopy, 4> normalized_copies;
+        IntervalSet new_async_range{};
+        runtime.PreCopyBarrier();
+        for (auto& [copy, buffer_id] : downloads) {
+            copy.dst_offset += download_staging.offset;
+            const std::array copies{copy};
+            BufferCopy second_copy{copy};
+            Buffer& buffer = slot_buffers[buffer_id];
+            second_copy.src_offset = static_cast<size_t>(buffer.CpuAddr()) + copy.src_offset;
+            VAddr orig_cpu_addr = static_cast<VAddr>(second_copy.src_offset);
+            const IntervalType base_interval{orig_cpu_addr, orig_cpu_addr + copy.size};
+            async_downloads += std::make_pair(base_interval, 1);
+            runtime.CopyBuffer(download_staging.buffer, buffer, copies, false);
+            normalized_copies.push_back(second_copy);
+        }
+        runtime.PostCopyBarrier();
+        pending_downloads.emplace_back(std::move(normalized_copies));
+        async_buffers.emplace_back(download_staging);
+    } else {
+        if (!Settings::IsGPULevelHigh()) {
+            committed_ranges.clear();
+            uncommitted_ranges.clear();
+        } else {
+            if constexpr (USE_MEMORY_MAPS) {
+                auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes);
+                runtime.PreCopyBarrier();
+                for (auto& [copy, buffer_id] : downloads) {
+                    // Have in mind the staging buffer offset for the copy
+                    copy.dst_offset += download_staging.offset;
+                    const std::array copies{copy};
+                    runtime.CopyBuffer(download_staging.buffer, slot_buffers[buffer_id], copies,
+                                       false);
+                }
+                runtime.PostCopyBarrier();
+                runtime.Finish();
+                for (const auto& [copy, buffer_id] : downloads) {
+                    const Buffer& buffer = slot_buffers[buffer_id];
+                    const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
+                    // Undo the modified offset
+                    const u64 dst_offset = copy.dst_offset - download_staging.offset;
+                    const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset;
+                    cpu_memory.WriteBlockUnsafe(cpu_addr, read_mapped_memory, copy.size);
+                }
+            } else {
+                const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
+                for (const auto& [copy, buffer_id] : downloads) {
+                    Buffer& buffer = slot_buffers[buffer_id];
+                    buffer.ImmediateDownload(copy.src_offset,
+                                             immediate_buffer.subspan(0, copy.size));
+                    const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
+                    cpu_memory.WriteBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size);
+                }
+            }
         }
     }
 }
@@ -1629,7 +1639,6 @@ void BufferCache<P>::DeleteBuffer(BufferId buffer_id, bool do_not_mark) {
     replace(transform_feedback_buffers);
     replace(compute_uniform_buffers);
     replace(compute_storage_buffers);
-    std::erase(cached_write_buffer_ids, buffer_id);
 
     // Mark the whole buffer as CPU written to stop tracking CPU writes
     if (!do_not_mark) {
|
@ -188,6 +188,8 @@ public:
|
|||||||
|
|
||||||
void DownloadMemory(VAddr cpu_addr, u64 size);
|
void DownloadMemory(VAddr cpu_addr, u64 size);
|
||||||
|
|
||||||
|
std::optional<VideoCore::RasterizerDownloadArea> GetFlushArea(VAddr cpu_addr, u64 size);
|
||||||
|
|
||||||
bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer);
|
bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer);
|
||||||
|
|
||||||
void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size);
|
void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size);
|
||||||
@@ -541,8 +543,6 @@ private:
                std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES>, Empty>
         uniform_buffer_binding_sizes{};
 
-    std::vector<BufferId> cached_write_buffer_ids;
-
     MemoryTracker memory_tracker;
     IntervalSet uncommitted_ranges;
     IntervalSet common_ranges;
@@ -572,9 +572,8 @@ private:
     u64 critical_memory = 0;
     BufferId inline_buffer_id;
 
-    bool active_async_buffers = false;
-
     std::array<BufferId, ((1ULL << 39) >> CACHING_PAGEBITS)> page_table;
+    std::vector<u8> tmp_buffer;
 };
 
 } // namespace VideoCommon
@@ -66,6 +66,14 @@ public:
         });
     }
 
+    /// Returns true if a region has been marked as Preflushable
+    [[nodiscard]] bool IsRegionPreflushable(VAddr query_cpu_addr, u64 query_size) noexcept {
+        return IteratePages<false>(
+            query_cpu_addr, query_size, [](Manager* manager, u64 offset, size_t size) {
+                return manager->template IsRegionModified<Type::Preflushable>(offset, size);
+            });
+    }
+
     /// Mark region as CPU modified, notifying the rasterizer about this change
     void MarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 query_size) {
         IteratePages<true>(dirty_cpu_addr, query_size,
@@ -93,6 +101,15 @@ public:
         });
     }
 
+    /// Mark region as modified from the host GPU
+    void MarkRegionAsPreflushable(VAddr dirty_cpu_addr, u64 query_size) noexcept {
+        IteratePages<true>(dirty_cpu_addr, query_size,
+                           [](Manager* manager, u64 offset, size_t size) {
+                               manager->template ChangeRegionState<Type::Preflushable, true>(
+                                   manager->GetCpuAddr() + offset, size);
+                           });
+    }
+
     /// Unmark region as modified from the host GPU
     void UnmarkRegionAsGpuModified(VAddr dirty_cpu_addr, u64 query_size) noexcept {
         IteratePages<true>(dirty_cpu_addr, query_size,
@@ -102,6 +119,15 @@ public:
         });
     }
 
+    /// Unmark region as modified from the host GPU
+    void UnmarkRegionAsPreflushable(VAddr dirty_cpu_addr, u64 query_size) noexcept {
+        IteratePages<true>(dirty_cpu_addr, query_size,
+                           [](Manager* manager, u64 offset, size_t size) {
+                               manager->template ChangeRegionState<Type::Preflushable, false>(
+                                   manager->GetCpuAddr() + offset, size);
+                           });
+    }
+
     /// Mark region as modified from the CPU
     /// but don't mark it as modified until FlusHCachedWrites is called.
     void CachedCpuWrite(VAddr dirty_cpu_addr, u64 query_size) {
@@ -26,6 +26,7 @@ enum class Type {
     GPU,
     CachedCPU,
     Untracked,
+    Preflushable,
 };
 
 /// Vector tracking modified pages tightly packed with small vector optimization
@@ -55,17 +56,20 @@ struct Words {
             gpu.stack.fill(0);
             cached_cpu.stack.fill(0);
             untracked.stack.fill(~u64{0});
+            preflushable.stack.fill(0);
         } else {
             // Share allocation between CPU and GPU pages and set their default values
-            u64* const alloc = new u64[num_words * 4];
+            u64* const alloc = new u64[num_words * 5];
             cpu.heap = alloc;
             gpu.heap = alloc + num_words;
             cached_cpu.heap = alloc + num_words * 2;
             untracked.heap = alloc + num_words * 3;
+            preflushable.heap = alloc + num_words * 4;
             std::fill_n(cpu.heap, num_words, ~u64{0});
             std::fill_n(gpu.heap, num_words, 0);
             std::fill_n(cached_cpu.heap, num_words, 0);
             std::fill_n(untracked.heap, num_words, ~u64{0});
+            std::fill_n(preflushable.heap, num_words, 0);
         }
         // Clean up tailing bits
         const u64 last_word_size = size_bytes % BYTES_PER_WORD;
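The constructor above packs five page-state bit-planes (CPU, GPU, CachedCPU, Untracked, and the new Preflushable) into a single allocation. A simplified model of that layout using unique_ptr in place of the raw new[] (illustrative only, not yuzu's Words type):

#include <algorithm>
#include <cstdint>
#include <memory>

using u64 = std::uint64_t;

// One heap block holds all five planes back to back, so a single
// allocation/deallocation manages every tracking bitset.
struct PagePlanes {
    explicit PagePlanes(std::size_t num_words)
        : storage{std::make_unique<u64[]>(num_words * 5)}, words{num_words} {
        cpu = storage.get();
        gpu = cpu + num_words;
        cached_cpu = gpu + num_words;
        untracked = cached_cpu + num_words;
        preflushable = untracked + num_words;
        // Defaults mirror the diff: CPU-dirty and untracked start all-ones,
        // the GPU/cached/preflushable planes start cleared.
        std::fill_n(cpu, num_words, ~u64{0});
        std::fill_n(gpu, num_words, u64{0});
        std::fill_n(cached_cpu, num_words, u64{0});
        std::fill_n(untracked, num_words, ~u64{0});
        std::fill_n(preflushable, num_words, u64{0});
    }

    std::unique_ptr<u64[]> storage;
    std::size_t words;
    u64* cpu;
    u64* gpu;
    u64* cached_cpu;
    u64* untracked;
    u64* preflushable;
};

int main() {
    PagePlanes planes{8};
    return planes.preflushable[0] == 0 ? 0 : 1;
}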
@@ -88,13 +92,14 @@ struct Words {
         gpu = rhs.gpu;
         cached_cpu = rhs.cached_cpu;
         untracked = rhs.untracked;
+        preflushable = rhs.preflushable;
         rhs.cpu.heap = nullptr;
         return *this;
     }
 
     Words(Words&& rhs) noexcept
         : size_bytes{rhs.size_bytes}, num_words{rhs.num_words}, cpu{rhs.cpu}, gpu{rhs.gpu},
-          cached_cpu{rhs.cached_cpu}, untracked{rhs.untracked} {
+          cached_cpu{rhs.cached_cpu}, untracked{rhs.untracked}, preflushable{rhs.preflushable} {
         rhs.cpu.heap = nullptr;
     }
 
@@ -129,6 +134,8 @@ struct Words {
             return std::span<u64>(cached_cpu.Pointer(IsShort()), num_words);
         } else if constexpr (type == Type::Untracked) {
             return std::span<u64>(untracked.Pointer(IsShort()), num_words);
+        } else if constexpr (type == Type::Preflushable) {
+            return std::span<u64>(preflushable.Pointer(IsShort()), num_words);
         }
     }
 
@@ -142,6 +149,8 @@ struct Words {
             return std::span<const u64>(cached_cpu.Pointer(IsShort()), num_words);
         } else if constexpr (type == Type::Untracked) {
             return std::span<const u64>(untracked.Pointer(IsShort()), num_words);
+        } else if constexpr (type == Type::Preflushable) {
+            return std::span<const u64>(preflushable.Pointer(IsShort()), num_words);
         }
     }
 
@@ -151,6 +160,7 @@ struct Words {
     WordsArray<stack_words> gpu;
    WordsArray<stack_words> cached_cpu;
    WordsArray<stack_words> untracked;
+    WordsArray<stack_words> preflushable;
 };
 
 template <class RasterizerInterface, size_t stack_words = 1>
@@ -292,6 +302,9 @@ public:
                            (pending_pointer - pending_offset) * BYTES_PER_PAGE);
         };
         IterateWords(offset, size, [&](size_t index, u64 mask) {
+            if constexpr (type == Type::GPU) {
+                mask &= ~untracked_words[index];
+            }
             const u64 word = state_words[index] & mask;
             if constexpr (clear) {
                 if constexpr (type == Type::CPU || type == Type::CachedCPU) {
@@ -340,8 +353,13 @@ public:
         static_assert(type != Type::Untracked);
 
         const std::span<const u64> state_words = words.template Span<type>();
+        [[maybe_unused]] const std::span<const u64> untracked_words =
+            words.template Span<Type::Untracked>();
         bool result = false;
         IterateWords(offset, size, [&](size_t index, u64 mask) {
+            if constexpr (type == Type::GPU) {
+                mask &= ~untracked_words[index];
+            }
             const u64 word = state_words[index] & mask;
             if (word != 0) {
                 result = true;
@@ -362,9 +380,14 @@ public:
     [[nodiscard]] std::pair<u64, u64> ModifiedRegion(u64 offset, u64 size) const noexcept {
         static_assert(type != Type::Untracked);
         const std::span<const u64> state_words = words.template Span<type>();
+        [[maybe_unused]] const std::span<const u64> untracked_words =
+            words.template Span<Type::Untracked>();
         u64 begin = std::numeric_limits<u64>::max();
         u64 end = 0;
         IterateWords(offset, size, [&](size_t index, u64 mask) {
+            if constexpr (type == Type::GPU) {
+                mask &= ~untracked_words[index];
+            }
             const u64 word = state_words[index] & mask;
             if (word == 0) {
                 return;
@@ -223,7 +223,7 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
     write_buffer.resize_destructive(dst_size);
 
     memory_manager.ReadBlock(src_operand.address, read_buffer.data(), src_size);
-    memory_manager.ReadBlockUnsafe(dst_operand.address, write_buffer.data(), dst_size);
+    memory_manager.ReadBlock(dst_operand.address, write_buffer.data(), dst_size);
 
     UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
                      src_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
@@ -288,11 +288,7 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
     write_buffer.resize_destructive(dst_size);
 
     memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
-    if (Settings::IsGPULevelExtreme()) {
-        memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
-    } else {
-        memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
-    }
+    memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
 
     // If the input is linear and the output is tiled, swizzle the input and copy it over.
     SwizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
@@ -59,6 +59,11 @@ public:
         buffer_cache.AccumulateFlushes();
     }
 
+    void SignalReference() {
+        std::function<void()> do_nothing([] {});
+        SignalFence(std::move(do_nothing));
+    }
+
     void SyncOperation(std::function<void()>&& func) {
         uncommitted_operations.emplace_back(std::move(func));
     }
@@ -283,6 +283,21 @@ struct GPU::Impl {
         gpu_thread.FlushRegion(addr, size);
     }
 
+    VideoCore::RasterizerDownloadArea OnCPURead(VAddr addr, u64 size) {
+        auto raster_area = rasterizer->GetFlushArea(addr, size);
+        if (raster_area.preemtive) {
+            return raster_area;
+        }
+        raster_area.preemtive = true;
+        const u64 fence = RequestSyncOperation([this, &raster_area]() {
+            rasterizer->FlushRegion(raster_area.start_address,
+                                    raster_area.end_address - raster_area.start_address);
+        });
+        gpu_thread.TickGPU();
+        WaitForSyncOperation(fence);
+        return raster_area;
+    }
+
     /// Notify rasterizer that any caches of the specified region should be invalidated
     void InvalidateRegion(VAddr addr, u64 size) {
         gpu_thread.InvalidateRegion(addr, size);
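OnCPURead's slow path enqueues a flush for the GPU thread and blocks on a fence. A stripped-down sketch of that request/tick/wait pattern (illustrative names, not yuzu's GPU-thread API):

#include <condition_variable>
#include <cstdint>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>

class SyncQueue {
public:
    // Enqueue an operation and return its fence value.
    std::uint64_t Request(std::function<void()> op) {
        std::scoped_lock lk{mutex};
        ops.push_back(std::move(op));
        return ++last_requested;
    }

    // Would run on the GPU thread: drain pending operations, then publish
    // the fence value the drained batch reached.
    void RunPending() {
        std::deque<std::function<void()>> batch;
        std::uint64_t target;
        {
            std::scoped_lock lk{mutex};
            batch.swap(ops);
            target = last_requested;
        }
        for (auto& op : batch) {
            op();
        }
        {
            std::scoped_lock lk{mutex};
            last_completed = target;
        }
        cv.notify_all();
    }

    // Block the caller until the given fence has been reached.
    void Wait(std::uint64_t fence) {
        std::unique_lock lk{mutex};
        cv.wait(lk, [&] { return last_completed >= fence; });
    }

private:
    std::mutex mutex;
    std::condition_variable cv;
    std::deque<std::function<void()>> ops;
    std::uint64_t last_requested = 0;
    std::uint64_t last_completed = 0;
};

int main() {
    SyncQueue queue;
    bool flushed = false;
    const auto fence = queue.Request([&] { flushed = true; });
    std::thread gpu_thread{[&] { queue.RunPending(); }};
    queue.Wait(fence);
    gpu_thread.join();
    return flushed ? 0 : 1;
}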
@@ -538,6 +553,10 @@ void GPU::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
     impl->SwapBuffers(framebuffer);
 }
 
+VideoCore::RasterizerDownloadArea GPU::OnCPURead(VAddr addr, u64 size) {
+    return impl->OnCPURead(addr, size);
+}
+
 void GPU::FlushRegion(VAddr addr, u64 size) {
     impl->FlushRegion(addr, size);
 }
@@ -10,6 +10,7 @@
 #include "core/hle/service/nvdrv/nvdata.h"
 #include "video_core/cdma_pusher.h"
 #include "video_core/framebuffer_config.h"
+#include "video_core/rasterizer_download_area.h"
 
 namespace Core {
 class System;
@@ -240,6 +241,9 @@ public:
     /// Swap buffers (render frame)
     void SwapBuffers(const Tegra::FramebufferConfig* framebuffer);
 
+    /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
+    [[nodiscard]] VideoCore::RasterizerDownloadArea OnCPURead(VAddr addr, u64 size);
+
     /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
     void FlushRegion(VAddr addr, u64 size);
 
@@ -255,7 +255,6 @@ private:
             if (!in_range(query)) {
                 continue;
             }
-            rasterizer.UpdatePagesCachedCount(query.GetCpuAddr(), query.SizeInBytes(), -1);
             AsyncJobId async_job_id = query.GetAsyncJob();
             auto flush_result = query.Flush(async);
             if (async_job_id == NULL_ASYNC_JOB_ID) {
@@ -273,7 +272,6 @@ private:
 
     /// Registers the passed parameters as cached and returns a pointer to the stored cached query.
     CachedQuery* Register(VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr, bool timestamp) {
-        rasterizer.UpdatePagesCachedCount(cpu_addr, CachedQuery::SizeInBytes(timestamp), 1);
         const u64 page = static_cast<u64>(cpu_addr) >> YUZU_PAGEBITS;
         return &cached_queries[page].emplace_back(static_cast<QueryCache&>(*this), type, cpu_addr,
                                                   host_ptr);
src/video_core/rasterizer_download_area.h (new file, 16 lines)
@@ -0,0 +1,16 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace VideoCore {
+
+struct RasterizerDownloadArea {
+    VAddr start_address;
+    VAddr end_address;
+    bool preemtive;
+};
+
+} // namespace VideoCore
@@ -12,6 +12,7 @@
 #include "video_core/cache_types.h"
 #include "video_core/engines/fermi_2d.h"
 #include "video_core/gpu.h"
+#include "video_core/rasterizer_download_area.h"
 
 namespace Tegra {
 class MemoryManager;
@@ -95,6 +96,8 @@ public:
     virtual bool MustFlushRegion(VAddr addr, u64 size,
                                  VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0;
 
+    virtual RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) = 0;
+
     /// Notify rasterizer that any caches of the specified region should be invalidated
     virtual void InvalidateRegion(VAddr addr, u64 size,
                                  VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0;
@@ -1,6 +1,8 @@
 // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
+#include "common/alignment.h"
+#include "core/memory.h"
 #include "video_core/host1x/host1x.h"
 #include "video_core/memory_manager.h"
 #include "video_core/renderer_null/null_rasterizer.h"
@@ -46,6 +48,14 @@ bool RasterizerNull::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheTyp
 }
 void RasterizerNull::InvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType) {}
 void RasterizerNull::OnCPUWrite(VAddr addr, u64 size) {}
+VideoCore::RasterizerDownloadArea RasterizerNull::GetFlushArea(VAddr addr, u64 size) {
+    VideoCore::RasterizerDownloadArea new_area{
+        .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE),
+        .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE),
+        .preemtive = true,
+    };
+    return new_area;
+}
 void RasterizerNull::InvalidateGPUCache() {}
 void RasterizerNull::UnmapMemory(VAddr addr, u64 size) {}
 void RasterizerNull::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) {}
@@ -54,6 +54,7 @@ public:
     void InvalidateRegion(VAddr addr, u64 size,
                           VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
     void OnCPUWrite(VAddr addr, u64 size) override;
+    VideoCore::RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) override;
     void InvalidateGPUCache() override;
     void UnmapMemory(VAddr addr, u64 size) override;
     void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
@@ -433,6 +433,29 @@ bool RasterizerOpenGL::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheT
     return false;
 }
 
+VideoCore::RasterizerDownloadArea RasterizerOpenGL::GetFlushArea(VAddr addr, u64 size) {
+    {
+        std::scoped_lock lock{texture_cache.mutex};
+        auto area = texture_cache.GetFlushArea(addr, size);
+        if (area) {
+            return *area;
+        }
+    }
+    {
+        std::scoped_lock lock{buffer_cache.mutex};
+        auto area = buffer_cache.GetFlushArea(addr, size);
+        if (area) {
+            return *area;
+        }
+    }
+    VideoCore::RasterizerDownloadArea new_area{
+        .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE),
+        .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE),
+        .preemtive = true,
+    };
+    return new_area;
+}
+
 void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType which) {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
     if (addr == 0 || size == 0) {
@@ -1281,7 +1304,7 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
                                        const Tegra::DMA::BufferOperand& buffer_operand,
                                        const Tegra::DMA::ImageOperand& image_operand) {
     std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
-    const auto image_id = texture_cache.DmaImageId(image_operand);
+    const auto image_id = texture_cache.DmaImageId(image_operand, IS_IMAGE_UPLOAD);
     if (image_id == VideoCommon::NULL_IMAGE_ID) {
         return false;
     }
@@ -95,6 +95,7 @@ public:
                           VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
     bool MustFlushRegion(VAddr addr, u64 size,
                          VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
+    VideoCore::RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) override;
     void InvalidateRegion(VAddr addr, u64 size,
                           VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
     void OnCPUWrite(VAddr addr, u64 size) override;
@@ -502,6 +502,22 @@ bool RasterizerVulkan::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheT
     return false;
 }
 
+VideoCore::RasterizerDownloadArea RasterizerVulkan::GetFlushArea(VAddr addr, u64 size) {
+    {
+        std::scoped_lock lock{texture_cache.mutex};
+        auto area = texture_cache.GetFlushArea(addr, size);
+        if (area) {
+            return *area;
+        }
+    }
+    VideoCore::RasterizerDownloadArea new_area{
+        .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE),
+        .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE),
+        .preemtive = true,
+    };
+    return new_area;
+}
+
 void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType which) {
     if (addr == 0 || size == 0) {
         return;
@@ -598,7 +614,7 @@ void RasterizerVulkan::SignalSyncPoint(u32 value) {
 }
 
 void RasterizerVulkan::SignalReference() {
-    fence_manager.SignalOrdering();
+    fence_manager.SignalReference();
 }
 
 void RasterizerVulkan::ReleaseFences() {
@@ -631,7 +647,7 @@ void RasterizerVulkan::WaitForIdle() {
         cmdbuf.SetEvent(event, flags);
         cmdbuf.WaitEvents(event, flags, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, {}, {}, {});
     });
-    SignalReference();
+    fence_manager.SignalOrdering();
 }
 
 void RasterizerVulkan::FragmentBarrier() {
@@ -777,7 +793,7 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
                                        const Tegra::DMA::BufferOperand& buffer_operand,
                                        const Tegra::DMA::ImageOperand& image_operand) {
     std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
-    const auto image_id = texture_cache.DmaImageId(image_operand);
+    const auto image_id = texture_cache.DmaImageId(image_operand, IS_IMAGE_UPLOAD);
     if (image_id == VideoCommon::NULL_IMAGE_ID) {
         return false;
     }
@@ -92,6 +92,7 @@ public:
                           VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
     bool MustFlushRegion(VAddr addr, u64 size,
                          VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
+    VideoCore::RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) override;
     void InvalidateRegion(VAddr addr, u64 size,
                           VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
     void InnerInvalidation(std::span<const std::pair<VAddr, std::size_t>> sequences) override;
@@ -4,6 +4,7 @@
 #include <fmt/format.h>
 
 #include "common/assert.h"
+#include "common/settings.h"
 #include "video_core/surface.h"
 #include "video_core/texture_cache/format_lookup_table.h"
 #include "video_core/texture_cache/image_info.h"
@@ -22,6 +23,8 @@ using VideoCore::Surface::PixelFormat;
 using VideoCore::Surface::SurfaceType;
 
 ImageInfo::ImageInfo(const TICEntry& config) noexcept {
+    forced_flushed = config.IsPitchLinear() && !Settings::values.use_reactive_flushing.GetValue();
+    dma_downloaded = forced_flushed;
     format = PixelFormatFromTextureInfo(config.format, config.r_type, config.g_type, config.b_type,
                                         config.a_type, config.srgb_conversion);
     num_samples = NumSamples(config.msaa_mode);
@@ -117,6 +120,9 @@ ImageInfo::ImageInfo(const TICEntry& config) noexcept {
 
 ImageInfo::ImageInfo(const Maxwell3D::Regs::RenderTargetConfig& ct,
                      Tegra::Texture::MsaaMode msaa_mode) noexcept {
+    forced_flushed =
+        ct.tile_mode.is_pitch_linear && !Settings::values.use_reactive_flushing.GetValue();
+    dma_downloaded = forced_flushed;
     format = VideoCore::Surface::PixelFormatFromRenderTargetFormat(ct.format);
     rescaleable = false;
     if (ct.tile_mode.is_pitch_linear) {
@@ -155,6 +161,9 @@ ImageInfo::ImageInfo(const Maxwell3D::Regs::RenderTargetConfig& ct,
 
 ImageInfo::ImageInfo(const Maxwell3D::Regs::Zeta& zt, const Maxwell3D::Regs::ZetaSize& zt_size,
                      Tegra::Texture::MsaaMode msaa_mode) noexcept {
+    forced_flushed =
+        zt.tile_mode.is_pitch_linear && !Settings::values.use_reactive_flushing.GetValue();
+    dma_downloaded = forced_flushed;
     format = VideoCore::Surface::PixelFormatFromDepthFormat(zt.format);
     size.width = zt_size.width;
     size.height = zt_size.height;
@@ -195,6 +204,9 @@ ImageInfo::ImageInfo(const Maxwell3D::Regs::Zeta& zt, const Maxwell3D::Regs::Zet
 
 ImageInfo::ImageInfo(const Fermi2D::Surface& config) noexcept {
     UNIMPLEMENTED_IF_MSG(config.layer != 0, "Surface layer is not zero");
+    forced_flushed = config.linear == Fermi2D::MemoryLayout::Pitch &&
+                     !Settings::values.use_reactive_flushing.GetValue();
+    dma_downloaded = forced_flushed;
     format = VideoCore::Surface::PixelFormatFromRenderTargetFormat(config.format);
     rescaleable = false;
     if (config.linear == Fermi2D::MemoryLayout::Pitch) {
@@ -39,6 +39,8 @@ struct ImageInfo {
     u32 tile_width_spacing = 0;
     bool rescaleable = false;
     bool downscaleable = false;
+    bool forced_flushed = false;
+    bool dma_downloaded = false;
 };
 
 } // namespace VideoCommon
@@ -4,7 +4,6 @@
 #include <algorithm>

 #include "common/assert.h"
-#include "common/settings.h"
 #include "video_core/compatible_formats.h"
 #include "video_core/surface.h"
 #include "video_core/texture_cache/formatter.h"
@@ -26,8 +25,7 @@ ImageViewBase::ImageViewBase(const ImageViewInfo& info, const ImageInfo& image_i
     ASSERT_MSG(VideoCore::Surface::IsViewCompatible(image_info.format, info.format, false, true),
                "Image view format {} is incompatible with image format {}", info.format,
                image_info.format);
-    const bool is_async = Settings::values.use_asynchronous_gpu_emulation.GetValue();
-    if (image_info.type == ImageType::Linear && is_async) {
+    if (image_info.forced_flushed) {
         flags |= ImageViewFlagBits::PreemtiveDownload;
     }
     if (image_info.type == ImageType::e3D && info.type != ImageViewType::e3D) {
@@ -490,6 +490,32 @@ void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) {
     }
 }

+template <class P>
+std::optional<VideoCore::RasterizerDownloadArea> TextureCache<P>::GetFlushArea(VAddr cpu_addr,
+                                                                               u64 size) {
+    std::optional<VideoCore::RasterizerDownloadArea> area{};
+    ForEachImageInRegion(cpu_addr, size, [&](ImageId, ImageBase& image) {
+        if (False(image.flags & ImageFlagBits::GpuModified)) {
+            return;
+        }
+        if (!area) {
+            area.emplace();
+            area->start_address = cpu_addr;
+            area->end_address = cpu_addr + size;
+            area->preemtive = true;
+        }
+        area->start_address = std::min(area->start_address, image.cpu_addr);
+        area->end_address = std::max(area->end_address, image.cpu_addr_end);
+        for (auto image_view_id : image.image_view_ids) {
+            auto& image_view = slot_image_views[image_view_id];
+            image_view.flags |= ImageViewFlagBits::PreemtiveDownload;
+        }
+        area->preemtive &= image.info.forced_flushed;
+        image.info.forced_flushed = true;
+    });
+    return area;
+}
+
 template <class P>
 void TextureCache<P>::UnmapMemory(VAddr cpu_addr, size_t size) {
     std::vector<ImageId> deleted_images;
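The new GetFlushArea helper grows one contiguous download window over every GPU-modified image that overlaps the queried range, and demotes the area from "preemtive" (the field really is spelled that way in RasterizerDownloadArea) as soon as it touches an image that was not already on the forced-flush path. A self-contained sketch of just the interval accumulation, with a plain vector standing in for the cache's image slots and the image-view flagging omitted:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <optional>
    #include <vector>

    // Hypothetical reductions of RasterizerDownloadArea and ImageBase; only
    // the accumulation logic mirrors GetFlushArea in the diff above.
    struct DownloadArea {
        uint64_t start_address;
        uint64_t end_address;
        bool preemtive; // spelled as in the real struct
    };

    struct StubImage {
        uint64_t cpu_addr;
        uint64_t cpu_addr_end;
        bool gpu_modified;
        bool forced_flushed;
    };

    std::optional<DownloadArea> GetFlushArea(uint64_t cpu_addr, uint64_t size,
                                             std::vector<StubImage>& images) {
        std::optional<DownloadArea> area;
        for (StubImage& image : images) { // stands in for ForEachImageInRegion
            if (!image.gpu_modified) {
                continue; // clean images never need flushing
            }
            if (!area) {
                area = DownloadArea{cpu_addr, cpu_addr + size, true};
            }
            // Widen the window to cover the whole dirty image.
            area->start_address = std::min(area->start_address, image.cpu_addr);
            area->end_address = std::max(area->end_address, image.cpu_addr_end);
            // One non-forced image makes the area reactive rather than preemptive.
            area->preemtive &= image.forced_flushed;
            image.forced_flushed = true; // later flushes of this image are eager
        }
        return area;
    }

    int main() {
        std::vector<StubImage> images{{0x1000, 0x3000, true, false}};
        if (const auto area = GetFlushArea(0x2000, 0x100, images)) {
            std::cout << std::hex << area->start_address << '-' << area->end_address
                      << " preemtive=" << area->preemtive << '\n'; // 1000-3000 preemtive=0
        }
    }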
@@ -683,6 +709,7 @@ void TextureCache<P>::CommitAsyncFlushes() {
                 download_info.async_buffer_id = last_async_buffer_id;
             }
         }
+
         if (any_none_dma) {
             auto download_map = runtime.DownloadStagingBuffer(total_size_bytes, true);
             for (const PendingDownload& download_info : download_ids) {
@@ -695,6 +722,7 @@ void TextureCache<P>::CommitAsyncFlushes() {
             }
             uncommitted_async_buffers.emplace_back(download_map);
         }
+
         async_buffers.emplace_back(std::move(uncommitted_async_buffers));
         uncommitted_async_buffers.clear();
     }
@@ -783,17 +811,22 @@ void TextureCache<P>::PopAsyncFlushes() {
     }
 }

 template <class P>
-ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand) {
+ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand, bool is_upload) {
     const ImageInfo dst_info(operand);
     const ImageId dst_id = FindDMAImage(dst_info, operand.address);
     if (!dst_id) {
         return NULL_IMAGE_ID;
     }
-    const auto& image = slot_images[dst_id];
+    auto& image = slot_images[dst_id];
     if (False(image.flags & ImageFlagBits::GpuModified)) {
         // No need to waste time on an image that's synced with guest
         return NULL_IMAGE_ID;
     }
+    if (!is_upload && !image.info.dma_downloaded) {
+        // Force a full sync.
+        image.info.dma_downloaded = true;
+        return NULL_IMAGE_ID;
+    }
     const auto base = image.TryFindBase(operand.address);
     if (!base) {
         return NULL_IMAGE_ID;
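The added is_upload branch acts as a one-shot latch: the first DMA download that hits a GPU-modified image bails out with NULL_IMAGE_ID, forcing the caller through a full sync, and sets dma_downloaded so later downloads of the same image can take the fast path. A hedged sketch of just that state machine, with invented names:

    #include <cassert>

    // Invented reduction of the new early-out in DmaImageId; the "fast path"
    // stands in for returning a valid ImageId instead of NULL_IMAGE_ID.
    struct StubImageInfo {
        bool dma_downloaded = false; // starts true only for forced_flushed images
    };

    bool TakeDmaFastPath(StubImageInfo& info, bool is_upload) {
        if (!is_upload && !info.dma_downloaded) {
            info.dma_downloaded = true; // remember that one full sync happened
            return false;               // force a full sync this time
        }
        return true;
    }

    int main() {
        StubImageInfo info;
        assert(TakeDmaFastPath(info, /*is_upload=*/true));   // uploads are never forced
        assert(!TakeDmaFastPath(info, /*is_upload=*/false)); // first download syncs fully
        assert(TakeDmaFastPath(info, /*is_upload=*/false));  // latched from then on
    }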
@@ -1290,7 +1323,6 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
             all_siblings.push_back(overlap_id);
         } else {
             bad_overlap_ids.push_back(overlap_id);
-            overlap.flags |= ImageFlagBits::BadOverlap;
         }
     };
     ForEachImageInRegion(cpu_addr, size_bytes, region_check);
@@ -1401,7 +1433,12 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
         ImageBase& aliased = slot_images[aliased_id];
         aliased.overlapping_images.push_back(new_image_id);
         new_image.overlapping_images.push_back(aliased_id);
-        new_image.flags |= ImageFlagBits::BadOverlap;
+        if (aliased.info.resources.levels == 1 && aliased.overlapping_images.size() > 1) {
+            aliased.flags |= ImageFlagBits::BadOverlap;
+        }
+        if (new_image.info.resources.levels == 1 && new_image.overlapping_images.size() > 1) {
+            new_image.flags |= ImageFlagBits::BadOverlap;
+        }
     }
     RegisterImage(new_image_id);
     return new_image_id;
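Where the old code flagged every joined alias unconditionally, the new checks only mark an image as a bad overlap when it has a single mip level and genuinely shares its range with more than one other image. A small sketch of the refined predicate, using stub fields rather than the cache's ImageBase:

    #include <cassert>
    #include <cstddef>

    // Stub carrying just the fields the new condition reads.
    struct StubImage {
        int levels = 1;              // image.info.resources.levels
        std::size_t overlapping = 0; // overlapping_images.size()
        bool bad_overlap = false;
    };

    void MarkIfBadOverlap(StubImage& image) {
        // Mip-mapped images are exempt; a single overlap is still considered safe.
        if (image.levels == 1 && image.overlapping > 1) {
            image.bad_overlap = true;
        }
    }

    int main() {
        StubImage mipped{.levels = 3, .overlapping = 5};
        MarkIfBadOverlap(mipped);
        assert(!mipped.bad_overlap);

        StubImage flat{.levels = 1, .overlapping = 2};
        MarkIfBadOverlap(flat);
        assert(flat.bad_overlap);
    }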
@@ -179,6 +179,8 @@ public:
     /// Download contents of host images to guest memory in a region
     void DownloadMemory(VAddr cpu_addr, size_t size);

+    std::optional<VideoCore::RasterizerDownloadArea> GetFlushArea(VAddr cpu_addr, u64 size);
+
     /// Remove images in a region
     void UnmapMemory(VAddr cpu_addr, size_t size);
@@ -205,7 +207,7 @@ public:
     /// Pop asynchronous downloads
     void PopAsyncFlushes();

-    [[nodiscard]] ImageId DmaImageId(const Tegra::DMA::ImageOperand& operand);
+    [[nodiscard]] ImageId DmaImageId(const Tegra::DMA::ImageOperand& operand, bool is_upload);

     [[nodiscard]] std::pair<Image*, BufferImageCopy> DmaBufferImageCopy(
         const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& buffer_operand,
@@ -710,6 +710,7 @@ void Config::ReadRendererValues() {
     ReadGlobalSetting(Settings::values.nvdec_emulation);
     ReadGlobalSetting(Settings::values.accelerate_astc);
     ReadGlobalSetting(Settings::values.async_astc);
+    ReadGlobalSetting(Settings::values.use_reactive_flushing);
     ReadGlobalSetting(Settings::values.shader_backend);
     ReadGlobalSetting(Settings::values.use_asynchronous_shaders);
     ReadGlobalSetting(Settings::values.use_fast_gpu_time);
@@ -1355,6 +1356,7 @@ void Config::SaveRendererValues() {
                  Settings::values.nvdec_emulation.UsingGlobal());
     WriteGlobalSetting(Settings::values.accelerate_astc);
     WriteGlobalSetting(Settings::values.async_astc);
+    WriteGlobalSetting(Settings::values.use_reactive_flushing);
     WriteSetting(QString::fromStdString(Settings::values.shader_backend.GetLabel()),
                  static_cast<u32>(Settings::values.shader_backend.GetValue(global)),
                  static_cast<u32>(Settings::values.shader_backend.GetDefault()),
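Both frontends wire the new flag through the same SwitchableSetting pattern: a global default that a per-game profile may override, which is what the ReadGlobalSetting/WriteGlobalSetting calls above rely on. A simplified model of that fallback, not yuzu's actual Settings implementation:

    #include <iostream>
    #include <optional>

    // Simplified stand-in for SwitchableSetting<bool>: a global value plus an
    // optional per-game override.
    class SwitchableBool {
    public:
        explicit SwitchableBool(bool global) : global_{global} {}

        bool GetValue() const { return custom_ ? *custom_ : global_; }
        bool UsingGlobal() const { return !custom_.has_value(); }

        void SetGlobal() { custom_.reset(); }           // back to the global default
        void SetCustom(bool value) { custom_ = value; } // per-game override

    private:
        bool global_;
        std::optional<bool> custom_;
    };

    int main() {
        SwitchableBool use_reactive_flushing{true}; // default matches the diff
        std::cout << use_reactive_flushing.GetValue() << '\n'; // 1 (global)

        use_reactive_flushing.SetCustom(false); // a per-game profile disables it
        std::cout << use_reactive_flushing.GetValue() << '\n';    // 0
        std::cout << use_reactive_flushing.UsingGlobal() << '\n'; // 0
    }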
@@ -21,6 +21,7 @@ ConfigureGraphicsAdvanced::~ConfigureGraphicsAdvanced() = default;

 void ConfigureGraphicsAdvanced::SetConfiguration() {
     const bool runtime_lock = !system.IsPoweredOn();
+    ui->use_reactive_flushing->setEnabled(runtime_lock);
     ui->async_present->setEnabled(runtime_lock);
     ui->renderer_force_max_clock->setEnabled(runtime_lock);
     ui->async_astc->setEnabled(runtime_lock);
@@ -29,6 +30,7 @@ void ConfigureGraphicsAdvanced::SetConfiguration() {

     ui->async_present->setChecked(Settings::values.async_presentation.GetValue());
     ui->renderer_force_max_clock->setChecked(Settings::values.renderer_force_max_clock.GetValue());
+    ui->use_reactive_flushing->setChecked(Settings::values.use_reactive_flushing.GetValue());
     ui->async_astc->setChecked(Settings::values.async_astc.GetValue());
     ui->use_asynchronous_shaders->setChecked(Settings::values.use_asynchronous_shaders.GetValue());
     ui->use_fast_gpu_time->setChecked(Settings::values.use_fast_gpu_time.GetValue());
@@ -60,6 +62,8 @@ void ConfigureGraphicsAdvanced::ApplyConfiguration() {
                                              renderer_force_max_clock);
     ConfigurationShared::ApplyPerGameSetting(&Settings::values.max_anisotropy,
                                              ui->anisotropic_filtering_combobox);
+    ConfigurationShared::ApplyPerGameSetting(&Settings::values.use_reactive_flushing,
+                                             ui->use_reactive_flushing, use_reactive_flushing);
     ConfigurationShared::ApplyPerGameSetting(&Settings::values.async_astc, ui->async_astc,
                                              async_astc);
     ConfigurationShared::ApplyPerGameSetting(&Settings::values.use_asynchronous_shaders,
@@ -91,6 +95,7 @@ void ConfigureGraphicsAdvanced::SetupPerGameUI() {
     ui->async_present->setEnabled(Settings::values.async_presentation.UsingGlobal());
     ui->renderer_force_max_clock->setEnabled(
         Settings::values.renderer_force_max_clock.UsingGlobal());
+    ui->use_reactive_flushing->setEnabled(Settings::values.use_reactive_flushing.UsingGlobal());
     ui->async_astc->setEnabled(Settings::values.async_astc.UsingGlobal());
     ui->use_asynchronous_shaders->setEnabled(
         Settings::values.use_asynchronous_shaders.UsingGlobal());
@@ -108,6 +113,8 @@ void ConfigureGraphicsAdvanced::SetupPerGameUI() {
     ConfigurationShared::SetColoredTristate(ui->renderer_force_max_clock,
                                             Settings::values.renderer_force_max_clock,
                                             renderer_force_max_clock);
+    ConfigurationShared::SetColoredTristate(
+        ui->use_reactive_flushing, Settings::values.use_reactive_flushing, use_reactive_flushing);
     ConfigurationShared::SetColoredTristate(ui->async_astc, Settings::values.async_astc,
                                             async_astc);
     ConfigurationShared::SetColoredTristate(ui->use_asynchronous_shaders,
@@ -40,6 +40,7 @@ private:
     ConfigurationShared::CheckState renderer_force_max_clock;
     ConfigurationShared::CheckState use_vsync;
     ConfigurationShared::CheckState async_astc;
+    ConfigurationShared::CheckState use_reactive_flushing;
     ConfigurationShared::CheckState use_asynchronous_shaders;
     ConfigurationShared::CheckState use_fast_gpu_time;
     ConfigurationShared::CheckState use_vulkan_driver_pipeline_cache;
@@ -96,6 +96,16 @@
        </property>
       </widget>
      </item>
+     <item>
+      <widget class="QCheckBox" name="use_reactive_flushing">
+       <property name="toolTip">
+        <string>Uses reactive flushing instead of predictive flushing, allowing more accurate memory syncing.</string>
+       </property>
+       <property name="text">
+        <string>Enable Reactive Flushing</string>
+       </property>
+      </widget>
+     </item>
      <item>
       <widget class="QCheckBox" name="use_asynchronous_shaders">
        <property name="toolTip">
@@ -312,6 +312,7 @@ void Config::ReadValues() {
     ReadSetting("Renderer", Settings::values.use_asynchronous_gpu_emulation);
     ReadSetting("Renderer", Settings::values.vsync_mode);
     ReadSetting("Renderer", Settings::values.shader_backend);
+    ReadSetting("Renderer", Settings::values.use_reactive_flushing);
     ReadSetting("Renderer", Settings::values.use_asynchronous_shaders);
     ReadSetting("Renderer", Settings::values.nvdec_emulation);
     ReadSetting("Renderer", Settings::values.accelerate_astc);
@@ -340,6 +340,10 @@ use_vsync =
 # 0: GLSL, 1 (default): GLASM, 2: SPIR-V
 shader_backend =

+# Uses reactive flushing instead of predictive flushing, allowing more accurate memory syncing.
+# 0: Off, 1 (default): On
+use_reactive_flushing =
+
 # Whether to allow asynchronous shader building.
 # 0 (default): Off, 1: On
 use_asynchronous_shaders =