mirror of
https://github.com/yuzu-emu/yuzu.git
Merge pull request #1205 from bunnei/improve-rasterizer-cache-2
Various fixes and improvements to rasterizer cache 2: Electric Boogaloo
commit 42588493d5
@@ -10,6 +10,7 @@
 #include "common/common_types.h"
 #include "common/swap.h"
 #include "core/hle/service/nvdrv/devices/nvdevice.h"
+#include "video_core/memory_manager.h"
 
 namespace Service::Nvidia::Devices {
 
@@ -251,8 +251,8 @@ std::string ReadCString(VAddr vaddr, std::size_t max_length) {
     return string;
 }
 
-void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached) {
-    if (gpu_addr == 0) {
+void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
+    if (vaddr == 0) {
         return;
     }
 
@@ -261,19 +261,8 @@ void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached)
     // CPU pages, hence why we iterate on a CPU page basis (note: GPU page size is different). This
     // assumes the specified GPU address region is contiguous as well.
 
-    u64 num_pages = ((gpu_addr + size - 1) >> PAGE_BITS) - (gpu_addr >> PAGE_BITS) + 1;
-    for (unsigned i = 0; i < num_pages; ++i, gpu_addr += PAGE_SIZE) {
-        boost::optional<VAddr> maybe_vaddr =
-            Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
-        // The GPU <-> CPU virtual memory mapping is not 1:1
-        if (!maybe_vaddr) {
-            LOG_ERROR(HW_Memory,
-                      "Trying to flush a cached region to an invalid physical address {:016X}",
-                      gpu_addr);
-            continue;
-        }
-        VAddr vaddr = *maybe_vaddr;
-
+    u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
+    for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
         PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
 
         if (cached) {
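Note on the arithmetic above, assuming the 4 KiB CPU pages used by core/memory (PAGE_BITS = 12): for vaddr = 0x1000F00 and size = 0x300 the last byte sits at 0x10011FF, so the first and last touched page indices are 0x1000F00 >> 12 = 0x1000 and 0x10011FF >> 12 = 0x1001, giving num_pages = 0x1001 - 0x1000 + 1 = 2. The "size - 1" term is what stops a region ending exactly on a page boundary from counting one page too many.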
@@ -344,29 +333,19 @@ void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {
 
         const VAddr overlap_start = std::max(start, region_start);
         const VAddr overlap_end = std::min(end, region_end);
-
-        const std::vector<Tegra::GPUVAddr> gpu_addresses =
-            system_instance.GPU().MemoryManager().CpuToGpuAddress(overlap_start);
-
-        if (gpu_addresses.empty()) {
-            return;
-        }
-
         const u64 overlap_size = overlap_end - overlap_start;
 
-        for (const auto& gpu_address : gpu_addresses) {
-            auto& rasterizer = system_instance.Renderer().Rasterizer();
-            switch (mode) {
-            case FlushMode::Flush:
-                rasterizer.FlushRegion(gpu_address, overlap_size);
-                break;
-            case FlushMode::Invalidate:
-                rasterizer.InvalidateRegion(gpu_address, overlap_size);
-                break;
-            case FlushMode::FlushAndInvalidate:
-                rasterizer.FlushAndInvalidateRegion(gpu_address, overlap_size);
-                break;
-            }
+        auto& rasterizer = system_instance.Renderer().Rasterizer();
+        switch (mode) {
+        case FlushMode::Flush:
+            rasterizer.FlushRegion(overlap_start, overlap_size);
+            break;
+        case FlushMode::Invalidate:
+            rasterizer.InvalidateRegion(overlap_start, overlap_size);
+            break;
+        case FlushMode::FlushAndInvalidate:
+            rasterizer.FlushAndInvalidateRegion(overlap_start, overlap_size);
+            break;
         }
     };
 
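Worked example for the clamping above: a flush request over [start, end) = [0x2000, 0x6000) against a cached region [0x4000, 0x9000) yields overlap_start = 0x4000 and overlap_end = 0x6000, so overlap_size = 0x2000 and only the intersection is flushed. Because the cache is now keyed by CPU addresses, overlap_start is handed to the rasterizer directly; the deleted loop that fanned the flush out over every GPU mapping of the region is no longer needed.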
@@ -11,7 +11,6 @@
 #include <boost/icl/interval_map.hpp>
 #include "common/common_types.h"
 #include "core/memory_hook.h"
-#include "video_core/memory_manager.h"
 
 namespace Kernel {
 class Process;
@@ -179,7 +178,7 @@ enum class FlushMode {
 /**
  * Mark each page touching the region as cached.
  */
-void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached);
+void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached);
 
 /**
  * Flushes and invalidates any externally cached rasterizer resources touching the given virtual
@@ -4,113 +4,87 @@
 
 #pragma once
 
-#include <unordered_map>
+#include <set>
 
 #include <boost/icl/interval_map.hpp>
-#include <boost/range/iterator_range.hpp>
 
 #include "common/common_types.h"
+#include "core/core.h"
 #include "core/memory.h"
 #include "video_core/memory_manager.h"
+#include "video_core/rasterizer_interface.h"
+#include "video_core/renderer_base.h"
 
 template <class T>
 class RasterizerCache : NonCopyable {
 public:
     /// Mark the specified region as being invalidated
-    void InvalidateRegion(Tegra::GPUVAddr region_addr, size_t region_size) {
-        for (auto iter = cached_objects.cbegin(); iter != cached_objects.cend();) {
-            const auto& object{iter->second};
+    void InvalidateRegion(VAddr addr, u64 size) {
+        if (size == 0)
+            return;
 
-            ++iter;
+        const ObjectInterval interval{addr, addr + size};
+        for (auto& pair : boost::make_iterator_range(object_cache.equal_range(interval))) {
+            for (auto& cached_object : pair.second) {
+                if (!cached_object)
+                    continue;
 
-            if (object->GetAddr() <= (region_addr + region_size) &&
-                region_addr <= (object->GetAddr() + object->GetSizeInBytes())) {
-                // Regions overlap, so invalidate
-                Unregister(object);
+                remove_objects.emplace(cached_object);
             }
         }
+
+        for (auto& remove_object : remove_objects) {
+            Unregister(remove_object);
+        }
+
+        remove_objects.clear();
+    }
+
+    /// Invalidates everything in the cache
+    void InvalidateAll() {
+        while (object_cache.begin() != object_cache.end()) {
+            Unregister(*object_cache.begin()->second.begin());
+        }
     }
 
 protected:
     /// Tries to get an object from the cache with the specified address
-    T TryGet(Tegra::GPUVAddr addr) const {
-        const auto& search{cached_objects.find(addr)};
-        if (search != cached_objects.end()) {
-            return search->second;
+    T TryGet(VAddr addr) const {
+        const ObjectInterval interval{addr};
+        for (auto& pair : boost::make_iterator_range(object_cache.equal_range(interval))) {
+            for (auto& cached_object : pair.second) {
+                if (cached_object->GetAddr() == addr) {
+                    return cached_object;
+                }
+            }
         }
 
         return nullptr;
     }
 
-    /// Gets a reference to the cache
-    const std::unordered_map<Tegra::GPUVAddr, T>& GetCache() const {
-        return cached_objects;
-    }
-
     /// Register an object into the cache
     void Register(const T& object) {
-        const auto& search{cached_objects.find(object->GetAddr())};
-        if (search != cached_objects.end()) {
-            // Registered already
-            return;
-        }
-
-        cached_objects[object->GetAddr()] = object;
-        UpdatePagesCachedCount(object->GetAddr(), object->GetSizeInBytes(), 1);
+        object_cache.add({GetInterval(object), ObjectSet{object}});
+        auto& rasterizer = Core::System::GetInstance().Renderer().Rasterizer();
+        rasterizer.UpdatePagesCachedCount(object->GetAddr(), object->GetSizeInBytes(), 1);
     }
 
     /// Unregisters an object from the cache
     void Unregister(const T& object) {
-        const auto& search{cached_objects.find(object->GetAddr())};
-        if (search == cached_objects.end()) {
-            // Unregistered already
-            return;
-        }
-
-        UpdatePagesCachedCount(object->GetAddr(), object->GetSizeInBytes(), -1);
-        cached_objects.erase(search);
+        auto& rasterizer = Core::System::GetInstance().Renderer().Rasterizer();
+        rasterizer.UpdatePagesCachedCount(object->GetAddr(), object->GetSizeInBytes(), -1);
+        object_cache.subtract({GetInterval(object), ObjectSet{object}});
     }
 
 private:
-    using PageMap = boost::icl::interval_map<u64, int>;
+    using ObjectSet = std::set<T>;
+    using ObjectCache = boost::icl::interval_map<VAddr, ObjectSet>;
+    using ObjectInterval = typename ObjectCache::interval_type;
 
-    template <typename Map, typename Interval>
-    constexpr auto RangeFromInterval(Map& map, const Interval& interval) {
-        return boost::make_iterator_range(map.equal_range(interval));
+    static auto GetInterval(const T& object) {
+        return ObjectInterval::right_open(object->GetAddr(),
+                                          object->GetAddr() + object->GetSizeInBytes());
     }
 
-    /// Increase/decrease the number of object in pages touching the specified region
-    void UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) {
-        const u64 page_start{addr >> Tegra::MemoryManager::PAGE_BITS};
-        const u64 page_end{(addr + size) >> Tegra::MemoryManager::PAGE_BITS};
-
-        // Interval maps will erase segments if count reaches 0, so if delta is negative we have to
-        // subtract after iterating
-        const auto pages_interval = PageMap::interval_type::right_open(page_start, page_end);
-        if (delta > 0)
-            cached_pages.add({pages_interval, delta});
-
-        for (const auto& pair : RangeFromInterval(cached_pages, pages_interval)) {
-            const auto interval = pair.first & pages_interval;
-            const int count = pair.second;
-
-            const Tegra::GPUVAddr interval_start_addr = boost::icl::first(interval)
-                                                        << Tegra::MemoryManager::PAGE_BITS;
-            const Tegra::GPUVAddr interval_end_addr = boost::icl::last_next(interval)
-                                                      << Tegra::MemoryManager::PAGE_BITS;
-            const u64 interval_size = interval_end_addr - interval_start_addr;
-
-            if (delta > 0 && count == delta)
-                Memory::RasterizerMarkRegionCached(interval_start_addr, interval_size, true);
-            else if (delta < 0 && count == -delta)
-                Memory::RasterizerMarkRegionCached(interval_start_addr, interval_size, false);
-            else
-                ASSERT(count >= 0);
-        }
-
-        if (delta < 0)
-            cached_pages.add({pages_interval, delta});
-    }
-
-    std::unordered_map<Tegra::GPUVAddr, T> cached_objects;
-    PageMap cached_pages;
+    ObjectCache object_cache;
+    ObjectSet remove_objects;
 };
 
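The structural change in this hunk is swapping an std::unordered_map keyed on a single start address for a boost::icl::interval_map keyed on [addr, addr + size): equal_range over a query interval visits every cached object overlapping the range, so InvalidateRegion no longer depends on exact-address matches. A standalone sketch of that behavior (toy object type, not yuzu code):

    #include <iostream>
    #include <memory>
    #include <set>

    #include <boost/icl/interval_map.hpp>
    #include <boost/range/iterator_range.hpp>

    using VAddr = unsigned long long;

    int main() {
        using Object = std::shared_ptr<int>; // stand-in for a cached surface/shader
        using ObjectSet = std::set<Object>;
        using ObjectCache = boost::icl::interval_map<VAddr, ObjectSet>;
        using Interval = ObjectCache::interval_type;

        ObjectCache cache;
        const Object a = std::make_shared<int>(1);
        const Object b = std::make_shared<int>(2);
        cache.add({Interval::right_open(0x1000, 0x2000), ObjectSet{a}}); // object a
        cache.add({Interval::right_open(0x1800, 0x2800), ObjectSet{b}}); // object b, overlaps a

        // The map now holds three segments; the middle one, [0x1800, 0x2000),
        // carries the union {a, b}. Querying any range inside it finds both.
        const Interval query = Interval::right_open(0x1900, 0x1a00);
        for (auto& pair : boost::make_iterator_range(cache.equal_range(query))) {
            std::cout << pair.second.size() << " object(s) overlap\n"; // prints: 2 object(s) overlap
        }
    }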
@@ -27,14 +27,14 @@ public:
     virtual void FlushAll() = 0;
 
     /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
-    virtual void FlushRegion(Tegra::GPUVAddr addr, u64 size) = 0;
+    virtual void FlushRegion(VAddr addr, u64 size) = 0;
 
     /// Notify rasterizer that any caches of the specified region should be invalidated
-    virtual void InvalidateRegion(Tegra::GPUVAddr addr, u64 size) = 0;
+    virtual void InvalidateRegion(VAddr addr, u64 size) = 0;
 
     /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
     /// and invalidated
-    virtual void FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) = 0;
+    virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0;
 
     /// Attempt to use a faster method to perform a display transfer with is_texture_copy = 0
     virtual bool AccelerateDisplayTransfer(const void* config) {
@@ -60,5 +60,8 @@ public:
     virtual bool AccelerateDrawBatch(bool is_indexed) {
         return false;
     }
+
+    /// Increase/decrease the number of object in pages touching the specified region
+    virtual void UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) {}
 };
 
 } // namespace VideoCore
@@ -274,6 +274,41 @@ bool RasterizerOpenGL::AccelerateDrawBatch(bool is_indexed) {
     return true;
 }
 
+template <typename Map, typename Interval>
+static constexpr auto RangeFromInterval(Map& map, const Interval& interval) {
+    return boost::make_iterator_range(map.equal_range(interval));
+}
+
+void RasterizerOpenGL::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
+    const u64 page_start{addr >> Memory::PAGE_BITS};
+    const u64 page_end{(addr + size + Memory::PAGE_SIZE - 1) >> Memory::PAGE_BITS};
+
+    // Interval maps will erase segments if count reaches 0, so if delta is negative we have to
+    // subtract after iterating
+    const auto pages_interval = CachedPageMap::interval_type::right_open(page_start, page_end);
+    if (delta > 0)
+        cached_pages.add({pages_interval, delta});
+
+    for (const auto& pair : RangeFromInterval(cached_pages, pages_interval)) {
+        const auto interval = pair.first & pages_interval;
+        const int count = pair.second;
+
+        const VAddr interval_start_addr = boost::icl::first(interval) << Memory::PAGE_BITS;
+        const VAddr interval_end_addr = boost::icl::last_next(interval) << Memory::PAGE_BITS;
+        const u64 interval_size = interval_end_addr - interval_start_addr;
+
+        if (delta > 0 && count == delta)
+            Memory::RasterizerMarkRegionCached(interval_start_addr, interval_size, true);
+        else if (delta < 0 && count == -delta)
+            Memory::RasterizerMarkRegionCached(interval_start_addr, interval_size, false);
+        else
+            ASSERT(count >= 0);
+    }
+
+    if (delta < 0)
+        cached_pages.add({pages_interval, delta});
+}
+
 std::pair<Surface, Surface> RasterizerOpenGL::ConfigureFramebuffers(bool using_color_fb,
                                                                     bool using_depth_fb,
                                                                     bool preserve_contents) {
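The comment about subtracting after iterating is load-bearing: interval_map erases segments whose aggregated value reaches zero, so applying a negative delta before the loop would delete exactly the segments whose pages still need to be un-marked. A small standalone demonstration of the aggregation and the erase-at-zero behavior (plain counts, not yuzu types):

    #include <iostream>

    #include <boost/icl/interval_map.hpp>

    int main() {
        using PageMap = boost::icl::interval_map<unsigned long long, int>;
        using Interval = PageMap::interval_type;

        PageMap pages;
        pages.add({Interval::right_open(0, 4), 1}); // surface A touches pages 0-3
        pages.add({Interval::right_open(2, 6), 1}); // surface B touches pages 2-5

        // Counts add where intervals overlap: [0,2)->1, [2,4)->2, [4,6)->1.
        for (const auto& pair : pages) {
            std::cout << '[' << boost::icl::first(pair.first) << ','
                      << boost::icl::last_next(pair.first) << ") -> " << pair.second << '\n';
        }

        // Releasing surface A: [0,2) drops to zero and vanishes from the map,
        // which is why UpdatePagesCachedCount reads counts first and only then
        // adds a negative delta.
        pages.add({Interval::right_open(0, 4), -1});
        std::cout << pages.iterative_size() << " segment(s) remain\n"; // prints: 1 segment(s) remain
    }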
@@ -397,16 +432,6 @@ void RasterizerOpenGL::Clear() {
     glClearStencil(regs.clear_stencil);
 
     glClear(clear_mask);
-
-    // Mark framebuffer surfaces as dirty
-    if (Settings::values.use_accurate_framebuffers) {
-        if (dirty_color_surface != nullptr) {
-            res_cache.FlushSurface(dirty_color_surface);
-        }
-        if (dirty_depth_surface != nullptr) {
-            res_cache.FlushSurface(dirty_depth_surface);
-        }
-    }
 }
 
 std::pair<u8*, GLintptr> RasterizerOpenGL::AlignBuffer(u8* buffer_ptr, GLintptr buffer_offset,
@@ -522,16 +547,6 @@ void RasterizerOpenGL::DrawArrays() {
         texture_unit.Unbind();
     }
     state.Apply();
-
-    // Mark framebuffer surfaces as dirty
-    if (Settings::values.use_accurate_framebuffers) {
-        if (dirty_color_surface != nullptr) {
-            res_cache.FlushSurface(dirty_color_surface);
-        }
-        if (dirty_depth_surface != nullptr) {
-            res_cache.FlushSurface(dirty_depth_surface);
-        }
-    }
 }
 
 void RasterizerOpenGL::NotifyMaxwellRegisterChanged(u32 method) {}
@@ -540,17 +555,17 @@ void RasterizerOpenGL::FlushAll() {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
 }
 
-void RasterizerOpenGL::FlushRegion(Tegra::GPUVAddr addr, u64 size) {
+void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
 }
 
-void RasterizerOpenGL::InvalidateRegion(Tegra::GPUVAddr addr, u64 size) {
+void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
     res_cache.InvalidateRegion(addr, size);
     shader_cache.InvalidateRegion(addr, size);
 }
 
-void RasterizerOpenGL::FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) {
+void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
     InvalidateRegion(addr, size);
 }
@@ -10,7 +10,11 @@
 #include <tuple>
 #include <utility>
 #include <vector>
 
+#include <boost/icl/interval_map.hpp>
+#include <boost/range/iterator_range.hpp>
 #include <glad/glad.h>
 
 #include "common/common_types.h"
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/memory_manager.h"
@@ -40,15 +44,16 @@ public:
     void Clear() override;
     void NotifyMaxwellRegisterChanged(u32 method) override;
     void FlushAll() override;
-    void FlushRegion(Tegra::GPUVAddr addr, u64 size) override;
-    void InvalidateRegion(Tegra::GPUVAddr addr, u64 size) override;
-    void FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) override;
+    void FlushRegion(VAddr addr, u64 size) override;
+    void InvalidateRegion(VAddr addr, u64 size) override;
+    void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
     bool AccelerateDisplayTransfer(const void* config) override;
     bool AccelerateTextureCopy(const void* config) override;
     bool AccelerateFill(const void* config) override;
     bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
                            u32 pixel_stride) override;
     bool AccelerateDrawBatch(bool is_indexed) override;
+    void UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) override;
 
     /// OpenGL shader generated for a given Maxwell register state
     struct MaxwellShader {
@@ -187,6 +192,9 @@ private:
 
     enum class AccelDraw { Disabled, Arrays, Indexed };
     AccelDraw accelerate_draw = AccelDraw::Disabled;
+
+    using CachedPageMap = boost::icl::interval_map<u64, int>;
+    CachedPageMap cached_pages;
 };
 
 } // namespace OpenGL
@@ -33,11 +33,16 @@ struct FormatTuple {
     bool compressed;
 };
 
+static VAddr TryGetCpuAddr(Tegra::GPUVAddr gpu_addr) {
+    auto& gpu{Core::System::GetInstance().GPU()};
+    const auto cpu_addr{gpu.MemoryManager().GpuToCpuAddress(gpu_addr)};
+    return cpu_addr ? *cpu_addr : 0;
+}
+
 /*static*/ SurfaceParams SurfaceParams::CreateForTexture(
     const Tegra::Texture::FullTextureInfo& config) {
 
     SurfaceParams params{};
-    params.addr = config.tic.Address();
+    params.addr = TryGetCpuAddr(config.tic.Address());
     params.is_tiled = config.tic.IsTiled();
     params.block_height = params.is_tiled ? config.tic.BlockHeight() : 0,
     params.pixel_format =
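A note on TryGetCpuAddr's sentinel: GpuToCpuAddress returns an empty optional when the GPU address has no CPU mapping, and the helper collapses that to 0. That is safe here because address 0 is already treated as "nothing to do" downstream: RasterizerMarkRegionCached, updated in the first hunks of this commit, returns immediately when vaddr == 0.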
@@ -55,9 +60,8 @@ struct FormatTuple {
 
 /*static*/ SurfaceParams SurfaceParams::CreateForFramebuffer(
     const Tegra::Engines::Maxwell3D::Regs::RenderTargetConfig& config) {
 
     SurfaceParams params{};
-    params.addr = config.Address();
+    params.addr = TryGetCpuAddr(config.Address());
     params.is_tiled = true;
     params.block_height = Tegra::Texture::TICEntry::DefaultBlockHeight;
     params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
@@ -75,9 +79,8 @@ struct FormatTuple {
 /*static*/ SurfaceParams SurfaceParams::CreateForDepthBuffer(u32 zeta_width, u32 zeta_height,
                                                              Tegra::GPUVAddr zeta_address,
                                                              Tegra::DepthFormat format) {
 
     SurfaceParams params{};
-    params.addr = zeta_address;
+    params.addr = TryGetCpuAddr(zeta_address);
     params.is_tiled = true;
     params.block_height = Tegra::Texture::TICEntry::DefaultBlockHeight;
     params.pixel_format = PixelFormatFromDepthFormat(format);
@@ -171,11 +174,6 @@ static const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType
     return format;
 }
 
-VAddr SurfaceParams::GetCpuAddr() const {
-    auto& gpu = Core::System::GetInstance().GPU();
-    return *gpu.MemoryManager().GpuToCpuAddress(addr);
-}
-
 static bool IsPixelFormatASTC(PixelFormat format) {
     switch (format) {
     case PixelFormat::ASTC_2D_4X4:
@@ -222,33 +220,28 @@ static bool IsFormatBCn(PixelFormat format) {
 }
 
 template <bool morton_to_gl, PixelFormat format>
-void MortonCopy(u32 stride, u32 block_height, u32 height, std::vector<u8>& gl_buffer,
-                Tegra::GPUVAddr addr) {
+void MortonCopy(u32 stride, u32 block_height, u32 height, std::vector<u8>& gl_buffer, VAddr addr) {
     constexpr u32 bytes_per_pixel = SurfaceParams::GetFormatBpp(format) / CHAR_BIT;
     constexpr u32 gl_bytes_per_pixel = CachedSurface::GetGLBytesPerPixel(format);
-    auto& gpu = Core::System::GetInstance().GPU();
 
     if (morton_to_gl) {
         // With the BCn formats (DXT and DXN), each 4x4 tile is swizzled instead of just individual
         // pixel values.
         const u32 tile_size{IsFormatBCn(format) ? 4U : 1U};
-        const std::vector<u8> data =
-            Tegra::Texture::UnswizzleTexture(*gpu.MemoryManager().GpuToCpuAddress(addr), tile_size,
-                                             bytes_per_pixel, stride, height, block_height);
+        const std::vector<u8> data = Tegra::Texture::UnswizzleTexture(
+            addr, tile_size, bytes_per_pixel, stride, height, block_height);
         const size_t size_to_copy{std::min(gl_buffer.size(), data.size())};
         gl_buffer.assign(data.begin(), data.begin() + size_to_copy);
     } else {
         // TODO(bunnei): Assumes the default rendering GOB size of 16 (128 lines). We should
         // check the configuration for this and perform more generic un/swizzle
         LOG_WARNING(Render_OpenGL, "need to use correct swizzle/GOB parameters!");
-        VideoCore::MortonCopyPixels128(
-            stride, height, bytes_per_pixel, gl_bytes_per_pixel,
-            Memory::GetPointer(*gpu.MemoryManager().GpuToCpuAddress(addr)), gl_buffer.data(),
-            morton_to_gl);
+        VideoCore::MortonCopyPixels128(stride, height, bytes_per_pixel, gl_bytes_per_pixel,
+                                       Memory::GetPointer(addr), gl_buffer.data(), morton_to_gl);
     }
 }
 
-static constexpr std::array<void (*)(u32, u32, u32, std::vector<u8>&, Tegra::GPUVAddr),
+static constexpr std::array<void (*)(u32, u32, u32, std::vector<u8>&, VAddr),
                             SurfaceParams::MaxPixelFormat>
     morton_to_gl_fns = {
         // clang-format off
@@ -305,7 +298,7 @@ static constexpr std::array<void (*)(u32, u32, u32, std::vector<u8>&, Tegra::GPU
     // clang-format on
 };
 
-static constexpr std::array<void (*)(u32, u32, u32, std::vector<u8>&, Tegra::GPUVAddr),
+static constexpr std::array<void (*)(u32, u32, u32, std::vector<u8>&, VAddr),
                             SurfaceParams::MaxPixelFormat>
     gl_to_morton_fns = {
         // clang-format off
@@ -542,7 +535,7 @@ MICROPROFILE_DEFINE(OpenGL_SurfaceLoad, "OpenGL", "Surface Load", MP_RGB(128, 64
 void CachedSurface::LoadGLBuffer() {
     ASSERT(params.type != SurfaceType::Fill);
 
-    const u8* const texture_src_data = Memory::GetPointer(params.GetCpuAddr());
+    const u8* const texture_src_data = Memory::GetPointer(params.addr);
 
     ASSERT(texture_src_data);
 
@@ -567,7 +560,7 @@ void CachedSurface::LoadGLBuffer() {
 
 MICROPROFILE_DEFINE(OpenGL_SurfaceFlush, "OpenGL", "Surface Flush", MP_RGB(128, 192, 64));
 void CachedSurface::FlushGLBuffer() {
-    u8* const dst_buffer = Memory::GetPointer(params.GetCpuAddr());
+    u8* const dst_buffer = Memory::GetPointer(params.addr);
 
     ASSERT(dst_buffer);
     ASSERT(gl_buffer.size() ==
@@ -764,19 +757,10 @@ Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool pres
         return {};
     }
 
-    auto& gpu = Core::System::GetInstance().GPU();
-    // Don't try to create any entries in the cache if the address of the texture is invalid.
-    if (gpu.MemoryManager().GpuToCpuAddress(params.addr) == boost::none)
-        return {};
-
     // Look up surface in the cache based on address
     Surface surface{TryGet(params.addr)};
     if (surface) {
-        if (Settings::values.use_accurate_framebuffers) {
-            // If use_accurate_framebuffers is enabled, always load from memory
-            FlushSurface(surface);
-            Unregister(surface);
-        } else if (surface->GetSurfaceParams().IsCompatibleSurface(params)) {
+        if (surface->GetSurfaceParams().IsCompatibleSurface(params)) {
             // Use the cached surface as-is
             return surface;
         } else if (preserve_contents) {
@@ -792,15 +776,9 @@ Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool pres
         }
     }
 
-    // Try to get a previously reserved surface
-    surface = TryGetReservedSurface(params);
-
-    // No surface found - create a new one
-    if (!surface) {
-        surface = std::make_shared<CachedSurface>(params);
-        ReserveSurface(surface);
-        Register(surface);
-    }
+    // No cached surface found - get a new one
+    surface = GetUncachedSurface(params);
+    Register(surface);
 
     // Only load surface from memory if we care about the contents
     if (preserve_contents) {
@@ -810,13 +788,23 @@ Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool pres
     return surface;
 }
 
+Surface RasterizerCacheOpenGL::GetUncachedSurface(const SurfaceParams& params) {
+    Surface surface{TryGetReservedSurface(params)};
+    if (!surface) {
+        // No reserved surface available, create a new one and reserve it
+        surface = std::make_shared<CachedSurface>(params);
+        ReserveSurface(surface);
+    }
+    return surface;
+}
+
 Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& surface,
                                                const SurfaceParams& new_params) {
     // Verify surface is compatible for blitting
     const auto& params{surface->GetSurfaceParams()};
 
-    // Create a new surface with the new parameters, and blit the previous surface to it
-    Surface new_surface{std::make_shared<CachedSurface>(new_params)};
+    // Get a new surface with the new parameters, and blit the previous surface to it
+    Surface new_surface{GetUncachedSurface(new_params)};
 
     // If format is unchanged, we can do a faster blit without reinterpreting pixel data
     if (params.pixel_format == new_params.pixel_format) {
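GetUncachedSurface centralizes the recycle-or-create path that GetSurface and RecreateSurface now share, with Register() moved out to the call sites (and out of TryGetReservedSurface, per the hunk further down). The shape of that pattern in isolation, with hypothetical types standing in for SurfaceParams and SurfaceReserveKey:

    #include <cstddef>
    #include <memory>
    #include <unordered_map>

    // Toy stand-ins; yuzu keys the reserve on a SurfaceReserveKey hashed
    // from the full SurfaceParams, not a single integer.
    struct Params {
        std::size_t Hash() const { return key; }
        std::size_t key;
    };
    struct Texture {
        explicit Texture(const Params& p) : params{p} {}
        Params params;
    };
    using Surface = std::shared_ptr<Texture>;

    class ReserveCache {
    public:
        // Mirrors GetUncachedSurface: reuse a reserved surface with identical
        // parameters, otherwise allocate a new one and reserve it for later.
        Surface GetUncached(const Params& params) {
            if (const auto it = reserve.find(params.Hash()); it != reserve.end()) {
                return it->second; // recycled: no new texture allocation
            }
            auto surface = std::make_shared<Texture>(params);
            reserve.emplace(params.Hash(), surface);
            return surface;
        }

    private:
        std::unordered_map<std::size_t, Surface> reserve;
    };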
@@ -826,92 +814,73 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& surface,
         return new_surface;
     }
 
-    auto source_format = GetFormatTuple(params.pixel_format, params.component_type);
-    auto dest_format = GetFormatTuple(new_params.pixel_format, new_params.component_type);
-
-    size_t buffer_size = std::max(params.SizeInBytes(), new_params.SizeInBytes());
-
-    // Use a Pixel Buffer Object to download the previous texture and then upload it to the new one
-    // using the new format.
-    OGLBuffer pbo;
-    pbo.Create();
-
-    glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo.handle);
-    glBufferData(GL_PIXEL_PACK_BUFFER, buffer_size, nullptr, GL_STREAM_DRAW_ARB);
-    if (source_format.compressed) {
-        glGetCompressedTextureImage(surface->Texture().handle, 0,
-                                    static_cast<GLsizei>(params.SizeInBytes()), nullptr);
-    } else {
-        glGetTextureImage(surface->Texture().handle, 0, source_format.format, source_format.type,
-                          static_cast<GLsizei>(params.SizeInBytes()), nullptr);
-    }
-    // If the new texture is bigger than the previous one, we need to fill in the rest with data
-    // from the CPU.
-    if (params.SizeInBytes() < new_params.SizeInBytes()) {
-        // Upload the rest of the memory.
-        if (new_params.is_tiled) {
-            // TODO(Subv): We might have to de-tile the subtexture and re-tile it with the rest of
-            // the data in this case. Games like Super Mario Odyssey seem to hit this case when
-            // drawing, it re-uses the memory of a previous texture as a bigger framebuffer but it
-            // doesn't clear it beforehand, the texture is already full of zeros.
-            LOG_CRITICAL(HW_GPU, "Trying to upload extra texture data from the CPU during "
-                                 "reinterpretation but the texture is tiled.");
-        }
-        size_t remaining_size = new_params.SizeInBytes() - params.SizeInBytes();
-        auto address = Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(
-            new_params.addr + params.SizeInBytes());
-        std::vector<u8> data(remaining_size);
-        Memory::ReadBlock(*address, data.data(), data.size());
-        glBufferSubData(GL_PIXEL_PACK_BUFFER, params.SizeInBytes(), remaining_size, data.data());
-    }
-
-    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
-
-    const auto& dest_rect{new_params.GetRect()};
-
-    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo.handle);
-    if (dest_format.compressed) {
-        glCompressedTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
-                                  static_cast<GLsizei>(dest_rect.GetWidth()),
-                                  static_cast<GLsizei>(dest_rect.GetHeight()), dest_format.format,
-                                  static_cast<GLsizei>(new_params.SizeInBytes()), nullptr);
-    } else {
-        glTextureSubImage2D(new_surface->Texture().handle, 0, 0, 0,
-                            static_cast<GLsizei>(dest_rect.GetWidth()),
-                            static_cast<GLsizei>(dest_rect.GetHeight()), dest_format.format,
-                            dest_format.type, nullptr);
-    }
-    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
-
-    pbo.Release();
+    // When using accurate framebuffers, always copy old data to new surface, regardless of format
+    if (Settings::values.use_accurate_framebuffers) {
+        auto source_format = GetFormatTuple(params.pixel_format, params.component_type);
+        auto dest_format = GetFormatTuple(new_params.pixel_format, new_params.component_type);
+
+        size_t buffer_size = std::max(params.SizeInBytes(), new_params.SizeInBytes());
+
+        // Use a Pixel Buffer Object to download the previous texture and then upload it to the new
+        // one using the new format.
+        OGLBuffer pbo;
+        pbo.Create();
+
+        glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo.handle);
+        glBufferData(GL_PIXEL_PACK_BUFFER, buffer_size, nullptr, GL_STREAM_DRAW_ARB);
+        if (source_format.compressed) {
+            glGetCompressedTextureImage(surface->Texture().handle, 0,
+                                        static_cast<GLsizei>(params.SizeInBytes()), nullptr);
+        } else {
+            glGetTextureImage(surface->Texture().handle, 0, source_format.format,
+                              source_format.type, static_cast<GLsizei>(params.SizeInBytes()),
+                              nullptr);
+        }
+        // If the new texture is bigger than the previous one, we need to fill in the rest with data
+        // from the CPU.
+        if (params.SizeInBytes() < new_params.SizeInBytes()) {
+            // Upload the rest of the memory.
+            if (new_params.is_tiled) {
+                // TODO(Subv): We might have to de-tile the subtexture and re-tile it with the rest
+                // of the data in this case. Games like Super Mario Odyssey seem to hit this case
+                // when drawing, it re-uses the memory of a previous texture as a bigger framebuffer
+                // but it doesn't clear it beforehand, the texture is already full of zeros.
+                LOG_CRITICAL(HW_GPU, "Trying to upload extra texture data from the CPU during "
+                                     "reinterpretation but the texture is tiled.");
+            }
+            size_t remaining_size = new_params.SizeInBytes() - params.SizeInBytes();
+            std::vector<u8> data(remaining_size);
+            Memory::ReadBlock(new_params.addr + params.SizeInBytes(), data.data(), data.size());
+            glBufferSubData(GL_PIXEL_PACK_BUFFER, params.SizeInBytes(), remaining_size,
+                            data.data());
+        }
+
+        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+
+        const auto& dest_rect{new_params.GetRect()};
+
+        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo.handle);
+        if (dest_format.compressed) {
+            glCompressedTexSubImage2D(
+                GL_TEXTURE_2D, 0, 0, 0, static_cast<GLsizei>(dest_rect.GetWidth()),
+                static_cast<GLsizei>(dest_rect.GetHeight()), dest_format.format,
+                static_cast<GLsizei>(new_params.SizeInBytes()), nullptr);
+        } else {
+            glTextureSubImage2D(new_surface->Texture().handle, 0, 0, 0,
+                                static_cast<GLsizei>(dest_rect.GetWidth()),
+                                static_cast<GLsizei>(dest_rect.GetHeight()), dest_format.format,
+                                dest_format.type, nullptr);
+        }
+        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+
+        pbo.Release();
+    }
 
     return new_surface;
 }
 
-Surface RasterizerCacheOpenGL::TryFindFramebufferSurface(VAddr cpu_addr) const {
-    // Tries to find the GPU address of a framebuffer based on the CPU address. This is because
-    // final output framebuffers are specified by CPU address, but internally our GPU cache uses
-    // GPU addresses. We iterate through all cached framebuffers, and compare their starting CPU
-    // address to the one provided. This is obviously not great, and won't work if the
-    // framebuffer overlaps surfaces.
-
-    std::vector<Surface> surfaces;
-    for (const auto& surface : GetCache()) {
-        const auto& params = surface.second->GetSurfaceParams();
-        const VAddr surface_cpu_addr = params.GetCpuAddr();
-        if (cpu_addr >= surface_cpu_addr && cpu_addr < (surface_cpu_addr + params.size_in_bytes)) {
-            ASSERT_MSG(cpu_addr == surface_cpu_addr, "overlapping surfaces are unsupported");
-            surfaces.push_back(surface.second);
-        }
-    }
-
-    if (surfaces.empty()) {
-        return {};
-    }
-
-    ASSERT_MSG(surfaces.size() == 1, ">1 surface is unsupported");
-
-    return surfaces[0];
+Surface RasterizerCacheOpenGL::TryFindFramebufferSurface(VAddr addr) const {
+    return TryGet(addr);
 }
 
 void RasterizerCacheOpenGL::ReserveSurface(const Surface& surface) {
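The PBO block above is dense; stripped to its essentials, the reinterpretation is a GPU-side round-trip: read the old texture's bytes into a pixel buffer, then re-upload the same bytes under the destination format. A minimal sketch using plain GL 4.5 DSA calls (fixed RGBA8 format and caller-supplied handles/sizes; error handling and the compressed/CPU-fill branches omitted; not yuzu code):

    #include <glad/glad.h>

    void ReinterpretViaPbo(GLuint src_texture, GLuint dst_texture, GLsizei buffer_size,
                           GLsizei width, GLsizei height) {
        GLuint pbo;
        glGenBuffers(1, &pbo);

        // 1) Download the old texture's pixels into the PBO.
        glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo);
        glBufferData(GL_PIXEL_PACK_BUFFER, buffer_size, nullptr, GL_STREAM_DRAW);
        glGetTextureImage(src_texture, 0, GL_RGBA, GL_UNSIGNED_BYTE, buffer_size, nullptr);
        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

        // 2) Re-upload the same bytes into the new texture; a null data pointer
        //    means "read from the bound PIXEL_UNPACK buffer at offset 0".
        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
        glTextureSubImage2D(dst_texture, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE,
                            nullptr);
        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

        glDeleteBuffers(1, &pbo);
    }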
@@ -923,7 +892,6 @@ Surface RasterizerCacheOpenGL::TryGetReservedSurface(const SurfaceParams& params
     const auto& surface_reserve_key{SurfaceReserveKey::Create(params)};
     auto search{surface_reserve.find(surface_reserve_key)};
     if (search != surface_reserve.end()) {
-        Register(search->second);
         return search->second;
     }
     return {};
@@ -638,9 +638,6 @@ struct SurfaceParams {
                GetFormatBpp(pixel_format) / CHAR_BIT;
     }
 
-    /// Returns the CPU virtual address for this surface
-    VAddr GetCpuAddr() const;
-
     /// Creates SurfaceParams from a texture configuration
     static SurfaceParams CreateForTexture(const Tegra::Texture::FullTextureInfo& config);
 
@@ -653,25 +650,13 @@ struct SurfaceParams {
                                                  Tegra::GPUVAddr zeta_address,
                                                  Tegra::DepthFormat format);
 
-    bool operator==(const SurfaceParams& other) const {
-        return std::tie(addr, is_tiled, block_height, pixel_format, component_type, type, width,
-                        height, unaligned_height, size_in_bytes) ==
-               std::tie(other.addr, other.is_tiled, other.block_height, other.pixel_format,
-                        other.component_type, other.type, other.width, other.height,
-                        other.unaligned_height, other.size_in_bytes);
-    }
-
-    bool operator!=(const SurfaceParams& other) const {
-        return !operator==(other);
-    }
-
     /// Checks if surfaces are compatible for caching
     bool IsCompatibleSurface(const SurfaceParams& other) const {
         return std::tie(pixel_format, type, cache_width, cache_height) ==
                std::tie(other.pixel_format, other.type, other.cache_width, other.cache_height);
     }
 
-    Tegra::GPUVAddr addr;
+    VAddr addr;
     bool is_tiled;
     u32 block_height;
     PixelFormat pixel_format;
@@ -712,7 +697,7 @@ class CachedSurface final {
 public:
     CachedSurface(const SurfaceParams& params);
 
-    Tegra::GPUVAddr GetAddr() const {
+    VAddr GetAddr() const {
         return params.addr;
     }
 
@@ -763,13 +748,16 @@ public:
     /// Flushes the surface to Switch memory
     void FlushSurface(const Surface& surface);
 
-    /// Tries to find a framebuffer GPU address based on the provided CPU address
-    Surface TryFindFramebufferSurface(VAddr cpu_addr) const;
+    /// Tries to find a framebuffer using on the provided CPU address
+    Surface TryFindFramebufferSurface(VAddr addr) const;
 
 private:
     void LoadSurface(const Surface& surface);
     Surface GetSurface(const SurfaceParams& params, bool preserve_contents = true);
 
+    /// Gets an uncached surface, creating it if need be
+    Surface GetUncachedSurface(const SurfaceParams& params);
+
     /// Recreates a surface with new parameters
     Surface RecreateSurface(const Surface& surface, const SurfaceParams& new_params);
 
@@ -12,21 +12,17 @@
 namespace OpenGL {
 
 /// Gets the address for the specified shader stage program
-static Tegra::GPUVAddr GetShaderAddress(Maxwell::ShaderProgram program) {
+static VAddr GetShaderAddress(Maxwell::ShaderProgram program) {
     auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();
     auto& shader_config = gpu.regs.shader_config[static_cast<size_t>(program)];
-    return gpu.regs.code_address.CodeAddress() + shader_config.offset;
+    return *gpu.memory_manager.GpuToCpuAddress(gpu.regs.code_address.CodeAddress() +
+                                               shader_config.offset);
 }
 
 /// Gets the shader program code from memory for the specified address
-static GLShader::ProgramCode GetShaderCode(Tegra::GPUVAddr addr) {
-    auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();
-
+static GLShader::ProgramCode GetShaderCode(VAddr addr) {
     GLShader::ProgramCode program_code(GLShader::MAX_PROGRAM_CODE_LENGTH);
-    const boost::optional<VAddr> cpu_address{gpu.memory_manager.GpuToCpuAddress(addr)};
-    Memory::ReadBlock(*cpu_address, program_code.data(), program_code.size() * sizeof(u64));
-
+    Memory::ReadBlock(addr, program_code.data(), program_code.size() * sizeof(u64));
     return program_code;
 }
 
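The shader cache gets the same treatment as the surface cache: the GPU-to-CPU translation now happens once, when the key is formed in GetShaderAddress, and every later cache operation works on plain VAddr keys. Schematically ("translate at the boundary"; identity translator and toy types, not yuzu's API):

    #include <cstdint>
    #include <map>
    #include <memory>

    using GPUVAddr = std::uint64_t;
    using VAddr = std::uint64_t;

    struct Shader {};

    // Stand-in for the GPU MMU lookup; the real one can fail for unmapped
    // addresses, which the code above handles by dereferencing an optional.
    VAddr Translate(GPUVAddr gpu_addr) {
        return gpu_addr;
    }

    std::map<VAddr, std::shared_ptr<Shader>> shader_cache;

    std::shared_ptr<Shader> GetShader(GPUVAddr gpu_addr) {
        const VAddr key = Translate(gpu_addr); // translate once, at the boundary
        if (const auto it = shader_cache.find(key); it != shader_cache.end()) {
            return it->second;                 // all lookups are CPU-address based
        }
        auto shader = std::make_shared<Shader>();
        shader_cache.emplace(key, shader);
        return shader;
    }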
@@ -55,7 +51,7 @@ static void SetShaderUniformBlockBindings(GLuint shader) {
                       sizeof(GLShader::MaxwellUniformData));
 }
 
-CachedShader::CachedShader(Tegra::GPUVAddr addr, Maxwell::ShaderProgram program_type)
+CachedShader::CachedShader(VAddr addr, Maxwell::ShaderProgram program_type)
     : addr{addr}, program_type{program_type}, setup{GetShaderCode(addr)} {
 
     GLShader::ProgramResult program_result;
@@ -113,7 +109,7 @@ GLint CachedShader::GetUniformLocation(const std::string& name) {
 }
 
 Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
-    const Tegra::GPUVAddr program_addr{GetShaderAddress(program)};
+    const VAddr program_addr{GetShaderAddress(program)};
 
     // Look up shader in the cache based on address
     Shader shader{TryGet(program_addr)};
@@ -8,7 +8,6 @@
 #include <unordered_map>
 
 #include "common/common_types.h"
-#include "video_core/memory_manager.h"
 #include "video_core/rasterizer_cache.h"
 #include "video_core/renderer_opengl/gl_resource_manager.h"
 #include "video_core/renderer_opengl/gl_shader_gen.h"
@@ -21,10 +20,10 @@ using Maxwell = Tegra::Engines::Maxwell3D::Regs;
 
 class CachedShader final {
 public:
-    CachedShader(Tegra::GPUVAddr addr, Maxwell::ShaderProgram program_type);
+    CachedShader(VAddr addr, Maxwell::ShaderProgram program_type);
 
     /// Gets the address of the shader in guest memory, required for cache management
-    Tegra::GPUVAddr GetAddr() const {
+    VAddr GetAddr() const {
         return addr;
     }
 
@@ -50,7 +49,7 @@ public:
     GLint GetUniformLocation(const std::string& name);
 
 private:
-    Tegra::GPUVAddr addr;
+    VAddr addr;
     Maxwell::ShaderProgram program_type;
     GLShader::ShaderSetup setup;
     GLShader::ShaderEntries entries;