Merge pull request #8784 from Docteh/nosnek
code: dodge PAGE_SIZE #define
@@ -22,12 +22,3 @@ typedef void* HANDLE;
#include <microprofile.h>

#define MP_RGB(r, g, b) ((r) << 16 | (g) << 8 | (b) << 0)

// On OS X, some Mach header included by MicroProfile defines these as macros, conflicting with
// identifiers we use.
#ifdef PAGE_SIZE
#undef PAGE_SIZE
#endif
#ifdef PAGE_MASK
#undef PAGE_MASK
#endif

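The hunk above removes the old workaround of #undef-ing PAGE_SIZE and PAGE_MASK after including MicroProfile; the PR instead renames yuzu's own constants so they can no longer collide with system macros. A minimal sketch of the collision being dodged (illustrative only; the macro-defining header here is hypothetical, the YUZU_* names follow this PR):

// collision_sketch.cpp -- not part of the PR; shows why a constant named PAGE_SIZE is fragile.
#include <cstddef>

// Pretend some system header (e.g. a Mach or libc header) pulled in indirectly defines:
#define PAGE_SIZE 4096

namespace Core::Memory {
// Before the rename, a declaration spelled PAGE_SIZE would be rewritten by the
// preprocessor into "constexpr std::size_t 4096 = ...", which does not compile:
// constexpr std::size_t PAGE_SIZE = 4096;

// After the rename there is no macro with this spelling, so the declaration is safe:
constexpr std::size_t YUZU_PAGEBITS = 12;
constexpr std::size_t YUZU_PAGESIZE = std::size_t{1} << YUZU_PAGEBITS;
} // namespace Core::Memory

static_assert(Core::Memory::YUZU_PAGESIZE == PAGE_SIZE); // both are 4096 in this sketch

int main() {
    return 0;
}
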
@@ -190,8 +190,8 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
    config.callbacks = cb.get();
    config.coprocessors[15] = cp15;
    config.define_unpredictable_behaviour = true;
    static constexpr std::size_t PAGE_BITS = 12;
    static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - PAGE_BITS);
    static constexpr std::size_t YUZU_PAGEBITS = 12;
    static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - YUZU_PAGEBITS);
    if (page_table) {
        config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
            page_table->pointers.data());

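For context, with YUZU_PAGEBITS = 12 the dynarmic page table above has one entry per 4 KiB page of the full 32-bit guest address space. A quick compile-time check (sketch, not PR code):

#include <cstddef>

static constexpr std::size_t YUZU_PAGEBITS = 12;
static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - YUZU_PAGEBITS);
static_assert(NUM_PAGE_TABLE_ENTRIES == 1'048'576); // 2^20 pointers cover 2^32 bytes at 4 KiB each

int main() {
    return 0;
}
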
@@ -14,7 +14,7 @@ namespace Loader {

namespace {
constexpr u32 PageAlignSize(u32 size) {
    return static_cast<u32>((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK);
    return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
}
} // Anonymous namespace

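The PageAlignSize helpers touched in these loader hunks round a size up to the next 4 KiB boundary with the usual add-then-mask trick. A small worked example (sketch; the constants mirror Core::Memory as renamed in this PR):

#include <cstdint>

constexpr std::uint64_t YUZU_PAGEBITS = 12;
constexpr std::uint64_t YUZU_PAGESIZE = std::uint64_t{1} << YUZU_PAGEBITS; // 0x1000
constexpr std::uint64_t YUZU_PAGEMASK = YUZU_PAGESIZE - 1;                 // 0x0FFF

constexpr std::uint32_t PageAlignSize(std::uint32_t size) {
    // Adding the mask carries any partial page into the next one; clearing the
    // low bits then snaps the result to a page boundary.
    return static_cast<std::uint32_t>((size + YUZU_PAGEMASK) & ~YUZU_PAGEMASK);
}

static_assert(PageAlignSize(0x0000) == 0x0000);
static_assert(PageAlignSize(0x0001) == 0x1000);
static_assert(PageAlignSize(0x1234) == 0x2000);
static_assert(PageAlignSize(0x3000) == 0x3000); // already-aligned sizes are unchanged

int main() {
    return 0;
}
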
@@ -125,7 +125,7 @@ FileType AppLoader_NRO::IdentifyType(const FileSys::VirtualFile& nro_file) {
}

static constexpr u32 PageAlignSize(u32 size) {
    return static_cast<u32>((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK);
    return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
}

static bool LoadNroImpl(Kernel::KProcess& process, const std::vector<u8>& data) {

@@ -45,7 +45,7 @@ std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data,
}

constexpr u32 PageAlignSize(u32 size) {
    return static_cast<u32>((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK);
    return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
}
} // Anonymous namespace

@@ -36,10 +36,11 @@ struct Memory::Impl {
    }

    void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
        ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
        ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", target);
        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target,
                 Common::PageType::Memory);

        if (Settings::IsFastmemEnabled()) {
            system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size);
@@ -47,9 +48,10 @@ struct Memory::Impl {
    }

    void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped);
        ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
                 Common::PageType::Unmapped);

        if (Settings::IsFastmemEnabled()) {
            system.DeviceMemory().buffer.Unmap(base, size);
@@ -57,7 +59,7 @@ struct Memory::Impl {
    }

    [[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const {
        const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]};
        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};

        if (!paddr) {
            return {};
@@ -67,7 +69,7 @@ struct Memory::Impl {
    }

    [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
        const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]};
        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};

        if (paddr == 0) {
            return {};
@@ -176,13 +178,14 @@ struct Memory::Impl {
                   auto on_unmapped, auto on_memory, auto on_rasterizer, auto increment) {
        const auto& page_table = process.PageTable().PageTableImpl();
        std::size_t remaining_size = size;
        std::size_t page_index = addr >> PAGE_BITS;
        std::size_t page_offset = addr & PAGE_MASK;
        std::size_t page_index = addr >> YUZU_PAGEBITS;
        std::size_t page_offset = addr & YUZU_PAGEMASK;

        while (remaining_size) {
            const std::size_t copy_amount =
                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
                std::min(static_cast<std::size_t>(YUZU_PAGESIZE) - page_offset, remaining_size);
            const auto current_vaddr =
                static_cast<VAddr>((page_index << YUZU_PAGEBITS) + page_offset);

            const auto [pointer, type] = page_table.pointers[page_index].PointerType();
            switch (type) {
@@ -192,7 +195,7 @@ struct Memory::Impl {
            }
            case Common::PageType::Memory: {
                DEBUG_ASSERT(pointer);
                u8* mem_ptr = pointer + page_offset + (page_index << PAGE_BITS);
                u8* mem_ptr = pointer + page_offset + (page_index << YUZU_PAGEBITS);
                on_memory(copy_amount, mem_ptr);
                break;
            }
@@ -339,10 +342,10 @@ struct Memory::Impl {
        // Iterate over a contiguous CPU address space, marking/unmarking the region.
        // The region is at a granularity of CPU pages.

        const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
        for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
        const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
        for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
            const Common::PageType page_type{
                current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
            if (debug) {
                // Switch page type to debug if now debug
                switch (page_type) {
@@ -354,7 +357,7 @@ struct Memory::Impl {
                    // Page is already marked.
                    break;
                case Common::PageType::Memory:
                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                        nullptr, Common::PageType::DebugMemory);
                    break;
                default:
@@ -371,9 +374,9 @@ struct Memory::Impl {
                    // Don't mess with already non-debug or rasterizer memory.
                    break;
                case Common::PageType::DebugMemory: {
                    u8* const pointer{GetPointerFromDebugMemory(vaddr & ~PAGE_MASK)};
                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
                        pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
                    u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)};
                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                        pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
                    break;
                }
                default:
@@ -398,10 +401,10 @@ struct Memory::Impl {
        // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
        // is different). This assumes the specified GPU address region is contiguous as well.

        const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
        for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
        const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
        for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
            const Common::PageType page_type{
                current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
            if (cached) {
                // Switch page type to cached if now cached
                switch (page_type) {
@@ -411,7 +414,7 @@ struct Memory::Impl {
                    break;
                case Common::PageType::DebugMemory:
                case Common::PageType::Memory:
                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                        nullptr, Common::PageType::RasterizerCachedMemory);
                    break;
                case Common::PageType::RasterizerCachedMemory:
@@ -434,16 +437,16 @@ struct Memory::Impl {
                    // that this area is already unmarked as cached.
                    break;
                case Common::PageType::RasterizerCachedMemory: {
                    u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)};
                    u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~YUZU_PAGEMASK)};
                    if (pointer == nullptr) {
                        // It's possible that this function has been called while updating the
                        // pagetable after unmapping a VMA. In that case the underlying VMA will no
                        // longer exist, and we should just leave the pagetable entry blank.
                        current_page_table->pointers[vaddr >> PAGE_BITS].Store(
                        current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                            nullptr, Common::PageType::Unmapped);
                    } else {
                        current_page_table->pointers[vaddr >> PAGE_BITS].Store(
                            pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
                        current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                            pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
                    }
                    break;
                }
@@ -465,8 +468,8 @@ struct Memory::Impl {
     */
    void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target,
                  Common::PageType type) {
        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * PAGE_SIZE,
                  (base + size) * PAGE_SIZE);
        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * YUZU_PAGESIZE,
                  (base + size) * YUZU_PAGESIZE);

        // During boot, current_page_table might not be set yet, in which case we need not flush
        if (system.IsPoweredOn()) {
@@ -474,7 +477,7 @@ struct Memory::Impl {
            for (u64 i = 0; i < size; i++) {
                const auto page = base + i;
                if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) {
                    gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
                    gpu.FlushAndInvalidateRegion(page << YUZU_PAGEBITS, YUZU_PAGESIZE);
                }
            }
        }
@@ -485,7 +488,7 @@ struct Memory::Impl {

        if (!target) {
            ASSERT_MSG(type != Common::PageType::Memory,
                       "Mapping memory page without a pointer @ {:016x}", base * PAGE_SIZE);
                       "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE);

            while (base != end) {
                page_table.pointers[base].Store(nullptr, type);
@@ -496,14 +499,14 @@ struct Memory::Impl {
        } else {
            while (base != end) {
                page_table.pointers[base].Store(
                    system.DeviceMemory().GetPointer(target) - (base << PAGE_BITS), type);
                page_table.backing_addr[base] = target - (base << PAGE_BITS);
                    system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type);
                page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);

                ASSERT_MSG(page_table.pointers[base].Pointer(),
                           "memory mapping base yield a nullptr within the table");

                base += 1;
                target += PAGE_SIZE;
                target += YUZU_PAGESIZE;
            }
        }
    }
@@ -518,7 +521,7 @@ struct Memory::Impl {
        }

        // Avoid adding any extra logic to this fast-path block
        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw();
        if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
            return &pointer[vaddr];
        }
@@ -657,7 +660,7 @@ void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
    const Kernel::KProcess& process = *system.CurrentProcess();
    const auto& page_table = process.PageTable().PageTableImpl();
    const size_t page = vaddr >> PAGE_BITS;
    const size_t page = vaddr >> YUZU_PAGEBITS;
    if (page >= page_table.pointers.size()) {
        return false;
    }
@@ -668,9 +671,9 @@ bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {

bool Memory::IsValidVirtualAddressRange(VAddr base, u64 size) const {
    VAddr end = base + size;
    VAddr page = Common::AlignDown(base, PAGE_SIZE);
    VAddr page = Common::AlignDown(base, YUZU_PAGESIZE);

    for (; page < end; page += PAGE_SIZE) {
    for (; page < end; page += YUZU_PAGESIZE) {
        if (!IsValidVirtualAddress(page)) {
            return false;
        }

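Throughout the memory.cpp hunks above the same decomposition recurs: the high bits of a virtual address select a page table entry (vaddr >> YUZU_PAGEBITS) and the low bits are the offset within that page (vaddr & YUZU_PAGEMASK). A compact sketch of the chunking loop pattern, loosely following the walk loop above (the helper name here is invented, not the PR's):

#include <algorithm>
#include <cstddef>
#include <cstdint>

constexpr std::uint64_t YUZU_PAGEBITS = 12;
constexpr std::uint64_t YUZU_PAGESIZE = std::uint64_t{1} << YUZU_PAGEBITS;
constexpr std::uint64_t YUZU_PAGEMASK = YUZU_PAGESIZE - 1;

// Split an access of `size` bytes at `addr` into page-sized chunks, the way the
// walk loop in Memory::Impl does before dispatching on each page's PageType.
template <typename OnChunk>
void ForEachPageChunk(std::uint64_t addr, std::size_t size, OnChunk&& on_chunk) {
    std::size_t remaining = size;
    std::uint64_t page_index = addr >> YUZU_PAGEBITS;  // which page table entry
    std::uint64_t page_offset = addr & YUZU_PAGEMASK;  // offset inside that page
    while (remaining > 0) {
        const std::size_t copy_amount =
            std::min<std::size_t>(YUZU_PAGESIZE - page_offset, remaining);
        on_chunk(page_index, page_offset, copy_amount);
        ++page_index;
        page_offset = 0; // only the first chunk can start mid-page
        remaining -= copy_amount;
    }
}

int main() {
    // An 8 KiB access starting 0x800 into a page spans three pages:
    // chunks of 0x800, 0x1000 and 0x800 bytes.
    ForEachPageChunk(0x1000'0800ULL, 0x2000, [](std::uint64_t, std::uint64_t, std::size_t) {});
    return 0;
}
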
@@ -27,9 +27,9 @@ namespace Core::Memory {
 * Page size used by the ARM architecture. This is the smallest granularity with which memory can
 * be mapped.
 */
constexpr std::size_t PAGE_BITS = 12;
constexpr u64 PAGE_SIZE = 1ULL << PAGE_BITS;
constexpr u64 PAGE_MASK = PAGE_SIZE - 1;
constexpr std::size_t YUZU_PAGEBITS = 12;
constexpr u64 YUZU_PAGESIZE = 1ULL << YUZU_PAGEBITS;
constexpr u64 YUZU_PAGEMASK = YUZU_PAGESIZE - 1;

/// Virtual user-space memory regions
enum : VAddr {

@@ -22,8 +22,9 @@ constexpr VAddr c = 0x1328914000;
class RasterizerInterface {
public:
    void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
        const u64 page_start{addr >> Core::Memory::PAGE_BITS};
        const u64 page_end{(addr + size + Core::Memory::PAGE_SIZE - 1) >> Core::Memory::PAGE_BITS};
        const u64 page_start{addr >> Core::Memory::YUZU_PAGEBITS};
        const u64 page_end{(addr + size + Core::Memory::YUZU_PAGESIZE - 1) >>
                           Core::Memory::YUZU_PAGEBITS};
        for (u64 page = page_start; page < page_end; ++page) {
            int& value = page_table[page];
            value += delta;
@@ -37,7 +38,7 @@ public:
    }

    [[nodiscard]] int Count(VAddr addr) const noexcept {
        const auto it = page_table.find(addr >> Core::Memory::PAGE_BITS);
        const auto it = page_table.find(addr >> Core::Memory::YUZU_PAGEBITS);
        return it == page_table.end() ? 0 : it->second;
    }

@@ -36,7 +36,7 @@ struct NullBufferParams {};
template <class RasterizerInterface>
class BufferBase {
    static constexpr u64 PAGES_PER_WORD = 64;
    static constexpr u64 BYTES_PER_PAGE = Core::Memory::PAGE_SIZE;
    static constexpr u64 BYTES_PER_PAGE = Core::Memory::YUZU_PAGESIZE;
    static constexpr u64 BYTES_PER_WORD = PAGES_PER_WORD * BYTES_PER_PAGE;

    /// Vector tracking modified pages tightly packed with small vector optimization

@@ -60,8 +60,8 @@ class BufferCache {

    // Page size for caching purposes.
    // This is unrelated to the CPU page size and it can be changed as it seems optimal.
    static constexpr u32 PAGE_BITS = 16;
    static constexpr u64 PAGE_SIZE = u64{1} << PAGE_BITS;
    static constexpr u32 YUZU_PAGEBITS = 16;
    static constexpr u64 YUZU_PAGESIZE = u64{1} << YUZU_PAGEBITS;

    static constexpr bool IS_OPENGL = P::IS_OPENGL;
    static constexpr bool HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS =
@@ -216,8 +216,8 @@ private:

    template <typename Func>
    void ForEachBufferInRange(VAddr cpu_addr, u64 size, Func&& func) {
        const u64 page_end = Common::DivCeil(cpu_addr + size, PAGE_SIZE);
        for (u64 page = cpu_addr >> PAGE_BITS; page < page_end;) {
        const u64 page_end = Common::DivCeil(cpu_addr + size, YUZU_PAGESIZE);
        for (u64 page = cpu_addr >> YUZU_PAGEBITS; page < page_end;) {
            const BufferId buffer_id = page_table[page];
            if (!buffer_id) {
                ++page;
@@ -227,7 +227,7 @@ private:
            func(buffer_id, buffer);

            const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
            page = Common::DivCeil(end_addr, PAGE_SIZE);
            page = Common::DivCeil(end_addr, YUZU_PAGESIZE);
        }
    }

@@ -262,8 +262,8 @@ private:
    }

    static bool IsRangeGranular(VAddr cpu_addr, size_t size) {
        return (cpu_addr & ~Core::Memory::PAGE_MASK) ==
               ((cpu_addr + size) & ~Core::Memory::PAGE_MASK);
        return (cpu_addr & ~Core::Memory::YUZU_PAGEMASK) ==
               ((cpu_addr + size) & ~Core::Memory::YUZU_PAGEMASK);
    }

    void RunGarbageCollector();
@@ -439,7 +439,7 @@ private:
    u64 minimum_memory = 0;
    u64 critical_memory = 0;

    std::array<BufferId, ((1ULL << 39) >> PAGE_BITS)> page_table;
    std::array<BufferId, ((1ULL << 39) >> YUZU_PAGEBITS)> page_table;
};

template <class P>
@@ -926,8 +926,8 @@ void BufferCache<P>::PopAsyncFlushes() {}

template <class P>
bool BufferCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
    const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
    for (u64 page = addr >> PAGE_BITS; page < page_end;) {
    const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end;) {
        const BufferId image_id = page_table[page];
        if (!image_id) {
            ++page;
@@ -938,7 +938,7 @@ bool BufferCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
            return true;
        }
        const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
        page = Common::DivCeil(end_addr, PAGE_SIZE);
        page = Common::DivCeil(end_addr, YUZU_PAGESIZE);
    }
    return false;
}
@@ -946,8 +946,8 @@ bool BufferCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
template <class P>
bool BufferCache<P>::IsRegionRegistered(VAddr addr, size_t size) {
    const VAddr end_addr = addr + size;
    const u64 page_end = Common::DivCeil(end_addr, PAGE_SIZE);
    for (u64 page = addr >> PAGE_BITS; page < page_end;) {
    const u64 page_end = Common::DivCeil(end_addr, YUZU_PAGESIZE);
    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end;) {
        const BufferId buffer_id = page_table[page];
        if (!buffer_id) {
            ++page;
@@ -959,15 +959,15 @@ bool BufferCache<P>::IsRegionRegistered(VAddr addr, size_t size) {
        if (buf_start_addr < end_addr && addr < buf_end_addr) {
            return true;
        }
        page = Common::DivCeil(end_addr, PAGE_SIZE);
        page = Common::DivCeil(end_addr, YUZU_PAGESIZE);
    }
    return false;
}

template <class P>
bool BufferCache<P>::IsRegionCpuModified(VAddr addr, size_t size) {
    const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
    for (u64 page = addr >> PAGE_BITS; page < page_end;) {
    const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end;) {
        const BufferId image_id = page_table[page];
        if (!image_id) {
            ++page;
@@ -978,7 +978,7 @@ bool BufferCache<P>::IsRegionCpuModified(VAddr addr, size_t size) {
            return true;
        }
        const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
        page = Common::DivCeil(end_addr, PAGE_SIZE);
        page = Common::DivCeil(end_addr, YUZU_PAGESIZE);
    }
    return false;
}
@@ -1472,7 +1472,7 @@ BufferId BufferCache<P>::FindBuffer(VAddr cpu_addr, u32 size) {
    if (cpu_addr == 0) {
        return NULL_BUFFER_ID;
    }
    const u64 page = cpu_addr >> PAGE_BITS;
    const u64 page = cpu_addr >> YUZU_PAGEBITS;
    const BufferId buffer_id = page_table[page];
    if (!buffer_id) {
        return CreateBuffer(cpu_addr, size);
@@ -1493,8 +1493,9 @@ typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(VAddr cpu
    VAddr end = cpu_addr + wanted_size;
    int stream_score = 0;
    bool has_stream_leap = false;
    for (; cpu_addr >> PAGE_BITS < Common::DivCeil(end, PAGE_SIZE); cpu_addr += PAGE_SIZE) {
        const BufferId overlap_id = page_table[cpu_addr >> PAGE_BITS];
    for (; cpu_addr >> YUZU_PAGEBITS < Common::DivCeil(end, YUZU_PAGESIZE);
         cpu_addr += YUZU_PAGESIZE) {
        const BufferId overlap_id = page_table[cpu_addr >> YUZU_PAGEBITS];
        if (!overlap_id) {
            continue;
        }
@@ -1520,11 +1521,11 @@ typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(VAddr cpu
            // as a stream buffer. Increase the size to skip constantly recreating buffers.
            has_stream_leap = true;
            if (expands_right) {
                begin -= PAGE_SIZE * 256;
                begin -= YUZU_PAGESIZE * 256;
                cpu_addr = begin;
            }
            if (expands_left) {
                end += PAGE_SIZE * 256;
                end += YUZU_PAGESIZE * 256;
            }
        }
    }
@@ -1598,8 +1599,8 @@ void BufferCache<P>::ChangeRegister(BufferId buffer_id) {
    }
    const VAddr cpu_addr_begin = buffer.CpuAddr();
    const VAddr cpu_addr_end = cpu_addr_begin + size;
    const u64 page_begin = cpu_addr_begin / PAGE_SIZE;
    const u64 page_end = Common::DivCeil(cpu_addr_end, PAGE_SIZE);
    const u64 page_begin = cpu_addr_begin / YUZU_PAGESIZE;
    const u64 page_end = Common::DivCeil(cpu_addr_end, YUZU_PAGESIZE);
    for (u64 page = page_begin; page != page_end; ++page) {
        if constexpr (insert) {
            page_table[page] = buffer_id;

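Note that the buffer cache's YUZU_PAGEBITS = 16 above is a cache-internal granularity (64 KiB pages), unrelated to the 4 KiB CPU pages in Core::Memory; the page_table member holds one BufferId per 64 KiB slot of the 39-bit address space. A rough size check (sketch only; the CACHE_* names are local to this example):

#include <cstdint>

constexpr std::uint32_t CACHE_PAGEBITS = 16;                                  // BufferCache granularity
constexpr std::uint64_t CACHE_PAGESIZE = std::uint64_t{1} << CACHE_PAGEBITS;  // 64 KiB
constexpr std::uint64_t NUM_SLOTS = (std::uint64_t{1} << 39) >> CACHE_PAGEBITS;

static_assert(CACHE_PAGESIZE == 64 * 1024);
static_assert(NUM_SLOTS == 8 * 1024 * 1024); // 2^23 entries, one BufferId per 64 KiB slot

int main() {
    return 0;
}
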
@@ -369,8 +369,8 @@ bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
    if (!cpu_addr) {
        return false;
    }
    const std::size_t page{(*cpu_addr & Core::Memory::PAGE_MASK) + size};
    return page <= Core::Memory::PAGE_SIZE;
    const std::size_t page{(*cpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
    return page <= Core::Memory::YUZU_PAGESIZE;
}

bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const {

@@ -214,8 +214,8 @@ private:
            return cache_begin < addr_end && addr_begin < cache_end;
        };

        const u64 page_end = addr_end >> PAGE_BITS;
        for (u64 page = addr_begin >> PAGE_BITS; page <= page_end; ++page) {
        const u64 page_end = addr_end >> YUZU_PAGEBITS;
        for (u64 page = addr_begin >> YUZU_PAGEBITS; page <= page_end; ++page) {
            const auto& it = cached_queries.find(page);
            if (it == std::end(cached_queries)) {
                continue;
@@ -235,14 +235,14 @@ private:
    /// Registers the passed parameters as cached and returns a pointer to the stored cached query.
    CachedQuery* Register(VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr, bool timestamp) {
        rasterizer.UpdatePagesCachedCount(cpu_addr, CachedQuery::SizeInBytes(timestamp), 1);
        const u64 page = static_cast<u64>(cpu_addr) >> PAGE_BITS;
        const u64 page = static_cast<u64>(cpu_addr) >> YUZU_PAGEBITS;
        return &cached_queries[page].emplace_back(static_cast<QueryCache&>(*this), type, cpu_addr,
                                                  host_ptr);
    }

    /// Tries to a get a cached query. Returns nullptr on failure.
    CachedQuery* TryGet(VAddr addr) {
        const u64 page = static_cast<u64>(addr) >> PAGE_BITS;
        const u64 page = static_cast<u64>(addr) >> YUZU_PAGEBITS;
        const auto it = cached_queries.find(page);
        if (it == std::end(cached_queries)) {
            return nullptr;
@@ -260,8 +260,8 @@ private:
        uncommitted_flushes->push_back(addr);
    }

    static constexpr std::uintptr_t PAGE_SIZE = 4096;
    static constexpr unsigned PAGE_BITS = 12;
    static constexpr std::uintptr_t YUZU_PAGESIZE = 4096;
    static constexpr unsigned YUZU_PAGEBITS = 12;

    VideoCore::RasterizerInterface& rasterizer;
    Tegra::Engines::Maxwell3D& maxwell3d;

@@ -24,8 +24,8 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int del
    u64 cache_bytes = 0;

    std::atomic_thread_fence(std::memory_order_acquire);
    const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
    for (u64 page = addr >> PAGE_BITS; page != page_end; ++page) {
    const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
    for (u64 page = addr >> YUZU_PAGEBITS; page != page_end; ++page) {
        std::atomic_uint16_t& count = cached_pages.at(page >> 2).Count(page);

        if (delta > 0) {
@@ -44,26 +44,27 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int del
            if (uncache_bytes == 0) {
                uncache_begin = page;
            }
            uncache_bytes += PAGE_SIZE;
            uncache_bytes += YUZU_PAGESIZE;
        } else if (uncache_bytes > 0) {
            cpu_memory.RasterizerMarkRegionCached(uncache_begin << PAGE_BITS, uncache_bytes, false);
            cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes,
                                                  false);
            uncache_bytes = 0;
        }
        if (count.load(std::memory_order::relaxed) == 1 && delta > 0) {
            if (cache_bytes == 0) {
                cache_begin = page;
            }
            cache_bytes += PAGE_SIZE;
            cache_bytes += YUZU_PAGESIZE;
        } else if (cache_bytes > 0) {
            cpu_memory.RasterizerMarkRegionCached(cache_begin << PAGE_BITS, cache_bytes, true);
            cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
            cache_bytes = 0;
        }
    }
    if (uncache_bytes > 0) {
        cpu_memory.RasterizerMarkRegionCached(uncache_begin << PAGE_BITS, uncache_bytes, false);
        cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes, false);
    }
    if (cache_bytes > 0) {
        cpu_memory.RasterizerMarkRegionCached(cache_begin << PAGE_BITS, cache_bytes, true);
        cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
    }
}

@@ -123,8 +123,8 @@ void ShaderCache::Register(std::unique_ptr<ShaderInfo> data, VAddr addr, size_t
    const VAddr addr_end = addr + size;
    Entry* const entry = NewEntry(addr, addr_end, data.get());

    const u64 page_end = (addr_end + PAGE_SIZE - 1) >> PAGE_BITS;
    for (u64 page = addr >> PAGE_BITS; page < page_end; ++page) {
    const u64 page_end = (addr_end + YUZU_PAGESIZE - 1) >> YUZU_PAGEBITS;
    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end; ++page) {
        invalidation_cache[page].push_back(entry);
    }

@@ -135,8 +135,8 @@ void ShaderCache::Register(std::unique_ptr<ShaderInfo> data, VAddr addr, size_t

void ShaderCache::InvalidatePagesInRegion(VAddr addr, size_t size) {
    const VAddr addr_end = addr + size;
    const u64 page_end = (addr_end + PAGE_SIZE - 1) >> PAGE_BITS;
    for (u64 page = addr >> PAGE_BITS; page < page_end; ++page) {
    const u64 page_end = (addr_end + YUZU_PAGESIZE - 1) >> YUZU_PAGEBITS;
    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end; ++page) {
        auto it = invalidation_cache.find(page);
        if (it == invalidation_cache.end()) {
            continue;
@@ -189,8 +189,8 @@ void ShaderCache::InvalidatePageEntries(std::vector<Entry*>& entries, VAddr addr
}

void ShaderCache::RemoveEntryFromInvalidationCache(const Entry* entry) {
    const u64 page_end = (entry->addr_end + PAGE_SIZE - 1) >> PAGE_BITS;
    for (u64 page = entry->addr_start >> PAGE_BITS; page < page_end; ++page) {
    const u64 page_end = (entry->addr_end + YUZU_PAGESIZE - 1) >> YUZU_PAGEBITS;
    for (u64 page = entry->addr_start >> YUZU_PAGEBITS; page < page_end; ++page) {
        const auto entries_it = invalidation_cache.find(page);
        ASSERT(entries_it != invalidation_cache.end());
        std::vector<Entry*>& entries = entries_it->second;

@@ -29,8 +29,8 @@ struct ShaderInfo {
};

class ShaderCache {
    static constexpr u64 PAGE_BITS = 14;
    static constexpr u64 PAGE_SIZE = u64(1) << PAGE_BITS;
    static constexpr u64 YUZU_PAGEBITS = 14;
    static constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS;

    static constexpr size_t NUM_PROGRAMS = 6;

@@ -589,7 +589,7 @@ void TextureCache<P>::BlitImage(const Tegra::Engines::Fermi2D::Surface& dst,
template <class P>
typename P::ImageView* TextureCache<P>::TryFindFramebufferImageView(VAddr cpu_addr) {
    // TODO: Properly implement this
    const auto it = page_table.find(cpu_addr >> PAGE_BITS);
    const auto it = page_table.find(cpu_addr >> YUZU_PAGEBITS);
    if (it == page_table.end()) {
        return nullptr;
    }
@@ -1485,14 +1485,14 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
            std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>>& selected_page_table) {
            const auto page_it = selected_page_table.find(page);
            if (page_it == selected_page_table.end()) {
                ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
                ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS);
                return;
            }
            std::vector<ImageId>& image_ids = page_it->second;
            const auto vector_it = std::ranges::find(image_ids, image_id);
            if (vector_it == image_ids.end()) {
                ASSERT_MSG(false, "Unregistering unregistered image in page=0x{:x}",
                           page << PAGE_BITS);
                           page << YUZU_PAGEBITS);
                return;
            }
            image_ids.erase(vector_it);
@@ -1504,14 +1504,14 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
        ForEachCPUPage(image.cpu_addr, image.guest_size_bytes, [this, map_id](u64 page) {
            const auto page_it = page_table.find(page);
            if (page_it == page_table.end()) {
                ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
                ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS);
                return;
            }
            std::vector<ImageMapId>& image_map_ids = page_it->second;
            const auto vector_it = std::ranges::find(image_map_ids, map_id);
            if (vector_it == image_map_ids.end()) {
                ASSERT_MSG(false, "Unregistering unregistered image in page=0x{:x}",
                           page << PAGE_BITS);
                           page << YUZU_PAGEBITS);
                return;
            }
            image_map_ids.erase(vector_it);
@@ -1532,7 +1532,7 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
        ForEachCPUPage(cpu_addr, size, [this, image_id](u64 page) {
            const auto page_it = page_table.find(page);
            if (page_it == page_table.end()) {
                ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
                ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS);
                return;
            }
            std::vector<ImageMapId>& image_map_ids = page_it->second;

@@ -47,7 +47,7 @@ struct ImageViewInOut {
template <class P>
class TextureCache {
    /// Address shift for caching images into a hash table
    static constexpr u64 PAGE_BITS = 20;
    static constexpr u64 YUZU_PAGEBITS = 20;

    /// Enables debugging features to the texture cache
    static constexpr bool ENABLE_VALIDATION = P::ENABLE_VALIDATION;
@@ -178,8 +178,8 @@ private:
    template <typename Func>
    static void ForEachCPUPage(VAddr addr, size_t size, Func&& func) {
        static constexpr bool RETURNS_BOOL = std::is_same_v<std::invoke_result<Func, u64>, bool>;
        const u64 page_end = (addr + size - 1) >> PAGE_BITS;
        for (u64 page = addr >> PAGE_BITS; page <= page_end; ++page) {
        const u64 page_end = (addr + size - 1) >> YUZU_PAGEBITS;
        for (u64 page = addr >> YUZU_PAGEBITS; page <= page_end; ++page) {
            if constexpr (RETURNS_BOOL) {
                if (func(page)) {
                    break;
@@ -193,8 +193,8 @@ private:
    template <typename Func>
    static void ForEachGPUPage(GPUVAddr addr, size_t size, Func&& func) {
        static constexpr bool RETURNS_BOOL = std::is_same_v<std::invoke_result<Func, u64>, bool>;
        const u64 page_end = (addr + size - 1) >> PAGE_BITS;
        for (u64 page = addr >> PAGE_BITS; page <= page_end; ++page) {
        const u64 page_end = (addr + size - 1) >> YUZU_PAGEBITS;
        for (u64 page = addr >> YUZU_PAGEBITS; page <= page_end; ++page) {
            if constexpr (RETURNS_BOOL) {
                if (func(page)) {
                    break;

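Even after the rename, several independent page granularities remain in the tree; only Core::Memory's is the hardware 4 KiB page, while the caches pick coarser sizes for their own bookkeeping (and the query cache keeps its own local 4 KiB constants). A summary of the values visible in this diff, as compile-time checks (sketch only):

#include <cstdint>

constexpr std::uint64_t CORE_MEMORY_PAGEBITS = 12;   // Core::Memory::YUZU_PAGEBITS, 4 KiB CPU pages
constexpr std::uint64_t SHADER_CACHE_PAGEBITS = 14;  // ShaderCache::YUZU_PAGEBITS, 16 KiB
constexpr std::uint64_t BUFFER_CACHE_PAGEBITS = 16;  // BufferCache::YUZU_PAGEBITS, 64 KiB
constexpr std::uint64_t TEXTURE_CACHE_PAGEBITS = 20; // TextureCache::YUZU_PAGEBITS, 1 MiB hash pages

static_assert((std::uint64_t{1} << CORE_MEMORY_PAGEBITS) == 4 * 1024);
static_assert((std::uint64_t{1} << SHADER_CACHE_PAGEBITS) == 16 * 1024);
static_assert((std::uint64_t{1} << BUFFER_CACHE_PAGEBITS) == 64 * 1024);
static_assert((std::uint64_t{1} << TEXTURE_CACHE_PAGEBITS) == 1024 * 1024);

int main() {
    return 0;
}
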