Mirror of https://github.com/citra-emu/citra.git
Memory: move memory chunk into pImpl and make them dynamically allocated
Otherwise MSVC would give an out-of-memory error at compile time.
Commit 7e8ba6ed8e (parent ac1cda21c3)
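The shape of the change, as a minimal self-contained sketch (names and the buffer size are illustrative, not the actual Citra declarations). The large emulated-memory buffers stop being std::array members of MemorySystem, which the comment added in the diff below says Visual Studio tries to allocate at compile time, exceeding its memory limit. Instead they become heap-allocated unique_ptr<u8[]> buffers hidden behind a pImpl and are reached through accessors such as GetFCRAMPointer:

    // Sketch only: illustrative names and a reduced size, not the real Citra code.
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <memory>

    class MemorySystem {
    public:
        MemorySystem() : impl(std::make_unique<Impl>()) {}

        // Callers go through an accessor instead of touching the buffer directly.
        std::uint8_t* GetFCRAMPointer(std::uint32_t offset) {
            return impl->fcram.get() + offset;
        }

    private:
        struct Impl {
            // Reduced, illustrative size; the real buffers are FCRAM, VRAM and
            // New 3DS extra RAM.
            static constexpr std::size_t kFcramSize = 16 * 1024 * 1024;

            // Heap allocation instead of std::array<u8, kFcramSize>, so the huge
            // buffer is no longer part of the object layout the compiler has to
            // build up front.
            std::unique_ptr<std::uint8_t[]> fcram =
                std::make_unique<std::uint8_t[]>(kFcramSize);

            Impl() {
                // Mirrors the explicit zeroing in the commit (make_unique already
                // value-initializes the array).
                std::memset(fcram.get(), 0, kFcramSize);
            }
        };
        std::unique_ptr<Impl> impl;
    };

In the actual change, Impl is only forward-declared in memory.h and defined in memory.cpp, which is why the header also gains MemorySystem() and ~MemorySystem() declarations: the destructor of std::unique_ptr<Impl> has to be instantiated where Impl is a complete type.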
@@ -188,10 +188,10 @@ ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission per
         u32 interval_size = interval.upper() - interval.lower();
         LOG_DEBUG(Kernel, "Allocated FCRAM region lower={:08X}, upper={:08X}", interval.lower(),
                   interval.upper());
-        std::fill(kernel.memory.fcram.begin() + interval.lower(),
-                  kernel.memory.fcram.begin() + interval.upper(), 0);
+        std::fill(kernel.memory.GetFCRAMPointer(interval.lower()),
+                  kernel.memory.GetFCRAMPointer(interval.upper()), 0);
         auto vma = vm_manager.MapBackingMemory(interval_target,
-                                               kernel.memory.fcram.data() + interval.lower(),
+                                               kernel.memory.GetFCRAMPointer(interval.lower()),
                                                interval_size, memory_state);
         ASSERT(vma.Succeeded());
         vm_manager.Reprotect(vma.Unwrap(), perms);
@@ -263,7 +263,7 @@ ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission p
         }
     }

-    u8* backing_memory = kernel.memory.fcram.data() + physical_offset;
+    u8* backing_memory = kernel.memory.GetFCRAMPointer(physical_offset);

     std::fill(backing_memory, backing_memory + size, 0);
     auto vma = vm_manager.MapBackingMemory(target, backing_memory, size, MemoryState::Continuous);
@@ -43,8 +43,8 @@ ResultVal<SharedPtr<SharedMemory>> KernelSystem::CreateSharedMemory(

         ASSERT_MSG(offset, "Not enough space in region to allocate shared memory!");

-        std::fill(memory.fcram.data() + *offset, memory.fcram.data() + *offset + size, 0);
-        shared_memory->backing_blocks = {{memory.fcram.data() + *offset, size}};
+        std::fill(memory.GetFCRAMPointer(*offset), memory.GetFCRAMPointer(*offset + size), 0);
+        shared_memory->backing_blocks = {{memory.GetFCRAMPointer(*offset), size}};
         shared_memory->holding_memory += MemoryRegionInfo::Interval(*offset, *offset + size);
         shared_memory->linear_heap_phys_offset = *offset;

@@ -86,8 +86,8 @@ SharedPtr<SharedMemory> KernelSystem::CreateSharedMemoryForApplet(
     shared_memory->other_permissions = other_permissions;
     for (const auto& interval : backing_blocks) {
         shared_memory->backing_blocks.push_back(
-            {memory.fcram.data() + interval.lower(), interval.upper() - interval.lower()});
-        std::fill(memory.fcram.data() + interval.lower(), memory.fcram.data() + interval.upper(),
+            {memory.GetFCRAMPointer(interval.lower()), interval.upper() - interval.lower()});
+        std::fill(memory.GetFCRAMPointer(interval.lower()), memory.GetFCRAMPointer(interval.upper()),
                   0);
     }
     shared_memory->base_address = Memory::HEAP_VADDR + offset;
@@ -355,7 +355,7 @@ ResultVal<SharedPtr<Thread>> KernelSystem::CreateThread(std::string name, VAddr

         // Map the page to the current process' address space.
         vm_manager.MapBackingMemory(Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE,
-                                    memory.fcram.data() + *offset, Memory::PAGE_SIZE,
+                                    memory.GetFCRAMPointer(*offset), Memory::PAGE_SIZE,
                                     MemoryState::Locked);
     }

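All of the kernel-side hunks above make the same substitution: pointer arithmetic on the previously public fcram array (kernel.memory.fcram.data() or .begin() plus an offset) becomes a call to the new accessor. A hypothetical helper distilling that call shape (ZeroFcramInterval is not part of the commit; the include path and the u32/u8 types are the ones Citra uses):

    // Hypothetical helper, not part of the commit: zero an FCRAM interval the
    // way the kernel hunks above now do it, via GetFCRAMPointer instead of
    // direct access to the fcram array.
    #include <algorithm>

    #include "core/memory.h" // Memory::MemorySystem, as included in Citra

    void ZeroFcramInterval(Memory::MemorySystem& memory, u32 lower, u32 upper) {
        std::fill(memory.GetFCRAMPointer(lower), memory.GetFCRAMPointer(upper), 0);
    }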
@@ -21,15 +21,34 @@

 namespace Memory {

+class MemorySystem::Impl {
+public:
+    Impl() {
+        std::fill(fcram.get(), fcram.get() + Memory::FCRAM_N3DS_SIZE, 0);
+        std::fill(vram.get(), vram.get() + Memory::VRAM_SIZE, 0);
+        std::fill(n3ds_extra_ram.get(), n3ds_extra_ram.get() + Memory::N3DS_EXTRA_RAM_SIZE, 0);
+    }
+
+    // Visual Studio would try to allocate these on compile time if they are std::array, which would exceed the memory limit.
+    std::unique_ptr<u8[]> fcram = std::make_unique<u8[]>(Memory::FCRAM_N3DS_SIZE);
+    std::unique_ptr<u8[]> vram = std::make_unique<u8[]>(Memory::VRAM_SIZE);
+    std::unique_ptr<u8[]> n3ds_extra_ram = std::make_unique<u8[]>(Memory::N3DS_EXTRA_RAM_SIZE);
+
+    PageTable* current_page_table = nullptr;
+};
+
+MemorySystem::MemorySystem() : impl(std::make_unique<Impl>()) {}
+MemorySystem::~MemorySystem() = default;
+
 void MemorySystem::SetCurrentPageTable(PageTable* page_table) {
-    current_page_table = page_table;
+    impl->current_page_table = page_table;
     if (Core::System::GetInstance().IsPoweredOn()) {
         Core::CPU().PageTableChanged();
     }
 }

 PageTable* MemorySystem::GetCurrentPageTable() const {
-    return current_page_table;
+    return impl->current_page_table;
 }

 static void MapPages(PageTable& page_table, u32 base, u32 size, u8* memory, PageType type) {
@@ -74,13 +93,13 @@ void UnmapRegion(PageTable& page_table, VAddr base, u32 size) {

 u8* MemorySystem::GetPointerForRasterizerCache(VAddr addr) {
     if (addr >= LINEAR_HEAP_VADDR && addr < LINEAR_HEAP_VADDR_END) {
-        return fcram.data() + (addr - LINEAR_HEAP_VADDR);
+        return impl->fcram.get() + (addr - LINEAR_HEAP_VADDR);
     }
     if (addr >= NEW_LINEAR_HEAP_VADDR && addr < NEW_LINEAR_HEAP_VADDR_END) {
-        return fcram.data() + (addr - NEW_LINEAR_HEAP_VADDR);
+        return impl->fcram.get() + (addr - NEW_LINEAR_HEAP_VADDR);
     }
     if (addr >= VRAM_VADDR && addr < VRAM_VADDR_END) {
-        return vram.data() + (addr - VRAM_VADDR);
+        return impl->vram.get() + (addr - VRAM_VADDR);
     }
     UNREACHABLE();
 }
@@ -103,7 +122,7 @@ T ReadMMIO(MMIORegionPointer mmio_handler, VAddr addr);

 template <typename T>
 T MemorySystem::Read(const VAddr vaddr) {
-    const u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
+    const u8* page_pointer = impl->current_page_table->pointers[vaddr >> PAGE_BITS];
     if (page_pointer) {
         // NOTE: Avoid adding any extra logic to this fast-path block
         T value;
@@ -114,7 +133,7 @@ T MemorySystem::Read(const VAddr vaddr) {
     // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
     std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);

-    PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
+    PageType type = impl->current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
     case PageType::Unmapped:
         LOG_ERROR(HW_Memory, "unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr);
@@ -130,7 +149,7 @@ T MemorySystem::Read(const VAddr vaddr) {
         return value;
     }
     case PageType::Special:
-        return ReadMMIO<T>(GetMMIOHandler(*current_page_table, vaddr), vaddr);
+        return ReadMMIO<T>(GetMMIOHandler(*impl->current_page_table, vaddr), vaddr);
     default:
         UNREACHABLE();
     }
@@ -141,7 +160,7 @@ void WriteMMIO(MMIORegionPointer mmio_handler, VAddr addr, const T data);

 template <typename T>
 void MemorySystem::Write(const VAddr vaddr, const T data) {
-    u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
+    u8* page_pointer = impl->current_page_table->pointers[vaddr >> PAGE_BITS];
     if (page_pointer) {
         // NOTE: Avoid adding any extra logic to this fast-path block
         std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T));
@@ -151,7 +170,7 @@ void MemorySystem::Write(const VAddr vaddr, const T data) {
     // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
     std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);

-    PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
+    PageType type = impl->current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
     case PageType::Unmapped:
         LOG_ERROR(HW_Memory, "unmapped Write{} 0x{:08X} @ 0x{:08X}", sizeof(data) * 8, (u32)data,
@@ -166,7 +185,7 @@ void MemorySystem::Write(const VAddr vaddr, const T data) {
         break;
     }
     case PageType::Special:
-        WriteMMIO<T>(GetMMIOHandler(*current_page_table, vaddr), vaddr, data);
+        WriteMMIO<T>(GetMMIOHandler(*impl->current_page_table, vaddr), vaddr, data);
         break;
     default:
         UNREACHABLE();
@@ -199,12 +218,12 @@ bool MemorySystem::IsValidPhysicalAddress(const PAddr paddr) {
 }

 u8* MemorySystem::GetPointer(const VAddr vaddr) {
-    u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
+    u8* page_pointer = impl->current_page_table->pointers[vaddr >> PAGE_BITS];
     if (page_pointer) {
         return page_pointer + (vaddr & PAGE_MASK);
     }

-    if (current_page_table->attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory) {
+    if (impl->current_page_table->attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory) {
         return GetPointerForRasterizerCache(vaddr);
     }

@@ -256,16 +275,16 @@ u8* MemorySystem::GetPhysicalPointer(PAddr address) {
     u8* target_pointer = nullptr;
     switch (area->paddr_base) {
     case VRAM_PADDR:
-        target_pointer = vram.data() + offset_into_region;
+        target_pointer = impl->vram.get() + offset_into_region;
         break;
     case DSP_RAM_PADDR:
         target_pointer = Core::DSP().GetDspMemory().data() + offset_into_region;
         break;
     case FCRAM_PADDR:
-        target_pointer = fcram.data() + offset_into_region;
+        target_pointer = impl->fcram.get() + offset_into_region;
         break;
     case N3DS_EXTRA_RAM_PADDR:
-        target_pointer = n3ds_extra_ram.data() + offset_into_region;
+        target_pointer = impl->n3ds_extra_ram.get() + offset_into_region;
         break;
     default:
         UNREACHABLE();
@@ -303,7 +322,7 @@ void MemorySystem::RasterizerMarkRegionCached(PAddr start, u32 size, bool cached

     for (unsigned i = 0; i < num_pages; ++i, paddr += PAGE_SIZE) {
         for (VAddr vaddr : PhysicalToVirtualAddressForRasterizer(paddr)) {
-            PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
+            PageType& page_type = impl->current_page_table->attributes[vaddr >> PAGE_BITS];

             if (cached) {
                 // Switch page type to cached if now cached
@@ -314,7 +333,7 @@ void MemorySystem::RasterizerMarkRegionCached(PAddr start, u32 size, bool cached
                     break;
                 case PageType::Memory:
                     page_type = PageType::RasterizerCachedMemory;
-                    current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
+                    impl->current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
                     break;
                 default:
                     UNREACHABLE();
@@ -328,7 +347,7 @@ void MemorySystem::RasterizerMarkRegionCached(PAddr start, u32 size, bool cached
                     break;
                 case PageType::RasterizerCachedMemory: {
                     page_type = PageType::Memory;
-                    current_page_table->pointers[vaddr >> PAGE_BITS] =
+                    impl->current_page_table->pointers[vaddr >> PAGE_BITS] =
                         GetPointerForRasterizerCache(vaddr & ~PAGE_MASK);
                     break;
                 }
@@ -730,8 +749,13 @@ void WriteMMIO<u64>(MMIORegionPointer mmio_handler, VAddr addr, const u64 data)
 }

 u32 MemorySystem::GetFCRAMOffset(u8* pointer) {
-    ASSERT(pointer >= fcram.data() && pointer < fcram.data() + fcram.size());
-    return pointer - fcram.data();
+    ASSERT(pointer >= impl->fcram.get() && pointer <= impl->fcram.get() + Memory::FCRAM_N3DS_SIZE);
+    return pointer - impl->fcram.get();
+}
+
+u8* MemorySystem::GetFCRAMPointer(u32 offset) {
+    ASSERT(offset <= Memory::FCRAM_N3DS_SIZE);
+    return impl->fcram.get() + offset;
 }

 } // namespace Memory
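Since GetFCRAMPointer is the inverse of the existing GetFCRAMOffset, the pair round-trips between FCRAM offsets and host pointers. A hypothetical check (CheckFcramRoundTrip is illustrative, not part of the commit; ASSERT and the include paths are the ones Citra uses):

    // Hypothetical check, not part of the commit: GetFCRAMPointer and the
    // updated GetFCRAMOffset round-trip between an FCRAM offset and a host
    // pointer into the Impl-owned buffer.
    #include "common/assert.h"
    #include "core/memory.h"

    void CheckFcramRoundTrip(Memory::MemorySystem& memory, u32 offset) {
        u8* host_ptr = memory.GetFCRAMPointer(offset);
        ASSERT(memory.GetFCRAMOffset(host_ptr) == offset);
    }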
@@ -6,6 +6,7 @@

 #include <array>
 #include <cstddef>
+#include <memory>
 #include <string>
 #include <vector>
 #include "common/common_types.h"
@@ -210,6 +211,9 @@ void RasterizerFlushVirtualRegion(VAddr start, u32 size, FlushMode mode);

 class MemorySystem {
 public:
+    MemorySystem();
+    ~MemorySystem();
+
     /// Currently active page table
     void SetCurrentPageTable(PageTable* page_table);
     PageTable* GetCurrentPageTable() const;
@@ -248,13 +252,14 @@ public:
     /// Gets offset in FCRAM from a pointer inside FCRAM range
     u32 GetFCRAMOffset(u8* pointer);

+    /// Gets pointer in FCRAM with given offset
+    u8* GetFCRAMPointer(u32 offset);
+
     /**
      * Mark each page touching the region as cached.
      */
     void RasterizerMarkRegionCached(PAddr start, u32 size, bool cached);

-    std::array<u8, Memory::FCRAM_N3DS_SIZE> fcram{};
-
 private:
     template <typename T>
     T Read(const VAddr vaddr);
@@ -270,10 +275,9 @@ private:
      */
     u8* GetPointerForRasterizerCache(VAddr addr);

-    std::array<u8, Memory::VRAM_SIZE> vram{};
-    std::array<u8, Memory::N3DS_EXTRA_RAM_SIZE> n3ds_extra_ram{};
+    class Impl;

-    PageTable* current_page_table = nullptr;
+    std::unique_ptr<Impl> impl;
 };

 /// Determines if the given VAddr is valid for the specified process.
@@ -12,7 +12,7 @@
 class EmuWindow;
 class RendererBase;

-namespace Memory{
+namespace Memory {
 class MemorySystem;
 }
