kernel: optimize page free on shutdown
This commit is contained in:
parent f90a022d3a
commit 2f0b57ca13
@@ -28,14 +28,14 @@ Result KMemoryBlockManager::Initialize(KProcessAddress st, KProcessAddress nd,
 }
 
 void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
-                                   HostUnmapCallback&& host_unmap_callback) {
+                                   BlockCallback&& block_callback) {
     // Erase every block until we have none left.
     auto it = m_memory_block_tree.begin();
     while (it != m_memory_block_tree.end()) {
         KMemoryBlock* block = std::addressof(*it);
         it = m_memory_block_tree.erase(it);
+        block_callback(block->GetAddress(), block->GetSize());
         slab_manager->Free(block);
-        host_unmap_callback(block->GetAddress(), block->GetSize());
     }
 
     ASSERT(m_memory_block_tree.empty());
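
A note on the hunk above: the functional change is the ordering inside the loop. The callback now runs before the block is handed back to the slab allocator, so it reads the block's address and size while the block is still live; previously the host unmap callback only ran after slab_manager->Free(block). Below is a minimal sketch of the same erase/callback/free pattern, using a plain std::set and a hypothetical FreeBlock helper in place of the kernel's intrusive tree and slab manager.

    #include <cstdint>
    #include <functional>
    #include <set>

    // Minimal stand-ins for the kernel types; names are illustrative only.
    struct Block {
        uint64_t address;
        uint64_t size;
        bool operator<(const Block& other) const { return address < other.address; }
    };

    using BlockCallback = std::function<void(uint64_t, uint64_t)>;

    // Hypothetical stand-in for KMemoryBlockSlabManager::Free.
    void FreeBlock(const Block&) { /* return the bookkeeping object to its allocator */ }

    void FinalizeBlocks(std::set<Block>& tree, BlockCallback&& block_callback) {
        // Erase every block until none are left, mirroring KMemoryBlockManager::Finalize.
        auto it = tree.begin();
        while (it != tree.end()) {
            const Block block = *it;
            it = tree.erase(it);
            // Let the caller act on the block's range first (unmap, free pages)...
            block_callback(block.address, block.size);
            // ...and only then release the block object itself.
            FreeBlock(block);
        }
    }
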
@@ -85,11 +85,11 @@ public:
 public:
     KMemoryBlockManager();
 
-    using HostUnmapCallback = std::function<void(Common::ProcessAddress, u64)>;
+    using BlockCallback = std::function<void(Common::ProcessAddress, u64)>;
 
     Result Initialize(KProcessAddress st, KProcessAddress nd,
                       KMemoryBlockSlabManager* slab_manager);
-    void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);
+    void Finalize(KMemoryBlockSlabManager* slab_manager, BlockCallback&& block_callback);
 
     iterator end() {
         return m_memory_block_tree.end();
@@ -435,69 +435,14 @@ Result KPageTableBase::FinalizeProcess() {
     // Only process tables should be finalized.
     ASSERT(!this->IsKernel());
 
-    // HLE processes don't have memory mapped.
-    R_SUCCEED_IF(m_impl == nullptr);
-
     // NOTE: Here Nintendo calls an unknown OnFinalize function.
     // this->OnFinalize();
 
     // NOTE: Here Nintendo calls a second unknown OnFinalize function.
     // this->OnFinalize2();
 
-    // Get implementation objects.
-    auto& impl = this->GetImpl();
-    auto& mm = m_kernel.MemoryManager();
-
-    // Traverse, freeing all pages.
-    {
-        // Get the address space size.
-        const size_t as_size = this->GetAddressSpaceSize();
-
-        // Begin the traversal.
-        TraversalContext context;
-        TraversalEntry cur_entry = {
-            .phys_addr = 0,
-            .block_size = 0,
-        };
-
-        bool cur_valid = false;
-        TraversalEntry next_entry;
-        bool next_valid;
-        size_t tot_size = 0;
-
-        next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context),
-                                         this->GetAddressSpaceStart());
-
-        // Iterate over entries.
-        while (true) {
-            if ((!next_valid && !cur_valid) ||
-                (next_valid && cur_valid &&
-                 next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
-                cur_entry.block_size += next_entry.block_size;
-            } else {
-                if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) {
-                    mm.Close(cur_entry.phys_addr, cur_entry.block_size / PageSize);
-                }
-
-                // Update tracking variables.
-                tot_size += cur_entry.block_size;
-                cur_entry = next_entry;
-                cur_valid = next_valid;
-            }
-
-            if (cur_entry.block_size + tot_size >= as_size) {
-                break;
-            }
-
-            next_valid =
-                impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
-        }
-
-        // Handle the last block.
-        if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) {
-            mm.Close(cur_entry.phys_addr, cur_entry.block_size / PageSize);
-        }
-    }
-
+    // NOTE: Here Nintendo does a page table walk to discover heap pages to free.
+    // We will use the block manager finalization below to free them.
+
     R_SUCCEED();
 }
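
For reference, the block removed above performed a full page-table walk at process teardown: it began a traversal at the start of the address space, merged physically contiguous traversal entries into larger runs, and closed each merged heap run through the memory manager until the accumulated size covered the address space. The following is a simplified, self-contained sketch of that coalescing idea over a flat list of entries (the heap-address filter is omitted) — TraversalEntry here is a local struct and CloseRange a hypothetical stand-in for KMemoryManager::Close, not the kernel's API.

    #include <cstdint>
    #include <vector>

    struct TraversalEntry {
        uint64_t phys_addr;
        uint64_t block_size;
    };

    // Hypothetical stand-in for KMemoryManager::Close: releases a physically contiguous range.
    void CloseRange(uint64_t phys_addr, uint64_t size) { /* drop page references */ }

    // Merge adjacent entries whose physical ranges are contiguous, then close each run.
    void CloseCoalesced(const std::vector<TraversalEntry>& entries) {
        TraversalEntry cur{0, 0};
        bool cur_valid = false;
        for (const TraversalEntry& next : entries) {
            if (cur_valid && next.phys_addr == cur.phys_addr + cur.block_size) {
                // Physically contiguous with the current run: extend it.
                cur.block_size += next.block_size;
            } else {
                if (cur_valid) {
                    CloseRange(cur.phys_addr, cur.block_size);
                }
                cur = next;
                cur_valid = true;
            }
        }
        // Handle the last run.
        if (cur_valid) {
            CloseRange(cur.phys_addr, cur.block_size);
        }
    }

Per the new comments, the commit drops this walk and instead frees heap pages while the memory block manager is finalized (next hunk), so shutdown no longer has to traverse the entire address space.
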
@@ -505,14 +450,24 @@ Result KPageTableBase::FinalizeProcess() {
 void KPageTableBase::Finalize() {
     this->FinalizeProcess();
 
-    auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
+    auto BlockCallback = [&](KProcessAddress addr, u64 size) {
         if (m_impl->fastmem_arena) {
             m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size, false);
         }
+
+        // Get physical pages.
+        KPageGroup pg(m_kernel, m_block_info_manager);
+        this->MakePageGroup(pg, addr, size / PageSize);
+
+        // Free the pages.
+        pg.CloseAndReset();
     };
 
     // Finalize memory blocks.
-    m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(HostUnmapCallback));
+    {
+        KScopedLightLock lk(m_general_lock);
+        m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(BlockCallback));
+    }
 
     // Free any unsafe mapped memory.
     if (m_mapped_unsafe_physical_memory) {
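
In the new flow shown above, each finalized block drives its own teardown inside the callback: the range is unmapped from the host fastmem arena, its physical pages are gathered into a KPageGroup, and the group is closed, with the block walk itself taken under the table's general lock. Below is a compact model of that per-block teardown under stated assumptions — UnmapHost, CollectPages, and ClosePages are hypothetical stand-ins for the fastmem unmap, MakePageGroup, and KPageGroup::CloseAndReset.

    #include <cstdint>
    #include <mutex>
    #include <utility>
    #include <vector>

    constexpr uint64_t PageSize = 0x1000;

    struct PageRange {
        uint64_t phys_addr;
        uint64_t num_pages;
    };

    // Hypothetical stand-ins for the host/kernel primitives used by the real callback.
    void UnmapHost(uint64_t addr, uint64_t size) { /* host fastmem unmap */ }
    std::vector<PageRange> CollectPages(uint64_t addr, uint64_t num_pages) { return {}; }
    void ClosePages(const std::vector<PageRange>& pages) { /* drop references, free pages */ }

    struct PageTableModel {
        std::mutex general_lock;

        void Finalize(const std::vector<std::pair<uint64_t, uint64_t>>& blocks) {
            auto block_callback = [&](uint64_t addr, uint64_t size) {
                // Unmap the range from the host address space first.
                UnmapHost(addr, size);
                // Then gather and release the physical pages backing it.
                ClosePages(CollectPages(addr, size / PageSize));
            };

            // Walk the blocks under the table's general lock, as the new code does.
            std::scoped_lock lk{general_lock};
            for (const auto& [addr, size] : blocks) {
                block_callback(addr, size);
            }
        }
    };
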