Mirror of https://github.com/citra-emu/citra.git (synced 2024-11-25 13:20:14 +00:00)

memory: Rework backing memory implementation

commit d30ed62608 (parent 7b3c89b8bb)
@@ -81,9 +81,6 @@ public:
*/
virtual void PipeWrite(DspPipe pipe_number, const std::vector<u8>& buffer) = 0;

-/// Returns a reference to the array backing DSP memory
-virtual std::array<u8, Memory::DSP_RAM_SIZE>& GetDspMemory() = 0;
-
/// Sets the dsp class that we trigger interrupts for
virtual void SetServiceToInterrupt(std::weak_ptr<Service::DSP::DSP_DSP> dsp) = 0;

@@ -62,8 +62,6 @@ public:
std::size_t GetPipeReadableSize(DspPipe pipe_number) const;
void PipeWrite(DspPipe pipe_number, const std::vector<u8>& buffer);

-std::array<u8, Memory::DSP_RAM_SIZE>& GetDspMemory();
-
void SetServiceToInterrupt(std::weak_ptr<DSP_DSP> dsp);

private:

@@ -82,7 +80,7 @@ private:
DspState dsp_state = DspState::Off;
std::array<std::vector<u8>, num_dsp_pipe> pipe_data{};

-HLE::DspMemory dsp_memory;
+HLE::DspMemory& dsp_memory;
std::array<HLE::Source, HLE::num_sources> sources{{
HLE::Source(0), HLE::Source(1), HLE::Source(2), HLE::Source(3), HLE::Source(4),
HLE::Source(5), HLE::Source(6), HLE::Source(7), HLE::Source(8), HLE::Source(9),

@@ -103,7 +101,6 @@ private:
void serialize(Archive& ar, const unsigned int) {
ar& dsp_state;
ar& pipe_data;
-ar& dsp_memory.raw_memory;
ar& sources;
ar& mixers;
ar& dsp_dsp;

@@ -111,7 +108,8 @@ private:
friend class boost::serialization::access;
};

-DspHle::Impl::Impl(DspHle& parent_, Memory::MemorySystem& memory) : parent(parent_) {
+DspHle::Impl::Impl(DspHle& parent_, Memory::MemorySystem& memory)
+    : dsp_memory(*reinterpret_cast<HLE::DspMemory*>(memory.GetDspMemory())), parent(parent_) {
dsp_memory.raw_memory.fill(0);

for (auto& source : sources) {

@@ -309,10 +307,6 @@ void DspHle::Impl::PipeWrite(DspPipe pipe_number, const std::vector<u8>& buffer)
}
}

-std::array<u8, Memory::DSP_RAM_SIZE>& DspHle::Impl::GetDspMemory() {
-return dsp_memory.raw_memory;
-}
-
void DspHle::Impl::SetServiceToInterrupt(std::weak_ptr<DSP_DSP> dsp) {
dsp_dsp = std::move(dsp);
}

@@ -478,10 +472,6 @@ void DspHle::PipeWrite(DspPipe pipe_number, const std::vector<u8>& buffer) {
impl->PipeWrite(pipe_number, buffer);
}

-std::array<u8, Memory::DSP_RAM_SIZE>& DspHle::GetDspMemory() {
-return impl->GetDspMemory();
-}
-
void DspHle::SetServiceToInterrupt(std::weak_ptr<DSP_DSP> dsp) {
impl->SetServiceToInterrupt(std::move(dsp));
}

@@ -32,8 +32,6 @@ public:
std::size_t GetPipeReadableSize(DspPipe pipe_number) const override;
void PipeWrite(DspPipe pipe_number, const std::vector<u8>& buffer) override;

-std::array<u8, Memory::DSP_RAM_SIZE>& GetDspMemory() override;
-
void SetServiceToInterrupt(std::weak_ptr<Service::DSP::DSP_DSP> dsp) override;

void LoadComponent(const std::vector<u8>& buffer) override;

@@ -402,10 +402,6 @@ void DspLle::PipeWrite(DspPipe pipe_number, const std::vector<u8>& buffer) {
impl->WritePipe(static_cast<u8>(pipe_number), buffer);
}

-std::array<u8, Memory::DSP_RAM_SIZE>& DspLle::GetDspMemory() {
-return impl->teakra.GetDspMemory();
-}
-
void DspLle::SetServiceToInterrupt(std::weak_ptr<Service::DSP::DSP_DSP> dsp) {
impl->teakra.SetRecvDataHandler(0, [this, dsp]() {
if (!impl->loaded)

@@ -20,8 +20,6 @@ public:
std::size_t GetPipeReadableSize(DspPipe pipe_number) const override;
void PipeWrite(DspPipe pipe_number, const std::vector<u8>& buffer) override;

-std::array<u8, Memory::DSP_RAM_SIZE>& GetDspMemory() override;
-
void SetServiceToInterrupt(std::weak_ptr<Service::DSP::DSP_DSP> dsp) override;

void LoadComponent(const std::vector<u8>& buffer) override;

@@ -80,8 +80,6 @@ add_library(common STATIC
logging/text_formatter.cpp
logging/text_formatter.h
math_util.h
-memory_ref.h
-memory_ref.cpp
microprofile.cpp
microprofile.h
microprofileui.h
@@ -1,8 +0,0 @@
-// Copyright 2020 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include "common/archives.h"
-#include "common/memory_ref.h"
-
-SERIALIZE_EXPORT_IMPL(BufferMem)

@@ -1,136 +0,0 @@
-// Copyright 2020 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <memory>
-#include <vector>
-#include <boost/serialization/export.hpp>
-#include <boost/serialization/shared_ptr.hpp>
-#include <boost/serialization/vector.hpp>
-#include "common/assert.h"
-#include "common/common_types.h"
-
-/// Abstract host-side memory - for example a static buffer, or local vector
-class BackingMem {
-public:
-virtual ~BackingMem() = default;
-virtual u8* GetPtr() = 0;
-virtual const u8* GetPtr() const = 0;
-virtual std::size_t GetSize() const = 0;
-
-private:
-template <class Archive>
-void serialize(Archive& ar, const unsigned int) {}
-friend class boost::serialization::access;
-};
-
-/// Backing memory implemented by a local buffer
-class BufferMem : public BackingMem {
-public:
-BufferMem() = default;
-explicit BufferMem(std::size_t size) : data(size) {}
-
-u8* GetPtr() override {
-return data.data();
-}
-
-const u8* GetPtr() const override {
-return data.data();
-}
-
-std::size_t GetSize() const override {
-return data.size();
-}
-
-std::vector<u8>& Vector() {
-return data;
-}
-
-const std::vector<u8>& Vector() const {
-return data;
-}
-
-private:
-std::vector<u8> data;
-
-template <class Archive>
-void serialize(Archive& ar, const unsigned int) {
-ar& boost::serialization::base_object<BackingMem>(*this);
-ar& data;
-}
-friend class boost::serialization::access;
-};
-
-BOOST_CLASS_EXPORT_KEY(BufferMem);
-
-/// A managed reference to host-side memory. Fast enough to be used everywhere instead of u8*
-/// Supports serialization.
-class MemoryRef {
-public:
-MemoryRef() = default;
-MemoryRef(std::nullptr_t) {}
-MemoryRef(std::shared_ptr<BackingMem> backing_mem_)
-: backing_mem(std::move(backing_mem_)), offset(0) {
-Init();
-}
-MemoryRef(std::shared_ptr<BackingMem> backing_mem_, u64 offset_)
-: backing_mem(std::move(backing_mem_)), offset(offset_) {
-ASSERT(offset < backing_mem->GetSize());
-Init();
-}
-explicit operator bool() const {
-return cptr != nullptr;
-}
-operator u8*() {
-return cptr;
-}
-u8* GetPtr() {
-return cptr;
-}
-operator const u8*() const {
-return cptr;
-}
-const u8* GetPtr() const {
-return cptr;
-}
-std::size_t GetSize() const {
-return csize;
-}
-MemoryRef& operator+=(u32 offset_by) {
-ASSERT(offset_by < csize);
-offset += offset_by;
-Init();
-return *this;
-}
-MemoryRef operator+(u32 offset_by) const {
-ASSERT(offset_by < csize);
-return MemoryRef(backing_mem, offset + offset_by);
-}
-
-private:
-std::shared_ptr<BackingMem> backing_mem{};
-u64 offset{};
-// Cached values for speed
-u8* cptr{};
-std::size_t csize{};
-
-void Init() {
-if (backing_mem) {
-cptr = backing_mem->GetPtr() + offset;
-csize = static_cast<std::size_t>(backing_mem->GetSize() - offset);
-} else {
-cptr = nullptr;
-csize = 0;
-}
-}
-
-template <class Archive>
-void serialize(Archive& ar, const unsigned int) {
-ar& backing_mem;
-ar& offset;
-Init();
-}
-friend class boost::serialization::access;
-};
@@ -2,7 +2,10 @@ add_library(core STATIC
3ds.h
announce_multiplayer_session.cpp
announce_multiplayer_session.h
+backing_memory_manager.h
+backing_memory_manager_generic.cpp
arm/arm_interface.h
+arm/arm_thread_context.h
arm/dyncom/arm_dyncom.cpp
arm/dyncom/arm_dyncom.h
arm/dyncom/arm_dyncom_dec.cpp

@@ -438,6 +441,8 @@ add_library(core STATIC
loader/smdh.h
memory.cpp
memory.h
+memory_constants.h
+memory_ref.h
mmio.h
movie.cpp
movie.h

@@ -10,13 +10,15 @@
#include <boost/serialization/split_member.hpp>
#include <boost/serialization/version.hpp>
#include "common/common_types.h"
+#include "core/arm/arm_thread_context.h"
#include "core/arm/skyeye_common/arm_regformat.h"
#include "core/arm/skyeye_common/vfp/asm_vfp.h"
+#include "core/core.h"
#include "core/core_timing.h"
#include "core/memory.h"

namespace Memory {
-struct PageTable;
+class PageTable;
};

/// Generic ARM11 CPU interface
@@ -26,86 +28,6 @@ public:
: timer(timer), id(id){};
virtual ~ARM_Interface() {}

-class ThreadContext {
-friend class boost::serialization::access;
-
-template <class Archive>
-void save(Archive& ar, const unsigned int file_version) const {
-for (std::size_t i = 0; i < 16; i++) {
-const auto r = GetCpuRegister(i);
-ar << r;
-}
-std::size_t fpu_reg_count = file_version == 0 ? 16 : 64;
-for (std::size_t i = 0; i < fpu_reg_count; i++) {
-const auto r = GetFpuRegister(i);
-ar << r;
-}
-const auto r1 = GetCpsr();
-ar << r1;
-const auto r2 = GetFpscr();
-ar << r2;
-const auto r3 = GetFpexc();
-ar << r3;
-}
-
-template <class Archive>
-void load(Archive& ar, const unsigned int file_version) {
-u32 r;
-for (std::size_t i = 0; i < 16; i++) {
-ar >> r;
-SetCpuRegister(i, r);
-}
-std::size_t fpu_reg_count = file_version == 0 ? 16 : 64;
-for (std::size_t i = 0; i < fpu_reg_count; i++) {
-ar >> r;
-SetFpuRegister(i, r);
-}
-ar >> r;
-SetCpsr(r);
-ar >> r;
-SetFpscr(r);
-ar >> r;
-SetFpexc(r);
-}
-
-BOOST_SERIALIZATION_SPLIT_MEMBER()
-public:
-virtual ~ThreadContext() = default;
-
-virtual void Reset() = 0;
-virtual u32 GetCpuRegister(std::size_t index) const = 0;
-virtual void SetCpuRegister(std::size_t index, u32 value) = 0;
-virtual u32 GetCpsr() const = 0;
-virtual void SetCpsr(u32 value) = 0;
-virtual u32 GetFpuRegister(std::size_t index) const = 0;
-virtual void SetFpuRegister(std::size_t index, u32 value) = 0;
-virtual u32 GetFpscr() const = 0;
-virtual void SetFpscr(u32 value) = 0;
-virtual u32 GetFpexc() const = 0;
-virtual void SetFpexc(u32 value) = 0;
-
-u32 GetStackPointer() const {
-return GetCpuRegister(13);
-}
-void SetStackPointer(u32 value) {
-return SetCpuRegister(13, value);
-}
-
-u32 GetLinkRegister() const {
-return GetCpuRegister(14);
-}
-void SetLinkRegister(u32 value) {
-return SetCpuRegister(14, value);
-}
-
-u32 GetProgramCounter() const {
-return GetCpuRegister(15);
-}
-void SetProgramCounter(u32 value) {
-return SetCpuRegister(15, value);
-}
-};
-
/// Runs the CPU until an event happens
virtual void Run() = 0;

@@ -249,10 +171,11 @@ private:

template <class Archive>
void save(Archive& ar, const unsigned int file_version) const {
+const size_t page_table_index = Core::System::GetInstance().Memory().SerializePageTable(GetPageTable());
+ar << page_table_index;
+
ar << timer;
ar << id;
-const auto page_table = GetPageTable();
-ar << page_table;
for (int i = 0; i < 15; i++) {
const auto r = GetReg(i);
ar << r;

@@ -275,11 +198,13 @@ private:
template <class Archive>
void load(Archive& ar, const unsigned int file_version) {
PurgeState();
+
+size_t page_table_index;
+ar >> page_table_index;
+SetPageTable(Core::System::GetInstance().Memory().UnserializePageTable(page_table_index));
+
ar >> timer;
ar >> id;
-std::shared_ptr<Memory::PageTable> page_table{};
-ar >> page_table;
-SetPageTable(page_table);
u32 r;
for (int i = 0; i < 15; i++) {
ar >> r;

@@ -308,4 +233,3 @@ private:
};

BOOST_CLASS_VERSION(ARM_Interface, 1)
-BOOST_CLASS_VERSION(ARM_Interface::ThreadContext, 1)
new file: src/core/arm/arm_thread_context.h (92 lines)

@@ -0,0 +1,92 @@
+// Copyright 2014 Citra Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <cstddef>
+#include <boost/serialization/split_member.hpp>
+#include <boost/serialization/version.hpp>
+#include "common/common_types.h"
+
+class ThreadContext {
+friend class boost::serialization::access;
+
+template <class Archive>
+void save(Archive& ar, const unsigned int file_version) const {
+for (std::size_t i = 0; i < 16; i++) {
+const auto r = GetCpuRegister(i);
+ar << r;
+}
+std::size_t fpu_reg_count = file_version == 0 ? 16 : 64;
+for (std::size_t i = 0; i < fpu_reg_count; i++) {
+const auto r = GetFpuRegister(i);
+ar << r;
+}
+const auto r1 = GetCpsr();
+ar << r1;
+const auto r2 = GetFpscr();
+ar << r2;
+const auto r3 = GetFpexc();
+ar << r3;
+}
+
+template <class Archive>
+void load(Archive& ar, const unsigned int file_version) {
+u32 r;
+for (std::size_t i = 0; i < 16; i++) {
+ar >> r;
+SetCpuRegister(i, r);
+}
+std::size_t fpu_reg_count = file_version == 0 ? 16 : 64;
+for (std::size_t i = 0; i < fpu_reg_count; i++) {
+ar >> r;
+SetFpuRegister(i, r);
+}
+ar >> r;
+SetCpsr(r);
+ar >> r;
+SetFpscr(r);
+ar >> r;
+SetFpexc(r);
+}
+
+BOOST_SERIALIZATION_SPLIT_MEMBER()
+public:
+virtual ~ThreadContext() = default;
+
+virtual void Reset() = 0;
+virtual u32 GetCpuRegister(std::size_t index) const = 0;
+virtual void SetCpuRegister(std::size_t index, u32 value) = 0;
+virtual u32 GetCpsr() const = 0;
+virtual void SetCpsr(u32 value) = 0;
+virtual u32 GetFpuRegister(std::size_t index) const = 0;
+virtual void SetFpuRegister(std::size_t index, u32 value) = 0;
+virtual u32 GetFpscr() const = 0;
+virtual void SetFpscr(u32 value) = 0;
+virtual u32 GetFpexc() const = 0;
+virtual void SetFpexc(u32 value) = 0;
+
+u32 GetStackPointer() const {
+return GetCpuRegister(13);
+}
+void SetStackPointer(u32 value) {
+return SetCpuRegister(13, value);
+}
+
+u32 GetLinkRegister() const {
+return GetCpuRegister(14);
+}
+void SetLinkRegister(u32 value) {
+return SetCpuRegister(14, value);
+}
+
+u32 GetProgramCounter() const {
+return GetCpuRegister(15);
+}
+void SetProgramCounter(u32 value) {
+return SetCpuRegister(15, value);
+}
+};
+
+BOOST_CLASS_VERSION(ThreadContext, 1)
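Note: ThreadContext is now a free-standing abstract class rather than the nested ARM_Interface::ThreadContext, so CPU backends derive from it directly (DynarmicThreadContext and DynComThreadContext in the hunks below). A minimal illustrative sketch of a concrete subclass follows; the TestThreadContext name and its plain-array register storage are assumptions for illustration only, not part of this commit.

// Hypothetical sketch only: a trivial ThreadContext that stores registers in arrays.
#include <array>
#include "core/arm/arm_thread_context.h"

class TestThreadContext final : public ThreadContext {
public:
    void Reset() override {
        cpu_registers.fill(0);
        fpu_registers.fill(0);
        cpsr = fpscr = fpexc = 0;
    }
    u32 GetCpuRegister(std::size_t index) const override { return cpu_registers[index]; }
    void SetCpuRegister(std::size_t index, u32 value) override { cpu_registers[index] = value; }
    u32 GetCpsr() const override { return cpsr; }
    void SetCpsr(u32 value) override { cpsr = value; }
    u32 GetFpuRegister(std::size_t index) const override { return fpu_registers[index]; }
    void SetFpuRegister(std::size_t index, u32 value) override { fpu_registers[index] = value; }
    u32 GetFpscr() const override { return fpscr; }
    void SetFpscr(u32 value) override { fpscr = value; }
    u32 GetFpexc() const override { return fpexc; }
    void SetFpexc(u32 value) override { fpexc = value; }

private:
    std::array<u32, 16> cpu_registers{};  // r0-r15
    std::array<u32, 64> fpu_registers{};  // sized for the version-1 save format above
    u32 cpsr{};
    u32 fpscr{};
    u32 fpexc{};
};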
@ -15,7 +15,7 @@
|
|||||||
#include "core/hle/kernel/svc.h"
|
#include "core/hle/kernel/svc.h"
|
||||||
#include "core/memory.h"
|
#include "core/memory.h"
|
||||||
|
|
||||||
class DynarmicThreadContext final : public ARM_Interface::ThreadContext {
|
class DynarmicThreadContext final : public ThreadContext {
|
||||||
public:
|
public:
|
||||||
DynarmicThreadContext() {
|
DynarmicThreadContext() {
|
||||||
Reset();
|
Reset();
|
||||||
@ -251,7 +251,7 @@ void ARM_Dynarmic::SetCP15Register(CP15Register reg, u32 value) {
|
|||||||
UNREACHABLE_MSG("Unknown CP15 register: {}", static_cast<size_t>(reg));
|
UNREACHABLE_MSG("Unknown CP15 register: {}", static_cast<size_t>(reg));
|
||||||
}
|
}
|
||||||
|
|
||||||
std::unique_ptr<ARM_Interface::ThreadContext> ARM_Dynarmic::NewContext() const {
|
std::unique_ptr<ThreadContext> ARM_Dynarmic::NewContext() const {
|
||||||
return std::make_unique<DynarmicThreadContext>();
|
return std::make_unique<DynarmicThreadContext>();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -321,7 +321,9 @@ void ARM_Dynarmic::ServeBreak() {
|
|||||||
std::unique_ptr<Dynarmic::A32::Jit> ARM_Dynarmic::MakeJit() {
|
std::unique_ptr<Dynarmic::A32::Jit> ARM_Dynarmic::MakeJit() {
|
||||||
Dynarmic::A32::UserConfig config;
|
Dynarmic::A32::UserConfig config;
|
||||||
config.callbacks = cb.get();
|
config.callbacks = cb.get();
|
||||||
config.page_table = ¤t_page_table->GetPointerArray();
|
if (current_page_table) {
|
||||||
|
config.page_table = current_page_table->GetRawPageTables();
|
||||||
|
}
|
||||||
config.coprocessors[15] = std::make_shared<DynarmicCP15>(cp15_state);
|
config.coprocessors[15] = std::make_shared<DynarmicCP15>(cp15_state);
|
||||||
config.define_unpredictable_behaviour = true;
|
config.define_unpredictable_behaviour = true;
|
||||||
return std::make_unique<Dynarmic::A32::Jit>(config);
|
return std::make_unique<Dynarmic::A32::Jit>(config);
|
||||||
|
@ -12,7 +12,7 @@
|
|||||||
#include "core/arm/dynarmic/arm_dynarmic_cp15.h"
|
#include "core/arm/dynarmic/arm_dynarmic_cp15.h"
|
||||||
|
|
||||||
namespace Memory {
|
namespace Memory {
|
||||||
struct PageTable;
|
class PageTable;
|
||||||
class MemorySystem;
|
class MemorySystem;
|
||||||
} // namespace Memory
|
} // namespace Memory
|
||||||
|
|
||||||
|
@ -12,7 +12,7 @@
|
|||||||
#include "core/core.h"
|
#include "core/core.h"
|
||||||
#include "core/core_timing.h"
|
#include "core/core_timing.h"
|
||||||
|
|
||||||
class DynComThreadContext final : public ARM_Interface::ThreadContext {
|
class DynComThreadContext final : public ThreadContext {
|
||||||
public:
|
public:
|
||||||
DynComThreadContext() {
|
DynComThreadContext() {
|
||||||
Reset();
|
Reset();
|
||||||
@ -162,7 +162,7 @@ void ARM_DynCom::ExecuteInstructions(u64 num_instructions) {
|
|||||||
state->ServeBreak();
|
state->ServeBreak();
|
||||||
}
|
}
|
||||||
|
|
||||||
std::unique_ptr<ARM_Interface::ThreadContext> ARM_DynCom::NewContext() const {
|
std::unique_ptr<ThreadContext> ARM_DynCom::NewContext() const {
|
||||||
return std::make_unique<DynComThreadContext>();
|
return std::make_unique<DynComThreadContext>();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -10,6 +10,7 @@
|
|||||||
#include "common/common_types.h"
|
#include "common/common_types.h"
|
||||||
#include "common/logging/log.h"
|
#include "common/logging/log.h"
|
||||||
#include "common/microprofile.h"
|
#include "common/microprofile.h"
|
||||||
|
#include "core/arm/arm_interface.h"
|
||||||
#include "core/arm/dyncom/arm_dyncom_dec.h"
|
#include "core/arm/dyncom/arm_dyncom_dec.h"
|
||||||
#include "core/arm/dyncom/arm_dyncom_interpreter.h"
|
#include "core/arm/dyncom/arm_dyncom_interpreter.h"
|
||||||
#include "core/arm/dyncom/arm_dyncom_run.h"
|
#include "core/arm/dyncom/arm_dyncom_run.h"
|
||||||
|
@ -5,6 +5,7 @@
|
|||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include "common/logging/log.h"
|
#include "common/logging/log.h"
|
||||||
#include "common/swap.h"
|
#include "common/swap.h"
|
||||||
|
#include "core/arm/arm_interface.h"
|
||||||
#include "core/arm/skyeye_common/armstate.h"
|
#include "core/arm/skyeye_common/armstate.h"
|
||||||
#include "core/arm/skyeye_common/vfp/vfp.h"
|
#include "core/arm/skyeye_common/vfp/vfp.h"
|
||||||
#include "core/core.h"
|
#include "core/core.h"
|
||||||
|
new file: src/core/backing_memory_manager.h (113 lines)

@@ -0,0 +1,113 @@
+// Copyright 2020 Citra Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <cstddef>
+#include <memory>
+#include <boost/serialization/access.hpp>
+#include <boost/serialization/split_member.hpp>
+#include "common/common_types.h"
+#include "core/memory_constants.h"
+#include "core/memory_ref.h"
+
+namespace Memory {
+
+class BackingMemoryManager;
+class PageTable;
+class MemorySystem;
+
+class BackingMemory final {
+public:
+BackingMemory(std::shared_ptr<BackingMemoryManager> manager_, u8* pointer_, MemoryRef ref_, size_t size_);
+~BackingMemory();
+
+u8* Get() const {
+return pointer;
+}
+
+MemoryRef GetRef() const {
+return ref;
+}
+
+size_t GetSize() const {
+return size;
+}
+
+private:
+std::shared_ptr<BackingMemoryManager> manager;
+u8* pointer;
+MemoryRef ref;
+size_t size;
+};
+
+class FastmemRegion final {
+public:
+FastmemRegion();
+FastmemRegion(std::shared_ptr<BackingMemoryManager> manager_, u8* pointer_);
+~FastmemRegion();
+
+u8* Get() const {
+return pointer;
+}
+
+private:
+std::shared_ptr<BackingMemoryManager> manager;
+u8* pointer;
+};
+
+class BackingMemoryManager final : public std::enable_shared_from_this<BackingMemoryManager> {
+public:
+explicit BackingMemoryManager(std::size_t total_required);
+~BackingMemoryManager();
+
+/// Allocate backing memory from our pre-allocated chunk of shared memory.
+/// This chunk of memory is automatically freed when BackingMemoryManager is destructed.
+BackingMemory AllocateBackingMemory(std::size_t size);
+
+/// Frees backing memory back to pre-allocated chunk of shared memory.
+void FreeBackingMemory(const BackingMemory& memory) {
+FreeBackingMemory(memory.GetRef());
+}
+void FreeBackingMemory(MemoryRef memory);
+
+/// Allocate a 4GiB chunk of virtual address space for use in a PageTable.
+/// This address space is automatically released when BackingMemoryManager is destructed.
+FastmemRegion AllocateFastmemRegion();
+
+u8* GetPointerForRef(MemoryRef ref);
+
+MemoryRef GetRefForPointer(u8* pointer);
+
+private:
+friend class BackingMemory;
+friend class FastmemRegion;
+friend class MemorySystem;
+friend class PageTable;
+
+void Map(Memory::PageTable& page_table, VAddr vaddr, u8* backing_memory, std::size_t size);
+void Unmap(Memory::PageTable& page_table, VAddr vaddr, std::size_t size);
+
+void Serialize(std::array<std::ptrdiff_t, PAGE_TABLE_NUM_ENTRIES>& out,
+const std::array<u8*, PAGE_TABLE_NUM_ENTRIES>& in);
+void Unserialize(std::array<u8*, PAGE_TABLE_NUM_ENTRIES>& out,
+const std::array<std::ptrdiff_t, PAGE_TABLE_NUM_ENTRIES>& in);
+
+struct Impl;
+std::unique_ptr<Impl> impl;
+
+friend class boost::serialization::access;
+
+BackingMemoryManager();
+
+template <class Archive>
+void save(Archive& ar, const unsigned int version) const;
+
+template <class Archive>
+void load(Archive& ar, const unsigned int version);
+
+BOOST_SERIALIZATION_SPLIT_MEMBER()
+};
+
+} // namespace Memory
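Note: for orientation, a hedged usage sketch of the manager declared above, pieced together from these declarations and the call sites later in this commit (KernelSystem::MemoryInit, TranslateCommandBuffer); the ExampleUse function and its variable names are assumptions for illustration, not code from the commit.

// Illustrative sketch only: sub-allocate from the pre-reserved chunk, hand the
// serializable MemoryRef to consumers, and free through the same manager.
#include "common/assert.h"
#include "core/backing_memory_manager.h"

void ExampleUse(std::shared_ptr<Memory::BackingMemoryManager> manager) {
    // One contiguous block carved out of the manager's single pre-allocated region.
    Memory::BackingMemory block = manager->AllocateBackingMemory(Memory::PAGE_SIZE);

    u8* host_ptr = block.Get();              // raw host pointer for direct access
    Memory::MemoryRef ref = block.GetRef();  // offset into the managed chunk; survives save states

    // Refs and pointers convert both ways through the manager.
    ASSERT(manager->GetPointerForRef(ref) == host_ptr);

    // Kernel objects and page tables store the MemoryRef rather than the raw pointer,
    // which is what makes the new serialization position-independent.
    manager->FreeBackingMemory(block);
}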
new file: src/core/backing_memory_manager_generic.cpp (185 lines)

@@ -0,0 +1,185 @@
+// Copyright 2020 Citra Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <array>
+#include <cstdlib>
+#include <iterator>
+#include <list>
+#include <memory>
+#include <boost/serialization/binary_object.hpp>
+#include "common/archives.h"
+#include "common/assert.h"
+#include "core/backing_memory_manager.h"
+
+SERIALIZE_EXPORT_IMPL(Memory::BackingMemoryManager)
+
+namespace Memory {
+
+namespace {
+
+struct Allocation {
+bool is_free;
+std::size_t offset;
+std::size_t size;
+
+template <class Archive>
+void serialize(Archive& ar, const unsigned int version) {
+ar& is_free;
+ar& offset;
+ar& size;
+}
+};
+
+} // anonymous namespace
+
+struct BackingMemoryManager::Impl final {
+u8* memory;
+std::size_t max_alloc;
+std::list<Allocation> allocations;
+};
+
+BackingMemory::BackingMemory(std::shared_ptr<BackingMemoryManager> manager_, u8* pointer_, MemoryRef ref_, size_t size_) : manager(manager_), pointer(pointer_), ref(ref_), size(size_) {}
+BackingMemory::~BackingMemory() = default;
+
+FastmemRegion::FastmemRegion() : manager(nullptr), pointer(nullptr) {}
+FastmemRegion::FastmemRegion(std::shared_ptr<BackingMemoryManager> manager_, u8* pointer_) : manager(manager_), pointer(pointer_) {}
+FastmemRegion::~FastmemRegion() = default;
+
+BackingMemoryManager::BackingMemoryManager(std::size_t total_required) : impl(std::make_unique<Impl>()) {
+impl->memory = static_cast<u8*>(std::malloc(total_required));
+impl->max_alloc = total_required;
+impl->allocations.emplace_back(Allocation{true, 0, total_required});
+}
+
+BackingMemoryManager::~BackingMemoryManager() {
+std::free(static_cast<void*>(impl->memory));
+}
+
+BackingMemory BackingMemoryManager::AllocateBackingMemory(std::size_t size) {
+const auto iter = std::find_if(impl->allocations.begin(), impl->allocations.end(), [size](const auto& allocation) { return allocation.is_free && allocation.size >= size; });
+ASSERT_MSG(iter != impl->allocations.end(), "Out of memory when allcoating {} bytes", size);
+
+if (iter->size == size) {
+iter->is_free = false;
+return BackingMemory{shared_from_this(), impl->memory + iter->offset, static_cast<MemoryRef>(iter->offset), size};
+}
+
+const std::size_t offset = iter->offset;
+
+iter->offset += size;
+iter->size -= size;
+
+impl->allocations.insert(iter, Allocation{false, offset, size});
+
+return BackingMemory{shared_from_this(), impl->memory + offset, static_cast<MemoryRef>(offset), size};
+}
+
+void BackingMemoryManager::FreeBackingMemory(MemoryRef offset) {
+auto iter = std::find_if(impl->allocations.begin(), impl->allocations.end(), [offset](const auto& allocation) { return !allocation.is_free && MemoryRef{allocation.offset} == offset; });
+ASSERT_MSG(iter != impl->allocations.end(), "Could not find backing memory to free");
+
+iter->is_free = true;
+
+// Coalesce free space
+
+if (iter != impl->allocations.begin()) {
+auto prev_iter = std::prev(iter);
+ASSERT(prev_iter->offset + prev_iter->size == iter->offset);
+if (prev_iter->is_free) {
+prev_iter->size += iter->size;
+impl->allocations.erase(iter);
+iter = prev_iter;
+}
+}
+
+auto next_iter = std::next(iter);
+if (next_iter != impl->allocations.end()) {
+ASSERT(iter->offset + iter->size == next_iter->offset);
+if (next_iter->is_free) {
+iter->size += next_iter->size;
+impl->allocations.erase(next_iter);
+}
+}
+}
+
+u8* BackingMemoryManager::GetPointerForRef(MemoryRef ref) {
+return impl->memory + ref;
+}
+
+MemoryRef BackingMemoryManager::GetRefForPointer(u8* pointer) {
+return MemoryRef{pointer - impl->memory};
+}
+
+FastmemRegion BackingMemoryManager::AllocateFastmemRegion() {
+return {};
+}
+
+void BackingMemoryManager::Map(Memory::PageTable&, VAddr, u8* in, std::size_t) {
+const std::ptrdiff_t offset = in - impl->memory;
+ASSERT(0 <= offset && offset < static_cast<std::ptrdiff_t>(impl->max_alloc));
+}
+
+void BackingMemoryManager::Unmap(Memory::PageTable&, VAddr, std::size_t) {}
+
+void BackingMemoryManager::Serialize(std::array<std::ptrdiff_t, PAGE_TABLE_NUM_ENTRIES>& out, const std::array<u8*, PAGE_TABLE_NUM_ENTRIES>& in) {
+for (size_t i = 0; i < PAGE_TABLE_NUM_ENTRIES; ++i) {
+if (in[i] == nullptr) {
+out[i] = -1;
+} else {
+const std::ptrdiff_t offset = in[i] - impl->memory;
+ASSERT(0 <= offset && offset < static_cast<std::ptrdiff_t>(impl->max_alloc));
+out[i] = offset;
+}
+}
+}
+
+void BackingMemoryManager::Unserialize(std::array<u8*, PAGE_TABLE_NUM_ENTRIES>& out, const std::array<std::ptrdiff_t, PAGE_TABLE_NUM_ENTRIES>& in) {
+for (size_t i = 0; i < PAGE_TABLE_NUM_ENTRIES; ++i) {
+if (in[i] == -1) {
+out[i] = nullptr;
+} else {
+const std::ptrdiff_t offset = in[i];
+ASSERT(0 <= offset && offset < static_cast<std::ptrdiff_t>(impl->max_alloc));
+out[i] = impl->memory + offset;
+}
+}
+}
+
+BackingMemoryManager::BackingMemoryManager() : impl(std::make_unique<Impl>()) {}
+
+template <class Archive>
+void BackingMemoryManager::save(Archive& ar, const unsigned int file_version) const {
+ar << impl->max_alloc;
+
+const size_t count = impl->allocations.size();
+ar << count;
+for (const auto& allocation : impl->allocations) {
+ar << allocation;
+ar << boost::serialization::make_binary_object(impl->memory + allocation.offset, allocation.size);
+}
+}
+
+template <class Archive>
+void BackingMemoryManager::load(Archive& ar, const unsigned int file_version) {
+ar >> impl->max_alloc;
+
+if (!impl->memory) {
+impl->memory = static_cast<u8*>(std::malloc(impl->max_alloc));
+}
+
+impl->allocations.clear();
+size_t count;
+ar >> count;
+for (size_t i = 0; i < count; ++i) {
+Allocation allocation;
+ar >> allocation;
+ar >> boost::serialization::make_binary_object(impl->memory + allocation.offset, allocation.size);
+impl->allocations.push_back(allocation);
+}
+}
+
+SERIALIZE_IMPL(BackingMemoryManager)
+
+} // namespace Memory
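Note: the generic implementation above is a first-fit allocator over one malloc'd chunk. Allocations are kept as an offset-sorted list of {is_free, offset, size} runs; AllocateBackingMemory splits the first free run that is large enough, and FreeBackingMemory marks a run free and coalesces it with free neighbours. The following is a small self-contained sketch of that bookkeeping on plain offsets, with hypothetical names, to make the invariant explicit (adjacent runs always tile the chunk exactly); it is not code from the commit.

// Illustrative sketch only: the same split/coalesce bookkeeping on plain offsets.
#include <cstddef>
#include <list>

struct Run { bool is_free; std::size_t offset; std::size_t size; };

std::size_t Allocate(std::list<Run>& runs, std::size_t size) {
    for (auto it = runs.begin(); it != runs.end(); ++it) {
        if (!it->is_free || it->size < size)
            continue;
        if (it->size == size) {          // exact fit: reuse the run as-is
            it->is_free = false;
            return it->offset;
        }
        const std::size_t offset = it->offset;
        it->offset += size;              // shrink the free run...
        it->size -= size;
        runs.insert(it, Run{false, offset, size}); // ...and place the used run before it
        return offset;
    }
    return static_cast<std::size_t>(-1); // out of memory
}

void Free(std::list<Run>& runs, std::size_t offset) {
    for (auto it = runs.begin(); it != runs.end(); ++it) {
        if (it->is_free || it->offset != offset)
            continue;
        it->is_free = true;
        if (it != runs.begin()) {        // merge with a free predecessor
            auto prev = std::prev(it);
            if (prev->is_free) {
                prev->size += it->size;
                runs.erase(it);
                it = prev;
            }
        }
        auto next = std::next(it);       // merge with a free successor
        if (next != runs.end() && next->is_free) {
            it->size += next->size;
            runs.erase(next);
        }
        return;
    }
}

Seeding the list with a single free run covering the whole chunk, as the constructor above does, is what keeps both operations this simple.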
@@ -323,6 +323,12 @@ PerfStats::Results System::GetAndResetPerfStats() {
: PerfStats::Results{};
}

+void System::InvalidateCacheRange(u32 start_address, std::size_t length) {
+for (const auto& cpu : cpu_cores) {
+cpu->InvalidateCacheRange(start_address, length);
+}
+}
+
void System::Reschedule() {
if (!reschedule_pending) {
return;

@@ -377,8 +383,6 @@ System::ResultStatus System::Init(Frontend::EmuWindow& emu_window, u32 system_mo
dsp_core = std::make_unique<AudioCore::DspHle>(*memory);
}

-memory->SetDSP(*dsp_core);
-
dsp_core->SetSink(Settings::values.sink_id, Settings::values.audio_device_id);
dsp_core->EnableStretching(Settings::values.enable_audio_stretching);

@@ -573,6 +577,8 @@ void System::serialize(Archive& ar, const unsigned int file_version) {
// flush on save, don't flush on load
bool should_flush = !Archive::is_loading::value;
Memory::RasterizerClearAll(should_flush);
+ar&* memory.get();
+
ar&* timing.get();
for (u32 i = 0; i < num_cores; i++) {
ar&* cpu_cores[i].get();

@@ -591,7 +597,6 @@ void System::serialize(Archive& ar, const unsigned int file_version) {
throw std::runtime_error("LLE audio not supported for save states");
}

-ar&* memory.get();
ar&* kernel.get();
VideoCore::serialize(ar, file_version);
if (file_version >= 1) {

@@ -601,7 +606,6 @@ void System::serialize(Archive& ar, const unsigned int file_version) {
// This needs to be set from somewhere - might as well be here!
if (Archive::is_loading::value) {
Service::GSP::SetGlobalModule(*this);
-memory->SetDSP(*dsp_core);
cheat_engine->Connect();
VideoCore::g_renderer->Sync();
}

@@ -191,11 +191,7 @@ public:
return static_cast<u32>(cpu_cores.size());
}

-void InvalidateCacheRange(u32 start_address, std::size_t length) {
-for (const auto& cpu : cpu_cores) {
-cpu->InvalidateCacheRange(start_address, length);
-}
-}
+void InvalidateCacheRange(u32 start_address, std::size_t length);

/**
* Gets a reference to the emulated DSP.
@@ -4,15 +4,38 @@

#include <cstring>
#include "common/archives.h"
+#include "core/core.h"
#include "core/hle/kernel/config_mem.h"

////////////////////////////////////////////////////////////////////////////////////////////////////

SERIALIZE_EXPORT_IMPL(ConfigMem::Handler)

+namespace boost::serialization {
+
+template <class Archive>
+void save_construct_data(Archive& ar, const ConfigMem::Handler* t, const unsigned int) {
+ar << t->GetRef();
+}
+template void save_construct_data<oarchive>(oarchive& ar, const ConfigMem::Handler* t,
+const unsigned int);
+
+template <class Archive>
+void load_construct_data(Archive& ar, ConfigMem::Handler* t, const unsigned int) {
+Memory::MemoryRef ref;
+ar >> ref;
+::new (t) ConfigMem::Handler(Core::System::GetInstance().Memory().GetPointerForRef(ref), ref);
+}
+template void load_construct_data<iarchive>(iarchive& ar, ConfigMem::Handler* t,
+const unsigned int);
+
+} // namespace boost::serialization
+
namespace ConfigMem {

-Handler::Handler() {
+Handler::Handler(Memory::BackingMemory backing_memory)
+: config_mem(*reinterpret_cast<ConfigMemDef*>(backing_memory.Get())),
+ref(backing_memory.GetRef()) {
std::memset(&config_mem, 0, sizeof(config_mem));

// Values extracted from firmware 11.2.0-35E

@@ -29,8 +52,7 @@ Handler::Handler() {
config_mem.firm_ctr_sdk_ver = 0x0000F297;
}

-ConfigMemDef& Handler::GetConfigMem() {
-return config_mem;
-}
+Handler::Handler(u8* config_mem, Memory::MemoryRef ref)
+: config_mem(*reinterpret_cast<ConfigMemDef*>(config_mem)), ref(ref) {}

} // namespace ConfigMem

@@ -13,7 +13,6 @@
#include <boost/serialization/export.hpp>
#include "common/common_funcs.h"
#include "common/common_types.h"
-#include "common/memory_ref.h"
#include "common/swap.h"
#include "core/memory.h"

@@ -52,34 +51,39 @@ struct ConfigMemDef {
static_assert(sizeof(ConfigMemDef) == Memory::CONFIG_MEMORY_SIZE,
"Config Memory structure size is wrong");

-class Handler : public BackingMem {
+class Handler {
public:
-Handler();
-ConfigMemDef& GetConfigMem();
+explicit Handler(Memory::BackingMemory backing_memory);
+Handler(u8* config_mem, Memory::MemoryRef ref);

-u8* GetPtr() override {
-return reinterpret_cast<u8*>(&config_mem);
+ConfigMemDef& GetConfigMem() {
+return config_mem;
}

-const u8* GetPtr() const override {
-return reinterpret_cast<const u8*>(&config_mem);
-}
-
-std::size_t GetSize() const override {
-return sizeof(config_mem);
+Memory::MemoryRef GetRef() const {
+return ref;
}

private:
-ConfigMemDef config_mem;
+ConfigMemDef& config_mem;
+Memory::MemoryRef ref;

friend class boost::serialization::access;
template <class Archive>
-void serialize(Archive& ar, const unsigned int file_version) {
-ar& boost::serialization::base_object<BackingMem>(*this);
-ar& boost::serialization::make_binary_object(&config_mem, sizeof(config_mem));
-}
+void serialize(Archive& ar, const unsigned int) {}
};

} // namespace ConfigMem

+namespace boost::serialization {
+
+template <class Archive>
+void save_construct_data(Archive& ar, const ConfigMem::Handler* t, const unsigned int);
+
+template <class Archive>
+void load_construct_data(Archive& ar, ConfigMem::Handler* t, const unsigned int);
+
+} // namespace boost::serialization
+
BOOST_CLASS_EXPORT_KEY(ConfigMem::Handler)
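Note: because Handler now wraps externally allocated backing memory, it is no longer default-constructible, so the commit serializes it through Boost.Serialization's save_construct_data/load_construct_data hooks: only the MemoryRef is written on save, and on load the object is reconstructed in place from that ref. Below is a generic sketch of the same pattern for an arbitrary non-default-constructible type; the Widget type and its key field are hypothetical and not part of the commit.

// Illustrative sketch only: (de)serializing a type with no default constructor.
#include <boost/serialization/access.hpp>
#include <boost/serialization/serialization.hpp>

class Widget {
public:
    explicit Widget(int key_) : key(key_) {}
    int GetKey() const { return key; }

private:
    int key;

    friend class boost::serialization::access;
    template <class Archive>
    void serialize(Archive&, const unsigned int) {} // per-object state would go here
};

namespace boost::serialization {

// Called on save: record whatever is needed to rebuild the object later.
template <class Archive>
void save_construct_data(Archive& ar, const Widget* t, const unsigned int) {
    const int key = t->GetKey();
    ar << key;
}

// Called on load: read that data back and placement-new the object in place.
template <class Archive>
void load_construct_data(Archive& ar, Widget* t, const unsigned int) {
    int key;
    ar >> key;
    ::new (t) Widget(key);
}

} // namespace boost::serialization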
@ -4,7 +4,6 @@
|
|||||||
|
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include "common/alignment.h"
|
#include "common/alignment.h"
|
||||||
#include "common/memory_ref.h"
|
|
||||||
#include "core/core.h"
|
#include "core/core.h"
|
||||||
#include "core/hle/ipc.h"
|
#include "core/hle/ipc.h"
|
||||||
#include "core/hle/kernel/handle_table.h"
|
#include "core/hle/kernel/handle_table.h"
|
||||||
@ -183,6 +182,8 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
|
|||||||
page_start - Memory::PAGE_SIZE, (num_pages + 2) * Memory::PAGE_SIZE);
|
page_start - Memory::PAGE_SIZE, (num_pages + 2) * Memory::PAGE_SIZE);
|
||||||
ASSERT(result == RESULT_SUCCESS);
|
ASSERT(result == RESULT_SUCCESS);
|
||||||
|
|
||||||
|
memory.GetBackingMemoryManager().FreeBackingMemory(found->buffer);
|
||||||
|
memory.GetBackingMemoryManager().FreeBackingMemory(found->reserve_buffer);
|
||||||
mapped_buffer_context.erase(found);
|
mapped_buffer_context.erase(found);
|
||||||
|
|
||||||
i += 1;
|
i += 1;
|
||||||
@ -194,33 +195,34 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
|
|||||||
// TODO(Subv): Perform permission checks.
|
// TODO(Subv): Perform permission checks.
|
||||||
|
|
||||||
// Reserve a page of memory before the mapped buffer
|
// Reserve a page of memory before the mapped buffer
|
||||||
std::shared_ptr<BackingMem> reserve_buffer =
|
|
||||||
std::make_shared<BufferMem>(Memory::PAGE_SIZE);
|
Memory::BackingMemory reserve_buffer =
|
||||||
|
memory.GetBackingMemoryManager().AllocateBackingMemory(Memory::PAGE_SIZE);
|
||||||
dst_process->vm_manager.MapBackingMemoryToBase(
|
dst_process->vm_manager.MapBackingMemoryToBase(
|
||||||
Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer,
|
Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer.GetRef(),
|
||||||
Memory::PAGE_SIZE, Kernel::MemoryState::Reserved);
|
Memory::PAGE_SIZE, Kernel::MemoryState::Reserved);
|
||||||
|
|
||||||
std::shared_ptr<BackingMem> buffer =
|
Memory::BackingMemory buffer = memory.GetBackingMemoryManager().AllocateBackingMemory(
|
||||||
std::make_shared<BufferMem>(num_pages * Memory::PAGE_SIZE);
|
num_pages * Memory::PAGE_SIZE);
|
||||||
memory.ReadBlock(*src_process, source_address, buffer->GetPtr() + page_offset, size);
|
memory.ReadBlock(*src_process, source_address, buffer.Get() + page_offset, size);
|
||||||
|
|
||||||
// Map the page(s) into the target process' address space.
|
// Map the page(s) into the target process' address space.
|
||||||
target_address =
|
target_address = dst_process->vm_manager
|
||||||
dst_process->vm_manager
|
.MapBackingMemoryToBase(
|
||||||
.MapBackingMemoryToBase(Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE,
|
Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE,
|
||||||
buffer, buffer->GetSize(), Kernel::MemoryState::Shared)
|
buffer.GetRef(), buffer.GetSize(), Kernel::MemoryState::Shared)
|
||||||
.Unwrap();
|
.Unwrap();
|
||||||
|
|
||||||
cmd_buf[i++] = target_address + page_offset;
|
cmd_buf[i++] = target_address + page_offset;
|
||||||
|
|
||||||
// Reserve a page of memory after the mapped buffer
|
// Reserve a page of memory after the mapped buffer
|
||||||
dst_process->vm_manager.MapBackingMemoryToBase(
|
dst_process->vm_manager.MapBackingMemoryToBase(
|
||||||
Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer,
|
Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer.GetRef(),
|
||||||
reserve_buffer->GetSize(), Kernel::MemoryState::Reserved);
|
reserve_buffer.GetSize(), Kernel::MemoryState::Reserved);
|
||||||
|
|
||||||
mapped_buffer_context.push_back({permissions, size, source_address,
|
mapped_buffer_context.push_back({permissions, size, source_address,
|
||||||
target_address + page_offset, std::move(buffer),
|
target_address + page_offset, buffer.GetRef(),
|
||||||
std::move(reserve_buffer)});
|
reserve_buffer.GetRef()});
|
||||||
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -10,6 +10,7 @@
|
|||||||
#include "common/common_types.h"
|
#include "common/common_types.h"
|
||||||
#include "core/hle/ipc.h"
|
#include "core/hle/ipc.h"
|
||||||
#include "core/hle/kernel/thread.h"
|
#include "core/hle/kernel/thread.h"
|
||||||
|
#include "core/memory_ref.h"
|
||||||
|
|
||||||
namespace Memory {
|
namespace Memory {
|
||||||
class MemorySystem;
|
class MemorySystem;
|
||||||
@ -25,8 +26,8 @@ struct MappedBufferContext {
|
|||||||
VAddr source_address;
|
VAddr source_address;
|
||||||
VAddr target_address;
|
VAddr target_address;
|
||||||
|
|
||||||
std::shared_ptr<BackingMem> buffer;
|
Memory::MemoryRef buffer;
|
||||||
std::shared_ptr<BackingMem> reserve_buffer;
|
Memory::MemoryRef reserve_buffer;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
template <class Archive>
|
template <class Archive>
|
||||||
|
@ -7,6 +7,7 @@
|
|||||||
#include <boost/serialization/vector.hpp>
|
#include <boost/serialization/vector.hpp>
|
||||||
#include "common/archives.h"
|
#include "common/archives.h"
|
||||||
#include "common/serialization/atomic.h"
|
#include "common/serialization/atomic.h"
|
||||||
|
#include "core/arm/arm_interface.h"
|
||||||
#include "core/hle/kernel/client_port.h"
|
#include "core/hle/kernel/client_port.h"
|
||||||
#include "core/hle/kernel/config_mem.h"
|
#include "core/hle/kernel/config_mem.h"
|
||||||
#include "core/hle/kernel/handle_table.h"
|
#include "core/hle/kernel/handle_table.h"
|
||||||
|
@ -79,14 +79,16 @@ void KernelSystem::MemoryInit(u32 mem_type, u8 n3ds_mode) {
|
|||||||
// We must've allocated the entire FCRAM by the end
|
// We must've allocated the entire FCRAM by the end
|
||||||
ASSERT(base == (is_new_3ds ? Memory::FCRAM_N3DS_SIZE : Memory::FCRAM_SIZE));
|
ASSERT(base == (is_new_3ds ? Memory::FCRAM_N3DS_SIZE : Memory::FCRAM_SIZE));
|
||||||
|
|
||||||
config_mem_handler = std::make_shared<ConfigMem::Handler>();
|
config_mem_handler = std::make_shared<ConfigMem::Handler>(
|
||||||
|
memory.GetBackingMemoryManager().AllocateBackingMemory(Memory::CONFIG_MEMORY_SIZE));
|
||||||
auto& config_mem = config_mem_handler->GetConfigMem();
|
auto& config_mem = config_mem_handler->GetConfigMem();
|
||||||
config_mem.app_mem_type = reported_mem_type;
|
config_mem.app_mem_type = reported_mem_type;
|
||||||
config_mem.app_mem_alloc = memory_region_sizes[reported_mem_type][0];
|
config_mem.app_mem_alloc = memory_region_sizes[reported_mem_type][0];
|
||||||
config_mem.sys_mem_alloc = memory_regions[1]->size;
|
config_mem.sys_mem_alloc = memory_regions[1]->size;
|
||||||
config_mem.base_mem_alloc = memory_regions[2]->size;
|
config_mem.base_mem_alloc = memory_regions[2]->size;
|
||||||
|
|
||||||
shared_page_handler = std::make_shared<SharedPage::Handler>(timing);
|
shared_page_handler = std::make_shared<SharedPage::Handler>(
|
||||||
|
timing, memory.GetBackingMemoryManager().AllocateBackingMemory(Memory::SHARED_PAGE_SIZE));
|
||||||
}
|
}
|
||||||
|
|
||||||
std::shared_ptr<MemoryRegionInfo> KernelSystem::GetMemoryRegion(MemoryRegion region) {
|
std::shared_ptr<MemoryRegionInfo> KernelSystem::GetMemoryRegion(MemoryRegion region) {
|
||||||
@ -160,16 +162,18 @@ void KernelSystem::HandleSpecialMapping(VMManager& address_space, const AddressM
|
|||||||
}
|
}
|
||||||
|
|
||||||
void KernelSystem::MapSharedPages(VMManager& address_space) {
|
void KernelSystem::MapSharedPages(VMManager& address_space) {
|
||||||
auto cfg_mem_vma = address_space
|
auto cfg_mem_vma =
|
||||||
.MapBackingMemory(Memory::CONFIG_MEMORY_VADDR, {config_mem_handler},
|
address_space
|
||||||
Memory::CONFIG_MEMORY_SIZE, MemoryState::Shared)
|
.MapBackingMemory(Memory::CONFIG_MEMORY_VADDR, config_mem_handler->GetRef(),
|
||||||
.Unwrap();
|
Memory::CONFIG_MEMORY_SIZE, MemoryState::Shared)
|
||||||
|
.Unwrap();
|
||||||
address_space.Reprotect(cfg_mem_vma, VMAPermission::Read);
|
address_space.Reprotect(cfg_mem_vma, VMAPermission::Read);
|
||||||
|
|
||||||
auto shared_page_vma = address_space
|
auto shared_page_vma =
|
||||||
.MapBackingMemory(Memory::SHARED_PAGE_VADDR, {shared_page_handler},
|
address_space
|
||||||
Memory::SHARED_PAGE_SIZE, MemoryState::Shared)
|
.MapBackingMemory(Memory::SHARED_PAGE_VADDR, shared_page_handler->GetRef(),
|
||||||
.Unwrap();
|
Memory::SHARED_PAGE_SIZE, MemoryState::Shared)
|
||||||
|
.Unwrap();
|
||||||
address_space.Reprotect(shared_page_vma, VMAPermission::Read);
|
address_space.Reprotect(shared_page_vma, VMAPermission::Read);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -6,6 +6,7 @@
 #include <vector>
 #include "common/archives.h"
 #include "common/assert.h"
+#include "core/arm/arm_interface.h"
 #include "core/core.h"
 #include "core/global.h"
 #include "core/hle/kernel/errors.h"
@@ -250,7 +250,7 @@ ResultCode Process::HeapFree(VAddr target, u32 size) {
     // Free heaps block by block
     CASCADE_RESULT(auto backing_blocks, vm_manager.GetBackingBlocksForRange(target, size));
     for (const auto [backing_memory, block_size] : backing_blocks) {
-        memory_region->Free(kernel.memory.GetFCRAMOffset(backing_memory.GetPtr()), block_size);
+        memory_region->Free(kernel.memory.GetFCRAMOffset(backing_memory), block_size);
     }

     ResultCode result = vm_manager.UnmapRange(target, size);
@@ -296,7 +296,7 @@ ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission p

     auto backing_memory = kernel.memory.GetFCRAMRef(physical_offset);

-    std::fill(backing_memory.GetPtr(), backing_memory.GetPtr() + size, 0);
+    std::memset(kernel.memory.GetPointerForRef(backing_memory), 0, size);
     auto vma = vm_manager.MapBackingMemory(target, backing_memory, size, MemoryState::Continuous);
     ASSERT(vma.Succeeded());
     vm_manager.Reprotect(vma.Unwrap(), perms);
@@ -196,14 +196,14 @@ u8* SharedMemory::GetPointer(u32 offset) {
     if (backing_blocks.size() != 1) {
         LOG_WARNING(Kernel, "Unsafe GetPointer on discontinuous SharedMemory");
     }
-    return backing_blocks[0].first + offset;
+    return kernel.memory.GetPointerForRef(backing_blocks[0].first) + offset;
 }

 const u8* SharedMemory::GetPointer(u32 offset) const {
     if (backing_blocks.size() != 1) {
         LOG_WARNING(Kernel, "Unsafe GetPointer on discontinuous SharedMemory");
     }
-    return backing_blocks[0].first + offset;
+    return kernel.memory.GetPointerForRef(backing_blocks[0].first) + offset;
 }

 } // namespace Kernel
@@ -10,10 +10,10 @@
 #include <boost/serialization/export.hpp>
 #include <boost/serialization/string.hpp>
 #include "common/common_types.h"
-#include "common/memory_ref.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/result.h"
+#include "core/memory_ref.h"

 namespace Kernel {

@@ -90,7 +90,7 @@ private:
     /// during creation.
     PAddr linear_heap_phys_offset = 0;
     /// Backing memory for this shared memory block.
-    std::vector<std::pair<MemoryRef, u32>> backing_blocks;
+    std::vector<std::pair<Memory::MemoryRef, u32>> backing_blocks;
     /// Size of the memory block. Page-aligned.
     u32 size = 0;
     /// Permission restrictions applied to the process which created the block.
@@ -19,9 +19,19 @@ SERIALIZE_EXPORT_IMPL(SharedPage::Handler)

 namespace boost::serialization {

+template <class Archive>
+void save_construct_data(Archive& ar, const SharedPage::Handler* t, const unsigned int) {
+    ar << t->GetRef();
+}
+template void save_construct_data<oarchive>(oarchive& ar, const SharedPage::Handler* t,
+                                            const unsigned int);
+
 template <class Archive>
 void load_construct_data(Archive& ar, SharedPage::Handler* t, const unsigned int) {
-    ::new (t) SharedPage::Handler(Core::System::GetInstance().CoreTiming());
+    Memory::MemoryRef ref;
+    ar >> ref;
+    ::new (t) SharedPage::Handler(Core::System::GetInstance().CoreTiming(),
+                                  Core::System::GetInstance().Memory().GetPointerForRef(ref), ref);
 }
 template void load_construct_data<iarchive>(iarchive& ar, SharedPage::Handler* t,
                                             const unsigned int);
@@ -55,7 +65,9 @@ static std::chrono::seconds GetInitTime() {
 }
 }

-Handler::Handler(Core::Timing& timing) : timing(timing) {
+Handler::Handler(Core::Timing& timing, Memory::BackingMemory backing_memory)
+    : timing(timing), shared_page(*reinterpret_cast<SharedPageDef*>(backing_memory.Get())),
+      ref(backing_memory.GetRef()) {
     std::memset(&shared_page, 0, sizeof(shared_page));

     shared_page.running_hw = 0x1; // product
@@ -140,8 +152,7 @@ void Handler::Set3DSlider(float slidestate) {
     shared_page.sliderstate_3d = static_cast<float_le>(slidestate);
 }

-SharedPageDef& Handler::GetSharedPage() {
-    return shared_page;
-}
+Handler::Handler(Core::Timing& timing, u8* shared_page, Memory::MemoryRef ref)
+    : timing(timing), shared_page(*reinterpret_cast<SharedPageDef*>(shared_page)), ref(ref) {}

 } // namespace SharedPage
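Because Handler now holds a reference into externally owned backing memory, it is no longer default-constructible, and the save_construct_data/load_construct_data overloads above are the hooks boost::serialization calls to persist and replay the constructor arguments when the object is serialized through a pointer. The same pattern on a self-contained, hypothetical type (Widget and its id are illustrative):

#include <boost/serialization/serialization.hpp>

// Hypothetical type: not default-constructible, so boost needs construct-data helpers.
struct Widget {
    explicit Widget(int id_) : id(id_) {}
    int id;

    template <class Archive>
    void serialize(Archive&, const unsigned int) {} // remaining state would go here
};

namespace boost::serialization {

template <class Archive>
void save_construct_data(Archive& ar, const Widget* t, const unsigned int) {
    ar << t->id; // persist exactly what the constructor needs
}

template <class Archive>
void load_construct_data(Archive& ar, Widget* t, const unsigned int) {
    int id;
    ar >> id;
    ::new (t) Widget(id); // reconstruct in place with the saved arguments
}

} // namespace boost::serialization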
@@ -19,8 +19,8 @@
 #include "common/bit_field.h"
 #include "common/common_funcs.h"
 #include "common/common_types.h"
-#include "common/memory_ref.h"
 #include "common/swap.h"
+#include "core/backing_memory_manager.h"
 #include "core/memory.h"

 ////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -86,9 +86,10 @@ struct SharedPageDef {
 static_assert(sizeof(SharedPageDef) == Memory::SHARED_PAGE_SIZE,
               "Shared page structure size is wrong");

-class Handler : public BackingMem {
+class Handler {
 public:
-    Handler(Core::Timing& timing);
+    Handler(Core::Timing& timing, Memory::BackingMemory backing_memory);
+    Handler(Core::Timing& timing, u8* shared_page, Memory::MemoryRef ref);

     void SetMacAddress(const MacAddress&);

@@ -98,18 +99,12 @@ public:

     void Set3DSlider(float);

-    SharedPageDef& GetSharedPage();
-
-    u8* GetPtr() override {
-        return reinterpret_cast<u8*>(&shared_page);
+    SharedPageDef& GetSharedPage() {
+        return shared_page;
     }

-    const u8* GetPtr() const override {
-        return reinterpret_cast<const u8*>(&shared_page);
-    }
-
-    std::size_t GetSize() const override {
-        return sizeof(shared_page);
+    Memory::MemoryRef GetRef() const {
+        return ref;
     }

 private:
@@ -119,20 +114,21 @@ private:
     Core::TimingEventType* update_time_event;
     std::chrono::seconds init_time;

-    SharedPageDef shared_page;
+    SharedPageDef& shared_page;
+    Memory::MemoryRef ref;

-    template <class Archive>
-    void serialize(Archive& ar, const unsigned int) {
-        ar& boost::serialization::base_object<BackingMem>(*this);
-        ar& boost::serialization::make_binary_object(&shared_page, sizeof(shared_page));
-    }
     friend class boost::serialization::access;
+    template <class Archive>
+    void serialize(Archive& ar, const unsigned int) {}
 };

 } // namespace SharedPage

 namespace boost::serialization {

+template <class Archive>
+void save_construct_data(Archive& ar, const SharedPage::Handler* t, const unsigned int);
+
 template <class Archive>
 void load_construct_data(Archive& ar, SharedPage::Handler* t, const unsigned int);

@@ -302,7 +302,7 @@ static std::tuple<std::size_t, std::size_t, bool> GetFreeThreadLocalSlot(
  * @param entry_point Address of entry point for execution
  * @param arg User argument for thread
  */
-static void ResetThreadContext(const std::unique_ptr<ARM_Interface::ThreadContext>& context,
+static void ResetThreadContext(const std::unique_ptr<ThreadContext>& context,
                                u32 stack_top, u32 entry_point, u32 arg) {
     context->Reset();
     context->SetCpuRegister(0, arg);
@@ -505,4 +505,8 @@ const std::vector<std::shared_ptr<Thread>>& ThreadManager::GetThreadList() {
     return thread_list;
 }

+std::unique_ptr<ThreadContext> ThreadManager::NewContext() {
+    return cpu->NewContext();
+}
+
 } // namespace Kernel
@@ -15,12 +15,14 @@
 #include <boost/serialization/vector.hpp>
 #include "common/common_types.h"
 #include "common/thread_queue_list.h"
-#include "core/arm/arm_interface.h"
+#include "core/arm/arm_thread_context.h"
 #include "core/core_timing.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/wait_object.h"
 #include "core/hle/result.h"

+class ARM_Interface;
+
 namespace Kernel {

 class Mutex;
@@ -119,9 +121,7 @@ public:
         this->cpu = &cpu;
     }

-    std::unique_ptr<ARM_Interface::ThreadContext> NewContext() {
-        return cpu->NewContext();
-    }
+    std::unique_ptr<ThreadContext> NewContext();

 private:
     /**
@@ -285,7 +285,7 @@
         return status == ThreadStatus::WaitSynchAll;
     }

-    std::unique_ptr<ARM_Interface::ThreadContext> context;
+    std::unique_ptr<ThreadContext> context;

     u32 thread_id;

@@ -27,8 +27,7 @@ bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
         type != next.type) {
         return false;
     }
-    if (type == VMAType::BackingMemory &&
-        backing_memory.GetPtr() + size != next.backing_memory.GetPtr()) {
+    if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) {
         return false;
     }
     if (type == VMAType::MMIO && paddr + size != next.paddr) {
@@ -37,8 +36,8 @@ bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
     return true;
 }

-VMManager::VMManager(Memory::MemorySystem& memory)
-    : memory(memory), page_table(std::make_shared<Memory::PageTable>()) {
+VMManager::VMManager(Memory::MemorySystem& memory_)
+    : page_table(memory_.NewPageTable()), memory(memory_) {
     Reset();
 }

@@ -52,7 +51,7 @@ void VMManager::Reset() {
     initial_vma.size = MAX_ADDRESS;
     vma_map.emplace(initial_vma.base, initial_vma);

-    page_table->Clear();
+    page_table->Reset();

     UpdatePageTableForVMA(initial_vma);
 }
@@ -65,8 +64,9 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
     }
 }

-ResultVal<VAddr> VMManager::MapBackingMemoryToBase(VAddr base, u32 region_size, MemoryRef memory,
-                                                   u32 size, MemoryState state) {
+ResultVal<VAddr> VMManager::MapBackingMemoryToBase(VAddr base, u32 region_size,
+                                                   Memory::MemoryRef memory, u32 size,
+                                                   MemoryState state) {

     // Find the first Free VMA.
     VMAHandle vma_handle = std::find_if(vma_map.begin(), vma_map.end(), [&](const auto& vma) {
@@ -94,10 +94,8 @@ ResultVal<VAddr> VMManager::MapBackingMemoryToBase(VAddr base, u32 region_size,
     return MakeResult<VAddr>(target);
 }

-ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, MemoryRef memory,
+ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, Memory::MemoryRef memory,
                                                             u32 size, MemoryState state) {
-    ASSERT(memory.GetPtr() != nullptr);
-
     // This is the appropriately sized VMA that will turn into our allocation.
     CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
     VirtualMemoryArea& final_vma = vma_handle->second;
@@ -172,7 +170,7 @@ VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
     vma.permissions = VMAPermission::None;
     vma.meminfo_state = MemoryState::Free;

-    vma.backing_memory = nullptr;
+    vma.backing_memory = Memory::INVALID_MEMORY_REF;
     vma.paddr = 0;

     UpdatePageTableForVMA(vma);
@@ -363,9 +361,9 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
     }
 }

-ResultVal<std::vector<std::pair<MemoryRef, u32>>> VMManager::GetBackingBlocksForRange(VAddr address,
-                                                                                      u32 size) {
-    std::vector<std::pair<MemoryRef, u32>> backing_blocks;
+ResultVal<std::vector<std::pair<Memory::MemoryRef, u32>>> VMManager::GetBackingBlocksForRange(
+    VAddr address, u32 size) {
+    std::vector<std::pair<Memory::MemoryRef, u32>> backing_blocks;
     VAddr interval_target = address;
     while (interval_target != address + size) {
         auto vma = FindVMA(interval_target);
@@ -376,7 +374,8 @@ ResultVal<std::vector<std::pair<MemoryRef, u32>>> VMManager::GetBackingBlocksFor

         VAddr interval_end = std::min(address + size, vma->second.base + vma->second.size);
         u32 interval_size = interval_end - interval_target;
-        auto backing_memory = vma->second.backing_memory + (interval_target - vma->second.base);
+        auto backing_memory =
+            Memory::MemoryRef{vma->second.backing_memory + (interval_target - vma->second.base)};
         backing_blocks.push_back({backing_memory, interval_size});

         interval_target += interval_size;
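GetBackingBlocksForRange now hands back (Memory::MemoryRef, size) pairs instead of raw host pointers, so callers only resolve a ref to memory when they actually need it (Process::HeapFree above converts each ref to an FCRAM offset). A hedged sketch of a consumer that copies a possibly discontiguous range out of guest memory; the function name is illustrative and it assumes MemorySystem::GetPointerForRef as used elsewhere in this commit:

// Sketch only: walks the backing blocks of a virtual range and copies them out.
ResultCode CopyOutRange(Kernel::VMManager& vm, Memory::MemorySystem& memory, VAddr src, u32 size,
                        std::vector<u8>& out) {
    CASCADE_RESULT(auto blocks, vm.GetBackingBlocksForRange(src, size));
    out.reserve(out.size() + size);
    for (const auto& [ref, block_size] : blocks) {
        const u8* host = memory.GetPointerForRef(ref); // resolve the ref to a host pointer
        out.insert(out.end(), host, host + block_size);
    }
    return RESULT_SUCCESS;
}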
@@ -12,7 +12,6 @@
 #include <boost/serialization/shared_ptr.hpp>
 #include <boost/serialization/split_member.hpp>
 #include "common/common_types.h"
-#include "common/memory_ref.h"
 #include "core/hle/result.h"
 #include "core/memory.h"
 #include "core/mmio.h"
@@ -75,7 +74,7 @@ struct VirtualMemoryArea {

     // Settings for type = BackingMemory
     /// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.
-    MemoryRef backing_memory{};
+    Memory::MemoryRef backing_memory{};

     // Settings for type = MMIO
     /// Physical address of the register area this VMA maps to.
@@ -85,8 +84,6 @@ struct VirtualMemoryArea {
     /// Tests if this area can be merged to the right with `next`.
     bool CanBeMergedWith(const VirtualMemoryArea& next) const;

-private:
-    friend class boost::serialization::access;
     template <class Archive>
     void serialize(Archive& ar, const unsigned int file_version) {
         ar& base;
@@ -152,8 +149,8 @@ public:
      * @param state MemoryState tag to attach to the VMA.
      * @returns The address at which the memory was mapped.
      */
-    ResultVal<VAddr> MapBackingMemoryToBase(VAddr base, u32 region_size, MemoryRef memory, u32 size,
-                                            MemoryState state);
+    ResultVal<VAddr> MapBackingMemoryToBase(VAddr base, u32 region_size, Memory::MemoryRef memory,
+                                            u32 size, MemoryState state);
     /**
      * Maps an unmanaged host memory pointer at a given address.
      *
@@ -162,7 +159,7 @@ public:
      * @param size Size of the mapping.
      * @param state MemoryState tag to attach to the VMA.
      */
-    ResultVal<VMAHandle> MapBackingMemory(VAddr target, MemoryRef memory, u32 size,
+    ResultVal<VMAHandle> MapBackingMemory(VAddr target, Memory::MemoryRef memory, u32 size,
                                           MemoryState state);

     /**
@@ -205,8 +202,8 @@ public:
     void LogLayout(Log::Level log_level) const;

     /// Gets a list of backing memory blocks for the specified range
-    ResultVal<std::vector<std::pair<MemoryRef, u32>>> GetBackingBlocksForRange(VAddr address,
-                                                                               u32 size);
+    ResultVal<std::vector<std::pair<Memory::MemoryRef, u32>>> GetBackingBlocksForRange(
+        VAddr address, u32 size);

     /// Each VMManager has its own page table, which is set as the main one when the owning process
     /// is scheduled.
@@ -251,10 +248,21 @@ private:
     Memory::MemorySystem& memory;

     template <class Archive>
-    void serialize(Archive& ar, const unsigned int) {
-        ar& vma_map;
-        ar& page_table;
+    void save(Archive& ar, const unsigned int version) const {
+        ar << vma_map;
+        const size_t page_table_index = memory.SerializePageTable(page_table);
+        ar << page_table_index;
     }

+    template <class Archive>
+    void load(Archive& ar, const unsigned int version) {
+        ar >> vma_map;
+        size_t page_table_index;
+        ar >> page_table_index;
+        page_table = memory.UnserializePageTable(page_table_index);
+    }
+
+    BOOST_SERIALIZATION_SPLIT_MEMBER()
     friend class boost::serialization::access;
 };
 } // namespace Kernel
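VMManager no longer serializes its PageTable object directly; it saves an index into MemorySystem's page-table list and resolves that index back to the shared object on load, using boost's split save/load members. The same mechanics on a small, self-contained, hypothetical type:

#include <boost/serialization/split_member.hpp>
#include <cstddef>
#include <memory>
#include <vector>

// Hypothetical sketch of the split save/load pattern used above: persist a stable index,
// not the shared object, and re-resolve it when loading.
struct Table {};

inline std::vector<std::shared_ptr<Table>>& TableList() {
    static std::vector<std::shared_ptr<Table>> list;
    return list;
}

class TableRef {
public:
    std::shared_ptr<Table> table;

private:
    friend class boost::serialization::access;

    template <class Archive>
    void save(Archive& ar, const unsigned int) const {
        std::size_t index = static_cast<std::size_t>(-1);
        for (std::size_t i = 0; i < TableList().size(); ++i)
            if (TableList()[i] == table)
                index = i;
        ar << index; // write the position in the registry, not the object itself
    }

    template <class Archive>
    void load(Archive& ar, const unsigned int) {
        std::size_t index;
        ar >> index;
        table = (index == static_cast<std::size_t>(-1)) ? nullptr : TableList()[index];
    }

    BOOST_SERIALIZATION_SPLIT_MEMBER()
};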
@@ -23,17 +23,28 @@
 #include "video_core/renderer_base.h"
 #include "video_core/video_core.h"

-SERIALIZE_EXPORT_IMPL(Memory::MemorySystem::BackingMemImpl<Memory::Region::FCRAM>)
-SERIALIZE_EXPORT_IMPL(Memory::MemorySystem::BackingMemImpl<Memory::Region::VRAM>)
-SERIALIZE_EXPORT_IMPL(Memory::MemorySystem::BackingMemImpl<Memory::Region::DSP>)
-SERIALIZE_EXPORT_IMPL(Memory::MemorySystem::BackingMemImpl<Memory::Region::N3DS>)
-
 namespace Memory {

-void PageTable::Clear() {
-    pointers.raw.fill(nullptr);
-    pointers.refs.fill(MemoryRef());
+PageTable::PageTable(std::shared_ptr<BackingMemoryManager> backing_memory_manager)
+    : backing_memory_manager(std::move(backing_memory_manager)) {}
+
+PageTable::~PageTable() = default;
+
+void PageTable::Reset() {
+    pointers.fill(nullptr);
     attributes.fill(PageType::Unmapped);
+    fastmem_base = backing_memory_manager->AllocateFastmemRegion();
+}
+
+MMIORegionPointer PageTable::GetMMIOHandler(VAddr vaddr) {
+    DEBUG_ASSERT(GetAttribute(vaddr) == PageType::Special);
+    for (const auto& region : special_regions) {
+        if (vaddr >= region.base && vaddr < (region.base + region.size)) {
+            return region.handler;
+        }
+    }
+    ASSERT_MSG(false, "Mapped IO page without a handler @ {:08X}", vaddr);
+    return nullptr; // Should never happen
 }

 class RasterizerCacheMarker {
@@ -81,128 +92,86 @@ private:

 class MemorySystem::Impl {
 public:
-    // Visual Studio would try to allocate these on compile time if they are std::array, which would
-    // exceed the memory limit.
-    std::unique_ptr<u8[]> fcram = std::make_unique<u8[]>(Memory::FCRAM_N3DS_SIZE);
-    std::unique_ptr<u8[]> vram = std::make_unique<u8[]>(Memory::VRAM_SIZE);
-    std::unique_ptr<u8[]> n3ds_extra_ram = std::make_unique<u8[]>(Memory::N3DS_EXTRA_RAM_SIZE);
+    static constexpr size_t TOTAL_REQUIRED_MEMORY =
+        Memory::FCRAM_N3DS_SIZE + Memory::VRAM_SIZE + Memory::N3DS_EXTRA_RAM_SIZE +
+        Memory::DSP_RAM_SIZE + Memory::SHARED_MEMORY_SIZE + Memory::CONFIG_MEMORY_SIZE +
+        Memory::IPC_MAPPING_SIZE;
+    std::shared_ptr<BackingMemoryManager> backing_memory =
+        std::make_shared<BackingMemoryManager>(TOTAL_REQUIRED_MEMORY);
+
+    BackingMemory fcram = backing_memory->AllocateBackingMemory(Memory::FCRAM_N3DS_SIZE);
+    BackingMemory vram = backing_memory->AllocateBackingMemory(Memory::VRAM_SIZE);
+    BackingMemory n3ds_extra_ram =
+        backing_memory->AllocateBackingMemory(Memory::N3DS_EXTRA_RAM_SIZE);
+    BackingMemory dspram = backing_memory->AllocateBackingMemory(Memory::DSP_RAM_SIZE);

-    std::shared_ptr<PageTable> current_page_table = nullptr;
     RasterizerCacheMarker cache_marker;
     std::vector<std::shared_ptr<PageTable>> page_table_list;

+    std::shared_ptr<PageTable> current_page_table = nullptr;
+
     AudioCore::DspInterface* dsp = nullptr;

-    std::shared_ptr<BackingMem> fcram_mem;
-    std::shared_ptr<BackingMem> vram_mem;
-    std::shared_ptr<BackingMem> n3ds_extra_ram_mem;
-    std::shared_ptr<BackingMem> dsp_mem;
-
-    Impl();
-
-    const u8* GetPtr(Region r) const {
-        switch (r) {
-        case Region::VRAM:
-            return vram.get();
-        case Region::DSP:
-            return dsp->GetDspMemory().data();
-        case Region::FCRAM:
-            return fcram.get();
-        case Region::N3DS:
-            return n3ds_extra_ram.get();
-        default:
-            UNREACHABLE();
-        }
-    }
-
-    u8* GetPtr(Region r) {
-        switch (r) {
-        case Region::VRAM:
-            return vram.get();
-        case Region::DSP:
-            return dsp->GetDspMemory().data();
-        case Region::FCRAM:
-            return fcram.get();
-        case Region::N3DS:
-            return n3ds_extra_ram.get();
-        default:
-            UNREACHABLE();
-        }
-    }
-
-    u32 GetSize(Region r) const {
-        switch (r) {
-        case Region::VRAM:
-            return VRAM_SIZE;
-        case Region::DSP:
-            return DSP_RAM_SIZE;
-        case Region::FCRAM:
-            return FCRAM_N3DS_SIZE;
-        case Region::N3DS:
-            return N3DS_EXTRA_RAM_SIZE;
-        default:
-            UNREACHABLE();
-        }
-    }
+    size_t SerializePageTable(std::shared_ptr<PageTable> page_table) const {
+        const auto iter = std::find(page_table_list.begin(), page_table_list.end(), page_table);
+        if (iter == page_table_list.end()) {
+            return static_cast<size_t>(-1);
+        }
+        return std::distance(page_table_list.begin(), iter);
+    }
+
+    std::shared_ptr<PageTable> UnserializePageTable(size_t page_table_index) const {
+        if (page_table_index == static_cast<size_t>(-1)) {
+            return nullptr;
+        }
+        return page_table_list[page_table_index];
+    }

 private:
     friend class boost::serialization::access;
     template <class Archive>
-    void serialize(Archive& ar, const unsigned int file_version) {
-        bool save_n3ds_ram = Settings::values.is_new_3ds;
-        ar& save_n3ds_ram;
-        ar& boost::serialization::make_binary_object(vram.get(), Memory::VRAM_SIZE);
-        ar& boost::serialization::make_binary_object(
-            fcram.get(), save_n3ds_ram ? Memory::FCRAM_N3DS_SIZE : Memory::FCRAM_SIZE);
-        ar& boost::serialization::make_binary_object(
-            n3ds_extra_ram.get(), save_n3ds_ram ? Memory::N3DS_EXTRA_RAM_SIZE : 0);
-        ar& cache_marker;
-        ar& page_table_list;
-        // dsp is set from Core::System at startup
-        ar& current_page_table;
-        ar& fcram_mem;
-        ar& vram_mem;
-        ar& n3ds_extra_ram_mem;
-        ar& dsp_mem;
-    }
-};
-
-// We use this rather than BufferMem because we don't want new objects to be allocated when
-// deserializing. This avoids unnecessary memory thrashing.
-template <Region R>
-class MemorySystem::BackingMemImpl : public BackingMem {
-public:
-    BackingMemImpl() : impl(*Core::Global<Core::System>().Memory().impl) {}
-    explicit BackingMemImpl(MemorySystem::Impl& impl_) : impl(impl_) {}
-    u8* GetPtr() override {
-        return impl.GetPtr(R);
-    }
-    const u8* GetPtr() const override {
-        return impl.GetPtr(R);
-    }
-    std::size_t GetSize() const override {
-        return impl.GetSize(R);
-    }
-
-private:
-    MemorySystem::Impl& impl;
-
-    template <class Archive>
-    void serialize(Archive& ar, const unsigned int) {
-        ar& boost::serialization::base_object<BackingMem>(*this);
-    }
-    friend class boost::serialization::access;
-};
-
-MemorySystem::Impl::Impl()
-    : fcram_mem(std::make_shared<BackingMemImpl<Region::FCRAM>>(*this)),
-      vram_mem(std::make_shared<BackingMemImpl<Region::VRAM>>(*this)),
-      n3ds_extra_ram_mem(std::make_shared<BackingMemImpl<Region::N3DS>>(*this)),
-      dsp_mem(std::make_shared<BackingMemImpl<Region::DSP>>(*this)) {}
+    void save(Archive& ar, const unsigned int version) const {
+        ar << *backing_memory;
+        ar << cache_marker;
+        const size_t num_page_tables = page_table_list.size();
+        ar << num_page_tables;
+        for (size_t i = 0; i < num_page_tables; ++i) {
+            page_table_list[i]->save(ar, version);
+        }
+        const size_t current_index = SerializePageTable(current_page_table);
+        ar << current_index;
+    }
+
+    template <class Archive>
+    void load(Archive& ar, const unsigned int version) {
+        ar >> *backing_memory;
+        ar >> cache_marker;
+        size_t num_page_tables;
+        ar >> num_page_tables;
+        for (size_t i = 0; i < num_page_tables; ++i) {
+            page_table_list.emplace_back(std::make_shared<PageTable>(backing_memory));
+            page_table_list[i]->load(ar, version);
+        }
+        size_t current_index;
+        ar >> current_index;
+        current_page_table = UnserializePageTable(current_index);
+    }
+
+    BOOST_SERIALIZATION_SPLIT_MEMBER()
+};

 MemorySystem::MemorySystem() : impl(std::make_unique<Impl>()) {}
 MemorySystem::~MemorySystem() = default;

+BackingMemoryManager& MemorySystem::GetBackingMemoryManager() {
+    return *impl->backing_memory;
+}
+
+std::shared_ptr<PageTable> MemorySystem::NewPageTable() {
+    return std::make_shared<PageTable>(impl->backing_memory);
+}
+
 template <class Archive>
 void MemorySystem::serialize(Archive& ar, const unsigned int file_version) {
     ar&* impl.get();
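Every guest region (FCRAM, VRAM, N3DS extra RAM, DSP RAM, plus the shared/config pages and IPC mappings) is now sub-allocated from one BackingMemoryManager arena sized TOTAL_REQUIRED_MEMORY, instead of each region being its own heap allocation. BackingMemoryManager itself is defined elsewhere in the commit and is not part of this excerpt; the following is only a simplified, hypothetical arena illustrating the idea that each block is identified by a stable offset into one host allocation:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>

// Hypothetical stand-in, not the real BackingMemoryManager: one contiguous host allocation
// carved into regions; offsets survive serialization and can be turned back into pointers.
class ArenaSketch {
public:
    explicit ArenaSketch(std::size_t total_bytes)
        : storage(std::make_unique<std::uint8_t[]>(total_bytes)), size(total_bytes) {}

    // Hands out the next sub-range; returns the host pointer and its stable offset.
    std::pair<std::uint8_t*, std::size_t> Allocate(std::size_t bytes) {
        assert(next + bytes <= size);
        const std::size_t offset = next;
        next += bytes;
        return {storage.get() + offset, offset};
    }

    // Resolves a previously handed-out offset back into a host pointer.
    std::uint8_t* ResolveOffset(std::size_t offset) {
        assert(offset < size);
        return storage.get() + offset;
    }

private:
    std::unique_ptr<std::uint8_t[]> storage;
    std::size_t size;
    std::size_t next = 0;
};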
@@ -218,9 +187,8 @@ std::shared_ptr<PageTable> MemorySystem::GetCurrentPageTable() const {
     return impl->current_page_table;
 }

-void MemorySystem::MapPages(PageTable& page_table, u32 base, u32 size, MemoryRef memory,
-                            PageType type) {
-    LOG_DEBUG(HW_Memory, "Mapping {} onto {:08X}-{:08X}", (void*)memory.GetPtr(), base * PAGE_SIZE,
+void MemorySystem::MapPages(PageTable& page_table, u32 base, u32 size, u8* memory, PageType type) {
+    LOG_DEBUG(HW_Memory, "Mapping {} onto {:08X}-{:08X}", memory, base * PAGE_SIZE,
               (base + size) * PAGE_SIZE);

     RasterizerFlushVirtualRegion(base << PAGE_BITS, size * PAGE_SIZE,
@@ -230,25 +198,32 @@ void MemorySystem::MapPages(PageTable& page_table, u32 base, u32 size, MemoryRef
     while (base != end) {
         ASSERT_MSG(base < PAGE_TABLE_NUM_ENTRIES, "out of range mapping at {:08X}", base);

-        page_table.attributes[base] = type;
-        page_table.pointers[base] = memory;
-
         // If the memory to map is already rasterizer-cached, mark the page
-        if (type == PageType::Memory && impl->cache_marker.IsCached(base * PAGE_SIZE)) {
-            page_table.attributes[base] = PageType::RasterizerCachedMemory;
-            page_table.pointers[base] = nullptr;
+        if (type == PageType::Memory) {
+            if (impl->cache_marker.IsCached(base * PAGE_SIZE)) {
+                page_table.SetRasterizerCachedMemory(base * PAGE_SIZE);
+                impl->backing_memory->Unmap(page_table, base * PAGE_SIZE, PAGE_SIZE);
+            } else {
+                page_table.SetMemory(base * PAGE_SIZE, memory);
+                impl->backing_memory->Map(page_table, base * PAGE_SIZE, memory, PAGE_SIZE);
+            }
+        } else {
+            page_table.Set(type, base * PAGE_SIZE, memory);
+            impl->backing_memory->Unmap(page_table, base * PAGE_SIZE, PAGE_SIZE);
         }

         base += 1;
-        if (memory != nullptr && memory.GetSize() > PAGE_SIZE)
+        if (memory != nullptr)
             memory += PAGE_SIZE;
     }
 }

-void MemorySystem::MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, MemoryRef target) {
+void MemorySystem::MapMemoryRegion(PageTable& page_table, VAddr base, u32 size,
+                                   Memory::MemoryRef target) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
-    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, GetPointerForRef(target),
+             PageType::Memory);
 }

 void MemorySystem::MapIoRegion(PageTable& page_table, VAddr base, u32 size,
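Callers of MapMemoryRegion now pass a MemoryRef; the ref is resolved to a host pointer once, then MapPages fills one page-table slot per page and keeps the backing-memory manager's view (including any fastmem mapping) in sync. A short usage sketch with the signatures shown in this hunk; the wrapper function name is illustrative:

// Sketch only: mapping an FCRAM-backed region into a process page table with the new API.
void MapLinearHeapSketch(Memory::MemorySystem& memory, Memory::PageTable& page_table,
                         u32 physical_offset, VAddr target, u32 size) {
    Memory::MemoryRef backing = memory.GetFCRAMRef(physical_offset);
    // MapMemoryRegion resolves the ref to a host pointer and maps it page by page.
    memory.MapMemoryRegion(page_table, target, size, backing);
}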
@@ -266,15 +241,15 @@ void MemorySystem::UnmapRegion(PageTable& page_table, VAddr base, u32 size) {
     MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
 }

-MemoryRef MemorySystem::GetPointerForRasterizerCache(VAddr addr) {
+u8* MemorySystem::GetPointerForRasterizerCache(VAddr addr) {
     if (addr >= LINEAR_HEAP_VADDR && addr < LINEAR_HEAP_VADDR_END) {
-        return {impl->fcram_mem, addr - LINEAR_HEAP_VADDR};
+        return impl->fcram.Get() + (addr - LINEAR_HEAP_VADDR);
     }
     if (addr >= NEW_LINEAR_HEAP_VADDR && addr < NEW_LINEAR_HEAP_VADDR_END) {
-        return {impl->fcram_mem, addr - NEW_LINEAR_HEAP_VADDR};
+        return impl->fcram.Get() + (addr - NEW_LINEAR_HEAP_VADDR);
     }
     if (addr >= VRAM_VADDR && addr < VRAM_VADDR_END) {
-        return {impl->vram_mem, addr - VRAM_VADDR};
+        return impl->vram.Get() + (addr - VRAM_VADDR);
     }
     UNREACHABLE();
 }
@@ -290,17 +265,12 @@ void MemorySystem::UnregisterPageTable(std::shared_ptr<PageTable> page_table) {
     }
 }

-/**
- * This function should only be called for virtual addreses with attribute `PageType::Special`.
- */
-static MMIORegionPointer GetMMIOHandler(const PageTable& page_table, VAddr vaddr) {
-    for (const auto& region : page_table.special_regions) {
-        if (vaddr >= region.base && vaddr < (region.base + region.size)) {
-            return region.handler;
-        }
-    }
-    ASSERT_MSG(false, "Mapped IO page without a handler @ {:08X}", vaddr);
-    return nullptr; // Should never happen
+size_t MemorySystem::SerializePageTable(std::shared_ptr<PageTable> page_table) {
+    return impl->SerializePageTable(page_table);
+}
+
+std::shared_ptr<PageTable> MemorySystem::UnserializePageTable(size_t page_table_index) {
+    return impl->UnserializePageTable(page_table_index);
 }

 template <typename T>
@@ -308,16 +278,14 @@ T ReadMMIO(MMIORegionPointer mmio_handler, VAddr addr);

 template <typename T>
 T MemorySystem::Read(const VAddr vaddr) {
-    const u8* page_pointer = impl->current_page_table->pointers[vaddr >> PAGE_BITS];
-    if (page_pointer) {
+    if (const u8* pointer = impl->current_page_table->Get(vaddr)) {
         // NOTE: Avoid adding any extra logic to this fast-path block
         T value;
-        std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T));
+        std::memcpy(&value, pointer, sizeof(T));
         return value;
     }

-    PageType type = impl->current_page_table->attributes[vaddr >> PAGE_BITS];
-    switch (type) {
+    switch (impl->current_page_table->GetAttribute(vaddr)) {
     case PageType::Unmapped:
         LOG_ERROR(HW_Memory, "unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr);
         return 0;
@@ -332,7 +300,7 @@ T MemorySystem::Read(const VAddr vaddr) {
         return value;
     }
     case PageType::Special:
-        return ReadMMIO<T>(GetMMIOHandler(*impl->current_page_table, vaddr), vaddr);
+        return ReadMMIO<T>(impl->current_page_table->GetMMIOHandler(vaddr), vaddr);
     default:
         UNREACHABLE();
     }
@@ -343,15 +311,13 @@ void WriteMMIO(MMIORegionPointer mmio_handler, VAddr addr, const T data);

 template <typename T>
 void MemorySystem::Write(const VAddr vaddr, const T data) {
-    u8* page_pointer = impl->current_page_table->pointers[vaddr >> PAGE_BITS];
-    if (page_pointer) {
+    if (u8* pointer = impl->current_page_table->Get(vaddr)) {
         // NOTE: Avoid adding any extra logic to this fast-path block
-        std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T));
+        std::memcpy(pointer, &data, sizeof(T));
         return;
     }

-    PageType type = impl->current_page_table->attributes[vaddr >> PAGE_BITS];
-    switch (type) {
+    switch (impl->current_page_table->GetAttribute(vaddr)) {
     case PageType::Unmapped:
         LOG_ERROR(HW_Memory, "unmapped Write{} 0x{:08X} @ 0x{:08X}", sizeof(data) * 8, (u32)data,
                   vaddr);
@@ -365,30 +331,27 @@ void MemorySystem::Write(const VAddr vaddr, const T data) {
         break;
     }
     case PageType::Special:
-        WriteMMIO<T>(GetMMIOHandler(*impl->current_page_table, vaddr), vaddr, data);
+        WriteMMIO<T>(impl->current_page_table->GetMMIOHandler(vaddr), vaddr, data);
         break;
     default:
         UNREACHABLE();
     }
 }

-bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) {
+bool MemorySystem::IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) {
     auto& page_table = *process.vm_manager.page_table;

-    auto page_pointer = page_table.pointers[vaddr >> PAGE_BITS];
-    if (page_pointer)
+    if (auto page_pointer = page_table.Get(vaddr))
         return true;

-    if (page_table.attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory)
+    if (page_table.GetAttribute(vaddr) == PageType::RasterizerCachedMemory)
         return true;

-    if (page_table.attributes[vaddr >> PAGE_BITS] != PageType::Special)
+    if (page_table.GetAttribute(vaddr) != PageType::Special)
         return false;

-    MMIORegionPointer mmio_region = GetMMIOHandler(page_table, vaddr);
-    if (mmio_region) {
+    if (MMIORegionPointer mmio_region = page_table.GetMMIOHandler(vaddr))
         return mmio_region->IsValidAddress(vaddr);
-    }

     return false;
 }
@@ -398,13 +361,11 @@ bool MemorySystem::IsValidPhysicalAddress(const PAddr paddr) {
 }

 u8* MemorySystem::GetPointer(const VAddr vaddr) {
-    u8* page_pointer = impl->current_page_table->pointers[vaddr >> PAGE_BITS];
-    if (page_pointer) {
-        return page_pointer + (vaddr & PAGE_MASK);
+    if (u8* pointer = impl->current_page_table->Get(vaddr)) {
+        return pointer;
     }

-    if (impl->current_page_table->attributes[vaddr >> PAGE_BITS] ==
-        PageType::RasterizerCachedMemory) {
+    if (impl->current_page_table->GetAttribute(vaddr) == PageType::RasterizerCachedMemory) {
         return GetPointerForRasterizerCache(vaddr);
     }

@@ -426,11 +387,11 @@ std::string MemorySystem::ReadCString(VAddr vaddr, std::size_t max_length) {
     return string;
 }

-u8* MemorySystem::GetPhysicalPointer(PAddr address) {
-    return GetPhysicalRef(address);
+MemoryRef MemorySystem::GetPhysicalRef(PAddr address) {
+    return GetRefForPointer(GetPhysicalPointer(address));
 }

-MemoryRef MemorySystem::GetPhysicalRef(PAddr address) {
+u8* MemorySystem::GetPhysicalPointer(PAddr address) {
     struct MemoryArea {
         PAddr paddr_base;
         u32 size;
@@ -455,30 +416,20 @@ MemoryRef MemorySystem::GetPhysicalRef(PAddr address) {
         return nullptr;
     }

-    u32 offset_into_region = address - area->paddr_base;
+    const u32 offset_into_region = address - area->paddr_base;

-    std::shared_ptr<BackingMem> target_mem = nullptr;
     switch (area->paddr_base) {
     case VRAM_PADDR:
-        target_mem = impl->vram_mem;
-        break;
+        return impl->vram.Get() + offset_into_region;
     case DSP_RAM_PADDR:
-        target_mem = impl->dsp_mem;
-        break;
+        return impl->dspram.Get() + offset_into_region;
     case FCRAM_PADDR:
-        target_mem = impl->fcram_mem;
-        break;
+        return impl->fcram.Get() + offset_into_region;
     case N3DS_EXTRA_RAM_PADDR:
-        target_mem = impl->n3ds_extra_ram_mem;
-        break;
+        return impl->n3ds_extra_ram.Get() + offset_into_region;
     default:
         UNREACHABLE();
     }
-    if (offset_into_region >= target_mem->GetSize()) {
-        return {nullptr};
-    }
-
-    return {target_mem, offset_into_region};
 }

 /// For a rasterizer-accessible PAddr, gets a list of all possible VAddr
@@ -512,7 +463,7 @@ void MemorySystem::RasterizerMarkRegionCached(PAddr start, u32 size, bool cached
     for (VAddr vaddr : PhysicalToVirtualAddressForRasterizer(paddr)) {
         impl->cache_marker.Mark(vaddr, cached);
         for (auto page_table : impl->page_table_list) {
-            PageType& page_type = page_table->attributes[vaddr >> PAGE_BITS];
+            const PageType page_type = page_table->GetAttribute(vaddr);

             if (cached) {
                 // Switch page type to cached if now cached
@@ -522,8 +473,8 @@ void MemorySystem::RasterizerMarkRegionCached(PAddr start, u32 size, bool cached
                     // address space, for example, a system module need not have a VRAM mapping.
                     break;
                 case PageType::Memory:
-                    page_type = PageType::RasterizerCachedMemory;
-                    page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
+                    page_table->SetRasterizerCachedMemory(vaddr);
+                    impl->backing_memory->Unmap(*page_table, vaddr, PAGE_SIZE);
                     break;
                 default:
                     UNREACHABLE();
@@ -536,9 +487,9 @@ void MemorySystem::RasterizerMarkRegionCached(PAddr start, u32 size, bool cached
                     // address space, for example, a system module need not have a VRAM mapping.
                     break;
                 case PageType::RasterizerCachedMemory: {
-                    page_type = PageType::Memory;
-                    page_table->pointers[vaddr >> PAGE_BITS] =
-                        GetPointerForRasterizerCache(vaddr & ~PAGE_MASK);
+                    u8* memory = GetPointerForRasterizerCache(vaddr & ~PAGE_MASK);
+                    page_table->SetMemory(vaddr, memory);
+                    impl->backing_memory->Map(*page_table, vaddr, memory, PAGE_SIZE);
                     break;
                 }
                 default:
@@ -653,7 +604,7 @@ void MemorySystem::ReadBlock(const Kernel::Process& process, const VAddr src_add
         const std::size_t copy_amount = std::min(PAGE_SIZE - page_offset, remaining_size);
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);

-        switch (page_table.attributes[page_index]) {
+        switch (page_table.GetAttribute(current_vaddr)) {
        case PageType::Unmapped: {
             LOG_ERROR(HW_Memory,
                       "unmapped ReadBlock @ 0x{:08X} (start address = 0x{:08X}, size = {})",
@@ -662,14 +613,14 @@ void MemorySystem::ReadBlock(const Kernel::Process& process, const VAddr src_add
             break;
         }
         case PageType::Memory: {
-            DEBUG_ASSERT(page_table.pointers[page_index]);
-
-            const u8* src_ptr = page_table.pointers[page_index] + page_offset;
+            const u8* src_ptr = page_table.Get(current_vaddr);
+            DEBUG_ASSERT(src_ptr);
             std::memcpy(dest_buffer, src_ptr, copy_amount);
             break;
         }
         case PageType::Special: {
-            MMIORegionPointer handler = GetMMIOHandler(page_table, current_vaddr);
+            MMIORegionPointer handler = page_table.GetMMIOHandler(current_vaddr);
             DEBUG_ASSERT(handler);
             handler->ReadBlock(current_vaddr, dest_buffer, copy_amount);
             break;
@@ -718,7 +669,7 @@ void MemorySystem::WriteBlock(const Kernel::Process& process, const VAddr dest_a
         const std::size_t copy_amount = std::min(PAGE_SIZE - page_offset, remaining_size);
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);

-        switch (page_table.attributes[page_index]) {
+        switch (page_table.GetAttribute(current_vaddr)) {
         case PageType::Unmapped: {
             LOG_ERROR(HW_Memory,
                       "unmapped WriteBlock @ 0x{:08X} (start address = 0x{:08X}, size = {})",
@@ -726,14 +677,13 @@ void MemorySystem::WriteBlock(const Kernel::Process& process, const VAddr dest_a
             break;
         }
         case PageType::Memory: {
-            DEBUG_ASSERT(page_table.pointers[page_index]);
-
-            u8* dest_ptr = page_table.pointers[page_index] + page_offset;
+            u8* dest_ptr = page_table.Get(current_vaddr);
+            DEBUG_ASSERT(dest_ptr);
             std::memcpy(dest_ptr, src_buffer, copy_amount);
             break;
         }
         case PageType::Special: {
-            MMIORegionPointer handler = GetMMIOHandler(page_table, current_vaddr);
+            MMIORegionPointer handler = page_table.GetMMIOHandler(current_vaddr);
             DEBUG_ASSERT(handler);
             handler->WriteBlock(current_vaddr, src_buffer, copy_amount);
             break;
@@ -768,7 +718,7 @@ void MemorySystem::ZeroBlock(const Kernel::Process& process, const VAddr dest_ad
         const std::size_t copy_amount = std::min(PAGE_SIZE - page_offset, remaining_size);
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);

-        switch (page_table.attributes[page_index]) {
+        switch (page_table.GetAttribute(current_vaddr)) {
         case PageType::Unmapped: {
             LOG_ERROR(HW_Memory,
                       "unmapped ZeroBlock @ 0x{:08X} (start address = 0x{:08X}, size = {})",
@@ -776,14 +726,13 @@ void MemorySystem::ZeroBlock(const Kernel::Process& process, const VAddr dest_ad
             break;
         }
         case PageType::Memory: {
-            DEBUG_ASSERT(page_table.pointers[page_index]);
-
-            u8* dest_ptr = page_table.pointers[page_index] + page_offset;
+            u8* dest_ptr = page_table.Get(current_vaddr);
+            DEBUG_ASSERT(dest_ptr);
             std::memset(dest_ptr, 0, copy_amount);
             break;
         }
         case PageType::Special: {
-            MMIORegionPointer handler = GetMMIOHandler(page_table, current_vaddr);
+            MMIORegionPointer handler = page_table.GetMMIOHandler(current_vaddr);
             DEBUG_ASSERT(handler);
             handler->WriteBlock(current_vaddr, zeros.data(), copy_amount);
             break;
@@ -821,7 +770,7 @@ void MemorySystem::CopyBlock(const Kernel::Process& dest_process,
         const std::size_t copy_amount = std::min(PAGE_SIZE - page_offset, remaining_size);
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);

-        switch (page_table.attributes[page_index]) {
+        switch (page_table.GetAttribute(current_vaddr)) {
         case PageType::Unmapped: {
             LOG_ERROR(HW_Memory,
                       "unmapped CopyBlock @ 0x{:08X} (start address = 0x{:08X}, size = {})",
@@ -830,13 +779,13 @@ void MemorySystem::CopyBlock(const Kernel::Process& dest_process,
             break;
         }
         case PageType::Memory: {
-            DEBUG_ASSERT(page_table.pointers[page_index]);
-            const u8* src_ptr = page_table.pointers[page_index] + page_offset;
+            const u8* src_ptr = page_table.Get(current_vaddr);
+            DEBUG_ASSERT(src_ptr);
             WriteBlock(dest_process, dest_addr, src_ptr, copy_amount);
             break;
         }
         case PageType::Special: {
-            MMIORegionPointer handler = GetMMIOHandler(page_table, current_vaddr);
+            MMIORegionPointer handler = page_table.GetMMIOHandler(current_vaddr);
             DEBUG_ASSERT(handler);
             std::vector<u8> buffer(copy_amount);
             handler->ReadBlock(current_vaddr, buffer.data(), buffer.size());
@@ -903,22 +852,27 @@ void WriteMMIO<u64>(MMIORegionPointer mmio_handler, VAddr addr, const u64 data)
 }

 u32 MemorySystem::GetFCRAMOffset(const u8* pointer) {
-    ASSERT(pointer >= impl->fcram.get() && pointer <= impl->fcram.get() + Memory::FCRAM_N3DS_SIZE);
-    return static_cast<u32>(pointer - impl->fcram.get());
+    ASSERT(pointer >= impl->fcram.Get() && pointer <= impl->fcram.Get() + Memory::FCRAM_N3DS_SIZE);
+    return static_cast<u32>(pointer - impl->fcram.Get());
+}
+
+u32 MemorySystem::GetFCRAMOffset(MemoryRef ref) {
+    u64 offset = static_cast<u64>(ref - impl->fcram.GetRef());
+    ASSERT(offset < Memory::FCRAM_N3DS_SIZE);
+    return static_cast<u32>(offset);
 }

 u8* MemorySystem::GetFCRAMPointer(u32 offset) {
     ASSERT(offset <= Memory::FCRAM_N3DS_SIZE);
-    return impl->fcram.get() + offset;
+    return impl->fcram.Get() + offset;
 }

 MemoryRef MemorySystem::GetFCRAMRef(u32 offset) {
-    ASSERT(offset <= Memory::FCRAM_N3DS_SIZE);
-    return MemoryRef(impl->fcram_mem, offset);
+    return GetRefForPointer(GetFCRAMPointer(offset));
 }

-void MemorySystem::SetDSP(AudioCore::DspInterface& dsp) {
-    impl->dsp = &dsp;
+u8* MemorySystem::GetDspMemory() const {
+    return impl->dspram.Get();
 }

 } // namespace Memory
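With the accessors above, FCRAM locations can be converted freely between offsets, MemoryRefs and host pointers, and the conversions round-trip. A short usage sketch assuming those accessors plus GetPointerForRef as used earlier in this commit; the offset value and function name are arbitrary:

// Sketch only: offset -> ref -> offset/pointer round trip through MemorySystem.
void FcramRoundTripSketch(Memory::MemorySystem& memory) {
    const u32 offset = 0x1000;
    Memory::MemoryRef ref = memory.GetFCRAMRef(offset); // offset -> ref
    const u32 back = memory.GetFCRAMOffset(ref);        // ref -> offset
    ASSERT(back == offset);
    u8* host = memory.GetPointerForRef(ref);            // ref -> host pointer
    (void)host;
}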
@@ -12,7 +12,9 @@
 #include <boost/serialization/array.hpp>
 #include <boost/serialization/vector.hpp>
 #include "common/common_types.h"
-#include "common/memory_ref.h"
+#include "core/backing_memory_manager.h"
+#include "core/memory_constants.h"
+#include "core/memory_ref.h"
 #include "core/mmio.h"

 class ARM_Interface;
@@ -27,19 +29,10 @@ class DspInterface;

 namespace Memory {

-// Are defined in a system header
-#undef PAGE_SIZE
-#undef PAGE_MASK
-/**
- * Page size used by the ARM architecture. This is the smallest granularity with which memory can
- * be mapped.
- */
-const u32 PAGE_SIZE = 0x1000;
-const u32 PAGE_MASK = PAGE_SIZE - 1;
-const int PAGE_BITS = 12;
-const std::size_t PAGE_TABLE_NUM_ENTRIES = 1 << (32 - PAGE_BITS);
+class BackingMemoryManager;
+class MemorySystem;

-enum class PageType {
+enum class PageType : u8 {
     /// Page is unmapped and should cause an access error.
     Unmapped,
     /// Page is mapped to regular memory. This is the only type you can get pointers to.
@@ -72,45 +65,63 @@ private:
 * fetching requirements when accessing. In the usual case of an access to regular memory, it only
 * requires an indexed fetch and a check for NULL.
 */
-struct PageTable {
+class PageTable final {
+public:
+    explicit PageTable(std::shared_ptr<BackingMemoryManager> backing_memory_manager);
+    ~PageTable();
+
+    void Reset();
+
+    std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetRawPageTables() {
+        return &pointers;
+    }
+
+    u8* GetFastmemBase() {
+        return fastmem_base.Get();
+    }
+
+private:
+    friend class BackingMemoryManager;
+    friend class MemorySystem;
+
+    u8* Get(VAddr vaddr) const {
+        if (u8* page_ptr = pointers[vaddr >> PAGE_BITS])
+            return page_ptr + (vaddr & PAGE_MASK);
+        return nullptr;
+    }
+
+    PageType GetAttribute(VAddr vaddr) const {
+        return attributes[vaddr >> PAGE_BITS];
+    }
+
+    void Set(PageType page_type, VAddr vaddr, u8* backing_memory) {
+        attributes[vaddr >> PAGE_BITS] = page_type;
+        pointers[vaddr >> PAGE_BITS] = backing_memory;
+    }
+
+    void SetMemory(VAddr vaddr, u8* backing_memory) {
+        Set(PageType::Memory, vaddr, backing_memory);
+    }
+
+    void SetRasterizerCachedMemory(VAddr vaddr) {
+        Set(PageType::RasterizerCachedMemory, vaddr, nullptr);
+    }
+
+    MMIORegionPointer GetMMIOHandler(VAddr vaddr);
+
+    std::shared_ptr<BackingMemoryManager> backing_memory_manager;
+
     /**
      * Array of memory pointers backing each page. An entry can only be non-null if the
      * corresponding entry in the `attributes` array is of type `Memory`.
      */
+    std::array<u8*, PAGE_TABLE_NUM_ENTRIES> pointers;

-    // The reason for this rigmarole is to keep the 'raw' and 'refs' arrays in sync.
-    // We need 'raw' for dynarmic and 'refs' for serialization
-    struct Pointers {
+    /**
+     * Array of fine grained page attributes. If it is set to any value other than `Memory`, then
+     * the corresponding entry in `pointers` MUST be set to null.
+     */
+    std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes;

-        struct Entry {
-            Entry(Pointers& pointers_, VAddr idx_) : pointers(pointers_), idx(idx_) {}
|
||||||
void operator=(MemoryRef value) {
|
|
||||||
pointers.refs[idx] = value;
|
|
||||||
pointers.raw[idx] = value.GetPtr();
|
|
||||||
}
|
|
||||||
|
|
||||||
operator u8*() {
|
|
||||||
return pointers.raw[idx];
|
|
||||||
}
|
|
||||||
|
|
||||||
private:
|
|
||||||
Pointers& pointers;
|
|
||||||
VAddr idx;
|
|
||||||
};
|
|
||||||
|
|
||||||
Entry operator[](std::size_t idx) {
|
|
||||||
return Entry(*this, static_cast<VAddr>(idx));
|
|
||||||
}
|
|
||||||
|
|
||||||
private:
|
|
||||||
std::array<u8*, PAGE_TABLE_NUM_ENTRIES> raw;
|
|
||||||
|
|
||||||
std::array<MemoryRef, PAGE_TABLE_NUM_ENTRIES> refs;
|
|
||||||
|
|
||||||
friend struct PageTable;
|
|
||||||
};
|
|
||||||
Pointers pointers;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Contains MMIO handlers that back memory regions whose entries in the `attribute` array is of
|
* Contains MMIO handlers that back memory regions whose entries in the `attribute` array is of
|
||||||
@ -119,27 +130,31 @@ struct PageTable {
|
|||||||
std::vector<SpecialRegion> special_regions;
|
std::vector<SpecialRegion> special_regions;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Array of fine grained page attributes. If it is set to any value other than `Memory`, then
|
* Base address of a 4GiB region in the host address space that corresponds 1:1 to the
|
||||||
* the corresponding entry in `pointers` MUST be set to null.
|
* entire guest address space. There may be holes in this address space in order to
|
||||||
|
* intentionally trigger segfaults for memory managed by the rasterizer cache.
|
||||||
*/
|
*/
|
||||||
std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes;
|
FastmemRegion fastmem_base;
|
||||||
|
|
||||||
std::array<u8*, PAGE_TABLE_NUM_ENTRIES>& GetPointerArray() {
|
|
||||||
return pointers.raw;
|
|
||||||
}
|
|
||||||
|
|
||||||
void Clear();
|
|
||||||
|
|
||||||
private:
|
|
||||||
template <class Archive>
|
template <class Archive>
|
||||||
void serialize(Archive& ar, const unsigned int) {
|
void save(Archive& ar, const unsigned int version) const {
|
||||||
ar& pointers.refs;
|
auto offsets = std::make_unique<std::array<std::ptrdiff_t, PAGE_TABLE_NUM_ENTRIES>>();
|
||||||
ar& special_regions;
|
backing_memory_manager->Serialize(*offsets, pointers);
|
||||||
ar& attributes;
|
ar << *offsets;
|
||||||
for (std::size_t i = 0; i < PAGE_TABLE_NUM_ENTRIES; i++) {
|
ar << special_regions;
|
||||||
pointers.raw[i] = pointers.refs[i].GetPtr();
|
ar << attributes;
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
template <class Archive>
|
||||||
|
void load(Archive& ar, const unsigned int version) {
|
||||||
|
auto offsets = std::make_unique<std::array<std::ptrdiff_t, PAGE_TABLE_NUM_ENTRIES>>();
|
||||||
|
ar >> *offsets;
|
||||||
|
ar >> special_regions;
|
||||||
|
ar >> attributes;
|
||||||
|
backing_memory_manager->Unserialize(pointers, *offsets);
|
||||||
|
}
|
||||||
|
|
||||||
|
BOOST_SERIALIZATION_SPLIT_MEMBER()
|
||||||
friend class boost::serialization::access;
|
friend class boost::serialization::access;
|
||||||
};
|
};
|
||||||
|
|
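The reason the new save/load pair stores std::ptrdiff_t offsets rather than the page pointers themselves is that host pointers are only meaningful within a single run of the emulator. A simplified sketch of that idea, assuming one contiguous backing buffer (the real BackingMemoryManager::Serialize/Unserialize also have to cope with several regions and unmapped pages):

    #include <array>
    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t kNumPages = 16; // tiny stand-in for PAGE_TABLE_NUM_ENTRIES

    // Convert page pointers into base-relative offsets; -1 marks an unmapped page.
    std::array<std::ptrdiff_t, kNumPages> ToOffsets(
        const std::array<std::uint8_t*, kNumPages>& pointers, const std::uint8_t* base) {
        std::array<std::ptrdiff_t, kNumPages> offsets{};
        for (std::size_t i = 0; i < kNumPages; ++i)
            offsets[i] = pointers[i] ? pointers[i] - base : -1;
        return offsets;
    }

    // Rebuild the pointers against whatever base the new process run allocated.
    std::array<std::uint8_t*, kNumPages> FromOffsets(
        const std::array<std::ptrdiff_t, kNumPages>& offsets, std::uint8_t* base) {
        std::array<std::uint8_t*, kNumPages> pointers{};
        for (std::size_t i = 0; i < kNumPages; ++i)
            pointers[i] = offsets[i] >= 0 ? base + offsets[i] : nullptr;
        return pointers;
    }

Only the offsets array goes through the Boost archive; the pointer array is reconstructed on load.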
||||||
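Both lookup paths the class exposes resolve a guest address to a host pointer. A rough sketch of the two shapes (the names here are invented for the example; FastmemRegion, signal handling and the rasterizer-cache holes are out of scope): the table path indexes one pointer per page and adds the in-page offset, while the fastmem path adds the whole 32-bit guest address to a single base.

    #include <array>
    #include <cstddef>
    #include <cstdint>

    constexpr int kPageBits = 12;
    constexpr std::uint32_t kPageMask = (1u << kPageBits) - 1;
    constexpr std::size_t kNumPages = std::size_t{1} << (32 - kPageBits);

    // Table path: one pointer per 4 KiB page, null when the page is not plain memory.
    inline std::uint8_t* TableLookup(const std::array<std::uint8_t*, kNumPages>& pointers,
                                     std::uint32_t vaddr) {
        if (std::uint8_t* page_ptr = pointers[vaddr >> kPageBits])
            return page_ptr + (vaddr & kPageMask);
        return nullptr; // unmapped, MMIO or rasterizer-cached: take the slow path
    }

    // Fastmem path: the guest address space is mirrored 1:1 at fastmem_base, so the
    // translation is a single addition; holes in the mapping fault instead.
    inline std::uint8_t* FastmemLookup(std::uint8_t* fastmem_base, std::uint32_t vaddr) {
        return fastmem_base + vaddr;
    }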
@ -291,6 +306,18 @@ public:
|
|||||||
MemorySystem();
|
MemorySystem();
|
||||||
~MemorySystem();
|
~MemorySystem();
|
||||||
|
|
||||||
|
BackingMemoryManager& GetBackingMemoryManager();
|
||||||
|
|
||||||
|
u8* GetPointerForRef(MemoryRef ref) {
|
||||||
|
return GetBackingMemoryManager().GetPointerForRef(ref);
|
||||||
|
}
|
||||||
|
|
||||||
|
MemoryRef GetRefForPointer(u8* pointer) {
|
||||||
|
return GetBackingMemoryManager().GetRefForPointer(pointer);
|
||||||
|
}
|
||||||
|
|
||||||
|
std::shared_ptr<PageTable> NewPageTable();
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Maps an allocated buffer onto a region of the emulated process address space.
|
* Maps an allocated buffer onto a region of the emulated process address space.
|
||||||
*
|
*
|
||||||
@ -347,17 +374,24 @@ public:
|
|||||||
|
|
||||||
u8* GetPointer(VAddr vaddr);
|
u8* GetPointer(VAddr vaddr);
|
||||||
|
|
||||||
|
/// Determines if the given VAddr is valid for the specified process.
|
||||||
|
static bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr);
|
||||||
|
|
||||||
bool IsValidPhysicalAddress(PAddr paddr);
|
bool IsValidPhysicalAddress(PAddr paddr);
|
||||||
|
|
||||||
/// Gets offset in FCRAM from a pointer inside FCRAM range
|
/// Gets offset in FCRAM from a pointer inside FCRAM range
|
||||||
u32 GetFCRAMOffset(const u8* pointer);
|
u32 GetFCRAMOffset(const u8* pointer);
|
||||||
|
|
||||||
|
u32 GetFCRAMOffset(MemoryRef ref);
|
||||||
|
|
||||||
/// Gets pointer in FCRAM with given offset
|
/// Gets pointer in FCRAM with given offset
|
||||||
u8* GetFCRAMPointer(u32 offset);
|
u8* GetFCRAMPointer(u32 offset);
|
||||||
|
|
||||||
/// Gets a serializable ref to FCRAM with the given offset
|
/// Gets a serializable ref to FCRAM with the given offset
|
||||||
MemoryRef GetFCRAMRef(u32 offset);
|
MemoryRef GetFCRAMRef(u32 offset);
|
||||||
|
|
||||||
|
u8* GetDspMemory() const;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Mark each page touching the region as cached.
|
* Mark each page touching the region as cached.
|
||||||
*/
|
*/
|
||||||
@ -369,7 +403,9 @@ public:
|
|||||||
/// Unregisters page table for rasterizer cache marking
|
/// Unregisters page table for rasterizer cache marking
|
||||||
void UnregisterPageTable(std::shared_ptr<PageTable> page_table);
|
void UnregisterPageTable(std::shared_ptr<PageTable> page_table);
|
||||||
|
|
||||||
void SetDSP(AudioCore::DspInterface& dsp);
|
size_t SerializePageTable(std::shared_ptr<PageTable> page_table);
|
||||||
|
|
||||||
|
std::shared_ptr<PageTable> UnserializePageTable(size_t page_table_index);
|
||||||
|
|
||||||
private:
|
private:
|
||||||
template <typename T>
|
template <typename T>
|
||||||
@ -384,9 +420,9 @@ private:
|
|||||||
* Since the cache only happens on linear heap or VRAM, we know the exact physical address and
|
* Since the cache only happens on linear heap or VRAM, we know the exact physical address and
|
||||||
* pointer of such virtual address
|
* pointer of such virtual address
|
||||||
*/
|
*/
|
||||||
MemoryRef GetPointerForRasterizerCache(VAddr addr);
|
u8* GetPointerForRasterizerCache(VAddr addr);
|
||||||
|
|
||||||
void MapPages(PageTable& page_table, u32 base, u32 size, MemoryRef memory, PageType type);
|
void MapPages(PageTable& page_table, u32 base, u32 size, u8* memory, PageType type);
|
||||||
|
|
||||||
class Impl;
|
class Impl;
|
||||||
|
|
||||||
@ -395,18 +431,10 @@ private:
|
|||||||
friend class boost::serialization::access;
|
friend class boost::serialization::access;
|
||||||
template <class Archive>
|
template <class Archive>
|
||||||
void serialize(Archive& ar, const unsigned int file_version);
|
void serialize(Archive& ar, const unsigned int file_version);
|
||||||
|
|
||||||
public:
|
|
||||||
template <Region R>
|
|
||||||
class BackingMemImpl;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/// Determines if the given VAddr is valid for the specified process.
|
inline bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) {
|
||||||
bool IsValidVirtualAddress(const Kernel::Process& process, VAddr vaddr);
|
return MemorySystem::IsValidVirtualAddress(process, vaddr);
|
||||||
|
}
|
||||||
|
|
||||||
} // namespace Memory
|
} // namespace Memory
|
||||||
|
|
||||||
BOOST_CLASS_EXPORT_KEY(Memory::MemorySystem::BackingMemImpl<Memory::Region::FCRAM>)
|
|
||||||
BOOST_CLASS_EXPORT_KEY(Memory::MemorySystem::BackingMemImpl<Memory::Region::VRAM>)
|
|
||||||
BOOST_CLASS_EXPORT_KEY(Memory::MemorySystem::BackingMemImpl<Memory::Region::DSP>)
|
|
||||||
BOOST_CLASS_EXPORT_KEY(Memory::MemorySystem::BackingMemImpl<Memory::Region::N3DS>)
|
|
||||||
|
src/core/memory_constants.h (new file, 21 lines)
@ -0,0 +1,21 @@
|
|||||||
|
// Copyright 2014 Citra Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
namespace Memory {
|
||||||
|
|
||||||
|
// Are defined in a system header
|
||||||
|
#undef PAGE_SIZE
|
||||||
|
#undef PAGE_MASK
|
||||||
|
/**
|
||||||
|
* Page size used by the ARM architecture. This is the smallest granularity with which memory can
|
||||||
|
* be mapped.
|
||||||
|
*/
|
||||||
|
const u32 PAGE_SIZE = 0x1000;
|
||||||
|
const u32 PAGE_MASK = PAGE_SIZE - 1;
|
||||||
|
const int PAGE_BITS = 12;
|
||||||
|
const std::size_t PAGE_TABLE_NUM_ENTRIES = 1 << (32 - PAGE_BITS);
|
||||||
|
|
||||||
|
} // namespace Memory
|
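These constants are used together whenever a virtual address has to be split into a page-table index and an in-page offset. A small self-contained sketch of that arithmetic (the helper names are made up; the values match the constants above):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr std::uint32_t kPageSize = 0x1000;        // mirrors PAGE_SIZE
    constexpr std::uint32_t kPageMask = kPageSize - 1; // mirrors PAGE_MASK
    constexpr int kPageBits = 12;                      // mirrors PAGE_BITS

    constexpr std::size_t PageIndex(std::uint32_t vaddr) {
        return vaddr >> kPageBits; // which page-table entry the address falls in
    }
    constexpr std::uint32_t PageOffset(std::uint32_t vaddr) {
        return vaddr & kPageMask; // where inside that 4 KiB page it lands
    }

    int main() {
        constexpr std::uint32_t vaddr = 0x12345678;
        static_assert(PageIndex(vaddr) == 0x12345, "page index");
        static_assert(PageOffset(vaddr) == 0x678, "page offset");
        // Recombining index and offset reproduces the original address.
        static_assert(((PageIndex(vaddr) << kPageBits) | PageOffset(vaddr)) == vaddr, "roundtrip");
        assert(kPageSize == (1u << kPageBits));
        return 0;
    }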
src/core/memory_ref.h (new file, 25 lines)
@ -0,0 +1,25 @@
|
|||||||
|
// Copyright 2020 Citra Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <cstddef>
|
||||||
|
#include <boost/serialization/strong_typedef.hpp>
|
||||||
|
|
||||||
|
namespace Memory {
|
||||||
|
|
||||||
|
BOOST_STRONG_TYPEDEF(std::ptrdiff_t, MemoryRef);
|
||||||
|
|
||||||
|
inline const MemoryRef INVALID_MEMORY_REF{-1};
|
||||||
|
|
||||||
|
} // namespace Memory
|
||||||
|
|
||||||
|
namespace boost::serialization {
|
||||||
|
|
||||||
|
template<class Archive>
|
||||||
|
inline void serialize(Archive& ar, Memory::MemoryRef& ref, const unsigned) {
|
||||||
|
ar& ref.t;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace boost::serialization
|
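A short usage sketch of the strong typedef above (requires Boost; the offset values are invented for the example): MemoryRef carries a std::ptrdiff_t offset into the flat backing allocation but is a distinct type, so it cannot be silently mixed with unrelated integers, while comparisons and the INVALID_MEMORY_REF sentinel still work as expected.

    #include <cstddef>
    #include <boost/serialization/strong_typedef.hpp>

    BOOST_STRONG_TYPEDEF(std::ptrdiff_t, MemoryRef);

    const MemoryRef INVALID_MEMORY_REF{-1};

    int main() {
        MemoryRef fcram_base_ref{0x0}; // hypothetical ref to the start of a region
        MemoryRef some_ref{0x2000};    // hypothetical ref 8 KiB into the same region

        // The typedef converts to its underlying ptrdiff_t, so the distance between two
        // refs becomes a plain integer again (as in GetFCRAMOffset(MemoryRef) above).
        std::ptrdiff_t distance = some_ref - fcram_base_ref;

        // Generated comparison operators make the sentinel check straightforward.
        bool unmapped = (some_ref == INVALID_MEMORY_REF);

        return (distance == 0x2000 && !unmapped) ? 0 : 1;
    }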
@ -17,12 +17,13 @@ TestEnvironment::TestEnvironment(bool mutable_memory_)
|
|||||||
|
|
||||||
timing = std::make_unique<Core::Timing>(1, 100);
|
timing = std::make_unique<Core::Timing>(1, 100);
|
||||||
memory = std::make_unique<Memory::MemorySystem>();
|
memory = std::make_unique<Memory::MemorySystem>();
|
||||||
kernel = std::make_unique<Kernel::KernelSystem>(*memory, *timing, [] {}, 0, 1, 0);
|
kernel = std::make_unique<Kernel::KernelSystem>(
|
||||||
|
*memory, *timing, [] {}, 0, 1, 0);
|
||||||
|
|
||||||
kernel->SetCurrentProcess(kernel->CreateProcess(kernel->CreateCodeSet("", 0)));
|
kernel->SetCurrentProcess(kernel->CreateProcess(kernel->CreateCodeSet("", 0)));
|
||||||
page_table = kernel->GetCurrentProcess()->vm_manager.page_table;
|
page_table = kernel->GetCurrentProcess()->vm_manager.page_table;
|
||||||
|
|
||||||
page_table->Clear();
|
page_table->Reset();
|
||||||
|
|
||||||
memory->MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory);
|
memory->MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory);
|
||||||
memory->MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory);
|
memory->MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory);
|
||||||
|
@ -21,10 +21,16 @@ static std::shared_ptr<Object> MakeObject(Kernel::KernelSystem& kernel) {
|
|||||||
return kernel.CreateEvent(ResetType::OneShot);
|
return kernel.CreateEvent(ResetType::OneShot);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static std::vector<u8> ToVector(Memory::BackingMemory& bm) {
|
||||||
|
return {bm.Get(), bm.Get() + bm.GetSize()};
|
||||||
|
}
|
||||||
|
|
||||||
TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel]") {
|
TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel]") {
|
||||||
Core::Timing timing(1, 100);
|
Core::Timing timing(1, 100);
|
||||||
Memory::MemorySystem memory;
|
Memory::MemorySystem memory;
|
||||||
Kernel::KernelSystem kernel(memory, timing, [] {}, 0, 1, 0);
|
Memory::BackingMemoryManager& bmm = memory.GetBackingMemoryManager();
|
||||||
|
Kernel::KernelSystem kernel(
|
||||||
|
memory, timing, [] {}, 0, 1, 0);
|
||||||
auto [server, client] = kernel.CreateSessionPair();
|
auto [server, client] = kernel.CreateSessionPair();
|
||||||
HLERequestContext context(kernel, std::move(server), nullptr);
|
HLERequestContext context(kernel, std::move(server), nullptr);
|
||||||
|
|
||||||
@ -136,13 +142,12 @@ TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel
|
|||||||
}
|
}
|
||||||
|
|
||||||
SECTION("translates StaticBuffer descriptors") {
|
SECTION("translates StaticBuffer descriptors") {
|
||||||
auto mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
|
auto buffer = bmm.AllocateBackingMemory(Memory::PAGE_SIZE);
|
||||||
MemoryRef buffer{mem};
|
std::fill(buffer.Get(), buffer.Get() + buffer.GetSize(), 0xAB);
|
||||||
std::fill(buffer.GetPtr(), buffer.GetPtr() + buffer.GetSize(), 0xAB);
|
|
||||||
|
|
||||||
VAddr target_address = 0x10000000;
|
VAddr target_address = 0x10000000;
|
||||||
auto result = process->vm_manager.MapBackingMemory(target_address, buffer, buffer.GetSize(),
|
auto result = process->vm_manager.MapBackingMemory(target_address, buffer.GetRef(),
|
||||||
MemoryState::Private);
|
buffer.GetSize(), MemoryState::Private);
|
||||||
REQUIRE(result.Code() == RESULT_SUCCESS);
|
REQUIRE(result.Code() == RESULT_SUCCESS);
|
||||||
|
|
||||||
const u32_le input[]{
|
const u32_le input[]{
|
||||||
@ -153,19 +158,18 @@ TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel
|
|||||||
|
|
||||||
context.PopulateFromIncomingCommandBuffer(input, process);
|
context.PopulateFromIncomingCommandBuffer(input, process);
|
||||||
|
|
||||||
CHECK(context.GetStaticBuffer(0) == mem->Vector());
|
CHECK(context.GetStaticBuffer(0) == ToVector(buffer));
|
||||||
|
|
||||||
REQUIRE(process->vm_manager.UnmapRange(target_address, buffer.GetSize()) == RESULT_SUCCESS);
|
REQUIRE(process->vm_manager.UnmapRange(target_address, buffer.GetSize()) == RESULT_SUCCESS);
|
||||||
}
|
}
|
||||||
|
|
||||||
SECTION("translates MappedBuffer descriptors") {
|
SECTION("translates MappedBuffer descriptors") {
|
||||||
auto mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
|
auto buffer = bmm.AllocateBackingMemory(Memory::PAGE_SIZE);
|
||||||
MemoryRef buffer{mem};
|
std::fill(buffer.Get(), buffer.Get() + buffer.GetSize(), 0xCD);
|
||||||
std::fill(buffer.GetPtr(), buffer.GetPtr() + buffer.GetSize(), 0xCD);
|
|
||||||
|
|
||||||
VAddr target_address = 0x10000000;
|
VAddr target_address = 0x10000000;
|
||||||
auto result = process->vm_manager.MapBackingMemory(target_address, buffer, buffer.GetSize(),
|
auto result = process->vm_manager.MapBackingMemory(target_address, buffer.GetRef(),
|
||||||
MemoryState::Private);
|
buffer.GetSize(), MemoryState::Private);
|
||||||
|
|
||||||
const u32_le input[]{
|
const u32_le input[]{
|
||||||
IPC::MakeHeader(0, 0, 2),
|
IPC::MakeHeader(0, 0, 2),
|
||||||
@ -178,28 +182,28 @@ TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel
|
|||||||
std::vector<u8> other_buffer(buffer.GetSize());
|
std::vector<u8> other_buffer(buffer.GetSize());
|
||||||
context.GetMappedBuffer(0).Read(other_buffer.data(), 0, buffer.GetSize());
|
context.GetMappedBuffer(0).Read(other_buffer.data(), 0, buffer.GetSize());
|
||||||
|
|
||||||
CHECK(other_buffer == mem->Vector());
|
CHECK(other_buffer == ToVector(buffer));
|
||||||
|
|
||||||
REQUIRE(process->vm_manager.UnmapRange(target_address, buffer.GetSize()) == RESULT_SUCCESS);
|
REQUIRE(process->vm_manager.UnmapRange(target_address, buffer.GetSize()) == RESULT_SUCCESS);
|
||||||
}
|
}
|
||||||
|
|
||||||
SECTION("translates mixed params") {
|
SECTION("translates mixed params") {
|
||||||
auto mem_static = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
|
auto buffer_static = bmm.AllocateBackingMemory(Memory::PAGE_SIZE);
|
||||||
MemoryRef buffer_static{mem_static};
|
std::fill(buffer_static.Get(), buffer_static.Get() + buffer_static.GetSize(), 0xCE);
|
||||||
std::fill(buffer_static.GetPtr(), buffer_static.GetPtr() + buffer_static.GetSize(), 0xCE);
|
|
||||||
|
|
||||||
auto mem_mapped = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
|
auto buffer_mapped = bmm.AllocateBackingMemory(Memory::PAGE_SIZE);
|
||||||
MemoryRef buffer_mapped{mem_mapped};
|
std::fill(buffer_mapped.Get(), buffer_mapped.Get() + buffer_mapped.GetSize(), 0xDF);
|
||||||
std::fill(buffer_mapped.GetPtr(), buffer_mapped.GetPtr() + buffer_mapped.GetSize(), 0xDF);
|
|
||||||
|
|
||||||
VAddr target_address_static = 0x10000000;
|
VAddr target_address_static = 0x10000000;
|
||||||
auto result = process->vm_manager.MapBackingMemory(
|
auto result =
|
||||||
target_address_static, buffer_static, buffer_static.GetSize(), MemoryState::Private);
|
process->vm_manager.MapBackingMemory(target_address_static, buffer_static.GetRef(),
|
||||||
|
buffer_static.GetSize(), MemoryState::Private);
|
||||||
REQUIRE(result.Code() == RESULT_SUCCESS);
|
REQUIRE(result.Code() == RESULT_SUCCESS);
|
||||||
|
|
||||||
VAddr target_address_mapped = 0x20000000;
|
VAddr target_address_mapped = 0x20000000;
|
||||||
result = process->vm_manager.MapBackingMemory(
|
result =
|
||||||
target_address_mapped, buffer_mapped, buffer_mapped.GetSize(), MemoryState::Private);
|
process->vm_manager.MapBackingMemory(target_address_mapped, buffer_mapped.GetRef(),
|
||||||
|
buffer_mapped.GetSize(), MemoryState::Private);
|
||||||
REQUIRE(result.Code() == RESULT_SUCCESS);
|
REQUIRE(result.Code() == RESULT_SUCCESS);
|
||||||
|
|
||||||
auto a = MakeObject(kernel);
|
auto a = MakeObject(kernel);
|
||||||
@ -224,10 +228,10 @@ TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel
|
|||||||
CHECK(output[2] == 0xABCDEF00);
|
CHECK(output[2] == 0xABCDEF00);
|
||||||
CHECK(context.GetIncomingHandle(output[4]) == a);
|
CHECK(context.GetIncomingHandle(output[4]) == a);
|
||||||
CHECK(output[6] == process->process_id);
|
CHECK(output[6] == process->process_id);
|
||||||
CHECK(context.GetStaticBuffer(0) == mem_static->Vector());
|
CHECK(context.GetStaticBuffer(0) == ToVector(buffer_static));
|
||||||
std::vector<u8> other_buffer(buffer_mapped.GetSize());
|
std::vector<u8> other_buffer(buffer_mapped.GetSize());
|
||||||
context.GetMappedBuffer(0).Read(other_buffer.data(), 0, buffer_mapped.GetSize());
|
context.GetMappedBuffer(0).Read(other_buffer.data(), 0, buffer_mapped.GetSize());
|
||||||
CHECK(other_buffer == mem_mapped->Vector());
|
CHECK(other_buffer == ToVector(buffer_mapped));
|
||||||
|
|
||||||
REQUIRE(process->vm_manager.UnmapRange(target_address_static, buffer_static.GetSize()) ==
|
REQUIRE(process->vm_manager.UnmapRange(target_address_static, buffer_static.GetSize()) ==
|
||||||
RESULT_SUCCESS);
|
RESULT_SUCCESS);
|
||||||
@ -239,7 +243,9 @@ TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel
|
|||||||
TEST_CASE("HLERequestContext::WriteToOutgoingCommandBuffer", "[core][kernel]") {
|
TEST_CASE("HLERequestContext::WriteToOutgoingCommandBuffer", "[core][kernel]") {
|
||||||
Core::Timing timing(1, 100);
|
Core::Timing timing(1, 100);
|
||||||
Memory::MemorySystem memory;
|
Memory::MemorySystem memory;
|
||||||
Kernel::KernelSystem kernel(memory, timing, [] {}, 0, 1, 0);
|
Memory::BackingMemoryManager& bmm = memory.GetBackingMemoryManager();
|
||||||
|
Kernel::KernelSystem kernel(
|
||||||
|
memory, timing, [] {}, 0, 1, 0);
|
||||||
auto [server, client] = kernel.CreateSessionPair();
|
auto [server, client] = kernel.CreateSessionPair();
|
||||||
HLERequestContext context(kernel, std::move(server), nullptr);
|
HLERequestContext context(kernel, std::move(server), nullptr);
|
||||||
|
|
||||||
@ -318,12 +324,11 @@ TEST_CASE("HLERequestContext::WriteToOutgoingCommandBuffer", "[core][kernel]") {
|
|||||||
|
|
||||||
context.AddStaticBuffer(0, input_buffer);
|
context.AddStaticBuffer(0, input_buffer);
|
||||||
|
|
||||||
auto output_mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
|
auto output_buffer = bmm.AllocateBackingMemory(Memory::PAGE_SIZE);
|
||||||
MemoryRef output_buffer{output_mem};
|
|
||||||
|
|
||||||
VAddr target_address = 0x10000000;
|
VAddr target_address = 0x10000000;
|
||||||
auto result = process->vm_manager.MapBackingMemory(
|
auto result = process->vm_manager.MapBackingMemory(
|
||||||
target_address, output_buffer, output_buffer.GetSize(), MemoryState::Private);
|
target_address, output_buffer.GetRef(), output_buffer.GetSize(), MemoryState::Private);
|
||||||
REQUIRE(result.Code() == RESULT_SUCCESS);
|
REQUIRE(result.Code() == RESULT_SUCCESS);
|
||||||
|
|
||||||
input[0] = IPC::MakeHeader(0, 0, 2);
|
input[0] = IPC::MakeHeader(0, 0, 2);
|
||||||
@ -340,7 +345,7 @@ TEST_CASE("HLERequestContext::WriteToOutgoingCommandBuffer", "[core][kernel]") {
|
|||||||
|
|
||||||
context.WriteToOutgoingCommandBuffer(output_cmdbuff.data(), *process);
|
context.WriteToOutgoingCommandBuffer(output_cmdbuff.data(), *process);
|
||||||
|
|
||||||
CHECK(output_mem->Vector() == input_buffer);
|
CHECK(ToVector(output_buffer) == input_buffer);
|
||||||
REQUIRE(process->vm_manager.UnmapRange(target_address, output_buffer.GetSize()) ==
|
REQUIRE(process->vm_manager.UnmapRange(target_address, output_buffer.GetSize()) ==
|
||||||
RESULT_SUCCESS);
|
RESULT_SUCCESS);
|
||||||
}
|
}
|
||||||
@ -349,12 +354,11 @@ TEST_CASE("HLERequestContext::WriteToOutgoingCommandBuffer", "[core][kernel]") {
|
|||||||
std::vector<u8> input_buffer(Memory::PAGE_SIZE);
|
std::vector<u8> input_buffer(Memory::PAGE_SIZE);
|
||||||
std::fill(input_buffer.begin(), input_buffer.end(), 0xAB);
|
std::fill(input_buffer.begin(), input_buffer.end(), 0xAB);
|
||||||
|
|
||||||
auto output_mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
|
auto output_buffer = bmm.AllocateBackingMemory(Memory::PAGE_SIZE);
|
||||||
MemoryRef output_buffer{output_mem};
|
|
||||||
|
|
||||||
VAddr target_address = 0x10000000;
|
VAddr target_address = 0x10000000;
|
||||||
auto result = process->vm_manager.MapBackingMemory(
|
auto result = process->vm_manager.MapBackingMemory(
|
||||||
target_address, output_buffer, output_buffer.GetSize(), MemoryState::Private);
|
target_address, output_buffer.GetRef(), output_buffer.GetSize(), MemoryState::Private);
|
||||||
REQUIRE(result.Code() == RESULT_SUCCESS);
|
REQUIRE(result.Code() == RESULT_SUCCESS);
|
||||||
|
|
||||||
const u32_le input_cmdbuff[]{
|
const u32_le input_cmdbuff[]{
|
||||||
@ -375,7 +379,7 @@ TEST_CASE("HLERequestContext::WriteToOutgoingCommandBuffer", "[core][kernel]") {
|
|||||||
|
|
||||||
CHECK(output[1] == IPC::MappedBufferDesc(output_buffer.GetSize(), IPC::W));
|
CHECK(output[1] == IPC::MappedBufferDesc(output_buffer.GetSize(), IPC::W));
|
||||||
CHECK(output[2] == target_address);
|
CHECK(output[2] == target_address);
|
||||||
CHECK(output_mem->Vector() == input_buffer);
|
CHECK(ToVector(output_buffer) == input_buffer);
|
||||||
REQUIRE(process->vm_manager.UnmapRange(target_address, output_buffer.GetSize()) ==
|
REQUIRE(process->vm_manager.UnmapRange(target_address, output_buffer.GetSize()) ==
|
||||||
RESULT_SUCCESS);
|
RESULT_SUCCESS);
|
||||||
}
|
}
|
||||||
|
@ -10,13 +10,13 @@
|
|||||||
#include "core/memory.h"
|
#include "core/memory.h"
|
||||||
|
|
||||||
TEST_CASE("Memory Basics", "[kernel][memory]") {
|
TEST_CASE("Memory Basics", "[kernel][memory]") {
|
||||||
auto mem = std::make_shared<BufferMem>(Memory::PAGE_SIZE);
|
|
||||||
MemoryRef block{mem};
|
|
||||||
Memory::MemorySystem memory;
|
Memory::MemorySystem memory;
|
||||||
|
auto block = memory.GetBackingMemoryManager().AllocateBackingMemory(Memory::PAGE_SIZE);
|
||||||
|
|
||||||
SECTION("mapping memory") {
|
SECTION("mapping memory") {
|
||||||
// Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
|
// Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
|
||||||
auto manager = std::make_unique<Kernel::VMManager>(memory);
|
auto manager = std::make_unique<Kernel::VMManager>(memory);
|
||||||
auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block, block.GetSize(),
|
auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block.GetRef(), block.GetSize(),
|
||||||
Kernel::MemoryState::Private);
|
Kernel::MemoryState::Private);
|
||||||
REQUIRE(result.Code() == RESULT_SUCCESS);
|
REQUIRE(result.Code() == RESULT_SUCCESS);
|
||||||
|
|
||||||
@ -24,14 +24,14 @@ TEST_CASE("Memory Basics", "[kernel][memory]") {
|
|||||||
CHECK(vma != manager->vma_map.end());
|
CHECK(vma != manager->vma_map.end());
|
||||||
CHECK(vma->second.size == block.GetSize());
|
CHECK(vma->second.size == block.GetSize());
|
||||||
CHECK(vma->second.type == Kernel::VMAType::BackingMemory);
|
CHECK(vma->second.type == Kernel::VMAType::BackingMemory);
|
||||||
CHECK(vma->second.backing_memory.GetPtr() == block.GetPtr());
|
CHECK(vma->second.backing_memory == block.GetRef());
|
||||||
CHECK(vma->second.meminfo_state == Kernel::MemoryState::Private);
|
CHECK(vma->second.meminfo_state == Kernel::MemoryState::Private);
|
||||||
}
|
}
|
||||||
|
|
||||||
SECTION("unmapping memory") {
|
SECTION("unmapping memory") {
|
||||||
// Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
|
// Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
|
||||||
auto manager = std::make_unique<Kernel::VMManager>(memory);
|
auto manager = std::make_unique<Kernel::VMManager>(memory);
|
||||||
auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block, block.GetSize(),
|
auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block.GetRef(), block.GetSize(),
|
||||||
Kernel::MemoryState::Private);
|
Kernel::MemoryState::Private);
|
||||||
REQUIRE(result.Code() == RESULT_SUCCESS);
|
REQUIRE(result.Code() == RESULT_SUCCESS);
|
||||||
|
|
||||||
@ -41,13 +41,13 @@ TEST_CASE("Memory Basics", "[kernel][memory]") {
|
|||||||
auto vma = manager->FindVMA(Memory::HEAP_VADDR);
|
auto vma = manager->FindVMA(Memory::HEAP_VADDR);
|
||||||
CHECK(vma != manager->vma_map.end());
|
CHECK(vma != manager->vma_map.end());
|
||||||
CHECK(vma->second.type == Kernel::VMAType::Free);
|
CHECK(vma->second.type == Kernel::VMAType::Free);
|
||||||
CHECK(vma->second.backing_memory.GetPtr() == nullptr);
|
CHECK(vma->second.backing_memory == Memory::INVALID_MEMORY_REF);
|
||||||
}
|
}
|
||||||
|
|
||||||
SECTION("changing memory permissions") {
|
SECTION("changing memory permissions") {
|
||||||
// Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
|
// Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
|
||||||
auto manager = std::make_unique<Kernel::VMManager>(memory);
|
auto manager = std::make_unique<Kernel::VMManager>(memory);
|
||||||
auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block, block.GetSize(),
|
auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block.GetRef(), block.GetSize(),
|
||||||
Kernel::MemoryState::Private);
|
Kernel::MemoryState::Private);
|
||||||
REQUIRE(result.Code() == RESULT_SUCCESS);
|
REQUIRE(result.Code() == RESULT_SUCCESS);
|
||||||
|
|
||||||
@ -66,7 +66,7 @@ TEST_CASE("Memory Basics", "[kernel][memory]") {
|
|||||||
SECTION("changing memory state") {
|
SECTION("changing memory state") {
|
||||||
// Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
|
// Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
|
||||||
auto manager = std::make_unique<Kernel::VMManager>(memory);
|
auto manager = std::make_unique<Kernel::VMManager>(memory);
|
||||||
auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block, block.GetSize(),
|
auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block.GetRef(), block.GetSize(),
|
||||||
Kernel::MemoryState::Private);
|
Kernel::MemoryState::Private);
|
||||||
REQUIRE(result.Code() == RESULT_SUCCESS);
|
REQUIRE(result.Code() == RESULT_SUCCESS);
|
||||||
|
|
||||||
|