Merge pull request #10990 from comex/ubsan

Fixes and workarounds to make UBSan happier on macOS

Commit: d3da1e6517
@@ -51,7 +51,7 @@ struct PageTable {
     class PageInfo {
     public:
         /// Returns the page pointer
-        [[nodiscard]] u8* Pointer() const noexcept {
+        [[nodiscard]] uintptr_t Pointer() const noexcept {
            return ExtractPointer(raw.load(std::memory_order_relaxed));
         }

@@ -61,7 +61,7 @@ struct PageTable {
         }

         /// Returns the page pointer and attribute pair, extracted from the same atomic read
-        [[nodiscard]] std::pair<u8*, PageType> PointerType() const noexcept {
+        [[nodiscard]] std::pair<uintptr_t, PageType> PointerType() const noexcept {
            const uintptr_t non_atomic_raw = raw.load(std::memory_order_relaxed);
            return {ExtractPointer(non_atomic_raw), ExtractType(non_atomic_raw)};
         }
@@ -73,13 +73,13 @@ struct PageTable {
         }

         /// Write a page pointer and type pair atomically
-        void Store(u8* pointer, PageType type) noexcept {
-            raw.store(reinterpret_cast<uintptr_t>(pointer) | static_cast<uintptr_t>(type));
+        void Store(uintptr_t pointer, PageType type) noexcept {
+            raw.store(pointer | static_cast<uintptr_t>(type));
         }

         /// Unpack a pointer from a page info raw representation
-        [[nodiscard]] static u8* ExtractPointer(uintptr_t raw) noexcept {
-            return reinterpret_cast<u8*>(raw & (~uintptr_t{0} << ATTRIBUTE_BITS));
+        [[nodiscard]] static uintptr_t ExtractPointer(uintptr_t raw) noexcept {
+            return raw & (~uintptr_t{0} << ATTRIBUTE_BITS);
         }

         /// Unpack a page type from a page info raw representation
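The three PageInfo hunks above change the packed page entry from a tagged u8* to a bare uintptr_t. The low ATTRIBUTE_BITS of a page-aligned host address carry the PageType tag; keeping the packed value in integer form means the code never reinterpret_casts a tag-carrying or biased value back into a pointer, which is the operation UBSan flags. A minimal self-contained sketch of the scheme (the ATTRIBUTE_BITS width and the PageType enumerators here are illustrative assumptions, not yuzu's exact definitions):

#include <atomic>
#include <cstdint>

enum class PageType : std::uint8_t { Unmapped = 0, Memory = 1, DebugMemory = 2 };

// Page-aligned host addresses have zero low bits, which are reused as a tag.
constexpr std::uintptr_t ATTRIBUTE_BITS = 5; // assumed width, for illustration

class PageInfo {
public:
    // All arithmetic stays in uintptr_t; the caller materializes a real
    // pointer only once the final, in-bounds address is known.
    void Store(std::uintptr_t pointer, PageType type) noexcept {
        raw.store(pointer | static_cast<std::uintptr_t>(type));
    }
    [[nodiscard]] static std::uintptr_t ExtractPointer(std::uintptr_t raw) noexcept {
        return raw & (~std::uintptr_t{0} << ATTRIBUTE_BITS);
    }
    [[nodiscard]] static PageType ExtractType(std::uintptr_t raw) noexcept {
        return static_cast<PageType>(raw & ~(~std::uintptr_t{0} << ATTRIBUTE_BITS));
    }

private:
    std::atomic<std::uintptr_t> raw{0};
};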
@@ -217,8 +217,8 @@ void ARM_Interface::Run() {
     }
 }

-void ARM_Interface::LoadWatchpointArray(const WatchpointArray& wp) {
-    watchpoints = &wp;
+void ARM_Interface::LoadWatchpointArray(const WatchpointArray* wp) {
+    watchpoints = wp;
 }

 const Kernel::DebugWatchpoint* ARM_Interface::MatchingWatchpoint(
@@ -186,7 +186,7 @@ public:
     virtual void SaveContext(ThreadContext64& ctx) const = 0;
     virtual void LoadContext(const ThreadContext32& ctx) = 0;
     virtual void LoadContext(const ThreadContext64& ctx) = 0;
-    void LoadWatchpointArray(const WatchpointArray& wp);
+    void LoadWatchpointArray(const WatchpointArray* wp);

     /// Clears the exclusive monitor's state.
     virtual void ClearExclusiveState() = 0;
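Both LoadWatchpointArray hunks change the parameter from a reference to a pointer. The only way to produce a const WatchpointArray& for a thread without an owner process is to dereference a null process pointer, which is undefined behavior that UBSan traps at the call site; a pointer makes the empty case representable (the matching call-site fix is in the KScheduler::Reload hunk further down). A sketch of the shape with stand-in types (Process and the array size are assumptions for illustration):

#include <array>

struct DebugWatchpoint {};
using WatchpointArray = std::array<DebugWatchpoint, 4>; // element count is a stand-in

struct Process {
    const WatchpointArray& GetWatchpoints() const { return watchpoints; }
    WatchpointArray watchpoints{};
};

class ArmInterface {
public:
    // A pointer parameter makes nullptr a valid "no watchpoints" state, so
    // the caller never dereferences a possibly-null Process* to bind it.
    void LoadWatchpointArray(const WatchpointArray* wp) { watchpoints = wp; }

private:
    const WatchpointArray* watchpoints{};
};

int main() {
    ArmInterface cpu_core;
    Process* process = nullptr; // e.g. a kernel thread with no owner process
    cpu_core.LoadWatchpointArray(process ? &process->GetWatchpoints() : nullptr);
}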
@@ -15,8 +15,8 @@ void KAutoObject::RegisterWithKernel() {
     m_kernel.RegisterKernelObject(this);
 }

-void KAutoObject::UnregisterWithKernel() {
-    m_kernel.UnregisterKernelObject(this);
+void KAutoObject::UnregisterWithKernel(KernelCore& kernel, KAutoObject* self) {
+    kernel.UnregisterKernelObject(self);
 }

 } // namespace Kernel
@@ -159,14 +159,15 @@ public:

         // If ref count hits zero, destroy the object.
         if (cur_ref_count - 1 == 0) {
+            KernelCore& kernel = m_kernel;
             this->Destroy();
-            this->UnregisterWithKernel();
+            KAutoObject::UnregisterWithKernel(kernel, this);
         }
     }

 private:
     void RegisterWithKernel();
-    void UnregisterWithKernel();
+    static void UnregisterWithKernel(KernelCore& kernel, KAutoObject* self);

 protected:
     KernelCore& m_kernel;
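The two KAutoObject hunks fix an order-of-destruction hazard rather than a bad cast: Destroy() can free the object, after which the old this->UnregisterWithKernel() call read the freed m_kernel member. The fix copies the kernel reference into a local before destruction and routes the call through a static function, so no member of the dead object is touched. A self-contained sketch of the pattern (KernelCore is a stand-in and the reference-count bookkeeping is elided):

#include <cstdio>

class KAutoObject;

class KernelCore {
public:
    // Only the address value is used here (e.g. as a registry key).
    void UnregisterKernelObject(KAutoObject* obj) {
        std::printf("unregistered %p\n", static_cast<void*>(obj));
    }
};

class KAutoObject {
public:
    explicit KAutoObject(KernelCore& kernel) : m_kernel{kernel} {}
    virtual ~KAutoObject() = default;

    void Close() {
        // Copy the reference out BEFORE Destroy() frees this object;
        // reading this->m_kernel afterwards would touch freed memory.
        KernelCore& kernel = m_kernel;
        this->Destroy();
        // Static call: no implicit this-> access on the destroyed object.
        KAutoObject::UnregisterWithKernel(kernel, this);
    }

private:
    virtual void Destroy() { delete this; }

    static void UnregisterWithKernel(KernelCore& kernel, KAutoObject* self) {
        kernel.UnregisterKernelObject(self);
    }

    KernelCore& m_kernel;
};

int main() {
    KernelCore kernel;
    (new KAutoObject(kernel))->Close();
}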
@@ -510,11 +510,12 @@ void KScheduler::Unload(KThread* thread) {

 void KScheduler::Reload(KThread* thread) {
     auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
+    auto* process = thread->GetOwnerProcess();
     cpu_core.LoadContext(thread->GetContext32());
     cpu_core.LoadContext(thread->GetContext64());
     cpu_core.SetTlsAddress(GetInteger(thread->GetTlsAddress()));
     cpu_core.SetTPIDR_EL0(thread->GetTpidrEl0());
-    cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
+    cpu_core.LoadWatchpointArray(process ? &process->GetWatchpoints() : nullptr);
     cpu_core.ClearExclusiveState();
 }

@@ -129,7 +129,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress
     case ThreadType::User:
         ASSERT(((owner == nullptr) ||
                 (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
-        ASSERT(((owner == nullptr) ||
+        ASSERT(((owner == nullptr) || (prio > Svc::LowestThreadPriority) ||
                 (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask()));
         break;
     case ThreadType::Kernel:
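The added (prio > Svc::LowestThreadPriority) clause is a shift-width guard: kernel-internal threads can carry a priority above the SVC range, and evaluating 1ULL << prio with prio >= 64 is undefined behavior. Short-circuiting first means the shift is only formed with an in-range count. A distilled version of the predicate (the value 63 for the lowest SVC priority is an assumption following the HOS convention):

#include <cstdint>

constexpr std::int32_t LowestThreadPriority = 63; // assumed SVC limit

[[nodiscard]] bool PriorityPermitted(std::uint64_t priority_mask, std::int32_t prio) {
    // 1ULL << prio is undefined for prio >= 64, so out-of-range
    // (kernel-internal) priorities are accepted before the shift is formed.
    if (prio > LowestThreadPriority) {
        return true;
    }
    return (priority_mask | (1ULL << prio)) == priority_mask;
}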
@@ -73,7 +73,7 @@ struct Memory::Impl {
             return {};
         }

-        return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
+        return system.DeviceMemory().GetPointer<u8>(paddr + vaddr);
     }

     [[nodiscard]] u8* GetPointerFromDebugMemory(u64 vaddr) const {
@@ -84,7 +84,7 @@ struct Memory::Impl {
             return {};
         }

-        return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
+        return system.DeviceMemory().GetPointer<u8>(paddr + vaddr);
     }

     u8 Read8(const Common::ProcessAddress addr) {
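Both device-memory hunks move an addition from pointer space into integer space. The stored paddr is biased (the backing address has the virtual page base subtracted out, as the remap hunk further down shows), so GetPointer<u8>(paddr) can name an address far outside the backing buffer, and merely forming that intermediate pointer is undefined behavior even though + vaddr would bring it back in range. Summing the integers first means only the final, in-bounds pointer ever exists. A toy model of the difference (DeviceMemory here is a stand-in, not yuzu's class):

#include <cstdint>
#include <vector>

// Toy flat backing buffer indexed by a physical offset.
struct DeviceMemory {
    std::vector<std::uint8_t> backing = std::vector<std::uint8_t>(0x10000);
    std::uint8_t* GetPointer(std::uint64_t offset) { return backing.data() + offset; }
};

std::uint8_t* GetHostPointer(DeviceMemory& mem, std::uint64_t biased_paddr,
                             std::uint64_t vaddr) {
    // mem.GetPointer(biased_paddr) + vaddr would first construct an
    // out-of-bounds pointer (undefined behavior); adding the integer
    // offsets before the single pointer adjustment never does.
    return mem.GetPointer(biased_paddr + vaddr);
}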
@@ -205,7 +205,8 @@ struct Memory::Impl {
                 break;
             }
             case Common::PageType::Memory: {
-                u8* mem_ptr = pointer + page_offset + (page_index << YUZU_PAGEBITS);
+                u8* mem_ptr =
+                    reinterpret_cast<u8*>(pointer + page_offset + (page_index << YUZU_PAGEBITS));
                 on_memory(copy_amount, mem_ptr);
                 break;
             }
@@ -447,7 +448,7 @@ struct Memory::Impl {
             break;
         case Common::PageType::Memory:
             current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
-                nullptr, Common::PageType::DebugMemory);
+                0, Common::PageType::DebugMemory);
             break;
         default:
             UNREACHABLE();
@@ -465,7 +466,8 @@ struct Memory::Impl {
         case Common::PageType::DebugMemory: {
             u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)};
             current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
-                pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
+                reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~YUZU_PAGEMASK),
+                Common::PageType::Memory);
             break;
         }
         default:
@@ -505,7 +507,7 @@ struct Memory::Impl {
         case Common::PageType::DebugMemory:
         case Common::PageType::Memory:
             current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
-                nullptr, Common::PageType::RasterizerCachedMemory);
+                0, Common::PageType::RasterizerCachedMemory);
             break;
         case Common::PageType::RasterizerCachedMemory:
             // There can be more than one GPU region mapped per CPU region, so it's common
@@ -533,10 +535,11 @@ struct Memory::Impl {
                 // pagetable after unmapping a VMA. In that case the underlying VMA will no
                 // longer exist, and we should just leave the pagetable entry blank.
                 current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
-                    nullptr, Common::PageType::Unmapped);
+                    0, Common::PageType::Unmapped);
             } else {
                 current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
-                    pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
+                    reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~YUZU_PAGEMASK),
+                    Common::PageType::Memory);
             }
             break;
         }
@@ -583,7 +586,7 @@ struct Memory::Impl {
                    "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE);

         while (base != end) {
-            page_table.pointers[base].Store(nullptr, type);
+            page_table.pointers[base].Store(0, type);
             page_table.backing_addr[base] = 0;
             page_table.blocks[base] = 0;
             base += 1;
@@ -592,7 +595,8 @@ struct Memory::Impl {
         auto orig_base = base;
         while (base != end) {
             auto host_ptr =
-                system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS);
+                reinterpret_cast<uintptr_t>(system.DeviceMemory().GetPointer<u8>(target)) -
+                (base << YUZU_PAGEBITS);
             auto backing = GetInteger(target) - (base << YUZU_PAGEBITS);
             page_table.pointers[base].Store(host_ptr, type);
             page_table.backing_addr[base] = backing;
@@ -618,8 +622,8 @@ struct Memory::Impl {

         // Avoid adding any extra logic to this fast-path block
         const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw();
-        if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
-            return &pointer[vaddr];
+        if (const uintptr_t pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
+            return reinterpret_cast<u8*>(pointer + vaddr);
         }
         switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
         case Common::PageType::Unmapped:
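The fast path in the hunk above works the same way: each page entry stores a biased host base (host pointer minus the page's virtual base, as computed in the remap hunk), and as long as that biased value lives in a uintptr_t rather than a u8*, the out-of-range intermediate is ordinary unsigned arithmetic instead of an invalid pointer. Only the final sum, which lands inside the mapped page, is cast back. A condensed sketch of the lookup (PageEntry and Translate are illustrative names):

#include <cstdint>

struct PageEntry {
    // Host address of the page minus the page's virtual base: out of range
    // as a pointer, but perfectly legal as a uintptr_t.
    std::uintptr_t biased_host_base;
};

[[nodiscard]] inline std::uint8_t* Translate(const PageEntry& entry, std::uint64_t vaddr) {
    // The sum lands back inside the mapped page, so casting only the final
    // value yields a valid pointer with no UB along the way.
    return reinterpret_cast<std::uint8_t*>(entry.biased_host_base + vaddr);
}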
@@ -813,7 +817,7 @@ bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
         return false;
     }
     const auto [pointer, type] = page_table.pointers[page].PointerType();
-    return pointer != nullptr || type == Common::PageType::RasterizerCachedMemory ||
+    return pointer != 0 || type == Common::PageType::RasterizerCachedMemory ||
            type == Common::PageType::DebugMemory;
 }

@@ -103,7 +103,9 @@ public:
     explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_,
                             Core::Memory::Memory& cpu_memory_)
         : rasterizer{rasterizer_},
-          cpu_memory{cpu_memory_}, streams{{CounterStream{static_cast<QueryCache&>(*this),
+          // Use reinterpret_cast instead of static_cast as workaround for
+          // UBSan bug (https://github.com/llvm/llvm-project/issues/59060)
+          cpu_memory{cpu_memory_}, streams{{CounterStream{reinterpret_cast<QueryCache&>(*this),
                                              VideoCore::QueryType::SamplesPassed}}} {
         (void)slot_async_jobs.insert(); // Null value
     }
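The comment added in this hunk names the reason directly: during QueryCacheBase's member-initializer list the derived QueryCache object does not exist yet, and UBSan's instrumented static_cast downcast misfires there (reported upstream as llvm/llvm-project#59060), while reinterpret_cast carries no instrumentation; it is a workaround, not a fix. A minimal CRTP reproduction of the pattern (yuzu's real QueryCacheBase is a template with more members):

template <class Derived>
class CounterStream {
public:
    explicit CounterStream(Derived& owner_) : owner{owner_} {}

private:
    Derived& owner;
};

class QueryCache; // not yet complete where the base stores a reference to it

class QueryCacheBase {
public:
    QueryCacheBase()
        // In yuzu's templated original, a static_cast here compiles but trips
        // UBSan's vptr check even though the reference is merely stored before
        // the derived object exists; reinterpret_cast is uninstrumented.
        : stream{reinterpret_cast<QueryCache&>(*this)} {}

private:
    CounterStream<QueryCache> stream;
};

class QueryCache : public QueryCacheBase {};

int main() {
    QueryCache cache; // the stream ends up referring to the completed object
}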
@@ -13,7 +13,8 @@ namespace VideoCore {

 using namespace Core::Memory;

-RasterizerAccelerated::RasterizerAccelerated(Memory& cpu_memory_) : cpu_memory{cpu_memory_} {}
+RasterizerAccelerated::RasterizerAccelerated(Memory& cpu_memory_)
+    : cached_pages(std::make_unique<CachedPages>()), cpu_memory{cpu_memory_} {}

 RasterizerAccelerated::~RasterizerAccelerated() = default;

@@ -26,7 +27,7 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int del
     std::atomic_thread_fence(std::memory_order_acquire);
     const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
     for (u64 page = addr >> YUZU_PAGEBITS; page != page_end; ++page) {
-        std::atomic_uint16_t& count = cached_pages.at(page >> 2).Count(page);
+        std::atomic_uint16_t& count = cached_pages->at(page >> 2).Count(page);

         if (delta > 0) {
             ASSERT_MSG(count.load(std::memory_order::relaxed) < UINT16_MAX, "Count may overflow!");
@@ -41,7 +41,8 @@ private:
     };
     static_assert(sizeof(CacheEntry) == 8, "CacheEntry should be 8 bytes!");

-    std::array<CacheEntry, 0x2000000> cached_pages;
+    using CachedPages = std::array<CacheEntry, 0x2000000>;
+    std::unique_ptr<CachedPages> cached_pages;
     Core::Memory::Memory& cpu_memory;
 };

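The last three hunks move cached_pages off the object: 0x2000000 eight-byte entries is 128 MiB, far too large to embed by value in every RasterizerAccelerated (the diff does not state the exact failure, but a data member that size is plausibly what upset the sanitizer build on macOS). The table now lives behind a unique_ptr, and the one access site switches from . to ->. A compilable sketch of the shape (CacheEntry is reduced to a stand-in):

#include <array>
#include <atomic>
#include <cstdint>
#include <memory>

struct CacheEntry {
    std::atomic_uint16_t a, b, c, d; // stand-in for the real packed counters
};
static_assert(sizeof(CacheEntry) == 8, "CacheEntry should be 8 bytes!");

class RasterizerAccelerated {
public:
    // 0x2000000 entries * 8 bytes = 128 MiB: heap-allocated so the owning
    // object itself stays small.
    using CachedPages = std::array<CacheEntry, 0x2000000>;

    RasterizerAccelerated() : cached_pages(std::make_unique<CachedPages>()) {}

    std::atomic_uint16_t& Count(std::size_t page) {
        return cached_pages->at(page >> 2).a; // the real code selects one of four
    }

private:
    std::unique_ptr<CachedPages> cached_pages;
};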