hle: kernel: Migrate more of KThread to KAutoObject.
| @@ -168,6 +168,8 @@ add_library(core STATIC | ||||
|     hle/kernel/k_auto_object_container.cpp | ||||
|     hle/kernel/k_auto_object_container.h | ||||
|     hle/kernel/k_affinity_mask.h | ||||
|     hle/kernel/k_class_token.cpp | ||||
|     hle/kernel/k_class_token.h | ||||
|     hle/kernel/k_condition_variable.cpp | ||||
|     hle/kernel/k_condition_variable.h | ||||
|     hle/kernel/k_event.cpp | ||||
|   | ||||
| @@ -72,6 +72,33 @@ ResultVal<Handle> HandleTable::Create(std::shared_ptr<Object> obj) { | ||||
|     return MakeResult<Handle>(handle); | ||||
| } | ||||
|  | ||||
| ResultCode HandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) { | ||||
|     ASSERT(obj != nullptr); | ||||
|  | ||||
|     const u16 slot = next_free_slot; | ||||
|     if (slot >= table_size) { | ||||
|         LOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use."); | ||||
|         return ResultHandleTableFull; | ||||
|     } | ||||
|     next_free_slot = generations[slot]; | ||||
|  | ||||
|     const u16 generation = next_generation++; | ||||
|  | ||||
|     // Overflow count so it fits in the 15 bits dedicated to the generation in the handle. | ||||
|     // Horizon OS uses zero to represent an invalid handle, so skip to 1. | ||||
|     if (next_generation >= (1 << 15)) { | ||||
|         next_generation = 1; | ||||
|     } | ||||
|  | ||||
|     generations[slot] = generation; | ||||
|     objects_new[slot] = obj; | ||||
|     obj->Open(); | ||||
|  | ||||
|     *out_handle = generation | (slot << 15); | ||||
|  | ||||
|     return RESULT_SUCCESS; | ||||
| } | ||||
|  | ||||
| ResultVal<Handle> HandleTable::Duplicate(Handle handle) { | ||||
|     std::shared_ptr<Object> object = GetGeneric(handle); | ||||
|     if (object == nullptr) { | ||||
| @@ -81,30 +108,36 @@ ResultVal<Handle> HandleTable::Duplicate(Handle handle) { | ||||
|     return Create(std::move(object)); | ||||
| } | ||||
|  | ||||
| ResultCode HandleTable::Close(Handle handle) { | ||||
| bool HandleTable::Remove(Handle handle) { | ||||
|     if (!IsValid(handle)) { | ||||
|         LOG_ERROR(Kernel, "Handle is not valid! handle={:08X}", handle); | ||||
|         return ResultInvalidHandle; | ||||
|         return {}; | ||||
|     } | ||||
|  | ||||
|     const u16 slot = GetSlot(handle); | ||||
|  | ||||
|     if (objects[slot].use_count() == 1) { | ||||
|         objects[slot]->Finalize(); | ||||
|     if (objects[slot]) { | ||||
|         objects[slot]->Close(); | ||||
|     } | ||||
|  | ||||
|     if (objects_new[slot]) { | ||||
|         objects_new[slot]->Close(); | ||||
|     } | ||||
|  | ||||
|     objects[slot] = nullptr; | ||||
|     objects_new[slot] = nullptr; | ||||
|  | ||||
|     generations[slot] = next_free_slot; | ||||
|     next_free_slot = slot; | ||||
|     return RESULT_SUCCESS; | ||||
|  | ||||
|     return true; | ||||
| } | ||||
|  | ||||
| bool HandleTable::IsValid(Handle handle) const { | ||||
|     const std::size_t slot = GetSlot(handle); | ||||
|     const u16 generation = GetGeneration(handle); | ||||
|  | ||||
|     return slot < table_size && objects[slot] != nullptr && generations[slot] == generation; | ||||
|     const bool is_object_valid = (objects[slot] != nullptr) || (objects_new[slot] != nullptr); | ||||
|     return slot < table_size && is_object_valid && generations[slot] == generation; | ||||
| } | ||||
|  | ||||
| std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const { | ||||
| @@ -124,6 +157,7 @@ void HandleTable::Clear() { | ||||
|     for (u16 i = 0; i < table_size; ++i) { | ||||
|         generations[i] = static_cast<u16>(i + 1); | ||||
|         objects[i] = nullptr; | ||||
|         objects_new[i] = nullptr; | ||||
|     } | ||||
|     next_free_slot = 0; | ||||
| } | ||||
|   | ||||
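As the comment in the new HandleTable::Add() notes, a handle packs a 15-bit generation (with 0 reserved for the invalid handle) together with the slot index shifted left by 15 bits. The sketch below is not part of the patch; MakeHandle, HandleSlot and HandleGeneration are hypothetical helpers that only illustrate that bit layout.

#include <cstdint>

using Handle = uint32_t;

// Bits 0..14: generation (never 0, since 0 marks an invalid handle).
// Bits 15.. : slot index into the table arrays.
constexpr Handle MakeHandle(uint16_t slot, uint16_t generation) {
    return static_cast<Handle>(generation) | (static_cast<Handle>(slot) << 15);
}

constexpr uint16_t HandleSlot(Handle handle) {
    return static_cast<uint16_t>(handle >> 15);
}

constexpr uint16_t HandleGeneration(Handle handle) {
    return static_cast<uint16_t>(handle & ((1u << 15) - 1));
}

static_assert(HandleSlot(MakeHandle(3, 42)) == 3);
static_assert(HandleGeneration(MakeHandle(3, 42)) == 42);

IsValid() then succeeds only while the generation stored for the slot still matches the one encoded in the handle, which is what makes stale handles fail after a slot is reused.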
| @@ -9,6 +9,8 @@ | ||||
| #include <memory> | ||||
|  | ||||
| #include "common/common_types.h" | ||||
| #include "core/hle/kernel/k_auto_object.h" | ||||
| #include "core/hle/kernel/kernel.h" | ||||
| #include "core/hle/kernel/object.h" | ||||
| #include "core/hle/result.h" | ||||
|  | ||||
| @@ -87,7 +89,7 @@ public: | ||||
|      * @return `RESULT_SUCCESS` or one of the following errors: | ||||
|      *           - `ERR_INVALID_HANDLE`: an invalid handle was passed in. | ||||
|      */ | ||||
|     ResultCode Close(Handle handle); | ||||
|     bool Remove(Handle handle); | ||||
|  | ||||
|     /// Checks if a handle is valid and points to an existing object. | ||||
|     bool IsValid(Handle handle) const; | ||||
| @@ -108,12 +110,48 @@ public: | ||||
|         return DynamicObjectCast<T>(GetGeneric(handle)); | ||||
|     } | ||||
|  | ||||
|     template <typename T = KAutoObject> | ||||
|     KScopedAutoObject<T> GetObject(Handle handle) const { | ||||
|         if (handle == CurrentThread) { | ||||
|             return kernel.CurrentScheduler()->GetCurrentThread()->DynamicCast<T*>(); | ||||
|         } else if (handle == CurrentProcess) { | ||||
|             return kernel.CurrentProcess()->DynamicCast<T*>(); | ||||
|         } | ||||
|  | ||||
|         if (!IsValid(handle)) { | ||||
|             return nullptr; | ||||
|         } | ||||
|  | ||||
|         auto* obj = objects_new[static_cast<u16>(handle >> 15)]; | ||||
|         return obj->DynamicCast<T*>(); | ||||
|     } | ||||
|  | ||||
|     template <typename T = KAutoObject> | ||||
|     KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const { | ||||
|         if (!IsValid(handle)) { | ||||
|             return nullptr; | ||||
|         } | ||||
|         auto* obj = objects_new[static_cast<u16>(handle >> 15)]; | ||||
|         return obj->DynamicCast<T*>(); | ||||
|     } | ||||
|  | ||||
|     /// Closes all handles held in this table. | ||||
|     void Clear(); | ||||
|  | ||||
|     // NEW IMPL | ||||
|  | ||||
|     template <typename T> | ||||
|     ResultCode Add(Handle* out_handle, T* obj) { | ||||
|         static_assert(std::is_base_of<KAutoObject, T>::value); | ||||
|         return this->Add(out_handle, obj, obj->GetTypeObj().GetClassToken()); | ||||
|     } | ||||
|  | ||||
|     ResultCode Add(Handle* out_handle, KAutoObject* obj, u16 type); | ||||
|  | ||||
| private: | ||||
|     /// Stores the Object referenced by the handle or null if the slot is empty. | ||||
|     std::array<std::shared_ptr<Object>, MAX_COUNT> objects; | ||||
|     std::array<KAutoObject*, MAX_COUNT> objects_new{}; | ||||
|  | ||||
|     /** | ||||
|      * The value of `next_generation` when the handle was created, used to check for validity. For | ||||
|   | ||||
| @@ -291,8 +291,8 @@ private: | ||||
|     // TODO(yuriks): Check common usage of this and optimize size accordingly | ||||
|     boost::container::small_vector<Handle, 8> move_handles; | ||||
|     boost::container::small_vector<Handle, 8> copy_handles; | ||||
|     boost::container::small_vector<std::shared_ptr<Object>, 8> move_objects; | ||||
|     boost::container::small_vector<std::shared_ptr<Object>, 8> copy_objects; | ||||
|     boost::container::small_vector<Object*, 8> move_objects; | ||||
|     boost::container::small_vector<Object*, 8> copy_objects; | ||||
|     boost::container::small_vector<std::shared_ptr<SessionRequestHandler>, 8> domain_objects; | ||||
|  | ||||
|     std::optional<IPC::CommandHeader> command_header; | ||||
|   | ||||
| @@ -11,9 +11,11 @@ | ||||
| #include "common/common_types.h" | ||||
| #include "common/intrusive_red_black_tree.h" | ||||
| #include "core/hle/kernel/k_class_token.h" | ||||
| #include "core/hle/kernel/object.h" | ||||
|  | ||||
| namespace Kernel { | ||||
|  | ||||
| class KernelCore; | ||||
| class Process; | ||||
|  | ||||
| #define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS)                                                \ | ||||
| @@ -46,7 +48,7 @@ public: | ||||
|                                                                                                    \ | ||||
| private: | ||||
|  | ||||
| class KAutoObject { | ||||
| class KAutoObject : public Object { | ||||
| protected: | ||||
|     class TypeObj { | ||||
|     private: | ||||
| @@ -84,11 +86,14 @@ private: | ||||
| private: | ||||
|     std::atomic<u32> m_ref_count; | ||||
|  | ||||
| protected: | ||||
|     KernelCore& kernel; | ||||
|  | ||||
| public: | ||||
|     static KAutoObject* Create(KAutoObject* ptr); | ||||
|  | ||||
| public: | ||||
|     constexpr explicit KAutoObject() : m_ref_count(0) {} | ||||
|     explicit KAutoObject(KernelCore& kernel_) : Object{kernel_}, m_ref_count(0), kernel(kernel_) {} | ||||
|     virtual ~KAutoObject() {} | ||||
|  | ||||
|     // Destroy is responsible for destroying the auto object's resources when ref_count hits zero. | ||||
| @@ -97,9 +102,7 @@ public: | ||||
|     } | ||||
|  | ||||
|     // Finalize is responsible for cleaning up resource, but does not destroy the object. | ||||
|     virtual void Finalize() { | ||||
|         UNIMPLEMENTED(); | ||||
|     } | ||||
|     virtual void Finalize() {} | ||||
|  | ||||
|     virtual Process* GetOwner() const { | ||||
|         return nullptr; | ||||
| @@ -179,7 +182,12 @@ private: | ||||
| private: | ||||
|     Common::IntrusiveRedBlackTreeNode list_node; | ||||
|  | ||||
| protected: | ||||
|     KernelCore& kernel; | ||||
|  | ||||
| public: | ||||
|     explicit KAutoObjectWithList(KernelCore& kernel_) : KAutoObject(kernel_), kernel(kernel_) {} | ||||
|  | ||||
|     static int Compare(const KAutoObjectWithList& lhs, const KAutoObjectWithList& rhs) { | ||||
|         const u64 lid = lhs.GetId(); | ||||
|         const u64 rid = rhs.GetId(); | ||||
| @@ -208,7 +216,7 @@ private: | ||||
|     friend class KScopedAutoObject; | ||||
|  | ||||
| private: | ||||
|     T* m_obj; | ||||
|     T* m_obj{}; | ||||
|  | ||||
| private: | ||||
|     constexpr void Swap(KScopedAutoObject& rhs) { | ||||
| @@ -216,8 +224,8 @@ private: | ||||
|     } | ||||
|  | ||||
| public: | ||||
|     constexpr KScopedAutoObject() : m_obj(nullptr) { // ... | ||||
|     } | ||||
|     constexpr KScopedAutoObject() = default; | ||||
|  | ||||
|     constexpr KScopedAutoObject(T* o) : m_obj(o) { | ||||
|         if (m_obj != nullptr) { | ||||
|             m_obj->Open(); | ||||
| @@ -273,6 +281,10 @@ public: | ||||
|         return m_obj; | ||||
|     } | ||||
|  | ||||
|     constexpr T* GetPointerUnsafe() const { | ||||
|         return m_obj; | ||||
|     } | ||||
|  | ||||
|     constexpr T* ReleasePointerUnsafe() { | ||||
|         T* ret = m_obj; | ||||
|         m_obj = nullptr; | ||||
|   | ||||
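The Open()/Close()/Destroy() trio above is intrusive reference counting: Open() bumps the count, Close() drops it, and Destroy() runs when the count reaches zero, with KScopedAutoObject performing the Open/Close pair automatically for the duration of a scope. The classes below are simplified stand-ins written for illustration only; they are not the yuzu types.

#include <atomic>
#include <cstdint>
#include <cstdio>

class RefCounted {
public:
    virtual ~RefCounted() = default;

    void Open() {
        m_ref_count.fetch_add(1, std::memory_order_relaxed);
    }

    void Close() {
        // Destroy the object when the last reference goes away.
        if (m_ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
            Destroy();
        }
    }

    virtual void Destroy() {
        std::puts("last reference dropped");
    }

private:
    std::atomic<uint32_t> m_ref_count{1}; // creator holds the initial reference
};

template <typename T>
class ScopedRef {
public:
    explicit ScopedRef(T* obj) : m_obj(obj) {
        if (m_obj != nullptr) {
            m_obj->Open(); // take a reference for the lifetime of the guard
        }
    }
    ~ScopedRef() {
        if (m_obj != nullptr) {
            m_obj->Close(); // release it when the scope ends
        }
    }
    T* operator->() const { return m_obj; }
    bool IsNull() const { return m_obj == nullptr; }

private:
    T* m_obj{};
};

This is why the handle table's GetObject() can hand back a guard instead of a shared_ptr: the caller keeps the object alive only for as long as the guard lives.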
| src/core/hle/kernel/k_class_token.cpp (new file, 7 lines) | ||||
| @@ -0,0 +1,7 @@ | ||||
| // Copyright 2021 yuzu Emulator Project | ||||
| // Licensed under GPLv2 or any later version | ||||
| // Refer to the license.txt file included. | ||||
|  | ||||
| #include "core/hle/kernel/k_class_token.h" | ||||
|  | ||||
| namespace Kernel {} // namespace Kernel | ||||
| src/core/hle/kernel/k_class_token.h (new file, 131 lines) | ||||
| @@ -0,0 +1,131 @@ | ||||
| // Copyright 2021 yuzu Emulator Project | ||||
| // Licensed under GPLv2 or any later version | ||||
| // Refer to the license.txt file included. | ||||
|  | ||||
| #pragma once | ||||
|  | ||||
| #include <atomic> | ||||
|  | ||||
| #include "common/assert.h" | ||||
| #include "common/bit_util.h" | ||||
| #include "common/common_types.h" | ||||
|  | ||||
| namespace Kernel { | ||||
|  | ||||
| class KAutoObject; | ||||
|  | ||||
| class KClassTokenGenerator { | ||||
| public: | ||||
|     using TokenBaseType = u16; | ||||
|  | ||||
| public: | ||||
|     static constexpr size_t BaseClassBits = 8; | ||||
|     static constexpr size_t FinalClassBits = (sizeof(TokenBaseType) * CHAR_BIT) - BaseClassBits; | ||||
|     // One bit per base class. | ||||
|     static constexpr size_t NumBaseClasses = BaseClassBits; | ||||
|     // Final classes are permutations of three bits. | ||||
|     static constexpr size_t NumFinalClasses = [] { | ||||
|         TokenBaseType index = 0; | ||||
|         for (size_t i = 0; i < FinalClassBits; i++) { | ||||
|             for (size_t j = i + 1; j < FinalClassBits; j++) { | ||||
|                 for (size_t k = j + 1; k < FinalClassBits; k++) { | ||||
|                     index++; | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|         return index; | ||||
|     }(); | ||||
|  | ||||
| private: | ||||
|     template <TokenBaseType Index> | ||||
|     static constexpr inline TokenBaseType BaseClassToken = BIT(Index); | ||||
|  | ||||
|     template <TokenBaseType Index> | ||||
|     static constexpr inline TokenBaseType FinalClassToken = [] { | ||||
|         TokenBaseType index = 0; | ||||
|         for (size_t i = 0; i < FinalClassBits; i++) { | ||||
|             for (size_t j = i + 1; j < FinalClassBits; j++) { | ||||
|                 for (size_t k = j + 1; k < FinalClassBits; k++) { | ||||
|                     if ((index++) == Index) { | ||||
|                         return static_cast<TokenBaseType>(((1ULL << i) | (1ULL << j) | (1ULL << k)) | ||||
|                                                           << BaseClassBits); | ||||
|                     } | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|     }(); | ||||
|  | ||||
|     template <typename T> | ||||
|     static constexpr inline TokenBaseType GetClassToken() { | ||||
|         static_assert(std::is_base_of<KAutoObject, T>::value); | ||||
|         if constexpr (std::is_same<T, KAutoObject>::value) { | ||||
|             static_assert(T::ObjectType == ObjectType::KAutoObject); | ||||
|             return 0; | ||||
|         } else if constexpr (!std::is_final<T>::value) { | ||||
|             static_assert(ObjectType::BaseClassesStart <= T::ObjectType && | ||||
|                           T::ObjectType < ObjectType::BaseClassesEnd); | ||||
|             constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) - | ||||
|                                         static_cast<TokenBaseType>(ObjectType::BaseClassesStart); | ||||
|             return BaseClassToken<ClassIndex> | GetClassToken<typename T::BaseClass>(); | ||||
|         } else if constexpr (ObjectType::FinalClassesStart <= T::ObjectType && | ||||
|                              T::ObjectType < ObjectType::FinalClassesEnd) { | ||||
|             constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) - | ||||
|                                         static_cast<TokenBaseType>(ObjectType::FinalClassesStart); | ||||
|             return FinalClassToken<ClassIndex> | GetClassToken<typename T::BaseClass>(); | ||||
|         } else { | ||||
|             static_assert(!std::is_same<T, T>::value, "GetClassToken: Invalid Type"); | ||||
|         } | ||||
|     }; | ||||
|  | ||||
| public: | ||||
|     enum class ObjectType { | ||||
|         KAutoObject, | ||||
|  | ||||
|         BaseClassesStart, | ||||
|  | ||||
|         KSynchronizationObject = BaseClassesStart, | ||||
|         KReadableEvent, | ||||
|  | ||||
|         BaseClassesEnd, | ||||
|  | ||||
|         FinalClassesStart = BaseClassesEnd, | ||||
|  | ||||
|         KInterruptEvent = FinalClassesStart, | ||||
|         KDebug, | ||||
|         KThread, | ||||
|         KServerPort, | ||||
|         KServerSession, | ||||
|         KClientPort, | ||||
|         KClientSession, | ||||
|         Process, | ||||
|         KResourceLimit, | ||||
|         KLightSession, | ||||
|         KPort, | ||||
|         KSession, | ||||
|         KSharedMemory, | ||||
|         KEvent, | ||||
|         KWritableEvent, | ||||
|         KLightClientSession, | ||||
|         KLightServerSession, | ||||
|         KTransferMemory, | ||||
|         KDeviceAddressSpace, | ||||
|         KSessionRequest, | ||||
|         KCodeMemory, | ||||
|  | ||||
|         // NOTE: True order for these has not been determined yet. | ||||
|         KAlpha, | ||||
|         KBeta, | ||||
|  | ||||
|         FinalClassesEnd = FinalClassesStart + NumFinalClasses, | ||||
|     }; | ||||
|  | ||||
|     template <typename T> | ||||
|     static constexpr inline TokenBaseType ClassToken = GetClassToken<T>(); | ||||
| }; | ||||
|  | ||||
| using ClassTokenType = KClassTokenGenerator::TokenBaseType; | ||||
|  | ||||
| template <typename T> | ||||
| static constexpr inline ClassTokenType ClassToken = KClassTokenGenerator::ClassToken<T>; | ||||
|  | ||||
| } // namespace Kernel | ||||
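KClassTokenGenerator encodes the class hierarchy into a 16-bit token: each non-final base class owns one bit in the low byte, each final class gets a unique 3-of-8 bit pattern in the high byte, and every class ORs its own bits into its base class's token. Below is a rough sketch of the arithmetic and of the mask test this layout enables; TokenDerivesFrom is a hypothetical helper, not something the patch adds.

#include <cstddef>
#include <cstdint>

// With 8 final-class bits, the number of distinct 3-of-8 patterns is C(8,3):
constexpr size_t NumThreeBitPatterns = [] {
    size_t count = 0;
    for (size_t i = 0; i < 8; i++) {
        for (size_t j = i + 1; j < 8; j++) {
            for (size_t k = j + 1; k < 8; k++) {
                count++;
            }
        }
    }
    return count;
}();
static_assert(NumThreeBitPatterns == 56);

// Because a derived class's token contains all of its base class's bits,
// an "is this object a Base?" check reduces to a bitwise mask test:
constexpr bool TokenDerivesFrom(uint16_t token, uint16_t base_token) {
    return (token & base_token) == base_token;
}

That containment property is presumably what the DynamicCast<T*>() calls elsewhere in this change rely on instead of RTTI.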
| @@ -7,6 +7,7 @@ | ||||
| #include "core/arm/exclusive_monitor.h" | ||||
| #include "core/core.h" | ||||
| #include "core/hle/kernel/k_condition_variable.h" | ||||
| #include "core/hle/kernel/k_linked_list.h" | ||||
| #include "core/hle/kernel/k_scheduler.h" | ||||
| #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" | ||||
| #include "core/hle/kernel/k_synchronization_object.h" | ||||
| @@ -107,8 +108,8 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val | ||||
|  | ||||
|     // Wait for the address. | ||||
|     { | ||||
|         std::shared_ptr<KThread> owner_thread; | ||||
|         ASSERT(!owner_thread); | ||||
|         KScopedAutoObject<KThread> owner_thread; | ||||
|         ASSERT(owner_thread.IsNull()); | ||||
|         { | ||||
|             KScopedSchedulerLock sl(kernel); | ||||
|             cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS); | ||||
| @@ -126,8 +127,10 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val | ||||
|                 R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS); | ||||
|  | ||||
|                 // Get the lock owner thread. | ||||
|                 owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(handle); | ||||
|                 R_UNLESS(owner_thread, ResultInvalidHandle); | ||||
|                 owner_thread = | ||||
|                     kernel.CurrentProcess()->GetHandleTable().GetObjectWithoutPseudoHandle<KThread>( | ||||
|                         handle); | ||||
|                 R_UNLESS(owner_thread.IsNotNull(), ResultInvalidHandle); | ||||
|  | ||||
|                 // Update the lock. | ||||
|                 cur_thread->SetAddressKey(addr, value); | ||||
| @@ -137,7 +140,7 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val | ||||
|                 cur_thread->SetMutexWaitAddressForDebugging(addr); | ||||
|             } | ||||
|         } | ||||
|         ASSERT(owner_thread); | ||||
|         ASSERT(owner_thread.IsNotNull()); | ||||
|     } | ||||
|  | ||||
|     // Remove the thread as a waiter from the lock owner. | ||||
| @@ -182,13 +185,16 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) { | ||||
|             thread->Wakeup(); | ||||
|         } else { | ||||
|             // Get the previous owner. | ||||
|             auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>( | ||||
|                 prev_tag & ~Svc::HandleWaitMask); | ||||
|             KThread* owner_thread = | ||||
|                 kernel.CurrentProcess()->GetHandleTable() | ||||
|                     .GetObjectWithoutPseudoHandle<KThread>( | ||||
|                         static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask)) | ||||
|                     .ReleasePointerUnsafe(); | ||||
|  | ||||
|             if (owner_thread) { | ||||
|                 // Add the thread as a waiter on the owner. | ||||
|                 owner_thread->AddWaiter(thread); | ||||
|                 thread_to_close = owner_thread.get(); | ||||
|                 thread_to_close = owner_thread; | ||||
|             } else { | ||||
|                 // The lock was tagged with a thread that doesn't exist. | ||||
|                 thread->SetSyncedObject(nullptr, ResultInvalidState); | ||||
| @@ -208,9 +214,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) { | ||||
|     // Prepare for signaling. | ||||
|     constexpr int MaxThreads = 16; | ||||
|  | ||||
|     // TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using | ||||
|     // std::shared_ptr. | ||||
|     std::vector<std::shared_ptr<KThread>> thread_list; | ||||
|     KLinkedList<KThread> thread_list; | ||||
|     std::array<KThread*, MaxThreads> thread_array; | ||||
|     s32 num_to_close{}; | ||||
|  | ||||
| @@ -228,7 +232,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) { | ||||
|                 if (num_to_close < MaxThreads) { | ||||
|                     thread_array[num_to_close++] = thread; | ||||
|                 } else { | ||||
|                     thread_list.push_back(SharedFrom(thread)); | ||||
|                     thread_list.push_back(*thread); | ||||
|                 } | ||||
|             } | ||||
|  | ||||
| @@ -251,7 +255,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) { | ||||
|  | ||||
|     // Close threads in the list. | ||||
|     for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) { | ||||
|         (*it)->Close(); | ||||
|         (*it).Close(); | ||||
|     } | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -148,6 +148,14 @@ public: | ||||
|         return obj; | ||||
|     } | ||||
|  | ||||
|     T* AllocateWithKernel(KernelCore& kernel) { | ||||
|         T* obj = static_cast<T*>(AllocateImpl()); | ||||
|         if (obj != nullptr) { | ||||
|             new (obj) T(kernel); | ||||
|         } | ||||
|         return obj; | ||||
|     } | ||||
|  | ||||
|     void Free(T* obj) { | ||||
|         FreeImpl(obj); | ||||
|     } | ||||
|   | ||||
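AllocateWithKernel() mirrors Allocate(): the slab heap hands back raw, uninitialized storage and the object is then constructed in place with placement new so the constructor can receive the KernelCore reference. A tiny illustration of that step, using made-up types rather than the slab heap itself:

#include <new>

struct FakeKernel {};

struct Payload {
    explicit Payload(FakeKernel& kernel_) : kernel(kernel_) {}
    FakeKernel& kernel;
};

// 'storage' stands in for a slot returned by the slab heap's AllocateImpl().
Payload* ConstructInSlot(void* storage, FakeKernel& kernel) {
    if (storage == nullptr) {
        return nullptr; // same null check as the real allocator
    }
    return new (storage) Payload(kernel); // construct in place, no heap allocation
}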
| @@ -13,6 +13,11 @@ | ||||
|  | ||||
| namespace Kernel { | ||||
|  | ||||
| void KSynchronizationObject::Finalize() { | ||||
|     this->OnFinalizeSynchronizationObject(); | ||||
|     KAutoObject::Finalize(); | ||||
| } | ||||
|  | ||||
| ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index, | ||||
|                                         KSynchronizationObject** objects, const s32 num_objects, | ||||
|                                         s64 timeout) { | ||||
| @@ -130,10 +135,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index, | ||||
|     return wait_result; | ||||
| } | ||||
|  | ||||
| KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : Object{kernel} {} | ||||
|  | ||||
| KSynchronizationObject::KSynchronizationObject(KernelCore& kernel, std::string&& name) | ||||
|     : Object{kernel, std::move(name)} {} | ||||
| KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : KAutoObjectWithList{kernel} {} | ||||
|  | ||||
| KSynchronizationObject::~KSynchronizationObject() = default; | ||||
|  | ||||
|   | ||||
| @@ -6,7 +6,7 @@ | ||||
|  | ||||
| #include <vector> | ||||
|  | ||||
| #include "core/hle/kernel/object.h" | ||||
| #include "core/hle/kernel/k_auto_object.h" | ||||
| #include "core/hle/result.h" | ||||
|  | ||||
| namespace Kernel { | ||||
| @@ -16,7 +16,9 @@ class Synchronization; | ||||
| class KThread; | ||||
|  | ||||
| /// Class that represents a Kernel object that a thread can be waiting on | ||||
| class KSynchronizationObject : public Object { | ||||
| class KSynchronizationObject : public KAutoObjectWithList { | ||||
|     KERNEL_AUTOOBJECT_TRAITS(KSynchronizationObject, KAutoObject); | ||||
|  | ||||
| public: | ||||
|     struct ThreadListNode { | ||||
|         ThreadListNode* next{}; | ||||
| @@ -27,15 +29,18 @@ public: | ||||
|                                          KSynchronizationObject** objects, const s32 num_objects, | ||||
|                                          s64 timeout); | ||||
|  | ||||
|     virtual void Finalize() override; | ||||
|  | ||||
|     [[nodiscard]] virtual bool IsSignaled() const = 0; | ||||
|  | ||||
|     [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const; | ||||
|  | ||||
| protected: | ||||
|     explicit KSynchronizationObject(KernelCore& kernel); | ||||
|     explicit KSynchronizationObject(KernelCore& kernel, std::string&& name); | ||||
|     virtual ~KSynchronizationObject(); | ||||
|  | ||||
|     virtual void OnFinalizeSynchronizationObject() {} | ||||
|  | ||||
|     void NotifyAvailable(ResultCode result); | ||||
|     void NotifyAvailable() { | ||||
|         return this->NotifyAvailable(RESULT_SUCCESS); | ||||
|   | ||||
| @@ -28,6 +28,7 @@ | ||||
| #include "core/hardware_properties.h" | ||||
| #include "core/hle/kernel/client_port.h" | ||||
| #include "core/hle/kernel/handle_table.h" | ||||
| #include "core/hle/kernel/init/init_slab_setup.h" | ||||
| #include "core/hle/kernel/k_memory_layout.h" | ||||
| #include "core/hle/kernel/k_memory_manager.h" | ||||
| #include "core/hle/kernel/k_resource_limit.h" | ||||
| @@ -51,7 +52,8 @@ namespace Kernel { | ||||
|  | ||||
| struct KernelCore::Impl { | ||||
|     explicit Impl(Core::System& system, KernelCore& kernel) | ||||
|         : time_manager{system}, global_handle_table{kernel}, system{system} {} | ||||
|         : time_manager{system}, global_handle_table{kernel}, | ||||
|           object_list_container{kernel}, system{system} {} | ||||
|  | ||||
|     void SetMulticore(bool is_multicore) { | ||||
|         this->is_multicore = is_multicore; | ||||
| @@ -69,9 +71,12 @@ struct KernelCore::Impl { | ||||
|         // Derive the initial memory layout from the emulated board | ||||
|         KMemoryLayout memory_layout; | ||||
|         DeriveInitialMemoryLayout(memory_layout); | ||||
|         Init::InitializeSlabHeaps(system, memory_layout); | ||||
|  | ||||
|         // Initialize kernel memory and resources. | ||||
|         InitializeMemoryLayout(memory_layout); | ||||
|         InitializeSystemResourceLimit(kernel, system.CoreTiming(), memory_layout); | ||||
|         InitializeSlabHeaps(); | ||||
|         InitializePageSlab(); | ||||
|         InitializeSchedulers(); | ||||
|         InitializeSuspendThreads(); | ||||
|         InitializePreemption(kernel); | ||||
| @@ -99,7 +104,7 @@ struct KernelCore::Impl { | ||||
|  | ||||
|         for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { | ||||
|             if (suspend_threads[i]) { | ||||
|                 suspend_threads[i].reset(); | ||||
|                 suspend_threads[i]->Close(); | ||||
|             } | ||||
|         } | ||||
|  | ||||
| @@ -189,15 +194,12 @@ struct KernelCore::Impl { | ||||
|     } | ||||
|  | ||||
|     void InitializeSuspendThreads() { | ||||
|         for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { | ||||
|             std::string name = "Suspend Thread Id:" + std::to_string(i); | ||||
|             std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc(); | ||||
|             void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); | ||||
|             auto thread_res = KThread::CreateThread( | ||||
|                 system, ThreadType::HighPriority, std::move(name), 0, 0, 0, static_cast<u32>(i), 0, | ||||
|                 nullptr, std::move(init_func), init_func_parameter); | ||||
|  | ||||
|             suspend_threads[i] = std::move(thread_res).Unwrap(); | ||||
|         for (s32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | ||||
|             suspend_threads[core_id] = KThread::CreateWithKernel(system.Kernel()); | ||||
|             ASSERT(KThread::InitializeHighPriorityThread(system, suspend_threads[core_id], {}, {}, | ||||
|                                                          core_id) | ||||
|                        .IsSuccess()); | ||||
|             suspend_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id)); | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @@ -232,12 +234,15 @@ struct KernelCore::Impl { | ||||
|  | ||||
|     // Gets the dummy KThread for the caller, allocating a new one if this is the first time | ||||
|     KThread* GetHostDummyThread() { | ||||
|         const thread_local auto thread = | ||||
|             KThread::CreateThread( | ||||
|                 system, ThreadType::Main, fmt::format("DummyThread:{}", GetHostThreadId()), 0, | ||||
|                 KThread::DefaultThreadPriority, 0, static_cast<u32>(3), 0, nullptr) | ||||
|                 .Unwrap(); | ||||
|         return thread.get(); | ||||
|         auto make_thread = [this]() { | ||||
|             KThread* thread = KThread::CreateWithKernel(system.Kernel()); | ||||
|             ASSERT(KThread::InitializeDummyThread(thread).IsSuccess()); | ||||
|             thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId())); | ||||
|             return thread; | ||||
|         }; | ||||
|  | ||||
|         thread_local auto thread = make_thread(); | ||||
|         return thread; | ||||
|     } | ||||
|  | ||||
|     /// Registers a CPU core thread by allocating a host thread ID for it | ||||
| @@ -371,7 +376,8 @@ struct KernelCore::Impl { | ||||
|         const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit(); | ||||
|  | ||||
|         // Determine the size of the slab region. | ||||
|         const size_t slab_region_size = Common::AlignUp(KernelSlabHeapSize, PageSize); | ||||
|         const size_t slab_region_size = | ||||
|             Common::AlignUp(Init::CalculateTotalSlabHeapSize(), PageSize); | ||||
|         ASSERT(slab_region_size <= resource_region_size); | ||||
|  | ||||
|         // Setup the slab region. | ||||
| @@ -587,7 +593,7 @@ struct KernelCore::Impl { | ||||
|             "Time:SharedMemory"); | ||||
|     } | ||||
|  | ||||
|     void InitializeSlabHeaps() { | ||||
|     void InitializePageSlab() { | ||||
|         // Allocate slab heaps | ||||
|         user_slab_heap_pages = std::make_unique<KSlabHeap<Page>>(); | ||||
|  | ||||
| @@ -596,7 +602,7 @@ struct KernelCore::Impl { | ||||
|         // Reserve slab heaps | ||||
|         ASSERT( | ||||
|             system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size)); | ||||
|         // Initialize slab heaps | ||||
|         // Initialize slab heap | ||||
|         user_slab_heap_pages->Initialize( | ||||
|             system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase), | ||||
|             user_slab_heap_size); | ||||
| @@ -621,6 +627,8 @@ struct KernelCore::Impl { | ||||
|     // stores all the objects in place. | ||||
|     HandleTable global_handle_table; | ||||
|  | ||||
|     KAutoObjectWithListContainer object_list_container; | ||||
|  | ||||
|     /// Map of named ports managed by the kernel, which can be retrieved using | ||||
|     /// the ConnectToPort SVC. | ||||
|     NamedPortTable named_ports; | ||||
| @@ -648,7 +656,7 @@ struct KernelCore::Impl { | ||||
|     // the release of itself | ||||
|     std::unique_ptr<Common::ThreadWorker> service_thread_manager; | ||||
|  | ||||
|     std::array<std::shared_ptr<KThread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{}; | ||||
|     std::array<KThread*, Core::Hardware::NUM_CPU_CORES> suspend_threads{}; | ||||
|     std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{}; | ||||
|     std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{}; | ||||
|  | ||||
| @@ -687,8 +695,8 @@ std::shared_ptr<KResourceLimit> KernelCore::GetSystemResourceLimit() const { | ||||
|     return impl->system_resource_limit; | ||||
| } | ||||
|  | ||||
| std::shared_ptr<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const { | ||||
|     return impl->global_handle_table.Get<KThread>(handle); | ||||
| KScopedAutoObject<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const { | ||||
|     return impl->global_handle_table.GetObject<KThread>(handle); | ||||
| } | ||||
|  | ||||
| void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) { | ||||
| @@ -781,6 +789,14 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const { | ||||
|     return *impl->exclusive_monitor; | ||||
| } | ||||
|  | ||||
| KAutoObjectWithListContainer& KernelCore::ObjectListContainer() { | ||||
|     return impl->object_list_container; | ||||
| } | ||||
|  | ||||
| const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const { | ||||
|     return impl->object_list_container; | ||||
| } | ||||
|  | ||||
| void KernelCore::InvalidateAllInstructionCaches() { | ||||
|     for (auto& physical_core : impl->cores) { | ||||
|         physical_core.ArmInterface().ClearInstructionCache(); | ||||
| @@ -960,4 +976,12 @@ void KernelCore::SetIsPhantomModeForSingleCore(bool value) { | ||||
|     impl->SetIsPhantomModeForSingleCore(value); | ||||
| } | ||||
|  | ||||
| Core::System& KernelCore::System() { | ||||
|     return impl->system; | ||||
| } | ||||
|  | ||||
| const Core::System& KernelCore::System() const { | ||||
|     return impl->system; | ||||
| } | ||||
|  | ||||
| } // namespace Kernel | ||||
|   | ||||
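GetHostDummyThread() above now builds the thread through a factory lambda assigned to a function-local thread_local, so every host thread lazily creates its own KThread on first call and reuses it afterwards. A small, self-contained sketch of that idiom with a placeholder type:

#include <cstdio>

struct PerThreadState {
    explicit PerThreadState(int host_id_) : host_id(host_id_) {
        std::printf("created state for host thread %d\n", host_id);
    }
    int host_id;
};

PerThreadState* GetPerThreadState(int host_id) {
    auto make_state = [host_id] { return new PerThreadState(host_id); };
    // Initialized once per host thread, on the first call from that thread;
    // deliberately never freed here, matching a per-thread singleton.
    thread_local PerThreadState* state = make_state();
    return state;
}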
| @@ -13,6 +13,7 @@ | ||||
| #include "core/hardware_properties.h" | ||||
| #include "core/hle/kernel/memory_types.h" | ||||
| #include "core/hle/kernel/object.h" | ||||
| #include "core/hle/kernel/k_auto_object.h" | ||||
|  | ||||
| namespace Core { | ||||
| class CPUInterruptHandler; | ||||
| @@ -30,6 +31,7 @@ namespace Kernel { | ||||
| class ClientPort; | ||||
| class GlobalSchedulerContext; | ||||
| class HandleTable; | ||||
| class KAutoObjectWithListContainer; | ||||
| class KMemoryManager; | ||||
| class KResourceLimit; | ||||
| class KScheduler; | ||||
| @@ -86,7 +88,7 @@ public: | ||||
|     std::shared_ptr<KResourceLimit> GetSystemResourceLimit() const; | ||||
|  | ||||
|     /// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table. | ||||
|     std::shared_ptr<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const; | ||||
|     KScopedAutoObject<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const; | ||||
|  | ||||
|     /// Adds the given shared pointer to an internal list of active processes. | ||||
|     void AppendNewProcess(std::shared_ptr<Process> process); | ||||
| @@ -143,6 +145,10 @@ public: | ||||
|  | ||||
|     const Core::ExclusiveMonitor& GetExclusiveMonitor() const; | ||||
|  | ||||
|     KAutoObjectWithListContainer& ObjectListContainer(); | ||||
|  | ||||
|     const KAutoObjectWithListContainer& ObjectListContainer() const; | ||||
|  | ||||
|     std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts(); | ||||
|  | ||||
|     const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts() const; | ||||
| @@ -243,6 +249,9 @@ public: | ||||
|     bool IsPhantomModeForSingleCore() const; | ||||
|     void SetIsPhantomModeForSingleCore(bool value); | ||||
|  | ||||
|     Core::System& System(); | ||||
|     const Core::System& System() const; | ||||
|  | ||||
| private: | ||||
|     friend class Object; | ||||
|     friend class Process; | ||||
|   | ||||
| @@ -40,14 +40,15 @@ namespace { | ||||
| void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) { | ||||
|     const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart(); | ||||
|     ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1)); | ||||
|     auto thread_res = | ||||
|         KThread::CreateUserThread(system, ThreadType::User, "main", entry_point, priority, 0, | ||||
|                                   owner_process.GetIdealCoreId(), stack_top, &owner_process); | ||||
|  | ||||
|     std::shared_ptr<KThread> thread = std::move(thread_res).Unwrap(); | ||||
|     KThread* thread = KThread::CreateWithKernel(system.Kernel()); | ||||
|     ASSERT(KThread::InitializeUserThread(system, thread, entry_point, 0, stack_top, priority, | ||||
|                                          owner_process.GetIdealCoreId(), &owner_process) | ||||
|                .IsSuccess()); | ||||
|  | ||||
|     // Register 1 must be a handle to the main thread | ||||
|     const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap(); | ||||
|     Handle thread_handle{}; | ||||
|     owner_process.GetHandleTable().Add(&thread_handle, thread); | ||||
|     thread->GetContext32().cpu_registers[0] = 0; | ||||
|     thread->GetContext64().cpu_registers[0] = 0; | ||||
|     thread->GetContext32().cpu_registers[1] = thread_handle; | ||||
| @@ -337,12 +338,12 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) { | ||||
| void Process::PrepareForTermination() { | ||||
|     ChangeStatus(ProcessStatus::Exiting); | ||||
|  | ||||
|     const auto stop_threads = [this](const std::vector<std::shared_ptr<KThread>>& thread_list) { | ||||
|     const auto stop_threads = [this](const std::vector<KThread*>& thread_list) { | ||||
|         for (auto& thread : thread_list) { | ||||
|             if (thread->GetOwnerProcess() != this) | ||||
|                 continue; | ||||
|  | ||||
|             if (thread.get() == kernel.CurrentScheduler()->GetCurrentThread()) | ||||
|             if (thread == kernel.CurrentScheduler()->GetCurrentThread()) | ||||
|                 continue; | ||||
|  | ||||
|             // TODO(Subv): When are the other running/ready threads terminated? | ||||
|   | ||||
| @@ -14,6 +14,7 @@ | ||||
| #include "core/hle/kernel/k_auto_object_container.h" | ||||
| #include "core/hle/kernel/k_light_lock.h" | ||||
| #include "core/hle/kernel/k_slab_heap.h" | ||||
| #include "core/hle/kernel/kernel.h" | ||||
|  | ||||
| namespace Kernel { | ||||
|  | ||||
| @@ -66,13 +67,17 @@ class KAutoObjectWithSlabHeapAndContainer : public Base { | ||||
|  | ||||
| private: | ||||
|     static inline KSlabHeap<Derived> s_slab_heap; | ||||
|     static inline KAutoObjectWithListContainer s_container; | ||||
|     KernelCore& m_kernel; | ||||
|  | ||||
| private: | ||||
|     static Derived* Allocate() { | ||||
|         return s_slab_heap.Allocate(); | ||||
|     } | ||||
|  | ||||
|     static Derived* AllocateWithKernel(KernelCore& kernel) { | ||||
|         return s_slab_heap.AllocateWithKernel(kernel); | ||||
|     } | ||||
|  | ||||
|     static void Free(Derived* obj) { | ||||
|         s_slab_heap.Free(obj); | ||||
|     } | ||||
| @@ -80,19 +85,20 @@ private: | ||||
| public: | ||||
|     class ListAccessor : public KAutoObjectWithListContainer::ListAccessor { | ||||
|     public: | ||||
|         ListAccessor() : KAutoObjectWithListContainer::ListAccessor(s_container) {} | ||||
|         ListAccessor() | ||||
|             : KAutoObjectWithListContainer::ListAccessor(m_kernel.ObjectListContainer()) {} | ||||
|         ~ListAccessor() = default; | ||||
|     }; | ||||
|  | ||||
| public: | ||||
|     constexpr KAutoObjectWithSlabHeapAndContainer() : Base() {} | ||||
|     KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel) : Base(kernel), m_kernel(kernel) {} | ||||
|     virtual ~KAutoObjectWithSlabHeapAndContainer() {} | ||||
|  | ||||
|     virtual void Destroy() override { | ||||
|         const bool is_initialized = this->IsInitialized(); | ||||
|         uintptr_t arg = 0; | ||||
|         if (is_initialized) { | ||||
|             s_container.Unregister(this); | ||||
|             m_kernel.ObjectListContainer().Unregister(this); | ||||
|             arg = this->GetPostDestroyArgument(); | ||||
|             this->Finalize(); | ||||
|         } | ||||
| @@ -114,21 +120,29 @@ public: | ||||
|     } | ||||
|  | ||||
| public: | ||||
|     static void InitializeSlabHeap(void* memory, size_t memory_size) { | ||||
|     static void InitializeSlabHeap(KernelCore& kernel, void* memory, size_t memory_size) { | ||||
|         s_slab_heap.Initialize(memory, memory_size); | ||||
|         s_container.Initialize(); | ||||
|         kernel.ObjectListContainer().Initialize(); | ||||
|     } | ||||
|  | ||||
|     static Derived* Create() { | ||||
|         Derived* obj = Allocate(); | ||||
|         if (AMS_LIKELY(obj != nullptr)) { | ||||
|         if (obj != nullptr) { | ||||
|             KAutoObject::Create(obj); | ||||
|         } | ||||
|         return obj; | ||||
|     } | ||||
|  | ||||
|     static void Register(Derived* obj) { | ||||
|         return s_container.Register(obj); | ||||
|     static Derived* CreateWithKernel(KernelCore& kernel) { | ||||
|         Derived* obj = AllocateWithKernel(kernel); | ||||
|         if (obj != nullptr) { | ||||
|             KAutoObject::Create(obj); | ||||
|         } | ||||
|         return obj; | ||||
|     } | ||||
|  | ||||
|     static void Register(KernelCore& kernel, Derived* obj) { | ||||
|         return kernel.ObjectListContainer().Register(obj); | ||||
|     } | ||||
|  | ||||
|     static size_t GetObjectSize() { | ||||
|   | ||||
| @@ -355,7 +355,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) { | ||||
|         KScopedSchedulerLock lock(kernel); | ||||
|         thread->SetState(ThreadState::Waiting); | ||||
|         thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); | ||||
|         session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming()); | ||||
|         session->SendSyncRequest(thread, system.Memory(), system.CoreTiming()); | ||||
|     } | ||||
|  | ||||
|     KSynchronizationObject* dummy{}; | ||||
| @@ -368,18 +368,13 @@ static ResultCode SendSyncRequest32(Core::System& system, Handle handle) { | ||||
|  | ||||
| /// Get the ID for the specified thread. | ||||
| static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) { | ||||
|     LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle); | ||||
|  | ||||
|     // Get the thread from its handle. | ||||
|     const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | ||||
|     const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); | ||||
|     if (!thread) { | ||||
|         LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle); | ||||
|         return ResultInvalidHandle; | ||||
|     } | ||||
|     KScopedAutoObject thread = | ||||
|         system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle); | ||||
|     R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); | ||||
|  | ||||
|     // Get the thread's id. | ||||
|     *out_thread_id = thread->GetThreadID(); | ||||
|     *out_thread_id = thread->GetId(); | ||||
|     return RESULT_SUCCESS; | ||||
| } | ||||
|  | ||||
| @@ -396,30 +391,7 @@ static ResultCode GetThreadId32(Core::System& system, u32* out_thread_id_low, | ||||
|  | ||||
| /// Gets the ID of the specified process or a specified thread's owning process. | ||||
| static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle handle) { | ||||
|     LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle); | ||||
|  | ||||
|     const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | ||||
|     const std::shared_ptr<Process> process = handle_table.Get<Process>(handle); | ||||
|     if (process) { | ||||
|         *process_id = process->GetProcessID(); | ||||
|         return RESULT_SUCCESS; | ||||
|     } | ||||
|  | ||||
|     const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle); | ||||
|     if (thread) { | ||||
|         const Process* const owner_process = thread->GetOwnerProcess(); | ||||
|         if (!owner_process) { | ||||
|             LOG_ERROR(Kernel_SVC, "Non-existent owning process encountered."); | ||||
|             return ResultInvalidHandle; | ||||
|         } | ||||
|  | ||||
|         *process_id = owner_process->GetProcessID(); | ||||
|         return RESULT_SUCCESS; | ||||
|     } | ||||
|  | ||||
|     // NOTE: This should also handle debug objects before returning. | ||||
|  | ||||
|     LOG_ERROR(Kernel_SVC, "Handle does not exist, handle=0x{:08X}", handle); | ||||
|     __debugbreak(); | ||||
|     return ResultInvalidHandle; | ||||
| } | ||||
|  | ||||
| @@ -460,14 +432,30 @@ static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr ha | ||||
|  | ||||
|     for (u64 i = 0; i < handle_count; ++i) { | ||||
|         const Handle handle = memory.Read32(handles_address + i * sizeof(Handle)); | ||||
|         const auto object = handle_table.Get<KSynchronizationObject>(handle); | ||||
|  | ||||
|         if (object == nullptr) { | ||||
|             LOG_ERROR(Kernel_SVC, "Object is a nullptr"); | ||||
|             return ResultInvalidHandle; | ||||
|         bool succeeded{}; | ||||
|         { | ||||
|             auto object = handle_table.Get<KSynchronizationObject>(handle); | ||||
|             if (object) { | ||||
|                 objects[i] = object.get(); | ||||
|                 succeeded = true; | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         objects[i] = object.get(); | ||||
|         // TODO(bunnei): WORKAROUND WHILE WE HAVE TWO HANDLE TABLES | ||||
|         if (!succeeded) { | ||||
|             { | ||||
|                 auto object = handle_table.GetObject<KSynchronizationObject>(handle); | ||||
|  | ||||
|                 if (object.IsNull()) { | ||||
|                     LOG_ERROR(Kernel_SVC, "Object is a nullptr"); | ||||
|                     return ResultInvalidHandle; | ||||
|                 } | ||||
|  | ||||
|                 objects[i] = object.GetPointerUnsafe(); | ||||
|                 succeeded = true; | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|     return KSynchronizationObject::Wait(kernel, index, objects.data(), | ||||
|                                         static_cast<s32>(objects.size()), nano_seconds); | ||||
| @@ -481,19 +469,7 @@ static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u | ||||
|  | ||||
| /// Resumes a thread waiting on WaitSynchronization | ||||
| static ResultCode CancelSynchronization(Core::System& system, Handle thread_handle) { | ||||
|     LOG_TRACE(Kernel_SVC, "called thread=0x{:X}", thread_handle); | ||||
|  | ||||
|     // Get the thread from its handle. | ||||
|     const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | ||||
|     std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); | ||||
|  | ||||
|     if (!thread) { | ||||
|         LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle); | ||||
|         return ResultInvalidHandle; | ||||
|     } | ||||
|  | ||||
|     // Cancel the thread's wait. | ||||
|     thread->WaitCancel(); | ||||
|     __debugbreak(); | ||||
|     return RESULT_SUCCESS; | ||||
| } | ||||
|  | ||||
| @@ -899,9 +875,10 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha | ||||
|             return ResultInvalidCombination; | ||||
|         } | ||||
|  | ||||
|         const auto thread = system.Kernel().CurrentProcess()->GetHandleTable().Get<KThread>( | ||||
|             static_cast<Handle>(handle)); | ||||
|         if (!thread) { | ||||
|         KScopedAutoObject thread = | ||||
|             system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>( | ||||
|                 static_cast<Handle>(handle)); | ||||
|         if (thread.IsNull()) { | ||||
|             LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", | ||||
|                       static_cast<Handle>(handle)); | ||||
|             return ResultInvalidHandle; | ||||
| @@ -910,7 +887,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha | ||||
|         const auto& core_timing = system.CoreTiming(); | ||||
|         const auto& scheduler = *system.Kernel().CurrentScheduler(); | ||||
|         const auto* const current_thread = scheduler.GetCurrentThread(); | ||||
|         const bool same_thread = current_thread == thread.get(); | ||||
|         const bool same_thread = current_thread == thread.GetPointerUnsafe(); | ||||
|  | ||||
|         const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks(); | ||||
|         u64 out_ticks = 0; | ||||
| @@ -1055,45 +1032,7 @@ static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size | ||||
| /// Sets the thread activity | ||||
| static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle, | ||||
|                                     ThreadActivity thread_activity) { | ||||
|     LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", thread_handle, | ||||
|               thread_activity); | ||||
|  | ||||
|     // Validate the activity. | ||||
|     constexpr auto IsValidThreadActivity = [](ThreadActivity activity) { | ||||
|         return activity == ThreadActivity::Runnable || activity == ThreadActivity::Paused; | ||||
|     }; | ||||
|     if (!IsValidThreadActivity(thread_activity)) { | ||||
|         LOG_ERROR(Kernel_SVC, "Invalid thread activity value provided (activity={})", | ||||
|                   thread_activity); | ||||
|         return ResultInvalidEnumValue; | ||||
|     } | ||||
|  | ||||
|     // Get the thread from its handle. | ||||
|     auto& kernel = system.Kernel(); | ||||
|     const auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); | ||||
|     const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); | ||||
|     if (!thread) { | ||||
|         LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle); | ||||
|         return ResultInvalidHandle; | ||||
|     } | ||||
|  | ||||
|     // Check that the activity is being set on a non-current thread for the current process. | ||||
|     if (thread->GetOwnerProcess() != kernel.CurrentProcess()) { | ||||
|         LOG_ERROR(Kernel_SVC, "Invalid owning process for the created thread."); | ||||
|         return ResultInvalidHandle; | ||||
|     } | ||||
|     if (thread.get() == GetCurrentThreadPointer(kernel)) { | ||||
|         LOG_ERROR(Kernel_SVC, "Thread is busy"); | ||||
|         return ResultBusy; | ||||
|     } | ||||
|  | ||||
|     // Set the activity. | ||||
|     const auto set_result = thread->SetActivity(thread_activity); | ||||
|     if (set_result.IsError()) { | ||||
|         LOG_ERROR(Kernel_SVC, "Failed to set thread activity."); | ||||
|         return set_result; | ||||
|     } | ||||
|  | ||||
|     __debugbreak(); | ||||
|     return RESULT_SUCCESS; | ||||
| } | ||||
|  | ||||
| @@ -1107,36 +1046,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Hand | ||||
|     LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context, | ||||
|               thread_handle); | ||||
|  | ||||
|     // Get the thread from its handle. | ||||
|     const auto* current_process = system.Kernel().CurrentProcess(); | ||||
|     const std::shared_ptr<KThread> thread = | ||||
|         current_process->GetHandleTable().Get<KThread>(thread_handle); | ||||
|     if (!thread) { | ||||
|         LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={})", thread_handle); | ||||
|         return ResultInvalidHandle; | ||||
|     } | ||||
|  | ||||
|     // Require the handle be to a non-current thread in the current process. | ||||
|     if (thread->GetOwnerProcess() != current_process) { | ||||
|         LOG_ERROR(Kernel_SVC, "Thread owning process is not the current process."); | ||||
|         return ResultInvalidHandle; | ||||
|     } | ||||
|     if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) { | ||||
|         LOG_ERROR(Kernel_SVC, "Current thread is busy."); | ||||
|         return ResultBusy; | ||||
|     } | ||||
|  | ||||
|     // Get the thread context. | ||||
|     std::vector<u8> context; | ||||
|     const auto context_result = thread->GetThreadContext3(context); | ||||
|     if (context_result.IsError()) { | ||||
|         LOG_ERROR(Kernel_SVC, "Unable to successfully retrieve thread context (result: {})", | ||||
|                   context_result.raw); | ||||
|         return context_result; | ||||
|     } | ||||
|  | ||||
|     // Copy the thread context to user space. | ||||
|     system.Memory().WriteBlock(out_context, context.data(), context.size()); | ||||
|     __debugbreak(); | ||||
|  | ||||
|     return RESULT_SUCCESS; | ||||
| } | ||||
| @@ -1164,30 +1074,26 @@ static ResultCode GetThreadPriority32(Core::System& system, u32* out_priority, H | ||||
| } | ||||
|  | ||||
| /// Sets the priority for the specified thread | ||||
| static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 priority) { | ||||
|     LOG_TRACE(Kernel_SVC, "called"); | ||||
| static ResultCode SetThreadPriority(Core::System& system, Handle thread_handle, u32 priority) { | ||||
|     // Get the current process. | ||||
|     Process& process = *system.Kernel().CurrentProcess(); | ||||
|  | ||||
|     // Validate the priority. | ||||
|     if (HighestThreadPriority > priority || priority > LowestThreadPriority) { | ||||
|         LOG_ERROR(Kernel_SVC, "Invalid thread priority specified (priority={})", priority); | ||||
|         return ResultInvalidPriority; | ||||
|     } | ||||
|     R_UNLESS(HighestThreadPriority <= priority && priority <= LowestThreadPriority, | ||||
|              ResultInvalidPriority); | ||||
|     R_UNLESS(process.CheckThreadPriority(priority), ResultInvalidPriority); | ||||
|  | ||||
|     // Get the thread from its handle. | ||||
|     const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | ||||
|     const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle); | ||||
|     if (!thread) { | ||||
|         LOG_ERROR(Kernel_SVC, "Invalid handle provided (handle={:08X})", handle); | ||||
|         return ResultInvalidHandle; | ||||
|     } | ||||
|     KScopedAutoObject thread = process.GetHandleTable().GetObject<KThread>(thread_handle); | ||||
|     R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); | ||||
|  | ||||
|     // Set the thread priority. | ||||
|     thread->SetBasePriority(priority); | ||||
|     return RESULT_SUCCESS; | ||||
| } | ||||
|  | ||||
| static ResultCode SetThreadPriority32(Core::System& system, Handle handle, u32 priority) { | ||||
|     return SetThreadPriority(system, handle, priority); | ||||
| static ResultCode SetThreadPriority32(Core::System& system, Handle thread_handle, u32 priority) { | ||||
|     return SetThreadPriority(system, thread_handle, priority); | ||||
| } | ||||
|  | ||||
| /// Get which CPU core is executing the current thread | ||||
| @@ -1480,7 +1386,7 @@ static void ExitProcess32(Core::System& system) { | ||||
|     ExitProcess(system); | ||||
| } | ||||
|  | ||||
| static constexpr bool IsValidCoreId(int32_t core_id) { | ||||
| static constexpr bool IsValidVirtualCoreId(int32_t core_id) { | ||||
|     return (0 <= core_id && core_id < static_cast<int32_t>(Core::Hardware::NUM_CPU_CORES)); | ||||
| } | ||||
|  | ||||
| @@ -1500,7 +1406,7 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e | ||||
|     } | ||||
|  | ||||
|     // Validate arguments. | ||||
|     if (!IsValidCoreId(core_id)) { | ||||
|     if (!IsValidVirtualCoreId(core_id)) { | ||||
|         LOG_ERROR(Kernel_SVC, "Invalid Core ID specified (id={})", core_id); | ||||
|         return ResultInvalidCoreId; | ||||
|     } | ||||
| @@ -1822,8 +1728,11 @@ static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high) | ||||
| static ResultCode CloseHandle(Core::System& system, Handle handle) { | ||||
|     LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle); | ||||
|  | ||||
|     auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | ||||
|     return handle_table.Close(handle); | ||||
|     // Remove the handle. | ||||
|     R_UNLESS(system.Kernel().CurrentProcess()->GetHandleTable().Remove(handle), | ||||
|              ResultInvalidHandle); | ||||
|  | ||||
|     return RESULT_SUCCESS; | ||||
| } | ||||
|  | ||||
| static ResultCode CloseHandle32(Core::System& system, Handle handle) { | ||||
| @@ -1925,23 +1834,7 @@ static ResultCode CreateTransferMemory32(Core::System& system, Handle* handle, u | ||||
|  | ||||
| static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id, | ||||
|                                     u64* out_affinity_mask) { | ||||
|     LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); | ||||
|  | ||||
|     // Get the thread from its handle. | ||||
|     const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | ||||
|     const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); | ||||
|     if (!thread) { | ||||
|         LOG_ERROR(Kernel_SVC, "Invalid thread handle specified (handle={:08X})", thread_handle); | ||||
|         return ResultInvalidHandle; | ||||
|     } | ||||
|  | ||||
|     // Get the core mask. | ||||
|     const auto result = thread->GetCoreMask(out_core_id, out_affinity_mask); | ||||
|     if (result.IsError()) { | ||||
|         LOG_ERROR(Kernel_SVC, "Unable to successfully retrieve core mask (result={})", result.raw); | ||||
|         return result; | ||||
|     } | ||||
|  | ||||
|     __debugbreak(); | ||||
|     return RESULT_SUCCESS; | ||||
| } | ||||
|  | ||||
| @@ -1956,58 +1849,33 @@ static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle | ||||
|  | ||||
| static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id, | ||||
|                                     u64 affinity_mask) { | ||||
|     LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core_id=0x{:X}, affinity_mask=0x{:016X}", | ||||
|               thread_handle, core_id, affinity_mask); | ||||
|  | ||||
|     const auto& current_process = *system.Kernel().CurrentProcess(); | ||||
|  | ||||
|     // Determine the core id/affinity mask. | ||||
|     if (core_id == Svc::IdealCoreUseProcessValue) { | ||||
|         core_id = current_process.GetIdealCoreId(); | ||||
|     if (core_id == IdealCoreUseProcessValue) { | ||||
|         core_id = system.Kernel().CurrentProcess()->GetIdealCoreId(); | ||||
|         affinity_mask = (1ULL << core_id); | ||||
|     } else { | ||||
|         // Validate the affinity mask. | ||||
|         const u64 process_core_mask = current_process.GetCoreMask(); | ||||
|         if ((affinity_mask | process_core_mask) != process_core_mask) { | ||||
|             LOG_ERROR(Kernel_SVC, | ||||
|                       "Affinity mask does not match the process core mask (affinity mask={:016X}, core " | ||||
|                       "mask={:016X})", | ||||
|                       affinity_mask, process_core_mask); | ||||
|             return ResultInvalidCoreId; | ||||
|         } | ||||
|         if (affinity_mask == 0) { | ||||
|             LOG_ERROR(Kernel_SVC, "Affinity mask is zero."); | ||||
|             return ResultInvalidCombination; | ||||
|         } | ||||
|         const u64 process_core_mask = system.Kernel().CurrentProcess()->GetCoreMask(); | ||||
|         R_UNLESS((affinity_mask | process_core_mask) == process_core_mask, ResultInvalidCoreId); | ||||
|         R_UNLESS(affinity_mask != 0, ResultInvalidCombination); | ||||
|  | ||||
|         // Validate the core id. | ||||
|         if (IsValidCoreId(core_id)) { | ||||
|             if (((1ULL << core_id) & affinity_mask) == 0) { | ||||
|                 LOG_ERROR(Kernel_SVC, "Invalid core ID (ID={})", core_id); | ||||
|                 return ResultInvalidCombination; | ||||
|             } | ||||
|         if (IsValidVirtualCoreId(core_id)) { | ||||
|             R_UNLESS(((1ULL << core_id) & affinity_mask) != 0, ResultInvalidCombination); | ||||
|         } else { | ||||
|             if (core_id != IdealCoreNoUpdate && core_id != IdealCoreDontCare) { | ||||
|                 LOG_ERROR(Kernel_SVC, "Invalid core ID (ID={})", core_id); | ||||
|                 return ResultInvalidCoreId; | ||||
|             } | ||||
|             R_UNLESS(core_id == IdealCoreNoUpdate || core_id == IdealCoreDontCare, | ||||
|                      ResultInvalidCoreId); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // Get the thread from its handle. | ||||
|     const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | ||||
|     const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); | ||||
|     if (!thread) { | ||||
|         LOG_ERROR(Kernel_SVC, "Invalid thread handle (handle={:08X})", thread_handle); | ||||
|         return ResultInvalidHandle; | ||||
|     } | ||||
|     KScopedAutoObject thread = | ||||
|         system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle); | ||||
|     R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); | ||||
|  | ||||
|     // Set the core mask. | ||||
|     const auto set_result = thread->SetCoreMask(core_id, affinity_mask); | ||||
|     if (set_result.IsError()) { | ||||
|         LOG_ERROR(Kernel_SVC, "Unable to successfully set core mask (result={})", set_result.raw); | ||||
|         return set_result; | ||||
|     } | ||||
|     R_TRY(thread->SetCoreMask(core_id, affinity_mask)); | ||||
|  | ||||
|     return RESULT_SUCCESS; | ||||
| } | ||||
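The subset test (affinity_mask | process_core_mask) == process_core_mask in the hunk above accepts a requested affinity mask only when every bit it sets is also set in the process core mask. A minimal standalone sketch of that check follows; the helper name is hypothetical and not part of the change.

    #include <cstdint>

    // Hypothetical helper: true only when every core requested by `affinity_mask`
    // is also permitted by `process_core_mask` (i.e. the request is a subset of it).
    constexpr bool IsAffinityMaskAllowed(std::uint64_t affinity_mask,
                                         std::uint64_t process_core_mask) {
        return (affinity_mask | process_core_mask) == process_core_mask;
    }

    static_assert(IsAffinityMaskAllowed(0b0011, 0b0111));   // cores 0-1 requested, cores 0-2 allowed
    static_assert(!IsAffinityMaskAllowed(0b1000, 0b0111));  // core 3 falls outside cores 0-2

The KScopedAutoObject lookup that follows the validation appears to be the commit's replacement for the shared_ptr handle-table lookup: the wrapper is presumably expected to open a reference when acquired and close it when the scope ends, leaving only the IsNotNull() check in the SVC body.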
|  | ||||
| @@ -2105,7 +1973,7 @@ static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* o | ||||
|     *out_write = *write_create_result; | ||||
|  | ||||
|     // Add the writable event to the handle table. | ||||
|     auto handle_guard = SCOPE_GUARD({ handle_table.Close(*write_create_result); }); | ||||
|     auto handle_guard = SCOPE_GUARD({ handle_table.Remove(*write_create_result); }); | ||||
|  | ||||
|     // Add the readable event to the handle table. | ||||
|     const auto read_create_result = handle_table.Create(event->GetReadableEvent()); | ||||
|   | ||||
| @@ -15,16 +15,12 @@ | ||||
| namespace Kernel { | ||||
|  | ||||
| TimeManager::TimeManager(Core::System& system_) : system{system_} { | ||||
|     time_manager_event_type = Core::Timing::CreateEvent( | ||||
|         "Kernel::TimeManagerCallback", | ||||
|         [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { | ||||
|             std::shared_ptr<KThread> thread; | ||||
|             { | ||||
|                 std::lock_guard lock{mutex}; | ||||
|                 thread = SharedFrom<KThread>(reinterpret_cast<KThread*>(thread_handle)); | ||||
|             } | ||||
|             thread->Wakeup(); | ||||
|         }); | ||||
|     time_manager_event_type = | ||||
|         Core::Timing::CreateEvent("Kernel::TimeManagerCallback", | ||||
|                                   [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { | ||||
|                                       KThread* thread = reinterpret_cast<KThread*>(thread_handle); | ||||
|                                       thread->Wakeup(); | ||||
|                                   }); | ||||
| } | ||||
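The rewritten callback treats the std::uintptr_t event argument as a raw KThread pointer and casts it back directly instead of reconstructing a shared_ptr under the mutex. A generic sketch of that pointer round trip is shown below; the types and names are illustrative placeholders, not the emulator's.

    #include <cstdint>
    #include <functional>
    #include <iostream>

    struct FakeThread {
        void Wakeup() { std::cout << "woken\n"; }
    };

    int main() {
        FakeThread thread{};

        // The pointer travels through the scheduler as opaque integer user data...
        const auto user_data = reinterpret_cast<std::uintptr_t>(&thread);

        // ...and is decoded again when the timing event fires, as the new callback does.
        const std::function<void(std::uintptr_t)> on_event = [](std::uintptr_t data) {
            reinterpret_cast<FakeThread*>(data)->Wakeup();
        };
        on_event(user_data);
        return 0;
    }

This only stays safe if the thread object outlives the scheduled event, which is presumably what the KAutoObject reference counting introduced elsewhere in this commit is meant to guarantee.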
|  | ||||
| void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) { | ||||
|   | ||||
| @@ -91,7 +91,7 @@ std::size_t WaitTreeItem::Row() const { | ||||
| std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList() { | ||||
|     std::vector<std::unique_ptr<WaitTreeThread>> item_list; | ||||
|     std::size_t row = 0; | ||||
|     auto add_threads = [&](const std::vector<std::shared_ptr<Kernel::KThread>>& threads) { | ||||
|     auto add_threads = [&](const std::vector<Kernel::KThread*>& threads) { | ||||
|         for (std::size_t i = 0; i < threads.size(); ++i) { | ||||
|             if (threads[i]->GetThreadTypeForDebugging() == Kernel::ThreadType::User) { | ||||
|                 item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i])); | ||||
| @@ -183,10 +183,12 @@ bool WaitTreeExpandableItem::IsExpandable() const { | ||||
| } | ||||
|  | ||||
| QString WaitTreeSynchronizationObject::GetText() const { | ||||
|     return tr("[%1]%2 %3") | ||||
|         .arg(object.GetObjectId()) | ||||
|         .arg(QString::fromStdString(object.GetTypeName()), | ||||
|              QString::fromStdString(object.GetName())); | ||||
|     // return tr("[%1]%2 %3") | ||||
|     //    .arg(object.GetObjectId()) | ||||
|     //    .arg(QString::fromStdString(object.GetTypeName()), | ||||
|     //         QString::fromStdString(object.GetName())); | ||||
|  | ||||
|     return tr("UNIMPLEMENTED"); | ||||
| } | ||||
|  | ||||
| std::unique_ptr<WaitTreeSynchronizationObject> WaitTreeSynchronizationObject::make( | ||||
|   | ||||