diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 0f7970ebe..ac80fb0a6 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -143,6 +143,28 @@ void ArbitrateAllThreads(u32 address) {
     }
 }
 
+/// Boost low priority threads (temporarily) that have been starved
+static void PriorityBoostStarvedThreads() {
+    u64 current_ticks = CoreTiming::GetTicks();
+
+    for (auto& thread : thread_list) {
+        // TODO(bunnei): Threads that have been waiting to be scheduled for `boost_ticks` (or
+        // longer) will have their priority temporarily adjusted to 1 higher than the highest
+        // priority thread to prevent thread starvation. This general behavior has been verified
+        // on hardware. However, this is almost certainly not perfect, and the real CTR OS scheduler
+        // should probably be reversed to verify this.
+
+        const u64 boost_timeout = 2000000; // Boost threads that have been ready for > this long
+
+        u64 delta = current_ticks - thread->last_running_ticks;
+
+        if (thread->status == THREADSTATUS_READY && delta > boost_timeout) {
+            const s32 priority = std::max<s32>(ready_queue.get_first()->current_priority - 1, 0);
+            thread->BoostPriority(priority);
+        }
+    }
+}
+
 /**
  * Switches the CPU's active thread context to that of the specified thread
  * @param new_thread The thread to switch to
@@ -178,6 +200,9 @@ static void SwitchContext(Thread* new_thread) {
         ready_queue.remove(new_thread->current_priority, new_thread);
         new_thread->status = THREADSTATUS_RUNNING;
 
+        // Restores thread to its nominal priority if it has been temporarily changed
+        new_thread->current_priority = new_thread->nominal_priority;
+
         if (previous_process != current_thread->owner_process) {
             Kernel::g_current_process = current_thread->owner_process;
             SetCurrentPageTable(&Kernel::g_current_process->vm_manager.page_table);
@@ -513,6 +538,7 @@ bool HaveReadyThreads() {
 }
 
 void Reschedule() {
+    PriorityBoostStarvedThreads();
     Thread* cur = GetCurrentThread();
     Thread* next = PopNextReadyThread();