Scheduler: Implement Yield Count and Core migration on Thread Preemption.

Repository: https://github.com/yuzu-emu/yuzu.git
Commit: 0cf26cee59 (parent: 2d382de6fa)
src/core/hle/kernel/scheduler.cpp
@@ -241,10 +241,83 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
 void GlobalScheduler::PreemptThreads() {
     for (std::size_t core_id = 0; core_id < NUM_CPU_CORES; core_id++) {
         const u32 priority = preemption_priorities[core_id];
-        if (scheduled_queue[core_id].size(priority) > 1) {
+
+        if (scheduled_queue[core_id].size(priority) > 0) {
+            scheduled_queue[core_id].front(priority)->IncrementYieldCount();
             scheduled_queue[core_id].yield(priority);
-            reselection_pending.store(true, std::memory_order_release);
+            if (scheduled_queue[core_id].size(priority) > 1) {
+                scheduled_queue[core_id].front(priority)->IncrementYieldCount();
+            }
         }
+
+        Thread* current_thread =
+            scheduled_queue[core_id].empty() ? nullptr : scheduled_queue[core_id].front();
+        Thread* winner = nullptr;
+        for (auto& thread : suggested_queue[core_id]) {
+            const s32 source_core = thread->GetProcessorID();
+            if (thread->GetPriority() != priority) {
+                continue;
+            }
+            if (source_core >= 0) {
+                Thread* next_thread = scheduled_queue[source_core].empty()
+                                          ? nullptr
+                                          : scheduled_queue[source_core].front();
+                if (next_thread != nullptr && next_thread->GetPriority() < 2) {
+                    break;
+                }
+                if (next_thread == thread) {
+                    continue;
+                }
+            }
+            if (current_thread != nullptr &&
+                current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
+                winner = thread;
+                break;
+            }
+        }
+
+        if (winner != nullptr) {
+            if (winner->IsRunning()) {
+                UnloadThread(winner->GetProcessorID());
+            }
+            TransferToCore(winner->GetPriority(), core_id, winner);
+            current_thread =
+                winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
+        }
+
+        if (current_thread != nullptr && current_thread->GetPriority() > priority) {
+            for (auto& thread : suggested_queue[core_id]) {
+                const s32 source_core = thread->GetProcessorID();
+                if (thread->GetPriority() > priority) {
+                    continue;
+                }
+                if (source_core >= 0) {
+                    Thread* next_thread = scheduled_queue[source_core].empty()
+                                              ? nullptr
+                                              : scheduled_queue[source_core].front();
+                    if (next_thread != nullptr && next_thread->GetPriority() < 2) {
+                        break;
+                    }
+                    if (next_thread == thread) {
+                        continue;
+                    }
+                }
+                if (current_thread != nullptr &&
+                    current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
+                    winner = thread;
+                    break;
+                }
+            }
+
+            if (winner != nullptr) {
+                if (winner->IsRunning()) {
+                    UnloadThread(winner->GetProcessorID());
+                }
+                TransferToCore(winner->GetPriority(), core_id, winner);
+                current_thread = winner;
+            }
+        }
+
+        reselection_pending.store(true, std::memory_order_release);
     }
 }
 
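The rotation at the top of the new block depends on the multilevel queue's yield(priority) operation. As a rough illustration of the semantics assumed there (a minimal sketch, not yuzu's actual MultiLevelQueue implementation): yield moves the thread at the front of one priority level to the back, so the next thread at that level takes the core, and the code above bumps the yield counter of the preempted front and, after rotating, of the incoming front as well.

// Minimal sketch of the yield-rotation semantics assumed by PreemptThreads().
// SketchThread and SketchMultiLevelQueue are illustrative stand-ins, not yuzu types.
#include <array>
#include <cstddef>
#include <list>

struct SketchThread {
    unsigned long long yield_count = 0;
    void IncrementYieldCount() { ++yield_count; }
};

class SketchMultiLevelQueue {
public:
    static constexpr std::size_t NumPriorities = 64;

    std::size_t size(std::size_t priority) const { return levels[priority].size(); }
    SketchThread* front(std::size_t priority) const { return levels[priority].front(); }

    // Rotate one priority level in O(1): the current front goes to the back.
    void yield(std::size_t priority) {
        auto& level = levels[priority];
        level.splice(level.end(), level, level.begin());
    }

private:
    std::array<std::list<SketchThread*>, NumPriorities> levels{};
};

With that shape in mind, the preemption path reads naturally: mark the front as having yielded, rotate, and if a different thread is now in front, mark it too.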
@@ -260,9 +333,7 @@ void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) {
 
 bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
     if (current_thread == winner) {
-        // TODO(blinkhawk): manage redundant operations, this is not implemented.
-        // as its mostly an optimization.
-        // current_thread->SetRedundantSchedulerOperation();
+        current_thread->IncrementYieldCount();
         return true;
     } else {
         reselection_pending.store(true, std::memory_order_release);
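For context on where AskForReselectionOrMarkRedundant sits: the yield entry points pick a winner and then report through it, so a yield that changes nothing now counts against the calling thread instead of passing silently. A simplified caller sketch, assuming the surrounding GlobalScheduler API (the real caller bodies are outside this hunk):

// Assumed caller shape, for illustration only; not part of this diff.
// A plain same-priority yield rotates the queue, then asks whether the yield
// was redundant (the yielder won again) or a reselection is needed.
bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
    const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
    const u32 priority = yielding_thread->GetPriority();

    scheduled_queue[core_id].yield(priority);
    Thread* winner = scheduled_queue[core_id].front(priority);
    return AskForReselectionOrMarkRedundant(yielding_thread, winner);
}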
src/core/hle/kernel/thread.h
@@ -416,6 +416,14 @@ public:
     /// Yields this thread and if the core is left idle, loads are rebalanced
     bool YieldAndWaitForLoadBalancing();
 
+    void IncrementYieldCount() {
+        yield_count++;
+    }
+
+    u64 GetYieldCount() const {
+        return yield_count;
+    }
+
     ThreadSchedStatus GetSchedulingStatus() const {
         return static_cast<ThreadSchedStatus>(scheduling_state & ThreadSchedMasks::LowMask);
     }
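One design note on the new accessors: yield_count is a plain u64 and the increment is not atomic, which presumes the scheduler already serializes every IncrementYieldCount call. If the counter ever had to be updated or read across host threads without that serialization, an atomic counter would be the safer shape. A hypothetical variant, not what this commit does:

// Hypothetical atomic variant, illustration only. Relaxed ordering suffices
// because the value is a monotonically increasing statistic, not a sync point.
#include <atomic>
#include <cstdint>

struct YieldCounter {
    void Increment() {
        count.fetch_add(1, std::memory_order_relaxed);
    }
    std::uint64_t Get() const {
        return count.load(std::memory_order_relaxed);
    }
    std::atomic<std::uint64_t> count{0};
};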
@@ -460,6 +468,7 @@ private:
 
     u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
     u64 last_running_ticks = 0;   ///< CPU tick when thread was last running
+    u64 yield_count = 0;          ///< Number of unnecessary yields that have occurred.
 
     s32 processor_id = 0;
 
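GetYieldCount has no consumer inside this diff. One plausible use, purely illustrative and not from this commit, is sampling the counter at intervals to spot guest threads that spin on scheduler yields without making progress:

// Hypothetical diagnostic built on GetYieldCount (the helper name is invented):
// compare successive samples; a large delta means the interval was spent
// yielding redundantly rather than doing useful work.
u64 YieldDeltaSinceLastSample(const Thread& thread, u64& previous_sample) {
    const u64 current = thread.GetYieldCount();
    const u64 delta = current - previous_sample;
    previous_sample = current;
    return delta;
}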