Kernel: Initial implementation of thread preemption.
parent 103f3a2fe5
commit b49c0dab87
src/core/hle/kernel/kernel.cpp
@@ -12,6 +12,7 @@
 #include "core/core.h"
 #include "core/core_timing.h"
+#include "core/core_timing_util.h"
 #include "core/hle/kernel/address_arbiter.h"
 #include "core/hle/kernel/client_port.h"
 #include "core/hle/kernel/handle_table.h"
@@ -96,6 +97,7 @@ struct KernelCore::Impl {

         InitializeSystemResourceLimit(kernel);
         InitializeThreads();
+        InitializePreemption();
     }

     void Shutdown() {
@@ -111,6 +113,7 @@ struct KernelCore::Impl {

         thread_wakeup_callback_handle_table.Clear();
         thread_wakeup_event_type = nullptr;
+        preemption_event = nullptr;

         named_ports.clear();
     }
@@ -133,6 +136,18 @@ struct KernelCore::Impl {
         system.CoreTiming().RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback);
     }

+    void InitializePreemption() {
+        preemption_event = system.CoreTiming().RegisterEvent(
+            "PreemptionCallback", [this](u64 userdata, s64 cycles_late) {
+                global_scheduler.PreemptThreads();
+                s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
+                system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
+            });
+
+        s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
+        system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
+    }
+
     std::atomic<u32> next_object_id{0};
     std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin};
     std::atomic<u64> next_user_process_id{Process::ProcessIDMin};
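The hunk above sets up a self-rearming timer: the "PreemptionCallback" lambda calls GlobalScheduler::PreemptThreads() and then schedules the same event again 10 ms later, while the tail of InitializePreemption() queues the first occurrence. A minimal standalone sketch of that pattern follows; the names and the toy queue are illustrative only and do not reflect yuzu's CoreTiming API.

// Toy re-arming callback loop (illustrative sketch, not yuzu code).
#include <cstdio>
#include <functional>
#include <queue>

int main() {
    std::queue<std::function<void()>> pending; // stand-in for a timed event queue
    int ticks = 0;

    std::function<void()> preemption_callback = [&] {
        ++ticks;
        std::printf("tick %d: PreemptThreads() would run here\n", ticks);
        if (ticks < 5) {
            pending.push(preemption_callback); // re-arm, as ScheduleEvent() does above
        }
    };

    pending.push(preemption_callback); // the initial ScheduleEvent() in InitializePreemption()
    while (!pending.empty()) {
        auto callback = pending.front();
        pending.pop();
        callback();
    }
    return 0;
}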
@@ -146,6 +161,7 @@ struct KernelCore::Impl {
     SharedPtr<ResourceLimit> system_resource_limit;

     Core::Timing::EventType* thread_wakeup_event_type = nullptr;
+    Core::Timing::EventType* preemption_event = nullptr;
     // TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future,
     // allowing us to simply use a pool index or similar.
     Kernel::HandleTable thread_wakeup_callback_handle_table;
src/core/hle/kernel/scheduler.cpp
@@ -238,6 +238,16 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
     return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }

+void GlobalScheduler::PreemptThreads() {
+    for (std::size_t core_id = 0; core_id < NUM_CPU_CORES; core_id++) {
+        const u64 priority = preemption_priorities[core_id];
+        if (scheduled_queue[core_id].size(priority) > 1) {
+            scheduled_queue[core_id].yield(priority);
+            reselection_pending.store(true, std::memory_order_release);
+        }
+    }
+}
+
 void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) {
     ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core.");
     scheduled_queue[core].add(thread, priority);
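PreemptThreads() walks each core's ready queue and, whenever more than one thread sits at that core's preemption priority, yields that priority level and flags a reselection. Assuming Common::MultiLevelQueue::yield rotates the front thread of the level to the back (round-robin within one priority), the effect on a single level looks like the sketch below; the deque and thread names are illustrative.

// Front-to-back rotation within one priority level (illustrative sketch).
#include <cstdio>
#include <deque>
#include <string>

int main() {
    // Equal-priority threads on one core; the front thread is the one currently favoured.
    std::deque<std::string> level{"ThreadA", "ThreadB", "ThreadC"};

    if (level.size() > 1) {             // same guard as in PreemptThreads()
        level.push_back(level.front()); // rotate the running thread to the back...
        level.pop_front();              // ...so another thread of this priority gets picked
    }

    for (const auto& name : level) {
        std::printf("%s\n", name.c_str()); // prints ThreadB, ThreadC, ThreadA
    }
    return 0;
}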
src/core/hle/kernel/scheduler.h
@@ -133,6 +133,8 @@ public:
      */
     bool YieldThreadAndWaitForLoadBalancing(Thread* thread);

+    void PreemptThreads();
+
     u32 CpuCoresCount() const {
         return NUM_CPU_CORES;
     }
@@ -153,6 +155,8 @@ private:
     std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> suggested_queue;
     std::atomic<bool> reselection_pending;

+    std::array<u64, NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
+
     /// Lists all thread ids that aren't deleted/etc.
     std::vector<SharedPtr<Thread>> thread_list;
     Core::System& system;
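The new preemption_priorities member holds one priority level per CPU core and is indexed by core_id in PreemptThreads(): with these defaults, cores 0 through 2 preempt threads queued at priority 59 and core 3 at priority 62, each time the 10 ms preemption event registered in kernel.cpp fires.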