hle: kernel: Migrate some code from Common::SpinLock to KSpinLock.
parent 541b4353e4
commit 5872561077
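Background for the hunks that follow: Common::SpinLock exposes the standard lock()/unlock() member names that std::scoped_lock requires, while the kernel's KSpinLock follows the HLE kernel's Lock()/Unlock() naming and is taken through the KScopedSpinLock alias (KScopedLock<KSpinLock>, visible in the spin-lock header hunk near the end of this diff). That is why each std::scoped_lock lock{guard}; becomes KScopedSpinLock lk{guard}; and each explicit lock()/unlock() call is renamed to Lock()/Unlock(). The following is a minimal sketch of the scope-guard pattern those call sites rely on; it is an assumption about the general shape (the real KScopedLock presumably lives in k_scoped_lock.h), not code from this commit:

// Sketch only (not the commit's code): illustrates the scope-guard pattern that
// "KScopedSpinLock lk{guard};" relies on. The real KScopedLock may differ in detail.
template <typename T>
class KScopedLockSketch {
public:
    explicit KScopedLockSketch(T& lock) : lock_obj{lock} {
        lock_obj.Lock(); // acquired on construction
    }
    ~KScopedLockSketch() {
        lock_obj.Unlock(); // released on destruction
    }
    KScopedLockSketch(const KScopedLockSketch&) = delete;
    KScopedLockSketch& operator=(const KScopedLockSketch&) = delete;

private:
    T& lock_obj;
};

std::scoped_lock itself cannot wrap KSpinLock because it requires the BasicLockable lowercase names lock()/unlock(), which KSpinLock deliberately does not provide.
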
@@ -62,7 +62,7 @@ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedul
 }
 
 u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
-    std::scoped_lock lock{guard};
+    KScopedSpinLock lk{guard};
     if (KThread* prev_highest_thread = state.highest_priority_thread;
         prev_highest_thread != highest_thread) {
         if (prev_highest_thread != nullptr) {
@@ -637,11 +637,11 @@ void KScheduler::RescheduleCurrentCore() {
     if (phys_core.IsInterrupted()) {
         phys_core.ClearInterrupt();
     }
-    guard.lock();
+    guard.Lock();
     if (state.needs_scheduling.load()) {
         Schedule();
     } else {
-        guard.unlock();
+        guard.Unlock();
     }
 }
 
@@ -669,7 +669,7 @@ void KScheduler::Unload(KThread* thread) {
         } else {
             prev_thread = nullptr;
         }
-        thread->context_guard.unlock();
+        thread->context_guard.Unlock();
     }
 }
 
@@ -713,7 +713,7 @@ void KScheduler::ScheduleImpl() {
 
     // If we're not actually switching thread, there's nothing to do.
     if (next_thread == current_thread.load()) {
-        guard.unlock();
+        guard.Unlock();
         return;
     }
 
@@ -732,7 +732,7 @@ void KScheduler::ScheduleImpl() {
     } else {
         old_context = &idle_thread->GetHostContext();
     }
-    guard.unlock();
+    guard.Unlock();
 
     Common::Fiber::YieldTo(*old_context, *switch_fiber);
     /// When a thread wakes up, the scheduler may have changed to other in another core.
@@ -748,24 +748,24 @@ void KScheduler::OnSwitch(void* this_scheduler) {
 void KScheduler::SwitchToCurrent() {
     while (true) {
         {
-            std::scoped_lock lock{guard};
+            KScopedSpinLock lk{guard};
             current_thread.store(state.highest_priority_thread);
             state.needs_scheduling.store(false);
         }
         const auto is_switch_pending = [this] {
-            std::scoped_lock lock{guard};
+            KScopedSpinLock lk{guard};
            return state.needs_scheduling.load();
         };
         do {
             auto next_thread = current_thread.load();
             if (next_thread != nullptr) {
-                next_thread->context_guard.lock();
+                next_thread->context_guard.Lock();
                 if (next_thread->GetRawState() != ThreadState::Runnable) {
-                    next_thread->context_guard.unlock();
+                    next_thread->context_guard.Unlock();
                     break;
                 }
                 if (next_thread->GetActiveCore() != core_id) {
-                    next_thread->context_guard.unlock();
+                    next_thread->context_guard.Unlock();
                     break;
                 }
             }
@@ -2,19 +2,16 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
-
 #pragma once
 
 #include <atomic>
 
 #include "common/common_types.h"
-#include "common/spin_lock.h"
 #include "core/hle/kernel/global_scheduler_context.h"
 #include "core/hle/kernel/k_priority_queue.h"
 #include "core/hle/kernel/k_scheduler_lock.h"
 #include "core/hle/kernel/k_scoped_lock.h"
+#include "core/hle/kernel/k_spin_lock.h"
 
 namespace Common {
 class Fiber;
@@ -195,7 +192,7 @@ private:
     u64 last_context_switch_time{};
     const s32 core_id;
 
-    Common::SpinLock guard{};
+    KSpinLock guard{};
 };
 
 class KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
@@ -2,14 +2,11 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
-
 #pragma once
 
 #include "common/assert.h"
-#include "common/spin_lock.h"
 #include "core/hardware_properties.h"
+#include "core/hle/kernel/k_spin_lock.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
 
@@ -34,7 +31,7 @@ public:
         } else {
             // Otherwise, we want to disable scheduling and acquire the spinlock.
             SchedulerType::DisableScheduling(kernel);
-            spin_lock.lock();
+            spin_lock.Lock();
 
             // For debug, ensure that our state is valid.
             ASSERT(lock_count == 0);
@@ -58,7 +55,7 @@ public:
 
         // Note that we no longer hold the lock, and unlock the spinlock.
         owner_thread = nullptr;
-        spin_lock.unlock();
+        spin_lock.Unlock();
 
         // Enable scheduling, and perform a rescheduling operation.
         SchedulerType::EnableScheduling(kernel, cores_needing_scheduling);
@@ -67,7 +64,7 @@ public:
 
 private:
     KernelCore& kernel;
-    Common::SpinLock spin_lock{};
+    KAlignedSpinLock spin_lock{};
     s32 lock_count{};
     KThread* owner_thread{};
 };
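For context, the scheduler lock class changed above is normally not taken directly; call sites use KScopedSchedulerLock, declared in the scheduler header hunk earlier as a KScopedLock over GlobalSchedulerContext::LockType. A hedged usage sketch follows; the constructor signature and include paths are assumptions based on that declaration, not something shown in this diff:

#include "core/hle/kernel/k_scheduler.h" // assumed location of KScopedSchedulerLock
#include "core/hle/kernel/kernel.h"

// Usage sketch only; the exact constructor of KScopedSchedulerLock is assumed.
void DoSomethingUnderSchedulerLock(Kernel::KernelCore& kernel) {
    // Construction ends up in the Lock() path shown above: DisableScheduling(),
    // then spin_lock.Lock() on what is now a KAlignedSpinLock.
    Kernel::KScopedSchedulerLock lock{kernel};

    // ... modify thread/scheduler state while scheduling is disabled ...

    // Destruction runs the Unlock() path: owner_thread is cleared,
    // spin_lock.Unlock() is called, and scheduling is re-enabled.
}
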
@@ -28,6 +28,12 @@ private:
     std::atomic_flag lck = ATOMIC_FLAG_INIT;
 };
 
+// TODO(bunnei): Alias for now, in case we want to implement these accurately in the future.
+using KAlignedSpinLock = KSpinLock;
+using KNotAlignedSpinLock = KSpinLock;
+
 using KScopedSpinLock = KScopedLock<KSpinLock>;
+using KScopedAlignedSpinLock = KScopedLock<KAlignedSpinLock>;
+using KScopedNotAlignedSpinLock = KScopedLock<KNotAlignedSpinLock>;
 
 } // namespace Kernel
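The hunk above shows only the tail of the spin-lock header, but the private std::atomic_flag member together with the Lock()/Unlock() calls introduced throughout this commit imply a conventional test-and-set spin lock. Below is a minimal sketch of that shape under that assumption; the bodies are illustrative, not a copy of the class. Per the TODO, the aligned and not-aligned aliases are plain aliases for now; an accurate KAlignedSpinLock would typically be padded or aligned to a cache line to avoid false sharing.

#include <atomic>

// Sketch of the general shape implied by the std::atomic_flag member shown above.
// Method names match the calls made elsewhere in this commit; the bodies are assumed.
class KSpinLockSketch {
public:
    void Lock() {
        // Spin until we are the ones who set the flag.
        while (lck.test_and_set(std::memory_order_acquire)) {
        }
    }

    void Unlock() {
        lck.clear(std::memory_order_release);
    }

    bool TryLock() {
        return !lck.test_and_set(std::memory_order_acquire);
    }

private:
    std::atomic_flag lck = ATOMIC_FLAG_INIT;
};
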
@@ -14,10 +14,10 @@
 
 #include "common/common_types.h"
 #include "common/intrusive_red_black_tree.h"
-#include "common/spin_lock.h"
 #include "core/arm/arm_interface.h"
 #include "core/hle/kernel/k_affinity_mask.h"
 #include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_spin_lock.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/svc_common.h"
@@ -732,7 +732,7 @@ private:
     s8 priority_inheritance_count{};
     bool resource_limit_release_hint{};
     StackParameters stack_parameters{};
-    Common::SpinLock context_guard{};
+    KSpinLock context_guard{};
 
     // For emulation
     std::shared_ptr<Common::Fiber> host_context{};