kernel: Ensure all uses of disable_count are balanced
parent 77137583cd
commit a9a83fa726
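In short, the invariant this commit enforces: every DisableDispatch() that increments a thread's disable_count must be matched by exactly one EnableDispatch() that decrements it, on every path, including the HLE/dummy-thread path that cannot reschedule on a core thread. The following is a minimal, self-contained sketch of that pairing, not yuzu code; FakeThread and ScopedDisableDispatch are hypothetical stand-ins for KThread and KScopedDisableDispatch.

// Minimal sketch of the disable_count invariant; hypothetical types, not yuzu code.
#include <cassert>
#include <cstdint>
#include <iostream>

struct FakeThread {
    std::int32_t disable_count{0};

    void DisableDispatch() {
        ++disable_count;
    }

    void EnableDispatch() {
        --disable_count;
        // The commit's goal: the count never drops below zero, i.e. every
        // disable is paired with exactly one enable on every code path.
        assert(disable_count >= 0);
    }
};

// RAII helper analogous to KScopedDisableDispatch: the destructor must
// re-enable dispatch on *all* paths (core thread or HLE/dummy thread).
class ScopedDisableDispatch {
public:
    explicit ScopedDisableDispatch(FakeThread& t) : thread{t} {
        thread.DisableDispatch();
    }
    ~ScopedDisableDispatch() {
        thread.EnableDispatch();
    }

private:
    FakeThread& thread;
};

int main() {
    FakeThread thread;
    {
        ScopedDisableDispatch dd{thread};
        // ... work done while dispatch is disabled ...
    }
    std::cout << "disable_count after the scope: " << thread.disable_count << '\n'; // prints 0
}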
@@ -63,14 +63,8 @@ void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling)
     auto* scheduler{kernel.CurrentScheduler()};
 
     if (!scheduler || kernel.IsPhantomModeForSingleCore()) {
-        // HACK: we cannot schedule from this thread, it is not a core thread
-        RescheduleCores(kernel, cores_needing_scheduling);
-        if (GetCurrentThread(kernel).GetDisableDispatchCount() == 1) {
-            // Special case to ensure dummy threads that are waiting block
-            GetCurrentThread(kernel).IfDummyThreadTryWait();
-        }
-        GetCurrentThread(kernel).EnableDispatch();
-        ASSERT(GetCurrentThread(kernel).GetState() != ThreadState::Waiting);
+        KScheduler::RescheduleCores(kernel, cores_needing_scheduling);
+        KScheduler::RescheduleCurrentHLEThread(kernel);
         return;
     }
 
@@ -83,6 +77,17 @@ void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling)
     }
 }
 
+void KScheduler::RescheduleCurrentHLEThread(KernelCore& kernel) {
+    // HACK: we cannot schedule from this thread, it is not a core thread
+    ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
+
+    // Special case to ensure dummy threads that are waiting block
+    GetCurrentThread(kernel).IfDummyThreadTryWait();
+
+    ASSERT(GetCurrentThread(kernel).GetState() != ThreadState::Waiting);
+    GetCurrentThread(kernel).EnableDispatch();
+}
+
 u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
     if (IsSchedulerUpdateNeeded(kernel)) {
         return UpdateHighestPriorityThreadsImpl(kernel);
@@ -119,6 +119,8 @@ private:
     }
     static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
 
+    static void RescheduleCurrentHLEThread(KernelCore& kernel);
+
     // Instanced private API.
     void ScheduleImpl();
     void ScheduleImplFiber();
@@ -1106,6 +1106,8 @@ void KThread::IfDummyThreadTryWait() {
         return;
     }
 
+    ASSERT(!kernel.IsPhantomModeForSingleCore());
+
     // Block until we are no longer waiting.
     std::unique_lock lk(dummy_wait_lock);
     dummy_wait_cv.wait(
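The hunk above only adds an assertion; the wait that follows it (dummy_wait_lock, dummy_wait_cv) is a standard predicate-guarded condition-variable wait. For reference, here is a minimal, self-contained sketch of that idiom, not yuzu code; wait_lock, wait_cv and waiting are hypothetical stand-ins for the KThread members.

// Sketch of the blocking idiom used by dummy threads; hypothetical names, not yuzu code.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex wait_lock;              // stands in for dummy_wait_lock
std::condition_variable wait_cv;   // stands in for dummy_wait_cv
bool waiting = true;               // stands in for the thread's Waiting state

void BlockUntilWoken() {
    // Block until we are no longer waiting (same shape as IfDummyThreadTryWait).
    std::unique_lock lk(wait_lock);
    wait_cv.wait(lk, [] { return !waiting; });
}

int main() {
    std::thread blocked{BlockUntilWoken};

    {
        // The waker updates the predicate under the lock, then notifies.
        std::scoped_lock lk(wait_lock);
        waiting = false;
    }
    wait_cv.notify_one();

    blocked.join();
    std::cout << "dummy thread woke up\n";
}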
@@ -1211,10 +1213,12 @@ KScopedDisableDispatch::~KScopedDisableDispatch() {
     }
 
     if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
-        auto scheduler = kernel.CurrentScheduler();
+        auto* scheduler = kernel.CurrentScheduler();
 
-        if (scheduler) {
+        if (scheduler && !kernel.IsPhantomModeForSingleCore()) {
             scheduler->RescheduleCurrentCore();
+        } else {
+            KScheduler::RescheduleCurrentHLEThread(kernel);
         }
     } else {
         GetCurrentThread(kernel).EnableDispatch();