hle: kernel: KConditionVariable: Various updates & simplifications.

bunnei 2021-11-09 22:06:49 -08:00
parent f62c7091a2
commit f3d6e31e78
2 changed files with 67 additions and 123 deletions

src/core/hle/kernel/k_condition_variable.cpp

@@ -121,26 +121,31 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
         // Determine the next tag.
         u32 next_value{};
-        if (next_owner_thread) {
+        if (next_owner_thread != nullptr) {
             next_value = next_owner_thread->GetAddressKeyValue();
             if (num_waiters > 1) {
                 next_value |= Svc::HandleWaitMask;
             }
-            next_owner_thread->EndWait(ResultSuccess);
-        }
 
             // Write the value to userspace.
-        if (!WriteToUser(system, addr, std::addressof(next_value))) {
-            if (next_owner_thread) {
-                next_owner_thread->SetWaitResult(ResultInvalidCurrentMemory);
+            ResultCode result{ResultSuccess};
+            if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
+                result = ResultSuccess;
+            } else {
+                result = ResultInvalidCurrentMemory;
             }
-            return ResultInvalidCurrentMemory;
-        }
-    }
+
+            // Signal the next owner thread.
+            next_owner_thread->EndWait(result);
+            return result;
+        } else {
+            // Just write the value to userspace.
+            R_UNLESS(WriteToUser(system, addr, std::addressof(next_value)),
+                     ResultInvalidCurrentMemory);
 
-    return ResultSuccess;
+            return ResultSuccess;
+        }
+    }
 }
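Note (not part of the commit itself): the rework above decides the result of the userspace write first and then wakes the next owner exactly once with that result, rather than ending its wait with success and patching the result on failure. A minimal standalone sketch of that flow, using illustrative stand-in names (Waiter, write_to_user) rather than yuzu's types:

// Standalone sketch, not yuzu code: names below are illustrative stand-ins.
#include <cstdint>
#include <iostream>
#include <optional>

enum class Result { Success, InvalidCurrentMemory };

struct Waiter {
    void EndWait(Result r) { wait_result = r; }  // stand-in for KThread::EndWait
    std::optional<Result> wait_result;
    uint32_t address_key_value = 0x42;           // stand-in for GetAddressKeyValue()
};

// Stand-in for WriteToUser; `writable` simulates whether the address is mapped.
bool write_to_user(uint32_t* dst, uint32_t value, bool writable) {
    if (writable) {
        *dst = value;
    }
    return writable;
}

Result signal_to_address(uint32_t* user_word, Waiter* next_owner, bool writable) {
    uint32_t next_value = 0;
    if (next_owner != nullptr) {
        next_value = next_owner->address_key_value;

        // Decide the result of the userspace write first...
        const Result result = write_to_user(user_word, next_value, writable)
                                  ? Result::Success
                                  : Result::InvalidCurrentMemory;

        // ...then wake the next owner exactly once, with that final result.
        next_owner->EndWait(result);
        return result;
    }

    // No next owner: just publish the cleared tag.
    return write_to_user(user_word, next_value, writable) ? Result::Success
                                                          : Result::InvalidCurrentMemory;
}

int main() {
    uint32_t word = 0;
    Waiter waiter;
    const Result r = signal_to_address(&word, &waiter, true);
    std::cout << "word=" << word << " woken=" << waiter.wait_result.has_value()
              << " ok=" << (r == Result::Success) << '\n';
}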
@@ -148,30 +153,26 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
     ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
 
     // Wait for the address.
-    {
-        KScopedAutoObject<KThread> owner_thread;
-        ASSERT(owner_thread.IsNull());
+    KThread* owner_thread{};
     {
         KScopedSchedulerLock sl(kernel);
-        cur_thread->SetWaitResult(ResultSuccess);
 
         // Check if the thread should terminate.
         R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
 
-        {
         // Read the tag from userspace.
         u32 test_tag{};
-        R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
-                 ResultInvalidCurrentMemory);
+        R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);
 
         // If the tag isn't the handle (with wait mask), we're done.
-        R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), ResultSuccess);
+        R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));
 
         // Get the lock owner thread.
-        owner_thread =
-            kernel.CurrentProcess()->GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(
-                handle);
-        R_UNLESS(owner_thread.IsNotNull(), ResultInvalidHandle);
+        owner_thread = kernel.CurrentProcess()
+                           ->GetHandleTable()
+                           .GetObjectWithoutPseudoHandle<KThread>(handle)
+                           .ReleasePointerUnsafe();
+        R_UNLESS(owner_thread != nullptr, ResultInvalidHandle);
 
         // Update the lock.
         cur_thread->SetAddressKey(addr, value);
@@ -182,24 +183,15 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
         cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
         cur_thread->SetMutexWaitAddressForDebugging(addr);
     }
-        }
-        ASSERT(owner_thread.IsNotNull());
-    }
 
-    // Remove the thread as a waiter from the lock owner.
-    {
-        KScopedSchedulerLock sl(kernel);
-        KThread* owner_thread = cur_thread->GetLockOwner();
-        if (owner_thread != nullptr) {
-            owner_thread->RemoveWaiter(cur_thread);
-        }
-    }
+    // Close our reference to the owner thread, now that the wait is over.
+    owner_thread->Close();
 
     // Get the wait result.
     return cur_thread->GetWaitResult();
 }
 
-KThread* KConditionVariable::SignalImpl(KThread* thread) {
+void KConditionVariable::SignalImpl(KThread* thread) {
     // Check pre-conditions.
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
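Note (not part of the commit itself): WaitForAddress now holds the owner as a raw pointer obtained through ReleasePointerUnsafe() and closes that reference explicitly once the wait is over, and the manual RemoveWaiter block disappears because the wait queue's end-of-wait path performs that cleanup. A minimal sketch of the explicit open/close reference pattern, with RefCounted as an illustrative stand-in rather than a yuzu type:

// Standalone sketch, not yuzu code: explicit reference counting in place of a
// scoped auto-object. RefCounted is an illustrative stand-in for KAutoObject.
#include <cstdio>

struct RefCounted {
    int refs = 1;
    void Open() { ++refs; }
    void Close() {
        if (--refs == 0) {
            std::puts("last reference dropped, object destroyed");
        }
    }
};

// Stand-in for GetObjectWithoutPseudoHandle(...).ReleasePointerUnsafe():
// returns a raw pointer that already owns one reference.
RefCounted* lookup_opened(RefCounted& object) {
    object.Open();
    return &object;
}

int main() {
    RefCounted owner_thread;  // stand-in for the owner KThread
    RefCounted* owner = lookup_opened(owner_thread);

    // ... block on the owner; the wait queue wakes us later ...

    owner->Close();        // explicit Close() once the wait is over
    owner_thread.Close();  // drop the original reference held by its creator
}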
@@ -213,14 +205,13 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
         // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
         // TODO(bunnei): We should call CanAccessAtomic(..) here.
         can_access = true;
-        if (can_access) {
+        if (can_access) [[likely]] {
             UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
                              Svc::HandleWaitMask);
         }
     }
 
-    KThread* thread_to_close = nullptr;
-    if (can_access) {
+    if (can_access) [[likely]] {
         if (prev_tag == Svc::InvalidHandle) {
             // If nobody held the lock previously, we're all good.
             thread->EndWait(ResultSuccess);
@@ -232,10 +223,10 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
                                             static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
                                         .ReleasePointerUnsafe();
 
-            if (owner_thread) {
+            if (owner_thread) [[likely]] {
                 // Add the thread as a waiter on the owner.
                 owner_thread->AddWaiter(thread);
-                thread_to_close = owner_thread;
+                owner_thread->Close();
             } else {
                 // The lock was tagged with a thread that doesn't exist.
                 thread->EndWait(ResultInvalidState);
@@ -245,20 +236,11 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
         // If the address wasn't accessible, note so.
         thread->EndWait(ResultInvalidCurrentMemory);
     }
-
-    return thread_to_close;
 }
 
 void KConditionVariable::Signal(u64 cv_key, s32 count) {
-    // Prepare for signaling.
-    constexpr int MaxThreads = 16;
-    KLinkedList<KThread> thread_list{kernel};
-    std::array<KThread*, MaxThreads> thread_array;
-    s32 num_to_close{};
-
     // Perform signaling.
-    s32 num_waiters{};
+    int num_waiters = 0;
     {
         KScopedSchedulerLock sl(kernel);
@@ -267,14 +249,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
                (it->GetConditionVariableKey() == cv_key)) {
             KThread* target_thread = std::addressof(*it);
 
-            if (KThread* thread = SignalImpl(target_thread); thread != nullptr) {
-                if (num_to_close < MaxThreads) {
-                    thread_array[num_to_close++] = thread;
-                } else {
-                    thread_list.push_back(*thread);
-                }
-            }
+            this->SignalImpl(target_thread);
 
             it = thread_tree.erase(it);
             target_thread->ClearConditionVariable();
             ++num_waiters;
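Note (not part of the commit itself): since SignalImpl() now closes the owner reference itself and returns void, Signal() can drop the MaxThreads array and the overflow list of threads to close later, and simply signal each matching waiter while erasing it from the tree. A minimal sketch of that loop, modelling the waiter tree as a std::multimap with illustrative names:

// Standalone sketch, not yuzu code: the simplified signal loop, with the waiter
// tree modeled as a std::multimap keyed by the condition-variable key.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct Waiter {
    std::string name;
};

// Stand-in for SignalImpl(): wakes the waiter (or re-parks it on the mutex
// owner) and releases any owner reference itself, so nothing is deferred.
void signal_impl(Waiter& w) {
    std::cout << "signaled " << w.name << '\n';
}

void signal(std::multimap<uint64_t, Waiter>& tree, uint64_t cv_key, int count) {
    auto it = tree.lower_bound(cv_key);
    int num_waiters = 0;
    while (it != tree.end() && it->first == cv_key && (count <= 0 || num_waiters < count)) {
        signal_impl(it->second);
        it = tree.erase(it);  // the waiter leaves the condition-variable tree
        ++num_waiters;
    }
    // If nothing is left waiting on this key, the real code clears the
    // userspace "has waiters" flag here.
    if (it == tree.end() || it->first != cv_key) {
        std::cout << "no more waiters on key " << cv_key << '\n';
    }
}

int main() {
    std::multimap<uint64_t, Waiter> tree{{1, {"a"}}, {1, {"b"}}, {2, {"c"}}};
    signal(tree, 1, -1);  // count <= 0 means "wake everyone" in this sketch
}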
@@ -282,33 +257,20 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
         // If we have no waiters, clear the has waiter flag.
         if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
-            const u32 has_waiter_flag{};
+            const u32 has_waiter_flag = 0;
             WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
         }
     }
-
-    // Close threads in the array.
-    for (auto i = 0; i < num_to_close; ++i) {
-        thread_array[i]->Close();
-    }
-
-    // Close threads in the list.
-    for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
-        (*it).Close();
-    }
 }
 
 ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
     // Prepare to wait.
-    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+    KThread* cur_thread = GetCurrentThreadPointer(kernel);
     ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(
         kernel, std::addressof(thread_tree));
 
     {
-        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
-
-        // Set the synced object.
-        cur_thread->SetWaitResult(ResultTimedOut);
+        KScopedSchedulerLockAndSleep slp(kernel, cur_thread, timeout);
 
         // Check that the thread isn't terminating.
         if (cur_thread->IsTerminationRequested()) {
@@ -350,38 +312,20 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
             }
         }
 
+        // If timeout is zero, time out.
+        R_UNLESS(timeout != 0, ResultTimedOut);
+
         // Update condition variable tracking.
-        {
         cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
         thread_tree.insert(*cur_thread);
-        }
 
-        // If the timeout is non-zero, set the thread as waiting.
-        if (timeout != 0) {
+        // Begin waiting.
         cur_thread->BeginWait(std::addressof(wait_queue));
         cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
         cur_thread->SetMutexWaitAddressForDebugging(addr);
-        }
     }
 
-    // Cancel the timer wait.
-    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
-
-    // Remove from the condition variable.
-    {
-        KScopedSchedulerLock sl(kernel);
-        if (KThread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
-            owner->RemoveWaiter(cur_thread);
-        }
-        if (cur_thread->IsWaitingForConditionVariable()) {
-            thread_tree.erase(thread_tree.iterator_to(*cur_thread));
-            cur_thread->ClearConditionVariable();
-        }
-    }
-
-    // Get the result.
+    // Get the wait result.
     return cur_thread->GetWaitResult();
 }
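Note (not part of the commit itself): Wait() now fails fast with ResultTimedOut when the timeout is zero, always begins the wait otherwise, and leaves removal from the tree to the wait queue's end-of-wait and cancel callbacks instead of a manual cleanup block after the sleep. A minimal sketch of that 'cleanup lives in the queue' idea, with illustrative stand-in names:

// Standalone sketch, not yuzu code: the end-of-wait callback owns the cleanup,
// so the waiting function itself no longer needs a removal block.
#include <cstdint>
#include <iostream>
#include <map>

struct Waiter;

// Stand-in for the thread queue: it knows the tree and removes the waiter
// from it when the wait ends (by signal, timeout, or cancellation).
struct WaitQueue {
    std::multimap<uint64_t, Waiter*>* tree;
    void EndWait(Waiter& w, int result);
};

struct Waiter {
    uint64_t key = 0;
    int wait_result = -1;  // -1 stands in for "timed out" in this sketch
};

void WaitQueue::EndWait(Waiter& w, int result) {
    // Cleanup happens here, not in the waiting function: erase the waiter.
    for (auto it = tree->lower_bound(w.key); it != tree->end() && it->first == w.key; ++it) {
        if (it->second == &w) {
            tree->erase(it);
            break;
        }
    }
    w.wait_result = result;
}

int main() {
    std::multimap<uint64_t, Waiter*> tree;
    WaitQueue queue{&tree};

    Waiter w;
    w.key = 0x1234;
    tree.emplace(w.key, &w);  // "update condition variable tracking"

    queue.EndWait(w, 0);      // a signal (or timeout) ends the wait and cleans up
    std::cout << "waiters left=" << tree.size() << " result=" << w.wait_result << '\n';
}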

src/core/hle/kernel/k_condition_variable.h

@@ -34,7 +34,7 @@
     [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);
 
 private:
-    [[nodiscard]] KThread* SignalImpl(KThread* thread);
+    void SignalImpl(KThread* thread);
 
     ThreadTree thread_tree;