@@ -191,7 +191,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
     light_ipc_data = nullptr;
 
     // We're not waiting for a lock, and we haven't disabled migration.
-    lock_owner = nullptr;
+    waiting_lock_info = nullptr;
     num_core_migration_disables = 0;
 
     // We have no waiters, but we do have an entrypoint.
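
The hunk above swaps the cached `lock_owner` pointer for `waiting_lock_info`: a blocked thread now records which lock object it is waiting on, and the owning thread is derived from that object on demand. A minimal sketch of the relationship, using names from this diff but with the surrounding declaration context assumed (the real members live in k_thread.h, which is not part of this hunk):

    // Assumed member sketch, not the actual declaration:
    class KThread {
        // ...
        // The lock this thread is currently blocked on, if any.
        LockWithPriorityInheritanceInfo* waiting_lock_info{};
    };

    // Owner lookup goes through the lock object instead of a per-thread
    // cache (this accessor does appear verbatim later in this diff):
    KThread* KThread::GetLockOwner() const {
        return waiting_lock_info != nullptr ? waiting_lock_info->GetOwner() : nullptr;
    }

Keeping ownership on the lock object gives a single source of truth, so handing a lock to a new owner no longer requires rewriting a cached owner pointer in every waiter.
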
@@ -341,25 +341,39 @@ void KThread::Finalize() {
 
     // Release any waiters.
     {
-        ASSERT(lock_owner == nullptr);
+        ASSERT(waiting_lock_info == nullptr);
         KScopedSchedulerLock sl{kernel};
 
-        auto it = waiter_list.begin();
-        while (it != waiter_list.end()) {
-            // Get the thread.
-            KThread* const waiter = std::addressof(*it);
-
-            // The thread shouldn't be a kernel waiter.
-            ASSERT(!waiter->GetAddressKeyIsKernel());
-
-            // Clear the lock owner.
-            waiter->SetLockOwner(nullptr);
-
-            // Erase the waiter from our list.
-            it = waiter_list.erase(it);
-
-            // Cancel the thread's wait.
-            waiter->CancelWait(ResultInvalidState, true);
+        // Check that we have no kernel waiters.
+        ASSERT(num_kernel_waiters == 0);
+
+        auto it = held_lock_info_list.begin();
+        while (it != held_lock_info_list.end()) {
+            // Get the lock info.
+            auto* const lock_info = std::addressof(*it);
+
+            // The lock shouldn't have a kernel waiter.
+            ASSERT(!lock_info->GetIsKernelAddressKey());
+
+            // Remove all waiters.
+            while (lock_info->GetWaiterCount() != 0) {
+                // Get the front waiter.
+                KThread* const waiter = lock_info->GetHighestPriorityWaiter();
+
+                // Remove it from the lock.
+                if (lock_info->RemoveWaiter(waiter)) {
+                    ASSERT(lock_info->GetWaiterCount() == 0);
+                }
+
+                // Cancel the thread's wait.
+                waiter->CancelWait(ResultInvalidState, true);
+            }
+
+            // Remove the held lock from our list.
+            it = held_lock_info_list.erase(it);
+
+            // Free the lock info.
+            LockWithPriorityInheritanceInfo::Free(kernel, lock_info);
         }
     }
 
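
The rewritten Finalize() walks a list of per-lock records rather than a flat waiter list. The calls made here imply roughly the following interface for LockWithPriorityInheritanceInfo; this is an assumed sketch reconstructed from the call sites in this diff, not the actual declaration:

    // Assumed interface sketch for the lock-info object used above:
    class LockWithPriorityInheritanceInfo {
    public:
        static LockWithPriorityInheritanceInfo* Create(KernelCore& kernel, VAddr address_key,
                                                       bool is_kernel_address_key);
        static void Free(KernelCore& kernel, LockWithPriorityInheritanceInfo* info);

        void SetOwner(KThread* owner);
        KThread* GetOwner() const;

        void AddWaiter(KThread* waiter);
        // Returns true when the removed waiter was the last one.
        bool RemoveWaiter(KThread* waiter);

        KThread* GetHighestPriorityWaiter();
        s32 GetWaiterCount() const;

        VAddr GetAddressKey() const;
        bool GetIsKernelAddressKey() const;
    };

Note the RemoveWaiter() contract: Finalize() asserts GetWaiterCount() == 0 whenever it returns true, and RemoveWaiterImpl() below uses the same return value to decide when to unlink and free the record.
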
@@ -708,6 +722,24 @@ void KThread::SetBasePriority(s32 value) {
     RestorePriority(kernel, this);
 }
 
+KThread* KThread::GetLockOwner() const {
+    return waiting_lock_info != nullptr ? waiting_lock_info->GetOwner() : nullptr;
+}
+
+void KThread::IncreaseBasePriority(s32 priority_) {
+    ASSERT(Svc::HighestThreadPriority <= priority_ && priority_ <= Svc::LowestThreadPriority);
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+    ASSERT(!this->GetStackParameters().is_pinned);
+
+    // Set our base priority.
+    if (base_priority > priority_) {
+        base_priority = priority_;
+
+        // Perform a priority restoration.
+        RestorePriority(kernel, this);
+    }
+}
+
 void KThread::RequestSuspend(SuspendType type) {
     KScopedSchedulerLock sl{kernel};
 
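
IncreaseBasePriority() is a raise-only variant of SetBasePriority(): since lower numeric values mean higher priority, the `base_priority > priority_` guard makes the call a no-op unless it would raise the thread. A usage sketch, assuming a caller that holds the scheduler lock:

    // Assumed caller context: scheduler lock held, thread not pinned
    // (both conditions are asserted inside IncreaseBasePriority).
    KScopedSchedulerLock sl{kernel};
    thread->IncreaseBasePriority(TerminatingThreadPriority); // only ever raises

RequestTerminate() at the end of this diff relies on exactly this semantic to bump a dying thread above system threads without ever lowering it.
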
@@ -891,51 +923,87 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
     R_SUCCEED();
 }
 
-void KThread::AddWaiterImpl(KThread* thread) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
-
-    // Find the right spot to insert the waiter.
-    auto it = waiter_list.begin();
-    while (it != waiter_list.end()) {
-        if (it->GetPriority() > thread->GetPriority()) {
-            break;
-        }
-        it++;
-    }
+void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+
+    // Set ourselves as the lock's owner.
+    lock_info->SetOwner(this);
+
+    // Add the lock to our held list.
+    held_lock_info_list.push_front(*lock_info);
+}
+
+KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key_) {
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+
+    // Try to find an existing held lock.
+    for (auto& held_lock : held_lock_info_list) {
+        if (held_lock.GetAddressKey() == address_key_) {
+            return std::addressof(held_lock);
+        }
+    }
+
+    return nullptr;
+}
+
+void KThread::AddWaiterImpl(KThread* thread) {
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+    ASSERT(thread->GetConditionVariableTree() == nullptr);
+
+    // Get the thread's address key.
+    const auto address_key_ = thread->GetAddressKey();
+    const auto is_kernel_address_key_ = thread->GetIsKernelAddressKey();
 
     // Keep track of how many kernel waiters we have.
-    if (thread->GetAddressKeyIsKernel()) {
+    if (is_kernel_address_key_) {
         ASSERT((num_kernel_waiters++) >= 0);
         KScheduler::SetSchedulerUpdateNeeded(kernel);
     }
 
-    // Insert the waiter.
-    waiter_list.insert(it, *thread);
-    thread->SetLockOwner(this);
+    // Get the relevant lock info.
+    auto* lock_info = this->FindHeldLock(address_key_);
+    if (lock_info == nullptr) {
+        // Create a new lock for the address key.
+        lock_info =
+            LockWithPriorityInheritanceInfo::Create(kernel, address_key_, is_kernel_address_key_);
+
+        // Add the new lock to our list.
+        this->AddHeldLock(lock_info);
+    }
+
+    // Add the thread as waiter to the lock info.
+    lock_info->AddWaiter(thread);
 }
 
 void KThread::RemoveWaiterImpl(KThread* thread) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
 
     // Keep track of how many kernel waiters we have.
-    if (thread->GetAddressKeyIsKernel()) {
+    if (thread->GetIsKernelAddressKey()) {
         ASSERT((num_kernel_waiters--) > 0);
         KScheduler::SetSchedulerUpdateNeeded(kernel);
     }
 
+    // Get the info for the lock the thread is waiting on.
+    auto* lock_info = thread->GetWaitingLockInfo();
+    ASSERT(lock_info->GetOwner() == this);
+
     // Remove the waiter.
-    waiter_list.erase(waiter_list.iterator_to(*thread));
-    thread->SetLockOwner(nullptr);
+    if (lock_info->RemoveWaiter(thread)) {
+        held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info));
+        LockWithPriorityInheritanceInfo::Free(kernel, lock_info);
+    }
 }
 
-void KThread::RestorePriority(KernelCore& kernel_ctx, KThread* thread) {
-    ASSERT(kernel_ctx.GlobalSchedulerContext().IsLocked());
+void KThread::RestorePriority(KernelCore& kernel, KThread* thread) {
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
 
-    while (true) {
+    while (thread != nullptr) {
         // We want to inherit priority where possible.
         s32 new_priority = thread->GetBasePriority();
-        if (thread->HasWaiters()) {
-            new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
+        for (const auto& held_lock : thread->held_lock_info_list) {
+            new_priority =
+                std::min(new_priority, held_lock.GetHighestPriorityWaiter()->GetPriority());
         }
 
         // If the priority we would inherit is not different from ours, don't do anything.
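
The new AddWaiterImpl() groups waiters by the lock they contend on: it finds or creates the LockWithPriorityInheritanceInfo keyed by the waiter's address key, and RestorePriority() then inherits from the best waiter of each held lock. A worked example with hypothetical threads and priorities:

    // Hypothetical scenario, under the scheduler lock: thread A owns the
    // mutex word at `key`; B (priority 20) and C (priority 10) block on it.
    A->AddWaiter(B); // creates the lock info for `key`; B is its only waiter
    A->AddWaiter(C); // reuses the same lock info; C is now its best waiter
    // RestorePriority(kernel, A) computes, across A's held locks,
    //   new_priority = std::min(A->GetBasePriority(), 10)
    // so A runs at C's priority until it releases the lock at `key`.

Compared to the old single sorted waiter_list, this also fixes inheritance when one thread owns several contended locks at once: each lock contributes its own highest-priority waiter to the min.
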
@@ -943,9 +1011,18 @@ void KThread::RestorePriority(KernelCore& kernel_ctx, KThread* thread) {
             return;
         }
 
+        // Get the owner of whatever lock this thread is waiting on.
+        KThread* const lock_owner = thread->GetLockOwner();
+
+        // If the thread is waiting on some lock, remove it as a waiter to prevent violating red
+        // black tree invariants.
+        if (lock_owner != nullptr) {
+            lock_owner->RemoveWaiterImpl(thread);
+        }
+
         // Ensure we don't violate condition variable red black tree invariants.
         if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
-            BeforeUpdatePriority(kernel_ctx, cv_tree, thread);
+            BeforeUpdatePriority(kernel, cv_tree, thread);
         }
 
         // Change the priority.
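
The added lines make the remove-before-mutate rule explicit: a thread that sits in an intrusive red-black tree sorted by priority must be unlinked before its priority changes and relinked afterwards (the matching AddWaiterImpl() call appears in the next hunk). The general pattern, sketched with placeholder names rather than the exact yuzu types:

    // Placeholder `tree`/`elem`, for illustration only:
    tree.erase(tree.iterator_to(elem)); // detach before mutating the sort key
    elem.priority = new_priority;       // safe: elem is not linked anywhere
    tree.insert(elem);                  // relink under the new key

Mutating the key in place would leave the tree ordered by stale values and silently corrupt every later lookup, which is exactly the invariant violation the comments call out.
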
@@ -954,73 +1031,99 @@ void KThread::RestorePriority(KernelCore& kernel_ctx, KThread* thread) {
 
         // Restore the condition variable, if relevant.
         if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
-            AfterUpdatePriority(kernel_ctx, cv_tree, thread);
+            AfterUpdatePriority(kernel, cv_tree, thread);
         }
 
-        // Update the scheduler.
-        KScheduler::OnThreadPriorityChanged(kernel_ctx, thread, old_priority);
-
-        // Keep the lock owner up to date.
-        KThread* lock_owner = thread->GetLockOwner();
-        if (lock_owner == nullptr) {
-            return;
+        // If we removed the thread from some lock's waiting list, add it back.
+        if (lock_owner != nullptr) {
+            lock_owner->AddWaiterImpl(thread);
         }
 
-        // Update the thread in the lock owner's sorted list, and continue inheriting.
-        lock_owner->RemoveWaiterImpl(thread);
-        lock_owner->AddWaiterImpl(thread);
+        // Update the scheduler.
+        KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);
+
+        // Continue inheriting priority.
         thread = lock_owner;
     }
 }
 
 void KThread::AddWaiter(KThread* thread) {
-    AddWaiterImpl(thread);
-    RestorePriority(kernel, this);
+    this->AddWaiterImpl(thread);
+
+    // If the thread has a higher priority than us, we should inherit.
+    if (thread->GetPriority() < this->GetPriority()) {
+        RestorePriority(kernel, this);
+    }
 }
 
 void KThread::RemoveWaiter(KThread* thread) {
-    RemoveWaiterImpl(thread);
-    RestorePriority(kernel, this);
+    this->RemoveWaiterImpl(thread);
+
+    // If our priority is the same as the thread's (and we've inherited), we may need to restore to
+    // lower priority.
+    if (this->GetPriority() == thread->GetPriority() &&
+        this->GetPriority() < this->GetBasePriority()) {
+        RestorePriority(kernel, this);
+    }
 }
 
-KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
-
-    s32 num_waiters{};
-    KThread* next_lock_owner{};
-    auto it = waiter_list.begin();
-    while (it != waiter_list.end()) {
-        if (it->GetAddressKey() == key) {
-            KThread* thread = std::addressof(*it);
-
-            // Keep track of how many kernel waiters we have.
-            if (thread->GetAddressKeyIsKernel()) {
-                ASSERT((num_kernel_waiters--) > 0);
-                KScheduler::SetSchedulerUpdateNeeded(kernel);
-            }
-
-            it = waiter_list.erase(it);
-
-            // Update the next lock owner.
-            if (next_lock_owner == nullptr) {
-                next_lock_owner = thread;
-                next_lock_owner->SetLockOwner(nullptr);
-            } else {
-                next_lock_owner->AddWaiterImpl(thread);
-            }
-            num_waiters++;
-        } else {
-            it++;
-        }
-    }
-
-    // Do priority updates, if we have a next owner.
-    if (next_lock_owner) {
-        RestorePriority(kernel, this);
-        RestorePriority(kernel, next_lock_owner);
-    }
-
-    // Return output.
-    *out_num_waiters = num_waiters;
+KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key) {
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+
+    // Get the relevant lock info.
+    auto* lock_info = this->FindHeldLock(key);
+    if (lock_info == nullptr) {
+        *out_has_waiters = false;
+        return nullptr;
+    }
+
+    // Remove the lock info from our held list.
+    held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info));
+
+    // Keep track of how many kernel waiters we have.
+    if (lock_info->GetIsKernelAddressKey()) {
+        num_kernel_waiters -= lock_info->GetWaiterCount();
+        ASSERT(num_kernel_waiters >= 0);
+        KScheduler::SetSchedulerUpdateNeeded(kernel);
+    }
+
+    ASSERT(lock_info->GetWaiterCount() > 0);
+
+    // Remove the highest priority waiter from the lock to be the next owner.
+    KThread* next_lock_owner = lock_info->GetHighestPriorityWaiter();
+    if (lock_info->RemoveWaiter(next_lock_owner)) {
+        // The new owner was the only waiter.
+        *out_has_waiters = false;
+
+        // Free the lock info, since it has no waiters.
+        LockWithPriorityInheritanceInfo::Free(kernel, lock_info);
+    } else {
+        // There are additional waiters on the lock.
+        *out_has_waiters = true;
+
+        // Add the lock to the new owner's held list.
+        next_lock_owner->AddHeldLock(lock_info);
+
+        // Keep track of any kernel waiters for the new owner.
+        if (lock_info->GetIsKernelAddressKey()) {
+            next_lock_owner->num_kernel_waiters += lock_info->GetWaiterCount();
+            ASSERT(next_lock_owner->num_kernel_waiters > 0);
+
+            // NOTE: No need to set scheduler update needed, because we will have already done so
+            // when removing earlier.
+        }
+    }
+
+    // If our priority is the same as the next owner's (and we've inherited), we may need to restore
+    // to lower priority.
+    if (this->GetPriority() == next_lock_owner->GetPriority() &&
+        this->GetPriority() < this->GetBasePriority()) {
+        RestorePriority(kernel, this);
+        // NOTE: No need to restore priority on the next lock owner, because it was already the
+        // highest priority waiter on the lock.
+    }
+
+    // Return the next lock owner.
     return next_lock_owner;
 }
 
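
RemoveWaiterByKey() now reports `bool* out_has_waiters` rather than `s32* out_num_waiters`: the hand-off pops the highest-priority waiter as the next owner, and callers only need to know whether any contention remains. A hypothetical caller sketch (the real call sites are in the mutex and condition-variable SVC paths, which are not part of this diff; `MakeOwnerTag` and `contention_flag` are placeholders):

    bool has_waiters{};
    if (KThread* next_owner = owner->RemoveWaiterByKey(std::addressof(has_waiters), key)) {
        u32 next_owner_value = MakeOwnerTag(next_owner); // placeholder helper
        if (has_waiters) {
            next_owner_value |= contention_flag; // word is still contended
        }
        // ... store next_owner_value back to the lock word at `key`,
        // then end next_owner's wait ...
    }

The early-out checks added to AddWaiter()/RemoveWaiter() in this hunk serve the same efficiency goal: RestorePriority() is only invoked when the waiter change could actually move this thread's effective priority.
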
@@ -1137,9 +1240,7 @@ ThreadState KThread::RequestTerminate() {
     }
 
     // Change the thread's priority to be higher than any system thread's.
-    if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) {
-        this->SetBasePriority(TerminatingThreadPriority);
-    }
+    this->IncreaseBasePriority(TerminatingThreadPriority);
 
     // If the thread is runnable, send a termination interrupt to other cores.
     if (this->GetState() == ThreadState::Runnable) {
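
Replacing the guarded SetBasePriority() call with IncreaseBasePriority() preserves the behavior while dropping the hand-rolled check. A sketch of the ordering involved, assuming the usual Horizon convention (lower value = higher priority) and that TerminatingThreadPriority sits just above the system-thread range:

    // Assumed values, for illustration only:
    //   Svc::HighestThreadPriority       ==  0
    //   TerminatingThreadPriority        == 15
    //   Svc::SystemThreadPriorityHighest == 16
    //   Svc::LowestThreadPriority        == 63
    // IncreaseBasePriority(TerminatingThreadPriority) raises any thread whose
    // base priority is numerically greater than 15 and leaves the rest alone,
    // which subsumes the old `>= Svc::SystemThreadPriorityHighest` guard.
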