// Peek at the next ready thread for a core without removing it from the run queue.
static OSThread *
peekNextThreadNoLock(uint32_t core)
{
   emuassert(isSchedulerLocked());
   auto thread = sCoreRunQueue[core]->head;

   if (thread) {
      emuassert(thread->state == OSThreadState::Ready);
      emuassert(thread->suspendCounter == 0);
      emuassert(thread->attr & (1 << core));
   }

   return thread;
}
void
markThreadInactiveNoLock(OSThread *thread)
{
   emuassert(ActiveQueue::contains(sActiveThreads, thread));
   ActiveQueue::erase(sActiveThreads, thread);
   checkActiveThreadsNoLock();
}
void
disableScheduler()
{
   emuassert(!OSIsInterruptEnabled());
   uint32_t coreId = cpu::this_core::id();
   sSchedulerEnabled[coreId] = false;
}
void
markThreadActiveNoLock(OSThread *thread)
{
   emuassert(!ActiveQueue::contains(sActiveThreads, thread));
   ActiveQueue::append(sActiveThreads, thread);
   checkActiveThreadsNoLock();
}
void
unlockScheduler()
{
   auto core = 1 << cpu::this_core::id();

   // The release ordering publishes all scheduler-state writes made while
   // the lock was held; the lock word must still hold our core's bit.
   auto oldCore = sSchedulerLock.exchange(0, std::memory_order_release);
   emuassert(oldCore == core);
}
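// For context: a minimal sketch of what the matching lockScheduler() might
// look like, assuming sSchedulerLock is a std::atomic<uint32_t> holding the
// owning core's bit (0 when unlocked). The real implementation is not part
// of this listing; this only illustrates the acquire side that pairs with
// the release exchange above.
static void
lockSchedulerSketch()
{
   auto core = 1u << cpu::this_core::id();
   auto expected = 0u;

   // Spin until we swap our core bit in while the lock is free; acquire
   // ordering makes the previous owner's writes visible to us.
   while (!sSchedulerLock.compare_exchange_weak(expected, core,
                                                std::memory_order_acquire)) {
      expected = 0u;
   }
}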
void
handleAlarmInterrupt(OSContext *context)
{
   auto core_id = cpu::this_core::id();
   auto queue = sAlarmQueue[core_id];
   auto cbQueue = sAlarmCallbackQueue[core_id];
   auto cbThreadQueue = sAlarmCallbackThreadQueue[core_id];

   auto now = OSGetTime();
   auto next = std::chrono::time_point<std::chrono::system_clock>::max();
   bool callbacksNeeded = false;

   internal::lockScheduler();
   acquireIdLock(sAlarmLock);

   for (OSAlarm *alarm = queue->head; alarm; ) {
      auto nextAlarm = alarm->link.next;

      // Expire the alarm if it is past its nextFire time
      if (alarm->nextFire <= now) {
         emuassert(alarm->state == OSAlarmState::Set);

         internal::AlarmQueue::erase(queue, alarm);
         alarm->alarmQueue = nullptr;
         alarm->state = OSAlarmState::Expired;
         alarm->context = context;

         // Wake anyone blocked on this alarm
         if (alarm->threadQueue.head) {
            wakeupThreadNoLock(&alarm->threadQueue);
            rescheduleOtherCoreNoLock();
         }

         if (alarm->group == 0xFFFFFFFF) {
            // System-internal alarm: run the callback immediately with
            // interrupts masked
            if (alarm->callback) {
               auto originalMask = cpu::this_core::setInterruptMask(0);
               alarm->callback(alarm, context);
               cpu::this_core::setInterruptMask(originalMask);
            }
         } else {
            // User alarm: defer the callback to the per-core alarm
            // callback thread
            internal::AlarmQueue::append(cbQueue, alarm);
            alarm->alarmQueue = cbQueue;
            wakeupThreadNoLock(cbThreadQueue);
         }
      }

      alarm = nextAlarm;
   }

   internal::updateCpuAlarmNoALock();

   releaseIdLock(sAlarmLock);
   internal::unlockScheduler();
}
void
resumeAll()
{
   auto oldState = sIsPaused.exchange(false);
   emuassert(oldState);

   for (auto i = 0; i < 3; ++i) {
      sCorePauseState[i] = nullptr;
   }

   sPauseReleaseCond.notify_all();
}
// Single-step a paused core: plant a breakpoint at the next instruction,
// then resume everything.
static void
stepCore(uint32_t coreId, bool stepOver)
{
   emuassert(sIsPaused.load());

   const cpu::CoreRegs *state = sCorePauseState[coreId];
   uint32_t nextInstr = calculateNextInstr(state, stepOver);
   cpu::addBreakpoint(nextInstr, cpu::SYSTEM_BPFLAG);

   resumeAll();
}
static void
queueThreadNoLock(OSThread *thread)
{
   emuassert(isSchedulerLocked());
   emuassert(!OSIsThreadSuspended(thread));
   emuassert(thread->state == OSThreadState::Ready);
   emuassert(thread->priority >= -1 && thread->priority <= 32);

   // Schedule this thread on any cores which can run it!
   if (thread->attr & OSThreadAttributes::AffinityCPU0) {
      CoreRunQueue0::insert(sCoreRunQueue[0], thread);
   }

   if (thread->attr & OSThreadAttributes::AffinityCPU1) {
      CoreRunQueue1::insert(sCoreRunQueue[1], thread);
   }

   if (thread->attr & OSThreadAttributes::AffinityCPU2) {
      CoreRunQueue2::insert(sCoreRunQueue[2], thread);
   }
}
OSThread *
setThreadActualPriorityNoLock(OSThread *thread, int32_t priority)
{
   emuassert(isSchedulerLocked());
   thread->priority = priority;

   if (thread->state == OSThreadState::Ready) {
      if (thread->suspendCounter == 0) {
         unqueueThreadNoLock(thread);
         queueThreadNoLock(thread);
      }
   } else if (thread->state == OSThreadState::Waiting) {
      // Move towards head of queue if needed
      while (thread->link.prev && priority < thread->link.prev->priority) {
         auto prev = thread->link.prev;
         auto next = thread->link.next;

         thread->link.prev = prev->link.prev;
         thread->link.next = prev;

         prev->link.prev = thread;
         prev->link.next = next;

         // Keep the outer neighbours' links consistent after the swap
         if (thread->link.prev) {
            thread->link.prev->link.next = thread;
         }

         if (next) {
            next->link.prev = prev;
         }
      }

      // Move towards tail of queue if needed
      while (thread->link.next && thread->link.next->priority < priority) {
         auto prev = thread->link.prev;
         auto next = thread->link.next;

         thread->link.prev = next;
         thread->link.next = next->link.next;

         next->link.prev = prev;
         next->link.next = thread;

         // Keep the outer neighbours' links consistent after the swap
         if (thread->link.next) {
            thread->link.next->link.prev = thread;
         }

         if (prev) {
            prev->link.next = next;
         }
      }

      // If we are waiting for a mutex, return its owner
      if (thread->mutex) {
         return thread->mutex->owner;
      }
   }

   return nullptr;
}
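// A minimal standalone sketch (not decaf code) of the same bubble-swap idea
// on a simplified doubly linked list, assuming lower numbers mean higher
// priority as in the scheduler above. It also shows the head update a free
// function can do when it has the list head in hand. All names here are
// hypothetical.
namespace sketch
{

struct Node
{
   int priority;
   Node *prev = nullptr;
   Node *next = nullptr;
};

// Bubble node towards the head while it outranks its predecessor,
// returning the (possibly new) head of the list.
static Node *
bubbleTowardsHead(Node *head, Node *node)
{
   while (node->prev && node->priority < node->prev->priority) {
      auto prev = node->prev;

      // Unlink node and relink it one position earlier
      prev->next = node->next;

      if (node->next) {
         node->next->prev = prev;
      }

      node->prev = prev->prev;
      node->next = prev;
      prev->prev = node;

      if (node->prev) {
         node->prev->next = node;
      } else {
         head = node; // node is now the first element
      }
   }

   return head;
}

} // namespace sketch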
// Package an async FS result into an IO message and post it to the app's
// IO event queue for later callback dispatch.
void
doAsyncFileCallback(FSClient *client, FSCmdBlock *block, FSStatus result, FSAsyncData *asyncData)
{
   emuassert(!asyncData->queue);

   FSAsyncResult *asyncRes = coreinit::internal::sysAlloc<FSAsyncResult>();
   asyncRes->userParams = *asyncData;
   asyncRes->status = result;
   asyncRes->client = client;
   asyncRes->block = block;

   asyncRes->ioMsg.message = asyncRes;
   asyncRes->ioMsg.args[2] = AppIoEventType::FsAsyncCallback;

   internal::sendMessage(&asyncRes->ioMsg);
}
int32_t
calculateThreadPriorityNoLock(OSThread *thread)
{
   emuassert(isSchedulerLocked());
   auto priority = thread->basePriority;

   // If thread is holding a spinlock, it is always highest priority
   if (thread->context.spinLockCount > 0) {
      return 0;
   }

   // For every mutex we own, boost our priority over anyone waiting to own it
   for (auto mutex = thread->mutexQueue.head; mutex; mutex = mutex->link.next) {
      // We only need to check the head of the mutex thread queue as it is
      // in priority order
      auto other = mutex->queue.head;

      if (other && other->priority < priority) {
         priority = other->priority;
      }
   }

   // TODO: Owned Fast Mutex queue
   return priority;
}
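// A minimal sketch (not decaf code) of the priority-inheritance rule used
// above: a holder's effective priority is the strongest (numerically
// smallest) of its base priority and the priority of the best waiter on
// each mutex it owns. All names here are hypothetical.
namespace sketch
{

struct Waiter
{
   int priority;
   Waiter *next = nullptr;
};

struct Mutex
{
   Waiter *bestWaiter = nullptr; // head of a priority-ordered wait queue
   Mutex *nextOwned = nullptr;   // next mutex owned by the same thread
};

static int
effectivePriority(int basePriority, const Mutex *ownedMutexes)
{
   auto priority = basePriority;

   for (auto mutex = ownedMutexes; mutex; mutex = mutex->nextOwned) {
      // Wait queues are priority ordered, so only the head matters
      if (mutex->bestWaiter && mutex->bestWaiter->priority < priority) {
         priority = mutex->bestWaiter->priority;
      }
   }

   return priority;
}

} // namespace sketch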
void
checkRunningThreadNoLock(bool yielding)
{
   emuassert(isSchedulerLocked());
   auto coreId = cpu::this_core::id();
   auto thread = sCurrentThread[coreId];

   // Do a check to see if anything has become corrupted...
   if (thread) {
      checkActiveThreadsNoLock();
   }

   if (!sSchedulerEnabled[coreId]) {
      return;
   }

   auto next = peekNextThreadNoLock(coreId);

   if (thread && thread->suspendCounter <= 0 && thread->state == OSThreadState::Running) {
      if (!next) {
         // There is no other viable thread, keep running current.
         return;
      }

      if (thread->priority < next->priority) {
         // Next thread has lower priority, keep running current.
         return;
      } else if (!yielding && thread->priority == next->priority) {
         // Next thread has same priority, but we are not yielding.
         return;
      }
   }

   // If the current thread is still Running, put it back into the Ready
   // state and requeue it
   if (thread && thread->state == OSThreadState::Running) {
      thread->state = OSThreadState::Ready;
      queueThreadNoLock(thread);
   }

   // *snip* log thread switch *snip* ...
   const char *threadName = "?";
   const char *nextName = "?";

   if (thread && thread->name) {
      threadName = thread->name;
   }

   if (next && next->name) {
      nextName = next->name;
   }

   if (thread) {
      if (next) {
         gLog->trace("Core {} leaving thread {}[{}] to thread {}[{}]",
                     coreId, thread->id, threadName, next->id, nextName);
      } else {
         gLog->trace("Core {} leaving thread {}[{}] to idle",
                     coreId, thread->id, threadName);
      }
   } else {
      if (next) {
         gLog->trace("Core {} leaving idle to thread {}[{}]",
                     coreId, next->id, nextName);
      } else {
         gLog->trace("Core {} leaving idle to idle", coreId);
      }
   }

   // Remove next thread from the run queue
   if (next) {
      next->state = OSThreadState::Running;
      unqueueThreadNoLock(next);
   }

   // Update per-core time tracking for the outgoing thread
   if (thread) {
      auto now = std::chrono::high_resolution_clock::now();
      auto diff = now - sLastSwitchTime[coreId];
      thread->coreTimeConsumedNs += diff.count();
      sLastSwitchTime[coreId] = now;
   }

   if (next) {
      next->wakeCount++;
   }

   // Switch to the next thread's context
   sCurrentThread[coreId] = next;
   internal::unlockScheduler();
   kernel::setContext(&next->context);
   internal::lockScheduler();

   if (thread) {
      checkActiveThreadsNoLock();
   }
}
// Check the stack canary and that the thread can run on at least one core.
static void
validateThread(OSThread *thread)
{
   emuassert(*thread->stackEnd == 0xDEADBABE);
   emuassert((thread->attr & OSThreadAttributes::AffinityAny) != 0);
}