void suspend(tcb_t *target)
{
    /* Cancel any in-progress IPC, mark the thread inactive and take it
     * off the ready queue. */
    ipcCancel(target);
    setThreadState(target, ThreadState_Inactive);
    tcbSchedDequeue(target);
}
void switchToThread(tcb_t *thread)
{
    /* Perform the architecture-specific context switch, remove the thread
     * from the ready queue and make it the current thread. */
    Arch_switchToThread(thread);
    tcbSchedDequeue(thread);
    ksCurThread = thread;
}
static void handleYield(void)
{
    /* Move the current thread to the end of its ready queue and force a
     * scheduling decision. */
    tcbSchedDequeue(ksCurThread);
    tcbSchedAppend(ksCurThread);
    rescheduleRequired();
}
void switchToThread(tcb_t *thread)
{
#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    /* Account the context switch for CPU utilisation benchmarking. */
    benchmark_utilisation_switch(ksCurThread, thread);
#endif
    Arch_switchToThread(thread);
    tcbSchedDequeue(thread);
    ksCurThread = thread;
}
void suspend(tcb_t *target)
{
    /* Cancel any in-progress IPC, delete the caller's reply cap, mark the
     * thread inactive and take it off the ready queue. */
    cancelIPC(target);
    /* if (cap_get_capType(TCB_PTR_CTE_PTR(target, tcbCaller)->cap) == cap_reply_cap) */
    {
        deleteCallerCap(target);
    }
    setThreadState(target, ThreadState_Inactive);
    tcbSchedDequeue(target);
}
void setPolicy(tcb_t *tptr, policy_t policy)
{
    /* Dequeue, update the scheduling policy, then requeue if the thread is
     * runnable; force a reschedule if the current thread was modified. */
    tcbSchedDequeue(tptr);
    tptr->tcbPolicy = policy;
    if (isRunnable(tptr)) {
        tcbSchedEnqueue(tptr);
    }
    if (tptr == ksCurThread) {
        rescheduleRequired();
    }
}
void setPriority(tcb_t *tptr, prio_t prio)
{
    /* Dequeue, update the priority, then requeue if the thread is
     * runnable; force a reschedule if the current thread was modified. */
    tcbSchedDequeue(tptr);
    tptr->tcbPriority = prio;
    if (isRunnable(tptr)) {
        tcbSchedEnqueue(tptr);
    }
    if (tptr == ksCurThread) {
        rescheduleRequired();
    }
}
void setDomain(tcb_t *tptr, dom_t dom)
{
    /* Dequeue, update the scheduling domain, then requeue if the thread is
     * runnable; force a reschedule if the current thread was modified. */
    tcbSchedDequeue(tptr);
    tptr->tcbDomain = dom;
    if (isRunnable(tptr)) {
        tcbSchedEnqueue(tptr);
    }
    if (tptr == ksCurThread) {
        rescheduleRequired();
    }
}