/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 *
 * Returns the chosen thread, already marked TD_SET_RUNNING and with
 * KEF_DIDRUN set on its KSE.  Never returns NULL: when the run queues
 * are empty the per-CPU idle thread is returned instead.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on AP's */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_HADTHREADS) {
			/*
			 * Threaded process: the thread also sits on the
			 * ksegrp's own run queue.  Keep kg_last_assigned
			 * consistent before unlinking it.
			 */
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			kg->kg_runnable--;
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}
	TD_SET_RUNNING(td);
	return (td);
}
void sched_schedule() { // find a thread to schedule. the scheduled thread is removed from the // queue, and the currently-run thread is re-added to the queue. spl_lock(&_sched_lock); thread_t* old = thr_current(); if(old) { switch(old->state) { case Runnable: // don't stop if the thread still has time! if(old->preempt_at > systime()) { goto done; } break; case Yielded: // timeslice is given up. old->state = Runnable; break; default: // not relevant here. break; } } // BUG: this algorithm will start to choose the idle thread when // only one other thread is remaining runnable. thread_t* thr = sched_choose(old); if(thr) { trace("switching: %d:%d (%d)\n", thr->parent->id, thr->id, thr->priority); thr->preempt_at = systime() + SCHED_TIMESLICE_US; thr_switch(thr); sched_remove_unlocked(thr); if(old) sched_add_unlocked(old); goto done; } // let things stay as they are if only one thread exists. if(old->state == Runnable) { goto done; } fatal("no thread left to schedule - this is bad!\n"); done: spl_unlock(&_sched_lock); }
/*
 * Switch execution to `newtd`, or to whatever sched_choose() picks when
 * `newtd` is NULL.  The calling thread, if still TDS_RUNNING, goes back
 * on the run queue first.  No-op while the scheduler is inactive or when
 * the chosen thread is the caller itself.
 */
void sched_switch(thread_t *newtd) {
  if (!sched_active)
    return;

  cs_enter();

  thread_t *self = thread_self();
  self->td_flags &= ~(TDF_NEEDSWITCH | TDF_SLICEEND);

  /* A thread that is still runnable must stay eligible for selection. */
  if (self->td_state == TDS_RUNNING)
    sched_add(self);

  /* Use the explicit successor if given, otherwise ask the scheduler. */
  thread_t *next = (newtd != NULL) ? newtd : sched_choose();
  next->td_state = TDS_RUNNING;

  cs_leave();

  if (next != self)
    ctx_switch(self, next);
}