/*
 * ast_check:
 *
 * Examine the state of the processor we are running on and, if
 * warranted, request an AST (asynchronous system trap) so the
 * scheduler gets a chance to act (block or end-of-quantum preempt
 * the current thread).  Runs with interrupts masked at splsched
 * and preemption disabled so that cpu_number() remains stable for
 * the duration of the check.
 *
 * Side effects: may set AST_BLOCK or AST_QUANTUM for this cpu via
 * ast_on(); no return value.
 */
void ast_check(void) {
	register int mycpu;
	register processor_t myprocessor;
	register thread_t thread = current_thread();
	spl_t s = splsched();

	/*
	 * Preemption must be off before cpu_number() is read, so the
	 * cpu we inspect is the cpu we stay on; re-enabled at the end.
	 */
	mp_disable_preemption();
	mycpu = cpu_number();

	/*
	 * Check processor state for ast conditions.
	 */
	myprocessor = cpu_to_processor(mycpu);
	switch(myprocessor->state) {
	case PROCESSOR_OFF_LINE:
	case PROCESSOR_IDLE:
	case PROCESSOR_DISPATCHING:
		/*
		 * No ast.
		 */
		break;

#if NCPUS > 1
	case PROCESSOR_ASSIGN:
	case PROCESSOR_SHUTDOWN:
		/*
		 * Need ast to force action thread onto processor.
		 *
		 * XXX Should check if action thread is already there.
		 */
		ast_on(mycpu, AST_BLOCK);
		break;
#endif	/* NCPUS > 1 */

	case PROCESSOR_RUNNING:
	case PROCESSOR_VIDLE:
		/*
		 * Propagate thread ast to processor.  If we already
		 * need an ast, don't look for more reasons.
		 */
		ast_propagate(current_act(), mycpu);
		if (ast_needed(mycpu))
			break;

		/*
		 * Context switch check: AST_BLOCK while still in the
		 * first quantum, AST_QUANTUM once it has expired.
		 */
		if (csw_needed(thread, myprocessor)) {
			ast_on(mycpu, (myprocessor->first_quantum ?
				AST_BLOCK : AST_QUANTUM));
		}
		break;

	default:
		panic("ast_check: Bad processor state");
	}
	mp_enable_preemption();
	splx(s);
}
/*
 * ast_check:
 *
 * Examine the state of the processor we are running on and, if
 * warranted, request an AST (asynchronous system trap) so the
 * scheduler can preempt the current thread.  Runs at splsched.
 *
 * Unlike the simple csw_needed() path, the PROCESSOR_RUNNING case
 * inspects the processor-set run queue directly: the lazily
 * maintained rq->low hint may be stale, and repairing it here
 * avoids requesting a spurious ast.
 *
 * NOTE(review): cpu_number() is read before any preemption guard;
 * presumably this variant predates (or does not need) the
 * mp_disable_preemption() pairing seen elsewhere — confirm against
 * this kernel's preemption model.
 *
 * Side effects: may set AST_BLOCK for this cpu, may update
 * rq->low under rq->lock, and may set myprocessor->first_quantum
 * for fixed-priority threads; no return value.
 */
void ast_check(void) {
	int mycpu = cpu_number();
	processor_t myprocessor;
	thread_t thread = current_thread();
	run_queue_t rq;
	spl_t s = splsched();

	/*
	 * Check processor state for ast conditions.
	 */
	myprocessor = cpu_to_processor(mycpu);
	switch(myprocessor->state) {
	case PROCESSOR_OFF_LINE:
	case PROCESSOR_IDLE:
	case PROCESSOR_DISPATCHING:
		/*
		 * No ast.
		 */
		break;

#if NCPUS > 1
	case PROCESSOR_ASSIGN:
	case PROCESSOR_SHUTDOWN:
		/*
		 * Need ast to force action thread onto processor.
		 *
		 * XXX Should check if action thread is already there.
		 */
		ast_on(mycpu, AST_BLOCK);
		break;
#endif	/* NCPUS > 1 */

	case PROCESSOR_RUNNING:
		/*
		 * Propagate thread ast to processor.  If we already
		 * need an ast, don't look for more reasons.
		 */
		ast_propagate(thread, mycpu);
		if (ast_needed(mycpu))
			break;

		/*
		 * Context switch check.  The csw_needed macro isn't
		 * used here because the rq->low hint may be wrong,
		 * and fixing it here avoids an extra ast.
		 * First check the easy cases: a suspended thread, or
		 * anything waiting on this processor's local runq.
		 */
		if (thread->state & TH_SUSP || myprocessor->runq.count > 0) {
			ast_on(mycpu, AST_BLOCK);
			break;
		}

		/*
		 * Update lazy evaluated runq->low if only timesharing.
		 */
#if MACH_FIXPRI
		if (myprocessor->processor_set->policies & POLICY_FIXEDPRI) {
			if (csw_needed(thread,myprocessor)) {
				ast_on(mycpu, AST_BLOCK);
				break;
			} else {
				/*
				 * For fixed priority threads, set first_quantum
				 * so entire new quantum is used.
				 */
				if (thread->policy == POLICY_FIXEDPRI)
					myprocessor->first_quantum = TRUE;
			}
		} else {
#endif	/* MACH_FIXPRI */
			rq = &(myprocessor->processor_set->runq);
			if (!(myprocessor->first_quantum) && (rq->count > 0)) {
				queue_t q;
				/*
				 * This is not the first quantum, and there may
				 * be something in the processor_set runq.
				 * Check whether low hint is accurate.
				 * The volatile read forces an unlocked snapshot
				 * of rq->low for the quick check below.
				 */
				q = rq->runq + *(volatile int *)&rq->low;
				if (queue_empty(q)) {
					int i;

					/*
					 * Need to recheck and possibly update hint.
					 * Re-read rq->low under the lock, then scan
					 * upward for the first non-empty queue.
					 */
					simple_lock(&rq->lock);
					q = rq->runq + rq->low;
					if (rq->count > 0) {
						for (i = rq->low; i < NRQS; i++) {
							if(!(queue_empty(q)))
								break;
							q++;
						}
						rq->low = i;
					}
					simple_unlock(&rq->lock);
				}

				/*
				 * Mach priorities: numerically lower is more
				 * urgent, so a runnable thread at rq->low that
				 * is <= our sched_pri should preempt us.
				 */
				if (rq->low <= thread->sched_pri) {
					ast_on(mycpu, AST_BLOCK);
					break;
				}
			}
#if MACH_FIXPRI
		}
#endif	/* MACH_FIXPRI */
		break;

	default:
		/*
		 * NOTE(review): %08x applied to a pointer truncates on
		 * 64-bit targets; %p would be correct if this kernel's
		 * panic() supports it — confirm before changing.
		 */
		panic("ast_check: Bad processor state (cpu %d processor %08x) state: %d",
		    mycpu, myprocessor, myprocessor->state);
	}
	(void) splx(s);
}