void ast_taken(void) { thread_t self = current_thread(); ast_t reasons; /* * Interrupts are still disabled. * We must clear need_ast and then enable interrupts. */ reasons = need_ast[cpu_number()]; need_ast[cpu_number()] = AST_ZILCH; (void) spl0(); /* * These actions must not block. */ if (reasons & AST_NETWORK) net_ast(); /* * Make darn sure that we don't call thread_halt_self * or thread_block from the idle thread. */ if (self != current_processor()->idle_thread) { #ifndef MIGRATING_THREADS while (thread_should_halt(self)) thread_halt_self(); #endif /* * One of the previous actions might well have * woken a high-priority thread, so we use * csw_needed in addition to AST_BLOCK. */ if ((reasons & AST_BLOCK) || csw_needed(self, current_processor())) { counter(c_ast_taken_block++); thread_block(thread_exception_return); } } }
/*
 * ast_taken:
 *
 *	Handle the asynchronous system traps (ASTs) pending for this
 *	CPU that are selected by 'mask'.  Entered with interrupts
 *	disabled; we atomically consume the selected reasons (with
 *	preemption disabled so we stay on this CPU), restore the
 *	caller's spl, and then act on each reason in turn.
 *
 *	preemption:	TRUE if entered from a preemption point
 *	mask:		which AST reasons the caller allows us to handle
 *	old_spl:	interrupt level to restore once need_ast is cleared
 *	thread_type:	(FAST_IDLE only) NO_IDLE_THREAD for normal threads
 */
void ast_taken(
	boolean_t	preemption,
	ast_t		mask,
	spl_t		old_spl
#if FAST_IDLE
	,int		thread_type
#endif /* FAST_IDLE */
)
{
	register thread_t	self = current_thread();
	register processor_t	mypr;
	register ast_t		reasons;
	register int		mycpu;
	/* NOTE(review): 'act' is never read below -- presumably leftover; confirm */
	thread_act_t act = self->top_act;

	/*
	 * Interrupts are still disabled.
	 * We must clear need_ast and then enable interrupts.
	 */
	extern void log_thread_action(thread_t, char *);

#if 0
	log_thread_action (current_thread(), "ast_taken");
#endif

	/*
	 * Consume only the reasons selected by 'mask', with preemption
	 * disabled so need_ast[mycpu] is this CPU's slot throughout.
	 * Bits not in 'mask' are left pending for a later ast_taken.
	 */
	mp_disable_preemption();
	mycpu = cpu_number();
	reasons = need_ast[mycpu] & mask;
	need_ast[mycpu] &= ~reasons;
	mp_enable_preemption();

	/* Safe to take interrupts again at the caller's level. */
	splx(old_spl);

	/*
	 * These actions must not block.
	 */
#if MCMSG
	if (reasons & AST_MCMSG)
		mcmsg_ast();
#endif /* MCMSG */

	if (reasons & AST_NETWORK)
		net_ast();

#if MCMSG_ENG
	/* Message/RDMA engine interrupt work deferred to AST level. */
	if (reasons & AST_RPCREQ)
		rpc_engine_request_intr();
	if (reasons & AST_RPCREPLY)
		rpc_engine_reply_intr();
	if (reasons & AST_RPCDEPART)
		rpc_engine_depart_intr();
	if (reasons & AST_RDMASEND)
		rdma_engine_send_intr();
	if (reasons & AST_RDMARECV)
		rdma_engine_recv_intr();
	if (reasons & AST_RDMATXF)
		rdma_engine_send_fault_intr();
	if (reasons & AST_RDMARXF)
		rdma_engine_recv_fault_intr();
#endif /* MCMSG_ENG */

#if PARAGON860 && MCMSG_ENG
	if (reasons & AST_SCAN_INPUT)
		scan_input_ast();
#endif /* PARAGON860 */

#if DIPC
	if (reasons & AST_DIPC)
		dipc_ast();
#endif /* DIPC */

	/*
	 * Make darn sure that we don't call thread_halt_self
	 * or thread_block from the idle thread.
	 */
	/* XXX - this isn't currently right for the HALT case...
	 */
	mp_disable_preemption();
	mypr = current_processor();
	if (self == mypr->idle_thread) {
#if NCPUS == 1
		/*
		 * On UP, an urgent AST for the idle thread is only
		 * legitimate when delivered through a preemption point.
		 */
		if (reasons & AST_URGENT) {
			if (!preemption)
				panic("ast_taken: AST_URGENT for idle_thr w/o preemption");
		}
#endif
		mp_enable_preemption();
		return;
	}
	mp_enable_preemption();

#if FAST_IDLE
	/* Fast-idle threads never block here either. */
	if (thread_type != NO_IDLE_THREAD)
		return;
#endif /* FAST_IDLE */

#if TASK_SWAPPER
	/* must be before AST_APC */
	if (reasons & AST_SWAPOUT) {
		spl_t s;

		swapout_ast();

		/*
		 * swapout_ast() may have posted AST_APC on this CPU;
		 * pull it into 'reasons' so it is handled immediately
		 * below rather than on a later trip through here.
		 */
		s = splsched();
		mp_disable_preemption();
		mycpu = cpu_number();
		if (need_ast[mycpu] & AST_APC) {
			/* generated in swapout_ast() to get suspended */
			reasons |= AST_APC;		/* process now ... */
			need_ast[mycpu] &= ~AST_APC;	/* ... and not later */
		}
		mp_enable_preemption();
		splx(s);
	}
#endif /* TASK_SWAPPER */

	/* migration APC hook */
	if (reasons & AST_APC) {
		act_execute_returnhandlers();
		return;	/* auto-retry will catch anything new */
	}

	/*
	 * thread_block needs to know if the thread's quantum
	 * expired so the thread can be put on the tail of
	 * run queue. One of the previous actions might well
	 * have woken a high-priority thread, so we also use
	 * csw_needed check.
	 */
	{
		void (*safept)(void) = (void (*)(void))SAFE_EXCEPTION_RETURN;

		/*
		 * NOTE(review): '&=' (not '&') appears intentional --
		 * it narrows 'reasons' to just the preempt bits before
		 * they are handed to thread_block_reason(); confirm
		 * against the other ports before "fixing".
		 */
		if (reasons &= AST_PREEMPT) {
			/* From a preemption point we may return directly. */
			if (preemption)
				safept = (void (*)(void)) 0;
		} else {
			/*
			 * No explicit preempt request: still block if a
			 * context switch is needed.  first_quantum set
			 * means the quantum expired, so the thread goes
			 * to the tail of the run queue (AST_QUANTUM).
			 */
			mp_disable_preemption();
			mypr = current_processor();
			if (csw_needed(self, mypr)) {
				reasons = (mypr->first_quantum ?
					   AST_BLOCK : AST_QUANTUM);
			}
			mp_enable_preemption();
		}
		if (reasons) {
			counter(c_ast_taken_block++);
			thread_block_reason(safept, reasons);
		}
	}
}