Example #1
/*
 * Called at splsched.
 */
void
ast_taken(
	ast_t			reasons,
	boolean_t		enable
)
{
	register thread_t		self = current_thread();
	register int			mycpu = cpu_number();
	boolean_t				preempt_trap = (reasons == AST_PREEMPTION);

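	/*
	 * Latch and clear the pending reasons for this CPU in one
	 * step, so each reason is handled exactly once.  This is
	 * safe only because we are still at splsched (see above).
	 */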
	reasons &= need_ast[mycpu];
	need_ast[mycpu] &= ~reasons;

	/*
	 * Handle ASTs for all threads
	 * except idle processor threads.
	 */
	if (!(self->state & TH_IDLE)) {
		/*
		 * Check for urgent preemption.
		 */
		if (	(reasons & AST_URGENT)				&&
				wait_queue_assert_possible(self)		) {
			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				thread_block_reason(THREAD_CONTINUE_NULL,
										AST_PREEMPT | AST_URGENT);
			}

			reasons &= ~AST_PREEMPTION;
		}

		/*
		 * The kernel preempt traps
		 * skip all other ASTs.
		 */
		if (!preempt_trap) {
			ml_set_interrupts_enabled(enable);

#ifdef	MACH_BSD
			/*
			 * Handle BSD hook.
			 */
			if (reasons & AST_BSD) {
				extern void		bsd_ast(thread_act_t	act);
				thread_act_t	act = self->top_act;

				thread_ast_clear(act, AST_BSD);
				bsd_ast(act);
			}
#endif

			/* 
			 * Thread APC hook.
			 */
			if (reasons & AST_APC)
				act_execute_returnhandlers();

			ml_set_interrupts_enabled(FALSE);

			/* 
			 * Check for preemption.
			 */
			if (reasons & AST_PREEMPT) {
				processor_t		myprocessor = current_processor();

				if (csw_needed(self, myprocessor))
					reasons = AST_PREEMPT;
				else
					reasons = AST_NONE;
			}
			if (	(reasons & AST_PREEMPT)				&&
					wait_queue_assert_possible(self)		) {
				counter(c_ast_taken_block++);
				thread_block_reason(thread_exception_return, AST_PREEMPT);
			}
		}
	}

	ml_set_interrupts_enabled(enable);
}
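
A note on Example #1 (not part of the original file): ast_taken() consumes reason bits that earlier code posted into the per-CPU need_ast[] array. The sketch below shows the posting side under that assumption; the helper name is hypothetical, and the caller is assumed to be at splsched so the read-modify-write of need_ast[] is not interrupted.

/*
 * Hypothetical sketch of posting an AST reason; not from the source.
 * Assumes the need_ast[] array and ast_t type used in the examples.
 */
static void
ast_post_sketch(int mycpu, ast_t reason)
{
	need_ast[mycpu] |= reason;	/* observed by the next ast_taken() */
}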
Example #2
File: ast.c  Project: rohsaini/mkunity
void
ast_taken(
	boolean_t		preemption,
	ast_t			mask,
	spl_t			old_spl
#if	FAST_IDLE
        ,int			thread_type
#endif	/* FAST_IDLE */
)
{
	register thread_t	self = current_thread();
	register processor_t	mypr;
	register ast_t		reasons;
	register int		mycpu;
	thread_act_t		act = self->top_act;

	/*
	 *	Interrupts are still disabled.
	 *	We must clear need_ast and then enable interrupts.
	 */

extern void	log_thread_action(thread_t, char *);

#if 0
	log_thread_action (current_thread(), "ast_taken");
#endif

	mp_disable_preemption();
	mycpu = cpu_number();
	reasons = need_ast[mycpu] & mask;
	need_ast[mycpu] &= ~reasons;
	mp_enable_preemption();

	splx(old_spl);

	/*
	 *	These actions must not block.
	 */

#if	MCMSG
	if (reasons & AST_MCMSG)
		mcmsg_ast();
#endif	/* MCMSG */

	if (reasons & AST_NETWORK)
		net_ast();

#if	MCMSG_ENG
	if (reasons & AST_RPCREQ)
		rpc_engine_request_intr();

	if (reasons & AST_RPCREPLY)
		rpc_engine_reply_intr();

	if (reasons & AST_RPCDEPART)
		rpc_engine_depart_intr();

	if (reasons & AST_RDMASEND)
		rdma_engine_send_intr();

	if (reasons & AST_RDMARECV)
		rdma_engine_recv_intr();

	if (reasons & AST_RDMATXF)
		rdma_engine_send_fault_intr();

	if (reasons & AST_RDMARXF)
		rdma_engine_recv_fault_intr();
#endif	/* MCMSG_ENG */

#if	PARAGON860 && MCMSG_ENG
	if (reasons & AST_SCAN_INPUT)
		scan_input_ast();
#endif	/* PARAGON860 && MCMSG_ENG */

#if	DIPC
	if (reasons & AST_DIPC)
		dipc_ast();
#endif	/* DIPC */

	/*
	 *	Make darn sure that we don't call thread_halt_self
	 *	or thread_block from the idle thread.
	 */

	/* XXX - this isn't currently right for the HALT case... */

	mp_disable_preemption();
	mypr = current_processor();
	if (self == mypr->idle_thread) {
#if	NCPUS == 1
	    if (reasons & AST_URGENT) {
		if (!preemption)
		    panic("ast_taken: AST_URGENT for idle_thr w/o preemption");
	    }
#endif
	    mp_enable_preemption();
	    return;
	}
	mp_enable_preemption();

#if	FAST_IDLE
	if (thread_type != NO_IDLE_THREAD)
		return;
#endif	/* FAST_IDLE */

#if	TASK_SWAPPER
	/* must be before AST_APC */
	if (reasons & AST_SWAPOUT) {
		spl_t s;
		swapout_ast();
		s = splsched();
		mp_disable_preemption();
		mycpu = cpu_number();
		if (need_ast[mycpu] & AST_APC) {
			/* generated in swapout_ast() to get suspended */
			reasons |= AST_APC;		/* process now ... */
			need_ast[mycpu] &= ~AST_APC;	/* ... and not later */
		}
		mp_enable_preemption();
		splx(s);
	}
#endif	/* TASK_SWAPPER */

	/* migration APC hook */
	if (reasons & AST_APC) {
		act_execute_returnhandlers();
		return;	/* auto-retry will catch anything new */
	}

	/* 
	 *	thread_block needs to know if the thread's quantum 
	 *	expired so the thread can be put on the tail of
	 *	run queue. One of the previous actions might well
	 *	have woken a high-priority thread, so we also use
	 *	csw_needed check.
	 */
	{   void (*safept)(void) = (void (*)(void))SAFE_EXCEPTION_RETURN;

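	    /*
	     * Note: the compound assignment below is deliberate; it
	     * narrows reasons to the preempt bit before testing it.
	     */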
	    if (reasons &= AST_PREEMPT) {
		    if (preemption)
			    safept = (void (*)(void)) 0;
	    } else {
		    mp_disable_preemption();
		    mypr = current_processor();
		    if (csw_needed(self, mypr)) {
			    reasons = (mypr->first_quantum
				       ? AST_BLOCK
				       : AST_QUANTUM);
		    }
		    mp_enable_preemption();
	    }
	    if (reasons) {
		    counter(c_ast_taken_block++);
		    thread_block_reason(safept, reasons);
	    }
	}
}
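
For context on Example #2 (an assumption, not code from rohsaini/mkunity): ast_taken() is entered from a trap or interrupt-return path while interrupts are still disabled, matching the precondition noted at the top of the function. Below is a minimal caller sketch for a non-FAST_IDLE build; the function name is hypothetical, and AST_ALL is assumed to name the all-reasons mask.

/*
 * Hypothetical caller sketch: check the per-CPU pending mask on the
 * way back to user mode and enter ast_taken() at splsched.  Note that
 * ast_taken() itself restores the spl via splx(old_spl).
 */
void
return_from_trap_sketch(void)
{
	spl_t	s = splsched();

	if (need_ast[cpu_number()] != AST_NONE)
		ast_taken(FALSE, AST_ALL, s);	/* not a preemption trap */
	else
		splx(s);
}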