Example #1
File: ast.c Project: JackieXie168/xnu
/*
 * Called at splsched.
 */
void
ast_check(
	processor_t processor)
{
	thread_t thread = processor->active_thread;

	if (processor->state == PROCESSOR_RUNNING ||
	    processor->state == PROCESSOR_SHUTDOWN) {
		ast_t preempt;

		/*
		 *	Propagate thread ast to processor.
		 */
		pal_ast_check(thread);

		ast_propagate(thread->ast);

		/*
		 *	Context switch check.
		 */
		thread_lock(thread);

		processor->current_pri = thread->sched_pri;
		processor->current_thmode = thread->sched_mode;
		processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);

		if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
			ast_on(preempt);

		thread_unlock(thread);
	}
}
Example #2
/*
 * Called at splsched.
 */
void
ast_check(
	processor_t		processor)
{
	register thread_t		self = processor->active_thread;

	processor->current_pri = self->sched_pri;
	if (processor->state == PROCESSOR_RUNNING) {
		register ast_t		preempt;
processor_running:

		/*
		 *	Propagate thread ast to processor.
		 */
		ast_propagate(self->top_act->ast);

		/*
		 *	Context switch check.
		 */
		if ((preempt = csw_check(self, processor)) != AST_NONE)
			ast_on(preempt);
	}
	else if (processor->state == PROCESSOR_DISPATCHING ||
		 processor->state == PROCESSOR_IDLE) {
		return;
	}
	else if (processor->state == PROCESSOR_SHUTDOWN)
		goto processor_running;
}
Example #3
/*
 * install_special_handler_locked:
 *
 *	Do the work of installing the special_handler.
 *
 *	Called with the thread mutex and scheduling lock held.
 */
void
install_special_handler_locked(
	thread_t				thread)
{
	ReturnHandler	**rh;

	/* The work handler must always be the last ReturnHandler on the list,
	   because it can do tricky things like detach the thr_act.  */
	for (rh = &thread->handlers; *rh; rh = &(*rh)->next)
		continue;

	if (rh != &thread->special_handler.next)
		*rh = &thread->special_handler;

	/*
	 * Temporarily undepress, so target has
	 * a chance to do locking required to
	 * block itself in special_handler().
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)
		SCHED(compute_priority)(thread, TRUE);

	thread_ast_set(thread, AST_APC);

	if (thread == current_thread())
		ast_propagate(thread->ast);
	else {
		processor_t		processor = thread->last_processor;

		if (	processor != PROCESSOR_NULL					&&
				processor->state == PROCESSOR_RUNNING		&&
				processor->active_thread == thread			)
			cause_ast_check(processor);
	}
}
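
The ReturnHandler **rh walk above is the pointer-to-a-link append idiom. A minimal standalone sketch of the same idiom, with illustrative types rather than the XNU definitions:

#include <stddef.h>

/* Illustrative stand-in for XNU's ReturnHandler; field names assumed. */
struct return_handler {
	struct return_handler *next;
	/* handler routine and arguments would live here */
};

/*
 * Walk rh down the chain of 'next' links until it points at the
 * terminating NULL link, then splice the new handler in there
 * (assumes h->next is already NULL). The guard mirrors the one
 * above: if the tail link already belongs to 'h', it is already
 * last and must not be linked to itself.
 */
static void
return_handler_append(struct return_handler **list, struct return_handler *h)
{
	struct return_handler **rh;

	for (rh = list; *rh != NULL; rh = &(*rh)->next)
		continue;

	if (rh != &h->next)
		*rh = h;
}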
Example #4
static void
act_set_ast(
	    thread_t	thread,
	    ast_t ast)
{
	spl_t		s = splsched();
	
	if (thread == current_thread()) {
		thread_ast_set(thread, ast);
		ast_propagate(thread->ast);
	}
	else {
		processor_t		processor;

		thread_lock(thread);
		thread_ast_set(thread, ast);
		processor = thread->last_processor;
		if ( processor != PROCESSOR_NULL            &&
		     processor->state == PROCESSOR_RUNNING  &&
		     processor->active_thread == thread	     )
			cause_ast_check(processor);
		thread_unlock(thread);
	}
	
	splx(s);
}
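
act_set_ast is the generic setter; specific AST reasons are typically raised through thin wrappers layered on top of it. A hedged sketch of such a wrapper (the name is illustrative, not an XNU function; AST_BSD itself is serviced in Example #10):

static void
act_set_ast_bsd_sketch(thread_t thread)
{
	/* Illustrative wrapper: raise AST_BSD via the generic path above. */
	act_set_ast(thread, AST_BSD);
}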
Example #5
/*
 * install_special_handler_locked:
 *
 *	Do the work of installing the special_handler.
 *
 *	Called with the thread mutex and scheduling lock held.
 */
void
install_special_handler_locked(
	thread_t				thread)
{
	
	/*
	 * Temporarily undepress, so target has
	 * a chance to do locking required to
	 * block itself in special_handler().
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)
		thread_recompute_sched_pri(thread, TRUE);

	thread_ast_set(thread, AST_APC);

	if (thread == current_thread())
		ast_propagate(thread->ast);
	else {
		processor_t		processor = thread->last_processor;

		if (	processor != PROCESSOR_NULL					&&
				processor->state == PROCESSOR_RUNNING		&&
				processor->active_thread == thread			)
			cause_ast_check(processor);
	}
}
Example #6
/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
	/*
	 * Temporarily undepress, so target has
	 * a chance to do locking required to
	 * block itself in thread_suspended.
	 *
	 * Leaves the depress flag set so we can reinstate when it's blocked.
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK)
		thread_recompute_sched_pri(thread, TRUE);

	thread_ast_set(thread, AST_APC);

	if (thread == current_thread()) {
		ast_propagate(thread);
	} else {
		processor_t processor = thread->last_processor;

		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
	}
}
Example #7
File: ast.c Project: aglab2/darwin-xnu
/*
 * Handle preemption IPI or IPI in response to setting an AST flag
 * Triggered by cause_ast_check
 * Called at splsched
 */
void
ast_check(processor_t processor)
{
	if (processor->state != PROCESSOR_RUNNING &&
	    processor->state != PROCESSOR_SHUTDOWN)
		return;

	thread_t thread = processor->active_thread;

	assert(thread == current_thread());

	thread_lock(thread);

	/*
	 * Propagate thread ast to processor.
	 * (handles IPI in response to setting AST flag)
	 */
	ast_propagate(thread);

	boolean_t needs_callout = false;
	processor->current_pri = thread->sched_pri;
	processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);
	processor->current_recommended_pset_type = recommended_pset_type(thread);
	perfcontrol_class_t thread_class = thread_get_perfcontrol_class(thread);
	if (thread_class != processor->current_perfctl_class) {
		/*
		 * We updated the perfctl class of this thread from another core.
		 * Since we don't do CLPC callouts from another core, do a callout
		 * here to let CLPC know that the currently running thread has a new
		 * class.
		 */
		needs_callout = true;
	}
	processor->current_perfctl_class = thread_class;

	ast_t preempt;

	if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
		ast_on(preempt);

	thread_unlock(thread);

	if (needs_callout) {
		machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
		    mach_approximate_time(), 0, thread);
	}
}
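
Note how Example #7 records needs_callout under the thread lock but issues machine_switch_perfcontrol_state_update only after thread_unlock. A minimal sketch of that deferral pattern, reusing only names that appear in the example:

/*
 * Sketch, not XNU source: decide under the thread lock, act after
 * dropping it, so the heavier callout never runs with the lock held.
 */
static void
perfctl_update_sketch(processor_t processor, thread_t thread)
{
	boolean_t needs_callout = false;

	thread_lock(thread);
	if (thread_get_perfcontrol_class(thread) != processor->current_perfctl_class)
		needs_callout = true;	/* record the decision only */
	thread_unlock(thread);

	if (needs_callout) {
		/* lock-free notification would go here */
	}
}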
Example #8
/*
 * Mark the current thread for an interrupt-based
 * telemetry record, to be sampled at the next AST boundary.
 */
void telemetry_mark_curthread(boolean_t interrupted_userspace)
{
	uint32_t ast_bits = 0;
	thread_t thread = current_thread();

	/*
	 * If telemetry isn't active for this thread, return and try
	 * again next time.
	 */
	if (telemetry_is_active(thread) == FALSE) {
		return;
	}

	ast_bits |= (interrupted_userspace ? AST_TELEMETRY_USER : AST_TELEMETRY_KERNEL);

	telemetry_needs_record = FALSE;
	thread_ast_set(thread, ast_bits);
	ast_propagate(thread);
}
Example #9
void
act_set_astkevent(thread_t thread, uint16_t bits)
{
	spl_t s = splsched();

	/*
	 * Do not send an IPI if the thread is running on
	 * another processor, wait for the next quantum
	 * expiration to load the AST.
	 */

	atomic_fetch_or(&thread->kevent_ast_bits, bits);
	thread_ast_set(thread, AST_KEVENT);

	if (thread == current_thread()) {
		ast_propagate(thread);
	}

	splx(s);
}
Example #10
File: ast.c Project: aglab2/darwin-xnu
/*
 * An AST flag was set while returning to user mode
 * Called with interrupts disabled, returns with interrupts enabled
 * May call continuation instead of returning
 */
void
ast_taken_user(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));
	assert((thread->state & TH_IDLE) == 0);

	/* TODO: Add more 'return to userspace' assertions here */

	/*
	 * If this thread was urgently preempted in userspace,
	 * take the preemption before processing the ASTs.
	 * The trap handler will call us again if we have more ASTs, so it's
	 * safe to block in a continuation here.
	 */
	if (ast_peek(AST_URGENT) == AST_URGENT) {
		ast_t urgent_reason = ast_consume(AST_PREEMPTION);

		assert(urgent_reason & AST_PREEMPT);

		/* TODO: Should we csw_check again to notice if conditions have changed? */

		thread_block_reason(thread_preempted, NULL, urgent_reason);
		/* NOTREACHED */
	}

	/*
	 * AST_KEVENT does not send an IPI when setting the ast for a thread running in parallel
	 * on a different processor. Only the ast bit on the thread will be set.
	 *
	 * Force a propagate for concurrent updates without an IPI.
	 */
	ast_propagate(thread);

	/*
	 * Consume all non-preemption processor ASTs matching reasons
	 * because we're handling them here.
	 *
	 * If one of the AST handlers blocks in a continuation,
	 * we'll reinstate the unserviced thread-level AST flags
	 * from the thread to the processor on context switch.
	 * If one of the AST handlers sets another AST,
	 * the trap handler will call ast_taken_user again.
	 *
	 * We expect the AST handlers not to thread_exception_return
	 * without an ast_propagate or context switch to reinstate
	 * the per-processor ASTs.
	 *
	 * TODO: Why are AST_DTRACE and AST_KPERF not per-thread ASTs?
	 */
	ast_t reasons = ast_consume(AST_PER_THREAD | AST_KPERF | AST_DTRACE);

	ml_set_interrupts_enabled(TRUE);

#if CONFIG_DTRACE
	if (reasons & AST_DTRACE) {
		dtrace_ast();
	}
#endif

#ifdef MACH_BSD
	if (reasons & AST_BSD) {
		thread_ast_clear(thread, AST_BSD);
		bsd_ast(thread);
	}
#endif

#if CONFIG_MACF
	if (reasons & AST_MACF) {
		thread_ast_clear(thread, AST_MACF);
		mac_thread_userret(thread);
	}
#endif

	if (reasons & AST_APC) {
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);
	}

	if (reasons & AST_GUARD) {
		thread_ast_clear(thread, AST_GUARD);
		guard_ast(thread);
	}

	if (reasons & AST_LEDGER) {
		thread_ast_clear(thread, AST_LEDGER);
		ledger_ast(thread);
	}

	if (reasons & AST_KPERF) {
		thread_ast_clear(thread, AST_KPERF);
		kperf_kpc_thread_ast(thread);
	}

	if (reasons & AST_KEVENT) {
		thread_ast_clear(thread, AST_KEVENT);
		uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0);
		if (bits) kevent_ast(thread, bits);
	}

#if CONFIG_TELEMETRY
	if (reasons & AST_TELEMETRY_ALL) {
		ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL;
		thread_ast_clear(thread, AST_TELEMETRY_ALL);
		telemetry_ast(thread, telemetry_reasons);
	}
#endif

	spl_t s = splsched();

#if CONFIG_SCHED_SFI
	/*
	 * SFI is currently a per-processor AST, not a per-thread AST
	 *      TODO: SFI should be a per-thread AST
	 */
	if (ast_consume(AST_SFI) == AST_SFI) {
		sfi_ast(thread);
	}
#endif

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));

	/*
	 * We've handled all per-thread ASTs, time to handle non-urgent preemption.
	 *
	 * We delay reading the preemption bits until now in case the thread
	 * blocks while handling per-thread ASTs.
	 *
	 * If one of the AST handlers had managed to set a new AST bit,
	 * thread_exception_return will call ast_taken again.
	 */
	ast_t preemption_reasons = ast_consume(AST_PREEMPTION);

	if (preemption_reasons & AST_PREEMPT) {
		/* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */

		thread_lock(thread);
		preemption_reasons = csw_check(current_processor(), (preemption_reasons & AST_QUANTUM));
		thread_unlock(thread);

#if CONFIG_SCHED_SFI
		/* csw_check might tell us that SFI is needed */
		if (preemption_reasons & AST_SFI) {
			sfi_ast(thread);
		}
#endif

		if (preemption_reasons & AST_PREEMPT) {
			counter(c_ast_taken_block++);
			/* switching to a continuation implicitly re-enables interrupts */
			thread_block_reason(thread_preempted, NULL, preemption_reasons);
			/* NOTREACHED */
		}
	}

	splx(s);
}
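
ast_taken_user leans on ast_peek and ast_consume. A sketch of the semantics they are assumed to have here (the real versions operate on per-processor state at splsched, not on a plain variable):

static ast_t pending_ast;	/* stand-in for the per-processor AST mask */

static ast_t
ast_peek_sketch(ast_t reasons)
{
	return pending_ast & reasons;		/* observe without clearing */
}

static ast_t
ast_consume_sketch(ast_t reasons)
{
	ast_t taken = pending_ast & reasons;

	pending_ast &= ~taken;			/* caller now owns these reasons */
	return taken;
}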
Example #11
File: ast.c Project: rohsaini/mkunity
void
ast_check(void)
{
	register int		mycpu;
	register processor_t	myprocessor;
	register thread_t	thread = current_thread();
	spl_t			s = splsched();

	mp_disable_preemption();
	mycpu = cpu_number();

	/*
	 *	Check processor state for ast conditions.
	 */
	myprocessor = cpu_to_processor(mycpu);
	switch(myprocessor->state) {
	    case PROCESSOR_OFF_LINE:
	    case PROCESSOR_IDLE:
	    case PROCESSOR_DISPATCHING:
		/*
		 *	No ast.
		 */
	    	break;

#if	NCPUS > 1
	    case PROCESSOR_ASSIGN:
	    case PROCESSOR_SHUTDOWN:
	        /*
		 * 	Need ast to force action thread onto processor.
		 *
		 * XXX  Should check if action thread is already there.
		 */
		ast_on(mycpu, AST_BLOCK);
		break;
#endif	/* NCPUS > 1 */

	    case PROCESSOR_RUNNING:
	    case PROCESSOR_VIDLE:

		/*
		 *	Propagate thread ast to processor.  If we already
		 *	need an ast, don't look for more reasons.
		 */
		ast_propagate(current_act(), mycpu);
		if (ast_needed(mycpu))
			break;

		/*
		 *	Context switch check.
		 */
		if (csw_needed(thread, myprocessor)) {
			ast_on(mycpu, (myprocessor->first_quantum ?
			       AST_BLOCK : AST_QUANTUM));
		}
		break;

	    default:
	        panic("ast_check: Bad processor state");
	}
	mp_enable_preemption();
	splx(s);
}
Example #12
File: ast.c Project: quan-na/gnumach
void
ast_check(void)
{
	int			mycpu = cpu_number();
	processor_t		myprocessor;
	thread_t		thread = current_thread();
	run_queue_t		rq;
	spl_t			s = splsched();

	/*
	 *	Check processor state for ast conditions.
	 */
	myprocessor = cpu_to_processor(mycpu);
	switch(myprocessor->state) {
	    case PROCESSOR_OFF_LINE:
	    case PROCESSOR_IDLE:
	    case PROCESSOR_DISPATCHING:
		/*
		 *	No ast.
		 */
	    	break;

#if	NCPUS > 1
	    case PROCESSOR_ASSIGN:
	    case PROCESSOR_SHUTDOWN:
	        /*
		 * 	Need ast to force action thread onto processor.
		 *
		 * XXX  Should check if action thread is already there.
		 */
		ast_on(mycpu, AST_BLOCK);
		break;
#endif	/* NCPUS > 1 */

	    case PROCESSOR_RUNNING:

		/*
		 *	Propagate thread ast to processor.  If we already
		 *	need an ast, don't look for more reasons.
		 */
		ast_propagate(thread, mycpu);
		if (ast_needed(mycpu))
			break;

		/*
		 *	Context switch check.  The csw_needed macro isn't
		 *	used here because the rq->low hint may be wrong,
		 *	and fixing it here avoids an extra ast.
		 *	First check the easy cases.
		 */
		if (thread->state & TH_SUSP || myprocessor->runq.count > 0) {
			ast_on(mycpu, AST_BLOCK);
			break;
		}

		/*
		 *	Update lazy evaluated runq->low if only timesharing.
		 */
#if	MACH_FIXPRI
		if (myprocessor->processor_set->policies & POLICY_FIXEDPRI) {
		    if (csw_needed(thread,myprocessor)) {
			ast_on(mycpu, AST_BLOCK);
			break;
		    }
		    else {
			/*
			 *	For fixed priority threads, set first_quantum
			 *	so entire new quantum is used.
			 */
			if (thread->policy == POLICY_FIXEDPRI)
			    myprocessor->first_quantum = TRUE;
		    }
		}
		else {
#endif	/* MACH_FIXPRI */
		rq = &(myprocessor->processor_set->runq);
		if (!(myprocessor->first_quantum) && (rq->count > 0)) {
		    queue_t 		q;
		    /*
		     *	This is not the first quantum, and there may
		     *	be something in the processor_set runq.
		     *	Check whether low hint is accurate.
		     */
		    q = rq->runq + *(volatile int *)&rq->low;
		    if (queue_empty(q)) {
			int i;

			/*
			 *	Need to recheck and possibly update hint.
			 */
			simple_lock(&rq->lock);
			q = rq->runq + rq->low;
			if (rq->count > 0) {
			    for (i = rq->low; i < NRQS; i++) {
				if(!(queue_empty(q)))
				    break;
				q++;
			    }
			    rq->low = i;
			}
			simple_unlock(&rq->lock);
		    }

		    if (rq->low <= thread->sched_pri) {
			ast_on(mycpu, AST_BLOCK);
			break;
		    }
		}
#if	MACH_FIXPRI
		}
#endif	/* MACH_FIXPRI */
		break;

	    default:
	        panic("ast_check: Bad processor state (cpu %d processor %08x) state: %d",
			mycpu, myprocessor, myprocessor->state);
	}

	(void) splx(s);
}
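
The low-hint recheck in Example #12 is easier to see in isolation. A self-contained model of the lazily maintained hint (counts only; the real structure holds thread queues and a lock):

#define NRQS_SKETCH	32	/* illustrative number of priority queues */

struct run_queue_sketch {
	int	count;			/* runnable threads across all queues */
	int	low;			/* hint: lowest-numbered non-empty queue */
	int	qcount[NRQS_SKETCH];	/* per-queue occupancy */
};

/*
 * Mirror of the recheck above: if the hint points at an empty queue
 * while work exists, advance it to the first non-empty queue (the
 * original does this under rq->lock).
 */
static void
runq_fix_low_hint(struct run_queue_sketch *rq)
{
	if (rq->count > 0 && rq->qcount[rq->low] == 0) {
		int i;

		for (i = rq->low; i < NRQS_SKETCH; i++)
			if (rq->qcount[i] > 0)
				break;
		rq->low = i;
	}
}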