Example #1
File: ast.c  Project: JackieXie168/xnu
/*
 * Called at splsched.
 */
void
ast_check(
	processor_t processor)
{
	thread_t thread = processor->active_thread;

	if (processor->state == PROCESSOR_RUNNING ||
	    processor->state == PROCESSOR_SHUTDOWN) {
		ast_t preempt;

		/*
		 *	Propagate thread ast to processor.
		 */
		pal_ast_check(thread);

		ast_propagate(thread->ast);

		/*
		 *	Context switch check.
		 */
		thread_lock(thread);

		processor->current_pri = thread->sched_pri;
		processor->current_thmode = thread->sched_mode;
		processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);

		if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
			ast_on(preempt);

		thread_unlock(thread);
	}
}
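
Examples #1 and #3 (and, with an older csw_check() signature, #2) end with the same re-check idiom: take the thread lock, ask csw_check() whether the scheduler wants to preempt on this processor, and if so arm the returned AST bits with ast_on() so the switch is taken at the next AST trap. A minimal sketch of that idiom, assuming the csw_check(processor, AST_NONE) signature used in examples #1, #3, #4 and #5 (the helper name recheck_preemption is illustrative, not part of XNU):

/*
 * Illustrative helper, called at splsched: re-evaluate preemption
 * for this processor and post any AST bits the scheduler asks for.
 */
static void
recheck_preemption(processor_t processor, thread_t thread)
{
	ast_t preempt;

	thread_lock(thread);
	if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
		ast_on(preempt);
	thread_unlock(thread);
}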
Example #2
/*
 * Called at splsched.
 */
void
ast_check(
	processor_t		processor)
{
	register thread_t		self = processor->active_thread;

	processor->current_pri = self->sched_pri;
	if (processor->state == PROCESSOR_RUNNING) {
		register ast_t		preempt;
processor_running:

		/*
		 *	Propagate thread ast to processor.
		 */
		ast_propagate(self->top_act->ast);

		/*
		 *	Context switch check.
		 */
		if ((preempt = csw_check(self, processor)) != AST_NONE)
			ast_on(preempt);
	}
	else
	if (	processor->state == PROCESSOR_DISPATCHING	||
			processor->state == PROCESSOR_IDLE			) {
		return;
	}
	else
	if (processor->state == PROCESSOR_SHUTDOWN)
		goto processor_running;
}
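
This older version reaches the same two states as example #1 (PROCESSOR_RUNNING and PROCESSOR_SHUTDOWN) by way of a goto. A behavior-preserving sketch of that state check without the goto, kept close to the original code (this rewrite is illustrative, not from the source):

	processor->current_pri = self->sched_pri;
	if (processor->state == PROCESSOR_RUNNING ||
	    processor->state == PROCESSOR_SHUTDOWN) {
		ast_t preempt;

		/* Propagate thread ast to processor. */
		ast_propagate(self->top_act->ast);

		/* Context switch check. */
		if ((preempt = csw_check(self, processor)) != AST_NONE)
			ast_on(preempt);
	}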
Example #3
File: ast.c  Project: aglab2/darwin-xnu
/*
 * Handle preemption IPI or IPI in response to setting an AST flag
 * Triggered by cause_ast_check
 * Called at splsched
 */
void
ast_check(processor_t processor)
{
	if (processor->state != PROCESSOR_RUNNING &&
	    processor->state != PROCESSOR_SHUTDOWN)
		return;

	thread_t thread = processor->active_thread;

	assert(thread == current_thread());

	thread_lock(thread);

	/*
	 * Propagate thread ast to processor.
	 * (handles IPI in response to setting AST flag)
	 */
	ast_propagate(thread);

	boolean_t needs_callout = false;
	processor->current_pri = thread->sched_pri;
	processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);
	processor->current_recommended_pset_type = recommended_pset_type(thread);
	perfcontrol_class_t thread_class = thread_get_perfcontrol_class(thread);
	if (thread_class != processor->current_perfctl_class) {
		/*
		 * We updated the perfctl class of this thread from another core.
		 * Since we don't do CLPC callouts from another core, do a callout
		 * here to let CLPC know that the currently running thread has a
		 * new class.
		 */
		needs_callout = true;
	}
	processor->current_perfctl_class = thread_class;

	ast_t preempt;

	if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
		ast_on(preempt);

	thread_unlock(thread);

	if (needs_callout) {
	    machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
		    mach_approximate_time(), 0, thread);
	}
}
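
The header comment says this ast_check() is triggered by cause_ast_check(). As a hedged sketch of the sending side (the helper name notify_remote_processor is illustrative; thread_ast_set() and cause_ast_check() are the XNU primitives the comment refers to), a core that changes scheduling state for a thread running elsewhere marks the thread and then pokes its processor:

/*
 * Illustrative only: mark the remote thread, then send the IPI that
 * makes the target processor run ast_check() at splsched.
 */
static void
notify_remote_processor(processor_t processor, thread_t thread, ast_t reason)
{
	thread_ast_set(thread, reason);
	cause_ast_check(processor);
}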
Example #4
void
thread_poll_yield(
	thread_t		self)
{
	spl_t			s;

	assert(self == current_thread());

	s = splsched();
	if (self->sched_mode == TH_MODE_FIXED) {
		uint64_t			total_computation, abstime;

		abstime = mach_absolute_time();
		total_computation = abstime - self->computation_epoch;
		total_computation += self->computation_metered;
		if (total_computation >= max_poll_computation) {
			processor_t		myprocessor = current_processor();
			ast_t			preempt;

			thread_lock(self);
			if (!(self->sched_flags & TH_SFLAG_DEPRESSED_MASK)) {
				self->sched_pri = DEPRESSPRI;
				myprocessor->current_pri = self->sched_pri;
			}
			self->computation_epoch = abstime;
			self->computation_metered = 0;
			self->sched_flags |= TH_SFLAG_POLLDEPRESS;

			abstime += (total_computation >> sched_poll_yield_shift);
			if (!timer_call_enter(&self->depress_timer, abstime, TIMER_CALL_USER_CRITICAL))
				self->depress_timer_active++;

			if ((preempt = csw_check(myprocessor, AST_NONE)) != AST_NONE)
				ast_on(preempt);

			thread_unlock(self);
		}
	}
	splx(s);
}
Example #5
File: ast.c  Project: JackieXie168/xnu
/*
 * Called at splsched.
 */
void
ast_taken(
	ast_t		reasons,
	boolean_t	enable
)
{
	boolean_t		preempt_trap = (reasons == AST_PREEMPTION);
	ast_t			*myast = ast_pending();
	thread_t		thread = current_thread();
	perfASTCallback	perf_hook = perfASTHook;

	/*
	 * CHUD hook - all threads including idle processor threads
	 */
	if (perf_hook) {
		if (*myast & AST_CHUD_ALL) {
			(*perf_hook)(reasons, myast);
			
			if (*myast == AST_NONE)
				return;
		}
	}
	else
		*myast &= ~AST_CHUD_ALL;

	reasons &= *myast;
	*myast &= ~reasons;

	/*
	 * Handle ASTs for all threads
	 * except idle processor threads.
	 */
	if (!(thread->state & TH_IDLE)) {
		/*
		 * Check for urgent preemption.
		 */
		if (	(reasons & AST_URGENT)				&&
				waitq_wait_possible(thread)		) {
			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				thread_block_reason(THREAD_CONTINUE_NULL, NULL,
										reasons & AST_PREEMPTION);
			}

			reasons &= ~AST_PREEMPTION;
		}

		/*
		 * The kernel preempt traps
		 * skip all other ASTs.
		 */
		if (!preempt_trap) {
			ml_set_interrupts_enabled(enable);

#ifdef	MACH_BSD
			/*
			 * Handle BSD hook.
			 */
			if (reasons & AST_BSD) {
				thread_ast_clear(thread, AST_BSD);
				bsd_ast(thread);
			}
#endif
#if CONFIG_MACF
			/*
			 * Handle MACF hook.
			 */
			if (reasons & AST_MACF) {
				thread_ast_clear(thread, AST_MACF);
				mac_thread_userret(thread);
			}
#endif
			/* 
			 * Thread APC hook.
			 */
			if (reasons & AST_APC) {
				thread_ast_clear(thread, AST_APC);
				special_handler(thread);
			}
			
			if (reasons & AST_GUARD) {
				thread_ast_clear(thread, AST_GUARD);
				guard_ast(thread);
			}
			
			if (reasons & AST_LEDGER) {
				thread_ast_clear(thread, AST_LEDGER);
				ledger_ast(thread);
			}

			/*
			 * Kernel Profiling Hook
			 */
			if (reasons & AST_KPERF) {
				thread_ast_clear(thread, AST_KPERF);
				chudxnu_thread_ast(thread);
			}

#if CONFIG_TELEMETRY
			if (reasons & AST_TELEMETRY_ALL) {
				boolean_t interrupted_userspace = FALSE;
				boolean_t is_windowed = FALSE;

				assert((reasons & AST_TELEMETRY_ALL) != AST_TELEMETRY_ALL); /* only one is valid at a time */
				interrupted_userspace = (reasons & AST_TELEMETRY_USER) ? TRUE : FALSE;
				is_windowed = ((reasons & AST_TELEMETRY_WINDOWED) ? TRUE : FALSE);
				thread_ast_clear(thread, AST_TELEMETRY_ALL);
				telemetry_ast(thread, interrupted_userspace, is_windowed);
			}
#endif

			ml_set_interrupts_enabled(FALSE);

#if CONFIG_SCHED_SFI
			if (reasons & AST_SFI) {
				sfi_ast(thread);
			}
#endif

			/*
			 * Check for preemption. Conditions may have changed from when the AST_PREEMPT was originally set.
			 */
			thread_lock(thread);
			if (reasons & AST_PREEMPT)
				reasons = csw_check(current_processor(), reasons & AST_QUANTUM);
			thread_unlock(thread);

			assert(waitq_wait_possible(thread));

			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				thread_block_reason((thread_continue_t)thread_exception_return, NULL, reasons & AST_PREEMPTION);
			}
		}
	}

	ml_set_interrupts_enabled(enable);
}
Example #6
File: ast.c  Project: aglab2/darwin-xnu
/*
 * An AST flag was set while returning to user mode
 * Called with interrupts disabled, returns with interrupts enabled
 * May call continuation instead of returning
 */
void
ast_taken_user(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));
	assert((thread->state & TH_IDLE) == 0);

	/* TODO: Add more 'return to userspace' assertions here */

	/*
	 * If this thread was urgently preempted in userspace,
	 * take the preemption before processing the ASTs.
	 * The trap handler will call us again if we have more ASTs, so it's
	 * safe to block in a continuation here.
	 */
	if (ast_peek(AST_URGENT) == AST_URGENT) {
		ast_t urgent_reason = ast_consume(AST_PREEMPTION);

		assert(urgent_reason & AST_PREEMPT);

		/* TODO: Should we csw_check again to notice if conditions have changed? */

		thread_block_reason(thread_preempted, NULL, urgent_reason);
		/* NOTREACHED */
	}

	/*
	 * AST_KEVENT does not send an IPI when setting the ast for a thread running in parallel
	 * on a different processor. Only the ast bit on the thread will be set.
	 *
	 * Force a propagate for concurrent updates without an IPI.
	 */
	ast_propagate(thread);

	/*
	 * Consume all non-preemption processor ASTs matching reasons
	 * because we're handling them here.
	 *
	 * If one of the AST handlers blocks in a continuation,
	 * we'll reinstate the unserviced thread-level AST flags
	 * from the thread to the processor on context switch.
	 * If one of the AST handlers sets another AST,
	 * the trap handler will call ast_taken_user again.
	 *
	 * We expect the AST handlers not to thread_exception_return
	 * without an ast_propagate or context switch to reinstate
	 * the per-processor ASTs.
	 *
	 * TODO: Why are AST_DTRACE and AST_KPERF not per-thread ASTs?
	 */
	ast_t reasons = ast_consume(AST_PER_THREAD | AST_KPERF | AST_DTRACE);

	ml_set_interrupts_enabled(TRUE);

#if CONFIG_DTRACE
	if (reasons & AST_DTRACE) {
		dtrace_ast();
	}
#endif

#ifdef MACH_BSD
	if (reasons & AST_BSD) {
		thread_ast_clear(thread, AST_BSD);
		bsd_ast(thread);
	}
#endif

#if CONFIG_MACF
	if (reasons & AST_MACF) {
		thread_ast_clear(thread, AST_MACF);
		mac_thread_userret(thread);
	}
#endif

	if (reasons & AST_APC) {
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);
	}

	if (reasons & AST_GUARD) {
		thread_ast_clear(thread, AST_GUARD);
		guard_ast(thread);
	}

	if (reasons & AST_LEDGER) {
		thread_ast_clear(thread, AST_LEDGER);
		ledger_ast(thread);
	}

	if (reasons & AST_KPERF) {
		thread_ast_clear(thread, AST_KPERF);
		kperf_kpc_thread_ast(thread);
	}

	if (reasons & AST_KEVENT) {
		thread_ast_clear(thread, AST_KEVENT);
		uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0);
		if (bits) kevent_ast(thread, bits);
	}

#if CONFIG_TELEMETRY
	if (reasons & AST_TELEMETRY_ALL) {
		ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL;
		thread_ast_clear(thread, AST_TELEMETRY_ALL);
		telemetry_ast(thread, telemetry_reasons);
	}
#endif

	spl_t s = splsched();

#if CONFIG_SCHED_SFI
	/*
	 * SFI is currently a per-processor AST, not a per-thread AST
	 *      TODO: SFI should be a per-thread AST
	 */
	if (ast_consume(AST_SFI) == AST_SFI) {
		sfi_ast(thread);
	}
#endif

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));

	/*
	 * We've handled all per-thread ASTs, time to handle non-urgent preemption.
	 *
	 * We delay reading the preemption bits until now in case the thread
	 * blocks while handling per-thread ASTs.
	 *
	 * If one of the AST handlers had managed to set a new AST bit,
	 * thread_exception_return will call ast_taken again.
	 */
	ast_t preemption_reasons = ast_consume(AST_PREEMPTION);

	if (preemption_reasons & AST_PREEMPT) {
		/* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */

		thread_lock(thread);
		preemption_reasons = csw_check(current_processor(), (preemption_reasons & AST_QUANTUM));
		thread_unlock(thread);

#if CONFIG_SCHED_SFI
		/* csw_check might tell us that SFI is needed */
		if (preemption_reasons & AST_SFI) {
			sfi_ast(thread);
		}
#endif

		if (preemption_reasons & AST_PREEMPT) {
			counter(c_ast_taken_block++);
			/* switching to a continuation implicitly re-enables interrupts */
			thread_block_reason(thread_preempted, NULL, preemption_reasons);
			/* NOTREACHED */
		}
	}

	splx(s);
}
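
Each handler above follows the same clear-then-handle idiom: the per-thread flag is consumed before the handler runs, so a flag re-posted by the handler itself (or concurrently by another core) is picked up on the next pass through the trap handler instead of being lost. A hypothetical additional handler would take the same shape (AST_EXAMPLE and example_ast() are placeholders, not XNU symbols):

	if (reasons & AST_EXAMPLE) {
		thread_ast_clear(thread, AST_EXAMPLE);	/* consume before handling */
		example_ast(thread);			/* placeholder handler */
	}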