Example #1
/*
 *	swtch_pri: attempt to context switch, depressing the caller's
 *	priority for thread_depress_time.  The boolean return indicates
 *	whether anything else is runnable.
 */
boolean_t
swtch_pri(
__unused	struct swtch_pri_args *args)
{
	register processor_t	myprocessor;
	boolean_t				result;

	disable_preemption();
	myprocessor = current_processor();
	if (SCHED(processor_queue_empty)(myprocessor) && rt_runq.count == 0) {
		mp_enable_preemption();

		return (FALSE);
	}
	enable_preemption();

	counter(c_swtch_pri_block++);

	thread_depress_abstime(thread_depress_time);

	thread_block_reason((thread_continue_t)swtch_pri_continue, NULL, AST_YIELD);

	thread_depress_abort_internal(current_thread());

	disable_preemption();
	myprocessor = current_processor();
	result = !SCHED(processor_queue_empty)(myprocessor) || rt_runq.count > 0;
	enable_preemption();

	return (result);
}
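
A minimal user-space sketch of how this trap gets exercised, assuming the libsyscall prototype boolean_t swtch_pri(int) exported through <mach/mach_traps.h>; the spin loop, flag name, and threshold below are illustrative, not part of the kernel sources above.

#include <mach/mach_traps.h>	/* assumed to declare swtch_pri() for user space */
#include <stdatomic.h>

static atomic_int contended_flag;	/* illustrative shared flag */

void
wait_for_flag(void)
{
	int spins = 0;

	while (atomic_load(&contended_flag) == 0) {
		if (++spins > 1000) {
			/* depress priority and yield; TRUE means others were runnable */
			(void)swtch_pri(0);
			spins = 0;
		}
	}
}
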
Example #2
File: ast.c  Project: aglab2/darwin-xnu
/*
 * AST_URGENT was detected while in kernel mode
 * Called with interrupts disabled, returns the same way
 * Must return to caller
 */
void
ast_taken_kernel(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();

	/* Idle threads handle preemption themselves */
	if ((thread->state & TH_IDLE)) {
		ast_off(AST_PREEMPTION);
		return;
	}

	/*
	 * It's possible for this to be called after AST_URGENT
	 * has already been handled, due to races in enable_preemption
	 */
	if (ast_peek(AST_URGENT) != AST_URGENT)
		return;

	/*
	 * Don't preempt if the thread is already preparing to block.
	 * TODO: the thread can cheese this with clear_wait()
	 */
	if (waitq_wait_possible(thread) == FALSE) {
		/* Consume AST_URGENT or the interrupt will call us again */
		ast_consume(AST_URGENT);
		return;
	}

	/* TODO: Should we csw_check again to notice if conditions have changed? */

	ast_t urgent_reason = ast_consume(AST_PREEMPTION);

	assert(urgent_reason & AST_PREEMPT);

	counter(c_ast_taken_block++);

	thread_block_reason(THREAD_CONTINUE_NULL, NULL, urgent_reason);

	assert(ml_get_interrupts_enabled() == FALSE);
}
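
A hedged sketch of the kind of call site this routine expects, modeled on the interrupt-return path: before resuming interrupted kernel code, service an urgent preemption AST if one is pending. The helper name is illustrative, the real callers are the architecture-specific trap handlers, and the snippet only builds inside the XNU tree.

#include <kern/ast.h>	/* assumed to declare ast_peek(), ast_taken_kernel(), AST_URGENT */

static void
return_to_kernel_sketch(void)
{
	/* interrupts are disabled here, as ast_taken_kernel() requires */
	if (ast_peek(AST_URGENT) == AST_URGENT) {
		ast_taken_kernel();	/* may context switch and resume us later */
	}
	/* fall through to restore the interrupted kernel context */
}
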
Example #3
File: ast.c  Project: JackieXie168/xnu
/*
 * Called at splsched.
 */
void
ast_taken(
	ast_t		reasons,
	boolean_t	enable
)
{
	boolean_t		preempt_trap = (reasons == AST_PREEMPTION);
	ast_t			*myast = ast_pending();
	thread_t		thread = current_thread();
	perfASTCallback	perf_hook = perfASTHook;

	/*
	 * CHUD hook - all threads including idle processor threads
	 */
	if (perf_hook) {
		if (*myast & AST_CHUD_ALL) {
			(*perf_hook)(reasons, myast);
			
			if (*myast == AST_NONE)
				return;
		}
	}
	else
		*myast &= ~AST_CHUD_ALL;

	reasons &= *myast;
	*myast &= ~reasons;

	/*
	 * Handle ASTs for all threads
	 * except idle processor threads.
	 */
	if (!(thread->state & TH_IDLE)) {
		/*
		 * Check for urgent preemption.
		 */
		if (	(reasons & AST_URGENT)				&&
				waitq_wait_possible(thread)		) {
			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				thread_block_reason(THREAD_CONTINUE_NULL, NULL,
										reasons & AST_PREEMPTION);
			}

			reasons &= ~AST_PREEMPTION;
		}

		/*
		 * The kernel preempt traps
		 * skip all other ASTs.
		 */
		if (!preempt_trap) {
			ml_set_interrupts_enabled(enable);

#ifdef	MACH_BSD
			/*
			 * Handle BSD hook.
			 */
			if (reasons & AST_BSD) {
				thread_ast_clear(thread, AST_BSD);
				bsd_ast(thread);
			}
#endif
#if CONFIG_MACF
			/*
			 * Handle MACF hook.
			 */
			if (reasons & AST_MACF) {
				thread_ast_clear(thread, AST_MACF);
				mac_thread_userret(thread);
			}
#endif
			/* 
			 * Thread APC hook.
			 */
			if (reasons & AST_APC) {
				thread_ast_clear(thread, AST_APC);
				special_handler(thread);
			}
			
			if (reasons & AST_GUARD) {
				thread_ast_clear(thread, AST_GUARD);
				guard_ast(thread);
			}
			
			if (reasons & AST_LEDGER) {
				thread_ast_clear(thread, AST_LEDGER);
				ledger_ast(thread);
			}

			/*
			 * Kernel Profiling Hook
			 */
			if (reasons & AST_KPERF) {
				thread_ast_clear(thread, AST_KPERF);
				chudxnu_thread_ast(thread);
			}

#if CONFIG_TELEMETRY
			if (reasons & AST_TELEMETRY_ALL) {
				boolean_t interrupted_userspace = FALSE;
				boolean_t is_windowed = FALSE;

				assert((reasons & AST_TELEMETRY_ALL) != AST_TELEMETRY_ALL); /* only one is valid at a time */
				interrupted_userspace = (reasons & AST_TELEMETRY_USER) ? TRUE : FALSE;
				is_windowed = ((reasons & AST_TELEMETRY_WINDOWED) ? TRUE : FALSE);
				thread_ast_clear(thread, AST_TELEMETRY_ALL);
				telemetry_ast(thread, interrupted_userspace, is_windowed);
			}
#endif

			ml_set_interrupts_enabled(FALSE);

#if CONFIG_SCHED_SFI
			if (reasons & AST_SFI) {
				sfi_ast(thread);
			}
#endif

			/*
			 * Check for preemption. Conditions may have changed from when the AST_PREEMPT was originally set.
			 */
			thread_lock(thread);
			if (reasons & AST_PREEMPT)
				reasons = csw_check(current_processor(), reasons & AST_QUANTUM);
			thread_unlock(thread);

			assert(waitq_wait_possible(thread));

			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				thread_block_reason((thread_continue_t)thread_exception_return, NULL, reasons & AST_PREEMPTION);
			}
		}
	}

	ml_set_interrupts_enabled(enable);
}
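
The handlers above all follow the same clear-then-run discipline on the per-thread AST word. Below is a standalone sketch of that bit pattern, with made-up names (EX_AST_*, ex_ast_set, ex_ast_clear) standing in for the real thread_ast_set()/thread_ast_clear() macros.

#include <stdint.h>

typedef uint32_t ex_ast_t;

#define EX_AST_BSD	0x0001u		/* illustrative reason bits */
#define EX_AST_APC	0x0002u

static inline void
ex_ast_set(volatile ex_ast_t *ast, ex_ast_t reason)
{
	*ast |= reason;
}

static inline void
ex_ast_clear(volatile ex_ast_t *ast, ex_ast_t reason)
{
	/*
	 * Clear before running the handler, so a reason re-set during the
	 * handler is noticed on the next pass through ast_taken().
	 */
	*ast &= ~reason;
}
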
Example #4
/*
 * Called at splsched.
 */
void
ast_taken(
	ast_t			reasons,
	boolean_t		enable
)
{
	register thread_t		self = current_thread();
	register int			mycpu = cpu_number();
	boolean_t				preempt_trap = (reasons == AST_PREEMPTION);

	reasons &= need_ast[mycpu];
	need_ast[mycpu] &= ~reasons;

	/*
	 * Handle ASTs for all threads
	 * except idle processor threads.
	 */
	if (!(self->state & TH_IDLE)) {
		/*
		 * Check for urgent preemption.
		 */
		if (	(reasons & AST_URGENT)				&&
				wait_queue_assert_possible(self)		) {
			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				thread_block_reason(THREAD_CONTINUE_NULL,
										AST_PREEMPT | AST_URGENT);
			}

			reasons &= ~AST_PREEMPTION;
		}

		/*
		 * The kernel preempt traps
		 * skip all other ASTs.
		 */
		if (!preempt_trap) {
			ml_set_interrupts_enabled(enable);

#ifdef	MACH_BSD
			/*
			 * Handle BSD hook.
			 */
			if (reasons & AST_BSD) {
				extern void		bsd_ast(thread_act_t	act);
				thread_act_t	act = self->top_act;

				thread_ast_clear(act, AST_BSD);
				bsd_ast(act);
			}
#endif

			/* 
			 * Thread APC hook.
			 */
			if (reasons & AST_APC)
				act_execute_returnhandlers();

			ml_set_interrupts_enabled(FALSE);

			/* 
			 * Check for preemption.
			 */
			if (reasons & AST_PREEMPT) {
				processor_t		myprocessor = current_processor();

				if (csw_needed(self, myprocessor))
					reasons = AST_PREEMPT;
				else
					reasons = AST_NONE;
			}
			if (	(reasons & AST_PREEMPT)				&&
					wait_queue_assert_possible(self)		) {		
				counter(c_ast_taken_block++);
				thread_block_reason(thread_exception_return, AST_PREEMPT);
			}
		}
	}

	ml_set_interrupts_enabled(enable);
}
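
Before paying for thread_block_reason(), this variant re-evaluates via csw_needed() whether a switch is still warranted, since the handlers above may have changed scheduling state. A standalone toy of that recheck-before-block shape follows; the names and the predicate body are illustrative, not the kernel's.

#include <stdbool.h>

typedef unsigned int toy_ast_t;

#define TOY_AST_NONE	0x0u
#define TOY_AST_PREEMPT	0x1u

/* stands in for csw_needed(self, myprocessor) */
static bool
toy_switch_still_needed(void)
{
	return true;	/* illustrative: the real check consults the run queues */
}

static toy_ast_t
toy_recheck_preemption(toy_ast_t reasons)
{
	if (reasons & TOY_AST_PREEMPT) {
		/* conditions may have changed since the AST was posted */
		return toy_switch_still_needed() ? TOY_AST_PREEMPT : TOY_AST_NONE;
	}
	return reasons;
}
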
Example #5
File: ast.c  Project: aglab2/darwin-xnu
/*
 * An AST flag was set while returning to user mode
 * Called with interrupts disabled, returns with interrupts enabled
 * May call continuation instead of returning
 */
void
ast_taken_user(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	thread_t thread = current_thread();

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));
	assert((thread->state & TH_IDLE) == 0);

	/* TODO: Add more 'return to userspace' assertions here */

	/*
	 * If this thread was urgently preempted in userspace,
	 * take the preemption before processing the ASTs.
	 * The trap handler will call us again if we have more ASTs, so it's
	 * safe to block in a continuation here.
	 */
	if (ast_peek(AST_URGENT) == AST_URGENT) {
		ast_t urgent_reason = ast_consume(AST_PREEMPTION);

		assert(urgent_reason & AST_PREEMPT);

		/* TODO: Should we csw_check again to notice if conditions have changed? */

		thread_block_reason(thread_preempted, NULL, urgent_reason);
		/* NOTREACHED */
	}

	/*
	 * AST_KEVENT does not send an IPI when setting the ast for a thread running in parallel
	 * on a different processor. Only the ast bit on the thread will be set.
	 *
	 * Force a propagate for concurrent updates without an IPI.
	 */
	ast_propagate(thread);

	/*
	 * Consume all non-preemption processor ASTs matching reasons
	 * because we're handling them here.
	 *
	 * If one of the AST handlers blocks in a continuation,
	 * we'll reinstate the unserviced thread-level AST flags
	 * from the thread to the processor on context switch.
	 * If one of the AST handlers sets another AST,
	 * the trap handler will call ast_taken_user again.
	 *
	 * We expect the AST handlers not to thread_exception_return
	 * without an ast_propagate or context switch to reinstate
	 * the per-processor ASTs.
	 *
	 * TODO: Why are AST_DTRACE and AST_KPERF not per-thread ASTs?
	 */
	ast_t reasons = ast_consume(AST_PER_THREAD | AST_KPERF | AST_DTRACE);

	ml_set_interrupts_enabled(TRUE);

#if CONFIG_DTRACE
	if (reasons & AST_DTRACE) {
		dtrace_ast();
	}
#endif

#ifdef MACH_BSD
	if (reasons & AST_BSD) {
		thread_ast_clear(thread, AST_BSD);
		bsd_ast(thread);
	}
#endif

#if CONFIG_MACF
	if (reasons & AST_MACF) {
		thread_ast_clear(thread, AST_MACF);
		mac_thread_userret(thread);
	}
#endif

	if (reasons & AST_APC) {
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);
	}

	if (reasons & AST_GUARD) {
		thread_ast_clear(thread, AST_GUARD);
		guard_ast(thread);
	}

	if (reasons & AST_LEDGER) {
		thread_ast_clear(thread, AST_LEDGER);
		ledger_ast(thread);
	}

	if (reasons & AST_KPERF) {
		thread_ast_clear(thread, AST_KPERF);
		kperf_kpc_thread_ast(thread);
	}

	if (reasons & AST_KEVENT) {
		thread_ast_clear(thread, AST_KEVENT);
		uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0);
		if (bits) kevent_ast(thread, bits);
	}

#if CONFIG_TELEMETRY
	if (reasons & AST_TELEMETRY_ALL) {
		ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL;
		thread_ast_clear(thread, AST_TELEMETRY_ALL);
		telemetry_ast(thread, telemetry_reasons);
	}
#endif

	spl_t s = splsched();

#if CONFIG_SCHED_SFI
	/*
	 * SFI is currently a per-processor AST, not a per-thread AST
	 *      TODO: SFI should be a per-thread AST
	 */
	if (ast_consume(AST_SFI) == AST_SFI) {
		sfi_ast(thread);
	}
#endif

	/* We are about to return to userspace, there must not be a pending wait */
	assert(waitq_wait_possible(thread));

	/*
	 * We've handled all per-thread ASTs, time to handle non-urgent preemption.
	 *
	 * We delay reading the preemption bits until now in case the thread
	 * blocks while handling per-thread ASTs.
	 *
	 * If one of the AST handlers had managed to set a new AST bit,
	 * thread_exception_return will call ast_taken again.
	 */
	ast_t preemption_reasons = ast_consume(AST_PREEMPTION);

	if (preemption_reasons & AST_PREEMPT) {
		/* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */

		thread_lock(thread);
		preemption_reasons = csw_check(current_processor(), (preemption_reasons & AST_QUANTUM));
		thread_unlock(thread);

#if CONFIG_SCHED_SFI
		/* csw_check might tell us that SFI is needed */
		if (preemption_reasons & AST_SFI) {
			sfi_ast(thread);
		}
#endif

		if (preemption_reasons & AST_PREEMPT) {
			counter(c_ast_taken_block++);
			/* switching to a continuation implicitly re-enables interrupts */
			thread_block_reason(thread_preempted, NULL, preemption_reasons);
			/* NOTREACHED */
		}
	}

	splx(s);
}
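
ast_taken_user() leans on the ast_peek()/ast_consume() contract: peek reports pending bits without clearing them, consume atomically claims them so each handler runs at most once per setting. A toy standalone sketch of that contract follows; the sketch_* names are made up, and a plain variable stands in for the real per-processor pending set.

#include <stdint.h>

typedef uint32_t sketch_ast_t;

static sketch_ast_t sketch_pending;	/* stands in for the processor's pending set */

static sketch_ast_t
sketch_ast_peek(sketch_ast_t reasons)
{
	return sketch_pending & reasons;	/* report, do not clear */
}

static sketch_ast_t
sketch_ast_consume(sketch_ast_t reasons)
{
	sketch_ast_t taken = sketch_pending & reasons;

	sketch_pending &= ~taken;		/* claim the bits we will handle */
	return taken;
}
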
Example #6
/*
 *	thread_switch:
 *
 *	Context switch.  User may supply thread hint.
 */
kern_return_t
thread_switch(
	struct thread_switch_args *args)
{
	thread_t			thread = THREAD_NULL;
	thread_t			self = current_thread();
	mach_port_name_t		thread_name = args->thread_name;
	int						option = args->option;
	mach_msg_timeout_t		option_time = args->option_time;
	uint32_t				scale_factor = NSEC_PER_MSEC;
	boolean_t				reenable_workq_callback = FALSE;
	boolean_t				depress_option = FALSE;
	boolean_t				wait_option = FALSE;

	/*
	 *	Validate and process option.
	 */
	switch (option) {

	case SWITCH_OPTION_NONE:
		workqueue_thread_yielded();
		break;
	case SWITCH_OPTION_WAIT:
		wait_option = TRUE;
		workqueue_thread_yielded();
		break;
	case SWITCH_OPTION_DEPRESS:
		depress_option = TRUE;
		workqueue_thread_yielded();
		break;
	case SWITCH_OPTION_DISPATCH_CONTENTION:
		scale_factor = NSEC_PER_USEC;
		wait_option = TRUE;
		if (thread_switch_disable_workqueue_sched_callback())
			reenable_workq_callback = TRUE;
		break;
	case SWITCH_OPTION_OSLOCK_DEPRESS:
		depress_option = TRUE;
		if (thread_switch_disable_workqueue_sched_callback())
			reenable_workq_callback = TRUE;
		break;
	case SWITCH_OPTION_OSLOCK_WAIT:
		wait_option = TRUE;
		if (thread_switch_disable_workqueue_sched_callback())
			reenable_workq_callback = TRUE;
		break;
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	/*
	 * Translate the port name if supplied.
	 */
	if (thread_name != MACH_PORT_NULL) {
		ipc_port_t port;

		if (ipc_port_translate_send(self->task->itk_space,
		                            thread_name, &port) == KERN_SUCCESS) {
			ip_reference(port);
			ip_unlock(port);

			thread = convert_port_to_thread(port);
			ip_release(port);

			if (thread == self) {
				thread_deallocate(thread);
				thread = THREAD_NULL;
			}
		}
	}

	if (option == SWITCH_OPTION_OSLOCK_DEPRESS || option == SWITCH_OPTION_OSLOCK_WAIT) {
		if (thread != THREAD_NULL) {

			if (thread->task != self->task) {
				/*
				 * OSLock boosting only applies to other threads
				 * in your same task (even if you have a port for
				 * a thread in another task)
				 */

				thread_deallocate(thread);
				thread = THREAD_NULL;
			} else {
				/*
				 * Attempt to kick the lock owner up to our same IO throttling tier.
				 * If the thread is currently blocked in throttle_lowpri_io(),
				 * it will immediately break out.
				 *
				 * TODO: SFI break out?
				 */
				int new_policy = proc_get_effective_thread_policy(self, TASK_POLICY_IO);

				set_thread_iotier_override(thread, new_policy);
			}
		}
	}

	/*
	 * Try to handoff if supplied.
	 */
	if (thread != THREAD_NULL) {
		spl_t s = splsched();

		/* This may return a different thread if the target is pushing on something */
		thread_t pulled_thread = thread_run_queue_remove_for_handoff(thread);

		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED_THREAD_SWITCH)|DBG_FUNC_NONE,
				      thread_tid(thread), thread->state,
				      pulled_thread ? TRUE : FALSE, 0, 0);

		if (pulled_thread != THREAD_NULL) {
			/* We can't be dropping the last ref here */
			thread_deallocate_safe(thread);

			if (wait_option)
				assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE,
				                    option_time, scale_factor);
			else if (depress_option)
				thread_depress_ms(option_time);

			self->saved.swtch.option = option;
			self->saved.swtch.reenable_workq_callback = reenable_workq_callback;

			thread_run(self, (thread_continue_t)thread_switch_continue, NULL, pulled_thread);
			/* NOTREACHED */
			panic("returned from thread_run!");
		}

		splx(s);

		thread_deallocate(thread);
	}

	if (wait_option)
		assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE, option_time, scale_factor);
	else if (depress_option)
		thread_depress_ms(option_time);

	self->saved.swtch.option = option;
	self->saved.swtch.reenable_workq_callback = reenable_workq_callback;

	thread_block_reason((thread_continue_t)thread_switch_continue, NULL, AST_YIELD);

	if (depress_option)
		thread_depress_abort_internal(self);

	if (reenable_workq_callback)
		thread_switch_enable_workqueue_sched_callback();

	return (KERN_SUCCESS);
}
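
A hypothetical user-space caller of the trap handled above: hand off to a known lock-holder thread, depressing our priority for a couple of milliseconds if the handoff is not possible. It assumes the user-level thread_switch() prototype from <mach/mach_traps.h> and the SWITCH_OPTION_* constants from <mach/thread_switch.h>; the function and port names are illustrative.

#include <mach/mach.h>
#include <mach/mach_traps.h>	/* assumed to declare thread_switch() for user space */
#include <mach/thread_switch.h>	/* SWITCH_OPTION_DEPRESS */

void
yield_to_holder(mach_port_t holder_thread)
{
	/* option_time is in milliseconds here (scale_factor = NSEC_PER_MSEC above) */
	kern_return_t kr = thread_switch(holder_thread, SWITCH_OPTION_DEPRESS, 2);

	if (kr != KERN_SUCCESS) {
		/* KERN_INVALID_ARGUMENT means an unrecognized option */
	}
}
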
Example #7
File: ast.c  Project: rohsaini/mkunity
void
ast_taken(
	boolean_t		preemption,
	ast_t			mask,
	spl_t			old_spl
#if	FAST_IDLE
        ,int			thread_type
#endif	/* FAST_IDLE */
)
{
	register thread_t	self = current_thread();
	register processor_t	mypr;
	register ast_t		reasons;
	register int		mycpu;
	thread_act_t		act = self->top_act;

	/*
	 *	Interrupts are still disabled.
	 *	We must clear need_ast and then enable interrupts.
	 */

extern void	log_thread_action(thread_t, char *);

#if 0
	log_thread_action (current_thread(), "ast_taken");
#endif

	mp_disable_preemption();
	mycpu = cpu_number();
	reasons = need_ast[mycpu] & mask;
	need_ast[mycpu] &= ~reasons;
	mp_enable_preemption();

	splx(old_spl);

	/*
	 *	These actions must not block.
	 */

#if	MCMSG
	if (reasons & AST_MCMSG)
		mcmsg_ast();
#endif	/* MCMSG */

	if (reasons & AST_NETWORK)
		net_ast();

#if	MCMSG_ENG
	if (reasons & AST_RPCREQ)
		rpc_engine_request_intr();

	if (reasons & AST_RPCREPLY)
		rpc_engine_reply_intr();

	if (reasons & AST_RPCDEPART)
		rpc_engine_depart_intr();

	if (reasons & AST_RDMASEND)
		rdma_engine_send_intr();

	if (reasons & AST_RDMARECV)
		rdma_engine_recv_intr();

	if (reasons & AST_RDMATXF)
		rdma_engine_send_fault_intr();

	if (reasons & AST_RDMARXF)
		rdma_engine_recv_fault_intr();
#endif	/* MCMSG_ENG */

#if	PARAGON860 && MCMSG_ENG
	if (reasons & AST_SCAN_INPUT)
		scan_input_ast();
#endif	/* PARAGON860 */

#if	DIPC
	if (reasons & AST_DIPC)
		dipc_ast();
#endif	/* DIPC */

	/*
	 *	Make darn sure that we don't call thread_halt_self
	 *	or thread_block from the idle thread.
	 */

	/* XXX - this isn't currently right for the HALT case... */

	mp_disable_preemption();
	mypr = current_processor();
	if (self == mypr->idle_thread) {
#if	NCPUS == 1
	    if (reasons & AST_URGENT) {
		if (!preemption)
		    panic("ast_taken: AST_URGENT for idle_thr w/o preemption");
	    }
#endif
	    mp_enable_preemption();
	    return;
	}
	mp_enable_preemption();

#if	FAST_IDLE
	if (thread_type != NO_IDLE_THREAD)
		return;
#endif	/* FAST_IDLE */

#if	TASK_SWAPPER
	/* must be before AST_APC */
	if (reasons & AST_SWAPOUT) {
		spl_t s;
		swapout_ast();
		s = splsched();
		mp_disable_preemption();
		mycpu = cpu_number();
		if (need_ast[mycpu] & AST_APC) {
			/* generated in swapout_ast() to get suspended */
			reasons |= AST_APC;		/* process now ... */
			need_ast[mycpu] &= ~AST_APC;	/* ... and not later */
		}
		mp_enable_preemption();
		splx(s);
	}
#endif	/* TASK_SWAPPER */

	/* migration APC hook */
	if (reasons & AST_APC) {
		act_execute_returnhandlers();
		return;	/* auto-retry will catch anything new */
	}

	/* 
	 *	thread_block needs to know if the thread's quantum 
	 *	expired so the thread can be put on the tail of
	 *	run queue. One of the previous actions might well
	 *	have woken a high-priority thread, so we also use
	 *	csw_needed check.
	 */
	{   void (*safept)(void) = (void (*)(void))SAFE_EXCEPTION_RETURN;

	    if (reasons &= AST_PREEMPT) {	/* keep only the preemption bit */
		    if (preemption)
			    safept = (void (*)(void)) 0;
	    } else {
		    mp_disable_preemption();
		    mypr = current_processor();
		    if (csw_needed(self, mypr)) {
			    reasons = (mypr->first_quantum
				       ? AST_BLOCK
				       : AST_QUANTUM);
		    }
		    mp_enable_preemption();
	    }
	    if (reasons) {
		    counter(c_ast_taken_block++);
		    thread_block_reason(safept, reasons);
	    }
	}
}
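
Both older variants key off a per-CPU need_ast[] array rather than per-thread flags. A standalone sketch of that convention follows, with percpu_* names standing in for the original declarations: each CPU's slot holds the reasons awaiting service, and consumers mask-and-clear it while preemption is disabled so the CPU number cannot change underneath them.

#define PERCPU_NCPUS	4		/* illustrative */

typedef unsigned int percpu_ast_t;

static volatile percpu_ast_t percpu_need_ast[PERCPU_NCPUS];

static percpu_ast_t
percpu_consume(int mycpu, percpu_ast_t mask)
{
	/* caller has preemption disabled, mirroring mp_disable_preemption() above */
	percpu_ast_t reasons = percpu_need_ast[mycpu] & mask;

	percpu_need_ast[mycpu] &= ~reasons;
	return reasons;
}
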