Example #1
void
timer_call_shutdown(
	processor_t			processor)
{
	timer_call_t		call;
	queue_t				queue, myqueue;

	assert(processor != current_processor());

	queue = &PROCESSOR_DATA(processor, timer_call_queue);
	myqueue = &PROCESSOR_DATA(current_processor(), timer_call_queue);

	simple_lock(&timer_call_lock);

	call = TC(queue_first(queue));

	while (!queue_end(queue, qe(call))) {
		_delayed_call_dequeue(call);

		_delayed_call_enqueue(myqueue, call);

		call = TC(queue_first(queue));
	}

	call = TC(queue_first(myqueue));

	if (!queue_end(myqueue, qe(call)))
		_set_delayed_call_timer(call);

	simple_unlock(&timer_call_lock);
}
Example #2
boolean_t
swtch_pri(
__unused	struct swtch_pri_args *args)
{
	register processor_t	myprocessor;
	boolean_t				result;

	disable_preemption();
	myprocessor = current_processor();
	if (SCHED(processor_queue_empty)(myprocessor) && rt_runq.count == 0) {
		mp_enable_preemption();

		return (FALSE);
	}
	enable_preemption();

	counter(c_swtch_pri_block++);

	thread_depress_abstime(thread_depress_time);

	thread_block_reason((thread_continue_t)swtch_pri_continue, NULL, AST_YIELD);

	thread_depress_abort_internal(current_thread());

	disable_preemption();
	myprocessor = current_processor();
	result = !SCHED(processor_queue_empty)(myprocessor) || rt_runq.count > 0;
	enable_preemption();

	return (result);
}
Example #3
boolean_t  swtch_pri(int pri)
{
	thread_t	thread = current_thread();
	processor_t	myprocessor;

#if	NCPUS > 1
	myprocessor = current_processor();
	if (myprocessor->runq.count == 0 &&
	    myprocessor->processor_set->runq.count == 0)
		return(FALSE);
#endif	/* NCPUS > 1 */

	/*
	 *	XXX need to think about depression duration.
	 *	XXX currently using min quantum.
	 */
	thread_depress_priority(thread, min_quantum);

	counter(c_swtch_pri_block++);
	thread_block(swtch_pri_continue);

	if (thread->depress_priority >= 0)
		(void) thread_depress_abort(thread);
	myprocessor = current_processor();
	return(myprocessor->runq.count > 0 ||
	       myprocessor->processor_set->runq.count > 0);
}
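
Both swtch_pri() variants above are the kernel side of a user-visible Mach trap. Below is a minimal user-level sketch of its classic use, yielding while a lock is contended; it assumes the trap prototypes from <mach/mach_traps.h> and a GCC-style atomic builtin, and is an illustration rather than code taken from these examples.

#include <mach/mach_traps.h>	/* user-level prototypes for swtch() and swtch_pri() */

/* Hypothetical spin lock that depresses priority and gives up the CPU
 * instead of busy-waiting while the lock is held by another thread. */
static void
spin_acquire(volatile int *lock)
{
	while (__sync_lock_test_and_set(lock, 1)) {
		/* Returns TRUE if other threads were runnable, FALSE otherwise;
		 * either way, retry the lock afterwards. */
		(void) swtch_pri(0);
	}
}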
Example #4
int
kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes,
                        int *curcpu, uint64_t *buf)
{
	int curcpu_id = current_processor()->cpu_id;
	uint32_t cfg_count = kpc_configurable_count(), offset = 0;
	uint64_t pmc_mask = 0ULL;
	boolean_t enabled;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	curcpu_id = current_processor()->cpu_id;
	if (curcpu)
		*curcpu = curcpu_id;

	for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) {
		/* filter if the caller did not request all cpus */
		if (!all_cpus && (cpu != curcpu_id))
			continue;

		if (classes & KPC_CLASS_FIXED_MASK) {
			uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
			memcpy(&buf[offset], &FIXED_SHADOW_CPU(cpu, 0), count * sizeof(uint64_t));
			offset += count;
		}

		if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
				if ((1ULL << cfg_ctr) & pmc_mask)
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
		}

		if (classes & KPC_CLASS_POWER_MASK) {
			pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);

			for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr)
				if ((1ULL << cfg_ctr) & pmc_mask)
					buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr);
		}
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}
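
The configurable and power branches above copy one buffer slot per bit set in pmc_mask, which is why callers can size their buffers with a popcount of the mask. A small stand-alone sketch of that equivalence (not part of the kernel source) follows.

#include <stdint.h>

/* Counts how many buffer slots the masked loops above will fill: one per
 * set bit of pmc_mask among the first cfg_count counters. This is the
 * quantity the callers obtain via kpc_popcount(pmc_mask). */
static uint32_t
masked_slot_count(uint64_t pmc_mask, uint32_t cfg_count)
{
	uint32_t slots = 0;

	for (uint32_t ctr = 0; ctr < cfg_count; ++ctr)
		if ((1ULL << ctr) & pmc_mask)
			slots++;

	return slots;
}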
Example #5
/*
 *	stack_alloc_try:
 *
 *	Non-blocking attempt to allocate a
 *	stack for a thread.
 *
 *	Returns TRUE on success.
 *
 *	Called at splsched.
 */
boolean_t
stack_alloc_try(
	thread_t		thread)
{
	struct stack_cache	*cache;
	vm_offset_t			stack;

	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	stack = cache->free;
	if (stack != 0) {
		cache->free = stack_next(stack);
		cache->count--;
	}
	else {
		if (stack_free_list != 0) {
			stack_lock();
			stack = stack_free_list;
			if (stack != 0) {
				stack_free_list = stack_next(stack);
				stack_free_count--;
				stack_free_delta--;
			}
			stack_unlock();
		}
	}

	if (stack != 0 || (stack = thread->reserved_stack) != 0) {
		machine_stack_attach(thread, stack);
		return (TRUE);
	}

	return (FALSE);
}
Example #6
void
stack_free_stack(
	vm_offset_t		stack)
{
	struct stack_cache	*cache;
	spl_t				s;

	s = splsched();
	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	if (cache->count < STACK_CACHE_SIZE) {
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	}
	else {
		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_hiwat)
			stack_free_hiwat = stack_free_count;
		stack_free_delta++;
		stack_unlock();
	}
	splx(s);
}
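
Examples #5 and #6 are the two halves of a per-processor free-list cache: allocation prefers the local cache and falls back to the locked global list, while freeing refills the local cache until it reaches STACK_CACHE_SIZE. A stripped-down sketch of the same pattern is shown below; the names are hypothetical and the global-list locking is elided.

#include <stddef.h>

/* Generic per-CPU cache in front of a shared free list, modeled on
 * stack_alloc_try()/stack_free_stack() above. Illustration only. */
struct block_cache {
	void		*free;		/* list threaded through the first word of each block */
	unsigned	count;
};

#define BLOCK_CACHE_SIZE	10	/* assumed per-CPU cache limit */

static void *global_free_list;		/* shared list; a real version would lock it */

static void *
cache_alloc(struct block_cache *cache)
{
	void *block = cache->free;

	if (block != NULL) {			/* fast path: local cache */
		cache->free = *(void **)block;
		cache->count--;
	} else if (global_free_list != NULL) {	/* slow path: shared list */
		block = global_free_list;
		global_free_list = *(void **)block;
	}
	return block;
}

static void
cache_free(struct block_cache *cache, void *block)
{
	if (cache->count < BLOCK_CACHE_SIZE) {	/* refill the local cache first */
		*(void **)block = cache->free;
		cache->free = block;
		cache->count++;
	} else {				/* overflow to the shared list */
		*(void **)block = global_free_list;
		global_free_list = block;
	}
}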
Example #7
/* may be called from an IPI */
int
kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled=0, offset=0;
	uint64_t pmc_mask = 0ULL;

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu)
		*curcpu = current_processor()->cpu_id;

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_get_fixed_counters(&buf[offset]);
		offset += kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	if (classes & KPC_CLASS_POWER_MASK) {
		pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK);
		kpc_get_configurable_counters(&buf[offset], pmc_mask);
		offset += kpc_popcount(pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);

	return offset;
}
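
A sketch of a possible caller is shown below. It sizes the output buffer with kpc_get_counter_count(), matching the layout the function produces (fixed counters first, then one slot per selected configurable counter); the buffer bound and the helper's name are assumptions for illustration.

/* Hypothetical caller of kpc_get_curcpu_counters(); not from the kernel source. */
static void
sample_current_cpu(void)
{
	uint32_t classes = KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK;
	uint32_t needed = kpc_get_counter_count(classes);	/* slots the call will fill */
	uint64_t buf[64];					/* assumed to be large enough */
	int curcpu, filled;

	if (needed > sizeof(buf) / sizeof(buf[0]))
		return;

	filled = kpc_get_curcpu_counters(classes, &curcpu, buf);
	/* buf[0 .. filled-1] now holds the fixed counters followed by the
	 * selected configurable counters for CPU curcpu. */
	(void) filled;
}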
Example #8
boolean_t
timer_call_cancel(
	timer_call_t			call)
{
	boolean_t		result = TRUE;
	spl_t			s;

	s = splclock();
	simple_lock(&timer_call_lock);

	if (call->state == DELAYED) {
		queue_t			queue = &PROCESSOR_DATA(current_processor(), timer_call_queue);

		if (queue_first(queue) == qe(call)) {
			_delayed_call_dequeue(call);

			if (!queue_empty(queue))
				_set_delayed_call_timer((timer_call_t)queue_first(queue));
		}
		else
			_delayed_call_dequeue(call);
	}
	else
		result = FALSE;

	simple_unlock(&timer_call_lock);
	splx(s);

	return (result);
}
Example #9
boolean_t
timer_call_enter1(
	timer_call_t			call,
	timer_call_param_t		param1,
	uint64_t				deadline)
{
	boolean_t		result = TRUE;
	queue_t			queue;
	spl_t			s;

	s = splclock();
	simple_lock(&timer_call_lock);

	if (call->state == DELAYED)
		_delayed_call_dequeue(call);
	else
		result = FALSE;

	call->param1	= param1;
	call->deadline	= deadline;

	queue = &PROCESSOR_DATA(current_processor(), timer_call_queue);

	_delayed_call_enqueue(queue, call);

	if (queue_first(queue) == qe(call))
		_set_delayed_call_timer(call);

	simple_unlock(&timer_call_lock);
	splx(s);

	return (result);
}
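
For context, here is a minimal sketch of how a client might arm and later cancel one of these timer calls (the enter/cancel pair in Examples #8 and #9). It assumes timer_call_setup() and clock_interval_to_deadline() are available with their usual signatures; the callback, interval, and names are hypothetical.

static timer_call_data_t	my_call;	/* storage for the timer call */

static void
my_timer_expired(timer_call_param_t p0, timer_call_param_t p1)
{
	/* runs from the timer interrupt path once the deadline passes */
}

static void
arm_my_timer(void)
{
	uint64_t	deadline;

	timer_call_setup(&my_call, my_timer_expired, NULL);		/* bind func and param0 */
	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);	/* ~100 ms from now */
	timer_call_enter1(&my_call, NULL, deadline);			/* queue on this CPU */
}

static void
disarm_my_timer(void)
{
	/* returns FALSE if the call had already expired or was never pending */
	(void) timer_call_cancel(&my_call);
}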
Example #10
/**
 * etimer_intr
 *
 * Timer interrupt routine, called from the realtime clock interrupt
 * routine.
 */
void etimer_intr(int inuser, uint64_t iaddr)
{
    uint64_t abstime;
    rtclock_timer_t *mytimer;
    cpu_data_t *pp;
    int32_t latency;

    pp = current_cpu_datap();

    SCHED_STATS_TIMER_POP(current_processor());

    abstime = mach_absolute_time(); /* Get the time now */

    /*
     * has a pending clock timer expired? 
     */
    mytimer = &pp->rt_timer;    /* Point to the event timer */
    if (mytimer->deadline <= abstime) {
        mytimer->has_expired = TRUE;    /* Remember that we popped */
        mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
        mytimer->has_expired = FALSE;
    }

    pp->rtcPop = EndOfAllTime;  /* any real deadline will be earlier */
    /*
     * schedule our next deadline 
     */

    etimer_resync_deadlines();
}
Example #11
/*
 *	Routine:	cpu_doshutdown
 *	Function:
 */
void
cpu_doshutdown(
	void)
{
	enable_preemption();
	processor_offline(current_processor());
}
Example #12
boolean_t swtch(void)
{
	register processor_t	myprocessor;

#if	NCPUS > 1
	myprocessor = current_processor();
	if (myprocessor->runq.count == 0 &&
	    myprocessor->processor_set->runq.count == 0)
		return(FALSE);
#endif	/* NCPUS > 1 */

	counter(c_swtch_block++);
	thread_block(swtch_continue);
	myprocessor = current_processor();
	return(myprocessor->runq.count > 0 ||
	       myprocessor->processor_set->runq.count > 0);
}
Example #13
void
ast_taken(void)
{
	thread_t self = current_thread();
	ast_t reasons;

	/*
	 *	Interrupts are still disabled.
	 *	We must clear need_ast and then enable interrupts.
	 */

	reasons = need_ast[cpu_number()];
	need_ast[cpu_number()] = AST_ZILCH;
	(void) spl0();

	/*
	 *	These actions must not block.
	 */

	if (reasons & AST_NETWORK)
		net_ast();

	/*
	 *	Make darn sure that we don't call thread_halt_self
	 *	or thread_block from the idle thread.
	 */

	if (self != current_processor()->idle_thread) {
#ifndef MIGRATING_THREADS
		while (thread_should_halt(self))
			thread_halt_self();
#endif

		/*
		 *	One of the previous actions might well have
		 *	woken a high-priority thread, so we use
		 *	csw_needed in addition to AST_BLOCK.
		 */

		if ((reasons & AST_BLOCK) ||
		    csw_needed(self, current_processor())) {
			counter(c_ast_taken_block++);
			thread_block(thread_exception_return);
		}
	}
}
Example #14
void swtch_continue(void)
{
	register processor_t	myprocessor;

	myprocessor = current_processor();
	thread_syscall_return(myprocessor->runq.count > 0 ||
			      myprocessor->processor_set->runq.count > 0);
	/*NOTREACHED*/
}
Example #15
/*
 *	load_context:
 *
 *	Start the first thread on a processor.
 */
static void
load_context(
	thread_t		thread)
{
	processor_t		processor = current_processor();


#define load_context_kprintf(x...) /* kprintf("load_context: " x) */

	load_context_kprintf("calling machine_set_current_thread\n");
	machine_set_current_thread(thread);

	load_context_kprintf("calling processor_up\n");
	processor_up(processor);

	PMAP_ACTIVATE_KERNEL(processor->cpu_id);

	/*
	 * Acquire a stack if none attached.  The panic
	 * should never occur since the thread is expected
	 * to have reserved stack.
	 */
	load_context_kprintf("thread %p, stack %lx, stackptr %lx\n", thread,
			     thread->kernel_stack, thread->machine.kstackptr);
	if (!thread->kernel_stack) {
		load_context_kprintf("calling stack_alloc_try\n");
		if (!stack_alloc_try(thread))
			panic("load_context");
	}

	/*
	 * The idle processor threads are not counted as
	 * running for load calculations.
	 */
	if (!(thread->state & TH_IDLE))
		sched_run_incr();

	processor->active_thread = thread;
	processor->current_pri = thread->sched_pri;
	processor->current_thmode = thread->sched_mode;
	processor->deadline = UINT64_MAX;
	thread->last_processor = processor;

	processor->last_dispatch = mach_absolute_time();
	timer_start(&thread->system_timer, processor->last_dispatch);
	PROCESSOR_DATA(processor, thread_timer) = PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

	timer_start(&PROCESSOR_DATA(processor, system_state), processor->last_dispatch);
	PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);

	PMAP_ACTIVATE_USER(thread, processor->cpu_id);

	load_context_kprintf("calling machine_load_context\n");
	machine_load_context(thread);
	/*NOTREACHED*/
}
Example #16
void swtch_pri_continue(void)
{
	register thread_t	thread = current_thread();
	register processor_t	myprocessor;

	if (thread->depress_priority >= 0)
		(void) thread_depress_abort(thread);
	myprocessor = current_processor();
	thread_syscall_return(myprocessor->runq.count > 0 ||
			      myprocessor->processor_set->runq.count > 0);
	/*NOTREACHED*/
}
Example #17
int64_t dtrace_calc_thread_recent_vtime(thread_t thread)
{
	if (thread != THREAD_NULL) {
		processor_t             processor = current_processor();
		uint64_t 				abstime = mach_absolute_time();
		timer_t					timer;

		timer = PROCESSOR_DATA(processor, thread_timer);

		return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer)) +
				(abstime - timer->tstamp); /* XXX need interrupts off to prevent missed time? */
	} else
		return 0;
}
Example #18
static void
swtch_continue(void)
{
	register processor_t	myprocessor;
    boolean_t				result;

    disable_preemption();
	myprocessor = current_processor();
	result = !SCHED(processor_queue_empty)(myprocessor) || rt_runq.count > 0;
	enable_preemption();

	thread_syscall_return(result);
	/*NOTREACHED*/
}
Example #19
/*
 * Adjust the Universal (Posix) time gradually.
 */
kern_return_t
host_adjust_time(
	host_t		host,
	time_value_t	newadj,
	time_value_t	*oldadj)	/* OUT */
{
	time_value_t	oadj;
	integer_t	ndelta;
	spl_t		s;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	ndelta = (newadj.seconds * 1000000) + newadj.microseconds;

#if	NCPUS > 1
	thread_bind(current_thread(), master_processor);
	mp_disable_preemption();
	if (current_processor() != master_processor) {
		mp_enable_preemption();
		thread_block((void (*)(void)) 0);
	} else {
		mp_enable_preemption();
	}
#endif	/* NCPUS > 1 */

	s = splclock();
	oadj.seconds = timedelta / 1000000;
	oadj.microseconds = timedelta % 1000000;
	if (timedelta == 0) {
		if (ndelta > bigadj)
			tickdelta = 10 * tickadj;
		else
			tickdelta = tickadj;
	}
	if (ndelta % tickdelta)
		ndelta = ndelta / tickdelta * tickdelta;
	timedelta = ndelta;
	splx(s);

#if	NCPUS > 1
	thread_bind(current_thread(), PROCESSOR_NULL);
#endif	/* NCPUS > 1 */

	*oldadj = oadj;

	return (KERN_SUCCESS);
}
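
The adjustment is consumed tickdelta microseconds per clock tick, so the requested delta is trimmed down to a whole number of ticks; with hypothetical numbers, a request of 12,345 µs and tickdelta = 100 µs is stored as 12,300 µs. The sketch below restates that trimming in isolation.

/* Trims a requested adjustment (in microseconds) to a whole number of
 * tick-sized steps, as host_adjust_time() does above. Illustration only. */
static int
trim_to_ticks(int ndelta, int tickdelta)
{
	if (ndelta % tickdelta)
		ndelta = ndelta / tickdelta * tickdelta;
	return ndelta;		/* e.g. trim_to_ticks(12345, 100) == 12300 */
}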
Example #20
/*
 *	processor_start_thread:
 *
 *	First thread to execute on a started processor.
 *
 *	Called at splsched.
 */
void
processor_start_thread(void *machine_param)
{
    processor_t		processor = current_processor();
    thread_t		self = current_thread();

    slave_machine_init(machine_param);

    /*
     *	If running the idle processor thread,
     *	reenter the idle loop, else terminate.
     */
    if (self == processor->idle_thread)
        thread_block((thread_continue_t)idle_thread);

    thread_terminate(self);
    /*NOTREACHED*/
}
Example #21
/*
 * The scheduler is too messy for my old little brain
 */
void
simpler_thread_setrun(
	thread_t	th,
	boolean_t	may_preempt)
{
	register struct run_queue	*rq;
	register int			whichq;

	/*
	 *	XXX should replace queue with a boolean in this case.
	 */
	if (default_pset.idle_count > 0) {
		processor_t	processor;

		processor = (processor_t) queue_first(&default_pset.idle_queue);
		queue_remove(&default_pset.idle_queue, processor,
			     processor_t, processor_queue);
		default_pset.idle_count--;
		processor->next_thread = th;
		processor->state = PROCESSOR_DISPATCHING;
		return;
	}
	rq = &(master_processor->runq);
	ast_on(cpu_number(), AST_BLOCK);

	whichq = (th)->sched_pri;
	simple_lock(&(rq)->lock);	/* lock the run queue */
	enqueue_head(&(rq)->runq[whichq], (queue_entry_t) (th));

	if (whichq < (rq)->low || (rq)->count == 0)
		 (rq)->low = whichq;	/* minimize */
	(rq)->count++;
#ifdef MIGRATING_THREADS
	(th)->shuttle.runq = (rq);
#else
	(th)->runq = (rq);
#endif
	simple_unlock(&(rq)->lock);

	/*
	 *	Turn off first_quantum to allow context switch.
	 */
	current_processor()->first_quantum = FALSE;
}
Example #22
/*
 * Complete the shutdown and place the processor offline.
 *
 * Called at splsched in the shutdown context.
 * This performs a minimal thread_invoke() to the idle thread,
 * so it needs to be kept in sync with what thread_invoke() does.
 *
 * The onlining half of this is done in load_context().
 */
void
processor_offline(
	processor_t			processor)
{
	assert(processor == current_processor());
	assert(processor->active_thread == current_thread());

	thread_t old_thread = processor->active_thread;
	thread_t new_thread = processor->idle_thread;

	processor->active_thread = new_thread;
	processor->current_pri = IDLEPRI;
	processor->current_thmode = TH_MODE_NONE;
	processor->starting_pri = IDLEPRI;
	processor->current_sfi_class = SFI_CLASS_KERNEL;
	processor->deadline = UINT64_MAX;
	new_thread->last_processor = processor;

	uint64_t ctime = mach_absolute_time();

	processor->last_dispatch = ctime;
	old_thread->last_run_time = ctime;

	/* Update processor->thread_timer and ->kernel_timer to point to the new thread */
	thread_timer_event(ctime, &new_thread->system_timer);
	PROCESSOR_DATA(processor, kernel_timer) = &new_thread->system_timer;

	timer_stop(PROCESSOR_DATA(processor, current_state), ctime);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	                          MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
	                          old_thread->reason, (uintptr_t)thread_tid(new_thread),
	                          old_thread->sched_pri, new_thread->sched_pri, 0);

	machine_set_current_thread(new_thread);

	thread_dispatch(old_thread, new_thread);

	PMAP_DEACTIVATE_KERNEL(processor->cpu_id);

	cpu_sleep();
	panic("zombie processor");
	/*NOTREACHED*/
}
Example #23
static int
pmThreadGetUrgency(uint64_t *rt_period, uint64_t *rt_deadline)
{
	int             urgency;
	uint64_t        arg1, arg2;

	urgency = thread_get_urgency(current_processor()->next_thread, &arg1, &arg2);

	if (urgency == THREAD_URGENCY_REAL_TIME) {
		if (rt_period != NULL)
			*rt_period = arg1;
		
		if (rt_deadline != NULL)
			*rt_deadline = arg2;
	}

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_GET_URGENCY), urgency, arg1, arg2, 0, 0);

	return(urgency);
}
Example #24
/*
 * Set the Universal (Posix) time. Privileged call.
 */
kern_return_t
host_set_time(
	host_t		host,
	time_value_t	new_time)
{
	spl_t	s;

	if (host == HOST_NULL)
		return(KERN_INVALID_HOST);

#if	NCPUS > 1
	thread_bind(current_thread(), master_processor);
	mp_disable_preemption();
	if (current_processor() != master_processor) {
		mp_enable_preemption();
		thread_block((void (*)(void)) 0);
	} else {
		mp_enable_preemption();
	}
#endif	/* NCPUS > 1 */

	s = splhigh();
	time = new_time;
	update_mapped_time(&time);
#if	PTIME_MACH_RT
	rtc_gettime_interrupts_disabled((tvalspec_t *)&last_utime_tick);
#endif	/* PTIME_MACH_RT */
#if 0
	(void)bbc_settime((time_value_t *)&time);
#endif
	splx(s);

#if	NCPUS > 1
	thread_bind(current_thread(), PROCESSOR_NULL);
#endif	/* NCPUS > 1 */

	return (KERN_SUCCESS);
}
Example #25
/*
 *	slave_main:
 *
 *	Load the first thread to start a processor.
 */
void
slave_main(void *machine_param)
{
    processor_t		processor = current_processor();
    thread_t		thread;

    /*
     *	Use the idle processor thread if there
     *	is no dedicated start up thread.
     */
    if (processor->next_thread == THREAD_NULL) {
        thread = processor->idle_thread;
        thread->continuation = (thread_continue_t)processor_start_thread;
        thread->parameter = machine_param;
    }
    else {
        thread = processor->next_thread;
        processor->next_thread = THREAD_NULL;
    }

    load_context(thread);
    /*NOTREACHED*/
}
Example #26
void
thread_poll_yield(
	thread_t		self)
{
	spl_t			s;

	assert(self == current_thread());

	s = splsched();
	if (self->sched_mode == TH_MODE_FIXED) {
		uint64_t			total_computation, abstime;

		abstime = mach_absolute_time();
		total_computation = abstime - self->computation_epoch;
		total_computation += self->computation_metered;
		if (total_computation >= max_poll_computation) {
			processor_t		myprocessor = current_processor();
			ast_t			preempt;

			thread_lock(self);
			if (!(self->sched_flags & TH_SFLAG_DEPRESSED_MASK)) {
				self->sched_pri = DEPRESSPRI;
				myprocessor->current_pri = self->sched_pri;
			}
			self->computation_epoch = abstime;
			self->computation_metered = 0;
			self->sched_flags |= TH_SFLAG_POLLDEPRESS;

			abstime += (total_computation >> sched_poll_yield_shift);
			if (!timer_call_enter(&self->depress_timer, abstime, TIMER_CALL_USER_CRITICAL))
				self->depress_timer_active++;

			if ((preempt = csw_check(myprocessor, AST_NONE)) != AST_NONE)
				ast_on(preempt);

			thread_unlock(self);
		}
	}
	splx(s);
}
Example #27
/*
 * Called at splsched.
 */
void
ast_taken(
	ast_t		reasons,
	boolean_t	enable
)
{
	boolean_t		preempt_trap = (reasons == AST_PREEMPTION);
	ast_t			*myast = ast_pending();
	thread_t		thread = current_thread();
	perfASTCallback	perf_hook = perfASTHook;

	/*
	 * CHUD hook - all threads including idle processor threads
	 */
	if (perf_hook) {
		if (*myast & AST_CHUD_ALL) {
			(*perf_hook)(reasons, myast);
			
			if (*myast == AST_NONE)
				return;
		}
	}
	else
		*myast &= ~AST_CHUD_ALL;

	reasons &= *myast;
	*myast &= ~reasons;

	/*
	 * Handle ASTs for all threads
	 * except idle processor threads.
	 */
	if (!(thread->state & TH_IDLE)) {
		/*
		 * Check for urgent preemption.
		 */
		if (	(reasons & AST_URGENT)				&&
				waitq_wait_possible(thread)		) {
			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				thread_block_reason(THREAD_CONTINUE_NULL, NULL,
										reasons & AST_PREEMPTION);
			}

			reasons &= ~AST_PREEMPTION;
		}

		/*
		 * The kernel preempt traps
		 * skip all other ASTs.
		 */
		if (!preempt_trap) {
			ml_set_interrupts_enabled(enable);

#ifdef	MACH_BSD
			/*
			 * Handle BSD hook.
			 */
			if (reasons & AST_BSD) {
				thread_ast_clear(thread, AST_BSD);
				bsd_ast(thread);
			}
#endif
#if CONFIG_MACF
			/*
			 * Handle MACF hook.
			 */
			if (reasons & AST_MACF) {
				thread_ast_clear(thread, AST_MACF);
				mac_thread_userret(thread);
			}
#endif
			/* 
			 * Thread APC hook.
			 */
			if (reasons & AST_APC) {
				thread_ast_clear(thread, AST_APC);
				special_handler(thread);
			}
			
			if (reasons & AST_GUARD) {
				thread_ast_clear(thread, AST_GUARD);
				guard_ast(thread);
			}
			
			if (reasons & AST_LEDGER) {
				thread_ast_clear(thread, AST_LEDGER);
				ledger_ast(thread);
			}

			/*
			 * Kernel Profiling Hook
			 */
			if (reasons & AST_KPERF) {
				thread_ast_clear(thread, AST_KPERF);
				chudxnu_thread_ast(thread);
			}

#if CONFIG_TELEMETRY
			if (reasons & AST_TELEMETRY_ALL) {
				boolean_t interrupted_userspace = FALSE;
				boolean_t is_windowed = FALSE;

				assert((reasons & AST_TELEMETRY_ALL) != AST_TELEMETRY_ALL); /* only one is valid at a time */
				interrupted_userspace = (reasons & AST_TELEMETRY_USER) ? TRUE : FALSE;
				is_windowed = ((reasons & AST_TELEMETRY_WINDOWED) ? TRUE : FALSE);
				thread_ast_clear(thread, AST_TELEMETRY_ALL);
				telemetry_ast(thread, interrupted_userspace, is_windowed);
			}
#endif

			ml_set_interrupts_enabled(FALSE);

#if CONFIG_SCHED_SFI
			if (reasons & AST_SFI) {
				sfi_ast(thread);
			}
#endif

			/*
			 * Check for preemption. Conditions may have changed from when the AST_PREEMPT was originally set.
			 */
			thread_lock(thread);
			if (reasons & AST_PREEMPT)
				reasons = csw_check(current_processor(), reasons & AST_QUANTUM);
			thread_unlock(thread);

			assert(waitq_wait_possible(thread));

			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				thread_block_reason((thread_continue_t)thread_exception_return, NULL, reasons & AST_PREEMPTION);
			}
		}
	}

	ml_set_interrupts_enabled(enable);
}
Example #28
/*
 * Now running in a thread.  Kick off other services,
 * invoke user bootstrap, enter pageout loop.
 */
static void
kernel_bootstrap_thread(void)
{
    processor_t		processor = current_processor();

#define kernel_bootstrap_thread_kprintf(x...) /* kprintf("kernel_bootstrap_thread: " x) */
    kernel_bootstrap_thread_kprintf("calling idle_thread_create\n");
    /*
     * Create the idle processor thread.
     */
    idle_thread_create(processor);

    /*
     * N.B. Do not stick anything else
     * before this point.
     *
     * Start up the scheduler services.
     */
    kernel_bootstrap_thread_kprintf("calling sched_startup\n");
    sched_startup();

    /*
     * Thread lifecycle maintenance (teardown, stack allocation)
     */
    kernel_bootstrap_thread_kprintf("calling thread_daemon_init\n");
    thread_daemon_init();

    /*
     * Thread callout service.
     */
    kernel_bootstrap_thread_kprintf("calling thread_call_initialize\n");
    thread_call_initialize();

    /*
     * Remain on current processor as
     * additional processors come online.
     */
    kernel_bootstrap_thread_kprintf("calling thread_bind\n");
    thread_bind(processor);

    /*
     * Kick off memory mapping adjustments.
     */
    kernel_bootstrap_thread_kprintf("calling mapping_adjust\n");
    mapping_adjust();

    /*
     *	Create the clock service.
     */
    kernel_bootstrap_thread_kprintf("calling clock_service_create\n");
    clock_service_create();

    /*
     *	Create the device service.
     */
    device_service_create();

    kth_started = 1;

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
    /*
     * Create and initialize the physical copy window for processor 0
     * This is required before starting kicking off  IOKit.
     */
    cpu_physwindow_init(0);
#endif

    vm_kernel_reserved_entry_init();

#if MACH_KDP
    kernel_bootstrap_kprintf("calling kdp_init\n");
    kdp_init();
#endif

#if CONFIG_COUNTERS
    pmc_bootstrap();
#endif

#if (defined(__i386__) || defined(__x86_64__))
    if (turn_on_log_leaks && !new_nkdbufs)
        new_nkdbufs = 200000;
    start_kern_tracing(new_nkdbufs);
    if (turn_on_log_leaks)
        log_leaks = 1;
#endif

#ifdef	IOKIT
    PE_init_iokit();
#endif

    (void) spllo();		/* Allow interruptions */

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
    /*
     * Create and initialize the copy window for processor 0
     * This also allocates window space for all other processors.
     * However, this is dependent on the number of processors - so this call
     * must be after IOKit has been started because IOKit performs processor
     * discovery.
     */
    cpu_userwindow_init(0);
#endif

#if (!defined(__i386__) && !defined(__x86_64__))
    if (turn_on_log_leaks && !new_nkdbufs)
        new_nkdbufs = 200000;
    start_kern_tracing(new_nkdbufs);
    if (turn_on_log_leaks)
        log_leaks = 1;
#endif

    /*
     *	Initialize the shared region module.
     */
    vm_shared_region_init();
    vm_commpage_init();
    vm_commpage_text_init();

#if CONFIG_MACF
    mac_policy_initmach();
#endif

    /*
     * Initialize the global used for permuting kernel
     * addresses that may be exported to userland as tokens
     * using VM_KERNEL_ADDRPERM(). Force the random number
     * to be odd to avoid mapping a non-zero
     * word-aligned address to zero via addition.
     */
    vm_kernel_addrperm = (vm_offset_t)early_random() | 1;

    /*
     *	Start the user bootstrap.
     */
#ifdef	MACH_BSD
    bsd_init();
#endif

    /*
     * Get rid of segments used to bootstrap kext loading. This removes
     * the KLD, PRELINK symtab, LINKEDIT, and symtab segments/load commands.
     */
#if 0
    OSKextRemoveKextBootstrap();
#endif

    serial_keyboard_init();		/* Start serial keyboard if wanted */

    vm_page_init_local_q();

    thread_bind(PROCESSOR_NULL);

    /*
     *	Become the pageout daemon.
     */
    vm_pageout();
    /*NOTREACHED*/
}
Example #29
uint64_t
timer_queue_expire_with_options(
	mpqueue_head_t		*queue,
	uint64_t		deadline,
	boolean_t		rescan)
{
	timer_call_t	call = NULL;
	uint32_t tc_iterations = 0;
	DBG("timer_queue_expire(%p,)\n", queue);

	uint64_t cur_deadline = deadline;
	timer_queue_lock_spin(queue);

	while (!queue_empty(&queue->head)) {
		/* Upon processing one or more timer calls, refresh the
		 * deadline to account for time elapsed in the callout
		 */
		if (++tc_iterations > 1)
			cur_deadline = mach_absolute_time();

		if (call == NULL)
			call = TIMER_CALL(queue_first(&queue->head));

		if (call->soft_deadline <= cur_deadline) {
			timer_call_func_t		func;
			timer_call_param_t		param0, param1;

			TCOAL_DEBUG(0xDDDD0000, queue->earliest_soft_deadline, call->soft_deadline, 0, 0, 0);
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE, 
				DECR_TIMER_EXPIRE | DBG_FUNC_NONE,
				call,
				call->soft_deadline,
				CE(call)->deadline,
				CE(call)->entry_time, 0);

			/* Bit 0 of the "soft" deadline indicates that
			 * this particular timer call is rate-limited
			 * and hence shouldn't be processed before its
			 * hard deadline.
			 */
			if ((call->soft_deadline & 0x1) &&
			    (CE(call)->deadline > cur_deadline)) {
				if (rescan == FALSE)
					break;
			}

			if (!simple_lock_try(&call->lock)) {
				/* case (2b) lock inversion, dequeue and skip */
				timer_queue_expire_lock_skips++;
				timer_call_entry_dequeue_async(call);
				call = NULL;
				continue;
			}

			timer_call_entry_dequeue(call);

			func = CE(call)->func;
			param0 = CE(call)->param0;
			param1 = CE(call)->param1;

			simple_unlock(&call->lock);
			timer_queue_unlock(queue);

			TIMER_KDEBUG_TRACE(KDEBUG_TRACE, 
				DECR_TIMER_CALLOUT | DBG_FUNC_START,
				call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);

#if CONFIG_DTRACE
			DTRACE_TMR7(callout__start, timer_call_func_t, func,
			    timer_call_param_t, param0, unsigned, call->flags,
			    0, (call->ttd >> 32),
			    (unsigned) (call->ttd & 0xFFFFFFFF), call);
#endif
			/* Maintain time-to-deadline in per-processor data
			 * structure for thread wakeup deadline statistics.
			 */
			uint64_t *ttdp = &(PROCESSOR_DATA(current_processor(), timer_call_ttd));
			*ttdp = call->ttd;
			(*func)(param0, param1);
			*ttdp = 0;
#if CONFIG_DTRACE
			DTRACE_TMR4(callout__end, timer_call_func_t, func,
			    param0, param1, call);
#endif

			TIMER_KDEBUG_TRACE(KDEBUG_TRACE, 
				DECR_TIMER_CALLOUT | DBG_FUNC_END,
				call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);
			call = NULL;
			timer_queue_lock_spin(queue);
		} else {
			if (__probable(rescan == FALSE)) {
Example #30
/*
 * 	Event timer interrupt.
 *
 * XXX a drawback of this implementation is that events serviced earlier must not set deadlines
 *     that occur before the entire chain completes.
 *
 * XXX a better implementation would use a set of generic callouts and iterate over them
 */
void
timer_intr(int		user_mode,
           uint64_t	rip)
{
    uint64_t		abstime;
    rtclock_timer_t		*mytimer;
    cpu_data_t		*pp;
    int64_t			latency;
    uint64_t		pmdeadline;
    boolean_t		timer_processed = FALSE;

    pp = current_cpu_datap();

    SCHED_STATS_TIMER_POP(current_processor());

    abstime = mach_absolute_time();		/* Get the time now */

    /* has a pending clock timer expired? */
    mytimer = &pp->rtclock_timer;		/* Point to the event timer */

    if ((timer_processed = ((mytimer->deadline <= abstime) ||
                            (abstime >= (mytimer->queue.earliest_soft_deadline))))) {
        /*
         * Log interrupt service latency (-ve value expected by tool)
         * a non-PM event is expected next.
         * The requested deadline may be earlier than when it was set
         * - use MAX to avoid reporting bogus latencies.
         */
        latency = (int64_t) (abstime - MAX(mytimer->deadline,
                                           mytimer->when_set));
        /* Log zero timer latencies when opportunistically processing
         * coalesced timers.
         */
        if (latency < 0) {
            TCOAL_DEBUG(0xEEEE0000, abstime, mytimer->queue.earliest_soft_deadline, abstime - mytimer->queue.earliest_soft_deadline, 0, 0);
            latency = 0;
        }

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                                  DECR_TRAP_LATENCY | DBG_FUNC_NONE,
                                  -latency,
                                  ((user_mode != 0) ? rip : VM_KERNEL_UNSLIDE(rip)),
                                  user_mode, 0, 0);

        mytimer->has_expired = TRUE;	/* Remember that we popped */
        mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
        mytimer->has_expired = FALSE;

        /* Get the time again since we ran a bit */
        abstime = mach_absolute_time();
        mytimer->when_set = abstime;
    }

    /* is it time for power management state change? */
    if ((pmdeadline = pmCPUGetDeadline(pp)) && (pmdeadline <= abstime)) {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                                  DECR_PM_DEADLINE | DBG_FUNC_START,
                                  0, 0, 0, 0, 0);
        pmCPUDeadline(pp);
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                                  DECR_PM_DEADLINE | DBG_FUNC_END,
                                  0, 0, 0, 0, 0);
        timer_processed = TRUE;
    }

    /* schedule our next deadline */
    x86_lcpu()->rtcDeadline = EndOfAllTime;
    timer_resync_deadlines();

    if (__improbable(timer_processed == FALSE))
        spurious_timers++;
}