Example #1
/*
 * Depress thread's priority to lowest possible for the specified interval,
 * with a value of zero resulting in no timeout being scheduled.
 */
void
thread_depress_abstime(
	uint64_t				interval)
{
	register thread_t		self = current_thread();
	uint64_t				deadline;
	spl_t					s;

	s = splsched();
	thread_lock(self);
	if (!(self->sched_flags & TH_SFLAG_DEPRESSED_MASK)) {
		processor_t		myprocessor = self->last_processor;

		self->sched_pri = DEPRESSPRI;
		myprocessor->current_pri = self->sched_pri;
		self->sched_flags |= TH_SFLAG_DEPRESS;

		if (interval != 0) {
			clock_absolutetime_interval_to_deadline(interval, &deadline);
			if (!timer_call_enter(&self->depress_timer, deadline, TIMER_CALL_USER_CRITICAL))
				self->depress_timer_active++;
		}
	}
	thread_unlock(self);
	splx(s);
}
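A convention worth noting in this and several of the later examples: timer_call_enter() returns TRUE when the call was already queued, so callers bump their outstanding-timer count only when this call newly armed the timer. A minimal sketch of that bookkeeping, assuming the XNU timer_call declarations used above; example_arm_depress_timer and active_count are hypothetical names:

/* Hypothetical helper sketching the arming convention above. */
static void
example_arm_depress_timer(timer_call_t call, uint64_t deadline, int *active_count)
{
	/* timer_call_enter() returns TRUE if the call was already queued. */
	if (!timer_call_enter(call, deadline, TIMER_CALL_USER_CRITICAL))
		(*active_count)++;	/* newly armed; one more cancel owed */
}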
Example #2
/*
 *	_set_delayed_call_timer:
 *
 *	Reset the timer so that it
 *	next expires when the entry is due.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
_set_delayed_call_timer(
	thread_call_t		call,
	thread_call_group_t	group)
{
	timer_call_enter(&group->delayed_timer, call->tc_call.deadline, 0);
}
Example #3
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_enter(
	chudxnu_cpu_timer_callback_func_t	func,
	uint32_t				time,
	uint32_t				units)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	// cancel any existing callback for this cpu
	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	chud_proc_info->cpu_timer_callback_fn = func;

	clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
	timer_call_setup(&(chud_proc_info->cpu_timer_call),
			 chudxnu_private_cpu_timer_callback, NULL);
	timer_call_enter(&(chud_proc_info->cpu_timer_call),
			 chud_proc_info->t_deadline);

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_CHUD,
			     CHUD_TIMER_CALLBACK_ENTER) | DBG_FUNC_NONE,
		(uint32_t) func, time, units, 0, 0);

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}
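The cancel/setup/enter sequence above is the basic re-arm idiom for a per-CPU timer: drop any pending call, convert the interval to an absolute deadline, then (re)bind the handler and arm it, all with interrupts disabled. A hedged sketch under the same assumptions, using the two-argument timer_call_enter() form this example uses; example_rearm_cpu_timer is a hypothetical name:

/* Hypothetical sketch of the re-arm sequence used above. */
static void
example_rearm_cpu_timer(timer_call_t call, timer_call_func_t func,
			uint32_t time, uint32_t units, uint64_t *deadline)
{
	boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);

	timer_call_cancel(call);		/* drop any pending callback */
	clock_interval_to_deadline(time, units, deadline);
	timer_call_setup(call, func, NULL);	/* (re)bind the handler */
	timer_call_enter(call, *deadline);	/* arm for the new deadline */

	ml_set_interrupts_enabled(oldlevel);
}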
Example #4
/* program the timer from the pet thread */
int
kperf_timer_pet_set( unsigned timer, uint64_t elapsed_ticks )
{
	static uint64_t pet_min_ticks = 0;

	uint64_t now;
	struct time_trigger *trigger = NULL;
	uint64_t period = 0;
	uint64_t deadline;

	/* compute ns -> ticks */
	if( pet_min_ticks == 0 )
		nanoseconds_to_absolutetime(MIN_PET_TIMER_NS, &pet_min_ticks);

	if( timer != pet_timer )
		panic( "PET setting with bogus ID\n" );

	if( timer >= timerc )
		return EINVAL;

	if( kperf_sampling_status() == KPERF_SAMPLING_OFF ) {
		BUF_INFO1(PERF_PET_END, SAMPLE_OFF);
		return 0;
	}

	// don't reprogram the timer if it's been shut down
	if( kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN ) {
		BUF_INFO1(PERF_PET_END, SAMPLE_SHUTDOWN);
		return 0;
	}

	/* CHECKME: we probably took so long in the PET thread that
	 * it makes sense to take the time again.
	 */
	now = mach_absolute_time();
	trigger = &timerv[timer];

	/* if we re-programmed the timer to zero, just drop it */
	if( !trigger->period )
		return 0;

	/* subtract the time the PET sample took, being careful not to underflow */
	if ( trigger->period > elapsed_ticks )
		period = trigger->period - elapsed_ticks;

	/* make sure we don't set the next PET sample to happen too soon */
	if ( period < pet_min_ticks )
		period = pet_min_ticks;

	/* calculate deadline */
	deadline = now + period;

	BUF_INFO(PERF_PET_SCHED, trigger->period, period, elapsed_ticks, deadline);

	/* re-schedule the timer, making sure we don't apply slop */
	timer_call_enter( &trigger->tcall, deadline, TIMER_CALL_SYS_CRITICAL);

	return 0;
}
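The period arithmetic above guards against two hazards: unsigned underflow when the sample took longer than the period, and a deadline so close that PET re-fires immediately. A minimal sketch isolating just that computation; example_pet_deadline is a hypothetical name:

/* Hypothetical helper isolating the deadline math above. */
static uint64_t
example_pet_deadline(uint64_t now, uint64_t period,
		     uint64_t elapsed_ticks, uint64_t min_ticks)
{
	uint64_t next = 0;

	if (period > elapsed_ticks)
		next = period - elapsed_ticks;	/* avoid unsigned underflow */
	if (next < min_ticks)
		next = min_ticks;		/* don't fire again too soon */
	return now + next;
}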
Example #5
/* program the timer from the PET thread */
void
kperf_timer_pet_rearm(uint64_t elapsed_ticks)
{
	struct kperf_timer *timer = NULL;
	uint64_t period = 0;
	uint64_t deadline;

	/*
	 * If the pet_timer_id is invalid, it has been disabled, so this should
	 * do nothing.
	 */
	if (pet_timer_id >= kperf_timerc) {
		return;
	}

	unsigned int status = kperf_sampling_status();
	/* do not reprogram the timer if it has been shutdown or sampling is off */
	if (status == KPERF_SAMPLING_OFF) {
		BUF_INFO(PERF_PET_END, SAMPLE_OFF);
		return;
	} else if (status == KPERF_SAMPLING_SHUTDOWN) {
		BUF_INFO(PERF_PET_END, SAMPLE_SHUTDOWN);
		return;
	}

	timer = &(kperf_timerv[pet_timer_id]);

	/* if we re-programmed the timer to zero, just drop it */
	if (!timer->period) {
		return;
	}

	/* subtract the time the PET sample took, being careful not to underflow */
	if (timer->period > elapsed_ticks) {
		period = timer->period - elapsed_ticks;
	}

	/* make sure we don't set the next PET sample to happen too soon */
	if (period < min_period_pet_abstime) {
		period = min_period_pet_abstime;
	}

	/* we probably took so long in the PET thread, it makes sense to take
	 * the time again.
	 */
	deadline = mach_absolute_time() + period;

	BUF_INFO(PERF_PET_SCHED, timer->period, period, elapsed_ticks, deadline);

	/* re-schedule the timer, making sure we don't apply slop */
	timer_call_enter(&(timer->tcall), deadline, TIMER_CALL_SYS_CRITICAL);

	return;
}
Example #6
static void
kperf_timer_schedule(struct kperf_timer *timer, uint64_t now)
{
	BUF_INFO(PERF_TM_SCHED, timer->period);

	/* if we re-programmed the timer to zero, just drop it */
	if (timer->period == 0) {
		return;
	}

	/* calculate deadline */
	uint64_t deadline = now + timer->period;

	/* re-schedule the timer, making sure we don't apply slop */
	timer_call_enter(&timer->tcall, deadline, TIMER_CALL_SYS_CRITICAL);
}
Example #7
/*
 * Schedule timer to deallocate a worker thread if we have a surplus 
 * of threads (in excess of the group's target) and at least one thread
 * is idle the whole time.
 */
static void
thread_call_start_deallocate_timer(
		thread_call_group_t group)
{
	uint64_t deadline;
	boolean_t onqueue;

	assert(group->idle_count > 0);

	group->flags |= TCG_DEALLOC_ACTIVE;
	deadline = group->idle_timestamp + thread_call_dealloc_interval_abs;
	onqueue = timer_call_enter(&group->dealloc_timer, deadline, 0);

	if (onqueue) {
		panic("Deallocate timer already active?");
	}
}
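Here the return value of timer_call_enter() is used the other way around: because TCG_DEALLOC_ACTIVE should prevent double-arming, TRUE (already queued) is treated as a fatal invariant violation. A sketch, with example_arm_once as a hypothetical name:

/* Hypothetical sketch of the double-arm check above. */
static void
example_arm_once(timer_call_t call, uint64_t deadline)
{
	/* TRUE means the call was already queued, i.e. armed twice. */
	if (timer_call_enter(call, deadline, 0))
		panic("timer already armed");
}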
Example #8
__private_extern__
kern_return_t chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func, uint32_t time, uint32_t units)
{
    int cpu;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    cpu = cpu_number();

    timer_call_cancel(&(cpu_timer_call[cpu])); // cancel any existing callback for this cpu

    cpu_timer_callback_fn[cpu] = func;

    clock_interval_to_deadline(time, units, &(t_deadline[cpu]));
    timer_call_setup(&(cpu_timer_call[cpu]), chudxnu_private_cpu_timer_callback, NULL);
    timer_call_enter(&(cpu_timer_call[cpu]), t_deadline[cpu]);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
Example #9
File: clock.c Project: Prajna/xnu
static void
calend_adjust_call(void)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	if (--calend_adjactive == 0) {
		interval = calend_adjust();
		if (interval != 0) {
			clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);

			if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_CRITICAL))
				calend_adjactive++;
		}
	}

	clock_unlock();
	splx(s);
}
Example #10
void
thread_poll_yield(
	thread_t		self)
{
	spl_t			s;

	assert(self == current_thread());

	s = splsched();
	if (self->sched_mode == TH_MODE_FIXED) {
		uint64_t			total_computation, abstime;

		abstime = mach_absolute_time();
		total_computation = abstime - self->computation_epoch;
		total_computation += self->computation_metered;
		if (total_computation >= max_poll_computation) {
			processor_t		myprocessor = current_processor();
			ast_t			preempt;

			thread_lock(self);
			if (!(self->sched_flags & TH_SFLAG_DEPRESSED_MASK)) {
				self->sched_pri = DEPRESSPRI;
				myprocessor->current_pri = self->sched_pri;
			}
			self->computation_epoch = abstime;
			self->computation_metered = 0;
			self->sched_flags |= TH_SFLAG_POLLDEPRESS;

			abstime += (total_computation >> sched_poll_yield_shift);
			if (!timer_call_enter(&self->depress_timer, abstime, TIMER_CALL_USER_CRITICAL))
				self->depress_timer_active++;

			if ((preempt = csw_check(myprocessor, AST_NONE)) != AST_NONE)
				ast_on(preempt);

			thread_unlock(self);
		}
	}
	splx(s);
}
Example #11
File: clock.c Project: Prajna/xnu
/*
 *	clock_adjtime:
 *
 *	Interface to adjtime() syscall.
 *
 *	Calculates adjustment variables and
 *	initiates adjustment.
 */
void
clock_adjtime(
	long		*secs,
	int			*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		calend_adjdeadline = mach_absolute_time() + interval;
		if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_CRITICAL))
			calend_adjactive++;
	}
	else
	if (timer_call_cancel(&calend_adjcall))
		calend_adjactive--;

	clock_unlock();
	splx(s);
}
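clock_adjtime() pairs the arming convention with its mirror image: timer_call_cancel() returns TRUE only when it actually dequeued a pending call, so the active count is decremented only on a real cancellation. A hedged sketch; example_arm_or_cancel is a hypothetical name:

/* Hypothetical sketch of the arm-or-cancel bookkeeping above. */
static void
example_arm_or_cancel(timer_call_t call, uint64_t deadline,
		      boolean_t arm, int *active)
{
	if (arm) {
		if (!timer_call_enter(call, deadline, TIMER_CALL_CRITICAL))
			(*active)++;	/* newly armed */
	} else if (timer_call_cancel(call)) {
		(*active)--;		/* a pending call was dequeued */
	}
}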
Example #12
/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 *
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline,
	thread_t thread)
{
	wait_result_t wait_result;
	boolean_t realtime;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (event == NO_EVENT64 && wqs_is_preposted(wqs))
			return(THREAD_AWAKENED);
	}

	/*
	 * Realtime threads get priority for wait queue placements.
	 * This allows wait_queue_wakeup_one to prefer a waiting
	 * realtime thread, similar in principle to performing
	 * a wait_queue_wakeup_all and allowing scheduler prioritization
	 * to run the realtime thread, but without causing the
	 * lock contention of that scenario.
	 */
	realtime = (thread->sched_pri >= BASEPRI_REALTIME);

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (!wq->wq_fifo
			|| (thread->options & TH_OPT_VMPRIV)
			|| realtime)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);

		thread->wait_event = event;
		thread->wait_queue = wq;

		if (deadline != 0) {
			uint32_t flags;

			flags = realtime ? TIMER_CALL_CRITICAL : 0;

			if (!timer_call_enter(&thread->wait_timer, deadline, flags))
				thread->wait_timer_active++;
			thread->wait_timer_is_set = TRUE;
		}
	}
	return(wait_result);
}
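The flag selection above is the one scheduling-sensitive detail: realtime waiters get TIMER_CALL_CRITICAL so their wakeup deadline is not subject to the slop applied to ordinary timers (compare the "making sure we don't apply slop" comments in the kperf examples). A one-line sketch; example_wait_timer_flags is a hypothetical name:

/* Hypothetical helper isolating the flag choice above. */
static uint32_t
example_wait_timer_flags(thread_t thread)
{
	return (thread->sched_pri >= BASEPRI_REALTIME) ? TIMER_CALL_CRITICAL : 0;
}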