Code example #1
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_enter(
	chudxnu_cpu_timer_callback_func_t	func,
	uint32_t				time,
	uint32_t				units)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	// cancel any existing callback for this cpu
	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	chud_proc_info->cpu_timer_callback_fn = func;

	clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
	timer_call_setup(&(chud_proc_info->cpu_timer_call),
			 chudxnu_private_cpu_timer_callback, NULL);
	timer_call_enter(&(chud_proc_info->cpu_timer_call),
			 chud_proc_info->t_deadline);

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_CHUD,
			     CHUD_TIMER_CALLBACK_ENTER) | DBG_FUNC_NONE,
		(uint32_t) func, time, units, 0, 0);

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}
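
Code example #1 (and #7 below) shows the full arm sequence: cancel any pending call, bind the handler with timer_call_setup(), convert an interval into an absolute deadline, and queue the call with timer_call_enter(), all with interrupts disabled. The fragment below is a minimal sketch, not taken from the sources above, that puts the whole lifecycle in one place; the names my_timer, my_timer_fired, my_timer_init, my_timer_arm and my_timer_disarm are hypothetical, while the timer_call_* and clock_interval_to_deadline() calls are the same XNU primitives used in the examples (newer kernels add a flags argument to timer_call_enter(), as in code example #12).

#include <kern/timer_call.h>   /* timer_call_data_t, timer_call_setup/enter/cancel */
#include <kern/clock.h>        /* clock_interval_to_deadline() */

static timer_call_data_t my_timer;   /* hypothetical: backing storage for one call */

/* Handler with the signature expected by timer_call_setup(); it runs in
 * interrupt context once the deadline passes. */
static void
my_timer_fired(timer_call_param_t p0, timer_call_param_t p1)
{
	(void)p0; (void)p1;
	/* do the periodic work here, or re-arm with timer_call_enter() */
}

/* Must run once before my_timer_arm()/my_timer_disarm() are used. */
static void
my_timer_init(void)
{
	timer_call_setup(&my_timer, my_timer_fired, NULL);   /* bind the handler */
}

static void
my_timer_arm(uint32_t interval_ms)
{
	uint64_t deadline;

	timer_call_cancel(&my_timer);                         /* drop any pending call */
	clock_interval_to_deadline(interval_ms, NSEC_PER_MSEC, &deadline);
	timer_call_enter(&my_timer, deadline);                /* queue for the deadline */
}

static void
my_timer_disarm(void)
{
	/* timer_call_cancel() returns TRUE only if the call was still pending
	 * and has been pulled off the queue; FALSE means it already fired (or
	 * was never queued), so the handler may be running concurrently. */
	(void)timer_call_cancel(&my_timer);
}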
Code example #2
__private_extern__
kern_return_t chudxnu_cpu_timer_callback_cancel_all(void)
{
    int cpu;

    for(cpu=0; cpu<NCPUS; cpu++) {
        timer_call_cancel(&(cpu_timer_call[cpu]));
        t_deadline[cpu] = t_deadline[cpu] | ~(t_deadline[cpu]); // set to max value
        cpu_timer_callback_fn[cpu] = NULL;
    }
    return KERN_SUCCESS;
}
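
The expression t_deadline[cpu] | ~(t_deadline[cpu]) above ORs the deadline with its own complement, which sets every bit. Assuming t_deadline is an array of 64-bit deadlines (timer deadlines in XNU are uint64_t), the same "set to max value" effect can be written more directly as:

t_deadline[cpu] = ~0ULL;   /* all bits set: the maximum representable deadline */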
Code example #3
static void
timer_call_remove_cyclic(cyclic_id_t cyclic)
{
	wrap_timer_call_t *wrapTC = (wrap_timer_call_t *)cyclic;

	/*
	 * timer_call_cancel() fails while the handler is executing (the cyclic
	 * re-queues itself).  Ask the handler to wake us instead of re-arming
	 * (WAKEUP_REAPER), block, and retry the cancel.
	 */
	while (!timer_call_cancel(&(wrapTC->call))) {
		int ret = assert_wait(wrapTC, THREAD_UNINT);
		ASSERT(ret == THREAD_WAITING);

		wrapTC->when.cyt_interval = WAKEUP_REAPER;

		ret = thread_block(THREAD_CONTINUE_NULL);
		ASSERT(ret == THREAD_AWAKENED);
	}
}
Code example #4
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel_all(void)
{
	unsigned int	cpu;
	chudcpu_data_t	*chud_proc_info;

	for(cpu=0; cpu < real_ncpus; cpu++) {
		chud_proc_info = (chudcpu_data_t *) cpu_data_ptr[cpu]->cpu_chud;
		if (chud_proc_info == NULL)
			continue;
		timer_call_cancel(&(chud_proc_info->cpu_timer_call));
		chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
		chud_proc_info->cpu_timer_callback_fn = NULL;
	}
	return KERN_SUCCESS;
}
Code example #5
__private_extern__
kern_return_t chudxnu_cpu_timer_callback_cancel(void)
{
    int cpu;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    cpu = cpu_number();

    timer_call_cancel(&(cpu_timer_call[cpu]));
    t_deadline[cpu] = t_deadline[cpu] | ~(t_deadline[cpu]); // set to max value
    cpu_timer_callback_fn[cpu] = NULL;

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
Code example #6
File: kperf_timer.c Project: wzw19890321/xnu-1
void
kperf_timer_stop(void)
{
	for (unsigned int i = 0; i < kperf_timerc; i++) {
		if (kperf_timerv[i].period == 0) {
			continue;
		}

		/* wait for the timer to stop */
		while (kperf_timerv[i].active);

		timer_call_cancel(&(kperf_timerv[i].tcall));
	}

	/* wait for PET to stop, too */
	kperf_pet_config(0);
}
Code example #7
__private_extern__
kern_return_t chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func, uint32_t time, uint32_t units)
{
    int cpu;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    cpu = cpu_number();

    timer_call_cancel(&(cpu_timer_call[cpu])); // cancel any existing callback for this cpu

    cpu_timer_callback_fn[cpu] = func;

    clock_interval_to_deadline(time, units, &(t_deadline[cpu]));
    timer_call_setup(&(cpu_timer_call[cpu]), chudxnu_private_cpu_timer_callback, NULL);
    timer_call_enter(&(cpu_timer_call[cpu]), t_deadline[cpu]);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
Code example #8
File: timetrigger.c Project: Apple-FOSS-Mirror/xnu
extern int
kperf_timer_stop(void)
{
	unsigned i;

	for( i = 0; i < timerc; i++ )
	{
		if( timerv[i].period == 0 )
			continue;

		while (timerv[i].active)
			;

		timer_call_cancel( &timerv[i].tcall );
	}

	/* wait for PET to stop, too */
	kperf_pet_thread_wait();

	return 0;
}
Code example #9
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel(void)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_CHUD,
			     CHUD_TIMER_CALLBACK_CANCEL) | DBG_FUNC_NONE,
		0, 0, 0, 0, 0);

	// set to max value:
	chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
	chud_proc_info->cpu_timer_callback_fn = NULL;

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}
Code example #10
File: thread_call.c Project: CptFrazz/xnu
/*
 *	thread_call_wake:
 *
 *	Wake a call thread to service
 *	pending call entries.  May wake
 *	the daemon thread in order to
 *	create additional call threads.
 *
 *	Called with thread_call_lock held.
 *
 *	For high-priority group, only does wakeup/creation if there are no threads
 *	running.
 */
static __inline__ void
thread_call_wake(
	thread_call_group_t		group)
{
	/* 
	 * New behavior: use threads if you've got 'em.
	 * Traditional behavior: wake only if no threads running.
	 */
	if (group_isparallel(group) || group->active_count == 0) {
		if (wait_queue_wakeup_one(&group->idle_wqueue, NO_EVENT, THREAD_AWAKENED, -1) == KERN_SUCCESS) {
			group->idle_count--; group->active_count++;

			if (group->idle_count == 0) {
				timer_call_cancel(&group->dealloc_timer);
				group->flags &= TCG_DEALLOC_ACTIVE;
			}
		} else {
			if (!thread_call_daemon_awake && thread_call_group_should_add_thread(group)) {
				thread_call_daemon_awake = TRUE;
				wait_queue_wakeup_one(&daemon_wqueue, NO_EVENT, THREAD_AWAKENED, -1);
			}
		}
	}
}
Code example #11
File: syscall_subr.c Project: JackieXie168/xnu
/*
 *	Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort_internal(
	thread_t				thread)
{
    kern_return_t 			result = KERN_NOT_DEPRESSED;
    spl_t					s;

    s = splsched();
    thread_lock(thread);
	if (!(thread->sched_flags & TH_SFLAG_POLLDEPRESS)) {
		if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;
			thread_recompute_sched_pri(thread, FALSE);
			result = KERN_SUCCESS;
		}

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}
	thread_unlock(thread);
    splx(s);

    return (result);
}
Code example #12
File: clock.c Project: Prajna/xnu
/*
 *	clock_adjtime:
 *
 *	Interface to adjtime() syscall.
 *
 *	Calculates adjustment variables and
 *	initiates adjustment.
 */
void
clock_adjtime(
	long		*secs,
	int			*microsecs)
{
	uint32_t	interval;
	spl_t		s;

	s = splclock();
	clock_lock();

	interval = calend_set_adjustment(secs, microsecs);
	if (interval != 0) {
		calend_adjdeadline = mach_absolute_time() + interval;
		if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_CRITICAL))
			calend_adjactive++;
	}
	else
	if (timer_call_cancel(&calend_adjcall))
		calend_adjactive--;

	clock_unlock();
	splx(s);
}
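
Code examples #11 through #13 lean on the boolean results of these calls: timer_call_enter() reports whether the call was already queued, so clock_adjtime() above counts a new pending adjustment only when it returns FALSE, and timer_call_cancel() reports whether a still-pending call was removed before it could fire. The lines below are a schematic fragment of that bookkeeping, with hypothetical names (obj, timer_active) that do not appear in the sources; in the real code the expiration handler also decrements the counter when it finishes running.

	/* arming: an expiration becomes outstanding only if the call was newly queued */
	if (!timer_call_enter(&obj->timer, deadline))
		obj->timer_active++;

	/* ... later, on the cancel/teardown path ... */
	if (timer_call_cancel(&obj->timer))
		obj->timer_active--;        /* pulled off the queue before it fired */

	while (obj->timer_active > 0)       /* otherwise the handler is still running */
		delay(1);                   /* spin-wait, as thread_terminate_self() does below */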
Code example #13
File: thread.c Project: CptFrazz/xnu
/*
 *	thread_terminate_self:
 */
void
thread_terminate_self(void)
{
	thread_t		thread = current_thread();

	task_t			task;
	spl_t			s;
	int threadcnt;

	DTRACE_PROC(lwp__exit);

	thread_mtx_lock(thread);

	ulock_release_all(thread);

	ipc_thread_disable(thread);
	
	thread_mtx_unlock(thread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel priority depression, wait for concurrent expirations
	 *	on other processors.
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
		thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}

	while (thread->depress_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	thread_sched_call(thread, NULL);

	thread_unlock(thread);
	splx(s);

	thread_policy_reset(thread);

#if CONFIG_EMBEDDED
	thead_remove_taskwatch(thread);
#endif /* CONFIG_EMBEDDED */

	task = thread->task;
	uthread_cleanup(task, thread->uthread, task->bsd_info);
	threadcnt = hw_atomic_sub(&task->active_thread_count, 1);

	/*
	 * If we are the last thread to terminate and the task is
	 * associated with a BSD process, perform BSD process exit.
	 */
	if (threadcnt == 0 && task->bsd_info != NULL)
		proc_exit(task->bsd_info);

	uthread_cred_free(thread->uthread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel wait timer, and wait for
	 *	concurrent expirations.
	 */
	if (thread->wait_timer_is_set) {
		thread->wait_timer_is_set = FALSE;

		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
	}

	while (thread->wait_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	/*
	 *	If there is a reserved stack, release it.
	 */
	if (thread->reserved_stack != 0) {
		stack_free_reserved(thread);
		thread->reserved_stack = 0;
	}

	/*
	 *	Mark thread as terminating, and block.
	 */
	thread->state |= TH_TERMINATE;
	thread_mark_wait_locked(thread, THREAD_UNINT);
	thread_unlock(thread);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_continue);
	/*NOTREACHED*/
}
Code example #14
File: thread.c Project: MACasuba/MACasuba-Utils-git
/*
 *	thread_terminate_self:
 */
void
thread_terminate_self(void)
{
	thread_t		thread = current_thread();
	task_t			task;
	spl_t			s;
	int lastthread = 0;

	thread_mtx_lock(thread);

	ulock_release_all(thread);

	ipc_thread_disable(thread);
	
	thread_mtx_unlock(thread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel priority depression, wait for concurrent expirations
	 *	on other processors.
	 */
	if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
		thread->sched_mode &= ~TH_MODE_ISDEPRESSED;

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}

	while (thread->depress_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	thread_unlock(thread);
	splx(s);

	thread_policy_reset(thread);

	/*
	 * If we are the last thread to terminate and the task is
	 * associated with a BSD process, perform BSD process exit.
	 */
	task = thread->task;
	uthread_cleanup(task, thread->uthread, task->bsd_info);
	if (hw_atomic_sub(&task->active_thread_count, 1) == 0	&&
					task->bsd_info != NULL) {
		lastthread = 1;
	} 
	
	if (lastthread != 0)
		proc_exit(task->bsd_info);

	uthread_cred_free(thread->uthread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel wait timer, and wait for
	 *	concurrent expirations.
	 */
	if (thread->wait_timer_is_set) {
		thread->wait_timer_is_set = FALSE;

		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
	}

	while (thread->wait_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	/*
	 *	If there is a reserved stack, release it.
	 */
	if (thread->reserved_stack != 0) {
		if (thread->reserved_stack != thread->kernel_stack)
			stack_free_stack(thread->reserved_stack);
		thread->reserved_stack = 0;
	}

	/*
	 *	Mark thread as terminating, and block.
	 */
	thread->state |= TH_TERMINATE;
	thread_mark_wait_locked(thread, THREAD_UNINT);
	assert(thread->promotions == 0);
	thread_unlock(thread);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_continue);
	/*NOTREACHED*/
}