Example #1
/*
 *	thread_call_daemon_continue:
 */
static void
thread_call_daemon_continue(
	thread_call_group_t		group)
{
	kern_return_t	result;
	thread_t		thread;

	(void) splsched();
	thread_call_lock_spin();

	while (group->active_count == 0 && group->pending_count > 0) {
		group->active_count++;

		thread_call_unlock();
		(void) spllo();
	
		result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, BASEPRI_PREEMPT, &thread);
		if (result != KERN_SUCCESS)
			panic("thread_call_daemon");

		thread_deallocate(thread);

		(void) splsched();
		thread_call_lock_spin();
	}

	thread_call_daemon_awake = FALSE;
	wait_queue_assert_wait(&group->daemon_wqueue, NO_EVENT, THREAD_UNINT, 0);

	thread_call_unlock();
	(void) spllo();

	thread_block_parameter((thread_continue_t)thread_call_daemon_continue, group);
	/* NOTREACHED */
}
Example #2
/*
 * Wait for all requested invocations of a thread call prior to now
 * to finish.  Can only be invoked on thread calls whose storage we manage.  
 * Just waits for the finish count to catch up to the submit count we find
 * at the beginning of our wait.
 */
static void
thread_call_wait_locked(thread_call_t call)
{
	uint64_t submit_count;
	wait_result_t res;

	assert(call->tc_flags & THREAD_CALL_ALLOC);

	submit_count = call->tc_submit_count;

	while (call->tc_finish_count < submit_count) {
		call->tc_flags |= THREAD_CALL_WAIT;

		res = assert_wait(call, THREAD_UNINT);
		if (res != THREAD_WAITING) {
			panic("Unable to assert wait?");
		}

		thread_call_unlock();
		(void) spllo();

		res = thread_block(NULL);
		if (res != THREAD_AWAKENED) {
			panic("Awoken with %d?", res);
		}
	
		(void) splsched();
		thread_call_lock_spin();
	}
}
Example #3
/* 
 * Interrupts disabled, lock held; returns the same way. 
 * Only called on thread calls whose storage we own.  Wakes up
 * anyone who might be waiting on this work item and frees it
 * if the client has so requested.
 */
static void
thread_call_finish(thread_call_t call)
{
	boolean_t dowake = FALSE;

	call->tc_finish_count++;
	call->tc_refs--;

	if ((call->tc_flags & THREAD_CALL_WAIT) != 0) {
		dowake = TRUE;
		call->tc_flags &= ~THREAD_CALL_WAIT;

		/* 
		 * Dropping lock here because the sched call for the 
		 * high-pri group can take the big lock from under
		 * a thread lock.
		 */
		thread_call_unlock();
		thread_wakeup((event_t)call);
		thread_call_lock_spin();
	}

	if (call->tc_refs == 0) {
		if (dowake) {
			panic("Someone waiting on a thread call that is scheduled for free: %p\n", call->tc_call.func);
		}

		enable_ints_and_unlock();

		zfree(thread_call_zone, call);

		(void)disable_ints_and_lock();
	}
}
Example #4
/*
 *	sched_call_thread:
 *
 *	Call out invoked by the scheduler.  Used only for high-priority
 *	thread call group.
 */
static void
sched_call_thread(
		int				type,
		__unused	thread_t		thread)
{
	thread_call_group_t		group;

	group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH]; /* XXX */

	thread_call_lock_spin();

	switch (type) {

		case SCHED_CALL_BLOCK:
			--group->active_count;
			if (group->pending_count > 0)
				thread_call_wake(group);
			break;

		case SCHED_CALL_UNBLOCK:
			group->active_count++;
			break;
	}

	thread_call_unlock();
}
Example #5
/*
 *	thread_call_func:
 *
 *	Enqueue a function callout.
 *
 *	Guarantees { function, argument }
 *	uniqueness if unique_call is TRUE.
 */
void
thread_call_func(
    thread_call_func_t		func,
    thread_call_param_t		param,
    boolean_t				unique_call)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	call = TC(queue_first(&group->pending_queue));

	while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
		if (call->tc_call.func == func && call->tc_call.param0 == param) {
			break;
		}

		call = TC(queue_next(qe(call)));
	}

	if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
		call = _internal_call_allocate();
		call->tc_call.func	= func;
		call->tc_call.param0	= param;
		call->tc_call.param1	= NULL;

		_pending_call_enqueue(call, group);
	}

	thread_call_unlock();
	splx(s);
}
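A minimal caller sketch for the KPI above, assuming a kernel context where <kern/thread_call.h> is in scope; my_func, my_cookie, and my_submit are hypothetical names, not part of this source:

#include <kern/thread_call.h>

static void
my_func(thread_call_param_t param0, thread_call_param_t param1)
{
	/* param0 is the cookie passed to thread_call_func(); param1 is NULL here. */
}

static void
my_submit(void *my_cookie)
{
	/*
	 * unique_call == TRUE: the enqueue is skipped if an identical
	 * { my_func, my_cookie } callout is already pending.
	 */
	thread_call_func(my_func, (thread_call_param_t)my_cookie, TRUE);
}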
Example #6
/*
 *	thread_call_func_delayed:
 *
 *	Enqueue a function callout to
 *	occur at the stated time.
 */
void
thread_call_func_delayed(
		thread_call_func_t		func,
		thread_call_param_t		param,
		uint64_t			deadline)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	call = _internal_call_allocate();
	call->tc_call.func	= func;
	call->tc_call.param0	= param;
	call->tc_call.param1	= 0;

	_delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	thread_call_unlock();
	splx(s);
}
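Deadlines are in mach absolute time units. A hedged sketch of computing one, reusing the hypothetical my_func/my_cookie from the sketch above and assuming clock_interval_to_deadline() from <kern/clock.h> (not shown in these examples):

static void
my_submit_later(void *my_cookie)
{
	uint64_t deadline;

	/* Fire roughly 100 ms from now. */
	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
	thread_call_func_delayed(my_func, (thread_call_param_t)my_cookie, deadline);
}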
Example #7
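/*
 *	thread_call_enter1:
 *
 *	Enqueue a callout entry for invocation
 *	as soon as possible, with an extra parameter.
 *
 *	Returns TRUE if the call was
 *	already on a queue.
 */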
boolean_t
thread_call_enter1(
		thread_call_t			call,
		thread_call_param_t		param1)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group;
	spl_t			s;

	group = thread_call_get_group(call);

	s = splsched();
	thread_call_lock_spin();

	if (call->tc_call.queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);
	}

	call->tc_call.param1 = param1;

	thread_call_unlock();
	splx(s);

	return (result);
}
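A hedged usage sketch: thread_call_allocate() does not appear in these examples but is part of the same KPI; my_ctx and my_submit_with_arg are hypothetical:

static void
my_submit_with_arg(void *my_ctx)
{
	thread_call_t call;

	/* param0 is fixed at allocation time; param1 varies per submission. */
	call = thread_call_allocate(my_func, (thread_call_param_t)my_ctx);
	(void) thread_call_enter1(call, (thread_call_param_t)(uintptr_t)1);
}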
Example #8
/*
 *	thread_call_free:
 *
 *	Release a callout.  If the callout is currently
 *	executing, it will be freed when all invocations
 *	finish.
 */
boolean_t
thread_call_free(
		thread_call_t		call)
{
	spl_t	s;
	int32_t refs;

	s = splsched();
	thread_call_lock_spin();

	if (call->tc_call.queue != NULL) {
		thread_call_unlock();
		splx(s);

		return (FALSE);
	}

	refs = --call->tc_refs;
	if (refs < 0) {
		panic("Refcount negative: %d\n", refs);
	}	

	thread_call_unlock();
	splx(s);

	if (refs == 0) {
		zfree(thread_call_zone, call);
	}

	return (TRUE);
}
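A hedged teardown sketch combining this with thread_call_cancel() (see Example #17); per the header comment, an executing callout is freed only after its invocations finish. my_teardown is hypothetical:

static void
my_teardown(thread_call_t call)
{
	/* Dequeue any pending invocation, then drop the reference. */
	(void) thread_call_cancel(call);
	if (thread_call_free(call) == FALSE) {
		/* Re-submitted concurrently and still queued; nothing was released. */
	}
}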
Example #9
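/*
 *	thread_call_enter1_delayed:
 *
 *	Enqueue a callout entry to occur
 *	at the stated time, with an extra parameter.
 *
 *	Returns TRUE if the call was
 *	already on a queue.
 */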
boolean_t
thread_call_enter1_delayed(
		thread_call_t			call,
		thread_call_param_t		param1,
		uint64_t			deadline)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group;
	spl_t			s;

	group = thread_call_get_group(call);

	s = splsched();
	thread_call_lock_spin();

	result = _delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->tc_call.param1 = param1;

	thread_call_unlock();
	splx(s);

	return (result);
}
Example #10
boolean_t
thread_call_enter1(
    thread_call_t			call,
    thread_call_param_t		param1)
{
	boolean_t				result = TRUE;
	thread_call_group_t		group = &thread_call_group0;
	spl_t					s;
    
	s = splsched();
	thread_call_lock_spin();
    
	if (call->queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	call->param1 = param1;

	thread_call_unlock();
	splx(s);

	return (result);
}
Example #11
/*
 * Cancel a thread call.  If it cannot be cancelled (i.e.,
 * it is already in flight), waits for the most recent invocation
 * to finish.  Note that if clients re-submit this thread call,
 * it may still be pending or in flight when thread_call_cancel_wait
 * returns, but all requests to execute this work item prior
 * to the call to thread_call_cancel_wait will have finished.
 */
boolean_t
thread_call_cancel_wait(
		thread_call_t		call)
{
	boolean_t		result;
	thread_call_group_t	group;

	if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) {
		panic("%s: Can't wait on thread call whose storage I don't own.", __FUNCTION__);
	}

	group = thread_call_get_group(call);

	(void) splsched();
	thread_call_lock_spin();

	result = _call_dequeue(call, group);
	if (result == FALSE) {
		thread_call_wait_locked(call);
	}

	thread_call_unlock();
	(void) spllo();

	return result;
}
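A caller sketch; per the panic above, this may only be used on callouts whose storage the subsystem owns (THREAD_CALL_ALLOC, i.e. created by thread_call_allocate()). my_stop_work is hypothetical:

static void
my_stop_work(thread_call_t call)
{
	if (thread_call_cancel_wait(call)) {
		/* Dequeued before it ran; the callback will not be invoked. */
	} else {
		/*
		 * Already in flight: every invocation submitted before this
		 * point has finished by the time we return.
		 */
	}
}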
Example #12
void
thread_call_delayed_timer(
		timer_call_param_t		p0,
		__unused timer_call_param_t	p1
)
{
	thread_call_t			call;
	thread_call_group_t		group = p0;
	uint64_t				timestamp;

	thread_call_lock_spin();

	timestamp = mach_absolute_time();

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->tc_call.deadline <= timestamp) {
			_pending_call_enqueue(call, group);
		}
		else
			break;

		call = TC(queue_first(&group->delayed_queue));
	}

	if (!queue_end(&group->delayed_queue, qe(call)))
		_set_delayed_call_timer(call, group);

	thread_call_unlock();
}
Example #13
/*
 *	thread_call_is_delayed:
 *
 *	Returns TRUE if the call is
 *	currently on a delayed queue.
 *
 *	Optionally returns the expiration time.
 */
boolean_t
thread_call_is_delayed(
	thread_call_t		call,
	uint64_t			*deadline)
{
	boolean_t			result = FALSE;
	thread_call_group_t		group;
	spl_t				s;

	group = thread_call_get_group(call);

	s = splsched();
	thread_call_lock_spin();

	if (call->tc_call.queue == &group->delayed_queue) {
		if (deadline != NULL)
			*deadline = call->tc_call.deadline;
		result = TRUE;
	}

	thread_call_unlock();
	splx(s);

	return (result);
}
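A small caller sketch (my_report_pending is hypothetical); the deadline out-parameter may be NULL if the expiration time is not needed:

static boolean_t
my_report_pending(thread_call_t call)
{
	uint64_t deadline;

	if (thread_call_is_delayed(call, &deadline)) {
		/* Still on the delayed queue; deadline is in absolute time units. */
		return (TRUE);
	}

	return (FALSE);
}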
Example #14
/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t			call;
	thread_call_group_t		group = &thread_call_group0;
	kern_return_t			result;
	thread_t				thread;
	int						i;
	spl_t					s;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
	lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
	lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif

	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	s = splsched();
	thread_call_lock_spin();

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
	wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO);

	queue_init(&thread_call_internal_queue);
	for (call = internal_call_storage;
	     call < &internal_call_storage[internal_call_count];
	     call++) {
		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	thread_call_unlock();
	splx(s);

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
Example #15
static inline spl_t
disable_ints_and_lock(void)
{
	spl_t s;

	s = splsched();
	thread_call_lock_spin();

	return s;
}
Example #16
/*
 * Timer callback to tell a thread to terminate if
 * we have an excess of threads and at least one has been
 * idle for a long time.
 */
static void
thread_call_dealloc_timer(
		timer_call_param_t 		p0,
		__unused timer_call_param_t 	p1)
{
	thread_call_group_t group = (thread_call_group_t)p0;
	uint64_t now;
	kern_return_t res;
	boolean_t terminated = FALSE;
	
	thread_call_lock_spin();

	now = mach_absolute_time();
	if (group->idle_count > 0) {
		if (now > group->idle_timestamp + thread_call_dealloc_interval_abs) {
			terminated = TRUE;
			group->idle_count--;
			res = wait_queue_wakeup_one(&group->idle_wqueue, NO_EVENT, THREAD_INTERRUPTED, -1);
			if (res != KERN_SUCCESS) {
				panic("Unable to wake up idle thread for termination?");
			}
		}
	}

	/*
	 * If we still have an excess of threads, schedule another
	 * invocation of this function.
	 */
	if (group->idle_count > 0 && (group->idle_count + group->active_count > group->target_thread_count)) {
		/*
		 * If we killed someone just now, push out the
		 * next deadline.
		 */
		if (terminated) {
			group->idle_timestamp = now;
		}

		thread_call_start_deallocate_timer(group);
	} else {
		group->flags &= ~TCG_DEALLOC_ACTIVE;
	}

	thread_call_unlock();
}
Example #17
/*
 *	thread_call_cancel:
 *
 *	Dequeue a callout entry.
 *
 *	Returns TRUE if the call was
 *	on a queue.
 */
boolean_t
thread_call_cancel(
    thread_call_t		call)
{
	boolean_t				result;
	thread_call_group_t		group = &thread_call_group0;
	spl_t					s;
    
	s = splsched();
	thread_call_lock_spin();

	result = _call_dequeue(call, group);
	
	thread_call_unlock();
	splx(s);

	return (result);
}
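A caller sketch; note that FALSE only means the entry was not on a queue at that instant, it may already be executing. See thread_call_cancel_wait() in Example #11 for the waiting variant. my_cancel is hypothetical:

static void
my_cancel(thread_call_t call)
{
	if (!thread_call_cancel(call)) {
		/*
		 * Not on a queue: never submitted, already delivered, or
		 * currently executing.  Cancellation does not wait.
		 */
	}
}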
Example #18
/*
 *	thread_call_func:
 *
 *	Enqueue a function callout.
 *
 *	Guarantees { function, argument }
 *	uniqueness if unique_call is TRUE.
 */
void
thread_call_func(
    thread_call_func_t		func,
    thread_call_param_t		param,
    boolean_t				unique_call)
{
	thread_call_t			call;
	thread_call_group_t		group = &thread_call_group0;
	spl_t					s;

	s = splsched();
	thread_call_lock_spin();

	call = TC(queue_first(&group->pending_queue));

	while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
		if (call->func == func && call->param0 == param) {
			break;
		}

		call = TC(queue_next(qe(call)));
	}

	if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
		call = _internal_call_allocate();
		call->func		= func;
		call->param0	= param;
		call->param1	= NULL;

		_pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	thread_call_unlock();
	splx(s);
}
Example #19
/*
 *	thread_call_free:
 *
 *	Free a callout entry.
 */
boolean_t
thread_call_free(
    thread_call_t		call)
{
	spl_t		s;

	s = splsched();
	thread_call_lock_spin();

	if (call->queue != NULL) {
		thread_call_unlock();
		splx(s);

		return (FALSE);
	}

	thread_call_unlock();
	splx(s);

	zfree(thread_call_zone, call);

	return (TRUE);
}
Example #20
/*
 *	sched_call_thread:
 *
 *	Call out invoked by the scheduler.
 */
static void
sched_call_thread(
	int				type,
__unused	thread_t		thread)
{
	thread_call_group_t		group = &thread_call_group0;

	thread_call_lock_spin();

	switch (type) {

	case SCHED_CALL_BLOCK:
		if (--group->active_count == 0 && group->pending_count > 0)
			thread_call_wake(group);
		break;

	case SCHED_CALL_UNBLOCK:
		group->active_count++;
		break;
	}

	thread_call_unlock();
}
Example #21
/*
 *	thread_call_enter_delayed:
 *
 *	Enqueue a callout entry to occur
 *	at the stated time.
 *
 *	Returns TRUE if the call was
 *	already on a queue.
 */
boolean_t
thread_call_enter_delayed(
    thread_call_t		call,
    uint64_t			deadline)
{
	boolean_t				result = TRUE;
	thread_call_group_t		group = &thread_call_group0;
	spl_t					s;

	s = splsched();
	thread_call_lock_spin();

	result = _delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->param1 = 0;

	thread_call_unlock();
	splx(s);

	return (result);
}
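A re-arming sketch, assuming nanoseconds_to_absolutetime() from the clock KPI (only mach_absolute_time() appears in these examples); my_rearm is hypothetical:

static void
my_rearm(thread_call_t call)
{
	uint64_t interval, deadline;

	/* Re-arm the callout for one second from now. */
	nanoseconds_to_absolutetime(NSEC_PER_SEC, &interval);
	deadline = mach_absolute_time() + interval;
	(void) thread_call_enter_delayed(call, deadline);
}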
Example #22
/*
 *	thread_call_func_cancel:
 *
 *	Dequeue a function callout.
 *
 *	Removes one (or all) { function, argument }
 *	instance(s) from either (or both)
 *	the pending and the delayed queue,
 *	in that order.
 *
 *	Returns TRUE if any calls were cancelled.
 */
boolean_t
thread_call_func_cancel(
		thread_call_func_t		func,
		thread_call_param_t		param,
		boolean_t			cancel_all)
{
	boolean_t	result;
	spl_t		s;

	s = splsched();
	thread_call_lock_spin();

	if (cancel_all)
		result = _remove_from_pending_queue(func, param, cancel_all) |
			_remove_from_delayed_queue(func, param, cancel_all);
	else
		result = _remove_from_pending_queue(func, param, cancel_all) ||
			_remove_from_delayed_queue(func, param, cancel_all);

	thread_call_unlock();
	splx(s);

	return (result);
}
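A caller sketch for the cancel-by-{ function, argument } form, reusing the hypothetical my_func/my_cookie names from Example #5's sketch:

static void
my_cancel_all(void *my_cookie)
{
	/* cancel_all == TRUE: remove every queued { my_func, my_cookie } instance. */
	if (thread_call_func_cancel(my_func, (thread_call_param_t)my_cookie, TRUE)) {
		/* At least one pending or delayed instance was cancelled. */
	}
}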
Example #23
void
thread_call_delayed_timer(
	timer_call_param_t				p0,
	__unused timer_call_param_t		p1
)
{
	thread_call_t			call;
	thread_call_group_t		group = p0;
	boolean_t				new_pending = FALSE;
	uint64_t				timestamp;

	thread_call_lock_spin();

	timestamp = mach_absolute_time();
    
	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->deadline <= timestamp) {
			_pending_call_enqueue(call, group);
			new_pending = TRUE;
		}
		else
			break;

		call = TC(queue_first(&group->delayed_queue));
	}

	if (!queue_end(&group->delayed_queue, qe(call)))
		_set_delayed_call_timer(call, group);

	if (new_pending && group->active_count == 0)
		thread_call_wake(group);

	thread_call_unlock();
}
Example #24
/*
 *	thread_call_thread:
 */
static void
thread_call_thread(
	thread_call_group_t		group)
{
	thread_t		self = current_thread();

	(void) splsched();
	thread_call_lock_spin();

	thread_sched_call(self, sched_call_thread);

	while (group->pending_count > 0) {
		thread_call_t			call;
		thread_call_func_t		func;
		thread_call_param_t		param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		func = call->func;
		param0 = call->param0;
		param1 = call->param1;
	
		call->queue = NULL;

		_internal_call_release(call);

		thread_call_unlock();
		(void) spllo();

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
				func, param0, param1, 0, 0);

		(*func)(param0, param1);

		if (get_preemption_level() != 0) {
			int pl = get_preemption_level();
			panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
				  pl, func, param0, param1);
		}
		
		(void)thread_funnel_set(self->funnel_lock, FALSE);		/* XXX */

		(void) splsched();
		thread_call_lock_spin();
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (group->idle_count < thread_call_thread_min) {
		group->idle_count++;

		wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_UNINT, 0);
	
		thread_call_unlock();
		(void) spllo();

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
		/* NOTREACHED */
	}

	thread_call_unlock();
	(void) spllo();

	thread_terminate(self);
	/* NOTREACHED */
}