Example #1
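Installs the workqueue's scheduler callback on the current thread; this helper and the one in Example #2 appear to be the thread_switch() support routines from XNU's osfmk/kern/syscall_subr.c. The callback is looked up first, then swapped in under the thread lock at splsched, the locking discipline thread_sched_call() expects.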
static void
thread_switch_enable_workqueue_sched_callback(void)
{
	sched_call_t callback = workqueue_get_sched_callback();
	thread_t self = current_thread();
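	/* Installing a sched call requires the thread lock, taken at splsched. */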
	spl_t s = splsched();
	thread_lock(self);
	thread_sched_call(self, callback);
	thread_unlock(self);
	splx(s);
}
Example #2
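The inverse helper: it removes the workqueue scheduler callback from the current thread, but only when that callback is actually the one installed, and it returns FALSE when there was nothing to disable, so the caller knows whether a matching re-enable is needed afterwards.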
static boolean_t
thread_switch_disable_workqueue_sched_callback(void)
{
	sched_call_t callback = workqueue_get_sched_callback();
	thread_t self = current_thread();
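	/* Nothing to disable unless the workqueue callback is the one installed. */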
	if (!callback || self->sched_call != callback) {
		return FALSE;
	}
	spl_t s = splsched();
	thread_lock(self);
	thread_sched_call(self, NULL);
	thread_unlock(self);
	splx(s);
	return TRUE;
}
Example #3
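The self-termination path, apparently from XNU's osfmk/kern/thread.c. The thread dismantles its own state in stages: the priority-depression timer, the scheduler callback, BSD uthread state (running proc_exit() if it is the last thread in the task), the wait timer, and any reserved stack, before marking itself TH_TERMINATE and blocking into thread_terminate_continue(), from which it never returns.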
/*
 *	thread_terminate_self:
 */
void
thread_terminate_self(void)
{
	thread_t		thread = current_thread();

	task_t			task;
	spl_t			s;
	int threadcnt;

	DTRACE_PROC(lwp__exit);

	thread_mtx_lock(thread);

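	/* Release any held ulocks and disable further IPC to the thread. */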
	ulock_release_all(thread);

	ipc_thread_disable(thread);
	
	thread_mtx_unlock(thread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel priority depression, wait for concurrent expirations
	 *	on other processors.
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
		thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}

	while (thread->depress_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	thread_sched_call(thread, NULL);

	thread_unlock(thread);
	splx(s);

	thread_policy_reset(thread);

#if CONFIG_EMBEDDED
	thead_remove_taskwatch(thread);
#endif /* CONFIG_EMBEDDED */

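	/* Clean up the BSD uthread; hw_atomic_sub returns the updated count. */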
	task = thread->task;
	uthread_cleanup(task, thread->uthread, task->bsd_info);
	threadcnt = hw_atomic_sub(&task->active_thread_count, 1);

	/*
	 * If we are the last thread to terminate and the task is
	 * associated with a BSD process, perform BSD process exit.
	 */
	if (threadcnt == 0 && task->bsd_info != NULL)
		proc_exit(task->bsd_info);

	uthread_cred_free(thread->uthread);

	s = splsched();
	thread_lock(thread);

	/*
	 *	Cancel wait timer, and wait for
	 *	concurrent expirations.
	 */
	if (thread->wait_timer_is_set) {
		thread->wait_timer_is_set = FALSE;

		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
	}

	while (thread->wait_timer_active > 0) {
		thread_unlock(thread);
		splx(s);

		delay(1);

		s = splsched();
		thread_lock(thread);
	}

	/*
	 *	If there is a reserved stack, release it.
	 */
	if (thread->reserved_stack != 0) {
		stack_free_reserved(thread);
		thread->reserved_stack = 0;
	}

	/*
	 *	Mark thread as terminating, and block.
	 */
	thread->state |= TH_TERMINATE;
	thread_mark_wait_locked(thread, THREAD_UNINT);
	thread_unlock(thread);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_continue);
	/*NOTREACHED*/
}
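Both timer teardown blocks above use the same cancel-and-drain idiom: clear the armed state, credit ourselves if timer_call_cancel() beat the expiration handler, then spin (dropping the lock around a delay) until any handler already running on another CPU finishes. Below is a minimal user-space model of that idiom with C11 atomics; every name in it is invented for illustration and none of it is XNU API.

#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

/* Invented state: 'armed' says an expiration is queued, 'active'
 * counts expirations still queued or currently executing. */
static atomic_bool	timer_armed;
static atomic_int	timer_active;

/* Cancel model: returns true only if we beat the handler to the
 * armed flag, i.e. the expiration will never run. */
static bool
timer_cancel_model(void)
{
	bool expected = true;
	return atomic_compare_exchange_strong(&timer_armed, &expected, false);
}

static void
timer_teardown_model(void)
{
	if (timer_cancel_model()) {
		/* Cancelled before it fired: drop its in-flight credit. */
		atomic_fetch_sub(&timer_active, 1);
	}

	/* A handler may be mid-flight on another CPU; wait it out, as
	 * thread_terminate_self() does with its unlock/delay(1)/relock loop. */
	while (atomic_load(&timer_active) > 0) {
		sched_yield();
	}
}

int
main(void)
{
	/* Arm once, then tear down: no handler raced us here. */
	atomic_store(&timer_armed, true);
	atomic_fetch_add(&timer_active, 1);
	timer_teardown_model();
	return 0;
}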
Example #4
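A thread-call worker loop, apparently from a later XNU revision than Example #5 (note the tc_call sub-structure, the disable_ints_and_lock() wrappers, and the parallel-group support). The worker drains its group's pending queue, invoking each callout with the lock dropped and interrupts enabled, and then either parks on the group's idle wait queue, arming a deallocation timer when the group has surplus threads, or terminates itself.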
/*
 *	thread_call_thread:
 */
static void
thread_call_thread(
		thread_call_group_t		group,
		wait_result_t			wres)
{
	thread_t	self = current_thread();
	boolean_t	canwait;

	/*
	 * A wakeup with THREAD_INTERRUPTED indicates that 
	 * we should terminate.
	 */
	if (wres == THREAD_INTERRUPTED) {
		thread_terminate(self);

		/* NOTREACHED */
		panic("thread_terminate() returned?");
	}

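	/* Returns with interrupts disabled and the thread call lock held. */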
	(void)disable_ints_and_lock();

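	/* Callouts run with the group's scheduler callback installed on this thread. */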
	thread_sched_call(self, group->sched_call);

	while (group->pending_count > 0) {
		thread_call_t			call;
		thread_call_func_t		func;
		thread_call_param_t		param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		func = call->tc_call.func;
		param0 = call->tc_call.param0;
		param1 = call->tc_call.param1;

		call->tc_call.queue = NULL;

		_internal_call_release(call);

		/*
		 * Can only do wakeups for thread calls whose storage
		 * we control.
		 */
		if ((call->tc_flags & THREAD_CALL_ALLOC) != 0) {
			canwait = TRUE;
			call->tc_refs++;	/* Delay free until we're done */
		} else
			canwait = FALSE;

		enable_ints_and_unlock();

		KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
				VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);

		(*func)(param0, param1);

		if (get_preemption_level() != 0) {
			int pl = get_preemption_level();
			panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
					pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1);
		}

		(void)thread_funnel_set(self->funnel_lock, FALSE);		/* XXX */

		(void) disable_ints_and_lock();
		
		if (canwait) {
			/* Frees if so desired */
			thread_call_finish(call);
		}
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (group_isparallel(group)) {
		/*
		 * For new style of thread group, thread always blocks. 
		 * If we have more than the target number of threads,
		 * and this is the first to block, and it isn't active 
		 * already, set a timer for deallocating a thread if we 
		 * continue to have a surplus.
		 */
		group->idle_count++;

		if (group->idle_count == 1) {
			group->idle_timestamp = mach_absolute_time();
		}   

		if (((group->flags & TCG_DEALLOC_ACTIVE) == 0) &&
				((group->active_count + group->idle_count) > group->target_thread_count)) {
			group->flags |= TCG_DEALLOC_ACTIVE;
			thread_call_start_deallocate_timer(group);
		}   

		/* Wait for more work (or termination) */
		wres = wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_INTERRUPTIBLE, 0); 
		if (wres != THREAD_WAITING) {
			panic("kcall worker unable to assert wait?");
		}   

		enable_ints_and_unlock();

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
	} else {
		if (group->idle_count < group->target_thread_count) {
			group->idle_count++;

			wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_UNINT, 0); /* Interrupted means to exit */

			enable_ints_and_unlock();

			thread_block_parameter((thread_continue_t)thread_call_thread, group);
			/* NOTREACHED */
		}
	}

	enable_ints_and_unlock();

	thread_terminate(self);
	/* NOTREACHED */
}
Example #5
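The earlier, simpler revision of the same worker loop: flat thread_call_t fields, raw splsched()/thread_call_lock_spin() in place of the wrapper, and a fixed global pool minimum (thread_call_thread_min) instead of per-group targets and deallocation timers.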
/*
 *	thread_call_thread:
 */
static void
thread_call_thread(
	thread_call_group_t		group)
{
	thread_t		self = current_thread();

	(void) splsched();
	thread_call_lock_spin();

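	/* Install the thread-call scheduler callback for the duration of the drain. */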
	thread_sched_call(self, sched_call_thread);

	while (group->pending_count > 0) {
		thread_call_t			call;
		thread_call_func_t		func;
		thread_call_param_t		param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		func = call->func;
		param0 = call->param0;
		param1 = call->param1;
	
		call->queue = NULL;

		_internal_call_release(call);

		thread_call_unlock();
		(void) spllo();

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
				func, param0, param1, 0, 0);

		(*func)(param0, param1);

		if (get_preemption_level() != 0) {
			int pl = get_preemption_level();
			panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
				  pl, func, param0, param1);
		}
		
		(void)thread_funnel_set(self->funnel_lock, FALSE);		/* XXX */

		(void) splsched();
		thread_call_lock_spin();
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (group->idle_count < thread_call_thread_min) {
		group->idle_count++;

		wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_UNINT, 0);
	
		thread_call_unlock();
		(void) spllo();

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
		/* NOTREACHED */
	}

	thread_call_unlock();
	(void) spllo();

	thread_terminate(self);
	/* NOTREACHED */
}
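Common to both worker loops is the dispatch discipline: dequeue under the lock, drop the lock (and re-enable interrupts) to invoke the callout, and retake the lock before touching the queue again, since the callout may block or enqueue more work. Below is a minimal user-space sketch of that discipline using pthreads; all names (callout_t, pending_head, and so on) are invented for illustration and none of this is XNU API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Invented types for illustration -- not XNU API. */
typedef void (*callout_fn_t)(void *param);

typedef struct callout {
	struct callout	*next;
	callout_fn_t	func;
	void		*param;
} callout_t;

static pthread_mutex_t	queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	queue_cv = PTHREAD_COND_INITIALIZER;
static callout_t	*pending_head;

/* Worker: dequeue under the lock, invoke with the lock dropped. */
static void *
worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&queue_lock);
	for (;;) {
		while (pending_head != NULL) {
			callout_t *call = pending_head;
			pending_head = call->next;

			/* Never hold the queue lock across the callout:
			 * it may block or enqueue more work. */
			pthread_mutex_unlock(&queue_lock);
			(*call->func)(call->param);
			free(call);
			pthread_mutex_lock(&queue_lock);
		}
		/* Queue drained: park until more work arrives (the kernel
		 * versions wait on group->idle_wqueue here). */
		pthread_cond_wait(&queue_cv, &queue_lock);
	}
	return NULL;
}

static void
enqueue(callout_fn_t func, void *param)
{
	callout_t *call = malloc(sizeof(*call));
	call->func = func;
	call->param = param;
	pthread_mutex_lock(&queue_lock);
	call->next = pending_head;
	pending_head = call;
	pthread_cond_signal(&queue_cv);
	pthread_mutex_unlock(&queue_lock);
}

static void
print_callout(void *param)
{
	printf("callout ran: %s\n", (const char *)param);
}

int
main(void)
{
	pthread_t tid;
	pthread_create(&tid, NULL, worker, NULL);
	enqueue(print_callout, "pending work");
	sleep(1);	/* let the worker drain the queue, then exit */
	return 0;
}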