Example #1
kern_return_t
thread_get_special_port(
	thread_act_t	thr_act,
	int		which,
	ipc_port_t	*portp)
{
	ipc_port_t	*whichp;
	ipc_port_t	port;
	thread_t	thread;

	if (!thr_act)
		return KERN_INVALID_ARGUMENT;
	thread = act_lock_thread(thr_act);
	switch (which) {
		case THREAD_KERNEL_PORT:
			whichp = &thr_act->ith_sself;
			break;

		default:
			act_unlock_thread(thr_act);
			return KERN_INVALID_ARGUMENT;
	}

	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return KERN_FAILURE;
	}

	port = ipc_port_copy_send(*whichp);
	act_unlock_thread(thr_act);

	*portp = port;
	return KERN_SUCCESS;
}
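
From user space, this entry point is reachable as the MIG-generated thread_get_special_port() routine. A minimal sketch of a caller (assuming a macOS/Mach user-space environment; error handling abbreviated):

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

int
main(void)
{
	mach_port_t	self = mach_thread_self();
	mach_port_t	kport = MACH_PORT_NULL;
	kern_return_t	kr;

	/* ipc_port_copy_send() above produces the send right we get back */
	kr = thread_get_special_port(self, THREAD_KERNEL_PORT, &kport);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "thread_get_special_port: %s\n",
		    mach_error_string(kr));
		return (1);
	}
	printf("kernel port: 0x%x\n", kport);

	/* the caller owns both send rights and must release them */
	mach_port_deallocate(mach_task_self(), kport);
	mach_port_deallocate(mach_task_self(), self);
	return (0);
}
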
Example #2
kern_return_t
thread_abort_safely(
	thread_act_t	act)
{
	thread_t		thread;
	kern_return_t	ret;
	spl_t			s;

	if (	act == THR_ACT_NULL )
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	s = splsched();
	thread_lock(thread);
	if (!thread->at_safe_point ||
		clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
		if (!(thread->state & TH_ABORT)) {
			thread->state |= (TH_ABORT|TH_ABORT_SAFELY);
			install_special_handler_locked(act);
		}
	}
	thread_unlock(thread);
	splx(s);
		
	act_unlock_thread(act);

	return (KERN_SUCCESS);
}
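
thread_abort_safely() is likewise exported through MIG. The sketch below (assuming macOS with pthreads; whether the interrupted wait restarts or returns early is BSD-layer behavior and is hedged here) pokes a thread parked in an abort-safe wait:

#include <mach/mach.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *
sleeper(void *arg)
{
	(void)arg;
	/* nanosleep() underneath: an interruptible, abort-safe wait */
	unsigned int left = sleep(30);
	printf("sleep returned with %u seconds left\n", left);
	return (NULL);
}

int
main(void)
{
	pthread_t	pt;
	kern_return_t	kr;

	pthread_create(&pt, NULL, sleeper, NULL);
	sleep(1);				/* let the sleeper block */

	kr = thread_abort_safely(pthread_mach_thread_np(pt));
	printf("thread_abort_safely: %d\n", kr);

	pthread_join(pt, NULL);
	return (0);
}

If the target is not at a safe point, the kernel code above instead marks it TH_ABORT|TH_ABORT_SAFELY and installs the special handler, so the abort is delivered the next time the thread reaches one.
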
Example #3
kern_return_t
thread_info(
	thread_act_t			thr_act,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	register thread_t		thread;
	kern_return_t			result;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);

		return (KERN_TERMINATED);
	}

	result = thread_info_shuttle(thr_act, flavor,
					thread_info_out, thread_info_count);

	act_unlock_thread(thr_act);

	return (result);
}
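
thread_info() is itself a MIG routine; thread_info_shuttle() does the flavor-specific work. A hedged user-space sketch querying the basic-info flavor (assuming macOS):

#include <mach/mach.h>
#include <stdio.h>

int
main(void)
{
	thread_basic_info_data_t	info;
	mach_msg_type_number_t		count = THREAD_BASIC_INFO_COUNT;
	mach_port_t			self = mach_thread_self();
	kern_return_t			kr;

	kr = thread_info(self, THREAD_BASIC_INFO,
	    (thread_info_t)&info, &count);
	if (kr == KERN_SUCCESS)
		printf("user time %d.%06d, cpu_usage %d, suspend_count %d\n",
		    info.user_time.seconds, info.user_time.microseconds,
		    info.cpu_usage, info.suspend_count);

	mach_port_deallocate(mach_task_self(), self);
	return (kr == KERN_SUCCESS ? 0 : 1);
}
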
Example #4
/*
 *	Change thread's machine-dependent state.  Called with nothing
 *	locked.  Returns same way.
 */
kern_return_t
thread_set_state(
	register thread_act_t	act,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count)
{
	kern_return_t		result = KERN_SUCCESS;
	thread_t			thread;

	if (act == THR_ACT_NULL || act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	thread_hold(act);

	for (;;) {
		thread_t			thread1;

		if (	thread == THREAD_NULL		||
				thread->top_act != act		)
			break;
		act_unlock_thread(act);

		if (!thread_stop(thread)) {
			result = KERN_ABORTED;
			(void)act_lock_thread(act);
			thread = THREAD_NULL;
			break;
		}

		thread1 = act_lock_thread(act);
		if (thread1 == thread)
			break;

		thread_unstop(thread);
		thread = thread1;
	}

	if (result == KERN_SUCCESS)
		result = machine_thread_set_state(act, flavor, state, state_count);

	if (	thread != THREAD_NULL		&&
			thread->top_act == act		)
	    thread_unstop(thread);

	thread_release(act);
	act_unlock_thread(act);

	return (result);
}
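
Note the guard above: a thread cannot set its own state (act == current_act() is rejected), so a user-space caller targets a second thread and conventionally suspends it first so the register file is stable. A sketch of that pattern (assuming macOS on x86_64 or arm64; thread-state flavors are machine-dependent, and this just round-trips the state unchanged):

#include <mach/mach.h>
#include <pthread.h>
#include <unistd.h>

static void *
spin(void *arg)
{
	(void)arg;
	for (;;)
		;
}

int
main(void)
{
#if defined(__x86_64__)
	x86_thread_state64_t	state;
	mach_msg_type_number_t	count = x86_THREAD_STATE64_COUNT;
	thread_state_flavor_t	flavor = x86_THREAD_STATE64;
#elif defined(__arm64__)
	arm_thread_state64_t	state;
	mach_msg_type_number_t	count = ARM_THREAD_STATE64_COUNT;
	thread_state_flavor_t	flavor = ARM_THREAD_STATE64;
#else
#error "sketch assumes x86_64 or arm64"
#endif
	pthread_t	pt;
	mach_port_t	thr;

	pthread_create(&pt, NULL, spin, NULL);
	sleep(1);
	thr = pthread_mach_thread_np(pt);

	thread_suspend(thr);		/* quiesce the target first */
	if (thread_get_state(thr, flavor, (thread_state_t)&state,
	    &count) == KERN_SUCCESS) {
		/* ...modify state here if desired... */
		thread_set_state(thr, flavor, (thread_state_t)&state, count);
	}
	thread_resume(thr);
	return (0);
}
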
Example #5
kern_return_t
thread_dup(
	register thread_act_t	target)
{
	kern_return_t		result = KERN_SUCCESS;
	thread_act_t		self = current_act();
	thread_t			thread;

	if (target == THR_ACT_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(target);

	if (!target->active) {
		act_unlock_thread(target);
		return (KERN_TERMINATED);
	}

	thread_hold(target);

	for (;;) {
		thread_t			thread1;

		if (	thread == THREAD_NULL		||
				thread->top_act != target	)
			break;
		act_unlock_thread(target);

		if (!thread_stop(thread)) {
			result = KERN_ABORTED;
			(void)act_lock_thread(target);
			thread = THREAD_NULL;
			break;
		}

		thread1 = act_lock_thread(target);
		if (thread1 == thread)
			break;

		thread_unstop(thread);
		thread = thread1;
	}

	if (result == KERN_SUCCESS)
		result = machine_thread_dup(self, target);

	if (	thread != THREAD_NULL		&&
			thread->top_act == target	)
	    thread_unstop(thread);

	thread_release(target);
	act_unlock_thread(target);

	return (result);
}
Example #6
kern_return_t
check_actforsig(task_t task, thread_act_t thact, int setast)
{
	thread_act_t	inc;
	thread_act_t	ninc;
	thread_act_t	thr_act;
	thread_t	th;
	int		found = 0;

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}

	thr_act = THR_ACT_NULL;
	for (inc = (thread_act_t)queue_first(&task->threads);
	     !queue_end(&task->threads, (queue_entry_t)inc);
	     inc = ninc) {
		if (inc != thact) {
			ninc = (thread_act_t)queue_next(&inc->task_threads);
			continue;
		}
		th = act_lock_thread(inc);
		if ((inc->active) &&
		    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
			found = 1;
			thr_act = inc;
			break;
		}
		act_unlock_thread(inc);
		break;			/* only the target act matters */
	}

	if (found) {
		if (setast)
			act_set_astbsd(thr_act);

		act_unlock_thread(thr_act);
	}
	task_unlock(task);

	if (found)
		return (KERN_SUCCESS);
	else
		return (KERN_FAILURE);
}
Example #7
/*
 *	thread_getstatus:
 *
 *	Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
	register thread_act_t	act,
	int						flavor,
	thread_state_t			tstate,
	mach_msg_type_number_t	*count)
{
	kern_return_t		result = KERN_SUCCESS;
	thread_t			thread;

	thread = act_lock_thread(act);

	if (	act != current_act()			&&
			(act->suspend_count == 0	||
			 thread == THREAD_NULL		||
			 (thread->state & TH_RUN)	||
			 thread->top_act != act)		)
		result = KERN_FAILURE;

	if (result == KERN_SUCCESS)
		result = machine_thread_get_state(act, flavor, tstate, count);

	act_unlock_thread(act);

	return (result);
}
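
The check encodes an asymmetry: a thread may always sample its own state (act == current_act()), but a remote target must be held suspended and off the processor. Self-inspection from user space, as a sketch (assuming x86_64 macOS; register field names carry the __ prefix under UNIX03):

#include <mach/mach.h>
#include <stdio.h>

int
main(void)
{
#if defined(__x86_64__)
	x86_thread_state64_t	state;
	mach_msg_type_number_t	count = x86_THREAD_STATE64_COUNT;
	mach_port_t		self = mach_thread_self();

	/* act == current_act(): no suspension required */
	if (thread_get_state(self, x86_THREAD_STATE64,
	    (thread_state_t)&state, &count) == KERN_SUCCESS)
		printf("rip = 0x%llx, rsp = 0x%llx\n",
		    state.__rip, state.__rsp);

	mach_port_deallocate(mach_task_self(), self);
#endif
	return (0);
}
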
Example #8
kern_return_t
get_signalact(task_t task, thread_act_t *thact, int setast)
{
	thread_act_t	inc;
	thread_act_t	ninc;
	thread_act_t	thr_act;
	thread_t	th;

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}

	thr_act = THR_ACT_NULL;
	for (inc = (thread_act_t)queue_first(&task->threads);
	     !queue_end(&task->threads, (queue_entry_t)inc);
	     inc = ninc) {
		th = act_lock_thread(inc);
		if ((inc->active) &&
		    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
			thr_act = inc;
			break;
		}
		act_unlock_thread(inc);
		ninc = (thread_act_t)queue_next(&inc->task_threads);
	}

	if (thact)
		*thact = thr_act;
	if (thr_act) {
		if (setast)
			act_set_astbsd(thr_act);

		act_unlock_thread(thr_act);
	}
	task_unlock(task);

	if (thr_act)
		return (KERN_SUCCESS);
	else
		return (KERN_FAILURE);
}
Example #9
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
	register thread_act_t	act)
{
	kern_return_t	result;
	thread_t		thread;

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	act_disable(act);
	result = act_abort(act, FALSE);

	/* 
	 * Make sure this thread enters the kernel
	 * Must unlock the act, but leave the shuttle
	 * captured in this act.
	 */
	if (thread != current_thread()) {
		act_unlock(act);

		if (thread_stop(thread))
			thread_unstop(thread);
		else
			result = KERN_ABORTED;

		act_lock(act);
	}

	clear_wait(thread, act->started? THREAD_INTERRUPTED: THREAD_AWAKENED);
	act_unlock_thread(act);

	return (result);
}
Example #10
/*
 * thread_stop_freeze
 *	Block the thread in the kernel and freeze the processor set.
 * return value:
 *	TRUE - the thread has blocked interruptibly, is stopped, and
 *		the processor set assignment is frozen
 *	FALSE - the thread is no longer in the processor set, so it
 *		isn't stopped, and the processor set assignment
 *		is released.
 */
int
thread_stop_freeze( thread_t thread, processor_set_t pset )
{
	thread_act_t	thr_act;
	spl_t	s;

	/*
	 * hold it, and wait for it to stop.
	 */
	thr_act = thread_lock_act(thread);
	thread_hold(thr_act);
	act_unlock_thread(thr_act);

	thread_stop(thread);

	s = splsched();
	wake_lock(thread);
	while (thread->state & (TH_RUN|TH_UNINT)) {
		thread->wake_active = TRUE;
		assert_wait((event_t)&thread->wake_active, FALSE);
		wake_unlock(thread);
		splx(s);
		thread_block((void (*)(void)) 0);
		(void) splsched();
		wake_lock(thread);
	}

	/*
	 * Now, the thread has blocked interruptibly; freeze the
	 * assignment and make sure it's still part of the processor set.
	 */
	wake_unlock(thread);
	thread_freeze(thread);
	thread_lock(thread);

	/*
	 * if the processor set has changed, release the freeze and
	 * then unstop it.
	 */
	if( thread->processor_set != pset ) {
		thread_unlock(thread);
		splx(s);
		thread_unfreeze(thread);
		thread_unstop(thread);
		return FALSE;
	}
	thread_unlock(thread);
	splx(s);
	return TRUE;
}
Example #11
kern_return_t
thread_abort(
	register thread_act_t	act)
{
	kern_return_t	result;
	thread_t		thread;

	if (act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	result = act_abort(act, FALSE);
	clear_wait(thread, THREAD_INTERRUPTED);
	act_unlock_thread(act);

	return (result);
}
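
Unlike thread_abort_safely(), this variant does not wait for a safe point; clear_wait() yanks the target out of whatever interruptible wait it is in, which for a thread blocked in a system call typically surfaces as an interrupted (shortened or EINTR-style) return. A hedged sketch (assuming macOS with pthreads):

#include <mach/mach.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int fds[2];

static void *
reader(void *arg)
{
	char	c;
	ssize_t	n;

	(void)arg;
	n = read(fds[0], &c, 1);	/* blocks: nothing is ever written */
	printf("read -> %zd%s%s\n", n,
	    n < 0 ? ", errno: " : "", n < 0 ? strerror(errno) : "");
	return (NULL);
}

int
main(void)
{
	pthread_t	pt;

	pipe(fds);
	pthread_create(&pt, NULL, reader, NULL);
	sleep(1);			/* let it block in read() */

	thread_abort(pthread_mach_thread_np(pt));

	pthread_join(pt, NULL);
	return (0);
}
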
Example #12
kern_return_t
thread_set_special_port(
	thread_act_t	thr_act,
	int		which,
	ipc_port_t	port)
{
	ipc_port_t	*whichp;
	ipc_port_t	old;
	thread_t	thread;

	if (thr_act == 0)
		return KERN_INVALID_ARGUMENT;

	thread = act_lock_thread(thr_act);
	switch (which) {
		case THREAD_KERNEL_PORT:
			whichp = &thr_act->ith_self;
			break;

		default:
			act_unlock_thread(thr_act);
			return KERN_INVALID_ARGUMENT;
	}

	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return KERN_FAILURE;
	}

	old = *whichp;
	*whichp = port;
	act_unlock_thread(thr_act);

	if (IP_VALID(old))
		ipc_port_release_send(old);
	return KERN_SUCCESS;
}
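
Because the port argument is stored directly (*whichp = port) and the old right is released afterwards, thread_set_special_port() consumes one send right from the caller. A sketch that merely round-trips the kernel port so the rights stay balanced (assuming macOS; swapping in a genuinely different port would redirect subsequent operations on THREAD_KERNEL_PORT):

#include <mach/mach.h>

int
main(void)
{
	mach_port_t	self = mach_thread_self();
	mach_port_t	kport = MACH_PORT_NULL;

	/* obtain a send right... */
	if (thread_get_special_port(self, THREAD_KERNEL_PORT,
	    &kport) != KERN_SUCCESS)
		return (1);

	/* ...and hand it straight back: the kernel keeps this right
	 * and releases the one it previously held, so nothing leaks */
	if (thread_set_special_port(self, THREAD_KERNEL_PORT,
	    kport) != KERN_SUCCESS)
		return (1);

	mach_port_deallocate(mach_task_self(), self);
	return (0);
}
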
Example #13
kern_return_t
thread_suspend(
	register thread_act_t	act)
{
	thread_t	thread;

	if (act == THR_ACT_NULL || act->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	if (	act->user_stop_count++ == 0		&&
			act->suspend_count++ == 0		) {
		install_special_handler(act);
		if (	thread != current_thread()		&&
				thread != THREAD_NULL			&&
				thread->top_act == act			) {
			assert(act->started);
			thread_wakeup_one(&act->suspend_count);
			act_unlock_thread(act);

			thread_wait(thread);
		}
		else
			act_unlock_thread(act);
	}
	else
		act_unlock_thread(act);

	return (KERN_SUCCESS);
}
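
Only the 0-to-1 transition of user_stop_count does real work; further suspends just nest, and thread_wait() keeps the caller from returning until the target is actually off the processor. A runnable sketch (assuming macOS with pthreads):

#include <mach/mach.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static volatile unsigned long ticks;

static void *
worker(void *arg)
{
	(void)arg;
	for (;;)
		ticks++;
}

int
main(void)
{
	pthread_t	pt;
	mach_port_t	thr;
	unsigned long	a, b;

	pthread_create(&pt, NULL, worker, NULL);
	sleep(1);
	thr = pthread_mach_thread_np(pt);

	thread_suspend(thr);		/* user_stop_count: 0 -> 1 */
	a = ticks;
	sleep(1);
	b = ticks;
	printf("while suspended the counter %s\n",
	    a == b ? "stood still" : "kept moving");

	thread_resume(thr);		/* user_stop_count: 1 -> 0 */
	return (0);
}
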
Example #14
/*
 *	thread_depress_abort:
 *
 *	Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
	register thread_act_t	thr_act)
{
	register thread_t	thread;
	kern_return_t		result;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	/* if activation is terminating, this operation is not meaningful */
	if (!thr_act->active) {
		act_unlock_thread(thr_act);

		return (KERN_TERMINATED);
	}

	result = _mk_sp_thread_depress_abort(thread, FALSE);

	act_unlock_thread(thr_act);

	return (result);
}
Example #15
kern_return_t
thread_resume(
	register thread_act_t	act)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t		thread;

	if (act == THR_ACT_NULL || act->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (act->active) {
		if (act->user_stop_count > 0) {
			if (	--act->user_stop_count == 0		&&
					--act->suspend_count == 0		&&
					thread != THREAD_NULL			&&
					thread->top_act == act			) {
				if (!act->started) {
					clear_wait(thread, THREAD_AWAKENED);
					act->started = TRUE;
				}
				else
					thread_wakeup_one(&act->suspend_count);
			}
		}
		else
			result = KERN_FAILURE;
	}
	else
		result = KERN_TERMINATED;

	act_unlock_thread(act);

	return (result);
}
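
The result = KERN_FAILURE branch is easy to observe: a resume with no matching suspend finds user_stop_count at zero and is rejected. A minimal sketch (assuming macOS):

#include <mach/mach.h>
#include <stdio.h>

int
main(void)
{
	mach_port_t	self = mach_thread_self();
	kern_return_t	kr;

	/* no prior thread_suspend(): user_stop_count is 0 */
	kr = thread_resume(self);
	printf("unbalanced thread_resume -> %s\n",
	    kr == KERN_FAILURE ? "KERN_FAILURE" : "unexpected");

	mach_port_deallocate(mach_task_self(), self);
	return (0);
}
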
Example #16
kern_return_t
thread_policy_set(
    thread_act_t			act,
    thread_policy_flavor_t	flavor,
    thread_policy_t			policy_info,
    mach_msg_type_number_t	count)
{
    kern_return_t			result = KERN_SUCCESS;
    thread_t				thread;
    spl_t					s;

    if (act == THR_ACT_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread = act_lock_thread(act);
    if (!act->active) {
        act_unlock_thread(act);

        return (KERN_TERMINATED);
    }

    assert(thread != THREAD_NULL);

    switch (flavor) {

    case THREAD_EXTENDED_POLICY:
    {
        boolean_t				timeshare = TRUE;

        if (count >= THREAD_EXTENDED_POLICY_COUNT) {
            thread_extended_policy_t	info;

            info = (thread_extended_policy_t)policy_info;
            timeshare = info->timeshare;
        }

        s = splsched();
        thread_lock(thread);

        if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
            integer_t	oldmode = (thread->sched_mode & TH_MODE_TIMESHARE);

            thread->sched_mode &= ~TH_MODE_REALTIME;

            if (timeshare && !oldmode) {
                thread->sched_mode |= TH_MODE_TIMESHARE;

                if (thread->state & TH_RUN)
                    pset_share_incr(thread->processor_set);
            }
            else if (!timeshare && oldmode) {
                thread->sched_mode &= ~TH_MODE_TIMESHARE;

                if (thread->state & TH_RUN)
                    pset_share_decr(thread->processor_set);
            }

            thread_recompute_priority(thread);
        }
        else {
            thread->safe_mode &= ~TH_MODE_REALTIME;

            if (timeshare)
                thread->safe_mode |= TH_MODE_TIMESHARE;
            else
                thread->safe_mode &= ~TH_MODE_TIMESHARE;
        }

        thread_unlock(thread);
        splx(s);

        break;
    }

    case THREAD_TIME_CONSTRAINT_POLICY:
    {
        thread_time_constraint_policy_t		info;

        if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_time_constraint_policy_t)policy_info;
        if (	info->constraint < info->computation	||
                info->computation > max_rt_quantum		||
                info->computation < min_rt_quantum		) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        s = splsched();
        thread_lock(thread);

        thread->realtime.period = info->period;
        thread->realtime.computation = info->computation;
        thread->realtime.constraint = info->constraint;
        thread->realtime.preemptible = info->preemptible;

        if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
            if (thread->sched_mode & TH_MODE_TIMESHARE) {
                thread->sched_mode &= ~TH_MODE_TIMESHARE;

                if (thread->state & TH_RUN)
                    pset_share_decr(thread->processor_set);
            }
            thread->sched_mode |= TH_MODE_REALTIME;
            thread_recompute_priority(thread);
        }
        else {
            thread->safe_mode &= ~TH_MODE_TIMESHARE;
            thread->safe_mode |= TH_MODE_REALTIME;
        }

        thread_unlock(thread);
        splx(s);

        break;
    }

    case THREAD_PRECEDENCE_POLICY:
    {
        thread_precedence_policy_t		info;

        if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_precedence_policy_t)policy_info;

        s = splsched();
        thread_lock(thread);

        thread->importance = info->importance;

        thread_recompute_priority(thread);

        thread_unlock(thread);
        splx(s);

        break;
    }

    default:
        result = KERN_INVALID_ARGUMENT;
        break;
    }

    act_unlock_thread(act);

    return (result);
}
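
From user space the same flavors are set through the MIG thread_policy_set() call. A sketch using THREAD_PRECEDENCE_POLICY, which feeds the thread->importance assignment above (assuming macOS; the importance value -5 is just an illustrative choice):

#include <mach/mach.h>
#include <mach/thread_policy.h>
#include <stdio.h>

int
main(void)
{
	thread_precedence_policy_data_t	pol = { .importance = -5 };
	mach_port_t			self = mach_thread_self();
	kern_return_t			kr;

	/* lower this thread's importance relative to its task */
	kr = thread_policy_set(self, THREAD_PRECEDENCE_POLICY,
	    (thread_policy_t)&pol, THREAD_PRECEDENCE_POLICY_COUNT);
	printf("thread_policy_set -> %d\n", kr);

	mach_port_deallocate(mach_task_self(), self);
	return (0);
}
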
Example #17
kern_return_t
thread_policy_get(
    thread_act_t			act,
    thread_policy_flavor_t	flavor,
    thread_policy_t			policy_info,
    mach_msg_type_number_t	*count,
    boolean_t				*get_default)
{
    kern_return_t			result = KERN_SUCCESS;
    thread_t				thread;
    spl_t					s;

    if (act == THR_ACT_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread = act_lock_thread(act);
    if (!act->active) {
        act_unlock_thread(act);

        return (KERN_TERMINATED);
    }

    assert(thread != THREAD_NULL);

    switch (flavor) {

    case THREAD_EXTENDED_POLICY:
    {
        boolean_t		timeshare = TRUE;

        if (!(*get_default)) {
            s = splsched();
            thread_lock(thread);

            if (	!(thread->sched_mode & TH_MODE_REALTIME)	&&
                    !(thread->safe_mode & TH_MODE_REALTIME)			) {
                if (!(thread->sched_mode & TH_MODE_FAILSAFE))
                    timeshare = (thread->sched_mode & TH_MODE_TIMESHARE) != 0;
                else
                    timeshare = (thread->safe_mode & TH_MODE_TIMESHARE) != 0;
            }
            else
                *get_default = TRUE;

            thread_unlock(thread);
            splx(s);
        }

        if (*count >= THREAD_EXTENDED_POLICY_COUNT) {
            thread_extended_policy_t	info;

            info = (thread_extended_policy_t)policy_info;
            info->timeshare = timeshare;
        }

        break;
    }

    case THREAD_TIME_CONSTRAINT_POLICY:
    {
        thread_time_constraint_policy_t		info;

        if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_time_constraint_policy_t)policy_info;

        if (!(*get_default)) {
            s = splsched();
            thread_lock(thread);

            if (	(thread->sched_mode & TH_MODE_REALTIME)	||
                    (thread->safe_mode & TH_MODE_REALTIME)		) {
                info->period = thread->realtime.period;
                info->computation = thread->realtime.computation;
                info->constraint = thread->realtime.constraint;
                info->preemptible = thread->realtime.preemptible;
            }
            else
                *get_default = TRUE;

            thread_unlock(thread);
            splx(s);
        }

        if (*get_default) {
            info->period = 0;
            info->computation = std_quantum / 2;
            info->constraint = std_quantum;
            info->preemptible = TRUE;
        }

        break;
    }

    case THREAD_PRECEDENCE_POLICY:
    {
        thread_precedence_policy_t		info;

        if (*count < THREAD_PRECEDENCE_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_precedence_policy_t)policy_info;

        if (!(*get_default)) {
            s = splsched();
            thread_lock(thread);

            info->importance = thread->importance;

            thread_unlock(thread);
            splx(s);
        }
        else
            info->importance = 0;

        break;
    }

    default:
        result = KERN_INVALID_ARGUMENT;
        break;
    }

    act_unlock_thread(act);

    return (result);
}
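
The get_default in/out flag mirrors the logic above: the kernel forces it TRUE when the thread's current mode cannot answer the query (for instance, asking a non-realtime thread for time-constraint numbers). A sketch reading the extended policy (assuming macOS):

#include <mach/mach.h>
#include <mach/thread_policy.h>
#include <stdio.h>

int
main(void)
{
	thread_extended_policy_data_t	info;
	mach_msg_type_number_t		count = THREAD_EXTENDED_POLICY_COUNT;
	boolean_t			get_default = FALSE;
	mach_port_t			self = mach_thread_self();

	if (thread_policy_get(self, THREAD_EXTENDED_POLICY,
	    (thread_policy_t)&info, &count, &get_default) == KERN_SUCCESS)
		printf("timeshare=%d (answered from default: %s)\n",
		    info.timeshare, get_default ? "yes" : "no");

	mach_port_deallocate(mach_task_self(), self);
	return (0);
}
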
Example #18
/*
 *	task_swap_swapout_thread: [exported]
 *
 *	Executes as a separate kernel thread.
 *	Its job is to swap out threads that have been halted by AST_SWAPOUT.
 */
void
task_swap_swapout_thread(void)
{
	thread_act_t thr_act;
	thread_t thread, nthread;
	task_t task;
	int s;

	thread_swappable(current_act(), FALSE);
	stack_privilege(current_thread());

	spllo();

	while (TRUE) {
		task_swapper_lock();
		while (! queue_empty(&swapout_thread_q)) {

			queue_remove_first(&swapout_thread_q, thr_act,
					   thread_act_t, swap_queue);
			/*
			 * If we're racing with task_swapin, we need
			 * to make it safe for it to do remque on the
			 * thread, so make its links point to itself.
			 * Allowing this ugliness is cheaper than 
			 * making task_swapin search the entire queue.
			 */
			act_lock(thr_act);
			queue_init((queue_t) &thr_act->swap_queue);
			act_unlock(thr_act);
			task_swapper_unlock();
			/*
			 * Wait for thread's RUN bit to be deasserted.
			 */
			thread = act_lock_thread(thr_act);
			if (thread == THREAD_NULL)
				act_unlock_thread(thr_act);
			else {
				boolean_t r;

				thread_reference(thread);
				thread_hold(thr_act);
				act_unlock_thread(thr_act);
				r = thread_stop_wait(thread);
				nthread = act_lock_thread(thr_act);
				thread_release(thr_act);
				thread_deallocate(thread);
				act_unlock_thread(thr_act);
				if (!r || nthread != thread) {
					task_swapper_lock();
					continue;
				}
			}
			task = thr_act->task;
			task_lock(task);
			/* 
			 * we can race with swapin, which would set the
			 * state to TASK_SW_IN. 
			 */
			if ((task->swap_state != TASK_SW_OUT) &&
			    (task->swap_state != TASK_SW_GOING_OUT)) {
				task_unlock(task);
				task_swapper_lock();
				TASK_STATS_INCR(task_sw_race_in_won);
				if (thread != THREAD_NULL)
					thread_unstop(thread);
				continue;
			}
			nthread = act_lock_thread(thr_act);
			if (nthread != thread || thr_act->active == FALSE) {
				act_unlock_thread(thr_act);
				task_unlock(task);
				task_swapper_lock();
				TASK_STATS_INCR(task_sw_act_inactive);
				if (thread != THREAD_NULL)
					thread_unstop(thread);
				continue;
			}
			s = splsched();
			if (thread != THREAD_NULL)
				thread_lock(thread);
			/* 
			 * Thread cannot have been swapped out yet because
			 * TH_SW_TASK_SWAPPING was set in AST.  If task_swapin
			 * beat us here, we either wouldn't have found it on
			 * the queue, or the task->swap_state would have
			 * changed.  The synchronization is on the
			 * task's swap_state and the task_lock.
			 * The thread can't be swapped in any other way
			 * because its task has been swapped.
			 */
			assert(thr_act->swap_state & TH_SW_TASK_SWAPPING);
			assert(thread == THREAD_NULL ||
			       !(thread->state & (TH_SWAPPED_OUT|TH_RUN)));
			assert((thr_act->swap_state & TH_SW_STATE) == TH_SW_IN);
			/* assert(thread->state & TH_HALTED); */
			/* this also clears TH_SW_TASK_SWAPPING flag */
			thr_act->swap_state = TH_SW_GOING_OUT;
			if (thread != THREAD_NULL) {
				if (thread->top_act == thr_act) {
					thread->state |= TH_SWAPPED_OUT;
					/*
					 * Once we unlock the task, things can happen
					 * to the thread, so make sure it's consistent
					 * for thread_swapout.
					 */
				}
				thread->ref_count++;
				thread_unlock(thread);
				thread_unstop(thread);
			}
			splx(s);
			act_locked_act_reference(thr_act);
			act_unlock_thread(thr_act);
			task_unlock(task);

			thread_swapout(thr_act);	/* do the work */

			if (thread != THREAD_NULL)
				thread_deallocate(thread);
			act_deallocate(thr_act);
			task_swapper_lock();
		}
		assert_wait((event_t)&swapout_thread_q, FALSE);
		task_swapper_unlock();
		thread_block((void (*)(void)) 0);
	}
}
Example #19
kern_return_t
task_swapin(task_t task, boolean_t make_unswappable)
{
	register queue_head_t	*list;
	register thread_act_t	thr_act, next;
	thread_t		thread;
	int			s;
	boolean_t		swappable = TRUE;

	task_lock(task);
	switch (task->swap_state) {
	    case TASK_SW_OUT:
			{
			vm_map_t map = task->map;
			/*
			 * Task has made it all the way out, which means
			 * that vm_map_res_deallocate has been done; set 
			 * state to TASK_SW_COMING_IN, then bring map
			 * back in.  We could actually be racing with
			 * the thread_swapout_enqueue, which does the
			 * vm_map_res_deallocate, but that race is covered.
			 */
			task->swap_state = TASK_SW_COMING_IN;
			assert(task->swap_ast_waiting == 0);
			assert(map->res_count >= 0);
			task_unlock(task);
			mutex_lock(&map->s_lock);
			vm_map_res_reference(map);
			mutex_unlock(&map->s_lock);
			task_lock(task);
			assert(task->swap_state == TASK_SW_COMING_IN);
			}
			break;

	    case TASK_SW_GOING_OUT:
			/*
			 * Task isn't all the way out yet.  There is
			 * still at least one thread not swapped, and
			 * vm_map_res_deallocate has not been done.
			 */
			task->swap_state = TASK_SW_COMING_IN;
			assert(task->swap_ast_waiting > 0 ||
			       (task->swap_ast_waiting == 0 &&
				task->thr_act_count == 0));
			assert(task->map->res_count > 0);
			TASK_STATS_INCR(task_sw_race_going_out);
			break;
	    case TASK_SW_IN:
			assert(task->map->res_count > 0);
#if	TASK_SW_DEBUG
			task_swapper_lock();
			if (task_swap_debug && on_swapped_list(task)) {
				printf("task 0x%X on list, state is SW_IN\n",
					task);
				Debugger("");
			}
			task_swapper_unlock();
#endif	/* TASK_SW_DEBUG */
			TASK_STATS_INCR(task_sw_race_in);
			if (make_unswappable) {
				task->swap_state = TASK_SW_UNSWAPPABLE;
				task_unlock(task);
				task_swapout_ineligible(task);
			} else
				task_unlock(task);
			return(KERN_SUCCESS);
	    case TASK_SW_COMING_IN:
			/* 
			 * Raced with another task_swapin and lost;
			 * wait for other one to complete first
			 */
			assert(task->map->res_count >= 0);
			/*
			 * set MAKE_UNSWAPPABLE so that whoever is swapping
			 * the task in will make it unswappable, and return
			 */
			if (make_unswappable)
				task->swap_flags |= TASK_SW_MAKE_UNSWAPPABLE;
			task->swap_flags |= TASK_SW_WANT_IN;
			assert_wait((event_t)&task->swap_state, FALSE);
			task_unlock(task);
			thread_block((void (*)(void)) 0);
			TASK_STATS_INCR(task_sw_race_coming_in);
			return(KERN_SUCCESS);
	    case TASK_SW_UNSWAPPABLE:
			/* 
			 * This can happen, since task_terminate 
			 * unconditionally calls task_swapin.
			 */
			task_unlock(task);
			return(KERN_SUCCESS);
	    default:
			panic("task_swapin bad state");
			break;
	}
	if (make_unswappable)
		task->swap_flags |= TASK_SW_MAKE_UNSWAPPABLE;
	assert(task->swap_state == TASK_SW_COMING_IN);
	task_swapper_lock();
#if	TASK_SW_DEBUG
	if (task_swap_debug && !on_swapped_list(task)) {
		printf("task 0x%X not on list\n", task);
		Debugger("");
	}
#endif	/* TASK_SW_DEBUG */
	queue_remove(&swapped_tasks, task, task_t, swapped_tasks);
	tasks_swapped_out--;
	task_swapins++;
	task_swapper_unlock();

	/*
	 * Iterate through all threads for this task and 
	 * release them, as required.  They may not have been swapped
	 * out yet.  The task remains locked throughout.
	 */
	list = &task->thr_acts;
	thr_act = (thread_act_t) queue_first(list);
	while (!queue_end(list, (queue_entry_t) thr_act)) {
		boolean_t need_to_release;
		next = (thread_act_t) queue_next(&thr_act->thr_acts);
		/*
		 * Keep task_swapper_lock across thread handling
		 * to synchronize with task_swap_swapout_thread
		 */
		task_swapper_lock();
		thread = act_lock_thread(thr_act);
		s = splsched();
		if (thr_act->ast & AST_SWAPOUT) {
			/* thread hasn't gotten the AST yet, just clear it */
			thread_ast_clear(thr_act, AST_SWAPOUT);
			need_to_release = FALSE;
			TASK_STATS_INCR(task_sw_before_ast);
			splx(s);
			act_unlock_thread(thr_act);
		} else {
			/*
			 * If AST_SWAPOUT was cleared, then thread_hold,
			 * or equivalent was done.
			 */
			need_to_release = TRUE;
			/*
			 * Thread has hit AST, but it may not have
			 * been dequeued yet, so we need to check.
			 * NOTE: the thread may have been dequeued, but
			 * has not yet been swapped (the task_swapper_lock
			 * has been dropped, but the thread is not yet
			 * locked), and the TH_SW_TASK_SWAPPING flag may 
			 * not have been cleared.  In this case, we will do 
			 * an extra remque, which the task_swap_swapout_thread
			 * has made safe, and clear the flag, which is also
			 * checked by the t_s_s_t before doing the swapout.
			 */
			if (thread)
				thread_lock(thread);
			if (thr_act->swap_state & TH_SW_TASK_SWAPPING) {
				/* 
				 * hasn't yet been dequeued for swapout,
				 * so clear flags and dequeue it first.
				 */
				thr_act->swap_state &= ~TH_SW_TASK_SWAPPING;
				assert(thr_act->thread == THREAD_NULL || 
				       !(thr_act->thread->state &
					 TH_SWAPPED_OUT));
				queue_remove(&swapout_thread_q, thr_act,
					     thread_act_t, swap_queue);
				TASK_STATS_INCR(task_sw_before_swap);
			} else {
				TASK_STATS_INCR(task_sw_after_swap);
				/*
				 * It's possible that the thread was
				 * made unswappable before hitting the
				 * AST, in which case it's still running.
				 */
				if (thr_act->swap_state == TH_SW_UNSWAPPABLE) {
					need_to_release = FALSE;
					TASK_STATS_INCR(task_sw_unswappable);
				}
			}
			if (thread)
				thread_unlock(thread);
			splx(s);
			act_unlock_thread(thr_act);
		}
		task_swapper_unlock();

		/* 
		 * thread_release will swap in the thread if it's been
		 * swapped out.
		 */
		if (need_to_release) {
			act_lock_thread(thr_act);
			thread_release(thr_act);
			act_unlock_thread(thr_act);
		}
		thr_act = next;
	}

	if (task->swap_flags & TASK_SW_MAKE_UNSWAPPABLE) {
		task->swap_flags &= ~TASK_SW_MAKE_UNSWAPPABLE;
		task->swap_state = TASK_SW_UNSWAPPABLE;
		swappable = FALSE;
	} else {
		task->swap_state = TASK_SW_IN;
	}

	task_swaprss_in += pmap_resident_count(task->map->pmap);
	task_swap_total_time += sched_tick - task->swap_stamp;
	/* note when task came back in */
	task->swap_stamp = sched_tick;
	if (task->swap_flags & TASK_SW_WANT_IN) {
		task->swap_flags &= ~TASK_SW_WANT_IN;
		thread_wakeup((event_t)&task->swap_state);
	}
	assert((task->swap_flags & TASK_SW_ELIGIBLE) == 0);
	task_unlock(task);
#if	TASK_SW_DEBUG
	task_swapper_lock();
	if (task_swap_debug && on_swapped_list(task)) {
		printf("task 0x%X on list at end of swap in\n", task);
		Debugger("");
	}
	task_swapper_unlock();
#endif	/* TASK_SW_DEBUG */
	/*
	 * Make the task eligible to be swapped again
	 */
	if (swappable)
		task_swapout_eligible(task);
	return(KERN_SUCCESS);
}
Example #20
task_t
pick_outtask(void)
{
	register task_t		task;
	register task_t		target_task = TASK_NULL;
	unsigned long		task_rss;
	unsigned long		target_rss = 0;
	boolean_t		wired;
	boolean_t		active;
	int			nactive = 0;

	task_swapout_lock();
	if (queue_empty(&eligible_tasks)) {
		/* not likely to happen */
		task_swapout_unlock();
		return(TASK_NULL);
	}
	task = (task_t)queue_first(&eligible_tasks);
	while (!queue_end(&eligible_tasks, (queue_entry_t)task)) {
		int s;
		register thread_act_t thr_act;
		thread_t th;
		

		task_lock(task);
#if	MACH_RT
		/*
		 * Don't swap real-time tasks.
		 * XXX Should we enforce that or can we let really critical
		 * tasks use task_swappable() to make sure they never end up
		 * in the eligible list?
		 */
		if (task->policy & POLICYCLASS_FIXEDPRI) {
			goto tryagain;
		}
#endif	/* MACH_RT */
		if (!task->active) {
			TASK_STATS_INCR(inactive_task_count);
			goto tryagain;
		}
		if (task->res_act_count == 0) {
			TASK_STATS_INCR(empty_task_count);
			goto tryagain;
		}
		assert(!queue_empty(&task->thr_acts));
		thr_act = (thread_act_t)queue_first(&task->thr_acts);
		active = FALSE;
		th = act_lock_thread(thr_act);
		s = splsched();
		if (th != THREAD_NULL)
			thread_lock(th);
		if ((th == THREAD_NULL) ||
		    (th->state == TH_RUN) ||
		    (th->state & TH_WAIT)) {
			/*
		 	 * thread is "active": either runnable 
			 * or sleeping.  Count it and examine 
			 * it further below.
	 		 */
			nactive++;
			active = TRUE;
		}
		if (th != THREAD_NULL)
			thread_unlock(th);
		splx(s);
		act_unlock_thread(thr_act);
		if (active &&
		    (task->swap_state == TASK_SW_IN) &&
		    ((sched_tick - task->swap_stamp) > min_res_time)) {
			long rescount = pmap_resident_count(task->map->pmap);
			/*
			 * thread must be "active", task must be swapped
			 * in and resident for at least min_res_time
			 */
#if 0
/* DEBUG Test round-robin strategy.  Picking biggest task could cause extreme
 * unfairness to such large interactive programs as xterm.  Instead, pick the
 * first task that has any pages resident:
 */
			if (rescount > 1) {
				task->ref_count++;
				target_task = task;
				task_unlock(task);
				task_swapout_unlock();
				return(target_task);
			}
#else
			if (rescount > target_rss) {
				/*
				 * task is not swapped, and it has the
				 * largest rss seen so far.
				 */
				task->ref_count++;
				target_rss = rescount;
				assert(target_task != task);
				if (target_task != TASK_NULL)
					task_deallocate(target_task);
				target_task = task;
			}
#endif
		}
tryagain:
		task_unlock(task);
		task = (task_t)queue_next(&task->swapped_tasks);
	}
	task_swapout_unlock();
	/* only swap out if there are at least min_active_tasks */
	if (nactive < min_active_tasks) {
		if (target_task != TASK_NULL) {
			task_deallocate(target_task);
			target_task = TASK_NULL;
		}
	}
	return(target_task);
}
Example #21
/*
 *	task_swapout:
 * 	A reference to the task must be held.
 *
 *	Start swapping out a task by sending an AST_SWAPOUT to each thread.
 *	When the threads reach a clean point, they queue themselves up on the
 *	swapout_thread_q to be swapped out by the task_swap_swapout_thread.
 *	The task can be swapped in at any point in this process.
 *
 *	A task will not be fully swapped out (i.e. its map residence count
 *	at zero) until all currently-swapped threads run and reach
 *	a clean point, at which time they will be swapped again,
 *	decrementing the swap_ast_waiting count on the task.
 *
 *	Locking: no locks held upon entry and exit.
 *		 Task_lock is held throughout this function.
 */
kern_return_t
task_swapout(task_t task)
{
	thread_act_t thr_act;
	thread_t thread;
	queue_head_t *list;
	int s;

	task_swapout_lock();
	task_lock(task);
	/*
	 * NOTE: look into turning these into assertions if they
	 * are invariants.
	 */
	if ((task->swap_state != TASK_SW_IN) || (!task->active)) {
		task_unlock(task);
		task_swapout_unlock();
		return(KERN_FAILURE);
	}
	if (task->swap_flags & TASK_SW_ELIGIBLE) {
		queue_remove(&eligible_tasks, task, task_t, swapped_tasks);
		task->swap_flags &= ~TASK_SW_ELIGIBLE;
	}
	task_swapout_unlock();

	/* set state to avoid races with task_swappable(FALSE) */
	task->swap_state = TASK_SW_GOING_OUT;
	task->swap_rss = pmap_resident_count(task->map->pmap);
	task_swaprss_out += task->swap_rss;
	task->swap_ast_waiting = task->thr_act_count;

	/*
	 * halt all threads in this task:
	 * We don't need the thread list lock for traversal.
	 */
	list = &task->thr_acts;
	thr_act = (thread_act_t) queue_first(list);
	while (!queue_end(list, (queue_entry_t) thr_act)) {
		boolean_t swappable;
		thread_act_t ract;

		thread = act_lock_thread(thr_act);
		s = splsched();
		if (!thread)
			swappable = (thr_act->swap_state != TH_SW_UNSWAPPABLE);
		else {
			thread_lock(thread);
			swappable = TRUE;
			for (ract = thread->top_act; ract; ract = ract->lower)
				if (ract->swap_state == TH_SW_UNSWAPPABLE) {
					swappable = FALSE;
					break;
				}
		}
		if (swappable)
			thread_ast_set(thr_act, AST_SWAPOUT);
		if (thread)
			thread_unlock(thread);
		splx(s);
		assert((thr_act->ast & AST_TERMINATE) == 0);
		act_unlock_thread(thr_act);
		thr_act = (thread_act_t) queue_next(&thr_act->thr_acts);
	}

	task->swap_stamp = sched_tick;
	task->swap_nswap++;
	assert((task->swap_flags&TASK_SW_WANT_IN) == 0);
	/* put task on the queue of swapped out tasks */
	task_swapper_lock();
#if	TASK_SW_DEBUG
	if (task_swap_debug && on_swapped_list(task)) {
		printf("task 0x%X already on list\n", task);
		Debugger("");
	}
#endif	/* TASK_SW_DEBUG */
	queue_enter(&swapped_tasks, task, task_t, swapped_tasks);
	tasks_swapped_out++;
	task_swapouts++;
	task_swapper_unlock();
	task_unlock(task);

	return(KERN_SUCCESS);
}
Example #22
void
processor_doaction(
	processor_t	processor)
{
	thread_t			this_thread;
	spl_t				s;
	register processor_set_t	pset;
#if	MACH_HOST
	register processor_set_t	new_pset;
	register thread_t		thread;
	register thread_t		prev_thread = THREAD_NULL;
	thread_act_t			thr_act;
	boolean_t			have_pset_ref = FALSE;
#endif	/* MACH_HOST */

	/*
	 *	Get onto the processor to shutdown
	 */
	this_thread = current_thread();
	thread_bind(this_thread, processor);
	thread_block((void (*)(void)) 0);

	pset = processor->processor_set;
#if	MACH_HOST
	/*
	 *	If this is the last processor in the processor_set,
	 *	stop all the threads first.
	 */
	pset_lock(pset);
	if (pset->processor_count == 1) {
		thread = (thread_t) queue_first(&pset->threads);
		prev_thread = THREAD_NULL;
		pset->ref_count++;
		have_pset_ref = TRUE;
		pset->empty = TRUE;

		/*
		 * loop through freezing the processor set assignment
		 * and reference counting the threads;
		 */
		while (!queue_end(&pset->threads, (queue_entry_t) thread)) {
		    thread_reference(thread);
		    pset_unlock(pset);

		    /*
		     * Freeze the thread on the processor set.
		     * If it's moved, just release the reference.
		     * Get the next thread in the processor set list
		     * from the last one which was frozen.
		     */
		    if( thread_stop_freeze(thread, pset) )
		        prev_thread = thread;
		    else
			thread_deallocate(thread);

		    pset_lock(pset);
		    if( prev_thread != THREAD_NULL ) 
		        thread = (thread_t)queue_next(&prev_thread->pset_threads);
		    else
			thread = (thread_t) queue_first(&pset->threads);
		}

		/*
		 * Remove the processor from the set so that when the threads
		 * are unstopped below the ones blocked in the kernel don't
		 * start running again.
		 */
		s = splsched();
		processor_lock(processor);
		pset_remove_processor(pset, processor);

		/*
		 * Prevent race with another processor being added to the set
		 * See code after Restart_pset:
		 *   while(new_pset->empty && new_pset->processor_count > 0)
		 *
		 * ... it tests for the condition where a new processor is
		 * added to the set while the last one is still being removed.
		 */
		pset->processor_count++;	/* block new processors being added */
		assert( pset->processor_count == 1 );

		/*
		 * Release the thread assignment locks, unstop the threads and
		 * release the thread references which were taken above.
		 */
		thread = (thread_t) queue_first(&pset->threads);
		while( !queue_empty( &pset->threads) && (thread != THREAD_NULL) ) {
		    prev_thread = thread;
		    if( queue_end(&pset->threads, (queue_entry_t) thread) )
			thread = THREAD_NULL;
		    else
		        thread = (thread_t) queue_next(&prev_thread->pset_threads);
		    pset_unlock(pset);
		    thread_unfreeze(prev_thread);
		    thread_unstop(prev_thread);
		    thread_deallocate(prev_thread);
		    pset_lock(pset);
		}
		/*
		 * allow a processor to be added to the empty pset
		 */
		pset->processor_count--;
	}
	else { 
		/* not last processor in set */
#endif	/* MACH_HOST */
		/*
		 * At this point, it is ok to remove the processor from the pset.
		 */
		s = splsched();
		processor_lock(processor);
		pset_remove_processor(pset, processor);
#if	MACH_HOST
	}
	pset_unlock(pset);

	/*
	 *	Copy the next pset pointer into a local variable and clear
	 *	it because we are taking over its reference.
	 */
	new_pset = processor->processor_set_next;
	processor->processor_set_next = PROCESSOR_SET_NULL;

	if (processor->state == PROCESSOR_ASSIGN) {

Restart_pset:
	    /*
	     *	Nasty problem: we want to lock the target pset, but
	     *	we have to enable interrupts to do that which requires
	     *  dropping the processor lock.  While the processor
	     *  is unlocked, it could be reassigned or shutdown.
	     */
	    processor_unlock(processor);
	    splx(s);

	    /*
	     *  Lock target pset and handle remove last / assign first race.
	     *	Only happens if there is more than one action thread.
	     */
	    pset_lock(new_pset);
	    while (new_pset->empty && new_pset->processor_count > 0) {
		pset_unlock(new_pset);
		while (*(volatile boolean_t *)&new_pset->empty &&
		       *(volatile int *)&new_pset->processor_count > 0)
			/* spin */;
		pset_lock(new_pset);
	    }

	    /*
	     *	Finally relock the processor and see if something changed.
	     *	The only possibilities are assignment to a different pset
	     *	and shutdown.
	     */
	    s = splsched();
	    processor_lock(processor);

	    if (processor->state == PROCESSOR_SHUTDOWN) {
		pset_unlock(new_pset);
		goto shutdown; /* will release pset reference */
	    }

	    if (processor->processor_set_next != PROCESSOR_SET_NULL) {
		/*
		 *	Processor was reassigned.  Drop the reference
		 *	we have on the wrong new_pset, and get the
		 *	right one.  Involves lots of lock juggling.
		 */
		processor_unlock(processor);
		splx(s);
		pset_unlock(new_pset);
		pset_deallocate(new_pset);
		s = splsched();
	        processor_lock(processor);
		new_pset = processor->processor_set_next;
		processor->processor_set_next = PROCESSOR_SET_NULL;
		goto Restart_pset;
	    }

	    /*
	     *	If the pset has been deactivated since the operation
	     *	was requested, redirect to the default pset.
	     */
	    if (!(new_pset->active)) {
		pset_unlock(new_pset);
		pset_deallocate(new_pset);
		new_pset = &default_pset;
		pset_lock(new_pset);
		new_pset->ref_count++;
	    }

	    /*
	     *	Do assignment, then wakeup anyone waiting for it.
	     *	Finally context switch to have it take effect.
	     */
	    pset_add_processor(new_pset, processor);
	    if (new_pset->empty) {
		/*
		 *	Set all the threads loose
		 */
		thread = (thread_t) queue_first(&new_pset->threads);
		while (!queue_end(&new_pset->threads,(queue_entry_t)thread)) {
		    thr_act = thread_lock_act(thread);
		    thread_release(thread->top_act);
		    act_unlock_thread(thr_act);
		    thread = (thread_t) queue_next(&thread->pset_threads);
		}
		new_pset->empty = FALSE;
	    }
	    processor->processor_set_next = PROCESSOR_SET_NULL;
	    processor->state = PROCESSOR_RUNNING;
	    thread_wakeup((event_t)processor);
	    processor_unlock(processor);
	    splx(s);
	    pset_unlock(new_pset);

	    /*
	     *	Clean up dangling references, and release our binding.
	     */
	    pset_deallocate(new_pset);
	    if (have_pset_ref)
		pset_deallocate(pset);
	    if (prev_thread != THREAD_NULL)
		thread_deallocate(prev_thread);
	    thread_bind(this_thread, PROCESSOR_NULL);

	    thread_block((void (*)(void)) 0);
	    return;
	}

shutdown:
#endif	/* MACH_HOST */
	
	/*
	 *	Do shutdown, make sure we live when processor dies.
	 */
	if (processor->state != PROCESSOR_SHUTDOWN) {
		printf("state: %d\n", processor->state);
	    	panic("action_thread -- bad processor state");
	}
	processor_unlock(processor);
	/*
	 *	Clean up dangling references, and release our binding.
	 */
#if	MACH_HOST
	if (new_pset != PROCESSOR_SET_NULL)
		pset_deallocate(new_pset);
	if (have_pset_ref)
		pset_deallocate(pset);
	if (prev_thread != THREAD_NULL)
		thread_deallocate(prev_thread);
#endif	/* MACH_HOST */

	thread_bind(this_thread, PROCESSOR_NULL);
	switch_to_shutdown_context(this_thread,
				   processor_doshutdown,
				   processor);
	splx(s);
}
Example #23
/*
 *	Process an AST_SWAPOUT.
 */
void
swapout_ast()
{
	spl_t		s;
	thread_act_t	act;
	thread_t	thread;

	act = current_act();

	/*
	 * Task is being swapped out.  First mark it as suspended
	 * and halted, then call thread_swapout_enqueue to put
	 * the thread on the queue for task_swap_swapout_threads
	 * to swap out the thread.
	 */
	/*
	 * Don't swap unswappable threads
	 */
	thread = act_lock_thread(act);
	s = splsched();
	if (thread)
		thread_lock(thread);
	if ((act->ast & AST_SWAPOUT) == 0) {
		/*
		 * Race with task_swapin. Abort swapout.
		 */
		task_swap_ast_aborted++;	/* not locked XXX */
		if (thread)
			thread_unlock(thread);
		splx(s);
		act_unlock_thread(act);
	} else if (act->swap_state == TH_SW_IN) {
		/*
		 * Mark swap_state as TH_SW_TASK_SWAPPING to avoid
		 * race with thread swapper, which will only
		 * swap thread if swap_state is TH_SW_IN.
		 * This way, the thread can only be swapped by
		 * the task swapping mechanism.
		 */
		act->swap_state |= TH_SW_TASK_SWAPPING;
		/* assert(act->suspend_count == 0); XXX ? */
		if (thread)
			thread_unlock(thread);
		if (act->suspend_count++ == 0)	/* inline thread_hold */
			install_special_handler(act);
		/* self->state |= TH_HALTED; */
		thread_ast_clear(act, AST_SWAPOUT);
		/*
		 * Initialize the swap_queue fields to allow an extra
		 * queue_remove() in task_swapin if we lose the race
		 * (task_swapin can be called before we complete
		 * thread_swapout_enqueue).
		 */
		queue_init((queue_t) &act->swap_queue);
		splx(s);
		act_unlock_thread(act);
		/* this must be called at normal interrupt level */
		thread_swapout_enqueue(act);
	} else {
		/* thread isn't swappable; continue running */
		assert(act->swap_state == TH_SW_UNSWAPPABLE);
		if (thread)
			thread_unlock(thread);
		thread_ast_clear(act, AST_SWAPOUT);
		splx(s);
		act_unlock_thread(act);
	}
}