Example #1
/*
 * Terminate a thread.
 */
kern_return_t
thread_terminate(
	register thread_act_t	act)
{
	kern_return_t	result;

	if (act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (	act->task == kernel_task	&&
			act != current_act()			)
		return (KERN_FAILURE);

	result = thread_terminate_internal(act);

	/*
	 * If a kernel thread is terminating itself, force an AST here.
	 * Kernel threads don't normally pass through the AST checking
	 * code - and all threads finish their own termination in the
	 * special handler APC.
	 */
	if (act->task == kernel_task) {
		ml_set_interrupts_enabled(FALSE);
		assert(act == current_act());
		ast_taken(AST_APC, TRUE);
		panic("thread_terminate");
	}

	return (result);
}
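The comment in the body is the interesting part: a kernel thread that terminates itself never returns from this call, because the forced AST_APC runs the special handler that finishes the termination. A minimal sketch of that usage follows; the daemon loop and its helpers are hypothetical, not from the source.

/*
 * Hypothetical sketch: a kernel daemon terminating itself through
 * thread_terminate().  Since act->task == kernel_task here, the
 * forced AST above completes the termination, so the call never
 * returns.  work_pending()/do_one_item() are made-up helpers.
 */
static void
cleanup_daemon(void)
{
	while (work_pending())
		do_one_item();

	(void) thread_terminate(current_act());	/* does not return */
	/* NOTREACHED */
}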
Example #2
__private_extern__
kern_return_t chudxnu_thread_set_state(thread_act_t thr_act,
                                       thread_flavor_t flavor,
                                       thread_state_t tstate,
                                       mach_msg_type_number_t count,
                                       boolean_t user_only)
{
	if(thr_act==current_act()) {
		if(flavor==PPC_THREAD_STATE || flavor==PPC_THREAD_STATE64) {
			struct savearea *sv;
			if(user_only) {
				sv = chudxnu_private_get_user_regs();
			} else {
				sv = chudxnu_private_get_regs();
			}
			return chudxnu_copy_threadstate_to_savearea(sv, flavor, tstate, &count);
		} else if(flavor==PPC_FLOAT_STATE && user_only) {
#warning chudxnu_thread_set_state() does not yet support supervisor FP
			return machine_thread_set_state(current_act(), flavor, tstate, count);
		} else if(flavor==PPC_VECTOR_STATE && user_only) {
#warning chudxnu_thread_set_state() does not yet support supervisor VMX
			return machine_thread_set_state(current_act(), flavor, tstate, count);
		} else {
			return KERN_INVALID_ARGUMENT;
		}
	} else {
		return machine_thread_set_state(thr_act, flavor, tstate, count);
	}
}
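A plausible way to drive this, pairing it with chudxnu_thread_get_state as used in Example #20: read the current thread's user-mode state, adjust it, and write it back. The helper name and the SRR0 tweak are illustrative assumptions, not from the source.

/*
 * Hypothetical sketch: fetch the current thread's user-mode PPC
 * state, advance the PC one instruction, and store it back.
 * Error handling is elided.
 */
static void
skip_current_instruction(void)
{
	struct ppc_thread_state64 state;
	mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;

	if (chudxnu_thread_get_state(current_act(), PPC_THREAD_STATE64,
			(thread_state_t)&state, &count, TRUE) == KERN_SUCCESS) {
		state.srr0 += 4;	/* one 32-bit PPC instruction */
		(void) chudxnu_thread_set_state(current_act(), PPC_THREAD_STATE64,
				(thread_state_t)&state, count, TRUE);
	}
}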
Example #3
boolean_t
db_check_access(
	vm_offset_t	addr,
	int		size,
	task_t		task)
{
	register int	n;
	unsigned int	kern_addr;

	if (task == kernel_task) {
	    if (kernel_task == TASK_NULL)  return(TRUE);
	} else if (task == TASK_NULL) {
	    /* No task given: fall back to the current activation's task. */
	    if (current_act() == THR_ACT_NULL) return(FALSE);
	    task = current_act()->task;
	}

	while (size > 0) {
		if(!pmap_find_phys(task->map->pmap, (addr64_t)addr)) return (FALSE);	/* Fail if page not mapped */
	    n = trunc_page_32(addr+PPC_PGBYTES) - addr;
	    if (n > size)
		n = size;
	    size -= n;
	    addr += n;
	}
	return(TRUE);
}
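Examples #19 and #21 below show the intended call pattern: guard every debugger memory access with this check before touching the bytes. A condensed sketch of that pattern follows; the helper name is hypothetical.

/*
 * Hypothetical sketch: read one word from a task's address space,
 * failing cleanly instead of faulting on an unmapped page.
 */
boolean_t
db_safe_read_word(db_addr_t addr, task_t task, db_expr_t *valp)
{
	if (!db_check_access((vm_offset_t)addr, sizeof (db_expr_t), task))
		return FALSE;
	*valp = db_get_task_value(addr, sizeof (db_expr_t), FALSE, task);
	return TRUE;
}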
Example #4
/*
 *	thread_getstatus:
 *
 *	Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
	register thread_act_t	act,
	int						flavor,
	thread_state_t			tstate,
	mach_msg_type_number_t	*count)
{
	kern_return_t		result = KERN_SUCCESS;
	thread_t			thread;

	thread = act_lock_thread(act);

	if (	act != current_act()			&&
			(act->suspend_count == 0	||
			 thread == THREAD_NULL		||
			 (thread->state & TH_RUN)	||
			 thread->top_act != act)		)
		result = KERN_FAILURE;

	if (result == KERN_SUCCESS)
		result = machine_thread_get_state(act, flavor, tstate, count);

	act_unlock_thread(act);

	return (result);
}
Example #5
kern_return_t
thread_set_cthread_self(int self)
{
   current_act()->mact.pcb->cthread_self = (unsigned int)self;
   
   return (KERN_SUCCESS);
}
Example #6
void
db_task_trap(
	int		type,
	int		code,
	boolean_t	user_space)
{
	jmp_buf_t db_jmpbuf;
	jmp_buf_t *prev;
	boolean_t	bkpt;
	boolean_t	watchpt;
	task_t		task;
	task_t		task_space;

	task = db_current_task();
	task_space = db_target_space(current_act(), user_space);
	bkpt = IS_BREAKPOINT_TRAP(type, code);
	watchpt = IS_WATCHPOINT_TRAP(type, code);

	/*
	 * Note:  we look up PC values in an address space (task_space),
	 * but print symbols using a (task-specific) symbol table, found
	 * using task.
	 */
	db_init_default_act();
	db_check_breakpoint_valid();
	if (db_stop_at_pc(&bkpt, task, task_space)) {
	    if (db_inst_count) {
		db_printf("After %d instructions (%d loads, %d stores),\n",
			  db_inst_count, db_load_count, db_store_count);
	    }
	    if (bkpt)
		db_printf("Breakpoint at  ");
	    else if (watchpt)
		db_printf("Watchpoint at  ");
	    else
		db_printf("Stopped at  ");
	    db_dot = PC_REGS(DDB_REGS);

	    prev = db_recover;
	    if (_setjmp(db_recover = &db_jmpbuf) == 0) {
#if defined(__alpha)
		db_print_loc(db_dot, task_space);
		db_printf("\n\t");
		db_print_inst(db_dot, task_space);
#else /* !defined(__alpha) */
#if defined(__powerpc__)
		db_print_loc_and_inst(db_dot, task_space);
#else	/* __powerpc__ */
		db_print_loc_and_inst(db_dot, task);
#endif	/* __powerpc__ */
#endif /* defined(__alpha) */
	    } else
		db_printf("Trouble printing location %#X.\n", db_dot);
	    db_recover = prev;

	    db_command_loop();
	}

	db_restart_at_pc(watchpt, task_space);
}
Example #7
/*
 * Examine (print) data.
 */
void
db_examine_cmd(
    db_expr_t	addr,
    int		have_addr,
    db_expr_t	count,
    char *		modif)
{
    thread_act_t	thr_act;
    extern char	db_last_modifier[];

    if (modif[0] != '\0')
        strcpy(db_examine_format, modif);

    if (count == -1)
        count = 1;
    db_examine_count = count;
    if (db_option(modif, 't')) {
        if (modif == db_last_modifier)
            thr_act = db_examine_act;
        else if (!db_get_next_act(&thr_act, 0))
            return;
    } else if (db_option(modif,'u'))
        thr_act = current_act();
    else
        thr_act = THR_ACT_NULL;

    db_examine_act = thr_act;
    db_examine((db_addr_t) addr, db_examine_format, count,
               db_act_to_task(thr_act));
}
Example #8
void
action_thread(void)
{
	register processor_t	processor;
	spl_t			s;

	thread_swappable(current_act(), FALSE);

	while (TRUE) {
		s = splsched();
		simple_lock(&action_lock);
		while ( !queue_empty(&action_queue)) {
			processor = (processor_t) queue_first(&action_queue);
			queue_remove(&action_queue, processor, processor_t,
				     processor_queue);
			simple_unlock(&action_lock);
			splx(s);

			processor_doaction(processor);

			s = splsched();
			simple_lock(&action_lock);
		}

		assert_wait((event_t) &action_queue, FALSE);
		simple_unlock(&action_lock);
		splx(s);
		counter(c_action_thread_block++);
		thread_block((void (*)(void)) 0);
	}
}
Example #9
/*
 *	Change thread's machine-dependent state.  Called with nothing
 *	locked.  Returns same way.
 */
kern_return_t
thread_set_state(
	register thread_act_t	act,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count)
{
	kern_return_t		result = KERN_SUCCESS;
	thread_t			thread;

	if (act == THR_ACT_NULL || act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	thread_hold(act);

	for (;;) {
		thread_t			thread1;

		if (	thread == THREAD_NULL		||
				thread->top_act != act		)
			break;
		act_unlock_thread(act);

		if (!thread_stop(thread)) {
			result = KERN_ABORTED;
			(void)act_lock_thread(act);
			thread = THREAD_NULL;
			break;
		}

		thread1 = act_lock_thread(act);
		if (thread1 == thread)
			break;

		thread_unstop(thread);
		thread = thread1;
	}

	if (result == KERN_SUCCESS)
		result = machine_thread_set_state(act, flavor, state, state_count);

	if (	thread != THREAD_NULL		&&
			thread->top_act == act		)
	    thread_unstop(thread);

	thread_release(act);
	act_unlock_thread(act);

	return (result);
}
Example #10
kern_return_t
thread_fast_set_cthread_self(int self)
{
  pcb_t pcb;
  pcb = (pcb_t)current_act()->mact.pcb;
  thread_compose_cthread_desc((unsigned int)self, pcb);
  pcb->cthread_self = (unsigned int)self; /* preserve old func too */
  return (USER_CTHREAD);
}
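Examples #5, #10, and #26 all manipulate pcb->cthread_self, the per-thread "self" word that the user-level threads library reads back. A sketch of the round trip under that assumption follows; the demo function is hypothetical.

/*
 * Hypothetical sketch: pair the setter above with the getter from
 * Example #26.  USER_CTHREAD is the selector handed back to user
 * space so it can reach its self word.
 */
static void
cthread_self_demo(int self)
{
	int sel = (int) thread_fast_set_cthread_self(self);

	assert(sel == USER_CTHREAD);
	assert((int) thread_get_cthread_self() == self);
}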
Example #11
kern_return_t
thread_dup(
	register thread_act_t	target)
{
	kern_return_t		result = KERN_SUCCESS;
	thread_act_t		self = current_act();
	thread_t			thread;

	if (target == THR_ACT_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(target);

	if (!target->active) {
		act_unlock_thread(target);
		return (KERN_TERMINATED);
	}

	thread_hold(target);

	for (;;) {
		thread_t			thread1;

		if (	thread == THREAD_NULL		||
				thread->top_act != target	)
			break;
		act_unlock_thread(target);

		if (!thread_stop(thread)) {
			result = KERN_ABORTED;
			(void)act_lock_thread(target);
			thread = THREAD_NULL;
			break;
		}

		thread1 = act_lock_thread(target);
		if (thread1 == thread)
			break;

		thread_unstop(thread);
		thread = thread1;
	}

	if (result == KERN_SUCCESS)
		result = machine_thread_dup(self, target);

	if (	thread != THREAD_NULL		&&
			thread->top_act == target	)
	    thread_unstop(thread);

	thread_release(target);
	act_unlock_thread(target);

	return (result);
}
Example #12
static void
_sleep_continue(void)
{
	register struct proc *p;
	register thread_t self = current_act();
	struct uthread * ut;
	int sig, catch;
	int error = 0;

	ut = get_bsdthread_info(self);
	catch = ut->uu_pri & PCATCH;
Example #13
db_thread_breakpoint_t
db_find_thread_breakpoint_here(
	task_t		task,
	db_addr_t	addr)
{
	db_breakpoint_t bkpt;

	bkpt = db_find_breakpoint(task, (db_addr_t)addr);
	if (bkpt == 0)
	    return(0);
	return(db_find_thread_breakpoint(bkpt, current_act()));
}
Example #14
void
unix_syscall_return(int error)
{
    thread_act_t		thread;
	volatile int *rval;
	struct i386_saved_state *regs;
	struct proc *p;
	struct proc *current_proc();
	unsigned short code;
	vm_offset_t params;
	struct sysent *callp;
	extern int nsysent;

    thread = current_act();
    rval = (int *)get_bsduthreadrval(thread);
	p = current_proc();

	regs = USER_REGS(thread);

	/* reconstruct code for tracing before blasting eax */
	code = regs->eax;
	params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
	if (callp == sysent) {
	  code = fuword(params);
	}

	if (error == ERESTART) {
		regs->eip -= 7;
	}
	else if (error != EJUSTRETURN) {
		if (error) {
		    regs->eax = error;
		    regs->efl |= EFL_CF;	/* carry bit */
		} else { /* (not error) */
		    regs->eax = rval[0];
		    regs->edx = rval[1];
		    regs->efl &= ~EFL_CF;
		} 
	}

	ktrsysret(p, code, error, rval[0], callp->sy_funnel);

	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
		error, rval[0], rval[1], 0, 0);

	if (callp->sy_funnel != NO_FUNNEL)
    		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}
Example #15
boolean_t
db_phys_eq(
	task_t		task1,
	vm_offset_t	addr1,
	task_t		task2,
	vm_offset_t	addr2)
{
	addr64_t	physa, physb;

	if ((addr1 & (PPC_PGBYTES-1)) != (addr2 & (PPC_PGBYTES-1)))	/* Is byte displacement the same? */
		return FALSE;

	if (task1 == TASK_NULL) {						/* See if there is a task active */
		if (current_act() == THR_ACT_NULL)			/* See if there is a current task */
			return FALSE;
		task1 = current_act()->task;				/* If so, use that one */
	}
	
	if(!(physa = db_vtophys(task1->map->pmap, (vm_offset_t)trunc_page_32(addr1)))) return FALSE;	/* Get real address of the first */
	if(!(physb = db_vtophys(task2->map->pmap, (vm_offset_t)trunc_page_32(addr2)))) return FALSE;	/* Get real address of the second */
	
	return (physa == physb);						/* Check if they are equal, then return... */
}
Example #16
/*
 * The old map reference is returned.
 */
vm_map_t
swap_task_map(task_t task,vm_map_t map)
{
	thread_act_t act = current_act();
	vm_map_t old_map;

	if (task != act->task)
		panic("swap_task_map");

	task_lock(task);
	old_map = task->map;
	act->map = task->map = map;
	task_unlock(task);
	return old_map;
}
Example #17
void 
afs_osi_fullSigMask(void)
{
#ifndef AFS_DARWIN80_ENV
    struct uthread *user_thread = (struct uthread *)get_bsdthread_info(current_act());
       
    /* Protect original sigmask */
    if (!user_thread->uu_oldmask) {
	/* Back up current sigmask */
	user_thread->uu_oldmask = user_thread->uu_sigmask;
	/* Mask all signals */
	user_thread->uu_sigmask = ~(sigset_t)0;
    }
#endif
}
Example #18
void 
afs_osi_fullSigRestore(void)
{
#ifndef AFS_DARWIN80_ENV
    struct uthread *user_thread = (struct uthread *)get_bsdthread_info(current_act());
       
    /* Protect original sigmask */
    if (user_thread->uu_oldmask) {
	/* Restore original sigmask */
	user_thread->uu_sigmask = user_thread->uu_oldmask;
	/* Clear the oldmask */
	user_thread->uu_oldmask = (sigset_t)0;
    }
#endif
}
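The two AFS helpers in Examples #17 and #18 are meant to bracket a region that must not be interrupted by signals: the first saves uu_sigmask and masks everything, the second restores it. A minimal sketch of that pairing; the critical-section body is hypothetical.

/*
 * Hypothetical sketch: block all signals around a critical section,
 * then restore the caller's original mask.
 */
static void
afs_critical_section(void)
{
    afs_osi_fullSigMask();	/* save uu_sigmask, mask all signals */
    /* ... work that must not observe signal delivery ... */
    afs_osi_fullSigRestore();	/* put the saved mask back */
}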
Example #19
void
db_clear_breakpoints(void)
{
	register db_breakpoint_t bkpt, *bkptp;
	register task_t	 task;
	db_expr_t inst;
	thread_act_t	 cur_act = current_act();
	task_t	 cur_task = (cur_act && !cur_act->kernel_loaded) ?
			cur_act->task: TASK_NULL;

	if (db_breakpoints_inserted) {
	    bkptp = &db_breakpoint_list;
	    for (bkpt = *bkptp; bkpt; bkpt = *bkptp) {
		task = bkpt->task;
		if (bkpt->flags & BKPT_USR_GLOBAL) {
		    if (cur_task == TASK_NULL) {
			bkptp = &bkpt->link;
			continue;
		    }
		    task = cur_task;
		}
		if ((bkpt->flags & BKPT_SET_IN_MEM)
		    && DB_CHECK_ACCESS(bkpt->address, BKPT_SIZE, task)) {
		    inst = db_get_task_value(bkpt->address, BKPT_SIZE, FALSE, 
								task);
		    if (inst != BKPT_SET(inst)) {
			if (bkpt->flags & BKPT_USR_GLOBAL) {
			    bkptp = &bkpt->link;
			    continue;
			}
			db_force_delete_breakpoint(bkpt, 0, FALSE);
			*bkptp = bkpt->link;
		        db_breakpoint_free(bkpt);
			continue;
		    }
		    db_put_task_value(bkpt->address, BKPT_SIZE,
				 bkpt->bkpt_inst, task);
		    bkpt->flags &= ~BKPT_SET_IN_MEM;
		}
		bkptp = &bkpt->link;
	    }
	    db_breakpoints_inserted = FALSE;
	}
}
Example #20
static void chudxnu_private_cpu_timer_callback(timer_call_param_t param0, timer_call_param_t param1)
{
    int cpu;
    boolean_t oldlevel;
    struct ppc_thread_state64 state;
    mach_msg_type_number_t count;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    cpu = cpu_number();

    count = PPC_THREAD_STATE64_COUNT;
    if(chudxnu_thread_get_state(current_act(), PPC_THREAD_STATE64, (thread_state_t)&state, &count, FALSE)==KERN_SUCCESS) {
        if(cpu_timer_callback_fn[cpu]) {
            (cpu_timer_callback_fn[cpu])(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);
}
Example #21
void
db_set_breakpoints(void)
{
	register db_breakpoint_t bkpt;
	register task_t	task;
	db_expr_t	inst;
	thread_act_t	cur_act = current_act();
	task_t		cur_task =
				(cur_act && !cur_act->kernel_loaded) ?
					cur_act->task : TASK_NULL;
	boolean_t	inserted = TRUE;

	if (!db_breakpoints_inserted) {
	    for (bkpt = db_breakpoint_list; bkpt != 0; bkpt = bkpt->link) {
		if (bkpt->flags & BKPT_SET_IN_MEM)
		    continue;
		task = bkpt->task;
		if (bkpt->flags & BKPT_USR_GLOBAL) {
		    if ((bkpt->flags & BKPT_1ST_SET) == 0) {
		        if (cur_task == TASK_NULL)
			    continue;
		        task = cur_task;
		    } else
			bkpt->flags &= ~BKPT_1ST_SET;
		}
		if (DB_CHECK_ACCESS(bkpt->address, BKPT_SIZE, task)) {
		    inst = db_get_task_value(bkpt->address, BKPT_SIZE, FALSE,
								task);
		    if (inst == BKPT_SET(inst))
			continue;
		    bkpt->bkpt_inst = inst;
		    db_put_task_value(bkpt->address,
				BKPT_SIZE,
				BKPT_SET(bkpt->bkpt_inst), task);
		    bkpt->flags |= BKPT_SET_IN_MEM;
		} else {
		    inserted = FALSE;
		}
	    }
	    db_breakpoints_inserted = inserted;
	}
}
Example #22
void
profile_thread(void)
{
    spl_t	    s;
    buffer_t	    buf_entry;
    queue_entry_t   prof_queue_entry;
    prof_data_t	    pbuf;
    kern_return_t   kr;
    int		    j;

    thread_swappable(current_act(), FALSE);

    /* Initialise the queue header for the prof_queue */
    mpqueue_init(&prof_queue);

    while (TRUE) {

	/* Dequeue the first buffer. */
	s = splsched();
	mpdequeue_head(&prof_queue, &prof_queue_entry);
	splx(s);

	if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF) { 
	    assert_wait((event_t) profile_thread, FALSE);
	    thread_block((void (*)(void)) 0);
	    if (current_thread()->wait_result != THREAD_AWAKENED)
		break;
	} else 
#if DCI
	{
	    register int    sum_samples = 0;
	    int		    i;

	    pbuf = buf_entry->p_prof;
	    /*
	     * Sum the sample counts from all the cpus on the machine.
	     */
	    for(i=0;i < NCPUS; i++)
		sum_samples += buf_entry->p_index[i];

	    kr = send_samples(pbuf->prof_port, (void *)buf_entry->p_zone,
			(mach_msg_type_number_t)sum_samples);
	    if (kr != KERN_SUCCESS)
	    {
		task_suspend(pbuf->task); /* suspend task */
		kr = send_notices(pbuf->prof_port, (void *)buf_entry->p_zone,
				  (mach_msg_type_number_t)sum_samples,
				  MACH_SEND_ALWAYS);
	    }
	    bzero((char *)buf_entry->p_zone, NCPUS*SIZE_PROF_BUFFER);
#else
	{
	    int		    dropped;

	    pbuf = buf_entry->p_prof;
	    kr = send_samples(pbuf->prof_port, (void *)buf_entry->p_zone,
			(mach_msg_type_number_t)buf_entry->p_index);
	    profile_sample_count += buf_entry->p_index;
	    if (kr != KERN_SUCCESS)
	      printf("send_samples(%x, %x, %d) error %x\n",
			pbuf->prof_port, buf_entry->p_zone, buf_entry->p_index, kr); 
	    dropped = buf_entry->p_dropped;
	    if (dropped > 0) {
		printf("kernel: profile dropped %d sample%s\n", dropped,
		       dropped == 1 ? "" : "s");
		buf_entry->p_dropped = 0;
	    }

#endif /* DCI */
	    /* Indicate you've finished the dirty job */
#if DCI
	    {
		int i;
		for(i=0;i<NCPUS;i++)
		    buf_entry->p_full[i] = FALSE;
	    }
#else
	    buf_entry->p_full = FALSE;
#endif /* DCI */
	    if (buf_entry->p_wakeme)
	      thread_wakeup((event_t) &buf_entry->p_wakeme);
	}

    }
    /* The profile thread has been signalled to exit.  Any threads waiting
       for the last buffer of samples to be acknowledged should be woken
       up now.  */
    profile_thread_id = THREAD_NULL;
    while (1) {
	s = splsched();
	mpdequeue_head(&prof_queue, &prof_queue_entry);
	splx(s);
	if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF)
	    break;
	if (buf_entry->p_wakeme)
	    thread_wakeup((event_t) &buf_entry->p_wakeme);
    }
#if 0	/* XXXXX */
    thread_halt_self();
#else
	panic("profile_thread(): halt_self");
#endif	/* XXXXX */
}

/*
 *****************************************************************************
 * send_last_sample is the drain mechanism to allow partial profiled buffers
 * to be sent to the receive_prof thread in the server.
 *****************************************************************************
*/

void
send_last_sample_buf(prof_data_t pbuf)
{
    spl_t    s;
    buffer_t buf_entry;

    if (pbuf == NULLPROFDATA)
	return;

    /* Request that the last PC buffer be sent.
     * Make a request to the profile_thread by inserting
     * the buffer in the send queue, and wake it up.
     * The last buffer must be inserted at the head of the
     * send queue, so the profile_thread handles it immediately.
     */
    buf_entry = pbuf->prof_area + pbuf->prof_index;
    buf_entry->p_prof = pbuf;

    /*
     * Watch out in case the profile thread exits while we are about to
     * queue data for it.
     */
    s = splsched();
    if (profile_thread_id == THREAD_NULL)
	splx(s);
    else {
	buf_entry->p_wakeme = 1;
	mpenqueue_tail(&prof_queue, &buf_entry->p_list);
	thread_wakeup((event_t) profile_thread);
	assert_wait((event_t) &buf_entry->p_wakeme, TRUE);
	splx(s); 
	thread_block((void (*)(void)) 0);
    }
}
Example #23
void
ast_check(void)
{
	register int		mycpu;
	register processor_t	myprocessor;
	register thread_t	thread = current_thread();
	spl_t			s = splsched();

	mp_disable_preemption();
	mycpu = cpu_number();

	/*
	 *	Check processor state for ast conditions.
	 */
	myprocessor = cpu_to_processor(mycpu);
	switch(myprocessor->state) {
	    case PROCESSOR_OFF_LINE:
	    case PROCESSOR_IDLE:
	    case PROCESSOR_DISPATCHING:
		/*
		 *	No ast.
		 */
	    	break;

#if	NCPUS > 1
	    case PROCESSOR_ASSIGN:
	    case PROCESSOR_SHUTDOWN:
	        /*
		 * 	Need ast to force action thread onto processor.
		 *
		 * XXX  Should check if action thread is already there.
		 */
		ast_on(mycpu, AST_BLOCK);
		break;
#endif	/* NCPUS > 1 */

	    case PROCESSOR_RUNNING:
	    case PROCESSOR_VIDLE:

		/*
		 *	Propagate thread ast to processor.  If we already
		 *	need an ast, don't look for more reasons.
		 */
		ast_propagate(current_act(), mycpu);
		if (ast_needed(mycpu))
			break;

		/*
		 *	Context switch check.
		 */
		if (csw_needed(thread, myprocessor)) {
			ast_on(mycpu, (myprocessor->first_quantum ?
			       AST_BLOCK : AST_QUANTUM));
		}
		break;

	    default:
	        panic("ast_check: Bad processor state");
	}
	mp_enable_preemption();
	splx(s);
}
Example #24
/*
 * act_machine_set_state:
 *
 * Set the machine-dependent state of the specified thread.
 */
kern_return_t 
act_machine_set_state(
		      thread_act_t	     thr_act,
		      thread_flavor_t	     flavor,
		      thread_state_t	     tstate,
		      mach_msg_type_number_t count)
{
    int	kernel_act = thr_act->kernel_loading ||	thr_act->kernel_loaded;

#if	MACH_ASSERT
    if (watchacts & WA_STATE)
	printf("act_%x act_m_set_state(thr_act=%x,flav=%x,st=%x,cnt=%x)\n",
	       current_act(), thr_act, flavor, tstate, count);
#endif	/* MACH_ASSERT */

    switch (flavor) {
    case THREAD_SYSCALL_STATE:
    {
	register struct thread_syscall_state *state;
	register struct hp700_saved_state   *saved_state = USER_REGS(thr_act);

	state = (struct thread_syscall_state *)tstate;

	saved_state->r1 = state->r1;
	saved_state->rp = state->rp;
	saved_state->r3 = state->r3;
	saved_state->t1 = state->t1;
	saved_state->arg3 = state->arg3;
	saved_state->arg2 = state->arg2;
	saved_state->arg1 = state->arg1;
	saved_state->arg0 = state->arg0;
	saved_state->dp = state->dp;
	saved_state->ret0 = state->ret0;
	saved_state->ret1 = state->ret1;
	saved_state->sp = state->sp;
	if(kernel_act)
	{
	    saved_state->iioq_head = state->iioq_head;
	    saved_state->iioq_tail = state->iioq_tail;
	}
	else
	{
	    saved_state->iioq_head = state->iioq_head | PC_PRIV_USER;
	    saved_state->iioq_tail = state->iioq_tail | PC_PRIV_USER;
	}

	return KERN_SUCCESS;
    }
    case HP700_THREAD_STATE:
    {
	register struct hp700_thread_state *state;
	register struct hp700_saved_state  *saved_state = USER_REGS(thr_act);

	state = (struct hp700_thread_state *)tstate;

	if(state->flags & (SS_INSYSCALL|SS_INTRAP))
		saved_state->flags = state->flags;
	saved_state->r1 = state->r1;
	saved_state->rp = state->rp;
	saved_state->r3 = state->r3;
	saved_state->r4 = state->r4;
	saved_state->r5 = state->r5;
	saved_state->r6 = state->r6;
	saved_state->r7 = state->r7;
	saved_state->r8 = state->r8;
	saved_state->r9 = state->r9;
	saved_state->r10 = state->r10;
	saved_state->r11 = state->r11;
	saved_state->r12 = state->r12;
	saved_state->r13 = state->r13;
	saved_state->r14 = state->r14;
	saved_state->r15 = state->r15;
	saved_state->r16 = state->r16;
	saved_state->r17 = state->r17;
	saved_state->r18 = state->r18;
	saved_state->t4 = state->t4;
	saved_state->t3 = state->t3;
	saved_state->t2 = state->t2;
	saved_state->t1 = state->t1;
	saved_state->arg3 = state->arg3;
	saved_state->arg2 = state->arg2;
	saved_state->arg1 = state->arg1;
	saved_state->arg0 = state->arg0;
	saved_state->dp = state->dp;
	saved_state->ret0 = state->ret0;
	saved_state->ret1 = state->ret1;
	saved_state->sp = state->sp;
	saved_state->r31 = state->r31;
	saved_state->sar = state->sar;

	if(kernel_act) {
	    saved_state->iioq_head = state->iioq_head;
	    saved_state->iioq_tail = state->iioq_tail;
	}
	else {
	    saved_state->iioq_head = state->iioq_head | PC_PRIV_USER;
	    saved_state->iioq_tail = state->iioq_tail | PC_PRIV_USER;
	}
	
	saved_state->iisq_head = state->iisq_head;
	saved_state->iisq_tail = state->iisq_tail;

	saved_state->sr0 = state->sr0;
	saved_state->sr1 = state->sr1;
	saved_state->sr2 = state->sr2;
	saved_state->sr3 = state->sr3;

	saved_state->fpu = state->fpu;

	/*
	 * Make sure only the PSW_R, PSW_T, PSW_X, PSW_N, PSW_B, PSW_V and
	 * PSW_CB bits can be set by users.
	 */
	saved_state->ipsw = state->ipsw &
		(PSW_R | PSW_T | PSW_X | PSW_N | PSW_B | PSW_V | PSW_CB);

	/*
	 * Always make sure that PSW_C, PSW_Q, PSW_P, PSW_D are set. The PSW_I
	 * bit is set according to eiem at context switch.
	 */
	saved_state->ipsw |= PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I;

	/*
	 * If single stepping, set the recovery counter to 0 so that one
	 * instruction is executed before trapping.
	 */
	if(saved_state->ipsw & PSW_R)
		saved_state->rctr = 0;

	return KERN_SUCCESS;
    }
    case HP700_FLOAT_STATE:
    {
	register struct hp700_float_state *state;
	register struct hp700_float_state *saved_state = USER_FREGS(thr_act);

	state = (struct hp700_float_state *) tstate;

	if (fpu_pcb == thr_act->mact.pcb)
		fpu_pcb = 0;	/* must restore coprocessor */

	bcopy((char *)state, (char *)saved_state,
	      sizeof(struct hp700_float_state));
	thr_act->mact.pcb->ss.fpu = 1;
	
	return KERN_SUCCESS;
    }
    default:
	return KERN_INVALID_ARGUMENT;
    }
}
Example #25
kern_return_t 
act_machine_get_state(
		      thread_act_t           thr_act,
		      thread_flavor_t        flavor,
		      thread_state_t         tstate,
		      mach_msg_type_number_t *count)
{
#if	MACH_ASSERT
    if (watchacts & WA_STATE)
	printf("act_%x act_m_get_state(thr_act=%x,flav=%x,st=%x,cnt@%x=%x)\n",
	       current_act(), thr_act, flavor, tstate,
	       count, (count ? *count : 0));
#endif	/* MACH_ASSERT */

    switch (flavor) {
    case THREAD_STATE_FLAVOR_LIST:
	if (*count < 3)
	    return (KERN_INVALID_ARGUMENT);

	tstate[0] = HP700_THREAD_STATE;
	tstate[1] = HP700_FLOAT_STATE;
	tstate[2] = THREAD_SYSCALL_STATE;
	*count = 3;

	return KERN_SUCCESS;

    case THREAD_SYSCALL_STATE:
    {
	register struct thread_syscall_state *state;
	register struct hp700_saved_state   *saved_state = USER_REGS(thr_act);

	if (*count < HP700_SYSCALL_STATE_COUNT)
	    return KERN_INVALID_ARGUMENT;

	state = (struct thread_syscall_state *) tstate;

	state->r1  = saved_state->r1;
	state->rp  = saved_state->rp;
	state->r3  = saved_state->r3;
	state->t1 = saved_state->t1;
	state->arg3 = saved_state->arg3;
	state->arg2 = saved_state->arg2;
	state->arg1 = saved_state->arg1;
	state->arg0 = saved_state->arg0;
	state->dp = saved_state->dp;
	state->ret0 = saved_state->ret0;
	state->ret1 = saved_state->ret1;
	state->sp = saved_state->sp;
    
	state->iioq_head = saved_state->iioq_head;
	state->iioq_tail = saved_state->iioq_tail;
	
	*count = HP700_SYSCALL_STATE_COUNT;
	return KERN_SUCCESS;
    }

    case HP700_THREAD_STATE:
    {
	register struct hp700_thread_state *state;
	register struct hp700_saved_state  *saved_state = USER_REGS(thr_act);

	if (*count < HP700_THREAD_STATE_COUNT)
	    return KERN_INVALID_ARGUMENT;

	state = (struct hp700_thread_state *) tstate;
	state->flags = saved_state->flags;
	state->r1  = saved_state->r1;
	state->rp  = saved_state->rp;
	state->r3  = saved_state->r3;
	state->r4  = saved_state->r4;
	state->r5  = saved_state->r5;
	state->r6  = saved_state->r6;
	state->r7  = saved_state->r7;
	state->r8  = saved_state->r8;
	state->r9  = saved_state->r9;
	state->r10 = saved_state->r10;
	state->r11 = saved_state->r11;
	state->r12 = saved_state->r12;
	state->r13 = saved_state->r13;
	state->r14 = saved_state->r14;
	state->r15 = saved_state->r15;
	state->r16 = saved_state->r16;
	state->r17 = saved_state->r17;
	state->r18 = saved_state->r18;
	state->t4 = saved_state->t4;
	state->t3 = saved_state->t3;
	state->t2 = saved_state->t2;
	state->t1 = saved_state->t1;
	state->arg3 = saved_state->arg3;
	state->arg2 = saved_state->arg2;
	state->arg1 = saved_state->arg1;
	state->arg0 = saved_state->arg0;
	state->dp = saved_state->dp;
	state->ret0 = saved_state->ret0;
	state->ret1 = saved_state->ret1;
	state->sp = saved_state->sp;
	state->r31 = saved_state->r31;
    
	state->sr0 = saved_state->sr0;
	state->sr1 = saved_state->sr1;
	state->sr2 = saved_state->sr2;
	state->sr3 = saved_state->sr3;
	state->sr4 = saved_state->sr4;
	state->sr5 = saved_state->sr5;
	state->sr6 = saved_state->sr6;

	state->rctr = saved_state->rctr;
	state->pidr1 = saved_state->pidr1;
	state->pidr2 = saved_state->pidr2;
	state->ccr = saved_state->ccr;
	state->sar = saved_state->sar;
	state->pidr3 = saved_state->pidr3;
	state->pidr4 = saved_state->pidr4;

	state->iioq_head = saved_state->iioq_head;
	state->iioq_tail = saved_state->iioq_tail;
	
	state->iisq_head = saved_state->iisq_head;
	state->iisq_tail = saved_state->iisq_tail;

	state->fpu = saved_state->fpu;

	/* 
	 * Only export the meaningful bits of the psw
	 */
	state->ipsw = saved_state->ipsw & 
	    (PSW_R | PSW_X | PSW_T | PSW_N | PSW_B | PSW_V | PSW_CB);

	*count = HP700_THREAD_STATE_COUNT;
	return KERN_SUCCESS;
    }

    case HP700_FLOAT_STATE:
    {
	register struct hp700_float_state *state;
	register struct hp700_float_state *saved_state = USER_FREGS(thr_act);

	if (*count < HP700_FLOAT_STATE_COUNT)
	    return KERN_INVALID_ARGUMENT;

	state = (struct hp700_float_state *) tstate;

	if (fpu_pcb == thr_act->mact.pcb)
		fpu_flush();

	if(thr_act->mact.pcb->ss.fpu) {
		assert(thr_act->mact.pcb->ss.fpu == 1);
		bcopy((char *)saved_state, (char *)state, sizeof(struct hp700_float_state));
	}
	else 
		bzero((char*)state, sizeof(struct hp700_float_state));

	*count = HP700_FLOAT_STATE_COUNT;
	return KERN_SUCCESS;
    }
    default:
	return KERN_INVALID_ARGUMENT;
    }
}
Example #26
kern_return_t
thread_get_cthread_self(void)
{
    return ((kern_return_t)current_act()->mact.pcb->cthread_self);
}
Example #27
void
task_swapper(void)
{
	task_t	outtask, intask;
	int timeout;
	int loopcnt = 0;
	boolean_t start_swapping;
	boolean_t stop_swapping;
	int local_page_free_avg;
	extern int hz;

	thread_swappable(current_act(), FALSE);
	stack_privilege(current_thread());

	spllo();

	for (;;) {
	local_page_free_avg = vm_page_free_avg;
	while (TRUE) {
#if	0
		if (task_swap_debug)
			printf("task_swapper: top of loop; cnt = %d\n",loopcnt);
#endif
		intask = pick_intask();

		start_swapping = ((vm_pageout_rate_avg > swap_start_pageout_rate) ||
				  (vm_grab_rate_avg > max_grab_rate));
		stop_swapping = (vm_pageout_rate_avg < swap_stop_pageout_rate);

		/*
		 * If a lot of paging is going on, or another task should come
		 * in but memory is tight, find something to swap out and start
		 * it.  Don't swap any task out if task swapping is disabled.
		 * vm_page_queue_free_lock protects the vm globals.
		 */
		outtask = TASK_NULL;
		if (start_swapping ||
		    (!stop_swapping && intask &&
		     ((local_page_free_avg / AVE_SCALE) < vm_page_free_target))
		   ) {
			if (task_swap_enable &&
			    (outtask = pick_outtask()) &&
			    (task_swapout(outtask) == KERN_SUCCESS)) {
				unsigned long rss;
#if	TASK_SW_DEBUG
				if (task_swap_debug)
				    print_pid(outtask, local_page_free_avg / AVE_SCALE,
					      vm_page_free_target, "<",
					      "out");
#endif
				rss = outtask->swap_rss;
				if (outtask->swap_nswap == 1)
					rss /= 2; /* divide by 2 if never out */
				local_page_free_avg += (rss/short_avg_interval) * AVE_SCALE;
			}
			if (outtask != TASK_NULL)
				task_deallocate(outtask);
		}

		/*
		 * If there is an eligible task to bring in and there are at
		 * least vm_page_free_target free pages, swap it in.  If task
		 * swapping has been disabled, bring the task in anyway.
		 */
		if (intask && ((local_page_free_avg / AVE_SCALE) >=
							vm_page_free_target ||
				stop_swapping || !task_swap_enable)) {
			if (task_swapin(intask, FALSE) == KERN_SUCCESS) {
				unsigned long rss;
#if	TASK_SW_DEBUG
				if (task_swap_debug)
				    print_pid(intask, local_page_free_avg / AVE_SCALE,
					      vm_page_free_target, ">=",
					      "in");
#endif
				rss = intask->swap_rss;
				if (intask->swap_nswap == 1)
					rss /= 2; /* divide by 2 if never out */
				local_page_free_avg -= (rss/short_avg_interval) * AVE_SCALE;
			}
		}
		/*
		 * XXX
		 * Here we have to decide whether to continue swapping
		 * in and/or out before sleeping.  The decision should
		 * be made based on the previous action (swapin/out) and
		 * current system parameters, such as paging rates and
		 * demand.
		 * The function, compute_vm_averages, which does these
		 * calculations, depends on being called every second,
		 * so we can't just do the same thing.
		 */
		if (++loopcnt < MAX_LOOP)
			continue;

		/*
		 * Arrange to be awakened if paging is still heavy or there are
		 * any tasks partially or completely swapped out.  (Otherwise,
		 * the wakeup will come from the external trigger(s).)
		 */
		timeout = 0;
		if (start_swapping)
			timeout = task_swap_cycle_time;
		else {
			task_swapper_lock();
			if (!queue_empty(&swapped_tasks))
				timeout = min_swap_time;
			task_swapper_unlock();
		}
		assert_wait((event_t)&swapped_tasks, FALSE);
		if (timeout) {
			if (task_swap_debug)
				printf("task_swapper: set timeout of %d\n",
								timeout);
			thread_set_timeout(timeout*hz);
		}
		if (task_swap_debug)
			printf("task_swapper: blocking\n");
		thread_block((void (*)(void)) 0);
		if (timeout) {
			reset_timeout_check(&current_thread()->timer);
		}
		/* reset locals */
		loopcnt = 0;
		local_page_free_avg = vm_page_free_avg;
	}
	}
}
Example #28
void
unix_syscall(struct i386_saved_state *regs)
{
    thread_act_t		thread;
    void	*vt; 
    unsigned short	code;
    struct sysent		*callp;
	int	nargs, error;
	volatile int *rval;
	int funnel_type;
    vm_offset_t		params;
    extern int nsysent;
	struct proc *p;
	struct proc *current_proc();

    thread = current_act();
    p = current_proc();
    rval = (int *)get_bsduthreadrval(thread);

    //printf("[scall : eax %x]",  regs->eax);
    code = regs->eax;
    params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
    callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
    if (callp == sysent) {
	code = fuword(params);
	params += sizeof (int);
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
    }
    
    vt = get_bsduthreadarg(thread);

    if ((nargs = (callp->sy_narg * sizeof (int))) &&
	    (error = copyin((char *) params, (char *)vt , nargs)) != 0) {
	regs->eax = error;
	regs->efl |= EFL_CF;
	thread_exception_return();
	/* NOTREACHED */
    }
    
    rval[0] = 0;
    rval[1] = regs->edx;

	funnel_type = callp->sy_funnel;
	if(funnel_type == KERNEL_FUNNEL)
		(void) thread_funnel_set(kernel_flock, TRUE);
	else if (funnel_type == NETWORK_FUNNEL)
		(void) thread_funnel_set(network_flock, TRUE);
	
   set_bsduthreadargs(thread, regs, NULL);

    if (callp->sy_narg > 8)
	panic("unix_syscall max arg count exceeded (%d)", callp->sy_narg);

	ktrsyscall(p, code, callp->sy_narg, vt, funnel_type);

	{ 
	  int *ip = (int *)vt;
	  KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
	      *ip, *(ip+1), *(ip+2), *(ip+3), 0);
	}

    error = (*(callp->sy_call))(p, (void *) vt, (int *) &rval[0]);
	
#if 0
	/* May be needed with vfork changes */
	regs = USER_REGS(thread);
#endif
	if (error == ERESTART) {
		regs->eip -= 7;
	}
	else if (error != EJUSTRETURN) {
		if (error) {
		    regs->eax = error;
		    regs->efl |= EFL_CF;	/* carry bit */
		} else { /* (not error) */
		    regs->eax = rval[0];
		    regs->edx = rval[1];
		    regs->efl &= ~EFL_CF;
		} 
	}

	ktrsysret(p, code, error, rval[0], funnel_type);

	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
		error, rval[0], rval[1], 0, 0);

	if(funnel_type != NO_FUNNEL)
    		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}
Example #29
/*
 *	task_swap_swapout_thread: [exported]
 *
 *	Executes as a separate kernel thread.
 *	Its job is to swap out threads that have been halted by AST_SWAPOUT.
 */
void
task_swap_swapout_thread(void)
{
	thread_act_t thr_act;
	thread_t thread, nthread;
	task_t task;
	int s;

	thread_swappable(current_act(), FALSE);
	stack_privilege(current_thread());

	spllo();

	while (TRUE) {
		task_swapper_lock();
		while (! queue_empty(&swapout_thread_q)) {

			queue_remove_first(&swapout_thread_q, thr_act,
					   thread_act_t, swap_queue);
			/*
			 * If we're racing with task_swapin, we need
			 * to make it safe for it to do remque on the
			 * thread, so make its links point to itself.
			 * Allowing this ugliness is cheaper than 
			 * making task_swapin search the entire queue.
			 */
			act_lock(thr_act);
			queue_init((queue_t) &thr_act->swap_queue);
			act_unlock(thr_act);
			task_swapper_unlock();
			/*
			 * Wait for thread's RUN bit to be deasserted.
			 */
			thread = act_lock_thread(thr_act);
			if (thread == THREAD_NULL)
				act_unlock_thread(thr_act);
			else {
				boolean_t r;

				thread_reference(thread);
				thread_hold(thr_act);
				act_unlock_thread(thr_act);
				r = thread_stop_wait(thread);
				nthread = act_lock_thread(thr_act);
				thread_release(thr_act);
				thread_deallocate(thread);
				act_unlock_thread(thr_act);
				if (!r || nthread != thread) {
					task_swapper_lock();
					continue;
				}
			}
			task = thr_act->task;
			task_lock(task);
			/* 
			 * we can race with swapin, which would set the
			 * state to TASK_SW_IN. 
			 */
			if ((task->swap_state != TASK_SW_OUT) &&
			    (task->swap_state != TASK_SW_GOING_OUT)) {
				task_unlock(task);
				task_swapper_lock();
				TASK_STATS_INCR(task_sw_race_in_won);
				if (thread != THREAD_NULL)
					thread_unstop(thread);
				continue;
			}
			nthread = act_lock_thread(thr_act);
			if (nthread != thread || thr_act->active == FALSE) {
				act_unlock_thread(thr_act);
				task_unlock(task);
				task_swapper_lock();
				TASK_STATS_INCR(task_sw_act_inactive);
				if (thread != THREAD_NULL)
					thread_unstop(thread);
				continue;
			}
			s = splsched();
			if (thread != THREAD_NULL)
				thread_lock(thread);
			/* 
			 * Thread cannot have been swapped out yet because
			 * TH_SW_TASK_SWAPPING was set in AST.  If task_swapin
			 * beat us here, we either wouldn't have found it on
			 * the queue, or the task->swap_state would have
			 * changed.  The synchronization is on the
			 * task's swap_state and the task_lock.
			 * The thread can't be swapped in any other way
			 * because its task has been swapped.
			 */
			assert(thr_act->swap_state & TH_SW_TASK_SWAPPING);
			assert(thread == THREAD_NULL ||
			       !(thread->state & (TH_SWAPPED_OUT|TH_RUN)));
			assert((thr_act->swap_state & TH_SW_STATE) == TH_SW_IN);
			/* assert(thread->state & TH_HALTED); */
			/* this also clears TH_SW_TASK_SWAPPING flag */
			thr_act->swap_state = TH_SW_GOING_OUT;
			if (thread != THREAD_NULL) {
				if (thread->top_act == thr_act) {
					thread->state |= TH_SWAPPED_OUT;
					/*
					 * Once we unlock the task, things can happen
					 * to the thread, so make sure it's consistent
					 * for thread_swapout.
					 */
				}
				thread->ref_count++;
				thread_unlock(thread);
				thread_unstop(thread);
			}
			splx(s);
			act_locked_act_reference(thr_act);
			act_unlock_thread(thr_act);
			task_unlock(task);

			thread_swapout(thr_act);	/* do the work */

			if (thread != THREAD_NULL)
				thread_deallocate(thread);
			act_deallocate(thr_act);
			task_swapper_lock();
		}
		assert_wait((event_t)&swapout_thread_q, FALSE);
		task_swapper_unlock();
		thread_block((void (*)(void)) 0);
	}
}
Example #30
/*
 *	Process an AST_SWAPOUT.
 */
void
swapout_ast(void)
{
	spl_t		s;
	thread_act_t	act;
	thread_t	thread;

	act = current_act();

	/*
	 * Task is being swapped out.  First mark it as suspended
	 * and halted, then call thread_swapout_enqueue to put
	 * the thread on the queue for task_swap_swapout_threads
	 * to swap out the thread.
	 */
	/*
	 * Don't swap unswappable threads
	 */
	thread = act_lock_thread(act);
	s = splsched();
	if (thread)
		thread_lock(thread);
	if ((act->ast & AST_SWAPOUT) == 0) {
		/*
		 * Race with task_swapin. Abort swapout.
		 */
		task_swap_ast_aborted++;	/* not locked XXX */
		if (thread)
			thread_unlock(thread);
		splx(s);
		act_unlock_thread(act);
	} else if (act->swap_state == TH_SW_IN) {
		/*
		 * Mark swap_state as TH_SW_TASK_SWAPPING to avoid
		 * race with thread swapper, which will only
		 * swap thread if swap_state is TH_SW_IN.
		 * This way, the thread can only be swapped by
		 * the task swapping mechanism.
		 */
		act->swap_state |= TH_SW_TASK_SWAPPING;
		/* assert(act->suspend_count == 0); XXX ? */
		if (thread)
			thread_unlock(thread);
		if (act->suspend_count++ == 0)	/* inline thread_hold */
			install_special_handler(act);
		/* self->state |= TH_HALTED; */
		thread_ast_clear(act, AST_SWAPOUT);
		/*
		 * Initialize the swap_queue fields to allow an extra
		 * queue_remove() in task_swapin if we lose the race
		 * (task_swapin can be called before we complete
		 * thread_swapout_enqueue).
		 */
		queue_init((queue_t) &act->swap_queue);
		splx(s);
		act_unlock_thread(act);
		/* this must be called at normal interrupt level */
		thread_swapout_enqueue(act);
	} else {
		/* thread isn't swappable; continue running */
		assert(act->swap_state == TH_SW_UNSWAPPABLE);
		if (thread)
			thread_unlock(thread);
		thread_ast_clear(act, AST_SWAPOUT);
		splx(s);
		act_unlock_thread(act);
	}
}