Code example #1
File: machine.c  Project: rohsaini/mkunity
/*
 *	cpu_up:
 *
 *	Flag specified cpu as up and running.  Called when a processor comes
 *	online.
 */
void
cpu_up(
	int	cpu)
{
	register struct machine_slot	*ms;
	register processor_t	processor;
	spl_t	s;

	processor = cpu_to_processor(cpu);
	/*
	 * Can't risk blocking with no current thread established.
	 * Just twiddle our thumbs; we've got nothing better to do
	 * yet, anyway.
	 */
	while (!pset_lock_try(&default_pset))
		;
	s = splsched();
	processor_lock(processor);
#if	NCPUS > 1
	init_ast_check(processor);
#endif	/* NCPUS > 1 */
	ms = &machine_slot[cpu];
	ms->running = TRUE;
	machine_info.avail_cpus++;
	pset_add_processor(&default_pset, processor);
	processor->state = PROCESSOR_RUNNING;
	processor_unlock(processor);
	splx(s);
	pset_unlock(&default_pset);
}
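
Every example on this page resolves a CPU number to its processor_t through cpu_to_processor(), but none of the excerpts shows its definition. In classic Mach-derived kernels it is usually nothing more than a lookup into a fixed per-CPU table; the sketch below assumes a Mach 3.0-style processor_array[NCPUS] layout and is illustrative only, not taken from these files.

/*
 * Hypothetical sketch: cpu_to_processor() as a plain index into the
 * statically sized processor table (Mach 3.0-style layout assumed).
 */
extern struct processor	processor_array[NCPUS];

#define cpu_to_processor(i)	(&processor_array[i])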
Code example #2
kern_return_t
host_processors(
    host_t			host,
    processor_array_t	*processor_list,
    mach_msg_type_number_t	*countp)
{
    register int		i;
    register processor_t	*tp;
    vm_offset_t		addr;
    unsigned int		count;
    boolean_t rt = FALSE; /* ### This boolean is FALSE, because there
			       * currently exists no mechanism to determine
			       * whether or not the reply port is an RT port
			       */

    if (host == HOST_NULL)
        return(KERN_INVALID_ARGUMENT);

    /*
     *	Determine how many processors we have.
     *	(This number shouldn't change.)
     */

    count = 0;
    for (i = 0; i < NCPUS; i++)
        if (machine_slot[i].is_cpu)
            count++;

    if (count == 0)
        panic("host_processors");

    addr = KALLOC((vm_size_t) (count * sizeof(mach_port_t)), rt);
    if (addr == 0)
        return KERN_RESOURCE_SHORTAGE;

    tp = (processor_t *) addr;
    for (i = 0; i < NCPUS; i++)
        if (machine_slot[i].is_cpu)
            *tp++ = cpu_to_processor(i);

    *countp = count;
    *processor_list = (mach_port_t *) addr;

    /* do the conversion that Mig should handle */

    tp = (processor_t *) addr;
    for (i = 0; i < count; i++)
        ((mach_port_t *) tp)[i] =
            (mach_port_t)convert_processor_to_port(tp[i]);

    return KERN_SUCCESS;
}
Code example #3
File: host.c  Project: openmach/openmach
kern_return_t host_processors(
	host_t			host,
	processor_array_t	*processor_list,
	natural_t		*countp)
{
	register int		i;
	register processor_t	*tp;
	vm_offset_t		addr;
	unsigned int		count;

	if (host == HOST_NULL)
		return KERN_INVALID_ARGUMENT;

	/*
	 *	Determine how many processors we have.
	 *	(This number shouldn't change.)
	 */

	count = 0;
	for (i = 0; i < NCPUS; i++)
		if (machine_slot[i].is_cpu)
			count++;

	if (count == 0)
		panic("host_processors");

	addr = kalloc((vm_size_t) (count * sizeof(mach_port_t)));
	if (addr == 0)
		return KERN_RESOURCE_SHORTAGE;

	tp = (processor_t *) addr;
	for (i = 0; i < NCPUS; i++)
		if (machine_slot[i].is_cpu)
			*tp++ = cpu_to_processor(i);

	*countp = count;
	*processor_list = (mach_port_t *) addr;

	/* do the conversion that Mig should handle */

	tp = (processor_t *) addr;
	for (i = 0; i < count; i++)
		((mach_port_t *) tp)[i] =
		      (mach_port_t)convert_processor_to_port(tp[i]);

	return KERN_SUCCESS;
}
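
Both host_processors() variants above count the configured CPUs, allocate a port array, convert each processor_t into a send right, and return the buffer out-of-line. From user space the routine is reached through the Mach host interface; the sketch below is a hypothetical caller, assuming a privileged host port and the usual convention that the caller releases the returned rights and vm_deallocate()s the out-of-line array.

/*
 * User-space usage sketch (assumptions: <mach/mach.h> declarations and a
 * privileged host port in host_priv; error handling kept minimal).
 */
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

static void
list_processors(host_priv_t host_priv)
{
	processor_array_t	procs;
	mach_msg_type_number_t	count;
	kern_return_t		kr;

	kr = host_processors(host_priv, &procs, &count);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "host_processors: %s\n", mach_error_string(kr));
		return;
	}
	printf("%u processor port(s) returned\n", count);

	/* Release the send rights, then the out-of-line buffer itself. */
	for (mach_msg_type_number_t i = 0; i < count; i++)
		mach_port_deallocate(mach_task_self(), procs[i]);
	vm_deallocate(mach_task_self(), (vm_address_t)procs,
	    count * sizeof(*procs));
}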
Code example #4
File: startup.c  Project: rohsaini/mkunity
/*
 *	Start up the first thread on a CPU.
 *	First thread is specified for the master CPU.
 */
void
cpu_launch_first_thread(
	register thread_t	th)
{
	register int	mycpu;

	mycpu = cpu_number();

#if	MACH_ASSERT
	if (watchacts & WA_BOOT)
		printf("cpu_launch_first_thread(%x) cpu=%d\n", th, mycpu);
#endif	/* MACH_ASSERT */

	cpu_up(mycpu);

	start_timer(&kernel_timer[mycpu]);

	/*
	 * Block all interrupts for choose_thread.
	 */
	(void) splhigh();

	if (th == THREAD_NULL) {
		th = cpu_to_processor(mycpu)->idle_thread;
		if (th == THREAD_NULL || !rem_runq(th))
			panic("cpu_launch_first_thread");
	}

	rtclock_reset();		/* start realtime clock ticking */
	PMAP_ACTIVATE_KERNEL(mycpu);

	thread_machine_set_current(th);
	thread_lock(th);
	th->state &= ~TH_UNINT;
	thread_unlock(th);
	timer_switch(&th->system_timer);

	PMAP_ACTIVATE_USER(th->top_act, mycpu);

	assert(mycpu == cpu_number());

	/* The following is necessary to keep things balanced */
	disable_preemption();

	load_context(th);
	/*NOTREACHED*/
}
Code example #5
void
processor_bootstrap(void)
{
	pset_init(&pset0, &pset_node0);
	pset_node0.psets = &pset0;

	simple_lock_init(&pset_node_lock, 0);

	queue_init(&tasks);
	queue_init(&threads);

	simple_lock_init(&processor_list_lock, 0);

	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu, &pset0);
}
Code example #6
__private_extern__
kern_return_t chudxnu_enable_cpu(int cpu, boolean_t enable)
{
	chudxnu_unbind_thread(current_thread(), 0);

	if(cpu < 0 || (unsigned int)cpu >= real_ncpus) // sanity check
		return KERN_FAILURE;

	if((cpu_data_ptr[cpu] != NULL) && cpu != master_cpu) {
		processor_t processor = cpu_to_processor(cpu);

		if(processor == master_processor) // don't mess with the boot processor
			return KERN_FAILURE;

		if(enable) {
			return processor_start(processor);
		} else {
			return processor_exit(processor);
		}
	}
	return KERN_FAILURE;
}
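
chudxnu_enable_cpu() is a thin wrapper that routes to processor_start() or processor_exit() for any configured CPU other than the boot processor. Below is a hypothetical usage sketch, assuming kernel context where the CHUD interfaces above are visible; CPU index 1 is an arbitrary example.

/*
 * Usage sketch (hypothetical helper): take CPU 1 offline and bring it back.
 * The routine above already rejects the boot processor and out-of-range
 * indices, so no extra checking is done here.
 */
static kern_return_t
toggle_cpu_example(void)
{
	kern_return_t kr;

	kr = chudxnu_enable_cpu(1, FALSE);	/* processor_exit(): go offline */
	if (kr != KERN_SUCCESS)
		return kr;

	return chudxnu_enable_cpu(1, TRUE);	/* processor_start(): back online */
}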
Code example #7
File: machine.c  Project: rohsaini/mkunity
void
cpu_down(
	int	cpu)
{
	register struct machine_slot	*ms;
	register processor_t	processor;
	spl_t	s;

	processor = cpu_to_processor(cpu);
	s = splsched();
	processor_lock(processor);
	ms = &machine_slot[cpu];
	ms->running = FALSE;
	machine_info.avail_cpus--;
	/*
	 *	processor has already been removed from pset.
	 */
	processor->processor_set_next = PROCESSOR_SET_NULL;
	processor->state = PROCESSOR_OFF_LINE;
	processor_unlock(processor);
	splx(s);
}
Code example #8
File: chud_thread.c  Project: JackieXie168/xnu
/*
 * This method will bind a given thread to the requested CPU starting at the
 * next time quantum.  If the thread is the current thread, this method will
 * force a thread_block().  The result is that if you call this method on the
 * current thread, you will be on the requested CPU when this method returns.
 */
__private_extern__ kern_return_t
chudxnu_bind_thread(thread_t thread, int cpu, __unused int options)
{
    processor_t proc = NULL;

	if(cpu < 0 || (unsigned int)cpu >= real_ncpus) // sanity check
		return KERN_FAILURE;

	// temporary restriction until after phase 2 of the scheduler
	if(thread != current_thread())
		return KERN_FAILURE; 
	
	proc = cpu_to_processor(cpu);

	/*
	 * Potentially racy, but mainly to prevent binding to a shutdown
	 * processor.
	 */
	if(proc && !(proc->state == PROCESSOR_OFF_LINE) &&
			!(proc->state == PROCESSOR_SHUTDOWN)) {
		
		thread_bind(proc);

		/*
		 * If we're trying to bind the current thread, and
		 * we're not on the target cpu, and not at interrupt
		 * context, block the current thread to force a
		 * reschedule on the target CPU.
		 */
		if(thread == current_thread() && 
			!ml_at_interrupt_context() && cpu_number() != cpu) {
			(void)thread_block(THREAD_CONTINUE_NULL);
		}
		return KERN_SUCCESS;
	}
    return KERN_FAILURE;
}
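
As the comment above notes, binding the current thread means you are running on the requested CPU once chudxnu_bind_thread() returns. Below is a hypothetical bind/work/unbind sketch; only chudxnu_bind_thread() and chudxnu_unbind_thread() (the latter also appears in example #6) are taken from the excerpts, everything else is assumed.

/*
 * Usage sketch: run a short piece of work on a specific CPU, then drop the
 * binding so the scheduler may migrate the thread again.  run_on_cpu() and
 * its work callback are hypothetical.
 */
static kern_return_t
run_on_cpu(int cpu, void (*work)(void))
{
	kern_return_t kr;

	kr = chudxnu_bind_thread(current_thread(), cpu, 0);
	if (kr != KERN_SUCCESS)
		return kr;

	work();		/* now executing on the requested CPU */

	chudxnu_unbind_thread(current_thread(), 0);
	return KERN_SUCCESS;
}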
Code example #9
File: ast.c  Project: rohsaini/mkunity
void
ast_check(void)
{
	register int		mycpu;
	register processor_t	myprocessor;
	register thread_t	thread = current_thread();
	spl_t			s = splsched();

	mp_disable_preemption();
	mycpu = cpu_number();

	/*
	 *	Check processor state for ast conditions.
	 */
	myprocessor = cpu_to_processor(mycpu);
	switch(myprocessor->state) {
	    case PROCESSOR_OFF_LINE:
	    case PROCESSOR_IDLE:
	    case PROCESSOR_DISPATCHING:
		/*
		 *	No ast.
		 */
	    	break;

#if	NCPUS > 1
	    case PROCESSOR_ASSIGN:
	    case PROCESSOR_SHUTDOWN:
	        /*
		 * 	Need ast to force action thread onto processor.
		 *
		 * XXX  Should check if action thread is already there.
		 */
		ast_on(mycpu, AST_BLOCK);
		break;
#endif	/* NCPUS > 1 */

	    case PROCESSOR_RUNNING:
	    case PROCESSOR_VIDLE:

		/*
		 *	Propagate thread ast to processor.  If we already
		 *	need an ast, don't look for more reasons.
		 */
		ast_propagate(current_act(), mycpu);
		if (ast_needed(mycpu))
			break;

		/*
		 *	Context switch check.
		 */
		if (csw_needed(thread, myprocessor)) {
			ast_on(mycpu, (myprocessor->first_quantum ?
			       AST_BLOCK : AST_QUANTUM));
		}
		break;

	    default:
	        panic("ast_check: Bad processor state");
	}
	mp_enable_preemption();
	splx(s);
}
Code example #10
File: startup.c  Project: rohsaini/mkunity
/*
 * Now running in a thread.  Create the rest of the kernel threads
 * and the bootstrap task.
 */
void
start_kernel_threads(void)
{
	register int	i;

	/*
	 *	Create the idle threads and the other
	 *	service threads.
	 */
	for (i = 0; i < NCPUS; i++) {
	    if (machine_slot[i].is_cpu) {
		thread_t	th;
		spl_t	s;
		processor_t processor = cpu_to_processor(i);

		(void) thread_create_at(kernel_task, &th, idle_thread);
		s=splsched();
		thread_lock(th);
		thread_bind_locked(th, processor);
		processor->idle_thread = th;
		/*(void) thread_resume(th->top_act);*/
		th->state |= TH_RUN;
		thread_setrun( th, TRUE, TAIL_Q);
		thread_unlock( th );
		splx(s);
	    }
	}

	(void) kernel_thread(kernel_task, reaper_thread, (char *) 0);
#if	THREAD_SWAPPER
	(void) kernel_thread(kernel_task, swapin_thread, (char *) 0);
	(void) kernel_thread(kernel_task, swapout_thread, (char *) 0);
#endif	/* THREAD_SWAPPER */
#if	TASK_SWAPPER
	if (task_swap_on) {
		(void) kernel_thread(kernel_task, task_swapper, (char *) 0);
		(void) kernel_thread(kernel_task, task_swap_swapout_thread,
				     (char *) 0);
	}
#endif	/* TASK_SWAPPER */
	(void) kernel_thread(kernel_task, sched_thread, (char *) 0);
	(void) kernel_thread(kernel_task, timeout_thread, (char *) 0);
#if	NORMA_VM
	(void) kernel_thread(kernel_task, vm_object_thread, (char *) 0);
#endif	/* NORMA_VM */

	/*
	 *	Create the clock service.
	 */
	clock_service_create();

	/*
	 *	Create the device service.
	 */
	device_service_create();

	/*
	 *	Initialize distributed services, starting
	 *	with distributed IPC and progressing to any
	 *	services layered on top of that.
	 *
	 *	This stub exists even in non-NORMA systems.
	 */
	norma_bootstrap();

	/*
	 *	Initialize any testing services blocking the main kernel
	 *	thread so that the in-kernel tests run without interference
	 *	from other boot time activities. We will resume this thread
	 *	in kernel_test_thread(). 
	 */

#if	KERNEL_TEST

	/*
	 *	Initialize the lock that will be used to guard
	 *	variables that will be used in the test synchronization
	 *	scheme.
	 */ 
	simple_lock_init(&kernel_test_lock, ETAP_MISC_KERNEL_TEST);

#if	PARAGON860
	{
		char		*s;
		unsigned int	firstnode;

		/*
		 *	Only start up loopback tests on boot node.
		 */
		if ((s = (char *) getbootenv("BOOT_FIRST_NODE")) == 0)
			panic("startup");
		firstnode = atoi(s);
		(void) kernel_thread(kernel_task, kernel_test_thread,
				     (char * )(dipc_node_self() == 
						       (node_name) firstnode));
	}
#else	/* PARAGON860 */
	(void) kernel_thread(kernel_task, kernel_test_thread, (char *) 0);
#endif	/* PARAGON860 */
	{

		/*
		 *	The synchronization scheme uses a simple lock, two
		 *	booleans and the wakeup event. The wakeup event will
		 *	be posted by kernel_test_thread().
		 */
		spl_t s;
		s = splsched();
		simple_lock(&kernel_test_lock);
		while(!kernel_test_thread_sync_done){
			assert_wait((event_t) &start_kernel_threads, FALSE);
			start_kernel_threads_blocked = TRUE;
			simple_unlock(&kernel_test_lock);
			splx(s);
			thread_block((void (*)(void)) 0);
			s = splsched();
			simple_lock(&kernel_test_lock);
			start_kernel_threads_blocked = FALSE;
		}
		kernel_test_thread_sync_done = FALSE; /* Reset for next use */
		simple_unlock(&kernel_test_lock);
		splx(s);
	}


#endif	/* KERNEL_TEST */
	/*
	 *	Start the user bootstrap.
	 */
	bootstrap_create();

#if	XPR_DEBUG
	xprinit();		/* XXX */
#endif	/* XPR_DEBUG */

#if	NCPUS > 1
	/*
	 *	Create the shutdown thread.
	 */
	(void) kernel_thread(kernel_task, action_thread, (char *) 0);

	/*
	 *	Allow other CPUs to run.
	 *
	 * (this must be last, to allow bootstrap_create to fiddle with
	 *  its child thread before some cpu tries to run it)
	 */
	start_other_cpus();
#endif	/* NCPUS > 1 */

	/*
	 *	Become the pageout daemon.
	 */
	(void) spllo();
	vm_pageout();
	/*NOTREACHED*/
}
Code example #11
File: startup.c  Project: rohsaini/mkunity
/*
 *	Running in virtual memory, on the interrupt stack.
 *	Does not return.  Dispatches initial thread.
 *
 *	Assumes that master_cpu is set.
 */
void
setup_main(void)
{
	thread_t		startup_thread;

	printf_init();
	panic_init();

	sched_init();
	vm_mem_bootstrap();
	ipc_bootstrap();
	vm_mem_init();
	ipc_init();

	/*
	 * As soon as the virtual memory system is up, we record
	 * that this CPU is using the kernel pmap.
	 */
	PMAP_ACTIVATE_KERNEL(master_cpu);

	init_timers();
	timeout_init();

#if	CDLI > 0
	ns_init();	/* Initialize CDLI */
#endif	/* CDLI > 0 */

	dev_lookup_init();
	timeout_init();
	machine_init();

	machine_info.max_cpus = NCPUS;
	machine_info.memory_size = mem_size;
	machine_info.avail_cpus = 0;
	machine_info.major_version = KERNEL_MAJOR_VERSION;
	machine_info.minor_version = KERNEL_MINOR_VERSION;

#if	XPR_DEBUG
	xprbootstrap();
#endif	/* XPR_DEBUG */

	/*
	 *	Initialize the IPC, task, and thread subsystems.
	 */
	clock_init();
	utime_init();
	ledger_init();
#if	THREAD_SWAPPER
	thread_swapper_init();
#endif	/* THREAD_SWAPPER */
#if	TASK_SWAPPER
	task_swapper_init();
#endif	/* TASK_SWAPPER */
	task_init();
	act_init();
	thread_init();
	subsystem_init();
#if	TASK_SWAPPER
	task_swappable(&realhost, kernel_task, FALSE);
#endif	/* TASK_SWAPPER */
#if	MACH_HOST
	pset_sys_init();
#endif	/* MACH_HOST */

	/*
	 *	Kick off the time-out driven routines by calling
	 *	them the first time.
	 */
	recompute_priorities();
	compute_mach_factor();

	/*
	 *	Initialize the Event Trace Analysis Package.
	 * 	Dynamic Phase: 2 of 2
	 */
	etap_init_phase2();
	
	/*
	 *	Create a kernel thread to start the other kernel
	 *	threads.  Thread_resume (from kernel_thread) calls
	 *	thread_setrun, which may look at current thread;
	 *	we must avoid this, since there is no current thread.
	 */

	/*
	 * Create the thread, and point it at the routine.
	 */
	(void) thread_create_at(kernel_task, &startup_thread,
							start_kernel_threads);
#if	NCPUS > 1 && PARAGON860
	thread_bind(startup_thread, cpu_to_processor(master_cpu));
#endif
	/*
	 * Pretend it is already running, and resume it.
	 * Since it looks as if it is running, thread_resume
	 * will not try to put it on the run queues.
	 *
	 * We can do all of this without locking, because nothing
	 * else is running yet.
	 */
	startup_thread->state |= TH_RUN;
	(void) thread_resume(startup_thread->top_act);

	/*
	 * Start the thread.
	 */
	cpu_launch_first_thread(startup_thread);
	/*NOTREACHED*/
	panic("cpu_launch_first_thread returns!");
}
Code example #12
File: startup.c  Project: openmach/openmach
/*
 * Now running in a thread.  Create the rest of the kernel threads
 * and the bootstrap task.
 */
void start_kernel_threads()
{
	register int	i;

	/*
	 *	Create the idle threads and the other
	 *	service threads.
	 */
	for (i = 0; i < NCPUS; i++) {
	    if (machine_slot[i].is_cpu) {
		thread_t	th;

		(void) thread_create(kernel_task, &th);
		thread_bind(th, cpu_to_processor(i));
		thread_start(th, idle_thread);
		thread_doswapin(th);
		(void) thread_resume(th);
	    }
	}

	(void) kernel_thread(kernel_task, reaper_thread, (char *) 0);
	(void) kernel_thread(kernel_task, swapin_thread, (char *) 0);
	(void) kernel_thread(kernel_task, sched_thread, (char *) 0);

#if	NCPUS > 1
	/*
	 *	Create the shutdown thread.
	 */
	(void) kernel_thread(kernel_task, action_thread, (char *) 0);

	/*
	 *	Allow other CPUs to run.
	 */
	start_other_cpus();
#endif	/* NCPUS > 1 */

	/*
	 *	Create the device service.
	 */
	device_service_create();

	/*
	 *	Initialize NORMA ipc system.
	 */
#if	NORMA_IPC
	norma_ipc_init();
#endif	/* NORMA_IPC */

	/*
	 *	Initialize NORMA vm system.
	 */
#if	NORMA_VM
	norma_vm_init();
#endif	/* NORMA_VM */

	/*
	 *	Start the user bootstrap.
	 */
	bootstrap_create();

#if	XPR_DEBUG
	xprinit();		/* XXX */
#endif	/* XPR_DEBUG */

	/*
	 *	Become the pageout daemon.
	 */
	(void) spl0();
	vm_pageout();
	/*NOTREACHED*/
}
Code example #13
File: startup.c  Project: ctos/bpi
/*
 * Now running in a thread.  Create the rest of the kernel threads
 * and the bootstrap task.
 */
void start_kernel_threads()
{
	register int	i;

	/*
	 *	Create the idle threads and the other
	 *	service threads.
	 */
	for (i = 0; i < NCPUS; i++) {
	    if (machine_slot[i].is_cpu) {
		thread_t	th;

		(void) thread_create(kernel_task, &th);
		thread_bind(th, cpu_to_processor(i));
		thread_start(th, idle_thread);
		thread_doswapin(th);
		(void) thread_resume(th);
	    }
	}

	(void) kernel_thread(kernel_task, reaper_thread, (char *) 0);
	(void) kernel_thread(kernel_task, swapin_thread, (char *) 0);
	(void) kernel_thread(kernel_task, sched_thread, (char *) 0);

#if	NCPUS > 1
	/*
	 *	Create the shutdown thread.
	 */
	(void) kernel_thread(kernel_task, action_thread, (char *) 0);

	/*
	 *	Allow other CPUs to run.
	 */
	start_other_cpus();
#endif	/* NCPUS > 1 */

	/*
	 *	Create the device service.
	 */
	device_service_create();

	/*
	 * 	Initialize kernel task's creation time.
	 * When we created the kernel task in task_init, the mapped
	 * time was not yet available.  Now, last thing before starting
	 * the user bootstrap, record the current time as the kernel
	 * task's creation time.
	 */
	record_time_stamp (&kernel_task->creation_time);

	/*
	 *	Start the user bootstrap.
	 */
	bootstrap_create();

#if	XPR_DEBUG
	xprinit();		/* XXX */
#endif	/* XPR_DEBUG */

	/*
	 *	Become the pageout daemon.
	 */
	(void) spl0();
	vm_pageout();
	/*NOTREACHED*/
}
コード例 #14
0
ファイル: ast.c プロジェクト: quan-na/gnumach
void
ast_check(void)
{
	int			mycpu = cpu_number();
	processor_t		myprocessor;
	thread_t		thread = current_thread();
	run_queue_t		rq;
	spl_t			s = splsched();

	/*
	 *	Check processor state for ast conditions.
	 */
	myprocessor = cpu_to_processor(mycpu);
	switch(myprocessor->state) {
	    case PROCESSOR_OFF_LINE:
	    case PROCESSOR_IDLE:
	    case PROCESSOR_DISPATCHING:
		/*
		 *	No ast.
		 */
	    	break;

#if	NCPUS > 1
	    case PROCESSOR_ASSIGN:
	    case PROCESSOR_SHUTDOWN:
	        /*
		 * 	Need ast to force action thread onto processor.
		 *
		 * XXX  Should check if action thread is already there.
		 */
		ast_on(mycpu, AST_BLOCK);
		break;
#endif	/* NCPUS > 1 */

	    case PROCESSOR_RUNNING:

		/*
		 *	Propagate thread ast to processor.  If we already
		 *	need an ast, don't look for more reasons.
		 */
		ast_propagate(thread, mycpu);
		if (ast_needed(mycpu))
			break;

		/*
		 *	Context switch check.  The csw_needed macro isn't
		 *	used here because the rq->low hint may be wrong,
		 *	and fixing it here avoids an extra ast.
		 *	First check the easy cases.
		 */
		if (thread->state & TH_SUSP || myprocessor->runq.count > 0) {
			ast_on(mycpu, AST_BLOCK);
			break;
		}

		/*
		 *	Update lazy evaluated runq->low if only timesharing.
		 */
#if	MACH_FIXPRI
		if (myprocessor->processor_set->policies & POLICY_FIXEDPRI) {
		    if (csw_needed(thread,myprocessor)) {
			ast_on(mycpu, AST_BLOCK);
			break;
		    }
		    else {
			/*
			 *	For fixed priority threads, set first_quantum
			 *	so entire new quantum is used.
			 */
			if (thread->policy == POLICY_FIXEDPRI)
			    myprocessor->first_quantum = TRUE;
		    }
		}
		else {
#endif	/* MACH_FIXPRI			 */
		rq = &(myprocessor->processor_set->runq);
		if (!(myprocessor->first_quantum) && (rq->count > 0)) {
		    queue_t 		q;
		    /*
		     *	This is not the first quantum, and there may
		     *	be something in the processor_set runq.
		     *	Check whether low hint is accurate.
		     */
		    q = rq->runq + *(volatile int *)&rq->low;
		    if (queue_empty(q)) {
			int i;

			/*
			 *	Need to recheck and possibly update hint.
			 */
			simple_lock(&rq->lock);
			q = rq->runq + rq->low;
			if (rq->count > 0) {
			    for (i = rq->low; i < NRQS; i++) {
				if(!(queue_empty(q)))
				    break;
				q++;
			    }
			    rq->low = i;
			}
			simple_unlock(&rq->lock);
		    }

		    if (rq->low <= thread->sched_pri) {
			ast_on(mycpu, AST_BLOCK);
			break;
		    }
		}
#if	MACH_FIXPRI
		}
#endif	/* MACH_FIXPRI */
		break;

	    default:
	        panic("ast_check: Bad processor state (cpu %d processor %08x) state: %d",
			mycpu, myprocessor, myprocessor->state);
	}

	(void) splx(s);
}