Example #1
/*ARGSUSED*/
static int
xen_uppc_addspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	int ret = PSM_SUCCESS;
	cpuset_t cpus;

	if (irqno >= 0 && irqno <= MAX_ISA_IRQ)
		atomic_add_16(&xen_uppc_irq_shared_table[irqno], 1);

	/*
	 * We are called at splhi() so we can't call anything that might end
	 * up trying to context switch.
	 */
	if (irqno >= PIRQ_BASE && irqno < NR_PIRQS &&
	    DOMAIN_IS_INITDOMAIN(xen_info)) {
		CPUSET_ZERO(cpus);
		CPUSET_ADD(cpus, 0);
		ec_setup_pirq(irqno, ipl, &cpus);
	} else {
		/*
		 * Set priority/affinity/enable for non-PIRQs
		 */
		ret = ec_set_irq_priority(irqno, ipl);
		ASSERT(ret == 0);
		CPUSET_ZERO(cpus);
		CPUSET_ADD(cpus, 0);
		ec_set_irq_affinity(irqno, cpus);
		ec_enable_irq(irqno);
	}

	return (ret);
}
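
The examples in this section all revolve around the CPUSET_* family of bit-set macros. Their real definitions are platform-specific (illumos/Solaris and NetBSD each have their own multi-word, sometimes atomic variants), but a minimal single-word sketch like the one below captures the semantics the examples rely on; only the macro names are taken from the code above, the implementation is purely illustrative.

/*
 * Simplified illustration only: real kernels use multi-word arrays
 * (and atomics where concurrent updates are possible).
 */
typedef unsigned long cpuset_t;

#define	CPUSET_ZERO(s)		((s) = 0UL)
#define	CPUSET_ADD(s, cpu)	((s) |= 1UL << (cpu))
#define	CPUSET_DEL(s, cpu)	((s) &= ~(1UL << (cpu)))
#define	CPU_IN_SET(s, cpu)	(((s) >> (cpu)) & 1UL)
#define	CPUSET_ISNULL(s)	((s) == 0UL)
#define	CPUSET_XOR(s, s2)	((s) ^= (s2))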
Example #2
void
cpu_hatch(void)
{
	char *v = (char*)CPUINFO_VA;
	int i;

	for (i = 0; i < 4*PAGE_SIZE; i += sizeof(long))
		flush(v + i);

	cpu_pmap_init(curcpu());
	CPUSET_ADD(cpus_active, cpu_number());
	cpu_reset_fpustate();
	curlwp = curcpu()->ci_data.cpu_idlelwp;
	membar_Sync();

	/* wait for the boot CPU to flip the switch */
	while (sync_tick == 0) {
		/* we do nothing here */
	}
	settick(0);
	if (curcpu()->ci_system_clockrate[0] != 0) {
		setstick(0);
		stickintr_establish(PIL_CLOCK, stickintr);
	} else {
		tickintr_establish(PIL_CLOCK, tickintr);
	}
	spl0();
}
Example #3
static int
poweron_vcpu(struct cpu *cp)
{
	int error;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (HYPERVISOR_vcpu_op(VCPUOP_is_up, cp->cpu_id, NULL) != 0) {
		printf("poweron_vcpu: vcpu%d is not available!\n",
		    cp->cpu_id);
		return (ENXIO);
	}

	if ((error = xen_vcpu_up(cp->cpu_id)) == 0) {
		CPUSET_ADD(cpu_ready_set, cp->cpu_id);
		cp->cpu_flags |= CPU_EXISTS | CPU_READY | CPU_RUNNING;
		cp->cpu_flags &= ~CPU_POWEROFF;
		/*
		 * There are some nasty races possible here.
		 * Tell the vcpu it's up one more time.
		 * XXPV	Is this enough?  Is this safe?
		 */
		(void) xen_vcpu_up(cp->cpu_id);

		cpu_phase[cp->cpu_id] = CPU_PHASE_NONE;

		cpu_set_state(cp);
	}
	return (error);
}
Example #4
/*
 * Internal cpu startup sequencer
 * The sequence is as follows:
 *
 * MASTER	SLAVE
 * -------	----------
 * assume the kernel data is initialized
 * clear the proxy bit
 * start the slave cpu
 * wait for the slave cpu to set the proxy
 *
 *		the slave runs slave_startup and then sets the proxy
 *		the slave waits for the master to add slave to the ready set
 *
 * the master finishes the initialization and
 * adds the slave to the ready set
 *
 *		the slave exits the startup thread and is running
 */
void
start_cpu(int cpuid, void (*flag_func)(int))
{
	extern void cpu_startup(int);
	int timeout;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Before we begin the dance, tell DTrace that we're about to start
	 * a CPU.
	 */
	if (dtrace_cpustart_init != NULL)
		(*dtrace_cpustart_init)();

	/* start the slave cpu */
	CPUSET_DEL(proxy_ready_set, cpuid);
	if (prom_test("SUNW,start-cpu-by-cpuid") == 0) {
		(void) prom_startcpu_bycpuid(cpuid, (caddr_t)&cpu_startup,
		    cpuid);
	} else {
		/* "by-cpuid" interface didn't exist.  Do it the old way */
		pnode_t nodeid = cpunodes[cpuid].nodeid;

		ASSERT(nodeid != (pnode_t)0);
		(void) prom_startcpu(nodeid, (caddr_t)&cpu_startup, cpuid);
	}

	/* wait for the slave cpu to check in. */
	for (timeout = CPU_WAKEUP_GRACE_MSEC; timeout; timeout--) {
		if (CPU_IN_SET(proxy_ready_set, cpuid))
			break;
		DELAY(1000);
	}
	if (timeout == 0) {
		panic("cpu%d failed to start (2)", cpuid);
	}

	/*
	 * The slave has started; we can tell DTrace that it's safe again.
	 */
	if (dtrace_cpustart_fini != NULL)
		(*dtrace_cpustart_fini)();

	/* run the master side of stick synchronization for the slave cpu */
	sticksync_master();

	/*
	 * deal with the cpu flags in a phase-specific manner
	 * for various reasons, this needs to run after the slave
	 * is checked in but before the slave is released.
	 */
	(*flag_func)(cpuid);

	/* release the slave */
	CPUSET_ADD(cpu_ready_set, cpuid);
}
Example #5
/*
 * common slave cpu initialization code
 */
void
common_startup_init(cpu_t *cp, int cpuid)
{
	kthread_id_t tp;
	sfmmu_t *sfmmup;
	caddr_t	sp;

	/*
	 * Allocate and initialize the startup thread for this CPU.
	 */
	tp = thread_create(NULL, 0, slave_startup, NULL, 0, &p0,
	    TS_STOPPED, maxclsyspri);

	/*
	 * Set state to TS_ONPROC since this thread will start running
	 * as soon as the CPU comes online.
	 *
	 * All the other fields of the thread structure are set up by
	 * thread_create().
	 */
	THREAD_ONPROC(tp, cp);
	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	sfmmup = astosfmmu(&kas);
	CPUSET_ADD(sfmmup->sfmmu_cpusran, cpuid);

	/*
	 * Set up the thread to start in slave_startup.
	 */
	sp = tp->t_stk;
	/* SPARC return convention: execution resumes at t_pc + 8. */
	tp->t_pc = (uintptr_t)slave_startup - 8;
	tp->t_sp = (uintptr_t)((struct rwindow *)sp - 1) - STACK_BIAS;

	cp->cpu_id = cpuid;
	cp->cpu_self = cp;
	cp->cpu_thread = tp;
	cp->cpu_lwp = NULL;
	cp->cpu_dispthread = tp;
	cp->cpu_dispatch_pri = DISP_PRIO(tp);
	cp->cpu_startup_thread = tp;

	/*
	 * The dispatcher may discover the CPU before it is in cpu_ready_set
	 * and attempt to poke it. Before the CPU is in cpu_ready_set, any
	 * cross calls to it will be dropped. We initialize
	 * poke_cpu_outstanding to true so that poke_cpu will ignore any poke
	 * requests for this CPU. Pokes that come in before the CPU is in
	 * cpu_ready_set can be ignored because the CPU is about to come
	 * online.
	 */
	cp->cpu_m.poke_cpu_outstanding = B_TRUE;
}
Example #6
/*
 * Process cpu stop-self event.
 * XXX could maybe add/use locoresw halt function?
 */
static void
ipi_halt(void)
{
	const u_int my_cpu = cpu_number();
	printf("cpu%u: shutting down\n", my_cpu);
	CPUSET_ADD(cpus_halted, my_cpu);
	splhigh();
	for (;;)
		;
	/* NOTREACHED */
}
Example #7
/*
 * Process cpu stop-self event.
 */
void
sparc64_ipi_halt_thiscpu(void *arg, void *arg2)
{
	extern void prom_printf(const char *fmt, ...);

	printf("cpu%d: shutting down\n", cpu_number());
	if (prom_has_stop_other() || !prom_has_stopself()) {
		/*
		 * just loop here, the final cpu will stop us later
		 */
		CPUSET_ADD(cpus_spinning, cpu_number());
		CPUSET_ADD(cpus_halted, cpu_number());
		spl0();
		while (1)
			/* nothing */;
	} else {
		CPUSET_ADD(cpus_halted, cpu_number());
		prom_stopself();
	}
}
Example #8
/*
 * Ensure counters are enabled on the given processor.
 */
void
kcpc_remote_program(cpu_t *cp)
{
	cpuset_t set;

	CPUSET_ZERO(set);

	CPUSET_ADD(set, cp->cpu_id);

	xc_sync(0, 0, 0, CPUSET2BV(set), (xc_func_t)kcpc_remoteprogram_func);
}
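
Examples 8 and 10 use the same three-step idiom for targeting a single CPU with a cross call: zero a set, add one CPU, then convert it to the bit-vector form that xc_sync()/xc_priority() expect. Factored out, the idiom would look like the sketch below; the wrapper name is ours, while xc_sync(), CPUSET2BV() and the types are as used in the examples.

/* Hypothetical wrapper for the one-CPU cross-call idiom. */
static void
xc_sync_one(processorid_t cpuid, xc_func_t func)
{
	cpuset_t set;

	CPUSET_ZERO(set);
	CPUSET_ADD(set, cpuid);
	xc_sync(0, 0, 0, CPUSET2BV(set), func);
}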
Example #9
/*
 * Halt this cpu
 */
void
cpu_halt(void)
{
	int index = cpu_index(curcpu());

	printf("cpu%d: shutting down\n", index);
	CPUSET_ADD(cpus_halted, index);
	spl0();		/* allow interrupts e.g. further ipi ? */
	for (;;) ;	/* spin */

	/* NOTREACHED */
}
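
Examples 6, 7 and 9 all follow the same convention on the halting side: mark yourself in cpus_halted, then spin or stop. The initiating CPU can therefore poll that set to learn when a target has actually stopped. A hypothetical coordinator is sketched below; halt_ipi_send() is an assumed IPI primitive, not an actual NetBSD interface, while cpu_number(), ncpu and CPUSET_HAS are used as in the surrounding examples.

/*
 * Hypothetical coordinator: ask every other CPU to halt itself,
 * then wait for each to show up in cpus_halted.
 */
static void
halt_other_cpus(void)
{
	u_int cpu;

	for (cpu = 0; cpu < ncpu; cpu++) {
		if (cpu == cpu_number())
			continue;
		halt_ipi_send(cpu);	/* assumed stop-self IPI primitive */
		while (!CPUSET_HAS(cpus_halted, cpu))
			;		/* spin until the target checks in */
	}
}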
Example #10
/*
 * Jump to the fast reboot switcher.  This function never returns.
 */
void
fast_reboot()
{
	processorid_t bootcpuid = 0;
	extern uintptr_t postbootkernelbase;
	extern char	fb_swtch_image[];
	fastboot_file_t	*fb;
	int i;

	postbootkernelbase = 0;

	fb = &newkernel.fi_files[FASTBOOT_SWTCH];

	/*
	 * Map the address into both the current proc's address
	 * space and the kernel's address space in case the panic
	 * is forced by kmdb.
	 */
	if (&kas != curproc->p_as) {
		hat_devload(curproc->p_as->a_hat, (caddr_t)fb->fb_va,
		    MMU_PAGESIZE, mmu_btop(fb->fb_dest_pa),
		    PROT_READ | PROT_WRITE | PROT_EXEC,
		    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
	}

	bcopy((void *)fb_swtch_image, (void *)fb->fb_va, fb->fb_size);

	/*
	 * Set fb_va to fake_va
	 */
	for (i = 0; i < FASTBOOT_MAX_FILES_MAP; i++) {
		newkernel.fi_files[i].fb_va = fake_va;
	}

	if (panicstr && CPU->cpu_id != bootcpuid &&
	    CPU_ACTIVE(cpu_get(bootcpuid))) {
		extern void panic_idle(void);
		cpuset_t cpuset;

		CPUSET_ZERO(cpuset);
		CPUSET_ADD(cpuset, bootcpuid);
		xc_priority((xc_arg_t)&newkernel, 0, 0, CPUSET2BV(cpuset),
		    (xc_func_t)fastboot_xc_func);

		panic_idle();
	} else
		(void) fastboot_xc_func(&newkernel, 0, 0);
}
Example #11
void
sparc64_do_pause(void)
{
#if defined(DDB)
	extern bool ddb_running_on_this_cpu(void);
	extern void db_resume_others(void);
#endif

	CPUSET_ADD(cpus_paused, cpu_number());

	do {
		membar_Sync();
	} while (CPUSET_HAS(cpus_paused, cpu_number()));
	membar_Sync();
	CPUSET_ADD(cpus_resumed, cpu_number());

#if defined(DDB)
	if (ddb_running_on_this_cpu()) {
		db_command_loop();
		db_resume_others();
	}
#endif
}
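
Note that the pause loop above exits only when some other CPU deletes this one from cpus_paused; the paused CPU never clears its own bit. A master-side resume therefore clears the pause bits and then waits for every released CPU to acknowledge through cpus_resumed. The sketch below is a hypothetical counterpart, not the actual NetBSD routine; CPUSET_EQUAL is an assumed whole-set comparison macro.

/*
 * Hypothetical master-side resume, paired with sparc64_do_pause():
 * clearing the pause bits frees the spinners; cpus_resumed
 * confirms that each of them actually left the loop.
 */
static void
resume_paused_cpus(void)
{
	cpuset_t paused = cpus_paused;	/* snapshot of who is spinning */

	CPUSET_ZERO(cpus_resumed);
	membar_Sync();
	CPUSET_ZERO(cpus_paused);	/* releases every paused CPU */
	while (!CPUSET_EQUAL(cpus_resumed, paused))
		membar_Sync();		/* wait for all acknowledgements */
}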
Example #12
/*
 * Pause this cpu
 */
void
cpu_pause(struct reg *regsp)
{
	int s = splhigh();
	int index = cpu_index(curcpu());

	for (;;) {
		CPUSET_ADD(cpus_paused, index);
		do {
			;
		} while (CPUSET_HAS_P(cpus_paused, index));
		CPUSET_ADD(cpus_resumed, index);

#if defined(DDB)
		if (ddb_running_on_this_cpu_p())
			cpu_Debugger();
		if (ddb_running_on_any_cpu_p())
			continue;
#endif
		break;
	}

	splx(s);
}
Example #13
void
cpu_hatch()
{
	char *v = (char*)CPUINFO_VA;
	int i;

	for (i = 0; i < 4*PAGE_SIZE; i += sizeof(long))
		flush(v + i);

	cpu_pmap_init(curcpu());
	CPUSET_ADD(cpus_active, cpu_number());
	cpu_reset_fpustate();
	curlwp = curcpu()->ci_data.cpu_idlelwp;
	membar_sync();
	tickintr_establish(PIL_CLOCK, tickintr);
	spl0();
}
Example #14
void
cpu_boot_secondary_processors(void)
{
	for (struct cpu_info *ci = cpu_info_store.ci_next;
	     ci != NULL;
	     ci = ci->ci_next) {
		KASSERT(!CPU_IS_PRIMARY(ci));
		KASSERT(ci->ci_data.cpu_idlelwp);

		/*
		 * Skip this CPU if it didn't successfully hatch.
		 */
		if (! CPUSET_HAS_P(cpus_hatched, cpu_index(ci)))
			continue;

		ci->ci_data.cpu_cc_skew = mips3_cp0_count_read();
		atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
		CPUSET_ADD(cpus_running, cpu_index(ci));
	}
}
Example #15
void
cpu_hatch(struct cpu_info *ci)
{
	struct pmap_tlb_info * const ti = ci->ci_tlb_info;

	/*
	 * Invalidate all the TLB entries (even wired ones) and then reserve
	 * space for the wired TLB entries.
	 */
	mips3_cp0_wired_write(0);
	tlb_invalidate_all();
	mips3_cp0_wired_write(ti->ti_wired);

	/*
	 * Set up the HWRENA and USERLOCAL COP0 registers (MIPSxxR2).
	 */
	cpu_hwrena_setup();

	/*
	 * If we are using register zero relative addressing to access cpu_info
	 * in the exception vectors, enter that mapping into TLB now.
	 */
	if (ci->ci_tlb_slot >= 0) {
		const uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V
		    | mips3_paddr_to_tlbpfn((vaddr_t)ci);

		tlb_enter(ci->ci_tlb_slot, -PAGE_SIZE, tlb_lo);
	}

	/*
	 * Flush the icache just to be sure.
	 */
	mips_icache_sync_all();

	/*
	 * Let this CPU do its own initialization (for things that have to be
	 * done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_init)(ci);

	/*
	 * Announce we are hatched
	 */
	CPUSET_ADD(cpus_hatched, cpu_index(ci));

	/*
	 * Now wait to be set free!
	 */
	while (! CPUSET_HAS_P(cpus_running, cpu_index(ci))) {
		/* spin, spin, spin */
	}

	/*
	 * initialize the MIPS count/compare clock
	 */
	mips3_cp0_count_write(ci->ci_data.cpu_cc_skew);
	KASSERT(ci->ci_cycles_per_hz != 0);
	ci->ci_next_cp0_clk_intr = ci->ci_data.cpu_cc_skew + ci->ci_cycles_per_hz;
	mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
	ci->ci_data.cpu_cc_skew = 0;

	/*
	 * Let this CPU do its own post-running initialization
	 * (for things that have to be done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_run)(ci);

	/*
	 * Now turn on interrupts.
	 */
	spl0();

	/*
	 * And do a tail call to idle_loop
	 */
	idle_loop(NULL);
}
Example #16
/*
 * Called when the softcall queue is not moving forward. We choose
 * a CPU to poke, excluding the ones that have already been poked.
 */
static int
softcall_choose_cpu()
{
	cpu_t *cplist = CPU;
	cpu_t *cp;
	int intr_load = INT_MAX;
	int cpuid = -1;
	cpuset_t poke;
	int s;

	ASSERT(getpil() >= DISP_LEVEL);
	ASSERT(ncpus > 1);
	ASSERT(MUTEX_HELD(&softcall_lock));

	CPUSET_ZERO(poke);

	/*
	 * The hint is to start from current CPU.
	 */
	cp = cplist;
	do {
		/*
		 * Don't select this CPU if :
		 *   - in cpuset already
		 *   - CPU is not accepting interrupts
		 *   - CPU is being offlined
		 */
		if (CPU_IN_SET(*softcall_cpuset, cp->cpu_id) ||
		    (cp->cpu_flags & CPU_ENABLE) == 0 ||
		    (cp == cpu_inmotion))
			continue;
#if defined(__x86)
		/*
		 * Don't select this CPU if a hypervisor indicates it
		 * isn't currently scheduled onto a physical cpu.  We are
		 * looking for a cpu that can respond quickly and the time
		 * to get the virtual cpu scheduled and switched to running
		 * state is likely to be relatively lengthy.
		 */
		if (vcpu_on_pcpu(cp->cpu_id) == VCPU_NOT_ON_PCPU)
			continue;
#endif	/* __x86 */

		/* if CPU is not busy */
		if (cp->cpu_intrload == 0) {
			cpuid = cp->cpu_id;
			break;
		}

		if (cp->cpu_intrload < intr_load) {
			cpuid = cp->cpu_id;
			intr_load = cp->cpu_intrload;
		} else if (cp->cpu_intrload == intr_load) {
			/*
			 * We want to poke CPUs having similar
			 * load because we don't know which CPU
			 * can acknowledge the level1 interrupt. The
			 * list of such CPUs should not be large.
			 */
			if (cpuid != -1) {
				/*
				 * Put the last CPU chosen because
				 * it also has same interrupt load.
				 */
				CPUSET_ADD(poke, cpuid);
				cpuid = -1;
			}

			CPUSET_ADD(poke, cp->cpu_id);
		}
	} while ((cp = cp->cpu_next_onln) != cplist);

	/* if we found a CPU which suits best to poke */
	if (cpuid != -1) {
		CPUSET_ZERO(poke);
		CPUSET_ADD(poke, cpuid);
	}

	if (CPUSET_ISNULL(poke)) {
		mutex_exit(&softcall_lock);
		return (0);
	}

	/*
	 * We first set the bit in cpuset and then poke.
	 */
	CPUSET_XOR(*softcall_cpuset, poke);
	mutex_exit(&softcall_lock);

	/*
	 * If softcall() was called at low pil then we may
	 * get preempted before we raise PIL. It should be okay
	 * because we are just going to poke CPUs now or at most
	 * another thread may start choosing CPUs in this routine.
	 */
	s = splhigh();
	siron_poke_cpu(poke);
	splx(s);
	return (1);
}
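
The CPUSET_XOR() near the end looks odd for an "add" operation, but it is safe here: the selection loop skipped every CPU already present in *softcall_cpuset, so poke and *softcall_cpuset are guaranteed disjoint, and XOR on disjoint bit sets degenerates to a union. A standalone demonstration with plain bitmasks (illustrative values only):

#include <assert.h>

int
main(void)
{
	unsigned long set  = 0x05UL;	/* CPUs 0 and 2 already marked */
	unsigned long poke = 0x0aUL;	/* CPUs 1 and 3 newly chosen */

	assert((set & poke) == 0);	/* disjoint, so XOR acts as OR */
	set ^= poke;
	assert(set == 0x0fUL);		/* CPUs 0-3 now marked */
	return 0;
}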
Example #17
/*
 * Called when the softcall queue is not moving forward. We choose
 * a CPU to poke, excluding the ones that have already been poked.
 */
static int
softcall_choose_cpu()
{
	cpu_t *cplist = CPU;
	cpu_t *cp;
	int intr_load = INT_MAX;
	int cpuid = -1;
	cpuset_t poke;
	int s;

	ASSERT(getpil() >= DISP_LEVEL);
	ASSERT(ncpus > 1);
	ASSERT(MUTEX_HELD(&softcall_lock));

	CPUSET_ZERO(poke);

	/*
	 * The hint is to start from current CPU.
	 */
	cp = cplist;
	do {
		if (CPU_IN_SET(*softcall_cpuset, cp->cpu_id) ||
		    (cp->cpu_flags & CPU_ENABLE) == 0)
			continue;

		/* if CPU is not busy */
		if (cp->cpu_intrload == 0) {
			cpuid = cp->cpu_id;
			break;
		}

		if (cp->cpu_intrload < intr_load) {
			cpuid = cp->cpu_id;
			intr_load = cp->cpu_intrload;
		} else if (cp->cpu_intrload == intr_load) {
			/*
			 * We want to poke CPUs having similar
			 * load because we don't know which CPU
			 * can acknowledge the level1 interrupt. The
			 * list of such CPUs should not be large.
			 */
			if (cpuid != -1) {
				/*
				 * Put the last CPU chosen because
				 * it also has same interrupt load.
				 */
				CPUSET_ADD(poke, cpuid);
				cpuid = -1;
			}

			CPUSET_ADD(poke, cp->cpu_id);
		}
	} while ((cp = cp->cpu_next_onln) != cplist);

	/* if we found a CPU which suits best to poke */
	if (cpuid != -1) {
		CPUSET_ZERO(poke);
		CPUSET_ADD(poke, cpuid);
	}

	if (CPUSET_ISNULL(poke)) {
		mutex_exit(&softcall_lock);
		return (0);
	}

	/*
	 * We first set the bit in cpuset and then poke.
	 */
	CPUSET_XOR(*softcall_cpuset, poke);
	mutex_exit(&softcall_lock);

	/*
	 * If softcall() was called at low pil then we may
	 * get preempted before we raise PIL. It should be okay
	 * because we are just going to poke CPUs now or at most
	 * another thread may start choosing CPUs in this routine.
	 */
	s = splhigh();
	siron_poke_cpu(poke);
	splx(s);
	return (1);
}
Example #18
void
softint(void)
{
	softcall_t *sc = NULL;
	void (*func)();
	caddr_t arg;
	int cpu_id = CPU->cpu_id;

	mutex_enter(&softcall_lock);

	if (softcall_state & (SOFT_STEAL|SOFT_PEND)) {
		softcall_state = SOFT_DRAIN;
	} else  {
		/*
		 * The check for softcall_cpuset being
		 * NULL is required because it may get
		 * called very early during boot.
		 */
		if (softcall_cpuset != NULL &&
		    CPU_IN_SET(*softcall_cpuset, cpu_id))
			CPUSET_DEL(*softcall_cpuset, cpu_id);
		mutex_exit(&softcall_lock);
		goto out;
	}

	/*
	 * Setting softcall_latest_cpuid to current CPU ensures
	 * that there is only one active softlevel1 handler to
	 * process softcall queues.
	 *
	 * Since softcall_lock lock is dropped before calling
	 * func (callback), we need softcall_latest_cpuid
	 * to prevent two softlevel1 handlers working on the
	 * queue when the first softlevel1 handler gets
	 * stuck due to high interrupt load.
	 */
	softcall_latest_cpuid = cpu_id;

	/* add ourselves to the cpuset */
	if (!CPU_IN_SET(*softcall_cpuset, cpu_id))
		CPUSET_ADD(*softcall_cpuset, cpu_id);

	for (;;) {
		softcall_tick = lbolt;
		if ((sc = softhead) != NULL) {
			func = sc->sc_func;
			arg = sc->sc_arg;
			softhead = sc->sc_next;
			sc->sc_next = softfree;
			softfree = sc;
		}

		if (sc == NULL) {
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			softcall_state = SOFT_IDLE;
			ASSERT(softcall_latest_cpuid == cpu_id);
			softcall_latest_cpuid = -1;

			mutex_exit(&softcall_lock);
			break;
		}

		mutex_exit(&softcall_lock);
		func(arg);
		mutex_enter(&softcall_lock);

		/*
		 * No longer need softcall processing from current
		 * interrupt handler because either
		 *  (a) softcall is in SOFT_IDLE state or
		 *  (b) There is a CPU already draining softcall
		 *	queue and the current softlevel1 is no
		 *	longer required.
		 */
		if (softcall_latest_cpuid != cpu_id) {
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			mutex_exit(&softcall_lock);
			break;
		}
	}

out:
	if ((func = kdi_softcall_func) != NULL) {
		kdi_softcall_func = NULL;
		func();
	}
}
Example #19
/*
 * Indicate that this core (cpuid) resides on the chip indicated by chipid.
 * Called during boot and DR add.
 */
void
cmp_add_cpu(chipid_t chipid, processorid_t cpuid)
{
	CPUSET_ADD(chips[chipid], cpuid);
}
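
The DR remove path needs the symmetric operation. A one-line sketch of the inverse using CPUSET_DEL is shown below; the actual Solaris counterpart may carry additional bookkeeping.

/* Hypothetical inverse of cmp_add_cpu(): drop the core from its chip. */
void
cmp_delete_cpu(chipid_t chipid, processorid_t cpuid)
{
	CPUSET_DEL(chips[chipid], cpuid);
}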
Example #20
/*
 * launch slave cpus into kernel text, pause them,
 * and restore the original prom pages
 */
void
i_cpr_mp_setup(void)
{
	extern void restart_other_cpu(int);
	cpu_t *cp;

	uint64_t kctx = kcontextreg;

	/*
	 * Do not allow setting page size codes in MMU primary context
	 * register while using cif wrapper. This is needed to work
	 * around OBP incorrect handling of this MMU register.
	 */
	kcontextreg = 0;

	/*
	 * reset cpu_ready_set so x_calls work properly
	 */
	CPUSET_ZERO(cpu_ready_set);
	CPUSET_ADD(cpu_ready_set, getprocessorid());

	/*
	 * setup cif to use the cookie from the new/tmp prom
	 * and setup tmp handling for calling prom services.
	 */
	i_cpr_cif_setup(CIF_SPLICE);

	/*
	 * at this point, only the nucleus and a few cpr pages are
	 * mapped in.  once we switch to the kernel trap table,
	 * we can access the rest of kernel space.
	 */
	prom_set_traptable(&trap_table);

	if (ncpus > 1) {
		sfmmu_init_tsbs();

		mutex_enter(&cpu_lock);
		/*
		 * None of the slave cpus are ready at this time,
		 * yet the cpu structures have various cpu_flags set;
		 * clear cpu_flags and mutex_ready.
		 * Since we are coming up from a CPU suspend, the slave cpus
		 * are frozen.
		 */
		for (cp = CPU->cpu_next; cp != CPU; cp = cp->cpu_next) {
			cp->cpu_flags = CPU_FROZEN;
			cp->cpu_m.mutex_ready = 0;
		}

		for (cp = CPU->cpu_next; cp != CPU; cp = cp->cpu_next)
			restart_other_cpu(cp->cpu_id);

		pause_cpus(NULL, NULL);
		mutex_exit(&cpu_lock);

		i_cpr_xcall(i_cpr_clear_entries);
	} else
		i_cpr_clear_entries(0, 0);

	/*
	 * now unlink the cif wrapper;  WARNING: do not call any
	 * prom_xxx() routines until after prom pages are restored.
	 */
	i_cpr_cif_setup(CIF_UNLINK);

	(void) i_cpr_prom_pages(CPR_PROM_RESTORE);

	/* allow setting page size codes in MMU primary context register */
	kcontextreg = kctx;
}
Example #21
/*ARGSUSED*/
void
start_other_cpus(int flag)
{
	int cpuid;
	extern void idlestop_init(void);
	int bootcpu;

	/*
	 * Check if cpu_bringup_set has been explicitly set before
	 * initializing it.
	 */
	if (CPUSET_ISNULL(cpu_bringup_set)) {
		CPUSET_ALL(cpu_bringup_set);
	}

	if (&cpu_feature_init)
		cpu_feature_init();

	/*
	 * Initialize CPC.
	 */
	kcpc_hw_init();

	mutex_enter(&cpu_lock);

	/*
	 * Initialize our own cpu_info.
	 */
	init_cpu_info(CPU);

	/*
	 * Initialize CPU 0 cpu module private data area, including scrubber.
	 */
	cpu_init_private(CPU);
	populate_idstr(CPU);

	/*
	 * perform such initialization as is needed
	 * to be able to take CPUs on- and off-line.
	 */
	cpu_pause_init();
	xc_init();		/* initialize processor crosscalls */
	idlestop_init();

	if (!use_mp) {
		mutex_exit(&cpu_lock);
		cmn_err(CE_CONT, "?***** Not in MP mode\n");
		return;
	}
	/*
	 * should we be initializing this cpu?
	 */
	bootcpu = getprocessorid();

	/*
	 * launch all the slave cpus now
	 */
	for (cpuid = 0; cpuid < NCPU; cpuid++) {
		pnode_t nodeid = cpunodes[cpuid].nodeid;

		if (nodeid == (pnode_t)0)
			continue;

		if (cpuid == bootcpu) {
			if (!CPU_IN_SET(cpu_bringup_set, cpuid)) {
				cmn_err(CE_WARN, "boot cpu not a member "
				    "of cpu_bringup_set, adding it");
				CPUSET_ADD(cpu_bringup_set, cpuid);
			}
			continue;
		}
		if (!CPU_IN_SET(cpu_bringup_set, cpuid))
			continue;

		ASSERT(cpu[cpuid] == NULL);

		if (setup_cpu_common(cpuid)) {
			cmn_err(CE_PANIC, "cpu%d: setup failed", cpuid);
		}

		common_startup_init(cpu[cpuid], cpuid);

		start_cpu(cpuid, cold_flag_set);
		/*
		 * Because slave_startup() gets fired off after init()
		 * starts, we can't use the '?' trick to do 'boot -v'
		 * printing - so we always direct the 'cpu .. online'
		 * messages to the log.
		 */
		cmn_err(CE_CONT, "!cpu%d initialization complete - online\n",
		    cpuid);

		cpu_state_change_notify(cpuid, CPU_SETUP);

		if (dtrace_cpu_init != NULL)
			(*dtrace_cpu_init)(cpuid);
	}

	/*
	 * since all the cpus are online now, redistribute interrupts to them.
	 */
	intr_redist_all_cpus();

	mutex_exit(&cpu_lock);

	/*
	 * Start the Ecache scrubber.  Must be done after all calls to
	 * cpu_init_private for every cpu (including CPU 0).
	 */
	cpu_init_cache_scrub();

	if (&cpu_mp_init)
		cpu_mp_init();
}
Example #22
/*
 * Startup function executed on 'other' CPUs.  This is the first
 * C function after cpu_start sets up the cpu registers.
 */
static void
slave_startup(void)
{
	struct cpu	*cp = CPU;
	ushort_t	original_flags = cp->cpu_flags;

	mach_htraptrace_configure(cp->cpu_id);
	cpu_intrq_register(CPU);
	cp->cpu_m.mutex_ready = 1;

	/* acknowledge that we are done with initialization */
	CPUSET_ADD(proxy_ready_set, cp->cpu_id);

	/* synchronize STICK */
	sticksync_slave();

	if (boothowto & RB_DEBUG)
		kdi_dvec_cpu_init(cp);

	/*
	 * the slave will wait here forever -- assuming that the master
	 * will get back to us.  if it doesn't we've got bigger problems
	 * than a master not replying to this slave.
	 * the small delay improves the slave's responsiveness to the
	 * master's ack and decreases the time window between master and
	 * slave operations.
	 */
	while (!CPU_IN_SET(cpu_ready_set, cp->cpu_id))
		DELAY(1);

	/*
	 * The CPU is now in cpu_ready_set, safely able to take pokes.
	 */
	cp->cpu_m.poke_cpu_outstanding = B_FALSE;

	/* enable interrupts */
	(void) spl0();

	/*
	 * Signature block update to indicate that this CPU is in OS now.
	 * This needs to be done after the PIL is lowered since on
	 * some platforms the update code may block.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, cp->cpu_id);

	/*
	 * park the slave thread in a safe/quiet state and wait for the master
	 * to finish configuring this CPU before proceeding to thread_exit().
	 */
	while (((volatile ushort_t)cp->cpu_flags) & CPU_QUIESCED)
		DELAY(1);

	/*
	 * Initialize CPC CPU state.
	 */
	kcpc_hw_startup_cpu(original_flags);

	/*
	 * Notify the PG subsystem that the CPU has started
	 */
	pg_cmt_cpu_startup(CPU);

	/*
	 * Now we are done with the startup thread, so free it up.
	 */
	thread_exit();
	cmn_err(CE_PANIC, "slave_startup: cannot return");
	/*NOTREACHED*/
}
Example #23
/*ARGSUSED*/
static int
clock_tick_cpu_setup(cpu_setup_t what, int cid, void *arg)
{
	cpu_t			*cp, *ncp;
	int			i, set;
	clock_tick_set_t	*csp;

	/*
	 * This function performs some computations at CPU offline/online
	 * time. The computed values are used during tick scheduling and
	 * execution phases. This avoids having to compute things on
	 * an every-tick basis. The other benefit is that we perform the
	 * computations only for onlined CPUs (not offlined ones). As a
	 * result, no tick processing is attempted for offlined CPUs.
	 *
	 * Also, cpu_offline() calls this function before checking for
	 * active interrupt threads. This allows us to avoid posting
	 * cross calls to CPUs that are being offlined.
	 */

	cp = cpu[cid];

	mutex_enter(&clock_tick_lock);

	switch (what) {
	case CPU_ON:
		clock_tick_cpus[clock_tick_total_cpus] = cp;
		set = clock_tick_total_cpus / clock_tick_ncpus;
		csp = &clock_tick_set[set];
		csp->ct_end++;
		clock_tick_total_cpus++;
		clock_tick_nsets =
		    (clock_tick_total_cpus + clock_tick_ncpus - 1) /
		    clock_tick_ncpus;
		CPUSET_ADD(clock_tick_online_cpuset, cp->cpu_id);
		membar_sync();
		break;

	case CPU_OFF:
		if (&sync_softint != NULL)
			sync_softint(clock_tick_online_cpuset);
		CPUSET_DEL(clock_tick_online_cpuset, cp->cpu_id);
		clock_tick_total_cpus--;
		clock_tick_cpus[clock_tick_total_cpus] = NULL;
		clock_tick_nsets =
		    (clock_tick_total_cpus + clock_tick_ncpus - 1) /
		    clock_tick_ncpus;
		set = clock_tick_total_cpus / clock_tick_ncpus;
		csp = &clock_tick_set[set];
		csp->ct_end--;

		i = 0;
		ncp = cpu_active;
		do {
			if (cp == ncp)
				continue;
			clock_tick_cpus[i] = ncp;
			i++;
		} while ((ncp = ncp->cpu_next_onln) != cpu_active);
		ASSERT(i == clock_tick_total_cpus);
		membar_sync();
		break;

	default:
		break;
	}

	mutex_exit(&clock_tick_lock);

	return (0);
}