Example #1
/*
 * Flush pte on all active processors.
 */
void
smp_tlb_flush_pte(vaddr_t va, struct pmap *pm)
{
	sparc64_cpuset_t cpuset;
	struct cpu_info *ci;
	int ctx;
	bool kpm = (pm == pmap_kernel());

	/* Flush our own TLB */
	ctx = pm->pm_ctx[cpu_number()];
	KASSERT(ctx >= 0);
	if (kpm || ctx > 0)
		sp_tlb_flush_pte(va, ctx);

	CPUSET_ASSIGN(cpuset, cpus_active);
	CPUSET_DEL(cpuset, cpu_number());
	if (CPUSET_EMPTY(cpuset))
		return;

	/* Flush others */
	for (ci = cpus; ci != NULL; ci = ci->ci_next) {
		if (CPUSET_HAS(cpuset, ci->ci_index)) {
			CPUSET_DEL(cpuset, ci->ci_index);
			ctx = pm->pm_ctx[ci->ci_index];
			KASSERT(ctx >= 0);
			if (!kpm && ctx == 0)
				continue;
			sparc64_send_ipi(ci->ci_cpuid, smp_tlb_flush_pte_func, va, ctx);
		}
	}
}
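
All of these examples revolve around the CPUSET_* macros. The real sparc64_cpuset_t and Solaris cpuset_t types are machine-dependent and may span several words; as a mental model only, here is a minimal single-word sketch of the semantics used above (the demo_ names are hypothetical, not the kernel definitions):

#include <stdint.h>

typedef uint64_t demo_cpuset_t;		/* one bit per CPU, up to 64 CPUs */

#define DEMO_CPUSET_ADD(s, n)	((s) |=  ((demo_cpuset_t)1 << (n)))
#define DEMO_CPUSET_DEL(s, n)	((s) &= ~((demo_cpuset_t)1 << (n)))
#define DEMO_CPUSET_HAS(s, n)	(((s) >> (n)) & 1)
#define DEMO_CPUSET_EMPTY(s)	((s) == 0)
#define DEMO_CPUSET_SUB(a, b)	((a) &= ~(b))	/* a = a minus b */
#define DEMO_CPUSET_ASSIGN(a, b)	((a) = (b))
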
static int
poweroff_vcpu(struct cpu *cp)
{
	int error;

	ASSERT(MUTEX_HELD(&cpu_lock));

	ASSERT(CPU->cpu_id != cp->cpu_id);
	ASSERT(cp->cpu_flags & CPU_QUIESCED);

	mp_enter_barrier();

	if ((error = xen_vcpu_down(cp->cpu_id)) == 0) {
		ASSERT(cpu_phase[cp->cpu_id] == CPU_PHASE_SAFE);

		CPUSET_DEL(cpu_ready_set, cp->cpu_id);

		cp->cpu_flags |= CPU_POWEROFF | CPU_OFFLINE;
		cp->cpu_flags &=
		    ~(CPU_RUNNING | CPU_READY | CPU_EXISTS | CPU_ENABLE);

		cpu_phase[cp->cpu_id] = CPU_PHASE_POWERED_OFF;

		cpu_set_state(cp);
	}

	mp_leave_barrier();

	return (error);
}
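
Note the ordering in poweroff_vcpu(): the hypercall xen_vcpu_down() runs first, and the kernel bookkeeping (ready set, flags, phase) is updated only when it returns 0, so a failed power-off leaves the CPU's recorded state untouched. A hedged sketch of a call site (everything around the call is an assumption, not taken from the source):

	mutex_enter(&cpu_lock);
	/* the caller must already have quiesced cp (CPU_QUIESCED) */
	error = poweroff_vcpu(cp);
	mutex_exit(&cpu_lock);
	if (error != 0)
		cmn_err(CE_WARN, "cpu%d: power-off failed (%d)",
		    cp->cpu_id, error);
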
Example #3
/*
 * Send an IPI to all in the list but ourselves.
 */
void
sparc64_multicast_ipi(sparc64_cpuset_t cpuset, ipifunc_t func, uint64_t arg1,
		      uint64_t arg2)
{
	struct cpu_info *ci;

	CPUSET_DEL(cpuset, cpu_number());
	if (CPUSET_EMPTY(cpuset))
		return;

	for (ci = cpus; ci != NULL; ci = ci->ci_next) {
		if (CPUSET_HAS(cpuset, ci->ci_index)) {
			CPUSET_DEL(cpuset, ci->ci_index);
			sparc64_send_ipi(ci->ci_cpuid, func, arg1, arg2);
		}
	}
}
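
A typical caller builds its target set from cpus_active and lets sparc64_multicast_ipi() strip the sender, as mp_pause_cpus() further below does. In outline (the handler name here is illustrative, not a real ipifunc):

	sparc64_cpuset_t cpuset;

	CPUSET_ASSIGN(cpuset, cpus_active);
	/* no need to delete cpu_number(): the callee does it */
	sparc64_multicast_ipi(cpuset, demo_ipi_handler, 0, 0);
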
void
cpu_multicast_ipi(__cpuset_t cpuset, int tag)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	CPUSET_DEL(cpuset, cpu_index(curcpu()));
	if (CPUSET_EMPTY_P(cpuset))
		return;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (CPUSET_HAS_P(cpuset, cpu_index(ci))) {
			CPUSET_DEL(cpuset, cpu_index(ci));
			(void)cpu_send_ipi(ci, tag);
		}
	}
}
/*
 * Resume a single cpu
 */
void
cpu_resume(int index)
{
	CPUSET_CLEAR(cpus_resumed);
	CPUSET_DEL(cpus_paused, index);

	if (cpu_ipi_wait(&cpus_resumed, CPUSET_SINGLE(index)))
		cpu_ipi_error("resume", cpus_resumed, CPUSET_SINGLE(index));
}
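
cpu_ipi_wait() itself is not shown in these examples; from its call sites it takes a pointer to the acknowledgement set plus the expected set, and returns nonzero on failure. A sketch under those assumptions, not the actual implementation:

static int
demo_ipi_wait(volatile __cpuset_t *ack, __cpuset_t expected)
{
	int us;

	/* Spin until every expected CPU has checked in, or give up. */
	for (us = 0; us < 1000000; us++) {
		__cpuset_t pending = expected;

		CPUSET_SUB(pending, *ack);
		if (CPUSET_EMPTY_P(pending))
			return 0;		/* success */
		DELAY(1);
	}
	return 1;	/* timed out; the caller reports the stragglers */
}
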
Example #6
/*
 * Internal cpu startup sequencer
 * The sequence is as follows:
 *
 * MASTER	SLAVE
 * -------	----------
 * assume the kernel data is initialized
 * clear the proxy bit
 * start the slave cpu
 * wait for the slave cpu to set the proxy
 *
 *		the slave runs slave_startup and then sets the proxy
 *		the slave waits for the master to add slave to the ready set
 *
 * the master finishes the initialization and
 * adds the slave to the ready set
 *
 *		the slave exits the startup thread and is running
 */
void
start_cpu(int cpuid, void(*flag_func)(int))
{
	extern void cpu_startup(int);
	int timout;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Before we begin the dance, tell DTrace that we're about to start
	 * a CPU.
	 */
	if (dtrace_cpustart_init != NULL)
		(*dtrace_cpustart_init)();

	/* start the slave cpu */
	CPUSET_DEL(proxy_ready_set, cpuid);
	if (prom_test("SUNW,start-cpu-by-cpuid") == 0) {
		(void) prom_startcpu_bycpuid(cpuid, (caddr_t)&cpu_startup,
		    cpuid);
	} else {
		/* "by-cpuid" interface didn't exist.  Do it the old way */
		pnode_t nodeid = cpunodes[cpuid].nodeid;

		ASSERT(nodeid != (pnode_t)0);
		(void) prom_startcpu(nodeid, (caddr_t)&cpu_startup, cpuid);
	}

	/* wait for the slave cpu to check in. */
	for (timout = CPU_WAKEUP_GRACE_MSEC; timout; timout--) {
		if (CPU_IN_SET(proxy_ready_set, cpuid))
			break;
		DELAY(1000);
	}
	if (timout == 0) {
		panic("cpu%d failed to start (2)", cpuid);
	}

	/*
	 * The slave has started; we can tell DTrace that it's safe again.
	 */
	if (dtrace_cpustart_fini != NULL)
		(*dtrace_cpustart_fini)();

	/* run the master side of stick synchronization for the slave cpu */
	sticksync_master();

	/*
	 * Deal with the cpu flags in a phase-specific manner.
	 * For various reasons, this needs to run after the slave
	 * is checked in but before the slave is released.
	 */
	(*flag_func)(cpuid);

	/* release the slave */
	CPUSET_ADD(cpu_ready_set, cpuid);
}
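
The slave's half of the handshake described in the block comment above would, in outline, look like the sketch below (illustrative only; the real slave startup code does far more per-CPU initialization):

static void
demo_slave_startup(int cpuid)
{
	/* ... per-CPU initialization ... */

	/* check in: tell the master this CPU is alive */
	CPUSET_ADD(proxy_ready_set, cpuid);

	/* wait until the master finishes initialization and releases us */
	while (!CPU_IN_SET(cpu_ready_set, cpuid))
		DELAY(1000);

	/* exit the startup thread and enter normal operation */
}
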
Example #7
/*
 * Indicate that this core (cpuid) is being DR removed.
 */
void
cmp_delete_cpu(processorid_t cpuid)
{
	chipid_t chipid;

	/* N.B. We're assuming that the cpunode[].portid is still intact */
	chipid = cpunodes[cpuid].portid;
	CPUSET_DEL(chips[chipid], cpuid);
}
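
The DR-add path is presumably the mirror image, recording the CPU in its chip's set. A counterpart sketched from symmetry rather than quoted from the source:

void
demo_cmp_add_cpu(processorid_t cpuid)
{
	chipid_t chipid = cpunodes[cpuid].portid;

	CPUSET_ADD(chips[chipid], cpuid);
}
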
Example #8
/*
 * Print an error message.
 */
void
sparc64_ipi_error(const char *s, sparc64_cpuset_t cpus_succeeded,
	sparc64_cpuset_t cpus_expected)
{
	int cpuid;

	CPUSET_DEL(cpus_expected, cpus_succeeded);
	if (!CPUSET_EMPTY(cpus_expected)) {
		printf("Failed to %s:", s);
		do {
			cpuid = CPUSET_NEXT(cpus_expected);
			CPUSET_DEL(cpus_expected, cpuid);
			printf(" cpu%d", cpuid);
		} while (!CPUSET_EMPTY(cpus_expected));
	}

	printf("\n");
}
static void
cpu_ipi_error(const char *s, __cpuset_t succeeded, __cpuset_t expected)
{
	CPUSET_SUB(expected, succeeded);
	if (!CPUSET_EMPTY_P(expected)) {
		printf("Failed to %s:", s);
		do {
			int index = CPUSET_NEXT(expected);
			CPUSET_DEL(expected, index);
			printf(" cpu%d", index);
		} while (!CPUSET_EMPTY_P(expected));
		printf("\n");
	}
}
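
Both error printers use the same drain idiom: pull CPUSET_NEXT(), delete it, repeat until the set is empty. The loop destroys its operand, which is why both functions take the sets by value. A compilable user-space analog of the loop with a plain bitmask (names and values are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

int
main(void)
{
	uint32_t expected = 0x2c;	/* cpus 2, 3 and 5 failed */

	printf("Failed to pause:");
	while (expected != 0) {
		int cpuid = ffs((int)expected) - 1;	/* lowest set bit */

		expected &= ~(1u << cpuid);
		printf(" cpu%d", cpuid);
	}
	printf("\n");
	return 0;
}
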
Example #10
/*
 * Pause all running cpus, excluding current cpu.
 */
void
cpu_pause_others(void)
{
	__cpuset_t cpuset;

	CPUSET_ASSIGN(cpuset, cpus_running);
	CPUSET_DEL(cpuset, cpu_index(curcpu()));

	if (CPUSET_EMPTY_P(cpuset))
		return;

	cpu_multicast_ipi(cpuset, IPI_SUSPEND);
	if (cpu_ipi_wait(&cpus_paused, cpuset))
		cpu_ipi_error("pause", cpus_paused, cpuset);
}
Example #11
static void
pwrnow_power(cpuset_t set, uint32_t req_state)
{
	/*
	 * If this thread is already running on a target CPU,
	 * just make the transition request. Otherwise, we'll
	 * need to make a cross-call.
	 */
	kpreempt_disable();
	if (CPU_IN_SET(set, CPU->cpu_id)) {
		pwrnow_pstate_transition(req_state);
		CPUSET_DEL(set, CPU->cpu_id);
	}
	if (!CPUSET_ISNULL(set)) {
		xc_call((xc_arg_t)req_state, NULL, NULL,
		    CPUSET2BV(set), (xc_func_t)pwrnow_pstate_transition);
	}
	kpreempt_enable();
}
Example #12
/*
 * Pause all cpus but ourselves.
 */
void
mp_pause_cpus(void)
{
	int i = 3;
	sparc64_cpuset_t cpuset;

	CPUSET_ASSIGN(cpuset, cpus_active);
	CPUSET_DEL(cpuset, cpu_number());
	while (i-- > 0) {
		if (CPUSET_EMPTY(cpuset))
			return;

		sparc64_multicast_ipi(cpuset, sparc64_ipi_pause, 0, 0);
		if (!sparc64_ipi_wait(&cpus_paused, cpuset))
			return;
		CPUSET_SUB(cpuset, cpus_paused);
	}
	sparc64_ipi_error("pause", cpus_paused, cpuset);
}
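
mp_pause_cpus() retries up to three times, and CPUSET_SUB(cpuset, cpus_paused) shrinks the target set by the CPUs that have already acknowledged, so each resend reaches only the stragglers. The generic shape of that loop (the demo_ helpers are hypothetical):

	int tries;

	for (tries = 3; tries > 0; tries--) {
		if (CPUSET_EMPTY(pending))
			return;				/* everyone checked in */
		demo_send_ipi_to_set(pending);		/* hypothetical helper */
		if (demo_wait_for_acks(&acked, pending) == 0)
			return;				/* all acknowledged */
		CPUSET_SUB(pending, acked);		/* resend to stragglers only */
	}
	demo_report_failure(pending);			/* hypothetical helper */
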
Example #13
/*
 * Halt all cpus but ourselves.
 */
void
mp_halt_cpus(void)
{
	sparc64_cpuset_t cpumask, cpuset;
	struct cpu_info *ci;

	CPUSET_ASSIGN(cpuset, cpus_active);
	CPUSET_DEL(cpuset, cpu_number());
	CPUSET_ASSIGN(cpumask, cpuset);
	CPUSET_SUB(cpuset, cpus_halted);

	if (CPUSET_EMPTY(cpuset))
		return;

	CPUSET_CLEAR(cpus_spinning);
	sparc64_multicast_ipi(cpuset, sparc64_ipi_halt, 0, 0);
	if (sparc64_ipi_wait(&cpus_halted, cpumask))
		sparc64_ipi_error("halt", cpumask, cpus_halted);

	/*
	 * Depending on available firmware methods, other cpus will
	 * either shut down themselves, or spin and wait for us to
	 * stop them.
	 */
	if (CPUSET_EMPTY(cpus_spinning)) {
		/* give other cpus a few cycles to actually power down */
		delay(10000);
		return;
	}
	/* there are cpus spinning - shut them down if we can */
	if (prom_has_stop_other()) {
		for (ci = cpus; ci != NULL; ci = ci->ci_next) {
			if (!CPUSET_HAS(cpus_spinning, ci->ci_index))
				continue;
			prom_stop_other(ci->ci_cpuid);
		}
	}
}
Example #14
/*
 * Halt all running cpus, excluding current cpu.
 */
void
cpu_halt_others(void)
{
	__cpuset_t cpumask, cpuset;

	CPUSET_ASSIGN(cpuset, cpus_running);
	CPUSET_DEL(cpuset, cpu_index(curcpu()));
	CPUSET_ASSIGN(cpumask, cpuset);
	CPUSET_SUB(cpuset, cpus_halted);

	if (CPUSET_EMPTY_P(cpuset))
		return;

	cpu_multicast_ipi(cpuset, IPI_HALT);
	if (cpu_ipi_wait(&cpus_halted, cpumask))
		cpu_ipi_error("halt", cpumask, cpus_halted);

	/*
	 * TBD
	 * Depending on available firmware methods, other cpus will
	 * either shut down themselves, or spin and wait for us to
	 * stop them.
	 */
}
Example #15
/*
 * Resume a single cpu
 */
void
mp_resume_cpu(int cno)
{
	CPUSET_DEL(cpus_paused, cno);
	membar_Sync();
}
/*ARGSUSED*/
static int
clock_tick_cpu_setup(cpu_setup_t what, int cid, void *arg)
{
	cpu_t			*cp, *ncp;
	int			i, set;
	clock_tick_set_t	*csp;

	/*
	 * This function performs some computations at CPU offline/online
	 * time. The computed values are used during tick scheduling and
	 * execution phases. This avoids having to compute things on
	 * every tick. The other benefit is that we perform the
	 * computations only for onlined CPUs (not offlined ones). As a
	 * result, no tick processing is attempted for offlined CPUs.
	 *
	 * Also, cpu_offline() calls this function before checking for
	 * active interrupt threads. This allows us to avoid posting
	 * cross calls to CPUs that are being offlined.
	 */

	cp = cpu[cid];

	mutex_enter(&clock_tick_lock);

	switch (what) {
	case CPU_ON:
		clock_tick_cpus[clock_tick_total_cpus] = cp;
		set = clock_tick_total_cpus / clock_tick_ncpus;
		csp = &clock_tick_set[set];
		csp->ct_end++;
		clock_tick_total_cpus++;
		clock_tick_nsets =
		    (clock_tick_total_cpus + clock_tick_ncpus - 1) /
		    clock_tick_ncpus;
		CPUSET_ADD(clock_tick_online_cpuset, cp->cpu_id);
		membar_sync();
		break;

	case CPU_OFF:
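		/*
		 * sync_softint is a weak symbol; the address test
		 * below checks whether this platform provides it
		 * (an interpretation, not from the original source).
		 */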
		if (&sync_softint != NULL)
			sync_softint(clock_tick_online_cpuset);
		CPUSET_DEL(clock_tick_online_cpuset, cp->cpu_id);
		clock_tick_total_cpus--;
		clock_tick_cpus[clock_tick_total_cpus] = NULL;
		clock_tick_nsets =
		    (clock_tick_total_cpus + clock_tick_ncpus - 1) /
		    clock_tick_ncpus;
		set = clock_tick_total_cpus / clock_tick_ncpus;
		csp = &clock_tick_set[set];
		csp->ct_end--;

		i = 0;
		ncp = cpu_active;
		do {
			if (cp == ncp)
				continue;
			clock_tick_cpus[i] = ncp;
			i++;
		} while ((ncp = ncp->cpu_next_onln) != cpu_active);
		ASSERT(i == clock_tick_total_cpus);
		membar_sync();
		break;

	default:
		break;
	}

	mutex_exit(&clock_tick_lock);

	return (0);
}
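
clock_tick_nsets is derived with the standard ceiling-division idiom (a + b - 1) / b: for example, 9 online CPUs with clock_tick_ncpus = 4 gives (9 + 4 - 1) / 4 = 3 sets. A compilable illustration:

#include <stdio.h>

int
main(void)
{
	int ncpus = 4;		/* CPUs per set (clock_tick_ncpus) */
	int total;

	for (total = 0; total <= 9; total++)
		printf("%d cpus -> %d sets\n",
		    total, (total + ncpus - 1) / ncpus);
	return 0;
}
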
Example #17
void
softint(void)
{
	softcall_t *sc = NULL;
	void (*func)();
	caddr_t arg;
	int cpu_id = CPU->cpu_id;

	mutex_enter(&softcall_lock);

	if (softcall_state & (SOFT_STEAL|SOFT_PEND)) {
		softcall_state = SOFT_DRAIN;
	} else  {
		/*
		 * The check for softcall_cpuset being
		 * NULL is required because softint() may
		 * be called very early during boot.
		 */
		if (softcall_cpuset != NULL &&
		    CPU_IN_SET(*softcall_cpuset, cpu_id))
			CPUSET_DEL(*softcall_cpuset, cpu_id);
		mutex_exit(&softcall_lock);
		goto out;
	}

	/*
	 * Setting softcall_latest_cpuid to current CPU ensures
	 * that there is only one active softlevel1 handler to
	 * process softcall queues.
	 *
	 * Since softcall_lock is dropped before calling
	 * func (the callback), we need softcall_latest_cpuid
	 * to prevent two softlevel1 handlers working on the
	 * queue when the first softlevel1 handler gets
	 * stuck due to high interrupt load.
	 */
	softcall_latest_cpuid = cpu_id;

	/* add ourselves to the cpuset */
	if (!CPU_IN_SET(*softcall_cpuset, cpu_id))
		CPUSET_ADD(*softcall_cpuset, cpu_id);

	for (;;) {
		softcall_tick = lbolt;
		if ((sc = softhead) != NULL) {
			func = sc->sc_func;
			arg = sc->sc_arg;
			softhead = sc->sc_next;
			sc->sc_next = softfree;
			softfree = sc;
		}

		if (sc == NULL) {
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			softcall_state = SOFT_IDLE;
			ASSERT(softcall_latest_cpuid == cpu_id);
			softcall_latest_cpuid = -1;

			mutex_exit(&softcall_lock);
			break;
		}

		mutex_exit(&softcall_lock);
		func(arg);
		mutex_enter(&softcall_lock);

		/*
		 * No longer need softcall processing from current
		 * interrupt handler because either
		 *  (a) softcall is in SOFT_IDLE state or
		 *  (b) There is a CPU already draining softcall
		 *	queue and the current softlevel1 is no
		 *	longer required.
		 */
		if (softcall_latest_cpuid != cpu_id) {
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			mutex_exit(&softcall_lock);
			break;
		}
	}

out:
	if ((func = kdi_softcall_func) != NULL) {
		kdi_softcall_func = NULL;
		func();
	}
}
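
The queue handling in softint() is a pair of classic singly-linked list splices: pop the head of the pending queue (softhead) and push the spent entry onto the free list (softfree), both under softcall_lock. A user-space analog of just that splice (the demo_ names are illustrative):

#include <stddef.h>

struct demo_softcall {
	void (*sc_func)(void *);
	void *sc_arg;
	struct demo_softcall *sc_next;
};

static struct demo_softcall *demo_head;	/* pending queue */
static struct demo_softcall *demo_free;	/* free list */

static struct demo_softcall *
demo_pop_pending(void)
{
	struct demo_softcall *sc = demo_head;

	if (sc != NULL) {
		demo_head = sc->sc_next;	/* unlink from the pending queue */
		sc->sc_next = demo_free;	/* push onto the free list */
		demo_free = sc;
	}
	return sc;	/* sc_func and sc_arg remain valid for the caller */
}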