Example #1
/*
 * Start secondary processors in motion.
 */
void
cpu_boot_secondary_processors(void)
{
	int i, pstate;
	struct cpu_info *ci;

	sparc64_ipi_init();

	for (ci = cpus; ci != NULL; ci = ci->ci_next) {
		if (ci->ci_cpuid == CPU_UPAID)
			continue;

		cpu_pmap_prepare(ci, false);
		cpu_args->cb_node = ci->ci_node;
		cpu_args->cb_cpuinfo = ci->ci_paddr;
		membar_sync();

		/* Disable interrupts and start another CPU. */
		pstate = getpstate();
		setpstate(PSTATE_KERN);

		prom_startcpu(ci->ci_node, (void *)cpu_spinup_trampoline, 0);

		for (i = 0; i < 2000; i++) {
			membar_sync();
			if (CPUSET_HAS(cpus_active, ci->ci_index))
				break;
			delay(10000);
		}
		setpstate(pstate);

		if (!CPUSET_HAS(cpus_active, ci->ci_index))
			printf("cpu%d: startup failed\n", ci->ci_cpuid);
	}
}
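
In this example the boot CPU first publishes the start-up arguments (cb_node, cb_cpuinfo), issues membar_sync() so those stores are visible before the secondary CPU is started, and then polls cpus_active until the new CPU announces itself (Example #9 shows the other side). The following is a minimal user-space sketch of that publish-then-poll handshake; it is not taken from the kernel sources, all names are hypothetical, and atomic_thread_fence(memory_order_seq_cst) stands in for membar_sync().

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int boot_args;                    /* payload, analogous to cpu_args */
static atomic_int secondary_ready = 0;   /* analogous to the cpus_active bit */

static void *
secondary(void *arg)
{
	(void)arg;
	/* The "hatching" side: consume the payload, then announce ourselves. */
	printf("secondary sees boot_args = %d\n", boot_args);
	atomic_thread_fence(memory_order_seq_cst);   /* membar_sync() analogue */
	atomic_store(&secondary_ready, 1);
	return NULL;
}

int
main(void)
{
	pthread_t t;

	boot_args = 42;                              /* publish the payload */
	atomic_thread_fence(memory_order_seq_cst);   /* membar_sync() analogue */
	pthread_create(&t, NULL, secondary, NULL);   /* "start" the other CPU */

	while (atomic_load(&secondary_ready) == 0)   /* poll, as the loop above does */
		;
	pthread_join(t, NULL);
	return 0;
}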
Example #2
/* ARGSUSED */
void
pci_bus_exit(dev_info_t *dip, ddi_acc_handle_t handle)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	pbm_t *pbm_p = pci_p->pci_pbm_p;
	ddi_fm_error_t derr;

	ASSERT(MUTEX_HELD(&pbm_p->pbm_pokefault_mutex));

	membar_sync();

	mutex_enter(&pci_p->pci_common_p->pci_fm_mutex);
	ddi_fm_acc_err_get(pbm_p->pbm_excl_handle, &derr, DDI_FME_VERSION);

	if (derr.fme_status == DDI_FM_OK) {
		if (pci_check_error(pci_p) != 0) {
			(void) pci_pbm_err_handler(pci_p->pci_dip, &derr,
					(const void *)pci_p, PCI_BUS_EXIT_CALL);
		}
	}
	mutex_exit(&pci_p->pci_common_p->pci_fm_mutex);

	pbm_p->pbm_excl_handle = NULL;
	mutex_exit(&pbm_p->pbm_pokefault_mutex);
}
Example #3
void
bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
	bus_addr_t offset, bus_size_t len, int ops)
{

	/* XXX: this might need some MD tweaks */
	membar_sync();
}
Example #4
/*
 * Check the condition (fipe_gbl_ctrl.cpu_count == ncpus) to make sure that
 * no other CPU is trying to wake the system up from the memory power-saving
 * state.  If a CPU is waking the system up, fipe_disable() will set
 * fipe_gbl_ctrl.pm_active to false as soon as possible to allow the other
 * CPUs to continue, and that CPU takes responsibility for recovering the
 * system from the memory power-saving state.
 */
static void
fipe_enable(int throttle, cpu_idle_check_wakeup_t check_func, void* check_arg)
{
	extern void membar_sync(void);

	FIPE_KSTAT_DETAIL_INC(pm_tryenter_cnt);

	/*
	 * Check CPU wakeup events.
	 */
	if (check_func != NULL) {
		(*check_func)(check_arg);
	}

	/*
	 * Try to acquire the mutex, which implicitly has the same effect
	 * as calling membar_sync().
	 * If mutex_tryenter() fails, another CPU is waking the system up.
	 */
	if (mutex_tryenter(&fipe_gbl_ctrl.lock) == 0) {
		FIPE_KSTAT_DETAIL_INC(pm_race_cnt);
	/*
	 * Handle a special race condition for the case where a CPU wakes
	 * up and then re-enters the idle state within a short period.
	 * This case cannot be reliably detected by the cpu_count mechanism.
	 */
	} else if (fipe_gbl_ctrl.pm_active) {
		FIPE_KSTAT_DETAIL_INC(pm_race_cnt);
		mutex_exit(&fipe_gbl_ctrl.lock);
	} else {
		fipe_gbl_ctrl.pm_active = B_TRUE;
		membar_sync();
		if (fipe_gbl_ctrl.cpu_count != ncpus) {
			FIPE_KSTAT_DETAIL_INC(pm_race_cnt);
			fipe_gbl_ctrl.pm_active = B_FALSE;
		} else if (fipe_ioat_trigger() != 0) {
			fipe_gbl_ctrl.pm_active = B_FALSE;
		} else if (fipe_gbl_ctrl.cpu_count != ncpus ||
		    fipe_mc_change(throttle) != 0) {
			fipe_gbl_ctrl.pm_active = B_FALSE;
			fipe_ioat_cancel();
			if (fipe_gbl_ctrl.cpu_count != ncpus) {
				FIPE_KSTAT_DETAIL_INC(pm_race_cnt);
			}
		} else if (fipe_gbl_ctrl.cpu_count != ncpus) {
			fipe_gbl_ctrl.pm_active = B_FALSE;
			fipe_mc_restore();
			fipe_ioat_cancel();
			FIPE_KSTAT_DETAIL_INC(pm_race_cnt);
		} else {
			FIPE_KSTAT_DETAIL_INC(pm_success_cnt);
		}
		mutex_exit(&fipe_gbl_ctrl.lock);
	}
}
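
The critical sequence in fipe_enable() is store-then-load: pm_active is set, membar_sync() keeps that store from being reordered after the following load, and cpu_count is then re-checked. Below is a minimal sketch of that store/load (Dekker-style) pattern in portable C11; the names and the counter are made up, and atomic_thread_fence() stands in for membar_sync().

#include <stdatomic.h>
#include <stdbool.h>

#define NCPUS	4	/* illustrative value */

static atomic_bool pm_active;
static atomic_int  idle_cpu_count;	/* maintained by the idle loop (not shown) */

/* Returns true if the caller may enter the memory power-saving state. */
bool
try_enter_powersave(void)
{
	atomic_store(&pm_active, true);
	/*
	 * Full barrier: the store to pm_active must be globally visible
	 * before we load idle_cpu_count, or a concurrent wakeup could be
	 * missed by both sides.
	 */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load(&idle_cpu_count) != NCPUS) {
		atomic_store(&pm_active, false);	/* back out */
		return false;
	}
	return true;
}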
Example #5
/*
 * Lock accesses to the PCI bus so that we can protect against bus errors.
 */
void
pci_bus_enter(dev_info_t *dip, ddi_acc_handle_t handle)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	pbm_t *pbm_p = pci_p->pci_pbm_p;

	membar_sync();

	mutex_enter(&pbm_p->pbm_pokefault_mutex);
	pbm_p->pbm_excl_handle = handle;
}
Example #6
static void
pci_axq_hack_put64(ddi_acc_impl_t *handle, uint64_t *addr, uint64_t data)
{
	pbm_t *pbm_p = (pbm_t *)handle->ahi_common.ah_bus_private;
	uint32_t spl;

	spl = ddi_enter_critical();
	PIO_LIMIT_ENTER(pbm_p);
	i_ddi_swap_put64(handle, addr, data);
	membar_sync();
	PIO_LIMIT_EXIT(pbm_p);
	ddi_exit_critical(spl);
}
Example #7
int
hppa_ipi_send(struct cpu_info *ci, u_long ipi)
{
	struct iomod *cpu;
	KASSERT(ci->ci_flags & CPUF_RUNNING);

	atomic_or_ulong(&ci->ci_ipi, (1L << ipi));

	/* Send an IPI to the specified CPU by triggering EIR{1} (irq 30). */
	cpu = (struct iomod *)(ci->ci_hpa);
	cpu->io_eir = 1;
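	/* Order the doorbell write before any later accesses. */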
	membar_sync();

	return 0;
}
Example #8
static uint_t
pcmu_pbm_error_intr(caddr_t a)
{
	pcmu_t *pcmu_p = (pcmu_t *)a;
	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
	ddi_fm_error_t derr;
	int err = DDI_FM_OK;
	on_trap_data_t *otp = pcbm_p->pcbm_ontrap_data;

	bzero(&derr, sizeof (ddi_fm_error_t));
	derr.fme_version = DDI_FME_VERSION;
	mutex_enter(&pcmu_p->pcmu_err_mutex);
	if ((otp != NULL) && (otp->ot_prot & OT_DATA_ACCESS)) {
		/*
		 * ddi_poke protection, check nexus and children for
		 * expected errors.
		 */
		otp->ot_trap |= OT_DATA_ACCESS;
		membar_sync();
		derr.fme_flag = DDI_FM_ERR_POKE;
		err = pcmu_pbm_err_handler(pcmu_p->pcmu_dip, &derr,
		    (void *)pcmu_p, PCI_INTR_CALL);
	} else if (pcmu_check_error(pcmu_p) != 0) {
		/*
		 * unprotected error, check for all errors.
		 */
		if (pcmu_errtrig_pa) {
			(void) ldphysio(pcmu_errtrig_pa);
		}
		derr.fme_flag = DDI_FM_ERR_UNEXPECTED;
		err = pcmu_pbm_err_handler(pcmu_p->pcmu_dip, &derr,
		    (void *)pcmu_p, PCI_INTR_CALL);
	}

	if (err == DDI_FM_FATAL) {
		if (pcmu_panic_on_fatal_errors) {
			mutex_exit(&pcmu_p->pcmu_err_mutex);
			cmn_err(CE_PANIC, "%s-%d: Fatal PCI bus error(s)\n",
			    ddi_driver_name(pcmu_p->pcmu_dip),
			    ddi_get_instance(pcmu_p->pcmu_dip));
		}
	}

	mutex_exit(&pcmu_p->pcmu_err_mutex);
	pcmu_ib_nintr_clear(pcmu_p->pcmu_ib_p, pcmu_p->pcmu_inos[CBNINTR_PBM]);
	return (DDI_INTR_CLAIMED);
}
Example #9
void
cpu_hatch(void)
{
	char *v = (char *)CPUINFO_VA;
	int i;

	for (i = 0; i < 4*PAGE_SIZE; i += sizeof(long))
		flush(v + i);

	cpu_pmap_init(curcpu());
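	/* Announce this CPU; the boot CPU polls cpus_active (see Example #1). */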
	CPUSET_ADD(cpus_active, cpu_number());
	cpu_reset_fpustate();
	curlwp = curcpu()->ci_data.cpu_idlelwp;
	membar_sync();
	tickintr_establish(PIL_CLOCK, tickintr);
	spl0();
}
Example #10
/*
 * Allocate an IDT vector slot within the given range.
 * cpu_lock will be held unless single-threaded during early boot.
 */
int
idt_vec_alloc(int low, int high)
{
	int vec;

	KASSERT(mutex_owned(&cpu_lock) || !mp_online);

	for (vec = low; vec <= high; vec++) {
		if (idt_allocmap[vec] == 0) {
			/* idt_vec_free() can be unlocked, so membar. */
			membar_sync();
			idt_allocmap[vec] = 1;
			return vec;
		}
	}
	return 0;
}
Example #11
static void
pci_axq_hack_rep_put64(ddi_acc_impl_t *handle, uint64_t *host_addr,
	uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	pbm_t *pbm_p = (pbm_t *)handle->ahi_common.ah_bus_private;
	uint32_t spl;

	spl = ddi_enter_critical();
	PIO_LIMIT_ENTER(pbm_p);
	while (repcount--) {
		i_ddi_put64(handle, dev_addr, *host_addr);
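		/* Let this write complete before the next one is issued. */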
		membar_sync();
		if (flags == DDI_DEV_AUTOINCR)
			dev_addr++;
		host_addr++;
	}
	PIO_LIMIT_EXIT(pbm_p);
	ddi_exit_critical(spl);
}
Example #12
/*
 * Lowest-level serial I/O chip register read/write
 */
static void
sio_put_reg(struct rmc_comm_state *rcs, uint_t reg, uint8_t val)
{
	DPRINTF(rcs, DSER, (CE_CONT, "REG[%d]<-$%02x", reg, val));

	if (rcs->sd_state.sio_handle != NULL && !rcs->sd_state.sio_fault) {
		/*
		 * The chip is mapped as "I/O" (e.g. with the side-effect
		 * bit on SPARC), therefore accesses are required to be
		 * in-order, with no value caching.  However, there can
		 * still be write-behind buffering, so it is not guaranteed
		 * that a write actually reaches the chip in a given time.
		 *
		 * To force the access right through to the chip, we follow
		 * the write with another write (to the SCRATCH register)
		 * and a read (of the value just written to the SCRATCH
		 * register).  The SCRATCH register is specifically provided
		 * for temporary data and has no effect on the SIO's own
		 * operation, making it ideal as a synchronising mechanism.
		 *
		 * If we didn't do this, it would be possible that the new
		 * value wouldn't reach the chip (and have the *intended*
		 * side-effects, such as disabling interrupts), for such a
		 * long time that the processor could execute a *lot* of
		 * instructions - including exiting the interrupt service
		 * routine and re-enabling interrupts.  This effect was
		 * observed to lead to spurious (unclaimed) interrupts in
		 * some circumstances.
		 *
		 * This will no longer be needed once "synchronous" access
		 * handles are available (see PSARC/2000/269 and 2000/531).
		 */
		ddi_put8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + reg, val);
		ddi_put8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + SIO_SCR, val);
		membar_sync();
		(void) ddi_get8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + SIO_SCR);
	}
}
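
The technique documented above, a dummy write plus a read-back of the SCRATCH register to defeat write-behind buffering, is a general idiom for posted device writes. A rough sketch of the same idea against a hypothetical memory-mapped device follows; the register offset and names are invented, volatile accesses stand in for ddi_put8()/ddi_get8(), and __sync_synchronize() (a GCC/Clang builtin full barrier) stands in for membar_sync().

#include <stdint.h>

#define DEV_SCRATCH_REG	7	/* hypothetical scratch-register offset */

/*
 * Write 'val' to device register 'reg' and force it through to the chip
 * by writing and then reading back the scratch register.  'base' is
 * assumed to map uncacheable device registers.
 */
static inline void
dev_put_reg_sync(volatile uint8_t *base, unsigned int reg, uint8_t val)
{
	base[reg] = val;			/* the real write */
	base[DEV_SCRATCH_REG] = val;		/* dummy write to SCRATCH */
	__sync_synchronize();			/* full barrier, membar_sync() analogue */
	(void)base[DEV_SCRATCH_REG];		/* read back: waits out posted writes */
}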
Example #13
/*
 * npf_config_load: the main routine performing configuration load.
 * Performs the necessary synchronisation and destroys the old config.
 */
void
npf_config_load(npf_ruleset_t *rset, npf_tableset_t *tset,
    npf_ruleset_t *nset, npf_rprocset_t *rpset,
    npf_conndb_t *conns, bool flush)
{
	const bool load = conns != NULL;
	npf_config_t *nc, *onc;

	nc = kmem_zalloc(sizeof(npf_config_t), KM_SLEEP);
	nc->n_rules = rset;
	nc->n_tables = tset;
	nc->n_nat_rules = nset;
	nc->n_rprocs = rpset;
	nc->n_default_pass = flush;

	/*
	 * Acquire the lock and perform the first phase:
	 * - Scan and use existing dynamic tables, reload only static.
	 * - Scan and use matching NAT policies to preserve the connections.
	 */
	mutex_enter(&npf_config_lock);
	if ((onc = npf_config) != NULL) {
		npf_ruleset_reload(rset, onc->n_rules, load);
		npf_tableset_reload(tset, onc->n_tables);
		npf_ruleset_reload(nset, onc->n_nat_rules, load);
	}

	/*
	 * Set the new config and release the lock.
	 */
	membar_sync();
	npf_config = nc;
	if (onc == NULL) {
		/* Initial load, done. */
		npf_ifmap_flush();
		npf_conn_load(conns, !flush);
		mutex_exit(&npf_config_lock);
		return;
	}

	/*
	 * If we are going to flush the connections or load the new ones,
	 * then disable the connection tracking for the grace period.
	 */
	if (flush || conns) {
		npf_conn_tracking(false);
	}

	/* Synchronise: drain all references. */
	pserialize_perform(npf_config_psz);
	if (flush) {
		npf_ifmap_flush();
	}

	/*
	 * G/C the existing connections and, if passed, load the new ones.
	 * If not flushing, enable the connection tracking.
	 */
	npf_conn_load(conns, !flush);
	mutex_exit(&npf_config_lock);

	/* Finally, it is safe to destroy the old config. */
	npf_config_destroy(onc);
}
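
npf_config_load() follows the usual pattern for replacing a shared data structure: fully initialise the new object, issue membar_sync() so those stores are visible before the pointer is switched, publish the pointer, wait for existing readers to drain (pserialize_perform()), and only then destroy the old object. A stripped-down sketch of that publication step in C11 is shown below; the types and the wait_for_readers() step are placeholders, not the NPF or pserialize(9) API.

#include <stdatomic.h>
#include <stdlib.h>

struct config {
	int default_pass;
	/* ... rules, tables, ... */
};

static _Atomic(struct config *) active_config;

/* Placeholder for the reader-drain step (pserialize_perform() in NPF). */
static void
wait_for_readers(void)
{
	/* ... */
}

void
config_load(int default_pass)
{
	struct config *nc, *onc;

	nc = calloc(1, sizeof(*nc));
	if (nc == NULL)
		return;
	nc->default_pass = default_pass;

	/* Make the new object's contents visible before publishing it. */
	atomic_thread_fence(memory_order_seq_cst);
	onc = atomic_exchange(&active_config, nc);

	if (onc != NULL) {
		wait_for_readers();	/* drain all references to 'onc' */
		free(onc);
	}
}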
Example #14
/*ARGSUSED*/
static int
clock_tick_cpu_setup(cpu_setup_t what, int cid, void *arg)
{
	cpu_t			*cp, *ncp;
	int			i, set;
	clock_tick_set_t	*csp;

	/*
	 * This function performs some computations at CPU offline/online
	 * time. The computed values are used during tick scheduling and
	 * execution phases. This avoids having to compute things on
	 * every tick. The other benefit is that we perform the
	 * computations only for onlined CPUs (not offlined ones). As a
	 * result, no tick processing is attempted for offlined CPUs.
	 *
	 * Also, cpu_offline() calls this function before checking for
	 * active interrupt threads. This allows us to avoid posting
	 * cross calls to CPUs that are being offlined.
	 */

	cp = cpu[cid];

	mutex_enter(&clock_tick_lock);

	switch (what) {
	case CPU_ON:
		clock_tick_cpus[clock_tick_total_cpus] = cp;
		set = clock_tick_total_cpus / clock_tick_ncpus;
		csp = &clock_tick_set[set];
		csp->ct_end++;
		clock_tick_total_cpus++;
		clock_tick_nsets =
		    (clock_tick_total_cpus + clock_tick_ncpus - 1) /
		    clock_tick_ncpus;
		CPUSET_ADD(clock_tick_online_cpuset, cp->cpu_id);
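		/* Make the updated cpuset and counters visible to other CPUs. */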
		membar_sync();
		break;

	case CPU_OFF:
		if (&sync_softint != NULL)
			sync_softint(clock_tick_online_cpuset);
		CPUSET_DEL(clock_tick_online_cpuset, cp->cpu_id);
		clock_tick_total_cpus--;
		clock_tick_cpus[clock_tick_total_cpus] = NULL;
		clock_tick_nsets =
		    (clock_tick_total_cpus + clock_tick_ncpus - 1) /
		    clock_tick_ncpus;
		set = clock_tick_total_cpus / clock_tick_ncpus;
		csp = &clock_tick_set[set];
		csp->ct_end--;

		i = 0;
		ncp = cpu_active;
		do {
			if (cp == ncp)
				continue;
			clock_tick_cpus[i] = ncp;
			i++;
		} while ((ncp = ncp->cpu_next_onln) != cpu_active);
		ASSERT(i == clock_tick_total_cpus);
		membar_sync();
		break;

	default:
		break;
	}

	mutex_exit(&clock_tick_lock);

	return (0);
}
Example #15
/*ARGSUSED*/
int
suspend_start(char *error_reason, size_t max_reason_len)
{
	uint64_t	source_tick;
	uint64_t	source_stick;
	uint64_t	rv;
	timestruc_t	source_tod;
	int		spl;

	ASSERT(suspend_supported());
	DBG("suspend: %s", __func__);

	sfmmu_ctxdoms_lock();

	mutex_enter(&cpu_lock);

	/* Suspend the watchdog */
	watchdog_suspend();

	/* Record the TOD */
	mutex_enter(&tod_lock);
	source_tod = tod_get();
	mutex_exit(&tod_lock);

	/* Pause all other CPUs */
	pause_cpus(NULL);
	DBG_PROM("suspend: CPUs paused\n");

	/* Suspend cyclics */
	cyclic_suspend();
	DBG_PROM("suspend: cyclics suspended\n");

	/* Disable interrupts */
	spl = spl8();
	DBG_PROM("suspend: spl8()\n");

	source_tick = gettick_counter();
	source_stick = gettick();
	DBG_PROM("suspend: source_tick: 0x%lx\n", source_tick);
	DBG_PROM("suspend: source_stick: 0x%lx\n", source_stick);

	/*
	 * Call into the HV to initiate the suspend. hv_guest_suspend()
	 * returns after the guest has been resumed or if the suspend
	 * operation failed or was cancelled. After a successful suspend,
	 * the %tick and %stick registers may have changed by an amount
	 * that is not proportional to the amount of time that has passed.
	 * They may have jumped forwards or backwards. Some variation is
	 * allowed and accounted for using suspend_tick_stick_max_delta,
	 * but otherwise this jump must be uniform across all CPUs and we
	 * operate under the assumption that it is (maintaining two global
	 * offset variables, one for %tick and one for %stick).
	 */
	DBG_PROM("suspend: suspending... \n");
	rv = hv_guest_suspend();
	if (rv != 0) {
		splx(spl);
		cyclic_resume();
		start_cpus();
		watchdog_resume();
		mutex_exit(&cpu_lock);
		sfmmu_ctxdoms_unlock();
		DBG("suspend: failed, rv: %ld\n", rv);
		return (rv);
	}

	suspend_count++;

	/* Update the global tick and stick offsets and the preserved TOD */
	set_tick_offsets(source_tick, source_stick, &source_tod);

	/* Ensure new offsets are globally visible before resuming CPUs */
	membar_sync();

	/* Enable interrupts */
	splx(spl);

	/* Set the {%tick,%stick}.NPT bits on all CPUs */
	if (enable_user_tick_stick_emulation) {
		xc_all((xcfunc_t *)enable_tick_stick_npt, NULL, NULL);
		xt_sync(cpu_ready_set);
		ASSERT(gettick_npt() != 0);
		ASSERT(getstick_npt() != 0);
	}

	/* If emulation is enabled, but not currently active, enable it */
	if (enable_user_tick_stick_emulation && !tick_stick_emulation_active) {
		tick_stick_emulation_active = B_TRUE;
	}

	sfmmu_ctxdoms_remove();

	/* Resume cyclics, unpause CPUs */
	cyclic_resume();
	start_cpus();

	/* Set the TOD */
	mutex_enter(&tod_lock);
	tod_set(source_tod);
	mutex_exit(&tod_lock);

	/* Re-enable the watchdog */
	watchdog_resume();

	mutex_exit(&cpu_lock);

	/* Download the latest MD */
	if ((rv = mach_descrip_update()) != 0)
		cmn_err(CE_PANIC, "suspend: mach_descrip_update failed: %ld",
		    rv);

	sfmmu_ctxdoms_update();
	sfmmu_ctxdoms_unlock();

	/* Get new MD, update CPU mappings/relationships */
	if (suspend_update_cpu_mappings)
		update_cpu_mappings();

	DBG("suspend: target tick: 0x%lx", gettick_counter());
	DBG("suspend: target stick: 0x%llx", gettick());
	DBG("suspend: user %%tick/%%stick emulation is %d",
	    tick_stick_emulation_active);
	DBG("suspend: finished");

	return (0);
}