Example 1
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
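The examples in this listing revolve around out_be64() (and its counterpart in_be64()), the powerpc accessors for 64-bit big-endian MMIO stores and loads. As a point of reference, here is a minimal sketch of the read-modify-write idiom most of them share; the register and the bit chosen are illustrative, not taken from any of the drivers shown here.

#include <asm/io.h>

/* Sketch: set one bit of a hypothetical 64-bit control register
 * without disturbing the others. */
static void example_set_enable(u64 __iomem *reg)
{
	u64 val;

	val = in_be64(reg);		/* 64-bit big-endian MMIO load */
	val |= 0x8000000000000000ull;	/* assumed enable bit (MSB) */
	out_be64(reg, val);		/* 64-bit big-endian MMIO store */
}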
Example 2
static void xscom_reset(uint32_t gcid, bool need_delay)
{
	u64 hmer;
	uint32_t recv_status_reg, log_reg, err_reg;
	struct timespec ts;

	/* Clear errors in HMER */
	mtspr(SPR_HMER, HMER_CLR_MASK);

	/* Setup local and target scom addresses */
	if (proc_gen == proc_gen_p9) {
		recv_status_reg = 0x00090018;
		log_reg = 0x00090012;
		err_reg = 0x00090013;
	} else {
		recv_status_reg = 0x202000f;
		log_reg = 0x2020007;
		err_reg = 0x2020009;
	}

	/* First we need to write 0 to a register on our chip */
	out_be64(xscom_addr(this_cpu()->chip_id, recv_status_reg), 0);
	hmer = xscom_wait_done();
	if (hmer & SPR_HMER_XSCOM_FAIL)
		goto fail;

	/* Then we need to clear those two other registers on the target */
	out_be64(xscom_addr(gcid, log_reg), 0);
	hmer = xscom_wait_done();
	if (hmer & SPR_HMER_XSCOM_FAIL)
		goto fail;
	out_be64(xscom_addr(gcid, err_reg), 0);
	hmer = xscom_wait_done();
	if (hmer & SPR_HMER_XSCOM_FAIL)
		goto fail;

	if (need_delay) {
		/*
		 * It has been observed that an immediate retry of an
		 * XSCOM operation sometimes returns wrong data, so give
		 * the XSCOM reset time to take effect. A delay of 10 ms
		 * was found to work experimentally.
		 * FIXME: Replace the 10ms delay by the exact delay
		 * needed, or some other way to confirm XSCOM reset
		 * completion, after checking with HW folks.
		 */
		ts.tv_sec = 0;
		ts.tv_nsec = 10 * 1000 * 1000;	/* 10 ms, matching the comment */
		nanosleep_nopoll(&ts, NULL);
	}
	return;
 fail:
	/* Fatal error resetting XSCOM */
	log_simple_error(&e_info(OPAL_RC_XSCOM_RESET),
		"XSCOM: Fatal error resetting engine after failed access !\n");

	/* XXX Generate error log ? attn ? panic ?
	 * If we decide to panic, change the above severity to PANIC
	 */
}
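The XSCOM examples call xscom_wait_done() after every access, but the listing never shows it. A plausible minimal implementation simply polls the HMER until the engine reports completion; this is a sketch assuming a done bit named SPR_HMER_XSCOM_DONE, not necessarily the firmware's exact code.

static uint64_t xscom_wait_done(void)
{
	uint64_t hmer;

	/* Spin until the XSCOM engine sets its done bit in HMER;
	 * callers then test SPR_HMER_XSCOM_FAIL for errors. */
	do
		hmer = mfspr(SPR_HMER);
	while (!(hmer & SPR_HMER_XSCOM_DONE));

	return hmer;
}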
Example 3
static void _rhea_hasher_channel_register_set(struct rhea_channel *channel,
					      unsigned hash_bits)
{
	u64 reg = 0x0ULL;
	struct rhea_pport *pport;
	struct rhea_pport_bpfc *bpfc;

	if (NULL == channel)
		return;

	/* always get pport from channel */
	pport = _rhea_pport_get(channel->pport_nr);
	if (NULL == pport) {
		rhea_error("Invalid pport number");
		return;
	}

	if (NULL == channel->qpn) {
		rhea_error("Only the main channel ID can set the hasher");
		return;
	}

	bpfc = &pport->pport_regs->bpfc;

	/* reset number of used hash bits for all channels */
	switch (channel->type) {
	case HEA_UC_PORT:

		reg = in_be64(&bpfc->p_rcu);
		reg = hea_set_u64_bits(reg, hash_bits, 9, 11);
		out_be64(&bpfc->p_rcu, reg);
		break;

	case HEA_MC_PORT:

		reg = in_be64(&bpfc->p_rcm);
		reg = hea_set_u64_bits(reg, hash_bits, 9, 11);
		out_be64(&bpfc->p_rcm, reg);
		break;

	case HEA_BC_PORT:

		reg = in_be64(&bpfc->p_rcb);
		reg = hea_set_u64_bits(reg, hash_bits, 9, 11);
		out_be64(&bpfc->p_rcb, reg);
		break;

	default:

		if (channel->type >= HEA_LPORT_0) {
			int lport_index = hea_lport_index_get(channel->type);

			reg = in_be64(&bpfc->pl_rc[lport_index]);
			reg = hea_set_u64_bits(reg, hash_bits, 9, 11);
			out_be64(&bpfc->pl_rc[lport_index], reg);
		}
		break;
	}
}
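hea_set_u64_bits(reg, value, first, last) appears throughout the rhea examples. Judging purely from its call sites, it behaves as a bit-field insert in IBM MSB-0 numbering (bit 0 is the most significant bit). The sketch below is inferred from that usage and is not the driver's actual implementation.

/* Insert 'value' into bits [first..last] of 'reg', MSB-0 numbering.
 * Hypothetical implementation inferred from the call sites above. */
static inline u64 hea_set_u64_bits(u64 reg, u64 value, unsigned first,
				   unsigned last)
{
	unsigned width = last - first + 1;
	u64 mask = (width >= 64) ? ~0ULL : ((1ULL << width) - 1);
	unsigned shift = 63 - last;	/* distance of 'last' from the LSB */

	return (reg & ~(mask << shift)) | ((value & mask) << shift);
}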
Example 4
inline void poweren_veth_vf_poke(unsigned short flags)
{
	u64 *irq_trigger = poweren_ep_get_interrupt_trigger(vf_dev);

	/* reset the PCI trigger register, then poke */
	out_be64(irq_trigger, POWEREN_VETH_VF_RESET);
	out_be64(irq_trigger, flags);
}
Example 5
int rhea_hasher_set(struct rhea_channel *channel, u64 sc,
		    u64 mask0, u64 mask1)
{
	int rc = 0;
	unsigned hash_bits = 0;
	struct rhea_pport *pport = NULL;
	struct rhea_channel_resource_map *map_qpn = NULL;

	if (NULL == channel)
		return -EINVAL;

	/* always get pport from channel */
	pport = _rhea_pport_get(channel->pport_nr);
	if (NULL == pport) {
		rhea_error("Invalid pport number");
		return -EINVAL;
	}

	if (NULL == channel->qpn) {
		rhea_error("Only the main channel ID can set the hasher");
		return -EINVAL;
	}

	spin_lock(&pport->lock);

	/* set registers */
	out_be64(&pport->pport_regs->bpfc.pg_hashm[0], mask0);
	out_be64(&pport->pport_regs->bpfc.pg_hashm[1], mask1);

	out_be64(&pport->pport_regs->bpfc.pg_hashsc, sc);

	/* save the values */
	pport->hasher->mask0 = mask0;
	pport->hasher->mask1 = mask1;
	pport->hasher->sc = sc;

	map_qpn =
		_rhea_channel_resource_map_get(channel->qpn,
					       channel->qpn_base);
	if (NULL == map_qpn) {
		rhea_error("Was not able to find the QPN map");
		/* don't leak the pport lock taken above */
		spin_unlock(&pport->lock);
		return -EINVAL;
	}

	/* get number of bits used by qpn alloc */
	hash_bits = map_qpn->bits;

	rhea_debug("Hash bits: %u", hash_bits);

	/* sets the bits for the hasher */
	_rhea_hasher_channel_register_set(channel, hash_bits);

	spin_unlock(&pport->lock);

	return rc;
}
Example 6
static void _rhea_hasher_register_reset(struct rhea_pport_bpfc *bpfc)
{
	if (NULL == bpfc)
		return;

	/* reset hasher registers */
	out_be64(&bpfc->pg_hashm[0], ~(0x0ULL));
	out_be64(&bpfc->pg_hashm[1], ~(0x0ULL));

	out_be64(&bpfc->pg_hashsc, 0x0ULL);
}
Example 7
static void _rhea_tcam_register_reset(struct rhea_pport_bpfc *bpfc)
{
	int i;

	if (NULL == bpfc)
		return;

	/* reset tcam registers */
	for (i = 0; i < ARRAY_SIZE(bpfc->pg_tcampr); ++i)
		out_be64(&bpfc->pg_tcampr[i], 0x0ULL);

	for (i = 0; i < ARRAY_SIZE(bpfc->pg_tcamm); ++i)
		out_be64(&bpfc->pg_tcamm[i], 0x0ULL);
}
Example 8
int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
{
	struct cbe_pmd_regs __iomem *pmd_regs;
	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
	unsigned long flags;	/* local_irq_save() expects unsigned long */
	u64 value;
#ifdef DEBUG
	long time;
#endif

	local_irq_save(flags);

	mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
	pmd_regs = cbe_get_cpu_pmd_regs(cpu);

#ifdef DEBUG
	time = jiffies;
#endif

	out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
	out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);

	out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
	out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);

	value = in_be64(&pmd_regs->pmcr);
	/* set bits to zero */
	value &= 0xFFFFFFFFFFFFFFF8ull;
	/* set bits to next pmode */
	value |= pmode;

	out_be64(&pmd_regs->pmcr, value);

#ifdef DEBUG
	/* wait until new pmode appears in status register */
	value = in_be64(&pmd_regs->pmsr) & 0x07;
	while (value != pmode) {
		cpu_relax();
		value = in_be64(&pmd_regs->pmsr) & 0x07;
	}

	time = jiffies  - time;
	time = jiffies_to_msecs(time);
	pr_debug("had to wait %lu ms for a transition using " \
		 "pervasive unit\n", time);
#endif
	local_irq_restore(flags);

	return 0;
}
Example 9
void __init cbe_pervasive_init(void)
{
	int cpu;

	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
		return;

	sysreset_hack = machine_is_compatible("IBM,CBPLUS-1.0");

	for_each_possible_cpu(cpu) {
		struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
		if (!regs)
			continue;

		 /* Enable Pause(0) control bit */
		out_be64(&regs->pmcr, in_be64(&regs->pmcr) |
					    CBE_PMD_PAUSE_ZERO_CONTROL);

		/* Enable JTAG system-reset hack */
		if (sysreset_hack)
			out_be32(&regs->fir_mode_reg,
				in_be32(&regs->fir_mode_reg) |
				CBE_PMD_FIR_MODE_M8);
	}

	ppc_md.power_save = cbe_power_save;
	ppc_md.system_reset_exception = cbe_system_reset_exception;
}
Example 10
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}
Example 11
static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
				      u64 *startp, u64 *endp)
{
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;

	start = __pa(startp);
	end = __pa(endp);
	inc = L1_CACHE_BYTES; /* invalidate a cacheline of TCEs at a time */

	/* If this is non-zero, change the format.  We shift the
	 * address and or in the magic from the device tree. */
	if (tbl->it_busno) {
		start <<= 12;
		end <<= 12;
		inc <<= 12;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	}

	end |= inc - 1; /* round up end to be different than start */

	mb(); /* Make sure TCEs in memory are written */
	while (start <= end) {
		out_be64(invalidate, start);
		start += inc;
	}
}
Example 12
File: ics.c Project: 03199618/linux
static void wsp_ics_set_xive(struct wsp_ics *ics, unsigned int irq, u64 xive)
{
	xive &= ~XIVE_ADDR_MASK;
	xive |= (irq & XIVE_ADDR_MASK);
	xive |= XIVE_WRITE_ENABLE;

	out_be64(XIVE_UPDATE_REG(ics->regs), xive);
}
Example 13
static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val)
{
	struct pci_controller *hose = data;
	struct pnv_phb *phb = hose->private_data;

	out_be64(phb->regs + offset, val);
	return 0;
}
Example 14
File: ics.c Project: 03199618/linux
static u64 wsp_ics_get_xive(struct wsp_ics *ics, unsigned int irq)
{
	unsigned long flags;
	u64 xive;

	spin_lock_irqsave(&ics->lock, flags);
	out_be64(IODA_TBL_ADDR_REG(ics->regs), TBL_SELECT_XIVT | IODA_IRQ(irq));
	xive = in_be64(IODA_TBL_DATA_REG(ics->regs));
	spin_unlock_irqrestore(&ics->lock, flags);

	return xive;
}
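The wsp_ics_get_xive()/wsp_ics_set_xive() pair is normally combined into a read-modify-write that rewrites a single field of the XIVE entry. A sketch of such an update follows; XIVE_PRIORITY_MASK and XIVE_PRIORITY_SHIFT are assumed names used for illustration, not necessarily those of the wsp driver.

static void wsp_ics_set_priority(struct wsp_ics *ics, unsigned int irq,
				 u64 priority)
{
	u64 xive = wsp_ics_get_xive(ics, irq);

	/* replace only the priority field, preserving the rest */
	xive &= ~XIVE_PRIORITY_MASK;
	xive |= (priority << XIVE_PRIORITY_SHIFT) & XIVE_PRIORITY_MASK;
	wsp_ics_set_xive(ics, irq, xive);
}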
Example 15
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid;

	pr_debug("%s\n", __func__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
	if (in_hugepage_area(mm->context, ea))
		vsid |= SLB_VSID_L;

	/* Write the index and VSID first; the ESID carries the valid
	 * bit, so writing it last is what makes the new entry live. */
	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}
Example 16
static void __init cbe_enable_pause_zero(void)
{
	unsigned long thread_switch_control;
	unsigned long temp_register;
	struct cbe_pervasive *p;
	int thread;

	spin_lock_irq(&cbe_pervasive_lock);
	p = &cbe_pervasive[smp_processor_id()];

	if (!p->regs)	/* check this CPU's entry, not the array base */
		goto out;

	pr_debug("Power Management: CPU %d\n", smp_processor_id());

	 /* Enable Pause(0) control bit */
	temp_register = in_be64(&p->regs->pm_control);

	out_be64(&p->regs->pm_control,
		 temp_register|PMD_PAUSE_ZERO_CONTROL);

	/* Enable DEC and EE interrupt request */
	thread_switch_control  = mfspr(SPRN_TSC_CELL);
	thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;

	switch ((mfspr(SPRN_CTRLF) & CTRL_CT)) {
	case CTRL_CT0:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
		thread = 0;
		break;
	case CTRL_CT1:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
		thread = 1;
		break;
	default:
		printk(KERN_WARNING "%s: unknown configuration\n",
			__FUNCTION__);
		thread = -1;
		break;
	}

	if (p->thread != thread)
		printk(KERN_WARNING "%s: device tree inconsistant, "
				     "cpu %i: %d/%d\n", __FUNCTION__,
				     smp_processor_id(),
				     p->thread, thread);

	mtspr(SPRN_TSC_CELL, thread_switch_control);

out:
	spin_unlock_irq(&cbe_pervasive_lock);
}
Example 17
static int __xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val)
{
	uint64_t hmer;
	int64_t ret, retries = 0;
	int64_t xscom_clear_retries = XSCOM_CLEAR_MAX_RETRIES;

	if (!xscom_gcid_ok(gcid)) {
		prerror("%s: invalid XSCOM gcid 0x%x\n", __func__, gcid);
		return OPAL_PARAMETER;
	}

	for (retries = 0; retries <= XSCOM_BUSY_MAX_RETRIES; retries++) {
		/* Clear status bits in HMER (HMER is special:
		 * writing to it *ands* the bits)
		 */
		mtspr(SPR_HMER, HMER_CLR_MASK);

		/* Write value to SCOM */
		out_be64(xscom_addr(gcid, pcb_addr), val);

		/* Wait for done bit */
		hmer = xscom_wait_done();

		/* Check for error */
		if (!(hmer & SPR_HMER_XSCOM_FAIL))
			return OPAL_SUCCESS;

		/* Handle error and possibly eventually retry */
		ret = xscom_handle_error(hmer, gcid, pcb_addr, true, retries,
				&xscom_clear_retries);
		if (ret != OPAL_BUSY)
			break;
	}

	/* Do not print an error message for multicast SCOMs */
	if (xscom_is_multicast_addr(pcb_addr) && ret == OPAL_XSCOM_CHIPLET_OFF)
		return ret;

	/*
	 * Workaround on P9: PRD does operations it *knows* will fail with this
	 * error to work around a hardware issue where accesses via the PIB
	 * (FSI or OCC) work as expected, accesses via the ADU (what xscom goes
	 * through) do not. The chip logic will always return all FFs if there
	 * is any error on the scom.
	 */
	if (proc_gen == proc_gen_p9 && ret == OPAL_XSCOM_CHIPLET_OFF)
		return ret;

	prerror("XSCOM: Write failed, ret = %lld\n", ret);
	return ret;
}
Example 18
static void cdmx_mask_wsp_firs(struct copro_unit *unit)
{
	/* Workaround for erratum 122 */
#ifdef CONFIG_PPC_A2_PSR2
	u64 val;

	if (firmware_has_feature(FW_FEATURE_MAMBO))
		return;

	val  = in_be64(unit->mmio_addr + 0x230);

	/* Disable error reporting for engines 6 & 7 */
	out_be64(unit->mmio_addr + 0x230, val | 0x6);
#endif
}
Example 19
void __init cbe_pervasive_init(void)
{
	int cpu;
	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
		return;

	for_each_possible_cpu(cpu) {
		struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
		if (!regs)
			continue;

		 /* Enable Pause(0) control bit */
		out_be64(&regs->pmcr, in_be64(&regs->pmcr) |
					    CBE_PMD_PAUSE_ZERO_CONTROL);
	}

	ppc_md.power_save = cbe_power_save;
	ppc_md.system_reset_exception = cbe_system_reset_exception;
}
Example 20
static int _rhea_tcam_register_set_status(struct rhea_channel *channel,
					  unsigned tcam_base,
					  unsigned tcam_offset,
					  unsigned enable)
{
	int rc = 0;
	unsigned tcam_index;
	struct rhea_pport *pport;
	u64 reg_pattern;

	if (NULL == channel)
		return -EINVAL;

	pport = _rhea_pport_get(channel->pport_nr);
	if (NULL == pport) {
		rhea_error("Invalid pport number");
		return -EINVAL;
	}

	if (NULL == channel->qpn || NULL == channel->tcam) {
		rhea_error("TCAM or QPN are not initialised");
		return -EINVAL;
	}

	/* get the real index in the TCAM array */
	rc = _rhea_channel_resource_index_get(channel->tcam,
					      tcam_base, tcam_offset,
					      &tcam_index);
	if (rc) {
		rhea_error("Was not able to find index in TCAM map");
		return -EINVAL;
	}

	reg_pattern = in_be64(&pport->pport_regs->bpfc.pg_tcampr[tcam_index]);

	/* enable/disable TCAM slot */
	reg_pattern = hea_set_u64_bits(reg_pattern, enable ? 1 : 0, 47, 47);

	out_be64(&pport->pport_regs->bpfc.pg_tcampr[tcam_index], reg_pattern);

	return rc;
}
Example 21
static int cbe_system_reset_exception(struct pt_regs *regs)
{
	int cpu;
	struct cbe_pmd_regs __iomem *pmd;

	switch (regs->msr & SRR1_WAKEMASK) {
	case SRR1_WAKEEE:
		do_IRQ(regs);
		break;
	case SRR1_WAKEDEC:
		timer_interrupt(regs);
		break;
	case SRR1_WAKEMT:
		/*
		 * The BMC can inject user triggered system reset exceptions,
		 * but cannot set the system reset reason in srr1,
		 * so check an extra register here.
		 */
		if (sysreset_hack && (cpu = smp_processor_id()) == 0) {
			pmd = cbe_get_cpu_pmd_regs(cpu);
			if (in_be64(&pmd->ras_esc_0) & 0xffff) {
				out_be64(&pmd->ras_esc_0, 0);
				return 0;
			}
		}
		break;
#ifdef CONFIG_CBE_RAS
	case SRR1_WAKESYSERR:
		cbe_system_error_exception(regs);
		break;
	case SRR1_WAKETHERM:
		cbe_thermal_exception(regs);
		break;
#endif /* CONFIG_CBE_RAS */
	default:
		/* do system reset */
		return 0;
	}
	/* everything handled */
	return 1;
}
Example 22
static void __init cbe_enable_pause_zero(void)
{
	unsigned long thread_switch_control;
	unsigned long temp_register;
	struct cbe_pmd_regs __iomem *pregs;

	spin_lock_irq(&cbe_pervasive_lock);
	pregs = cbe_get_cpu_pmd_regs(smp_processor_id());
	if (pregs == NULL)
		goto out;

	pr_debug("Power Management: CPU %d\n", smp_processor_id());

	 /* Enable Pause(0) control bit */
	temp_register = in_be64(&pregs->pm_control);

	out_be64(&pregs->pm_control,
		 temp_register | CBE_PMD_PAUSE_ZERO_CONTROL);

	/* Enable DEC and EE interrupt request */
	thread_switch_control  = mfspr(SPRN_TSC_CELL);
	thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;

	switch ((mfspr(SPRN_CTRLF) & CTRL_CT)) {
	case CTRL_CT0:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
		break;
	case CTRL_CT1:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
		break;
	default:
		printk(KERN_WARNING "%s: unknown configuration\n",
			__FUNCTION__);
		break;
	}

	mtspr(SPRN_TSC_CELL, thread_switch_control);

out:
	spin_unlock_irq(&cbe_pervasive_lock);
}
Example 23
static void __init
smp_85xx_kick_cpu(int nr)
{
	unsigned long flags;
	const u64 *cpu_rel_addr;
	u32 __iomem *bptr_vaddr;
	struct device_node *np;
	int n = 0;
	int ioremappable;

	WARN_ON(nr < 0 || nr >= NR_CPUS);

	pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);

	np = of_get_cpu_node(nr, NULL);
	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);

	if (cpu_rel_addr == NULL) {
		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
		return;
	}

	/*
	 * A secondary core could be in a spinloop in the bootpage
	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
	 * The bootpage and highmem can be accessed via ioremap(), but
	 * we need to access the spinloop directly if it's in lowmem.
	 */
	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

	/* Map the spin table */
	if (ioremappable)
		bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
	else
		bptr_vaddr = phys_to_virt(*cpu_rel_addr);

	local_irq_save(flags);

	out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
#ifdef CONFIG_PPC32
	out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));

	if (!ioremappable)
		flush_dcache_range((ulong)bptr_vaddr,
				(ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));

	/* Wait a bit for the CPU to ack. */
	while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
		mdelay(1);
#else
	out_be64((u64 *)(bptr_vaddr + BOOT_ENTRY_ADDR_UPPER),
		__pa((u64)*((unsigned long long *) generic_secondary_smp_init)));

	smp_generic_kick_cpu(nr);
#endif

	local_irq_restore(flags);

	if (ioremappable)
		iounmap(bptr_vaddr);

	pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
}
Example 24
static int xscom_clear_error(uint32_t gcid, uint32_t pcb_addr)
{
	u64 hmer;
	uint32_t base_xscom_addr;
	uint32_t xscom_clear_reg = 0x20010800;

	/* only in case of p9 */
	if (proc_gen != proc_gen_p9)
		return 0;

/* xscom clear address range/mask */
#define XSCOM_CLEAR_RANGE_START		0x20010A00
#define XSCOM_CLEAR_RANGE_END		0x20010ABF
#define XSCOM_CLEAR_RANGE_MASK		0x200FFBFF

	/*
	 * A hardware issue exists where a core responding to a SCOM
	 * can be delayed by thread reconfiguration, leaving the SCOM
	 * logic in a state where subsequent SCOMs to that core can
	 * get errors. This affects the Core PC SCOM registers in the
	 * range 20010A80-20010ABF.
	 *
	 * The workaround: if an XSCOM timeout occurs on one of the
	 * Core PC SCOM registers in that range, do a clearing SCOM
	 * write to 0x20010800 with data '0x00000000'. The clearing
	 * write also times out, but it clears the SCOM logic errors;
	 * once it is done, the original SCOM operation can be retried.
	 *
	 * The SCOM timeout is reported as status 0x4 (invalid address)
	 * in HMER[21-23].
	 */

	base_xscom_addr = pcb_addr & XSCOM_CLEAR_RANGE_MASK;
	if (!((base_xscom_addr >= XSCOM_CLEAR_RANGE_START) &&
				(base_xscom_addr <= XSCOM_CLEAR_RANGE_END)))
		return 0;

	/*
	 * Reset the XSCOM, or the next SCOM operation will fail. We
	 * also need a small delay before the clearing write: without
	 * one, the clearing write has been observed to report a wrong
	 * status.
	 */
	xscom_reset(gcid, true);

	/* Clear errors in HMER */
	mtspr(SPR_HMER, HMER_CLR_MASK);

	/* Write 0 to clear the xscom logic errors on target chip */
	out_be64(xscom_addr(gcid, xscom_clear_reg), 0);
	hmer = xscom_wait_done();

	/*
	 * Above clearing xscom write will timeout and error out with
	 * invalid access as there is no register at that address. This
	 * xscom operation just helps to clear the xscom logic error.
	 *
	 * On failure, reset the XSCOM or we'll hang on the next access
	 */
	if (hmer & SPR_HMER_XSCOM_FAIL)
		xscom_reset(gcid, true);

	return 1;
}
Example 25
static int debugfs_io_u64_set(void *data, u64 val)
{
	out_be64((u64 __iomem *)data, val);
	return 0;
}
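A setter with this (void *data, u64 val) signature is normally paired with a getter and wrapped with DEFINE_SIMPLE_ATTRIBUTE so it can back a debugfs file. Here is a sketch of the usual wiring; the getter, file name, and the 'parent'/'reg' parameters are illustrative assumptions.

static int debugfs_io_u64_get(void *data, u64 *val)
{
	*val = in_be64((u64 __iomem *)data);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_io_u64, debugfs_io_u64_get,
			debugfs_io_u64_set, "0x%llx\n");

/* 'parent' is an existing debugfs directory and 'reg' an ioremapped
 * register, both assumed by this sketch. */
static void expose_reg(struct dentry *parent, u64 __iomem *reg)
{
	debugfs_create_file("some_reg", 0600, parent,
			    (void __force *)reg, &fops_io_u64);
}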
Example 26
File: smp.c Project: AllenDou/linux
static int __cpuinit smp_85xx_kick_cpu(int nr)
{
	unsigned long flags;
	const u64 *cpu_rel_addr;
	struct epapr_spin_table __iomem *spin_table;
	struct device_node *np;
	int hw_cpu = get_hard_smp_processor_id(nr);
	int ioremappable;
	int ret = 0;

	WARN_ON(nr < 0 || nr >= NR_CPUS);
	WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);

	pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);

	np = of_get_cpu_node(nr, NULL);
	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);

	if (cpu_rel_addr == NULL) {
		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
		return -ENOENT;
	}

	/*
	 * A secondary core could be in a spinloop in the bootpage
	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
	 * The bootpage and highmem can be accessed via ioremap(), but
	 * we need to access the spinloop directly if it's in lowmem.
	 */
	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

	/* Map the spin table */
	if (ioremappable)
		spin_table = ioremap(*cpu_rel_addr,
				sizeof(struct epapr_spin_table));
	else
		spin_table = phys_to_virt(*cpu_rel_addr);

	local_irq_save(flags);
#ifdef CONFIG_PPC32
#ifdef CONFIG_HOTPLUG_CPU
	/* Corresponding to generic_set_cpu_dead() */
	generic_set_cpu_up(nr);

	if (system_state == SYSTEM_RUNNING) {
		out_be32(&spin_table->addr_l, 0);

		/*
		 * We don't set the BPTR register here since it already points
		 * to the boot page properly.
		 */
		mpic_reset_core(hw_cpu);

		/* wait until core is ready... */
		if (!spin_event_timeout(in_be32(&spin_table->addr_l) == 1,
						10000, 100)) {
			pr_err("%s: timeout waiting for core %d to reset\n",
							__func__, hw_cpu);
			ret = -ENOENT;
			goto out;
		}

		/*  clear the acknowledge status */
		__secondary_hold_acknowledge = -1;
	}
#endif
	out_be32(&spin_table->pir, hw_cpu);
	out_be32(&spin_table->addr_l, __pa(__early_start));

	if (!ioremappable)
		flush_dcache_range((ulong)spin_table,
			(ulong)spin_table + sizeof(struct epapr_spin_table));

	/* Wait a bit for the CPU to ack. */
	if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
					10000, 100)) {
		pr_err("%s: timeout waiting for core %d to ack\n",
						__func__, hw_cpu);
		ret = -ENOENT;
		goto out;
	}
out:
#else
	smp_generic_kick_cpu(nr);

	out_be32(&spin_table->pir, hw_cpu);
	out_be64((u64 *)(&spin_table->addr_h),
	  __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));

	if (!ioremappable)
		flush_dcache_range((ulong)spin_table,
			(ulong)spin_table + sizeof(struct epapr_spin_table));
#endif

	local_irq_restore(flags);

	if (ioremappable)
		iounmap(spin_table);

	return ret;
}
Example 27
static int _rhea_tcam_register_set(struct rhea_channel *channel,
				   unsigned tcam_base,
				   unsigned tcam_offset,
				   unsigned qpn_offset,
				   unsigned pattern, unsigned mask)
{

	int rc = 0;
	unsigned tcam_index;
	unsigned qpn_index;
	u64 reg_pattern;
	u64 reg_mask;
	struct rhea_pport *pport;

	if (NULL == channel)
		return -EINVAL;

	if (NULL == channel->tcam || NULL == channel->qpn) {
		rhea_error("TCAM or QPN is not allocated");
		return -EINVAL;
	}

	pport = _rhea_pport_get(channel->pport_nr);
	if (NULL == pport) {
		rhea_error("Invalid pport number");
		return -EINVAL;
	}

	/* get the real index in the TCAM array */
	rc = _rhea_channel_resource_index_get(channel->tcam,
					      tcam_base, tcam_offset,
					      &tcam_index);
	if (rc) {
		rhea_error("Was not able to find index in TCAM map");
		return -EINVAL;
	}

	/* check if the offset is valid for this channel */
	rc = _rhea_channel_resource_index_get(channel->qpn,
					      channel->qpn_base, qpn_offset,
					      &qpn_index);
	if (rc) {
		rhea_error("Was not able to find index in QPN map");
		return -EINVAL;
	}

	reg_pattern = in_be64(&pport->pport_regs->bpfc.pg_tcampr[tcam_index]);
	reg_mask = in_be64(&pport->pport_regs->bpfc.pg_tcamm[tcam_index]);

	/* set pattern and mask */
	reg_pattern = hea_set_u64_bits(reg_pattern, pattern, 0, 31);
	reg_mask = hea_set_u64_bits(reg_mask, mask, 0, 31);

	/* configure LPORT or PPORT */
	switch (channel->type) {
	case HEA_BC_PORT:
	case HEA_MC_PORT:
	case HEA_UC_PORT:
		/* case UC_MC_HEA_BC_PORT: */

		/* is physical port */
		reg_pattern = hea_set_u64_bits(reg_pattern, 0, 48, 48);
		reg_mask = hea_set_u64_bits(reg_mask, 1, 48, 48);
		break;

	default:

		/* is logical port */
		reg_pattern = hea_set_u64_bits(reg_pattern, 1, 48, 48);
		reg_mask = hea_set_u64_bits(reg_mask, 1, 48, 48);
		break;
	}

	/* configure which channel type is using this TCAM */
	switch (channel->type) {
	case HEA_BC_PORT:
		reg_pattern = hea_set_u64_bits(reg_pattern, 1, 54, 55);
		reg_mask = hea_set_u64_bits(reg_mask, 3, 54, 55);
		break;

	case HEA_MC_PORT:
		reg_pattern = hea_set_u64_bits(reg_pattern, 2, 54, 55);
		reg_mask = hea_set_u64_bits(reg_mask, 3, 54, 55);
		break;

	case HEA_UC_PORT:
		reg_pattern = hea_set_u64_bits(reg_pattern, 3, 54, 55);
		reg_mask = hea_set_u64_bits(reg_mask, 3, 54, 55);
		break;

	default: {
		int lport_index = hea_lport_index_get(channel->type);

		/* only allow one logical port at a time
		 * --> not all combinations are possible */
		reg_pattern =
			hea_set_u64_bits(reg_pattern, lport_index, 54, 55);
		reg_mask = hea_set_u64_bits(reg_mask, 3, 54, 55);
		break;
	}
	}

	/* This is the offset from the QPN base to be used
	 * if the packet data matches the pattern */
	reg_pattern = hea_set_u64_bits(reg_pattern, qpn_offset, 59, 63);

	/* write back registers */
	out_be64(&pport->pport_regs->bpfc.pg_tcampr[tcam_index], reg_pattern);
	out_be64(&pport->pport_regs->bpfc.pg_tcamm[tcam_index], reg_mask);

	return rc;
}
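For reference, the pattern/mask pair programmed above follows the usual TCAM convention: an incoming key matches an entry when it agrees with the pattern on every bit the mask selects. In C terms, as a conceptual sketch rather than driver code:

/* Conceptual TCAM match: masked bits of the key must equal the pattern. */
static inline bool tcam_matches(u64 key, u64 pattern, u64 mask)
{
	return ((key ^ pattern) & mask) == 0;
}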