Example 1
static int want_buffer(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}
Example 2
static void want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp)
{
	if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA) ||
	    qp->ibqp.qp_type == IB_QPT_SMI) {
		unsigned long flags;

		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 dd->ipath_sendctrl);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
	}
}
Example 3
static void ipath_chk_errormask(struct ipath_devdata *dd)
{
	static u32 fixed;
	u32 ctrl;
	unsigned long errormask;
	unsigned long hwerrs;

	if (!dd->ipath_errormask || !(dd->ipath_flags & IPATH_INITTED))
		return;

	errormask = ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);

	if (errormask == dd->ipath_errormask)
		return;
	fixed++;

	hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
	ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);

	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
		dd->ipath_errormask);

	if ((hwerrs & dd->ipath_hwerrmask) ||
		(ctrl & INFINIPATH_C_FREEZEMODE)) {
		/* force re-interrupt of pending events, just in case */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, 0ULL);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
		dev_info(&dd->pcidev->dev,
			"errormask fixed(%u) %lx -> %lx, ctrl %x hwerr %lx\n",
			fixed, errormask, (unsigned long)dd->ipath_errormask,
			ctrl, hwerrs);
	} else
		ipath_dbg("errormask fixed(%u) %lx -> %lx, no freeze\n",
			fixed, errormask,
			(unsigned long)dd->ipath_errormask);
}
/*
 * Lemma to deal with race condition of write..read to epb regs
 */
static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
{
	int tries;
	u64 transval;


	ipath_write_kreg(dd, reg, i_val);
	/* Throw away first read, as RDY bit may be stale */
	transval = ipath_read_kreg64(dd, reg);

	for (tries = EPB_TRANS_TRIES; tries; --tries) {
		transval = ipath_read_kreg32(dd, reg);
		if (transval & EPB_TRANS_RDY)
			break;
		udelay(5);
	}
	if (transval & EPB_TRANS_ERR)
		return -1;
	if (tries > 0 && o_vp)
		*o_vp = transval;
	return tries;
}
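For context, epb_trans() returns -1 if EPB_TRANS_ERR was set, 0 if the RDY poll timed out, and a positive count of remaining tries on success, with the final readback handed out through o_vp. A minimal, hedged sketch of how a caller would consume that convention (the wrapper name and its parameters are illustrative, not from the driver):
/*
 * Hedged sketch, not driver code: shows how a caller consumes the
 * epb_trans() return convention above (-1 if EPB_TRANS_ERR was set,
 * 0 if the RDY poll timed out, >0 on success).  The function name,
 * register, and value parameters are illustrative assumptions.
 */
static int epb_example_poke(struct ipath_devdata *dd, u16 reg, u64 wval)
{
	u64 rval = 0;
	int ret;

	ret = epb_trans(dd, reg, wval, &rval);
	if (ret <= 0) {
		ipath_dev_err(dd, "EPB transaction failed (%d)\n", ret);
		return -1;
	}
	/* rval now holds the last readback with EPB_TRANS_RDY set */
	return 0;
}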
void ipath_sd7220_clr_ibpar(struct ipath_devdata *dd)
{
	int ret;

	/* clear, then re-enable parity errs */
	ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
		UC_PAR_CLR_D, UC_PAR_CLR_M);
	if (ret < 0) {
		ipath_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
		goto bail;
	}
	ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
		UC_PAR_CLR_M);

	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
	udelay(4);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
		INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
bail:
	return;
}
Example 6
/**
 * ipath_get_faststats - get word counters from chip before they overflow
 * @opaque: contains a pointer to the infinipath device ipath_devdata
 *
 * called from add_timer
 */
void ipath_get_faststats(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
	u32 val;
	static unsigned cnt;
	unsigned long flags;

	/*
	 * don't access the chip while running diags, or memory diags can
	 * fail
	 */
	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_INITTED) ||
	    ipath_diag_inuse)
		/* but re-arm the timer for the diags case; won't hurt otherwise */
		goto done;

	/*
	 * We now try to maintain an "active timer", based on traffic
	 * exceeding a threshold, so we need to check the word-counts
	 * even if they are 64-bit.
	 */
	ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
	if (dd->ipath_traffic_wds  >= IPATH_TRAFFIC_ACTIVE_THRESHOLD)
		atomic_add(5, &dd->ipath_active_time); /* S/B #define */
	dd->ipath_traffic_wds = 0;
	spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);

	if (dd->ipath_flags & IPATH_32BITCOUNTERS) {
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
	}

	ipath_qcheck(dd);

	/*
	 * deal with repeat error suppression.  Doesn't really matter if
	 * last error was almost a full interval ago, or just a few usecs
	 * ago; still won't get more than 2 per interval.  We may want
	 * longer intervals for this eventually, could do with mod, counter
	 * or separate timer.  Also see code in ipath_handle_errors() and
	 * ipath_handle_hwerrors().
	 */

	if (dd->ipath_lasterror)
		dd->ipath_lasterror = 0;
	if (dd->ipath_lasthwerror)
		dd->ipath_lasthwerror = 0;
	if (dd->ipath_maskederrs
	    && time_after(jiffies, dd->ipath_unmasktime)) {
		char ebuf[256];
		int iserr;
		iserr = ipath_decode_err(ebuf, sizeof ebuf,
			dd->ipath_maskederrs);
		if (dd->ipath_maskederrs &
				~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
				INFINIPATH_E_PKTERRS ))
			ipath_dev_err(dd, "Re-enabling masked errors "
				      "(%s)\n", ebuf);
		else {
			/*
			 * rcvegrfull and rcvhdrqfull are "normal", for some
			 * types of processes (mostly benchmarks) that send
			 * huge numbers of messages, while not processing
			 * them.  So only complain about these at debug
			 * level.
			 */
			if (iserr)
					ipath_dbg("Re-enabling queue full errors (%s)\n",
							ebuf);
			else
				ipath_cdbg(ERRPKT, "Re-enabling packet"
						" problem interrupt (%s)\n", ebuf);
		}

		/* re-enable masked errors */
		dd->ipath_errormask |= dd->ipath_maskederrs;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
			dd->ipath_errormask);
		dd->ipath_maskederrs = 0;
	}

	/* limit qfull messages to ~one per minute per port */
	if ((++cnt & 0x10)) {
		for (val = dd->ipath_cfgports - 1; ((int)val) >= 0;
		     val--) {
			if (dd->ipath_lastegrheads[val] != -1)
				dd->ipath_lastegrheads[val] = -1;
			if (dd->ipath_lastrcvhdrqtails[val] != -1)
				dd->ipath_lastrcvhdrqtails[val] = -1;
		}
	}

	ipath_chk_errormask(dd);
done:
	mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
}
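The kernel-doc above notes that ipath_get_faststats() is "called from add_timer", and the function re-arms itself every 5*HZ jiffies via mod_timer(). A minimal, hedged sketch of how the initial arming could look with the legacy struct timer_list API that matches the (unsigned long opaque) callback signature; the helper name and where the real driver performs this init are assumptions for illustration:
/*
 * Hedged sketch: initial arming of the self-rearming statistics timer,
 * using the legacy timer API that matches the callback signature above.
 * The function name and init location are illustrative assumptions.
 */
static void example_start_faststats_timer(struct ipath_devdata *dd)
{
	init_timer(&dd->ipath_stats_timer);
	dd->ipath_stats_timer.function = ipath_get_faststats;
	dd->ipath_stats_timer.data = (unsigned long) dd;
	/* first expiry in 5 seconds; the callback then re-arms itself */
	dd->ipath_stats_timer.expires = jiffies + 5 * HZ;
	add_timer(&dd->ipath_stats_timer);
}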
/*
 * Query, claim, or release ownership of the EPB (External Parallel Bus)
 * for a specified SERDES.
 * The "claim" parameter is >0 to claim, <0 to release, 0 to query.
 * Returns <0 for errors, >0 if we had ownership, else 0.
 */
static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
{
	u16 acc;
	u64 accval;
	int owned = 0;
	u64 oct_sel = 0;

	switch (sdnum) {
	case IB_7220_SERDES :
		/*
		 * The IB SERDES "ownership" is fairly simple: a single
		 * request/grant bit pair.
		 */
		acc = dd->ipath_kregs->kr_ib_epbacc;
		break;
	case PCIE_SERDES0 :
	case PCIE_SERDES1 :
		/* PCIe SERDES has two "octants", need to select which */
		acc = dd->ipath_kregs->kr_pcie_epbacc;
		oct_sel = (2 << (sdnum - PCIE_SERDES0));
		break;
	default :
		return 0;
	}

	/* Make sure any outstanding transaction was seen */
	ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
	udelay(15);

	accval = ipath_read_kreg32(dd, acc);

	owned = !!(accval & EPB_ACC_GNT);
	if (claim < 0) {
		/* Need to release */
		u64 pollval;
		/*
		 * The only writeable bits are the request and CS.
		 * Both should be clear
		 */
		u64 newval = 0;
		ipath_write_kreg(dd, acc, newval);
		/* First read after write is not trustworthy */
		pollval = ipath_read_kreg32(dd, acc);
		udelay(5);
		pollval = ipath_read_kreg32(dd, acc);
		if (pollval & EPB_ACC_GNT)
			owned = -1;
	} else if (claim > 0) {
		/* Need to claim */
		u64 pollval;
		u64 newval = EPB_ACC_REQ | oct_sel;
		ipath_write_kreg(dd, acc, newval);
		/* First read after write is not trustworthy */
		pollval = ipath_read_kreg32(dd, acc);
		udelay(5);
		pollval = ipath_read_kreg32(dd, acc);
		if (!(pollval & EPB_ACC_GNT))
			owned = -1;
	}
	return owned;
}
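The header comment spells out the epb_access() contract: claim with >0, release with <0, query with 0; it returns <0 on error, >0 if ownership was already held, else 0. ipath_ibsd_reset() below brackets its EPB work with a claim and a release while holding ipath_sdepb_lock; a stripped-down, hedged sketch of that same pattern (the function name and the transaction body are placeholders, not driver code):
/*
 * Hedged sketch: claim/release bracket around EPB work, modeled on the
 * usage in ipath_ibsd_reset() below.  The "transactions here" body is a
 * placeholder; error handling is minimal for brevity.
 */
static int example_epb_locked_op(struct ipath_devdata *dd)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
	if (epb_access(dd, IB_7220_SERDES, 1) < 0) {
		ret = -1;			/* could not get the grant */
		goto out;
	}
	/* ... perform epb_trans() transactions here ... */
	epb_access(dd, IB_7220_SERDES, -1);	/* release ownership */
out:
	spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
	return ret;
}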
/*
 * Localize the stuff that should be done to change IB uC reset.
 * Returns <0 for errors.
 */
static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst)
{
	u64 rst_val;
	int ret = 0;
	unsigned long flags;

	rst_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
	if (assert_rst) {
		/*
		 * Vendor recommends "interrupting" uC before reset, to
		 * minimize possible glitches.
		 */
		spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
		epb_access(dd, IB_7220_SERDES, 1);
		rst_val |= 1ULL;
		/* Squelch possible parity error from _asserting_ reset */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
			dd->ipath_hwerrmask &
			~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
		/* flush write, delay to ensure it took effect */
		ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
		udelay(2);
		/* once it's reset, can remove interrupt */
		epb_access(dd, IB_7220_SERDES, -1);
		spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
	} else {
		/*
		 * Before we de-assert reset, we need to deal with
		 * possible glitch on the Parity-error line.
		 * Suppress it around the reset, both in chip-level
		 * hwerrmask and in IB uC control reg. uC will allow
		 * it again during startup.
		 */
		u64 val;
		rst_val &= ~(1ULL);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
			dd->ipath_hwerrmask &
			~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);

		ret = ipath_resync_ibepb(dd);
		if (ret < 0)
			ipath_dev_err(dd, "unable to re-sync IB EPB\n");

		/* set uC control regs to suppress parity errs */
		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
		if (ret < 0)
			goto bail;
		/* IB uC code past Version 1.32.17 allows suppression of the wdog */
		ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
			0x80);
		if (ret < 0) {
			ipath_dev_err(dd, "Failed to set WDOG disable\n");
			goto bail;
		}
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
		/* flush write, delay for startup */
		ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
		udelay(1);
		/* clear, then re-enable parity errs */
		ipath_sd7220_clr_ibpar(dd);
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
		if (val & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR) {
			ipath_dev_err(dd, "IBUC Parity still set after RST\n");
			dd->ipath_hwerrmask &=
				~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR;
		}
		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
			dd->ipath_hwerrmask);
	}

bail:
	return ret;
}
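ipath_ibsd_reset() is presumably used in assert/de-assert pairs around work on the IB SerDes uC. A hedged sketch of such a cycle built from the helper above; the wrapper name is an illustrative assumption, and the helper itself already flushes and delays around the reset writes:
/*
 * Hedged sketch: a full uC reset cycle using the helper above.  The
 * wrapper name is illustrative; ipath_ibsd_reset() handles the flush
 * and delay around each reset write internally.
 */
static int example_ibsd_reset_cycle(struct ipath_devdata *dd)
{
	int ret;

	ret = ipath_ibsd_reset(dd, 1);	/* assert uC reset */
	if (ret < 0)
		return ret;
	return ipath_ibsd_reset(dd, 0);	/* de-assert; parity errs re-enabled */
}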
Example 9
/**
 * ipath_get_faststats - get word counters from chip before they overflow
 * @opaque: contains a pointer to the infinipath device ipath_devdata
 *
 * called from add_timer
 */
void ipath_get_faststats(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
	u32 val;
	static unsigned cnt;

	/*
	 * don't access the chip while running diags, or memory diags can
	 * fail
	 */
	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT) ||
	    ipath_diag_inuse)
		/* but re-arm the timer for the diags case; won't hurt otherwise */
		goto done;

	if (dd->ipath_flags & IPATH_32BITCOUNTERS) {
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
	}

	ipath_qcheck(dd);

	/*
	 * deal with repeat error suppression.  Doesn't really matter if
	 * last error was almost a full interval ago, or just a few usecs
	 * ago; still won't get more than 2 per interval.  We may want
	 * longer intervals for this eventually, could do with mod, counter
	 * or separate timer.  Also see code in ipath_handle_errors() and
	 * ipath_handle_hwerrors().
	 */

	if (dd->ipath_lasterror)
		dd->ipath_lasterror = 0;
	if (dd->ipath_lasthwerror)
		dd->ipath_lasthwerror = 0;
	if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs)
	    && time_after(jiffies, dd->ipath_unmasktime)) {
		char ebuf[256];
		ipath_decode_err(ebuf, sizeof ebuf,
				 (dd->ipath_maskederrs & ~dd->
				  ipath_ignorederrs));
		if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &
		    ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL))
			ipath_dev_err(dd, "Re-enabling masked errors "
				      "(%s)\n", ebuf);
		else {
			/*
			 * rcvegrfull and rcvhdrqfull are "normal", for some
			 * types of processes (mostly benchmarks) that send
			 * huge numbers of messages, while not processing
			 * them.  So only complain about these at debug
			 * level.
			 */
			ipath_dbg("Disabling frequent queue full errors "
				  "(%s)\n", ebuf);
		}
		dd->ipath_maskederrs = dd->ipath_ignorederrs;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
				 ~dd->ipath_maskederrs);
	}

	/* limit qfull messages to ~one per minute per port */
	if ((++cnt & 0x10)) {
		for (val = dd->ipath_cfgports - 1; ((int)val) >= 0;
		     val--) {
			if (dd->ipath_lastegrheads[val] != -1)
				dd->ipath_lastegrheads[val] = -1;
			if (dd->ipath_lastrcvhdrqtails[val] != -1)
				dd->ipath_lastrcvhdrqtails[val] = -1;
		}
	}

	if (dd->ipath_nosma_bufs) {
		dd->ipath_nosma_secs += 5;
		if (dd->ipath_nosma_secs >= 30) {
			ipath_cdbg(SMA, "No SMA bufs avail %u seconds; "
				   "cancelling pending sends\n",
				   dd->ipath_nosma_secs);
			/*
			 * issue an abort as well, in case we have a packet
			 * stuck in launch fifo.  This could corrupt an
			 * outgoing user packet in the worst case,
			 * but this is pretty catastrophic anyway.
			 */
			ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
					 INFINIPATH_S_ABORT);
			ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
					     dd->ipath_piobcnt2k +
					     dd->ipath_piobcnt4k -
					     dd->ipath_lastport_piobuf);
			/* start again, if necessary */
			dd->ipath_nosma_secs = 0;
		} else
			ipath_cdbg(SMA, "No SMA bufs avail %u tries, "
				   "after %u seconds\n",
				   dd->ipath_nosma_bufs,
				   dd->ipath_nosma_secs);
	}

done:
	mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
}