Example #1
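/* init the tx or rx descriptor - 64-bit (dma64) descriptor variant */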
static INLINE void
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, ulong pa, uint outidx, uint32 *flags,
             uint32 bufcount)
{
	uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* address extension is only needed on a PCI bus with a big (>1G) physical address */
	if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
		W_SM(&ddring[outidx].addrlow, BUS_SWAP32(pa + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(0 + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	} else {
		/* address extension */
		uint32 ae;
		ASSERT(di->addrext);

		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		W_SM(&ddring[outidx].addrlow, BUS_SWAP32(pa + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(0 + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	}
}
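The descriptor layout dma64_dd_upd() assumes can be read straight off the field accesses above. A minimal sketch follows; dma64dd_sketch_t is an illustrative name, and the field comments reflect only what this function writes, not the authoritative dma64dd_t definition from the driver headers.

typedef volatile struct {
	uint32 ctrl1;		/* per-descriptor flags, written from *flags */
	uint32 ctrl2;		/* buffer byte count (D64_CTRL2_BC_MASK) plus address-extension bits */
	uint32 addrlow;		/* low 32 address bits, offset by dataoffsetlow */
	uint32 addrhigh;	/* high 32 address bits, offset by dataoffsethigh */
} dma64dd_sketch_t;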
Example #2
void *
dma_getnextrxp(dma_info_t *di, bool forceall)
{
	uint i;
	void *rxp;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma_rxenabled(di));

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return (NULL);

	/* ignore curr if forceall */
	if (!forceall && (i == B2I(R_REG(&di->regs->rcvstatus) & RS_CD_MASK)))
		return (NULL);

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	ASSERT(rxp);
	di->rxp[i] = NULL;

	/* clear this packet from the descriptor ring */
	DMA_UNMAP(di->dev, (BUS_SWAP32(R_SM(&di->rxd[i].addr)) - di->dataoffset),
		  di->rxbufsize, DMA_RX, rxp);
	W_SM(&di->rxd[i].addr, 0);

	di->rxin = NEXTRXD(i);

	return (rxp);
}
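dma_getnextrxp() relies on two ring helpers that are not shown in these examples. The definitions below are a hedged sketch of what they are assumed to do, under the common assumptions that the descriptor count is a power of two and that di is in scope at the call site; the real macros live in the driver source and may differ.

#define B2I(bytes)	((bytes) / sizeof(dma32dd_t))	/* rcvstatus byte offset -> descriptor index */
#define NEXTRXD(i)	(((i) + 1) & (di->nrxd - 1))	/* advance an rx index with wraparound */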
Example #3
/* init the tx or rx descriptor */
static INLINE void
dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, ulong pa, uint outidx, uint32 *flags,
             uint32 bufcount)
{
	/* dma32 uses a single 32-bit control word to hold both the flags and the buffer count */
	*flags = *flags | (bufcount & CTRL_BC_MASK);

	if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
		W_SM(&ddring[outidx].addr, BUS_SWAP32(pa + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	} else {
		/* address extension */
		uint32 ae;
		ASSERT(di->addrext);
		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		pa &= ~PCI32ADDR_HIGH;

		*flags |= (ae << CTRL_AE_SHIFT);
		W_SM(&ddring[outidx].addr, BUS_SWAP32(pa + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	}
}
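As with the 64-bit variant, the 32-bit descriptor layout can be inferred from the accesses above. This is a sketch under an illustrative name, not the driver's dma32dd_t definition.

typedef volatile struct {
	uint32 ctrl;	/* flags | (bufcount & CTRL_BC_MASK), plus AE bits when extension is used */
	uint32 addr;	/* pa + dataoffsetlow, with PCI32ADDR_HIGH stripped when extension is used */
} dma32dd_sketch_t;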
Example #4
/*
 * Reclaim next completed txd (txds if using chained buffers) and
 * return associated packet.
 * If 'forceall' is true, reclaim txd(s) and return the associated packet
 * regardless of the value of the hardware "curr" pointer.
 */
void*
dma_getnexttxp(dma_info_t *di, bool forceall)
{
	uint start, end, i;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));

	txp = NULL;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma_txenabled(di));

	start = di->txin;
	if (forceall)
		end = di->txout;
	else
		end = B2I(R_REG(&di->regs->xmtstatus) & XS_CD_MASK);

	/* PR4738 - xmt disable/re-enable does not clear CURR */
	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		DMA_UNMAP(di->dev, (BUS_SWAP32(R_SM(&di->txd[i].addr)) - di->dataoffset),
			  (BUS_SWAP32(R_SM(&di->txd[i].ctrl)) & CTRL_BC_MASK), DMA_TX, di->txp[i]);
		W_SM(&di->txd[i].addr, 0);
		txp = di->txp[i];
		di->txp[i] = NULL;
	}

	di->txin = i;

	/* tx flow control */
	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (txp);

bogus:
	/*
	DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
	           start, end, di->txout, forceall));
	*/
	return (NULL);
}
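To show where dma_getnexttxp() typically sits, here is a hypothetical tx-completion path that drains finished descriptors and frees the packets; example_txcomplete() is an illustrative name, and PKTFREE() is used the same way dma_tx() in Example #6 uses it.

static void
example_txcomplete(dma_info_t *di)
{
	void *p;

	/* reclaim only descriptors the hardware has already consumed */
	while ((p = dma_getnexttxp(di, FALSE)) != NULL)
		PKTFREE(di->drv, p, TRUE);
}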
Example #5
/* post receive buffers */
void
dma_rxfill(dma_info_t *di)
{
	void *p;
	uint rxin, rxout;
	uint ctrl;
	uint n;
	uint i;
	uint32 pa;
	uint rxbufsize;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;
	rxbufsize = di->rxbufsize;

	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	for (i = 0; i < n; i++) {
		if ((p = PKTGET(di->drv, rxbufsize, FALSE)) == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
			di->hnddma.rxnobuf++;
			break;
		}

		/* PR3263 & PR3387 & PR4642 war: rxh.len=0 means dma writes not complete */
		*(uint32*)(OSL_UNCACHED(PKTDATA(di->drv, p))) = 0;

		pa = (uint32) DMA_MAP(di->dev, PKTDATA(di->drv, p), rxbufsize, DMA_RX, p);
		ASSERT(ISALIGNED(pa, 4));

		/* save the free packet pointer */
#if 0
		ASSERT(di->rxp[rxout] == NULL);
#endif
		di->rxp[rxout] = p;

		/* paranoia */
		ASSERT(R_SM(&di->rxd[rxout].addr) == 0);

		/* prep the descriptor control value */
		ctrl = rxbufsize;
		if (rxout == (di->nrxd - 1))
			ctrl |= CTRL_EOT;

		/* init the rx descriptor */
		W_SM(&di->rxd[rxout].ctrl, BUS_SWAP32(ctrl));
		W_SM(&di->rxd[rxout].addr, BUS_SWAP32(pa + di->dataoffset));

		DMA_TRACE(("%s: dma_rxfill:  ctrl %08x dataoffset: %08x\n",
		           di->name, BUS_SWAP32(ctrl), BUS_SWAP32(pa + di->dataoffset)));

		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	W_REG(&di->regs->rcvptr, I2B(rxout));
}
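A hypothetical receive-service loop shows how dma_rxfill() pairs with dma_getnextrxp() from Example #2: drain the completed buffers, hand them to a driver hook, then repost. Both example_rxservice() and process_rxpkt() are illustrative names, not part of the code above.

extern void process_rxpkt(dma_info_t *di, void *p);	/* hypothetical driver hook */

static void
example_rxservice(dma_info_t *di)
{
	void *p;

	/* pull every buffer the DMA engine has completed so far */
	while ((p = dma_getnextrxp(di, FALSE)) != NULL)
		process_rxpkt(di, p);

	/* repost fresh buffers, up to di->nrxpost outstanding */
	dma_rxfill(di);
}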
Example #6
/*
 * Just like above except go through the extra effort of splitting
 * buffers that cross 4Kbyte boundaries into multiple tx descriptors.
 */
int
dma_tx(dma_info_t *di, void *p0, uint32 coreflags)
{
	void *p, *next;
	uchar *data;
	uint plen, len;
	uchar *page, *start, *end;
	uint txout;
	uint32 ctrl;
	uint32 pa;

	DMA_TRACE(("%s: dma_tx\n", di->name));

	txout = di->txout;
	ctrl = 0;

	/*
	 * Walk the chain of packet buffers
	 * splitting those that cross 4 Kbyte boundaries
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		data = PKTDATA(di->drv, p);
		plen = PKTLEN(di->drv, p);
		next = PKTNEXT(di->drv, p);

		/* PR988 - skip zero length buffers */
		if (plen == 0)
			continue;

		for (page = (uchar*)PAGEBASE(data);
			page <= (uchar*)PAGEBASE(data + plen - 1);
			page += PAGESZ) {

			/* return nonzero if out of tx descriptors */
			if (NEXTTXD(txout) == di->txin)
				goto outoftxd;

			start = (page == (uchar*)PAGEBASE(data)) ? data : page;
			end = (page == (uchar*)PAGEBASE(data + plen)) ?
			      (data + plen) : (page + PAGESZ);
			len = end - start;

			/* build the descriptor control value */
			ctrl = len & CTRL_BC_MASK;

			/* PR3697: Descriptor flags are not ignored for descriptors where SOF is clear */
			ctrl |= coreflags;

			if ((p == p0) && (start == data))
				ctrl |= CTRL_SOF;
			if ((next == NULL) && (end == (data + plen)))
				ctrl |= (CTRL_IOC | CTRL_EOF);
			if (txout == (di->ntxd - 1))
				ctrl |= CTRL_EOT;

			/* get physical address of buffer start */
			pa = (uint32) DMA_MAP(di->dev, start, len, DMA_TX, p);

			/* init the tx descriptor */
			W_SM(&di->txd[txout].ctrl, BUS_SWAP32(ctrl));
			W_SM(&di->txd[txout].addr, BUS_SWAP32(pa + di->dataoffset));

			ASSERT(di->txp[txout] == NULL);

			txout = NEXTTXD(txout);
		}
	}

	/* if last txd eof not set, fix it */
	if (!(ctrl & CTRL_EOF))
		W_SM(&di->txd[PREVTXD(txout)].ctrl, BUS_SWAP32(ctrl | CTRL_IOC | CTRL_EOF));

	/* save the packet */
	di->txp[di->txout] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	W_REG(&di->regs->xmtptr, I2B(txout));

	/* tx flow control */
	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (0);

outoftxd:
	DMA_ERROR(("%s: dma_tx: out of txds\n", di->name));
	PKTFREE(di->drv, p0, TRUE);
	di->txavail = 0;
	di->hnddma.txnobuf++;
	return (-1);
}
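dma_tx() splits buffers on 4 Kbyte boundaries, so the page helpers it uses presumably look like the definitions below; these are assumptions for illustration, not the driver's own macros. A caller would normally check di->txavail, which dma_tx() and dma_getnexttxp() both refresh, before queuing another packet, and treat a -1 return as a full ring.

#define PAGESZ		4096					/* assumed 4 Kbyte DMA page */
#define PAGEBASE(x)	((ulong)(x) & ~(PAGESZ - 1))		/* round an address down to its page */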