Example #1
void setup_rx_bds(xemacpsif_s *xemacpsif, XEmacPs_BdRing *rxring)
{
    XEmacPs_Bd *rxbd;
    XStatus status;
    struct pbuf *p;
    u32_t freebds;
    u32_t bdindex;
    u32_t *temp;
    u32_t index = 0;

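    /* Controllers other than GEM0 keep their pbuf pointers in a separate
       region of rx_pbufs_storage, so offset the index for them. */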
    if (xemacpsif->emacps.Config.BaseAddress != XPAR_XEMACPS_0_BASEADDR) {
        index = sizeof(s32_t) * XLWIP_CONFIG_N_RX_DESC;
    }

    freebds = XEmacPs_BdRingGetFreeCnt (rxring);
    while (freebds > 0) {
        freebds--;
        p = pbuf_alloc(PBUF_RAW, XEMACPS_MAX_FRAME_SIZE, PBUF_POOL);
        if (!p) {
#if LINK_STATS
            lwip_stats.link.memerr++;
            lwip_stats.link.drop++;
#endif
            printf("unable to alloc pbuf in recv_handler\r\n");
            return;
        }
        status = XEmacPs_BdRingAlloc(rxring, 1, &rxbd);
        if (status != XST_SUCCESS) {
            LWIP_DEBUGF(NETIF_DEBUG, ("setup_rx_bds: Error allocating RxBD\r\n"));
            pbuf_free(p);
            return;
        }
        status = XEmacPs_BdRingToHw(rxring, 1, rxbd);
        if (status != XST_SUCCESS) {
            LWIP_DEBUGF(NETIF_DEBUG, ("Error committing RxBD to hardware: "));
            if (status == XST_DMA_SG_LIST_ERROR)
                LWIP_DEBUGF(NETIF_DEBUG, ("XST_DMA_SG_LIST_ERROR: this function was called out of sequence with XEmacPs_BdRingAlloc()\r\n"));
            else
                LWIP_DEBUGF(NETIF_DEBUG, ("set of BDs was rejected because the first BD did not have its start-of-packet bit set, or the last BD did not have its end-of-packet bit set, or any one of the BD set has 0 as length value\r\n"));

            pbuf_free(p);
            XEmacPs_BdRingUnAlloc(rxring, 1, rxbd);
            return;
        }
        Xil_DCacheInvalidateRange((u32_t)p->payload, (u32_t)XEMACPS_MAX_FRAME_SIZE);
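        /* Clear both descriptor words by hand. Bit 1 (0x00000002) of the
           address word is the wrap bit; set it on the last BD so the
           controller loops back to the start of the ring. */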
        bdindex = XEMACPS_BD_TO_INDEX(rxring, rxbd);
        temp = (u32_t *)rxbd;
        *temp = 0;
        if (bdindex == (XLWIP_CONFIG_N_RX_DESC - 1)) {
            *temp = 0x00000002;
        }
        temp++;
        *temp = 0;

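        /* Point the BD at the pbuf payload and remember the pbuf so the
           receive handler can pass it up to lwIP later. */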
        XEmacPs_BdSetAddressRx(rxbd, (u32_t)p->payload);
        rx_pbufs_storage[index + bdindex] = (s32_t)p;
    }
}
void
_setup_rx_bds(XEmacPs_BdRing *rxring)
{
	XEmacPs_Bd *rxbd, *CurBdPtr;
	XStatus Status;
	struct pbuf *p;
	unsigned int FreeBds, k;
	unsigned int BdIndex;
	unsigned int *Temp;

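	/* Grab every free BD in one pass; they are all committed to
	   hardware together after the loop. */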
	FreeBds = XEmacPs_BdRingGetFreeCnt (rxring);
	Status = XEmacPs_BdRingAlloc(rxring, FreeBds, &rxbd);
	if (Status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("setup_rx_bds: Error allocating RxBD\r\n"));
		return;
	}
	for (k = 0, CurBdPtr = rxbd; k < FreeBds; k++) {
		p = pbuf_alloc(PBUF_RAW, XEMACPS_MAX_FRAME_SIZE, PBUF_POOL);

		if (!p) {
#if LINK_STATS
			lwip_stats.link.memerr++;
			lwip_stats.link.drop++;
#endif
			LWIP_DEBUGF(NETIF_DEBUG, ("unable to alloc pbuf in recv_handler\r\n"));
			return;
		}
		BdIndex = XEMACPS_BD_TO_INDEX(rxring, CurBdPtr);
		Temp = (unsigned int *)CurBdPtr;
		*Temp = 0;
		if (BdIndex == (XLWIP_CONFIG_N_RX_DESC - 1)) {
			*Temp = 0x00000002;
		}
		Temp++;
		*Temp = 0;
		XEmacPs_BdSetAddressRx(CurBdPtr, (u32)p->payload);
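		/* Make sure the descriptor writes have reached memory before
		   the BD is handed over to the controller. */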
		dsb();

		rx_pbufs_storage[BdIndex] = (int)p;
		CurBdPtr = XEmacPs_BdRingNext (rxring, CurBdPtr);

	}

	/* Enqueue the whole set to HW */
	Status = XEmacPs_BdRingToHw(rxring, FreeBds, rxbd);
	if (Status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error committing RxBD to hardware: "));
		if (Status == XST_DMA_SG_LIST_ERROR)
			LWIP_DEBUGF(NETIF_DEBUG, ("XST_DMA_SG_LIST_ERROR: this function was called out of sequence with XEmacPs_BdRingAlloc()\r\n"));
		else
			LWIP_DEBUGF(NETIF_DEBUG, ("set of BDs was rejected because the first BD did not have its start-of-packet bit set, or the last BD did not have its end-of-packet bit set, or any one of the BD set has 0 as length value\r\n"));
		return;
	}
}
Example #3
XStatus init_dma(struct xemac_s *xemac)
{
	XEmacPs_Bd bdtemplate;
	XEmacPs_BdRing *rxringptr, *txringptr;
	XEmacPs_Bd *rxbd;
	struct pbuf *p;
	XStatus status;
	s32_t i;
	u32_t bdindex;
	volatile u32_t tempaddress;

	xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
	struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];

	/*
	 * The BDs need to be allocated in uncached memory. Hence the 1 MB
	 * address range allocated for Bd_Space is made uncached
	 * by setting appropriate attributes in the translation table.
	 * The Bd_Space is aligned to 1MB and has a size of 1 MB. This ensures
	 * a reserved uncached area used only for BDs.
	 */
	if (bd_space_attr_set == 0) {
		Xil_SetTlbAttributes((s32_t)bd_space, 0xc02); // addr, attr
		bd_space_attr_set = 1;
	}

	rxringptr = &XEmacPs_GetRxRing(&xemacpsif->emacps);
	txringptr = &XEmacPs_GetTxRing(&xemacpsif->emacps);
	LWIP_DEBUGF(NETIF_DEBUG, ("rxringptr: 0x%08x\r\n", rxringptr));
	LWIP_DEBUGF(NETIF_DEBUG, ("txringptr: 0x%08x\r\n", txringptr));

	/* Allocate 64k for Rx and Tx bds each to take care of extreme cases */
	tempaddress = (u32_t)&(bd_space[bd_space_index]);
	xemacpsif->rx_bdspace = (void *)tempaddress;
	bd_space_index += 0x10000;
	tempaddress = (u32_t)&(bd_space[bd_space_index]);
	xemacpsif->tx_bdspace = (void *)tempaddress;
	bd_space_index += 0x10000;
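	/* bd_space_index is advanced past both 64 KB slices so a later call
	   (e.g. for a second GEM instance) carves its BDs out of the next
	   part of the uncached bd_space region. */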

	LWIP_DEBUGF(NETIF_DEBUG, ("rx_bdspace: 0x%08x\r\n", xemacpsif->rx_bdspace));
	LWIP_DEBUGF(NETIF_DEBUG, ("tx_bdspace: 0x%08x\r\n", xemacpsif->tx_bdspace));

	if (!xemacpsif->rx_bdspace || !xemacpsif->tx_bdspace) {
		xil_printf("%s@%d: Error: Unable to allocate memory for TX/RX buffer descriptors",
				__FILE__, __LINE__);
		return ERR_IF;
	}

	/*
	 * Setup RxBD space.
	 *
	 * Setup a BD template for the Rx channel. This template will be copied to
	 * every RxBD. We will not have to explicitly set these again.
	 */
	XEmacPs_BdClear(&bdtemplate);

	/*
	 * Create the RxBD ring
	 */

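	/* Physical and virtual BD addresses are passed as the same value here
	   because bd_space is mapped flat (1:1). */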
	status = XEmacPs_BdRingCreate(rxringptr, (u32) xemacpsif->rx_bdspace,
				(u32) xemacpsif->rx_bdspace, BD_ALIGNMENT,
				     XLWIP_CONFIG_N_RX_DESC);

	if (status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error setting up RxBD space\r\n"));
		return ERR_IF;
	}

	status = XEmacPs_BdRingClone(rxringptr, &bdtemplate, XEMACPS_RECV);
	if (status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error initializing RxBD space\r\n"));
		return ERR_IF;
	}

	XEmacPs_BdClear(&bdtemplate);
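	/* Mark every TxBD "used" in the template so the controller treats the
	   whole ring as software-owned until a real frame clears the bit. */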
	XEmacPs_BdSetStatus(&bdtemplate, XEMACPS_TXBUF_USED_MASK);
	/*
	 * Create the TxBD ring
	 */
	status = XEmacPs_BdRingCreate(txringptr, (u32) xemacpsif->tx_bdspace,
				(u32) xemacpsif->tx_bdspace, BD_ALIGNMENT,
				     XLWIP_CONFIG_N_TX_DESC);

	if (status != XST_SUCCESS) {
		return ERR_IF;
	}

	/* We reuse the bd template, as the same one will work for both rx and tx. */
	status = XEmacPs_BdRingClone(txringptr, &bdtemplate, XEMACPS_SEND);
	if (status != XST_SUCCESS) {
		return ERR_IF;
	}

	/*
	 * Allocate RX descriptors, 1 RxBD at a time.
	 */
	for (i = 0; i < XLWIP_CONFIG_N_RX_DESC; i++) {
		p = pbuf_alloc(PBUF_RAW, XEMACPS_MAX_FRAME_SIZE, PBUF_POOL);
		if (!p) {
#if LINK_STATS
			lwip_stats.link.memerr++;
			lwip_stats.link.drop++;
#endif
			printf("unable to alloc pbuf in init_dma\r\n");
			return ERR_IF;
		}
		status = XEmacPs_BdRingAlloc(rxringptr, 1, &rxbd);
		if (status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("init_dma: Error allocating RxBD\r\n"));
			pbuf_free(p);
			return ERR_IF;
		}
		/* Enqueue to HW */
		status = XEmacPs_BdRingToHw(rxringptr, 1, rxbd);
		if (status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("Error: committing RxBD to HW\r\n"));
			pbuf_free(p);
			XEmacPs_BdRingUnAlloc(rxringptr, 1, rxbd);
			return ERR_IF;
		}

		Xil_DCacheInvalidateRange((u32_t)p->payload, (u32_t)XEMACPS_MAX_FRAME_SIZE);
		XEmacPs_BdSetAddressRx(rxbd, (u32_t)p->payload);

		bdindex = XEMACPS_BD_TO_INDEX(rxringptr, rxbd);
		rx_pbufs_storage[bdindex] = (s32_t)p;
	}

	/*
	 * Connect the device driver handler that will be called when an
	 * interrupt for the device occurs, the handler defined above performs
	 * the specific interrupt processing for the device.
	 */
	XScuGic_RegisterHandler(INTC_BASE_ADDR, xtopologyp->scugic_emac_intr,
				(Xil_ExceptionHandler)XEmacPs_IntrHandler,
						(void *)&xemacpsif->emacps);
	/*
	 * Enable the interrupt for emacps.
	 */
	XScuGic_EnableIntr(INTC_DIST_BASE_ADDR, (u32) xtopologyp->scugic_emac_intr);
	emac_intr_num = (u32) xtopologyp->scugic_emac_intr;
	return 0;
}
Example #4
XStatus emacps_sgsend(xemacpsif_s *xemacpsif, struct pbuf *p)
{
	struct pbuf *q;
	s32_t n_pbufs;
	XEmacPs_Bd *txbdset, *txbd, *last_txbd = NULL;
	XEmacPs_Bd *temp_txbd;
	XStatus status;
	XEmacPs_BdRing *txring;
	u32_t bdindex;
	u32_t lev;

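	/* Mask IRQ and FIQ (CPSR I/F bits) so the TX ring is not modified by
	   the interrupt handler while the BD chain is being built. */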
	lev = mfcpsr();
	mtcpsr(lev | 0x000000C0);

	txring = &(XEmacPs_GetTxRing(&xemacpsif->emacps));

	/* first count the number of pbufs */
	for (q = p, n_pbufs = 0; q != NULL; q = q->next)
		n_pbufs++;

	/* obtain as many BDs as there are pbufs */
	status = XEmacPs_BdRingAlloc(txring, n_pbufs, &txbdset);
	if (status != XST_SUCCESS) {
		mtcpsr(lev);
		LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error allocating TxBD\r\n"));
		return XST_FAILURE;
	}

	for(q = p, txbd = txbdset; q != NULL; q = q->next) {
		bdindex = XEMACPS_BD_TO_INDEX(txring, txbd);
		if (tx_pbufs_storage[bdindex] != 0) {
			mtcpsr(lev);
			LWIP_DEBUGF(NETIF_DEBUG, ("PBUFS not available\r\n"));
			return XST_FAILURE;
		}

		/* Send the data from the pbuf to the interface, one pbuf at a
		   time. The size of the data in each pbuf is kept in the ->len
		   variable. */
		Xil_DCacheFlushRange((u32_t)q->payload, (u32_t)q->len);

		XEmacPs_BdSetAddressTx(txbd, (u32)q->payload);
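		/* The BD length field is 14 bits wide (hence the 0x3FFF mask);
		   clamp each fragment to the MTU, i.e. the max frame size minus
		   the 14-byte Ethernet header and 4-byte FCS. */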
		if (q->len > (XEMACPS_MAX_FRAME_SIZE - 18))
			XEmacPs_BdSetLength(txbd, (XEMACPS_MAX_FRAME_SIZE - 18) & 0x3FFF);
		else
			XEmacPs_BdSetLength(txbd, q->len & 0x3FFF);

		tx_pbufs_storage[bdindex] = (s32_t)q;

		pbuf_ref(q);
		last_txbd = txbd;
		XEmacPs_BdClearLast(txbd);
		txbd = XEmacPs_BdRingNext(txring, txbd);
	}
	XEmacPs_BdSetLast(last_txbd);
	/* For fragmented packets, remember the 1st BD allocated for the 1st
	   packet fragment. The used bit for this BD should be cleared at the end
	   after clearing out used bits for other fragments. For packets without
	   fragments, just remember the allocated BD. */
	temp_txbd = txbdset;
	txbd = txbdset;
	txbd = XEmacPs_BdRingNext(txring, txbd);
	q = p->next;
	for(; q != NULL; q = q->next) {
		XEmacPs_BdClearTxUsed(txbd);
		txbd = XEmacPs_BdRingNext(txring, txbd);
	}
	XEmacPs_BdClearTxUsed(temp_txbd);

	status = XEmacPs_BdRingToHw(txring, n_pbufs, txbdset);
	if (status != XST_SUCCESS) {
		mtcpsr(lev);
		LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error submitting TxBD\r\n"));
		return XST_FAILURE;
	}
	/* Start transmit */
	XEmacPs_WriteReg((xemacpsif->emacps).Config.BaseAddress,
	XEMACPS_NWCTRL_OFFSET,
	(XEmacPs_ReadReg((xemacpsif->emacps).Config.BaseAddress,
	XEMACPS_NWCTRL_OFFSET) | XEMACPS_NWCTRL_STARTTX_MASK));

	mtcpsr(lev);
	return status;
}
XStatus init_dma(struct xemac_s *xemac)
{
	XEmacPs_Bd BdTemplate;
	XEmacPs_BdRing *RxRingPtr, *TxRingPtr;
	XEmacPs_Bd *rxbd;
	struct pbuf *p;
	XStatus Status;
	int i;
	unsigned int BdIndex;
	char *endAdd = &_end;
	/*
	 * Align the BD start address to a 1 MB boundary.
	 */
	char *endAdd_aligned = (char *)(((int)endAdd + 0x100000) & (~0xFFFFF));
	xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
	struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];

	/*
	 * The BDs need to be allocated in uncached memory. Hence the 1 MB
	 * address range starting at endAdd_aligned is made uncached
	 * by setting appropriate attributes in the translation table.
	 */
	Xil_SetTlbAttributes((int)endAdd_aligned, 0xc02); // addr, attr

	RxRingPtr = &XEmacPs_GetRxRing(&xemacpsif->emacps);
	TxRingPtr = &XEmacPs_GetTxRing(&xemacpsif->emacps);
	LWIP_DEBUGF(NETIF_DEBUG, ("RxRingPtr: 0x%08x\r\n", RxRingPtr));
	LWIP_DEBUGF(NETIF_DEBUG, ("TxRingPtr: 0x%08x\r\n", TxRingPtr));

	xemacpsif->rx_bdspace = (void *)endAdd_aligned;
	/*
	 * We allocate 65536 bytes for Rx BDs which can accommodate a
	 * maximum of 8192 BDs which is much more than any application
	 * will ever need.
	 */
	xemacpsif->tx_bdspace = (void *)(endAdd_aligned + 0x10000);

	LWIP_DEBUGF(NETIF_DEBUG, ("rx_bdspace: 0x%08x\r\n",
												xemacpsif->rx_bdspace));
	LWIP_DEBUGF(NETIF_DEBUG, ("tx_bdspace: 0x%08x\r\n",
												xemacpsif->tx_bdspace));

	if (!xemacpsif->rx_bdspace || !xemacpsif->tx_bdspace) {
		xil_printf("%s@%d: Error: Unable to allocate memory for TX/RX buffer descriptors",
				__FILE__, __LINE__);
		return XST_FAILURE;
	}

	/*
	 * Setup RxBD space.
	 *
	 * Setup a BD template for the Rx channel. This template will be copied to
	 * every RxBD. We will not have to explicitly set these again.
	 */
	XEmacPs_BdClear(&BdTemplate);

	/*
	 * Create the RxBD ring
	 */

	Status = XEmacPs_BdRingCreate(RxRingPtr, (u32) xemacpsif->rx_bdspace,
				(u32) xemacpsif->rx_bdspace, BD_ALIGNMENT,
				     XLWIP_CONFIG_N_RX_DESC);

	if (Status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error setting up RxBD space\r\n"));
		return XST_FAILURE;
	}

	Status = XEmacPs_BdRingClone(RxRingPtr, &BdTemplate, XEMACPS_RECV);
	if (Status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error initializing RxBD space\r\n"));
		return XST_FAILURE;
	}

	XEmacPs_BdClear(&BdTemplate);
	XEmacPs_BdSetStatus(&BdTemplate, XEMACPS_TXBUF_USED_MASK);
	/*
	 * Create the TxBD ring
	 */
	Status = XEmacPs_BdRingCreate(TxRingPtr, (u32) xemacpsif->tx_bdspace,
				(u32) xemacpsif->tx_bdspace, BD_ALIGNMENT,
				     XLWIP_CONFIG_N_TX_DESC);

	if (Status != XST_SUCCESS) {
		return XST_FAILURE;
	}

	/* We reuse the bd template, as the same one will work for both rx and tx. */
	Status = XEmacPs_BdRingClone(TxRingPtr, &BdTemplate, XEMACPS_SEND);
	if (Status != XST_SUCCESS) {
		return XST_FAILURE;
	}

	/*
	 * Allocate RX descriptors, 1 RxBD at a time.
	 */
	for (i = 0; i < XLWIP_CONFIG_N_RX_DESC; i++) {
		Status = XEmacPs_BdRingAlloc(RxRingPtr, 1, &rxbd);
		if (Status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("init_dma: Error allocating RxBD\r\n"));
			return XST_FAILURE;
		}

		p = pbuf_alloc(PBUF_RAW, XEMACPS_MAX_FRAME_SIZE, PBUF_POOL);
		if (!p) {
#if LINK_STATS
			lwip_stats.link.memerr++;
			lwip_stats.link.drop++;
#endif
			LWIP_DEBUGF(NETIF_DEBUG, ("unable to alloc pbuf in init_dma\r\n"));
			return XST_FAILURE;
		}

		XEmacPs_BdSetAddressRx(rxbd, (u32)p->payload);

		BdIndex = XEMACPS_BD_TO_INDEX(RxRingPtr, rxbd);
		rx_pbufs_storage[BdIndex] = (int)p;

		/* Enqueue to HW */
		Status = XEmacPs_BdRingToHw(RxRingPtr, 1, rxbd);
		if (Status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("Error: committing RxBD to HW\r\n"));
			return XST_FAILURE;
		}
	}

	/*
	 * Connect the device driver handler that will be called when an
	 * interrupt for the device occurs, the handler defined above performs
	 * the specific interrupt processing for the device.
	 */
	XScuGic_RegisterHandler(INTC_BASE_ADDR, xtopologyp->scugic_emac_intr,
				(Xil_ExceptionHandler)XEmacPs_IntrHandler,
						(void *)&xemacpsif->emacps);
	/*
	 * Enable the interrupt for emacps.
	 */
	XScuGic_EnableIntr(INTC_DIST_BASE_ADDR, (u32) xtopologyp->scugic_emac_intr);
	EmacIntrNum = (u32) xtopologyp->scugic_emac_intr;
	return XST_SUCCESS;
}
XStatus emacps_sgsend(xemacpsif_s *xemacpsif, struct pbuf *p)
{
	struct pbuf *q;
	int n_pbufs;
	XEmacPs_Bd *txbdset, *txbd, *last_txbd = NULL;
	XStatus Status;
	XEmacPs_BdRing *txring;
	unsigned int BdIndex;
	unsigned int lev;

	lev = mfcpsr();
	mtcpsr(lev | 0x000000C0);

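	/* When built for the PEEP emulation platform, spin until the TXGO bit
	   (0x08) in the TX status register clears, i.e. the previous
	   transmission has finished. */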
#ifdef PEEP
    while((XEmacPs_ReadReg((xemacpsif->emacps).Config.BaseAddress,
    									XEMACPS_TXSR_OFFSET)) & 0x08);
#endif
	txring = &(XEmacPs_GetTxRing(&xemacpsif->emacps));

	/* first count the number of pbufs */
	for (q = p, n_pbufs = 0; q != NULL; q = q->next)
		n_pbufs++;

	/* obtain as many BDs as there are pbufs */
	Status = XEmacPs_BdRingAlloc(txring, n_pbufs, &txbdset);
	if (Status != XST_SUCCESS) {
		mtcpsr(lev);
		LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error allocating TxBD\r\n"));
		return ERR_IF;
	}

	for(q = p, txbd = txbdset; q != NULL; q = q->next) {
		BdIndex = XEMACPS_BD_TO_INDEX(txring, txbd);
		if (tx_pbufs_storage[BdIndex] != 0) {
			mtcpsr(lev);
			LWIP_DEBUGF(NETIF_DEBUG, ("PBUFS not available\r\n"));
			return ERR_IF;
		}

		/* Send the data from the pbuf to the interface, one pbuf at a
		   time. The size of the data in each pbuf is kept in the ->len
		   variable. */
		Xil_DCacheFlushRange((unsigned int)q->payload, (unsigned)q->len);

		XEmacPs_BdSetAddressTx(txbd, (u32)q->payload);
		if (q->len > (XEMACPS_MAX_FRAME_SIZE - 18))
			XEmacPs_BdSetLength(txbd, (XEMACPS_MAX_FRAME_SIZE - 18) & 0x3FFF);
		else
			XEmacPs_BdSetLength(txbd, q->len & 0x3FFF);

		tx_pbufs_storage[BdIndex] = (int)q;

		pbuf_ref(q);
		last_txbd = txbd;
		XEmacPs_BdClearLast(txbd);
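		/* The barriers keep the descriptor updates ordered as observed
		   by the DMA engine. */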
		dsb();
 		txbd = XEmacPs_BdRingNext(txring, txbd);
	}
	XEmacPs_BdSetLast(last_txbd);
	dsb();
	for(q = p, txbd = txbdset; q != NULL; q = q->next) {
		XEmacPs_BdClearTxUsed(txbd);
		txbd = XEmacPs_BdRingNext(txring, txbd);
	}
	dsb();

	Status = XEmacPs_BdRingToHw(txring, n_pbufs, txbdset);
	if (Status != XST_SUCCESS) {
		mtcpsr(lev);
		LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error submitting TxBD\r\n"));
		return ERR_IF;
	}
	dsb();
	/* Start transmit */
	XEmacPs_WriteReg((xemacpsif->emacps).Config.BaseAddress,
	XEMACPS_NWCTRL_OFFSET,
	(XEmacPs_ReadReg((xemacpsif->emacps).Config.BaseAddress,
	XEMACPS_NWCTRL_OFFSET) | XEMACPS_NWCTRL_STARTTX_MASK));
	dsb();
	mtcpsr(lev);
	return Status;
}
/**
*
* This function demonstrates the usage of the EMACPS by sending and
* receiving a single frame in DMA interrupt mode.
* The source packet will be described by two descriptors. It will be
* received into a buffer described by a single descriptor.
*
* @param	EmacPsInstancePtr is a pointer to the instance of the EmacPs
*		driver.
*
* @return	XST_SUCCESS to indicate success, otherwise XST_FAILURE.
*
* @note		None.
*
*****************************************************************************/
int EmacPsDmaSingleFrameIntrExample(XEmacPs *EmacPsInstancePtr)
{
	int Status;
	u32 PayloadSize = 1000;
	u32 NumRxBuf = 0;
	XEmacPs_Bd *Bd1Ptr;
	XEmacPs_Bd *Bd2Ptr;
	XEmacPs_Bd *BdRxPtr;

	/*
	 * Clear variables shared with callbacks
	 */
	FramesRx = 0;
	FramesTx = 0;
	DeviceErrors = 0;

	/*
	 * Calculate the frame length (not including FCS)
	 */
	TxFrameLength = XEMACPS_HDR_SIZE + PayloadSize;

	/*
	 * Setup packet to be transmitted
	 */
	EmacPsUtilFrameHdrFormatMAC(&TxFrame, EmacPsMAC);
	EmacPsUtilFrameHdrFormatType(&TxFrame, PayloadSize);
	EmacPsUtilFrameSetPayloadData(&TxFrame, PayloadSize);

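	/*
	 * Flush the frame out of the data cache so the DMA reads
	 * up-to-date data from memory.
	 */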
	Xil_DCacheFlushRange((u32)&TxFrame, TxFrameLength);

	/*
	 * Clear out receive packet memory area
	 */
	EmacPsUtilFrameMemClear(&RxFrame);
	Xil_DCacheFlushRange((u32)&RxFrame, TxFrameLength);
	/*
	 * Allocate a single RxBD to describe the receive buffer.
	 */
	Status = XEmacPs_BdRingAlloc(&(XEmacPs_GetRxRing(EmacPsInstancePtr)),
				      1, &BdRxPtr);
	if (Status != XST_SUCCESS) {
		EmacPsUtilErrorTrap("Error allocating RxBD");
		return XST_FAILURE;
	}

	/*
	 * Setup the BD. The XEmacPs_BdRingClone() call will mark the
	 * "wrap" field for last RxBD. Setup buffer address to associated
	 * BD.
	 */

	XEmacPs_BdSetAddressRx(BdRxPtr, (u32)&RxFrame);

	/*
	 * Enqueue to HW
	 */
	Status = XEmacPs_BdRingToHw(&(XEmacPs_GetRxRing(EmacPsInstancePtr)),
				     1, BdRxPtr);
	if (Status != XST_SUCCESS) {
		EmacPsUtilErrorTrap("Error committing RxBD to HW");
		return XST_FAILURE;
	}

	/*
	 * Allocate, setup, and enqueue 2 TxBDs. The first BD will
	 * describe the first 32 bytes of TxFrame and the rest of BDs
	 * will describe the rest of the frame.
	 *
	 * The function below will allocate 2 adjacent BDs with Bd1Ptr
	 * being set as the lead BD.
	 */
	Status = XEmacPs_BdRingAlloc(&(XEmacPs_GetTxRing(EmacPsInstancePtr)),
				      2, &Bd1Ptr);
	if (Status != XST_SUCCESS) {
		EmacPsUtilErrorTrap("Error allocating TxBD");
		return XST_FAILURE;
	}

	/*
	 * Setup first TxBD
	 */
	XEmacPs_BdSetAddressTx(Bd1Ptr, &TxFrame);
	XEmacPs_BdSetLength(Bd1Ptr, FIRST_FRAGMENT_SIZE);
	XEmacPs_BdClearTxUsed(Bd1Ptr);
	XEmacPs_BdClearLast(Bd1Ptr);

	/*
	 * Setup second TxBD
	 */
	Bd2Ptr = XEmacPs_BdRingNext(&(XEmacPs_GetTxRing(EmacPsInstancePtr)),
				      Bd1Ptr);
	XEmacPs_BdSetAddressTx(Bd2Ptr,
				 (u32) (&TxFrame) + FIRST_FRAGMENT_SIZE);
	XEmacPs_BdSetLength(Bd2Ptr, TxFrameLength - FIRST_FRAGMENT_SIZE);
	XEmacPs_BdClearTxUsed(Bd2Ptr);
	XEmacPs_BdSetLast(Bd2Ptr);

	/*
	 * Enqueue to HW
	 */
	Status = XEmacPs_BdRingToHw(&(XEmacPs_GetTxRing(EmacPsInstancePtr)),
				     2, Bd1Ptr);
	if (Status != XST_SUCCESS) {
		EmacPsUtilErrorTrap("Error committing TxBD to HW");
		return XST_FAILURE;
	}

	/*
	 * Start the device
	 */
	XEmacPs_Start(EmacPsInstancePtr);

	/* Start transmit */
	XEmacPs_Transmit(EmacPsInstancePtr);

	/*
	 * Wait for transmission to complete
	 */
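	/* FramesTx is set from the transmit callback (one of the variables
	   shared with callbacks that were cleared above). */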
	while (!FramesTx);

	/*
	 * Now that the frame has been sent, post process our TxBDs.
	 * Since we submitted only 2 to hardware, there should
	 * be only 2 ready for post processing.
	 */
	if (XEmacPs_BdRingFromHwTx(&(XEmacPs_GetTxRing(EmacPsInstancePtr)),
				    2, &Bd1Ptr) == 0) {
		EmacPsUtilErrorTrap
			("TxBDs were not ready for post processing");
		return XST_FAILURE;
	}

	/*
	 * Examine the TxBDs.
	 *
	 * There isn't much to do. The only thing to check would be DMA
	 * exception bits. But this would also be caught in the error
	 * handler. So we just return these BDs to the free list.
	 */
	Status = XEmacPs_BdRingFree(&(XEmacPs_GetTxRing(EmacPsInstancePtr)),
				     2, Bd1Ptr);
	if (Status != XST_SUCCESS) {
		EmacPsUtilErrorTrap("Error freeing up TxBDs");
		return XST_FAILURE;
	}

	/*
	 * Wait for Rx indication
	 */
	while (!FramesRx);

	/*
	 * Now that the frame has been received, post process our RxBD.
	 * Since we submitted only 1 to hardware, there should be only 1
	 * ready for post processing.
	 */
	NumRxBuf = XEmacPs_BdRingFromHwRx(&(XEmacPs_GetRxRing
					  (EmacPsInstancePtr)), 1,
					 &BdRxPtr);
	if (0 == NumRxBuf) {
		EmacPsUtilErrorTrap("RxBD was not ready for post processing");
		return XST_FAILURE;
	}

	/*
	 * There is no device status to check. If there was a DMA error,
	 * it should have been reported to the error handler. Check the
	 * receive length against the transmitted length, then verify
	 * the data.
	 */
	if ((XEmacPs_BdGetLength(BdRxPtr)) != TxFrameLength) {
		EmacPsUtilErrorTrap("Length mismatch");
		return XST_FAILURE;
	}

	if (EmacPsUtilFrameVerify(&TxFrame, &RxFrame) != 0) {
		EmacPsUtilErrorTrap("Data mismatch");
		return XST_FAILURE;
	}

	/*
	 * Return the RxBD back to the channel for later allocation. Free
	 * the exact number we just post processed.
	 */
	Status = XEmacPs_BdRingFree(&(XEmacPs_GetRxRing(EmacPsInstancePtr)),
				     NumRxBuf, BdRxPtr);
	if (Status != XST_SUCCESS) {
		EmacPsUtilErrorTrap("Error freeing up RxBDs");
		return XST_FAILURE;
	}

	/*
	 * Finished this example. If everything worked correctly, all TxBDs
	 * and RxBDs should be free for allocation. Stop the device.
	 */
	XEmacPs_Stop(EmacPsInstancePtr);

	return XST_SUCCESS;
}