int wlan_eth_dma_send(u8* pkt_ptr, u32 length) {
	int status;
	XAxiDma_BdRing *txRing_ptr;
	XAxiDma_Bd *cur_bd_ptr;

	//Flush the data cache of the pkt buffer
	// Comment this back in if the dcache is enabled
	//Xil_DCacheFlushRange((u32)pkt_ptr, length);

	txRing_ptr = XAxiDma_GetTxRing(&ETH_A_DMA_Instance);

	status = XAxiDma_BdRingAlloc(txRing_ptr, 1, &cur_bd_ptr);
	status |= XAxiDma_BdSetBufAddr(cur_bd_ptr, (u32)pkt_ptr);
	status |= XAxiDma_BdSetLength(cur_bd_ptr, length, txRing_ptr->MaxTransferLen);
	if(status != XST_SUCCESS) {xil_printf("Error in setting ETH Tx BD! Err = %d\n", status); return -1;}

	//When using 1 BD for 1 pkt set both SOF and EOF
	XAxiDma_BdSetCtrl(cur_bd_ptr, (XAXIDMA_BD_CTRL_TXSOF_MASK | XAXIDMA_BD_CTRL_TXEOF_MASK) );

	//Set arbitrary ID
	XAxiDma_BdSetId(cur_bd_ptr, (u32)pkt_ptr);

	//Push the BD ring to hardware; this initiates the actual DMA transfer and Ethernet Tx
	status = XAxiDma_BdRingToHw(txRing_ptr, 1, cur_bd_ptr);
	if(status != XST_SUCCESS) {xil_printf("Error in XAxiDma_BdRingToHw(txRing_ptr)! Err = %d\n", status); return -1;}

	//Wait for this DMA transfer to finish (will be replaced by post-Tx ISR)
	while (XAxiDma_BdRingFromHw(txRing_ptr, 1, &cur_bd_ptr) == 0) {/*Do Nothing*/}
	status = XAxiDma_BdRingFree(txRing_ptr, 1, cur_bd_ptr);
	if(status != XST_SUCCESS) {xil_printf("Error in XAxiDma_BdRingFree(txRing_ptr, 1)! Err = %d\n", status); return -1;}

	return 0;
}
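/*
 * Usage sketch (assumption, not from the original source): a caller hands a
 * raw Ethernet frame to wlan_eth_dma_send(). The names eth_frame and
 * eth_frame_len are hypothetical placeholders for the buffer produced by
 * whatever code assembles the outgoing frame.
 */
void example_send_eth_frame(u8* eth_frame, u32 eth_frame_len) {
	//Blocks until the DMA engine reports the single Tx BD as complete
	if(wlan_eth_dma_send(eth_frame, eth_frame_len) != 0) {
		xil_printf("Ethernet Tx DMA send failed\n");
	}
}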
void wlan_eth_dma_update(){
	//Used to submit new BDs to the DMA hardware if space is available
	int bd_count;
	int status;
	XAxiDma_BdRing *ETH_A_RxRing_ptr;
	XAxiDma_Bd *first_bd_ptr;
	XAxiDma_Bd *cur_bd_ptr;
	packet_bd_list checkout;
	packet_bd*	tx_queue;
	u32 i;
	u32 buf_addr;
	u32 num_available_packet_bd;

	ETH_A_RxRing_ptr = XAxiDma_GetRxRing(&ETH_A_DMA_Instance);
	bd_count = XAxiDma_BdRingGetFreeCnt(ETH_A_RxRing_ptr);

	num_available_packet_bd = queue_num_free();

	if(min(num_available_packet_bd,bd_count)>0){
//		xil_printf("%d BDs are free\n",bd_count);
//		xil_printf("%d packet_bds are free\n", num_available_packet_bd);
//		xil_printf("Attaching %d BDs to packet_bds\n",min(bd_count,num_available_packet_bd));

		//Check out up to min(bd_count, num_available_packet_bd) packet_bds
		queue_checkout(&checkout, min(bd_count,num_available_packet_bd));

		status = XAxiDma_BdRingAlloc(ETH_A_RxRing_ptr, min(bd_count,checkout.length), &first_bd_ptr);
		if(status != XST_SUCCESS) {xil_printf("Error in XAxiDma_BdRingAlloc()! Err = %d\n", status); return;}

		tx_queue = checkout.first;

		//Iterate over each Rx buffer descriptor
		cur_bd_ptr = first_bd_ptr;
		for(i = 0; i < min(bd_count,checkout.length); i++) {
			//Set the memory address for this BD's buffer
			buf_addr = (u32)((void*)((tx_packet_buffer*)(tx_queue->buf_ptr))->frame + sizeof(mac_header_80211) + sizeof(llc_header) - sizeof(ethernet_header));

			status = XAxiDma_BdSetBufAddr(cur_bd_ptr, buf_addr);
			if(status != XST_SUCCESS) {xil_printf("XAxiDma_BdSetBufAddr failed (bd %d, addr 0x08x)! Err = %d\n", i, buf_addr, status); return;}

			//Set every Rx BD to max length (this assures 1 BD per Rx pkt)
			status = XAxiDma_BdSetLength(cur_bd_ptr, ETH_A_PKT_BUF_SIZE, ETH_A_RxRing_ptr->MaxTransferLen);
			if(status != XST_SUCCESS) {xil_printf("XAxiDma_BdSetLength failed (bd %d, addr 0x08x)! Err = %d\n", i, buf_addr, status); return;}

			//Rx BD's don't need control flags before use; DMA populates these post-Rx
			XAxiDma_BdSetCtrl(cur_bd_ptr, 0);

			//BD ID is arbitrary; use pointer to the packet_bd associated with this BD
			XAxiDma_BdSetId(cur_bd_ptr, (u32)tx_queue);

			//Update cur_bd_ptr to the next BD in the chain for the next iteration
			cur_bd_ptr = XAxiDma_BdRingNext(ETH_A_RxRing_ptr, cur_bd_ptr);

			//Remove this tx_queue from the checkout list
			//packet_bd_remove(&checkout,tx_queue);

			//Traverse forward in the checked-out packet_bd list
			tx_queue = tx_queue->next;
		}

		//Push the Rx BD ring to hardware and start receiving
		status = XAxiDma_BdRingToHw(ETH_A_RxRing_ptr, min(num_available_packet_bd,bd_count), first_bd_ptr);
		if(status != XST_SUCCESS) {xil_printf("Error in XAxiDma_BdRingToHw(ETH_A_RxRing_ptr)! Err = %d\n", status); return;}

		//Check any remaining unused entries from the checkout list back in
		//queue_checkin(&checkout);

	}
	return;
}
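/*
 * Usage sketch (assumption): wlan_eth_dma_update() only attaches as many Rx
 * BDs as both the DMA ring and the packet_bd queue can currently supply, so a
 * caller can simply invoke it from the main polling loop whenever packet_bds
 * may have been freed. The loop below is hypothetical.
 */
void example_poll_loop() {
	while(1) {
		//Re-arm the Ethernet Rx DMA with any newly freed packet_bds
		wlan_eth_dma_update();

		//... other polling work (wireless Tx/Rx processing, etc.) ...
	}
}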
int wlan_eth_dma_init() {
	int status;
	int bd_count;
	int i;
	u32 buf_addr;

	XAxiDma_Config *ETH_A_DMA_CFG_ptr;

	XAxiDma_Bd ETH_DMA_BD_Template;
	XAxiDma_BdRing *ETH_A_TxRing_ptr;
	XAxiDma_BdRing *ETH_A_RxRing_ptr;

	XAxiDma_Bd *first_bd_ptr;
	XAxiDma_Bd *cur_bd_ptr;

	packet_bd_list checkout;
	packet_bd*	tx_queue;

	ETH_A_DMA_CFG_ptr = XAxiDma_LookupConfig(ETH_A_DMA_DEV_ID);
	status = XAxiDma_CfgInitialize(&ETH_A_DMA_Instance, ETH_A_DMA_CFG_ptr);
	if(status != XST_SUCCESS) {xil_printf("Error in XAxiDma_CfgInitialize! Err = %d\n", status); return -1;}

	//Zero-out the template buffer descriptor
	XAxiDma_BdClear(&ETH_DMA_BD_Template);

	//Fetch handles to the Tx and Rx BD rings
	ETH_A_TxRing_ptr = XAxiDma_GetTxRing(&ETH_A_DMA_Instance);
	ETH_A_RxRing_ptr = XAxiDma_GetRxRing(&ETH_A_DMA_Instance);

	//Disable all Tx/Rx DMA interrupts
	XAxiDma_BdRingIntDisable(ETH_A_TxRing_ptr, XAXIDMA_IRQ_ALL_MASK);
	XAxiDma_BdRingIntDisable(ETH_A_RxRing_ptr, XAXIDMA_IRQ_ALL_MASK);

	//Disable delays and coalescing (for now - these will be useful when we transition to interrupts)
	XAxiDma_BdRingSetCoalesce(ETH_A_TxRing_ptr, 1, 0);
	XAxiDma_BdRingSetCoalesce(ETH_A_RxRing_ptr, 1, 0);

	//Setup Tx/Rx buffer descriptor rings in memory
	status =  XAxiDma_BdRingCreate(ETH_A_TxRing_ptr, ETH_A_TX_BD_SPACE_BASE, ETH_A_TX_BD_SPACE_BASE, XAXIDMA_BD_MINIMUM_ALIGNMENT, ETH_A_NUM_TX_BD);
	status |= XAxiDma_BdRingCreate(ETH_A_RxRing_ptr, ETH_A_RX_BD_SPACE_BASE, ETH_A_RX_BD_SPACE_BASE, XAXIDMA_BD_MINIMUM_ALIGNMENT, ETH_A_NUM_RX_BD);
	if(status != XST_SUCCESS) {xil_printf("Error creating DMA BD Rings! Err = %d\n", status); return -1;}

	//Populate each ring with empty buffer descriptors
	status =  XAxiDma_BdRingClone(ETH_A_TxRing_ptr, &ETH_DMA_BD_Template);
	status |= XAxiDma_BdRingClone(ETH_A_RxRing_ptr, &ETH_DMA_BD_Template);
	if(status != XST_SUCCESS) {xil_printf("Error in XAxiDma_BdRingClone()! Err = %d\n", status); return -1;}

	//Start the DMA Tx channel
	// No Eth packets are transmitted until actual Tx BD's are pushed to the DMA hardware
	status = XAxiDma_BdRingStart(ETH_A_TxRing_ptr);

	//Initialize the Rx buffer descriptors
	bd_count = XAxiDma_BdRingGetFreeCnt(ETH_A_RxRing_ptr);
	if(bd_count != ETH_A_NUM_RX_BD) {xil_printf("Error in Eth Rx DMA init - not all Rx BDs were free at boot\n");}

	status = XAxiDma_BdRingAlloc(ETH_A_RxRing_ptr, bd_count, &first_bd_ptr);
	if(status != XST_SUCCESS) {xil_printf("Error in XAxiDma_BdRingAlloc()! Err = %d\n", status); return -1;}

	//Checkout ETH_A_NUM_RX_BD packet_bds
	queue_checkout(&checkout, ETH_A_NUM_RX_BD);

	if(checkout.length == ETH_A_NUM_RX_BD){
		tx_queue = checkout.first;
	} else {
		xil_printf("Error during wlan_eth_dma_init: able to check out %d of %d packet_bds\n", checkout.length, ETH_A_NUM_RX_BD);
		return -1;
	}

	//Iterate over each Rx buffer descriptor
	cur_bd_ptr = first_bd_ptr;
	for(i = 0; i < bd_count; i++) {
		//Set the memory address for this BD's buffer
		buf_addr = (u32)((void*)((tx_packet_buffer*)(tx_queue->buf_ptr))->frame + sizeof(mac_header_80211) + sizeof(llc_header) - sizeof(ethernet_header));

		status = XAxiDma_BdSetBufAddr(cur_bd_ptr, buf_addr);
		if(status != XST_SUCCESS) {xil_printf("XAxiDma_BdSetBufAddr failed (bd %d, addr 0x08x)! Err = %d\n", i, buf_addr, status); return -1;}

		//Set every Rx BD to max length (this assures 1 BD per Rx pkt)
		status = XAxiDma_BdSetLength(cur_bd_ptr, ETH_A_PKT_BUF_SIZE, ETH_A_RxRing_ptr->MaxTransferLen);
		if(status != XST_SUCCESS) {xil_printf("XAxiDma_BdSetLength failed (bd %d, addr 0x08x)! Err = %d\n", i, buf_addr, status); return -1;}

		//Rx BD's don't need control flags before use; DMA populates these post-Rx
		XAxiDma_BdSetCtrl(cur_bd_ptr, 0);

		//BD ID is arbitrary; use pointer to the packet_bd associated with this BD
		XAxiDma_BdSetId(cur_bd_ptr, (u32)tx_queue);

		//Update cur_bd_ptr to the next BD in the chain for the next iteration
		cur_bd_ptr = XAxiDma_BdRingNext(ETH_A_RxRing_ptr, cur_bd_ptr);

		//Traverse forward in the checked-out packet_bd list
		tx_queue = tx_queue->next;
	}

	//Push the Rx BD ring to hardware and start receiving
	status = XAxiDma_BdRingToHw(ETH_A_RxRing_ptr, bd_count, first_bd_ptr);

	//Enable Interrupts
	XAxiDma_BdRingIntEnable(ETH_A_RxRing_ptr, XAXIDMA_IRQ_ALL_MASK);

	status |= XAxiDma_BdRingStart(ETH_A_RxRing_ptr);
	if(status != XST_SUCCESS) {xil_printf("Error in XAxiDma_BdRingToHw/XAxiDma_BdRingStart(ETH_A_RxRing_ptr)! Err = %d\n", status); return -1;}

	return 0;
}
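/*
 * Initialization-order sketch (assumption): the packet_bd queue must be set up
 * before wlan_eth_dma_init(), since queue_checkout() has to supply
 * ETH_A_NUM_RX_BD buffers for the Rx BD ring. queue_init() is an assumed name
 * for that initializer; the surrounding boot code is hypothetical.
 */
int example_eth_boot() {
	queue_init();					//Create the packet_bd free pool first
	if(wlan_eth_dma_init() != 0) {	//Then build and arm the Tx/Rx BD rings
		xil_printf("Error: wlan_eth_dma_init() failed\n");
		return -1;
	}
	return 0;
}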
/**
*
* This function transmits one packet through the DMA engine without blocking.
*
* @param    AxiDmaInstPtr points to the DMA engine instance
*
* @return   XST_SUCCESS if the DMA accepts the packet successfully,
*           XST_FAILURE otherwise.
*
* @note     None.
*
******************************************************************************/
static int SendPacket(XAxiDma * AxiDmaInstPtr)
{
	XAxiDma_BdRing *TxRingPtr;
	u8 *TxPacket;
	u8 Value;
	XAxiDma_Bd *BdPtr;
	int Status;
	int i;

	TxRingPtr = XAxiDma_GetTxRing(AxiDmaInstPtr);

	/* Create pattern in the packet to transmit */
	TxPacket = (u8 *) Packet;

	Value = TEST_START_VALUE;

	for(i = 0; i < MAX_PKT_LEN; i ++) {
		TxPacket[i] = Value;

		Value = (Value + 1) & 0xFF;
	}

	/* Allocate a BD */
	Status = XAxiDma_BdRingAlloc(TxRingPtr, 1, &BdPtr);
	if (Status != XST_SUCCESS) {
		return XST_FAILURE;
	}

	/* Set up the BD using the information of the packet to transmit */
	Status = XAxiDma_BdSetBufAddr(BdPtr, (u32) Packet);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "Tx set buffer addr %x on BD %x failed %d\r\n",
		    (unsigned int)Packet, (unsigned int)BdPtr, Status);

		return XST_FAILURE;
	}

	Status = XAxiDma_BdSetLength(BdPtr, MAX_PKT_LEN);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "Tx set length %d on BD %x failed %d\r\n",
		    MAX_PKT_LEN, (unsigned int)BdPtr, Status);

		return XST_FAILURE;
	}

#if (XPAR_AXIDMA_0_SG_INCLUDE_STSCNTRL_STRM == 1)
	Status = XAxiDma_BdSetAppWord(BdPtr,
	    XAXIDMA_LAST_APPWORD, MAX_PKT_LEN);

	/* If Set app length failed, it is not fatal 
	 */
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "Set app word failed with %d\r\n", Status);
	}
#endif

	/* For single packet, both SOF and EOF are to be set
	 */
	XAxiDma_BdSetCtrl(BdPtr, XAXIDMA_BD_CTRL_TXEOF_MASK |
						XAXIDMA_BD_CTRL_TXSOF_MASK);

	XAxiDma_BdSetId(BdPtr, (u32) Packet);

	/* Give the BD to DMA to kick off the transmission. */
	Status = XAxiDma_BdRingToHw(TxRingPtr, 1, BdPtr);
	if (Status != XST_SUCCESS) {
		xil_printf("to hw failed %d\r\n", Status);
		return XST_FAILURE;
	}

	return XST_SUCCESS;
}
/**
*
* This function waits until the DMA transaction is finished, checks data,
* and cleans up.
*
* @return   - XST_SUCCESS if DMA transfer is successful and data is correct,
*           - XST_FAILURE otherwise.
*
* @note     None.
*
******************************************************************************/
static int CheckDmaResult(XAxiDma * AxiDmaInstPtr)
{
	XAxiDma_BdRing *TxRingPtr;
	XAxiDma_BdRing *RxRingPtr;
	XAxiDma_Bd *BdPtr;
	int ProcessedBdCount;
	int FreeBdCount;
	int Status;

	TxRingPtr = XAxiDma_GetTxRing(AxiDmaInstPtr);
	RxRingPtr = XAxiDma_GetRxRing(AxiDmaInstPtr);

	/* Wait until the one BD TX transaction is done */
	while ((ProcessedBdCount = XAxiDma_BdRingFromHw(TxRingPtr,
						       XAXIDMA_ALL_BDS,
						       &BdPtr)) == 0) {
	}

	/* Free all processed TX BDs for future transmission */
	Status = XAxiDma_BdRingFree(TxRingPtr, ProcessedBdCount, BdPtr);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "Failed to free %d tx BDs %d\r\n",
		    ProcessedBdCount, Status);
		return XST_FAILURE;
	}

	/* Wait until the data has been received by the Rx channel */
	while ((ProcessedBdCount = XAxiDma_BdRingFromHw(RxRingPtr,
						       XAXIDMA_ALL_BDS,
						       &BdPtr)) == 0) {
	}

	/* Check received data */
	if (CheckData() != XST_SUCCESS) {

		return XST_FAILURE;
	}

	/* Free all processed RX BDs so they can be reused for future reception */
	Status = XAxiDma_BdRingFree(RxRingPtr, ProcessedBdCount, BdPtr);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "Failed to free %d rx BDs %d\r\n",
		    ProcessedBdCount, Status);
		return XST_FAILURE;
	}

	/* Return processed BDs to RX channel so we are ready to receive new
	 * packets:
	 *    - Allocate all free RX BDs
	 *    - Pass the BDs to RX channel
	 */
	FreeBdCount = XAxiDma_BdRingGetFreeCnt(RxRingPtr);
	Status = XAxiDma_BdRingAlloc(RxRingPtr, FreeBdCount, &BdPtr);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR, "bd alloc failed\r\n");
		return XST_FAILURE;
	}

	Status = XAxiDma_BdRingToHw(RxRingPtr, FreeBdCount, BdPtr);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "Submit %d rx BDs failed %d\r\n", FreeBdCount, Status);
		return XST_FAILURE;
	}

	return XST_SUCCESS;
}
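/**
*
* Flow sketch (assumption): how SendPacket() and CheckDmaResult() above are
* typically driven in the polled scatter-gather example. TxSetup() is assumed
* to exist and mirror RxSetup(); prototypes for all four helpers are assumed
* to be visible at this point in the file.
*
******************************************************************************/
static int ExampleSgPollFlow(XAxiDma * AxiDmaInstPtr)
{
	int Status;

	/* Create and start the Tx BD ring (assumed helper) */
	Status = TxSetup(AxiDmaInstPtr);
	if (Status != XST_SUCCESS) {
		return XST_FAILURE;
	}

	/* Create, populate and start the Rx BD ring */
	Status = RxSetup(AxiDmaInstPtr);
	if (Status != XST_SUCCESS) {
		return XST_FAILURE;
	}

	/* Queue one Tx BD to hardware */
	Status = SendPacket(AxiDmaInstPtr);
	if (Status != XST_SUCCESS) {
		return XST_FAILURE;
	}

	/* Poll Tx/Rx completion, verify the data and recycle the BDs */
	return CheckDmaResult(AxiDmaInstPtr);
}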
/**
*
* This function sets up the RX channel of the DMA engine to be ready for
* packet reception.
*
* @param    AxiDmaInstPtr is the pointer to the instance of the DMA engine.
*
* @return   XST_SUCCESS if the setup is successful, XST_FAILURE otherwise.
*
* @note     None.
*
******************************************************************************/
static int RxSetup(XAxiDma * AxiDmaInstPtr)
{
	XAxiDma_BdRing *RxRingPtr;
	int Delay = 0;
	int Coalesce = 1;
	int Status;
	XAxiDma_Bd BdTemplate;
	XAxiDma_Bd *BdPtr;
	XAxiDma_Bd *BdCurPtr;
	u32 BdCount;
	u32 FreeBdCount;
	u32 RxBufferPtr;
	int i;

	RxRingPtr = XAxiDma_GetRxRing(AxiDmaInstPtr);

	/* Disable all RX interrupts before RxBD space setup */

	XAxiDma_BdRingIntDisable(RxRingPtr, XAXIDMA_IRQ_ALL_MASK);

	/* Set delay and coalescing */
	XAxiDma_BdRingSetCoalesce(RxRingPtr, Coalesce, Delay);

	/* Setup Rx BD space */
	BdCount = XAxiDma_BdRingCntCalc(XAXIDMA_BD_MINIMUM_ALIGNMENT,
					RX_BD_SPACE_HIGH - RX_BD_SPACE_BASE + 1);

	Status = XAxiDma_BdRingCreate(RxRingPtr, RX_BD_SPACE_BASE,
				     RX_BD_SPACE_BASE,
				     XAXIDMA_BD_MINIMUM_ALIGNMENT, BdCount);

	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "RX create BD ring failed %d\r\n", Status);

		return XST_FAILURE;
	}

	/*
	 * Setup an all-zero BD as the template for the Rx channel.
	 */
	XAxiDma_BdClear(&BdTemplate);

	Status = XAxiDma_BdRingClone(RxRingPtr, &BdTemplate);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "RX clone BD failed %d\r\n", Status);

		return XST_FAILURE;
	}

	/* Attach buffers to RxBD ring so we are ready to receive packets */

	FreeBdCount = XAxiDma_BdRingGetFreeCnt(RxRingPtr);

	Status = XAxiDma_BdRingAlloc(RxRingPtr, FreeBdCount, &BdPtr);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "RX alloc BD failed %d\r\n", Status);

		return XST_FAILURE;
	}

	BdCurPtr = BdPtr;
	RxBufferPtr = RX_BUFFER_BASE;
	for (i = 0; i < FreeBdCount; i++) {
		Status = XAxiDma_BdSetBufAddr(BdCurPtr, RxBufferPtr);

		if (Status != XST_SUCCESS) {
			xdbg_printf(XDBG_DEBUG_ERROR,
			    "Set buffer addr %x on BD %x failed %d\r\n",
			    (unsigned int)RxBufferPtr,
			    (unsigned int)BdCurPtr, Status);

			return XST_FAILURE;
		}

		Status = XAxiDma_BdSetLength(BdCurPtr, MAX_PKT_LEN);
		if (Status != XST_SUCCESS) {
			xdbg_printf(XDBG_DEBUG_ERROR,
			    "Rx set length %d on BD %x failed %d\r\n",
			    MAX_PKT_LEN, (unsigned int)BdCurPtr, Status);

			return XST_FAILURE;
		}

		/* Receive BDs do not need to set anything for the control
		 * The hardware will set the SOF/EOF bits per stream status 
		 */
		XAxiDma_BdSetCtrl(BdCurPtr, 0);
		XAxiDma_BdSetId(BdCurPtr, RxBufferPtr);
	
		RxBufferPtr += MAX_PKT_LEN;
		BdCurPtr = XAxiDma_BdRingNext(RxRingPtr, BdCurPtr);
	}

	/* Clear the receive buffer, so we can verify data
	 */
	memset((void *)RX_BUFFER_BASE, 0, MAX_PKT_LEN); 

	Status = XAxiDma_BdRingToHw(RxRingPtr, FreeBdCount, BdPtr);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "RX submit hw failed %d\r\n", Status);

		return XST_FAILURE;
	}

	/* Start RX DMA channel */
	Status = XAxiDma_BdRingStart(RxRingPtr);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "RX start hw failed %d\r\n", Status);

		return XST_FAILURE;
	}

	return XST_SUCCESS;
}
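/*
 * Cache note (assumption): RxSetup() above clears the receive buffer with
 * memset(), so if the data cache is enabled the same region should also be
 * invalidated before the CPU inspects DMA-written data (mirroring the
 * Xil_DCacheFlushRange() call used on the transmit side). A minimal sketch:
 */
static void ExampleInvalidateRxBuffer(void)
{
	/* Force the CPU to re-read what the DMA wrote to memory */
	Xil_DCacheInvalidateRange((u32)RX_BUFFER_BASE, MAX_PKT_LEN);
}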
/*
*
* This function sets up the RX channel of the DMA engine to be ready for
* packet reception.
*
* @param	AxiDmaInstPtr is the pointer to the instance of the DMA engine.
*
* @return	- XST_SUCCESS if the setup is successful.
*		- XST_FAILURE if fails.
*
* @note		None.
*
******************************************************************************/
static int RxSetup(XAxiDma * AxiDmaInstPtr)
{
	XAxiDma_BdRing *RxRingPtr;
	int Status;
	XAxiDma_Bd BdTemplate;
	XAxiDma_Bd *BdPtr;
	XAxiDma_Bd *BdCurPtr;
	int BdCount;
	int FreeBdCount;
	u32 RxBufferPtr;
	u32 RxBdSpacePtr;
	int Index;
	int RingIndex;

	RxBufferPtr = RX_BUFFER_BASE;
	RxBdSpacePtr = RX_BD_SPACE_BASE;

	for (RingIndex = 0;
			RingIndex < AxiDmaInstPtr->RxNumChannels; RingIndex++) {

		RxRingPtr = XAxiDma_GetRxIndexRing(AxiDmaInstPtr, RingIndex);

		/* Disable all RX interrupts before RxBD space setup */
		XAxiDma_BdRingIntDisable(RxRingPtr,
						XAXIDMA_IRQ_ALL_MASK);

		/* Setup Rx BD space */
		BdCount = XAxiDma_BdRingCntCalc(XAXIDMA_BD_MINIMUM_ALIGNMENT,
				RX_BD_SPACE_HIGH - RX_BD_SPACE_BASE + 1);

		Status = XAxiDma_BdRingCreate(RxRingPtr,
					RxBdSpacePtr,
					RxBdSpacePtr,
					XAXIDMA_BD_MINIMUM_ALIGNMENT,
					BdCount);
		if (Status != XST_SUCCESS) {
			xil_printf("Rx bd create failed with %d\r\n",
				Status);
			return XST_FAILURE;
		}

		/*
	 	 * Setup a BD template for the Rx channel. Then copy it
	 	 * to every RX BD.
		 */
		XAxiDma_BdClear(&BdTemplate);
		Status = XAxiDma_BdRingClone(RxRingPtr,
						 &BdTemplate);
		if (Status != XST_SUCCESS) {
			xil_printf("Rx bd clone failed with %d\r\n",
				Status);
			return XST_FAILURE;
		}

		/* Attach buffers to RxBD ring so we are ready to receive packets */
		FreeBdCount = XAxiDma_BdRingGetFreeCnt(RxRingPtr);

		Status = XAxiDma_BdRingAlloc(RxRingPtr,
					FreeBdCount, &BdPtr);
		if (Status != XST_SUCCESS) {
			xil_printf("Rx bd alloc failed with %d\r\n",
				Status);
			return XST_FAILURE;
		}

		BdCurPtr = BdPtr;

		for (Index = 0; Index < FreeBdCount; Index++) {

			Status = XAxiDma_BdSetBufAddr(BdCurPtr, RxBufferPtr);
			if (Status != XST_SUCCESS) {
				xil_printf("Rx set buffer addr %x on BD %x failed %d\r\n",
				(unsigned int)RxBufferPtr,
				(unsigned int)BdCurPtr, Status);

				return XST_FAILURE;
			}

			Status = XAxiDma_BdSetLength(BdCurPtr, HSIZE,
						RxRingPtr->MaxTransferLen);
			if (Status != XST_SUCCESS) {
				xil_printf("Rx set length %d on BD %x failed %d\r\n",
			    	MAX_PKT_LEN, (unsigned int)BdCurPtr, Status);

				return XST_FAILURE;
			}

			/* Receive BDs do not need to set anything for the control
		 	 * The hardware will set the SOF/EOF bits per stream status
		 	 */
			XAxiDma_BdSetCtrl(BdCurPtr, 0);
			XAxiDma_BdSetId(BdCurPtr, RxBufferPtr);
			XAxiDma_BdSetARCache(BdCurPtr, ARCACHE);
			XAxiDma_BdSetARUser(BdCurPtr, ARUSER);
			XAxiDma_BdSetVSize(BdCurPtr, VSIZE);
			XAxiDma_BdSetStride(BdCurPtr, STRIDE);

			RxBufferPtr += MAX_PKT_LEN;
			BdCurPtr = XAxiDma_BdRingNext(RxRingPtr, BdCurPtr);
		}

		/*
	 	 * Set the coalescing threshold, so only one receive interrupt
	 	 * occurs for this example
	 	 *
	 	 * If you would like to have multiple interrupts to happen, change
	 	 * the COALESCING_COUNT to be a smaller value
	 	 */
		Status = XAxiDma_BdRingSetCoalesce(RxRingPtr, COALESCING_COUNT,
				DELAY_TIMER_COUNT);
		if (Status != XST_SUCCESS) {
			xil_printf("Rx set coalesce failed with %d\r\n", Status);
			return XST_FAILURE;
		}

		Status = XAxiDma_BdRingToHw(RxRingPtr, FreeBdCount, BdPtr);
		if (Status != XST_SUCCESS) {
			xil_printf("Rx ToHw failed with %d\r\n", Status);
			return XST_FAILURE;
		}

		/* Enable all RX interrupts */
		XAxiDma_BdRingIntEnable(RxRingPtr, XAXIDMA_IRQ_ALL_MASK);

		/* Start RX DMA channel */
		Status = XAxiDma_UpdateBdRingCDesc(RxRingPtr);
		if (Status != XST_SUCCESS) {
			xil_printf("Failed bd start %x\r\n", Status);
			return XST_FAILURE;
		}

		RxBdSpacePtr += BdCount * sizeof(XAxiDma_Bd);

	}

	for (RingIndex = 0;
			RingIndex < AxiDmaInstPtr->RxNumChannels; RingIndex++) {
		RxRingPtr = XAxiDma_GetRxIndexRing(AxiDmaInstPtr, RingIndex);
		Status = XAxiDma_StartBdRingHw(RxRingPtr);
		if (Status != XST_SUCCESS) {
			xil_printf("Rx start BD ring failed with %d\r\n", Status);
			return XST_FAILURE;
		}
	}

	return XST_SUCCESS;
}
/*
*
* This function transmits all packets through the DMA engine without blocking.
*
* @param	AxiDmaInstPtr points to the DMA engine instance
*
* @return
* 		- XST_SUCCESS if the DMA accepts all the packets successfully,
* 		- XST_FAILURE if error occurs
*
* @note		None.
*
******************************************************************************/
static int SendPacket(XAxiDma * AxiDmaInstPtr, u8 TDest, u8 TId, u8 Value)
{
	XAxiDma_BdRing *TxRingPtr = XAxiDma_GetTxRing(AxiDmaInstPtr);
	u8 *TxPacket;
	XAxiDma_Bd *BdPtr, *BdCurPtr;
	int Status;
	int Index, Pkts;
	u32 BufferAddr;

	/*
	 * Each packet is limited to TxRingPtr->MaxTransferLen
	 *
	 * This will not be the case if hardware has store and forward built in
	 */
	if (MAX_PKT_LEN * NUMBER_OF_BDS_PER_PKT > TxRingPtr->MaxTransferLen) {

		xil_printf("Invalid total per packet transfer length for the "
		    "packet %d/%d\r\n",
		    MAX_PKT_LEN * NUMBER_OF_BDS_PER_PKT,
		    TxRingPtr->MaxTransferLen);

		return XST_INVALID_PARAM;
	}

	TxPacket = (u8 *) Packet;

	for(Index = 0; Index < MAX_PKT_LEN * NUMBER_OF_BDS_TO_TRANSFER;
					Index ++) {
		TxPacket[Index] = Value;

		Value = (Value + 1) & 0xFF;
	}

	/* Flush the SrcBuffer before the DMA transfer, in case the Data Cache
	 * is enabled
	 */
	Xil_DCacheFlushRange((u32)TxPacket, MAX_PKT_LEN *
				NUMBER_OF_BDS_TO_TRANSFER);

	Status = XAxiDma_BdRingAlloc(TxRingPtr,
				NUMBER_OF_BDS_TO_TRANSFER,
				&BdPtr);
	if (Status != XST_SUCCESS) {

		xil_printf("Failed bd alloc\r\n");
		return XST_FAILURE;
	}

	BufferAddr = (u32) TxPacket;
	BdCurPtr = BdPtr;

	/*
	 * Set up the BD using the information of the packet to transmit
	 * Each transfer has NUMBER_OF_BDS_PER_PKT BDs
	 */
	for(Index = 0; Index < NUMBER_OF_PKTS_TO_TRANSFER; Index++) {

		for(Pkts = 0; Pkts < NUMBER_OF_BDS_PER_PKT; Pkts++) {
			u32 CrBits = 0;

			Status = XAxiDma_BdSetBufAddr(BdCurPtr, BufferAddr);
			if (Status != XST_SUCCESS) {
				xil_printf("Tx set buffer addr %x on BD %x failed %d\r\n",
				(unsigned int)BufferAddr,
				(unsigned int)BdCurPtr, Status);

				return XST_FAILURE;
			}

			Status = XAxiDma_BdSetLength(BdCurPtr, HSIZE,
						TxRingPtr->MaxTransferLen);
			if (Status != XST_SUCCESS) {
				xil_printf("Tx set length %d on BD %x failed %d\r\n",
				MAX_PKT_LEN, (unsigned int)BdCurPtr, Status);

				return XST_FAILURE;
			}

			if (Pkts == 0) {
				/* The first BD has SOF set
				 */
				CrBits |= XAXIDMA_BD_CTRL_TXSOF_MASK;

#if (XPAR_AXIDMA_0_SG_INCLUDE_STSCNTRL_STRM == 1)
				/* The first BD has total transfer length set
				 * in the last APP word, this is for the
				 * loopback widget
				 */
				Status = XAxiDma_BdSetAppWord(BdCurPtr,
				    XAXIDMA_LAST_APPWORD,
				    MAX_PKT_LEN * NUMBER_OF_BDS_PER_PKT);

				if (Status != XST_SUCCESS) {
					xil_printf("Set app word failed with %d\r\n",
					Status);
				}
#endif
			}

			if(Pkts == (NUMBER_OF_BDS_PER_PKT - 1)) {
				/* The last BD of the packet should have EOF set
				 */
				CrBits |= XAXIDMA_BD_CTRL_TXEOF_MASK;
			}

			XAxiDma_BdSetCtrl(BdCurPtr, CrBits);
			XAxiDma_BdSetId(BdCurPtr, BufferAddr);
			XAxiDma_BdSetTId(BdCurPtr, TId);
			XAxiDma_BdSetTDest(BdCurPtr, TDest);
			XAxiDma_BdSetTUser(BdCurPtr, TUSER);
			XAxiDma_BdSetARCache(BdCurPtr, ARCACHE);
			XAxiDma_BdSetARUser(BdCurPtr, ARUSER);
			XAxiDma_BdSetVSize(BdCurPtr, VSIZE);
			XAxiDma_BdSetStride(BdCurPtr, STRIDE);

			BufferAddr += MAX_PKT_LEN;
			BdCurPtr = XAxiDma_BdRingNext(TxRingPtr, BdCurPtr);
		}
	}

	/* Give the BD to hardware */
	Status = XAxiDma_BdRingToHw(TxRingPtr, NUMBER_OF_BDS_TO_TRANSFER,
					BdPtr);
	if (Status != XST_SUCCESS) {

		xil_printf("Failed to hw, length %d\r\n",
			(int)XAxiDma_BdGetLength(BdPtr,
					TxRingPtr->MaxTransferLen));

		return XST_FAILURE;
	}

	return XST_SUCCESS;
}
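/*
 * Usage sketch (assumption): in this multichannel variant SendPacket() takes a
 * TDest/TId pair, so a caller would typically issue one call per Tx channel.
 * The loop, the use of TxNumChannels and the start value are hypothetical and
 * depend on the actual design.
 */
static int ExampleSendAllChannels(XAxiDma * AxiDmaInstPtr)
{
	int RingIndex;
	int Status;

	for (RingIndex = 0; RingIndex < AxiDmaInstPtr->TxNumChannels; RingIndex++) {
		/* Route each burst to stream destination/ID RingIndex */
		Status = SendPacket(AxiDmaInstPtr, (u8)RingIndex, (u8)RingIndex,
					TEST_START_VALUE);
		if (Status != XST_SUCCESS) {
			return XST_FAILURE;
		}
	}

	return XST_SUCCESS;
}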
/**
*
* This function transmits one packet through the DMA engine without blocking.
*
* @param	AxiDmaInstPtr points to the DMA engine instance
*
* @return	- XST_SUCCESS if the DMA accepts the packet successfully,
*		- XST_FAILURE otherwise.
*
* @note     None.
*
******************************************************************************/
static int SendPacket(XAxiDma * AxiDmaInstPtr,int c)
{
	XAxiDma_BdRing *TxRingPtr; //TxRing
	cplx_data_t *TxPacket;	   //PacketPointer
	//cplx_data_t *RxClean;    //CleanData Do in RxSetup

	XAxiDma_Bd *BdPtr;			//Start BDPointer
	int Status;
	int Index;
	char         str[30];
	TxRingPtr = XAxiDma_GetTxRing(AxiDmaInstPtr);

	TxPacket = 	(cplx_data_t *)stim_buf;	//setPointerForPacket

	/* Flush the SrcBuffer before the DMA transfer, in case the Data Cache
	 * is enabled
	 */
	Xil_DCacheFlushRange((u32)TxPacket, MAX_PKT_LEN*64); //8 point each packet
	int FreeBdCount = XAxiDma_BdRingGetFreeCnt(TxRingPtr);
	xil_printf("TxFreeBdCount %d\r\n", FreeBdCount);
	/* Allocate a BD */
	Status = XAxiDma_BdRingAlloc(TxRingPtr, 32, &BdPtr);
	if (Status != XST_SUCCESS) {
		return XST_FAILURE;
	}
	xil_printf("check1");
	XAxiDma_Bd *BdCurPtr;
	u32 TxBufferPtr;
	BdCurPtr = BdPtr;		// Set Current BDptr
	TxBufferPtr = (u32)stim_buf;

	for (Index = 0; Index < 32; Index++) {
		Status = XAxiDma_BdSetBufAddr(BdCurPtr, TxBufferPtr);
		if (Status != XST_SUCCESS) {
			xil_printf("Tx set buffer addr %x on BD %x failed %d\r\n",
			    (unsigned int)TxBufferPtr, (unsigned int)BdCurPtr, Status);

			return XST_FAILURE;
		}

		Status = XAxiDma_BdSetLength(BdCurPtr, MAX_PKT_LEN*4,
					TxRingPtr->MaxTransferLen);
		if (Status != XST_SUCCESS) {
			xil_printf("Tx set length %d on BD %x failed %d\r\n",
			    MAX_PKT_LEN*4, (unsigned int)BdCurPtr, Status);

			return XST_FAILURE;
		}

#if (XPAR_AXIDMA_0_SG_INCLUDE_STSCNTRL_STRM == 1)
		Status = XAxiDma_BdSetAppWord(BdCurPtr,
		    XAXIDMA_LAST_APPWORD, MAX_PKT_LEN*2);

		/* If Set app length failed, it is not fatal
		 */
		if (Status != XST_SUCCESS) {
			xil_printf("Set app word failed with %d\r\n", Status);
		}
#endif

		/* For single packet, both SOF and EOF are to be set
		 */
		XAxiDma_BdSetCtrl(BdCurPtr, XAXIDMA_BD_CTRL_TXEOF_MASK |
							XAXIDMA_BD_CTRL_TXSOF_MASK);

		XAxiDma_BdSetId(BdCurPtr, (u32) TxBufferPtr);
		TxBufferPtr += MAX_PKT_LEN*4;
		BdCurPtr = XAxiDma_BdRingNext(TxRingPtr, BdCurPtr);
	}

	/* Give the BD to DMA to kick off the transmission. */
	Status = XAxiDma_BdRingToHw(TxRingPtr, 32, BdPtr);
	if (Status != XST_SUCCESS) {
		xil_printf("to hw failed %d\r\n", Status);
		return XST_FAILURE;
	}
	return XST_SUCCESS;
}
/**
*
* This example sends and receives a single packet in loopback mode with
* extended VLAN support.
*
* The transmit frame will have VLAN field populated.
*
* On receive, HW should pass the VLAN field to receive BDs.
*
* @param	AxiEthernetInstancePtr is a pointer to the instance of the
*		AxiEthernet component.
* @param	DmaInstancePtr   is a pointer to the instance of the Dma
*		component.
*
* @return	-XST_SUCCESS to indicate success.
*		-XST_FAILURE to indicate failure.
*
* @note		Summary of VLAN tags handling in this example
*
* Frame setup with Tpid1+Cfi1+TxVid => 0x88A83111
* Frame translated to TxTransVid => 0x88A83222
* Frame tagged to Tpid2+Cfi2+TxTagVid => 0x9100C333 + 0x88A83222
* Frame sent and loopbacked.
*
* Frame stripped with RxStrpVid(0x333) => 0x88A83222
* Frame translated (key:RxVid:0x222) RxTransVid => 0x88A83444
*
******************************************************************************/
int AxiEthernetSgDmaIntrExtVlanExample(XAxiEthernet *AxiEthernetInstancePtr,
				XAxiDma *DmaInstancePtr)
{
	int Status;
	u32 TxFrameLength;
	u32 RxFrameLength;
	int PayloadSize = PAYLOAD_SIZE;
	u16 Tpid1 = 0x88A8;
	u16 Tpid2 = 0x9100;
	u8  Cfi1  = 0x03;
	u8  Cfi2  = 0x0C;
	u16 TxVid      = 0x0111;
	u16 TxTransVid = 0x0222;
	u16 TxTagVid   = 0x0333;
	u16 RxVid      = 0x0222;
	u16 RxTransVid = 0x0444;
	u16 RxStrpVid  = 0x0333;
	u32 VTagCfiVid;
	u16 RxCfiVid;
	u16 RxTpid;
	u32 RxStatusControlWord;
	int Valid;


	XAxiDma_BdRing *RxRingPtr = XAxiDma_GetRxRing(DmaInstancePtr);
	XAxiDma_BdRing *TxRingPtr = XAxiDma_GetTxRing(DmaInstancePtr);
	XAxiDma_Bd *BdPtr;
	XAxiDma_Bd *BdCurPtr;
	u32 BdSts;

	/*
	 * Cannot run this example if extended features support is not enabled
	 */
	if (!(XAxiEthernet_IsTxVlanTran(AxiEthernetInstancePtr) &&
		XAxiEthernet_IsTxVlanStrp(AxiEthernetInstancePtr) &&
		XAxiEthernet_IsTxVlanTag(AxiEthernetInstancePtr)  &&
		XAxiEthernet_IsRxVlanTran(AxiEthernetInstancePtr) &&
		XAxiEthernet_IsRxVlanStrp(AxiEthernetInstancePtr) &&
		XAxiEthernet_IsRxVlanTag(AxiEthernetInstancePtr))) {
		AxiEthernetUtilErrorTrap("Extended VLAN not available");
		return XST_FAILURE;
	}

	/*
	 * Clear variables shared with callbacks
	 */
	FramesRx = 0;
	FramesTx = 0;
	DeviceErrors = 0;
	memset(RxFrame,0,sizeof(RxFrame));
	memset(TxFrame,0,sizeof(TxFrame));

	/*
	 * Calculate frame length (not including FCS) plus one VLAN tag
	 */
	TxFrameLength = XAE_HDR_VLAN_SIZE + PayloadSize;

	/*
	 * Setup the packet with one VLAN tag = VTagCfiVid to be transmitted
	 * initially.
	 */
	VTagCfiVid = (((u32)Tpid1 << 16) | ((u32)Cfi1 << 12) | TxVid);
	AxiEthernetUtilFrameMemClear(&TxFrame);
	AxiEthernetUtilFrameHdrFormatMAC(&TxFrame, AxiEthernetMAC);
	AxiEthernetUtilFrameHdrVlanFormatVid(&TxFrame, 0, VTagCfiVid);
	AxiEthernetUtilFrameHdrVlanFormatType(&TxFrame, PayloadSize, 1);
	AxiEthernetUtilFrameSetVlanPayloadData(&TxFrame, PayloadSize, 1);

	/* Intended VLAN setup:
	 * TX translation and tagging. RX stripping and translation.
	 *
	 * Frame setup with Tpid1+Cfi1+TxVid => 0x88A83111
	 * Frame translated to TxTransVid => 0x88A83222
	 * Frame tagged to Tpid2+Cfi2+TxTagVid => 0x9100C333 + 0x88A83222
	 * Frame sent and loopbacked.
	 *
	 * Frame stripped with RxStrpVid(0x333) => 0x88A83222
	 * Frame translated (key:RxVid:0x222) RxTransVid => 0x88A83444
	 */

	/* Extended VLAN transmit side. Stripping->Translation->Tagging */
	/*
	 * Configure VLAN TX tag mode, set to XAE_VTAG_SELECT.
	 */
	Status  = XAxiEthernet_SetOptions(AxiEthernetInstancePtr,
						XAE_EXT_TXVLAN_TAG_OPTION);
	Status |= XAxiEthernet_SetVTagMode(AxiEthernetInstancePtr,
						XAE_VTAG_SELECT, XAE_TX);

	/*
	 * TX VLAN translation from TxVid to TxTransVid and enable tagging.
	 */
	Status |= XAxiEthernet_SetOptions(AxiEthernetInstancePtr,
						XAE_EXT_TXVLAN_TRAN_OPTION);
	Status |= XAxiEthernet_SetVidTable(AxiEthernetInstancePtr, TxVid,
						TxTransVid,0, 1, XAE_TX);

	/*
	 * TX VLAN tagging is keyed on TxVid to add one additional tag based
	 * on register XAE_TTAG_OFFSET value.
	 */
	VTagCfiVid = (((u32)Tpid2 << 16) | ((u32)Cfi2 << 12) | TxTagVid);
	Status |= XAxiEthernet_SetVTagValue(AxiEthernetInstancePtr,
							VTagCfiVid, XAE_TX);
	if (Status != XST_SUCCESS) {
		AxiEthernetUtilErrorTrap("Error setting TX VLAN");
		return XST_FAILURE;
	}

	/* Extended VLAN receive side. Stripping->Translation->Tagging */
	/*
	 * Configure VLAN RX strip mode, set to XAE_VSTRP_SELECT.
	 */
	Status = XAxiEthernet_SetOptions(AxiEthernetInstancePtr,
						XAE_EXT_RXVLAN_STRP_OPTION);
	Status |= XAxiEthernet_SetVStripMode(AxiEthernetInstancePtr,
						XAE_VSTRP_SELECT, XAE_RX);

	/*
	 * RX VLAN strips based on RxStrpVid and enable stripping.
	 */
	Status |= XAxiEthernet_SetVidTable(AxiEthernetInstancePtr, RxStrpVid,
						RxStrpVid, 1, 0, XAE_RX);

	/*
	 * RX VLAN translation from RxVid to RxTransVid only.
	 */
	Status |= XAxiEthernet_SetOptions(AxiEthernetInstancePtr,
						XAE_EXT_RXVLAN_TRAN_OPTION);
	Status |= XAxiEthernet_SetVidTable(AxiEthernetInstancePtr, RxVid,
						RxTransVid, 0, 0, XAE_RX);

	if (Status != XST_SUCCESS) {
		AxiEthernetUtilErrorTrap("Error setting RX VLAN");
		return XST_FAILURE;
	}

	/* Configure VLAN TPIDs for HW to recognize. */
	Status  = XAxiEthernet_SetTpid(AxiEthernetInstancePtr, Tpid1, 0);
	Status |= XAxiEthernet_SetTpid(AxiEthernetInstancePtr, Tpid2, 1);
	if (Status != XST_SUCCESS) {
		AxiEthernetUtilErrorTrap("Error setting TPIDs");
		return XST_FAILURE;
	}

	/*
	 * Flush the TX frame before giving it to DMA TX channel to transmit.
	 */
	Xil_DCacheFlushRange((u32)&TxFrame, TxFrameLength);

	/*
	 * Clear out receive packet memory area
	 */
	AxiEthernetUtilFrameMemClear(&RxFrame);

	/*
	 * Invalidate the RX frame before giving it to DMA RX channel to
	 * receive data.
 	 */
	Xil_DCacheInvalidateRange((u32)&RxFrame, TxFrameLength + 4);

	/*
	 * Interrupt coalescing parameters are set to their default settings
	 * which is to interrupt the processor after every frame has been
	 * processed by the DMA engine.
	 */

	Status = XAxiDma_BdRingSetCoalesce(TxRingPtr, 1, 1);
	if (Status != XST_SUCCESS) {
		AxiEthernetUtilErrorTrap("Error setting coalescing for transmit");
		return XST_FAILURE;
	}

	Status = XAxiDma_BdRingSetCoalesce(RxRingPtr, 1, 1);
	if (Status != XST_SUCCESS) {
		AxiEthernetUtilErrorTrap("Error setting coalescing for recv");
		return XST_FAILURE;
	}

	/*
	 * Make sure Tx and Rx are enabled
	 */
	Status = XAxiEthernet_SetOptions(AxiEthernetInstancePtr,
					XAE_RECEIVER_ENABLE_OPTION |
					XAE_TRANSMITTER_ENABLE_OPTION );
	if (Status != XST_SUCCESS) {
		AxiEthernetUtilErrorTrap("Error setting options");
		return XST_FAILURE;
	}

	Status = XAxiEthernet_SetOptions(AxiEthernetInstancePtr,
					XAE_JUMBO_OPTION);
	if (Status != XST_SUCCESS) {
		AxiEthernetUtilErrorTrap("Error setting options");
		return XST_FAILURE;
	}

	/*
	 * Start the AxiEthernet and enable its ERROR interrupts
	 */
	XAxiEthernet_Start(AxiEthernetInstancePtr);
	XAxiEthernet_IntEnable(AxiEthernetInstancePtr,
					XAE_INT_RECV_ERROR_MASK);

	/*
	 * Enable DMA receive related interrupts
	 */
	XAxiDma_BdRingIntEnable(RxRingPtr, XAXIDMA_IRQ_ALL_MASK);

	/*
	 * Allocate 1 RxBD.
	 */
	Status = XAxiDma_BdRingAlloc(RxRingPtr, 1, &BdPtr);
	if (Status != XST_SUCCESS) {
		AxiEthernetUtilErrorTrap("Error allocating RxBD");
		return XST_FAILURE;
	}

	/*
	 * Setup the BD.
	 */
	XAxiDma_BdSetBufAddr(BdPtr, (u32)&RxFrame);
#ifndef XPAR_AXIDMA_0_ENABLE_MULTI_CHANNEL
	XAxiDma_BdSetLength(BdPtr, sizeof(RxFrame));
#else
	XAxiDma_BdSetLength(BdPtr, sizeof(RxFrame),
				RxRingPtr->MaxTransferLen);
#endif
	XAxiDma_BdSetCtrl(BdPtr, 0);

	/*
	 * Enqueue to HW
	 */
	Status = XAxiDma_BdRingToHw(RxRingPtr, 1, BdPtr);
	if (Status != XST_SUCCESS) {
		AxiEthernetUtilErrorTrap("Error committing RxBD to HW");
		return XST_FAILURE;
	}

	/*
	 * Start DMA RX channel. Now it's ready to receive data.
	 */
	Status = XAxiDma_BdRingStart(RxRingPtr);
	if (Status != XST_SUCCESS) {
		return XST_FAILURE;
	}

	/*
	 * Enable DMA transmit related interrupts
	 */
	XAxiDma_BdRingIntEnable(TxRingPtr, XAXIDMA_IRQ_ALL_MASK);

	/*
	 * Allocate 1 TxBD
	 */
	Status = XAxiDma_BdRingAlloc(TxRingPtr, 1, &BdPtr);
	if (Status != XST_SUCCESS) {
		AxiEthernetUtilErrorTrap("Error allocating TxBD");
		return XST_FAILURE;
	}

	/*
	 * Setup the TxBD
	 */
	XAxiDma_BdSetBufAddr(BdPtr, (u32)&TxFrame);
#ifndef XPAR_AXIDMA_0_ENABLE_MULTI_CHANNEL
	XAxiDma_BdSetLength(BdPtr, TxFrameLength);
#else
	XAxiDma_BdSetLength(BdPtr, TxFrameLength,
				TxRingPtr->MaxTransferLen);
#endif
	XAxiDma_BdSetCtrl(BdPtr, XAXIDMA_BD_CTRL_TXSOF_MASK |
						XAXIDMA_BD_CTRL_TXEOF_MASK);

	/*
	 * Enqueue to HW
	 */
	Status = XAxiDma_BdRingToHw(TxRingPtr, 1, BdPtr);
	if (Status != XST_SUCCESS) {
		AxiEthernetUtilErrorTrap("Error committing TxBD to HW");
		return XST_FAILURE;
	}

	/*
	 * Start DMA TX channel. Transmission starts at once.
	 */
	Status = XAxiDma_BdRingStart(TxRingPtr);
	if (Status != XST_SUCCESS) {
		return XST_FAILURE;
	}

	/*
	 * Wait for transmission to complete
	 */
	while (!FramesTx);

	/*
	 * Now that the frame has been sent, post process our TxBDs.
	 * Since we have only submitted 1 to HW, there should be only 1
	 * ready for post processing.
	 */
	if (XAxiDma_BdRingFromHw(TxRingPtr, 1, &BdPtr) == 0) {
		AxiEthernetUtilErrorTrap("TxBDs were not ready for post processing");
		return XST_FAILURE;
	}

	/*
	 * Examine the TxBDs.
	 *
	 * There isn't much to do. The only thing to check would be DMA
	 * exception bits. But this would also be caught in the error handler.
	 * So we just return these BDs to the free list
	 */
	Status = XAxiDma_BdRingFree(TxRingPtr, 1, BdPtr);
	if (Status != XST_SUCCESS) {
		AxiEthernetUtilErrorTrap("Error freeing up TxBDs");
		return XST_FAILURE;
	}

	/*
	 * Wait for Rx indication
	 */
	while (!FramesRx);
	/*
	 * Now that the frame has been received, post process our RxBD.
	 * Since we have only submitted 1 to HW, there should be only 1
	 * ready for post processing.
	 */
	if (XAxiDma_BdRingFromHw(RxRingPtr, 1, &BdPtr) == 0) {
		AxiEthernetUtilErrorTrap("RxBD was not ready for post processing");
		return XST_FAILURE;
	}

	BdCurPtr = BdPtr;
	BdSts = XAxiDma_BdGetSts(BdCurPtr);

	if ((BdSts & XAXIDMA_BD_STS_ALL_ERR_MASK) ||
		(!(BdSts & XAXIDMA_BD_STS_COMPLETE_MASK))) {
			AxiEthernetUtilErrorTrap("Rx Error");
			return XST_FAILURE;
	}
	else {

		RxFrameLength =
		(XAxiDma_BdRead(BdCurPtr,XAXIDMA_BD_USR4_OFFSET)) & 0x0000FFFF;
	}

	/* Expected RX TPID+CFI+VID !!! */
	VTagCfiVid = (((u32)Tpid2 << 16) | ((u32)Cfi2 << 12) | RxStrpVid);

	/* Check on the VLAN CFI and VID */
	RxStatusControlWord = XAxiDma_BdGetAppWord(BdPtr,
						BD_VLAN_VID_OFFSET, &Valid);
	if(Valid) {
		RxCfiVid = RxStatusControlWord >> 16;
		RxCfiVid = Xil_Ntohs(RxCfiVid);
		if(RxCfiVid != (VTagCfiVid & 0x0000FFFF)) {
			AxiEthernetUtilErrorTrap("VLAN CFI and VID mismatch\n");
			return XST_FAILURE;
		}
	}
	else {