/*****************************************************************************/
/*
 * DMA RX callback, invoked from the RX interrupt handler.
 *
 * Harvests the BDs the hardware has finished with and counts each good one
 * in the global RxDone. If a BD shows an error bit (or its COMPLETE bit is
 * clear) the global Error flag is raised and processing stops, since the
 * DMA engine halts after an errored BD.
 *
 * @param RxRingPtr is a pointer to the RX channel of the DMA engine.
 *
 * @return None.
 *
 * @note None.
 *
 ******************************************************************************/
static void RxCallBack(XAxiDma_BdRing * RxRingPtr)
{
	XAxiDma_Bd *FirstBd;
	XAxiDma_Bd *CurBd;
	u32 Sts;
	int NumBds;
	int i;

	/* Pull every BD the hardware has completed */
	NumBds = XAxiDma_BdRingFromHw(RxRingPtr, XAXIDMA_ALL_BDS, &FirstBd);

	CurBd = FirstBd;
	for (i = 0; i < NumBds; i++) {
		/*
		 * Inspect the hardware-written status word. Any error bit,
		 * or a missing COMPLETE bit, means the engine has halted and
		 * nothing after this BD is valid.
		 */
		Sts = XAxiDma_BdGetSts(CurBd);
		if ((Sts & XAXIDMA_BD_STS_ALL_ERR_MASK) ||
		    (!(Sts & XAXIDMA_BD_STS_COMPLETE_MASK))) {
			Error = 1;
			break;
		}

		/* Advance to the next processed BD and tally this one */
		CurBd = XAxiDma_BdRingNext(RxRingPtr, CurBd);
		RxDone += 1;
	}
}
/*****************************************************************************/
/*
 * DMA TX callback, invoked from the TX interrupt handler.
 *
 * Harvests BDs finished by hardware, checks each for errors, then returns
 * all harvested BDs to the free pool. On success the global TxDone counter
 * is advanced by the number harvested; any BD error or free failure sets
 * the global Error flag.
 *
 * @param AxiDmaPtr is a pointer to the DMA engine instance; its TX ring is
 *        looked up internally.
 *
 * @return None.
 *
 * @note None.
 *
 ******************************************************************************/
static void TxCallBack(XAxiDma *AxiDmaPtr)
{
	XAxiDma_BdRing *TxRing = XAxiDma_GetTxRing(AxiDmaPtr);
	XAxiDma_Bd *FirstBd;
	XAxiDma_Bd *CurBd;
	u32 Sts;
	int NumBds;
	int Rc;
	int i;

	/* Collect all BDs the hardware has processed */
	NumBds = XAxiDma_BdRingFromHw(TxRing, XAXIDMA_ALL_BDS, &FirstBd);

	CurBd = FirstBd;
	for (i = 0; i < NumBds; i++) {
		/*
		 * Check each BD's status word. An error bit (or missing
		 * COMPLETE bit) means the DMA engine is halted after this BD.
		 */
		Sts = XAxiDma_BdGetSts(CurBd);
		if ((Sts & XAXIDMA_BD_STS_ALL_ERR_MASK) ||
		    (!(Sts & XAXIDMA_BD_STS_COMPLETE_MASK))) {
			Error = 1;
			break;
		}

		/*
		 * Nothing else to do per-BD here. Under an RTOS this is
		 * where the packet buffer attached to the processed BD
		 * would be released.
		 */

		/* Advance to the next processed BD */
		CurBd = XAxiDma_BdRingNext(TxRing, CurBd);
	}

	/* Return all harvested BDs so they can be reused for future Tx */
	Rc = XAxiDma_BdRingFree(TxRing, NumBds, FirstBd);
	if (Rc != XST_SUCCESS) {
		Error = 1;
	}

	if (!Error) {
		TxDone += NumBds;
	}
}
void wlan_eth_dma_update(){ //Used to submit new BDs to the DMA hardware if space is available int bd_count; int status; XAxiDma_BdRing *ETH_A_RxRing_ptr; XAxiDma_Bd *first_bd_ptr; XAxiDma_Bd *cur_bd_ptr; packet_bd_list checkout; packet_bd* tx_queue; u32 i; u32 buf_addr; u32 num_available_packet_bd; ETH_A_RxRing_ptr = XAxiDma_GetRxRing(Ð_A_DMA_Instance); bd_count = XAxiDma_BdRingGetFreeCnt(ETH_A_RxRing_ptr); num_available_packet_bd = queue_num_free(); if(min(num_available_packet_bd,bd_count)>0){ // xil_printf("%d BDs are free\n",bd_count); // xil_printf("%d packet_bds are free\n", num_available_packet_bd); // xil_printf("Attaching %d BDs to packet_bds\n",min(bd_count,num_available_packet_bd)); //Checkout ETH_A_NUM_RX_BD packet_bds queue_checkout(&checkout, min(bd_count,num_available_packet_bd)); status = XAxiDma_BdRingAlloc(ETH_A_RxRing_ptr, min(bd_count,checkout.length), &first_bd_ptr); if(status != XST_SUCCESS) {xil_printf("Error in XAxiDma_BdRingAlloc()! Err = %d\n", status); return;} tx_queue = checkout.first; //Iterate over each Rx buffer descriptor cur_bd_ptr = first_bd_ptr; for(i = 0; i < min(bd_count,checkout.length); i++) { //Set the memory address for this BD's buffer buf_addr = (u32)((void*)((tx_packet_buffer*)(tx_queue->buf_ptr))->frame + sizeof(mac_header_80211) + sizeof(llc_header) - sizeof(ethernet_header)); status = XAxiDma_BdSetBufAddr(cur_bd_ptr, buf_addr); if(status != XST_SUCCESS) {xil_printf("XAxiDma_BdSetBufAddr failed (bd %d, addr 0x08x)! Err = %d\n", i, buf_addr, status); return;} //Set every Rx BD to max length (this assures 1 BD per Rx pkt) status = XAxiDma_BdSetLength(cur_bd_ptr, ETH_A_PKT_BUF_SIZE, ETH_A_RxRing_ptr->MaxTransferLen); if(status != XST_SUCCESS) {xil_printf("XAxiDma_BdSetLength failed (bd %d, addr 0x08x)! 
Err = %d\n", i, buf_addr, status); return;} //Rx BD's don't need control flags before use; DMA populates these post-Rx XAxiDma_BdSetCtrl(cur_bd_ptr, 0); //BD ID is arbitrary; use pointer to the packet_bd associated with this BD XAxiDma_BdSetId(cur_bd_ptr, (u32)tx_queue); //Update cur_bd_ptr to the next BD in the chain for the next iteration cur_bd_ptr = XAxiDma_BdRingNext(ETH_A_RxRing_ptr, cur_bd_ptr); //Remove this tx_queue from the checkout list //packet_bd_remove(&checkout,tx_queue); //Traverse forward in the checked-out packet_bd list tx_queue = tx_queue->next; } //Push the Rx BD ring to hardware and start receiving status = XAxiDma_BdRingToHw(ETH_A_RxRing_ptr, min(num_available_packet_bd,bd_count), first_bd_ptr); //Check any remaining unused entries from the checkout list back in //queue_checkin(&checkout); } return; }
void wlan_poll_eth() { XAxiDma_BdRing *rxRing_ptr; XAxiDma_Bd *cur_bd_ptr; XAxiDma_Bd *first_bd_ptr; u8* mpdu_start_ptr; u8* eth_start_ptr; u8* eth_mid_ptr; packet_bd* tx_queue; u32 eth_rx_len, eth_rx_buf; u32 mpdu_tx_len; packet_bd_list tx_queue_list; u32 i; u8 continue_loop; int bd_count; int status; int packet_is_queued; ethernet_header* eth_hdr; ipv4_header* ip_hdr; arp_packet* arp; udp_header* udp; dhcp_packet* dhcp; llc_header* llc_hdr; u8 eth_dest[6]; u8 eth_src[6]; static u32 max_bd_count = 0; rxRing_ptr = XAxiDma_GetRxRing(Ð_A_DMA_Instance); //Check if any Rx BDs have been executed //TODO: process XAXIDMA_ALL_BDS instead of 1 at a time //bd_count = XAxiDma_BdRingFromHw(rxRing_ptr, 1, &cur_bd_ptr); bd_count = XAxiDma_BdRingFromHw(rxRing_ptr, XAXIDMA_ALL_BDS, &first_bd_ptr); cur_bd_ptr = first_bd_ptr; if(bd_count > max_bd_count){ max_bd_count = bd_count; //xil_printf("max_bd_count = %d\n",max_bd_count); } if(bd_count == 0) { //No Rx BDs have been processed - no new Eth receptions waiting return; } for(i=0;i<bd_count;i++){ //A packet has been received and transferred by DMA tx_queue = (packet_bd*)XAxiDma_BdGetId(cur_bd_ptr); //xil_printf("DMA has filled in packet_bd at 0x%08x\n", tx_queue); eth_rx_len = XAxiDma_BdGetActualLength(cur_bd_ptr, rxRing_ptr->MaxTransferLen); eth_rx_buf = XAxiDma_BdGetBufAddr(cur_bd_ptr); //After encapsulation, byte[0] of the MPDU will be at byte[0] of the queue entry frame buffer mpdu_start_ptr = (void*)((tx_packet_buffer*)(tx_queue->buf_ptr))->frame; eth_start_ptr = (u8*)eth_rx_buf; //Calculate actual wireless Tx len (eth payload - eth header + wireless header) mpdu_tx_len = eth_rx_len - sizeof(ethernet_header) + sizeof(llc_header) + sizeof(mac_header_80211); //Helper pointers to interpret/fill fields in the new MPDU eth_hdr = (ethernet_header*)eth_start_ptr; llc_hdr = (llc_header*)(mpdu_start_ptr + sizeof(mac_header_80211)); //Copy the src/dest addresses from the received Eth packet to temp space memcpy(eth_src, 
eth_hdr->address_source, 6); memcpy(eth_dest, eth_hdr->address_destination, 6); //Prepare the MPDU LLC header llc_hdr->dsap = LLC_SNAP; llc_hdr->ssap = LLC_SNAP; llc_hdr->control_field = LLC_CNTRL_UNNUMBERED; bzero((void *)(llc_hdr->org_code), 3); //Org Code 0x000000: Encapsulated Ethernet packet_is_queued = 0; packet_bd_list tx_queue_list; packet_bd_list_init(&tx_queue_list); packet_bd_insertEnd(&tx_queue_list, tx_queue); switch(eth_encap_mode){ case ENCAP_MODE_AP: switch(eth_hdr->type) { case ETH_TYPE_ARP: llc_hdr->type = LLC_TYPE_ARP; packet_is_queued = eth_rx_callback(&tx_queue_list, eth_dest, eth_src, mpdu_tx_len); break; case ETH_TYPE_IP: llc_hdr->type = LLC_TYPE_IP; packet_is_queued = eth_rx_callback(&tx_queue_list, eth_dest, eth_src, mpdu_tx_len); break; /* WMP_START */ case WMP4WARP_ETHER_TYPE: wmp_high_util_handle_wmp_cmd_from_ethernet(eth_hdr); break; /* WMP_END */ default: //Unknown/unsupported EtherType; don't process the Eth frame break; } break; case ENCAP_MODE_STA: //Save this ethernet src address for d memcpy(eth_sta_mac_addr, eth_src, 6); memcpy(eth_src, hw_info.hw_addr_wlan, 6); switch(eth_hdr->type) { case ETH_TYPE_ARP: arp = (arp_packet*)((void*)eth_hdr + sizeof(ethernet_header)); //Here we hijack ARP messages and overwrite their source MAC address field with //the station's wireless MAC address. 
memcpy(arp->eth_src, hw_info.hw_addr_wlan, 6); llc_hdr->type = LLC_TYPE_ARP; packet_is_queued = eth_rx_callback(&tx_queue_list, eth_dest, eth_src, mpdu_tx_len); break; case ETH_TYPE_IP: llc_hdr->type = LLC_TYPE_IP; ip_hdr = (ipv4_header*)((void*)eth_hdr + sizeof(ethernet_header)); if(ip_hdr->prot == IPV4_PROT_UDP){ udp = (udp_header*)((void*)ip_hdr + 4*((u8)(ip_hdr->ver_ihl) & 0xF)); udp->checksum = 0; //Disable the checksum since we are about to mess with the bytes in the packet if(Xil_Ntohs(udp->src_port) == UDP_SRC_PORT_BOOTPC || Xil_Ntohs(udp->src_port) == UDP_SRC_PORT_BOOTPS){ //This is a DHCP Discover packet, which contains the source hardware address //deep inside the packet (in addition to its usual location in the Eth header). //For STA encapsulation, we need to overwrite this address with the MAC addr //of the wireless station. dhcp = (dhcp_packet*)((void*)udp + sizeof(udp_header)); if(Xil_Ntohl(dhcp->magic_cookie) == DHCP_MAGIC_COOKIE){ eth_mid_ptr = (u8*)((void*)dhcp + sizeof(dhcp_packet)); dhcp->flags = Xil_Htons(DHCP_BOOTP_FLAGS_BROADCAST); //Tagged DHCP Options continue_loop = 1; while(continue_loop){ switch(eth_mid_ptr[0]){ case DHCP_OPTION_TAG_TYPE: switch(eth_mid_ptr[2]){ case DHCP_OPTION_TYPE_DISCOVER: case DHCP_OPTION_TYPE_REQUEST: //memcpy(dhcp->chaddr,hw_info.hw_addr_wlan,6); break; } break; case DHCP_OPTION_TAG_IDENTIFIER: //memcpy(&(eth_mid_ptr[3]),hw_info.hw_addr_wlan,6); break; case DHCP_OPTION_END: continue_loop = 0; break; } eth_mid_ptr += (2+eth_mid_ptr[1]); } } } } packet_is_queued = eth_rx_callback(&tx_queue_list, eth_dest, eth_src, mpdu_tx_len); break; /* WMP_START */ case WMP4WARP_ETHER_TYPE:{ wmp_high_util_handle_wmp_cmd_from_ethernet(eth_hdr); break;} /* WMP_END */ default: //Unknown/unsupported EtherType; don't process the Eth frame break; } break; } if(packet_is_queued == 0){ //xil_printf(" ...checking in\n"); queue_checkin(&tx_queue_list); } //TODO: Option A: We free this single BD and run the routine to checkout as many queues 
as we can and hook them up to BDs //Results: pretty good TCP performance //Free this bd status = XAxiDma_BdRingFree(rxRing_ptr, 1, cur_bd_ptr); if(status != XST_SUCCESS) {xil_printf("Error in XAxiDma_BdRingFree of Rx BD! Err = %d\n", status); return;} wlan_eth_dma_update(); //Update cur_bd_ptr to the next BD in the chain for the next iteration cur_bd_ptr = XAxiDma_BdRingNext(rxRing_ptr, cur_bd_ptr); } //TODO: Option B: We free all BDs at once and run the routine to checkout as many queues as we can and hook them up to BDs //Results: pretty lackluster TCP performance. needs further investigation //Free this bd //status = XAxiDma_BdRingFree(rxRing_ptr, bd_count, first_bd_ptr); //if(status != XST_SUCCESS) {xil_printf("Error in XAxiDma_BdRingFree of Rx BD! Err = %d\n", status); return;} //wlan_eth_dma_update(); return; }
int wlan_eth_dma_init() { int status; int bd_count; int i; u32 buf_addr; XAxiDma_Config *ETH_A_DMA_CFG_ptr; XAxiDma_Bd ETH_DMA_BD_Template; XAxiDma_BdRing *ETH_A_TxRing_ptr; XAxiDma_BdRing *ETH_A_RxRing_ptr; XAxiDma_Bd *first_bd_ptr; XAxiDma_Bd *cur_bd_ptr; packet_bd_list checkout; packet_bd* tx_queue; ETH_A_DMA_CFG_ptr = XAxiDma_LookupConfig(ETH_A_DMA_DEV_ID); status = XAxiDma_CfgInitialize(Ð_A_DMA_Instance, ETH_A_DMA_CFG_ptr); if(status != XST_SUCCESS) {xil_printf("Error in XAxiDma_CfgInitialize! Err = %d\n", status); return -1;} //Zero-out the template buffer descriptor XAxiDma_BdClear(Ð_DMA_BD_Template); //Fetch handles to the Tx and Rx BD rings ETH_A_TxRing_ptr = XAxiDma_GetTxRing(Ð_A_DMA_Instance); ETH_A_RxRing_ptr = XAxiDma_GetRxRing(Ð_A_DMA_Instance); //Disable all Tx/Rx DMA interrupts XAxiDma_BdRingIntDisable(ETH_A_TxRing_ptr, XAXIDMA_IRQ_ALL_MASK); XAxiDma_BdRingIntDisable(ETH_A_RxRing_ptr, XAXIDMA_IRQ_ALL_MASK); //Disable delays and coalescing (for now - these will be useful when we transition to interrupts) XAxiDma_BdRingSetCoalesce(ETH_A_TxRing_ptr, 1, 0); XAxiDma_BdRingSetCoalesce(ETH_A_RxRing_ptr, 1, 0); //Setup Tx/Rx buffer descriptor rings in memory status = XAxiDma_BdRingCreate(ETH_A_TxRing_ptr, ETH_A_TX_BD_SPACE_BASE, ETH_A_TX_BD_SPACE_BASE, XAXIDMA_BD_MINIMUM_ALIGNMENT, ETH_A_NUM_TX_BD); status |= XAxiDma_BdRingCreate(ETH_A_RxRing_ptr, ETH_A_RX_BD_SPACE_BASE, ETH_A_RX_BD_SPACE_BASE, XAXIDMA_BD_MINIMUM_ALIGNMENT, ETH_A_NUM_RX_BD); if(status != XST_SUCCESS) {xil_printf("Error creating DMA BD Rings! Err = %d\n", status); return -1;} //Populate each ring with empty buffer descriptors status = XAxiDma_BdRingClone(ETH_A_TxRing_ptr, Ð_DMA_BD_Template); status |= XAxiDma_BdRingClone(ETH_A_RxRing_ptr, Ð_DMA_BD_Template); if(status != XST_SUCCESS) {xil_printf("Error in XAxiDma_BdRingClone()! 
Err = %d\n", status); return -1;} //Start the DMA Tx channel // No Eth packets are transmitted until actual Tx BD's are pushed to the DMA hardware status = XAxiDma_BdRingStart(ETH_A_TxRing_ptr); //Initialize the Rx buffer descriptors bd_count = XAxiDma_BdRingGetFreeCnt(ETH_A_RxRing_ptr); if(bd_count != ETH_A_NUM_RX_BD) {xil_printf("Error in Eth Rx DMA init - not all Rx BDs were free at boot\n");} status = XAxiDma_BdRingAlloc(ETH_A_RxRing_ptr, bd_count, &first_bd_ptr); if(status != XST_SUCCESS) {xil_printf("Error in XAxiDma_BdRingAlloc()! Err = %d\n", status); return -1;} //Checkout ETH_A_NUM_RX_BD packet_bds queue_checkout(&checkout, ETH_A_NUM_RX_BD); if(checkout.length == ETH_A_NUM_RX_BD){ tx_queue = checkout.first; } else { xil_printf("Error during wlan_eth_dma_init: able to check out %d of %d packet_bds\n", checkout.length, ETH_A_NUM_RX_BD); return -1; } //Iterate over each Rx buffer descriptor cur_bd_ptr = first_bd_ptr; for(i = 0; i < bd_count; i++) { //Set the memory address for this BD's buffer buf_addr = (u32)((void*)((tx_packet_buffer*)(tx_queue->buf_ptr))->frame + sizeof(mac_header_80211) + sizeof(llc_header) - sizeof(ethernet_header)); status = XAxiDma_BdSetBufAddr(cur_bd_ptr, buf_addr); if(status != XST_SUCCESS) {xil_printf("XAxiDma_BdSetBufAddr failed (bd %d, addr 0x08x)! Err = %d\n", i, buf_addr, status); return -1;} //Set every Rx BD to max length (this assures 1 BD per Rx pkt) status = XAxiDma_BdSetLength(cur_bd_ptr, ETH_A_PKT_BUF_SIZE, ETH_A_RxRing_ptr->MaxTransferLen); if(status != XST_SUCCESS) {xil_printf("XAxiDma_BdSetLength failed (bd %d, addr 0x08x)! 
Err = %d\n", i, buf_addr, status); return -1;} //Rx BD's don't need control flags before use; DMA populates these post-Rx XAxiDma_BdSetCtrl(cur_bd_ptr, 0); //BD ID is arbitrary; use pointer to the packet_bd associated with this BD XAxiDma_BdSetId(cur_bd_ptr, (u32)tx_queue); //Update cur_bd_ptr to the next BD in the chain for the next iteration cur_bd_ptr = XAxiDma_BdRingNext(ETH_A_RxRing_ptr, cur_bd_ptr); //Traverse forward in the checked-out packet_bd list tx_queue = tx_queue->next; } //Push the Rx BD ring to hardware and start receiving status = XAxiDma_BdRingToHw(ETH_A_RxRing_ptr, bd_count, first_bd_ptr); //Enable Interrupts XAxiDma_BdRingIntEnable(ETH_A_RxRing_ptr, XAXIDMA_IRQ_ALL_MASK); status |= XAxiDma_BdRingStart(ETH_A_RxRing_ptr); if(status != XST_SUCCESS) {xil_printf("Error in XAxiDma_BdRingToHw/XAxiDma_BdRingStart(ETH_A_RxRing_ptr)! Err = %d\n", status); return -1;} return 0; }
/*****************************************************************************/
/**
 *
 * This function sets up the RX channel of the DMA engine to be ready for
 * packet reception: it creates the Rx BD ring in RX_BD_SPACE, attaches a
 * receive buffer to every free BD, pushes them to hardware, and starts the
 * channel.
 *
 * @param	AxiDmaInstPtr is the pointer to the instance of the DMA engine.
 *
 * @return	XST_SUCCESS if the setup is successful, XST_FAILURE otherwise.
 *
 * @note	None.
 *
 ******************************************************************************/
static int RxSetup(XAxiDma * AxiDmaInstPtr)
{
	XAxiDma_BdRing *RxRingPtr;
	int Delay = 0;
	int Coalesce = 1;
	int Status;
	XAxiDma_Bd BdTemplate;
	XAxiDma_Bd *BdPtr;
	XAxiDma_Bd *BdCurPtr;
	u32 BdCount;
	u32 FreeBdCount;
	u32 RxBufferPtr;
	int i;

	/* Use the instance passed by the caller (the original ignored the
	 * parameter and reached for the global &AxiDma) */
	RxRingPtr = XAxiDma_GetRxRing(AxiDmaInstPtr);

	/* Disable all RX interrupts before RxBD space setup */
	XAxiDma_BdRingIntDisable(RxRingPtr, XAXIDMA_IRQ_ALL_MASK);

	/* Set delay and coalescing */
	XAxiDma_BdRingSetCoalesce(RxRingPtr, Coalesce, Delay);

	/* Setup Rx BD space: as many BDs as fit in the reserved region */
	BdCount = XAxiDma_BdRingCntCalc(XAXIDMA_BD_MINIMUM_ALIGNMENT,
				RX_BD_SPACE_HIGH - RX_BD_SPACE_BASE + 1);

	Status = XAxiDma_BdRingCreate(RxRingPtr, RX_BD_SPACE_BASE,
				RX_BD_SPACE_BASE,
				XAXIDMA_BD_MINIMUM_ALIGNMENT, BdCount);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "RX create BD ring failed %d\r\n", Status);
		return XST_FAILURE;
	}

	/*
	 * Setup an all-zero BD as the template for the Rx channel.
	 */
	XAxiDma_BdClear(&BdTemplate);

	Status = XAxiDma_BdRingClone(RxRingPtr, &BdTemplate);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "RX clone BD failed %d\r\n", Status);
		return XST_FAILURE;
	}

	/* Attach buffers to RxBD ring so we are ready to receive packets */
	FreeBdCount = XAxiDma_BdRingGetFreeCnt(RxRingPtr);

	Status = XAxiDma_BdRingAlloc(RxRingPtr, FreeBdCount, &BdPtr);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "RX alloc BD failed %d\r\n", Status);
		return XST_FAILURE;
	}

	BdCurPtr = BdPtr;
	RxBufferPtr = RX_BUFFER_BASE;

	for (i = 0; i < FreeBdCount; i++) {
		Status = XAxiDma_BdSetBufAddr(BdCurPtr, RxBufferPtr);
		if (Status != XST_SUCCESS) {
			xdbg_printf(XDBG_DEBUG_ERROR,
			    "Set buffer addr %x on BD %x failed %d\r\n",
			    (unsigned int)RxBufferPtr,
			    (unsigned int)BdCurPtr, Status);
			return XST_FAILURE;
		}

		Status = XAxiDma_BdSetLength(BdCurPtr, MAX_PKT_LEN);
		if (Status != XST_SUCCESS) {
			xdbg_printf(XDBG_DEBUG_ERROR,
			    "Rx set length %d on BD %x failed %d\r\n",
			    MAX_PKT_LEN, (unsigned int)BdCurPtr, Status);
			return XST_FAILURE;
		}

		/* Receive BDs do not need to set anything for the control
		 * The hardware will set the SOF/EOF bits per stream status
		 */
		XAxiDma_BdSetCtrl(BdCurPtr, 0);

		/* BD ID holds the buffer address for later lookup */
		XAxiDma_BdSetId(BdCurPtr, RxBufferPtr);

		RxBufferPtr += MAX_PKT_LEN;
		BdCurPtr = XAxiDma_BdRingNext(RxRingPtr, BdCurPtr);
	}

	/* Clear the receive buffer, so we can verify data
	 * NOTE(review): only the first MAX_PKT_LEN bytes are cleared even
	 * though FreeBdCount buffers are attached - presumably only the first
	 * packet is verified; confirm against the check code */
	memset((void *)RX_BUFFER_BASE, 0, MAX_PKT_LEN);

	Status = XAxiDma_BdRingToHw(RxRingPtr, FreeBdCount, BdPtr);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "RX submit hw failed %d\r\n", Status);
		return XST_FAILURE;
	}

	/* Start RX DMA channel */
	Status = XAxiDma_BdRingStart(RxRingPtr);
	if (Status != XST_SUCCESS) {
		xdbg_printf(XDBG_DEBUG_ERROR,
		    "RX start hw failed %d\r\n", Status);
		return XST_FAILURE;
	}

	return XST_SUCCESS;
}
/*****************************************************************************/
/*
 *
 * This function sets up every RX channel of a multi-channel DMA engine to
 * be ready for packet reception: per channel it creates a BD ring, attaches
 * receive buffers, sets coalescing, enables interrupts and programs the
 * current descriptor; all rings are then started in a second pass.
 *
 * @param	AxiDmaInstPtr is the pointer to the instance of the DMA engine.
 *
 * @return	- XST_SUCCESS if the setup is successful.
 *		- XST_FAILURE if fails.
 *
 * @note	None.
 *
 ******************************************************************************/
static int RxSetup(XAxiDma * AxiDmaInstPtr)
{
	XAxiDma_BdRing *RxRingPtr;
	int Status;
	XAxiDma_Bd BdTemplate;
	XAxiDma_Bd *BdPtr;
	XAxiDma_Bd *BdCurPtr;
	int BdCount;
	int FreeBdCount;
	u32 RxBufferPtr;
	u32 RxBdSpacePtr;
	int Index;
	int RingIndex;

	RxBufferPtr = RX_BUFFER_BASE;
	RxBdSpacePtr = RX_BD_SPACE_BASE;

	for (RingIndex = 0; RingIndex < AxiDmaInstPtr->RxNumChannels;
							RingIndex++) {
		RxRingPtr = XAxiDma_GetRxIndexRing(&AxiDma, RingIndex);

		/* Disable all RX interrupts before RxBD space setup */
		XAxiDma_BdRingIntDisable(RxRingPtr, XAXIDMA_IRQ_ALL_MASK);

		/* Setup Rx BD space
		 * NOTE(review): BdCount is sized from the ENTIRE BD space but
		 * RxBdSpacePtr advances by BdCount*sizeof(XAxiDma_Bd) per ring;
		 * with RxNumChannels > 1 the rings would overrun
		 * RX_BD_SPACE_HIGH - confirm sizing against the memory map */
		BdCount = XAxiDma_BdRingCntCalc(XAXIDMA_BD_MINIMUM_ALIGNMENT,
				RX_BD_SPACE_HIGH - RX_BD_SPACE_BASE + 1);

		Status = XAxiDma_BdRingCreate(RxRingPtr, RxBdSpacePtr,
						RxBdSpacePtr,
						XAXIDMA_BD_MINIMUM_ALIGNMENT,
						BdCount);
		if (Status != XST_SUCCESS) {
			xil_printf("Rx bd create failed with %d\r\n", Status);
			return XST_FAILURE;
		}

		/*
		 * Setup a BD template for the Rx channel. Then copy it
		 * to every RX BD.
		 */
		XAxiDma_BdClear(&BdTemplate);
		Status = XAxiDma_BdRingClone(RxRingPtr, &BdTemplate);
		if (Status != XST_SUCCESS) {
			xil_printf("Rx bd clone failed with %d\r\n", Status);
			return XST_FAILURE;
		}

		/* Attach buffers to RxBD ring so we are ready to receive
		 * packets */
		FreeBdCount = XAxiDma_BdRingGetFreeCnt(RxRingPtr);

		Status = XAxiDma_BdRingAlloc(RxRingPtr, FreeBdCount, &BdPtr);
		if (Status != XST_SUCCESS) {
			xil_printf("Rx bd alloc failed with %d\r\n", Status);
			return XST_FAILURE;
		}

		BdCurPtr = BdPtr;
		for (Index = 0; Index < FreeBdCount; Index++) {
			Status = XAxiDma_BdSetBufAddr(BdCurPtr, RxBufferPtr);
			if (Status != XST_SUCCESS) {
				xil_printf("Rx set buffer addr %x on BD %x failed %d\r\n",
					(unsigned int)RxBufferPtr,
					(unsigned int)BdCurPtr, Status);
				return XST_FAILURE;
			}

			Status = XAxiDma_BdSetLength(BdCurPtr, HSIZE,
						RxRingPtr->MaxTransferLen);
			if (Status != XST_SUCCESS) {
				/* Fixed: the log previously printed
				 * MAX_PKT_LEN though HSIZE is what was set */
				xil_printf("Rx set length %d on BD %x failed %d\r\n",
					HSIZE, (unsigned int)BdCurPtr, Status);
				return XST_FAILURE;
			}

			/* Receive BDs do not need to set anything for the
			 * control. The hardware will set the SOF/EOF bits per
			 * stream status */
			XAxiDma_BdSetCtrl(BdCurPtr, 0);

			XAxiDma_BdSetId(BdCurPtr, RxBufferPtr);

			/* Multichannel/2D transfer attributes */
			XAxiDma_BdSetARCache(BdCurPtr, ARCACHE);
			XAxiDma_BdSetARUser(BdCurPtr, ARUSER);
			XAxiDma_BdSetVSize(BdCurPtr, VSIZE);
			XAxiDma_BdSetStride(BdCurPtr, STRIDE);

			RxBufferPtr += MAX_PKT_LEN;
			BdCurPtr = XAxiDma_BdRingNext(RxRingPtr, BdCurPtr);
		}

		/*
		 * Set the coalescing threshold, so only one receive interrupt
		 * occurs for this example
		 *
		 * If you would like to have multiple interrupts to happen,
		 * change the COALESCING_COUNT to be a smaller value
		 */
		Status = XAxiDma_BdRingSetCoalesce(RxRingPtr, COALESCING_COUNT,
				DELAY_TIMER_COUNT);
		if (Status != XST_SUCCESS) {
			xil_printf("Rx set coalesce failed with %d\r\n", Status);
			return XST_FAILURE;
		}

		Status = XAxiDma_BdRingToHw(RxRingPtr, FreeBdCount, BdPtr);
		if (Status != XST_SUCCESS) {
			xil_printf("Rx ToHw failed with %d\r\n", Status);
			return XST_FAILURE;
		}

		/* Enable all RX interrupts */
		XAxiDma_BdRingIntEnable(RxRingPtr, XAXIDMA_IRQ_ALL_MASK);

		/* Program the current descriptor pointer for this channel */
		Status = XAxiDma_UpdateBdRingCDesc(RxRingPtr);
		if (Status != XST_SUCCESS) {
			xil_printf("Failed bd start %x\r\n", Status);
			return XST_FAILURE;
		}

		RxBdSpacePtr += BdCount * sizeof(XAxiDma_Bd);
	}

	/* Second pass: start every RX channel's hardware */
	for (RingIndex = 0; RingIndex < AxiDmaInstPtr->RxNumChannels;
						RingIndex++) {
		RxRingPtr = XAxiDma_GetRxIndexRing(&AxiDma, RingIndex);
		Status = XAxiDma_StartBdRingHw(RxRingPtr);
		if (Status != XST_SUCCESS) {
			xil_printf("Rx start BD ring failed with %d\r\n", Status);
			return XST_FAILURE;
		}
	}

	return XST_SUCCESS;
}
/*****************************************************************************/
/*
 *
 * This function non-blockingly transmits all packets through the DMA engine:
 * it fills the Tx buffer with an incrementing byte pattern, flushes the
 * cache, sets up NUMBER_OF_BDS_PER_PKT BDs per packet (SOF on the first,
 * EOF on the last) with the multichannel TId/TDest/2D attributes, and
 * submits all BDs to hardware.
 *
 * @param	AxiDmaInstPtr points to the DMA engine instance
 * @param	TDest is the transmit destination written into each BD
 * @param	TId is the transmit stream ID written into each BD
 * @param	Value is the starting byte of the test pattern
 *
 * @return
 *		- XST_SUCCESS if the DMA accepts all the packets successfully,
 *		- XST_FAILURE if error occurs
 *
 * @note	None.
 *
 ******************************************************************************/
static int SendPacket(XAxiDma * AxiDmaInstPtr, u8 TDest, u8 TId, u8 Value)
{
	XAxiDma_BdRing *TxRingPtr = XAxiDma_GetTxRing(AxiDmaInstPtr);
	u8 *TxPacket;
	XAxiDma_Bd *BdPtr, *BdCurPtr;
	int Status;
	int Index, Pkts;
	u32 BufferAddr;

	/*
	 * Each packet is limited to TxRingPtr->MaxTransferLen
	 *
	 * This will not be the case if hardware has store and forward built in
	 */
	if (MAX_PKT_LEN * NUMBER_OF_BDS_PER_PKT > TxRingPtr->MaxTransferLen) {
		xil_printf("Invalid total per packet transfer length for the "
		    "packet %d/%d\r\n",
		    MAX_PKT_LEN * NUMBER_OF_BDS_PER_PKT,
		    TxRingPtr->MaxTransferLen);
		return XST_INVALID_PARAM;
	}

	/* Fill the source buffer with an incrementing byte pattern */
	TxPacket = (u8 *) Packet;
	for(Index = 0; Index < MAX_PKT_LEN * NUMBER_OF_BDS_TO_TRANSFER;
								Index ++) {
		TxPacket[Index] = Value;
		Value = (Value + 1) & 0xFF;
	}

	/* Flush the SrcBuffer before the DMA transfer, in case the Data Cache
	 * is enabled
	 */
	Xil_DCacheFlushRange((u32)TxPacket,
		MAX_PKT_LEN * NUMBER_OF_BDS_TO_TRANSFER);

	Status = XAxiDma_BdRingAlloc(TxRingPtr, NUMBER_OF_BDS_TO_TRANSFER,
								&BdPtr);
	if (Status != XST_SUCCESS) {
		xil_printf("Failed bd alloc\r\n");
		return XST_FAILURE;
	}

	BufferAddr = (u32) TxPacket;
	BdCurPtr = BdPtr;

	/*
	 * Set up the BD using the information of the packet to transmit
	 * Each transfer has NUMBER_OF_BDS_PER_PKT BDs
	 */
	for(Index = 0; Index < NUMBER_OF_PKTS_TO_TRANSFER; Index++) {
		for(Pkts = 0; Pkts < NUMBER_OF_BDS_PER_PKT; Pkts++) {
			u32 CrBits = 0;

			Status = XAxiDma_BdSetBufAddr(BdCurPtr, BufferAddr);
			if (Status != XST_SUCCESS) {
				xil_printf("Tx set buffer addr %x on BD %x failed %d\r\n",
				(unsigned int)BufferAddr,
				(unsigned int)BdCurPtr, Status);
				return XST_FAILURE;
			}

			Status = XAxiDma_BdSetLength(BdCurPtr, HSIZE,
						TxRingPtr->MaxTransferLen);
			if (Status != XST_SUCCESS) {
				/* Fixed: the log previously printed
				 * MAX_PKT_LEN though HSIZE is what was set */
				xil_printf("Tx set length %d on BD %x failed %d\r\n",
				HSIZE, (unsigned int)BdCurPtr, Status);
				return XST_FAILURE;
			}

			if (Pkts == 0) {
				/* The first BD has SOF set */
				CrBits |= XAXIDMA_BD_CTRL_TXSOF_MASK;

#if (XPAR_AXIDMA_0_SG_INCLUDE_STSCNTRL_STRM == 1)
				/* The first BD has total transfer length set
				 * in the last APP word, this is for the
				 * loopback widget
				 */
				Status = XAxiDma_BdSetAppWord(BdCurPtr,
				    XAXIDMA_LAST_APPWORD,
				    MAX_PKT_LEN * NUMBER_OF_BDS_PER_PKT);
				if (Status != XST_SUCCESS) {
					xil_printf("Set app word failed with %d\r\n",
					Status);
				}
#endif
			}

			if(Pkts == (NUMBER_OF_BDS_PER_PKT - 1)) {
				/* The last BD should have EOF and IOC set */
				CrBits |= XAXIDMA_BD_CTRL_TXEOF_MASK;
			}

			XAxiDma_BdSetCtrl(BdCurPtr, CrBits);
			XAxiDma_BdSetId(BdCurPtr, BufferAddr);

			/* Multichannel/2D transfer attributes */
			XAxiDma_BdSetTId(BdCurPtr, TId);
			XAxiDma_BdSetTDest(BdCurPtr, TDest);
			XAxiDma_BdSetTUser(BdCurPtr, TUSER);
			XAxiDma_BdSetARCache(BdCurPtr, ARCACHE);
			XAxiDma_BdSetARUser(BdCurPtr, ARUSER);
			XAxiDma_BdSetVSize(BdCurPtr, VSIZE);
			XAxiDma_BdSetStride(BdCurPtr, STRIDE);

			BufferAddr += MAX_PKT_LEN;
			BdCurPtr = XAxiDma_BdRingNext(TxRingPtr, BdCurPtr);
		}
	}

	/* Give the BD to hardware */
	Status = XAxiDma_BdRingToHw(TxRingPtr, NUMBER_OF_BDS_TO_TRANSFER,
						BdPtr);
	if (Status != XST_SUCCESS) {
		xil_printf("Failed to hw, length %d\r\n",
			(int)XAxiDma_BdGetLength(BdPtr,
					TxRingPtr->MaxTransferLen));
		return XST_FAILURE;
	}

	return XST_SUCCESS;
}
/*****************************************************************************/
/**
 *
 * This function non-blockingly transmits the stimulus buffer through the DMA
 * engine as 32 BDs of MAX_PKT_LEN*4 bytes each, every BD carrying both SOF
 * and EOF (one packet per BD).
 *
 * @param	AxiDmaInstPtr points to the DMA engine instance
 * @param	c is currently unused; retained for interface compatibility
 *
 * @return	- XST_SUCCESS if the DMA accepts the packets successfully,
 *		- XST_FAILURE otherwise.
 *
 * @note	None.
 *
 ******************************************************************************/
static int SendPacket(XAxiDma * AxiDmaInstPtr,int c)
{
	XAxiDma_BdRing *TxRingPtr;
	cplx_data_t *TxPacket;
	XAxiDma_Bd *BdPtr;
	XAxiDma_Bd *BdCurPtr;
	u32 TxBufferPtr;
	int FreeBdCount;
	int Status;
	int Index;

	(void)c; /* unused; silences -Wunused-parameter */

	TxRingPtr = XAxiDma_GetTxRing(AxiDmaInstPtr);

	/* Point at the stimulus buffer to transmit */
	TxPacket = (cplx_data_t *)stim_buf;

	/* Flush the SrcBuffer before the DMA transfer, in case the Data Cache
	 * is enabled
	 */
	Xil_DCacheFlushRange((u32)TxPacket, MAX_PKT_LEN*64);

	FreeBdCount = XAxiDma_BdRingGetFreeCnt(TxRingPtr);
	xil_printf("TxFreeBdCount %d", FreeBdCount);

	/* Allocate the 32 BDs this transfer uses */
	Status = XAxiDma_BdRingAlloc(TxRingPtr, 32, &BdPtr);
	if (Status != XST_SUCCESS) {
		return XST_FAILURE;
	}
	xil_printf("check1");

	BdCurPtr = BdPtr;
	TxBufferPtr = (u32)stim_buf;

	for (Index = 0; Index < 32; Index++) {
		Status = XAxiDma_BdSetBufAddr(BdCurPtr, TxBufferPtr);
		if (Status != XST_SUCCESS) {
			xil_printf("Tx set buffer addr %x on BD %x failed %d\r\n",
			    (unsigned int)TxBufferPtr,
			    (unsigned int)BdCurPtr, Status);
			return XST_FAILURE;
		}

		Status = XAxiDma_BdSetLength(BdCurPtr, MAX_PKT_LEN*4,
					TxRingPtr->MaxTransferLen);
		if (Status != XST_SUCCESS) {
			xil_printf("Tx set length %d on BD %x failed %d\r\n",
			    MAX_PKT_LEN*4, (unsigned int)BdCurPtr, Status);
			return XST_FAILURE;
		}

#if (XPAR_AXIDMA_0_SG_INCLUDE_STSCNTRL_STRM == 1)
		Status = XAxiDma_BdSetAppWord(BdCurPtr,
		    XAXIDMA_LAST_APPWORD, MAX_PKT_LEN*2);

		/* If Set app length failed, it is not fatal */
		if (Status != XST_SUCCESS) {
			xil_printf("Set app word failed with %d\r\n", Status);
		}
#endif

		/* For single packet per BD, both SOF and EOF are to be set */
		XAxiDma_BdSetCtrl(BdCurPtr, XAXIDMA_BD_CTRL_TXEOF_MASK |
					XAXIDMA_BD_CTRL_TXSOF_MASK);

		XAxiDma_BdSetId(BdCurPtr, (u32) TxBufferPtr);

		TxBufferPtr += MAX_PKT_LEN*4;
		BdCurPtr = XAxiDma_BdRingNext(TxRingPtr, BdCurPtr);
	}

	/* Give the BD to DMA to kick off the transmission. */
	Status = XAxiDma_BdRingToHw(TxRingPtr, 32, BdPtr);
	if (Status != XST_SUCCESS) {
		xil_printf("to hw failed %d\r\n", Status);
		return XST_FAILURE;
	}

	return XST_SUCCESS;
}