/*
 * Replenish the RX BD ring with freshly allocated pbufs after received
 * frames have been handed to the stack.  One BD is allocated, committed
 * to hardware and re-armed per free ring slot.
 *
 * @param xemacpsif  driver instance; selects the per-instance slice of
 *                   rx_pbufs_storage
 * @param rxring     RX buffer-descriptor ring of that instance
 */
void setup_rx_bds(xemacpsif_s *xemacpsif, XEmacPs_BdRing *rxring)
{
	XEmacPs_Bd *rxbd;
	XStatus status;
	struct pbuf *p;
	u32_t freebds;
	u32_t bdindex;
	u32_t *temp;
	u32_t index = 0;

	/* A second controller keeps its pbuf pointers in an upper slice of
	 * rx_pbufs_storage.  NOTE(review): the offset is
	 * sizeof(s32_t) * XLWIP_CONFIG_N_RX_DESC rather than
	 * XLWIP_CONFIG_N_RX_DESC -- confirm the array really reserves that
	 * many slots per instance. */
	if (xemacpsif->emacps.Config.BaseAddress != XPAR_XEMACPS_0_BASEADDR) {
		index = sizeof(s32_t) * XLWIP_CONFIG_N_RX_DESC;
	}

	freebds = XEmacPs_BdRingGetFreeCnt (rxring);
	while (freebds > 0) {
		freebds--;

		/* One full-frame buffer per descriptor. */
		p = pbuf_alloc(PBUF_RAW, XEMACPS_MAX_FRAME_SIZE, PBUF_POOL);
		if (!p) {
#if LINK_STATS
			lwip_stats.link.memerr++;
			lwip_stats.link.drop++;
#endif
			printf("unable to alloc pbuf in recv_handler\r\n");
			return;
		}
		status = XEmacPs_BdRingAlloc(rxring, 1, &rxbd);
		if (status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("setup_rx_bds: Error allocating RxBD\r\n"));
			pbuf_free(p);
			return;
		}
		status = XEmacPs_BdRingToHw(rxring, 1, rxbd);
		if (status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("Error committing RxBD to hardware: "));
			if (status == XST_DMA_SG_LIST_ERROR)
				LWIP_DEBUGF(NETIF_DEBUG, ("XST_DMA_SG_LIST_ERROR: this function was called out of sequence with XEmacPs_BdRingAlloc()\r\n"));
			else
				LWIP_DEBUGF(NETIF_DEBUG, ("set of BDs was rejected because the first BD did not have its start-of-packet bit set, or the last BD did not have its end-of-packet bit set, or any one of the BD set has 0 as length value\r\n"));
			pbuf_free(p);
			/* Undo the BdRingAlloc above so the descriptor is not lost. */
			XEmacPs_BdRingUnAlloc(rxring, 1, rxbd);
			return;
		}

		/* Drop any stale cache lines so the CPU later reads what DMA
		 * wrote into the buffer. */
		Xil_DCacheInvalidateRange((u32_t)p->payload, (u32_t)XEMACPS_MAX_FRAME_SIZE);
		bdindex = XEMACPS_BD_TO_INDEX(rxring, rxbd);

		/* Rewrite the descriptor words directly: word 0 = buffer
		 * address (cleared here; the wrap bit 0x2 is kept on the last
		 * ring entry), word 1 = status (cleared).  NOTE(review): this
		 * happens after XEmacPs_BdRingToHw() -- confirm the receiver
		 * cannot already be scanning this BD at that point. */
		temp = (u32_t *)rxbd;
		*temp = 0;
		if (bdindex == (XLWIP_CONFIG_N_RX_DESC - 1)) {
			*temp = 0x00000002;
		}
		temp++;
		*temp = 0;

		XEmacPs_BdSetAddressRx(rxbd, (u32_t)p->payload);
		/* Remember the pbuf so the RX handler can find it by BD index. */
		rx_pbufs_storage[index + bdindex] = (s32_t)p;
	}
}
/*
 * Replenish the RX BD ring (legacy single-instance variant): allocate all
 * currently free descriptors in one batch, attach a fresh full-frame pbuf
 * to each and commit the set to hardware.
 *
 * Fix vs. the previous version: if pbuf_alloc() fails part-way through,
 * the BDs already prepared are still committed to hardware and the
 * untouched remainder is returned to the free list via
 * XEmacPs_BdRingUnAlloc().  Previously the whole allocated set was
 * abandoned on failure, permanently shrinking the usable ring.
 */
void _setup_rx_bds(XEmacPs_BdRing *rxring)
{
	XEmacPs_Bd *rxbd, *CurBdPtr;
	XStatus Status;
	struct pbuf *p;
	unsigned int FreeBds, k;
	unsigned int BdIndex;
	unsigned int *Temp;

	FreeBds = XEmacPs_BdRingGetFreeCnt (rxring);
	if (FreeBds == 0) {
		/* Nothing to refill. */
		return;
	}
	Status = XEmacPs_BdRingAlloc(rxring, FreeBds, &rxbd);
	if (Status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("setup_rx_bds: Error allocating RxBD\r\n"));
		return;
	}

	for (k = 0, CurBdPtr = rxbd; k < FreeBds; k++) {
		p = pbuf_alloc(PBUF_RAW, XEMACPS_MAX_FRAME_SIZE, PBUF_POOL);
		if (!p) {
#if LINK_STATS
			lwip_stats.link.memerr++;
			lwip_stats.link.drop++;
#endif
			LWIP_DEBUGF(NETIF_DEBUG, ("unable to alloc pbuf in recv_handler\r\n"));
			/* FIX: return the BDs we did not get to (CurBdPtr onwards)
			 * to the free list instead of leaking the whole set. */
			XEmacPs_BdRingUnAlloc(rxring, FreeBds - k, CurBdPtr);
			break;
		}

		/* Rewrite descriptor word 0 (buffer address; wrap bit 0x2 kept
		 * on the last ring entry) and word 1 (status, cleared). */
		BdIndex = XEMACPS_BD_TO_INDEX(rxring, CurBdPtr);
		Temp = (unsigned int *)CurBdPtr;
		*Temp = 0;
		if (BdIndex == (XLWIP_CONFIG_N_RX_DESC - 1)) {
			*Temp = 0x00000002;
		}
		Temp++;
		*Temp = 0;

		XEmacPs_BdSetAddressRx(CurBdPtr, (u32)p->payload);
		/* Barrier: descriptor contents must be visible before use. */
		dsb();
		/* Remember the pbuf so the RX handler can find it by index. */
		rx_pbufs_storage[BdIndex] = (int)p;
		CurBdPtr = XEmacPs_BdRingNext (rxring, CurBdPtr);
	}

	if (k == 0) {
		/* Nothing was prepared; everything was unallocated above. */
		return;
	}

	/* Enqueue to HW: only the k descriptors actually prepared. */
	Status = XEmacPs_BdRingToHw(rxring, k, rxbd);
	if (Status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error committing RxBD to hardware: "));
		if (Status == XST_DMA_SG_LIST_ERROR)
			LWIP_DEBUGF(NETIF_DEBUG, ("XST_DMA_SG_LIST_ERROR: this function was called out of sequence with XEmacPs_BdRingAlloc()\r\n"));
		else
			LWIP_DEBUGF(NETIF_DEBUG, ("set of BDs was rejected because the first BD did not have its start-of-packet bit set, or the last BD did not have its end-of-packet bit set, or any one of the BD set has 0 as length value\r\n"));
		return;
	}
}
/*
 * Reclaim TX BDs the controller has finished with: free the pbufs that
 * were queued in emacps_sgsend(), mark each descriptor "used" again and
 * return it to the ring's free set.  Loops until the hardware reports no
 * more processed BDs.
 *
 * @param xemacpsif  driver instance; selects the per-instance slice of
 *                   tx_pbufs_storage
 * @param txring     TX buffer-descriptor ring of that instance
 */
void process_sent_bds(xemacpsif_s *xemacpsif, XEmacPs_BdRing *txring)
{
	XEmacPs_Bd *txbdset;
	XEmacPs_Bd *curbdpntr;
	s32_t n_bds;
	XStatus status;
	s32_t n_pbufs_freed = 0;
	u32_t bdindex;
	struct pbuf *p;
	u32_t *temp;
	u32_t index = 0;

	/* A second controller uses the upper slice of tx_pbufs_storage.
	 * NOTE(review): offset is sizeof(s32_t) * XLWIP_CONFIG_N_TX_DESC --
	 * confirm the array is sized accordingly. */
	if (xemacpsif->emacps.Config.BaseAddress != XPAR_XEMACPS_0_BASEADDR) {
		index = sizeof(s32_t) * XLWIP_CONFIG_N_TX_DESC;
	}

	while (1) {
		/* obtain processed BD's */
		n_bds = XEmacPs_BdRingFromHwTx(txring, XLWIP_CONFIG_N_TX_DESC, &txbdset);
		if (n_bds == 0) {
			return;
		}
		/* free the processed BD's */
		n_pbufs_freed = n_bds;
		curbdpntr = txbdset;
		while (n_pbufs_freed > 0) {
			bdindex = XEMACPS_BD_TO_INDEX(txring, curbdpntr);
			/* Reset the descriptor words directly: word 0 (buffer
			 * address) = 0; word 1 (status) = used bit 0x80000000,
			 * plus the wrap bit on the last ring entry (0xC0000000). */
			temp = (u32_t *)curbdpntr;
			*temp = 0;
			temp++;
			*temp = 0x80000000;
			if (bdindex == (XLWIP_CONFIG_N_TX_DESC - 1)) {
				*temp = 0xC0000000;
			}
			/* Release the pbuf that was queued for this descriptor. */
			p = (struct pbuf *)tx_pbufs_storage[index + bdindex];
			if (p != NULL) {
				pbuf_free(p);
			}
			tx_pbufs_storage[index + bdindex] = 0;
			curbdpntr = XEmacPs_BdRingNext(txring, curbdpntr);
			n_pbufs_freed--;
			/* Barrier: make the descriptor writes visible before the
			 * next iteration / before freeing the set below. */
			dsb();
		}
		status = XEmacPs_BdRingFree(txring, n_bds, txbdset);
		if (status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("Failure while freeing in Tx Done ISR\r\n"));
		}
	}
	return;
}
/*
 * Reclaim TX BDs the controller has finished with (legacy single-instance
 * variant): free the pbuf attached to each processed descriptor, rewrite
 * the descriptor back to its idle "used" state and hand the set back to
 * the ring's free list.  Repeats until the hardware reports nothing more.
 */
void process_sent_bds(XEmacPs_BdRing *txring)
{
	XEmacPs_Bd *bd_set;
	XEmacPs_Bd *bd_ptr;
	int num_bds;
	XStatus rc;
	int remaining;
	unsigned int idx;
	struct pbuf *pkt;
	unsigned int *word_ptr;

	for (;;) {
		/* Pull every descriptor the controller has completed. */
		num_bds = XEmacPs_BdRingFromHwTx(txring, XLWIP_CONFIG_N_TX_DESC, &bd_set);
		if (num_bds == 0) {
			return;
		}

		/* Recycle each processed descriptor in ring order. */
		for (remaining = num_bds, bd_ptr = bd_set; remaining > 0; remaining--) {
			idx = XEMACPS_BD_TO_INDEX(txring, bd_ptr);

			/* Word 0 (buffer address) -> 0; word 1 (status) -> used
			 * bit 0x80000000, keeping the wrap bit on the last ring
			 * entry (0xC0000000). */
			word_ptr = (unsigned int *)bd_ptr;
			*word_ptr = 0;
			word_ptr++;
			*word_ptr = 0x80000000;
			if (idx == (XLWIP_CONFIG_N_TX_DESC - 1)) {
				*word_ptr = 0xC0000000;
			}

			/* Drop the reference taken when the pbuf was queued. */
			pkt = (struct pbuf *)tx_pbufs_storage[idx];
			if (pkt != NULL) {
				pbuf_free(pkt);
			}
			tx_pbufs_storage[idx] = 0;

			bd_ptr = XEmacPs_BdRingNext(txring, bd_ptr);
			/* Barrier: publish the descriptor writes. */
			dsb();
		}

		rc = XEmacPs_BdRingFree(txring, num_bds, bd_set);
		if (rc != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("Failure while freeing in Tx Done ISR\r\n"));
		}
	}
}
/*
 * One-time DMA setup for an EMACPS instance: place the BD rings in a
 * dedicated uncached region, create and clone the RX/TX rings, pre-load
 * every RX descriptor with a pbuf, and hook the GIC interrupt handler.
 *
 * @param xemac  lwIP adapter handle; xemac->state is the xemacpsif_s
 * @return 0 on success, ERR_IF on any failure
 */
XStatus init_dma(struct xemac_s *xemac)
{
	XEmacPs_Bd bdtemplate;
	XEmacPs_BdRing *rxringptr, *txringptr;
	XEmacPs_Bd *rxbd;
	struct pbuf *p;
	XStatus status;
	s32_t i;
	u32_t bdindex;
	volatile u32_t tempaddress;
	xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
	struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];

	/*
	 * The BDs need to be allocated in uncached memory. Hence the 1 MB
	 * address range allocated for Bd_Space is made uncached
	 * by setting appropriate attributes in the translation table.
	 * The Bd_Space is aligned to 1MB and has a size of 1 MB. This ensures
	 * a reserved uncached area used only for BDs.
	 */
	if (bd_space_attr_set == 0) {
		/* Done once for all instances; 0xc02 is the TLB attribute used
		 * here to make the region uncached. */
		Xil_SetTlbAttributes((s32_t)bd_space, 0xc02); // addr, attr
		bd_space_attr_set = 1;
	}

	rxringptr = &XEmacPs_GetRxRing(&xemacpsif->emacps);
	txringptr = &XEmacPs_GetTxRing(&xemacpsif->emacps);
	LWIP_DEBUGF(NETIF_DEBUG, ("rxringptr: 0x%08x\r\n", rxringptr));
	LWIP_DEBUGF(NETIF_DEBUG, ("txringptr: 0x%08x\r\n", txringptr));

	/* Allocate 64k for Rx and Tx bds each to take care of extreme cases */
	tempaddress = (u32_t)&(bd_space[bd_space_index]);
	xemacpsif->rx_bdspace = (void *)tempaddress;
	bd_space_index += 0x10000;
	tempaddress = (u32_t)&(bd_space[bd_space_index]);
	xemacpsif->tx_bdspace = (void *)tempaddress;
	bd_space_index += 0x10000;

	LWIP_DEBUGF(NETIF_DEBUG, ("rx_bdspace: 0x%08x\r\n", xemacpsif->rx_bdspace));
	LWIP_DEBUGF(NETIF_DEBUG, ("tx_bdspace: 0x%08x\r\n", xemacpsif->tx_bdspace));

	if (!xemacpsif->rx_bdspace || !xemacpsif->tx_bdspace) {
		xil_printf("%s@%d: Error: Unable to allocate memory for TX/RX buffer descriptors",
				__FILE__, __LINE__);
		return ERR_IF;
	}

	/*
	 * Setup RxBD space.
	 *
	 * Setup a BD template for the Rx channel. This template will be copied to
	 * every RxBD. We will not have to explicitly set these again.
	 */
	XEmacPs_BdClear(&bdtemplate);

	/*
	 * Create the RxBD ring
	 */
	status = XEmacPs_BdRingCreate(rxringptr, (u32) xemacpsif->rx_bdspace,
			(u32) xemacpsif->rx_bdspace, BD_ALIGNMENT,
			XLWIP_CONFIG_N_RX_DESC);
	if (status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error setting up RxBD space\r\n"));
		return ERR_IF;
	}
	status = XEmacPs_BdRingClone(rxringptr, &bdtemplate, XEMACPS_RECV);
	if (status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("Error initializing RxBD space\r\n"));
		return ERR_IF;
	}

	/* TX template starts out "used" so the controller skips the BDs
	 * until software explicitly hands them over. */
	XEmacPs_BdClear(&bdtemplate);
	XEmacPs_BdSetStatus(&bdtemplate, XEMACPS_TXBUF_USED_MASK);

	/*
	 * Create the TxBD ring
	 */
	status = XEmacPs_BdRingCreate(txringptr, (u32) xemacpsif->tx_bdspace,
			(u32) xemacpsif->tx_bdspace, BD_ALIGNMENT,
			XLWIP_CONFIG_N_TX_DESC);
	if (status != XST_SUCCESS) {
		return ERR_IF;
	}
	/* We reuse the bd template, as the same one will work for both rx and tx. */
	status = XEmacPs_BdRingClone(txringptr, &bdtemplate, XEMACPS_SEND);
	if (status != XST_SUCCESS) {
		return ERR_IF;
	}

	/*
	 * Allocate RX descriptors, 1 RxBD at a time.
	 */
	for (i = 0; i < XLWIP_CONFIG_N_RX_DESC; i++) {
		p = pbuf_alloc(PBUF_RAW, XEMACPS_MAX_FRAME_SIZE, PBUF_POOL);
		if (!p) {
#if LINK_STATS
			lwip_stats.link.memerr++;
			lwip_stats.link.drop++;
#endif
			printf("unable to alloc pbuf in init_dma\r\n");
			return ERR_IF;
		}
		status = XEmacPs_BdRingAlloc(rxringptr, 1, &rxbd);
		if (status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("init_dma: Error allocating RxBD\r\n"));
			pbuf_free(p);
			return ERR_IF;
		}
		/* Enqueue to HW */
		status = XEmacPs_BdRingToHw(rxringptr, 1, rxbd);
		if (status != XST_SUCCESS) {
			LWIP_DEBUGF(NETIF_DEBUG, ("Error: committing RxBD to HW\r\n"));
			pbuf_free(p);
			XEmacPs_BdRingUnAlloc(rxringptr, 1, rxbd);
			return ERR_IF;
		}

		/* Invalidate so later CPU reads see DMA-written data, then arm
		 * the descriptor.  NOTE(review): the buffer address is written
		 * after ToHw -- presumably harmless only because the receiver
		 * has not been started yet; confirm before reordering. */
		Xil_DCacheInvalidateRange((u32_t)p->payload, (u32_t)XEMACPS_MAX_FRAME_SIZE);
		XEmacPs_BdSetAddressRx(rxbd, (u32_t)p->payload);

		bdindex = XEMACPS_BD_TO_INDEX(rxringptr, rxbd);
		/* NOTE(review): unlike setup_rx_bds(), no per-instance offset
		 * is applied here -- for a second controller this writes into
		 * the first controller's slice of rx_pbufs_storage; confirm. */
		rx_pbufs_storage[bdindex] = (s32_t)p;
	}

	/*
	 * Connect the device driver handler that will be called when an
	 * interrupt for the device occurs, the handler defined above performs
	 * the specific interrupt processing for the device.
	 */
	XScuGic_RegisterHandler(INTC_BASE_ADDR, xtopologyp->scugic_emac_intr,
			(Xil_ExceptionHandler)XEmacPs_IntrHandler,
			(void *)&xemacpsif->emacps);
	/*
	 * Enable the interrupt for emacps.
	 */
	XScuGic_EnableIntr(INTC_DIST_BASE_ADDR, (u32) xtopologyp->scugic_emac_intr);
	emac_intr_num = (u32) xtopologyp->scugic_emac_intr;
	return 0;
}
void emacps_recv_handler(void *arg) { struct pbuf *p; XEmacPs_Bd *rxbdset, *curbdptr; struct xemac_s *xemac; xemacpsif_s *xemacpsif; XEmacPs_BdRing *rxring; volatile s32_t bd_processed; s32_t rx_bytes, k; u32_t bdindex; u32_t regval; xemac = (struct xemac_s *)(arg); xemacpsif = (xemacpsif_s *)(xemac->state); rxring = &XEmacPs_GetRxRing(&xemacpsif->emacps); #ifdef OS_IS_FREERTOS xInsideISR++; #endif /* * If Reception done interrupt is asserted, call RX call back function * to handle the processed BDs and then raise the according flag. */ regval = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_RXSR_OFFSET); XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_RXSR_OFFSET, regval); resetrx_on_no_rxdata(xemacpsif); while(1) { bd_processed = XEmacPs_BdRingFromHwRx(rxring, XLWIP_CONFIG_N_RX_DESC, &rxbdset); if (bd_processed <= 0) { break; } for (k = 0, curbdptr=rxbdset; k < bd_processed; k++) { bdindex = XEMACPS_BD_TO_INDEX(rxring, curbdptr); p = (struct pbuf *)rx_pbufs_storage[bdindex]; /* * Adjust the buffer size to the actual number of bytes received. */ rx_bytes = XEmacPs_BdGetLength(curbdptr); pbuf_realloc(p, rx_bytes); /* store it in the receive queue, * where it'll be processed by a different handler */ if (pq_enqueue(xemacpsif->recv_q, (void*)p) < 0) { #if LINK_STATS lwip_stats.link.memerr++; lwip_stats.link.drop++; #endif pbuf_free(p); } else { #if !NO_SYS sys_sem_signal(&xemac->sem_rx_data_available); #endif } curbdptr = XEmacPs_BdRingNext( rxring, curbdptr); } /* free up the BD's */ XEmacPs_BdRingFree(rxring, bd_processed, rxbdset); setup_rx_bds(rxring); } #ifdef OS_IS_FREERTOS xInsideISR--; #endif return; }
/*
 * Queue a (possibly chained) pbuf for transmission: one TX BD per pbuf
 * fragment.  IRQ and FIQ are masked (CPSR I/F bits) for the duration to
 * keep the ring consistent with the TX-done handler.
 *
 * Fixes vs. the previous version:
 *  - per-instance offset into tx_pbufs_storage, matching the offset
 *    process_sent_bds() uses when freeing; without it, pbufs sent on a
 *    second controller were stored in the wrong slice and never freed
 *  - error paths after XEmacPs_BdRingAlloc() now roll back the pbuf
 *    references and return the BDs via XEmacPs_BdRingUnAlloc() instead
 *    of leaking both
 *
 * @return XST_SUCCESS on success, XST_FAILURE otherwise
 */
XStatus emacps_sgsend(xemacpsif_s *xemacpsif, struct pbuf *p)
{
	struct pbuf *q;
	s32_t n_pbufs;
	XEmacPs_Bd *txbdset, *txbd, *last_txbd = NULL;
	XEmacPs_Bd *temp_txbd;
	XEmacPs_Bd *undo_bd;
	struct pbuf *undo_q;
	XStatus status;
	XEmacPs_BdRing *txring;
	u32_t bdindex;
	u32_t undo_index;
	u32_t lev;
	u32_t index = 0;

	/* Mask IRQ+FIQ while manipulating the ring. */
	lev = mfcpsr();
	mtcpsr(lev | 0x000000C0);

	txring = &(XEmacPs_GetTxRing(&xemacpsif->emacps));

	/* FIX: a second controller keeps its pbufs in the upper slice of
	 * tx_pbufs_storage -- the same slice process_sent_bds() frees from. */
	if (xemacpsif->emacps.Config.BaseAddress != XPAR_XEMACPS_0_BASEADDR) {
		index = sizeof(s32_t) * XLWIP_CONFIG_N_TX_DESC;
	}

	/* first count the number of pbufs */
	for (q = p, n_pbufs = 0; q != NULL; q = q->next)
		n_pbufs++;

	/* obtain as many BD's */
	status = XEmacPs_BdRingAlloc(txring, n_pbufs, &txbdset);
	if (status != XST_SUCCESS) {
		mtcpsr(lev);
		LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error allocating TxBD\r\n"));
		return XST_FAILURE;
	}

	for (q = p, txbd = txbdset; q != NULL; q = q->next) {
		bdindex = XEMACPS_BD_TO_INDEX(txring, txbd);
		if (tx_pbufs_storage[index + bdindex] != 0) {
			/* A previous send on this BD has not been reclaimed yet.
			 * FIX: drop the refs taken for earlier fragments and
			 * return all allocated BDs (previously leaked). */
			undo_bd = txbdset;
			for (undo_q = p; undo_q != q; undo_q = undo_q->next) {
				undo_index = XEMACPS_BD_TO_INDEX(txring, undo_bd);
				pbuf_free((struct pbuf *)tx_pbufs_storage[index + undo_index]);
				tx_pbufs_storage[index + undo_index] = 0;
				undo_bd = XEmacPs_BdRingNext(txring, undo_bd);
			}
			XEmacPs_BdRingUnAlloc(txring, n_pbufs, txbdset);
			mtcpsr(lev);
			LWIP_DEBUGF(NETIF_DEBUG, ("PBUFS not available\r\n"));
			return XST_FAILURE;
		}

		/* Send the data from the pbuf to the interface, one pbuf at a
		 * time. The size of the data in each pbuf is kept in the ->len
		 * variable.  Flush so DMA reads what the CPU just wrote. */
		Xil_DCacheFlushRange((u32_t)q->payload, (u32_t)q->len);
		XEmacPs_BdSetAddressTx(txbd, (u32)q->payload);
		/* The BD length field is 14 bits; clamp to the frame payload. */
		if (q->len > (XEMACPS_MAX_FRAME_SIZE - 18))
			XEmacPs_BdSetLength(txbd, (XEMACPS_MAX_FRAME_SIZE - 18) & 0x3FFF);
		else
			XEmacPs_BdSetLength(txbd, q->len & 0x3FFF);

		/* Hold a reference until the TX-done handler frees it. */
		tx_pbufs_storage[index + bdindex] = (s32_t)q;
		pbuf_ref(q);
		last_txbd = txbd;
		XEmacPs_BdClearLast(txbd);
		txbd = XEmacPs_BdRingNext(txring, txbd);
	}
	XEmacPs_BdSetLast(last_txbd);

	/*
	 * For fragmented packets, remember the 1st BD allocated for the 1st
	 * packet fragment.  Clear the used bit on the fragments first and on
	 * the head BD last, so the controller never sees a chain whose head
	 * is released before its tail is ready.
	 */
	temp_txbd = txbdset;
	txbd = XEmacPs_BdRingNext(txring, temp_txbd);
	for (q = p->next; q != NULL; q = q->next) {
		XEmacPs_BdClearTxUsed(txbd);
		txbd = XEmacPs_BdRingNext(txring, txbd);
	}
	XEmacPs_BdClearTxUsed(temp_txbd);

	status = XEmacPs_BdRingToHw(txring, n_pbufs, txbdset);
	if (status != XST_SUCCESS) {
		/* FIX: roll back -- re-mark the BDs used so the controller
		 * cannot pick up the stale chain, drop our pbuf refs, and
		 * return the BDs to the free list (previously leaked). */
		undo_bd = txbdset;
		for (undo_q = p; undo_q != NULL; undo_q = undo_q->next) {
			XEmacPs_BdSetStatus(undo_bd, XEMACPS_TXBUF_USED_MASK);
			undo_index = XEMACPS_BD_TO_INDEX(txring, undo_bd);
			pbuf_free((struct pbuf *)tx_pbufs_storage[index + undo_index]);
			tx_pbufs_storage[index + undo_index] = 0;
			undo_bd = XEmacPs_BdRingNext(txring, undo_bd);
		}
		XEmacPs_BdRingUnAlloc(txring, n_pbufs, txbdset);
		mtcpsr(lev);
		LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error submitting TxBD\r\n"));
		return XST_FAILURE;
	}

	/* Start transmit */
	XEmacPs_WriteReg((xemacpsif->emacps).Config.BaseAddress,
			XEMACPS_NWCTRL_OFFSET,
			(XEmacPs_ReadReg((xemacpsif->emacps).Config.BaseAddress,
			XEMACPS_NWCTRL_OFFSET) | XEMACPS_NWCTRL_STARTTX_MASK));

	mtcpsr(lev);
	return status;
}
XStatus init_dma(struct xemac_s *xemac) { XEmacPs_Bd BdTemplate; XEmacPs_BdRing *RxRingPtr, *TxRingPtr; XEmacPs_Bd *rxbd; struct pbuf *p; XStatus Status; int i; unsigned int BdIndex; char *endAdd = &_end; /* * Align the BD starte address to 1 MB boundary. */ char *endAdd_aligned = (char *)(((int)endAdd + 0x100000) & (~0xFFFFF)); xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state); struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index]; /* * The BDs need to be allocated in uncached memory. Hence the 1 MB * address range that starts at address 0xFF00000 is made uncached * by setting appropriate attributes in the translation table. */ Xil_SetTlbAttributes((int)endAdd_aligned, 0xc02); // addr, attr RxRingPtr = &XEmacPs_GetRxRing(&xemacpsif->emacps); TxRingPtr = &XEmacPs_GetTxRing(&xemacpsif->emacps); LWIP_DEBUGF(NETIF_DEBUG, ("RxRingPtr: 0x%08x\r\n", RxRingPtr)); LWIP_DEBUGF(NETIF_DEBUG, ("TxRingPtr: 0x%08x\r\n", TxRingPtr)); xemacpsif->rx_bdspace = (void *)endAdd_aligned; /* * We allocate 65536 bytes for Rx BDs which can accomodate a * maximum of 8192 BDs which is much more than any application * will ever need. */ xemacpsif->tx_bdspace = (void *)(endAdd_aligned + 0x10000); LWIP_DEBUGF(NETIF_DEBUG, ("rx_bdspace: 0x%08x\r\n", xemacpsif->rx_bdspace)); LWIP_DEBUGF(NETIF_DEBUG, ("tx_bdspace: 0x%08x\r\n", xemacpsif->tx_bdspace)); if (!xemacpsif->rx_bdspace || !xemacpsif->tx_bdspace) { xil_printf("%s@%d: Error: Unable to allocate memory for TX/RX buffer descriptors", __FILE__, __LINE__); return XST_FAILURE; } /* * Setup RxBD space. * * Setup a BD template for the Rx channel. This template will be copied to * every RxBD. We will not have to explicitly set these again. 
*/ XEmacPs_BdClear(&BdTemplate); /* * Create the RxBD ring */ Status = XEmacPs_BdRingCreate(RxRingPtr, (u32) xemacpsif->rx_bdspace, (u32) xemacpsif->rx_bdspace, BD_ALIGNMENT, XLWIP_CONFIG_N_RX_DESC); if (Status != XST_SUCCESS) { LWIP_DEBUGF(NETIF_DEBUG, ("Error setting up RxBD space\r\n")); return XST_FAILURE; } Status = XEmacPs_BdRingClone(RxRingPtr, &BdTemplate, XEMACPS_RECV); if (Status != XST_SUCCESS) { LWIP_DEBUGF(NETIF_DEBUG, ("Error initializing RxBD space\r\n")); return XST_FAILURE; } XEmacPs_BdClear(&BdTemplate); XEmacPs_BdSetStatus(&BdTemplate, XEMACPS_TXBUF_USED_MASK); /* * Create the TxBD ring */ Status = XEmacPs_BdRingCreate(TxRingPtr, (u32) xemacpsif->tx_bdspace, (u32) xemacpsif->tx_bdspace, BD_ALIGNMENT, XLWIP_CONFIG_N_TX_DESC); if (Status != XST_SUCCESS) { return XST_FAILURE; } /* We reuse the bd template, as the same one will work for both rx and tx. */ Status = XEmacPs_BdRingClone(TxRingPtr, &BdTemplate, XEMACPS_SEND); if (Status != XST_SUCCESS) { return ERR_IF; } /* * Allocate RX descriptors, 1 RxBD at a time. */ for (i = 0; i < XLWIP_CONFIG_N_RX_DESC; i++) { Status = XEmacPs_BdRingAlloc(RxRingPtr, 1, &rxbd); if (Status != XST_SUCCESS) { LWIP_DEBUGF(NETIF_DEBUG, ("init_dma: Error allocating RxBD\r\n")); return ERR_IF; } p = pbuf_alloc(PBUF_RAW, XEMACPS_MAX_FRAME_SIZE, PBUF_POOL); if (!p) { #if LINK_STATS lwip_stats.link.memerr++; lwip_stats.link.drop++; #endif LWIP_DEBUGF(NETIF_DEBUG, ("unable to alloc pbuf in recv_handler\r\n")); return -1; } XEmacPs_BdSetAddressRx(rxbd, (u32)p->payload); BdIndex = XEMACPS_BD_TO_INDEX(RxRingPtr, rxbd); rx_pbufs_storage[BdIndex] = (int)p; /* Enqueue to HW */ Status = XEmacPs_BdRingToHw(RxRingPtr, 1, rxbd); if (Status != XST_SUCCESS) { LWIP_DEBUGF(NETIF_DEBUG, ("Error: committing RxBD to HW\r\n")); return XST_FAILURE; } } /* * Connect the device driver handler that will be called when an * interrupt for the device occurs, the handler defined above performs * the specific interrupt processing for the device. 
*/ XScuGic_RegisterHandler(INTC_BASE_ADDR, xtopologyp->scugic_emac_intr, (Xil_ExceptionHandler)XEmacPs_IntrHandler, (void *)&xemacpsif->emacps); /* * Enable the interrupt for emacps. */ XScuGic_EnableIntr(INTC_DIST_BASE_ADDR, (u32) xtopologyp->scugic_emac_intr); EmacIntrNum = (u32) xtopologyp->scugic_emac_intr; return 0; }
/*
 * Legacy single-instance TX path: map each pbuf of the chain onto one TX
 * BD, hand the set to the controller and kick transmission.  IRQ and FIQ
 * are masked (CPSR I/F bits) for the whole operation.
 *
 * @return XST_SUCCESS on success, ERR_IF on failure.
 *         NOTE(review): ERR_IF is an lwIP err_t returned through an
 *         XStatus -- callers must only compare against XST_SUCCESS.
 */
XStatus emacps_sgsend(xemacpsif_s *xemacpsif, struct pbuf *p)
{
	struct pbuf *q;
	int n_pbufs;
	XEmacPs_Bd *txbdset, *txbd, *last_txbd = NULL;
	XStatus Status;
	XEmacPs_BdRing *txring;
	unsigned int BdIndex;
	unsigned int lev;

	/* Mask IRQ and FIQ to keep the ring consistent with the TX-done ISR. */
	lev = mfcpsr();
	mtcpsr(lev | 0x000000C0);

#ifdef PEEP
	/* Busy-wait while the transmitter is still going (TXSR bit 3). */
	while((XEmacPs_ReadReg((xemacpsif->emacps).Config.BaseAddress,
			XEMACPS_TXSR_OFFSET)) & 0x08);
#endif
	txring = &(XEmacPs_GetTxRing(&xemacpsif->emacps));

	/* first count the number of pbufs */
	for (q = p, n_pbufs = 0; q != NULL; q = q->next)
		n_pbufs++;

	/* obtain as many BD's */
	Status = XEmacPs_BdRingAlloc(txring, n_pbufs, &txbdset);
	if (Status != XST_SUCCESS) {
		mtcpsr(lev);
		LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error allocating TxBD\r\n"));
		return ERR_IF;
	}

	for(q = p, txbd = txbdset; q != NULL; q = q->next) {
		BdIndex = XEMACPS_BD_TO_INDEX(txring, txbd);
		if (tx_pbufs_storage[BdIndex] != 0) {
			/* Descriptor still owns an un-reclaimed pbuf.
			 * NOTE(review): the BDs allocated above are not returned
			 * with XEmacPs_BdRingUnAlloc() and earlier fragments keep
			 * their pbuf_ref() -- potential descriptor/pbuf leak. */
			mtcpsr(lev);
			LWIP_DEBUGF(NETIF_DEBUG, ("PBUFS not available\r\n"));
			return ERR_IF;
		}
		/* Send the data from the pbuf to the interface, one pbuf at a
		   time. The size of the data in each pbuf is kept in the ->len
		   variable.  Flush so DMA reads what the CPU just wrote. */
		Xil_DCacheFlushRange((unsigned int)q->payload, (unsigned)q->len);
		XEmacPs_BdSetAddressTx(txbd, (u32)q->payload);
		/* The BD length field is 14 bits; clamp to the frame payload. */
		if (q->len > (XEMACPS_MAX_FRAME_SIZE - 18))
			XEmacPs_BdSetLength(txbd, (XEMACPS_MAX_FRAME_SIZE - 18) & 0x3FFF);
		else
			XEmacPs_BdSetLength(txbd, q->len & 0x3FFF);

		/* Hold a reference until the TX-done handler frees it. */
		tx_pbufs_storage[BdIndex] = (int)q;
		pbuf_ref(q);
		last_txbd = txbd;
		XEmacPs_BdClearLast(txbd);
		dsb();
		txbd = XEmacPs_BdRingNext(txring, txbd);
	}
	XEmacPs_BdSetLast(last_txbd);
	dsb();

	/* Hand ownership to hardware only after the BDs are fully set up. */
	for(q = p, txbd = txbdset; q != NULL; q = q->next) {
		XEmacPs_BdClearTxUsed(txbd);
		txbd = XEmacPs_BdRingNext(txring, txbd);
	}
	dsb();

	Status = XEmacPs_BdRingToHw(txring, n_pbufs, txbdset);
	if (Status != XST_SUCCESS) {
		/* NOTE(review): same missing rollback as above -- refs taken
		 * via pbuf_ref() and the allocated BDs are not released here. */
		mtcpsr(lev);
		LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error submitting TxBD\r\n"));
		return ERR_IF;
	}
	dsb();

	/* Start transmit */
	XEmacPs_WriteReg((xemacpsif->emacps).Config.BaseAddress,
			XEMACPS_NWCTRL_OFFSET,
			(XEmacPs_ReadReg((xemacpsif->emacps).Config.BaseAddress,
			XEMACPS_NWCTRL_OFFSET) | XEMACPS_NWCTRL_STARTTX_MASK));
	dsb();
	mtcpsr(lev);
	return Status;
}
/*
 * Legacy RX-done interrupt handler: pull all completed BDs from
 * hardware, trim each pbuf to the received length, invalidate the cached
 * buffer window, queue the pbuf for the RX thread, then free the BDs and
 * re-arm the ring via _setup_rx_bds().
 *
 * @param arg  the struct xemac_s registered with the interrupt controller
 */
void emacps_recv_handler(void *arg)
{
	struct pbuf *p;
	unsigned irq_status, i;		/* NOTE(review): unused */
	XEmacPs_Bd *rxbdset, *CurBdPtr;
	struct xemac_s *xemac;
	xemacpsif_s *xemacpsif;
	XEmacPs_BdRing *rxring;
	volatile int bd_processed;
	int rx_bytes, k;
	unsigned int BdIndex;

	xemac = (struct xemac_s *)(arg);
	xemacpsif = (xemacpsif_s *)(xemac->state);
	rxring = &XEmacPs_GetRxRing(&xemacpsif->emacps);

#ifdef OS_IS_FREERTOS
	xInsideISR++;
#endif
	/*
	 * If Reception done interrupt is asserted, call RX call back function
	 * to handle the processed BDs and then raise the according flag.
	 */
	while(1) {
		bd_processed = XEmacPs_BdRingFromHwRx(rxring, XLWIP_CONFIG_N_RX_DESC, &rxbdset);
		if (bd_processed <= 0) {
#ifdef OS_IS_FREERTOS
			xInsideISR--;
#endif
			return;
		}

		for (k = 0, CurBdPtr=rxbdset; k < bd_processed; k++) {
			BdIndex = XEMACPS_BD_TO_INDEX(rxring, CurBdPtr);
			p = (struct pbuf *)rx_pbufs_storage[BdIndex];

			/*
			 * Adjust the buffer size to the actual number of bytes received.
			 */
			rx_bytes = XEmacPs_BdGetLength(CurBdPtr);
			pbuf_realloc(p, rx_bytes);
			/* Invalidate the whole frame-sized window so the CPU reads
			 * DMA-written data rather than stale cache lines. */
			Xil_DCacheInvalidateRange((unsigned int)p->payload,
					(unsigned)XEMACPS_MAX_FRAME_SIZE);

			/* store it in the receive queue,
			 * where it'll be processed by a different handler;
			 * drop the frame if the queue is full */
			if (pq_enqueue(xemacpsif->recv_q, (void*)p) < 0) {
#if LINK_STATS
				lwip_stats.link.memerr++;
				lwip_stats.link.drop++;
#endif
				pbuf_free(p);
			} else {
#if !NO_SYS
				sys_sem_signal(&xemac->sem_rx_data_available);
#endif
			}
			CurBdPtr = XEmacPs_BdRingNext( rxring, CurBdPtr);
		}
		/* free up the BD's */
		XEmacPs_BdRingFree(rxring, bd_processed, rxbdset);
		/* Immediately re-arm the ring with fresh pbufs. */
		_setup_rx_bds(rxring);
	}
	return;
}