/*
 * LPLD_ENET_MacRecv
 * Receive one Ethernet frame from the RX buffer-descriptor ring.
 *
 * Params:
 *   *ch  - destination buffer the received frame is copied into.
 *   *len - receives the frame length in bytes (left 0 when no frame).
 *
 * Returns:
 *   0 - a frame was found and copied out.
 *   1 - the whole ring is empty, nothing received.
 */
uint8 LPLD_ENET_MacRecv(uint8 *ch, uint16 *len)
{
    uint8 *frame;

    *len = 0;

    /* Scan the ring from the start for a descriptor the MAC has filled
       (hardware clears the Empty flag when a frame lands in it). */
    uxNextRxBuffer = 0;
    while ((xENETRxDescriptors[uxNextRxBuffer].status & RX_BD_E) != 0)
    {
        if (++uxNextRxBuffer >= CFG_NUM_ENET_RX_BUFFERS)
        {
            uxNextRxBuffer = 0;
            return 1;   /* every descriptor still empty: no frame pending */
        }
    }

    /* Descriptor fields are stored big-endian for the MAC: byte-swap the
       length (16-bit) and the data pointer (32-bit) before using them. */
    *len = __REVSH(xENETRxDescriptors[uxNextRxBuffer].length);
    frame = (uint8 *)__REV((uint32)xENETRxDescriptors[uxNextRxBuffer].data);
    memcpy((void *)ch, (void *)frame, *len);

    /* Hand the descriptor back to the MAC by SETTING the Empty flag again
       (|= keeps the Wrap bit intact), then restart RX DMA. */
    xENETRxDescriptors[uxNextRxBuffer].status |= RX_BD_E;
    ENET_RDAR = ENET_RDAR_RDAR_MASK;

    return 0;
}
/*
 * vEMACWrite
 * Queue the frame currently held in uip_buf for transmission.  This port
 * deliberately writes the same frame into BOTH Tx descriptors (0 and 1);
 * descriptor 1 finishing therefore means the previous frame is fully sent.
 * No parameters, no return value; on exit uip_buf points at a fresh buffer.
 */
void vEMACWrite( void )
{
long x;

	/* Wait until the second transmission of the last packet has completed:
	   poll the Ready bit of descriptor 1 with a bounded number of delays. */
	for( x = 0; x < emacTX_WAIT_ATTEMPTS; x++ )
	{
		if( ( xTxDescriptors[ 1 ].status & TX_BD_R ) != 0 )
		{
			/* Descriptor is still active (owned by the MAC). */
			vTaskDelay( emacTX_WAIT_DELAY_ms );
		}
		else
		{
			break;
		}
	}

	/* Is the descriptor free after waiting for it? */
	if( ( xTxDescriptors[ 1 ].status & TX_BD_R ) != 0 )
	{
		/* Something has gone wrong.
		   NOTE(review): execution falls through and reuses the descriptors
		   immediately after the reset — confirm prvResetEverything() leaves
		   the Tx ring in a usable state. */
		prvResetEverything();
	}

	/* Setup both descriptors to transmit the frame.  Pointer and length are
	   byte-swapped because the ENET descriptors are big-endian. */
	xTxDescriptors[ 0 ].data = ( uint8_t * ) __REV( ( unsigned long ) uip_buf );
	xTxDescriptors[ 0 ].length = __REVSH( uip_len );
	xTxDescriptors[ 1 ].data = ( uint8_t * ) __REV( ( unsigned long ) uip_buf );
	xTxDescriptors[ 1 ].length = __REVSH( uip_len );

	/* uip_buf is being sent by the Tx descriptor.  Allocate a new buffer for
	   use by the stack. */
	uip_buf = prvGetNextBuffer();

	/* Clear previous settings and go: mark both descriptors Ready + Last. */
	xTxDescriptors[ 0 ].status |= ( TX_BD_R | TX_BD_L );
	xTxDescriptors[ 1 ].status |= ( TX_BD_R | TX_BD_L );

	/* Start the Tx. */
	ENET_TDAR = ENET_TDAR_TDAR_MASK;
}
/*
 * LPLD_ENET_MacSend
 * Transmit one Ethernet frame.
 *
 * Params:
 *   *ch - frame start address; must be a complete Ethernet frame including
 *         destination address, source address and type/length field.
 *   len - frame length in bytes.
 *
 * Output: none (busy-waits until the next descriptor is free).
 */
void LPLD_ENET_MacSend(uint8 *ch, uint16 len)
{
    uint16 status;

    /* Busy-wait until the MAC has released the current Tx descriptor. */
    while (xENETTxDescriptors[uxNextTxBuffer].status & TX_BD_R)
    {
    }

    /* Descriptor fields are big-endian as seen by the MAC: byte-swap the
       data pointer and length before writing them. */
    xENETTxDescriptors[uxNextTxBuffer].data = (uint8 *)__REV((uint32)ch);
    xENETTxDescriptors[uxNextTxBuffer].length = __REVSH(len);

    /* Ready + Last + append CRC.
       BUGFIX: the Wrap bit must only be set on the FINAL descriptor of the
       ring.  The original code set TX_BD_W on every descriptor, which made
       the MAC wrap back to descriptor 0 after each frame while the software
       index kept advancing — frames queued on the other descriptors were
       never transmitted (only visible when CFG_NUM_ENET_TX_BUFFERS > 1;
       behavior is unchanged for a single-entry ring). */
    status = (TX_BD_R | TX_BD_L | TX_BD_TC);
    if (uxNextTxBuffer == (CFG_NUM_ENET_TX_BUFFERS - 1))
    {
        status |= TX_BD_W;
    }
    xENETTxDescriptors[uxNextTxBuffer].status = status;

    /* Advance the software ring index. */
    uxNextTxBuffer++;
    if (uxNextTxBuffer >= CFG_NUM_ENET_TX_BUFFERS)
    {
        uxNextTxBuffer = 0;
    }

    /* Tell the MAC the Tx descriptor ring has been updated. */
    ENET_TDAR = ENET_TDAR_TDAR_MASK;
}
unsigned short usEMACRead( void ) { unsigned short usBytesReceived; usBytesReceived = prvCheckRxStatus(); usBytesReceived = __REVSH( usBytesReceived ); if( usBytesReceived > 0 ) { /* Mark the pxDescriptor buffer as free as uip_buf is going to be set to the buffer that contains the received data. */ prvReturnBuffer( uip_buf ); /* Point uip_buf to the data about to be processed. */ uip_buf = ( void * ) pxCurrentRxDesc->data; uip_buf = ( void * ) __REV( ( unsigned long ) uip_buf ); /* Allocate a new buffer to the descriptor, as uip_buf is now using it's old descriptor. */ pxCurrentRxDesc->data = ( uint8_t * ) prvGetNextBuffer(); pxCurrentRxDesc->data = ( uint8_t* ) __REV( ( unsigned long ) pxCurrentRxDesc->data ); /* Prepare the descriptor to go again. */ pxCurrentRxDesc->status |= RX_BD_E; /* Move onto the next buffer in the ring. */ ulRxDescriptorIndex++; if( ulRxDescriptorIndex >= emacNUM_RX_DESCRIPTORS ) { ulRxDescriptorIndex = 0UL; } pxCurrentRxDesc = &( xRxDescriptors[ ulRxDescriptorIndex ] ); /* Restart Ethernet if it has stopped */ ENET_RDAR = ENET_RDAR_RDAR_MASK; } return usBytesReceived; }
/*
 * enet_fill_txbds
 * Split one outgoing packet across as many Tx buffer descriptors as needed
 * (TX_BUFFER_SIZE bytes each) and mark them ready for the MAC.
 *
 * Params:
 *   ch        - channel number (currently unused by this function).
 *   tx_packet - packet to send; ->length and ->data must be valid.
 *
 * Advances the global next_txbd index; does not start transmission itself.
 */
void enet_fill_txbds(int ch, NBUF * tx_packet)
{
	int num_txbds, i;
	int index_txbd;

	/* Number of full buffers, rounded up below for any remainder. */
	num_txbds = (tx_packet->length/TX_BUFFER_SIZE);
	index_txbd = next_txbd;

	if((num_txbds * TX_BUFFER_SIZE) < tx_packet->length)
	{
		num_txbds = num_txbds + 1;
	}

	// Fill Descriptors
	for (i = 0; i < num_txbds; i++)
	{
		/* Ready + append CRC; note this overwrites any Wrap bit, which is
		   re-added below when the ring end is reached. */
		TxNBUF[index_txbd].status = TX_BD_TC | TX_BD_R;
#ifdef ENHANCED_BD
		TxNBUF[index_txbd].bdu = 0x00000000;
		TxNBUF[index_txbd].ebd_status = TX_BD_INT | TX_BD_TS;// | TX_BD_IINS | TX_BD_PINS;
#endif
		if(i == num_txbds - 1)
		{
			/* Last descriptor carries the remaining byte count. */
#ifdef NBUF_LITTLE_ENDIAN
			TxNBUF[index_txbd].length = __REVSH((tx_packet->length - (i*TX_BUFFER_SIZE)));
#else
			TxNBUF[index_txbd].length = (tx_packet->length - (i*TX_BUFFER_SIZE));
#endif
			// Set the Last bit on the last BD
			TxNBUF[index_txbd].status |= TX_BD_L;
		}
		else
		{
#ifdef NBUF_LITTLE_ENDIAN
			TxNBUF[index_txbd].length = __REVSH(TX_BUFFER_SIZE);
#else
			TxNBUF[index_txbd].length = TX_BUFFER_SIZE;
#endif
		}

#ifdef USE_DEDICATED_TX_BUFFERS
#ifdef NBUF_LITTLE_ENDIAN
		/* Copy data into the descriptor's own Tx buffer; pointer and length
		   are stored byte-swapped, so swap them back before use (__REVSH of
		   the stored value recovers the original length). */
		memcpy((void *)__REV((uint32_t)TxNBUF[index_txbd].data),
		       (void *)(((uint32_t)(tx_packet->data)) + (i*TX_BUFFER_SIZE)),
		       __REVSH(TxNBUF[index_txbd].length));
#else
		// Copy data to Tx buffers
		memcpy((void *)(uint32_t)TxNBUF[index_txbd].data,
		       (void *)(((uint32_t)(tx_packet->data)) + (i*TX_BUFFER_SIZE)),
		       TxNBUF[index_txbd].length);
#endif
#else
		/* Zero-copy: just point the descriptor at the packet data, which is
		   already in place. */
#ifdef NBUF_LITTLE_ENDIAN
		TxNBUF[index_txbd].data = (uint8_t *)__REV((((uint32_t)(tx_packet->data)) + (i*TX_BUFFER_SIZE)));
#else
		TxNBUF[index_txbd].data = (uint8_t *)(((uint32_t)(tx_packet->data)) + (i*TX_BUFFER_SIZE));
#endif
#endif

		// Wrap if this was last TxBD
		if(++index_txbd == NUM_TXBDS)
		{
			TxNBUF[NUM_TXBDS - 1].status |= TX_BD_W;
			index_txbd = 0;
		}
	}

	// Update the global txbd index
	next_txbd = index_txbd;
}
/*
 * enet_get_received_packet
 * Consume one received frame from the Rx descriptor ring: record its data
 * pointer, length and status in rx_packet, then mark every descriptor of
 * the frame Empty again and advance the global next_rxbd index.
 *
 * Params:
 *   ch        - channel number (currently unused by this function).
 *   rx_packet - output; filled with frame data pointer/length/status.
 *
 * NOTE(review): only the FIRST descriptor's data pointer is recorded; a
 * multi-descriptor frame therefore assumes the ring's buffers are laid out
 * contiguously in memory — confirm against the buffer allocation code.
 */
void enet_get_received_packet(int ch, NBUF * rx_packet)
{
	int last_buffer;
	uint16_t status;
	int index_rxbd;

	last_buffer = 0;
	rx_packet->length = 0;
	index_rxbd = next_rxbd;

	/* If the Empty bit is still set the MAC has not finished this BD. */
	if(RxNBUF[index_rxbd].status & RX_BD_E)
	{
		printf("Under processing. SHouldnt be here\n");
		return;
	}

	/* Data pointer is stored byte-swapped in the descriptor. */
#ifdef NBUF_LITTLE_ENDIAN
	rx_packet->data = (uint8_t *)__REV((uint32_t)RxNBUF[index_rxbd].data);
#else
	rx_packet->data = (uint8_t *)(uint32_t)RxNBUF[index_rxbd].data;
#endif

	// Update next_rxbd pointer and mark buffers as empty again
	while(!last_buffer)
	{
		status = RxNBUF[index_rxbd].status;
		/* Overwritten each pass on purpose: the BD carrying the Last bit
		   holds the TOTAL frame length, so the final value is correct. */
#ifdef NBUF_LITTLE_ENDIAN
		rx_packet->length = __REVSH(RxNBUF[index_rxbd].length);
#else
		rx_packet->length = RxNBUF[index_rxbd].length;
#endif
#ifdef ENHANCED_BD
		/* Enhanced-descriptor extras (timestamp, checksum, protocol info). */
		rx_packet->ebd_status = RxNBUF[index_rxbd].ebd_status;
#ifdef NBUF_LITTLE_ENDIAN
		rx_packet->timestamp = __REV(RxNBUF[index_rxbd].timestamp);
		rx_packet->length_proto_type = __REVSH(RxNBUF[index_rxbd].length_proto_type);
		rx_packet->payload_checksum = __REVSH(RxNBUF[index_rxbd].payload_checksum);
#else
		rx_packet->timestamp = RxNBUF[index_rxbd].timestamp;
		rx_packet->length_proto_type = RxNBUF[index_rxbd].length_proto_type;
		rx_packet->payload_checksum = RxNBUF[index_rxbd].payload_checksum;
#endif
#endif
		last_buffer = (status & RX_BD_L);
		/* Return the BD to the MAC: keep Wrap on the ring's last entry. */
		if(status & RX_BD_W)
		{
			RxNBUF[index_rxbd].status = (RX_BD_W | RX_BD_E);
			index_rxbd = 0;
		}
		else
		{
			RxNBUF[index_rxbd].status = RX_BD_E;
			index_rxbd++;
		}
	}

	// Update the global rxbd index
	next_rxbd = index_rxbd;

	// Put the last BD status in rx_packet->status as MISS flags and more
	// are updated in last BD
	rx_packet->status = status;
}
static struct pbuf* low_level_input(struct netif *netif) { u16_t l, temp_l; struct pbuf *first_pbuf, *next_pbuf, *q; u16_t len; #ifdef ENET_LITTLE_ENDIAN u8_t *data_temp; #endif u8_t more_pkts = 1, processing_error = 0; (void)netif; /* initial pkt handling */ if (!(rx_bd[rx_next_buf].status & ENET_RX_BD_E)) { /* if pkt is filled */ if (rx_bd[rx_next_buf].status & ENET_RX_BD_L) { more_pkts = 0; if (rx_bd[rx_next_buf].status & (ENET_RX_BD_LG | ENET_RX_BD_NO | ENET_RX_BD_CR | ENET_RX_BD_OV)) { /* bad packet */ LINK_STATS_INC(link.memerr); LINK_STATS_INC(link.drop); goto EXIT_RX_PKT; } else { #ifdef ENET_LITTLE_ENDIAN len = __REVSH(rx_bd[rx_next_buf].length); #else len = rx_bd[rx_next_buf].length; #endif LINK_STATS_INC(link.recv); } } else /* if not L bit, then buffer's length */ len = ENET_RX_BUF_SIZE; if ((first_pbuf = pbuf_alloc(PBUF_RAW, len, PBUF_POOL)) != NULL) { /* get data */ l = 0; temp_l = 0; /* We iterate over the pbuf chain until we have read the entire * packet into the pbuf. */ for (q = first_pbuf; q != NULL; q = q->next) { /* Read enough bytes to fill this pbuf in the chain. The * available data in the pbuf is given by the q->len * variable. * This does not necessarily have to be a memcpy, you can also preallocate * pbufs for a DMA-enabled MAC and after receiving truncate it to the * actually received size. In this case, ensure the tot_len member of the * pbuf is the sum of the chained pbuf len members. 
*/ temp_l = LWIP_MIN(len, LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE)); #ifdef ENET_LITTLE_ENDIAN data_temp = (u8_t *)__REV((u32_t)rx_bd[ rx_next_buf ].data); memcpy((u8_t*)q->payload, &( data_temp[l] ), temp_l); #else memcpy((u8_t*)q->payload, &( rx_bd[ rx_next_buf ].data[l] ), temp_l); #endif l += temp_l; len -= temp_l; } } else { /* bad buffers */ LINK_STATS_INC(link.memerr); LINK_STATS_INC(link.drop); processing_error = 1; } EXIT_RX_PKT: rx_bd[rx_next_buf++].status |= ENET_RX_BD_E; /* consumed pkt */ ENET_RDAR = ENET_RDAR_RDAR_MASK; if (rx_next_buf >= NUM_ENET_RX_BUFS) rx_next_buf = 0; } else return (struct pbuf*)NULL; /* special NULL case */ /* more pkts handling */ while (more_pkts) { //if(!(rx_bd[ rx_next_buf ].status & RX_BD_E) ) ///*if pkt is filled*/ //{ if (rx_bd[rx_next_buf].status & ENET_RX_BD_L) { more_pkts = 0; if (rx_bd[rx_next_buf].status & (ENET_RX_BD_LG | ENET_RX_BD_NO | ENET_RX_BD_CR | ENET_RX_BD_OV)) { /* bad packet */ LINK_STATS_INC(link.memerr); LINK_STATS_INC(link.drop); goto EXIT_RX_PKT2; } else { #ifdef ENET_LITTLE_ENDIAN len = __REVSH(rx_bd[rx_next_buf].length); #else len = rx_bd[rx_next_buf].length; #endif /* buffer with L bit has total frame's length instead of remaining bytes from frame's lenght */ len %= ENET_RX_BUF_SIZE; LINK_STATS_INC(link.recv); } } else /* if not L bit, then buffer's length */ len = ENET_RX_BUF_SIZE; if (((next_pbuf = pbuf_alloc(PBUF_RAW, len, PBUF_POOL)) != NULL) && (!processing_error)) { /* get data */ l = 0; temp_l = 0; /* We iterate over the pbuf chain until we have read the entire * packet into the pbuf. */ for (q = next_pbuf; q != NULL; q = q->next) { /* Read enough bytes to fill this pbuf in the chain. The * available data in the pbuf is given by the q->len * variable. * This does not necessarily have to be a memcpy, you can also preallocate * pbufs for a DMA-enabled MAC and after receiving truncate it to the * actually received size. 
In this case, ensure the tot_len member of the * pbuf is the sum of the chained pbuf len members. */ temp_l = LWIP_MIN(len, LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE)); #ifdef ENET_LITTLE_ENDIAN data_temp = (u8_t *)__REV((u32_t)rx_bd[rx_next_buf].data); memcpy((u8_t*)q->payload, &(data_temp[l]), temp_l); #else memcpy((u8_t*)q->payload, &(rx_bd[rx_next_buf].data[l] ), temp_l); #endif l += temp_l; len -= temp_l; } /* link pbufs */ pbuf_cat(first_pbuf, next_pbuf); } else { /* bad buffer - out of lwip buffers */ LINK_STATS_INC(link.memerr); LINK_STATS_INC(link.drop); processing_error = 1; } EXIT_RX_PKT2: rx_bd[rx_next_buf++].status |= ENET_RX_BD_E; /* consumed pkt */ ENET_RDAR = ENET_RDAR_RDAR_MASK; if (rx_next_buf >= NUM_ENET_RX_BUFS) rx_next_buf = 0; } return first_pbuf; }
/**
 * Should allocate a pbuf and transfer the bytes of the incoming
 * packet from the interface into the pbuf.
 *
 * @param netif the lwip network interface structure for this ethernetif
 * @return a pbuf filled with the received packet (including MAC header)
 *         NULL on memory error or when no packet is pending
 */
static struct pbuf* low_level_input(struct netif *netif)
{
	u32_t l = 0;
	struct pbuf *p = NULL, *q = NULL;
	u16_t len;
#ifdef ENET_LITTLE_ENDIAN
	u8_t *data_temp;
#endif

	(void)netif;

	/* Obtain the size of the packet and put it into the "len" variable.
	   The descriptor stores it big-endian on little-endian builds. */
#ifdef ENET_LITTLE_ENDIAN
	len = __REVSH(rx_bd[rx_next_buf].length);
#else
	len = rx_bd[rx_next_buf].length;
#endif

	/* Only proceed when the descriptor holds data (Empty flag cleared). */
	if ((len != 0) && ((rx_bd[rx_next_buf].status & RX_BD_E) == 0))
	{
#if (ENET_HARDWARE_SHIFT==0)
#if ETH_PAD_SIZE
		len += ETH_PAD_SIZE; /* allow room for Ethernet padding */
#endif
#endif
		/* We allocate a pbuf chain of pbufs from the pool. */
		p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL);
		if (p != NULL)
		{
#if (ENET_HARDWARE_SHIFT==0)
#if ETH_PAD_SIZE
			pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif
#endif
			/* We iterate over the pbuf chain until we have read the entire
			 * packet into the pbuf; each q->len tells how much fits here. */
			for (q = p; q != NULL; q = q->next)
			{
#ifdef ENET_LITTLE_ENDIAN
				/* The descriptor's data pointer is stored byte-swapped. */
				data_temp = (u8_t*)__REV((u32_t)rx_bd[rx_next_buf].data);
				memcpy((u8_t*)q->payload, &(data_temp[l]), q->len);
#else
				memcpy((u8_t*)q->payload, &(rx_bd[rx_next_buf].data[l]), q->len);
#endif
				l = l + q->len;
			}
#if (ENET_HARDWARE_SHIFT==0)
#if ETH_PAD_SIZE
			pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif
#endif
			LINK_STATS_INC(link.recv);
		}
		else
		{
			/* drop packet: pool exhausted */
			LINK_STATS_INC(link.memerr);
			LINK_STATS_INC(link.drop);
		}
		/* acknowledge that packet has been read: return the descriptor to
		   the MAC, restart RX DMA, and advance around the ring. */
		rx_bd[rx_next_buf].status |= RX_BD_E;
		ENET_RDAR = ENET_RDAR_RDAR_MASK;
		rx_next_buf++;
		if (rx_next_buf >= NUM_ENET_RX_BUFS)
		{
			rx_next_buf = 0;
		}
	}

	return p;
}
/**
 * This function should do the actual transmission of the packet. The packet is
 * contained in the pbuf that is passed to the function. This pbuf
 * might be chained.
 *
 * @param netif the lwip network interface structure for this ethernetif
 * @param p the MAC packet to send (e.g. IP packet including MAC addresses and type)
 * @return ERR_OK if the packet could be sent
 *         ERR_BUF if no Tx descriptor became free within BUF_WAIT_ATTEMPTS
 *
 * @note Returning ERR_MEM here if a DMA queue of your MAC is full can lead to
 *       strange results. You might consider waiting for space in the DMA queue
 *       to become availale since the stack doesn't retry to send a packet
 *       dropped because of memory failure (except for the TCP timers).
 */
static err_t low_level_output(struct netif *netif, struct pbuf *p)
{
	struct pbuf *q;
	u16_t l = 0;
	uint8_t *buf = NULL;
	int i;

	//initiate transfer();

#if 0 == ENET_HARDWARE_SHIFT
#if ETH_PAD_SIZE
	pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif
#endif

	/* Get a DMA buffer into which we can write the data to send: poll the
	   next descriptor's Ready bit a bounded number of times. */
	for (i = 0; i < BUF_WAIT_ATTEMPTS; i++)
	{
		if (tx_bd[tx_next_buf].status & ENET_TX_BD_R)
		{
			/* wait for the buffer to become available */
			CoTickDelay(BUF_WAIT_DELAY);
		}
		else
		{
			/* the descriptor's data pointer is stored byte-swapped on
			   little-endian builds */
#ifdef ENET_LITTLE_ENDIAN
			buf = (uint8_t*)__REV((uint32_t)tx_bd[tx_next_buf].data);
#else
			buf = tx_bd[tx_next_buf].data;
#endif
			break;
		}
	}

	if (NULL == buf)
	{
		/* descriptor never freed up: give up rather than block forever */
		return ERR_BUF;
	}
	else
	{
		/* Flatten the (possibly chained) pbuf into the DMA buffer; each
		   q->len gives the bytes held by that pbuf. */
		for (q = p; q != NULL; q = q->next)
		{
			memcpy(&buf[l], (u8_t*)q->payload, q->len);
			l += q->len;
		}
	}

	//signal that packet should be sent();

	/* Setup the buffer descriptor for transmission (length byte-swapped). */
#ifdef ENET_LITTLE_ENDIAN
	tx_bd[tx_next_buf].length = __REVSH(l);//nbuf->length + ETH_HDR_LEN;
#else
	tx_bd[tx_next_buf].length = l;//nbuf->length + ETH_HDR_LEN;
#endif
	tx_bd[tx_next_buf].status |= (ENET_TX_BD_R | ENET_TX_BD_L);
	tx_next_buf++;
	if (tx_next_buf >= NUM_ENET_TX_BUFS)
	{
		tx_next_buf = 0;
	}

#if 0 == ENET_HARDWARE_SHIFT
#if ETH_PAD_SIZE
	pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif
#endif

	LINK_STATS_INC(link.xmit);

	/* only one task can be here. wait until pkt is sent, then go ahead */
	/* semaphore released inside isr */
	/* start expiring semaphore: no more than 3 ticks */
	/* no blocking code */
	CoPendSem(sem_tx, 3);

	/* Request xmit process to MAC-NET */
	ENET_TDAR = ENET_TDAR_TDAR_MASK;

	return ERR_OK;
}