/*
 * Deliver a received frame to every consumer attached to this frontend:
 * the optional lwIP netif and the optional Erlang outlet.
 */
static void netfe_incoming(netfe_t *fe, uint8_t *packet, int pack_len)
{
#ifdef EXP_LINC_LATENCY
	// Latency probe: time frames whose IP DSCP:ECN byte equals 42
	// (6+6+2 = Ethernet header, +1 = second byte of the IP header).
	if (pack_len >= 6 +6 +2 +20 && packet[6 +6 +2 +1] == 42)
		linc_incoming(fe->index);
#endif // EXP_LINC_LATENCY

	// Hand the frame to the attached lwIP interface, if any.
	struct netif *lwip_nif = fe->attached_lwip_netif;
	if (lwip_nif != 0)
	{
		LINK_STATS_INC(link.recv);
		struct pbuf *pb = packet_to_pbuf(packet, pack_len);
		if (pb == 0)
		{
			// No pbuf available - count the drop.
			LINK_STATS_INC(link.memerr);
			LINK_STATS_INC(link.drop);
		}
		else if (lwip_nif->input(pb, lwip_nif) != ERR_OK)
		{
			printk("netfe_incoming: input error\n");
			pbuf_free(pb);
		}
	}

	// Also pass the raw frame to the attached outlet, if any.
	if (fe->attached_outlet != 0)
		outlet_new_data(fe->attached_outlet, packet, pack_len);
}
/** * This function should do the actual transmission of the packet. The packet is * contained in the pbuf that is passed to the function. This pbuf * might be chained. * * @param netif the lwip network interface structure for this ethernetif * @param p the MAC packet to send (e.g. IP packet including MAC addresses and type) * @return ERR_OK if the packet could be sent * an err_t value if the packet couldn't be sent * * @note Returning ERR_MEM here if a DMA queue of your MAC is full can lead to * strange results. You might consider waiting for space in the DMA queue * to become availale since the stack doesn't retry to send a packet * dropped because of memory failure (except for the TCP timers). */ static err_t low_level_output(struct netif *netif, struct pbuf *p) { struct pbuf *q; u32_t l = 0; unsigned char *pcTxData; #if ETH_PAD_SIZE pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */ #endif /* Get a DMA buffer into which we can write the data to send. */ for( int i = 0; i < netifBUFFER_WAIT_ATTEMPTS; i++ ) { pcTxData = pcGetNextBuffer(); if( pcTxData ) { break; } else { vTaskDelay( netifBUFFER_WAIT_DELAY ); } } if (pcTxData == NULL) { portNOP(); return ERR_BUF; } else { for(q = p; q != NULL; q = q->next) { /* Send the data from the pbuf to the interface, one pbuf at a time. The size of the data in each pbuf is kept in the ->len variable. */ vTaskSuspendAll(); memcpy(&pcTxData[l], (u8_t*)q->payload, q->len); xTaskResumeAll(); l += q->len; } } ENET_TxPkt( &pcTxData, l ); #if ETH_PAD_SIZE pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */ #endif LINK_STATS_INC(link.xmit); return ERR_OK; }
/*
 * Bluetooth PAN data callback: wrap the received frame in a pbuf and
 * hand it to the lwIP input function for this interface.
 */
static void handleDataEvt(cbBCM_Handle connHandle, cb_uint8* pData, cb_uint16 length)
{
    (void)connHandle;
    struct pbuf* pbuf;
    struct netif* netif = &panIf.hInterface;

    pbuf = (struct pbuf*)cbIP_allocDataFrame(length);
    MBED_ASSERT(pbuf != NULL);
    /* BUG FIX: MBED_ASSERT is compiled out in release builds, so a failed
       allocation previously fell through to a NULL dereference. Drop the
       frame instead. */
    if (pbuf == NULL) {
        LINK_STATS_INC(link.memerr);
        LINK_STATS_INC(link.drop);
        return;
    }

    cb_boolean status = cbIP_copyToDataFrame((cbIP_frame*)pbuf, pData, length, 0);
    MBED_ASSERT(status);

    panIf.statusCallback(cbIP_NETWORK_ACTIVITY, NULL, NULL, panIf.callbackArg);

    /* BUG FIX: if netif->input rejects the packet, lwIP has NOT taken
       ownership and the pbuf must be freed here to avoid a leak. */
    if (netif->input(pbuf, netif) != ERR_OK) {
        pbuf_free(pbuf);
        LINK_STATS_INC(link.drop);
        return;
    }
    LINK_STATS_INC(link.recv);
}
/*
 * Transmit a (possibly chained) pbuf on the AVR32 MACB.
 * Under FreeRTOS, access to the MACB is serialized with a lazily created
 * binary semaphore; without FreeRTOS the send runs unguarded.
 *
 * NOTE(review): if xSemaphoreTake times out, nothing is transmitted but
 * the function still returns ERR_OK and counts link.xmit — the caller
 * cannot tell the packet was dropped. Confirm whether this is intended.
 */
static err_t low_level_output(struct netif *netif, struct pbuf *p)
{
	struct pbuf *q;
#ifdef FREERTOS_USED
	/* Created on first use; guards the MACB against concurrent senders. */
	static xSemaphoreHandle xTxSemaphore = NULL;
#endif

	( void )netif; // Unused param, avoid a compiler warning.

#ifdef FREERTOS_USED
	if( xTxSemaphore == NULL )
	{
		vSemaphoreCreateBinary( xTxSemaphore );
	}
#endif

#if ETH_PAD_SIZE
	pbuf_header( p, -ETH_PAD_SIZE ); /* drop the padding word */
#endif

#ifdef FREERTOS_USED
	/* Access to the MACB is guarded using a semaphore. */
	if( xSemaphoreTake( xTxSemaphore, netifGUARD_BLOCK_NBTICKS ) )
	{
#endif
		for( q = p; q != NULL; q = q->next )
		{
			/* Send the data from the pbuf to the interface, one pbuf at a
			time. The size of the data in each pbuf is kept in the ->len
			variable. The last-pbuf flag (q->next == NULL) signals the MACB
			that the packet should be sent. */
			lMACBSend(&AVR32_MACB, q->payload, q->len, ( q->next == NULL ) );
		}
#ifdef FREERTOS_USED
		xSemaphoreGive( xTxSemaphore );
	}
#endif

#if ETH_PAD_SIZE
	pbuf_header( p, ETH_PAD_SIZE ); /* reclaim the padding word */
#endif

	LINK_STATS_INC(link.xmit);

	// Traces

	return ERR_OK;
}
static err_t low_level_output(struct netif *netif, struct pbuf *p) { struct pbuf *q = NULL; int l = 0; #if ETH_PAD_SIZE pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */ #endif pin_toggle(); if (g_bTimerStopped) { csi_kernel_timer_stop(timer_send_handle); csi_kernel_timer_start(timer_send_handle, csi_kernel_ms2tick(10000)); g_bTimerStopped = 0; } csi_eth_mac_ex_send_frame_begin(eth_mac_handle, p->tot_len); for (q = p; q != NULL; q = q->next) { csi_eth_mac_ex_send_frame(eth_mac_handle, q->payload, q->len, 0); l = l + q->len; } csi_eth_mac_ex_send_frame_end(eth_mac_handle); pin_toggle(); MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p->tot_len); if (((u8_t *)p->payload)[0] & 1) { /* broadcast or multicast packet */ MIB2_STATS_NETIF_INC(netif, ifoutnucastpkts); } else { /* unicast packet */ MIB2_STATS_NETIF_INC(netif, ifoutucastpkts); } /* increase ifoutdiscards or ifouterrors on error */ #if ETH_PAD_SIZE pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */ #endif LINK_STATS_INC(link.xmit); return ERR_OK; }
/** * Should allocate a pbuf and transfer the bytes of the incoming * packet from the interface into the pbuf. * * @param pxNetIf the lwip network interface structure for this ethernetif * @return a pbuf filled with the received packet (including MAC header) * NULL on memory error */ static struct pbuf *prvLowLevelInput( const unsigned char * const pucInputData, long lDataLength ) { struct pbuf *p = NULL, *q; if( lDataLength > 0 ) { #if ETH_PAD_SIZE len += ETH_PAD_SIZE; /* allow room for Ethernet padding */ #endif /* We allocate a pbuf chain of pbufs from the pool. */ p = pbuf_alloc( PBUF_RAW, lDataLength, PBUF_POOL ); if( p != NULL ) { #if ETH_PAD_SIZE pbuf_header( p, -ETH_PAD_SIZE ); /* drop the padding word */ #endif /* We iterate over the pbuf chain until we have read the entire * packet into the pbuf. */ lDataLength = 0; for( q = p; q != NULL; q = q->next ) { /* Read enough bytes to fill this pbuf in the chain. The * available data in the pbuf is given by the q->len * variable. * This does not necessarily have to be a memcpy, you can also preallocate * pbufs for a DMA-enabled MAC and after receiving truncate it to the * actually received size. In this case, ensure the usTotalLength member of the * pbuf is the sum of the chained pbuf len members. */ memcpy( q->payload, &( pucInputData[ lDataLength ] ), q->len ); lDataLength += q->len; } #if ETH_PAD_SIZE pbuf_header( p, ETH_PAD_SIZE ); /* reclaim the padding word */ #endif LINK_STATS_INC( link.recv ); } } return p; }
/*
 * lwIP linkoutput hook: write each pbuf in the chain to the Ethernet
 * driver and trigger the send.
 *
 * @return ERR_OK always (the underlying write/send report no status).
 */
static err_t eth_output(struct netif *netif, struct pbuf *p)
{
	struct pbuf *q;

#if ETH_PAD_SIZE
	pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif

	/* BUG FIX: the original advanced 'p' itself while walking the chain,
	   leaving p == NULL afterwards; with ETH_PAD_SIZE != 0 the final
	   pbuf_header(p, ETH_PAD_SIZE) then dereferenced a NULL pointer.
	   Walk the chain with a separate cursor instead. */
	for (q = p; q != NULL; q = q->next) {
		pEth->write((const char *)q->payload, q->len);
	}
	pEth->send();

#if ETH_PAD_SIZE
	pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif

	LINK_STATS_INC(link.xmit);
	return ERR_OK;
}
/** * This function should do the actual transmission of the packet. The packet is * contained in the pbuf that is passed to the function. This pbuf * might be chained. * * @param netif the lwip network interface structure for this ethernetif * @param p the MAC packet to send (e.g. IP packet including MAC addresses and type) * @return ERR_OK if the packet could be sent * an err_t value if the packet couldn't be sent * * @note Returning ERR_MEM here if a DMA queue of your MAC is full can lead to * strange results. You might consider waiting for space in the DMA queue * to become availale since the stack doesn't retry to send a packet * dropped because of memory failure (except for the TCP timers). */ static err_t low_level_output(struct netif *netif, struct pbuf *p) { #if ETH_PAD_SIZE pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */ #endif //acoral_prints("\r\nlowLOutput=sta\r\n"); //hw_sendPacket(p); acoral_dev_write(net_dev_id, p, 0, 0, 0); //acoral_prints("\r\nlowLOutput=end\r\n"); #if ETH_PAD_SIZE pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */ #endif LINK_STATS_INC(link.xmit); return ERR_OK; }
/*
 * lwIP linkoutput hook: gather the pbuf chain into a TCPIP_PACKET_INFO_T
 * scatter/gather descriptor and pass it to the Wi-Fi driver's transmit
 * routine.
 *
 * Returns ERR_OK on success (or when no xmit handler is installed),
 * ERR_IF when the driver rejects the packet.
 */
static err_t low_level_output(struct netif *netif, struct pbuf *p)
{
	struct ethernetif *ethernetif = netif->state;
	struct pbuf *q;
	uint8 result;
	uint8 index = 0;
	err_t err = ERR_OK;
	TCPIP_PACKET_INFO_T tcpip_packet;
	TCPIP_PACKET_INFO_T *p_packet = &tcpip_packet;

#if ETH_PAD_SIZE
	pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif

	p_packet->packet_len = 0;
	/* NOTE(review): 'index' is not bounded against the capacity of the
	   data_ptr[]/data_len[] arrays; a pbuf chain longer than that array
	   would overflow it ('index' also wraps at 256). Confirm the array
	   size covers the worst-case chain length. */
	for(q = p; q != NULL; q = q->next) {
		/* One scatter entry per pbuf; sizes come from q->len. */
		p_packet->data_ptr[index] = q->payload;
		p_packet->data_len[index] = q->len;
		p_packet->packet_len += q->len;
		index++;
	}
	p_packet->pbuf_num = index;

	if (ethernetif->tcpip_wifi_xmit) {
		result = ethernetif->tcpip_wifi_xmit(&tcpip_packet);
		if (result != 0) {
			DEBUG_ERROR("the xmit packet is dropped!");
			err = ERR_IF;
		}
	}

#if ETH_PAD_SIZE
	pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif

	/* NOTE(review): link.xmit is incremented even on the ERR_IF path. */
	LINK_STATS_INC(link.xmit);
	return err;
}
static err_t low_level_output(struct netif *netif, struct pbuf *p) { struct pbuf *q; static xSemaphoreHandle xTxSemaphore = NULL; err_t xReturn = ERR_OK; ( void )netif; // Unused param, avoid a compiler warning. if( xTxSemaphore == NULL ) { vSemaphoreCreateBinary( xTxSemaphore ); } #if ETH_PAD_SIZE pbuf_header( p, -ETH_PAD_SIZE ); /* drop the padding word */ #endif /* Access to the MACB is guarded using a semaphore. */ if( xSemaphoreTake( xTxSemaphore, netifGUARD_BLOCK_NBTICKS ) ) { for( q = p; q != NULL; q = q->next ) { /* Send the data from the pbuf to the interface, one pbuf at a time. The size of the data in each pbuf is kept in the ->len variable. if q->next == NULL then this is the last pbuf in the chain. */ if( !lMACBSend(&AVR32_MACB, q->payload, q->len, ( q->next == NULL ) ) ) { xReturn = ~ERR_OK; } } xSemaphoreGive( xTxSemaphore ); } #if ETH_PAD_SIZE pbuf_header( p, ETH_PAD_SIZE ); /* reclaim the padding word */ #endif LINK_STATS_INC(link.xmit); // Traces return ERR_OK; }
/** * \brief This function should do the actual transmission of the packet. The * packet is contained in the pbuf that is passed to the function. This pbuf * might be chained. * note: Returning ERR_MEM here if a DMA queue of your MAC is full can lead to * strange results. You might consider waiting for space in the DMA queue * to become available since the stack doesn't retry to send a packet * dropped because of memory failure (except for the TCP timers). * * \param netif the lwip network interface structure for this ethernetif * \param p the MAC packet to send (e.g. IP packet including MAC addresses and type) * * \return ERR_OK if the packet could be sent * an err_t value if the packet couldn't be sent. */ static err_t low_level_output(struct netif *netif, struct pbuf *p) { struct pbuf *q = NULL; int8_t pc_buf[NET_RW_BUFF_SIZE]; int8_t *bufptr = &pc_buf[0]; uint8_t uc_rc; #if ETH_PAD_SIZE pbuf_header(p, -ETH_PAD_SIZE); /* Drop the padding word */ #endif /* Check the buffer boundary */ if (p->tot_len > NET_RW_BUFF_SIZE) { return ERR_BUF; } /* Clear the output buffer */ memset(bufptr, 0x0, NET_RW_BUFF_SIZE); for (q = p; q != NULL; q = q->next) { /* Send the data from the pbuf to the interface, one pbuf at a * time. The size of the data in each pbuf is kept in the ->len * variable. */ /* Send data from(q->payload, q->len); */ memcpy(bufptr, q->payload, q->len); bufptr += q->len; } /* Signal that packet should be sent(); */ uc_rc = emac_dev_write(&gs_emac_dev, pc_buf, p->tot_len, NULL); if (uc_rc != EMAC_OK) { return ERR_BUF; } #if ETH_PAD_SIZE pbuf_header(p, ETH_PAD_SIZE); /* Reclaim the padding word */ #endif LINK_STATS_INC(link.xmit); return ERR_OK; }
/*
 * Queue an outgoing frame on the Xen netfront TX ring and kick the
 * backend if it requested a notification. Frames are dropped when no
 * TX buffer is free (there is no backpressure to the caller).
 */
void netfe_output(netfe_t *fe, uint8_t *packet, int pack_len)
{
	assert(pack_len <= ETH_MTU +ETH_HDR_LEN +ETH_CSUM_LEN);
	assert(pack_len <= PAGE_SIZE);

#ifdef EXP_LINC_LATENCY
	// see comment above
	if (pack_len >= 6 +6 +2 +20 && packet[6 +6 +2 +1] == 42)
		linc_output(fe->index);
#endif // EXP_LINC_LATENCY

	// No free TX buffer — count the drop and bail out.
	if (fe->free_tx_head == NO_TX_BUFFER)
	{
		//printk("netfe_output: packet dropped [size %d]\n", pack_len);
		LINK_STATS_INC(link.drop);
		return;
	}

	// Pop a buffer off the free list and copy the frame into it.
	int tx_buf = fe->free_tx_head;
	fe->free_tx_head = fe->free_tx_bufs[tx_buf];

	uint8_t *p = fe->tx_buffers[tx_buf];
	memcpy(p, packet, pack_len);

	// Fill in the next ring request; the grant ref shares the buffer
	// page with the backend, and id lets us reclaim it on completion.
	RING_IDX prod = fe->tx_ring.req_prod_pvt;
	netif_tx_request_t *req = RING_GET_REQUEST(&fe->tx_ring, prod);
	req->gref = fe->tx_buf_refs[tx_buf];
	req->id = tx_buf;
	req->offset = 0;
	req->flags = 0;
	req->size = pack_len;
	fe->tx_ring.req_prod_pvt = prod +1;

	wmb(); // write barrier: the request body must be visible before the producer index update

	// Notify the backend only if it asked for it.
	int notify;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&fe->tx_ring, notify);
	if (notify)
		event_kick(fe->evtchn);

	// Opportunistically reclaim TX buffers the backend is done with.
	netfe_tx_buf_gc(fe);
}
static err_t netif_output(struct netif *netif, struct pbuf *p) { LINK_STATS_INC(link.xmit); /* Update SNMP stats (only if you use SNMP) */ MIB2_STATS_NETIF_ADD(netif, ifoutoctets, p->tot_len); int unicast = ((p->payload[0] & 0x01) == 0); if (unicast) { MIB2_STATS_NETIF_INC(netif, ifoutucastpkts); } else { MIB2_STATS_NETIF_INC(netif, ifoutnucastpkts); } lock_interrupts(); pbuf_copy_partial(p, mac_send_buffer, p->tot_len, 0); /* Start MAC transmit here */ unlock_interrupts(); return ERR_OK; }
static err_t linkoutput(struct netif *netif, struct pbuf *p) { struct ethernetif *ethernetif = netif->state; struct pbuf *q; uint16_t total_len = p->tot_len; // The length of the packet. int fragcnt = 0; // The number of fragments. unsigned flags = ethernetif->ops->flags; if (flags & ETHIF_FRAGCNT) { // Pre-calculate the number of fragments for startoutput(). for(q = p; q != NULL; q = q->next) { ++fragcnt; } } if(!ethernetif->ops->startoutput(ethernetif->priv, total_len, fragcnt)) return ERR_IF; #if ETH_PAD_SIZE pbuf_header(p, -ETH_PAD_SIZE); // Drop the padding word. #endif for(q = p; q != NULL; q = q->next) { /* Send the data from the pbuf to the interface, one pbuf at a * time. The size of the data in each pbuf is kept in the ->len * variable. */ ethernetif->ops->output(ethernetif->priv, q->payload, q->len); } ethernetif->ops->endoutput(ethernetif->priv, total_len); #if ETH_PAD_SIZE pbuf_header(p, ETH_PAD_SIZE); // Reclaim the padding word. #endif LINK_STATS_INC(link.xmit); return ERR_OK; }
/*
 * BeRTOS linkoutput: push every segment of the pbuf chain into the MAC's
 * frame buffer and trigger the send, with preemption disabled for the
 * whole operation.
 */
static err_t low_level_output(UNUSED_ARG(struct netif *, netif), struct pbuf *p)
{
	struct pbuf *frag;

#if ETH_PAD_SIZE
	pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif

	proc_forbid();

	for (frag = p; frag; frag = frag->next)
		eth_putFrame(frag->payload, frag->len);
	eth_sendFrame();

#if ETH_PAD_SIZE
	pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif

	LINK_STATS_INC(link.xmit);
	proc_permit();

	return ERR_OK;
}
static err_t low_level_output(struct netif *netif, struct pbuf *p) { // struct ethernetif *ethernetif = netif->state; // struct pbuf *q; // initiate transfer(); #if ETH_PAD_SIZE pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */ #endif mac_data_xmit(p); // signal that packet should be sent(); #if ETH_PAD_SIZE pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */ #endif LINK_STATS_INC(link.xmit); return ERR_OK; }
/*
 * RT-Thread Ethernet input dispatch: examine the frame's EtherType and
 * route it to the IP or ARP handlers. pbuf ownership is handed off
 * per-branch; see the notes on each case.
 */
err_t eth_input(struct pbuf *p, struct netif *inp)
{
	struct eth_hdr *ethhdr;

	if(p != RT_NULL)
	{
#ifdef LINK_STATS
		LINK_STATS_INC(link.recv);
#endif /* LINK_STATS */

		ethhdr = p->payload;

		/* Type field is in network byte order; htons == ntohs for 16 bits. */
		switch(htons(ethhdr->type))
		{
		case ETHTYPE_IP:
			/* Let ARP learn the sender's IP/MAC mapping first. */
			etharp_ip_input(inp, p);
			/* Strip the Ethernet header, then hand the IP packet to the
			   stack; on failure the pbuf must be freed here. */
			pbuf_header(p, -((rt_int16_t)sizeof(struct eth_hdr)));
			if (tcpip_input(p, inp) != ERR_OK)
			{
				/* discard packet */
				pbuf_free(p);
			}
			break;

		case ETHTYPE_ARP:
			/* NOTE(review): no pbuf_free here — etharp_arp_input is
			   presumably expected to consume/free the pbuf itself;
			   confirm against the lwIP version in use. */
			etharp_arp_input(inp, (struct eth_addr *)inp->hwaddr, p);
			break;

		default:
			/* Unknown EtherType: drop the frame. */
			pbuf_free(p);
			p = RT_NULL;
			break;
		}
	}

	return ERR_OK;
}
static err_t low_level_output(struct netif *netif, struct pbuf *p) { struct g2100if *g2100if = netif->state; struct pbuf *q; // *** //initiate transfer(); #if ETH_PAD_SIZE pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */ #endif for(q = p; q != NULL; q = q->next) { /* Send the data from the pbuf to the interface, one pbuf at a time. The size of the data in each pbuf is kept in the ->len variable. */ // *** //send data from(q->payload, q->len); zg_set_buf(q->payload, q->len); zg_set_tx_status(1); while (!zg_get_cnf_pending()) { // wait until transfer is confirmed zg_drv_process(); } } // *** //signal that packet should be sent(); #if ETH_PAD_SIZE pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */ #endif LINK_STATS_INC(link.xmit); return ERR_OK; }
static err_t low_level_output(struct netif *netif, struct pbuf *p) { // struct ethernetif *ethernetif = netif->state; struct pbuf *q; // initiate transfer(); #if ETH_PAD_SIZE pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */ #endif if(xSemaphoreTake(semEthTx, portMAX_DELAY) == pdTRUE ) { RequestSend(p->tot_len); for(q = p; q != NULL; q = q->next) { /* Send the data from the pbuf to the interface, one pbuf at a time. The size of the data in each pbuf is kept in the ->len variable. */ //send data from(q->payload, q->len); CopyToFrame_EMAC_Start(q->payload, q->len); } //signal that packet should be sent(); CopyToFrame_EMAC_End(); } pbuf_free(p); #if ETH_PAD_SIZE pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */ #endif LINK_STATS_INC(link.xmit); return ERR_OK; }
/*
 * Kinetis ENET linkoutput: flatten the pbuf chain into the static gTxBuf
 * and hand the contiguous frame to the MAC.
 */
static err_t low_level_output(struct netif *netif, struct pbuf *p)
{
	// struct ethernetif *ethernetif = netif->state;
	struct pbuf *q;
	// uint32_t i;
	u32_t tx_len;

	tx_len = 0;

#if ETH_PAD_SIZE
	pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif

	/* NOTE(review): no bound check against the capacity of gTxBuf; an
	   oversized pbuf chain would overflow the static buffer. Confirm that
	   gTxBuf is sized for MTU + Ethernet header. */
	for (q = p; q != NULL; q = q->next)
	{
		/* One memcpy per pbuf; the size of each is kept in q->len. */
		memcpy(&gTxBuf[tx_len], q->payload, q->len);
		tx_len += q->len;
	}

	ENET_MacSendData(gTxBuf, tx_len);

	// printf("sending frame:%d!!!!!!!!!!!!!\r\n", tx_len);
	//for(i=0;i<tx_len;i++) {
	//	printf("%x ", gTxBuf[i]);
	//}
	// printf("\r\n");

#if ETH_PAD_SIZE
	pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif

	LINK_STATS_INC(link.xmit);
	return ERR_OK;
}
/*
 * WinPcap-backed linkoutput: send the frame through pcap_sendpacket.
 * A single unchained pbuf is sent zero-copy (skipping the ETH_PAD_SIZE
 * padding word); a chained pbuf is first flattened into the static
 * ucBuffer. SNMP/link statistics are updated on both success and failure.
 */
static err_t prvLowLevelOutput( struct netif *pxNetIf, struct pbuf *p )
{
	/* This is taken from lwIP example code and therefore does not conform
	to the FreeRTOS coding standard. */

	struct pbuf *q;
	static unsigned char ucBuffer[ 1520 ];    /* bounce buffer for chained pbufs */
	unsigned char *pucBuffer = ucBuffer;      /* what actually gets sent */
	unsigned char *pucChar;
	struct eth_hdr *pxHeader;
	u16_t usTotalLength = p->tot_len - ETH_PAD_SIZE;
	err_t xReturn = ERR_OK;

	( void ) pxNetIf;

	#if defined(LWIP_DEBUG) && LWIP_NETIF_TX_SINGLE_PBUF
		LWIP_ASSERT("p->next == NULL && p->len == p->tot_len", p->next == NULL && p->len == p->tot_len);
	#endif

	/* Initiate transfer. */
	if( p->len == p->tot_len )
	{
		/* No pbuf chain, don't have to copy -> faster: point directly past
		the padding word inside the pbuf payload. */
		pucBuffer = &( ( unsigned char * ) p->payload )[ ETH_PAD_SIZE ];
	}
	else
	{
		/* pbuf chain, copy into contiguous ucBuffer. */
		if( p->tot_len >= sizeof( ucBuffer ) )
		{
			/* Frame does not fit in the bounce buffer — drop it. */
			LINK_STATS_INC( link.lenerr );
			LINK_STATS_INC( link.drop );
			snmp_inc_ifoutdiscards( pxNetIf );
			xReturn = ERR_BUF;
		}
		else
		{
			pucChar = ucBuffer;

			for( q = p; q != NULL; q = q->next )
			{
				/* Send the data from the pbuf to the interface, one pbuf at a
				time. The size of the data in each pbuf is kept in the ->len
				variable. */
				/* send data from(q->payload, q->len); */
				LWIP_DEBUGF( NETIF_DEBUG, ("NETIF: send pucChar %p q->payload %p q->len %i q->next %p\n", pucChar, q->payload, ( int ) q->len, ( void* ) q->next ) );

				if( q == p )
				{
					/* First pbuf of the chain: skip the padding word. */
					memcpy( pucChar, &( ( char * ) q->payload )[ ETH_PAD_SIZE ], q->len - ETH_PAD_SIZE );
					pucChar += q->len - ETH_PAD_SIZE;
				}
				else
				{
					memcpy( pucChar, q->payload, q->len );
					pucChar += q->len;
				}
			}
		}
	}

	if( xReturn == ERR_OK )
	{
		/* signal that packet should be sent */
		if( pcap_sendpacket( pxOpenedInterfaceHandle, pucBuffer, usTotalLength ) < 0 )
		{
			LINK_STATS_INC( link.memerr );
			LINK_STATS_INC( link.drop );
			snmp_inc_ifoutdiscards( pxNetIf );
			xReturn = ERR_BUF;
		}
		else
		{
			LINK_STATS_INC( link.xmit );
			snmp_add_ifoutoctets( pxNetIf, usTotalLength );
			/* Classify by the multicast bit of the destination MAC. */
			pxHeader = ( struct eth_hdr * )p->payload;

			if( ( pxHeader->dest.addr[ 0 ] & 1 ) != 0 )
			{
				/* broadcast or multicast packet*/
				snmp_inc_ifoutnucastpkts( pxNetIf );
			}
			else
			{
				/* unicast packet */
				snmp_inc_ifoutucastpkts( pxNetIf );
			}
		}
	}

	return xReturn;
}
/** \brief Low level output of a packet. Never call this from an
 *         interrupt context, as it may block until TX descriptors
 *         become available.
 *
 * Zero-copy transmit: each pbuf in the chain is attached to its own DMA
 * descriptor. Payloads that live in non-DMA-safe memory (IRAM/flash) are
 * first copied into a freshly allocated bounce pbuf (when
 * LPC_TX_PBUF_BOUNCE_EN is set).
 *
 * \param[in] netif the lwip network interface structure for this lpc_enetif
 * \param[in] p the MAC packet to send (e.g. IP packet including MAC addresses and type)
 * \return ERR_OK if the packet could be sent or an err_t value if the packet couldn't be sent
 */
static err_t lpc_low_level_output(struct netif *netif, struct pbuf *p)
{
	struct lpc_enetdata *lpc_enetif = netif->state;
	struct pbuf *q;
	u8_t *dst;
	u32_t idx, notdmasafe = 0;
	struct pbuf *np;
	s32_t dn;

	/* Zero-copy TX buffers may be fragmented across multiple payload
	   chains. Determine the number of descriptors needed for the
	   transfer. The pbuf chaining can be a mess! */
	dn = (s32_t) pbuf_clen(p);

	/* Test to make sure packet addresses are DMA safe. A DMA safe
	   address is one that uses external memory or peripheral RAM.
	   IRAM and FLASH are not safe! */
	for (q = p; q != NULL; q = q->next)
		notdmasafe += lpc_packet_addr_notsafe(q->payload);

#if LPC_TX_PBUF_BOUNCE_EN==1
	/* If the pbuf is not DMA safe, a new bounce buffer (pbuf) will be
	   created that will be used instead. This requires a copy from the
	   non-safe DMA region to the new pbuf. */
	if (notdmasafe) {
		/* Allocate a pbuf in DMA memory */
		np = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
		if (np == NULL)
			return ERR_MEM;

		/* This buffer better be contiguous! */
		LWIP_ASSERT("lpc_low_level_output: New transmit pbuf is chained",
			(pbuf_clen(np) == 1));

		/* Copy to DMA safe pbuf */
		dst = (u8_t *) np->payload;
		for(q = p; q != NULL; q = q->next) {
			/* Copy the buffer to the descriptor's buffer */
			MEMCPY(dst, (u8_t *) q->payload, q->len);
			dst += q->len;
		}
		np->len = p->tot_len;

		LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
			("lpc_low_level_output: Switched to DMA safe buffer, old=%p, new=%p\r\n", q, np));

		/* use the new buffer for descriptor queueing. The original pbuf will
		   be de-allocated outside this driver. */
		p = np;
		dn = 1;
	}
#else
	if (notdmasafe)
		LWIP_ASSERT("lpc_low_level_output: Not a DMA safe pbuf",
			(notdmasafe == 0));
#endif

	/* Wait until enough descriptors are available for the transfer. */
	/* THIS WILL BLOCK UNTIL THERE ARE ENOUGH DESCRIPTORS AVAILABLE */
	while (dn > lpc_tx_ready(netif))
#if NO_SYS == 0
		osSemaphoreWait(lpc_enetif->xTXDCountSem.id, osWaitForever);
#else
		osDelay(1);
#endif

	/* Get free TX buffer index */
	idx = LPC_EMAC->TxProduceIndex;

#if NO_SYS == 0
	/* Get exclusive access */
	sys_mutex_lock(&lpc_enetif->TXLockMutex);
#endif

	/* Prevent LWIP from de-allocating this pbuf. The driver will free it
	   once it's been transmitted. (Bounce pbufs already belong to the
	   driver, so no extra reference is taken for them.) */
	if (!notdmasafe)
		pbuf_ref(p);

	/* Setup transfers: one descriptor per chained pbuf. */
	q = p;
	while (dn > 0) {
		dn--;

		/* Only save pointer to free on last descriptor */
		if (dn == 0) {
			/* Save size of packet and signal it's ready */
			lpc_enetif->ptxd[idx].control = (q->len - 1) | EMAC_TCTRL_INT |
				EMAC_TCTRL_LAST;
			lpc_enetif->txb[idx] = p;
		}
		else {
			/* Save size of packet, descriptor is not last */
			lpc_enetif->ptxd[idx].control = (q->len - 1) | EMAC_TCTRL_INT;
			lpc_enetif->txb[idx] = NULL;
		}

		LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
			("lpc_low_level_output: pbuf packet(%p) sent, chain#=%d,"
			" size = %d (index=%d)\r\n", q->payload, dn, q->len, idx));

		lpc_enetif->ptxd[idx].packet = (u32_t) q->payload;

		q = q->next;

		idx++;
		if (idx >= LPC_NUM_BUFF_TXDESCS)
			idx = 0;
	}

	/* Publish the descriptors to the hardware. */
	LPC_EMAC->TxProduceIndex = idx;

	LINK_STATS_INC(link.xmit);

#if NO_SYS == 0
	/* Restore access */
	sys_mutex_unlock(&lpc_enetif->TXLockMutex);
#endif

	return ERR_OK;
}
/** \brief Allocates a pbuf and returns the data from the incoming packet.
 *
 * Zero-copy receive: the pbuf attached to the current RX descriptor is
 * detached and returned; a replacement pbuf is queued in its place.
 * Errored frames and RX overruns are handled by re-queuing/resetting.
 *
 * \param[in] netif the lwip network interface structure for this lpc_enetif
 * \return a pbuf filled with the received packet (including MAC header)
 *         NULL on memory error, RX error, or when no frame is pending
 */
static struct pbuf *lpc_low_level_input(struct netif *netif)
{
	struct lpc_enetdata *lpc_enetif = netif->state;
	struct pbuf *p = NULL;
	u32_t idx, length;
	u16_t origLength;

#ifdef LOCK_RX_THREAD
#if NO_SYS == 0
	/* Get exclusive access */
	sys_mutex_lock(&lpc_enetif->TXLockMutex);
#endif
#endif

	/* Monitor RX overrun status. This should never happen unless
	   (possibly) the internal bus is being held up by something. Unless
	   your system is running at a very low clock speed or there are
	   possibilities that the internal buses may be held up for a long
	   time, this can probably safely be removed. */
	if (LPC_EMAC->IntStatus & EMAC_INT_RX_OVERRUN) {
		LINK_STATS_INC(link.err);
		LINK_STATS_INC(link.drop);

		/* Temporarily disable RX */
		LPC_EMAC->MAC1 &= ~EMAC_MAC1_REC_EN;

		/* Reset the RX side */
		LPC_EMAC->MAC1 |= EMAC_MAC1_RES_RX;
		LPC_EMAC->IntClear = EMAC_INT_RX_OVERRUN;

		/* De-allocate all queued RX pbufs */
		for (idx = 0; idx < LPC_NUM_BUFF_RXDESCS; idx++) {
			if (lpc_enetif->rxb[idx] != NULL) {
				pbuf_free(lpc_enetif->rxb[idx]);
				lpc_enetif->rxb[idx] = NULL;
			}
		}

		/* Start RX side again */
		lpc_rx_setup(lpc_enetif);

		/* Re-enable RX */
		LPC_EMAC->MAC1 |= EMAC_MAC1_REC_EN;

#ifdef LOCK_RX_THREAD
#if NO_SYS == 0
		sys_mutex_unlock(&lpc_enetif->TXLockMutex);
#endif
#endif

		return NULL;
	}

	/* Determine if a frame has been received (consume index lags the
	   hardware's produce index). */
	length = 0;
	idx = LPC_EMAC->RxConsumeIndex;
	if (LPC_EMAC->RxProduceIndex != idx) {
		/* Handle errors flagged in the descriptor status word. */
		if (lpc_enetif->prxs[idx].statusinfo & (EMAC_RINFO_CRC_ERR |
			EMAC_RINFO_SYM_ERR | EMAC_RINFO_ALIGN_ERR | EMAC_RINFO_LEN_ERR)) {
#if LINK_STATS
			if (lpc_enetif->prxs[idx].statusinfo & (EMAC_RINFO_CRC_ERR |
				EMAC_RINFO_SYM_ERR | EMAC_RINFO_ALIGN_ERR))
				LINK_STATS_INC(link.chkerr);
			if (lpc_enetif->prxs[idx].statusinfo & EMAC_RINFO_LEN_ERR)
				LINK_STATS_INC(link.lenerr);
#endif

			/* Drop the frame */
			LINK_STATS_INC(link.drop);

			/* Re-queue the pbuf for receive */
			lpc_enetif->rx_free_descs++;
			p = lpc_enetif->rxb[idx];
			lpc_enetif->rxb[idx] = NULL;
			lpc_rxqueue_pbuf(lpc_enetif, p);

			LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
				("lpc_low_level_input: Packet dropped with errors (0x%x)\r\n",
				lpc_enetif->prxs[idx].statusinfo));

			p = NULL;
		} else {
			/* A packet is waiting, get length */
			length = (lpc_enetif->prxs[idx].statusinfo & 0x7FF) + 1;

			/* Zero-copy: hand the descriptor's pbuf to the caller. */
			p = lpc_enetif->rxb[idx];
			origLength = p->len;
			p->len = (u16_t) length;

			/* Free pbuf from descriptor */
			lpc_enetif->rxb[idx] = NULL;
			lpc_enetif->rx_free_descs++;

			/* Attempt to queue new buffer(s); if none can be allocated the
			   frame must be given back to the hardware (OOM). */
			if (lpc_rx_queue(lpc_enetif->netif) == 0) {
				/* Drop the frame due to OOM. */
				LINK_STATS_INC(link.drop);

				/* Re-queue the pbuf for receive */
				p->len = origLength;
				lpc_rxqueue_pbuf(lpc_enetif, p);

				LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
					("lpc_low_level_input: Packet index %d dropped for OOM\r\n",
					idx));

#ifdef LOCK_RX_THREAD
#if NO_SYS == 0
				sys_mutex_unlock(&lpc_enetif->TXLockMutex);
#endif
#endif

				return NULL;
			}

			LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
				("lpc_low_level_input: Packet received: %p, size %d (index=%d)\r\n",
				p, length, idx));

			/* Save size */
			p->tot_len = (u16_t) length;
			LINK_STATS_INC(link.recv);
		}
	}

#ifdef LOCK_RX_THREAD
#if NO_SYS == 0
	sys_mutex_unlock(&lpc_enetif->TXLockMutex);
#endif
#endif

	return p;
}
/**
 * Send an IP packet to be received on the same netif (loopif-like).
 * The pbuf is simply copied and handed back to netif->input.
 * In multithreaded mode, this is done directly since netif->input must put
 * the packet on a queue.
 * In callback mode, the packet is put on an internal queue and is fed to
 * netif->input by netif_poll().
 *
 * @param netif the lwip network interface structure
 * @param p the (IP) packet to 'send'
 * @param ipaddr the ip address to send the packet to (not used)
 * @return ERR_OK if the packet has been sent
 *         ERR_MEM if the pbuf used to copy the packet couldn't be allocated
 */
err_t netif_loop_output(struct netif *netif, struct pbuf *p, ip_addr_t *ipaddr)
{
	struct pbuf *r;
	err_t err;
	struct pbuf *last;
#if LWIP_LOOPBACK_MAX_PBUFS
	u8_t clen = 0;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */
	/* If we have a loopif, SNMP counters are adjusted for it,
	 * if not they are adjusted for 'netif'. */
#if LWIP_SNMP
#if LWIP_HAVE_LOOPIF
	struct netif *stats_if = &loop_netif;
#else /* LWIP_HAVE_LOOPIF */
	struct netif *stats_if = netif;
#endif /* LWIP_HAVE_LOOPIF */
#endif /* LWIP_SNMP */
	SYS_ARCH_DECL_PROTECT(lev);
	LWIP_UNUSED_ARG(ipaddr);

	/* Allocate a new pbuf: the caller's pbuf cannot be queued directly
	   because the caller may free or reuse it after this returns. */
	r = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM);
	if (r == NULL) {
		LINK_STATS_INC(link.memerr);
		LINK_STATS_INC(link.drop);
		snmp_inc_ifoutdiscards(stats_if);
		return ERR_MEM;
	}
#if LWIP_LOOPBACK_MAX_PBUFS
	clen = pbuf_clen(r);
	/* check for overflow or too many pbuf on queue */
	if(((netif->loop_cnt_current + clen) < netif->loop_cnt_current) ||
		((netif->loop_cnt_current + clen) > LWIP_LOOPBACK_MAX_PBUFS)) {
		pbuf_free(r);
		LINK_STATS_INC(link.memerr);
		LINK_STATS_INC(link.drop);
		snmp_inc_ifoutdiscards(stats_if);
		return ERR_MEM;
	}
	netif->loop_cnt_current += clen;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */

	/* Copy the whole pbuf queue p into the single pbuf r */
	if ((err = pbuf_copy(r, p)) != ERR_OK) {
		pbuf_free(r);
		LINK_STATS_INC(link.memerr);
		LINK_STATS_INC(link.drop);
		snmp_inc_ifoutdiscards(stats_if);
		return err;
	}

	/* Put the packet on a linked list which gets emptied through calling
	   netif_poll(). */

	/* let last point to the last pbuf in chain r */
	for (last = r; last->next != NULL; last = last->next);

	/* Append under protection: the queue is shared with netif_poll(). */
	SYS_ARCH_PROTECT(lev);
	if(netif->loop_first != NULL) {
		LWIP_ASSERT("if first != NULL, last must also be != NULL", netif->loop_last != NULL);
		netif->loop_last->next = r;
		netif->loop_last = last;
	} else {
		netif->loop_first = r;
		netif->loop_last = last;
	}
	SYS_ARCH_UNPROTECT(lev);

	LINK_STATS_INC(link.xmit);
	snmp_add_ifoutoctets(stats_if, p->tot_len);
	snmp_inc_ifoutucastpkts(stats_if);

#if LWIP_NETIF_LOOPBACK_MULTITHREADING
	/* For multithreading environment, schedule a call to netif_poll */
	tcpip_callback((tcpip_callback_fn)netif_poll, netif);
#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */

	return ERR_OK;
}
/**
 * Handle the incoming SLIP stream character by character
 *
 * State machine: SLIP_END terminates (and returns) an accumulated packet;
 * SLIP_ESC switches to escape state where ESC_END/ESC_ESC are un-escaped.
 * Payload bytes are appended to a growing pbuf chain held in priv->q,
 * with priv->p the chunk currently being filled and priv->i the write
 * offset within it.
 *
 * @param netif the lwip network interface structure for this slipif
 * @param c received character (multiple calls to this function will
 *        return a complete packet, NULL is returned before - used for polling)
 * @return The IP packet when SLIP_END is received
 */
static struct pbuf*
slipif_rxbyte(struct netif *netif, u8_t c)
{
	struct slipif_priv *priv;
	struct pbuf *t;

	LWIP_ASSERT("netif != NULL", (netif != NULL));
	LWIP_ASSERT("netif->state != NULL", (netif->state != NULL));

	priv = netif->state;

	switch (priv->state) {
	case SLIP_RECV_NORMAL:
		switch (c) {
		case SLIP_END:
			if (priv->recved > 0) {
				/* Received whole packet. */
				/* Trim the pbuf to the size of the received packet. */
				pbuf_realloc(priv->q, priv->recved);

				LINK_STATS_INC(link.recv);

				LWIP_DEBUGF(SLIP_DEBUG, ("slipif: Got packet (%"U16_F" bytes)\n", priv->recved));
				/* Hand the chain to the caller and reset accumulation state. */
				t = priv->q;
				priv->p = priv->q = NULL;
				priv->i = priv->recved = 0;
				return t;
			}
			return NULL;
		case SLIP_ESC:
			priv->state = SLIP_RECV_ESCAPE;
			return NULL;
		} /* end switch (c) */
		break;
	case SLIP_RECV_ESCAPE:
		/* un-escape END or ESC bytes, leave other bytes
		   (although that would be a protocol error) */
		switch (c) {
		case SLIP_ESC_END:
			c = SLIP_END;
			break;
		case SLIP_ESC_ESC:
			c = SLIP_ESC;
			break;
		}
		priv->state = SLIP_RECV_NORMAL;
		break;
	} /* end switch (priv->state) */

	/* byte received, packet not yet completely received */
	if (priv->p == NULL) {
		/* allocate a new pbuf */
		LWIP_DEBUGF(SLIP_DEBUG, ("slipif_input: alloc\n"));
		priv->p = pbuf_alloc(PBUF_LINK, (PBUF_POOL_BUFSIZE - PBUF_LINK_HLEN), PBUF_POOL);

		if (priv->p == NULL) {
			LINK_STATS_INC(link.drop);
			LWIP_DEBUGF(SLIP_DEBUG, ("slipif_input: no new pbuf! (DROP)\n"));
			/* don't process any further since we got no pbuf to receive to */
			return NULL;
		}

		if (priv->q != NULL) {
			/* 'chain' the pbuf to the existing chain */
			pbuf_cat(priv->q, priv->p);
		} else {
			/* p is the first pbuf in the chain */
			priv->q = priv->p;
		}
	}

	/* this automatically drops bytes if > SLIP_MAX_SIZE */
	if ((priv->p != NULL) && (priv->recved <= SLIP_MAX_SIZE)) {
		((u8_t *)priv->p->payload)[priv->i] = c;
		priv->recved++;
		priv->i++;
		if (priv->i >= priv->p->len) {
			/* on to the next pbuf */
			priv->i = 0;
			if (priv->p->next != NULL && priv->p->next->len > 0) {
				/* p is a chain, on to the next in the chain */
				priv->p = priv->p->next;
			} else {
				/* p is a single pbuf, set it to NULL so next time a new
				 * pbuf is allocated */
				priv->p = NULL;
			}
		}
	}
	return NULL;
}
/* Call for freeing TX buffers that are complete */ void lpc_tx_reclaim(struct netif *netif) { struct lpc_enetdata *lpc_netifdata = netif->state; s32_t ridx; u32_t status; #if NO_SYS == 0 /* Get exclusive access */ sys_mutex_lock(&lpc_netifdata->TXLockMutex); #endif /* If a descriptor is available and is no longer owned by the hardware, it can be reclaimed */ ridx = lpc_netifdata->tx_reclaim_idx; while ((lpc_netifdata->tx_free_descs < LPC_NUM_BUFF_TXDESCS) && (!(lpc_netifdata->ptdesc[ridx].CTRLSTAT & TDES_OWN))) { /* Peek at the status of the descriptor to determine if the packet is good and any status information. */ status = lpc_netifdata->ptdesc[ridx].CTRLSTAT; LWIP_DEBUGF(EMAC_DEBUG | LWIP_DBG_TRACE, ("lpc_tx_reclaim: Reclaiming sent packet %p, index %d\n", lpc_netifdata->txpbufs[ridx], ridx)); /* Check TX error conditions */ if (status & TDES_ES) { LWIP_DEBUGF(EMAC_DEBUG | LWIP_DBG_TRACE, ("lpc_tx_reclaim: TX error condition status 0x%x\n", status)); LINK_STATS_INC(link.err); #if LINK_STATS == 1 /* Error conditions that cause a packet drop */ if (status & (TDES_UF | TDES_ED | TDES_EC | TDES_LC)) { LINK_STATS_INC(link.drop); } #endif } /* Reset control for this descriptor */ if (ridx == (LPC_NUM_BUFF_TXDESCS - 1)) { lpc_netifdata->ptdesc[ridx].CTRLSTAT = TDES_ENH_TCH | TDES_ENH_TER; } else { lpc_netifdata->ptdesc[ridx].CTRLSTAT = TDES_ENH_TCH; } /* Free the pbuf associate with this descriptor */ if (lpc_netifdata->txpbufs[ridx]) { pbuf_free(lpc_netifdata->txpbufs[ridx]); } /* Reclaim this descriptor */ lpc_netifdata->tx_free_descs++; #if NO_SYS == 0 xSemaphoreGive(lpc_netifdata->xTXDCountSem); #endif ridx++; if (ridx >= LPC_NUM_BUFF_TXDESCS) { ridx = 0; } } lpc_netifdata->tx_reclaim_idx = ridx; #if NO_SYS == 0 /* Restore access */ sys_mutex_unlock(&lpc_netifdata->TXLockMutex); #endif }
/* Low level output of a packet. Never call this from an interrupt context, as it may block until TX descriptors become available */ static err_t lpc_low_level_output(struct netif *netif, struct pbuf *sendp) { struct lpc_enetdata *lpc_netifdata = netif->state; u32_t idx, fidx, dn; struct pbuf *p = sendp; #if LPC_CHECK_SLOWMEM == 1 struct pbuf *q, *wp; u8_t *dst; int pcopy = 0; /* Check packet address to determine if it's in slow memory and relocate if necessary */ for (q = p; ((q != NULL) && (pcopy == 0)); q = q->next) { fidx = 0; for (idx = 0; idx < sizeof(slmem); idx += sizeof(struct lpc_slowmem_array_t)) { if ((q->payload >= (void *) slmem[fidx].start) && (q->payload <= (void *) slmem[fidx].end)) { /* Needs copy */ pcopy = 1; } } } if (pcopy) { /* Create a new pbuf with the total pbuf size */ wp = pbuf_alloc(PBUF_RAW, (u16_t) EMAC_ETH_MAX_FLEN, PBUF_RAM); if (!wp) { /* Exit with error */ return ERR_MEM; } /* Copy pbuf */ dst = (u8_t *) wp->payload; wp->tot_len = 0; for (q = p; q != NULL; q = q->next) { MEMCPY(dst, (u8_t *) q->payload, q->len); dst += q->len; wp->tot_len += q->len; } wp->len = wp->tot_len; /* LWIP will free original pbuf on exit of function */ p = sendp = wp; } #endif /* Zero-copy TX buffers may be fragmented across mutliple payload chains. Determine the number of descriptors needed for the transfer. The pbuf chaining can be a mess! */ dn = (u32_t) pbuf_clen(p); /* Wait until enough descriptors are available for the transfer. 
*/ /* THIS WILL BLOCK UNTIL THERE ARE ENOUGH DESCRIPTORS AVAILABLE */ while (dn > lpc_tx_ready(netif)) #if NO_SYS == 0 {xSemaphoreTake(lpc_netifdata->xTXDCountSem, 0); } #else {msDelay(1); } #endif /* Get the next free descriptor index */ fidx = idx = lpc_netifdata->tx_fill_idx; #if NO_SYS == 0 /* Get exclusive access */ sys_mutex_lock(&lpc_netifdata->TXLockMutex); #endif /* Fill in the next free descriptor(s) */ while (dn > 0) { dn--; /* Setup packet address and length */ lpc_netifdata->ptdesc[idx].B1ADD = (u32_t) p->payload; lpc_netifdata->ptdesc[idx].BSIZE = (u32_t) TDES_ENH_BS1(p->len); /* Save pointer to pbuf so we can reclain the memory for the pbuf after the buffer has been sent. Only the first pbuf in a chain is saved since the full chain doesn't need to be freed. */ /* For first packet only, first flag */ lpc_netifdata->tx_free_descs--; if (idx == fidx) { lpc_netifdata->ptdesc[idx].CTRLSTAT |= TDES_ENH_FS; #if LPC_CHECK_SLOWMEM == 1 /* If this is a copied pbuf, then avoid getting the extra reference or the TX reclaim will be off by 1 */ if (!pcopy) { pbuf_ref(p); } #else /* Increment reference count on this packet so LWIP doesn't attempt to free it on return from this call */ pbuf_ref(p); #endif } else { lpc_netifdata->ptdesc[idx].CTRLSTAT |= TDES_OWN; } /* Save address of pbuf, but make sure it's associated with the first chained pbuf so it gets freed once all pbuf chains are transferred. 
*/ if (!dn) { lpc_netifdata->txpbufs[idx] = sendp; } else { lpc_netifdata->txpbufs[idx] = NULL; } /* For last packet only, interrupt and last flag */ if (dn == 0) { lpc_netifdata->ptdesc[idx].CTRLSTAT |= TDES_ENH_LS | TDES_ENH_IC; } /* IP checksumming requires full buffering in IP */ lpc_netifdata->ptdesc[idx].CTRLSTAT |= TDES_ENH_CIC(3); LWIP_DEBUGF(EMAC_DEBUG | LWIP_DBG_TRACE, ("lpc_low_level_output: pbuf packet %p sent, chain %d," " size %d, index %d, free %d\n", p, dn, p->len, idx, lpc_netifdata->tx_free_descs)); /* Update next available descriptor */ idx++; if (idx >= LPC_NUM_BUFF_TXDESCS) { idx = 0; } /* Next packet fragment */ p = p->next; } lpc_netifdata->tx_fill_idx = idx; LINK_STATS_INC(link.xmit); /* Give first descriptor to DMA to start transfer */ lpc_netifdata->ptdesc[fidx].CTRLSTAT |= TDES_OWN; /* Tell DMA to poll descriptors to start transfer */ LPC_ETHERNET->DMA_TRANS_POLL_DEMAND = 1; #if NO_SYS == 0 /* Restore access */ sys_mutex_unlock(&lpc_netifdata->TXLockMutex); #endif return ERR_OK; }
/* Gets data from queue and forwards to LWIP.
   Pops the next completed RX descriptor, validates its status bits and
   returns the associated pbuf (already filled by DMA), or NULL if no
   packet is ready or the descriptor flagged an error (in which case the
   pbuf is re-queued for reuse). */
static struct pbuf *lpc_low_level_input(struct netif *netif)
{
  struct lpc_enetdata *lpc_netifdata = netif->state;
  u32_t status, ridx;
  int rxerr = 0;
  struct pbuf *p;

#ifdef LOCK_RX_THREAD
#if NO_SYS == 0
  /* Get exclusive access */
  /* NOTE(review): the RX path reuses TXLockMutex rather than a dedicated
     RX mutex — presumably intentional (single shared driver lock); confirm. */
  sys_mutex_lock(&lpc_netifdata->TXLockMutex);
#endif
#endif

  /* If there are no used descriptors, then this call was
     not for a received packet, try to setup some descriptors now */
  if (lpc_netifdata->rx_free_descs == LPC_NUM_BUFF_RXDESCS) {
    lpc_rx_queue(netif);
#ifdef LOCK_RX_THREAD
#if NO_SYS == 0
    sys_mutex_unlock(&lpc_netifdata->TXLockMutex);
#endif
#endif
    return NULL;
  }

  /* Get index for next descriptor with data */
  ridx = lpc_netifdata->rx_get_idx;

  /* Return if descriptor is still owned by DMA */
  if (lpc_netifdata->prdesc[ridx].STATUS & RDES_OWN) {
#ifdef LOCK_RX_THREAD
#if NO_SYS == 0
    sys_mutex_unlock(&lpc_netifdata->TXLockMutex);
#endif
#endif
    return NULL;
  }

  /* Get address of pbuf for this descriptor */
  p = lpc_netifdata->rxpbufs[ridx];

  /* Get receive packet status */
  status = lpc_netifdata->prdesc[ridx].STATUS;

  /* Check packet for errors */
  if (status & RDES_ES) {
    LINK_STATS_INC(link.drop);

    /* Error conditions that cause a packet drop */
    /* NOTE(review): intMask is a file-scope constant defined outside this
       view; assumed to be the set of fatal RDES error bits — confirm. */
    if (status & intMask) {
      LINK_STATS_INC(link.err);
      rxerr = 1;
    } else
    /* Length error check needs qualification */
    if ((status & (RDES_LE | RDES_FT)) == RDES_LE) {
      LINK_STATS_INC(link.lenerr);
      rxerr = 1;
    } else
    /* CRC error check needs qualification */
    if ((status & (RDES_CE | RDES_LS)) == (RDES_CE | RDES_LS)) {
      LINK_STATS_INC(link.chkerr);
      rxerr = 1;
    }

    /* Descriptor error check needs qualification */
    /* NOTE(review): this check is not chained with 'else' to the ones
       above, so it is evaluated even after a length/CRC error — looks
       intentional (DE can coincide), but confirm against the datasheet. */
    if ((status & (RDES_DE | RDES_LS)) == (RDES_DE | RDES_LS)) {
      LINK_STATS_INC(link.err);
      rxerr = 1;
    } else
    /* Dribble bit error only applies in half duplex mode */
    if ((status & RDES_DE) && (!(LPC_ETHERNET->MAC_CONFIG & MAC_CFG_DM))) {
      LINK_STATS_INC(link.err);
      rxerr = 1;
    }
  }

  /* Increment free descriptor count and next get index */
  lpc_netifdata->rx_free_descs++;
  ridx++;
  if (ridx >= LPC_NUM_BUFF_RXDESCS) {
    ridx = 0;
  }
  lpc_netifdata->rx_get_idx = ridx;

  /* If an error occurred, just re-queue the pbuf */
  if (rxerr) {
    lpc_rxqueue_pbuf(lpc_netifdata, p);
    p = NULL;

    LWIP_DEBUGF(EMAC_DEBUG | LWIP_DBG_TRACE,
                ("lpc_low_level_input: RX error condition status 0x%08x\n",
                 status));
  } else {
    /* Attempt to queue a new pbuf for the descriptor */
    lpc_rx_queue(netif);

    /* Get length of received packet */
    p->len = p->tot_len = (u16_t) RDES_FLMSK(status);

    LINK_STATS_INC(link.recv);

    LWIP_DEBUGF(EMAC_DEBUG | LWIP_DBG_TRACE,
                ("lpc_low_level_input: Packet received, %d bytes, "
                 "status 0x%08x\n", p->len, status));
  }

  /* (Re)start receive polling */
  LPC_ETHERNET->DMA_REC_POLL_DEMAND = 1;

#ifdef LOCK_RX_THREAD
#if NO_SYS == 0
  /* Release exclusive access */
  sys_mutex_unlock(&lpc_netifdata->TXLockMutex);
#endif
#endif

  return p;
}
/**
 * Call netif_poll() in the main loop of your application. This is to prevent
 * reentering non-reentrant functions like tcp_input(). Packets passed to
 * netif_loop_output() are put on a list that is passed to netif->input() by
 * netif_poll().
 *
 * Each iteration pops exactly one whole packet (a chain of pbufs up to the
 * first one with len == tot_len) from netif->loop_first under
 * SYS_ARCH_PROTECT, then feeds it to the IP layer outside the critical
 * section.
 */
void
netif_poll(struct netif *netif)
{
  struct pbuf *in;
  /* If we have a loopif, SNMP counters are adjusted for it,
   * if not they are adjusted for 'netif'. */
#if LWIP_SNMP
#if LWIP_HAVE_LOOPIF
  struct netif *stats_if = &loop_netif;
#else /* LWIP_HAVE_LOOPIF */
  struct netif *stats_if = netif;
#endif /* LWIP_HAVE_LOOPIF */
#endif /* LWIP_SNMP */
  SYS_ARCH_DECL_PROTECT(lev);

  do {
    /* Get a packet from the list. With SYS_LIGHTWEIGHT_PROT=1, this is protected */
    SYS_ARCH_PROTECT(lev);
    in = netif->loop_first;
    if (in != NULL) {
      struct pbuf *in_end = in;
#if LWIP_LOOPBACK_MAX_PBUFS
      u8_t clen = 1;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */
      /* Walk to the last pbuf of this packet: within one packet,
         len != tot_len until the final pbuf of the chain. */
      while (in_end->len != in_end->tot_len) {
        LWIP_ASSERT("bogus pbuf: len != tot_len but next == NULL!", in_end->next != NULL);
        in_end = in_end->next;
#if LWIP_LOOPBACK_MAX_PBUFS
        clen++;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */
      }
#if LWIP_LOOPBACK_MAX_PBUFS
      /* adjust the number of pbufs on queue */
      LWIP_ASSERT("netif->loop_cnt_current underflow",
        ((netif->loop_cnt_current - clen) < netif->loop_cnt_current));
      netif->loop_cnt_current -= clen;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */

      /* 'in_end' now points to the last pbuf from 'in' */
      if (in_end == netif->loop_last) {
        /* this was the last pbuf in the list */
        netif->loop_first = netif->loop_last = NULL;
      } else {
        /* pop the pbuf off the list */
        netif->loop_first = in_end->next;
        LWIP_ASSERT("should not be null since first != last!", netif->loop_first != NULL);
      }
      /* De-queue the pbuf from its successors on the 'loop_' list. */
      in_end->next = NULL;
    }
    SYS_ARCH_UNPROTECT(lev);

    if (in != NULL) {
      LINK_STATS_INC(link.recv);
      snmp_add_ifinoctets(stats_if, in->tot_len);
      snmp_inc_ifinucastpkts(stats_if);
      /* loopback packets are always IP packets! */
      if (ipX_input(in, netif) != ERR_OK) {
        /* input function took no ownership on error: free here */
        pbuf_free(in);
      }
      /* Don't reference the packet any more! */
      in = NULL;
    }
  /* go on while there is a packet on the list */
  } while (netif->loop_first != NULL);
}
/** * @ingroup lwip_nosys * Process received ethernet frames. Using this function instead of directly * calling ip_input and passing ARP frames through etharp in ethernetif_input, * the ARP cache is protected from concurrent access.\n * Don't call directly, pass to netif_add() and call netif->input(). * * @param p the received packet, p->payload pointing to the ethernet header * @param netif the network interface on which the packet was received * * @see LWIP_HOOK_UNKNOWN_ETH_PROTOCOL * @see ETHARP_SUPPORT_VLAN * @see LWIP_HOOK_VLAN_CHECK */ err_t ethernet_input(struct pbuf *p, struct netif *netif) { struct eth_hdr* ethhdr; u16_t type; #if LWIP_ARP || ETHARP_SUPPORT_VLAN || LWIP_IPV6 s16_t ip_hdr_offset = SIZEOF_ETH_HDR; #endif /* LWIP_ARP || ETHARP_SUPPORT_VLAN */ if (p->len <= SIZEOF_ETH_HDR) { /* a packet with only an ethernet header (or less) is not valid for us */ ETHARP_STATS_INC(etharp.proterr); ETHARP_STATS_INC(etharp.drop); MIB2_STATS_NETIF_INC(netif, ifinerrors); goto free_and_return; } /* points to packet payload, which starts with an Ethernet header */ ethhdr = (struct eth_hdr *)p->payload; LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("ethernet_input: dest:%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F", src:%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F":%"X8_F", type:%"X16_F"\n", (unsigned)ethhdr->dest.addr[0], (unsigned)ethhdr->dest.addr[1], (unsigned)ethhdr->dest.addr[2], (unsigned)ethhdr->dest.addr[3], (unsigned)ethhdr->dest.addr[4], (unsigned)ethhdr->dest.addr[5], (unsigned)ethhdr->src.addr[0], (unsigned)ethhdr->src.addr[1], (unsigned)ethhdr->src.addr[2], (unsigned)ethhdr->src.addr[3], (unsigned)ethhdr->src.addr[4], (unsigned)ethhdr->src.addr[5], lwip_htons(ethhdr->type))); type = ethhdr->type; #if ETHARP_SUPPORT_VLAN if (type == PP_HTONS(ETHTYPE_VLAN)) { struct eth_vlan_hdr *vlan = (struct eth_vlan_hdr*)(((char*)ethhdr) + SIZEOF_ETH_HDR); if (p->len <= SIZEOF_ETH_HDR + SIZEOF_VLAN_HDR) { /* a packet with only an ethernet/vlan header (or less) is not valid 
for us */ ETHARP_STATS_INC(etharp.proterr); ETHARP_STATS_INC(etharp.drop); MIB2_STATS_NETIF_INC(netif, ifinerrors); goto free_and_return; } #if defined(LWIP_HOOK_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK_FN) /* if not, allow all VLANs */ #ifdef LWIP_HOOK_VLAN_CHECK if (!LWIP_HOOK_VLAN_CHECK(netif, ethhdr, vlan)) { #elif defined(ETHARP_VLAN_CHECK_FN) if (!ETHARP_VLAN_CHECK_FN(ethhdr, vlan)) { #elif defined(ETHARP_VLAN_CHECK) if (VLAN_ID(vlan) != ETHARP_VLAN_CHECK) { #endif /* silently ignore this packet: not for our VLAN */ pbuf_free(p); return ERR_OK; } #endif /* defined(LWIP_HOOK_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK) || defined(ETHARP_VLAN_CHECK_FN) */ type = vlan->tpid; ip_hdr_offset = SIZEOF_ETH_HDR + SIZEOF_VLAN_HDR; } #endif /* ETHARP_SUPPORT_VLAN */ #if LWIP_ARP_FILTER_NETIF netif = LWIP_ARP_FILTER_NETIF_FN(p, netif, lwip_htons(type)); #endif /* LWIP_ARP_FILTER_NETIF*/ if (ethhdr->dest.addr[0] & 1) { /* this might be a multicast or broadcast packet */ if (ethhdr->dest.addr[0] == LL_IP4_MULTICAST_ADDR_0) { #if LWIP_IPV4 if ((ethhdr->dest.addr[1] == LL_IP4_MULTICAST_ADDR_1) && (ethhdr->dest.addr[2] == LL_IP4_MULTICAST_ADDR_2)) { /* mark the pbuf as link-layer multicast */ p->flags |= PBUF_FLAG_LLMCAST; } #endif /* LWIP_IPV4 */ } #if LWIP_IPV6 else if ((ethhdr->dest.addr[0] == LL_IP6_MULTICAST_ADDR_0) && (ethhdr->dest.addr[1] == LL_IP6_MULTICAST_ADDR_1)) { /* mark the pbuf as link-layer multicast */ p->flags |= PBUF_FLAG_LLMCAST; } #endif /* LWIP_IPV6 */ else if (eth_addr_cmp(ðhdr->dest, ðbroadcast)) { /* mark the pbuf as link-layer broadcast */ p->flags |= PBUF_FLAG_LLBCAST; } } switch (type) { #if LWIP_IPV4 && LWIP_ARP /* IP packet? 
*/ case PP_HTONS(ETHTYPE_IP): if (!(netif->flags & NETIF_FLAG_ETHARP)) { goto free_and_return; } /* skip Ethernet header */ if ((p->len < ip_hdr_offset) || pbuf_header(p, (s16_t)-ip_hdr_offset)) { LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("ethernet_input: IPv4 packet dropped, too short (%"S16_F"/%"S16_F")\n", p->tot_len, ip_hdr_offset)); LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("Can't move over header in packet")); goto free_and_return; } else { /* pass to IP layer */ ip4_input(p, netif); } break; case PP_HTONS(ETHTYPE_ARP): if (!(netif->flags & NETIF_FLAG_ETHARP)) { goto free_and_return; } /* skip Ethernet header */ if ((p->len < ip_hdr_offset) || pbuf_header(p, (s16_t)-ip_hdr_offset)) { LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("ethernet_input: ARP response packet dropped, too short (%"S16_F"/%"S16_F")\n", p->tot_len, ip_hdr_offset)); LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("Can't move over header in packet")); ETHARP_STATS_INC(etharp.lenerr); ETHARP_STATS_INC(etharp.drop); goto free_and_return; } else { /* pass p to ARP module */ etharp_input(p, netif); } break; #endif /* LWIP_IPV4 && LWIP_ARP */ #if PPPOE_SUPPORT case PP_HTONS(ETHTYPE_PPPOEDISC): /* PPP Over Ethernet Discovery Stage */ pppoe_disc_input(netif, p); break; case PP_HTONS(ETHTYPE_PPPOE): /* PPP Over Ethernet Session Stage */ pppoe_data_input(netif, p); break; #endif /* PPPOE_SUPPORT */ #if LWIP_IPV6 case PP_HTONS(ETHTYPE_IPV6): /* IPv6 */ /* skip Ethernet header */ if ((p->len < ip_hdr_offset) || pbuf_header(p, (s16_t)-ip_hdr_offset)) { LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("ethernet_input: IPv6 packet dropped, too short (%"S16_F"/%"S16_F")\n", p->tot_len, ip_hdr_offset)); goto free_and_return; } else { /* pass to IPv6 layer */ ip6_input(p, netif); } break; #endif /* LWIP_IPV6 */ default: #ifdef LWIP_HOOK_UNKNOWN_ETH_PROTOCOL if(LWIP_HOOK_UNKNOWN_ETH_PROTOCOL(p, netif) == ERR_OK) { break; } #endif 
ETHARP_STATS_INC(etharp.proterr); ETHARP_STATS_INC(etharp.drop); MIB2_STATS_NETIF_INC(netif, ifinunknownprotos); goto free_and_return; } /* This means the pbuf is freed or consumed, so the caller doesn't have to free it again */ return ERR_OK; free_and_return: pbuf_free(p); return ERR_OK; } /** * @ingroup ethernet * Send an ethernet packet on the network using netif->linkoutput(). * The ethernet header is filled in before sending. * * @see LWIP_HOOK_VLAN_SET * * @param netif the lwIP network interface on which to send the packet * @param p the packet to send. pbuf layer must be @ref PBUF_LINK. * @param src the source MAC address to be copied into the ethernet header * @param dst the destination MAC address to be copied into the ethernet header * @param eth_type ethernet type (@ref eth_type) * @return ERR_OK if the packet was sent, any other err_t on failure */ err_t ethernet_output(struct netif* netif, struct pbuf* p, const struct eth_addr* src, const struct eth_addr* dst, u16_t eth_type) { struct eth_hdr* ethhdr; u16_t eth_type_be = lwip_htons(eth_type); #if ETHARP_SUPPORT_VLAN && defined(LWIP_HOOK_VLAN_SET) s32_t vlan_prio_vid = LWIP_HOOK_VLAN_SET(netif, p, src, dst, eth_type); if (vlan_prio_vid >= 0) { struct eth_vlan_hdr* vlanhdr; LWIP_ASSERT("prio_vid must be <= 0xFFFF", vlan_prio_vid <= 0xFFFF); if (pbuf_header(p, SIZEOF_ETH_HDR + SIZEOF_VLAN_HDR) != 0) { goto pbuf_header_failed; } vlanhdr = (struct eth_vlan_hdr*)(((u8_t*)p->payload) + SIZEOF_ETH_HDR); vlanhdr->tpid = eth_type_be; vlanhdr->prio_vid = lwip_htons((u16_t)vlan_prio_vid); eth_type_be = PP_HTONS(ETHTYPE_VLAN); } else #endif /* ETHARP_SUPPORT_VLAN && defined(LWIP_HOOK_VLAN_SET) */ { if (pbuf_header(p, SIZEOF_ETH_HDR) != 0) { goto pbuf_header_failed; } } ethhdr = (struct eth_hdr*)p->payload; ethhdr->type = eth_type_be; ETHADDR32_COPY(ðhdr->dest, dst); ETHADDR16_COPY(ðhdr->src, src); LWIP_ASSERT("netif->hwaddr_len must be 6 for ethernet_output!", (netif->hwaddr_len == ETH_HWADDR_LEN)); 
LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("ethernet_output: sending packet %p\n", (void *)p)); /* send the packet */ return netif->linkoutput(netif, p); pbuf_header_failed: LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("ethernet_output: could not allocate room for header.\n")); LINK_STATS_INC(link.lenerr); return ERR_BUF; }