/** \brief Sets up the TX descriptor ring buffers.
 *
 * This function sets up the descriptor list used for transmit packets.
 *
 * \param[in] txbdCfg Pointer to the TX buffer descriptor configuration to fill in
 * \returns NyLPC_TBool_TRUE on success, NyLPC_TBool_FALSE if out of memory
 */
static NyLPC_TBool k64f_tx_setup(enet_txbd_config_t *txbdCfg)
{
    int i;
    enet_dev_if_t *enetIfPtr = (enet_dev_if_t *)&enetDevIf[BOARD_DEBUG_ENET_INSTANCE];

    // Allocate TX descriptors (release any previous allocation first)
    if (TX_DESC_BUF_BASE != NULL) {
        free(TX_DESC_BUF_BASE);
        TX_DESC_BUF_BASE = NULL;
    }
    TX_DESC_BUF_BASE = (void*)calloc(1, enet_hal_get_bd_size() * enetIfPtr->macCfgPtr->txBdNumber + ENET_BD_ALIGNMENT);
    if (TX_DESC_BUF_BASE == NULL) {
        return NyLPC_TBool_FALSE;
    }
    _driver.tx_desc_start_addr = (uint8_t *)ENET_ALIGN((uint32_t)TX_DESC_BUF_BASE, ENET_BD_ALIGNMENT);

    txbdCfg->txBdPtrAlign = _driver.tx_desc_start_addr;
    txbdCfg->txBufferNum = enetIfPtr->macCfgPtr->txBdNumber;
    txbdCfg->txBufferSizeAlign = ENET_ALIGN(enetIfPtr->maxFrameSize, ENET_TX_BUFFER_ALIGNMENT);

    // Initialize every descriptor in the TX ring
    for (i = 0; i < NUM_OF_TX_RING; i++) {
        setTxDesc(i);
    }
    return NyLPC_TBool_TRUE;
}
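/*
 * Both descriptor setup paths over-allocate by ENET_BD_ALIGNMENT bytes and then round
 * the start address up with ENET_ALIGN(). The macro itself comes from the SDK headers;
 * the sketch below only illustrates the round-up-to-a-multiple idiom it is assumed to
 * implement (ALIGN_UP_SKETCH is an illustrative name, not the SDK definition).
 */
/* Assumed behaviour: round x up to the next multiple of align (align a power of two). */
#define ALIGN_UP_SKETCH(x, align)   (((uint32_t)(x) + ((align) - 1U)) & ~((uint32_t)(align) - 1U))
/* Example: ALIGN_UP_SKETCH(0x20000009, 16) == 0x20000010, and an already aligned address
 * is returned unchanged, so the ENET_BD_ALIGNMENT bytes of slack added to the calloc()
 * size are always enough to hold the shifted descriptor table. */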
/** \brief Sets up the RX descriptor ring buffers.
 *
 * This function sets up the descriptor list used for receive packets.
 *
 * \param[in] rxbdCfg Pointer to the RX buffer descriptor configuration to fill in
 * \returns NyLPC_TBool_TRUE on success, NyLPC_TBool_FALSE if out of memory
 */
static NyLPC_TBool k64f_rx_setup(enet_rxbd_config_t *rxbdCfg)
{
    // struct k64f_enetdata *k64f_enet = &(netif->state);
    enet_dev_if_t *enetIfPtr = (enet_dev_if_t *)&enetDevIf[BOARD_DEBUG_ENET_INSTANCE];
    uint32_t rxBufferSizeAligned;
    int i;

    // Allocate RX descriptors (release any previous allocation first)
    if (RX_DESC_BUF_BASE != NULL) {
        free(RX_DESC_BUF_BASE);
        RX_DESC_BUF_BASE = NULL;
    }
    RX_DESC_BUF_BASE = (void*)calloc(1, enet_hal_get_bd_size() * enetIfPtr->macCfgPtr->rxBdNumber + ENET_BD_ALIGNMENT);
    if (RX_DESC_BUF_BASE == NULL) {
        return NyLPC_TBool_FALSE;
    }
    // Round the descriptor start address up to the 16-byte alignment boundary
    _driver.rx_desc_start_addr = (uint8_t *)ENET_ALIGN((NyLPC_TUInt32)RX_DESC_BUF_BASE, ENET_BD_ALIGNMENT);

    rxBufferSizeAligned = ENET_ALIGN(enetIfPtr->macCfgPtr->rxBufferSize, ENET_RX_BUFFER_ALIGNMENT);
    enetIfPtr->macContextPtr->rxBufferSizeAligned = rxBufferSizeAligned;
    rxbdCfg->rxBdPtrAlign = _driver.rx_desc_start_addr;
    rxbdCfg->rxBdNum = enetIfPtr->macCfgPtr->rxBdNumber;
    rxbdCfg->rxBufferNum = enetIfPtr->macCfgPtr->rxBdNumber;

    // Initialization
    enet_hal_active_rxbd(BOARD_DEBUG_ENET_INSTANCE);
    for (i = 0; i < NUM_OF_RX_RING; i++) {
        setRxDesc(RX_BUF + (i * SIZE_OF_ETH_PACKET), i);
    }
    // k64f_rx_queue(netif, RX_PBUF_AUTO_INDEX);
    return NyLPC_TBool_TRUE;
}
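/*
 * The loop above carves one receive buffer per descriptor out of the contiguous RX_BUF
 * area at SIZE_OF_ETH_PACKET strides. Since RX_BUF itself is aligned (see
 * EthDev_K64F_getInterface() below), every slot stays aligned only if the stride is a
 * multiple of the hardware's RX buffer alignment. A build-time guard one could add to
 * make that assumption explicit (this guard is a sketch, not part of the original driver):
 */
typedef char k64f_rx_slot_alignment_check[
    (SIZE_OF_ETH_PACKET % ENET_RX_BUFFER_ALIGNMENT == 0) ? 1 : -1];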
////////////////////////////////////////////////////////////////////////////////
// LAN API
////////////////////////////////////////////////////////////////////////////////
NyLPC_TBool EthDev_K64F_getInterface(const struct TiEthernetDevice** o_dev)
{
    *o_dev = &_interface_KSZ8081RNACA;
    // Allocate the RX buffer pool and align it for the ENET DMA
    RX_BUF_BASE = (unsigned char*)malloc(SIZE_OF_ETH_PACKET * NUM_OF_RX_BUF + RX_BUF_ALIGNMENT);
    if (RX_BUF_BASE == NULL) {
        return NyLPC_TBool_FALSE;
    }
    RX_BUF = (unsigned char*)ENET_ALIGN((NyLPC_TUInt32)RX_BUF_BASE, RX_BUF_ALIGNMENT);
    // Allocate the TX memory block and align it as well
    TX_BUF_BASE = malloc(sizeof(struct NyLPC_TcEthernetMM_TxMemoryBlock) + TX_BUF_ALIGNMENT);
    if (TX_BUF_BASE == NULL) {
        free(RX_BUF_BASE);
        RX_BUF_BASE = NULL;
        return NyLPC_TBool_FALSE;
    }
    TX_BUF = (unsigned char*)ENET_ALIGN((NyLPC_TUInt32)TX_BUF_BASE, TX_BUF_ALIGNMENT);
    return NyLPC_TBool_TRUE;
}
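/*
 * A minimal usage sketch for the accessor above. Only the lookup and the checks shown
 * here are grounded in this file; what a caller does with `dev` afterwards depends on
 * the TiEthernetDevice interface definition, which lives elsewhere.
 */
static void EthDev_K64F_getInterface_example(void)
{
    const struct TiEthernetDevice* dev = NULL;
    if (EthDev_K64F_getInterface(&dev) != NyLPC_TBool_TRUE || dev == NULL) {
        // Allocation of the RX/TX work areas failed; the driver cannot be used.
        return;
    }
    // `dev` now points at the KSZ8081RNACA interface table published by this driver.
    (void)dev;
}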
/** \brief Attempt to allocate and requeue a new pbuf for RX
 *
 * \param[in] netif Pointer to the netif structure
 * \param[in] idx   Index of the descriptor to queue into, or RX_PBUF_AUTO_INDEX
 * \returns number of queued packets
 */
s32_t k64f_rx_queue(struct netif *netif, int idx)
{
    struct k64f_enetdata *k64f_enet = netif->state;
    enet_dev_if_t *enetIfPtr = (enet_dev_if_t *)&enetDevIf[BOARD_DEBUG_ENET_INSTANCE];
    struct pbuf *p;
    int queued = 0;

    /* Attempt to requeue as many packets as possible */
    while (k64f_enet->rx_free_descs > 0) {
        /* Allocate a pbuf from the pool. We need to allocate at the maximum size as
           we don't know the size of the yet to be received packet. */
        p = pbuf_alloc(PBUF_RAW, enetIfPtr->macCfgPtr->rxBufferSize + RX_BUF_ALIGNMENT, PBUF_RAM);
        if (p == NULL) {
            LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
                ("k64_rx_queue: could not allocate RX pbuf (free desc=%d)\n", k64f_enet->rx_free_descs));
            return queued;
        }
        /* K64F note: the next line ensures that the RX buffer is properly aligned for the K64F
           RX descriptors (16 bytes alignment). However, by doing so, we're effectively changing
           a data structure which is internal to lwIP. This might not prove to be a good idea
           in the long run, but a better fix would probably involve modifying lwIP itself */
        p->payload = (void*)ENET_ALIGN((uint32_t)p->payload, RX_BUF_ALIGNMENT);

        /* pbufs allocated from the RAM pool should be non-chained. */
        LWIP_ASSERT("k64f_rx_queue: pbuf is not contiguous (chained)", pbuf_clen(p) <= 1);

        /* Queue packet */
        k64f_rxqueue_pbuf(k64f_enet, p, idx);
        queued++;
    }

    return queued;
}
/** \brief Low level output of a packet. Never call this from an
 *  interrupt context, as it may block until TX descriptors
 *  become available.
 *
 * \param[in] netif the lwip network interface structure for this netif
 * \param[in] p the MAC packet to send (e.g. IP packet including MAC addresses and type)
 * \return ERR_OK if the packet could be sent or an err_t value if the packet couldn't be sent
 */
static err_t k64f_low_level_output(struct netif *netif, struct pbuf *p)
{
    struct k64f_enetdata *k64f_enet = netif->state;
    struct pbuf *q;
    struct pbuf *temp_pbuf;
    uint8_t *psend = NULL, *dst;

    temp_pbuf = pbuf_alloc(PBUF_RAW, p->tot_len + ENET_BUFF_ALIGNMENT, PBUF_RAM);
    if (NULL == temp_pbuf)
        return ERR_MEM;

    /* K64F note: the next line ensures that the TX buffer is properly aligned for the K64F
       descriptors (16 bytes alignment). However, by doing so, we're effectively changing a
       data structure which is internal to lwIP. This might not prove to be a good idea in
       the long run, but a better fix would probably involve modifying lwIP itself */
    psend = (uint8_t *)ENET_ALIGN((uint32_t)temp_pbuf->payload, ENET_BUFF_ALIGNMENT);

    /* Copy the (possibly chained) pbuf into the single aligned send buffer */
    for (q = p, dst = psend; q != NULL; q = q->next) {
        MEMCPY(dst, q->payload, q->len);
        dst += q->len;
    }

    /* Check if a descriptor is available for the transfer. */
    osStatus_t stat = osSemaphoreAcquire(k64f_enet->xTXDCountSem.id, 0);
    if (stat != osOK) {
        /* No descriptor free: release the copy we made before giving up */
        pbuf_free(temp_pbuf);
        return ERR_BUF;
    }

    /* Get exclusive access */
    sys_mutex_lock(&k64f_enet->TXLockMutex);

    /* Save the buffer so that it can be freed when transmit is done */
    tx_buff[k64f_enet->tx_produce_index % ENET_TX_RING_LEN] = temp_pbuf;
    k64f_enet->tx_produce_index += 1;

    /* Setup transfers */
    g_handle.txBdCurrent->buffer = psend;
    g_handle.txBdCurrent->length = p->tot_len;
    g_handle.txBdCurrent->control |= (ENET_BUFFDESCRIPTOR_TX_READY_MASK | ENET_BUFFDESCRIPTOR_TX_LAST_MASK);

    /* Increase the buffer descriptor address. */
    if (g_handle.txBdCurrent->control & ENET_BUFFDESCRIPTOR_TX_WRAP_MASK)
        g_handle.txBdCurrent = g_handle.txBdBase;
    else
        g_handle.txBdCurrent++;

    /* Activate the transmit buffer descriptor. */
    ENET->TDAR = ENET_TDAR_TDAR_MASK;

    LINK_STATS_INC(link.xmit);

    /* Restore access */
    sys_mutex_unlock(&k64f_enet->TXLockMutex);

    return ERR_OK;
}
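/*
 * For context: in a typical lwIP port this function is not called directly; it is
 * registered as the netif's link-level output hook when the interface is brought up.
 * The sketch below shows that conventional wiring only; the function name is
 * illustrative and not part of this driver.
 */
static void k64f_netif_hooks_sketch(struct netif *netif)
{
    netif->output = etharp_output;              /* standard lwIP ARP/IPv4 resolver */
    netif->linkoutput = k64f_low_level_output;  /* raw frame transmit defined above */
}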
/** \brief Sets up the TX descriptor ring buffers.
 *
 * This function sets up the descriptor list used for transmit packets.
 *
 * \param[in] netif Pointer to driver data structure
 * \param[in] txbdCfg Pointer to the TX buffer descriptor configuration to fill in
 * \returns ERR_MEM if out of memory, ERR_OK otherwise
 */
static err_t k64f_tx_setup(struct netif *netif, enet_txbd_config_t *txbdCfg)
{
    struct k64f_enetdata *k64f_enet = netif->state;
    enet_dev_if_t *enetIfPtr = (enet_dev_if_t *)&enetDevIf[BOARD_DEBUG_ENET_INSTANCE];
    uint8_t *txBdPtr;

    // Allocate TX descriptors
    txBdPtr = (uint8_t *)calloc(1, enet_hal_get_bd_size() * enetIfPtr->macCfgPtr->txBdNumber + ENET_BD_ALIGNMENT);
    if (!txBdPtr)
        return ERR_MEM;

    k64f_enet->tx_desc_start_addr = (uint8_t *)ENET_ALIGN((uint32_t)txBdPtr, ENET_BD_ALIGNMENT);
    k64f_enet->tx_consume_index = k64f_enet->tx_produce_index = 0;

    txbdCfg->txBdPtrAlign = k64f_enet->tx_desc_start_addr;
    txbdCfg->txBufferNum = enetIfPtr->macCfgPtr->txBdNumber;
    txbdCfg->txBufferSizeAlign = ENET_ALIGN(enetIfPtr->maxFrameSize, ENET_TX_BUFFER_ALIGNMENT);

    // Make the TX descriptor ring circular
    enet_hal_init_txbds(k64f_enet->tx_desc_start_addr + enet_hal_get_bd_size() * (ENET_TX_RING_LEN - 1), 1);

    return ERR_OK;
}
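/*
 * The produce/consume indices initialized above, together with the xTXDCountSem
 * semaphore used in k64f_low_level_output(), need a counterpart that runs when the
 * MAC reports frames as sent. The sketch below only illustrates the assumed shape of
 * that reclaim step; it is not this driver's actual implementation. The `completed`
 * count is a hypothetical parameter (e.g. frames counted in the TX-event handler);
 * tx_buff[], ENET_TX_RING_LEN and the struct fields are reused from the code above.
 */
static void k64f_tx_reclaim_sketch(struct k64f_enetdata *k64f_enet, size_t completed)
{
    /* Free the pbuf saved for each completed frame and return its descriptor slot
       by giving the TX-descriptor-count semaphore back to the output path. */
    while (completed-- > 0 &&
           k64f_enet->tx_consume_index != k64f_enet->tx_produce_index) {
        pbuf_free(tx_buff[k64f_enet->tx_consume_index % ENET_TX_RING_LEN]);
        k64f_enet->tx_consume_index += 1;
        osSemaphoreRelease(k64f_enet->xTXDCountSem.id);
    }
}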
/** \brief Sets up the RX descriptor ring buffers.
 *
 * This function sets up the descriptor list used for receive packets.
 *
 * \param[in] netif Pointer to driver data structure
 * \param[in] rxbdCfg Pointer to the RX buffer descriptor configuration to fill in
 * \returns ERR_MEM if out of memory, ERR_OK otherwise
 */
static err_t k64f_rx_setup(struct netif *netif, enet_rxbd_config_t *rxbdCfg)
{
    struct k64f_enetdata *k64f_enet = netif->state;
    enet_dev_if_t *enetIfPtr = (enet_dev_if_t *)&enetDevIf[BOARD_DEBUG_ENET_INSTANCE];
    uint8_t *rxBdPtr;
    uint32_t rxBufferSizeAligned;

    // Allocate RX descriptors
    rxBdPtr = (uint8_t *)calloc(1, enet_hal_get_bd_size() * enetIfPtr->macCfgPtr->rxBdNumber + ENET_BD_ALIGNMENT);
    if (!rxBdPtr)
        return ERR_MEM;

    k64f_enet->rx_desc_start_addr = (uint8_t *)ENET_ALIGN((uint32_t)rxBdPtr, ENET_BD_ALIGNMENT);
    k64f_enet->rx_free_descs = enetIfPtr->macCfgPtr->rxBdNumber;
    k64f_enet->rx_fill_index = 0;

    rxBufferSizeAligned = ENET_ALIGN(enetIfPtr->macCfgPtr->rxBufferSize, ENET_RX_BUFFER_ALIGNMENT);
    enetIfPtr->macContextPtr->rxBufferSizeAligned = rxBufferSizeAligned;
    rxbdCfg->rxBdPtrAlign = k64f_enet->rx_desc_start_addr;
    rxbdCfg->rxBdNum = enetIfPtr->macCfgPtr->rxBdNumber;
    rxbdCfg->rxBufferNum = enetIfPtr->macCfgPtr->rxBdNumber;

    k64f_rx_queue(netif, RX_PBUF_AUTO_INDEX);

    return ERR_OK;
}
/** \brief Allocates a pbuf and returns the data from the incoming packet.
 *
 * \param[in] netif the lwip network interface structure
 * \param[in] idx index of packet to be read
 * \return a pbuf filled with the received packet (including MAC header)
 */
static struct pbuf *k64f_low_level_input(struct netif *netif, int idx)
{
    volatile enet_rx_bd_struct_t *bdPtr = g_handle.rxBdCurrent;
    struct pbuf *p = NULL;
    struct pbuf *temp_rxbuf = NULL;
    u32_t length = 0;
    const u16_t err_mask = ENET_BUFFDESCRIPTOR_RX_TRUNC_MASK | ENET_BUFFDESCRIPTOR_RX_CRC_MASK |
                           ENET_BUFFDESCRIPTOR_RX_NOOCTET_MASK | ENET_BUFFDESCRIPTOR_RX_LENVLIOLATE_MASK;

#ifdef LOCK_RX_THREAD
    /* Get exclusive access */
    sys_mutex_lock(&k64f_enet->TXLockMutex);
#endif

    /* Determine if a frame has been received */
    if ((bdPtr->control & err_mask) != 0) {
#if LINK_STATS
        if ((bdPtr->control & ENET_BUFFDESCRIPTOR_RX_LENVLIOLATE_MASK) != 0)
            LINK_STATS_INC(link.lenerr);
        else
            LINK_STATS_INC(link.chkerr);
#endif
        LINK_STATS_INC(link.drop);
        /* Re-use the same buffer in case of error */
        update_read_buffer(NULL);
    } else {
        /* A packet is waiting, get length */
        length = bdPtr->length;

        /* Zero-copy */
        p = rx_buff[idx];
        p->len = length;

        /* Attempt to queue new buffer */
        temp_rxbuf = pbuf_alloc(PBUF_RAW, ENET_ETH_MAX_FLEN + ENET_BUFF_ALIGNMENT, PBUF_RAM);
        if (NULL == temp_rxbuf) {
            /* Drop frame (out of memory) */
            LINK_STATS_INC(link.drop);

            /* Re-queue the same buffer */
            update_read_buffer(NULL);

            LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
                ("k64f_low_level_input: Packet index %d dropped for OOM\n", idx));
#ifdef LOCK_RX_THREAD
            sys_mutex_unlock(&k64f_enet->TXLockMutex);
#endif
            return NULL;
        }

        rx_buff[idx] = temp_rxbuf;
        /* K64F note: the next line ensures that the RX buffer is properly aligned for the K64F
           RX descriptors (16 bytes alignment). However, by doing so, we're effectively changing
           a data structure which is internal to lwIP. This might not prove to be a good idea
           in the long run, but a better fix would probably involve modifying lwIP itself */
        rx_buff[idx]->payload = (void*)ENET_ALIGN((uint32_t)rx_buff[idx]->payload, ENET_BUFF_ALIGNMENT);
        rx_ptr[idx] = rx_buff[idx]->payload;

        update_read_buffer(rx_buff[idx]->payload);

        LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
            ("k64f_low_level_input: Packet received: %p, size %"PRIu32" (index=%d)\n", p, length, idx));

        /* Save size */
        p->tot_len = (u16_t) length;
        LINK_STATS_INC(link.recv);
    }

#ifdef LOCK_RX_THREAD
    sys_mutex_unlock(&k64f_enet->TXLockMutex);
#endif

    return p;
}
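/*
 * For context: the pbuf returned above is normally handed to the stack by the RX
 * task or event handler through netif->input(), which for Ethernet netifs points at
 * lwIP's ethernet input. A minimal sketch of that hand-off; the caller name is
 * illustrative and not part of this driver.
 */
static void k64f_enetif_input_sketch(struct netif *netif, int idx)
{
    struct pbuf *p = k64f_low_level_input(netif, idx);
    if (p == NULL) {
        return;                  /* frame was dropped or errored; nothing to deliver */
    }
    if (netif->input(p, netif) != ERR_OK) {
        LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
            ("k64f_enetif_input_sketch: input error\n"));
        pbuf_free(p);            /* the stack did not take ownership */
    }
}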
/** \brief Low level init of the MAC and PHY.
 *
 * \param[in] netif Pointer to LWIP netif structure
 * \return ERR_MEM if out of memory, ERR_OK otherwise
 */
static err_t low_level_init(struct netif *netif)
{
    struct k64f_enetdata *k64f_enet = netif->state;
    uint8_t i;
    uint32_t sysClock;
    phy_speed_t phy_speed;
    phy_duplex_t phy_duplex;
    uint32_t phyAddr = 0;
    bool link = false;
    enet_config_t config;

    // Allocate RX descriptors
    rx_desc_start_addr = (uint8_t *)calloc(1, sizeof(enet_rx_bd_struct_t) * ENET_RX_RING_LEN + ENET_BUFF_ALIGNMENT);
    if (!rx_desc_start_addr)
        return ERR_MEM;

    // Allocate TX descriptors
    tx_desc_start_addr = (uint8_t *)calloc(1, sizeof(enet_tx_bd_struct_t) * ENET_TX_RING_LEN + ENET_BUFF_ALIGNMENT);
    if (!tx_desc_start_addr)
        return ERR_MEM;

    rx_desc_start_addr = (uint8_t *)ENET_ALIGN((uint32_t)rx_desc_start_addr, ENET_BUFF_ALIGNMENT);
    tx_desc_start_addr = (uint8_t *)ENET_ALIGN((uint32_t)tx_desc_start_addr, ENET_BUFF_ALIGNMENT);

    /* Create buffers for each receive BD */
    for (i = 0; i < ENET_RX_RING_LEN; i++) {
        rx_buff[i] = pbuf_alloc(PBUF_RAW, ENET_ETH_MAX_FLEN + ENET_BUFF_ALIGNMENT, PBUF_RAM);
        if (NULL == rx_buff[i])
            return ERR_MEM;

        /* K64F note: the next line ensures that the RX buffer is properly aligned for the K64F
           RX descriptors (16 bytes alignment). However, by doing so, we're effectively changing
           a data structure which is internal to lwIP. This might not prove to be a good idea
           in the long run, but a better fix would probably involve modifying lwIP itself */
        rx_buff[i]->payload = (void*)ENET_ALIGN((uint32_t)rx_buff[i]->payload, ENET_BUFF_ALIGNMENT);
        rx_ptr[i] = rx_buff[i]->payload;
    }

    k64f_enet->tx_consume_index = k64f_enet->tx_produce_index = 0;

    /* prepare the buffer configuration. */
    enet_buffer_config_t buffCfg = {
        ENET_RX_RING_LEN,
        ENET_TX_RING_LEN,
        ENET_ALIGN(ENET_ETH_MAX_FLEN, ENET_BUFF_ALIGNMENT),
        0,
        (volatile enet_rx_bd_struct_t *)rx_desc_start_addr,
        (volatile enet_tx_bd_struct_t *)tx_desc_start_addr,
        (uint8_t *)&rx_ptr,
        NULL,
    };

#if (defined(TARGET_K64F) && (defined(TARGET_FRDM)))
    k64f_init_eth_hardware();
#endif
#if (defined(TARGET_K66F) && (defined(TARGET_FRDM)))
    k66f_init_eth_hardware();
#endif

    sysClock = CLOCK_GetFreq(kCLOCK_CoreSysClk);

    ENET_GetDefaultConfig(&config);

    PHY_Init(ENET, 0, sysClock);
    PHY_GetLinkStatus(ENET, phyAddr, &link);
    if (link) {
        /* Get link information from PHY */
        PHY_GetLinkSpeedDuplex(ENET, phyAddr, &phy_speed, &phy_duplex);
        /* Change the MII speed and duplex for actual link status. */
        config.miiSpeed = (enet_mii_speed_t)phy_speed;
        config.miiDuplex = (enet_mii_duplex_t)phy_duplex;
        config.interrupt = kENET_RxFrameInterrupt | kENET_TxFrameInterrupt;
    }
    config.rxMaxFrameLen = ENET_ETH_MAX_FLEN;
    config.macSpecialConfig = kENET_ControlFlowControlEnable;
    config.txAccelerConfig = kENET_TxAccelIsShift16Enabled;
    config.rxAccelerConfig = kENET_RxAccelisShift16Enabled | kENET_RxAccelMacCheckEnabled;

    ENET_Init(ENET, &g_handle, &config, &buffCfg, netif->hwaddr, sysClock);
    ENET_SetCallback(&g_handle, ethernet_callback, netif);
    ENET_ActiveRead(ENET);

    return ERR_OK;
}
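/*
 * The ethernet_callback() registered above is defined elsewhere in this driver. The
 * sketch below only illustrates the assumed shape of such a callback for the SDK ENET
 * driver; the exact enet_callback_t signature and event names should be checked against
 * the fsl_enet.h in use. Signalling a worker thread or semaphore here, instead of
 * processing frames directly, keeps the lwIP calls out of interrupt context.
 */
static void ethernet_callback_sketch(ENET_Type *base, enet_handle_t *handle,
                                     enet_event_t event, void *userData)
{
    struct netif *netif = (struct netif *)userData;
    (void)base;
    (void)handle;
    switch (event) {
    case kENET_RxEvent:
        /* e.g. release a semaphore the RX thread waits on before it walks the ready
           descriptors with k64f_low_level_input()/netif->input() */
        break;
    case kENET_TxEvent:
        /* e.g. trigger the TX reclaim step so the pbufs saved in tx_buff[] are freed */
        break;
    default:
        break;
    }
    (void)netif;
}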