/*
 * mvNfpSecInvRange - invalidate the data-cache lines that cover the
 * byte range [addr, addr + size].
 *
 * @addr: start of the range (need not be cache-line aligned)
 * @size: length of the range in bytes
 *
 * Each line is invalidated individually via mvOsCacheLineInv(), with an
 * I/O sync after every line.
 *
 * Fix vs. original: the loop counter 'i' was declared and set to 0 but
 * never used anywhere in the loop - removed the dead variable.
 */
static INLINE MV_VOID mvNfpSecInvRange(MV_U8 *addr, MV_U32 size)
{
	MV_U8 *align;

	/* Round the start address down to its 32-byte cache-line boundary.
	 * NOTE(review): the MV_U32 cast assumes 32-bit pointers - would
	 * truncate on a 64-bit build; confirm target is 32-bit only. */
	align = (MV_U8 *) ((MV_U32) addr & ~0x1f);

	/* Walk line by line over the range.
	 * NOTE(review): the '<=' bound also invalidates the line at
	 * addr + size (one byte past the range) - if that line holds
	 * unrelated dirty data it gets discarded; confirm intentional. */
	for (; align <= (addr + size); align += CPU_D_CACHE_LINE_SIZE) {
		mvOsCacheLineInv(NULL, align);
		mvOsCacheIoSync();
	}
}
/*
 * eth_l2fw_copy_packet_withXor - duplicate an Rx packet into a fresh
 * buffer taken from the same BM pool, using XOR DMA engine 1 / channel 0
 * for the bulk of the copy.
 *
 * @pRxPktInfo: received packet descriptor to duplicate
 *
 * Returns the new eth_pbuf on success, or NULL if the pool is exhausted.
 *
 * The XOR engine is programmed to copy the payload starting 30 bytes past
 * (offset + MV_ETH_MH_SIZE); the leading bytes are copied by the CPU via
 * l2fw_copy_mac().
 * NOTE(review): the meaning of the constant 30 is not visible here -
 * presumably it splits the L2 header (CPU copy) from the payload (DMA
 * copy); confirm against l2fw_copy_mac().
 * NOTE(review): this function only *starts* the XOR transfer (BIT31 set,
 * activation register written) and returns without polling for
 * completion - the caller is presumably responsible for ensuring the DMA
 * has finished before the buffer is transmitted; confirm.
 */
static inline struct eth_pbuf *eth_l2fw_copy_packet_withXor(struct eth_pbuf *pRxPktInfo)
{
	struct bm_pool *pool;
	struct eth_pbuf *pTxPktInfo;

	/* Take a destination buffer from the same pool as the source. */
	pool = &mv_eth_pool[pRxPktInfo->pool];
	pTxPktInfo = mv_eth_pool_get(pool);
	if (pTxPktInfo == NULL) {
		mvOsPrintf("pTxPktInfo == NULL in %s\n", __func__);
		return NULL;
	}

	/* sync between giga and XOR to avoid errors (like checksum errors
	 * in TX) when working with IOCC */
	mvOsCacheIoSync();

	/* Program the (single, shared) XOR descriptor: copy
	 * (bytes - 30) bytes from the Rx buffer to the new buffer. */
	eth_xor_desc->srcAdd0 = pRxPktInfo->physAddr + pRxPktInfo->offset + MV_ETH_MH_SIZE + 30;
	eth_xor_desc->phyDestAdd = pTxPktInfo->physAddr + pTxPktInfo->offset + MV_ETH_MH_SIZE + 30;
	eth_xor_desc->byteCnt = pRxPktInfo->bytes - 30;
	eth_xor_desc->phyNextDescPtr = 0;
	eth_xor_desc->status = BIT31;	/* descriptor ownership / start flag */

	/* we had changed only the first part of eth_xor_desc, so flush only
	 * one line of cache */
	mvOsCacheLineFlush(NULL, eth_xor_desc);

	/* Kick XOR engine 1, channel 0. */
	MV_REG_WRITE(XOR_NEXT_DESC_PTR_REG(1, XOR_CHAN(0)), eth_xor_desc_phys_addr);
	MV_REG_WRITE(XOR_ACTIVATION_REG(1, XOR_CHAN(0)), XEXACTR_XESTART_MASK);

	/* CPU copies the leading header bytes while the DMA runs:
	 * invalidate the source line, copy, flush the destination line. */
	mvOsCacheLineInv(NULL, pRxPktInfo->pBuf + pRxPktInfo->offset);
	l2fw_copy_mac(pRxPktInfo, pTxPktInfo);
	mvOsCacheLineFlush(NULL, pTxPktInfo->pBuf + pTxPktInfo->offset);

	/* Update TxPktInfo */
	pTxPktInfo->bytes = pRxPktInfo->bytes;

	return pTxPktInfo;
}
static int mvEgigaRx(struct eth_device *dev) { egigaPriv *priv = dev->priv; MV_U8 *pkt; int packets_done = 0; int num_recieved_packets, pool_id; MV_U32 status; MV_PP2_PHYS_RXQ_CTRL *pRxq; PP2_RX_DESC *pDesc; if (priv->devInit != MV_TRUE || priv->devEnable != MV_TRUE) return 0; /* port is not initialized or not enabled */ pRxq = mvPp2RxqHndlGet(priv->port, EGIGA_DEF_RXQ); num_recieved_packets = mvPp2RxqBusyDescNumGet(priv->port, EGIGA_DEF_RXQ); packets_done = num_recieved_packets; while (num_recieved_packets--) { pDesc = mvPp2RxqNextDescGet(pRxq); /* cache invalidate - descriptor */ mvOsCacheLineInv(NULL, pDesc); #if defined(MV_CPU_BE) mvNetaRxqDescSwap(pDesc);//TODO #endif /* MV_CPU_BE */ status = pDesc->status; /* drop packets with error or with buffer header (MC, SG) */ if ((status & PP2_RX_BUF_HDR_MASK) || (status & PP2_RX_ES_MASK)) { #if defined(MV_CPU_BE) mvNetaRxqDescSwap(pDesc);//TODO #endif /* MV_CPU_BE */ mvOsCacheLineFlushInv(NULL, pDesc); continue; } /* TODO: drop fragmented packets */ /* cache invalidate - packet */ mvOsCacheInvalidate(NULL, (void *)pDesc->bufPhysAddr, RX_BUFFER_SIZE); /* give packet to stack - skip on first 2 bytes + buffer header */ pkt = ((MV_U8 *)pDesc->bufPhysAddr) + 2 + BUFF_HDR_OFFS; NetReceive(pkt, (int)pDesc->dataSize - 2); /* refill: pass packet back to BM */ pool_id = (status & PP2_RX_BM_POOL_ALL_MASK) >> PP2_RX_BM_POOL_ID_OFFS; mvBmPoolPut(pool_id, (MV_ULONG) pDesc->bufPhysAddr, (MV_ULONG) pDesc->bufCookie); /* cache invalidate - packet */ #if defined(MV_CPU_BE) mvNetaRxqDescSwap(pDesc);//TODO #endif /* MV_CPU_BE */ mvOsCacheInvalidate(NULL, (void *)pDesc->bufPhysAddr, RX_BUFFER_SIZE); } /* cache invalidate - descriptor */ mvOsCacheLineInv(NULL, pDesc); mvPp2RxqDescNumUpdate(priv->port, EGIGA_DEF_RXQ, packets_done, packets_done); return 0; }
/*
 * mv_eth_l2fw_rx - L2-forwarding receive loop for one NETA Rx queue.
 *
 * @pp:      ingress port
 * @rx_todo: NAPI budget (max packets to process this call)
 * @rxq:     Rx queue index on @pp
 *
 * For each received packet: locate the IP header, resolve the egress
 * port (fixed ESP port when espEnabled, otherwise via l2fw_lookup() on
 * src/dst IP), then apply ppl2fw->cmd: transmit as-is, swap MACs in
 * place, or copy into a fresh buffer (XOR DMA above l2fw_xor_threshold,
 * CPU copy below) and transmit the copy. Buffers are refilled or
 * returned to their BM pool according to the tx status.
 *
 * Returns the number of descriptors processed.
 */
static inline int mv_eth_l2fw_rx(struct eth_port *pp, int rx_todo, int rxq)
{
	struct eth_port *new_pp;
	L2FW_RULE *l2fw_rule;
	MV_NETA_RXQ_CTRL *rx_ctrl = pp->rxq_ctrl[rxq].q;
	int rx_done, rx_filled;
	struct neta_rx_desc *rx_desc;
	u32 rx_status = MV_OK;
	struct eth_pbuf *pkt;
	struct eth_pbuf *newpkt = NULL;
	struct bm_pool *pool;
	MV_STATUS status = MV_OK;
	struct eth_port_l2fw *ppl2fw = mv_eth_ports_l2fw[pp->port];
	MV_IP_HEADER *pIph = NULL;
	MV_U8 *pData;
	int ipOffset;

	/* Clamp the budget to what the hardware actually has pending. */
	rx_done = mvNetaRxqBusyDescNumGet(pp->port, rxq);
	mvOsCacheIoSync();
	if (rx_todo > rx_done)
		rx_todo = rx_done;
	rx_done = 0;
	rx_filled = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
#ifdef CONFIG_MV_ETH_RX_DESC_PREFETCH
		rx_desc = mv_eth_rx_prefetch(pp, rx_ctrl, rx_done, rx_todo);
		if (!rx_desc)
			printk(KERN_INFO "rx_desc is NULL in %s\n", __func__);
#else
		rx_desc = mvNetaRxqNextDescGet(rx_ctrl);
		mvOsCacheLineInv(NULL, rx_desc);
		prefetch(rx_desc);
#endif /* CONFIG_MV_ETH_RX_DESC_PREFETCH */
		rx_done++;
		rx_filled++;

		pkt = (struct eth_pbuf *)rx_desc->bufCookie;
		if (!pkt) {
			printk(KERN_INFO "pkt is NULL in ; rx_done=%d %s\n", rx_done, __func__);
			return rx_done;
		}

		pool = &mv_eth_pool[pkt->pool];

		/* Drop descriptors that are not a full frame or carry an
		 * error summary; refill the queue with the same buffer. */
		rx_status = rx_desc->status;
		if (((rx_status & NETA_RX_FL_DESC_MASK) != NETA_RX_FL_DESC_MASK) || (rx_status & NETA_RX_ES_MASK)) {
			STAT_ERR(pp->stats.rx_error++);
			if (pp->dev)
				pp->dev->stats.rx_errors++;
			mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
			continue;
		}

		/* Payload length excludes CRC and the Marvell header. */
		pkt->bytes = rx_desc->dataSize - (MV_ETH_CRC_SIZE + MV_ETH_MH_SIZE);
		pData = pkt->pBuf + pkt->offset;

		/* Locate the IP header: from the PNC result when available,
		 * otherwise computed from the (possibly VLAN-tagged) L2
		 * header size. */
#ifdef CONFIG_MV_ETH_PNC
		ipOffset = NETA_RX_GET_IPHDR_OFFSET(rx_desc);
#else
		if ((rx_desc->status & ETH_RX_VLAN_TAGGED_FRAME_MASK))
			ipOffset = MV_ETH_MH_SIZE + sizeof(MV_802_3_HEADER) + MV_VLAN_HLEN;
		else
			ipOffset = MV_ETH_MH_SIZE + sizeof(MV_802_3_HEADER);
#endif

		pIph = (MV_IP_HEADER *)(pData + ipOffset);
		/* NOTE(review): pIph is pData + ipOffset with pData non-NULL,
		 * so this check can never fire - dead code; confirm and
		 * remove in a follow-up. */
		if (pIph == NULL) {
			printk(KERN_INFO "pIph==NULL in %s\n", __func__);
			continue;
		}
#ifdef CONFIG_MV_ETH_L2FW_DEBUG
		if (pIph) {
			MV_U8 *srcIP, *dstIP;
			srcIP = (MV_U8 *)&(pIph->srcIP);
			dstIP = (MV_U8 *)&(pIph->dstIP);
			printk(KERN_INFO "%u.%u.%u.%u->%u.%u.%u.%u in %s\n", MV_IPQUAD(srcIP), MV_IPQUAD(dstIP), __func__);
		} else
			printk(KERN_INFO "pIph is NULL in %s\n", __func__);
#endif

		/* Resolve egress port: fixed ESP port, or rule table keyed
		 * by (srcIP, dstIP); no rule means drop-and-refill. */
		if (espEnabled)
			new_pp = mv_eth_ports[ppl2fw->txPort];
		else {
			l2fw_rule = l2fw_lookup(pIph->srcIP, pIph->dstIP);
			if (!l2fw_rule) {
#ifdef CONFIG_MV_ETH_L2FW_DEBUG
				printk(KERN_INFO "l2fw_lookup() failed in %s\n", __func__);
#endif
				mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
				continue;
			}
#ifdef CONFIG_MV_ETH_L2FW_DEBUG
			printk(KERN_INFO "l2fw_lookup() is ok l2fw_rule->port=%d in %s\n", l2fw_rule->port, __func__);
#endif
			new_pp = mv_eth_ports[l2fw_rule->port];
		}

		/* NOTE(review): no default case and the last case has no
		 * break - an unknown cmd leaves 'status' at its previous
		 * value; confirm cmd is always one of the three. */
		switch (ppl2fw->cmd) {
		case TX_AS_IS:
#ifdef CONFIG_MV_ETH_L2SEC
			if (espEnabled) {
				status = handleEsp(pkt, rx_desc, new_pp, pp->port);
			} else
#endif
				status = mv_eth_l2fw_tx(pkt, new_pp, 0, rx_desc);
			break;

		case SWAP_MAC:
			/* Swap src/dst MACs in place: invalidate the header
			 * line, swap with the CPU, flush back for DMA. */
			mvOsCacheLineInv(NULL, pkt->pBuf + pkt->offset);
			l2fw_swap_mac(pkt);
			mvOsCacheLineFlush(NULL, pkt->pBuf+pkt->offset);
			status = mv_eth_l2fw_tx(pkt, new_pp, 0, rx_desc);
			break;

		case COPY_AND_SWAP:
			/* Large packets are copied by the XOR DMA engine
			 * (tx called with mv_eth_l2fw_tx(..., 1, ...)),
			 * small ones by the CPU. */
			if (pkt->bytes >= l2fw_xor_threshold) {
				newpkt = eth_l2fw_copy_packet_withXor(pkt);
				if (newpkt)
					status = mv_eth_l2fw_tx(newpkt, new_pp, 1, rx_desc);
				else
					status = MV_ERROR;
			} else {
				newpkt = eth_l2fw_copy_packet_withoutXor(pkt);
				if (newpkt)
					status = mv_eth_l2fw_tx(newpkt, new_pp, 0, rx_desc);
				else
					status = MV_ERROR;
			}
		}

		/* Buffer bookkeeping per tx outcome. */
		if (status == MV_OK) {
			mvOsCacheLineInv(NULL, rx_desc);
			/* we do not need the pkt , we do not do anything with it*/
			if ((ppl2fw->cmd == COPY_AND_SWAP) && !(espEnabled))
				mv_eth_pool_put(pool, pkt);
			continue;
		} else if (status == MV_DROPPED) {
			mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
			if ((ppl2fw->cmd == COPY_AND_SWAP) && !(espEnabled))
				mv_eth_pool_put(pool, newpkt);
			continue;
		} else if (status == MV_ERROR) {
			printk(KERN_INFO "MV_ERROR in %s\n", __func__);
			mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
		}
	} /* of while */

	/* Update RxQ management counters */
	mvOsCacheIoSync();
	mvNetaRxqDescNumUpdate(pp->port, rxq, rx_done, rx_filled);

	return rx_done;
}