/*
 * eth_l2fw_copy_packet_withXor - duplicate an RX packet using the XOR DMA engine.
 *
 * Allocates a new buffer from the same BM pool as @pRxPktInfo, programs the
 * (global) XOR descriptor to copy the packet payload by DMA, and copies the
 * MAC header portion by CPU (l2fw_copy_mac).  The DMA is started here but NOT
 * waited for; completion is the caller's concern.
 *
 * @pRxPktInfo: received packet descriptor to copy.
 *
 * Returns the new packet descriptor, or NULL if the pool is empty.
 */
static inline struct eth_pbuf *eth_l2fw_copy_packet_withXor(struct eth_pbuf *pRxPktInfo)
{
	struct bm_pool *pool;
	struct eth_pbuf *pTxPktInfo;

	/* Take the destination buffer from the same pool as the source packet */
	pool = &mv_eth_pool[pRxPktInfo->pool];
	pTxPktInfo = mv_eth_pool_get(pool);
	if (pTxPktInfo == NULL) {
		mvOsPrintf("pTxPktInfo == NULL in %s\n", __func__);
		return NULL;
	}

	/* sync between giga and XOR to avoid errors (like checksum errors in TX)
	 * when working with IOCC */
	mvOsCacheIoSync();

	/* Program the XOR descriptor: copy payload only.
	 * NOTE(review): the magic constant 30 skips the leading header bytes
	 * (presumably MAC addresses plus part of the L2 header that
	 * l2fw_copy_mac() copies by CPU below) — confirm against the
	 * l2fw_copy_mac() implementation before changing it. */
	eth_xor_desc->srcAdd0 = pRxPktInfo->physAddr + pRxPktInfo->offset + MV_ETH_MH_SIZE + 30;
	eth_xor_desc->phyDestAdd = pTxPktInfo->physAddr + pTxPktInfo->offset + MV_ETH_MH_SIZE + 30;
	eth_xor_desc->byteCnt = pRxPktInfo->bytes - 30;
	eth_xor_desc->phyNextDescPtr = 0;
	eth_xor_desc->status = BIT31;	/* ownership bit: descriptor ready for HW */

	/* we had changed only the first part of eth_xor_desc, so flush only
	 * one line of cache */
	mvOsCacheLineFlush(NULL, eth_xor_desc);

	/* Point XOR channel 0 (unit 1) at the descriptor and kick the transfer */
	MV_REG_WRITE(XOR_NEXT_DESC_PTR_REG(1, XOR_CHAN(0)), eth_xor_desc_phys_addr);
	MV_REG_WRITE(XOR_ACTIVATION_REG(1, XOR_CHAN(0)), XEXACTR_XESTART_MASK);

	/* CPU copies the MAC header while the XOR engine copies the payload:
	 * invalidate the source line (it may be stale vs. DMA'd RX data),
	 * copy, then flush the destination line so HW sees it on TX */
	mvOsCacheLineInv(NULL, pRxPktInfo->pBuf + pRxPktInfo->offset);
	l2fw_copy_mac(pRxPktInfo, pTxPktInfo);
	mvOsCacheLineFlush(NULL, pTxPktInfo->pBuf + pTxPktInfo->offset);

	/* Update TxPktInfo */
	pTxPktInfo->bytes = pRxPktInfo->bytes;
	return pTxPktInfo;
}
/*
 * mvEgigaTx - transmit one frame on a PP2 port (polling, single descriptor).
 *
 * Builds one aggregated-TXQ descriptor for @buf/@len, flushes the frame to
 * memory, kicks the queue, then busy-waits (bounded by a loop counter) first
 * for the aggregated->physical TXQ transfer and then for TX-done.
 *
 * @dev: network device whose priv holds port state (egigaPriv).
 * @buf: frame payload (must be DMA-able; flushed here).
 * @len: frame length in bytes.
 *
 * Returns 0 on success, 1 on failure (no descriptor or timeout).
 */
static int mvEgigaTx(struct eth_device *dev, volatile void *buf, int len)
{
	egigaPriv *priv = dev->priv;
	MV_U32 timeout = 0;
	int txDone;
	MV_PP2_AGGR_TXQ_CTRL *pAggrTxq;
	PP2_TX_DESC *pDesc;

	if (priv->devInit != MV_TRUE || priv->devEnable != MV_TRUE)
		return 0; /* port is not initialized or not enabled */

	pAggrTxq = mvPp2AggrTxqHndlGet(0);

	/* get next descriptor */
	pDesc = mvPp2AggrTxqNextDescGet(pAggrTxq);
	if (pDesc == NULL) {
		printf("No available descriptors\n");
		goto error;
	}

	/* set descriptor fields: no L4 checksum offload, single-buffer frame
	 * (both First and Last descriptor flags set) */
	pDesc->command = 0 | PP2_TX_L4_CSUM_NOT | PP2_TX_F_DESC_MASK | PP2_TX_L_DESC_MASK;
	pDesc->dataSize = len;
	/* split buf into an aligned physical base + intra-line offset */
	pDesc->pktOffset = (MV_U32)buf & EGIGA_TX_DESC_ALIGN;
	pDesc->bufPhysAddr = (MV_U32)buf & (~EGIGA_TX_DESC_ALIGN);
	pDesc->bufCookie = (MV_U32)buf;
	pDesc->physTxq = MV_PPV2_TXQ_PHYS(priv->port, EGIGA_DEF_TXP, EGIGA_DEF_TXQ);

	/* make the frame visible to the DMA engine before kicking TX */
	mvOsCacheFlush(NULL, (void *)buf, len);

#if defined(MV_CPU_BE)
	/* TODO(review): this is the NETA descriptor-swap helper used in a PP2
	 * TX path — confirm a PP2-specific swap isn't required here */
	mvNetaTxqDescSwap(pDesc);
#endif /* MV_CPU_BE */

	/* flush the descriptor itself so HW reads the fields written above */
	mvOsCacheLineFlush(NULL, (void *)pDesc);

	/* send */
	mvPp2AggrTxqPendDescAdd(1);

	/* Enable TXQ drain */
	mvPp2TxqDrainSet(priv->port, EGIGA_DEF_TXP, EGIGA_DEF_TXQ, MV_TRUE);

	/* Tx done processing */
	/* wait for agrregated to physical TXQ transfer */
	txDone = mvPp2AggrTxqPendDescNumGet(0);
	while (txDone) {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent from aggregated to phys TXQ\n");
			goto error;
		}
		txDone = mvPp2AggrTxqPendDescNumGet(0);
	}

	/* Disable TXQ drain */
	mvPp2TxqDrainSet(priv->port, EGIGA_DEF_TXP, EGIGA_DEF_TXQ, MV_FALSE);

	/* wait for packet to be transmitted (fresh timeout budget) */
	timeout = 0;
	txDone = mvPp2TxqSentDescProc(priv->port, EGIGA_DEF_TXP, EGIGA_DEF_TXQ);
	while (!txDone) {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent\n");
			goto error;
		}
		txDone = mvPp2TxqSentDescProc(priv->port, EGIGA_DEF_TXP, EGIGA_DEF_TXQ);
	}
	/* txDone has increased - hw sent packet */

	return 0;

error:
	printf("%s: %s failed\n", __func__, dev->name);
	/* mvNetaTxpReset(priv->port, EGIGA_DEF_TXP); */
	return 1;
}
/*
 * mv_eth_l2fw_rx - L2 forwarding receive fast path for one RX queue.
 *
 * Drains up to @rx_todo descriptors from @rxq on port @pp, looks up the
 * destination port per packet (ESP override, or src/dst-IP l2fw rule table),
 * and forwards each packet according to the per-port l2fw command:
 * TX_AS_IS (forward unchanged, optionally through ESP handling),
 * SWAP_MAC (swap MAC addresses in place), or COPY_AND_SWAP (copy the packet —
 * by XOR DMA above a size threshold, by CPU otherwise — then forward the copy).
 *
 * @pp:      receiving port.
 * @rx_todo: budget — maximum descriptors to process this call.
 * @rxq:     RX queue index on @pp.
 *
 * Returns the number of descriptors actually processed.
 */
static inline int mv_eth_l2fw_rx(struct eth_port *pp, int rx_todo, int rxq)
{
	struct eth_port *new_pp;
	L2FW_RULE *l2fw_rule;
	MV_NETA_RXQ_CTRL *rx_ctrl = pp->rxq_ctrl[rxq].q;
	int rx_done, rx_filled;
	struct neta_rx_desc *rx_desc;
	u32 rx_status = MV_OK;
	struct eth_pbuf *pkt;
	struct eth_pbuf *newpkt = NULL;
	struct bm_pool *pool;
	MV_STATUS status = MV_OK;
	struct eth_port_l2fw *ppl2fw = mv_eth_ports_l2fw[pp->port];
	MV_IP_HEADER *pIph = NULL;
	MV_U8 *pData;
	int ipOffset;

	/* Clamp the budget to the number of descriptors HW has ready */
	rx_done = mvNetaRxqBusyDescNumGet(pp->port, rxq);
	mvOsCacheIoSync();
	if (rx_todo > rx_done)
		rx_todo = rx_done;
	rx_done = 0;
	rx_filled = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
#ifdef CONFIG_MV_ETH_RX_DESC_PREFETCH
		rx_desc = mv_eth_rx_prefetch(pp, rx_ctrl, rx_done, rx_todo);
		if (!rx_desc)
			printk(KERN_INFO "rx_desc is NULL in %s\n", __func__);
#else
		/* Invalidate the descriptor line so we read fresh DMA'd data */
		rx_desc = mvNetaRxqNextDescGet(rx_ctrl);
		mvOsCacheLineInv(NULL, rx_desc);
		prefetch(rx_desc);
#endif /* CONFIG_MV_ETH_RX_DESC_PREFETCH */
		rx_done++;
		rx_filled++;

		pkt = (struct eth_pbuf *)rx_desc->bufCookie;
		if (!pkt) {
			/* Broken descriptor: bail out of the whole poll */
			printk(KERN_INFO "pkt is NULL in ; rx_done=%d %s\n", rx_done, __func__);
			return rx_done;
		}

		pool = &mv_eth_pool[pkt->pool];
		rx_status = rx_desc->status;
		/* Drop on error or on a multi-descriptor frame (must carry both
		 * First and Last flags) — refill the queue and move on */
		if (((rx_status & NETA_RX_FL_DESC_MASK) != NETA_RX_FL_DESC_MASK) || (rx_status & NETA_RX_ES_MASK)) {
			STAT_ERR(pp->stats.rx_error++);
			if (pp->dev)
				pp->dev->stats.rx_errors++;
			mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
			continue;
		}

		/* Payload length excludes CRC and the Marvell header */
		pkt->bytes = rx_desc->dataSize - (MV_ETH_CRC_SIZE + MV_ETH_MH_SIZE);
		pData = pkt->pBuf + pkt->offset;

		/* Locate the IP header: from the PNC parser result when
		 * available, otherwise computed from the L2 header layout */
#ifdef CONFIG_MV_ETH_PNC
		ipOffset = NETA_RX_GET_IPHDR_OFFSET(rx_desc);
#else
		if ((rx_desc->status & ETH_RX_VLAN_TAGGED_FRAME_MASK))
			ipOffset = MV_ETH_MH_SIZE + sizeof(MV_802_3_HEADER) + MV_VLAN_HLEN;
		else
			ipOffset = MV_ETH_MH_SIZE + sizeof(MV_802_3_HEADER);
#endif
		pIph = (MV_IP_HEADER *)(pData + ipOffset);
		/* NOTE(review): pData + ipOffset can't normally be NULL, so this
		 * check is effectively dead — kept as-is */
		if (pIph == NULL) {
			printk(KERN_INFO "pIph==NULL in %s\n", __func__);
			continue;
		}

#ifdef CONFIG_MV_ETH_L2FW_DEBUG
		if (pIph) {
			MV_U8 *srcIP, *dstIP;
			srcIP = (MV_U8 *)&(pIph->srcIP);
			dstIP = (MV_U8 *)&(pIph->dstIP);
			printk(KERN_INFO "%u.%u.%u.%u->%u.%u.%u.%u in %s\n", MV_IPQUAD(srcIP), MV_IPQUAD(dstIP), __func__);
		} else
			printk(KERN_INFO "pIph is NULL in %s\n", __func__);
#endif

		/* Destination port: fixed ESP TX port, or l2fw rule lookup by
		 * (srcIP, dstIP); packets without a rule are dropped/refilled */
		if (espEnabled)
			new_pp = mv_eth_ports[ppl2fw->txPort];
		else {
			l2fw_rule = l2fw_lookup(pIph->srcIP, pIph->dstIP);
			if (!l2fw_rule) {
#ifdef CONFIG_MV_ETH_L2FW_DEBUG
				printk(KERN_INFO "l2fw_lookup() failed in %s\n", __func__);
#endif
				mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
				continue;
			}
#ifdef CONFIG_MV_ETH_L2FW_DEBUG
			printk(KERN_INFO "l2fw_lookup() is ok l2fw_rule->port=%d in %s\n", l2fw_rule->port, __func__);
#endif
			new_pp = mv_eth_ports[l2fw_rule->port];
		}

		/* NOTE(review): no default case — an unknown cmd silently keeps
		 * status == MV_OK from a previous iteration */
		switch (ppl2fw->cmd) {
		case TX_AS_IS:
#ifdef CONFIG_MV_ETH_L2SEC
			if (espEnabled) {
				status = handleEsp(pkt, rx_desc, new_pp, pp->port);
			} else
#endif
				status = mv_eth_l2fw_tx(pkt, new_pp, 0, rx_desc);
			break;

		case SWAP_MAC:
			/* Swap MACs by CPU: invalidate line vs. DMA'd data,
			 * modify, flush so TX DMA sees the change */
			mvOsCacheLineInv(NULL, pkt->pBuf + pkt->offset);
			l2fw_swap_mac(pkt);
			mvOsCacheLineFlush(NULL, pkt->pBuf+pkt->offset);
			status = mv_eth_l2fw_tx(pkt, new_pp, 0, rx_desc);
			break;

		case COPY_AND_SWAP:
			/* Copy via XOR DMA for large packets, by CPU otherwise;
			 * the third mv_eth_l2fw_tx arg marks the XOR path */
			if (pkt->bytes >= l2fw_xor_threshold) {
				newpkt = eth_l2fw_copy_packet_withXor(pkt);
				if (newpkt)
					status = mv_eth_l2fw_tx(newpkt, new_pp, 1, rx_desc);
				else
					status = MV_ERROR;
			} else {
				newpkt = eth_l2fw_copy_packet_withoutXor(pkt);
				if (newpkt)
					status = mv_eth_l2fw_tx(newpkt, new_pp, 0, rx_desc);
				else
					status = MV_ERROR;
			}
			/* NOTE(review): no break — harmless only while this
			 * remains the last case */
		}

		if (status == MV_OK) {
			mvOsCacheLineInv(NULL, rx_desc);
			/* we do not need the pkt , we do not do anything with it */
			if ((ppl2fw->cmd == COPY_AND_SWAP) && !(espEnabled))
				mv_eth_pool_put(pool, pkt);
			continue;
		} else if (status == MV_DROPPED) {
			/* TX dropped the packet: recycle the original into the
			 * RXQ and return the unused copy to the pool */
			mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
			if ((ppl2fw->cmd == COPY_AND_SWAP) && !(espEnabled))
				mv_eth_pool_put(pool, newpkt);
			continue;
		} else if (status == MV_ERROR) {
			printk(KERN_INFO "MV_ERROR in %s\n", __func__);
			mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
		}
	} /* of while */

	/* Update RxQ management counters */
	mvOsCacheIoSync();
	mvNetaRxqDescNumUpdate(pp->port, rxq, rx_done, rx_filled);

	return rx_done;
}