Example #1
0
/*
 * Flush and invalidate every D-cache line overlapping the buffer
 * [addr, addr + size].  The start address is rounded down to a
 * 32-byte cache-line boundary so a partially covered first line is
 * included as well.
 *
 * NOTE(review): the loop condition uses "<=", so when (addr + size)
 * falls exactly on a line boundary one extra line just past the
 * buffer is flushed — presumably harmless, but confirm intended.
 *
 * Fix vs. original: removed the unused loop counter 'i' (declared and
 * initialized, never read).
 */
static INLINE MV_VOID mvNfpSecClearRange(MV_U8 *addr, MV_U32 size)
{
	MV_U8 *align;

	/* round start down to a cache-line boundary (lines are 32B here) */
	align = (MV_U8 *) ((MV_U32) addr & ~0x1f);

	for (; align <= (addr + size); align += CPU_D_CACHE_LINE_SIZE) {
		mvOsCacheLineFlushInv(NULL, align);
		mvOsCacheIoSync();
	}
}
/*
 * mvEgigaRx - poll the port's default RX queue (U-Boot rx hook).
 *
 * Hands every error-free received frame to the network stack via
 * NetReceive() and immediately returns its buffer to the BM pool.
 * Frames with an error status or carrying a buffer header (MC/SG)
 * are dropped.  Always returns 0.
 *
 * Fix vs. original: pDesc was dereferenced uninitialized by the final
 * mvOsCacheLineInv() whenever the queue was empty (undefined
 * behavior); it is now NULL-initialized and the call is guarded.
 * Also corrected the spelling of the local 'num_received_packets'.
 */
static int mvEgigaRx(struct eth_device *dev)
{
	egigaPriv *priv = dev->priv;
	MV_U8 *pkt;
	int packets_done = 0;
	int num_received_packets, pool_id;
	MV_U32 status;
	MV_PP2_PHYS_RXQ_CTRL *pRxq;
	PP2_RX_DESC *pDesc = NULL;	/* last descriptor touched; NULL if none */

	if (priv->devInit != MV_TRUE || priv->devEnable != MV_TRUE)
		return 0; /* port is not initialized or not enabled */

	pRxq = mvPp2RxqHndlGet(priv->port, EGIGA_DEF_RXQ);
	num_received_packets = mvPp2RxqBusyDescNumGet(priv->port, EGIGA_DEF_RXQ);
	packets_done = num_received_packets;

	while (num_received_packets--) {
		pDesc = mvPp2RxqNextDescGet(pRxq);
		/* cache invalidate - descriptor */
		mvOsCacheLineInv(NULL, pDesc);
#if defined(MV_CPU_BE)
		mvNetaRxqDescSwap(pDesc);//TODO
#endif /* MV_CPU_BE */
		status = pDesc->status;

		/* drop packets with error or with buffer header (MC, SG) */
		if ((status & PP2_RX_BUF_HDR_MASK) || (status & PP2_RX_ES_MASK)) {
#if defined(MV_CPU_BE)
			mvNetaRxqDescSwap(pDesc);//TODO
#endif /* MV_CPU_BE */
			mvOsCacheLineFlushInv(NULL, pDesc);
			continue;
		}
		/* TODO: drop fragmented packets */

		/* cache invalidate - packet */
		mvOsCacheInvalidate(NULL, (void *)pDesc->bufPhysAddr, RX_BUFFER_SIZE);

		/* give packet to stack - skip on first 2 bytes + buffer header */
		pkt = ((MV_U8 *)pDesc->bufPhysAddr) + 2 + BUFF_HDR_OFFS;
		NetReceive(pkt, (int)pDesc->dataSize - 2);

		/* refill: pass packet back to BM */
		pool_id = (status & PP2_RX_BM_POOL_ALL_MASK) >> PP2_RX_BM_POOL_ID_OFFS;
		mvBmPoolPut(pool_id, (MV_ULONG) pDesc->bufPhysAddr, (MV_ULONG) pDesc->bufCookie);

		/* cache invalidate - packet */
#if defined(MV_CPU_BE)
		mvNetaRxqDescSwap(pDesc);//TODO
#endif /* MV_CPU_BE */
		mvOsCacheInvalidate(NULL, (void *)pDesc->bufPhysAddr, RX_BUFFER_SIZE);

	}
	/*
	 * Invalidate the last descriptor processed.  Guarded because the
	 * queue may have been empty, in which case pDesc is still NULL.
	 */
	if (pDesc != NULL)
		mvOsCacheLineInv(NULL, pDesc);

	mvPp2RxqDescNumUpdate(priv->port, EGIGA_DEF_RXQ, packets_done, packets_done);

	return 0;
}
/*
 * mvFpProcess - NFP (Network Fast Path) per-packet decision function.
 *
 * Examines the received frame (pPkt / pEth) and its IP header (pIpHdr,
 * may be NULL for not-yet-classified PPPoE frames when
 * CONFIG_MV_ETH_NFP_PPP is set) arriving on interface ifIndex, and
 * either rewrites the packet in place for fast transmission or defers
 * to the slow path.
 *
 * Returns:
 *   >= 0            egress interface index - packet was bridged/routed
 *                   in place and can be transmitted directly,
 *   -1              hand the packet to the Linux slow path,
 *   MV_NFP_STOLEN / MV_NFP_DROP / MV_NFP_NONE
 *                   IPsec outcomes (CONFIG_MV_ETH_NFP_SEC builds only).
 *
 * pFpStats counters are updated only when MV_NFP_STAT expands to code.
 */
int mvFpProcess(MV_U32 ifIndex, MV_PKT_INFO *pPkt, MV_IP_HEADER *pIpHdr, MV_FP_STATS *pFpStats)
{
	MV_FP_RULE *pRt;
	MV_U32 dip, sip;
	MV_U8 proto;
	MV_U16 srcPort, dstPort;
	MV_STATUS status;
#ifdef CONFIG_MV_ETH_NFP_NAT
	MV_FP_NAT_RULE *pDnatRule, *pSnatRule;
#endif /* CONFIG_MV_ETH_NFP_NAT */
#ifdef CONFIG_MV_ETH_NFP_FDB
	MV_FP_FDB_RULE *pFdb;
#endif
	MV_U8 *pEth = pPkt->pFrags->bufVirtPtr;
#ifdef CONFIG_MV_ETH_NFP_PPP
	MV_BUF_INFO *pBuf = pPkt->pFrags;
#endif
#ifdef CONFIG_MV_ETH_NFP_SEC
	MV_NFP_SEC_SPD_RULE *pSpd;
	MV_NFP_SEC_SA_ENTRY *pSAEntry;
	MV_ESP_HEADER *pEspHdr;
#endif

	MV_NFP_STAT(pFpStats->process++);

	/* Check MAC address:
	 *   WAN - non-promiscous mode.
	 *       Unicast packets - NFP,
	 *       Multicast, Broadcast - Linux
	 *   LAN - Promiscous mode.
	 *       LAN Unicast MAC - NFP,
	 *       Multicast, Broadcast, Unknown Unicast - Linux
	 */
	/* first DA byte with multicast/broadcast bit set -> slow path */
	if (pEth[ETH_MV_HEADER_SIZE] == 0x01) {
		MV_NFP_STAT(pFpStats->multicast++);
		return -1;
	}
#ifdef CONFIG_MV_ETH_NFP_FDB
	/* If the ingress interface belongs to a bridge, consult the FDB:
	 * unknown DA -> slow path; local DA -> fall through to routing;
	 * otherwise bridge straight to the FDB's egress interface. */
	if (mvFpFdbMember(ifIndex)) {
		pFdb = mvFpFdbLookup(ifIndex, pEth + ETH_MV_HEADER_SIZE);
		if (!pFdb) {
			MV_NFP_STAT(pFpStats->fdb_rx_unknown++);
			return -1;
		}
		if (pFdb->fdbInfo.flags & MV_FP_FDB_IS_LOCAL) {
			/* DA is local, continue with routing */
			MV_NFP_STAT(pFpStats->fdb_rx_local++);
		} else {
			/* perform bridging */
			MV_NFP_STAT(pFpStats->fdb_bridged++);
			return pFdb->fdbInfo.ifIndex;
		}
	}
#endif /* CONFIG_MV_ETH_NFP_FDB */

#ifdef CONFIG_MV_ETH_NFP_PPP
	/* Decapsulate PPPoE */
	if (!pIpHdr) {
		MV_PPPoE_HEADER *pPPP = (MV_PPPoE_HEADER *) pEth;
		/* NOTE(review): 0x6488 / 0x2100 look like byte-swapped
		 * 0x8864 (PPPoE session) / 0x0021 (PPP IPv4) - presumably
		 * comparing in-memory big-endian fields on a little-endian
		 * host; confirm for BE builds. */
		if ((pPPP->ethertype == 0x6488) && (pPPP->proto == 0x2100)) {
			pIpHdr = (MV_IP_HEADER *) (pEth + ETH_MV_HEADER_SIZE +
						   sizeof(MV_802_3_HEADER) + ETH_FP_PPPOE_HDR);

			/* do not process fragments */
			/* NOTE(review): 0xFF3F presumably masks the fragment
			 * offset + MF bits of the network-order field - verify */
			if (pIpHdr->fragmentCtrl & 0xFF3F) {
				MV_NFP_STAT(pFpStats->ppp_rx_frag++);
				goto out;
			}

			/* strip the PPPoE header by shifting the buffer view
			 * forward; bufAddrShift records the shift so "out:"
			 * can restore the original packet */
			pBuf->bufAddrShift -= ETH_FP_PPPOE_HDR;
			pBuf->bufPhysAddr += ETH_FP_PPPOE_HDR;
			pBuf->bufVirtPtr += ETH_FP_PPPOE_HDR;
			pBuf->dataSize -= ETH_FP_PPPOE_HDR;
			pEth += ETH_FP_PPPOE_HDR;

			/* request HW IP checksum generation on transmit;
			 * 0x5 = 20-byte IP header in 32-bit words */
			pPkt->status = ETH_TX_IP_NO_FRAG | ETH_TX_GENERATE_IP_CHKSUM_MASK |
			    (0x5 << ETH_TX_IP_HEADER_LEN_OFFSET);

			/* also offload the L4 checksum for TCP/UDP */
			switch (pIpHdr->protocol) {
			case MV_IP_PROTO_TCP:
				pPkt->status |= ETH_TX_L4_TCP_TYPE | ETH_TX_GENERATE_L4_CHKSUM_MASK;
				break;
			case MV_IP_PROTO_UDP:
				pPkt->status |= ETH_TX_L4_UDP_TYPE | ETH_TX_GENERATE_L4_CHKSUM_MASK;
				break;
			}

			MV_NFP_STAT(pFpStats->ppp_rx++);
			MV_NFP_STAT(pFpStats->ethertype_unknown--);
		}
	}

	if (!pIpHdr)
		goto out;
#endif /* CONFIG_MV_ETH_NFP_PPP */

	/* extract L4 ports (returns MV_IP_PROTO_NULL if unsupported) */
	proto = mvFpPortsGet(pIpHdr, &dstPort, &srcPort);

	/* Check TTL value */
	if (pIpHdr->ttl <= 1) {
		/* TTL expired */
		MV_NFP_STAT(pFpStats->ip_ttl_expired++);
		goto out;
	}

	dip = pIpHdr->dstIP;
	sip = pIpHdr->srcIP;

#ifdef CONFIG_MV_ETH_NFP_SEC
	/* TBD - Add statistics counters */
	/* inbound ipsec traffic */
	if (pIpHdr->protocol == MV_IP_PROTO_ESP) {

		/* extract esp header */
		pEspHdr = (MV_ESP_HEADER *) ((MV_U8 *) pIpHdr + sizeof(MV_IP_HEADER));

		/* extract SA according to packet spi */
		pSAEntry = mvNfpSecSARuleFind(pEspHdr->spi);
		if (pSAEntry != NULL) {
			if (MV_OK == mvNfpSecIncoming(pPkt, pSAEntry)) {
				MV_NFP_STAT(pFpStats->sec_in++);
				return MV_NFP_STOLEN;
			} else {
				/* TDB- handle pkt gracefully */
				MV_NFP_STAT(pFpStats->sec_in_drop++);
				return MV_NFP_DROP;

			}
		}
		/* no SA: fall through to normal routing of the ESP packet */
		mvOsPrintf("mvFpProcess: no SA found for ESP packet(spi=0x%x)\n", pEspHdr->spi);
	} else {
		/* outbound */
		pSpd = mvNfpSecSPDRuleFind(dip, sip, proto, dstPort, srcPort, MV_NFP_SEC_RULE_DB_OUT);
		if (pSpd != NULL) {
			switch (pSpd->actionType) {
			case (MV_NFP_SEC_FWD):
				/* forward in clear - continue normal path */
				break;
			case (MV_NFP_SEC_SECURE):
				status = mvNfpSecOutgoing(pPkt, pSpd->pSAEntry);
				if (status == MV_OK) {
					/* handled by cesa */
					MV_NFP_STAT(pFpStats->sec_out++);
					return MV_NFP_STOLEN;
				} else if (status == MV_OUT_OF_RANGE) {
					/* slow path */
					MV_NFP_STAT(pFpStats->sec_out_slow++);
					return MV_NFP_NONE;
				} else {
					/* drop packet */
					MV_NFP_STAT(pFpStats->sec_out_drop++);
					return MV_NFP_DROP;
				}
				break;
			case (MV_NFP_SEC_DROP):
				MV_NFP_STAT(pFpStats->sec_out_drop++);
				return MV_NFP_DROP;
				break;
			}
		}
	}
#endif

#ifdef CONFIG_MV_ETH_NFP_NAT
	/* NOTE(review): proto/ports were already computed above; this
	 * recomputation is redundant but harmless */
	proto = mvFpPortsGet(pIpHdr, &dstPort, &srcPort);
	if (proto == MV_IP_PROTO_NULL) {
		/* NAT not supported for this protocol */
		MV_NFP_STAT(pFpStats->nat_bad_proto++);
		pDnatRule = NULL;
	} else {
		/* Lookup NAT database accordingly with 5 tuple key */
		pDnatRule = mvFpNatRuleFind(dip, sip, proto, dstPort, srcPort);
	}
	/* apply DNAT mapping to the lookup key before routing */
	if (pDnatRule != NULL) {
		if (pDnatRule->flags & MV_FP_DIP_CMD_MAP)
			dip = pDnatRule->newIp;
		if (pDnatRule->flags & MV_FP_DPORT_CMD_MAP)
			dstPort = pDnatRule->newPort;
	} else {
		MV_NFP_STAT(pFpStats->dnat_not_found++);
	}

#endif /* CONFIG_MV_ETH_NFP_NAT */

	pRt = mvFpRuleFind(dip, sip);
	if (pRt == NULL) {
		/* IP Routing rule is not found: go to Linux IP stack */
		MV_NFP_STAT(pFpStats->route_miss++);
		goto out;
	}

	MV_NFP_STAT(pFpStats->route_hit++);

#ifdef CONFIG_MV_ETH_NFP_NAT
	/* second lookup with the (possibly DNAT-translated) key to find
	 * the SNAT rule; if no DNAT mapping applied, reuse pDnatRule */
	if ((pDnatRule != NULL) && (pDnatRule->flags & MV_FP_DNAT_CMD_MAP)) {
		MV_NFP_STAT(pFpStats->dnat_found++);
		pSnatRule = mvFpNatRuleFind(dip, sip, proto, dstPort, srcPort);
	} else {
		pSnatRule = pDnatRule;
	}

	if ((pSnatRule != NULL) && (pSnatRule->flags & MV_FP_SNAT_CMD_MAP))
		MV_NFP_STAT(pFpStats->snat_found++);
	else
		MV_NFP_STAT(pFpStats->snat_not_found++);

	/* Check IP awareness */
	/* route requires a NAT translation we don't have -> slow path */
	if ((pRt->routingInfo.aware_flags & MV_FP_DIP_CMD_MAP) && (pDnatRule == NULL)) {
		MV_NFP_STAT(pFpStats->dnat_aware++);
		goto out;
	}

	if ((pRt->routingInfo.aware_flags & MV_FP_SIP_CMD_MAP) && (pSnatRule == NULL)) {
		MV_NFP_STAT(pFpStats->snat_aware++);
		goto out;
	}

	/* Update packet accordingly with NAT rules */
	if ((pDnatRule != NULL) || (pSnatRule != NULL))
		mvFpNatPktUpdate(pIpHdr, pDnatRule, pSnatRule);
#endif /* CONFIG_MV_ETH_NFP_NAT */

	ifIndex = pRt->routingInfo.outIfIndex;

#ifdef CONFIG_MV_ETH_NFP_PPP
	/* Encapsulate PPPoE on Tx */
	if (mvFpPppLookup(ifIndex)) {
		/* would exceed the PPPoE MTU - let the slow path fragment */
		if (pBuf->dataSize > 1514 + ETH_MV_HEADER_SIZE - ETH_FP_PPPOE_HDR) {
			MV_NFP_STAT(pFpStats->ppp_tx_slow++);
			goto out;
		}

		MV_NFP_STAT(pFpStats->ppp_tx++);

		/* FIXME: pktSize is left unchanged */
		/* grow the buffer view backwards to make room for the
		 * PPPoE header in front of the frame */
		pBuf->bufAddrShift += ETH_FP_PPPOE_HDR;
		pBuf->bufPhysAddr -= ETH_FP_PPPOE_HDR;
		pBuf->bufVirtPtr -= ETH_FP_PPPOE_HDR;
		pBuf->dataSize += ETH_FP_PPPOE_HDR;
		pEth -= ETH_FP_PPPOE_HDR;

		/* -6B aligment from 32B boundary */
		/* copy the 24-byte precomputed Ethernet+PPPoE header
		 * template for this session, word by word */
		{
			MV_U32 *d = (MV_U32 *) pEth;
			MV_U32 *s = pppOpen[ifIndex].pppInfo.u.u32;

			*(d++) = *(s++);
			*(d++) = *(s++);
			*(d++) = *(s++);
			*(d++) = *(s++);
			*(d++) = *(s++);
			*(d++) = *(s++);
		}

		/* update payload len */
		*(MV_U16 *) (pEth + 20) = htons(pBuf->dataSize - 14 - ETH_FP_PPPOE_HDR);

		mvFpDecTTL(pIpHdr);
#ifdef CONFIG_MV_ETH_NFP_NAT
		/* incremental IP checksum fixup for the SNAT rewrite,
		 * since HW checksum offload is disabled below */
		if (pSnatRule)
			mvFpCSumInc(pIpHdr, pSnatRule->srcIp, pSnatRule->newIp);
#endif
		pPkt->status = 0;
		ifIndex = pppOpen[ifIndex].pppInfo.if_eth;

		/* Flush and Invalidate 3rd cacheline */
		/* NOTE(review): assigns an MV_U32 to an MV_U8* without a
		 * cast - works on 32-bit targets but should read
		 * pEth = (MV_U8 *)((MV_U32) pEth & ~(...)); confirm */
		pEth = (MV_U32) pEth & ~(CPU_D_CACHE_LINE_SIZE - 1);
		pEth += CPU_D_CACHE_LINE_SIZE * 2;
		mvOsCacheLineFlushInv(NULL, pEth);
		goto end;
	}
#endif /* CONFIG_MV_ETH_NFP_PPP */

	/* rewrite the Ethernet header in place with the route's next-hop
	 * DA and our SA (16-bit/32-bit stores, offsets skip the 2-byte
	 * Marvell header prefix) */
	*(MV_U16 *) (pEth + 2) = *(MV_U16 *) (&pRt->routingInfo.dstMac[0]);
	*(MV_U32 *) (pEth + 4) = *(MV_U32 *) (&pRt->routingInfo.dstMac[2]);
	*(MV_U32 *) (pEth + 8) = *(MV_U32 *) (&pRt->routingInfo.srcMac[0]);
	*(MV_U16 *) (pEth + 12) = *(MV_U16 *) (&pRt->routingInfo.srcMac[4]);
#ifdef CONFIG_MV_ETH_NFP_PPP
	/* NOTE(review): 0x0008 is presumably byte-swapped 0x0800 (IPv4
	 * ethertype) written on a little-endian host - confirm */
	*(MV_U16 *) (pEth + 14) = 0x0008;
#endif

	pIpHdr->ttl--;

#ifdef CONFIG_MV_ETH_NFP_TOS
	/* apply per-route DSCP remark and TX queue selection */
	pIpHdr->tos = pRt->routingInfo.dscp;
	pPkt->txq = pRt->routingInfo.txq;
#endif

#ifdef CONFIG_MV_ETH_NFP_FDB
	/* find actual port inside bridge, otherwise br->xmit is called */
	if (mvFpFdbMember(ifIndex)) {
		pFdb = mvFpFdbLookup(ifIndex, pEth + ETH_MV_HEADER_SIZE);
		if (pFdb) {
			MV_NFP_STAT(pFpStats->fdb_tx_found++);
			return pFdb->fdbInfo.ifIndex;
		}
	}
#endif /* CONFIG_MV_ETH_NFP_FDB */

end:
	/* fast-path success: transmit on ifIndex */
	return ifIndex;
out:
	/* slow path: undo any PPPoE decapsulation so Linux sees the
	 * original frame */
#ifdef CONFIG_MV_ETH_NFP_PPP
	/* restore original packet */
	if (pBuf->bufAddrShift) {
		pBuf->bufPhysAddr += pBuf->bufAddrShift;
		pBuf->bufVirtPtr += pBuf->bufAddrShift;
		pBuf->dataSize -= pBuf->bufAddrShift;
		pBuf->bufAddrShift = 0;
		MV_NFP_STAT(pFpStats->ppp_rx_slow++);
	}
#endif
	MV_NFP_STAT(pFpStats->slowpath++);
	return -1;
}