Example #1
inline void *copy_cpltx_pktxt(struct sk_buff *skb,
				struct net_device *dev,
				void *pos)
{
	struct adapter *adap;
	struct port_info *pi;
	struct sge_eth_txq *q;
	struct cpl_tx_pkt_core *cpl;
	u64 cntrl = 0;
	u32 ctrl0, qidx;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	cpl = (struct cpl_tx_pkt_core *)pos;

	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
			       TXPKT_PF_V(adap->pf);
	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	pos += sizeof(struct cpl_tx_pkt_core);
	return pos;
}
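
Most of the examples in this listing follow the same basic pattern: check skb_vlan_tag_present() and only then read the tag with skb_vlan_tag_get(), copying it into whatever descriptor the hardware expects. A minimal sketch of that pattern is shown below; the my_tx_desc structure, MY_DESC_VLAN flag and my_fill_vlan() helper are invented names for illustration and do not come from any of the drivers quoted here.

#include <linux/skbuff.h>
#include <linux/if_vlan.h>

#define MY_DESC_VLAN	0x1	/* assumed "insert VLAN tag" descriptor flag */

struct my_tx_desc {		/* hypothetical TX descriptor layout */
	u32 flags;
	u16 vlan_tci;		/* host-order TCI, kept simple for the sketch */
};

static void my_fill_vlan(struct my_tx_desc *txd, const struct sk_buff *skb)
{
	if (!skb_vlan_tag_present(skb))
		return;

	/* skb_vlan_tag_get() returns the host-order TCI (PCP + DEI + VID) */
	txd->flags |= MY_DESC_VLAN;
	txd->vlan_tci = skb_vlan_tag_get(skb);
}
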
Example #2
static int nf_trace_fill_pkt_info(struct sk_buff *nlskb,
				  const struct nft_pktinfo *pkt)
{
	const struct sk_buff *skb = pkt->skb;
	unsigned int len = min_t(unsigned int,
				 pkt->xt.thoff - skb_network_offset(skb),
				 NFT_TRACETYPE_NETWORK_HSIZE);
	int off = skb_network_offset(skb);

	if (trace_fill_header(nlskb, NFTA_TRACE_NETWORK_HEADER, skb, off, len))
		return -1;

	len = min_t(unsigned int, skb->len - pkt->xt.thoff,
		    NFT_TRACETYPE_TRANSPORT_HSIZE);

	if (trace_fill_header(nlskb, NFTA_TRACE_TRANSPORT_HEADER, skb,
			      pkt->xt.thoff, len))
		return -1;

	if (!skb_mac_header_was_set(skb))
		return 0;

	if (skb_vlan_tag_get(skb))
		return nf_trace_fill_ll_header(nlskb, skb);

	off = skb_mac_header(skb) - skb->data;
	len = min_t(unsigned int, -off, NFT_TRACETYPE_LL_HSIZE);
	return trace_fill_header(nlskb, NFTA_TRACE_LL_HEADER,
				 skb, off, len);
}
Example #3
static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev)
{
	struct net_device *vlan, *br;

	br = bridge_parent(dev);
	if (brnf_pass_vlan_indev == 0 || !skb_vlan_tag_present(skb))
		return br;

	vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
				    skb_vlan_tag_get(skb) & VLAN_VID_MASK);

	return vlan ? vlan : br;
}
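
Example #3 masks the result with VLAN_VID_MASK because skb_vlan_tag_get() returns the whole 16-bit TCI, not just the VLAN ID. As a small illustration, the TCI can be split into its ID and priority fields with the masks from <linux/if_vlan.h>; the tci_split() helper below is an invented name, not kernel API.

#include <linux/skbuff.h>
#include <linux/if_vlan.h>

static void tci_split(const struct sk_buff *skb, u16 *vid, u8 *prio)
{
	u16 tci = skb_vlan_tag_get(skb);	/* host-order TCI */

	*vid  = tci & VLAN_VID_MASK;				/* bits 0-11: VLAN ID */
	*prio = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;	/* bits 13-15: PCP */
}
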
Example #4
File: en_tx.c  Project: ammubhave/bargud
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
	skb_pull_inline(skb, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
				  cpy2_sz);
	skb_pull_inline(skb, cpy2_sz);
}
Example #5
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
                                    unsigned char **skb_data,
				    unsigned int *skb_len)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, *skb_data, cpy1_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}
Example #6
static int nf_trace_fill_ll_header(struct sk_buff *nlskb,
				   const struct sk_buff *skb)
{
	struct vlan_ethhdr veth;
	int off;

	BUILD_BUG_ON(sizeof(veth) > NFT_TRACETYPE_LL_HSIZE);

	off = skb_mac_header(skb) - skb->data;
	if (off != -ETH_HLEN)
		return -1;

	if (skb_copy_bits(skb, off, &veth, ETH_HLEN))
		return -1;

	veth.h_vlan_proto = skb->vlan_proto;
	veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth.h_vlan_encapsulated_proto = skb->protocol;

	return nla_put(nlskb, NFTA_TRACE_LL_HEADER, sizeof(veth), &veth);
}
Example #7
/**
 * nfp_net_tx() - Main transmit entry point
 * @skb:    SKB to transmit
 * @netdev: netdev structure
 *
 * Return: NETDEV_TX_OK on success.
 */
static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	const struct skb_frag_struct *frag;
	struct nfp_net_r_vector *r_vec;
	struct nfp_net_tx_desc *txd, txdg;
	struct nfp_net_tx_buf *txbuf;
	struct nfp_net_tx_ring *tx_ring;
	struct netdev_queue *nd_q;
	dma_addr_t dma_addr;
	unsigned int fsize;
	int f, nr_frags;
	int wr_idx;
	u16 qidx;

	qidx = skb_get_queue_mapping(skb);
	tx_ring = &nn->tx_rings[qidx];
	r_vec = tx_ring->r_vec;
	nd_q = netdev_get_tx_queue(nn->netdev, qidx);

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
		nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n",
				  qidx, tx_ring->wr_p, tx_ring->rd_p);
		netif_tx_stop_queue(nd_q);
		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_busy++;
		u64_stats_update_end(&r_vec->tx_sync);
		return NETDEV_TX_BUSY;
	}

	/* Start with the head skbuf */
	dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&nn->pdev->dev, dma_addr))
		goto err_free;

	wr_idx = tx_ring->wr_p % tx_ring->cnt;

	/* Stash the soft descriptor of the head then initialize it */
	txbuf = &tx_ring->txbufs[wr_idx];
	txbuf->skb = skb;
	txbuf->dma_addr = dma_addr;
	txbuf->fidx = -1;
	txbuf->pkt_cnt = 1;
	txbuf->real_len = skb->len;

	/* Build TX descriptor */
	txd = &tx_ring->txds[wr_idx];
	txd->offset_eop = (nr_frags == 0) ? PCIE_DESC_TX_EOP : 0;
	txd->dma_len = cpu_to_le16(skb_headlen(skb));
	nfp_desc_set_dma_addr(txd, dma_addr);
	txd->data_len = cpu_to_le16(skb->len);

	txd->flags = 0;
	txd->mss = 0;
	txd->l4_offset = 0;

	nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb);

	nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb);

	if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
		txd->flags |= PCIE_DESC_TX_VLAN;
		txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
	}

	/* Gather DMA */
	if (nr_frags > 0) {
		/* all descs must match except for in addr, length and eop */
		txdg = *txd;

		for (f = 0; f < nr_frags; f++) {
			frag = &skb_shinfo(skb)->frags[f];
			fsize = skb_frag_size(frag);

			dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0,
						    fsize, DMA_TO_DEVICE);
			if (dma_mapping_error(&nn->pdev->dev, dma_addr))
				goto err_unmap;

			wr_idx = (wr_idx + 1) % tx_ring->cnt;
			tx_ring->txbufs[wr_idx].skb = skb;
			tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
			tx_ring->txbufs[wr_idx].fidx = f;

			txd = &tx_ring->txds[wr_idx];
			*txd = txdg;
			txd->dma_len = cpu_to_le16(fsize);
			nfp_desc_set_dma_addr(txd, dma_addr);
			txd->offset_eop =
				(f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
		}

		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_gather++;
		u64_stats_update_end(&r_vec->tx_sync);
	}

	netdev_tx_sent_queue(nd_q, txbuf->real_len);

	tx_ring->wr_p += nr_frags + 1;
	if (nfp_net_tx_ring_should_stop(tx_ring))
		nfp_net_tx_ring_stop(nd_q, tx_ring);

	tx_ring->wr_ptr_add += nr_frags + 1;
	if (!skb->xmit_more || netif_xmit_stopped(nd_q)) {
		/* force memory write before we let HW know */
		wmb();
		nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
		tx_ring->wr_ptr_add = 0;
	}

	skb_tx_timestamp(skb);

	return NETDEV_TX_OK;

err_unmap:
	--f;
	while (f >= 0) {
		frag = &skb_shinfo(skb)->frags[f];
		dma_unmap_page(&nn->pdev->dev,
			       tx_ring->txbufs[wr_idx].dma_addr,
			       skb_frag_size(frag), DMA_TO_DEVICE);
		tx_ring->txbufs[wr_idx].skb = NULL;
		tx_ring->txbufs[wr_idx].dma_addr = 0;
		tx_ring->txbufs[wr_idx].fidx = -2;
		wr_idx = wr_idx - 1;
		if (wr_idx < 0)
			wr_idx += tx_ring->cnt;
	}
	dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);
	tx_ring->txbufs[wr_idx].skb = NULL;
	tx_ring->txbufs[wr_idx].dma_addr = 0;
	tx_ring->txbufs[wr_idx].fidx = -2;
err_free:
	nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n");
	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_errors++;
	u64_stats_update_end(&r_vec->tx_sync);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
Example #8
static inline int
dma_xmit(struct sk_buff *skb, struct net_device *dev, END_DEVICE *ei_local, int gmac_no)
{
	struct netdev_queue *txq;
	dma_addr_t frag_addr;
	u32 frag_size, nr_desc;
	u32 txd_info3, txd_info4;
#if defined (CONFIG_RAETH_SG_DMA_TX)
	u32 i, nr_frags;
	const skb_frag_t *tx_frag;
	const struct skb_shared_info *shinfo;
#else
#define nr_frags 0
#endif

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
	if (ra_sw_nat_hook_tx != NULL) {
#if defined (CONFIG_RA_HW_NAT_WIFI) || defined (CONFIG_RA_HW_NAT_PCI)
		if (IS_DPORT_PPE_VALID(skb))
			gmac_no = PSE_PORT_PPE;
		else
#endif
		if (ra_sw_nat_hook_tx(skb, gmac_no) == 0) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	txd_info3 = TX3_QDMA_SWC;
	if (gmac_no != PSE_PORT_PPE) {
		u32 QID = M2Q_table[(skb->mark & 0x3f)];
		if (QID < 8 && M2Q_wan_lan) {
#if defined (CONFIG_PSEUDO_SUPPORT)
			if (gmac_no == PSE_PORT_GMAC2)
				QID += 8;
#elif defined (CONFIG_RAETH_HW_VLAN_TX)
			if ((skb_vlan_tag_get(skb) & VLAN_VID_MASK) > 1)
				QID += 8;
#endif
		}
		txd_info3 |= TX3_QDMA_QID(QID);
	}

	txd_info4 = TX4_DMA_FPORT(gmac_no);

#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD)
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd_info4 |= TX4_DMA_TUI_CO(7);
#endif

#if defined (CONFIG_RAETH_HW_VLAN_TX)
	if (skb_vlan_tag_present(skb))
		txd_info4 |= (0x10000 | skb_vlan_tag_get(skb));
#endif

#if defined (CONFIG_RAETH_SG_DMA_TX)
	shinfo = skb_shinfo(skb);
#endif

#if defined (CONFIG_RAETH_TSO)
	/* fill MSS info in tcp checksum field */
	if (shinfo->gso_size) {
		u32 hdr_len;
		
		if (!(shinfo->gso_type & (SKB_GSO_TCPV4|SKB_GSO_TCPV6))) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		
		if (skb_header_cloned(skb)) {
			if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}
		}
		
		hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
		if (hdr_len >= skb->len) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		
		tcp_hdr(skb)->check = htons(shinfo->gso_size);
		txd_info4 |= TX4_DMA_TSO;
	}
#endif

	nr_desc = DIV_ROUND_UP(skb_headlen(skb), TXD_MAX_SEG_SIZE);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	nr_frags = (u32)shinfo->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		nr_desc += DIV_ROUND_UP(skb_frag_size(tx_frag), TXD_MAX_SEG_SIZE);
	}
#endif

	txq = netdev_get_tx_queue(dev, 0);

	/* flush main skb part before spin_lock() */
	frag_size = (u32)skb_headlen(skb);
	frag_addr = dma_map_single(NULL, skb->data, frag_size, DMA_TO_DEVICE);

	/* protect TX ring access (from eth2/eth3 queues) */
	spin_lock(&ei_local->page_lock);

	/* check nr_desc+2 free descriptors (2 need to prevent head/tail overlap) */
	if (ei_local->txd_pool_free_num < (nr_desc+2)) {
		spin_unlock(&ei_local->page_lock);
		netif_tx_stop_queue(txq);
#if defined (CONFIG_RAETH_DEBUG)
		if (net_ratelimit())
			printk("%s: QDMA TX pool is run out! (GMAC: %d)\n", RAETH_DEV_NAME, gmac_no);
#endif
		return NETDEV_TX_BUSY;
	}

	qdma_write_skb_fragment(ei_local, frag_addr, frag_size,
				txd_info3, txd_info4, skb, nr_frags == 0);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		frag_size = skb_frag_size(tx_frag);
		frag_addr = skb_frag_dma_map(NULL, tx_frag, 0, frag_size, DMA_TO_DEVICE);
		qdma_write_skb_fragment(ei_local, frag_addr, frag_size,
					txd_info3, txd_info4, skb, i == nr_frags - 1);
	}
#endif

#if defined (CONFIG_RAETH_BQL)
	netdev_tx_sent_queue(txq, skb->len);
#endif

#if !defined (CONFIG_RAETH_BQL) || !defined (CONFIG_SMP)
	/* smp_mb() already inlined in netdev_tx_sent_queue */
	wmb();
#endif

	/* kick the QDMA TX */
	sysRegWrite(QTX_CTX_PTR, (u32)get_txd_ptr_phy(ei_local, ei_local->txd_last_idx));

	spin_unlock(&ei_local->page_lock);

	return NETDEV_TX_OK;
}
Example #9
static void xlgmac_prep_vlan(struct sk_buff *skb,
			     struct xlgmac_pkt_info *pkt_info)
{
	if (skb_vlan_tag_present(skb))
		pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
}
Example #10
static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
			       struct xlgmac_ring *ring,
			       struct sk_buff *skb,
			       struct xlgmac_pkt_info *pkt_info)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	pkt_info->skb = skb;

	context_desc = 0;
	pkt_info->desc_count = 0;

	pkt_info->tx_packets = 1;
	pkt_info->tx_bytes = skb->len;

	if (xlgmac_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			pkt_info->desc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		pkt_info->desc_count++;

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN,
					1);
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				pkt_info->desc_count++;
			}

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
					1);
	}

	for (len = skb_headlen(skb); len;) {
		pkt_info->desc_count++;
		len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			pkt_info->desc_count++;
			len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
		}
	}
}
Example #11
ASF_int32_t asfctrl_delete_dev_map(struct net_device *dev)
{
	ASF_int32_t  cii;
	ASF_uint32_t ulVSGId;
	ASFCTRL_FUNC_ENTRY;
#ifdef CONFIG_PPPOE
	if ((dev->type == ARPHRD_ETHER) || (dev->type == ARPHRD_PPP)) {
#else
	if (dev->type == ARPHRD_ETHER) {
#endif
		cii = asfctrl_dev_get_cii(dev);
		if (cii < 0) {
			ASFCTRL_DBG("Failed to determine cii for device %s\n",
				dev->name);
			return T_FAILURE;
		}
		ASFCTRL_DBG("UNMAP interface %s\n",  dev->name);
		/* Get the VSG Id from asfctrl_netns Subha 11/3 */
		ulVSGId = asfctrl_netns_net_to_vsg(dev_net(dev));
		printk("UnRegister Device: ulVSGId = %d\n", ulVSGId);
		ASFUnBindDeviceToVSG(ulVSGId, cii);
		ASFUnMapInterface(cii);
		dev_put(dev);
		p_asfctrl_netdev_cii[cii] = NULL;
		return T_SUCCESS;
	}

	ASFCTRL_FUNC_EXIT;
	return T_FAILURE;
}
EXPORT_SYMBOL(asfctrl_delete_dev_map);

#if (ASFCTRL_DEBUG_LEVEL >= LOGS)
char *print_netevent(int event)
{
	switch (event) {
	case NETDEV_UP:
		return (char *)"NETDEV_UP";
	case NETDEV_DOWN:
		return (char *)"NETDEV_DOWN";
	case NETDEV_REBOOT:
		return (char *)"NETDEV_REBOOT";
	case NETDEV_CHANGE:
		return (char *)"NETDEV_CHANGE";
	case NETDEV_REGISTER:
		return (char *)"NETDEV_REGISTER";
	case NETDEV_UNREGISTER:
		return (char *)"NETDEV_UNREGISTER";
	case NETDEV_CHANGEMTU:
		return (char *)"NETDEV_CHANGEMTU";
	case NETDEV_CHANGEADDR:
		return (char *)"NETDEV_CHANGEADDR";
	case NETDEV_GOING_DOWN:
		return (char *)"NETDEV_GOING_DOWN";
	case NETDEV_CHANGENAME:
		return (char *)"NETDEV_CHANGENAME";
	case NETDEV_PRE_UP:
		return (char *)"NETDEV_PRE_UP";
	default:
		return (char *)"UNKNOWN";
	}
}
#endif

static int asfctrl_dev_notifier_fn(struct notifier_block *this,
				unsigned long event, void *ptr)
{
#if 0 /* Linux versions less than 3.8 */
	struct net_device *dev = (struct net_device *)(ptr);
#else
	/* Versions above 3.8 */
	struct net_device *dev = ((struct netdev_notifier_info *)(ptr))->dev;
#endif

	if (dev == NULL)
	{
		ASFCTRL_DBG("asfctrl_dev_notifier: NULL String for dev? \n");
		return NOTIFY_DONE;
	}

	ASFCTRL_FUNC_ENTRY;
	printk(KERN_INFO "Subha: asfctrl_dev_notifier called for dev = 0x%x\n", dev);
	ASFCTRL_DBG("%s - event %ld (%s)\n",
			dev->name, event, print_netevent(event));

	/* handle only ethernet, vlan, bridge and pppoe (ppp) interfaces */
	switch (event) {
	case NETDEV_REGISTER: /* A  new device is allocated*/
		printk(KERN_INFO "Subha: NETDEV_REGISTER\n");
		printk(KERN_INFO "dev->type = %d, ARPHDR_ETHER =%d device=%s\n", dev->type, ARPHRD_ETHER, dev->name);
		ASFCTRL_INFO("Register Device type %d mac %pM\n", dev->type,
			dev->dev_addr);
		if (dev->type == ARPHRD_ETHER)
			asfctrl_create_dev_map(dev, 1);
		break;

	case NETDEV_UNREGISTER:/* A new device is deallocated*/
		ASFCTRL_INFO("Unregister Device type %d mac %pM\n", dev->type,
			dev->dev_addr);
#ifdef CONFIG_PPPOE
		if (dev->type == ARPHRD_ETHER  || dev->type == ARPHRD_PPP)
#else
		if (dev->type == ARPHRD_ETHER)
#endif
			asfctrl_delete_dev_map(dev);
		break;

#ifdef CONFIG_PPPOE
	case NETDEV_UP:
		if (dev->type == ARPHRD_PPP)
			asfctrl_create_dev_map(dev, 1);
		break;
#endif
	}
	ASFCTRL_FUNC_EXIT;
	return NOTIFY_DONE;
}

int asfctrl_dev_fp_tx_hook(struct sk_buff *skb, struct net_device *dev)
{
	ASF_uint16_t	usEthType;
	ASF_int32_t		hh_len;
	ASF_boolean_t	bPPPoE = 0;
	struct iphdr       *iph = 0;
	struct ipv6hdr       *ipv6h;
	unsigned int proto;
	unsigned int  tun_hdr = 0;

	ASFCTRL_FUNC_ENTRY;

	if (!asfctrl_skb_is_dummy(skb))
		return AS_FP_PROCEED;
	

	asfctrl_skb_unmark_dummy(skb);

	if (dev->type != ARPHRD_ETHER)
		goto drop;

	ASFCTRL_INFO("asfctrl_dev_fp_tx: 2\n");

	usEthType = skb->protocol;
	hh_len = ETH_HLEN;

	if (usEthType == __constant_htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data+hh_len);
		ASFCTRL_TRACE("8021Q packet");
		hh_len += VLAN_HLEN;
		usEthType = vhdr->h_vlan_encapsulated_proto;
	}

	if (usEthType == __constant_htons(ETH_P_PPP_SES)) {
		unsigned char *poe_hdr = skb->data+hh_len;
		unsigned short ppp_proto;

		ASFCTRL_TRACE("PPPoE packet");

		/*PPPoE header is of 6 bytes */
		ppp_proto = *(unsigned short *)(poe_hdr+6);
		/* PPPOE: VER=1,TYPE=1,CODE=0 and  PPP:_PROTO=0x0021 (IP) */
		if ((poe_hdr[0] != 0x11) || (poe_hdr[1] != 0) ||
			(ppp_proto != __constant_htons(0x0021))) {
				goto drop;
		}

		hh_len += (8); /* 6+2 -- pppoe+ppp headers */
		usEthType = __constant_htons(ETH_P_IP);
		bPPPoE = 1;
	}

	ASFCTRL_INFO("subha: asfctrl_dev_fp_tx: 3\n");
	if (usEthType != __constant_htons(ETH_P_IP) &&
		usEthType != __constant_htons(ETH_P_IPV6))
		goto drop;

	ASFCTRL_INFO("subha: asfctrl_dev_fp_tx: 4\n");

	if (usEthType == __constant_htons(ETH_P_IP)) {
		iph = (struct iphdr *)(skb->data+hh_len);
		proto = iph->protocol;
		if (proto == IPPROTO_IPV6) {
			ipv6h = (struct ipv6hdr *)(skb->data+hh_len+sizeof(struct iphdr));
			proto = ipv6h->nexthdr;
			tun_hdr = sizeof(struct iphdr);
		}
	} else {
		ipv6h = (struct ipv6hdr *)(skb->data+hh_len);
		proto = ipv6h->nexthdr;
		if (proto == IPPROTO_IPIP) {
			iph = (struct iphdr *)(skb->data+hh_len+sizeof(struct ipv6hdr));
			proto = iph->protocol;
			tun_hdr = sizeof(struct ipv6hdr);
		}
	}

	ASFCTRL_INFO("subha: asfctrl_dev_fp_tx: 5\n");
	switch (proto) {
		asf_linux_L2blobPktData_t *pData;
		ASFFFPUpdateFlowParams_t  cmd;

	case ASFCTRL_IPPROTO_DUMMY_L2BLOB:

		/*
		* if the packet is coming on a PPP interface,
		* network header points to start of PPPOE header
		* instead of IP header.
		*  So always dynamically identify start of IP header!
		*/

		memset(&cmd, 0, sizeof(cmd));
		cmd.u.l2blob.bUpdatePPPoELen = bPPPoE;


		ASFCTRL_INFO(
			"DUMMY_L2BLOB: %pM:%pM..%02x%02x (skb->proto 0x%04x) "
			"data 0x%p nw_hdr 0x%p tr_hdr 0x%p\n",
			skb->data, skb->data+6, skb->data[12], skb->data[13],
			skb->protocol, skb->data, skb_network_header(skb),
			skb_transport_header(skb));

		if (usEthType == __constant_htons(ETH_P_IP)) {
			pData = (asf_linux_L2blobPktData_t *)(skb->data+hh_len +
							(iph->ihl * 4) + (tun_hdr ? sizeof(struct ipv6hdr) : 0));
			cmd.u.l2blob.tunnel.bIP6IP4Out = tun_hdr ? 1 : 0;
		} else {
			pData = (asf_linux_L2blobPktData_t *)(skb->data+hh_len +
							sizeof(struct ipv6hdr) + (tun_hdr ? sizeof(struct iphdr) : 0));
			cmd.u.l2blob.tunnel.bIP4IP6Out = tun_hdr ? 1 : 0;
		}

		memcpy(&cmd.tuple, &pData->tuple, sizeof(cmd.tuple));
		cmd.ulZoneId = pData->ulZoneId;
		cmd.bL2blobUpdate = 1;
		cmd.u.l2blob.ulDeviceId = asfctrl_dev_get_cii(dev);
		cmd.u.l2blob.ulPathMTU = pData->ulPathMTU;

		cmd.u.l2blob.ulL2blobMagicNumber = asfctrl_vsg_l2blobconfig_id;

		/* need to include PPPOE+PPP header if any */
		cmd.u.l2blob.l2blobLen = hh_len + tun_hdr;

		memcpy(cmd.u.l2blob.l2blob, skb->data, cmd.u.l2blob.l2blobLen);
#ifdef CONFIG_VLAN_8021Q
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
		if (vlan_tx_tag_present(skb)) {
#else
		if (skb_vlan_tag_present(skb)) {
#endif
			cmd.u.l2blob.bTxVlan = 1;

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
			cmd.u.l2blob.usTxVlanId = (vlan_tx_tag_get(skb)
							| VLAN_TAG_PRESENT);
#else
			cmd.u.l2blob.usTxVlanId = (skb_vlan_tag_get(skb)
							| VLAN_TAG_PRESENT);
#endif
		} else
#endif
			cmd.u.l2blob.bTxVlan = 0;

		ASFFFPRuntime(pData->ulVsgId, ASF_FFP_MODIFY_FLOWS, &cmd,
			sizeof(cmd), NULL, 0);
		break;

#ifdef ASFCTRL_IPSEC_FP_SUPPORT
	case ASFCTRL_IPPROTO_DUMMY_IPSEC_L2BLOB:
		ASFCTRL_INFO("DUMMY_IPSEC_L2BLOB");

		skb->protocol = usEthType;
		if (fn_ipsec_l2blob_update)
			fn_ipsec_l2blob_update(skb,
				hh_len, asfctrl_dev_get_cii(dev));

		break;
#endif

#ifdef ASFCTRL_FWD_FP_SUPPORT
	case ASFCTRL_IPPROTO_DUMMY_FWD_L2BLOB:
		ASFCTRL_INFO("DUMMY_FWD_L2BLOB");

		if (fn_fwd_l2blob_update)
			fn_fwd_l2blob_update(skb, hh_len,
				asfctrl_dev_get_cii(dev));

		break;
#endif
#ifdef ASFCTRL_TERM_FP_SUPPORT
	case ASFCTRL_IPPROTO_DUMMY_TERM_L2BLOB:
		ASFCTRL_INFO("DUMMY_TERM_L2BLOB");

		if (fn_term_l2blob_update)
			fn_term_l2blob_update(skb, hh_len,
				asfctrl_dev_get_cii(dev));

		break;
#endif
	}
drop:
	ASFCTRLKernelSkbFree(skb);
	ASFCTRL_FUNC_EXIT;
	return AS_FP_STOLEN;
}

static struct notifier_block asfctrl_dev_notifier = {
	.notifier_call = asfctrl_dev_notifier_fn,
};

ASF_void_t  asfctrl_fnInterfaceNotFound(
			ASFBuffer_t Buffer,
			genericFreeFn_t pFreeFn,
			ASF_void_t *freeArg)
{
	struct sk_buff  *skb;
	int bVal = in_softirq();

	ASFCTRL_FUNC_ENTRY;
	skb = AsfBuf2Skb(Buffer);

	if (!bVal)
		local_bh_disable();
	/* Send it to for normal path handling */
	ASFCTRL_netif_receive_skb(skb);

	if (!bVal)
		local_bh_enable();
	ASFCTRL_FUNC_EXIT;
}

ASF_void_t  asfctrl_fnVSGMappingNotFound(
			ASF_uint32_t ulCommonInterfaceId,
			ASFBuffer_t Buffer,
			genericFreeFn_t pFreeFn,
			ASF_void_t *freeArg)
{
	struct sk_buff  *skb;
	int bVal = in_softirq();

	ASFCTRL_FUNC_ENTRY;
	skb = AsfBuf2Skb(Buffer);

	if (!bVal)
		local_bh_disable();
	/* Send it to for normal path handling */
	ASFCTRL_netif_receive_skb(skb);

	if (!bVal)
		local_bh_enable();
	ASFCTRL_FUNC_EXIT;
}


static int __init asfctrl_init(void)
{
	int ret;
	ASFFFPConfigIdentity_t cmd;
	ASFFFPCallbackFns_t asfctrl_Cbs = {
		asfctrl_fnInterfaceNotFound,
		asfctrl_fnVSGMappingNotFound,
		asfctrl_fnZoneMappingNotFound,
		asfctrl_fnNoFlowFound,
		asfctrl_fnRuntime,
		asfctrl_fnFlowRefreshL2Blob,
		asfctrl_fnFlowActivityRefresh,
		asfctrl_fnFlowTcpSpecialPkts,
		asfctrl_fnFlowValidate,
		asfctrl_fnAuditLog
	};

	ASFCTRL_FUNC_ENTRY;

	memset(p_asfctrl_netdev_cii, 0, sizeof(p_asfctrl_netdev_cii));

	ASFGetCapabilities(&g_cap);

	if (!g_cap.bBufferHomogenous) {
		ASFCTRL_ERR("ASF capabilities: Non homogenous buffer");
		return -1;
	}
	asfctrl_vsg_config_id = jiffies;
	memset(&cmd, 0, sizeof(cmd));
	cmd.ulConfigMagicNumber = asfctrl_vsg_config_id;
	ASFFFPUpdateConfigIdentity(ASF_DEF_VSG, cmd);

	memset(&cmd, 0, sizeof(cmd));
	cmd.bL2blobMagicNumber = 1;
	cmd.l2blobConfig.ulL2blobMagicNumber = asfctrl_vsg_l2blobconfig_id;
	ASFFFPUpdateConfigIdentity(ASF_DEF_VSG, cmd);


	ASFFFPRegisterCallbackFns(&asfctrl_Cbs);

	ret = asfctrl_netns_vsg_init();
	printk("asfctrl_netns_vsg_init returned %d\n", ret);

	printk(KERN_INFO "Subha: before asfctrl_dev_notifier\r\n");
	register_netdevice_notifier(&asfctrl_dev_notifier);
	printk(KERN_INFO "Subha: before devfp_register_hook\r\n");
	printk(KERN_INFO "Subha: devfp_register_hook called asf_ffp_devfp_rx=0x%x, asfctrl_dev_fp_tx_hook=%x\r\n",
		(int)asf_ffp_devfp_rx, (int)asfctrl_dev_fp_tx_hook);
	//devfp_register_hook(asf_ffp_devfp_rx, asfctrl_dev_fp_tx_hook);
	devfp_register_rx_hook_veth(asf_ffp_devfp_rx_veth);
	devfp_register_tx_hook_veth(asfctrl_dev_fp_tx_hook);
	route_hook_fn_register(&asfctrl_l3_route_flush);
#ifdef ASF_IPV6_FP_SUPPORT
	ipv6_route_hook_fn_register(&asfctrl_l3_ipv6_route_flush);
#endif

	asfctrl_sysfs_init();


	if (g_cap.mode & fwMode)
		asfctrl_linux_register_ffp();

	if (ASFGetStatus() == 0)
		ASFDeploy();

	ASFCTRL_INFO("ASF Control Module - Core Loaded.\n");
	ASFCTRL_FUNC_EXIT;
	return 0;
}
Example #12
File: en_tx.c  Project: avagin/linux
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct mlx4_en_priv *priv = netdev_priv(dev);
	union mlx4_wqe_qpn_vlan	qpn_vlan = {};
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_wqe_data_seg *data;
	struct mlx4_en_tx_info *tx_info;
	int tx_ind;
	int nr_txbb;
	int desc_size;
	int real_size;
	u32 index, bf_index;
	__be32 op_own;
	int lso_header_size;
	void *fragptr = NULL;
	bool bounce = false;
	bool send_doorbell;
	bool stop_queue;
	bool inline_ok;
	u8 data_offset;
	u32 ring_cons;
	bool bf_ok;

	tx_ind = skb_get_queue_mapping(skb);
	ring = priv->tx_ring[TX][tx_ind];

	if (unlikely(!priv->port_up))
		goto tx_drop;

	/* fetch ring->cons far ahead before needing it to avoid stall */
	ring_cons = READ_ONCE(ring->cons);

	real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
				  &inline_ok, &fragptr);
	if (unlikely(!real_size))
		goto tx_drop_count;

	/* Align descriptor to TXBB size */
	desc_size = ALIGN(real_size, TXBB_SIZE);
	nr_txbb = desc_size >> LOG_TXBB_SIZE;
	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Oversized header or SG list\n");
		goto tx_drop_count;
	}

	bf_ok = ring->bf_enabled;
	if (skb_vlan_tag_present(skb)) {
		u16 vlan_proto;

		qpn_vlan.vlan_tag = cpu_to_be16(skb_vlan_tag_get(skb));
		vlan_proto = be16_to_cpu(skb->vlan_proto);
		if (vlan_proto == ETH_P_8021AD)
			qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
		else if (vlan_proto == ETH_P_8021Q)
			qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
		else
			qpn_vlan.ins_vlan = 0;
		bf_ok = false;
	}

	netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);

	/* Track current inflight packets for performance analysis */
	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
			 (u32)(ring->prod - ring_cons - 1));

	/* Packet is good - grab an index and transmit it */
	index = ring->prod & ring->size_mask;
	bf_index = ring->prod;

	/* See if we have enough space for whole descriptor TXBB for setting
	 * SW ownership on next descriptor; if not, use a bounce buffer. */
	if (likely(index + nr_txbb <= ring->size))
		tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
	else {
		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
		bounce = true;
		bf_ok = false;
	}

	/* Save skb in tx_info ring */
	tx_info = &ring->tx_info[index];
	tx_info->skb = skb;
	tx_info->nr_txbb = nr_txbb;

	if (!lso_header_size) {
		data = &tx_desc->data;
		data_offset = offsetof(struct mlx4_en_tx_desc, data);
	} else {
Example #13
static inline int
dma_xmit(struct sk_buff* skb, struct net_device *dev, END_DEVICE *ei_local, int gmac_no)
{
	struct netdev_queue *txq;
	dma_addr_t frag_addr;
	u32 frag_size, nr_desc;
	u32 next_idx, desc_odd = 0;
	u32 txd_info2 = 0, txd_info4;
#if defined (CONFIG_RAETH_SG_DMA_TX)
	u32 i, nr_frags;
	const skb_frag_t *tx_frag;
	const struct skb_shared_info *shinfo;
#else
#define nr_frags 0
#endif

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
	if (ra_sw_nat_hook_tx != NULL) {
#if defined (CONFIG_RA_HW_NAT_WIFI) || defined (CONFIG_RA_HW_NAT_PCI)
		if (IS_DPORT_PPE_VALID(skb))
			gmac_no = PSE_PORT_PPE;
		else
#endif
		if (ra_sw_nat_hook_tx(skb, gmac_no) == 0) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

#if !defined (RAETH_HW_PADPKT)
	if (skb->len < ei_local->min_pkt_len) {
		if (skb_padto(skb, ei_local->min_pkt_len)) {
#if defined (CONFIG_RAETH_DEBUG)
			if (net_ratelimit())
				printk(KERN_ERR "%s: skb_padto failed\n", RAETH_DEV_NAME);
#endif
			return NETDEV_TX_OK;
		}
		skb_put(skb, ei_local->min_pkt_len - skb->len);
	}
#endif

#if defined (CONFIG_RALINK_MT7620)
	if (gmac_no == PSE_PORT_PPE)
		txd_info4 = TX4_DMA_FP_BMAP(0x80); /* P7 */
	else
#if defined (CONFIG_RAETH_HAS_PORT5) && !defined (CONFIG_RAETH_HAS_PORT4) && !defined (CONFIG_RAETH_ESW)
		txd_info4 = TX4_DMA_FP_BMAP(0x20); /* P5 */
#elif defined (CONFIG_RAETH_HAS_PORT4) && !defined (CONFIG_RAETH_HAS_PORT5) && !defined (CONFIG_RAETH_ESW)
		txd_info4 = TX4_DMA_FP_BMAP(0x10); /* P4 */
#else
		txd_info4 = 0; /* routing by DA */
#endif
#elif defined (CONFIG_RALINK_MT7621)
	txd_info4 = TX4_DMA_FPORT(gmac_no);
#else
	txd_info4 = (TX4_DMA_QN(3) | TX4_DMA_PN(gmac_no));
#endif

#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && !defined (RAETH_SDMA)
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd_info4 |= TX4_DMA_TUI_CO(7);
#endif

#if defined (CONFIG_RAETH_HW_VLAN_TX)
	if (skb_vlan_tag_present(skb)) {
#if defined (RAETH_HW_VLAN4K)
		txd_info4 |= (0x10000 | skb_vlan_tag_get(skb));
#else
		u32 vlan_tci = skb_vlan_tag_get(skb);
		txd_info4 |= (TX4_DMA_INSV | TX4_DMA_VPRI(vlan_tci));
		txd_info4 |= (u32)ei_local->vlan_4k_map[(vlan_tci & VLAN_VID_MASK)];
#endif
	}
#endif

#if defined (CONFIG_RAETH_SG_DMA_TX)
	shinfo = skb_shinfo(skb);
#endif

#if defined (CONFIG_RAETH_TSO)
	/* fill MSS info in tcp checksum field */
	if (shinfo->gso_size) {
		u32 hdr_len;
		
		if (!(shinfo->gso_type & (SKB_GSO_TCPV4|SKB_GSO_TCPV6))) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		
		if (skb_header_cloned(skb)) {
			if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}
		}
		
		hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
		if (hdr_len >= skb->len) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		
		tcp_hdr(skb)->check = htons(shinfo->gso_size);
		txd_info4 |= TX4_DMA_TSO;
	}
#endif

	nr_desc = DIV_ROUND_UP(skb_headlen(skb), TXD_MAX_SEG_SIZE);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	nr_frags = (u32)shinfo->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		nr_desc += DIV_ROUND_UP(skb_frag_size(tx_frag), TXD_MAX_SEG_SIZE);
	}
#endif
	nr_desc = DIV_ROUND_UP(nr_desc, 2);

	txq = netdev_get_tx_queue(dev, 0);

	/* flush main skb part before spin_lock() */
	frag_size = (u32)skb_headlen(skb);
	frag_addr = dma_map_single(NULL, skb->data, frag_size, DMA_TO_DEVICE);

	/* protect TX ring access (from eth2/eth3 queues) */
	spin_lock(&ei_local->page_lock);

	/* check nr_desc+1 free descriptors */
	next_idx = (ei_local->txd_last_idx + nr_desc) % NUM_TX_DESC;
	if (ei_local->txd_buff[ei_local->txd_last_idx] || ei_local->txd_buff[next_idx]) {
		spin_unlock(&ei_local->page_lock);
		netif_tx_stop_queue(txq);
#if defined (CONFIG_RAETH_DEBUG)
		if (net_ratelimit())
			printk("%s: PDMA TX ring is full! (GMAC: %d)\n", RAETH_DEV_NAME, gmac_no);
#endif
		return NETDEV_TX_BUSY;
	}

	pdma_write_skb_fragment(ei_local, frag_addr, frag_size, &desc_odd,
				&txd_info2, txd_info4, skb, nr_frags == 0);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		frag_size = skb_frag_size(tx_frag);
		frag_addr = skb_frag_dma_map(NULL, tx_frag, 0, frag_size, DMA_TO_DEVICE);
		pdma_write_skb_fragment(ei_local, frag_addr, frag_size, &desc_odd,
					&txd_info2, txd_info4, skb, i == nr_frags - 1);
	}
#endif

#if defined (CONFIG_RAETH_BQL)
	netdev_tx_sent_queue(txq, skb->len);
#endif

#if !defined (CONFIG_RAETH_BQL) || !defined (CONFIG_SMP)
	/* smp_mb() already inlined in netdev_tx_sent_queue */
	wmb();
#endif

	/* kick the DMA TX */
	sysRegWrite(TX_CTX_IDX0, cpu_to_le32(ei_local->txd_last_idx));

	spin_unlock(&ei_local->page_lock);

	return NETDEV_TX_OK;
}