Example #1
static inline int sn_send_tx_queue(struct sn_queue *queue,
				   struct sn_device *dev, struct sk_buff *skb)
{
	struct sn_tx_metadata tx_meta;
	int ret = NET_XMIT_DROP;

	/* vlan_insert_tag() gained a protocol argument in Linux 3.10,
	 * when 802.1ad (QinQ) tagging support was added */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
	if (queue->tx.opts.tci) {
		skb = vlan_insert_tag(skb, queue->tx.opts.tci);
		if (unlikely(!skb))
			goto skip_send;
	}
#else
	if (queue->tx.opts.tci) {
		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
				queue->tx.opts.tci);
		if (unlikely(!skb))
			goto skip_send;
	}

	if (queue->tx.opts.outer_tci) {
		skb = vlan_insert_tag(skb, htons(ETH_P_8021AD),
				queue->tx.opts.outer_tci);
		if (unlikely(!skb))
			goto skip_send;
	}
#endif

	/* detach the skb from its owning socket so it no longer counts
	 * against the socket's send-buffer accounting */
	skb_orphan(skb);

	sn_set_tx_metadata(skb, &tx_meta);
	ret = dev->ops->do_tx(queue, skb, &tx_meta);

skip_send:
	switch (ret) {
	case NET_XMIT_CN:
		queue->tx.stats.throttled++;
		/* fall through */

	case NET_XMIT_SUCCESS:
		queue->tx.stats.packets++;
		queue->tx.stats.bytes += skb->len;
		break;

	case NET_XMIT_DROP:
		queue->tx.stats.dropped++;
		break;

	case SN_NET_XMIT_BUFFERED:
		/* the skb has been buffered by do_tx() and is now owned
		 * by the queue; do not free it here */
		return NET_XMIT_SUCCESS;
	}

	dev_kfree_skb(skb);	/* NULL-safe: skb may be NULL after a failed tag insert */
	return ret;
}
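
The version check around the tagging code reflects a kernel API change: before Linux 3.10, vlan_insert_tag() took only the TCI and always wrote an 802.1Q header; from 3.10 on it also takes the tag protocol, which is what makes the 802.1ad (QinQ) outer tag in the second branch possible. Below is a minimal sketch of how the split could be hidden behind one compatibility helper; the name sn_vlan_insert_tag_compat is hypothetical, not part of this module.

#include <linux/version.h>
#include <linux/if_vlan.h>

/* Hypothetical helper: a single call site for both kernel APIs.
 * Before 3.10 only 802.1Q tags can be inserted, so a real
 * implementation should reject any other protocol instead of silently
 * ignoring it as this sketch does. */
static inline struct sk_buff *sn_vlan_insert_tag_compat(struct sk_buff *skb,
							__be16 proto, u16 tci)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
	return vlan_insert_tag(skb, tci);		/* always 802.1Q */
#else
	return vlan_insert_tag(skb, proto, tci);
#endif
}

With such a helper, both preprocessor branches in sn_send_tx_queue() collapse into two unconditional calls with htons(ETH_P_8021Q) and htons(ETH_P_8021AD).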
Example #2
/* As a soft device without a qdisc, this function returns
 * NET_XMIT_* values instead of NETDEV_TX_*. */
static int sn_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct sn_device *dev = netdev_priv(netdev);
	struct sn_queue *queue;
	struct sn_tx_metadata tx_meta;
	int ret;
	u16 txq = skb->queue_mapping;

	/* log_info("txq=%d cpu=%d\n", txq, raw_smp_processor_id()); */

	if (unlikely(skb->len > MAX_LFRAME)) {
		log_err("skb too large! (%u)\n", skb->len);
		dev_kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	/* chained skbs (frag_list) are not supported */
	if (unlikely(skb_shinfo(skb)->frag_list)) {
		log_err("frag_list is not NULL!\n");
		dev_kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	if (unlikely(txq >= dev->num_txq)) {
		log_err("invalid txq=%u\n", txq);
		dev_kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	queue = dev->tx_queues[txq];

	sn_set_tx_metadata(skb, &tx_meta);
	ret = dev->ops->do_tx(queue, skb, &tx_meta);

	if (unlikely(ret == NET_XMIT_DROP)) {
		queue->tx_stats.dropped++;
	} else {
		queue->tx_stats.packets++;
		queue->tx_stats.bytes += skb->len;

		if (unlikely(ret == NET_XMIT_CN))
			queue->tx_stats.throttled++;
	}

	dev_kfree_skb(skb);
	return ret;
}
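
The leading comment makes more sense in context: the function is meant to be installed as the device's .ndo_start_xmit handler, and since the device runs without a qdisc, the NET_XMIT_* values it returns propagate straight back to the dev_queue_xmit() caller instead of being interpreted as NETDEV_TX_* ring feedback. A sketch of that wiring follows, using illustrative names (sn_ndo_start_xmit, sn_netdev_ops, sn_netdev_setup) that are not taken from this module.

#include <linux/version.h>
#include <linux/netdevice.h>

/* Illustrative wiring only.  The thin wrapper exists to satisfy the
 * netdev_tx_t prototype: with no qdisc attached, the NET_XMIT_* codes
 * from sn_start_xmit() pass through unchanged to the dev_queue_xmit()
 * caller. */
static netdev_tx_t sn_ndo_start_xmit(struct sk_buff *skb,
				     struct net_device *netdev)
{
	return (netdev_tx_t)sn_start_xmit(skb, netdev);
}

static const struct net_device_ops sn_netdev_ops = {
	.ndo_start_xmit	= sn_ndo_start_xmit,
	/* .ndo_open, .ndo_stop, ... omitted */
};

static void sn_netdev_setup(struct net_device *netdev)
{
	netdev->netdev_ops = &sn_netdev_ops;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
	netdev->priv_flags |= IFF_NO_QUEUE;	/* run without a qdisc */
#else
	netdev->tx_queue_len = 0;		/* older hint for a queue-less device */
#endif
}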