Example 1
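These snippets look like successive revisions of the NAPI receive-poll handlers from the BESS/SoftNIC `sn` kernel module (an inference from the `sn_` identifiers). The first version polls one packet per iteration: it asks the device's `do_rx` hook for an skb, updates the queue statistics, runs metadata processing (dropping the packet on failure), and hands the skb to the stack with netif_receive_skb() until the budget is spent or the queue runs dry.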
static int sn_poll_action_single(struct sn_queue *rx_queue, int budget)
{
	struct napi_struct *napi = &rx_queue->napi;
	int poll_cnt = 0;

	while (poll_cnt < budget) {
		struct sk_buff *skb;
		struct sn_rx_metadata rx_meta;
		int ret;

		/* Ask the device-specific hook for the next received packet */
		skb = rx_queue->dev->ops->do_rx(rx_queue, &rx_meta);
		if (!skb)
			return poll_cnt;

		rx_queue->rx_stats.packets++;
		rx_queue->rx_stats.bytes += skb->len;

		/* Run driver-specific metadata processing; on failure the
		 * packet is dropped without counting against the budget */
		ret = sn_process_rx_metadata(skb, &rx_meta);
		if (unlikely(ret)) {
			dev_kfree_skb(skb);
			continue;
		}

		/* Fill in the fields the stack expects, then hand the skb up */
		skb_record_rx_queue(skb, rx_queue->queue_id);
		skb->protocol = eth_type_trans(skb, napi->dev);
		skb_mark_napi_id(skb, napi);

		netif_receive_skb(skb);

		poll_cnt++;
	}

	return poll_cnt;
}
Example 2
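The second version amortizes per-packet overhead by receiving in batches. `do_rx_batch` fills up to MAX_RX_BATCH skbs at once (capped by the remaining budget); a first pass validates metadata and NULLs out failed entries, and a second pass delivers the survivors, keeping error handling out of the delivery loop.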
static int sn_poll_action_batch(struct sn_queue *rx_queue, int budget)
{
	struct napi_struct *napi = &rx_queue->napi;
	int poll_cnt = 0;

	while (poll_cnt < budget) {
		struct sk_buff *skbs[MAX_RX_BATCH];
		struct sn_rx_metadata rx_meta[MAX_RX_BATCH];

		int cnt;
		int i;

		/* Fetch up to MAX_RX_BATCH packets, but never more than the
		 * remaining NAPI budget */
		cnt = rx_queue->dev->ops->do_rx_batch(rx_queue, rx_meta, skbs,
				min(MAX_RX_BATCH, budget - poll_cnt));
		if (cnt == 0)
			break;

		rx_queue->rx_stats.packets += cnt;

		for (i = 0; i < cnt; i++) {
			struct sk_buff *skb = skbs[i];
			int ret;

			rx_queue->rx_stats.bytes += skb->len;

			/* On metadata failure, free the skb and NULL its slot
			 * so the delivery pass below skips it */
			ret = sn_process_rx_metadata(skb, &rx_meta[i]);
			if (unlikely(ret)) {
				dev_kfree_skb(skb);
				skbs[i] = NULL;
				continue;
			}

			skb_record_rx_queue(skb, rx_queue->queue_id);
			skb->protocol = eth_type_trans(skb, napi->dev);
			skb_mark_napi_id(skb, napi);
		}

		/* Second pass: deliver the surviving skbs to the stack */
		for (i = 0; i < cnt; i++) {
			if (skbs[i])
				netif_receive_skb(skbs[i]);
		}

		poll_cnt += cnt;
	}

	return poll_cnt;
}
Example 3
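The third version refines the batched path: queue state has moved under `rx_queue->rx`, a per-CPU `in_batched_polling` flag brackets the loop, skb_mark_napi_id() is compiled in only under CONFIG_NET_RX_BUSY_POLL, loopback-configured queues hand their batch to sn_process_loopback() instead of the stack, and an optional `flush_tx` hook is invoked before returning.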
static int sn_poll_action_batch(struct sn_queue *rx_queue, int budget)
{
	struct napi_struct *napi = &rx_queue->rx.napi;
	struct sn_device *dev = rx_queue->dev;

	int poll_cnt = 0;

	int *polling;

	/* Mark this CPU as inside batched polling; cleared before return */
	polling = this_cpu_ptr(&in_batched_polling);
	*polling = 1;

	while (poll_cnt < budget) {
		struct sk_buff *skbs[MAX_BATCH];
		struct sn_rx_metadata rx_meta[MAX_BATCH];

		int cnt;
		int i;

		cnt = dev->ops->do_rx_batch(rx_queue, rx_meta, skbs,
				min(MAX_BATCH, budget - poll_cnt));
		if (cnt == 0)
			break;

		rx_queue->rx.stats.packets += cnt;
		poll_cnt += cnt;

		for (i = 0; i < cnt; i++) {
			struct sk_buff *skb = skbs[i];
			int ret;

			/* The batch may contain empty slots; skip them */
			if (unlikely(!skb))
				continue;

			rx_queue->rx.stats.bytes += skb->len;

			ret = sn_process_rx_metadata(skb, &rx_meta[i]);
			if (ret == 0) {
				skb_record_rx_queue(skb, rx_queue->queue_id);
				skb->protocol = eth_type_trans(skb, napi->dev);
#ifdef CONFIG_NET_RX_BUSY_POLL
				skb_mark_napi_id(skb, napi);
#endif
			} else {
				dev_kfree_skb(skb);
				skbs[i] = NULL;
			}
		}

		/* Deliver to the local stack, unless this queue is
		 * configured for loopback */
		if (!rx_queue->rx.opts.loopback) {
			for (i = 0; i < cnt; i++) {
				if (!skbs[i])
					continue;

				netif_receive_skb(skbs[i]);
			}
		} else {
			sn_process_loopback(dev, skbs, cnt);
		}
	}

	/* Flush pending TX, if the driver provides a hook for it */
	if (dev->ops->flush_tx)
		dev->ops->flush_tx();

	*polling = 0;

	return poll_cnt;
}
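Taken together, the three revisions show the receive path evolving from one ops call per packet to batch-at-a-time processing, with the two-pass validate-then-deliver structure introduced in the second version preserved throughout.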