Example #1
0
static int sn_poll_action_single(struct sn_queue *rx_queue, int budget)
{
	struct napi_struct *napi = &rx_queue->napi;
	int poll_cnt = 0;

	while (poll_cnt < budget) {
		struct sk_buff *skb;
		struct sn_rx_metadata rx_meta;
		int ret;

		skb = rx_queue->dev->ops->do_rx(rx_queue, &rx_meta);
		if (!skb)
			return poll_cnt;

		rx_queue->rx_stats.packets++;
		rx_queue->rx_stats.bytes += skb->len;

		ret = sn_process_rx_metadata(skb, &rx_meta);
		if (unlikely(ret)) {
			dev_kfree_skb(skb);
			continue;
		}

		skb_record_rx_queue(skb, rx_queue->queue_id);
		skb->protocol = eth_type_trans(skb, napi->dev);
		skb_mark_napi_id(skb, napi);

		netif_receive_skb(skb);

		poll_cnt++;
	}

	return poll_cnt;
}
Example #2
0
File: rx.c  Project: 3bsa/linux
/* Allocate and construct an SKB around page fragments.
 *
 * @channel: channel the packet arrived on
 * @rx_buf:  first RX buffer of the packet
 * @n_frags: number of page fragments making up the packet
 * @eh:      pointer to the Ethernet header within the first fragment
 * @hdr_len: number of bytes to copy into the skb's linear area
 *
 * Returns the new skb, or NULL on allocation failure (in which case the
 * drop is counted in n_rx_noskb_drops and the caller keeps ownership of
 * the buffers).  On success, ownership of the fragment pages moves to
 * the skb (each rx_buf->page is cleared below).
 */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	/* Copy the hardware RX prefix plus hdr_len bytes of packet data,
	 * then reserve past the alignment pad and prefix so skb->data
	 * points at the Ethernet header. */
	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		/* Skip the bytes already copied into the linear area */
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;	/* page now owned by the skb */
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		/* Entire packet fit in the linear copy: free the page and
		 * report zero fragments for the truesize adjustment below. */
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}
Example #3
0
/* Batched NAPI poll handler.  Pulls up to MAX_RX_BATCH packets per
 * iteration, prepares them in a first pass, then hands the survivors to
 * the stack in a second pass.  Returns the number of packets pulled
 * from the device (dropped packets still count toward the budget).
 */
static int sn_poll_action_batch(struct sn_queue *rx_queue, int budget)
{
	struct napi_struct *napi = &rx_queue->napi;
	int done = 0;

	while (done < budget) {
		struct sn_rx_metadata meta[MAX_RX_BATCH];
		struct sk_buff *skbs[MAX_RX_BATCH];
		int received;
		int idx;

		received = rx_queue->dev->ops->do_rx_batch(rx_queue, meta,
				skbs, min(MAX_RX_BATCH, budget - done));
		if (received == 0)
			break;

		rx_queue->rx_stats.packets += received;

		/* Pass 1: strip metadata and prepare every skb */
		for (idx = 0; idx < received; idx++) {
			struct sk_buff *skb = skbs[idx];

			rx_queue->rx_stats.bytes += skb->len;

			if (unlikely(sn_process_rx_metadata(skb, &meta[idx]))) {
				/* Bad metadata: drop and mark the slot empty */
				dev_kfree_skb(skb);
				skbs[idx] = NULL;
				continue;
			}

			skb_record_rx_queue(skb, rx_queue->queue_id);
			skb->protocol = eth_type_trans(skb, napi->dev);
			skb_mark_napi_id(skb, napi);
		}

		/* Pass 2: deliver the surviving skbs */
		for (idx = 0; idx < received; idx++) {
			if (skbs[idx])
				netif_receive_skb(skbs[idx]);
		}

		done += received;
	}

	return done;
}
Example #4
0
File: rx.c  Project: 3bsa/linux
/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 *
 * @channel: channel the packet arrived on
 * @rx_buf:  first RX buffer of the packet
 * @n_frags: number of page fragments making up the packet
 * @eh:      pointer to the Ethernet header within the first fragment
 *
 * On success, ownership of the fragment pages passes to the skb; on
 * napi_get_frags() failure the buffers are freed here.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		/* No skb available: release the buffers and drop */
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	/* Hardware checksum flag decides whether the stack re-verifies */
	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	/* Attach each fragment's page to the skb; clearing rx_buf->page
	 * records the ownership transfer. */
	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	/* All data is in fragments; the linear area is empty */
	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	skb_mark_napi_id(skb, &channel->napi_str);
	gro_result = napi_gro_frags(napi);
	if (gro_result != GRO_DROP)
		channel->irq_mod_score += 2;	/* presumably feeds adaptive
						 * IRQ moderation — confirm */
}
Example #5
0
/* Batched NAPI poll handler (loopback-aware variant).
 *
 * Pulls up to MAX_BATCH packets per iteration from the device, prepares
 * them, then either delivers them to the stack or routes them through
 * sn_process_loopback() depending on the queue's loopback option.
 * A per-cpu flag (in_batched_polling) is raised for the duration of the
 * loop and any pending TX is flushed before returning.
 *
 * Returns the number of packets pulled from the device; dropped packets
 * still count toward the budget.
 */
static int sn_poll_action_batch(struct sn_queue *rx_queue, int budget)
{
	struct napi_struct *napi = &rx_queue->rx.napi;
	struct sn_device *dev = rx_queue->dev;

	int poll_cnt = 0;

	int *polling;

	/* Mark this CPU as inside batched polling for the duration */
	polling = this_cpu_ptr(&in_batched_polling);
	*polling = 1;

	while (poll_cnt < budget) {
		struct sk_buff *skbs[MAX_BATCH];
		struct sn_rx_metadata rx_meta[MAX_BATCH];

		int cnt;
		int i;

		cnt = dev->ops->do_rx_batch(rx_queue, rx_meta, skbs,
				min(MAX_BATCH, budget - poll_cnt));
		if (cnt == 0)
			break;

		rx_queue->rx.stats.packets += cnt;
		poll_cnt += cnt;

		/* Pass 1: strip metadata and prepare each skb */
		for (i = 0; i < cnt; i++) {
			struct sk_buff *skb = skbs[i];
			int ret;

			/* do_rx_batch may leave empty slots in the array */
			if (unlikely(!skb))
				continue;

			rx_queue->rx.stats.bytes += skb->len;

			ret = sn_process_rx_metadata(skb, &rx_meta[i]);
			if (ret == 0) {
				skb_record_rx_queue(skb, rx_queue->queue_id);
				skb->protocol = eth_type_trans(skb, napi->dev);
#ifdef CONFIG_NET_RX_BUSY_POLL
				skb_mark_napi_id(skb, napi);
#endif
			} else {
				/* Bad metadata: drop and mark slot empty */
				dev_kfree_skb(skb);
				skbs[i] = NULL;
			}
		}

		/* Pass 2: deliver or loop back the surviving skbs */
		if (!rx_queue->rx.opts.loopback) {
			for (i = 0; i < cnt; i++) {
				if (!skbs[i])
					continue;

				netif_receive_skb(skbs[i]);
			}
		} else
			sn_process_loopback(dev, skbs, cnt);
	}

	/* Flush any TX generated while polling (e.g. by loopback) */
	if (dev->ops->flush_tx)
		dev->ops->flush_tx();

	*polling = 0;

	return poll_cnt;
}