Example #1
0
/* Build an skb for a received buffer and hand it to the network stack.
 * Frees the RX buffer instead if skb construction fails.
 */
static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
	struct sk_buff *skb;

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(!skb)) {
		/* Could not allocate an skb; drop the buffer. */
		efx_free_rx_buffer(rx_buf);
		return;
	}

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Report checksum status: none by default, UNNECESSARY when the
	 * hardware flagged the packet as already validated.
	 */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* A channel-type-specific handler may consume the skb; if it
	 * does (returns non-zero), we are done.
	 */
	if (channel->type->receive_skb &&
	    channel->type->receive_skb(channel, skb))
		return;

	/* Pass the packet up the regular network stack */
	netif_receive_skb(skb);
}
Example #2
0
/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
		     struct efx_rx_buffer *rx_buf, bool checksummed)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;
	bool lro = !!(efx->net_dev->features & NETIF_F_LRO);

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		goto done;
	}

	if (rx_buf->skb) {
		prefetch(skb_shinfo(rx_buf->skb));

		skb_put(rx_buf->skb, rx_buf->len);

		/* Move past the ethernet header. rx_buf->data still points
		 * at the ethernet header */
		rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
						       efx->net_dev);
	}

	/* Both our generic-LRO and SFC-SSR support skb and page based
	 * allocation, but neither support switching from one to the
	 * other on the fly. If we spot that the allocation mode has
	 * changed, then flush the LRO state.
	 */
	if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) {
		efx_flush_lro(channel);
		channel->rx_alloc_pop_pages = (rx_buf->page != NULL);
	}
	if (likely(checksummed && lro)) {
		efx_rx_packet_lro(channel, rx_buf);
		goto done;
	}

	/* Form an skb if required */
	if (rx_buf->page) {
		int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS);
		skb = efx_rx_mk_skb(rx_buf, efx, hdr_len);
		if (unlikely(skb == NULL)) {
			efx_free_rx_buffer(efx, rx_buf);
			goto done;
		}
	} else {
		/* We now own the SKB */
		skb = rx_buf->skb;
		rx_buf->skb = NULL;
	}

	EFX_BUG_ON_PARANOID(rx_buf->page);
	EFX_BUG_ON_PARANOID(rx_buf->skb);
	EFX_BUG_ON_PARANOID(!skb);

	/* Set the SKB flags */
	if (unlikely(!checksummed || !efx->rx_checksum_enabled))
		skb->ip_summed = CHECKSUM_NONE;

	/* Pass the packet up */
	netif_receive_skb(skb);

	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;

done:
	;
}