/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		struct efx_rx_queue *rx_queue;

		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EFX_RX_PKT_TCP) &&
	    !channel->type->receive_skb &&
	    !efx_channel_busy_polling(channel))
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}
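/* A minimal stand-alone sketch of the prefix decode above: the driver reads a
 * little-endian 16-bit length from the hardware RX prefix with le16_to_cpup().
 * The snippet below shows the same decode as a separate user-space C program;
 * EXAMPLE_LEN_OFFSET and example_rx_prefix_len() are illustrative names only,
 * and the offset value is assumed rather than taken from any real NIC prefix
 * layout.
 */

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_LEN_OFFSET 8	/* assumed offset of the length field */

static uint16_t example_rx_prefix_len(const uint8_t *eh)
{
	/* Assemble the value byte-by-byte so the result is host-endian on
	 * any platform, which is what le16_to_cpup() guarantees in the
	 * kernel.
	 */
	const uint8_t *p = eh + EXAMPLE_LEN_OFFSET;

	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	uint8_t prefix[16] = { 0 };

	prefix[EXAMPLE_LEN_OFFSET] = 0x2e;	/* 0x052e = 1326 bytes */
	prefix[EXAMPLE_LEN_OFFSET + 1] = 0x05;
	printf("decoded length: %u\n", example_rx_prefix_len(prefix));
	return 0;
}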
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
		   unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1)
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->type->rx_buffer_hash_size;
	rx_buf->len -= efx->type->rx_buffer_hash_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
		}
		rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}
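/* A minimal stand-alone sketch of the scatter-length validation and the
 * tail-fragment length fix-up performed above.  EXAMPLE_USR_BUF_SIZE and
 * EXAMPLE_MAX_FRAGS stand in for EFX_RX_USR_BUF_SIZE and EFX_RX_MAX_FRAGS;
 * the values used here are assumed for illustration and are not the driver's
 * actual constants.
 */

#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_USR_BUF_SIZE 1824	/* assumed per-fragment usable bytes */
#define EXAMPLE_MAX_FRAGS 4		/* assumed scatter limit */

/* Every fragment except the last must be completely filled, and the
 * completed length must fit within n_frags buffers - the same bounds the
 * driver checks before accepting a multi-fragment completion.
 */
static bool example_scatter_len_ok(unsigned int n_frags, unsigned int len)
{
	return n_frags <= EXAMPLE_MAX_FRAGS &&
	       len > (n_frags - 1) * EXAMPLE_USR_BUF_SIZE &&
	       len <= n_frags * EXAMPLE_USR_BUF_SIZE;
}

int main(void)
{
	unsigned int n_frags = 3, len = 4000;

	if (example_scatter_len_ok(n_frags, len)) {
		/* Same arithmetic as the last-fragment length fix-up. */
		unsigned int tail_len =
			len - (n_frags - 1) * EXAMPLE_USR_BUF_SIZE;

		printf("tail fragment carries %u bytes\n", tail_len);
	} else {
		printf("invalid completion: would be discarded\n");
	}
	return 0;
}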