/* Pass a received packet up through the generic LRO stack
 *
 * Handles driverlink veto, and passes the fragment up via
 * the appropriate LRO method.  Exactly one of rx_buf->page and
 * rx_buf->skb is expected to be set; ownership of whichever one is
 * set transfers to the LRO engine and the rx_buf pointer is cleared.
 */
static void efx_rx_packet_lro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf)
{
	struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
	/* The channel pointer is passed through as the LRO private
	 * cookie, presumably recovered later by the LRO callbacks —
	 * TODO confirm against the lro_mgr setup code. */
	void *priv = channel;

	/* Pass the skb/page into the LRO engine */
	if (rx_buf->page) {
		/* Page-based buffer: describe it as a single fragment */
		struct skb_frag_struct frags;
		frags.page = rx_buf->page;
		frags.page_offset = efx_rx_buf_offset(rx_buf);
		frags.size = rx_buf->len;

		lro_receive_frags(lro_mgr, &frags, rx_buf->len,
				  rx_buf->len, priv, 0);

		/* A buffer must not carry both a page and an skb */
		EFX_BUG_ON_PARANOID(rx_buf->skb);
		/* Page reference now owned by the LRO engine */
		rx_buf->page = NULL;
	} else {
		EFX_BUG_ON_PARANOID(!rx_buf->skb);

		lro_receive_skb(lro_mgr, rx_buf->skb, priv);
		/* skb reference now owned by the LRO engine */
		rx_buf->skb = NULL;
	}
}
/* Return a pointer to the Ethernet header within an RX buffer,
 * skipping the hardware hash prefix for skb-based buffers. */
static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
	if (!(buf->flags & EFX_RX_BUF_PAGE))
		return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;

	/* Page-based buffer: the header sits at the buffer's offset
	 * into its backing page. */
	return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
}
/* Return a pointer to the Ethernet header within an RX buffer,
 * skipping the hardware hash prefix for skb-based buffers. */
static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
	if (!buf->is_page)
		return ((u8 *)buf->u.skb->data +
			efx->type->rx_buffer_hash_size);

	/* Page-based buffer: the header sits at the buffer's offset
	 * into its backing page. */
	return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
}
/* Pass a received packet up through the generic LRO stack
 *
 * Handles driverlink veto, and passes the fragment up via
 * the appropriate LRO method.  Exactly one of rx_buf->page and
 * rx_buf->skb is expected to be set; ownership transfers to the
 * GRO layer and the rx_buf pointer is cleared.  The GRO result is
 * used to bias the channel's buffer-allocation strategy and
 * interrupt moderation score.
 */
static void efx_rx_packet_lro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf,
			      bool checksummed)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;

	/* Pass the skb/page into the LRO engine */
	if (rx_buf->page) {
		struct page *page = rx_buf->page;
		struct sk_buff *skb;

		EFX_BUG_ON_PARANOID(rx_buf->skb);
		/* Take ownership of the page before anything can fail */
		rx_buf->page = NULL;

		skb = napi_get_frags(napi);
		if (!skb) {
			/* No skb available: drop our page reference */
			put_page(page);
			return;
		}

		/* Attach the page as the skb's single fragment */
		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->frags[0].page_offset =
			efx_rx_buf_offset(rx_buf);
		skb_shinfo(skb)->frags[0].size = rx_buf->len;
		skb_shinfo(skb)->nr_frags = 1;

		/* All packet data lives in the fragment, none linear */
		skb->len = rx_buf->len;
		skb->data_len = rx_buf->len;
		skb->truesize += rx_buf->len;
		skb->ip_summed =
			checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;

		skb_record_rx_queue(skb, channel->channel);

		gro_result = napi_gro_frags(napi);
	} else {
		struct sk_buff *skb = rx_buf->skb;

		EFX_BUG_ON_PARANOID(!skb);
		/* skb-based buffers are only used for checksummed
		 * packets in this path */
		EFX_BUG_ON_PARANOID(!checksummed);
		rx_buf->skb = NULL;

		gro_result = napi_gro_receive(napi, skb);
	}

	/* Feed back into the page-vs-skb allocation heuristic:
	 * GRO_NORMAL means coalescing did not help, so lean towards
	 * skb allocation; any other non-drop result favours pages
	 * and nudges interrupt moderation. */
	if (gro_result == GRO_NORMAL) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
	} else if (gro_result != GRO_DROP) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
		channel->irq_mod_score += 2;
	}
}
/* Allocate and construct an SKB around a struct page.
 *
 * Copies the first hdr_len bytes into the skb's linear area and, if
 * the packet is longer than that, attaches the remainder of the page
 * as a single fragment.  On success, ownership of rx_buf->page
 * transfers to the returned skb (or the page is freed if it was not
 * needed) and rx_buf->page is cleared.  Returns NULL on allocation
 * failure, leaving rx_buf untouched.
 */
static struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
				     struct efx_nic *efx, int hdr_len)
{
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
	if (unlikely(skb == NULL)) {
		EFX_ERR_RL(efx, "RX out of memory for skb\n");
		return NULL;
	}

	/* Fresh skb must have no fragments yet */
	EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags);
	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	/* Reserve headroom so the copied headers end up aligned */
	skb_reserve(skb, EFX_PAGE_SKB_ALIGN);

	skb->len = rx_buf->len;
	skb->truesize = rx_buf->len + sizeof(struct sk_buff);
	/* Copy the packet headers into the linear area */
	memcpy(skb->data, rx_buf->data, hdr_len);
	skb->tail += hdr_len;

	/* Append the remaining page onto the frag list */
	if (unlikely(rx_buf->len > hdr_len)) {
		struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
		frag->page = rx_buf->page;
		/* Fragment starts just past the headers we copied */
		frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
		frag->size = skb->len - hdr_len;
		skb_shinfo(skb)->nr_frags = 1;
		skb->data_len = frag->size;
	} else {
		/* Whole packet fitted in the linear area; the page is
		 * no longer needed */
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		skb->data_len = 0;
	}

	/* Ownership has transferred from the rx_buf to skb */
	rx_buf->page = NULL;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	return skb;
}
/* Pass a received packet up through the generic LRO stack
 *
 * Handles driverlink veto, and passes the fragment up via
 * the appropriate LRO method.  Exactly one of rx_buf->page and
 * rx_buf->skb is expected to be set; ownership transfers to the
 * GRO layer (or the page is released on skb-allocation failure)
 * and the rx_buf pointer is cleared.
 */
static void efx_rx_packet_lro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf)
{
	struct napi_struct *napi = &channel->napi_str;

	/* Pass the skb/page into the LRO engine */
	if (rx_buf->page) {
		struct sk_buff *skb = napi_get_frags(napi);
		if (!skb) {
			/* No skb available: drop the page reference.
			 * Note the goto still clears rx_buf->page
			 * below, so the buffer does not dangle. */
			put_page(rx_buf->page);
			goto out;
		}

		/* Attach the page as the skb's single fragment */
		skb_shinfo(skb)->frags[0].page = rx_buf->page;
		skb_shinfo(skb)->frags[0].page_offset =
			efx_rx_buf_offset(rx_buf);
		skb_shinfo(skb)->frags[0].size = rx_buf->len;
		skb_shinfo(skb)->nr_frags = 1;

		/* All packet data lives in the fragment, none linear */
		skb->len = rx_buf->len;
		skb->data_len = rx_buf->len;
		skb->truesize += rx_buf->len;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		napi_gro_frags(napi);

out:
		/* A buffer must not carry both a page and an skb */
		EFX_BUG_ON_PARANOID(rx_buf->skb);
		/* Page reference now owned by GRO (or dropped above) */
		rx_buf->page = NULL;
	} else {
		EFX_BUG_ON_PARANOID(!rx_buf->skb);

		napi_gro_receive(napi, rx_buf->skb);
		/* skb reference now owned by GRO */
		rx_buf->skb = NULL;
	}
}
/**
 * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
 *
 * @rx_queue: Efx RX queue
 * @rx_buf: RX buffer structure to populate
 *
 * This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.  For order-0 pages, multiple buffers are packed into
 * one page (each starting on a 512-byte boundary), with the queue
 * keeping partial-page state in buf_page/buf_dma_addr/buf_data.
 * Return a negative error code or 0 on success.
 */
static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
				   struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = rx_queue->efx;
	int bytes, space, offset;

	/* Usable buffer length, leaving room for IP-header alignment */
	bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;

	/* If there is space left in the previously allocated page,
	 * then use it. Otherwise allocate a new one */
	rx_buf->page = rx_queue->buf_page;
	if (rx_buf->page == NULL) {
		dma_addr_t dma_addr;

		rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
					   efx->rx_buffer_order);
		if (unlikely(rx_buf->page == NULL))
			return -ENOMEM;

		/* Map the whole page once; individual buffers use
		 * offsets into this single mapping */
		dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, 0,
					efx_rx_buf_size(efx),
					PCI_DMA_FROMDEVICE);

		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
			__free_pages(rx_buf->page, efx->rx_buffer_order);
			rx_buf->page = NULL;
			return -EIO;
		}

		/* Remember the partially-used page on the queue */
		rx_queue->buf_page = rx_buf->page;
		rx_queue->buf_dma_addr = dma_addr;
		rx_queue->buf_data = (page_address(rx_buf->page) +
				      EFX_PAGE_IP_ALIGN);
	}

	rx_buf->len = bytes;
	rx_buf->data = rx_queue->buf_data;
	offset = efx_rx_buf_offset(rx_buf);
	rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;

	/* Try to pack multiple buffers per page */
	if (efx->rx_buffer_order == 0) {
		/* The next buffer starts on the next 512 byte boundary */
		rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
		offset += ((bytes + 0x1ff) & ~0x1ff);

		space = efx_rx_buf_size(efx) - offset;
		if (space >= bytes) {
			/* Refs dropped on kernel releasing each skb */
			get_page(rx_queue->buf_page);
			goto out;
		}
	}

	/* This is the final RX buffer for this page, so mark it for
	 * unmapping */
	rx_queue->buf_page = NULL;
	rx_buf->unmap_addr = rx_queue->buf_dma_addr;

 out:
	return 0;
}