/*
 * Consolidate an skb that carries a frag_list into a brand new layout whose
 * frags all point at freshly allocated local pages, so no grant-mapped pages
 * remain referenced by the skb afterwards.
 *
 * Returns 0 on success or -ENOMEM if a new page cannot be allocated; on
 * failure all pages allocated so far are released again.
 */
static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
{
	unsigned int offset = skb_headlen(skb);
	skb_frag_t frags[MAX_SKB_FRAGS];
	int i;
	struct ubuf_info *uarg;
	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

	/*
	 * Stats bookkeeping: this path involves two zerocopy skbs (skb and
	 * its single frag_list member nskb) and is only taken on frag
	 * overflow.
	 */
	vif->tx_zerocopy_sent += 2;
	vif->tx_frag_overflow++;

	/* Populate nskb's frags from its pending grant-mapped pages first. */
	xenvif_fill_frags(vif, nskb);

	/* Subtract frags size, we will correct it later */
	skb->truesize -= skb->data_len;
	/* Fold nskb's payload length into skb before copying it out below. */
	skb->len += nskb->len;
	skb->data_len += nskb->len;

	/* create a brand new frags array and coalesce there */
	for (i = 0; offset < skb->len; i++) {
		struct page *page;
		unsigned int len;

		/*
		 * NOTE(review): this BUG_ON fires if the coalesced payload
		 * needs more than MAX_SKB_FRAGS pages — presumably the
		 * caller guarantees the total length fits; verify, since a
		 * guest-influenced length reaching here would be fatal.
		 */
		BUG_ON(i >= MAX_SKB_FRAGS);
		page = alloc_page(GFP_ATOMIC|__GFP_COLD);
		if (!page) {
			int j;

			/*
			 * Undo the truesize subtraction from above and drop
			 * every page allocated so far.
			 *
			 * NOTE(review): skb->data_len was already grown by
			 * nskb->len, so this restores truesize using the
			 * *new* data_len, not the value that was subtracted
			 * — looks like a deliberate over-account (skb still
			 * carries nskb on its frag_list here), but worth
			 * confirming against the freeing path.
			 */
			skb->truesize += skb->data_len;
			for (j = 0; j < i; j++)
				put_page(frags[j].page.p);
			return -ENOMEM;
		}

		/* Copy up to one page per iteration; last page may be short. */
		if (offset + PAGE_SIZE < skb->len)
			len = PAGE_SIZE;
		else
			len = skb->len - offset;

		/*
		 * skb_copy_bits() walks head, frags and frag_list, so this
		 * linearizes the whole (skb + nskb) payload into local pages.
		 * It only fails on a bad offset/len, which would mean our
		 * accounting above is broken — hence BUG().
		 */
		if (skb_copy_bits(skb, offset, page_address(page), len))
			BUG();

		offset += len;
		frags[i].page.p = page;
		frags[i].page_offset = 0;
		skb_frag_size_set(&frags[i], len);
	}

	/* swap out with old one */
	memcpy(skb_shinfo(skb)->frags,
	       frags,
	       i * sizeof(skb_frag_t));
	skb_shinfo(skb)->nr_frags = i;
	/* Correct truesize for the i full pages now owned by the skb. */
	skb->truesize += i * PAGE_SIZE;

	/* remove traces of mapped pages and frag_list */
	skb_frag_list_init(skb);
	/*
	 * The payload is now fully local, so fire the zerocopy completion
	 * callback immediately (success = true) and detach it; the grant
	 * mappings it tracks can be torn down.
	 */
	uarg = skb_shinfo(skb)->destructor_arg;
	uarg->callback(uarg, true);
	skb_shinfo(skb)->destructor_arg = NULL;

	/*
	 * Mark nskb zerocopy before freeing so its (grant-mapped) frag pages
	 * are handled by the zerocopy release path rather than copied.
	 */
	skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	kfree_skb(nskb);

	return 0;
}
/*
 * Drain vif->tx_queue, finalize each skb (grant-op results, frag fill-in,
 * checksum and GSO setup) and hand it to the network stack via
 * netif_receive_skb().  Returns the number of packets submitted.
 *
 * Grant-mapping / zerocopy variant: frag pages remain grant-mapped and are
 * released via the SKBTX_DEV_ZEROCOPY completion callback.
 */
static int xenvif_tx_submit(struct xenvif *vif)
{
	struct gnttab_map_grant_ref *gop_map = vif->tx_map_ops;
	struct gnttab_copy *gop_copy = vif->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		/* The pending slot index was stashed in the skb's cb area. */
		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
		txp = &vif->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(vif, skb, &gop_map,
						 &gop_copy))) {
			/*
			 * Drop the frags before freeing: their pages belong
			 * to the (failed) grant operations, not to the skb.
			 */
			skb_shinfo(skb)->nr_frags = 0;
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		callback_param(vif, pending_idx).ctx = NULL;
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(vif, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		/* Translate the frontend's checksum flags. */
		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(vif, skb);

		if (unlikely(skb_has_frag_list(skb))) {
			/*
			 * Too many slots for one frags array: coalesce the
			 * frag_list into local pages.  On failure the skb
			 * must still go through the zerocopy release path
			 * so the grant mappings get cleaned up.
			 */
			if (xenvif_handle_frag_list(vif, skb)) {
				if (net_ratelimit())
					netdev_err(vif->dev,
						   "Not enough memory to consolidate frag_list!\n");
				skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
				kfree_skb(skb);
				continue;
			}
		}

		/* Ensure enough linear header for protocol processing. */
		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
			int target = min_t(int, skb->len, PKT_PROT_LEN);
			__pskb_pull_tail(skb, target - skb_headlen(skb));
		}

		skb->dev = vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(vif, skb)) {
			netdev_dbg(vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			/* We have to set this flag to trigger the callback */
			if (skb_shinfo(skb)->destructor_arg)
				skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss = skb_shinfo(skb)->gso_size;
			int hdrlen = skb_transport_header(skb) -
				skb_mac_header(skb) +
				tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		vif->dev->stats.rx_bytes += skb->len;
		vif->dev->stats.rx_packets++;

		work_done++;

		/* Set this flag right before netif_receive_skb, otherwise
		 * someone might think this packet already left netback, and
		 * do a skb_copy_ubufs while we are still in control of the
		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
		 */
		if (skb_shinfo(skb)->destructor_arg) {
			skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
			vif->tx_zerocopy_sent++;
		}

		netif_receive_skb(skb);
	}
	/*
	 * NOTE(review): the function's tail (presumably "return work_done;"
	 * and the closing brace) is not visible in this chunk.
	 */
/*
 * Drain vif->tx_queue, finalize each skb and hand it to the network stack
 * via netif_receive_skb().  Returns the number of packets submitted.
 *
 * Older grant-copy variant: the head payload is memcpy'd out of the
 * grant-copied page before the skb is passed up.
 *
 * NOTE(review): this duplicates the name of the zerocopy xenvif_tx_submit
 * earlier in this file — these look like two driver vintages concatenated;
 * only one can exist in a compiled translation unit.
 */
static int xenvif_tx_submit(struct xenvif *vif)
{
	struct gnttab_copy *gop = vif->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		/* In this variant the pending index is stored in skb->data. */
		pending_idx = *((u16 *)skb->data);
		txp = &vif->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
			netdev_dbg(vif->dev, "netback grant failed.\n");
			/*
			 * Drop the frags before freeing: their pages belong
			 * to the (failed) grant operations, not to the skb.
			 */
			skb_shinfo(skb)->nr_frags = 0;
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		/* Copy the head payload out of the grant-copied slot page. */
		memcpy(skb->data,
		       (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
		       data_len);
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(vif, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		/* Translate the frontend's checksum flags. */
		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(vif, skb);

		/* Ensure enough linear header for protocol processing. */
		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
			int target = min_t(int, skb->len, PKT_PROT_LEN);
			__pskb_pull_tail(skb, target - skb_headlen(skb));
		}

		skb->dev = vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(vif, skb)) {
			netdev_dbg(vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss = skb_shinfo(skb)->gso_size;
			int hdrlen = skb_transport_header(skb) -
				skb_mac_header(skb) +
				tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		vif->dev->stats.rx_bytes += skb->len;
		vif->dev->stats.rx_packets++;

		work_done++;

		netif_receive_skb(skb);
	}
	/*
	 * NOTE(review): the function's tail (presumably "return work_done;"
	 * and the closing brace) is not visible in this chunk.
	 */