/*
 * Emit one RX ring response per extra meta slot of a packet.  The head
 * slot (meta[0]) is skipped: the caller has already produced a response
 * for it.  All chained slots carry XEN_NETRXF_more_data except the last.
 */
static void netbk_add_frag_responses(struct xenvif *vif, int status,
				     struct netbk_rx_meta *meta,
				     int nr_meta_slots)
{
	const int extras = nr_meta_slots - 1;
	int slot;

	/* Only the head slot was used: no fragment responses needed. */
	if (extras < 1)
		return;

	for (slot = 0; slot < extras; slot++) {
		/* Keep the chain flagged "more data" until the final slot. */
		int resp_flags = (slot < extras - 1) ? XEN_NETRXF_more_data : 0;

		/* Grant-copied fragments always start at offset 0. */
		make_rx_response(vif, meta[slot].id, status, 0,
				 meta[slot].size, resp_flags);
	}
}
/*
 * Emit an RX ring response for every fragment meta slot of a packet.
 * Each response except the last carries NETRXF_more_data so the
 * frontend knows more slots follow.
 */
static void netbk_add_frag_responses(netif_t *netif, int status,
				     struct netbk_rx_meta *meta, int nr_frags)
{
	int i;

	for (i = 0; i < nr_frags; i++) {
		unsigned long off;
		int more = (i == nr_frags - 1) ? 0 : NETRXF_more_data;

		/* Grant-copied data lands at the start of the page;
		 * mapped fragments keep their original page offset.
		 */
		off = meta[i].copy ? 0 : meta[i].frag.page_offset;

		make_rx_response(netif, meta[i].id, status, off,
				 meta[i].frag.size, more);
	}
}
/*
 * Drain vif->rx_queue: for each queued skb, estimate the worst-case
 * number of RX ring slots it needs, translate it into grant-copy
 * operations (xenvif_gop_skb), execute the batch, then write the
 * corresponding responses into the shared RX ring and notify the
 * frontend if required.
 *
 * NOTE(review): runs single-threaded per vif — no locking is taken
 * here; confirm the caller serializes access to vif->rx.
 */
static void xenvif_rx_action(struct xenvif *vif)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	unsigned long offset;
	bool need_to_notify = false;

	struct netrx_pending_operations npo = {
		.copy = vif->grant_copy_op,
		.meta = vif->meta,
	};

	skb_queue_head_init(&rxq);

	/* Phase 1: build grant-copy ops for as many skbs as fit the ring. */
	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
		RING_IDX max_slots_needed;
		RING_IDX old_req_cons;
		RING_IDX ring_slots_used;
		int i;

		/* We need a cheap worse case estimate for the number of
		 * slots we'll use.
		 */

		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
						skb_headlen(skb),
						PAGE_SIZE);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			unsigned int size;
			unsigned int offset;

			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
			offset = skb_shinfo(skb)->frags[i].page_offset;

			/* For a worse-case estimate we need to factor in
			 * the fragment page offset as this will affect the
			 * number of times xenvif_gop_frag_copy() will
			 * call start_new_rx_buffer().
			 */
			max_slots_needed += DIV_ROUND_UP(offset + size,
							 PAGE_SIZE);
		}

		/* To avoid the estimate becoming too pessimal for some
		 * frontends that limit posted rx requests, cap the estimate
		 * at MAX_SKB_FRAGS.
		 */
		if (max_slots_needed > MAX_SKB_FRAGS)
			max_slots_needed = MAX_SKB_FRAGS;

		/* We may need one more slot for GSO metadata */
		if (skb_is_gso(skb) &&
		   (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
			max_slots_needed++;

		/* If the skb may not fit then bail out now: requeue it at
		 * the head, remember how many slots it wanted so the wakeup
		 * path can wait for exactly that many, and ask for a notify.
		 */
		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
			skb_queue_head(&vif->rx_queue, skb);
			need_to_notify = true;
			vif->rx_last_skb_slots = max_slots_needed;
			break;
		} else
			vif->rx_last_skb_slots = 0;

		old_req_cons = vif->rx.req_cons;
		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
		ring_slots_used = vif->rx.req_cons - old_req_cons;

		/* The estimate above must never be exceeded by actual use. */
		BUG_ON(ring_slots_used > max_slots_needed);

		__skb_queue_tail(&rxq, skb);
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));

	if (!npo.copy_prod)
		goto done;

	/* Phase 2: execute all accumulated grant copies in one hypercall. */
	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);

	/* Phase 3: write responses for each copied skb into the RX ring. */
	while ((skb = __skb_dequeue(&rxq)) != NULL) {

		/* Frontends using the "GSO prefix" protocol get a dedicated
		 * leading slot; its offset field carries the GSO size and
		 * its status field carries the slot count.
		 */
		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_prefix_mask) {
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix |
				      XEN_NETRXF_more_data;

			resp->offset = vif->meta[npo.meta_cons].gso_size;
			resp->id = vif->meta[npo.meta_cons].id;
			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;

			npo.meta_cons++;
			XENVIF_RX_CB(skb)->meta_slots_used--;
		}

		/* Guest RX counts as TX from the host device's viewpoint. */
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = xenvif_check_gop(vif,
					  XENVIF_RX_CB(skb)->meta_slots_used,
					  &npo);

		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank |
				 XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
					status, offset,
					vif->meta[npo.meta_cons].size,
					flags);

		/* In-band GSO metadata goes in an extra-info slot right
		 * after the head response.
		 */
		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_mask) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		/* NOTE(review): only netbk_add_frag_responses() (same
		 * signature) is defined in this chunk; confirm
		 * xenvif_add_frag_responses() is defined elsewhere in the
		 * file, otherwise this call site and the helper name are
		 * out of sync.
		 */
		xenvif_add_frag_responses(vif, status,
					  vif->meta + npo.meta_cons + 1,
					  XENVIF_RX_CB(skb)->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

		need_to_notify |= !!ret;

		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
		dev_kfree_skb(skb);
	}

done:
	if (need_to_notify)
		notify_remote_via_irq(vif->rx_irq);
}

/*
 * Re-check the TX ring for requests posted by the frontend and, if any
 * are pending, schedule NAPI to process them.
 */
void xenvif_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&vif->napi);
}