static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	int min_slots_needed;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if vif is not ready */
	if (vif->task == NULL ||
	    vif->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	/* At best we'll need one slot for the header and one for each
	 * frag.
	 */
	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;

	/* If the skb is GSO then we'll also need an extra slot for the
	 * metadata.
	 */
	if (skb_is_gso(skb))
		min_slots_needed++;

	/* If the skb can't possibly fit in the remaining slots
	 * then turn off the queue to give the ring a chance to
	 * drain.
	 */
	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
		vif->wake_queue.function = xenvif_wake_queue;
		vif->wake_queue.data = (unsigned long)vif;
		xenvif_stop_queue(vif);
		mod_timer(&vif->wake_queue,
			  jiffies + rx_drain_timeout_jiffies);
	}

	skb_queue_tail(&vif->rx_queue, skb);
	xenvif_kick_thread(vif);

	return NETDEV_TX_OK;

drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	u16 index;
	int min_slots_needed;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up */
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s.\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	/* At best we'll need one slot for the header and one for each
	 * frag.
	 */
	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;

	/* If the skb is GSO then we'll also need an extra slot for the
	 * metadata.
	 */
	if (skb_is_gso(skb))
		min_slots_needed++;

	/* If the skb can't possibly fit in the remaining slots
	 * then turn off the queue to give the ring a chance to
	 * drain.
	 */
	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
		queue->rx_stalled.function = xenvif_rx_stalled;
		queue->rx_stalled.data = (unsigned long)queue;
		xenvif_stop_queue(queue);
		mod_timer(&queue->rx_stalled,
			  jiffies + rx_drain_timeout_jiffies);
	}

	skb_queue_tail(&queue->rx_queue, skb);
	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
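Both variants of xenvif_start_xmit() gate on xenvif_rx_ring_slots_available() before queueing the skb for the rx thread. As a rough illustration of what that check does (a hedged sketch, not quoted from the driver; the helper name carries a _sketch suffix to mark it as hypothetical, and the ring fields assumed here are the standard Xen shared-ring req_prod/req_cons/req_event layout):

/* Sketch: return true if the frontend has posted at least 'needed'
 * rx requests. Otherwise arm req_event so the frontend's next posted
 * request raises an event, then re-read req_prod to close the race
 * with a frontend that advanced concurrently.
 */
static bool xenvif_rx_ring_slots_available_sketch(struct xenvif *vif, int needed)
{
	RING_IDX prod, cons;

	do {
		prod = vif->rx.sring->req_prod;
		cons = vif->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		vif->rx.sring->req_event = prod + 1;

		/* Make sure the event write is visible before we
		 * check prod again.
		 */
		mb();
	} while (vif->rx.sring->req_prod != prod);

	return false;
}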
static void xenvif_rx_action(struct xenvif *vif)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	unsigned long offset;
	bool need_to_notify = false;

	struct netrx_pending_operations npo = {
		.copy = vif->grant_copy_op,
		.meta = vif->meta,
	};

	skb_queue_head_init(&rxq);

	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
		RING_IDX max_slots_needed;
		RING_IDX old_req_cons;
		RING_IDX ring_slots_used;
		int i;

		/* We need a cheap worst-case estimate for the number of
		 * slots we'll use.
		 */
		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
						skb_headlen(skb),
						PAGE_SIZE);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			unsigned int size;
			unsigned int offset;

			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
			offset = skb_shinfo(skb)->frags[i].page_offset;

			/* For a worst-case estimate we need to factor in
			 * the fragment page offset as this will affect the
			 * number of times xenvif_gop_frag_copy() will
			 * call start_new_rx_buffer().
			 */
			max_slots_needed += DIV_ROUND_UP(offset + size,
							 PAGE_SIZE);
		}

		/* To avoid the estimate becoming too pessimal for some
		 * frontends that limit posted rx requests, cap the estimate
		 * at MAX_SKB_FRAGS.
		 */
		if (max_slots_needed > MAX_SKB_FRAGS)
			max_slots_needed = MAX_SKB_FRAGS;

		/* We may need one more slot for GSO metadata */
		if (skb_is_gso(skb) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
		     skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
			max_slots_needed++;

		/* If the skb may not fit then bail out now */
		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
			skb_queue_head(&vif->rx_queue, skb);
			need_to_notify = true;
			vif->rx_last_skb_slots = max_slots_needed;
			break;
		} else
			vif->rx_last_skb_slots = 0;

		old_req_cons = vif->rx.req_cons;
		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
		ring_slots_used = vif->rx.req_cons - old_req_cons;

		BUG_ON(ring_slots_used > max_slots_needed);

		__skb_queue_tail(&rxq, skb);
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));

	if (!npo.copy_prod)
		goto done;

	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_prefix_mask) {
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix |
				      XEN_NETRXF_more_data;

			resp->offset = vif->meta[npo.meta_cons].gso_size;
			resp->id = vif->meta[npo.meta_cons].id;
			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;

			npo.meta_cons++;
			XENVIF_RX_CB(skb)->meta_slots_used--;
		}

		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = xenvif_check_gop(vif,
					  XENVIF_RX_CB(skb)->meta_slots_used,
					  &npo);

		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank |
				 XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
					status, offset,
					vif->meta[npo.meta_cons].size,
					flags);

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_mask) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		xenvif_add_frag_responses(vif, status,
					  vif->meta + npo.meta_cons + 1,
					  XENVIF_RX_CB(skb)->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

		need_to_notify |= !!ret;

		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
		dev_kfree_skb(skb);
	}

done:
	if (need_to_notify)
		notify_remote_via_irq(vif->rx_irq);
}

void xenvif_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&vif->napi);
}
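When the ring lacks room, xenvif_start_xmit() stops the netdev queue and arms the wake_queue (or, per-queue, rx_stalled) timer with rx_drain_timeout_jiffies. A plausible shape for that timer callback, matching the void (unsigned long) signature implied by the .function/.data assignments above; the body is illustrative only and the _sketch name is hypothetical:

/* Sketch: if the frontend still has not drained the ring when the
 * timer fires, wake the netdev queue anyway so the stack can resume
 * transmitting (and dropping) instead of stalling indefinitely behind
 * an unresponsive frontend.
 */
static void xenvif_wake_queue_sketch(unsigned long data)
{
	struct xenvif *vif = (struct xenvif *)data;

	if (netif_queue_stopped(vif->dev)) {
		netdev_err(vif->dev, "draining TX queue\n");
		netif_wake_queue(vif->dev);
	}
}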