static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up.
	 * This handler should be called inside an RCU read section
	 * so we don't need to enter it here explicitly.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s.\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	/* If there is no hash algorithm configured then make sure there
	 * is no hash information in the socket buffer otherwise it
	 * would be incorrectly forwarded to the frontend.
	 */
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		skb_clear_hash(skb);

	xenvif_rx_queue_tail(queue, skb);
	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up */
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s.\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	xenvif_rx_queue_tail(queue, skb);
	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	int min_slots_needed;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if vif is not ready */
	if (vif->task == NULL ||
	    vif->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	/* At best we'll need one slot for the header and one for each
	 * frag.
	 */
	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;

	/* If the skb is GSO then we'll also need an extra slot for the
	 * metadata.
	 */
	if (skb_is_gso(skb))
		min_slots_needed++;

	/* If the skb can't possibly fit in the remaining slots
	 * then turn off the queue to give the ring a chance to
	 * drain.
	 */
	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
		vif->wake_queue.function = xenvif_wake_queue;
		vif->wake_queue.data = (unsigned long)vif;
		xenvif_stop_queue(vif);
		mod_timer(&vif->wake_queue,
			  jiffies + rx_drain_timeout_jiffies);
	}

	skb_queue_tail(&vif->rx_queue, skb);
	xenvif_kick_thread(vif);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	u16 index;
	int min_slots_needed;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up */
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s.\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	/* At best we'll need one slot for the header and one for each
	 * frag.
	 */
	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;

	/* If the skb is GSO then we'll also need an extra slot for the
	 * metadata.
	 */
	if (skb_is_gso(skb))
		min_slots_needed++;

	/* If the skb can't possibly fit in the remaining slots
	 * then turn off the queue to give the ring a chance to
	 * drain.
	 */
	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
		queue->rx_stalled.function = xenvif_rx_stalled;
		queue->rx_stalled.data = (unsigned long)queue;
		xenvif_stop_queue(queue);
		mod_timer(&queue->rx_stalled,
			  jiffies + rx_drain_timeout_jiffies);
	}

	skb_queue_tail(&queue->rx_queue, skb);
	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* RX is schedulable only while the vif is schedulable and the RX ring is not full. */
static int xenvif_rx_schedulable(struct xenvif *vif)
{
	return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
}