static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	BUG_ON(skb->dev != dev);

	if (vif->netbk == NULL)
		goto drop;

	/* Drop the packet if the target domain has no receive buffers. */
	if (!xenvif_rx_schedulable(vif))
		goto drop;

	/* Reserve ring slots for the worst-case number of fragments. */
	vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb);
	xenvif_get(vif);

	if (vif->can_queue && xen_netbk_must_stop_queue(vif))
		netif_stop_queue(dev);

	xen_netbk_queue_tx_skb(vif, skb);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* RX interrupt: the frontend has posted receive buffers, so wake the
 * queue if packets can now be scheduled to it.
 */
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif *vif = dev_id;

	if (xenvif_rx_schedulable(vif))
		netif_wake_queue(vif->dev);

	return IRQ_HANDLED;
}
/* Combined interrupt: schedule this vif for netback processing, then
 * wake the queue if the frontend can accept packets again.
 */
static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	struct xenvif *vif = dev_id;

	if (vif->netbk == NULL)
		return IRQ_NONE;

	xen_netbk_schedule_xenvif(vif);

	if (xenvif_rx_schedulable(vif))
		netif_wake_queue(vif->dev);

	return IRQ_HANDLED;
}
/* Restart a stopped queue once the frontend is schedulable again. */
void xenvif_notify_tx_completion(struct xenvif *vif)
{
	if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
		netif_wake_queue(vif->dev);
}