static int ar9170_usb_flush(struct ar9170 *ar)
{
	struct ar9170_usb *aru = (void *) ar;
	struct urb *urb;
	int ret, err = 0;

	if (IS_STARTED(ar))
		aru->common.state = AR9170_IDLE;

	/* give the pending queue some time to drain on its own */
	usb_wait_anchor_empty_timeout(&aru->tx_pending,
				      msecs_to_jiffies(800));

	/* reclaim whatever is still parked on tx_pending */
	while ((urb = usb_get_from_anchor(&aru->tx_pending))) {
		ar9170_tx_callback(&aru->common, (void *) urb->context);
		usb_free_urb(urb);
	}

	/* let's wait a while until the tx queues are dried out */
	ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted,
					    msecs_to_jiffies(100));
	if (ret == 0)
		err = -ETIMEDOUT;

	usb_kill_anchored_urbs(&aru->tx_submitted);

	if (IS_ACCEPTING_CMD(ar))
		aru->common.state = AR9170_STARTED;

	return err;
}
static void ar9170_usb_tx_urb_complete_frame(struct urb *urb)
{
	struct sk_buff *skb = urb->context;
	struct ar9170_usb *aru = usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));

	if (unlikely(!aru)) {
		dev_kfree_skb_irq(skb);
		return;
	}

	atomic_dec(&aru->tx_submitted_urbs);

	ar9170_tx_callback(&aru->common, skb);

	ar9170_usb_submit_urb(aru);
}
static void ar9170_usb_submit_urb(struct ar9170_usb *aru)
{
	struct urb *urb;
	unsigned long flags;
	int err;

	if (unlikely(!IS_STARTED(&aru->common)))
		return;

	/*
	 * tx_urb_lock keeps the in-flight counter check and the
	 * tx_pending dequeue consistent with each other.
	 */
	spin_lock_irqsave(&aru->tx_urb_lock, flags);
	if (atomic_read(&aru->tx_submitted_urbs) >= AR9170_NUM_TX_URBS) {
		spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
		return;
	}
	atomic_inc(&aru->tx_submitted_urbs);

	urb = usb_get_from_anchor(&aru->tx_pending);
	if (!urb) {
		atomic_dec(&aru->tx_submitted_urbs);
		spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&aru->tx_urb_lock, flags);

	aru->tx_pending_urbs--;
	usb_anchor_urb(urb, &aru->tx_submitted);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err)) {
		if (ar9170_nag_limiter(&aru->common))
			dev_err(&aru->udev->dev,
				"submit_urb failed (%d).\n", err);

		/* undo the bookkeeping and complete the frame as failed */
		usb_unanchor_urb(urb);
		atomic_dec(&aru->tx_submitted_urbs);
		ar9170_tx_callback(&aru->common, urb->context);
	}

	usb_free_urb(urb);
}
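/*
 * Producer-side sketch (not part of the excerpt above): one plausible way a
 * frame could be handed to the two routines shown here.  It assumes a
 * bulk-out endpoint constant AR9170_EP_TX and an entry point named
 * ar9170_usb_tx(); both names are assumptions for illustration, not taken
 * from this code.  The key points it demonstrates are consistent with the
 * excerpt: the skb is stored in urb->context, the urb is anchored on
 * tx_pending (the anchor holds its own reference, so the local reference can
 * be dropped), tx_pending_urbs is incremented, and ar9170_usb_submit_urb()
 * is kicked to move work onto tx_submitted within the AR9170_NUM_TX_URBS
 * limit.
 */
static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ar9170_usb *aru = (void *) ar;
	struct urb *urb;

	if (unlikely(!IS_STARTED(ar)))
		return -EPERM;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (unlikely(!urb))
		return -ENOMEM;

	/* the skb doubles as urb->context, matching the completion handler */
	usb_fill_bulk_urb(urb, aru->udev,
			  usb_sndbulkpipe(aru->udev, AR9170_EP_TX),
			  skb->data, skb->len,
			  ar9170_usb_tx_urb_complete_frame, skb);
	urb->transfer_flags |= URB_ZERO_PACKET;

	/* park the urb on tx_pending; the anchor keeps it alive */
	usb_anchor_urb(urb, &aru->tx_pending);
	aru->tx_pending_urbs++;
	usb_free_urb(urb);

	/* let the submit machinery drain tx_pending up to the urb limit */
	ar9170_usb_submit_urb(aru);
	return 0;
}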