Example #1
0
void __wa_destroy(struct wahc *wa)
{
	if (wa->dti_urb) {
		usb_kill_urb(wa->dti_urb);
		usb_put_urb(wa->dti_urb);
		usb_kill_urb(wa->buf_in_urb);
		usb_put_urb(wa->buf_in_urb);
	}
	kfree(wa->xfer_result);
	wa_nep_destroy(wa);
	wa_rpipes_destroy(wa);
}
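Example #1 pairs usb_kill_urb() with usb_put_urb(): the kill synchronously cancels the transfer and waits for the completion handler, and only then is a reference dropped. A minimal teardown sketch of that lifecycle, assuming a hypothetical driver context (struct my_dev and my_dev_destroy() are illustrative names, not taken from the example):

#include <linux/slab.h>
#include <linux/usb.h>

struct my_dev {
	struct urb *in_urb;	/* allocated once with usb_alloc_urb(), refcount 1 */
};

static void my_dev_destroy(struct my_dev *d)
{
	if (d->in_urb) {
		usb_kill_urb(d->in_urb);	/* cancel; waits for .complete to finish */
		usb_put_urb(d->in_urb);		/* drop the allocation reference */
		d->in_urb = NULL;
	}
	kfree(d);
}

usb_put_urb() performs the same drop as usb_free_urb(), so the kill-before-put ordering guarantees the URB is idle before its last reference can go away.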
Example #2
0
static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
	unsigned long		flags;
	struct sk_buff		*skb;
	int			count = 0;

	spin_lock_irqsave (&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data		*entry;
		struct urb		*urb;
		int			retval;

		skb_queue_walk(q, skb) {
			entry = (struct skb_data *) skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		// during some PM-driven resume scenarios,
		// these (async) unlinks complete immediately
		retval = usb_unlink_urb (urb);
		if (retval != -EINPROGRESS && retval != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore (&q->lock, flags);
	return count;
}
Example #3
0
static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
	unsigned long		flags;
	struct sk_buff		*skb, *skbnext;
	int			count = 0;

	spin_lock_irqsave (&q->lock, flags);
	skb_queue_walk_safe(q, skb, skbnext) {
		struct skb_data		*entry;
		struct urb		*urb;
		int			retval;

		entry = (struct skb_data *) skb->cb;
		urb = entry->urb;

		/*
		 * Take a reference on the URB so that it cannot be freed
		 * while usb_unlink_urb() runs: usb_unlink_urb() always
		 * races with the .complete handler (including defer_bh),
		 * so dropping the last reference there would trigger a
		 * use-after-free inside usb_unlink_urb().
		 */
		usb_get_urb(urb);
		// during some PM-driven resume scenarios,
		// these (async) unlinks complete immediately
		retval = usb_unlink_urb (urb);
		if (retval != -EINPROGRESS && retval != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
		else
			count++;
		usb_put_urb(urb);
	}
	spin_unlock_irqrestore (&q->lock, flags);
	return count;
}
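The comment above is the core of the pattern: take a reference before usb_unlink_urb() because the asynchronous unlink always races with the completion path. The same idea in isolation (a sketch; cancel_urb_safely() is an illustrative name, and urb is assumed to be a submitted URB whose completion handler may run concurrently):

static void cancel_urb_safely(struct urb *urb)
{
	usb_get_urb(urb);	/* pin the URB across the unlink */
	usb_unlink_urb(urb);	/* asynchronous cancel; races with .complete */
	usb_put_urb(urb);	/* drop the temporary reference */
}

Compared with Example #2, this version also holds q->lock for the whole walk and iterates with skb_queue_walk_safe(), so the unlock/relock and the goto-based rescan disappear; usb_unlink_urb() never blocks, so calling it under the spinlock is legal.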
Example #4
0
static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb = &acm->wb[wbn];
	int rc;
#ifdef CONFIG_PM
	struct urb *res;
#endif

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	dev_vdbg(&acm->data->dev, "%s - susp_count %d\n", __func__,
							acm->susp_count);
	usb_autopm_get_interface_async(acm->control);
	if (acm->susp_count) {
#ifdef CONFIG_PM
		acm->transmitting++;
		wb->urb->transfer_buffer = wb->buf;
		wb->urb->transfer_dma = wb->dmah;
		wb->urb->transfer_buffer_length = wb->len;
		wb->urb->dev = acm->dev;
		usb_anchor_urb(wb->urb, &acm->deferred);
#else
		if (!acm->delayed_wb)
			acm->delayed_wb = wb;
		else {
			usb_autopm_put_interface_async(acm->control);
			printk(KERN_INFO "%s: acm->delayed_wb is not NULL, "
				"returning -EAGAIN\n", __func__);
			spin_unlock_irqrestore(&acm->write_lock, flags);
			return -EAGAIN;
		}
#endif
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);
#ifdef CONFIG_PM
	while ((res = usb_get_from_anchor(&acm->deferred))) {
		/* decrement ref count */
		usb_put_urb(res);
		rc = usb_submit_urb(res, GFP_ATOMIC);
		if (rc < 0) {
			dbg("usb_submit_urb(pending request) failed: %d", rc);
			usb_unanchor_urb(res);
			acm_write_done(acm, res->context);
		}
	}
#endif
	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return rc;
}
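The CONFIG_PM flush loop above calls usb_put_urb() immediately after usb_get_from_anchor(). In the vendor kernels these examples come from, usb_get_from_anchor() hands back the URB without dropping the reference taken on the anchoring path (comments in later examples spell this out; mainline later moved that drop into usb_get_from_anchor() itself), so each caller must drop exactly one reference or the URB leaks. The loop reduced to a self-contained sketch under that assumption (struct my_ctx and flush_deferred() are illustrative names):

struct my_ctx {
	struct usb_anchor deferred;	/* URBs parked here while suspended */
};

static void flush_deferred(struct my_ctx *ctx)
{
	struct urb *res;

	while ((res = usb_get_from_anchor(&ctx->deferred))) {
		/* balance the reference left over from the anchoring path */
		usb_put_urb(res);
		if (usb_submit_urb(res, GFP_ATOMIC) < 0)
			usb_free_urb(res);	/* submission failed: drop the last reference */
	}
}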
Example #5
0
/**
 * usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system keeping track of this URB
 */
void usb_unanchor_urb(struct urb *urb)
{
	unsigned long flags;
	struct usb_anchor *anchor;

	if (!urb)
		return;

	anchor = urb->anchor;
	if (!anchor)
		return;

	spin_lock_irqsave(&anchor->lock, flags);
	if (unlikely(anchor != urb->anchor)) {
		/* we've lost the race to another thread */
		spin_unlock_irqrestore(&anchor->lock, flags);
		return;
	}
	urb->anchor = NULL;
	list_del(&urb->anchor_list);
	spin_unlock_irqrestore(&anchor->lock, flags);
	usb_put_urb(urb);
	if (list_empty(&anchor->urb_list))
		wake_up(&anchor->wait);
}
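usb_unanchor_urb() re-checks urb->anchor under the lock because another thread may have unanchored the URB first; the final usb_put_urb() releases the reference usb_anchor_urb() took. The submit-side pairing looks like this (sketch; submit_anchored() is an illustrative name):

static int submit_anchored(struct usb_anchor *anchor, struct urb *urb)
{
	int ret;

	usb_anchor_urb(urb, anchor);	/* the anchor now holds a reference */
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret < 0)
		usb_unanchor_urb(urb);	/* undo: drops the anchor's reference */
	return ret;
}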
Example #6
0
static void acm_disconnect(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct tty_struct *tty;
	struct urb *res;
	u32 project_info = tegra3_get_project_id();

	/* sibling interface is already cleaning up */
	if (!acm)
		return;

	mutex_lock(&open_mutex);
	if (acm->country_codes) {
		device_remove_file(&acm->control->dev,
				&dev_attr_wCountryCodes);
		device_remove_file(&acm->control->dev,
				&dev_attr_iCountryCodeRelDate);
	}
	device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
	acm->dev = NULL;
	usb_set_intfdata(acm->control, NULL);
	usb_set_intfdata(acm->data, NULL);

	stop_data_traffic(acm);

	/* decrement ref count of anchored urbs */
	while ((res = usb_get_from_anchor(&acm->deferred)))
		usb_put_urb(res);

	acm_write_buffers_free(acm);
	usb_free_coherent(usb_dev, acm->ctrlsize, acm->ctrl_buffer,
			  acm->ctrl_dma);
	acm_read_buffers_free(acm);

	if (!acm->combined_interfaces)
		usb_driver_release_interface(&acm_driver, intf == acm->control ?
					acm->data : acm->control);

	if (acm->port.count == 0) {
		acm_tty_unregister(acm);
		mutex_unlock(&open_mutex);
		return;
	}

	mutex_unlock(&open_mutex);
	tty = tty_port_tty_get(&acm->port);
	if (tty) {
		tty_hangup(tty);
		tty_kref_put(tty);
	}

	if (project_info == TEGRA3_PROJECT_TF201) {
		if(gps_dongle_flag == true) {
			dev_info(&usb_dev->dev, "ublox - GPS Receiver Dongle unplug.\n");
			gps_dongle_flag = false;
		}
	}
}
Example #7
0
/* Callers must hold anchor->lock */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	urb->anchor = NULL;
	list_del(&urb->anchor_list);
	usb_put_urb(urb);
	if (usb_anchor_check_wakeup(anchor))
		wake_up(&anchor->wait);
}
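This variant gates the wakeup on usb_anchor_check_wakeup() instead of a bare list_empty() (compare Example #8 below); in mainline that helper additionally honours the anchor's suspend-wakeup accounting. Since callers must hold anchor->lock, a typical use is draining an anchor in place, as in this sketch (drain_anchor_locked() is an illustrative name, assumed to live in the same file as __usb_unanchor_urb()):

static void drain_anchor_locked(struct usb_anchor *anchor)
{
	struct urb *victim;

	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.next, struct urb,
				    anchor_list);
		/* drops the anchor's reference; may free the URB */
		__usb_unanchor_urb(victim, anchor);
	}
}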
Example #8
0
/* Callers must hold anchor->lock */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	urb->anchor = NULL;
	list_del(&urb->anchor_list);
	usb_put_urb(urb);
	if (list_empty(&anchor->urb_list))
		wake_up(&anchor->wait);
}
Example #9
0
static void
usb_free_urbs(struct usb_link_device *usb_ld, struct if_usb_devdata *pipe)
{
	struct usb_device *usbdev = usb_ld->usbdev;
	struct urb *urb;

	while ((urb = usb_get_from_anchor(&pipe->urbs))) {
		usb_poison_urb(urb);
		usb_free_coherent(usbdev, pipe->rx_buf_size,
				urb->transfer_buffer, urb->transfer_dma);
		urb->transfer_buffer = NULL;
		usb_put_urb(urb);
		usb_free_urb(urb);
	}
}
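usb_free_urbs() drops two references per URB: usb_put_urb() balances the extra reference that usb_get_from_anchor() leaves behind on these kernels, and usb_free_urb() drops the allocation reference once the coherent buffer has been detached. Poisoning first guarantees no completion handler can touch the buffer while it is being freed. A comment-form walkthrough of the loop (a sketch; it assumes each URB was allocated once and anchored once):

/*
 * urb = usb_get_from_anchor(&pipe->urbs);  unanchored, but the URB still
 *                                          carries the anchoring-path ref
 * usb_poison_urb(urb);                     cancel and forbid resubmission
 * usb_put_urb(urb);                        drop the anchoring-path ref
 * usb_free_urb(urb);                       drop the allocation ref; freed
 */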
Example #10
0
int usbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);
	struct sk_buff          *skb;
	struct urb              *res;
	int                     retval;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->interrupt, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			//HTC +++
			// as in acm_write_start() and usb_net_raw_ip_tx_urb_work(), the
			// urb ref count must be decremented after usb_get_from_anchor()
			// to prevent a memory leak
			usb_put_urb(res);
			//HTC ---

			skb = (struct sk_buff *)res->context;
			retval = usb_submit_urb(res, GFP_ATOMIC);
			if (retval < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				__skb_queue_tail(&dev->txq, skb);
			}
		}

		smp_mb();
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(dev->txq.qlen >= TX_QLEN(dev)))
				netif_start_queue(dev->net);
			tasklet_schedule (&dev->bh);
		}
	}
	return 0;
}
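usbnet_resume() applies the same retrieve, put, resubmit sequence as Example #4 to TX URBs deferred while asleep, additionally releasing the skb and the runtime-PM count when resubmission fails. The loop in isolation (sketch; resubmit_deferred_tx() is an illustrative name, locking of dev->txq.lock is left to the caller as in the original, and urb->context is assumed to carry the skb as set up in usbnet_start_xmit()):

static void resubmit_deferred_tx(struct usbnet *dev)
{
	struct urb *res;
	struct sk_buff *skb;

	while ((res = usb_get_from_anchor(&dev->deferred))) {
		usb_put_urb(res);	/* drop the reference left over from anchoring */
		skb = (struct sk_buff *)res->context;
		if (usb_submit_urb(res, GFP_ATOMIC) < 0) {
			dev_kfree_skb_any(skb);		/* the packet is lost */
			usb_free_urb(res);		/* drop the last reference */
			usb_autopm_put_interface_async(dev->intf);
		}
	}
}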
Example #11
0
int ctrl_bridge_resume(unsigned int id)
{
    struct ctrl_bridge	*dev;
    struct urb		*urb;
    unsigned long		flags;
    int			ret;

    if (id >= MAX_BRIDGE_DEVICES)
        return -EINVAL;

    dev = __dev[id];
    if (!dev)
        return -ENODEV;
    if (!dev->int_pipe)
        return 0;
    if (!test_bit(SUSPENDED, &dev->flags))
        return 0;

    spin_lock_irqsave(&dev->lock, flags);
    /* submit pending write requests */
    while ((urb = usb_get_from_anchor(&dev->tx_deferred))) {
        spin_unlock_irqrestore(&dev->lock, flags);
        /*
         * usb_get_from_anchor() does not drop the
         * ref count incremented by the usb_anchor_urb()
         * called in the Tx submission path. Let us do it.
         */
        usb_put_urb(urb);
        usb_anchor_urb(urb, &dev->tx_submitted);
        ret = usb_submit_urb(urb, GFP_ATOMIC);
        if (ret < 0) {
            usb_unanchor_urb(urb);
            kfree(urb->setup_packet);
            kfree(urb->transfer_buffer);
            usb_free_urb(urb);
            usb_autopm_put_interface_async(dev->intf);
        }
        spin_lock_irqsave(&dev->lock, flags);
    }
    clear_bit(SUSPENDED, &dev->flags);
    spin_unlock_irqrestore(&dev->lock, flags);

    return ctrl_bridge_start_read(dev, GFP_KERNEL);
}
Example #12
0
/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be poisoned starting
 * from the back of the queue. Newly added URBs will also be
 * poisoned
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	// anchor->poisoned = 1; /* XXX: Cannot backport */
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it*/
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_poison_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
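usb_poison_urb() sleeps until the completion handler has finished, so it cannot run under anchor->lock; the loop therefore pins the victim, drops the lock, poisons, puts, and relocks. That inner dance in isolation (sketch; poison_one_victim() is an illustrative name, and the sparse annotations document the locking contract):

static void poison_one_victim(struct usb_anchor *anchor, struct urb *victim)
	__releases(&anchor->lock) __acquires(&anchor->lock)
{
	usb_get_urb(victim);		/* keep the URB alive while unlocked */
	spin_unlock_irq(&anchor->lock);
	usb_poison_urb(victim);		/* sleeps; the completion path unanchors */
	usb_put_urb(victim);		/* drop the temporary reference */
	spin_lock_irq(&anchor->lock);
}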
Example #13
0
void dde_linux26_usb_vhcd_urb_complete(void *urb_handle,
                                       dde_kit_size_t data_size, void *data)
{
	LOG("URB %p completed", urb_handle);

	struct urb *urb = (struct urb *)urb_handle;

	/* FIXME the URB may have been unlinked */
	if (!urb) return;

	LOG("urb @ %p, urb->setup_packet @ %p data_size %zd data %p hc_priv %p",
	    urb, urb->setup_packet, data_size, data, urb->hcpriv);

	/* FIXME update URB */
//	urb->status         = irq_d_urb->status;
//	urb->actual_length  = irq_d_urb->actual_length;
//	urb->start_frame    = irq_d_urb->start_frame;
//	urb->interval       = irq_d_urb->interval;
//	urb->error_count    = irq_d_urb->error_count;
//	urb->transfer_flags = irq_d_urb->transfer_flags;

	if (urb->setup_packet)
		memcpy(urb->setup_packet, data, 8);

	/* XXX no ISOC */
//	if (urb->number_of_packets)
//		memcpy(urb->iso_frame_desc, irq_d_urb->iso_desc,
//		       urb->number_of_packets * sizeof(struct usb_iso_packet_descriptor));

	struct usb_hcd *hcd = vhcd_to_hcd(urb->hcpriv);
	urb->hcpriv = NULL;
	LOG("hcd %p", hcd);

	usb_hcd_giveback_urb(hcd, urb);

	usb_put_urb(urb);
}
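The trailing usb_put_urb() only balances if the enqueue side took a reference when the URB was handed to this virtual host controller. A hypothetical submit-side counterpart, not present in the source (vhcd_urb_enqueue() and hcd_to_vhcd() are illustrative names):

static int vhcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
                            gfp_t mem_flags)
{
	/* keep the URB alive until dde_linux26_usb_vhcd_urb_complete() */
	usb_get_urb(urb);
	urb->hcpriv = hcd_to_vhcd(hcd);	/* hypothetical back-pointer */
	/* ... queue the URB to the external host implementation ... */
	return 0;
}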
Example #14
0
static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

#if defined(CONFIG_RA_HW_NAT_PCI) && (defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE))
	if ((skb = alloc_skb (size + NET_IP_ALIGN + FOE_INFO_LEN, flags)) == NULL) {
#else
	if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
#endif
		if (netif_msg_rx_err (dev))
			devdbg (dev, "no rx skb");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return;
	}
#if defined(CONFIG_RA_HW_NAT_PCI) && (defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE))
	skb_reserve (skb, NET_IP_ALIGN + FOE_INFO_LEN);
#else
	skb_reserve (skb, NET_IP_ALIGN);
#endif
	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net)
			&& netif_device_present (dev->net)
			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)){
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			if (netif_msg_ifdown (dev))
				devdbg (dev, "device gone");
			netif_device_detach (dev->net);
			break;
		default:
			if (netif_msg_rx_err (dev))
				devdbg (dev, "rx submit, %d", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
		}
	} else {
		if (netif_msg_ifdown (dev))
			devdbg (dev, "rx: stopped");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
}


/*-------------------------------------------------------------------------*/

static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
{
	if (dev->driver_info->rx_fixup
			&& !dev->driver_info->rx_fixup (dev, skb))
		goto error;
	// else network stack removes extra byte if we forced a short packet

	if (skb->len)
		usbnet_skb_return (dev, skb);
	else {
		if (netif_msg_rx_err (dev))
			devdbg (dev, "drop");
error:
		dev->stats.rx_errors++;
		skb_queue_tail (&dev->done, skb);
	}
}

/*-------------------------------------------------------------------------*/

static void rx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;
	int			urb_status = urb->status;
	enum skb_state		state;

	skb_put (skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	    // success
	    case 0:
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			if (netif_msg_rx_err (dev))
				devdbg (dev, "rx length %d", skb->len);
		}
		break;

	    // stalls need manual reset. this is rare ... except that
	    // when going through USB 2.0 TTs, unplug appears this way.
	    // we avoid the highspeed version of the ETIMEOUT/EILSEQ
	    // storm, recovering as needed.
	    case -EPIPE:
		dev->stats.rx_errors++;
		usbnet_defer_kevent (dev, EVENT_RX_HALT);
		// FALLTHROUGH

	    // software-driven interface shutdown
	    case -ECONNRESET:		// async unlink
	    case -ESHUTDOWN:		// hardware gone
		if (netif_msg_ifdown (dev))
			devdbg (dev, "rx shutdown, code %d", urb_status);
		goto block;

	    // we get controller i/o faults during khubd disconnect() delays.
	    // throttle down resubmits, to avoid log floods; just temporarily,
	    // so we still recover when the fault isn't a khubd delay.
	    case -EPROTO:
	    case -ETIME:
	    case -EILSEQ:
		dev->stats.rx_errors++;
		if (!timer_pending (&dev->delay)) {
			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
			if (netif_msg_link (dev))
				devdbg (dev, "rx throttle %d", urb_status);
		}
block:
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	    // data overrun ... flush fifo?
	    case -EOVERFLOW:
		dev->stats.rx_over_errors++;
		// FALLTHROUGH

	    default:
		state = rx_cleanup;
		dev->stats.rx_errors++;
		if (netif_msg_rx_err (dev))
			devdbg (dev, "rx status %d", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running (dev->net)
		    && !test_bit (EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit (dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb (urb);
	}
	if (netif_msg_rx_err (dev))
		devdbg (dev, "no read resubmitted");
}

static void intr_complete (struct urb *urb)
{
	struct usbnet	*dev = urb->context;
	int		status = urb->status;

	switch (status) {
	    /* success */
	    case 0:
		dev->driver_info->status(dev, urb);
		break;

	    /* software-driven interface shutdown */
	    case -ENOENT:		// urb killed
	    case -ESHUTDOWN:		// hardware gone
		if (netif_msg_ifdown (dev))
			devdbg (dev, "intr shutdown, code %d", status);
		return;

	    /* NOTE:  not throttling like RX/TX, since this endpoint
	     * already polls infrequently
	     */
	    default:
		devdbg (dev, "intr status %d", status);
		break;
	}

	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb (urb, GFP_ATOMIC);
	if (status != 0 && netif_msg_timer (dev))
		deverr(dev, "intr resubmit --> %d", status);
}

/*-------------------------------------------------------------------------*/

// unlink pending rx/tx; completion handlers do all other cleanup

static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
	unsigned long		flags;
	struct sk_buff		*skb;
	int			count = 0;

	spin_lock_irqsave (&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data		*entry;
		struct urb		*urb;
		int			retval;

		skb_queue_walk(q, skb) {
			entry = (struct skb_data *) skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/*
		 * Take a reference on the URB so that it cannot be freed
		 * while usb_unlink_urb() runs: usb_unlink_urb() always
		 * races with the .complete handler (including defer_bh),
		 * so dropping the last reference there would trigger a
		 * use-after-free inside usb_unlink_urb().
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		// during some PM-driven resume scenarios,
		// these (async) unlinks complete immediately
		retval = usb_unlink_urb (urb);
		if (retval != -EINPROGRESS && retval != 0)
			devdbg (dev, "unlink urb err, %d", retval);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore (&q->lock, flags);
	return count;
}
Example #15
0
netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
                               struct net_device *net)
{
    struct usbnet		*dev = netdev_priv(net);
    int			length;
    struct urb		*urb = NULL;
    struct skb_data		*entry;
    struct driver_info	*info = dev->driver_info;
    unsigned long		flags;
    int retval;

    // some devices want funky USB-level framing, for
    // win32 driver (usually) and/or hardware quirks
    if (info->tx_fixup) {
        skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
        if (!skb) {
            if (netif_msg_tx_err(dev)) {
                netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
                goto drop;
            } else {
                /* cdc_ncm collected packet; waits for more */
                goto not_drop;
            }
        }
    }
    length = skb->len;

    if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
        netif_dbg(dev, tx_err, dev->net, "no urb\n");
        goto drop;
    }

    entry = (struct skb_data *) skb->cb;
    entry->urb = urb;
    entry->dev = dev;
    entry->length = length;

    usb_fill_bulk_urb (urb, dev->udev, dev->out,
                       skb->data, skb->len, tx_complete, skb);

    /* don't assume the hardware handles USB_ZERO_PACKET
     * NOTE:  strictly conforming cdc-ether devices should expect
     * the ZLP here, but ignore the one-byte packet.
     * NOTE2: CDC NCM specification is different from CDC ECM when
     * handling ZLP/short packets, so cdc_ncm driver will make short
     * packet itself if needed.
     */
    if (length % dev->maxpacket == 0) {
        if (!(info->flags & FLAG_SEND_ZLP)) {
            if (!(info->flags & FLAG_MULTI_PACKET)) {
                urb->transfer_buffer_length++;
                if (skb_tailroom(skb)) {
                    skb->data[skb->len] = 0;
                    __skb_put(skb, 1);
                }
            }
        } else
            urb->transfer_flags |= URB_ZERO_PACKET;
    }

    spin_lock_irqsave(&dev->txq.lock, flags);
    retval = usb_autopm_get_interface_async(dev->intf);
    if (retval < 0) {
        spin_unlock_irqrestore(&dev->txq.lock, flags);
        goto drop;
    }

#ifdef CONFIG_PM
    /* if this triggers, the device is still asleep */
    if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
        /* transmission will be done in resume */
        usb_anchor_urb(urb, &dev->deferred);
        /* no use to process more packets */
        netif_stop_queue(net);
        usb_put_urb(urb);
        spin_unlock_irqrestore(&dev->txq.lock, flags);
        netdev_dbg(dev->net, "Delaying transmission for resumption\n");
        goto deferred;
    }
#endif

    switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
    case -EPIPE:
        netif_stop_queue (net);
        usbnet_defer_kevent (dev, EVENT_TX_HALT);
        usb_autopm_put_interface_async(dev->intf);
        break;
    default:
        usb_autopm_put_interface_async(dev->intf);
        netif_dbg(dev, tx_err, dev->net,
                  "tx: submit urb err %d\n", retval);
        break;
    case 0:
        net->trans_start = jiffies;
        __usbnet_queue_skb(&dev->txq, skb, tx_start);
        if (dev->txq.qlen >= TX_QLEN (dev))
            netif_stop_queue (net);
    }
    spin_unlock_irqrestore (&dev->txq.lock, flags);

    if (retval) {
        netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
        dev->net->stats.tx_dropped++;
not_drop:
        if (skb)
            dev_kfree_skb_any (skb);
        usb_free_urb (urb);
    } else
        netif_dbg(dev, tx_queued, dev->net,
                  "> tx, len %d, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
    return NETDEV_TX_OK;
}
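In the CONFIG_PM branch above, the TX path anchors the URB for resume-time submission and immediately drops its own reference, leaving the anchor as the URB's sole keeper until usbnet_resume() (Example #10) retrieves it. That hand-off in isolation (sketch; defer_tx_urb() is an illustrative name, and the caller is assumed to hold the queue lock as in the original):

static void defer_tx_urb(struct usb_anchor *deferred, struct urb *urb)
{
	usb_anchor_urb(urb, deferred);	/* the anchor keeps the URB alive */
	usb_put_urb(urb);		/* the TX path gives up its own reference */
}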
Example #16
0
static int acm_resume(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);
	int rv = 0;
	int cnt;
#ifdef CONFIG_PM
	struct urb *res;
#else
	struct acm_wb *wb;
#endif

	if (!acm) {
		pr_err("%s: !acm\n", __func__);
		return -ENODEV;
	}

	spin_lock_irq(&acm->read_lock);
	if (acm->susp_count > 0) {
		acm->susp_count -= 1;
		cnt = acm->susp_count;
	} else {
		spin_unlock_irq(&acm->read_lock);
		return 0;
	}
	spin_unlock_irq(&acm->read_lock);

	if (cnt)
		return 0;


	mutex_lock(&acm->mutex);

	if (acm->port.count) {
		rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
		spin_lock_irq(&acm->write_lock);
#ifdef CONFIG_PM
		while ((res = usb_get_from_anchor(&acm->deferred))) {
			/* decrement ref count */
			usb_put_urb(res);
			rv = usb_submit_urb(res, GFP_ATOMIC);
			if (rv < 0) {
				dbg("usb_submit_urb(pending request)"
					" failed: %d", rv);
				usb_unanchor_urb(res);
				acm_write_done(acm, res->context);
			}
		}
		spin_unlock_irq(&acm->write_lock);
#else
		if (acm->delayed_wb) {
			wb = acm->delayed_wb;
			acm->delayed_wb = NULL;
			spin_unlock_irq(&acm->write_lock);
			acm_start_wb(acm, wb);
		} else {
			spin_unlock_irq(&acm->write_lock);
		}
#endif

		/*
		 * delayed error checking because we must
		 * do the write path at all cost
		 */
		if (rv < 0)
			goto err_out;

		rv = acm_submit_read_urbs(acm, GFP_NOIO);
	}

err_out:
	mutex_unlock(&acm->mutex);
	return rv;
}
static int if_usb_resume(struct usb_interface *intf)
{
	int i, ret;
	struct sk_buff *skb;
	struct usb_link_device *usb_ld = usb_get_intfdata(intf);
	struct if_usb_devdata *pipe;
	struct urb *urb;

	spin_lock_irq(&usb_ld->lock);
	if (!atomic_dec_return(&usb_ld->suspend_count)) {
		spin_unlock_irq(&usb_ld->lock);

		mif_debug("\n");
		wake_lock(&usb_ld->susplock);

		/* HACK: Runtime pm does not allow requesting autosuspend from
		 * resume callback, delayed it after resume */
		queue_delayed_work(system_nrt_wq, &usb_ld->runtime_pm_work,
							msecs_to_jiffies(50));

		for (i = 0; i < IF_USB_DEVNUM_MAX; i++) {
			pipe = &usb_ld->devdata[i];
			while ((urb = usb_get_from_anchor(&pipe->urbs))) {
				ret = usb_rx_submit(pipe, urb, GFP_KERNEL);
				if (ret < 0) {
					usb_put_urb(urb);
					mif_err("usb_rx_submit error with (%d)\n",
							ret);
					return ret;
				}
				usb_put_urb(urb);
			}
		}

		while ((urb = usb_get_from_anchor(&usb_ld->deferred))) {
			mif_debug("got urb (0x%p) from anchor & resubmit\n",
					urb);
			ret = usb_submit_urb(urb, GFP_KERNEL);
			if (ret < 0) {
				mif_err("resubmit failed\n");
				skb = urb->context;
				dev_kfree_skb_any(skb);
				usb_free_urb(urb);
				ret = pm_runtime_put_autosuspend(
						&usb_ld->usbdev->dev);
				if (ret < 0 && ret != -EAGAIN)
					mif_debug("pm_runtime_put_autosuspend "
							"failed: %d\n", ret);
			}
		}
		SET_SLAVE_WAKEUP(usb_ld->pdata, 1);
		udelay(100);
		SET_SLAVE_WAKEUP(usb_ld->pdata, 0);

		/* if_usb_resume() is atomic. post_resume_work is
		 * a kind of bottom halves
		 */
		queue_delayed_work(system_nrt_wq, &usb_ld->post_resume_work, 0);

		return 0;
	}

	spin_unlock_irq(&usb_ld->lock);
	return 0;
}
static int usb_tx_urb_with_skb(struct usb_link_device *usb_ld,
		struct sk_buff *skb, struct if_usb_devdata *pipe_data)
{
	int ret, cnt = 0;
	struct urb *urb;
	struct usb_device *usbdev = usb_ld->usbdev;
	unsigned long flags;

	if (!usbdev || (usbdev->state == USB_STATE_NOTATTACHED) ||
			usb_ld->host_wake_timeout_flag)
		return -ENODEV;

	pm_runtime_get_noresume(&usbdev->dev);

	if (usbdev->dev.power.runtime_status == RPM_SUSPENDED ||
		usbdev->dev.power.runtime_status == RPM_SUSPENDING) {
		usb_ld->resume_status = AP_INITIATED_RESUME;
		SET_SLAVE_WAKEUP(usb_ld->pdata, 1);

		while (!wait_event_interruptible_timeout(usb_ld->l2_wait,
				usbdev->dev.power.runtime_status == RPM_ACTIVE
				|| pipe_data->disconnected,
				HOST_WAKEUP_TIMEOUT_JIFFIES)) {

			if (cnt == MAX_RETRY) {
				mif_err("host wakeup timeout !!\n");
				SET_SLAVE_WAKEUP(usb_ld->pdata, 0);
				pm_runtime_put_autosuspend(&usbdev->dev);
				schedule_work(&usb_ld->disconnect_work);
				usb_ld->host_wake_timeout_flag = 1;
				return -1;
			}
			mif_err("host wakeup timeout ! retry..\n");
			SET_SLAVE_WAKEUP(usb_ld->pdata, 0);
			udelay(100);
			SET_SLAVE_WAKEUP(usb_ld->pdata, 1);
			cnt++;
		}

		if (pipe_data->disconnected) {
			SET_SLAVE_WAKEUP(usb_ld->pdata, 0);
			pm_runtime_put_autosuspend(&usbdev->dev);
			return -ENODEV;
		}

		mif_debug("wait_q done (runtime_status=%d)\n",
				usbdev->dev.power.runtime_status);
	}

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		mif_err("alloc urb error\n");
		if (pm_runtime_put_autosuspend(&usbdev->dev) < 0)
			mif_debug("pm_runtime_put_autosuspend fail\n");
		return -ENOMEM;
	}

	urb->transfer_flags = URB_ZERO_PACKET;
	usb_fill_bulk_urb(urb, usbdev, pipe_data->tx_pipe, skb->data,
			skb->len, usb_tx_complete, (void *)skb);

	spin_lock_irqsave(&usb_ld->lock, flags);
	if (atomic_read(&usb_ld->suspend_count)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &usb_ld->deferred);
		usb_put_urb(urb);
		mif_debug("anchor urb (0x%p)\n", urb);
		spin_unlock_irqrestore(&usb_ld->lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&usb_ld->lock, flags);

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret < 0) {
		mif_err("usb_submit_urb with ret(%d)\n", ret);
		if (pm_runtime_put_autosuspend(&usbdev->dev) < 0)
			mif_debug("pm_runtime_put_autosuspend fail\n");
	}
	return ret;
}
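usb_tx_urb_with_skb() brackets the whole transmit attempt with runtime-PM references: pm_runtime_get_noresume() pins the device awake, every failure path issues pm_runtime_put_autosuspend(), and on success the put is deferred to the TX completion handler. The pairing reduced to a skeleton (sketch; tx_with_runtime_pm() is an illustrative name, and the completion handler is assumed to perform the put on success):

static int tx_with_runtime_pm(struct usb_device *usbdev, struct urb *urb)
{
	int ret;

	pm_runtime_get_noresume(&usbdev->dev);	/* hold the device awake */
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret < 0)
		pm_runtime_put_autosuspend(&usbdev->dev); /* balance on failure */
	/* on success, the URB completion handler drops the PM reference */
	return ret;
}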