Example No. 1
static void play_delayed(struct usb_serial_port *port)
{
	struct hsictty_intf_private *data;
	struct hsictty_port_private *portdata;
	struct urb *urb;
	int err;

	portdata = usb_get_serial_port_data(port);
	data = usb_get_serial_data(port->serial);
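	/* drain the delayed anchor and try to resubmit each URB */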
	while ((urb = usb_get_from_anchor(&portdata->delayed_urb))) {
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (!err) {
			hsictty_dbg
			    ("%s re-submit URB %p, write data may lost\n",
			     __func__, urb);
			data->in_flight++;
		} else {
			/* we have to throw away the rest */
			hsictty_error
			    ("%s re-submit flight URB error, write data may lost\n",
			     __func__);
			do {
				unbusy_queued_urb(urb, portdata);
				usb_autopm_put_interface_no_suspend
				    (port->serial->interface);
			} while ((urb =
				  usb_get_from_anchor(&portdata->delayed_urb)));
			break;
		}
	}
}
Example No. 2
static void rtl_usb_stop(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
	struct urb *urb;

	/* should be called after adapter start and interrupt enable. */
	set_hal_stop(rtlhal);
	cancel_work_sync(&rtlpriv->works.fill_h2c_cmd);
	/* Enable software */
	SET_USB_STOP(rtlusb);
	rtl_usb_deinit(hw);

	/* free pre-allocated URBs from rtl_usb_start() */
	usb_kill_anchored_urbs(&rtlusb->rx_submitted);

	tasklet_kill(&rtlusb->rx_work_tasklet);
	cancel_work_sync(&rtlpriv->works.lps_change_work);

	flush_workqueue(rtlpriv->works.rtl_wq);

	skb_queue_purge(&rtlusb->rx_queue);

	while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
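		/* each URB left on rx_cleanup_urbs still owns a coherent DMA buffer; free both */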
		usb_free_coherent(urb->dev, urb->transfer_buffer_length,
				urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
	}

	rtlpriv->cfg->ops->hw_disable(hw);
}
Example No. 3
static void acm_port_shutdown(struct tty_port *port)
{
	struct acm *acm = container_of(port, struct acm, port);
	struct urb *urb;
	struct acm_wb *wb;
	int i;
	int pm_err;

	dev_dbg(&acm->control->dev, "%s\n", __func__);

	mutex_lock(&acm->mutex);
	if (!acm->disconnected) {
		pm_err = usb_autopm_get_interface(acm->control);
		acm_set_control(acm, acm->ctrlout = 0);

		for (;;) {
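			/* release each write deferred during suspend: mark its buffer unused and drop the PM ref */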
			urb = usb_get_from_anchor(&acm->delayed);
			if (!urb)
				break;
			wb = urb->context;
			wb->use = 0;
			usb_autopm_put_interface_async(acm->control);
		}

		usb_kill_urb(acm->ctrlurb);
		for (i = 0; i < ACM_NW; i++)
			usb_kill_urb(acm->wb[i].urb);
		for (i = 0; i < acm->rx_buflimit; i++)
			usb_kill_urb(acm->read_urbs[i]);
		acm->control->needs_remote_wakeup = 0;
		if (!pm_err)
			usb_autopm_put_interface(acm->control);
	}
	mutex_unlock(&acm->mutex);
}
Example No. 4
int usbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);
	struct sk_buff          *skb;
	struct urb              *res;
	int                     retval;

	if (!--dev->suspend_count) {
		spin_lock_irq(&dev->txq.lock);
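		/* resubmit tx URBs that were deferred while the device was asleep */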
		while ((res = usb_get_from_anchor(&dev->deferred))) {

			printk(KERN_INFO"%s has delayed data\n", __func__);
			skb = (struct sk_buff *)res->context;
			retval = usb_submit_urb(res, GFP_ATOMIC);
			if (retval < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				__skb_queue_tail(&dev->txq, skb);
			}
		}

		smp_mb();
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);
		if (!(dev->txq.qlen >= TX_QLEN(dev)))
			netif_start_queue(dev->net);
		tasklet_schedule (&dev->bh);
	}
	return 0;
}
Example No. 5
static void r92su_tx_schedule(struct r92su *r92su)
{
	struct urb *urb;
	int err;

	if (atomic_inc_return(&r92su->tx_pending_urbs) >
	    RTL_USB_MAX_TX_URBS_NUM)
		goto err_acc;

	urb = usb_get_from_anchor(&r92su->tx_wait);
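	/* usb_get_from_anchor() returns the URB with an extra reference; usb_free_urb() below drops it */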
	if (!urb)
		goto err_acc;

	usb_anchor_urb(urb, &r92su->tx_submitted);
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err) {
		WARN_ONCE(err, "can't handle urb submit error %d", err);

		usb_unanchor_urb(urb);
		r92su_mark_dead(r92su);

		dev_kfree_skb_any(urb->context);
	}

	usb_free_urb(urb);
	if (likely(err == 0))
		return;

err_acc:
	atomic_dec(&r92su->tx_pending_urbs);
}
Example No. 6
static int data_bridge_resume(struct data_bridge *dev)
{
	struct urb	*urb;
	int		retval;

	if (!test_and_clear_bit(SUSPENDED, &dev->flags))
		return 0;

	while ((urb = usb_get_from_anchor(&dev->delayed))) {
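		/* move each deferred tx URB onto the active anchor and resubmit it */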
		usb_anchor_urb(urb, &dev->tx_active);
		atomic_inc(&dev->pending_txurbs);
		retval = usb_submit_urb(urb, GFP_ATOMIC);
		if (retval < 0) {
			atomic_dec(&dev->pending_txurbs);
			usb_unanchor_urb(urb);

			/* TODO: need to free urb data */
			usb_scuttle_anchored_urbs(&dev->delayed);
			break;
		}
		dev->to_modem++;
		dev->txurb_drp_cnt--;
	}
	/* if the bridge is open or not, resume to consume mdm request
	 * because this link is not dead, it's alive
	 */
#ifndef CONFIG_MDM_HSIC_PM
	if (dev->brdg)
#endif
		queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}
Example No. 7
static int ar9170_usb_flush(struct ar9170 *ar)
{
	struct ar9170_usb *aru = (void *) ar;
	struct urb *urb;
	int ret, err = 0;

	if (IS_STARTED(ar))
		aru->common.state = AR9170_IDLE;

	usb_wait_anchor_empty_timeout(&aru->tx_pending,
					    msecs_to_jiffies(800));
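	/* anything still pending after the wait is handed back through the tx callback and freed */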
	while ((urb = usb_get_from_anchor(&aru->tx_pending))) {
		ar9170_tx_callback(&aru->common, (void *) urb->context);
		usb_free_urb(urb);
	}

	/* lets wait a while until the tx - queues are dried out */
	ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted,
					    msecs_to_jiffies(100));
	if (ret == 0)
		err = -ETIMEDOUT;

	usb_kill_anchored_urbs(&aru->tx_submitted);

	if (IS_ACCEPTING_CMD(ar))
		aru->common.state = AR9170_STARTED;

	return err;
}
Example No. 8
static int carl9170_usb_submit_cmd_urb(struct ar9170 *ar)
{
	struct urb *urb;
	int err;

	if (atomic_inc_return(&ar->tx_cmd_urbs) != 1) {
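		/* another command URB is already in flight; bail out */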
		atomic_dec(&ar->tx_cmd_urbs);
		return 0;
	}

	urb = usb_get_from_anchor(&ar->tx_cmd);
	if (!urb) {
		atomic_dec(&ar->tx_cmd_urbs);
		return 0;
	}

	usb_anchor_urb(urb, &ar->tx_anch);
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err)) {
		usb_unanchor_urb(urb);
		atomic_dec(&ar->tx_cmd_urbs);
	}
	usb_free_urb(urb);

	return err;
}
Example No. 9
static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb = &acm->wb[wbn];
	int rc;
#ifdef CONFIG_PM
	struct urb *res;
#endif

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	dev_vdbg(&acm->data->dev, "%s - susp_count %d\n", __func__,
							acm->susp_count);
	usb_autopm_get_interface_async(acm->control);
	if (acm->susp_count) {
#ifdef CONFIG_PM
		acm->transmitting++;
		wb->urb->transfer_buffer = wb->buf;
		wb->urb->transfer_dma = wb->dmah;
		wb->urb->transfer_buffer_length = wb->len;
		wb->urb->dev = acm->dev;
		usb_anchor_urb(wb->urb, &acm->deferred);
#else
		if (!acm->delayed_wb)
			acm->delayed_wb = wb;
		else {
			usb_autopm_put_interface_async(acm->control);
			printk(KERN_INFO "%s: acm->delayed_wb is not NULL, "
				"returning -EAGAIN\n", __func__);
			spin_unlock_irqrestore(&acm->write_lock, flags);
			return -EAGAIN;
		}
#endif
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);
#ifdef CONFIG_PM
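	/* not suspended: flush any writes that were parked on acm->deferred first */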
	while ((res = usb_get_from_anchor(&acm->deferred))) {
		/* decrement ref count*/
		usb_put_urb(res);
		rc = usb_submit_urb(res, GFP_ATOMIC);
		if (rc < 0) {
			dbg("usb_submit_urb(pending request) failed: %d", rc);
			usb_unanchor_urb(res);
			acm_write_done(acm, res->context);
		}
	}
#endif
	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return rc;

}
Example No. 10
static int carl9170_usb_flush(struct ar9170 *ar)
{
	struct urb *urb;
	int ret, err = 0;

	while ((urb = usb_get_from_anchor(&ar->tx_wait))) {
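		/* frames that never made it onto the bus: drop them and report their tx status */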
		struct sk_buff *skb = (void *)urb->context;
		carl9170_tx_drop(ar, skb);
		carl9170_tx_callback(ar, skb);
		usb_free_urb(urb);
	}

	ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, 1000);
	if (ret == 0)
		err = -ETIMEDOUT;

	/* lets wait a while until the tx - queues are dried out */
	ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, 1000);
	if (ret == 0)
		err = -ETIMEDOUT;

	usb_kill_anchored_urbs(&ar->tx_anch);
	carl9170_usb_handle_tx_err(ar);

	return err;
}
Example No. 11
static int carl9170_usb_submit_rx_urb(struct ar9170 *ar, gfp_t gfp)
{
	struct urb *urb;
	int err = 0, runs = 0;

	while ((atomic_read(&ar->rx_anch_urbs) < AR9170_NUM_RX_URBS) &&
		(runs++ < AR9170_NUM_RX_URBS)) {
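		/* take an idle URB from the rx pool and try to put it back in flight */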
		err = -ENOSPC;
		urb = usb_get_from_anchor(&ar->rx_pool);
		if (urb) {
			usb_anchor_urb(urb, &ar->rx_anch);
			err = usb_submit_urb(urb, gfp);
			if (unlikely(err)) {
				usb_unanchor_urb(urb);
				usb_anchor_urb(urb, &ar->rx_pool);
			} else {
				atomic_dec(&ar->rx_pool_urbs);
				atomic_inc(&ar->rx_anch_urbs);
			}
			usb_free_urb(urb);
		}
	}

	return err;
}
Example No. 12
static void carl9170_usb_submit_data_urb(struct ar9170 *ar)
{
	struct urb *urb;
	int err;

	if (atomic_inc_return(&ar->tx_anch_urbs) > AR9170_NUM_TX_URBS)
		goto err_acc;

	urb = usb_get_from_anchor(&ar->tx_wait);
	if (!urb)
		goto err_acc;

	usb_anchor_urb(urb, &ar->tx_anch);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err)) {
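		/* submission failed: move the frame onto the tx_err anchor for error handling */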
		if (net_ratelimit()) {
			dev_err(&ar->udev->dev, "tx submit failed (%d)\n",
				urb->status);
		}

		usb_unanchor_urb(urb);
		usb_anchor_urb(urb, &ar->tx_err);
	}

	usb_free_urb(urb);

	if (likely(err == 0))
		return;

err_acc:
	atomic_dec(&ar->tx_anch_urbs);
}
Example No. 13
static void rtl_usb_tx_schedule(struct ieee80211_hw *hw)
{
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
	struct urb *urb;
	int err;

	if (atomic_inc_return(&rtlusb->tx_pending_urbs) >
	    RTL_USB_MAX_TX_URBS_NUM)
		goto err_acc;

	urb = usb_get_from_anchor(&rtlusb->tx_wait);
	if (!urb)
		goto err_acc;

	err = _rtl_submit_tx_urb(hw, urb);
	if (err) {
		WARN_ONCE(err, "can't handle urb submit error %d", err);
		usb_unanchor_urb(urb);
		dev_kfree_skb_irq(urb->context);
	}

	if (likely(err == 0))
		return;

err_acc:
	atomic_dec(&rtlusb->tx_pending_urbs);
}
Example No. 14
static int data_bridge_resume(struct data_bridge *dev)
{
	struct urb	*urb;
	int		retval;

	while ((urb = usb_get_from_anchor(&dev->delayed))) {
		usb_anchor_urb(urb, &dev->tx_active);
		atomic_inc(&dev->pending_txurbs);
		retval = usb_submit_urb(urb, GFP_ATOMIC);
		if (retval < 0) {
			atomic_dec(&dev->pending_txurbs);
			usb_unanchor_urb(urb);

			/* TODO: need to free urb data */
			usb_scuttle_anchored_urbs(&dev->delayed);
			break;
		}
		dev->to_modem++;
		dev->txurb_drp_cnt--;
	}

	clear_bit(SUSPENDED, &dev->flags);

	if (dev->brdg)
		queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}
Example No. 15
int ctrl_bridge_resume(unsigned int id)
{
	struct ctrl_bridge	*dev;
	struct urb		*urb;

	if (id >= MAX_BRIDGE_DEVICES)
		return -EINVAL;

	dev = __dev[id];
	if (!dev)
		return -ENODEV;

	if (!test_and_clear_bit(SUSPENDED, &dev->flags))
		return 0;

	/* submit pending write requests */
	while ((urb = usb_get_from_anchor(&dev->tx_deferred))) {
		int ret;
		usb_anchor_urb(urb, &dev->tx_submitted);
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		if (ret < 0) {
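			/* failed resubmit: free the request's buffers and drop the autopm reference */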
			usb_unanchor_urb(urb);
			kfree(urb->setup_packet);
			kfree(urb->transfer_buffer);
			usb_free_urb(urb);
			usb_autopm_put_interface_async(dev->intf);
		}
	}

	return ctrl_bridge_start_read(dev);
}
Example No. 16
static void acm_port_down(struct acm *acm)
{
	struct urb *urb;
	struct acm_wb *wb;
	int i;

	if (acm->dev) {
		usb_autopm_get_interface(acm->control);
		acm_set_control(acm, acm->ctrlout = 0);

		for (;;) {
			urb = usb_get_from_anchor(&acm->delayed);
			if (!urb)
				break;
			wb = urb->context;
			wb->use = 0;
			usb_autopm_put_interface_async(acm->control);
		}

		usb_kill_urb(acm->ctrlurb);
		for (i = 0; i < ACM_NW; i++)
			usb_kill_urb(acm->wb[i].urb);
		for (i = 0; i < acm->rx_buflimit; i++)
			usb_kill_urb(acm->read_urbs[i]);
		acm->control->needs_remote_wakeup = 0;
		usb_autopm_put_interface(acm->control);
	}
}
Example No. 17
static void baseband_usb_driver_disconnect(struct usb_interface *intf)
{
	int i, j;
	struct urb *urb;

	pr_debug("%s intf %p\n", __func__, intf);

	for (i = 0; i < max_intfs; i++) {
		pr_debug("[%d]\n", i);
		if (!baseband_usb_net[i])
			continue;
		if (baseband_usb_net[i]->usb.interface != intf) {
			pr_debug("%p != %p\n",
				baseband_usb_net[i]->usb.interface, intf);
			continue;
		}
		/* acquire semaphore */
		if (down_interruptible(&baseband_usb_net[i]->sem)) {
			pr_err("%s: cannot acquire semaphore\n", __func__);
			continue;
		}
		/* kill usb tx */
		while ((urb = usb_get_from_anchor(&baseband_usb_net[i]->
			usb.tx_urb_deferred)) != (struct urb *) 0) {
			pr_info("%s: kill deferred tx urb %p\n",
				__func__, urb);
			/* decrement count from usb_get_from_anchor() */
			usb_free_urb(urb);
			/* kill tx urb */
			usb_kill_urb(urb);
			/* free tx urb + tx urb transfer buffer */
			if (urb->transfer_buffer) {
				kfree(urb->transfer_buffer);
				urb->transfer_buffer = (void *) 0;
			}
			usb_free_urb(urb);
		}
		if (baseband_usb_net[i]->usb.tx_workqueue) {
			flush_workqueue(baseband_usb_net[i]
				->usb.tx_workqueue);
		}
		if (baseband_usb_net[i]->usb.tx_urb) {
			usb_kill_urb(baseband_usb_net[i]->usb.tx_urb);
			baseband_usb_net[i]->usb.tx_urb
				= (struct urb *) 0;
		}
		/* kill usb rx */
		for (j = 0; j < RAWIP_RX_BUFS; j++)
			usb_kill_urb(baseband_usb_net[i]->urb_r[j]);
		baseband_usb_net[i]->usb.rx_urb	= (struct urb *) 0;

		/* mark interface as disconnected */
		baseband_usb_net[i]->usb.interface
			= (struct usb_interface *) 0;
		/* release semaphore */
		up(&baseband_usb_net[i]->sem);
	}

}
Example No. 18
static void acm_disconnect(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct tty_struct *tty;
	struct urb *res;
	u32 project_info = tegra3_get_project_id();

	/* sibling interface is already cleaning up */
	if (!acm)
		return;

	mutex_lock(&open_mutex);
	if (acm->country_codes) {
		device_remove_file(&acm->control->dev,
				&dev_attr_wCountryCodes);
		device_remove_file(&acm->control->dev,
				&dev_attr_iCountryCodeRelDate);
	}
	device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
	acm->dev = NULL;
	usb_set_intfdata(acm->control, NULL);
	usb_set_intfdata(acm->data, NULL);

	stop_data_traffic(acm);

	/* decrement ref count of anchored urbs */
	while ((res = usb_get_from_anchor(&acm->deferred)))
		usb_put_urb(res);

	acm_write_buffers_free(acm);
	usb_free_coherent(usb_dev, acm->ctrlsize, acm->ctrl_buffer,
			  acm->ctrl_dma);
	acm_read_buffers_free(acm);

	if (!acm->combined_interfaces)
		usb_driver_release_interface(&acm_driver, intf == acm->control ?
					acm->data : acm->control);

	if (acm->port.count == 0) {
		acm_tty_unregister(acm);
		mutex_unlock(&open_mutex);
		return;
	}

	mutex_unlock(&open_mutex);
	tty = tty_port_tty_get(&acm->port);
	if (tty) {
		tty_hangup(tty);
		tty_kref_put(tty);
	}

	if (project_info == TEGRA3_PROJECT_TF201) {
		if(gps_dongle_flag == true) {
			dev_info(&usb_dev->dev, "ublox - GPS Receiver Dongle unplug.\n");
			gps_dongle_flag = false;
		}
	}
}
Example No. 19
static int acm_resume(struct usb_interface *intf)
{
	struct acm *acm = usb_get_intfdata(intf);
	struct acm_wb *wb;
	int rv = 0;
	struct urb *res;
	int cnt;

	spin_lock_irq(&acm->read_lock);
	acm->susp_count -= 1;
	cnt = acm->susp_count;
	spin_unlock_irq(&acm->read_lock);

	if (cnt)
		return 0;


	mutex_lock(&acm->mutex);

#ifdef CONFIG_PM
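	/* resubmit writes that were buffered on acm->deferred while suspended */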
	while ((res = usb_get_from_anchor(&acm->deferred))) {
		printk("%s process buffered request \n", __func__);
		rv = usb_submit_urb(res, GFP_ATOMIC);
		if (rv < 0) {
			dbg("usb_submit_urb(pending request) failed: %d", rv);
		}
	}
#endif

	if (acm->port.count) {
		rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
		spin_lock_irq(&acm->write_lock);
		if (acm->delayed_wb) {
			wb = acm->delayed_wb;
			acm->delayed_wb = NULL;
			spin_unlock_irq(&acm->write_lock);
			acm_start_wb(acm, wb);
		} else {
			spin_unlock_irq(&acm->write_lock);
		}


		/*
		 * delayed error checking because we must
		 * do the write path at all cost
		 */
		if (rv < 0)
			goto err_out;

		tasklet_schedule(&acm->urb_task);
	}

err_out:
	mutex_unlock(&acm->mutex);
	return rv;
}
Example No. 20
void carl9170_usb_handle_tx_err(struct ar9170 *ar)
{
	struct urb *urb;

	while ((urb = usb_get_from_anchor(&ar->tx_err))) {
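		/* drop each errored frame and complete it through the tx callback before freeing the URB */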
		struct sk_buff *skb = (void *)urb->context;

		carl9170_tx_drop(ar, skb);
		carl9170_tx_callback(ar, skb);
		usb_free_urb(urb);
	}
}
Example No. 21
File: atusb.c Project: 020gzh/linux
static void atusb_free_urbs(struct atusb *atusb)
{
	struct urb *urb;

	while (1) {
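		/* free every idle URB together with the skb stored in its context */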
		urb = usb_get_from_anchor(&atusb->idle_urbs);
		if (!urb)
			break;
		kfree_skb(urb->context);
		usb_free_urb(urb);
	}
}
Example No. 22
static void
usb_free_deferred_urbs(struct usb_link_device *usb_ld)
{
	struct urb *urb = NULL;
	struct sk_buff *skb = NULL;

	while ((urb = usb_get_from_anchor(&usb_ld->deferred))) {
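		/* poison the URB so it cannot be resubmitted, then free it and its skb */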
		skb = urb->context;
		usb_poison_urb(urb);
		usb_free_urb(urb);
		dev_kfree_skb_any(skb);
	}
}
Example No. 23
static void play_deferred(struct btusb_data *data)
{
	struct urb *urb;
	int err;

	while ((urb = usb_get_from_anchor(&data->deferred))) {
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0)
			break;

		data->tx_in_flight++;
	}
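	/* anything left after a failed submit is unanchored and its reference dropped */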
	usb_scuttle_anchored_urbs(&data->deferred);
}
Example No. 24
static void
usb_free_urbs(struct usb_link_device *usb_ld, struct if_usb_devdata *pipe)
{
	struct usb_device *usbdev = usb_ld->usbdev;
	struct urb *urb;

	while ((urb = usb_get_from_anchor(&pipe->urbs))) {
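		/* poison each rx URB and give back its coherent DMA buffer */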
		usb_poison_urb(urb);
		usb_free_coherent(usbdev, pipe->rx_buf_size,
				urb->transfer_buffer, urb->transfer_dma);
		urb->transfer_buffer = NULL;
		usb_put_urb(urb);
		usb_free_urb(urb);
	}
}
Example No. 25
/*=======================  tx =========================================*/
static void rtl_usb_cleanup(struct ieee80211_hw *hw)
{
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
	struct urb *urb;

	/* clean up rx stuff. */
	_rtl_usb_cleanup_rx(hw);

	/* clean up tx stuff */
	usb_kill_anchored_urbs(&rtlusb->tx_submitted);
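	/* frames still queued on tx_wait were never submitted; just free them */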
	while ((urb = usb_get_from_anchor(&rtlusb->tx_wait))) {
		kfree_skb(urb->context);
		usb_free_urb(urb);
	}
	usb_kill_anchored_urbs(&rtlusb->tx_submitted);
}
Example No. 26
/**
 * free_anchored_buffers - free device's anchored items
 * @mdev: the device
 * @channel: channel ID
 * @status: status of MBO termination
 */
static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel,
				  enum mbo_status_flags status)
{
	struct mbo *mbo;
	struct urb *urb;

	while ((urb = usb_get_from_anchor(&mdev->busy_urbs[channel]))) {
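		/* kill each busy URB and complete its MBO with the requested status */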
		mbo = urb->context;
		usb_kill_urb(urb);
		if (mbo && mbo->complete) {
			mbo->status = status;
			mbo->processed_length = 0;
			mbo->complete(mbo);
		}
		usb_free_urb(urb);
	}
}
Example No. 27
int usbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);
	struct sk_buff          *skb;
	struct urb              *res;
	int                     retval;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->interrupt, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			//HTC +++
			// refer to acm_write_start() and usb_net_raw_ip_tx_urb_work(), need to
			// decrement urb ref count after usb_get_from_anchor() to prevent memory leak
			usb_put_urb(res);
			//HTC ---

			skb = (struct sk_buff *)res->context;
			retval = usb_submit_urb(res, GFP_ATOMIC);
			if (retval < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				__skb_queue_tail(&dev->txq, skb);
			}
		}

		smp_mb();
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(dev->txq.qlen >= TX_QLEN(dev)))
				netif_start_queue(dev->net);
			tasklet_schedule (&dev->bh);
		}
	}
	return 0;
}
Example No. 28
int ctrl_bridge_resume(unsigned int id)
{
    struct ctrl_bridge	*dev;
    struct urb		*urb;
    unsigned long		flags;
    int			ret;

    if (id >= MAX_BRIDGE_DEVICES)
        return -EINVAL;

    dev = __dev[id];
    if (!dev)
        return -ENODEV;
    if (!dev->int_pipe)
        return 0;
    if (!test_bit(SUSPENDED, &dev->flags))
        return 0;

    spin_lock_irqsave(&dev->lock, flags);
    /* submit pending write requests */
    while ((urb = usb_get_from_anchor(&dev->tx_deferred))) {
        spin_unlock_irqrestore(&dev->lock, flags);
        /*
         * usb_get_from_anchor() does not drop the
         * ref count incremented by the usb_anchor_urb()
         * called in Tx submission path. Let us do it.
         */
        usb_put_urb(urb);
        usb_anchor_urb(urb, &dev->tx_submitted);
        ret = usb_submit_urb(urb, GFP_ATOMIC);
        if (ret < 0) {
            usb_unanchor_urb(urb);
            kfree(urb->setup_packet);
            kfree(urb->transfer_buffer);
            usb_free_urb(urb);
            usb_autopm_put_interface_async(dev->intf);
        }
        spin_lock_irqsave(&dev->lock, flags);
    }
    clear_bit(SUSPENDED, &dev->flags);
    spin_unlock_irqrestore(&dev->lock, flags);

    return ctrl_bridge_start_read(dev, GFP_KERNEL);
}
Example No. 29
static void ar9170_usb_submit_urb(struct ar9170_usb *aru)
{
	struct urb *urb;
	unsigned long flags;
	int err;

	if (unlikely(!IS_STARTED(&aru->common)))
		return ;

	spin_lock_irqsave(&aru->tx_urb_lock, flags);
	if (atomic_read(&aru->tx_submitted_urbs) >= AR9170_NUM_TX_URBS) {
		spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
		return ;
	}
	atomic_inc(&aru->tx_submitted_urbs);

	urb = usb_get_from_anchor(&aru->tx_pending);
	if (!urb) {
		atomic_dec(&aru->tx_submitted_urbs);
		spin_unlock_irqrestore(&aru->tx_urb_lock, flags);

		return ;
	}
	spin_unlock_irqrestore(&aru->tx_urb_lock, flags);

	aru->tx_pending_urbs--;
	usb_anchor_urb(urb, &aru->tx_submitted);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err)) {
		if (ar9170_nag_limiter(&aru->common))
			dev_err(&aru->udev->dev, "submit_urb failed (%d).\n",
				err);

		usb_unanchor_urb(urb);
		atomic_dec(&aru->tx_submitted_urbs);
		ar9170_tx_callback(&aru->common, urb->context);
	}

	usb_free_urb(urb);
}
Example No. 30
static void sierra_close(struct usb_serial_port *port)
{
	int i;
	struct usb_serial *serial = port->serial;
	struct sierra_port_private *portdata;
	struct sierra_intf_private *intfdata = usb_get_serial_data(serial);
	struct urb *urb;

	portdata = usb_get_serial_port_data(port);

	/*
	 * Need to take susp_lock to make sure port is not already being
	 * resumed, but no need to hold it due to the tty-port initialized
	 * flag.
	 */
	spin_lock_irq(&intfdata->susp_lock);
	if (--intfdata->open_ports == 0)
		serial->interface->needs_remote_wakeup = 0;
	spin_unlock_irq(&intfdata->susp_lock);

	for (;;) {
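		/* free each write deferred during suspend, drop its PM ref, and update the URB count */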
		urb = usb_get_from_anchor(&portdata->delayed);
		if (!urb)
			break;
		kfree(urb->transfer_buffer);
		usb_free_urb(urb);
		usb_autopm_put_interface_async(serial->interface);
		spin_lock(&portdata->lock);
		portdata->outstanding_urbs--;
		spin_unlock(&portdata->lock);
	}

	sierra_stop_rx_urbs(port);
	usb_kill_anchored_urbs(&portdata->active);

	for (i = 0; i < portdata->num_in_urbs; i++) {
		sierra_release_urb(portdata->in_urbs[i]);
		portdata->in_urbs[i] = NULL;
	}

	usb_autopm_get_interface_no_resume(serial->interface);
}