Example no. 1
0
static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb = &acm->wb[wbn];
	int rc;

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	dbg("%s susp_count: %d", __func__, acm->susp_count);
	usb_autopm_get_interface_async(acm->control);
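	/*
	 * If the device is runtime-suspended the write cannot go out now:
	 * under CONFIG_PM the urb is prepared and anchored on acm->deferred,
	 * and wb is recorded as the (single) delayed write buffer so it can
	 * be submitted once the device resumes.
	 */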
	if (acm->susp_count) {
#ifdef CONFIG_PM
		printk("%s buffer urb\n", __func__);
		acm->transmitting++;
		wb->urb->transfer_buffer = wb->buf;
		wb->urb->transfer_dma = wb->dmah;
		wb->urb->transfer_buffer_length = wb->len;
		wb->urb->dev = acm->dev;
		usb_anchor_urb(wb->urb, &acm->deferred);
#endif
		if (!acm->delayed_wb)
			acm->delayed_wb = wb;
		else {
			usb_autopm_put_interface_async(acm->control);
			printk(KERN_INFO "%s: acm->delayed_wb is not NULL, "
				"returning -EAGAIN\n", __func__);
			spin_unlock_irqrestore(&acm->write_lock, flags);
			return -EAGAIN;
		}
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);

	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return rc;
}
Example no. 2
0
static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb = &acm->wb[wbn];
	int rc;

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	dbg("%s susp_count: %d", __func__, acm->susp_count);
	usb_autopm_get_interface_async(acm->control);
	if (acm->susp_count) {
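		/*
		 * Device is suspended: coalesce the data into a single
		 * delayed write buffer. If it does not fit next to the data
		 * already delayed, the new payload is silently dropped here.
		 */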
		if (!acm->delayed_wb) {
			acm->delayed_wb = wb;
		} else {
			if (acm->delayed_wb->len + wb->len <= acm->writesize) {
				memcpy(acm->delayed_wb->buf + acm->delayed_wb->len,
				       wb->buf, wb->len);
				acm->delayed_wb->len += wb->len;
			}
			wb->use = 0;
			usb_autopm_put_interface_async(acm->control);
		}
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);

	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return rc;
}
Example no. 3
0
netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
			goto drop;
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
		urb->transfer_buffer_length++;
		if (skb_tailroom(skb)) {
			skb->data[skb->len] = 0;
			__skb_put(skb, 1);
		}
	}

	spin_lock_irqsave(&dev->txq.lock, flags);
	retval = usb_autopm_get_interface_async(dev->intf);
	if (retval < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still asleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(net);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		goto deferred;
	}
#endif

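	/*
	 * -EPIPE means the bulk-out endpoint stalled: stop the queue and let
	 * the EVENT_TX_HALT kevent clear the halt before transmitting again.
	 */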
	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail (&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
	return NETDEV_TX_OK;
}
Example no. 4
0
/* Write */
static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
					const unsigned char *buf, int count)
{
	struct sierra_port_private *portdata;
	struct sierra_intf_private *intfdata;
	struct usb_serial *serial = port->serial;
	unsigned long flags;
	unsigned char *buffer;
	struct urb *urb;
	size_t writesize = min((size_t)count, (size_t)MAX_TRANSFER);
	int retval = 0;

	/* verify that we actually have some data to write */
	if (count == 0)
		return 0;

	portdata = usb_get_serial_port_data(port);
	intfdata = usb_get_serial_data(serial);

	dev_dbg(&port->dev, "%s: write (%zd bytes)\n", __func__, writesize);
	spin_lock_irqsave(&portdata->lock, flags);
	dev_dbg(&port->dev, "%s - outstanding_urbs: %d\n", __func__,
		portdata->outstanding_urbs);
	if (portdata->outstanding_urbs > portdata->num_out_urbs) {
		spin_unlock_irqrestore(&portdata->lock, flags);
		dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
		return 0;
	}
	portdata->outstanding_urbs++;
	dev_dbg(&port->dev, "%s - 1, outstanding_urbs: %d\n", __func__,
		portdata->outstanding_urbs);
	spin_unlock_irqrestore(&portdata->lock, flags);

	retval = usb_autopm_get_interface_async(serial->interface);
	if (retval < 0) {
		spin_lock_irqsave(&portdata->lock, flags);
		portdata->outstanding_urbs--;
		spin_unlock_irqrestore(&portdata->lock, flags);
		goto error_simple;
	}

	buffer = kmalloc(writesize, GFP_ATOMIC);
	if (!buffer) {
		retval = -ENOMEM;
		goto error_no_buffer;
	}

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		retval = -ENOMEM;
		goto error_no_urb;
	}

	memcpy(buffer, buf, writesize);

	usb_serial_debug_data(&port->dev, __func__, writesize, buffer);

	usb_fill_bulk_urb(urb, serial->dev,
			  usb_sndbulkpipe(serial->dev,
					  port->bulk_out_endpointAddress),
			  buffer, writesize, sierra_outdat_callback, port);

	/* Handle the need to send a zero length packet */
	urb->transfer_flags |= URB_ZERO_PACKET;

	spin_lock_irqsave(&intfdata->susp_lock, flags);

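	/*
	 * While the interface is suspended the urb is parked on the delayed
	 * anchor; the resume path is expected to submit it later. Otherwise
	 * it is anchored as active and submitted right away.
	 */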
	if (intfdata->suspended) {
		usb_anchor_urb(urb, &portdata->delayed);
		spin_unlock_irqrestore(&intfdata->susp_lock, flags);
		goto skip_power;
	} else {
		usb_anchor_urb(urb, &portdata->active);
	}
	/* send it down the pipe */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval) {
		usb_unanchor_urb(urb);
		spin_unlock_irqrestore(&intfdata->susp_lock, flags);
		dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed "
			"with status = %d\n", __func__, retval);
		goto error;
	} else {
		intfdata->in_flight++;
		spin_unlock_irqrestore(&intfdata->susp_lock, flags);
	}

skip_power:
	/* we are done with this urb, so let the host driver
	 * really free it when it is finished with it */
	usb_free_urb(urb);

	return writesize;
error:
	usb_free_urb(urb);
error_no_urb:
	kfree(buffer);
error_no_buffer:
	spin_lock_irqsave(&portdata->lock, flags);
	--portdata->outstanding_urbs;
	dev_dbg(&port->dev, "%s - 2. outstanding_urbs: %d\n", __func__,
		portdata->outstanding_urbs);
	spin_unlock_irqrestore(&portdata->lock, flags);
	usb_autopm_put_interface_async(serial->interface);
error_simple:
	return retval;
}
Example no. 5
0
int diag_bridge_write(char *data, int size)
{
	struct urb		*urb = NULL;
	unsigned int		pipe;
	struct diag_bridge	*dev = __dev;
	int			ret;
	int			spin;

	if (!dev || !dev->udev)
		return -ENODEV;

	if (!size) {
		dev_err(&dev->udev->dev, "invalid size:%d\n", size);
		return -EINVAL;
	}

	if (!dev->ifc) {
		dev_err(&dev->udev->dev, "device is disconnected\n");
		return -ENODEV;
	}

	/* if there was a previous unrecoverable error, just quit */
	if (dev->err)
		return -ESHUTDOWN;

	spin = 50;
	while (check_request_blocked(rmnet_pm_dev) && spin--) {
		pr_info("%s: wake up wait loop\n", __func__);
		msleep(20);
	}

	if (check_request_blocked(rmnet_pm_dev)) {
		pr_err("%s: in lpa wakeup, return EAGAIN\n", __func__);
		return -EAGAIN;
	}

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		err("unable to allocate urb");
		return -ENOMEM;
	}

	ret = usb_autopm_get_interface_async(dev->ifc);
	if (ret < 0) {
		dev_err(&dev->udev->dev, "autopm_get failed:%d\n", ret);
		usb_free_urb(urb);
		return ret;
	}

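	/*
	 * The autopm get above only schedules an asynchronous resume, so
	 * poll briefly until runtime PM reports RPM_ACTIVE before queuing
	 * the bulk urb.
	 */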
	for (spin = 0; spin < 10; spin++) {
		/* check rpm active */
		if (dev->udev->dev.power.runtime_status == RPM_ACTIVE) {
			ret = 0;
			break;
		} else {
			dev_err(&dev->udev->dev, "waiting rpm active\n");
			ret = -EAGAIN;
		}
		msleep(20);
	}
	if (ret < 0) {
		dev_err(&dev->udev->dev, "rpm active failed:%d\n", ret);
		usb_free_urb(urb);
		usb_autopm_put_interface(dev->ifc);
		return ret;
	}

	pipe = usb_sndbulkpipe(dev->udev, dev->out_epAddr);
	usb_fill_bulk_urb(urb, dev->udev, pipe, data, size,
				diag_bridge_write_cb, dev);
	usb_anchor_urb(urb, &dev->submitted);
	dev->pending_writes++;

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		dev_err(&dev->udev->dev, "submitting urb failed err:%d\n", ret);
		dev->pending_writes--;
		usb_unanchor_urb(urb);
		usb_free_urb(urb);
		usb_autopm_put_interface(dev->ifc);
		return ret;
	}
#if 0
	usb_free_urb(urb);
#endif
	return 0;
}
Example no. 6
0
static int acm_write_start(struct acm *acm, int wbn)
{
	unsigned long flags;
	struct acm_wb *wb = &acm->wb[wbn];
	int rc;
#ifdef CONFIG_PM
	struct urb *res;
#endif

	spin_lock_irqsave(&acm->write_lock, flags);
	if (!acm->dev) {
		wb->use = 0;
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return -ENODEV;
	}

	dev_vdbg(&acm->data->dev, "%s - susp_count %d\n", __func__,
							acm->susp_count);
	usb_autopm_get_interface_async(acm->control);
	if (acm->susp_count) {
#ifdef CONFIG_PM
		acm->transmitting++;
		wb->urb->transfer_buffer = wb->buf;
		wb->urb->transfer_dma = wb->dmah;
		wb->urb->transfer_buffer_length = wb->len;
		wb->urb->dev = acm->dev;
		usb_anchor_urb(wb->urb, &acm->deferred);
#else
		if (!acm->delayed_wb) {
			acm->delayed_wb = wb;
		} else {
			if (acm->delayed_wb->len + wb->len <= acm->writesize) {
				memcpy(acm->delayed_wb->buf + acm->delayed_wb->len,
				       wb->buf, wb->len);
				acm->delayed_wb->len += wb->len;
			}
			wb->use = 0;
			usb_autopm_put_interface_async(acm->control);
		}
#endif
		spin_unlock_irqrestore(&acm->write_lock, flags);
		return 0;	/* A white lie */
	}
	usb_mark_last_busy(acm->dev);
#ifdef CONFIG_PM
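	/*
	 * First flush any writes that were anchored on acm->deferred while
	 * the device was suspended, then start the current write buffer.
	 */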
	while ((res = usb_get_from_anchor(&acm->deferred))) {
		/* decrement ref count*/
		usb_put_urb(res);
		rc = usb_submit_urb(res, GFP_ATOMIC);
		if (rc < 0) {
			dbg("usb_submit_urb(pending request) failed: %d", rc);
			usb_unanchor_urb(res);
			acm_write_done(acm, res->context);
		}
	}
#endif
	rc = acm_start_wb(acm, wb);
	spin_unlock_irqrestore(&acm->write_lock, flags);

	return rc;
}
Example no. 7
0
int ctrl_bridge_write(unsigned int id, char *data, size_t size)
{
	int			result;
	struct urb		*writeurb;
	struct usb_ctrlrequest	*out_ctlreq;
	struct ctrl_bridge	*dev;

	if (id >= MAX_BRIDGE_DEVICES) {
		result = -EINVAL;
		goto free_data;
	}

	dev = __dev[id];

	if (!dev) {
		result = -ENODEV;
		goto free_data;
	}

	dev_dbg(&dev->intf->dev, "%s:[id]:%u: write (%zu bytes)\n",
		__func__, id, size);

	writeurb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!writeurb) {
		dev_err(&dev->intf->dev, "%s: error allocating write urb\n",
			__func__);
		result = -ENOMEM;
		goto free_data;
	}

	out_ctlreq = kmalloc(sizeof(*out_ctlreq), GFP_ATOMIC);
	if (!out_ctlreq) {
		dev_err(&dev->intf->dev,
			"%s: error allocating setup packet buffer\n",
			__func__);
		result = -ENOMEM;
		goto free_urb;
	}

	/* CDC Send Encapsulated Request packet */
	out_ctlreq->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS |
				 USB_RECIP_INTERFACE);
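	/*
	 * A NULL buffer with zero length pushes the cached control line
	 * state (SET_CONTROL_LINE_STATE); any real payload is sent as a
	 * SEND_ENCAPSULATED_COMMAND request.
	 */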
	if (!data && !size) {
		out_ctlreq->bRequest = USB_CDC_REQ_SET_CONTROL_LINE_STATE;
		out_ctlreq->wValue = dev->cbits_tomdm;
		dev->set_ctrl_line_sts++;
	} else {
		out_ctlreq->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
		out_ctlreq->wValue = 0;
		dev->snd_encap_cmd++;
	}
	out_ctlreq->wIndex =
		dev->intf->cur_altsetting->desc.bInterfaceNumber;
	out_ctlreq->wLength = cpu_to_le16(size);

	usb_fill_control_urb(writeurb, dev->udev,
				 usb_sndctrlpipe(dev->udev, 0),
				 (unsigned char *)out_ctlreq,
				 (void *)data, size,
				 ctrl_write_callback, dev);

	result = usb_autopm_get_interface_async(dev->intf);
	if (result < 0) {
		dev_dbg(&dev->intf->dev, "%s: unable to resume interface: %d\n",
			__func__, result);

		/*
		 * Revisit: if (result == -EPERM)
		 *	bridge_suspend(dev->intf, PMSG_SUSPEND);
		 */

		goto free_ctrlreq;
	}

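	/*
	 * Interface is suspended: park the urb on tx_deferred so the resume
	 * path can submit it, and report the write as accepted.
	 */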
	if (test_bit(SUSPENDED, &dev->flags)) {
		usb_anchor_urb(writeurb, &dev->tx_deferred);
		goto deferred;
	}

	usb_anchor_urb(writeurb, &dev->tx_submitted);
	result = usb_submit_urb(writeurb, GFP_ATOMIC);
	if (result < 0) {
		dev_err(&dev->intf->dev, "%s: submit URB error %d\n",
			__func__, result);
		usb_autopm_put_interface_async(dev->intf);
		goto unanchor_urb;
	}
deferred:
	return size;

unanchor_urb:
	usb_unanchor_urb(writeurb);
free_ctrlreq:
	kfree(out_ctlreq);
free_urb:
	usb_free_urb(writeurb);
free_data:
	kfree(data);

	return result;
}
Example no. 8
0
netdev_tx_t gobi_usbnet_start_xmit_3_0_6 (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;
#ifdef TX_URB_MONITOR
	unsigned char b_usb_if_num = 0;
	int iRet = -1;
#endif /* TX_URB_MONITOR */

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err(dev)) {
				netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
				goto drop;
			} else {
				/* cdc_ncm collected packet; waits for more */
				goto not_drop;
			}
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 * NOTE2: CDC NCM specification is different from CDC ECM when
	 * handling ZLP/short packets, so cdc_ncm driver will make short
	 * packet itself if needed.
	 */
	if (length % dev->maxpacket == 0) {
		if (!(info->flags & FLAG_SEND_ZLP)) {
			if (!(info->flags & FLAG_MULTI_PACKET)) {
				urb->transfer_buffer_length++;
				if (skb_tailroom(skb)) {
					skb->data[skb->len] = 0;
					__skb_put(skb, 1);
				}
			}
		} else
			urb->transfer_flags |= URB_ZERO_PACKET;
	}

	spin_lock_irqsave(&dev->txq.lock, flags);
	retval = usb_autopm_get_interface_async(dev->intf);
	if (retval < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still asleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(net);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		goto deferred;
	}
#endif

#ifdef TX_URB_MONITOR
	iRet = get_usb_interface(urb, &b_usb_if_num);
#endif /* TX_URB_MONITOR */

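	/*
	 * Submit under txq.lock; on any failure the PM reference taken above
	 * is dropped and the skb/urb are released in the drop path below.
	 */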
	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail (&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
#ifdef TX_URB_MONITOR
	/*
	 * This can be called from here or from inside the case 0 in the
	 * above switch; doing it here leaves one less condition to check.
	 *
	 * Call URB_monitor() with true as the URB has been successfully
	 * submitted to the txq.
	 */
	if ((URB_monitor) && (0 == iRet) && (0 == retval))
		URB_monitor(true, b_usb_if_num);
#endif /* TX_URB_MONITOR */

	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
		dev->net->stats.tx_dropped++;
not_drop:
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
	return NETDEV_TX_OK;
}
Example no. 9
0
static netdev_tx_t baseband_usb_netdev_start_xmit(
	struct sk_buff *skb, struct net_device *dev)
{
	/* 77228-5 patch */
	int i;
	struct baseband_usb *usb;
	/* 77228-5 patch */
	int err;

	//+Sophia:0112
	struct rmnet_private *p = netdev_priv(dev);
	//-Sophia:0112

	pr_debug("baseband_usb_netdev_start_xmit\n");

	/* check input */
	if (!skb) {
		pr_err("no skb\n");
		/* 77969-7 patch */
		return NETDEV_TX_BUSY;
		/* 77969-7 patch */
	}

	/* 77228-5 patch */
	if (!dev) {
		pr_err("no net dev\n");
		/* 77969-7 patch */
		return NETDEV_TX_BUSY;
		/* 77969-7 patch */
	/* 77228-5 patch */
	}
	/* 77228-5 patch */

	/* find index of network device which is transmitting */
	for (i = 0; i < max_intfs; i++) {
		if (usb_net_raw_ip_dev[i] == dev)
			break;
	/* 77228-5 patch */
	}
	/* 77228-5 patch */
	if (i >= max_intfs) {
		pr_err("unknown net dev %p\n", dev);
		/* 77969-7 patch */
		return NETDEV_TX_BUSY;
		/* 77969-7 patch */
	/* 77228-5 patch */
	}
	/* 77228-5 patch */
	usb = baseband_usb_net[i];
	/* 77228-5 patch */

	//+Sophia:0112
	p->stats.tx_packets++;
	p->stats.tx_bytes += skb->len - 14;
	//-Sophia:0112

	if (usb->usb.interface) {
		/* autoresume if suspended */
		usb_autopm_get_interface_async(usb->usb.interface);
	} else {
		pr_err("tx urb submit error\n");
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

#if 0
	if (usb->susp_count > 0) {
		//pr_info("%s: usb->susp_count %d > 0 (suspended -> autoresume)\n",__func__, usb->susp_count);
		if (usb_autopm_get_interface_async(usb->usb.interface) >= 0) {
			usb_autopm_put_interface_async(usb->usb.interface);
		}else{
			pr_debug("usb_autopm_get_interface_async failed!\n");

		}
		//return NETDEV_TX_BUSY;
	}
#endif
	/* submit tx urb */
	/* 77228-5 patch */
	err = usb_net_raw_ip_tx_urb_submit(usb, skb);
	/* 77228-5 patch */
	
	if (err < 0) {
		/* 77228-5 patch */
		pr_err("tx urb submit error\n");
		/* 77228-5 patch */
		rawlog4("[ref] -- %s(%d) %d\n", __func__, __LINE__, --autopm_refcnt);
		/*83290-1*/
		netif_stop_queue(dev);
		/*83290-1*/
		/* 77969-5 patch */
		return NETDEV_TX_BUSY;
		/* 77969-5 patch */
	}
	
	return NETDEV_TX_OK;
}
irqreturn_t baseband_xmm_power_ipc_ap_wake_irq(int irq, void *dev_id)
{
	int value;
	struct baseband_power_platform_data *data = baseband_power_driver_data;
	
	value = gpio_get_value(baseband_power_driver_data->modem.xmm.ipc_ap_wake);
	pr_debug("%s g(%d), wake_st(%d)\n", __func__, value, ipc_ap_wake_state);

	if (ipc_ap_wake_state < IPC_AP_WAKE_IRQ_READY) {
		pr_err("%s - spurious irq\n", __func__);
	} else if (ipc_ap_wake_state == IPC_AP_WAKE_IRQ_READY) {
		if (!value) {
			pr_debug("%s - IPC_AP_WAKE_INIT1" " - got falling edge\n", __func__);
			/* go to IPC_AP_WAKE_INIT1 state */
			ipc_ap_wake_state = IPC_AP_WAKE_INIT1;
			/* queue work */
			queue_work(workqueue, &init1_work);
		} else {
			pr_debug("%s - IPC_AP_WAKE_INIT1" " - wait for falling edge\n",	__func__);
		}
	} else if (ipc_ap_wake_state == IPC_AP_WAKE_INIT1) {
		if (!value) {
			pr_debug("%s - IPC_AP_WAKE_INIT2" " - wait for rising edge\n",__func__);
		} else {
			pr_debug("%s - IPC_AP_WAKE_INIT2" " - got rising edge\n",__func__);
			/* go to IPC_AP_WAKE_INIT2 state */
			ipc_ap_wake_state = IPC_AP_WAKE_INIT2;
			/* queue work */
			queue_work(workqueue, &init2_work);
		}
	} else {
		if (!value) {
			pr_debug("%s - falling\n", __func__);
			/* First check it a CP ack or CP wake  */
			value = gpio_get_value(data->modem.xmm.ipc_bb_wake);
			if (value) {
				pr_debug("cp ack for bb_wake\n");
				ipc_ap_wake_state = IPC_AP_WAKE_L;
				return IRQ_HANDLED;
			}
			spin_lock(&xmm_lock);
			wakeup_pending = true;
			if (system_suspending) {
				spin_unlock(&xmm_lock);
				pr_debug("Set wakeup_pending = 1 in system_" " suspending!!!\n");
			} else {
				if (baseband_xmm_powerstate == BBXMM_PS_L3) {
					spin_unlock(&xmm_lock);
					pr_debug("PM_ST : CP L3 -> L0\n");
				} else if (baseband_xmm_powerstate == BBXMM_PS_L2) {
					CP_initiated_L2toL0 = true;
					spin_unlock(&xmm_lock);
					baseband_xmm_set_power_status(BBXMM_PS_L2TOL0);
				} else {
					CP_initiated_L2toL0 = true;
					spin_unlock(&xmm_lock);
				}
			}
			/* save gpio state */
			ipc_ap_wake_state = IPC_AP_WAKE_L;
		} else {
			pr_debug("%s - rising\n", __func__);
			value = gpio_get_value(data->modem.xmm.ipc_hsic_active);
			//pr_debug("GPIO [R]: Host_active = %d \n",value); 
			if (!value) {
				pr_debug("host active low: ignore request\n");
				ipc_ap_wake_state = IPC_AP_WAKE_H;
				return IRQ_HANDLED;
			}
			value = gpio_get_value(data->modem.xmm.ipc_bb_wake);
			//pr_debug("GPIO [R]: Slave_wakeup = %d \n", value); 
			if (value) {
				/* Clear the slave wakeup request */
				gpio_set_value(data->modem.xmm.ipc_bb_wake, 0);
				pr_debug("GPIO [W]: Slave_wake -> 0 \n"); 
			}
			if (reenable_autosuspend && usbdev) {
				struct usb_interface *intf;

				reenable_autosuspend = false;
				intf = usb_ifnum_to_if(usbdev, 0);
				if (usb_autopm_get_interface_async(intf) >= 0) {
					pr_debug("get_interface_async succeeded"
						" - call put_interface\n");
					usb_autopm_put_interface_async(intf);
				} else {
					pr_debug("get_interface_async failed"
						" - do not call put_interface\n");
				}
			}
			modem_sleep_flag = false;
			baseband_xmm_set_power_status(BBXMM_PS_L0);
			if (short_autosuspend && enable_short_autosuspend &&
					baseband_xmm_powerstate == BBXMM_PS_L0 &&
					&usbdev->dev) {
				pr_debug("set autosuspend delay %d ms\n",
						SHORT_AUTOSUSPEND_DELAY);
				queue_work(workqueue_susp, &work_shortsusp);
			}
			/* save gpio state */
			ipc_ap_wake_state = IPC_AP_WAKE_H;
		}
	}

	return IRQ_HANDLED;
}
static int rmnet_usb_ctrl_write(struct rmnet_ctrl_dev *dev, char *buf,
		size_t size)
{
	int			result;
	struct urb		*sndurb;
	struct usb_ctrlrequest	*out_ctlreq;
	struct usb_device	*udev;

	if (!is_dev_connected(dev))
		return -ENETRESET;

	udev = interface_to_usbdev(dev->intf);

	sndurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!sndurb) {
		dev_err(dev->devicep, "Error allocating write urb\n");
		return -ENOMEM;
	}

	out_ctlreq = kmalloc(sizeof(*out_ctlreq), GFP_KERNEL);
	if (!out_ctlreq) {
		usb_free_urb(sndurb);
		dev_err(dev->devicep, "Error allocating setup packet buffer\n");
		return -ENOMEM;
	}

	/* CDC Send Encapsulated Request packet */
	out_ctlreq->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS |
			     USB_RECIP_INTERFACE);
	out_ctlreq->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
	out_ctlreq->wValue = 0;
	out_ctlreq->wIndex = dev->intf->cur_altsetting->desc.bInterfaceNumber;
	out_ctlreq->wLength = cpu_to_le16(size);

	usb_fill_control_urb(sndurb, udev,
			     usb_sndctrlpipe(udev, 0),
			     (unsigned char *)out_ctlreq, (void *)buf, size,
			     ctrl_write_callback, dev);

	result = usb_autopm_get_interface_async(dev->intf);
	if (result < 0) {
		dev_err(dev->devicep, "%s: Unable to resume interface: %d\n",
			__func__, result);

		/*
		 * Revisit: if (result == -EPERM)
		 *	rmnet_usb_suspend(dev->intf, PMSG_SUSPEND);
		 */

		usb_free_urb(sndurb);
		kfree(out_ctlreq);
		return result;
	}

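	/*
	 * Anchor on tx_submitted before submitting so a disconnect or
	 * suspend can find and kill the in-flight urb.
	 */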
	usb_anchor_urb(sndurb, &dev->tx_submitted);
	dev->snd_encap_cmd_cnt++;
	result = usb_submit_urb(sndurb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev->devicep, "%s: Submit URB error %d\n",
			__func__, result);
		dev->snd_encap_cmd_cnt--;
		usb_autopm_put_interface_async(dev->intf);
		usb_unanchor_urb(sndurb);
		usb_free_urb(sndurb);
		kfree(out_ctlreq);
		return result;
	}

	return size;
}