static int __wwan_close(struct net_device *dev)
{
	struct wwan_private *wwan_ptr = netdev_priv(dev);
	int rc = 0;

	if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) {
		wwan_ptr->device_status = WWAN_DEVICE_INACTIVE;
		/*
		 * Do not close the WWAN port once it is up; doing so causes
		 * the remote side to hang if the port is opened again.
		 */
		INIT_COMPLETION(wwan_ptr->resource_granted_completion);
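		/*
		 * Request the IPA RM resource before touching the channel.
		 * -EINPROGRESS means the grant is asynchronous, so wait on
		 * resource_granted_completion, which is presumably signaled
		 * from the IPA RM grant notification handler (not shown in
		 * this excerpt).
		 */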
		rc = ipa_rm_inactivity_timer_request_resource(
			ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
		if (rc < 0 && rc != -EINPROGRESS) {
			pr_err("%s: ipa rm timer request resource failed %d\n",
					__func__, rc);
			return -ENODEV;
		}
		if (rc == -EINPROGRESS) {
			wait_for_completion(
				&wwan_ptr->resource_granted_completion);
		}
		rc = a2_mux_close_channel(
			a2_mux_lcid_by_ch_id[wwan_ptr->ch_id]);
		if (rc) {
			pr_err("[%s] %s: a2_mux_close_channel failed %d\n",
			       dev->name, __func__, rc);
			ipa_rm_inactivity_timer_release_resource(
				ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
			return rc;
		}
		ipa_rm_inactivity_timer_release_resource(
			ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
		rc = ipa_deregister_intf(dev->name);
		if (rc) {
			pr_err("[%s] %s: ipa_deregister_intf failed %d\n",
			       dev->name, __func__, rc);
			return rc;
		}
		return rc;
	}
	return -EBADF;
}
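/*
 * Illustrative sketch (not part of the original driver): __wwan_close()
 * would typically be reached from an ndo_stop-style wrapper that also
 * stops the TX queue. The wrapper name wwan_stop is an assumption made
 * for illustration only.
 */
static int wwan_stop(struct net_device *dev)
{
	pr_debug("[%s] wwan_stop()\n", dev->name);
	/* Keep new skbs out of wwan_xmit() while the channel is closed. */
	netif_stop_queue(dev);
	return __wwan_close(dev);
}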
static int __wwan_open(struct net_device *dev)
{
	int r;
	struct wwan_private *wwan_ptr = netdev_priv(dev);

	pr_debug("[%s] __wwan_open()\n", dev->name);
	if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE) {
		INIT_COMPLETION(wwan_ptr->resource_granted_completion);
		r = ipa_rm_inactivity_timer_request_resource(
			ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
		if (r < 0 && r != -EINPROGRESS) {
			pr_err("%s: ipa rm timer request resource failed %d\n",
					__func__, r);
			return -ENODEV;
		}
		if (r == -EINPROGRESS) {
			wait_for_completion(
				&wwan_ptr->resource_granted_completion);
		}
		r = a2_mux_open_channel(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id],
					dev, a2_mux_notify);
		if (r < 0) {
			pr_err("%s: ch=%d failed with rc %d\n",
					__func__, wwan_ptr->ch_id, r);
			ipa_rm_inactivity_timer_release_resource(
				ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
			return -ENODEV;
		}
		ipa_rm_inactivity_timer_release_resource(
			ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
		r = wwan_register_to_ipa(dev);
		if (r < 0) {
			pr_err("%s: ch=%d failed to register to IPA rc %d\n",
					__func__, wwan_ptr->ch_id, r);
			return -ENODEV;
		}
	}
	wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
	return 0;
}
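/*
 * Illustrative sketch (not part of the original driver): __wwan_open()
 * would typically be called from an ndo_open-style wrapper that starts
 * the TX queue once the channel is up. The wrapper name wwan_open is an
 * assumption made for illustration only.
 */
static int wwan_open(struct net_device *dev)
{
	int rc;

	pr_debug("[%s] wwan_open()\n", dev->name);
	rc = __wwan_open(dev);
	if (rc == 0)
		netif_start_queue(dev);
	return rc;
}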
/**
 * wwan_xmit() - Transmits an skb
 * @skb: skb to be transmitted
 * @dev: network device
 *
 * Requests the IPA RM resources needed for transmission. If IPA RM is not
 * ready, the skb is saved and transmitted as soon as the IPA RM resources
 * are granted.
 *
 * Return codes:
 * 0: success
 * NETDEV_TX_BUSY: Error while transmitting the skb. Try again later
 * -EFAULT: Error while transmitting the skb
 */
static int wwan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct wwan_private *wwan_ptr = netdev_priv(dev);
	unsigned long flags;
	int ret = 0;

	if (netif_queue_stopped(dev)) {
		pr_err("[%s]fatal: wwan_xmit called when netif_queue stopped\n",
		       dev->name);
		return 0;
	}
	ret = ipa_rm_inactivity_timer_request_resource(
		ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
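	/*
	 * -EINPROGRESS: the resource grant is asynchronous. Stop the queue
	 * and report BUSY so the stack requeues the skb; the queue is
	 * presumably restarted once the IPA RM grant notification arrives
	 * (not shown in this excerpt).
	 */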
	if (ret == -EINPROGRESS) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	if (ret) {
		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
		       dev->name, ret);
		return -EFAULT;
	}
	ret = wwan_send_packet(skb, dev);
	if (ret == -EPERM) {
		ret = NETDEV_TX_BUSY;
		goto exit;
	}
	/*
	 * SSR (subsystem restart) was detected a bit early. Shut some things
	 * down now and leave the rest to the main SSR handling code when it
	 * runs later.
	 */
	if (ret == -EFAULT) {
		netif_carrier_off(dev);
		dev_kfree_skb_any(skb);
		ret = 0;
		goto exit;
	}
	if (ret == -EAGAIN) {
		/*
		 * This should not happen.
		 * -EAGAIN means we attempted to overflow the high watermark.
		 * Clearly the queue is not stopped like it should be, so
		 * stop it and return BUSY to the TCP/IP framework. It will
		 * retry this packet when the queue is restarted, which
		 * happens in the write_done callback when the low watermark
		 * is hit.
		 */
		netif_stop_queue(dev);
		ret = NETDEV_TX_BUSY;
		goto exit;
	}
	spin_lock_irqsave(&wwan_ptr->lock, flags);
	if (a2_mux_is_ch_full(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id])) {
		netif_stop_queue(dev);
		pr_debug("%s: High WM hit, stopping queue=%p\n",
		       __func__, skb);
	}
	spin_unlock_irqrestore(&wwan_ptr->lock, flags);
	return ret;
exit:
	ipa_rm_inactivity_timer_release_resource(
		ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
	return ret;
}
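/*
 * Illustrative sketch (not part of the original driver): how the handlers
 * above would typically be wired into struct net_device_ops. The ops name
 * and the wwan_open/wwan_stop wrappers sketched earlier are assumptions
 * made for illustration only; wwan_xmit() returns int in the old-style
 * ndo_start_xmit convention this code appears to follow.
 */
static const struct net_device_ops wwan_ops_sketch = {
	.ndo_open	= wwan_open,
	.ndo_stop	= wwan_stop,
	.ndo_start_xmit	= wwan_xmit,
};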