/*
 * wwan_xmit() - transmit one skb on the WWAN net device.
 *
 * Requests the IPA RM resource for this logical channel, then hands the
 * skb to wwan_send_packet().  Per the ndo_start_xmit contract, any return
 * value other than NETDEV_TX_BUSY tells the stack the skb was consumed,
 * so every such path must either send or free the skb.
 *
 * @skb: skb to be transmitted (owned by caller only on NETDEV_TX_BUSY)
 * @dev: network device
 *
 * Return: 0 (NETDEV_TX_OK) when the skb was sent or dropped,
 * NETDEV_TX_BUSY when the stack should requeue and retry,
 * -EFAULT on IPA RM failure (skb is freed before returning).
 */
static int wwan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct wwan_private *wwan_ptr = netdev_priv(dev);
	unsigned long flags;
	int ret = 0;

	if (netif_queue_stopped(dev)) {
		pr_err("[%s]fatal: wwan_xmit called when netif_queue stopped\n",
		       dev->name);
		/*
		 * Return BUSY so the stack retains ownership and requeues;
		 * the previous "return 0" here leaked the skb, since
		 * NETDEV_TX_OK means the driver consumed it.
		 */
		return NETDEV_TX_BUSY;
	}
	ret = ipa_rm_inactivity_timer_request_resource(
		ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
	if (ret == -EINPROGRESS) {
		/* resource grant pending; stop queue and let stack retry */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	if (ret) {
		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
		       dev->name, ret);
		/*
		 * A non-BUSY return means the skb was consumed; free it
		 * here to avoid leaking it on this error path.
		 */
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}
	ret = wwan_send_packet(skb, dev);
	if (ret == -EPERM) {
		ret = NETDEV_TX_BUSY;
		goto exit;
	}
	if (ret == -EFAULT) {
		/* looks like early SSR detection: drop skb, carrier off */
		netif_carrier_off(dev);
		dev_kfree_skb_any(skb);
		ret = 0;
		goto exit;
	}
	if (ret == -EAGAIN) {
		/*
		 * High watermark would overflow; queue should already have
		 * been stopped.  Stop it now and have the stack retry.
		 */
		netif_stop_queue(dev);
		ret = NETDEV_TX_BUSY;
		goto exit;
	}
	spin_lock_irqsave(&wwan_ptr->lock, flags);
	if (a2_mux_is_ch_full(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id])) {
		/* stop queue on high watermark; restarted on write-done */
		netif_stop_queue(dev);
		pr_debug("%s: High WM hit, stopping queue=%p\n",
		       __func__, skb);
	}
	spin_unlock_irqrestore(&wwan_ptr->lock, flags);
	return ret;
exit:
	ipa_rm_inactivity_timer_release_resource(
		ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
	return ret;
}
/**
 * wwan_xmit() - Transmits an skb. In charge of asking IPA
 * RM needed resources. In case that IPA RM is not ready, then
 * the skb is saved for transmitting as soon as IPA RM resources
 * are granted.
 *
 * @skb: skb to be transmitted
 * @dev: network device
 *
 * Return codes:
 * 0: success (skb sent, or dropped due to SSR)
 * NETDEV_TX_BUSY: Error while transmitting the skb. The stack
 * retains ownership and will retry later
 * -EFAULT: Error while requesting the IPA RM resource; the skb
 * is freed before returning
 */
static int wwan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct wwan_private *wwan_ptr = netdev_priv(dev);
	unsigned long flags;
	int ret = 0;

	if (netif_queue_stopped(dev)) {
		pr_err("[%s]fatal: wwan_xmit called when netif_queue stopped\n",
		       dev->name);
		/*
		 * Any return other than NETDEV_TX_BUSY means the skb was
		 * consumed; returning 0 here without freeing it leaked the
		 * skb.  Return BUSY so the stack keeps it and retries.
		 */
		return NETDEV_TX_BUSY;
	}
	ret = ipa_rm_inactivity_timer_request_resource(
		ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
	if (ret == -EINPROGRESS) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	if (ret) {
		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
		       dev->name, ret);
		/* non-BUSY return consumes the skb: free it, don't leak it */
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}
	ret = wwan_send_packet(skb, dev);
	if (ret == -EPERM) {
		ret = NETDEV_TX_BUSY;
		goto exit;
	}
	/*
	 * detected SSR a bit early.  shut some things down now, and leave
	 * the rest to the main ssr handling code when that happens later
	 */
	if (ret == -EFAULT) {
		netif_carrier_off(dev);
		dev_kfree_skb_any(skb);
		ret = 0;
		goto exit;
	}
	if (ret == -EAGAIN) {
		/*
		 * This should not happen
		 * EAGAIN means we attempted to overflow the high watermark
		 * Clearly the queue is not stopped like it should be, so
		 * stop it and return BUSY to the TCP/IP framework.  It will
		 * retry this packet when the queue is restarted which happens
		 * in the write_done callback when the low watermark is hit.
		 */
		netif_stop_queue(dev);
		ret = NETDEV_TX_BUSY;
		goto exit;
	}
	spin_lock_irqsave(&wwan_ptr->lock, flags);
	if (a2_mux_is_ch_full(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id])) {
		netif_stop_queue(dev);
		pr_debug("%s: High WM hit, stopping queue=%p\n",
		       __func__, skb);
	}
	spin_unlock_irqrestore(&wwan_ptr->lock, flags);
	return ret;
exit:
	ipa_rm_inactivity_timer_release_resource(
		ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
	return ret;
}