/**
 * msm_rmnet_sdio_close() - close a logical rmnet channel over SDIO
 * @id: channel to close
 */
int msm_rmnet_sdio_close(uint32_t id)
{
	struct sdio_mux_hdr hdr;
	unsigned long flags;

	if (id >= 8) {
		pr_err("[lte] Error - %s, EINVAL\n", __func__);
		return -EINVAL;
	}

	pr_info("[lte] %s: closing ch %d\n", __func__, id);
	spin_lock_irqsave(&sdio_ch[id].lock, flags);

	/* refuse to close while an skb is still pending on this channel */
	if (sdio_ch[id].skb) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("[lte] Error - %s, EINVAL\n", __func__);
		return -EINVAL;
	}

	sdio_ch[id].receive_cb = NULL;
	sdio_ch[id].priv = NULL;
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	hdr.magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr.cmd = SDIO_MUX_HDR_CMD_CLOSE;
	hdr.reserved = 0;
	hdr.ch_id = id;
	hdr.pkt_len = 0;
	hdr.pad_len = 0;

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));

	pr_info("[lte] %s: closed ch %d\n", __func__, id);
	return 0;
}
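
/**
 * msm_sdio_dmux_close() - close a logical DMUX channel
 * @id: channel to close
 *
 * Clears the channel callbacks and the LOCAL_OPEN/IN_RESET status
 * bits under the channel lock, then sends a CLOSE command to the
 * remote side. Returns -EINVAL for an out-of-range id and -ENODEV
 * if the mux has not been initialized.
 */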
int msm_sdio_dmux_close(uint32_t id)
{
	struct sdio_mux_hdr hdr;
	unsigned long flags;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;
	DBG("%s: closing ch %d\n", __func__, id);
	if (!sdio_mux_initialized)
		return -ENODEV;
	spin_lock_irqsave(&sdio_ch[id].lock, flags);

	sdio_ch[id].receive_cb = NULL;
	sdio_ch[id].priv = NULL;
	sdio_ch[id].status &= ~SDIO_CH_LOCAL_OPEN;
	sdio_ch[id].status &= ~SDIO_CH_IN_RESET;
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	hdr.magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr.cmd = SDIO_MUX_HDR_CMD_CLOSE;
	hdr.reserved = 0;
	hdr.ch_id = id;
	hdr.pkt_len = 0;
	hdr.pad_len = 0;

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));

	pr_info("%s: closed ch %d\n", __func__, id);
	return 0;
}
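
/**
 * msm_rmnet_sdio_open() - open a logical rmnet channel over SDIO
 * @id: channel to open
 * @priv: opaque pointer handed back in the callbacks
 * @receive_cb: invoked for each skb received on this channel
 * @write_done: invoked once a queued skb has been written out
 *
 * Registers the callbacks under the channel lock and sends an OPEN
 * command to the remote side. Opening a channel that is already open
 * is treated as success and leaves the existing callbacks in place.
 */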
int msm_rmnet_sdio_open(uint32_t id, void *priv,
			void (*receive_cb)(void *, struct sk_buff *),
			void (*write_done)(void *, struct sk_buff *))
{
	struct sdio_mux_hdr hdr;
	unsigned long flags;

	pr_info("[lte] %s: opening ch %d\n", __func__, id);
	if (id >= 8) {
		pr_err("[lte] Error - %s, EINVAL\n", __func__);
		return -EINVAL;
	}

	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_local_open(id)) {
		pr_info("[lte] %s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		goto open_done;
	}

	sdio_ch[id].receive_cb = receive_cb;
	sdio_ch[id].write_done = write_done;
	sdio_ch[id].priv = priv;
	sdio_ch[id].status |= SDIO_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	hdr.magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr.cmd = SDIO_MUX_HDR_CMD_OPEN;
	hdr.reserved = 0;
	hdr.ch_id = id;
	hdr.pkt_len = 0;
	hdr.pad_len = 0;

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));

open_done:
	pr_info("[lte] %s: opened ch %d\n", __func__, id);
	return 0;
}
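
/*
 * Minimal usage sketch (my_priv, my_receive and my_write_done are
 * hypothetical client-side names, not part of this driver):
 *
 *	if (msm_rmnet_sdio_open(0, my_priv, my_receive, my_write_done))
 *		pr_err("rmnet channel 0 open failed\n");
 *	...
 *	msm_rmnet_sdio_close(0);
 */

/*
 * Builds and sends an OPEN command for @id without touching local
 * channel state; this is the same command header that
 * msm_rmnet_sdio_open() sends inline.
 */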
static void sdio_mux_send_open_cmd(uint32_t id)
{
	struct sdio_mux_hdr hdr = {
		.magic_num = SDIO_MUX_HDR_MAGIC_NO,
		.cmd = SDIO_MUX_HDR_CMD_OPEN,
		.reserved = 0,
		.ch_id = id,
		.pkt_len = 0,
		.pad_len = 0
	};

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));
}

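/*
 * Workqueue handler that drains sdio_mux_write_pool. For each queued
 * skb it either defers until sdio_al reports enough write room,
 * completes the skb through write_done() (or frees it) on success,
 * retries later on -EAGAIN/-ENOMEM, flushes the whole pool on a
 * fatal -ENODEV, or drops the skb with notification on any other
 * error.
 */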
static void sdio_mux_write_data(struct work_struct *work)
{
	int rc, reschedule = 0;
	int notify = 0;
	struct sk_buff *skb;
	unsigned long flags;
	int avail;
	int ch_id;

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	while ((skb = __skb_dequeue(&sdio_mux_write_pool))) {
		ch_id = ((struct sdio_mux_hdr *)skb->data)->ch_id;

		avail = sdio_write_avail(sdio_mux_ch);
		if (avail < skb->len) {
			/* we may have to wait for write avail
			 * notification from sdio al
			 */
			DBG("%s: sdio_write_avail(%d) < skb->len(%d)\n",
			    __func__, avail, skb->len);

			reschedule = 1;
			break;
		}
		/* drop the lock across the actual write to sdio_al */
		spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
		rc = sdio_mux_write(skb);
		spin_lock_irqsave(&sdio_mux_write_lock, flags);
		if (rc == 0) {
			spin_lock(&sdio_ch[ch_id].lock);
			sdio_ch[ch_id].num_tx_pkts--;
			spin_unlock(&sdio_ch[ch_id].lock);

			/* hand the skb back to the client, or free it
			 * if no write_done callback is registered
			 */
			if (sdio_ch[ch_id].write_done)
				sdio_ch[ch_id].write_done(
					sdio_ch[ch_id].priv, skb);
			else
				dev_kfree_skb_any(skb);
		} else if (rc == -EAGAIN || rc == -ENOMEM) {
			/* recoverable error - retry again later */
			reschedule = 1;
			break;
		} else if (rc == -ENODEV) {
			/*
			 * sdio_al suffered some kind of fatal error
			 * prevent future writes and clean up pending ones
			 */
			fatal_error = 1;
			do {
				ch_id = ((struct sdio_mux_hdr *)
						skb->data)->ch_id;
				spin_lock(&sdio_ch[ch_id].lock);
				sdio_ch[ch_id].num_tx_pkts--;
				spin_unlock(&sdio_ch[ch_id].lock);
				dev_kfree_skb_any(skb);
			} while ((skb = __skb_dequeue(&sdio_mux_write_pool)));
			spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
			return;
		} else {
			/* unknown error condition - drop the skb and
			 * reschedule for the other skbs
			 */
			pr_err("%s: sdio_mux_write error %d for ch %d, skb=%p\n",
			       __func__, rc, ch_id, skb);
			notify = 1;
			break;
		}
	}

	if (reschedule) {
		if (sdio_ch_is_in_reset(ch_id)) {
			notify = 1;
		} else {
			/* put the skb back and retry once sdio_al has room */
			__skb_queue_head(&sdio_mux_write_pool, skb);
			queue_delayed_work(sdio_mux_workqueue,
					   &delayed_work_sdio_mux_write,
					   msecs_to_jiffies(250));
		}
	}

	if (notify) {
		spin_lock(&sdio_ch[ch_id].lock);
		sdio_ch[ch_id].num_tx_pkts--;
		spin_unlock(&sdio_ch[ch_id].lock);

		if (sdio_ch[ch_id].write_done)
			sdio_ch[ch_id].write_done(
				sdio_ch[ch_id].priv, skb);
		else
			dev_kfree_skb_any(skb);
	}
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
}
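
/*
 * Locking note (from the code above): sdio_mux_write_lock is the
 * outer lock and the per-channel sdio_ch[].lock the inner one, and
 * write_done() is invoked with sdio_mux_write_lock still held, so
 * client callbacks should avoid re-taking mux locks.
 */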