int msm_sdio_dmux_open(uint32_t id, void *priv,
                       void (*receive_cb)(void *, struct sk_buff *),
                       void (*write_done)(void *, struct sk_buff *))
{
    unsigned long flags;

    DBG("%s: opening ch %d\n", __func__, id);
    if (!sdio_mux_initialized)
        return -ENODEV;
    if (id >= SDIO_DMUX_NUM_CHANNELS)
        return -EINVAL;

    spin_lock_irqsave(&sdio_ch[id].lock, flags);
    if (sdio_ch_is_local_open(id)) {
        pr_info("%s: Already opened %d\n", __func__, id);
        spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
        goto open_done;
    }

    sdio_ch[id].receive_cb = receive_cb;
    sdio_ch[id].write_done = write_done;
    sdio_ch[id].priv = priv;
    sdio_ch[id].status |= SDIO_CH_LOCAL_OPEN;
    sdio_ch[id].num_tx_pkts = 0;
    sdio_ch[id].use_wm = 0;
    spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

    sdio_mux_send_open_cmd(id);

open_done:
    pr_info("%s: opened ch %d\n", __func__, id);
    return 0;
}
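For context, a minimal caller sketch is shown below. The callback names, the use of channel 0, and the skb ownership convention are illustrative assumptions, not part of the driver: the client registers its receive and write-done callbacks, and the mux later invokes them with the client's priv pointer and the relevant sk_buff.

/* Hypothetical client sketch; names and skb ownership convention are assumptions. */
static void my_receive_cb(void *priv, struct sk_buff *skb)
{
    /* consume the demuxed packet, then release it */
    dev_kfree_skb_any(skb);
}

static void my_write_done(void *priv, struct sk_buff *skb)
{
    /* the muxed packet has been handed off to SDIO; release it */
    dev_kfree_skb_any(skb);
}

static int my_client_init(void)
{
    return msm_sdio_dmux_open(0, NULL, my_receive_cb, my_write_done);
}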
Example #2
static void sdio_mux_write_data(struct work_struct *work)
{
	int i, rc, reschedule = 0;
	struct sk_buff *skb;
	unsigned long flags;

	for (i = 0; i < 8; i++) {
		spin_lock_irqsave(&sdio_ch[i].lock, flags);
		if (sdio_ch_is_local_open(i) && sdio_ch[i].skb) {
			skb = sdio_ch[i].skb;
			spin_unlock_irqrestore(&sdio_ch[i].lock, flags);
			DBG("[lte] %s: writing for ch %d\n", __func__, i);
			rc = sdio_mux_write(skb);
			if (rc == -EAGAIN) {
				reschedule = 1;
			} else if (!rc) {
				spin_lock_irqsave(&sdio_ch[i].lock, flags);
				sdio_ch[i].skb = NULL;
				sdio_ch[i].write_done(sdio_ch[i].priv, skb);
				spin_unlock_irqrestore(&sdio_ch[i].lock, flags);
			}
		} else
			spin_unlock_irqrestore(&sdio_ch[i].lock, flags);
	}

	/* probably should use delayed work */
	if (reschedule)
		queue_work(sdio_mux_workqueue, &work_sdio_mux_write);
}
Example #3
static void sdio_mux_write_data(struct work_struct *work)
{
	int i, rc, reschedule = 0;
	struct sk_buff *skb;
	unsigned long flags;

	for (i = 0; i < SDIO_DMUX_NUM_CHANNELS; ++i) {
		spin_lock_irqsave(&sdio_ch[i].lock, flags);
		if (sdio_ch_is_local_open(i) && sdio_ch[i].skb) {
			skb = sdio_ch[i].skb;
			spin_unlock_irqrestore(&sdio_ch[i].lock, flags);
			DBG("%s: writing for ch %d\n", __func__, i);
			rc = sdio_mux_write(skb);
			if (rc == -EAGAIN || rc == -ENOMEM) {
				reschedule = 1;
			} else if (!rc) {
				spin_lock_irqsave(&sdio_ch[i].lock, flags);
				sdio_ch[i].skb = NULL;
				sdio_ch[i].write_done(sdio_ch[i].priv, skb);
				spin_unlock_irqrestore(&sdio_ch[i].lock, flags);
			}
		} else
			spin_unlock_irqrestore(&sdio_ch[i].lock, flags);
	}

	if (reschedule)
		queue_delayed_work(sdio_mux_write_workqueue,
					&delayed_work_sdio_mux_write,
					msecs_to_jiffies(250));
}
static int debug_tbl(char *buf, int max)
{
    int i = 0;
    int j;

    for (j = 0; j < SDIO_DMUX_NUM_CHANNELS; ++j) {
        i += scnprintf(buf + i, max - i,
                       "ch%02d  local open=%s  remote open=%s\n",
                       j, sdio_ch_is_local_open(j) ? "Y" : "N",
                       sdio_ch_is_remote_open(j) ? "Y" : "N");
    }

    return i;
}
int msm_sdio_dmux_is_ch_low(uint32_t id)
{
    int ret;

    if (id >= SDIO_DMUX_NUM_CHANNELS)
        return -EINVAL;

    sdio_ch[id].use_wm = 1;
    ret = sdio_ch[id].num_tx_pkts <= LOW_WATERMARK;
    DBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
        id, sdio_ch[id].num_tx_pkts, ret);
    if (!sdio_ch_is_local_open(id)) {
        ret = -ENODEV;
        pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
    }

    return ret;
}
int msm_sdio_dmux_is_ch_full(uint32_t id)
{
    unsigned long flags;
    int ret;

    if (id >= SDIO_DMUX_NUM_CHANNELS)
        return -EINVAL;

    spin_lock_irqsave(&sdio_ch[id].lock, flags);
    sdio_ch[id].use_wm = 1;
    ret = sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK;
    DBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
        id, sdio_ch[id].num_tx_pkts, ret);
    if (!sdio_ch_is_local_open(id)) {
        ret = -ENODEV;
        pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
    }
    spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

    return ret;
}
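Both helpers set use_wm, which is what makes msm_sdio_dmux_write() start rejecting packets with -EAGAIN once num_tx_pkts reaches HIGH_WATERMARK. A minimal flow-control sketch follows; my_wake_producer(), my_stop_producer(), and the hardcoded channel 0 are hypothetical stand-ins for however the caller throttles its producer (for example, stopping and waking a netdev queue).

/* Hypothetical client; my_wake_producer()/my_stop_producer() are caller-defined. */
static void my_wake_producer(void);
static void my_stop_producer(void);

static void my_write_done(void *priv, struct sk_buff *skb)
{
    dev_kfree_skb_any(skb);

    /* restart the producer once the channel drains below the low watermark */
    if (msm_sdio_dmux_is_ch_low(0) > 0)    /* channel 0 is illustrative */
        my_wake_producer();
}

static int my_client_xmit(uint32_t ch, struct sk_buff *skb)
{
    int rc;

    rc = msm_sdio_dmux_write(ch, skb);
    if (rc)
        return rc;

    /* stop feeding the mux once the per-channel high watermark is reached */
    if (msm_sdio_dmux_is_ch_full(ch) > 0)
        my_stop_producer();

    return 0;
}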
Example #7
int msm_rmnet_sdio_open(uint32_t id, void *priv,
			void (*receive_cb)(void *, struct sk_buff *),
			void (*write_done)(void *, struct sk_buff *))
{
	struct sdio_mux_hdr hdr;
	unsigned long flags;

	pr_info("[lte] %s: opening ch %d\n", __func__, id);
	if (id >= 8) {
		pr_err("[lte] Error - %s, EINVAL\n", __func__);
		return -EINVAL;
	}

	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_local_open(id)) {
		pr_info("[lte] %s: Already opened %d\n", __func__, id);
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		goto open_done;
	}

	sdio_ch[id].receive_cb = receive_cb;
	sdio_ch[id].write_done = write_done;
	sdio_ch[id].priv = priv;
	sdio_ch[id].status |= SDIO_CH_LOCAL_OPEN;
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	hdr.magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr.cmd = SDIO_MUX_HDR_CMD_OPEN;
	hdr.reserved = 0;
	hdr.ch_id = id;
	hdr.pkt_len = 0;
	hdr.pad_len = 0;

	sdio_mux_write_cmd((void *)&hdr, sizeof(hdr));

open_done:
	pr_info("[lte] %s: opened ch %d\n", __func__, id);
	return 0;
}
Example #8
int msm_rmnet_sdio_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct sdio_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb;

	if (!skb) {
		pr_err("[lte] Error - %s\n", __func__);
		return -EINVAL;
	}

	DBG("[lte] %s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (!sdio_ch_is_local_open(id)) {
		pr_err("[lte] Error - %s: port not open: %d\n", __func__, sdio_ch[id].status);
		rc = -ENODEV;
		goto write_done;
	}

	if (sdio_ch[id].skb) {
		pr_err("[lte] Error - %s: packet pending ch: %d\n", __func__, id);
		rc = -EPERM;
		goto write_done;
	}

	/* if the skb does not have any tailroom for padding,
	   copy it into a new, expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb() plus memcpy() is probably more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_KERNEL);
		if (new_skb == NULL) {
			pr_err("[lte] Error - %s: cannot allocate skb\n", __func__);
			rc = -ENOMEM;
			goto write_done;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct sdio_mux_hdr *)skb_push(skb, sizeof(struct sdio_mux_hdr));

	/* the caller should allocate room for the header and padding;
	   the header is straightforward, the padding is tricky */
	hdr->magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr->cmd = SDIO_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct sdio_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct sdio_mux_hdr) + hdr->pkt_len);

	DBG("[lte] %s: [RIL][write][index %d] data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, hdr->ch_id, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);
	sdio_ch[id].skb = skb;
	queue_work(sdio_mux_workqueue, &work_sdio_mux_write);

write_done:
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
	return rc;
}
int msm_sdio_dmux_write(uint32_t id, struct sk_buff *skb)
{
    int rc = 0;
    struct sdio_mux_hdr *hdr;
    unsigned long flags;
    struct sk_buff *new_skb;

    if (id >= SDIO_DMUX_NUM_CHANNELS)
        return -EINVAL;
    if (!skb)
        return -EINVAL;
    if (!sdio_mux_initialized)
        return -ENODEV;
    if (fatal_error)
        return -ENODEV;

    DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
    spin_lock_irqsave(&sdio_ch[id].lock, flags);
    if (sdio_ch_is_in_reset(id)) {
        spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
        pr_err("%s: port is in reset: %d\n", __func__,
               sdio_ch[id].status);
        return -ENETRESET;
    }
    if (!sdio_ch_is_local_open(id)) {
        spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
        pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
        return -ENODEV;
    }
    if (sdio_ch[id].use_wm &&
            (sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
        spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
        pr_err("%s: watermark exceeded: %d\n", __func__, id);
        return -EAGAIN;
    }
    spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

    spin_lock_irqsave(&sdio_mux_write_lock, flags);
    /* if the skb does not have any tailroom for padding,
       copy it into a new, expanded skb */
    if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
        /* revisit: dev_alloc_skb() plus memcpy() is probably more efficient */
        new_skb = skb_copy_expand(skb, skb_headroom(skb),
                                  4 - (skb->len & 0x3), GFP_ATOMIC);
        if (new_skb == NULL) {
            pr_err("%s: cannot allocate skb\n", __func__);
            rc = -ENOMEM;
            goto write_done;
        }
        dev_kfree_skb_any(skb);
        skb = new_skb;
        DBG_INC_WRITE_CPY(skb->len);
    }

    hdr = (struct sdio_mux_hdr *)skb_push(skb, sizeof(struct sdio_mux_hdr));

    /* the caller should allocate room for the header and padding;
       the header is straightforward, the padding is tricky */
    hdr->magic_num = SDIO_MUX_HDR_MAGIC_NO;
    hdr->cmd = SDIO_MUX_HDR_CMD_DATA;
    hdr->reserved = 0;
    hdr->ch_id = id;
    hdr->pkt_len = skb->len - sizeof(struct sdio_mux_hdr);
    if (skb->len & 0x3)
        skb_put(skb, 4 - (skb->len & 0x3));

    hdr->pad_len = skb->len - (sizeof(struct sdio_mux_hdr) + hdr->pkt_len);

    DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
        __func__, skb->data, skb->tail, skb->len,
        hdr->pkt_len, hdr->pad_len);
    __skb_queue_tail(&sdio_mux_write_pool, skb);

    spin_lock(&sdio_ch[id].lock);
    sdio_ch[id].num_tx_pkts++;
    spin_unlock(&sdio_ch[id].lock);

    queue_work(sdio_mux_workqueue, &work_sdio_mux_write);

write_done:
    spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
    return rc;
}
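To make the header and padding arithmetic concrete, here is a worked example with an illustrative 61-byte payload; it assumes sizeof(struct sdio_mux_hdr) is a multiple of 4, so pushing the header does not change the frame's 4-byte alignment.

/*
 * Illustrative only: 61-byte payload, 4-byte-aligned header size assumed.
 *
 *   before:             skb->len = 61, and 61 & 0x3 == 1, so 3 pad bytes are needed
 *   after skb_push():   skb->len = sizeof(hdr) + 61
 *   hdr->pkt_len      = skb->len - sizeof(hdr) = 61     (unpadded payload length)
 *   after skb_put(3):   skb->len = sizeof(hdr) + 64
 *   hdr->pad_len      = skb->len - (sizeof(hdr) + 61) = 3
 *
 * A receiver can then deliver pkt_len payload bytes and skip pad_len trailing bytes.
 */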