Example #1
0
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	spin_lock_irqsave(&bam_mux_write_lock, flags);
	/* if the skb does not have enough tailroom for padding,
	   copy it into a new, expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb() and memcpy() are probably more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			rc = -ENOMEM;
			goto write_done;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* caller should allocate space for the hdr and padding;
	   the hdr is fine, the padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
		rc = -ENOMEM;
		goto write_done;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
		kfree(pkt);
		rc = -ENOMEM;
		goto write_done;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	spin_unlock_irqrestore(&bam_mux_write_lock, flags);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	return rc;

write_done:
	spin_unlock_irqrestore(&bam_mux_write_lock, flags);
	return rc;
}
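
All three write paths in this example share the same framing trick: push a mux header, record the payload length in pkt_len, then pad the buffer to a 4-byte boundary and record the number of pad bytes in pad_len. The following standalone userspace sketch walks through that arithmetic with plain integers; it is not part of the driver, and the demo_hdr field widths are illustrative assumptions, not the real mux header layout.

#include <stdio.h>
#include <stdint.h>

/* Illustrative header only; field widths are assumptions for the demo. */
struct demo_hdr {
	uint32_t magic_num;
	uint8_t  reserved;
	uint8_t  cmd;
	uint8_t  pad_len;
	uint8_t  ch_id;
	uint16_t pkt_len;
};

int main(void)
{
	uint32_t payload = 41;	/* deliberately not 4-byte aligned */
	/* after "skb_push(hdr)" the total length covers the header too */
	uint32_t len = (uint32_t)sizeof(struct demo_hdr) + payload;
	struct demo_hdr hdr = { .pkt_len = (uint16_t)payload };

	/* mirror of: if (skb->len & 0x3) skb_put(skb, 4 - (skb->len & 0x3)); */
	if (len & 0x3)
		len += 4 - (len & 0x3);

	/* pad_len records how many trailing bytes are padding */
	hdr.pad_len = (uint8_t)(len - (sizeof(struct demo_hdr) + hdr.pkt_len));

	printf("total len %u = hdr %u + pkt_len %u + pad_len %u\n",
	       (unsigned)len, (unsigned)sizeof(struct demo_hdr),
	       (unsigned)hdr.pkt_len, (unsigned)hdr.pad_len);
	return 0;
}

On a typical toolchain (sizeof(struct demo_hdr) == 12) this prints "total len 56 = hdr 12 + pkt_len 41 + pad_len 3", i.e. three trailing pad bytes bring the frame to the next 4-byte boundary.
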
int msm_rmnet_sdio_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct sdio_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb;

	if (!skb) {
		pr_err("[lte] Error - %s\n", __func__);
		return -EINVAL;
	}

	DBG("[lte] %s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (!sdio_ch_is_local_open(id)) {
		pr_err("[lte] Error - %s: port not open: %d\n", __func__, sdio_ch[id].status);
		rc = -ENODEV;
		goto write_done;
	}

	if (sdio_ch[id].skb) {
		pr_err("[lte] Error - %s: packet pending ch: %d\n", __func__, id);
		rc = -EPERM;
		goto write_done;
	}

	/* if the skb does not have enough tailroom for padding,
	   copy it into a new, expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb() and memcpy() are probably more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_KERNEL);
		if (new_skb == NULL) {
			pr_err("[lte] Error - %s: cannot allocate skb\n", __func__);
			rc = -ENOMEM;
			goto write_done;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct sdio_mux_hdr *)skb_push(skb, sizeof(struct sdio_mux_hdr));

	/* caller should allocate space for the hdr and padding;
	   the hdr is fine, the padding is tricky */
	hdr->magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr->cmd = SDIO_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct sdio_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct sdio_mux_hdr) + hdr->pkt_len);

	DBG("[lte] %s: [RIL][write][index %d] data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, hdr->ch_id, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);
	sdio_ch[id].skb = skb;
	queue_work(sdio_mux_workqueue, &work_sdio_mux_write);

write_done:
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
	return rc;
}
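
Unlike the other two variants, msm_rmnet_sdio_write() keeps at most one pending skb per channel (sdio_ch[id].skb) and returns -EPERM while that slot is occupied. A hypothetical caller sketch follows; the wrapper name, channel id, and retry policy are illustrative assumptions, and it is assumed to sit in the same driver file so the kernel types are already in scope.

static int example_sdio_send(uint32_t ch, struct sk_buff *skb)
{
	int rc = msm_rmnet_sdio_write(ch, skb);

	if (rc == -EPERM)
		return rc;	/* previous packet still queued; retry later */
	if (rc < 0)
		dev_kfree_skb_any(skb);	/* other error paths leave the skb with the caller */
	return rc;
}

On success the skb has been handed to the mux (stored in sdio_ch[id].skb and drained by the write work item), so the caller must not free it.
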
int msm_sdio_dmux_write(uint32_t id, struct sk_buff *skb)
{
    int rc = 0;
    struct sdio_mux_hdr *hdr;
    unsigned long flags;
    struct sk_buff *new_skb;

    if (id >= SDIO_DMUX_NUM_CHANNELS)
        return -EINVAL;
    if (!skb)
        return -EINVAL;
    if (!sdio_mux_initialized)
        return -ENODEV;
    if (fatal_error)
        return -ENODEV;

    DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
    spin_lock_irqsave(&sdio_ch[id].lock, flags);
    if (sdio_ch_is_in_reset(id)) {
        spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
        pr_err("%s: port is in reset: %d\n", __func__,
               sdio_ch[id].status);
        return -ENETRESET;
    }
    if (!sdio_ch_is_local_open(id)) {
        spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
        pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
        return -ENODEV;
    }
    if (sdio_ch[id].use_wm &&
            (sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
        spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
        pr_err("%s: watermark exceeded: %d\n", __func__, id);
        return -EAGAIN;
    }
    spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

    spin_lock_irqsave(&sdio_mux_write_lock, flags);
    /* if the skb does not have enough tailroom for padding,
       copy it into a new, expanded skb */
    if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
        /* revisit: dev_alloc_skb() and memcpy() are probably more efficient */
        new_skb = skb_copy_expand(skb, skb_headroom(skb),
                                  4 - (skb->len & 0x3), GFP_ATOMIC);
        if (new_skb == NULL) {
            pr_err("%s: cannot allocate skb\n", __func__);
            rc = -ENOMEM;
            goto write_done;
        }
        dev_kfree_skb_any(skb);
        skb = new_skb;
        DBG_INC_WRITE_CPY(skb->len);
    }

    hdr = (struct sdio_mux_hdr *)skb_push(skb, sizeof(struct sdio_mux_hdr));

    /* caller should allocate space for the hdr and padding;
       the hdr is fine, the padding is tricky */
    hdr->magic_num = SDIO_MUX_HDR_MAGIC_NO;
    hdr->cmd = SDIO_MUX_HDR_CMD_DATA;
    hdr->reserved = 0;
    hdr->ch_id = id;
    hdr->pkt_len = skb->len - sizeof(struct sdio_mux_hdr);
    if (skb->len & 0x3)
        skb_put(skb, 4 - (skb->len & 0x3));

    hdr->pad_len = skb->len - (sizeof(struct sdio_mux_hdr) + hdr->pkt_len);

    DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
        __func__, skb->data, skb->tail, skb->len,
        hdr->pkt_len, hdr->pad_len);
    __skb_queue_tail(&sdio_mux_write_pool, skb);

    spin_lock(&sdio_ch[id].lock);
    sdio_ch[id].num_tx_pkts++;
    spin_unlock(&sdio_ch[id].lock);

    queue_work(sdio_mux_workqueue, &work_sdio_mux_write);

write_done:
    spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
    return rc;
}
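
msm_sdio_dmux_write() adds per-channel flow control: once num_tx_pkts reaches HIGH_WATERMARK it returns -EAGAIN instead of queueing. A hypothetical transmit-path sketch showing how a network driver might translate that into backpressure follows; the net_device wiring, channel id, and statistics handling are assumptions for illustration, not taken from this source.

static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int rc = msm_sdio_dmux_write(0 /* assumed channel id */, skb);

	if (rc == -EAGAIN) {
		/* HIGH_WATERMARK reached: pause the queue and let the core
		 * hand us the same skb again once the queue is woken. */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	if (rc < 0) {
		/* -EINVAL/-ENODEV/-ENETRESET/-ENOMEM: drop the packet;
		 * the error paths above do not consume the skb. */
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

The queue would be restarted (netif_wake_queue()) from a write-done or low-watermark callback, which is outside this excerpt.
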