Code example #1
File: bam_dmux.c  Project: yxsh/ZTE-Blade-2.6.38.6
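/*
 * Queue a command buffer on the BAM TX pipe: allocate a tx_pkt_info,
 * DMA-map the command data for the device and submit it with
 * sps_transfer_one(), all while holding bam_mux_lock.
 */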
static int bam_mux_write_cmd(void *data, uint32_t len)
{
	int rc;
	struct tx_pkt_info *pkt;
	dma_addr_t dma_address;

	mutex_lock(&bam_mux_lock);
	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_KERNEL);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		rc = -ENOMEM;
		mutex_unlock(&bam_mux_lock);
		return rc;
	}

	dma_address = dma_map_single(NULL, data, len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		kfree(pkt);	/* don't leak the tx_pkt_info on this error path */
		rc = -ENOMEM;
		mutex_unlock(&bam_mux_lock);
		return rc;
	}
	pkt->skb = (struct sk_buff *)(data);
	pkt->len = len;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 1;
	rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);

	mutex_unlock(&bam_mux_lock);
	return rc;
}
Code example #2
File: bam_dmux.c  Project: yxsh/ZTE-Blade-2.6.38.6
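/*
 * Refill the BAM RX pipe with a single receive buffer: allocate an skb,
 * DMA-map its data area and hand the buffer to the hardware with
 * sps_transfer_one().
 */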
static void queue_rx(void)
{
	void *ptr;
	rx_skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
	ptr = skb_put(rx_skb, BUFFER_SIZE);
	/* need a way to handle error case */
	rx_skb_dma_addr = dma_map_single(NULL, ptr, BUFFER_SIZE,
						DMA_FROM_DEVICE);
	sps_transfer_one(bam_rx_pipe, rx_skb_dma_addr,
				BUFFER_SIZE, NULL,
				SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
}
Code example #3
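/*
 * Queue one receive buffer on the bridge RX pipe for the given direction:
 * allocate an ipa_pkt_info and a DMA-able buffer, map it, link it into the
 * pipe's descriptor list and submit it with sps_transfer_one().
 */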
static int queue_rx_single(enum ipa_bridge_dir dir)
{
	struct ipa_bridge_pipe_context *sys_rx = &bridge[2 * dir];
	struct ipa_pkt_info *info;
	int ret;

	info = kmalloc(sizeof(struct ipa_pkt_info), GFP_KERNEL);
	if (!info) {
		IPAERR("unable to alloc rx_pkt_info\n");
		goto fail_pkt;
	}

	info->buffer = kmalloc(IPA_RX_SKB_SIZE, GFP_KERNEL | GFP_DMA);
	if (!info->buffer) {
		IPAERR("unable to alloc rx_pkt_buffer\n");
		goto fail_buffer;
	}

	info->dma_address = dma_map_single(NULL, info->buffer, IPA_RX_SKB_SIZE,
					   DMA_BIDIRECTIONAL);
	if (info->dma_address == 0 || info->dma_address == ~0) {
		IPAERR("dma_map_single failure %p for %p\n",
				(void *)info->dma_address, info->buffer);
		goto fail_dma;
	}

	info->len = ~0;

	list_add_tail(&info->list_node, &sys_rx->head_desc_list);
	ret = sps_transfer_one(sys_rx->pipe, info->dma_address,
			       IPA_RX_SKB_SIZE, info,
			       SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (ret) {
		list_del(&info->list_node);
		dma_unmap_single(NULL, info->dma_address, IPA_RX_SKB_SIZE,
				 DMA_BIDIRECTIONAL);
		IPAERR("sps_transfer_one failed %d\n", ret);
		goto fail_dma;
	}
	sys_rx->len++;
	return 0;

fail_dma:
	kfree(info->buffer);
fail_buffer:
	kfree(info);
fail_pkt:
	IPAERR("failed\n");
	return -ENOMEM;
}
Code example #4
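/*
 * Push a command or data buffer to the QPIC LCDC over BAM: for commands the
 * parameters are copied into a pre-mapped command buffer, the command code
 * is programmed into the low byte of QPIC_LCDC_CFG2, the buffer is submitted
 * in blocks of at most 0x7FF0 bytes via sps_transfer_one() (EOT flag on the
 * last block), and the function waits for the transfer-complete notification.
 */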
int qpic_flush_buffer_bam(u32 cmd, u32 len, u32 *param, u32 is_cmd)
{
	int ret = 0;
	u32 phys_addr, cfg2, block_len, flags;
	if (is_cmd) {
		memcpy((u8 *)qpic_res->cmd_buf_virt, param, len);
		invalidate_caches((unsigned long)qpic_res->cmd_buf_virt,
				  len,
				  (unsigned long)qpic_res->cmd_buf_phys);
		phys_addr = qpic_res->cmd_buf_phys;
	} else {
		phys_addr = (u32)param;
	}

	cfg2 = QPIC_INP(QPIC_REG_QPIC_LCDC_CFG2);
	cfg2 &= ~0xFF;
	cfg2 |= cmd;
	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, cfg2);
	block_len = 0x7FF0;
	while (len > 0)  {
		if (len <= 0x7FF0) {
			flags = SPS_IOVEC_FLAG_EOT;
			block_len = len;
		} else {
			flags = 0;
		}
		ret = sps_transfer_one(qpic_res->qpic_endpt.handle,
				phys_addr, block_len, NULL, flags);
		if (ret)
			pr_err("failed to submit command %x ret %d\n",
				cmd, ret);
		phys_addr += block_len;
		len -= block_len;
	}
	ret = wait_for_completion_interruptible_timeout(
		&qpic_res->qpic_endpt.completion,
		msecs_to_jiffies(100 * 4));
	if (ret <= 0)
		pr_err("%s timeout %x", __func__, ret);
	else
		ret = 0;
	return ret;
}
Code example #5
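/*
 * Queue a receive buffer on the BAM RX pipe: allocate an rx_pkt_info and an
 * skb, add the packet to bam_rx_pool, DMA-map the skb data and submit the
 * buffer with sps_transfer_one().
 */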
static void queue_rx(void)
{
	void *ptr;
	struct rx_pkt_info *info;

	info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
	if (!info)
		return; /* need a better way to handle this */

	INIT_WORK(&info->work, handle_bam_mux_cmd);

	info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
	ptr = skb_put(info->skb, BUFFER_SIZE);

	mutex_lock(&bam_rx_pool_lock);
	list_add_tail(&info->list_node, &bam_rx_pool);
	mutex_unlock(&bam_rx_pool_lock);

	/* need a way to handle error case */
	info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
						DMA_FROM_DEVICE);
	sps_transfer_one(bam_rx_pipe, info->dma_address,
				BUFFER_SIZE, info, 0);
}
Code example #6
File: bam_dmux.c  Project: yxsh/ZTE-Blade-2.6.38.6
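/*
 * Transmit an skb on a BAM DMUX channel: validate the channel, prepend a
 * bam_mux_hdr (padding the payload to a 4-byte boundary if needed), DMA-map
 * the frame and queue it on the TX pipe with sps_transfer_one().
 */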
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	spin_lock_irqsave(&bam_mux_write_lock, flags);
	/* if the skb does not have enough tailroom for padding,
	   copy it into a new, expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb() plus memcpy() is probably more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			rc = -ENOMEM;
			goto write_done;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* the caller should allocate space for the hdr and padding;
	   hdr is fine, padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
		rc = -ENOMEM;
		goto write_done;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
		kfree(pkt);
		rc = -ENOMEM;
		goto write_done;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	spin_unlock_irqrestore(&bam_mux_write_lock, flags);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	return rc;

write_done:
	spin_unlock_irqrestore(&bam_mux_write_lock, flags);
	return rc;
}
Code example #7
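/*
 * Polling loop for one bridge direction: drain completed RX descriptors with
 * sps_get_iovec(), re-arm the RX pipe with a fresh buffer, forward each
 * received buffer to the TX pipe via sps_transfer_one(), and fall back to
 * interrupt mode after enough inactive polling cycles.
 */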
static void ipa_do_bridge_work(enum ipa_bridge_dir dir)
{
	struct ipa_bridge_pipe_context *sys_rx = &bridge[2 * dir];
	struct ipa_bridge_pipe_context *sys_tx = &bridge[2 * dir + 1];
	struct ipa_pkt_info *tx_pkt;
	struct ipa_pkt_info *rx_pkt;
	struct ipa_pkt_info *tmp_pkt;
	struct sps_iovec iov;
	int ret;
	int inactive_cycles = 0;

	while (1) {
		++inactive_cycles;

		if (ipa_reclaim_tx(sys_tx, false))
			inactive_cycles = 0;

		iov.addr = 0;
		ret = sps_get_iovec(sys_rx->pipe, &iov);
		if (ret || iov.addr == 0) {
			/* no-op */
		} else {
			inactive_cycles = 0;

			rx_pkt = list_first_entry(&sys_rx->head_desc_list,
						  struct ipa_pkt_info,
						  list_node);
			list_del(&rx_pkt->list_node);
			sys_rx->len--;
			rx_pkt->len = iov.size;

retry_alloc_tx:
			if (list_empty(&sys_tx->free_desc_list)) {
				tmp_pkt = kmalloc(sizeof(struct ipa_pkt_info),
						GFP_KERNEL);
				if (!tmp_pkt) {
					pr_debug_ratelimited("%s: unable to alloc tx_pkt_info\n",
					       __func__);
					usleep_range(polling_min_sleep[dir],
							polling_max_sleep[dir]);
					goto retry_alloc_tx;
				}

				tmp_pkt->buffer = kmalloc(IPA_RX_SKB_SIZE,
						GFP_KERNEL | GFP_DMA);
				if (!tmp_pkt->buffer) {
					pr_debug_ratelimited("%s: unable to alloc tx_pkt_buffer\n",
					       __func__);
					kfree(tmp_pkt);
					usleep_range(polling_min_sleep[dir],
							polling_max_sleep[dir]);
					goto retry_alloc_tx;
				}

				tmp_pkt->dma_address = dma_map_single(NULL,
						tmp_pkt->buffer,
						IPA_RX_SKB_SIZE,
						DMA_BIDIRECTIONAL);
				if (tmp_pkt->dma_address == 0 ||
						tmp_pkt->dma_address == ~0) {
					pr_debug_ratelimited("%s: dma_map_single failure %p for %p\n",
					       __func__,
					       (void *)tmp_pkt->dma_address,
					       tmp_pkt->buffer);
				}

				list_add_tail(&tmp_pkt->list_node,
						&sys_tx->free_desc_list);
				sys_tx->free_len++;
				alloc_cnt[dir]++;

				tmp_pkt->len = ~0;
			}

			tx_pkt = list_first_entry(&sys_tx->free_desc_list,
						  struct ipa_pkt_info,
						  list_node);
			list_del(&tx_pkt->list_node);
			sys_tx->free_len--;

retry_add_rx:
			list_add_tail(&tx_pkt->list_node,
					&sys_rx->head_desc_list);
			ret = sps_transfer_one(sys_rx->pipe,
					tx_pkt->dma_address,
					IPA_RX_SKB_SIZE,
					tx_pkt,
					SPS_IOVEC_FLAG_INT |
					SPS_IOVEC_FLAG_EOT);
			if (ret) {
				list_del(&tx_pkt->list_node);
				pr_debug_ratelimited("%s: sps_transfer_one failed %d\n",
						__func__, ret);
				usleep_range(polling_min_sleep[dir],
						polling_max_sleep[dir]);
				goto retry_add_rx;
			}
			sys_rx->len++;

retry_add_tx:
			list_add_tail(&rx_pkt->list_node,
					&sys_tx->head_desc_list);
			ret = sps_transfer_one(sys_tx->pipe,
					       rx_pkt->dma_address,
					       iov.size,
					       rx_pkt,
					       SPS_IOVEC_FLAG_INT |
					       SPS_IOVEC_FLAG_EOT);
			if (ret) {
				pr_debug_ratelimited("%s: fail to add to TX dir=%d\n",
						__func__, dir);
				list_del(&rx_pkt->list_node);
				ipa_reclaim_tx(sys_tx, true);
				usleep_range(polling_min_sleep[dir],
						polling_max_sleep[dir]);
				goto retry_add_tx;
			}
			sys_tx->len++;
		}

		if (inactive_cycles >= polling_inactivity[dir]) {
			ipa_switch_to_intr_mode(dir);
			break;
		}
	}
}