Example #1
static void free_tx_descriptor(struct work_struct *work)
{
	struct sps_iovec iov;

	/* mark consumed descriptor as free; return code and iovec unused */
	sps_get_iovec(bam_tx_pipe, &iov);
}
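Example #1 only advances the pipe's descriptor FIFO; it ignores both the return code and the returned iovec. A minimal sketch of how such a work item could be wired to the pipe, assuming the SPS event-registration API (sps_register_event with an EOT trigger); bam_tx_notify, tx_work and tx_workqueue are illustrative names, not part of the original driver:

static struct workqueue_struct *tx_workqueue;	/* assumed, created at init */
static DECLARE_WORK(tx_work, free_tx_descriptor);

static void bam_tx_notify(struct sps_event_notify *notify)
{
	/* assumed wiring: queue the cleanup work on each end-of-transfer */
	if (notify->event_id == SPS_EVENT_EOT)
		queue_work(tx_workqueue, &tx_work);
}

static int register_tx_event(struct sps_pipe *pipe)
{
	struct sps_register_event reg = {
		.options = SPS_O_EOT,
		.mode = SPS_TRIGGER_CALLBACK,
		.callback = bam_tx_notify,
	};

	return sps_register_event(pipe, &reg);
}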
Example #2
static int ipa_reclaim_tx(struct ipa_bridge_pipe_context *sys_tx, bool all)
{
	struct sps_iovec iov;
	struct ipa_pkt_info *tx_pkt;
	int cnt = 0;
	int ret;

	do {
		iov.addr = 0;
		ret = sps_get_iovec(sys_tx->pipe, &iov);
		if (ret || iov.addr == 0)
			break;

		/* oldest outstanding descriptor is the one just consumed */
		tx_pkt = list_first_entry(&sys_tx->head_desc_list,
					  struct ipa_pkt_info,
					  list_node);
		list_move_tail(&tx_pkt->list_node,
				&sys_tx->free_desc_list);
		sys_tx->len--;
		sys_tx->free_len++;
		tx_pkt->len = ~0;	/* ~0 marks the descriptor as free */
		cnt++;
	} while (all);

	return cnt;
}
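With all == false the loop body runs once, so at most one completed descriptor is reclaimed per call; with all == true it keeps looping until sps_get_iovec() reports an empty pipe. A hedged usage sketch, where flush_tx_on_teardown is a hypothetical caller rather than a function from the original driver:

static void flush_tx_on_teardown(struct ipa_bridge_pipe_context *sys_tx)
{
	/* drain every completed TX descriptor back to the free list */
	int reclaimed = ipa_reclaim_tx(sys_tx, true);

	pr_debug("%s: reclaimed %d tx descriptors\n", __func__, reclaimed);
}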
Example #3
static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;
	struct sps_iovec iov;

	/* recover the skb backing the descriptor this work item represents */
	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;

	/* mark consumed descriptor as free */
	sps_get_iovec(bam_rx_pipe, &iov);

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		break;
	default:
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}
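handle_bam_mux_cmd reads six fields straight out of the SKB data. The header definition is not part of this excerpt; a reconstruction consistent with the accesses above, with field widths assumed rather than taken from the driver, would look like:

struct bam_mux_hdr {
	uint16_t magic_num;	/* must equal BAM_MUX_HDR_MAGIC_NO */
	uint8_t  reserved;
	uint8_t  cmd;		/* BAM_MUX_HDR_CMD_DATA/OPEN/CLOSE */
	uint8_t  pad_len;	/* padding appended after the payload */
	uint8_t  ch_id;		/* index into bam_ch[] */
	uint16_t pkt_len;	/* payload length, header excluded */
};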
Example #4
static void rx_timer_work_func(struct work_struct *work)
{
	struct sps_iovec iov;
	struct list_head *node;
	struct rx_pkt_info *info;

	while (1) {
		sps_get_iovec(bam_rx_pipe, &iov);
		if (iov.addr == 0)
			break;
		mutex_lock(&bam_rx_pool_lock);
		if (list_empty(&bam_rx_pool)) {
			/* pipe reported a descriptor we never queued */
			mutex_unlock(&bam_rx_pool_lock);
			pr_err("%s: got iovec %x but rx pool is empty\n",
				__func__, iov.addr);
			break;
		}
		node = bam_rx_pool.next;
		list_del(node);
		mutex_unlock(&bam_rx_pool_lock);
		info = container_of(node, struct rx_pkt_info, list_node);
		handle_bam_mux_cmd(&info->work);
	}

	/* re-arm: sleep briefly, then requeue this work to keep polling */
	msleep(1);
	queue_work(bam_mux_rx_workqueue, &rx_timer_work);
}
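Because the function requeues itself after a 1 ms sleep, the work item acts as a self-rearming poll timer rather than a one-shot handler. A sketch of the supporting declarations it references; the originals live elsewhere in the driver, so these forms are assumptions:

/* placed after rx_timer_work_func so DECLARE_WORK can see the symbol */
static DEFINE_MUTEX(bam_rx_pool_lock);
static LIST_HEAD(bam_rx_pool);
static struct workqueue_struct *bam_mux_rx_workqueue;
static DECLARE_WORK(rx_timer_work, rx_timer_work_func);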
Example #5
static void ipa_do_bridge_work(enum ipa_bridge_dir dir)
{
	struct ipa_bridge_pipe_context *sys_rx = &bridge[2 * dir];
	struct ipa_bridge_pipe_context *sys_tx = &bridge[2 * dir + 1];
	struct ipa_pkt_info *tx_pkt;
	struct ipa_pkt_info *rx_pkt;
	struct ipa_pkt_info *tmp_pkt;
	struct sps_iovec iov;
	int ret;
	int inactive_cycles = 0;

	while (1) {
		++inactive_cycles;

		if (ipa_reclaim_tx(sys_tx, false))
			inactive_cycles = 0;

		iov.addr = 0;
		ret = sps_get_iovec(sys_rx->pipe, &iov);
		if (ret || iov.addr == 0) {
			/* nothing completed on the RX pipe this cycle */
		} else {
			inactive_cycles = 0;

			rx_pkt = list_first_entry(&sys_rx->head_desc_list,
						  struct ipa_pkt_info,
						  list_node);
			list_del(&rx_pkt->list_node);
			sys_rx->len--;
			rx_pkt->len = iov.size;

retry_alloc_tx:
			/* free pool exhausted; grow it by one descriptor */
			if (list_empty(&sys_tx->free_desc_list)) {
				tmp_pkt = kmalloc(sizeof(struct ipa_pkt_info),
						GFP_KERNEL);
				if (!tmp_pkt) {
					pr_debug_ratelimited("%s: unable to alloc tx_pkt_info\n",
					       __func__);
					usleep_range(polling_min_sleep[dir],
							polling_max_sleep[dir]);
					goto retry_alloc_tx;
				}

				tmp_pkt->buffer = kmalloc(IPA_RX_SKB_SIZE,
						GFP_KERNEL | GFP_DMA);
				if (!tmp_pkt->buffer) {
					pr_debug_ratelimited("%s: unable to alloc tx_pkt_buffer\n",
					       __func__);
					kfree(tmp_pkt);
					usleep_range(polling_min_sleep[dir],
							polling_max_sleep[dir]);
					goto retry_alloc_tx;
				}

				tmp_pkt->dma_address = dma_map_single(NULL,
						tmp_pkt->buffer,
						IPA_RX_SKB_SIZE,
						DMA_BIDIRECTIONAL);
				if (tmp_pkt->dma_address == 0 ||
						tmp_pkt->dma_address == ~0) {
					pr_debug_ratelimited("%s: dma_map_single failure %p for %p\n",
					       __func__,
					       (void *)tmp_pkt->dma_address,
					       tmp_pkt->buffer);
					/* never queue an unmapped buffer */
					kfree(tmp_pkt->buffer);
					kfree(tmp_pkt);
					usleep_range(polling_min_sleep[dir],
							polling_max_sleep[dir]);
					goto retry_alloc_tx;
				}

				list_add_tail(&tmp_pkt->list_node,
						&sys_tx->free_desc_list);
				sys_tx->free_len++;
				alloc_cnt[dir]++;

				tmp_pkt->len = ~0;
			}

			tx_pkt = list_first_entry(&sys_tx->free_desc_list,
						  struct ipa_pkt_info,
						  list_node);
			list_del(&tx_pkt->list_node);
			sys_tx->free_len--;

retry_add_rx:
			/* repost the free descriptor as an empty RX buffer */
			list_add_tail(&tx_pkt->list_node,
					&sys_rx->head_desc_list);
			ret = sps_transfer_one(sys_rx->pipe,
					tx_pkt->dma_address,
					IPA_RX_SKB_SIZE,
					tx_pkt,
					SPS_IOVEC_FLAG_INT |
					SPS_IOVEC_FLAG_EOT);
			if (ret) {
				list_del(&tx_pkt->list_node);
				pr_debug_ratelimited("%s: sps_transfer_one failed %d\n",
						__func__, ret);
				usleep_range(polling_min_sleep[dir],
						polling_max_sleep[dir]);
				goto retry_add_rx;
			}
			sys_rx->len++;

retry_add_tx:
			/* forward the filled RX buffer out on the TX pipe */
			list_add_tail(&rx_pkt->list_node,
					&sys_tx->head_desc_list);
			ret = sps_transfer_one(sys_tx->pipe,
					       rx_pkt->dma_address,
					       iov.size,
					       rx_pkt,
					       SPS_IOVEC_FLAG_INT |
					       SPS_IOVEC_FLAG_EOT);
			if (ret) {
				pr_debug_ratelimited("%s: fail to add to TX dir=%d\n",
						__func__, dir);
				list_del(&rx_pkt->list_node);
				ipa_reclaim_tx(sys_tx, true);
				usleep_range(polling_min_sleep[dir],
						polling_max_sleep[dir]);
				goto retry_add_tx;
			}
			sys_tx->len++;
		}

		if (inactive_cycles >= polling_inactivity[dir]) {
			ipa_switch_to_intr_mode(dir);
			break;
		}
	}
}
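The loop exits only after polling_inactivity[dir] consecutive idle cycles, at which point the pipe pair is switched back to interrupt mode. A hedged sketch of per-direction work functions driving it; IPA_UL/IPA_DL and the work-item names are assumptions inferred from the 2 * dir indexing into bridge[], not code from this excerpt:

static void ipa_ul_work_func(struct work_struct *work)
{
	ipa_do_bridge_work(IPA_UL);	/* assumed enum ipa_bridge_dir value */
}

static void ipa_dl_work_func(struct work_struct *work)
{
	ipa_do_bridge_work(IPA_DL);	/* assumed enum ipa_bridge_dir value */
}

static DECLARE_WORK(ipa_ul_work, ipa_ul_work_func);
static DECLARE_WORK(ipa_dl_work, ipa_dl_work_func);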