Beispiel #1
0
/*
 * Reconfigure the SPS pipe for the given bridge direction so that
 * end-of-transfer (EOT) events are delivered instead of polling.
 *
 * Returns 0 on success, or the negative error code of the failing
 * sps_* call.
 */
static int ipa_switch_to_intr_mode(enum ipa_bridge_dir dir)
{
	struct ipa_bridge_pipe_context *sys = &bridge[2 * dir];
	int rc;

	rc = sps_get_config(sys->pipe, &sys->connection);
	if (rc) {
		IPAERR("sps_get_config() failed %d\n", rc);
		return rc;
	}

	/* Request an event for every end-of-transfer descriptor. */
	sys->register_event.options = SPS_O_EOT;
	rc = sps_register_event(sys->pipe, &sys->register_event);
	if (rc) {
		IPAERR("sps_register_event() failed %d\n", rc);
		return rc;
	}

	/* Re-apply the connection with EOT enabled. */
	sys->connection.options =
	   SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
	rc = sps_set_config(sys->pipe, &sys->connection);
	if (rc) {
		IPAERR("sps_set_config() failed %d\n", rc);
		return rc;
	}

	return 0;
}
Beispiel #2
0
/*
 * Tear down the SPS pipe behind port @pn: clear the hardware port
 * configuration, cancel any registered event callback, disconnect the
 * pipe and mark it as not connected.
 */
static void msm_slim_disconn_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *ep = &dev->pipes[pn];
	struct sps_register_event clear_ev;

	/* Zero the port config register (presumably disables the port). */
	writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn, (pn + dev->port_b),
					dev->ver));

	/* Ensure the register write has completed before tearing down. */
	mb();

	/* A zeroed registration cancels any previously installed event. */
	memset(&clear_ev, 0, sizeof(clear_ev));
	sps_register_event(ep->sps, &clear_ev);
	sps_disconnect(ep->sps);
	dev->pipes[pn].connected = false;
}
Beispiel #3
0
/*
 * Tear down the SPS pipe behind port @pn: clear the hardware port
 * configuration, mask this port's interrupt-enable bit, cancel any
 * registered event callback, disconnect the pipe and mark it as not
 * connected.
 */
static void msm_slim_disconn_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
	struct msm_slim_endp *ep = &dev->pipes[pn];
	struct sps_register_event clear_ev;
	u32 irq_en = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
					dev->ver));

	/* Zero the port config register (presumably disables the port). */
	writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn, (ep->port_b),
					dev->ver));
	/* Mask this port's bit in the per-EE interrupt-enable register. */
	writel_relaxed((irq_en & ~(1 << ep->port_b)),
		PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver));
	/* Make sure port register is updated */
	mb();

	/* A zeroed registration cancels any previously installed event. */
	memset(&clear_ev, 0, sizeof(clear_ev));
	sps_register_event(ep->sps, &clear_ev);
	sps_disconnect(ep->sps);
	dev->pipes[pn].connected = false;
}
Beispiel #4
0
/*
 * Workqueue handler performing one-time bring-up of the A2 BAM mux:
 * maps the A2 register space, registers the BAM device and connects
 * one tx (memory -> BAM) and one rx (BAM -> memory) pipe, each with a
 * 2k coherent descriptor FIFO and an EOT callback.
 *
 * On success bam_mux_initialized is set and the rx queue is primed.
 * On failure, everything acquired so far is released in strict reverse
 * order through the cascading labels below.  The function returns
 * void (it runs off a workqueue), so 'ret' is used only for logging.
 *
 * Fixes over the previous version:
 *  - tx endpoint-alloc failure used to skip sps_deregister_bam_device()
 *    (leaked the BAM device registration);
 *  - tx desc-FIFO alloc failure used to skip sps_free_endpoint() for
 *    the tx pipe (leaked the endpoint);
 *  - rx endpoint-alloc / rx get-config failures used to skip
 *    sps_disconnect() for the already-connected tx pipe;
 *  - the ioremapped region was never unmapped on failure;
 *  - the rx register-event error message wrongly said "tx".
 */
static void bam_init(struct work_struct *work)
{
	u32 h;
	dma_addr_t dma_addr;
	int ret;
	void *a2_virt_addr;

	/* init BAM */
	a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
	if (!a2_virt_addr) {
		pr_err("%s: ioremap failed\n", __func__);
		ret = -ENOMEM;
		goto ioremap_failed;
	}
	a2_props.phys_addr = A2_PHYS_BASE;
	a2_props.virt_addr = a2_virt_addr;
	a2_props.virt_size = A2_PHYS_SIZE;
	a2_props.irq = A2_BAM_IRQ;
	a2_props.num_pipes = A2_NUM_PIPES;
	a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
	/* need to free on tear down */
	ret = sps_register_bam_device(&a2_props, &h);
	if (ret < 0) {
		pr_err("%s: register bam error %d\n", __func__, ret);
		goto register_bam_failed;
	}

	bam_tx_pipe = sps_alloc_endpoint();
	if (bam_tx_pipe == NULL) {
		pr_err("%s: tx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto tx_alloc_failed;
	}
	ret = sps_get_config(bam_tx_pipe, &tx_connection);
	if (ret) {
		pr_err("%s: tx get config failed %d\n", __func__, ret);
		goto tx_get_config_failed;
	}

	/* tx: system memory feeds the A2 BAM (destination mode). */
	tx_connection.source = SPS_DEV_HANDLE_MEM;
	tx_connection.src_pipe_index = 0;
	tx_connection.destination = h;
	tx_connection.dest_pipe_index = 4;
	tx_connection.mode = SPS_MODE_DEST;
	tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
					SPS_O_ACK_TRANSFERS;
	tx_desc_mem_buf.size = 0x800; /* 2k */
	tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
							&dma_addr, 0);
	if (tx_desc_mem_buf.base == NULL) {
		pr_err("%s: tx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto tx_mem_failed;
	}
	tx_desc_mem_buf.phys_base = dma_addr;
	memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
	tx_connection.desc = tx_desc_mem_buf;
	tx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_tx_pipe, &tx_connection);
	if (ret < 0) {
		pr_err("%s: tx connect error %d\n", __func__, ret);
		goto tx_connect_failed;
	}

	bam_rx_pipe = sps_alloc_endpoint();
	if (bam_rx_pipe == NULL) {
		pr_err("%s: rx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto rx_alloc_failed;
	}
	ret = sps_get_config(bam_rx_pipe, &rx_connection);
	if (ret) {
		pr_err("%s: rx get config failed %d\n", __func__, ret);
		goto rx_get_config_failed;
	}

	/* rx: the A2 BAM feeds system memory (source mode). */
	rx_connection.source = h;
	rx_connection.src_pipe_index = 5;
	rx_connection.destination = SPS_DEV_HANDLE_MEM;
	rx_connection.dest_pipe_index = 1;
	rx_connection.mode = SPS_MODE_SRC;
	rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
					SPS_O_ACK_TRANSFERS;
	rx_desc_mem_buf.size = 0x800; /* 2k */
	rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
							&dma_addr, 0);
	if (rx_desc_mem_buf.base == NULL) {
		pr_err("%s: rx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto rx_mem_failed;
	}
	rx_desc_mem_buf.phys_base = dma_addr;
	memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
	rx_connection.desc = rx_desc_mem_buf;
	rx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_rx_pipe, &rx_connection);
	if (ret < 0) {
		pr_err("%s: rx connect error %d\n", __func__, ret);
		goto rx_connect_failed;
	}

	tx_register_event.options = SPS_O_EOT;
	tx_register_event.mode = SPS_TRIGGER_CALLBACK;
	tx_register_event.xfer_done = NULL;
	tx_register_event.callback = bam_mux_tx_notify;
	tx_register_event.user = NULL;
	ret = sps_register_event(bam_tx_pipe, &tx_register_event);
	if (ret < 0) {
		pr_err("%s: tx register event error %d\n", __func__, ret);
		/* both pipes are connected here, unwind everything */
		goto rx_event_reg_failed;
	}

	rx_register_event.options = SPS_O_EOT;
	rx_register_event.mode = SPS_TRIGGER_CALLBACK;
	rx_register_event.xfer_done = NULL;
	rx_register_event.callback = bam_mux_rx_notify;
	rx_register_event.user = NULL;
	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (ret < 0) {
		pr_err("%s: rx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	bam_mux_initialized = 1;
	queue_rx();
	return;

	/* Cleanup cascade: release in reverse order of acquisition. */
rx_event_reg_failed:
	sps_disconnect(bam_rx_pipe);
rx_connect_failed:
	dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
				rx_desc_mem_buf.phys_base);
rx_mem_failed:
rx_get_config_failed:
	sps_free_endpoint(bam_rx_pipe);
rx_alloc_failed:
	sps_disconnect(bam_tx_pipe);
tx_connect_failed:
	dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
				tx_desc_mem_buf.phys_base);
tx_mem_failed:
tx_get_config_failed:
	sps_free_endpoint(bam_tx_pipe);
tx_alloc_failed:
	sps_deregister_bam_device(h);
register_bam_failed:
	iounmap(a2_virt_addr);
ioremap_failed:
	/*destroy_workqueue(bam_mux_workqueue);*/
	return;
}
/*
 * Set up the QPIC BAM write pipe (system memory -> BAM) and register a
 * completion-based EOT event on it.  Idempotent: returns 0 immediately
 * if initialization already ran (qpic_res->sps_init).
 *
 * @pdev:      platform device used for devm-managed DMA allocation
 * @end_point: endpoint context; config, bam_event, completion and
 *             handle are filled in on success
 *
 * Returns 0 on success or a negative error code.  The descriptor FIFO
 * is allocated with dmam_alloc_coherent(), so it is released by the
 * device-managed framework and needs no explicit free on error.
 */
int qpic_init_sps(struct platform_device *pdev,
				struct qpic_sps_endpt *end_point)
{
	int rc = 0;
	struct sps_pipe *pipe_handle;
	struct sps_connect *sps_config = &end_point->config;
	struct sps_register_event *sps_event = &end_point->bam_event;
	struct sps_bam_props bam = {0};
	u32 bam_handle = 0;

	if (qpic_res->sps_init)
		return 0;
	/* BAM register block sits at offset 0x4000 inside the QPIC space;
	 * NOTE(review): irq offset of -4 looks board-specific — confirm. */
	bam.phys_addr = qpic_res->qpic_phys + 0x4000;
	bam.virt_addr = qpic_res->qpic_base + 0x4000;
	bam.irq = qpic_res->irq - 4;
	bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;

	/* Reuse an already-registered BAM if one exists for this address;
	 * otherwise register it ourselves. */
	rc = sps_phy2h(bam.phys_addr, &bam_handle);
	if (rc)
		rc = sps_register_bam_device(&bam, &bam_handle);
	if (rc) {
		pr_err("%s: failed to get bam handle\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	pipe_handle = sps_alloc_endpoint();
	if (!pipe_handle) {
		pr_err("sps_alloc_endpoint() failed\n");
		rc = -ENOMEM;
		goto out;
	}

	rc = sps_get_config(pipe_handle, sps_config);
	if (rc) {
		pr_err("sps_get_config() failed %d\n", rc);
		goto free_endpoint;
	}

	/* WRITE CASE: source - system memory; destination - BAM */
	sps_config->source = SPS_DEV_HANDLE_MEM;
	sps_config->destination = bam_handle;
	sps_config->mode = SPS_MODE_DEST;
	sps_config->dest_pipe_index = 6;

	sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
	sps_config->lock_group = 0;
	/*
	 * Descriptor FIFO is a cyclic FIFO. If 64 descriptors
	 * are allowed to be submitted before we get any ack for any of them,
	 * the descriptor FIFO size should be: (SPS_MAX_DESC_NUM + 1) *
	 * sizeof(struct sps_iovec).
	 */
	sps_config->desc.size = (64) *
					sizeof(struct sps_iovec);
	sps_config->desc.base = dmam_alloc_coherent(&pdev->dev,
					sps_config->desc.size,
					&sps_config->desc.phys_base,
					GFP_KERNEL);
	if (!sps_config->desc.base) {
		pr_err("dmam_alloc_coherent() failed for size %x\n",
				sps_config->desc.size);
		rc = -ENOMEM;
		goto free_endpoint;
	}
	memset(sps_config->desc.base, 0x00, sps_config->desc.size);

	rc = sps_connect(pipe_handle, sps_config);
	if (rc) {
		pr_err("sps_connect() failed %d\n", rc);
		goto free_endpoint;
	}

	/* EOT events complete &end_point->completion (SPS_TRIGGER_WAIT). */
	init_completion(&end_point->completion);
	sps_event->mode = SPS_TRIGGER_WAIT;
	sps_event->options = SPS_O_EOT;
	sps_event->xfer_done = &end_point->completion;
	sps_event->user = (void *)qpic_res;

	rc = sps_register_event(pipe_handle, sps_event);
	if (rc) {
		pr_err("sps_register_event() failed %d\n", rc);
		goto sps_disconnect;
	}

	end_point->handle = pipe_handle;
	qpic_res->sps_init = true;
	goto out;
sps_disconnect:
	sps_disconnect(pipe_handle);
free_endpoint:
	sps_free_endpoint(pipe_handle);
out:
	return rc;
}
Beispiel #6
0
/*
 * Connect one side of the bridge to the A2 BAM.
 *
 * IPA_UL: system memory -> A2 (destination-mode pipe, polled via
 * SPS_O_POLL, no event callback).
 * Otherwise: A2 -> system memory (source-mode pipe with an EOT
 * callback, and the rx descriptor pool is pre-filled).
 *
 * Both pipes use a 2k coherent descriptor FIFO.  Returns 0 on success
 * or a negative error code; on failure everything acquired so far is
 * released in reverse order.
 */
static int setup_bridge_to_a2(enum ipa_bridge_dir dir)
{
	struct ipa_bridge_pipe_context *sys;
	struct a2_mux_pipe_connection pipe_conn = { 0, };
	dma_addr_t dma_addr;
	u32 a2_handle;
	int ret;
	int i;

	if (dir == IPA_UL) {
		ret = ipa_get_a2_mux_pipe_info(IPA_TO_A2, &pipe_conn);
		if (ret) {
			IPAERR("ipa_get_a2_mux_pipe_info failed IPA_TO_A2\n");
			goto tx_alloc_endpoint_failed;
		}

		/* Resolve the A2 BAM handle from its physical address. */
		ret = sps_phy2h(pipe_conn.dst_phy_addr, &a2_handle);
		if (ret) {
			IPAERR("sps_phy2h failed (A2 BAM) %d\n", ret);
			goto tx_alloc_endpoint_failed;
		}

		sys = &bridge[IPA_UL_TO_A2];
		sys->pipe = sps_alloc_endpoint();
		if (sys->pipe == NULL) {
			IPAERR("tx alloc endpoint failed\n");
			ret = -ENOMEM;
			goto tx_alloc_endpoint_failed;
		}
		ret = sps_get_config(sys->pipe, &sys->connection);
		if (ret) {
			IPAERR("tx get config failed %d\n", ret);
			goto tx_get_config_failed;
		}

		sys->connection.source = SPS_DEV_HANDLE_MEM;
		sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++;
		sys->connection.destination = a2_handle;
		sys->connection.dest_pipe_index = pipe_conn.dst_pipe_index;
		sys->connection.mode = SPS_MODE_DEST;
		/* Polled pipe: no event registration on the tx side. */
		sys->connection.options =
		   SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
				sys->desc_mem_buf.size,
				&dma_addr,
				0);
		if (sys->desc_mem_buf.base == NULL) {
			IPAERR("tx memory alloc failed\n");
			ret = -ENOMEM;
			goto tx_get_config_failed;
		}
		sys->desc_mem_buf.phys_base = dma_addr;
		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
		sys->connection.desc = sys->desc_mem_buf;
		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;

		ret = sps_connect(sys->pipe, &sys->connection);
		if (ret < 0) {
			IPAERR("tx connect error %d\n", ret);
			goto tx_connect_failed;
		}

		INIT_LIST_HEAD(&sys->head_desc_list);
		INIT_LIST_HEAD(&sys->free_desc_list);
		spin_lock_init(&sys->spinlock);

		return 0;

tx_connect_failed:
		dma_free_coherent(NULL,
				sys->desc_mem_buf.size,
				sys->desc_mem_buf.base,
				sys->desc_mem_buf.phys_base);
tx_get_config_failed:
		sps_free_endpoint(sys->pipe);
tx_alloc_endpoint_failed:
		return ret;
	} else { /* dir != IPA_UL: A2 -> IPA downlink */

		ret = ipa_get_a2_mux_pipe_info(A2_TO_IPA, &pipe_conn);
		if (ret) {
			IPAERR("ipa_get_a2_mux_pipe_info failed A2_TO_IPA\n");
			goto rx_alloc_endpoint_failed;
		}

		ret = sps_phy2h(pipe_conn.src_phy_addr, &a2_handle);
		if (ret) {
			IPAERR("sps_phy2h failed (A2 BAM) %d\n", ret);
			goto rx_alloc_endpoint_failed;
		}

		sys = &bridge[IPA_DL_FROM_A2];
		sys->pipe = sps_alloc_endpoint();
		if (sys->pipe == NULL) {
			IPAERR("rx alloc endpoint failed\n");
			ret = -ENOMEM;
			goto rx_alloc_endpoint_failed;
		}
		ret = sps_get_config(sys->pipe, &sys->connection);
		if (ret) {
			IPAERR("rx get config failed %d\n", ret);
			goto rx_get_config_failed;
		}

		sys->connection.source = a2_handle;
		sys->connection.src_pipe_index = pipe_conn.src_pipe_index;
		sys->connection.destination = SPS_DEV_HANDLE_MEM;
		sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++;
		sys->connection.mode = SPS_MODE_SRC;
		sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
		      SPS_O_ACK_TRANSFERS;
		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
				sys->desc_mem_buf.size,
				&dma_addr,
				0);
		if (sys->desc_mem_buf.base == NULL) {
			IPAERR("rx memory alloc failed\n");
			ret = -ENOMEM;
			goto rx_get_config_failed;
		}
		sys->desc_mem_buf.phys_base = dma_addr;
		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
		sys->connection.desc = sys->desc_mem_buf;
		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;

		ret = sps_connect(sys->pipe, &sys->connection);
		if (ret < 0) {
			IPAERR("rx connect error %d\n", ret);
			goto rx_connect_failed;
		}

		/* Interrupt-driven rx: EOT triggers bam_mux_rx_notify(). */
		sys->register_event.options = SPS_O_EOT;
		sys->register_event.mode = SPS_TRIGGER_CALLBACK;
		sys->register_event.xfer_done = NULL;
		sys->register_event.callback = bam_mux_rx_notify;
		sys->register_event.user = NULL;
		ret = sps_register_event(sys->pipe, &sys->register_event);
		if (ret < 0) {
			IPAERR("rx register event error %d\n", ret);
			goto rx_event_reg_failed;
		}

		INIT_LIST_HEAD(&sys->head_desc_list);
		INIT_LIST_HEAD(&sys->free_desc_list);
		spin_lock_init(&sys->spinlock);

		/* Pre-fill the rx pool; individual failures are only
		 * logged (best-effort, as in the original code). */
		for (i = 0; i < IPA_RX_POOL_CEIL; i++) {
			ret = queue_rx_single(dir);
			if (ret < 0)
				IPAERR("queue fail %d %d\n", dir, i);
		}

		return 0;

rx_event_reg_failed:
		sps_disconnect(sys->pipe);
rx_connect_failed:
		dma_free_coherent(NULL,
				sys->desc_mem_buf.size,
				sys->desc_mem_buf.base,
				sys->desc_mem_buf.phys_base);
rx_get_config_failed:
		sps_free_endpoint(sys->pipe);
rx_alloc_endpoint_failed:
		return ret;
	}
}
Beispiel #7
0
/*
 * Connect one side of the bridge to the local IPA BAM.
 *
 * IPA_DL: system memory -> IPA (destination-mode pipe, polled via
 * SPS_O_POLL); the endpoint is additionally configured for DMA mode
 * towards IPA_CLIENT_USB_CONS.
 * Otherwise: IPA -> system memory (source-mode pipe with an EOT
 * callback, and the rx descriptor pool is pre-filled).
 *
 * Both pipes use a 2k coherent descriptor FIFO.  Returns 0 on success
 * or a negative error code; on failure everything acquired so far is
 * released in reverse order.
 */
static int setup_bridge_to_ipa(enum ipa_bridge_dir dir)
{
	struct ipa_bridge_pipe_context *sys;
	struct ipa_ep_cfg_mode mode;
	dma_addr_t dma_addr;
	int ipa_ep_idx;
	int ret;
	int i;

	if (dir == IPA_DL) {
		ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode,
				IPA_CLIENT_A2_TETHERED_PROD);
		if (ipa_ep_idx == -1) {
			IPAERR("Invalid client.\n");
			ret = -EINVAL;
			goto tx_alloc_endpoint_failed;
		}

		sys = &bridge[IPA_DL_TO_IPA];
		sys->pipe = sps_alloc_endpoint();
		if (sys->pipe == NULL) {
			IPAERR("tx alloc endpoint failed\n");
			ret = -ENOMEM;
			goto tx_alloc_endpoint_failed;
		}
		ret = sps_get_config(sys->pipe, &sys->connection);
		if (ret) {
			IPAERR("tx get config failed %d\n", ret);
			goto tx_get_config_failed;
		}

		sys->connection.source = SPS_DEV_HANDLE_MEM;
		sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++;
		sys->connection.destination = ipa_ctx->bam_handle;
		sys->connection.dest_pipe_index = ipa_ep_idx;
		sys->connection.mode = SPS_MODE_DEST;
		/* Polled pipe: no event registration on the tx side. */
		sys->connection.options =
		   SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
				sys->desc_mem_buf.size,
				&dma_addr,
				0);
		if (sys->desc_mem_buf.base == NULL) {
			IPAERR("tx memory alloc failed\n");
			ret = -ENOMEM;
			goto tx_get_config_failed;
		}
		sys->desc_mem_buf.phys_base = dma_addr;
		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
		sys->connection.desc = sys->desc_mem_buf;
		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;

		ret = sps_connect(sys->pipe, &sys->connection);
		if (ret < 0) {
			IPAERR("tx connect error %d\n", ret);
			goto tx_connect_failed;
		}

		INIT_LIST_HEAD(&sys->head_desc_list);
		INIT_LIST_HEAD(&sys->free_desc_list);
		spin_lock_init(&sys->spinlock);

		ipa_ctx->ep[ipa_ep_idx].valid = 1;

		/* Endpoint forwards in DMA mode towards the USB consumer. */
		mode.mode = IPA_DMA;
		mode.dst = IPA_CLIENT_USB_CONS;
		ret = ipa_cfg_ep_mode(ipa_ep_idx, &mode);
		if (ret < 0) {
			IPAERR("DMA mode set error %d\n", ret);
			goto tx_mode_set_failed;
		}

		return 0;

tx_mode_set_failed:
		sps_disconnect(sys->pipe);
tx_connect_failed:
		dma_free_coherent(NULL, sys->desc_mem_buf.size,
				sys->desc_mem_buf.base,
				sys->desc_mem_buf.phys_base);
tx_get_config_failed:
		sps_free_endpoint(sys->pipe);
tx_alloc_endpoint_failed:
		return ret;
	} else {

		ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode,
				IPA_CLIENT_A2_TETHERED_CONS);
		if (ipa_ep_idx == -1) {
			IPAERR("Invalid client.\n");
			ret = -EINVAL;
			goto rx_alloc_endpoint_failed;
		}

		sys = &bridge[IPA_UL_FROM_IPA];
		sys->pipe = sps_alloc_endpoint();
		if (sys->pipe == NULL) {
			IPAERR("rx alloc endpoint failed\n");
			ret = -ENOMEM;
			goto rx_alloc_endpoint_failed;
		}
		ret = sps_get_config(sys->pipe, &sys->connection);
		if (ret) {
			IPAERR("rx get config failed %d\n", ret);
			goto rx_get_config_failed;
		}

		sys->connection.source = ipa_ctx->bam_handle;
		/* NOTE(review): hard-coded IPA source pipe index 7 —
		 * presumably fixed by hardware; confirm against the
		 * pipe map before changing. */
		sys->connection.src_pipe_index = 7;
		sys->connection.destination = SPS_DEV_HANDLE_MEM;
		sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++;
		sys->connection.mode = SPS_MODE_SRC;
		sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
		      SPS_O_ACK_TRANSFERS;
		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
				sys->desc_mem_buf.size,
				&dma_addr,
				0);
		if (sys->desc_mem_buf.base == NULL) {
			IPAERR("rx memory alloc failed\n");
			ret = -ENOMEM;
			goto rx_get_config_failed;
		}
		sys->desc_mem_buf.phys_base = dma_addr;
		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
		sys->connection.desc = sys->desc_mem_buf;
		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;

		ret = sps_connect(sys->pipe, &sys->connection);
		if (ret < 0) {
			IPAERR("rx connect error %d\n", ret);
			goto rx_connect_failed;
		}

		/* Interrupt-driven rx: EOT triggers ipa_sps_irq_rx_notify(). */
		sys->register_event.options = SPS_O_EOT;
		sys->register_event.mode = SPS_TRIGGER_CALLBACK;
		sys->register_event.xfer_done = NULL;
		sys->register_event.callback = ipa_sps_irq_rx_notify;
		sys->register_event.user = NULL;
		ret = sps_register_event(sys->pipe, &sys->register_event);
		if (ret < 0) {
			IPAERR("rx register event error %d\n", ret);
			goto rx_event_reg_failed;
		}

		INIT_LIST_HEAD(&sys->head_desc_list);
		INIT_LIST_HEAD(&sys->free_desc_list);
		spin_lock_init(&sys->spinlock);

		/* Pre-fill the rx pool; individual failures are only
		 * logged (best-effort, as in the original code). */
		for (i = 0; i < IPA_RX_POOL_CEIL; i++) {
			ret = queue_rx_single(dir);
			if (ret < 0)
				IPAERR("queue fail %d %d\n", dir, i);
		}

		return 0;

rx_event_reg_failed:
		sps_disconnect(sys->pipe);
rx_connect_failed:
		dma_free_coherent(NULL,
				sys->desc_mem_buf.size,
				sys->desc_mem_buf.base,
				sys->desc_mem_buf.phys_base);
rx_get_config_failed:
		sps_free_endpoint(sys->pipe);
rx_alloc_endpoint_failed:
		return ret;
	}
}