/*
 * msm_slim_init_endpoint() - allocate an SPS endpoint for @ep and load
 * its default connection configuration into ep->config.
 *
 * On success ep->sps holds the new pipe and 0 is returned; on failure
 * nothing is left allocated and a negative/SPS error code is returned.
 */
int msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
{
	struct sps_pipe *pipe;
	int rc;

	/* Allocate the endpoint */
	pipe = sps_alloc_endpoint();
	if (!pipe) {
		dev_err(dev->dev, "sps_alloc_endpoint failed\n");
		return -ENOMEM;
	}

	/* Get default connection configuration for an endpoint */
	rc = sps_get_config(pipe, &ep->config);
	if (rc) {
		dev_err(dev->dev, "sps_get_config failed 0x%x\n", rc);
		sps_free_endpoint(pipe);
		return rc;
	}

	ep->sps = pipe;
	return 0;
}
static int ipa_connect_configure_sps(const struct ipa_connect_params *in, struct ipa_ep_context *ep, int ipa_ep_idx) { int result = -EFAULT; /* Default Config */ ep->ep_hdl = sps_alloc_endpoint(); if (ep->ep_hdl == NULL) { IPAERR("SPS EP alloc failed EP.\n"); return -EFAULT; } result = sps_get_config(ep->ep_hdl, &ep->connect); if (result) { IPAERR("fail to get config.\n"); return -EFAULT; } /* Specific Config */ if (IPA_CLIENT_IS_CONS(in->client)) { ep->connect.mode = SPS_MODE_SRC; ep->connect.destination = in->client_bam_hdl; ep->connect.source = ipa_ctx->bam_handle; ep->connect.dest_pipe_index = in->client_ep_idx; ep->connect.src_pipe_index = ipa_ep_idx; } else { ep->connect.mode = SPS_MODE_DEST; ep->connect.source = in->client_bam_hdl; ep->connect.destination = ipa_ctx->bam_handle; ep->connect.src_pipe_index = in->client_ep_idx; ep->connect.dest_pipe_index = ipa_ep_idx; } return 0; }
/*
 * setup_dma_bam_bridge() - build one leg of the A2<->IPA bridge routed
 * through the BAM-DMA device.
 *
 * Two SPS pipes are created and connected: one between the BAM-DMA and
 * the IPA (via ipa_connect()), and one between the A2 BAM and the
 * BAM-DMA.  @dir selects UL/DL (which also decides src/dest roles),
 * @type selects the bridge instance, @props carries the IPA client
 * parameters and *@clnt_hdl returns the handle of the connected client.
 *
 * Returns 0 on success, negative error code on failure with everything
 * that was set up torn back down.
 */
static int setup_dma_bam_bridge(enum ipa_bridge_dir dir,
				enum ipa_bridge_type type,
				struct ipa_sys_connect_params *props,
				u32 *clnt_hdl)
{
	struct ipa_connect_params ipa_in_params;
	struct ipa_sps_params sps_out_params;
	int dma_a2_pipe;
	int dma_ipa_pipe;
	struct sps_pipe *pipe;
	struct sps_pipe *pipe_a2;
	struct sps_connect _connection;
	struct sps_connect *connection = &_connection;
	struct a2_mux_pipe_connection pipe_conn = {0};
	enum a2_mux_pipe_direction pipe_dir;
	u32 dma_hdl = sps_dma_get_bam_handle();
	u32 a2_hdl;
	u32 pa;
	int ret;

	memset(&ipa_in_params, 0, sizeof(ipa_in_params));
	memset(&sps_out_params, 0, sizeof(sps_out_params));

	pipe_dir = (dir == IPA_BRIDGE_DIR_UL) ? IPA_TO_A2 : A2_TO_IPA;

	ret = ipa_get_a2_mux_pipe_info(pipe_dir, &pipe_conn);
	if (ret) {
		IPAERR("ipa_get_a2_mux_pipe_info failed dir=%d type=%d\n",
		       dir, type);
		goto fail_get_a2_prop;
	}

	/* A2-side physical address of the pipe, used to look up the A2 BAM */
	pa = (dir == IPA_BRIDGE_DIR_UL) ? pipe_conn.dst_phy_addr :
					  pipe_conn.src_phy_addr;

	ret = sps_phy2h(pa, &a2_hdl);
	if (ret) {
		IPAERR("sps_phy2h failed (A2 BAM) %d dir=%d type=%d\n",
		       ret, dir, type);
		goto fail_get_a2_prop;
	}

	ipa_get_dma_pipe_num(dir, type, &dma_a2_pipe, &dma_ipa_pipe);

	ipa_in_params.ipa_ep_cfg = props->ipa_ep_cfg;
	ipa_in_params.client = props->client;
	ipa_in_params.client_bam_hdl = dma_hdl;
	ipa_in_params.client_ep_idx = dma_ipa_pipe;
	ipa_in_params.priv = props->priv;
	ipa_in_params.notify = props->notify;
	ipa_in_params.desc_fifo_sz = ipa_get_desc_fifo_sz(dir, type);
	ipa_in_params.data_fifo_sz = ipa_get_data_fifo_sz(dir, type);

	if (ipa_connect(&ipa_in_params, &sps_out_params, clnt_hdl)) {
		IPAERR("ipa connect failed dir=%d type=%d\n", dir, type);
		/*
		 * fix: 'ret' still held 0 (from the successful sps_phy2h)
		 * at this point, so this failure used to be reported as
		 * success to the caller.
		 */
		ret = -EINVAL;
		goto fail_get_a2_prop;
	}

	/* first pipe: BAM-DMA <-> IPA */
	pipe = sps_alloc_endpoint();
	if (pipe == NULL) {
		IPAERR("sps_alloc_endpoint failed dir=%d type=%d\n",
		       dir, type);
		ret = -ENOMEM;
		goto fail_sps_alloc;
	}

	memset(&_connection, 0, sizeof(_connection));
	ret = sps_get_config(pipe, connection);
	if (ret) {
		IPAERR("sps_get_config failed %d dir=%d type=%d\n",
		       ret, dir, type);
		goto fail_sps_get_config;
	}

	if (dir == IPA_BRIDGE_DIR_DL) {
		connection->mode = SPS_MODE_SRC;
		connection->source = dma_hdl;
		connection->destination = sps_out_params.ipa_bam_hdl;
		connection->src_pipe_index = dma_ipa_pipe;
		connection->dest_pipe_index = sps_out_params.ipa_ep_idx;
	} else {
		connection->mode = SPS_MODE_DEST;
		connection->source = sps_out_params.ipa_bam_hdl;
		connection->destination = dma_hdl;
		connection->src_pipe_index = sps_out_params.ipa_ep_idx;
		connection->dest_pipe_index = dma_ipa_pipe;
	}

	connection->event_thresh = IPA_EVENT_THRESHOLD;
	/* FIFOs for this pipe were allocated by ipa_connect() */
	connection->data = sps_out_params.data;
	connection->desc = sps_out_params.desc;
	connection->options = SPS_O_AUTO_ENABLE;

	ret = sps_connect(pipe, connection);
	if (ret) {
		IPAERR("sps_connect failed %d dir=%d type=%d\n",
		       ret, dir, type);
		goto fail_sps_get_config;
	}

	if (dir == IPA_BRIDGE_DIR_DL) {
		bridge[type].pipe[IPA_DL_TO_IPA].pipe = pipe;
		bridge[type].pipe[IPA_DL_TO_IPA].ipa_facing = true;
		bridge[type].pipe[IPA_DL_TO_IPA].valid = true;
	} else {
		bridge[type].pipe[IPA_UL_FROM_IPA].pipe = pipe;
		bridge[type].pipe[IPA_UL_FROM_IPA].ipa_facing = true;
		bridge[type].pipe[IPA_UL_FROM_IPA].valid = true;
	}

	IPADBG("dir=%d type=%d (ipa) src(0x%x:%u)->dst(0x%x:%u)\n",
	       dir, type, connection->source, connection->src_pipe_index,
	       connection->destination, connection->dest_pipe_index);

	/* second pipe: A2 BAM <-> BAM-DMA */
	pipe_a2 = sps_alloc_endpoint();
	if (pipe_a2 == NULL) {
		IPAERR("sps_alloc_endpoint failed2 dir=%d type=%d\n",
		       dir, type);
		ret = -ENOMEM;
		goto fail_sps_alloc_a2;
	}

	memset(&_connection, 0, sizeof(_connection));
	ret = sps_get_config(pipe_a2, connection);
	if (ret) {
		IPAERR("sps_get_config failed2 %d dir=%d type=%d\n",
		       ret, dir, type);
		goto fail_sps_get_config_a2;
	}

	if (dir == IPA_BRIDGE_DIR_DL) {
		connection->mode = SPS_MODE_DEST;
		connection->source = a2_hdl;
		connection->destination = dma_hdl;
		connection->src_pipe_index = ipa_get_a2_pipe_num(dir, type);
		connection->dest_pipe_index = dma_a2_pipe;
	} else {
		connection->mode = SPS_MODE_SRC;
		connection->source = dma_hdl;
		connection->destination = a2_hdl;
		connection->src_pipe_index = dma_a2_pipe;
		connection->dest_pipe_index = ipa_get_a2_pipe_num(dir, type);
	}

	connection->event_thresh = IPA_EVENT_THRESHOLD;

	if (ipa_setup_a2_dma_fifos(dir, type, &connection->desc,
				   &connection->data)) {
		IPAERR("fail to setup A2-DMA FIFOs dir=%d type=%d\n",
		       dir, type);
		/*
		 * fix: 'ret' still held 0 (from the successful
		 * sps_get_config) here, so this failure used to be
		 * reported as success.
		 */
		ret = -ENOMEM;
		goto fail_sps_get_config_a2;
	}

	connection->options = SPS_O_AUTO_ENABLE;

	ret = sps_connect(pipe_a2, connection);
	if (ret) {
		IPAERR("sps_connect failed2 %d dir=%d type=%d\n",
		       ret, dir, type);
		goto fail_sps_get_config_a2;
	}

	if (dir == IPA_BRIDGE_DIR_DL) {
		bridge[type].pipe[IPA_DL_FROM_A2].pipe = pipe_a2;
		bridge[type].pipe[IPA_DL_FROM_A2].valid = true;
	} else {
		bridge[type].pipe[IPA_UL_TO_A2].pipe = pipe_a2;
		bridge[type].pipe[IPA_UL_TO_A2].valid = true;
	}

	IPADBG("dir=%d type=%d (a2) src(0x%x:%u)->dst(0x%x:%u)\n",
	       dir, type, connection->source, connection->src_pipe_index,
	       connection->destination, connection->dest_pipe_index);

	return 0;

	/*
	 * NOTE(review): the bridge[type].pipe[...] entries set above are
	 * left marked valid on the a2-pipe failure paths, and FIFOs from
	 * ipa_setup_a2_dma_fifos() are not freed if sps_connect fails —
	 * verify against the teardown path.
	 */
fail_sps_get_config_a2:
	sps_free_endpoint(pipe_a2);
fail_sps_alloc_a2:
	sps_disconnect(pipe);
fail_sps_get_config:
	sps_free_endpoint(pipe);
fail_sps_alloc:
	ipa_disconnect(*clnt_hdl);
fail_get_a2_prop:
	return ret;
}
static void bam_init(struct work_struct *work) { u32 h; dma_addr_t dma_addr; int ret; void *a2_virt_addr; /* init BAM */ a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE); if (!a2_virt_addr) { pr_err("%s: ioremap failed\n", __func__); ret = -ENOMEM; goto register_bam_failed; } a2_props.phys_addr = A2_PHYS_BASE; a2_props.virt_addr = a2_virt_addr; a2_props.virt_size = A2_PHYS_SIZE; a2_props.irq = A2_BAM_IRQ; a2_props.num_pipes = A2_NUM_PIPES; a2_props.summing_threshold = A2_SUMMING_THRESHOLD; /* need to free on tear down */ ret = sps_register_bam_device(&a2_props, &h); if (ret < 0) { pr_err("%s: register bam error %d\n", __func__, ret); goto register_bam_failed; } bam_tx_pipe = sps_alloc_endpoint(); if (bam_tx_pipe == NULL) { pr_err("%s: tx alloc endpoint failed\n", __func__); ret = -ENOMEM; goto register_bam_failed; } ret = sps_get_config(bam_tx_pipe, &tx_connection); if (ret) { pr_err("%s: tx get config failed %d\n", __func__, ret); goto tx_get_config_failed; } tx_connection.source = SPS_DEV_HANDLE_MEM; tx_connection.src_pipe_index = 0; tx_connection.destination = h; tx_connection.dest_pipe_index = 4; tx_connection.mode = SPS_MODE_DEST; tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_ACK_TRANSFERS; tx_desc_mem_buf.size = 0x800; /* 2k */ tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size, &dma_addr, 0); if (tx_desc_mem_buf.base == NULL) { pr_err("%s: tx memory alloc failed\n", __func__); ret = -ENOMEM; goto tx_mem_failed; } tx_desc_mem_buf.phys_base = dma_addr; memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size); tx_connection.desc = tx_desc_mem_buf; tx_connection.event_thresh = 0x10; ret = sps_connect(bam_tx_pipe, &tx_connection); if (ret < 0) { pr_err("%s: tx connect error %d\n", __func__, ret); goto tx_connect_failed; } bam_rx_pipe = sps_alloc_endpoint(); if (bam_rx_pipe == NULL) { pr_err("%s: rx alloc endpoint failed\n", __func__); ret = -ENOMEM; goto tx_connect_failed; } ret = sps_get_config(bam_rx_pipe, 
&rx_connection); if (ret) { pr_err("%s: rx get config failed %d\n", __func__, ret); goto rx_get_config_failed; } rx_connection.source = h; rx_connection.src_pipe_index = 5; rx_connection.destination = SPS_DEV_HANDLE_MEM; rx_connection.dest_pipe_index = 1; rx_connection.mode = SPS_MODE_SRC; rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_ACK_TRANSFERS; rx_desc_mem_buf.size = 0x800; /* 2k */ rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size, &dma_addr, 0); if (rx_desc_mem_buf.base == NULL) { pr_err("%s: rx memory alloc failed\n", __func__); ret = -ENOMEM; goto rx_mem_failed; } rx_desc_mem_buf.phys_base = dma_addr; memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size); rx_connection.desc = rx_desc_mem_buf; rx_connection.event_thresh = 0x10; ret = sps_connect(bam_rx_pipe, &rx_connection); if (ret < 0) { pr_err("%s: rx connect error %d\n", __func__, ret); goto rx_connect_failed; } tx_register_event.options = SPS_O_EOT; tx_register_event.mode = SPS_TRIGGER_CALLBACK; tx_register_event.xfer_done = NULL; tx_register_event.callback = bam_mux_tx_notify; tx_register_event.user = NULL; ret = sps_register_event(bam_tx_pipe, &tx_register_event); if (ret < 0) { pr_err("%s: tx register event error %d\n", __func__, ret); goto rx_event_reg_failed; } rx_register_event.options = SPS_O_EOT; rx_register_event.mode = SPS_TRIGGER_CALLBACK; rx_register_event.xfer_done = NULL; rx_register_event.callback = bam_mux_rx_notify; rx_register_event.user = NULL; ret = sps_register_event(bam_rx_pipe, &rx_register_event); if (ret < 0) { pr_err("%s: tx register event error %d\n", __func__, ret); goto rx_event_reg_failed; } bam_mux_initialized = 1; queue_rx(); return; rx_event_reg_failed: sps_disconnect(bam_rx_pipe); rx_connect_failed: dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base, rx_desc_mem_buf.phys_base); rx_mem_failed: sps_disconnect(bam_tx_pipe); rx_get_config_failed: sps_free_endpoint(bam_rx_pipe); tx_connect_failed: dma_free_coherent(NULL, 
tx_desc_mem_buf.size, tx_desc_mem_buf.base, tx_desc_mem_buf.phys_base); tx_get_config_failed: sps_free_endpoint(bam_tx_pipe); tx_mem_failed: sps_deregister_bam_device(h); register_bam_failed: /*destroy_workqueue(bam_mux_workqueue);*/ /*return ret;*/ return; }
int qpic_init_sps(struct platform_device *pdev, struct qpic_sps_endpt *end_point) { int rc = 0; struct sps_pipe *pipe_handle; struct sps_connect *sps_config = &end_point->config; struct sps_register_event *sps_event = &end_point->bam_event; struct sps_bam_props bam = {0}; u32 bam_handle = 0; if (qpic_res->sps_init) return 0; bam.phys_addr = qpic_res->qpic_phys + 0x4000; bam.virt_addr = qpic_res->qpic_base + 0x4000; bam.irq = qpic_res->irq - 4; bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE; rc = sps_phy2h(bam.phys_addr, &bam_handle); if (rc) rc = sps_register_bam_device(&bam, &bam_handle); if (rc) { pr_err("%s bam_handle is NULL", __func__); rc = -ENOMEM; goto out; } pipe_handle = sps_alloc_endpoint(); if (!pipe_handle) { pr_err("sps_alloc_endpoint() failed\n"); rc = -ENOMEM; goto out; } rc = sps_get_config(pipe_handle, sps_config); if (rc) { pr_err("sps_get_config() failed %d\n", rc); goto free_endpoint; } /* WRITE CASE: source - system memory; destination - BAM */ sps_config->source = SPS_DEV_HANDLE_MEM; sps_config->destination = bam_handle; sps_config->mode = SPS_MODE_DEST; sps_config->dest_pipe_index = 6; sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_EOT; sps_config->lock_group = 0; /* * Descriptor FIFO is a cyclic FIFO. If 64 descriptors * are allowed to be submitted before we get any ack for any of them, * the descriptor FIFO size should be: (SPS_MAX_DESC_NUM + 1) * * sizeof(struct sps_iovec). 
*/ sps_config->desc.size = (64) * sizeof(struct sps_iovec); sps_config->desc.base = dmam_alloc_coherent(&pdev->dev, sps_config->desc.size, &sps_config->desc.phys_base, GFP_KERNEL); if (!sps_config->desc.base) { pr_err("dmam_alloc_coherent() failed for size %x\n", sps_config->desc.size); rc = -ENOMEM; goto free_endpoint; } memset(sps_config->desc.base, 0x00, sps_config->desc.size); rc = sps_connect(pipe_handle, sps_config); if (rc) { pr_err("sps_connect() failed %d\n", rc); goto free_endpoint; } init_completion(&end_point->completion); sps_event->mode = SPS_TRIGGER_WAIT; sps_event->options = SPS_O_EOT; sps_event->xfer_done = &end_point->completion; sps_event->user = (void *)qpic_res; rc = sps_register_event(pipe_handle, sps_event); if (rc) { pr_err("sps_register_event() failed %d\n", rc); goto sps_disconnect; } end_point->handle = pipe_handle; qpic_res->sps_init = true; goto out; sps_disconnect: sps_disconnect(pipe_handle); free_endpoint: sps_free_endpoint(pipe_handle); out: return rc; }
static int setup_bridge_to_a2(enum ipa_bridge_dir dir) { struct ipa_bridge_pipe_context *sys; struct a2_mux_pipe_connection pipe_conn = { 0, }; dma_addr_t dma_addr; u32 a2_handle; int ret; int i; if (dir == IPA_UL) { ret = ipa_get_a2_mux_pipe_info(IPA_TO_A2, &pipe_conn); if (ret) { IPAERR("ipa_get_a2_mux_pipe_info failed IPA_TO_A2\n"); goto tx_alloc_endpoint_failed; } ret = sps_phy2h(pipe_conn.dst_phy_addr, &a2_handle); if (ret) { IPAERR("sps_phy2h failed (A2 BAM) %d\n", ret); goto tx_alloc_endpoint_failed; } sys = &bridge[IPA_UL_TO_A2]; sys->pipe = sps_alloc_endpoint(); if (sys->pipe == NULL) { IPAERR("tx alloc endpoint failed\n"); ret = -ENOMEM; goto tx_alloc_endpoint_failed; } ret = sps_get_config(sys->pipe, &sys->connection); if (ret) { IPAERR("tx get config failed %d\n", ret); goto tx_get_config_failed; } sys->connection.source = SPS_DEV_HANDLE_MEM; sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++; sys->connection.destination = a2_handle; sys->connection.dest_pipe_index = pipe_conn.dst_pipe_index; sys->connection.mode = SPS_MODE_DEST; sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL; sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */ sys->desc_mem_buf.base = dma_alloc_coherent(NULL, sys->desc_mem_buf.size, &dma_addr, 0); if (sys->desc_mem_buf.base == NULL) { IPAERR("tx memory alloc failed\n"); ret = -ENOMEM; goto tx_get_config_failed; } sys->desc_mem_buf.phys_base = dma_addr; memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size); sys->connection.desc = sys->desc_mem_buf; sys->connection.event_thresh = IPA_EVENT_THRESHOLD; ret = sps_connect(sys->pipe, &sys->connection); if (ret < 0) { IPAERR("tx connect error %d\n", ret); goto tx_connect_failed; } INIT_LIST_HEAD(&sys->head_desc_list); INIT_LIST_HEAD(&sys->free_desc_list); spin_lock_init(&sys->spinlock); return 0; tx_connect_failed: dma_free_coherent(NULL, sys->desc_mem_buf.size, sys->desc_mem_buf.base, sys->desc_mem_buf.phys_base); tx_get_config_failed: 
sps_free_endpoint(sys->pipe); tx_alloc_endpoint_failed: return ret; } else { /* dir == IPA_UL */ ret = ipa_get_a2_mux_pipe_info(A2_TO_IPA, &pipe_conn); if (ret) { IPAERR("ipa_get_a2_mux_pipe_info failed A2_TO_IPA\n"); goto rx_alloc_endpoint_failed; } ret = sps_phy2h(pipe_conn.src_phy_addr, &a2_handle); if (ret) { IPAERR("sps_phy2h failed (A2 BAM) %d\n", ret); goto rx_alloc_endpoint_failed; } sys = &bridge[IPA_DL_FROM_A2]; sys->pipe = sps_alloc_endpoint(); if (sys->pipe == NULL) { IPAERR("rx alloc endpoint failed\n"); ret = -ENOMEM; goto rx_alloc_endpoint_failed; } ret = sps_get_config(sys->pipe, &sys->connection); if (ret) { IPAERR("rx get config failed %d\n", ret); goto rx_get_config_failed; } sys->connection.source = a2_handle; sys->connection.src_pipe_index = pipe_conn.src_pipe_index; sys->connection.destination = SPS_DEV_HANDLE_MEM; sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++; sys->connection.mode = SPS_MODE_SRC; sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_ACK_TRANSFERS; sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */ sys->desc_mem_buf.base = dma_alloc_coherent(NULL, sys->desc_mem_buf.size, &dma_addr, 0); if (sys->desc_mem_buf.base == NULL) { IPAERR("rx memory alloc failed\n"); ret = -ENOMEM; goto rx_get_config_failed; } sys->desc_mem_buf.phys_base = dma_addr; memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size); sys->connection.desc = sys->desc_mem_buf; sys->connection.event_thresh = IPA_EVENT_THRESHOLD; ret = sps_connect(sys->pipe, &sys->connection); if (ret < 0) { IPAERR("rx connect error %d\n", ret); goto rx_connect_failed; } sys->register_event.options = SPS_O_EOT; sys->register_event.mode = SPS_TRIGGER_CALLBACK; sys->register_event.xfer_done = NULL; sys->register_event.callback = bam_mux_rx_notify; sys->register_event.user = NULL; ret = sps_register_event(sys->pipe, &sys->register_event); if (ret < 0) { IPAERR("tx register event error %d\n", ret); goto rx_event_reg_failed; } 
INIT_LIST_HEAD(&sys->head_desc_list); INIT_LIST_HEAD(&sys->free_desc_list); spin_lock_init(&sys->spinlock); for (i = 0; i < IPA_RX_POOL_CEIL; i++) { ret = queue_rx_single(dir); if (ret < 0) IPAERR("queue fail %d %d\n", dir, i); } return 0; rx_event_reg_failed: sps_disconnect(sys->pipe); rx_connect_failed: dma_free_coherent(NULL, sys->desc_mem_buf.size, sys->desc_mem_buf.base, sys->desc_mem_buf.phys_base); rx_get_config_failed: sps_free_endpoint(sys->pipe); rx_alloc_endpoint_failed: return ret; } }
static int setup_bridge_to_ipa(enum ipa_bridge_dir dir) { struct ipa_bridge_pipe_context *sys; struct ipa_ep_cfg_mode mode; dma_addr_t dma_addr; int ipa_ep_idx; int ret; int i; if (dir == IPA_DL) { ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, IPA_CLIENT_A2_TETHERED_PROD); if (ipa_ep_idx == -1) { IPAERR("Invalid client.\n"); ret = -EINVAL; goto tx_alloc_endpoint_failed; } sys = &bridge[IPA_DL_TO_IPA]; sys->pipe = sps_alloc_endpoint(); if (sys->pipe == NULL) { IPAERR("tx alloc endpoint failed\n"); ret = -ENOMEM; goto tx_alloc_endpoint_failed; } ret = sps_get_config(sys->pipe, &sys->connection); if (ret) { IPAERR("tx get config failed %d\n", ret); goto tx_get_config_failed; } sys->connection.source = SPS_DEV_HANDLE_MEM; sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++; sys->connection.destination = ipa_ctx->bam_handle; sys->connection.dest_pipe_index = ipa_ep_idx; sys->connection.mode = SPS_MODE_DEST; sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL; sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */ sys->desc_mem_buf.base = dma_alloc_coherent(NULL, sys->desc_mem_buf.size, &dma_addr, 0); if (sys->desc_mem_buf.base == NULL) { IPAERR("tx memory alloc failed\n"); ret = -ENOMEM; goto tx_get_config_failed; } sys->desc_mem_buf.phys_base = dma_addr; memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size); sys->connection.desc = sys->desc_mem_buf; sys->connection.event_thresh = IPA_EVENT_THRESHOLD; ret = sps_connect(sys->pipe, &sys->connection); if (ret < 0) { IPAERR("tx connect error %d\n", ret); goto tx_connect_failed; } INIT_LIST_HEAD(&sys->head_desc_list); INIT_LIST_HEAD(&sys->free_desc_list); spin_lock_init(&sys->spinlock); ipa_ctx->ep[ipa_ep_idx].valid = 1; mode.mode = IPA_DMA; mode.dst = IPA_CLIENT_USB_CONS; ret = ipa_cfg_ep_mode(ipa_ep_idx, &mode); if (ret < 0) { IPAERR("DMA mode set error %d\n", ret); goto tx_mode_set_failed; } return 0; tx_mode_set_failed: sps_disconnect(sys->pipe); tx_connect_failed: 
dma_free_coherent(NULL, sys->desc_mem_buf.size, sys->desc_mem_buf.base, sys->desc_mem_buf.phys_base); tx_get_config_failed: sps_free_endpoint(sys->pipe); tx_alloc_endpoint_failed: return ret; } else { ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, IPA_CLIENT_A2_TETHERED_CONS); if (ipa_ep_idx == -1) { IPAERR("Invalid client.\n"); ret = -EINVAL; goto rx_alloc_endpoint_failed; } sys = &bridge[IPA_UL_FROM_IPA]; sys->pipe = sps_alloc_endpoint(); if (sys->pipe == NULL) { IPAERR("rx alloc endpoint failed\n"); ret = -ENOMEM; goto rx_alloc_endpoint_failed; } ret = sps_get_config(sys->pipe, &sys->connection); if (ret) { IPAERR("rx get config failed %d\n", ret); goto rx_get_config_failed; } sys->connection.source = ipa_ctx->bam_handle; sys->connection.src_pipe_index = 7; sys->connection.destination = SPS_DEV_HANDLE_MEM; sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++; sys->connection.mode = SPS_MODE_SRC; sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_ACK_TRANSFERS; sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */ sys->desc_mem_buf.base = dma_alloc_coherent(NULL, sys->desc_mem_buf.size, &dma_addr, 0); if (sys->desc_mem_buf.base == NULL) { IPAERR("rx memory alloc failed\n"); ret = -ENOMEM; goto rx_get_config_failed; } sys->desc_mem_buf.phys_base = dma_addr; memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size); sys->connection.desc = sys->desc_mem_buf; sys->connection.event_thresh = IPA_EVENT_THRESHOLD; ret = sps_connect(sys->pipe, &sys->connection); if (ret < 0) { IPAERR("rx connect error %d\n", ret); goto rx_connect_failed; } sys->register_event.options = SPS_O_EOT; sys->register_event.mode = SPS_TRIGGER_CALLBACK; sys->register_event.xfer_done = NULL; sys->register_event.callback = ipa_sps_irq_rx_notify; sys->register_event.user = NULL; ret = sps_register_event(sys->pipe, &sys->register_event); if (ret < 0) { IPAERR("tx register event error %d\n", ret); goto rx_event_reg_failed; } INIT_LIST_HEAD(&sys->head_desc_list); 
INIT_LIST_HEAD(&sys->free_desc_list); spin_lock_init(&sys->spinlock); for (i = 0; i < IPA_RX_POOL_CEIL; i++) { ret = queue_rx_single(dir); if (ret < 0) IPAERR("queue fail %d %d\n", dir, i); } return 0; rx_event_reg_failed: sps_disconnect(sys->pipe); rx_connect_failed: dma_free_coherent(NULL, sys->desc_mem_buf.size, sys->desc_mem_buf.base, sys->desc_mem_buf.phys_base); rx_get_config_failed: sps_free_endpoint(sys->pipe); rx_alloc_endpoint_failed: return ret; } }