int qpic_init_sps(struct platform_device *pdev, struct qpic_sps_endpt *end_point) { int rc = 0; struct sps_pipe *pipe_handle; struct sps_connect *sps_config = &end_point->config; struct sps_register_event *sps_event = &end_point->bam_event; struct sps_bam_props bam = {0}; u32 bam_handle = 0; if (qpic_res->sps_init) return 0; bam.phys_addr = qpic_res->qpic_phys + 0x4000; bam.virt_addr = qpic_res->qpic_base + 0x4000; bam.irq = qpic_res->irq - 4; bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE; rc = sps_phy2h(bam.phys_addr, &bam_handle); if (rc) rc = sps_register_bam_device(&bam, &bam_handle); if (rc) { pr_err("%s bam_handle is NULL", __func__); rc = -ENOMEM; goto out; } pipe_handle = sps_alloc_endpoint(); if (!pipe_handle) { pr_err("sps_alloc_endpoint() failed\n"); rc = -ENOMEM; goto out; } rc = sps_get_config(pipe_handle, sps_config); if (rc) { pr_err("sps_get_config() failed %d\n", rc); goto free_endpoint; } /* WRITE CASE: source - system memory; destination - BAM */ sps_config->source = SPS_DEV_HANDLE_MEM; sps_config->destination = bam_handle; sps_config->mode = SPS_MODE_DEST; sps_config->dest_pipe_index = 6; sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_EOT; sps_config->lock_group = 0; /* * Descriptor FIFO is a cyclic FIFO. If 64 descriptors * are allowed to be submitted before we get any ack for any of them, * the descriptor FIFO size should be: (SPS_MAX_DESC_NUM + 1) * * sizeof(struct sps_iovec). 
*/ sps_config->desc.size = (64) * sizeof(struct sps_iovec); sps_config->desc.base = dmam_alloc_coherent(&pdev->dev, sps_config->desc.size, &sps_config->desc.phys_base, GFP_KERNEL); if (!sps_config->desc.base) { pr_err("dmam_alloc_coherent() failed for size %x\n", sps_config->desc.size); rc = -ENOMEM; goto free_endpoint; } memset(sps_config->desc.base, 0x00, sps_config->desc.size); rc = sps_connect(pipe_handle, sps_config); if (rc) { pr_err("sps_connect() failed %d\n", rc); goto free_endpoint; } init_completion(&end_point->completion); sps_event->mode = SPS_TRIGGER_WAIT; sps_event->options = SPS_O_EOT; sps_event->xfer_done = &end_point->completion; sps_event->user = (void *)qpic_res; rc = sps_register_event(pipe_handle, sps_event); if (rc) { pr_err("sps_register_event() failed %d\n", rc); goto sps_disconnect; } end_point->handle = pipe_handle; qpic_res->sps_init = true; goto out; sps_disconnect: sps_disconnect(pipe_handle); free_endpoint: sps_free_endpoint(pipe_handle); out: return rc; }
/*
 * setup_dma_bam_bridge() - wire an IPA<->A2 bridge through the BAM-DMA.
 * @dir:      bridge direction (UL: IPA -> A2, DL: A2 -> IPA).
 * @type:     bridge type (selects pipe bookkeeping slots and FIFO sizing).
 * @props:    client connection parameters passed through to ipa_connect().
 * @clnt_hdl: out: IPA client handle from ipa_connect().
 *
 * Creates two SPS connections: one between the BAM-DMA and the IPA BAM
 * (via ipa_connect()), and one between the A2 BAM and the BAM-DMA.
 * On failure all resources acquired so far are released in reverse order.
 * Returns 0 on success or a negative errno.
 */
static int setup_dma_bam_bridge(enum ipa_bridge_dir dir,
				enum ipa_bridge_type type,
				struct ipa_sys_connect_params *props,
				u32 *clnt_hdl)
{
	struct ipa_connect_params ipa_in_params;
	struct ipa_sps_params sps_out_params;
	int dma_a2_pipe;
	int dma_ipa_pipe;
	struct sps_pipe *pipe;
	struct sps_pipe *pipe_a2;
	struct sps_connect _connection;
	struct sps_connect *connection = &_connection;
	struct a2_mux_pipe_connection pipe_conn = {0};
	enum a2_mux_pipe_direction pipe_dir;
	u32 dma_hdl = sps_dma_get_bam_handle();
	u32 a2_hdl;
	u32 pa;
	int ret;

	memset(&ipa_in_params, 0, sizeof(ipa_in_params));
	memset(&sps_out_params, 0, sizeof(sps_out_params));

	pipe_dir = (dir == IPA_BRIDGE_DIR_UL) ? IPA_TO_A2 : A2_TO_IPA;

	ret = ipa_get_a2_mux_pipe_info(pipe_dir, &pipe_conn);
	if (ret) {
		IPAERR("ipa_get_a2_mux_pipe_info failed dir=%d type=%d\n",
		       dir, type);
		goto fail_get_a2_prop;
	}

	/* UL talks to the A2 destination side, DL to its source side. */
	pa = (dir == IPA_BRIDGE_DIR_UL) ? pipe_conn.dst_phy_addr :
					  pipe_conn.src_phy_addr;

	ret = sps_phy2h(pa, &a2_hdl);
	if (ret) {
		IPAERR("sps_phy2h failed (A2 BAM) %d dir=%d type=%d\n",
		       ret, dir, type);
		goto fail_get_a2_prop;
	}

	ipa_get_dma_pipe_num(dir, type, &dma_a2_pipe, &dma_ipa_pipe);

	ipa_in_params.ipa_ep_cfg = props->ipa_ep_cfg;
	ipa_in_params.client = props->client;
	ipa_in_params.client_bam_hdl = dma_hdl;
	ipa_in_params.client_ep_idx = dma_ipa_pipe;
	ipa_in_params.priv = props->priv;
	ipa_in_params.notify = props->notify;
	ipa_in_params.desc_fifo_sz = ipa_get_desc_fifo_sz(dir, type);
	ipa_in_params.data_fifo_sz = ipa_get_data_fifo_sz(dir, type);

	/*
	 * Capture the return value: the original code tested ipa_connect()
	 * without assigning ret, so a connect failure returned stale 0
	 * (success) to the caller.
	 */
	ret = ipa_connect(&ipa_in_params, &sps_out_params, clnt_hdl);
	if (ret) {
		IPAERR("ipa connect failed dir=%d type=%d\n", dir, type);
		goto fail_get_a2_prop;
	}

	pipe = sps_alloc_endpoint();
	if (pipe == NULL) {
		IPAERR("sps_alloc_endpoint failed dir=%d type=%d\n", dir,
		       type);
		ret = -ENOMEM;
		goto fail_sps_alloc;
	}

	memset(&_connection, 0, sizeof(_connection));
	ret = sps_get_config(pipe, connection);
	if (ret) {
		IPAERR("sps_get_config failed %d dir=%d type=%d\n", ret, dir,
		       type);
		goto fail_sps_get_config;
	}

	/* IPA-facing leg: DMA pipe is SRC for DL, DEST for UL. */
	if (dir == IPA_BRIDGE_DIR_DL) {
		connection->mode = SPS_MODE_SRC;
		connection->source = dma_hdl;
		connection->destination = sps_out_params.ipa_bam_hdl;
		connection->src_pipe_index = dma_ipa_pipe;
		connection->dest_pipe_index = sps_out_params.ipa_ep_idx;
	} else {
		connection->mode = SPS_MODE_DEST;
		connection->source = sps_out_params.ipa_bam_hdl;
		connection->destination = dma_hdl;
		connection->src_pipe_index = sps_out_params.ipa_ep_idx;
		connection->dest_pipe_index = dma_ipa_pipe;
	}

	connection->event_thresh = IPA_EVENT_THRESHOLD;
	/* FIFOs for this leg were allocated by ipa_connect(). */
	connection->data = sps_out_params.data;
	connection->desc = sps_out_params.desc;
	connection->options = SPS_O_AUTO_ENABLE;

	ret = sps_connect(pipe, connection);
	if (ret) {
		IPAERR("sps_connect failed %d dir=%d type=%d\n", ret, dir,
		       type);
		goto fail_sps_get_config;
	}

	if (dir == IPA_BRIDGE_DIR_DL) {
		bridge[type].pipe[IPA_DL_TO_IPA].pipe = pipe;
		bridge[type].pipe[IPA_DL_TO_IPA].ipa_facing = true;
		bridge[type].pipe[IPA_DL_TO_IPA].valid = true;
	} else {
		bridge[type].pipe[IPA_UL_FROM_IPA].pipe = pipe;
		bridge[type].pipe[IPA_UL_FROM_IPA].ipa_facing = true;
		bridge[type].pipe[IPA_UL_FROM_IPA].valid = true;
	}

	IPADBG("dir=%d type=%d (ipa) src(0x%x:%u)->dst(0x%x:%u)\n", dir, type,
	       connection->source, connection->src_pipe_index,
	       connection->destination, connection->dest_pipe_index);

	pipe_a2 = sps_alloc_endpoint();
	if (pipe_a2 == NULL) {
		IPAERR("sps_alloc_endpoint failed2 dir=%d type=%d\n", dir,
		       type);
		ret = -ENOMEM;
		goto fail_sps_alloc_a2;
	}

	memset(&_connection, 0, sizeof(_connection));
	ret = sps_get_config(pipe_a2, connection);
	if (ret) {
		IPAERR("sps_get_config failed2 %d dir=%d type=%d\n", ret, dir,
		       type);
		goto fail_sps_get_config_a2;
	}

	/* A2-facing leg: A2 is the producer for DL, consumer for UL. */
	if (dir == IPA_BRIDGE_DIR_DL) {
		connection->mode = SPS_MODE_DEST;
		connection->source = a2_hdl;
		connection->destination = dma_hdl;
		connection->src_pipe_index = ipa_get_a2_pipe_num(dir, type);
		connection->dest_pipe_index = dma_a2_pipe;
	} else {
		connection->mode = SPS_MODE_SRC;
		connection->source = dma_hdl;
		connection->destination = a2_hdl;
		connection->src_pipe_index = dma_a2_pipe;
		connection->dest_pipe_index = ipa_get_a2_pipe_num(dir, type);
	}

	connection->event_thresh = IPA_EVENT_THRESHOLD;

	/*
	 * Capture the return value: the original code tested
	 * ipa_setup_a2_dma_fifos() without assigning ret, so a FIFO setup
	 * failure returned stale 0 (success) after cleanup.
	 */
	ret = ipa_setup_a2_dma_fifos(dir, type, &connection->desc,
				     &connection->data);
	if (ret) {
		IPAERR("fail to setup A2-DMA FIFOs dir=%d type=%d\n", dir,
		       type);
		goto fail_sps_get_config_a2;
	}

	connection->options = SPS_O_AUTO_ENABLE;

	ret = sps_connect(pipe_a2, connection);
	if (ret) {
		IPAERR("sps_connect failed2 %d dir=%d type=%d\n", ret, dir,
		       type);
		goto fail_sps_get_config_a2;
	}

	if (dir == IPA_BRIDGE_DIR_DL) {
		bridge[type].pipe[IPA_DL_FROM_A2].pipe = pipe_a2;
		bridge[type].pipe[IPA_DL_FROM_A2].valid = true;
	} else {
		bridge[type].pipe[IPA_UL_TO_A2].pipe = pipe_a2;
		bridge[type].pipe[IPA_UL_TO_A2].valid = true;
	}

	IPADBG("dir=%d type=%d (a2) src(0x%x:%u)->dst(0x%x:%u)\n", dir, type,
	       connection->source, connection->src_pipe_index,
	       connection->destination, connection->dest_pipe_index);

	return 0;

fail_sps_get_config_a2:
	sps_free_endpoint(pipe_a2);
fail_sps_alloc_a2:
	sps_disconnect(pipe);
fail_sps_get_config:
	sps_free_endpoint(pipe);
fail_sps_alloc:
	ipa_disconnect(*clnt_hdl);
fail_get_a2_prop:
	return ret;
}
static int setup_bridge_to_a2(enum ipa_bridge_dir dir) { struct ipa_bridge_pipe_context *sys; struct a2_mux_pipe_connection pipe_conn = { 0, }; dma_addr_t dma_addr; u32 a2_handle; int ret; int i; if (dir == IPA_UL) { ret = ipa_get_a2_mux_pipe_info(IPA_TO_A2, &pipe_conn); if (ret) { IPAERR("ipa_get_a2_mux_pipe_info failed IPA_TO_A2\n"); goto tx_alloc_endpoint_failed; } ret = sps_phy2h(pipe_conn.dst_phy_addr, &a2_handle); if (ret) { IPAERR("sps_phy2h failed (A2 BAM) %d\n", ret); goto tx_alloc_endpoint_failed; } sys = &bridge[IPA_UL_TO_A2]; sys->pipe = sps_alloc_endpoint(); if (sys->pipe == NULL) { IPAERR("tx alloc endpoint failed\n"); ret = -ENOMEM; goto tx_alloc_endpoint_failed; } ret = sps_get_config(sys->pipe, &sys->connection); if (ret) { IPAERR("tx get config failed %d\n", ret); goto tx_get_config_failed; } sys->connection.source = SPS_DEV_HANDLE_MEM; sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++; sys->connection.destination = a2_handle; sys->connection.dest_pipe_index = pipe_conn.dst_pipe_index; sys->connection.mode = SPS_MODE_DEST; sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL; sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */ sys->desc_mem_buf.base = dma_alloc_coherent(NULL, sys->desc_mem_buf.size, &dma_addr, 0); if (sys->desc_mem_buf.base == NULL) { IPAERR("tx memory alloc failed\n"); ret = -ENOMEM; goto tx_get_config_failed; } sys->desc_mem_buf.phys_base = dma_addr; memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size); sys->connection.desc = sys->desc_mem_buf; sys->connection.event_thresh = IPA_EVENT_THRESHOLD; ret = sps_connect(sys->pipe, &sys->connection); if (ret < 0) { IPAERR("tx connect error %d\n", ret); goto tx_connect_failed; } INIT_LIST_HEAD(&sys->head_desc_list); INIT_LIST_HEAD(&sys->free_desc_list); spin_lock_init(&sys->spinlock); return 0; tx_connect_failed: dma_free_coherent(NULL, sys->desc_mem_buf.size, sys->desc_mem_buf.base, sys->desc_mem_buf.phys_base); tx_get_config_failed: 
sps_free_endpoint(sys->pipe); tx_alloc_endpoint_failed: return ret; } else { /* dir == IPA_UL */ ret = ipa_get_a2_mux_pipe_info(A2_TO_IPA, &pipe_conn); if (ret) { IPAERR("ipa_get_a2_mux_pipe_info failed A2_TO_IPA\n"); goto rx_alloc_endpoint_failed; } ret = sps_phy2h(pipe_conn.src_phy_addr, &a2_handle); if (ret) { IPAERR("sps_phy2h failed (A2 BAM) %d\n", ret); goto rx_alloc_endpoint_failed; } sys = &bridge[IPA_DL_FROM_A2]; sys->pipe = sps_alloc_endpoint(); if (sys->pipe == NULL) { IPAERR("rx alloc endpoint failed\n"); ret = -ENOMEM; goto rx_alloc_endpoint_failed; } ret = sps_get_config(sys->pipe, &sys->connection); if (ret) { IPAERR("rx get config failed %d\n", ret); goto rx_get_config_failed; } sys->connection.source = a2_handle; sys->connection.src_pipe_index = pipe_conn.src_pipe_index; sys->connection.destination = SPS_DEV_HANDLE_MEM; sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++; sys->connection.mode = SPS_MODE_SRC; sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_ACK_TRANSFERS; sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */ sys->desc_mem_buf.base = dma_alloc_coherent(NULL, sys->desc_mem_buf.size, &dma_addr, 0); if (sys->desc_mem_buf.base == NULL) { IPAERR("rx memory alloc failed\n"); ret = -ENOMEM; goto rx_get_config_failed; } sys->desc_mem_buf.phys_base = dma_addr; memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size); sys->connection.desc = sys->desc_mem_buf; sys->connection.event_thresh = IPA_EVENT_THRESHOLD; ret = sps_connect(sys->pipe, &sys->connection); if (ret < 0) { IPAERR("rx connect error %d\n", ret); goto rx_connect_failed; } sys->register_event.options = SPS_O_EOT; sys->register_event.mode = SPS_TRIGGER_CALLBACK; sys->register_event.xfer_done = NULL; sys->register_event.callback = bam_mux_rx_notify; sys->register_event.user = NULL; ret = sps_register_event(sys->pipe, &sys->register_event); if (ret < 0) { IPAERR("tx register event error %d\n", ret); goto rx_event_reg_failed; } 
INIT_LIST_HEAD(&sys->head_desc_list); INIT_LIST_HEAD(&sys->free_desc_list); spin_lock_init(&sys->spinlock); for (i = 0; i < IPA_RX_POOL_CEIL; i++) { ret = queue_rx_single(dir); if (ret < 0) IPAERR("queue fail %d %d\n", dir, i); } return 0; rx_event_reg_failed: sps_disconnect(sys->pipe); rx_connect_failed: dma_free_coherent(NULL, sys->desc_mem_buf.size, sys->desc_mem_buf.base, sys->desc_mem_buf.phys_base); rx_get_config_failed: sps_free_endpoint(sys->pipe); rx_alloc_endpoint_failed: return ret; } }