int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd)
{
	int res;

	if (!ipa_uc_mhi_ctx) {
		IPAERR("Not initialized\n");
		return -EFAULT;
	}

	IPADBG("isDlUlSyncEnabled=0x%x UlAccmVal=0x%x\n",
		cmd.params.isDlUlSyncEnabled, cmd.params.UlAccmVal);
	IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n",
		cmd.params.ulMsiEventThreshold,
		cmd.params.dlMsiEventThreshold);

	ipa_inc_client_enable_clks();

	res = ipa_uc_send_cmd(cmd.raw32b,
		IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ);
	if (res) {
		IPAERR("ipa_uc_send_cmd failed %d\n", res);
		goto disable_clks;
	}

	res = 0;

disable_clks:
	ipa_dec_client_disable_clks();
	return res;
}
/**
 * ipa_bridge_setup() - setup SW bridge leg
 * @dir: downlink or uplink (from air interface perspective)
 * @type: tethered or embedded bridge
 * @props: bridge leg properties (EP config, callbacks, etc)
 * @clnt_hdl: [out] handle of IPA EP belonging to bridge leg
 *
 * NOTE: IT IS CALLER'S RESPONSIBILITY TO ENSURE BAMs ARE
 * OPERATIONAL AS LONG AS BRIDGE REMAINS UP
 *
 * Return codes:
 * 0: success
 * various negative error codes on errors
 */
int ipa_bridge_setup(enum ipa_bridge_dir dir, enum ipa_bridge_type type,
		     struct ipa_sys_connect_params *props, u32 *clnt_hdl)
{
	int ret;

	if (props == NULL || clnt_hdl == NULL ||
	    type >= IPA_BRIDGE_TYPE_MAX || dir >= IPA_BRIDGE_DIR_MAX ||
	    props->client >= IPA_CLIENT_MAX) {
		IPAERR("Bad param props=%p clnt_hdl=%p type=%d dir=%d\n",
		       props, clnt_hdl, type, dir);
		return -EINVAL;
	}

	ipa_inc_client_enable_clks();

	if (setup_dma_bam_bridge(dir, type, props, clnt_hdl)) {
		IPAERR("fail to setup SYS pipe to IPA dir=%d type=%d\n",
		       dir, type);
		ret = -EINVAL;
		goto bail_ipa;
	}

	/* clock vote is held for as long as the bridge leg remains up */
	return 0;

bail_ipa:
	ipa_dec_client_disable_clks();
	return ret;
}
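/*
 * Usage sketch (illustrative, not part of this driver): bringing up both
 * legs of a tethered bridge. Error handling is abbreviated, and
 * ipa_bridge_teardown() is assumed to be the counterpart that releases a
 * leg and its clock vote.
 *
 *	struct ipa_sys_connect_params ul_props, dl_props;
 *	u32 ul_hdl, dl_hdl;
 *
 *	if (ipa_bridge_setup(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
 *			     &ul_props, &ul_hdl))
 *		return -EFAULT;
 *	if (ipa_bridge_setup(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
 *			     &dl_props, &dl_hdl)) {
 *		ipa_bridge_teardown(IPA_BRIDGE_DIR_UL,
 *				    IPA_BRIDGE_TYPE_TETHERED, ul_hdl);
 *		return -EFAULT;
 *	}
 */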
int ipa_uc_mhi_stop_event_update_channel(int channelHandle)
{
	union IpaHwMhiStopEventUpdateData_t cmd;
	int res;

	if (!ipa_uc_mhi_ctx) {
		IPAERR("Not initialized\n");
		return -EFAULT;
	}

	ipa_inc_client_enable_clks();

	memset(&cmd, 0, sizeof(cmd));
	cmd.params.channelHandle = channelHandle;

	ipa_uc_mhi_ctx->expected_responseOp =
		IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE;
	ipa_uc_mhi_ctx->expected_responseParams = cmd.raw32b;

	res = ipa_uc_send_cmd(cmd.raw32b,
		IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE, 0, false, HZ);
	if (res) {
		IPAERR("ipa_uc_send_cmd failed %d\n", res);
		goto disable_clks;
	}

	res = 0;

disable_clks:
	ipa_dec_client_disable_clks();
	return res;
}
/**
 * ipa_mhi_disconnect_pipe() - Disconnect pipe from IPA and reset corresponding
 * MHI channel
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 *
 * This function is called by MHI client driver on MHI channel reset.
 * This function is called after MHI channel was started.
 * This function is doing the following:
 *	- Send command to uC to reset corresponding MHI channel
 *	- Configure IPA EP control
 *
 * Return codes: 0	  : success
 *		 negative : error
 */
int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
{
	struct ipa_ep_context *ep;
	struct ipa_mhi_channel_ctx *channel;
	int res;

	IPA_MHI_FUNC_ENTRY();

	if (clnt_hdl >= ipa_ctx->ipa_num_pipes) {
		IPAERR("invalid handle %d\n", clnt_hdl);
		return -EINVAL;
	}

	if (ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("pipe was not connected %d\n", clnt_hdl);
		return -EINVAL;
	}

	if (unlikely(!ipa_mhi_ctx)) {
		IPA_MHI_ERR("IPA MHI was not initialized\n");
		return -EINVAL;
	}

	channel = ipa_mhi_get_channel_context_by_clnt_hdl(clnt_hdl);
	if (!channel) {
		IPAERR("invalid clnt hdl\n");
		return -EINVAL;
	}

	ep = &ipa_ctx->ep[clnt_hdl];

	if (!ep->keep_ipa_awake)
		ipa_inc_client_enable_clks();

	res = ipa_mhi_reset_channel(channel);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_reset_channel failed %d\n", res);
		goto fail_reset_channel;
	}

	ep->valid = 0;
	ipa_delete_dflt_flt_rules(clnt_hdl);

	/* release the clock vote held since connect (kept when
	 * keep_ipa_awake is set)
	 */
	ipa_dec_client_disable_clks();

	IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
	IPA_MHI_FUNC_EXIT();
	return 0;

fail_reset_channel:
	if (!ep->keep_ipa_awake)
		ipa_dec_client_disable_clks();
	return res;
}
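/*
 * Usage sketch (illustrative): called from the MHI client driver's
 * channel-reset path, for a pipe previously connected with
 * ipa_mhi_connect_pipe() (defined later in this file).
 *
 *	res = ipa_mhi_disconnect_pipe(clnt_hdl);
 *	if (res)
 *		IPA_MHI_ERR("disconnect failed %d\n", res);
 */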
static void ipa_uc_event_handler(enum ipa_irq_type interrupt,
				 void *private_data,
				 void *interrupt_data)
{
	union IpaHwErrorEventData_t evt;
	u8 feature;

	WARN_ON(private_data != ipa_ctx);

	ipa_inc_client_enable_clks();

	IPADBG("uC evt opcode=%u\n",
		ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);

	feature = EXTRACT_UC_FEATURE(ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);

	/* feature is unsigned; only the upper bound needs checking */
	if (feature >= IPA_HW_FEATURE_MAX) {
		IPAERR("Invalid feature %u for event %u\n",
			feature, ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
		ipa_dec_client_disable_clks();
		return;
	}

	/* Feature specific handling */
	if (uc_hdlrs[feature].ipa_uc_event_hdlr)
		uc_hdlrs[feature].ipa_uc_event_hdlr(
			ipa_ctx->uc_ctx.uc_sram_mmio);

	/* General handling */
	if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
	    IPA_HW_2_CPU_EVENT_ERROR) {
		evt.raw32b = ipa_ctx->uc_ctx.uc_sram_mmio->eventParams;
		IPADBG("uC evt errorType=%u\n", evt.params.errorType);
		BUG();
	} else if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
		   IPA_HW_2_CPU_EVENT_LOG_INFO) {
		IPADBG("uC evt log info ofst=0x%x\n",
			ipa_ctx->uc_ctx.uc_sram_mmio->eventParams);
		ipa_log_evt_hdlr();
	} else {
		IPADBG("unsupported uC evt opcode=%u\n",
			ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
	}

	ipa_dec_client_disable_clks();
}
int ipa_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
	int contexArrayIndex, int channelDirection)
{
	int res;
	union IpaHwMhiInitChannelCmdData_t init_cmd;
	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;

	if (!ipa_uc_mhi_ctx) {
		IPAERR("Not initialized\n");
		return -EFAULT;
	}

	if (ipa_ep_idx < 0 || ipa_ep_idx >= IPA_NUM_PIPES) {
		IPAERR("Invalid ipa_ep_idx.\n");
		return -EINVAL;
	}

	ipa_inc_client_enable_clks();

	memset(&uc_rsp, 0, sizeof(uc_rsp));
	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
	uc_rsp.params.channelHandle = channelHandle;
	ipa_uc_mhi_ctx->expected_responseOp =
		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
	ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;

	memset(&init_cmd, 0, sizeof(init_cmd));
	init_cmd.params.channelHandle = channelHandle;
	init_cmd.params.contexArrayIndex = contexArrayIndex;
	init_cmd.params.bamPipeId = ipa_ep_idx;
	init_cmd.params.channelDirection = channelDirection;

	res = ipa_uc_send_cmd(init_cmd.raw32b,
		IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL, 0, false, HZ);
	if (res) {
		IPAERR("ipa_uc_send_cmd failed %d\n", res);
		goto disable_clks;
	}

	res = 0;

disable_clks:
	ipa_dec_client_disable_clks();
	return res;
}
/**
 * ipa_resume() - low-level IPA client resume
 * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
 *
 * Should be called by the driver of the peripheral that wants to resume the
 * IPA connection. Resuming the connection turns the IPA clocks back on if
 * they were turned off as a result of suspend. This API may be called only
 * after a matching call to ipa_suspend().
 *
 * Returns:	0 on success, negative on failure
 *
 * Note:	Should not be called from atomic context
 */
int ipa_resume(u32 clnt_hdl)
{
	struct ipa_ep_context *ep;

	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm. clnt_hdl %d\n", clnt_hdl);
		return -EINVAL;
	}

	ep = &ipa_ctx->ep[clnt_hdl];

	if (!ep->suspended) {
		IPAERR("EP not suspended. clnt_hdl %d\n", clnt_hdl);
		return -EPERM;
	}

	ipa_inc_client_enable_clks();
	ep->suspended = false;

	return 0;
}
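/*
 * Usage sketch (illustrative): a peripheral driver bracketing a low-power
 * transition with the ipa_suspend()/ipa_resume() pair referenced in the
 * kernel-doc above. Error handling is abbreviated.
 *
 *	res = ipa_suspend(clnt_hdl);
 *	if (res)
 *		return res;
 *	... peripheral idle, IPA clock vote for this EP released ...
 *	res = ipa_resume(clnt_hdl);
 *	if (res)
 *		return res;
 */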
int ipa_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected)
{
	union IpaHwMhiChangeChannelStateCmdData_t cmd;
	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
	int res;

	if (!ipa_uc_mhi_ctx) {
		IPAERR("Not initialized\n");
		return -EFAULT;
	}

	ipa_inc_client_enable_clks();

	memset(&uc_rsp, 0, sizeof(uc_rsp));
	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
	uc_rsp.params.channelHandle = channelHandle;
	ipa_uc_mhi_ctx->expected_responseOp =
		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
	ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;

	memset(&cmd, 0, sizeof(cmd));
	cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_RUN;
	cmd.params.channelHandle = channelHandle;
	cmd.params.LPTransitionRejected = LPTransitionRejected;

	res = ipa_uc_send_cmd(cmd.raw32b,
		IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
	if (res) {
		IPAERR("ipa_uc_send_cmd failed %d\n", res);
		goto disable_clks;
	}

	res = 0;

disable_clks:
	ipa_dec_client_disable_clks();
	return res;
}
int ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
	u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
	u32 first_evt_idx)
{
	int res;
	struct ipa_mem_buffer mem;
	struct IpaHwMhiInitCmdData_t *init_cmd_data;
	struct IpaHwMhiMsiCmdData_t *msi_cmd;

	if (!ipa_uc_mhi_ctx) {
		IPAERR("Not initialized\n");
		return -EFAULT;
	}

	ipa_inc_client_enable_clks();

	res = ipa_uc_update_hw_flags(0);
	if (res) {
		IPAERR("ipa_uc_update_hw_flags failed %d\n", res);
		goto disable_clks;
	}

	mem.size = sizeof(*init_cmd_data);
	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
		GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		res = -ENOMEM;
		goto disable_clks;
	}
	memset(mem.base, 0, mem.size);
	init_cmd_data = (struct IpaHwMhiInitCmdData_t *)mem.base;
	init_cmd_data->msiAddress = msi->addr_low;
	init_cmd_data->mmioBaseAddress = mmio_addr;
	init_cmd_data->deviceMhiCtrlBaseAddress = host_ctrl_addr;
	init_cmd_data->deviceMhiDataBaseAddress = host_data_addr;
	init_cmd_data->firstChannelIndex = first_ch_idx;
	init_cmd_data->firstEventRingIndex = first_evt_idx;
	res = ipa_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MHI_INIT,
		0, false, HZ);
	if (res) {
		IPAERR("ipa_uc_send_cmd failed %d\n", res);
		dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
			mem.phys_base);
		goto disable_clks;
	}
	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);

	mem.size = sizeof(*msi_cmd);
	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
		GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		res = -ENOMEM;
		goto disable_clks;
	}
	msi_cmd = (struct IpaHwMhiMsiCmdData_t *)mem.base;
	msi_cmd->msiAddress_hi = msi->addr_hi;
	msi_cmd->msiAddress_low = msi->addr_low;
	msi_cmd->msiData = msi->data;
	msi_cmd->msiMask = msi->mask;
	res = ipa_uc_send_cmd((u32)mem.phys_base,
		IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI, 0, false, HZ);
	if (res) {
		IPAERR("ipa_uc_send_cmd failed %d\n", res);
		dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
			mem.phys_base);
		goto disable_clks;
	}
	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);

	res = 0;

disable_clks:
	ipa_dec_client_disable_clks();
	return res;
}
/**
 * ipa_mhi_suspend() - Suspend MHI accelerated channels
 * @force:
 *	false: in case of data pending in IPA, MHI channels will not be
 *		suspended and the function will fail.
 *	true: in case of data pending in IPA, make sure no further access from
 *		IPA to PCIe is possible. In this case suspend cannot fail.
 *
 * This function is called by MHI client driver on MHI suspend.
 * This function is called after MHI channel was started.
 * When this function returns device can move to M1/M2/M3/D3cold state.
 * This function is doing the following:
 *	- Send command to uC to suspend corresponding MHI channel
 *	- Make sure no further access is possible from IPA to PCIe
 *	- Release MHI_PROD in IPA RM
 *
 * Return codes: 0	  : success
 *		 negative : error
 */
int ipa_mhi_suspend(bool force)
{
	int res;
	bool bam_empty;
	bool force_clear = false;

	IPA_MHI_FUNC_ENTRY();

	if (unlikely(!ipa_mhi_ctx)) {
		IPA_MHI_ERR("IPA MHI was not initialized\n");
		return -EINVAL;
	}

	res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPEND_IN_PROGRESS);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
		return res;
	}

	res = ipa_mhi_suspend_ul_channels();
	if (res) {
		IPA_MHI_ERR("ipa_mhi_suspend_ul_channels failed %d\n", res);
		goto fail_suspend_ul_channel;
	}

	bam_empty = ipa_mhi_wait_for_bam_empty_timeout(
		IPA_MHI_BAM_EMPTY_TIMEOUT_MSEC);
	if (!bam_empty) {
		if (force) {
			res = ipa_mhi_enable_force_clear(
				ipa_mhi_ctx->qmi_req_id, false);
			if (res) {
				IPA_MHI_ERR("failed to enable force clear\n");
				BUG();
				return res;
			}
			force_clear = true;
			IPA_MHI_DBG("force clear datapath enabled\n");

			bam_empty = ipa_mhi_wait_for_bam_empty_timeout(
				IPA_MHI_BAM_EMPTY_TIMEOUT_MSEC);
			IPADBG("bam_empty=%d\n", bam_empty);
		} else {
			IPA_MHI_DBG("BAM not empty\n");
			res = -EAGAIN;
			goto fail_suspend_ul_channel;
		}
	}

	res = ipa_mhi_stop_event_update_ul_channels();
	if (res) {
		IPA_MHI_ERR("ipa_mhi_stop_event_update_ul_channels failed %d\n",
			res);
		goto fail_suspend_ul_channel;
	}

	/*
	 * in case BAM is not empty, hold IPA clocks and release them after
	 * all IPA RM resources are released, to make sure the tag process
	 * will not start
	 */
	if (!bam_empty)
		ipa_inc_client_enable_clks();

	IPA_MHI_DBG("release prod\n");
	res = ipa_mhi_release_prod();
	if (res) {
		IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res);
		goto fail_release_prod;
	}

	IPA_MHI_DBG("wait for cons release\n");
	res = ipa_mhi_wait_for_cons_release();
	if (res) {
		IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed %d\n", res);
		goto fail_release_cons;
	}

	usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);

	res = ipa_mhi_suspend_dl_channels();
	if (res) {
		IPA_MHI_ERR("ipa_mhi_suspend_dl_channels failed %d\n", res);
		goto fail_suspend_dl_channel;
	}

	res = ipa_mhi_stop_event_update_dl_channels();
	if (res) {
		IPA_MHI_ERR("failed to stop event update on DL %d\n", res);
		goto fail_stop_event_update_dl_channel;
	}

	if (force_clear) {
		res = ipa_mhi_disable_force_clear(ipa_mhi_ctx->qmi_req_id);
		if (res) {
			IPA_MHI_ERR("failed to disable force clear\n");
			BUG();
			return res;
		}
		IPA_MHI_DBG("force clear datapath disabled\n");
		ipa_mhi_ctx->qmi_req_id++;
	}

	if (!bam_empty) {
		ipa_ctx->tag_process_before_gating = false;
		ipa_dec_client_disable_clks();
	}

	res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
		goto fail_release_cons;
	}

	IPA_MHI_FUNC_EXIT();
	return 0;

fail_stop_event_update_dl_channel:
	ipa_mhi_resume_dl_channels(true);
fail_suspend_dl_channel:
fail_release_cons:
	ipa_mhi_request_prod();
fail_release_prod:
fail_suspend_ul_channel:
	ipa_mhi_resume_ul_channels(true);
	ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
	return res;
}
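/*
 * Usage sketch (illustrative): an MHI client driver trying a graceful
 * suspend first, then forcing it if data is still pending in IPA. The
 * fallback-on-EAGAIN policy is an assumption, not mandated by this driver.
 *
 *	res = ipa_mhi_suspend(false);
 *	if (res == -EAGAIN)
 *		res = ipa_mhi_suspend(true);
 *	if (res)
 *		return res;
 */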
/**
 * ipa_mhi_connect_pipe() - Connect pipe to IPA and start corresponding
 * MHI channel
 * @in: connect parameters
 * @clnt_hdl: [out] client handle for this pipe
 *
 * This function is called by MHI client driver on MHI channel start.
 * This function is called after MHI engine was started.
 * This function is doing the following:
 *	- Send command to uC to start corresponding MHI channel
 *	- Configure IPA EP control
 *
 * Return codes: 0	  : success
 *		 negative : error
 */
int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
{
	struct ipa_ep_context *ep;
	int ipa_ep_idx;
	int res;
	struct ipa_mhi_channel_ctx *channel = NULL;
	unsigned long flags;

	IPA_MHI_FUNC_ENTRY();

	if (!in || !clnt_hdl) {
		IPA_MHI_ERR("NULL args\n");
		return -EINVAL;
	}

	if (in->sys.client >= IPA_CLIENT_MAX) {
		IPA_MHI_ERR("bad parm client:%d\n", in->sys.client);
		return -EINVAL;
	}

	if (unlikely(!ipa_mhi_ctx)) {
		IPA_MHI_ERR("IPA MHI was not initialized\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&ipa_mhi_ctx->state_lock, flags);
	if (ipa_mhi_ctx->state != IPA_MHI_STATE_STARTED) {
		IPA_MHI_ERR("IPA MHI was not started\n");
		spin_unlock_irqrestore(&ipa_mhi_ctx->state_lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&ipa_mhi_ctx->state_lock, flags);

	ipa_ep_idx = ipa_get_ep_mapping(in->sys.client);
	if (ipa_ep_idx == -1) {
		IPA_MHI_ERR("Invalid client.\n");
		return -EINVAL;
	}

	ep = &ipa_ctx->ep[ipa_ep_idx];

	channel = ipa_mhi_get_channel_context(in->sys.client, in->channel_id);
	if (!channel) {
		IPA_MHI_ERR("ipa_mhi_get_channel_context failed\n");
		return -EINVAL;
	}

	IPA_MHI_DBG("client %d channelHandle %d channelIndex %d\n",
		channel->client, channel->hdl, channel->id);

	ipa_inc_client_enable_clks();

	if (ep->valid == 1) {
		IPA_MHI_ERR("EP already allocated.\n");
		goto fail_ep_exists;
	}

	memset(ep, 0, offsetof(struct ipa_ep_context, sys));
	ep->valid = 1;
	ep->skip_ep_cfg = in->sys.skip_ep_cfg;
	ep->client = in->sys.client;
	ep->client_notify = in->sys.notify;
	ep->priv = in->sys.priv;
	ep->keep_ipa_awake = in->sys.keep_ipa_awake;

	/* start channel in uC */
	if (channel->state == IPA_HW_MHI_CHANNEL_STATE_INVALID) {
		IPA_MHI_DBG("Initializing channel\n");
		res = ipa_uc_mhi_init_channel(ipa_ep_idx, channel->hdl,
			channel->id,
			/* channelDirection: 1 = IPA producer, 2 = consumer */
			(IPA_CLIENT_IS_PROD(ep->client) ? 1 : 2));
		if (res) {
			IPA_MHI_ERR("ipa_uc_mhi_init_channel failed %d\n",
				res);
			goto fail_init_channel;
		}
	} else if (channel->state == IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
		if (channel->client != ep->client) {
			IPA_MHI_ERR("previous channel client was %d\n",
				channel->client);
			goto fail_init_channel;
		}
		IPA_MHI_DBG("Starting channel\n");
		res = ipa_uc_mhi_resume_channel(channel->hdl, false);
		if (res) {
			IPA_MHI_ERR("ipa_uc_mhi_resume_channel failed %d\n",
				res);
			goto fail_init_channel;
		}
	} else {
		IPA_MHI_ERR("Invalid channel state %d\n", channel->state);
		goto fail_init_channel;
	}
	channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;

	res = ipa_enable_data_path(ipa_ep_idx);
	if (res) {
		IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res,
			ipa_ep_idx);
		goto fail_enable_dp;
	}

	if (!ep->skip_ep_cfg) {
		if (ipa_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) {
			IPAERR("fail to configure EP.\n");
			goto fail_ep_cfg;
		}
		if (ipa_cfg_ep_status(ipa_ep_idx, &ep->status)) {
			IPAERR("fail to configure status of EP.\n");
			goto fail_ep_cfg;
		}
		IPA_MHI_DBG("ep configuration successful\n");
	} else {
		IPA_MHI_DBG("skipping ep configuration\n");
	}

	*clnt_hdl = ipa_ep_idx;

	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client))
		ipa_install_dflt_flt_rules(ipa_ep_idx);

	if (!ep->keep_ipa_awake)
		ipa_dec_client_disable_clks();

	ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
	IPA_MHI_DBG("client %d (ep: %d) connected\n", in->sys.client,
		ipa_ep_idx);

	IPA_MHI_FUNC_EXIT();

	return 0;

fail_ep_cfg:
	ipa_disable_data_path(ipa_ep_idx);
fail_enable_dp:
	ipa_uc_mhi_reset_channel(channel->hdl);
	channel->state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
fail_init_channel:
	memset(ep, 0, offsetof(struct ipa_ep_context, sys));
fail_ep_exists:
	ipa_dec_client_disable_clks();
	return -EPERM;
}
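/*
 * Usage sketch (illustrative): starting an accelerated MHI channel and
 * later resetting it. The client name and channel id are placeholders.
 *
 *	struct ipa_mhi_connect_params in;
 *	u32 hdl;
 *
 *	memset(&in, 0, sizeof(in));
 *	in.sys.client = IPA_CLIENT_MHI_PROD;
 *	in.channel_id = 0;
 *	res = ipa_mhi_connect_pipe(&in, &hdl);
 *	if (res)
 *		return res;
 *	... channel running ...
 *	res = ipa_mhi_disconnect_pipe(hdl);
 */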
/**
 * ipa_disconnect() - low-level IPA client disconnect
 * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
 *
 * Should be called by the driver of the peripheral that wants to disconnect
 * from IPA in BAM-BAM mode. This API expects the caller to take
 * responsibility for freeing any needed headers, routing and filtering
 * tables and rules.
 *
 * Returns:	0 on success, negative on failure
 *
 * Note:	Should not be called from atomic context
 */
int ipa_disconnect(u32 clnt_hdl)
{
	int result;
	struct ipa_ep_context *ep;

	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	ep = &ipa_ctx->ep[clnt_hdl];

	if (ep->suspended) {
		ipa_inc_client_enable_clks();
		ep->suspended = false;
	}

	result = ipa_disable_data_path(clnt_hdl);
	if (result) {
		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
			clnt_hdl);
		return -EPERM;
	}

	result = sps_disconnect(ep->ep_hdl);
	if (result) {
		IPAERR("SPS disconnect failed.\n");
		return -EPERM;
	}

	if (!ep->desc_fifo_client_allocated && ep->connect.desc.base) {
		if (!ep->desc_fifo_in_pipe_mem)
			dma_free_coherent(NULL,
				ep->connect.desc.size,
				ep->connect.desc.base,
				ep->connect.desc.phys_base);
		else
			ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
				ep->connect.desc.size);
	}

	if (!ep->data_fifo_client_allocated && ep->connect.data.base) {
		if (!ep->data_fifo_in_pipe_mem)
			dma_free_coherent(NULL,
				ep->connect.data.size,
				ep->connect.data.base,
				ep->connect.data.phys_base);
		else
			ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
				ep->connect.data.size);
	}

	result = sps_free_endpoint(ep->ep_hdl);
	if (result) {
		IPAERR("SPS de-alloc EP failed.\n");
		return -EPERM;
	}

	memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));

	/* release the clock vote taken in ipa_connect() */
	ipa_dec_client_disable_clks();

	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);

	return 0;
}
/**
 * ipa_connect() - low-level IPA client connect
 * @in:		[in] input parameters from client
 * @sps:	[out] sps output from IPA needed by client for sps_connect
 * @clnt_hdl:	[out] opaque client handle assigned by IPA to client
 *
 * Should be called by the driver of the peripheral that wants to connect to
 * IPA in BAM-BAM mode. These peripherals are A2, USB and HSIC. This API
 * expects the caller to take responsibility for adding any needed headers,
 * routing and filtering tables and rules.
 *
 * Returns:	0 on success, negative on failure
 *
 * Note:	Should not be called from atomic context
 */
int ipa_connect(const struct ipa_connect_params *in,
		struct ipa_sps_params *sps, u32 *clnt_hdl)
{
	int ipa_ep_idx;
	int result = -EFAULT;
	struct ipa_ep_context *ep;

	ipa_inc_client_enable_clks();

	if (in == NULL || sps == NULL || clnt_hdl == NULL ||
	    in->client >= IPA_CLIENT_MAX ||
	    in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) {
		IPAERR("bad parm.\n");
		result = -EINVAL;
		goto fail;
	}

	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, in->client);
	if (ipa_ep_idx == -1) {
		IPAERR("fail to alloc EP.\n");
		goto fail;
	}

	ep = &ipa_ctx->ep[ipa_ep_idx];

	if (ep->valid) {
		IPAERR("EP already allocated.\n");
		goto fail;
	}

	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
	ipa_enable_data_path(ipa_ep_idx);

	ep->valid = 1;
	ep->client = in->client;
	ep->client_notify = in->notify;
	ep->priv = in->priv;

	if (ipa_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) {
		IPAERR("fail to configure EP.\n");
		goto ipa_cfg_ep_fail;
	}

	result = ipa_connect_configure_sps(in, ep, ipa_ep_idx);
	if (result) {
		IPAERR("fail to configure SPS.\n");
		goto ipa_cfg_ep_fail;
	}

	if (in->desc.base == NULL) {
		result = ipa_connect_allocate_fifo(in, &ep->connect.desc,
			&ep->desc_fifo_in_pipe_mem,
			&ep->desc_fifo_pipe_mem_ofst,
			in->desc_fifo_sz, ipa_ep_idx);
		if (result) {
			IPAERR("fail to allocate DESC FIFO.\n");
			goto desc_mem_alloc_fail;
		}
	} else {
		IPADBG("client allocated DESC FIFO\n");
		ep->connect.desc = in->desc;
		ep->desc_fifo_client_allocated = 1;
	}
	IPADBG("Descriptor FIFO pa=0x%x, size=%d\n",
		ep->connect.desc.phys_base, ep->connect.desc.size);

	if (in->data.base == NULL) {
		result = ipa_connect_allocate_fifo(in, &ep->connect.data,
			&ep->data_fifo_in_pipe_mem,
			&ep->data_fifo_pipe_mem_ofst,
			in->data_fifo_sz, ipa_ep_idx);
		if (result) {
			IPAERR("fail to allocate DATA FIFO.\n");
			goto data_mem_alloc_fail;
		}
	} else {
		IPADBG("client allocated DATA FIFO\n");
		ep->connect.data = in->data;
		ep->data_fifo_client_allocated = 1;
	}
	IPADBG("Data FIFO pa=0x%x, size=%d\n",
		ep->connect.data.phys_base, ep->connect.data.size);

	ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
	ep->connect.options = SPS_O_AUTO_ENABLE;	/* BAM-to-BAM */
	if (IPA_CLIENT_IS_CONS(in->client))
		ep->connect.options |= SPS_O_NO_DISABLE;

	result = sps_connect(ep->ep_hdl, &ep->connect);
	if (result) {
		IPAERR("sps_connect fails.\n");
		goto sps_connect_fail;
	}

	sps->ipa_bam_hdl = ipa_ctx->bam_handle;
	sps->ipa_ep_idx = ipa_ep_idx;
	*clnt_hdl = ipa_ep_idx;
	memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer));
	memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer));

	ipa_program_holb(ep, ipa_ep_idx);

	IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx);

	/* on success the clock vote is held until ipa_disconnect() */
	return 0;

sps_connect_fail:
	/* free only FIFOs that IPA allocated itself */
	if (!ep->data_fifo_client_allocated) {
		if (!ep->data_fifo_in_pipe_mem)
			dma_free_coherent(NULL,
				ep->connect.data.size,
				ep->connect.data.base,
				ep->connect.data.phys_base);
		else
			ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
				ep->connect.data.size);
	}

data_mem_alloc_fail:
	if (!ep->desc_fifo_client_allocated) {
		if (!ep->desc_fifo_in_pipe_mem)
			dma_free_coherent(NULL,
				ep->connect.desc.size,
				ep->connect.desc.base,
				ep->connect.desc.phys_base);
		else
			ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
				ep->connect.desc.size);
	}

desc_mem_alloc_fail:
	sps_free_endpoint(ep->ep_hdl);
ipa_cfg_ep_fail:
	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
fail:
	ipa_dec_client_disable_clks();
	return result;
}
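/*
 * Usage sketch (illustrative): a BAM-BAM peripheral connecting to IPA and
 * tearing the pipe down with ipa_disconnect() (defined earlier in this
 * file). The client and FIFO sizes are placeholders; the sps_connect()
 * of the peripheral-side pipe, using the returned sps params, is
 * abbreviated.
 *
 *	struct ipa_connect_params in;
 *	struct ipa_sps_params sps;
 *	u32 hdl;
 *
 *	memset(&in, 0, sizeof(in));
 *	in.client = IPA_CLIENT_USB_PROD;
 *	in.desc_fifo_sz = 0x800;
 *	in.data_fifo_sz = 0x800;
 *	res = ipa_connect(&in, &sps, &hdl);
 *	if (res)
 *		return res;
 *	... connect peripheral BAM end using sps.ipa_bam_hdl and
 *	    sps.ipa_ep_idx ...
 *	res = ipa_disconnect(hdl);
 */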