int ipa_uc_mhi_stop_event_update_channel(int channelHandle)
{
	union IpaHwMhiStopEventUpdateData_t cmd;
	int res;

	if (!ipa_uc_mhi_ctx) {
		IPAERR("Not initialized\n");
		return -EFAULT;
	}

	ipa_inc_client_enable_clks();

	memset(&cmd, 0, sizeof(cmd));
	cmd.params.channelHandle = channelHandle;

	ipa_uc_mhi_ctx->expected_responseOp =
		IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE;
	ipa_uc_mhi_ctx->expected_responseParams = cmd.raw32b;

	res = ipa_uc_send_cmd(cmd.raw32b,
		IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE, 0, false, HZ);
	if (res) {
		IPAERR("ipa_uc_send_cmd failed %d\n", res);
		goto disable_clks;
	}

	res = 0;

disable_clks:
	ipa_dec_client_disable_clks();
	return res;
}
int ipa_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void))
{
	struct ipa_uc_hdlrs hdlrs;

	if (ipa_uc_mhi_ctx) {
		IPAERR("Already initialized\n");
		return -EFAULT;
	}

	ipa_uc_mhi_ctx = kzalloc(sizeof(*ipa_uc_mhi_ctx), GFP_KERNEL);
	if (!ipa_uc_mhi_ctx) {
		IPAERR("no mem\n");
		return -ENOMEM;
	}

	ipa_uc_mhi_ctx->ready_cb = ready_cb;
	ipa_uc_mhi_ctx->wakeup_request_cb = wakeup_request_cb;

	memset(&hdlrs, 0, sizeof(hdlrs));
	hdlrs.ipa_uc_loaded_hdlr = ipa_uc_mhi_ctx->ready_cb;
	hdlrs.ipa_uc_response_hdlr = ipa_uc_mhi_response_hdlr;
	hdlrs.ipa_uc_event_hdlr = ipa_uc_mhi_event_hdlr;
	hdlrs.ipa_uc_event_log_info_hdlr = ipa_uc_mhi_event_log_info_hdlr;
	ipa_uc_register_handlers(IPA_HW_FEATURE_MHI, &hdlrs);

	IPADBG("Done\n");
	return 0;
}
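/*
 * Usage sketch (illustrative, not part of the driver): an MHI client
 * registers its callbacks once via ipa_uc_mhi_init() before issuing any
 * uC MHI commands. The example_* names below are hypothetical.
 */
static void example_mhi_ready_cb(void)
{
	/* uC firmware reported loaded; MHI commands may now be issued */
}

static void example_mhi_wakeup_cb(void)
{
	/* uC requested host wakeup on behalf of an MHI channel */
}

static int example_mhi_probe(void)
{
	/* -EFAULT if already initialized, -ENOMEM on allocation failure */
	return ipa_uc_mhi_init(example_mhi_ready_cb, example_mhi_wakeup_cb);
}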
/**
 * ipa_suspend() - low-level IPA client suspend
 * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
 *
 * Should be called by the driver of the peripheral that wants to suspend its
 * IPA connection. Suspending the connection turns off the IPA clocks if no
 * other client is actively using IPA. Pipes remain connected across suspend.
 *
 * Returns:	0 on success, negative on failure
 *
 * Note:	Should not be called from atomic context
 */
int ipa_suspend(u32 clnt_hdl)
{
	struct ipa_ep_context *ep;

	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm. clnt_hdl %d\n", clnt_hdl);
		return -EINVAL;
	}

	ep = &ipa_ctx->ep[clnt_hdl];

	if (ep->suspended) {
		IPAERR("EP already suspended. clnt_hdl %d\n", clnt_hdl);
		return -EPERM;
	}

	/* let one aggregation timeout elapse so a consumer pipe can drain */
	if (IPA_CLIENT_IS_CONS(ep->client) &&
	    ep->cfg.aggr.aggr_en == IPA_ENABLE_AGGR &&
	    ep->cfg.aggr.aggr_time_limit)
		msleep(ep->cfg.aggr.aggr_time_limit);

	ipa_dec_client_disable_clks();
	ep->suspended = true;

	return 0;
}
/**
 * ipa_bridge_setup() - setup tethered SW bridge in specified direction
 * @dir: downlink or uplink (from air interface perspective)
 *
 * Return codes:
 * 0: success
 * various negative error codes on errors
 */
int ipa_bridge_setup(enum ipa_bridge_dir dir)
{
	int ret;

	if (atomic_inc_return(&ipa_ctx->ipa_active_clients) == 1)
		ipa_enable_clks();

	if (setup_bridge_to_a2(dir)) {
		IPAERR("fail to setup SYS pipe to A2 %d\n", dir);
		ret = -EINVAL;
		goto bail_a2;
	}

	if (setup_bridge_to_ipa(dir)) {
		IPAERR("fail to setup SYS pipe to IPA %d\n", dir);
		ret = -EINVAL;
		goto bail_ipa;
	}

	return 0;

bail_ipa:
	if (dir == IPA_UL)
		sps_disconnect(bridge[IPA_UL_TO_A2].pipe);
	else
		sps_disconnect(bridge[IPA_DL_FROM_A2].pipe);
bail_a2:
	if (atomic_dec_return(&ipa_ctx->ipa_active_clients) == 0)
		ipa_disable_clks();
	return ret;
}
/**
 * ipa_bridge_init() - initialize the tethered bridge, allocate UL and DL
 * workqueues
 *
 * Return codes: 0: success, -ENOMEM: failure
 */
int ipa_bridge_init(void)
{
	int ret;

	ipa_ul_workqueue = alloc_workqueue("ipa_ul",
			WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
	if (!ipa_ul_workqueue) {
		IPAERR("ipa ul wq alloc failed\n");
		ret = -ENOMEM;
		goto fail_ul;
	}

	ipa_dl_workqueue = alloc_workqueue("ipa_dl",
			WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
	if (!ipa_dl_workqueue) {
		IPAERR("ipa dl wq alloc failed\n");
		ret = -ENOMEM;
		goto fail_dl;
	}

	return 0;

fail_dl:
	destroy_workqueue(ipa_ul_workqueue);
fail_ul:
	return ret;
}
static int ipa_switch_to_intr_mode(enum ipa_bridge_dir dir)
{
	int ret;
	struct ipa_bridge_pipe_context *sys = &bridge[2 * dir];

	ret = sps_get_config(sys->pipe, &sys->connection);
	if (ret) {
		IPAERR("sps_get_config() failed %d\n", ret);
		goto fail;
	}

	sys->register_event.options = SPS_O_EOT;
	ret = sps_register_event(sys->pipe, &sys->register_event);
	if (ret) {
		IPAERR("sps_register_event() failed %d\n", ret);
		goto fail;
	}

	sys->connection.options = SPS_O_AUTO_ENABLE |
		SPS_O_ACK_TRANSFERS | SPS_O_EOT;
	ret = sps_set_config(sys->pipe, &sys->connection);
	if (ret) {
		IPAERR("sps_set_config() failed %d\n", ret);
		goto fail;
	}

	ret = 0;
fail:
	return ret;
}
/**
 * ipa_bridge_setup() - setup SW bridge leg
 * @dir: downlink or uplink (from air interface perspective)
 * @type: tethered or embedded bridge
 * @props: bridge leg properties (EP config, callbacks, etc)
 * @clnt_hdl: [out] handle of IPA EP belonging to bridge leg
 *
 * NOTE: IT IS CALLER'S RESPONSIBILITY TO ENSURE BAMs ARE
 * OPERATIONAL AS LONG AS BRIDGE REMAINS UP
 *
 * Return codes:
 * 0: success
 * various negative error codes on errors
 */
int ipa_bridge_setup(enum ipa_bridge_dir dir, enum ipa_bridge_type type,
		     struct ipa_sys_connect_params *props, u32 *clnt_hdl)
{
	int ret;

	if (props == NULL || clnt_hdl == NULL ||
	    type >= IPA_BRIDGE_TYPE_MAX || dir >= IPA_BRIDGE_DIR_MAX ||
	    props->client >= IPA_CLIENT_MAX) {
		IPAERR("Bad param props=%p clnt_hdl=%p type=%d dir=%d\n",
		       props, clnt_hdl, type, dir);
		return -EINVAL;
	}

	ipa_inc_client_enable_clks();

	if (setup_dma_bam_bridge(dir, type, props, clnt_hdl)) {
		IPAERR("fail to setup SYS pipe to IPA dir=%d type=%d\n",
		       dir, type);
		ret = -EINVAL;
		goto bail_ipa;
	}

	return 0;

bail_ipa:
	ipa_dec_client_disable_clks();
	return ret;
}
int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd)
{
	int res;

	if (!ipa_uc_mhi_ctx) {
		IPAERR("Not initialized\n");
		return -EFAULT;
	}

	IPADBG("isDlUlSyncEnabled=0x%x UlAccmVal=0x%x\n",
		cmd.params.isDlUlSyncEnabled, cmd.params.UlAccmVal);
	IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n",
		cmd.params.ulMsiEventThreshold,
		cmd.params.dlMsiEventThreshold);

	ipa_inc_client_enable_clks();

	res = ipa_uc_send_cmd(cmd.raw32b,
		IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ);
	if (res) {
		IPAERR("ipa_uc_send_cmd failed %d\n", res);
		goto disable_clks;
	}

	res = 0;

disable_clks:
	ipa_dec_client_disable_clks();
	return res;
}
/**
 * ipa_mhi_disconnect_pipe() - Disconnect pipe from IPA and reset corresponding
 * MHI channel
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 *
 * This function is called by the MHI client driver on MHI channel reset.
 * This function is called after the MHI channel was started.
 * This function does the following:
 *	- Send command to uC to reset the corresponding MHI channel
 *	- Configure IPA EP control
 *
 * Return codes: 0	  : success
 *		 negative : error
 */
int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
{
	struct ipa_ep_context *ep;
	struct ipa_mhi_channel_ctx *channel;
	int res;

	IPA_MHI_FUNC_ENTRY();

	if (clnt_hdl >= ipa_ctx->ipa_num_pipes) {
		IPAERR("invalid handle %d\n", clnt_hdl);
		return -EINVAL;
	}
	if (ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("pipe was not connected %d\n", clnt_hdl);
		return -EINVAL;
	}
	if (unlikely(!ipa_mhi_ctx)) {
		IPA_MHI_ERR("IPA MHI was not initialized\n");
		return -EINVAL;
	}

	channel = ipa_mhi_get_channel_context_by_clnt_hdl(clnt_hdl);
	if (!channel) {
		IPAERR("invalid clnt hdl\n");
		return -EINVAL;
	}

	ep = &ipa_ctx->ep[clnt_hdl];

	if (!ep->keep_ipa_awake)
		ipa_inc_client_enable_clks();

	res = ipa_mhi_reset_channel(channel);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_reset_channel failed %d\n", res);
		goto fail_reset_channel;
	}

	ep->valid = 0;
	ipa_delete_dflt_flt_rules(clnt_hdl);

	ipa_dec_client_disable_clks();

	IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
	IPA_MHI_FUNC_EXIT();
	return 0;

fail_reset_channel:
	if (!ep->keep_ipa_awake)
		ipa_dec_client_disable_clks();
	return res;
}
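/*
 * Usage sketch (illustrative, not part of the driver): on MHI channel
 * reset, the client disconnects the pipe using the handle obtained when
 * the pipe was connected. example_on_channel_reset() is hypothetical.
 */
static void example_on_channel_reset(u32 clnt_hdl)
{
	int res;

	res = ipa_mhi_disconnect_pipe(clnt_hdl);
	if (res)
		pr_err("ipa_mhi_disconnect_pipe failed %d\n", res);
}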
static int queue_rx_single(enum ipa_bridge_dir dir)
{
	struct ipa_bridge_pipe_context *sys_rx = &bridge[2 * dir];
	struct ipa_pkt_info *info;
	int ret;

	info = kmalloc(sizeof(struct ipa_pkt_info), GFP_KERNEL);
	if (!info) {
		IPAERR("unable to alloc rx_pkt_info\n");
		goto fail_pkt;
	}

	info->buffer = kmalloc(IPA_RX_SKB_SIZE, GFP_KERNEL | GFP_DMA);
	if (!info->buffer) {
		IPAERR("unable to alloc rx_pkt_buffer\n");
		goto fail_buffer;
	}

	info->dma_address = dma_map_single(NULL, info->buffer,
			IPA_RX_SKB_SIZE, DMA_BIDIRECTIONAL);
	if (info->dma_address == 0 || info->dma_address == ~0) {
		IPAERR("dma_map_single failure %p for %p\n",
		       (void *)info->dma_address, info->buffer);
		goto fail_dma;
	}

	info->len = ~0;

	list_add_tail(&info->list_node, &sys_rx->head_desc_list);
	ret = sps_transfer_one(sys_rx->pipe, info->dma_address,
			       IPA_RX_SKB_SIZE, info,
			       SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	if (ret) {
		list_del(&info->list_node);
		dma_unmap_single(NULL, info->dma_address, IPA_RX_SKB_SIZE,
				 DMA_BIDIRECTIONAL);
		IPAERR("sps_transfer_one failed %d\n", ret);
		goto fail_dma;
	}

	sys_rx->len++;
	return 0;

fail_dma:
	kfree(info->buffer);
fail_buffer:
	kfree(info);
fail_pkt:
	IPAERR("failed\n");
	return -ENOMEM;
}
static void ipa_log_evt_hdlr(void)
{
	int i;

	if (!ipa_ctx->uc_ctx.uc_event_top_ofst) {
		ipa_ctx->uc_ctx.uc_event_top_ofst =
			ipa_ctx->uc_ctx.uc_sram_mmio->eventParams;
		if (ipa_ctx->uc_ctx.uc_event_top_ofst +
		    sizeof(struct IpaHwEventLogInfoData_t) >=
		    ipa_ctx->ctrl->ipa_reg_base_ofst +
		    IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) +
		    ipa_ctx->smem_sz) {
			IPAERR("uc_top 0x%x outside SRAM\n",
			       ipa_ctx->uc_ctx.uc_event_top_ofst);
			goto bad_uc_top_ofst;
		}

		ipa_ctx->uc_ctx.uc_event_top_mmio = ioremap(
			ipa_ctx->ipa_wrapper_base +
			ipa_ctx->uc_ctx.uc_event_top_ofst,
			sizeof(struct IpaHwEventLogInfoData_t));
		if (!ipa_ctx->uc_ctx.uc_event_top_mmio) {
			IPAERR("fail to ioremap uc top\n");
			goto bad_uc_top_ofst;
		}

		for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
			if (uc_hdlrs[i].ipa_uc_event_log_info_hdlr)
				uc_hdlrs[i].ipa_uc_event_log_info_hdlr
					(ipa_ctx->uc_ctx.uc_event_top_mmio);
		}
	} else {
		if (ipa_ctx->uc_ctx.uc_sram_mmio->eventParams !=
		    ipa_ctx->uc_ctx.uc_event_top_ofst) {
			IPAERR("uc top ofst changed new=%u cur=%u\n",
			       ipa_ctx->uc_ctx.uc_sram_mmio->eventParams,
			       ipa_ctx->uc_ctx.uc_event_top_ofst);
		}
	}

	return;

bad_uc_top_ofst:
	ipa_ctx->uc_ctx.uc_event_top_ofst = 0;
}
static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
		struct ipa_rt_entry *entry, u8 *buf)
{
	struct ipa_rt_rule_hw_hdr *rule_hdr;
	const struct ipa_rt_rule *rule =
		(const struct ipa_rt_rule *)&entry->rule;
	u16 en_rule = 0;
	u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
	u8 *start;
	int pipe_idx;

	if (buf == NULL) {
		memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
		buf = (u8 *)tmp;
	}

	start = buf;
	rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;

	pipe_idx = ipa_get_ep_mapping(ipa_ctx->mode, entry->rule.dst);
	if (pipe_idx == -1) {
		IPAERR("Wrong destination pipe specified in RT rule\n");
		WARN_ON(1);
		return -EPERM;
	}
	rule_hdr->u.hdr.pipe_dest_idx = pipe_idx;
	rule_hdr->u.hdr.system = !ipa_ctx->hdr_tbl_lcl;
	if (entry->hdr) {
		rule_hdr->u.hdr.hdr_offset =
			entry->hdr->offset_entry->offset >> 2;
	} else {
int ipa_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
	int contexArrayIndex, int channelDirection)
{
	int res;
	union IpaHwMhiInitChannelCmdData_t init_cmd;
	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;

	if (!ipa_uc_mhi_ctx) {
		IPAERR("Not initialized\n");
		return -EFAULT;
	}

	if (ipa_ep_idx < 0 || ipa_ep_idx >= IPA_NUM_PIPES) {
		IPAERR("Invalid ipa_ep_idx.\n");
		return -EINVAL;
	}

	ipa_inc_client_enable_clks();

	memset(&uc_rsp, 0, sizeof(uc_rsp));
	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
	uc_rsp.params.channelHandle = channelHandle;
	ipa_uc_mhi_ctx->expected_responseOp =
		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
	ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;

	memset(&init_cmd, 0, sizeof(init_cmd));
	init_cmd.params.channelHandle = channelHandle;
	init_cmd.params.contexArrayIndex = contexArrayIndex;
	init_cmd.params.bamPipeId = ipa_ep_idx;
	init_cmd.params.channelDirection = channelDirection;

	res = ipa_uc_send_cmd(init_cmd.raw32b,
		IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL, 0, false, HZ);
	if (res) {
		IPAERR("ipa_uc_send_cmd failed %d\n", res);
		goto disable_clks;
	}

	res = 0;

disable_clks:
	ipa_dec_client_disable_clks();
	return res;
}
static void ipa_uc_mhi_event_log_info_hdlr(
	struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
{
	if ((uc_event_top_mmio->featureMask &
	    (1 << IPA_HW_FEATURE_MHI)) == 0) {
		IPAERR("MHI feature missing 0x%x\n",
			uc_event_top_mmio->featureMask);
		return;
	}

	if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_MHI].
	    params.size != sizeof(struct IpaHwStatsMhiInfoData_t)) {
		IPAERR("mhi stats sz invalid exp=%zu is=%u\n",
			sizeof(struct IpaHwStatsMhiInfoData_t),
			uc_event_top_mmio->statsInfo.
			featureInfo[IPA_HW_FEATURE_MHI].params.size);
		return;
	}

	ipa_uc_mhi_ctx->mhi_uc_stats_ofst =
		uc_event_top_mmio->statsInfo.baseAddrOffset +
		uc_event_top_mmio->statsInfo.
		featureInfo[IPA_HW_FEATURE_MHI].params.offset;
	IPAERR("MHI stats ofst=0x%x\n", ipa_uc_mhi_ctx->mhi_uc_stats_ofst);

	if (ipa_uc_mhi_ctx->mhi_uc_stats_ofst +
	    sizeof(struct IpaHwStatsMhiInfoData_t) >=
	    ipa_ctx->ctrl->ipa_reg_base_ofst +
	    IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) +
	    ipa_ctx->smem_sz) {
		IPAERR("uc_mhi_stats 0x%x outside SRAM\n",
			ipa_uc_mhi_ctx->mhi_uc_stats_ofst);
		return;
	}

	ipa_uc_mhi_ctx->mhi_uc_stats_mmio =
		ioremap(ipa_ctx->ipa_wrapper_base +
			ipa_uc_mhi_ctx->mhi_uc_stats_ofst,
			sizeof(struct IpaHwStatsMhiInfoData_t));
	if (!ipa_uc_mhi_ctx->mhi_uc_stats_mmio) {
		IPAERR("fail to ioremap uc mhi stats\n");
		return;
	}
}
/**
 * ipa_uc_state_check() - Check the status of the uC interface
 *
 * Return value: 0 if the uC is loaded, the interface is initialized
 * and there was no recent failure in one of the commands.
 * A negative value is returned otherwise.
 */
int ipa_uc_state_check(void)
{
	if (!ipa_ctx->uc_ctx.uc_inited) {
		IPAERR("uC interface not initialized\n");
		return -EFAULT;
	}

	if (!ipa_ctx->uc_ctx.uc_loaded) {
		IPAERR("uC is not loaded\n");
		return -EFAULT;
	}

	if (ipa_ctx->uc_ctx.uc_failed) {
		IPAERR("uC has failed its last command\n");
		return -EFAULT;
	}

	return 0;
}
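/*
 * Usage sketch (illustrative, not part of the driver): callers gate uC
 * commands on ipa_uc_state_check() so nothing is sent before the
 * interface is initialized, the firmware is loaded, and the previous
 * command succeeded. example_issue_uc_cmd() and ipa_some_uc_cmd() are
 * hypothetical.
 */
static int example_issue_uc_cmd(void)
{
	int res;

	res = ipa_uc_state_check();
	if (res)
		return res;	/* uC not ready; caller may retry later */

	return ipa_some_uc_cmd();
}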
static void bam_mux_rx_notify(struct sps_event_notify *notify)
{
	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		/* disable further EOT interrupts and drain in polling mode */
		ipa_switch_to_poll_mode(IPA_DL);
		queue_work(ipa_dl_workqueue, &dl_work);
		break;
	default:
		IPAERR("received unexpected event id %d\n",
		       notify->event_id);
	}
}
static int ipa_connect_allocate_fifo(const struct ipa_connect_params *in,
				     struct sps_mem_buffer *mem_buff_ptr,
				     bool *fifo_in_pipe_mem_ptr,
				     u32 *fifo_pipe_mem_ofst_ptr,
				     u32 fifo_size, int ipa_ep_idx)
{
	dma_addr_t dma_addr;
	u32 ofst;
	int result = -EFAULT;

	mem_buff_ptr->size = fifo_size;
	if (in->pipe_mem_preferred) {
		if (ipa_pipe_mem_alloc(&ofst, fifo_size)) {
			IPAERR("FIFO pipe mem alloc fail ep %u\n",
			       ipa_ep_idx);
			/* fall back to system memory */
			mem_buff_ptr->base =
				dma_alloc_coherent(NULL, mem_buff_ptr->size,
						   &dma_addr, GFP_KERNEL);
		} else {
			memset(mem_buff_ptr, 0,
			       sizeof(struct sps_mem_buffer));
			result = sps_setup_bam2bam_fifo(mem_buff_ptr, ofst,
							fifo_size, 1);
			WARN_ON(result);
			*fifo_in_pipe_mem_ptr = 1;
			dma_addr = mem_buff_ptr->phys_base;
			*fifo_pipe_mem_ofst_ptr = ofst;
		}
	} else {
		mem_buff_ptr->base =
			dma_alloc_coherent(NULL, mem_buff_ptr->size,
					   &dma_addr, GFP_KERNEL);
	}
	mem_buff_ptr->phys_base = dma_addr;
	if (mem_buff_ptr->base == NULL) {
		IPAERR("fail to get DMA memory.\n");
		return -EFAULT;
	}

	return 0;
}
/**
 * ipa_resume() - low-level IPA client resume
 * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
 *
 * Should be called by the driver of the peripheral that wants to resume its
 * IPA connection. Resuming the connection turns the IPA clocks back on if
 * they were turned off by suspend. This API may be called only after a
 * prior call to ipa_suspend().
 *
 * Returns:	0 on success, negative on failure
 *
 * Note:	Should not be called from atomic context
 */
int ipa_resume(u32 clnt_hdl)
{
	struct ipa_ep_context *ep;

	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm. clnt_hdl %d\n", clnt_hdl);
		return -EINVAL;
	}

	ep = &ipa_ctx->ep[clnt_hdl];

	if (!ep->suspended) {
		IPAERR("EP not suspended. clnt_hdl %d\n", clnt_hdl);
		return -EPERM;
	}

	ipa_inc_client_enable_clks();
	ep->suspended = false;

	return 0;
}
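/*
 * Usage sketch (illustrative, not part of the driver): ipa_suspend() and
 * ipa_resume() are called in pairs on the same handle from non-atomic
 * context; a double suspend, or a resume without a preceding suspend,
 * fails with -EPERM as enforced above. example_peripheral_pm() is
 * hypothetical.
 */
static int example_peripheral_pm(u32 clnt_hdl)
{
	int res;

	/* clocks may be gated if no other client is holding IPA active */
	res = ipa_suspend(clnt_hdl);
	if (res)
		return res;

	/* ... peripheral idle; pipes stay connected ... */

	return ipa_resume(clnt_hdl);	/* re-enables IPA clocks */
}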
static void ipa_uc_mhi_event_hdlr(struct IpaHwSharedMemCommonMapping_t
	*uc_sram_mmio)
{
	if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
	    IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR) {
		union IpaHwMhiChannelErrorEventData_t evt;

		IPAERR("Channel error\n");
		evt.raw32b = uc_sram_mmio->eventParams;
		IPAERR("errorType=%d channelHandle=%d reserved=%d\n",
			evt.params.errorType, evt.params.channelHandle,
			evt.params.reserved);
	} else if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
		   IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST) {
		union IpaHwMhiChannelWakeupEventData_t evt;

		IPADBG("WakeUp channel request\n");
		evt.raw32b = uc_sram_mmio->eventParams;
		IPADBG("channelHandle=%d reserved=%d\n",
			evt.params.channelHandle, evt.params.reserved);
		ipa_uc_mhi_ctx->wakeup_request_cb();
	}
}
static int ipa_connect_configure_sps(const struct ipa_connect_params *in,
				     struct ipa_ep_context *ep,
				     int ipa_ep_idx)
{
	int result = -EFAULT;

	/* Default Config */
	ep->ep_hdl = sps_alloc_endpoint();
	if (ep->ep_hdl == NULL) {
		IPAERR("SPS EP allocation failed.\n");
		return -EFAULT;
	}

	result = sps_get_config(ep->ep_hdl, &ep->connect);
	if (result) {
		IPAERR("fail to get config.\n");
		return -EFAULT;
	}

	/* Specific Config */
	if (IPA_CLIENT_IS_CONS(in->client)) {
		ep->connect.mode = SPS_MODE_SRC;
		ep->connect.destination = in->client_bam_hdl;
		ep->connect.source = ipa_ctx->bam_handle;
		ep->connect.dest_pipe_index = in->client_ep_idx;
		ep->connect.src_pipe_index = ipa_ep_idx;
	} else {
		ep->connect.mode = SPS_MODE_DEST;
		ep->connect.source = in->client_bam_hdl;
		ep->connect.destination = ipa_ctx->bam_handle;
		ep->connect.src_pipe_index = in->client_ep_idx;
		ep->connect.dest_pipe_index = ipa_ep_idx;
	}

	return 0;
}
int ipa_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected)
{
	union IpaHwMhiChangeChannelStateCmdData_t cmd;
	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
	int res;

	if (!ipa_uc_mhi_ctx) {
		IPAERR("Not initialized\n");
		return -EFAULT;
	}

	ipa_inc_client_enable_clks();

	memset(&uc_rsp, 0, sizeof(uc_rsp));
	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
	uc_rsp.params.channelHandle = channelHandle;
	ipa_uc_mhi_ctx->expected_responseOp =
		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
	ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;

	memset(&cmd, 0, sizeof(cmd));
	cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_RUN;
	cmd.params.channelHandle = channelHandle;
	cmd.params.LPTransitionRejected = LPTransitionRejected;

	res = ipa_uc_send_cmd(cmd.raw32b,
		IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
	if (res) {
		IPAERR("ipa_uc_send_cmd failed %d\n", res);
		goto disable_clks;
	}

	res = 0;

disable_clks:
	ipa_dec_client_disable_clks();
	return res;
}
/**
 * ipa_mhi_destroy() - Destroy MHI IPA
 *
 * This function is called by the MHI client driver on MHI reset to destroy
 * all IPA MHI resources.
 *
 * Return codes: 0	  : success
 *		 negative : error
 */
int ipa_mhi_destroy(void)
{
	IPA_MHI_FUNC_ENTRY();

	if (unlikely(!ipa_mhi_ctx)) {
		IPA_MHI_ERR("IPA MHI was not initialized\n");
		return -EINVAL;
	}

	IPAERR("Not implemented Yet!\n");
	ipa_mhi_debugfs_destroy();

	IPA_MHI_FUNC_EXIT();
	return -EPERM;
}
static void ipa_uc_event_handler(enum ipa_irq_type interrupt,
				 void *private_data, void *interrupt_data)
{
	union IpaHwErrorEventData_t evt;
	u8 feature;

	WARN_ON(private_data != ipa_ctx);

	ipa_inc_client_enable_clks();

	IPADBG("uC evt opcode=%u\n", ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);

	feature = EXTRACT_UC_FEATURE(ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);

	/* feature is unsigned, so only the upper bound needs checking */
	if (feature >= IPA_HW_FEATURE_MAX) {
		IPAERR("Invalid feature %u for event %u\n",
		       feature, ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
		ipa_dec_client_disable_clks();
		return;
	}

	/* Feature specific handling */
	if (uc_hdlrs[feature].ipa_uc_event_hdlr)
		uc_hdlrs[feature].ipa_uc_event_hdlr
			(ipa_ctx->uc_ctx.uc_sram_mmio);

	/* General handling */
	if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
	    IPA_HW_2_CPU_EVENT_ERROR) {
		evt.raw32b = ipa_ctx->uc_ctx.uc_sram_mmio->eventParams;
		IPADBG("uC evt errorType=%u\n", evt.params.errorType);
		BUG();
	} else if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
		   IPA_HW_2_CPU_EVENT_LOG_INFO) {
		IPADBG("uC evt log info ofst=0x%x\n",
		       ipa_ctx->uc_ctx.uc_sram_mmio->eventParams);
		ipa_log_evt_hdlr();
	} else {
		IPADBG("unsupported uC evt opcode=%u\n",
		       ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
	}

	ipa_dec_client_disable_clks();
}
/**
 * ipa_bridge_init()
 *
 * Return codes: 0: success, -ENOMEM: failure
 */
int ipa_bridge_init(void)
{
	int i;

	ipa_ctx->smem_pipe_mem = smem_alloc(SMEM_BAM_PIPE_MEMORY,
					    IPA_SMEM_PIPE_MEM_SZ);
	if (!ipa_ctx->smem_pipe_mem) {
		IPAERR("smem alloc failed\n");
		return -ENOMEM;
	}
	IPADBG("smem_pipe_mem = %p\n", ipa_ctx->smem_pipe_mem);

	for (i = 0; i < IPA_BRIDGE_TYPE_MAX; i++)
		bridge[i].type = i;

	return 0;
}
int ipa_uc_mhi_print_stats(char *dbg_buff, int size)
{
	int nBytes = 0;
	int i;

	if (!ipa_uc_mhi_ctx->mhi_uc_stats_mmio) {
		IPAERR("MHI uc stats is not valid\n");
		return 0;
	}

	nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes,
		"Common Stats:\n");
	PRINT_COMMON_STATS(numULDLSync);
	PRINT_COMMON_STATS(numULTimerExpired);
	PRINT_COMMON_STATS(numChEvCtxWpRead);

	for (i = 0; i < IPA_HW_MAX_NUMBER_OF_CHANNELS; i++) {
		nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes,
			"Channel %d Stats:\n", i);
		PRINT_CHANNEL_STATS(i, doorbellInt);
		PRINT_CHANNEL_STATS(i, reProccesed);
		PRINT_CHANNEL_STATS(i, bamFifoFull);
		PRINT_CHANNEL_STATS(i, bamFifoEmpty);
		PRINT_CHANNEL_STATS(i, bamFifoUsageHigh);
		PRINT_CHANNEL_STATS(i, bamFifoUsageLow);
		PRINT_CHANNEL_STATS(i, bamInt);
		PRINT_CHANNEL_STATS(i, ringFull);
		PRINT_CHANNEL_STATS(i, ringEmpty);
		PRINT_CHANNEL_STATS(i, ringUsageHigh);
		PRINT_CHANNEL_STATS(i, ringUsageLow);
		PRINT_CHANNEL_STATS(i, delayedMsi);
		PRINT_CHANNEL_STATS(i, immediateMsi);
		PRINT_CHANNEL_STATS(i, thresholdMsi);
		PRINT_CHANNEL_STATS(i, numSuspend);
		PRINT_CHANNEL_STATS(i, numResume);
		PRINT_CHANNEL_STATS(i, num_OOB);
		PRINT_CHANNEL_STATS(i, num_OOB_timer_expiry);
		PRINT_CHANNEL_STATS(i, num_OOB_moderation_timer_start);
		PRINT_CHANNEL_STATS(i, num_db_mode_evt);
	}

	return nBytes;
}
static int handle_ipa_config_req(void *req_h, void *req)
{
	struct ipa_config_resp_msg_v01 resp;
	int rc;

	memset(&resp, 0, sizeof(struct ipa_config_resp_msg_v01));
	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;

	IPAWANDBG("Received IPA CONFIG Request\n");

	rc = ipa_mhi_handle_ipa_config_req(
		(struct ipa_config_req_msg_v01 *)req);
	if (rc) {
		IPAERR("ipa_mhi_handle_ipa_config_req failed %d\n", rc);
		resp.resp.result = IPA_QMI_RESULT_FAILURE_V01;
	}

	rc = qmi_send_resp_from_cb(ipa_svc_handle, curr_conn, req_h,
				   &ipa_config_resp_desc, &resp,
				   sizeof(resp));
	IPAWANDBG("Responded to IPA CONFIG Request\n");
	return rc;
}
/**
 * ipa_bridge_teardown() - teardown SW bridge leg
 * @dir: downlink or uplink (from air interface perspective)
 * @type: tethered or embedded bridge
 * @clnt_hdl: handle of IPA EP
 *
 * Return codes:
 * 0: success
 * various negative error codes on errors
 */
int ipa_bridge_teardown(enum ipa_bridge_dir dir, enum ipa_bridge_type type,
			u32 clnt_hdl)
{
	struct ipa_bridge_pipe_context *sys;
	int lo;
	int hi;

	if (dir >= IPA_BRIDGE_DIR_MAX || type >= IPA_BRIDGE_TYPE_MAX ||
	    clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("Bad param dir=%d type=%d\n", dir, type);
		return -EINVAL;
	}

	if (dir == IPA_BRIDGE_DIR_UL) {
		lo = IPA_UL_FROM_IPA;
		hi = IPA_UL_TO_A2;
	} else {
		lo = IPA_DL_FROM_A2;
		hi = IPA_DL_TO_IPA;
	}

	for (; lo <= hi; lo++) {
		sys = &bridge[type].pipe[lo];
		if (sys->valid) {
			if (sys->ipa_facing)
				ipa_disconnect(clnt_hdl);
			sps_disconnect(sys->pipe);
			sps_free_endpoint(sys->pipe);
			sys->valid = false;
		}
	}

	memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));

	ipa_dec_client_disable_clks();

	return 0;
}
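/*
 * Usage sketch (illustrative, not part of the driver): a bridge leg is
 * released with the same dir/type it was created with and the clnt_hdl
 * returned by ipa_bridge_setup(). IPA_BRIDGE_DIR_UL and
 * IPA_BRIDGE_TYPE_TETHERED are assumed enum values;
 * example_bridge_session() is hypothetical.
 */
static int example_bridge_session(struct ipa_sys_connect_params *props)
{
	u32 clnt_hdl;
	int res;

	res = ipa_bridge_setup(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
			       props, &clnt_hdl);
	if (res)
		return res;

	/* ... bridge in use; both BAMs must remain operational ... */

	return ipa_bridge_teardown(IPA_BRIDGE_DIR_UL,
				   IPA_BRIDGE_TYPE_TETHERED, clnt_hdl);
}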
static int setup_dma_bam_bridge(enum ipa_bridge_dir dir,
				enum ipa_bridge_type type,
				struct ipa_sys_connect_params *props,
				u32 *clnt_hdl)
{
	struct ipa_connect_params ipa_in_params;
	struct ipa_sps_params sps_out_params;
	int dma_a2_pipe;
	int dma_ipa_pipe;
	struct sps_pipe *pipe;
	struct sps_pipe *pipe_a2;
	struct sps_connect _connection;
	struct sps_connect *connection = &_connection;
	struct a2_mux_pipe_connection pipe_conn = {0};
	enum a2_mux_pipe_direction pipe_dir;
	u32 dma_hdl = sps_dma_get_bam_handle();
	u32 a2_hdl;
	u32 pa;
	int ret;

	memset(&ipa_in_params, 0, sizeof(ipa_in_params));
	memset(&sps_out_params, 0, sizeof(sps_out_params));

	pipe_dir = (dir == IPA_BRIDGE_DIR_UL) ? IPA_TO_A2 : A2_TO_IPA;

	ret = ipa_get_a2_mux_pipe_info(pipe_dir, &pipe_conn);
	if (ret) {
		IPAERR("ipa_get_a2_mux_pipe_info failed dir=%d type=%d\n",
		       dir, type);
		goto fail_get_a2_prop;
	}

	pa = (dir == IPA_BRIDGE_DIR_UL) ? pipe_conn.dst_phy_addr :
					  pipe_conn.src_phy_addr;

	ret = sps_phy2h(pa, &a2_hdl);
	if (ret) {
		IPAERR("sps_phy2h failed (A2 BAM) %d dir=%d type=%d\n",
		       ret, dir, type);
		goto fail_get_a2_prop;
	}

	ipa_get_dma_pipe_num(dir, type, &dma_a2_pipe, &dma_ipa_pipe);

	ipa_in_params.ipa_ep_cfg = props->ipa_ep_cfg;
	ipa_in_params.client = props->client;
	ipa_in_params.client_bam_hdl = dma_hdl;
	ipa_in_params.client_ep_idx = dma_ipa_pipe;
	ipa_in_params.priv = props->priv;
	ipa_in_params.notify = props->notify;
	ipa_in_params.desc_fifo_sz = ipa_get_desc_fifo_sz(dir, type);
	ipa_in_params.data_fifo_sz = ipa_get_data_fifo_sz(dir, type);

	if (ipa_connect(&ipa_in_params, &sps_out_params, clnt_hdl)) {
		IPAERR("ipa connect failed dir=%d type=%d\n", dir, type);
		ret = -EINVAL;
		goto fail_get_a2_prop;
	}

	pipe = sps_alloc_endpoint();
	if (pipe == NULL) {
		IPAERR("sps_alloc_endpoint failed dir=%d type=%d\n",
		       dir, type);
		ret = -ENOMEM;
		goto fail_sps_alloc;
	}

	memset(&_connection, 0, sizeof(_connection));
	ret = sps_get_config(pipe, connection);
	if (ret) {
		IPAERR("sps_get_config failed %d dir=%d type=%d\n",
		       ret, dir, type);
		goto fail_sps_get_config;
	}

	if (dir == IPA_BRIDGE_DIR_DL) {
		connection->mode = SPS_MODE_SRC;
		connection->source = dma_hdl;
		connection->destination = sps_out_params.ipa_bam_hdl;
		connection->src_pipe_index = dma_ipa_pipe;
		connection->dest_pipe_index = sps_out_params.ipa_ep_idx;
	} else {
		connection->mode = SPS_MODE_DEST;
		connection->source = sps_out_params.ipa_bam_hdl;
		connection->destination = dma_hdl;
		connection->src_pipe_index = sps_out_params.ipa_ep_idx;
		connection->dest_pipe_index = dma_ipa_pipe;
	}

	connection->event_thresh = IPA_EVENT_THRESHOLD;
	connection->data = sps_out_params.data;
	connection->desc = sps_out_params.desc;
	connection->options = SPS_O_AUTO_ENABLE;

	ret = sps_connect(pipe, connection);
	if (ret) {
		IPAERR("sps_connect failed %d dir=%d type=%d\n",
		       ret, dir, type);
		goto fail_sps_get_config;
	}

	if (dir == IPA_BRIDGE_DIR_DL) {
		bridge[type].pipe[IPA_DL_TO_IPA].pipe = pipe;
		bridge[type].pipe[IPA_DL_TO_IPA].ipa_facing = true;
		bridge[type].pipe[IPA_DL_TO_IPA].valid = true;
	} else {
		bridge[type].pipe[IPA_UL_FROM_IPA].pipe = pipe;
		bridge[type].pipe[IPA_UL_FROM_IPA].ipa_facing = true;
		bridge[type].pipe[IPA_UL_FROM_IPA].valid = true;
	}

	IPADBG("dir=%d type=%d (ipa) src(0x%x:%u)->dst(0x%x:%u)\n",
	       dir, type, connection->source, connection->src_pipe_index,
	       connection->destination, connection->dest_pipe_index);

	pipe_a2 = sps_alloc_endpoint();
	if (pipe_a2 == NULL) {
		IPAERR("sps_alloc_endpoint failed2 dir=%d type=%d\n",
		       dir, type);
		ret = -ENOMEM;
		goto fail_sps_alloc_a2;
	}

	memset(&_connection, 0, sizeof(_connection));
	ret = sps_get_config(pipe_a2, connection);
	if (ret) {
		IPAERR("sps_get_config failed2 %d dir=%d type=%d\n",
		       ret, dir, type);
		goto fail_sps_get_config_a2;
	}

	if (dir == IPA_BRIDGE_DIR_DL) {
		connection->mode = SPS_MODE_DEST;
		connection->source = a2_hdl;
		connection->destination = dma_hdl;
		connection->src_pipe_index = ipa_get_a2_pipe_num(dir, type);
		connection->dest_pipe_index = dma_a2_pipe;
	} else {
		connection->mode = SPS_MODE_SRC;
		connection->source = dma_hdl;
		connection->destination = a2_hdl;
		connection->src_pipe_index = dma_a2_pipe;
		connection->dest_pipe_index = ipa_get_a2_pipe_num(dir, type);
	}

	connection->event_thresh = IPA_EVENT_THRESHOLD;

	ret = ipa_setup_a2_dma_fifos(dir, type, &connection->desc,
				     &connection->data);
	if (ret) {
		IPAERR("fail to setup A2-DMA FIFOs dir=%d type=%d\n",
		       dir, type);
		goto fail_sps_get_config_a2;
	}

	connection->options = SPS_O_AUTO_ENABLE;

	ret = sps_connect(pipe_a2, connection);
	if (ret) {
		IPAERR("sps_connect failed2 %d dir=%d type=%d\n",
		       ret, dir, type);
		goto fail_sps_get_config_a2;
	}

	if (dir == IPA_BRIDGE_DIR_DL) {
		bridge[type].pipe[IPA_DL_FROM_A2].pipe = pipe_a2;
		bridge[type].pipe[IPA_DL_FROM_A2].valid = true;
	} else {
		bridge[type].pipe[IPA_UL_TO_A2].pipe = pipe_a2;
		bridge[type].pipe[IPA_UL_TO_A2].valid = true;
	}

	IPADBG("dir=%d type=%d (a2) src(0x%x:%u)->dst(0x%x:%u)\n",
	       dir, type, connection->source, connection->src_pipe_index,
	       connection->destination, connection->dest_pipe_index);

	return 0;

fail_sps_get_config_a2:
	sps_free_endpoint(pipe_a2);
fail_sps_alloc_a2:
	sps_disconnect(pipe);
fail_sps_get_config:
	sps_free_endpoint(pipe);
fail_sps_alloc:
	ipa_disconnect(*clnt_hdl);
fail_get_a2_prop:
	return ret;
}
int ipa_setup_a2_dma_fifos(enum ipa_bridge_dir dir,
			   enum ipa_bridge_type type,
			   struct sps_mem_buffer *desc,
			   struct sps_mem_buffer *data)
{
	int ret;

	if (type == IPA_BRIDGE_TYPE_EMBEDDED) {
		if (dir == IPA_BRIDGE_DIR_UL) {
			desc->base = ipa_ctx->smem_pipe_mem +
				IPA_SMEM_UL_DESC_FIFO_OFST;
			desc->phys_base = smem_virt_to_phys(desc->base);
			desc->size = ipa_get_desc_fifo_sz(dir, type);
			data->base = ipa_ctx->smem_pipe_mem +
				IPA_SMEM_UL_DATA_FIFO_OFST;
			data->phys_base = smem_virt_to_phys(data->base);
			data->size = ipa_get_data_fifo_sz(dir, type);
		} else {
			desc->base = ipa_ctx->smem_pipe_mem +
				IPA_SMEM_DL_DESC_FIFO_OFST;
			desc->phys_base = smem_virt_to_phys(desc->base);
			desc->size = ipa_get_desc_fifo_sz(dir, type);
			data->base = ipa_ctx->smem_pipe_mem +
				IPA_SMEM_DL_DATA_FIFO_OFST;
			data->phys_base = smem_virt_to_phys(data->base);
			data->size = ipa_get_data_fifo_sz(dir, type);
		}
	} else {
		if (dir == IPA_BRIDGE_DIR_UL) {
			ret = sps_setup_bam2bam_fifo(data,
				IPA_OCIMEM_UL_DATA_FIFO_OFST,
				ipa_get_data_fifo_sz(dir, type), 1);
			if (ret) {
				IPAERR("DAFIFO setup fail %d dir %d type %d\n",
				       ret, dir, type);
				return ret;
			}
			ret = sps_setup_bam2bam_fifo(desc,
				IPA_OCIMEM_UL_DESC_FIFO_OFST,
				ipa_get_desc_fifo_sz(dir, type), 1);
			if (ret) {
				IPAERR("DEFIFO setup fail %d dir %d type %d\n",
				       ret, dir, type);
				return ret;
			}
		} else {
			ret = sps_setup_bam2bam_fifo(data,
				IPA_OCIMEM_DL_DATA_FIFO_OFST,
				ipa_get_data_fifo_sz(dir, type), 1);
			if (ret) {
				IPAERR("DAFIFO setup fail %d dir %d type %d\n",
				       ret, dir, type);
				return ret;
			}
			ret = sps_setup_bam2bam_fifo(desc,
				IPA_OCIMEM_DL_DESC_FIFO_OFST,
				ipa_get_desc_fifo_sz(dir, type), 1);
			if (ret) {
				IPAERR("DEFIFO setup fail %d dir %d type %d\n",
				       ret, dir, type);
				return ret;
			}
		}
	}

	IPADBG("dir=%d type=%d Dpa=%x Dsz=%u Dva=%p dpa=%x dsz=%u dva=%p\n",
	       dir, type, data->phys_base, data->size, data->base,
	       desc->phys_base, desc->size, desc->base);

	return 0;
}
int ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
	u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
	u32 first_evt_idx)
{
	int res;
	struct ipa_mem_buffer mem;
	struct IpaHwMhiInitCmdData_t *init_cmd_data;
	struct IpaHwMhiMsiCmdData_t *msi_cmd;

	if (!ipa_uc_mhi_ctx) {
		IPAERR("Not initialized\n");
		return -EFAULT;
	}

	ipa_inc_client_enable_clks();

	res = ipa_uc_update_hw_flags(0);
	if (res) {
		IPAERR("ipa_uc_update_hw_flags failed %d\n", res);
		goto disable_clks;
	}

	mem.size = sizeof(*init_cmd_data);
	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size,
		&mem.phys_base, GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		res = -ENOMEM;
		goto disable_clks;
	}
	memset(mem.base, 0, mem.size);

	init_cmd_data = (struct IpaHwMhiInitCmdData_t *)mem.base;
	init_cmd_data->msiAddress = msi->addr_low;
	init_cmd_data->mmioBaseAddress = mmio_addr;
	init_cmd_data->deviceMhiCtrlBaseAddress = host_ctrl_addr;
	init_cmd_data->deviceMhiDataBaseAddress = host_data_addr;
	init_cmd_data->firstChannelIndex = first_ch_idx;
	init_cmd_data->firstEventRingIndex = first_evt_idx;

	res = ipa_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MHI_INIT,
		0, false, HZ);
	if (res) {
		IPAERR("ipa_uc_send_cmd failed %d\n", res);
		dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
			mem.phys_base);
		goto disable_clks;
	}
	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);

	mem.size = sizeof(*msi_cmd);
	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size,
		&mem.phys_base, GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		res = -ENOMEM;
		goto disable_clks;
	}

	msi_cmd = (struct IpaHwMhiMsiCmdData_t *)mem.base;
	msi_cmd->msiAddress_hi = msi->addr_hi;
	msi_cmd->msiAddress_low = msi->addr_low;
	msi_cmd->msiData = msi->data;
	msi_cmd->msiMask = msi->mask;

	res = ipa_uc_send_cmd((u32)mem.phys_base,
		IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI, 0, false, HZ);
	if (res) {
		IPAERR("ipa_uc_send_cmd failed %d\n", res);
		dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
			mem.phys_base);
		goto disable_clks;
	}
	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);

	res = 0;

disable_clks:
	ipa_dec_client_disable_clks();
	return res;
}
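/*
 * Usage sketch (illustrative, not part of the driver): the uC MHI engine
 * is programmed once at MHI bring-up; all address and index values below
 * are placeholders, and example_mhi_engine_start() is hypothetical.
 */
static int example_mhi_engine_start(void)
{
	struct ipa_mhi_msi_info msi = {
		.addr_low = 0x0,	/* placeholder MSI address, low 32 bits */
		.addr_hi = 0x0,		/* placeholder MSI address, high 32 bits */
		.data = 0x0,
		.mask = 0x0,
	};

	return ipa_uc_mhi_init_engine(&msi,
			0x0 /* mmio_addr */, 0x0 /* host_ctrl_addr */,
			0x0 /* host_data_addr */, 0 /* first_ch_idx */,
			0 /* first_evt_idx */);
}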