/**
 * mhi_get_chan_max_buffers - usable buffer capacity of a channel's ring
 * @chan: channel number
 *
 * Software and hardware channels have differently sized TRB rings; the
 * -1 presumably keeps one element unusable so a full ring can be told
 * apart from an empty one (rp == wp) — confirm against the ring code.
 */
int mhi_get_chan_max_buffers(u32 chan)
{
	return (IS_SOFTWARE_CHANNEL(chan) ?
		MAX_NR_TRBS_PER_SOFT_CHAN :
		MAX_NR_TRBS_PER_HARD_CHAN) - 1;
}
/*
 * parse_inbound - handle one completed inbound (device-to-host) transfer
 * @mhi_dev_ctxt:     device context owning the channel and client lists
 * @chan:             channel number the completion arrived on
 * @local_ev_trb_loc: the transfer packet/TRB reported by the event
 * @xfer_len:         number of bytes actually transferred
 *
 * If the local transfer ring is empty, the channel is reset instead of
 * processed.  Software channels record the transfer length and invoke the
 * client callback (rate-limited by cb_mod); hardware channels either retire
 * the TRB (client registered) or recycle it back onto the ring (no client,
 * data dropped).
 *
 * Returns MHI_STATUS_SUCCESS, or the result of mhi_send_cmd() on the
 * empty-ring reset path.
 */
MHI_STATUS parse_inbound(mhi_device_ctxt *mhi_dev_ctxt, u32 chan,
		mhi_xfer_pkt *local_ev_trb_loc, u16 xfer_len)
{
	mhi_client_handle *client_handle;
	mhi_ring *local_chan_ctxt;
	mhi_result *result;
	mhi_cb_info cb_info;

	client_handle = mhi_dev_ctxt->client_handle_list[chan];
	local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];

	/* Empty ring (rp == wp): nothing to consume — count the event and
	 * reset the channel after waiting for the modem. */
	if (unlikely(mhi_dev_ctxt->mhi_local_chan_ctxt[chan].rp ==
	    mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp)) {
		mhi_dev_ctxt->mhi_chan_cntr[chan].empty_ring_removal++;
		mhi_wait_for_mdm(mhi_dev_ctxt);
		return mhi_send_cmd(mhi_dev_ctxt,
				MHI_COMMAND_RESET_CHAN,
				chan);
	}

	/* If a client is registered, grab its result slot.
	 * NOTE(review): 'result' is assigned here but never read in this
	 * function — possibly vestigial; confirm before removing. */
	if (NULL != mhi_dev_ctxt->client_handle_list[chan])
		result = &mhi_dev_ctxt->client_handle_list[chan]->result;

	if (unlikely(IS_SOFTWARE_CHANNEL(chan))) {
		/* Software channel: record actual length, retire the TRB,
		 * and notify the client every cb_mod-th packet.
		 * NOTE(review): client_handle is dereferenced below without
		 * a NULL check — assumes software channels always have a
		 * registered client; TODO confirm. */
		MHI_TX_TRB_SET_LEN(TX_TRB_LEN,
				local_ev_trb_loc,
				xfer_len);
		ctxt_del_element(local_chan_ctxt, NULL);
		if (NULL != client_handle->client_info.mhi_client_cb &&
		   (0 == (client_handle->pkt_count % client_handle->cb_mod))) {
			cb_info.cb_reason = MHI_CB_XFER_SUCCESS;
			cb_info.result = &client_handle->result;
			cb_info.result->transaction_status = MHI_STATUS_SUCCESS;
			client_handle->client_info.mhi_client_cb(&cb_info);
		}
	} else {
		/* Hardware (IN) channel. */
		if (likely(NULL != client_handle)) {
			/* Client registered: we are done with this TRB. */
			ctxt_del_element(local_chan_ctxt, NULL);
		} else {
			/* No client registered: drop the data and recycle
			 * the TRB back onto the transfer ring. */
			recycle_trb_and_ring(mhi_dev_ctxt,
				&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
				MHI_RING_TYPE_XFER_RING,
				chan);
		}
	}
	return MHI_STATUS_SUCCESS;
}
/**
 * mhi_init_contexts - populate event, command and channel ring contexts
 * @mhi_dev_ctxt: device context whose control segment is already allocated
 *
 * Walks the allocated event rings, the primary command ring and all
 * channel contexts, writing each context's ring base (both physical via
 * mhi_v2p_addr() and virtual), element count, interrupt moderation and
 * MSI vector.  Finally moves the device state to MHI_STATE_RESET.
 *
 * Returns MHI_STATUS_SUCCESS (no failure paths in this function).
 */
static enum MHI_STATUS mhi_init_contexts(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	u32 i = 0;
	struct mhi_control_seg *mhi_ctrl = mhi_dev_ctxt->mhi_ctrl_seg;
	struct mhi_event_ctxt *event_ctxt = NULL;
	u32 event_ring_index = 0;
	union mhi_xfer_pkt *trb_list = NULL;
	struct mhi_chan_ctxt *chan_ctxt = NULL;
	struct mhi_ring *local_event_ctxt = NULL;
	u32 msi_vec = 0;
	u32 intmod_t = 0;
	uintptr_t ev_ring_addr;

	/* Initialize every allocated event ring context. */
	for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
		MHI_GET_EVENT_RING_INFO(EVENT_RING_MSI_VEC,
					mhi_dev_ctxt->ev_ring_props[i],
					msi_vec);
		/* Per-ring interrupt moderation (values in device units —
		 * presumably msec; confirm against the MHI spec).
		 * NOTE(review): no default case — for any ring index other
		 * than IPA_OUT/IPA_IN, intmod_t keeps its value from the
		 * previous iteration (0 on the first); confirm intended. */
		switch (i) {
		case IPA_OUT_EV_RING:
			intmod_t = 10;
			break;
		case IPA_IN_EV_RING:
			intmod_t = 6;
			break;
		}
		event_ring_index = mhi_dev_ctxt->alloced_ev_rings[i];
		event_ctxt = &mhi_ctrl->mhi_ec_list[event_ring_index];
		local_event_ctxt =
			&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index];

		/* Device-visible (physical) address of the ring's TRB list. */
		ev_ring_addr = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)mhi_ctrl->ev_trb_list[i]);
		mhi_log(MHI_MSG_VERBOSE,
			"Setting msi_vec 0x%x, for ev ring ctxt 0x%x\n",
			msi_vec, event_ring_index);
		mhi_event_ring_init(event_ctxt, ev_ring_addr,
				(uintptr_t)mhi_ctrl->ev_trb_list[i],
				EV_EL_PER_RING, local_event_ctxt,
				intmod_t, msi_vec);
	}

	/* Init Command Ring (primary ring only). */
	mhi_cmd_ring_init(&mhi_ctrl->mhi_cmd_ctxt_list[PRIMARY_CMD_RING],
			mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_ctrl->cmd_trb_list[PRIMARY_CMD_RING]),
			(uintptr_t)mhi_ctrl->cmd_trb_list[PRIMARY_CMD_RING],
			CMD_EL_PER_RING,
			&mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING]);

	mhi_log(MHI_MSG_INFO, "Initializing contexts\n");
	/* Initialize Channel Contexts.  Even channel numbers are OUT,
	 * odd are IN (the (i % 2) test below); soft and hard channels
	 * differ only in ring depth and the event-ring argument. */
	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
		trb_list = mhi_dev_ctxt->mhi_ctrl_seg->xfer_trb_list[i];
		chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i];
		if (IS_SOFTWARE_CHANNEL(i)) {
			mhi_init_chan_ctxt(chan_ctxt,
				mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)trb_list),
				(uintptr_t)trb_list,
				MAX_NR_TRBS_PER_SOFT_CHAN,
				(i % 2) ? MHI_IN : MHI_OUT,
				0,
				&mhi_dev_ctxt->mhi_local_chan_ctxt[i]);
		} else if (IS_HARDWARE_CHANNEL(i)) {
			mhi_init_chan_ctxt(chan_ctxt,
				mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)trb_list),
				(uintptr_t)trb_list,
				MAX_NR_TRBS_PER_HARD_CHAN,
				(i % 2) ? MHI_IN : MHI_OUT,
				i,
				&mhi_dev_ctxt->mhi_local_chan_ctxt[i]);
		}
	}
	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
	return MHI_STATUS_SUCCESS;
}
static enum MHI_STATUS mhi_init_device_ctrl(struct mhi_device_ctxt *mhi_dev_ctxt) { size_t ctrl_seg_size = 0; size_t ctrl_seg_offset = 0; u32 i = 0; u32 align_len = sizeof(u64)*2; enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; mhi_dev_ctxt->enable_lpm = 1; if (NULL == mhi_dev_ctxt || NULL == mhi_dev_ctxt->mhi_ctrl_seg_info || NULL == mhi_dev_ctxt->mhi_ctrl_seg_info->dev) return MHI_STATUS_ERROR; mhi_log(MHI_MSG_INFO, "Allocating control segment.\n"); ctrl_seg_size += sizeof(struct mhi_control_seg); /* Calculate the size of the control segment needed */ ctrl_seg_size += align_len - (ctrl_seg_size % align_len); for (i = 0; i < MHI_MAX_CHANNELS; ++i) { if (IS_HARDWARE_CHANNEL(i)) ctrl_seg_size += sizeof(union mhi_xfer_pkt) * (MAX_NR_TRBS_PER_HARD_CHAN + ELEMENT_GAP); else if (IS_SOFTWARE_CHANNEL(i)) ctrl_seg_size += sizeof(union mhi_xfer_pkt) * (MAX_NR_TRBS_PER_SOFT_CHAN + ELEMENT_GAP); } ctrl_seg_size += align_len - (ctrl_seg_size % align_len); for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) ctrl_seg_size += sizeof(union mhi_event_pkt)* (EV_EL_PER_RING + ELEMENT_GAP); ctrl_seg_size += align_len - (ctrl_seg_size % align_len); ret_val = mhi_mallocmemregion(mhi_dev_ctxt->mhi_ctrl_seg_info, ctrl_seg_size); if (MHI_STATUS_SUCCESS != ret_val) return MHI_STATUS_ERROR; (mhi_dev_ctxt->mhi_ctrl_seg = mhi_get_virt_addr(mhi_dev_ctxt->mhi_ctrl_seg_info)); if (0 == mhi_dev_ctxt->mhi_ctrl_seg) return MHI_STATUS_ALLOC_ERROR; /* Set the channel contexts, event contexts and cmd context */ ctrl_seg_offset = (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg + sizeof(struct mhi_control_seg); ctrl_seg_offset += align_len - (ctrl_seg_offset % align_len); /* Set the TRB lists */ for (i = 0; i < MHI_MAX_CHANNELS; ++i) { if (IS_HARDWARE_CHANNEL(i)) { mhi_dev_ctxt->mhi_ctrl_seg->xfer_trb_list[i] = (union mhi_xfer_pkt *)ctrl_seg_offset; ctrl_seg_offset += sizeof(union mhi_xfer_pkt) * (MAX_NR_TRBS_PER_HARD_CHAN + ELEMENT_GAP); } else if (IS_SOFTWARE_CHANNEL(i)) { mhi_dev_ctxt->mhi_ctrl_seg->xfer_trb_list[i] = 
(union mhi_xfer_pkt *)ctrl_seg_offset; ctrl_seg_offset += sizeof(union mhi_xfer_pkt) * (MAX_NR_TRBS_PER_SOFT_CHAN + ELEMENT_GAP); } } ctrl_seg_offset += align_len - (ctrl_seg_offset % align_len); for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) { mhi_dev_ctxt->mhi_ctrl_seg->ev_trb_list[i] = (union mhi_event_pkt *)ctrl_seg_offset; ctrl_seg_offset += sizeof(union mhi_event_pkt) * (EV_EL_PER_RING + ELEMENT_GAP); } return MHI_STATUS_SUCCESS; }