/**
 * @brief Reset a single MHI channel back to its initial state.
 *
 * Zeroes the ring memory and re-initializes the channel context.
 * Hardware channels are re-sized with the hardware TRB count and bound
 * to the IPA event rings (odd channel id = inbound); software channels
 * use the software TRB count and the shared software event ring.
 *
 * @param client_handle [IN ] client handle identifying the channel to reset
 *
 * @return MHI_STATUS MHI_STATUS_SUCCESS on success, MHI_STATUS_ERROR
 *         on bad parameters or context re-init failure
 */
MHI_STATUS mhi_reset_channel(mhi_client_handle *client_handle)
{
	MHI_STATUS ret_val;
	mhi_chan_ctxt *cur_ctxt = NULL;
	mhi_device_ctxt *mhi_dev_ctxt = NULL;
	u32 chan_id = 0;
	mhi_ring *cur_ring = NULL;

	chan_id = client_handle->chan;
	mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
	if (chan_id > (MHI_MAX_CHANNELS - 1) || NULL == mhi_dev_ctxt) {
		mhi_log(MHI_MSG_ERROR, "Bad input parameters\n");
		return MHI_STATUS_ERROR;
	}

	mutex_lock(&mhi_dev_ctxt->mhi_chan_mutex[chan_id]);

	/* We need to reset the channel completely; we assume the ring base
	 * address is still correct. */
	cur_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan_id];
	/* NOTE(review): this indexes mhi_local_event_ctxt with a *channel*
	 * id even though this is a channel reset; confirm this should not
	 * be mhi_local_chan_ctxt. */
	cur_ring = &mhi_dev_ctxt->mhi_local_event_ctxt[chan_id];
	memset(cur_ring->base, 0, sizeof(char)*cur_ring->len);

	if (IS_HARDWARE_CHANNEL(chan_id)) {
		/* Hardware channel: hardware TRB count, IPA event rings. */
		ret_val = mhi_init_chan_ctxt(cur_ctxt,
				mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)cur_ring->base),
				(uintptr_t)cur_ring->base,
				MAX_NR_TRBS_PER_HARD_CHAN,
				(chan_id % 2) ? MHI_IN : MHI_OUT,
				(chan_id % 2) ? IPA_IN_EV_RING : IPA_OUT_EV_RING,
				cur_ring);
	} else {
		/* Software channel: software TRB count, shared SW ring. */
		ret_val = mhi_init_chan_ctxt(cur_ctxt,
				mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)cur_ring->base),
				(uintptr_t)cur_ring->base,
				MAX_NR_TRBS_PER_SOFT_CHAN,
				(chan_id % 2) ? MHI_IN : MHI_OUT,
				SOFTWARE_EV_RING,
				cur_ring);
	}

	if (MHI_STATUS_SUCCESS != ret_val)
		mhi_log(MHI_MSG_ERROR, "Failed to reset chan ctxt\n");

	mutex_unlock(&mhi_dev_ctxt->mhi_chan_mutex[chan_id]);
	return ret_val;
}
/**
 * @brief Ring the doorbell for every allocated event ring.
 *
 * For each allocated event ring, the local write pointer is translated
 * to its physical address and written to the event doorbell, unless a
 * competing writer raised mhi_ev_db_order under the ring's spinlock.
 *
 * @param mhi_dev_ctxt [IN ] MHI device context
 */
void ring_all_ev_dbs(mhi_device_ctxt *mhi_dev_ctxt)
{
	mhi_control_seg *ctrl_seg = NULL;
	mhi_event_ctxt *ev_ctxt = NULL;
	spinlock_t *ev_lock = NULL;
	unsigned long flags;
	u64 wp_phys = 0;
	u32 ring_nr;
	u32 idx;

	ctrl_seg = mhi_dev_ctxt->mhi_ctrl_seg;
	for (idx = 0; idx < EVENT_RINGS_ALLOCATED; ++idx) {
		ring_nr = mhi_dev_ctxt->alloced_ev_rings[idx];
		ev_lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_nr];
		/* Clear the order flag before taking the lock; a concurrent
		 * writer setting it afterwards suppresses our db write. */
		mhi_dev_ctxt->mhi_ev_db_order[ring_nr] = 0;
		spin_lock_irqsave(ev_lock, flags);
		/* NOTE(review): ev_ctxt is computed but not otherwise used */
		ev_ctxt = &ctrl_seg->mhi_ec_list[ring_nr];
		wp_phys = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_local_event_ctxt[ring_nr].wp);
		if (0 == mhi_dev_ctxt->mhi_ev_db_order[ring_nr])
			MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->event_db_addr,
					ring_nr, wp_phys);
		mhi_dev_ctxt->mhi_ev_db_order[ring_nr] = 0;
		spin_unlock_irqrestore(ev_lock, flags);
	}
}
/**
 * @brief Ring the primary command ring doorbell if work is pending.
 *
 * The doorbell is written only when the command ring's read and write
 * pointers differ and no competing doorbell write was ordered while the
 * command mutex was held.
 *
 * @param mhi_dev_ctxt [IN ] MHI device context
 */
void ring_all_cmd_dbs(mhi_device_ctxt *mhi_dev_ctxt)
{
	struct mutex *ring_mutex = NULL;
	mhi_ring *cmd_ring = NULL;
	u64 rp_phys = 0;
	u64 wp_phys;

	mhi_log(MHI_MSG_VERBOSE, "Ringing chan dbs\n");
	ring_mutex = &mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING];
	/* Write the cmd ring */
	mhi_dev_ctxt->cmd_ring_order = 0;
	mutex_lock(ring_mutex);
	cmd_ring = &mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
	rp_phys = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
				(uintptr_t)cmd_ring->rp);
	wp_phys = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
		(uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[0].wp);
	/* rp == wp means nothing queued; skip the doorbell entirely */
	if (0 == mhi_dev_ctxt->cmd_ring_order && rp_phys != wp_phys)
		MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->cmd_db_addr,
				0, wp_phys);
	mhi_dev_ctxt->cmd_ring_order = 0;
	mutex_unlock(ring_mutex);
}
/**
 * @brief Unconditionally ring the doorbell for one event ring.
 *
 * Translates the ring's local write pointer to a physical address and
 * writes it to the event doorbell register.
 *
 * @param mhi_dev_ctxt     [IN ] MHI device context
 * @param event_ring_index [IN ] index of the event ring to ring
 */
void ring_ev_db(mhi_device_ctxt *mhi_dev_ctxt, u32 event_ring_index)
{
	mhi_ring *ring = NULL;
	u64 wp_phys;

	ring = &mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index];
	wp_phys = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
				(uintptr_t)ring->wp);
	MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->event_db_addr,
			event_ring_index, wp_phys);
}
/**
 * @brief Ring a channel doorbell unless another writer beat us to it.
 *
 * Clears the channel's db-order flag, then, under the channel's db
 * spinlock, writes the doorbell only if no competing writer raised the
 * flag in the meantime.
 *
 * @param mhi_dev_ctxt [IN ] MHI device context
 * @param chan         [IN ] channel number whose doorbell to ring
 */
void conditional_chan_db_write(mhi_device_ctxt *mhi_dev_ctxt, u32 chan)
{
	unsigned long flags;
	u64 wp_phys;

	mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
	spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan], flags);
	if (0 == mhi_dev_ctxt->mhi_chan_db_order[chan]) {
		wp_phys = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
		MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->channel_db_addr,
				chan, wp_phys);
	}
	mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
	spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags);
}
/**
 * @brief Notify the device of pending work on a channel.
 *
 * If the link is in M0/M1 with the channel healthy and no M3 pending,
 * rings the channel doorbell (throttled to every MHI_XFER_DB_INTERVAL
 * packets for hardware inbound channels). Otherwise triggers a wakeup
 * from M3 when one is pending or active.
 *
 * @param mhi_dev_ctxt [IN ] MHI device context
 * @param chan         [IN ] channel number with pending data
 *
 * @return MHI_STATUS always MHI_STATUS_SUCCESS
 */
MHI_STATUS mhi_notify_device(mhi_device_ctxt *mhi_dev_ctxt, u32 chan)
{
	unsigned long flags = 0;
	u64 db_value;
	mhi_chan_ctxt *chan_ctxt;
	chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];
	spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan], flags);
	/* Only ring the doorbell while the link is awake (M0/M1), the
	 * channel is not in error and no M3 transition is pending */
	if (likely(((MHI_STATE_M0 == mhi_dev_ctxt->mhi_state) ||
	     (MHI_STATE_M1 == mhi_dev_ctxt->mhi_state)) &&
	    (chan_ctxt->mhi_chan_state != MHI_CHAN_STATE_ERROR) &&
	    !mhi_dev_ctxt->flags.pending_M3)) {
		/* Mark that a db write was ordered so concurrent
		 * conditional writers can skip theirs */
		mhi_dev_ctxt->mhi_chan_db_order[chan]++;
		db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
		if (IS_HARDWARE_CHANNEL(chan) && (chan % 2)) {
			/* Hardware inbound channel: throttle db writes to
			 * once every MHI_XFER_DB_INTERVAL packets */
			if ((mhi_dev_ctxt->mhi_chan_cntr[chan].pkts_xferd %
					MHI_XFER_DB_INTERVAL) == 0) {
				MHI_WRITE_DB(mhi_dev_ctxt,
					mhi_dev_ctxt->channel_db_addr,
					chan, db_value);
			}
		} else {
			MHI_WRITE_DB(mhi_dev_ctxt,
				mhi_dev_ctxt->channel_db_addr,
				chan, db_value);
		}
	} else {
		mhi_log(MHI_MSG_VERBOSE,
			"Triggering wakeup due to pending data MHI state %d, Chan state %d, Pending M3 %d\n",
			mhi_dev_ctxt->mhi_state, chan_ctxt->mhi_chan_state,
			mhi_dev_ctxt->flags.pending_M3);
		if (mhi_dev_ctxt->flags.pending_M3 ||
		    mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
			mhi_wake_dev_from_m3(mhi_dev_ctxt);
		}
	}
	spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags);
	/* If there are no clients still sending we can trigger our
	 * inactivity timer */
	return MHI_STATUS_SUCCESS;
}
/**
 * @brief Recycle an acknowledged TRB back onto a client's channel ring.
 *
 * Copies the TRB at the ack read pointer to the write pointer (with its
 * length reset to TRB_MAX_DATA_SIZE), advances the ring, and rings the
 * channel doorbell if the link is up and in M0/M1, or triggers a wakeup
 * when M3 is pending/active.
 *
 * @param client_handle [IN ] client handle whose channel TRB to recycle
 *
 * @return MHI_STATUS result of the ring element operations
 */
MHI_STATUS mhi_client_recycle_trb(mhi_client_handle *client_handle)
{
	unsigned long flags;
	u32 chan = client_handle->chan;
	MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
	struct mutex *chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
	mhi_ring *local_ctxt = NULL;
	u64 db_value;
	local_ctxt = &client_handle->mhi_dev_ctxt->mhi_local_chan_ctxt[chan];

	mutex_lock(chan_mutex);
	/* Reset the TRB length before re-queueing it */
	MHI_TX_TRB_SET_LEN(TX_TRB_LEN,
				(mhi_xfer_pkt *)local_ctxt->ack_rp,
				TRB_MAX_DATA_SIZE);
	*(mhi_xfer_pkt *)local_ctxt->wp =
			*(mhi_xfer_pkt *)local_ctxt->ack_rp;
	/* NOTE(review): the return of delete_element is immediately
	 * overwritten by ctxt_add_element; a delete failure is silently
	 * lost — confirm this is intentional. */
	ret_val = delete_element(local_ctxt, &local_ctxt->ack_rp,
				&local_ctxt->rp, NULL);
	ret_val = ctxt_add_element(local_ctxt, NULL);
	db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
				(uintptr_t)local_ctxt->wp);
	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	/* Hold off M3 entry while we touch the doorbell */
	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	if (mhi_dev_ctxt->flags.link_up) {
		if (MHI_STATE_M0 == mhi_dev_ctxt->mhi_state ||
		    MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) {
			mhi_assert_device_wake(mhi_dev_ctxt);
			MHI_WRITE_DB(mhi_dev_ctxt,
					mhi_dev_ctxt->channel_db_addr,
					chan, db_value);
		} else if (mhi_dev_ctxt->flags.pending_M3 ||
			   mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
			mhi_wake_dev_from_m3(mhi_dev_ctxt);
		}
	}
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->mhi_chan_cntr[chan].pkts_xferd++;
	mutex_unlock(chan_mutex);
	return ret_val;
}
enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt) { u64 pcie_dword_val = 0; u32 pcie_word_val = 0; u32 i = 0; enum MHI_STATUS ret_val; mhi_log(MHI_MSG_INFO, "~~~ Initializing MMIO ~~~\n"); mhi_dev_ctxt->mmio_addr = mhi_dev_ctxt->dev_props->bar0_base; mhi_log(MHI_MSG_INFO, "Bar 0 address is at: 0x%p\n", mhi_dev_ctxt->mmio_addr); mhi_dev_ctxt->mmio_len = mhi_reg_read(mhi_dev_ctxt->mmio_addr, MHIREGLEN); if (0 == mhi_dev_ctxt->mmio_len) { mhi_log(MHI_MSG_ERROR, "Received mmio length as zero\n"); return MHI_STATUS_ERROR; } mhi_log(MHI_MSG_INFO, "Testing MHI Ver\n"); mhi_dev_ctxt->dev_props->mhi_ver = mhi_reg_read( mhi_dev_ctxt->mmio_addr, MHIVER); if (MHI_VERSION != mhi_dev_ctxt->dev_props->mhi_ver) { mhi_log(MHI_MSG_CRITICAL, "Bad MMIO version, 0x%x\n", mhi_dev_ctxt->dev_props->mhi_ver); if (mhi_dev_ctxt->dev_props->mhi_ver == 0xFFFFFFFF) ret_val = mhi_wait_for_mdm(mhi_dev_ctxt); if (ret_val) return MHI_STATUS_ERROR; } /* Enable the channels */ for (i = 0; i < MHI_MAX_CHANNELS; ++i) { struct mhi_chan_ctxt *chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i]; if (VALID_CHAN_NR(i)) chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED; else chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_DISABLED; } mhi_log(MHI_MSG_INFO, "Read back MMIO Ready bit successfully. 
Moving on..\n"); mhi_log(MHI_MSG_INFO, "Reading channel doorbell offset\n"); mhi_dev_ctxt->channel_db_addr = mhi_dev_ctxt->mmio_addr; mhi_dev_ctxt->event_db_addr = mhi_dev_ctxt->mmio_addr; mhi_dev_ctxt->channel_db_addr += mhi_reg_read_field( mhi_dev_ctxt->mmio_addr, CHDBOFF, CHDBOFF_CHDBOFF_MASK, CHDBOFF_CHDBOFF_SHIFT); mhi_log(MHI_MSG_INFO, "Reading event doorbell offset\n"); mhi_dev_ctxt->event_db_addr += mhi_reg_read_field( mhi_dev_ctxt->mmio_addr, ERDBOFF, ERDBOFF_ERDBOFF_MASK, ERDBOFF_ERDBOFF_SHIFT); mhi_log(MHI_MSG_INFO, "Setting all MMIO values.\n"); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, MHI_MAX_CHANNELS); pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list); pcie_word_val = HIGH_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, CCABAP_HIGHER, CCABAP_HIGHER_CCABAP_HIGHER_MASK, CCABAP_HIGHER_CCABAP_HIGHER_SHIFT, pcie_word_val); pcie_word_val = LOW_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, CCABAP_LOWER, CCABAP_LOWER_CCABAP_LOWER_MASK, CCABAP_LOWER_CCABAP_LOWER_SHIFT, pcie_word_val); /* Write the Event Context Base Address Register High and Low parts */ pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list); pcie_word_val = HIGH_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, ECABAP_HIGHER, ECABAP_HIGHER_ECABAP_HIGHER_MASK, ECABAP_HIGHER_ECABAP_HIGHER_SHIFT, pcie_word_val); pcie_word_val = LOW_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, ECABAP_LOWER, ECABAP_LOWER_ECABAP_LOWER_MASK, ECABAP_LOWER_ECABAP_LOWER_SHIFT, pcie_word_val); /* Write the Command Ring Control Register High and Low parts */ pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_cmd_ctxt_list); pcie_word_val = 
HIGH_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, CRCBAP_HIGHER, CRCBAP_HIGHER_CRCBAP_HIGHER_MASK, CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT, pcie_word_val); pcie_word_val = LOW_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, CRCBAP_LOWER, CRCBAP_LOWER_CRCBAP_LOWER_MASK, CRCBAP_LOWER_CRCBAP_LOWER_SHIFT, pcie_word_val); mhi_dev_ctxt->cmd_db_addr = mhi_dev_ctxt->mmio_addr + CRDB_LOWER; /* Set the control segment in the MMIO */ pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg); pcie_word_val = HIGH_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, MHICTRLBASE_HIGHER, MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK, MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT, pcie_word_val); pcie_word_val = LOW_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, MHICTRLBASE_LOWER, MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK, MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT, pcie_word_val); pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg) + mhi_get_memregion_len(mhi_dev_ctxt->mhi_ctrl_seg_info) - 1; pcie_word_val = HIGH_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, MHICTRLLIMIT_HIGHER, MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK, MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT, pcie_word_val); pcie_word_val = LOW_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, MHICTRLLIMIT_LOWER, MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK, MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT, pcie_word_val); /* Set the data segment in the MMIO */ pcie_dword_val = MHI_DATA_SEG_WINDOW_START_ADDR; pcie_word_val = HIGH_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, MHIDATABASE_HIGHER, MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK, MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT, pcie_word_val); pcie_word_val = 
LOW_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, MHIDATABASE_LOWER, MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK, MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT, pcie_word_val); pcie_dword_val = MHI_DATA_SEG_WINDOW_END_ADDR; pcie_word_val = HIGH_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, MHIDATALIMIT_HIGHER, MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK, MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT, pcie_word_val); pcie_word_val = LOW_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, MHIDATALIMIT_LOWER, MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK, MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT, pcie_word_val); mhi_log(MHI_MSG_INFO, "Done..\n"); return MHI_STATUS_SUCCESS; }
/**
 * @brief Process the RESET state transition work item.
 *
 * Probes the device for readiness and, on success, queues the READY
 * transition; if the device is not ready, re-queues another RESET
 * probe. Ten consecutive RESET attempts without success trigger a
 * panic to capture a device crash dump. Finally, the local command,
 * event and channel ring pointers are synchronised back into the
 * shared contexts so the device can resume exactly where it left off
 * (SSR recovery).
 *
 * @param mhi_dev_ctxt  [IN ] MHI device context
 * @param cur_work_item [IN ] transition work item being processed
 *
 * @return MHI_STATUS result of readiness probing / transition queuing
 */
MHI_STATUS process_RESET_transition(mhi_device_ctxt *mhi_dev_ctxt,
		STATE_TRANSITION cur_work_item)
{
	u32 i = 0;
	u32 ev_ring_index;
	MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	mhi_log(MHI_MSG_INFO, "Processing RESET state transition\n");
	mhi_dev_ctxt->counters.mhi_reset_cntr++;
	/* Ten failed resets in a row: deliberately crash the AP to get
	 * a CP ramdump for post-mortem analysis */
	if (mhi_dev_ctxt->counters.mhi_reset_cntr >= 10) {
		panic("CP Crash: need CP dump");
	}
	ret_val = mhi_test_for_device_ready(mhi_dev_ctxt);
	switch (ret_val) {
	case MHI_STATUS_SUCCESS:
		mhi_dev_ctxt->counters.mhi_reset_cntr = 0;
		mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
		ret_val = mhi_init_state_transition(mhi_dev_ctxt,
					STATE_TRANSITION_READY);
		if (MHI_STATUS_SUCCESS != ret_val)
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to initiate 0x%x state trans\n",
				STATE_TRANSITION_READY);
		break;
	case MHI_STATUS_LINK_DOWN:
		mhi_log(MHI_MSG_CRITICAL, "Link down detected\n");
		break;
	case MHI_STATUS_DEVICE_NOT_READY:
		/* Device still coming up: queue another RESET probe */
		ret_val = mhi_init_state_transition(mhi_dev_ctxt,
					STATE_TRANSITION_RESET);
		if (MHI_STATUS_SUCCESS != ret_val)
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to initiate 0x%x state trans\n",
				STATE_TRANSITION_RESET);
		break;
	default:
		mhi_log(MHI_MSG_CRITICAL,
			"Unexpected ret code detected for\n");
		break;
	}
	/* Synchronise the local rp/wp with the ctxt rp/wp
	   This will enable the device to pick up exactly where it left off,
	   should this be an SSR recovery */
	for (i = 0; i < NR_OF_CMD_RINGS; ++i) {
		mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp =
				mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
		mhi_dev_ctxt->mhi_local_cmd_ctxt[i].wp =
				mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_cmd_ctxt_list[i].mhi_cmd_ring_read_ptr =
			mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp);
	}
	for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
		ev_ring_index = mhi_dev_ctxt->alloced_ev_rings[i];
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[ev_ring_index].mhi_event_read_ptr =
			mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_local_event_ctxt[ev_ring_index].rp);
	}
	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
		if (VALID_CHAN_NR(i)) {
			mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i].mhi_trb_read_ptr =
				mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
				(uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[i].rp);
			mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i].mhi_trb_write_ptr =
				mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
				(uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[i].wp);
		}
	}
	/* reset outbound_ack count */
	atomic_set(&mhi_dev_ctxt->counters.outbound_acks, 0);
	return ret_val;
}
/**
 * @brief Initialize all event, command and channel contexts.
 *
 * Binds each allocated event ring to its MSI vector and interrupt
 * moderation value (10 for IPA OUT, 6 for IPA IN, otherwise 0), sets
 * up the primary command ring, and initializes every channel context
 * with TRB counts and event ring selection based on whether the
 * channel is software or hardware (odd channel id = inbound). Leaves
 * the device state at MHI_STATE_RESET.
 *
 * @param mhi_dev_ctxt [IN ] MHI device context
 *
 * @return MHI_STATUS always MHI_STATUS_SUCCESS
 */
static enum MHI_STATUS mhi_init_contexts(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	u32 i = 0;
	struct mhi_control_seg *mhi_ctrl = mhi_dev_ctxt->mhi_ctrl_seg;
	struct mhi_event_ctxt *event_ctxt = NULL;
	u32 event_ring_index = 0;
	union mhi_xfer_pkt *trb_list = NULL;
	struct mhi_chan_ctxt *chan_ctxt = NULL;
	struct mhi_ring *local_event_ctxt = NULL;
	u32 msi_vec = 0;
	u32 intmod_t = 0;
	uintptr_t ev_ring_addr;

	for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
		MHI_GET_EVENT_RING_INFO(EVENT_RING_MSI_VEC,
					mhi_dev_ctxt->ev_ring_props[i],
					msi_vec);
		/* Per-ring interrupt moderation; rings other than the IPA
		 * pair keep the previous/default value */
		switch (i) {
		case IPA_OUT_EV_RING:
			intmod_t = 10;
			break;
		case IPA_IN_EV_RING:
			intmod_t = 6;
			break;
		}
		event_ring_index = mhi_dev_ctxt->alloced_ev_rings[i];
		event_ctxt = &mhi_ctrl->mhi_ec_list[event_ring_index];
		local_event_ctxt =
			&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index];
		ev_ring_addr = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)mhi_ctrl->ev_trb_list[i]);
		mhi_log(MHI_MSG_VERBOSE,
			"Setting msi_vec 0x%x, for ev ring ctxt 0x%x\n",
			msi_vec, event_ring_index);
		mhi_event_ring_init(event_ctxt, ev_ring_addr,
				(uintptr_t)mhi_ctrl->ev_trb_list[i],
				EV_EL_PER_RING, local_event_ctxt,
				intmod_t, msi_vec);
	}

	/* Init Command Ring */
	mhi_cmd_ring_init(&mhi_ctrl->mhi_cmd_ctxt_list[PRIMARY_CMD_RING],
			mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_ctrl->cmd_trb_list[PRIMARY_CMD_RING]),
			(uintptr_t)mhi_ctrl->cmd_trb_list[PRIMARY_CMD_RING],
			CMD_EL_PER_RING,
			&mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING]);

	mhi_log(MHI_MSG_INFO, "Initializing contexts\n");
	/* Initialize Channel Contexts */
	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
		trb_list = mhi_dev_ctxt->mhi_ctrl_seg->xfer_trb_list[i];
		chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i];
		if (IS_SOFTWARE_CHANNEL(i)) {
			/* Software channel: event ring 0 */
			mhi_init_chan_ctxt(chan_ctxt,
				mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)trb_list),
				(uintptr_t)trb_list,
				MAX_NR_TRBS_PER_SOFT_CHAN,
				(i % 2) ? MHI_IN : MHI_OUT,
				0,
				&mhi_dev_ctxt->mhi_local_chan_ctxt[i]);
		} else if (IS_HARDWARE_CHANNEL(i)) {
			/* Hardware channel: event ring index = channel id */
			mhi_init_chan_ctxt(chan_ctxt,
				mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)trb_list),
				(uintptr_t)trb_list,
				MAX_NR_TRBS_PER_HARD_CHAN,
				(i % 2) ? MHI_IN : MHI_OUT,
				i,
				&mhi_dev_ctxt->mhi_local_chan_ctxt[i]);
		}
	}
	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
	return MHI_STATUS_SUCCESS;
}
/**
 * @brief Recycle one ring element and ring the matching doorbell.
 *
 * Deletes an element from the given ring and re-adds one (recycling),
 * copying the payload for transfer rings. For hardware event rings
 * after an M0<->M3 cycle, the shared event context write pointer is
 * also refreshed. The appropriate doorbell (command, event, or
 * channel) is then written if the device is in M0/M1 with the link up;
 * event doorbells are throttled to every MHI_EV_DB_INTERVAL recycles.
 *
 * @param mhi_dev_ctxt [IN ] MHI device context
 * @param ring         [IN ] local ring to recycle an element on
 * @param ring_type    [IN ] command / event / transfer ring selector
 * @param ring_index   [IN ] ring (or channel) index for the doorbell
 *
 * @return MHI_STATUS result of the ring element operations
 */
MHI_STATUS recycle_trb_and_ring(mhi_device_ctxt *mhi_dev_ctxt,
		mhi_ring *ring,
		MHI_RING_TYPE ring_type,
		u32 ring_index)
{
	MHI_STATUS ret_val = MHI_STATUS_ERROR;
	u64 db_value = 0;
	void *removed_element = NULL;
	void *added_element = NULL;

	/* TODO This will not cover us for ring_index out of
	 * bounds for cmd or event channels */
	if (NULL == mhi_dev_ctxt || NULL == ring ||
	    ring_type > (MHI_RING_TYPE_MAX - 1) ||
	    ring_index > (MHI_MAX_CHANNELS - 1)) {
		mhi_log(MHI_MSG_ERROR, "Bad input params\n");
		return ret_val;
	}
	ret_val = ctxt_del_element(ring, &removed_element);
	if (MHI_STATUS_SUCCESS != ret_val) {
		mhi_log(MHI_MSG_ERROR, "Could not remove element from ring\n");
		return MHI_STATUS_ERROR;
	}
	ret_val = ctxt_add_element(ring, &added_element);
	if (MHI_STATUS_SUCCESS != ret_val)
		mhi_log(MHI_MSG_ERROR, "Could not add element to ring\n");
	db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
				(uintptr_t)ring->wp);
	if (MHI_STATUS_SUCCESS != ret_val)
		return ret_val;
	if (MHI_RING_TYPE_XFER_RING == ring_type) {
		/* Carry the TX payload over to the recycled element */
		mhi_xfer_pkt *removed_xfer_pkt =
			(mhi_xfer_pkt *)removed_element;
		mhi_xfer_pkt *added_xfer_pkt =
			(mhi_xfer_pkt *)added_element;
		added_xfer_pkt->data_tx_pkt =
				*(mhi_tx_pkt *)removed_xfer_pkt;
	} else if (MHI_RING_TYPE_EVENT_RING == ring_type &&
		   mhi_dev_ctxt->counters.m0_m3 > 0 &&
		   IS_HARDWARE_CHANNEL(ring_index)) {
		spinlock_t *lock = NULL;
		unsigned long flags = 0;
/* Board-specific variant: recompute the wp under the lock */
#if defined(CONFIG_MACH_LENTISLTE_SKT) || defined(CONFIG_MACH_LENTISLTE_LGT) || defined(CONFIG_MACH_LENTISLTE_KTT) || defined(CONFIG_SEC_KCCAT6_PROJECT)
		mhi_log(MHI_MSG_VERBOSE,
			"Updating ev context id %d, value 0x%llx\n",
			ring_index, db_value);
		lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
		spin_lock_irqsave(lock, flags);
		db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)ring->wp);
		mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[ring_index].mhi_event_write_ptr = db_value;
#else
		mhi_log(MHI_MSG_ERROR, "Updating EV_CTXT\n");
		lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
		spin_lock_irqsave(lock, flags);
		mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[ring_index].mhi_event_write_ptr = db_value;
#endif
		mhi_dev_ctxt->ev_counter[ring_index]++;
		spin_unlock_irqrestore(lock, flags);
	}
	/* Hold off M3 entry while we touch the doorbells */
	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	/* Asserting Device Wake here will immediately wake the mdm */
	if ((MHI_STATE_M0 == mhi_dev_ctxt->mhi_state ||
	     MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) &&
	    mhi_dev_ctxt->flags.link_up) {
		switch (ring_type) {
		case MHI_RING_TYPE_CMD_RING:
		{
			struct mutex *cmd_mutex = NULL;
			cmd_mutex =
				&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING];
			mutex_lock(cmd_mutex);
			mhi_dev_ctxt->cmd_ring_order = 1;
			MHI_WRITE_DB(mhi_dev_ctxt,
					mhi_dev_ctxt->cmd_db_addr,
					ring_index, db_value);
			mutex_unlock(cmd_mutex);
			break;
		}
		case MHI_RING_TYPE_EVENT_RING:
		{
			spinlock_t *lock = NULL;
			unsigned long flags = 0;
			lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
			spin_lock_irqsave(lock, flags);
			mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
#if defined(CONFIG_MACH_LENTISLTE_SKT) || defined(CONFIG_MACH_LENTISLTE_LGT) || defined(CONFIG_MACH_LENTISLTE_KTT)|| defined(CONFIG_SEC_KCCAT6_PROJECT)
			db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
						(uintptr_t)ring->wp);
#endif
			/* Throttle event db writes to every
			 * MHI_EV_DB_INTERVAL recycles */
			if ((mhi_dev_ctxt->ev_counter[ring_index] %
						MHI_EV_DB_INTERVAL) == 0) {
				MHI_WRITE_DB(mhi_dev_ctxt,
						mhi_dev_ctxt->event_db_addr,
						ring_index, db_value);
			}
			spin_unlock_irqrestore(lock, flags);
			break;
		}
		case MHI_RING_TYPE_XFER_RING:
		{
			unsigned long flags = 0;
			spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[ring_index], flags);
			mhi_dev_ctxt->mhi_chan_db_order[ring_index] = 1;
			MHI_WRITE_DB(mhi_dev_ctxt,
					mhi_dev_ctxt->channel_db_addr,
					ring_index, db_value);
			spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[ring_index], flags);
			break;
		}
		default:
			mhi_log(MHI_MSG_ERROR, "Bad ring type\n");
		}
	}
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	return ret_val;
}
/**
 * @brief Parse a single transfer completion event from the device.
 *
 * For EOB/OVERFLOW/EOT completions, walks the channel ring from the
 * current read pointer up to the TRB the event points at, filling the
 * client's result (payload pointer, length, user data) and dispatching
 * each TRB to parse_inbound()/parse_outbound() (odd channel id =
 * inbound). For OOB/DB_MODE completions, re-rings the channel doorbell
 * if work is pending and flags the client result as device-not-ready.
 *
 * @param ctxt  [IN ] MHI device context
 * @param event [IN ] completion event TRB to parse
 *
 * @return MHI_STATUS 0 on success, MHI_STATUS_ERROR on bad event or
 *         buffer pointers
 */
MHI_STATUS parse_xfer_event(mhi_device_ctxt *ctxt, mhi_event_pkt *event)
{
	mhi_device_ctxt *mhi_dev_ctxt = (mhi_device_ctxt *)ctxt;
	mhi_result *result;
	u32 chan = MHI_MAX_CHANNELS;
	u16 xfer_len;
	uintptr_t phy_ev_trb_loc;
	mhi_xfer_pkt *local_ev_trb_loc;
	mhi_client_handle *client_handle;
	mhi_xfer_pkt *local_trb_loc;
	mhi_chan_ctxt *chan_ctxt;
	u32 nr_trb_to_parse;
	u32 i = 0;

	switch (MHI_EV_READ_CODE(EV_TRB_CODE, event)) {
	case MHI_EVENT_CC_EOB:
		mhi_log(MHI_MSG_VERBOSE, "IEOB condition detected\n");
		/* fallthrough -- NOTE(review): confirm EOB/OVERFLOW are
		 * meant to share the EOT processing path */
	case MHI_EVENT_CC_OVERFLOW:
		mhi_log(MHI_MSG_VERBOSE, "Overflow condition detected\n");
		/* fallthrough */
	case MHI_EVENT_CC_EOT:
	{
		void *trb_data_loc;
		u32 ieot_flag;
		MHI_STATUS ret_val;
		mhi_ring *local_chan_ctxt;

		chan = MHI_EV_READ_CHID(EV_CHID, event);
		local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
		phy_ev_trb_loc = MHI_EV_READ_PTR(EV_PTR, event);

		if (unlikely(!VALID_CHAN_NR(chan))) {
			mhi_log(MHI_MSG_ERROR, "Bad ring id.\n");
			break;
		}
		chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];
		ret_val = validate_xfer_el_addr(chan_ctxt, phy_ev_trb_loc);
		if (unlikely(MHI_STATUS_SUCCESS != ret_val)) {
			mhi_log(MHI_MSG_ERROR, "Bad event trb ptr.\n");
			break;
		}

		/* Get the TRB this event points to */
		local_ev_trb_loc = (mhi_xfer_pkt *)mhi_p2v_addr(
					mhi_dev_ctxt->mhi_ctrl_seg_info,
					phy_ev_trb_loc);
		local_trb_loc = (mhi_xfer_pkt *)local_chan_ctxt->rp;
		ret_val = get_nr_enclosed_el(local_chan_ctxt,
					local_trb_loc,
					local_ev_trb_loc,
					&nr_trb_to_parse);
		if (unlikely(MHI_STATUS_SUCCESS != ret_val)) {
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to get nr available trbs ret: %d.\n",
				ret_val);
			return MHI_STATUS_ERROR;
		}
		/* Walk every TRB from rp up to (and including) the TRB
		 * the completion event points at */
		do {
			u64 phy_buf_loc;
			MHI_TRB_GET_INFO(TX_TRB_IEOT, local_trb_loc,
						ieot_flag);
			phy_buf_loc = local_trb_loc->data_tx_pkt.buffer_ptr;
			trb_data_loc = (void *)(uintptr_t)phy_buf_loc;
			/* Inbound: actual length comes from the event;
			 * outbound: length is in the TRB itself */
			if (chan % 2)
				xfer_len = MHI_EV_READ_LEN(EV_LEN, event);
			else
				xfer_len = MHI_TX_TRB_GET_LEN(TX_TRB_LEN,
							local_trb_loc);
			if (!VALID_BUF(trb_data_loc, xfer_len)) {
				mhi_log(MHI_MSG_CRITICAL,
					"Bad buffer ptr: %p.\n",
					trb_data_loc);
				return MHI_STATUS_ERROR;
			}
			client_handle = mhi_dev_ctxt->client_handle_list[chan];
			if (NULL != client_handle) {
				client_handle->pkt_count++;
				result = &client_handle->result;
				result->payload_buf = trb_data_loc;
				result->bytes_xferd = xfer_len;
				result->user_data = client_handle->user_data;
			}
			if (chan % 2) {
				parse_inbound(mhi_dev_ctxt, chan,
						local_ev_trb_loc, xfer_len);
			} else {
				parse_outbound(mhi_dev_ctxt, chan,
						local_ev_trb_loc, xfer_len);
			}
			mhi_dev_ctxt->mhi_chan_cntr[chan].pkts_xferd++;
			if (local_trb_loc ==
					(mhi_xfer_pkt *)local_chan_ctxt->rp) {
				mhi_log(MHI_MSG_CRITICAL,
					"Done. Processed until: %p.\n",
					trb_data_loc);
				break;
			} else {
				local_trb_loc =
					(mhi_xfer_pkt *)local_chan_ctxt->rp;
			}
			i++;
		} while (i <= nr_trb_to_parse);
		break;
	} /* CC_EOT */
	case MHI_EVENT_CC_OOB:
	case MHI_EVENT_CC_DB_MODE:
	{
		mhi_ring *chan_ctxt = NULL;
		u64 db_value = 0;
		mhi_dev_ctxt->uldl_enabled = 1;
		chan = MHI_EV_READ_CHID(EV_CHID, event);
		mhi_dev_ctxt->db_mode[chan] = 1;
		chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
		mhi_log(MHI_MSG_INFO, "OOB Detected chan %d.\n", chan);
		if (chan_ctxt->wp != chan_ctxt->rp) {
			db_value = mhi_v2p_addr(
					mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)chan_ctxt->wp);
			MHI_WRITE_DB(mhi_dev_ctxt,
					mhi_dev_ctxt->channel_db_addr,
					chan, db_value);
		}
		client_handle = mhi_dev_ctxt->client_handle_list[chan];
		if (NULL != client_handle) {
			/* FIX: previously wrote through the uninitialized
			 * local 'result' pointer; write the client's own
			 * result instead. */
			client_handle->result.transaction_status =
					MHI_STATUS_DEVICE_NOT_READY;
		}
		break;
	}
	default:
	{
		mhi_log(MHI_MSG_ERROR, "Unknown TX completion.\n");
		break;
	}
	} /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
	return 0;
}
/**
 * @brief Send a command TRE to the MHI device.
 *
 * Validates the requested channel state transition, queues a command
 * element on the primary command ring, and rings the command doorbell
 * if the device is in M0/M1. At most one command per channel may be
 * outstanding (MHI_CMD_PENDING).
 *
 * @param mhi_dev_ctxt [IN ] MHI device context to send the command to
 * @param cmd          [IN ] which command to send to the device
 * @param chan         [IN ] channel number the command targets
 *                           (not applicable for all commands)
 *
 * @return MHI_STATUS MHI_STATUS_SUCCESS on success,
 *         MHI_STATUS_CMD_PENDING if a command is already outstanding,
 *         MHI_STATUS_ERROR otherwise
 */
MHI_STATUS mhi_send_cmd(mhi_device_ctxt *mhi_dev_ctxt,
			MHI_COMMAND cmd, u32 chan)
{
	u64 db_value = 0;
	mhi_cmd_pkt *cmd_pkt = NULL;
	MHI_CHAN_STATE from_state = MHI_CHAN_STATE_DISABLED;
	MHI_CHAN_STATE to_state = MHI_CHAN_STATE_DISABLED;
	MHI_PKT_TYPE ring_el_type = MHI_PKT_TYPE_NOOP_CMD;
	struct mutex *cmd_mutex = NULL;
	struct mutex *chan_mutex = NULL;

	/* FIX: pre-lock failures previously jumped to error_general and
	 * unlocked a mutex that was never locked; they now return
	 * directly. */
	if (chan >= MHI_MAX_CHANNELS ||
	    cmd >= MHI_COMMAND_MAX_NR || NULL == mhi_dev_ctxt) {
		mhi_log(MHI_MSG_ERROR,
			"Invalid channel id, received id: 0x%x", chan);
		return MHI_STATUS_ERROR;
	}
	mhi_assert_device_wake(mhi_dev_ctxt);
	/* If there is a cmd pending a device confirmation,
	 * do not send anymore for this channel */
	if (MHI_CMD_PENDING == mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan])
		return MHI_STATUS_CMD_PENDING;

	from_state =
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan].mhi_chan_state;

	switch (cmd) {
	case MHI_COMMAND_NOOP:
	{
		ring_el_type = MHI_PKT_TYPE_NOOP_CMD;
		break;
	}
	case MHI_COMMAND_RESET_CHAN:
	{
		to_state = MHI_CHAN_STATE_DISABLED;
		ring_el_type = MHI_PKT_TYPE_RESET_CHAN_CMD;
		break;
	}
	case MHI_COMMAND_START_CHAN:
	{
		switch (from_state) {
		case MHI_CHAN_STATE_ENABLED:
		case MHI_CHAN_STATE_STOP:
			to_state = MHI_CHAN_STATE_RUNNING;
			break;
		default:
			mhi_log(MHI_MSG_ERROR,
				"Invalid state transition for "
				"cmd 0x%x, from_state 0x%x\n",
				cmd, from_state);
			return MHI_STATUS_ERROR;
		}
		ring_el_type = MHI_PKT_TYPE_START_CHAN_CMD;
		break;
	}
	case MHI_COMMAND_STOP_CHAN:
	{
		switch (from_state) {
		case MHI_CHAN_STATE_RUNNING:
		case MHI_CHAN_STATE_SUSPENDED:
			to_state = MHI_CHAN_STATE_STOP;
			break;
		default:
			mhi_log(MHI_MSG_ERROR,
				"Invalid state transition for "
				"cmd 0x%x, from_state 0x%x\n",
				cmd, from_state);
			return MHI_STATUS_ERROR;
		}
		ring_el_type = MHI_PKT_TYPE_STOP_CHAN_CMD;
		break;
	}
	default:
		/* NOTE(review): unknown-but-in-range commands are logged
		 * and still sent as a NOOP-typed element, preserving the
		 * previous behavior — confirm this is intended. */
		mhi_log(MHI_MSG_ERROR, "Bad command received\n");
	}

	cmd_mutex = &mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING];
	mutex_lock(cmd_mutex);

	if (MHI_STATUS_SUCCESS !=
			ctxt_add_element(mhi_dev_ctxt->mhi_local_cmd_ctxt,
				(void *)&cmd_pkt)) {
		mhi_log(MHI_MSG_ERROR, "Failed to insert element\n");
		goto error_general;
	}
	chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
	if (MHI_COMMAND_NOOP != cmd) {
		mutex_lock(chan_mutex);
		MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt, ring_el_type);
		MHI_TRB_SET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
		mutex_unlock(chan_mutex);
	}
	db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt->wp);
	mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_PENDING;
	/* Only ring the doorbell while the link is awake */
	if (MHI_STATE_M0 == mhi_dev_ctxt->mhi_state ||
	    MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) {
		mhi_dev_ctxt->cmd_ring_order++;
		MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->cmd_db_addr,
				0, db_value);
	}
	mhi_log(MHI_MSG_VERBOSE, "Sent command 0x%x for chan 0x%x\n",
			cmd, chan);
	mutex_unlock(&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING]);
	return MHI_STATUS_SUCCESS;

error_general:
	/* Reached only with cmd_mutex held */
	mutex_unlock(&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING]);
	return MHI_STATUS_ERROR;
}