/**
 * mhi_init_state_transition - Add a new state transition work item to
 *		the state transition thread work item list.
 *
 * @mhi_dev_ctxt: The mhi_dev_ctxt context
 * @new_state: The state we wish to transition to
 */
enum MHI_STATUS mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
		enum STATE_TRANSITION new_state)
{
	unsigned long flags = 0;
	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	enum STATE_TRANSITION *cur_work_item = NULL;
	s32 nr_avail_work_items = 0;
	struct mhi_ring *stt_ring =
		&mhi_dev_ctxt->state_change_work_item_list.q_info;
	struct mhi_state_work_queue *work_q =
		&mhi_dev_ctxt->state_change_work_item_list;

	spin_lock_irqsave(work_q->q_lock, flags);
	nr_avail_work_items = get_nr_avail_ring_elements(stt_ring);
	if (0 >= nr_avail_work_items) {
		mhi_log(MHI_MSG_CRITICAL, "No room left on STT work queue\n");
		/* Release the lock before bailing out */
		spin_unlock_irqrestore(work_q->q_lock, flags);
		return MHI_STATUS_ERROR;
	}
	mhi_log(MHI_MSG_VERBOSE,
		"Processing state transition %x\n", new_state);
	*(enum STATE_TRANSITION *)stt_ring->wp = new_state;
	ret_val = ctxt_add_element(stt_ring, (void **)&cur_work_item);
	wmb();
	MHI_ASSERT(MHI_STATUS_SUCCESS == ret_val,
		   "Failed to add element to STT work queue\n");
	spin_unlock_irqrestore(work_q->q_lock, flags);
	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
	return ret_val;
}
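/*
 * Illustrative usage sketch (not part of the driver): how a caller could
 * queue a transition for the state-change thread to consume.  The
 * enumerator STATE_TRANSITION_M0 is assumed here for illustration; use
 * whichever STATE_TRANSITION values your tree actually defines.
 */
static enum MHI_STATUS example_request_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	enum MHI_STATUS ret;

	/* Queues the work item and wakes the state-change thread */
	ret = mhi_init_state_transition(mhi_dev_ctxt, STATE_TRANSITION_M0);
	if (MHI_STATUS_SUCCESS != ret)
		mhi_log(MHI_MSG_ERROR,
			"Could not queue M0 transition work item\n");
	return ret;
}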
MHI_STATUS mhi_client_recycle_trb(mhi_client_handle *client_handle)
{
	unsigned long flags;
	u32 chan = client_handle->chan;
	MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
	struct mutex *chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
	mhi_ring *local_ctxt = NULL;
	u64 db_value;

	local_ctxt = &client_handle->mhi_dev_ctxt->mhi_local_chan_ctxt[chan];

	mutex_lock(chan_mutex);
	MHI_TX_TRB_SET_LEN(TX_TRB_LEN,
			   (mhi_xfer_pkt *)local_ctxt->ack_rp,
			   TRB_MAX_DATA_SIZE);
	*(mhi_xfer_pkt *)local_ctxt->wp = *(mhi_xfer_pkt *)local_ctxt->ack_rp;
	ret_val = delete_element(local_ctxt, &local_ctxt->ack_rp,
				 &local_ctxt->rp, NULL);
	ret_val = ctxt_add_element(local_ctxt, NULL);
	db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
				(uintptr_t)local_ctxt->wp);
	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	if (mhi_dev_ctxt->flags.link_up) {
		if (MHI_STATE_M0 == mhi_dev_ctxt->mhi_state ||
		    MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) {
			mhi_assert_device_wake(mhi_dev_ctxt);
			MHI_WRITE_DB(mhi_dev_ctxt,
				     mhi_dev_ctxt->channel_db_addr,
				     chan, db_value);
		} else if (mhi_dev_ctxt->flags.pending_M3 ||
			   mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
			mhi_wake_dev_from_m3(mhi_dev_ctxt);
		}
	}
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->mhi_chan_cntr[chan].pkts_xferd++;
	mutex_unlock(chan_mutex);
	return ret_val;
}
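/*
 * Illustrative sketch (hypothetical, not part of the driver): a client
 * that has finished consuming an inbound buffer reposts the TRB so the
 * device can reuse it.  The callback shape shown here is an assumption
 * for illustration only.
 */
static void example_inbound_done(mhi_client_handle *client_handle)
{
	/* Reposts the consumed descriptor and rings the channel doorbell
	 * if the link is up and the device is in M0/M1 */
	if (MHI_STATUS_SUCCESS != mhi_client_recycle_trb(client_handle))
		mhi_log(MHI_MSG_ERROR, "Failed to recycle inbound TRB\n");
}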
enum MHI_STATUS mhi_init_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
		u32 nr_ev_el, u32 event_ring_index)
{
	union mhi_event_pkt *ev_pkt = NULL;
	u32 i = 0;
	unsigned long flags = 0;
	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	spinlock_t *lock = NULL;
	struct mhi_ring *event_ctxt = NULL;

	/* Validate inputs before touching mhi_dev_ctxt */
	if (NULL == mhi_dev_ctxt || 0 == nr_ev_el) {
		mhi_log(MHI_MSG_ERROR, "Bad input data, quitting\n");
		return MHI_STATUS_ERROR;
	}

	lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[event_ring_index];
	event_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index];

	spin_lock_irqsave(lock, flags);

	mhi_log(MHI_MSG_INFO, "mmio_addr = 0x%p, mmio_len = 0x%llx\n",
		mhi_dev_ctxt->mmio_addr, mhi_dev_ctxt->mmio_len);
	mhi_log(MHI_MSG_INFO, "Initializing event ring %d\n",
		event_ring_index);

	for (i = 0; i < nr_ev_el - 1; ++i) {
		ret_val = ctxt_add_element(event_ctxt, (void *)&ev_pkt);
		if (MHI_STATUS_SUCCESS != ret_val) {
			mhi_log(MHI_MSG_ERROR,
				"Failed to insert el in ev ctxt\n");
			break;
		}
	}
	spin_unlock_irqrestore(lock, flags);
	return ret_val;
}
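/*
 * Illustrative sketch (assumptions flagged): populating every event ring
 * at init time.  NR_EV_RINGS and EV_EL_PER_RING are hypothetical names
 * standing in for however many rings and elements the configuration
 * actually defines.
 */
static enum MHI_STATUS example_init_all_ev_rings(
		struct mhi_device_ctxt *mhi_dev_ctxt)
{
	u32 i;
	enum MHI_STATUS ret;

	for (i = 0; i < NR_EV_RINGS; ++i) {
		ret = mhi_init_event_ring(mhi_dev_ctxt, EV_EL_PER_RING, i);
		if (MHI_STATUS_SUCCESS != ret)
			return ret;
	}
	return MHI_STATUS_SUCCESS;
}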
MHI_STATUS recycle_trb_and_ring(mhi_device_ctxt *mhi_dev_ctxt,
		mhi_ring *ring,
		MHI_RING_TYPE ring_type,
		u32 ring_index)
{
	MHI_STATUS ret_val = MHI_STATUS_ERROR;
	u64 db_value = 0;
	void *removed_element = NULL;
	void *added_element = NULL;

	/* TODO This will not cover us for ring_index out of
	 * bounds for cmd or event channels */
	if (NULL == mhi_dev_ctxt || NULL == ring ||
	    ring_type > (MHI_RING_TYPE_MAX - 1) ||
	    ring_index > (MHI_MAX_CHANNELS - 1)) {
		mhi_log(MHI_MSG_ERROR, "Bad input params\n");
		return ret_val;
	}

	ret_val = ctxt_del_element(ring, &removed_element);
	if (MHI_STATUS_SUCCESS != ret_val) {
		mhi_log(MHI_MSG_ERROR, "Could not remove element from ring\n");
		return MHI_STATUS_ERROR;
	}
	ret_val = ctxt_add_element(ring, &added_element);
	if (MHI_STATUS_SUCCESS != ret_val)
		mhi_log(MHI_MSG_ERROR, "Could not add element to ring\n");
	db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
				(uintptr_t)ring->wp);
	if (MHI_STATUS_SUCCESS != ret_val)
		return ret_val;

	if (MHI_RING_TYPE_XFER_RING == ring_type) {
		mhi_xfer_pkt *removed_xfer_pkt =
			(mhi_xfer_pkt *)removed_element;
		mhi_xfer_pkt *added_xfer_pkt =
			(mhi_xfer_pkt *)added_element;
		added_xfer_pkt->data_tx_pkt =
			*(mhi_tx_pkt *)removed_xfer_pkt;
	} else if (MHI_RING_TYPE_EVENT_RING == ring_type &&
		   mhi_dev_ctxt->counters.m0_m3 > 0 &&
		   IS_HARDWARE_CHANNEL(ring_index)) {
		spinlock_t *lock = NULL;
		unsigned long flags = 0;

#if defined(CONFIG_MACH_LENTISLTE_SKT) || \
	defined(CONFIG_MACH_LENTISLTE_LGT) || \
	defined(CONFIG_MACH_LENTISLTE_KTT) || \
	defined(CONFIG_SEC_KCCAT6_PROJECT)
		mhi_log(MHI_MSG_VERBOSE,
			"Updating ev context id %d, value 0x%llx\n",
			ring_index, db_value);
		lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
		spin_lock_irqsave(lock, flags);
		db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)ring->wp);
		mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[ring_index].
					mhi_event_write_ptr = db_value;
#else
		mhi_log(MHI_MSG_ERROR, "Updating EV_CTXT\n");
		lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
		spin_lock_irqsave(lock, flags);
		mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[ring_index].
					mhi_event_write_ptr = db_value;
#endif
		mhi_dev_ctxt->ev_counter[ring_index]++;
		spin_unlock_irqrestore(lock, flags);
	}
	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	/* Asserting device wake here will immediately wake the mdm */
	if ((MHI_STATE_M0 == mhi_dev_ctxt->mhi_state ||
	     MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) &&
	    mhi_dev_ctxt->flags.link_up) {
		switch (ring_type) {
		case MHI_RING_TYPE_CMD_RING:
		{
			struct mutex *cmd_mutex = NULL;

			cmd_mutex =
			   &mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING];
			mutex_lock(cmd_mutex);
			mhi_dev_ctxt->cmd_ring_order = 1;
			MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->cmd_db_addr,
				     ring_index, db_value);
			mutex_unlock(cmd_mutex);
			break;
		}
		case MHI_RING_TYPE_EVENT_RING:
		{
			spinlock_t *lock = NULL;
			unsigned long flags = 0;

			lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
			spin_lock_irqsave(lock, flags);
			mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
#if defined(CONFIG_MACH_LENTISLTE_SKT) || \
	defined(CONFIG_MACH_LENTISLTE_LGT) || \
	defined(CONFIG_MACH_LENTISLTE_KTT) || \
	defined(CONFIG_SEC_KCCAT6_PROJECT)
			db_value = mhi_v2p_addr(
					mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)ring->wp);
#endif
			if ((mhi_dev_ctxt->ev_counter[ring_index] %
					MHI_EV_DB_INTERVAL) == 0)
				MHI_WRITE_DB(mhi_dev_ctxt,
					     mhi_dev_ctxt->event_db_addr,
					     ring_index, db_value);
			spin_unlock_irqrestore(lock, flags);
			break;
		}
		case MHI_RING_TYPE_XFER_RING:
		{
			unsigned long flags = 0;

			spin_lock_irqsave(
				&mhi_dev_ctxt->db_write_lock[ring_index],
				flags);
			mhi_dev_ctxt->mhi_chan_db_order[ring_index] = 1;
			MHI_WRITE_DB(mhi_dev_ctxt,
				     mhi_dev_ctxt->channel_db_addr,
				     ring_index, db_value);
			spin_unlock_irqrestore(
				&mhi_dev_ctxt->db_write_lock[ring_index],
				flags);
			break;
		}
		default:
			mhi_log(MHI_MSG_ERROR, "Bad ring type\n");
		}
	}
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	return ret_val;
}
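/*
 * Illustrative sketch (not part of the driver): after an event packet on
 * ring ev_index has been handled, the element is recycled so the ring
 * stays full; the event doorbell is rung at the configured interval.
 */
static void example_after_event(mhi_device_ctxt *mhi_dev_ctxt,
				mhi_ring *ev_ring, u32 ev_index)
{
	if (MHI_STATUS_SUCCESS !=
	    recycle_trb_and_ring(mhi_dev_ctxt, ev_ring,
				 MHI_RING_TYPE_EVENT_RING, ev_index))
		mhi_log(MHI_MSG_ERROR, "Failed to recycle event element\n");
}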
/**
 * @brief Send a command TRE to the mhi device.
 *
 * @param mhi_dev_ctxt [IN ] The mhi dev context to which to send the command
 * @param cmd          [IN ] Enum specifying which command to send to the device
 * @param chan         [IN ] Channel number for which this command is intended;
 *                           not applicable to all commands
 *
 * @return MHI_STATUS
 */
MHI_STATUS mhi_send_cmd(mhi_device_ctxt *mhi_dev_ctxt,
			MHI_COMMAND cmd, u32 chan)
{
	u64 db_value = 0;
	mhi_cmd_pkt *cmd_pkt = NULL;
	MHI_CHAN_STATE from_state = MHI_CHAN_STATE_DISABLED;
	MHI_CHAN_STATE to_state = MHI_CHAN_STATE_DISABLED;
	MHI_PKT_TYPE ring_el_type = MHI_PKT_TYPE_NOOP_CMD;
	struct mutex *cmd_mutex = NULL;
	struct mutex *chan_mutex = NULL;

	if (chan >= MHI_MAX_CHANNELS ||
	    cmd >= MHI_COMMAND_MAX_NR || NULL == mhi_dev_ctxt) {
		mhi_log(MHI_MSG_ERROR,
			"Invalid channel id, received id: 0x%x", chan);
		return MHI_STATUS_ERROR;
	}
	mhi_assert_device_wake(mhi_dev_ctxt);
	/*
	 * If there is a cmd pending a device confirmation,
	 * do not send any more for this channel.
	 */
	if (MHI_CMD_PENDING == mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan])
		return MHI_STATUS_CMD_PENDING;

	from_state =
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan].mhi_chan_state;

	switch (cmd) {
	case MHI_COMMAND_NOOP:
		ring_el_type = MHI_PKT_TYPE_NOOP_CMD;
		break;
	case MHI_COMMAND_RESET_CHAN:
		to_state = MHI_CHAN_STATE_DISABLED;
		ring_el_type = MHI_PKT_TYPE_RESET_CHAN_CMD;
		break;
	case MHI_COMMAND_START_CHAN:
		switch (from_state) {
		case MHI_CHAN_STATE_ENABLED:
		case MHI_CHAN_STATE_STOP:
			to_state = MHI_CHAN_STATE_RUNNING;
			break;
		default:
			mhi_log(MHI_MSG_ERROR,
				"Invalid state transition for cmd 0x%x, from_state 0x%x\n",
				cmd, from_state);
			return MHI_STATUS_ERROR;
		}
		ring_el_type = MHI_PKT_TYPE_START_CHAN_CMD;
		break;
	case MHI_COMMAND_STOP_CHAN:
		switch (from_state) {
		case MHI_CHAN_STATE_RUNNING:
		case MHI_CHAN_STATE_SUSPENDED:
			to_state = MHI_CHAN_STATE_STOP;
			break;
		default:
			mhi_log(MHI_MSG_ERROR,
				"Invalid state transition for cmd 0x%x, from_state 0x%x\n",
				cmd, from_state);
			return MHI_STATUS_ERROR;
		}
		ring_el_type = MHI_PKT_TYPE_STOP_CHAN_CMD;
		break;
	default:
		mhi_log(MHI_MSG_ERROR, "Bad command received\n");
		return MHI_STATUS_ERROR;
	}

	cmd_mutex = &mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING];
	mutex_lock(cmd_mutex);

	if (MHI_STATUS_SUCCESS !=
	    ctxt_add_element(mhi_dev_ctxt->mhi_local_cmd_ctxt,
			     (void *)&cmd_pkt)) {
		mhi_log(MHI_MSG_ERROR, "Failed to insert element\n");
		goto error_general;
	}
	chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
	if (MHI_COMMAND_NOOP != cmd) {
		mutex_lock(chan_mutex);
		MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt, ring_el_type);
		MHI_TRB_SET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
		mutex_unlock(chan_mutex);
	}
	db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt->wp);
	mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_PENDING;
	if (MHI_STATE_M0 == mhi_dev_ctxt->mhi_state ||
	    MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) {
		mhi_dev_ctxt->cmd_ring_order++;
		MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->cmd_db_addr,
			     0, db_value);
	}
	mhi_log(MHI_MSG_VERBOSE, "Sent command 0x%x for chan 0x%x\n",
		cmd, chan);
	mutex_unlock(cmd_mutex);
	return MHI_STATUS_SUCCESS;

error_general:
	/* Only reached with cmd_mutex held */
	mutex_unlock(cmd_mutex);
	return MHI_STATUS_ERROR;
}
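/*
 * Illustrative sketch: issuing a START command for a channel.  The caller
 * would learn of the device's acknowledgment via a command completion
 * event, which clears mhi_chan_pend_cmd_ack[chan]; that handling is
 * outside this sketch.
 */
static MHI_STATUS example_start_chan(mhi_device_ctxt *mhi_dev_ctxt, u32 chan)
{
	MHI_STATUS ret = mhi_send_cmd(mhi_dev_ctxt,
				      MHI_COMMAND_START_CHAN, chan);

	if (MHI_STATUS_CMD_PENDING == ret)
		mhi_log(MHI_MSG_INFO,
			"Previous cmd on chan %d still pending\n", chan);
	return ret;
}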
/**
 * @brief Send data on an outbound channel.
 *        This function sends only one TRE's worth of data and
 *        may chain the TRE as specified by the caller.
 *
 * @param client_handle [IN ] Handle to the client's channel context
 * @param buf           [IN ] Physical address of the buffer to be linked
 *                            to the descriptor
 * @param buf_len       [IN ] Length of the buffer, populated in the TRE
 * @param chain         [IN ] Whether this TRE should be chained
 * @param eob           [IN ] Whether to set the end-of-block interrupt flag
 *
 * @return MHI_STATUS
 */
MHI_STATUS mhi_queue_xfer(mhi_client_handle *client_handle,
		uintptr_t buf, size_t buf_len, u32 chain, u32 eob)
{
	mhi_xfer_pkt *pkt_loc;
	MHI_STATUS ret_val;
	MHI_CLIENT_CHANNEL chan;
	mhi_device_ctxt *mhi_dev_ctxt;
	unsigned long flags;

	if (NULL == client_handle || !VALID_CHAN_NR(client_handle->chan) ||
	    0 == buf || chain >= MHI_TRE_CHAIN_LIMIT || 0 == buf_len) {
		mhi_log(MHI_MSG_CRITICAL, "Bad input args\n");
		return MHI_STATUS_ERROR;
	}
	MHI_ASSERT(VALID_BUF(buf, buf_len),
		   "Client buffer is of invalid length\n");
	mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
	chan = client_handle->chan;

	/* Bump up the vote for pending data */
	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	mhi_dev_ctxt->counters.m1_m0++;
	if (mhi_dev_ctxt->flags.link_up)
		mhi_assert_device_wake(mhi_dev_ctxt);
	read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	pkt_loc = mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp;
	pkt_loc->data_tx_pkt.buffer_ptr = buf;
	if (likely(0 != client_handle->intmod_t))
		MHI_TRB_SET_INFO(TX_TRB_BEI, pkt_loc, 1);
	else
		MHI_TRB_SET_INFO(TX_TRB_BEI, pkt_loc, 0);
	MHI_TRB_SET_INFO(TX_TRB_IEOT, pkt_loc, 1);
	MHI_TRB_SET_INFO(TX_TRB_CHAIN, pkt_loc, chain);
	MHI_TRB_SET_INFO(TX_TRB_IEOB, pkt_loc, eob);
	MHI_TRB_SET_INFO(TX_TRB_TYPE, pkt_loc, MHI_PKT_TYPE_TRANSFER);
	MHI_TX_TRB_SET_LEN(TX_TRB_LEN, pkt_loc, buf_len);

	if (chan % 2 == 0) {
		atomic_inc(&mhi_dev_ctxt->counters.outbound_acks);
		mhi_log(MHI_MSG_VERBOSE,
			"Queued outbound pkt. Pending Acks %d\n",
			atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
	}

	/* Add the TRB to the correct transfer ring */
	ret_val = ctxt_add_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
				   (void *)&pkt_loc);
	if (unlikely(MHI_STATUS_SUCCESS != ret_val)) {
		mhi_log(MHI_MSG_INFO, "Failed to insert trb in xfer ring\n");
		goto error;
	}
	mhi_notify_device(mhi_dev_ctxt, chan);
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	return MHI_STATUS_SUCCESS;
error:
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	return ret_val;
}
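/*
 * Illustrative sketch (assumptions flagged): queuing one outbound buffer.
 * dma_addr is assumed to be a bus address the caller already mapped for
 * the device; chain and eob are cleared for a standalone TRE.
 */
static MHI_STATUS example_send_buf(mhi_client_handle *client_handle,
				   uintptr_t dma_addr, size_t len)
{
	/* One unchained TRE, no end-of-block interrupt */
	return mhi_queue_xfer(client_handle, dma_addr, len, 0, 0);
}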