Ejemplo n.º 1
0
/**
 * @brief Ring the doorbell of every allocated event ring.
 *
 * For each event ring listed in alloced_ev_rings, writes the event
 * doorbell with the physical address of the ring's local write pointer.
 * The per-ring mhi_ev_db_order flag is cleared before the lock is taken;
 * if a concurrent ringer sets it (under the same spinlock) before we
 * acquire the lock, the doorbell write is skipped to preserve ordering.
 *
 * Fix: removed dead locals (event_ctxt / mhi_ctrl were assigned from
 * mhi_ec_list but never read).
 *
 * @param mhi_dev_ctxt device context whose event rings are rung
 */
void ring_all_ev_dbs(mhi_device_ctxt *mhi_dev_ctxt)
{
	u32 i;
	u64 db_value = 0;
	u32 event_ring_index;
	spinlock_t *lock = NULL;
	unsigned long flags;

	for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
		event_ring_index = mhi_dev_ctxt->alloced_ev_rings[i];
		lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[event_ring_index];
		/* Clear before locking; a concurrent ringer that takes the
		 * lock first marks the flag and we skip the redundant write */
		mhi_dev_ctxt->mhi_ev_db_order[event_ring_index] = 0;

		spin_lock_irqsave(lock, flags);
		db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index].wp);

		if (0 == mhi_dev_ctxt->mhi_ev_db_order[event_ring_index]) {
			MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->event_db_addr,
				event_ring_index, db_value);
		}
		mhi_dev_ctxt->mhi_ev_db_order[event_ring_index] = 0;
		spin_unlock_irqrestore(lock, flags);
	}
}
Ejemplo n.º 2
0
/*
 * Ring a single event ring's doorbell with the physical address of the
 * ring's current local write pointer.
 */
void ring_ev_db(mhi_device_ctxt *mhi_dev_ctxt, u32 event_ring_index)
{
	mhi_ring *ring = &mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index];
	u64 wp_phys = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
				   (uintptr_t)ring->wp);

	MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->event_db_addr,
		     event_ring_index, wp_phys);
}
Ejemplo n.º 3
0
/**
 * @brief Notify the device of pending data on a channel.
 *
 * If the device is in M0/M1, the channel is not in error, and no M3
 * transition is pending, the channel doorbell is written with the
 * physical address of the channel's local write pointer. Otherwise,
 * if M3 is pending or active, the device is woken instead.
 *
 * @param mhi_dev_ctxt device context
 * @param chan channel number to notify for
 * @return MHI_STATUS_SUCCESS always
 */
MHI_STATUS mhi_notify_device(mhi_device_ctxt *mhi_dev_ctxt, u32 chan)
{
	unsigned long flags = 0;
	u64 db_value;
	mhi_chan_ctxt *chan_ctxt;
	chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];
	spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan], flags);
	/* Doorbell only when in M0/M1, channel not errored, no M3 pending */
	if (likely(((MHI_STATE_M0 == mhi_dev_ctxt->mhi_state) ||
		(MHI_STATE_M1 == mhi_dev_ctxt->mhi_state)) &&
		(chan_ctxt->mhi_chan_state != MHI_CHAN_STATE_ERROR) &&
		!mhi_dev_ctxt->flags.pending_M3)) {

		/* Record (under the doorbell lock) that this path rang the
		 * DB; conditional writers check this counter to avoid a
		 * stale duplicate write */
		mhi_dev_ctxt->mhi_chan_db_order[chan]++;
		db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
		/* Odd-numbered hardware channels rate-limit DB writes to
		 * every MHI_XFER_DB_INTERVAL packets (odd ids are handled
		 * as inbound elsewhere in this file) */
		if (IS_HARDWARE_CHANNEL(chan) && (chan % 2)) {
			if ((mhi_dev_ctxt->mhi_chan_cntr[chan].pkts_xferd %
						MHI_XFER_DB_INTERVAL) == 0) {
				MHI_WRITE_DB(mhi_dev_ctxt,
						mhi_dev_ctxt->channel_db_addr,
						chan, db_value);
			}
		} else {
			MHI_WRITE_DB(mhi_dev_ctxt,
			     mhi_dev_ctxt->channel_db_addr,
			     chan, db_value);
		}
	} else {
		mhi_log(MHI_MSG_VERBOSE,
			"Triggering wakeup due to pending data MHI state %d, Chan state %d, Pending M3 %d\n",
			mhi_dev_ctxt->mhi_state, chan_ctxt->mhi_chan_state, mhi_dev_ctxt->flags.pending_M3);
		if (mhi_dev_ctxt->flags.pending_M3 ||
		    mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
			mhi_wake_dev_from_m3(mhi_dev_ctxt);
		}
	}
	spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags);
	/* If there are no clients still sending we can trigger our
	 * inactivity timer */
	return MHI_STATUS_SUCCESS;
}
Ejemplo n.º 4
0
/*
 * Write the channel doorbell only if no other context rang it while we
 * were acquiring the per-channel doorbell lock. A concurrent ringer marks
 * mhi_chan_db_order non-zero under the same lock, which makes us skip
 * the (now stale) write.
 */
void conditional_chan_db_write(mhi_device_ctxt *mhi_dev_ctxt, u32 chan)
{
	unsigned long irq_flags;
	u64 wp_phys;

	mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
	spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan], irq_flags);
	if (mhi_dev_ctxt->mhi_chan_db_order[chan] == 0) {
		wp_phys = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
		MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->channel_db_addr,
				chan, wp_phys);
	}
	mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
	spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], irq_flags);
}
Ejemplo n.º 5
0
/**
 * @brief Ring the primary command ring doorbell if work is pending.
 *
 * Rings the command doorbell with the ring's local write pointer, but
 * only when the ring is non-empty (rp != wp) and no concurrent sender
 * has already rung it (cmd_ring_order set non-zero under the cmd mutex).
 *
 * Fixes: the verbose log wrongly said "chan dbs" for the command ring,
 * and the write-pointer lookup hard-coded index 0 instead of using the
 * already-computed PRIMARY_CMD_RING local context.
 *
 * @param mhi_dev_ctxt device context
 */
void ring_all_cmd_dbs(mhi_device_ctxt *mhi_dev_ctxt)
{
	struct mutex *cmd_mutex = NULL;
	u64 db_value;
	u64 rp = 0;
	mhi_ring *local_ctxt = NULL;

	mhi_log(MHI_MSG_VERBOSE, "Ringing cmd db\n");
	cmd_mutex = &mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING];
	/* Clear the order flag before locking; a concurrent sender that
	 * wins the mutex first marks it and we skip the redundant write */
	mhi_dev_ctxt->cmd_ring_order = 0;
	mutex_lock(cmd_mutex);
	local_ctxt = &mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
	rp = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			  (uintptr_t)local_ctxt->rp);
	/* Consistency: derive wp from local_ctxt rather than magic index 0 */
	db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)local_ctxt->wp);
	/* Only ring when nobody beat us to it and the ring has entries */
	if (0 == mhi_dev_ctxt->cmd_ring_order && rp != db_value)
		MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->cmd_db_addr, 0, db_value);
	mhi_dev_ctxt->cmd_ring_order = 0;
	mutex_unlock(cmd_mutex);
}
Ejemplo n.º 6
0
/**
 * @brief Recycle the oldest acked TRB on a client's channel and ring the
 *        channel doorbell if the link is up.
 *
 * Copies the packet at ack_rp to the write pointer, advances the ring
 * (delete + add element), and rings the channel doorbell when the device
 * is in M0/M1; if M3 is pending/active the device is woken instead.
 *
 * Fix: the status of delete_element() was previously discarded by the
 * immediately following ctxt_add_element() assignment; both results are
 * now checked so a failed ring operation no longer proceeds to ring a
 * stale doorbell or bump the transfer counter.
 *
 * @param client_handle client whose channel ring is recycled
 * @return MHI_STATUS_SUCCESS on success, error code from the failing
 *         ring operation otherwise
 */
MHI_STATUS mhi_client_recycle_trb(mhi_client_handle *client_handle)
{
	unsigned long flags;
	u32 chan = client_handle->chan;
	MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
	struct mutex *chan_mutex  = &mhi_dev_ctxt->mhi_chan_mutex[chan];
	mhi_ring *local_ctxt = NULL;
	u64 db_value;
	local_ctxt = &client_handle->mhi_dev_ctxt->mhi_local_chan_ctxt[chan];

	mutex_lock(chan_mutex);
	/* Reset the TRB length before re-queueing it */
	MHI_TX_TRB_SET_LEN(TX_TRB_LEN,
				(mhi_xfer_pkt *)local_ctxt->ack_rp,
				TRB_MAX_DATA_SIZE);

	*(mhi_xfer_pkt *)local_ctxt->wp =
			*(mhi_xfer_pkt *)local_ctxt->ack_rp;
	ret_val = delete_element(local_ctxt, &local_ctxt->ack_rp,
				&local_ctxt->rp, NULL);
	if (MHI_STATUS_SUCCESS != ret_val) {
		mutex_unlock(chan_mutex);
		return ret_val;
	}
	ret_val = ctxt_add_element(local_ctxt, NULL);
	if (MHI_STATUS_SUCCESS != ret_val) {
		mutex_unlock(chan_mutex);
		return ret_val;
	}
	db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)local_ctxt->wp);
	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	if (mhi_dev_ctxt->flags.link_up) {
		if (MHI_STATE_M0 == mhi_dev_ctxt->mhi_state ||
		    MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) {
			mhi_assert_device_wake(mhi_dev_ctxt);
			MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->channel_db_addr, chan, db_value);
		} else if (mhi_dev_ctxt->flags.pending_M3 ||
			   mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
				mhi_wake_dev_from_m3(mhi_dev_ctxt);
		}
	}
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->mhi_chan_cntr[chan].pkts_xferd++;
	mutex_unlock(chan_mutex);
	return ret_val;
}
Ejemplo n.º 7
0
/**
 * @brief Recycle one element on a ring and ring the matching doorbell.
 *
 * Deletes the oldest element, adds a fresh one, and (for transfer rings)
 * copies the removed packet's payload descriptor into the added slot.
 * When the device is in M0/M1 with the link up, the appropriate doorbell
 * (cmd/event/xfer) is written under its respective lock, marking the
 * per-ring db_order flag so conditional writers elsewhere skip stale
 * duplicate writes.
 *
 * @param mhi_dev_ctxt device context
 * @param ring local ring to recycle an element on
 * @param ring_type which doorbell family the ring belongs to
 * @param ring_index channel / event ring id used to select the doorbell
 * @return MHI_STATUS_SUCCESS on success, MHI_STATUS_ERROR on bad params
 *         or failed ring manipulation
 */
MHI_STATUS recycle_trb_and_ring(mhi_device_ctxt *mhi_dev_ctxt,
		mhi_ring *ring,
		MHI_RING_TYPE ring_type,
		u32 ring_index)
{
	MHI_STATUS ret_val = MHI_STATUS_ERROR;
	u64 db_value = 0;
	void *removed_element = NULL;
	void *added_element = NULL;

	/* TODO This will not cover us for ring_index out of
	 * bounds for cmd or event channels */
	if (NULL == mhi_dev_ctxt || NULL == ring ||
		ring_type > (MHI_RING_TYPE_MAX - 1) ||
		ring_index > (MHI_MAX_CHANNELS - 1)) {

		mhi_log(MHI_MSG_ERROR, "Bad input params\n");
		return ret_val;
	}
	ret_val = ctxt_del_element(ring, &removed_element);
	if (MHI_STATUS_SUCCESS != ret_val) {
		mhi_log(MHI_MSG_ERROR, "Could not remove element from ring\n");
		return MHI_STATUS_ERROR;
	}
	ret_val = ctxt_add_element(ring, &added_element);
	if (MHI_STATUS_SUCCESS != ret_val)
		mhi_log(MHI_MSG_ERROR, "Could not add element to ring\n");
	db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
				(uintptr_t)ring->wp);
	/* Bail out (after computing db_value) if the add above failed;
	 * added_element is not dereferenced on this path */
	if (MHI_STATUS_SUCCESS != ret_val)
		return ret_val;
	if (MHI_RING_TYPE_XFER_RING == ring_type) {
		/* Carry the recycled packet's tx descriptor forward */
		mhi_xfer_pkt *removed_xfer_pkt =
			(mhi_xfer_pkt *)removed_element;
		mhi_xfer_pkt *added_xfer_pkt =
			(mhi_xfer_pkt *)added_element;
		added_xfer_pkt->data_tx_pkt =
				*(mhi_tx_pkt *)removed_xfer_pkt;
	} else if (MHI_RING_TYPE_EVENT_RING == ring_type &&
		   mhi_dev_ctxt->counters.m0_m3 > 0 &&
		   IS_HARDWARE_CHANNEL(ring_index)) {
		/* Hardware event ring after at least one M0->M3 cycle:
		 * publish the new write pointer into the shared event
		 * context under the ring spinlock. The board-specific
		 * variant recomputes db_value inside the lock. */
		spinlock_t *lock = NULL;
		unsigned long flags = 0;
#if defined(CONFIG_MACH_LENTISLTE_SKT) || defined(CONFIG_MACH_LENTISLTE_LGT) || defined(CONFIG_MACH_LENTISLTE_KTT) || defined(CONFIG_SEC_KCCAT6_PROJECT)
		mhi_log(MHI_MSG_VERBOSE, "Updating ev context id %d, value 0x%llx\n",
				ring_index, db_value);
		lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
		spin_lock_irqsave(lock, flags);
		db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, (uintptr_t)ring->wp);
		mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[ring_index].mhi_event_write_ptr = db_value;
#else
		mhi_log(MHI_MSG_ERROR, "Updating EV_CTXT\n");
		lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
		spin_lock_irqsave(lock, flags);
		mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[ring_index].mhi_event_write_ptr = db_value;

#endif
		mhi_dev_ctxt->ev_counter[ring_index]++;
		spin_unlock_irqrestore(lock, flags);
	}
	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	/* Asserting Device Wake here, will imediately wake mdm */
	if ((MHI_STATE_M0 == mhi_dev_ctxt->mhi_state ||
	     MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) &&
	     mhi_dev_ctxt->flags.link_up) {
		switch (ring_type) {
		case MHI_RING_TYPE_CMD_RING:
		{
			struct mutex *cmd_mutex = NULL;
			cmd_mutex =
				&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING];
			mutex_lock(cmd_mutex);
			/* Mark so deferred ringers skip a duplicate write */
			mhi_dev_ctxt->cmd_ring_order = 1;
			MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->cmd_db_addr,
					ring_index, db_value);
			mutex_unlock(cmd_mutex);
			break;
		}
		case MHI_RING_TYPE_EVENT_RING:
		{
			spinlock_t *lock = NULL;
			unsigned long flags = 0;
			lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
			spin_lock_irqsave(lock, flags);
			mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
#if defined(CONFIG_MACH_LENTISLTE_SKT) || defined(CONFIG_MACH_LENTISLTE_LGT) || defined(CONFIG_MACH_LENTISLTE_KTT)|| defined(CONFIG_SEC_KCCAT6_PROJECT)
			db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)ring->wp);
#endif
			/* Rate-limit event doorbells to every
			 * MHI_EV_DB_INTERVAL recycles */
			if ((mhi_dev_ctxt->ev_counter[ring_index] %
						MHI_EV_DB_INTERVAL) == 0) {
				MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->event_db_addr,
						ring_index, db_value);
			}
			spin_unlock_irqrestore(lock, flags);
			break;
		}
		case MHI_RING_TYPE_XFER_RING:
		{
			unsigned long flags = 0;
			spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[ring_index], flags);
			mhi_dev_ctxt->mhi_chan_db_order[ring_index] = 1;
			MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->channel_db_addr,
					ring_index, db_value);
			spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[ring_index],
						flags);
			break;
		}
		default:
			mhi_log(MHI_MSG_ERROR, "Bad ring type\n");
		}
	}
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	return ret_val;
}
Ejemplo n.º 8
0
/**
 * @brief Parse a transfer completion event and notify the MHI client.
 *
 * Invokes the callback path for the MHI client to inform them of data
 * availability, walks every TRB the event encloses, and handles the
 * OOB / DB-mode completion codes by re-ringing the channel doorbell.
 *
 * @param ctxt device context
 * @param event completion event packet to parse
 */
MHI_STATUS parse_xfer_event(mhi_device_ctxt *ctxt, mhi_event_pkt *event)
{
	mhi_device_ctxt *mhi_dev_ctxt = (mhi_device_ctxt *)ctxt;
	mhi_result *result;
	u32 chan = MHI_MAX_CHANNELS;
	u16 xfer_len;
	uintptr_t phy_ev_trb_loc;
	mhi_xfer_pkt *local_ev_trb_loc;
	mhi_client_handle *client_handle;
	mhi_xfer_pkt *local_trb_loc;
	mhi_chan_ctxt *chan_ctxt;
	u32 nr_trb_to_parse;
	u32 i = 0;

	switch (MHI_EV_READ_CODE(EV_TRB_CODE, event)) {
	case MHI_EVENT_CC_EOB:
		mhi_log(MHI_MSG_VERBOSE, "IEOB condition detected\n");
		/* fallthrough */
	case MHI_EVENT_CC_OVERFLOW:
		mhi_log(MHI_MSG_VERBOSE, "Overflow condition detected\n");
		/* fallthrough */
	case MHI_EVENT_CC_EOT:
	{
		void *trb_data_loc;
		u32 ieot_flag;
		MHI_STATUS ret_val;
		mhi_ring *local_chan_ctxt;

		chan = MHI_EV_READ_CHID(EV_CHID, event);
		local_chan_ctxt =
			&mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
		phy_ev_trb_loc = MHI_EV_READ_PTR(EV_PTR, event);

		if (unlikely(!VALID_CHAN_NR(chan))) {
			mhi_log(MHI_MSG_ERROR, "Bad ring id.\n");
			break;
		}
		chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];
		ret_val = validate_xfer_el_addr(chan_ctxt,
						phy_ev_trb_loc);

		if (unlikely(MHI_STATUS_SUCCESS != ret_val)) {
			mhi_log(MHI_MSG_ERROR, "Bad event trb ptr.\n");
			break;
		}

		/* Get the TRB this event points to */
		local_ev_trb_loc =
			(mhi_xfer_pkt *)mhi_p2v_addr(
					mhi_dev_ctxt->mhi_ctrl_seg_info,
							phy_ev_trb_loc);
		local_trb_loc = (mhi_xfer_pkt *)local_chan_ctxt->rp;

		ret_val = get_nr_enclosed_el(local_chan_ctxt,
				      local_trb_loc,
				      local_ev_trb_loc,
				      &nr_trb_to_parse);
		if (unlikely(MHI_STATUS_SUCCESS != ret_val)) {
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to get nr available trbs ret: %d.\n",
				ret_val);
			return MHI_STATUS_ERROR;
		}
		/* Walk every TRB enclosed by this completion event */
		do {
			u64 phy_buf_loc;
			MHI_TRB_GET_INFO(TX_TRB_IEOT, local_trb_loc, ieot_flag);
			phy_buf_loc = local_trb_loc->data_tx_pkt.buffer_ptr;
			trb_data_loc = (void *)(uintptr_t)phy_buf_loc;
			/* Odd channels are inbound: the device reports the
			 * received length in the event; outbound uses the
			 * TRB's own length */
			if (chan % 2)
				xfer_len = MHI_EV_READ_LEN(EV_LEN, event);
			else
				xfer_len = MHI_TX_TRB_GET_LEN(TX_TRB_LEN,
							local_trb_loc);

			if (!VALID_BUF(trb_data_loc, xfer_len)) {
				mhi_log(MHI_MSG_CRITICAL,
					"Bad buffer ptr: %p.\n",
					trb_data_loc);
				return MHI_STATUS_ERROR;
			}

			client_handle = mhi_dev_ctxt->client_handle_list[chan];
			if (NULL != client_handle) {
				client_handle->pkt_count++;
				result = &client_handle->result;
				result->payload_buf = trb_data_loc;
				result->bytes_xferd = xfer_len;
				result->user_data = client_handle->user_data;
			}
			if (chan % 2) {
				parse_inbound(mhi_dev_ctxt, chan,
						local_ev_trb_loc, xfer_len);
			} else {
				parse_outbound(mhi_dev_ctxt, chan,
						local_ev_trb_loc, xfer_len);
			}
			mhi_dev_ctxt->mhi_chan_cntr[chan].pkts_xferd++;
			if (local_trb_loc ==
				(mhi_xfer_pkt *)local_chan_ctxt->rp) {
				mhi_log(MHI_MSG_CRITICAL,
					"Done. Processed until: %p.\n",
					trb_data_loc);
				break;
			} else {
				local_trb_loc =
					(mhi_xfer_pkt *)local_chan_ctxt->rp;
			}
			i++;
		} while (i <= nr_trb_to_parse);
		break;
	} /* CC_EOT */
	case MHI_EVENT_CC_OOB:
	case MHI_EVENT_CC_DB_MODE:
	{
		mhi_ring *chan_ctxt = NULL;
		u64 db_value = 0;
		mhi_dev_ctxt->uldl_enabled = 1;
		chan = MHI_EV_READ_CHID(EV_CHID, event);
		mhi_dev_ctxt->db_mode[chan] = 1;
		chan_ctxt =
			&mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
		mhi_log(MHI_MSG_INFO, "OOB Detected chan %d.\n", chan);
		/* Re-ring the doorbell if the channel has pending work */
		if (chan_ctxt->wp != chan_ctxt->rp) {
			db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
							(uintptr_t)chan_ctxt->wp);
			MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->channel_db_addr, chan,
				db_value);
		}
		client_handle = mhi_dev_ctxt->client_handle_list[chan];
		if (NULL != client_handle) {
			/* Fix: 'result' was previously dereferenced here
			 * without ever being initialized; point it at the
			 * client's result first */
			result = &client_handle->result;
			result->transaction_status = MHI_STATUS_DEVICE_NOT_READY;
		}
		break;
	}
	default:
		{
			mhi_log(MHI_MSG_ERROR,
				"Unknown TX completion.\n");
			break;
		}
	} /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
	/* Fix: return the proper status constant instead of bare 0 */
	return MHI_STATUS_SUCCESS;
}
Ejemplo n.º 9
0
/**
 * @brief Send a command TRE to the MHI device.
 *
 * @param mhi_dev_ctxt [IN ] MHI device context to send the command on
 * @param cmd [IN ] Enum specifying which command to send to the device
 * @param chan [IN ] Channel number for which this command is intended;
 * not applicable for all commands
 *
 * @return MHI_STATUS
 */
MHI_STATUS mhi_send_cmd(mhi_device_ctxt *mhi_dev_ctxt,
			MHI_COMMAND cmd, u32 chan)
{
	u64 db_value = 0;
	mhi_cmd_pkt *cmd_pkt = NULL;
	MHI_CHAN_STATE from_state = MHI_CHAN_STATE_DISABLED;
	MHI_PKT_TYPE ring_el_type = MHI_PKT_TYPE_NOOP_CMD;
	struct mutex *cmd_mutex = NULL;
	struct mutex *chan_mutex = NULL;

	/* Fix: the original jumped to a shared error label that called
	 * mutex_unlock() even for failures occurring BEFORE the command
	 * mutex was locked, unlocking an unheld mutex. Pre-lock failures
	 * now return directly; only post-lock failures unlock.
	 * Also removed the dead 'to_state' local (assigned, never read). */
	if (chan >= MHI_MAX_CHANNELS ||
		cmd >= MHI_COMMAND_MAX_NR || NULL == mhi_dev_ctxt) {
		mhi_log(MHI_MSG_ERROR,
			"Invalid channel id, received id: 0x%x", chan);
		return MHI_STATUS_ERROR;
	}
	mhi_assert_device_wake(mhi_dev_ctxt);
	/* If there is a cmd pending a device confirmation, do not send
	 * any more for this channel */
	if (MHI_CMD_PENDING == mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan])
		return MHI_STATUS_CMD_PENDING;

	from_state =
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan].mhi_chan_state;

	switch (cmd) {
	case MHI_COMMAND_NOOP:
	{
		ring_el_type = MHI_PKT_TYPE_NOOP_CMD;
		break;
	}
	case MHI_COMMAND_RESET_CHAN:
	{
		ring_el_type = MHI_PKT_TYPE_RESET_CHAN_CMD;
		break;
	}
	case MHI_COMMAND_START_CHAN:
	{
		/* START is valid only from ENABLED or STOP */
		switch (from_state) {
		case MHI_CHAN_STATE_ENABLED:
		case MHI_CHAN_STATE_STOP:
			break;
		default:
			mhi_log(MHI_MSG_ERROR,
				"Invalid state transition for "
				"cmd 0x%x, from_state 0x%x\n",
				cmd, from_state);
			return MHI_STATUS_ERROR;
		}
		ring_el_type = MHI_PKT_TYPE_START_CHAN_CMD;
		break;
	}
	case MHI_COMMAND_STOP_CHAN:
	{
		/* STOP is valid only from RUNNING or SUSPENDED */
		switch (from_state) {
		case MHI_CHAN_STATE_RUNNING:
		case MHI_CHAN_STATE_SUSPENDED:
			break;
		default:
			mhi_log(MHI_MSG_ERROR,
				"Invalid state transition for "
				"cmd 0x%x, from_state 0x%x\n",
					cmd, from_state);
			return MHI_STATUS_ERROR;
		}
		ring_el_type = MHI_PKT_TYPE_STOP_CHAN_CMD;
		break;
	}
	default:
		/* Unreachable given the range check above; proceeds as a
		 * NOOP element as before */
		mhi_log(MHI_MSG_ERROR, "Bad command received\n");
	}

	cmd_mutex = &mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING];
	mutex_lock(cmd_mutex);

	if (MHI_STATUS_SUCCESS !=
			ctxt_add_element(mhi_dev_ctxt->mhi_local_cmd_ctxt,
			(void *)&cmd_pkt)) {
		mhi_log(MHI_MSG_ERROR, "Failed to insert element\n");
		goto error_unlock;
	}
	chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
	if (MHI_COMMAND_NOOP != cmd) {
		mutex_lock(chan_mutex);
		MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt, ring_el_type);
		MHI_TRB_SET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
		mutex_unlock(chan_mutex);
	}
	db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt->wp);
	mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_PENDING;

	/* Only ring the doorbell while the device can accept it */
	if (MHI_STATE_M0 == mhi_dev_ctxt->mhi_state ||
		MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) {
		mhi_dev_ctxt->cmd_ring_order++;
		MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->cmd_db_addr, 0, db_value);
	}
	mhi_log(MHI_MSG_VERBOSE, "Sent command 0x%x for chan 0x%x\n", cmd, chan);
	mutex_unlock(cmd_mutex);

	return MHI_STATUS_SUCCESS;

error_unlock:
	mutex_unlock(cmd_mutex);
	return MHI_STATUS_ERROR;
}