/**
 * @brief Reset a single MHI channel
 *
 * @param client_handle [IN ] handle of the client whose channel
 *                            is to be reset
 *
 * @return MHI_STATUS
 */
MHI_STATUS mhi_reset_channel(mhi_client_handle *client_handle)
{
	MHI_STATUS ret_val;
	mhi_chan_ctxt *cur_ctxt = NULL;
	mhi_device_ctxt *mhi_dev_ctxt = NULL;
	u32 chan_id = 0;
	mhi_ring *cur_ring = NULL;

	if (NULL == client_handle) {
		mhi_log(MHI_MSG_ERROR, "Bad input parameters\n");
		return MHI_STATUS_ERROR;
	}

	chan_id = client_handle->chan;
	mhi_dev_ctxt = client_handle->mhi_dev_ctxt;

	if (chan_id > (MHI_MAX_CHANNELS - 1) || NULL == mhi_dev_ctxt) {
		mhi_log(MHI_MSG_ERROR, "Bad input parameters\n");
		return MHI_STATUS_ERROR;
	}

	mutex_lock(&mhi_dev_ctxt->mhi_chan_mutex[chan_id]);

	/* We need to reset the channel completely; we assume that our
	 * ring base address is correct.
	 */
	cur_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan_id];
	cur_ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan_id];
	memset(cur_ring->base, 0, cur_ring->len);

	if (IS_HARDWARE_CHANNEL(chan_id)) {
		ret_val = mhi_init_chan_ctxt(cur_ctxt,
				mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					     (uintptr_t)cur_ring->base),
					     (uintptr_t)cur_ring->base,
					     MAX_NR_TRBS_PER_HARD_CHAN,
					     (chan_id % 2) ? MHI_IN : MHI_OUT,
			      (chan_id % 2) ? IPA_IN_EV_RING : IPA_OUT_EV_RING,
					     cur_ring);
	} else {
		ret_val = mhi_init_chan_ctxt(cur_ctxt,
				mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					     (uintptr_t)cur_ring->base),
					     (uintptr_t)cur_ring->base,
					     MAX_NR_TRBS_PER_SOFT_CHAN,
					     (chan_id % 2) ? MHI_IN : MHI_OUT,
					     SOFTWARE_EV_RING,
					     cur_ring);
	}

	if (MHI_STATUS_SUCCESS != ret_val)
		mhi_log(MHI_MSG_ERROR, "Failed to reset chan ctxt\n");

	mutex_unlock(&mhi_dev_ctxt->mhi_chan_mutex[chan_id]);
	return ret_val;
}
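
/*
 * Minimal usage sketch: a client that owns a channel can request a full
 * reset through its handle. The wrapper below is hypothetical and only
 * illustrative; the handle would normally come from the MHI core when
 * the channel was opened.
 */
static void example_reset_client_chan(mhi_client_handle *client_handle)
{
	if (MHI_STATUS_SUCCESS != mhi_reset_channel(client_handle))
		mhi_log(MHI_MSG_ERROR, "Failed to reset client channel\n");
}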
void ring_all_chan_dbs(mhi_device_ctxt *mhi_dev_ctxt)
{
	u32 i = 0;
	mhi_ring *local_ctxt = NULL;

	mhi_log(MHI_MSG_VERBOSE, "Ringing chan dbs\n");
	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
		if (VALID_CHAN_NR(i)) {
			local_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
			/* Hardware channels go back to doorbell mode */
			if (IS_HARDWARE_CHANNEL(i))
				mhi_dev_ctxt->db_mode[i] = 1;
			/* Ring if work is pending, or always for IN (odd)
			 * channels */
			if ((local_ctxt->wp != local_ctxt->rp) || (i % 2))
				conditional_chan_db_write(mhi_dev_ctxt, i);
		}
	}
}
MHI_STATUS mhi_notify_device(mhi_device_ctxt *mhi_dev_ctxt, u32 chan)
{
	unsigned long flags = 0;
	u64 db_value;
	mhi_chan_ctxt *chan_ctxt;
	chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];
	spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan], flags);
	if (likely(((MHI_STATE_M0 == mhi_dev_ctxt->mhi_state) ||
		(MHI_STATE_M1 == mhi_dev_ctxt->mhi_state)) &&
		(chan_ctxt->mhi_chan_state != MHI_CHAN_STATE_ERROR) &&
		!mhi_dev_ctxt->flags.pending_M3)) {

		mhi_dev_ctxt->mhi_chan_db_order[chan]++;
		db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
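		/*
		 * Hardware IN channels (odd IDs) carry high packet rates,
		 * so their doorbell writes are coalesced to one every
		 * MHI_XFER_DB_INTERVAL packets; other channels ring the
		 * doorbell on every notification.
		 */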
		if (IS_HARDWARE_CHANNEL(chan) && (chan % 2)) {
			if ((mhi_dev_ctxt->mhi_chan_cntr[chan].pkts_xferd %
						MHI_XFER_DB_INTERVAL) == 0) {
				MHI_WRITE_DB(mhi_dev_ctxt,
						mhi_dev_ctxt->channel_db_addr,
						chan, db_value);
			}
		} else {
			MHI_WRITE_DB(mhi_dev_ctxt,
			     mhi_dev_ctxt->channel_db_addr,
			     chan, db_value);
		}
	} else {
		mhi_log(MHI_MSG_VERBOSE,
			"Triggering wakeup due to pending data: MHI state %d, chan state %d, pending M3 %d\n",
			mhi_dev_ctxt->mhi_state, chan_ctxt->mhi_chan_state,
			mhi_dev_ctxt->flags.pending_M3);
		if (mhi_dev_ctxt->flags.pending_M3 ||
		    mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
			mhi_wake_dev_from_m3(mhi_dev_ctxt);
		}
	}
	spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags);
	/* If there are no clients still sending, we can trigger our
	 * inactivity timer */
	return MHI_STATUS_SUCCESS;
}
static enum MHI_STATUS mhi_init_contexts(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	u32 i = 0;
	struct mhi_control_seg *mhi_ctrl = mhi_dev_ctxt->mhi_ctrl_seg;
	struct mhi_event_ctxt *event_ctxt = NULL;
	u32 event_ring_index = 0;
	union mhi_xfer_pkt *trb_list = NULL;
	struct mhi_chan_ctxt *chan_ctxt = NULL;
	struct mhi_ring *local_event_ctxt = NULL;
	u32 msi_vec = 0;
	u32 intmod_t = 0;
	uintptr_t ev_ring_addr;

	for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
		MHI_GET_EVENT_RING_INFO(EVENT_RING_MSI_VEC,
					mhi_dev_ctxt->ev_ring_props[i],
					msi_vec);
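		/*
		 * Pick the interrupt moderation interval for this event
		 * ring; intmod lets the device batch completions before
		 * firing an MSI (the values below are driver policy).
		 */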
		switch (i) {
		case IPA_OUT_EV_RING:
			intmod_t = 10;
			break;
		case IPA_IN_EV_RING:
			intmod_t = 6;
			break;
		}
		event_ring_index = mhi_dev_ctxt->alloced_ev_rings[i];
		event_ctxt = &mhi_ctrl->mhi_ec_list[event_ring_index];
		local_event_ctxt =
			&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index];

		ev_ring_addr = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)mhi_ctrl->ev_trb_list[i]);
		mhi_log(MHI_MSG_VERBOSE,
			"Setting msi_vec 0x%x, for ev ring ctxt 0x%x\n",
			msi_vec, event_ring_index);
		mhi_event_ring_init(event_ctxt, ev_ring_addr,
				(uintptr_t)mhi_ctrl->ev_trb_list[i],
				EV_EL_PER_RING, local_event_ctxt,
				intmod_t, msi_vec);
	}

	/* Init Command Ring */
	mhi_cmd_ring_init(&mhi_ctrl->mhi_cmd_ctxt_list[PRIMARY_CMD_RING],
			mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_ctrl->cmd_trb_list[PRIMARY_CMD_RING]),
			(uintptr_t)mhi_ctrl->cmd_trb_list[PRIMARY_CMD_RING],
			CMD_EL_PER_RING,
			&mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING]);

	mhi_log(MHI_MSG_INFO, "Initializing contexts\n");
	/* Initialize Channel Contexts */
	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
		trb_list = mhi_dev_ctxt->mhi_ctrl_seg->xfer_trb_list[i];
		chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i];
		if (IS_SOFTWARE_CHANNEL(i)) {
			mhi_init_chan_ctxt(chan_ctxt,
				mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)trb_list),
				(uintptr_t)trb_list,
				MAX_NR_TRBS_PER_SOFT_CHAN,
				(i % 2) ? MHI_IN : MHI_OUT,
				0,
				&mhi_dev_ctxt->mhi_local_chan_ctxt[i]);
		} else if (IS_HARDWARE_CHANNEL(i)) {
			mhi_init_chan_ctxt(chan_ctxt,
				mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)trb_list),
				(uintptr_t)trb_list,
				MAX_NR_TRBS_PER_HARD_CHAN,
				(i % 2) ? MHI_IN : MHI_OUT,
				i,
				&mhi_dev_ctxt->mhi_local_chan_ctxt[i]);
		}
	}
	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;

	return MHI_STATUS_SUCCESS;
}
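
/*
 * The (i % 2) tests above encode the channel-direction convention used
 * throughout this driver: even channel IDs are outbound (host to device),
 * odd channel IDs are inbound. An illustrative helper (hypothetical, not
 * part of the driver):
 */
static inline int mhi_chan_is_inbound(u32 chan_id)
{
	return chan_id % 2;
}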
static enum MHI_STATUS mhi_init_device_ctrl(struct mhi_device_ctxt
								*mhi_dev_ctxt)
{
	size_t ctrl_seg_size = 0;
	size_t ctrl_seg_offset = 0;
	u32 i = 0;
	u32 align_len = sizeof(u64)*2;
	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;

	if (NULL == mhi_dev_ctxt || NULL == mhi_dev_ctxt->mhi_ctrl_seg_info ||
			NULL == mhi_dev_ctxt->mhi_ctrl_seg_info->dev)
		return MHI_STATUS_ERROR;

	mhi_dev_ctxt->enable_lpm = 1;

	mhi_log(MHI_MSG_INFO, "Allocating control segment.\n");
	/* Calculate the size of the control segment needed */
	ctrl_seg_size += sizeof(struct mhi_control_seg);
	ctrl_seg_size += align_len - (ctrl_seg_size % align_len);
	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
		if (IS_HARDWARE_CHANNEL(i))
			ctrl_seg_size += sizeof(union mhi_xfer_pkt) *
				(MAX_NR_TRBS_PER_HARD_CHAN + ELEMENT_GAP);
		else if (IS_SOFTWARE_CHANNEL(i))
			ctrl_seg_size += sizeof(union mhi_xfer_pkt) *
				(MAX_NR_TRBS_PER_SOFT_CHAN + ELEMENT_GAP);
	}
	ctrl_seg_size += align_len - (ctrl_seg_size % align_len);

	for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i)
		ctrl_seg_size += sizeof(union mhi_event_pkt)*
					(EV_EL_PER_RING + ELEMENT_GAP);

	ctrl_seg_size += align_len - (ctrl_seg_size % align_len);
	ret_val = mhi_mallocmemregion(mhi_dev_ctxt->mhi_ctrl_seg_info,
							ctrl_seg_size);
	if (MHI_STATUS_SUCCESS != ret_val)
		return MHI_STATUS_ERROR;
	mhi_dev_ctxt->mhi_ctrl_seg =
			mhi_get_virt_addr(mhi_dev_ctxt->mhi_ctrl_seg_info);

	if (NULL == mhi_dev_ctxt->mhi_ctrl_seg)
		return MHI_STATUS_ALLOC_ERROR;

	/* Set the channel contexts, event contexts and cmd context */
	ctrl_seg_offset = (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg +
						sizeof(struct mhi_control_seg);
	ctrl_seg_offset += align_len - (ctrl_seg_offset % align_len);
	/* Set the TRB lists */
	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
		if (IS_HARDWARE_CHANNEL(i)) {
			mhi_dev_ctxt->mhi_ctrl_seg->xfer_trb_list[i] =
				(union mhi_xfer_pkt *)ctrl_seg_offset;
			ctrl_seg_offset += sizeof(union mhi_xfer_pkt) *
				(MAX_NR_TRBS_PER_HARD_CHAN + ELEMENT_GAP);

		} else if (IS_SOFTWARE_CHANNEL(i)) {
			mhi_dev_ctxt->mhi_ctrl_seg->xfer_trb_list[i] =
				(union mhi_xfer_pkt *)ctrl_seg_offset;
			ctrl_seg_offset += sizeof(union mhi_xfer_pkt) *
				(MAX_NR_TRBS_PER_SOFT_CHAN + ELEMENT_GAP);
		}
	}

	ctrl_seg_offset += align_len - (ctrl_seg_offset % align_len);
	for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
		mhi_dev_ctxt->mhi_ctrl_seg->ev_trb_list[i] =
			(union mhi_event_pkt *)ctrl_seg_offset;
		ctrl_seg_offset += sizeof(union mhi_event_pkt) *
			(EV_EL_PER_RING + ELEMENT_GAP);
	}
	return MHI_STATUS_SUCCESS;
}
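
/*
 * Note on the padding arithmetic used above: "size += align_len -
 * (size % align_len)" always adds at least one byte, so a size that is
 * already aligned is padded by a full align_len. That only over-reserves
 * memory here, but it differs from a plain round-up. An illustrative
 * sketch of the two behaviors (hypothetical helpers, assuming align_len
 * is a power of two for the masking variant):
 */
static size_t pad_past_boundary(size_t size, size_t align_len)
{
	/* e.g. 32 -> 48 and 33 -> 48 when align_len == 16 */
	return size + (align_len - (size % align_len));
}

static size_t round_up_to_boundary(size_t size, size_t align_len)
{
	/* e.g. 32 -> 32 and 33 -> 48 when align_len == 16 */
	return (size + align_len - 1) & ~(align_len - 1);
}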
MHI_STATUS recycle_trb_and_ring(mhi_device_ctxt *mhi_dev_ctxt,
		mhi_ring *ring,
		MHI_RING_TYPE ring_type,
		u32 ring_index)
{
	MHI_STATUS ret_val = MHI_STATUS_ERROR;
	u64 db_value = 0;
	void *removed_element = NULL;
	void *added_element = NULL;

	/* TODO This will not cover us for ring_index out of
	 * bounds for cmd or event channels */
	if (NULL == mhi_dev_ctxt || NULL == ring ||
		ring_type > (MHI_RING_TYPE_MAX - 1) ||
		ring_index > (MHI_MAX_CHANNELS - 1)) {

		mhi_log(MHI_MSG_ERROR, "Bad input params\n");
		return ret_val;
	}
	ret_val = ctxt_del_element(ring, &removed_element);
	if (MHI_STATUS_SUCCESS != ret_val) {
		mhi_log(MHI_MSG_ERROR, "Could not remove element from ring\n");
		return MHI_STATUS_ERROR;
	}
	ret_val = ctxt_add_element(ring, &added_element);
	if (MHI_STATUS_SUCCESS != ret_val) {
		mhi_log(MHI_MSG_ERROR, "Could not add element to ring\n");
		return ret_val;
	}
	db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
				(uintptr_t)ring->wp);
	if (MHI_RING_TYPE_XFER_RING == ring_type) {
		mhi_xfer_pkt *removed_xfer_pkt =
			(mhi_xfer_pkt *)removed_element;
		mhi_xfer_pkt *added_xfer_pkt =
			(mhi_xfer_pkt *)added_element;
		added_xfer_pkt->data_tx_pkt =
				*(mhi_tx_pkt *)removed_xfer_pkt;
	} else if (MHI_RING_TYPE_EVENT_RING == ring_type &&
		   mhi_dev_ctxt->counters.m0_m3 > 0 &&
		   IS_HARDWARE_CHANNEL(ring_index)) {
		spinlock_t *lock = NULL;
		unsigned long flags = 0;
#if defined(CONFIG_MACH_LENTISLTE_SKT) || defined(CONFIG_MACH_LENTISLTE_LGT) || defined(CONFIG_MACH_LENTISLTE_KTT) || defined(CONFIG_SEC_KCCAT6_PROJECT)
		mhi_log(MHI_MSG_VERBOSE,
			"Updating ev context id %d, value 0x%llx\n",
			ring_index, db_value);
		lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
		spin_lock_irqsave(lock, flags);
		db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)ring->wp);
		mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[ring_index].mhi_event_write_ptr = db_value;
#else
		mhi_log(MHI_MSG_VERBOSE, "Updating EV_CTXT\n");
		lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
		spin_lock_irqsave(lock, flags);
		mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[ring_index].mhi_event_write_ptr = db_value;
#endif
		mhi_dev_ctxt->ev_counter[ring_index]++;
		spin_unlock_irqrestore(lock, flags);
	}
	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	/* Asserting device wake here will immediately wake the mdm */
	if ((MHI_STATE_M0 == mhi_dev_ctxt->mhi_state ||
	     MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) &&
	     mhi_dev_ctxt->flags.link_up) {
		switch (ring_type) {
		case MHI_RING_TYPE_CMD_RING:
		{
			struct mutex *cmd_mutex = NULL;
			cmd_mutex =
				&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING];
			mutex_lock(cmd_mutex);
			mhi_dev_ctxt->cmd_ring_order = 1;
			MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->cmd_db_addr,
					ring_index, db_value);
			mutex_unlock(cmd_mutex);
			break;
		}
		case MHI_RING_TYPE_EVENT_RING:
		{
			spinlock_t *lock = NULL;
			unsigned long flags = 0;
			lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
			spin_lock_irqsave(lock, flags);
			mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
#if defined(CONFIG_MACH_LENTISLTE_SKT) || defined(CONFIG_MACH_LENTISLTE_LGT) || defined(CONFIG_MACH_LENTISLTE_KTT)|| defined(CONFIG_SEC_KCCAT6_PROJECT)
			db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)ring->wp);
#endif
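			/*
			 * Coalesce event ring doorbells: ring only once
			 * every MHI_EV_DB_INTERVAL recycled elements.
			 */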
			if ((mhi_dev_ctxt->ev_counter[ring_index] %
						MHI_EV_DB_INTERVAL) == 0) {
				MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->event_db_addr,
						ring_index, db_value);
			}
			spin_unlock_irqrestore(lock, flags);
			break;
		}
		case MHI_RING_TYPE_XFER_RING:
		{
			unsigned long flags = 0;
			spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[ring_index], flags);
			mhi_dev_ctxt->mhi_chan_db_order[ring_index] = 1;
			MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->channel_db_addr,
					ring_index, db_value);
			spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[ring_index],
						flags);
			break;
		}
		default:
			mhi_log(MHI_MSG_ERROR, "Bad ring type\n");
		}
	}
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	return ret_val;
}