static enum MHI_STATUS process_bhi_transition(
			struct mhi_device_ctxt *mhi_dev_ctxt,
			enum STATE_TRANSITION cur_work_item)
{
	/* Note: the link bring-up result is not checked here, unlike
	 * the M0/WAKE paths below. */
	mhi_turn_on_pcie_link(mhi_dev_ctxt);
	mhi_log(MHI_MSG_INFO, "Entered\n");
	/* Device is executing BHI; release any thread waiting to
	 * start firmware download. */
	mhi_dev_ctxt->mhi_state = MHI_STATE_BHI;
	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
	mhi_log(MHI_MSG_INFO, "Exited\n");
	return MHI_STATUS_SUCCESS;
}
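The handler above only flips the state and wakes the BHI wait queue. A minimal sketch of the waiting side, assuming the same wait-queue layout as the examples below (the function name and the 1 s timeout are hypothetical):

static int mhi_wait_for_bhi(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	/* Block until process_bhi_transition() has moved the device
	 * into BHI, so firmware download can proceed. */
	int r = wait_event_interruptible_timeout(
			*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
			mhi_dev_ctxt->mhi_state == MHI_STATE_BHI,
			msecs_to_jiffies(1000));

	if (r == -ERESTARTSYS)	/* interrupted by a signal */
		return r;
	return r ? 0 : -ETIME;	/* 0 from the wait means timeout */
}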
Example #2
ssize_t sysfs_init_M0(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	mhi_device_ctxt *mhi_dev_ctxt = mhi_devices.device_list[0].mhi_ctxt;
	/* On failure the error is only logged; the sysfs write is
	 * still consumed (count is returned regardless). */
	if (MHI_STATUS_SUCCESS != mhi_turn_on_pcie_link(mhi_dev_ctxt)) {
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
				"Failed to resume link\n");
		return count;
	}
	mhi_initiate_m0(mhi_dev_ctxt);
	mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
			"Current mhi_state = 0x%x\n",
			mhi_dev_ctxt->mhi_state);

	return count;
}
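Hooking this store handler into sysfs follows the standard device-attribute pattern. A minimal sketch, with the attribute name MHI_M0 and the helper being assumptions:

static DEVICE_ATTR(MHI_M0, S_IWUSR, NULL, sysfs_init_M0);

static int mhi_sysfs_create(struct device *dev)
{
	/* Write-only trigger; remove with device_remove_file() on
	 * teardown. */
	return device_create_file(dev, &dev_attr_MHI_M0);
}

Example #3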
MHI_STATUS process_WAKE_transition(mhi_device_ctxt *mhi_dev_ctxt,
			STATE_TRANSITION cur_work_item)
{
	MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	mhi_log(MHI_MSG_INFO, "Entered\n");
	/* Hold a wakeup source so the system cannot suspend while the
	 * WAKE transition is in flight; released at exit. */
	__pm_stay_awake(&mhi_dev_ctxt->wake_lock);

	ret_val = mhi_turn_on_pcie_link(mhi_dev_ctxt);

	if (MHI_STATUS_SUCCESS != ret_val) {
		mhi_log(MHI_MSG_CRITICAL,
			"Failed to turn on PCIe link.\n");
		goto exit;
	}
	mhi_dev_ctxt->flags.stop_threads = 0;
	if (mhi_dev_ctxt->flags.mhi_initialized &&
		mhi_dev_ctxt->flags.link_up) {
		mhi_log(MHI_MSG_CRITICAL,
			"MHI is initialized, transitioning to M0.\n");
		mhi_initiate_m0(mhi_dev_ctxt);
	}
	if (!mhi_dev_ctxt->flags.mhi_initialized) {
		mhi_log(MHI_MSG_CRITICAL,
			"MHI is not initialized transitioning to base.\n");
		ret_val = init_mhi_base_state(mhi_dev_ctxt);
		if (MHI_STATUS_SUCCESS != ret_val)
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to transition to base state %d.\n",
				ret_val);
	}
exit:
	mhi_log(MHI_MSG_INFO, "Exited.\n");
	__pm_relax(&mhi_dev_ctxt->wake_lock);
	return ret_val;
}
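The __pm_stay_awake()/__pm_relax() pair requires wake_lock to be an initialized wakeup source. A minimal setup sketch using the era-appropriate wakeup_source_init() (the helper and the name string are assumptions):

static void mhi_init_wakelock(mhi_device_ctxt *mhi_dev_ctxt)
{
	/* Register the embedded wakeup source before the first WAKE
	 * transition can run. */
	wakeup_source_init(&mhi_dev_ctxt->wake_lock, "mhi");
}

Example #4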
int mhi_initiate_m0(mhi_device_ctxt *mhi_dev_ctxt)
{
	int r = 0;
	unsigned long flags;
	mhi_log(MHI_MSG_INFO, "Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
			mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
			mhi_dev_ctxt->flags.pending_M3);
	mutex_lock(&mhi_dev_ctxt->pm_lock);
	/* Wait for the device to settle in M0, M1 or M3 */
	mhi_log(MHI_MSG_INFO,
			"Waiting for M0 M1 or M3. Currently %d...\n",
			mhi_dev_ctxt->mhi_state);

	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M3_event,
			mhi_dev_ctxt->mhi_state == MHI_STATE_M3 ||
			mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
			mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
			msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
	switch (r) {
	case 0:
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
				"Timeout: State %d after %d ms\n",
				mhi_dev_ctxt->mhi_state,
				MHI_MAX_SUSPEND_TIMEOUT);
		mhi_dev_ctxt->counters.m0_event_timeouts++;
		r = -ETIME;
		goto exit;
	case -ERESTARTSYS:
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
				"Going Down...\n");
		goto exit;
	default:
		mhi_log(MHI_MSG_INFO | MHI_DBG_POWER,
				"Wait complete state: %d\n",
				mhi_dev_ctxt->mhi_state);
		r = 0;
		break;
	}
	if (mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
			mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
		mhi_assert_device_wake(mhi_dev_ctxt);
		mhi_log(MHI_MSG_INFO,
				"MHI state %d, done\n",
				mhi_dev_ctxt->mhi_state);
		goto exit;
	} else {
		/* Turn the clocks back on. */
		if (MHI_STATUS_SUCCESS != mhi_turn_on_pcie_link(mhi_dev_ctxt)) {
			mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
					"Failed to resume link\n");
			r = -EIO;
			goto exit;
		}

		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
				"Setting M0 ...\n");
		if (mhi_dev_ctxt->flags.pending_M3) {
			mhi_log(MHI_MSG_INFO,
					"Pending M3 detected, aborting M0 procedure\n");
			write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
			r = -EPERM;
			goto exit;
		}
		if (mhi_dev_ctxt->flags.link_up) {
			mhi_dev_ctxt->flags.pending_M0 = 1;
			mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
		}
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	}
exit:
	atomic_set(&mhi_dev_ctxt->flags.m0_work_enabled, 0);
	mutex_unlock(&mhi_dev_ctxt->pm_lock);
	mhi_log(MHI_MSG_INFO, "Exited...\n");
	return r;
}
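The next example is a later revision of the same function: the wait queue moves from M3_event to mhi_ev_wq.m3_event, the m0_work_enabled bookkeeping is dropped, and after requesting M0 it additionally blocks on m0_event until the device actually reports M0 or M1.

Example #5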
int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	int r = 0;
	unsigned long flags;

	mhi_log(MHI_MSG_INFO,
		"Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
		mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
		mhi_dev_ctxt->flags.pending_M3);
	mutex_lock(&mhi_dev_ctxt->pm_lock);
	mhi_log(MHI_MSG_INFO,
		"Waiting for M0 M1 or M3. Currently %d...\n",
		mhi_dev_ctxt->mhi_state);

	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
			mhi_dev_ctxt->mhi_state == MHI_STATE_M3 ||
			mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
			mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
		msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
	switch (r) {
	case 0:
		mhi_log(MHI_MSG_CRITICAL,
			"Timeout: State %d after %d ms\n",
			mhi_dev_ctxt->mhi_state,
			MHI_MAX_SUSPEND_TIMEOUT);
		mhi_dev_ctxt->counters.m0_event_timeouts++;
		r = -ETIME;
		goto exit;
	case -ERESTARTSYS:
		mhi_log(MHI_MSG_CRITICAL,
			"Going Down...\n");
		goto exit;
	default:
		mhi_log(MHI_MSG_INFO,
			"Wait complete state: %d\n", mhi_dev_ctxt->mhi_state);
		r = 0;
		break;
	}
	if (mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
	    mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
		mhi_assert_device_wake(mhi_dev_ctxt);
		mhi_log(MHI_MSG_INFO, "MHI state %d, done\n",
			mhi_dev_ctxt->mhi_state);
		goto exit;
	} else {
		if (MHI_STATUS_SUCCESS != mhi_turn_on_pcie_link(mhi_dev_ctxt)) {
			mhi_log(MHI_MSG_CRITICAL,
					"Failed to resume link\n");
			r = -EIO;
			goto exit;
		}

		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
		mhi_log(MHI_MSG_VERBOSE, "Setting M0 ...\n");
		if (mhi_dev_ctxt->flags.pending_M3) {
			mhi_log(MHI_MSG_INFO,
				"Pending M3 detected, aborting M0 procedure\n");
			write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock,
								flags);
			r = -EPERM;
			goto exit;
		}
		if (mhi_dev_ctxt->flags.link_up) {
			mhi_dev_ctxt->flags.pending_M0 = 1;
			mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
		}
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		r = wait_event_interruptible_timeout(
				*mhi_dev_ctxt->mhi_ev_wq.m0_event,
				mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
				mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
				msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
		WARN_ON(!r || -ERESTARTSYS == r);
		if (!r || -ERESTARTSYS == r)
			mhi_log(MHI_MSG_ERROR,
				"Failed to get M0 event ret %d\n", r);
		/* A missed M0 event is warned on and logged, but
		 * deliberately not propagated to the caller. */
		r = 0;
	}
exit:
	mutex_unlock(&mhi_dev_ctxt->pm_lock);
	mhi_log(MHI_MSG_INFO, "Exited...\n");
	return r;
}
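Both revisions sleep on wait queues that the event-processing path must wake. A hypothetical completion side, mirroring the wake_up_interruptible() call in process_bhi_transition (the function name and the enum MHI_STATE tag are assumptions; locking of mhi_state is elided in this sketch):

static void mhi_notify_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
			       enum MHI_STATE new_state)
{
	/* Record the state reported by the device, then release both
	 * waiters used by mhi_initiate_m0() above. */
	mhi_dev_ctxt->mhi_state = new_state;
	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m0_event);
	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m3_event);
}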