Example No. 1
static enum MHI_STATUS process_reset_transition(
			struct mhi_device_ctxt *mhi_dev_ctxt,
			enum STATE_TRANSITION cur_work_item)
{
	u32 i = 0;
	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	unsigned long flags = 0;

	mhi_log(MHI_MSG_INFO, "Processing RESET state transition\n");
	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->counters.mhi_reset_cntr++;
	mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_PBL;
	ret_val = mhi_test_for_device_reset(mhi_dev_ctxt);
	/* Note: the reset check's status is discarded; only the ready
	 * check's result drives the state machine below. */
	ret_val = mhi_test_for_device_ready(mhi_dev_ctxt);
	switch (ret_val) {
	case MHI_STATUS_SUCCESS:
		break;
	case MHI_STATUS_LINK_DOWN:
		mhi_log(MHI_MSG_CRITICAL, "Link down detected\n");
		break;
	case MHI_STATUS_DEVICE_NOT_READY:
		ret_val = mhi_init_state_transition(mhi_dev_ctxt,
					STATE_TRANSITION_RESET);
		if (MHI_STATUS_SUCCESS != ret_val)
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to initiate 0x%x state trans\n",
				STATE_TRANSITION_RESET);
		break;
	default:
		mhi_log(MHI_MSG_CRITICAL,
			"Unexpected ret code detected for\n");
		break;
	}
	for (i = 0; i < NR_OF_CMD_RINGS; ++i) {
		mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp =
				mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
		mhi_dev_ctxt->mhi_local_cmd_ctxt[i].wp =
				mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_cmd_ctxt_list[i].
						mhi_cmd_ring_read_ptr =
				virt_to_dma(NULL,
				mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp);
	}
	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i)
		mhi_reset_ev_ctxt(mhi_dev_ctxt, i);

	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
		if (VALID_CHAN_NR(i))
			mhi_reset_chan_ctxt(mhi_dev_ctxt, i);
	}
	ret_val = mhi_init_state_transition(mhi_dev_ctxt,
				STATE_TRANSITION_READY);
	if (MHI_STATUS_SUCCESS != ret_val)
		mhi_log(MHI_MSG_CRITICAL,
		"Failed to initiate 0x%x state trans\n",
		STATE_TRANSITION_READY);
	return ret_val;
}
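
The command-ring loop above rewinds each ring by pointing both rp and wp back at the ring base. Below is a minimal, self-contained sketch of that reset pattern; the ring struct here is invented for illustration and is not the driver's real mhi_ring layout.

#include <stdio.h>

/* Toy ring: rp/wp walk an array of elements, as in the driver */
struct ring {
	int buf[8];
	int *base;
	int *rp;	/* next element the consumer reads */
	int *wp;	/* next slot the producer writes */
};

static void ring_reset(struct ring *r)
{
	/* Rewind both pointers to base, discarding in-flight elements */
	r->rp = r->base;
	r->wp = r->base;
}

int main(void)
{
	struct ring r;

	r.base = r.buf;
	r.rp = r.base + 1;	/* pretend one element was consumed */
	r.wp = r.base + 3;	/* pretend three were queued */
	ring_reset(&r);
	printf("rp offset %td, wp offset %td\n", r.rp - r.base, r.wp - r.base);
	return 0;
}
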
Example No. 2
static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
					enum MHI_EXEC_ENV exec_env)
{
	struct mhi_client_handle *client_handle = NULL;
	int i;
	struct mhi_chan_info chan_info;
	int r;

	mhi_log(MHI_MSG_INFO, "Enabling Clients, exec env %d.\n", exec_env);
	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
		if (!VALID_CHAN_NR(i))
			continue;
		client_handle = mhi_dev_ctxt->client_handle_list[i];
		r = get_chan_props(mhi_dev_ctxt, i, &chan_info);
		if (!r && client_handle &&
		    exec_env == GET_CHAN_PROPS(CHAN_BRINGUP_STAGE,
						chan_info.flags))
			mhi_notify_client(client_handle, MHI_CB_MHI_ENABLED);
	}
	if (exec_env == MHI_EXEC_ENV_AMSS)
		mhi_deassert_device_wake(mhi_dev_ctxt);
	mhi_log(MHI_MSG_INFO, "Done.\n");
}
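
GET_CHAN_PROPS(CHAN_BRINGUP_STAGE, chan_info.flags) presumably extracts a packed field from the channel's flags word. A standalone sketch of that mask-and-shift idiom follows; the field layout (bits [7:4]) is an assumption made up for the example.

#include <stdio.h>
#include <stdint.h>

/* Assumed layout: bringup stage lives in bits [7:4] of the flags word */
#define CHAN_BRINGUP_STAGE_MASK		0xF0u
#define CHAN_BRINGUP_STAGE_SHIFT	4

#define GET_FIELD(val, mask, shift)	(((val) & (mask)) >> (shift))

int main(void)
{
	uint32_t flags = 0x2Au;	/* stage 2, plus unrelated bits */
	uint32_t stage = GET_FIELD(flags, CHAN_BRINGUP_STAGE_MASK,
					CHAN_BRINGUP_STAGE_SHIFT);

	printf("bringup stage: %u\n", stage);	/* prints 2 */
	return 0;
}
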
Example No. 3
MHI_STATUS mhi_open_channel(mhi_client_handle **client_handle,
		MHI_CLIENT_CHANNEL chan, s32 device_index,
		mhi_client_info_t *client_info, void *UserData)
{
	MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	mhi_control_seg *mhi_ctrl_seg = NULL;

	if (!VALID_CHAN_NR(chan)) {
		ret_val = MHI_STATUS_INVALID_CHAN_ERR;
		goto error_handle;
	}
	if (NULL == client_handle || device_index < 0 ||
			device_index >= mhi_devices.nr_of_devices) {
		ret_val = MHI_STATUS_ERROR;
		goto error_handle;
	}
	mhi_log(MHI_MSG_INFO,
			"Opened channel 0x%x for client\n", chan);

	*client_handle = kzalloc(sizeof(mhi_client_handle), GFP_KERNEL);
	if (NULL == *client_handle) {
		ret_val = MHI_STATUS_ALLOC_ERROR;
		goto error_handle;
	}
	/* Take the device reference only once allocation has succeeded */
	atomic_inc(&mhi_devices.device_list[device_index].ref_count);
	(*client_handle)->chan = chan;
	(*client_handle)->mhi_dev_ctxt =
		mhi_devices.device_list[device_index].mhi_ctxt;
	mhi_ctrl_seg = (*client_handle)->mhi_dev_ctxt->mhi_ctrl_seg;

	(*client_handle)->mhi_dev_ctxt->client_handle_list[chan] =
							*client_handle;
	if (NULL != client_info)
		(*client_handle)->client_info = *client_info;

	(*client_handle)->user_data = UserData;
	(*client_handle)->event_ring_index =
		mhi_ctrl_seg->mhi_cc_list[chan].mhi_event_ring_index;
	init_completion(&(*client_handle)->chan_close_complete);
	(*client_handle)->msi_vec =
		mhi_ctrl_seg->mhi_ec_list[
			(*client_handle)->event_ring_index].mhi_msi_vector;
	if (NULL != client_info && client_info->cb_mod != 0)
		(*client_handle)->cb_mod = client_info->cb_mod;
	else
		(*client_handle)->cb_mod = 1;

	if (MHI_CLIENT_IP_HW_0_OUT == chan || MHI_CLIENT_IP_HW_0_IN == chan)
		(*client_handle)->intmod_t = 10;
	mhi_log(MHI_MSG_VERBOSE,
		"Successfuly started chan 0x%x\n", chan);

error_handle:
	return ret_val;
}
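
mhi_open_channel returns its handle through a pointer-to-pointer out parameter, allocated zeroed and filled in before the caller sees it. A self-contained userspace sketch of the same allocate-and-return pattern, with calloc standing in for kzalloc and all names hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct client_handle {
	int chan;
	void *user_data;
};

/* Returns 0 on success; on success *out owns a zeroed, filled handle */
static int open_handle(struct client_handle **out, int chan, void *user_data)
{
	if (!out || chan < 0)
		return -1;
	*out = calloc(1, sizeof(**out));	/* zeroed, like kzalloc */
	if (!*out)
		return -1;
	(*out)->chan = chan;
	(*out)->user_data = user_data;
	return 0;
}

int main(void)
{
	struct client_handle *h = NULL;

	if (open_handle(&h, 4, NULL) == 0) {
		printf("opened chan %d\n", h->chan);
		free(h);
	}
	return 0;
}
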
Example No. 4
MHI_STATUS start_chan_cmd(mhi_device_ctxt *mhi_dev_ctxt, mhi_cmd_pkt *cmd_pkt)
{
	u32 chan;
	MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
	if (!VALID_CHAN_NR(chan)) {
		mhi_log(MHI_MSG_ERROR, "Bad chan: 0x%x\n", chan);
		return MHI_STATUS_ERROR;
	}
	mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] =
					MHI_CMD_NOT_PENDING;

	mhi_log(MHI_MSG_INFO, "Processed cmd channel start\n");
	return MHI_STATUS_SUCCESS;
}
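
MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan) presumably decodes the channel id field out of the packed command packet into its last argument. A standalone sketch of that read-into-lvalue macro style; the 8-bit field at bits [31:24] is invented for the example.

#include <stdio.h>
#include <stdint.h>

/* Assumed layout: channel id in bits [31:24] of the TRB info word */
#define CMD_TRB_CHID_MASK	0xFF000000u
#define CMD_TRB_CHID_SHIFT	24

#define TRB_GET_CHID(pkt, dst) \
	((dst) = ((pkt)->info & CMD_TRB_CHID_MASK) >> CMD_TRB_CHID_SHIFT)

struct cmd_pkt {
	uint32_t info;
};

int main(void)
{
	struct cmd_pkt pkt = { .info = 0x0C000001u };	/* chan 12 */
	uint32_t chan;

	TRB_GET_CHID(&pkt, chan);
	printf("chan %u\n", chan);
	return 0;
}
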
Example No. 5
void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
					enum MHI_CB_REASON reason)
{
	int i;
	struct mhi_client_handle *client_handle = NULL;

	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
		if (VALID_CHAN_NR(i)) {
			client_handle = mhi_dev_ctxt->client_handle_list[i];
			if (NULL != client_handle)
				mhi_notify_client(client_handle, reason);
		}
	}
}
Example No. 6
void ring_all_chan_dbs(mhi_device_ctxt *mhi_dev_ctxt)
{
	u32 i = 0;
	mhi_ring *local_ctxt = NULL;

	mhi_log(MHI_MSG_VERBOSE, "Ringing chan dbs\n");
	for (i = 0; i < MHI_MAX_CHANNELS; ++i)
		if (VALID_CHAN_NR(i)) {
			local_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
			if (IS_HARDWARE_CHANNEL(i))
				mhi_dev_ctxt->db_mode[i] = 1;
			if ((local_ctxt->wp != local_ctxt->rp) || (i % 2))
				conditional_chan_db_write(mhi_dev_ctxt, i);
		}
}
Example No. 7
MHI_STATUS reset_chan_cmd(mhi_device_ctxt *mhi_dev_ctxt, mhi_cmd_pkt *cmd_pkt)
{
	u32 chan  = 0;
	MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	mhi_ring *local_chan_ctxt;
	mhi_chan_ctxt *chan_ctxt;
	mhi_client_handle *client_handle = NULL;
	struct mutex *chan_mutex;

	MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);

	if (!VALID_CHAN_NR(chan)) {
		mhi_log(MHI_MSG_ERROR,
			"Bad channel number for CCE\n");
		return MHI_STATUS_ERROR;
	}

	chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
	mutex_lock(chan_mutex);
	client_handle = mhi_dev_ctxt->client_handle_list[chan];
	local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
	chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];
	mhi_log(MHI_MSG_INFO, "Processed cmd reset event\n");

	/* Reset the local channel context */
	local_chan_ctxt->rp = local_chan_ctxt->base;
	local_chan_ctxt->wp = local_chan_ctxt->base;
	local_chan_ctxt->ack_rp = local_chan_ctxt->base;

	/* Reset the mhi channel context */
	chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
	chan_ctxt->mhi_trb_read_ptr = chan_ctxt->mhi_trb_ring_base_addr;
	chan_ctxt->mhi_trb_write_ptr = chan_ctxt->mhi_trb_ring_base_addr;

	mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_NOT_PENDING;
	mutex_unlock(chan_mutex);
	mhi_log(MHI_MSG_INFO, "Reset complete starting channel back up.\n");
	if (MHI_STATUS_SUCCESS != mhi_send_cmd(mhi_dev_ctxt,
						MHI_COMMAND_START_CHAN,
						chan))
		mhi_log(MHI_MSG_CRITICAL, "Failed to restart channel.\n");
	if (NULL != client_handle)
		complete(&client_handle->chan_close_complete);
	return ret_val;
}
Example No. 8
enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	u64 pcie_dword_val = 0;
	u32 pcie_word_val = 0;
	u32 i = 0;
	enum MHI_STATUS ret_val;

	mhi_log(MHI_MSG_INFO, "~~~ Initializing MMIO ~~~\n");
	mhi_dev_ctxt->mmio_addr = mhi_dev_ctxt->dev_props->bar0_base;

	mhi_log(MHI_MSG_INFO, "Bar 0 address is at: 0x%p\n",
			mhi_dev_ctxt->mmio_addr);

	mhi_dev_ctxt->mmio_len = mhi_reg_read(mhi_dev_ctxt->mmio_addr,
							 MHIREGLEN);

	if (0 == mhi_dev_ctxt->mmio_len) {
		mhi_log(MHI_MSG_ERROR, "Received mmio length as zero\n");
		return MHI_STATUS_ERROR;
	}

	mhi_log(MHI_MSG_INFO, "Testing MHI Ver\n");
	mhi_dev_ctxt->dev_props->mhi_ver = mhi_reg_read(
				mhi_dev_ctxt->mmio_addr, MHIVER);
	if (MHI_VERSION != mhi_dev_ctxt->dev_props->mhi_ver) {
		mhi_log(MHI_MSG_CRITICAL, "Bad MMIO version, 0x%x\n",
					mhi_dev_ctxt->dev_props->mhi_ver);

		/* All 0xF's means the link is not yet up, so wait for the
		 * device; any other mismatch is a genuinely bad version */
		if (mhi_dev_ctxt->dev_props->mhi_ver != 0xFFFFFFFF)
			return MHI_STATUS_ERROR;
		ret_val = mhi_wait_for_mdm(mhi_dev_ctxt);
		if (ret_val)
			return MHI_STATUS_ERROR;
	}
	/* Enable the channels */
	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
		struct mhi_chan_ctxt *chan_ctxt =
			&mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i];

		if (VALID_CHAN_NR(i))
			chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
		else
			chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_DISABLED;
	}
	mhi_log(MHI_MSG_INFO,
			"Channel contexts initialized. Moving on..\n");
	mhi_log(MHI_MSG_INFO, "Reading channel doorbell offset\n");

	mhi_dev_ctxt->channel_db_addr = mhi_dev_ctxt->mmio_addr;
	mhi_dev_ctxt->event_db_addr = mhi_dev_ctxt->mmio_addr;

	mhi_dev_ctxt->channel_db_addr += mhi_reg_read_field(
					mhi_dev_ctxt->mmio_addr,
					CHDBOFF, CHDBOFF_CHDBOFF_MASK,
					CHDBOFF_CHDBOFF_SHIFT);

	mhi_log(MHI_MSG_INFO, "Reading event doorbell offset\n");
	mhi_dev_ctxt->event_db_addr += mhi_reg_read_field(
						mhi_dev_ctxt->mmio_addr,
						ERDBOFF, ERDBOFF_ERDBOFF_MASK,
						ERDBOFF_ERDBOFF_SHIFT);

	mhi_log(MHI_MSG_INFO, "Setting all MMIO values.\n");

	mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, MHICFG,
				MHICFG_NER_MASK, MHICFG_NER_SHIFT,
				MHI_MAX_CHANNELS);

	pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list);
	pcie_word_val = HIGH_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			    mhi_dev_ctxt->mmio_addr, CCABAP_HIGHER,
			    CCABAP_HIGHER_CCABAP_HIGHER_MASK,
			CCABAP_HIGHER_CCABAP_HIGHER_SHIFT, pcie_word_val);
	pcie_word_val = LOW_WORD(pcie_dword_val);

	mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, CCABAP_LOWER,
				CCABAP_LOWER_CCABAP_LOWER_MASK,
				CCABAP_LOWER_CCABAP_LOWER_SHIFT,
				pcie_word_val);

	/* Write the Event Context Base Address Register High and Low parts */
	pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list);
	pcie_word_val = HIGH_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, ECABAP_HIGHER,
			ECABAP_HIGHER_ECABAP_HIGHER_MASK,
			ECABAP_HIGHER_ECABAP_HIGHER_SHIFT, pcie_word_val);
	pcie_word_val = LOW_WORD(pcie_dword_val);

	mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, ECABAP_LOWER,
			ECABAP_LOWER_ECABAP_LOWER_MASK,
			ECABAP_LOWER_ECABAP_LOWER_SHIFT, pcie_word_val);


	/* Write the Command Ring Control Register High and Low parts */
	pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
		(uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_cmd_ctxt_list);
	pcie_word_val = HIGH_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
				mhi_dev_ctxt->mmio_addr, CRCBAP_HIGHER,
				CRCBAP_HIGHER_CRCBAP_HIGHER_MASK,
				CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT,
				pcie_word_val);
	pcie_word_val = LOW_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, CRCBAP_LOWER,
			CRCBAP_LOWER_CRCBAP_LOWER_MASK,
			CRCBAP_LOWER_CRCBAP_LOWER_SHIFT,
			pcie_word_val);

	mhi_dev_ctxt->cmd_db_addr = mhi_dev_ctxt->mmio_addr + CRDB_LOWER;
	/* Set the control segment in the MMIO */
	pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
				(uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg);
	pcie_word_val = HIGH_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, MHICTRLBASE_HIGHER,
			MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK,
			MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT,
			pcie_word_val);

	pcie_word_val = LOW_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, MHICTRLBASE_LOWER,
			MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK,
			MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT,
			pcie_word_val);

	pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
				(uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg) +
		mhi_get_memregion_len(mhi_dev_ctxt->mhi_ctrl_seg_info) - 1;

	pcie_word_val = HIGH_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, MHICTRLLIMIT_HIGHER,
			MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK,
			MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT,
			pcie_word_val);
	pcie_word_val = LOW_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, MHICTRLLIMIT_LOWER,
			MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK,
			MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT,
			pcie_word_val);

	/* Set the data segment in the MMIO */
	pcie_dword_val = MHI_DATA_SEG_WINDOW_START_ADDR;
	pcie_word_val = HIGH_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, MHIDATABASE_HIGHER,
			MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK,
			MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT,
			pcie_word_val);

	pcie_word_val = LOW_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, MHIDATABASE_LOWER,
			MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK,
			MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT,
			pcie_word_val);

	pcie_dword_val = MHI_DATA_SEG_WINDOW_END_ADDR;

	pcie_word_val = HIGH_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, MHIDATALIMIT_HIGHER,
			MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK,
			MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT,
			pcie_word_val);
	pcie_word_val = LOW_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			    mhi_dev_ctxt->mmio_addr, MHIDATALIMIT_LOWER,
			MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK,
			MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT,
			pcie_word_val);

	mhi_log(MHI_MSG_INFO, "Done..\n");
	return MHI_STATUS_SUCCESS;
}
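
Every 64-bit base address above is programmed as two 32-bit MMIO writes split by HIGH_WORD/LOW_WORD. A standalone sketch of that split; the macro definitions mirror the obvious reading of the names but are assumptions, not the driver's headers.

#include <stdio.h>
#include <stdint.h>

/* Assumed: upper and lower 32 bits of a 64-bit bus address */
#define HIGH_WORD(x)	((uint32_t)(((uint64_t)(x)) >> 32))
#define LOW_WORD(x)	((uint32_t)((uint64_t)(x) & 0xFFFFFFFFu))

int main(void)
{
	uint64_t base = 0x0000001234ABCDEFull;

	/* The driver writes these into the _HIGHER/_LOWER register pair */
	printf("HIGHER=0x%08x LOWER=0x%08x\n",
			HIGH_WORD(base), LOW_WORD(base));
	return 0;
}
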
Example No. 9
MHI_STATUS process_AMSS_transition(mhi_device_ctxt *mhi_dev_ctxt,
				STATE_TRANSITION cur_work_item)
{
	MHI_STATUS ret_val;
	u32 chan;
	unsigned long flags;
	mhi_chan_ctxt *chan_ctxt;
	mhi_log(MHI_MSG_INFO, "Processing AMSS state transition\n");
	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	mhi_assert_device_wake(mhi_dev_ctxt);

	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	ret_val = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
			cur_work_item);
	if (MHI_STATUS_SUCCESS != ret_val)
		return MHI_STATUS_ERROR;
	for (chan = 0; chan < MHI_MAX_CHANNELS; ++chan) {
		if (VALID_CHAN_NR(chan)) {
			chan_ctxt =
				&mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];
			if (MHI_CHAN_STATE_ENABLED ==
						chan_ctxt->mhi_chan_state) {
				mhi_log(MHI_MSG_INFO,
					"Starting Channel 0x%x \n", chan);
				ret_val = mhi_send_cmd(mhi_dev_ctxt,
						MHI_COMMAND_START_CHAN,
						chan);
				if (MHI_STATUS_SUCCESS != ret_val) {
					mhi_log(MHI_MSG_CRITICAL,
						"Failed to start chan 0x%x, 0x%x\n",
						chan, ret_val);
					return MHI_STATUS_ERROR;
				}
				atomic_inc(
					&mhi_dev_ctxt->start_cmd_pending_ack);
			}
		}
	}
	mhi_log(MHI_MSG_INFO, "Waiting for cmd completions\n");
	wait_event_interruptible(*mhi_dev_ctxt->chan_start_complete,
			atomic_read(&mhi_dev_ctxt->start_cmd_pending_ack) == 0);
	if (0 == mhi_dev_ctxt->flags.mhi_initialized) {

		mhi_dev_ctxt->flags.mhi_initialized = 1;
		ret_val = mhi_set_state_of_all_channels(mhi_dev_ctxt,
				MHI_CHAN_STATE_RUNNING);
		if (MHI_STATUS_SUCCESS != ret_val)
			mhi_log(MHI_MSG_CRITICAL,
					"Failed to set local chan state\n");
		if (!mhi_dev_ctxt->flags.mhi_clients_probed) {
			ret_val = probe_clients(mhi_dev_ctxt, cur_work_item);
			if (ret_val != MHI_STATUS_SUCCESS)
				mhi_log(MHI_MSG_CRITICAL,
						"Failed to probe MHI CORE clients.\n");
			mhi_dev_ctxt->flags.mhi_clients_probed = 1;

			ring_all_ev_dbs(mhi_dev_ctxt);
			ring_all_chan_dbs(mhi_dev_ctxt);
			ring_all_cmd_dbs(mhi_dev_ctxt);

		} else {
			ring_all_chan_dbs(mhi_dev_ctxt);
			mhi_log(MHI_MSG_CRITICAL,
					"Notifying clients that MHI is enabled\n");
			mhi_notify_clients(mhi_dev_ctxt,
					MHI_CB_MHI_ENABLED);
		}
		if (ret_val != MHI_STATUS_SUCCESS)
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to probe MHI CORE clients, ret 0x%x\n",
				ret_val);
	}
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	mhi_log(MHI_MSG_INFO, "Exited\n");
	return MHI_STATUS_SUCCESS;
}
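
The start commands above are tracked by an atomic pending-ack counter: each issued command increments it, each completion decrements it, and the transition thread sleeps until it hits zero. A simplified single-threaded illustration of that counting discipline, using C11 atomics in place of the kernel's atomic_t and wait queue:

#include <stdio.h>
#include <stdatomic.h>

static atomic_int start_cmd_pending_ack;

static void send_start_cmd(void)
{
	atomic_fetch_add(&start_cmd_pending_ack, 1);	/* one more in flight */
}

static void on_cmd_completion(void)
{
	atomic_fetch_sub(&start_cmd_pending_ack, 1);	/* one retired */
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		send_start_cmd();
	for (i = 0; i < 3; i++)
		on_cmd_completion();
	/* The driver instead sleeps in wait_event_interruptible() until 0 */
	printf("pending acks: %d\n", atomic_load(&start_cmd_pending_ack));
	return 0;
}
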
Example No. 10
MHI_STATUS process_RESET_transition(mhi_device_ctxt *mhi_dev_ctxt,
			STATE_TRANSITION cur_work_item)
{
	u32 i = 0;
	u32 ev_ring_index;
	MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	mhi_log(MHI_MSG_INFO, "Processing RESET state transition\n");
	mhi_dev_ctxt->counters.mhi_reset_cntr++;

	if (mhi_dev_ctxt->counters.mhi_reset_cntr >= 10)
		panic("CP Crash: need CP dump");
	ret_val = mhi_test_for_device_ready(mhi_dev_ctxt);
	switch (ret_val) {
	case MHI_STATUS_SUCCESS:
		mhi_dev_ctxt->counters.mhi_reset_cntr = 0;
		mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
		ret_val = mhi_init_state_transition(mhi_dev_ctxt,
					STATE_TRANSITION_READY);
		if (MHI_STATUS_SUCCESS != ret_val)
			mhi_log(MHI_MSG_CRITICAL,
			"Failed to initiate 0x%x state trans\n",
			STATE_TRANSITION_READY);
		break;
	case MHI_STATUS_LINK_DOWN:
		mhi_log(MHI_MSG_CRITICAL, "Link down detected\n");
		break;
	case MHI_STATUS_DEVICE_NOT_READY:
		ret_val = mhi_init_state_transition(mhi_dev_ctxt,
					STATE_TRANSITION_RESET);
		if (MHI_STATUS_SUCCESS != ret_val)
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to initiate 0x%x state trans\n",
				STATE_TRANSITION_RESET);
		break;
	default:
		mhi_log(MHI_MSG_CRITICAL,
			"Unexpected ret code detected for\n");
		break;
	}
	/*
	 * Synchronize the local rp/wp with the ctxt rp/wp.
	 * This will enable the device to pick up exactly where it left
	 * off, should this be an SSR recovery.
	 */
	for (i = 0; i < NR_OF_CMD_RINGS; ++i) {
		mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp =
				mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
		mhi_dev_ctxt->mhi_local_cmd_ctxt[i].wp =
				mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_cmd_ctxt_list[i].
						mhi_cmd_ring_read_ptr =
			mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp);
	}
	for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
		ev_ring_index = mhi_dev_ctxt->alloced_ev_rings[i];
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[ev_ring_index].
						mhi_event_read_ptr =
			mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->
				mhi_local_event_ctxt[ev_ring_index].rp);
	}
	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
		if (VALID_CHAN_NR(i)) {
			mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i].
							mhi_trb_read_ptr =
			mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[i].rp);
			mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i].
							mhi_trb_write_ptr =
			mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[i].wp);
		}
	}

	/* reset outbound_ack count */
	atomic_set(&mhi_dev_ctxt->counters.outbound_acks, 0);
	return ret_val;
}
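
mhi_v2p_addr presumably maps a kernel-virtual ring pointer to the bus/physical address the device consumes; for a single physically contiguous control segment that reduces to an offset from matching bases. A standalone sketch of that translation with an invented region descriptor:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Invented: one contiguous region with known virtual and physical bases */
struct mem_region {
	uintptr_t virt_base;
	uint64_t phys_base;
	size_t len;
};

static uint64_t v2p_addr(const struct mem_region *mr, uintptr_t virt)
{
	/* Same offset from either base; caller must stay inside the region */
	return mr->phys_base + (uint64_t)(virt - mr->virt_base);
}

int main(void)
{
	struct mem_region mr = {
		.virt_base = 0x10000000u,
		.phys_base = 0x80000000u,
		.len = 0x10000,
	};

	printf("phys=0x%llx\n",
			(unsigned long long)v2p_addr(&mr, mr.virt_base + 0x40));
	return 0;
}
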
Example No. 11
static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
				size_t count, loff_t *offp)
{
	int amnt_copied = 0;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_device_ctxt *mhi_dev_ctxt =
		mhi_devices.device_list[0].mhi_ctxt;
	uintptr_t v_wp_index;
	uintptr_t v_rp_index;
	int valid_chan = 0;
	struct mhi_chan_ctxt *cc_list;
	struct mhi_client_handle *client_handle;

	if (NULL == mhi_dev_ctxt)
		return -EIO;
	cc_list = mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list;
	*offp = (u32)(*offp) % MHI_MAX_CHANNELS;

	while (!valid_chan) {
		client_handle = mhi_dev_ctxt->client_handle_list[*offp];
		if (*offp == (MHI_MAX_CHANNELS - 1))
			msleep(1000);
		if (!VALID_CHAN_NR(*offp) ||
		    !cc_list[*offp].mhi_trb_ring_base_addr ||
		    !client_handle) {
			*offp += 1;
			*offp = (u32)(*offp) % MHI_MAX_CHANNELS;
			continue;
		}
		valid_chan = 1;
	}

	chan_ctxt = &cc_list[*offp];
	get_element_index(&mhi_dev_ctxt->mhi_local_chan_ctxt[*offp],
			mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].rp,
			&v_rp_index);
	get_element_index(&mhi_dev_ctxt->mhi_local_chan_ctxt[*offp],
			mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
			&v_wp_index);

	amnt_copied =
	scnprintf(mhi_dev_ctxt->chan_info,
		MHI_LOG_SIZE,
		"%s0x%x %s %d %s 0x%x %s 0x%llx %s %p %s %p %s %lu %s %p %s %lu %s %d %s %d\n",
		"chan:",
		(unsigned int)*offp,
		"pkts from dev:",
		mhi_dev_ctxt->counters.chan_pkts_xferd[*offp],
		"state:",
		chan_ctxt->mhi_chan_state,
		"p_base:",
		chan_ctxt->mhi_trb_ring_base_addr,
		"v_base:",
		mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].base,
		"v_wp:",
		mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
		"index:",
		v_wp_index,
		"v_rp:",
		mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].rp,
		"index:",
		v_rp_index,
		"pkts_queued",
		get_nr_avail_ring_elements(
		&mhi_dev_ctxt->mhi_local_chan_ctxt[*offp]),
		"/",
		client_handle->chan_info.max_desc);

	*offp += 1;

	if (amnt_copied < count)
		return amnt_copied -
			copy_to_user(buf, mhi_dev_ctxt->chan_info, amnt_copied);
	else
		return -ENOMEM;
}
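
The read handler formats into a fixed driver buffer with scnprintf, then copies out only what was actually written. A userspace analogue of that bounded format-then-copy pattern, with snprintf clamped the way scnprintf behaves and fwrite standing in for copy_to_user:

#include <stdio.h>

#define LOG_SIZE 64

int main(void)
{
	char info[LOG_SIZE];
	int n;

	/* snprintf returns the would-be length; clamp it like scnprintf,
	 * which never reports more than it stored */
	n = snprintf(info, sizeof(info), "chan:0x%x pkts:%d state:%d\n",
			4, 128, 2);
	if (n >= (int)sizeof(info))
		n = sizeof(info) - 1;
	fwrite(info, 1, n, stdout);	/* copy_to_user in the driver */
	return 0;
}
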
Example No. 12
/**
 * @brief Parse a transfer completion event and hand inbound data to
 * the MHI client. This invokes the callback for the MHI client to
 * inform it of data availability.
 *
 * @param ctxt void pointer to a device context
 * @param event the event ring element to parse
 */
MHI_STATUS parse_xfer_event(mhi_device_ctxt *ctxt, mhi_event_pkt *event)
{
	mhi_device_ctxt *mhi_dev_ctxt = (mhi_device_ctxt *)ctxt;
	mhi_result *result;
	u32 chan = MHI_MAX_CHANNELS;
	u16 xfer_len;
	uintptr_t phy_ev_trb_loc;
	mhi_xfer_pkt *local_ev_trb_loc;
	mhi_client_handle *client_handle;
	mhi_xfer_pkt *local_trb_loc;
	mhi_chan_ctxt *chan_ctxt;
	u32 nr_trb_to_parse;
	u32 i = 0;

	switch (MHI_EV_READ_CODE(EV_TRB_CODE, event)) {
	case MHI_EVENT_CC_EOB:
		mhi_log(MHI_MSG_VERBOSE, "IEOB condition detected\n");
		/* fall through */
	case MHI_EVENT_CC_OVERFLOW:
		mhi_log(MHI_MSG_VERBOSE, "Overflow condition detected\n");
		/* fall through */
	case MHI_EVENT_CC_EOT:
	{
		void *trb_data_loc;
		u32 ieot_flag;
		MHI_STATUS ret_val;
		mhi_ring *local_chan_ctxt;

		chan = MHI_EV_READ_CHID(EV_CHID, event);
		local_chan_ctxt =
			&mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
		phy_ev_trb_loc = MHI_EV_READ_PTR(EV_PTR, event);

		if (unlikely(!VALID_CHAN_NR(chan))) {
			mhi_log(MHI_MSG_ERROR, "Bad ring id.\n");
			break;
		}
		chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];
		ret_val = validate_xfer_el_addr(chan_ctxt,
						phy_ev_trb_loc);

		if (unlikely(MHI_STATUS_SUCCESS != ret_val)) {
			mhi_log(MHI_MSG_ERROR, "Bad event trb ptr.\n");
			break;
		}

		/* Get the TRB this event points to*/
		local_ev_trb_loc =
			(mhi_xfer_pkt *)mhi_p2v_addr(
					mhi_dev_ctxt->mhi_ctrl_seg_info,
							phy_ev_trb_loc);
		local_trb_loc = (mhi_xfer_pkt *)local_chan_ctxt->rp;

		ret_val = get_nr_enclosed_el(local_chan_ctxt,
				      local_trb_loc,
				      local_ev_trb_loc,
				      &nr_trb_to_parse);
		if (unlikely(MHI_STATUS_SUCCESS != ret_val)) {
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to get nr available trbs ret: %d.\n",
				ret_val);
			return MHI_STATUS_ERROR;
		}
		do {
			u64 phy_buf_loc;
			MHI_TRB_GET_INFO(TX_TRB_IEOT, local_trb_loc, ieot_flag);
			phy_buf_loc = local_trb_loc->data_tx_pkt.buffer_ptr;
			trb_data_loc = (void *)(uintptr_t)phy_buf_loc;
			if (chan % 2)
				xfer_len = MHI_EV_READ_LEN(EV_LEN, event);
			else
				xfer_len = MHI_TX_TRB_GET_LEN(TX_TRB_LEN,
							local_trb_loc);

			if (!VALID_BUF(trb_data_loc, xfer_len)) {
				mhi_log(MHI_MSG_CRITICAL,
					"Bad buffer ptr: %p.\n",
					trb_data_loc);
				return MHI_STATUS_ERROR;
			}

			client_handle = mhi_dev_ctxt->client_handle_list[chan];
			if (NULL != client_handle) {
				client_handle->pkt_count++;
				result = &client_handle->result;
				result->payload_buf = trb_data_loc;
				result->bytes_xferd = xfer_len;
				result->user_data = client_handle->user_data;
			}
			if (chan % 2) {
				parse_inbound(mhi_dev_ctxt, chan,
						local_ev_trb_loc, xfer_len);
			} else {
				parse_outbound(mhi_dev_ctxt, chan,
						local_ev_trb_loc, xfer_len);
			}
			mhi_dev_ctxt->mhi_chan_cntr[chan].pkts_xferd++;
			if (local_trb_loc ==
				(mhi_xfer_pkt *)local_chan_ctxt->rp) {
				mhi_log(MHI_MSG_CRITICAL,
					"Done. Processed until: %p.\n",
					trb_data_loc);
				break;
			} else {
				local_trb_loc =
					(mhi_xfer_pkt *)local_chan_ctxt->rp;
			}
			i++;
		} while (i <= nr_trb_to_parse);
		break;
	} /* CC_EOT */
	case MHI_EVENT_CC_OOB:
	case MHI_EVENT_CC_DB_MODE:
	{
		mhi_ring *chan_ctxt = NULL;
		u64 db_value = 0;
		mhi_dev_ctxt->uldl_enabled = 1;
		chan = MHI_EV_READ_CHID(EV_CHID, event);
		mhi_dev_ctxt->db_mode[chan] = 1;
		chan_ctxt =
			&mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
		mhi_log(MHI_MSG_INFO, "OOB Detected chan %d.\n", chan);
		if (chan_ctxt->wp != chan_ctxt->rp) {
			db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
							(uintptr_t)chan_ctxt->wp);
			MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->channel_db_addr, chan,
				db_value);
		}
		client_handle = mhi_dev_ctxt->client_handle_list[chan];
		/* 'result' is never initialized on this path; write the
		 * client's result directly */
		if (NULL != client_handle)
			client_handle->result.transaction_status =
					MHI_STATUS_DEVICE_NOT_READY;
		break;
	}
	default:
		mhi_log(MHI_MSG_ERROR,
			"Unknown TX completion.\n");
		break;
	} /* switch (MHI_EV_READ_CODE(EV_TRB_CODE, event)) */
	return MHI_STATUS_SUCCESS;
}
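
get_nr_enclosed_el reports how many TRBs sit between the channel's read pointer and the TRB named by the event; on a circular ring that is a wrap-aware index distance. A standalone sketch of that computation over an invented ring size:

#include <stdio.h>

#define RING_ELEMENTS 8

/* Elements from index 'from' up to and including 'to', walking
 * forward with wraparound */
static unsigned int nr_enclosed_el(unsigned int from, unsigned int to)
{
	return (to + RING_ELEMENTS - from) % RING_ELEMENTS + 1;
}

int main(void)
{
	/* Walk 6 -> 7 -> 0 -> 1: four elements */
	printf("%u\n", nr_enclosed_el(6, 1));
	return 0;
}
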
Example No. 13
/**
 * @brief Function for sending data on an outbound channel.
 * This function only sends one TRE's worth of
 * data and may chain the TRE as specified by the caller.
 *
 * @param client_handle [IN ] Handle for the channel to send the TRE on
 * @param buf [IN ] Physical address of buffer to be linked to descriptor
 * @param buf_len [IN ] Length of buffer, which will be populated in the TRE
 * @param chain [IN ] Specification on whether this TRE should be chained
 * @param eob [IN ] Whether to request an end-of-block interrupt for this TRE
 *
 * @return MHI_STATUS
 */
MHI_STATUS mhi_queue_xfer(mhi_client_handle *client_handle,
		uintptr_t buf, size_t buf_len, u32 chain, u32 eob)
{
	mhi_xfer_pkt *pkt_loc;
	MHI_STATUS ret_val;
	MHI_CLIENT_CHANNEL chan;
	mhi_device_ctxt *mhi_dev_ctxt;
	unsigned long flags;

	if (NULL == client_handle || !VALID_CHAN_NR(client_handle->chan) ||
		0 == buf || chain >= MHI_TRE_CHAIN_LIMIT || 0 == buf_len) {
		mhi_log(MHI_MSG_CRITICAL, "Bad input args\n");
		return MHI_STATUS_ERROR;
	}
	MHI_ASSERT(VALID_BUF(buf, buf_len),
			"Client buffer is of invalid length\n");
	mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
	chan = client_handle->chan;

	/* Bump up the vote for pending data */
	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);

	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	mhi_dev_ctxt->counters.m1_m0++;
	if (mhi_dev_ctxt->flags.link_up)
		mhi_assert_device_wake(mhi_dev_ctxt);
	read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	pkt_loc = mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp;
	pkt_loc->data_tx_pkt.buffer_ptr = buf;

	if (likely(0 != client_handle->intmod_t))
		MHI_TRB_SET_INFO(TX_TRB_BEI, pkt_loc, 1);
	else
		MHI_TRB_SET_INFO(TX_TRB_BEI, pkt_loc, 0);

	MHI_TRB_SET_INFO(TX_TRB_IEOT, pkt_loc, 1);
	MHI_TRB_SET_INFO(TX_TRB_CHAIN, pkt_loc, chain);
	MHI_TRB_SET_INFO(TX_TRB_IEOB, pkt_loc, eob);
	MHI_TRB_SET_INFO(TX_TRB_TYPE, pkt_loc, MHI_PKT_TYPE_TRANSFER);
	MHI_TX_TRB_SET_LEN(TX_TRB_LEN, pkt_loc, buf_len);

	if (chan % 2 == 0) {
		atomic_inc(&mhi_dev_ctxt->counters.outbound_acks);
		mhi_log(MHI_MSG_VERBOSE,
			"Queued outbound pkt. Pending Acks %d\n",
			atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
	}

	/* Add the TRB to the correct transfer ring */
	ret_val = ctxt_add_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
				(void *)&pkt_loc);
	if (unlikely(MHI_STATUS_SUCCESS != ret_val)) {
		mhi_log(MHI_MSG_INFO, "Failed to insert trb in xfer ring\n");
		goto error;
	}
	mhi_notify_device(mhi_dev_ctxt, chan);
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	return MHI_STATUS_SUCCESS;
error:
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	return ret_val;
}
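
The MHI_TRB_SET_INFO calls above each perform a read-modify-write of one flag field in the TRB's packed info word. A standalone sketch of that set-field idiom; the two flag positions are invented for the example.

#include <stdio.h>
#include <stdint.h>

/* Assumed layout: CHAIN at bit 0, IEOT at bit 1 of the info word */
#define TRB_CHAIN_MASK	0x1u
#define TRB_CHAIN_SHIFT	0
#define TRB_IEOT_MASK	0x2u
#define TRB_IEOT_SHIFT	1

#define TRB_SET_FIELD(word, mask, shift, val) \
	((word) = ((word) & ~(mask)) | (((val) << (shift)) & (mask)))

int main(void)
{
	uint32_t info = 0;

	TRB_SET_FIELD(info, TRB_CHAIN_MASK, TRB_CHAIN_SHIFT, 1u);
	TRB_SET_FIELD(info, TRB_IEOT_MASK, TRB_IEOT_SHIFT, 1u);
	printf("info=0x%x\n", info);	/* prints 0x3 */
	return 0;
}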