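/*
 * Handle the READY state transition: reset the thread queues, initialize
 * the MMIO register space, seed the event rings and request a transition
 * to M0.
 */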
enum MHI_STATUS process_READY_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
			enum STATE_TRANSITION cur_work_item)
{
	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	mhi_log(MHI_MSG_INFO, "Processing READY state transition\n");
	mhi_dev_ctxt->mhi_state = MHI_STATE_READY;

	ret_val = mhi_reset_all_thread_queues(mhi_dev_ctxt);

	if (ret_val != MHI_STATUS_SUCCESS)
		mhi_log(MHI_MSG_ERROR,
			"Failed to reset thread queues\n");

	/* Initialize MMIO */
	if (mhi_init_mmio(mhi_dev_ctxt) != MHI_STATUS_SUCCESS) {
		mhi_log(MHI_MSG_ERROR,
			"Failure during MMIO initialization\n");
		return MHI_STATUS_ERROR;
	}
	ret_val = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
				cur_work_item);
	if (ret_val != MHI_STATUS_SUCCESS) {
		mhi_log(MHI_MSG_ERROR,
			"Failure during event ring init\n");
		return MHI_STATUS_ERROR;
	}
	mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, MHICTRL,
			MHICTRL_MHISTATE_MASK,
			MHICTRL_MHISTATE_SHIFT,
			MHI_STATE_M0);
	return MHI_STATUS_SUCCESS;
}
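
/*
 * Handle the M1 state transition: cancel the inactivity timer, move the
 * device to M2 unless an M3 transition is pending, drop the bus vote and
 * schedule the deferred M3 work.
 */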
enum MHI_STATUS process_M1_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
		enum STATE_TRANSITION cur_work_item)
{
	unsigned long flags = 0;
	int ret_val = 0;
	mhi_log(MHI_MSG_INFO,
			"Processing M1 state transition from state %d\n",
			mhi_dev_ctxt->mhi_state);

	mhi_dev_ctxt->counters.m0_m1++;
	mhi_log(MHI_MSG_VERBOSE,
		"Cancelling Inactivity timer\n");
	switch (hrtimer_try_to_cancel(&mhi_dev_ctxt->m1_timer)) {
	case 0:
		mhi_log(MHI_MSG_VERBOSE,
			"Timer was not active\n");
		break;
	case 1:
		mhi_log(MHI_MSG_VERBOSE,
			"Timer was active\n");
		break;
	case -1:
		mhi_log(MHI_MSG_VERBOSE,
			"Timer executing and can't stop\n");
		break;
	}
	if (!mhi_dev_ctxt->flags.link_up) {
		mhi_log(MHI_MSG_ERROR, "pcie link is not up\n");
		return MHI_STATUS_ERROR;
	}
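	/*
	 * If no M3 transition is pending, let the device move to M2.
	 * Otherwise record the deferred M2 transition in cp_m1_state so
	 * the M3 path can complete it if the suspend is aborted.
	 */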
	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	if (!mhi_dev_ctxt->flags.pending_M3) {
		atomic_set(&mhi_dev_ctxt->flags.cp_m1_state, 0);
		mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
		mhi_log(MHI_MSG_INFO, "Allowing transition to M2\n");
		mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, MHICTRL,
			MHICTRL_MHISTATE_MASK,
			MHICTRL_MHISTATE_SHIFT,
			MHI_STATE_M2);
		mhi_dev_ctxt->counters.m1_m2++;
	} else {
		mhi_log(MHI_MSG_INFO, "Skip M2 transition\n");
		atomic_set(&mhi_dev_ctxt->flags.cp_m1_state, 1);
	}
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	ret_val = mhi_set_bus_request(mhi_dev_ctxt, 0);
	if (ret_val)
		mhi_log(MHI_MSG_INFO, "Failed to update bus request\n");
	if (!atomic_cmpxchg(&mhi_dev_ctxt->flags.m3_work_enabled, 0, 1)) {
		mhi_log(MHI_MSG_INFO, "Starting M3 deferred work\n");
		ret_val = queue_delayed_work(mhi_dev_ctxt->work_queue,
				     &mhi_dev_ctxt->m3_work,
				     msecs_to_jiffies(m3_timer_val_ms));
		if (ret_val == 0)
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to start M3 delayed work.\n");
	}
	return MHI_STATUS_SUCCESS;
}
static inline void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
					enum MHI_STATE new_state)
{
	if (new_state == MHI_STATE_RESET) {
		mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, MHICTRL,
			MHICTRL_RESET_MASK,
			MHICTRL_RESET_SHIFT,
			1);
	} else {
		mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, MHICTRL,
			MHICTRL_MHISTATE_MASK,
			MHICTRL_MHISTATE_SHIFT,
			new_state);
	}
	/* Read back MHICTRL to ensure the write has been posted */
	mhi_reg_read(mhi_dev_ctxt->mmio_addr, MHICTRL);
}
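
/*
 * Program the MHI MMIO register space: validate MHIREGLEN and MHIVER,
 * enable the channel contexts, discover the doorbell offsets and write
 * the context-array, command-ring, control and data segment addresses.
 */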
enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	u64 pcie_dword_val = 0;
	u32 pcie_word_val = 0;
	u32 i = 0;
	enum MHI_STATUS ret_val;

	mhi_log(MHI_MSG_INFO, "~~~ Initializing MMIO ~~~\n");
	mhi_dev_ctxt->mmio_addr = mhi_dev_ctxt->dev_props->bar0_base;

	mhi_log(MHI_MSG_INFO, "Bar 0 address is at: 0x%p\n",
			mhi_dev_ctxt->mmio_addr);

	mhi_dev_ctxt->mmio_len = mhi_reg_read(mhi_dev_ctxt->mmio_addr,
							 MHIREGLEN);

	if (mhi_dev_ctxt->mmio_len == 0) {
		mhi_log(MHI_MSG_ERROR, "Received mmio length as zero\n");
		return MHI_STATUS_ERROR;
	}

	mhi_log(MHI_MSG_INFO, "Testing MHI Ver\n");
	mhi_dev_ctxt->dev_props->mhi_ver = mhi_reg_read(
				mhi_dev_ctxt->mmio_addr, MHIVER);
	if (mhi_dev_ctxt->dev_props->mhi_ver != MHI_VERSION) {
		mhi_log(MHI_MSG_CRITICAL, "Bad MMIO version, 0x%x\n",
					mhi_dev_ctxt->dev_props->mhi_ver);

		/* Reads of all 0xFF suggest the link is down; wait for
		 * the device to come back before giving up.
		 */
		if (mhi_dev_ctxt->dev_props->mhi_ver != 0xFFFFFFFF)
			return MHI_STATUS_ERROR;
		ret_val = mhi_wait_for_mdm(mhi_dev_ctxt);
		if (ret_val)
			return MHI_STATUS_ERROR;
	}
	/* Enable the channels */
	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
		struct mhi_chan_ctxt *chan_ctxt =
			&mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i];

		if (VALID_CHAN_NR(i))
			chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
		else
			chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_DISABLED;
	}
	mhi_log(MHI_MSG_INFO, "Channel contexts initialized\n");
	mhi_log(MHI_MSG_INFO, "Reading channel doorbell offset\n");

	mhi_dev_ctxt->channel_db_addr = mhi_dev_ctxt->mmio_addr;
	mhi_dev_ctxt->event_db_addr = mhi_dev_ctxt->mmio_addr;

	mhi_dev_ctxt->channel_db_addr += mhi_reg_read_field(
					mhi_dev_ctxt->mmio_addr,
					CHDBOFF, CHDBOFF_CHDBOFF_MASK,
					CHDBOFF_CHDBOFF_SHIFT);

	mhi_log(MHI_MSG_INFO, "Reading event doorbell offset\n");
	mhi_dev_ctxt->event_db_addr += mhi_reg_read_field(
						mhi_dev_ctxt->mmio_addr,
						ERDBOFF, ERDBOFF_ERDBOFF_MASK,
						ERDBOFF_ERDBOFF_SHIFT);

	mhi_log(MHI_MSG_INFO, "Setting all MMIO values.\n");

	mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, MHICFG,
				MHICFG_NER_MASK, MHICFG_NER_SHIFT,
				MHI_MAX_CHANNELS);

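	/* Write the Channel Context Array Base Address High and Low parts */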
	pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list);
	pcie_word_val = HIGH_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			    mhi_dev_ctxt->mmio_addr, CCABAP_HIGHER,
			    CCABAP_HIGHER_CCABAP_HIGHER_MASK,
			CCABAP_HIGHER_CCABAP_HIGHER_SHIFT, pcie_word_val);
	pcie_word_val = LOW_WORD(pcie_dword_val);

	mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, CCABAP_LOWER,
				CCABAP_LOWER_CCABAP_LOWER_MASK,
				CCABAP_LOWER_CCABAP_LOWER_SHIFT,
				pcie_word_val);

	/* Write the Event Context Base Address Register High and Low parts */
	pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list);
	pcie_word_val = HIGH_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, ECABAP_HIGHER,
			ECABAP_HIGHER_ECABAP_HIGHER_MASK,
			ECABAP_HIGHER_ECABAP_HIGHER_SHIFT, pcie_word_val);
	pcie_word_val = LOW_WORD(pcie_dword_val);

	mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, ECABAP_LOWER,
			ECABAP_LOWER_ECABAP_LOWER_MASK,
			ECABAP_LOWER_ECABAP_LOWER_SHIFT, pcie_word_val);


	/* Write the Command Ring Control Register High and Low parts */
	pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
		(uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_cmd_ctxt_list);
	pcie_word_val = HIGH_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
				mhi_dev_ctxt->mmio_addr, CRCBAP_HIGHER,
				CRCBAP_HIGHER_CRCBAP_HIGHER_MASK,
				CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT,
				pcie_word_val);
	pcie_word_val = LOW_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, CRCBAP_LOWER,
			CRCBAP_LOWER_CRCBAP_LOWER_MASK,
			CRCBAP_LOWER_CRCBAP_LOWER_SHIFT,
			pcie_word_val);

	mhi_dev_ctxt->cmd_db_addr = mhi_dev_ctxt->mmio_addr + CRDB_LOWER;
	/* Set the control segment in the MMIO */
	pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
				(uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg);
	pcie_word_val = HIGH_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, MHICTRLBASE_HIGHER,
			MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK,
			MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT,
			pcie_word_val);

	pcie_word_val = LOW_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, MHICTRLBASE_LOWER,
			MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK,
			MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT,
			pcie_word_val);

	pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
				(uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg) +
		mhi_get_memregion_len(mhi_dev_ctxt->mhi_ctrl_seg_info) - 1;

	pcie_word_val = HIGH_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, MHICTRLLIMIT_HIGHER,
			MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK,
			MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT,
			pcie_word_val);
	pcie_word_val = LOW_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, MHICTRLLIMIT_LOWER,
			MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK,
			MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT,
			pcie_word_val);

	/* Set the data segment in the MMIO */
	pcie_dword_val = MHI_DATA_SEG_WINDOW_START_ADDR;
	pcie_word_val = HIGH_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, MHIDATABASE_HIGHER,
			MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK,
			MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT,
			pcie_word_val);

	pcie_word_val = LOW_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, MHIDATABASE_LOWER,
			MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK,
			MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT,
			pcie_word_val);

	pcie_dword_val = MHI_DATA_SEG_WINDOW_END_ADDR;

	pcie_word_val = HIGH_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			mhi_dev_ctxt->mmio_addr, MHIDATALIMIT_HIGHER,
			MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK,
			MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT,
			pcie_word_val);
	pcie_word_val = LOW_WORD(pcie_dword_val);
	mhi_reg_write_field(mhi_dev_ctxt,
			    mhi_dev_ctxt->mmio_addr, MHIDATALIMIT_LOWER,
			MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK,
			MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT,
			pcie_word_val);

	mhi_log(MHI_MSG_INFO, "Done..\n");
	return MHI_STATUS_SUCCESS;
}
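
/*
 * Initiate an M3 (suspend) transition: wake the device out of M2 if
 * needed, wait for outstanding work to drain, request M3 and turn the
 * PCIe link off once the device acknowledges.
 */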
int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	unsigned long flags;
	int r = 0;
	int abort_m3 = 0;

	mhi_log(MHI_MSG_INFO, "Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
			mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
						mhi_dev_ctxt->flags.pending_M3);
	mutex_lock(&mhi_dev_ctxt->pm_lock);

	switch (mhi_dev_ctxt->mhi_state) {
	case MHI_STATE_M1:
	case MHI_STATE_M2:
		mhi_log(MHI_MSG_INFO,
			"Triggering wake out of M2\n");
		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
		mhi_dev_ctxt->flags.pending_M3 = 1;
		mhi_assert_device_wake(mhi_dev_ctxt);
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M0_event,
				mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
				mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
				msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
		if (r == 0 || r == -ERESTARTSYS) {
			mhi_log(MHI_MSG_INFO | MHI_DBG_POWER,
				"MDM failed to come out of M2.\n");
			if (r == 0)
				r = -ETIMEDOUT;
			goto exit;
		}
		break;
	case MHI_STATE_M3:
		mhi_log(MHI_MSG_INFO,
			"MHI state %d, link state %d.\n",
				mhi_dev_ctxt->mhi_state,
				mhi_dev_ctxt->flags.link_up);
		if (mhi_dev_ctxt->flags.link_up)
			r = -EPERM;
		else
			r = 0;
		goto exit;
	case MHI_STATE_RESET:
		mhi_log(MHI_MSG_INFO,
				"MHI in RESET turning link off and quitting\n");
		mhi_turn_off_pcie_link(mhi_dev_ctxt);
		r = mhi_set_bus_request(mhi_dev_ctxt, 0);
		if (r)
			mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
		goto exit;

	default:
		mhi_log(MHI_MSG_INFO,
			"MHI state %d, link state %d.\n",
				mhi_dev_ctxt->mhi_state,
				mhi_dev_ctxt->flags.link_up);
		break;
	}
	if (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) {
		mhi_log(MHI_MSG_INFO | MHI_DBG_POWER,
			"There are still %d acks pending from device\n",
			atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
		__pm_stay_awake(&mhi_dev_ctxt->wake_lock);
		__pm_relax(&mhi_dev_ctxt->wake_lock);
		abort_m3 = 1;
		goto exit;
	}
	if (atomic_read(&mhi_dev_ctxt->flags.data_pending)) {
		abort_m3 = 1;
		goto exit;
	}
	r = hrtimer_cancel(&mhi_dev_ctxt->m1_timer);
	if (r)
		mhi_log(MHI_MSG_INFO, "Cancelled M1 timer, timer was active\n");
	else
		mhi_log(MHI_MSG_INFO, "Cancelled M1 timer, timer was not active\n");
	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	if (mhi_dev_ctxt->flags.pending_M0) {
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		mhi_log(MHI_MSG_INFO,
				"Pending M0 detected, aborting M3 procedure\n");
		r = -EPERM;
		goto exit;
	}
	mhi_dev_ctxt->flags.pending_M3 = 1;
	atomic_set(&mhi_dev_ctxt->flags.cp_m1_state, 0);
	mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	mhi_log(MHI_MSG_INFO | MHI_DBG_POWER,
			"Waiting for M3 completion.\n");
	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M3_event,
			mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
			msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
	switch (r) {
	case 0:
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
				"MDM failed to suspend after %d ms\n",
				MHI_MAX_SUSPEND_TIMEOUT);
		mhi_dev_ctxt->counters.m3_event_timeouts++;
		mhi_dev_ctxt->flags.pending_M3 = 0;
		r = -EAGAIN;
		goto exit;
	case -ERESTARTSYS:
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
				"Going Down...\n");
		goto exit;
	default:
		mhi_log(MHI_MSG_INFO | MHI_DBG_POWER,
				"M3 completion received\n");
		break;
	}
	mhi_deassert_device_wake(mhi_dev_ctxt);
	/* Turn off PCIe link*/
	mhi_turn_off_pcie_link(mhi_dev_ctxt);
	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
	if (r)
		mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
exit:
	if (abort_m3) {
		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
		atomic_inc(&mhi_dev_ctxt->flags.data_pending);
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		ring_all_chan_dbs(mhi_dev_ctxt);
		atomic_dec(&mhi_dev_ctxt->flags.data_pending);
		r = -EAGAIN;

		if (atomic_read(&mhi_dev_ctxt->flags.cp_m1_state)) {
			write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
			atomic_set(&mhi_dev_ctxt->flags.cp_m1_state, 0);
			mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
			mhi_log(MHI_MSG_INFO, "Allowing transition to M2\n");
			mhi_reg_write_field(mhi_dev_ctxt,
					mhi_dev_ctxt->mmio_addr, MHICTRL,
					MHICTRL_MHISTATE_MASK,
					MHICTRL_MHISTATE_SHIFT,
					MHI_STATE_M2);
			mhi_dev_ctxt->counters.m1_m2++;
			write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		}
	}
	/* We have to be careful here: we are setting pending_M3 to 0
	 * even if we did not set it above. This is safe because the only
	 * other entity that sets this flag must also hold the pm_lock.
	 */
	atomic_set(&mhi_dev_ctxt->flags.m3_work_enabled, 0);
	mhi_dev_ctxt->flags.pending_M3 = 0;
	mutex_unlock(&mhi_dev_ctxt->pm_lock);
	return r;
}
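
/*
 * Copy a boot image from user space into an aligned DMA buffer, push it
 * to the device over BHI and kick off the RESET state transition.
 */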
static ssize_t bhi_write(struct file *file,
		const char __user *buf,
		size_t count, loff_t *offp)
{
	ssize_t ret_val = 0;
	u32 pcie_word_val = 0;
	u32 i = 0;
	struct bhi_ctxt_t *bhi_ctxt =
		&(((struct mhi_pcie_dev_info *)file->private_data)->bhi_ctxt);
	struct mhi_device_ctxt *mhi_dev_ctxt =
		&((struct mhi_pcie_dev_info *)file->private_data)->mhi_ctxt;
	size_t amount_copied = 0;
	uintptr_t align_len = 0x1000;
	u32 tx_db_val = 0;

	if (buf == NULL || count == 0)
		return -EIO;

	if (count > BHI_MAX_IMAGE_SIZE)
		return -ENOMEM;

	if (wait_event_interruptible(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
			mhi_dev_ctxt->mhi_state == MHI_STATE_BHI))
		return -ERESTARTSYS;

	mhi_log(MHI_MSG_INFO, "Entered. User Image size 0x%zx\n", count);

	bhi_ctxt->unaligned_image_loc = kmalloc(count + (align_len - 1),
						GFP_KERNEL);
	if (bhi_ctxt->unaligned_image_loc == NULL)
		return -ENOMEM;

	mhi_log(MHI_MSG_INFO, "Unaligned Img Loc: %p\n",
			bhi_ctxt->unaligned_image_loc);
	/* Round up to the next align_len boundary; ALIGN() adds nothing
	 * when the buffer is already aligned, so the (align_len - 1)
	 * slack allocated above is always sufficient.
	 */
	bhi_ctxt->image_loc =
		(void *)ALIGN((uintptr_t)bhi_ctxt->unaligned_image_loc,
				align_len);

	mhi_log(MHI_MSG_INFO, "Aligned Img Loc: %p\n", bhi_ctxt->image_loc);

	bhi_ctxt->image_size = count;

	if (copy_from_user(bhi_ctxt->image_loc, buf, count)) {
		ret_val = -EFAULT;
		goto bhi_copy_error;
	}
	amount_copied = count;
	/* Flush the writes, in anticipation for a device read */
	wmb();
	mhi_log(MHI_MSG_INFO,
		"Copied image from user at addr: %p\n", bhi_ctxt->image_loc);
	bhi_ctxt->phy_image_loc = dma_map_single(
			&mhi_dev_ctxt->dev_info->plat_dev->dev,
			bhi_ctxt->image_loc,
			bhi_ctxt->image_size,
			DMA_TO_DEVICE);

	if (dma_mapping_error(&mhi_dev_ctxt->dev_info->plat_dev->dev,
						bhi_ctxt->phy_image_loc)) {
		ret_val = -EIO;
		goto bhi_copy_error;
	}
	mhi_log(MHI_MSG_INFO,
		"Mapped image to DMA addr 0x%lx:\n",
		(uintptr_t)bhi_ctxt->phy_image_loc);

	/* Write the image size */
	pcie_word_val = HIGH_WORD(bhi_ctxt->phy_image_loc);
	mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
				BHI_IMGADDR_HIGH,
				0xFFFFFFFF,
				0,
				pcie_word_val);

	pcie_word_val = LOW_WORD(bhi_ctxt->phy_image_loc);

	mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
				BHI_IMGADDR_LOW,
				0xFFFFFFFF,
				0,
				pcie_word_val);

	pcie_word_val = bhi_ctxt->image_size;
	mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHI_IMGSIZE,
			0xFFFFFFFF, 0, pcie_word_val);

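	/* Ring the image transfer doorbell: write back the sequence number
	 * read from BHI_IMGTXDB, incremented by one.
	 */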
	pcie_word_val = mhi_reg_read(bhi_ctxt->bhi_base, BHI_IMGTXDB);
	mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
			BHI_IMGTXDB, 0xFFFFFFFF, 0, ++pcie_word_val);

	mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHI_INTVEC, 0);

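	/* Poll BHI_STATUS until the device reports success or the retry
	 * budget is exhausted.
	 */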
	for (i = 0; i < BHI_POLL_NR_RETRIES; ++i) {
		tx_db_val = mhi_reg_read_field(bhi_ctxt->bhi_base,
						BHI_STATUS,
						BHI_STATUS_MASK,
						BHI_STATUS_SHIFT);
		mhi_log(MHI_MSG_CRITICAL, "BHI STATUS 0x%x\n", tx_db_val);
		if (tx_db_val != BHI_STATUS_SUCCESS)
			mhi_log(MHI_MSG_CRITICAL,
				"Incorrect BHI status: %d retry: %d\n",
				tx_db_val, i);
		else
			break;
		usleep_range(20000, 25000);
	}
	dma_unmap_single(&mhi_dev_ctxt->dev_info->plat_dev->dev,
			bhi_ctxt->phy_image_loc,
			bhi_ctxt->image_size, DMA_TO_DEVICE);

	kfree(bhi_ctxt->unaligned_image_loc);

	if (mhi_init_state_transition(mhi_dev_ctxt,
			STATE_TRANSITION_RESET) != MHI_STATUS_SUCCESS)
		mhi_log(MHI_MSG_CRITICAL,
				"Failed to start state change event\n");
	return amount_copied;

bhi_copy_error:
	kfree(bhi_ctxt->unaligned_image_loc);
	return ret_val;
}