MHI_STATUS process_M1_transition(mhi_device_ctxt *mhi_dev_ctxt,
		STATE_TRANSITION cur_work_item)
{
	unsigned long flags = 0;
	int ret_val = 0;

	mhi_log(MHI_MSG_INFO,
		"Processing M1 state transition from state %d\n",
		mhi_dev_ctxt->mhi_state);
	mhi_dev_ctxt->counters.m0_m1++;

	mhi_log(MHI_MSG_VERBOSE, "Cancelling Inactivity timer\n");
	switch (hrtimer_try_to_cancel(&mhi_dev_ctxt->m1_timer)) {
	case 0:
		mhi_log(MHI_MSG_VERBOSE, "Timer was not active\n");
		break;
	case 1:
		mhi_log(MHI_MSG_VERBOSE, "Timer was active\n");
		break;
	case -1:
		mhi_log(MHI_MSG_VERBOSE, "Timer executing and can't stop\n");
		break;
	}

	if (mhi_dev_ctxt->flags.link_up == 0) {
		mhi_log(MHI_MSG_ERROR, "pcie link is not up\n");
		return MHI_STATUS_ERROR;
	}

	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	if (!mhi_dev_ctxt->flags.pending_M3) {
		atomic_set(&mhi_dev_ctxt->flags.cp_m1_state, 0);
		mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
		mhi_log(MHI_MSG_INFO, "Allowing transition to M2\n");
		mhi_reg_write_field(mhi_dev_ctxt->mmio_addr, MHICTRL,
				MHICTRL_MHISTATE_MASK,
				MHICTRL_MHISTATE_SHIFT,
				MHI_STATE_M2);
		mhi_dev_ctxt->counters.m1_m2++;
	} else {
		mhi_log(MHI_MSG_INFO, "Skip M2 transition\n");
		atomic_set(&mhi_dev_ctxt->flags.cp_m1_state, 1);
	}
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	ret_val = mhi_set_bus_request(mhi_dev_ctxt, 0);
	if (ret_val)
		mhi_log(MHI_MSG_INFO, "Failed to update bus request\n");

	if (!atomic_cmpxchg(&mhi_dev_ctxt->flags.m3_work_enabled, 0, 1)) {
		mhi_log(MHI_MSG_INFO, "Starting M3 deferred work\n");
		ret_val = queue_delayed_work(mhi_dev_ctxt->work_queue,
				&mhi_dev_ctxt->m3_work,
				msecs_to_jiffies(m3_timer_val_ms));
		if (ret_val == 0)
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to start M3 delayed work.\n");
	}
	return MHI_STATUS_SUCCESS;
}
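/*
 * process_M0_transition - Handle a device transition into M0 (active).
 *
 * Cancels any pending M3 (suspend) work, updates the M2/M3 -> M0 counters,
 * asserts device wake and rings all event, channel and command doorbells
 * while holding a data_pending proxy vote so a concurrent M3 cannot slip
 * in, then scales the bus request back up, wakes any M0 waiters and
 * re-arms the M1 inactivity timer.
 */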
MHI_STATUS process_M0_transition(mhi_device_ctxt *mhi_dev_ctxt,
		STATE_TRANSITION cur_work_item)
{
	unsigned long flags;
	int ret_val;

	mhi_log(MHI_MSG_INFO, "Entered\n");

	ret_val = cancel_delayed_work(&mhi_dev_ctxt->m3_work);
	if (ret_val) {
		atomic_set(&mhi_dev_ctxt->flags.m3_work_enabled, 0);
		mhi_log(MHI_MSG_INFO, "M3 work was cancelled\n");
	} else {
		mhi_log(MHI_MSG_INFO,
			"M3 work NOT cancelled, either running or never started\n");
	}

	/* Record which state we are transitioning to M0 from */
	if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
		mhi_dev_ctxt->counters.m2_m0++;
	} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
		mhi_dev_ctxt->counters.m3_m0++;
	} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_READY) {
		/* Coming out of READY is expected; nothing to count */
	} else {
		mhi_log(MHI_MSG_INFO,
			"MHI State %d link state %d. Quitting\n",
			mhi_dev_ctxt->mhi_state,
			mhi_dev_ctxt->flags.link_up);
		goto exit;
	}

	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->mhi_state = MHI_STATE_M0;
	/*
	 * Bump up the vote for pending data: a proxy vote to prevent M3
	 * while we are ringing DBs.
	 */
	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	mhi_assert_device_wake(mhi_dev_ctxt);
	read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	if (mhi_dev_ctxt->flags.mhi_initialized) {
		ring_all_ev_dbs(mhi_dev_ctxt);
		ring_all_chan_dbs(mhi_dev_ctxt);
		ring_all_cmd_dbs(mhi_dev_ctxt);
	}
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);

	ret_val = mhi_set_bus_request(mhi_dev_ctxt, 1);
	if (ret_val)
		mhi_log(MHI_MSG_CRITICAL,
			"Could not set bus frequency ret: %d\n", ret_val);

	mhi_dev_ctxt->flags.pending_M0 = 0;
	atomic_set(&mhi_dev_ctxt->flags.cp_m1_state, 0);
	wake_up_interruptible(mhi_dev_ctxt->M0_event);

	ret_val = hrtimer_start(&mhi_dev_ctxt->m1_timer,
			mhi_dev_ctxt->m1_timeout, HRTIMER_MODE_REL);
	mhi_log(MHI_MSG_VERBOSE, "Starting M1 timer, ret %d\n", ret_val);
exit:
	mhi_log(MHI_MSG_INFO, "Exited\n");
	return MHI_STATUS_SUCCESS;
}
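/*
 * The m1_timer used above is a standard Linux hrtimer, so it must have been
 * set up elsewhere (likely at init time) with hrtimer_init() and a callback
 * returning enum hrtimer_restart. A minimal sketch of what that setup would
 * look like; the callback name mhi_m1_timer_cb is illustrative, not the
 * driver's actual symbol:
 *
 *	static enum hrtimer_restart mhi_m1_timer_cb(struct hrtimer *timer)
 *	{
 *		struct mhi_device_ctxt *ctxt =
 *			container_of(timer, struct mhi_device_ctxt, m1_timer);
 *
 *		(queue the M1 state transition for ctxt from timer context)
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	hrtimer_init(&mhi_dev_ctxt->m1_timer, CLOCK_MONOTONIC,
 *			HRTIMER_MODE_REL);
 *	mhi_dev_ctxt->m1_timer.function = mhi_m1_timer_cb;
 */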
static enum MHI_STATUS process_m0_transition(
		struct mhi_device_ctxt *mhi_dev_ctxt,
		enum STATE_TRANSITION cur_work_item)
{
	unsigned long flags;
	int ret_val;

	mhi_log(MHI_MSG_INFO, "Entered\n");

	if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
		mhi_dev_ctxt->counters.m2_m0++;
	} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
		mhi_dev_ctxt->counters.m3_m0++;
	} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_READY) {
		mhi_log(MHI_MSG_INFO, "Transitioning from READY.\n");
	} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
		mhi_log(MHI_MSG_INFO, "Transitioning from M1.\n");
	} else {
		mhi_log(MHI_MSG_INFO,
			"MHI State %d link state %d. Quitting\n",
			mhi_dev_ctxt->mhi_state,
			mhi_dev_ctxt->flags.link_up);
	}

	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->mhi_state = MHI_STATE_M0;
	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	mhi_assert_device_wake(mhi_dev_ctxt);
	read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	if (mhi_dev_ctxt->flags.mhi_initialized) {
		ring_all_ev_dbs(mhi_dev_ctxt);
		ring_all_chan_dbs(mhi_dev_ctxt);
		ring_all_cmd_dbs(mhi_dev_ctxt);
	}
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);

	ret_val = mhi_set_bus_request(mhi_dev_ctxt, 1);
	if (ret_val)
		mhi_log(MHI_MSG_CRITICAL,
			"Could not set bus frequency ret: %d\n", ret_val);

	mhi_dev_ctxt->flags.pending_M0 = 0;
	if (atomic_read(&mhi_dev_ctxt->flags.pending_powerup)) {
		atomic_set(&mhi_dev_ctxt->flags.pending_ssr, 0);
		atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 0);
	}
	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m0_event);

	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	if (!mhi_dev_ctxt->flags.pending_M3 &&
	    mhi_dev_ctxt->flags.link_up &&
	    mhi_dev_ctxt->flags.mhi_initialized)
		mhi_deassert_device_wake(mhi_dev_ctxt);
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	mhi_log(MHI_MSG_INFO, "Exited\n");
	return MHI_STATUS_SUCCESS;
}
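/*
 * process_m1_transition - Move the device from M1 toward M2 (low power).
 *
 * If no M3 (suspend) is pending, writes the M2 state to the device, drops
 * the bus vote, then debounces the transition for MHI_M2_DEBOUNCE_TMR_MS.
 * After the debounce it either re-asserts device wake (outbound acks still
 * pending, or an M3 was requested meanwhile) or hands the device over to
 * runtime PM via autosuspend.
 */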
static enum MHI_STATUS process_m1_transition(
		struct mhi_device_ctxt *mhi_dev_ctxt,
		enum STATE_TRANSITION cur_work_item)
{
	unsigned long flags = 0;
	int ret_val = 0;
	int r = 0;

	mhi_log(MHI_MSG_INFO,
		"Processing M1 state transition from state %d\n",
		mhi_dev_ctxt->mhi_state);

	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	if (!mhi_dev_ctxt->flags.pending_M3) {
		mhi_log(MHI_MSG_INFO, "Setting M2 Transition flag\n");
		atomic_inc(&mhi_dev_ctxt->flags.m2_transition);
		mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
		mhi_log(MHI_MSG_INFO, "Allowing transition to M2\n");
		mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M2);
		mhi_dev_ctxt->counters.m1_m2++;
	}
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	ret_val = mhi_set_bus_request(mhi_dev_ctxt, 0);
	if (ret_val)
		mhi_log(MHI_MSG_INFO, "Failed to update bus request\n");

	mhi_log(MHI_MSG_INFO, "Debouncing M2\n");
	msleep(MHI_M2_DEBOUNCE_TMR_MS);

	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_log(MHI_MSG_INFO, "Pending acks %d\n",
		atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
	if (atomic_read(&mhi_dev_ctxt->counters.outbound_acks) ||
	    mhi_dev_ctxt->flags.pending_M3) {
		mhi_assert_device_wake(mhi_dev_ctxt);
	} else {
		pm_runtime_mark_last_busy(
				&mhi_dev_ctxt->dev_info->plat_dev->dev);
		r = pm_request_autosuspend(
				&mhi_dev_ctxt->dev_info->plat_dev->dev);
		if (r) {
			mhi_log(MHI_MSG_ERROR,
				"Failed to remove counter ret %d\n", r);
			BUG_ON(mhi_dev_ctxt->dev_info->
				plat_dev->dev.power.runtime_error);
		}
	}
	atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
	mhi_log(MHI_MSG_INFO, "M2 transition complete.\n");
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	BUG_ON(atomic_read(&mhi_dev_ctxt->counters.outbound_acks) < 0);

	return MHI_STATUS_SUCCESS;
}
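/*
 * Note: the pm_runtime_mark_last_busy()/pm_request_autosuspend() pair above
 * only has an effect if runtime-PM autosuspend was enabled on the platform
 * device, typically at probe time. A minimal sketch of that setup, assuming
 * it happens in probe; the 100 ms delay is illustrative, not taken from
 * this driver:
 *
 *	struct device *dev = &mhi_dev_ctxt->dev_info->plat_dev->dev;
 *
 *	pm_runtime_set_autosuspend_delay(dev, 100);
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 */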
enum MHI_STATUS init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	int r = 0;
	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;

	mhi_assert_device_wake(mhi_dev_ctxt);
	mhi_dev_ctxt->flags.link_up = 1;
	r = mhi_set_bus_request(mhi_dev_ctxt, 1);
	if (r)
		mhi_log(MHI_MSG_INFO,
			"Failed to scale bus request to active set.\n");
	ret_val = mhi_init_state_transition(mhi_dev_ctxt,
					mhi_dev_ctxt->base_state);
	if (MHI_STATUS_SUCCESS != ret_val)
		mhi_log(MHI_MSG_CRITICAL,
			"Failed to start state change event, to %d\n",
			mhi_dev_ctxt->base_state);
	return ret_val;
}
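/*
 * mhi_process_link_down - Tear down MHI state after a PCIe link loss.
 *
 * Marks MHI uninitialized, forces the state machine to RESET, stops the
 * event/state-transition threads (polling every 20 ms until the event
 * thread acknowledges), then drops the bus vote and powers the link off.
 */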
static enum MHI_STATUS mhi_process_link_down(
		struct mhi_device_ctxt *mhi_dev_ctxt)
{
	unsigned long flags;
	int r;

	mhi_log(MHI_MSG_INFO, "Entered.\n");
	if (NULL == mhi_dev_ctxt)
		return MHI_STATUS_ERROR;

	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->flags.mhi_initialized = 0;
	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
	mhi_deassert_device_wake(mhi_dev_ctxt);
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	mhi_dev_ctxt->flags.stop_threads = 1;
	while (!mhi_dev_ctxt->flags.ev_thread_stopped) {
		wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
		mhi_log(MHI_MSG_INFO,
			"Waiting for threads to SUSPEND EVT: %d, STT: %d\n",
			mhi_dev_ctxt->flags.st_thread_stopped,
			mhi_dev_ctxt->flags.ev_thread_stopped);
		msleep(20);
	}

	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
	if (r)
		mhi_log(MHI_MSG_INFO,
			"Failed to scale bus request to sleep set.\n");
	mhi_turn_off_pcie_link(mhi_dev_ctxt);
	mhi_dev_ctxt->dev_info->link_down_cntr++;
	atomic_set(&mhi_dev_ctxt->flags.data_pending, 0);
	mhi_log(MHI_MSG_INFO, "Exited.\n");
	return MHI_STATUS_SUCCESS;
}
int mhi_initiate_m3(mhi_device_ctxt *mhi_dev_ctxt)
{
	unsigned long flags;
	int r = 0;
	int abort_m3 = 0;

	mhi_log(MHI_MSG_INFO,
		"Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
		mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
		mhi_dev_ctxt->flags.pending_M3);
	mutex_lock(&mhi_dev_ctxt->pm_lock);

	switch (mhi_dev_ctxt->mhi_state) {
	case MHI_STATE_M1:
	case MHI_STATE_M2:
		mhi_log(MHI_MSG_INFO, "Triggering wake out of M2\n");
		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
		mhi_dev_ctxt->flags.pending_M3 = 1;
		mhi_assert_device_wake(mhi_dev_ctxt);
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M0_event,
			mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
			mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
			msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
		if (0 == r || -ERESTARTSYS == r) {
			mhi_log(MHI_MSG_INFO | MHI_DBG_POWER,
				"MDM failed to come out of M2.\n");
			goto exit;
		}
		break;
	case MHI_STATE_M3:
		mhi_log(MHI_MSG_INFO,
			"MHI state %d, link state %d.\n",
			mhi_dev_ctxt->mhi_state,
			mhi_dev_ctxt->flags.link_up);
		if (mhi_dev_ctxt->flags.link_up)
			r = -EPERM;
		else
			r = 0;
		goto exit;
	case MHI_STATE_RESET:
		mhi_log(MHI_MSG_INFO,
			"MHI in RESET turning link off and quitting\n");
		mhi_turn_off_pcie_link(mhi_dev_ctxt);
		r = mhi_set_bus_request(mhi_dev_ctxt, 0);
		if (r)
			mhi_log(MHI_MSG_INFO,
				"Failed to set bus freq ret %d\n", r);
		goto exit;
	default:
		mhi_log(MHI_MSG_INFO,
			"MHI state %d, link state %d.\n",
			mhi_dev_ctxt->mhi_state,
			mhi_dev_ctxt->flags.link_up);
		break;
	}

	while (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) {
		mhi_log(MHI_MSG_INFO | MHI_DBG_POWER,
			"There are still %d acks pending from device\n",
			atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
		__pm_stay_awake(&mhi_dev_ctxt->wake_lock);
		__pm_relax(&mhi_dev_ctxt->wake_lock);
		abort_m3 = 1;
		goto exit;
	}

	if (atomic_read(&mhi_dev_ctxt->flags.data_pending)) {
		abort_m3 = 1;
		goto exit;
	}

	r = hrtimer_cancel(&mhi_dev_ctxt->m1_timer);
	if (r)
		mhi_log(MHI_MSG_INFO,
			"Cancelled M1 timer, timer was active\n");
	else
		mhi_log(MHI_MSG_INFO,
			"Cancelled M1 timer, timer was not active\n");

	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	if (mhi_dev_ctxt->flags.pending_M0) {
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		mhi_log(MHI_MSG_INFO,
			"Pending M0 detected, aborting M3 procedure\n");
		r = -EPERM;
		goto exit;
	}
	mhi_dev_ctxt->flags.pending_M3 = 1;
	atomic_set(&mhi_dev_ctxt->flags.cp_m1_state, 0);
	mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	mhi_log(MHI_MSG_INFO | MHI_DBG_POWER, "Waiting for M3 completion.\n");
	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M3_event,
		mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
		msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
	switch (r) {
	case 0:
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
			"MDM failed to suspend after %d ms\n",
			MHI_MAX_SUSPEND_TIMEOUT);
		mhi_dev_ctxt->counters.m3_event_timeouts++;
		mhi_dev_ctxt->flags.pending_M3 = 0;
		r = -EAGAIN;
		goto exit;
	case -ERESTARTSYS:
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER, "Going Down...\n");
		goto exit;
	default:
		mhi_log(MHI_MSG_INFO | MHI_DBG_POWER,
			"M3 completion received\n");
		break;
	}

	mhi_deassert_device_wake(mhi_dev_ctxt);
	/* Turn off the PCIe link */
	mhi_turn_off_pcie_link(mhi_dev_ctxt);
	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
	if (r)
		mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
exit:
	if (abort_m3) {
		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
		atomic_inc(&mhi_dev_ctxt->flags.data_pending);
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		ring_all_chan_dbs(mhi_dev_ctxt);
		atomic_dec(&mhi_dev_ctxt->flags.data_pending);
		r = -EAGAIN;

		if (atomic_read(&mhi_dev_ctxt->flags.cp_m1_state)) {
			write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
			atomic_set(&mhi_dev_ctxt->flags.cp_m1_state, 0);
			mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
			mhi_log(MHI_MSG_INFO, "Allowing transition to M2\n");
			mhi_reg_write_field(mhi_dev_ctxt->mmio_addr, MHICTRL,
					MHICTRL_MHISTATE_MASK,
					MHICTRL_MHISTATE_SHIFT,
					MHI_STATE_M2);
			mhi_dev_ctxt->counters.m1_m2++;
			write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock,
						flags);
		}
	}
	/*
	 * We have to be careful here: we are clearing pending_M3 even if we
	 * did not set it above. This works since the only other entity that
	 * sets this flag must also acquire the pm_lock.
	 */
	atomic_set(&mhi_dev_ctxt->flags.m3_work_enabled, 0);
	mhi_dev_ctxt->flags.pending_M3 = 0;
	mutex_unlock(&mhi_dev_ctxt->pm_lock);
	return r;
}
MHI_STATUS mhi_process_link_down(mhi_device_ctxt *mhi_dev_ctxt)
{
	unsigned long flags;
	int r;

	mhi_log(MHI_MSG_INFO, "Entered.\n");
	if (NULL == mhi_dev_ctxt)
		return MHI_STATUS_ERROR;

	mhi_notify_clients(mhi_dev_ctxt, MHI_CB_MHI_DISABLED);

	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->flags.mhi_initialized = 0;
	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
	mhi_deassert_device_wake(mhi_dev_ctxt);
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	r = cancel_delayed_work_sync(&mhi_dev_ctxt->m3_work);
	if (r) {
		atomic_set(&mhi_dev_ctxt->flags.m3_work_enabled, 0);
		mhi_log(MHI_MSG_INFO, "M3 work cancelled\n");
	}
	r = cancel_work_sync(&mhi_dev_ctxt->m0_work);
	if (r) {
		atomic_set(&mhi_dev_ctxt->flags.m0_work_enabled, 0);
		mhi_log(MHI_MSG_INFO, "M0 work cancelled\n");
	}

	mhi_dev_ctxt->flags.stop_threads = 1;
	while (!mhi_dev_ctxt->ev_thread_stopped) {
		wake_up_interruptible(mhi_dev_ctxt->event_handle);
		mhi_log(MHI_MSG_INFO,
			"Waiting for threads to SUSPEND EVT: %d, STT: %d\n",
			mhi_dev_ctxt->st_thread_stopped,
			mhi_dev_ctxt->ev_thread_stopped);
		msleep(20);
	}

	switch (hrtimer_try_to_cancel(&mhi_dev_ctxt->m1_timer)) {
	case 0:
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
			"Timer was not active\n");
		break;
	case 1:
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
			"Timer was active\n");
		break;
	case -1:
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
			"Timer executing and can't stop\n");
		break;
	}

	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
	if (r)
		mhi_log(MHI_MSG_INFO,
			"Failed to scale bus request to sleep set.\n");
	mhi_turn_off_pcie_link(mhi_dev_ctxt);
	mhi_dev_ctxt->dev_info->link_down_cntr++;
	atomic_set(&mhi_dev_ctxt->flags.data_pending, 0);
	mhi_log(MHI_MSG_INFO, "Exited.\n");
	return MHI_STATUS_SUCCESS;
}
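/*
 * mhi_initiate_m3 - Attempt to suspend the device into M3.
 *
 * Serialized against other power transitions by pm_lock. Wakes the device
 * out of M2 if needed, refuses to suspend (-EAGAIN) while outbound acks or
 * pending data remain, writes M3 to the device and waits up to
 * MHI_MAX_SUSPEND_TIMEOUT ms for completion before turning the PCIe link
 * off. On an aborted attempt the doorbells are re-rung so no work is lost.
 */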
int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	unsigned long flags;
	int r = 0;
	int abort_m3 = 0;

	mhi_log(MHI_MSG_INFO,
		"Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
		mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
		mhi_dev_ctxt->flags.pending_M3);
	mutex_lock(&mhi_dev_ctxt->pm_lock);

	switch (mhi_dev_ctxt->mhi_state) {
	case MHI_STATE_RESET:
		mhi_log(MHI_MSG_INFO,
			"MHI in RESET turning link off and quitting\n");
		mhi_turn_off_pcie_link(mhi_dev_ctxt);
		r = mhi_set_bus_request(mhi_dev_ctxt, 0);
		if (r)
			mhi_log(MHI_MSG_INFO,
				"Failed to set bus freq ret %d\n", r);
		goto exit;
	case MHI_STATE_M0:
	case MHI_STATE_M1:
	case MHI_STATE_M2:
		mhi_log(MHI_MSG_INFO, "Triggering wake out of M2\n");
		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
		mhi_dev_ctxt->flags.pending_M3 = 1;
		if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) {
			mhi_log(MHI_MSG_INFO, "M2 transition not set\n");
			mhi_assert_device_wake(mhi_dev_ctxt);
		}
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		r = wait_event_interruptible_timeout(
				*mhi_dev_ctxt->mhi_ev_wq.m0_event,
				mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
				mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
				msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
		if (0 == r || -ERESTARTSYS == r) {
			mhi_log(MHI_MSG_INFO,
				"MDM failed to come out of M2.\n");
			r = -EAGAIN;
			goto exit;
		}
		break;
	case MHI_STATE_M3:
		mhi_log(MHI_MSG_INFO,
			"MHI state %d, link state %d.\n",
			mhi_dev_ctxt->mhi_state,
			mhi_dev_ctxt->flags.link_up);
		if (mhi_dev_ctxt->flags.link_up)
			r = -EAGAIN;
		else
			r = 0;
		goto exit;
	default:
		mhi_log(MHI_MSG_INFO,
			"MHI state %d, link state %d.\n",
			mhi_dev_ctxt->mhi_state,
			mhi_dev_ctxt->flags.link_up);
		break;
	}

	while (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) {
		mhi_log(MHI_MSG_INFO,
			"There are still %d acks pending from device\n",
			atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
		__pm_stay_awake(&mhi_dev_ctxt->w_lock);
		__pm_relax(&mhi_dev_ctxt->w_lock);
		abort_m3 = 1;
		r = -EAGAIN;
		goto exit;
	}

	if (atomic_read(&mhi_dev_ctxt->flags.data_pending)) {
		abort_m3 = 1;
		r = -EAGAIN;
		goto exit;
	}

	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	if (mhi_dev_ctxt->flags.pending_M0) {
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		r = -EAGAIN;
		goto exit;
	}
	mhi_dev_ctxt->flags.pending_M3 = 1;
	mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	mhi_log(MHI_MSG_INFO, "Waiting for M3 completion.\n");
	r = wait_event_interruptible_timeout(
			*mhi_dev_ctxt->mhi_ev_wq.m3_event,
			mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
			msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
	switch (r) {
	case 0:
		mhi_log(MHI_MSG_CRITICAL,
			"MDM failed to suspend after %d ms\n",
			MHI_MAX_SUSPEND_TIMEOUT);
		mhi_dev_ctxt->counters.m3_event_timeouts++;
		mhi_dev_ctxt->flags.pending_M3 = 0;
		goto exit;
	default:
		mhi_log(MHI_MSG_INFO, "M3 completion received\n");
		break;
	}

	mhi_turn_off_pcie_link(mhi_dev_ctxt);
	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
	if (r)
		mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
exit:
	if (abort_m3) {
		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
		atomic_inc(&mhi_dev_ctxt->flags.data_pending);
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		ring_all_chan_dbs(mhi_dev_ctxt);
		ring_all_cmd_dbs(mhi_dev_ctxt);
		atomic_dec(&mhi_dev_ctxt->flags.data_pending);
		mhi_deassert_device_wake(mhi_dev_ctxt);
	}
	mhi_dev_ctxt->flags.pending_M3 = 0;
	mutex_unlock(&mhi_dev_ctxt->pm_lock);
	return r;
}