/*
 * process_M0_transition - move the MHI state machine into M0 (fully active)
 * and restart data movement.
 *
 * @mhi_dev_ctxt:  device context holding state, flags, counters and locks
 * @cur_work_item: state-transition work item being serviced (not otherwise
 *                 used in this handler)
 *
 * Cancels queued M3 (suspend) work, accounts the M2/M3->M0 transition,
 * marks the state M0, rings all doorbells so pending traffic resumes,
 * raises the bus bandwidth vote, wakes M0 waiters and re-arms the M1
 * inactivity timer.
 *
 * Always returns MHI_STATUS_SUCCESS; failures are only logged.
 */
MHI_STATUS process_M0_transition(mhi_device_ctxt *mhi_dev_ctxt,
		STATE_TRANSITION cur_work_item)
{
	unsigned long flags;
	int ret_val;

	mhi_log(MHI_MSG_INFO, "Entered\n");

	/* Stop a pending M3 (suspend) request racing with this wake-up;
	 * cancel_delayed_work() returns nonzero only if it was still queued */
	ret_val = cancel_delayed_work(&mhi_dev_ctxt->m3_work);
	if (ret_val) {
		atomic_set(&mhi_dev_ctxt->flags.m3_work_enabled, 0);
		mhi_log(MHI_MSG_INFO, "M3 work was cancelled\n");
	} else {
		mhi_log(MHI_MSG_INFO,
			"M3 work NOT cancelled, either running or never started\n");
	}

	/* Wait for mhi_dev_ctxt transition to M0 */
	/* Proxy vote to prevent M3 while we are ringing DBs */
	if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
		mhi_dev_ctxt->counters.m2_m0++;
	} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
		mhi_dev_ctxt->counters.m3_m0++;
	} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_READY) {
		/* READY -> M0 is a valid first transition; no counter kept */
	} else {
		/* Unexpected source state: abandon the transition */
		mhi_log(MHI_MSG_INFO,
			"MHI State %d link state %d. Quitting\n",
			mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.link_up);
		goto exit;
	}

	/* NOTE(review): mhi_state is written while holding only the READ
	 * side of xfer_lock, which does not exclude other readers --
	 * presumably state writers are serialized elsewhere; confirm,
	 * otherwise this wants write_lock_irqsave(). */
	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->mhi_state = MHI_STATE_M0;
	/* Hold a data_pending vote + device wake so we cannot drop back to
	 * low power while ringing doorbells below */
	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	mhi_assert_device_wake(mhi_dev_ctxt);
	read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	if (mhi_dev_ctxt->flags.mhi_initialized) {
		/* Bump up the vote for pending data */
		ring_all_ev_dbs(mhi_dev_ctxt);
		ring_all_chan_dbs(mhi_dev_ctxt);
		ring_all_cmd_dbs(mhi_dev_ctxt);
	}
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);

	/* Request full bus bandwidth now that the link is active */
	ret_val = mhi_set_bus_request(mhi_dev_ctxt, 1);
	if (ret_val)
		mhi_log(MHI_MSG_CRITICAL,
			"Could not set bus frequency ret: %d\n", ret_val);

	mhi_dev_ctxt->flags.pending_M0 = 0;
	atomic_set(&mhi_dev_ctxt->flags.cp_m1_state, 0);
	/* Release anyone blocked waiting for the M0 transition */
	wake_up_interruptible(mhi_dev_ctxt->M0_event);

	/* Re-arm the M1 inactivity timer */
	ret_val = hrtimer_start(&mhi_dev_ctxt->m1_timer,
			mhi_dev_ctxt->m1_timeout, HRTIMER_MODE_REL);
	mhi_log(MHI_MSG_VERBOSE, "Starting M1 timer, ret %d\n", ret_val);
exit:
	mhi_log(MHI_MSG_INFO, "Exited\n");
	return MHI_STATUS_SUCCESS;
}
/*
 * process_m0_transition - drive the MHI state machine into M0 (active).
 *
 * @mhi_dev_ctxt:  device context (state, flags, counters, wait queues)
 * @cur_work_item: state-transition work item being serviced (not otherwise
 *                 used in this handler)
 *
 * Accounts the transition into M0, sets the state, rings all doorbells to
 * resume traffic, votes for bus bandwidth, clears pending SSR/power-up
 * flags, wakes M0 waiters, and finally drops the device-wake assertion if
 * no M3 request is pending.
 *
 * Always returns MHI_STATUS_SUCCESS; failures are only logged.
 */
static enum MHI_STATUS process_m0_transition(
		struct mhi_device_ctxt *mhi_dev_ctxt,
		enum STATE_TRANSITION cur_work_item)
{
	unsigned long flags;
	int ret_val;

	mhi_log(MHI_MSG_INFO, "Entered\n");

	/* Record which state we are leaving */
	if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
		mhi_dev_ctxt->counters.m2_m0++;
	} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
		mhi_dev_ctxt->counters.m3_m0++;
	} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_READY) {
		mhi_log(MHI_MSG_INFO,
			"Transitioning from READY.\n");
	} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
		mhi_log(MHI_MSG_INFO,
			"Transitioning from M1.\n");
	} else {
		/* NOTE(review): the message says "Quitting" but execution
		 * falls through and performs the M0 transition anyway (the
		 * sibling variant does `goto exit` here) -- confirm this
		 * fall-through is intentional. */
		mhi_log(MHI_MSG_INFO,
			"MHI State %d link state %d. Quitting\n",
			mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.link_up);
	}

	/* NOTE(review): mhi_state is written under the READ side of
	 * xfer_lock, while the deassert path below takes the write side --
	 * confirm state writers are serialized some other way. */
	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->mhi_state = MHI_STATE_M0;
	/* Hold a data_pending vote + device wake so we cannot drop back to
	 * low power while ringing doorbells below */
	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	mhi_assert_device_wake(mhi_dev_ctxt);
	read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	if (mhi_dev_ctxt->flags.mhi_initialized) {
		ring_all_ev_dbs(mhi_dev_ctxt);
		ring_all_chan_dbs(mhi_dev_ctxt);
		ring_all_cmd_dbs(mhi_dev_ctxt);
	}
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);

	/* Request full bus bandwidth now that the link is active */
	ret_val = mhi_set_bus_request(mhi_dev_ctxt, 1);
	if (ret_val)
		mhi_log(MHI_MSG_CRITICAL,
			"Could not set bus frequency ret: %d\n", ret_val);

	mhi_dev_ctxt->flags.pending_M0 = 0;
	/* Reaching M0 clears any in-flight SSR / power-up bookkeeping */
	if (atomic_read(&mhi_dev_ctxt->flags.pending_powerup)) {
		atomic_set(&mhi_dev_ctxt->flags.pending_ssr, 0);
		atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 0);
	}
	/* Release anyone blocked waiting for the M0 transition */
	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m0_event);

	/* Drop the wake assertion unless an M3 request is pending */
	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	if (!mhi_dev_ctxt->flags.pending_M3 &&
	    mhi_dev_ctxt->flags.link_up &&
	    mhi_dev_ctxt->flags.mhi_initialized)
		mhi_deassert_device_wake(mhi_dev_ctxt);
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	mhi_log(MHI_MSG_INFO, "Exited\n");
	return MHI_STATUS_SUCCESS;
}
/*
 * process_AMSS_transition - handle the transition to the AMSS execution
 * environment: start all enabled channels, wait for their start-command
 * completions, and on first entry mark MHI initialized and probe clients.
 *
 * @mhi_dev_ctxt:  device context (channel contexts, flags, wait queues)
 * @cur_work_item: state-transition work item, forwarded to the event-ring
 *                 and client-probe helpers
 *
 * Returns MHI_STATUS_SUCCESS on success, MHI_STATUS_ERROR if event-ring
 * setup or a channel start command fails.
 */
MHI_STATUS process_AMSS_transition(mhi_device_ctxt *mhi_dev_ctxt,
		STATE_TRANSITION cur_work_item)
{
	MHI_STATUS ret_val;
	u32 chan;
	unsigned long flags;
	mhi_chan_ctxt *chan_ctxt;

	mhi_log(MHI_MSG_INFO, "Processing AMSS state transition\n");

	/* Take a data_pending vote and assert device wake so the core
	 * cannot enter a low-power state while we ring doorbells below */
	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	mhi_assert_device_wake(mhi_dev_ctxt);
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	ret_val = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
						  cur_work_item);
	if (MHI_STATUS_SUCCESS != ret_val) {
		/* BUGFIX: previously returned directly, leaking the
		 * data_pending vote taken above */
		ret_val = MHI_STATUS_ERROR;
		goto exit;
	}

	/* Issue a start command for every enabled channel.
	 * BUGFIX: loop bound was `chan <= MHI_MAX_CHANNELS`, which indexes
	 * one element past mhi_cc_list[] on the final iteration. */
	for (chan = 0; chan < MHI_MAX_CHANNELS; ++chan) {
		if (!VALID_CHAN_NR(chan))
			continue;
		chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];
		if (MHI_CHAN_STATE_ENABLED != chan_ctxt->mhi_chan_state)
			continue;
		mhi_log(MHI_MSG_INFO, "Starting Channel 0x%x \n", chan);
		ret_val = mhi_send_cmd(mhi_dev_ctxt,
				       MHI_COMMAND_START_CHAN, chan);
		if (MHI_STATUS_SUCCESS != ret_val) {
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to start chan0x%x,0x%x\n",
				chan, ret_val);
			/* BUGFIX: drop the data_pending vote on error */
			ret_val = MHI_STATUS_ERROR;
			goto exit;
		}
		atomic_inc(&mhi_dev_ctxt->start_cmd_pending_ack);
	}

	mhi_log(MHI_MSG_INFO, "Waiting for cmd completions\n");
	wait_event_interruptible(*mhi_dev_ctxt->chan_start_complete,
		atomic_read(&mhi_dev_ctxt->start_cmd_pending_ack) == 0);

	/* First time reaching AMSS: finish local init and probe clients */
	if (0 == mhi_dev_ctxt->flags.mhi_initialized) {
		mhi_dev_ctxt->flags.mhi_initialized = 1;
		ret_val = mhi_set_state_of_all_channels(mhi_dev_ctxt,
						MHI_CHAN_STATE_RUNNING);
		if (MHI_STATUS_SUCCESS != ret_val)
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to set local chan state\n");
		if (!mhi_dev_ctxt->flags.mhi_clients_probed) {
			ret_val = probe_clients(mhi_dev_ctxt, cur_work_item);
			if (ret_val != MHI_STATUS_SUCCESS)
				mhi_log(MHI_MSG_CRITICAL,
					"Failed to probe MHI CORE clients.\n");
			mhi_dev_ctxt->flags.mhi_clients_probed = 1;
			ring_all_ev_dbs(mhi_dev_ctxt);
			ring_all_chan_dbs(mhi_dev_ctxt);
			ring_all_cmd_dbs(mhi_dev_ctxt);
		} else {
			ring_all_chan_dbs(mhi_dev_ctxt);
			mhi_log(MHI_MSG_CRITICAL,
				"Notifying clients that MHI is enabled\n");
			mhi_notify_clients(mhi_dev_ctxt,
					   MHI_CB_MHI_ENABLED);
		}
		/* BUGFIX: removed the trailing duplicate "Failed to probe"
		 * log; it re-tested ret_val, which in the else branch holds
		 * the stale status of mhi_set_state_of_all_channels() and
		 * produced a misleading message. */
	}
	ret_val = MHI_STATUS_SUCCESS;
exit:
	/* Single exit: always release the data_pending vote taken above */
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	mhi_log(MHI_MSG_INFO, "Exited\n");
	return ret_val;
}