MHI_STATUS process_AMSS_transition(mhi_device_ctxt *mhi_dev_ctxt,
                                   STATE_TRANSITION cur_work_item)
{
        MHI_STATUS ret_val;
        u32 chan;
        unsigned long flags;
        mhi_chan_ctxt *chan_ctxt;

        mhi_log(MHI_MSG_INFO, "Processing AMSS state transition\n");

        /* Block M3 entry and keep the device awake while channels
         * are brought up. */
        write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
        atomic_inc(&mhi_dev_ctxt->flags.data_pending);
        mhi_assert_device_wake(mhi_dev_ctxt);
        write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

        ret_val = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
                                                  cur_work_item);
        if (MHI_STATUS_SUCCESS != ret_val) {
                ret_val = MHI_STATUS_ERROR;
                goto exit;
        }

        /* Issue a START command for every enabled channel. Bound is
         * chan < MHI_MAX_CHANNELS (not <=) to stay inside
         * mhi_cc_list[]. */
        for (chan = 0; chan < MHI_MAX_CHANNELS; ++chan) {
                if (!VALID_CHAN_NR(chan))
                        continue;
                chan_ctxt =
                        &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];
                if (MHI_CHAN_STATE_ENABLED != chan_ctxt->mhi_chan_state)
                        continue;
                mhi_log(MHI_MSG_INFO, "Starting Channel 0x%x\n", chan);
                ret_val = mhi_send_cmd(mhi_dev_ctxt,
                                       MHI_COMMAND_START_CHAN, chan);
                if (MHI_STATUS_SUCCESS != ret_val) {
                        mhi_log(MHI_MSG_CRITICAL,
                                "Failed to start chan 0x%x, 0x%x\n",
                                chan, ret_val);
                        ret_val = MHI_STATUS_ERROR;
                        goto exit;
                }
                atomic_inc(&mhi_dev_ctxt->start_cmd_pending_ack);
        }

        mhi_log(MHI_MSG_INFO, "Waiting for cmd completions\n");
        wait_event_interruptible(*mhi_dev_ctxt->chan_start_complete,
                atomic_read(&mhi_dev_ctxt->start_cmd_pending_ack) == 0);

        if (0 == mhi_dev_ctxt->flags.mhi_initialized) {
                mhi_dev_ctxt->flags.mhi_initialized = 1;
                ret_val = mhi_set_state_of_all_channels(mhi_dev_ctxt,
                                        MHI_CHAN_STATE_RUNNING);
                if (MHI_STATUS_SUCCESS != ret_val)
                        mhi_log(MHI_MSG_CRITICAL,
                                "Failed to set local chan state\n");
                if (!mhi_dev_ctxt->flags.mhi_clients_probed) {
                        /* First AMSS entry: probe clients, then ring
                         * all doorbells so the device sees the current
                         * ring state. */
                        ret_val = probe_clients(mhi_dev_ctxt,
                                                cur_work_item);
                        if (ret_val != MHI_STATUS_SUCCESS)
                                mhi_log(MHI_MSG_CRITICAL,
                                        "Failed to probe MHI CORE clients, ret 0x%x\n",
                                        ret_val);
                        mhi_dev_ctxt->flags.mhi_clients_probed = 1;
                        ring_all_ev_dbs(mhi_dev_ctxt);
                        ring_all_chan_dbs(mhi_dev_ctxt);
                        ring_all_cmd_dbs(mhi_dev_ctxt);
                } else {
                        /* Clients already exist; restart transfers and
                         * notify them that MHI is back. */
                        ring_all_chan_dbs(mhi_dev_ctxt);
                        mhi_log(MHI_MSG_CRITICAL,
                                "Notifying clients that MHI is enabled\n");
                        mhi_notify_clients(mhi_dev_ctxt,
                                           MHI_CB_MHI_ENABLED);
                }
        }
        ret_val = MHI_STATUS_SUCCESS;
exit:
        /* Drop the data-pending hold taken on entry, on all paths. */
        atomic_dec(&mhi_dev_ctxt->flags.data_pending);
        mhi_log(MHI_MSG_INFO, "Exited\n");
        return ret_val;
}
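/*
 * For reference, a minimal sketch of the completion-side counterpart that
 * process_AMSS_transition() blocks on above: each START_CHAN completion
 * drops one pending ack, and the last one wakes the waiter. The helper
 * name (mhi_ack_start_cmd) and its placement in the command-completion
 * path are assumptions for illustration; only start_cmd_pending_ack and
 * chan_start_complete come from this driver.
 */
static void mhi_ack_start_cmd(mhi_device_ctxt *mhi_dev_ctxt)
{
        /* One START_CHAN completion event consumes one pending ack. */
        if (atomic_dec_return(&mhi_dev_ctxt->start_cmd_pending_ack) == 0)
                /* Last ack: release the waiter in process_AMSS_transition */
                wake_up_interruptible(mhi_dev_ctxt->chan_start_complete);
}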
MHI_STATUS mhi_process_link_down(mhi_device_ctxt *mhi_dev_ctxt)
{
        unsigned long flags;
        int r;

        mhi_log(MHI_MSG_INFO, "Entered.\n");
        if (NULL == mhi_dev_ctxt)
                return MHI_STATUS_ERROR;

        mhi_notify_clients(mhi_dev_ctxt, MHI_CB_MHI_DISABLED);

        write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
        mhi_dev_ctxt->flags.mhi_initialized = 0;
        mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
        mhi_deassert_device_wake(mhi_dev_ctxt);
        write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

        /* Cancel any pending M3/M0 work before stopping the threads. */
        r = cancel_delayed_work_sync(&mhi_dev_ctxt->m3_work);
        if (r) {
                atomic_set(&mhi_dev_ctxt->flags.m3_work_enabled, 0);
                mhi_log(MHI_MSG_INFO, "M3 work cancelled\n");
        }
        r = cancel_work_sync(&mhi_dev_ctxt->m0_work);
        if (r) {
                atomic_set(&mhi_dev_ctxt->flags.m0_work_enabled, 0);
                mhi_log(MHI_MSG_INFO, "M0 work cancelled\n");
        }

        /* Ask the event thread to park itself and poll until it does. */
        mhi_dev_ctxt->flags.stop_threads = 1;
        while (!mhi_dev_ctxt->ev_thread_stopped) {
                wake_up_interruptible(mhi_dev_ctxt->event_handle);
                mhi_log(MHI_MSG_INFO,
                        "Waiting for threads to SUSPEND EVT: %d, STT: %d\n",
                        mhi_dev_ctxt->st_thread_stopped,
                        mhi_dev_ctxt->ev_thread_stopped);
                msleep(20);
        }

        switch (hrtimer_try_to_cancel(&mhi_dev_ctxt->m1_timer)) {
        case 0:
                mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
                        "Timer was not active\n");
                break;
        case 1:
                mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
                        "Timer was active\n");
                break;
        case -1:
                mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
                        "Timer executing and can't stop\n");
                break;
        }

        r = mhi_set_bus_request(mhi_dev_ctxt, 0);
        if (r)
                mhi_log(MHI_MSG_INFO,
                        "Failed to scale bus request to sleep set.\n");
        mhi_turn_off_pcie_link(mhi_dev_ctxt);
        mhi_dev_ctxt->dev_info->link_down_cntr++;
        atomic_set(&mhi_dev_ctxt->flags.data_pending, 0);
        mhi_log(MHI_MSG_INFO, "Exited.\n");
        return MHI_STATUS_SUCCESS;
}
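/*
 * A minimal sketch, under assumptions, of the handshake the polling loop
 * in mhi_process_link_down() relies on: the event thread observes
 * stop_threads, reports via ev_thread_stopped, and parks. The thread
 * function shown (mhi_event_thread_sketch) and its drain step are
 * hypothetical; stop_threads, ev_thread_stopped, and event_handle are
 * the fields used above.
 */
static int mhi_event_thread_sketch(void *arg)
{
        mhi_device_ctxt *mhi_dev_ctxt = arg;

        while (!kthread_should_stop()) {
                wait_event_interruptible(*mhi_dev_ctxt->event_handle,
                        mhi_dev_ctxt->flags.stop_threads ||
                        kthread_should_stop());
                if (mhi_dev_ctxt->flags.stop_threads) {
                        /* Tell mhi_process_link_down() we have parked,
                         * then stay idle until the flag is cleared. */
                        mhi_dev_ctxt->ev_thread_stopped = 1;
                        msleep(20);
                        continue;
                }
                /* Normal path: drain event rings here (omitted). */
        }
        return 0;
}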
static int mhi_ssr_notify_cb(struct notifier_block *nb,
                             unsigned long action, void *data)
{
        int ret_val = 0;
        struct mhi_device_ctxt *mhi_dev_ctxt =
                        &mhi_devices.device_list[0].mhi_ctxt;

        if (NULL != mhi_dev_ctxt)
                mhi_dev_ctxt->esoc_notif = action;

        switch (action) {
        case SUBSYS_BEFORE_POWERUP:
                mhi_log(MHI_MSG_INFO,
                        "Received Subsystem event BEFORE_POWERUP\n");
                atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 1);
                ret_val = init_mhi_base_state(mhi_dev_ctxt);
                if (MHI_STATUS_SUCCESS != ret_val)
                        mhi_log(MHI_MSG_CRITICAL,
                                "Failed to transition to base state %d.\n",
                                ret_val);
                break;
        case SUBSYS_AFTER_POWERUP:
                mhi_log(MHI_MSG_INFO,
                        "Received Subsystem event AFTER_POWERUP\n");
                break;
        case SUBSYS_POWERUP_FAILURE:
                mhi_log(MHI_MSG_INFO,
                        "Received Subsystem event POWERUP_FAILURE\n");
                break;
        case SUBSYS_BEFORE_SHUTDOWN:
                mhi_log(MHI_MSG_INFO,
                        "Received Subsystem event BEFORE_SHUTDOWN\n");
                atomic_set(&mhi_dev_ctxt->flags.pending_ssr, 1);
                mhi_notify_clients(mhi_dev_ctxt, MHI_CB_MHI_DISABLED);
                break;
        case SUBSYS_AFTER_SHUTDOWN:
                mhi_log(MHI_MSG_INFO,
                        "Received Subsystem event AFTER_SHUTDOWN\n");
                ret_val = mhi_init_state_transition(mhi_dev_ctxt,
                                        STATE_TRANSITION_LINK_DOWN);
                if (MHI_STATUS_SUCCESS != ret_val)
                        mhi_log(MHI_MSG_CRITICAL,
                                "Failed to init state transition, to %d\n",
                                STATE_TRANSITION_LINK_DOWN);
                break;
        case SUBSYS_RAMDUMP_NOTIFICATION:
                mhi_log(MHI_MSG_INFO,
                        "Received Subsystem event RAMDUMP\n");
                ret_val = init_mhi_base_state(mhi_dev_ctxt);
                if (MHI_STATUS_SUCCESS != ret_val)
                        mhi_log(MHI_MSG_CRITICAL,
                                "Failed to transition to base state %d.\n",
                                ret_val);
                break;
        default:
                mhi_log(MHI_MSG_INFO,
                        "Received ESOC notification %d, NOT handling\n",
                        (int)action);
                break;
        }
        return NOTIFY_OK;
}
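/*
 * A hedged sketch of how the callback above could be hooked into the
 * subsystem-restart framework so it receives the SUBSYS_* events it
 * handles. The subsystem name "external_modem" and the wrapper
 * (mhi_ssr_register_sketch) are assumptions for illustration;
 * subsys_notif_register_notifier() is the MSM SSR registration entry
 * point, which returns an opaque handle used for later unregistration.
 */
static struct notifier_block mhi_ssr_nb = {
        .notifier_call = mhi_ssr_notify_cb,
};

static int mhi_ssr_register_sketch(void)
{
        void *handle;

        /* Subscribe to restart events for the external modem subsystem. */
        handle = subsys_notif_register_notifier("external_modem",
                                                &mhi_ssr_nb);
        if (IS_ERR(handle))
                return PTR_ERR(handle);
        return 0;
}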