static enum MHI_STATUS process_reset_transition( struct mhi_device_ctxt *mhi_dev_ctxt, enum STATE_TRANSITION cur_work_item) { u32 i = 0; enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; unsigned long flags = 0; mhi_log(MHI_MSG_INFO, "Processing RESET state transition\n"); write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags); mhi_dev_ctxt->mhi_state = MHI_STATE_RESET; write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags); mhi_dev_ctxt->counters.mhi_reset_cntr++; mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_PBL; ret_val = mhi_test_for_device_reset(mhi_dev_ctxt); ret_val = mhi_test_for_device_ready(mhi_dev_ctxt); switch (ret_val) { case MHI_STATUS_SUCCESS: break; case MHI_STATUS_LINK_DOWN: mhi_log(MHI_MSG_CRITICAL, "Link down detected\n"); break; case MHI_STATUS_DEVICE_NOT_READY: ret_val = mhi_init_state_transition(mhi_dev_ctxt, STATE_TRANSITION_RESET); if (MHI_STATUS_SUCCESS != ret_val) mhi_log(MHI_MSG_CRITICAL, "Failed to initiate 0x%x state trans\n", STATE_TRANSITION_RESET); break; default: mhi_log(MHI_MSG_CRITICAL, "Unexpected ret code detected for\n"); break; } for (i = 0; i < NR_OF_CMD_RINGS; ++i) { mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp = mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base; mhi_dev_ctxt->mhi_local_cmd_ctxt[i].wp = mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base; mhi_dev_ctxt->mhi_ctrl_seg->mhi_cmd_ctxt_list[i]. mhi_cmd_ring_read_ptr = virt_to_dma(NULL, mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp); } for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) mhi_reset_ev_ctxt(mhi_dev_ctxt, i); for (i = 0; i < MHI_MAX_CHANNELS; ++i) { if (VALID_CHAN_NR(i)) mhi_reset_chan_ctxt(mhi_dev_ctxt, i); } ret_val = mhi_init_state_transition(mhi_dev_ctxt, STATE_TRANSITION_READY); if (MHI_STATUS_SUCCESS != ret_val) mhi_log(MHI_MSG_CRITICAL, "Failed to initiate 0x%x state trans\n", STATE_TRANSITION_READY); return ret_val; }
void mhi_link_state_cb(struct msm_pcie_notify *notify) { enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; struct mhi_pcie_dev_info *mhi_pcie_dev = notify->data; struct mhi_device_ctxt *mhi_dev_ctxt = NULL; if (NULL == notify || NULL == notify->data) { mhi_log(MHI_MSG_CRITICAL, "Incomplete handle received\n"); return; } mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt; switch (notify->event) { case MSM_PCIE_EVENT_LINKDOWN: mhi_log(MHI_MSG_INFO, "Received MSM_PCIE_EVENT_LINKDOWN\n"); break; case MSM_PCIE_EVENT_LINKUP: mhi_log(MHI_MSG_INFO, "Received MSM_PCIE_EVENT_LINKUP\n"); if (0 == mhi_pcie_dev->link_up_cntr) { mhi_log(MHI_MSG_INFO, "Initializing MHI for the first time\n"); mhi_ctxt_init(mhi_pcie_dev); mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt; mhi_pcie_dev->mhi_ctxt.flags.link_up = 1; pci_set_master(mhi_pcie_dev->pcie_device); init_mhi_base_state(mhi_dev_ctxt); } else { mhi_log(MHI_MSG_INFO, "Received Link Up Callback\n"); } mhi_pcie_dev->link_up_cntr++; break; case MSM_PCIE_EVENT_WAKEUP: mhi_log(MHI_MSG_INFO, "Received MSM_PCIE_EVENT_WAKE\n"); __pm_stay_awake(&mhi_dev_ctxt->w_lock); __pm_relax(&mhi_dev_ctxt->w_lock); if (atomic_read(&mhi_dev_ctxt->flags.pending_resume)) { mhi_log(MHI_MSG_INFO, "There is a pending resume, doing nothing.\n"); return; } ret_val = mhi_init_state_transition(mhi_dev_ctxt, STATE_TRANSITION_WAKE); if (MHI_STATUS_SUCCESS != ret_val) { mhi_log(MHI_MSG_CRITICAL, "Failed to init state transition, to %d\n", STATE_TRANSITION_WAKE); } break; default: mhi_log(MHI_MSG_INFO, "Received bad link event\n"); return; break; } }
/*
 * Handle a SYS ERROR reported by the device: mark the context as RESET and
 * queue a RESET state transition to recover.
 *
 * Returns MHI_STATUS_SUCCESS (failure to queue the transition is only
 * logged, matching the original behavior).
 */
MHI_STATUS process_SYSERR_transition(mhi_device_ctxt *mhi_dev_ctxt,
		STATE_TRANSITION cur_work_item)
{
	MHI_STATUS ret_val = MHI_STATUS_SUCCESS;

	mhi_log(MHI_MSG_CRITICAL, "Received SYS ERROR. Resetting MHI\n");
	/*
	 * NOTE(review): the original tested ret_val for failure here
	 * ("Failed to reset mhi") but ret_val was never assigned by any
	 * call, so the branch was unreachable — it looks like a device
	 * reset call was dropped. The dead check has been removed; confirm
	 * whether a reset call (e.g. mhi_trigger_reset) belongs here.
	 */
	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
	if (MHI_STATUS_SUCCESS != mhi_init_state_transition(mhi_dev_ctxt,
				STATE_TRANSITION_RESET))
		mhi_log(MHI_MSG_ERROR,
			"Failed to init state transition to RESET.\n");
	return ret_val;
}
enum MHI_STATUS init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt) { int r = 0; enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; mhi_assert_device_wake(mhi_dev_ctxt); mhi_dev_ctxt->flags.link_up = 1; r = mhi_set_bus_request(mhi_dev_ctxt, 1); if (r) mhi_log(MHI_MSG_INFO, "Failed to scale bus request to active set.\n"); ret_val = mhi_init_state_transition(mhi_dev_ctxt, mhi_dev_ctxt->base_state); if (MHI_STATUS_SUCCESS != ret_val) { mhi_log(MHI_MSG_CRITICAL, "Failed to start state change event, to %d\n", mhi_dev_ctxt->base_state); } return ret_val; }
enum MHI_STATUS mhi_trigger_reset(struct mhi_device_ctxt *mhi_dev_ctxt) { enum MHI_STATUS ret_val; unsigned long flags = 0; mhi_log(MHI_MSG_INFO, "Entered\n"); write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags); mhi_dev_ctxt->mhi_state = MHI_STATE_SYS_ERR; write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags); mhi_log(MHI_MSG_INFO, "Setting RESET to MDM.\n"); mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_RESET); mhi_log(MHI_MSG_INFO, "Transitioning state to RESET\n"); ret_val = mhi_init_state_transition(mhi_dev_ctxt, STATE_TRANSITION_RESET); if (MHI_STATUS_SUCCESS != ret_val) mhi_log(MHI_MSG_CRITICAL, "Failed to initiate 0x%x state trans ret %d\n", STATE_TRANSITION_RESET, ret_val); mhi_log(MHI_MSG_INFO, "Exiting\n"); return ret_val; }
/*
 * Drain up to @event_quota completed events from event ring @ev_index.
 *
 * Walks the local read pointer toward the device's read pointer, recycling
 * each ring element back to the device and then dispatching a local copy of
 * the event by packet type: command completions, transfer (TX) events,
 * device state-change events, and execution-environment change events.
 *
 * Always returns MHI_STATUS_SUCCESS; validation and recycle failures are
 * only logged.
 */
MHI_STATUS mhi_process_event_ring(mhi_device_ctxt *mhi_dev_ctxt,
		u32 ev_index, u32 event_quota)
{
	mhi_event_pkt *local_rp = NULL;
	mhi_event_pkt *device_rp = NULL;
	mhi_event_pkt event_to_process;
	mhi_event_ctxt *ev_ctxt = NULL;
	mhi_ring *local_ev_ctxt =
		&mhi_dev_ctxt->mhi_local_event_ctxt[ev_index];

	ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[ev_index];
	/* Translate the device-visible read pointer into a virtual address */
	device_rp = (mhi_event_pkt *)mhi_p2v_addr(
			mhi_dev_ctxt->mhi_ctrl_seg_info,
			ev_ctxt->mhi_event_read_ptr);
	local_rp = (mhi_event_pkt *)local_ev_ctxt->rp;
	/*
	 * NOTE(review): on validation failure execution still falls through
	 * into the processing loop below — confirm this is intentional.
	 */
	if (unlikely(MHI_STATUS_SUCCESS != validate_ev_el_addr(local_ev_ctxt,
					(uintptr_t)device_rp)))
		mhi_log(MHI_MSG_ERROR,
			"Failed to validate event ring element 0x%p\n",
			device_rp);
	/* Process events until we catch up with the device or run out of quota */
	while ((local_rp != device_rp) && (event_quota > 0) &&
			(device_rp != NULL) && (local_rp != NULL)) {
		/* Copy the event out before the slot is recycled to the device */
		event_to_process = *local_rp;
		if (unlikely(MHI_STATUS_SUCCESS !=
				recycle_trb_and_ring(mhi_dev_ctxt,
					local_ev_ctxt,
					MHI_RING_TYPE_EVENT_RING,
					ev_index)))
			mhi_log(MHI_MSG_ERROR, "Failed to recycle ev pkt\n");
		switch (MHI_TRB_READ_INFO(EV_TRB_TYPE, (&event_to_process))) {
		case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
			/* Command completion: pulse wakeup source, parse */
			mhi_log(MHI_MSG_INFO,
				"MHI CCE received ring 0x%x\n", ev_index);
			__pm_stay_awake(&mhi_dev_ctxt->wake_lock);
			__pm_relax(&mhi_dev_ctxt->wake_lock);
			parse_cmd_event(mhi_dev_ctxt, &event_to_process);
			break;
		case MHI_PKT_TYPE_TX_EVENT:
		{
			u32 chan = MHI_EV_READ_CHID(EV_CHID,
						&event_to_process);
			/*
			 * OOB/DB_MODE on an empty HW OUT channel is handled
			 * without grabbing the wakeup source; everything
			 * else pulses the wakeup source around the parse.
			 */
			if (((MHI_EV_READ_CODE(EV_TRB_CODE,
					&event_to_process) ==
						MHI_EVENT_CC_OOB) ||
			     (MHI_EV_READ_CODE(EV_TRB_CODE,
					&event_to_process) ==
						MHI_EVENT_CC_DB_MODE)) &&
			    (chan == MHI_CLIENT_IP_HW_0_OUT) &&
			    (mhi_dev_ctxt->mhi_local_chan_ctxt[chan].rp ==
			     mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp)) {
				mhi_log(MHI_MSG_VERBOSE,
					"Empty OOB chan %d\n", chan);
				parse_xfer_event(mhi_dev_ctxt,
						&event_to_process);
			} else {
				__pm_stay_awake(&mhi_dev_ctxt->wake_lock);
				parse_xfer_event(mhi_dev_ctxt,
						&event_to_process);
				__pm_relax(&mhi_dev_ctxt->wake_lock);
			}
		}
		break;
		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
		{
			/* Device reported an MHI state change; queue it */
			STATE_TRANSITION new_state;
			new_state = MHI_READ_STATE(&event_to_process);
			mhi_log(MHI_MSG_INFO,
				"MHI STE received ring 0x%x\n", ev_index);
			mhi_init_state_transition(mhi_dev_ctxt, new_state);
			break;
		}
		case MHI_PKT_TYPE_EE_EVENT:
		{
			/* Execution environment change (PBL -> SBL -> AMSS) */
			STATE_TRANSITION new_state;
			mhi_log(MHI_MSG_INFO,
				"MHI EEE received ring 0x%x\n", ev_index);
			__pm_stay_awake(&mhi_dev_ctxt->wake_lock);
			__pm_relax(&mhi_dev_ctxt->wake_lock);
			switch (MHI_READ_EXEC_ENV(&event_to_process)) {
			case MHI_EXEC_ENV_SBL:
				new_state = STATE_TRANSITION_SBL;
				mhi_init_state_transition(mhi_dev_ctxt,
							new_state);
				break;
			case MHI_EXEC_ENV_AMSS:
				new_state = STATE_TRANSITION_AMSS;
				mhi_init_state_transition(mhi_dev_ctxt,
							new_state);
				break;
			}
			break;
		}
		default:
			mhi_log(MHI_MSG_ERROR,
				"Unsupported packet type code 0x%x\n",
				MHI_TRB_READ_INFO(EV_TRB_TYPE,
						&event_to_process));
			break;
		}
		/* Re-read both pointers: the device may have produced more */
		local_rp = (mhi_event_pkt *)local_ev_ctxt->rp;
		device_rp = (mhi_event_pkt *)mhi_p2v_addr(
				mhi_dev_ctxt->mhi_ctrl_seg_info,
				(u64)ev_ctxt->mhi_event_read_ptr);
		--event_quota;
	}
	return MHI_STATUS_SUCCESS;
}
/*
 * Process a RESET state-transition work item (SSR-recovery flavor).
 *
 * Polls for device readiness and, on success, queues the READY transition.
 * It then resynchronises the device-visible ring context read/write
 * pointers (command, event, and channel rings) from the local copies so the
 * device can resume where it left off after an SSR.
 *
 * Returns the status of the readiness poll / queued transition.
 */
MHI_STATUS process_RESET_transition(mhi_device_ctxt *mhi_dev_ctxt,
		STATE_TRANSITION cur_work_item)
{
	u32 i = 0;
	u32 ev_ring_index;
	MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	mhi_log(MHI_MSG_INFO, "Processing RESET state transition\n");
	mhi_dev_ctxt->counters.mhi_reset_cntr++;
	/*
	 * Deliberate crash after 10 consecutive reset attempts so a CP
	 * ramdump can be collected for debugging.
	 */
	if (mhi_dev_ctxt->counters.mhi_reset_cntr >= 10) {
		panic("CP Crash: need CP dump");
	}
	ret_val = mhi_test_for_device_ready(mhi_dev_ctxt);
	switch (ret_val) {
	case MHI_STATUS_SUCCESS:
		/* Device is back: clear the retry counter and go READY */
		mhi_dev_ctxt->counters.mhi_reset_cntr = 0;
		mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
		ret_val = mhi_init_state_transition(mhi_dev_ctxt,
					STATE_TRANSITION_READY);
		if (MHI_STATUS_SUCCESS != ret_val)
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to initiate 0x%x state trans\n",
				STATE_TRANSITION_READY);
		break;
	case MHI_STATUS_LINK_DOWN:
		mhi_log(MHI_MSG_CRITICAL, "Link down detected\n");
		break;
	case MHI_STATUS_DEVICE_NOT_READY:
		/* Not ready yet: re-queue this RESET work item and retry */
		ret_val = mhi_init_state_transition(mhi_dev_ctxt,
					STATE_TRANSITION_RESET);
		if (MHI_STATUS_SUCCESS != ret_val)
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to initiate 0x%x state trans\n",
				STATE_TRANSITION_RESET);
		break;
	default:
		mhi_log(MHI_MSG_CRITICAL,
			"Unexpected ret code detected for\n");
		break;
	}
	/* Synchronise the local rp/wp with the ctxt rp/wp
	 * This will enable the device to pick up exactly where it left off,
	 * should this be an SSR recovery */
	for (i = 0; i < NR_OF_CMD_RINGS; ++i) {
		/* Command rings restart from base; publish the DMA address */
		mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp =
			mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
		mhi_dev_ctxt->mhi_local_cmd_ctxt[i].wp =
			mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_cmd_ctxt_list[i].
				mhi_cmd_ring_read_ptr =
			mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp);
	}
	/* Push the local event-ring read pointers into the device context */
	for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
		ev_ring_index = mhi_dev_ctxt->alloced_ev_rings[i];
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[ev_ring_index].
				mhi_event_read_ptr =
			mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->
				mhi_local_event_ctxt[ev_ring_index].rp);
	}
	/* Push the local channel rp/wp into the device channel contexts */
	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
		if (VALID_CHAN_NR(i)) {
			mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i].
					mhi_trb_read_ptr =
				mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
				(uintptr_t)mhi_dev_ctxt->
					mhi_local_chan_ctxt[i].rp);
			mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i].
					mhi_trb_write_ptr =
				mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
				(uintptr_t)mhi_dev_ctxt->
					mhi_local_chan_ctxt[i].wp);
		}
	}
	/* reset outbound_ack count */
	atomic_set(&mhi_dev_ctxt->counters.outbound_acks, 0);
	return ret_val;
}
static int mhi_ssr_notify_cb(struct notifier_block *nb, unsigned long action, void *data) { int ret_val = 0; struct mhi_device_ctxt *mhi_dev_ctxt = &mhi_devices.device_list[0].mhi_ctxt; struct mhi_pcie_dev_info *mhi_pcie_dev = NULL; mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices]; if (NULL != mhi_dev_ctxt) mhi_dev_ctxt->esoc_notif = action; switch (action) { case SUBSYS_BEFORE_POWERUP: mhi_log(MHI_MSG_INFO, "Received Subsystem event BEFORE_POWERUP\n"); atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 1); ret_val = init_mhi_base_state(mhi_dev_ctxt); if (MHI_STATUS_SUCCESS != ret_val) mhi_log(MHI_MSG_CRITICAL, "Failed to transition to base state %d.\n", ret_val); break; case SUBSYS_AFTER_POWERUP: mhi_log(MHI_MSG_INFO, "Received Subsystem event AFTER_POWERUP\n"); break; case SUBSYS_POWERUP_FAILURE: mhi_log(MHI_MSG_INFO, "Received Subsystem event POWERUP_FAILURE\n"); break; case SUBSYS_BEFORE_SHUTDOWN: mhi_log(MHI_MSG_INFO, "Received Subsystem event BEFORE_SHUTDOWN\n"); atomic_set(&mhi_dev_ctxt->flags.pending_ssr, 1); mhi_notify_clients(mhi_dev_ctxt, MHI_CB_MHI_DISABLED); break; case SUBSYS_AFTER_SHUTDOWN: mhi_log(MHI_MSG_INFO, "Received Subsystem event AFTER_SHUTDOWN\n"); ret_val = mhi_init_state_transition(mhi_dev_ctxt, STATE_TRANSITION_LINK_DOWN); if (MHI_STATUS_SUCCESS != ret_val) { mhi_log(MHI_MSG_CRITICAL, "Failed to init state transition, to %d\n", STATE_TRANSITION_LINK_DOWN); } break; case SUBSYS_RAMDUMP_NOTIFICATION: mhi_log(MHI_MSG_INFO, "Received Subsystem event RAMDUMP\n"); ret_val = init_mhi_base_state(mhi_dev_ctxt); if (MHI_STATUS_SUCCESS != ret_val) mhi_log(MHI_MSG_CRITICAL, "Failed to transition to base state %d.\n", ret_val); break; default: mhi_log(MHI_MSG_INFO, "Received ESOC notifcation %d, NOT handling\n", (int)action); break; } return NOTIFY_OK; }
/*
 * BHI firmware-image write handler (char-dev .write fop).
 *
 * Waits for the device to reach BHI state, copies the user image into a
 * 4 KiB-aligned kernel buffer, DMA-maps it, programs the BHI image
 * address/size registers, rings the image doorbell, polls BHI status, then
 * unmaps/frees the buffer and queues the RESET state transition.
 *
 * Returns the number of bytes consumed on success, or a negative errno.
 */
static ssize_t bhi_write(struct file *file,
		const char __user *buf,
		size_t count, loff_t *offp)
{
	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	ssize_t rc;
	u32 pcie_word_val = 0;
	u32 i = 0;
	struct bhi_ctxt_t *bhi_ctxt =
		&(((struct mhi_pcie_dev_info *)file->private_data)->bhi_ctxt);
	struct mhi_device_ctxt *mhi_dev_ctxt =
		&((struct mhi_pcie_dev_info *)file->private_data)->mhi_ctxt;
	size_t amount_copied = 0;
	uintptr_t align_len = 0x1000;
	u32 tx_db_val = 0;

	if (buf == NULL || 0 == count)
		return -EIO;
	if (count > BHI_MAX_IMAGE_SIZE)
		return -ENOMEM;
	/* Fix: honor signal interruption of the wait instead of ignoring it */
	if (wait_event_interruptible(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
			mhi_dev_ctxt->mhi_state == MHI_STATE_BHI))
		return -ERESTARTSYS;
	mhi_log(MHI_MSG_INFO, "Entered. User Image size 0x%zx\n", count);
	/* Over-allocate so the image can be aligned to align_len */
	bhi_ctxt->unaligned_image_loc =
		kmalloc(count + (align_len - 1), GFP_KERNEL);
	if (bhi_ctxt->unaligned_image_loc == NULL)
		return -ENOMEM;
	mhi_log(MHI_MSG_INFO, "Unaligned Img Loc: %p\n",
		bhi_ctxt->unaligned_image_loc);
	bhi_ctxt->image_loc =
		(void *)((uintptr_t)bhi_ctxt->unaligned_image_loc +
			(align_len -
			 (((uintptr_t)bhi_ctxt->unaligned_image_loc) %
							align_len)));
	mhi_log(MHI_MSG_INFO, "Aligned Img Loc: %p\n", bhi_ctxt->image_loc);
	bhi_ctxt->image_size = count;
	if (0 != copy_from_user(bhi_ctxt->image_loc, buf, count)) {
		/* Fix: original returned amount_copied (0) here, losing the
		 * error; propagate a negative errno instead. */
		rc = -ENOMEM;
		goto bhi_copy_error;
	}
	amount_copied = count;
	/* Flush the writes, in anticipation for a device read */
	wmb();
	mhi_log(MHI_MSG_INFO,
		"Copied image from user at addr: %p\n", bhi_ctxt->image_loc);
	bhi_ctxt->phy_image_loc = dma_map_single(
			&mhi_dev_ctxt->dev_info->plat_dev->dev,
			bhi_ctxt->image_loc,
			bhi_ctxt->image_size,
			DMA_TO_DEVICE);
	/* Fix: check the mapping against the device used for the mapping,
	 * not NULL, and propagate the error */
	if (dma_mapping_error(&mhi_dev_ctxt->dev_info->plat_dev->dev,
				bhi_ctxt->phy_image_loc)) {
		rc = -EIO;
		goto bhi_copy_error;
	}
	mhi_log(MHI_MSG_INFO, "Mapped image to DMA addr 0x%lx:\n",
		(uintptr_t)bhi_ctxt->phy_image_loc);
	bhi_ctxt->image_size = count;
	/* Write the image size */
	pcie_word_val = HIGH_WORD(bhi_ctxt->phy_image_loc);
	mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
			BHI_IMGADDR_HIGH, 0xFFFFFFFF, 0, pcie_word_val);
	pcie_word_val = LOW_WORD(bhi_ctxt->phy_image_loc);
	mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
			BHI_IMGADDR_LOW, 0xFFFFFFFF, 0, pcie_word_val);
	pcie_word_val = bhi_ctxt->image_size;
	mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
			BHI_IMGSIZE, 0xFFFFFFFF, 0, pcie_word_val);
	/* Ring the image doorbell with an incremented sequence value */
	pcie_word_val = mhi_reg_read(bhi_ctxt->bhi_base, BHI_IMGTXDB);
	mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
			BHI_IMGTXDB, 0xFFFFFFFF, 0, ++pcie_word_val);
	mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHI_INTVEC, 0);
	/* Poll for the device to report BHI success */
	for (i = 0; i < BHI_POLL_NR_RETRIES; ++i) {
		tx_db_val = mhi_reg_read_field(bhi_ctxt->bhi_base,
					BHI_STATUS,
					BHI_STATUS_MASK,
					BHI_STATUS_SHIFT);
		mhi_log(MHI_MSG_CRITICAL, "BHI STATUS 0x%x\n", tx_db_val);
		if (BHI_STATUS_SUCCESS != tx_db_val)
			mhi_log(MHI_MSG_CRITICAL,
				"Incorrect BHI status: %d retry: %d\n",
				tx_db_val, i);
		else
			break;
		usleep_range(20000, 25000);
	}
	dma_unmap_single(&mhi_dev_ctxt->dev_info->plat_dev->dev,
			bhi_ctxt->phy_image_loc,
			bhi_ctxt->image_size, DMA_TO_DEVICE);
	kfree(bhi_ctxt->unaligned_image_loc);
	ret_val = mhi_init_state_transition(mhi_dev_ctxt,
					STATE_TRANSITION_RESET);
	if (MHI_STATUS_SUCCESS != ret_val) {
		mhi_log(MHI_MSG_CRITICAL,
			"Failed to start state change event\n");
	}
	return amount_copied;

bhi_copy_error:
	kfree(bhi_ctxt->unaligned_image_loc);
	return rc;
}