/*
 * pm_autosleep_set_state - set the opportunistic-suspend target state.
 * @state: state to enter when the system is idle; PM_SUSPEND_ON disables
 *         autosleep.
 *
 * Returns 0 on success, or -EINVAL if @state is out of range.  The range
 * check is compiled out when hibernation is enabled, because the hibernate
 * state value lies above PM_SUSPEND_MAX.
 */
int pm_autosleep_set_state(suspend_state_t state)
{
#ifndef CONFIG_HIBERNATION
	if (state >= PM_SUSPEND_MAX)
		return -EINVAL;
#endif
	/* Hold a wakeup source so an already-queued suspend cannot race
	 * with the state update below. */
	__pm_stay_awake(autosleep_ws);
	mutex_lock(&autosleep_lock);
	autosleep_state = state;
	__pm_relax(autosleep_ws);
	if (state > PM_SUSPEND_ON) {
		pm_wakep_autosleep_enabled(true);
		queue_up_suspend_work();
#ifndef CONFIG_PM_SYNC_BEFORE_SUSPEND
		/* Filesystem sync is skipped in the suspend path for this
		 * configuration, so do it here before the system may sleep. */
		printk(KERN_INFO "PM: Syncing filesystems ... ");
		sys_sync();
		printk("done.\n");
#endif
	} else {
		pm_wakep_autosleep_enabled(false);
	}
	mutex_unlock(&autosleep_lock);
	return 0;
}
/*
 * pm_autosleep_set_state - set the opportunistic-suspend target state.
 * @state: state to enter when the system is idle; PM_SUSPEND_ON disables
 *         autosleep.
 *
 * Returns 0 on success, or -EINVAL if @state is out of range (only
 * checked when hibernation support is compiled out, since the hibernate
 * state value lies above PM_SUSPEND_MAX).
 */
int __ref pm_autosleep_set_state(suspend_state_t state)
{
#ifndef CONFIG_HIBERNATION
	if (state >= PM_SUSPEND_MAX)
		return -EINVAL;
#endif
	/* Hold a wakeup source so a queued suspend cannot race with the
	 * state update below. */
	__pm_stay_awake(autosleep_ws);
	mutex_lock(&autosleep_lock);
	autosleep_state = state;
	__pm_relax(autosleep_ws);
	if (state > PM_SUSPEND_ON) {
		pm_wakep_autosleep_enabled(true);
		queue_up_suspend_work();
	} else {
		pm_wakep_autosleep_enabled(false);
	}
	/* Forward the requested state to the early-suspend machinery when
	 * that feature is built in. */
	if (IS_ENABLED(CONFIG_PM_EARLYSUSPEND))
		pm_request_early_suspend_state(state);
	mutex_unlock(&autosleep_lock);
	return 0;
}
/*
 * pm_autosleep_set_state - set the opportunistic-suspend target state.
 * @state: state to enter when the system is idle; PM_SUSPEND_ON disables
 *         autosleep.
 *
 * Returns 0 on success, or -EINVAL if @state is out of range (only
 * checked when hibernation support is compiled out).
 */
int pm_autosleep_set_state(suspend_state_t state)
{
#ifndef CONFIG_HIBERNATION
	if (state >= PM_SUSPEND_MAX)
		return -EINVAL;
#endif
	/* Hold a wakeup source so a queued suspend cannot race with the
	 * state update below. */
	__pm_stay_awake(autosleep_ws);
	mutex_lock(&autosleep_lock);
	autosleep_state = state;
	__pm_relax(autosleep_ws);
	if (state > PM_SUSPEND_ON) {
		pm_wakep_autosleep_enabled(true);
		/* Vendor hook queued first, then the generic suspend work. */
		zw_queue_up_suspend_work(state);
		queue_up_suspend_work();
	} else {
		pm_wakep_autosleep_enabled(false);
	}
	mutex_unlock(&autosleep_lock);
	return 0;
}
/*
 * gpio_keypad_irq_handler - interrupt handler for the matrix keypad inputs.
 * @irq_in: the interrupt line that fired.
 * @dev_id: the struct gpio_kp registered with the IRQ.
 *
 * Masks all input-line interrupts, parks the output lines, grabs the
 * wakeup source, and kicks the scan timer which performs the actual key
 * matrix scan.  Always returns IRQ_HANDLED.
 */
static irqreturn_t gpio_keypad_irq_handler(int irq_in, void *dev_id)
{
	int i;
	struct gpio_kp *kp = dev_id;
	struct gpio_event_matrix_info *mi = kp->keypad_info;
	unsigned gpio_keypad_flags = mi->flags;

	if (!kp->use_irq) {
		/* ignore interrupt while registering the handler */
		kp->disabled_irq = 1;
		disable_irq_nosync(irq_in);
		return IRQ_HANDLED;
	}

	/* Mask every input line; they are re-enabled after the timer scan. */
	for (i = 0; i < mi->ninputs; i++)
		disable_irq_nosync(gpio_to_irq(mi->input_gpios[i]));
	/* Park the output lines: drive them inactive, or float them,
	 * depending on the configured drive mode. */
	for (i = 0; i < mi->noutputs; i++) {
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(mi->output_gpios[i],
				!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH));
		else
			gpio_direction_input(mi->output_gpios[i]);
	}
	/* Keep the system awake until the timer-driven scan has reported
	 * the key state. */
	__pm_stay_awake(&kp->wakeup_source);
	hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
	return IRQ_HANDLED;
}
/*
 * pm_autosleep_set_state - set the opportunistic-suspend target state.
 * @state: state to enter when the system is idle; PM_SUSPEND_ON disables
 *         autosleep.
 *
 * Returns 0 on success, or -EINVAL if @state is out of range (only
 * checked when hibernation support is compiled out).
 */
int pm_autosleep_set_state(suspend_state_t state)
{
#ifndef CONFIG_HIBERNATION
	if (state >= PM_SUSPEND_MAX)
		return -EINVAL;
#endif
	/* Hold a wakeup source so a queued suspend cannot race with the
	 * state update below. */
	__pm_stay_awake(autosleep_ws);
	mutex_lock(&autosleep_lock);
	autosleep_state = state;
	__pm_relax(autosleep_ws);
	if (state > PM_SUSPEND_ON) {
		pm_wakep_autosleep_enabled(true);
		queue_up_suspend_work();
#ifdef CONFIG_POWERSUSPEND
		/* Yank555.lu: hook to handle powersuspend tasks (sleep). */
		set_power_suspend_state_autosleep_hook(POWER_SUSPEND_ACTIVE);
#endif
	} else {
		pm_wakep_autosleep_enabled(false);
#ifdef CONFIG_POWERSUSPEND
		/* Yank555.lu: hook to handle powersuspend tasks (wakeup). */
		set_power_suspend_state_autosleep_hook(POWER_SUSPEND_INACTIVE);
#endif
	}
	mutex_unlock(&autosleep_lock);
	return 0;
}
void mhi_link_state_cb(struct msm_pcie_notify *notify) { enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; struct mhi_pcie_dev_info *mhi_pcie_dev = notify->data; struct mhi_device_ctxt *mhi_dev_ctxt = NULL; if (NULL == notify || NULL == notify->data) { mhi_log(MHI_MSG_CRITICAL, "Incomplete handle received\n"); return; } mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt; switch (notify->event) { case MSM_PCIE_EVENT_LINKDOWN: mhi_log(MHI_MSG_INFO, "Received MSM_PCIE_EVENT_LINKDOWN\n"); break; case MSM_PCIE_EVENT_LINKUP: mhi_log(MHI_MSG_INFO, "Received MSM_PCIE_EVENT_LINKUP\n"); if (0 == mhi_pcie_dev->link_up_cntr) { mhi_log(MHI_MSG_INFO, "Initializing MHI for the first time\n"); mhi_ctxt_init(mhi_pcie_dev); mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt; mhi_pcie_dev->mhi_ctxt.flags.link_up = 1; pci_set_master(mhi_pcie_dev->pcie_device); init_mhi_base_state(mhi_dev_ctxt); } else { mhi_log(MHI_MSG_INFO, "Received Link Up Callback\n"); } mhi_pcie_dev->link_up_cntr++; break; case MSM_PCIE_EVENT_WAKEUP: mhi_log(MHI_MSG_INFO, "Received MSM_PCIE_EVENT_WAKE\n"); __pm_stay_awake(&mhi_dev_ctxt->w_lock); __pm_relax(&mhi_dev_ctxt->w_lock); if (atomic_read(&mhi_dev_ctxt->flags.pending_resume)) { mhi_log(MHI_MSG_INFO, "There is a pending resume, doing nothing.\n"); return; } ret_val = mhi_init_state_transition(mhi_dev_ctxt, STATE_TRANSITION_WAKE); if (MHI_STATUS_SUCCESS != ret_val) { mhi_log(MHI_MSG_CRITICAL, "Failed to init state transition, to %d\n", STATE_TRANSITION_WAKE); } break; default: mhi_log(MHI_MSG_INFO, "Received bad link event\n"); return; break; } }
/*
 * wmt_plat_wake_lock_ctrl - reference-counted host wake-lock control.
 * @opId: WL_OP_GET to take a reference, WL_OP_PUT to drop one.
 *
 * The first GET (counter 0 -> 1) acquires the wake lock; the last PUT
 * (counter 1 -> 0) releases it.  Returns 0, or the negative error from
 * mutex_lock_killable() if the lock could not be taken.  When wakelock
 * support is compiled out the function is a logged no-op.
 *
 * Fix: the original re-read the shared @counter in the conditions after
 * mutex_unlock(), so a concurrent caller could change it between the
 * unlock and the test (racy decision about locking/unlocking).  The
 * value is now snapshotted while the mutex is still held.
 */
INT32 wmt_plat_wake_lock_ctrl(ENUM_WL_OP opId)
{
#ifdef CFG_WMT_WAKELOCK_SUPPORT
	static INT32 counter;
	INT32 cnt;
	INT32 status;
	INT32 ret = 0;

	ret = mutex_lock_killable(&gOsSLock);
	if (ret) {
		WMT_PLAT_ERR_FUNC("--->lock gOsSLock failed, ret=%d\n", ret);
		return ret;
	}
	if (WL_OP_GET == opId)
		++counter;
	else if (WL_OP_PUT == opId)
		--counter;
	/* Snapshot under the mutex; the decisions below must not observe a
	 * counter value modified by a concurrent caller. */
	cnt = counter;
	mutex_unlock(&gOsSLock);

	if (WL_OP_GET == opId && cnt == 1) {
		/* First reference: acquire the wake lock. */
#ifdef CONFIG_PM_WAKELOCKS
		__pm_stay_awake(&wmtWakeLock);
		status = wmtWakeLock.active;
#else
		wake_lock(&wmtWakeLock);
		status = wake_lock_active(&wmtWakeLock);
#endif
		WMT_PLAT_DBG_FUNC("WMT-PLAT: after wake_lock(%d), counter(%d)\n",
				status, cnt);
	} else if (WL_OP_PUT == opId && cnt == 0) {
		/* Last reference dropped: release the wake lock. */
#ifdef CONFIG_PM_WAKELOCKS
		__pm_relax(&wmtWakeLock);
		status = wmtWakeLock.active;
#else
		wake_unlock(&wmtWakeLock);
		status = wake_lock_active(&wmtWakeLock);
#endif
		WMT_PLAT_DBG_FUNC("WMT-PLAT: after wake_unlock(%d), counter(%d)\n",
				status, cnt);
	} else {
#ifdef CONFIG_PM_WAKELOCKS
		status = wmtWakeLock.active;
#else
		status = wake_lock_active(&wmtWakeLock);
#endif
		WMT_PLAT_WARN_FUNC("WMT-PLAT: wakelock status(%d), counter(%d)\n",
				status, cnt);
	}
	return 0;
#else
	WMT_PLAT_WARN_FUNC("WMT-PLAT: host awake function is not supported.\n");
	return 0;
#endif
}
/*
 * pm_autosleep_set_state - set the opportunistic-suspend target state.
 * @state: state to enter when the system is idle; PM_SUSPEND_ON disables
 *         autosleep.
 *
 * Returns 0 on success, or -EINVAL if @state is out of range (only
 * checked when hibernation support is compiled out).
 */
int pm_autosleep_set_state(suspend_state_t state)
{
#ifdef CONFIG_SEC_GPIO_DVS
	/* One-shot latch: the GPIO verification below must run only once. */
	static bool gpio_init_done = false;
#endif
#ifndef CONFIG_HIBERNATION
	if (state >= PM_SUSPEND_MAX)
		return -EINVAL;
#endif
#ifdef CONFIG_SEC_GPIO_DVS
	/************************ Caution !!! ****************************/
	/* This function must be located in appropriate INIT position
	 * in accordance with the specification of each BB vendor.
	 */
	/************************ Caution !!! ****************************/
	if (unlikely(!gpio_init_done) && state==PM_SUSPEND_ON) {
		gpio_dvs_check_initgpio();
		gpio_init_done = true;
	}
#endif
	/* Hold a wakeup source so a queued suspend cannot race with the
	 * state update below. */
	__pm_stay_awake(autosleep_ws);
	mutex_lock(&autosleep_lock);
	autosleep_state = state;
	__pm_relax(autosleep_ws);
#ifdef CONFIG_SEC_PM_DEBUG
	wakeup_sources_stats_active();
#endif
	if (state > PM_SUSPEND_ON) {
		pm_wakep_autosleep_enabled(true);
#ifdef CONFIG_ADAPTIVE_KSM
		AKSM_suspend();
#endif
		queue_up_suspend_work();
	} else {
#ifdef CONFIG_ADAPTIVE_KSM
		AKSM_resume();
#endif
		pm_wakep_autosleep_enabled(false);
	}
	mutex_unlock(&autosleep_lock);
	return 0;
}
/*
 * bcm2079x_dev_irq_handler - NFC host-wake interrupt handler.
 * @irq: interrupt line that fired.
 * @dev_id: the struct bcm2079x_dev registered with the IRQ.
 *
 * Counts the interrupt under the irq_enabled_lock, holds the host-wake
 * wakeup source (released later by wake_timer, armed here for 500 ms),
 * and wakes any reader blocked on read_wq.  Always returns IRQ_HANDLED.
 */
static irqreturn_t bcm2079x_dev_irq_handler(int irq, void *dev_id)
{
	struct bcm2079x_dev *bcm2079x_dev = dev_id;
	unsigned long flags;

	DBG2(dev_info(&bcm2079x_dev->client->dev, "irq go high\n"));
	spin_lock_irqsave(&bcm2079x_dev->irq_enabled_lock, flags);
	bcm2079x_dev->count_irq++;
	/* Stay awake until the 500 ms wake_timer expires. */
	__pm_stay_awake(bcm2079x_dev->host_wake_ws);
	mod_timer(&bcm2079x_dev->wake_timer,
		jiffies + msecs_to_jiffies(500));
	spin_unlock_irqrestore(&bcm2079x_dev->irq_enabled_lock, flags);
	wake_up(&bcm2079x_dev->read_wq);
#ifdef CONFIG_HAS_WAKELOCK
	wake_lock(&nfc_soft_wake_lock);
#endif
	return IRQ_HANDLED;
}
/*
 * process_wake_transition - handle a WAKE work item from the state thread.
 * @mhi_dev_ctxt:  MHI device context.
 * @cur_work_item: the work item being processed (unused in the body).
 *
 * If an SSR is pending the wake is ignored.  Otherwise, an initialized
 * MHI stack is runtime-resumed toward M0, while an uninitialized one is
 * moved to the base state.  The w_lock wakeup source is held for the
 * duration so the system cannot suspend mid-transition.
 *
 * Returns MHI_STATUS_SUCCESS, or the failure from init_mhi_base_state().
 */
static enum MHI_STATUS process_wake_transition(
		struct mhi_device_ctxt *mhi_dev_ctxt,
		enum STATE_TRANSITION cur_work_item)
{
	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	int r = 0;

	mhi_log(MHI_MSG_INFO, "Entered\n");
	__pm_stay_awake(&mhi_dev_ctxt->w_lock);
	if (atomic_read(&mhi_dev_ctxt->flags.pending_ssr)) {
		mhi_log(MHI_MSG_CRITICAL, "Pending SSR, Ignoring.\n");
		goto exit;
	}
	if (mhi_dev_ctxt->flags.mhi_initialized) {
		r = pm_request_resume(&mhi_dev_ctxt->dev_info->plat_dev->dev);
		mhi_log(MHI_MSG_VERBOSE,
			"MHI is initialized, transitioning to M0, ret %d\n", r);
	}
	if (!mhi_dev_ctxt->flags.mhi_initialized) {
		mhi_log(MHI_MSG_INFO,
			"MHI is not initialized transitioning to base.\n");
		ret_val = init_mhi_base_state(mhi_dev_ctxt);
		if (MHI_STATUS_SUCCESS != ret_val)
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to transition to base state %d.\n",
				ret_val);
	}
exit:
	__pm_relax(&mhi_dev_ctxt->w_lock);
	mhi_log(MHI_MSG_INFO, "Exited.\n");
	return ret_val;
}
/*
 * pm_autosleep_set_state - set the opportunistic-suspend target state.
 * @state: state to enter when the system is idle; PM_SUSPEND_ON disables
 *         autosleep.
 *
 * Returns 0 on success, or -EINVAL if @state is out of range (only
 * checked when hibernation support is compiled out).
 */
int pm_autosleep_set_state(suspend_state_t state)
{
#ifndef CONFIG_HIBERNATION
	if (state >= PM_SUSPEND_MAX)
		return -EINVAL;
#endif
	/* Hold a wakeup source so a queued suspend cannot race with the
	 * state update below. */
	__pm_stay_awake(autosleep_ws);
	mutex_lock(&autosleep_lock);
	autosleep_state = state;
	__pm_relax(autosleep_ws);
	if (state > PM_SUSPEND_ON) {
		pm_wakep_autosleep_enabled(true);
		/* Debug aid for wakelocks held before suspend: arm the
		 * "unattended" watchdog timer before queuing the suspend. */
		g_resume_status = false;
		pr_info("[PM]unattended_timer: mod_timer (auto_sleep)\n");
		mod_timer(&unattended_timer,
			jiffies + msecs_to_jiffies(PM_UNATTENDED_TIMEOUT));
		queue_up_suspend_work();
	} else {
		pm_wakep_autosleep_enabled(false);
		/* Autosleep disabled: disarm the wakelock-debug timer. */
		pr_info("[PM]unattended_timer: del_timer (late_resume)\n");
		del_timer(&unattended_timer);
	}
	mutex_unlock(&autosleep_lock);
	return 0;
}
/*
 * process_WAKE_transition - handle a WAKE work item from the state thread.
 * @mhi_dev_ctxt:  MHI device context.
 * @cur_work_item: the work item being processed (unused in the body).
 *
 * Turns the PCIe link back on, restarts the worker threads, and moves an
 * initialized MHI stack toward M0 (or an uninitialized one to the base
 * state).  The wake_lock wakeup source is held across the whole sequence
 * so the system cannot suspend mid-transition.
 *
 * Returns MHI_STATUS_SUCCESS, or the first failure encountered.
 */
MHI_STATUS process_WAKE_transition(mhi_device_ctxt *mhi_dev_ctxt,
		STATE_TRANSITION cur_work_item)
{
	MHI_STATUS ret_val = MHI_STATUS_SUCCESS;

	mhi_log(MHI_MSG_INFO, "Entered\n");
	__pm_stay_awake(&mhi_dev_ctxt->wake_lock);
	ret_val = mhi_turn_on_pcie_link(mhi_dev_ctxt);
	if (MHI_STATUS_SUCCESS != ret_val) {
		mhi_log(MHI_MSG_CRITICAL,
			"Failed to turn on PCIe link.\n");
		goto exit;
	}
	mhi_dev_ctxt->flags.stop_threads = 0;
	if (mhi_dev_ctxt->flags.mhi_initialized &&
	    mhi_dev_ctxt->flags.link_up) {
		mhi_log(MHI_MSG_CRITICAL,
			"MHI is initialized, transitioning to M0.\n");
		mhi_initiate_m0(mhi_dev_ctxt);
	}
	if (!mhi_dev_ctxt->flags.mhi_initialized) {
		mhi_log(MHI_MSG_CRITICAL,
			"MHI is not initialized transitioning to base.\n");
		ret_val = init_mhi_base_state(mhi_dev_ctxt);
		if (MHI_STATUS_SUCCESS != ret_val)
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to transition to base state %d.\n",
				ret_val);
	}
exit:
	mhi_log(MHI_MSG_INFO, "Exited.\n");
	__pm_relax(&mhi_dev_ctxt->wake_lock);
	return ret_val;
}
/*
 * smd_tty_notify - SMD channel event callback for a TTY port.
 * @priv:  the struct smd_tty_info registered with the channel.
 * @event: SMD_EVENT_* code describing what happened on the channel.
 *
 * DATA:  wakes blocked writers when space is available and schedules the
 *        read tasklet (holding the ra wakeup source) when data is pending.
 * OPEN:  marks the port open and wakes waiters on ch_opened_wait_queue.
 * CLOSE: marks the port in reset/closed and pushes TTY_BREAK through the
 *        read tasklet.
 * REOPEN_READY (LGE builds): channel fully closed; mark reset and wake
 *        waiters.
 */
static void smd_tty_notify(void *priv, unsigned event)
{
	struct smd_tty_info *info = priv;
	struct tty_struct *tty;
	unsigned long flags;

	switch (event) {
	case SMD_EVENT_DATA:
		spin_lock_irqsave(&info->reset_lock_lha2, flags);
		if (!info->is_open) {
			spin_unlock_irqrestore(&info->reset_lock_lha2, flags);
			break;
		}
		spin_unlock_irqrestore(&info->reset_lock_lha2, flags);
		/* There may be clients (tty framework) that are blocked
		 * waiting for space to write data, so if a possible read
		 * interrupt came in wake anyone waiting and disable the
		 * interrupts */
		if (smd_write_avail(info->ch)) {
			smd_disable_read_intr(info->ch);
			tty = tty_port_tty_get(&info->port);
			if (tty)
				wake_up_interruptible(&tty->write_wait);
			tty_kref_put(tty);
		}
		spin_lock_irqsave(&info->ra_lock_lha3, flags);
		if (smd_read_avail(info->ch)) {
			/* Stay awake until the read tasklet has drained
			 * the channel. */
			__pm_stay_awake(&info->ra_wakeup_source);
			tasklet_hi_schedule(&info->tty_tsklt);
		}
		spin_unlock_irqrestore(&info->ra_lock_lha3, flags);
		break;

	case SMD_EVENT_OPEN:
		tty = tty_port_tty_get(&info->port);
		spin_lock_irqsave(&info->reset_lock_lha2, flags);
		if (tty)
			clear_bit(TTY_OTHER_CLOSED, &tty->flags);
		info->in_reset = 0;
		info->in_reset_updated = 1;
		info->is_open = 1;
		wake_up_interruptible(&info->ch_opened_wait_queue);
		spin_unlock_irqrestore(&info->reset_lock_lha2, flags);
		tty_kref_put(tty);
		break;

	case SMD_EVENT_CLOSE:
		spin_lock_irqsave(&info->reset_lock_lha2, flags);
		info->in_reset = 1;
		info->in_reset_updated = 1;
		info->is_open = 0;
		wake_up_interruptible(&info->ch_opened_wait_queue);
		spin_unlock_irqrestore(&info->reset_lock_lha2, flags);
		tty = tty_port_tty_get(&info->port);
		if (tty) {
			/* send TTY_BREAK through read tasklet */
			set_bit(TTY_OTHER_CLOSED, &tty->flags);
			tasklet_hi_schedule(&info->tty_tsklt);
			if (tty->index == LOOPBACK_IDX)
				schedule_delayed_work(&loopback_work,
						msecs_to_jiffies(1000));
		}
		tty_kref_put(tty);
		break;
#ifdef CONFIG_LGE_USES_SMD_DS_TTY
	case SMD_EVENT_REOPEN_READY:
		/* smd channel is closed completely */
		spin_lock_irqsave(&info->reset_lock_lha2, flags);
		info->in_reset = 1;
		info->in_reset_updated = 1;
		info->is_open = 0;
		wake_up_interruptible(&info->ch_opened_wait_queue);
		spin_unlock_irqrestore(&info->reset_lock_lha2, flags);
		break;
#endif
	}
}
/*
 * mhi_process_event_ring - drain pending events from one MHI event ring.
 * @mhi_dev_ctxt: MHI device context.
 * @ev_index:     index of the event ring to process.
 * @event_quota:  maximum number of events to consume in this call.
 *
 * Walks the ring from the local read pointer to the device read pointer,
 * recycling each element back to the device before dispatching it by TRB
 * type (command completion, transfer, state change, execution-environment
 * change).  Always returns MHI_STATUS_SUCCESS; individual element
 * failures are only logged.
 */
MHI_STATUS mhi_process_event_ring(mhi_device_ctxt *mhi_dev_ctxt,
		u32 ev_index,
		u32 event_quota)
{
	mhi_event_pkt *local_rp = NULL;
	mhi_event_pkt *device_rp = NULL;
	mhi_event_pkt event_to_process;
	mhi_event_ctxt *ev_ctxt = NULL;
	mhi_ring *local_ev_ctxt =
		&mhi_dev_ctxt->mhi_local_event_ctxt[ev_index];

	ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[ev_index];
	device_rp = (mhi_event_pkt *)mhi_p2v_addr(
			mhi_dev_ctxt->mhi_ctrl_seg_info,
			ev_ctxt->mhi_event_read_ptr);
	local_rp = (mhi_event_pkt *)local_ev_ctxt->rp;
	if (unlikely(MHI_STATUS_SUCCESS != validate_ev_el_addr(local_ev_ctxt,
					(uintptr_t)device_rp)))
		mhi_log(MHI_MSG_ERROR,
			"Failed to validate event ring element 0x%p\n",
			device_rp);
	while ((local_rp != device_rp) && (event_quota > 0) &&
			(device_rp != NULL) && (local_rp != NULL)) {
		/* Copy the element out first: recycling hands the slot back
		 * to the device, so it must not be read afterwards. */
		event_to_process = *local_rp;
		if (unlikely(MHI_STATUS_SUCCESS !=
				recycle_trb_and_ring(mhi_dev_ctxt,
					local_ev_ctxt,
					MHI_RING_TYPE_EVENT_RING,
					ev_index)))
			mhi_log(MHI_MSG_ERROR, "Failed to recycle ev pkt\n");
		switch (MHI_TRB_READ_INFO(EV_TRB_TYPE, (&event_to_process))) {
		case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
			mhi_log(MHI_MSG_INFO,
				"MHI CCE received ring 0x%x\n", ev_index);
			/* Pulse the wake lock while handling the event. */
			__pm_stay_awake(&mhi_dev_ctxt->wake_lock);
			__pm_relax(&mhi_dev_ctxt->wake_lock);
			parse_cmd_event(mhi_dev_ctxt, &event_to_process);
			break;
		case MHI_PKT_TYPE_TX_EVENT:
		{
			u32 chan = MHI_EV_READ_CHID(EV_CHID, &event_to_process);
			/* An OOB/DB_MODE completion on an already-empty HW
			 * outbound channel needs no wake-lock pulse. */
			if (((MHI_EV_READ_CODE(EV_TRB_CODE,
					&event_to_process) ==
						MHI_EVENT_CC_OOB) ||
			     (MHI_EV_READ_CODE(EV_TRB_CODE,
					&event_to_process) ==
						MHI_EVENT_CC_DB_MODE)) &&
			    (chan == MHI_CLIENT_IP_HW_0_OUT) &&
			    (mhi_dev_ctxt->mhi_local_chan_ctxt[chan].rp ==
			     mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp)) {
				mhi_log(MHI_MSG_VERBOSE,
					"Empty OOB chan %d\n", chan);
				parse_xfer_event(mhi_dev_ctxt,
						&event_to_process);
			} else {
				__pm_stay_awake(&mhi_dev_ctxt->wake_lock);
				parse_xfer_event(mhi_dev_ctxt,
						&event_to_process);
				__pm_relax(&mhi_dev_ctxt->wake_lock);
			}
		}
		break;
		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
		{
			STATE_TRANSITION new_state;
			new_state = MHI_READ_STATE(&event_to_process);
			mhi_log(MHI_MSG_INFO,
				"MHI STE received ring 0x%x\n", ev_index);
			mhi_init_state_transition(mhi_dev_ctxt, new_state);
			break;
		}
		case MHI_PKT_TYPE_EE_EVENT:
		{
			STATE_TRANSITION new_state;
			mhi_log(MHI_MSG_INFO,
				"MHI EEE received ring 0x%x\n", ev_index);
			__pm_stay_awake(&mhi_dev_ctxt->wake_lock);
			__pm_relax(&mhi_dev_ctxt->wake_lock);
			switch (MHI_READ_EXEC_ENV(&event_to_process)) {
			case MHI_EXEC_ENV_SBL:
				new_state = STATE_TRANSITION_SBL;
				mhi_init_state_transition(mhi_dev_ctxt,
							new_state);
				break;
			case MHI_EXEC_ENV_AMSS:
				new_state = STATE_TRANSITION_AMSS;
				mhi_init_state_transition(mhi_dev_ctxt,
							new_state);
				break;
			}
			break;
		}
		default:
			mhi_log(MHI_MSG_ERROR,
				"Unsupported packet type code 0x%x\n",
				MHI_TRB_READ_INFO(EV_TRB_TYPE,
						&event_to_process));
			break;
		}
		/* Re-sample both pointers: recycling and the device may have
		 * advanced them while we were dispatching. */
		local_rp = (mhi_event_pkt *)local_ev_ctxt->rp;
		device_rp = (mhi_event_pkt *)mhi_p2v_addr(
				mhi_dev_ctxt->mhi_ctrl_seg_info,
				(u64)ev_ctxt->mhi_event_read_ptr);
		--event_quota;
	}
	return MHI_STATUS_SUCCESS;
}
/*
 * gpio_event_matrix_func - lifecycle entry point for the matrix keypad.
 * @input_devs: input devices the keymap entries index into.
 * @info:       embedded gpio_event_info; container gives the matrix config.
 * @data:       per-instance state slot (struct gpio_kp allocated on INIT).
 * @func:       GPIO_EVENT_FUNC_{INIT,UNINIT,SUSPEND,RESUME}.
 *
 * INIT validates the platform data, allocates the private state, registers
 * key capabilities, claims and configures the output/input GPIOs, and
 * starts the scan timer (holding the wakeup source when IRQ mode is used).
 * Any other @func (after the SUSPEND/RESUME early-out) tears everything
 * down.  Returns 0 on success or a negative errno.
 *
 * NOTE: the error labels are deliberately placed *inside* the teardown
 * loops so that a failure at step i unwinds exactly the i resources
 * acquired so far, and the UNINIT path falls through the same code with
 * the full count — do not "clean up" this structure.
 */
int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
	struct gpio_event_info *info, void **data, int func)
{
	int i;
	int err;
	int key_count;
	struct gpio_kp *kp;
	struct gpio_event_matrix_info *mi;

	mi = container_of(info, struct gpio_event_matrix_info, info);
	if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) {
		/* TODO: disable scanning */
		return 0;
	}

	if (func == GPIO_EVENT_FUNC_INIT) {
		if (mi->keymap == NULL ||
		   mi->input_gpios == NULL ||
		   mi->output_gpios == NULL) {
			err = -ENODEV;
			pr_err("gpiomatrix: Incomplete pdata\n");
			goto err_invalid_platform_data;
		}
		key_count = mi->ninputs * mi->noutputs;

		/* Private state plus one pressed-bit per matrix position. */
		*data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) *
				     BITS_TO_LONGS(key_count), GFP_KERNEL);
		if (kp == NULL) {
			err = -ENOMEM;
			pr_err("gpiomatrix: Failed to allocate private data\n");
			goto err_kp_alloc_failed;
		}
		kp->input_devs = input_devs;
		kp->keypad_info = mi;
		/* Register every mapped keycode with its input device. */
		for (i = 0; i < key_count; i++) {
			unsigned short keyentry = mi->keymap[i];
			unsigned short keycode = keyentry & MATRIX_KEY_MASK;
			unsigned short dev = keyentry >> MATRIX_CODE_BITS;
			if (dev >= input_devs->count) {
				pr_err("gpiomatrix: bad device index %d >= "
					"%d for key code %d\n",
					dev, input_devs->count, keycode);
				err = -EINVAL;
				goto err_bad_keymap;
			}
			if (keycode && keycode <= KEY_MAX)
				input_set_capability(input_devs->dev[dev],
							EV_KEY, keycode);
		}

		for (i = 0; i < mi->noutputs; i++) {
			err = gpio_request(mi->output_gpios[i], "gpio_kp_out");
			if (err) {
				pr_err("gpiomatrix: gpio_request failed for "
					"output %d\n", mi->output_gpios[i]);
				goto err_request_output_gpio_failed;
			}
			if (gpio_cansleep(mi->output_gpios[i])) {
				pr_err("gpiomatrix: unsupported output gpio %d,"
					" can sleep\n", mi->output_gpios[i]);
				err = -EINVAL;
				goto err_output_gpio_configure_failed;
			}
			if (mi->flags & GPIOKPF_DRIVE_INACTIVE)
				err = gpio_direction_output(mi->output_gpios[i],
					!(mi->flags & GPIOKPF_ACTIVE_HIGH));
			else
				err = gpio_direction_input(mi->output_gpios[i]);
			if (err) {
				pr_err("gpiomatrix: gpio_configure failed for "
					"output %d\n", mi->output_gpios[i]);
				goto err_output_gpio_configure_failed;
			}
		}
		for (i = 0; i < mi->ninputs; i++) {
			err = gpio_request(mi->input_gpios[i], "gpio_kp_in");
			if (err) {
				pr_err("gpiomatrix: gpio_request failed for "
					"input %d\n", mi->input_gpios[i]);
				goto err_request_input_gpio_failed;
			}
			err = gpio_direction_input(mi->input_gpios[i]);
			if (err) {
				pr_err("gpiomatrix: gpio_direction_input failed"
					" for input %d\n", mi->input_gpios[i]);
				goto err_gpio_direction_input_failed;
			}
		}
		kp->current_output = mi->noutputs;
		kp->key_state_changed = 1;

		hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		kp->timer.function = gpio_keypad_timer_func;
		wakeup_source_init(&kp->wakeup_source,"gpio_kp");
		err = gpio_keypad_request_irqs(kp);
		/* Fall back to polling if the IRQs could not be claimed. */
		kp->use_irq = err == 0;

		pr_info("GPIO Matrix Keypad Driver: Start keypad matrix for "
			"%s%s in %s mode\n", input_devs->dev[0]->name,
			(input_devs->count > 1) ? "..." : "",
			kp->use_irq ? "interrupt" : "polling");

		if (kp->use_irq)
			__pm_stay_awake(&kp->wakeup_source);
		hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);

		return 0;
	}

	/* UNINIT path — also reached via the error labels below. */
	err = 0;
	kp = *data;

	if (kp->use_irq)
		for (i = mi->noutputs - 1; i >= 0; i--)
			free_irq(gpio_to_irq(mi->input_gpios[i]), kp);

	hrtimer_cancel(&kp->timer);
	wakeup_source_trash(&kp->wakeup_source);
	for (i = mi->noutputs - 1; i >= 0; i--) {
err_gpio_direction_input_failed:
		gpio_free(mi->input_gpios[i]);
err_request_input_gpio_failed:
		;
	}
	for (i = mi->noutputs - 1; i >= 0; i--) {
err_output_gpio_configure_failed:
		gpio_free(mi->output_gpios[i]);
err_request_output_gpio_failed:
		;
	}
err_bad_keymap:
	kfree(kp);
err_kp_alloc_failed:
err_invalid_platform_data:
	return err;
}
/*
 * cnss_pm_wake_lock - activate a wakeup source on behalf of a CNSS client.
 * @ws: the wakeup source to hold; released via the matching unlock helper.
 *
 * Thin wrapper around __pm_stay_awake() keeping the system from
 * autosleeping while @ws is held.
 */
void cnss_pm_wake_lock(struct wakeup_source *ws)
{
	__pm_stay_awake(ws);
}
/*
 * mhi_initiate_m3 - attempt to move the MHI device into the M3 (suspended)
 * state and power down the PCIe link.
 * @mhi_dev_ctxt: MHI device context.
 *
 * Under pm_lock: first wakes the device out of M1/M2 if needed, bails out
 * early for M3 (already suspended) and RESET (just turn the link off), then
 * aborts if outbound acks or data are still pending, cancels the M1 timer,
 * asserts pending_M3 and waits for the device to report M3 before turning
 * the link off and dropping the bus vote.  On an abort, rings all channel
 * doorbells again and, if the device was parked by the M1 path, releases it
 * to M2.
 *
 * Returns 0 on success, -EPERM/-EAGAIN on the various abort paths, or the
 * wait_event result on interruption.
 */
int mhi_initiate_m3(mhi_device_ctxt *mhi_dev_ctxt)
{
	unsigned long flags;
	int r = 0;
	int abort_m3 = 0;

	mhi_log(MHI_MSG_INFO,
		"Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
		mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
		mhi_dev_ctxt->flags.pending_M3);
	mutex_lock(&mhi_dev_ctxt->pm_lock);
	switch (mhi_dev_ctxt->mhi_state) {
	case MHI_STATE_M1:
	case MHI_STATE_M2:
		/* Device is dozing: assert WAKE and wait for M0/M1. */
		mhi_log(MHI_MSG_INFO, "Triggering wake out of M2\n");
		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
		mhi_dev_ctxt->flags.pending_M3 = 1;
		mhi_assert_device_wake(mhi_dev_ctxt);
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M0_event,
				mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
				mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
				msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
		if (0 == r || -ERESTARTSYS == r) {
			mhi_log(MHI_MSG_INFO | MHI_DBG_POWER,
				"MDM failed to come out of M2.\n");
			goto exit;
		}
		break;
	case MHI_STATE_M3:
		/* Already suspended; -EPERM only if the link is still up. */
		mhi_log(MHI_MSG_INFO,
			"MHI state %d, link state %d.\n",
			mhi_dev_ctxt->mhi_state,
			mhi_dev_ctxt->flags.link_up);
		if (mhi_dev_ctxt->flags.link_up)
			r = -EPERM;
		else
			r = 0;
		goto exit;
	case MHI_STATE_RESET:
		mhi_log(MHI_MSG_INFO,
			"MHI in RESET turning link off and quitting\n");
		mhi_turn_off_pcie_link(mhi_dev_ctxt);
		r = mhi_set_bus_request(mhi_dev_ctxt, 0);
		if (r)
			mhi_log(MHI_MSG_INFO,
				"Failed to set bus freq ret %d\n", r);
		goto exit;
	default:
		mhi_log(MHI_MSG_INFO,
			"MHI state %d, link state %d.\n",
			mhi_dev_ctxt->mhi_state,
			mhi_dev_ctxt->flags.link_up);
		break;
	}
	/* Acts as an "if": the body always leaves through goto exit. */
	while (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) {
		mhi_log(MHI_MSG_INFO | MHI_DBG_POWER,
			"There are still %d acks pending from device\n",
			atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
		/* Pulse the wake lock so the system stays awake while the
		 * acks drain. */
		__pm_stay_awake(&mhi_dev_ctxt->wake_lock);
		__pm_relax(&mhi_dev_ctxt->wake_lock);
		abort_m3 = 1;
		goto exit;
	}
	if (atomic_read(&mhi_dev_ctxt->flags.data_pending)) {
		abort_m3 = 1;
		goto exit;
	}
	r = hrtimer_cancel(&mhi_dev_ctxt->m1_timer);
	if (r)
		mhi_log(MHI_MSG_INFO, "Cancelled M1 timer, timer was active\n");
	else
		mhi_log(MHI_MSG_INFO,
			"Cancelled M1 timer, timer was not active\n");
	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	if (mhi_dev_ctxt->flags.pending_M0) {
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		mhi_log(MHI_MSG_INFO,
			"Pending M0 detected, aborting M3 procedure\n");
		r = -EPERM;
		goto exit;
	}
	mhi_dev_ctxt->flags.pending_M3 = 1;
	atomic_set(&mhi_dev_ctxt->flags.cp_m1_state, 0);
	mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_log(MHI_MSG_INFO | MHI_DBG_POWER,
		"Waiting for M3 completion.\n");
	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M3_event,
			mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
			msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
	switch (r) {
	case 0:
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
			"MDM failed to suspend after %d ms\n",
			MHI_MAX_SUSPEND_TIMEOUT);
		mhi_dev_ctxt->counters.m3_event_timeouts++;
		mhi_dev_ctxt->flags.pending_M3 = 0;
		r = -EAGAIN;
		goto exit;
		break;
	case -ERESTARTSYS:
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
			"Going Down...\n");
		goto exit;
		break;
	default:
		mhi_log(MHI_MSG_INFO | MHI_DBG_POWER,
			"M3 completion received\n");
		break;
	}
	mhi_deassert_device_wake(mhi_dev_ctxt);
	/* Turn off PCIe link*/
	mhi_turn_off_pcie_link(mhi_dev_ctxt);
	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
	if (r)
		mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
exit:
	if (abort_m3) {
		/* Suspend aborted: re-ring all doorbells so traffic resumes,
		 * guarding with data_pending around the ring operation. */
		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
		atomic_inc(&mhi_dev_ctxt->flags.data_pending);
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		ring_all_chan_dbs(mhi_dev_ctxt);
		atomic_dec(&mhi_dev_ctxt->flags.data_pending);
		r = -EAGAIN;
		if (atomic_read(&mhi_dev_ctxt->flags.cp_m1_state)) {
			/* The M1 path parked the device; release it to M2. */
			write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
			atomic_set(&mhi_dev_ctxt->flags.cp_m1_state, 0);
			mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
			mhi_log(MHI_MSG_INFO, "Allowing transition to M2\n");
			mhi_reg_write_field(mhi_dev_ctxt->mmio_addr, MHICTRL,
					MHICTRL_MHISTATE_MASK,
					MHICTRL_MHISTATE_SHIFT,
					MHI_STATE_M2);
			mhi_dev_ctxt->counters.m1_m2++;
			write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock,
						flags);
		}
	}
	/* We have to be careful here, we are setting a pending_M3 to 0
	 * even if we did not set it above. This works since the only other
	 * entity that sets this flag must also acquire the pm_lock */
	atomic_set(&mhi_dev_ctxt->flags.m3_work_enabled, 0);
	mhi_dev_ctxt->flags.pending_M3 = 0;
	mutex_unlock(&mhi_dev_ctxt->pm_lock);
	return r;
}
/*
 * smb358_chg_uv - handle charger under-voltage / plug state change.
 * @chip:   charger driver state.
 * @status: 0 means a charger is present (plug-in), non-zero means removal.
 *
 * On plug-in: records presence, takes the wakeup source (once), and — when
 * APSD is disabled — reports a USB supply to the power-supply framework.
 * On removal: clears presence and schedules the delayed wakeup-release
 * work.  Always returns 0.
 */
int smb358_chg_uv(struct opchg_charger *chip, u8 status)
{
	int rc = 0;

	/* Already marked present and another plug-in event: nothing to do. */
	if(chip->chg_present && (status == 0)){
		pr_err("%s chg has plugged in,return\n",__func__);
		return 0;
	}
	opchg_inout_charge_parameters(chip);
	//opchg_switch_to_usbin(chip,!status);
	if (status == 0) {
		chip->g_chg_in = 1;
		/* Take the wakeup source only if it is not already held. */
		if(chip->g_is_wakeup == 0){
			__pm_stay_awake(&chip->source);
			chip->g_is_wakeup= 1;
		}
	} else {
		chip->g_chg_in = 0;
		/* Release the wakeup source after a 2 s grace period. */
		schedule_delayed_work(&chip->opchg_delayed_wakeup_work,
				round_jiffies_relative(msecs_to_jiffies(2000)));
	}
	/* use this to detect USB insertion only if !apsd */
	if (chip->disable_apsd && status == 0) {
		chip->chg_present = true;
		dev_dbg(chip->dev, "%s updating usb_psy present=%d",
				__func__, chip->chg_present);
		power_supply_set_supply_type(chip->usb_psy,
				POWER_SUPPLY_TYPE_USB);
		power_supply_set_present(chip->usb_psy, chip->chg_present);
	}
	if (status != 0) {
		chip->chg_present = false;
		dev_dbg(chip->dev, "%s updating usb_psy present=%d",
				__func__, chip->chg_present);
		/* we can't set usb_psy as UNKNOWN so early, it'll lead
		 * USERSPACE issue */
		power_supply_set_present(chip->usb_psy, chip->chg_present);
		if (chip->bms_controlled_charging){
			/*
			 * Disable SOC based USB suspend to enable charging on
			 * USB insertion.
			 */
			rc = smb358_charging_disable(chip, SOC, false);
			if (rc < 0)
				dev_err(chip->dev,"Couldn't disable usb suspend rc = %d\n",rc);
		}
	}
	//chip->BMT_status.charger_exist = chip->chg_present;
	power_supply_changed(chip->usb_psy);
	if(is_project(OPPO_15005)){
		schedule_work(&chip->opchg_modify_tp_param_work);
	}
	dev_dbg(chip->dev, "chip->chg_present = %d\n", chip->chg_present);
	return 0;
}
/*
 * mhi_wake - hold the MHI wake lock so the system cannot autosleep.
 * @mhi_dev_ctxt: MHI device context owning the wakeup source.
 *
 * Thin wrapper around __pm_stay_awake(); the matching release is done
 * elsewhere via __pm_relax() on the same wake_lock.
 */
void mhi_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	mhi_log(MHI_MSG_INFO, "System cannot sleep.\n");
	__pm_stay_awake(&mhi_dev_ctxt->wake_lock);
}
static void fastcg_work_func(struct work_struct *work) { int data = 0; int i; int bit = 0; int retval = 0; int ret_info = 0; static int fw_ver_info = 0; int volt = 0; int temp = 0; int soc = 0; int current_now = 0; int remain_cap = 0; static bool isnot_power_on = 0; free_irq(bq27541_di->irq, bq27541_di); for(i = 0; i < 7; i++) { gpio_set_value(0, 0); gpio_tlmm_config(AP_TX_EN, GPIO_CFG_ENABLE); usleep_range(1000,1000); gpio_set_value(0, 1); gpio_tlmm_config(AP_TX_DIS, GPIO_CFG_ENABLE); usleep_range(19000,19000); bit = gpio_get_value(1); data |= bit<<(6-i); if((i == 2) && (data != 0x50) && (!fw_ver_info)) { //data recvd not start from "101" pr_err("%s data err:%d\n",__func__,data); if(bq27541_di->fast_chg_started == true) { bq27541_di->alow_reading = true; bq27541_di->fast_chg_started = false; bq27541_di->fast_chg_allow = false; bq27541_di->fast_switch_to_normal = false; bq27541_di->fast_normal_to_warm = false; bq27541_di->fast_chg_ing = false; gpio_set_value(96, 0); retval = gpio_tlmm_config(AP_SWITCH_USB, GPIO_CFG_ENABLE); if (retval) { pr_err("%s switch usb error %d\n", __func__, retval); } power_supply_changed(bq27541_di->batt_psy); } goto out; } } pr_err("%s recv data:0x%x\n", __func__, data); if(data == 0x52) { //request fast charging __pm_stay_awake(&bq27541_di->fastchg_wakeup_source); pic_need_to_up_fw = 0; fw_ver_info = 0; bq27541_di->alow_reading = false; bq27541_di->fast_chg_started = true; bq27541_di->fast_chg_allow = false; bq27541_di->fast_normal_to_warm = false; mod_timer(&bq27541_di->watchdog, jiffies + msecs_to_jiffies(10000)); if(!isnot_power_on) { isnot_power_on = 1; ret_info = 0x1; } else { ret_info = 0x2; } } else if(data == 0x54) { //fast charge stopped bq27541_di->alow_reading = true; bq27541_di->fast_chg_started = false; bq27541_di->fast_chg_allow = false; bq27541_di->fast_switch_to_normal = false; bq27541_di->fast_normal_to_warm = false; bq27541_di->fast_chg_ing = false; //switch off fast chg pr_info("%s fastchg stop unexpectly,switch off 
fastchg\n", __func__); gpio_set_value(96, 0); retval = gpio_tlmm_config(AP_SWITCH_USB, GPIO_CFG_ENABLE); if (retval) { pr_err("%s switch usb error %d\n", __func__, retval); } del_timer(&bq27541_di->watchdog); ret_info = 0x2; } else if(data == 0x58) { //tell ap can read i2c bq27541_di->alow_reading = true; //reading bq27541_di->fast_chg_ing = true; volt = bq27541_get_battery_mvolts(); temp = bq27541_get_battery_temperature(); remain_cap = bq27541_get_batt_remaining_capacity(); soc = bq27541_get_battery_soc(); current_now = bq27541_get_average_current(); pr_err("%s volt:%d,temp:%d,remain_cap:%d,soc:%d,current:%d\n",__func__,volt,temp, remain_cap,soc,current_now); //don't read bq27541_di->alow_reading = false; mod_timer(&bq27541_di->watchdog, jiffies + msecs_to_jiffies(10000)); ret_info = 0x2; } else if(data == 0x5a) { //fastchg full,vbatt > 4350 #if 0 //lfc modify for it(set fast_switch_to_normal ture) is earlier than usb_plugged_out irq(set it false) bq27541_di->fast_switch_to_normal = true; bq27541_di->alow_reading = true; bq27541_di->fast_chg_started = false; bq27541_di->fast_chg_allow = false; #endif //switch off fast chg pr_info("%s fastchg full,switch off fastchg,set GPIO96 0\n", __func__); gpio_set_value(96, 0); retval = gpio_tlmm_config(AP_SWITCH_USB, GPIO_CFG_ENABLE); if (retval) { pr_err("%s switch usb error %d\n", __func__, retval); } del_timer(&bq27541_di->watchdog); ret_info = 0x2; } else if(data == 0x53) { if (bq27541_di->battery_type == BATTERY_3000MA) { //13097 ATL battery //if temp:10~20 decigec,vddmax = 4250mv //switch off fast chg pr_info("%s fastchg low temp full,switch off fastchg,set GPIO96 0\n", __func__); gpio_set_value(96, 0); retval = gpio_tlmm_config(AP_SWITCH_USB, GPIO_CFG_ENABLE); if (retval) { pr_err("%s switch usb error %d\n", __func__, retval); } } del_timer(&bq27541_di->watchdog); ret_info = 0x2; } else if(data == 0x59) { //usb bad connected,stop fastchg #if 0 //lfc modify for it(set fast_switch_to_normal ture) is earlier than 
usb_plugged_out irq(set it false) bq27541_di->alow_reading = true; bq27541_di->fast_chg_started = false; bq27541_di->fast_chg_allow = false; bq27541_di->fast_switch_to_normal = true; #endif //switch off fast chg pr_info("%s usb bad connect,switch off fastchg\n", __func__); gpio_set_value(96, 0); retval = gpio_tlmm_config(AP_SWITCH_USB, GPIO_CFG_ENABLE); if (retval) { pr_err("%s switch usb error %d\n", __func__, retval); } del_timer(&bq27541_di->watchdog); ret_info = 0x2; } else if(data == 0x5c) { //fastchg temp over 45 or under 20 pr_info("%s fastchg temp > 45 or < 20,switch off fastchg,set GPIO96 0\n", __func__); gpio_set_value(96, 0); retval = gpio_tlmm_config(AP_SWITCH_USB, GPIO_CFG_ENABLE); if (retval) { pr_err("%s switch usb error %d\n", __func__, retval); } del_timer(&bq27541_di->watchdog); ret_info = 0x2; } else if(data == 0x56) { //ready to get fw_ver fw_ver_info = 1; ret_info = 0x2; } else if(fw_ver_info) { //get fw_ver //fw in local is large than mcu1503_fw_ver if((!pic_have_updated) && (Pic16F_firmware_data[pic_fw_ver_count - 4] > data)) { ret_info = 0x2; pic_need_to_up_fw = 1; //need to update fw } else { ret_info = 0x1; pic_need_to_up_fw = 0; //fw is already new,needn't to up } pr_info("local_fw:0x%x,need_to_up_fw:%d\n",Pic16F_firmware_data[pic_fw_ver_count - 4],pic_need_to_up_fw); fw_ver_info = 0; } else { gpio_set_value(96, 0); retval = gpio_tlmm_config(AP_SWITCH_USB, GPIO_CFG_ENABLE); if (retval) { pr_err("%s data err(101xxxx) switch usb error %d\n", __func__, retval); goto out; //avoid i2c conflict } msleep(500); //avoid i2c conflict //data err bq27541_di->alow_reading = true; bq27541_di->fast_chg_started = false; bq27541_di->fast_chg_allow = false; bq27541_di->fast_switch_to_normal = false; bq27541_di->fast_normal_to_warm = false; bq27541_di->fast_chg_ing = false; //data err pr_info("%s data err(101xxxx),switch off fastchg\n", __func__); power_supply_changed(bq27541_di->batt_psy); goto out; } msleep(2); 
gpio_tlmm_config(GPIO_CFG(1,0,GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),1); gpio_direction_output(1, 0); for(i = 0; i < 3; i++) { if(i == 0) { //tell mcu1503 battery_type gpio_set_value(1, ret_info >> 1); } else if(i == 1) {
/* microUSB switch IC : SM5502 - Silicon Mitus */
/*
 * detect_dev_sm5502() - classify the accessory seen by the SM5502 MUIC.
 * @usbsw: driver state (i2c client, platform data, cached device registers)
 * @intr1: interrupt register 1 contents (ATTACHED/DETACHED/OVP bits)
 * @intr2: interrupt register 2 contents (VBUS / reserved-accessory bits)
 * @data:  out-pointer receiving the cable type, used only while probing == 1
 *
 * Snapshots the device-type/ADC/VBUS registers, applies several known IC
 * bug workarounds, classifies the attach or detach event, and fans the
 * result out to the charger callback and the usb_switch_notifier chain.
 * The freshly read register values are cached in @usbsw at the end so the
 * next detach event can tell what had been attached.
 *
 * Fix: two log messages spelled "DETTACHED" — corrected to "DETACHED" to
 * match every other message emitted by this function.
 */
static void detect_dev_sm5502(struct sm5502_usbsw *usbsw, u8 intr1, u8 intr2,
			      void *data)
{
	struct sm5502_platform_data *pdata = usbsw->pdata;
	struct i2c_client *client = usbsw->client;
	u8 val1, val2, val3, adc, vbusin, intr1_tmp, val;
	int dev_classifi = 0;

	/* Snapshot current device-type, ADC and VBUS status registers. */
	read_reg(client, REG_DEV_T1, &val1);
	read_reg(client, REG_DEV_T2, &val2);
	read_reg(client, REG_DEV_T3, &val3);
	read_reg(client, REG_ADC, &adc);
	read_reg(client, REG_RSV_ID1, &vbusin);

	/*
	 * IC Bug Case W/A: after an OVP event the control register may read
	 * back as 0x1F, meaning the IC lost its configuration — reinitialize
	 * and bail out.
	 */
	if (intr1 & OVP_EVENT_M) {
		read_reg(client, REG_CTRL, &val);
		if (val == 0x1F) {
			sm5502_reg_init(usbsw);
			return;
		}
	}

	/* Detach -> Attach quickly: both bits set, treat as attach only. */
	if (intr1 == (ATTACHED | DETACHED)) {
		dev_info(&client->dev, "Bug Case 1\n");
		intr1 &= ~(DETACHED);
	}
	/* Attach -> Detach quickly: re-read INT1 and merge the late bits. */
	else if (intr1 & ATTACHED && probing != 1) {
		read_reg(client, REG_INT1, &intr1_tmp);
		if (intr1_tmp & DETACHED) {
			dev_info(&client->dev, "Bug Case 2\n");
			intr1 &= ~(ATTACHED);
		}
		intr1 |= intr1_tmp;
	}

	/*
	 * Attached path: a real attach, a VBUS transition without a pending
	 * detach, or a reserved-accessory event.  Note dev_classifi keeps
	 * the classification of the last matching device-type bit.
	 */
	if (intr1 & ATTACHED ||
	    (intr2 & (VBUSOUT_ON | VBUSOUT_OFF) && !(intr1 & DETACHED)) ||
	    intr2 & REV_ACCE) {
		if (val1 & DEV_USB && vbusin & VBUSIN_VALID) {
			dev_classifi = CABLE_TYPE1_USB_MUIC;
			dev_info(&client->dev, "USB ATTACHED*****\n");
		}
		if (val1 & DEV_CHARGER && vbusin & VBUSIN_VALID) {
			dev_classifi = CABLE_TYPE1_TA_MUIC;
			dev_info(&client->dev, "TA(DCP/CDP) ATTACHED*****\n");
		}
		if (val1 & DEV_USB_OTG) {
			dev_classifi = CABLE_TYPE1_OTG_MUIC;
			dev_info(&client->dev, "OTG ATTACHED*****\n");
		}
		if (val1 & DEV_CARKIT_CHG && vbusin & VBUSIN_VALID) {
			dev_classifi = CABLE_TYPE1_CARKIT_T1OR2_MUIC;
			manual_usbpath_ctrl(1);
			dev_info(&client->dev,
				 "CARKIT or L USB Cable ATTACHED*****\n");
		}
		if (val2 & DEV_JIG_UART_OFF) {
			if (vbusin & VBUSIN_VALID) {
				dev_classifi = CABLE_TYPE2_JIG_UART_OFF_VB_MUIC;
				dev_info(&client->dev,
					 "JIG_UARTOFF_VB ATTACHED*****\n");
			} else {
				dev_classifi = CABLE_TYPE2_JIG_UART_OFF_MUIC;
				dev_info(&client->dev,
					 "JIG_UARTOFF ATTACHED*****\n");
			}
			additional_vbus_int_enable(usbsw);
		}
		if (val2 & DEV_JIG_UART_ON) {
			if (vbusin & VBUSIN_VALID) {
				dev_classifi = CABLE_TYPE2_JIG_UART_ON_VB_MUIC;
				dev_info(&client->dev,
					 "JIG_UARTON_VB ATTACHED*****\n");
			} else {
				dev_classifi = CABLE_TYPE2_JIG_UART_ON_MUIC;
				dev_info(&client->dev,
					 "JIG_UARTON ATTACHED*****\n");
			}
			additional_vbus_int_enable(usbsw);
		}
		if (val2 & DEV_JIG_USB_OFF && vbusin & VBUSIN_VALID) {
			dev_classifi = CABLE_TYPE2_JIG_USB_OFF_MUIC;
			dev_info(&client->dev, "JIG_USB_OFF ATTACHED*****\n");
		}
		if (val2 & DEV_JIG_USB_ON && vbusin & VBUSIN_VALID) {
			dev_classifi = CABLE_TYPE2_JIG_USB_ON_MUIC;
			dev_info(&client->dev, "JIG_USB_ON ATTACHED*****\n");
		}
		/* Any JIG: hold a wakeup source + idle QoS for factory test. */
		if (val2 & DEV_JIG_ALL) {
			if (!jig_wakelock_acq) {
				__pm_stay_awake(&jig_suspend_wake);
				pm_qos_update_request(&usbsw->qos_idle,
					PM_QOS_CPUIDLE_BLOCK_AXI_VALUE);
				jig_wakelock_acq = 1;
				dev_info(&client->dev,
					 "AP WakeLock for FactoryTest *****\n");
			}
		}
		/* Desktop Dock Case */
		if (val2 & DEV_AV) {
			/* Check device3 register for Dock+VBUS */
			if (val3 & DEV_AV_VBUS && vbusin & VBUSIN_VALID) {
				dev_classifi = CABLE_TYPE3_DESKDOCK_VB_MUIC;
				dev_info(&client->dev,
					 "DESKDOCK+VBUS ATTACHED*****\n");
			} else {
				dev_classifi = CABLE_TYPE2_DESKDOCK_MUIC;
				dev_info(&client->dev,
					 "DESKDOCK ATTACHED*****\n");
			}
			additional_vbus_int_enable(usbsw);
			/* Dock */
			switch_set_state(&usbsw->dock_dev, 1);
			/* Route dock audio only when no headset jack is in. */
			if (jack_is_detected)
				sm5502_dock_audiopath_ctrl(0);
			else
				sm5502_dock_audiopath_ctrl(1);
		}
		if (val3 & DEV_U200_CHG && vbusin & VBUSIN_VALID) {
			dev_classifi = CABLE_TYPE3_U200CHG_MUIC;
			dev_info(&client->dev, "TA(U200 CHG) ATTACHED*****\n");
		}
		if (val3 & DEV_DCD_OUT_SDP && vbusin & VBUSIN_VALID) {
			dev_classifi = CABLE_TYPE3_NONSTD_SDP_MUIC;
			dev_info(&client->dev,
				 "TA(NON-STANDARD SDP) ATTACHED*****\n");
		}
		/*
		 * W/A: attach event but every device-type register is empty.
		 * Reset the IC (preserving masks/timing/manual-switch/ctrl)
		 * and wait for the next interrupt to classify.
		 */
		if (val1 == 0 && val2 == 0 && val3 == 0 &&
		    reset_count < MAX_RESET_TRIAL && probing != 1) {
			u8 sintm1, sintm2, sctrl, stime1, smansw1;

			read_reg(client, REG_INT1_MASK, &sintm1);
			read_reg(client, REG_INT2_MASK, &sintm2);
			read_reg(client, REG_TIMING1, &stime1);
			read_reg(client, REG_MANSW1, &smansw1);
			read_reg(client, REG_CTRL, &sctrl);
			write_reg(client, REG_RESET, IC_RESET);
			msleep(20);
			dev_info(&client->dev,
				 "SM5502 was reset, reset_count : %d\n",
				 reset_count);
			write_reg(client, REG_INT1_MASK, sintm1);
			write_reg(client, REG_INT2_MASK, sintm2);
			write_reg(client, REG_TIMING1, stime1);
			write_reg(client, REG_MANSW1, smansw1);
			write_reg(client, REG_CTRL, sctrl);
			reset_count++;
			return;
		}
		/* for Charger driver */
		if (pdata->charger_cb)
			pdata->charger_cb(dev_classifi);
		if (probing == 1)
			*(int *)data = dev_classifi;
		blocking_notifier_call_chain(&usb_switch_notifier,
					     dev_classifi, NULL);
	}

	/*
	 * Detached path: classify against the register values cached at the
	 * previous attach, then tell the charger and notifier chain.
	 */
	if (intr1 & DETACHED) {
		if (usbsw->dev1 & DEV_USB && usbsw->vbusin & VBUSIN_VALID) {
			dev_info(&client->dev, "USB DETACHED*****\n");
		}
		if (usbsw->dev1 & DEV_CHARGER &&
		    usbsw->vbusin & VBUSIN_VALID) {
			dev_info(&client->dev, "TA(DCP/CDP) DETACHED*****\n");
		}
		if (usbsw->dev1 & DEV_USB_OTG) {
			dev_info(&client->dev, "OTG DETACHED*****\n");
		}
		if (usbsw->dev1 & DEV_CARKIT_CHG &&
		    usbsw->vbusin & VBUSIN_VALID) {
			manual_usbpath_ctrl(0);
			dev_info(&client->dev,
				 "CARKIT or L USB Cable DETACHED*****\n");
		}
		if (usbsw->dev2 & DEV_JIG_UART_OFF) {
			if (usbsw->vbusin & VBUSIN_VALID) {
				dev_info(&client->dev,
					 "JIG_UARTOFF+VBUS DETACHED*****\n");
			} else {
				dev_info(&client->dev,
					 "JIG_UARTOFF DETACHED*****\n");
			}
			additional_vbus_int_disable(usbsw);
		}
		if (usbsw->dev2 & DEV_JIG_UART_ON) {
			if (usbsw->vbusin & VBUSIN_VALID) {
				dev_info(&client->dev,
					 "JIG_UARTON_VB DETACHED*****\n");
			} else {
				dev_info(&client->dev,
					 "JIG_UARTON DETACHED*****\n");
			}
			additional_vbus_int_disable(usbsw);
		}
		if (usbsw->dev2 & DEV_JIG_USB_OFF &&
		    usbsw->vbusin & VBUSIN_VALID) {
			dev_info(&client->dev, "JIG_USB_OFF DETACHED*****\n");
		}
		if (usbsw->dev2 & DEV_JIG_USB_ON &&
		    usbsw->vbusin & VBUSIN_VALID) {
			dev_info(&client->dev, "JIG_USB_ON DETACHED*****\n");
		}
		/* Release the factory-test wakeup source / QoS hold. */
		if (usbsw->dev2 & DEV_JIG_ALL) {
			if (jig_wakelock_acq) {
				__pm_relax(&jig_suspend_wake);
				pm_qos_update_request(&usbsw->qos_idle,
					PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
				jig_wakelock_acq = 0;
				dev_info(&client->dev,
					 "AP WakeLock Release *****\n");
			}
		}
		if (usbsw->dev2 & DEV_AV) {
			/* Check device3 register for Dock+VBUS */
			if (usbsw->dev3 & DEV_AV_VBUS &&
			    usbsw->vbusin & VBUSIN_VALID) {
				/* was "DETTACHED" — fixed typo */
				dev_info(&client->dev,
					 "DESKDOCK+VBUS DETACHED*****\n");
			} else {
				dev_info(&client->dev,
					 "DESKDOCK DETACHED*****\n");
			}
			additional_vbus_int_disable(usbsw);
			/* Dock */
			switch_set_state(&usbsw->dock_dev, 0);
			sm5502_dock_audiopath_ctrl(0);
		}
		if (usbsw->dev3 & DEV_U200_CHG &&
		    usbsw->vbusin & VBUSIN_VALID) {
			/* was "DETTACHED" — fixed typo */
			dev_info(&client->dev, "TA(U200_CHG) DETACHED*****\n");
		}
		if (usbsw->dev3 & DEV_DCD_OUT_SDP &&
		    usbsw->vbusin & VBUSIN_VALID) {
			dev_info(&client->dev,
				 "TA(NON-STANDARD SDP) DETACHED*****\n");
		}
		/* for Charger driver */
		if (pdata->charger_cb)
			pdata->charger_cb(CABLE_TYPE_NONE_MUIC);
		blocking_notifier_call_chain(&usb_switch_notifier,
					     CABLE_TYPE_NONE_MUIC, NULL);
		reset_count = 0;
	}

	/* Cache this snapshot for the next detach classification. */
	usbsw->dev1 = val1;
	usbsw->dev2 = val2;
	usbsw->dev3 = val3;
	usbsw->adc = adc;
	usbsw->vbusin = vbusin;
	return;
}
/*
 * mhi_initiate_m3() - try to move the MHI device into the M3 (suspended)
 * state and power down the PCIe link.
 * @mhi_dev_ctxt: MHI device context holding state, locks and counters.
 *
 * Returns 0 on success, -EAGAIN when the transition must be retried
 * (pending data/acks/M0, or the device failed to leave M2).
 * Serialized against other PM transitions by pm_lock for the whole call.
 */
int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	unsigned long flags;
	int r = 0;
	int abort_m3 = 0;	/* set when we bailed after asserting wake */

	mhi_log(MHI_MSG_INFO,
		"Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
		mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
		mhi_dev_ctxt->flags.pending_M3);
	mutex_lock(&mhi_dev_ctxt->pm_lock);
	switch (mhi_dev_ctxt->mhi_state) {
	case MHI_STATE_RESET:
		/* Device never left reset: just drop the link and the bus
		 * bandwidth vote; nothing to hand-shake with. */
		mhi_log(MHI_MSG_INFO,
			"MHI in RESET turning link off and quitting\n");
		mhi_turn_off_pcie_link(mhi_dev_ctxt);
		r = mhi_set_bus_request(mhi_dev_ctxt, 0);
		if (r)
			mhi_log(MHI_MSG_INFO,
				"Failed to set bus freq ret %d\n", r);
		goto exit;
		break;
	case MHI_STATE_M0:
	case MHI_STATE_M1:
	case MHI_STATE_M2:
		/* Wake the device out of M2 so it can take the M3 request;
		 * pending_M3 is set under xfer_lock before waking. */
		mhi_log(MHI_MSG_INFO, "Triggering wake out of M2\n");
		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
		mhi_dev_ctxt->flags.pending_M3 = 1;
		if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) {
			/* No M2 transition in flight; assert wake ourselves. */
			mhi_log(MHI_MSG_INFO, "M2 transition not set\n");
			mhi_assert_device_wake(mhi_dev_ctxt);
		}
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		/* Wait (bounded) for the device to report M0 or M1. */
		r = wait_event_interruptible_timeout(
			*mhi_dev_ctxt->mhi_ev_wq.m0_event,
			mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
			mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
			msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
		if (0 == r || -ERESTARTSYS == r) {
			/* Timed out or interrupted: retry later. */
			mhi_log(MHI_MSG_INFO,
				"MDM failed to come out of M2.\n");
			r = -EAGAIN;
			goto exit;
		}
		break;
	case MHI_STATE_M3:
		/* Already in M3: only OK if the link is already down. */
		mhi_log(MHI_MSG_INFO, "MHI state %d, link state %d.\n",
			mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.link_up);
		if (mhi_dev_ctxt->flags.link_up)
			r = -EAGAIN;
		else
			r = 0;
		goto exit;
	default:
		mhi_log(MHI_MSG_INFO, "MHI state %d, link state %d.\n",
			mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.link_up);
		break;
	}
	/* Any outstanding outbound acks block suspend; bail on the first
	 * check (loop body always exits via goto). */
	while (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) {
		mhi_log(MHI_MSG_INFO,
			"There are still %d acks pending from device\n",
			atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
		/* Pulse the wakeup source so the PM core notices activity. */
		__pm_stay_awake(&mhi_dev_ctxt->w_lock);
		__pm_relax(&mhi_dev_ctxt->w_lock);
		abort_m3 = 1;
		r = -EAGAIN;
		goto exit;
	}
	if (atomic_read(&mhi_dev_ctxt->flags.data_pending)) {
		/* Data in flight: abort and re-ring doorbells on exit. */
		abort_m3 = 1;
		r = -EAGAIN;
		goto exit;
	}
	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	if (mhi_dev_ctxt->flags.pending_M0) {
		/* A resume is racing us; let it win. */
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		r = -EAGAIN;
		goto exit;
	}
	mhi_dev_ctxt->flags.pending_M3 = 1;
	mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_log(MHI_MSG_INFO, "Waiting for M3 completion.\n");
	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
			mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
			msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
	switch (r) {
	case 0:
		/* Timeout: count it and leave without touching the link.
		 * NOTE(review): -ERESTARTSYS falls into default and is
		 * treated as completion — confirm this is intended. */
		mhi_log(MHI_MSG_CRITICAL,
			"MDM failed to suspend after %d ms\n",
			MHI_MAX_SUSPEND_TIMEOUT);
		mhi_dev_ctxt->counters.m3_event_timeouts++;
		mhi_dev_ctxt->flags.pending_M3 = 0;
		goto exit;
		break;
	default:
		mhi_log(MHI_MSG_INFO, "M3 completion received\n");
		break;
	}
	/* Device is in M3: power down the link and drop the bus vote. */
	mhi_turn_off_pcie_link(mhi_dev_ctxt);
	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
	if (r)
		mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
exit:
	if (abort_m3) {
		/* Suspend aborted after wake was asserted: re-ring all
		 * doorbells (guarded by data_pending) and drop the wake. */
		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
		atomic_inc(&mhi_dev_ctxt->flags.data_pending);
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		ring_all_chan_dbs(mhi_dev_ctxt);
		ring_all_cmd_dbs(mhi_dev_ctxt);
		atomic_dec(&mhi_dev_ctxt->flags.data_pending);
		mhi_deassert_device_wake(mhi_dev_ctxt);
	}
	mhi_dev_ctxt->flags.pending_M3 = 0;
	mutex_unlock(&mhi_dev_ctxt->pm_lock);
	return r;
}