/*
 * alarm_clear() - cancel a previously-set alarm of the given type.
 * @alarm_type: which android alarm slot to clear.
 * @ts:         time value forwarded to set_power_on_alarm() for the
 *              RTC-poweroff-wakeup type (dereferenced unconditionally
 *              on that path, so callers must pass a valid pointer).
 *
 * Locking: alarm_mutex is taken around the whole operation because
 * set_power_on_alarm() may not be called under the spinlock; alarm_slock
 * protects alarm_pending / alarm_enabled / wait_pending.
 */
static void alarm_clear(enum android_alarm_type alarm_type, struct timespec *ts)
{
	uint32_t alarm_type_mask = 1U << alarm_type;
	unsigned long flags;

	mutex_lock(&alarm_mutex);
	spin_lock_irqsave(&alarm_slock, flags);
	alarm_dbg(IO, "alarm %d clear\n", alarm_type);
	devalarm_try_to_cancel(&alarms[alarm_type]);
	if (alarm_pending) {
		alarm_pending &= ~alarm_type_mask;
		/* Drop the wakeup source once nothing is pending and no
		 * reader is between a wait and its re-arm. */
		if (!alarm_pending && !wait_pending)
			__pm_relax(&alarm_wake_lock);
	}
	alarm_enabled &= ~alarm_type_mask;
	spin_unlock_irqrestore(&alarm_slock, flags);
	/* NOTE(review): second argument 0 presumably disables the
	 * RTC-hardware copy of the alarm — confirm set_power_on_alarm(). */
	if (alarm_type == ANDROID_ALARM_RTC_POWEROFF_WAKEUP)
		set_power_on_alarm(ts->tv_sec, 0);
	mutex_unlock(&alarm_mutex);
}
/*
 * alarm_release() - file-release handler for the alarm device.
 *
 * If this fd was the one that opened the device (private_data non-NULL),
 * disable every armed alarm, cancel its timer, and clear any pending
 * state so the wakeup source is released. Returns 0 always.
 */
static int alarm_release(struct inode *inode, struct file *file)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&alarm_slock, flags);
	if (file->private_data) {
		for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
			uint32_t alarm_type_mask = 1U << i;
			if (alarm_enabled & alarm_type_mask) {
				alarm_dbg(INFO, "%s: clear alarm, pending %d\n", __func__, !!(alarm_pending & alarm_type_mask));
				alarm_enabled &= ~alarm_type_mask;
			}
			/* NOTE(review): the spinlock is dropped around the
			 * cancel call, presumably because it can block —
			 * confirm against devalarm_cancel(). Globals may
			 * change while unlocked. */
			spin_unlock_irqrestore(&alarm_slock, flags);
#ifdef CONFIG_BCM_RTC_ALARM_BOOT
			/* RTC power-on alarms are cancelled through the
			 * BCM-specific path instead of the devalarm timer. */
			if (alarms[i].type == ANDROID_ALARM_RTC_POWERON)
				alarm_poweron_cancel();
			else
				devalarm_cancel(&alarms[i]);
#else
			devalarm_cancel(&alarms[i]);
#endif /*CONFIG_BCM_RTC_ALARM_BOOT*/
			spin_lock_irqsave(&alarm_slock, flags);
		}
		if (alarm_pending | wait_pending) {
			if (alarm_pending)
				alarm_dbg(INFO, "%s: clear pending alarms %x\n", __func__, alarm_pending);
			/* Nothing can be waited on any more: release the
			 * wakeup source and reset the handshake state. */
			__pm_relax(&alarm_wake_lock);
			wait_pending = 0;
			alarm_pending = 0;
		}
		alarm_opened = 0;
	}
	spin_unlock_irqrestore(&alarm_slock, flags);
	return 0;
}
/**
 * pm_autosleep_set_state - publish a new autosleep target state.
 * @state: suspend state to autosleep into; PM_SUSPEND_ON disables autosleep.
 *
 * Holds a wakeup source across the state switch so the update itself
 * cannot race with a suspend, then kicks the suspend worker (or the
 * powersuspend wakeup hook) depending on the direction of the change.
 * Returns 0 on success, -EINVAL for an out-of-range state.
 */
int pm_autosleep_set_state(suspend_state_t state)
{
	bool entering_sleep;

#ifndef CONFIG_HIBERNATION
	if (state >= PM_SUSPEND_MAX)
		return -EINVAL;
#endif

	__pm_stay_awake(autosleep_ws);
	mutex_lock(&autosleep_lock);
	autosleep_state = state;
	__pm_relax(autosleep_ws);

	entering_sleep = state > PM_SUSPEND_ON;
	pm_wakep_autosleep_enabled(entering_sleep);
	if (entering_sleep) {
		queue_up_suspend_work();
#ifdef CONFIG_POWERSUSPEND
		/* powersuspend hook: device is going to sleep */
		set_power_suspend_state_autosleep_hook(POWER_SUSPEND_ACTIVE);
#ifndef CONFIG_PM_SYNC_BEFORE_SUSPEND
		printk(KERN_INFO "PM: Syncing filesystems ... ");
		sys_sync();
		printk("done.\n");
#endif
#endif
	} else {
#ifdef CONFIG_POWERSUSPEND
		/* powersuspend hook: device is waking up */
		set_power_suspend_state_autosleep_hook(POWER_SUSPEND_INACTIVE);
#endif
	}
	mutex_unlock(&autosleep_lock);
	return 0;
}
/**
 * pm_autosleep_set_state - record the autosleep target and react to it.
 * @state: suspend state to enter automatically; PM_SUSPEND_ON turns
 *         autosleep off.
 *
 * The wakeup source guards the publication of @state; afterwards the
 * suspend worker is queued (sleep requested) or the powersuspend wake
 * hook fires (sleep cancelled). Returns 0, or -EINVAL for a bad state.
 */
int pm_autosleep_set_state(suspend_state_t state)
{
	bool sleep_requested;

#ifndef CONFIG_HIBERNATION
	if (state >= PM_SUSPEND_MAX)
		return -EINVAL;
#endif

	__pm_stay_awake(autosleep_ws);
	mutex_lock(&autosleep_lock);
	autosleep_state = state;
	__pm_relax(autosleep_ws);

#ifdef CONFIG_SEC_PM_DEBUG
	/* Samsung debug build: dump currently-active wakeup sources. */
	wakeup_sources_stats_active();
#endif

	sleep_requested = state > PM_SUSPEND_ON;
	pm_wakep_autosleep_enabled(sleep_requested);
	if (sleep_requested) {
		queue_up_suspend_work();
#ifdef CONFIG_POWERSUSPEND
		/* powersuspend hook: entering sleep */
		set_power_suspend_state_autosleep_hook(POWER_SUSPEND_ACTIVE);
#endif
	} else {
#ifdef CONFIG_POWERSUSPEND
		/* powersuspend hook: waking up */
		set_power_suspend_state_autosleep_hook(POWER_SUSPEND_INACTIVE);
#endif
	}
	mutex_unlock(&autosleep_lock);
	return 0;
}
static enum MHI_STATUS process_wake_transition( struct mhi_device_ctxt *mhi_dev_ctxt, enum STATE_TRANSITION cur_work_item) { enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; int r = 0; mhi_log(MHI_MSG_INFO, "Entered\n"); __pm_stay_awake(&mhi_dev_ctxt->w_lock); if (atomic_read(&mhi_dev_ctxt->flags.pending_ssr)) { mhi_log(MHI_MSG_CRITICAL, "Pending SSR, Ignoring.\n"); goto exit; } if (mhi_dev_ctxt->flags.mhi_initialized) { r = pm_request_resume(&mhi_dev_ctxt->dev_info->plat_dev->dev); mhi_log(MHI_MSG_VERBOSE, "MHI is initialized, transitioning to M0, ret %d\n", r); } if (!mhi_dev_ctxt->flags.mhi_initialized) { mhi_log(MHI_MSG_INFO, "MHI is not initialized transitioning to base.\n"); ret_val = init_mhi_base_state(mhi_dev_ctxt); if (MHI_STATUS_SUCCESS != ret_val) mhi_log(MHI_MSG_CRITICAL, "Failed to transition to base state %d.\n", ret_val); } exit: __pm_relax(&mhi_dev_ctxt->w_lock); mhi_log(MHI_MSG_INFO, "Exited.\n"); return ret_val; }
/**
 * pm_autosleep_set_state - set autosleep target with suspend-watchdog debug.
 * @state: suspend state to autosleep into; PM_SUSPEND_ON disables it.
 *
 * Besides the standard autosleep bookkeeping this variant arms an
 * "unattended" timer when sleep is requested (to flag wakelocks that
 * block suspend) and deletes it again on wakeup.
 * Returns 0 on success, -EINVAL for an out-of-range state.
 */
int pm_autosleep_set_state(suspend_state_t state)
{
	bool want_sleep;

#ifndef CONFIG_HIBERNATION
	if (state >= PM_SUSPEND_MAX)
		return -EINVAL;
#endif

	__pm_stay_awake(autosleep_ws);
	mutex_lock(&autosleep_lock);
	autosleep_state = state;
	__pm_relax(autosleep_ws);

	want_sleep = state > PM_SUSPEND_ON;
	pm_wakep_autosleep_enabled(want_sleep);
	if (want_sleep) {
		/* Arm the wakelock-debug watchdog before queueing suspend. */
		g_resume_status = false;
		pr_info("[PM]unattended_timer: mod_timer (auto_sleep)\n");
		mod_timer(&unattended_timer,
			  jiffies + msecs_to_jiffies(PM_UNATTENDED_TIMEOUT));
		queue_up_suspend_work();
	} else {
		/* Waking up: the watchdog is no longer needed. */
		pr_info("[PM]unattended_timer: del_timer (late_resume)\n");
		del_timer(&unattended_timer);
	}
	mutex_unlock(&autosleep_lock);
	return 0;
}
/*
 * alarm_wait() - block until at least one alarm fires.
 *
 * Returns the mask of alarms that were pending (consumed by this call),
 * or a negative errno if the interruptible wait was signalled.
 *
 * Protocol: wait_pending tracks an outstanding consumer; the wakeup
 * source is held from alarm delivery until the consumer either collects
 * the pending mask (wait_pending = 1 re-arms the handshake) or finds
 * nothing pending and relaxes it here.
 */
static int alarm_wait(void)
{
	unsigned long flags;
	int rv = 0;

	spin_lock_irqsave(&alarm_slock, flags);
	alarm_dbg(IO, "alarm wait\n");
	/* Previous wait consumed everything: release the wakeup source
	 * before sleeping again. */
	if (!alarm_pending && wait_pending) {
		__pm_relax(&alarm_wake_lock);
		wait_pending = 0;
	}
	spin_unlock_irqrestore(&alarm_slock, flags);
	rv = wait_event_interruptible(alarm_wait_queue, alarm_pending);
	if (rv)
		return rv;
	spin_lock_irqsave(&alarm_slock, flags);
	/* Lenovo-specific: report a poweroff alarm to userspace as a
	 * "device up" alarm instead. */
#ifdef LENOVO_ALARM
	if (alarm_pending & pwoff_mask) {
		printk("andy alarm_pending &= ~ pwoff_mask =%d \r\n",alarm_pending);
		alarm_pending &= ~ pwoff_mask;
		alarm_pending |= deviceup_mask;
		printk("andy alarm_pending |= deviceup_mask =%d \r\n",alarm_pending);
	}
#endif
	rv = alarm_pending;
	wait_pending = 1;
	alarm_pending = 0;
	spin_unlock_irqrestore(&alarm_slock, flags);
	return rv;
}
/*
 * process_WAKE_transition - service a WAKE state-transition work item.
 * @mhi_dev_ctxt:  MHI device context.
 * @cur_work_item: work item being processed (unused; common handler
 *                 signature).
 *
 * Holds the wake_lock wakeup source while bringing the PCIe link up,
 * then either initiates M0 (MHI already initialized and link up) or
 * drives the device to its base state. Returns MHI_STATUS_SUCCESS, or
 * the failure code from the link bring-up / base-state transition.
 */
MHI_STATUS process_WAKE_transition(mhi_device_ctxt *mhi_dev_ctxt,
				   STATE_TRANSITION cur_work_item)
{
	MHI_STATUS status = MHI_STATUS_SUCCESS;

	mhi_log(MHI_MSG_INFO, "Entered\n");
	__pm_stay_awake(&mhi_dev_ctxt->wake_lock);

	status = mhi_turn_on_pcie_link(mhi_dev_ctxt);
	if (status != MHI_STATUS_SUCCESS) {
		mhi_log(MHI_MSG_CRITICAL, "Failed to turn on PCIe link.\n");
		goto exit;
	}
	/* Link is up again: let the worker threads run. */
	mhi_dev_ctxt->flags.stop_threads = 0;
	if (mhi_dev_ctxt->flags.mhi_initialized &&
	    mhi_dev_ctxt->flags.link_up) {
		mhi_log(MHI_MSG_CRITICAL,
			"MHI is initialized, transitioning to M0.\n");
		mhi_initiate_m0(mhi_dev_ctxt);
	}
	if (!mhi_dev_ctxt->flags.mhi_initialized) {
		mhi_log(MHI_MSG_CRITICAL,
			"MHI is not initialized transitioning to base.\n");
		status = init_mhi_base_state(mhi_dev_ctxt);
		if (status != MHI_STATUS_SUCCESS)
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to transition to base state %d.\n",
				status);
	}
exit:
	mhi_log(MHI_MSG_INFO, "Exited.\n");
	__pm_relax(&mhi_dev_ctxt->wake_lock);
	return status;
}
/*
 * alarm_release() - file-release handler for the alarm device.
 *
 * When the releasing fd owns the device (private_data non-NULL), every
 * armed alarm is disabled and cancelled, pending state is cleared, and
 * the wakeup source is released. Always returns 0.
 */
static int alarm_release(struct inode *inode, struct file *file)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&alarm_slock, flags);
	if (file->private_data) {
		for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
			uint32_t alarm_type_mask = 1U << i;
			if (alarm_enabled & alarm_type_mask) {
				alarm_dbg(INFO, "%s: clear alarm, pending %d\n", __func__, !!(alarm_pending & alarm_type_mask));
				alarm_enabled &= ~alarm_type_mask;
			}
			/* NOTE(review): lock dropped around the cancel,
			 * presumably because devalarm_cancel() can block —
			 * confirm; globals may change while unlocked. */
			spin_unlock_irqrestore(&alarm_slock, flags);
			devalarm_cancel(&alarms[i]);
			spin_lock_irqsave(&alarm_slock, flags);
		}
		if (alarm_pending | wait_pending) {
			if (alarm_pending)
				alarm_dbg(INFO, "%s: clear pending alarms %x\n", __func__, alarm_pending);
			__pm_relax(&alarm_wake_lock);
			wait_pending = 0;
			alarm_pending = 0;
		}
		alarm_opened = 0;
	}
	spin_unlock_irqrestore(&alarm_slock, flags);
	alarm_dbg(INFO, "alarm_release (%d:%d)(%lu)\n", current->tgid, current->pid, (uintptr_t)file->private_data);
	return 0;
}
/*
 * alarm_clear() - cancel an alarm of the given type.
 * @alarm_type: which android alarm slot to clear.
 * @ts:         scratch time value; for the power-on types its tv_sec is
 *              zeroed and the whole struct is handed to
 *              alarm_set_power_on() (pointer must be valid).
 */
static void alarm_clear(enum android_alarm_type alarm_type, struct timespec *ts)
{
	uint32_t alarm_type_mask = 1U << alarm_type;
	unsigned long flags;

	alarm_dbg(IO, "alarm %d clear\n", alarm_type);
	/* Power-on(-logo) alarms are not devalarm timers: clearing them
	 * means writing a zeroed time with enable=false.
	 * NOTE(review): presumably backed by RTC/PMIC hardware — confirm
	 * against alarm_set_power_on(). */
	if (alarm_type == ANDROID_ALARM_POWER_ON || alarm_type == ANDROID_ALARM_POWER_ON_LOGO) {
		ts->tv_sec = 0;
		alarm_set_power_on(*ts, false);
		return;
	}
	spin_lock_irqsave(&alarm_slock, flags);
	devalarm_try_to_cancel(&alarms[alarm_type]);
	if (alarm_pending) {
		alarm_pending &= ~alarm_type_mask;
		/* Release the wakeup source when nothing is pending and no
		 * consumer is mid-wait. */
		if (!alarm_pending && !wait_pending)
			__pm_relax(&alarm_wake_lock);
	}
	alarm_enabled &= ~alarm_type_mask;
	spin_unlock_irqrestore(&alarm_slock, flags);
}
/*
 * mhi_process_event_ring() - drain up to @event_quota events from one
 * MHI event ring.
 * @mhi_dev_ctxt: MHI device context.
 * @ev_index:     index of the event ring to process.
 * @event_quota:  maximum number of events to consume in this call.
 *
 * Walks the ring from the local read pointer to the device read pointer,
 * copying each element out before recycling its slot, then dispatching
 * on the packet type (command completion, transfer, state change,
 * execution-environment change). Always returns MHI_STATUS_SUCCESS;
 * validation/recycle failures are only logged.
 */
MHI_STATUS mhi_process_event_ring(mhi_device_ctxt *mhi_dev_ctxt, u32 ev_index, u32 event_quota)
{
	mhi_event_pkt *local_rp = NULL;
	mhi_event_pkt *device_rp = NULL;
	mhi_event_pkt event_to_process;
	mhi_event_ctxt *ev_ctxt = NULL;
	mhi_ring *local_ev_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[ev_index];

	ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[ev_index];
	/* Device-side read pointer is a physical address; translate it. */
	device_rp = (mhi_event_pkt *)mhi_p2v_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, ev_ctxt->mhi_event_read_ptr);
	local_rp = (mhi_event_pkt *)local_ev_ctxt->rp;
	if (unlikely(MHI_STATUS_SUCCESS != validate_ev_el_addr(local_ev_ctxt, (uintptr_t)device_rp)))
		mhi_log(MHI_MSG_ERROR, "Failed to validate event ring element 0x%p\n", device_rp);
	while ((local_rp != device_rp) && (event_quota > 0) && (device_rp != NULL) && (local_rp != NULL)) {
		/* Copy the element out first: the slot is recycled (and may
		 * be overwritten by the device) before it is parsed. */
		event_to_process = *local_rp;
		if (unlikely(MHI_STATUS_SUCCESS != recycle_trb_and_ring(mhi_dev_ctxt, local_ev_ctxt, MHI_RING_TYPE_EVENT_RING, ev_index)))
			mhi_log(MHI_MSG_ERROR, "Failed to recycle ev pkt\n");
		switch (MHI_TRB_READ_INFO(EV_TRB_TYPE, (&event_to_process))) {
		case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
			mhi_log(MHI_MSG_INFO, "MHI CCE received ring 0x%x\n", ev_index);
			/* stay_awake + relax extends the wakeup source just
			 * long enough for the PM core to notice activity. */
			__pm_stay_awake(&mhi_dev_ctxt->wake_lock);
			__pm_relax(&mhi_dev_ctxt->wake_lock);
			parse_cmd_event(mhi_dev_ctxt, &event_to_process);
			break;
		case MHI_PKT_TYPE_TX_EVENT:
		{
			u32 chan = MHI_EV_READ_CHID(EV_CHID, &event_to_process);
			/* OOB / DB-mode completions on an empty HW outbound
			 * channel need no wakeup-source pulse. */
			if (((MHI_EV_READ_CODE(EV_TRB_CODE, &event_to_process) == MHI_EVENT_CC_OOB) ||
			     (MHI_EV_READ_CODE(EV_TRB_CODE, &event_to_process) == MHI_EVENT_CC_DB_MODE)) &&
			    (chan == MHI_CLIENT_IP_HW_0_OUT) &&
			    (mhi_dev_ctxt->mhi_local_chan_ctxt[chan].rp == mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp)) {
				mhi_log(MHI_MSG_VERBOSE, "Empty OOB chan %d\n", chan);
				parse_xfer_event(mhi_dev_ctxt, &event_to_process);
			} else {
				__pm_stay_awake(&mhi_dev_ctxt->wake_lock);
				parse_xfer_event(mhi_dev_ctxt, &event_to_process);
				__pm_relax(&mhi_dev_ctxt->wake_lock);
			}
		}
		break;
		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
		{
			STATE_TRANSITION new_state;
			new_state = MHI_READ_STATE(&event_to_process);
			mhi_log(MHI_MSG_INFO, "MHI STE received ring 0x%x\n", ev_index);
			mhi_init_state_transition(mhi_dev_ctxt, new_state);
			break;
		}
		case MHI_PKT_TYPE_EE_EVENT:
		{
			STATE_TRANSITION new_state;
			mhi_log(MHI_MSG_INFO, "MHI EEE received ring 0x%x\n", ev_index);
			__pm_stay_awake(&mhi_dev_ctxt->wake_lock);
			__pm_relax(&mhi_dev_ctxt->wake_lock);
			/* Map the new execution environment to a state
			 * transition; unknown environments are ignored. */
			switch (MHI_READ_EXEC_ENV(&event_to_process)) {
			case MHI_EXEC_ENV_SBL:
				new_state = STATE_TRANSITION_SBL;
				mhi_init_state_transition(mhi_dev_ctxt, new_state);
				break;
			case MHI_EXEC_ENV_AMSS:
				new_state = STATE_TRANSITION_AMSS;
				mhi_init_state_transition(mhi_dev_ctxt, new_state);
				break;
			}
			break;
		}
		default:
			mhi_log(MHI_MSG_ERROR, "Unsupported packet type code 0x%x\n", MHI_TRB_READ_INFO(EV_TRB_TYPE, &event_to_process));
			break;
		}
		/* Re-read both pointers: recycling advanced the local one and
		 * the device may have produced more events meanwhile. */
		local_rp = (mhi_event_pkt *)local_ev_ctxt->rp;
		device_rp = (mhi_event_pkt *)mhi_p2v_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, (u64)ev_ctxt->mhi_event_read_ptr);
		--event_quota;
	}
	return MHI_STATUS_SUCCESS;
}
/*
 * gpio_keypad_timer_func() - matrix-keypad scan state machine, driven by
 * an hrtimer.
 *
 * Each timer expiry handles one phase: while current_output < noutputs it
 * samples one output column's inputs, then re-arms with settle_time for
 * the next column; after the last column it optionally debounces, reports
 * key changes, and either re-arms with poll_time (polling mode or keys
 * still held) or re-enables the per-input interrupts and releases the
 * wakeup source. Always returns HRTIMER_NORESTART — the timer is
 * restarted explicitly via hrtimer_start().
 */
static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
{
	int out, in;
	int key_index;
	int gpio;
	struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
	struct gpio_event_matrix_info *mi = kp->keypad_info;
	unsigned gpio_keypad_flags = mi->flags;
	/* polarity == 1 when a pressed key drives the line active-high. */
	unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);

	out = kp->current_output;
	if (out == mi->noutputs) {
		/* Scan pass complete: roll the change flags over. */
		out = 0;
		kp->last_key_state_changed = kp->key_state_changed;
		kp->key_state_changed = 0;
		kp->some_keys_pressed = 0;
	} else {
		/* Sample every input for the currently-driven output. */
		key_index = out * mi->ninputs;
		for (in = 0; in < mi->ninputs; in++, key_index++) {
			gpio = mi->input_gpios[in];
			if (gpio_get_value(gpio) ^ !polarity) {
				/* some_keys_pressed saturates at 3 — enough to
				 * distinguish 0/1/2/many for phantom-key logic. */
				if (kp->some_keys_pressed < 3)
					kp->some_keys_pressed++;
				kp->key_state_changed |= !__test_and_set_bit(
						key_index, kp->keys_pressed);
			} else
				kp->key_state_changed |= __test_and_clear_bit(
						key_index, kp->keys_pressed);
		}
		/* De-assert this output before moving on. */
		gpio = mi->output_gpios[out];
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(gpio, !polarity);
		else
			gpio_direction_input(gpio);
		out++;
	}
	kp->current_output = out;
	if (out < mi->noutputs) {
		/* Drive the next output and wait for the lines to settle. */
		gpio = mi->output_gpios[out];
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(gpio, polarity);
		else
			gpio_direction_output(gpio, polarity);
		hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}
	if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
		/* Anything changed this pass: rescan after debounce_delay
		 * instead of reporting immediately. */
		if (kp->key_state_changed) {
			hrtimer_start(&kp->timer, mi->debounce_delay, HRTIMER_MODE_REL);
			return HRTIMER_NORESTART;
		}
		kp->key_state_changed = kp->last_key_state_changed;
	}
	if (kp->key_state_changed) {
		if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
			remove_phantom_keys(kp);
		key_index = 0;
		for (out = 0; out < mi->noutputs; out++)
			for (in = 0; in < mi->ninputs; in++, key_index++)
				report_key(kp, key_index, out, in);
		report_sync(kp);
	}
	/* Keep polling while any key is held (or in pure polling mode). */
	if (!kp->use_irq || kp->some_keys_pressed) {
		hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
		return HRTIMER_NORESTART;
	}
	/* No keys are pressed, reenable interrupt */
	for (out = 0; out < mi->noutputs; out++) {
		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
			gpio_set_value(mi->output_gpios[out], polarity);
		else
			gpio_direction_output(mi->output_gpios[out], polarity);
	}
	for (in = 0; in < mi->ninputs; in++)
		enable_irq(gpio_to_irq(mi->input_gpios[in]));
	__pm_relax(&kp->wakeup_source);
	return HRTIMER_NORESTART;
}
/*
 * mhi_initiate_m3() - drive the MHI device into the M3 (suspended) state.
 * @mhi_dev_ctxt: MHI device context.
 *
 * Serialised by pm_lock. Depending on the current state this first wakes
 * the device out of M1/M2, short-circuits when already in M3 or RESET,
 * then — if no outbound acks or data are pending and no M0 is pending —
 * requests M3 and waits for its completion before turning the PCIe link
 * off and dropping the bus vote.
 *
 * Returns 0 on success; -EPERM when the attempt is superseded (already
 * in M3 with link up, or a pending M0); -EAGAIN when aborted or the
 * device fails to suspend in time.
 */
int mhi_initiate_m3(mhi_device_ctxt *mhi_dev_ctxt)
{
	unsigned long flags;
	int r = 0;
	int abort_m3 = 0;

	mhi_log(MHI_MSG_INFO, "Entered MHI state %d, Pending M0 %d Pending M3 %d\n", mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0, mhi_dev_ctxt->flags.pending_M3);
	mutex_lock(&mhi_dev_ctxt->pm_lock);
	switch (mhi_dev_ctxt->mhi_state) {
	case MHI_STATE_M1:
	case MHI_STATE_M2:
		/* Must pass through M0/M1 before M3 can be requested. */
		mhi_log(MHI_MSG_INFO, "Triggering wake out of M2\n");
		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
		mhi_dev_ctxt->flags.pending_M3 = 1;
		mhi_assert_device_wake(mhi_dev_ctxt);
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M0_event, mhi_dev_ctxt->mhi_state == MHI_STATE_M0 || mhi_dev_ctxt->mhi_state == MHI_STATE_M1, msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
		if (0 == r || -ERESTARTSYS == r) {
			mhi_log(MHI_MSG_INFO | MHI_DBG_POWER, "MDM failed to come out of M2.\n");
			goto exit;
		}
		break;
	case MHI_STATE_M3:
		/* Already suspended; -EPERM only if the link is still up. */
		mhi_log(MHI_MSG_INFO, "MHI state %d, link state %d.\n", mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.link_up);
		if (mhi_dev_ctxt->flags.link_up)
			r = -EPERM;
		else
			r = 0;
		goto exit;
	case MHI_STATE_RESET:
		mhi_log(MHI_MSG_INFO, "MHI in RESET turning link off and quitting\n");
		mhi_turn_off_pcie_link(mhi_dev_ctxt);
		r = mhi_set_bus_request(mhi_dev_ctxt, 0);
		if (r)
			mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
		goto exit;
	default:
		mhi_log(MHI_MSG_INFO, "MHI state %d, link state %d.\n", mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.link_up);
		break;
	}
	/* Effectively an "if": the body unconditionally jumps to exit.
	 * Outstanding acks abort the suspend attempt; the wakeup-source
	 * pulse keeps the system awake a little longer. */
	while (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) {
		mhi_log(MHI_MSG_INFO | MHI_DBG_POWER, "There are still %d acks pending from device\n", atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
		__pm_stay_awake(&mhi_dev_ctxt->wake_lock);
		__pm_relax(&mhi_dev_ctxt->wake_lock);
		abort_m3 = 1;
		goto exit;
	}
	if (atomic_read(&mhi_dev_ctxt->flags.data_pending)) {
		abort_m3 = 1;
		goto exit;
	}
	r = hrtimer_cancel(&mhi_dev_ctxt->m1_timer);
	if (r)
		mhi_log(MHI_MSG_INFO, "Cancelled M1 timer, timer was active\n");
	else
		mhi_log(MHI_MSG_INFO, "Cancelled M1 timer, timer was not active\n");
	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	if (mhi_dev_ctxt->flags.pending_M0) {
		/* Resume request won the race: give up on suspending. */
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		mhi_log(MHI_MSG_INFO, "Pending M0 detected, aborting M3 procedure\n");
		r = -EPERM;
		goto exit;
	}
	mhi_dev_ctxt->flags.pending_M3 = 1;
	atomic_set(&mhi_dev_ctxt->flags.cp_m1_state, 0);
	mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_log(MHI_MSG_INFO | MHI_DBG_POWER, "Waiting for M3 completion.\n");
	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M3_event, mhi_dev_ctxt->mhi_state == MHI_STATE_M3, msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
	switch (r) {
	case 0:
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER, "MDM failed to suspend after %d ms\n", MHI_MAX_SUSPEND_TIMEOUT);
		mhi_dev_ctxt->counters.m3_event_timeouts++;
		mhi_dev_ctxt->flags.pending_M3 = 0;
		r = -EAGAIN;
		goto exit;
		break;
	case -ERESTARTSYS:
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER, "Going Down...\n");
		goto exit;
		break;
	default:
		mhi_log(MHI_MSG_INFO | MHI_DBG_POWER, "M3 completion received\n");
		break;
	}
	mhi_deassert_device_wake(mhi_dev_ctxt);
	/* Turn off PCIe link*/
	mhi_turn_off_pcie_link(mhi_dev_ctxt);
	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
	if (r)
		mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
exit:
	if (abort_m3) {
		/* Suspend was abandoned: re-ring the channel doorbells (with
		 * data_pending elevated to block a concurrent M3) and, if the
		 * device had asked for M1->M2, allow that transition now. */
		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
		atomic_inc(&mhi_dev_ctxt->flags.data_pending);
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		ring_all_chan_dbs(mhi_dev_ctxt);
		atomic_dec(&mhi_dev_ctxt->flags.data_pending);
		r = -EAGAIN;
		if (atomic_read(&mhi_dev_ctxt->flags.cp_m1_state)) {
			write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
			atomic_set(&mhi_dev_ctxt->flags.cp_m1_state, 0);
			mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
			mhi_log(MHI_MSG_INFO, "Allowing transition to M2\n");
			mhi_reg_write_field(mhi_dev_ctxt->mmio_addr, MHICTRL, MHICTRL_MHISTATE_MASK, MHICTRL_MHISTATE_SHIFT, MHI_STATE_M2);
			mhi_dev_ctxt->counters.m1_m2++;
			write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		}
	}
	/* We have to be careful here, we are setting a pending_M3 to 0
	 * even if we did not set it above. This works since the only other
	 * entity that sets this flag must also acquire the pm_lock */
	atomic_set(&mhi_dev_ctxt->flags.m3_work_enabled, 0);
	mhi_dev_ctxt->flags.pending_M3 = 0;
	mutex_unlock(&mhi_dev_ctxt->pm_lock);
	return r;
}
/*
 * cnss_pm_wake_lock_release() - release a CNSS wakeup source.
 * @ws: wakeup source previously activated (e.g. via __pm_stay_awake).
 *
 * Thin wrapper around __pm_relax() exported for CNSS clients.
 */
void cnss_pm_wake_lock_release(struct wakeup_source *ws)
{
	__pm_relax(ws);
}
/*
 * alarm_ioctl() - ioctl dispatcher for the android alarm device.
 *
 * Commands other than GET_TIME require write access, and (except
 * SET_RTC) the first such caller becomes the exclusive owner of the
 * device (alarm_opened / private_data handshake; -EBUSY otherwise).
 * Handles CLEAR, SET (old and new forms, with optional WAIT), WAIT,
 * SET_RTC and GET_TIME. Returns 0 / the pending-alarm mask for WAIT,
 * or a negative errno.
 */
static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int rv = 0;
	unsigned long flags;
	struct timespec new_alarm_time;
	struct timespec new_rtc_time;
	struct timespec tmp_time;
	struct rtc_time new_rtc_tm;
	struct rtc_device *rtc_dev;
	enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd);
	uint32_t alarm_type_mask = 1U << alarm_type;

	if (alarm_type >= ANDROID_ALARM_TYPE_COUNT)
		return -EINVAL;
	if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) {
		if ((file->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;
		/* First mutating caller claims exclusive ownership;
		 * private_data doubles as the "I own it" marker. */
		if (file->private_data == NULL && cmd != ANDROID_ALARM_SET_RTC) {
			spin_lock_irqsave(&alarm_slock, flags);
			if (alarm_opened) {
				spin_unlock_irqrestore(&alarm_slock, flags);
				return -EBUSY;
			}
			alarm_opened = 1;
			file->private_data = (void *)1;
			spin_unlock_irqrestore(&alarm_slock, flags);
		}
	}
	switch (ANDROID_ALARM_BASE_CMD(cmd)) {
	case ANDROID_ALARM_CLEAR(0):
		spin_lock_irqsave(&alarm_slock, flags);
		alarm_dbg(IO, "alarm %d clear\n", alarm_type);
		devalarm_try_to_cancel(&alarms[alarm_type]);
		if (alarm_pending) {
			alarm_pending &= ~alarm_type_mask;
			if (!alarm_pending && !wait_pending)
				__pm_relax(&alarm_wake_lock);
		}
		alarm_enabled &= ~alarm_type_mask;
		spin_unlock_irqrestore(&alarm_slock, flags);
		break;
	case ANDROID_ALARM_SET_OLD:
	case ANDROID_ALARM_SET_AND_WAIT_OLD:
		/* Legacy ABI: only a seconds value is passed. */
		if (get_user(new_alarm_time.tv_sec, (int __user *)arg)) {
			rv = -EFAULT;
			goto err1;
		}
		new_alarm_time.tv_nsec = 0;
		goto from_old_alarm_set;
	case ANDROID_ALARM_SET_AND_WAIT(0):
	case ANDROID_ALARM_SET(0):
		if (copy_from_user(&new_alarm_time, (void __user *)arg, sizeof(new_alarm_time))) {
			rv = -EFAULT;
			goto err1;
		}
from_old_alarm_set:
		spin_lock_irqsave(&alarm_slock, flags);
		alarm_dbg(IO, "alarm %d set %ld.%09ld\n", alarm_type, new_alarm_time.tv_sec, new_alarm_time.tv_nsec);
		alarm_enabled |= alarm_type_mask;
		devalarm_start(&alarms[alarm_type], timespec_to_ktime(new_alarm_time));
		spin_unlock_irqrestore(&alarm_slock, flags);
		/* SET_AND_WAIT variants continue into the WAIT case. */
		if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_SET_AND_WAIT(0) && cmd != ANDROID_ALARM_SET_AND_WAIT_OLD)
			break;
		/* fall though */
	case ANDROID_ALARM_WAIT:
		spin_lock_irqsave(&alarm_slock, flags);
		alarm_dbg(IO, "alarm wait\n");
		/* Previous wait consumed everything: drop the wakeup source
		 * before blocking again. */
		if (!alarm_pending && wait_pending) {
			__pm_relax(&alarm_wake_lock);
			wait_pending = 0;
		}
		spin_unlock_irqrestore(&alarm_slock, flags);
		rv = wait_event_interruptible(alarm_wait_queue, alarm_pending);
		if (rv)
			goto err1;
		spin_lock_irqsave(&alarm_slock, flags);
		rv = alarm_pending;
		wait_pending = 1;
		alarm_pending = 0;
		spin_unlock_irqrestore(&alarm_slock, flags);
		break;
	case ANDROID_ALARM_SET_RTC:
		if (copy_from_user(&new_rtc_time, (void __user *)arg, sizeof(new_rtc_time))) {
			rv = -EFAULT;
			goto err1;
		}
		rtc_time_to_tm(new_rtc_time.tv_sec, &new_rtc_tm);
		rtc_dev = alarmtimer_get_rtcdev();
		rv = do_settimeofday(&new_rtc_time);
		if (rv < 0)
			goto err1;
		if (rtc_dev)
			rv = rtc_set_time(rtc_dev, &new_rtc_tm);
		/* Wake all waiters so they can observe the time change,
		 * even if rtc_set_time failed. */
		spin_lock_irqsave(&alarm_slock, flags);
		alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK;
		wake_up(&alarm_wait_queue);
		spin_unlock_irqrestore(&alarm_slock, flags);
		if (rv < 0)
			goto err1;
		break;
	case ANDROID_ALARM_GET_TIME(0):
		/* Pick the clock matching the alarm type. */
		switch (alarm_type) {
		case ANDROID_ALARM_RTC_WAKEUP:
		case ANDROID_ALARM_RTC:
			getnstimeofday(&tmp_time);
			break;
		case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP:
		case ANDROID_ALARM_ELAPSED_REALTIME:
			get_monotonic_boottime(&tmp_time);
			break;
		case ANDROID_ALARM_SYSTEMTIME:
			ktime_get_ts(&tmp_time);
			break;
		default:
			rv = -EINVAL;
			goto err1;
		}
		if (copy_to_user((void __user *)arg, &tmp_time, sizeof(tmp_time))) {
			rv = -EFAULT;
			goto err1;
		}
		break;
	default:
		rv = -EINVAL;
	}
err1:
	return rv;
}
/*
 * bcm2079x_dev_read() - read one NCI/HCI packet from the NFC controller.
 *
 * Reads a 4-byte header over i2c, derives the payload length from the
 * packet type, reads the payload, and copies the whole packet to
 * userspace. Bookkeeping: decrements count_irq and, when it reaches
 * zero, cancels the wake timer and releases the host-wake wakeup
 * source (plus the legacy wakelock under CONFIG_HAS_WAKELOCK).
 *
 * Returns the number of bytes delivered, or a negative errno
 * (-EIO on a short header, -EFAULT on copy/size failure, or the i2c
 * error).
 */
static ssize_t bcm2079x_dev_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
	struct bcm2079x_dev *bcm2079x_dev = filp->private_data;
	unsigned char tmp[MAX_BUFFER_SIZE];
	int total, len, ret;

	total = 0;
	len = 0;
	DBG2(dev_info(&bcm2079x_dev->client->dev, "bcm2079x_dev_read\n"));
	if (bcm2079x_dev->count_irq > 0)
		bcm2079x_dev->count_irq--;
	bcm2079x_dev->count_read++;
	if (count > MAX_BUFFER_SIZE)
		count = MAX_BUFFER_SIZE;
	mutex_lock(&bcm2079x_dev->read_mutex);
	/* Read the first 4 bytes to include the length of the NCI or HCI packet. */
	ret = i2c_master_recv(bcm2079x_dev->client, tmp, PACKET_HEADER_SIZE_NCI);
	if (ret == PACKET_HEADER_SIZE_NCI) {
		total = ret;
		/* First byte is the packet type */
		switch (tmp[0]) {
		case PACKET_TYPE_NCI:
			len = tmp[PACKET_HEADER_SIZE_NCI-1];
			break;
		case PACKET_TYPE_HCIEV:
			len = tmp[PACKET_HEADER_SIZE_HCI-1];
			if (len == 0)
				total--; /*Since payload is 0, decrement total size (from 4 to 3) */
			else
				len--; /*First byte of payload is in tmp[3] already */
			break;
		default:
			len = 0;/*Unknown packet byte */
			break;
		}
		/* make sure full packet fits in the buffer */
		if (len > 0 && (len + total) <= count) {
			/* read the remainder of the packet. */
			ret = i2c_master_recv(bcm2079x_dev->client, tmp + total, len);
			if (ret < 0) {
				mutex_unlock(&bcm2079x_dev->read_mutex);
				return ret;
			}
			if (ret == len)
				total += len;
		}
	} else {
		/* Header read failed or was short. */
		mutex_unlock(&bcm2079x_dev->read_mutex);
		if (ret < 0)
			return ret;
		else {
			dev_err(&bcm2079x_dev->client->dev, "received only %d bytes as header\n", ret);
			return -EIO;
		}
	}
	mutex_unlock(&bcm2079x_dev->read_mutex);
	if (total > count || copy_to_user(buf, tmp, total)) {
		dev_err(&bcm2079x_dev->client->dev, "failed to copy to user space, total = %d\n", total);
		total = -EFAULT;
		bcm2079x_dev->error_read++;
	}
	/* All queued interrupts serviced: stop holding the host awake. */
	if (bcm2079x_dev->count_irq == 0) {
		del_timer(&bcm2079x_dev->wake_timer);
		__pm_relax(bcm2079x_dev->host_wake_ws);
	}
	DBG2(dev_info(&bcm2079x_dev->client->dev, "bcm2079x_dev_read %d\n", total));
#ifdef CONFIG_HAS_WAKELOCK
	if (bcm2079x_dev->count_irq == 0) {
		wake_unlock(&nfc_soft_wake_lock);
		DBG2(dev_info(&bcm2079x_dev->client->dev, "release wake lock\n"));
	}
#endif
	return total;
}
/*
 * wake_timer_callback() - wake-timer expiry handler.
 * @data: the struct bcm2079x_dev pointer, smuggled through the timer's
 *        unsigned long payload.
 *
 * Releases the host-wake wakeup source once the timer elapses.
 */
void wake_timer_callback(unsigned long data)
{
	struct bcm2079x_dev *bcm2079x_dev = (struct bcm2079x_dev *)data;
	__pm_relax(bcm2079x_dev->host_wake_ws);
}
/* microUSB switch IC : SM5502 - Silicon Mitus */
/*
 * detect_dev_sm5502() - classify the attached/detached cable from the
 * SM5502 device-type and interrupt registers.
 * @usbsw: driver state (also caches the previous register snapshot in
 *         dev1/dev2/dev3/adc/vbusin for the detach path).
 * @intr1: INT1 status (attach/detach/OVP bits).
 * @intr2: INT2 status (VBUS / reverse-accessory bits).
 * @data:  during probe (probing == 1), out-pointer receiving the int
 *         cable classification.
 *
 * On attach, maps register bits to a CABLE_TYPE* value, notifies the
 * charger callback and the usb_switch_notifier chain, and handles side
 * effects (USB path switching, JIG wakeup source + qos, dock switch,
 * audio path). On detach, logs the matching messages using the cached
 * previous snapshot and undoes the side effects. Contains several
 * documented IC-bug workarounds, including a full register save/reset/
 * restore when all device-type registers read zero.
 */
static void detect_dev_sm5502(struct sm5502_usbsw *usbsw, u8 intr1, u8 intr2, void *data)
{
	struct sm5502_platform_data *pdata = usbsw->pdata;
	struct i2c_client *client = usbsw->client;
	u8 val1, val2, val3, adc, vbusin, intr1_tmp, val;
	int dev_classifi = 0;

	read_reg(client, REG_DEV_T1, &val1);
	read_reg(client, REG_DEV_T2, &val2);
	read_reg(client, REG_DEV_T3, &val3);
	read_reg(client, REG_ADC, &adc);
	read_reg(client, REG_RSV_ID1, &vbusin);
	/* IC Bug Case W/A */
	/* OVP with CTRL reading 0x1F: re-init the chip and bail out. */
	if (intr1 & OVP_EVENT_M) {
		read_reg(client, REG_CTRL, &val);
		if (val == 0x1F) {
			sm5502_reg_init(usbsw);
			return;
		}
	}
	/* Detach -> Attach quickly */
	if (intr1 == (ATTACHED | DETACHED)) {
		dev_info(&client->dev, "Bug Case 1\n");
		intr1 &= ~(DETACHED);
	}
	/* Attach -> Detach quickly */
	else if (intr1 & ATTACHED && probing != 1) {
		read_reg(client, REG_INT1, &intr1_tmp);
		if (intr1_tmp & DETACHED) {
			dev_info(&client->dev, "Bug Case 2\n");
			intr1 &= ~(ATTACHED);
		}
		intr1 |= intr1_tmp;
	}
	/* Attached */
	if (intr1 & ATTACHED || (intr2 & (VBUSOUT_ON | VBUSOUT_OFF) && !(intr1 & DETACHED)) || intr2 & REV_ACCE) {
		if (val1 & DEV_USB && vbusin & VBUSIN_VALID) {
			dev_classifi = CABLE_TYPE1_USB_MUIC;
			dev_info(&client->dev, "USB ATTACHED*****\n");
		}
		if (val1 & DEV_CHARGER && vbusin & VBUSIN_VALID) {
			dev_classifi = CABLE_TYPE1_TA_MUIC;
			dev_info(&client->dev, "TA(DCP/CDP) ATTACHED*****\n");
		}
		if (val1 & DEV_USB_OTG) {
			dev_classifi = CABLE_TYPE1_OTG_MUIC;
			dev_info(&client->dev, "OTG ATTACHED*****\n");
		}
		if (val1 & DEV_CARKIT_CHG && vbusin & VBUSIN_VALID) {
			dev_classifi = CABLE_TYPE1_CARKIT_T1OR2_MUIC;
			/* Carkit needs the USB path switched manually. */
			manual_usbpath_ctrl(1);
			dev_info(&client->dev, "CARKIT or L USB Cable ATTACHED*****\n");
		}
		if (val2 & DEV_JIG_UART_OFF) {
			if (vbusin & VBUSIN_VALID) {
				dev_classifi = CABLE_TYPE2_JIG_UART_OFF_VB_MUIC;
				dev_info(&client->dev, "JIG_UARTOFF_VB ATTACHED*****\n");
			} else {
				dev_classifi = CABLE_TYPE2_JIG_UART_OFF_MUIC;
				dev_info(&client->dev, "JIG_UARTOFF ATTACHED*****\n");
			}
			additional_vbus_int_enable(usbsw);
		}
		if (val2 & DEV_JIG_UART_ON) {
			if (vbusin & VBUSIN_VALID) {
				dev_classifi = CABLE_TYPE2_JIG_UART_ON_VB_MUIC;
				dev_info(&client->dev, "JIG_UARTON_VB ATTACHED*****\n");
			} else {
				dev_classifi = CABLE_TYPE2_JIG_UART_ON_MUIC;
				dev_info(&client->dev, "JIG_UARTON ATTACHED*****\n");
			}
			additional_vbus_int_enable(usbsw);
		}
		if (val2 & DEV_JIG_USB_OFF && vbusin & VBUSIN_VALID) {
			dev_classifi = CABLE_TYPE2_JIG_USB_OFF_MUIC;
			dev_info(&client->dev, "JIG_USB_OFF ATTACHED*****\n");
		}
		if (val2 & DEV_JIG_USB_ON && vbusin & VBUSIN_VALID) {
			dev_classifi = CABLE_TYPE2_JIG_USB_ON_MUIC;
			dev_info(&client->dev, "JIG_USB_ON ATTACHED*****\n");
		}
		if (val2 & DEV_JIG_ALL) {
			/* Factory-test JIG: hold a wakeup source and block
			 * deep cpuidle while attached. */
			if (!jig_wakelock_acq) {
				__pm_stay_awake(&jig_suspend_wake);
				pm_qos_update_request(&usbsw->qos_idle, PM_QOS_CPUIDLE_BLOCK_AXI_VALUE);
				jig_wakelock_acq = 1;
				dev_info(&client->dev, "AP WakeLock for FactoryTest *****\n");
			}
		}
		/* Desktop Dock Case */
		if (val2 & DEV_AV) {
			/* Check device3 register for Dock+VBUS */
			if (val3 & DEV_AV_VBUS && vbusin & VBUSIN_VALID) {
				dev_classifi = CABLE_TYPE3_DESKDOCK_VB_MUIC;
				dev_info(&client->dev, "DESKDOCK+VBUS ATTACHED*****\n");
			} else {
				dev_classifi = CABLE_TYPE2_DESKDOCK_MUIC;
				dev_info(&client->dev, "DESKDOCK ATTACHED*****\n");
			}
			additional_vbus_int_enable(usbsw);
			/* Dock */
			switch_set_state(&usbsw->dock_dev, 1);
			/* Route dock audio only when no headset jack is in. */
			if (jack_is_detected)
				sm5502_dock_audiopath_ctrl(0);
			else
				sm5502_dock_audiopath_ctrl(1);
		}
		if (val3 & DEV_U200_CHG && vbusin & VBUSIN_VALID) {
			dev_classifi = CABLE_TYPE3_U200CHG_MUIC;
			dev_info(&client->dev, "TA(U200 CHG) ATTACHED*****\n");
		}
		if (val3 & DEV_DCD_OUT_SDP && vbusin & VBUSIN_VALID) {
			dev_classifi = CABLE_TYPE3_NONSTD_SDP_MUIC;
			dev_info(&client->dev, "TA(NON-STANDARD SDP) ATTACHED*****\n");
		}
		/* W/A */
		/* Attach signalled but no device type latched: save config,
		 * reset the IC, restore config, and retry on the next irq. */
		if (val1 == 0 && val2 == 0 && val3 == 0 && reset_count < MAX_RESET_TRIAL && probing != 1) {
			u8 sintm1, sintm2, sctrl, stime1, smansw1;
			read_reg(client, REG_INT1_MASK, &sintm1);
			read_reg(client, REG_INT2_MASK, &sintm2);
			read_reg(client, REG_TIMING1, &stime1);
			read_reg(client, REG_MANSW1, &smansw1);
			read_reg(client, REG_CTRL, &sctrl);
			write_reg(client, REG_RESET, IC_RESET);
			msleep(20);
			dev_info(&client->dev, "SM5502 was reset, reset_count : %d\n", reset_count);
			write_reg(client, REG_INT1_MASK, sintm1);
			write_reg(client, REG_INT2_MASK, sintm2);
			write_reg(client, REG_TIMING1, stime1);
			write_reg(client, REG_MANSW1, smansw1);
			write_reg(client, REG_CTRL, sctrl);
			reset_count++;
			return;
		}
		/* for Charger driver */
		if (pdata->charger_cb)
			pdata->charger_cb(dev_classifi);
		if (probing == 1)
			*(int *)data = dev_classifi;
		blocking_notifier_call_chain(&usb_switch_notifier, dev_classifi, NULL);
	}
	/* Detached */
	/* Uses the snapshot cached at the previous attach (usbsw->dev*). */
	if (intr1 & DETACHED) {
		if (usbsw->dev1 & DEV_USB && usbsw->vbusin & VBUSIN_VALID) {
			dev_info(&client->dev, "USB DETACHED*****\n");
		}
		if (usbsw->dev1 & DEV_CHARGER && usbsw->vbusin & VBUSIN_VALID) {
			dev_info(&client->dev, "TA(DCP/CDP) DETACHED*****\n");
		}
		if (usbsw->dev1 & DEV_USB_OTG) {
			dev_info(&client->dev, "OTG DETACHED*****\n");
		}
		if (usbsw->dev1 & DEV_CARKIT_CHG && usbsw->vbusin & VBUSIN_VALID) {
			manual_usbpath_ctrl(0);
			dev_info(&client->dev, "CARKIT or L USB Cable DETACHED*****\n");
		}
		if (usbsw->dev2 & DEV_JIG_UART_OFF) {
			if (usbsw->vbusin & VBUSIN_VALID) {
				dev_info(&client->dev, "JIG_UARTOFF+VBUS DETACHED*****\n");
			} else {
				dev_info(&client->dev, "JIG_UARTOFF DETACHED*****\n");
			}
			additional_vbus_int_disable(usbsw);
		}
		if (usbsw->dev2 & DEV_JIG_UART_ON) {
			if (usbsw->vbusin & VBUSIN_VALID) {
				dev_info(&client->dev, "JIG_UARTON_VB DETACHED*****\n");
			} else {
				dev_info(&client->dev, "JIG_UARTON DETACHED*****\n");
			}
			additional_vbus_int_disable(usbsw);
		}
		if (usbsw->dev2 & DEV_JIG_USB_OFF && usbsw->vbusin & VBUSIN_VALID) {
			dev_info(&client->dev, "JIG_USB_OFF DETACHED*****\n");
		}
		if (usbsw->dev2 & DEV_JIG_USB_ON && usbsw->vbusin & VBUSIN_VALID) {
			dev_info(&client->dev, "JIG_USB_ON DETACHED*****\n");
		}
		if (usbsw->dev2 & DEV_JIG_ALL) {
			/* JIG gone: drop the wakeup source and qos request. */
			if (jig_wakelock_acq) {
				__pm_relax(&jig_suspend_wake);
				pm_qos_update_request(&usbsw->qos_idle, PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
				jig_wakelock_acq = 0;
				dev_info(&client->dev, "AP WakeLock Release *****\n");
			}
		}
		if (usbsw->dev2 & DEV_AV) {
			/* Check device3 register for Dock+VBUS */
			if (usbsw->dev3 & DEV_AV_VBUS && usbsw->vbusin & VBUSIN_VALID) {
				dev_info(&client->dev, "DESKDOCK+VBUS DETTACHED*****\n");
			} else {
				dev_info(&client->dev, "DESKDOCK DETACHED*****\n");
			}
			additional_vbus_int_disable(usbsw);
			/* Dock */
			switch_set_state(&usbsw->dock_dev, 0);
			sm5502_dock_audiopath_ctrl(0);
		}
		if (usbsw->dev3 & DEV_U200_CHG && usbsw->vbusin & VBUSIN_VALID) {
			dev_info(&client->dev, "TA(U200_CHG) DETTACHED*****\n");
		}
		if (usbsw->dev3 & DEV_DCD_OUT_SDP && usbsw->vbusin & VBUSIN_VALID) {
			dev_info(&client->dev, "TA(NON-STANDARD SDP) DETACHED*****\n");
		}
		/* for Charger driver */
		if (pdata->charger_cb)
			pdata->charger_cb(CABLE_TYPE_NONE_MUIC);
		blocking_notifier_call_chain(&usb_switch_notifier, CABLE_TYPE_NONE_MUIC, NULL);
		reset_count = 0;
	}
	/* Cache this snapshot for the next detach event. */
	usbsw->dev1 = val1;
	usbsw->dev2 = val2;
	usbsw->dev3 = val3;
	usbsw->adc = adc;
	usbsw->vbusin = vbusin;
	return;
}
/*
 * mhi_initiate_m3() - Attempt to move the MHI device into the M3 (suspended)
 * state so the PCIe link can be turned off.
 *
 * Holds mhi_dev_ctxt->pm_lock for the whole transition.  Behavior by current
 * MHI state:
 *   - RESET:      turn the PCIe link off, drop the bus bandwidth vote, exit.
 *   - M0/M1/M2:   mark M3 pending, assert device wake (unless an M2
 *                 transition is already in flight) and wait for the device
 *                 to reach M0/M1 before proceeding.
 *   - M3:         already suspended; -EAGAIN if the link is still up, else 0.
 * The transition is aborted with -EAGAIN if outbound acks or data are still
 * pending; the abort path re-rings all channel/command doorbells and
 * deasserts device wake so traffic can resume.
 *
 * Return: 0 on success, -EAGAIN if the transition cannot be made now.
 */
int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	unsigned long flags;
	int r = 0;
	int abort_m3 = 0;

	mhi_log(MHI_MSG_INFO,
		"Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
		mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
		mhi_dev_ctxt->flags.pending_M3);
	mutex_lock(&mhi_dev_ctxt->pm_lock);
	switch (mhi_dev_ctxt->mhi_state) {
	case MHI_STATE_RESET:
		/* Device not active: just power down the link and unvote. */
		mhi_log(MHI_MSG_INFO,
			"MHI in RESET turning link off and quitting\n");
		mhi_turn_off_pcie_link(mhi_dev_ctxt);
		r = mhi_set_bus_request(mhi_dev_ctxt, 0);
		if (r)
			mhi_log(MHI_MSG_INFO,
				"Failed to set bus freq ret %d\n", r);
		goto exit;
		break;
	case MHI_STATE_M0:
	case MHI_STATE_M1:
	case MHI_STATE_M2:
		mhi_log(MHI_MSG_INFO,
			"Triggering wake out of M2\n");
		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
		mhi_dev_ctxt->flags.pending_M3 = 1;
		/*
		 * Only assert device wake here if no M2 transition is in
		 * progress; otherwise the M2 path owns the wake handshake.
		 */
		if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) {
			mhi_log(MHI_MSG_INFO,
				"M2 transition not set\n");
			mhi_assert_device_wake(mhi_dev_ctxt);
		}
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		/* Wait for the device to come back to an active state. */
		r = wait_event_interruptible_timeout(
				*mhi_dev_ctxt->mhi_ev_wq.m0_event,
				mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
				mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
				msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
		if (0 == r || -ERESTARTSYS == r) {
			/* Timed out or interrupted by a signal. */
			mhi_log(MHI_MSG_INFO,
				"MDM failed to come out of M2.\n");
			r = -EAGAIN;
			goto exit;
		}
		break;
	case MHI_STATE_M3:
		/* Already in M3; only succeed if the link is already down. */
		mhi_log(MHI_MSG_INFO,
			"MHI state %d, link state %d.\n",
			mhi_dev_ctxt->mhi_state,
			mhi_dev_ctxt->flags.link_up);
		if (mhi_dev_ctxt->flags.link_up)
			r = -EAGAIN;
		else
			r = 0;
		goto exit;
	default:
		mhi_log(MHI_MSG_INFO,
			"MHI state %d, link state %d.\n",
			mhi_dev_ctxt->mhi_state,
			mhi_dev_ctxt->flags.link_up);
		break;
	}
	/*
	 * Written as a while loop but aborts on the first iteration: any
	 * pending outbound ack defers the suspend attempt.  The
	 * stay_awake/relax pair pulses the wakeup source so the ack
	 * processing is not raced by autosleep.
	 */
	while (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) {
		mhi_log(MHI_MSG_INFO,
			"There are still %d acks pending from device\n",
			atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
		__pm_stay_awake(&mhi_dev_ctxt->w_lock);
		__pm_relax(&mhi_dev_ctxt->w_lock);
		abort_m3 = 1;
		r = -EAGAIN;
		goto exit;
	}
	if (atomic_read(&mhi_dev_ctxt->flags.data_pending)) {
		abort_m3 = 1;
		r = -EAGAIN;
		goto exit;
	}
	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	if (mhi_dev_ctxt->flags.pending_M0) {
		/* A resume request won the race; give up on suspending. */
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		r = -EAGAIN;
		goto exit;
	}
	mhi_dev_ctxt->flags.pending_M3 = 1;
	mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_log(MHI_MSG_INFO,
		"Waiting for M3 completion.\n");
	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
			mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
		msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
	switch (r) {
	case 0:
		/*
		 * NOTE(review): on timeout r == 0 and we goto exit, so the
		 * caller sees 0 (success) even though M3 was never reached —
		 * confirm whether this should return an error instead.
		 */
		mhi_log(MHI_MSG_CRITICAL,
			"MDM failed to suspend after %d ms\n",
			MHI_MAX_SUSPEND_TIMEOUT);
		mhi_dev_ctxt->counters.m3_event_timeouts++;
		mhi_dev_ctxt->flags.pending_M3 = 0;
		goto exit;
		break;
	default:
		mhi_log(MHI_MSG_INFO,
			"M3 completion received\n");
		break;
	}
	/* Device is in M3: power down the link and drop the bus vote. */
	mhi_turn_off_pcie_link(mhi_dev_ctxt);
	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
	if (r)
		mhi_log(MHI_MSG_INFO,
			"Failed to set bus freq ret %d\n", r);
exit:
	if (abort_m3) {
		/*
		 * Aborted: bump data_pending so the doorbell re-ring is not
		 * raced by a concurrent suspend attempt, kick all channel and
		 * command doorbells, then release device wake.
		 */
		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
		atomic_inc(&mhi_dev_ctxt->flags.data_pending);
		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
		ring_all_chan_dbs(mhi_dev_ctxt);
		ring_all_cmd_dbs(mhi_dev_ctxt);
		atomic_dec(&mhi_dev_ctxt->flags.data_pending);
		mhi_deassert_device_wake(mhi_dev_ctxt);
	}
	mhi_dev_ctxt->flags.pending_M3 = 0;
	mutex_unlock(&mhi_dev_ctxt->pm_lock);
	return r;
}
/*
 * smd_tty_read() - Drain available data from the SMD channel into the TTY
 * flip buffer until the channel is empty, the TTY is throttled, or the
 * channel is in reset.
 *
 * @param: the struct smd_tty_info * for this port, cast to unsigned long
 *         (tasklet/timer-style callback argument — TODO confirm caller).
 *
 * Takes a kref on the TTY via tty_port_tty_get() and releases it on every
 * exit path.  ra_lock_lha3 is held across the smd_read_avail() check so the
 * "no data" __pm_relax() cannot race with new-data wakeups.
 */
static void smd_tty_read(unsigned long param)
{
	unsigned char *ptr;
	int avail;
	struct smd_tty_info *info = (struct smd_tty_info *)param;
	struct tty_struct *tty = tty_port_tty_get(&info->port);
	unsigned long flags;

	if (!tty)
		return;

	for (;;) {
		if (is_in_reset(info)) {
			/* signal TTY clients using TTY_BREAK */
			tty_insert_flip_char(tty, 0x00, TTY_BREAK);
			tty_flip_buffer_push(tty);
			break;
		}
		/* Userspace asked us to stop; resume on unthrottle. */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;
		spin_lock_irqsave(&info->ra_lock_lha3, flags);
		avail = smd_read_avail(info->ch);
		if (avail == 0) {
			/* Channel drained: allow the system to sleep again. */
			__pm_relax(&info->ra_wakeup_source);
			spin_unlock_irqrestore(&info->ra_lock_lha3, flags);
			break;
		}
		spin_unlock_irqrestore(&info->ra_lock_lha3, flags);

		if (avail > MAX_TTY_BUF_SIZE)
			avail = MAX_TTY_BUF_SIZE;

		avail = tty_prepare_flip_string(tty, &ptr, avail);
		if (avail <= 0) {
			/* No flip-buffer space; retry in 30 ms. */
			mod_timer(&info->buf_req_timer,
				  jiffies + msecs_to_jiffies(30));
			tty_kref_put(tty);
			return;
		}

		if (smd_read(info->ch, ptr, avail) != avail) {
			/* shouldn't be possible since we're in interrupt
			** context here and nobody else could 'steal' our
			** characters.
			*/
			SMD_TTY_ERR(
				"%s - Possible smd_tty_buffer mismatch for %s",
				__func__, info->ch->name);
		}

		/*
		 * Keep system awake long enough to allow the TTY
		 * framework to pass the flip buffer to any waiting
		 * userspace clients.
		 */
		__pm_wakeup_event(&info->pending_ws, TTY_PUSH_WS_DELAY);

		tty_flip_buffer_push(tty);
	}

	/* XXX only when writable and necessary */
	tty_wakeup(tty);
	tty_kref_put(tty);
}
void mhi_link_state_cb(struct msm_pcie_notify *notify) { enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; struct mhi_pcie_dev_info *mhi_pcie_dev = notify->data; struct mhi_device_ctxt *mhi_dev_ctxt = NULL; int r = 0; if (NULL == notify || NULL == notify->data) { mhi_log(MHI_MSG_CRITICAL, "Incomplete handle received\n"); return; } mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt; switch (notify->event) { case MSM_PCIE_EVENT_LINKDOWN: mhi_log(MHI_MSG_INFO, "Received MSM_PCIE_EVENT_LINKDOWN\n"); break; case MSM_PCIE_EVENT_LINKUP: mhi_log(MHI_MSG_INFO, "Received MSM_PCIE_EVENT_LINKUP\n"); if (0 == mhi_pcie_dev->link_up_cntr) { mhi_log(MHI_MSG_INFO, "Initializing MHI for the first time\n"); r = mhi_ctxt_init(mhi_pcie_dev); if (r) { mhi_log(MHI_MSG_ERROR, "MHI initialization failed, ret %d.\n", r); r = msm_pcie_register_event( &mhi_pcie_dev->mhi_pci_link_event); mhi_log(MHI_MSG_ERROR, "Deregistered from PCIe notif r %d.\n", r); return; } mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt; mhi_pcie_dev->mhi_ctxt.flags.link_up = 1; pci_set_master(mhi_pcie_dev->pcie_device); init_mhi_base_state(mhi_dev_ctxt); } else { mhi_log(MHI_MSG_INFO, "Received Link Up Callback\n"); } mhi_pcie_dev->link_up_cntr++; break; case MSM_PCIE_EVENT_WAKEUP: mhi_log(MHI_MSG_INFO, "Received MSM_PCIE_EVENT_WAKE\n"); __pm_stay_awake(&mhi_dev_ctxt->w_lock); __pm_relax(&mhi_dev_ctxt->w_lock); if (atomic_read(&mhi_dev_ctxt->flags.pending_resume)) { mhi_log(MHI_MSG_INFO, "There is a pending resume, doing nothing.\n"); return; } ret_val = mhi_init_state_transition(mhi_dev_ctxt, STATE_TRANSITION_WAKE); if (MHI_STATUS_SUCCESS != ret_val) { mhi_log(MHI_MSG_CRITICAL, "Failed to init state transition, to %d\n", STATE_TRANSITION_WAKE); } break; default: mhi_log(MHI_MSG_INFO, "Received bad link event\n"); return; } }
/*
 * mhi_wake_relax() - Release the MHI wakeup source.
 *
 * Counterpart to the stay-awake side of the wake_lock: once this runs,
 * MHI no longer blocks system suspend.
 */
void mhi_wake_relax(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	mhi_log(MHI_MSG_INFO, "System may sleep.\n");

	/* Drop our vote against suspend. */
	__pm_relax(&mhi_dev_ctxt->wake_lock);
}