static void alarmtimer_triggered_func(void *p) { struct rtc_device *rtc = rtcdev; if (!(rtc->irq_data & RTC_AF)) return; __pm_wakeup_event(ws, 2 * MSEC_PER_SEC); }
static void devalarm_triggered(struct devalarm *alarm) { unsigned long flags; uint32_t alarm_type_mask = 1U << alarm->type; alarm_dbg(INT, "%s: type %d\n", __func__, alarm->type); spin_lock_irqsave(&alarm_slock, flags); if (alarm_enabled & alarm_type_mask) { __pm_wakeup_event(&alarm_wake_lock, 5000); /* 5secs */ __pm_wakeup_event(&temp_wake_lock, 1000); /* 1secs */ alarm_enabled &= ~alarm_type_mask; alarm_pending |= alarm_type_mask; wake_up(&alarm_wait_queue); } spin_unlock_irqrestore(&alarm_slock, flags); }
void dp_packet_send_cb(struct shm_rbctl *rbctl) { struct data_path *dp; struct shm_skctl *skctl; static unsigned long last_time = INITIAL_JIFFIES; if (!rbctl) return; dp = rbctl->priv; dp->stat.rx_interrupts++; /* * hold 2s wakeup source for user space * do not try to hold again if it is already held in last 0.5s */ if (time_after(jiffies, last_time + HZ / 2)) { __pm_wakeup_event(&dp_rx_wakeup, 2000); last_time = jiffies; } skctl = rbctl->skctl_va; trace_psd_recv_irq(skctl->cp_wptr); data_path_schedule_rx(dp); }
/*
 * 88PM80x gives us an interrupt when ONKEY is held.
 *
 * Reads the ONKEY status bit from PM800_STATUS_1 and reports it as
 * KEY_POWER. With CONFIG_FAKE_SYSTEMOFF, long-press detection is done
 * in software via presscheck_work instead of reporting the key.
 * Returns IRQ_NONE if the status read fails, IRQ_HANDLED otherwise.
 *
 * NOTE(review): the braces of "} else {" below open and close across
 * #ifdef CONFIG_FAKE_SYSTEMOFF boundaries — edit with care.
 */
static irqreturn_t pm80x_onkey_handler(int irq, void *data)
{
	struct pm80x_onkey_info *info = data;
	int ret = 0;
	unsigned int val;

	ret = regmap_read(info->map, PM800_STATUS_1, &val);
	if (ret < 0) {
		dev_err(info->idev->dev.parent,
			"failed to read status: %d\n", ret);
		return IRQ_NONE;
	}
	/* isolate the ONKEY pressed/released bit */
	val &= PM800_ONKEY_STS1;

	/*
	 * HW workaround: a LONG_ONKEY_EVENT can be detected wrongly and
	 * trigger an unexpected power down/power up cycle. Software
	 * resets the LONG_ONKEY timer here to avoid that.
	 */
	regmap_update_bits(info->map, PM800_WAKEUP1,
			   PM800_LONGKEY_RESET, PM800_LONGKEY_RESET);

#ifdef CONFIG_FAKE_SYSTEMOFF
	if (fake_sysoff_block_onkey())
		goto out;

	if (fake_sysoff_status_query()) {
		if (val) {
			/* down key: arm the software long-press check once */
			if (!atomic_cmpxchg(&longpress_work_state, 0, 1)) {
				schedule_delayed_work(&presscheck_work,
						      LONGPRESS_INTERVAL);
				/* Think about following case: onkey down/up->
				 * kernel will awake 5s ->after 4s->
				 * user long down on key -> after 1s, suspend
				 * -> have no chance for 3s timeout */
				__pm_wakeup_event(&suspend_longkey_lock,
					jiffies_to_msecs(LONGPRESS_INTERVAL + HZ));
			}
		} else {
			/* up key */
			if (atomic_cmpxchg(&longpress_work_state, 1, 0)) {
				/* short press */
				cancel_delayed_work_sync(&presscheck_work);
			}
		}
	} else {
#endif
#if defined(CONFIG_KERNEL_DEBUG_SEC) && (defined(CONFIG_MACH_LT02) || defined(CONFIG_MACH_COCOA7))
	key_dbg("%s state = %d\n", __func__, val);
	gpio_keys_setstate(KEY_POWER, val ? true : false);
	if (val && gpio_keys_getstate(KEY_VOLUMEUP) && jack_is_detected)
		gpio_keys_start_upload_modtimer();
#endif
	input_report_key(info->idev, KEY_POWER, val);
	input_sync(info->idev);
#ifdef CONFIG_FAKE_SYSTEMOFF
	}
out:
#endif
	is_power_key_pressed = val; // AT+KEYSHORT cmd
	return IRQ_HANDLED;
}
/*
 * 88PM822 gives us an interrupt when ONKEY is held.
 *
 * Reads the ONKEY status bit from PM822_STATUS1 and reports it as
 * KEY_POWER. With CONFIG_FAKE_SYSTEMOFF, long-press detection is done
 * in software via presscheck_work instead of reporting the key.
 * Returns IRQ_NONE if the status read fails, IRQ_HANDLED otherwise.
 *
 * NOTE(review): the braces of "} else {" below open and close across
 * #ifdef CONFIG_FAKE_SYSTEMOFF boundaries — edit with care.
 */
static irqreturn_t pm822_onkey_handler(int irq, void *data)
{
	struct pm822_onkey_info *info = data;
	int ret = 0;
	unsigned int val;

	ret = regmap_read(info->map, PM822_STATUS1, &val);
	if (ret < 0) {
		dev_err(info->idev->dev.parent,
			"failed to read status: %d\n", ret);
		return IRQ_NONE;
	}
	/* isolate the ONKEY pressed/released bit */
	val &= PM822_ONKEY_STS1;

#ifdef CONFIG_FAKE_SYSTEMOFF
	if (fake_sysoff_block_onkey())
		goto out;

	if (fake_sysoff_status_query()) {
		if (val) {
			/* down key: arm the software long-press check once */
			if (!atomic_cmpxchg(&longpress_work_state, 0, 1)) {
				schedule_delayed_work(&presscheck_work,
						      LONGPRESS_INTERVAL);
				/* Think about following case: onkey down/up->
				 * kernel will awake 5s ->after 4s->
				 * user long down on key -> after 1s, suspend
				 * -> have no chance for 3s timeout */
				__pm_wakeup_event(&suspend_longkey_lock,
					jiffies_to_msecs(LONGPRESS_INTERVAL + HZ));
			}
		} else {
			/* up key */
			if (atomic_cmpxchg(&longpress_work_state, 1, 0)) {
				/* short press */
				cancel_delayed_work_sync(&presscheck_work);
			}
		}
	} else {
#endif
#if !defined(CONFIG_SAMSUNG_PRODUCT_SHIP)
	printk("power key val =%d\n",val);
#endif
	input_report_key(info->idev, KEY_POWER, val);
	input_sync(info->idev);
#ifdef CONFIG_FAKE_SYSTEMOFF
	}
out:
#endif
	is_power_key_pressed = val; // AT+KEYSHORT cmd
	return IRQ_HANDLED;
}
/*
 * dp_rb_resume_cb - ring-buffer resume callback.
 *
 * Resumes the shared-memory ring buffer, holds a 2s wakeup source,
 * and — if the data path is open — schedules the tx worker.
 */
void dp_rb_resume_cb(struct shm_rbctl *rbctl)
{
	struct data_path *dp;

	if (!rbctl)
		return;

	shm_rb_resume(rbctl);

	dp = rbctl->priv;

	__pm_wakeup_event(&dp_acipc_wakeup, 2000);
	pr_warn("MSOCK: dp_rb_resume_cb!!!\n");

	if (!dp || atomic_read(&dp->state) != dp_state_opened)
		return;

	/*
	 * No need to check queue length here — the upper layer is
	 * resumed inside tx_func anyway.
	 */
	data_path_schedule_tx(dp);
}
/*
 * dp_rb_stop_cb - ring-buffer stop callback.
 *
 * Stops the shared-memory ring buffer, holds a 5s wakeup source,
 * and — if the data path is open — notifies the rx_stop callback
 * (when registered) and schedules the rx worker.
 */
void dp_rb_stop_cb(struct shm_rbctl *rbctl)
{
	struct data_path *dp;

	if (!rbctl)
		return;

	shm_rb_stop(rbctl);

	dp = rbctl->priv;

	__pm_wakeup_event(&dp_acipc_wakeup, 5000);
	pr_warn("MSOCK: dp_rb_stop_cb!!!\n");

	if (!dp || atomic_read(&dp->state) != dp_state_opened)
		return;

	if (dp->cbs && dp->cbs->rx_stop)
		dp->cbs->rx_stop();

	data_path_schedule_rx(dp);
}
/*
 * acpi_pm_notify_handler - handle an ACPI device-wake notification.
 *
 * Ignores anything other than ACPI_NOTIFY_DEVICE_WAKE. For a wake
 * notification: looks up the acpi_device, and — if a wakeup notifier
 * is registered — reports a wakeup event on its wakeup source and
 * queues its PM work (when one is set). Lookup reference is dropped
 * before returning.
 */
static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
{
	struct acpi_device *adev;

	if (val != ACPI_NOTIFY_DEVICE_WAKE)
		return;

	adev = acpi_bus_get_acpi_device(handle);
	if (!adev)
		return;

	mutex_lock(&acpi_pm_notifier_lock);

	if (adev->wakeup.flags.notifier_present) {
		__pm_wakeup_event(adev->wakeup.ws, 0);
		if (adev->wakeup.context.work.func)
			queue_pm_work(&adev->wakeup.context.work);
	}

	mutex_unlock(&acpi_pm_notifier_lock);

	acpi_bus_put_acpi_device(adev);
}
/*
 * cnss_pm_wake_lock_timeout - hold @ws for @msec milliseconds.
 *
 * Thin public wrapper around __pm_wakeup_event() for CNSS clients.
 */
void cnss_pm_wake_lock_timeout(struct wakeup_source *ws, ulong msec)
{
	__pm_wakeup_event(ws, msec);
}
/*
 * smd_tty_read - drain the SMD channel into the TTY flip buffer.
 *
 * Runs until the channel is empty, the TTY is throttled, the channel
 * is in reset (clients are signalled with TTY_BREAK), or the TTY has
 * no flip-buffer space (in which case a retry timer is armed and the
 * function returns early). When the channel drains to empty, the
 * read-available wakeup source is released under ra_lock_lha3 so the
 * empty check and the relax are atomic w.r.t. new data arriving.
 *
 * @param: the struct smd_tty_info pointer, cast to unsigned long
 *         (tasklet/timer-style callback signature).
 */
static void smd_tty_read(unsigned long param)
{
	unsigned char *ptr;
	int avail;
	struct smd_tty_info *info = (struct smd_tty_info *)param;
	struct tty_struct *tty = tty_port_tty_get(&info->port);
	unsigned long flags;

	if (!tty)
		return;

	for (;;) {
		if (is_in_reset(info)) {
			/* signal TTY clients using TTY_BREAK */
			tty_insert_flip_char(tty, 0x00, TTY_BREAK);
			tty_flip_buffer_push(tty);
			break;
		}

		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		/* empty-check + relax must be atomic vs. incoming data */
		spin_lock_irqsave(&info->ra_lock_lha3, flags);
		avail = smd_read_avail(info->ch);
		if (avail == 0) {
			__pm_relax(&info->ra_wakeup_source);
			spin_unlock_irqrestore(&info->ra_lock_lha3, flags);
			break;
		}
		spin_unlock_irqrestore(&info->ra_lock_lha3, flags);

		if (avail > MAX_TTY_BUF_SIZE)
			avail = MAX_TTY_BUF_SIZE;

		avail = tty_prepare_flip_string(tty, &ptr, avail);
		if (avail <= 0) {
			/* no flip-buffer space: retry in 30ms */
			mod_timer(&info->buf_req_timer,
					jiffies + msecs_to_jiffies(30));
			tty_kref_put(tty);
			return;
		}

		if (smd_read(info->ch, ptr, avail) != avail) {
			/* shouldn't be possible since we're in interrupt
			** context here and nobody else could 'steal' our
			** characters.
			*/
			SMD_TTY_ERR(
				"%s - Possible smd_tty_buffer mismatch for %s",
				__func__, info->ch->name);
		}

		/*
		 * Keep system awake long enough to allow the TTY
		 * framework to pass the flip buffer to any waiting
		 * userspace clients.
		 */
		__pm_wakeup_event(&info->pending_ws, TTY_PUSH_WS_DELAY);

		tty_flip_buffer_push(tty);
	}

	/* XXX only when writable and necessary */
	tty_wakeup(tty);
	tty_kref_put(tty);
}
static void try_to_suspend(struct work_struct *work) { unsigned int initial_count, final_count; int error = 0; #ifdef CONFIG_PM_SLEEP_HISTORY int i; static unsigned int autosleep_active; static struct wakeup_source *last_ws[4]; struct timespec ts; if (autosleep_active == 0) { autosleep_active = 1; getnstimeofday(&ts); sleep_history_marker(SLEEP_HISTORY_AUTOSLEEP_ENTRY, &ts, NULL); } #endif if (!pm_get_wakeup_count(&initial_count, true)) goto out; mutex_lock(&autosleep_lock); if (!pm_save_wakeup_count(initial_count)) { mutex_unlock(&autosleep_lock); goto out; } #ifdef CONFIG_PM_SLEEP_HISTORY memset(last_ws, 0, sizeof(last_ws)); pm_get_last_wakeup_sources(&last_ws[0], sizeof(last_ws)/sizeof(struct wakeup_source *)); autosleep_active = 0; getnstimeofday(&ts); if (last_ws[0]) { sleep_history_marker(SLEEP_HISTORY_AUTOSLEEP_EXIT, &ts, last_ws[0]); for (i = 1; last_ws[i] && i < sizeof(last_ws)/sizeof(struct wakeup_source *); i++) sleep_history_marker(SLEEP_HISTORY_AUTOSLEEP_EXIT, NULL, last_ws[i]); memset(last_ws, 0, sizeof(last_ws)); } else sleep_history_marker(SLEEP_HISTORY_AUTOSLEEP_EXIT, &ts, autosleep_ws); #endif if (autosleep_state == PM_SUSPEND_ON) { mutex_unlock(&autosleep_lock); return; } if (autosleep_state >= PM_SUSPEND_MAX) hibernate(); else error = pm_suspend(autosleep_state); mutex_unlock(&autosleep_lock); #ifdef CONFIG_PM_SLEEP_HISTORY if (autosleep_active == 0) { autosleep_active = 1; getnstimeofday(&ts); sleep_history_marker(SLEEP_HISTORY_AUTOSLEEP_ENTRY, &ts, NULL); } if (error) goto out; if (!pm_get_wakeup_count(&final_count, false)) { __pm_wakeup_event(autosleep_ws, AUTOSLEEP_SUSPEND_BLOCK_TIME); goto out; } #else if (error) goto out; if (!pm_get_wakeup_count(&final_count, false)) { __pm_wakeup_event(autosleep_ws, AUTOSLEEP_SUSPEND_BLOCK_TIME); goto out; } #endif /* * If the wakeup occured for an unknown reason, wait to prevent the * system from trying to suspend and waking up in a tight loop. 
*/ if (final_count == initial_count) schedule_timeout_uninterruptible(HZ / 2); out: #ifdef CONFIG_PM_SLEEP_HISTORY memset(last_ws, 0, sizeof(last_ws)); pm_get_last_wakeup_sources(&last_ws[0], sizeof(last_ws)/sizeof(struct wakeup_source *)); if (autosleep_state == PM_SUSPEND_ON) { autosleep_active = 0; getnstimeofday(&ts); if (last_ws[0]) { sleep_history_marker(SLEEP_HISTORY_AUTOSLEEP_EXIT, &ts, last_ws[0]); for (i = 1; last_ws[i] && i < sizeof(last_ws)/sizeof(struct wakeup_source *); i++) sleep_history_marker(SLEEP_HISTORY_AUTOSLEEP_EXIT, NULL, last_ws[i]); memset(last_ws, 0, sizeof(last_ws)); } else sleep_history_marker(SLEEP_HISTORY_AUTOSLEEP_EXIT, &ts, autosleep_ws); } #endif /* * If the device failed to suspend, wait to prevent the * system from trying to suspend and waking up in a tight loop. */ if (error) { pr_info("PM: suspend returned(%d)\n", error); schedule_timeout_uninterruptible(HZ / 2); } queue_up_suspend_work(); }