/*
 * i8042 filter: snoop the keyboard-controller byte stream for the EC's
 * extended hotkey scancodes and kick the matching delayed work.
 *
 * Always returns false so every byte is still delivered to the normal
 * keyboard driver - this filter only observes, it never consumes.
 */
static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
				    struct serio *port)
{
	static bool extended;	/* previous byte was the 0xe0 prefix */

	/* status bit 5 set: byte came from the aux (mouse) port - ignore */
	if (str & 0x20)
		return false;

	/* 0x54 wwan, 0x62 bluetooth, 0x76 wlan, 0xE4 touchpad toggle*/
	if (unlikely(data == 0xe0)) {
		/* prefix byte: remember it and wait for the second byte */
		extended = true;
		return false;
	} else if (unlikely(extended)) {
		extended = false;
		switch (data) {
		case 0xE4:
			/* debounce: handle the toggle half a second later */
			schedule_delayed_work(&msi_touchpad_work,
				round_jiffies_relative(0.5 * HZ));
			break;
		case 0x54:
		case 0x62:
		case 0x76:
			schedule_delayed_work(&msi_rfkill_work,
				round_jiffies_relative(0.5 * HZ));
			break;
		}
	}
	return false;
}
static void idtp9017_resume(struct device *dev) { // struct i2c_client *client = to_i2c_client(dev); // struct idtp9017_chip *chip = i2c_get_clientdata(client); pr_info("[WLC] wlc_resume\n"); the_chip->suspend = false; schedule_delayed_work(&the_chip->wlc_status_work, round_jiffies_relative( msecs_to_jiffies(500))); #ifdef CONFIG_LGE_PM_CHARGING_USING_CHGSTS_WLC schedule_delayed_work(&chip->update_soc_worker, round_jiffies_relative (msecs_to_jiffies(500))); #endif }
/*
 * Handle a DC (cradle) charger insertion or removal event.
 * Only acts when the presence state actually changed; notifies the
 * power-supply core, then starts or stops the periodic charge work.
 */
static inline void handle_dc_inout(struct smb349_dual_charger *chip,
		int present)
{
	if (chip->chg_present != present) {
		chip->chg_present = present;
		power_supply_changed(&chip->cradle_psy);
		if (present) {
			dev_dbg(chip->dev, "DC IN\n");
			smb349_hw_init(chip);
			/* keep the system awake while charging from DC */
			smb349_pm_stay_awake(chip, PM_SMB349_DC_INSERTED);
			schedule_delayed_work(
				&chip->periodic_charge_handler_dwork,
				round_jiffies_relative(
					msecs_to_jiffies(PERIODIC_DELAY_MS)));
			notify_external_charger(chip, 0);
			dev_dbg(chip->dev, "Periodic work started\n");
		} else {
			dev_dbg(chip->dev, "DC OUT\n");
			cancel_delayed_work(
				&chip->periodic_charge_handler_dwork);
			/* drop the wakeup source taken on insertion */
			smb349_pm_relax(chip, PM_SMB349_DC_INSERTED);
			notify_external_charger(chip, 1);
			dev_dbg(chip->dev, "Periodic work stopped\n");
		}
	}
}
/*
 * Late-init hook: arm the periodic poller for non-fatal machine-check
 * errors. Returns 0 when the poller was started, -ENODEV when the CPU
 * lacks MCE or PPro-style MCA support.
 */
static int __init init_nonfatal_mce_checker(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	/* Need both the MCE feature and PPro-style MCA banks. */
	if (!cpu_has(c, X86_FEATURE_MCE) || !cpu_has(c, X86_FEATURE_MCA))
		return -ENODEV;

	/* Some Athlons misbehave when we frob bank 0 */
	firstbank = (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		     boot_cpu_data.x86 == 6) ? 1 : 0;

	/*
	 * Check for non-fatal errors every MCE_RATE s
	 */
	schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
	printk(KERN_INFO "Machine check exception polling timer started.\n");
	return 0;
}
/*
 * Allocate and register the platform's rfkill devices (bluetooth, wlan,
 * and - when present - the 3G/WWAN radio), then schedule the work that
 * publishes the initial hardware radio state.
 *
 * Returns 0 on success or a negative errno; on failure everything set
 * up so far is torn down via the unwind ladder below.
 */
static int rfkill_init(struct platform_device *sdev)
{
	/* add rfkill */
	int retval;

	/* keep the hardware wireless state */
	get_wireless_state_ec_standard();

	rfk_bluetooth = rfkill_alloc("msi-bluetooth", &sdev->dev,
				     RFKILL_TYPE_BLUETOOTH,
				     &rfkill_bluetooth_ops, NULL);
	if (!rfk_bluetooth) {
		retval = -ENOMEM;
		goto err_bluetooth;
	}
	retval = rfkill_register(rfk_bluetooth);
	if (retval)
		goto err_bluetooth;

	rfk_wlan = rfkill_alloc("msi-wlan", &sdev->dev, RFKILL_TYPE_WLAN,
				&rfkill_wlan_ops, NULL);
	if (!rfk_wlan) {
		retval = -ENOMEM;
		goto err_wlan;
	}
	retval = rfkill_register(rfk_wlan);
	if (retval)
		goto err_wlan;

	if (threeg_exists) {
		rfk_threeg = rfkill_alloc("msi-threeg", &sdev->dev,
					  RFKILL_TYPE_WWAN,
					  &rfkill_threeg_ops, NULL);
		if (!rfk_threeg) {
			retval = -ENOMEM;
			goto err_threeg;
		}
		retval = rfkill_register(rfk_threeg);
		if (retval)
			goto err_threeg;
	}

	/* schedule to run rfkill state initial */
	schedule_delayed_work(&msi_rfkill_init,
			      round_jiffies_relative(1 * HZ));

	return 0;

	/*
	 * Unwind ladder: each label destroys its own device (presumably
	 * safe on NULL / unregistered devices - rfkill convention; verify)
	 * and unregisters the devices that were fully registered earlier.
	 */
err_threeg:
	rfkill_destroy(rfk_threeg);
	if (rfk_wlan)
		rfkill_unregister(rfk_wlan);
err_wlan:
	rfkill_destroy(rfk_wlan);
	if (rfk_bluetooth)
		rfkill_unregister(rfk_bluetooth);
err_bluetooth:
	rfkill_destroy(rfk_bluetooth);

	return retval;
}
/* Arm the ESD watchdog, but only on boards that declare they need it. */
static void sta32x_watchdog_start(struct sta32x_priv *sta32x)
{
	if (!sta32x->pdata->needs_esd_watchdog)
		return;

	sta32x->shutdown = 0;
	schedule_delayed_work(&sta32x->watchdog_work,
			      round_jiffies_relative(HZ));
}
/*
 * Enter CT-kill: mark the hardware as thermally killed and arm the
 * delayed work that will probe for exit conditions later.
 */
static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
{
	/* configured CT-kill dwell time, in seconds */
	u32 duration = mvm->thermal_throttle.params->ct_kill_duration;
	unsigned long exit_delay = round_jiffies_relative(duration * HZ);

	IWL_ERR(mvm, "Enter CT Kill\n");
	iwl_mvm_set_hw_ctkill_state(mvm, true);

	schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit, exit_delay);
}
/*
 * Arm the ESD watchdog on the power-efficient workqueue; a no-op on
 * boards that do not need the workaround.
 */
static void sta32x_watchdog_start(struct sta32x_priv *sta32x)
{
	if (!sta32x->pdata->needs_esd_watchdog)
		return;

	sta32x->shutdown = 0;
	queue_delayed_work(system_power_efficient_wq,
			   &sta32x->watchdog_work,
			   round_jiffies_relative(HZ));
}
/*
 * (Re)queue the AD-key polling work. Polls of a second or longer are
 * rounded to the jiffy batching point so their wakeups can coalesce
 * with other timers.
 */
static void adkey_input_queue_work(struct adgpio_key_data *adkey)
{
	unsigned long interval = msecs_to_jiffies(adkey->poll_interval);

	queue_delayed_work(system_freezable_wq, &adkey->work,
			   interval < HZ ? interval
					 : round_jiffies_relative(interval));
}
/*
 * (Re)queue the polled-device work on the freezable workqueue.
 */
static void input_polldev_queue_work(struct input_polled_dev *dev)
{
	unsigned long interval = msecs_to_jiffies(dev->poll_interval);

	/* Round long intervals so wakeups batch with other timers. */
	if (interval >= HZ)
		interval = round_jiffies_relative(interval);

	queue_delayed_work(system_freezable_wq, &dev->work, interval);
}
/*
 * End-of-charge work for the RT9536 wireless charger: mark the charge
 * as done, disable the charger, notify the power-supply core, and drop
 * the wakelock if it is still held.
 *
 * Cleanup: removed a large "#if 0" block of dead alternative logic and
 * the redundant trailing return.
 */
static void wlc_eoc_work(struct work_struct *work)
{
	struct rt9536_chip *rt9536_chg =
		container_of(work, struct rt9536_chip, wireless_eoc_work);

	pr_err("[RT9536] %s \n", __func__);

	rt9536_chg->chg_done = 1;
	if (rt9536_set_charger_enable(rt9536_chg, 0)) {
		pr_err("%s : charger enable fail!!!\n", __func__);
	}
	power_supply_changed(&rt9536_chg->charger);

	if (wake_lock_active(&rt9536_chg->wl))
		wake_unlock(&rt9536_chg->wl);
}
/*
 * STAT-pin interrupt handler: hold a wakeup source and defer the real
 * handling to delayed work so the line can settle first.
 */
static irqreturn_t smb349_chg_stat_handler(int irq, void *dev_id)
{
	struct smb349_dual_charger *chip = dev_id;
	unsigned long settle =
		round_jiffies_relative(msecs_to_jiffies(DELAY_IRQ_LEVEL_MS));

	smb349_pm_stay_awake(chip, PM_SMB349_IRQ_HANDLING);
	schedule_delayed_work(&chip->irq_handler_dwork, settle);

	return IRQ_HANDLED;
}
/* must be called with ts->mutex held */ static void __tsc200x_enable(struct tsc200x *ts) { tsc200x_start_scan(ts); if (ts->esd_timeout && (ts->set_reset || ts->reset_gpio)) { ts->last_valid_interrupt = jiffies; schedule_delayed_work(&ts->esd_work, round_jiffies_relative( msecs_to_jiffies(ts->esd_timeout))); } }
/*
 * Kick off periodic port-type sensing; a no-op on devices without
 * dual-protocol (DPDP) capability.
 */
void mlx4_start_sense(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_sense *sense = &priv->sense;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP))
		return;

	queue_delayed_work(mlx4_wq, &sense->sense_poll,
			   round_jiffies_relative(MLX4_SENSE_RANGE));
}
/*
 * Block while the torture test is in a stutter pause (or no longer
 * runnable), sleeping in short ticks while runnable and in ~1s chunks
 * otherwise, and absorbing shutdown requests each iteration.
 */
static void rcu_stutter_wait(char *title)
{
	while (stutter_pause_test || !rcutorture_runnable) {
		long timeout = rcutorture_runnable ?
				1 : round_jiffies_relative(HZ);

		schedule_timeout_interruptible(timeout);
		rcutorture_shutdown_absorb(title);
	}
}
/*
 * sysfs store: parse the power-on trigger value and schedule the
 * power-key emulation work after the configured delay.
 *
 * Fixes:
 *  - returning 0 from a sysfs store makes userspace retry the write in
 *    an endless loop; report -ENODEV when the device is absent instead;
 *  - the sscanf() result was ignored, silently accepting garbage input.
 */
static ssize_t pwronoff_trigger_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	if (sscanf(buf, "%d\n", &pwr_on_trigger) != 1)
		return -EINVAL;

	if (!s_pwrkey)
		return -ENODEV;

	schedule_delayed_work(&s_pwrkey->power_key_emulation_work,
		round_jiffies_relative(msecs_to_jiffies(pwrkey_delay_ms)));

	return count;
}
/*
 * ESD watchdog work: verify the controller still answers with its
 * expected CFR0 configuration; if not, hard-reset it and restart
 * scanning. The work always re-arms itself.
 */
static void tsc200x_esd_work(struct work_struct *work)
{
	struct tsc200x *ts = container_of(work, struct tsc200x, esd_work.work);
	int error;
	unsigned int r;

	if (!mutex_trylock(&ts->mutex)) {
		/*
		 * If the mutex is taken, it means that disable or enable is in
		 * progress. In that case just reschedule the work. If the work
		 * is not needed, it will be canceled by disable.
		 */
		goto reschedule;
	}

	/* A recent valid interrupt proves the chip is alive - skip probing. */
	if (time_is_after_jiffies(ts->last_valid_interrupt +
				  msecs_to_jiffies(ts->esd_timeout)))
		goto out;

	/* We should be able to read register without disabling interrupts. */
	error = regmap_read(ts->regmap, TSC200X_REG_CFR0, &r);
	if (!error &&
	    !((r ^ TSC200X_CFR0_INITVALUE) & TSC200X_CFR0_RW_MASK)) {
		goto out;
	}

	/*
	 * If we could not read our known value from configuration register 0
	 * then we should reset the controller as if from power-up and start
	 * scanning again.
	 */
	dev_info(ts->dev, "TSC200X not responding - resetting\n");

	/* quiesce: no interrupts or pen-up timer while the chip resets */
	disable_irq(ts->irq);
	del_timer_sync(&ts->penup_timer);
	tsc200x_update_pen_state(ts, 0, 0, 0);

	tsc200x_set_reset(ts, false);
	usleep_range(100, 500); /* only 10us required */
	tsc200x_set_reset(ts, true);

	enable_irq(ts->irq);
	tsc200x_start_scan(ts);

out:
	mutex_unlock(&ts->mutex);

reschedule:
	/* re-arm the watchdog */
	schedule_delayed_work(&ts->esd_work,
			      round_jiffies_relative(
					msecs_to_jiffies(ts->esd_timeout)));
}
/*
 * Periodic under-voltage maintenance work: read the battery capacity
 * from a sysfs/procfs file (via in-kernel sys_open/sys_read) and mask
 * or unmask the PMIC UV interrupt depending on whether capacity is at
 * or above the configured threshold. Re-arms itself at check_interval.
 *
 * NOTE(review): kstrtol()'s return value is ignored, so on a failed
 * parse battery_capacity stays 0 and the UV irq is masked - confirm
 * this fallback is intended.
 */
static void uv_mntn_work(struct work_struct *work)
{
	char battery_capacity_info[16] = {0};
	PMIC_MNTN_DESC *pmic_mntn = NULL;
	long battery_capacity = 0;
	mm_segment_t fs = 0;
	struct irq_desc *desc = NULL ;
	/* 0:mask; 1:unmask; 2:undefined */
	static int uv_irq_flag = 2;
	/* cached fd for the capacity file, kept open across invocations */
	static int battery_info_fd = -1;

	pmic_mntn = container_of(work, PMIC_MNTN_DESC, uv_mntn_delayed_work.work);

	desc = irq_to_desc(pmic_mntn->uv_irq);
	if (!desc) {
		pr_err("[%s]irq_to_desc failed\n", __func__);
		return ;
	}

	/* switch to kernel address limit so sys_* accept kernel buffers */
	fs = get_fs();
	set_fs(KERNEL_DS);
	if (battery_info_fd < 0) {
		battery_info_fd = sys_open(PATH_BATTERY_CAPACITY, O_RDONLY, 0);
	}
	if (battery_info_fd >= 0) {
		sys_lseek(battery_info_fd, 0, SEEK_SET);
		/* on read failure, drop the fd so it is reopened next time */
		if (sys_read(battery_info_fd, battery_capacity_info,
			     sizeof(battery_capacity_info)) < 0) {
			sys_close(battery_info_fd);
			battery_info_fd = -1;
		}
	}
	set_fs(fs);

	kstrtol(battery_capacity_info, 0, &battery_capacity);

	if ((battery_capacity >= pmic_mntn->bat_cap_info.bat_cap_threshold)) {
		/* enough charge: make sure the UV irq is unmasked */
		if (1 != uv_irq_flag) {
			pr_info("unmask uv irq,battery_capacity:%ld\n",
				battery_capacity);
			if (NULL != desc->irq_data.chip->irq_unmask)
				desc->irq_data.chip->irq_unmask(&desc->irq_data);
			uv_irq_flag = 1;
		}
	} else {
		/* low charge: mask the UV irq to avoid spurious storms */
		if ((0 != uv_irq_flag)) {
			pr_info("mask uv irq,battery_capacity:%ld\n",
				battery_capacity);
			if (NULL != desc->irq_data.chip->irq_mask)
				desc->irq_data.chip->irq_mask(&desc->irq_data);
			uv_irq_flag = 0;
		}
	}

	schedule_delayed_work(&pmic_mntn->uv_mntn_delayed_work,
		round_jiffies_relative(msecs_to_jiffies(pmic_mntn->bat_cap_info.check_interval)));
}
/*
 * Reschedule delayed work timer.
 * While hunting for a monitor the interval is scaled by the hunt
 * multiplier; otherwise the normal ping interval applies.
 */
static void __schedule_delayed(struct ceph_mon_client *monc)
{
	unsigned long delay = monc->hunting ?
			CEPH_MONC_HUNT_INTERVAL * monc->hunt_mult :
			CEPH_MONC_PING_INTERVAL;

	dout("__schedule_delayed after %lu\n", delay);
	mod_delayed_work(system_wq, &monc->delayed_work,
			 round_jiffies_relative(delay));
}
/*
 * Work handler for polled input devices: run one poll cycle, then
 * requeue itself at the device's poll interval.
 */
static void input_polled_device_work(struct work_struct *work)
{
	struct input_polled_dev *dev =
		container_of(work, struct input_polled_dev, work.work);
	unsigned long next_poll;

	dev->poll(dev);

	/* Round long intervals so wakeups batch with other timers. */
	next_poll = msecs_to_jiffies(dev->poll_interval);
	if (next_poll >= HZ)
		next_poll = round_jiffies_relative(next_poll);

	queue_delayed_work(polldev_wq, &dev->work, next_poll);
}
/*
 * Periodic IADC trim recalibration work.
 *
 * NOTE(review): the work re-arms itself only when calibration succeeds,
 * so a single failure permanently stops periodic calibration - confirm
 * this is the intended policy (the sibling variants reschedule
 * unconditionally or rate-limit on an error counter).
 */
static void qpnp_iadc_work(struct work_struct *work)
{
	struct qpnp_iadc_drv *iadc = qpnp_iadc;	/* file-global driver state */
	int rc = 0;

	rc = qpnp_iadc_calibrate_for_trim();
	if (rc)
		pr_debug("periodic IADC calibration failed\n");
	else
		schedule_delayed_work(&iadc->iadc_work,
			round_jiffies_relative(msecs_to_jiffies
					(QPNP_IADC_CALIB_SECONDS)));
	return;
}
static void qpnp_iadc_work(struct work_struct *work) { struct qpnp_iadc_chip *iadc = container_of(work, struct qpnp_iadc_chip, iadc_work.work); int rc = 0; if (!iadc->skip_auto_calibrations) { rc = qpnp_iadc_calibrate_for_trim(iadc, true); if (rc) pr_debug("periodic IADC calibration failed\n"); } schedule_delayed_work(&iadc->iadc_work, round_jiffies_relative(msecs_to_jiffies (QPNP_IADC_CALIB_SECONDS))); return; }
/*
 * Set up PMIC under-voltage monitoring: request the "uv" interrupt,
 * mask it until the battery capacity is known, read the capacity-check
 * parameters from the device tree, and start the periodic maintenance
 * work.
 *
 * Returns 0 on success or a negative errno.
 * Fix: the irq_to_desc() failure path returned -1 (which reads as
 * -EPERM); it now returns -ENODEV like the other failure paths.
 */
static int hisi_pmic_uv_mntn_initial(struct platform_device *pdev,
				     PMIC_MNTN_DESC *pmic_mntn)
{
	int ret;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct irq_desc *desc = NULL;

	pmic_mntn->uv_irq = platform_get_irq_byname(pdev, "uv");
	if (pmic_mntn->uv_irq < 0) {
		dev_err(dev, "[%s] platform_get_irq_byname uv failed.\n", __func__);
		return -ENODEV;
	}

	ret = devm_request_irq(dev, pmic_mntn->uv_irq,
			hisi_pmic_uv_irq_handler,
			IRQF_DISABLED | IRQF_NO_SUSPEND,
			"pmu-uv-irq", (void *)pmic_mntn);
	if (ret) {
		dev_err(dev, "[%s]devm_request_irq uv_irq failed.\n", __func__);
		return -ENODEV;
	}

	/* mask or unmask is up to battery capacity, which is unknown here */
	desc = irq_to_desc(pmic_mntn->uv_irq);
	if (!desc) {
		dev_err(dev, "[%s]irq_to_desc failed\n", __func__);
		return -ENODEV;
	}
	if (NULL != desc->irq_data.chip->irq_mask)
		desc->irq_data.chip->irq_mask(&desc->irq_data);

	ret = of_property_read_u32_array(np, "hisilicon,battery-capacity-check",
			(u32 *)&(pmic_mntn->bat_cap_info), 2);
	if (ret) {
		dev_err(dev, "of_property_read_u32_array read fail\n");
		return -ENODEV;
	}

	INIT_DELAYED_WORK(&pmic_mntn->uv_mntn_delayed_work, uv_mntn_work);
	schedule_delayed_work(&pmic_mntn->uv_mntn_delayed_work,
		round_jiffies_relative(msecs_to_jiffies(pmic_mntn->bat_cap_info.check_interval)));

	return 0;
}
/* work around ESD issue where sta32x resets and loses all configuration */
static void sta32x_watchdog(struct work_struct *work)
{
	struct sta32x_priv *sta32x = container_of(work, struct sta32x_priv,
						  watchdog_work.work);
	struct snd_soc_codec *codec = sta32x->codec;
	unsigned int confa, confa_cached;

	/* check if sta32x has reset itself */
	confa_cached = snd_soc_read(codec, STA32X_CONFA);
	/* bypass the register cache so the next read hits the hardware */
	codec->cache_bypass = 1;
	confa = snd_soc_read(codec, STA32X_CONFA);
	codec->cache_bypass = 0;
	if (confa != confa_cached) {
		/* hardware lost its registers: replay the cached values */
		codec->cache_sync = 1;
		sta32x_cache_sync(codec);
	}

	/* re-arm roughly once a second until shutdown is set */
	if (!sta32x->shutdown)
		schedule_delayed_work(&sta32x->watchdog_work,
				      round_jiffies_relative(HZ));
}
/*
 * There is a PMI Fuel Gauge requirement to lower
 * the Fast Charge current for 2 seconds each 2 minutes by at least 200mA
 */
static void periodic_charge_work(struct work_struct *work)
{
	int save_current, rc;
	struct smb349_dual_charger *chip = container_of(work,
				struct smb349_dual_charger,
				periodic_charge_handler_dwork.work);

	rc = smb349_enable_volatile_writes(chip);
	if (rc) {
		dev_dbg(chip->dev, "Couldn't configure volatile writes rc=%d\n",
			rc);
		goto resched;
	}

	/* remember the configured limit so it can be restored below */
	save_current = chip->fastchg_current_max_ma;
	chip->fastchg_current_max_ma -= LOW_CHARGE_CURRENT_MA;

	/* lower the fast charge current limit to allow PMIC FG metering */
	rc = smb349_fastchg_current_set(chip);
	if (rc) {
		dev_dbg(chip->dev, "Couldn't set fastchg current rc=%d\n", rc);
		goto resched;
	}

	/* The required delay to ensure PMI FG detects the transient */
	msleep(LOW_CHARGE_DELAY_MS);

	chip->fastchg_current_max_ma = save_current;

	/* set the fast charge current limit */
	rc = smb349_fastchg_current_set(chip);
	if (rc) {
		dev_dbg(chip->dev, "Couldn't set fastchg current rc=%d\n", rc);
		goto resched;
	}

resched:
	/* always re-arm, even after errors, to keep the 2-minute cadence */
	schedule_delayed_work(&chip->periodic_charge_handler_dwork,
		round_jiffies_relative(msecs_to_jiffies(PERIODIC_DELAY_MS)));
	return;
}
static void qpnp_iadc_work(struct work_struct *work) { struct qpnp_iadc_drv *iadc = qpnp_iadc; int rc = 0; mutex_lock(&iadc->adc->adc_lock); rc = qpnp_iadc_calibrate_for_trim(); if (rc) { pr_err("periodic IADC calibration failed\n"); iadc->iadc_err_cnt++; } mutex_unlock(&iadc->adc->adc_lock); if (iadc->iadc_err_cnt < QPNP_IADC_ERR_CHK_RATELIMIT) schedule_delayed_work(&iadc->iadc_work, round_jiffies_relative(msecs_to_jiffies (QPNP_IADC_CALIB_SECONDS))); return; }
static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm) { struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; u32 duration = mvm->thermal_throttle.params->ct_kill_duration; if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) return; IWL_ERR(mvm, "Enter CT Kill\n"); iwl_mvm_set_hw_ctkill_state(mvm, true); tt->throttle = false; tt->dynamic_smps = false; /* Don't schedule an exit work if we're in test mode, since * the temperature will not change unless we manually set it * again (or disable testing). */ if (!mvm->temperature_test) schedule_delayed_work(&tt->ct_kill_exit, round_jiffies_relative(duration * HZ)); }
/* work around ESD issue where sta32x resets and loses all configuration */ static void sta32x_watchdog(struct work_struct *work) { struct sta32x_priv *sta32x = container_of(work, struct sta32x_priv, watchdog_work.work); struct snd_soc_codec *codec = sta32x->codec; unsigned int confa, confa_cached; /* check if sta32x has reset itself */ confa_cached = snd_soc_read(codec, STA32X_CONFA); regcache_cache_bypass(sta32x->regmap, true); confa = snd_soc_read(codec, STA32X_CONFA); regcache_cache_bypass(sta32x->regmap, false); if (confa != confa_cached) { regcache_mark_dirty(sta32x->regmap); sta32x_cache_sync(codec); } if (!sta32x->shutdown) queue_delayed_work(system_power_efficient_wq, &sta32x->watchdog_work, round_jiffies_relative(HZ)); }
static void mlx4_sense_port(struct work_struct *work) { struct delayed_work *delay = to_delayed_work(work); struct mlx4_sense *sense = container_of(delay, struct mlx4_sense, sense_poll); struct mlx4_dev *dev = sense->dev; struct mlx4_priv *priv = mlx4_priv(dev); enum mlx4_port_type stype[MLX4_MAX_PORTS]; mutex_lock(&priv->port_mutex); mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]); if (mlx4_check_port_params(dev, stype)) goto sense_again; if (mlx4_change_port_types(dev, stype)) mlx4_err(dev, "Failed to change port_types\n"); sense_again: mutex_unlock(&priv->port_mutex); queue_delayed_work(mlx4_wq , &sense->sense_poll, round_jiffies_relative(MLX4_SENSE_RANGE)); }
/*
 * Periodic (200 ms) worker that mirrors the battery state-of-charge to
 * the IDTP9017 wireless-charger status register while charging is
 * enabled.
 */
static void update_wlc_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct idtp9017_chip *chip = container_of(dwork,
			struct idtp9017_chip, update_soc_worker);
	int ret = 0;
	int cur_soc = 0;

	if (chip->wlc_chg_en) {
		cur_soc = idtp9017_get_soc(chip);
		/* only push an update when the SoC actually changed */
		if (cur_soc != chip->previous_soc) {
			ret = idtp9017_set_chg_status(chip);
			if (ret < 0) {
				pr_err("Failed to write chg_status at worker ret : %d\n",
						ret);
				/* NOTE(review): returning here stops the
				 * periodic worker permanently after a single
				 * write failure - confirm this is intended. */
				return;
			}
		}
		chip->previous_soc = cur_soc;
	}

	schedule_delayed_work(&chip->update_soc_worker,
			round_jiffies_relative
			(msecs_to_jiffies(200)));
}