/*
 * mlx4_start_catas_poll - arm the periodic catastrophic-error poll timer.
 * @dev: mlx4 device to monitor.
 *
 * PF devices map the internal error buffer from the catas BAR; slaves skip
 * the mapping (poll_catas() checks the comm channel for them instead).
 * If the mapping fails, the poll timer is NOT started.
 */
void mlx4_start_catas_poll(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	phys_addr_t addr;

	INIT_LIST_HEAD(&priv->catas_err.list);
	timer_setup(&priv->catas_err.timer, poll_catas, 0);
	priv->catas_err.map = NULL;

	if (!mlx4_is_slave(dev)) {
		addr = pci_resource_start(dev->persist->pdev,
					  priv->fw.catas_bar) +
			priv->fw.catas_offset;

		/* catas_size is counted in dwords, hence the * 4 */
		priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
		if (!priv->catas_err.map) {
			mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
				  (unsigned long long)addr);
			return;
		}
	}

	priv->catas_err.timer.expires =
		round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
	add_timer(&priv->catas_err.timer);
}
static void check_exit_ctkill(struct work_struct *work) { struct iwl_mvm_tt_mgmt *tt; struct iwl_mvm *mvm; u32 duration; s32 temp; tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work); mvm = container_of(tt, struct iwl_mvm, thermal_throttle); duration = tt->params->ct_kill_duration; iwl_trans_start_hw(mvm->trans); temp = check_nic_temperature(mvm); iwl_trans_stop_device(mvm->trans); if (temp < MIN_TEMPERATURE || temp > MAX_TEMPERATURE) { IWL_DEBUG_TEMP(mvm, "Failed to measure NIC temperature\n"); goto reschedule; } IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp); if (temp <= tt->params->ct_kill_exit) { iwl_mvm_exit_ctkill(mvm); return; } reschedule: schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit, round_jiffies(duration * HZ)); }
/*
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay
 * expiration time.
 * @dev: Device to handle.
 *
 * Returns 0 when autosuspend is not in use, the delay is negative
 * (disabled), or the delay has already expired; otherwise returns the
 * expiration time in jiffies, forced to be nonzero.
 *
 * NOTE(review): power.last_busy may change concurrently (read with
 * ACCESS_ONCE), so the result can be stale by design.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around */

	/*
	 * For delays of one second or more, round the expiry to a whole
	 * second so timers can be batched.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;	/* callers treat 0 as "no expiration" */
	if (elapsed >= expires - last_busy)
		expires = 0;	/* already expired */

out:
	return expires;
}
/* * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time. * @dev: Device to handle. * * Compute the autosuspend-delay expiration time based on the device's * power.last_busy time. If the delay has already expired or is disabled * (negative) or the power.use_autosuspend flag isn't set, return 0. * Otherwise return the expiration time in jiffies (adjusted to be nonzero). * * This function may be called either with or without dev->power.lock held. * Either way it can be racy, since power.last_busy may be updated at any time. */ unsigned long pm_runtime_autosuspend_expiration(struct device *dev) { int autosuspend_delay; long elapsed; unsigned long last_busy; unsigned long expires = 0; if (!dev->power.use_autosuspend) goto out; autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay); if (autosuspend_delay < 0) goto out; last_busy = ACCESS_ONCE(dev->power.last_busy); elapsed = jiffies - last_busy; if (elapsed < 0) goto out; /* jiffies has wrapped around. */ /* * If the autosuspend_delay is >= 1 second, align the timer by rounding * up to the nearest second. */ expires = last_busy + msecs_to_jiffies(autosuspend_delay); if (autosuspend_delay >= 1000) expires = round_jiffies(expires); expires += !expires; if (elapsed >= expires - last_busy) expires = 0; /* Already expired. */ out: return expires; }
/*
 * dev_watchdog - transmit watchdog timer callback (legacy single-queue).
 * @arg: the struct net_device being watched, cast from unsigned long.
 *
 * If the device is present, running and has carrier, and its queue has
 * been stopped for longer than watchdog_timeo, call the driver's
 * tx_timeout() handler, then re-arm the timer.
 */
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (dev->qdisc != &noop_qdisc) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			if (netif_queue_stopped(dev) &&
			    time_after(jiffies, dev->trans_start +
				       dev->watchdog_timeo)) {
				/* log line disabled by vendor change below */
#if 0 // Fix QA-Bug #8011 on Agile.
				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
				       dev->name);
#endif
				dev->tx_timeout(dev);
			}
			/*
			 * mod_timer() returns 0 when the timer was inactive:
			 * take a device reference for the newly armed timer.
			 */
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}
/*
 * setup_hres_sched_clock - configure the high-resolution sched_clock path.
 * @clock: input clock rate in Hz.
 *
 * Computes the cycles-to-nanoseconds mult/shift pair (on 32-bit) or reuses
 * the clocksource's factors (otherwise), and arms a periodic timer that
 * keeps the cnt32_to_63() counter extension sampled regularly.
 */
static inline void setup_hres_sched_clock(unsigned long clock)
{
#ifdef CONFIG_CPU_SUPPORTS_HR_SCHED_CLOCK
	unsigned long data;

#ifdef CONFIG_32BIT
	unsigned long long v;

	/* mult = (NSEC_PER_SEC << CYC2NS_SHIFT) / clock, rounded */
	v = NSEC_PER_SEC;
	v <<= CYC2NS_SHIFT;
	v += clock/2;
	do_div(v, clock);
	mult = v;
	shift = CYC2NS_SHIFT;

	/*
	 * We want an even value to automatically clear the top bit
	 * returned by cnt32_to_63() without an additional run time
	 * instruction. So if the LSB is 1 then round it up.
	 */
	if (mult & 1)
		mult++;
#else
	/* 64-bit: reuse the MIPS clocksource scaling factors directly. */
	mult = clocksource_mips.mult;
	shift = clocksource_mips.shift;
#endif

	/* keep-warm interval for the cnt32_to_63 counter extension */
	data = 0x80000000UL / clock * HZ;
	setup_timer(&cnt32_to_63_keepwarm_timer, cnt32_to_63_keepwarm, data);
	mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + data));
#endif
}
static void tpt_trig_timer(unsigned long data) { struct ieee80211_local *local = (void *)data; struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger; struct led_classdev *led_cdev; unsigned long on, off, tpt; int i; if (!tpt_trig->running) return; mod_timer(&tpt_trig->timer, round_jiffies(jiffies + HZ)); tpt = tpt_trig_traffic(local, tpt_trig); /* default to just solid on */ on = 1; off = 0; for (i = tpt_trig->blink_table_len - 1; i >= 0; i--) { if (tpt_trig->blink_table[i].throughput < 0 || tpt > tpt_trig->blink_table[i].throughput) { off = tpt_trig->blink_table[i].blink_time / 2; on = tpt_trig->blink_table[i].blink_time - off; break; } } read_lock(&tpt_trig->trig.leddev_list_lock); list_for_each_entry(led_cdev, &tpt_trig->trig.led_cdevs, trig_list) led_blink_set(led_cdev, &on, &off); read_unlock(&tpt_trig->trig.leddev_list_lock); }
void vio_port_up(struct vio_driver_state *vio) { unsigned long flags; int err, state; spin_lock_irqsave(&vio->lock, flags); state = ldc_state(vio->lp); err = 0; if (state == LDC_STATE_INIT) { err = ldc_bind(vio->lp); if (err) printk(KERN_WARNING "%s: Port %lu bind failed, " "err=%d\n", vio->name, vio->vdev->channel_id, err); } if (!err) { err = ldc_connect(vio->lp); if (err) printk(KERN_WARNING "%s: Port %lu connect failed, " "err=%d\n", vio->name, vio->vdev->channel_id, err); } if (err) { unsigned long expires = jiffies + HZ; expires = round_jiffies(expires); mod_timer(&vio->timer, expires); } spin_unlock_irqrestore(&vio->lock, flags); }
/*
 * boot_moniter_work - periodic watchdog kick while booting.
 * @work: the boot_queue delayed work embedded in struct aml_wdt_dev.
 *
 * Pets the hardware watchdog and reschedules itself every
 * reset_watchdog_time seconds.
 */
static void boot_moniter_work(struct work_struct *work)
{
	struct aml_wdt_dev *wdev = container_of(work, struct aml_wdt_dev,
						boot_queue.work);

	reset_watchdog();
	/*
	 * mod_delayed_work() takes a *relative* delay, so the rounding
	 * helper must be round_jiffies_relative(); round_jiffies()
	 * expects an absolute jiffies value and does not round a small
	 * relative delay.
	 */
	mod_delayed_work(system_freezable_wq, &wdev->boot_queue,
			 round_jiffies_relative(msecs_to_jiffies(
				wdev->reset_watchdog_time * 1000)));
}
/*
 * tsc2005_esd_work - ESD watchdog: verify the controller still responds.
 * @work: the esd_work member of struct tsc2005.
 *
 * Reads configuration register 0; if the expected bits are gone, the
 * controller is hard-reset (as if from power-up) and scanning restarts.
 * The watchdog timer is re-armed unless the device is disabled.
 */
static void tsc2005_esd_work(struct work_struct *work)
{
	struct tsc2005 *ts = container_of(work, struct tsc2005, esd_work);
	u16 r;

	mutex_lock(&ts->mutex);

	if (ts->disable_depth)
		goto out;

	/*
	 * If we cannot read our known value from configuration register 0
	 * then reset the controller as if from power-up and start scanning
	 * again.
	 */
	tsc2005_read(ts, TSC2005_REG_CFR0, &r);
	if ((r ^ TSC2005_CFR0_INITVALUE) & TSC2005_CFR0_RW_MASK) {
		dev_info(&ts->spi->dev, "TSC2005 not responding - resetting\n");
		ts->set_reset(0);
		/* report pen-up so userspace isn't left with a stuck touch */
		tsc2005_update_pen_state(ts, 0, 0, 0);
		msleep(1); /* only 10us required */
		ts->set_reset(1);
		tsc2005_start_scan(ts);
	}

	/* re-arm the watchdog */
	mod_timer(&ts->esd_timer,
		  round_jiffies(jiffies + msecs_to_jiffies(ts->esd_timeout)));
out:
	mutex_unlock(&ts->mutex);
}
/*
 * poll_catas - periodic catastrophic-error poll timer callback.
 * @t: the catas_err.timer embedded in struct mlx4_priv.
 *
 * Slaves check the comm channel for an internal-error report; PFs read
 * the mapped internal error buffer directly. On any detected error the
 * catas work is queued (if resets are enabled) and the timer is NOT
 * re-armed; otherwise it fires again after MLX4_CATAS_POLL_INTERVAL.
 */
static void poll_catas(struct timer_list *t)
{
	struct mlx4_priv *priv = from_timer(priv, t, catas_err.timer);
	struct mlx4_dev *dev = &priv->dev;
	u32 slave_read;

	if (mlx4_is_slave(dev)) {
		/* comm channel words are big-endian on the wire */
		slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
		if (mlx4_comm_internal_err(slave_read)) {
			mlx4_warn(dev, "Internal error detected on the communication channel\n");
			goto internal_err;
		}
	} else if (readl(priv->catas_err.map)) {
		dump_err_buf(dev);
		goto internal_err;
	}

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mlx4_warn(dev, "Internal error mark was detected on device\n");
		goto internal_err;
	}

	mod_timer(&priv->catas_err.timer,
		  round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
	return;

internal_err:
	if (mlx4_internal_err_reset)
		queue_work(dev->persist->catas_wq, &dev->persist->catas_work);
}
/* Periodic notify poll: handle link events, then re-arm the timer. */
static void fnic_notify_timer(struct timer_list *t)
{
	struct fnic *f = from_timer(f, t, notify_timer);

	fnic_handle_link_event(f);
	mod_timer(&f->notify_timer,
		  round_jiffies(FNIC_NOTIFY_TIMER_PERIOD + jiffies));
}
/* Legacy (pre-timer_setup) notify poll: handle link events and re-arm. */
static void fnic_notify_timer(unsigned long data)
{
	struct fnic *f = (struct fnic *)data;

	fnic_handle_link_event(f);
	mod_timer(&f->notify_timer,
		  round_jiffies(FNIC_NOTIFY_TIMER_PERIOD + jiffies));
}
/*
 * cec_mod_timer - (re)arm @t to fire @interval seconds from now,
 * rounded to a whole second.
 * @interval: delay in seconds.
 */
static void cec_mod_timer(struct timer_list *t, unsigned long interval)
{
	unsigned long expires = jiffies + interval * HZ;

	mod_timer(t, round_jiffies(expires));
}
/*
 * round_jiffies_init - demo module init comparing __round_jiffies() and
 * round_jiffies() on the current jiffies value.
 *
 * Fixes over the original: declarations moved before statements,
 * explicit printk log levels (the final message used a raw "<0>"
 * emergency prefix for a plain status line), %lu instead of %ld for
 * unsigned long values, and the missing trailing newline on the last
 * message. Comments translated to English.
 */
int __init round_jiffies_init(void)
{
	unsigned long j;
	unsigned long __result1, __result2;
	unsigned long result1, result2;

	printk(KERN_INFO "the round_jiffies test begin\n");

	j = jiffies;				/* snapshot the current tick */
	__result1 = __round_jiffies(j, 0);	/* rounded for CPU 0 */
	__result2 = __round_jiffies(j, 1);	/* rounded for CPU 1 */
	result1 = round_jiffies(j);
	result2 = round_jiffies(j);

	/* show the snapshot and all four rounding results */
	printk(KERN_INFO "the jiffies is :%lu\n", j);
	printk(KERN_INFO "the __result1 of __round_jiffies(j,0) is :%lu\n", __result1);
	printk(KERN_INFO "the __result2 of __round_jiffies(j,1) is :%lu\n", __result2);
	printk(KERN_INFO "the result1 of round_jiffies(j) is :%lu\n", result1);
	printk(KERN_INFO "the result2 of round_jiffies(j) is :%lu\n", result2);
	printk(KERN_INFO "out round_jiffies_init\n");
	return 0;
}
/* * protection_work * * Sleep on waitqueue c2c_dev->waitq on event prot_event!=0 * If c2c->reset_flag => end work * If genio[i]->poll_timeout => errhandler() and end work. * If c2c->pending_armed => possible pending operations * if (genio[i].pending!=0) arm wakeup timeout * If c2c->pws_is_on => release pending operation timeout (wakeup timeout) */ static void protection_work(struct work_struct *work) { struct ap9540_c2c *c2c; struct c2c_genio *genio; unsigned long exp; int ret, i, n; c2c = container_of(work, struct ap9540_c2c, protection_work); ret = 0; while (1) { /* sleep until some event occur */ ret = wait_event_interruptible(c2c->waitq, (c2c->protection_event != 0)); if (ret == -ERESTARTSYS) { dev_err(c2c->dev, "unexpected wake of 'prot_work'\n"); break; } c2c->protection_event = 0; /* genio reset */ if (c2c->reset_flag) { ret = 0; break; } /* polling timeout */ ret = 0; for (i = 0, genio = c2c->genio; i < 32; i++, genio++) { if (genio->poll_timeout) protection_nfy_tout(c2c, genio); } /* pending set operation */ for (i = 0, n = 0, genio = c2c->genio; i < 32; i++, genio++) { if (genio->pending) n++; } if (c2c->powerup_timeout) protection_nfy_powerup_tout(c2c, n); if (n) { if (c2c->pwr_is_on) { dev_err(c2c->dev, "%d pending op(s) !!!\n", n); } else if (c2c->powerup_timeout_armed == 0) { c2c->powerup_timeout_armed = 1; exp = round_jiffies((c2c->powerup_timeout_ms * HZ) / 1000); exp += jiffies; c2c->powerup_timer.expires = exp; add_timer(&c2c->powerup_timer); } } /* C2C wakeup acknowledge */ if (c2c->pwr_is_on) { del_timer(&c2c->powerup_timer); c2c->powerup_timeout_armed = 0; } } if ((ret < 0) && c2c->errhandler) c2c->errhandler(ret); }
/* Periodic notify poll: run the notify check, then re-arm the timer. */
static void enic_notify_timer(unsigned long data)
{
	struct enic *e = (struct enic *)data;

	enic_notify_check(e);
	mod_timer(&e->notify_timer,
		  round_jiffies(ENIC_NOTIFY_TIMER_PERIOD + jiffies));
}
/*
 * setup_hres_sched_clock - arm the cnt32_to_63 keep-warm timer.
 * @clock: input clock rate in Hz; determines the re-sample interval.
 */
static inline void setup_hres_sched_clock(unsigned long clock)
{
#ifdef CONFIG_CPU_SUPPORTS_HR_SCHED_CLOCK
	unsigned long interval = 0x80000000UL / clock * HZ;

	setup_timer(&cnt32_to_63_keepwarm_timer, cnt32_to_63_keepwarm,
		    interval);
	mod_timer(&cnt32_to_63_keepwarm_timer,
		  round_jiffies(jiffies + interval));
#endif
}
/*
 * __netdev_watchdog_up - arm the transmit watchdog if the driver
 * implements tx_timeout.
 * @dev: device whose watchdog timer to start.
 */
void __netdev_watchdog_up(struct net_device *dev)
{
	if (!dev->tx_timeout)
		return;

	if (dev->watchdog_timeo <= 0)
		dev->watchdog_timeo = 5*HZ;	/* sane default: 5 seconds */

	/* mod_timer() returns 0 when the timer was inactive:
	 * take a device reference for the newly armed timer. */
	if (!mod_timer(&dev->watchdog_timer,
		       round_jiffies(jiffies + dev->watchdog_timeo)))
		dev_hold(dev);
}
/* must be called with mutex held */
static void tsc2005_enable(struct tsc2005 *ts)
{
	/* only act when the last disable level is released */
	if (--ts->disable_depth != 0)
		return;

	tsc2005_start_scan(ts);
	enable_irq(ts->spi->irq);

	/* re-arm the ESD watchdog, if one is configured */
	if (ts->esd_timeout) {
		unsigned long wdj = msecs_to_jiffies(ts->esd_timeout);

		mod_timer(&ts->esd_timer, round_jiffies(jiffies + wdj));
	}
}
/*
 * mlx4_start_sense - start the periodic port-sense polling.
 * @dev: mlx4 device.
 *
 * Does nothing unless the device advertises MLX4_DEV_CAP_FLAG_DPDP.
 */
void mlx4_start_sense(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_sense *sense = &priv->sense;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP))
		return;

	sense->resched = 1;
	/*
	 * queue_delayed_work() takes a *relative* delay, so use
	 * round_jiffies_relative(); round_jiffies() expects an absolute
	 * jiffies value and is wrong for a relative timeout.
	 */
	queue_delayed_work(sense->sense_wq, &sense->sense_poll,
			   round_jiffies_relative(MLX4_SENSE_RANGE));
}
static void ab8500_charger_polling_periodic_work(struct work_struct *work) { struct charger_extra_sysfs *instance = container_of(work, struct charger_extra_sysfs, polling_work.work); if (instance->polling_active) { update_polled_attributes() ; update_power_supply_attributes() ; update_callback_attributes(); /* Schedule a new set of measurements */ queue_delayed_work(instance->polling_queue,&instance->polling_work,round_jiffies(5 * HZ)); } }
static void check_exit_ctkill(struct work_struct *work) { struct iwl_mvm_tt_mgmt *tt; struct iwl_mvm *mvm; u32 duration; s32 temp; int ret; tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work); mvm = container_of(tt, struct iwl_mvm, thermal_throttle); if (iwl_mvm_is_tt_in_fw(mvm)) { iwl_mvm_exit_ctkill(mvm); return; } duration = tt->params.ct_kill_duration; mutex_lock(&mvm->mutex); if (__iwl_mvm_mac_start(mvm)) goto reschedule; /* make sure the device is available for direct read/writes */ if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_CHECK_CTKILL)) { __iwl_mvm_mac_stop(mvm); goto reschedule; } ret = iwl_mvm_get_temp(mvm, &temp); iwl_mvm_unref(mvm, IWL_MVM_REF_CHECK_CTKILL); __iwl_mvm_mac_stop(mvm); if (ret) goto reschedule; IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp); if (temp <= tt->params.ct_kill_exit) { mutex_unlock(&mvm->mutex); iwl_mvm_exit_ctkill(mvm); return; } reschedule: mutex_unlock(&mvm->mutex); schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit, round_jiffies(duration * HZ)); }
void mlx5_start_health_poll(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; init_timer(&health->timer); health->health = &dev->iseg->health; health->health_counter = &dev->iseg->health_counter; health->timer.data = (unsigned long)dev; health->timer.function = poll_health; health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL); add_timer(&health->timer); }
static void ieee80211_start_tpt_led_trig(struct ieee80211_local *local) { struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger; if (tpt_trig->running) return; /* reset traffic */ tpt_trig_traffic(local, tpt_trig); tpt_trig->running = true; tpt_trig_timer((unsigned long)local); mod_timer(&tpt_trig->timer, round_jiffies(jiffies + HZ)); }
void mlx5_start_health_poll(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; timer_setup(&health->timer, poll_health, 0); health->sick = 0; clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); health->health = &dev->iseg->health; health->health_counter = &dev->iseg->health_counter; health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL); add_timer(&health->timer); }
/*
 * tsc2005_enable - drop one level of disable nesting; when the last
 * level is about to be released, restart scanning and (if configured)
 * arm the ESD watchdog.
 *
 * NOTE(review): the decrement happens on every path via the out label;
 * presumably callers hold the mutex guarding disable_depth — confirm at
 * call sites.
 */
static void tsc2005_enable(struct tsc2005 *ts)
{
	if (ts->disable_depth != 1)
		goto out;

	if (ts->esd_timeout) {
		unsigned long wdj = msecs_to_jiffies(ts->esd_timeout);

		/* arm the ESD watchdog, aligned to a whole second */
		ts->esd_timer.expires = round_jiffies(jiffies+wdj);
		add_timer(&ts->esd_timer);
	}
	tsc2005_start_scan(ts);
	enable_irq(ts->spi->irq);
out:
	--ts->disable_depth;	/* runs on every path */
}
static int aml_wdt_probe(struct platform_device *pdev) { struct watchdog_device *aml_wdt; struct aml_wdt_dev *wdev; int ret; aml_wdt = devm_kzalloc(&pdev->dev, sizeof(*aml_wdt), GFP_KERNEL); if (!aml_wdt) return -ENOMEM; wdev = devm_kzalloc(&pdev->dev, sizeof(*wdev), GFP_KERNEL); if (!wdev) return -ENOMEM; wdev->dev = &pdev->dev; mutex_init(&wdev->lock); aml_init_pdata(wdev); aml_wdt->info = &aml_wdt_info; aml_wdt->ops = &aml_wdt_ops; aml_wdt->min_timeout = wdev->min_timeout; aml_wdt->max_timeout = wdev->max_timeout; aml_wdt->timeout = 0xffffffff; wdev->timeout = 0xffffffff; watchdog_set_drvdata(aml_wdt, wdev); platform_set_drvdata(pdev, aml_wdt); if (wdev->reset_watchdog_method == 1) { INIT_DELAYED_WORK(&wdev->boot_queue, boot_moniter_work); mod_delayed_work(system_freezable_wq, &wdev->boot_queue, round_jiffies(msecs_to_jiffies(wdev->reset_watchdog_time*1000))); enable_watchdog(wdev); set_watchdog_cnt(wdev, wdev->default_timeout * wdev->one_second); dev_info(wdev->dev, "creat work queue for watch dog\n"); } ret = watchdog_register_device(aml_wdt); if (ret) return ret; awdtv = wdev; register_pm_notifier(&aml_wdt_pm_notifier); register_reboot_notifier(&aml_wdt_reboot_notifier); dev_info(wdev->dev, "AML Watchdog Timer probed done\n"); return 0; }
/*
 * setup_sched_clock - compute the TCLK-to-nanoseconds scale factor and
 * arm the cnt32_to_63 keep-warm timer.
 * @tclk: timer clock rate in Hz.
 */
static void __init setup_sched_clock(unsigned long tclk)
{
	unsigned long long v;
	unsigned long data;

	/* tclk2ns_scale = (NSEC_PER_SEC << TCLK2NS_SCALE_FACTOR) / tclk, rounded */
	v = NSEC_PER_SEC;
	v <<= TCLK2NS_SCALE_FACTOR;
	v += tclk/2;
	do_div(v, tclk);
	/*
	 * Round an odd scale up to an even value — presumably so the top
	 * bit returned by cnt32_to_63() is cleared for free, as in the
	 * other cnt32_to_63 users; confirm against the sched_clock impl.
	 */
	if (v & 1)
		v++;
	tclk2ns_scale = v;

	/* keep-warm interval for the cnt32_to_63 counter extension */
	data = (0xffffffffUL / tclk / 2 - 2) * HZ;
	setup_timer(&cnt32_to_63_keepwarm_timer, cnt32_to_63_keepwarm, data);
	mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + data));
}
/*
 * dev_watchdog - per-device transmit watchdog timer callback (multiqueue).
 * @t: the watchdog_timer embedded in struct net_device.
 *
 * Scans every TX queue; if one has been stopped longer than
 * dev->watchdog_timeo, warns once and calls the driver's
 * ndo_tx_timeout() handler. Re-arms itself while the device is up,
 * running and has carrier.
 */
static void dev_watchdog(struct timer_list *t)
{
	struct net_device *dev = from_timer(dev, t, watchdog_timer);

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				trans_start = txq->trans_start;
				if (netif_xmit_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					txq->trans_timeout++;
					break;	/* one stuck queue is enough */
				}
			}

			if (some_queue_timedout) {
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
					  dev->name, netdev_drivername(dev),
					  i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			/*
			 * mod_timer() returns 0 when the timer was inactive:
			 * take a device reference for the newly armed timer.
			 */
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}