static int watchdog_stop(struct watchdog_device *wddev) { int err = 0; mutex_lock(&wddev->lock); if (test_bit(WDOG_UNREGISTERED, &wddev->status)) { err = -ENODEV; goto out_stop; } if (!watchdog_active(wddev)) goto out_stop; if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) { dev_info(wddev->dev, "nowayout prevents watchdog being stopped!\n"); err = -EBUSY; goto out_stop; } err = wddev->ops->stop(wddev); if (err == 0) clear_bit(WDOG_ACTIVE, &wddev->status); out_stop: mutex_unlock(&wddev->lock); return err; }
static int watchdog_stop(struct watchdog_device *wdd) { int err = 0; if (!watchdog_active(wdd)) return 0; if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) { pr_info("watchdog%d: nowayout prevents watchdog being stopped!\n", wdd->id); return -EBUSY; } if (wdd->ops->stop) { clear_bit(WDOG_HW_RUNNING, &wdd->status); err = wdd->ops->stop(wdd); } else { set_bit(WDOG_HW_RUNNING, &wdd->status); } if (err == 0) { clear_bit(WDOG_ACTIVE, &wdd->status); watchdog_update_worker(wdd); } return err; }
static int ts72xx_wdt_settimeout(struct watchdog_device *wdd, unsigned int to) { struct ts72xx_wdt_priv *priv = watchdog_get_drvdata(wdd); if (to == 1) { priv->regval = TS72XX_WDT_CTRL_1SEC; } else if (to == 2) { priv->regval = TS72XX_WDT_CTRL_2SEC; } else if (to <= 4) { priv->regval = TS72XX_WDT_CTRL_4SEC; to = 4; } else { priv->regval = TS72XX_WDT_CTRL_8SEC; if (to <= 8) to = 8; } wdd->timeout = to; if (watchdog_active(wdd)) { ts72xx_wdt_stop(wdd); ts72xx_wdt_start(wdd); } return 0; }
/* Quiesce the hardware on shutdown if userspace left the watchdog armed. */
static void mtk_wdt_shutdown(struct platform_device *pdev)
{
	struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev);
	struct watchdog_device *wdd = &mtk_wdt->wdt_dev;

	if (watchdog_active(wdd))
		mtk_wdt_stop(wdd);
}
static ktime_t watchdog_next_keepalive(struct watchdog_device *wdd) { struct watchdog_core_data *wd_data = wdd->wd_data; unsigned int timeout_ms = wdd->timeout * 1000; ktime_t keepalive_interval; ktime_t last_heartbeat, latest_heartbeat; ktime_t virt_timeout; unsigned int hw_heartbeat_ms; virt_timeout = ktime_add(wd_data->last_keepalive, ms_to_ktime(timeout_ms)); hw_heartbeat_ms = min_not_zero(timeout_ms, wdd->max_hw_heartbeat_ms); keepalive_interval = ms_to_ktime(hw_heartbeat_ms / 2); if (!watchdog_active(wdd)) return keepalive_interval; /* * To ensure that the watchdog times out wdd->timeout seconds * after the most recent ping from userspace, the last * worker ping has to come in hw_heartbeat_ms before this timeout. */ last_heartbeat = ktime_sub(virt_timeout, ms_to_ktime(hw_heartbeat_ms)); latest_heartbeat = ktime_sub(last_heartbeat, ktime_get()); if (ktime_before(latest_heartbeat, keepalive_interval)) return latest_heartbeat; return keepalive_interval; }
/*
 * Tear down the watchdog character device. Ordering matters: the cdev
 * (and, for watchdog0, the legacy misc device) is removed first so no
 * new opens can arrive while the device is being detached below.
 */
static void watchdog_cdev_unregister(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	cdev_del(&wd_data->cdev);
	if (wdd->id == 0) {
		misc_deregister(&watchdog_miscdev);
		old_wd_data = NULL;
	}

	/* Optionally stop a still-running watchdog when unregistering. */
	if (watchdog_active(wdd) &&
	    test_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status)) {
		watchdog_stop(wdd);
	}

	/* Detach core data from the device under the lock... */
	mutex_lock(&wd_data->lock);
	wd_data->wdd = NULL;
	wdd->wd_data = NULL;
	mutex_unlock(&wd_data->lock);

	/* ...then quiesce the keepalive machinery and drop our reference. */
	hrtimer_cancel(&wd_data->timer);
	kthread_cancel_work_sync(&wd_data->work);

	kref_put(&wd_data->kref, watchdog_core_data_release);
}
/* Re-arm the hardware on resume if the watchdog was left running. */
static int twl4030_wdt_resume(struct platform_device *pdev)
{
	struct watchdog_device *wdt = platform_get_drvdata(pdev);
	int ret = 0;

	if (watchdog_active(wdt))
		ret = twl4030_wdt_start(wdt);

	return ret;
}
/* Stop the hardware across suspend if the watchdog is running. */
static int twl4030_wdt_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct watchdog_device *wdt = platform_get_drvdata(pdev);
	int ret = 0;

	if (watchdog_active(wdt))
		ret = twl4030_wdt_stop(wdt);

	return ret;
}
/* Enable watchdog if necessary */
static int __maybe_unused sbsa_gwdt_resume(struct device *dev)
{
	struct sbsa_gwdt *gwdt = dev_get_drvdata(dev);
	struct watchdog_device *wdd = &gwdt->wdd;

	/* Re-arm only if userspace had the watchdog running. */
	if (watchdog_active(wdd))
		sbsa_gwdt_start(wdd);

	return 0;
}
/* Stop the watchdog across suspend when it is armed. */
static int __maybe_unused stmp3xxx_wdt_suspend(struct device *dev)
{
	struct watchdog_device *wdd = &stmp3xxx_wdd;
	int ret = 0;

	if (watchdog_active(wdd))
		ret = wdt_stop(wdd);

	return ret;
}
/* Restart the watchdog on resume if it was running before suspend. */
static int __maybe_unused armada_37xx_wdt_resume(struct device *dev)
{
	struct watchdog_device *wdt = dev_get_drvdata(dev);
	int ret = 0;

	if (watchdog_active(wdt))
		ret = armada_37xx_wdt_start(wdt);

	return ret;
}
/* * Timer tick: the timer will make sure that the watchdog timer hardware * is being reset in time. The conditions to do this are: * 1) the watchdog timer has been started and /dev/watchdog is open * and there is still time left before userspace should send the * next heartbeat/ping. (note: the internal heartbeat is much smaller * then the external/userspace heartbeat). * 2) the watchdog timer has been stopped by userspace. */ static void wdt_timer_tick(struct timer_list *unused) { if (time_before(jiffies, next_heartbeat) || (!watchdog_active(&wdt_dev))) { wdt_reset(); mod_timer(&timer, jiffies + WDT_HEARTBEAT); } else pr_crit("I will reboot your machine !\n"); }
/* Stop the watchdog across suspend when it is armed. */
static int mtk_wdt_suspend(struct device *dev)
{
	struct mtk_wdt_dev *mtk_wdt = dev_get_drvdata(dev);
	struct watchdog_device *wdd = &mtk_wdt->wdt_dev;

	if (watchdog_active(wdd))
		mtk_wdt_stop(wdd);

	return 0;
}
static const void *inject_drive_command(const void *buf) { const int RESET_TIMES = 10; //static struct timespec latest_tag_time; //static uint64_t latest_tag; static int reset_counter; struct timespec now; //uint64_t tag; if (!shm) return buf; if (!watchdog_active()) return buf; clock_gettime(CLOCK_MONOTONIC, &now); //tag = ((const struct drive_command *)buf)->tag; //if (latest_tag < tag) { // latest_tag = tag; // latest_tag_time = now; // return buf; //} static struct drive_command cmd; static struct timespec last_deadline; struct timespec deadline; pthread_mutex_lock(&shm->cmd_lock); deadline = shm->cmd_time; if (!timespec_eq(&deadline, &last_deadline)) { last_deadline = deadline; cmd = shm->cmd; reset_counter = RESET_TIMES; } pthread_mutex_unlock(&shm->cmd_lock); if (timespec_ge(&now, &deadline)) { if (reset_counter <= 0) return buf; cmd.lin_vel = 0; cmd.ang_vel = 0; cmd.lin_acc = 0; cmd.ang_acc = 0; cmd.lin_lim = 0; cmd.ang_lim = 0; reset_counter--; } //uint64_t msecs = timespec_diff_msec(&latest_tag_time, &now); //cmd.tag = ((latest_tag >> 33) + msecs) << 33; cmd.tag = limiter_tag; cmd.crc = _docrc(&cmd, sizeof(cmd) - sizeof(cmd.crc)); return &cmd; }
/* Stop the watchdog across suspend when it is armed. */
static int __maybe_unused meson_gxbb_wdt_suspend(struct device *dev)
{
	struct meson_gxbb_wdt *data = dev_get_drvdata(dev);
	struct watchdog_device *wdd = &data->wdt_dev;

	if (watchdog_active(wdd))
		meson_gxbb_wdt_stop(wdd);

	return 0;
}
/* Save the remaining count and stop the watchdog before suspending. */
static int __maybe_unused rwdt_suspend(struct device *dev)
{
	struct rwdt_priv *priv = dev_get_drvdata(dev);
	struct watchdog_device *wdev = &priv->wdev;

	if (watchdog_active(wdev)) {
		/* Remember where the counter was so resume can restore it. */
		priv->time_left = readw(priv->base + RWTCNT);
		rwdt_stop(wdev);
	}

	return 0;
}
/* sysfs "state" attribute: report whether the watchdog is running. */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%s",
		       watchdog_active(wdd) ? "active\n" : "inactive\n");
}
/* Restart the watchdog on resume and restore the saved counter value. */
static int __maybe_unused rwdt_resume(struct device *dev)
{
	struct rwdt_priv *priv = dev_get_drvdata(dev);
	struct watchdog_device *wdev = &priv->wdev;

	if (watchdog_active(wdev)) {
		rwdt_start(wdev);
		rwdt_write(priv, priv->time_left, RWTCNT);
	}

	return 0;
}
/* * Timer tick */ static void at91_ping(struct timer_list *t) { struct at91wdt *wdt = from_timer(wdt, t, timer); if (time_before(jiffies, wdt->next_heartbeat) || !watchdog_active(&wdt->wdd)) { at91_wdt_reset(wdt); mod_timer(&wdt->timer, jiffies + wdt->heartbeat); } else { pr_crit("I will reset your machine !\n"); } }
/**
 * cdns_wdt_suspend - Stop the device.
 *
 * @dev: handle to the device structure.
 * Return: 0 always.
 */
static int __maybe_unused cdns_wdt_suspend(struct device *dev)
{
	struct cdns_wdt *wdt = dev_get_drvdata(dev);

	if (!watchdog_active(&wdt->cdns_wdt_device))
		return 0;

	/* Stop the counter, then gate its clock to save power. */
	cdns_wdt_stop(&wdt->cdns_wdt_device);
	clk_disable_unprepare(wdt->clk);

	return 0;
}
/* * Timer tick */ static void at91_ping(unsigned long data) { struct at91wdt *wdt = (struct at91wdt *)data; if (time_before(jiffies, wdt->next_heartbeat) || !watchdog_active(&wdt->wdd)) { at91_wdt_reset(wdt); mod_timer(&wdt->timer, jiffies + wdt->heartbeat); } else { pr_crit("I will reset your machine !\n"); } }
/* Re-arm and immediately feed the watchdog on resume if it was running. */
static int mtk_wdt_resume(struct device *dev)
{
	struct mtk_wdt_dev *mtk_wdt = dev_get_drvdata(dev);
	struct watchdog_device *wdd = &mtk_wdt->wdt_dev;

	if (watchdog_active(wdd)) {
		mtk_wdt_start(wdd);
		mtk_wdt_ping(wdd);
	}

	return 0;
}
/* Reprogram the PRCMU watchdog after resume if it was left running. */
static int ux500_wdt_resume(struct platform_device *pdev)
{
	if (!watchdog_active(&ux500_wdt))
		return 0;

	/* Restart cleanly: stop, reconfigure, reload, start. */
	ux500_wdt_stop(&ux500_wdt);
	prcmu_config_a9wdog(PRCMU_WDOG_CPU1, false);
	prcmu_load_a9wdog(PRCMU_WDOG_ALL, timeout * 1000);
	ux500_wdt_start(&ux500_wdt);

	return 0;
}
/*
 * Ping the hardware on behalf of userspace and remember when it
 * happened so the keepalive worker can schedule around it.
 */
static int watchdog_ping(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	/* Nothing to feed unless the watchdog is running in some form. */
	if (!watchdog_active(wdd) && !watchdog_hw_running(wdd))
		return 0;

	set_bit(_WDOG_KEEPALIVE, &wd_data->status);
	wd_data->last_keepalive = jiffies;

	return __watchdog_ping(wdd);
}
/* Record the new timeout; a running IWDG is restarted to pick it up. */
static int stm32_iwdg_set_timeout(struct watchdog_device *wdd,
				  unsigned int timeout)
{
	dev_dbg(wdd->parent, "%s timeout: %d sec\n", __func__, timeout);

	wdd->timeout = timeout;

	return watchdog_active(wdd) ? stm32_iwdg_start(wdd) : 0;
}
/*
 * Stop the iTCO watchdog for suspend when required, remembering in
 * iTCO_wdt_private.suspended whether it must be restarted on resume.
 */
static int iTCO_wdt_suspend_noirq(struct device *dev)
{
	int ret;

	iTCO_wdt_private.suspended = false;

	if (!watchdog_active(&iTCO_wdt_watchdog_dev) || !need_suspend())
		return 0;

	ret = iTCO_wdt_stop(&iTCO_wdt_watchdog_dev);
	if (ret == 0)
		iTCO_wdt_private.suspended = true;

	return ret;
}
/* Reconfigure the PRCMU watchdog for suspend if it is currently running. */
static int ux500_wdt_suspend(struct platform_device *pdev,
			     pm_message_t state)
{
	if (!watchdog_active(&ux500_wdt))
		return 0;

	/* Restart cleanly: stop, reconfigure, reload, start. */
	ux500_wdt_stop(&ux500_wdt);
	prcmu_config_a9wdog(PRCMU_WDOG_CPU1, true);
	prcmu_load_a9wdog(PRCMU_WDOG_ALL, timeout * 1000);
	ux500_wdt_start(&ux500_wdt);

	return 0;
}
static inline bool watchdog_need_worker(struct watchdog_device *wdd)
{
	/* All variables in milli-seconds */
	unsigned int hm = wdd->max_hw_heartbeat_ms;
	unsigned int t = wdd->timeout * 1000;

	/*
	 * When userspace has activated the watchdog, a worker is needed
	 * if the driver advertised a maximum hardware timeout (and thus
	 * understands framework-generated heartbeats) and the requested
	 * timeout exceeds what the hardware can do on its own.
	 */
	if (watchdog_active(wdd))
		return hm && t > hm;

	/*
	 * Otherwise the framework takes care of feeding a watchdog that
	 * is running in hardware while /dev/watchdog is not open.
	 */
	return t && watchdog_hw_running(wdd);
}
/* Deferred work: feed the hardware while the watchdog needs pings. */
static void watchdog_ping_work(struct work_struct *work)
{
	struct watchdog_core_data *wd_data =
		container_of(to_delayed_work(work),
			     struct watchdog_core_data, work);
	struct watchdog_device *wdd;

	mutex_lock(&wd_data->lock);
	wdd = wd_data->wdd;
	/* The device may have been unregistered meanwhile (wdd == NULL). */
	if (wdd && (watchdog_active(wdd) || watchdog_hw_running(wdd)))
		__watchdog_ping(wdd);
	mutex_unlock(&wd_data->lock);
}
/**
 * cdns_wdt_resume - Resume the device.
 *
 * @dev: handle to the device structure.
 * Return: 0 on success, errno otherwise.
 */
static int __maybe_unused cdns_wdt_resume(struct device *dev)
{
	struct cdns_wdt *wdt = dev_get_drvdata(dev);
	int ret;

	if (!watchdog_active(&wdt->cdns_wdt_device))
		return 0;

	/* Ungate the clock before touching the counter again. */
	ret = clk_prepare_enable(wdt->clk);
	if (ret) {
		dev_err(dev, "unable to enable clock\n");
		return ret;
	}
	cdns_wdt_start(&wdt->cdns_wdt_device);

	return 0;
}