/* * MPSAFE */ int sys_clock_gettime(struct clock_gettime_args *uap) { struct timespec ats; int error; error = kern_clock_gettime(uap->clock_id, &ats); if (error == 0) error = copyout(&ats, uap->tp, sizeof(ats)); return (error); }
/*
 * Report, in *remainp, the number of seconds elapsed since the watchdog
 * was last patted, using the fast monotonic clock.  Returns ENOENT when
 * no pat has been recorded yet.
 */
static int
wd_get_time_left(struct thread *td, time_t *remainp)
{
	struct timespec now;
	int rv;

	rv = kern_clock_gettime(td, CLOCK_MONOTONIC_FAST, &now);
	if (rv != 0)
		return (rv);
	/* Nothing to report until the first successful pat. */
	if (wd_lastpat_valid == 0)
		return (ENOENT);
	*remainp = now.tv_sec - wd_lastpat;
	return (0);
}
/*
 * Fetches the time value of a clock.
 *
 * Translates the CloudABI clock id into a native clockid_t, reads the
 * clock, and converts the result into a CloudABI timestamp in *ret.
 */
int
cloudabi_clock_time_get(struct thread *td, cloudabi_clockid_t clock_id,
    cloudabi_timestamp_t *ret)
{
	struct timespec ts;
	clockid_t native_id;
	int rv;

	rv = cloudabi_convert_clockid(clock_id, &native_id);
	if (rv != 0)
		return (rv);
	rv = kern_clock_gettime(td, native_id, &ts);
	if (rv != 0)
		return (rv);
	/* Final conversion also yields this function's return status. */
	return (cloudabi_convert_timespec(&ts, ret));
}
int sys_linux_clock_gettime(struct linux_clock_gettime_args *args) { struct l_timespec lts; int error; clockid_t nwhich = 0; /* XXX: GCC */ struct timespec tp; error = linux_to_native_clockid(&nwhich, args->which); if (error != 0) return (error); error = kern_clock_gettime(nwhich, &tp); if (error != 0) return (error); native_to_linux_timespec(<s, &tp); return (copyout(<s, args->tp, sizeof lts)); }
/*
 * CloudABI clock_time_get() syscall: convert the clock id, read the
 * native clock, convert to a CloudABI timestamp, and return it via
 * td_retval[0].
 */
int
cloudabi_sys_clock_time_get(struct thread *td,
    struct cloudabi_sys_clock_time_get_args *uap)
{
	struct timespec ts;
	cloudabi_timestamp_t stamp;
	clockid_t native_id;
	int rv;

	rv = cloudabi_convert_clockid(uap->clock_id, &native_id);
	if (rv == 0)
		rv = kern_clock_gettime(td, native_id, &ts);
	if (rv == 0)
		rv = cloudabi_convert_timespec(&ts, &stamp);
	if (rv != 0)
		return (rv);
	td->td_retval[0] = stamp;
	return (0);
}
/*
 * Arm or strobe ("pat") the watchdog(s).
 *
 * utim carries the timeout interval in WD_INTERVAL; WD_LASTVAL instead
 * requests reuse of the previously programmed interval (wd_last_u).
 * Returns 0 when a watchdog accepted the pat, EOPNOTSUPP when no
 * watchdog handled it, or EINVAL for a malformed request.
 */
int
wdog_kern_pat(u_int utim)
{
	int error;

	/* WD_LASTVAL and an explicit interval are mutually exclusive. */
	if ((utim & WD_LASTVAL) != 0 && (utim & WD_INTERVAL) > 0)
		return (EINVAL);

	if ((utim & WD_LASTVAL) != 0) {
		/*
		 * if WD_LASTVAL is set, fill in the bits for timeout
		 * from the saved value in wd_last_u.
		 */
		MPASS((wd_last_u & ~WD_INTERVAL) == 0);
		utim &= ~WD_LASTVAL;
		utim |= wd_last_u;
	} else {
		/*
		 * Otherwise save the new interval.
		 * This can be zero (to disable the watchdog)
		 */
		wd_last_u = (utim & WD_INTERVAL);
		wd_last_u_sysctl = wd_last_u;
		wd_last_u_sysctl_secs = pow2ns_to_ticks(wd_last_u) / hz;
	}
	if ((utim & WD_INTERVAL) == WD_TO_NEVER) {
		/* Disable request: pass 0 to the handlers below. */
		utim = 0;
		/* Assume all is well; watchdog signals failure. */
		error = 0;
	} else {
		/* Assume no watchdog available; watchdog flags success */
		error = EOPNOTSUPP;
	}
	if (wd_softtimer) {
		/* Software watchdog: stop or (re)arm the timeout callout. */
		if (utim == 0) {
			callout_stop(&wd_softtimeo_handle);
		} else {
			(void) callout_reset(&wd_softtimeo_handle,
			    pow2ns_to_ticks(utim), wd_timeout_cb, "soft");
		}
		error = 0;
	} else {
		/* Hardware path: registered drivers update error in place. */
		EVENTHANDLER_INVOKE(watchdog_list, utim, &error);
	}
	/* Re-apply the pre-timeout against the newly programmed interval. */
	wd_set_pretimeout(wd_pretimeout, true);
	/*
	 * If we were able to arm/strobe the watchdog, then
	 * update the last time it was strobed for WDIOC_GETTIMELEFT
	 */
	if (!error) {
		struct timespec ts;

		error = kern_clock_gettime(curthread /* XXX */,
		    CLOCK_MONOTONIC_FAST, &ts);
		if (!error) {
			wd_lastpat = ts.tv_sec;
			wd_lastpat_valid = 1;
		}
	}
	return (error);
}