/*
 * Step the guest wall clock to the host-supplied time.
 *
 * "what" is either "sync" or "sample" and is used only for the
 * bootverbose diagnostic; hv_ns/vm_ns are host and guest time in
 * nanoseconds since the Unix epoch.  Factored out because the sync
 * and sample paths previously duplicated this logic verbatim.
 */
static void
vmbus_apply_timesync(struct hv_util_sc *sc, const char *what,
    uint64_t hv_ns, uint64_t vm_ns)
{
	struct timespec hv_ts;

	if (bootverbose) {
		device_printf(sc->ic_dev, "apply %s request, "
		    "hv: %ju, vm: %ju\n",
		    what, (uintmax_t)hv_ns, (uintmax_t)vm_ns);
	}
	hv_ts.tv_sec = hv_ns / NANOSEC;
	hv_ts.tv_nsec = hv_ns % NANOSEC;
	kern_clock_settime(curthread, CLOCK_REALTIME, &hv_ts);
}

/*
 * Handle a time-sync ICMSG from the Hyper-V host.
 *
 * hvtime is the host wall clock in 100ns units since the Windows
 * epoch (converted below via VMBUS_ICMSG_TS_BASE/FACTOR).  A SYNC
 * request unconditionally steps the guest clock (unless disabled by
 * the vmbus_ts_ignore_sync tunable); a SAMPLE request only steps it
 * when the host/guest difference exceeds vmbus_ts_sample_thresh
 * milliseconds.
 */
static void
vmbus_timesync(struct hv_util_sc *sc, uint64_t hvtime, uint8_t tsflags)
{
	struct timespec vm_ts;
	uint64_t hv_ns, vm_ns;

	/* Host 100ns-units-since-1601 -> ns-since-Unix-epoch. */
	hv_ns = (hvtime - VMBUS_ICMSG_TS_BASE) * VMBUS_ICMSG_TS_FACTOR;
	nanotime(&vm_ts);
	/* Cast keeps the multiply in unsigned 64-bit arithmetic. */
	vm_ns = ((uint64_t)vm_ts.tv_sec * NANOSEC) + vm_ts.tv_nsec;

	if ((tsflags & VMBUS_ICMSG_TS_FLAG_SYNC) && !vmbus_ts_ignore_sync) {
		vmbus_apply_timesync(sc, "sync", hv_ns, vm_ns);
		/* Done! */
		return;
	}

	if ((tsflags & VMBUS_ICMSG_TS_FLAG_SAMPLE) &&
	    vmbus_ts_sample_thresh > 0) {
		int64_t diff;

		if (vmbus_ts_sample_verbose) {
			device_printf(sc->ic_dev, "sample request, "
			    "hv: %ju, vm: %ju\n",
			    (uintmax_t)hv_ns, (uintmax_t)vm_ns);
		}

		/* Absolute host/guest difference. */
		if (hv_ns > vm_ns)
			diff = hv_ns - vm_ns;
		else
			diff = vm_ns - hv_ns;
		/* nanosec -> millisec */
		diff /= 1000000;

		if (diff > vmbus_ts_sample_thresh)
			vmbus_apply_timesync(sc, "sample", hv_ns, vm_ns);
		/* Done */
		return;
	}
}
/** * Set host time based on time sync message from host */ static void hv_set_host_time(void *context) { time_sync_data* time_msg = (time_sync_data*) context; uint64_t hosttime = time_msg->data; struct timespec guest_ts, host_ts; uint64_t host_tns; int64_t diff; int error; host_tns = (hosttime - HV_WLTIMEDELTA) * 100; host_ts.tv_sec = (time_t)(host_tns/HV_NANO_SEC_PER_SEC); host_ts.tv_nsec = (long)(host_tns%HV_NANO_SEC_PER_SEC); nanotime(&guest_ts); diff = (int64_t)host_ts.tv_sec - (int64_t)guest_ts.tv_sec; /* * If host differs by 5 seconds then make the guest catch up */ if (diff > 5 || diff < -5) { error = kern_clock_settime(curthread, CLOCK_REALTIME, &host_ts); } /* * Free the hosttime that was allocated in hv_adj_guesttime() */ free(time_msg, M_DEVBUF); }
/** * Set host time based on time sync message from host */ static void hv_set_host_time(void *context, int pending) { hv_timesync_sc *softc = (hv_timesync_sc*)context; uint64_t hosttime = softc->time_msg.data; struct timespec guest_ts, host_ts; uint64_t host_tns; int64_t diff; int error; host_tns = (hosttime - HV_WLTIMEDELTA) * 100; host_ts.tv_sec = (time_t)(host_tns/HV_NANO_SEC_PER_SEC); host_ts.tv_nsec = (long)(host_tns%HV_NANO_SEC_PER_SEC); nanotime(&guest_ts); diff = (int64_t)host_ts.tv_sec - (int64_t)guest_ts.tv_sec; /* * If host differs by 5 seconds then make the guest catch up */ if (diff > 5 || diff < -5) { error = kern_clock_settime(curthread, CLOCK_REALTIME, &host_ts); } }
/* * MPALMOSTSAFE */ int sys_clock_settime(struct clock_settime_args *uap) { struct timespec ats; int error; if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0) return (error); get_mplock(); error = kern_clock_settime(uap->clock_id, &ats); rel_mplock(); return (error); }
int sys_linux_clock_settime(struct linux_clock_settime_args *args) { struct timespec ts; struct l_timespec lts; int error; clockid_t nwhich = 0; /* XXX: GCC */ error = linux_to_native_clockid(&nwhich, args->which); if (error != 0) return (error); error = copyin(args->tp, <s, sizeof lts); if (error != 0) return (error); error = linux_to_native_timespec(&ts, <s); if (error != 0) return (error); return (kern_clock_settime(nwhich, &ts)); }