static void
start_kern_loop(void)
{
    static int atexit_done;
    int ntp_adj_ret;

    pll_control = TRUE;
    ZERO(ntv);
    ntv.modes = MOD_BITS;
    ntv.status = STA_PLL;
    ntv.maxerror = MAXDISPERSE;
    ntv.esterror = MAXDISPERSE;
    ntv.constant = sys_poll;
    /*
     * Why is it that here constant is unconditionally set to
     * sys_poll, whereas elsewhere it is modified depending on
     * nanosecond vs. microsecond kernel?
     */
#ifdef SIGSYS
    /*
     * Use sigsetjmp() to save state and then call ntp_adjtime(); if
     * it fails, then pll_trap() will set pll_control FALSE before
     * returning control using siglongjmp().
     */
    newsigsys.sa_handler = pll_trap;
    newsigsys.sa_flags = 0;
    if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
        msyslog(LOG_ERR, "sigaction() trap SIGSYS: %m");
        pll_control = FALSE;
    } else {
        if (sigsetjmp(env, 1) == 0) {
            if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
                ntp_adjtime_error_handler(__func__, &ntv,
                    ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
            }
        }
        if (sigaction(SIGSYS, &sigsys, NULL)) {
            msyslog(LOG_ERR, "sigaction() restore SIGSYS: %m");
            pll_control = FALSE;
        }
    }
#else /* SIGSYS */
    if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
        ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret,
            errno, 0, 0, __LINE__ - 1);
    }
#endif /* SIGSYS */

    /*
     * Save the result status and light up an external clock
     * if available.
     */
    pll_status = ntv.status;
    if (pll_control) {
        if (!atexit_done) {
            atexit_done = TRUE;
            atexit(&stop_kern_loop);
        }
#ifdef STA_NANO
        if (pll_status & STA_CLK)
            ext_enable = TRUE;
#endif /* STA_NANO */
        report_event(EVNT_KERN, NULL,
            "kernel time sync enabled");
    }
}
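The SIGSYS gymnastics above exist only to survive kernels where the syscall is missing entirely. Where ntp_adjtime() links but may still be unsupported or unprivileged at runtime, a read-only call is a simpler probe; a minimal standalone sketch (the helper name and output are illustrative, not taken from the code above):

#include <stdio.h>
#include <string.h>
#include <sys/timex.h>

/* Read-only probe: modes = 0 requests no changes, so a non-negative
 * return (one of the TIME_* codes) shows the syscall is present and
 * usable without perturbing the clock. */
static int
kernel_pll_available(void)
{
    struct timex tx;

    memset(&tx, 0, sizeof(tx));
    tx.modes = 0;
    return (ntp_adjtime(&tx) >= 0);
}

int
main(void)
{
    printf("kernel PLL %savailable\n",
        kernel_pll_available() ? "" : "not ");
    return 0;
}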
/*
 * If a system has multiple independent time-correcting mechanisms, this
 * function should clear out any corrections on those mechanisms that we
 * will NOT be using. We can leave a prior correction intact on the
 * mechanism that we ARE using.
 *
 * However, it is usually a good idea to clean out any offset correction
 * that is still in progress anyway. We leave the frequency correction
 * intact.
 */
void
sysntp_clear_alternative_corrections(void)
{
    struct timex ntp;
    int64_t offset;

    if (no_update_opt)
        return;

    /*
     * Clear the ntp interface. We will use the sysctl interface
     * (XXX)
     */
    bzero(&ntp, sizeof(ntp));
    ntp.modes = MOD_OFFSET | MOD_FREQUENCY;
    ntp.offset = 0;
    ntp.freq = 0;
    ntp_adjtime(&ntp);

    /*
     * Clean out any offset still being applied to real time. Leave
     * any prior frequency correction intact.
     */
    offset = 0;
    sysctlbyname("kern.ntp.delta", NULL, 0, &offset, sizeof(offset));
}
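After such a clear, the kernel variables can be read back with a no-op call to confirm the corrections were actually dropped. A small sketch of that verification pattern (illustrative, not part of the function above):

#include <stdio.h>
#include <string.h>
#include <sys/timex.h>

/* With modes = 0 the call changes nothing and simply fills in the
 * current kernel state, so a zero offset and freq here confirm the
 * MOD_OFFSET | MOD_FREQUENCY clear took effect. */
int
main(void)
{
    struct timex tx;

    memset(&tx, 0, sizeof(tx));
    tx.modes = 0;
    if (ntp_adjtime(&tx) < 0) {
        perror("ntp_adjtime");
        return 1;
    }
    printf("offset=%ld freq=%ld\n", (long)tx.offset, (long)tx.freq);
    return 0;
}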
static void
kt_setfreq(struct ocx *ocx, double frequency)
{
    struct timex tx;
    int i;

    assert(isfinite(frequency));
    memset(&tx, 0, sizeof tx);
    tx.modes = MOD_STATUS;
#if defined(MOD_NANO)
    tx.modes |= MOD_NANO;
#elif defined(MOD_MICRO)
    tx.modes |= MOD_MICRO;
#endif
    tx.status = STA_PLL | STA_FREQHOLD;
    tx.modes |= MOD_FREQUENCY;  /* |=, not =: a plain assignment here
                                 * would clobber the MOD_STATUS bits
                                 * set above, so STA_FREQHOLD would
                                 * never reach the kernel */
    tx.freq = (long)floor(frequency * (65536 * 1e6));
    errno = 0;
    i = ntp_adjtime(&tx);
    Put(ocx, OCX_TRACE, "KERNPLL %.6e %d\n", frequency, i);
    /* XXX: what is the correct error test here? */
    assert(i >= 0);
}
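The 65536 * 1e6 factor above is the kernel's fixed-point frequency unit: timex.freq carries "scaled ppm", parts-per-million shifted left 16 bits. A standalone sketch of the conversion in both directions (helper names are illustrative):

#include <stdio.h>

/* Convert between a dimensionless frequency (s/s) and the kernel's
 * scaled-ppm fixed point: ppm (1e6) times 2^16 (65536). */
static long
dbl_to_scaled_ppm(double f)         /* f in s/s, e.g. 50e-6 = 50 ppm */
{
    return (long)(f * 65536e6);
}

static double
scaled_ppm_to_dbl(long freq)
{
    return freq / 65536e6;
}

int
main(void)
{
    long scaled = dbl_to_scaled_ppm(50e-6);     /* 50 ppm */

    printf("%ld scaled units -> %.1f ppm\n",
        scaled, scaled_ppm_to_dbl(scaled) * 1e6);
    return 0;
}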
/*
 * set_freq - set clock frequency correction
 *
 * Used to step the frequency correction at startup, possibly again once
 * the frequency is measured (that is, transitioning from EVNT_NSET to
 * EVNT_FSET), and finally to switch between daemon and kernel loop
 * discipline at runtime.
 *
 * When the kernel loop discipline is available but the daemon loop is
 * in use, the kernel frequency correction is disabled (set to 0) to
 * ensure drift_comp is applied by only one of the loops.
 */
static void
set_freq(
    double  freq        /* frequency update */
    )
{
    const char *loop_desc;
    int         ntp_adj_ret;

    drift_comp = freq;
    loop_desc = "ntpd";
#ifdef KERNEL_PLL
    if (pll_control) {
        ZERO(ntv);
        ntv.modes = MOD_FREQUENCY;
        if (kern_enable) {
            loop_desc = "kernel";
            ntv.freq = DTOFREQ(drift_comp);
        }
        if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
            ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret,
                errno, 0, 0, __LINE__ - 1);
        }
    }
#endif /* KERNEL_PLL */
    mprintf_event(EVNT_FSET, NULL, "%s %.3f PPM", loop_desc,
        drift_comp * 1e6);
}
/*
 * direct_freq - calculate frequency directly
 *
 * This is very carefully done. When the offset is first computed at the
 * first update, a residual frequency component results. Subsequently,
 * updates are suppressed until the end of the measurement interval while
 * the offset is amortized. At the end of the interval the frequency is
 * calculated from the current offset, residual offset, length of the
 * interval and residual frequency component. At the same time the
 * frequency file is armed for update at the next hourly stats.
 */
static double
direct_freq(
    double  fp_offset
    )
{
#ifdef KERNEL_PLL
    /*
     * If the kernel is enabled, we need the residual offset to
     * calculate the frequency correction.
     */
    if (pll_control && kern_enable) {
        memset(&ntv, 0, sizeof(ntv));
        ntp_adjtime(&ntv);
#ifdef STA_NANO
        clock_offset = ntv.offset / 1e9;
#else /* STA_NANO */
        clock_offset = ntv.offset / 1e6;
#endif /* STA_NANO */
        drift_comp = FREQTOD(ntv.freq);
    }
#endif /* KERNEL_PLL */
    set_freq((fp_offset - clock_offset) / (current_time -
        clock_epoch) + drift_comp);
    wander_resid = 0;
    return (drift_comp);
}
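The estimate above is (measured offset minus residual offset) divided by the measurement interval, plus the residual frequency already engaged. A worked example with made-up numbers showing the arithmetic:

#include <stdio.h>

/* A +6 ms offset measured over a 300 s interval, with a 1 ms residual
 * offset and +2 ppm residual frequency already in the kernel, yields
 * (6e-3 - 1e-3) / 300 + 2e-6 ~= 18.667 ppm. Values are illustrative. */
int
main(void)
{
    double fp_offset    = 6e-3;     /* measured offset (s)      */
    double clock_offset = 1e-3;     /* residual offset (s)      */
    double mu           = 300.0;    /* measurement interval (s) */
    double drift_comp   = 2e-6;     /* residual frequency (s/s) */

    double freq = (fp_offset - clock_offset) / mu + drift_comp;

    printf("new frequency = %.3f ppm\n", freq * 1e6);
    return 0;
}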
/*
 * set_freq - set clock frequency
 */
static void
set_freq(
    double  freq        /* frequency update */
    )
{
    char    tbuf[80];

    drift_comp = freq;
#ifdef KERNEL_PLL
    /*
     * If the kernel is enabled, update the kernel frequency.
     */
    if (pll_control && kern_enable) {
        memset(&ntv, 0, sizeof(ntv));
        ntv.modes = MOD_FREQUENCY;
        ntv.freq = DTOFREQ(drift_comp);
        ntp_adjtime(&ntv);
        snprintf(tbuf, sizeof(tbuf), "kernel %.3f PPM",
            drift_comp * 1e6);
        report_event(EVNT_FSET, NULL, tbuf);
    } else {
        snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM",
            drift_comp * 1e6);
        report_event(EVNT_FSET, NULL, tbuf);
    }
#else /* KERNEL_PLL */
    snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM", drift_comp * 1e6);
    report_event(EVNT_FSET, NULL, tbuf);
#endif /* KERNEL_PLL */
}
static void
determine_ntp_resolution(void)
{
    struct timex tx;

    memset(&tx, 0, sizeof(tx));     /* don't pass stack garbage */
    tx.modes = 0;
    if (ntp_adjtime(&tx) < 0) {
        ntp_resolution = 0;
    } else if (tx.status & STA_NANO) {
        ntp_resolution = 1e-9;
    } else {
        ntp_resolution = 1e-6;
    }
}
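The same STA_NANO test decides how to interpret timex.offset. A standalone sketch that reads the kernel state once and scales the offset by the detected resolution (STA_NANO is guarded since it is not defined everywhere):

#include <stdio.h>
#include <string.h>
#include <sys/timex.h>

/* Read-only query: scale timex.offset by the resolution implied by
 * STA_NANO (nanoseconds when set, microseconds otherwise). */
int
main(void)
{
    struct timex tx;
    double resolution, offset_s;

    memset(&tx, 0, sizeof(tx));
    tx.modes = 0;
    if (ntp_adjtime(&tx) < 0) {
        perror("ntp_adjtime");
        return 1;
    }
#ifdef STA_NANO
    resolution = (tx.status & STA_NANO) ? 1e-9 : 1e-6;
#else
    resolution = 1e-6;
#endif
    offset_s = tx.offset * resolution;
    printf("kernel offset: %.9f s (resolution %g s)\n",
        offset_s, resolution);
    return 0;
}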
/*
 * adj_systime - adjust system time by the argument.
 */
int
adj_systime(double now)     /* adjustment (s) */
{
    struct timeval adjtv;   /* new adjustment */
    double  dtemp;
    long    ticks;
    int     isneg = 0;

    /*
     * Most Unix adjtime() implementations adjust the system clock
     * in microsecond quanta, but some adjust in 10-ms quanta. We
     * carefully round the adjustment to the nearest quantum, then
     * adjust in quanta and keep the residue for later.
     */
    dtemp = now;
    if (dtemp < 0) {
        isneg = 1;
        dtemp = -dtemp;
    }
    adjtv.tv_sec = (long)dtemp;
    dtemp -= adjtv.tv_sec;
    ticks = (long)(dtemp / sys_tick + .5);
    adjtv.tv_usec = (long)(ticks * sys_tick * 1e6);
    dtemp -= adjtv.tv_usec / 1e6;

    /*
     * Convert to signed seconds and microseconds for the Unix
     * adjtime() system call. Note we purposely lose the adjtime()
     * leftover.
     */
    if (isneg) {
        adjtv.tv_sec = -adjtv.tv_sec;
        adjtv.tv_usec = -adjtv.tv_usec;
    }
    if (adjtv.tv_sec != 0 || adjtv.tv_usec != 0) {
        struct timex tntx;

        tntx.offset = adjtv.tv_usec + adjtv.tv_sec * 1000000L;
        tntx.modes = ADJ_OFFSET_SINGLESHOT;
        if (ntp_adjtime(&tntx) < 0) {
            /* msyslog(LOG_ERR,
             *     "adj_systime: failed to set system time adjustment");
             */
            return (0);
        }
    }
    return (1);
}
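On Linux, a singleshot (adjtime-style) slew like the one issued above is kept separate from the PLL offset, and its unapplied remainder can be read back without disturbing it. A sketch assuming the Linux-only ADJ_OFFSET_SS_READ mode, hence the guard; the units note reflects the adjtime interface and may vary by kernel:

#include <stdio.h>
#include <string.h>
#include <sys/timex.h>

/* Query the remainder of a pending singleshot slew. With
 * ADJ_OFFSET_SS_READ the kernel returns the unapplied adjustment in
 * tx.offset (microseconds) without changing it. */
int
main(void)
{
#ifdef ADJ_OFFSET_SS_READ
    struct timex tx;

    memset(&tx, 0, sizeof(tx));
    tx.modes = ADJ_OFFSET_SS_READ;
    if (ntp_adjtime(&tx) < 0) {
        perror("ntp_adjtime");
        return 1;
    }
    printf("pending slew: %ld us\n", tx.offset);
#else
    puts("ADJ_OFFSET_SS_READ not available on this system");
#endif
    return 0;
}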
/*
 * Call ntp_adjtime() to obtain the latest calibration coefficient.
 */
void
update_ppm(double *ppm)     /* by pointer: the original by-value
                             * parameter could never return the new
                             * coefficient to the caller */
{
    struct timex ntx;
    int status;
    double ppm_new;

    memset(&ntx, 0, sizeof(ntx));
    ntx.modes = 0;          /* only read */
    status = ntp_adjtime(&ntx);
    if (status != TIME_OK) {
        /* cerr << "Error: clock not synchronized" << endl; */
        /* return; */
    }
    ppm_new = (double)ntx.freq / (double)(1 << 16); /* frequency scale */
    if (fabs(ppm_new) > 200) {  /* fabs(), not abs(): the value is a
                                 * double; needs <math.h> */
        printf("Warning: absolute ppm value is greater than 200 "
            "and is being ignored!\n");
    } else {
        if (*ppm != ppm_new) {
            printf(" Obtained new ppm value: %f\n", ppm_new);
        }
        *ppm = ppm_new;
    }
}
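A hypothetical caller for the corrected signature above, assuming update_ppm() is linked in from the snippet:

#include <stdio.h>

/* Seed the coefficient and refresh it from the kernel. */
int
main(void)
{
    double ppm = 0.0;

    update_ppm(&ppm);
    printf("current calibration: %f ppm\n", ppm);
    return 0;
}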
/*
 * local_clock - the NTP logical clock loop filter.
 *
 * Return codes:
 * -1   update ignored: exceeds panic threshold
 * 0    update ignored: popcorn or exceeds step threshold
 * 1    clock was slewed
 * 2    clock was stepped
 *
 * LOCKCLOCK: The only thing this routine does is set the
 * sys_rootdisp variable equal to the peer dispersion.
 */
int
local_clock(
    struct peer *peer,      /* synch source peer structure */
    double fp_offset        /* clock offset (s) */
    )
{
    int     rval;           /* return code */
    int     osys_poll;      /* old system poll */
    int     ntp_adj_ret;    /* returned by ntp_adjtime */
    double  mu;             /* interval since last update */
    double  clock_frequency; /* clock frequency */
    double  dtemp, etemp;   /* double temps */
    char    tbuf[80];       /* report buffer */

    /*
     * If the loop is opened or the NIST LOCKCLOCK is in use,
     * monitor and record the offsets anyway in order to determine
     * the open-loop response and then go home.
     */
#ifdef LOCKCLOCK
    {
#else
    if (!ntp_enable) {
#endif /* LOCKCLOCK */
        record_loop_stats(fp_offset, drift_comp, clock_jitter,
            clock_stability, sys_poll);
        return (0);
    }

#ifndef LOCKCLOCK
    /*
     * If the clock is way off, panic is declared. The clock_panic
     * defaults to 1000 s; if set to zero, the panic will never
     * occur. The allow_panic defaults to FALSE, so the first panic
     * will exit. It can be set TRUE by a command line option, in
     * which case the clock will be set anyway and time marches on.
     * But, allow_panic will be set FALSE when the update is less
     * than the step threshold; so, subsequent panics will exit.
     */
    if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
        !allow_panic) {
        snprintf(tbuf, sizeof(tbuf),
            "%+.0f s; set clock manually within %.0f s.",
            fp_offset, clock_panic);
        report_event(EVNT_SYSFAULT, NULL, tbuf);
        return (-1);
    }

    /*
     * This section simulates ntpdate. If the offset exceeds the
     * step threshold (128 ms), step the clock to that time and
     * exit. Otherwise, slew the clock to that time and exit. Note
     * that the slew will persist and eventually complete beyond the
     * life of this program. Note that while ntpdate is active, the
     * terminal does not detach, so the termination message prints
     * directly to the terminal.
     */
    if (mode_ntpdate) {
        if (  ( fp_offset > clock_max_fwd  && clock_max_fwd  > 0)
           || (-fp_offset > clock_max_back && clock_max_back > 0)) {
            step_systime(fp_offset);
            msyslog(LOG_NOTICE, "ntpd: time set %+.6f s",
                fp_offset);
            printf("ntpd: time set %+.6fs\n", fp_offset);
        } else {
            adj_systime(fp_offset);
            msyslog(LOG_NOTICE, "ntpd: time slew %+.6f s",
                fp_offset);
            printf("ntpd: time slew %+.6fs\n", fp_offset);
        }
        record_loop_stats(fp_offset, drift_comp, clock_jitter,
            clock_stability, sys_poll);
        exit (0);
    }

    /*
     * The huff-n'-puff filter finds the lowest delay in the recent
     * interval. This is used to correct the offset by one-half the
     * difference between the sample delay and minimum delay. This
     * is most effective if the delays are highly asymmetric and
     * clockhopping is avoided and the clock frequency wander is
     * relatively small.
     */
    if (sys_huffpuff != NULL) {
        if (peer->delay < sys_huffpuff[sys_huffptr])
            sys_huffpuff[sys_huffptr] = peer->delay;
        if (peer->delay < sys_mindly)
            sys_mindly = peer->delay;
        if (fp_offset > 0)
            dtemp = -(peer->delay - sys_mindly) / 2;
        else
            dtemp = (peer->delay - sys_mindly) / 2;
        fp_offset += dtemp;
#ifdef DEBUG
        if (debug)
            printf(
                "local_clock: size %d mindly %.6f huffpuff %.6f\n",
                sys_hufflen, sys_mindly, dtemp);
#endif
    }

    /*
     * Clock state machine transition function which defines how the
     * system reacts to large phase and frequency excursion. There
     * are two main regimes: when the offset exceeds the step
     * threshold (128 ms) and when it does not. Under certain
     * conditions updates are suspended until the stepout threshold
     * (900 s) is exceeded. See the documentation on how these
     * thresholds interact with commands and command line options.
     *
     * Note the kernel is disabled if step is disabled or greater
     * than 0.5 s or in ntpdate mode.
     */
    osys_poll = sys_poll;
    if (sys_poll < peer->minpoll)
        sys_poll = peer->minpoll;
    if (sys_poll > peer->maxpoll)
        sys_poll = peer->maxpoll;
    mu = current_time - clock_epoch;
    clock_frequency = drift_comp;
    rval = 1;
    if (  ( fp_offset > clock_max_fwd  && clock_max_fwd  > 0)
       || (-fp_offset > clock_max_back && clock_max_back > 0)
       || force_step_once) {
        if (force_step_once) {
            force_step_once = FALSE;    /* we want this only once after startup */
            msyslog(LOG_NOTICE, "Doing initial time step");
        }

        switch (state) {

        /*
         * In SYNC state we ignore the first outlier and switch
         * to SPIK state.
         */
        case EVNT_SYNC:
            snprintf(tbuf, sizeof(tbuf), "%+.6f s", fp_offset);
            report_event(EVNT_SPIK, NULL, tbuf);
            state = EVNT_SPIK;
            return (0);

        /*
         * In FREQ state we ignore outliers and inliers. At the
         * first outlier after the stepout threshold, compute
         * the apparent frequency correction and step the phase.
         */
        case EVNT_FREQ:
            if (mu < clock_minstep)
                return (0);

            clock_frequency = direct_freq(fp_offset);

            /* fall through to EVNT_SPIK */

        /*
         * In SPIK state we ignore succeeding outliers until
         * either an inlier is found or the stepout threshold is
         * exceeded.
         */
        case EVNT_SPIK:
            if (mu < clock_minstep)
                return (0);

            /* fall through to default */

        /*
         * We get here by default in NSET and FSET states and
         * from above in FREQ or SPIK states.
         *
         * In NSET state an initial frequency correction is not
         * available, usually because the frequency file has not
         * yet been written. Since the time is outside the step
         * threshold, the clock is stepped. The frequency will
         * be set directly following the stepout interval.
         *
         * In FSET state the initial frequency has been set from
         * the frequency file. Since the time is outside the
         * step threshold, the clock is stepped immediately,
         * rather than after the stepout interval. Guys get
         * nervous if it takes 15 minutes to set the clock for
         * the first time.
         *
         * In FREQ and SPIK states the stepout threshold has
         * expired and the phase is still above the step
         * threshold. Note that a single spike greater than the
         * step threshold is always suppressed, even with a
         * long time constant.
         */
        default:
            snprintf(tbuf, sizeof(tbuf), "%+.6f s", fp_offset);
            report_event(EVNT_CLOCKRESET, NULL, tbuf);
            step_systime(fp_offset);
            reinit_timer();
            tc_counter = 0;
            clock_jitter = LOGTOD(sys_precision);
            rval = 2;
            if (state == EVNT_NSET) {
                rstclock(EVNT_FREQ, 0);
                return (rval);
            }
            break;
        }
        rstclock(EVNT_SYNC, 0);
    } else {
        /*
         * The offset is less than the step threshold. Calculate
         * the jitter as the exponentially weighted offset
         * differences.
         */
        etemp = SQUARE(clock_jitter);
        dtemp = SQUARE(max(fabs(fp_offset - last_offset),
            LOGTOD(sys_precision)));
        clock_jitter = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);
        switch (state) {

        /*
         * In NSET state this is the first update received and
         * the frequency has not been initialized. Adjust the
         * phase, but do not adjust the frequency until after
         * the stepout threshold.
         */
        case EVNT_NSET:
            adj_systime(fp_offset);
            rstclock(EVNT_FREQ, fp_offset);
            break;

        /*
         * In FREQ state ignore updates until the stepout
         * threshold. After that, compute the new frequency, but
         * do not adjust the frequency until the holdoff counter
         * decrements to zero.
         */
        case EVNT_FREQ:
            if (mu < clock_minstep)
                return (0);

            clock_frequency = direct_freq(fp_offset);
            /* fall through */

        /*
         * We get here by default in FSET, SPIK and SYNC states.
         * Here compute the frequency update due to PLL and FLL
         * contributions. Note, we avoid frequency discipline at
         * startup until the initial transient has subsided.
         */
        default:
            allow_panic = FALSE;
            if (freq_cnt == 0) {

                /*
                 * The FLL and PLL frequency gain constants
                 * depend on the time constant and Allan
                 * intercept. The PLL is always used, but
                 * becomes ineffective above the Allan
                 * intercept where the FLL becomes effective.
                 */
                if (sys_poll >= allan_xpt)
                    clock_frequency +=
                        (fp_offset - clock_offset) /
                        max(ULOGTOD(sys_poll), mu) *
                        CLOCK_FLL;

                /*
                 * The PLL frequency gain (numerator) depends
                 * on the minimum of the update interval and
                 * Allan intercept. This reduces the PLL gain
                 * when the FLL becomes effective.
                 */
                etemp = min(ULOGTOD(allan_xpt), mu);
                dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
                clock_frequency += fp_offset * etemp /
                    (dtemp * dtemp);
            }
            rstclock(EVNT_SYNC, fp_offset);
            if (fabs(fp_offset) < CLOCK_FLOOR)
                freq_cnt = 0;
            break;
        }
    }

#ifdef KERNEL_PLL
    /*
     * This code segment works when clock adjustments are made using
     * precision time kernel support and the ntp_adjtime() system
     * call. This support is available in Solaris 2.6 and later,
     * Digital Unix 4.0 and later, FreeBSD, Linux and specially
     * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
     * DECstation 5000/240 and Alpha AXP, additional kernel
     * modifications provide a true microsecond clock and nanosecond
     * clock, respectively.
     *
     * Important note: The kernel discipline is used only if the
     * step threshold is less than 0.5 s, as anything higher can
     * lead to overflow problems. This might occur if some misguided
     * lad set the step threshold to something ridiculous.
     */
    if (pll_control && kern_enable && freq_cnt == 0) {

        /*
         * We initialize the structure for the ntp_adjtime()
         * system call. We have to convert everything to
         * microseconds or nanoseconds first. Do not update the
         * system variables if the ext_enable flag is set. In
         * this case, the external clock driver will update the
         * variables, which will be read later by the local
         * clock driver. Afterwards, remember the time and
         * frequency offsets for jitter and stability values and
         * to update the frequency file.
         */
        ZERO(ntv);
        if (ext_enable) {
            ntv.modes = MOD_STATUS;
        } else {
#ifdef STA_NANO
            ntv.modes = MOD_BITS | MOD_NANO;
#else /* STA_NANO */
            ntv.modes = MOD_BITS;
#endif /* STA_NANO */
            if (clock_offset < 0)
                dtemp = -.5;
            else
                dtemp = .5;
#ifdef STA_NANO
            ntv.offset = (int32)(clock_offset * 1e9 + dtemp);
            ntv.constant = sys_poll;
#else /* STA_NANO */
            ntv.offset = (int32)(clock_offset * 1e6 + dtemp);
            ntv.constant = sys_poll - 4;
#endif /* STA_NANO */
            if (ntv.constant < 0)
                ntv.constant = 0;

            ntv.esterror = (u_int32)(clock_jitter * 1e6);
            ntv.maxerror = (u_int32)((sys_rootdelay / 2 +
                sys_rootdisp) * 1e6);
            ntv.status = STA_PLL;

            /*
             * Enable/disable the PPS if requested.
             */
            if (hardpps_enable) {
                ntv.status |= (STA_PPSTIME | STA_PPSFREQ);
                if (!(pll_status & STA_PPSTIME))
                    sync_status("PPS enabled",
                        pll_status, ntv.status);
            } else {
                ntv.status &= ~(STA_PPSTIME | STA_PPSFREQ);
                if (pll_status & STA_PPSTIME)
                    sync_status("PPS disabled",
                        pll_status, ntv.status);
            }
            if (sys_leap == LEAP_ADDSECOND)
                ntv.status |= STA_INS;
            else if (sys_leap == LEAP_DELSECOND)
                ntv.status |= STA_DEL;
        }

        /*
         * Pass the stuff to the kernel. If it squeals, turn off
         * the pps. In any case, fetch the kernel offset,
         * frequency and jitter.
         */
        ntp_adj_ret = ntp_adjtime(&ntv);
        /*
         * A squeal is a return status < 0, or a state change.
         */
        if ((0 > ntp_adj_ret) || (ntp_adj_ret != kernel_status)) {
            kernel_status = ntp_adj_ret;
            ntp_adjtime_error_handler(__func__, &ntv,
                ntp_adj_ret, errno, hardpps_enable, 0,
                __LINE__ - 1);
        }
        pll_status = ntv.status;
#ifdef STA_NANO
        clock_offset = ntv.offset / 1e9;
#else /* STA_NANO */
        clock_offset = ntv.offset / 1e6;
#endif /* STA_NANO */
        clock_frequency = FREQTOD(ntv.freq);

        /*
         * If the kernel PPS is lit, monitor its performance.
         */
        if (ntv.status & STA_PPSTIME) {
#ifdef STA_NANO
            clock_jitter = ntv.jitter / 1e9;
#else /* STA_NANO */
            clock_jitter = ntv.jitter / 1e6;
#endif /* STA_NANO */
        }

#if defined(STA_NANO) && NTP_API == 4
        /*
         * If the TAI changes, update the kernel TAI.
         */
        if (loop_tai != sys_tai) {
            loop_tai = sys_tai;
            ntv.modes = MOD_TAI;
            ntv.constant = sys_tai;
            if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
                ntp_adjtime_error_handler(__func__, &ntv,
                    ntp_adj_ret, errno, 0, 1, __LINE__ - 1);
            }
        }
#endif /* STA_NANO */
    }
#endif /* KERNEL_PLL */

    /*
     * Clamp the frequency within the tolerance range and calculate
     * the frequency difference since the last update.
     */
    if (fabs(clock_frequency) > NTP_MAXFREQ)
        msyslog(LOG_NOTICE,
            "frequency error %.0f PPM exceeds tolerance %.0f PPM",
            clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
    dtemp = SQUARE(clock_frequency - drift_comp);
    if (clock_frequency > NTP_MAXFREQ)
        drift_comp = NTP_MAXFREQ;
    else if (clock_frequency < -NTP_MAXFREQ)
        drift_comp = -NTP_MAXFREQ;
    else
        drift_comp = clock_frequency;

    /*
     * Calculate the wander as the exponentially weighted RMS
     * frequency differences. Record the change for the frequency
     * file update.
     */
    etemp = SQUARE(clock_stability);
    clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);

    /*
     * Here we adjust the time constant by comparing the current
     * offset with the clock jitter. If the offset is less than the
     * clock jitter times a constant, then the averaging interval is
     * increased, otherwise it is decreased. A bit of hysteresis
     * helps calm the dance. Works best using burst mode. Don't
     * fiddle with the poll during the startup clamp period.
     */
    if (freq_cnt > 0) {
        tc_counter = 0;
    } else if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
        tc_counter += sys_poll;
        if (tc_counter > CLOCK_LIMIT) {
            tc_counter = CLOCK_LIMIT;
            if (sys_poll < peer->maxpoll) {
                tc_counter = 0;
                sys_poll++;
            }
        }
    } else {
        tc_counter -= sys_poll << 1;
        if (tc_counter < -CLOCK_LIMIT) {
            tc_counter = -CLOCK_LIMIT;
            if (sys_poll > peer->minpoll) {
                tc_counter = 0;
                sys_poll--;
            }
        }
    }

    /*
     * If the time constant has changed, update the poll variables.
     */
    if (osys_poll != sys_poll)
        poll_update(peer, sys_poll);

    /*
     * Yibbidy, yibbbidy, yibbidy; that'h all folks.
     */
    record_loop_stats(clock_offset, drift_comp, clock_jitter,
        clock_stability, sys_poll);
#ifdef DEBUG
    if (debug)
        printf(
            "local_clock: offset %.9f jit %.9f freq %.3f stab %.3f poll %d\n",
            clock_offset, clock_jitter, drift_comp * 1e6,
            clock_stability * 1e6, sys_poll);
#endif /* DEBUG */
    return (rval);
#endif /* LOCKCLOCK */
}


/*
 * adj_host_clock - Called once every second to update the local clock.
 *
 * LOCKCLOCK: The only thing this routine does is increment the
 * sys_rootdisp variable.
 */
void
adj_host_clock(
    void
    )
{
    double  offset_adj;
    double  freq_adj;

    /*
     * Update the dispersion since the last update. In contrast to
     * NTPv3, NTPv4 does not declare unsynchronized after one day,
     * since the dispersion check serves this function. Also,
     * since the poll interval can exceed one day, the old test
     * would be counterproductive. During the startup clamp period,
     * the time constant is clamped at 2.
     */
    sys_rootdisp += clock_phi;
#ifndef LOCKCLOCK
    if (!ntp_enable || mode_ntpdate)
        return;
    /*
     * Determine the phase adjustment. The gain factor (denominator)
     * increases with poll interval, so is dominated by the FLL
     * above the Allan intercept. Note the reduced time constant at
     * startup.
     */
    if (state != EVNT_SYNC) {
        offset_adj = 0.;
    } else if (freq_cnt > 0) {
        offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(1));
        freq_cnt--;
#ifdef KERNEL_PLL
    } else if (pll_control && kern_enable) {
        offset_adj = 0.;
#endif /* KERNEL_PLL */
    } else {
        offset_adj = clock_offset / (CLOCK_PLL *
            ULOGTOD(sys_poll));
    }

    /*
     * If the kernel discipline is enabled the frequency correction
     * drift_comp has already been engaged via ntp_adjtime() in
     * set_freq(). Otherwise it is a component of the adj_systime()
     * offset.
     */
#ifdef KERNEL_PLL
    if (pll_control && kern_enable)
        freq_adj = 0.;
    else
#endif /* KERNEL_PLL */
        freq_adj = drift_comp;

    /* Bound absolute value of total adjustment to NTP_MAXFREQ. */
    if (offset_adj + freq_adj > NTP_MAXFREQ)
        offset_adj = NTP_MAXFREQ - freq_adj;
    else if (offset_adj + freq_adj < -NTP_MAXFREQ)
        offset_adj = -NTP_MAXFREQ - freq_adj;

    clock_offset -= offset_adj;
    /*
     * Windows port adj_systime() must be called each second,
     * even if the argument is zero, to ease emulation of
     * adjtime() using Windows' slew API which controls the rate
     * but does not automatically stop slewing when an offset
     * has decayed to zero.
     */
    adj_systime(offset_adj + freq_adj);
#endif /* LOCKCLOCK */
}
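The per-second phase adjustment in adj_host_clock() is clock_offset / (CLOCK_PLL * 2^sys_poll), so the residual offset decays exponentially rather than being applied at once. A worked example with illustrative numbers (CLOCK_PLL = 16 matches ntpd's constant, but treat both values as assumptions here):

#include <stdio.h>
#include <math.h>

/* Compute one second's slew for a 5 ms residual offset at a 64 s
 * poll interval: 5e-3 / (16 * 64) ~= 4.88 us, i.e. a ~1024 s decay
 * time constant. */
int
main(void)
{
    const double CLOCK_PLL = 16.0;      /* PLL loop gain (assumed) */
    double clock_offset = 5e-3;         /* residual offset (s)     */
    int sys_poll = 6;                   /* log2 poll: 2^6 = 64 s   */

    double offset_adj = clock_offset /
        (CLOCK_PLL * ldexp(1.0, sys_poll));

    printf("slew this second: %.9f s (%.3f us)\n",
        offset_adj, offset_adj * 1e6);
    return 0;
}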
/*
 * loop_config - configure the loop filter
 *
 * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops.
 */
void
loop_config(
    int     item,
    double  freq
    )
{
    int     i;
    double  ftemp;

#ifdef DEBUG
    if (debug > 1)
        printf("loop_config: item %d freq %f\n", item, freq);
#endif
    switch (item) {

    /*
     * We first assume the kernel supports the ntp_adjtime()
     * syscall. If that syscall works, initialize the kernel time
     * variables. Otherwise, continue leaving no harm behind.
     */
    case LOOP_DRIFTINIT:
#ifndef LOCKCLOCK
#ifdef KERNEL_PLL
        if (mode_ntpdate)
            break;

        start_kern_loop();
#endif /* KERNEL_PLL */

        /*
         * Initialize frequency if given; otherwise, begin
         * frequency calibration phase.
         */
        ftemp = init_drift_comp / 1e6;
        if (ftemp > NTP_MAXFREQ)
            ftemp = NTP_MAXFREQ;
        else if (ftemp < -NTP_MAXFREQ)
            ftemp = -NTP_MAXFREQ;
        set_freq(ftemp);
        if (freq_set)
            rstclock(EVNT_FSET, 0);
        else
            rstclock(EVNT_NSET, 0);
        loop_started = TRUE;
#endif /* LOCKCLOCK */
        break;

    case LOOP_KERN_CLEAR:
#if 0       /* XXX: needs more review, and how can we get here? */
#ifndef LOCKCLOCK
# ifdef KERNEL_PLL
        if (pll_control && kern_enable) {
            memset((char *)&ntv, 0, sizeof(ntv));
            ntv.modes = MOD_STATUS;
            ntv.status = STA_UNSYNC;
            ntp_adjtime(&ntv);
            sync_status("kernel time sync disabled",
                pll_status, ntv.status);
        }
# endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
#endif
        break;

    /*
     * Tinker command variables for Ulrich Windl. Very dangerous.
     */
    case LOOP_ALLAN:    /* Allan intercept (log2) (allan) */
        allan_xpt = (u_char)freq;
        break;

    case LOOP_CODEC:    /* audio codec frequency (codec) */
        clock_codec = freq / 1e6;
        break;

    case LOOP_PHI:      /* dispersion threshold (dispersion) */
        clock_phi = freq / 1e6;
        break;

    case LOOP_FREQ:     /* initial frequency (freq) */
        init_drift_comp = freq;
        freq_set++;
        break;

    case LOOP_HUFFPUFF: /* huff-n'-puff length (huffpuff) */
        if (freq < HUFFPUFF)
            freq = HUFFPUFF;
        sys_hufflen = (int)(freq / HUFFPUFF);
        sys_huffpuff = emalloc(sizeof(sys_huffpuff[0]) *
            sys_hufflen);
        for (i = 0; i < sys_hufflen; i++)
            sys_huffpuff[i] = 1e9;
        sys_mindly = 1e9;
        break;

    case LOOP_PANIC:    /* panic threshold (panic) */
        clock_panic = freq;
        break;

    case LOOP_MAX:      /* step threshold (step) */
        clock_max_fwd = clock_max_back = freq;
        if (freq == 0 || freq > 0.5)
            select_loop(FALSE);
        break;

    case LOOP_MAX_BACK: /* step threshold (step) */
        clock_max_back = freq;
        /*
         * Leave using the kernel discipline code unless both
         * limits are massive. This assumes the reason to stop
         * using it is that it's pointless, not that it goes
         * wrong.
         */
        if (  (clock_max_back == 0 || clock_max_back > 0.5)
           || (clock_max_fwd  == 0 || clock_max_fwd  > 0.5))
            select_loop(FALSE);
        break;

    case LOOP_MAX_FWD:  /* step threshold (step) */
        clock_max_fwd = freq;
        if (  (clock_max_back == 0 || clock_max_back > 0.5)
           || (clock_max_fwd  == 0 || clock_max_fwd  > 0.5))
            select_loop(FALSE);
        break;

    case LOOP_MINSTEP:  /* stepout threshold (stepout) */
        if (freq < CLOCK_MINSTEP)
            clock_minstep = CLOCK_MINSTEP;
        else
            clock_minstep = freq;
        break;

    case LOOP_TICK:     /* tick increment (tick) */
        set_sys_tick_precision(freq);
        break;

    case LOOP_LEAP:     /* not used, fall through */
    default:
        msyslog(LOG_NOTICE,
            "loop_config: unsupported option %d", item);
    }
}
/*
 * Function:    _BWLInitNTP
 *
 * Description:
 *      Initialize NTP.
 *
 * In Args:
 *
 * Out Args:
 *
 * Scope:
 * Returns:
 * Side Effect:
 *
 * If STA_NANO is defined, we insist it is set, this way we can be sure
 * that ntp_gettime is returning a timespec and not a timeval.
 *
 * TODO: The correct way to fix this is:
 * 1. If ntptimeval contains a struct timespec - then use nano's period.
 * 2. else if STA_NANO is set, then use nano's.
 * 3. else ???(mills solution requires root - ugh)
 *    will this work?
 *    (do a timing test:
 *          gettimeofday(A);
 *          getntptime(B);
 *          nanosleep(1000);
 *          getntptime(C);
 *          gettimeofday(D);
 *
 *    1. Interpret B and C as usecs
 *          if(D-A < C-B)
 *              nano's
 *          else
 *              usecs
 */
int
_BWLInitNTP(
        BWLContext  ctx
        )
{
    char    *toffstr=NULL;

    ntpsyscall_fails = 1;

    /*
     * If this system has the ntp system calls, use them. Otherwise,
     * assume the clock is not synchronized.
     * (Setting SyncFuzz is advisable in this case.)
     */
#ifdef HAVE_SYS_TIMEX_H
    {
        struct timex    ntp_conf;

        memset(&ntp_conf,0,sizeof(ntp_conf));
        if( ntp_adjtime(&ntp_conf) < 0){
            BWLError(ctx,BWLErrWARNING,BWLErrUNKNOWN,
                    "ntp_adjtime(): %M");
            BWLError(ctx,BWLErrWARNING,BWLErrUNKNOWN,
                    "NTP: BWCTL will not be able to verify synchronization on this system");
            goto NOADJTIME;
        }
        ntpsyscall_fails = 0;

        if(ntp_conf.status & STA_UNSYNC){
            BWLError(ctx,BWLErrWARNING,BWLErrUNKNOWN,
                    "NTP: Status UNSYNC (clock offset problems likely)");
        }
        else{
            ntp_unsync = 0;
        }

#ifdef STA_NANO
        if( !(ntp_conf.status & STA_NANO)){
            BWLError(ctx,BWLErrFATAL,BWLErrUNKNOWN,
                    "NTP: STA_NANO should be set. Make sure ntpd is running, and your NTP configuration is good.");
        }
#endif  /* STA_NANO */
    }
NOADJTIME:
#else
    BWLError(ctx,BWLErrWARNING,BWLErrUNKNOWN,
            "NTP: Status Unknown (NTP syscalls unavailable)");
#endif

    if( !(toffstr = getenv("BWCTL_DEBUG_TIMEOFFSET"))){
        timeoffset.tv_sec = 0;
        timeoffset.tv_usec = 0;
    }
    else{
        double  td;
        double  td2;
        char    *estr=NULL;

        td = strtod(toffstr,&estr);
        if((toffstr == estr) || (errno == ERANGE)){
            BWLError(ctx,BWLErrFATAL,BWLErrUNKNOWN,
                    "Invalid BWCTL_DEBUG_TIMEOFFSET env var: %s",
                    toffstr);
            return 1;
        }

        if(td == 0.0){
            sign_timeoffset = 0;
        }
        else{
            if(td > 0.0){
                sign_timeoffset = 1;
            }
            else{
                sign_timeoffset = -1;
                td = -td;
            }

            /*
             * remove seconds from td and assign to tv_sec
             */
            td2 = trunc(td);
            timeoffset.tv_sec = (long int)td2;
            td -= td2;

            /*
             * convert fractional seconds from td into usec
             */
            td *= 1000000;
            td2 = trunc(td);
            timeoffset.tv_usec = (long int)td2;

            BWLError(ctx,BWLErrDEBUG,BWLErrUNKNOWN,
                    "BWCTL_DEBUG_TIMEOFFSET: sec=%c%lu, usec=%lu",
                    (sign_timeoffset > 0)?'+':'-',
                    timeoffset.tv_sec,timeoffset.tv_usec);
        }
    }

    return 0;
}
struct timespec *
_BWLGetTimespec(
        BWLContext      ctx,
        struct timespec *ts,
        uint32_t        *esterr,
        int             *synchronized
        )
{
    struct timeval      tod;
    static BWLBoolean   check_fuzz=False;
    static long         syncfuzz = 0;
    uint32_t            maxerr;

    /*
     * By default, assume the clock is unsynchronized, but that it
     * is still accurate to within .1 second (100000 usec's).
     */
    *synchronized = 0;
    maxerr = (uint32_t)100000;

    if(gettimeofday(&tod,NULL) != 0){
        BWLError(ctx,BWLErrFATAL,BWLErrUNKNOWN,"gettimeofday(): %M");
        return NULL;
    }

    if(sign_timeoffset > 0){
        tvaladd(&tod,&timeoffset);
    }
    else if(sign_timeoffset < 0){
        tvalsub(&tod,&timeoffset);
    }

    /* assign localtime */
    ts->tv_sec = tod.tv_sec;
    ts->tv_nsec = tod.tv_usec * 1000;   /* convert to nsecs */

    /*
     * If ntp system calls are available use them to determine
     * time error.
     */
#ifdef HAVE_SYS_TIMEX_H
    if( !ntpsyscall_fails){
        struct timex    ntp_conf;
        int             n;

        memset(&ntp_conf,0,sizeof(ntp_conf));
        n = ntp_adjtime(&ntp_conf);

        /*
         * Check sync flag
         */
        if(n < 0){
            BWLError(ctx,BWLErrWARNING,BWLErrUNKNOWN,
                    "ntp_adjtime(): %M");
            BWLError(ctx,BWLErrWARNING,BWLErrUNKNOWN,
                    "NTP: BWCTL will not be able to verify synchronization on this system");
            ntpsyscall_fails = 1;
        }
        else if(ntp_conf.status & STA_UNSYNC){
            /*
             * Report the unsync state - but only at level "info".
             * This is reported at level "warning" at initialization.
             * (Only report if this is a state change.)
             */
            if(!ntp_unsync){
                BWLError(ctx,BWLErrINFO,BWLErrUNKNOWN,
                        "NTP: Status UNSYNC");
                ntp_unsync = 1;
            }
        }
        else{
            long    sec;

            if(ntp_unsync){
                BWLError(ctx,BWLErrINFO,BWLErrUNKNOWN,
                        "NTP: Status SYNC (recovered)");
                ntp_unsync = 0;
            }

            *synchronized = 1;

            /*
             * Apply ntp "offset"
             */
#ifdef STA_NANO
            if(ntp_conf.status & STA_NANO)
                sec = 1000000000;
            else
                sec = 1000000;
#else
            sec = 1000000;
#endif
            /*
             * Convert negative offsets to positive ones by
             * decreasing the ts->tv_sec.
             */
            while(ntp_conf.offset < 0){
                ts->tv_sec--;
                ntp_conf.offset += sec;
            }

            /*
             * Make sure the "offset" is less than 1 second
             */
            while(ntp_conf.offset >= sec){
                ts->tv_sec++;
                ntp_conf.offset -= sec;
            }

#ifdef STA_NANO
            if(!(ntp_conf.status & STA_NANO))
                ntp_conf.offset *= 1000;
#else
            ntp_conf.offset *= 1000;
#endif
            ts->tv_nsec += ntp_conf.offset;
            if(ts->tv_nsec >= 1000000000){
                ts->tv_sec++;
                ts->tv_nsec -= 1000000000;
            }

            maxerr = (uint32_t)ntp_conf.maxerror;
        }
    }
#endif

    /*
     * See if SyncFuzz was set.
     * Used to increase tolerance for incomplete NTP configs.
     */
    if(!check_fuzz){
        double  tdbl;

        if(BWLContextConfigGetDbl(ctx,BWLSyncFuzz,&tdbl)){
            /*
             * BWLSyncFuzz is specified as a double (sec)
             * ntp errors are long (usec) convert.
             */
            syncfuzz = (long int)(tdbl * 1000000);
        }
        check_fuzz=True;
    }

    /*
     * Set estimated error
     */
    *esterr = maxerr + syncfuzz;

    /*
     * Make sure a non-zero error is always returned - perfection
     * is not allowed. ;)
     */
    if(!*esterr){
        *esterr = 1;
    }

    return ts;
}
/*
 * Function:    _OWPInitNTP
 *
 * Description:
 *      Initialize NTP.
 *
 * In Args:
 *
 * Out Args:
 *
 * Scope:
 * Returns:
 * Side Effect:
 *
 * If STA_NANO is defined, we insist it is set, this way we can be sure
 * that ntp_gettime is returning a timespec and not a timeval.
 *
 * TODO: The correct way to fix this is:
 * 1. If ntptimeval contains a struct timespec - then use nano's period.
 * 2. else if STA_NANO is set, then use nano's.
 * 3. else ???(mills solution requires root - ugh)
 *    will this work?
 *    (do a timing test:
 *          gettimeofday(A);
 *          getntptime(B);
 *          nanosleep(1000);
 *          getntptime(C);
 *          gettimeofday(D);
 *
 *    1. Interpret B and C as usecs
 *          if(D-A < C-B)
 *              nano's
 *          else
 *              usecs
 */
int
_OWPInitNTP(
        OWPContext  ctx
        )
{
    char    *toffstr=NULL;

    /*
     * If the system has NTP system calls use them. Otherwise
     * timestamps will be marked UNSYNC.
     */
#ifdef HAVE_SYS_TIMEX_H
    {
        struct timex    ntp_conf;

        memset(&ntp_conf,0,sizeof(ntp_conf));
        if(ntp_adjtime(&ntp_conf) < 0){
            OWPError(ctx,OWPErrFATAL,OWPErrUNKNOWN,
                    "ntp_adjtime(): %M");
            return 1;
        }

        if(ntp_conf.status & STA_UNSYNC){
            OWPError(ctx,OWPErrFATAL,OWPErrUNKNOWN,
                    "NTP: Status UNSYNC (clock offset issues likely)");
        }

#ifdef STA_NANO
        if( !(ntp_conf.status & STA_NANO)){
            OWPError(ctx,OWPErrFATAL,OWPErrUNKNOWN,
                    "NTP: STA_NANO should be set. Make sure ntpd is running, and your NTP configuration is good.");
        }
#endif
    }
#else
    OWPError(ctx,OWPErrFATAL,OWPErrUNKNOWN,
            "NTP syscalls unavail: Status UNSYNC (clock offset issues likely)");
#endif  /* HAVE_SYS_TIMEX_H */

    if( !(toffstr = getenv("OWAMP_DEBUG_TIMEOFFSET"))){
        timeoffset.tv_sec = 0;
        timeoffset.tv_usec = 0;
    }
    else{
        double  td;
        char    *estr=NULL;

        td = strtod(toffstr,&estr);
        if((toffstr == estr) || (errno == ERANGE)){
            OWPError(ctx,OWPErrFATAL,OWPErrUNKNOWN,
                    "Invalid OWAMP_DEBUG_TIMEOFFSET env var: %s",
                    toffstr);
            return 1;
        }

        if(td == 0.0){
            sign_timeoffset = 0;
        }
        else{
            if(td > 0.0){
                sign_timeoffset = 1;
            }
            else{
                sign_timeoffset = -1;
                td = -td;
            }

            timeoffset.tv_sec = trunc(td);
            td -= timeoffset.tv_sec;
            td *= 1000000;
            timeoffset.tv_usec = trunc(td);

            OWPError(ctx,OWPErrDEBUG,OWPErrUNKNOWN,
                    "OWAMP_DEBUG_TIMEOFFSET: sec=%c%lu, usec=%lu",
                    (sign_timeoffset > 0)?'+':'-',
                    timeoffset.tv_sec,timeoffset.tv_usec);
        }
    }

    return 0;
}
struct timespec *
_OWPGetTimespec(
        OWPContext      ctx __attribute__((unused)),
        struct timespec *ts,
        uint32_t        *esterr,
        uint8_t         *sync
        )
{
    struct timeval  tod;
    uint32_t        timeerr;

    /*
     * By default, assume the clock is unsynchronized.
     */
    *sync = 0;
    timeerr = (uint32_t)0;

    if(gettimeofday(&tod,NULL) != 0){
        OWPError(ctx,OWPErrFATAL,OWPErrUNKNOWN,"gettimeofday(): %M");
        return NULL;
    }

    if(sign_timeoffset > 0){
        tvaladd(&tod,&timeoffset);
    }
    else if(sign_timeoffset < 0){
        tvalsub(&tod,&timeoffset);
    }

    /* assign localtime */
    ts->tv_sec = tod.tv_sec;
    ts->tv_nsec = tod.tv_usec * 1000;   /* convert to nsecs */

    /*
     * If ntp system calls are available use them to determine
     * time error.
     */
#ifdef HAVE_SYS_TIMEX_H
    {
        struct timex    ntp_conf;

        memset(&ntp_conf,0,sizeof(ntp_conf));
        if(ntp_adjtime(&ntp_conf) < 0){
            OWPError(ctx,OWPErrFATAL,OWPErrUNKNOWN,
                    "ntp_adjtime(): %M");
            return NULL;
        }

        /*
         * Check sync flag
         */
        if(!(ntp_conf.status & STA_UNSYNC)){
            long    sec;

            *sync = 1;

            /*
             * Apply ntp "offset"
             */
#ifdef STA_NANO
            sec = 1000000000;
#else
            sec = 1000000;
#endif
            /*
             * Convert negative offsets to positive ones by
             * decreasing the ts->tv_sec.
             */
            while(ntp_conf.offset < 0){
                ts->tv_sec--;
                ntp_conf.offset += sec;
            }

            /*
             * Make sure the "offset" is less than 1 second
             */
            while(ntp_conf.offset >= sec){
                ts->tv_sec++;
                ntp_conf.offset -= sec;
            }

#ifndef STA_NANO
            ntp_conf.offset *= 1000;
#endif
            ts->tv_nsec += ntp_conf.offset;
            if(ts->tv_nsec >= 1000000000){
                ts->tv_sec++;
                ts->tv_nsec -= 1000000000;
            }

            timeerr = (uint32_t)ntp_conf.esterror;
        }
    }
#endif

    /*
     * Set estimated error
     */
    *esterr = timeerr;

    /*
     * Make sure a non-zero error is always returned - perfection
     * is not allowed if SYNC is true. ;)
     */
    if(*sync && !*esterr){
        *esterr = 1;
    }

    return ts;
}
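Both GetTimespec routines fold the kernel offset into a timespec with the same normalization loops. A standalone version of that pattern, with illustrative helper and test values:

#include <stdio.h>
#include <time.h>

/* Fold a signed nanosecond adjustment into a timespec so that
 * 0 <= tv_nsec < 1e9, mirroring the loops in the functions above. */
static void
timespec_add_ns(struct timespec *ts, long offset_ns)
{
    ts->tv_nsec += offset_ns;
    while (ts->tv_nsec < 0) {
        ts->tv_sec--;
        ts->tv_nsec += 1000000000L;
    }
    while (ts->tv_nsec >= 1000000000L) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000L;
    }
}

int
main(void)
{
    struct timespec ts = { 100, 900000000L };   /* 100.9 s  */

    timespec_add_ns(&ts, 250000000L);           /* + 0.25 s */
    printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec); /* 101.15 */
    return 0;
}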
void tickadj(const bool json, const int newtick)
{
#ifndef HAVE_ADJTIMEX
    UNUSED_ARG(json);
    UNUSED_ARG(newtick);
    fputs("ntpfrob: adjtimex not available\n", stderr);
    exit(1);
#else
    if (newtick != 0) {
#ifdef STRUCT_TIMEX_HAS_TIME_TICK
        if ( (txc.time_tick = newtick) < 1 )
#else
        if ( (txc.tick = newtick) < 1 )
#endif /* STRUCT_TIMEX_HAS_TIME_TICK */
        {
            fprintf(stderr,
                "ntpfrob: silly value for tick: %d\n", newtick);
            exit(1);
        }
#ifdef MOD_TIMETICK
        txc.modes = MOD_TIMETICK;
#else
#ifdef STRUCT_TIMEX_HAS_MODES
        txc.modes = ADJ_TICK;
#else
        txc.mode = ADJ_TICK;
#endif /* STRUCT_TIMEX_HAS_MODES */
#endif /* MOD_TIMETICK */
    } else {
#ifdef MOD_TIMETICK
        txc.modes = 0;
#else
#ifdef STRUCT_TIMEX_HAS_MODES
        txc.modes = 0;
#else
        txc.mode = 0;
#endif /* STRUCT_TIMEX_HAS_MODES */
#endif /* MOD_TIMETICK */
    }

    if (ntp_adjtime(&txc) < 0) {
        perror("ntp_adjtime");
    } else {
#ifdef STRUCT_TIMEX_HAS_TIME_TICK
        if (json)
            printf("{\"tick\":%ld,\"tick_adj\":%ld}\n",
                txc.time_tick, txc.tickadj);
        else
            printf("tick = %ld\ntick_adj = %ld\n",
                txc.time_tick, txc.tickadj);
#else
        if (json)
            printf("{\"tick\":%ld}\n", txc.tick);
        else
            printf("tick = %ld\n", txc.tick);
#endif /* STRUCT_TIMEX_HAS_TIME_TICK */
    }
#endif /* HAVE_ADJTIMEX */
}
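Stripped of its portability conditionals, the read path reduces to a single no-op call. A minimal standalone sketch, assuming a Linux struct timex (which exposes the tick field):

#include <stdio.h>
#include <string.h>
#include <sys/timex.h>

/* Read the kernel tick: modes = 0 changes nothing, and the kernel
 * fills in the current fields. tx.tick is the number of microseconds
 * added to the software clock on every timer interrupt. */
int
main(void)
{
    struct timex tx;

    memset(&tx, 0, sizeof(tx));
    tx.modes = 0;
    if (ntp_adjtime(&tx) < 0) {
        perror("ntp_adjtime");
        return 1;
    }
    printf("tick = %ld us\n", tx.tick);
    return 0;
}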
/*
 * loop_config - configure the loop filter
 *
 * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops.
 */
void
loop_config(
    int     item,
    double  freq
    )
{
    int     i;

    switch (item) {

    case LOOP_DRIFTINIT:
#ifndef LOCKCLOCK
#ifdef KERNEL_PLL
        /*
         * Assume the kernel supports the ntp_adjtime() syscall.
         * If that syscall works, initialize the kernel time
         * variables. Otherwise, continue leaving no harm
         * behind. While at it, ask to set nanosecond mode. If
         * the kernel agrees, rejoice; otherwise, it does only
         * microseconds.
         */
        if (mode_ntpdate)
            break;

        pll_control = 1;
        memset(&ntv, 0, sizeof(ntv));
#ifdef STA_NANO
        ntv.modes = MOD_BITS | MOD_NANO;
#else /* STA_NANO */
        ntv.modes = MOD_BITS;
#endif /* STA_NANO */
        ntv.maxerror = MAXDISPERSE;
        ntv.esterror = MAXDISPERSE;
        ntv.status = STA_UNSYNC;
#ifdef SIGSYS
        /*
         * Use sigsetjmp() to save state and then call
         * ntp_adjtime(); if it fails, then siglongjmp() is used
         * to return control
         */
        newsigsys.sa_handler = pll_trap;
        newsigsys.sa_flags = 0;
        if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
            msyslog(LOG_ERR,
                "sigaction() fails to save SIGSYS trap: %m");
            pll_control = 0;
        }
        if (sigsetjmp(env, 1) == 0)
            ntp_adjtime(&ntv);
        if ((sigaction(SIGSYS, &sigsys,
            (struct sigaction *)NULL))) {
            msyslog(LOG_ERR,
                "sigaction() fails to restore SIGSYS trap: %m");
            pll_control = 0;
        }
#else /* SIGSYS */
        ntp_adjtime(&ntv);
#endif /* SIGSYS */

        /*
         * Save the result status and light up an external clock
         * if available.
         */
        pll_status = ntv.status;
        if (pll_control) {
#ifdef STA_NANO
            if (pll_status & STA_CLK)
                ext_enable = 1;
#endif /* STA_NANO */
            NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
                msyslog(LOG_INFO,
                    "kernel time sync status %04x", pll_status);
        }
#endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
        break;

    case LOOP_DRIFTCOMP:
#ifndef LOCKCLOCK
        /*
         * If the frequency value is reasonable, set the initial
         * frequency to the given value and the state to S_FSET.
         * Otherwise, the drift file may be missing or broken,
         * so set the frequency to zero. This erases past
         * history should somebody break something.
         */
        if (freq <= NTP_MAXFREQ && freq >= -NTP_MAXFREQ) {
            drift_comp = freq;
            rstclock(S_FSET, 0, 0);
        } else {
            drift_comp = 0;
        }

#ifdef KERNEL_PLL
        /*
         * Sanity check. If the kernel is available, load the
         * frequency and light up the loop. Make sure the offset
         * is zero to cancel any previous nonsense. If you don't
         * want this initialization, remove the ntp.drift file.
         */
        if (pll_control && kern_enable) {
            memset((char *)&ntv, 0, sizeof(ntv));
            ntv.modes = MOD_OFFSET | MOD_FREQUENCY;
            ntv.freq = (int32)(drift_comp * 65536e6);
            ntp_adjtime(&ntv);
        }
#endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
        break;

    case LOOP_KERN_CLEAR:
#ifndef LOCKCLOCK
#ifdef KERNEL_PLL
        /* Completely turn off the kernel time adjustments. */
        if (pll_control) {
            memset((char *)&ntv, 0, sizeof(ntv));
            ntv.modes = MOD_BITS | MOD_OFFSET | MOD_FREQUENCY;
            ntv.status = STA_UNSYNC;
            ntp_adjtime(&ntv);
            NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
                msyslog(LOG_INFO,
                    "kernel time sync disabled %04x", ntv.status);
        }
#endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
        break;

    /*
     * Special tinker variables for Ulrich Windl. Very dangerous.
     */
    case LOOP_MAX:      /* step threshold */
        clock_max = freq;
        break;

    case LOOP_PANIC:    /* panic threshold */
        clock_panic = freq;
        break;

    case LOOP_PHI:      /* dispersion rate */
        clock_phi = freq;
        break;

    case LOOP_MINSTEP:  /* watchdog bark */
        clock_minstep = freq;
        break;

    case LOOP_ALLAN:    /* Allan intercept */
        allan_xpt = freq;
        break;

    case LOOP_HUFFPUFF: /* huff-n'-puff filter length */
        if (freq < HUFFPUFF)
            freq = HUFFPUFF;
        sys_hufflen = (int)(freq / HUFFPUFF);
        sys_huffpuff = (double *)emalloc(sizeof(double) *
            sys_hufflen);
        for (i = 0; i < sys_hufflen; i++)
            sys_huffpuff[i] = 1e9;
        sys_mindly = 1e9;
        break;

    case LOOP_FREQ:     /* initial frequency */
        drift_comp = freq / 1e6;
        rstclock(S_FSET, 0, 0);
        break;
    }
}
/*
 * local_poll - called by the transmit procedure
 *
 * LOCKCLOCK: If the kernel supports the nanokernel or microkernel
 * system calls, the leap bits are extracted from the kernel. If there
 * is a kernel error or the kernel leap bits are set to 11, the NTP leap
 * bits are set to 11 and the stratum is set to infinity. Otherwise, the
 * NTP leap bits are set to the kernel leap bits and the stratum is set
 * as fudged. This behavior does not faithfully follow the
 * specification, but is probably more appropriate in a multiple-server
 * national laboratory network.
 */
static void
local_poll(
    int unit,
    struct peer *peer
    )
{
#if defined(KERNEL_PLL) && defined(LOCKCLOCK)
    struct timex ntv;
#endif /* KERNEL_PLL LOCKCLOCK */
    struct refclockproc *pp;

    /*
     * Do no evil unless the house is dark or lit with our own lamp.
     */
    if (!(sys_peer == NULL || sys_peer == peer))
        return;

#if defined(VMS) && defined(VMS_LOCALUNIT)
    if (unit == VMS_LOCALUNIT) {
        extern void vms_local_poll(struct peer *);

        vms_local_poll(peer);
        return;
    }
#endif /* VMS && VMS_LOCALUNIT */

    pp = peer->procptr;
    pp->polls++;

    /*
     * Ramble through the usual filtering and grooming code, which
     * is essentially a no-op and included mostly for pretty
     * billboards.
     */
    poll_time = current_time;
    refclock_process_offset(pp, pp->lastrec, pp->lastrec, 0);

    /*
     * If another process is disciplining the system clock, we set
     * the leap bits and quality indicators from the kernel.
     */
#if defined(KERNEL_PLL) && defined(LOCKCLOCK)
    memset(&ntv, 0, sizeof ntv);
    switch (ntp_adjtime(&ntv)) {
    case TIME_OK:
        pp->leap = LEAP_NOWARNING;
        peer->stratum = pp->stratum;
        break;

    case TIME_INS:
        pp->leap = LEAP_ADDSECOND;
        peer->stratum = pp->stratum;
        break;

    case TIME_DEL:
        pp->leap = LEAP_DELSECOND;
        peer->stratum = pp->stratum;
        break;

    default:
        pp->leap = LEAP_NOTINSYNC;
        peer->stratum = STRATUM_UNSPEC;
    }
    pp->disp = 0;
    pp->jitter = 0;
#else /* KERNEL_PLL LOCKCLOCK */
    pp->disp = DISPERSION;
    pp->jitter = 0;
#endif /* KERNEL_PLL LOCKCLOCK */
    pp->lastref = pp->lastrec;
    refclock_receive(peer);
}
/*
 * local_poll - called by the transmit procedure
 *
 * LOCKCLOCK: If the kernel supports the nanokernel or microkernel
 * system calls, the leap bits are extracted from the kernel. If there
 * is a kernel error or the kernel leap bits are set to 11, the NTP leap
 * bits are set to 11 and the stratum is set to infinity. Otherwise, the
 * NTP leap bits are set to the kernel leap bits and the stratum is set
 * as fudged. This behavior does not faithfully follow the
 * specification, but is probably more appropriate in a multiple-server
 * national laboratory network.
 */
static void
local_poll(
    int unit,
    struct peer *peer
    )
{
#if defined(KERNEL_PLL) && defined(LOCKCLOCK)
    struct timex ntv;
#endif /* KERNEL_PLL LOCKCLOCK */
    struct refclockproc *pp;

#if defined(VMS) && defined(VMS_LOCALUNIT)
    if (unit == VMS_LOCALUNIT) {
        extern void vms_local_poll(struct peer *);

        vms_local_poll(peer);
        return;
    }
#endif /* VMS && VMS_LOCALUNIT */

    pp = peer->procptr;
    pp->polls++;

    /*
     * Ramble through the usual filtering and grooming code, which
     * is essentially a no-op and included mostly for pretty
     * billboards. We allow a one-time time adjustment using fudge
     * time1 (s) and a continuous frequency adjustment using fudge
     * time 2 (ppm).
     */
    get_systime(&pp->lastrec);
    pp->fudgetime1 += pp->fudgetime2 * 1e-6 * (current_time -
        poll_time);
    poll_time = current_time;
    refclock_process_offset(pp, pp->lastrec, pp->lastrec,
        pp->fudgetime1);

    /*
     * If another process is disciplining the system clock, we set
     * the leap bits and quality indicators from the kernel.
     */
#if defined(KERNEL_PLL) && defined(LOCKCLOCK)
    memset(&ntv, 0, sizeof ntv);
    switch (ntp_adjtime(&ntv)) {
    case TIME_OK:
        pp->leap = LEAP_NOWARNING;
        peer->stratum = pp->stratum;
        break;

    case TIME_INS:
        pp->leap = LEAP_ADDSECOND;
        peer->stratum = pp->stratum;
        break;

    case TIME_DEL:
        pp->leap = LEAP_DELSECOND;
        peer->stratum = pp->stratum;
        break;

    default:
        pp->leap = LEAP_NOTINSYNC;
        peer->stratum = STRATUM_UNSPEC;
    }
    pp->disp = 0;
    pp->jitter = 0;
#else /* KERNEL_PLL LOCKCLOCK */
    pp->leap = LEAP_NOWARNING;
    pp->disp = DISPERSION;
    pp->jitter = 0;
#endif /* KERNEL_PLL LOCKCLOCK */
    pp->lastref = pp->lastrec;
    refclock_receive(peer);
    pp->fudgetime1 = 0;
}
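Both local_poll variants map the ntp_adjtime() return code to NTP leap bits. A standalone decoder for that return value, covering the remaining TIME_* states as well:

#include <stdio.h>
#include <string.h>
#include <sys/timex.h>

/* The ntp_adjtime() return value encodes clock state, including
 * pending leap seconds, independently of the status bits. */
int
main(void)
{
    struct timex tx;

    memset(&tx, 0, sizeof(tx));
    switch (ntp_adjtime(&tx)) {
    case TIME_OK:
        puts("in sync, no leap pending");
        break;
    case TIME_INS:
        puts("leap second will be inserted at end of day");
        break;
    case TIME_DEL:
        puts("leap second will be deleted at end of day");
        break;
    case TIME_OOP:
        puts("leap second in progress");
        break;
    case TIME_WAIT:
        puts("leap second has occurred");
        break;
    default:
        puts("clock not synchronized");
    }
    return 0;
}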
/*
 * loop_config - configure the loop filter
 *
 * LOCKCLOCK: The LOOP_DRIFTINIT and LOOP_DRIFTCOMP cases are no-ops.
 */
void
loop_config(
    int     item,
    double  freq
    )
{
    int     i;

#ifdef DEBUG
    if (debug > 1)
        printf("loop_config: item %d freq %f\n", item, freq);
#endif
    switch (item) {

    /*
     * We first assume the kernel supports the ntp_adjtime()
     * syscall. If that syscall works, initialize the kernel time
     * variables. Otherwise, continue leaving no harm behind.
     */
    case LOOP_DRIFTINIT:
#ifndef LOCKCLOCK
#ifdef KERNEL_PLL
        if (mode_ntpdate)
            break;

        pll_control = 1;
        memset(&ntv, 0, sizeof(ntv));
        ntv.modes = MOD_BITS;
        ntv.status = STA_PLL;
        ntv.maxerror = MAXDISPERSE;
        ntv.esterror = MAXDISPERSE;
        ntv.constant = sys_poll;
#ifdef SIGSYS
        /*
         * Use sigsetjmp() to save state and then call
         * ntp_adjtime(); if it fails, then siglongjmp() is used
         * to return control
         */
        newsigsys.sa_handler = pll_trap;
        newsigsys.sa_flags = 0;
        if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
            msyslog(LOG_ERR,
                "sigaction() fails to save SIGSYS trap: %m");
            pll_control = 0;
        }
        if (sigsetjmp(env, 1) == 0)
            ntp_adjtime(&ntv);
        if ((sigaction(SIGSYS, &sigsys,
            (struct sigaction *)NULL))) {
            msyslog(LOG_ERR,
                "sigaction() fails to restore SIGSYS trap: %m");
            pll_control = 0;
        }
#else /* SIGSYS */
        ntp_adjtime(&ntv);
#endif /* SIGSYS */

        /*
         * Save the result status and light up an external clock
         * if available.
         */
        pll_status = ntv.status;
        if (pll_control) {
#ifdef STA_NANO
            if (pll_status & STA_CLK)
                ext_enable = 1;
#endif /* STA_NANO */
            report_event(EVNT_KERN, NULL,
                "kernel time sync enabled");
        }
#endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
        break;

    /*
     * Initialize the frequency. If the frequency file is missing or
     * broken, set the initial frequency to zero and set the state
     * to NSET. Otherwise, set the initial frequency to the given
     * value and the state to FSET.
     */
    case LOOP_DRIFTCOMP:
#ifndef LOCKCLOCK
        if (freq > NTP_MAXFREQ || freq < -NTP_MAXFREQ) {
            set_freq(0);
            rstclock(EVNT_NSET, 0);
        } else {
            set_freq(freq);
            rstclock(EVNT_FSET, 0);
        }
#endif /* LOCKCLOCK */
        break;

    /*
     * Disable the kernel at shutdown. The microkernel just abandons
     * ship. The nanokernel carefully cleans up so applications can
     * see this. Note the last programmed offset and frequency are
     * left in place.
     */
    case LOOP_KERN_CLEAR:
#ifndef LOCKCLOCK
#ifdef KERNEL_PLL
        if (pll_control && kern_enable) {
            memset((char *)&ntv, 0, sizeof(ntv));
            ntv.modes = MOD_STATUS;
            ntv.status = STA_UNSYNC;
            ntp_adjtime(&ntv);
            report_event(EVNT_KERN, NULL,
                "kernel time sync disabled");
        }
#endif /* KERNEL_PLL */
#endif /* LOCKCLOCK */
        break;

    /*
     * Tinker command variables for Ulrich Windl. Very dangerous.
     */
    case LOOP_ALLAN:    /* Allan intercept (log2) (allan) */
        allan_xpt = (u_char)freq;
        break;

    case LOOP_CODEC:    /* audio codec frequency (codec) */
        clock_codec = freq / 1e6;
        break;

    case LOOP_PHI:      /* dispersion threshold (dispersion) */
        clock_phi = freq / 1e6;
        break;

    case LOOP_FREQ:     /* initial frequency (freq) */
        set_freq(freq / 1e6);
        rstclock(EVNT_FSET, 0);
        break;

    case LOOP_HUFFPUFF: /* huff-n'-puff length (huffpuff) */
        if (freq < HUFFPUFF)
            freq = HUFFPUFF;
        sys_hufflen = (int)(freq / HUFFPUFF);
        sys_huffpuff = (double *)emalloc(sizeof(double) *
            sys_hufflen);
        for (i = 0; i < sys_hufflen; i++)
            sys_huffpuff[i] = 1e9;
        sys_mindly = 1e9;
        break;

    case LOOP_PANIC:    /* panic threshold (panic) */
        clock_panic = freq;
        break;

    case LOOP_MAX:      /* step threshold (step) */
        clock_max = freq;
        if (clock_max == 0 || clock_max > 0.5)
            kern_enable = 0;
        break;

    case LOOP_MINSTEP:  /* stepout threshold (stepout) */
        clock_minstep = freq;
        break;

    case LOOP_LEAP:     /* not used */
    default:
        msyslog(LOG_NOTICE,
            "loop_config: unsupported option %d", item);
    }
}
/*
 * local_clock - the NTP logical clock loop filter.
 *
 * Return codes:
 * -1   update ignored: exceeds panic threshold
 * 0    update ignored: popcorn or exceeds step threshold
 * 1    clock was slewed
 * 2    clock was stepped
 *
 * LOCKCLOCK: The only thing this routine does is set the
 * sys_rootdispersion variable equal to the peer dispersion.
 */
int
local_clock(
    struct peer *peer,      /* synch source peer structure */
    double fp_offset        /* clock offset (s) */
    )
{
    int     rval;           /* return code */
    u_long  mu;             /* interval since last update (s) */
    double  flladj;         /* FLL frequency adjustment (ppm) */
    double  plladj;         /* PLL frequency adjustment (ppm) */
    double  clock_frequency; /* clock frequency adjustment (ppm) */
    double  dtemp, etemp;   /* double temps */
#ifdef OPENSSL
    u_int32 *tpt;
    int     i;
    u_int   len;
    long    togo;
#endif /* OPENSSL */

    /*
     * If the loop is opened or the NIST LOCKCLOCK is in use,
     * monitor and record the offsets anyway in order to determine
     * the open-loop response and then go home.
     */
#ifdef DEBUG
    if (debug)
        printf(
            "local_clock: assocID %d offset %.9f freq %.3f state %d\n",
            peer->associd, fp_offset, drift_comp * 1e6, state);
#endif
#ifdef LOCKCLOCK
    return (0);

#else /* LOCKCLOCK */
    if (!ntp_enable) {
        record_loop_stats(fp_offset, drift_comp, clock_jitter,
            clock_stability, sys_poll);
        return (0);
    }

    /*
     * If the clock is way off, panic is declared. The clock_panic
     * defaults to 1000 s; if set to zero, the panic will never
     * occur. The allow_panic defaults to FALSE, so the first panic
     * will exit. It can be set TRUE by a command line option, in
     * which case the clock will be set anyway and time marches on.
     * But, allow_panic will be set FALSE when the update is less
     * than the step threshold; so, subsequent panics will exit.
     */
    if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
        !allow_panic) {
        msyslog(LOG_ERR,
            "time correction of %.0f seconds exceeds sanity limit (%.0f); set clock manually to the correct UTC time.",
            fp_offset, clock_panic);
        return (-1);
    }

    /*
     * If simulating ntpdate, set the clock directly, rather than
     * using the discipline. The clock_max defines the step
     * threshold, above which the clock will be stepped instead of
     * slewed. The value defaults to 128 ms, but can be set to even
     * unreasonable values. If set to zero, the clock will never be
     * stepped. Note that a slew will persist beyond the life of
     * this program.
     *
     * Note that if ntpdate is active, the terminal does not detach,
     * so the termination comments print directly to the console.
     */
    if (mode_ntpdate) {
        if (fabs(fp_offset) > clock_max && clock_max > 0) {
            step_systime(fp_offset);
            msyslog(LOG_NOTICE, "time reset %+.6f s",
                fp_offset);
            printf("ntpd: time set %+.6fs\n", fp_offset);
        } else {
            adj_systime(fp_offset);
            msyslog(LOG_NOTICE, "time slew %+.6f s",
                fp_offset);
            printf("ntpd: time slew %+.6fs\n", fp_offset);
        }
        record_loop_stats(fp_offset, drift_comp, clock_jitter,
            clock_stability, sys_poll);
        exit (0);
    }

    /*
     * The huff-n'-puff filter finds the lowest delay in the recent
     * interval. This is used to correct the offset by one-half the
     * difference between the sample delay and minimum delay. This
     * is most effective if the delays are highly asymmetric and
     * clockhopping is avoided and the clock frequency wander is
     * relatively small.
     *
     * Note either there is no prefer peer or this update is from
     * the prefer peer.
     */
    if (sys_huffpuff != NULL && (sys_prefer == NULL ||
        sys_prefer == peer)) {
        if (peer->delay < sys_huffpuff[sys_huffptr])
            sys_huffpuff[sys_huffptr] = peer->delay;
        if (peer->delay < sys_mindly)
            sys_mindly = peer->delay;
        if (fp_offset > 0)
            dtemp = -(peer->delay - sys_mindly) / 2;
        else
            dtemp = (peer->delay - sys_mindly) / 2;
        fp_offset += dtemp;
#ifdef DEBUG
        if (debug)
            printf(
                "local_clock: size %d mindly %.6f huffpuff %.6f\n",
                sys_hufflen, sys_mindly, dtemp);
#endif
    }

    /*
     * Clock state machine transition function. This is where the
     * action is and defines how the system reacts to large phase
     * and frequency errors. There are two main regimes: when the
     * offset exceeds the step threshold and when it does not.
     * However, if the step threshold is set to zero, a step will
     * never occur. See the instruction manual for the details how
     * these actions interact with the command line options.
     *
     * Note the system poll is set to minpoll only if the clock is
     * stepped. Note also the kernel is disabled if step is
     * disabled or greater than 0.5 s.
     */
    clock_frequency = flladj = plladj = 0;
    mu = peer->epoch - sys_clocktime;
    if (clock_max == 0 || clock_max > 0.5)
        kern_enable = 0;
    rval = 1;
    if (fabs(fp_offset) > clock_max && clock_max > 0) {
        switch (state) {

        /*
         * In S_SYNC state we ignore the first outlier and
         * switch to S_SPIK state.
         */
        case S_SYNC:
            state = S_SPIK;
            return (0);

        /*
         * In S_FREQ state we ignore outliers and inliers. At
         * the first outlier after the stepout threshold,
         * compute the apparent frequency correction and step
         * the phase.
         */
        case S_FREQ:
            if (mu < clock_minstep)
                return (0);

            clock_frequency = (fp_offset - clock_offset) / mu;

            /* fall through to S_SPIK */

        /*
         * In S_SPIK state we ignore succeeding outliers until
         * either an inlier is found or the stepout threshold is
         * exceeded.
         */
        case S_SPIK:
            if (mu < clock_minstep)
                return (0);

            /* fall through to default */

        /*
         * We get here by default in S_NSET and S_FSET states
         * and from above in S_FREQ or S_SPIK states.
         *
         * In S_NSET state an initial frequency correction is
         * not available, usually because the frequency file has
         * not yet been written. Since the time is outside the
         * step threshold, the clock is stepped. The frequency
         * will be set directly following the stepout interval.
         *
         * In S_FSET state the initial frequency has been set
         * from the frequency file. Since the time is outside
         * the step threshold, the clock is stepped immediately,
         * rather than after the stepout interval. Guys get
         * nervous if it takes 17 minutes to set the clock for
         * the first time.
         *
         * In S_FREQ and S_SPIK states the stepout threshold has
         * expired and the phase is still above the step
         * threshold. Note that a single spike greater than the
         * step threshold is always suppressed, even at the
         * longer poll intervals.
         */
        default:
            step_systime(fp_offset);
            msyslog(LOG_NOTICE, "time reset %+.6f s",
                fp_offset);
            reinit_timer();
            tc_counter = 0;
            sys_poll = NTP_MINPOLL;
            sys_tai = 0;
            clock_jitter = LOGTOD(sys_precision);
            rval = 2;
            if (state == S_NSET) {
                rstclock(S_FREQ, peer->epoch, 0);
                return (rval);
            }
            break;
        }
        rstclock(S_SYNC, peer->epoch, 0);
    } else {
        /*
         * The offset is less than the step threshold. Calculate
         * the jitter as the exponentially weighted offset
         * differences.
         */
        etemp = SQUARE(clock_jitter);
        dtemp = SQUARE(max(fabs(fp_offset - last_offset),
            LOGTOD(sys_precision)));
        clock_jitter = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);
        switch (state) {

        /*
         * In S_NSET state this is the first update received and
         * the frequency has not been initialized. Adjust the
         * phase, but do not adjust the frequency until after
         * the stepout threshold.
         */
        case S_NSET:
            rstclock(S_FREQ, peer->epoch, fp_offset);
            break;

        /*
         * In S_FSET state this is the first update received and
         * the frequency has been initialized. Adjust the phase,
         * but do not adjust the frequency until the next
         * update.
         */
        case S_FSET:
            rstclock(S_SYNC, peer->epoch, fp_offset);
            break;

        /*
         * In S_FREQ state ignore updates until the stepout
         * threshold. After that, correct the phase and
         * frequency and switch to S_SYNC state.
         */
        case S_FREQ:
            if (mu < clock_minstep)
                return (0);

            clock_frequency = (fp_offset - clock_offset) / mu;
            rstclock(S_SYNC, peer->epoch, fp_offset);
            break;

        /*
         * We get here by default in S_SYNC and S_SPIK states.
         * Here we compute the frequency update due to PLL and
         * FLL contributions.
         */
        default:
            allow_panic = FALSE;

            /*
             * The FLL and PLL frequency gain constants
             * depend on the poll interval and Allan
             * intercept. The PLL is always used, but
             * becomes ineffective above the Allan
             * intercept. The FLL is not used below one-half
             * the Allan intercept. Above that the loop gain
             * increases in steps to 1 / CLOCK_AVG.
             */
            if (ULOGTOD(sys_poll) > allan_xpt / 2) {
                dtemp = CLOCK_FLL - sys_poll;
                flladj = (fp_offset - clock_offset) /
                    (max(mu, allan_xpt) * dtemp);
            }

            /*
             * For the PLL the integration interval
             * (numerator) is the minimum of the update
             * interval and poll interval. This allows
             * oversampling, but not undersampling.
             */
            etemp = min(mu, (u_long)ULOGTOD(sys_poll));
            dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
            plladj = fp_offset * etemp / (dtemp * dtemp);
            rstclock(S_SYNC, peer->epoch, fp_offset);
            break;
        }
    }

#ifdef OPENSSL
    /*
     * Scan the loopsecond table to determine the TAI offset. If
     * there is a scheduled leap in future, set the leap warning,
     * but only if less than 30 days before the leap.
     */
    tpt = (u_int32 *)tai_leap.ptr;
    len = ntohl(tai_leap.vallen) / sizeof(u_int32);
    if (tpt != NULL) {
        for (i = 0; i < len; i++) {
            togo = (long)ntohl(tpt[i]) -
                (long)peer->rec.l_ui;
            if (togo > 0) {
                if (togo < CLOCK_JUNE)
                    leap_next |= LEAP_ADDSECOND;
                break;
            }
        }
#if defined(STA_NANO) && NTP_API == 4
        if (pll_control && kern_enable && sys_tai == 0) {
            memset(&ntv, 0, sizeof(ntv));
            ntv.modes = MOD_TAI;
            ntv.constant = i + TAI_1972 - 1;
            ntp_adjtime(&ntv);
        }
#endif /* STA_NANO */
        sys_tai = i + TAI_1972 - 1;
    }
#endif /* OPENSSL */

#ifdef KERNEL_PLL
    /*
     * This code segment works when clock adjustments are made using
     * precision time kernel support and the ntp_adjtime() system
     * call. This support is available in Solaris 2.6 and later,
     * Digital Unix 4.0 and later, FreeBSD, Linux and specially
     * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
     * DECstation 5000/240 and Alpha AXP, additional kernel
     * modifications provide a true microsecond clock and nanosecond
     * clock, respectively.
     *
     * Important note: The kernel discipline is used only if the
     * step threshold is less than 0.5 s, as anything higher can
     * lead to overflow problems. This might occur if some misguided
     * lad set the step threshold to something ridiculous.
     */
    if (pll_control && kern_enable) {

        /*
         * We initialize the structure for the ntp_adjtime()
         * system call. We have to convert everything to
         * microseconds or nanoseconds first. Do not update the
         * system variables if the ext_enable flag is set. In
         * this case, the external clock driver will update the
         * variables, which will be read later by the local
         * clock driver. Afterwards, remember the time and
         * frequency offsets for jitter and stability values and
         * to update the frequency file.
         */
        memset(&ntv, 0, sizeof(ntv));
        if (ext_enable) {
            ntv.modes = MOD_STATUS;
        } else {
            struct tm *tm = NULL;
            time_t tstamp;

#ifdef STA_NANO
            ntv.modes = MOD_BITS | MOD_NANO;
#else /* STA_NANO */
            ntv.modes = MOD_BITS;
#endif /* STA_NANO */
            if (clock_offset < 0)
                dtemp = -.5;
            else
                dtemp = .5;
#ifdef STA_NANO
            ntv.offset = (int32)(clock_offset * 1e9 + dtemp);
            ntv.constant = sys_poll;
#else /* STA_NANO */
            ntv.offset = (int32)(clock_offset * 1e6 + dtemp);
            ntv.constant = sys_poll - 4;
#endif /* STA_NANO */

            /*
             * The frequency is set directly only if
             * clock_frequency is nonzero coming out of FREQ
             * state.
             */
            if (clock_frequency != 0) {
                ntv.modes |= MOD_FREQUENCY;
                ntv.freq = (int32)((clock_frequency +
                    drift_comp) * 65536e6);
            }
            ntv.esterror = (u_int32)(clock_jitter * 1e6);
            ntv.maxerror = (u_int32)((sys_rootdelay / 2 +
                sys_rootdispersion) * 1e6);
            ntv.status = STA_PLL;

            /*
             * Set the leap bits in the status word, but
             * only on the last day of June or December.
             */
            tstamp = peer->rec.l_ui - JAN_1970;
            tm = gmtime(&tstamp);
            if (tm != NULL) {
                if ((tm->tm_mon + 1 == 6 &&
                    tm->tm_mday == 30) ||
                    (tm->tm_mon + 1 == 12 &&
                    tm->tm_mday == 31)) {
                    if (leap_next & LEAP_ADDSECOND)
                        ntv.status |= STA_INS;
                    else if (leap_next &
                        LEAP_DELSECOND)
                        ntv.status |= STA_DEL;
                }
            }

            /*
             * If the PPS signal is up and enabled, light
             * the frequency bit. If the PPS driver is
             * working, light the phase bit as well. If not,
             * douse the lights, since somebody else may
             * have left the switch on.
             */
            if (pps_enable && pll_status & STA_PPSSIGNAL) {
                ntv.status |= STA_PPSFREQ;
                if (pps_stratum < STRATUM_UNSPEC)
                    ntv.status |= STA_PPSTIME;
            } else {
                ntv.status &= ~(STA_PPSFREQ |
                    STA_PPSTIME);
            }
        }

        /*
         * Pass the stuff to the kernel. If it squeals, turn off
         * the pig. In any case, fetch the kernel offset and
         * frequency and pretend we did it here.
         */
        if (ntp_adjtime(&ntv) == TIME_ERROR) {
            NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
                msyslog(LOG_NOTICE,
                    "kernel time sync error %04x", ntv.status);
            ntv.status &= ~(STA_PPSFREQ | STA_PPSTIME);
        }
        pll_status = ntv.status;
#ifdef STA_NANO
        clock_offset = ntv.offset / 1e9;
#else /* STA_NANO */
        clock_offset = ntv.offset / 1e6;
#endif /* STA_NANO */
        clock_frequency = ntv.freq / 65536e6;
        flladj = plladj = 0;

        /*
         * If the kernel PPS is lit, monitor its performance.
         */
        if (ntv.status & STA_PPSTIME) {
            pps_control = current_time;
#ifdef STA_NANO
            clock_jitter = ntv.jitter / 1e9;
#else /* STA_NANO */
            clock_jitter = ntv.jitter / 1e6;
#endif /* STA_NANO */
        }
    } else {
#endif /* KERNEL_PLL */
        /*
         * We get here if the kernel discipline is not enabled.
         * Adjust the clock frequency as the sum of the directly
         * computed frequency (if measured) and the PLL and FLL
         * increments.
         */
        clock_frequency = drift_comp + clock_frequency +
            flladj + plladj;
#ifdef KERNEL_PLL
    }
#endif /* KERNEL_PLL */

    /*
     * Clamp the frequency within the tolerance range and calculate
     * the frequency change since the last update.
     */
    if (fabs(clock_frequency) > NTP_MAXFREQ)
        NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
            msyslog(LOG_NOTICE,
                "frequency error %.0f PPM exceeds tolerance %.0f PPM",
                clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
    dtemp = SQUARE(clock_frequency - drift_comp);
    if (clock_frequency > NTP_MAXFREQ)
        drift_comp = NTP_MAXFREQ;
    else if (clock_frequency < -NTP_MAXFREQ)
        drift_comp = -NTP_MAXFREQ;
    else
        drift_comp = clock_frequency;

    /*
     * Calculate the wander as the exponentially weighted frequency
     * differences.
     */
    etemp = SQUARE(clock_stability);
    clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);

    /*
     * Here we adjust the poll interval by comparing the current
     * offset with the clock jitter. If the offset is less than the
     * clock jitter times a constant, then the averaging interval is
     * increased, otherwise it is decreased. A bit of hysteresis
     * helps calm the dance. Works best using burst mode.
     */
    if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
        tc_counter += sys_poll;
        if (tc_counter > CLOCK_LIMIT) {
            tc_counter = CLOCK_LIMIT;
            if (sys_poll < peer->maxpoll) {
                tc_counter = 0;
                sys_poll++;
            }
        }
    } else {
        tc_counter -= sys_poll << 1;
        if (tc_counter < -CLOCK_LIMIT) {
            tc_counter = -CLOCK_LIMIT;
            if (sys_poll > peer->minpoll) {
                tc_counter = 0;
                sys_poll--;
            }
        }
    }

    /*
     * Yibbidy, yibbbidy, yibbidy; that'h all folks.
     */
    record_loop_stats(clock_offset, drift_comp, clock_jitter,
        clock_stability, sys_poll);
#ifdef DEBUG
    if (debug)
        printf(
            "local_clock: mu %lu jitr %.6f freq %.3f stab %.6f poll %d count %d\n",
            mu, clock_jitter, drift_comp * 1e6,
            clock_stability * 1e6, sys_poll, tc_counter);
#endif /* DEBUG */
    return (rval);
#endif /* LOCKCLOCK */
}
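Every variant above ends by saving ntv.status into pll_status. A small standalone decoder for that status word, showing a few of the common STA_* bits used throughout these snippets:

#include <stdio.h>
#include <string.h>
#include <sys/timex.h>

/* Read the kernel status word without changing anything and print
 * the bits the loop-filter code keys on. Only a subset of the STA_*
 * flags is shown. */
int
main(void)
{
    struct timex tx;

    memset(&tx, 0, sizeof(tx));
    tx.modes = 0;
    if (ntp_adjtime(&tx) < 0) {
        perror("ntp_adjtime");
        return 1;
    }
    printf("status = %04x:%s%s%s%s\n", tx.status,
        (tx.status & STA_PLL)      ? " PLL"      : "",
        (tx.status & STA_UNSYNC)   ? " UNSYNC"   : "",
        (tx.status & STA_PPSTIME)  ? " PPSTIME"  : "",
        (tx.status & STA_FREQHOLD) ? " FREQHOLD" : "");
    return 0;
}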