/*
 * refclock_transmit - simulate the transmit procedure
 *
 * This routine implements the NTP transmit procedure for a reference
 * clock. It provides a mechanism to call the driver at the NTP poll
 * interval, as well as a reachability mechanism to detect a broken
 * radio or other madness.
 */
void
refclock_transmit(
    struct peer *peer   /* peer structure pointer */
    )
{
    u_char clktype;
    int unit;

    clktype = peer->refclktype;
    unit = peer->refclkunit;
    peer->sent++;
    get_systime(&peer->xmt);

    /*
     * This is a ripoff of the peer transmit routine, but
     * specialized for reference clocks. We do a little less
     * protocol here and call the driver-specific transmit routine.
     */
    if (peer->burst == 0) {
        u_char oreach;
#ifdef DEBUG
        if (debug)
            printf("refclock_transmit: at %ld %s\n",
                current_time, stoa(&(peer->srcadr)));
#endif

        /*
         * Update reachability and poll variables like the
         * network code.
         */
        oreach = peer->reach;
        peer->reach <<= 1;
        peer->outdate = current_time;
        if (!peer->reach) {
            if (oreach) {
                report_event(EVNT_UNREACH, peer);
                peer->timereachable = current_time;
            }
        } else {
            if (!(oreach & 0x07)) {
                clock_filter(peer, 0., 0., MAXDISPERSE);
                clock_select();
            }
            if (peer->flags & FLAG_BURST)
                peer->burst = NSTAGE;
        }
    } else {
        peer->burst--;
    }
    if (refclock_conf[clktype]->clock_poll != noentry)
        (refclock_conf[clktype]->clock_poll)(unit, peer);
    poll_update(peer, peer->hpoll);
}
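/*
 * Illustrative sketch (not part of the original source): how a driver
 * poll entry of the kind dispatched above through
 * refclock_conf[clktype]->clock_poll might look. The name example_poll
 * is hypothetical; the refclock_process()/refclock_report() helpers and
 * the procptr/polls fields are used here the way typical drivers use
 * them, as an assumption rather than a statement about any particular
 * driver.
 */
#if 0   /* sketch only, never compiled */
static void
example_poll(
    int unit,           /* unit number, from peer->refclkunit */
    struct peer *peer   /* peer structure pointer */
    )
{
    struct refclockproc *pp;

    /*
     * Count the poll, then hand the most recent timecode sample to
     * the common refclock machinery; report a bad timecode if no
     * valid sample could be assembled.
     */
    pp = peer->procptr;
    pp->polls++;
    if (!refclock_process(pp))
        refclock_report(peer, CEVNT_BADTIME);
}
#endif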
/*
 * local_clock - the NTP logical clock loop filter.
 *
 * Return codes:
 *  -1  update ignored: exceeds panic threshold
 *   0  update ignored: popcorn or exceeds step threshold
 *   1  clock was slewed
 *   2  clock was stepped
 *
 * LOCKCLOCK: The only thing this routine does is set the
 * sys_rootdisp variable equal to the peer dispersion.
 */
int
local_clock(
    struct peer *peer,      /* synch source peer structure */
    double fp_offset        /* clock offset (s) */
    )
{
    int rval;               /* return code */
    int osys_poll;          /* old system poll */
    int ntp_adj_ret;        /* returned by ntp_adjtime */
    double mu;              /* interval since last update */
    double clock_frequency; /* clock frequency */
    double dtemp, etemp;    /* double temps */
    char tbuf[80];          /* report buffer */

    /*
     * If the loop is opened or the NIST LOCKCLOCK is in use,
     * monitor and record the offsets anyway in order to determine
     * the open-loop response and then go home.
     */
#ifdef LOCKCLOCK
    {
#else
    if (!ntp_enable) {
#endif /* LOCKCLOCK */
        record_loop_stats(fp_offset, drift_comp, clock_jitter,
            clock_stability, sys_poll);
        return (0);
    }

#ifndef LOCKCLOCK
    /*
     * If the clock is way off, panic is declared. The clock_panic
     * defaults to 1000 s; if set to zero, the panic will never
     * occur. The allow_panic defaults to FALSE, so the first panic
     * will exit. It can be set TRUE by a command line option, in
     * which case the clock will be set anyway and time marches on.
     * But allow_panic will be set FALSE when the update is less
     * than the step threshold, so subsequent panics will exit.
     */
    if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
        !allow_panic) {
        snprintf(tbuf, sizeof(tbuf),
            "%+.0f s; set clock manually within %.0f s.",
            fp_offset, clock_panic);
        report_event(EVNT_SYSFAULT, NULL, tbuf);
        return (-1);
    }

    /*
     * This section simulates ntpdate. If the offset exceeds the
     * step threshold (128 ms), step the clock to that time and
     * exit. Otherwise, slew the clock to that time and exit. Note
     * that the slew will persist and eventually complete beyond the
     * life of this program. Note that while ntpdate is active, the
     * terminal does not detach, so the termination message prints
     * directly to the terminal.
     */
    if (mode_ntpdate) {
        if (   ( fp_offset > clock_max_fwd  && clock_max_fwd  > 0)
            || (-fp_offset > clock_max_back && clock_max_back > 0)) {
            step_systime(fp_offset);
            msyslog(LOG_NOTICE, "ntpd: time set %+.6f s",
                fp_offset);
            printf("ntpd: time set %+.6fs\n", fp_offset);
        } else {
            adj_systime(fp_offset);
            msyslog(LOG_NOTICE, "ntpd: time slew %+.6f s",
                fp_offset);
            printf("ntpd: time slew %+.6fs\n", fp_offset);
        }
        record_loop_stats(fp_offset, drift_comp, clock_jitter,
            clock_stability, sys_poll);
        exit (0);
    }

    /*
     * The huff-n'-puff filter finds the lowest delay in the recent
     * interval. This is used to correct the offset by one-half the
     * difference between the sample delay and the minimum delay. It
     * is most effective if the delays are highly asymmetric,
     * clockhopping is avoided and the clock frequency wander is
     * relatively small.
     */
    if (sys_huffpuff != NULL) {
        if (peer->delay < sys_huffpuff[sys_huffptr])
            sys_huffpuff[sys_huffptr] = peer->delay;
        if (peer->delay < sys_mindly)
            sys_mindly = peer->delay;
        if (fp_offset > 0)
            dtemp = -(peer->delay - sys_mindly) / 2;
        else
            dtemp = (peer->delay - sys_mindly) / 2;
        fp_offset += dtemp;
#ifdef DEBUG
        if (debug)
            printf(
                "local_clock: size %d mindly %.6f huffpuff %.6f\n",
                sys_hufflen, sys_mindly, dtemp);
#endif
    }

    /*
     * Clock state machine transition function, which defines how the
     * system reacts to large phase and frequency excursions.
     * There are two main regimes: when the offset exceeds the step
     * threshold (128 ms) and when it does not. Under certain
     * conditions updates are suspended until the stepout threshold
     * (900 s) is exceeded. See the documentation on how these
     * thresholds interact with commands and command line options.
     *
     * Note the kernel is disabled if step is disabled or greater
     * than 0.5 s or in ntpdate mode.
     */
    osys_poll = sys_poll;
    if (sys_poll < peer->minpoll)
        sys_poll = peer->minpoll;
    if (sys_poll > peer->maxpoll)
        sys_poll = peer->maxpoll;
    mu = current_time - clock_epoch;
    clock_frequency = drift_comp;
    rval = 1;
    if (   ( fp_offset > clock_max_fwd  && clock_max_fwd  > 0)
        || (-fp_offset > clock_max_back && clock_max_back > 0)
        || force_step_once) {
        if (force_step_once) {
            force_step_once = FALSE;    /* we want this only once after startup */
            msyslog(LOG_NOTICE, "Doing initial time step");
        }

        switch (state) {

        /*
         * In SYNC state we ignore the first outlier and switch
         * to SPIK state.
         */
        case EVNT_SYNC:
            snprintf(tbuf, sizeof(tbuf), "%+.6f s",
                fp_offset);
            report_event(EVNT_SPIK, NULL, tbuf);
            state = EVNT_SPIK;
            return (0);

        /*
         * In FREQ state we ignore outliers and inliers. At the
         * first outlier after the stepout threshold, compute
         * the apparent frequency correction and step the phase.
         */
        case EVNT_FREQ:
            if (mu < clock_minstep)
                return (0);

            clock_frequency = direct_freq(fp_offset);

            /* fall through to EVNT_SPIK */

        /*
         * In SPIK state we ignore succeeding outliers until
         * either an inlier is found or the stepout threshold is
         * exceeded.
         */
        case EVNT_SPIK:
            if (mu < clock_minstep)
                return (0);

            /* fall through to default */

        /*
         * We get here by default in NSET and FSET states and
         * from above in FREQ or SPIK states.
         *
         * In NSET state an initial frequency correction is not
         * available, usually because the frequency file has not
         * yet been written. Since the time is outside the step
         * threshold, the clock is stepped. The frequency will
         * be set directly following the stepout interval.
         *
         * In FSET state the initial frequency has been set from
         * the frequency file. Since the time is outside the
         * step threshold, the clock is stepped immediately,
         * rather than after the stepout interval. Guys get
         * nervous if it takes 15 minutes to set the clock for
         * the first time.
         *
         * In FREQ and SPIK states the stepout threshold has
         * expired and the phase is still above the step
         * threshold. Note that a single spike greater than the
         * step threshold is always suppressed, even with a
         * long time constant.
         */
        default:
            snprintf(tbuf, sizeof(tbuf), "%+.6f s",
                fp_offset);
            report_event(EVNT_CLOCKRESET, NULL, tbuf);
            step_systime(fp_offset);
            reinit_timer();
            tc_counter = 0;
            clock_jitter = LOGTOD(sys_precision);
            rval = 2;
            if (state == EVNT_NSET) {
                rstclock(EVNT_FREQ, 0);
                return (rval);
            }
            break;
        }
        rstclock(EVNT_SYNC, 0);
    } else {
        /*
         * The offset is less than the step threshold. Calculate
         * the jitter as the exponentially weighted offset
         * differences.
         */
        etemp = SQUARE(clock_jitter);
        dtemp = SQUARE(max(fabs(fp_offset - last_offset),
            LOGTOD(sys_precision)));
        clock_jitter = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);
        switch (state) {

        /*
         * In NSET state this is the first update received and
         * the frequency has not been initialized. Adjust the
         * phase, but do not adjust the frequency until after
         * the stepout threshold.
         */
        case EVNT_NSET:
            adj_systime(fp_offset);
            rstclock(EVNT_FREQ, fp_offset);
            break;

        /*
         * In FREQ state ignore updates until the stepout
         * threshold.
         * After that, compute the new frequency, but do not
         * adjust the frequency until the holdoff counter
         * decrements to zero.
         */
        case EVNT_FREQ:
            if (mu < clock_minstep)
                return (0);

            clock_frequency = direct_freq(fp_offset);

            /* fall through */

        /*
         * We get here by default in FSET, SPIK and SYNC states.
         * Here compute the frequency update due to PLL and FLL
         * contributions. Note, we avoid frequency discipline at
         * startup until the initial transient has subsided.
         */
        default:
            allow_panic = FALSE;
            if (freq_cnt == 0) {

                /*
                 * The FLL and PLL frequency gain constants
                 * depend on the time constant and Allan
                 * intercept. The PLL is always used, but
                 * becomes ineffective above the Allan intercept
                 * where the FLL becomes effective.
                 */
                if (sys_poll >= allan_xpt)
                    clock_frequency +=
                        (fp_offset - clock_offset) /
                        max(ULOGTOD(sys_poll), mu) *
                        CLOCK_FLL;

                /*
                 * The PLL frequency gain (numerator) depends on
                 * the minimum of the update interval and Allan
                 * intercept. This reduces the PLL gain when the
                 * FLL becomes effective.
                 */
                etemp = min(ULOGTOD(allan_xpt), mu);
                dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
                clock_frequency += fp_offset * etemp /
                    (dtemp * dtemp);
            }
            rstclock(EVNT_SYNC, fp_offset);
            if (fabs(fp_offset) < CLOCK_FLOOR)
                freq_cnt = 0;
            break;
        }
    }

#ifdef KERNEL_PLL
    /*
     * This code segment works when clock adjustments are made using
     * precision time kernel support and the ntp_adjtime() system
     * call. This support is available in Solaris 2.6 and later,
     * Digital Unix 4.0 and later, FreeBSD, Linux and specially
     * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
     * DECstation 5000/240 and Alpha AXP, additional kernel
     * modifications provide a true microsecond clock and nanosecond
     * clock, respectively.
     *
     * Important note: The kernel discipline is used only if the
     * step threshold is less than 0.5 s, as anything higher can
     * lead to overflow problems. This might occur if some misguided
     * lad set the step threshold to something ridiculous.
     */
    if (pll_control && kern_enable && freq_cnt == 0) {

        /*
         * We initialize the structure for the ntp_adjtime()
         * system call. We have to convert everything to
         * microseconds or nanoseconds first. Do not update the
         * system variables if the ext_enable flag is set. In
         * this case, the external clock driver will update the
         * variables, which will be read later by the local
         * clock driver. Afterwards, remember the time and
         * frequency offsets for jitter and stability values and
         * to update the frequency file.
         */
        ZERO(ntv);
        if (ext_enable) {
            ntv.modes = MOD_STATUS;
        } else {
#ifdef STA_NANO
            ntv.modes = MOD_BITS | MOD_NANO;
#else /* STA_NANO */
            ntv.modes = MOD_BITS;
#endif /* STA_NANO */
            if (clock_offset < 0)
                dtemp = -.5;
            else
                dtemp = .5;
#ifdef STA_NANO
            ntv.offset = (int32)(clock_offset * 1e9 + dtemp);
            ntv.constant = sys_poll;
#else /* STA_NANO */
            ntv.offset = (int32)(clock_offset * 1e6 + dtemp);
            ntv.constant = sys_poll - 4;
#endif /* STA_NANO */
            if (ntv.constant < 0)
                ntv.constant = 0;

            ntv.esterror = (u_int32)(clock_jitter * 1e6);
            ntv.maxerror = (u_int32)((sys_rootdelay / 2 +
                sys_rootdisp) * 1e6);
            ntv.status = STA_PLL;

            /*
             * Enable/disable the PPS if requested.
             */
            if (hardpps_enable) {
                ntv.status |= (STA_PPSTIME | STA_PPSFREQ);
                if (!(pll_status & STA_PPSTIME))
                    sync_status("PPS enabled",
                        pll_status, ntv.status);
            } else {
                ntv.status &= ~(STA_PPSTIME | STA_PPSFREQ);
                if (pll_status & STA_PPSTIME)
                    sync_status("PPS disabled",
                        pll_status, ntv.status);
            }
            if (sys_leap == LEAP_ADDSECOND)
                ntv.status |= STA_INS;
            else if (sys_leap == LEAP_DELSECOND)
                ntv.status |= STA_DEL;
        }

        /*
         * Pass the stuff to the kernel. If it squeals, turn off
         * the PPS. In any case, fetch the kernel offset,
         * frequency and jitter.
         */
        ntp_adj_ret = ntp_adjtime(&ntv);
        /*
         * A squeal is a return status < 0, or a state change.
         */
        if ((0 > ntp_adj_ret) || (ntp_adj_ret != kernel_status)) {
            kernel_status = ntp_adj_ret;
            ntp_adjtime_error_handler(__func__, &ntv,
                ntp_adj_ret, errno, hardpps_enable,
                0, __LINE__ - 1);
        }
        pll_status = ntv.status;
#ifdef STA_NANO
        clock_offset = ntv.offset / 1e9;
#else /* STA_NANO */
        clock_offset = ntv.offset / 1e6;
#endif /* STA_NANO */
        clock_frequency = FREQTOD(ntv.freq);

        /*
         * If the kernel PPS is lit, monitor its performance.
         */
        if (ntv.status & STA_PPSTIME) {
#ifdef STA_NANO
            clock_jitter = ntv.jitter / 1e9;
#else /* STA_NANO */
            clock_jitter = ntv.jitter / 1e6;
#endif /* STA_NANO */
        }

#if defined(STA_NANO) && NTP_API == 4
        /*
         * If the TAI changes, update the kernel TAI.
         */
        if (loop_tai != sys_tai) {
            loop_tai = sys_tai;
            ntv.modes = MOD_TAI;
            ntv.constant = sys_tai;
            if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
                ntp_adjtime_error_handler(__func__, &ntv,
                    ntp_adj_ret, errno, 0, 1,
                    __LINE__ - 1);
            }
        }
#endif /* STA_NANO */
    }
#endif /* KERNEL_PLL */

    /*
     * Clamp the frequency within the tolerance range and calculate
     * the frequency difference since the last update.
     */
    if (fabs(clock_frequency) > NTP_MAXFREQ)
        msyslog(LOG_NOTICE,
            "frequency error %.0f PPM exceeds tolerance %.0f PPM",
            clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
    dtemp = SQUARE(clock_frequency - drift_comp);
    if (clock_frequency > NTP_MAXFREQ)
        drift_comp = NTP_MAXFREQ;
    else if (clock_frequency < -NTP_MAXFREQ)
        drift_comp = -NTP_MAXFREQ;
    else
        drift_comp = clock_frequency;

    /*
     * Calculate the wander as the exponentially weighted RMS
     * frequency differences. Record the change for the frequency
     * file update.
     */
    etemp = SQUARE(clock_stability);
    clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);

    /*
     * Here we adjust the time constant by comparing the current
     * offset with the clock jitter. If the offset is less than the
     * clock jitter times a constant, the averaging interval is
     * increased; otherwise it is decreased. A bit of hysteresis
     * helps calm the dance. This works best using burst mode. Don't
     * fiddle with the poll during the startup clamp period.
     */
    if (freq_cnt > 0) {
        tc_counter = 0;
    } else if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
        tc_counter += sys_poll;
        if (tc_counter > CLOCK_LIMIT) {
            tc_counter = CLOCK_LIMIT;
            if (sys_poll < peer->maxpoll) {
                tc_counter = 0;
                sys_poll++;
            }
        }
    } else {
        tc_counter -= sys_poll << 1;
        if (tc_counter < -CLOCK_LIMIT) {
            tc_counter = -CLOCK_LIMIT;
            if (sys_poll > peer->minpoll) {
                tc_counter = 0;
                sys_poll--;
            }
        }
    }

    /*
     * If the time constant has changed, update the poll variables.
     */
    if (osys_poll != sys_poll)
        poll_update(peer, sys_poll);

    /*
     * Yibbidy, yibbidy, yibbidy; that's all folks.
     */
    record_loop_stats(clock_offset, drift_comp, clock_jitter,
        clock_stability, sys_poll);
#ifdef DEBUG
    if (debug)
        printf(
            "local_clock: offset %.9f jit %.9f freq %.3f stab %.3f poll %d\n",
            clock_offset, clock_jitter, drift_comp * 1e6,
            clock_stability * 1e6, sys_poll);
#endif /* DEBUG */
    return (rval);
#endif /* LOCKCLOCK */
}


/*
 * adj_host_clock - called once every second to update the local clock.
 *
 * LOCKCLOCK: The only thing this routine does is increment the
 * sys_rootdisp variable.
 */
void
adj_host_clock(
    void
    )
{
    double offset_adj;
    double freq_adj;

    /*
     * Update the dispersion since the last update. In contrast to
     * NTPv3, NTPv4 does not declare unsynchronized after one day,
     * since the dispersion check serves this function. Also, since
     * the poll interval can exceed one day, the old test would be
     * counterproductive. During the startup clamp period, the time
     * constant is clamped at 2.
     */
    sys_rootdisp += clock_phi;
#ifndef LOCKCLOCK
    if (!ntp_enable || mode_ntpdate)
        return;

    /*
     * Determine the phase adjustment. The gain factor (denominator)
     * increases with the poll interval, so it is dominated by the
     * FLL above the Allan intercept. Note the reduced time constant
     * at startup.
     */
    if (state != EVNT_SYNC) {
        offset_adj = 0.;
    } else if (freq_cnt > 0) {
        offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(1));
        freq_cnt--;
#ifdef KERNEL_PLL
    } else if (pll_control && kern_enable) {
        offset_adj = 0.;
#endif /* KERNEL_PLL */
    } else {
        offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll));
    }

    /*
     * If the kernel discipline is enabled, the frequency correction
     * drift_comp has already been engaged via ntp_adjtime() in
     * set_freq(). Otherwise it is a component of the adj_systime()
     * offset.
     */
#ifdef KERNEL_PLL
    if (pll_control && kern_enable)
        freq_adj = 0.;
    else
#endif /* KERNEL_PLL */
        freq_adj = drift_comp;

    /* Bound the absolute value of the total adjustment to NTP_MAXFREQ. */
    if (offset_adj + freq_adj > NTP_MAXFREQ)
        offset_adj = NTP_MAXFREQ - freq_adj;
    else if (offset_adj + freq_adj < -NTP_MAXFREQ)
        offset_adj = -NTP_MAXFREQ - freq_adj;
    clock_offset -= offset_adj;

    /*
     * The Windows port's adj_systime() must be called each second,
     * even if the argument is zero, to ease emulation of adjtime()
     * using Windows' slew API, which controls the rate but does not
     * automatically stop slewing when an offset has decayed to zero.
     */
    adj_systime(offset_adj + freq_adj);
#endif /* LOCKCLOCK */
}
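/*
 * Illustrative sketch (not part of the original source): in the
 * steady-state branch of adj_host_clock() above, the offset is
 * amortized by a fixed fraction each second,
 *
 *      offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll)),
 *
 * so the residual offset decays geometrically with a time constant of
 * about CLOCK_PLL * 2^sys_poll seconds. The toy function below only
 * traces that decay; the starting offset and poll exponent are made-up
 * values and example_phase_decay is a hypothetical name.
 */
#if 0   /* sketch only, never compiled */
static void
example_phase_decay(void)
{
    double offset = 0.050;  /* assumed 50 ms residual offset */
    int poll = 6;           /* assumed poll exponent (64 s) */
    double gain = CLOCK_PLL * ULOGTOD(poll);
    int i;

    for (i = 0; i < 10; i++) {
        double adj = offset / gain; /* per-second slew */

        offset -= adj;              /* mirrors clock_offset -= offset_adj */
        printf("t %d s: offset %.6f s, adj %.9f s\n",
            i + 1, offset, adj);
    }
}
#endif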