/*
** set_time applies 'offset' to the local clock.
*/
int
set_time(
    double offset
    )
{
    int rc;

    if (time_adjusted)
        return EX_OK;

    /*
    ** If we can step but we cannot slew, then step.
    ** If we can step or slew and |offset| > steplimit, then step.
    */
    if (ENABLED_OPT(STEP) &&
        (!ENABLED_OPT(SLEW) ||
         (ENABLED_OPT(SLEW) && (fabs(offset) > steplimit)))) {
        rc = step_systime(offset);

        /* If there was a problem, can we rely on errno? */
        if (1 == rc)
            time_adjusted = TRUE;
        return (time_adjusted) ? EX_OK : 1;
        /*
        ** In case of error, what should we use?
        ** EX_UNAVAILABLE?
        ** EX_OSERR?
        ** EX_NOPERM?
        */
    }

    if (ENABLED_OPT(SLEW)) {
        rc = adj_systime(offset);

        /* If there was a problem, can we rely on errno? */
        if (1 == rc)
            time_adjusted = TRUE;
        return (time_adjusted) ? EX_OK : 1;
        /*
        ** In case of error, what should we use?
        ** EX_UNAVAILABLE?
        ** EX_OSERR?
        ** EX_NOPERM?
        */
    }

    return EX_SOFTWARE;
}
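/*
 * Editor's note: an illustrative sketch, not part of the original
 * source. The step-vs-slew decision in set_time() reduces to a pure
 * predicate on the two capability flags and |offset|. The helper name
 * and the 0.128 s limit are hypothetical (0.128 s is the customary
 * NTP step threshold). Extract to a standalone file to run.
 */
#if 0
#include <math.h>
#include <stdio.h>

static int
should_step(int can_step, int can_slew, double offset, double steplimit)
{
    /* Step when slewing is impossible, or the offset is too big to slew. */
    return can_step && (!can_slew || fabs(offset) > steplimit);
}

int
main(void)
{
    printf("%d\n", should_step(1, 1, 0.050, 0.128));    /* 0: slew */
    printf("%d\n", should_step(1, 1, 3.200, 0.128));    /* 1: step */
    printf("%d\n", should_step(1, 0, 0.050, 0.128));    /* 1: cannot slew */
    return 0;
}
#endif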
/*
 * timer - event timer
 */
void
timer(void)
{
    register struct peer *peer, *next_peer;
    u_int n;
    long delta;

    /*
     * The basic timer event is one second. This is used to adjust
     * the system clock in time and frequency, implement the
     * kiss-o'-death function and implement the association
     * polling function.
     */
    current_time += nap_time;
    get_systime(&sys_time);
    nap_time = (u_long)-1;
    if (do_adjtime) {
        if (adjust_timer <= current_time) {
            adjust_timer += 1;
            adj_host_clock();
#ifdef REFCLOCK
            for (n = 0; n < NTP_HASH_SIZE; n++) {
                for (peer = peer_hash[n]; peer != 0; peer = next_peer) {
                    next_peer = peer->next;
                    if (peer->flags & FLAG_REFCLOCK)
                        refclock_timer(peer);
                }
            }
#endif /* REFCLOCK */
        }
        nap_time = 1;
    }

    if (awake_timer) {
        if (awake_timer <= current_time) {
            for (n = 0; n < NTP_HASH_SIZE; n++) {
                for (peer = peer_hash[n]; peer != 0; peer = peer->next) {
                    peer->burst = NSTAGE;
                    peer->nextdate = current_time;
                    peer->throttle = 0;
                }
            }
            allow_panic = TRUE;    /* Allow for large time offsets */
            init_loopfilter();
            state = EVNT_NSET;
            awake_timer = 0;
        } else {
            delta = awake_timer - current_time;
            if (delta < nap_time) {
                nap_time = delta;
            }
        }
    }

    /*
     * Now dispatch any peers whose event timer has expired. Be
     * careful here, since the peer structure might go away as the
     * result of the call.
     */
    for (n = 0; n < NTP_HASH_SIZE; n++) {
        for (peer = peer_hash[n]; peer != 0; peer = next_peer) {
            next_peer = peer->next;
            if (peer->action && peer->nextaction <= current_time)
                peer->action(peer);

            /*
             * Restrain the non-burst packet rate to not more
             * than one packet every 16 seconds. This is
             * usually tripped using iburst and minpoll of
             * 128 s or less.
             */
            if (peer->throttle > 0)
                peer->throttle--;
            if (peer->nextdate <= current_time) {
#ifdef REFCLOCK
                if (peer->flags & FLAG_REFCLOCK)
                    refclock_transmit(peer);
                else
                    transmit(peer);
#else /* REFCLOCK */
                transmit(peer);
#endif /* REFCLOCK */
            }
            if (peer->action && peer->nextaction >= current_time) {
                delta = peer->nextaction - current_time;
                if (delta < nap_time) {
                    nap_time = delta;
                }
            } else if (peer->nextdate >= current_time) {
                delta = peer->nextdate - current_time;
                if (delta < nap_time) {
                    nap_time = delta;
                }
            }
        }
    }

    /*
     * Orphan mode is active when enabled and when no servers less
     * than the orphan stratum are available. A server with no other
     * synchronization source is an orphan. It shows offset zero and
     * reference ID the loopback address.
     */
    if (sys_orphan < STRATUM_UNSPEC && sys_peer == NULL) {
        if (sys_leap == LEAP_NOTINSYNC) {
            sys_leap = LEAP_NOWARNING;
#ifdef OPENSSL
            if (crypto_flags)
                crypto_update();
#endif /* OPENSSL */
        }
        sys_stratum = (u_char)sys_orphan;
        if (sys_stratum > 1)
            sys_refid = htonl(LOOPBACKADR);
        else
            memcpy(&sys_refid, "LOOP", 4);
        sys_offset = 0;
        sys_rootdelay = 0;
        sys_rootdisp = 0;
    }

    /*
     * Leapseconds. If a leap is pending, decrement the time
     * remaining. If less than one day remains, set the leap bits.
     * When no time remains, clear the leap bits and increment the
     * TAI. If kernel support is not available, do the leap
     * crudely. Note a leap cannot be pending unless the clock is
     * set.
     */
    if (leapsec > 0) {
        leapsec--;
        if (leapsec == 0) {
            sys_leap = LEAP_NOWARNING;
            sys_tai = leap_tai;
#ifdef KERNEL_PLL
            if (!(pll_control && kern_enable))
                step_systime(-1.0);
#else /* KERNEL_PLL */
# ifndef SYS_WINNT    /* WinNT port has its own leap second handling */
            step_systime(-1.0);
# endif /* SYS_WINNT */
#endif /* KERNEL_PLL */
            report_event(EVNT_LEAP, NULL, NULL);
        } else {
            if (leapsec < DAY)
                sys_leap = LEAP_ADDSECOND;
            if (leap_tai > 0)
                sys_tai = leap_tai - 1;
        }
    }

    /*
     * Update huff-n'-puff filter.
     */
    if (huffpuff_timer <= current_time) {
        huffpuff_timer += HUFFPUFF;
        huffpuff();
    }
    if (huffpuff_timer >= current_time) {
        delta = huffpuff_timer - current_time;
        if (delta < nap_time) {
            nap_time = delta;
        }
    }

#ifdef OPENSSL
    /*
     * Garbage collect expired keys.
     */
    if (keys_timer <= current_time) {
        keys_timer += 1 << sys_automax;
        auth_agekeys();
    }
    if (keys_timer >= current_time) {
        delta = keys_timer - current_time;
        if (delta < nap_time) {
            nap_time = delta;
        }
    }

    /*
     * Garbage collect key list and generate new private value. The
     * timer runs only after initial synchronization and fires about
     * once per day.
     */
    if (revoke_timer <= current_time && sys_leap != LEAP_NOTINSYNC) {
        revoke_timer += 1 << sys_revoke;
        RAND_bytes((u_char *)&sys_private, 4);
    }
    if (revoke_timer >= current_time) {
        delta = revoke_timer - current_time;
        if (delta < nap_time) {
            nap_time = delta;
        }
    }
#endif /* OPENSSL */

    /*
     * Interface update timer.
     */
    if (interface_interval && interface_timer <= current_time) {
        timer_interfacetimeout(current_time + interface_interval);
        DPRINTF(2, ("timer: interface update\n"));
        interface_update(NULL, NULL);
    }
    if (interface_interval && interface_timer >= current_time) {
        delta = interface_timer - current_time;
        if (delta < nap_time) {
            nap_time = delta;
        }
    }

    if (dns_timer && (dns_timer <= current_time)) {
        dns_timer = update_dns_peers();
    }
    if (dns_timer >= current_time) {
        delta = dns_timer - current_time;
        if (delta < nap_time) {
            nap_time = delta;
        }
    }

    /*
     * Finally, write hourly stats.
     */
    if (stats_timer <= current_time) {
        stats_timer += HOUR;
        write_stats();
        if (sys_tai != 0 && sys_time.l_ui > leap_expire)
            report_event(EVNT_LEAPVAL, NULL, NULL);
    } else if (!do_adjtime && drift_file_sw) {
        write_stats();    /* update more frequently for pacemaker */
    }
    if (stats_timer >= current_time) {
        delta = stats_timer - current_time;
        if (delta < nap_time) {
            nap_time = delta;
        }
    }

    if (nap_time == 0) {
        nap_time = 1;
    }

    if (debug) {
        msyslog(LOG_INFO, "%s: current_time: %ld, nap_time: %ld",
            __FUNCTION__, current_time, nap_time);
    }

    itimer.it_interval.tv_sec = itimer.it_value.tv_sec = nap_time;
    setitimer(ITIMER_REAL, &itimer, (struct itimerval *)0);
}
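/*
 * Editor's note: an illustrative sketch, not part of the original
 * source. timer() above repeatedly clamps nap_time to the smallest
 * pending deadline before arming setitimer(); the pattern is a
 * min-over-deadlines fold. All names below are hypothetical.
 * Extract to a standalone file to run.
 */
#if 0
#include <stdio.h>

static long
nap_until(const long *deadline, int n, long now)
{
    long nap = 1000000000L;    /* effectively "sleep forever" */
    int  i;

    for (i = 0; i < n; i++) {
        if (deadline[i] <= now)
            return 1;          /* an event is already due; minimum nap */
        if (deadline[i] - now < nap)
            nap = deadline[i] - now;
    }
    return nap;
}

int
main(void)
{
    long d[3] = { 105, 140, 103 };

    printf("nap %ld\n", nap_until(d, 3, 100));    /* prints "nap 3" */
    return 0;
}
#endif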
/*
 * local_clock - the NTP logical clock loop filter.
 *
 * Return codes:
 * -1   update ignored: exceeds panic threshold
 *  0   update ignored: popcorn or exceeds step threshold
 *  1   clock was slewed
 *  2   clock was stepped
 *
 * LOCKCLOCK: The only thing this routine does is set the
 * sys_rootdisp variable equal to the peer dispersion.
 */
int
local_clock(
    struct peer *peer,        /* synch source peer structure */
    double fp_offset          /* clock offset (s) */
    )
{
    int rval;                 /* return code */
    int osys_poll;            /* old system poll */
    int ntp_adj_ret;          /* returned by ntp_adjtime */
    double mu;                /* interval since last update */
    double clock_frequency;   /* clock frequency */
    double dtemp, etemp;      /* double temps */
    char tbuf[80];            /* report buffer */

    /*
     * If the loop is opened or the NIST LOCKCLOCK is in use,
     * monitor and record the offsets anyway in order to determine
     * the open-loop response and then go home.
     */
#ifdef LOCKCLOCK
    {
#else
    if (!ntp_enable) {
#endif /* LOCKCLOCK */
        record_loop_stats(fp_offset, drift_comp, clock_jitter,
            clock_stability, sys_poll);
        return (0);
    }

#ifndef LOCKCLOCK
    /*
     * If the clock is way off, panic is declared. The clock_panic
     * defaults to 1000 s; if set to zero, the panic will never
     * occur. The allow_panic defaults to FALSE, so the first panic
     * will exit. It can be set TRUE by a command line option, in
     * which case the clock will be set anyway and time marches on.
     * But, allow_panic will be set FALSE when the update is less
     * than the step threshold; so, subsequent panics will exit.
     */
    if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
        !allow_panic) {
        snprintf(tbuf, sizeof(tbuf),
            "%+.0f s; set clock manually within %.0f s.",
            fp_offset, clock_panic);
        report_event(EVNT_SYSFAULT, NULL, tbuf);
        return (-1);
    }

    /*
     * This section simulates ntpdate. If the offset exceeds the
     * step threshold (128 ms), step the clock to that time and
     * exit. Otherwise, slew the clock to that time and exit. Note
     * that the slew will persist and eventually complete beyond the
     * life of this program. Note that while ntpdate is active, the
     * terminal does not detach, so the termination message prints
     * directly to the terminal.
     */
    if (mode_ntpdate) {
        if (  ( fp_offset > clock_max_fwd  && clock_max_fwd  > 0)
           || (-fp_offset > clock_max_back && clock_max_back > 0)) {
            step_systime(fp_offset);
            msyslog(LOG_NOTICE, "ntpd: time set %+.6f s", fp_offset);
            printf("ntpd: time set %+.6fs\n", fp_offset);
        } else {
            adj_systime(fp_offset);
            msyslog(LOG_NOTICE, "ntpd: time slew %+.6f s", fp_offset);
            printf("ntpd: time slew %+.6fs\n", fp_offset);
        }
        record_loop_stats(fp_offset, drift_comp, clock_jitter,
            clock_stability, sys_poll);
        exit (0);
    }

    /*
     * The huff-n'-puff filter finds the lowest delay in the recent
     * interval. This is used to correct the offset by one-half the
     * difference between the sample delay and minimum delay. This
     * is most effective if the delays are highly asymmetric and
     * clockhopping is avoided and the clock frequency wander is
     * relatively small.
     */
    if (sys_huffpuff != NULL) {
        if (peer->delay < sys_huffpuff[sys_huffptr])
            sys_huffpuff[sys_huffptr] = peer->delay;
        if (peer->delay < sys_mindly)
            sys_mindly = peer->delay;
        if (fp_offset > 0)
            dtemp = -(peer->delay - sys_mindly) / 2;
        else
            dtemp = (peer->delay - sys_mindly) / 2;
        fp_offset += dtemp;
#ifdef DEBUG
        if (debug)
            printf(
                "local_clock: size %d mindly %.6f huffpuff %.6f\n",
                sys_hufflen, sys_mindly, dtemp);
#endif
    }

    /*
     * Clock state machine transition function which defines how the
     * system reacts to large phase and frequency excursions. There
     * are two main regimes: when the offset exceeds the step
     * threshold (128 ms) and when it does not. Under certain
     * conditions updates are suspended until the stepout threshold
     * (900 s) is exceeded. See the documentation on how these
     * thresholds interact with commands and command line options.
     *
     * Note the kernel is disabled if step is disabled or greater
     * than 0.5 s or in ntpdate mode.
     */
    osys_poll = sys_poll;
    if (sys_poll < peer->minpoll)
        sys_poll = peer->minpoll;
    if (sys_poll > peer->maxpoll)
        sys_poll = peer->maxpoll;
    mu = current_time - clock_epoch;
    clock_frequency = drift_comp;
    rval = 1;
    if (  ( fp_offset > clock_max_fwd  && clock_max_fwd  > 0)
       || (-fp_offset > clock_max_back && clock_max_back > 0)
       || force_step_once) {
        if (force_step_once) {
            force_step_once = FALSE;    /* we want this only once after startup */
            msyslog(LOG_NOTICE, "Doing initial time step");
        }

        switch (state) {

        /*
         * In SYNC state we ignore the first outlier and switch
         * to SPIK state.
         */
        case EVNT_SYNC:
            snprintf(tbuf, sizeof(tbuf), "%+.6f s", fp_offset);
            report_event(EVNT_SPIK, NULL, tbuf);
            state = EVNT_SPIK;
            return (0);

        /*
         * In FREQ state we ignore outliers and inliers. At the
         * first outlier after the stepout threshold, compute
         * the apparent frequency correction and step the phase.
         */
        case EVNT_FREQ:
            if (mu < clock_minstep)
                return (0);

            clock_frequency = direct_freq(fp_offset);

            /* fall through to EVNT_SPIK */

        /*
         * In SPIK state we ignore succeeding outliers until
         * either an inlier is found or the stepout threshold is
         * exceeded.
         */
        case EVNT_SPIK:
            if (mu < clock_minstep)
                return (0);

            /* fall through to default */

        /*
         * We get here by default in NSET and FSET states and
         * from above in FREQ or SPIK states.
         *
         * In NSET state an initial frequency correction is not
         * available, usually because the frequency file has not
         * yet been written. Since the time is outside the step
         * threshold, the clock is stepped. The frequency will
         * be set directly following the stepout interval.
         *
         * In FSET state the initial frequency has been set from
         * the frequency file. Since the time is outside the
         * step threshold, the clock is stepped immediately,
         * rather than after the stepout interval. Guys get
         * nervous if it takes 15 minutes to set the clock for
         * the first time.
         *
         * In FREQ and SPIK states the stepout threshold has
         * expired and the phase is still above the step
         * threshold. Note that a single spike greater than the
         * step threshold is always suppressed, even with a
         * long time constant.
         */
        default:
            snprintf(tbuf, sizeof(tbuf), "%+.6f s", fp_offset);
            report_event(EVNT_CLOCKRESET, NULL, tbuf);
            step_systime(fp_offset);
            reinit_timer();
            tc_counter = 0;
            clock_jitter = LOGTOD(sys_precision);
            rval = 2;
            if (state == EVNT_NSET) {
                rstclock(EVNT_FREQ, 0);
                return (rval);
            }
            break;
        }
        rstclock(EVNT_SYNC, 0);
    } else {
        /*
         * The offset is less than the step threshold. Calculate
         * the jitter as the exponentially weighted offset
         * differences.
         */
        etemp = SQUARE(clock_jitter);
        dtemp = SQUARE(max(fabs(fp_offset - last_offset),
            LOGTOD(sys_precision)));
        clock_jitter = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);
        switch (state) {

        /*
         * In NSET state this is the first update received and
         * the frequency has not been initialized. Adjust the
         * phase, but do not adjust the frequency until after
         * the stepout threshold.
         */
        case EVNT_NSET:
            adj_systime(fp_offset);
            rstclock(EVNT_FREQ, fp_offset);
            break;

        /*
         * In FREQ state ignore updates until the stepout
         * threshold. After that, compute the new frequency, but
         * do not adjust the frequency until the holdoff counter
         * decrements to zero.
         */
        case EVNT_FREQ:
            if (mu < clock_minstep)
                return (0);

            clock_frequency = direct_freq(fp_offset);
            /* fall through */

        /*
         * We get here by default in FSET, SPIK and SYNC states.
         * Here compute the frequency update due to PLL and FLL
         * contributions. Note, we avoid frequency discipline at
         * startup until the initial transient has subsided.
         */
        default:
            allow_panic = FALSE;
            if (freq_cnt == 0) {

                /*
                 * The FLL and PLL frequency gain constants
                 * depend on the time constant and Allan
                 * intercept. The PLL is always used, but
                 * becomes ineffective above the Allan intercept
                 * where the FLL becomes effective.
                 */
                if (sys_poll >= allan_xpt)
                    clock_frequency +=
                        (fp_offset - clock_offset) /
                        max(ULOGTOD(sys_poll), mu) * CLOCK_FLL;

                /*
                 * The PLL frequency gain (numerator) depends on
                 * the minimum of the update interval and Allan
                 * intercept. This reduces the PLL gain when the
                 * FLL becomes effective.
                 */
                etemp = min(ULOGTOD(allan_xpt), mu);
                dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
                clock_frequency += fp_offset * etemp /
                    (dtemp * dtemp);
            }
            rstclock(EVNT_SYNC, fp_offset);
            if (fabs(fp_offset) < CLOCK_FLOOR)
                freq_cnt = 0;
            break;
        }
    }

#ifdef KERNEL_PLL
    /*
     * This code segment works when clock adjustments are made using
     * precision time kernel support and the ntp_adjtime() system
     * call. This support is available in Solaris 2.6 and later,
     * Digital Unix 4.0 and later, FreeBSD, Linux and specially
     * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
     * DECstation 5000/240 and Alpha AXP, additional kernel
     * modifications provide a true microsecond clock and nanosecond
     * clock, respectively.
     *
     * Important note: The kernel discipline is used only if the
     * step threshold is less than 0.5 s, as anything higher can
     * lead to overflow problems. This might occur if some misguided
     * lad set the step threshold to something ridiculous.
     */
    if (pll_control && kern_enable && freq_cnt == 0) {

        /*
         * We initialize the structure for the ntp_adjtime()
         * system call. We have to convert everything to
         * microseconds or nanoseconds first. Do not update the
         * system variables if the ext_enable flag is set. In
         * this case, the external clock driver will update the
         * variables, which will be read later by the local
         * clock driver. Afterwards, remember the time and
         * frequency offsets for jitter and stability values and
         * to update the frequency file.
         */
        ZERO(ntv);
        if (ext_enable) {
            ntv.modes = MOD_STATUS;
        } else {
#ifdef STA_NANO
            ntv.modes = MOD_BITS | MOD_NANO;
#else /* STA_NANO */
            ntv.modes = MOD_BITS;
#endif /* STA_NANO */
            if (clock_offset < 0)
                dtemp = -.5;
            else
                dtemp = .5;
#ifdef STA_NANO
            ntv.offset = (int32)(clock_offset * 1e9 + dtemp);
            ntv.constant = sys_poll;
#else /* STA_NANO */
            ntv.offset = (int32)(clock_offset * 1e6 + dtemp);
            ntv.constant = sys_poll - 4;
#endif /* STA_NANO */
            if (ntv.constant < 0)
                ntv.constant = 0;

            ntv.esterror = (u_int32)(clock_jitter * 1e6);
            ntv.maxerror = (u_int32)((sys_rootdelay / 2 +
                sys_rootdisp) * 1e6);
            ntv.status = STA_PLL;

            /*
             * Enable/disable the PPS if requested.
             */
            if (hardpps_enable) {
                ntv.status |= (STA_PPSTIME | STA_PPSFREQ);
                if (!(pll_status & STA_PPSTIME))
                    sync_status("PPS enabled",
                        pll_status, ntv.status);
            } else {
                ntv.status &= ~(STA_PPSTIME | STA_PPSFREQ);
                if (pll_status & STA_PPSTIME)
                    sync_status("PPS disabled",
                        pll_status, ntv.status);
            }
            if (sys_leap == LEAP_ADDSECOND)
                ntv.status |= STA_INS;
            else if (sys_leap == LEAP_DELSECOND)
                ntv.status |= STA_DEL;
        }

        /*
         * Pass the stuff to the kernel. If it squeals, turn off
         * the pps. In any case, fetch the kernel offset,
         * frequency and jitter.
         */
        ntp_adj_ret = ntp_adjtime(&ntv);
        /*
         * A squeal is a return status < 0, or a state change.
         */
        if ((0 > ntp_adj_ret) || (ntp_adj_ret != kernel_status)) {
            kernel_status = ntp_adj_ret;
            ntp_adjtime_error_handler(__func__, &ntv,
                ntp_adj_ret, errno, hardpps_enable, 0,
                __LINE__ - 1);
        }
        pll_status = ntv.status;
#ifdef STA_NANO
        clock_offset = ntv.offset / 1e9;
#else /* STA_NANO */
        clock_offset = ntv.offset / 1e6;
#endif /* STA_NANO */
        clock_frequency = FREQTOD(ntv.freq);

        /*
         * If the kernel PPS is lit, monitor its performance.
         */
        if (ntv.status & STA_PPSTIME) {
#ifdef STA_NANO
            clock_jitter = ntv.jitter / 1e9;
#else /* STA_NANO */
            clock_jitter = ntv.jitter / 1e6;
#endif /* STA_NANO */
        }

#if defined(STA_NANO) && NTP_API == 4
        /*
         * If the TAI changes, update the kernel TAI.
         */
        if (loop_tai != sys_tai) {
            loop_tai = sys_tai;
            ntv.modes = MOD_TAI;
            ntv.constant = sys_tai;
            if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
                ntp_adjtime_error_handler(__func__, &ntv,
                    ntp_adj_ret, errno, 0, 1, __LINE__ - 1);
            }
        }
#endif /* STA_NANO */
    }
#endif /* KERNEL_PLL */

    /*
     * Clamp the frequency within the tolerance range and calculate
     * the frequency difference since the last update.
     */
    if (fabs(clock_frequency) > NTP_MAXFREQ)
        msyslog(LOG_NOTICE,
            "frequency error %.0f PPM exceeds tolerance %.0f PPM",
            clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
    dtemp = SQUARE(clock_frequency - drift_comp);
    if (clock_frequency > NTP_MAXFREQ)
        drift_comp = NTP_MAXFREQ;
    else if (clock_frequency < -NTP_MAXFREQ)
        drift_comp = -NTP_MAXFREQ;
    else
        drift_comp = clock_frequency;

    /*
     * Calculate the wander as the exponentially weighted RMS
     * frequency differences. Record the change for the frequency
     * file update.
     */
    etemp = SQUARE(clock_stability);
    clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);

    /*
     * Here we adjust the time constant by comparing the current
     * offset with the clock jitter. If the offset is less than the
     * clock jitter times a constant, then the averaging interval is
     * increased, otherwise it is decreased. A bit of hysteresis
     * helps calm the dance. Works best using burst mode. Don't
     * fiddle with the poll during the startup clamp period.
     */
    if (freq_cnt > 0) {
        tc_counter = 0;
    } else if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
        tc_counter += sys_poll;
        if (tc_counter > CLOCK_LIMIT) {
            tc_counter = CLOCK_LIMIT;
            if (sys_poll < peer->maxpoll) {
                tc_counter = 0;
                sys_poll++;
            }
        }
    } else {
        tc_counter -= sys_poll << 1;
        if (tc_counter < -CLOCK_LIMIT) {
            tc_counter = -CLOCK_LIMIT;
            if (sys_poll > peer->minpoll) {
                tc_counter = 0;
                sys_poll--;
            }
        }
    }

    /*
     * If the time constant has changed, update the poll variables.
     */
    if (osys_poll != sys_poll)
        poll_update(peer, sys_poll);

    /*
     * Yibbidy, yibbidy, yibbidy; that's all folks.
     */
    record_loop_stats(clock_offset, drift_comp, clock_jitter,
        clock_stability, sys_poll);
#ifdef DEBUG
    if (debug)
        printf(
            "local_clock: offset %.9f jit %.9f freq %.3f stab %.3f poll %d\n",
            clock_offset, clock_jitter, drift_comp * 1e6,
            clock_stability * 1e6, sys_poll);
#endif /* DEBUG */
    return (rval);
#endif /* LOCKCLOCK */
}


/*
 * adj_host_clock - Called once every second to update the local clock.
 *
 * LOCKCLOCK: The only thing this routine does is increment the
 * sys_rootdisp variable.
 */
void
adj_host_clock(
    void
    )
{
    double offset_adj;
    double freq_adj;

    /*
     * Update the dispersion since the last update. In contrast to
     * NTPv3, NTPv4 does not declare unsynchronized after one day,
     * since the dispersion check serves this function. Also,
     * since the poll interval can exceed one day, the old test
     * would be counterproductive. During the startup clamp period,
     * the time constant is clamped at 2.
     */
    sys_rootdisp += clock_phi;
#ifndef LOCKCLOCK
    if (!ntp_enable || mode_ntpdate)
        return;

    /*
     * Determine the phase adjustment. The gain factor (denominator)
     * increases with poll interval, so is dominated by the FLL
     * above the Allan intercept. Note the reduced time constant at
     * startup.
     */
    if (state != EVNT_SYNC) {
        offset_adj = 0.;
    } else if (freq_cnt > 0) {
        offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(1));
        freq_cnt--;
#ifdef KERNEL_PLL
    } else if (pll_control && kern_enable) {
        offset_adj = 0.;
#endif /* KERNEL_PLL */
    } else {
        offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll));
    }

    /*
     * If the kernel discipline is enabled the frequency correction
     * drift_comp has already been engaged via ntp_adjtime() in
     * set_freq(). Otherwise it is a component of the adj_systime()
     * offset.
     */
#ifdef KERNEL_PLL
    if (pll_control && kern_enable)
        freq_adj = 0.;
    else
#endif /* KERNEL_PLL */
        freq_adj = drift_comp;

    /* Bound absolute value of total adjustment to NTP_MAXFREQ. */
    if (offset_adj + freq_adj > NTP_MAXFREQ)
        offset_adj = NTP_MAXFREQ - freq_adj;
    else if (offset_adj + freq_adj < -NTP_MAXFREQ)
        offset_adj = -NTP_MAXFREQ - freq_adj;

    clock_offset -= offset_adj;
    /*
     * Windows port adj_systime() must be called each second,
     * even if the argument is zero, to ease emulation of
     * adjtime() using Windows' slew API which controls the rate
     * but does not automatically stop slewing when an offset
     * has decayed to zero.
     */
    adj_systime(offset_adj + freq_adj);
#endif /* LOCKCLOCK */
}
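/*
 * Editor's note: an illustrative sketch, not part of the original
 * source. It restates the PLL/FLL frequency update from the default
 * case of local_clock() above with plain doubles, so the gains can be
 * checked numerically. The constants below (PLL gain 16, FLL gain
 * 0.25, Allan intercept at poll exponent 11) are assumptions chosen
 * as representative; the authoritative values live in
 * ntp_loopfilter.c. Extract to a standalone file to run.
 */
#if 0
#include <math.h>
#include <stdio.h>

#define K_PLL       16.0    /* assumed PLL loop gain */
#define K_FLL       0.25    /* assumed FLL loop gain */
#define ALLAN_XPT   11      /* assumed Allan intercept, log2 s */

static double
freq_update(double offset, double residual, int poll, double mu)
{
    double tc = ldexp(1.0, poll);    /* 2^poll, like ULOGTOD() */
    double df = 0.0;
    double etemp, dtemp;

    /* FLL term: active only at or above the Allan intercept. */
    if (poll >= ALLAN_XPT)
        df += (offset - residual) / (mu > tc ? mu : tc) * K_FLL;

    /* PLL term: gain shrinks as the FLL takes over. */
    etemp = ldexp(1.0, ALLAN_XPT);
    if (mu < etemp)
        etemp = mu;
    dtemp = 4 * K_PLL * tc;
    df += offset * etemp / (dtemp * dtemp);
    return df;
}

int
main(void)
{
    /* 5 ms offset at poll 6 (64 s): PLL-only, about 0.019 ppm. */
    printf("%.3f ppm\n", freq_update(0.005, 0.0, 6, 64.0) * 1e6);
    return 0;
}
#endif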
static void
check_leapsec(
    u_int32        now,
    const time_t * tpiv,
    int/*BOOL*/    reset)
{
    static const char leapmsg_p_step[] =
        "Positive leap second, stepped backward.";
    static const char leapmsg_p_slew[] =
        "Positive leap second, no step correction. "
        "System clock will be inaccurate for a long time.";
    static const char leapmsg_n_step[] =
        "Negative leap second, stepped forward.";
    static const char leapmsg_n_slew[] =
        "Negative leap second, no step correction. "
        "System clock will be inaccurate for a long time.";

    leap_result_t lsdata;
    u_int32       lsprox;
#ifdef AUTOKEY
    int/*BOOL*/   update_autokey = FALSE;
#endif

#ifndef SYS_WINNT    /* WinNT port has its own leap second handling */
# ifdef KERNEL_PLL
    leapsec_electric(pll_control && kern_enable);
# else
    leapsec_electric(0);
# endif
#endif
#ifdef LEAP_SMEAR
    leap_smear.enabled = leap_smear_intv != 0;
#endif
    if (reset) {
        lsprox = LSPROX_NOWARN;
        leapsec_reset_frame();
        memset(&lsdata, 0, sizeof(lsdata));
    } else {
        int fired = leapsec_query(&lsdata, now, tpiv);

        DPRINTF(1, ("*** leapsec_query: fired %i, now %u (0x%08X), tai_diff %i, ddist %u\n",
            fired, now, now, lsdata.tai_diff, lsdata.ddist));

#ifdef LEAP_SMEAR
        leap_smear.in_progress = 0;
        leap_smear.doffset = 0.0;

        if (leap_smear.enabled) {
            if (lsdata.tai_diff) {
                if (leap_smear.interval == 0) {
                    leap_smear.interval = leap_smear_intv;
                    leap_smear.intv_end = lsdata.ttime.Q_s;
                    leap_smear.intv_start = leap_smear.intv_end -
                        leap_smear.interval;
                    DPRINTF(1, ("*** leapsec_query: setting leap_smear interval %li, begin %.0f, end %.0f\n",
                        leap_smear.interval, leap_smear.intv_start,
                        leap_smear.intv_end));
                }
            } else {
                if (leap_smear.interval)
                    DPRINTF(1, ("*** leapsec_query: clearing leap_smear interval\n"));
                leap_smear.interval = 0;
            }

            if (leap_smear.interval) {
                double dtemp = now;
                if (dtemp >= leap_smear.intv_start &&
                    dtemp <= leap_smear.intv_end) {
                    double leap_smear_time = dtemp -
                        leap_smear.intv_start;
                    /*
                     * Compute the smear offset. A simple linear
                     * ramp is kept under "#if 0" for reference;
                     * the cosine ("Google") smear below is what
                     * actually runs.
                     */
#if 0
                    /* linear interpolation */
                    leap_smear.doffset = -(leap_smear_time *
                        lsdata.tai_diff / leap_smear.interval);
#else
                    /* Google approach: lie(t) = (1.0 - cos(pi * t / w)) / 2.0 */
                    leap_smear.doffset = -((double)lsdata.tai_diff -
                        cos(M_PI * leap_smear_time /
                        leap_smear.interval)) / 2.0;
#endif
                    /*
                     * TODO see if we're inside an inserted leap
                     * second, so we need to compute
                     * leap_smear.doffset = 1.0 - leap_smear.doffset
                     */
                    leap_smear.in_progress = 1;
#if 0 && defined( DEBUG )
                    msyslog(LOG_NOTICE, "*** leapsec_query: [%.0f:%.0f] (%li), now %u (%.0f), smear offset %.6f ms\n",
                        leap_smear.intv_start, leap_smear.intv_end,
                        leap_smear.interval, now, leap_smear_time,
                        leap_smear.doffset);
#else
                    DPRINTF(1, ("*** leapsec_query: [%.0f:%.0f] (%li), now %u (%.0f), smear offset %.6f ms\n",
                        leap_smear.intv_start, leap_smear.intv_end,
                        leap_smear.interval, now, leap_smear_time,
                        leap_smear.doffset));
#endif
                }
            }
        } else {
            leap_smear.interval = 0;
        }

        /*
         * Update the current leap smear offset, possibly 0.0 if
         * outside the smear interval.
         */
        DTOLFP(leap_smear.doffset, &leap_smear.offset);
#endif /* LEAP_SMEAR */

        if (fired) {
            /* Full hit. Possibly step the clock, but always
             * announce that the leap event has happened.
             */
            const char *leapmsg = NULL;
            if (lsdata.warped < 0) {
                if (clock_max_back > 0.0 &&
                    clock_max_back < abs(lsdata.warped)) {
                    step_systime(lsdata.warped);
                    leapmsg = leapmsg_p_step;
                } else {
                    leapmsg = leapmsg_p_slew;
                }
            } else if (lsdata.warped > 0) {
                if (clock_max_fwd > 0.0 &&
                    clock_max_fwd < abs(lsdata.warped)) {
                    step_systime(lsdata.warped);
                    leapmsg = leapmsg_n_step;
                } else {
                    leapmsg = leapmsg_n_slew;
                }
            }
            if (leapmsg)
                msyslog(LOG_NOTICE, "%s", leapmsg);
            report_event(EVNT_LEAP, NULL, NULL);
#ifdef AUTOKEY
            update_autokey = TRUE;
#endif
            lsprox  = LSPROX_NOWARN;
            leapsec = LSPROX_NOWARN;
            sys_tai = lsdata.tai_offs;
        } else {
#ifdef AUTOKEY
            update_autokey = (sys_tai != (u_int)lsdata.tai_offs);
#endif
            lsprox  = lsdata.proximity;
            sys_tai = lsdata.tai_offs;
        }
    }

    /* We guard against panic alarming during the red alert phase.
     * Strange and evil things might happen if we go from stone cold
     * to piping hot in one step. If things are already that wobbly,
     * we let the normal clock correction take over, even if a jump
     * is involved.
     * Also make sure the alarming events are edge-triggered, that is,
     * created only when the threshold is crossed.
     */
    if (  (leapsec > 0 || lsprox < LSPROX_ALERT)
       && leapsec < lsprox) {
        if (  leapsec < LSPROX_SCHEDULE
           && lsprox >= LSPROX_SCHEDULE) {
            if (lsdata.dynamic)
                report_event(PEVNT_ARMED, sys_peer, NULL);
            else
                report_event(EVNT_ARMED, NULL, NULL);
        }
        leapsec = lsprox;
    }
    if (leapsec > lsprox) {
        if (  leapsec >= LSPROX_SCHEDULE
           && lsprox   < LSPROX_SCHEDULE) {
            report_event(EVNT_DISARMED, NULL, NULL);
        }
        leapsec = lsprox;
    }

    if (leapsec >= LSPROX_SCHEDULE)
        leapdif = lsdata.tai_diff;
    else
        leapdif = 0;

    check_leap_sec_in_progress(&lsdata);

#ifdef AUTOKEY
    if (update_autokey)
        crypto_update_taichange();
#endif
}
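/*
 * Editor's note: an illustrative sketch, not part of the original
 * source. It isolates the cosine ("Google") smear used above: over a
 * window of w seconds ending at the leap, the applied offset is
 * -(1 - cos(pi*t/w))/2 per inserted second, so the correction ramps
 * in smoothly with zero slope at both ends. For a single inserted
 * second (tai_diff == 1) this reproduces the expression in
 * check_leapsec() above. The helper name is hypothetical. Extract to
 * a standalone file to run.
 */
#if 0
#include <math.h>
#include <stdio.h>

static double
smear_offset(double t, double w, int tai_diff)
{
    /* t seconds into a w-second smear window, tai_diff = +1 or -1 */
    return -(double)tai_diff * (1.0 - cos(M_PI * t / w)) / 2.0;
}

int
main(void)
{
    /* Halfway through a 7200 s window, half the second is absorbed. */
    printf("%.3f\n", smear_offset(   0.0, 7200.0, 1));    /*  0.000 */
    printf("%.3f\n", smear_offset(3600.0, 7200.0, 1));    /* -0.500 */
    printf("%.3f\n", smear_offset(7200.0, 7200.0, 1));    /* -1.000 */
    return 0;
}
#endif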
static void
check_leapsec(
    u_int32        now,
    const time_t * tpiv,
    int/*BOOL*/    reset)
{
    leap_result_t lsdata;
    u_int32       lsprox;

#ifndef SYS_WINNT    /* WinNT port has its own leap second handling */
# ifdef KERNEL_PLL
    leapsec_electric(pll_control && kern_enable);
# else
    leapsec_electric(0);
# endif
#endif
    if (reset) {
        lsprox = LSPROX_NOWARN;
        leapsec_reset_frame();
        memset(&lsdata, 0, sizeof(lsdata));
    } else if (leapsec_query(&lsdata, now, tpiv)) {
        /* Full hit. Possibly step the clock, but always
         * announce that the leap event has happened.
         */
        if (lsdata.warped < 0) {
            step_systime(lsdata.warped);
            msyslog(LOG_NOTICE, "Inserting positive leap second.");
        } else if (lsdata.warped > 0) {
            step_systime(lsdata.warped);
            msyslog(LOG_NOTICE, "Inserting negative leap second.");
        }
        report_event(EVNT_LEAP, NULL, NULL);
        lsprox  = LSPROX_NOWARN;
        leapsec = LSPROX_NOWARN;
        sys_tai = lsdata.tai_offs;
    } else {
        lsprox  = lsdata.proximity;
        sys_tai = lsdata.tai_offs;
    }

    /* We guard against panic alarming during the red alert phase.
     * Strange and evil things might happen if we go from stone cold
     * to piping hot in one step. If things are already that wobbly,
     * we let the normal clock correction take over, even if a jump
     * is involved.
     * Also make sure the alarming events are edge-triggered, that is,
     * created only when the threshold is crossed.
     */
    if (  (leapsec > 0 || lsprox < LSPROX_ALERT)
       && leapsec < lsprox) {
        if (  leapsec < LSPROX_SCHEDULE
           && lsprox >= LSPROX_SCHEDULE) {
            if (lsdata.dynamic)
                report_event(PEVNT_ARMED, sys_peer, NULL);
            else
                report_event(EVNT_ARMED, NULL, NULL);
        }
        leapsec = lsprox;
    }
    if (leapsec > lsprox) {
        if (  leapsec >= LSPROX_SCHEDULE
           && lsprox   < LSPROX_SCHEDULE) {
            report_event(EVNT_DISARMED, NULL, NULL);
        }
        leapsec = lsprox;
    }

    if (leapsec >= LSPROX_SCHEDULE)
        leapdif = lsdata.tai_diff;
    else
        leapdif = 0;
}
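/*
 * Editor's note: an illustrative sketch, not part of the original
 * source. Both check_leapsec() variants above raise ARMED/DISARMED
 * events only on the edge where the leap proximity crosses the
 * schedule threshold, never while it merely stays on one side. All
 * names below are hypothetical. Extract to a standalone file to run.
 */
#if 0
#include <stdio.h>

static int
leap_edge(int old_prox, int new_prox, int schedule)
{
    if (old_prox < schedule && new_prox >= schedule)
        return +1;      /* rising edge: report ARMED */
    if (old_prox >= schedule && new_prox < schedule)
        return -1;      /* falling edge: report DISARMED */
    return 0;           /* no crossing: stay quiet */
}

int
main(void)
{
    /* With a schedule threshold of 2: arm once, stay quiet, disarm. */
    printf("%d\n", leap_edge(0, 2, 2));     /*  1 */
    printf("%d\n", leap_edge(2, 3, 2));     /*  0 */
    printf("%d\n", leap_edge(3, 0, 2));     /* -1 */
    return 0;
}
#endif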
/*
 * local_clock - the NTP logical clock loop filter.
 *
 * Return codes:
 * -1   update ignored: exceeds panic threshold
 *  0   update ignored: popcorn or exceeds step threshold
 *  1   clock was slewed
 *  2   clock was stepped
 *
 * LOCKCLOCK: The only thing this routine does is set the
 * sys_rootdispersion variable equal to the peer dispersion.
 */
int
local_clock(
    struct peer *peer,        /* synch source peer structure */
    double fp_offset          /* clock offset (s) */
    )
{
    int rval;                 /* return code */
    u_long mu;                /* interval since last update (s) */
    double flladj;            /* FLL frequency adjustment (ppm) */
    double plladj;            /* PLL frequency adjustment (ppm) */
    double clock_frequency;   /* clock frequency adjustment (ppm) */
    double dtemp, etemp;      /* double temps */
#ifdef OPENSSL
    u_int32 *tpt;
    int i;
    u_int len;
    long togo;
#endif /* OPENSSL */

    /*
     * If the loop is opened or the NIST LOCKCLOCK is in use,
     * monitor and record the offsets anyway in order to determine
     * the open-loop response and then go home.
     */
#ifdef DEBUG
    if (debug)
        printf(
            "local_clock: assocID %d offset %.9f freq %.3f state %d\n",
            peer->associd, fp_offset, drift_comp * 1e6, state);
#endif
#ifdef LOCKCLOCK
    return (0);

#else /* LOCKCLOCK */
    if (!ntp_enable) {
        record_loop_stats(fp_offset, drift_comp, clock_jitter,
            clock_stability, sys_poll);
        return (0);
    }

    /*
     * If the clock is way off, panic is declared. The clock_panic
     * defaults to 1000 s; if set to zero, the panic will never
     * occur. The allow_panic defaults to FALSE, so the first panic
     * will exit. It can be set TRUE by a command line option, in
     * which case the clock will be set anyway and time marches on.
     * But, allow_panic will be set FALSE when the update is less
     * than the step threshold; so, subsequent panics will exit.
     */
    if (fabs(fp_offset) > clock_panic && clock_panic > 0 &&
        !allow_panic) {
        msyslog(LOG_ERR,
            "time correction of %.0f seconds exceeds sanity limit (%.0f); set clock manually to the correct UTC time.",
            fp_offset, clock_panic);
        return (-1);
    }

    /*
     * If simulating ntpdate, set the clock directly, rather than
     * using the discipline. The clock_max defines the step
     * threshold, above which the clock will be stepped instead of
     * slewed. The value defaults to 128 ms, but can be set to even
     * unreasonable values. If set to zero, the clock will never be
     * stepped. Note that a slew will persist beyond the life of
     * this program.
     *
     * Note that if ntpdate is active, the terminal does not detach,
     * so the termination comments print directly to the console.
     */
    if (mode_ntpdate) {
        if (fabs(fp_offset) > clock_max && clock_max > 0) {
            step_systime(fp_offset);
            msyslog(LOG_NOTICE, "time reset %+.6f s", fp_offset);
            printf("ntpd: time set %+.6fs\n", fp_offset);
        } else {
            adj_systime(fp_offset);
            msyslog(LOG_NOTICE, "time slew %+.6f s", fp_offset);
            printf("ntpd: time slew %+.6fs\n", fp_offset);
        }
        record_loop_stats(fp_offset, drift_comp, clock_jitter,
            clock_stability, sys_poll);
        exit (0);
    }

    /*
     * The huff-n'-puff filter finds the lowest delay in the recent
     * interval. This is used to correct the offset by one-half the
     * difference between the sample delay and minimum delay. This
     * is most effective if the delays are highly asymmetric and
     * clockhopping is avoided and the clock frequency wander is
     * relatively small.
     *
     * Note either there is no prefer peer or this update is from
     * the prefer peer.
     */
    if (sys_huffpuff != NULL &&
        (sys_prefer == NULL || sys_prefer == peer)) {
        if (peer->delay < sys_huffpuff[sys_huffptr])
            sys_huffpuff[sys_huffptr] = peer->delay;
        if (peer->delay < sys_mindly)
            sys_mindly = peer->delay;
        if (fp_offset > 0)
            dtemp = -(peer->delay - sys_mindly) / 2;
        else
            dtemp = (peer->delay - sys_mindly) / 2;
        fp_offset += dtemp;
#ifdef DEBUG
        if (debug)
            printf(
                "local_clock: size %d mindly %.6f huffpuff %.6f\n",
                sys_hufflen, sys_mindly, dtemp);
#endif
    }

    /*
     * Clock state machine transition function. This is where the
     * action is and defines how the system reacts to large phase
     * and frequency errors. There are two main regimes: when the
     * offset exceeds the step threshold and when it does not.
     * However, if the step threshold is set to zero, a step will
     * never occur. See the instruction manual for the details how
     * these actions interact with the command line options.
     *
     * Note the system poll is set to minpoll only if the clock is
     * stepped. Note also the kernel is disabled if step is
     * disabled or greater than 0.5 s.
     */
    clock_frequency = flladj = plladj = 0;
    mu = peer->epoch - sys_clocktime;
    if (clock_max == 0 || clock_max > 0.5)
        kern_enable = 0;
    rval = 1;
    if (fabs(fp_offset) > clock_max && clock_max > 0) {
        switch (state) {

        /*
         * In S_SYNC state we ignore the first outlier and
         * switch to S_SPIK state.
         */
        case S_SYNC:
            state = S_SPIK;
            return (0);

        /*
         * In S_FREQ state we ignore outliers and inliers. At
         * the first outlier after the stepout threshold,
         * compute the apparent frequency correction and step
         * the phase.
         */
        case S_FREQ:
            if (mu < clock_minstep)
                return (0);

            clock_frequency = (fp_offset - clock_offset) / mu;

            /* fall through to S_SPIK */

        /*
         * In S_SPIK state we ignore succeeding outliers until
         * either an inlier is found or the stepout threshold is
         * exceeded.
         */
        case S_SPIK:
            if (mu < clock_minstep)
                return (0);

            /* fall through to default */

        /*
         * We get here by default in S_NSET and S_FSET states
         * and from above in S_FREQ or S_SPIK states.
         *
         * In S_NSET state an initial frequency correction is
         * not available, usually because the frequency file has
         * not yet been written. Since the time is outside the
         * step threshold, the clock is stepped. The frequency
         * will be set directly following the stepout interval.
         *
         * In S_FSET state the initial frequency has been set
         * from the frequency file. Since the time is outside
         * the step threshold, the clock is stepped immediately,
         * rather than after the stepout interval. Guys get
         * nervous if it takes 17 minutes to set the clock for
         * the first time.
         *
         * In S_FREQ and S_SPIK states the stepout threshold has
         * expired and the phase is still above the step
         * threshold. Note that a single spike greater than the
         * step threshold is always suppressed, even at the
         * longer poll intervals.
         */
        default:
            step_systime(fp_offset);
            msyslog(LOG_NOTICE, "time reset %+.6f s", fp_offset);
            reinit_timer();
            tc_counter = 0;
            sys_poll = NTP_MINPOLL;
            sys_tai = 0;
            clock_jitter = LOGTOD(sys_precision);
            rval = 2;
            if (state == S_NSET) {
                rstclock(S_FREQ, peer->epoch, 0);
                return (rval);
            }
            break;
        }
        rstclock(S_SYNC, peer->epoch, 0);
    } else {
        /*
         * The offset is less than the step threshold. Calculate
         * the jitter as the exponentially weighted offset
         * differences.
         */
        etemp = SQUARE(clock_jitter);
        dtemp = SQUARE(max(fabs(fp_offset - last_offset),
            LOGTOD(sys_precision)));
        clock_jitter = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);
        switch (state) {

        /*
         * In S_NSET state this is the first update received and
         * the frequency has not been initialized. Adjust the
         * phase, but do not adjust the frequency until after
         * the stepout threshold.
         */
        case S_NSET:
            rstclock(S_FREQ, peer->epoch, fp_offset);
            break;

        /*
         * In S_FSET state this is the first update received and
         * the frequency has been initialized. Adjust the phase,
         * but do not adjust the frequency until the next
         * update.
         */
        case S_FSET:
            rstclock(S_SYNC, peer->epoch, fp_offset);
            break;

        /*
         * In S_FREQ state ignore updates until the stepout
         * threshold. After that, correct the phase and
         * frequency and switch to S_SYNC state.
         */
        case S_FREQ:
            if (mu < clock_minstep)
                return (0);

            clock_frequency = (fp_offset - clock_offset) / mu;
            rstclock(S_SYNC, peer->epoch, fp_offset);
            break;

        /*
         * We get here by default in S_SYNC and S_SPIK states.
         * Here we compute the frequency update due to PLL and
         * FLL contributions.
         */
        default:
            allow_panic = FALSE;

            /*
             * The FLL and PLL frequency gain constants
             * depend on the poll interval and Allan
             * intercept. The PLL is always used, but
             * becomes ineffective above the Allan
             * intercept. The FLL is not used below one-half
             * the Allan intercept. Above that the loop gain
             * increases in steps to 1 / CLOCK_AVG.
             */
            if (ULOGTOD(sys_poll) > allan_xpt / 2) {
                dtemp = CLOCK_FLL - sys_poll;
                flladj = (fp_offset - clock_offset) /
                    (max(mu, allan_xpt) * dtemp);
            }

            /*
             * For the PLL the integration interval
             * (numerator) is the minimum of the update
             * interval and poll interval. This allows
             * oversampling, but not undersampling.
             */
            etemp = min(mu, (u_long)ULOGTOD(sys_poll));
            dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll);
            plladj = fp_offset * etemp / (dtemp * dtemp);
            rstclock(S_SYNC, peer->epoch, fp_offset);
            break;
        }
    }

#ifdef OPENSSL
    /*
     * Scan the leapsecond table to determine the TAI offset. If
     * there is a scheduled leap in the future, set the leap
     * warning, but only if less than 30 days before the leap.
     */
    tpt = (u_int32 *)tai_leap.ptr;
    len = ntohl(tai_leap.vallen) / sizeof(u_int32);
    if (tpt != NULL) {
        for (i = 0; i < len; i++) {
            togo = (long)ntohl(tpt[i]) - (long)peer->rec.l_ui;
            if (togo > 0) {
                if (togo < CLOCK_JUNE)
                    leap_next |= LEAP_ADDSECOND;
                break;
            }
        }
#if defined(STA_NANO) && NTP_API == 4
        if (pll_control && kern_enable && sys_tai == 0) {
            memset(&ntv, 0, sizeof(ntv));
            ntv.modes = MOD_TAI;
            ntv.constant = i + TAI_1972 - 1;
            ntp_adjtime(&ntv);
        }
#endif /* STA_NANO */
        sys_tai = i + TAI_1972 - 1;
    }
#endif /* OPENSSL */

#ifdef KERNEL_PLL
    /*
     * This code segment works when clock adjustments are made using
     * precision time kernel support and the ntp_adjtime() system
     * call. This support is available in Solaris 2.6 and later,
     * Digital Unix 4.0 and later, FreeBSD, Linux and specially
     * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
     * DECstation 5000/240 and Alpha AXP, additional kernel
     * modifications provide a true microsecond clock and nanosecond
     * clock, respectively.
     *
     * Important note: The kernel discipline is used only if the
     * step threshold is less than 0.5 s, as anything higher can
     * lead to overflow problems. This might occur if some misguided
     * lad set the step threshold to something ridiculous.
     */
    if (pll_control && kern_enable) {

        /*
         * We initialize the structure for the ntp_adjtime()
         * system call. We have to convert everything to
         * microseconds or nanoseconds first. Do not update the
         * system variables if the ext_enable flag is set. In
         * this case, the external clock driver will update the
         * variables, which will be read later by the local
         * clock driver. Afterwards, remember the time and
         * frequency offsets for jitter and stability values and
         * to update the frequency file.
         */
        memset(&ntv, 0, sizeof(ntv));
        if (ext_enable) {
            ntv.modes = MOD_STATUS;
        } else {
            struct tm *tm = NULL;
            time_t tstamp;

#ifdef STA_NANO
            ntv.modes = MOD_BITS | MOD_NANO;
#else /* STA_NANO */
            ntv.modes = MOD_BITS;
#endif /* STA_NANO */
            if (clock_offset < 0)
                dtemp = -.5;
            else
                dtemp = .5;
#ifdef STA_NANO
            ntv.offset = (int32)(clock_offset * 1e9 + dtemp);
            ntv.constant = sys_poll;
#else /* STA_NANO */
            ntv.offset = (int32)(clock_offset * 1e6 + dtemp);
            ntv.constant = sys_poll - 4;
#endif /* STA_NANO */

            /*
             * The frequency is set directly only if
             * clock_frequency is nonzero coming out of FREQ
             * state.
             */
            if (clock_frequency != 0) {
                ntv.modes |= MOD_FREQUENCY;
                ntv.freq = (int32)((clock_frequency +
                    drift_comp) * 65536e6);
            }
            ntv.esterror = (u_int32)(clock_jitter * 1e6);
            ntv.maxerror = (u_int32)((sys_rootdelay / 2 +
                sys_rootdispersion) * 1e6);
            ntv.status = STA_PLL;

            /*
             * Set the leap bits in the status word, but
             * only on the last day of June or December.
             */
            tstamp = peer->rec.l_ui - JAN_1970;
            tm = gmtime(&tstamp);
            if (tm != NULL) {
                if ((tm->tm_mon + 1 == 6 &&
                    tm->tm_mday == 30) ||
                    (tm->tm_mon + 1 == 12 &&
                    tm->tm_mday == 31)) {
                    if (leap_next & LEAP_ADDSECOND)
                        ntv.status |= STA_INS;
                    else if (leap_next & LEAP_DELSECOND)
                        ntv.status |= STA_DEL;
                }
            }

            /*
             * If the PPS signal is up and enabled, light
             * the frequency bit. If the PPS driver is
             * working, light the phase bit as well. If not,
             * douse the lights, since somebody else may
             * have left the switch on.
             */
            if (pps_enable && pll_status & STA_PPSSIGNAL) {
                ntv.status |= STA_PPSFREQ;
                if (pps_stratum < STRATUM_UNSPEC)
                    ntv.status |= STA_PPSTIME;
            } else {
                ntv.status &= ~(STA_PPSFREQ | STA_PPSTIME);
            }
        }

        /*
         * Pass the stuff to the kernel. If it squeals, turn off
         * the pig. In any case, fetch the kernel offset and
         * frequency and pretend we did it here.
         */
        if (ntp_adjtime(&ntv) == TIME_ERROR) {
            NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
                msyslog(LOG_NOTICE,
                "kernel time sync error %04x", ntv.status);
            ntv.status &= ~(STA_PPSFREQ | STA_PPSTIME);
        }
        pll_status = ntv.status;
#ifdef STA_NANO
        clock_offset = ntv.offset / 1e9;
#else /* STA_NANO */
        clock_offset = ntv.offset / 1e6;
#endif /* STA_NANO */
        clock_frequency = ntv.freq / 65536e6;
        flladj = plladj = 0;

        /*
         * If the kernel PPS is lit, monitor its performance.
         */
        if (ntv.status & STA_PPSTIME) {
            pps_control = current_time;
#ifdef STA_NANO
            clock_jitter = ntv.jitter / 1e9;
#else /* STA_NANO */
            clock_jitter = ntv.jitter / 1e6;
#endif /* STA_NANO */
        }
    } else {
#endif /* KERNEL_PLL */

        /*
         * We get here if the kernel discipline is not enabled.
         * Adjust the clock frequency as the sum of the directly
         * computed frequency (if measured) and the PLL and FLL
         * increments.
         */
        clock_frequency = drift_comp + clock_frequency +
            flladj + plladj;
#ifdef KERNEL_PLL
    }
#endif /* KERNEL_PLL */

    /*
     * Clamp the frequency within the tolerance range and calculate
     * the frequency change since the last update.
     */
    if (fabs(clock_frequency) > NTP_MAXFREQ)
        NLOG(NLOG_SYNCEVENT | NLOG_SYSEVENT)
            msyslog(LOG_NOTICE,
            "frequency error %.0f PPM exceeds tolerance %.0f PPM",
            clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
    dtemp = SQUARE(clock_frequency - drift_comp);
    if (clock_frequency > NTP_MAXFREQ)
        drift_comp = NTP_MAXFREQ;
    else if (clock_frequency < -NTP_MAXFREQ)
        drift_comp = -NTP_MAXFREQ;
    else
        drift_comp = clock_frequency;

    /*
     * Calculate the wander as the exponentially weighted frequency
     * differences.
     */
    etemp = SQUARE(clock_stability);
    clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);

    /*
     * Here we adjust the poll interval by comparing the current
     * offset with the clock jitter. If the offset is less than the
     * clock jitter times a constant, then the averaging interval is
     * increased, otherwise it is decreased. A bit of hysteresis
     * helps calm the dance. Works best using burst mode.
     */
    if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
        tc_counter += sys_poll;
        if (tc_counter > CLOCK_LIMIT) {
            tc_counter = CLOCK_LIMIT;
            if (sys_poll < peer->maxpoll) {
                tc_counter = 0;
                sys_poll++;
            }
        }
    } else {
        tc_counter -= sys_poll << 1;
        if (tc_counter < -CLOCK_LIMIT) {
            tc_counter = -CLOCK_LIMIT;
            if (sys_poll > peer->minpoll) {
                tc_counter = 0;
                sys_poll--;
            }
        }
    }

    /*
     * Yibbidy, yibbidy, yibbidy; that's all folks.
     */
    record_loop_stats(clock_offset, drift_comp, clock_jitter,
        clock_stability, sys_poll);
#ifdef DEBUG
    if (debug)
        printf(
            "local_clock: mu %lu jitr %.6f freq %.3f stab %.6f poll %d count %d\n",
            mu, clock_jitter, drift_comp * 1e6,
            clock_stability * 1e6, sys_poll, tc_counter);
#endif /* DEBUG */
    return (rval);
#endif /* LOCKCLOCK */
}
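/*
 * Editor's note: an illustrative sketch, not part of the original
 * source. It models the tc_counter hysteresis used by both
 * local_clock() variants above: good updates (|offset| below the
 * gate times the jitter) credit the counter by the poll exponent and
 * eventually raise the poll; bad updates debit it at twice that rate
 * and eventually lower the poll. The limit value 30 is an assumption
 * standing in for CLOCK_LIMIT. Extract to a standalone file to run.
 */
#if 0
#include <stdio.h>

#define LIMIT 30    /* assumed hysteresis limit (CLOCK_LIMIT) */

static int tc_counter;

static int
adjust_poll(int poll, int minpoll, int maxpoll, int good)
{
    if (good) {
        tc_counter += poll;
        if (tc_counter > LIMIT) {
            tc_counter = LIMIT;
            if (poll < maxpoll) {
                tc_counter = 0;
                poll++;
            }
        }
    } else {
        tc_counter -= poll << 1;
        if (tc_counter < -LIMIT) {
            tc_counter = -LIMIT;
            if (poll > minpoll) {
                tc_counter = 0;
                poll--;
            }
        }
    }
    return poll;
}

int
main(void)
{
    int poll = 6, i;

    for (i = 0; i < 10; i++)    /* a run of clean updates */
        poll = adjust_poll(poll, 6, 10, 1);
    printf("poll after 10 good updates: %d\n", poll);    /* 7 */
    return 0;
}
#endif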