/*
 * get_systime - return system time in NTP timestamp format.
 */
void
get_systime(
        l_fp *now               /* system time */
        )
{
        double dtemp;
        struct timeval tv;      /* seconds and microseconds */

        /*
         * Convert Unix timeval from seconds and microseconds to NTP
         * seconds and fraction.
         */
        gettimeofday(&tv, NULL);
        now->l_i = tv.tv_sec + JAN_1970;
        dtemp = 0;
        if (sys_tick > FUZZ)
                dtemp = ntp_random() * 2. / FRAC * sys_tick * 1e6;
        else if (sys_tick > 0)
                dtemp = ntp_random() * 2. / FRAC;
        dtemp = (tv.tv_usec + dtemp) * 1e-6;
        if (dtemp >= 1.) {
                dtemp -= 1.;
                now->l_i++;
        } else if (dtemp < 0) {
                dtemp += 1.;
                now->l_i--;
        }
        now->l_uf = (uint32_t)(dtemp * FRAC);
}
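The fuzz term above scales a random value into at most one clock tick. The following standalone sketch (not ntpd code) illustrates the arithmetic, assuming FRAC is 2^32 and ntp_random() returns a 31-bit value; random() stands in for ntp_random() and the 1 us sys_tick is a hypothetical value.

/*
 * Illustrative sketch (not ntpd code): with FRAC = 2^32 and a 31-bit
 * random value, ntp_random() * 2. / FRAC falls in [0, 1), so the fuzz
 * added to the reading spans less than one clock tick (sys_tick
 * seconds), here expressed in microseconds.
 */
#include <stdio.h>
#include <stdlib.h>

#define FRAC 4294967296.                /* 2^32, NTP fraction scale */

int
main(void)
{
        double sys_tick = 1e-6;         /* assumed 1 us clock granularity */
        int i;

        for (i = 0; i < 5; i++) {
                long r = random() & 0x7fffffff;   /* stand-in for ntp_random() */
                double fuzz_us = r * 2. / FRAC * sys_tick * 1e6;
                printf("random %10ld -> fuzz %.6f us (always < %.6f us)\n",
                       r, fuzz_us, sys_tick * 1e6);
        }
        return 0;
}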
/*
 * init_peer - initialize peer data structures and counters
 *
 * N.B. We use the random number routine in here. It had better be
 * initialized prior to getting here.
 */
void
init_peer(void)
{
        int i;

        /*
         * Initialize peer free list from static allocation.
         */
        for (i = COUNTOF(init_peer_alloc) - 1; i >= 0; i--)
                LINK_SLIST(peer_free, &init_peer_alloc[i], p_link);
        total_peer_structs = COUNTOF(init_peer_alloc);
        peer_free_count = COUNTOF(init_peer_alloc);

        /*
         * Initialize our first association ID
         */
        do
                current_association_ID = ntp_random() & ASSOCID_MAX;
        while (!current_association_ID);
}
/*
 * init_peer - initialize peer data structures and counters
 *
 * N.B. We use the random number routine in here. It had better be
 * initialized prior to getting here.
 */
void
init_peer(void)
{
        register int i;

        /*
         * Clear hash table and counters.
         */
        for (i = 0; i < NTP_HASH_SIZE; i++) {
                peer_hash[i] = 0;
                peer_hash_count[i] = 0;
                assoc_hash[i] = 0;
                assoc_hash_count[i] = 0;
        }

        /*
         * Clear stat counters
         */
        findpeer_calls = peer_allocations = 0;
        assocpeer_calls = peer_demobilizations = 0;

        /*
         * Initialize peer memory.
         */
        peer_free = 0;
        for (i = 0; i < INIT_PEER_ALLOC; i++) {
                init_peer_alloc[i].next = peer_free;
                peer_free = &init_peer_alloc[i];
        }
        total_peer_structs = INIT_PEER_ALLOC;
        peer_free_count = INIT_PEER_ALLOC;

        /*
         * Initialize our first association ID
         */
        while ((current_association_ID = ntp_random() & 0xffff) == 0);
}
/*
 * ntp_monitor - record stats about this packet
 *
 * Returns supplied restriction flags, with RES_LIMITED and RES_KOD
 * cleared unless the packet should not be responded to normally
 * (RES_LIMITED) and possibly should trigger a KoD response (RES_KOD).
 * The returned flags are saved in the MRU entry, so that it reflects
 * whether the last packet from that source triggered rate limiting,
 * and if so, possible KoD response.  This implies you can not tell
 * whether a given address is eligible for rate limiting/KoD from the
 * monlist restrict bits, only whether or not the last packet triggered
 * such responses.  ntpdc -c reslist lets you see whether RES_LIMITED
 * or RES_KOD is lit for a particular address before ntp_monitor()'s
 * typical dousing.
 */
u_short
ntp_monitor(
        struct recvbuf *rbufp,
        u_short         flags
        )
{
        l_fp            interval_fp;
        struct pkt *    pkt;
        mon_entry *     mon;
        mon_entry *     oldest;
        int             oldest_age;
        u_int           hash;
        u_short         restrict_mask;
        u_char          mode;
        u_char          version;
        int             interval;
        int             head;           /* headway increment */
        int             leak;           /* new headway */
        int             limit;          /* average threshold */

        REQUIRE(rbufp != NULL);

        if (mon_enabled == MON_OFF)
                return ~(RES_LIMITED | RES_KOD) & flags;

        pkt = &rbufp->recv_pkt;
        hash = MON_HASH(&rbufp->recv_srcadr);
        mode = PKT_MODE(pkt->li_vn_mode);
        version = PKT_VERSION(pkt->li_vn_mode);
        mon = mon_hash[hash];

        /*
         * We keep track of all traffic for a given IP in one entry,
         * otherwise cron'ed ntpdate or similar evades RES_LIMITED.
         */
        for (; mon != NULL; mon = mon->hash_next)
                if (SOCK_EQ(&mon->rmtadr, &rbufp->recv_srcadr))
                        break;

        if (mon != NULL) {
                interval_fp = rbufp->recv_time;
                L_SUB(&interval_fp, &mon->last);
                /* add one-half second to round up */
                L_ADDUF(&interval_fp, 0x80000000);
                interval = interval_fp.l_i;
                mon->last = rbufp->recv_time;
                NSRCPORT(&mon->rmtadr) = NSRCPORT(&rbufp->recv_srcadr);
                mon->count++;
                restrict_mask = flags;
                mon->vn_mode = VN_MODE(version, mode);

                /* Shuffle to the head of the MRU list. */
                UNLINK_DLIST(mon, mru);
                LINK_DLIST(mon_mru_list, mon, mru);

                /*
                 * At this point the most recent arrival is first in the
                 * MRU list.  Decrease the counter by the headway, but
                 * not less than zero.
                 */
                mon->leak -= interval;
                mon->leak = max(0, mon->leak);
                head = 1 << ntp_minpoll;
                leak = mon->leak + head;
                limit = NTP_SHIFT * head;

                DPRINTF(2, ("MRU: interval %d headway %d limit %d\n",
                            interval, leak, limit));

                /*
                 * If the minimum and average thresholds are not
                 * exceeded, douse the RES_LIMITED and RES_KOD bits and
                 * increase the counter by the headway increment.  Note
                 * that we give a 1-s grace for the minimum threshold
                 * and a 2-s grace for the headway increment.  If one or
                 * both thresholds are exceeded and the old counter is
                 * less than the average threshold, set the counter to
                 * the average threshold plus the increment and leave
                 * the RES_LIMITED and RES_KOD bits lit.  Otherwise,
                 * leave the counter alone and douse the RES_KOD bit.
                 * This rate-limits the KoDs to no less than the average
                 * headway.
                 */
                if (interval + 1 >= ntp_minpkt && leak < limit) {
                        mon->leak = leak - 2;
                        restrict_mask &= ~(RES_LIMITED | RES_KOD);
                } else if (mon->leak < limit)
                        mon->leak = limit + head;
                else
                        restrict_mask &= ~RES_KOD;
                mon->flags = restrict_mask;

                return mon->flags;
        }

        /*
         * If we got here, this is the first we've heard of this
         * guy.  Get him some memory, either from the free list
         * or from the tail of the MRU list.
         *
         * The following ntp.conf "mru" knobs come into play determining
         * the depth (or count) of the MRU list:
         * - mru_mindepth ("mru mindepth") is a floor beneath which
         *   entries are kept without regard to their age.  The
         *   default is 600 which matches the longtime implementation
         *   limit on the total number of entries.
         * - mru_maxage ("mru maxage") is a ceiling on the age in
         *   seconds of entries.  Entries older than this are
         *   reclaimed once mru_mindepth is exceeded.  64s default.
         *   Note that entries older than this can easily survive
         *   as they are reclaimed only as needed.
         * - mru_maxdepth ("mru maxdepth") is a hard limit on the
         *   number of entries.
         * - "mru maxmem" sets mru_maxdepth to the number of entries
         *   which fit in the given number of kilobytes.  The default is
         *   1024, or 1 megabyte.
         * - mru_initalloc ("mru initalloc") sets the count of the
         *   initial allocation of MRU entries.
         * - "mru initmem" sets mru_initalloc in units of kilobytes.
         *   The default is 4.
         * - mru_incalloc ("mru incalloc") sets the number of entries to
         *   allocate on-demand each time the free list is empty.
         * - "mru incmem" sets mru_incalloc in units of kilobytes.
         *   The default is 4.
         * Whichever of "mru maxmem" or "mru maxdepth" occurs last in
         * ntp.conf controls.  Similarly for "mru initalloc" and "mru
         * initmem", and for "mru incalloc" and "mru incmem".
         */
        if (mru_entries < mru_mindepth) {
                if (NULL == mon_free)
                        mon_getmoremem();
                UNLINK_HEAD_SLIST(mon, mon_free, hash_next);
        } else {
                oldest = TAIL_DLIST(mon_mru_list, mru);
                oldest_age = 0;         /* silence uninit warning */
                if (oldest != NULL) {
                        interval_fp = rbufp->recv_time;
                        L_SUB(&interval_fp, &oldest->last);
                        /* add one-half second to round up */
                        L_ADDUF(&interval_fp, 0x80000000);
                        oldest_age = interval_fp.l_i;
                }
                /* note -1 is legal for mru_maxage (disables) */
                if (oldest != NULL && mru_maxage < oldest_age) {
                        mon_reclaim_entry(oldest);
                        mon = oldest;
                } else if (mon_free != NULL || mru_alloc < mru_maxdepth) {
                        if (NULL == mon_free)
                                mon_getmoremem();
                        UNLINK_HEAD_SLIST(mon, mon_free, hash_next);
                /* Preempt from the MRU list if old enough. */
                } else if (ntp_random() / (2. * FRAC) >
                           (double)oldest_age / mon_age) {
                        return ~(RES_LIMITED | RES_KOD) & flags;
                } else {
                        mon_reclaim_entry(oldest);
                        mon = oldest;
                }
        }

        INSIST(mon != NULL);

        /*
         * Got one, initialize it
         */
        mru_entries++;
        mru_peakentries = max(mru_peakentries, mru_entries);
        mon->last = rbufp->recv_time;
        mon->first = mon->last;
        mon->count = 1;
        mon->flags = ~(RES_LIMITED | RES_KOD) & flags;
        mon->leak = 0;
        memcpy(&mon->rmtadr, &rbufp->recv_srcadr, sizeof(mon->rmtadr));
        mon->vn_mode = VN_MODE(version, mode);
        mon->lcladr = rbufp->dstadr;
        mon->cast_flags = (u_char)(((rbufp->dstadr->flags & INT_MCASTOPEN)
                                    && rbufp->fd == mon->lcladr->fd)
                                   ? MDF_MCAST
                                   : rbufp->fd == mon->lcladr->bfd
                                         ? MDF_BCAST
                                         : MDF_UCAST);

        /*
         * Drop him into front of the hash table.  Also put him on top of
         * the MRU list.
         */
        LINK_SLIST(mon_hash[hash], mon, hash_next);
        LINK_DLIST(mon_mru_list, mon, mru);

        return mon->flags;
}
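The headway accounting in ntp_monitor() is a leaky bucket: each arrival drains the bucket by the inter-arrival interval and, if the packet is accepted, refills it by one headway increment. The standalone sketch below (not ntpd code) replays that arithmetic with assumed parameters (ntp_minpoll = 6, NTP_SHIFT = 8, ntp_minpkt = 2, a fixed 8 s packet spacing) to show when a too-fast client trips the limit.

/*
 * Illustrative sketch (not ntpd code) of the headway bookkeeping in
 * ntp_monitor(): "bucket" plays the role of mon->leak.  With the
 * assumed parameters a client averaging less than one packet per
 * (1 << ntp_minpoll) seconds never reaches the average threshold.
 */
#include <stdio.h>

#define NTP_SHIFT       8                       /* as in ntpd */

int
main(void)
{
        int ntp_minpoll = 6;                    /* assumed: 64 s headway increment */
        int ntp_minpkt = 2;                     /* assumed minimum spacing, seconds */
        int head = 1 << ntp_minpoll;            /* headway increment */
        int limit = NTP_SHIFT * head;           /* average threshold */
        int interval = 8;                       /* simulated inter-packet spacing */
        int bucket = 0;                         /* mon->leak equivalent */
        int pkt;

        for (pkt = 1; pkt <= 80; pkt++) {
                bucket -= interval;             /* drain by the headway elapsed */
                if (bucket < 0)
                        bucket = 0;
                if (interval + 1 >= ntp_minpkt && bucket + head < limit) {
                        bucket += head - 2;     /* accepted; grow by increment */
                } else {
                        printf("packet %d rate-limited (bucket %d, limit %d)\n",
                               pkt, bucket, limit);
                        break;
                }
        }
        return 0;
}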
/*
 * get_systime - return system time in NTP timestamp format.
 */
void
get_systime(
        l_fp *now               /* system time */
        )
{
        static struct timespec  ts_prev;        /* prior os time */
        static l_fp             lfp_prev;       /* prior result */
        static double           dfuzz_prev;     /* prior fuzz */
        struct timespec ts;     /* seconds and nanoseconds */
        struct timespec ts_min; /* earliest permissible */
        struct timespec ts_lam; /* lamport fictional increment */
        struct timespec ts_prev_log;    /* for msyslog only */
        double  dfuzz;
        double  ddelta;
        l_fp    result;
        l_fp    lfpfuzz;
        l_fp    lfpdelta;

        get_ostime(&ts);
        DEBUG_REQUIRE(systime_init_done);
        ENTER_GET_SYSTIME_CRITSEC();

        /*
         * After default_get_precision() has set a nonzero sys_fuzz,
         * ensure every reading of the OS clock advances by at least
         * sys_fuzz over the prior reading, thereby assuring each
         * fuzzed result is strictly later than the prior.  Limit the
         * necessary fiction to 1 second.
         */
        if (!USING_SIGIO()) {
                ts_min = add_tspec_ns(ts_prev, sys_fuzz_nsec);
                if (cmp_tspec(ts, ts_min) < 0) {
                        ts_lam = sub_tspec(ts_min, ts);
                        if (ts_lam.tv_sec > 0 && !lamport_violated) {
                                msyslog(LOG_ERR,
                                        "get_systime Lamport advance exceeds one second (%.9f)",
                                        ts_lam.tv_sec +
                                            1e-9 * ts_lam.tv_nsec);
                                exit(1);
                        }
                        if (!lamport_violated)
                                ts = ts_min;
                }
                ts_prev_log = ts_prev;
                ts_prev = ts;
        } else {
                /*
                 * Quiet "ts_prev_log.tv_sec may be used uninitialized"
                 * warning from x86 gcc 4.5.2.
                 */
                ZERO(ts_prev_log);
        }

        /* convert from timespec to l_fp fixed-point */
        result = tspec_stamp_to_lfp(ts);

        /*
         * Add in the fuzz.
         */
        dfuzz = ntp_random() * 2. / FRAC * sys_fuzz;
        DTOLFP(dfuzz, &lfpfuzz);
        L_ADD(&result, &lfpfuzz);

        /*
         * Ensure result is strictly greater than prior result (ignoring
         * sys_residual's effect for now) once sys_fuzz has been
         * determined.
         */
        if (!USING_SIGIO()) {
                if (!L_ISZERO(&lfp_prev) && !lamport_violated) {
                        if (!L_ISGTU(&result, &lfp_prev) &&
                            sys_fuzz > 0.) {
                                msyslog(LOG_ERR, "ts_prev %s ts_min %s",
                                        tspectoa(ts_prev_log),
                                        tspectoa(ts_min));
                                msyslog(LOG_ERR, "ts %s", tspectoa(ts));
                                msyslog(LOG_ERR, "sys_fuzz %ld nsec, prior fuzz %.9f",
                                        sys_fuzz_nsec, dfuzz_prev);
                                msyslog(LOG_ERR, "this fuzz %.9f",
                                        dfuzz);
                                lfpdelta = lfp_prev;
                                L_SUB(&lfpdelta, &result);
                                LFPTOD(&lfpdelta, ddelta);
                                msyslog(LOG_ERR,
                                        "prev get_systime 0x%x.%08x is %.9f later than 0x%x.%08x",
                                        lfp_prev.l_ui, lfp_prev.l_uf,
                                        ddelta, result.l_ui, result.l_uf);
                        }
                }
                lfp_prev = result;
                dfuzz_prev = dfuzz;
                if (lamport_violated)
                        lamport_violated = FALSE;
        }
        LEAVE_GET_SYSTIME_CRITSEC();
        *now = result;
}
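The "Lamport" rule above forces each raw clock reading to be at least sys_fuzz later than the previous one before fuzz is added, so fuzzed results stay strictly increasing. The standalone sketch below (not ntpd code) mimics that minimum-advance enforcement with simple timespec helpers; the 1 us sys_fuzz_nsec and helper names are assumptions for illustration only.

/*
 * Illustrative sketch (not ntpd code) of the minimum-advance rule in
 * get_systime(): a reading that has not advanced by sys_fuzz_nsec over
 * the prior one is pushed forward to prev + sys_fuzz_nsec, keeping the
 * sequence of readings strictly increasing.
 */
#include <stdio.h>
#include <time.h>

static long sys_fuzz_nsec = 1000;       /* assumed fuzz floor, 1 us */

/* add nanoseconds to a timespec, normalizing the result */
static struct timespec
add_ns(struct timespec t, long ns)
{
        t.tv_nsec += ns;
        while (t.tv_nsec >= 1000000000L) {
                t.tv_nsec -= 1000000000L;
                t.tv_sec++;
        }
        return t;
}

/* return negative/zero/positive as a is earlier/equal/later than b */
static int
cmp_ts(struct timespec a, struct timespec b)
{
        if (a.tv_sec != b.tv_sec)
                return (a.tv_sec < b.tv_sec) ? -1 : 1;
        if (a.tv_nsec != b.tv_nsec)
                return (a.tv_nsec < b.tv_nsec) ? -1 : 1;
        return 0;
}

int
main(void)
{
        struct timespec prev = { 0, 0 };
        struct timespec ts, ts_min;
        int i;

        for (i = 0; i < 5; i++) {
                clock_gettime(CLOCK_REALTIME, &ts);
                ts_min = add_ns(prev, sys_fuzz_nsec);
                if (prev.tv_sec != 0 && cmp_ts(ts, ts_min) < 0)
                        ts = ts_min;    /* clock did not advance enough */
                prev = ts;
                printf("reading %d: %ld.%09ld\n", i,
                       (long)ts.tv_sec, ts.tv_nsec);
        }
        return 0;
}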
/*
 * get_systime - return system time in NTP timestamp format.
 */
void
get_systime(
        l_fp *now               /* system time */
        )
{
        double dtemp;

#if defined(HAVE_CLOCK_GETTIME) || defined(HAVE_GETCLOCK)
        struct timespec ts;     /* seconds and nanoseconds */

        /*
         * Convert Unix timespec from seconds and nanoseconds to NTP
         * seconds and fraction.
         */
# ifdef HAVE_CLOCK_GETTIME
        clock_gettime(CLOCK_REALTIME, &ts);
# else
        getclock(TIMEOFDAY, &ts);
# endif
        now->l_i = (int32)ts.tv_sec + JAN_1970;
        dtemp = 0;
        if (sys_tick > FUZZ)
                dtemp = ntp_random() * 2. / FRAC * sys_tick * 1e9;
        else if (sys_tick > 0)
                dtemp = ntp_random() * 2. / FRAC;
        dtemp = (ts.tv_nsec + dtemp) * 1e-9 + sys_residual;
        if (dtemp >= 1.) {
                dtemp -= 1.;
                now->l_i++;
        } else if (dtemp < 0) {
                dtemp += 1.;
                now->l_i--;
        }
        now->l_uf = (u_int32)(dtemp * FRAC);

#else /* HAVE_CLOCK_GETTIME || HAVE_GETCLOCK */
        struct timeval tv;      /* seconds and microseconds */

        /*
         * Convert Unix timeval from seconds and microseconds to NTP
         * seconds and fraction.
         */
        GETTIMEOFDAY(&tv, NULL);
        now->l_i = tv.tv_sec + JAN_1970;
        dtemp = 0;
        if (sys_tick > FUZZ)
                dtemp = ntp_random() * 2. / FRAC * sys_tick * 1e6;
        else if (sys_tick > 0)
                dtemp = ntp_random() * 2. / FRAC;
        dtemp = (tv.tv_usec + dtemp) * 1e-6 + sys_residual;
        if (dtemp >= 1.) {
                dtemp -= 1.;
                now->l_i++;
        } else if (dtemp < 0) {
                dtemp += 1.;
                now->l_i--;
        }
        now->l_uf = (u_int32)(dtemp * FRAC);
#endif /* HAVE_CLOCK_GETTIME || HAVE_GETCLOCK */
}