/*
 * set_freq - set clock frequency
 */
static void
set_freq(
    double freq     /* frequency update */
    )
{
    char tbuf[80];

    drift_comp = freq;
#ifdef KERNEL_PLL
    /*
     * If the kernel is enabled, update the kernel frequency.
     */
    if (pll_control && kern_enable) {
        memset(&ntv, 0, sizeof(ntv));
        ntv.modes = MOD_FREQUENCY;
        ntv.freq = DTOFREQ(drift_comp);
        ntp_adjtime(&ntv);
        snprintf(tbuf, sizeof(tbuf), "kernel %.3f PPM",
            drift_comp * 1e6);
        report_event(EVNT_FSET, NULL, tbuf);
    } else {
        snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM",
            drift_comp * 1e6);
        report_event(EVNT_FSET, NULL, tbuf);
    }
#else /* KERNEL_PLL */
    snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM",
        drift_comp * 1e6);
    report_event(EVNT_FSET, NULL, tbuf);
#endif /* KERNEL_PLL */
}
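/*
 * For reference, a minimal sketch (assumed, not taken from this file) of
 * the DTOFREQ() conversion used above: the timex API expresses frequency
 * as a scaled integer in units of 2^-16 PPM, so a drift stored as a
 * fraction (s/s) is scaled by 65536e6 on the way in and divided back out
 * on the way out.
 */
#include <stdint.h>

#define SKETCH_DTOFREQ(x)   ((int32_t)((x) * 65536e6)) /* s/s -> 2^-16 PPM */
#define SKETCH_FREQTOD(x)   ((x) / 65536e6)            /* 2^-16 PPM -> s/s */

/* Example: a 12.5 PPM drift (12.5e-6 s/s) becomes 12.5 * 65536 = 819200. */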
inline void
schedule_tick(void)
{
    TCNT = 0;   /* clear timer state */
    if (g_cycle_flag != 0) {
        report_event(EVENT_CODE_SCHED_OVER,
            ((g_next_slice << 8) | g_next_item), MODE_UPDATE);
        g_cycle_flag++;
    } else {
        g_cycle_flag = 1;

        /* re-enable global interrupts */
        sei();

        /* execute the schedule for this slice */
        exec_slice();
        if (++g_next_slice >= NUMBER_OF_SCHEDULE_SLOTS)
            g_next_slice = 0;

        /* check for cycle overrun */
        if (g_cycle_flag > 1)
            report_event(EVENT_CODE_SCHED_OVER_CNT, g_cycle_flag,
                MODE_UPDATE);

        /* service the watchdog timer */
        wdt_reset();

        g_cycle_flag = 0;
    }
}
static void
inotify_callback(char *path, int event)
{
    if (event & IN_CREATE || event & IN_MOVED_TO) {
        report_event("CREATE", path);
        report_event("CHANGE", path);
        return;
    }
    if (event & IN_MODIFY) {
        report_event("CHANGE", path);
        return;
    }
    if (event & IN_ATTRIB) {
        report_event("STATS", path);
        return;
    }
    if (event & IN_DELETE || event & IN_MOVED_FROM) {
        report_event("DELETE", path);
        return;
    }
    if (event & IN_UNMOUNT) {
        output("RESET\n");
        userlog(LOG_DEBUG, "RESET");
        return;
    }
}
static char*
parse_end(PSTATE* p_state, char *beg, char *end, U32 utf8, SV* self)
{
    char *s = beg + 2;
    hctype_t name_first, name_char;

    if (p_state->strict_names || p_state->xml_mode) {
        name_first = HCTYPE_NAME_FIRST;
        name_char  = HCTYPE_NAME_CHAR;
    }
    else {
        name_first = name_char = HCTYPE_NOT_SPACE_GT;
    }

    if (isHCTYPE(*s, name_first)) {
        token_pos_t tagname;
        tagname.beg = s;
        s++;
        while (s < end && isHCTYPE(*s, name_char))
            s++;
        tagname.end = s;

        if (p_state->strict_end) {
            while (isHSPACE(*s))
                s++;
        }
        else {
            s = skip_until_gt(s, end);
        }
        if (s < end) {
            if (*s == '>') {
                s++;
                /* a complete end tag has been recognized */
                report_event(p_state, E_END, beg, s, utf8,
                             &tagname, 1, self);
                return s;
            }
        }
        else {
            return beg;
        }
    }
    else if (!p_state->strict_comment) {
        s = skip_until_gt(s, end);
        if (s < end) {
            token_pos_t token;
            token.beg = beg + 2;
            token.end = s;
            s++;
            report_event(p_state, E_COMMENT, beg, s, utf8,
                         &token, 1, self);
            return s;
        }
        else {
            return beg;
        }
    }

    return 0;
}
static void
flush_pending_text(PSTATE* p_state, SV* self)
{
    dTHX;
    bool   old_unbroken_text = p_state->unbroken_text;
    SV*    old_pend_text     = p_state->pend_text;
    bool   old_is_cdata      = p_state->is_cdata;
    STRLEN old_offset        = p_state->offset;
    STRLEN old_line          = p_state->line;
    STRLEN old_column        = p_state->column;

    assert(p_state->pend_text && SvOK(p_state->pend_text));

    p_state->unbroken_text = 0;
    p_state->pend_text     = 0;
    p_state->is_cdata      = p_state->pend_text_is_cdata;
    p_state->offset        = p_state->pend_text_offset;
    p_state->line          = p_state->pend_text_line;
    p_state->column        = p_state->pend_text_column;

    report_event(p_state, E_TEXT,
                 SvPVX(old_pend_text), SvEND(old_pend_text),
                 SvUTF8(old_pend_text), 0, 0, self);
    SvOK_off(old_pend_text);

    p_state->unbroken_text = old_unbroken_text;
    p_state->pend_text     = old_pend_text;
    p_state->is_cdata      = old_is_cdata;
    p_state->offset        = old_offset;
    p_state->line          = old_line;
    p_state->column        = old_column;
}
static char*
parse_process(PSTATE* p_state, char *beg, char *end, U32 utf8, SV* self)
{
    char *s = beg + 2;  /* skip '<?' */
    /* processing instruction */
    token_pos_t token_pos;
    token_pos.beg = s;
    while (s < end) {
        if (*s == '>') {
            token_pos.end = s;
            s++;
            if (p_state->xml_mode) {
                /* XML processing instructions are ended by "?>" */
                if (s - beg < 4 || s[-2] != '?')
                    continue;
                token_pos.end = s - 2;
            }
            /* a complete processing instruction seen */
            report_event(p_state, E_PROCESS, beg, s, utf8,
                         &token_pos, 1, self);
            return s;
        }
        s++;
    }
    return beg;  /* could not find end */
}
/*
 * acts_close - close and prepare for next call.
 *
 * In CLOSE state no further protocol actions are required
 * other than to close and release the device and prepare to
 * dial the next number if necessary.
 */
void
acts_close(
    struct peer *peer
    )
{
    struct actsunit *up;
    struct refclockproc *pp;
    char lockfile[128];
    int dtr;

    pp = peer->procptr;
    up = pp->unitptr;
    if (pp->io.fd != -1) {
        report_event(PEVNT_CLOCK, peer, "close");
        dtr = TIOCM_DTR;
        if (ioctl(pp->io.fd, TIOCMBIC, &dtr) < 0)
            msyslog(LOG_ERR, "acts: ioctl(TIOCMBIC) failed: %m");
        io_closeclock(&pp->io);
        pp->io.fd = -1;
    }
    if (pp->sloppyclockflag & CLK_FLAG2) {
        snprintf(lockfile, sizeof(lockfile), LOCKFILE, up->unit);
        unlink(lockfile);
    }
    if (up->msgcnt == 0 && up->retry > 0) {
        if (sys_phone[up->retry] != NULL) {
            up->state = S_IDLE;
            up->timer = REDIAL;
            return;
        }
    }
    up->state = S_IDLE;
    up->timer = 0;
}
static void
stop_kern_loop(void)
{
    if (pll_control && kern_enable)
        report_event(EVNT_KERN, NULL,
            "kernel time sync disabled");
}
static void
start_kern_loop(void)
{
    static int atexit_done;
    int ntp_adj_ret;

    pll_control = TRUE;
    ZERO(ntv);
    ntv.modes = MOD_BITS;
    ntv.status = STA_PLL;
    ntv.maxerror = MAXDISPERSE;
    ntv.esterror = MAXDISPERSE;
    ntv.constant = sys_poll;
    /*
     * Why is it that here constant is unconditionally set to
     * sys_poll, whereas elsewhere it is modified depending on
     * nanosecond vs. microsecond kernel?
     */
#ifdef SIGSYS
    /*
     * Use sigsetjmp() to save state and then call ntp_adjtime(); if
     * it fails, then pll_trap() will set pll_control FALSE before
     * returning control using siglongjmp().
     */
    newsigsys.sa_handler = pll_trap;
    newsigsys.sa_flags = 0;
    if (sigaction(SIGSYS, &newsigsys, &sigsys)) {
        msyslog(LOG_ERR, "sigaction() trap SIGSYS: %m");
        pll_control = FALSE;
    } else {
        if (sigsetjmp(env, 1) == 0) {
            if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
                ntp_adjtime_error_handler(__func__, &ntv,
                    ntp_adj_ret, errno, 0, 0, __LINE__ - 1);
            }
        }
        if (sigaction(SIGSYS, &sigsys, NULL)) {
            msyslog(LOG_ERR, "sigaction() restore SIGSYS: %m");
            pll_control = FALSE;
        }
    }
#else /* SIGSYS */
    if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
        ntp_adjtime_error_handler(__func__, &ntv, ntp_adj_ret,
            errno, 0, 0, __LINE__ - 1);
    }
#endif /* SIGSYS */

    /*
     * Save the result status and light up an external clock
     * if available.
     */
    pll_status = ntv.status;
    if (pll_control) {
        if (!atexit_done) {
            atexit_done = TRUE;
            atexit(&stop_kern_loop);
        }
#ifdef STA_NANO
        if (pll_status & STA_CLK)
            ext_enable = TRUE;
#endif /* STA_NANO */
        report_event(EVNT_KERN, NULL,
            "kernel time sync enabled");
    }
}
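/*
 * A minimal sketch (assumed, not from this file) of the pll_trap()
 * handler referenced above: on a SIGSYS from an unimplemented
 * ntp_adjtime() it marks the kernel discipline unavailable and jumps
 * back to the sigsetjmp() point so the failed call unwinds safely.
 */
static RETSIGTYPE
pll_trap_sketch(int arg)
{
    pll_control = FALSE;    /* kernel discipline unusable */
    siglongjmp(env, 1);     /* resume after sigsetjmp() above */
}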
template< typename CharT >
BOOST_LOG_EXPORT void basic_event_log_backend< CharT >::consume(record_type const& record)
{
    if (!m_pImpl->m_EventComposer.empty())
    {
        log::aux::cleanup_guard< insertion_list > cleaner(m_pImpl->m_Insertions);

        // Get event ID and construct insertions
        DWORD id = m_pImpl->m_EventComposer(record, m_pImpl->m_Insertions);
        WORD string_count = static_cast< WORD >(m_pImpl->m_Insertions.size());
        scoped_array< const char_type* > strings(new const char_type*[string_count]);
        for (WORD i = 0; i < string_count; ++i)
            strings[i] = m_pImpl->m_Insertions[i].c_str();

        // Get event type
        WORD event_type = EVENTLOG_INFORMATION_TYPE;
        if (!m_pImpl->m_LevelMapper.empty())
            event_type = static_cast< WORD >(m_pImpl->m_LevelMapper(record));

        WORD event_category = 0;
        if (!m_pImpl->m_CategoryMapper.empty())
            event_category = static_cast< WORD >(m_pImpl->m_CategoryMapper(record));

        report_event(
            m_pImpl->m_SourceHandle,    // Event log handle.
            event_type,                 // Event type.
            event_category,             // Event category.
            id,                         // Event identifier.
            NULL,                       // No user security identifier.
            string_count,               // Number of substitution strings.
            0,                          // No data.
            strings.get(),              // Pointer to strings.
            NULL);                      // No data.
    }
}
template< typename CharT >
BOOST_LOG_EXPORT void basic_simple_event_log_backend< CharT >::consume(
    record_type const& record, target_string_type const& formatted_message)
{
    const char_type* message = formatted_message.c_str();
    event_log::event_type evt_type = event_log::info;
    if (!m_pImpl->m_LevelMapper.empty())
        evt_type = m_pImpl->m_LevelMapper(record);

    DWORD event_id;
    switch (evt_type)
    {
    case event_log::success:
        event_id = BOOST_LOG_MSG_DEBUG; break;
    case event_log::warning:
        event_id = BOOST_LOG_MSG_WARNING; break;
    case event_log::error:
        event_id = BOOST_LOG_MSG_ERROR; break;
    default:
        event_id = BOOST_LOG_MSG_INFO; break;
    }

    report_event(
        m_pImpl->m_SourceHandle,        // Event log handle.
        static_cast< WORD >(evt_type),  // Event type.
        0,                              // Event category.
        event_id,                       // Event identifier.
        NULL,                           // No user security identifier.
        1,                              // Number of substitution strings.
        0,                              // No data.
        &message,                       // Pointer to strings.
        NULL);                          // No data.
}
void
report_call_event (int evt, eXosip_call_t * jc, eXosip_dialog_t * jd,
                   osip_transaction_t * tr)
{
  eXosip_event_t *je;

  je = eXosip_event_init_for_call (evt, jc, jd, tr);
  report_event (je, NULL);
}
static void
sync_status(const char *what, int ostatus, int nstatus)
{
    char obuf[256], nbuf[256], tbuf[1024];

    snprintf(obuf, sizeof(obuf), "%04x", ostatus);
    snprintf(nbuf, sizeof(nbuf), "%04x", nstatus);
    snprintf(tbuf, sizeof(tbuf), "%s status: %s -> %s", what, obuf, nbuf);
    report_event(EVNT_KERN, NULL, tbuf);
}
static inline void
update_and_check_report_als(int32_t lux)
{
    int32_t lux_last;

    lux_last = pStkAlsData->als_lux_last;
    if (unlikely(abs(lux - lux_last) >= CONFIG_STK_ALS_CHANGE_THRESHOLD)) {
        pStkAlsData->als_lux_last = lux;
        report_event(pStkAlsData->input_dev, lux);
    }
}
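/*
 * A self-contained illustration of the change-threshold filter above,
 * with the driver state replaced by plain variables. The threshold
 * value and names here are assumptions for the demo, not taken from
 * the driver.
 */
#include <stdio.h>
#include <stdlib.h>

#define ALS_CHANGE_THRESHOLD 20 /* assumed lux delta */

static int last_lux = 0;

static void feed_lux(int lux)
{
    /* report only when the reading moved at least the threshold */
    if (abs(lux - last_lux) >= ALS_CHANGE_THRESHOLD) {
        last_lux = lux;
        printf("report %d lux\n", lux); /* stands in for report_event() */
    }
}

int main(void)
{
    feed_lux(5);    /* no report: |5-0| < 20 */
    feed_lux(30);   /* reports 30 */
    feed_lux(45);   /* no report: |45-30| < 20 */
    feed_lux(80);   /* reports 80 */
    return 0;
}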
void
enqueue_fn(const uint8_t code)
{
    if (g_fn_buffer_length < FN_BUFFER_SIZE) {
        g_fn_buffer[g_fn_buffer_length] = code;
        g_fn_buffer_length++;
    } else {
        report_event(EVENT_CODE_KEYMAP_FN_BUF_FULL, 0, MODE_UPDATE);
        g_fn_buffer[FN_BUFFER_SIZE - 1] = 0; /* clear the last slot */
    }
}
/*
 * refclock_transmit - simulate the transmit procedure
 *
 * This routine implements the NTP transmit procedure for a reference
 * clock. This provides a mechanism to call the driver at the NTP poll
 * interval, as well as provides a reachability mechanism to detect a
 * broken radio or other madness.
 */
void
refclock_transmit(
    struct peer *peer   /* peer structure pointer */
    )
{
    u_char clktype;
    int unit;

    clktype = peer->refclktype;
    unit = peer->refclkunit;
    peer->sent++;
    get_systime(&peer->xmt);

    /*
     * This is a ripoff of the peer transmit routine, but
     * specialized for reference clocks. We do a little less
     * protocol here and call the driver-specific transmit routine.
     */
    if (peer->burst == 0) {
        u_char oreach;
#ifdef DEBUG
        if (debug)
            printf("refclock_transmit: at %ld %s\n",
                current_time, stoa(&(peer->srcadr)));
#endif

        /*
         * Update reachability and poll variables like the
         * network code.
         */
        oreach = peer->reach;
        peer->reach <<= 1;
        peer->outdate = current_time;
        if (!peer->reach) {
            if (oreach) {
                report_event(EVNT_UNREACH, peer);
                peer->timereachable = current_time;
            }
        } else {
            if (!(oreach & 0x07)) {
                clock_filter(peer, 0., 0., MAXDISPERSE);
                clock_select();
            }
            if (peer->flags & FLAG_BURST)
                peer->burst = NSTAGE;
        }
    } else {
        peer->burst--;
    }
    if (refclock_conf[clktype]->clock_poll != noentry)
        (refclock_conf[clktype]->clock_poll)(unit, peer);
    poll_update(peer, peer->hpoll);
}
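/*
 * A small standalone illustration (not from the source) of the 8-bit
 * reachability shift register updated above: each poll shifts the
 * register left in the transmit path, and a received reply sets the
 * low bit in the receive path. The peer is unreachable only when all
 * eight bits have shifted out to zero.
 */
#include <stdio.h>

int main(void)
{
    unsigned char reach = 0;
    int poll, got_reply[8] = { 1, 1, 0, 0, 0, 1, 0, 1 };

    for (poll = 0; poll < 8; poll++) {
        reach <<= 1;            /* as in refclock_transmit() */
        if (got_reply[poll])
            reach |= 1;         /* as in refclock_receive() */
        printf("poll %d reach %03o\n", poll, reach);
    }
    return 0;                   /* ends at reach == 0305 */
}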
/*
 * refclock_receive - simulate the receive and packet procedures
 *
 * This routine simulates the NTP receive and packet procedures for a
 * reference clock. This provides a mechanism in which the ordinary NTP
 * filter, selection and combining algorithms can be used to suppress
 * misbehaving radios and to mitigate between them when more than one is
 * available for backup.
 */
void
refclock_receive(
    struct peer *peer   /* peer structure pointer */
    )
{
    struct refclockproc *pp;

#ifdef DEBUG
    if (debug)
        printf("refclock_receive: at %lu %s\n",
            current_time, stoa(&peer->srcadr));
#endif

    /*
     * Do a little sanity dance and update the peer structure. Groom
     * the median filter samples and give the data to the clock
     * filter.
     */
    pp = peer->procptr;
    peer->leap = pp->leap;
    if (peer->leap == LEAP_NOTINSYNC)
        return;

    peer->received++;
    peer->timereceived = current_time;
    if (!peer->reach) {
        report_event(EVNT_REACH, peer);
        peer->timereachable = current_time;
    }
    peer->reach |= 1;
    peer->reftime = pp->lastref;
    peer->org = pp->lastrec;
    peer->rootdispersion = pp->disp;
    get_systime(&peer->rec);
    if (!refclock_sample(pp))
        return;

    clock_filter(peer, pp->offset, 0., pp->jitter);
    record_peer_stats(&peer->srcadr, ctlpeerstatus(peer),
        peer->offset, peer->delay,
        clock_phi * (current_time - peer->epoch), peer->jitter);
    if (cal_enable && last_offset < MINDISPERSE) {
#ifdef KERNEL_PLL
        if (peer != sys_peer || pll_status & STA_PPSTIME)
#else
        if (peer != sys_peer)
#endif /* KERNEL_PLL */
            pp->fudgetime1 -= pp->offset * FUDGEFAC;
        else
            pp->fudgetime1 -= pp->fudgetime1 * FUDGEFAC;
    }
}
void
initial_actuate(const uint8_t row, const uint8_t col)
{
#ifndef SIMPLE_DEVICE
    const uint8_t code = pgm_read_byte(&LAYERS[0][row][col]);

    if ((code == HID_KEYBOARD_SC_ENTER) ||
        (code == HID_KEYBOARD_SC_KEYPAD_ENTER)) {
        report_event(EVENT_CODE_NVM_ERASE_SETTINGS, 0, MODE_REOCCUR);
        nvm_init_eeprom();
    }
#endif /* SIMPLE_DEVICE */
}
static ssize_t lux_store(struct kobject *kobj, struct kobj_attribute *attr,
                         const char *buf, size_t len)
{
    unsigned long value = 0;

    if (kstrtoul(buf, 10, &value))
        return -EINVAL;

    STK_LOCK(1);
    report_event(pStkAlsData->input_dev, value);
    STK_LOCK(0);
    return len;
}
static void
sync_status(const char *what, int ostatus, int nstatus)
{
    char obuf[256], nbuf[256], tbuf[1024];

#if defined(USE_SNPRINTB) && defined(STA_FMT)
    snprintb(obuf, sizeof(obuf), STA_FMT, ostatus);
    snprintb(nbuf, sizeof(nbuf), STA_FMT, nstatus);
#else
    snprintf(obuf, sizeof(obuf), "%04x", ostatus);
    snprintf(nbuf, sizeof(nbuf), "%04x", nstatus);
#endif
    snprintf(tbuf, sizeof(tbuf), "%s status: %s -> %s", what, obuf, nbuf);
    report_event(EVNT_KERN, NULL, tbuf);
}
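/*
 * Where snprintb()/STA_FMT are unavailable the status words above print
 * as raw hex. A hypothetical decoder (assumed, abbreviated to three of
 * the timex STA_* flags) showing what the formatted variant adds:
 */
#include <stdio.h>
#include <sys/timex.h>

static void sta_decode_sketch(char *buf, size_t len, int status)
{
    /* append a short name for each bit that is set */
    snprintf(buf, len, "%04x%s%s%s", status,
        (status & STA_PLL)     ? " pll" : "",
        (status & STA_PPSTIME) ? " ppstime" : "",
        (status & STA_UNSYNC)  ? " unsync" : "");
}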
/*
 * Clock state machine. Enter new state and set state variables.
 */
static void
rstclock(
    int trans,      /* new state */
    double offset   /* new offset */
    )
{
    DPRINTF(2, ("rstclock: mu %lu state %d poll %d count %d\n",
        current_time - clock_epoch, trans, sys_poll, tc_counter));
    if (trans != state && trans != EVNT_FSET)
        report_event(trans, NULL, NULL);
    state = trans;
    last_offset = clock_offset = offset;
    clock_epoch = current_time;
}
/*
 * refclock_receive - simulate the receive and packet procedures
 *
 * This routine simulates the NTP receive and packet procedures for a
 * reference clock. This provides a mechanism in which the ordinary NTP
 * filter, selection and combining algorithms can be used to suppress
 * misbehaving radios and to mitigate between them when more than one is
 * available for backup.
 */
void
refclock_receive(
    struct peer *peer   /* peer structure pointer */
    )
{
    struct refclockproc *pp;

#ifdef DEBUG
    if (debug)
        printf("refclock_receive: at %lu %s\n",
            current_time, stoa(&peer->srcadr));
#endif

    /*
     * Do a little sanity dance and update the peer structure. Groom
     * the median filter samples and give the data to the clock
     * filter.
     */
    pp = peer->procptr;
    peer->leap = pp->leap;
    if (peer->leap == LEAP_NOTINSYNC)
        return;

    peer->received++;
    peer->timereceived = current_time;
    if (!peer->reach) {
        report_event(PEVNT_REACH, peer, NULL);
        peer->timereachable = current_time;
    }
    peer->reach |= 1;
    peer->reftime = pp->lastref;
    peer->aorg = pp->lastrec;
    peer->rootdisp = pp->disp;
    get_systime(&peer->dst);
    if (!refclock_sample(pp))
        return;

    clock_filter(peer, pp->offset, 0., pp->jitter);
    if (cal_enable && fabs(last_offset) < sys_mindisp &&
        sys_peer != NULL) {
        if (sys_peer->refclktype == REFCLK_ATOM_PPS &&
            peer->refclktype != REFCLK_ATOM_PPS)
            pp->fudgetime1 -= pp->offset * FUDGEFAC;
    }
}
/*
 * Clock state machine. Enter new state and set state variables.
 */
static void
rstclock(
    int trans,      /* new state */
    double offset   /* new offset */
    )
{
#ifdef DEBUG
    if (debug > 1)
        printf("local_clock: mu %lu state %d poll %d count %d\n",
            current_time - clock_epoch, trans, sys_poll,
            tc_counter);
#endif
    if (trans != state && trans != EVNT_FSET)
        report_event(trans, NULL, NULL);
    state = trans;
    last_offset = clock_offset = offset;
    clock_epoch = current_time;
}
/*
 * refclock_report - note the occurrence of an event
 *
 * This routine presently just remembers the report and logs it, but
 * does nothing heroic for the trap handler. It tries to be a good
 * citizen and bothers the system log only if things change.
 */
void
refclock_report(
    struct peer *peer,
    int code
    )
{
    struct refclockproc *pp;

    pp = peer->procptr;
    if (pp == NULL)
        return;

    switch (code) {

    case CEVNT_TIMEOUT:
        pp->noreply++;
        break;

    case CEVNT_BADREPLY:
        pp->badformat++;
        break;

    case CEVNT_FAULT:
        break;

    case CEVNT_BADDATE:
    case CEVNT_BADTIME:
        pp->baddata++;
        break;

    default:
        /* ignore others */
        break;
    }
    if (pp->lastevent < 15)
        pp->lastevent++;
    if (pp->currentstatus != code) {
        pp->currentstatus = (u_char)code;
        report_event(PEVNT_CLOCK, peer, ceventstr(code));
    }
}
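/*
 * Typical driver usage, sketched (this poll routine is hypothetical,
 * not from the source): a driver that saw no new samples reports a
 * timeout, and a healthy one reports nominal. Because refclock_report()
 * logs only when pp->currentstatus changes, repeated timeouts do not
 * flood the system log.
 */
static void
example_poll_sketch(int unit, struct peer *peer)
{
    struct refclockproc *pp = peer->procptr;

    if (pp->coderecv == pp->codeproc) {
        refclock_report(peer, CEVNT_TIMEOUT);
        return;
    }
    refclock_report(peer, CEVNT_NOMINAL);
}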
void
delete_fn(const uint8_t code)
{
    int8_t i;

    for (i = 0; i < g_fn_buffer_length; i++) {
        if (g_fn_buffer[i] == code)
            break;
    }

    if (i >= g_fn_buffer_length) {
        report_event(EVENT_CODE_KEYMAP_FN_NOT_FOUND, code, MODE_UPDATE);
        return;
    }

    /* shift the remaining entries down over the deleted one */
    for (; i < g_fn_buffer_length - 1; i++)
        g_fn_buffer[i] = g_fn_buffer[i + 1];

    g_fn_buffer_length--;
}
static void
xsyslog(BIO *bp, int priority, const char *string)
{
    LPCSTR lpszStrings[2];
    WORD evtype = EVENTLOG_ERROR_TYPE;
    int pid = _getpid();
    char pidbuf[DECIMAL_SIZE(pid) + 4];

    switch (priority) {
    case LOG_EMERG:
    case LOG_ALERT:
    case LOG_CRIT:
    case LOG_ERR:
        evtype = EVENTLOG_ERROR_TYPE;
        break;
    case LOG_WARNING:
        evtype = EVENTLOG_WARNING_TYPE;
        break;
    case LOG_NOTICE:
    case LOG_INFO:
    case LOG_DEBUG:
        evtype = EVENTLOG_INFORMATION_TYPE;
        break;
    default:
        /* Should never happen, but set it as error anyway. */
        evtype = EVENTLOG_ERROR_TYPE;
        break;
    }

    sprintf(pidbuf, "[%d] ", pid);
    lpszStrings[0] = pidbuf;
    lpszStrings[1] = string;

    if (report_event && bp->ptr)
        report_event(bp->ptr, evtype, 0, 1024, NULL, 2, 0,
                     lpszStrings, NULL);
}
/*
 * stats_config - configure the stats operation
 */
void
stats_config(
    int item,
    const char *invalue /* only one type so far */
    )
{
    FILE *fp;
    const char *value;
    int len;
    char tbuf[80];
    char str1[20], str2[20];
#ifndef VMS
    const char temp_ext[] = ".TEMP";
#else
    const char temp_ext[] = "-TEMP";
#endif

    /*
     * Expand environment strings under Windows NT, since the
     * command interpreter doesn't do this, the program must.
     */
#ifdef SYS_WINNT
    char newvalue[MAX_PATH], parameter[MAX_PATH];

    if (!ExpandEnvironmentStrings(invalue, newvalue, MAX_PATH)) {
        switch (item) {
        case STATS_FREQ_FILE:
            strcpy(parameter, "STATS_FREQ_FILE");
            break;

        case STATS_LEAP_FILE:
            strcpy(parameter, "STATS_LEAP_FILE");
            break;

        case STATS_STATSDIR:
            strcpy(parameter, "STATS_STATSDIR");
            break;

        case STATS_PID_FILE:
            strcpy(parameter, "STATS_PID_FILE");
            break;

        default:
            strcpy(parameter, "UNKNOWN");
            break;
        }
        value = invalue;
        msyslog(LOG_ERR,
            "ExpandEnvironmentStrings(%s) failed: %m\n", parameter);
    } else {
        value = newvalue;
    }
#else
    value = invalue;
#endif /* SYS_WINNT */

    switch (item) {

    /*
     * Open and read frequency file.
     */
    case STATS_FREQ_FILE:
        if (!value || (len = strlen(value)) == 0)
            break;

        stats_drift_file = erealloc(stats_drift_file, len + 1);
        stats_temp_file = erealloc(stats_temp_file,
            len + sizeof(".TEMP"));
        memcpy(stats_drift_file, value, (unsigned)(len + 1));
        memcpy(stats_temp_file, value, (unsigned)len);
        memcpy(stats_temp_file + len, temp_ext, sizeof(temp_ext));

        /*
         * Open drift file and read frequency. If the file is
         * missing or contains errors, tell the loop to reset.
         */
        if ((fp = fopen(stats_drift_file, "r")) == NULL)
            break;

        if (fscanf(fp, "%lf", &old_drift) != 1) {
            msyslog(LOG_ERR, "format error frequency file %s",
                stats_drift_file);
            fclose(fp);
            break;
        }
        fclose(fp);
        old_drift /= 1e6;
        prev_drift_comp = old_drift;
        break;

    /*
     * Specify statistics directory.
     */
    case STATS_STATSDIR:

        /*
         * HMS: the following test is insufficient:
         * - value may be missing the DIR_SEP
         * - we still need the filename after it
         */
        if (strlen(value) >= sizeof(statsdir)) {
            msyslog(LOG_ERR, "statsdir too long (>%d, sigh)",
                (int)sizeof(statsdir) - 1);
        } else {
            l_fp now;
            int add_dir_sep;
            int value_l = strlen(value);

            /* Add a DIR_SEP unless we already have one. */
            if (value_l == 0)
                add_dir_sep = 0;
            else
                add_dir_sep = (DIR_SEP != value[value_l - 1]);

            if (add_dir_sep)
                snprintf(statsdir, sizeof(statsdir), "%s%c",
                    value, DIR_SEP);
            else
                snprintf(statsdir, sizeof(statsdir), "%s",
                    value);

            get_systime(&now);
            if (peerstats.prefix == &statsdir[0] &&
                peerstats.fp != NULL) {
                fclose(peerstats.fp);
                peerstats.fp = NULL;
                filegen_setup(&peerstats, now.l_ui);
            }
            if (loopstats.prefix == &statsdir[0] &&
                loopstats.fp != NULL) {
                fclose(loopstats.fp);
                loopstats.fp = NULL;
                filegen_setup(&loopstats, now.l_ui);
            }
            if (clockstats.prefix == &statsdir[0] &&
                clockstats.fp != NULL) {
                fclose(clockstats.fp);
                clockstats.fp = NULL;
                filegen_setup(&clockstats, now.l_ui);
            }
            if (rawstats.prefix == &statsdir[0] &&
                rawstats.fp != NULL) {
                fclose(rawstats.fp);
                rawstats.fp = NULL;
                filegen_setup(&rawstats, now.l_ui);
            }
            if (sysstats.prefix == &statsdir[0] &&
                sysstats.fp != NULL) {
                fclose(sysstats.fp);
                sysstats.fp = NULL;
                filegen_setup(&sysstats, now.l_ui);
            }
            if (protostats.prefix == &statsdir[0] &&
                protostats.fp != NULL) {
                fclose(protostats.fp);
                protostats.fp = NULL;
                filegen_setup(&protostats, now.l_ui);
            }
#ifdef OPENSSL
            if (cryptostats.prefix == &statsdir[0] &&
                cryptostats.fp != NULL) {
                fclose(cryptostats.fp);
                cryptostats.fp = NULL;
                filegen_setup(&cryptostats, now.l_ui);
            }
#endif /* OPENSSL */
#ifdef DEBUG_TIMING
            if (timingstats.prefix == &statsdir[0] &&
                timingstats.fp != NULL) {
                fclose(timingstats.fp);
                timingstats.fp = NULL;
                filegen_setup(&timingstats, now.l_ui);
            }
#endif /* DEBUG_TIMING */
        }
        break;

    /*
     * Open pid file.
     */
    case STATS_PID_FILE:
        if ((fp = fopen(value, "w")) == NULL) {
            msyslog(LOG_ERR, "pid file %s: %m", value);
            break;
        }
        fprintf(fp, "%d", (int)getpid());
        fclose(fp);
        break;

    /*
     * Read leapseconds file.
     */
    case STATS_LEAP_FILE:
        if ((fp = fopen(value, "r")) == NULL) {
            msyslog(LOG_ERR, "leapseconds file %s: %m", value);
            break;
        }
        if (leap_file(fp) < 0) {
            msyslog(LOG_ERR, "format error leapseconds file %s",
                value);
        } else {
            strcpy(str1, fstostr(leap_sec));
            strcpy(str2, fstostr(leap_expire));
            snprintf(tbuf, sizeof(tbuf),
                "%d leap %s expire %s", leap_tai, str1, str2);
            report_event(EVNT_TAI, NULL, tbuf);
        }
        fclose(fp);
        break;

    default:
        /* oh well */
        break;
    }
}
/*
 * timer - event timer
 */
void
timer(void)
{
    register struct peer *peer, *next_peer;
    u_int n;
    long delta;

    /*
     * The basic timer event is one second. This is used to adjust
     * the system clock in time and frequency, implement the
     * kiss-o'-death function and implement the association
     * polling function.
     */
    current_time += nap_time;
    get_systime(&sys_time);
    nap_time = (u_long)-1;
    if (do_adjtime) {
        if (adjust_timer <= current_time) {
            adjust_timer += 1;
            adj_host_clock();
#ifdef REFCLOCK
            for (n = 0; n < NTP_HASH_SIZE; n++) {
                for (peer = peer_hash[n]; peer != 0;
                     peer = next_peer) {
                    next_peer = peer->next;
                    if (peer->flags & FLAG_REFCLOCK)
                        refclock_timer(peer);
                }
            }
#endif /* REFCLOCK */
        }
        nap_time = 1;
    }
    if (awake_timer) {
        if (awake_timer <= current_time) {
            for (n = 0; n < NTP_HASH_SIZE; n++) {
                for (peer = peer_hash[n]; peer != 0;
                     peer = peer->next) {
                    peer->burst = NSTAGE;
                    peer->nextdate = current_time;
                    peer->throttle = 0;
                }
            }
            allow_panic = TRUE; /* Allow for large time offsets */
            init_loopfilter();
            state = EVNT_NSET;
            awake_timer = 0;
        } else {
            delta = awake_timer - current_time;
            if (delta < nap_time)
                nap_time = delta;
        }
    }

    /*
     * Now dispatch any peers whose event timer has expired. Be
     * careful here, since the peer structure might go away as the
     * result of the call.
     */
    for (n = 0; n < NTP_HASH_SIZE; n++) {
        for (peer = peer_hash[n]; peer != 0; peer = next_peer) {
            next_peer = peer->next;
            if (peer->action && peer->nextaction <= current_time)
                peer->action(peer);

            /*
             * Restrain the non-burst packet rate to not more
             * than one packet every 16 seconds. This is
             * usually tripped using iburst and minpoll of
             * 128 s or less.
             */
            if (peer->throttle > 0)
                peer->throttle--;
            if (peer->nextdate <= current_time) {
#ifdef REFCLOCK
                if (peer->flags & FLAG_REFCLOCK)
                    refclock_transmit(peer);
                else
                    transmit(peer);
#else /* REFCLOCK */
                transmit(peer);
#endif /* REFCLOCK */
            }
            if (peer->action && peer->nextaction >= current_time) {
                delta = peer->nextaction - current_time;
                if (delta < nap_time)
                    nap_time = delta;
            } else if (peer->nextdate >= current_time) {
                delta = peer->nextdate - current_time;
                if (delta < nap_time)
                    nap_time = delta;
            }
        }
    }

    /*
     * Orphan mode is active when enabled and when no servers less
     * than the orphan stratum are available. A server with no other
     * synchronization source is an orphan. It shows offset zero and
     * reference ID the loopback address.
     */
    if (sys_orphan < STRATUM_UNSPEC && sys_peer == NULL) {
        if (sys_leap == LEAP_NOTINSYNC) {
            sys_leap = LEAP_NOWARNING;
#ifdef OPENSSL
            if (crypto_flags)
                crypto_update();
#endif /* OPENSSL */
        }
        sys_stratum = (u_char)sys_orphan;
        if (sys_stratum > 1)
            sys_refid = htonl(LOOPBACKADR);
        else
            memcpy(&sys_refid, "LOOP", 4);
        sys_offset = 0;
        sys_rootdelay = 0;
        sys_rootdisp = 0;
    }

    /*
     * Leapseconds. If a leap is pending, decrement the time
     * remaining. If less than one day remains, set the leap bits.
     * When no time remains, clear the leap bits and increment the
     * TAI. If kernel support is not available, do the leap
     * crudely. Note a leap cannot be pending unless the clock is
     * set.
     */
    if (leapsec > 0) {
        leapsec--;
        if (leapsec == 0) {
            sys_leap = LEAP_NOWARNING;
            sys_tai = leap_tai;
#ifdef KERNEL_PLL
            if (!(pll_control && kern_enable))
                step_systime(-1.0);
#else /* KERNEL_PLL */
#ifndef SYS_WINNT   /* WinNT port has its own leap second handling */
            step_systime(-1.0);
#endif /* SYS_WINNT */
#endif /* KERNEL_PLL */
            report_event(EVNT_LEAP, NULL, NULL);
        } else {
            if (leapsec < DAY)
                sys_leap = LEAP_ADDSECOND;
            if (leap_tai > 0)
                sys_tai = leap_tai - 1;
        }
    }

    /*
     * Update huff-n'-puff filter.
     */
    if (huffpuff_timer <= current_time) {
        huffpuff_timer += HUFFPUFF;
        huffpuff();
    }
    if (huffpuff_timer >= current_time) {
        delta = huffpuff_timer - current_time;
        if (delta < nap_time)
            nap_time = delta;
    }

#ifdef OPENSSL
    /*
     * Garbage collect expired keys.
     */
    if (keys_timer <= current_time) {
        keys_timer += 1 << sys_automax;
        auth_agekeys();
    }
    if (keys_timer >= current_time) {
        delta = keys_timer - current_time;
        if (delta < nap_time)
            nap_time = delta;
    }

    /*
     * Garbage collect key list and generate new private value. The
     * timer runs only after initial synchronization and fires about
     * once per day.
     */
    if (revoke_timer <= current_time && sys_leap != LEAP_NOTINSYNC) {
        revoke_timer += 1 << sys_revoke;
        RAND_bytes((u_char *)&sys_private, 4);
    }
    if (revoke_timer >= current_time) {
        delta = revoke_timer - current_time;
        if (delta < nap_time)
            nap_time = delta;
    }
#endif /* OPENSSL */

    /*
     * Interface update timer.
     */
    if (interface_interval && interface_timer <= current_time) {
        timer_interfacetimeout(current_time + interface_interval);
        DPRINTF(2, ("timer: interface update\n"));
        interface_update(NULL, NULL);
    }
    if (interface_interval && interface_timer >= current_time) {
        delta = interface_timer - current_time;
        if (delta < nap_time)
            nap_time = delta;
    }

    if (dns_timer && (dns_timer <= current_time))
        dns_timer = update_dns_peers();
    if (dns_timer >= current_time) {
        delta = dns_timer - current_time;
        if (delta < nap_time)
            nap_time = delta;
    }

    /*
     * Finally, write hourly stats.
     */
    if (stats_timer <= current_time) {
        stats_timer += HOUR;
        write_stats();
        if (sys_tai != 0 && sys_time.l_ui > leap_expire)
            report_event(EVNT_LEAPVAL, NULL, NULL);
    } else if (!do_adjtime && drift_file_sw) {
        write_stats();  /* update more frequently for pacemaker */
    }
    if (stats_timer >= current_time) {
        delta = stats_timer - current_time;
        if (delta < nap_time)
            nap_time = delta;
    }

    if (nap_time == 0)
        nap_time = 1;
    if (debug) {
        msyslog(LOG_INFO, "%s: current_time: %ld, nap_time: %ld",
            __FUNCTION__, current_time, nap_time);
    }
    itimer.it_interval.tv_sec = itimer.it_value.tv_sec = nap_time;
    setitimer(ITIMER_REAL, &itimer, (struct itimerval *)0);
}
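/*
 * The recurring pattern in timer() above - shrink nap_time to the
 * delta of the nearest pending event - factored into a helper for
 * illustration. This helper is hypothetical; the source repeats the
 * computation inline for each timer.
 */
static void
nap_clamp_sketch(long *nap_time, u_long event_time, u_long now)
{
    long delta;

    if (event_time >= now) {
        delta = event_time - now;
        if (delta < *nap_time)
            *nap_time = delta;  /* wake no later than this event */
    }
}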
int
ntpdmain(
    int argc,
    char *argv[]
    )
{
    l_fp now;
    struct recvbuf *rbuf;
    const char *logfilename;
# ifdef HAVE_UMASK
    mode_t uv;
# endif
# if defined(HAVE_GETUID) && !defined(MPE) /* MPE lacks the concept of root */
    uid_t uid;
# endif
# if defined(HAVE_WORKING_FORK)
    long wait_sync = 0;
    int pipe_fds[2];
    int rc;
    int exit_code;
#  ifdef _AIX
    struct sigaction sa;
#  endif
#  if !defined(HAVE_SETSID) && !defined(HAVE_SETPGID) && defined(TIOCNOTTY)
    int fid;
#  endif
# endif /* HAVE_WORKING_FORK */
# ifdef SCO5_CLOCK
    int fd;
    int zero;
# endif

# ifdef NEED_PTHREAD_WARMUP
    my_pthread_warmup();
# endif

# ifdef HAVE_UMASK
    uv = umask(0);
    if (uv)
        umask(uv);
    else
        umask(022);
# endif
    saved_argc = argc;
    saved_argv = argv;
    progname = argv[0];
    initializing = TRUE;    /* mark that we are initializing */
    parse_cmdline_opts(&argc, &argv);
# ifdef DEBUG
    debug = OPT_VALUE_SET_DEBUG_LEVEL;
#  ifdef HAVE_SETLINEBUF
    setlinebuf(stdout);
#  endif
# endif

    if (HAVE_OPT(NOFORK) || HAVE_OPT(QUIT)
# ifdef DEBUG
        || debug
# endif
        || HAVE_OPT(SAVECONFIGQUIT))
        nofork = TRUE;

    init_logging(progname, NLOG_SYNCMASK, TRUE);
    /* honor -l/--logfile option to log to a file */
    if (HAVE_OPT(LOGFILE)) {
        logfilename = OPT_ARG(LOGFILE);
        syslogit = FALSE;
        change_logfile(logfilename, FALSE);
    } else {
        logfilename = NULL;
        if (nofork)
            msyslog_term = TRUE;
        if (HAVE_OPT(SAVECONFIGQUIT))
            syslogit = FALSE;
    }
    msyslog(LOG_NOTICE, "%s: Starting", Version);

    {
        int i;
        char buf[1024]; /* Secret knowledge of msyslog buf length */
        char *cp = buf;

        /* Note that every arg has an initial space character */
        snprintf(cp, sizeof(buf), "Command line:");
        cp += strlen(cp);
        for (i = 0; i < saved_argc; ++i) {
            snprintf(cp, sizeof(buf) - (cp - buf),
                " %s", saved_argv[i]);
            cp += strlen(cp);
        }
        msyslog(LOG_INFO, "%s", buf);
    }

    /*
     * Install trap handlers to log errors and assertion failures.
     * Default handlers print to stderr which doesn't work if detached.
     */
    isc_assertion_setcallback(assertion_failed);
    isc_error_setfatal(library_fatal_error);
    isc_error_setunexpected(library_unexpected_error);

    /* MPE lacks the concept of root */
# if defined(HAVE_GETUID) && !defined(MPE)
    uid = getuid();
    if (uid && !HAVE_OPT( SAVECONFIGQUIT )) {
        msyslog_term = TRUE;
        msyslog(LOG_ERR,
            "must be run as root, not uid %ld", (long)uid);
        exit(1);
    }
# endif

    /*
     * Enable the Multi-Media Timer for Windows?
     */
# ifdef SYS_WINNT
    if (HAVE_OPT( MODIFYMMTIMER ))
        set_mm_timer(MM_TIMER_HIRES);
# endif

# ifdef HAVE_DNSREGISTRATION
    /*
     * Enable mDNS registrations?
     */
    if (HAVE_OPT( MDNS ))
        mdnsreg = TRUE;
# endif /* HAVE_DNSREGISTRATION */

    if (HAVE_OPT( NOVIRTUALIPS ))
        listen_to_virtual_ips = 0;

    /*
     * --interface, listen on specified interfaces
     */
    if (HAVE_OPT( INTERFACE )) {
        int ifacect = STACKCT_OPT( INTERFACE );
        const char **ifaces = STACKLST_OPT( INTERFACE );
        sockaddr_u addr;

        while (ifacect-- > 0) {
            add_nic_rule(
                is_ip_address(*ifaces, AF_UNSPEC, &addr)
                    ? MATCH_IFADDR
                    : MATCH_IFNAME,
                *ifaces, -1, ACTION_LISTEN);
            ifaces++;
        }
    }

    if (HAVE_OPT( NICE ))
        priority_done = 0;

# ifdef HAVE_SCHED_SETSCHEDULER
    if (HAVE_OPT( PRIORITY )) {
        config_priority = OPT_VALUE_PRIORITY;
        config_priority_override = 1;
        priority_done = 0;
    }
# endif

# ifdef HAVE_WORKING_FORK
    /* make sure the FDs are initialised */
    pipe_fds[0] = -1;
    pipe_fds[1] = -1;
    do {    /* 'loop' once */
        if (!HAVE_OPT( WAIT_SYNC ))
            break;
        wait_sync = OPT_VALUE_WAIT_SYNC;
        if (wait_sync <= 0) {
            wait_sync = 0;
            break;
        }
        /* -w requires a fork() even with debug > 0 */
        nofork = FALSE;
        if (pipe(pipe_fds)) {
            exit_code = (errno) ? errno : -1;
            msyslog(LOG_ERR,
                "Pipe creation failed for --wait-sync: %m");
            exit(exit_code);
        }
        waitsync_fd_to_close = pipe_fds[1];
    } while (0);    /* 'loop' once */
# endif /* HAVE_WORKING_FORK */

    init_lib();
# ifdef SYS_WINNT
    /*
     * Start interpolation thread, must occur before first
     * get_systime()
     */
    init_winnt_time();
# endif

    /*
     * Initialize random generator and public key pair
     */
    get_systime(&now);
    ntp_srandom((int)(now.l_i * now.l_uf));

    /*
     * Detach us from the terminal. May need an #ifndef GIZMO.
     */
    if (!nofork) {
# ifdef HAVE_WORKING_FORK
        rc = fork();
        if (-1 == rc) {
            exit_code = (errno) ? errno : -1;
            msyslog(LOG_ERR, "fork: %m");
            exit(exit_code);
        }
        if (rc > 0) {
            /* parent */
            exit_code = wait_child_sync_if(pipe_fds[0],
                wait_sync);
            exit(exit_code);
        }

        /*
         * child/daemon
         * close all open files excepting waitsync_fd_to_close.
         * msyslog() unreliable until after init_logging().
         */
        closelog();
        if (syslog_file != NULL) {
            fclose(syslog_file);
            syslog_file = NULL;
            syslogit = TRUE;
        }
        close_all_except(waitsync_fd_to_close);
        INSIST(0 == open("/dev/null", 0) && 1 == dup2(0, 1)
            && 2 == dup2(0, 2));

        init_logging(progname, 0, TRUE);
        /* we lost our logfile (if any) daemonizing */
        setup_logfile(logfilename);

#  ifdef SYS_DOMAINOS
        {
            uid_$t puid;
            status_$t st;

            proc2_$who_am_i(&puid);
            proc2_$make_server(&puid, &st);
        }
#  endif /* SYS_DOMAINOS */
#  ifdef HAVE_SETSID
        if (setsid() == (pid_t)-1)
            msyslog(LOG_ERR, "setsid(): %m");
#  elif defined(HAVE_SETPGID)
        if (setpgid(0, 0) == -1)
            msyslog(LOG_ERR, "setpgid(): %m");
#  else /* !HAVE_SETSID && !HAVE_SETPGID follows */
#   ifdef TIOCNOTTY
        fid = open("/dev/tty", 2);
        if (fid >= 0) {
            ioctl(fid, (u_long)TIOCNOTTY, NULL);
            close(fid);
        }
#   endif /* TIOCNOTTY */
        ntp_setpgrp(0, getpid());
#  endif /* !HAVE_SETSID && !HAVE_SETPGID */
#  ifdef _AIX
        /* Don't get killed by low-on-memory signal. */
        sa.sa_handler = catch_danger;
        sigemptyset(&sa.sa_mask);
        sa.sa_flags = SA_RESTART;
        sigaction(SIGDANGER, &sa, NULL);
#  endif /* _AIX */
# endif /* HAVE_WORKING_FORK */
    }

# ifdef SCO5_CLOCK
    /*
     * SCO OpenServer's system clock offers much more precise timekeeping
     * on the base CPU than the other CPUs (for multiprocessor systems),
     * so we must lock to the base CPU.
     */
    fd = open("/dev/at1", O_RDONLY);
    if (fd >= 0) {
        zero = 0;
        if (ioctl(fd, ACPU_LOCK, &zero) < 0)
            msyslog(LOG_ERR, "cannot lock to base CPU: %m");
        close(fd);
    }
# endif

    /* Setup stack size in preparation for locking pages in memory. */
# if defined(HAVE_MLOCKALL)
#  ifdef HAVE_SETRLIMIT
    ntp_rlimit(RLIMIT_STACK, DFLT_RLIMIT_STACK * 4096, 4096, "4k");
#   ifdef RLIMIT_MEMLOCK
    /*
     * The default RLIMIT_MEMLOCK is very low on Linux systems.
     * Unless we increase this limit malloc calls are likely to
     * fail if we drop root privilege. To be useful the value
     * has to be larger than the largest ntpd resident set size.
     */
    ntp_rlimit(RLIMIT_MEMLOCK, DFLT_RLIMIT_MEMLOCK * 1024 * 1024,
        1024 * 1024, "MB");
#   endif /* RLIMIT_MEMLOCK */
#  endif /* HAVE_SETRLIMIT */
# else /* !HAVE_MLOCKALL follows */
#  ifdef HAVE_PLOCK
#   ifdef PROCLOCK
#    ifdef _AIX
    /*
     * set the stack limit for AIX for plock().
     * see get_aix_stack() for more info.
     */
    if (ulimit(SET_STACKLIM, (get_aix_stack() - 8 * 4096)) < 0)
        msyslog(LOG_ERR,
            "Cannot adjust stack limit for plock: %m");
#    endif /* _AIX */
#   endif /* PROCLOCK */
#  endif /* HAVE_PLOCK */
# endif /* !HAVE_MLOCKALL */

    /*
     * Set up signals we pay attention to locally.
     */
# ifdef SIGDIE1
    signal_no_reset(SIGDIE1, finish);
    signal_no_reset(SIGDIE2, finish);
    signal_no_reset(SIGDIE3, finish);
    signal_no_reset(SIGDIE4, finish);
# endif
# ifdef SIGBUS
    signal_no_reset(SIGBUS, finish);
# endif

# if !defined(SYS_WINNT) && !defined(VMS)
#  ifdef DEBUG
    (void) signal_no_reset(MOREDEBUGSIG, moredebug);
    (void) signal_no_reset(LESSDEBUGSIG, lessdebug);
#  else
    (void) signal_no_reset(MOREDEBUGSIG, no_debug);
    (void) signal_no_reset(LESSDEBUGSIG, no_debug);
#  endif /* DEBUG */
# endif /* !SYS_WINNT && !VMS */

    /*
     * Set up signals we should never pay attention to.
     */
# ifdef SIGPIPE
    signal_no_reset(SIGPIPE, SIG_IGN);
# endif

    /*
     * Call the init_ routines to initialize the data structures.
     *
     * Exactly what command-line options are we expecting here?
     */
    INIT_SSL();
    init_auth();
    init_util();
    init_restrict();
    init_mon();
    init_timer();
    init_request();
    init_control();
    init_peer();
# ifdef REFCLOCK
    init_refclock();
# endif
    set_process_priority();
    init_proto();       /* Call at high priority */
    init_io();
    init_loopfilter();
    mon_start(MON_ON);  /* monitor on by default now */
                        /* turn off in config if unwanted */

    /*
     * Get the configuration. This is done in a separate module
     * since this will definitely be different for the gizmo board.
     */
    getconfig(argc, argv);

    if (-1 == cur_memlock) {
# if defined(HAVE_MLOCKALL)
        /*
         * lock the process into memory
         */
        if (   !HAVE_OPT(SAVECONFIGQUIT)
#  ifdef RLIMIT_MEMLOCK
            && -1 != DFLT_RLIMIT_MEMLOCK
#  endif
            && 0 != mlockall(MCL_CURRENT|MCL_FUTURE))
            msyslog(LOG_ERR, "mlockall(): %m");
# else /* !HAVE_MLOCKALL follows */
#  ifdef HAVE_PLOCK
#   ifdef PROCLOCK
        /*
         * lock the process into memory
         */
        if (!HAVE_OPT(SAVECONFIGQUIT) && 0 != plock(PROCLOCK))
            msyslog(LOG_ERR, "plock(PROCLOCK): %m");
#   else /* !PROCLOCK follows */
#    ifdef TXTLOCK
        /*
         * Lock text into ram
         */
        if (!HAVE_OPT(SAVECONFIGQUIT) && 0 != plock(TXTLOCK))
            msyslog(LOG_ERR, "plock(TXTLOCK) error: %m");
#    else /* !TXTLOCK follows */
        msyslog(LOG_ERR, "plock() - don't know what to lock!");
#    endif /* !TXTLOCK */
#   endif /* !PROCLOCK */
#  endif /* HAVE_PLOCK */
# endif /* !HAVE_MLOCKALL */
    }

    loop_config(LOOP_DRIFTINIT, 0);
    report_event(EVNT_SYSRESTART, NULL, NULL);
    initializing = FALSE;

# ifdef HAVE_DROPROOT
    if (droproot) {
        /* Drop super-user privileges and chroot now if the OS supports this */
#  ifdef HAVE_LINUX_CAPABILITIES
        /* set flag: keep privileges across setuid() call (we only really need cap_sys_time): */
        if (prctl( PR_SET_KEEPCAPS, 1L, 0L, 0L, 0L ) == -1) {
            msyslog( LOG_ERR, "prctl( PR_SET_KEEPCAPS, 1L ) failed: %m" );
            exit(-1);
        }
#  elif HAVE_SOLARIS_PRIVS
        /* Nothing to do here */
#  else
        /* we need a user to switch to */
        if (user == NULL) {
            msyslog(LOG_ERR, "Need user name to drop root privileges (see -u flag!)" );
            exit(-1);
        }
#  endif /* HAVE_LINUX_CAPABILITIES || HAVE_SOLARIS_PRIVS */

        if (user != NULL) {
            if (isdigit((unsigned char)*user)) {
                sw_uid = (uid_t)strtoul(user, &endp, 0);
                if (*endp != '\0')
                    goto getuser;

                if ((pw = getpwuid(sw_uid)) != NULL) {
                    free(user);
                    user = estrdup(pw->pw_name);
                    sw_gid = pw->pw_gid;
                } else {
                    errno = 0;
                    msyslog(LOG_ERR, "Cannot find user ID %s", user);
                    exit (-1);
                }
            } else {
getuser:
                errno = 0;
                if ((pw = getpwnam(user)) != NULL) {
                    sw_uid = pw->pw_uid;
                    sw_gid = pw->pw_gid;
                } else {
                    if (errno)
                        msyslog(LOG_ERR, "getpwnam(%s) failed: %m", user);
                    else
                        msyslog(LOG_ERR, "Cannot find user `%s'", user);
                    exit (-1);
                }
            }
        }
        if (group != NULL) {
            if (isdigit((unsigned char)*group)) {
                sw_gid = (gid_t)strtoul(group, &endp, 0);
                if (*endp != '\0')
                    goto getgroup;
            } else {
getgroup:
                if ((gr = getgrnam(group)) != NULL) {
                    sw_gid = gr->gr_gid;
                } else {
                    errno = 0;
                    msyslog(LOG_ERR, "Cannot find group `%s'", group);
                    exit (-1);
                }
            }
        }

        if (chrootdir) {
            /* make sure cwd is inside the jail: */
            if (chdir(chrootdir)) {
                msyslog(LOG_ERR, "Cannot chdir() to `%s': %m", chrootdir);
                exit (-1);
            }
            if (chroot(chrootdir)) {
                msyslog(LOG_ERR, "Cannot chroot() to `%s': %m", chrootdir);
                exit (-1);
            }
            if (chdir("/")) {
                msyslog(LOG_ERR, "Cannot chdir() to root after chroot(): %m");
                exit (-1);
            }
        }
#  ifdef HAVE_SOLARIS_PRIVS
        if ((lowprivs = priv_str_to_set(LOWPRIVS, ",", NULL)) == NULL) {
            msyslog(LOG_ERR, "priv_str_to_set() failed:%m");
            exit(-1);
        }
        if ((highprivs = priv_allocset()) == NULL) {
            msyslog(LOG_ERR, "priv_allocset() failed:%m");
            exit(-1);
        }
        (void) getppriv(PRIV_PERMITTED, highprivs);
        (void) priv_intersect(highprivs, lowprivs);
        if (setppriv(PRIV_SET, PRIV_PERMITTED, lowprivs) == -1) {
            msyslog(LOG_ERR, "setppriv() failed:%m");
            exit(-1);
        }
#  endif /* HAVE_SOLARIS_PRIVS */
        if (user && initgroups(user, sw_gid)) {
            msyslog(LOG_ERR, "Cannot initgroups() to user `%s': %m", user);
            exit (-1);
        }
        if (group && setgid(sw_gid)) {
            msyslog(LOG_ERR, "Cannot setgid() to group `%s': %m", group);
            exit (-1);
        }
        if (group && setegid(sw_gid)) {
            msyslog(LOG_ERR, "Cannot setegid() to group `%s': %m", group);
            exit (-1);
        }
        if (group) {
            if (0 != setgroups(1, &sw_gid)) {
                msyslog(LOG_ERR, "setgroups(1, %d) failed: %m", sw_gid);
                exit (-1);
            }
        } else if (pw) {
            if (0 != initgroups(pw->pw_name, pw->pw_gid)) {
                msyslog(LOG_ERR, "initgroups(<%s>, %d) failed: %m",
                    pw->pw_name, pw->pw_gid);
                exit (-1);
            }
        }
        if (user && setuid(sw_uid)) {
            msyslog(LOG_ERR, "Cannot setuid() to user `%s': %m", user);
            exit (-1);
        }
        if (user && seteuid(sw_uid)) {
            msyslog(LOG_ERR, "Cannot seteuid() to user `%s': %m", user);
            exit (-1);
        }

#  if !defined(HAVE_LINUX_CAPABILITIES) && !defined(HAVE_SOLARIS_PRIVS)
        /*
         * for now assume that the privilege to bind to privileged ports
         * is associated with running with uid 0 - should be refined on
         * ports that allow binding to NTP_PORT with uid != 0
         */
        disable_dynamic_updates |= (sw_uid != 0); /* also notifies routing message listener */
#  endif /* !HAVE_LINUX_CAPABILITIES && !HAVE_SOLARIS_PRIVS */

        if (disable_dynamic_updates && interface_interval) {
            interface_interval = 0;
            msyslog(LOG_INFO, "running as non-root disables dynamic interface tracking");
        }

#  ifdef HAVE_LINUX_CAPABILITIES
        {
            /*
             * We may be running under non-root uid now, but we still hold
             * full root privileges! We drop all of them, except for the
             * crucial one or two: cap_sys_time and cap_net_bind_service
             * if doing dynamic interface tracking.
             */
            cap_t caps;
            char *captext;

            captext = (0 != interface_interval)
                ? "cap_sys_time,cap_net_bind_service=pe"
                : "cap_sys_time=pe";
            caps = cap_from_text(captext);
            if (!caps) {
                msyslog(LOG_ERR, "cap_from_text(%s) failed: %m", captext);
                exit(-1);
            }
            if (-1 == cap_set_proc(caps)) {
                msyslog(LOG_ERR, "cap_set_proc() failed to drop root privs: %m");
                exit(-1);
            }
            cap_free(caps);
        }
#  endif /* HAVE_LINUX_CAPABILITIES */
#  ifdef HAVE_SOLARIS_PRIVS
        if (priv_delset(lowprivs, "proc_setid") == -1) {
            msyslog(LOG_ERR, "priv_delset() failed:%m");
            exit(-1);
        }
        if (setppriv(PRIV_SET, PRIV_PERMITTED, lowprivs) == -1) {
            msyslog(LOG_ERR, "setppriv() failed:%m");
            exit(-1);
        }
        priv_freeset(lowprivs);
        priv_freeset(highprivs);
#  endif /* HAVE_SOLARIS_PRIVS */
        root_dropped = TRUE;
        fork_deferred_worker();
    }   /* if (droproot) */
# endif /* HAVE_DROPROOT */

/* libseccomp sandboxing */
#if defined(LIBSECCOMP) && (KERN_SECCOMP)
    scmp_filter_ctx ctx;

    if ((ctx = seccomp_init(SCMP_ACT_KILL)) == NULL)
        msyslog(LOG_ERR, "%s: seccomp_init(SCMP_ACT_KILL) failed: %m",
            __func__);
    else
        msyslog(LOG_DEBUG, "%s: seccomp_init(SCMP_ACT_KILL) succeeded",
            __func__);

#ifdef __x86_64__
    int scmp_sc[] = {
        SCMP_SYS(adjtimex),
        SCMP_SYS(bind),
        SCMP_SYS(brk),
        SCMP_SYS(chdir),
        SCMP_SYS(clock_gettime),
        SCMP_SYS(clock_settime),
        SCMP_SYS(close),
        SCMP_SYS(connect),
        SCMP_SYS(exit_group),
        SCMP_SYS(fstat),
        SCMP_SYS(fsync),
        SCMP_SYS(futex),
        SCMP_SYS(getitimer),
        SCMP_SYS(getsockname),
        SCMP_SYS(ioctl),
        SCMP_SYS(lseek),
        SCMP_SYS(madvise),
        SCMP_SYS(mmap),
        SCMP_SYS(munmap),
        SCMP_SYS(open),
        SCMP_SYS(poll),
        SCMP_SYS(read),
        SCMP_SYS(recvmsg),
        SCMP_SYS(rename),
        SCMP_SYS(rt_sigaction),
        SCMP_SYS(rt_sigprocmask),
        SCMP_SYS(rt_sigreturn),
        SCMP_SYS(select),
        SCMP_SYS(sendto),
        SCMP_SYS(setitimer),
        SCMP_SYS(setsid),
        SCMP_SYS(socket),
        SCMP_SYS(stat),
        SCMP_SYS(time),
        SCMP_SYS(write),
    };
#endif
#ifdef __i386__
    int scmp_sc[] = {
        SCMP_SYS(_newselect),
        SCMP_SYS(adjtimex),
        SCMP_SYS(brk),
        SCMP_SYS(chdir),
        SCMP_SYS(clock_gettime),
        SCMP_SYS(clock_settime),
        SCMP_SYS(close),
        SCMP_SYS(exit_group),
        SCMP_SYS(fsync),
        SCMP_SYS(futex),
        SCMP_SYS(getitimer),
        SCMP_SYS(madvise),
        SCMP_SYS(mmap),
        SCMP_SYS(mmap2),
        SCMP_SYS(munmap),
        SCMP_SYS(open),
        SCMP_SYS(poll),
        SCMP_SYS(read),
        SCMP_SYS(rename),
        SCMP_SYS(rt_sigaction),
        SCMP_SYS(rt_sigprocmask),
        SCMP_SYS(select),
        SCMP_SYS(setitimer),
        SCMP_SYS(setsid),
        SCMP_SYS(sigprocmask),
        SCMP_SYS(sigreturn),
        SCMP_SYS(socketcall),
        SCMP_SYS(stat64),
        SCMP_SYS(time),
        SCMP_SYS(write),
    };
#endif
    {
        int i;

        for (i = 0; i < COUNTOF(scmp_sc); i++) {
            if (seccomp_rule_add(ctx, SCMP_ACT_ALLOW, scmp_sc[i], 0) < 0) {
                msyslog(LOG_ERR, "%s: seccomp_rule_add() failed: %m",
                    __func__);
            }
        }
    }

    if (seccomp_load(ctx) < 0)
        msyslog(LOG_ERR, "%s: seccomp_load() failed: %m", __func__);
    else
        msyslog(LOG_DEBUG, "%s: seccomp_load() succeeded", __func__);
#endif /* LIBSECCOMP and KERN_SECCOMP */

# ifdef HAVE_IO_COMPLETION_PORT
    for (;;) {
        GetReceivedBuffers();
# else /* normal I/O */
    BLOCK_IO_AND_ALARM();
    was_alarmed = FALSE;

    for (;;) {
        if (alarm_flag) {   /* alarmed? */
            was_alarmed = TRUE;
            alarm_flag = FALSE;
        }

        if (!was_alarmed && !has_full_recv_buffer()) {
            /*
             * Nothing to do. Wait for something.
             */
            io_handler();
        }

        if (alarm_flag) {   /* alarmed? */
            was_alarmed = TRUE;
            alarm_flag = FALSE;
        }

        if (was_alarmed) {
            UNBLOCK_IO_AND_ALARM();
            /*
             * Out here, signals are unblocked. Call timer routine
             * to process expiry.
             */
            timer();
            was_alarmed = FALSE;
            BLOCK_IO_AND_ALARM();
        }
# endif /* !HAVE_IO_COMPLETION_PORT */

# ifdef DEBUG_TIMING
        {
            l_fp pts;
            l_fp tsa, tsb;
            int bufcount = 0;

            get_systime(&pts);
            tsa = pts;
# endif
            rbuf = get_full_recv_buffer();
            while (rbuf != NULL) {
                if (alarm_flag) {
                    was_alarmed = TRUE;
                    alarm_flag = FALSE;
                }
                UNBLOCK_IO_AND_ALARM();

                if (was_alarmed) {
                    /* avoid timer starvation during lengthy I/O handling */
                    timer();
                    was_alarmed = FALSE;
                }

                /*
                 * Call the data procedure to handle each received
                 * packet.
                 */
                if (rbuf->receiver != NULL) {
# ifdef DEBUG_TIMING
                    l_fp dts = pts;

                    L_SUB(&dts, &rbuf->recv_time);
                    DPRINTF(2, ("processing timestamp delta %s (with prec. fuzz)\n",
                        lfptoa(&dts, 9)));
                    collect_timing(rbuf, "buffer processing delay",
                        1, &dts);
                    bufcount++;
# endif
                    (*rbuf->receiver)(rbuf);
                } else {
                    msyslog(LOG_ERR, "fatal: receive buffer callback NULL");
                    abort();
                }

                BLOCK_IO_AND_ALARM();
                freerecvbuf(rbuf);
                rbuf = get_full_recv_buffer();
            }
# ifdef DEBUG_TIMING
            get_systime(&tsb);
            L_SUB(&tsb, &tsa);
            if (bufcount) {
                collect_timing(NULL, "processing", bufcount, &tsb);
                DPRINTF(2, ("processing time for %d buffers %s\n",
                    bufcount, lfptoa(&tsb, 9)));
            }
        }
# endif

        /*
         * Go around again
         */
# ifdef HAVE_DNSREGISTRATION
        if (mdnsreg && (current_time - mdnsreg) > 60 && mdnstries &&
            sys_leap != LEAP_NOTINSYNC) {
            mdnsreg = current_time;
            msyslog(LOG_INFO, "Attempting to register mDNS");
            if (DNSServiceRegister(&mdns, 0, 0, NULL, "_ntp._udp",
                NULL, NULL, htons(NTP_PORT), 0, NULL, NULL,
                NULL) != kDNSServiceErr_NoError) {
                if (!--mdnstries)
                    msyslog(LOG_ERR, "Unable to register mDNS, giving up.");
                else
                    msyslog(LOG_INFO, "Unable to register mDNS, will try later.");
            } else {
                msyslog(LOG_INFO, "mDNS service registered.");
                mdnsreg = FALSE;
            }
        }
# endif /* HAVE_DNSREGISTRATION */
    }
    UNBLOCK_IO_AND_ALARM();
    return 1;
}
#endif /* !SIM */

#if !defined(SIM) && defined(SIGDIE1)
/*
 * finish - exit gracefully
 */
static RETSIGTYPE
finish(
    int sig
    )
{
    const char *sig_desc;

    sig_desc = NULL;
#ifdef HAVE_STRSIGNAL
    sig_desc = strsignal(sig);
#endif
    if (sig_desc == NULL)
        sig_desc = "";
    msyslog(LOG_NOTICE, "%s exiting on signal %d (%s)", progname,
        sig, sig_desc);
    /* See Bug 2513 and Bug 2522 re the unlink of PIDFILE */
# ifdef HAVE_DNSREGISTRATION
    if (mdns != NULL)
        DNSServiceRefDeallocate(mdns);
# endif
    peer_cleanup();
    exit(0);
}
#endif /* !SIM && SIGDIE1 */

#ifndef SIM
/*
 * wait_child_sync_if - implements parent side of -w/--wait-sync
 */
# ifdef HAVE_WORKING_FORK
static int
wait_child_sync_if(
    int pipe_read_fd,
    long wait_sync
    )
{
    int rc;
    int exit_code;
    time_t wait_end_time;
    time_t cur_time;
    time_t wait_rem;
    fd_set readset;
    struct timeval wtimeout;

    if (0 == wait_sync)
        return 0;

    /* waitsync_fd_to_close used solely by child */
    close(waitsync_fd_to_close);
    wait_end_time = time(NULL) + wait_sync;
    do {
        cur_time = time(NULL);
        wait_rem = (wait_end_time > cur_time)
            ? (wait_end_time - cur_time)
            : 0;
        wtimeout.tv_sec = wait_rem;
        wtimeout.tv_usec = 0;
        FD_ZERO(&readset);
        FD_SET(pipe_read_fd, &readset);
        rc = select(pipe_read_fd + 1, &readset, NULL, NULL,
            &wtimeout);
        if (-1 == rc) {
            if (EINTR == errno)
                continue;
            exit_code = (errno) ? errno : -1;
            msyslog(LOG_ERR, "--wait-sync select failed: %m");
            return exit_code;
        }
        if (0 == rc) {
            /*
             * select() indicated a timeout, but in case
             * its timeouts are affected by a step of the
             * system clock, select() again with a zero
             * timeout to confirm.
             */
            FD_ZERO(&readset);
            FD_SET(pipe_read_fd, &readset);
            wtimeout.tv_sec = 0;
            wtimeout.tv_usec = 0;
            rc = select(pipe_read_fd + 1, &readset, NULL, NULL,
                &wtimeout);
            if (0 == rc)    /* select() timeout */
                break;
            else            /* readable */
                return 0;
        } else {
            /* readable */
            return 0;
        }
    } while (wait_rem > 0);

    fprintf(stderr, "%s: -w/--wait-sync %ld timed out.\n",
        progname, wait_sync);
    return ETIMEDOUT;
}
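/*
 * For symmetry, a sketch of the child side of -w/--wait-sync, under the
 * assumption (consistent with the parent loop above, but not shown in
 * this file) that synchronization is signaled simply by making the pipe
 * readable: once the clock first synchronizes, the daemon writes a byte
 * to waitsync_fd_to_close and closes it.
 */
static void
waitsync_notify_sketch(int waitsync_fd)
{
    if (waitsync_fd >= 0) {
        write(waitsync_fd, "S", 1); /* wakes the parent's select() */
        close(waitsync_fd);
    }
}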
/* * local_clock - the NTP logical clock loop filter. * * Return codes: * -1 update ignored: exceeds panic threshold * 0 update ignored: popcorn or exceeds step threshold * 1 clock was slewed * 2 clock was stepped * * LOCKCLOCK: The only thing this routine does is set the * sys_rootdisp variable equal to the peer dispersion. */ int local_clock( struct peer *peer, /* synch source peer structure */ double fp_offset /* clock offset (s) */ ) { int rval; /* return code */ int osys_poll; /* old system poll */ int ntp_adj_ret; /* returned by ntp_adjtime */ double mu; /* interval since last update */ double clock_frequency; /* clock frequency */ double dtemp, etemp; /* double temps */ char tbuf[80]; /* report buffer */ /* * If the loop is opened or the NIST LOCKCLOCK is in use, * monitor and record the offsets anyway in order to determine * the open-loop response and then go home. */ #ifdef LOCKCLOCK { #else if (!ntp_enable) { #endif /* LOCKCLOCK */ record_loop_stats(fp_offset, drift_comp, clock_jitter, clock_stability, sys_poll); return (0); } #ifndef LOCKCLOCK /* * If the clock is way off, panic is declared. The clock_panic * defaults to 1000 s; if set to zero, the panic will never * occur. The allow_panic defaults to FALSE, so the first panic * will exit. It can be set TRUE by a command line option, in * which case the clock will be set anyway and time marches on. * But, allow_panic will be set FALSE when the update is less * than the step threshold; so, subsequent panics will exit. */ if (fabs(fp_offset) > clock_panic && clock_panic > 0 && !allow_panic) { snprintf(tbuf, sizeof(tbuf), "%+.0f s; set clock manually within %.0f s.", fp_offset, clock_panic); report_event(EVNT_SYSFAULT, NULL, tbuf); return (-1); } /* * This section simulates ntpdate. If the offset exceeds the * step threshold (128 ms), step the clock to that time and * exit. Otherwise, slew the clock to that time and exit. Note * that the slew will persist and eventually complete beyond the * life of this program. Note that while ntpdate is active, the * terminal does not detach, so the termination message prints * directly to the terminal. */ if (mode_ntpdate) { if ( ( fp_offset > clock_max_fwd && clock_max_fwd > 0) || (-fp_offset > clock_max_back && clock_max_back > 0)) { step_systime(fp_offset); msyslog(LOG_NOTICE, "ntpd: time set %+.6f s", fp_offset); printf("ntpd: time set %+.6fs\n", fp_offset); } else { adj_systime(fp_offset); msyslog(LOG_NOTICE, "ntpd: time slew %+.6f s", fp_offset); printf("ntpd: time slew %+.6fs\n", fp_offset); } record_loop_stats(fp_offset, drift_comp, clock_jitter, clock_stability, sys_poll); exit (0); } /* * The huff-n'-puff filter finds the lowest delay in the recent * interval. This is used to correct the offset by one-half the * difference between the sample delay and minimum delay. This * is most effective if the delays are highly assymetric and * clockhopping is avoided and the clock frequency wander is * relatively small. */ if (sys_huffpuff != NULL) { if (peer->delay < sys_huffpuff[sys_huffptr]) sys_huffpuff[sys_huffptr] = peer->delay; if (peer->delay < sys_mindly) sys_mindly = peer->delay; if (fp_offset > 0) dtemp = -(peer->delay - sys_mindly) / 2; else dtemp = (peer->delay - sys_mindly) / 2; fp_offset += dtemp; #ifdef DEBUG if (debug) printf( "local_clock: size %d mindly %.6f huffpuff %.6f\n", sys_hufflen, sys_mindly, dtemp); #endif } /* * Clock state machine transition function which defines how the * system reacts to large phase and frequency excursion. 
There * are two main regimes: when the offset exceeds the step * threshold (128 ms) and when it does not. Under certain * conditions updates are suspended until the stepout theshold * (900 s) is exceeded. See the documentation on how these * thresholds interact with commands and command line options. * * Note the kernel is disabled if step is disabled or greater * than 0.5 s or in ntpdate mode. */ osys_poll = sys_poll; if (sys_poll < peer->minpoll) sys_poll = peer->minpoll; if (sys_poll > peer->maxpoll) sys_poll = peer->maxpoll; mu = current_time - clock_epoch; clock_frequency = drift_comp; rval = 1; if ( ( fp_offset > clock_max_fwd && clock_max_fwd > 0) || (-fp_offset > clock_max_back && clock_max_back > 0) || force_step_once ) { if (force_step_once) { force_step_once = FALSE; /* we want this only once after startup */ msyslog(LOG_NOTICE, "Doing intital time step" ); } switch (state) { /* * In SYNC state we ignore the first outlyer and switch * to SPIK state. */ case EVNT_SYNC: snprintf(tbuf, sizeof(tbuf), "%+.6f s", fp_offset); report_event(EVNT_SPIK, NULL, tbuf); state = EVNT_SPIK; return (0); /* * In FREQ state we ignore outlyers and inlyers. At the * first outlyer after the stepout threshold, compute * the apparent frequency correction and step the phase. */ case EVNT_FREQ: if (mu < clock_minstep) return (0); clock_frequency = direct_freq(fp_offset); /* fall through to EVNT_SPIK */ /* * In SPIK state we ignore succeeding outlyers until * either an inlyer is found or the stepout threshold is * exceeded. */ case EVNT_SPIK: if (mu < clock_minstep) return (0); /* fall through to default */ /* * We get here by default in NSET and FSET states and * from above in FREQ or SPIK states. * * In NSET state an initial frequency correction is not * available, usually because the frequency file has not * yet been written. Since the time is outside the step * threshold, the clock is stepped. The frequency will * be set directly following the stepout interval. * * In FSET state the initial frequency has been set from * the frequency file. Since the time is outside the * step threshold, the clock is stepped immediately, * rather than after the stepout interval. Guys get * nervous if it takes 15 minutes to set the clock for * the first time. * * In FREQ and SPIK states the stepout threshold has * expired and the phase is still above the step * threshold. Note that a single spike greater than the * step threshold is always suppressed, even with a * long time constant. */ default: snprintf(tbuf, sizeof(tbuf), "%+.6f s", fp_offset); report_event(EVNT_CLOCKRESET, NULL, tbuf); step_systime(fp_offset); reinit_timer(); tc_counter = 0; clock_jitter = LOGTOD(sys_precision); rval = 2; if (state == EVNT_NSET) { rstclock(EVNT_FREQ, 0); return (rval); } break; } rstclock(EVNT_SYNC, 0); } else { /* * The offset is less than the step threshold. Calculate * the jitter as the exponentially weighted offset * differences. */ etemp = SQUARE(clock_jitter); dtemp = SQUARE(max(fabs(fp_offset - last_offset), LOGTOD(sys_precision))); clock_jitter = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG); switch (state) { /* * In NSET state this is the first update received and * the frequency has not been initialized. Adjust the * phase, but do not adjust the frequency until after * the stepout threshold. */ case EVNT_NSET: adj_systime(fp_offset); rstclock(EVNT_FREQ, fp_offset); break; /* * In FREQ state ignore updates until the stepout * threshold. 
After that, compute the new frequency, but * do not adjust the frequency until the holdoff counter * decrements to zero. */ case EVNT_FREQ: if (mu < clock_minstep) return (0); clock_frequency = direct_freq(fp_offset); /* fall through */ /* * We get here by default in FSET, SPIK and SYNC states. * Here compute the frequency update due to PLL and FLL * contributions. Note, we avoid frequency discipline at * startup until the initial transient has subsided. */ default: allow_panic = FALSE; if (freq_cnt == 0) { /* * The FLL and PLL frequency gain constants * depend on the time constant and Allan * intercept. The PLL is always used, but * becomes ineffective above the Allan intercept * where the FLL becomes effective. */ if (sys_poll >= allan_xpt) clock_frequency += (fp_offset - clock_offset) / max(ULOGTOD(sys_poll), mu) * CLOCK_FLL; /* * The PLL frequency gain (numerator) depends on * the minimum of the update interval and Allan * intercept. This reduces the PLL gain when the * FLL becomes effective. */ etemp = min(ULOGTOD(allan_xpt), mu); dtemp = 4 * CLOCK_PLL * ULOGTOD(sys_poll); clock_frequency += fp_offset * etemp / (dtemp * dtemp); } rstclock(EVNT_SYNC, fp_offset); if (fabs(fp_offset) < CLOCK_FLOOR) freq_cnt = 0; break; } } #ifdef KERNEL_PLL /* * This code segment works when clock adjustments are made using * precision time kernel support and the ntp_adjtime() system * call. This support is available in Solaris 2.6 and later, * Digital Unix 4.0 and later, FreeBSD, Linux and specially * modified kernels for HP-UX 9 and Ultrix 4. In the case of the * DECstation 5000/240 and Alpha AXP, additional kernel * modifications provide a true microsecond clock and nanosecond * clock, respectively. * * Important note: The kernel discipline is used only if the * step threshold is less than 0.5 s, as anything higher can * lead to overflow problems. This might occur if some misguided * lad set the step threshold to something ridiculous. */ if (pll_control && kern_enable && freq_cnt == 0) { /* * We initialize the structure for the ntp_adjtime() * system call. We have to convert everything to * microseconds or nanoseconds first. Do not update the * system variables if the ext_enable flag is set. In * this case, the external clock driver will update the * variables, which will be read later by the local * clock driver. Afterwards, remember the time and * frequency offsets for jitter and stability values and * to update the frequency file. */ ZERO(ntv); if (ext_enable) { ntv.modes = MOD_STATUS; } else { #ifdef STA_NANO ntv.modes = MOD_BITS | MOD_NANO; #else /* STA_NANO */ ntv.modes = MOD_BITS; #endif /* STA_NANO */ if (clock_offset < 0) dtemp = -.5; else dtemp = .5; #ifdef STA_NANO ntv.offset = (int32)(clock_offset * 1e9 + dtemp); ntv.constant = sys_poll; #else /* STA_NANO */ ntv.offset = (int32)(clock_offset * 1e6 + dtemp); ntv.constant = sys_poll - 4; #endif /* STA_NANO */ if (ntv.constant < 0) ntv.constant = 0; ntv.esterror = (u_int32)(clock_jitter * 1e6); ntv.maxerror = (u_int32)((sys_rootdelay / 2 + sys_rootdisp) * 1e6); ntv.status = STA_PLL; /* * Enable/disable the PPS if requested. 
#ifdef KERNEL_PLL
    /*
     * This code segment works when clock adjustments are made using
     * precision time kernel support and the ntp_adjtime() system
     * call. This support is available in Solaris 2.6 and later,
     * Digital Unix 4.0 and later, FreeBSD, Linux and specially
     * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
     * DECstation 5000/240 and Alpha AXP, additional kernel
     * modifications provide a true microsecond clock and nanosecond
     * clock, respectively.
     *
     * Important note: the kernel discipline is used only if the
     * step threshold is less than 0.5 s, as anything higher can
     * lead to overflow problems. This might occur if some misguided
     * lad set the step threshold to something ridiculous.
     */
    if (pll_control && kern_enable && freq_cnt == 0) {

        /*
         * We initialize the structure for the ntp_adjtime()
         * system call. We have to convert everything to
         * microseconds or nanoseconds first. Do not update the
         * system variables if the ext_enable flag is set. In
         * this case, the external clock driver will update the
         * variables, which will be read later by the local
         * clock driver. Afterwards, remember the time and
         * frequency offsets for jitter and stability values and
         * to update the frequency file.
         */
        ZERO(ntv);
        if (ext_enable) {
            ntv.modes = MOD_STATUS;
        } else {
#ifdef STA_NANO
            ntv.modes = MOD_BITS | MOD_NANO;
#else /* STA_NANO */
            ntv.modes = MOD_BITS;
#endif /* STA_NANO */
            if (clock_offset < 0)
                dtemp = -.5;
            else
                dtemp = .5;
#ifdef STA_NANO
            ntv.offset = (int32)(clock_offset * 1e9 + dtemp);
            ntv.constant = sys_poll;
#else /* STA_NANO */
            ntv.offset = (int32)(clock_offset * 1e6 + dtemp);
            ntv.constant = sys_poll - 4;
#endif /* STA_NANO */
            if (ntv.constant < 0)
                ntv.constant = 0;

            ntv.esterror = (u_int32)(clock_jitter * 1e6);
            ntv.maxerror = (u_int32)((sys_rootdelay / 2 +
                sys_rootdisp) * 1e6);
            ntv.status = STA_PLL;

            /*
             * Enable/disable the PPS if requested.
             */
            if (hardpps_enable) {
                ntv.status |= (STA_PPSTIME | STA_PPSFREQ);
                if (!(pll_status & STA_PPSTIME))
                    sync_status("PPS enabled",
                        pll_status, ntv.status);
            } else {
                ntv.status &= ~(STA_PPSTIME | STA_PPSFREQ);
                if (pll_status & STA_PPSTIME)
                    sync_status("PPS disabled",
                        pll_status, ntv.status);
            }
            if (sys_leap == LEAP_ADDSECOND)
                ntv.status |= STA_INS;
            else if (sys_leap == LEAP_DELSECOND)
                ntv.status |= STA_DEL;
        }

        /*
         * Pass the stuff to the kernel. If it squeals, turn off
         * the PPS. In any case, fetch the kernel offset,
         * frequency and jitter.
         */
        ntp_adj_ret = ntp_adjtime(&ntv);
        /*
         * A squeal is a return status < 0, or a state change.
         */
        if ((0 > ntp_adj_ret) || (ntp_adj_ret != kernel_status)) {
            kernel_status = ntp_adj_ret;
            ntp_adjtime_error_handler(__func__, &ntv,
                ntp_adj_ret, errno, hardpps_enable, 0,
                __LINE__ - 1);
        }
        pll_status = ntv.status;
#ifdef STA_NANO
        clock_offset = ntv.offset / 1e9;
#else /* STA_NANO */
        clock_offset = ntv.offset / 1e6;
#endif /* STA_NANO */
        clock_frequency = FREQTOD(ntv.freq);

        /*
         * If the kernel PPS is lit, monitor its performance.
         */
        if (ntv.status & STA_PPSTIME) {
#ifdef STA_NANO
            clock_jitter = ntv.jitter / 1e9;
#else /* STA_NANO */
            clock_jitter = ntv.jitter / 1e6;
#endif /* STA_NANO */
        }

#if defined(STA_NANO) && NTP_API == 4
        /*
         * If the TAI changes, update the kernel TAI.
         */
        if (loop_tai != sys_tai) {
            loop_tai = sys_tai;
            ntv.modes = MOD_TAI;
            ntv.constant = sys_tai;
            if ((ntp_adj_ret = ntp_adjtime(&ntv)) != 0) {
                ntp_adjtime_error_handler(__func__, &ntv,
                    ntp_adj_ret, errno, 0, 1,
                    __LINE__ - 1);
            }
        }
#endif /* STA_NANO */
    }
#endif /* KERNEL_PLL */

    /*
     * Clamp the frequency within the tolerance range and calculate
     * the frequency difference since the last update.
     */
    if (fabs(clock_frequency) > NTP_MAXFREQ)
        msyslog(LOG_NOTICE,
            "frequency error %.0f PPM exceeds tolerance %.0f PPM",
            clock_frequency * 1e6, NTP_MAXFREQ * 1e6);
    dtemp = SQUARE(clock_frequency - drift_comp);
    if (clock_frequency > NTP_MAXFREQ)
        drift_comp = NTP_MAXFREQ;
    else if (clock_frequency < -NTP_MAXFREQ)
        drift_comp = -NTP_MAXFREQ;
    else
        drift_comp = clock_frequency;

    /*
     * Calculate the wander as the exponentially weighted RMS
     * frequency differences. Record the change for the frequency
     * file update.
     */
    etemp = SQUARE(clock_stability);
    clock_stability = SQRT(etemp + (dtemp - etemp) / CLOCK_AVG);

    /*
     * Here we adjust the time constant by comparing the current
     * offset with the clock jitter. If the offset is less than the
     * clock jitter times a constant, then the averaging interval is
     * increased; otherwise it is decreased. A bit of hysteresis
     * helps calm the dance. Works best using burst mode. Don't
     * fiddle with the poll during the startup clamp period.
     */
    if (freq_cnt > 0) {
        tc_counter = 0;
    } else if (fabs(clock_offset) < CLOCK_PGATE * clock_jitter) {
        tc_counter += sys_poll;
        if (tc_counter > CLOCK_LIMIT) {
            tc_counter = CLOCK_LIMIT;
            if (sys_poll < peer->maxpoll) {
                tc_counter = 0;
                sys_poll++;
            }
        }
    } else {
        tc_counter -= sys_poll << 1;
        if (tc_counter < -CLOCK_LIMIT) {
            tc_counter = -CLOCK_LIMIT;
            if (sys_poll > peer->minpoll) {
                tc_counter = 0;
                sys_poll--;
            }
        }
    }
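    /*
     * Editorial illustration, not in the original source: with the
     * usual CLOCK_PGATE of 4 and CLOCK_LIMIT of 30, each quiet
     * update at sys_poll = 6 credits 6 toward the limit, so about
     * half a dozen quiet updates raise the poll exponent; a noisy
     * update debits twice as much (12), biasing the loop toward
     * shorter poll intervals when the offset dominates the jitter.
     */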
    /*
     * If the time constant has changed, update the poll variables.
     */
    if (osys_poll != sys_poll)
        poll_update(peer, sys_poll);

    /*
     * Yibbidy, yibbidy, yibbidy; that's all folks.
     */
    record_loop_stats(clock_offset, drift_comp, clock_jitter,
        clock_stability, sys_poll);
#ifdef DEBUG
    if (debug)
        printf(
            "local_clock: offset %.9f jit %.9f freq %.3f stab %.3f poll %d\n",
            clock_offset, clock_jitter, drift_comp * 1e6,
            clock_stability * 1e6, sys_poll);
#endif /* DEBUG */
    return (rval);
#endif /* LOCKCLOCK */
}


/*
 * adj_host_clock - called once every second to update the local clock.
 *
 * LOCKCLOCK: The only thing this routine does is increment the
 * sys_rootdisp variable.
 */
void
adj_host_clock(
    void
    )
{
    double offset_adj;
    double freq_adj;

    /*
     * Update the dispersion since the last update. In contrast to
     * NTPv3, NTPv4 does not declare unsynchronized after one day,
     * since the dispersion check serves this function. Also, since
     * the poll interval can exceed one day, the old test would be
     * counterproductive. During the startup clamp period, the time
     * constant is clamped at 2.
     */
    sys_rootdisp += clock_phi;
#ifndef LOCKCLOCK
    if (!ntp_enable || mode_ntpdate)
        return;

    /*
     * Determine the phase adjustment. The gain factor (denominator)
     * increases with the poll interval, so it is dominated by the
     * FLL above the Allan intercept. Note the reduced time constant
     * at startup.
     */
    if (state != EVNT_SYNC) {
        offset_adj = 0.;
    } else if (freq_cnt > 0) {
        offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(1));
        freq_cnt--;
#ifdef KERNEL_PLL
    } else if (pll_control && kern_enable) {
        offset_adj = 0.;
#endif /* KERNEL_PLL */
    } else {
        offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll));
    }

    /*
     * If the kernel discipline is enabled, the frequency correction
     * drift_comp has already been engaged via ntp_adjtime() in
     * set_freq(). Otherwise it is a component of the adj_systime()
     * offset.
     */
#ifdef KERNEL_PLL
    if (pll_control && kern_enable)
        freq_adj = 0.;
    else
#endif /* KERNEL_PLL */
        freq_adj = drift_comp;

    /* Bound the absolute value of the total adjustment to NTP_MAXFREQ. */
    if (offset_adj + freq_adj > NTP_MAXFREQ)
        offset_adj = NTP_MAXFREQ - freq_adj;
    else if (offset_adj + freq_adj < -NTP_MAXFREQ)
        offset_adj = -NTP_MAXFREQ - freq_adj;
    clock_offset -= offset_adj;

    /*
     * The Windows port's adj_systime() must be called each second,
     * even if the argument is zero, to ease emulation of adjtime()
     * using the Windows slew API, which controls the rate but does
     * not automatically stop slewing when an offset has decayed to
     * zero.
     */
    adj_systime(offset_adj + freq_adj);
#endif /* LOCKCLOCK */
}
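/*
 * Editorial sketch, not part of ntpd: a minimal, self-contained
 * reader showing the ntp_adjtime() read-back convention used in
 * local_clock() above. With modes == 0 the call modifies nothing and
 * only fills in the struct; offset and jitter scale as nanoseconds
 * or microseconds depending on STA_NANO, and freq carries PPM with a
 * 16-bit fraction, matching the FREQTOD() conversion. Assumes a
 * Linux/FreeBSD-style <sys/timex.h>; the file name kernstate.c is
 * hypothetical.
 */
#include <stdio.h>
#include <string.h>
#include <sys/timex.h>

int
main(void)
{
    struct timex tx;
    double scale;
    int rc;

    memset(&tx, 0, sizeof(tx));
    tx.modes = 0;               /* read-only query, adjust nothing */
    rc = ntp_adjtime(&tx);
    if (rc < 0) {
        perror("ntp_adjtime");
        return 1;
    }
#ifdef STA_NANO
    /* nanoseconds when the kernel reports STA_NANO, else microseconds */
    scale = (tx.status & STA_NANO) ? 1e9 : 1e6;
#else
    scale = 1e6;
#endif
    printf("state %d offset %.9f s jitter %.9f s freq %.3f PPM\n",
        rc, tx.offset / scale, tx.jitter / scale,
        tx.freq / 65536.0);
    return 0;
}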