Example no. 1
0
static void
start_adjust(void)
{
  struct timeval newadj, oldadj;
  struct timeval T1;
  double elapsed, accrued_error;
  double adjust_required;
  struct timeval exact_newadj;
  long delta, tickdelta;
  double rounding_error;
  double old_adjust_remaining;

  /* Determine the amount of error built up since the last adjustment */
  if (gettimeofday(&T1, NULL) < 0) {
    LOG_FATAL(LOGF_SysNetBSD, "gettimeofday() failed");
  }

  UTI_DiffTimevalsToDouble(&elapsed, &T1, &T0);
  accrued_error = elapsed * current_freq;
  
  adjust_required = - (accrued_error + offset_register);

  UTI_DoubleToTimeval(adjust_required, &exact_newadj);

  /* At this point, we need to round the required adjustment the
     same way the kernel does. */

  delta = exact_newadj.tv_sec * 1000000 + exact_newadj.tv_usec;
  if (delta > kern_bigadj || delta < -kern_bigadj)
    tickdelta = 10 * kern_tickadj;
  else
    tickdelta = kern_tickadj;
  if (delta % tickdelta)
    delta = delta / tickdelta * tickdelta;
  newadj.tv_sec = 0;
  newadj.tv_usec = delta;
  UTI_NormaliseTimeval(&newadj);

  /* Add rounding error back onto offset register. */
  UTI_DiffTimevalsToDouble(&rounding_error, &newadj, &exact_newadj);

  if (adjtime(&newadj, &oldadj) < 0) {
    LOG_FATAL(LOGF_SysNetBSD, "adjtime() failed");
  }

  UTI_TimevalToDouble(&oldadj, &old_adjust_remaining);

  offset_register = rounding_error - old_adjust_remaining;

  T0 = T1;
  UTI_TimevalToDouble(&newadj, &adjustment_requested);

}
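
The UTI_* timeval helpers are used throughout these examples but are not shown. A minimal sketch of what the two most common ones presumably compute, assuming ordinary struct timeval semantics (the real implementations live elsewhere in the code base and also take care of rounding and normalisation):

#include <sys/time.h>

/* Sketch only: difference (a - b) between two timevals, in seconds */
static void
UTI_DiffTimevalsToDouble(double *result, struct timeval *a, struct timeval *b)
{
  *result = (double)(a->tv_sec - b->tv_sec) + 1.0e-6 * (double)(a->tv_usec - b->tv_usec);
}

/* Sketch only: convert a number of seconds into a timeval; a real
   implementation would also round and normalise the result */
static void
UTI_DoubleToTimeval(double d, struct timeval *tv)
{
  tv->tv_sec = (time_t)d;
  tv->tv_usec = (long)(1.0e6 * (d - (double)tv->tv_sec));
}
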
Example no. 2
0
int
MNL_AcceptTimestamp(struct timeval *ts, long *offset_cs, double *dfreq_ppm, double *new_afreq_ppm)
{
  struct timeval now;
  double offset, diff;
  int i;

  if (enabled) {
    LCL_ReadCookedTime(&now, NULL);

    /* Make sure the provided timestamp is sane and the sample
       is not too close to the last one */

    if (!UTI_IsTimeOffsetSane(ts, 0.0))
      return 0;

    if (n_samples) {
      UTI_DiffTimevalsToDouble(&diff, &now, &samples[n_samples - 1].when);
      if (diff < MIN_SAMPLE_SEPARATION)
        return 0;
    }

    UTI_DiffTimevalsToDouble(&offset, &now, ts);

    /* Check if the buffer is full */
    if (n_samples == MAX_SAMPLES) {
      /* Shift samples down */
      for (i=1; i<n_samples; i++) {
        samples[i-1] = samples[i];
      }
      --n_samples;
    }
    
    samples[n_samples].when = now;
    samples[n_samples].offset = offset;
    samples[n_samples].orig_offset = offset;
    ++n_samples;

    estimate_and_set_system(&now, 1, offset, offset_cs, dfreq_ppm, new_afreq_ppm);

    return 1;

  } else {
  
    return 0;

  }
}
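
A hypothetical caller of MNL_AcceptTimestamp (for example, code handling an operator-supplied settime request) might look roughly like the fragment below; the function name and the printf reporting are purely illustrative:

#include <stdio.h>
#include <sys/time.h>

/* Illustrative sketch of a caller, not taken from the source */
static void
handle_settime_request(void)
{
  struct timeval ts;
  long offset_cs;
  double dfreq_ppm, new_afreq_ppm;

  /* Timestamp supplied by the operator; here simply the current system time */
  if (gettimeofday(&ts, NULL) < 0)
    return;

  if (MNL_AcceptTimestamp(&ts, &offset_cs, &dfreq_ppm, &new_afreq_ppm))
    printf("accepted: offset=%ld cs dfreq=%.3f ppm afreq=%.3f ppm\n",
           offset_cs, dfreq_ppm, new_afreq_ppm);
  else
    printf("rejected (disabled, insane timestamp, or too close to the last sample)\n");
}
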
Example no. 3
0
static void
stop_adjust(void)
{
  struct timeval T1;
  struct timeval zeroadj, remadj;
  double adjustment_remaining, adjustment_achieved;
  double elapsed, elapsed_plus_adjust;

  zeroadj.tv_sec = 0;
  zeroadj.tv_usec = 0;

  if (adjtime(&zeroadj, &remadj) < 0) {
    LOG_FATAL(LOGF_SysNetBSD, "adjtime() failed");
  }

  if (gettimeofday(&T1, NULL) < 0) {
    LOG_FATAL(LOGF_SysNetBSD, "gettimeofday() failed");
  }
  
  UTI_DiffTimevalsToDouble(&elapsed, &T1, &T0);
  UTI_TimevalToDouble(&remadj, &adjustment_remaining);

  adjustment_achieved = adjustment_requested - adjustment_remaining;
  elapsed_plus_adjust = elapsed - adjustment_achieved;

  offset_register += current_freq * elapsed_plus_adjust - adjustment_remaining;

  adjustment_requested = 0.0;
  T0 = T1;

}
Example no. 4
0
int
SST_IsGoodSample(SST_Stats inst, double offset, double delay,
    double max_delay_dev_ratio, double clock_error, struct timeval *when)
{
  double elapsed, allowed_increase, delay_increase;

  if (inst->n_samples < 3)
    return 1;

  UTI_DiffTimevalsToDouble(&elapsed, when, &inst->offset_time);

  /* Require that the ratio of the increase in delay from the minimum to the
     standard deviation is less than max_delay_dev_ratio.  The allowed
     increase in delay also accounts for the skew and clock_error terms. */
    
  allowed_increase = sqrt(inst->variance) * max_delay_dev_ratio +
    elapsed * (inst->skew + clock_error);
  delay_increase = (delay - SST_MinRoundTripDelay(inst)) / 2.0;

  if (delay_increase < allowed_increase)
    return 1;

  offset -= inst->estimated_offset + elapsed * inst->estimated_frequency;

  /* Before we decide to drop the sample, make sure the difference between
     measured offset and predicted offset is not significantly larger than
     the increase in delay */
  if (fabs(offset) - delay_increase > allowed_increase)
    return 1;

  DEBUG_LOG(LOGF_SourceStats, "Bad sample: offset=%f delay=%f incr_delay=%f allowed=%f",
      offset, delay, delay_increase, allowed_increase);

  return 0;
}
Example no. 5
0
void
SST_GetTrackingData(SST_Stats inst, struct timeval *ref_time,
                    double *average_offset, double *offset_sd,
                    double *frequency, double *skew,
                    double *root_delay, double *root_dispersion)
{
  int i, j;
  double elapsed_sample;

  assert(inst->n_samples > 0);

  i = get_runsbuf_index(inst, inst->best_single_sample);
  j = get_buf_index(inst, inst->best_single_sample);

  *ref_time = inst->offset_time;
  *average_offset = inst->estimated_offset;
  *offset_sd = inst->estimated_offset_sd;
  *frequency = inst->estimated_frequency;
  *skew = inst->skew;
  *root_delay = inst->root_delays[j];

  UTI_DiffTimevalsToDouble(&elapsed_sample, &inst->offset_time, &inst->sample_times[i]);
  *root_dispersion = inst->root_dispersions[j] + inst->skew * elapsed_sample;

  DEBUG_LOG(LOGF_SourceStats, "n=%d freq=%f (%.3fppm) skew=%f (%.3fppm) avoff=%f offsd=%f disp=%f",
      inst->n_samples, *frequency, 1.0e6* *frequency, *skew, 1.0e6* *skew, *average_offset, *offset_sd, *root_dispersion);

}
Example no. 6
0
void
NSR_HandleBadSource(IPAddr *address)
{
  static struct timeval last_replacement;
  struct timeval now;
  NTP_Remote_Address remote_addr;
  SourceRecord *record;
  int slot, found;
  double diff;

  remote_addr.ip_addr = *address;
  remote_addr.port = 0;

  find_slot(&remote_addr, &slot, &found);
  if (!found)
    return;

  record = get_record(slot);

  /* Only sources with a name can be replaced */
  if (!record->name)
    return;

  /* Don't resolve names too frequently */
  SCH_GetLastEventTime(NULL, NULL, &now);
  UTI_DiffTimevalsToDouble(&diff, &now, &last_replacement);
  if (fabs(diff) < RESOLVE_INTERVAL_UNIT * (1 << MIN_REPLACEMENT_INTERVAL)) {
    DEBUG_LOG(LOGF_NtpSources, "replacement postponed");
    return;
  }
  last_replacement = now;

  resolve_source_replacement(record);
}
Example no. 7
0
void
SST_GetSelectionData(SST_Stats inst, struct timeval *now,
                     int *stratum,
                     double *offset_lo_limit,
                     double *offset_hi_limit,
                     double *root_distance,
                     double *variance, int *select_ok)
{
  double offset, sample_elapsed;
  int i, j;
  
  i = get_runsbuf_index(inst, inst->best_single_sample);
  j = get_buf_index(inst, inst->best_single_sample);

  *stratum = inst->strata[get_buf_index(inst, inst->n_samples - 1)];
  *variance = inst->variance;

  UTI_DiffTimevalsToDouble(&sample_elapsed, now, &inst->sample_times[i]);
  offset = inst->offsets[i] + sample_elapsed * inst->estimated_frequency;
  *root_distance = 0.5 * inst->root_delays[j] +
    inst->root_dispersions[j] + sample_elapsed * inst->skew;

  *offset_lo_limit = offset - *root_distance;
  *offset_hi_limit = offset + *root_distance;

#if 0
  double average_offset, elapsed;
  int average_ok;
  /* average_ok ignored for now */
  UTI_DiffTimevalsToDouble(&elapsed, now, &(inst->offset_time));
  average_offset = inst->estimated_offset + inst->estimated_frequency * elapsed;
  if (fabs(average_offset - offset) <=
      inst->peer_dispersions[j] + 0.5 * inst->peer_delays[j]) {
    average_ok = 1;
  } else {
    average_ok = 0;
  }
#endif

  *select_ok = inst->regression_ok;

  DEBUG_LOG(LOGF_SourceStats, "n=%d off=%f dist=%f var=%f selok=%d",
      inst->n_samples, offset, *root_distance, *variance, *select_ok);
}
Example no. 8
0
void
SST_DoSourcestatsReport(SST_Stats inst, RPT_SourcestatsReport *report, struct timeval *now)
{
  double dspan;
  double elapsed, sample_elapsed;
  int li, lj, bi, bj;

  report->n_samples = inst->n_samples;
  report->n_runs = inst->nruns;

  if (inst->n_samples > 1) {
    li = get_runsbuf_index(inst, inst->n_samples - 1);
    lj = get_buf_index(inst, inst->n_samples - 1);
    UTI_DiffTimevalsToDouble(&dspan, &inst->sample_times[li],
        &inst->sample_times[get_runsbuf_index(inst, 0)]);
    report->span_seconds = (unsigned long) (dspan + 0.5);

    if (inst->n_samples > 3) {
      UTI_DiffTimevalsToDouble(&elapsed, now, &inst->offset_time);
      bi = get_runsbuf_index(inst, inst->best_single_sample);
      bj = get_buf_index(inst, inst->best_single_sample);
      UTI_DiffTimevalsToDouble(&sample_elapsed, now, &inst->sample_times[bi]);
      report->est_offset = inst->estimated_offset + elapsed * inst->estimated_frequency;
      report->est_offset_err = (inst->estimated_offset_sd +
                 sample_elapsed * inst->skew +
                 (0.5*inst->root_delays[bj] + inst->root_dispersions[bj]));
    } else {
      report->est_offset = inst->offsets[li];
      report->est_offset_err = 0.5*inst->root_delays[lj] + inst->root_dispersions[lj];
    }
  } else {
    report->span_seconds = 0;
    report->est_offset = 0;
    report->est_offset_err = 0;
  }

  report->resid_freq_ppm = 1.0e6 * inst->estimated_frequency;
  report->skew_ppm = 1.0e6 * inst->skew;
  report->sd = sqrt(inst->variance);
}
Example no. 9
0
static void
convert_to_intervals(SST_Stats inst, double *times_back)
{
  struct timeval *newest_tv;
  int i;

  newest_tv = &(inst->sample_times[inst->last_sample]);
  for (i = -inst->runs_samples; i < inst->n_samples; i++) {
    /* The entries in times_back[] should end up negative */
    UTI_DiffTimevalsToDouble(&times_back[i],
        &inst->sample_times[get_runsbuf_index(inst, i)], newest_tv);
  }
}
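
Note that convert_to_intervals indexes times_back[] with negative i, so the caller presumably passes a pointer into the middle of a larger buffer. A hypothetical sketch of that convention (the array size constants are placeholders, not names from the source):

  /* Sketch only: reserve room below index 0 for the runs samples */
  double buffer[RUNS_SAMPLES_MAX + STATS_SAMPLES_MAX];
  double *times_back = buffer + RUNS_SAMPLES_MAX;

  /* Indices -inst->runs_samples .. inst->n_samples - 1 are now addressable */
  convert_to_intervals(inst, times_back);
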
Example no. 10
0
double
SST_PredictOffset(SST_Stats inst, struct timeval *when)
{
  double elapsed;
  
  if (inst->n_samples < 3) {
    /* We don't have any useful statistics, and presumably the poll
       interval is minimal.  We can't do any useful prediction other
       than to use the latest sample, or zero if we don't have any samples */
    if (inst->n_samples > 0) {
      return inst->offsets[inst->last_sample];
    } else {
      return 0.0;
    }
  } else {
    UTI_DiffTimevalsToDouble(&elapsed, when, &inst->offset_time);
    return inst->estimated_offset + elapsed * inst->estimated_frequency;
  }

}
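
SST_PredictOffset is a plain linear extrapolation from the regression results. A hypothetical caller would obtain the current cooked time and ask for the predicted offset at that instant (an illustrative fragment, assuming inst is a valid SST_Stats handle):

  struct timeval when;
  double predicted_offset;

  LCL_ReadCookedTime(&when, NULL);
  predicted_offset = SST_PredictOffset(inst, &when);
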
Example no. 11
0
static int
apply_step_offset(double offset)
{
  struct timeval old_time, new_time;
  double err;

  LCL_ReadRawTime(&old_time);
  UTI_AddDoubleToTimeval(&old_time, -offset, &new_time);

  if (PRV_SetTime(&new_time, NULL) < 0) {
    DEBUG_LOG(LOGF_SysGeneric, "settimeofday() failed");
    return 0;
  }

  LCL_ReadRawTime(&old_time);
  UTI_DiffTimevalsToDouble(&err, &old_time, &new_time);

  lcl_InvokeDispersionNotifyHandlers(fabs(err));

  return 1;
}
Example no. 12
0
int
MNL_AcceptTimestamp(struct timeval *ts, long *offset_cs, double *dfreq_ppm, double *new_afreq_ppm)
{
  struct timeval now;
  double offset;
  int i;

  if (enabled) {

    /* Read the current cooked time to compare against the provided timestamp */
    LCL_ReadCookedTime(&now, NULL);

    UTI_DiffTimevalsToDouble(&offset, &now, ts);

    /* Check if the buffer is full */
    if (n_samples == MAX_SAMPLES) {
      /* Shift samples down */
      for (i=1; i<n_samples; i++) {
        samples[i-1] = samples[i];
      }
      --n_samples;
    }
    
    samples[n_samples].when = now;
    samples[n_samples].offset = offset;
    samples[n_samples].orig_offset = offset;
    ++n_samples;

    estimate_and_set_system(&now, 1, offset, offset_cs, dfreq_ppm, new_afreq_ppm);

    return 1;

  } else {
  
    return 0;

  }
}
Example no. 13
0
static void
offset_convert(struct timeval *raw,
               double *corr, double *err)
{
  double duration, fastslew_corr, fastslew_err;

  UTI_DiffTimevalsToDouble(&duration, raw, &slew_start);

  if (drv_get_offset_correction && fastslew_active) {
    drv_get_offset_correction(raw, &fastslew_corr, &fastslew_err);
    if (fastslew_corr == 0.0 && fastslew_err == 0.0)
      fastslew_active = 0;
  } else {
    fastslew_corr = fastslew_err = 0.0;
  }

  *corr = slew_freq * duration + fastslew_corr - offset_register;

  if (err) {
    *err = fastslew_err;
    if (fabs(duration) <= max_freq_change_delay)
      *err += slew_error;
  }
}
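
offset_convert yields the correction to apply to a raw timestamp, plus an optional error bound. A hypothetical use, composed from the timeval helpers seen elsewhere in these examples (the exact sign convention of the cooked time is an assumption here):

  struct timeval raw, cooked;
  double corr, err;

  LCL_ReadRawTime(&raw);
  offset_convert(&raw, &corr, &err);
  UTI_AddDoubleToTimeval(&raw, corr, &cooked);  /* cooked time = raw time + correction */
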
Example no. 14
0
static void
estimate_and_set_system(struct timeval *now, int offset_provided, double offset, long *offset_cs, double *dfreq_ppm, double *new_afreq_ppm)
{
  double agos[MAX_SAMPLES], offsets[MAX_SAMPLES];
  double b0, b1;
  int n_runs, best_start; /* Unused results from regression analyser */
  int i;
  double freq = 0.0;
  double skew = 0.099999999; /* All 9's when printed to log file */
  int found_freq;
  double slew_by;

  if (n_samples > 1) {
    for (i=0; i<n_samples; i++) {
      UTI_DiffTimevalsToDouble(&agos[i], &samples[n_samples-1].when, &samples[i].when);
      offsets[i] = samples[i].offset;
    }
    
    RGR_FindBestRobustRegression(agos, offsets, n_samples,
                                 1.0e-8, /* 0.01ppm easily good enough for this! */
                                 &b0, &b1, &n_runs, &best_start);
    
    
    /* Ignore b0 from regression; treat offset as being the most
       recently entered value.  (If the administrator knows he's put
       an outlier in, he will rerun the settime operation.)   However,
       the frequency estimate comes from the regression. */
    
    freq = -b1;
    found_freq = 1;
  } else {
    if (offset_provided) {
      b0 = offset;
    } else {
      b0 = 0.0;
    }
    b1 = freq = 0.0;
    found_freq = 0;
    agos[0] = 0.0;
    offsets[0] = b0;
  }

  if (offset_provided) {
    slew_by = offset;
  } else {
    slew_by = b0;
  }
  
  if (found_freq) {
    LOG(LOGS_INFO, LOGF_Manual,
        "Making a frequency change of %.3f ppm and a slew of %.6f",
        1.0e6 * freq, slew_by);
    
    REF_SetManualReference(now,
                           slew_by,
                           freq, skew);
  } else {
    LOG(LOGS_INFO, LOGF_Manual, "Making a slew of %.6f", slew_by);
    REF_SetManualReference(now,
                           slew_by,
                           0.0, skew);
  }
  
  if (offset_cs) *offset_cs = (long)(0.5 + 100.0 * b0);
  if (dfreq_ppm) *dfreq_ppm = 1.0e6 * freq;
  if (new_afreq_ppm) *new_afreq_ppm = LCL_ReadAbsoluteFrequency();
  
  /* Calculate residuals to store them */
  for (i=0; i<n_samples; i++) {
    samples[i].residual = offsets[i] - (b0 + agos[i] * b1);
  }
  
}
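
A minimal numeric sketch of why freq is -b1: agos[] measures time backwards from the newest sample, so the regression slope b1 is the change of offset per second of "ago"; the frequency error is the slope against forward time, hence the sign flip. The least-squares helper below is only an illustrative stand-in for RGR_FindBestRobustRegression:

#include <stdio.h>

/* Illustrative ordinary least-squares fit of y = b0 + b1 * x */
static void
ols(const double *x, const double *y, int n, double *b0, double *b1)
{
  double sx = 0.0, sy = 0.0, sxx = 0.0, sxy = 0.0;
  int i;

  for (i = 0; i < n; i++) {
    sx += x[i]; sy += y[i]; sxx += x[i] * x[i]; sxy += x[i] * y[i];
  }
  *b1 = (n * sxy - sx * sy) / (n * sxx - sx * sx);
  *b0 = (sy - *b1 * sx) / n;
}

int main(void)
{
  /* A clock gaining 10 ppm: offsets grow by 10 us per second of forward
     time, i.e. they shrink as "ago" grows */
  double agos[] = { 20.0, 10.0, 0.0 };          /* seconds before the newest sample */
  double offsets[] = { 1.0e-4, 2.0e-4, 3.0e-4 };
  double b0, b1;

  ols(agos, offsets, 3, &b0, &b1);
  printf("b1 = %e, freq = -b1 = %e (expect +1.0e-5)\n", b1, -b1);
  return 0;
}
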
Example no. 15
0
static void
update_slew(void)
{
  struct timeval now, end_of_slew;
  double old_slew_freq, total_freq, corr_freq, duration;

  /* Remove currently running timeout */
  SCH_RemoveTimeout(slew_timeout_id);

  LCL_ReadRawTime(&now);

  /* Adjust the offset register by achieved slew */
  UTI_DiffTimevalsToDouble(&duration, &now, &slew_start);
  offset_register -= slew_freq * duration;

  stop_fastslew(&now);

  /* Estimate how long the next slew should take */
  if (fabs(offset_register) < MIN_OFFSET_CORRECTION) {
    duration = MAX_SLEW_TIMEOUT;
  } else {
    duration = correction_rate / fabs(offset_register);
    if (duration < MIN_SLEW_TIMEOUT)
      duration = MIN_SLEW_TIMEOUT;
  }

  /* Get the frequency offset needed to slew away the offset over that
     duration and clamp it to the allowed maximum */
  corr_freq = offset_register / duration;
  if (corr_freq < -max_corr_freq)
    corr_freq = -max_corr_freq;
  else if (corr_freq > max_corr_freq)
    corr_freq = max_corr_freq;

  /* Let the system driver perform the slew if the requested frequency
     offset is too large for the frequency driver */
  if (drv_accrue_offset && fabs(corr_freq) >= fastslew_max_rate &&
      fabs(offset_register) > fastslew_min_offset) {
    start_fastslew();
    corr_freq = 0.0;
  }

  /* Get the new real frequency and clamp it */
  total_freq = clamp_freq(base_freq + corr_freq * (1.0e6 - base_freq));

  /* Set the new frequency (the actual frequency returned by the call may be
     slightly different from the requested frequency due to rounding) */
  total_freq = (*drv_set_freq)(total_freq);

  /* Compute the new slewing frequency; it is expressed relative to the real
     frequency to keep the calculation in offset_convert() cheap */
  old_slew_freq = slew_freq;
  slew_freq = (total_freq - base_freq) / (1.0e6 - total_freq);

  /* Compute the dispersion introduced by changing frequency and add it
     to all statistics held at higher levels in the system */
  slew_error = fabs((old_slew_freq - slew_freq) * max_freq_change_delay);
  if (slew_error >= MIN_OFFSET_CORRECTION)
    lcl_InvokeDispersionNotifyHandlers(slew_error);

  /* Compute the duration of the slew and clamp it.  If the slewing frequency
     is zero or has wrong sign (e.g. due to rounding in the frequency driver or
     when base_freq is larger than max_freq, or fast slew is active), use the
     maximum timeout and try again on the next update. */
  if (fabs(offset_register) < MIN_OFFSET_CORRECTION ||
      offset_register * slew_freq <= 0.0) {
    duration = MAX_SLEW_TIMEOUT;
  } else {
    duration = offset_register / slew_freq;
    if (duration < MIN_SLEW_TIMEOUT)
      duration = MIN_SLEW_TIMEOUT;
    else if (duration > MAX_SLEW_TIMEOUT)
      duration = MAX_SLEW_TIMEOUT;
  }

  /* Restart timer for the next update */
  UTI_AddDoubleToTimeval(&now, duration, &end_of_slew);
  slew_timeout_id = SCH_AddTimeout(&end_of_slew, handle_end_of_slew, NULL);
  slew_start = now;

  DEBUG_LOG(LOGF_SysGeneric, "slew offset=%e corr_rate=%e base_freq=%f total_freq=%f slew_freq=%e duration=%f slew_error=%e",
      offset_register, correction_rate, base_freq, total_freq, slew_freq,
      duration, slew_error);
}
Example no. 16
0
int
RTC_Linux_TimePreInit(time_t driftfile_time)
{
  int fd, status;
  struct rtc_time rtc_raw, rtc_raw_retry;
  struct tm rtc_tm;
  time_t rtc_t;
  double accumulated_error, sys_offset;
  struct timeval new_sys_time, old_sys_time;

  coefs_file_name = CNF_GetRtcFile();

  setup_config();
  read_coefs_from_file();

  fd = open(CNF_GetRtcDevice(), O_RDONLY);

  if (fd < 0) {
    return 0; /* Can't open it, and won't be able to later */
  }

  /* Retry reading the RTC until both read attempts give the same seconds
     value.  This avoids the race condition where the RTC updates itself
     during the first read operation. */
  do {
    status = ioctl(fd, RTC_RD_TIME, &rtc_raw);
    if (status >= 0) {
      status = ioctl(fd, RTC_RD_TIME, &rtc_raw_retry);
    }
  } while (status >= 0 && rtc_raw.tm_sec != rtc_raw_retry.tm_sec);

  /* Read system clock */
  LCL_ReadCookedTime(&old_sys_time, NULL);

  close(fd);

  if (status >= 0) {
    /* Convert to seconds since 1970 */
    rtc_tm.tm_sec = rtc_raw.tm_sec;
    rtc_tm.tm_min = rtc_raw.tm_min;
    rtc_tm.tm_hour = rtc_raw.tm_hour;
    rtc_tm.tm_mday = rtc_raw.tm_mday;
    rtc_tm.tm_mon = rtc_raw.tm_mon;
    rtc_tm.tm_year = rtc_raw.tm_year;
    
    rtc_t = t_from_rtc(&rtc_tm);

    if (rtc_t != (time_t)(-1)) {

      /* Work out an approximation to the correct time (to about the
         nearest second) */
      if (valid_coefs_from_file) {
        accumulated_error = file_ref_offset +
          (rtc_t - file_ref_time) * 1.0e-6 * file_rate_ppm;
      } else {
        accumulated_error = 0.0;
      }

      /* Correct time */

      new_sys_time.tv_sec = rtc_t;
      /* Average error in the RTC reading */
      new_sys_time.tv_usec = 500000;

      UTI_AddDoubleToTimeval(&new_sys_time, -accumulated_error, &new_sys_time);

      if (new_sys_time.tv_sec < driftfile_time) {
        LOG(LOGS_WARN, LOGF_RtcLinux, "RTC time before last driftfile modification (ignored)");
        return 0;
      }

      UTI_DiffTimevalsToDouble(&sys_offset, &old_sys_time, &new_sys_time);

      /* Set the system time only if the step is at least 1 second */
      if (fabs(sys_offset) >= 1.0) {
        if (LCL_ApplyStepOffset(sys_offset))
          LOG(LOGS_INFO, LOGF_RtcLinux, "System time set from RTC");
      }
    } else {
      return 0;
    }
  } else {
    return 0;
  }

  return 1;
}
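
t_from_rtc is not shown above; it presumably converts the broken-down RTC time into a time_t. A minimal sketch, assuming the RTC runs on UTC (a real implementation may also have to handle an RTC kept on local time):

#include <time.h>

/* Sketch only: broken-down RTC time to seconds since 1970, RTC assumed to be on UTC */
static time_t
t_from_rtc(struct tm *rtc_tm)
{
  struct tm tm = *rtc_tm;

  tm.tm_isdst = 0;
  return timegm(&tm);  /* timegm() is a common extension; portable code would
                          adjust mktime() by the local UTC offset instead */
}
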