Example #1
static void
handle_doffset(CMD_Request *rx_message, CMD_Reply *tx_message)
{
  long sec, usec;
  double doffset;

  /* The request carries the offset as network-order seconds and
     microseconds; the cast through int32_t preserves the sign of
     negative offsets before widening to long. */
  sec = (int32_t)ntohl(rx_message->data.doffset.sec);
  usec = (int32_t)ntohl(rx_message->data.doffset.usec);
  doffset = (double) sec + 1.0e-6 * (double) usec;

  LOG(LOGS_INFO, LOGF_CmdMon, "Accumulated delta offset of %.6f seconds", doffset);
  LCL_AccumulateOffset(doffset, 0.0);
}
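The decoding above hinges on the ntohl()/int32_t pairing: the wire format is network byte order, and casting back through int32_t keeps negative offsets negative before they widen to long. A minimal, self-contained sketch of that round trip, using hypothetical wire values rather than a real CMD_Request:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
  /* Hypothetical wire values: -2 seconds and 250000 microseconds,
     encoded in network byte order as they would appear in the
     doffset request. */
  uint32_t wire_sec = htonl((uint32_t)(int32_t)-2);
  uint32_t wire_usec = htonl(250000);

  /* Decode exactly as handle_doffset does: ntohl(), then a cast
     through int32_t so negative values keep their sign. */
  long sec = (int32_t)ntohl(wire_sec);
  long usec = (int32_t)ntohl(wire_usec);
  double doffset = (double)sec + 1.0e-6 * (double)usec;

  printf("decoded offset = %.6f seconds\n", doffset); /* -1.750000 */
  return 0;
}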
Example #2
static void
handle_initial_trim(void)
{
  double rate;
  long delta_time;
  double rtc_error_now, sys_error_now;

  /* The idea is to accumulate some number of samples at 1 second
     intervals, then do a robust regression fit to this.  This
     should give a good fix on the intercept (= system clock error
     relative to the RTC) at a particular time, removing the risk of
     any particular sample being an outlier.  We can then look at the
     elapsed interval since the epoch recorded in the RTC file, and
     correct the system time accordingly. */

  run_regression(1, &coefs_valid, &coef_ref_time, &coef_seconds_fast, &coef_gain_rate);

  n_samples_since_regression = 0;

  /* Set the sample number to -1 so the next sample is not used, as it
     will not yet have been corrected for the system trim. */
  n_samples = -1;

  read_coefs_from_file();

  if (valid_coefs_from_file) {
    /* Can process data */
    delta_time = coef_ref_time - file_ref_time;
    rate = 1.0e-6 * file_rate_ppm;
    rtc_error_now = file_ref_offset + rate * (double) delta_time;

    /* sys_error_now is positive if the system clock is fast */
    sys_error_now = rtc_error_now - coef_seconds_fast;

    LOG(LOGS_INFO, LOGF_RtcLinux, "System trim from RTC = %f", sys_error_now);
    LCL_AccumulateOffset(sys_error_now, 0.0);
  } else {
    LOG(LOGS_WARN, LOGF_RtcLinux, "No valid file coefficients, cannot trim system time");
  }

  coefs_valid = 0;

  (after_init_hook)(after_init_hook_arg);

  operating_mode = OM_NORMAL;
}
}
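The arithmetic in the valid-coefficients branch is a straight linear extrapolation: the RTC's error at the regression epoch is the error recorded in the coefficient file plus the file's drift rate (in ppm, hence the 1.0e-6 scaling) times the elapsed seconds, and the system clock's error is that minus the regression intercept. A standalone sketch with hypothetical coefficient values, standing in for what read_coefs_from_file() and run_regression() would produce:

#include <stdio.h>

int main(void)
{
  /* Hypothetical coefficients (the real values would come from the
     RTC coefficient file and the regression fit). */
  double file_ref_offset   = 0.120;  /* RTC error at file_ref_time, seconds */
  double file_rate_ppm     = 12.0;   /* RTC drift: 12 microseconds per second */
  long   delta_time        = 86400;  /* seconds between file_ref_time and coef_ref_time */
  double coef_seconds_fast = 0.950;  /* regression intercept at coef_ref_time */

  /* Same computation as the valid-coefficients branch above. */
  double rate = 1.0e-6 * file_rate_ppm;
  double rtc_error_now = file_ref_offset + rate * (double)delta_time;
  double sys_error_now = rtc_error_now - coef_seconds_fast;

  /* 0.120 + 12e-6 * 86400 = 1.1568 s of RTC error; subtracting the
     0.950 s intercept leaves 0.2068 s, the amount the system clock
     is fast and therefore the offset to accumulate. */
  printf("rtc_error_now = %.4f s, sys_error_now = %.4f s\n",
         rtc_error_now, sys_error_now);
  return 0;
}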