static int
binuptime(struct bintime *bt, struct vdso_timekeep *tk, int abs)
{
	struct vdso_timehands *th;
	uint32_t curr, gen;

	do {
		if (!tk->tk_enabled)
			return (ENOSYS);

		/*
		 * XXXKIB.  The load of tk->tk_current should use
		 * atomic_load_acq_32 to provide load barrier.  But
		 * since tk points to r/o mapped page, x86
		 * implementation of atomic_load_acq faults.
		 */
		curr = tk->tk_current;
		rmb();
		th = &tk->tk_th[curr];
		if (th->th_algo != VDSO_TH_ALGO_1)
			return (ENOSYS);
		gen = th->th_gen;
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
		if (abs)
			bintime_add(bt, &th->th_boottime);

		/*
		 * Barrier for load of both tk->tk_current and th->th_gen.
		 */
		rmb();
	} while (curr != tk->tk_current || gen == 0 || gen != th->th_gen);
	return (0);
}
/*
 * Calculate the absolute or boot-relative time from the
 * machine-specific fast timecounter and the published timehands
 * structure read from the shared page.
 *
 * The lockless reading scheme is similar to the one used to read the
 * in-kernel timehands, see sys/kern/kern_tc.c:binuptime().  This code
 * is based on the kernel implementation.
 */
static int
binuptime(struct bintime *bt, struct vdso_timekeep *tk, int abs)
{
	struct vdso_timehands *th;
	uint32_t curr, gen;
	u_int delta;
	int error;

	do {
		if (!tk->tk_enabled)
			return (ENOSYS);

		curr = atomic_load_acq_32(&tk->tk_current);
		th = &tk->tk_th[curr];
		gen = atomic_load_acq_32(&th->th_gen);
		*bt = th->th_offset;
		error = tc_delta(th, &delta);
		if (error == EAGAIN)
			continue;
		if (error != 0)
			return (error);
		bintime_addx(bt, th->th_scale * delta);
		if (abs)
			bintime_add(bt, &th->th_boottime);

		/*
		 * Ensure that the load of th_offset is completed
		 * before the load of th_gen.
		 */
		atomic_thread_fence_acq();
	} while (curr != tk->tk_current || gen == 0 || gen != th->th_gen);
	return (0);
}
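/*
 * A minimal sketch of the generation-count ("seqlock"-style) protocol
 * the readers above rely on, reduced to a single integer payload and
 * written with C11 atomics rather than the kernel's atomic(9)
 * primitives.  The names (struct snapshot, publish(), read_snapshot())
 * are hypothetical and only illustrate the idea: the writer zeroes the
 * generation while it updates, and a reader retries whenever the
 * generation is zero or changed underneath it.
 */
#include <stdatomic.h>
#include <stdint.h>

struct snapshot {
	_Atomic uint32_t gen;	/* 0 while the writer is mid-update */
	_Atomic uint64_t value;	/* the published payload */
};

static void
publish(struct snapshot *s, uint64_t v)
{
	uint32_t g;

	g = atomic_load_explicit(&s->gen, memory_order_relaxed);
	/* Invalidate: readers that observe gen == 0 will retry. */
	atomic_store_explicit(&s->gen, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&s->value, v, memory_order_relaxed);
	if (++g == 0)		/* the generation must never be zero */
		g = 1;
	/* Publish: order the payload store before the new generation. */
	atomic_store_explicit(&s->gen, g, memory_order_release);
}

static uint64_t
read_snapshot(struct snapshot *s)
{
	uint32_t g;
	uint64_t v;

	do {
		g = atomic_load_explicit(&s->gen, memory_order_acquire);
		v = atomic_load_explicit(&s->value, memory_order_relaxed);
		/* Order the payload load before the generation re-check. */
		atomic_thread_fence(memory_order_acquire);
	} while (g == 0 ||
	    g != atomic_load_explicit(&s->gen, memory_order_relaxed));
	return (v);
}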
void
bintime(struct bintime *bt)
{

	binuptime(bt);
	bintime_add(bt, &boottimebin);
}
/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 * XXX: not locked.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec tbef, taft;
	struct bintime bt, bt2;

	cpu_tick_calibrate(1);
	nanotime(&tbef);
	timespec2bintime(ts, &bt);
	binuptime(&bt2);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &boottimebin);
	boottimebin = bt;
	bintime2timeval(&bt, &boottime);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
	nanotime(&taft);
	if (timestepwarnings) {
		log(LOG_INFO,
		    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
		    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
		    (intmax_t)taft.tv_sec, taft.tv_nsec,
		    (intmax_t)ts->tv_sec, ts->tv_nsec);
	}
	cpu_tick_calibrate(1);
}
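/*
 * The boottime arithmetic above in miniature: stepping the clock only
 * rewrites the boot time estimate, it never touches the monotonic
 * uptime.  A hypothetical stand-alone demo (userland, floating point
 * instead of struct bintime): if uptime is 100.5 s and the operator
 * sets UTC to 1000.0 s, the new boot time becomes 899.5 s, and every
 * later UTC reading is again boottime + uptime.
 */
#include <stdio.h>

int
main(void)
{
	double uptime = 100.5;		/* stands in for binuptime() */
	double new_utc = 1000.0;	/* the time being set, *ts */
	double boottime;

	boottime = new_utc - uptime;	/* the bintime_sub() step */
	printf("boottime = %.1f\n", boottime);		/* 899.5 */
	printf("utc now  = %.1f\n", boottime + uptime);	/* 1000.0 */
	return (0);
}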
/*
 * Calculate the absolute or boot-relative time from the
 * machine-specific fast timecounter and the published timehands
 * structure read from the shared page.
 *
 * The lockless reading scheme is similar to the one used to read the
 * in-kernel timehands, see sys/kern/kern_tc.c:binuptime().  This code
 * is based on the kernel implementation.
 */
static int
binuptime(struct bintime *bt, struct vdso_timekeep *tk, int abs)
{
	struct vdso_timehands *th;
	uint32_t curr, gen;

	do {
		if (!tk->tk_enabled)
			return (ENOSYS);

		curr = atomic_load_acq_32(&tk->tk_current);
		th = &tk->tk_th[curr];
		if (th->th_algo != VDSO_TH_ALGO_1)
			return (ENOSYS);
		gen = atomic_load_acq_32(&th->th_gen);
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
		if (abs)
			bintime_add(bt, &th->th_boottime);

		/*
		 * Ensure that the load of th_offset is completed
		 * before the load of th_gen.
		 */
		atomic_thread_fence_acq();
	} while (curr != tk->tk_current || gen == 0 || gen != th->th_gen);
	return (0);
}
void
getbintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
	bintime_add(bt, &boottimebin);
}
static void
compute_totals(struct nfsstatsv1 *total_stats, struct nfsstatsv1 *cur_stats)
{
	int i;

	bzero(total_stats, sizeof(*total_stats));
	for (i = 0; i < (NFSV42_NOPS + NFSV4OP_FAKENOPS); i++) {
		total_stats->srvbytes[0] += cur_stats->srvbytes[i];
		total_stats->srvops[0] += cur_stats->srvops[i];
		bintime_add(&total_stats->srvduration[0],
		    &cur_stats->srvduration[i]);
		total_stats->srvrpccnt[i] = cur_stats->srvrpccnt[i];
	}
	total_stats->srvstartcnt = cur_stats->srvstartcnt;
	total_stats->srvdonecnt = cur_stats->srvdonecnt;
	total_stats->busytime = cur_stats->busytime;
}
/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 * XXX: not locked.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec ts2;
	struct bintime bt, bt2;

	binuptime(&bt2);
	timespec2bintime(ts, &bt);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &boottimebin);
	boottimebin = bt;
	bintime2timeval(&bt, &boottime);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();

	if (timestepwarnings) {
		bintime2timespec(&bt2, &ts2);
		log(LOG_INFO, "Time stepped from %ld.%09ld to %ld.%09ld\n",
		    (long)ts2.tv_sec, ts2.tv_nsec,
		    (long)ts->tv_sec, ts->tv_nsec);
	}
}
static void
compute_stats(struct ctl_lun_io_stats *cur_stats,
    struct ctl_lun_io_stats *prev_stats, long double etime,
    long double *mbsec, long double *kb_per_transfer,
    long double *transfers_per_second, long double *ms_per_transfer,
    long double *ms_per_dma, long double *dmas_per_second)
{
	uint64_t total_bytes = 0, total_operations = 0, total_dmas = 0;
	uint32_t port;
	struct bintime total_time_bt, total_dma_bt;
	struct timespec total_time_ts, total_dma_ts;
	int i;

	bzero(&total_time_bt, sizeof(total_time_bt));
	bzero(&total_dma_bt, sizeof(total_dma_bt));
	bzero(&total_time_ts, sizeof(total_time_ts));
	bzero(&total_dma_ts, sizeof(total_dma_ts));
	for (port = 0; port < CTL_MAX_PORTS; port++) {
		for (i = 0; i < CTL_STATS_NUM_TYPES; i++) {
			total_bytes += cur_stats->ports[port].bytes[i];
			total_operations +=
			    cur_stats->ports[port].operations[i];
			total_dmas += cur_stats->ports[port].num_dmas[i];
			bintime_add(&total_time_bt,
			    &cur_stats->ports[port].time[i]);
			bintime_add(&total_dma_bt,
			    &cur_stats->ports[port].dma_time[i]);
			if (prev_stats != NULL) {
				total_bytes -=
				    prev_stats->ports[port].bytes[i];
				total_operations -=
				    prev_stats->ports[port].operations[i];
				total_dmas -=
				    prev_stats->ports[port].num_dmas[i];
				bintime_sub(&total_time_bt,
				    &prev_stats->ports[port].time[i]);
				bintime_sub(&total_dma_bt,
				    &prev_stats->ports[port].dma_time[i]);
			}
		}
	}

	*mbsec = total_bytes;
	*mbsec /= 1024 * 1024;
	if (etime > 0.0)
		*mbsec /= etime;
	else
		*mbsec = 0;

	*kb_per_transfer = total_bytes;
	*kb_per_transfer /= 1024;
	if (total_operations > 0)
		*kb_per_transfer /= total_operations;
	else
		*kb_per_transfer = 0;

	*transfers_per_second = total_operations;
	*dmas_per_second = total_dmas;
	if (etime > 0.0) {
		*transfers_per_second /= etime;
		*dmas_per_second /= etime;
	} else {
		*transfers_per_second = 0;
		*dmas_per_second = 0;
	}

	bintime2timespec(&total_time_bt, &total_time_ts);
	bintime2timespec(&total_dma_bt, &total_dma_ts);
	if (total_operations > 0) {
		/*
		 * Convert the timespec to milliseconds.
		 */
		*ms_per_transfer = total_time_ts.tv_sec * 1000;
		*ms_per_transfer += total_time_ts.tv_nsec / 1000000;
		*ms_per_transfer /= total_operations;
	} else
		*ms_per_transfer = 0;

	if (total_dmas > 0) {
		/*
		 * Convert the timespec to milliseconds.
		 */
		*ms_per_dma = total_dma_ts.tv_sec * 1000;
		*ms_per_dma += total_dma_ts.tv_nsec / 1000000;
		*ms_per_dma /= total_dmas;
	} else
		*ms_per_dma = 0;
}
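/*
 * For reference, the bintime -> milliseconds conversion above in
 * isolation.  struct bintime (declared in FreeBSD's <sys/time.h>)
 * holds whole seconds plus a 64-bit binary fraction of a second
 * (frac / 2^64), so a direct conversion is a multiply and a shift.
 * bintime_to_ms() is a hypothetical helper sketched here, not part of
 * the CTL code, which instead goes through bintime2timespec().
 */
#include <sys/time.h>		/* struct bintime */
#include <stdint.h>

static uint64_t
bintime_to_ms(const struct bintime *bt)
{

	/*
	 * 1000 * frac / 2^64, computed in two steps to avoid needing
	 * 128-bit arithmetic; the discarded low 32 bits of frac cost
	 * far less than a millisecond of precision.
	 */
	return ((uint64_t)bt->sec * 1000 +
	    (((bt->frac >> 32) * 1000) >> 32));
}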
/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
void
tc_windup(void)
{
	struct bintime bt;
	struct timehands *th, *tho;
	u_int64_t scale;
	u_int delta, ncount, ogen;
	int i;
#ifdef leapsecs
	time_t t;
#endif

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer.  While we update
	 * the contents, the generation must be zero.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	bcopy(tho, th, offsetof(struct timehands, th_generation));

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != timecounter)
		ncount = timecounter->tc_get_timecount(timecounter);
	else
		ncount = 0;
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	bintime_addx(&th->th_offset, th->th_scale * delta);

#ifdef notyet
	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which
	 * might have a different rate than the current NTP second.  It
	 * doesn't matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);
#endif

	/*
	 * Deal with NTP second processing.  The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice.  We need to call it twice in
	 * case we missed a leap second.
	 */
	bt = th->th_offset;
	bintime_add(&bt, &boottimebin);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--)
		ntp_update_second(&th->th_adjustment, &bt.sec);

	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != timecounter) {
		th->th_counter = timecounter;
		th->th_offset_count = ncount;
	}

	/*-
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, but that
	 * leaves suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 */
	scale = (u_int64_t)1 << 63;
	scale += (th->th_adjustment / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
	time_second = th->th_microtime.tv_sec;
	time_uptime = th->th_offset.sec;
	timehands = th;
}
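/*
 * A quick check of the 2199/512 approximation discussed in the comment
 * above; a stand-alone demo, not kernel code.  The exact multiplier is
 * 2^32 / 10^9 = 4.294967296.  The code computes (a / 1024) * 2199 and
 * later doubles the whole scale, so the effective fraction is
 * 2199/512 = 4.294921875, which is about 10.6 PPM small: exactly the
 * systematic undercompensation the comment describes.
 */
#include <stdio.h>

int
main(void)
{
	double exact = 4294967296.0 / 1000000000.0;	/* 2^32 / 10^9 */
	double approx = 2199.0 / 512.0;

	printf("exact  = %.9f\n", exact);	/* 4.294967296 */
	printf("approx = %.9f\n", approx);	/* 4.294921875 */
	printf("error  = %.1f PPM\n",
	    (exact - approx) / exact * 1e6);	/* ~10.6 PPM */
	return (0);
}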
void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	u_int tcount, *pcount;
	int foff, fhard;
	pps_seq_t *pseq;

	KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation)
		return;

	/* Things would be easier with arrays. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS-stuff until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Convert the count to a timespec. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_offset;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	bintime_add(&bt, &boottimebin);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen != pps->capth->th_generation)
		return;

	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		uint64_t scale;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		scale = (uint64_t)1 << 63;
		scale /= pps->capth->th_counter->tc_frequency;
		scale *= 2;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif
}
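/*
 * The tick -> interval conversion from the PPS_SYNC branch above, in
 * isolation.  scale is the number of 1/2^64 second fractions per
 * hardware tick (2^64 / frequency, built as 2 * (2^63 / freq) so the
 * intermediate fits in 64 bits), so scale * tcount is the elapsed
 * interval as a binary fraction of a second.  A hypothetical
 * stand-alone demo for a 1 MHz counter; the kernel uses
 * bintime_addx() and bintime2timespec() for the last step instead.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t freq = 1000000;	/* a 1 MHz hardware counter */
	uint64_t scale, frac;
	uint32_t tcount = 250;		/* ticks since the last event */

	scale = ((uint64_t)1 << 63) / freq * 2;	/* ~2^64 / freq */
	frac = scale * tcount;			/* bintime fraction */
	/* Fraction to nanoseconds: frac * 10^9 / 2^64, in two shifts. */
	printf("%ju ns\n",
	    (uintmax_t)(((frac >> 32) * 1000000000) >> 32));
	/* Prints 249999 ns, i.e. ~250 us, as expected for 250 ticks. */
	return (0);
}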