// Returns the current monotonic time, computed by scaling the global tick
// counter with the precomputed fixed-point ticks->time conversion factor.
lk_time_t current_time(void) {
  return u32_mul_u64_fp32_64(get_global_val(), timer_freq_msec_conversion_inverse);
}
static void set_pit_frequency(uint32_t frequency) { uint32_t count, remainder; /* figure out the correct pit_divisor for the desired frequency */ if (frequency <= 18) { count = 0xffff; } else if (frequency >= INTERNAL_FREQ) { count = 1; } else { count = INTERNAL_FREQ_3X / frequency; remainder = INTERNAL_FREQ_3X % frequency; if (remainder >= INTERNAL_FREQ_3X / 2) { count += 1; } count /= 3; remainder = count % 3; if (remainder >= 1) { count += 1; } } pit_divisor = count & 0xffff; /* * funky math that i don't feel like explaining. essentially 32.32 fixed * point representation of the configured timer delta. */ fp_32_64_div_32_32(&us_per_pit, 1000 * 1000 * 3 * count, INTERNAL_FREQ_3X); // Add 1us to the PIT tick rate to deal with rounding ns_per_pit_rounded_up = (u32_mul_u64_fp32_64(1, us_per_pit) + 1) * 1000; //dprintf(DEBUG, "set_pit_frequency: pit_divisor=%04x\n", pit_divisor); /* * setup the Programmable Interval Timer * timer 0, mode 2, binary counter, LSB followed by MSB */ outp(I8253_CONTROL_REG, 0x34); outp(I8253_DATA_REG, static_cast<uint8_t>(pit_divisor)); // LSB outp(I8253_DATA_REG, static_cast<uint8_t>(pit_divisor >> 8)); // MSB }
// Determine the TSC frequency and derive the fixed-point ns<->TSC conversion
// factors used by the wall clock.
//
// Preference order: pvclock-reported frequency (if available), then the
// CPUID/MSR lookup, and finally an empirical calibration against whichever
// reference clock (HPET or PIT) was selected as `calibration_clock`.
// Must be called with interrupts disabled.
static void calibrate_tsc(bool has_pvclock) {
  ASSERT(arch_ints_disabled());
  const uint64_t tsc_freq = has_pvclock ? pvclock_get_tsc_freq() : x86_lookup_tsc_freq();
  if (tsc_freq != 0) {
    // The hardware/hypervisor told us the frequency directly; no measurement
    // needed.
    tsc_ticks_per_ms = tsc_freq / 1000;
    printf("TSC frequency: %" PRIu64 " ticks/ms\n", tsc_ticks_per_ms);
  } else {
    printf("Could not find TSC frequency: Calibrating TSC with %s\n",
           clock_name[calibration_clock]);
    // Measure TSC counts over two different durations and use the difference
    // to cancel out fixed measurement overhead. Doubling the longer duration
    // until best_time[0] < best_time[1] guards against a measurement where
    // the longer interval somehow counted fewer ticks (e.g. noise/preemption
    // artifacts in the reference-clock measurement).
    uint32_t duration_ms[2] = {2, 4};
    uint64_t best_time[2] = {
        calibrate_tsc_count(static_cast<uint16_t>(duration_ms[0])),
        calibrate_tsc_count(static_cast<uint16_t>(duration_ms[1]))};
    while (best_time[0] >= best_time[1] && 2 * duration_ms[1] < MAX_TIMER_INTERVAL) {
      duration_ms[0] = duration_ms[1];
      duration_ms[1] *= 2;
      best_time[0] = best_time[1];
      best_time[1] = calibrate_tsc_count(static_cast<uint16_t>(duration_ms[1]));
    }
    ASSERT(best_time[0] < best_time[1]);
    // Slope of (ticks vs. ms) between the two samples = ticks per ms.
    tsc_ticks_per_ms = (best_time[1] - best_time[0]) / (duration_ms[1] - duration_ms[0]);
    printf("TSC calibrated: %" PRIu64 " ticks/ms\n", tsc_ticks_per_ms);
  }

  // The fixed-point division helpers take 32-bit operands.
  ASSERT(tsc_ticks_per_ms <= UINT32_MAX);
  fp_32_64_div_32_32(&ns_per_tsc, 1000 * 1000, static_cast<uint32_t>(tsc_ticks_per_ms));
  fp_32_64_div_32_32(&tsc_per_ns, static_cast<uint32_t>(tsc_ticks_per_ms), 1000 * 1000);
  // Add 1ns to conservatively deal with rounding
  ns_per_tsc_rounded_up = u32_mul_u64_fp32_64(1, ns_per_tsc) + 1;
  LTRACEF("ns_per_tsc: %08x.%08x%08x\n", ns_per_tsc.l0, ns_per_tsc.l32, ns_per_tsc.l64);
}
// Convert a raw counter value into lk_time_t via the fixed-point
// ms_per_cntpct conversion factor.
static lk_time_t cntpct_to_lk_time(uint64_t cntpct) {
  const lk_time_t converted = u32_mul_u64_fp32_64(cntpct, ms_per_cntpct);
  return converted;
}
// Platform timer initialization: probe TSC quality, pvclock, and HPET,
// calibrate the chosen clocks, and select the wall clock source
// (TSC > HPET > PIT), optionally overridden by the `kernel.wallclock`
// command-line option.
static void pc_init_timer(uint level) {
  const struct x86_model_info* cpu_model = x86_get_model();

  constant_tsc = false;
  if (x86_vendor == X86_VENDOR_INTEL) {
    /* This condition taken from Intel 3B 17.15 (Time-Stamp Counter). This
     * is the negation of the non-Constant TSC section, since the Constant
     * TSC section is incomplete (the behavior is architectural going
     * forward, and modern CPUs are not on the list). */
    constant_tsc = !((cpu_model->family == 0x6 && cpu_model->model == 0x9) ||
                     (cpu_model->family == 0x6 && cpu_model->model == 0xd) ||
                     (cpu_model->family == 0xf && cpu_model->model < 0x3));
  }
  invariant_tsc = x86_feature_test(X86_FEATURE_INVAR_TSC);

  // If running under a hypervisor that exposes pvclock, let it report
  // whether the TSC is stable; if pvclock init fails, proceed without it.
  bool has_pvclock = pvclock_is_present();
  if (has_pvclock) {
    zx_status_t status = pvclock_init();
    if (status == ZX_OK) {
      invariant_tsc = pvclock_is_stable();
    } else {
      has_pvclock = false;
    }
  }

  // Prefer the HPET over the PIT as the calibration reference when present,
  // and precompute its ns-per-tick conversion factor.
  bool has_hpet = hpet_is_present();
  if (has_hpet) {
    calibration_clock = CLOCK_HPET;
    const uint64_t hpet_ms_rate = hpet_ticks_per_ms();
    ASSERT(hpet_ms_rate <= UINT32_MAX);
    printf("HPET frequency: %" PRIu64 " ticks/ms\n", hpet_ms_rate);
    fp_32_64_div_32_32(&ns_per_hpet, 1000 * 1000, static_cast<uint32_t>(hpet_ms_rate));
    // Add 1ns to conservatively deal with rounding
    ns_per_hpet_rounded_up = u32_mul_u64_fp32_64(1, ns_per_hpet) + 1;
  } else {
    calibration_clock = CLOCK_PIT;
  }

  // `kernel.wallclock` may force a specific wall clock source; NULL means
  // no override was given.
  const char* force_wallclock = cmdline_get("kernel.wallclock");
  bool use_invariant_tsc = invariant_tsc && (!force_wallclock || !strcmp(force_wallclock, "tsc"));

  use_tsc_deadline = use_invariant_tsc && x86_feature_test(X86_FEATURE_TSC_DEADLINE);
  if (!use_tsc_deadline) {
    calibrate_apic_timer();
  }

  if (use_invariant_tsc) {
    calibrate_tsc(has_pvclock);

    // Program PIT in the software strobe configuration, but do not load
    // the count. This will pause the PIT.
    outp(I8253_CONTROL_REG, 0x38);
    wall_clock = CLOCK_TSC;
  } else {
    if (constant_tsc || invariant_tsc) {
      // Calibrate the TSC even though it's not as good as we want, so we
      // can still let folks still use it for cheap timing.
      calibrate_tsc(has_pvclock);
    }

    if (has_hpet && (!force_wallclock || !strcmp(force_wallclock, "hpet"))) {
      wall_clock = CLOCK_HPET;
      hpet_set_value(0);
      hpet_enable();
    } else {
      // Fall back to the PIT; panic if the command line demanded something
      // we could not provide.
      if (force_wallclock && strcmp(force_wallclock, "pit")) {
        panic("Could not satisfy kernel.wallclock choice\n");
      }
      wall_clock = CLOCK_PIT;
      set_pit_frequency(1000);  // ~1ms granularity
      uint32_t irq = apic_io_isa_to_global(ISA_IRQ_PIT);
      zx_status_t status = register_int_handler(irq, &pit_timer_tick, NULL);
      DEBUG_ASSERT(status == ZX_OK);
      unmask_interrupt(irq);
    }
  }

  printf("timer features: constant_tsc %d invariant_tsc %d tsc_deadline %d\n",
         constant_tsc, invariant_tsc, use_tsc_deadline);
  printf("Using %s as wallclock\n", clock_name[wall_clock]);
}