// Program the local APIC to fire a one-shot timer interrupt at |deadline|
// (a zx_time_t, i.e. nanoseconds on the monotonic clock). Must be called
// with interrupts disabled. Uses TSC-deadline mode when available,
// otherwise falls back to the APIC count/divisor timer.
//
// NOTE(review): this chunk is truncated — the final `else` branch (the
// saturating-divisor path) lies outside the visible source.
zx_status_t platform_set_oneshot_timer(zx_time_t deadline) {
  DEBUG_ASSERT(arch_ints_disabled());
  // Clamp negative deadlines to "now" before rounding.
  if (deadline < 0) {
    deadline = 0;
  }
  deadline = discrete_time_roundup(deadline);
  DEBUG_ASSERT(deadline > 0);

  if (use_tsc_deadline) {
    // Check if the deadline would overflow the TSC.
    const uint64_t tsc_ticks_per_ns = tsc_ticks_per_ms / ZX_MSEC(1);
    if (UINT64_MAX / deadline < tsc_ticks_per_ns) {
      return ZX_ERR_INVALID_ARGS;
    }
    // We rounded up to the tick after above.
    const uint64_t tsc_deadline = u64_mul_u64_fp32_64(deadline, tsc_per_ns);
    LTRACEF("Scheduling oneshot timer: %" PRIu64 " deadline\n", tsc_deadline);
    apic_timer_set_tsc_deadline(tsc_deadline, false /* unmasked */);
    return ZX_OK;
  }

  const zx_time_t now = current_time();
  if (now >= deadline) {
    // Deadline has already passed. We still need to schedule a timer so that
    // the interrupt fires.
    LTRACEF("Scheduling oneshot timer for min duration\n");
    return apic_timer_set_oneshot(1, 1, false /* unmasked */);
  }
  const zx_duration_t interval = zx_time_sub_time(deadline, now);
  DEBUG_ASSERT(interval > 0);

  // Convert the relative interval to APIC timer ticks via fixed-point multiply.
  uint64_t apic_ticks_needed = u64_mul_u64_fp32_64(interval, apic_ticks_per_ns);
  if (apic_ticks_needed == 0) {
    apic_ticks_needed = 1;
  }

  // Find the shift needed for this timeout, since count is 32-bit.
  const uint highest_set_bit = log2_ulong_floor(apic_ticks_needed);
  uint8_t extra_shift = (highest_set_bit <= 31) ? 0 : static_cast<uint8_t>(highest_set_bit - 31);
  // The APIC divisor tops out at 128 (2^7); cap the extra shift accordingly.
  if (extra_shift > 8) {
    extra_shift = 8;
  }

  uint32_t divisor = apic_divisor << extra_shift;
  uint32_t count;
  // If the divisor is too large, we're at our maximum timeout. Saturate the
  // timer. It'll fire earlier than requested, but the scheduler will notice
  // and ask us to set the timer up again.
  if (divisor <= 128) {
    count = (uint32_t)(apic_ticks_needed >> extra_shift);
    DEBUG_ASSERT((apic_ticks_needed >> extra_shift) <= UINT32_MAX);
  } else {
  // NOTE(review): source truncated here — remainder of the else branch and
  // the function epilogue are not visible in this chunk.
/*
 * Arm the hardware timer for a single-shot expiry |interval| ms from now.
 * |callback| (with |arg|) is recorded for the interrupt handler to invoke.
 * Always returns NO_ERROR; a too-long interval is silently saturated at the
 * 32-bit load-register maximum and will simply fire early.
 */
status_t platform_set_oneshot_timer (platform_timer_callback callback, void *arg, lk_time_t interval)
{
    LTRACEF("callback %p, arg %p, timeout %u\n", callback, arg, interval);

    /* Convert ms to hardware ticks via the precomputed fixed-point ratio,
     * clamping to the valid [1, UINT32_MAX] range of the load register. */
    uint64_t ticks = u64_mul_u64_fp32_64(interval, timer_freq_msec_conversion);
    if (unlikely(ticks == 0))
        ticks = 1;
    if (unlikely(ticks > 0xffffffff))
        ticks = 0xffffffff;

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&lock, state);

    t_callback = callback;
    oneshot_interval = interval;

    // disable timer before reprogramming the load register
    TIMREG(TIMER_CONTROL) = 0;

    TIMREG(TIMER_LOAD) = ticks;
    /* BUGFIX: the oneshot flag is bit 1; the original wrote (1<<0) twice,
     * leaving the timer in its default (non-oneshot) mode despite the
     * comment listing all three flags. */
    TIMREG(TIMER_CONTROL) = (1<<2) | (1<<1) | (1<<0); // irq enable, oneshot, enable

    spin_unlock_irqrestore(&lock, state);

    return NO_ERROR;
}
/*
 * Arm the hardware timer for a single-shot expiry |interval| ms from now.
 * |callback| (with |arg|) is recorded for the interrupt handler to invoke.
 * Always returns NO_ERROR; a too-long interval is silently saturated at the
 * 32-bit load-register maximum and will simply fire early.
 */
status_t platform_set_oneshot_timer (platform_timer_callback callback, void *arg, lk_time_t interval)
{
    LTRACEF("callback %p, arg %p, timeout %lu\n", callback, arg, interval);

    /* Convert ms to hardware ticks via the precomputed fixed-point ratio,
     * clamping to the valid [1, UINT32_MAX] range of the load register. */
    uint64_t ticks = u64_mul_u64_fp32_64(interval, timer_freq_msec_conversion);
    if (unlikely(ticks == 0))
        ticks = 1;
    if (unlikely(ticks > 0xffffffff))
        ticks = 0xffffffff;

    enter_critical_section();

    t_callback = callback;
    oneshot_interval = interval;

    // disable timer before reprogramming the load register
    TIMREG(TIMER_CONTROL) = 0;

    TIMREG(TIMER_LOAD) = ticks;
    /* BUGFIX: the oneshot flag is bit 1; the original wrote (1<<0) twice,
     * leaving the timer in its default (non-oneshot) mode despite the
     * comment listing all three flags. */
    TIMREG(TIMER_CONTROL) = (1<<2) | (1<<1) | (1<<0); // irq enable, oneshot, enable

    exit_critical_section();

    return NO_ERROR;
}
/*
 * Return the current high-resolution time, derived from the free-running
 * global counter scaled by the precomputed fixed-point conversion factor.
 */
lk_bigtime_t current_time_hires(void)
{
    return u64_mul_u64_fp32_64(get_global_val(), timer_freq_usec_conversion_inverse);
}
zx_time_t current_time(void) { zx_time_t time; switch (wall_clock) { case CLOCK_TSC: { uint64_t tsc = rdtsc(); time = ticks_to_nanos(tsc); break; } case CLOCK_HPET: { uint64_t counter = hpet_get_value(); time = u64_mul_u64_fp32_64(counter, ns_per_hpet); break; } case CLOCK_PIT: { time = u64_mul_u64_fp32_64(pit_ticks, us_per_pit) * 1000; break; } default: panic("Invalid wall clock source\n"); } return time; }
/*
 * Convert a raw CNTPCT physical-counter value into lk_bigtime_t units
 * using the precomputed fixed-point scale factor.
 */
static lk_bigtime_t cntpct_to_lk_bigtime(uint64_t cntpct)
{
    const lk_bigtime_t scaled = u64_mul_u64_fp32_64(cntpct, us_per_cntpct);
    return scaled;
}
// Convert raw platform ticks (TSC) to nanoseconds via the precomputed
// fixed-point ns-per-tick ratio.
zx_time_t ticks_to_nanos(zx_ticks_t ticks) {
  const zx_time_t nanos = u64_mul_u64_fp32_64(ticks, ns_per_tsc);
  return nanos;
}