/// Return the current monotonic time in microseconds.
/// If a clock delegate (ms_clockDg) is installed it is used instead of the
/// platform clock (NOTE(review): delegate looks like a test/override hook;
/// confirm against callers).
unsigned long long TimerManager::now()
{
    if (ms_clockDg)
        return ms_clockDg();
#ifdef WINDOWS
    LARGE_INTEGER count;
    if (!QueryPerformanceCounter(&count))
        MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("QueryPerformanceCounter");
    unsigned long long countUll = (unsigned long long)count.QuadPart;
    /* Lazily cache the QPC frequency on first use. */
    if (g_frequency == 0)
        g_frequency = queryFrequency();
    /* counts * 1e6 / (counts per second) -> microseconds */
    return muldiv64(countUll, 1000000, g_frequency);
#elif defined(OSX)
    unsigned long long absoluteTime = mach_absolute_time();
    /* Lazily cache the mach timebase on first use. */
    if (g_timebase.denom == 0)
        g_timebase = queryTimebase();
    /* numer/denom converts mach ticks to ns; the extra *1000 in the
     * denominator yields microseconds. */
    return muldiv64(absoluteTime, g_timebase.numer,
        (uint64_t)g_timebase.denom * 1000);
#else
    struct timespec ts;
    if (clock_gettime(CLOCK_MONOTONIC, &ts))
        MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("clock_gettime");
    /* seconds -> us, ns -> us */
    return ts.tv_sec * 1000000ull + ts.tv_nsec / 1000;
#endif
}
/* Recompute the QEMU deadline for OS-timer-4 channel n.  A channel either
 * runs from its own counter (control bit 7 set) or follows the shared
 * counter selected by the counters[] table. */
static void pxa2xx_timer_update4(void *opaque, uint64_t now_qemu, int n)
{
    PXA2xxTimerInfo *s = (PXA2xxTimerInfo *) opaque;
    uint32_t now_vm;
    uint64_t new_qemu;
    /* Channel that provides the clock for channel n when it is not
     * self-clocked. */
    static const int counters[8] = { 0, 0, 0, 0, 4, 4, 6, 6 };
    int counter;

    if (s->tm4[n].control & (1 << 7))
        counter = n;
    else
        counter = counters[n];

    /* A zero frequency means the source counter is stopped: cancel. */
    if (!s->tm4[counter].freq) {
        qemu_del_timer(s->tm4[n].tm.qtimer);
        return;
    }

    /* Current guest-visible counter value: last loaded value plus the
     * elapsed host time scaled to the counter frequency. */
    now_vm = s->tm4[counter].clock + muldiv64(now_qemu -
                    s->tm4[counter].lastload,
                    s->tm4[counter].freq, get_ticks_per_sec());

    /* Host time at which the counter reaches the match value; the
     * uint32_t cast makes the difference wrap like the hardware counter. */
    new_qemu = now_qemu + muldiv64((uint32_t) (s->tm4[n].tm.value - now_vm),
                    get_ticks_per_sec(), s->tm4[counter].freq);
    qemu_mod_timer(s->tm4[n].tm.qtimer, new_qemu);
}
/* Bring the OpenRISC tick counter (TTCR) up to date and arm the QEMU
 * timer for the next TTMR period match. */
void cpu_openrisc_count_update(OpenRISCCPU *cpu)
{
    uint64_t now, next;
    uint32_t wait;

    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    if (!is_counting) {
        /* Counting disabled: stop the timer but remember the stop time
         * so no virtual time is accounted while stopped. */
        timer_del(cpu->env.timer);
        last_clk = now;
        return;
    }

    /* Advance TTCR by the elapsed virtual time scaled to the timer
     * frequency. */
    cpu->env.ttcr += (uint32_t)muldiv64(now - last_clk, TIMER_FREQ,
                                        get_ticks_per_sec());
    last_clk = now;

    if ((cpu->env.ttmr & TTMR_TP) <= (cpu->env.ttcr & TTMR_TP)) {
        /* Match point already passed this period: wait for the counter
         * to wrap and reach the match value again. */
        wait = TTMR_TP - (cpu->env.ttcr & TTMR_TP) + 1;
        wait += cpu->env.ttmr & TTMR_TP;
    } else {
        wait = (cpu->env.ttmr & TTMR_TP) - (cpu->env.ttcr & TTMR_TP);
    }
    next = now + muldiv64(wait, get_ticks_per_sec(), TIMER_FREQ);
    timer_mod(cpu->env.timer, next);
}
/* handle periodic timer: (re)arm or cancel the periodic interrupt timer
 * based on RTC registers A and B. */
static void periodic_timer_update(RTCState *s, int64_t current_time)
{
    int period_code, period;
    int64_t cur_clock, next_irq_clock;

    period_code = s->cmos_data[RTC_REG_A] & 0x0f;
    /* Run only when a rate is programmed and either periodic interrupts
     * are enabled or the square-wave output is enabled and wired up. */
    if (period_code != 0
        && ((s->cmos_data[RTC_REG_B] & REG_B_PIE)
            || ((s->cmos_data[RTC_REG_B] & REG_B_SQWE) && s->sqw_irq))) {
        /* NOTE(review): the +7 remap of codes 1 and 2 presumably mirrors
         * the RTC datasheet's rate-code aliasing -- confirm. */
        if (period_code <= 2)
            period_code += 7;
        /* period in 32 Khz cycles */
        period = 1 << (period_code - 1);
#ifdef TARGET_I386
        /* Rescale any not-yet-delivered (coalesced) irqs to the new
         * period so the guest still receives the right total count. */
        if (period != s->period) {
            s->irq_coalesced = (s->irq_coalesced * s->period) / period;
            DPRINTF_C("cmos: coalesced irqs scaled to %d\n",
                      s->irq_coalesced);
        }
        s->period = period;
#endif
        /* compute 32 khz clock */
        cur_clock = muldiv64(current_time, RTC_CLOCK_RATE,
                             get_ticks_per_sec());
        /* Next irq fires on the next period boundary of the 32kHz clock;
         * the trailing +1 ns rounds up (NOTE(review): presumably to avoid
         * firing fractionally early -- confirm). */
        next_irq_clock = (cur_clock & ~(period - 1)) + period;
        s->next_periodic_time =
            muldiv64(next_irq_clock, get_ticks_per_sec(), RTC_CLOCK_RATE) + 1;
        qemu_mod_timer(s->periodic_timer, s->next_periodic_time);
    } else {
#ifdef TARGET_I386
        s->irq_coalesced = 0;
#endif
        qemu_del_timer(s->periodic_timer);
    }
}
/* Signal/callback handler for the host alarm timer (COREMU build).
 * NOTE(review): an alternate declaration sits behind a preprocessor
 * conditional whose #if is above this chunk -- the #endif below closes it. */
static void host_alarm_handler(int host_signum)
#endif
{
    //printf("host_alarm_handler\n");
    /* COREMU requires alarms to be processed on the hardware thread. */
    coremu_assert_hw_thr("Host_alarm_handler should be called by hw thr\n");

    struct qemu_alarm_timer *t = alarm_timer;
    if (!t)
        return;
#if 0
    /* Disabled instrumentation: tracks min/max/average vm_clock delta
     * between alarms and prints the effective alarm frequency. */
#define DISP_FREQ 1000
    {
        static int64_t delta_min = INT64_MAX;
        static int64_t delta_max, delta_cum, last_clock, delta, ti;
        static int count;
        ti = qemu_get_clock(vm_clock);
        if (last_clock != 0) {
            delta = ti - last_clock;
            if (delta < delta_min)
                delta_min = delta;
            if (delta > delta_max)
                delta_max = delta;
            delta_cum += delta;
            if (++count == DISP_FREQ) {
                printf("timer: min=%" PRId64 " us max=%" PRId64
                       " us avg=%" PRId64 " us avg_freq=%0.3f Hz\n",
                       muldiv64(delta_min, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_max, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_cum, 1000000 / DISP_FREQ,
                                get_ticks_per_sec()),
                       (double)get_ticks_per_sec() /
                       ((double)delta_cum / DISP_FREQ));
                count = 0;
                delta_min = INT64_MAX;
                delta_max = 0;
                delta_cum = 0;
            }
        }
        last_clock = ti;
    }
#endif
    /* Wake the main loop if a dynticks alarm fired or any clock has an
     * expired timer; vm_clock is skipped when icount is in use. */
    if (alarm_has_dynticks(t) ||
        (!use_icount &&
            qemu_timer_expired(active_timers[QEMU_CLOCK_VIRTUAL],
                               qemu_get_clock(vm_clock))) ||
        qemu_timer_expired(active_timers[QEMU_CLOCK_REALTIME],
                           qemu_get_clock(rt_clock)) ||
        qemu_timer_expired(active_timers[QEMU_CLOCK_HOST],
                           qemu_get_clock(host_clock))) {
        t->expired = alarm_has_dynticks(t);
        t->pending = 1;
        qemu_notify_event();
    }
}
/* (Re)arm or cancel the RTC periodic interrupt timer based on registers
 * A and B. */
static void rtc_timer_update(RTCState *s, int64_t current_time)
{
    int period_code, period;
    int64_t cur_clock, next_irq_clock;

    period_code = s->cmos_data[RTC_REG_A] & 0x0f;
#if defined TARGET_I386 || defined TARGET_X86_64
    /* disable periodic timer if hpet is in legacy mode, since interrupts are
     * disabled anyway. */
    if (period_code != 0 && (s->cmos_data[RTC_REG_B] & REG_B_PIE)
        && !hpet_in_legacy_mode()) {
#else
    if (period_code != 0 && (s->cmos_data[RTC_REG_B] & REG_B_PIE)) {
#endif
        /* NOTE(review): rate codes 1 and 2 alias higher codes via +7 --
         * presumably matching the RTC datasheet; confirm. */
        if (period_code <= 2)
            period_code += 7;
        /* period in 32 Khz cycles */
        period = 1 << (period_code - 1);
#ifdef TARGET_I386
        /* Rescale pending coalesced irqs when the period changes. */
        if(period != s->period)
            s->irq_coalesced = (s->irq_coalesced * s->period) / period;
        s->period = period;
#endif
        /* compute 32 khz clock */
        cur_clock = muldiv64(current_time, 32768, ticks_per_sec);
        /* Fire on the next period boundary; +1 rounds up a fraction. */
        next_irq_clock = (cur_clock & ~(period - 1)) + period;
        s->next_periodic_time =
            muldiv64(next_irq_clock, ticks_per_sec, 32768) + 1;
        qemu_mod_timer(s->periodic_timer, s->next_periodic_time);
    } else {
#ifdef TARGET_I386
        s->irq_coalesced = 0;
#endif
        qemu_del_timer(s->periodic_timer);
    }
}

/* Periodic timer callback: re-arm for the next period, then raise the
 * interrupt (or coalesce it under the -rtc-td-hack on x86). */
static void rtc_periodic_timer(void *opaque)
{
    RTCState *s = opaque;

    rtc_timer_update(s, s->next_periodic_time);
#ifdef TARGET_I386
    /* If the guest has not acknowledged the previous irq yet (flags in
     * REG_C still set), count it as coalesced instead of re-raising. */
    if ((s->cmos_data[RTC_REG_C] & 0xc0) && rtc_td_hack) {
        s->irq_coalesced++;
        return;
    }
#endif
    /* Set the interrupt flag bits (0xc0) in register C and raise. */
    s->cmos_data[RTC_REG_C] |= 0xc0;
    rtc_irq_raise(s->irq);
}
/* Compute the next QEMU_CLOCK_VIRTUAL deadline for an Aspeed timer, or 0
 * when there is no reload value and every event time has already passed. */
static uint64_t calculate_next(struct AspeedTimer *t)
{
    uint64_t next = 0;
    uint32_t rate = calculate_rate(t);

    while (!next) {
        /* We don't know the relationship between the values in the match
         * registers, so sort using MAX/MIN/zero. We sort in that order as the
         * timer counts down to zero. */
        uint64_t seq[] = {
            calculate_time(t, MAX(t->match[0], t->match[1])),
            calculate_time(t, MIN(t->match[0], t->match[1])),
            calculate_time(t, 0),
        };
        uint64_t reload_ns;
        uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

        if (now < seq[0]) {
            next = seq[0];
        } else if (now < seq[1]) {
            next = seq[1];
        } else if (now < seq[2]) {
            next = seq[2];
        } else if (t->reload) {
            /* All events are in the past: slide start forward by whole
             * reload periods and evaluate the sequence again. */
            reload_ns = muldiv64(t->reload, NANOSECONDS_PER_SECOND, rate);
            t->start = now - ((now - t->start) % reload_ns);
        } else {
            /* no reload value, return 0 */
            break;
        }
    }

    return next;
}
/* Read the current value of the PPC decrementer, derived from the time
 * remaining until (or elapsed since) the programmed deadline. */
uint32_t cpu_ppc_load_decr (CPUState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    int64_t remaining = tb_env->decr_next - qemu_get_clock(vm_clock);
    uint32_t decr;

    if (remaining < 0) {
        /* Deadline passed: convert the magnitude, then negate so the
         * guest sees the counter continuing below zero. */
        decr = -muldiv64(-remaining, tb_env->tb_freq, ticks_per_sec);
    } else {
        decr = muldiv64(remaining, tb_env->tb_freq, ticks_per_sec);
    }
#if defined(DEBUG_TB)
    printf("%s: 0x%08x\n", __func__, decr);
#endif
    return decr;
}
/* This function is called when the watchdog has either been enabled
 * (hence it starts counting down) or has been keep-alived.
 */
static void i6300esb_restart_timer(I6300State *d, int stage)
{
    int64_t timeout;

    if (!d->enabled)
        return;

    d->stage = stage;

    /* Each stage has its own preload register. */
    if (d->stage <= 1)
        timeout = d->timer1_preload;
    else
        timeout = d->timer2_preload;

    /* Scale the preload into 33 MHz clock ticks (shift by 15 or 5
     * depending on the programmed clock prescale). */
    if (d->clock_scale == CLOCK_SCALE_1KHZ)
        timeout <<= 15;
    else
        timeout <<= 5;

    /* Get the timeout in units of ticks_per_sec.
     *
     * ticks_per_sec is typically 10^9 == 0x3B9ACA00 (30 bits), with
     * 20 bits of user supplied preload, and 15 bits of scale, the
     * multiply here can exceed 64-bits, before we divide by 33MHz, so
     * we use a higher-precision intermediate result. */
    timeout = muldiv64(get_ticks_per_sec(), timeout, 33000000);

    i6300esb_debug("stage %d, timeout %" PRIi64 "\n", d->stage, timeout);

    timer_mod(d->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + timeout);
}
/* Append one captured packet (pcap record header plus payload, truncated
 * to the configured snap length) to the dump file.  Always reports the
 * full packet size as consumed. */
static ssize_t dump_receive(NetClientState *nc, const uint8_t *buf,
                            size_t size)
{
    DumpState *s = DO_UPCAST(DumpState, nc, nc);
    struct pcap_sf_pkthdr hdr;
    int64_t ts;
    int caplen;

    /* Early return in case of previous error. */
    if (s->fd < 0) {
        return size;
    }

    /* Virtual time in microseconds, split into pcap's sec/usec fields. */
    ts = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 1000000,
                  get_ticks_per_sec());
    caplen = size > s->pcap_caplen ? s->pcap_caplen : size;

    hdr.ts.tv_sec = ts / 1000000 + s->start_ts;
    hdr.ts.tv_usec = ts % 1000000;
    hdr.caplen = caplen;
    hdr.len = size;
    if (write(s->fd, &hdr, sizeof(hdr)) != sizeof(hdr) ||
        write(s->fd, buf, caplen) != caplen) {
        /* Any short write poisons the dump: close and mark the fd so all
         * later packets take the early-return path above. */
        qemu_log("-net dump write error - stop dump\n");
        close(s->fd);
        s->fd = -1;
    }

    return size;
}
/* Reprogram the deadlines of the four OS-timer channels from the shared
 * free-running counter. */
static void pxa2xx_timer_update(void *opaque, uint64_t now_qemu)
{
    PXA2xxTimerInfo *s = (PXA2xxTimerInfo *) opaque;
    uint32_t now_vm;
    uint64_t expiry;
    int ch;

    /* Current guest-visible counter value. */
    now_vm = s->clock + muldiv64(now_qemu - s->lastload, s->freq,
                                 get_ticks_per_sec());

    for (ch = 0; ch < 4; ch++) {
        /* The uint32_t cast wraps the distance to the match value the
         * same way the 32-bit hardware counter does. */
        uint32_t remaining = (uint32_t) (s->timer[ch].value - now_vm);
        expiry = now_qemu + muldiv64(remaining, get_ticks_per_sec(),
                                     s->freq);
        qemu_mod_timer(s->timer[ch].qtimer, expiry);
    }
}
/*
 * Called when mtimecmp is written to update the QEMU timer or immediately
 * trigger timer interrupt if mtimecmp <= current timer value.
 */
static inline void cpu_riscv_timer_update(CPURISCVState *env)
{
    uint64_t next;
    uint64_t diff;

    uint64_t rtc_r = rtc_read_with_delta(env);

#ifdef TIMER_DEBUGGING_RISCV
    printf("timer update: mtimecmp %016lx, timew %016lx\n",
           env->csr[NEW_CSR_MTIMECMP], rtc_r);
#endif

    if (env->csr[NEW_CSR_MTIMECMP] <= rtc_r) {
        // if we're setting an MTIMECMP value in the "past", immediately raise
        // the timer interrupt
        env->csr[NEW_CSR_MIP] |= MIP_MTIP;
        qemu_irq_raise(env->irq[7]);
        return;
    }

    // otherwise, set up the future timer interrupt
    diff = env->csr[NEW_CSR_MTIMECMP] - rtc_r;
    // back to ns (note args switched in muldiv64): diff is in timer ticks,
    // so scale by ns-per-second over the timer frequency
    next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
        muldiv64(diff, get_ticks_per_sec(), TIMER_FREQ);
    timer_mod(env->timer, next);
}
/* MMIO write handler for the goldfish timer device.  The 64-bit alarm is
 * written as two 32-bit halves; writing the low word arms the alarm. */
static void goldfish_timer_write(void *opaque, target_phys_addr_t offset,
                                 uint32_t value)
{
    struct timer_state *s = (struct timer_state *)opaque;
    int64_t alarm, now;

    switch(offset) {
        case TIMER_ALARM_LOW:
            /* Combine with the previously written high word and convert
             * the ns deadline to vm_clock ticks. */
            s->alarm_low = value;
            alarm = muldiv64(s->alarm_low | (int64_t)s->alarm_high << 32,
                             ticks_per_sec, 1000000000);
            now = qemu_get_clock(vm_clock);
            if (alarm <= now) {
                /* Deadline already passed: raise the irq immediately. */
                goldfish_device_set_irq(&s->dev, 0, 1);
            } else {
                qemu_mod_timer(s->timer, alarm);
                s->armed = 1;
            }
            break;
        case TIMER_ALARM_HIGH:
            s->alarm_high = value;
            //printf("alarm_high %d\n", s->alarm_high);
            break;
        case TIMER_CLEAR_ALARM:
            qemu_del_timer(s->timer);
            s->armed = 0;
            /* fall through */
        case TIMER_CLEAR_INTERRUPT:
            goldfish_device_set_irq(&s->dev, 0, 0);
            break;
        default:
            cpu_abort (cpu_single_env,
                       "goldfish_timer_write: Bad offset %x\n", offset);
    }
}
/* Store a new PPC time-base value: record the offset between the
 * requested TB (converted to vm_clock ticks) and the current vm_clock,
 * so later reads can reconstruct the guest TB. */
static void cpu_ppc_store_tb (ppc_tb_t *tb_env, uint64_t value)
{
    tb_env->tb_offset = muldiv64(value, ticks_per_sec, tb_env->tb_freq)
        - qemu_get_clock(vm_clock);
#ifdef DEBUG_TB
    /* Fixed: the old format string had two conversions ("%016lx" and
     * "%08x") but only one variadic argument, which is undefined
     * behavior, and "%lx" is the wrong length modifier for uint64_t on
     * 32-bit hosts. */
    printf("%s: tb=0x%016llx\n", __func__, (unsigned long long)value);
#endif
}
/* Return the current (down-counting) tick value of an Aspeed timer,
 * clamped so it never underflows past zero. */
static inline uint32_t calculate_ticks(struct AspeedTimer *t,
                                       uint64_t now_ns)
{
    uint32_t rate = calculate_rate(t);
    /* Nanoseconds elapsed since start, clamped at zero. */
    uint64_t elapsed_ns = now_ns - MIN(now_ns, t->start);
    /* Elapsed time converted into timer ticks at the current rate. */
    uint64_t elapsed_ticks = muldiv64(elapsed_ns, rate,
                                      NANOSECONDS_PER_SECOND);

    return t->reload - MIN(t->reload, elapsed_ticks);
}
/* Return the PM status register, latching the timer-overflow bit if the
 * PM timer has passed its recorded overflow time. */
static int get_pmsts(PIIX4PMState *s)
{
    /* Current time expressed in PM timer ticks. */
    int64_t now_ticks = muldiv64(qemu_get_clock(vm_clock), PM_FREQ,
                                 get_ticks_per_sec());

    if (now_ticks >= s->tmr_overflow_time) {
        s->pmsts |= TMROF_EN;
    }
    return s->pmsts;
}
/* Signal/callback handler for the host alarm timer.
 * NOTE(review): an alternate declaration sits behind a preprocessor
 * conditional whose #if is above this chunk -- the #endif below closes it. */
static void host_alarm_handler(int host_signum)
#endif
{
    struct qemu_alarm_timer *t = alarm_timer;
    if (!t)
        return;

#if 0
    /* Disabled instrumentation: tracks min/max/average vm_clock delta
     * between alarms and prints the effective alarm frequency. */
#define DISP_FREQ 1000
    {
        static int64_t delta_min = INT64_MAX;
        static int64_t delta_max, delta_cum, last_clock, delta, ti;
        static int count;
        ti = qemu_get_clock_ns(vm_clock);
        if (last_clock != 0) {
            delta = ti - last_clock;
            if (delta < delta_min)
                delta_min = delta;
            if (delta > delta_max)
                delta_max = delta;
            delta_cum += delta;
            if (++count == DISP_FREQ) {
                printf("timer: min=%" PRId64 " us max=%" PRId64
                       " us avg=%" PRId64 " us avg_freq=%0.3f Hz\n",
                       muldiv64(delta_min, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_max, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_cum, 1000000 / DISP_FREQ,
                                get_ticks_per_sec()),
                       (double)get_ticks_per_sec() /
                       ((double)delta_cum / DISP_FREQ));
                count = 0;
                delta_min = INT64_MAX;
                delta_max = 0;
                delta_cum = 0;
            }
        }
        last_clock = ti;
    }
#endif
    /* Wake the main loop when a dynticks alarm fired or the earliest
     * deadline across all clocks has passed. */
    if (alarm_has_dynticks(t) ||
        qemu_next_alarm_deadline () <= 0) {
        t->expired = alarm_has_dynticks(t);
        t->pending = 1;
        qemu_notify_event();
    }
}
/* Return the PM status register, latching the ACPI timer-status bit if
 * the PM timer has passed its recorded overflow time. */
static int get_pmsts(PIIX4PMState *s)
{
    /* Current time expressed in PM timer ticks. */
    int64_t now_ticks = muldiv64(qemu_get_clock(vm_clock),
                                 PM_TIMER_FREQUENCY, get_ticks_per_sec());

    if (now_ticks >= s->tmr_overflow_time) {
        s->pmsts |= ACPI_BITMASK_TIMER_STATUS;
    }
    return s->pmsts;
}
/* Return the QEMU_CLOCK_VIRTUAL time at which the (down-counting) timer
 * reaches the given tick value. */
static inline uint64_t calculate_time(struct AspeedTimer *t, uint32_t ticks)
{
    /* Ticks consumed between the reload value and the target, clamped
     * so a target above reload yields zero. */
    uint64_t consumed = t->reload - MIN(t->reload, ticks);

    /* Convert to nanoseconds at the current rate, offset from start. */
    return t->start + muldiv64(consumed, NANOSECONDS_PER_SECOND,
                               calculate_rate(t));
}
/* MMIO read handler exposing a scaled 64-bit time value as two 32-bit
 * registers (0x38 = low word, 0x3c = high word). */
static uint64_t timer_read(void *opaque, hwaddr addr, unsigned size)
{
    uint64_t ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint64_t kltime;
    uint32_t ret = 0;

    /* Two-stage rescale of virtual time into the device's clock domain;
     * the constants match the original implementation exactly. */
    kltime = muldiv64(ns, 4194300, get_ticks_per_sec() * 4);
    kltime = muldiv64(kltime, 18432000, 1048575);

    switch (addr) {
    case 0x38:
        ret = kltime;
        break;
    case 0x3c:
        ret = kltime >> 32;
        break;
    }
    return ret;
}
/* ACPI PM1a EVT */
/* Return PM1 event status, latching the timer-status bit once the PM
 * timer's overflow time has passed.  The comparison is done in
 * nanoseconds (not PM timer ticks) because acpi_pm_tmr_update arms the
 * timer in nanoseconds. */
uint16_t acpi_pm1_evt_get_sts(ACPIREGS *ar)
{
    int64_t overflow_ns = muldiv64(ar->tmr.overflow_time,
                                   NANOSECONDS_PER_SECOND,
                                   PM_TIMER_FREQUENCY);

    if (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >= overflow_ns) {
        ar->pm1.evt.sts |= ACPI_BITMASK_TIMER_STATUS;
    }
    return ar->pm1.evt.sts;
}
/* VM-state save handler for the goldfish timer device. */
static void goldfish_timer_save(QEMUFile* f, void* opaque)
{
    struct timer_state* s = opaque;

    /* in case the kernel is in the middle of a timer read */
    qemu_put_be64(f, s->now);
    qemu_put_byte(f, s->armed);
    if (s->armed) {
        /* Save the alarm as remaining time relative to the current
         * vm_clock rather than as an absolute deadline. */
        int64_t clock_now = qemu_get_clock(vm_clock);
        int64_t deadline = muldiv64(s->alarm_low |
                                    (int64_t)s->alarm_high << 32,
                                    ticks_per_sec, 1000000000);
        qemu_put_be64(f, deadline - clock_now);
    }
}
/* (Re)arm the helper timer used to drain coalesced RTC interrupts, or
 * cancel it when none are pending. */
static void rtc_coalesced_timer_update(RTCState *s)
{
    int slices;
    int64_t deadline;

    if (s->irq_coalesced == 0) {
        qemu_del_timer(s->coalesced_timer);
        return;
    }

    /* divide each RTC interval to 2 - 8 smaller intervals */
    slices = MIN(s->irq_coalesced, 7) + 1;
    deadline = qemu_get_clock_ns(rtc_clock) +
        muldiv64(s->period / slices, get_ticks_per_sec(), RTC_CLOCK_RATE);
    qemu_mod_timer(s->coalesced_timer, deadline);
}
/* I/O write handler for the PIIX4 PM register block (status, enable and
 * control registers).  Writing 1 to a status bit clears it.
 * NOTE(review): this chunk appears truncated -- the function's closing
 * brace is not visible here. */
static void pm_ioport_writew(void *opaque, uint32_t addr, uint32_t val)
{
    PIIX4PMState *s = opaque;

    addr &= 0x3f;
    switch(addr) {
    case 0x00:
        {
            int64_t d;
            int pmsts;
            pmsts = get_pmsts(s);
            if (pmsts & val & ACPI_BITMASK_TIMER_STATUS) {
                /* if TMRSTS is reset, then compute the new overflow time */
                d = muldiv64(qemu_get_clock(vm_clock), PM_TIMER_FREQUENCY,
                             get_ticks_per_sec());
                s->tmr_overflow_time = (d + 0x800000LL) & ~0x7fffffLL;
            }
            /* Write-1-to-clear semantics for the status bits. */
            s->pmsts &= ~val;
            pm_update_sci(s);
        }
        break;
    case 0x02:
        s->pmen = val;
        pm_update_sci(s);
        break;
    case 0x04:
        {
            int sus_typ;
            s->pmcntrl = val & ~(ACPI_BITMASK_SLEEP_ENABLE);
            if (val & ACPI_BITMASK_SLEEP_ENABLE) {
                /* change suspend type */
                sus_typ = (val >> 10) & 7;
                switch(sus_typ) {
                case 0: /* soft power off */
                    qemu_system_shutdown_request();
                    break;
                case 1:
                    /* ACPI_BITMASK_WAKE_STATUS should be set on resume.
                       Pretend that resume was caused by power button */
                    s->pmsts |= (ACPI_BITMASK_WAKE_STATUS |
                                 ACPI_BITMASK_POWER_BUTTON_STATUS);
                    qemu_system_reset_request();
                    if (s->cmos_s3) {
                        qemu_irq_raise(s->cmos_s3);
                    }
                    /* fall through */
                default:
                    break;
                }
            }
        }
        break;
    default:
        break;
    }
/* I/O write handler for the PIIX4 PM register block (variant with wakeup
 * and S4 support).  Writing 1 to a status bit clears it.
 * NOTE(review): this chunk appears truncated -- the function's closing
 * brace is not visible here. */
static void pm_ioport_writew(void *opaque, uint32_t addr, uint32_t val)
{
    PIIX4PMState *s = opaque;

    addr &= 0x3f;
    switch(addr) {
    case 0x00:
        {
            int64_t d;
            int pmsts;
            pmsts = get_pmsts(s);
            if (pmsts & val & TMROF_EN) {
                /* if TMRSTS is reset, then compute the new overflow time */
                d = muldiv64(qemu_get_clock(vm_clock), PM_FREQ,
                             get_ticks_per_sec());
                s->tmr_overflow_time = (d + 0x800000LL) & ~0x7fffffLL;
            }
            /* Write-1-to-clear semantics for the status bits. */
            s->pmsts &= ~val;
            pm_update_sci(s);
        }
        break;
    case 0x02:
        s->pmen = val;
        /* Mirror the enable bits into the generic wakeup machinery. */
        qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_RTC, val & RTC_EN);
        qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_PMTIMER,
                                  val & TMROF_EN);
        pm_update_sci(s);
        break;
    case 0x04:
        {
            int sus_typ;
            s->pmcntrl = val & ~(SUS_EN);
            if (val & SUS_EN) {
                /* change suspend type */
                sus_typ = (val >> 10) & 7;
                switch(sus_typ) {
                case 0: /* soft power off */
                    qemu_system_shutdown_request();
                    break;
                case 1:
                    qemu_system_suspend_request();
                    break;
                default:
                    if (sus_typ == s->s4_val) { /* S4 request */
                        monitor_protocol_event(QEVENT_SUSPEND_DISK, NULL);
                        qemu_system_shutdown_request();
                    }
                    break;
                }
            }
        }
        break;
    default:
        break;
    }
/* (Re)arm or cancel the RTC periodic interrupt timer.  Runs when a rate
 * is programmed and either periodic interrupts are allowed (PIE set and
 * not masked by HPET legacy mode on x86) or the square wave is wired. */
static void rtc_timer_update(RTCState *s, int64_t current_time)
{
    int period_code, period;
    int64_t cur_clock, next_irq_clock;
    int enable_pie;

    period_code = s->cmos_data[RTC_REG_A] & 0x0f;
#if defined TARGET_I386
    /* disable periodic timer if hpet is in legacy mode, since interrupts are
     * disabled anyway. */
    enable_pie = !hpet_in_legacy_mode();
#else
    enable_pie = 1;
#endif
    if (period_code != 0
        && (((s->cmos_data[RTC_REG_B] & REG_B_PIE) && enable_pie)
            || ((s->cmos_data[RTC_REG_B] & REG_B_SQWE) && s->sqw_irq))) {
        /* NOTE(review): rate codes 1 and 2 alias higher codes via +7 --
         * presumably matching the RTC datasheet; confirm. */
        if (period_code <= 2)
            period_code += 7;
        /* period in 32 Khz cycles */
        period = 1 << (period_code - 1);
#ifdef TARGET_I386
        /* Rescale pending coalesced irqs when the period changes. */
        if(period != s->period)
            s->irq_coalesced = (s->irq_coalesced * s->period) / period;
        s->period = period;
#endif
        /* compute 32 khz clock */
        cur_clock = muldiv64(current_time, 32768, get_ticks_per_sec());
        /* Fire on the next period boundary; +1 rounds up a fraction. */
        next_irq_clock = (cur_clock & ~(period - 1)) + period;
        s->next_periodic_time =
            muldiv64(next_irq_clock, get_ticks_per_sec(), 32768) + 1;
        qemu_mod_timer(s->periodic_timer, s->next_periodic_time);
    } else {
#ifdef TARGET_I386
        s->irq_coalesced = 0;
#endif
        qemu_del_timer(s->periodic_timer);
    }
}
/* I/O write handler for the PIIX4 PM register block (RSM_STS variant).
 * Writing 1 to a status bit clears it.
 * NOTE(review): this chunk appears truncated -- the function's closing
 * brace is not visible here. */
static void pm_ioport_writew(void *opaque, uint32_t addr, uint32_t val)
{
    PIIX4PMState *s = opaque;

    addr &= 0x3f;
    switch(addr) {
    case 0x00:
        {
            int64_t d;
            int pmsts;
            pmsts = get_pmsts(s);
            if (pmsts & val & TMROF_EN) {
                /* if TMRSTS is reset, then compute the new overflow time */
                d = muldiv64(qemu_get_clock(vm_clock), PM_FREQ,
                             get_ticks_per_sec());
                s->tmr_overflow_time = (d + 0x800000LL) & ~0x7fffffLL;
            }
            /* Write-1-to-clear semantics for the status bits. */
            s->pmsts &= ~val;
            pm_update_sci(s);
        }
        break;
    case 0x02:
        s->pmen = val;
        pm_update_sci(s);
        break;
    case 0x04:
        {
            int sus_typ;
            s->pmcntrl = val & ~(SUS_EN);
            if (val & SUS_EN) {
                /* change suspend type */
                sus_typ = (val >> 10) & 7;
                switch(sus_typ) {
                case 0: /* soft power off */
                    qemu_system_shutdown_request();
                    break;
                case 1:
                    /* RSM_STS should be set on resume. Pretend that resume
                       was caused by power button */
                    s->pmsts |= (RSM_STS | PWRBTN_STS);
                    qemu_system_reset_request();
#if defined(TARGET_I386)
                    cmos_set_s3_resume();
#endif
                    /* fall through */
                default:
                    break;
                }
            }
        }
        break;
    default:
        break;
    }
/* Recalculates the output frequency based on the clock's input_freq
 * variable.  On a change, notifies the clock's users via their irq lines
 * and recursively propagates the new frequency to child clocks that have
 * this clock selected as input. */
static void clktree_recalc_output_freq(Clk clk) {
    int i;
    Clk next_clk, next_clk_input;
    uint32_t new_output_freq;

    /* Get the output frequency, or 0 if the output is disabled. */
    new_output_freq = clk->enabled ?
        muldiv64(clk->input_freq, clk->multiplier, clk->divisor) : 0;

    /* if the frequency has changed. */
    if(new_output_freq != clk->output_freq) {
        clk->output_freq = new_output_freq;
#ifdef DEBUG_CLKTREE
        clktree_print_state(clk);
#endif
        /* Check the new frequency against the max frequency. */
        if(new_output_freq > clk->max_output_freq) {
            fprintf(stderr,
                    "%s: Clock %s output frequency (%d Hz) exceeds max frequency (%d Hz).\n",
                    __FUNCTION__, clk->name, new_output_freq,
                    clk->max_output_freq);
        }

        /* Notify users of change. */
        for(i=0; i < clk->user_count; i++) {
            qemu_set_irq(clk->user[i], 1);
        }

        /* Propagate the frequency change to the child clocks */
        for(i=0; i < clk->output_count; i++) {
            next_clk = clk->output[i];
            assert(next_clk != NULL);
            /* Only propagate the change if the child has selected the current
             * clock as input. */
            next_clk_input = clktree_get_input_clk(next_clk);
            if(next_clk_input == clk) {
                /* Recursively propagate changes. The clock tree should not be
                 * too deep, so we shouldn't have to recurse too many times. */
                clktree_set_input_freq(next_clk, new_output_freq);
            }
        }
    }
}
/* ACPI PM_TMR */
/* Arm the PM timer for its overflow time (converted from PM timer ticks
 * to nanoseconds), or cancel it when disabled. */
void acpi_pm_tmr_update(ACPIREGS *ar, bool enable)
{
    if (!enable) {
        timer_del(ar->tmr.timer);
        return;
    }

    /* schedule a timer interruption if needed */
    timer_mod(ar->tmr.timer,
              muldiv64(ar->tmr.overflow_time, NANOSECONDS_PER_SECOND,
                       PM_TIMER_FREQUENCY));
}
/* ACPI PM_TMR */
/* Arm the PM timer for its overflow time (converted from PM timer ticks
 * to vm_clock ticks), or cancel it when disabled. */
void acpi_pm_tmr_update(ACPIREGS *ar, bool enable)
{
    if (!enable) {
        qemu_del_timer(ar->tmr.timer);
        return;
    }

    /* schedule a timer interruption if needed */
    qemu_mod_timer(ar->tmr.timer,
                   muldiv64(ar->tmr.overflow_time, get_ticks_per_sec(),
                            PM_TIMER_FREQUENCY));
}