static void rtc_set_time(RTCState *s)
{
    struct tm tm;

    rtc_get_time(s, &tm);
    s->base_rtc = mktimegm(&tm);
    s->last_update = qemu_get_clock_ns(rtc_clock);

    rtc_change_mon_event(&tm);
}
static void uart_tx_redo(UartState *s)
{
    uint64_t new_tx_time = qemu_get_clock_ns(vm_clock);

    qemu_mod_timer(s->tx_time_handle, new_tx_time + s->char_tx_time);

    s->r[R_SR] |= UART_SR_INTR_TEMPTY;

    uart_update_status(s);
}
static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use vm_clock so we don't alter the image file while suspended for
     * migration.
     */
    qemu_mod_timer(s->need_check_timer, qemu_get_clock_ns(vm_clock) +
                   get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}
void qemu_clock_warp(QEMUClock *clock)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (clock != vm_clock || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
     * ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest vm_clock timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(vm_clock)) {
        qemu_del_timer(icount_warp_timer);
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
    deadline = qemu_clock_deadline(vm_clock);
    if (deadline > 0) {
        /*
         * Ensure the vm_clock proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * vm_clock.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending vm_clock timer; rather
         * time could just advance to the next vm_clock event.  Instead, we
         * do stop VCPUs and only advance vm_clock after some "real" time,
         * (related to the time left until the next event) has passed.  This
         * rt_clock timer will do this.  This avoids that the warps are too
         * visible externally---for example, you will not be sending network
         * packets continuously instead of every 100ms.
         */
        qemu_mod_timer(icount_warp_timer, vm_clock_warp_start + deadline);
    } else {
        qemu_notify_event();
    }
}
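For context, here is a minimal sketch (illustrative only, with hypothetical names; the timer API calls themselves all appear in the surrounding functions) of the self-rearming vm_clock timer pattern that qemu_clock_warp() has to cope with: under icount, vm_clock only advances while vCPUs execute instructions, so a sleeping guest would otherwise never reach the next expiry.

/* Illustrative sketch, not from the source: a device callback that
 * re-arms itself 100 ms ahead on vm_clock using the legacy timer API. */
static QEMUTimer *example_timer;

static void example_timer_cb(void *opaque)
{
    /* ...periodic device work would go here... */
    qemu_mod_timer(example_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
}

static void example_timer_init(void *opaque)
{
    example_timer = qemu_new_timer_ns(vm_clock, example_timer_cb, opaque);
    qemu_mod_timer(example_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
}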
static void timerblock_reload(timerblock *tb, int restart)
{
    if (tb->count == 0) {
        return;
    }
    if (restart) {
        tb->tick = qemu_get_clock_ns(vm_clock);
    }
    tb->tick += (int64_t)tb->count * timerblock_scale(tb);
    qemu_mod_timer(tb->timer, tb->tick);
}
void stop_sleep_cycle_count(void)
{
    if (started_sleep_count) {
        // printf("Stop Sleep Time passed %lld \n",
        //        (qemu_get_clock_ns(vm_clock) - sleep_start_time));
        if (curr_power_model != NULL) {
            curr_power_model->sleep_stop_notify(qemu_get_clock_ns(vm_clock) -
                                                sleep_start_time);
        }
        started_sleep_count = false;
        sleep_start_time = 0;
    }
}
int64_t qemu_next_deadline(void)
{
    /* To avoid problems with overflow limit this to 2^32.  */
    int64_t delta = INT32_MAX;

    if (active_timers[QEMU_CLOCK_VIRTUAL]) {
        delta = active_timers[QEMU_CLOCK_VIRTUAL]->expire_time -
                qemu_get_clock_ns(vm_clock);
    }
    if (active_timers[QEMU_CLOCK_HOST]) {
        int64_t hdelta = active_timers[QEMU_CLOCK_HOST]->expire_time -
                         qemu_get_clock_ns(host_clock);
        if (hdelta < delta) {
            delta = hdelta;
        }
    }

    if (delta < 0) {
        delta = 0;
    }

    return delta;
}
static void rtc_coalesced_timer_update(RTCState *s)
{
    if (s->irq_coalesced == 0) {
        qemu_del_timer(s->coalesced_timer);
    } else {
        /* divide each RTC interval to 2 - 8 smaller intervals */
        int c = MIN(s->irq_coalesced, 7) + 1;
        int64_t next_clock = qemu_get_clock_ns(rtc_clock) +
            muldiv64(s->period / c, get_ticks_per_sec(), RTC_CLOCK_RATE);
        qemu_mod_timer(s->coalesced_timer, next_clock);
    }
}
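To give a sense of scale (illustrative figures, assuming s->period is expressed in 32.768 kHz RTC clock ticks as in the rest of this device model): at the 1024 Hz periodic rate s->period is 32 ticks, so irq_coalesced = 3 yields c = 4 and the coalesced timer is re-armed 8 ticks, roughly 244 µs, ahead, spreading the reinjected interrupts out rather than delivering them back to back.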
static void pxa2xx_timer_tick4(void *opaque)
{
    PXA2xxTimer4 *t = (PXA2xxTimer4 *) opaque;
    PXA2xxTimerInfo *i = (PXA2xxTimerInfo *) t->tm.info;

    pxa2xx_timer_tick(&t->tm);
    if (t->control & (1 << 3)) {
        t->clock = 0;
    }
    if (t->control & (1 << 6)) {
        pxa2xx_timer_update4(i, qemu_get_clock_ns(vm_clock), t->tm.num - 4);
    }
    if (i->events & 0xff0) {
        qemu_irq_raise(i->irq4);
    }
}
int64_t qemu_clock_deadline(QEMUClock *clock)
{
    /* To avoid problems with overflow limit this to 2^32.  */
    int64_t delta = INT32_MAX;

    if (clock->active_timers) {
        delta = clock->active_timers->expire_time - qemu_get_clock_ns(clock);
    }
    if (delta < 0) {
        delta = 0;
    }
    return delta;
}
/* Set CPU Timer */
void HELPER(spt)(CPUS390XState *env, uint64_t a1)
{
    uint64_t time = cpu_ldq_data(env, a1);

    if (time == -1ULL) {
        return;
    }

    /* nanoseconds */
    time = (time * 125) >> 9;

    qemu_mod_timer(env->cpu_timer, qemu_get_clock_ns(vm_clock) + time);
}
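A note on the conversion, based on the S/390 TOD clock format rather than anything stated in this file: bit 51 of the TOD clock ticks once per microsecond, so one unit of the 64-bit value is 2^-12 µs = 125/512 ns, and (time * 125) >> 9 is exactly time * 125 / 512. The same conversion appears in HELPER(sckc) further down.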
static void tusb6010_power(TUSBState *s, int on)
{
    if (!on) {
        s->power = 0;
    } else if (!s->power && on) {
        s->power = 1;
        /* Pull the interrupt down after TUSB6010 comes up.  */
        s->intr_ok = 0;
        tusb_intr_update(s);
        qemu_mod_timer(s->pwr_timer,
                       qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 2);
    }
}
/* A write to this register enables the timer. */
static void ib700_write_enable_reg(void *vp, uint32_t addr, uint32_t data)
{
    IB700State *s = vp;
    static int time_map[] = {
        30, 28, 26, 24, 22, 20,
        18, 16, 14, 12, 10,  8,
         6,  4,  2,  0
    };
    int64_t timeout;

    ib700_debug("addr = %x, data = %x\n", addr, data);

    timeout = (int64_t) time_map[data & 0xF] * get_ticks_per_sec();
    qemu_mod_timer(s->timer, qemu_get_clock_ns(vm_clock) + timeout);
}
static void xtensa_ccompare_cb(void *opaque)
{
    XtensaCPU *cpu = opaque;
    CPUXtensaState *env = &cpu->env;

    if (env->halted) {
        env->halt_clock = qemu_get_clock_ns(vm_clock);
        xtensa_advance_ccount(env, env->wake_ccount - env->sregs[CCOUNT]);
        if (!cpu_has_work(CPU(cpu))) {
            env->sregs[CCOUNT] = env->wake_ccount + 1;
            xtensa_rearm_ccompare_timer(env);
        }
    }
}
void enqueue_async_event(NVMEState *n, uint8_t event_type, uint8_t event_info,
                         uint8_t log_page)
{
    AsyncEvent *event = (AsyncEvent *)qemu_malloc(sizeof(AsyncEvent));

    event->result.event_type = event_type;
    event->result.event_info = event_info;
    event->result.log_page = log_page;

    QSIMPLEQ_INSERT_TAIL(&(n->async_queue), event, entry);

    qemu_mod_timer(n->async_event_timer, qemu_get_clock_ns(vm_clock) + 20000);
}
static void rtc_set_date_from_host(ISADevice *dev)
{
    RTCState *s = MC146818_RTC(dev);
    struct tm tm;

    qemu_get_timedate(&tm, 0);

    s->base_rtc = mktimegm(&tm);
    s->last_update = qemu_get_clock_ns(rtc_clock);
    s->offset = 0;

    /* set the CMOS date */
    rtc_set_cmos(s, &tm);
}
static int rtc_initfn(ISADevice *dev)
{
    RTCState *s = DO_UPCAST(RTCState, dev, dev);
    int base = 0x70;

    s->cmos_data[RTC_REG_A] = 0x26;
    s->cmos_data[RTC_REG_B] = 0x02;
    s->cmos_data[RTC_REG_C] = 0x00;
    s->cmos_data[RTC_REG_D] = 0x80;

    rtc_set_date_from_host(dev);

#ifdef TARGET_I386
    switch (s->lost_tick_policy) {
    case LOST_TICK_SLEW:
        s->coalesced_timer =
            qemu_new_timer_ns(rtc_clock, rtc_coalesced_timer, s);
        break;
    case LOST_TICK_DISCARD:
        break;
    default:
        return -EINVAL;
    }
#endif

    s->periodic_timer = qemu_new_timer_ns(rtc_clock, rtc_periodic_timer, s);
    s->second_timer = qemu_new_timer_ns(rtc_clock, rtc_update_second, s);
    s->second_timer2 = qemu_new_timer_ns(rtc_clock, rtc_update_second2, s);

    s->clock_reset_notifier.notify = rtc_notify_clock_reset;
    qemu_register_clock_reset_notifier(rtc_clock, &s->clock_reset_notifier);

    s->suspend_notifier.notify = rtc_notify_suspend;
    qemu_register_suspend_notifier(&s->suspend_notifier);

    s->next_second_time =
        qemu_get_clock_ns(rtc_clock) + (get_ticks_per_sec() * 99) / 100;
    qemu_mod_timer(s->second_timer2, s->next_second_time);

    memory_region_init_io(&s->io, &cmos_ops, s, "rtc", 2);
    isa_register_ioport(dev, &s->io, base);

    qdev_set_legacy_instance_id(&dev->qdev, base, 2);
    qemu_register_reset(rtc_reset, s);

    object_property_add(OBJECT(s), "date", "struct tm",
                        rtc_get_date, NULL, NULL, s, NULL);

    return 0;
}
int pic_read_irq(PicState2 *s)
{
    int irq, irq2, intno;

    irq = pic_get_irq(&s->pics[0]);
    if (irq >= 0) {
        pic_intack(&s->pics[0], irq);
#ifdef TARGET_I386
        if (time_drift_fix && irq == 0) {
            timer_acks++;
            if (timer_ints_to_push > 0) {
                timer_ints_to_push--;
                /* simulate an edge irq0, like the one generated by i8254 */
                pic_set_irq1(&s->pics[0], 0, 0);
                pic_set_irq1(&s->pics[0], 0, 1);
            }
        }
#endif
        if (irq == 2) {
            irq2 = pic_get_irq(&s->pics[1]);
            if (irq2 >= 0) {
                pic_intack(&s->pics[1], irq2);
            } else {
                /* spurious IRQ on slave controller */
                irq2 = 7;
            }
            intno = s->pics[1].irq_base + irq2;
#if defined(DEBUG_PIC) || defined(DEBUG_IRQ_LATENCY)
            irq = irq2 + 8;
#endif
        } else {
            intno = s->pics[0].irq_base + irq;
        }
    } else {
        /* spurious IRQ on host controller */
        irq = 7;
        intno = s->pics[0].irq_base + irq;
    }
    pic_update_irq(s);

#ifdef DEBUG_IRQ_LATENCY
    printf("IRQ%d latency=%0.3fus\n",
           irq,
           (double)(qemu_get_clock_ns(vm_clock) - irq_time[irq]) * 1000000.0 /
           get_ticks_per_sec());
#endif
    DPRINTF("pic_interrupt: irq=%d\n", irq);
    return intno;
}
static int pl031_init(SysBusDevice *dev)
{
    pl031_state *s = FROM_SYSBUS(pl031_state, dev);
    struct tm tm;

    memory_region_init_io(&s->iomem, &pl031_ops, s, "pl031", 0x1000);
    sysbus_init_mmio(dev, &s->iomem);

    sysbus_init_irq(dev, &s->irq);

    /* tick_offset is in seconds: the host's RTC epoch time minus the
     * current rtc_clock value converted from nanoseconds. */
    qemu_get_timedate(&tm, 0);
    s->tick_offset = mktimegm(&tm) -
        qemu_get_clock_ns(rtc_clock) / get_ticks_per_sec();

    s->timer = qemu_new_timer_ns(rtc_clock, pl031_interrupt, s);
    return 0;
}
static int pxa25x_timer_post_load(void *opaque, int version_id)
{
    PXA2xxTimerInfo *s = (PXA2xxTimerInfo *) opaque;
    int64_t now;
    int i;

    now = qemu_get_clock_ns(vm_clock);
    pxa2xx_timer_update(s, now);

    if (pxa2xx_timer_has_tm4(s)) {
        for (i = 0; i < 8; i++) {
            pxa2xx_timer_update4(s, now, i);
        }
    }

    return 0;
}
uint64_t cpu_tick_get_count(CPUTimer *timer)
{
    uint64_t real_count = timer_to_cpu_ticks(
            qemu_get_clock_ns(vm_clock) - timer->clock_offset,
            timer->frequency);

    TIMER_DPRINTF("%s get_count count=0x%016lx (%s) p=%p\n",
                  timer->name, real_count,
                  timer->disabled ? "disabled" : "enabled", timer);

    if (timer->disabled) {
        real_count |= timer->disabled_mask;
    }

    return real_count;
}
void cpu_tick_set_count(CPUTimer *timer, uint64_t count)
{
    uint64_t real_count = count & ~timer->disabled_mask;
    uint64_t disabled_bit = count & timer->disabled_mask;

    int64_t vm_clock_offset = qemu_get_clock_ns(vm_clock) -
            cpu_to_timer_ticks(real_count, timer->frequency);

    TIMER_DPRINTF("%s set_count count=0x%016lx (%s) p=%p\n",
                  timer->name, real_count,
                  timer->disabled ? "disabled" : "enabled", timer);

    timer->disabled = disabled_bit ? 1 : 0;
    timer->clock_offset = vm_clock_offset;
}
static void set_next_tick(dp8393xState *s)
{
    uint32_t ticks;
    int64_t delay;

    if (s->regs[SONIC_CR] & SONIC_CR_STP) {
        qemu_del_timer(s->watchdog);
        return;
    }

    ticks = s->regs[SONIC_WT1] << 16 | s->regs[SONIC_WT0];
    s->wt_last_update = qemu_get_clock_ns(vm_clock);
    delay = get_ticks_per_sec() * ticks / 5000000;
    qemu_mod_timer(s->watchdog, s->wt_last_update + delay);
}
/* Set Clock Comparator */
void HELPER(sckc)(CPUS390XState *env, uint64_t a1)
{
    uint64_t time = cpu_ldq_data(env, a1);

    if (time == -1ULL) {
        return;
    }

    /* difference between now and then */
    time -= clock_value(env);
    /* nanoseconds */
    time = (time * 125) >> 9;

    qemu_mod_timer(env->tod_timer, qemu_get_clock_ns(vm_clock) + time);
}
/* set irq level. If an edge is detected, then the IRR is set to 1 */
static void pic_set_irq(void *opaque, int irq, int level)
{
    PICCommonState *s = opaque;
    int mask = 1 << irq;
#if defined(DEBUG_PIC) || defined(DEBUG_IRQ_COUNT) || \
    defined(DEBUG_IRQ_LATENCY)
    int irq_index = s->master ? irq : irq + 8;
#endif
#if defined(DEBUG_PIC) || defined(DEBUG_IRQ_COUNT)
    if (level != irq_level[irq_index]) {
        DPRINTF("pic_set_irq: irq=%d level=%d\n", irq_index, level);
        irq_level[irq_index] = level;
#ifdef DEBUG_IRQ_COUNT
        if (level == 1) {
            irq_count[irq_index]++;
        }
#endif
    }
#endif
#ifdef DEBUG_IRQ_LATENCY
    if (level) {
        irq_time[irq_index] = qemu_get_clock_ns(vm_clock);
    }
#endif

    if (s->elcr & mask) {
        /* level triggered */
        if (level) {
            s->irr |= mask;
            s->last_irr |= mask;
        } else {
            s->irr &= ~mask;
            s->last_irr &= ~mask;
        }
    } else {
        /* edge triggered */
        if (level) {
            if ((s->last_irr & mask) == 0) {
                s->irr |= mask;
            }
            s->last_irr |= mask;
        } else {
            s->last_irr &= ~mask;
        }
    }
    pic_update_irq(s);
}
static void pl031_set_alarm(pl031_state *s)
{
    uint32_t ticks;

    /* The timer wraps around.  This subtraction also wraps in the same way,
       and gives correct results when alarm < now_ticks.  */
    ticks = s->mr - pl031_get_count(s);
    DPRINTF("Alarm set in %u ticks\n", ticks);
    if (ticks == 0) {
        qemu_del_timer(s->timer);
        pl031_interrupt(s);
    } else {
        int64_t now = qemu_get_clock_ns(rtc_clock);
        qemu_mod_timer(s->timer, now + (int64_t)ticks * get_ticks_per_sec());
    }
}
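Worked example of the wrap-around mentioned in the comment (values are illustrative): with pl031_get_count(s) = 0xFFFFFFF0 and s->mr = 0x00000010, the unsigned subtraction yields 0x20, i.e. 32 ticks until the alarm, which is the intended result even though mr is numerically smaller than the current count.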
static CPUTimer *cpu_timer_create(const char *name, SPARCCPU *cpu,
                                  QEMUBHFunc *cb, uint32_t frequency,
                                  uint64_t disabled_mask)
{
    CPUTimer *timer = g_malloc0(sizeof(CPUTimer));

    timer->name = name;
    timer->frequency = frequency;
    timer->disabled_mask = disabled_mask;

    timer->disabled = 1;
    timer->clock_offset = qemu_get_clock_ns(vm_clock);

    timer->qtimer = qemu_new_timer_ns(vm_clock, cb, cpu);

    return timer;
}
/* handle update-ended timer */
static void check_update_timer(RTCState *s)
{
    uint64_t next_update_time;
    uint64_t guest_nsec;
    int next_alarm_sec;

    /* From the data sheet: "Holding the dividers in reset prevents
     * interrupts from operating, while setting the SET bit allows"
     * them to occur.  However, it will prevent an alarm interrupt
     * from occurring, because the time of day is not updated.
     */
    if ((s->cmos_data[RTC_REG_A] & 0x60) == 0x60) {
        qemu_del_timer(s->update_timer);
        return;
    }
    if ((s->cmos_data[RTC_REG_C] & REG_C_UF) &&
        (s->cmos_data[RTC_REG_B] & REG_B_SET)) {
        qemu_del_timer(s->update_timer);
        return;
    }
    if ((s->cmos_data[RTC_REG_C] & REG_C_UF) &&
        (s->cmos_data[RTC_REG_C] & REG_C_AF)) {
        qemu_del_timer(s->update_timer);
        return;
    }

    guest_nsec = get_guest_rtc_ns(s) % NSEC_PER_SEC;
    /* if UF is clear, reprogram to next second */
    next_update_time = qemu_get_clock_ns(rtc_clock)
        + NSEC_PER_SEC - guest_nsec;

    /* Compute time of next alarm.  One second is already accounted
     * for in next_update_time.
     */
    next_alarm_sec = get_next_alarm(s);
    s->next_alarm_time = next_update_time +
                         (next_alarm_sec - 1) * NSEC_PER_SEC;

    if (s->cmos_data[RTC_REG_C] & REG_C_UF) {
        /* UF is set, but AF is clear.  Program the timer to target
         * the alarm time.
         */
        next_update_time = s->next_alarm_time;
    }
    if (next_update_time != qemu_timer_expire_time_ns(s->update_timer)) {
        qemu_mod_timer(s->update_timer, next_update_time);
    }
}
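Reading the arithmetic with illustrative numbers: if the guest RTC is 300 ms into the current second, next_update_time lands 700 ms from now; if get_next_alarm() then returns 10, next_alarm_time is that update time plus 9 more seconds, because the first of those 10 seconds is already folded into next_update_time.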
static int throttlePipe_recvBuffers(void* opaque, GoldfishPipeBuffer* buffers,
                                    int numBuffers)
{
    ThrottlePipe* pipe = opaque;
    int ret;

    if (pipe->recvExpiration > 0) {
        return PIPE_ERROR_AGAIN;
    }

    ret = pingPongPipe_recvBuffers(&pipe->pingpong, buffers, numBuffers);
    if (ret > 0) {
        pipe->recvExpiration = qemu_get_clock_ns(vm_clock) +
                               ret * pipe->recvRate;
        throttlePipe_rearm(pipe);
    }
    return ret;
}
static void update_wt_regs(dp8393xState *s)
{
    int64_t elapsed;
    uint32_t val;

    if (s->regs[SONIC_CR] & SONIC_CR_STP) {
        qemu_del_timer(s->watchdog);
        return;
    }

    elapsed = s->wt_last_update - qemu_get_clock_ns(vm_clock);
    val = s->regs[SONIC_WT1] << 16 | s->regs[SONIC_WT0];
    val -= elapsed / 5000000;
    s->regs[SONIC_WT1] = (val >> 16) & 0xffff;
    s->regs[SONIC_WT0] = (val >> 0) & 0xffff;
    set_next_tick(s);
}