void xenstore_check_new_media_present(int timeout)
{
    if (insert_timer == NULL)
        insert_timer = qemu_new_timer(rt_clock, insert_media, NULL);
    qemu_mod_timer(insert_timer, qemu_get_clock(rt_clock) + timeout);
}
static void soc_dma_ch_schedule(struct soc_dma_ch_s *ch, int delay_bytes)
{
    int64_t now = qemu_get_clock(vm_clock);
    struct dma_s *dma = (struct dma_s *) ch->dma;

    qemu_mod_timer(ch->timer, now + delay_bytes / dma->channel_freq);
}
static int rtc_initfn(ISADevice *dev)
{
    RTCState *s = DO_UPCAST(RTCState, dev, dev);
    int base = 0x70;
    int isairq = 8;

    isa_init_irq(dev, &s->irq, isairq);

    s->cmos_data[RTC_REG_A] = 0x26;
    s->cmos_data[RTC_REG_B] = 0x02;
    s->cmos_data[RTC_REG_C] = 0x00;
    s->cmos_data[RTC_REG_D] = 0x80;

    rtc_set_date_from_host(dev);

    s->periodic_timer = qemu_new_timer(rtc_clock, rtc_periodic_timer, s);
#ifdef TARGET_I386
    if (rtc_td_hack)
        s->coalesced_timer = qemu_new_timer(rtc_clock, rtc_coalesced_timer, s);
#endif
    s->second_timer = qemu_new_timer(rtc_clock, rtc_update_second, s);
    s->second_timer2 = qemu_new_timer(rtc_clock, rtc_update_second2, s);

    s->next_second_time = qemu_get_clock(rtc_clock) +
        (get_ticks_per_sec() * 99) / 100;
    qemu_mod_timer(s->second_timer2, s->next_second_time);

    register_ioport_write(base, 2, 1, cmos_ioport_write, s);
    register_ioport_read(base, 2, 1, cmos_ioport_read, s);

    qdev_set_legacy_instance_id(&dev->qdev, base, 2);
    qemu_register_reset(rtc_reset, s);
    return 0;
}
static void goldfish_timer_write(void *opaque, target_phys_addr_t offset, uint32_t value)
{
    struct timer_state *s = (struct timer_state *)opaque;
    int64_t alarm, now;

    switch (offset) {
    case TIMER_ALARM_LOW:
        s->alarm_low = value;
        alarm = muldiv64(s->alarm_low | (int64_t)s->alarm_high << 32,
                         ticks_per_sec, 1000000000);
        now = qemu_get_clock(vm_clock);
        if (alarm <= now) {
            goldfish_device_set_irq(&s->dev, 0, 1);
        } else {
            qemu_mod_timer(s->timer, alarm);
            s->armed = 1;
        }
        break;
    case TIMER_ALARM_HIGH:
        s->alarm_high = value;
        //printf("alarm_high %d\n", s->alarm_high);
        break;
    case TIMER_CLEAR_ALARM:
        qemu_del_timer(s->timer);
        s->armed = 0;
        /* fall through */
    case TIMER_CLEAR_INTERRUPT:
        goldfish_device_set_irq(&s->dev, 0, 0);
        break;
    default:
        cpu_abort(cpu_single_env, "goldfish_timer_write: Bad offset %x\n", offset);
    }
}
static void onedram_tcp_read(void *opaque, const uint8_t *buf, int size)
{
    S5pc1xxOneDRAMState *s = (S5pc1xxOneDRAMState *)opaque;
    uint32_t send_cmd;
    int64_t timeout;

    /* During boot we first need to set up the connection to the
     * virtual modem through the socket. */
    if (!s->vmodem_connected) {
        if (((uint32_t *)buf)[0] == IPC_CP_CONNECT_APP) {
            send_cmd = IPC_AP_CONNECT_ACK;
            onedram_tcp_write(s, (uint8_t *)&send_cmd, CONNECT_LENGTH);
            s->vmodem_connected = 1;
            /* Hand the authority to the AP, because the AP will try to
             * load the modem image for the CP. */
            onedram_put_authority(s);
            /* By this point the PSI has already been loaded by the CP.
             * With the new onedram driver we have to send
             * IPC_CP_READY_FOR_LOADING to the AP rather than waiting
             * for it to be read by the AP. */
            timeout = get_ticks_per_sec();
            qemu_mod_timer(s->bootup_timer,
                           qemu_get_clock(vm_clock) + timeout / 10);
        }
    } else {
        /* The connection to the virtual modem is already up,
         * so from now on we only exchange IPC messages. */
        if (onedram_writable(s)) {
            //onedram_prepare_write_fmt(s, buf, size);
            onedram_write_fmt(s, buf, size);
        } else {
            return;
        }
    }
}
static pxa2xx_timer_info *pxa2xx_timer_init(target_phys_addr_t base, qemu_irq *irqs)
{
    int i;
    int iomemtype;
    pxa2xx_timer_info *s;

    s = (pxa2xx_timer_info *) qemu_mallocz(sizeof(pxa2xx_timer_info));
    s->base = base;
    s->irq_enabled = 0;
    s->oldclock = 0;
    s->clock = 0;
    s->lastload = qemu_get_clock(vm_clock);
    s->reset3 = 0;

    for (i = 0; i < 4; i++) {
        s->timer[i].value = 0;
        s->timer[i].irq = irqs[i];
        s->timer[i].info = s;
        s->timer[i].num = i;
        s->timer[i].level = 0;
        s->timer[i].qtimer = qemu_new_timer(vm_clock,
                                            pxa2xx_timer_tick, &s->timer[i]);
    }

    iomemtype = cpu_register_io_memory(0, pxa2xx_timer_readfn,
                                       pxa2xx_timer_writefn, s);
    cpu_register_physical_memory(base, 0x00001000, iomemtype);

    register_savevm("pxa2xx_timer", 0, 0,
                    pxa2xx_timer_save, pxa2xx_timer_load, s);

    return s;
}
static int64_t qemu_next_alarm_deadline(void)
{
    int64_t delta;
    int64_t rtdelta;

    if (!use_icount && active_timers[QEMU_CLOCK_VIRTUAL]) {
        delta = active_timers[QEMU_CLOCK_VIRTUAL]->expire_time -
            qemu_get_clock(vm_clock);
    } else {
        delta = INT32_MAX;
    }
    if (active_timers[QEMU_CLOCK_HOST]) {
        int64_t hdelta = active_timers[QEMU_CLOCK_HOST]->expire_time -
            qemu_get_clock_ns(host_clock);
        if (hdelta < delta)
            delta = hdelta;
    }
    if (active_timers[QEMU_CLOCK_REALTIME]) {
        rtdelta = (active_timers[QEMU_CLOCK_REALTIME]->expire_time * 1000000 -
                   qemu_get_clock_ns(rt_clock));
        if (rtdelta < delta)
            delta = rtdelta;
    }

    return delta;
}
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;

    /* If the VM is not running, then do nothing. */
    if (!vm_running)
        return;

    cur_time = cpu_get_clock();
    cur_icount = qemu_get_clock(vm_clock);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}
RTCState *rtc_mm_init(target_phys_addr_t base, int it_shift, qemu_irq irq)
{
    RTCState *s;
    int io_memory;

    s = qemu_mallocz(sizeof(RTCState));
    if (!s)
        return NULL;

    s->irq = irq;
    s->cmos_data[RTC_REG_A] = 0x26;
    s->cmos_data[RTC_REG_B] = 0x02;
    s->cmos_data[RTC_REG_C] = 0x00;
    s->cmos_data[RTC_REG_D] = 0x80;

    rtc_set_date_from_host(s);

    s->periodic_timer = qemu_new_timer(vm_clock, rtc_periodic_timer, s);
    s->second_timer = qemu_new_timer(vm_clock, rtc_update_second, s);
    s->second_timer2 = qemu_new_timer(vm_clock, rtc_update_second2, s);

    s->next_second_time = qemu_get_clock(vm_clock) + (ticks_per_sec * 99) / 100;
    qemu_mod_timer(s->second_timer2, s->next_second_time);

    io_memory = cpu_register_io_memory(0, rtc_mm_read, rtc_mm_write, s);
    cpu_register_physical_memory(base, 2 << it_shift, io_memory);

    register_savevm("mc146818rtc", base, 1, rtc_save, rtc_load, s);
    return s;
}
static void i8259_set_irq(void *opaque, int irq, int level)
{
    PicState2 *s = opaque;

#if defined(DEBUG_PIC) || defined(DEBUG_IRQ_COUNT)
    if (level != irq_level[irq]) {
#if defined(DEBUG_PIC)
        printf("i8259_set_irq: irq=%d level=%d\n", irq, level);
#endif
        irq_level[irq] = level;
#ifdef DEBUG_IRQ_COUNT
        if (level == 1)
            irq_count[irq]++;
#endif
    }
#endif
#ifdef DEBUG_IRQ_LATENCY
    if (level) {
        irq_time[irq] = qemu_get_clock(vm_clock);
    }
#endif
    pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
    /* used for IOAPIC irqs */
    if (s->alt_irq_func)
        s->alt_irq_func(s->alt_irq_opaque, irq, level);
    pic_update_irq(s);
}
QEMUFile *qemu_fopen_ops_buffered(void *opaque,
                                  size_t bytes_per_sec,
                                  BufferedPutFunc *put_buffer,
                                  BufferedPutReadyFunc *put_ready,
                                  BufferedWaitForUnfreezeFunc *wait_for_unfreeze,
                                  BufferedCloseFunc *close)
{
    QEMUFileBuffered *s;

    s = qemu_mallocz(sizeof(*s));

    s->opaque = opaque;
    s->xfer_limit = bytes_per_sec / 10;
    s->put_buffer = put_buffer;
    s->put_ready = put_ready;
    s->wait_for_unfreeze = wait_for_unfreeze;
    s->close = close;

    s->file = qemu_fopen_ops(s, buffered_put_buffer, NULL,
                             buffered_close, buffered_rate_limit,
                             buffered_set_rate_limit);

    s->timer = qemu_new_timer(rt_clock, buffered_rate_tick, s);

    qemu_mod_timer(s->timer, qemu_get_clock(rt_clock) + 100);

    return s->file;
}
RTCState *rtc_init(int base, qemu_irq irq)
{
    RTCState *s;

    s = qemu_mallocz(sizeof(RTCState));
    if (!s)
        return NULL;

    s->irq = irq;
    s->cmos_data[RTC_REG_A] = 0x26;
    s->cmos_data[RTC_REG_B] = 0x02;
    s->cmos_data[RTC_REG_C] = 0x00;
    s->cmos_data[RTC_REG_D] = 0x80;

    rtc_set_date_from_host(s);

    s->periodic_timer = qemu_new_timer(vm_clock, rtc_periodic_timer, s);
    s->second_timer = qemu_new_timer(vm_clock, rtc_update_second, s);
    s->second_timer2 = qemu_new_timer(vm_clock, rtc_update_second2, s);

    s->next_second_time = qemu_get_clock(vm_clock) + (ticks_per_sec * 99) / 100;
    qemu_mod_timer(s->second_timer2, s->next_second_time);

    register_ioport_write(base, 2, 1, cmos_ioport_write, s);
    register_ioport_read(base, 2, 1, cmos_ioport_read, s);

    register_savevm("mc146818rtc", base, 1, rtc_save, rtc_load, s);
    return s;
}
RTCState *rtc_init(int base, qemu_irq irq, int base_year)
{
    RTCState *s;

    s = qemu_mallocz(sizeof(RTCState));

    s->irq = irq;
    s->cmos_data[RTC_REG_A] = 0x26;
    s->cmos_data[RTC_REG_B] = 0x02;
    s->cmos_data[RTC_REG_C] = 0x00;
    s->cmos_data[RTC_REG_D] = 0x80;

    s->base_year = base_year;
    rtc_set_date_from_host(s);

    s->periodic_timer = qemu_new_timer(vm_clock, rtc_periodic_timer, s);
    s->second_timer = qemu_new_timer(vm_clock, rtc_update_second, s);
    s->second_timer2 = qemu_new_timer(vm_clock, rtc_update_second2, s);

    s->next_second_time = qemu_get_clock(vm_clock) + (ticks_per_sec * 99) / 100;
    qemu_mod_timer(s->second_timer2, s->next_second_time);

    register_ioport_write(base, 2, 1, cmos_ioport_write, s);
    register_ioport_read(base, 2, 1, cmos_ioport_read, s);

    register_savevm("mc146818rtc", base, 1, rtc_save, rtc_load, s);
#ifdef IRQ_COALESCE_HACK
    if (rtc_td_hack)
        register_savevm("mc146818rtc-td", base, 1, rtc_save_td, rtc_load_td, s);
#endif
    return s;
}
static void gptm_reload(gptm_state *s, int n, int reset)
{
    int64_t tick;

    if (reset)
        tick = qemu_get_clock(vm_clock);
    else
        tick = s->tick[n];

    if (s->config == 0) {
        /* 32-bit CountDown. */
        uint32_t count;
        count = s->load[0] | (s->load[1] << 16);
        tick += (int64_t)count * system_clock_scale;
    } else if (s->config == 1) {
        /* 32-bit RTC.  1Hz tick. */
        tick += ticks_per_sec;
    } else if (s->mode[n] == 0xa) {
        /* PWM mode.  Not implemented. */
    } else {
        cpu_abort(cpu_single_env, "TODO: 16-bit timer mode 0x%x\n", s->mode[n]);
    }
    s->tick[n] = tick;
    qemu_mod_timer(s->timer[n], tick);
}
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = to_virtio_net(vdev);

    virtio_net_vhost_status(n, status);

    if (!n->tx_waiting) {
        return;
    }

    if (virtio_net_started(n, status) && !n->vhost_started) {
        if (n->tx_timer) {
            qemu_mod_timer(n->tx_timer,
                           qemu_get_clock(vm_clock) + n->tx_timeout);
        } else {
            qemu_bh_schedule(n->tx_bh);
        }
    } else {
        if (n->tx_timer) {
            qemu_del_timer(n->tx_timer);
        } else {
            qemu_bh_cancel(n->tx_bh);
        }
    }
}
static void qemu_announce_self_once(void *opaque)
{
    int i, len;
    VLANState *vlan;
    VLANClientState *vc;
    uint8_t buf[256];
    static int count = SELF_ANNOUNCE_ROUNDS;
    QEMUTimer *timer = *(QEMUTimer **)opaque;

    for (i = 0; i < MAX_NICS; i++) {
        if (!nd_table[i].used)
            continue;
        len = announce_self_create(buf, nd_table[i].macaddr);
        vlan = nd_table[i].vlan;
        for (vc = vlan->first_client; vc != NULL; vc = vc->next) {
            vc->receive(vc, buf, len);
        }
    }
    if (count--) {
        qemu_mod_timer(timer, qemu_get_clock(rt_clock) + 100);
    } else {
        qemu_del_timer(timer);
        qemu_free_timer(timer);
    }
}
static void goldfish_timer_write(void *opaque, target_phys_addr_t offset, uint32_t value_ns)
{
    struct timer_state *s = (struct timer_state *)opaque;
    int64_t alarm_tks, now_tks;

    switch (offset) {
    case TIMER_ALARM_LOW:
        s->alarm_low_ns = value_ns;
        alarm_tks = ns2tks(s->alarm_low_ns | (int64_t)s->alarm_high_ns << 32);
        now_tks = qemu_get_clock(vm_clock);
        if (alarm_tks <= now_tks) {
            goldfish_device_set_irq(&s->dev, 0, 1);
        } else {
            qemu_mod_timer(s->timer, alarm_tks);
            s->armed = 1;
        }
        break;
    case TIMER_ALARM_HIGH:
        s->alarm_high_ns = value_ns;
        break;
    case TIMER_CLEAR_ALARM:
        qemu_del_timer(s->timer);
        s->armed = 0;
        /* fall through */
    case TIMER_CLEAR_INTERRUPT:
        goldfish_device_set_irq(&s->dev, 0, 0);
        break;
    default:
        cpu_abort(cpu_single_env, "goldfish_timer_write: Bad offset %x\n", offset);
    }
}
static void cpu_ppc_store_tb (ppc_tb_t *tb_env, uint64_t value)
{
    tb_env->tb_offset = muldiv64(value, ticks_per_sec, tb_env->tb_freq)
        - qemu_get_clock(vm_clock);
#ifdef DEBUG_TB
    /* The format string expects both the new TB value and the computed
     * offset; the original call passed only "value". */
    printf("%s: tb=0x%016" PRIx64 " offset=%016" PRIx64 "\n",
           __func__, value, tb_env->tb_offset);
#endif
}
/* Note: we do not have a reliable method to detect if the floppy is
   present.  The current method is to try to open the floppy at every
   I/O and to keep it open for a few hundred milliseconds. */
static int fd_open(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;
    int last_media_present;

    if (s->type != FTYPE_FD)
        return 0;
    last_media_present = (s->fd >= 0);
    if (s->fd >= 0 &&
        (qemu_get_clock(rt_clock) - s->fd_open_time) >= FD_OPEN_TIMEOUT) {
        close(s->fd);
        s->fd = -1;
#ifdef DEBUG_FLOPPY
        printf("Floppy closed\n");
#endif
    }
    if (s->fd < 0) {
        if (s->fd_got_error &&
            (qemu_get_clock(rt_clock) - s->fd_error_time) < FD_OPEN_TIMEOUT) {
#ifdef DEBUG_FLOPPY
            printf("No floppy (open delayed)\n");
#endif
            return -EIO;
        }
        s->fd = open(bs->filename, s->fd_open_flags);
        if (s->fd < 0) {
            s->fd_error_time = qemu_get_clock(rt_clock);
            s->fd_got_error = 1;
            if (last_media_present)
                s->fd_media_changed = 1;
#ifdef DEBUG_FLOPPY
            printf("No floppy\n");
#endif
            return -EIO;
        }
#ifdef DEBUG_FLOPPY
        printf("Floppy opened\n");
#endif
    }
    if (!last_media_present)
        s->fd_media_changed = 1;
    s->fd_open_time = qemu_get_clock(rt_clock);
    s->fd_got_error = 0;
    return 0;
}
static int get_pmsts(PIIX4PMState *s)
{
    int64_t d;

    d = muldiv64(qemu_get_clock(vm_clock), PM_FREQ, get_ticks_per_sec());
    if (d >= s->tmr_overflow_time)
        s->pmsts |= TMROF_EN;
    return s->pmsts;
}
/* Try to see if we have the semaphore for sending; if not, request it
 * from the AP. */
static int onedram_fmt_try_send_cmd(S5pc1xxOneDRAMState *s)
{
    if (onedram_read_sem(s)) {
        fprintf(stderr, "onedram_fmt_try_send_cmd - can't get authority\n");
        qemu_mod_timer(s->sem_timer, qemu_get_clock(vm_clock) + TICK_COUNTDOWN);
    } else {
        onedram_fmt_send_cmd(s);
    }
    return 1;
}
void kvmppc_init(void)
{
    /* XXX The only reason KVM yields control back to qemu is device IO.
     * Since an idle guest does no IO, qemu's device model will never get
     * a chance to run.  So, until Qemu gains IO threads, we create this
     * timer to ensure that the device model gets a chance to run. */
    kvmppc_timer_rate = ticks_per_sec / 10;
    kvmppc_timer = qemu_new_timer(vm_clock, &kvmppc_timer_hack, NULL);
    qemu_mod_timer(kvmppc_timer, qemu_get_clock(vm_clock) + kvmppc_timer_rate);
}
static int get_pmsts(PIIX4PMState *s)
{
    int64_t d;

    d = muldiv64(qemu_get_clock(vm_clock), PM_TIMER_FREQUENCY,
                 get_ticks_per_sec());
    if (d >= s->tmr_overflow_time)
        s->pmsts |= ACPI_BITMASK_TIMER_STATUS;
    return s->pmsts;
}
static void pxa2xx_timer_tick4(void *opaque)
{
    struct pxa2xx_timer4_s *t = (struct pxa2xx_timer4_s *) opaque;
    pxa2xx_timer_info *i = (pxa2xx_timer_info *) t->tm.info;

    pxa2xx_timer_tick(&t->tm);
    if (t->control & (1 << 3))
        t->clock = 0;
    if (t->control & (1 << 6))
        pxa2xx_timer_update4(i, qemu_get_clock(vm_clock), t->tm.num - 4);
}
static void kbd_recv(KeyBoardState *s, uint8_t value)
{
    if (s->recv_count < SIO_BUFFER_SIZE) {
        s->recv_buf[(s->recv_write++) & (SIO_BUFFER_SIZE - 1)] = value;
        s->recv_count++;
        if (s->recv_count == 1) {
            qemu_mod_timer(s->sio_timer,
                           qemu_get_clock(rt_clock) + SIO_RECV_DELAY);
        }
    }
}
static void pm_ioport_writew(void *opaque, uint32_t addr, uint32_t val)
{
    PIIX4PMState *s = opaque;

    addr &= 0x3f;
    switch (addr) {
    case 0x00:
        {
            int64_t d;
            int pmsts;
            pmsts = get_pmsts(s);
            if (pmsts & val & ACPI_BITMASK_TIMER_STATUS) {
                /* if TMRSTS is reset, then compute the new overflow time */
                d = muldiv64(qemu_get_clock(vm_clock), PM_TIMER_FREQUENCY,
                             get_ticks_per_sec());
                s->tmr_overflow_time = (d + 0x800000LL) & ~0x7fffffLL;
            }
            s->pmsts &= ~val;
            pm_update_sci(s);
        }
        break;
    case 0x02:
        s->pmen = val;
        pm_update_sci(s);
        break;
    case 0x04:
        {
            int sus_typ;
            s->pmcntrl = val & ~(ACPI_BITMASK_SLEEP_ENABLE);
            if (val & ACPI_BITMASK_SLEEP_ENABLE) {
                /* change suspend type */
                sus_typ = (val >> 10) & 7;
                switch (sus_typ) {
                case 0: /* soft power off */
                    qemu_system_shutdown_request();
                    break;
                case 1:
                    /* ACPI_BITMASK_WAKE_STATUS should be set on resume.
                       Pretend that resume was caused by power button */
                    s->pmsts |= (ACPI_BITMASK_WAKE_STATUS |
                                 ACPI_BITMASK_POWER_BUTTON_STATUS);
                    qemu_system_reset_request();
                    if (s->cmos_s3) {
                        qemu_irq_raise(s->cmos_s3);
                    }
                    /* fall through */
                default:
                    break;
                }
            }
        }
        break;
    default:
        break;
    }
}
static void rtc_coalesced_timer_update(RTCState *s)
{
    if (s->irq_coalesced == 0) {
        qemu_del_timer(s->coalesced_timer);
    } else {
        /* divide each RTC interval into 2 - 8 smaller intervals */
        int c = MIN(s->irq_coalesced, 7) + 1;
        int64_t next_clock = qemu_get_clock(rtc_clock) +
            muldiv64(s->period / c, get_ticks_per_sec(), 32768);
        qemu_mod_timer(s->coalesced_timer, next_clock);
    }
}
int64_t qemu_next_deadline(void)
{
    /* To avoid problems with overflow limit this to 2^32. */
    int64_t delta = INT32_MAX;

    if (active_timers[QEMU_CLOCK_VIRTUAL]) {
        delta = active_timers[QEMU_CLOCK_VIRTUAL]->expire_time -
            qemu_get_clock(vm_clock);
    }
    if (active_timers[QEMU_CLOCK_HOST]) {
        int64_t hdelta = active_timers[QEMU_CLOCK_HOST]->expire_time -
            qemu_get_clock(host_clock);
        if (hdelta < delta)
            delta = hdelta;
    }

    if (delta < 0)
        delta = 0;

    return delta;
}
static void* winaudio_init(void)
{
    WinAudioState* s = &g_winaudio;

#if DEBUG
    start_time = qemu_get_clock(vm_clock);
    last_time = 0;
#endif

    return s;
}
static void goldfish_timer_save(QEMUFile* f, void* opaque)
{
    struct timer_state* s = opaque;

    qemu_put_be64(f, s->now_ns);  /* in case the kernel is in the middle of a timer read */
    qemu_put_byte(f, s->armed);
    if (s->armed) {
        int64_t now_tks = qemu_get_clock(vm_clock);
        int64_t alarm_tks = ns2tks(s->alarm_low_ns | (int64_t)s->alarm_high_ns << 32);
        qemu_put_be64(f, alarm_tks - now_tks);
    }
}