/* Restore a timer's state from a migration stream: an all-ones expiry
 * encodes "not pending"; any other value re-arms the timer at that tick. */
void qemu_get_timer(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time = qemu_get_be64(f);

    if (expire_time == -1) {
        /* Sentinel: the timer was not armed when the state was saved. */
        qemu_del_timer(ts);
        return;
    }
    qemu_mod_timer(ts, expire_time);
}
/* Release a SysTimer wrapper: dispose of any underlying QEMU timer it
 * still owns, then chain the wrapper onto the global free list for reuse. */
static void sys_timer_free( SysTimer timer )
{
    if (timer->timer != NULL) {
        /* Cancel before freeing so no callback can fire on freed memory. */
        qemu_del_timer( timer->timer );
        qemu_free_timer( timer->timer );
        timer->timer = NULL;
    }

    timer->next = _s_free_timers;
    _s_free_timers = timer;
}
/*
 * Tear down a PCI PCNet device instance.
 *
 * Destroys the MMIO and I/O BAR memory regions, cancels and frees the
 * poll timer (cancel before free so no pending callback can run against
 * freed memory), and detaches the NIC from its VLAN.  Always returns 0.
 */
static int pci_pcnet_uninit(PCIDevice *dev)
{
    PCIPCNetState *d = DO_UPCAST(PCIPCNetState, pci_dev, dev);

    memory_region_destroy(&d->state.mmio);
    memory_region_destroy(&d->io_bar);
    /* Stop the timer before releasing its storage. */
    qemu_del_timer(d->state.poll_timer);
    qemu_free_timer(d->state.poll_timer);
    qemu_del_vlan_client(&d->state.nic->nc);
    return 0;
}
/* VLAN client teardown hook: drop the MMIO registration, dispose of the
 * watchdog timer, and free the dp8393x device state. */
static void nic_cleanup(VLANClientState *nc)
{
    NICState *nic = DO_UPCAST(NICState, nc, nc);
    dp8393xState *s = nic->opaque;

    cpu_unregister_io_memory(s->mmio_index);

    /* The watchdog must be cancelled before its storage is released. */
    qemu_del_timer(s->watchdog);
    qemu_free_timer(s->watchdog);

    g_free(s);
}
/* Re-arm (or stop) the timer used to deliver coalesced RTC interrupts.
 * With no interrupts outstanding the timer is cancelled; otherwise the
 * backlog is spread over 2 - 8 sub-intervals of the RTC period. */
static void rtc_coalesced_timer_update(RTCState *s)
{
    int slices;
    int64_t next_clock;

    if (s->irq_coalesced == 0) {
        qemu_del_timer(s->coalesced_timer);
        return;
    }

    /* Divide each RTC interval into 2 - 8 smaller intervals. */
    slices = MIN(s->irq_coalesced, 7) + 1;
    next_clock = qemu_get_clock_ns(rtc_clock) +
        muldiv64(s->period / slices, get_ticks_per_sec(), RTC_CLOCK_RATE);
    qemu_mod_timer(s->coalesced_timer, next_clock);
}
/*
 * VLAN client teardown hook for the dp8393x NIC (memory-API variant).
 *
 * Unmaps and destroys the device's MMIO region, cancels and frees the
 * watchdog timer (cancel before free so no callback can fire against
 * freed memory), then releases the device state itself.
 */
static void nic_cleanup(VLANClientState *nc)
{
    dp8393xState *s = DO_UPCAST(NICState, nc, nc)->opaque;

    memory_region_del_subregion(s->address_space, &s->mmio);
    memory_region_destroy(&s->mmio);

    qemu_del_timer(s->watchdog);
    qemu_free_timer(s->watchdog);

    g_free(s);
}
/*
 * "Warp" vm_clock forward over idle periods so guest timers keep firing
 * even though no instructions are being counted (uses the icount helpers
 * below; the warp timer only exists in that configuration).  Called
 * whenever pending vm_clock timers or the CPUs' idle state may change.
 */
void qemu_clock_warp(QEMUClock *clock)
{
    int64_t deadline;

    /* No warp timer set up: nothing to do. */
    if (!clock->warp_timer) {
        return;
    }

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    assert(clock == vm_clock);

    /*
     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
     * ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest vm_clock timer.
     */
    icount_warp_rt(NULL);

    if (!all_cpu_threads_idle() || !active_timers[clock->type]) {
        /* Some CPU is runnable or no timer is pending: no warp needed. */
        qemu_del_timer(clock->warp_timer);
        return;
    }

    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
    deadline = qemu_next_icount_deadline();
    if (deadline > 0) {
        /*
         * Ensure the vm_clock proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * vm_clock.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending vm_clock timer; rather
         * time could just advance to the next vm_clock event.  Instead, we
         * do stop VCPUs and only advance vm_clock after some "real" time,
         * (related to the time left until the next event) has passed.  This
         * rt_clock timer will do this.  This avoids that the warps are too
         * visible externally---for example, you will not be sending network
         * packets continously instead of every 100ms.
         */
        qemu_mod_timer(clock->warp_timer, vm_clock_warp_start + deadline);
    } else {
        /* Deadline already due: just kick the main loop. */
        qemu_notify_event();
    }
}
/* ACPI PM_TMR */
/* Arm or disarm the PM timer's overflow interrupt.  When enabled, the
 * timer is scheduled for the stored overflow time, converted from
 * PM-timer ticks to vm-clock ticks; otherwise it is cancelled. */
void acpi_pm_tmr_update(ACPIREGS *ar, bool enable)
{
    if (!enable) {
        qemu_del_timer(ar->tmr.timer);
        return;
    }

    /* Schedule a timer interruption for the overflow moment. */
    qemu_mod_timer(ar->tmr.timer,
                   muldiv64(ar->tmr.overflow_time, get_ticks_per_sec(),
                            PM_TIMER_FREQUENCY));
}
/* Close the iSCSI block driver: stop the NOP timer if one is running,
 * drop the fd handler, destroy the libiscsi context, and scrub the
 * per-LUN state so stale pointers cannot be reused. */
static void iscsi_close(BlockDriverState *bs)
{
    IscsiLun *iscsilun = bs->opaque;
    struct iscsi_context *iscsi = iscsilun->iscsi;

    if (iscsilun->nop_timer != NULL) {
        qemu_del_timer(iscsilun->nop_timer);
        qemu_free_timer(iscsilun->nop_timer);
    }

    /* Stop polling the iSCSI socket before tearing the context down. */
    qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL);
    iscsi_destroy_context(iscsi);

    memset(iscsilun, 0, sizeof(IscsiLun));
}
static inline void strongarm_rtc_timer_update(StrongARMRTCState *s) { if ((s->rtsr & RTSR_HZE) && !(s->rtsr & RTSR_HZ)) { qemu_mod_timer(s->rtc_hz, s->last_hz + 1000); } else { qemu_del_timer(s->rtc_hz); } if ((s->rtsr & RTSR_ALE) && !(s->rtsr & RTSR_AL)) { qemu_mod_timer(s->rtc_alarm, s->last_hz + (((s->rtar - s->last_rcnr) * 1000 * ((s->rttr & 0xffff) + 1)) >> 15)); } else {
/*
 * bdrv_create implementation for iSCSI targets.
 *
 * An iSCSI LUN cannot actually be created from the client side; this
 * opens the target on a stack-local BlockDriverState and checks that it
 * is a disk large enough for the requested "size" option.  Returns 0 on
 * success, -ENODEV if the LUN is not a disk, -ENOSPC if it is smaller
 * than requested, or the error from iscsi_open().
 */
static int iscsi_create(const char *filename, QEMUOptionParameter *options)
{
    int ret = 0;
    int64_t total_size = 0;
    BlockDriverState bs;
    IscsiLun *iscsilun = NULL;
    QDict *bs_options;

    memset(&bs, 0, sizeof(BlockDriverState));

    /* Read out options: only "size" is honoured (converted to sectors). */
    while (options && options->name) {
        if (!strcmp(options->name, "size")) {
            total_size = options->value.n / BDRV_SECTOR_SIZE;
        }
        options++;
    }

    bs.opaque = g_malloc0(sizeof(struct IscsiLun));
    iscsilun = bs.opaque;

    bs_options = qdict_new();
    qdict_put(bs_options, "filename", qstring_from_str(filename));
    ret = iscsi_open(&bs, bs_options, 0);
    QDECREF(bs_options);

    if (ret != 0) {
        goto out;
    }
    /* The create path has no use for the NOP timer (presumably the
     * session keep-alive started by iscsi_open); dispose of it. */
    if (iscsilun->nop_timer) {
        qemu_del_timer(iscsilun->nop_timer);
        qemu_free_timer(iscsilun->nop_timer);
    }
    if (iscsilun->type != TYPE_DISK) {
        ret = -ENODEV;
        goto out;
    }
    if (bs.total_sectors < total_size) {
        ret = -ENOSPC;
        goto out;
    }
    ret = 0;
out:
    /* Cleanup runs on both success and failure paths. */
    if (iscsilun->iscsi != NULL) {
        iscsi_destroy_context(iscsilun->iscsi);
    }
    g_free(bs.opaque);
    return ret;
}
/*
 * (Re)program the periodic interrupt timer from RTC registers A/B.
 *
 * When the rate code (REG_A low nibble) is non-zero and periodic
 * interrupts are enabled (REG_B PIE) — and, on x86, HPET is not in
 * legacy replacement mode — arm the timer for the next edge on the
 * 32768 Hz clock grid; otherwise cancel it.
 */
static void rtc_timer_update(RTCState *s, int64_t current_time)
{
    int period_code, period;
    int64_t cur_clock, next_irq_clock;

    period_code = s->cmos_data[RTC_REG_A] & 0x0f;
#if defined TARGET_I386 || defined TARGET_X86_64
    /* disable periodic timer if hpet is in legacy mode, since interrupts are
     * disabled anyway.
     */
    if (period_code != 0
        && (s->cmos_data[RTC_REG_B] & REG_B_PIE)
        && !hpet_in_legacy_mode()) {
#else
    if (period_code != 0
        && (s->cmos_data[RTC_REG_B] & REG_B_PIE)) {
#endif
        if (period_code <= 2)
            period_code += 7;
        /* period in 32 Khz cycles */
        period = 1 << (period_code - 1);
#ifdef TARGET_I386
        /* Rescale the outstanding coalesced-IRQ count to the new period. */
        if (period != s->period)
            s->irq_coalesced = (s->irq_coalesced * s->period) / period;
        s->period = period;
#endif
        /* compute 32 khz clock */
        cur_clock = muldiv64(current_time, 32768, ticks_per_sec);
        next_irq_clock = (cur_clock & ~(period - 1)) + period;
        s->next_periodic_time =
            muldiv64(next_irq_clock, ticks_per_sec, 32768) + 1;
        qemu_mod_timer(s->periodic_timer, s->next_periodic_time);
    } else {
#ifdef TARGET_I386
        s->irq_coalesced = 0;
#endif
        qemu_del_timer(s->periodic_timer);
    }
}

/*
 * Periodic timer callback: schedule the next tick, then set the top
 * bits of REG_C (0xc0) and raise the IRQ.  On x86 with rtc_td_hack,
 * ticks arriving while the previous interrupt is still unacknowledged
 * are counted for coalesced delivery instead of being raised now.
 */
static void rtc_periodic_timer(void *opaque)
{
    RTCState *s = opaque;

    rtc_timer_update(s, s->next_periodic_time);
#ifdef TARGET_I386
    if ((s->cmos_data[RTC_REG_C] & 0xc0) && rtc_td_hack) {
        s->irq_coalesced++;
        return;
    }
#endif
    s->cmos_data[RTC_REG_C] |= 0xc0;
    rtc_irq_raise(s->irq);
}
/* Arm the SONIC watchdog to fire when the WT1:WT0 counter would reach
 * zero (the divisor treats it as counting at 5,000,000 per second); a
 * stopped controller cancels the timer instead. */
static void set_next_tick(dp8393xState *s)
{
    uint32_t count;
    int64_t delay;

    if (s->regs[SONIC_CR] & SONIC_CR_STP) {
        /* Controller stopped: nothing to schedule. */
        qemu_del_timer(s->watchdog);
        return;
    }

    count = (s->regs[SONIC_WT1] << 16) | s->regs[SONIC_WT0];
    s->wt_last_update = qemu_get_clock(vm_clock);
    delay = get_ticks_per_sec() * count / 5000000;
    qemu_mod_timer(s->watchdog, s->wt_last_update + delay);
}
/* TX virtqueue kick handler implementing timer-based transmit batching:
 * the first kick arms a delay timer and masks further notifications; a
 * second kick while the timer is pending flushes the queue immediately. */
static void virtio_net_handle_tx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);

    if (!n->tx_timer_active) {
        /* First kick: delay transmission, suppress further kicks. */
        n->tx_timer_active = 1;
        qemu_mod_timer(n->tx_timer,
                       qemu_get_clock(vm_clock) + TX_TIMER_INTERVAL);
        virtio_queue_set_notification(vq, 0);
        return;
    }

    /* Kicked again before the timer fired: transmit right away. */
    virtio_queue_set_notification(vq, 1);
    qemu_del_timer(n->tx_timer);
    n->tx_timer_active = 0;
    virtio_net_flush_tx(n, vq);
}
/*
 * (Re)program the alarm timer from the match register.
 *
 * The counter wraps at 32 bits; the unsigned subtraction below wraps
 * the same way, so the distance is correct even when mr is behind the
 * current count.  A zero distance means the alarm is due immediately.
 */
static void pl031_set_alarm(pl031_state *s)
{
    uint32_t ticks;

    ticks = s->mr - pl031_get_count(s);
    /* Fix: the format string was "%ud", which printed a stray 'd'
     * after the value; the correct unsigned specifier is "%u". */
    DPRINTF("Alarm set in %u ticks\n", ticks);
    if (ticks == 0) {
        /* Alarm due right now: stop the timer and raise the interrupt. */
        qemu_del_timer(s->timer);
        pl031_interrupt(s);
    } else {
        int64_t now = qemu_get_clock_ns(rtc_clock);
        /* One counter tick corresponds to get_ticks_per_sec() of vm
         * time here (i.e. the counter advances once per second). */
        qemu_mod_timer(s->timer, now + (int64_t)ticks * get_ticks_per_sec());
    }
}
/* Broadcast an announcement for every NIC, re-arming the timer with an
 * increasing delay (50ms, 150ms, 250ms, ...) until all rounds have been
 * sent, then dispose of the timer. */
static void qemu_announce_self_once(void *opaque)
{
    static int count = SELF_ANNOUNCE_ROUNDS;
    QEMUTimer *timer = *(QEMUTimer **)opaque;

    qemu_foreach_nic(qemu_announce_self_iter, NULL);

    --count;
    if (count == 0) {
        /* Final round: this callback owns the timer, so free it here. */
        qemu_del_timer(timer);
        qemu_free_timer(timer);
        return;
    }

    /* Delay grows by 100ms per completed round. */
    qemu_mod_timer(timer, qemu_get_clock(rt_clock) + 50 +
                   (SELF_ANNOUNCE_ROUNDS - count - 1) * 100);
}
/* Recompute the SCI level from the PM status/enable registers and, when
 * the timer-overflow interrupt is enabled but not yet latched, schedule
 * the timer that will raise it. */
static void pm_update_sci(PIIX4PMState *s)
{
    int pmsts = get_pmsts(s);
    int sci_level = (((pmsts & s->pmen) &
                      (RTC_EN | PWRBTN_EN | GBL_EN | TMROF_EN)) != 0);

    qemu_set_irq(s->irq, sci_level);

    /* Schedule a timer interruption if needed. */
    if ((s->pmen & TMROF_EN) && !(pmsts & TMROF_EN)) {
        int64_t expire_time = muldiv64(s->tmr_overflow_time,
                                       get_ticks_per_sec(), PM_FREQ);
        qemu_mod_timer(s->tmr_timer, expire_time);
    } else {
        qemu_del_timer(s->tmr_timer);
    }
}
/*
 * Fold the time elapsed since the last update into the SONIC watchdog
 * counter registers (WT1:WT0), then re-arm the timer via set_next_tick().
 */
static void update_wt_regs(dp8393xState *s)
{
    int64_t elapsed;
    uint32_t val;

    if (s->regs[SONIC_CR] & SONIC_CR_STP) {
        /* Controller stopped: just cancel the watchdog. */
        qemu_del_timer(s->watchdog);
        return;
    }

    /* NOTE(review): last_update is in the past, so this is NEGATIVE and
     * the subtraction below actually INCREASES the counter — the
     * operands look swapped; confirm the intended sign. */
    elapsed = s->wt_last_update - qemu_get_clock(vm_clock);
    val = s->regs[SONIC_WT1] << 16 | s->regs[SONIC_WT0];
    /* 5000000 matches the divisor used in set_next_tick — presumably the
     * counter rate; verify against the SONIC documentation. */
    val -= elapsed / 5000000;
    s->regs[SONIC_WT1] = (val >> 16) & 0xffff;
    s->regs[SONIC_WT0] = (val >> 0) & 0xffff;
    set_next_tick(s);
}
/*
 * Arm the general-purpose timer to fire when the counter reaches the
 * match register, or raise the interrupt immediately if it already has.
 */
static void msm_gpt_set_alarm(msm_gpt_state *s)
{
    int64_t now;
    int64_t t = 0;     /* DGT clock rate divided by remaining ticks */
    int64_t wait = 0;  /* host-clock delay until the alarm */
    uint32_t ticks;

    now = qemu_get_clock_ns(vm_clock);
    /* Unsigned subtraction wraps correctly when match_val < count_val. */
    ticks = s->match_val - s->count_val;
    /* Fix: t/wait are now zero-initialized — the original traced them
     * before they were computed, reading indeterminate values (UB) —
     * and "%llu" mismatched the signed int64_t arguments. */
    LOG_MSM_IO("Alarm set in %u ticks, t %lld, wait %lld\n",
               ticks, (long long)t, (long long)wait);
    if (ticks == 0) {
        /* Alarm is due now: cancel the timer and interrupt at once. */
        qemu_del_timer(s->timer);
        msm_gpt_interrupt(s);
    } else {
        t = DGT_CLOCK / ticks;
        if (t == 0) {
            /* Fix: ticks > DGT_CLOCK made t zero and the next division
             * undefined; clamp so the wait tops out at one second. */
            t = 1;
        }
        wait = get_ticks_per_sec() / t;
        qemu_mod_timer(s->timer, now + wait);
    }
}
/*
 * (Re)program the periodic timer from RTC registers A/B (variant that
 * also serves the square-wave output when SQWE is set).
 *
 * Arms the timer for the next edge on the 32768 Hz grid when the rate
 * code in REG_A is non-zero and either periodic interrupts are enabled
 * (PIE — unless HPET legacy mode disables them on x86) or the square
 * wave is enabled with an IRQ wired up; otherwise cancels it.
 */
static void rtc_timer_update(RTCState *s, int64_t current_time)
{
    int period_code, period;
    int64_t cur_clock, next_irq_clock;
    int enable_pie;

    period_code = s->cmos_data[RTC_REG_A] & 0x0f;
#if defined TARGET_I386
    /* disable periodic timer if hpet is in legacy mode, since interrupts are
     * disabled anyway.
     */
    enable_pie = !hpet_in_legacy_mode();
#else
    enable_pie = 1;
#endif
    if (period_code != 0
        && (((s->cmos_data[RTC_REG_B] & REG_B_PIE) && enable_pie)
            || ((s->cmos_data[RTC_REG_B] & REG_B_SQWE) && s->sqw_irq))) {
        if (period_code <= 2)
            period_code += 7;
        /* period in 32 Khz cycles */
        period = 1 << (period_code - 1);
#ifdef TARGET_I386
        /* Rescale the outstanding coalesced-IRQ count to the new period. */
        if (period != s->period)
            s->irq_coalesced = (s->irq_coalesced * s->period) / period;
        s->period = period;
#endif
        /* compute 32 khz clock */
        cur_clock = muldiv64(current_time, 32768, get_ticks_per_sec());
        next_irq_clock = (cur_clock & ~(period - 1)) + period;
        s->next_periodic_time =
            muldiv64(next_irq_clock, get_ticks_per_sec(), 32768) + 1;
        qemu_mod_timer(s->periodic_timer, s->next_periodic_time);
    } else {
#ifdef TARGET_I386
        s->irq_coalesced = 0;
#endif
        qemu_del_timer(s->periodic_timer);
    }
}
/* Derive the SCI line level from the PM status and enable bits, and
 * manage the timer that raises the PM-timer overflow interrupt. */
static void pm_update_sci(PIIX4PMState *s)
{
    int pmsts = get_pmsts(s);
    int sci_level = (((pmsts & s->pmen) &
                      (ACPI_BITMASK_RT_CLOCK_ENABLE |
                       ACPI_BITMASK_POWER_BUTTON_ENABLE |
                       ACPI_BITMASK_GLOBAL_LOCK_ENABLE |
                       ACPI_BITMASK_TIMER_ENABLE)) != 0);

    qemu_set_irq(s->irq, sci_level);

    if ((s->pmen & ACPI_BITMASK_TIMER_ENABLE) &&
        !(pmsts & ACPI_BITMASK_TIMER_STATUS)) {
        /* Overflow interrupt enabled but not yet pending: arm the timer
         * for the moment the PM timer rolls over. */
        int64_t expire_time = muldiv64(s->tmr_overflow_time,
                                       get_ticks_per_sec(),
                                       PM_TIMER_FREQUENCY);
        qemu_mod_timer(s->tmr_timer, expire_time);
    } else {
        qemu_del_timer(s->tmr_timer);
    }
}
static int buffered_close(void *opaque) { QEMUFileBuffered *s = opaque; int ret; DPRINTF("closing\n"); while (!s->has_error && s->buffer_size) { buffered_flush(s); if (s->freeze_output) s->wait_for_unfreeze(s); } ret = s->close(s->opaque); qemu_del_timer(s->timer); qemu_free_timer(s->timer); qemu_free(s->buffer); qemu_free(s); return ret; }
/* Boot up timer */
/*
 * Timer callback driving the OneDRAM modem boot handshake.  While the
 * virtual modem has not booted, announce readiness-for-loading to the
 * CP mailbox; once it has, initialize the shared-memory queue pointers,
 * hand bus authority to the AP, send the "phone start" command and stop
 * the boot timer.
 */
static void onedram_bootup(void *opaque)
{
    uint16_t cmd;
    S5pc1xxOneDRAMState *s = (S5pc1xxOneDRAMState *)opaque;

    if (!s->vmodem_bootup) {
        onedram_io_writel(s, ONEDRAM_MBX_AB, IPC_CP_READY_FOR_LOADING);
        s->vmodem_bootup = 1;
    } else {
        /* Init the in/out head/tail */
        onedram_write_inhead(s, 0);
        onedram_write_intail(s, 0);
        onedram_write_outhead(s, 0);
        onedram_write_outtail(s, 0);
        /* put the authority to AP to let it access the shared memory */
        onedram_put_authority(s);
        cmd = INT_COMMAND(INT_MASK_CMD_PHONE_START | CP_CHIP_INFINEON);
        onedram_send_cmd_to_pda(s, cmd);
        qemu_del_timer(s->bootup_timer);
    }
}
static void nic_reset(void *opaque) { dp8393xState *s = opaque; qemu_del_timer(s->watchdog); s->regs[SONIC_CR] = SONIC_CR_RST | SONIC_CR_STP | SONIC_CR_RXDIS; s->regs[SONIC_DCR] &= ~(SONIC_DCR_EXBUS | SONIC_DCR_LBR); s->regs[SONIC_RCR] &= ~(SONIC_RCR_LB0 | SONIC_RCR_LB1 | SONIC_RCR_BRD | SONIC_RCR_RNT); s->regs[SONIC_TCR] |= SONIC_TCR_NCRS | SONIC_TCR_PTX; s->regs[SONIC_TCR] &= ~SONIC_TCR_BCM; s->regs[SONIC_IMR] = 0; s->regs[SONIC_ISR] = 0; s->regs[SONIC_DCR2] = 0; s->regs[SONIC_EOBC] = 0x02F8; s->regs[SONIC_RSC] = 0; s->regs[SONIC_CE] = 0; s->regs[SONIC_RSC] = 0; /* Network cable is connected */ s->regs[SONIC_RCR] |= SONIC_RCR_CRS; dp8393x_update_irq(s); }
/* Assert or deassert a DMA channel's request line, keeping the
 * controller-wide enabled count and enable mask in sync, and starting,
 * rescheduling or cancelling the channel on a level change. */
void soc_dma_set_request(struct soc_dma_ch_s *ch, int level)
{
    struct dma_s *dma = (struct dma_s *) ch->dma;
    unsigned int bit = 1 << ch->num;

    dma->enabled_count += level - ch->enable;

    if (level) {
        dma->ch_enable_mask |= bit;
    } else {
        dma->ch_enable_mask &= ~bit;
    }

    if (level == ch->enable) {
        return;                 /* No edge: nothing else to do. */
    }

    soc_dma_ch_freq_update(dma);
    ch->enable = level;

    if (!ch->enable) {
        qemu_del_timer(ch->timer);
    } else if (ch->running) {
        soc_dma_ch_schedule(ch, 1);
    } else {
        soc_dma_ch_run(ch);
    }
}
/*
 * MMIO write handler for one MPCore private timer block.
 * Register offsets (addr & 0x1f): 0 = Load, 4 = Counter, 8 = Control,
 * 12 = Interrupt status (write-1-to-clear).
 */
static void timerblock_write(void *opaque, target_phys_addr_t addr,
                             uint64_t value, unsigned size)
{
    timerblock *tb = (timerblock *)opaque;
    int64_t old;

    addr &= 0x1f;
    switch (addr) {
    case 0: /* Load */
        tb->load = value;
        /* Fall through: writing Load also reloads the counter. */
    case 4: /* Counter. */
        if ((tb->control & 1) && tb->count) {
            /* Cancel the previous timer. */
            qemu_del_timer(tb->timer);
        }
        tb->count = value;
        if (tb->control & 1) {
            /* Timer enabled: restart from the new count. */
            timerblock_reload(tb, 1);
        }
        break;
    case 8: /* Control. */
        old = tb->control;
        tb->control = value;
        if (((old & 1) == 0) && (value & 1)) {
            /* Enable edge: with a zero count and control bit 1 set
             * (presumably auto-reload mode), restart from Load. */
            if (tb->count == 0 && (tb->control & 2)) {
                tb->count = tb->load;
            }
            timerblock_reload(tb, 1);
        }
        break;
    case 12: /* Interrupt status. */
        tb->status &= ~value;
        timerblock_update_irq(tb);
        break;
    }
}
/* It's okay to call this multiple times or when no timer is started */
/* Trace and cancel the pending need-check timer (qemu_del_timer is a
 * no-op on a timer that is not currently armed). */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    qemu_del_timer(s->need_check_timer);
}
/* Cancel any pending expiry of GPTM sub-timer n. */
static void gptm_stop(gptm_state *s, int n)
{
    qemu_del_timer(s->timer[n]);
}
/*
 * 16-bit asynchronous-interface write handler for the TUSB6010 OTG
 * companion chip.  Decodes the register offset (addr & 0xfff) and
 * updates the corresponding device state: MUSB core passthrough
 * windows, device/clock configuration, the OTG timer, the various
 * interrupt set/clear/mask banks, and endpoint size configuration.
 */
static void tusb_async_writew(void *opaque, target_phys_addr_t addr,
                              uint32_t value)
{
    TUSBState *s = (TUSBState *) opaque;
    int offset = addr & 0xfff;
    int epnum;

    switch (offset) {
    case TUSB_VLYNQ_CTRL:
        break;

    /* Windows that forward straight to the MUSB core. */
    case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff):
        musb_write[2](s->musb, offset & 0x1ff, value);
        break;
    case TUSB_FIFO_BASE ... (TUSB_FIFO_BASE | 0x1ff):
        musb_write[2](s->musb, 0x20 + ((addr >> 3) & 0x3c), value);
        break;

    case TUSB_DEV_CONF:
        s->dev_config = value;
        s->host_mode = (value & TUSB_DEV_CONF_USB_HOST_MODE);
        if (value & TUSB_DEV_CONF_PROD_TEST_MODE)
            hw_error("%s: Product Test mode not allowed\n", __FUNCTION__);
        break;

    case TUSB_PHY_OTG_CTRL_ENABLE:
    case TUSB_PHY_OTG_CTRL:
        return; /* TODO */
    case TUSB_DEV_OTG_TIMER:
        /* Arm the OTG timer when the enable bit is set, converting the
         * programmed count from TUSB device-clock ticks to vm time;
         * otherwise cancel it. */
        s->otg_timer_val = value;
        if (value & TUSB_DEV_OTG_TIMER_ENABLE)
            qemu_mod_timer(s->otg_timer, qemu_get_clock(vm_clock) +
                           muldiv64(TUSB_DEV_OTG_TIMER_VAL(value),
                                    get_ticks_per_sec(), TUSB_DEVCLOCK));
        else
            qemu_del_timer(s->otg_timer);
        break;

    case TUSB_PRCM_CONF:
        s->prcm_config = value;
        break;
    case TUSB_PRCM_MNGMT:
        s->prcm_mngmt = value;
        break;
    case TUSB_PRCM_WAKEUP_CLEAR:
        break;
    case TUSB_PRCM_WAKEUP_MASK:
        s->wkup_mask = value;
        break;

    case TUSB_PULLUP_1_CTRL:
        s->pullup[0] = value;
        break;
    case TUSB_PULLUP_2_CTRL:
        s->pullup[1] = value;
        break;
    case TUSB_INT_CTRL_CONF:
        s->control_config = value;
        tusb_intr_update(s);
        break;

    /* USBIP interrupt bank: set / clear / mask. */
    case TUSB_USBIP_INT_SET:
        s->usbip_intr |= value;
        tusb_usbip_intr_update(s);
        break;
    case TUSB_USBIP_INT_CLEAR:
        s->usbip_intr &= ~value;
        tusb_usbip_intr_update(s);
        musb_core_intr_clear(s->musb, ~value);
        break;
    case TUSB_USBIP_INT_MASK:
        s->usbip_mask = value;
        tusb_usbip_intr_update(s);
        break;

    /* DMA interrupt bank. */
    case TUSB_DMA_INT_SET:
        s->dma_intr |= value;
        tusb_dma_intr_update(s);
        break;
    case TUSB_DMA_INT_CLEAR:
        s->dma_intr &= ~value;
        tusb_dma_intr_update(s);
        break;
    case TUSB_DMA_INT_MASK:
        s->dma_mask = value;
        tusb_dma_intr_update(s);
        break;

    /* GPIO interrupt bank. */
    case TUSB_GPIO_INT_SET:
        s->gpio_intr |= value;
        tusb_gpio_intr_update(s);
        break;
    case TUSB_GPIO_INT_CLEAR:
        s->gpio_intr &= ~value;
        tusb_gpio_intr_update(s);
        break;
    case TUSB_GPIO_INT_MASK:
        s->gpio_mask = value;
        tusb_gpio_intr_update(s);
        break;

    /* Top-level interrupt source bank. */
    case TUSB_INT_SRC_SET:
        s->intr |= value;
        tusb_intr_update(s);
        break;
    case TUSB_INT_SRC_CLEAR:
        s->intr &= ~value;
        tusb_intr_update(s);
        break;
    case TUSB_INT_MASK:
        s->mask = value;
        tusb_intr_update(s);
        break;

    case TUSB_GPIO_CONF:
        s->gpio_config = value;
        break;
    case TUSB_DMA_REQ_CONF:
        s->dma_config = value;
        break;
    case TUSB_EP0_CONF:
        s->ep0_config = value & 0x1ff;
        musb_set_size(s->musb, 0, TUSB_EP0_CONFIG_XFR_SIZE(value),
                      value & TUSB_EP0_CONFIG_DIR_TX);
        break;
    /* Per-endpoint transfer size configuration (4 bytes per endpoint). */
    case TUSB_EP_IN_SIZE ... (TUSB_EP_IN_SIZE + 0x3b):
        epnum = (offset - TUSB_EP_IN_SIZE) >> 2;
        s->tx_config[epnum] = value;
        musb_set_size(s->musb, epnum + 1, TUSB_EP_CONFIG_XFR_SIZE(value), 1);
        break;
    case TUSB_DMA_EP_MAP:
        s->dma_map = value;
        break;
    case TUSB_EP_OUT_SIZE ... (TUSB_EP_OUT_SIZE + 0x3b):
        epnum = (offset - TUSB_EP_OUT_SIZE) >> 2;
        s->rx_config[epnum] = value;
        musb_set_size(s->musb, epnum + 1, TUSB_EP_CONFIG_XFR_SIZE(value), 0);
        break;
    case TUSB_EP_MAX_PACKET_SIZE_OFFSET ...
            (TUSB_EP_MAX_PACKET_SIZE_OFFSET + 0x3b):
        epnum = (offset - TUSB_EP_MAX_PACKET_SIZE_OFFSET) >> 2;
        return; /* TODO */
    case TUSB_WAIT_COUNT:
        return; /* TODO */
    case TUSB_SCRATCH_PAD:
        s->scratch = value;
        break;
    case TUSB_PROD_TEST_RESET:
        s->test_reset = value;
        break;
    default:
        printf("%s: unknown register at %03x\n", __FUNCTION__, offset);
        return;
    }
}
static void qlooptimer_stop(void* impl) { QEMUTimer* tt = impl; qemu_del_timer(tt); }