/*
 * Refresh the SDL2 window title for one console, reflecting the VM run
 * state and, while input is grabbed, the key combo that releases the grab.
 *
 * Fix: the original also built an icon_title buffer that was never passed
 * to any SDL call (SDL2 has no separate icon caption) — dead code removed.
 */
static void sdl_update_caption(struct sdl2_console *scon)
{
    char win_title[1024];
    const char *status = "";

    if (!runstate_is_running()) {
        status = " [Stopped]";
    } else if (gui_grab) {
        if (alt_grab) {
            status = " - Press Ctrl-Alt-Shift to exit grab";
        } else if (ctrl_grab) {
            status = " - Press Right-Ctrl to exit grab";
        } else {
            status = " - Press Ctrl-Alt to exit grab";
        }
    }

    if (qemu_name) {
        /* Include the console index so multi-head guests are told apart. */
        snprintf(win_title, sizeof(win_title), "QEMU (%s-%d)%s",
                 qemu_name, scon->idx, status);
    } else {
        snprintf(win_title, sizeof(win_title), "QEMU%s", status);
    }

    if (scon->real_window) {
        SDL_SetWindowTitle(scon->real_window, win_title);
    }
}
/*
 * Set the SDL1 window and icon captions from the current VM state,
 * advertising the grab-release key combo while the mouse is grabbed.
 */
static void sdl_update_caption(void)
{
    char title[1024];
    char icon[1024];
    const char *suffix = "";

    if (!runstate_is_running()) {
        suffix = " [Stopped]";
    } else if (gui_grab) {
        if (alt_grab) {
            suffix = " - Press Ctrl-Alt-Shift to exit mouse grab";
        } else if (ctrl_grab) {
            suffix = " - Press Right-Ctrl to exit mouse grab";
        } else {
            suffix = " - Press Ctrl-Alt to exit mouse grab";
        }
    }

    if (qemu_name) {
        snprintf(title, sizeof(title), "QEMU (%s)%s", qemu_name, suffix);
        snprintf(icon, sizeof(icon), "QEMU (%s)", qemu_name);
    } else {
        snprintf(title, sizeof(title), "QEMU%s", suffix);
        snprintf(icon, sizeof(icon), "QEMU");
    }

    SDL_WM_SetCaption(title, icon);
}
static void icount_adjust(void) { int64_t cur_time; int64_t cur_icount; int64_t delta; static int64_t last_delta; /* If the VM is not running, then do nothing. */ if (!runstate_is_running()) { return; } cur_time = cpu_get_clock(); cur_icount = qemu_get_clock_ns(vm_clock); delta = cur_icount - cur_time; /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */ if (delta > 0 && last_delta + ICOUNT_WOBBLE < delta * 2 && icount_time_shift > 0) { /* The guest is getting too far ahead. Slow time down. */ icount_time_shift--; } if (delta < 0 && last_delta - ICOUNT_WOBBLE > delta * 2 && icount_time_shift < MAX_ICOUNT_SHIFT) { /* The guest is getting too far behind. Speed time up. */ icount_time_shift++; } last_delta = delta; qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift); }
static void migrate_fd_put_ready(void *opaque) { MigrationState *s = opaque; int ret; if (s->state != MIG_STATE_ACTIVE) { DPRINTF("put_ready returning because of non-active state\n"); return; } DPRINTF("iterate\n"); ret = qemu_savevm_state_iterate(s->mon, s->file); if (ret < 0) { migrate_fd_error(s); } else if (ret == 1) { int old_vm_running = runstate_is_running(); DPRINTF("done iterating\n"); vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); if (qemu_savevm_state_complete(s->mon, s->file) < 0) { migrate_fd_error(s); } else { migrate_fd_completed(s); } if (s->state != MIG_STATE_COMPLETED) { if (old_vm_running) { vm_start(); } } } }
static void icount_warp_rt(void *opaque) { if (vm_clock_warp_start == -1) { return; } if (runstate_is_running()) { int64_t clock = qemu_get_clock_ns(rt_clock); int64_t warp_delta = clock - vm_clock_warp_start; if (use_icount == 1) { qemu_icount_bias += warp_delta; } else { /* * In adaptive mode, do not let the vm_clock run too * far ahead of real time. */ int64_t cur_time = cpu_get_clock(); int64_t cur_icount = qemu_get_clock_ns(vm_clock); int64_t delta = cur_time - cur_icount; qemu_icount_bias += MIN(warp_delta, delta); } if (qemu_clock_expired(vm_clock)) { qemu_notify_event(); } } vm_clock_warp_start = -1; }
/*
 * Force a run-state transition even when the VM is already stopped;
 * the previously recorded state is discarded forever.
 */
void vm_stop_force_state(RunState state)
{
    if (!runstate_is_running()) {
        /* Already stopped: just overwrite the recorded state. */
        runstate_set(state);
    } else {
        vm_stop(state);
    }
}
/*
 * Return 1 when this vCPU may execute guest code: no stop request
 * pending, not already stopped, and the VM globally running.
 */
static int cpu_can_run(CPUArchState *env)
{
    return !(env->stop || env->stopped || !runstate_is_running());
}
/*
 * True when this vCPU may execute guest code: no stop request pending,
 * not already stopped, and the VM globally running.
 */
static bool cpu_can_run(CPUState *cpu)
{
    return !(cpu->stop || cpu->stopped || !runstate_is_running());
}
/*
 * Force a run-state transition even if the VM is already stopped;
 * the current state is forgotten forever.
 *
 * Fix: removed the commented-out "mthread" guard — commented-out code is
 * dead weight and the guard was disabled anyway.
 */
void vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        vm_stop(state);
    } else {
        runstate_set(state);
    }
}
/*
 * Force a run-state transition even when the VM is already stopped;
 * the previously recorded state is discarded forever.
 * Returns 0 on success or a negative error from flushing block devices.
 */
int vm_stop_force_state(RunState state)
{
    if (!runstate_is_running()) {
        runstate_set(state);
        /* Make sure to return an error if the flush in a previous
         * vm_stop() failed. */
        return bdrv_flush_all();
    }
    return vm_stop(state);
}
/*
 * Bottom-half handler that pumps the ETRAX DMA controller while the VM
 * is running, and re-arms itself whenever more work remains.
 *
 * Fix: braced both single-statement if bodies (unbraced control bodies
 * are a classic maintenance hazard).
 */
static void DMA_run(void *opaque)
{
    struct fs_dma_ctrl *etraxfs_dmac = opaque;
    int p = 1;   /* default: reschedule even when the VM is paused */

    if (runstate_is_running()) {
        p = etraxfs_dmac_run(etraxfs_dmac);
    }

    if (p) {
        qemu_bh_schedule_idle(etraxfs_dmac->bh);
    }
}
/*
 * Stop a running VM: freeze the clocks and vCPUs, record the new run
 * state, notify listeners, then quiesce and flush all block devices
 * before emitting the QMP STOP event.  No-op if already stopped.
 */
static void do_vm_stop(RunState state)
{
    if (!runstate_is_running()) {
        return;
    }

    cpu_disable_ticks();
    pause_all_vcpus();
    runstate_set(state);
    vm_state_notify(0, state);
    bdrv_drain_all();
    bdrv_flush_all();
    monitor_protocol_event(QEVENT_STOP, NULL);
}
/*
 * Decide whether this vCPU thread can go to sleep: not while a stop or
 * queued work is pending; yes when stopped or the VM is paused; else
 * only when halted with no interrupt work (and no in-kernel irqchip,
 * which would handle halts inside KVM).
 */
static bool cpu_thread_is_idle(CPUArchState *env)
{
    if (env->stop || env->queued_work_first) {
        return false;
    }
    if (env->stopped || !runstate_is_running()) {
        return true;
    }
    return env->halted && !qemu_cpu_has_work(env) && !kvm_irqchip_in_kernel();
}
/*
 * Stop a running VM: freeze the clocks and vCPUs, record the new run
 * state, notify listeners, then flush outstanding AIO and block devices
 * before emitting the QMP STOP event.  No-op if already stopped.
 *
 * Fix: removed the commented-out "mthread" guard (dead code) around
 * runstate_set().
 */
static void do_vm_stop(RunState state)
{
    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        qemu_aio_flush();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}
/*
 * Decide whether this vCPU thread can go to sleep: not while a stop or
 * queued work is pending; yes when stopped or the VM is paused; else
 * only when halted with no interrupt work (and KVM not delivering
 * interrupts asynchronously, which would wake the halt in-kernel).
 */
static bool cpu_thread_is_idle(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu->stopped || !runstate_is_running()) {
        return true;
    }
    return env->halted && !qemu_cpu_has_work(cpu) &&
           !kvm_async_interrupts_enabled();
}
/*
 * use ssd.lock to protect render_update_cookie_num.
 * qxl_render_update is called by io thread or vcpu thread, and the completion
 * callbacks are called by spice_server thread, deferring to a bh called from
 * the io thread.
 */
void qxl_render_update(PCIQXLDevice *qxl)
{
    QXLCookie *cookie;

    qemu_mutex_lock(&qxl->ssd.lock);

    /*
     * If the VM is stopped, or the guest has produced no new commands,
     * render synchronously under the lock and skip the async round-trip.
     */
    if (!runstate_is_running() || !qxl->guest_primary.commands) {
        qxl_render_update_area_unlocked(qxl);
        qemu_mutex_unlock(&qxl->ssd.lock);
        return;
    }

    /* Consume the pending-command flag and account for one in-flight
     * async update before dropping the lock. */
    qxl->guest_primary.commands = 0;
    qxl->render_update_cookie_num++;
    qemu_mutex_unlock(&qxl->ssd.lock);

    /* Kick off an async update of the full surface; the cookie carries
     * the area and is handed back via the completion callback. */
    cookie = qxl_cookie_new(QXL_COOKIE_TYPE_RENDER_UPDATE_AREA,
                            0);
    qxl_set_rect_to_surface(qxl, &cookie->u.render.area);
    qxl_spice_update_area(qxl, 0, &cookie->u.render.area, NULL, 0,
                          1 /* clear_dirty_region */, QXL_ASYNC, cookie);
}
/*
 * vmstate pre_save hook: snapshot the KVM paravirtual clock into the
 * device state so it can be migrated/saved.
 *
 * Fix: kvm_vm_ioctl() returns a negative errno on failure, so the error
 * message must use strerror(-ret) — strerror(ret) on a negative value
 * prints "Unknown error".
 */
static void kvmclock_pre_save(void *opaque)
{
    KVMClockState *s = opaque;
    struct kvm_clock_data data;
    int ret;

    /* A previously captured value is still valid; keep it. */
    if (s->clock_valid) {
        return;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, &data);
    if (ret < 0) {
        fprintf(stderr, "KVM_GET_CLOCK failed: %s\n", strerror(-ret));
        data.clock = 0;
    }
    s->clock = data.clock;
    /*
     * If the VM is stopped, declare the clock state valid to avoid re-reading
     * it on next vmsave (which would return a different value). Will be reset
     * when the VM is continued.
     */
    s->clock_valid = !runstate_is_running();
}
/*
 * Drive one iteration of an outgoing migration over an fd transport.
 * When the iterator reports the final round, stop the VM, write out the
 * remaining state, clean up the fd, and either enter the post-migrate
 * state or restart the VM on error.
 */
void migrate_fd_put_ready(void *opaque)
{
    FdMigrationState *s = opaque;

    if (s->state != MIG_STATE_ACTIVE) {
        DPRINTF("put_ready returning because of non-active state\n");
        return;
    }

    DPRINTF("iterate\n");
    /* A return of 1 means the live phase converged: finish up now. */
    if (qemu_savevm_state_iterate(s->mon, s->file) == 1) {
        int state;
        /* Remember whether to restart the guest if anything fails. */
        int old_vm_running = runstate_is_running();

        DPRINTF("done iterating\n");
        vm_stop(RSTATE_PRE_MIGRATE);

        if ((qemu_savevm_state_complete(s->mon, s->file)) < 0) {
            if (old_vm_running) {
                vm_start();
            }
            state = MIG_STATE_ERROR;
        } else {
            state = MIG_STATE_COMPLETED;
        }
        /* Cleanup can still fail (e.g. final flush); demote to error. */
        if (migrate_fd_cleanup(s) < 0) {
            if (old_vm_running) {
                vm_start();
            }
            state = MIG_STATE_ERROR;
        }
        if (state == MIG_STATE_COMPLETED) {
            /* Source side stays paused after a successful migration. */
            runstate_set(RSTATE_POST_MIGRATE);
        }
        s->state = state;
        notifier_list_notify(&migration_state_notifiers, NULL);
    }
}
/*
 * Drive one iteration of an outgoing migration.  On the final iteration
 * stop the VM, write the remaining state, record total/downtime stats,
 * and restart the VM if completion failed while it had been running.
 */
void migrate_fd_put_ready(MigrationState *s)
{
    int ret;

    if (s->state != MIG_STATE_ACTIVE) {
        DPRINTF("put_ready returning because of non-active state\n");
        return;
    }

    DPRINTF("iterate\n");
    ret = qemu_savevm_state_iterate(s->file);
    if (ret < 0) {
        migrate_fd_error(s);
    } else if (ret == 1) {
        /* Final round: the live phase converged. */
        int old_vm_running = runstate_is_running();
        int64_t start_time, end_time;

        DPRINTF("done iterating\n");
        start_time = qemu_get_clock_ms(rt_clock);
        /* Wake a suspended guest so its state is consistent for saving. */
        qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
        vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);

        if (qemu_savevm_state_complete(s->file) < 0) {
            migrate_fd_error(s);
        } else {
            migrate_fd_completed(s);
        }
        end_time = qemu_get_clock_ms(rt_clock);
        /* total_time held the start timestamp until now. */
        s->total_time = end_time - s->total_time;
        s->downtime = end_time - start_time;
        if (s->state != MIG_STATE_COMPLETED) {
            if (old_vm_running) {
                vm_start();
            }
        }
    }
}
/*
 * True when the VM is effectively stopped for COLO purposes: either in
 * the COLO checkpoint run state or not running at all.
 */
static bool colo_runstate_is_stopped(void)
{
    if (runstate_check(RUN_STATE_COLO)) {
        return true;
    }
    return !runstate_is_running();
}
static void *migration_thread(void *opaque) { MigrationState *s = opaque; int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); int64_t initial_bytes = 0; int64_t max_size = 0; int64_t start_time = initial_time; bool old_vm_running = false; DPRINTF("beginning savevm\n"); qemu_savevm_state_begin(s->file, &s->params); s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; migrate_set_state(s, MIG_STATE_SETUP, MIG_STATE_ACTIVE); DPRINTF("setup complete\n"); while (s->state == MIG_STATE_ACTIVE) { int64_t current_time; uint64_t pending_size; if (!qemu_file_rate_limit(s->file)) { DPRINTF("iterate\n"); pending_size = qemu_savevm_state_pending(s->file, max_size); DPRINTF("pending size %" PRIu64 " max %" PRIu64 "\n", pending_size, max_size); if (pending_size && pending_size >= max_size) { qemu_savevm_state_iterate(s->file); } else { int ret; DPRINTF("done iterating\n"); qemu_mutex_lock_iothread(); start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER); old_vm_running = runstate_is_running(); ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); if (ret >= 0) { qemu_file_set_rate_limit(s->file, INT64_MAX); qemu_savevm_state_complete(s->file); } qemu_mutex_unlock_iothread(); if (ret < 0) { migrate_set_state(s, MIG_STATE_ACTIVE, MIG_STATE_ERROR); break; } if (!qemu_file_get_error(s->file)) { migrate_set_state(s, MIG_STATE_ACTIVE, MIG_STATE_COMPLETED); break; } } } if (qemu_file_get_error(s->file)) { migrate_set_state(s, MIG_STATE_ACTIVE, MIG_STATE_ERROR); break; } current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); if (current_time >= initial_time + BUFFER_DELAY) { uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes; uint64_t time_spent = current_time - initial_time; double bandwidth = transferred_bytes / time_spent; max_size = bandwidth * migrate_max_downtime() / 1000000; s->mbps = time_spent ? 
(((double) transferred_bytes * 8.0) / ((double) time_spent / 1000.0)) / 1000.0 / 1000.0 : -1; DPRINTF("transferred %" PRIu64 " time_spent %" PRIu64 " bandwidth %g max_size %" PRId64 "\n", transferred_bytes, time_spent, bandwidth, max_size); /* if we haven't sent anything, we don't want to recalculate 10000 is a small enough number for our purposes */ if (s->dirty_bytes_rate && transferred_bytes > 10000) { s->expected_downtime = s->dirty_bytes_rate / bandwidth; } qemu_file_reset_rate_limit(s->file); initial_time = current_time; initial_bytes = qemu_ftell(s->file); } if (qemu_file_rate_limit(s->file)) { /* usleep expects microseconds */ g_usleep((initial_time + BUFFER_DELAY - current_time)*1000); } } qemu_mutex_lock_iothread(); if (s->state == MIG_STATE_COMPLETED) { int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); s->total_time = end_time - s->total_time; s->downtime = end_time - start_time; runstate_set(RUN_STATE_POSTMIGRATE); } else { if (old_vm_running) { vm_start(); } } qemu_bh_schedule(s->cleanup_bh); qemu_mutex_unlock_iothread(); return NULL; }
/*
 * True when this vCPU is not executing: either individually stopped or
 * the whole VM is paused.
 */
bool cpu_is_stopped(CPUState *cpu)
{
    if (cpu->stopped) {
        return true;
    }
    return !runstate_is_running();
}
/*
 * True when this vCPU is not executing: either the whole VM is paused
 * or the vCPU is individually stopped.
 */
bool cpu_is_stopped(CPUState *cpu)
{
    if (!runstate_is_running()) {
        return true;
    }
    return cpu->stopped;
}
static void *buffered_file_thread(void *opaque) { MigrationState *s = opaque; int64_t initial_time = qemu_get_clock_ms(rt_clock); int64_t max_size = 0; bool last_round = false; int ret; qemu_mutex_lock_iothread(); DPRINTF("beginning savevm\n"); ret = qemu_savevm_state_begin(s->file, &s->params); if (ret < 0) { DPRINTF("failed, %d\n", ret); qemu_mutex_unlock_iothread(); goto out; } qemu_mutex_unlock_iothread(); while (true) { int64_t current_time = qemu_get_clock_ms(rt_clock); uint64_t pending_size; qemu_mutex_lock_iothread(); if (s->state != MIG_STATE_ACTIVE) { DPRINTF("put_ready returning because of non-active state\n"); qemu_mutex_unlock_iothread(); break; } if (s->complete) { qemu_mutex_unlock_iothread(); break; } if (s->bytes_xfer < s->xfer_limit) { DPRINTF("iterate\n"); pending_size = qemu_savevm_state_pending(s->file, max_size); DPRINTF("pending size %lu max %lu\n", pending_size, max_size); if (pending_size && pending_size >= max_size) { ret = qemu_savevm_state_iterate(s->file); if (ret < 0) { qemu_mutex_unlock_iothread(); break; } } else { int old_vm_running = runstate_is_running(); int64_t start_time, end_time; DPRINTF("done iterating\n"); start_time = qemu_get_clock_ms(rt_clock); qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER); if (old_vm_running) { vm_stop(RUN_STATE_FINISH_MIGRATE); } else { vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); } ret = qemu_savevm_state_complete(s->file); if (ret < 0) { qemu_mutex_unlock_iothread(); break; } else { migrate_fd_completed(s); } end_time = qemu_get_clock_ms(rt_clock); s->total_time = end_time - s->total_time; s->downtime = end_time - start_time; if (s->state != MIG_STATE_COMPLETED) { if (old_vm_running) { vm_start(); } } last_round = true; } } qemu_mutex_unlock_iothread(); if (current_time >= initial_time + BUFFER_DELAY) { uint64_t transferred_bytes = s->bytes_xfer; uint64_t time_spent = current_time - initial_time; double bandwidth = transferred_bytes / time_spent; max_size = bandwidth * migrate_max_downtime() 
/ 1000000; DPRINTF("transferred %" PRIu64 " time_spent %" PRIu64 " bandwidth %g max_size %" PRId64 "\n", transferred_bytes, time_spent, bandwidth, max_size); s->bytes_xfer = 0; initial_time = current_time; } if (!last_round && (s->bytes_xfer >= s->xfer_limit)) { /* usleep expects microseconds */ g_usleep((initial_time + BUFFER_DELAY - current_time)*1000); } ret = buffered_flush(s); if (ret < 0) { break; } } out: if (ret < 0) { migrate_fd_error(s); } g_free(s->buffer); return NULL; }
/*
 * Return 1 when this vCPU is not executing: either the whole VM is
 * paused or the vCPU is individually stopped.
 */
int cpu_is_stopped(CPUArchState *env)
{
    if (!runstate_is_running()) {
        return 1;
    }
    return env->stopped != 0;
}
static int save_supported_msrs() { int ret; /* IMPORTANT: See note for KVM_GET_MSRS below in the ioctl wrapper. */ #if 0 int i, n; n = 0; g_kvm_msrs.entries[n++].index = MSR_IA32_SYSENTER_CS; g_kvm_msrs.entries[n++].index = MSR_IA32_SYSENTER_ESP; g_kvm_msrs.entries[n++].index = MSR_IA32_SYSENTER_EIP; g_kvm_msrs.entries[n++].index = MSR_PAT; if (has_msr_star) { g_kvm_msrs.entries[n++].index = MSR_STAR; } if (has_msr_hsave_pa) { g_kvm_msrs.entries[n++].index = MSR_VM_HSAVE_PA; } if (has_msr_tsc_deadline) { g_kvm_msrs.entries[n++].index = MSR_IA32_TSCDEADLINE; } if (has_msr_misc_enable) { g_kvm_msrs.entries[n++].index = MSR_IA32_MISC_ENABLE; } if (!env->tsc_valid) { g_kvm_msrs.entries[n++].index = MSR_IA32_TSC; env->tsc_valid = !runstate_is_running(); } if (long_mode_supported) { g_kvm_msrs.entries[n++].index = MSR_CSTAR; g_kvm_msrs.entries[n++].index = MSR_KERNELGSBASE; g_kvm_msrs.entries[n++].index = MSR_FMASK; g_kvm_msrs.entries[n++].index = MSR_LSTAR; } g_kvm_msrs.entries[n++].index = MSR_KVM_SYSTEM_TIME; g_kvm_msrs.entries[n++].index = MSR_KVM_WALL_CLOCK; if (has_msr_async_pf_en) { g_kvm_msrs.entries[n++].index = MSR_KVM_ASYNC_PF_EN; } if (has_msr_pv_eoi_en) { g_kvm_msrs.entries[n++].index = MSR_KVM_PV_EOI_EN; } if (env->mcg_cap) { g_kvm_msrs.entries[n++].index = MSR_MCG_STATUS; g_kvm_msrs.entries[n++].index = MSR_MCG_CTL; for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) { g_kvm_msrs.entries[n++].index = MSR_MC0_CTL + i; } } g_kvm_msrs.info.nmsrs = n; #endif /* if 0 */ ret = NEXT_FNC(ioctl)(g_vcpu_fd, KVM_GET_MSRS, &g_kvm_msrs); if (ret < 0) { return ret; } return 0; }