/* modify the current timer so that it will be fired when current_time
   >= expire_time. The corresponding callback will be called. */
static void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time)
{
    QEMUTimer **pt, *t;

    qemu_del_timer(ts);

    /* add the timer in the sorted list */
    /* NOTE: this code must be signal safe because
       qemu_timer_expired() can be called from a signal. */
    pt = &active_timers[ts->clock->type];
    for(;;) {
        t = *pt;
        if (!qemu_timer_expired_ns(t, expire_time)) {
            break;
        }
        pt = &t->next;
    }
    ts->expire_time = expire_time;
    ts->next = *pt;
    *pt = ts;

    /* Rearm if necessary */
    if (pt == &active_timers[ts->clock->type]) {
        if (!alarm_timer->pending) {
            qemu_rearm_alarm_timer(alarm_timer);
        }
        /* Interrupt execution to force deadline recalculation.  */
        qemu_clock_warp(ts->clock);
        if (use_icount) {
            qemu_notify_event();
        }
    }
}
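The sorted-insert loop above stops at the first list slot whose timer does not expire before the new deadline. For reference, a minimal sketch of the comparison helper it relies on, consistent with how the loop uses it in qemu-timer.c of this era (a NULL head terminates the scan):

static bool qemu_timer_expired_ns(QEMUTimer *timer_head, int64_t current_time)
{
    /* A NULL head means the list is exhausted; otherwise a timer counts
       as expired once its deadline is at or before current_time. */
    return timer_head && (timer_head->expire_time <= current_time);
}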
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock.  */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUArchState *env = next_cpu;
        CPUState *cpu = ENV_GET_CPU(env);

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}
static void qemu_tcg_wait_io_event(void)
{
    CPUArchState *env;

    while (all_cpu_threads_idle()) {
#ifdef CONFIG_S2E_DEBUG
        s2e_debug_print("CPU: qemu_tcg_wait_io_event: waiting for awaken cpu %p\n",
                        tcg_halt_cond);
#endif
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
#ifdef CONFIG_S2E_DEBUG
        s2e_debug_print("CPU: qemu_tcg_wait_io_event iothread_requesting_mutex\n");
#endif
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}
bool cpu_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock.  */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(env)) {
            if (kvm_enabled()) {
                r = kvm_cpu_exec(env);
                qemu_kvm_eat_signals(env);
            } else {
                r = tcg_cpu_exec(env);
            }
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            break;
        }
    }
    exit_request = 0;
    return !all_cpu_threads_idle();
}
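cpu_can_run() gates each vCPU before it is handed to KVM or TCG. A sketch of what that predicate checks in this generation of cpus.c, where CPUState is still the per-arch env; the exact body may differ between releases:

static int cpu_can_run(CPUState *env)
{
    /* Do not run a vCPU with a stop request pending, one that is
       already stopped, or any vCPU while the VM itself is not running. */
    if (env->stop) {
        return 0;
    }
    if (env->stopped || !vm_running) {
        return 0;
    }
    return 1;
}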
static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}
bool cpu_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock.  */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

#ifndef CONFIG_IOTHREAD
        if (qemu_alarm_pending()) {
            break;
        }
#endif
        if (cpu_can_run(env)) {
            if (kvm_enabled()) {
                r = kvm_cpu_exec(env);
                qemu_kvm_eat_signals(env);
            } else {
                r = tcg_cpu_exec(env);
            }
            if (r == EXCP_TRIPLE) {
                cpu_dump_state(env, stderr, fprintf, 0);
                fprintf(stderr, "Triple fault. Halting for inspection via"
                        " QEMU monitor.\n");
                if (gdbserver_running()) {
                    r = EXCP_DEBUG;
                } else {
                    vm_stop(0);
                    break;
                }
            }
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            break;
        }
    }
    exit_request = 0;
    return !all_cpu_threads_idle();
}
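The #ifndef CONFIG_IOTHREAD branch leaves the round-robin loop as soon as the host alarm has fired, so expired timers get serviced promptly in the single-threaded build. qemu_alarm_pending() is just a flag read; a sketch, assuming the alarm_timer bookkeeping of this era:

int qemu_alarm_pending(void)
{
    /* Set by the host alarm (e.g. SIGALRM) handler, cleared once the
       main loop has run the expired timers. */
    return alarm_timer->pending;
}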
static void qemu_tcg_wait_io_event(void)
{
    CPUArchState *env;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(ENV_GET_CPU(env));
    }
}
static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        qemu_wait_io_event_common(cpu);
    }
}
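all_cpu_threads_idle(), which guards the warp-and-wait loop in every variant above, simply scans the CPU list. A sketch matching the CPUState-based code of this version; cpu_thread_is_idle() is the per-CPU check and is assumed here:

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    /* The virtual clock may only be warped when every vCPU is idle,
       otherwise virtual time would jump ahead of running guest code. */
    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}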
int main_loop_wait(int nonblocking)
{
    int ret;
    uint32_t timeout = UINT32_MAX;
    int64_t timeout_ns;

    if (nonblocking) {
        timeout = 0;
    }

    /* poll any events */
    g_array_set_size(gpollfds, 0); /* reset for new iteration */
    /* XXX: separate device handlers from system ones */
#ifdef CONFIG_SLIRP
    slirp_pollfds_fill(gpollfds, &timeout);
#endif

    if (timeout == UINT32_MAX) {
        timeout_ns = -1;
    } else {
        timeout_ns = (uint64_t)timeout * (int64_t)(SCALE_MS);
    }

    timeout_ns = qemu_soonest_timeout(timeout_ns,
                                      timerlistgroup_deadline_ns(
                                          &main_loop_tlg));

    ret = os_host_main_loop_wait(timeout_ns);
#ifdef CONFIG_SLIRP
    slirp_pollfds_poll(gpollfds, (ret < 0));
#endif

    /* CPU thread can infinitely wait for event after
       missing the warp */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
    qemu_clock_run_all_timers();

    return ret;
}
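qemu_soonest_timeout() merges the poll timeout with the earliest timer deadline while treating -1 as "block forever". The trick is an unsigned comparison, under which -1 becomes the maximal value; a sketch matching the helper in include/qemu/timer.h:

static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2)
{
    /* Cast to unsigned so that -1 (infinite) compares greater than any
       finite timeout and therefore never wins. */
    return ((uint64_t) timeout1 < (uint64_t) timeout2) ? timeout1 : timeout2;
}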