Example #1
bool cpu_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock.  */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(env)) {
            if (kvm_enabled()) {
                r = kvm_cpu_exec(env);
                qemu_kvm_eat_signals(env);
            } else {
                r = tcg_cpu_exec(env);
            }
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            break;
        }
    }
    exit_request = 0;
    return !all_cpu_threads_idle();
}
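The boolean return value feeds a driver loop. As a rough sketch (assuming a qemu_tcg_cpu_thread_fn along the lines of QEMU's cpus.c from this era; the exact body varies between versions), the dedicated TCG thread alternates between running the vCPUs and waiting for I/O events:

/* Hedged sketch of the TCG thread that drives cpu_exec_all(); the real
 * qemu_tcg_cpu_thread_fn differs in detail across QEMU versions. */
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    /* TCG executes guest code under the big QEMU lock. */
    qemu_mutex_lock(&qemu_global_mutex);

    while (1) {
        cpu_exec_all();            /* run each vCPU for one slice */
        qemu_tcg_wait_io_event();  /* sleep until something wakes us */
    }
    return NULL;
}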
Example #2
File: cpus.c Project: 0bliv10n/s2e
static void qemu_tcg_wait_io_event(void)
{
    CPUArchState *env;

    while (all_cpu_threads_idle()) {
#ifdef CONFIG_S2E_DEBUG
        s2e_debug_print("CPU: qemu_tcg_wait_io_event: waiting for awaken cpu %p\n", tcg_halt_cond);
#endif
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
#ifdef CONFIG_S2E_DEBUG
        s2e_debug_print("CPU: qemu_tcg_wait_io_event iothread_requesting_mutex\n");
#endif
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}
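The iothread_requesting_mutex loop above is one half of a handshake; the other half lives on the I/O-thread side. A simplified sketch, assuming a qemu_mutex_lock_iothread() of the same vintage (details vary by version):

/* Sketch: the I/O thread raises the flag, kicks a vCPU so the TCG loop
 * drops the lock, then broadcasts qemu_io_proceeded_cond when done. */
void qemu_mutex_lock_iothread(void)
{
    iothread_requesting_mutex = true;
    if (qemu_mutex_trylock(&qemu_global_mutex)) {
        /* Lock is busy: kick a vCPU out of the TCG loop, then block. */
        qemu_cpu_kick_thread(first_cpu);
        qemu_mutex_lock(&qemu_global_mutex);
    }
    iothread_requesting_mutex = false;
    qemu_cond_broadcast(&qemu_io_proceeded_cond);
}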
Example #3
static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}
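The fairness dance only makes sense next to the contending side. A minimal sketch of the counterpart, assuming the I/O thread wraps its acquisition of qemu_global_mutex in qemu_fair_mutex as the older qemu_mutex_lock_iothread() did:

/* Sketch: holding qemu_fair_mutex while taking qemu_global_mutex guarantees
 * that the vCPU thread's lock/unlock pair above cannot starve this thread. */
void qemu_mutex_lock_iothread(void)
{
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_lock(&qemu_global_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);
}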
Example #4
void qemu_clock_warp(QEMUClock *clock)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (clock != vm_clock || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
     * ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest vm_clock timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(vm_clock)) {
        qemu_del_timer(icount_warp_timer);
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
    deadline = qemu_clock_deadline(vm_clock);
    if (deadline > 0) {
        /*
         * Ensure the vm_clock proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * vm_clock.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending vm_clock timer; rather
         * time could just advance to the next vm_clock event.  Instead, we
         * do stop VCPUs and only advance vm_clock after some "real" time
         * (related to the time left until the next event) has passed.  The
         * rt_clock timer does this.  This keeps the warps from being too
         * visible externally; for example, you will not be sending network
         * packets continuously instead of every 100ms.
         */
        qemu_mod_timer(icount_warp_timer, vm_clock_warp_start + deadline);
    } else {
        LOGD_CPUS("%s=>qemu_notify_event()\n", __func__);
        qemu_notify_event();
    }
}
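For reference, a simplified sketch of the icount_warp_rt() callback that the icount_warp_timer fires (modeled on the cpus.c of this era; the adaptive use_icount > 1 case is left out):

/* Sketch: fold the wall-clock time that passed while the vCPUs slept into
 * the icount bias, so vm_clock jumps ahead to the pending deadline. */
static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;                          /* no warp in progress */
    }
    if (runstate_is_running()) {
        int64_t clock = qemu_get_clock_ns(rt_clock);
        qemu_icount_bias += clock - vm_clock_warp_start;
        if (qemu_clock_expired(vm_clock)) {
            qemu_notify_event();         /* run the now-due timers */
        }
    }
    vm_clock_warp_start = -1;
}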
Example #5
File: cpus.c Project: linuxcer/qemu
bool cpu_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock.  */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

#ifndef CONFIG_IOTHREAD
        if (qemu_alarm_pending()) {
            break;
        }
#endif
        if (cpu_can_run(env)) {
            if (kvm_enabled()) {
                r = kvm_cpu_exec(env);
                qemu_kvm_eat_signals(env);
            } else {
                r = tcg_cpu_exec(env);
            }
            if (r == EXCP_TRIPLE) {
                cpu_dump_state(env, stderr, fprintf, 0);
                fprintf(stderr, "Triple fault.  Halting for inspection via"
                        " QEMU monitor.\n");
                if (gdbserver_running()) {
                    r = EXCP_DEBUG;
                } else {
                    vm_stop(0);
                    break;
                }
            }
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            break;
        }
    }
    exit_request = 0;
    return !all_cpu_threads_idle();
}
Example #6
static void qemu_tcg_wait_io_event(void)
{
    CPUArchState *env;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(ENV_GET_CPU(env));
    }
}
Example #7
static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        qemu_wait_io_event_common(cpu);
    }
}
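In still newer trees the open-coded walk over first_cpu/next_cpu gave way to the CPU_FOREACH macro; as a sketch (assuming the macro from include/qom/cpu.h in later QEMU), the final loop would then read roughly:

    CPUState *cpu;

    /* CPU_FOREACH hides the global CPU list behind a macro. */
    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }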
Example #8
void qemu_clock_warp(QEMUClockType type)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(QEMU_CLOCK_VIRTUAL)) {
        timer_del(icount_warp_timer);
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    vm_clock_warp_start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    /* We want to use the earliest deadline from ALL vm_clocks */
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

    /* Maintain prior (possibly buggy) behaviour where if no deadline
     * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
     * INT32_MAX nanoseconds ahead, we still use INT32_MAX
     * nanoseconds.
     */
    if ((deadline < 0) || (deadline > INT32_MAX)) {
        deadline = INT32_MAX;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some e"real" time, (related to the time left until the next
         * event) has passed. The QEMU_CLOCK_REALTIME timer will do this.
         * This avoids that the warps are visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        timer_mod(icount_warp_timer, vm_clock_warp_start + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
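For completeness, a sketch of where icount_warp_timer comes from: setup code along the lines of QEMU's configure_icount() creates it on the real-time clock (the helper name here is illustrative, not QEMU's):

/* Illustrative setup sketch; in QEMU this happens inside configure_icount().
 * The warp timer runs on QEMU_CLOCK_REALTIME and fires icount_warp_rt(). */
static QEMUTimer *icount_warp_timer;

static void init_icount_warp_timer(void)
{
    icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
                                     icount_warp_rt, NULL);
}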