Example #1
File: cpus.c  Project: yujinyu/QEMU_PACER
bool tcg_cpu_exec(void)
{
    int ret = 0;

    if (next_cpu == NULL)
        next_cpu = first_cpu;
    for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
        CPUState *env = cur_cpu = next_cpu;

        qemu_clock_enable(vm_clock,
                          (cur_cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (qemu_alarm_pending())
            break;
        if (cpu_can_run(env))
            ret = qemu_cpu_exec(env);
        else if (env->stop)
            break;

        if (ret == EXCP_DEBUG) {
            gdb_set_stop_cpu(env);
            debug_requested = EXCP_DEBUG;
            break;
        }
    }
    return tcg_has_work();
}
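A minimal standalone sketch of the resume-from-cursor round-robin pattern used by tcg_cpu_exec above: the cursor pointer is kept across calls, so a pass that stops early resumes with the CPU it stopped on. The vcpu_t type and the run_one callback are hypothetical stand-ins, not identifiers from the project listed here.

typedef struct vcpu {
    struct vcpu *next;
    int runnable;
} vcpu_t;

static vcpu_t *first_vcpu;   /* head of the singly linked CPU list */
static vcpu_t *next_vcpu;    /* round-robin cursor, preserved across calls */

static void run_all_round_robin(void (*run_one)(vcpu_t *))
{
    if (next_vcpu == NULL)
        next_vcpu = first_vcpu;              /* wrap back to the head */
    for (; next_vcpu != NULL; next_vcpu = next_vcpu->next) {
        if (next_vcpu->runnable)
            run_one(next_vcpu);
        /* breaking out here would leave next_vcpu at the current CPU,
           so the following call continues the scan from the same place */
    }
}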
Example #2

int qemu_calculate_timeout(void)
{
    int timeout;

    if (!vm_running)
        timeout = 5000;
    else if (tcg_has_work())
        timeout = 0;
    else {
#ifdef WIN32
        /* This corresponds to the case where the emulated system is
         * totally idle and waiting for i/o. The problem is that on
         * Windows, the default value will prevent Windows user events
         * from being delivered in less than 5 seconds.
         *
         * Upstream contains a different way to handle this, for now
         * this hack should be sufficient until we integrate it into
         * our tree.
         */
        timeout = 1000/15;  /* deliver user events every 1/15th of a second */
#else
        timeout = 5000;
#endif
        int64_t timeout_ns = (int64_t)timeout * 1000000LL;
        timeout_ns = qemu_soonest_timeout(
                timeout_ns, timerlistgroup_deadline_ns(&main_loop_tlg));
        timeout = (int)((timeout_ns + 999999LL) / 1000000LL);
    }

    return timeout;
}
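A hedged sketch of the deadline clamping done at the end of this variant: the default poll timeout is converted to nanoseconds, capped at the soonest pending timer deadline, then rounded back up to whole milliseconds so the wait never sleeps past a timer. The helper name clamp_timeout_ms and its standalone signature are illustrative, not part of the QEMU API.

#include <stdint.h>

/* Illustrative only: clamp a default poll timeout (in ms) against a timer
 * deadline given in ns, rounding up so the caller never overshoots it. */
static int clamp_timeout_ms(int default_ms, int64_t timer_deadline_ns)
{
    int64_t timeout_ns = (int64_t)default_ms * 1000000LL;

    if (timer_deadline_ns >= 0 && timer_deadline_ns < timeout_ns)
        timeout_ns = timer_deadline_ns;          /* soonest deadline wins */

    return (int)((timeout_ns + 999999LL) / 1000000LL);   /* ceil to ms */
}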
Example #3
int qemu_calculate_timeout(void)
{
#ifndef CONFIG_IOTHREAD
    int timeout;

    if (!vm_running)
        timeout = 5000;
    else if (tcg_has_work())
        timeout = 0;
    else if (!use_icount) {
#ifdef WIN32
        /* This corresponds to the case where the emulated system is
         * totally idle and waiting for i/o. The problem is that on
         * Windows, the default value will prevent Windows user events
         * from being delivered in less than 5 seconds.
         *
         * Upstream contains a different way to handle this, for now
         * this hack should be sufficient until we integrate it into
         * our tree.
         */
        timeout = 1000/15;  /* deliver user events every 1/15th of a second */
#else
        timeout = 5000;
#endif
    } else {
        /* XXX: use timeout computed from timers */
        int64_t add;
        int64_t delta;
        /* Advance virtual time to the next event.  */
        delta = qemu_icount_delta();
        if (delta > 0) {
            /* If virtual time is ahead of real time then just
               wait for IO.  */
            timeout = (delta + 999999) / 1000000;
        } else {
            /* Wait for either IO to occur or the next
               timer event.  */
            add = qemu_next_deadline();
            /* We advance the timer before checking for IO.
               Limit the amount we advance so that early IO
               activity won't get the guest too far ahead.  */
            if (add > 10000000)
                add = 10000000;
            delta += add;
            qemu_icount += qemu_icount_round(add);
            timeout = delta / 1000000;
            if (timeout < 0)
                timeout = 0;
        }
    }

    return timeout;
#else /* CONFIG_IOTHREAD */
    return 1000;
#endif
}
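The icount branch in this variant advances virtual time by a bounded amount before waiting, so early I/O cannot leave the guest running far ahead of real time. A minimal sketch of just that arithmetic, with made-up parameters (virtual_ahead_ns standing in for qemu_icount_delta(), next_deadline_ns for qemu_next_deadline()) and the qemu_icount bookkeeping omitted:

#include <stdint.h>

/* Illustrative sketch: if the guest clock is already ahead of real time,
 * wait it out; otherwise advance toward the next timer deadline, capped
 * at 10 ms, and wait for whatever positive delta remains. */
static int icount_timeout_ms(int64_t virtual_ahead_ns, int64_t next_deadline_ns)
{
    if (virtual_ahead_ns > 0)
        return (int)((virtual_ahead_ns + 999999) / 1000000);   /* round up */

    int64_t advance = next_deadline_ns;
    if (advance > 10000000)                   /* cap the advance at 10 ms */
        advance = 10000000;

    int64_t delta = virtual_ahead_ns + advance;
    return delta > 0 ? (int)(delta / 1000000) : 0;
}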
Example #4
File: cpus.c  Project: yujinyu/QEMU_PACER
static void qemu_wait_io_event(CPUState *env)
{
    while (!tcg_has_work())
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_wait_io_event_common(env);
}
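The empty qemu_fair_mutex critical section above is a hand-off point: after dropping the global lock, the vCPU thread briefly takes and releases a second mutex so that other threads queued on the global lock get a chance to acquire it before this thread grabs it again. A minimal pthread sketch of that fairness pattern, with illustrative names rather than the QEMU locking API:

#include <pthread.h>

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t fair_lock   = PTHREAD_MUTEX_INITIALIZER;

/* Called by the thread that would otherwise re-acquire global_lock
 * immediately; the empty fair_lock section opens a window for waiters. */
static void yield_global_lock(void)
{
    pthread_mutex_unlock(&global_lock);

    pthread_mutex_lock(&fair_lock);      /* brief detour...               */
    pthread_mutex_unlock(&fair_lock);    /* ...lets waiting threads win   */

    pthread_mutex_lock(&global_lock);
}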