Example #1
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        env->thread_id = qemu_get_thread_id();
        env->created = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (1) {
        cpu_exec_all();
        if (use_icount && qemu_next_icount_deadline() <= 0) {
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
Example #2
static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (vm_running) {
        int64_t clock = qemu_get_clock_ns(rt_clock);
        int64_t warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 1) {
            qemu_icount_bias += warp_delta;
        } else {
            /*
             * In adaptive mode, do not let the vm_clock run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = qemu_get_clock_ns(vm_clock);
            int64_t delta = cur_time - cur_icount;
            qemu_icount_bias += MIN(warp_delta, delta);
        }
        if (qemu_timer_expired(active_timers[QEMU_CLOCK_VIRTUAL],
                               qemu_get_clock_ns(vm_clock))) {
            qemu_notify_event();
        }
    }
    vm_clock_warp_start = -1;
}
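For context, here is a minimal sketch of how this bias might feed vm_clock reads in icount mode; all three variables are hypothetical stand-ins for state that real QEMU keeps internally:

#include <stdint.h>

extern int64_t qemu_icount_bias;   /* adjusted by icount_warp_rt() above */
extern int64_t executed_insns;     /* guest instructions executed so far */
extern int icount_time_shift;      /* 2^shift nanoseconds per instruction */

static int64_t vm_clock_ns_sketch(void)
{
    return qemu_icount_bias + (executed_insns << icount_time_shift);
}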
Example #3
/* modify the current timer so that it will be fired when current_time
   >= expire_time. The corresponding callback will be called. */
static void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time)
{
    QEMUTimer **pt, *t;

    qemu_del_timer(ts);

    /* add the timer in the sorted list */
    /* NOTE: this code must be signal safe because
       qemu_timer_expired() can be called from a signal. */
    pt = &active_timers[ts->clock->type];
    for(;;) {
        t = *pt;
        if (!qemu_timer_expired_ns(t, expire_time)) {
            break;
        }
        pt = &t->next;
    }
    ts->expire_time = expire_time;
    ts->next = *pt;
    *pt = ts;

    /* Rearm if necessary  */
    if (pt == &active_timers[ts->clock->type]) {
        if (!alarm_timer->pending) {
            qemu_rearm_alarm_timer(alarm_timer);
        }
        /* Interrupt execution to force deadline recalculation.  */
        qemu_clock_warp(ts->clock);
        if (use_icount) {
            qemu_notify_event();
        }
    }
}
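A minimal usage sketch for this era's timer API (the callback name and the 10 ms interval are invented for illustration; qemu_new_timer_ns and qemu_mod_timer are the companion calls):

static QEMUTimer *my_timer;        /* hypothetical device timer */

static void my_timer_cb(void *opaque)
{
    /* do periodic work, then re-arm 10 ms of virtual time ahead */
    qemu_mod_timer(my_timer, qemu_get_clock_ns(vm_clock) + 10000000);
}

static void my_device_init(void)
{
    my_timer = qemu_new_timer_ns(vm_clock, my_timer_cb, NULL);
    qemu_mod_timer(my_timer, qemu_get_clock_ns(vm_clock) + 10000000);
}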
Example #4
/* triggered by SCLP's read_event_data -
 * copy console data byte-stream into provided (SCLP) buffer
 */
static void get_console_data(SCLPEvent *event, uint8_t *buf, size_t *size,
                             int avail)
{
    SCLPConsole *cons = SCLP_CONSOLE(event);

    /* the first byte is 0x00, indicating that an ASCII string follows */
    *buf++ = '\0';
    avail--;
    /* if all data fit into provided SCLP buffer */
    if (avail >= cons->iov_sclp_rest) {
        /* copy character byte-stream to SCLP buffer */
        memcpy(buf, &cons->iov[cons->iov_sclp], cons->iov_sclp_rest);
        *size = cons->iov_sclp_rest + 1;
        cons->iov_sclp = 0;
        cons->iov_bs = 0;
        cons->iov_data_len = 0;
        cons->iov_sclp_rest = 0;
        event->event_pending = false;
        /* data provided and no more data pending */
    } else {
        /* if provided buffer is too small, just copy part */
        memcpy(buf, &cons->iov[cons->iov_sclp], avail);
        *size = avail + 1;
        cons->iov_sclp_rest -= avail;
        cons->iov_sclp += avail;
        /* more data pending */
    }
    if (cons->notify) {
        cons->notify = false;
        qemu_notify_event();
    }
}
Example #5
static void nbd_client_closed(NBDClient *client)
{
    nb_fds--;
    if (nb_fds == 0 && !persistent && state == RUNNING) {
        state = TERMINATE;
    }
    qemu_notify_event();
    nbd_client_put(client);
}
Example #6
static void nbd_request_put(NBDRequest *req)
{
    NBDClient *client = req->client;
    QSIMPLEQ_INSERT_HEAD(&client->exp->requests, req, entry);
    if (client->nb_requests-- == MAX_NBD_REQUESTS) {
        qemu_notify_event();
    }
    nbd_client_put(client);
}
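The decrement pairs with a symmetric check on the allocation side. A sketch of that counterpart, modeled on the same throttling idea rather than copied from the NBD sources:

static NBDRequest *nbd_request_get_sketch(NBDClient *client)
{
    NBDRequest *req = QSIMPLEQ_FIRST(&client->exp->requests);

    if (req) {
        QSIMPLEQ_REMOVE_HEAD(&client->exp->requests, entry);
    } else {
        req = g_malloc0(sizeof(*req));
    }
    req->client = client;
    if (++client->nb_requests == MAX_NBD_REQUESTS) {
        /* hypothetical hook: stop watching the client fd until a
         * request completes and nbd_request_put() drops the count */
    }
    return req;
}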
Example #7
void qemu_clock_warp(QEMUClock *clock)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (clock != vm_clock || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
     * ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest vm_clock timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(vm_clock)) {
        qemu_del_timer(icount_warp_timer);
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
    deadline = qemu_clock_deadline(vm_clock);
    if (deadline > 0) {
        /*
         * Ensure the vm_clock proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * vm_clock.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending vm_clock timer; rather
         * time could just advance to the next vm_clock event.  Instead, we
         * do stop VCPUs and only advance vm_clock after some "real" time,
         * (related to the time left until the next event) has passed.  This
         * rt_clock timer will do this.  This avoids that the warps are too
         * visible externally---for example, you will not be sending network
         * packets continuously instead of every 100ms.
         */
        qemu_mod_timer(icount_warp_timer, vm_clock_warp_start + deadline);
    } else {
        LOGD_CPUS("%s=>qemu_notify_event()\n", __func__);
        qemu_notify_event();
    }
}
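The icount_warp_timer used here is an rt_clock timer whose callback is the icount_warp_rt() shown in Example #2. A plausible initialization sketch (the exact call site in QEMU's icount setup may differ):

static void configure_icount_sketch(void)
{
    /* rt_clock keeps running while vCPUs sleep, so this timer bounds
     * how long a warp lasts in real time */
    icount_warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
}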
Example #8
static void host_alarm_handler(int host_signum)
{
    struct qemu_alarm_timer *t = alarm_timer;

    if (!t) {
        return;
    }

    t->expired = true;
    t->pending = true;
    qemu_notify_event();
}
Example #9
static void host_alarm_handler(int host_signum)
{
    coremu_assert_hw_thr("Host_alarm_handler should be called by hw thr\n");

    struct qemu_alarm_timer *t = alarm_timer;

    if (!t) {
        return;
    }

#if 0
#define DISP_FREQ 1000
    {
        static int64_t delta_min = INT64_MAX;
        static int64_t delta_max, delta_cum, last_clock, delta, ti;
        static int count;
        ti = qemu_get_clock(vm_clock);
        if (last_clock != 0) {
            delta = ti - last_clock;
            if (delta < delta_min)
                delta_min = delta;
            if (delta > delta_max)
                delta_max = delta;
            delta_cum += delta;
            if (++count == DISP_FREQ) {
                printf("timer: min=%" PRId64 " us max=%" PRId64 " us avg=%" PRId64 " us avg_freq=%0.3f Hz\n",
                       muldiv64(delta_min, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_max, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_cum, 1000000 / DISP_FREQ, get_ticks_per_sec()),
                       (double)get_ticks_per_sec() / ((double)delta_cum / DISP_FREQ));
                count = 0;
                delta_min = INT64_MAX;
                delta_max = 0;
                delta_cum = 0;
            }
        }
        last_clock = ti;
    }
#endif
    if (alarm_has_dynticks(t) ||
        (!use_icount &&
            qemu_timer_expired(active_timers[QEMU_CLOCK_VIRTUAL],
                               qemu_get_clock(vm_clock))) ||
        qemu_timer_expired(active_timers[QEMU_CLOCK_REALTIME],
                           qemu_get_clock(rt_clock)) ||
        qemu_timer_expired(active_timers[QEMU_CLOCK_HOST],
                           qemu_get_clock(host_clock))) {

        t->expired = alarm_has_dynticks(t);
        t->pending = 1;
        qemu_notify_event();
    }
}
Example #10
static void CALLBACK mm_alarm_handler(UINT uTimerID, UINT uMsg,
                                      DWORD_PTR dwUser, DWORD_PTR dw1,
                                      DWORD_PTR dw2)
{
    struct qemu_alarm_timer *t = alarm_timer;
    if (!t) {
        return;
    }
    t->expired = true;
    t->pending = true;
    qemu_notify_event();
}
Example #11
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_get_clock_ns(vm_clock);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline(vm_clock);
        int64_t warp = MIN(dest - clock, deadline);
        qemu_icount_bias += warp;
        qemu_run_timers(vm_clock);
        clock = qemu_get_clock_ns(vm_clock);
    }
    qemu_notify_event();
}
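A hedged sketch of how a qtest command handler might drive this warp; the handler name and the relative-step convention are illustrative, not the actual qtest protocol code:

static void handle_clock_step_sketch(int64_t step_ns)
{
    /* a relative step expressed as an absolute warp destination */
    qtest_clock_warp(qemu_get_clock_ns(vm_clock) + step_ns);
}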
Example #12
static void host_alarm_handler(int host_signum)
{
    struct qemu_alarm_timer *t = alarm_timer;

    if (!t) {
        return;
    }

    if (alarm_has_dynticks(t) || qemu_next_alarm_deadline() <= 0) {
        t->expired = alarm_has_dynticks(t);
        t->pending = 1;
        qemu_notify_event();
    }
}
Example #13
static void CALLBACK mm_alarm_handler(UINT uTimerID, UINT uMsg,
                                      DWORD_PTR dwUser, DWORD_PTR dw1,
                                      DWORD_PTR dw2)
{
    struct qemu_alarm_timer *t = alarm_timer;
    if (!t) {
        return;
    }
    if (alarm_has_dynticks(t) || qemu_next_alarm_deadline() <= 0) {
        t->expired = alarm_has_dynticks(t);
        t->pending = 1;
        qemu_notify_event();
    }
}
Example #14
static void CALLBACK mm_alarm_handler(UINT uTimerID, UINT uMsg,
                                      DWORD_PTR dwUser, DWORD_PTR dw1,
                                      DWORD_PTR dw2)
{
    struct qemu_alarm_timer *t = alarm_timer;
    if (!t) {
        return;
    }
    // We can actually call qemu_next_alarm_deadline() here since this
    // doesn't run in a signal handler, but a different thread.
    if (alarm_has_dynticks(t) || qemu_next_alarm_deadline() <= 0) {
        t->expired = 1;
        timer_alarm_pending = 1;
        qemu_notify_event();
    }
}
Example #15
static void host_alarm_handler(int host_signum)
{
    struct qemu_alarm_timer *t = alarm_timer;

    if (!t) {
        return;
    }

#if 0
#define DISP_FREQ 1000
    {
        static int64_t delta_min = INT64_MAX;
        static int64_t delta_max, delta_cum, last_clock, delta, ti;
        static int count;
        ti = qemu_get_clock_ns(vm_clock);
        if (last_clock != 0) {
            delta = ti - last_clock;
            if (delta < delta_min)
                delta_min = delta;
            if (delta > delta_max)
                delta_max = delta;
            delta_cum += delta;
            if (++count == DISP_FREQ) {
                printf("timer: min=%" PRId64 " us max=%" PRId64 " us avg=%" PRId64 " us avg_freq=%0.3f Hz\n",
                       muldiv64(delta_min, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_max, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_cum, 1000000 / DISP_FREQ, get_ticks_per_sec()),
                       (double)get_ticks_per_sec() / ((double)delta_cum / DISP_FREQ));
                count = 0;
                delta_min = INT64_MAX;
                delta_max = 0;
                delta_cum = 0;
            }
        }
        last_clock = ti;
    }
#endif
    if (alarm_has_dynticks(t) || qemu_next_alarm_deadline() <= 0) {
        t->expired = alarm_has_dynticks(t);
        t->pending = 1;
        qemu_notify_event();
    }
}
Example #16
static void qemu_kvm_eat_signals(CPUState *env)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));

#ifndef CONFIG_IOTHREAD
    if (sigismember(&chkset, SIGIO) || sigismember(&chkset, SIGALRM)) {
        qemu_notify_event();
    }
#endif
}
Example #17
static void host_alarm_handler(int host_signum)
{
    struct qemu_alarm_timer *t = alarm_timer;

    if (!t) {
        return;
    }

    // It's not possible to call qemu_next_alarm_deadline() to know
    // if a timer has really expired, in the case of non-dynamic alarms,
    // so just signal and let the main loop thread do the checks instead.
    timer_alarm_pending = 1;

    // Ensure a dynamic alarm will be properly rescheduled.
    if (alarm_has_dynticks(t))
        t->expired = 1;

    // This forces a cpu_exit() call that will end the current CPU
    // execution ASAP.
    qemu_notify_event();
}
Example #18
void
sbdrop(struct sbuf *sb, int num)
{
    int limit = sb->sb_datalen / 2;

    /*
     * We can only drop as much as we have;
     * the clamp below should never trigger.
     */
    if (num > sb->sb_cc) {
        num = sb->sb_cc;
    }
    sb->sb_cc -= num;
    sb->sb_rptr += num;
    if (sb->sb_rptr >= sb->sb_data + sb->sb_datalen) {
        sb->sb_rptr -= sb->sb_datalen;
    }

    if (sb->sb_cc < limit && sb->sb_cc + num >= limit) {
        qemu_notify_event();
    }
}
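A quick arithmetic check of the wake-up condition, with hypothetical values:

#include <assert.h>

int main(void)
{
    /* an 8 KiB buffer, so limit == 4096 */
    int limit = 8192 / 2;
    int num = 10;
    int sb_cc = 4100 - num;    /* fill level after the drop */

    /* fires exactly once, on the crossing from >= limit to < limit */
    assert(sb_cc < limit && sb_cc + num >= limit);
    return 0;
}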
Example #19
void replay_account_executed_instructions(void)
{
    if (replay_mode == REPLAY_MODE_PLAY) {
        replay_mutex_lock();
        if (replay_state.instructions_count > 0) {
            int count = (int)(replay_get_current_step()
                              - replay_state.current_step);
            replay_state.instructions_count -= count;
            replay_state.current_step += count;
            if (replay_state.instructions_count == 0) {
                assert(replay_state.data_kind == EVENT_INSTRUCTION);
                replay_finish_event();
                /* Wake up the iothread. This is required because
                   timers will not expire until the clock counters
                   are read from the log. */
                qemu_notify_event();
            }
        }
        replay_mutex_unlock();
    }
}
Example #20
/*
 * Triggered by SCLP's read_event_data
 * - convert ASCII byte stream to EBCDIC and
 * - copy converted data into provided (SCLP) buffer
 */
static int get_console_data(SCLPEvent *event, uint8_t *buf, size_t *size,
                            int avail)
{
    int len;

    SCLPConsoleLM *cons = SCLPLM_CONSOLE(event);

    len = cons->length;
    /* the data must fit into the provided SCLP buffer */
    if (len > avail) {
        return 1;
    }

    ebcdic_put(buf, (char *)&cons->buf, len);
    *size = len;
    cons->length = 0;
    /* data provided and no more data pending */
    event->event_pending = false;
    qemu_notify_event();
    return 0;
}
Example #21
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    CPUArchState *env;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu = ENV_GET_CPU(env);
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (ENV_GET_CPU(first_cpu)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            qemu_wait_io_event_common(ENV_GET_CPU(env));
        }
    }

    while (1) {
        tcg_exec_all();
        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
Example #22
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUArchState *env = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        env->thread_id = qemu_get_thread_id();
        LOGD_CPUS("%s2: Thread ID = %d\n", __func__, env->thread_id);
        env->created = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            qemu_wait_io_event_common(env);
        }
    }

    while (1) {
        tcg_exec_all();
        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            LOGD_CPUS("%s=>qemu_notify_event()\n", __func__);
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
Example #23
void qemu_service_io(void)
{
    qemu_notify_event();
}
Example #24
static void nbd_client_closed(NBDClient *client)
{
    nb_fds--;
    qemu_notify_event();
}
Example #25
/* A non-blocking poll of the main AIO context (we cannot use aio_poll
 * because we do not know the AioContext).
 */
static void qemu_aio_wait_nonblocking(void)
{
    qemu_notify_event();
    qemu_aio_wait();
}
Example #26
static void termsig_handler(int signum)
{
    atomic_cmpxchg(&state, RUNNING, TERMINATE);
    qemu_notify_event();
}
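The handler only performs an atomic compare-and-swap and pokes the event loop, which keeps it async-signal-safe. A registration sketch, assumed rather than taken from qemu-nbd's actual startup code:

#include <signal.h>
#include <string.h>

static void install_termsig_handler_sketch(void)
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = termsig_handler;
    sigaction(SIGINT, &sa, NULL);
    sigaction(SIGTERM, &sa, NULL);
}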
Example #27
static void qemu_system_vmstop_request(int reason)
{
    vmstop_requested = reason;
    qemu_notify_event();
}
Example #28
static void termsig_handler(int signum)
{
    sigterm_reported = true;
    qemu_notify_event();
}
Example #29
void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
{
    qemu_notify_event();
}
Example #30
static void termsig_handler(int signum)
{
    state = TERMINATE;
    qemu_notify_event();
}