/*
 * Initialize the main-loop infrastructure: SIGBUS handling, signal
 * routing, the event-notification mechanism, the condition variables
 * used to synchronize with VCPU threads, and the global mutex.
 *
 * Returns 0 on success, or the non-zero error code of the failing
 * initialization step.  On success the global mutex is left LOCKED by
 * the caller, and the calling thread is recorded as the I/O thread.
 */
int qemu_init_main_loop(void)
{
    int ret;

    qemu_init_sigbus();

    ret = qemu_signal_init();
    if (ret) {
        return ret;
    }

    /* Note eventfd must be drained before signalfd handlers run */
    ret = qemu_event_init();
    if (ret) {
        return ret;
    }

    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);
    /* Take the global lock; this thread owns it from here on. */
    qemu_mutex_lock(&qemu_global_mutex);

    /* Remember which thread is the I/O thread. */
    qemu_thread_get_self(&io_thread);

    return 0;
}
/*
 * Thread body for a KVM-accelerated VCPU.  The thread registers itself,
 * creates the in-kernel VCPU, announces creation to the spawner via
 * qemu_cpu_cond, then loops forever executing guest code and servicing
 * I/O events.  Runs with the global mutex held except while blocked in
 * qemu_kvm_wait_io_event() (presumably — confirm against its definition).
 * Exits the whole process if the kernel VCPU cannot be created.
 */
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu_single_env = env;

    r = kvm_init_vcpu(env);
    if (r < 0) {
        /* kvm_init_vcpu returns a negative errno on failure. */
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    /* Main execution loop: never returns. */
    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}
static void *qemu_tcg_cpu_thread_fn(void *arg) { CPUState *env = arg; qemu_tcg_init_cpu_signals(); qemu_thread_get_self(env->thread); /* signal CPU creation */ qemu_mutex_lock(&qemu_global_mutex); for (env = first_cpu; env != NULL; env = env->next_cpu) { env->thread_id = qemu_get_thread_id(); env->created = 1; } qemu_cond_signal(&qemu_cpu_cond); /* wait for initial kick-off after machine start */ while (first_cpu->stopped) { qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex); } while (1) { cpu_exec_all(); if (use_icount && qemu_next_icount_deadline() <= 0) { qemu_notify_event(); } qemu_tcg_wait_io_event(); } return NULL; }
/*
 * Thread body for a KVM-accelerated VCPU (variant carrying TLC
 * profiling hooks).  Registers the thread, creates the in-kernel VCPU,
 * announces creation via qemu_cpu_cond, initializes TLC profiling for
 * this CPU, then loops forever executing guest code and servicing I/O
 * events.  Exits the whole process if the kernel VCPU cannot be created.
 */
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(env->thread);
    env->thread_id = qemu_get_thread_id();

    r = kvm_init_vcpu(env);
    if (r < 0) {
        /* kvm_init_vcpu returns a negative errno on failure. */
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    // TLC profiling
    tlc_cpu_profile_init(env);

    /* Main execution loop: never returns. */
    while (1) {
        if (cpu_can_run(env)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}
/*
 * Initialize the synchronization primitives shared by all VCPU threads
 * (condition variables and the global mutex) plus SIGBUS handling, and
 * record the calling thread as the I/O thread.  Must be called before
 * any VCPU thread is spawned.
 */
void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    /* Remember which thread is the I/O thread. */
    qemu_thread_get_self(&io_thread);
}
/*
 * Initialize the synchronization primitives shared by all VCPU threads
 * (condition variables and the global mutex) plus SIGBUS handling, and
 * record the calling thread as the I/O thread.  Must be called before
 * any VCPU thread is spawned.
 */
void qemu_init_cpu_loop(void)
{
#ifdef CONFIG_S2E
    /*
     * S2E build: reset the TCG thread bookkeeping so a (re-)init starts
     * from a clean state — presumably S2E can re-enter this path; TODO
     * confirm against the S2E patches.
     */
    tcg_cpu_thread = NULL;
    tcg_halt_cond = NULL;
#endif

    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    /* Remember which thread is the I/O thread. */
    qemu_thread_get_self(&io_thread);
}
/*
 * Thread body for a "dummy" VCPU used by qtest: it never executes guest
 * code, it only parks in sigwait() for SIG_IPI and services I/O events
 * when kicked.  Not supported on Windows (no sigwait); aborts there.
 *
 * Locking: cpu_single_env is only valid while the iothread lock is
 * held, so it is cleared before unlocking and restored after relocking.
 */
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    /* Wait set contains only the kick signal. */
    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    cpu_single_env = env;
    while (1) {
        cpu_single_env = NULL;
        qemu_mutex_unlock_iothread();
        /* Block until kicked; retry on EINTR/EAGAIN. */
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        cpu_single_env = env;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}
/*
 * FIFO-fair recursive lock built on a ticket scheme.
 *
 * Each would-be owner draws a ticket from 'tail' and sleeps on the
 * condition variable until 'head' advances to its ticket; the unlock
 * path increments 'head' and wakes waiters, guaranteeing first-come,
 * first-served ordering.
 *
 * A thread that already holds the lock may re-enter without drawing a
 * ticket; 'nesting' counts the depth and only the outermost unlock
 * advances 'head'.
 */
void rfifolock_lock(RFifoLock *r)
{
    qemu_mutex_lock(&r->lock);

    if (r->nesting > 0 && qemu_thread_is_self(&r->owner_thread)) {
        /* Recursive acquisition by the current owner: no ticket needed. */
    } else {
        /* Draw a ticket and wait for our turn. */
        unsigned int my_ticket = r->tail++;

        while (my_ticket != r->head) {
            /* Invoke optional contention callback */
            if (r->cb) {
                r->cb(r->cb_opaque);
            }
            qemu_cond_wait(&r->cond, &r->lock);
        }
    }

    qemu_thread_get_self(&r->owner_thread);
    r->nesting++;

    qemu_mutex_unlock(&r->lock);
}
/*
 * Thread body for the single TCG execution thread, which drives all
 * CPUs round-robin via tcg_exec_all().  It announces creation of every
 * CPU, waits for the initial kick-off after machine start (servicing
 * any per-CPU work that arrives meanwhile), then loops forever
 * executing guest code and handling I/O events.  Never returns.
 */
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    CPUArchState *env;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu = ENV_GET_CPU(env);
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (ENV_GET_CPU(first_cpu)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            qemu_wait_io_event_common(ENV_GET_CPU(env));
        }
    }

    /* Main execution loop: never returns. */
    while (1) {
        tcg_exec_all();
        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            /* An icount deadline expired; wake the main loop. */
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
/*
 * Thread body for the single TCG execution thread (variant with LOGD
 * debug tracing), which drives all CPUs round-robin via tcg_exec_all().
 * It announces creation of every CPU, waits for the initial kick-off
 * after machine start (servicing any per-CPU work that arrives
 * meanwhile), then loops forever executing guest code and handling I/O
 * events.  Never returns.
 */
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUArchState *env = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        env->thread_id = qemu_get_thread_id();
        LOGD_CPUS("%s2: Thread ID = %d\n", __func__, env->thread_id);
        env->created = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            qemu_wait_io_event_common(env);
        }
    }

    /* Main execution loop: never returns. */
    while (1) {
        tcg_exec_all();
        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            /* An icount deadline expired; wake the main loop. */
            LOGD_CPUS("%s=>qemu_notify_event()\n", __func__);
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
/*
 * Thread body for the single TCG execution thread, which drives all
 * CPUs round-robin via tcg_exec_all().  It announces creation of every
 * CPU (via the qemu_for_each_cpu helper), waits for the initial
 * kick-off after machine start (servicing any per-CPU work that arrives
 * meanwhile), then loops forever executing guest code and handling I/O
 * events.  Never returns.
 */
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    /* Mark every CPU created and record its thread id. */
    qemu_for_each_cpu(tcg_signal_cpu_creation, NULL);
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    /* Main execution loop: never returns. */
    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            /* Deadline already reached: notify the virtual clock. */
            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}