static void qemu_tcg_init_vcpu(void *_env)
{
    CPUArchState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
#ifdef CONFIG_S2E
        /* Forks inherit parent's memory, therefore we do not want to
           allocate new memory regions, just overwrite them. */
        if (!s2e_is_forking()) {
            env->thread = g_malloc0(sizeof(QemuThread));
            env->halt_cond = g_malloc0(sizeof(QemuCond));
        }
#else
        env->thread = g_malloc0(sizeof(QemuThread));
        env->halt_cond = g_malloc0(sizeof(QemuCond));
#endif
        qemu_cond_init(env->halt_cond);
        tcg_halt_cond = env->halt_cond;
        qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        env->hThread = qemu_thread_get_handle(env->thread);
#endif
        while (env->created == 0) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = env->thread;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

void qio_task_run_in_thread(QIOTask *task,
                            QIOTaskWorker worker,
                            gpointer opaque,
                            GDestroyNotify destroy,
                            GMainContext *context)
{
    struct QIOTaskThreadData *data = g_new0(struct QIOTaskThreadData, 1);
    QemuThread thread;

    if (context) {
        g_main_context_ref(context);
    }

    data->task = task;
    data->worker = worker;
    data->opaque = opaque;
    data->destroy = destroy;
    data->context = context;

    trace_qio_task_thread_start(task, worker, opaque);

    qemu_thread_create(&thread,
                       "io-task-worker",
                       qio_task_thread_worker,
                       data,
                       QEMU_THREAD_DETACHED);
}

static void test_callback(void)
{
    CallbackTestData data;
    QemuThread thread;
    int ret;
    char c;

    rfifolock_init(&data.lock, rfifolock_cb, &data);
    ret = qemu_pipe(data.fd);
    g_assert(ret == 0);

    /* Hold lock but allow the callback to kick us by writing to the pipe */
    rfifolock_lock(&data.lock);
    qemu_thread_create(&thread, "callback_thread", callback_thread, &data,
                       QEMU_THREAD_JOINABLE);
    ret = read(data.fd[0], &c, sizeof(c));
    g_assert(ret == 1);
    rfifolock_unlock(&data.lock);
    /* If we got here then the callback was invoked, as expected */

    qemu_thread_join(&thread);

    close(data.fd[0]);
    close(data.fd[1]);
    rfifolock_destroy(&data.lock);
}

static void test_acquire(void)
{
    QemuThread thread;
    AcquireTestData data;

    /* Dummy event notifier ensures aio_poll() will block */
    event_notifier_init(&data.notifier, false);
    set_event_notifier(ctx, &data.notifier, dummy_notifier_read);
    g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */

    qemu_mutex_init(&data.start_lock);
    qemu_mutex_lock(&data.start_lock);
    data.thread_acquired = false;

    qemu_thread_create(&thread, "test_acquire_thread",
                       test_acquire_thread,
                       &data, QEMU_THREAD_JOINABLE);

    /* Block in aio_poll(), let other thread kick us and acquire context */
    aio_context_acquire(ctx);
    qemu_mutex_unlock(&data.start_lock); /* let the thread run */
    g_assert(aio_poll(ctx, true));
    g_assert(!data.thread_acquired);
    aio_context_release(ctx);

    qemu_thread_join(&thread);
    set_event_notifier(ctx, &data.notifier, NULL);
    event_notifier_cleanup(&data.notifier);

    g_assert(data.thread_acquired);
}

static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);
    char *name, *thread_name;

    iothread->stopping = false;
    iothread->thread_id = -1;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }

    qemu_mutex_init(&iothread->init_done_lock);
    qemu_cond_init(&iothread->init_done_cond);

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    name = object_get_canonical_path_component(OBJECT(obj));
    thread_name = g_strdup_printf("IO %s", name);
    qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);
    g_free(thread_name);
    g_free(name);

    /* Wait for initialization to complete */
    qemu_mutex_lock(&iothread->init_done_lock);
    while (iothread->thread_id == -1) {
        qemu_cond_wait(&iothread->init_done_cond,
                       &iothread->init_done_lock);
    }
    qemu_mutex_unlock(&iothread->init_done_lock);
}

static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);

    iothread->stopping = false;
    iothread->thread_id = -1;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }

    qemu_mutex_init(&iothread->init_done_lock);
    qemu_cond_init(&iothread->init_done_cond);

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    qemu_thread_create(&iothread->thread, "iothread", iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);

    /* Wait for initialization to complete */
    qemu_mutex_lock(&iothread->init_done_lock);
    while (iothread->thread_id == -1) {
        qemu_cond_wait(&iothread->init_done_cond,
                       &iothread->init_done_lock);
    }
    qemu_mutex_unlock(&iothread->init_done_lock);
}

static void kvm_start_vcpu(CPUState *env)
{
    env->thread = qemu_mallocz(sizeof(QemuThread));
    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, kvm_cpu_thread_fn, env);
    while (env->created == 0) {
        qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
    }
}

static void create_thread(void *(*func)(void *))
{
    if (n_threads >= NR_THREADS) {
        fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
        exit(-1);
    }
    qemu_thread_create(&threads[n_threads], "test", func, &data[n_threads],
                       QEMU_THREAD_JOINABLE);
    n_threads++;
}

static void qemu_kvm_start_vcpu(CPUState *env)
{
    env->thread = g_malloc0(sizeof(QemuThread));
    env->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env);
    while (env->created == 0) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUArchState *env)
{
    env->thread = g_malloc0(sizeof(QemuThread));
    env->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_dummy_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (env->created == 0) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void start_comp_thread(RdmaBackendDev *backend_dev)
{
    char thread_name[THR_NAME_LEN] = {};

    stop_backend_thread(&backend_dev->comp_thread);

    snprintf(thread_name, sizeof(thread_name), "rdma_comp_%s",
             ibv_get_device_name(backend_dev->ib_dev));
    backend_dev->comp_thread.run = true;
    qemu_thread_create(&backend_dev->comp_thread.thread, thread_name,
                       comp_handler_thread, backend_dev, QEMU_THREAD_DETACHED);
}

static void qemu_kvm_start_vcpu(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_check_and_apply(mis->userfault_fd, mis)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_event_fd == -1) {
        error_report("%s: Opening userfault_event_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    qemu_sem_init(&mis->fault_thread_sem, 0);
    qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                       postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
    qemu_sem_wait(&mis->fault_thread_sem);
    qemu_sem_destroy(&mis->fault_thread_sem);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
        return -1;
    }

    /*
     * Ballooning can mark pages as absent while we're postcopying
     * that would cause false userfaults.
     */
    qemu_balloon_inhibit(true);

    trace_postcopy_ram_enable_notify();

    return 0;
}

void migrate_fd_connect(MigrationState *s)
{
    s->state = MIG_STATE_ACTIVE;
    trace_migrate_set_state(MIG_STATE_ACTIVE);

    /* This is a best 1st approximation. ns to ms */
    s->expected_downtime = max_downtime/1000000;

    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_rate_limit(s->file,
                             s->bandwidth_limit / XFER_LIMIT_RATIO);

    qemu_thread_create(&s->thread, migration_thread, s,
                       QEMU_THREAD_JOINABLE);
    notifier_list_notify(&migration_state_notifiers, s);
}

void migrate_fd_connect(MigrationState *s)
{
    s->state = MIG_STATE_ACTIVE;
    s->bytes_xfer = 0;
    s->buffer = NULL;
    s->buffer_size = 0;
    s->buffer_capacity = 0;
    s->xfer_limit = s->bandwidth_limit / XFER_LIMIT_RATIO;
    s->complete = false;

    s->file = qemu_fopen_ops(s, &buffered_file_ops);

    qemu_thread_create(&s->thread, buffered_file_thread, s,
                       QEMU_THREAD_DETACHED);
    notifier_list_notify(&migration_state_notifiers, s);
}

static void tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = qemu_mallocz(sizeof(QemuThread));
        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        qemu_thread_create(env->thread, tcg_cpu_thread_fn, env);
        while (env->created == 0) {
            qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
        }
        tcg_cpu_thread = env->thread;
        tcg_halt_cond = env->halt_cond;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

void migrate_fd_connect(MigrationState *s)
{
    s->state = MIGRATION_STATUS_SETUP;
    trace_migrate_set_state(MIGRATION_STATUS_SETUP);

    /* This is a best 1st approximation. ns to ms */
    s->expected_downtime = max_downtime/1000000;
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_rate_limit(s->file,
                             s->bandwidth_limit / XFER_LIMIT_RATIO);

    /* Notify before starting migration thread */
    notifier_list_notify(&migration_state_notifiers, s);

    migrate_compress_threads_create();
    qemu_thread_create(&s->thread, "migration", migration_thread, s,
                       QEMU_THREAD_JOINABLE);
}

static int pci_edu_init(PCIDevice *pdev)
{
    EduState *edu = DO_UPCAST(EduState, pdev, pdev);
    uint8_t *pci_conf = pdev->config;

    timer_init_ms(&edu->dma_timer, QEMU_CLOCK_VIRTUAL, edu_dma_timer, edu);

    qemu_mutex_init(&edu->thr_mutex);
    qemu_cond_init(&edu->thr_cond);
    qemu_thread_create(&edu->thread, "edu", edu_fact_thread,
                       edu, QEMU_THREAD_JOINABLE);

    pci_config_set_interrupt_pin(pci_conf, 1);

    memory_region_init_io(&edu->mmio, OBJECT(edu), &edu_mmio_ops, edu,
                          "edu-mmio", 1 << 20);
    pci_register_bar(pdev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &edu->mmio);

    return 0;
}

static void qemu_dummy_start_vcpu(CPUArchState *env)
{
#ifdef CONFIG_S2E
    /* Forks inherit parent's memory, therefore we do not want to
       allocate new memory regions, just overwrite them. */
    if (!s2e_is_forking()) {
        env->thread = g_malloc0(sizeof(QemuThread));
        env->halt_cond = g_malloc0(sizeof(QemuCond));
    }
#else
    env->thread = g_malloc0(sizeof(QemuThread));
    env->halt_cond = g_malloc0(sizeof(QemuCond));
#endif

    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_dummy_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (env->created == 0) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static int qemu_archipelago_init(BDRVArchipelagoState *s)
{
    int ret;

    ret = qemu_archipelago_xseg_init(s);
    if (ret < 0) {
        error_report("Cannot initialize XSEG. Aborting...\n");
        goto err_exit;
    }

    qemu_cond_init(&s->archip_cond);
    qemu_mutex_init(&s->archip_mutex);
    qemu_cond_init(&s->request_cond);
    qemu_mutex_init(&s->request_mutex);
    s->th_is_signaled = false;
    qemu_thread_create(&s->request_th, "xseg_io_th",
                       (void *) xseg_request_handler,
                       (void *) s, QEMU_THREAD_JOINABLE);

err_exit:
    return ret;
}

static void qemu_tcg_init_vcpu(void *_env)
{
    CPUArchState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = g_malloc0(sizeof(QemuThread));
        env->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        tcg_halt_cond = env->halt_cond;
        qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        env->hThread = qemu_thread_get_handle(env->thread);
#endif
        while (env->created == 0) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = env->thread;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

static void enable_emu_timer(uc_engine *uc, uint64_t timeout)
{
    uc->timeout = timeout;
    qemu_thread_create(uc, &uc->timer, "timeout", _timeout_fn, uc,
                       QEMU_THREAD_JOINABLE);
}
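
/*
 * The call sites above share one pattern: allocate or embed a QemuThread,
 * create the worker with a descriptive name, then synchronize on its startup
 * and, for joinable threads, join it on teardown.  What follows is a minimal
 * sketch of that pattern, not taken from any of the files above; it assumes
 * the modern five-argument qemu_thread_create() from QEMU's qemu-thread API,
 * and "ExampleState", "example_worker" and "example_start" are hypothetical
 * names used only for illustration.
 */
#include "qemu/osdep.h"
#include "qemu/thread.h"

typedef struct ExampleState {
    QemuThread thread;
    QemuMutex lock;
    QemuCond started_cond;
    bool started;
} ExampleState;

static void *example_worker(void *opaque)
{
    ExampleState *s = opaque;

    /* Tell the creator that the thread is up and running */
    qemu_mutex_lock(&s->lock);
    s->started = true;
    qemu_cond_signal(&s->started_cond);
    qemu_mutex_unlock(&s->lock);

    /* ... do the actual work here ... */
    return NULL;
}

static void example_start(ExampleState *s)
{
    qemu_mutex_init(&s->lock);
    qemu_cond_init(&s->started_cond);
    s->started = false;

    qemu_thread_create(&s->thread, "example-worker", example_worker,
                       s, QEMU_THREAD_JOINABLE);

    /* Wait for the worker to report that it has started */
    qemu_mutex_lock(&s->lock);
    while (!s->started) {
        qemu_cond_wait(&s->started_cond, &s->lock);
    }
    qemu_mutex_unlock(&s->lock);
}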