static void test_callback(void) { CallbackTestData data; QemuThread thread; int ret; char c; rfifolock_init(&data.lock, rfifolock_cb, &data); ret = qemu_pipe(data.fd); g_assert(ret == 0); /* Hold lock but allow the callback to kick us by writing to the pipe */ rfifolock_lock(&data.lock); qemu_thread_create(&thread, "callback_thread", callback_thread, &data, QEMU_THREAD_JOINABLE); ret = read(data.fd[0], &c, sizeof(c)); g_assert(ret == 1); rfifolock_unlock(&data.lock); /* If we got here then the callback was invoked, as expected */ qemu_thread_join(&thread); close(data.fd[0]); close(data.fd[1]); rfifolock_destroy(&data.lock); }
static void test_acquire(void) { QemuThread thread; AcquireTestData data; /* Dummy event notifier ensures aio_poll() will block */ event_notifier_init(&data.notifier, false); set_event_notifier(ctx, &data.notifier, dummy_notifier_read); g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */ qemu_mutex_init(&data.start_lock); qemu_mutex_lock(&data.start_lock); data.thread_acquired = false; qemu_thread_create(&thread, "test_acquire_thread", test_acquire_thread, &data, QEMU_THREAD_JOINABLE); /* Block in aio_poll(), let other thread kick us and acquire context */ aio_context_acquire(ctx); qemu_mutex_unlock(&data.start_lock); /* let the thread run */ g_assert(aio_poll(ctx, true)); g_assert(!data.thread_acquired); aio_context_release(ctx); qemu_thread_join(&thread); set_event_notifier(ctx, &data.notifier, NULL); event_notifier_cleanup(&data.notifier); g_assert(data.thread_acquired); }
/*
 * Close an Archipelago volume: stop the request thread, send a
 * best-effort X_CLOSE request to the storage backend, and release all
 * per-state resources (ports, segment, name buffers).
 */
static void qemu_archipelago_close(BlockDriverState *bs)
{
    int r, targetlen;
    char *target;
    struct xseg_request *req;
    BDRVArchipelagoState *s = bs->opaque;

    /* Ask the request thread to stop and wait until it signals exit
     * before joining it and tearing down its synchronization objects */
    s->stopping = true;
    qemu_mutex_lock(&s->request_mutex);
    while (!s->th_is_signaled) {
        qemu_cond_wait(&s->request_cond, &s->request_mutex);
    }
    qemu_mutex_unlock(&s->request_mutex);
    qemu_thread_join(&s->request_th);
    qemu_cond_destroy(&s->request_cond);
    qemu_mutex_destroy(&s->request_mutex);

    qemu_cond_destroy(&s->archip_cond);
    qemu_mutex_destroy(&s->archip_mutex);

    /* Build and submit an X_CLOSE request for the volume.  Any failure
     * along the way just skips to resource cleanup — close is best
     * effort and cannot be reported to the caller. */
    targetlen = strlen(s->volname);
    req = xseg_get_request(s->xseg, s->srcport, s->vportno, X_ALLOC);
    if (!req) {
        archipelagolog("Cannot get XSEG request\n");
        goto err_exit;
    }
    r = xseg_prep_request(s->xseg, req, targetlen, 0);
    if (r < 0) {
        /* return the request before bailing out, or it would leak */
        xseg_put_request(s->xseg, req, s->srcport);
        archipelagolog("Cannot prepare XSEG close request\n");
        goto err_exit;
    }
    target = xseg_get_target(s->xseg, req);
    memcpy(target, s->volname, targetlen);
    req->size = req->datalen;
    req->offset = 0;
    req->op = X_CLOSE;

    xport p = xseg_submit(s->xseg, req, s->srcport, X_ALLOC);
    if (p == NoPort) {
        xseg_put_request(s->xseg, req, s->srcport);
        archipelagolog("Cannot submit XSEG close request\n");
        goto err_exit;
    }
    xseg_signal(s->xseg, p);
    /* wait synchronously for the backend's reply before freeing req */
    wait_reply(s->xseg, s->srcport, s->port, req);
    xseg_put_request(s->xseg, req, s->srcport);

err_exit:
    g_free(s->volname);
    g_free(s->segment_name);
    xseg_quit_local_signal(s->xseg, s->srcport);
    xseg_leave_dynport(s->xseg, s->port);
    xseg_leave(s->xseg);
}
/*
 * Bottom half run on the main loop to tear down a finished (or failed)
 * outgoing migration: join the migration thread, close the migration
 * file, cancel unfinished savevm state, and notify state listeners.
 */
static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    if (s->file) {
        DPRINTF("closing file\n");
        /* Drop the iothread lock while joining: the migration thread
         * may need to take it before it can exit, so joining with the
         * lock held would deadlock. */
        qemu_mutex_unlock_iothread();
        qemu_thread_join(&s->thread);
        qemu_mutex_lock_iothread();

        qemu_fclose(s->file);
        s->file = NULL;
    }

    /* by the time this BH runs the migration must have left ACTIVE */
    assert(s->state != MIG_STATE_ACTIVE);

    if (s->state != MIG_STATE_COMPLETED) {
        qemu_savevm_state_cancel();
        if (s->state == MIG_STATE_CANCELLING) {
            migrate_set_state(s, MIG_STATE_CANCELLING, MIG_STATE_CANCELLED);
        }
    }

    notifier_list_notify(&migration_state_notifiers, s);
}
/* Join every started worker thread, then reset the global count so a
 * subsequent run starts from a clean slate. */
static void wait_all_threads(void)
{
    int idx;

    for (idx = 0; idx < n_threads; idx++) {
        qemu_thread_join(&threads[idx]);
    }

    n_threads = 0;
}
/* Request the iothread's event loop to stop and wait for the thread to
 * exit.  Idempotent: does nothing if the thread was never started or a
 * stop is already in progress. */
void iothread_stop(IOThread *iothread)
{
    bool never_started = !iothread->ctx;

    if (never_started || iothread->stopping) {
        return;
    }

    iothread->stopping = true;
    /* the oneshot BH runs inside the iothread and makes it leave its loop */
    aio_bh_schedule_oneshot(iothread->ctx, iothread_stop_bh, iothread);
    qemu_thread_join(&iothread->thread);
}
/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 *
 * Stops the userfault handling thread (if one was started), undoes the
 * per-RAMBlock userfault registration, unmaps the temporary pages and
 * re-enables ballooning/mlock.  Returns 0 on success, -1 on error.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        Error *local_err = NULL;

        /* give registered users a chance to veto/clean up first */
        if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) {
            error_report_err(local_err);
            return -1;
        }

        if (qemu_ram_foreach_block(cleanup_range, mis)) {
            return -1;
        }
        /* Let the fault thread quit */
        atomic_set(&mis->fault_thread_quit, 1);
        /* wake it so it observes fault_thread_quit */
        postcopy_fault_thread_notify(mis);
        trace_postcopy_ram_incoming_cleanup_join();
        qemu_thread_join(&mis->fault_thread);

        trace_postcopy_ram_incoming_cleanup_closeuf();
        /* fds are only closed after the thread is gone, so it never
         * reads from a stale descriptor */
        close(mis->userfault_fd);
        close(mis->userfault_event_fd);
        mis->have_fault_thread = false;
    }

    /* ballooning was inhibited during postcopy; allow it again */
    qemu_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_state_set(POSTCOPY_INCOMING_END);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    }
    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_blocktime(
            get_postcopy_total_blocktime());

    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}
/* object_child_foreach() callback: stop one iothread.  Objects that are
 * not iothreads, or iothreads that never started, are skipped.  Always
 * returns 0 so iteration continues. */
static int iothread_stop(Object *object, void *opaque)
{
    IOThread *iothread = (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);

    if (iothread && iothread->ctx) {
        iothread->stopping = true;
        /* wake the event loop so it notices the stopping flag */
        aio_notify(iothread->ctx);
        qemu_thread_join(&iothread->thread);
    }

    return 0;
}
/* QOM finalizer: stop the event-loop thread (if it was started) and
 * release every resource the instance owns. */
static void iothread_instance_finalize(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    if (iothread->ctx) {
        iothread->stopping = true;
        /* kick the loop so the thread sees stopping and returns */
        aio_notify(iothread->ctx);
        qemu_thread_join(&iothread->thread);

        qemu_cond_destroy(&iothread->init_done_cond);
        qemu_mutex_destroy(&iothread->init_done_lock);
        aio_context_unref(iothread->ctx);
    }
}
/*
 * Device teardown: stop the factorial/DMA worker thread, destroy its
 * synchronization primitives, and cancel the pending DMA timer.
 */
static void pci_edu_uninit(PCIDevice *pdev)
{
    EduState *edu = DO_UPCAST(EduState, pdev, pdev);

    /* Set the stop flag under the mutex so the worker (which tests it
     * under the same mutex) cannot miss the transition, then wake it
     * and wait for it to exit before destroying cond/mutex. */
    qemu_mutex_lock(&edu->thr_mutex);
    edu->stopping = true;
    qemu_mutex_unlock(&edu->thr_mutex);
    qemu_cond_signal(&edu->thr_cond);
    qemu_thread_join(&edu->thread);

    qemu_cond_destroy(&edu->thr_cond);
    qemu_mutex_destroy(&edu->thr_mutex);

    timer_del(&edu->dma_timer);
}
/*
 * Start emulation at @begin and run until @until is reached, an error
 * occurs, @timeout microseconds elapse, or @count instructions have
 * executed (0 means unlimited).
 *
 * Returns UC_ERR_OK on success, UC_ERR_RESOURCE if the VM could not be
 * started, a hook-registration error, or the error recorded during
 * emulation (uc->invalid_error).
 */
UNICORN_EXPORT
uc_err uc_emu_start(uc_engine *uc, uint64_t begin, uint64_t until,
                    uint64_t timeout, size_t count)
{
    // reset per-run counters and flags
    uc->emu_counter = 0;
    uc->invalid_error = UC_ERR_OK;
    uc->block_full = false;
    uc->emulation_done = false;

    // seed the architecture's program counter with @begin
    switch (uc->arch) {
    default:
        break;

    case UC_ARCH_M68K:
        uc_reg_write(uc, UC_M68K_REG_PC, &begin);
        break;

    case UC_ARCH_X86:
        switch (uc->mode) {
        default:
            break;
        case UC_MODE_16:
            uc_reg_write(uc, UC_X86_REG_IP, &begin);
            break;
        case UC_MODE_32:
            uc_reg_write(uc, UC_X86_REG_EIP, &begin);
            break;
        case UC_MODE_64:
            uc_reg_write(uc, UC_X86_REG_RIP, &begin);
            break;
        }
        break;

    case UC_ARCH_ARM:
        uc_reg_write(uc, UC_ARM_REG_R15, &begin);
        break;

    case UC_ARCH_ARM64:
        uc_reg_write(uc, UC_ARM64_REG_PC, &begin);
        break;

    case UC_ARCH_MIPS:
        // TODO: MIPS32/MIPS64/BIGENDIAN etc
        uc_reg_write(uc, UC_MIPS_REG_PC, &begin);
        break;

    case UC_ARCH_SPARC:
        // TODO: Sparc/Sparc64
        uc_reg_write(uc, UC_SPARC_REG_PC, &begin);
        break;
    }

    uc->stop_request = false;
    uc->emu_count = count;

    /* count is size_t (unsigned), so "count <= 0" was really "count == 0";
     * spell it that way to avoid the misleading signed-style comparison. */
    // remove count hook if counting isn't necessary
    if (count == 0 && uc->count_hook != 0) {
        uc_hook_del(uc, uc->count_hook);
        uc->count_hook = 0;
    }
    // set up count hook to count instructions.
    if (count > 0 && uc->count_hook == 0) {
        uc_err err = uc_hook_add(uc, &uc->count_hook, UC_HOOK_CODE,
                                 hook_count_cb, NULL, 1, 0);
        if (err != UC_ERR_OK) {
            return err;
        }
    }

    uc->addr_end = until;

    if (timeout) {
        enable_emu_timer(uc, timeout * 1000);   // microseconds -> nanoseconds
    }

    if (uc->vm_start(uc)) {
        return UC_ERR_RESOURCE;
    }

    // emulation is done
    uc->emulation_done = true;

    if (timeout) {
        // wait for the timer to finish
        qemu_thread_join(&uc->timer);
    }

    return uc->invalid_error;
}
/*
 * Start emulation at @begin and run until @until is reached, an error
 * occurs, @timeout microseconds elapse, or @count instructions have
 * executed (0 means unlimited).  Returns the error recorded during
 * emulation (uc->invalid_error, UC_ERR_OK on success).
 */
UNICORN_EXPORT
uc_err uc_emu_start(uc_engine* uc, uint64_t begin, uint64_t until, uint64_t timeout, size_t count)
{
    // reset the counter and per-run flags
    uc->emu_counter = 0;
    uc->stop_request = false;
    uc->invalid_error = UC_ERR_OK;
    uc->block_full = false;
    uc->emulation_done = false;

    // seed the architecture's program counter with @begin
    switch(uc->arch) {
        default:
            break;

        case UC_ARCH_M68K:
            uc_reg_write(uc, UC_M68K_REG_PC, &begin);
            break;

        case UC_ARCH_X86:
            switch(uc->mode) {
                default:
                    break;
                case UC_MODE_16:
                    uc_reg_write(uc, UC_X86_REG_IP, &begin);
                    break;
                case UC_MODE_32:
                    uc_reg_write(uc, UC_X86_REG_EIP, &begin);
                    break;
                case UC_MODE_64:
                    uc_reg_write(uc, UC_X86_REG_RIP, &begin);
                    break;
            }
            break;

        case UC_ARCH_ARM:
            switch(uc->mode) {
                default:
                    break;
                case UC_MODE_THUMB:
                case UC_MODE_ARM:
                    uc_reg_write(uc, UC_ARM_REG_R15, &begin);
                    break;
            }
            break;

        case UC_ARCH_ARM64:
            uc_reg_write(uc, UC_ARM64_REG_PC, &begin);
            break;

        case UC_ARCH_MIPS:
            // TODO: MIPS32/MIPS64/BIGENDIAN etc
            uc_reg_write(uc, UC_MIPS_REG_PC, &begin);
            break;

        case UC_ARCH_SPARC:
            // TODO: Sparc/Sparc64
            uc_reg_write(uc, UC_SPARC_REG_PC, &begin);
            break;
    }

    uc->emu_count = count;
    // enable instruction-level hooking only when a count limit is set
    if (count > 0) {
        uc->hook_insn = true;
    }

    uc->addr_end = until;

    /* NOTE(review): vm_start()'s return value is ignored here, unlike
     * other variants of this function — confirm whether a failure path
     * is needed.  The timer is armed after vm_start, which suggests
     * vm_start is asynchronous in this variant; pause_all_vcpus below
     * is what blocks until emulation finishes — TODO confirm. */
    uc->vm_start(uc);
    if (timeout)
        enable_emu_timer(uc, timeout * 1000);   // microseconds -> nanoseconds
    uc->pause_all_vcpus(uc);
    // emulation is done
    uc->emulation_done = true;

    if (timeout) {
        // wait for the timer to finish
        qemu_thread_join(&uc->timer);
    }

    return uc->invalid_error;
}