/*
 * Tear down an Archipelago block device: stop the request thread, notify
 * the storage backend that the volume is closed, and release all XSEG
 * resources.  Runs unconditionally on bs close; errors in the close
 * request are logged and teardown continues at err_exit.
 */
static void qemu_archipelago_close(BlockDriverState *bs)
{
    int r, targetlen;
    char *target;
    struct xseg_request *req;
    BDRVArchipelagoState *s = bs->opaque;

    /* Ask the request thread to exit, then wait until it confirms
     * (th_is_signaled is set under request_mutex) before joining it. */
    s->stopping = true;

    qemu_mutex_lock(&s->request_mutex);
    while (!s->th_is_signaled) {
        qemu_cond_wait(&s->request_cond, &s->request_mutex);
    }
    qemu_mutex_unlock(&s->request_mutex);
    qemu_thread_join(&s->request_th);
    qemu_cond_destroy(&s->request_cond);
    qemu_mutex_destroy(&s->request_mutex);

    qemu_cond_destroy(&s->archip_cond);
    qemu_mutex_destroy(&s->archip_mutex);

    /* Send a synchronous X_CLOSE request for the volume.  Any failure
     * along the way skips straight to resource cleanup. */
    targetlen = strlen(s->volname);

    req = xseg_get_request(s->xseg, s->srcport, s->vportno, X_ALLOC);
    if (!req) {
        archipelagolog("Cannot get XSEG request\n");
        goto err_exit;
    }
    r = xseg_prep_request(s->xseg, req, targetlen, 0);
    if (r < 0) {
        /* Request was acquired but not prepared: return it before bailing. */
        xseg_put_request(s->xseg, req, s->srcport);
        archipelagolog("Cannot prepare XSEG close request\n");
        goto err_exit;
    }

    /* The request "target" buffer carries the (non NUL-terminated)
     * volume name; its length was passed to xseg_prep_request above. */
    target = xseg_get_target(s->xseg, req);
    memcpy(target, s->volname, targetlen);

    req->size = req->datalen;
    req->offset = 0;
    req->op = X_CLOSE;

    xport p = xseg_submit(s->xseg, req, s->srcport, X_ALLOC);
    if (p == NoPort) {
        xseg_put_request(s->xseg, req, s->srcport);
        archipelagolog("Cannot submit XSEG close request\n");
        goto err_exit;
    }

    /* Wake the peer and block until it answers, then recycle the request. */
    xseg_signal(s->xseg, p);
    wait_reply(s->xseg, s->srcport, s->port, req);
    xseg_put_request(s->xseg, req, s->srcport);

err_exit:
    /* Unconditional cleanup: free owned strings and leave the segment. */
    g_free(s->volname);
    g_free(s->segment_name);
    xseg_quit_local_signal(s->xseg, s->srcport);
    xseg_leave_dynport(s->xseg, s->port);
    xseg_leave(s->xseg);
}
/*
 * gcrypt mutex-destroy callback: tear down and release the QemuMutex
 * that was allocated for gcrypt in the matching init callback.
 * Always reports success (0), as gcrypt expects.
 */
static int qcrypto_gcrypt_mutex_destroy(void **priv)
{
    QemuMutex *mtx = *priv;

    qemu_mutex_destroy(mtx);
    g_free(mtx);

    return 0;
}
/*
 * QOM finalize for an IOThread: stop the worker thread, then release
 * contexts and synchronization primitives.
 */
static void iothread_instance_finalize(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    iothread_stop(iothread);

    /*
     * Ordering matters: drop the AioContext (and with it its GSources)
     * before unreffing the GMainContext.  glib2 releases prior to
     * 2.33.10 have a bug where a GSource may keep a stale pointer to a
     * context that has already been destroyed; freeing the AIO context
     * first sidesteps it.  Once the minimum supported glib2 reaches
     * 2.33.10 this ordering constraint (and this comment) can go away.
     */
    if (iothread->ctx) {
        aio_context_unref(iothread->ctx);
        iothread->ctx = NULL;
    }
    if (iothread->worker_context) {
        g_main_context_unref(iothread->worker_context);
        iothread->worker_context = NULL;
    }

    qemu_cond_destroy(&iothread->init_done_cond);
    qemu_mutex_destroy(&iothread->init_done_lock);
}
/*
 * GSource finalize hook for an AioContext: release the thread pool,
 * detach and clean up the wakeup notifier, then free remaining state.
 */
static void aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;

    thread_pool_free(ctx->thread_pool);

    /* Deregister the notifier from the context before tearing it down. */
    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);

    qemu_mutex_destroy(&ctx->bh_lock);
    g_array_free(ctx->pollfds, TRUE);
}
/*
 * QOM finalize for an IOThread: stop the thread, destroy the init
 * handshake primitives, and drop the AioContext reference if one was
 * ever created.
 */
static void iothread_instance_finalize(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    iothread_stop(obj, NULL);

    qemu_cond_destroy(&iothread->init_done_cond);
    qemu_mutex_destroy(&iothread->init_done_lock);

    /* No context means realize never completed; nothing left to release. */
    if (!iothread->ctx) {
        return;
    }
    aio_context_unref(iothread->ctx);
}
/*
 * QOM finalize for a Chardev: detach the front end, free owned strings,
 * close the log file descriptor, and destroy the write lock.
 */
static void char_finalize(Object *obj)
{
    Chardev *chr = CHARDEV(obj);

    /* Break the back-pointer so the front end won't touch a dead chardev. */
    if (chr->be) {
        chr->be->chr = NULL;
    }

    g_free(chr->filename);
    g_free(chr->label);

    /* -1 is the "no log file" sentinel; only close a real descriptor. */
    if (chr->logfd != -1) {
        close(chr->logfd);
    }

    qemu_mutex_destroy(&chr->chr_write_lock);
}
/*
 * QOM finalize for an IOThread: signal the worker thread to stop, wait
 * for it, then destroy the synchronization primitives and release the
 * AioContext.
 *
 * NOTE(review): when ctx is NULL we return without destroying
 * init_done_cond/init_done_lock.  That is correct only if those
 * primitives are initialized together with ctx (e.g. in the complete/
 * realize path) rather than in instance_init — confirm against the
 * init code, otherwise this early return leaks them.
 */
static void iothread_instance_finalize(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    /* No context means the thread was never started; nothing to stop. */
    if (!iothread->ctx) {
        return;
    }

    /* Ask the thread's loop to exit, kick it out of aio_poll, and join. */
    iothread->stopping = true;
    aio_notify(iothread->ctx);
    qemu_thread_join(&iothread->thread);

    qemu_cond_destroy(&iothread->init_done_cond);
    qemu_mutex_destroy(&iothread->init_done_lock);
    aio_context_unref(iothread->ctx);
}
/*
 * PCI unrealize for the edu demo device: shut down the factorial worker
 * thread, release its synchronization primitives, and cancel the DMA
 * timer.
 */
static void pci_edu_uninit(PCIDevice *pdev)
{
    EduState *edu = DO_UPCAST(EduState, pdev, pdev);

    /* Raise the stop flag under the lock, then wake the worker so it
     * observes it and exits; join before destroying the primitives. */
    qemu_mutex_lock(&edu->thr_mutex);
    edu->stopping = true;
    qemu_mutex_unlock(&edu->thr_mutex);
    qemu_cond_signal(&edu->thr_cond);
    qemu_thread_join(&edu->thread);

    qemu_cond_destroy(&edu->thr_cond);
    qemu_mutex_destroy(&edu->thr_mutex);

    timer_del(&edu->dma_timer);
}
/*
 * QOM finalize for the TPM emulator backend: shut the emulator down,
 * release channels and options, lift the migration blocker if one was
 * installed, and destroy the state lock.
 */
static void tpm_emulator_inst_finalize(Object *obj)
{
    TPMEmulator *emu = TPM_EMULATOR(obj);

    tpm_emulator_shutdown(emu);

    object_unref(OBJECT(emu->data_ioc));
    qemu_chr_fe_deinit(&emu->ctrl_chr, false);
    qapi_free_TPMEmulatorOptions(emu->options);

    /* A blocker is only present if this backend registered one. */
    if (emu->migration_blocker) {
        migrate_del_blocker(emu->migration_blocker);
        error_free(emu->migration_blocker);
    }

    qemu_mutex_destroy(&emu->mutex);
}
/*
 * Initialize a HostMem tracker: zero the structure, set up the region
 * lock, and register a MemoryListener that records guest RAM regions as
 * they are added.
 */
void hostmem_init(HostMem *hostmem)
{
    memset(hostmem, 0, sizeof(*hostmem));

    qemu_mutex_init(&hostmem->current_regions_lock);

    /* Only region add/nop and commit do real work; every other callback
     * is a no-op stub required by the listener interface. */
    hostmem->listener = (MemoryListener){
        .begin = hostmem_listener_dummy,
        .commit = hostmem_listener_commit,
        .region_add = hostmem_listener_append_region,
        .region_del = hostmem_listener_section_dummy,
        .region_nop = hostmem_listener_append_region,
        .log_start = hostmem_listener_section_dummy,
        .log_stop = hostmem_listener_section_dummy,
        .log_sync = hostmem_listener_section_dummy,
        .log_global_start = hostmem_listener_dummy,
        .log_global_stop = hostmem_listener_dummy,
        .eventfd_add = hostmem_listener_eventfd_dummy,
        .eventfd_del = hostmem_listener_eventfd_dummy,
        .coalesced_mmio_add = hostmem_listener_coalesced_mmio_dummy,
        .coalesced_mmio_del = hostmem_listener_coalesced_mmio_dummy,
        .priority = 10,
    };

    memory_listener_register(&hostmem->listener, &address_space_memory);

    /* Registration may have replayed existing regions into new_regions;
     * publish them immediately instead of waiting for the next commit. */
    if (hostmem->num_new_regions > 0) {
        hostmem_listener_commit(&hostmem->listener);
    }
}

/*
 * Tear down a HostMem tracker: unregister the listener, free both
 * region arrays, and destroy the lock.
 */
void hostmem_finalize(HostMem *hostmem)
{
    memory_listener_unregister(&hostmem->listener);
    g_free(hostmem->new_regions);
    g_free(hostmem->current_regions);
    qemu_mutex_destroy(&hostmem->current_regions_lock);
}
/*
 * Release a resource table: destroy its lock and free both the entry
 * array and the allocation bitmap.
 *
 * Fix: the previous code freed the bitmap via
 * bitmap_zero_extend(tbl->bitmap, tbl->tbl_sz, 0), which only works by
 * relying on g_realloc(ptr, 0) freeing the buffer — an obscure idiom
 * that also discards the returned NULL, leaving tbl->bitmap dangling.
 * Free it explicitly and NULL both pointers so a double free or
 * use-after-free is caught deterministically.
 */
static inline void res_tbl_free(RdmaRmResTbl *tbl)
{
    qemu_mutex_destroy(&tbl->lock);
    g_free(tbl->tbl);
    tbl->tbl = NULL;
    g_free(tbl->bitmap);
    tbl->bitmap = NULL;
}
/*
 * Destroy an RFifoLock: tear down the wait condition first, then the
 * underlying mutex.
 */
void rfifolock_destroy(RFifoLock *r)
{
    qemu_cond_destroy(&r->cond);
    qemu_mutex_destroy(&r->lock);
}
/*
 * Destroy a Unicorn engine instance and release everything it owns:
 * TCG state, CPU objects, the QOM object graph, memory regions, hook
 * lists, and finally the engine struct itself.
 *
 * NOTE(review): teardown order here is load-bearing (e.g. objects are
 * unreffed before their backing memory is freed, hooks are released
 * after the CPU loop state) — do not reorder without tracing ownership.
 * Mixed g_free()/free() calls reflect which allocator created each
 * buffer; keep them paired as-is.
 *
 * Always returns UC_ERR_OK.
 */
UNICORN_EXPORT
uc_err uc_close(uc_engine *uc)
{
    int i;
    struct list_item *cur;
    struct hook *hook;
    CPUState *cpu;

    // Cleanup internally: give the arch-specific release hook first shot
    // at the TCG context, then free the context itself.
    if (uc->release)
        uc->release(uc->tcg_ctx);
    g_free(uc->tcg_ctx);

    // Cleanup CPU: per-CPU listener/thread state allocated at vCPU init.
    CPU_FOREACH(cpu) {
        g_free(cpu->tcg_as_listener);
        g_free(cpu->thread);
        g_free(cpu->halt_cond);
    }

    // Cleanup all objects.  The refcounts are forced to 1 so that the
    // following object_unref() calls are guaranteed to finalize each
    // object regardless of stray references.
    OBJECT(uc->machine_state->accelerator)->ref = 1;
    OBJECT(uc->machine_state)->ref = 1;
    OBJECT(uc->owner)->ref = 1;
    OBJECT(uc->root)->ref = 1;

    object_unref(uc, OBJECT(uc->machine_state->accelerator));
    object_unref(uc, OBJECT(uc->machine_state));
    object_unref(uc, uc->cpu);
    object_unref(uc, OBJECT(&uc->io_mem_notdirty));
    object_unref(uc, OBJECT(&uc->io_mem_unassigned));
    object_unref(uc, OBJECT(&uc->io_mem_rom));
    object_unref(uc, OBJECT(uc->root));

    // System memory.
    g_free(uc->system_memory);

    // Thread relateds.
    if (uc->qemu_thread_data)
        free(uc->qemu_thread_data);

    qemu_mutex_destroy(&uc->qemu_global_mutex);
    qemu_cond_destroy(&uc->qemu_cpu_cond);

    // Other auxiliaries: translation-block map, DMA bounce buffer,
    // QOM type table, and dirty-memory bitmaps.
    free(uc->l1_map);

    if (uc->bounce.buffer) {
        free(uc->bounce.buffer);
    }

    g_hash_table_foreach(uc->type_table, free_table, uc);
    g_hash_table_destroy(uc->type_table);

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        free(uc->ram_list.dirty_memory[i]);
    }

    // free hooks and hook lists
    for (i = 0; i < UC_HOOK_MAX; i++) {
        cur = uc->hook[i].head;
        // hook can be in more than one list
        // so we refcount to know when to free
        while (cur) {
            hook = (struct hook *)cur->data;
            if (--hook->refs == 0) {
                free(hook);
            }
            cur = cur->next;
        }
        list_clear(&uc->hook[i]);
    }

    free(uc->mapped_blocks);

    // finally, free uc itself.  Zeroing first helps catch use-after-free
    // of the handle by callers.
    memset(uc, 0, sizeof(*uc));
    free(uc);

    return UC_ERR_OK;
}
/*
 * Tear down a HostMem tracker: stop receiving physical-memory updates,
 * then destroy the lock and free the region array.
 */
void hostmem_finalize(HostMem *hostmem)
{
    /* Unregister first so no callback can race with the teardown below. */
    cpu_unregister_phys_memory_client(&hostmem->client);

    qemu_mutex_destroy(&hostmem->mem_lock);
    qemu_free(hostmem->mem);
}