/*
 * Close an Archipelago-backed block device.
 *
 * Teardown order matters:
 *   1. Signal the request thread to stop and wait (under request_mutex)
 *      until it acknowledges via th_is_signaled, then join it.
 *   2. Destroy the synchronization primitives — only safe after the join.
 *   3. Best-effort: send an X_CLOSE request for the volume over xseg and
 *      wait for the reply.  Any failure just skips to the common cleanup.
 *   4. Free the name strings and detach from the xseg segment/ports.
 */
static void qemu_archipelago_close(BlockDriverState *bs)
{
    int r, targetlen;
    char *target;
    struct xseg_request *req;
    BDRVArchipelagoState *s = bs->opaque;

    /* Ask the request thread to exit and wait for its acknowledgement. */
    s->stopping = true;
    qemu_mutex_lock(&s->request_mutex);
    while (!s->th_is_signaled) {
        qemu_cond_wait(&s->request_cond, &s->request_mutex);
    }
    qemu_mutex_unlock(&s->request_mutex);
    qemu_thread_join(&s->request_th);

    /* No other thread can touch these once the request thread is joined. */
    qemu_cond_destroy(&s->request_cond);
    qemu_mutex_destroy(&s->request_mutex);
    qemu_cond_destroy(&s->archip_cond);
    qemu_mutex_destroy(&s->archip_mutex);

    /* Build and submit an X_CLOSE request naming the volume. */
    targetlen = strlen(s->volname);
    req = xseg_get_request(s->xseg, s->srcport, s->vportno, X_ALLOC);
    if (!req) {
        archipelagolog("Cannot get XSEG request\n");
        goto err_exit;
    }
    r = xseg_prep_request(s->xseg, req, targetlen, 0);
    if (r < 0) {
        /* Return the request slot before bailing out. */
        xseg_put_request(s->xseg, req, s->srcport);
        archipelagolog("Cannot prepare XSEG close request\n");
        goto err_exit;
    }
    target = xseg_get_target(s->xseg, req);
    memcpy(target, s->volname, targetlen);
    req->size = req->datalen;
    req->offset = 0;
    req->op = X_CLOSE;

    xport p = xseg_submit(s->xseg, req, s->srcport, X_ALLOC);
    if (p == NoPort) {
        xseg_put_request(s->xseg, req, s->srcport);
        archipelagolog("Cannot submit XSEG close request\n");
        goto err_exit;
    }
    /* Kick the peer port, then block until the close is answered. */
    xseg_signal(s->xseg, p);
    wait_reply(s->xseg, s->srcport, s->port, req);
    xseg_put_request(s->xseg, req, s->srcport);

err_exit:
    /* Common cleanup: runs whether or not the X_CLOSE round-trip worked. */
    g_free(s->volname);
    g_free(s->segment_name);
    xseg_quit_local_signal(s->xseg, s->srcport);
    xseg_leave_dynport(s->xseg, s->port);
    xseg_leave(s->xseg);
}
/*
 * QOM finalizer for an IOThread: stop the event-loop thread, then
 * release the AioContext and GMainContext (in that specific order —
 * see the glib note below), and destroy the init handshake primitives.
 */
static void iothread_instance_finalize(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    iothread_stop(iothread);

    /*
     * Before glib2 2.33.10, there is a glib2 bug that GSource context
     * pointer may not be cleared even if the context has already been
     * destroyed (while it should). Here let's free the AIO context
     * earlier to bypass that glib bug.
     *
     * We can remove this comment after the minimum supported glib2
     * version boosts to 2.33.10. Before that, let's free the
     * GSources first before destroying any GMainContext.
     */
    if (iothread->ctx) {
        aio_context_unref(iothread->ctx);
        iothread->ctx = NULL;
    }
    if (iothread->worker_context) {
        g_main_context_unref(iothread->worker_context);
        iothread->worker_context = NULL;
    }

    /* Handshake primitives are unused once the thread has stopped. */
    qemu_cond_destroy(&iothread->init_done_cond);
    qemu_mutex_destroy(&iothread->init_done_lock);
}
/*
 * QOM finalizer for an IOThread: stop the event-loop thread, tear down
 * the init handshake primitives, and drop the AioContext reference.
 */
static void iothread_instance_finalize(Object *obj)
{
    IOThread *self = IOTHREAD(obj);

    iothread_stop(obj, NULL);

    /* The handshake primitives are unused once the thread has stopped. */
    qemu_cond_destroy(&self->init_done_cond);
    qemu_mutex_destroy(&self->init_done_lock);

    /* ctx may still be NULL (presumably when init bailed out early —
     * the guard mirrors that possibility). */
    if (self->ctx) {
        aio_context_unref(self->ctx);
    }
}
/*
 * QOM finalizer for an IOThread: request the event-loop thread to exit,
 * join it, then release the synchronization primitives and AioContext.
 */
static void iothread_instance_finalize(Object *obj)
{
    IOThread *self = IOTHREAD(obj);

    /* No AioContext means nothing was ever started — nothing to undo. */
    if (!self->ctx) {
        return;
    }

    /* Flag the loop to stop and kick it so the flag is noticed, then
     * wait for the thread to actually finish. */
    self->stopping = true;
    aio_notify(self->ctx);
    qemu_thread_join(&self->thread);

    /* Only safe to destroy after the join above. */
    qemu_cond_destroy(&self->init_done_cond);
    qemu_mutex_destroy(&self->init_done_lock);
    aio_context_unref(self->ctx);
}
/*
 * Tear down the edu PCI device: stop its worker thread, wait for it to
 * exit, then release the thread synchronization state and the DMA timer.
 */
static void pci_edu_uninit(PCIDevice *pdev)
{
    EduState *dev = DO_UPCAST(EduState, pdev, pdev);

    /* Publish the stop flag under the mutex so the worker cannot miss
     * it, then wake the worker and wait for the thread to terminate. */
    qemu_mutex_lock(&dev->thr_mutex);
    dev->stopping = true;
    qemu_mutex_unlock(&dev->thr_mutex);
    qemu_cond_signal(&dev->thr_cond);
    qemu_thread_join(&dev->thread);

    /* Destroying these is only safe once the worker has been joined. */
    qemu_cond_destroy(&dev->thr_cond);
    qemu_mutex_destroy(&dev->thr_mutex);

    timer_del(&dev->dma_timer);
}
/*
 * Release the condition variable and mutex backing an RFifoLock.
 * NOTE(review): presumably no thread may hold or wait on the lock when
 * this runs (usual destroy precondition) — not enforced here.
 */
void rfifolock_destroy(RFifoLock *lock)
{
    qemu_cond_destroy(&lock->cond);
    qemu_mutex_destroy(&lock->lock);
}
/*
 * Tear down a Unicorn engine instance and release everything it owns:
 * TCG state, per-CPU helper allocations, the QOM object tree, memory
 * bookkeeping, registered hooks, and finally the handle itself.
 *
 * @uc: engine handle returned by uc_open(); invalid after this call.
 *
 * Returns UC_ERR_OK (teardown itself cannot fail).
 *
 * Fixes vs. previous version: dropped the redundant NULL guards around
 * free(uc->qemu_thread_data) and free(uc->bounce.buffer) — free(NULL)
 * is a guaranteed no-op (C11 7.22.3.3) — and braced the single-statement
 * if.  Behavior is otherwise unchanged.
 */
UNICORN_EXPORT
uc_err uc_close(uc_engine *uc)
{
    int i;
    struct list_item *cur;
    struct hook *hook;
    CPUState *cpu;

    /* Cleanup internally: arch-specific release hook, then TCG context. */
    if (uc->release) {
        uc->release(uc->tcg_ctx);
    }
    g_free(uc->tcg_ctx);

    /* Cleanup per-CPU helper allocations. */
    CPU_FOREACH(cpu) {
        g_free(cpu->tcg_as_listener);
        g_free(cpu->thread);
        g_free(cpu->halt_cond);
    }

    /* Cleanup all QOM objects.  Refcounts are forced to 1 so the unref
     * below triggers finalization regardless of outstanding references. */
    OBJECT(uc->machine_state->accelerator)->ref = 1;
    OBJECT(uc->machine_state)->ref = 1;
    OBJECT(uc->owner)->ref = 1;
    OBJECT(uc->root)->ref = 1;

    object_unref(uc, OBJECT(uc->machine_state->accelerator));
    object_unref(uc, OBJECT(uc->machine_state));
    object_unref(uc, uc->cpu);
    object_unref(uc, OBJECT(&uc->io_mem_notdirty));
    object_unref(uc, OBJECT(&uc->io_mem_unassigned));
    object_unref(uc, OBJECT(&uc->io_mem_rom));
    object_unref(uc, OBJECT(uc->root));

    /* System memory. */
    g_free(uc->system_memory);

    /* Thread-related state.  free(NULL) is a no-op, so no guard needed. */
    free(uc->qemu_thread_data);
    qemu_mutex_destroy(&uc->qemu_global_mutex);
    qemu_cond_destroy(&uc->qemu_cpu_cond);

    /* Other auxiliaries. */
    free(uc->l1_map);
    free(uc->bounce.buffer);
    g_hash_table_foreach(uc->type_table, free_table, uc);
    g_hash_table_destroy(uc->type_table);
    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        free(uc->ram_list.dirty_memory[i]);
    }

    /* Free hooks and hook lists.  A hook can sit on more than one list,
     * so it is reference-counted and freed only when the last list drops
     * its reference. */
    for (i = 0; i < UC_HOOK_MAX; i++) {
        cur = uc->hook[i].head;
        while (cur) {
            hook = (struct hook *)cur->data;
            if (--hook->refs == 0) {
                free(hook);
            }
            cur = cur->next;
        }
        list_clear(&uc->hook[i]);
    }

    free(uc->mapped_blocks);

    /* Finally, free uc itself — scrubbed first so stale pointers inside
     * the handle cannot be reused after the free. */
    memset(uc, 0, sizeof(*uc));
    free(uc);

    return UC_ERR_OK;
}