static int xen_9pfs_free(struct XenLegacyDevice *xendev)
{
    Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev);
    int i;

    if (xen_9pdev->rings[0].evtchndev != NULL) {
        xen_9pfs_disconnect(xendev);
    }

    for (i = 0; i < xen_9pdev->num_rings; i++) {
        if (xen_9pdev->rings[i].data != NULL) {
            xen_be_unmap_grant_refs(&xen_9pdev->xendev,
                                    xen_9pdev->rings[i].data,
                                    (1 << xen_9pdev->rings[i].ring_order));
        }
        if (xen_9pdev->rings[i].intf != NULL) {
            xen_be_unmap_grant_refs(&xen_9pdev->xendev,
                                    xen_9pdev->rings[i].intf,
                                    1);
        }
        if (xen_9pdev->rings[i].bh != NULL) {
            qemu_bh_delete(xen_9pdev->rings[i].bh);
        }
    }

    g_free(xen_9pdev->id);
    g_free(xen_9pdev->tag);
    g_free(xen_9pdev->path);
    g_free(xen_9pdev->security_model);
    g_free(xen_9pdev->rings);
    return 0;
}
static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    if (s->file) {
        DPRINTF("closing file\n");
        qemu_mutex_unlock_iothread();
        qemu_thread_join(&s->thread);
        qemu_mutex_lock_iothread();

        qemu_fclose(s->file);
        s->file = NULL;
    }

    assert(s->state != MIG_STATE_ACTIVE);

    if (s->state != MIG_STATE_COMPLETED) {
        qemu_savevm_state_cancel();
        if (s->state == MIG_STATE_CANCELLING) {
            migrate_set_state(s, MIG_STATE_CANCELLING, MIG_STATE_CANCELLED);
        }
    }

    notifier_list_notify(&migration_state_notifiers, s);
}
void laio_detach_aio_context(void *s_, AioContext *old_context)
{
    struct qemu_laio_state *s = s_;

    aio_set_event_notifier(old_context, &s->e, NULL);
    qemu_bh_delete(s->completion_bh);
}
static void iscsi_co_generic_bh_cb(void *opaque)
{
    struct IscsiTask *iTask = opaque;

    iTask->complete = 1;
    qemu_bh_delete(iTask->bh);
    qemu_coroutine_enter(iTask->co, NULL);
}
/* BH callback: delete the one-shot BH and restart the DMA transfer. */
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_blk_cb(dbs, 0);
}
static void qemu_gluster_complete_aio(void *opaque)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;

    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_coroutine_enter(acb->coroutine, NULL);
}
/* This callback gets invoked when we have pure sparseness */
static void tar_sparse_cb(void *opaque)
{
    TarAIOCB *acb = (TarAIOCB *)opaque;

    acb->common.cb(acb->common.opaque, 0);
    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);
}
/* Test callback: reschedule the BH until 'max' runs, then delete it. */
static void bh_delete_cb(void *opaque)
{
    BHTestData *data = opaque;

    if (++data->n < data->max) {
        qemu_bh_schedule(data->bh);
    } else {
        qemu_bh_delete(data->bh);
        data->bh = NULL;
    }
}
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {
        .num_writes = 0,
    };

    while ((req = virtio_blk_get_request(s))) {
        virtio_blk_handle_request(req, &mrb);
    }

    virtio_submit_multiwrite(s->bs, &mrb);

    /*
     * FIXME: Want to check for completions before returning to guest mode,
     * so cached reads and writes are reported as quickly as possible. But
     * that should be done in the generic block layer.
     */
}

static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;
    MultiReqBuffer mrb = {
        .num_writes = 0,
    };

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    s->rq = NULL;

    while (req) {
        virtio_blk_handle_request(req, &mrb);
        req = req->next;
    }

    virtio_submit_multiwrite(s->bs, &mrb);
}

static void virtio_blk_dma_restart_cb(void *opaque, int running,
                                      RunState state)
{
    VirtIOBlock *s = opaque;

    if (!running) {
        return;
    }

    if (!s->bh) {
        s->bh = qemu_bh_new(virtio_blk_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}
static void curl_readv_bh_cb(void *p)
{
    CURLState *state;
    CURLAIOCB *acb = p;
    BDRVCURLState *s = acb->common.bs->opaque;

    qemu_bh_delete(acb->bh);
    acb->bh = NULL;

    size_t start = acb->sector_num * SECTOR_SIZE;
    size_t end;

    // In case we have the requested data already (e.g. read-ahead),
    // we can just call the callback and be done.
    switch (curl_find_buf(s, start, acb->nb_sectors * SECTOR_SIZE, acb)) {
    case FIND_RET_OK:
        qemu_aio_release(acb);
        // fall through
    case FIND_RET_WAIT:
        return;
    default:
        break;
    }

    // No cache found, so let's start a new request
    state = curl_init_state(s);
    if (!state) {
        acb->common.cb(acb->common.opaque, -EIO);
        qemu_aio_release(acb);
        return;
    }

    acb->start = 0;
    acb->end = (acb->nb_sectors * SECTOR_SIZE);

    state->buf_off = 0;
    if (state->orig_buf) {
        g_free(state->orig_buf);
    }
    state->buf_start = start;
    state->buf_len = acb->end + s->readahead_size;
    end = MIN(start + state->buf_len, s->len) - 1;
    state->orig_buf = g_malloc(state->buf_len);
    state->acb[0] = acb;

    snprintf(state->range, 127, "%zd-%zd", start, end);
    DPRINTF("CURL (AIO): Reading %d at %zd (%s)\n",
            (acb->nb_sectors * SECTOR_SIZE), start, state->range);
    curl_easy_setopt(state->curl, CURLOPT_RANGE, state->range);

    curl_multi_add_handle(s->multi, state->curl);
    curl_multi_do(s);
}
/* Context: QEMU global mutex held */
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
{
    if (!s) {
        return;
    }

    virtio_blk_data_plane_stop(s);
    qemu_bh_delete(s->bh);
    object_unref(OBJECT(s->iothread));
    g_free(s);
}
static void qlooper_destroy(Looper* ll)
{
    QLooper* looper = (QLooper*)ll;
    QLoopIo* io;

    while ((io = looper->io_list) != NULL) {
        qloopio_free(io);
    }

    qemu_bh_delete(looper->io_bh);
    qemu_free(looper);
}
static void iscsi_readv_writev_bh_cb(void *p)
{
    IscsiAIOCB *acb = p;

    qemu_bh_delete(acb->bh);

    if (acb->canceled == 0) {
        acb->common.cb(acb->common.opaque, acb->status);
    }

    qemu_aio_release(acb);
}
/* Context: QEMU global mutex held */
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
{
    if (!s) {
        return;
    }

    virtio_blk_data_plane_stop(s);
    blk_op_unblock_all(s->conf->conf.blk, s->blocker);
    error_free(s->blocker);
    qemu_bh_delete(s->bh);
    object_unref(OBJECT(s->iothread));
    g_free(s);
}
static void virtio_balloon_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBalloon *s = VIRTIO_BALLOON(dev);

    if (virtio_balloon_free_page_support(s)) {
        qemu_bh_delete(s->free_page_bh);
        virtio_balloon_free_page_stop(s);
        precopy_remove_notifier(&s->free_page_report_notify);
    }
    balloon_stats_destroy_timer(s);
    qemu_remove_balloon_handler(s);
    virtio_cleanup(vdev);
}
static void rbd_aio_bh_cb(void *opaque)
{
    RBDAIOCB *acb = opaque;

    if (acb->cmd == RBD_AIO_READ) {
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    }
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;

    qemu_aio_release(acb);
}
static void qemu_archipelago_complete_aio(void *opaque)
{
    AIORequestData *reqdata = (AIORequestData *) opaque;
    ArchipelagoAIOCB *aio_cb = (ArchipelagoAIOCB *) reqdata->aio_cb;

    qemu_bh_delete(aio_cb->bh);
    aio_cb->common.cb(aio_cb->common.opaque, aio_cb->ret);
    aio_cb->status = 0;

    if (!aio_cb->cancelled) {
        qemu_aio_release(aio_cb);
    }
    g_free(reqdata);
}
/* Context: QEMU global mutex held */
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
{
    if (!s) {
        return;
    }

    virtio_blk_data_plane_stop(s);
    data_plane_remove_op_blockers(s);
    notifier_remove(&s->insert_notifier);
    notifier_remove(&s->remove_notifier);
    qemu_bh_delete(s->bh);
    object_unref(OBJECT(s->iothread));
    g_free(s);
}
static void dma_aio_cancel(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        blk_aio_cancel_async(dbs->acb);
    }
    if (dbs->bh) {
        cpu_unregister_map_client(dbs->bh);
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
}
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_blk_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    qemu_aio_unref(dbs);
}
static void blkverify_aio_bh(void *opaque)
{
    BlkverifyAIOCB *acb = opaque;

    qemu_bh_delete(acb->bh);
    if (acb->buf) {
        qemu_iovec_destroy(&acb->raw_qiov);
        qemu_vfree(acb->buf);
    }
    acb->common.cb(acb->common.opaque, acb->ret);
    if (acb->finished) {
        *acb->finished = true;
    }
    qemu_aio_release(acb);
}
/* One-shot BH: clear the global handle, then perform the COLO failover. */
static void colo_failover_bh(void *opaque)
{
    int old_state;

    qemu_bh_delete(failover_bh);
    failover_bh = NULL;

    old_state = failover_set_state(FAILOVER_STATUS_REQUIRE,
                                   FAILOVER_STATUS_ACTIVE);
    if (old_state != FAILOVER_STATUS_REQUIRE) {
        error_report("Unknown error for failover, old_state = %s",
                     FailoverStatus_lookup[old_state]);
        return;
    }

    colo_do_failover(NULL);
}
static void vigs_device_exit(PCIDevice *dev)
{
    VIGSState *s = DO_UPCAST(VIGSState, dev.pci_dev, dev);

    vigs_server_destroy(s->server);

    qemu_bh_delete(s->fence_ack_bh);

    vigs_fenceman_destroy(s->fenceman);

    memory_region_destroy(&s->io_bar);
    memory_region_destroy(&s->ram_bar);
    memory_region_destroy(&s->vram_bar);

    VIGS_LOG_INFO("VIGS deinitialized");

    vigs_log_cleanup();
}
static void iscsi_bh_cb(void *p)
{
    IscsiAIOCB *acb = p;

    qemu_bh_delete(acb->bh);

    if (acb->canceled == 0) {
        acb->common.cb(acb->common.opaque, acb->status);
    }

    if (acb->task != NULL) {
        scsi_free_scsi_task(acb->task);
        acb->task = NULL;
    }

    qemu_aio_release(acb);
}
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}
static void virtio_crypto_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    VirtIOCryptoQueue *q;
    int i, max_queues;

    max_queues = vcrypto->multiqueue ? vcrypto->max_queues : 1;
    for (i = 0; i < max_queues; i++) {
        virtio_del_queue(vdev, i);
        q = &vcrypto->vqs[i];
        qemu_bh_delete(q->dataq_bh);
    }

    g_free(vcrypto->vqs);

    virtio_cleanup(vdev);
    cryptodev_backend_set_used(vcrypto->cryptodev, false);
}
static void iscsi_bh_cb(void *p)
{
    IscsiAIOCB *acb = p;

    qemu_bh_delete(acb->bh);

    g_free(acb->buf);
    acb->buf = NULL;

    acb->common.cb(acb->common.opaque, acb->status);

    if (acb->task != NULL) {
        scsi_free_scsi_task(acb->task);
        acb->task = NULL;
    }

    qemu_aio_unref(acb);
}
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        qemu_free(ioreq);
    }

    qemu_free(blkdev->params);
    qemu_free(blkdev->mode);
    qemu_free(blkdev->type);
    qemu_free(blkdev->dev);
    qemu_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}
static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;
    AioContext *aio_context;

    qemu_bh_delete(data->bh);

    /* Prevent race with block_job_defer_to_main_loop() */
    aio_context_acquire(data->aio_context);

    /* Fetch BDS AioContext again, in case it has changed */
    aio_context = bdrv_get_aio_context(data->job->bs);
    aio_context_acquire(aio_context);

    data->fn(data->job, data->opaque);

    aio_context_release(aio_context);

    aio_context_release(data->aio_context);

    g_free(data);
}
void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *request;

    if (!dataplane) {
        return;
    }

    while (!QLIST_EMPTY(&dataplane->freelist)) {
        request = QLIST_FIRST(&dataplane->freelist);
        QLIST_REMOVE(request, list);
        qemu_iovec_destroy(&request->v);
        qemu_vfree(request->buf);
        g_free(request);
    }

    qemu_bh_delete(dataplane->bh);
    if (dataplane->iothread) {
        object_unref(OBJECT(dataplane->iothread));
    }
    g_free(dataplane);
}