/*
 * This function handles both assigning the ioeventfd handler and
 * registering it with the kernel.
 * assign: register/deregister ioeventfd with the kernel
 * set_handler: use the generic ioeventfd handler
 */
static int set_host_notifier_internal(DeviceState *proxy, VirtioBusState *bus,
                                      int n, bool assign, bool set_handler)
{
    VirtIODevice *vdev = virtio_bus_get_device(bus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    int ret = 0;

    if (!assign) {
        /* Teardown: unbind from the kernel, then drop handler and fd. */
        k->ioeventfd_assign(proxy, notifier, n, assign);
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        event_notifier_cleanup(notifier);
        return ret;
    }

    ret = event_notifier_init(notifier, 1);
    if (ret < 0) {
        error_report("%s: unable to init event notifier: %d", __func__, ret);
        return ret;
    }
    virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
    ret = k->ioeventfd_assign(proxy, notifier, n, assign);
    if (ret < 0) {
        error_report("%s: unable to assign ioeventfd: %d", __func__, ret);
        /* Unwind in reverse order of setup. */
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        event_notifier_cleanup(notifier);
        return ret;
    }
    return ret;
}
/*
 * Wire the guest->host (ioeventfd) notifier for queue n of a ccw device
 * into the kernel via the subchannel ioeventfd interface.
 */
static int virtio_ccw_set_guest2host_notifier(VirtioCcwDevice *dev, int n,
                                              bool assign, bool set_handler)
{
    VirtQueue *vq = virtio_get_queue(dev->vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    SubchDev *sch = dev->sch;
    uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;
    int r = 0;

    if (!assign) {
        /* Detach the handler before unbinding the fd from the kernel. */
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        s390_assign_subch_ioeventfd(event_notifier_get_fd(notifier),
                                    sch_id, n, assign);
        event_notifier_cleanup(notifier);
        return r;
    }

    r = event_notifier_init(notifier, 1);
    if (r < 0) {
        error_report("%s: unable to init event notifier: %d", __func__, r);
        return r;
    }
    virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
    r = s390_assign_subch_ioeventfd(event_notifier_get_fd(notifier),
                                    sch_id, n, assign);
    if (r < 0) {
        error_report("%s: unable to assign ioeventfd: %d", __func__, r);
        /* Roll back the handler and the notifier fd. */
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        event_notifier_cleanup(notifier);
        return r;
    }
    return r;
}
/* Tear down a SINT route, undoing kvm_hv_sint_route_create() in reverse. */
void kvm_hv_sint_route_destroy(HvSintRoute *sint_route)
{
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state,
                                          &sint_route->sint_set_notifier,
                                          sint_route->gsi);
    kvm_irqchip_release_virq(kvm_state, sint_route->gsi);
    /* Drop the ack handler before destroying its notifier. */
    event_notifier_set_handler(&sint_route->sint_ack_notifier, false, NULL);
    event_notifier_cleanup(&sint_route->sint_ack_notifier);
    event_notifier_cleanup(&sint_route->sint_set_notifier);
    g_free(sint_route);
}
/*
 * Create a Hyper-V SINT route for (vcpu_id, sint): two event notifiers
 * (set + ack), a KVM irqchip route, and an irqfd binding them together.
 * Returns the new route, or NULL on failure (nothing leaked).
 * Cleanup on error runs in exact reverse order of acquisition via the
 * goto chain below — do not reorder.
 */
HvSintRoute *kvm_hv_sint_route_create(uint32_t vcpu_id, uint32_t sint,
                                      HvSintAckClb sint_ack_clb)
{
    HvSintRoute *sint_route;
    int r, gsi;

    sint_route = g_malloc0(sizeof(*sint_route));
    r = event_notifier_init(&sint_route->sint_set_notifier, false);
    if (r) {
        goto err;
    }
    r = event_notifier_init(&sint_route->sint_ack_notifier, false);
    if (r) {
        goto err_sint_set_notifier;
    }
    /* Acks from the guest are delivered through this handler. */
    event_notifier_set_handler(&sint_route->sint_ack_notifier, false,
                               kvm_hv_sint_ack_handler);
    gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vcpu_id, sint);
    if (gsi < 0) {
        goto err_gsi;
    }
    /* Bind: writing sint_set_notifier injects the SINT via the irqfd. */
    r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
                                           &sint_route->sint_set_notifier,
                                           &sint_route->sint_ack_notifier,
                                           gsi);
    if (r) {
        goto err_irqfd;
    }
    sint_route->gsi = gsi;
    sint_route->sint_ack_clb = sint_ack_clb;
    sint_route->vcpu_id = vcpu_id;
    sint_route->sint = sint;
    return sint_route;

err_irqfd:
    kvm_irqchip_release_virq(kvm_state, gsi);
err_gsi:
    event_notifier_set_handler(&sint_route->sint_ack_notifier, false, NULL);
    event_notifier_cleanup(&sint_route->sint_ack_notifier);
err_sint_set_notifier:
    event_notifier_cleanup(&sint_route->sint_set_notifier);
err:
    g_free(sint_route);
    return NULL;
}
/* Unregister and destroy all ioeventfds owned by peer 'posn'. */
static void close_guest_eventfds(IVShmemState *s, int posn)
{
    int i, nb_fds;

    if (!ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        return;
    }
    /* Reject out-of-range peer indices. */
    if (posn < 0 || posn >= s->nb_peers) {
        return;
    }

    nb_fds = s->peers[posn].nb_eventfds;

    /* Batch the MMIO region updates into one transaction. */
    memory_region_transaction_begin();
    for (i = 0; i < nb_fds; i++) {
        ivshmem_del_eventfd(s, posn, i);
    }
    memory_region_transaction_commit();

    for (i = 0; i < nb_fds; i++) {
        event_notifier_cleanup(&s->peers[posn].eventfds[i]);
    }

    g_free(s->peers[posn].eventfds);
    s->peers[posn].nb_eventfds = 0;
}
/*
 * Verify that a second thread cannot acquire the AioContext while this
 * thread is blocked inside aio_poll() holding it; the acquisition only
 * succeeds after we release the context.  The interleaving below is
 * order-critical: start_lock gates the helper thread's first attempt.
 */
static void test_acquire(void)
{
    QemuThread thread;
    AcquireTestData data;

    /* Dummy event notifier ensures aio_poll() will block */
    event_notifier_init(&data.notifier, false);
    set_event_notifier(ctx, &data.notifier, dummy_notifier_read);
    g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */

    qemu_mutex_init(&data.start_lock);
    qemu_mutex_lock(&data.start_lock);
    data.thread_acquired = false;

    qemu_thread_create(&thread, "test_acquire_thread",
                       test_acquire_thread,
                       &data, QEMU_THREAD_JOINABLE);

    /* Block in aio_poll(), let other thread kick us and acquire context */
    aio_context_acquire(ctx);
    qemu_mutex_unlock(&data.start_lock); /* let the thread run */
    g_assert(aio_poll(ctx, true));
    /* While we held the context, the helper must not have acquired it. */
    g_assert(!data.thread_acquired);
    aio_context_release(ctx);

    qemu_thread_join(&thread);
    set_event_notifier(ctx, &data.notifier, NULL);
    event_notifier_cleanup(&data.notifier);

    /* After release + join, the helper thread succeeded. */
    g_assert(data.thread_acquired);
}
/*
 * Attach or detach the guest notifier for queue n of a virtio-mmio device,
 * updating the device's notifier mask when it supports one.
 */
static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int err = event_notifier_init(notifier, 0);
        if (err < 0) {
            return err;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    /* Unmask on assign, mask on deassign, if the device uses masking. */
    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}
/*
 * Destroy a win32 AIO state.  The caller must have detached it from any
 * AioContext first.
 */
void win32_aio_cleanup(QEMUWin32AIOState *aio)
{
    assert(!aio->is_aio_context_attached);
    CloseHandle(aio->hIOCP);
    event_notifier_cleanup(&aio->e);
    g_free(aio);
}
/* Unregister and free the eventfds of peer 'posn', reporting bad indices. */
static void close_peer_eventfds(IVShmemState *s, int posn)
{
    int i, nb_fds;

    if (!ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        return;
    }
    if (posn < 0 || posn >= s->nb_peers) {
        error_report("invalid peer %d", posn);
        return;
    }

    nb_fds = s->peers[posn].nb_eventfds;

    /* Collapse all region updates into a single transaction. */
    memory_region_transaction_begin();
    for (i = 0; i < nb_fds; i++) {
        ivshmem_del_eventfd(s, posn, i);
    }
    memory_region_transaction_commit();

    for (i = 0; i < nb_fds; i++) {
        event_notifier_cleanup(&s->peers[posn].eventfds[i]);
    }

    g_free(s->peers[posn].eventfds);
    s->peers[posn].nb_eventfds = 0;
}
/* GSource finalize hook: detach and destroy the context's event notifier. */
static void aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;

    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
}
/*
 * GSource finalize hook: release everything the AioContext owns.
 * The thread pool must go first so no worker touches the notifier
 * or bh state while they are being torn down.
 */
static void aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;

    thread_pool_free(ctx->thread_pool);
    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_mutex_destroy(&ctx->bh_lock);
    g_array_free(ctx->pollfds, TRUE);
}
/* Free a linux-aio state: destroy its notifier and the kernel AIO context. */
void laio_cleanup(void *s_)
{
    struct qemu_laio_state *s = s_;

    event_notifier_cleanup(&s->e);

    /* io_destroy() failure leaves nothing to recover; just report it. */
    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n", __func__,
                        &s->ctx);
    }
    g_free(s);
}
/* Device uninit: quiesce the device, then free per-test notifiers and data. */
static void pci_testdev_uninit(PCIDevice *dev)
{
    PCITestDevState *d = PCI_TEST_DEV(dev);
    int i;

    pci_testdev_reset(d);
    for (i = 0; i < IOTEST_MAX; ++i) {
        /* Only tests that allocated a notifier have one to clean up. */
        if (d->tests[i].hasnotifier) {
            event_notifier_cleanup(&d->tests[i].notifier);
        }
        g_free(d->tests[i].hdr);
    }
    g_free(d->tests);
}
/*
 * Device uninit: quiesce the device, free per-test notifiers and data,
 * then destroy the MMIO and port I/O regions it registered.
 */
static void pci_testdev_uninit(PCIDevice *dev)
{
    PCITestDevState *d = DO_UPCAST(PCITestDevState, dev, dev);
    int i;

    pci_testdev_reset(d);
    for (i = 0; i < IOTEST_MAX; ++i) {
        /* Only tests that allocated a notifier have one to clean up. */
        if (d->tests[i].hasnotifier) {
            event_notifier_cleanup(&d->tests[i].notifier);
        }
        g_free(d->tests[i].hdr);
    }
    g_free(d->tests);
    memory_region_destroy(&d->mmio);
    memory_region_destroy(&d->portio);
}
/*
 * Unregister and destroy all ioeventfds owned by peer 'posn'.
 *
 * 'posn' originates from peer messages on the ivshmem server socket and
 * therefore cannot be trusted: validate it against s->nb_peers before
 * indexing s->peers[], otherwise a malicious/buggy server triggers an
 * out-of-bounds access (the same hardening the checked variant of this
 * function applies; cf. CVE-2014-5388).
 */
static void close_guest_eventfds(IVShmemState *s, int posn)
{
    int i, guest_curr_max;

    /* Reject out-of-range peer indices from untrusted input. */
    if (posn < 0 || posn >= s->nb_peers) {
        return;
    }

    guest_curr_max = s->peers[posn].nb_eventfds;

    /* Batch the MMIO region updates into a single transaction. */
    memory_region_transaction_begin();
    for (i = 0; i < guest_curr_max; i++) {
        ivshmem_del_eventfd(s, posn, i);
    }
    memory_region_transaction_commit();

    for (i = 0; i < guest_curr_max; i++) {
        event_notifier_cleanup(&s->peers[posn].eventfds[i]);
    }

    g_free(s->peers[posn].eventfds);
    s->peers[posn].nb_eventfds = 0;
}
/*
 * This function switches ioeventfd on/off in the device.
 * The caller must set or clear the handlers for the EventNotifier.
 *
 * Returns 0 on success, negative errno on failure.  Note the control
 * flow: the assign-failure path jumps to cleanup_event_notifier, and the
 * deassign path falls through into the same label, so the notifier is
 * drained and destroyed on both of those paths.
 */
int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign)
{
    VirtIODevice *vdev = virtio_bus_get_device(bus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
    DeviceState *proxy = DEVICE(BUS(bus)->parent);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    int r = 0;

    /* Transport without ioeventfd support. */
    if (!k->ioeventfd_assign) {
        return -ENOSYS;
    }

    if (assign) {
        assert(!bus->ioeventfd_started);
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            error_report("%s: unable to init event notifier: %s (%d)",
                         __func__, strerror(-r), r);
            return r;
        }
        r = k->ioeventfd_assign(proxy, notifier, n, true);
        if (r < 0) {
            error_report("%s: unable to assign ioeventfd: %d", __func__, r);
            goto cleanup_event_notifier;
        }
        return 0;
    } else {
        /* Deassigning before start is a no-op. */
        if (!bus->ioeventfd_started) {
            return 0;
        }
        k->ioeventfd_assign(proxy, notifier, n, false);
        /* falls through to cleanup_event_notifier */
    }

cleanup_event_notifier:
    /* Test and clear notifier after disabling event,
     * in case poll callback didn't have time to run.
     */
    virtio_queue_host_notifier_read(notifier);
    event_notifier_cleanup(notifier);
    return r;
}
/*
 * Unregister (when ioeventfd is in use) and destroy the eventfds of peer
 * 'posn'.  The index must already have been validated by the caller.
 */
static void close_peer_eventfds(IVShmemState *s, int posn)
{
    int i, nb_fds;

    assert(posn >= 0 && posn < s->nb_peers);

    nb_fds = s->peers[posn].nb_eventfds;

    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        /* Batch region updates into one transaction. */
        memory_region_transaction_begin();
        for (i = 0; i < nb_fds; i++) {
            ivshmem_del_eventfd(s, posn, i);
        }
        memory_region_transaction_commit();
    }

    for (i = 0; i < nb_fds; i++) {
        event_notifier_cleanup(&s->peers[posn].eventfds[i]);
    }

    g_free(s->peers[posn].eventfds);
    s->peers[posn].nb_eventfds = 0;
}
/*
 * Allocate and initialize a win32 AIO state (event notifier + I/O
 * completion port).  Returns NULL on failure with nothing leaked.
 */
QEMUWin32AIOState *win32_aio_init(void)
{
    QEMUWin32AIOState *s = g_malloc0(sizeof(*s));

    if (event_notifier_init(&s->e, false) < 0) {
        g_free(s);
        return NULL;
    }

    s->hIOCP = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0);
    if (s->hIOCP == NULL) {
        event_notifier_cleanup(&s->e);
        g_free(s);
        return NULL;
    }

    return s;
}
void *laio_init(void) { struct qemu_laio_state *s; s = g_malloc0(sizeof(*s)); if (event_notifier_init(&s->e, false) < 0) { goto out_free_state; } if (io_setup(MAX_EVENTS, &s->ctx) != 0) { goto out_close_efd; } ioq_init(&s->io_q); return s; out_close_efd: event_notifier_cleanup(&s->e); out_free_state: g_free(s); return NULL; }
/* Attach or detach the guest notifier for queue n of a ccw virtio device. */
static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
                                         bool assign, bool with_irqfd)
{
    VirtQueue *vq = virtio_get_queue(dev->vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(dev->vdev);
    int r;

    if (!assign) {
        /* Mask first, then drop the handler and the notifier. */
        if (k->guest_notifier_mask) {
            k->guest_notifier_mask(dev->vdev, n, true);
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
        return 0;
    }

    r = event_notifier_init(notifier, 0);
    if (r < 0) {
        return r;
    }
    virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    /* We do not support irqfd for classic I/O interrupts, because the
     * classic interrupts are intermixed with the subchannel status, that
     * is queried with test subchannel. We want to use vhost, though.
     * Lets make sure to have vhost running and wire up the irq fd to
     * land in qemu (and only the irq fd) in this code.
     */
    if (k->guest_notifier_mask) {
        k->guest_notifier_mask(dev->vdev, n, false);
    }
    /* get lost events and re-inject */
    if (k->guest_notifier_pending &&
        k->guest_notifier_pending(dev->vdev, n)) {
        event_notifier_set(notifier);
    }

    return 0;
}