/*
 * virtio-ccw transport-specific helper: create the host notifier for
 * virtqueue n and register it as a subchannel ioeventfd with the kernel.
 */
static int virtio_ccw_set_guest2host_notifier(VirtioCcwDevice *dev, int n,
                                              bool assign, bool set_handler)
{
    VirtQueue *vq = virtio_get_queue(dev->vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    int r = 0;
    SubchDev *sch = dev->sch;
    uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;

    if (assign) {
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            error_report("%s: unable to init event notifier: %d",
                         __func__, r);
            return r;
        }
        virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
        r = s390_assign_subch_ioeventfd(event_notifier_get_fd(notifier),
                                        sch_id, n, assign);
        if (r < 0) {
            error_report("%s: unable to assign ioeventfd: %d",
                         __func__, r);
            virtio_queue_set_host_notifier_fd_handler(vq, false, false);
            event_notifier_cleanup(notifier);
            return r;
        }
    } else {
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        s390_assign_subch_ioeventfd(event_notifier_get_fd(notifier),
                                    sch_id, n, assign);
        event_notifier_cleanup(notifier);
    }
    return r;
}
/*
 * This function handles both assigning the ioeventfd handler and
 * registering it with the kernel.
 * assign: register/deregister ioeventfd with the kernel
 * set_handler: use the generic ioeventfd handler
 */
static int set_host_notifier_internal(DeviceState *proxy, VirtioBusState *bus,
                                      int n, bool assign, bool set_handler)
{
    VirtIODevice *vdev = virtio_bus_get_device(bus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    int r = 0;

    if (assign) {
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            error_report("%s: unable to init event notifier: %d",
                         __func__, r);
            return r;
        }
        virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
        r = k->ioeventfd_assign(proxy, notifier, n, assign);
        if (r < 0) {
            error_report("%s: unable to assign ioeventfd: %d",
                         __func__, r);
            virtio_queue_set_host_notifier_fd_handler(vq, false, false);
            event_notifier_cleanup(notifier);
            return r;
        }
    } else {
        k->ioeventfd_assign(proxy, notifier, n, assign);
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        event_notifier_cleanup(notifier);
    }
    return r;
}
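For context, a hedged sketch of how a transport-independent caller might drive this helper across all virtqueues; start_ioeventfd_all() and its rollback loop are illustrative assumptions, not code from the QEMU tree.

/*
 * Illustrative sketch only: enable ioeventfd for every virtqueue via the
 * helper above, undoing already-assigned queues if one fails.
 * The function name and nvqs parameter are assumptions for illustration.
 */
static int start_ioeventfd_all(DeviceState *proxy, VirtioBusState *bus, int nvqs)
{
    int n, r;

    for (n = 0; n < nvqs; n++) {
        /* assign=true registers the eventfd with the kernel;
         * set_handler=true installs the generic virtqueue handler. */
        r = set_host_notifier_internal(proxy, bus, n, true, true);
        if (r < 0) {
            /* Roll back the queues that were already set up. */
            while (--n >= 0) {
                set_host_notifier_internal(proxy, bus, n, false, false);
            }
            return r;
        }
    }
    return 0;
}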
/* Context: QEMU global mutex held */
void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
    int r;

    if (vblk->dataplane_started || s->starting) {
        return;
    }

    s->starting = true;
    s->vq = virtio_get_queue(s->vdev, 0);

    /* Set up guest notifier (irq) */
    r = k->set_guest_notifiers(qbus->parent, 1, true);
    if (r != 0) {
        fprintf(stderr, "virtio-blk failed to set guest notifier (%d), "
                "ensure -enable-kvm is set\n", r);
        goto fail_guest_notifiers;
    }
    s->guest_notifier = virtio_queue_get_guest_notifier(s->vq);

    /* Set up virtqueue notify */
    r = k->set_host_notifier(qbus->parent, 0, true);
    if (r != 0) {
        fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
        goto fail_host_notifier;
    }

    s->starting = false;
    vblk->dataplane_started = true;
    trace_virtio_blk_data_plane_start(s);

    blk_set_aio_context(s->conf->conf.blk, s->ctx);

    /* Kick right away to begin processing requests already in vring */
    event_notifier_set(virtio_queue_get_host_notifier(s->vq));

    /* Get this show started by hooking up our callbacks */
    aio_context_acquire(s->ctx);
    virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx,
                                               virtio_blk_data_plane_handle_output);
    aio_context_release(s->ctx);
    return;

  fail_host_notifier:
    k->set_guest_notifiers(qbus->parent, 1, false);
  fail_guest_notifiers:
    vblk->dataplane_disabled = true;
    s->starting = false;
    vblk->dataplane_started = true;
}
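For symmetry, here is a simplified, hedged sketch of the inverse teardown ordering. It is not the verbatim virtio_blk_data_plane_stop() and omits details such as request draining; example_data_plane_stop() is a hypothetical name.

/*
 * Simplified sketch, assuming the same fields and callbacks used by the
 * start path above: undo the start steps in reverse order.
 */
static void example_data_plane_stop(VirtIOBlockDataPlane *s)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);

    if (!vblk->dataplane_started) {
        return;
    }

    /* Stop servicing the queue in the IOThread before touching notifiers. */
    aio_context_acquire(s->ctx);
    virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, NULL);
    blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());
    aio_context_release(s->ctx);

    /* Undo host and guest notifiers in the reverse order of start. */
    k->set_host_notifier(qbus->parent, 0, false);
    k->set_guest_notifiers(qbus->parent, 1, false);

    vblk->dataplane_started = false;
}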
/*
 * This function switches ioeventfd on/off in the device.
 * The caller must set or clear the handlers for the EventNotifier.
 */
int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign)
{
    VirtIODevice *vdev = virtio_bus_get_device(bus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
    DeviceState *proxy = DEVICE(BUS(bus)->parent);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    int r = 0;

    if (!k->ioeventfd_assign) {
        return -ENOSYS;
    }

    if (assign) {
        assert(!bus->ioeventfd_started);
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            error_report("%s: unable to init event notifier: %s (%d)",
                         __func__, strerror(-r), r);
            return r;
        }
        r = k->ioeventfd_assign(proxy, notifier, n, true);
        if (r < 0) {
            error_report("%s: unable to assign ioeventfd: %d", __func__, r);
            goto cleanup_event_notifier;
        }
        return 0;
    } else {
        if (!bus->ioeventfd_started) {
            return 0;
        }
        k->ioeventfd_assign(proxy, notifier, n, false);
    }

cleanup_event_notifier:
    /* Test and clear notifier after disabling event,
     * in case poll callback didn't have time to run.
     */
    virtio_queue_host_notifier_read(notifier);
    event_notifier_cleanup(notifier);
    return r;
}
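The header comment puts handler ownership on the caller. Below is a hedged, illustrative sketch (not verbatim QEMU code) of a vhost-style caller that only creates and registers the eventfds and rolls back on failure, leaving handler decisions to the device; example_enable_host_notifiers() and the nvqs parameter are hypothetical.

/*
 * Illustrative sketch: enable the host notifier for each queue of a device.
 * A vhost backend would then hand the resulting fds to the kernel rather
 * than installing QEMU-side handlers, consistent with the comment above.
 */
static int example_enable_host_notifiers(VirtioBusState *bus, int nvqs)
{
    int i, r;

    for (i = 0; i < nvqs; i++) {
        r = virtio_bus_set_host_notifier(bus, i, true);
        if (r < 0) {
            error_report("queue %d: failed to enable host notifier (%d)",
                         i, r);
            /* Undo the notifiers that were already assigned. */
            while (--i >= 0) {
                virtio_bus_set_host_notifier(bus, i, false);
            }
            return r;
        }
    }
    return 0;
}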
/*
 * Map one virtqueue's rings, program them into the vhost kernel driver,
 * and wire up the kick (host) and call (guest) eventfds.
 */
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    target_phys_addr_t s, l, a;
    int r;
    struct vhost_vring_file file = {
        .index = idx,
    };
    struct vhost_vring_state state = {
        .index = idx,
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        return -ENOSYS;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    /* Map the descriptor, available, used and ring areas into QEMU. */
    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }
    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    /* The host notifier becomes the kick eventfd handed to vhost. */
    r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, true);
    if (r < 0) {
        fprintf(stderr, "Error binding host notifier: %d\n", -r);
        goto fail_host_notifier;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }

    return 0;

fail_call:
fail_kick:
    vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
fail_host_notifier:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}