/*
 * Assign or deassign guest notifiers for every active virtqueue of the
 * device plugged into this virtio-mmio proxy.
 *
 * @d:      the proxy device (a VirtIOMMIOProxy)
 * @nvqs:   number of virtqueues requested by the caller; clamped to
 *          VIRTIO_QUEUE_MAX below
 * @assign: true to set up notifiers, false to tear them down
 *
 * Returns 0 on success, or the negative error from the first failing
 * per-queue assignment (after undoing the queues already assigned).
 */
static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        /* A queue with zero entries marks the end of the active queues. */
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /*
     * We get here on assignment failure for queue n. Recover by undoing
     * the successful assignments for VQs 0 .. n-1. Deassignment cannot
     * itself have failed, hence the assert.
     */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}
/*
 * This function handles both assigning the ioeventfd handler and
 * registering it with the kernel.
 * assign: register/deregister ioeventfd with the kernel
 * set_handler: use the generic ioeventfd handler
 *
 * On the assign path, the notifier is initialised and the fd handler is
 * installed *before* handing the eventfd to the transport; if the
 * transport rejects it, both steps are rolled back in reverse order.
 * On the deassign path the transport is detached first, then the handler
 * is removed and the notifier destroyed.
 *
 * Returns 0 on success or a negative error code.
 */
static int set_host_notifier_internal(DeviceState *proxy, VirtioBusState *bus,
                                      int n, bool assign, bool set_handler)
{
    VirtIODevice *vdev = virtio_bus_get_device(bus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    int r = 0;

    if (assign) {
        /* Second argument: notifier is created active (signalled). */
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            error_report("%s: unable to init event notifier: %d", __func__, r);
            return r;
        }
        virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
        r = k->ioeventfd_assign(proxy, notifier, n, assign);
        if (r < 0) {
            error_report("%s: unable to assign ioeventfd: %d", __func__, r);
            /* Undo in reverse order of the steps above. */
            virtio_queue_set_host_notifier_fd_handler(vq, false, false);
            event_notifier_cleanup(notifier);
            return r;
        }
    } else {
        /* Deassign: return value deliberately ignored (best effort). */
        k->ioeventfd_assign(proxy, notifier, n, assign);
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        event_notifier_cleanup(notifier);
    }
    return r;
}
/*
 * Set up or tear down the guest notifier for a single virtqueue of the
 * device behind this virtio-mmio proxy, and update the device's
 * notifier-mask state when the device class supports masking.
 *
 * Returns 0 on success, or the negative error from event_notifier_init().
 */
static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (!assign) {
        /* Detach the fd handler first, then destroy the notifier. */
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    } else {
        int ret = event_notifier_init(notifier, 0);

        if (ret < 0) {
            return ret;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    }

    /* Unmask the queue when assigning, mask it again when deassigning. */
    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}
/*
 * Enable ioeventfd for the bus. Delegates the actual notifier setup to
 * the device class unless another user currently holds ownership of the
 * notifiers (ioeventfd_grabbed).
 *
 * Returns 0 on success (or if already started), -ENOSYS when the
 * transport cannot do ioeventfd, or the device class's error code.
 */
int virtio_bus_start_ioeventfd(VirtioBusState *bus)
{
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
    DeviceState *proxy = DEVICE(BUS(bus)->parent);
    VirtIODevice *vdev = virtio_bus_get_device(bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (!k->ioeventfd_assign || !k->ioeventfd_enabled(proxy)) {
        return -ENOSYS;
    }
    if (bus->ioeventfd_started) {
        return 0;
    }

    /* Only set our notifier if we have ownership. */
    if (!bus->ioeventfd_grabbed) {
        int rc = vdc->start_ioeventfd(vdev);

        if (rc < 0) {
            error_report("%s: failed. Fallback to userspace (slower).",
                         __func__);
            return rc;
        }
    }

    bus->ioeventfd_started = true;
    return 0;
}
/* Reset the virtio_bus: reset the plugged device, if there is one. */
void virtio_bus_reset(VirtioBusState *bus)
{
    VirtIODevice *dev = virtio_bus_get_device(bus);

    DPRINTF("%s: reset device.\n", BUS(bus)->name);
    if (dev == NULL) {
        /* Nothing plugged into the bus: nothing to reset. */
        return;
    }
    virtio_reset(dev);
}
/*
 * Get the features of the plugged device.
 * A device must be plugged and must implement get_features.
 */
uint32_t virtio_bus_get_vdev_features(VirtioBusState *bus,
                                      uint32_t requested_features)
{
    VirtIODevice *dev = virtio_bus_get_device(bus);
    VirtioDeviceClass *klass;

    assert(dev != NULL);
    klass = VIRTIO_DEVICE_GET_CLASS(dev);
    assert(klass->get_features != NULL);
    return klass->get_features(dev, requested_features);
}
/*
 * Set config of the plugged device.
 * A device must be plugged; set_config is optional and silently skipped
 * when the device class does not provide it.
 */
void virtio_bus_set_vdev_config(VirtioBusState *bus, uint8_t *config)
{
    VirtIODevice *dev = virtio_bus_get_device(bus);
    VirtioDeviceClass *klass;

    assert(dev != NULL);
    klass = VIRTIO_DEVICE_GET_CLASS(dev);
    if (klass->set_config == NULL) {
        return;
    }
    klass->set_config(dev, config);
}
/*
 * Refresh the proxy's IRQ line from the device's interrupt status.
 * The line is raised whenever any ISR bit is set. @vector is unused
 * by the mmio transport.
 */
static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
    VirtIODevice *dev = virtio_bus_get_device(&proxy->bus);
    int level;

    if (dev == NULL) {
        return;
    }
    level = atomic_read(&dev->isr) ? 1 : 0;
    DPRINTF("virtio_mmio setting IRQ %d\n", level);
    qemu_set_irq(proxy->irq, level);
}
/*
 * Get bad features of the plugged device.
 * Returns 0 when the device class does not report any.
 */
uint32_t virtio_bus_get_vdev_bad_features(VirtioBusState *bus)
{
    VirtIODevice *dev = virtio_bus_get_device(bus);
    VirtioDeviceClass *klass;

    assert(dev != NULL);
    klass = VIRTIO_DEVICE_GET_CLASS(dev);
    return klass->bad_features != NULL ? klass->bad_features(dev) : 0;
}
/* Disable ioeventfd for the bus by delegating to the device class. */
void virtio_bus_stop_ioeventfd(VirtioBusState *bus)
{
    VirtIODevice *dev;

    if (!bus->ioeventfd_started) {
        /* Already stopped (or never started): nothing to do. */
        return;
    }
    dev = virtio_bus_get_device(bus);
    VIRTIO_DEVICE_GET_CLASS(dev)->stop_ioeventfd(dev);
    bus->ioeventfd_started = false;
}
/*
 * Disable ioeventfd for the bus. The device-class teardown is skipped
 * when another user currently owns the notifiers (ioeventfd_grabbed),
 * but the started flag is cleared either way.
 */
void virtio_bus_stop_ioeventfd(VirtioBusState *bus)
{
    if (!bus->ioeventfd_started) {
        return;
    }

    /* Only remove our notifier if we have ownership. */
    if (!bus->ioeventfd_grabbed) {
        VirtIODevice *dev = virtio_bus_get_device(bus);
        VirtioDeviceClass *klass = VIRTIO_DEVICE_GET_CLASS(dev);

        klass->stop_ioeventfd(dev);
    }
    bus->ioeventfd_started = false;
}
/*
 * This function switches ioeventfd on/off in the device.
 * The caller must set or clear the handlers for the EventNotifier.
 *
 * Assign path: initialise the notifier, then hand it to the transport;
 * on transport failure, fall into the cleanup tail below. Deassign
 * path: detach from the transport, then deliberately fall through the
 * cleanup_event_notifier label to drain and destroy the notifier.
 *
 * Returns 0 on success, -ENOSYS when the transport has no
 * ioeventfd_assign hook, or a negative error code.
 */
int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign)
{
    VirtIODevice *vdev = virtio_bus_get_device(bus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
    DeviceState *proxy = DEVICE(BUS(bus)->parent);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    int r = 0;

    if (!k->ioeventfd_assign) {
        return -ENOSYS;
    }

    if (assign) {
        /* Per-queue assignment must happen while ioeventfd is stopped. */
        assert(!bus->ioeventfd_started);
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            error_report("%s: unable to init event notifier: %s (%d)",
                         __func__, strerror(-r), r);
            return r;
        }
        r = k->ioeventfd_assign(proxy, notifier, n, true);
        if (r < 0) {
            error_report("%s: unable to assign ioeventfd: %d", __func__, r);
            goto cleanup_event_notifier;
        }
        return 0;
    } else {
        if (!bus->ioeventfd_started) {
            /* Never started: there is no notifier to tear down. */
            return 0;
        }
        k->ioeventfd_assign(proxy, notifier, n, false);
    }

    /* NOTE: reached by goto from the assign path AND by fallthrough
     * from the deassign path above.
     */
cleanup_event_notifier:
    /* Test and clear notifier after disabling event,
     * in case poll callback didn't have time to run.
     */
    virtio_queue_host_notifier_read(notifier);
    event_notifier_cleanup(notifier);
    return r;
}
/*
 * Disable ioeventfd for every active virtqueue on the bus, then tell
 * the transport that ioeventfd is stopped.
 *
 * This variant uses the transport's ioeventfd_started/ioeventfd_set_started
 * callbacks for state tracking rather than a flag on the bus.
 */
void virtio_bus_stop_ioeventfd(VirtioBusState *bus)
{
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
    DeviceState *proxy = DEVICE(BUS(bus)->parent);
    VirtIODevice *vdev;
    int n, r;

    /* Nothing to do if the transport can't track state or isn't started. */
    if (!k->ioeventfd_started || !k->ioeventfd_started(proxy)) {
        return;
    }
    vdev = virtio_bus_get_device(bus);
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        /* Skip inactive queues (zero entries). */
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        r = set_host_notifier_internal(proxy, bus, n, false, false);
        /* Deassignment is not expected to fail. */
        assert(r >= 0);
    }
    /* Second 'false': this is not an error-driven stop. */
    k->ioeventfd_set_started(proxy, false, false);
}
/*
 * Enable ioeventfd for every active virtqueue on the bus, then tell the
 * transport that ioeventfd is started.
 *
 * If any queue fails to assign, all previously assigned queues are
 * rolled back and the transport is told to stop with the error flag
 * set, falling back to userspace handling.
 */
void virtio_bus_start_ioeventfd(VirtioBusState *bus)
{
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
    DeviceState *proxy = DEVICE(BUS(bus)->parent);
    VirtIODevice *vdev;
    int n, r;

    /* Bail out if the transport can't track state or is already started. */
    if (!k->ioeventfd_started || k->ioeventfd_started(proxy)) {
        return;
    }
    if (k->ioeventfd_disabled(proxy)) {
        return;
    }
    vdev = virtio_bus_get_device(bus);
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        /* Skip inactive queues (zero entries). */
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        r = set_host_notifier_internal(proxy, bus, n, true, true);
        if (r < 0) {
            goto assign_error;
        }
    }
    k->ioeventfd_set_started(proxy, true, false);
    return;

assign_error:
    /* Roll back the queues assigned before the failure at queue n. */
    while (--n >= 0) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        r = set_host_notifier_internal(proxy, bus, n, false, false);
        assert(r >= 0);
    }
    /* Second 'true': report that this stop is due to an error. */
    k->ioeventfd_set_started(proxy, false, true);
    error_report("%s: failed. Fallback to userspace (slower).", __func__);
}
/* Get the device id of the plugged device. A device must be plugged. */
uint16_t virtio_bus_get_vdev_id(VirtioBusState *bus)
{
    VirtIODevice *dev = virtio_bus_get_device(bus);

    assert(dev);
    return dev->device_id;
}
/* Get the config_len field of the plugged device. A device must be plugged. */
size_t virtio_bus_get_vdev_config_len(VirtioBusState *bus)
{
    VirtIODevice *dev = virtio_bus_get_device(bus);

    assert(dev);
    return dev->config_len;
}
/*
 * MMIO read handler for the legacy (version 1) virtio-mmio register
 * block. Offsets at or above VIRTIO_MMIO_CONFIG index into the device
 * config space and honour 1/2/4-byte accesses; all other registers
 * require 4-byte accesses.
 */
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    DPRINTF("virtio_mmio_read offset 0x%x\n", (int)offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            return VIRT_VERSION;
        case VIRTIO_MMIO_VENDORID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        /* Device config space: rebase the offset and dispatch on width. */
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            return virtio_config_readb(vdev, offset);
        case 2:
            return virtio_config_readw(vdev, offset);
        case 4:
            return virtio_config_readl(vdev, offset);
        default:
            abort();
        }
    }
    if (size != 4) {
        /* Non-config registers only accept 32-bit accesses. */
        DPRINTF("wrong size access to register!\n");
        return 0;
    }
    switch (offset) {
    case VIRTIO_MMIO_MAGIC:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        return VIRT_VERSION;
    case VIRTIO_MMIO_DEVICEID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDORID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_HOSTFEATURES:
        /* Legacy devices only expose feature bits 0-31 (selector 0). */
        if (proxy->host_features_sel) {
            return 0;
        }
        return vdev->host_features;
    case VIRTIO_MMIO_QUEUENUMMAX:
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
    case VIRTIO_MMIO_QUEUEPFN:
        /* Report the ring address in guest-page-sized frames. */
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_INTERRUPTSTATUS:
        return atomic_read(&vdev->isr);
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_HOSTFEATURESSEL:
    case VIRTIO_MMIO_GUESTFEATURES:
    case VIRTIO_MMIO_GUESTFEATURESSEL:
    case VIRTIO_MMIO_GUESTPAGESIZE:
    case VIRTIO_MMIO_QUEUESEL:
    case VIRTIO_MMIO_QUEUENUM:
    case VIRTIO_MMIO_QUEUEALIGN:
    case VIRTIO_MMIO_QUEUENOTIFY:
    case VIRTIO_MMIO_INTERRUPTACK:
        DPRINTF("read of write-only register\n");
        return 0;
    default:
        DPRINTF("bad register offset\n");
        return 0;
    }
    return 0;
}
/*
 * MMIO write handler for the legacy (version 1) virtio-mmio register
 * block. Offsets at or above VIRTIO_MMIO_CONFIG index into the device
 * config space and honour 1/2/4-byte accesses; all other registers
 * require 4-byte accesses. Writes to read-only or unknown registers
 * are logged and ignored.
 */
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    DPRINTF("virtio_mmio_write offset 0x%x value 0x%" PRIx64 "\n",
            (int)offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        /* Device config space: rebase the offset and dispatch on width. */
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            virtio_config_writeb(vdev, offset, value);
            break;
        case 2:
            virtio_config_writew(vdev, offset, value);
            break;
        case 4:
            virtio_config_writel(vdev, offset, value);
            break;
        default:
            abort();
        }
        return;
    }
    if (size != 4) {
        /* Non-config registers only accept 32-bit accesses. */
        DPRINTF("wrong size access to register!\n");
        return;
    }
    switch (offset) {
    case VIRTIO_MMIO_HOSTFEATURESSEL:
        proxy->host_features_sel = value;
        break;
    case VIRTIO_MMIO_GUESTFEATURES:
        /* Legacy devices only accept feature bits 0-31 (selector 0). */
        if (!proxy->guest_features_sel) {
            virtio_set_features(vdev, value);
        }
        break;
    case VIRTIO_MMIO_GUESTFEATURESSEL:
        proxy->guest_features_sel = value;
        break;
    case VIRTIO_MMIO_GUESTPAGESIZE:
        /* Record the guest's page size as a shift; reject non-power-of-2
         * or zero values by falling back to a shift of 0.
         */
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        DPRINTF("guest page size %" PRIx64 " shift %d\n", value,
                proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUESEL:
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUENUM:
        DPRINTF("mmio_queue write %d max %d\n", (int)value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);
        /* Note: only call this function for legacy devices */
        virtio_queue_update_rings(vdev, vdev->queue_sel);
        break;
    case VIRTIO_MMIO_QUEUEALIGN:
        /* Note: this is only valid for legacy devices */
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUEPFN:
        /* Writing PFN 0 means "release the queue", i.e. reset. */
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
    case VIRTIO_MMIO_QUEUENOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPTACK:
        /* Clear the acknowledged ISR bits and recompute the IRQ line. */
        atomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        /* Stop ioeventfd before DRIVER_OK is cleared, start it after
         * DRIVER_OK is set, so notifiers are live exactly while the
         * driver has declared the device ready.
         */
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }
        virtio_set_status(vdev, value & 0xff);
        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }
        /* Writing status 0 resets the device. */
        if (vdev->status == 0) {
            virtio_reset(vdev);
        }
        break;
    case VIRTIO_MMIO_MAGIC:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICEID:
    case VIRTIO_MMIO_VENDORID:
    case VIRTIO_MMIO_HOSTFEATURES:
    case VIRTIO_MMIO_QUEUENUMMAX:
    case VIRTIO_MMIO_INTERRUPTSTATUS:
        DPRINTF("write to readonly register\n");
        break;
    default:
        DPRINTF("bad register offset\n");
    }
}