/*
 * Allocate and map the device's virtqueues.
 *
 * Interrupt resources are allocated first; with MSIX enabled, the
 * configuration-change vector is registered before any queue vectors.
 * For each queue, the host-advertised ring size is read back after
 * selecting the queue, the ring is allocated, and its physical address
 * is programmed into the device.
 */
static int
vtpci_alloc_virtqueues(device_t dev, int flags, int nvqs,
    struct vq_alloc_info *vq_info)
{
	struct vtpci_softc *sc;
	struct vtpci_virtqueue *vqp;
	struct vq_alloc_info *vqai;
	uint16_t size;
	int idx, error;

	sc = device_get_softc(dev);

	/* Refuse reallocation and out-of-range queue counts. */
	if (sc->vtpci_nvqs != 0 || nvqs <= 0 || nvqs > VIRTIO_MAX_VIRTQUEUES)
		return (EINVAL);

	error = vtpci_alloc_interrupts(sc, flags, nvqs, vq_info);
	if (error) {
		device_printf(dev, "cannot allocate interrupts\n");
		return (error);
	}

	if (sc->vtpci_flags & VIRTIO_PCI_FLAG_MSIX) {
		error = vtpci_register_msix_vector(sc,
		    VIRTIO_MSI_CONFIG_VECTOR, 0);
		if (error)
			return (error);
	}

	for (idx = 0; idx < nvqs; idx++) {
		vqp = &sc->vtpci_vqx[idx];
		vqai = &vq_info[idx];

		/* Select the queue, then read its size from the device. */
		vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, idx);
		size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);

		error = virtqueue_alloc(dev, idx, size,
		    VIRTIO_PCI_VRING_ALIGN, 0xFFFFFFFFUL, vqai, &vqp->vq);
		if (error)
			return (error);

		if (sc->vtpci_flags & VIRTIO_PCI_FLAG_MSIX) {
			error = vtpci_register_msix_vector(sc,
			    VIRTIO_MSI_QUEUE_VECTOR, vqp->ires_idx);
			if (error)
				return (error);
		}

		/* Hand the ring's physical address to the host. */
		vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
		    virtqueue_paddr(vqp->vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);

		*vqai->vqai_vq = vqp->vq;
		sc->vtpci_nvqs++;
	}

	return (0);
}
static int vtpci_set_host_msix_vectors(struct vtpci_softc *sc) { struct vtpci_interrupt *intr, *tintr; int idx, offset, error; intr = &sc->vtpci_device_interrupt; offset = VIRTIO_MSI_CONFIG_VECTOR; error = vtpci_register_msix_vector(sc, offset, intr); if (error) return (error); intr = sc->vtpci_msix_vq_interrupts; offset = VIRTIO_MSI_QUEUE_VECTOR; for (idx = 0; idx < sc->vtpci_nvqs; idx++) { vtpci_select_virtqueue(sc, idx); if (sc->vtpci_vqs[idx].vtv_no_intr) tintr = NULL; else tintr = intr; error = vtpci_register_msix_vector(sc, offset, tintr); if (error) break; /* * For shared MSIX, all the virtqueues share the first * interrupt. */ if ((sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) == 0) intr++; } return (error); }
/*
 * Redrive the device initialization after a reset without reallocating
 * the virtqueues.  This is a bit of an abuse of the specification, but
 * both VirtualBox and QEMU/KVM seem to play nice.  The host device
 * must not change from what was originally negotiated beyond what the
 * guest driver changed (MSIX state should not change, the number of
 * virtqueues and their sizes remain the same, etc).
 */
static int
vtpci_reinit(device_t dev, uint64_t features)
{
	struct vtpci_softc *sc;
	struct vtpci_virtqueue *vqp;
	struct virtqueue *vq;
	uint16_t size;
	int idx, error;

	sc = device_get_softc(dev);

	if (vtpci_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
		vtpci_stop(dev);

	/*
	 * Step the status quickly through ACK and DRIVER.  The device
	 * does not become usable again until vtpci_reinit_complete().
	 */
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);

	vtpci_negotiate_features(dev, features);

	if (sc->vtpci_flags & VIRTIO_PCI_FLAG_MSIX) {
		error = vtpci_register_msix_vector(sc,
		    VIRTIO_MSI_CONFIG_VECTOR, 0);
		if (error)
			return (error);
	}

	for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
		vqp = &sc->vtpci_vqx[idx];
		vq = vqp->vq;

		KASSERT(vq != NULL, ("vq %d not allocated", idx));

		/* Select the queue and re-read its host-side size. */
		vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, idx);
		size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);

		error = virtqueue_reinit(vq, size);
		if (error)
			return (error);

		if (sc->vtpci_flags & VIRTIO_PCI_FLAG_MSIX) {
			error = vtpci_register_msix_vector(sc,
			    VIRTIO_MSI_QUEUE_VECTOR, vqp->ires_idx);
			if (error)
				return (error);
		}

		/* Reprogram the ring's physical address. */
		vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
		    virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
	}

	return (0);
}