static void
vtpci_write_dev_config(device_t dev, bus_size_t offset, void *src, int length)
{
	struct vtpci_softc *sc;
	bus_size_t off;
	uint8_t *s;
	int size;

	sc = device_get_softc(dev);
	off = VIRTIO_PCI_CONFIG(sc) + offset;

	for (s = src; length > 0; s += size, off += size, length -= size) {
		if (length >= 4) {
			size = 4;
			vtpci_write_config_4(sc, off, *(uint32_t *)s);
		} else if (length >= 2) {
			size = 2;
			vtpci_write_config_2(sc, off, *(uint16_t *)s);
		} else {
			size = 1;
			vtpci_write_config_1(sc, off, *s);
		}
	}
}
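/*
 * Usage sketch (not part of the driver above): child devices do not call
 * vtpci_write_dev_config() directly; they reach it through the generic
 * config accessors in sys/dev/virtio/virtio.h. The example below assumes
 * virtio_write_device_config() and the standard virtio_net_config layout;
 * the vtfoo_* name is hypothetical.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <net/ethernet.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/network/virtio_net.h>

static void
vtfoo_set_hwaddr(device_t dev, uint8_t hwaddr[ETHER_ADDR_LEN])
{
	/*
	 * The write is routed through VIRTIO_BUS_WRITE_DEVICE_CONFIG()
	 * to the transport, which splits it into 4/2/1 byte accesses
	 * past the common virtio PCI header.
	 */
	virtio_write_device_config(dev,
	    offsetof(struct virtio_net_config, mac), hwaddr, ETHER_ADDR_LEN);
}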
static int
vtpci_register_msix_vector(struct vtpci_softc *sc, int offset,
    struct vtpci_interrupt *intr)
{
	device_t dev;
	uint16_t vector;

	dev = sc->vtpci_dev;

	if (intr != NULL) {
		/* Map from guest rid to host vector. */
		vector = intr->vti_rid - 1;
	} else
		vector = VIRTIO_MSI_NO_VECTOR;

	vtpci_write_config_2(sc, offset, vector);

	/* Read vector to determine if the host had sufficient resources. */
	if (vtpci_read_config_2(sc, offset) != vector) {
		device_printf(dev,
		    "insufficient host resources for MSIX interrupts\n");
		return (ENODEV);
	}

	return (0);
}
static int
vtpci_register_msix_vector(struct vtpci_softc *sc, int offset, int res_idx)
{
	device_t dev;
	uint16_t vector;

	dev = sc->vtpci_dev;

	if (offset != VIRTIO_MSI_CONFIG_VECTOR &&
	    offset != VIRTIO_MSI_QUEUE_VECTOR)
		return (EINVAL);

	if (res_idx != -1) {
		/* Map from rid to host vector. */
		vector = sc->vtpci_intr_res[res_idx].rid - 1;
	} else
		vector = VIRTIO_MSI_NO_VECTOR;

	/* The first resource is special; make sure it is used correctly. */
	if (res_idx == 0) {
		KASSERT(vector == 0, ("unexpected config vector"));
		KASSERT(offset == VIRTIO_MSI_CONFIG_VECTOR,
		    ("unexpected config offset"));
	}

	vtpci_write_config_2(sc, offset, vector);

	if (vtpci_read_config_2(sc, offset) != vector) {
		device_printf(dev, "insufficient host resources for "
		    "MSIX interrupts\n");
		return (ENODEV);
	}

	return (0);
}
static void
vtpci_notify_virtqueue(device_t dev, uint16_t queue)
{
	struct vtpci_softc *sc;

	sc = device_get_softc(dev);

	vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_NOTIFY, queue);
}
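/*
 * Usage sketch (not part of the driver above): a queue notification is
 * normally triggered from a child driver via virtqueue_notify(), which
 * calls back into the transport (VIRTIO_BUS_NOTIFY_VQ) and lands in
 * vtpci_notify_virtqueue(). This assumes the virtqueue API from
 * sys/dev/virtio/virtqueue.h; struct vtfoo_softc is hypothetical.
 */
#include <sys/param.h>
#include <sys/bus.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>

/* Hypothetical per-device softc holding a single virtqueue. */
struct vtfoo_softc {
	device_t		 vtfoo_dev;
	struct virtqueue	*vtfoo_vq;
};

static void
vtfoo_kick(struct vtfoo_softc *sc)
{
	/*
	 * ... virtqueue_enqueue() calls to post descriptors go here ...
	 *
	 * virtqueue_notify() skips the kick if the host suppressed
	 * notifications; otherwise the transport's notify method writes
	 * the queue index to VIRTIO_PCI_QUEUE_NOTIFY as shown above.
	 */
	virtqueue_notify(sc->vtfoo_vq);
}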
static void
vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *sc)
{
	int idx;

	if (sc->vtpci_flags & VTPCI_FLAG_MSIX) {
		vtpci_write_config_2(sc, VIRTIO_MSI_CONFIG_VECTOR,
		    VIRTIO_MSI_NO_VECTOR);

		for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
			vtpci_select_virtqueue(sc, idx);
			vtpci_write_config_2(sc, VIRTIO_MSI_QUEUE_VECTOR,
			    VIRTIO_MSI_NO_VECTOR);
		}
	}

	vtpci_free_interrupts(sc);
}
static int
vtpci_alloc_virtqueues(device_t dev, int flags, int nvqs,
    struct vq_alloc_info *vq_info)
{
	struct vtpci_softc *sc;
	struct vtpci_virtqueue *vqx;
	struct vq_alloc_info *info;
	int queue, error;
	uint16_t vq_size;

	sc = device_get_softc(dev);

	if (sc->vtpci_nvqs != 0 || nvqs <= 0 ||
	    nvqs > VIRTIO_MAX_VIRTQUEUES)
		return (EINVAL);

	error = vtpci_alloc_interrupts(sc, flags, nvqs, vq_info);
	if (error) {
		device_printf(dev, "cannot allocate interrupts\n");
		return (error);
	}

	if (sc->vtpci_flags & VIRTIO_PCI_FLAG_MSIX) {
		error = vtpci_register_msix_vector(sc,
		    VIRTIO_MSI_CONFIG_VECTOR, 0);
		if (error)
			return (error);
	}

	for (queue = 0; queue < nvqs; queue++) {
		vqx = &sc->vtpci_vqx[queue];
		info = &vq_info[queue];

		vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, queue);

		vq_size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);
		error = virtqueue_alloc(dev, queue, vq_size,
		    VIRTIO_PCI_VRING_ALIGN, 0xFFFFFFFFUL, info, &vqx->vq);
		if (error)
			return (error);

		if (sc->vtpci_flags & VIRTIO_PCI_FLAG_MSIX) {
			error = vtpci_register_msix_vector(sc,
			    VIRTIO_MSI_QUEUE_VECTOR, vqx->ires_idx);
			if (error)
				return (error);
		}

		vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
		    virtqueue_paddr(vqx->vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);

		*info->vqai_vq = vqx->vq;
		sc->vtpci_nvqs++;
	}

	return (0);
}
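/*
 * Usage sketch (not part of the driver above): a child driver reaches
 * vtpci_alloc_virtqueues() by filling in vq_alloc_info entries and calling
 * virtio_alloc_virtqueues(), the wrapper for VIRTIO_BUS_ALLOC_VIRTQUEUES().
 * This assumes VQ_ALLOC_INFO_INIT() and virtio_alloc_virtqueues() from
 * sys/dev/virtio/virtio.h; the vtfoo_* names and the interrupt handler are
 * hypothetical.
 */
#include <sys/param.h>
#include <sys/bus.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>

struct vtfoo_softc {
	device_t		 vtfoo_dev;
	struct virtqueue	*vtfoo_vq;
};

static void	vtfoo_vq_intr(void *);		/* hypothetical handler */

static int
vtfoo_alloc_virtqueue(struct vtfoo_softc *sc)
{
	struct vq_alloc_info vq_info;
	device_t dev;

	dev = sc->vtfoo_dev;

	/*
	 * One queue, no indirect descriptors, serviced by vtfoo_vq_intr()
	 * with the softc as its argument.
	 */
	VQ_ALLOC_INFO_INIT(&vq_info, 0, vtfoo_vq_intr, sc, &sc->vtfoo_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}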
static void
vtpci_select_virtqueue(struct vtpci_softc *sc, int idx)
{

	vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, idx);
}
static int
vtpci_reinit(device_t dev, uint64_t features)
{
	struct vtpci_softc *sc;
	struct vtpci_virtqueue *vqx;
	struct virtqueue *vq;
	int queue, error;
	uint16_t vq_size;

	sc = device_get_softc(dev);

	/*
	 * Redrive the device initialization. This is a bit of an abuse
	 * of the specification, but both VirtualBox and QEMU/KVM seem
	 * to play nice. We do not allow the host device to change from
	 * what was originally negotiated beyond what the guest driver
	 * changed (MSIX state should not change, number of virtqueues
	 * and their size remain the same, etc).
	 */
	if (vtpci_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
		vtpci_stop(dev);

	/*
	 * Quickly drive the status through ACK and DRIVER. The device
	 * does not become usable again until vtpci_reinit_complete().
	 */
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);

	vtpci_negotiate_features(dev, features);

	if (sc->vtpci_flags & VIRTIO_PCI_FLAG_MSIX) {
		error = vtpci_register_msix_vector(sc,
		    VIRTIO_MSI_CONFIG_VECTOR, 0);
		if (error)
			return (error);
	}

	for (queue = 0; queue < sc->vtpci_nvqs; queue++) {
		vqx = &sc->vtpci_vqx[queue];
		vq = vqx->vq;

		KASSERT(vq != NULL, ("vq %d not allocated", queue));

		vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, queue);

		vq_size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);
		error = virtqueue_reinit(vq, vq_size);
		if (error)
			return (error);

		if (sc->vtpci_flags & VIRTIO_PCI_FLAG_MSIX) {
			error = vtpci_register_msix_vector(sc,
			    VIRTIO_MSI_QUEUE_VECTOR, vqx->ires_idx);
			if (error)
				return (error);
		}

		vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
		    virtqueue_paddr(vqx->vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
	}

	return (0);
}
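/*
 * Usage sketch (not part of the driver above): after a reset (for example
 * on resume), a child driver re-drives the transport with virtio_reinit()
 * and, once its queues are repopulated, finishes with
 * virtio_reinit_complete(). This assumes those two wrappers from
 * sys/dev/virtio/virtio.h; vtfoo_reinit() is hypothetical.
 */
#include <sys/param.h>
#include <sys/bus.h>

#include <dev/virtio/virtio.h>

static int
vtfoo_reinit(device_t dev, uint64_t features)
{
	int error;

	/*
	 * Reset the device, re-negotiate the previously accepted feature
	 * bits, and re-register the MSIX vectors and queue PFNs; this is
	 * the vtpci_reinit() path shown above.
	 */
	error = virtio_reinit(dev, features);
	if (error)
		return (error);

	/* ... requeue buffers, restore device configuration, etc ... */

	/* Tell the host the driver is ready again (DRIVER_OK). */
	virtio_reinit_complete(dev);

	return (0);
}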