static int
vtpci_shutdown(device_t dev)
{

	(void) bus_generic_shutdown(dev);

	/* Forcibly stop the host device. */
	vtpci_stop(dev);

	return (0);
}
static int
vtpci_reinit(device_t dev, uint64_t features)
{
	struct vtpci_softc *sc;
	int idx, error;

	sc = device_get_softc(dev);

	/*
	 * Redrive the device initialization. This is a bit of an abuse of
	 * the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to
	 * play nice.
	 *
	 * We do not allow the host device to change from what was originally
	 * negotiated beyond what the guest driver changed. MSIX state should
	 * not change, number of virtqueues and their size remain the same, etc.
	 * This will need to be rethought when we want to support migration.
	 */

	if (vtpci_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
		vtpci_stop(dev);

	/*
	 * Quickly drive the status through ACK and DRIVER. The device
	 * does not become usable again until vtpci_reinit_complete().
	 */
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);

	vtpci_negotiate_features(dev, features);

	for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
		error = vtpci_reinit_virtqueue(sc, idx);
		if (error)
			return (error);
	}

	if (sc->vtpci_flags & VTPCI_FLAG_MSIX) {
		error = vtpci_set_host_msix_vectors(sc);
		if (error)
			return (error);
	}

	return (0);
}
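/*
 * A plausible sketch of the vtpci_reinit_virtqueue() helper called above,
 * which is not shown in this excerpt.  It is modeled on the older inline
 * loop in the second vtpci_reinit() version below: the per-queue fields
 * (vtpci_vqx[] and its vq member) are borrowed from that version and are
 * assumptions here; the newer softc layout may differ.
 */
static int
vtpci_reinit_virtqueue(struct vtpci_softc *sc, int idx)
{
	struct vtpci_virtqueue *vqx;
	struct virtqueue *vq;
	uint16_t size;
	int error;

	vqx = &sc->vtpci_vqx[idx];
	vq = vqx->vq;
	KASSERT(vq != NULL, ("vq %d not allocated", idx));

	/* Select the queue and reinitialize the ring at its current size. */
	vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, idx);
	size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);
	error = virtqueue_reinit(vq, size);
	if (error)
		return (error);

	/* Re-publish the ring's physical address to the host. */
	vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
	    virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);

	return (0);
}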
static int
vtpci_reinit(device_t dev, uint64_t features)
{
	struct vtpci_softc *sc;
	struct vtpci_virtqueue *vqx;
	struct virtqueue *vq;
	int queue, error;
	uint16_t vq_size;

	sc = device_get_softc(dev);

	/*
	 * Redrive the device initialization. This is a bit of an abuse
	 * of the specification, but both VirtualBox and QEMU/KVM seem
	 * to play nice. We do not allow the host device to change from
	 * what was originally negotiated beyond what the guest driver
	 * changed (MSIX state should not change, number of virtqueues
	 * and their size remain the same, etc).
	 */

	if (vtpci_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
		vtpci_stop(dev);

	/*
	 * Quickly drive the status through ACK and DRIVER. The device
	 * does not become usable again until vtpci_reinit_complete().
	 */
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);

	vtpci_negotiate_features(dev, features);

	if (sc->vtpci_flags & VIRTIO_PCI_FLAG_MSIX) {
		error = vtpci_register_msix_vector(sc,
		    VIRTIO_MSI_CONFIG_VECTOR, 0);
		if (error)
			return (error);
	}

	for (queue = 0; queue < sc->vtpci_nvqs; queue++) {
		vqx = &sc->vtpci_vqx[queue];
		vq = vqx->vq;

		KASSERT(vq != NULL, ("vq %d not allocated", queue));

		vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, queue);

		vq_size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);
		error = virtqueue_reinit(vq, vq_size);
		if (error)
			return (error);

		if (sc->vtpci_flags & VIRTIO_PCI_FLAG_MSIX) {
			error = vtpci_register_msix_vector(sc,
			    VIRTIO_MSI_QUEUE_VECTOR, vqx->ires_idx);
			if (error)
				return (error);
		}

		vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
		    virtqueue_paddr(vqx->vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
	}

	return (0);
}
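/*
 * A minimal sketch of what vtpci_register_msix_vector(), called above but
 * not shown here, is assumed to do under the legacy VirtIO PCI interface:
 * write the MSIX vector index to VIRTIO_MSI_CONFIG_VECTOR (or, after
 * VIRTIO_PCI_QUEUE_SEL, to VIRTIO_MSI_QUEUE_VECTOR) and read it back; the
 * host reports failure by returning VIRTIO_MSI_NO_VECTOR.  The translation
 * from ires_idx to a vector number below is an assumption for illustration
 * only; the real helper derives it from the allocated interrupt resources.
 */
static int
vtpci_register_msix_vector(struct vtpci_softc *sc, int offset, int ires_idx)
{
	uint16_t vector;

	if (ires_idx < 0)
		vector = VIRTIO_MSI_NO_VECTOR;
	else
		vector = ires_idx;	/* Assumed 1:1 mapping. */

	vtpci_write_config_2(sc, offset, vector);

	/* The device rejects the mapping by reading back NO_VECTOR. */
	if (vtpci_read_config_2(sc, offset) != vector)
		return (ENODEV);

	return (0);
}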