/*
 * Tear down the interrupts previously set up for this device:
 * if we were running with MSI/MSI-X, unbind the vectors from every
 * virtqueue and from the config-change register; then disable the
 * interrupts, remove the handlers, free the DDI interrupt handles,
 * and free the handle table itself.
 *
 * Leaves sc_config_offset at the non-MSI value so a later release is
 * a no-op on the unbind path.
 */
void
virtio_release_ints(struct virtio_softc *sc)
{
	int i;
	int ret;

	/* We were running with MSI, unbind them. */
	if (sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
		/* Unbind all vqs: select the queue, then clear its vector. */
		for (i = 0; i < sc->sc_nvqs; i++) {
			ddi_put16(sc->sc_ioh,
			    /* LINTED E_BAD_PTR_CAST_ALIGN */
			    (uint16_t *)(sc->sc_io_addr +
			    VIRTIO_CONFIG_QUEUE_SELECT), i);

			ddi_put16(sc->sc_ioh,
			    /* LINTED E_BAD_PTR_CAST_ALIGN */
			    (uint16_t *)(sc->sc_io_addr +
			    VIRTIO_CONFIG_QUEUE_VECTOR),
			    VIRTIO_MSI_NO_VECTOR);
		}
		/* And the config */
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_CONFIG_VECTOR),
		    VIRTIO_MSI_NO_VECTOR);
	}

	/* Disable the interrupts. Either the whole block, or one by one. */
	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
		ret = ddi_intr_block_disable(sc->sc_intr_htable,
		    sc->sc_intr_num);
		if (ret != DDI_SUCCESS) {
			/* Best-effort: warn but keep tearing down. */
			dev_err(sc->sc_dev, CE_WARN,
			    "Failed to disable MSIs, won't be able to "
			    "reuse next time");
		}
	} else {
		for (i = 0; i < sc->sc_intr_num; i++) {
			ret = ddi_intr_disable(sc->sc_intr_htable[i]);
			if (ret != DDI_SUCCESS) {
				dev_err(sc->sc_dev, CE_WARN,
				    "Failed to disable interrupt %d, "
				    "won't be able to reuse", i);
			}
		}
	}

	/* Detach the handlers, then free every DDI interrupt handle. */
	for (i = 0; i < sc->sc_intr_num; i++) {
		(void) ddi_intr_remove_handler(sc->sc_intr_htable[i]);
	}

	for (i = 0; i < sc->sc_intr_num; i++)
		(void) ddi_intr_free(sc->sc_intr_htable[i]);

	kmem_free(sc->sc_intr_htable,
	    sizeof (ddi_intr_handle_t) * sc->sc_intr_num);

	/* After disabling interrupts, the config offset is non-MSI. */
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
}
static int virtio_enable_msi(struct virtio_softc *sc) { int ret, i; int vq_handler_count = sc->sc_intr_num; /* Number of handlers, not counting the counfig. */ if (sc->sc_intr_config) vq_handler_count--; /* Enable the interrupts. Either the whole block, or one by one. */ if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) { ret = ddi_intr_block_enable(sc->sc_intr_htable, sc->sc_intr_num); if (ret != DDI_SUCCESS) { dev_err(sc->sc_dev, CE_WARN, "Failed to enable MSI, falling back to INTx"); goto out_enable; } } else { for (i = 0; i < sc->sc_intr_num; i++) { ret = ddi_intr_enable(sc->sc_intr_htable[i]); if (ret != DDI_SUCCESS) { dev_err(sc->sc_dev, CE_WARN, "Failed to enable MSI %d, " "falling back to INTx", i); while (--i >= 0) { (void) ddi_intr_disable( sc->sc_intr_htable[i]); } goto out_enable; } } } /* Bind the allocated MSI to the queues and config */ for (i = 0; i < vq_handler_count; i++) { int check; ddi_put16(sc->sc_ioh, /* LINTED E_BAD_PTR_CAST_ALIGN */ (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), i); ddi_put16(sc->sc_ioh, /* LINTED E_BAD_PTR_CAST_ALIGN */ (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_VECTOR), i); check = ddi_get16(sc->sc_ioh, /* LINTED E_BAD_PTR_CAST_ALIGN */ (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_VECTOR)); if (check != i) { dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler " "for VQ %d, MSI %d. Check = %x", i, i, check); ret = ENODEV; goto out_bind; } } if (sc->sc_intr_config) { int check; ddi_put16(sc->sc_ioh, /* LINTED E_BAD_PTR_CAST_ALIGN */ (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_CONFIG_VECTOR), i); check = ddi_get16(sc->sc_ioh, /* LINTED E_BAD_PTR_CAST_ALIGN */ (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_CONFIG_VECTOR)); if (check != i) { dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler " "for Config updates, MSI %d", i); ret = ENODEV; goto out_bind; } } /* Configuration offset depends on whether MSI-X is used. 
*/ if (sc->sc_int_type == DDI_INTR_TYPE_MSIX) sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSIX; else ASSERT(sc->sc_int_type == DDI_INTR_TYPE_MSI); return (DDI_SUCCESS); out_bind: /* Unbind the vqs */ for (i = 0; i < vq_handler_count - 1; i++) { ddi_put16(sc->sc_ioh, /* LINTED E_BAD_PTR_CAST_ALIGN */ (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), i); ddi_put16(sc->sc_ioh, /* LINTED E_BAD_PTR_CAST_ALIGN */ (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_VECTOR), VIRTIO_MSI_NO_VECTOR); } /* And the config */ /* LINTED E_BAD_PTR_CAST_ALIGN */ ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR); /* Disable the interrupts. Either the whole block, or one by one. */ if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) { ret = ddi_intr_block_disable(sc->sc_intr_htable, sc->sc_intr_num); if (ret != DDI_SUCCESS) { dev_err(sc->sc_dev, CE_WARN, "Failed to disable MSIs, won't be able to " "reuse next time"); } } else { for (i = 0; i < sc->sc_intr_num; i++) { ret = ddi_intr_disable(sc->sc_intr_htable[i]); if (ret != DDI_SUCCESS) { dev_err(sc->sc_dev, CE_WARN, "Failed to disable interrupt %d, " "won't be able to reuse", i); } } } ret = DDI_FAILURE; out_enable: return (ret); }