Example 1
/* ARGSUSED */
void
ddi_remove_intr(dev_info_t *dip, uint_t inum, ddi_iblock_cookie_t iblock_cookie)
{
	ddi_intr_handle_t	hdl;
	int			ret;

	DDI_INTR_APIDBG((CE_CONT, "ddi_remove_intr: name=%s%d dip=0x%p "
	    "inum=0x%x\n", ddi_driver_name(dip), ddi_get_instance(dip),
	    (void *)dip, inum));

	if ((hdl = i_ddi_get_intr_handle(dip, inum)) == NULL) {
		DDI_INTR_APIDBG((CE_CONT, "ddi_remove_intr: no handle "
		    "found\n"));
		return;
	}

	if ((ret = ddi_intr_disable(hdl)) != DDI_SUCCESS) {
		DDI_INTR_APIDBG((CE_CONT, "ddi_remove_intr: "
		    "ddi_intr_disable failed, ret 0x%x\n", ret));
		return;
	}

	if ((ret = ddi_intr_remove_handler(hdl)) != DDI_SUCCESS) {
		DDI_INTR_APIDBG((CE_CONT, "ddi_remove_intr: "
		    "ddi_intr_remove_handler failed, ret 0x%x\n", ret));
		return;
	}

	if ((ret = ddi_intr_free(hdl)) != DDI_SUCCESS) {
		DDI_INTR_APIDBG((CE_CONT, "ddi_remove_intr: "
		    "ddi_intr_free failed, ret 0x%x\n", ret));
		return;
	}
}
Example 2
/**
 * Quiesce entry point, called by the Solaris kernel to stop the device from
 * generating any interrupts or doing in-bound DMA.
 *
 * @param   pDip            The module structure instance.
 *
 * @return  Corresponding Solaris error code.
 */
static int vgdrvSolarisQuiesce(dev_info_t *pDip)
{
    int rc = ddi_intr_disable(g_pahIntrs[0]);
    if (rc != DDI_SUCCESS)
        return DDI_FAILURE;

    /** @todo What about HGCM/HGSMI touching guest-memory? */

    return DDI_SUCCESS;
}
Example 3
static int
virtio_intr_teardown(virtionet_state_t *sp)
{
	/*
	 * Tear down unconditionally: disable the interrupt, remove the
	 * handler, then free the handle.  The intermediate return values
	 * are not actionable here, so they are explicitly discarded.
	 */
	(void) ddi_intr_disable(sp->ihandle);
	(void) ddi_intr_remove_handler(sp->ihandle);
	(void) ddi_intr_free(sp->ihandle);
	return (DDI_SUCCESS);
}
Example 4
/**
 * Quiesce entry point, called by the Solaris kernel to stop the device from
 * generating any interrupts or doing in-bound DMA.
 *
 * @param   pDip            The module structure instance.
 *
 * @return  Corresponding Solaris error code.
 */
static int VBoxGuestSolarisQuiesce(dev_info_t *pDip)
{
    for (int i = 0; i < g_cIntrAllocated; i++)
    {
        int rc = ddi_intr_disable(g_pIntr[i]);
        if (rc != DDI_SUCCESS)
            return DDI_FAILURE;
    }

    /** @todo What about HGCM/HGSMI touching guest-memory? */

    return DDI_SUCCESS;
}
Example 5
/**
 * Removes IRQ for VMMDev.
 *
 * @param   pDip     Pointer to the device info structure.
 */
static void VBoxGuestSolarisRemoveIRQ(dev_info_t *pDip)
{
    LogFlow((DEVICE_NAME "::RemoveIRQ:\n"));

    for (int i = 0; i < g_cIntrAllocated; i++)
    {
        int rc = ddi_intr_disable(g_pIntr[i]);
        if (rc == DDI_SUCCESS)
        {
            rc = ddi_intr_remove_handler(g_pIntr[i]);
            if (rc == DDI_SUCCESS)
                ddi_intr_free(g_pIntr[i]);
        }
    }
    RTMemFree(g_pIntr);
    mutex_destroy(&g_IrqMtx);
}
Example 6
int
efe_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	efe_t *efep = ddi_get_driver_private(dip);

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (efe_suspend(efep));

	default:
		return (DDI_FAILURE);
	}

	if (mac_unregister(efep->efe_mh) != 0) {
		efe_error(dip, "unable to unregister from mac!");
		return (DDI_FAILURE);
	}

	mii_free(efep->efe_miih);

	(void) ddi_intr_disable(efep->efe_intrh);
	(void) ddi_intr_remove_handler(efep->efe_intrh);
	(void) ddi_intr_free(efep->efe_intrh);

	mutex_destroy(&efep->efe_txlock);
	mutex_destroy(&efep->efe_intrlock);

	if (efep->efe_tx_ring != NULL) {
		efe_ring_free(&efep->efe_tx_ring);
	}
	if (efep->efe_rx_ring != NULL) {
		efe_ring_free(&efep->efe_rx_ring);
	}

	ddi_regs_map_free(&efep->efe_regs_acch);

	kmem_free(efep, sizeof (efe_t));

	return (DDI_SUCCESS);
}
Example 7
/**
 * Removes IRQ for VMMDev.
 *
 * @param   pDip     Pointer to the device info structure.
 */
static void vgdrvSolarisRemoveIRQ(dev_info_t *pDip)
{
    LogFlow(("vgdrvSolarisRemoveIRQ:\n"));

    int rc = ddi_intr_disable(g_pahIntrs[0]);
    if (rc == DDI_SUCCESS)
    {
        rc = ddi_intr_remove_handler(g_pahIntrs[0]);
        if (rc == DDI_SUCCESS)
            ddi_intr_free(g_pahIntrs[0]);
    }

    if (g_fSoftIntRegistered)
    {
        ddi_intr_remove_softint(g_hSoftIntr);
        mutex_destroy(&g_HighLevelIrqMtx);
        g_fSoftIntRegistered = false;
    }

    mutex_destroy(&g_IrqMtx);
    RTMemFree(g_pahIntrs);
}
Example 8
/*
 * Autoconfiguration entry points.
 */
int
efe_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	ddi_acc_handle_t pci;
	int types;
	int count;
	int actual;
	uint_t pri;
	efe_t *efep;
	mac_register_t *macp;

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		efep = ddi_get_driver_private(dip);
		return (efe_resume(efep));

	default:
		return (DDI_FAILURE);
	}

	/*
	 * PCI configuration.
	 */
	if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
		efe_error(dip, "unable to setup PCI configuration!");
		return (DDI_FAILURE);
	}

	pci_config_put16(pci, PCI_CONF_COMM,
	    pci_config_get16(pci, PCI_CONF_COMM) | PCI_COMM_MAE | PCI_COMM_ME);

	pci_config_teardown(&pci);

	if (ddi_intr_get_supported_types(dip, &types)
	    != DDI_SUCCESS || !(types & DDI_INTR_TYPE_FIXED)) {
		efe_error(dip, "fixed interrupts not supported!");
		return (DDI_FAILURE);
	}

	if (ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &count)
	    != DDI_SUCCESS || count != 1) {
		efe_error(dip, "no fixed interrupts available!");
		return (DDI_FAILURE);
	}

	/*
	 * Initialize soft state.
	 */
	efep = kmem_zalloc(sizeof (efe_t), KM_SLEEP);
	ddi_set_driver_private(dip, efep);

	efep->efe_dip = dip;

	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&efep->efe_regs, 0, 0,
	    &efe_regs_acc_attr, &efep->efe_regs_acch) != DDI_SUCCESS) {
		efe_error(dip, "unable to setup register mapping!");
		goto failure;
	}

	efep->efe_rx_ring = efe_ring_alloc(efep->efe_dip, RXDESCL);
	if (efep->efe_rx_ring == NULL) {
		efe_error(efep->efe_dip, "unable to allocate rx ring!");
		goto failure;
	}

	efep->efe_tx_ring = efe_ring_alloc(efep->efe_dip, TXDESCL);
	if (efep->efe_tx_ring == NULL) {
		efe_error(efep->efe_dip, "unable to allocate tx ring!");
		goto failure;
	}

	if (ddi_intr_alloc(dip, &efep->efe_intrh, DDI_INTR_TYPE_FIXED, 0,
	    count, &actual, DDI_INTR_ALLOC_STRICT) != DDI_SUCCESS ||
	    actual != count) {
		efe_error(dip, "unable to allocate fixed interrupt!");
		goto failure;
	}

	if (ddi_intr_get_pri(efep->efe_intrh, &pri) != DDI_SUCCESS ||
	    pri >= ddi_intr_get_hilevel_pri()) {
		efe_error(dip, "unable to get valid interrupt priority!");
		goto failure;
	}

	mutex_init(&efep->efe_intrlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pri));

	mutex_init(&efep->efe_txlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pri));

	/*
	 * Initialize device.
	 */
	mutex_enter(&efep->efe_intrlock);
	mutex_enter(&efep->efe_txlock);

	efe_reset(efep);

	mutex_exit(&efep->efe_txlock);
	mutex_exit(&efep->efe_intrlock);

	/* Use factory address as default */
	efe_getaddr(efep, efep->efe_macaddr);

	/*
	 * Enable the ISR.
	 */
	if (ddi_intr_add_handler(efep->efe_intrh, efe_intr, efep, NULL)
	    != DDI_SUCCESS) {
		efe_error(dip, "unable to add interrupt handler!");
		goto failure;
	}

	if (ddi_intr_enable(efep->efe_intrh) != DDI_SUCCESS) {
		efe_error(dip, "unable to enable interrupt!");
		goto failure;
	}

	/*
	 * Allocate MII resources.
	 */
	if ((efep->efe_miih = mii_alloc(efep, dip, &efe_mii_ops)) == NULL) {
		efe_error(dip, "unable to allocate mii resources!");
		goto failure;
	}

	/*
	 * Allocate MAC resources.
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		efe_error(dip, "unable to allocate mac resources!");
		goto failure;
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = efep;
	macp->m_dip = dip;
	macp->m_src_addr = efep->efe_macaddr;
	macp->m_callbacks = &efe_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	if (mac_register(macp, &efep->efe_mh) != 0) {
		efe_error(dip, "unable to register with mac!");
		goto failure;
	}
	mac_free(macp);

	ddi_report_dev(dip);

	return (DDI_SUCCESS);

failure:
	if (macp != NULL) {
		mac_free(macp);
	}

	if (efep->efe_miih != NULL) {
		mii_free(efep->efe_miih);
	}

	if (efep->efe_intrh != NULL) {
		(void) ddi_intr_disable(efep->efe_intrh);
		(void) ddi_intr_remove_handler(efep->efe_intrh);
		(void) ddi_intr_free(efep->efe_intrh);
	}

	mutex_destroy(&efep->efe_txlock);
	mutex_destroy(&efep->efe_intrlock);

	if (efep->efe_tx_ring != NULL) {
		efe_ring_free(&efep->efe_tx_ring);
	}
	if (efep->efe_rx_ring != NULL) {
		efe_ring_free(&efep->efe_rx_ring);
	}

	if (efep->efe_regs_acch != NULL) {
		ddi_regs_map_free(&efep->efe_regs_acch);
	}

	kmem_free(efep, sizeof (efe_t));

	return (DDI_FAILURE);
}
Example 9
void
virtio_release_ints(struct virtio_softc *sc)
{
	int i;
	int ret;

	/* We were running with MSI, unbind them. */
	if (sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
		/* Unbind all vqs */
		for (i = 0; i < sc->sc_nvqs; i++) {
			ddi_put16(sc->sc_ioh,
			    /* LINTED E_BAD_PTR_CAST_ALIGN */
			    (uint16_t *)(sc->sc_io_addr +
			    VIRTIO_CONFIG_QUEUE_SELECT), i);

			ddi_put16(sc->sc_ioh,
			    /* LINTED E_BAD_PTR_CAST_ALIGN */
			    (uint16_t *)(sc->sc_io_addr +
			    VIRTIO_CONFIG_QUEUE_VECTOR),
			    VIRTIO_MSI_NO_VECTOR);
		}
		/* And the config */
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_CONFIG_VECTOR),
		    VIRTIO_MSI_NO_VECTOR);

	}

	/* Disable the interrupts.  Either the whole block, or one by one. */
	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
		ret = ddi_intr_block_disable(sc->sc_intr_htable,
		    sc->sc_intr_num);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "Failed to disable MSIs, won't be able to "
			    "reuse next time");
		}
	} else {
		for (i = 0; i < sc->sc_intr_num; i++) {
			ret = ddi_intr_disable(sc->sc_intr_htable[i]);
			if (ret != DDI_SUCCESS) {
				dev_err(sc->sc_dev, CE_WARN,
				    "Failed to disable interrupt %d, "
				    "won't be able to reuse", i);
			}
		}
	}


	for (i = 0; i < sc->sc_intr_num; i++) {
		(void) ddi_intr_remove_handler(sc->sc_intr_htable[i]);
	}

	for (i = 0; i < sc->sc_intr_num; i++)
		(void) ddi_intr_free(sc->sc_intr_htable[i]);

	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) *
	    sc->sc_intr_num);

	/* After disabling interrupts, the config offset is non-MSI. */
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
}
Example 10
static int
virtio_enable_msi(struct virtio_softc *sc)
{
	int ret, i;
	int vq_handler_count = sc->sc_intr_num;

	/* Number of handlers, not counting the config. */
	if (sc->sc_intr_config)
		vq_handler_count--;

	/* Enable the interrupts.  Either the whole block, or one by one. */
	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
		ret = ddi_intr_block_enable(sc->sc_intr_htable,
		    sc->sc_intr_num);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "Failed to enable MSI, falling back to INTx");
			goto out_enable;
		}
	} else {
		for (i = 0; i < sc->sc_intr_num; i++) {
			ret = ddi_intr_enable(sc->sc_intr_htable[i]);
			if (ret != DDI_SUCCESS) {
				dev_err(sc->sc_dev, CE_WARN,
				    "Failed to enable MSI %d, "
				    "falling back to INTx", i);

				while (--i >= 0) {
					(void) ddi_intr_disable(
					    sc->sc_intr_htable[i]);
				}
				goto out_enable;
			}
		}
	}

	/* Bind the allocated MSI to the queues and config */
	for (i = 0; i < vq_handler_count; i++) {
		int check;

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_SELECT), i);

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_VECTOR), i);

		check = ddi_get16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_VECTOR));
		if (check != i) {
			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
			    "for VQ %d, MSI %d. Check = %x", i, i, check);
			ret = ENODEV;
			goto out_bind;
		}
	}

	if (sc->sc_intr_config) {
		int check;

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_CONFIG_VECTOR), i);

		check = ddi_get16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_CONFIG_VECTOR));
		if (check != i) {
			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
			    "for Config updates, MSI %d", i);
			ret = ENODEV;
			goto out_bind;
		}
	}

	return (DDI_SUCCESS);

out_bind:
	/* Unbind the vqs */
	for (i = 0; i < vq_handler_count - 1; i++) {
		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_SELECT), i);

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_VECTOR),
		    VIRTIO_MSI_NO_VECTOR);
	}
	/* And the config */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
	    VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);

	ret = DDI_FAILURE;

out_enable:
	return (ret);
}