void
ipw2200_csr_put16(struct ipw2200_softc *sc, uint32_t off,
	uint16_t val)
{
	ddi_put16(sc->sc_ioh,
	    (uint16_t *)((uintptr_t)sc->sc_regs + off), val);
}
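
The write accessor above pairs naturally with a read accessor built on ddi_get16(9F). A minimal sketch of the mirrored getter, assuming the same softc layout (the ipw2200_csr_get16 name simply mirrors the setter and is shown here as an illustration):

uint16_t
ipw2200_csr_get16(struct ipw2200_softc *sc, uint32_t off)
{
	/* Read a 16-bit CSR through the same access handle and mapping. */
	return (ddi_get16(sc->sc_ioh,
	    (uint16_t *)((uintptr_t)sc->sc_regs + off)));
}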
Example No. 2
/*ARGSUSED*/
void
isadma_put16(ddi_acc_impl_t *hdlp, uint16_t *addr, uint16_t value)
{
	ddi_acc_handle_t phdl = hdlp->ahi_common.ah_platform_private;
	isadma_devstate_t *isadmap = hdlp->ahi_common.ah_bus_private;
	off_t offset = (caddr_t)addr - hdlp->ahi_common.ah_addr;

	if (IN_CHILD_SPACE(offset)) {	/* Pass to parent */
#ifdef DEBUG
		isadma_punt++;
#endif
		ddi_put16(phdl, addr, value);
		return;
	}
#ifdef DEBUG
	isadma_check_waiters(isadmap);
#endif
	mutex_enter(&isadmap->isadma_access_lock);
	isadma_dmawait(isadmap);	/* wait until on-going dma completes */

	/* Only allow access to the 16-bit count and address registers */
	if (!IN_16BIT_SPACE(offset))
		goto exit;

	/* Set the sequencing register to the low byte */
	ddi_put8(phdl, (uint8_t *)HDL_TO_SEQREG_ADDR(hdlp, offset), 0);

	/* Write the low byte, then the high byte */
	ddi_put8(phdl, (uint8_t *)addr, value & 0xff);
	ddi_put8(phdl, (uint8_t *)addr, (value >> 8) & 0xff);
exit:
	isadma_wakeup(isadmap);
	mutex_exit(&isadmap->isadma_access_lock);
}
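
Reading one of these 16-bit registers follows the same flip-flop sequencing in reverse: reset the byte pointer, then read the low byte before the high byte. A hedged sketch of just that sequence, reusing the HDL_TO_SEQREG_ADDR helper from above and eliding the locking and parent-space punt (the isadma_read16 name is illustrative):

static uint16_t
isadma_read16(ddi_acc_impl_t *hdlp, uint16_t *addr)
{
	ddi_acc_handle_t phdl = hdlp->ahi_common.ah_platform_private;
	off_t offset = (caddr_t)addr - hdlp->ahi_common.ah_addr;
	uint16_t lo, hi;

	/* Set the sequencing register to the low byte */
	ddi_put8(phdl, (uint8_t *)HDL_TO_SEQREG_ADDR(hdlp, offset), 0);

	/* Read the low byte, then the high byte */
	lo = ddi_get8(phdl, (uint8_t *)addr);
	hi = ddi_get8(phdl, (uint8_t *)addr);

	return (lo | (hi << 8));
}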
Example No. 3
void
virtio_free_vq(struct virtqueue *vq)
{
	struct virtio_softc *sc = vq->vq_owner;
	int i;

	/* tell device that there's no virtqueue any longer */
	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
	    vq->vq_index);
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);

	/* Free the indirect descriptors, if any. */
	for (i = 0; i < vq->vq_num; i++) {
		struct vq_entry *entry = &vq->vq_entries[i];
		if (entry->qe_indirect_descs)
			virtio_free_indirect(entry);
	}

	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);

	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
	ddi_dma_mem_free(&vq->vq_dma_acch);
	ddi_dma_free_handle(&vq->vq_dma_handle);

	mutex_destroy(&vq->vq_used_lock);
	mutex_destroy(&vq->vq_avail_lock);
	mutex_destroy(&vq->vq_freelist_lock);

	kmem_free(vq, sizeof (struct virtqueue));
}
Example No. 4
static void
fipe_ioat_cancel(void)
{
	uint32_t status;
	uint8_t	*addr = fipe_ioat_ctrl.ioat_reg_addr;
	ddi_acc_handle_t handle = fipe_ioat_ctrl.ioat_reg_handle;

	/*
	 * Reset channel. Sometimes reset is not reliable,
	 * so check completion or abort status after reset.
	 */
	/* LINTED: constant in conditional context */
	while (1) {
		/* Issue reset channel command. */
		ddi_put8(handle, (uint8_t *)(addr + FIPE_IOAT_CHAN_CMD), 0x20);

		/* Query command status. */
		status = ddi_get32(handle,
		    (uint32_t *)(addr + FIPE_IOAT_CHAN_STS_LO));
		if (status & 0x1) {
			/* Reset channel completed. */
			break;
		} else {
			SMT_PAUSE();
		}
	}

	/* Put channel into "not in use" state. */
	ddi_put16(handle, (uint16_t *)(addr + FIPE_IOAT_CHAN_CTRL), 0);
}
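
Because the reset is polled in an unbounded loop, a wedged channel would spin this thread forever. A hedged variant that caps the retries before giving up (FIPE_IOAT_RESET_TRIES is an assumed budget, not a constant from the driver):

#define	FIPE_IOAT_RESET_TRIES	1000	/* assumed retry budget */

static int
fipe_ioat_cancel_bounded(void)
{
	uint32_t status;
	uint8_t	*addr = fipe_ioat_ctrl.ioat_reg_addr;
	ddi_acc_handle_t handle = fipe_ioat_ctrl.ioat_reg_handle;
	int tries;

	for (tries = 0; tries < FIPE_IOAT_RESET_TRIES; tries++) {
		/* Issue reset channel command. */
		ddi_put8(handle, (uint8_t *)(addr + FIPE_IOAT_CHAN_CMD), 0x20);

		/* Query command status. */
		status = ddi_get32(handle,
		    (uint32_t *)(addr + FIPE_IOAT_CHAN_STS_LO));
		if (status & 0x1) {
			/* Reset completed; mark channel "not in use". */
			ddi_put16(handle,
			    (uint16_t *)(addr + FIPE_IOAT_CHAN_CTRL), 0);
			return (0);
		}
		SMT_PAUSE();
	}
	return (-1);	/* channel never signalled reset completion */
}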
Example No. 5
static void
rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval;
	void *regaddr;

	RGE_TRACE(("rge_chip_poke_reg($%p, $%p)",
	    (void *)rgep, (void *)ppd));

	regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);
	regval = ppd->pp_acc_data;

	switch (ppd->pp_acc_size) {
	case 1:
		ddi_put8(rgep->io_handle, regaddr, regval);
		break;

	case 2:
		ddi_put16(rgep->io_handle, regaddr, regval);
		break;

	case 4:
		ddi_put32(rgep->io_handle, regaddr, regval);
		break;

	case 8:
		ddi_put64(rgep->io_handle, regaddr, regval);
		break;
	}
}
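
The same size switch works in the read direction with ddi_get8/16/32/64. A sketch of the mirrored peek routine under the same rge_peekpoke_t layout (shown as an illustration; the in-tree driver carries its own peek counterpart):

static void
rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval = 0;	/* default for unsupported sizes */
	void *regaddr;

	regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);

	switch (ppd->pp_acc_size) {
	case 1:
		regval = ddi_get8(rgep->io_handle, regaddr);
		break;

	case 2:
		regval = ddi_get16(rgep->io_handle, regaddr);
		break;

	case 4:
		regval = ddi_get32(rgep->io_handle, regaddr);
		break;

	case 8:
		regval = ddi_get64(rgep->io_handle, regaddr);
		break;
	}

	ppd->pp_acc_data = regval;
}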
Example No. 6
static void
rge_reg_put16(rge_t *rgep, uintptr_t regno, uint16_t data)
{
	RGE_TRACE(("rge_reg_put16($%p, 0x%lx, 0x%x)",
	    (void *)rgep, regno, data));

	ddi_put16(rgep->io_handle, REG16(rgep, regno), data);
}
Example No. 7
void
virtio_write_device_config_2(struct virtio_softc *sc, unsigned int index,
    uint16_t value)
{
	ASSERT(sc->sc_config_offset);
	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
}
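
The read side of the device-specific config space is symmetric, using ddi_get16 at the same computed offset. A minimal sketch of the counterpart (illumos virtio has a matching virtio_read_device_config_2; treat the body below as an assumption):

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, unsigned int index)
{
	ASSERT(sc->sc_config_offset);
	return (ddi_get16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index)));
}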
Example No. 8
void
pci_config_putw(ddi_acc_handle_t handle, off_t offset, uint16_t value)
{
	caddr_t	cfgaddr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(handle);
	cfgaddr = hp->ah_addr + offset;
	ddi_put16(handle, (uint16_t *)cfgaddr, value);
}
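
A typical caller pairs this with the matching read routine for a read-modify-write of a config word. For example, a hedged sketch that sets the bus-master enable bit in the PCI command register, assuming a handle from pci_config_setup(9F) and the PCI_CONF_COMM and PCI_COMM_ME constants from <sys/pci.h>:

static void
example_enable_bus_master(ddi_acc_handle_t handle)
{
	uint16_t comm;

	/* Read the command word, set Bus Master Enable, write it back. */
	comm = pci_config_getw(handle, PCI_CONF_COMM);
	comm |= PCI_COMM_ME;
	pci_config_putw(handle, PCI_CONF_COMM, comm);
}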
Example No. 9
/**
 * Update the queue, notify the host.
 *
 * @param pDevice           Pointer to the Virtio device instance.
 * @param pQueue            Pointer to the Queue that is doing the notification.
 *
 * @return Solaris DDI error code. DDI_SUCCESS or DDI_FAILURE.
 */
static int VirtioPciNotifyQueue(PVIRTIODEVICE pDevice, PVIRTIOQUEUE pQueue)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciNotifyQueue pDevice=%p pQueue=%p\n", pDevice, pQueue));
    virtio_pci_t *pPciData = pDevice->pvHyper;
    AssertReturn(pPciData, DDI_FAILURE);

    pQueue->Ring.pRingAvail->Index += pQueue->cBufs;
    pQueue->cBufs = 0;

    ASMCompilerBarrier();

    ddi_put16(pPciData->hIO, (uint16_t *)(pPciData->addrIOBase + VIRTIO_PCI_QUEUE_NOTIFY), pQueue->QueueIndex);
    cmn_err(CE_NOTE, "VirtioPciNotifyQueue\n");

    return DDI_SUCCESS;
}
Example No. 10
static int
fipe_ioat_trigger(void)
{
	uint16_t ctrl;
	uint32_t err;
	uint8_t	*addr = fipe_ioat_ctrl.ioat_reg_addr;
	ddi_acc_handle_t handle = fipe_ioat_ctrl.ioat_reg_handle;

	/* Check channel in use flag. */
	ctrl = ddi_get16(handle, (uint16_t *)(addr + FIPE_IOAT_CHAN_CTRL));
	if (ctrl & 0x100) {
		/*
		 * Channel is in use by somebody else. IOAT driver may have
		 * been loaded, forbid fipe from accessing IOAT hardware
		 * anymore.
		 */
		fipe_ioat_ctrl.ioat_ready = B_FALSE;
		fipe_ioat_ctrl.ioat_failed = B_TRUE;
		FIPE_KSTAT_INC(ioat_start_fail_cnt);
		return (-1);
	} else {
		/* Set channel in use flag. */
		ddi_put16(handle,
		    (uint16_t *)(addr + FIPE_IOAT_CHAN_CTRL), 0x100);
	}

	/* Write command address. */
	ddi_put32(handle,
	    (uint32_t *)(addr + FIPE_IOAT_CHAN_ADDR_LO),
	    (uint32_t)fipe_ioat_ctrl.ioat_cmd_physaddr);
	ddi_put32(handle, (uint32_t *)(addr + FIPE_IOAT_CHAN_ADDR_HI),
	    (uint32_t)(fipe_ioat_ctrl.ioat_cmd_physaddr >> 32));

	/* Check and clear error flags. */
	err = ddi_get32(handle, (uint32_t *)(addr + FIPE_IOAT_CHAN_ERR));
	if (err != 0) {
		ddi_put32(handle, (uint32_t *)(addr + FIPE_IOAT_CHAN_ERR), err);
	}

	/* Start channel. */
	ddi_put8(handle, (uint8_t *)(addr + FIPE_IOAT_CHAN_CMD), 0x1);

	return (0);
}
Example No. 11
/**
 * Virtio Pci put queue routine. Deactivates the queue and frees its
 * associated resources.
 *
 * @param pDevice           Pointer to the Virtio device instance.
 * @param pQueue            Pointer to the queue.
 */
static void VirtioPciPutQueue(PVIRTIODEVICE pDevice, PVIRTIOQUEUE pQueue)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciPutQueue pDevice=%p pQueue=%p\n", pDevice, pQueue));
    AssertReturnVoid(pDevice);
    AssertReturnVoid(pQueue);

    virtio_pci_t *pPci = pDevice->pvHyper;
    AssertReturnVoid(pPci);
    virtio_pci_queue_t *pPciQueue = pQueue->pvData;
    if (RT_UNLIKELY(!pPciQueue))
    {
        LogRel((VIRTIOLOGNAME ":VirtioPciPutQueue missing Pci queue.\n"));
        return;
    }

    ddi_put16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_SEL), pQueue->QueueIndex);
    ddi_put32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN), 0);

    ddi_dma_unbind_handle(pPciQueue->hDMA);
    ddi_dma_mem_free(&pPciQueue->hIO);
    ddi_dma_free_handle(&pPciQueue->hDMA);
    RTMemFree(pPciQueue);
}
Example No. 12
void
virtio_sync_vq(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;

	/* Make sure the avail ring update hit the buffer */
	membar_producer();

	vq->vq_avail->idx = vq->vq_avail_idx;

	/* Make sure the avail idx update hits the buffer */
	membar_producer();

	/* Make sure we see the flags update */
	membar_consumer();

	if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY)) {
		ddi_put16(vsc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(vsc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_NOTIFY),
		    vq->vq_index);
	}
}
Example No. 13
/*
 * audioixp_alloc_port()
 *
 * Description:
 *	This routine allocates the DMA handles and the memory for the
 *	DMA engines to use.  It also configures the BDL lists properly
 *	for use.
 *
 * Arguments:
 *	audioixp_state_t	*statep	Pointer to the device's soft state
 *	int			num	Port number (IXP_REC or IXP_PLAY)
 *
 * Returns:
 *	DDI_SUCCESS		Port resources successfully allocated
 *	DDI_FAILURE		Port resources not successfully allocated
 */
static int
audioixp_alloc_port(audioixp_state_t *statep, int num)
{
	ddi_dma_cookie_t	cookie;
	uint_t			count;
	int			dir;
	unsigned		caps;
	char			*prop;
	audio_dev_t		*adev;
	audioixp_port_t		*port;
	uint32_t		paddr;
	int			rc;
	dev_info_t		*dip;
	audioixp_bd_entry_t	*bdentry;

	adev = statep->adev;
	dip = statep->dip;

	port = kmem_zalloc(sizeof (*port), KM_SLEEP);
	port->statep = statep;
	port->started = B_FALSE;
	port->num = num;

	switch (num) {
	case IXP_REC:
		statep->rec_port = port;
		prop = "record-interrupts";
		dir = DDI_DMA_READ;
		caps = ENGINE_INPUT_CAP;
		port->sync_dir = DDI_DMA_SYNC_FORKERNEL;
		port->nchan = 2;
		break;
	case IXP_PLAY:
		statep->play_port = port;
		prop = "play-interrupts";
		dir = DDI_DMA_WRITE;
		caps = ENGINE_OUTPUT_CAP;
		port->sync_dir = DDI_DMA_SYNC_FORDEV;
		/* This could possibly be conditionalized */
		port->nchan = 6;
		break;
	default:
		audio_dev_warn(adev, "bad port number (%d)!", num);
		return (DDI_FAILURE);
	}

	port->intrs = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, prop, IXP_INTS);

	/* make sure the values are good */
	if (port->intrs < IXP_MIN_INTS) {
		audio_dev_warn(adev, "%s too low, %d, resetting to %d",
		    prop, port->intrs, IXP_INTS);
		port->intrs = IXP_INTS;
	} else if (port->intrs > IXP_MAX_INTS) {
		audio_dev_warn(adev, "%s too high, %d, resetting to %d",
		    prop, port->intrs, IXP_INTS);
		port->intrs = IXP_INTS;
	}

	/*
	 * Figure out how much space we need.  Sample rate is 48kHz, and
	 * we need to store 8 chunks.  (Note that this means that low
	 * interrupt frequencies will require more RAM.)
	 */
	port->fragfr = 48000 / port->intrs;
	port->fragfr = IXP_ROUNDUP(port->fragfr, IXP_MOD_SIZE);
	port->fragsz = port->fragfr * port->nchan * 2;
	port->samp_size = port->fragsz * IXP_BD_NUMS;

	/* allocate dma handle */
	rc = ddi_dma_alloc_handle(dip, &sample_buf_dma_attr, DDI_DMA_SLEEP,
	    NULL, &port->samp_dmah);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_alloc_handle failed: %d", rc);
		return (DDI_FAILURE);
	}
	/* allocate DMA buffer */
	rc = ddi_dma_mem_alloc(port->samp_dmah, port->samp_size, &buf_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &port->samp_kaddr,
	    &port->samp_size, &port->samp_acch);
	if (rc == DDI_FAILURE) {
		audio_dev_warn(adev, "dma_mem_alloc failed");
		return (DDI_FAILURE);
	}

	/* bind DMA buffer */
	rc = ddi_dma_addr_bind_handle(port->samp_dmah, NULL,
	    port->samp_kaddr, port->samp_size, dir|DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &cookie, &count);
	if ((rc != DDI_DMA_MAPPED) || (count != 1)) {
		audio_dev_warn(adev,
		    "ddi_dma_addr_bind_handle failed: %d", rc);
		return (DDI_FAILURE);
	}
	port->samp_paddr = cookie.dmac_address;

	/*
	 * Now, from here we allocate DMA memory for the buffer descriptor
	 * list.  We allocate adjacent DMA memory for all DMA engines.
	 */
	rc = ddi_dma_alloc_handle(dip, &bdlist_dma_attr, DDI_DMA_SLEEP,
	    NULL, &port->bdl_dmah);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_alloc_handle(bdlist) failed");
		return (DDI_FAILURE);
	}

	/*
	 * We allocate all buffer descriptor lists in contiguous DMA memory.
	 */
	port->bdl_size = sizeof (audioixp_bd_entry_t) * IXP_BD_NUMS;
	rc = ddi_dma_mem_alloc(port->bdl_dmah, port->bdl_size,
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &port->bdl_kaddr, &port->bdl_size, &port->bdl_acch);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_mem_alloc(bdlist) failed");
		return (DDI_FAILURE);
	}

	rc = ddi_dma_addr_bind_handle(port->bdl_dmah, NULL, port->bdl_kaddr,
	    port->bdl_size, DDI_DMA_WRITE|DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &cookie, &count);
	if ((rc != DDI_DMA_MAPPED) || (count != 1)) {
		audio_dev_warn(adev, "addr_bind_handle failed");
		return (DDI_FAILURE);
	}
	port->bdl_paddr = cookie.dmac_address;

	/*
	 * Wire up the BD list.
	 */
	paddr = port->samp_paddr;
	bdentry = (void *)port->bdl_kaddr;

	for (int i = 0; i < IXP_BD_NUMS; i++) {

		/* set base address of buffer */
		ddi_put32(port->bdl_acch, &bdentry->buf_base, paddr);
		ddi_put16(port->bdl_acch, &bdentry->status, 0);
		ddi_put16(port->bdl_acch, &bdentry->buf_len, port->fragsz / 4);
		ddi_put32(port->bdl_acch, &bdentry->next, port->bdl_paddr +
		    (((i + 1) % IXP_BD_NUMS) * sizeof (audioixp_bd_entry_t)));
		paddr += port->fragsz;
		bdentry++;
	}
	(void) ddi_dma_sync(port->bdl_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);

	port->engine = audio_engine_alloc(&audioixp_engine_ops, caps);
	if (port->engine == NULL) {
		audio_dev_warn(adev, "audio_engine_alloc failed");
		return (DDI_FAILURE);
	}

	audio_engine_set_private(port->engine, port);
	audio_dev_add_engine(adev, port->engine);

	return (DDI_SUCCESS);
}
Example No. 14
/**
 * Virtio Pci get queue routine. Allocates a PCI queue and DMA resources.
 *
 * @param pDevice           Pointer to the Virtio device instance.
 * @param pQueue            Where to store the queue.
 *
 * @return An allocated Virtio Pci queue, or NULL in case of errors.
 */
static void *VirtioPciGetQueue(PVIRTIODEVICE pDevice, PVIRTIOQUEUE pQueue)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciGetQueue pDevice=%p pQueue=%p\n", pDevice, pQueue));
    AssertReturn(pDevice, NULL);

    virtio_pci_t *pPci = pDevice->pvHyper;
    AssertReturn(pPci, NULL);

    /*
     * Select a Queue.
     */
    ddi_put16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_SEL), pQueue->QueueIndex);

    /*
     * Get the currently selected Queue's size.
     */
    pQueue->Ring.cDesc = ddi_get16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_NUM));
    if (RT_UNLIKELY(!pQueue->Ring.cDesc))
    {
        LogRel((VIRTIOLOGNAME ": VirtioPciGetQueue: Queue[%d] has no descriptors.\n", pQueue->QueueIndex));
        return NULL;
    }

    /*
     * Check if it's already active.
     */
    uint32_t QueuePFN = ddi_get32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN));
    if (QueuePFN != 0)
    {
        LogRel((VIRTIOLOGNAME ":VirtioPciGetQueue: Queue[%d] is already used.\n", pQueue->QueueIndex));
        return NULL;
    }

    LogFlow(("Queue[%d] has %d slots.\n", pQueue->QueueIndex, pQueue->Ring.cDesc));

    /*
     * Allocate and initialize Pci queue data.
     */
    virtio_pci_queue_t *pPciQueue = RTMemAllocZ(sizeof(virtio_pci_queue_t));
    if (pPciQueue)
    {
        /*
         * Setup DMA.
         */
        size_t cbQueue = VirtioRingSize(pQueue->Ring.cDesc, VIRTIO_PCI_RING_ALIGN);
        int rc = ddi_dma_alloc_handle(pDevice->pDip, &g_VirtioPciDmaAttrRing, DDI_DMA_SLEEP, 0 /* addr */, &pPciQueue->hDMA);
        if (rc == DDI_SUCCESS)
        {
            rc = ddi_dma_mem_alloc(pPciQueue->hDMA, cbQueue, &g_VirtioPciAccAttrRing, DDI_DMA_CONSISTENT,
                                   DDI_DMA_SLEEP, 0 /* addr */, &pQueue->pQueue, &pPciQueue->cbBuf,
                                   &pPciQueue->hIO);
            if (rc == DDI_SUCCESS)
            {
                AssertRelease(pPciQueue->cbBuf >= cbQueue);
                ddi_dma_cookie_t DmaCookie;
                uint_t cCookies;
                rc = ddi_dma_addr_bind_handle(pPciQueue->hDMA, NULL /* addrspace */, pQueue->pQueue, pPciQueue->cbBuf,
                                              DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
                                              0 /* addr */, &DmaCookie, &cCookies);
                if (rc == DDI_SUCCESS)
                {
                    pPciQueue->physBuf = DmaCookie.dmac_laddress;
                    pPciQueue->pageBuf = pPciQueue->physBuf >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;

                    LogFlow((VIRTIOLOGNAME ":VirtioPciGetQueue: Queue[%d]%p physBuf=%x pfn of Buf %#x\n", pQueue->QueueIndex,
                             pQueue->pQueue, pPciQueue->physBuf, pPciQueue->pageBuf));
                    cmn_err(CE_NOTE, ":VirtioPciGetQueue: Queue[%d]%p physBuf=%x pfn of Buf %x\n", pQueue->QueueIndex,
                             pQueue->pQueue, pPciQueue->physBuf, pPciQueue->pageBuf);

                    /*
                     * Activate the queue and initialize a ring for the queue.
                     */
                    memset(pQueue->pQueue, 0, pPciQueue->cbBuf);
                    ddi_put32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN), pPciQueue->pageBuf);
                    VirtioRingInit(pQueue, pQueue->Ring.cDesc, pQueue->pQueue, VIRTIO_PCI_RING_ALIGN);
                    return pPciQueue;
                }
                else
                    LogRel((VIRTIOLOGNAME ":VirtioPciGetQueue: ddi_dma_addr_bind_handle failed.\n"));

                ddi_dma_mem_free(&pPciQueue->hIO);
            }
            else
                LogRel((VIRTIOLOGNAME ":VirtioPciGetQueue: ddi_dma_mem_alloc failed.\n"));

            ddi_dma_free_handle(&pPciQueue->hDMA);
        }
        else
            LogRel((VIRTIOLOGNAME ":VirtioPciGetQueue: ddi_dma_alloc_handle failed.\n"));

        /* Unwind: release the partially constructed queue. */
        RTMemFree(pPciQueue);
    }

    return NULL;
}
Example No. 15
static int
virtio_enable_msi(struct virtio_softc *sc)
{
	int ret, i;
	int vq_handler_count = sc->sc_intr_num;

	/* Number of handlers, not counting the config. */
	if (sc->sc_intr_config)
		vq_handler_count--;

	/* Enable the interrupts. Either the whole block, or one by one. */
	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
		ret = ddi_intr_block_enable(sc->sc_intr_htable,
		    sc->sc_intr_num);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "Failed to enable MSI, falling back to INTx");
			goto out_enable;
		}
	} else {
		for (i = 0; i < sc->sc_intr_num; i++) {
			ret = ddi_intr_enable(sc->sc_intr_htable[i]);
			if (ret != DDI_SUCCESS) {
				dev_err(sc->sc_dev, CE_WARN,
				    "Failed to enable MSI %d, "
				    "falling back to INTx", i);

				while (--i >= 0) {
					(void) ddi_intr_disable(
					    sc->sc_intr_htable[i]);
				}
				goto out_enable;
			}
		}
	}

	/* Bind the allocated MSI to the queues and config */
	for (i = 0; i < vq_handler_count; i++) {
		int check;

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_SELECT), i);

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_VECTOR), i);

		check = ddi_get16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_VECTOR));
		if (check != i) {
			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
			    "for VQ %d, MSI %d. Check = %x", i, i, check);
			ret = ENODEV;
			goto out_bind;
		}
	}

	if (sc->sc_intr_config) {
		int check;

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_CONFIG_VECTOR), i);

		check = ddi_get16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_CONFIG_VECTOR));
		if (check != i) {
			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
			    "for Config updates, MSI %d", i);
			ret = ENODEV;
			goto out_bind;
		}
	}

	return (DDI_SUCCESS);

out_bind:
	/* Unbind the vqs */
	for (i = 0; i < vq_handler_count - 1; i++) {
		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_SELECT), i);

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_VECTOR),
		    VIRTIO_MSI_NO_VECTOR);
	}
	/* And the config */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
	    VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);

	ret = DDI_FAILURE;

out_enable:
	return (ret);
}
Example No. 16
/*
 * Allocate/free a vq.
 */
struct virtqueue *
virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
    unsigned int indirect_num, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize = 0;
	int ret;
	unsigned int ncookies;
	size_t len;
	struct virtqueue *vq;

	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
	vq_size = ddi_get16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
	if (vq_size == 0) {
		dev_err(sc->sc_dev, CE_WARN,
		    "virtqueue dest not exist, index %d for %s\n", index, name);
		goto out;
	}

	vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);

	/* size 0 => use native vq size, good for receive queues. */
	if (size)
		vq_size = MIN(vq_size, size);

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
	    sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
	    sizeof (struct vring_used_elem) * vq_size);

	allocsize = allocsize1 + allocsize2;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for vq %d", index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for vq %d", index);
		goto out_alloc;
	}

	ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
	    (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for vq %d", index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);
	/* and page-aligned buffers. */
	ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);

	(void) memset(vq->vq_vaddr, 0, allocsize);

	/* Make sure all zeros hit the buffer before we point the host to it */
	membar_producer();

	/* set the vq address */
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
	    (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_descs = vq->vq_vaddr;
	vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
	vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);

	ASSERT(indirect_num == 0 ||
	    virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
	vq->vq_indirect_num = indirect_num;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
	    KM_SLEEP);

	ret = virtio_init_vq(sc, vq);
	if (ret)
		goto out_init;

	dev_debug(sc->sc_dev, CE_NOTE,
	    "Allocated %d entries for vq %d:%s (%d indirect descs)",
	    vq_size, index, name, indirect_num * vq_size);

	return (vq);

out_init:
	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
out_bind:
	ddi_dma_mem_free(&vq->vq_dma_acch);
out_alloc:
	ddi_dma_free_handle(&vq->vq_dma_handle);
out_alloc_handle:
	kmem_free(vq, sizeof (struct virtqueue));
out:
	return (NULL);
}
Example No. 17
void
virtio_release_ints(struct virtio_softc *sc)
{
	int i;
	int ret;

	/* We were running with MSI, unbind them. */
	if (sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
		/* Unbind all vqs */
		for (i = 0; i < sc->sc_nvqs; i++) {
			ddi_put16(sc->sc_ioh,
			    /* LINTED E_BAD_PTR_CAST_ALIGN */
			    (uint16_t *)(sc->sc_io_addr +
			    VIRTIO_CONFIG_QUEUE_SELECT), i);

			ddi_put16(sc->sc_ioh,
			    /* LINTED E_BAD_PTR_CAST_ALIGN */
			    (uint16_t *)(sc->sc_io_addr +
			    VIRTIO_CONFIG_QUEUE_VECTOR),
			    VIRTIO_MSI_NO_VECTOR);
		}
		/* And the config */
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_CONFIG_VECTOR),
		    VIRTIO_MSI_NO_VECTOR);

	}

	/* Disable the interrupts. Either the whole block, or one by one. */
	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
		ret = ddi_intr_block_disable(sc->sc_intr_htable,
		    sc->sc_intr_num);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "Failed to disable MSIs, won't be able to "
			    "reuse next time");
		}
	} else {
		for (i = 0; i < sc->sc_intr_num; i++) {
			ret = ddi_intr_disable(sc->sc_intr_htable[i]);
			if (ret != DDI_SUCCESS) {
				dev_err(sc->sc_dev, CE_WARN,
				    "Failed to disable interrupt %d, "
				    "won't be able to reuse", i);
			}
		}
	}


	for (i = 0; i < sc->sc_intr_num; i++) {
		(void) ddi_intr_remove_handler(sc->sc_intr_htable[i]);
	}

	for (i = 0; i < sc->sc_intr_num; i++)
		(void) ddi_intr_free(sc->sc_intr_htable[i]);

	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) *
	    sc->sc_intr_num);

	/* After disabling interrupts, the config offset is non-MSI. */
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
}