Example #1
/*
 * audioixp_free_port()
 *
 * Description:
 *	This routine unbinds the DMA cookies, frees the DMA buffers,
 *	and deallocates the DMA handles.
 *
 * Arguments:
 *	audioixp_port_t	*port	The port structure for a DMA engine.
 */
static void
audioixp_free_port(audioixp_port_t *port)
{
	if (port == NULL)
		return;

	if (port->engine) {
		audio_dev_remove_engine(port->statep->adev, port->engine);
		audio_engine_free(port->engine);
	}
	if (port->bdl_paddr) {
		(void) ddi_dma_unbind_handle(port->bdl_dmah);
	}
	if (port->bdl_acch) {
		ddi_dma_mem_free(&port->bdl_acch);
	}
	if (port->bdl_dmah) {
		ddi_dma_free_handle(&port->bdl_dmah);
	}
	if (port->samp_paddr) {
		(void) ddi_dma_unbind_handle(port->samp_dmah);
	}
	if (port->samp_acch) {
		ddi_dma_mem_free(&port->samp_acch);
	}
	if (port->samp_dmah) {
		ddi_dma_free_handle(&port->samp_dmah);
	}
	kmem_free(port, sizeof (*port));
}
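Every teardown routine in this collection follows the same defensive order: unbind the handle only if a binding was actually made, free the DMA memory through its access handle, then free the DMA handle itself. A minimal sketch of that order, assuming a hypothetical my_dma_t container whose fields mirror the port fields above (it is not taken from any of the drivers shown here):

#include <sys/ddi.h>
#include <sys/sunddi.h>

/* Hypothetical container for one DMA allocation. */
typedef struct my_dma {
	ddi_dma_handle_t	md_dmah;	/* from ddi_dma_alloc_handle() */
	ddi_acc_handle_t	md_acch;	/* from ddi_dma_mem_alloc() */
	caddr_t			md_vaddr;	/* kernel virtual address */
	uint64_t		md_paddr;	/* nonzero once bound */
} my_dma_t;

static void
my_dma_teardown(my_dma_t *md)
{
	if (md->md_paddr != 0)
		(void) ddi_dma_unbind_handle(md->md_dmah);
	if (md->md_acch != NULL)
		ddi_dma_mem_free(&md->md_acch);
	if (md->md_dmah != NULL)
		ddi_dma_free_handle(&md->md_dmah);
	md->md_paddr = 0;
	md->md_vaddr = NULL;
}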
Example #2
extern void
emlxs_pkt_free(fc_packet_t *pkt)
{
	emlxs_port_t *port = (emlxs_port_t *)pkt->pkt_ulp_private;

	(void) emlxs_fca_pkt_uninit((opaque_t)port, pkt);

	if (pkt->pkt_datalen) {
		(void) ddi_dma_unbind_handle(pkt->pkt_data_dma);
		(void) ddi_dma_mem_free(&pkt->pkt_data_acc);
		(void) ddi_dma_free_handle(&pkt->pkt_data_dma);
	}

	if (pkt->pkt_rsplen) {
		(void) ddi_dma_unbind_handle(pkt->pkt_resp_dma);
		(void) ddi_dma_mem_free(&pkt->pkt_resp_acc);
		(void) ddi_dma_free_handle(&pkt->pkt_resp_dma);
	}

	if (pkt->pkt_cmdlen) {
		(void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma);
		(void) ddi_dma_mem_free(&pkt->pkt_cmd_acc);
		(void) ddi_dma_free_handle(&pkt->pkt_cmd_dma);
	}
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	kmem_free(pkt, (sizeof (fc_packet_t) + sizeof (emlxs_buf_t) +
	    sizeof (emlxs_pkt_cookie_t)));
#else
	kmem_free(pkt, (sizeof (fc_packet_t) + sizeof (emlxs_buf_t)));
#endif /* >= EMLXS_MODREV3 */

	return;

} /* emlxs_pkt_free() */
Example #3
/* XXX Bogus to look at the supposedly opaque handles, even to look for NULL */
static void
SMCG_dma_unalloc(smcg_t *smcg)
{
	Adapter_Struc			*pAd = smcg->smcg_pAd;
	struct smcg_rx_buffer_desc	*bdesc;
	int				i, j;

	for (i = 0; i < pAd->num_of_tx_buffs; i++)
		for (j = 0; j < SMCG_MAX_TX_MBLKS; j++) {
			if (smcg->tx_info[i].dmahandle[j] != NULL)
				ddi_dma_free_handle(
				    &smcg->tx_info[i].dmahandle[j]);
			smcg->tx_info[i].dmahandle[j] = NULL;
		}

	ASSERT(smcg->rx_bufs_outstanding == 0);
	/* Free up rx buffers currently on freelist */
	for (bdesc = smcg->rx_freelist; bdesc; bdesc = bdesc->next) {
		if (bdesc->dmahandle != NULL)
			(void) ddi_dma_unbind_handle(bdesc->dmahandle);
		if (bdesc->acchandle != NULL)
			ddi_dma_mem_free(&bdesc->acchandle);
		if (bdesc->dmahandle != NULL)
			ddi_dma_free_handle(&bdesc->dmahandle);
	}

	/* Free up all rx buffers that are associated with rx descriptors */
	for (i = 0; i < pAd->num_of_rx_buffs; i++) {
		if (smcg->bdesc[i] == NULL)
			continue;
		if (smcg->bdesc[i]->dmahandle != NULL)
			(void) ddi_dma_unbind_handle(smcg->bdesc[i]->dmahandle);
		if (smcg->bdesc[i]->acchandle != NULL)
			ddi_dma_mem_free(&smcg->bdesc[i]->acchandle);
		if (smcg->bdesc[i]->dmahandle != NULL)
			ddi_dma_free_handle(&smcg->bdesc[i]->dmahandle);
	}

	kmem_free(smcg->rxbdesc_mem,
	    sizeof (struct smcg_rx_buffer_desc) * pAd->num_of_rx_buffs*2);

	/* Free resources associated with shared ram block */
	if (smcg->hostram_dmahandle != NULL)
		(void) ddi_dma_unbind_handle(smcg->hostram_dmahandle);
	if (smcg->hostram_acchandle != NULL)
		ddi_dma_mem_free(&smcg->hostram_acchandle);
	if (smcg->hostram_dmahandle != NULL)
		ddi_dma_free_handle(&smcg->hostram_dmahandle);
}
Example #4
/* ARGSUSED */
i40e_status
i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	if (mem->pa != 0) {
		VERIFY(mem->idm_dma_handle != NULL);
		(void) ddi_dma_unbind_handle(mem->idm_dma_handle);
		mem->pa = 0;
		mem->size = 0;
	}

	if (mem->idm_acc_handle != NULL) {
		ddi_dma_mem_free(&mem->idm_acc_handle);
		mem->idm_acc_handle = NULL;
		mem->va = NULL;
	}

	if (mem->idm_dma_handle != NULL) {
		ddi_dma_free_handle(&mem->idm_dma_handle);
		mem->idm_dma_handle = NULL;
	}

	/*
	 * Watch out for sloppiness.
	 */
	ASSERT(mem->pa == 0);
	ASSERT(mem->va == NULL);
	ASSERT(mem->size == 0);
	mem->idm_alignment = UINT32_MAX;

	return (I40E_SUCCESS);
}
Example #5
void
atge_rx_desc_free(atge_t *atgep)
{
	atge_l1e_data_t *l1e;
	atge_dma_t *dma;
	int pages;

	l1e = (atge_l1e_data_t *)atgep->atge_private_data;
	if (l1e == NULL)
		return;

	if (l1e->atge_l1e_rx_page == NULL)
		return;

	for (pages = 0; pages < L1E_RX_PAGES; pages++) {
		dma = l1e->atge_l1e_rx_page[pages];
		if (dma != NULL) {
			(void) ddi_dma_unbind_handle(dma->hdl);
			ddi_dma_mem_free(&dma->acchdl);
			ddi_dma_free_handle(&dma->hdl);
			kmem_free(dma, sizeof (atge_dma_t));
		}
	}

	kmem_free(l1e->atge_l1e_rx_page, L1E_RX_PAGES * sizeof (atge_dma_t *));
	l1e->atge_l1e_rx_page = NULL;
}
Example #6
void
virtio_free_vq(struct virtqueue *vq)
{
	struct virtio_softc *sc = vq->vq_owner;
	int i;

	/* tell device that there's no virtqueue any longer */
	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
	    vq->vq_index);
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);

	/* Free the indirect descriptors, if any. */
	for (i = 0; i < vq->vq_num; i++) {
		struct vq_entry *entry = &vq->vq_entries[i];
		if (entry->qe_indirect_descs)
			virtio_free_indirect(entry);
	}

	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);

	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
	ddi_dma_mem_free(&vq->vq_dma_acch);
	ddi_dma_free_handle(&vq->vq_dma_handle);

	mutex_destroy(&vq->vq_used_lock);
	mutex_destroy(&vq->vq_avail_lock);
	mutex_destroy(&vq->vq_freelist_lock);

	kmem_free(vq, sizeof (struct virtqueue));
}
Example #7
void
efe_ring_free(efe_ring_t **rpp)
{
	efe_ring_t *rp = *rpp;

	ASSERT(rp != NULL);

	for (int i = 0; i < DESCLEN(rp); ++i) {
		efe_buf_t *bp = GETBUF(rp, i);
		if (bp != NULL) {
			efe_buf_free(&bp);
		}
	}
	kmem_free(rp->r_bufpp, BUFPSZ(DESCLEN(rp)));

	if (rp->r_descp != NULL) {
		(void) ddi_dma_unbind_handle(rp->r_dmah);
	}
	if (rp->r_acch != NULL) {
		ddi_dma_mem_free(&rp->r_acch);
	}
	if (rp->r_dmah != NULL) {
		ddi_dma_free_handle(&rp->r_dmah);
	}
	kmem_free(rp, sizeof (efe_ring_t));

	*rpp = NULL;
}
Example #8
/*
 * Allocate /size/ bytes of contiguous DMA-ble memory.
 *
 * Returns:
 *    0 on success, non-zero on failure.
 */
static int
vmxnet3_alloc_dma_mem(vmxnet3_softc_t *dp, vmxnet3_dmabuf_t *dma, size_t size,
    boolean_t canSleep, ddi_dma_attr_t *dma_attrs)
{
	ddi_dma_cookie_t cookie;
	uint_t cookieCount;
	int dmaerr, err = 0;
	int (*cb) (caddr_t) = canSleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	ASSERT(size != 0);

	/*
	 * Allocate a DMA handle
	 */
	if ((dmaerr = ddi_dma_alloc_handle(dp->dip, dma_attrs, cb, NULL,
	    &dma->dmaHandle)) != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "ddi_dma_alloc_handle() failed: %d", dmaerr);
		err = vmxnet3_dmaerr2errno(dmaerr);
		goto error;
	}

	/*
	 * Allocate memory
	 */
	if (ddi_dma_mem_alloc(dma->dmaHandle, size, &vmxnet3_dev_attr,
	    DDI_DMA_CONSISTENT, cb, NULL, &dma->buf, &dma->bufLen,
	    &dma->dataHandle) != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "ddi_dma_mem_alloc() failed");
		err = ENOMEM;
		goto error_dma_handle;
	}

	/*
	 * Map the memory
	 */
	if ((dmaerr = ddi_dma_addr_bind_handle(dma->dmaHandle, NULL, dma->buf,
	    dma->bufLen, DDI_DMA_RDWR | DDI_DMA_STREAMING, cb, NULL, &cookie,
	    &cookieCount)) != DDI_DMA_MAPPED) {
		VMXNET3_WARN(dp, "ddi_dma_addr_bind_handle() failed: %d",
		    dmaerr);
		err = vmxnet3_dmaerr2errno(dmaerr);
		goto error_dma_mem;
	}

	ASSERT(cookieCount == 1);
	dma->bufPA = cookie.dmac_laddress;

	return (0);

error_dma_mem:
	ddi_dma_mem_free(&dma->dataHandle);
error_dma_handle:
	ddi_dma_free_handle(&dma->dmaHandle);
error:
	dma->buf = NULL;
	dma->bufPA = NULL;
	dma->bufLen = 0;
	return (err);
}
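The matching release routine, vmxnet3_free_dma_mem(), appears as Example #14 below. A minimal usage sketch of the pair, assuming a hypothetical softc pointer dp, attribute structure my_dma_attrs, and helper name (none of which come from the driver itself):

/* Sketch only: my_alloc_shared_area, dp and my_dma_attrs are assumptions. */
static int
my_alloc_shared_area(vmxnet3_softc_t *dp, ddi_dma_attr_t *my_dma_attrs,
    vmxnet3_dmabuf_t *dma)
{
	int err;

	/* Sleeping allocation of one page of DMA-able memory. */
	err = vmxnet3_alloc_dma_mem(dp, dma, PAGESIZE, B_TRUE, my_dma_attrs);
	if (err != 0)
		return (err);

	/* dma->buf is the kernel virtual address, dma->bufPA the bus address. */
	bzero(dma->buf, dma->bufLen);
	return (0);
}

/* On detach or error: vmxnet3_free_dma_mem(dma) undoes all three steps. */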
Example #9
static int
rxbuf_ctor(void *arg1, void *arg2, int kmflag)
{
	struct rxbuf *rxb = arg1;
	struct rxbuf_cache_params *p = arg2;
	size_t real_len;
	ddi_dma_cookie_t cookie;
	uint_t ccount = 0;
	int (*callback)(caddr_t);
	int rc = ENOMEM;

	if (kmflag & KM_SLEEP)
		callback = DDI_DMA_SLEEP;
	else
		callback = DDI_DMA_DONTWAIT;

	rc = ddi_dma_alloc_handle(p->dip, &p->dma_attr_rx, callback, 0,
	    &rxb->dhdl);
	if (rc != DDI_SUCCESS)
		return (rc == DDI_DMA_BADATTR ? EINVAL : ENOMEM);

	rc = ddi_dma_mem_alloc(rxb->dhdl, p->buf_size, &p->acc_attr_rx,
	    DDI_DMA_STREAMING, callback, 0, &rxb->va, &real_len, &rxb->ahdl);
	if (rc != DDI_SUCCESS) {
		rc = ENOMEM;
		goto fail1;
	}

	rc = ddi_dma_addr_bind_handle(rxb->dhdl, NULL, rxb->va, p->buf_size,
	    DDI_DMA_READ | DDI_DMA_STREAMING, NULL, NULL, &cookie, &ccount);
	if (rc != DDI_DMA_MAPPED) {
		if (rc == DDI_DMA_INUSE)
			rc = EBUSY;
		else if (rc == DDI_DMA_TOOBIG)
			rc = E2BIG;
		else
			rc = ENOMEM;
		goto fail2;
	}

	if (ccount != 1) {
		rc = E2BIG;
		goto fail3;
	}

	rxb->ref_cnt = 0;
	rxb->buf_size = p->buf_size;
	rxb->freefunc.free_arg = (caddr_t)rxb;
	rxb->freefunc.free_func = rxbuf_free;
	rxb->ba = cookie.dmac_laddress;

	return (0);

fail3:	(void) ddi_dma_unbind_handle(rxb->dhdl);
fail2:	ddi_dma_mem_free(&rxb->ahdl);
fail1:	ddi_dma_free_handle(&rxb->dhdl);
	return (rc);
}
Example #10
/* ARGSUSED */
static void
rxbuf_dtor(void *arg1, void *arg2)
{
	struct rxbuf *rxb = arg1;

	(void) ddi_dma_unbind_handle(rxb->dhdl);
	ddi_dma_mem_free(&rxb->ahdl);
	ddi_dma_free_handle(&rxb->dhdl);
}
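rxbuf_ctor() and rxbuf_dtor() have exactly the constructor and destructor signatures expected by kmem_cache_create(), which hands the rxbuf_cache_params pointer back to the constructor as its second argument. A minimal sketch of wiring them into an object cache, assuming a hypothetical cache name and an already-populated parameter structure:

/* Sketch: the cache name, rxb_params and rxb_cache are assumptions. */
static struct rxbuf_cache_params rxb_params;	/* filled in at attach time */
static kmem_cache_t *rxb_cache;

static void
my_rxbuf_cache_init(void)
{
	rxb_cache = kmem_cache_create("my_rxbuf_cache", sizeof (struct rxbuf),
	    0, rxbuf_ctor, rxbuf_dtor, NULL, &rxb_params, NULL, 0);
}

static void
my_rxbuf_roundtrip(void)
{
	/* The constructor has already bound the buffer, so rxb->ba is valid. */
	struct rxbuf *rxb = kmem_cache_alloc(rxb_cache, KM_SLEEP);

	/* ... hand rxb->ba to the hardware, rxb->va to the stack ... */

	kmem_cache_free(rxb_cache, rxb);
}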
Example #11
/*
 * Cleanup isochronous resources.
 */
void
ehci_isoc_cleanup(
	ehci_state_t		*ehcip)
{
	ehci_isoc_xwrapper_t	*itw;
	ehci_pipe_private_t	*pp;
	ehci_itd_t		*itd;
	int			i, ctrl, rval;

	/* Free all the buffers */
	if (ehcip->ehci_itd_pool_addr && ehcip->ehci_itd_pool_mem_handle) {
		for (i = 0; i < ehci_get_itd_pool_size(); i ++) {
			itd = &ehcip->ehci_itd_pool_addr[i];
			ctrl = Get_ITD(ehcip->
			    ehci_itd_pool_addr[i].itd_state);

			if ((ctrl != EHCI_ITD_FREE) &&
			    (ctrl != EHCI_ITD_DUMMY) &&
			    (itd->itd_trans_wrapper)) {

				mutex_enter(&ehcip->ehci_int_mutex);

				itw = (ehci_isoc_xwrapper_t *)
					EHCI_LOOKUP_ID((uint32_t)
					Get_ITD(itd->itd_trans_wrapper));

				/* Obtain the pipe private structure */
				pp = itw->itw_pipe_private;

				ehci_deallocate_itd(ehcip, itw, itd);
				ehci_deallocate_itw(ehcip, pp, itw);

				mutex_exit(&ehcip->ehci_int_mutex);
			}
		}

		/*
		 * If EHCI_ITD_POOL_BOUND flag is set, then unbind
		 * the handle for ITD pools.
		 */
		if ((ehcip->ehci_dma_addr_bind_flag &
		    EHCI_ITD_POOL_BOUND) == EHCI_ITD_POOL_BOUND) {

			rval = ddi_dma_unbind_handle(
			    ehcip->ehci_itd_pool_dma_handle);

			ASSERT(rval == DDI_SUCCESS);
		}
		ddi_dma_mem_free(&ehcip->ehci_itd_pool_mem_handle);
	}

	/* Free the ITD pool */
	if (ehcip->ehci_itd_pool_dma_handle) {
		ddi_dma_free_handle(&ehcip->ehci_itd_pool_dma_handle);
	}
}
Example #12
static void
virtio_free_indirect(struct vq_entry *entry)
{

	(void) ddi_dma_unbind_handle(entry->qe_indirect_dma_handle);
	ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
	ddi_dma_free_handle(&entry->qe_indirect_dma_handle);

	entry->qe_indirect_descs = NULL;
}
Example #13
static void
virtionet_dma_teardown(virtionet_dma_t *dmap)
{
	if (dmap != NULL) {
		/* Release allocated system resources */
		(void) ddi_dma_unbind_handle(dmap->hdl);
		ddi_dma_mem_free(&dmap->acchdl);
		ddi_dma_free_handle(&dmap->hdl);
		kmem_free(dmap, sizeof (*dmap));
	}
}
Example #14
/*
 * Free DMA-ble memory.
 */
void
vmxnet3_free_dma_mem(vmxnet3_dmabuf_t *dma)
{
	(void) ddi_dma_unbind_handle(dma->dmaHandle);
	ddi_dma_mem_free(&dma->dataHandle);
	ddi_dma_free_handle(&dma->dmaHandle);

	dma->buf = NULL;
	dma->bufPA = NULL;
	dma->bufLen = 0;
}
Example #15
static void
pcn_destroybuf(pcn_buf_t *buf)
{
	if (buf == NULL)
		return;

	if (buf->pb_paddr)
		(void) ddi_dma_unbind_handle(buf->pb_dmah);
	if (buf->pb_acch)
		ddi_dma_mem_free(&buf->pb_acch);
	if (buf->pb_dmah)
		ddi_dma_free_handle(&buf->pb_dmah);
	kmem_free(buf, sizeof (*buf));
}
Example #16
static virtionet_dma_t *
virtionet_dma_setup(virtionet_state_t *sp, size_t len)
{
	virtionet_dma_t		*dmap;
	int			rc;

	dmap = kmem_zalloc(sizeof (*dmap), KM_SLEEP);

	vq_dma_attr.dma_attr_flags |= DDI_DMA_FORCE_PHYSICAL;

	rc = ddi_dma_alloc_handle(sp->dip, &vq_dma_attr, DDI_DMA_SLEEP,
	    NULL, &dmap->hdl);

	if (rc == DDI_DMA_BADATTR) {
		cmn_err(CE_NOTE, "Failed to allocate physical DMA; "
		    "falling back to virtual DMA");
		vq_dma_attr.dma_attr_flags &= (~DDI_DMA_FORCE_PHYSICAL);
		rc = ddi_dma_alloc_handle(sp->dip, &vq_dma_attr, DDI_DMA_SLEEP,
		    NULL, &dmap->hdl);
	}

	if (rc != DDI_SUCCESS) {
		kmem_free(dmap, sizeof (*dmap));
		return (NULL);
	}

	rc = ddi_dma_mem_alloc(dmap->hdl, len, &virtio_native_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dmap->addr,
	    &dmap->len, &dmap->acchdl);
	if (rc != DDI_SUCCESS) {
		ddi_dma_free_handle(&dmap->hdl);
		kmem_free(dmap, sizeof (*dmap));
		return (NULL);
	}

	bzero(dmap->addr, dmap->len);

	rc = ddi_dma_addr_bind_handle(dmap->hdl, NULL, dmap->addr,
	    dmap->len, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
	    NULL, &dmap->cookie, &dmap->ccount);
	if (rc != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&dmap->acchdl);
		ddi_dma_free_handle(&dmap->hdl);
		kmem_free(dmap, sizeof (*dmap));
		return (NULL);
	}
	ASSERT(dmap->ccount == 1);

	return (dmap);
}
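virtionet_dma_teardown() from Example #13 releases everything this routine sets up, and it tolerates a NULL argument. A minimal pairing sketch, assuming a hypothetical state pointer sp, ring size, and wrapper names:

/* Sketch: my_ring, my_ring_setup and ring_size are assumptions. */
static virtionet_dma_t *my_ring;

static int
my_ring_setup(virtionet_state_t *sp, size_t ring_size)
{
	my_ring = virtionet_dma_setup(sp, ring_size);
	if (my_ring == NULL)
		return (DDI_FAILURE);

	/* Program my_ring->cookie.dmac_laddress into the device here. */
	return (DDI_SUCCESS);
}

static void
my_ring_cleanup(void)
{
	virtionet_dma_teardown(my_ring);	/* safe even if NULL */
	my_ring = NULL;
}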
Example #17
static void
virtio_vq_teardown(virtionet_state_t *sp, virtqueue_t *vqp)
{
	if (vqp != NULL) {
		/* Clear the device notion of the virtqueue */
		VIRTIO_PUT16(sp, VIRTIO_QUEUE_SELECT, vqp->vq_num);
		VIRTIO_PUT32(sp, VIRTIO_QUEUE_ADDRESS, 0);

		/* Release allocated system resources */
		(void) ddi_dma_unbind_handle(vqp->vq_dma.hdl);
		ddi_dma_mem_free(&vqp->vq_dma.acchdl);
		ddi_dma_free_handle(&vqp->vq_dma.hdl);
		kmem_free(vqp, sizeof (*vqp));
	}
}
Example #18
static void
pcn_freerxring(pcn_t *pcnp)
{
	int	i;

	if (pcnp->pcn_rxbufs) {
		for (i = 0; i < PCN_RXRING; i++)
			pcn_destroybuf(pcnp->pcn_rxbufs[i]);

		kmem_free(pcnp->pcn_rxbufs, PCN_RXRING * sizeof (pcn_buf_t *));
	}

	if (pcnp->pcn_rxdesc_paddr)
		(void) ddi_dma_unbind_handle(pcnp->pcn_rxdesc_dmah);
	if (pcnp->pcn_rxdesc_acch)
		ddi_dma_mem_free(&pcnp->pcn_rxdesc_acch);
	if (pcnp->pcn_rxdesc_dmah)
		ddi_dma_free_handle(&pcnp->pcn_rxdesc_dmah);
}
Example #19
/*
 * function to delete a dma buffer
 *
 * dev - software handle to device
 * dbuf - dma obj  to delete
 *
 * return none
 */
void
oce_free_dma_buffer(struct oce_dev *dev, oce_dma_buf_t *dbuf)
{
	_NOTE(ARGUNUSED(dev));

	if (dbuf == NULL) {
		return;
	}
	if (dbuf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(dbuf->dma_handle);
	}
	if (dbuf->acc_handle != NULL) {
		ddi_dma_mem_free(&dbuf->acc_handle);
	}
	if (dbuf->dma_handle != NULL) {
		ddi_dma_free_handle(&dbuf->dma_handle);
	}
	kmem_free(dbuf, sizeof (oce_dma_buf_t));
} /* oce_free_dma_buffer */
Example #20
void
efe_buf_free(efe_buf_t **bpp)
{
	efe_buf_t *bp = *bpp;

	ASSERT(bp != NULL);

	if (bp->b_kaddr != NULL) {
		(void) ddi_dma_unbind_handle(bp->b_dmah);
	}
	if (bp->b_acch != NULL) {
		ddi_dma_mem_free(&bp->b_acch);
	}
	if (bp->b_dmah != NULL) {
		ddi_dma_free_handle(&bp->b_dmah);
	}
	kmem_free(bp, sizeof (efe_buf_t));

	*bpp = NULL;
}
Example #21
/*
 * igb_free_rbd_ring - Free the rx descriptors of one ring.
 */
static void
igb_free_rbd_ring(igb_rx_data_t *rx_data)
{
	if (rx_data->rbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_data->rbd_area.dma_handle);
	}
	if (rx_data->rbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
		rx_data->rbd_area.acc_handle = NULL;
	}
	if (rx_data->rbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
		rx_data->rbd_area.dma_handle = NULL;
	}
	rx_data->rbd_area.address = NULL;
	rx_data->rbd_area.dma_address = NULL;
	rx_data->rbd_area.size = 0;

	rx_data->rbd_ring = NULL;
}
Example #22
/*
 * igb_free_tbd_ring - Free the tx descriptors of one ring.
 */
static void
igb_free_tbd_ring(igb_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle);
	}
	if (tx_ring->tbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
		tx_ring->tbd_area.acc_handle = NULL;
	}
	if (tx_ring->tbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
		tx_ring->tbd_area.dma_handle = NULL;
	}
	tx_ring->tbd_area.address = NULL;
	tx_ring->tbd_area.dma_address = NULL;
	tx_ring->tbd_area.size = 0;

	tx_ring->tbd_ring = NULL;
}
Example #23
/**
 * Virtio Pci put queue routine. Releases the queue and frees the associated queue memory.
 *
 * @param pDevice           Pointer to the Virtio device instance.
 * @param pQueue            Pointer to the queue.
 */
static void VirtioPciPutQueue(PVIRTIODEVICE pDevice, PVIRTIOQUEUE pQueue)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciPutQueue pDevice=%p pQueue=%p\n", pDevice, pQueue));
    AssertReturnVoid(pDevice);
    AssertReturnVoid(pQueue);

    virtio_pci_t *pPci = pDevice->pvHyper;
    AssertReturnVoid(pPci);
    virtio_pci_queue_t *pPciQueue = pQueue->pvData;
    if (RT_UNLIKELY(!pPciQueue))
    {
        LogRel((VIRTIOLOGNAME ":VirtioPciPutQueue missing Pci queue.\n"));
        return;
    }

    ddi_put16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_SEL), pQueue->QueueIndex);
    ddi_put32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN), 0);

    ddi_dma_unbind_handle(pPciQueue->hDMA);
    ddi_dma_mem_free(&pPciQueue->hIO);
    ddi_dma_free_handle(&pPciQueue->hDMA);
    RTMemFree(pPciQueue);
}
Example #24
/*
 * igb_free_dma_buffer - Free one allocated area of dma memory and handle
 */
void
igb_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
		buf->dma_address = NULL;
	} else {
		return;
	}

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}
Example #25
/*
 * igb_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 */
static int
igb_alloc_rbd_ring(igb_rx_data_t *rx_data)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = rx_data->rx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_rx_desc) * rx_data->ring_size;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &rx_data->rbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma handle: %x", ret);
		rx_data->rbd_area.dma_handle = NULL;
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(rx_data->rbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&rx_data->rbd_area.address,
	    &len, &rx_data->rbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma memory: %x", ret);
		rx_data->rbd_area.acc_handle = NULL;
		rx_data->rbd_area.address = NULL;
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire receive buffer descriptor area to zero
	 */
	bzero(rx_data->rbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	ret = ddi_dma_addr_bind_handle(rx_data->rbd_area.dma_handle,
	    NULL, (caddr_t)rx_data->rbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind rbd dma resource: %x", ret);
		rx_data->rbd_area.dma_address = NULL;
		if (rx_data->rbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
			rx_data->rbd_area.acc_handle = NULL;
			rx_data->rbd_area.address = NULL;
		}
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	rx_data->rbd_area.dma_address = cookie.dmac_laddress;
	rx_data->rbd_area.size = len;

	rx_data->rbd_ring = (union e1000_adv_rx_desc *)(uintptr_t)
	    rx_data->rbd_area.address;

	return (IGB_SUCCESS);
}
Example #26
static int
virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
{
	int allocsize, num;
	size_t len;
	unsigned int ncookies;
	int ret;

	num = entry->qe_queue->vq_indirect_num;
	ASSERT(num > 1);

	allocsize = sizeof (struct vring_desc) * num;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
	    DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for indirect descriptors, "
		    "entry %d, vq %d", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle, allocsize,
	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&entry->qe_indirect_descs, &len,
	    &entry->qe_indirect_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for indirect descriptors, "
		    "entry %d, vq %d,", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_alloc;
	}

	(void) memset(entry->qe_indirect_descs, 0xff, allocsize);

	ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
	    (caddr_t)entry->qe_indirect_descs, len,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &entry->qe_indirect_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for indirect descriptors, "
		    "entry %d, vq %d", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);

	return (0);

out_bind:
	ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
out_alloc:
	ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
out_alloc_handle:

	return (ret);
}
Example #27
/*
 * hermon_check_iommu_bypass()
 *    Context: Only called from attach() path context
 *    XXX This is a DMA allocation routine outside the normal
 *	  path. FMA hardening will not like this.
 */
static void
hermon_check_iommu_bypass(hermon_state_t *state, hermon_cfg_profile_t *cp)
{
	ddi_dma_handle_t	dmahdl;
	ddi_dma_attr_t		dma_attr;
	int			status;
	ddi_acc_handle_t	acc_hdl;
	caddr_t			kaddr;
	size_t			actual_len;
	ddi_dma_cookie_t	cookie;
	uint_t			cookiecnt;

	hermon_dma_attr_init(state, &dma_attr);

	/* Try mapping for IOMMU bypass (Force Physical) */
	dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL |
	    DDI_DMA_RELAXED_ORDERING;

	/*
	 * Call ddi_dma_alloc_handle().  If this returns DDI_DMA_BADATTR then
	 * it is not possible to use IOMMU bypass with our PCI bridge parent.
	 * Since the function we are in can only be called if iommu bypass was
	 * requested in the config profile, we configure for bypass if the
	 * ddi_dma_alloc_handle() was successful.  Otherwise, we configure
	 * for non-bypass (ie: normal) mapping.
	 */
	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
	    DDI_DMA_SLEEP, NULL, &dmahdl);
	if (status == DDI_DMA_BADATTR) {
		cp->cp_iommu_bypass = HERMON_BINDMEM_NORMAL;
		return;
	} else if (status != DDI_SUCCESS) {	/* failed somehow */
		hermon_kernel_data_ro = HERMON_RO_DISABLED;
		hermon_user_data_ro = HERMON_RO_DISABLED;
		cp->cp_iommu_bypass = HERMON_BINDMEM_BYPASS;
		return;
	} else {
		cp->cp_iommu_bypass = HERMON_BINDMEM_BYPASS;
	}

	status = ddi_dma_mem_alloc(dmahdl, 256,
	    &state->hs_reg_accattr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, (caddr_t *)&kaddr, &actual_len, &acc_hdl);

	if (status != DDI_SUCCESS) {		/* failed somehow */
		hermon_kernel_data_ro = HERMON_RO_DISABLED;
		hermon_user_data_ro = HERMON_RO_DISABLED;
		ddi_dma_free_handle(&dmahdl);
		return;
	}

	status = ddi_dma_addr_bind_handle(dmahdl, NULL, kaddr, actual_len,
	    DDI_DMA_RDWR, DDI_DMA_SLEEP, NULL, &cookie, &cookiecnt);

	if (status == DDI_DMA_MAPPED) {
		(void) ddi_dma_unbind_handle(dmahdl);
	} else {
		hermon_kernel_data_ro = HERMON_RO_DISABLED;
		hermon_user_data_ro = HERMON_RO_DISABLED;
	}

	ddi_dma_mem_free(&acc_hdl);
	ddi_dma_free_handle(&dmahdl);
}
Example #28
/*
 * igb_alloc_dma_buffer - Allocate DMA resources for a DMA buffer
 */
static int
igb_alloc_dma_buffer(igb_t *igb,
    dma_buffer_t *buf, size_t size)
{
	int ret;
	dev_info_t *devinfo = igb->dip;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t cookie_num;

	ret = ddi_dma_alloc_handle(devinfo,
	    &igb_buf_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &buf->dma_handle);

	if (ret != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		igb_error(igb,
		    "Could not allocate dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &igb_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &buf->address,
	    &len, &buf->acc_handle);

	if (ret != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not allocate dma buffer memory: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		buf->dma_address = NULL;
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not bind dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (IGB_SUCCESS);
}
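igb_free_dma_buffer() (Example #24) undoes exactly these three steps. A minimal pairing sketch, assuming a hypothetical wrapper name and a 2 KB buffer size:

/* Sketch: my_buf_roundtrip and the 2 KB size are assumptions. */
static int
my_buf_roundtrip(igb_t *igb)
{
	dma_buffer_t buf;

	bzero(&buf, sizeof (buf));
	if (igb_alloc_dma_buffer(igb, &buf, 2048) != IGB_SUCCESS)
		return (IGB_FAILURE);

	/* buf.address: kernel virtual address; buf.dma_address: bus address. */

	igb_free_dma_buffer(&buf);	/* unbind, free memory, free handle */
	return (IGB_SUCCESS);
}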
Example #29
/*
 * igb_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 */
static int
igb_alloc_tbd_ring(igb_tx_ring_t *tx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = tx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_tx_desc) * tx_ring->ring_size;

	/*
	 * If tx head write-back is enabled, an extra tbd is allocated
	 * to save the head write-back value
	 */
	if (igb->tx_head_wb_enable) {
		size += sizeof (union e1000_adv_tx_desc);
	}

	/*
	 * Allocate a DMA handle for the transmit descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &tx_ring->tbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma handle: %x", ret);
		tx_ring->tbd_area.dma_handle = NULL;

		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tx_ring->tbd_area.address,
	    &len, &tx_ring->tbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma memory: %x", ret);
		tx_ring->tbd_area.acc_handle = NULL;
		tx_ring->tbd_area.address = NULL;
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	bzero(tx_ring->tbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound to
	 * the memory address.
	 */
	ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind tbd dma resource: %x", ret);
		tx_ring->tbd_area.dma_address = NULL;
		if (tx_ring->tbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
			tx_ring->tbd_area.acc_handle = NULL;
			tx_ring->tbd_area.address = NULL;
		}
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
	tx_ring->tbd_area.size = len;

	tx_ring->tbd_ring = (union e1000_adv_tx_desc *)(uintptr_t)
	    tx_ring->tbd_area.address;

	return (IGB_SUCCESS);
}
Example #30
/*
 * Allocate/free a vq.
 */
struct virtqueue *
virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
    unsigned int indirect_num, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize = 0;
	int ret;
	unsigned int ncookies;
	size_t len;
	struct virtqueue *vq;

	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
	vq_size = ddi_get16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
	if (vq_size == 0) {
		dev_err(sc->sc_dev, CE_WARN,
		    "virtqueue does not exist, index %d for %s\n", index, name);
		goto out;
	}

	vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);

	/* size 0 => use native vq size, good for receive queues. */
	if (size)
		vq_size = MIN(vq_size, size);

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
	    sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
	    sizeof (struct vring_used_elem) * vq_size);

	allocsize = allocsize1 + allocsize2;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for vq %d", index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for vq %d", index);
		goto out_alloc;
	}

	ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
	    (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for vq %d", index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);
	/* and page-aligned buffers. */
	ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);

	(void) memset(vq->vq_vaddr, 0, allocsize);

	/* Make sure all zeros hit the buffer before we point the host to it */
	membar_producer();

	/* set the vq address */
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
	    (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_descs = vq->vq_vaddr;
	vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
	vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);

	ASSERT(indirect_num == 0 ||
	    virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
	vq->vq_indirect_num = indirect_num;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
	    KM_SLEEP);

	ret = virtio_init_vq(sc, vq);
	if (ret)
		goto out_init;

	dev_debug(sc->sc_dev, CE_NOTE,
	    "Allocated %d entries for vq %d:%s (%d indirect descs)",
	    vq_size, index, name, indirect_num * vq_size);

	return (vq);

out_init:
	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
out_bind:
	ddi_dma_mem_free(&vq->vq_dma_acch);
out_alloc:
	ddi_dma_free_handle(&vq->vq_dma_handle);
out_alloc_handle:
	kmem_free(vq, sizeof (struct virtqueue));
out:
	return (NULL);
}
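virtio_free_vq() from Example #6 is the matching release path. A minimal pairing sketch, assuming a hypothetical wrapper name, queue index 0 and queue name "rx"; passing size 0 keeps the device's native queue size, as the comment above notes:

/* Sketch: my_rx_vq_setup, the queue index and the queue name are assumptions. */
static struct virtqueue *
my_rx_vq_setup(struct virtio_softc *sc)
{
	struct virtqueue *vq;

	/* Queue 0, native size, no indirect descriptors. */
	vq = virtio_alloc_vq(sc, 0, 0, 0, "rx");
	if (vq == NULL)
		return (NULL);

	/* ... post receive buffers and start using the queue ... */
	return (vq);
}

/* On detach, virtio_free_vq(vq) resets the device registers and frees it. */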