Example #1
/*
 * audioixp_free_port()
 *
 * Description:
 *	This routine unbinds the DMA cookies, frees the DMA buffers, and
 *	deallocates the DMA handles.
 *
 * Arguments:
 *	audioixp_port_t	*port	The port structure for a DMA engine.
 */
static void
audioixp_free_port(audioixp_port_t *port)
{
	if (port == NULL)
		return;

	if (port->engine) {
		audio_dev_remove_engine(port->statep->adev, port->engine);
		audio_engine_free(port->engine);
	}
	if (port->bdl_paddr) {
		(void) ddi_dma_unbind_handle(port->bdl_dmah);
	}
	if (port->bdl_acch) {
		ddi_dma_mem_free(&port->bdl_acch);
	}
	if (port->bdl_dmah) {
		ddi_dma_free_handle(&port->bdl_dmah);
	}
	if (port->samp_paddr) {
		(void) ddi_dma_unbind_handle(port->samp_dmah);
	}
	if (port->samp_acch) {
		ddi_dma_mem_free(&port->samp_acch);
	}
	if (port->samp_dmah) {
		ddi_dma_free_handle(&port->samp_dmah);
	}
	kmem_free(port, sizeof (*port));
}
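
Note: the guards above let audioixp_free_port() unwind a partially constructed
port, because each field is set only after the corresponding DDI step succeeds.
A minimal sketch of the allocation side for the sample buffer, assuming
illustrative attribute names (sample_dma_attr, sample_acc_attr) and a
samp_kaddr field:

static int
audioixp_alloc_port_sketch(dev_info_t *dip, audioixp_port_t *port, size_t len)
{
	size_t			rlen;
	ddi_dma_cookie_t	cookie;
	uint_t			ccount;

	/* Step 1: allocate the DMA handle; samp_dmah stays NULL on failure */
	if (ddi_dma_alloc_handle(dip, &sample_dma_attr, DDI_DMA_SLEEP,
	    NULL, &port->samp_dmah) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Step 2: allocate backing memory; sets samp_acch only on success */
	if (ddi_dma_mem_alloc(port->samp_dmah, len, &sample_acc_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &port->samp_kaddr,
	    &rlen, &port->samp_acch) != DDI_SUCCESS)
		return (DDI_FAILURE);	/* audioixp_free_port() unwinds */

	/* Step 3: bind; a nonzero samp_paddr marks the handle as bound */
	if (ddi_dma_addr_bind_handle(port->samp_dmah, NULL, port->samp_kaddr,
	    rlen, DDI_DMA_WRITE | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &cookie, &ccount) != DDI_DMA_MAPPED)
		return (DDI_FAILURE);
	port->samp_paddr = cookie.dmac_laddress;

	return (DDI_SUCCESS);
}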
Example #2
extern void
emlxs_pkt_free(fc_packet_t *pkt)
{
	emlxs_port_t *port = (emlxs_port_t *)pkt->pkt_ulp_private;

	(void) emlxs_fca_pkt_uninit((opaque_t)port, pkt);

	if (pkt->pkt_datalen) {
		(void) ddi_dma_unbind_handle(pkt->pkt_data_dma);
		(void) ddi_dma_mem_free(&pkt->pkt_data_acc);
		(void) ddi_dma_free_handle(&pkt->pkt_data_dma);
	}

	if (pkt->pkt_rsplen) {
		(void) ddi_dma_unbind_handle(pkt->pkt_resp_dma);
		(void) ddi_dma_mem_free(&pkt->pkt_resp_acc);
		(void) ddi_dma_free_handle(&pkt->pkt_resp_dma);
	}

	if (pkt->pkt_cmdlen) {
		(void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma);
		(void) ddi_dma_mem_free(&pkt->pkt_cmd_acc);
		(void) ddi_dma_free_handle(&pkt->pkt_cmd_dma);
	}
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	kmem_free(pkt, (sizeof (fc_packet_t) + sizeof (emlxs_buf_t) +
	    sizeof (emlxs_pkt_cookie_t)));
#else
	kmem_free(pkt, (sizeof (fc_packet_t) + sizeof (emlxs_buf_t)));
#endif /* >= EMLXS_MODREV3 */

	return;

} /* emlxs_pkt_free() */
Example #3
static virtionet_dma_t *
virtionet_dma_setup(virtionet_state_t *sp, size_t len)
{
	virtionet_dma_t		*dmap;
	int			rc;

	dmap = kmem_zalloc(sizeof (*dmap), KM_SLEEP);

	vq_dma_attr.dma_attr_flags |= DDI_DMA_FORCE_PHYSICAL;

	rc = ddi_dma_alloc_handle(sp->dip, &vq_dma_attr, DDI_DMA_SLEEP,
	    NULL, &dmap->hdl);

	if (rc == DDI_DMA_BADATTR) {
		cmn_err(CE_NOTE, "Failed to allocate physical DMA; "
		    "failing back to virtual DMA");
		vq_dma_attr.dma_attr_flags &= (~DDI_DMA_FORCE_PHYSICAL);
		rc = ddi_dma_alloc_handle(sp->dip, &vq_dma_attr, DDI_DMA_SLEEP,
		    NULL, &dmap->hdl);
	}

	if (rc != DDI_SUCCESS) {
		kmem_free(dmap, sizeof (*dmap));
		return (NULL);
	}

	rc = ddi_dma_mem_alloc(dmap->hdl, len, &virtio_native_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dmap->addr,
	    &dmap->len, &dmap->acchdl);
	if (rc != DDI_SUCCESS) {
		ddi_dma_free_handle(&dmap->hdl);
		kmem_free(dmap, sizeof (*dmap));
		return (NULL);
	}

	bzero(dmap->addr, dmap->len);

	rc = ddi_dma_addr_bind_handle(dmap->hdl, NULL, dmap->addr,
	    dmap->len, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
	    NULL, &dmap->cookie, &dmap->ccount);
	if (rc != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&dmap->acchdl);
		ddi_dma_free_handle(&dmap->hdl);
		kmem_free(dmap, sizeof (*dmap));
		return (NULL);
	}
	ASSERT(dmap->ccount == 1);

	return (dmap);
}
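
Note: this setup routine pairs with virtionet_dma_teardown() (Example #16
below). A usage sketch, with qsize standing in for the caller-chosen length:

	virtionet_dma_t	*dmap;

	dmap = virtionet_dma_setup(sp, qsize);
	if (dmap == NULL)
		return (DDI_FAILURE);
	/* ... program the device with dmap->cookie.dmac_laddress ... */
	virtionet_dma_teardown(dmap);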
Example #4
File: smcp.c Project: andreiw/polaris
/* XXX Bogus to look at the supposedly opaque handles, even to look for NULL */
static void
SMCG_dma_unalloc(smcg_t *smcg)
{
	Adapter_Struc			*pAd = smcg->smcg_pAd;
	struct smcg_rx_buffer_desc	*bdesc;
	int				i, j;

	for (i = 0; i < pAd->num_of_tx_buffs; i++)
		for (j = 0; j < SMCG_MAX_TX_MBLKS; j++) {
			if (smcg->tx_info[i].dmahandle[j] != NULL)
				ddi_dma_free_handle(
				    &smcg->tx_info[i].dmahandle[j]);
			smcg->tx_info[i].dmahandle[j] = NULL;
		}

	ASSERT(smcg->rx_bufs_outstanding == 0);
	/* Free up rx buffers currently on freelist */
	for (bdesc = smcg->rx_freelist; bdesc; bdesc = bdesc->next) {
		if (bdesc->dmahandle != NULL)
			(void) ddi_dma_unbind_handle(bdesc->dmahandle);
		if (bdesc->acchandle != NULL)
			ddi_dma_mem_free(&bdesc->acchandle);
		if (bdesc->dmahandle != NULL)
			ddi_dma_free_handle(&bdesc->dmahandle);
	}

	/* Free up all rx buffers that are associated with rx descriptors */
	for (i = 0; i < pAd->num_of_rx_buffs; i++) {
		if (smcg->bdesc[i] == NULL)
			continue;
		if (smcg->bdesc[i]->dmahandle != NULL)
			(void) ddi_dma_unbind_handle(smcg->bdesc[i]->dmahandle);
		if (smcg->bdesc[i]->acchandle != NULL)
			ddi_dma_mem_free(&smcg->bdesc[i]->acchandle);
		if (smcg->bdesc[i]->dmahandle != NULL)
			ddi_dma_free_handle(&smcg->bdesc[i]->dmahandle);
	}

	kmem_free(smcg->rxbdesc_mem,
	    sizeof (struct smcg_rx_buffer_desc) * pAd->num_of_rx_buffs * 2);

	/* Free resources associated with shared ram block */
	if (smcg->hostram_dmahandle != NULL)
		(void) ddi_dma_unbind_handle(smcg->hostram_dmahandle);
	if (smcg->hostram_acchandle != NULL)
		ddi_dma_mem_free(&smcg->hostram_acchandle);
	if (smcg->hostram_dmahandle != NULL)
		ddi_dma_free_handle(&smcg->hostram_dmahandle);
}
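
Note: the XXX above names the cleaner pattern used by later examples such as
Example #8: record progress in driver-private fields rather than testing the
opaque DDI handles. A sketch with a hypothetical hostram_bound flag:

	/* Set smcg->hostram_bound = B_TRUE after a successful bind ... */
	if (smcg->hostram_bound) {
		(void) ddi_dma_unbind_handle(smcg->hostram_dmahandle);
		smcg->hostram_bound = B_FALSE;
	}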
Example #5
void
efe_ring_free(efe_ring_t **rpp)
{
	efe_ring_t *rp = *rpp;

	ASSERT(rp != NULL);

	for (int i = 0; i < DESCLEN(rp); ++i) {
		efe_buf_t *bp = GETBUF(rp, i);
		if (bp != NULL) {
			efe_buf_free(&bp);
		}
	}
	kmem_free(rp->r_bufpp, BUFPSZ(DESCLEN(rp)));

	if (rp->r_descp != NULL) {
		(void) ddi_dma_unbind_handle(rp->r_dmah);
	}
	if (rp->r_acch != NULL) {
		ddi_dma_mem_free(&rp->r_acch);
	}
	if (rp->r_dmah != NULL) {
		ddi_dma_free_handle(&rp->r_dmah);
	}
	kmem_free(rp, sizeof (efe_ring_t));

	*rpp = NULL;
}
Example #6
void
virtio_free_vq(struct virtqueue *vq)
{
	struct virtio_softc *sc = vq->vq_owner;
	int i;

	/* tell device that there's no virtqueue any longer */
	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
	    vq->vq_index);
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);

	/* Free the indirect descriptors, if any. */
	for (i = 0; i < vq->vq_num; i++) {
		struct vq_entry *entry = &vq->vq_entries[i];
		if (entry->qe_indirect_descs)
			virtio_free_indirect(entry);
	}

	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);

	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
	ddi_dma_mem_free(&vq->vq_dma_acch);
	ddi_dma_free_handle(&vq->vq_dma_handle);

	mutex_destroy(&vq->vq_used_lock);
	mutex_destroy(&vq->vq_avail_lock);
	mutex_destroy(&vq->vq_freelist_lock);

	kmem_free(vq, sizeof (struct virtqueue));
}
Example #7
/**
 * Virtio Net TX buffer destructor for kmem_cache_create().
 *
 * @param pvBuf             Pointer to the allocated buffer.
 * @param pvArg             Unused.
 */
static void VirtioNetTxBufDestroy(void *pvBuf, void *pvArg)
{
    NOREF(pvArg);
    virtio_net_txbuf_t *pTxBuf = pvBuf;

    ddi_dma_free_handle(&pTxBuf->hDMA);
}
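
Note: a kmem cache destructor like this is invoked only by the cache it was
registered with. A registration sketch, assuming a matching constructor named
VirtioNetTxBufCreate and an illustrative cache name:

    kmem_cache_t *pTxCache = kmem_cache_create("virtio_net_txbuf",
        sizeof (virtio_net_txbuf_t), 0, VirtioNetTxBufCreate,
        VirtioNetTxBufDestroy, NULL /* reclaim */, pDevice /* pvArg */,
        NULL /* vmp */, 0);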
Example #8
/* ARGSUSED */
i40e_status
i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	if (mem->pa != 0) {
		VERIFY(mem->idm_dma_handle != NULL);
		(void) ddi_dma_unbind_handle(mem->idm_dma_handle);
		mem->pa = 0;
		mem->size = 0;
	}

	if (mem->idm_acc_handle != NULL) {
		ddi_dma_mem_free(&mem->idm_acc_handle);
		mem->idm_acc_handle = NULL;
		mem->va = NULL;
	}

	if (mem->idm_dma_handle != NULL) {
		ddi_dma_free_handle(&mem->idm_dma_handle);
		mem->idm_dma_handle = NULL;
	}

	/*
	 * Watch out for sloppiness.
	 */
	ASSERT(mem->pa == 0);
	ASSERT(mem->va == NULL);
	ASSERT(mem->size == 0);
	mem->idm_alignment = UINT32_MAX;

	return (I40E_SUCCESS);
}
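
Note: because each branch NULLs or zeroes its field after releasing it, the
routine is idempotent; a second call falls through all three guards:

	(void) i40e_free_dma_mem(hw, mem);
	(void) i40e_free_dma_mem(hw, mem);	/* safe no-op */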
Example #9
void
atge_rx_desc_free(atge_t *atgep)
{
	atge_l1e_data_t *l1e;
	atge_dma_t *dma;
	int pages;

	l1e = (atge_l1e_data_t *)atgep->atge_private_data;
	if (l1e == NULL)
		return;

	if (l1e->atge_l1e_rx_page == NULL)
		return;

	for (pages = 0; pages < L1E_RX_PAGES; pages++) {
		dma = l1e->atge_l1e_rx_page[pages];
		if (dma != NULL) {
			(void) ddi_dma_unbind_handle(dma->hdl);
			ddi_dma_mem_free(&dma->acchdl);
			ddi_dma_free_handle(&dma->hdl);
			kmem_free(dma, sizeof (atge_dma_t));
		}
	}

	kmem_free(l1e->atge_l1e_rx_page, L1E_RX_PAGES * sizeof (atge_dma_t *));
	l1e->atge_l1e_rx_page = NULL;
}
Example #10
/*
 * Allocate /size/ bytes of contiguous DMA-ble memory.
 *
 * Returns:
 *    0 on success, non-zero on failure.
 */
static int
vmxnet3_alloc_dma_mem(vmxnet3_softc_t *dp, vmxnet3_dmabuf_t *dma, size_t size,
    boolean_t canSleep, ddi_dma_attr_t *dma_attrs)
{
	ddi_dma_cookie_t cookie;
	uint_t cookieCount;
	int dmaerr, err = 0;
	int (*cb) (caddr_t) = canSleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	ASSERT(size != 0);

	/*
	 * Allocate a DMA handle
	 */
	if ((dmaerr = ddi_dma_alloc_handle(dp->dip, dma_attrs, cb, NULL,
	    &dma->dmaHandle)) != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "ddi_dma_alloc_handle() failed: %d", dmaerr);
		err = vmxnet3_dmaerr2errno(dmaerr);
		goto error;
	}

	/*
	 * Allocate memory
	 */
	if (ddi_dma_mem_alloc(dma->dmaHandle, size, &vmxnet3_dev_attr,
	    DDI_DMA_CONSISTENT, cb, NULL, &dma->buf, &dma->bufLen,
	    &dma->dataHandle) != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "ddi_dma_mem_alloc() failed");
		err = ENOMEM;
		goto error_dma_handle;
	}

	/*
	 * Map the memory
	 */
	if ((dmaerr = ddi_dma_addr_bind_handle(dma->dmaHandle, NULL, dma->buf,
	    dma->bufLen, DDI_DMA_RDWR | DDI_DMA_STREAMING, cb, NULL, &cookie,
	    &cookieCount)) != DDI_DMA_MAPPED) {
		VMXNET3_WARN(dp, "ddi_dma_addr_bind_handle() failed: %d",
		    dmaerr);
		err = vmxnet3_dmaerr2errno(dmaerr);
		goto error_dma_mem;
	}

	ASSERT(cookieCount == 1);
	dma->bufPA = cookie.dmac_laddress;

	return (0);

error_dma_mem:
	ddi_dma_mem_free(&dma->dataHandle);
error_dma_handle:
	ddi_dma_free_handle(&dma->dmaHandle);
error:
	dma->buf = NULL;
	dma->bufPA = 0;
	dma->bufLen = 0;
	return (err);
}
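
Note: the error labels unwind in exact reverse order of the three setup steps,
so each label releases only what is already held. A pairing sketch with
vmxnet3_free_dma_mem() (Example #18), using an assumed attributes variable:

	vmxnet3_dmabuf_t dma;

	if (vmxnet3_alloc_dma_mem(dp, &dma, PAGESIZE, B_TRUE,
	    &vmxnet3_dma_attrs) == 0) {
		/* the device sees dma.bufPA; the host writes through dma.buf */
		vmxnet3_free_dma_mem(&dma);
	}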
Example #11
static int
rxbuf_ctor(void *arg1, void *arg2, int kmflag)
{
	struct rxbuf *rxb = arg1;
	struct rxbuf_cache_params *p = arg2;
	size_t real_len;
	ddi_dma_cookie_t cookie;
	uint_t ccount = 0;
	int (*callback)(caddr_t);
	int rc = ENOMEM;

	if (kmflag & KM_SLEEP)
		callback = DDI_DMA_SLEEP;
	else
		callback = DDI_DMA_DONTWAIT;

	rc = ddi_dma_alloc_handle(p->dip, &p->dma_attr_rx, callback, 0,
	    &rxb->dhdl);
	if (rc != DDI_SUCCESS)
		return (rc == DDI_DMA_BADATTR ? EINVAL : ENOMEM);

	rc = ddi_dma_mem_alloc(rxb->dhdl, p->buf_size, &p->acc_attr_rx,
	    DDI_DMA_STREAMING, callback, 0, &rxb->va, &real_len, &rxb->ahdl);
	if (rc != DDI_SUCCESS) {
		rc = ENOMEM;
		goto fail1;
	}

	rc = ddi_dma_addr_bind_handle(rxb->dhdl, NULL, rxb->va, p->buf_size,
	    DDI_DMA_READ | DDI_DMA_STREAMING, NULL, NULL, &cookie, &ccount);
	if (rc != DDI_DMA_MAPPED) {
		if (rc == DDI_DMA_INUSE)
			rc = EBUSY;
		else if (rc == DDI_DMA_TOOBIG)
			rc = E2BIG;
		else
			rc = ENOMEM;
		goto fail2;
	}

	if (ccount != 1) {
		rc = E2BIG;
		goto fail3;
	}

	rxb->ref_cnt = 0;
	rxb->buf_size = p->buf_size;
	rxb->freefunc.free_arg = (caddr_t)rxb;
	rxb->freefunc.free_func = rxbuf_free;
	rxb->ba = cookie.dmac_laddress;

	return (0);

fail3:	(void) ddi_dma_unbind_handle(rxb->dhdl);
fail2:	ddi_dma_mem_free(&rxb->ahdl);
fail1:	ddi_dma_free_handle(&rxb->dhdl);
	return (rc);
}
Example #12
/* ARGSUSED */
static void
rxbuf_dtor(void *arg1, void *arg2)
{
	struct rxbuf *rxb = arg1;

	(void) ddi_dma_unbind_handle(rxb->dhdl);
	ddi_dma_mem_free(&rxb->ahdl);
	ddi_dma_free_handle(&rxb->dhdl);
}
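
Note: Examples #11 and #12 are a constructor/destructor pair for
kmem_cache_create(); the params structure is passed through as the cache's
private argument. A wiring sketch with illustrative names:

	struct rxbuf_cache_params	params;
	kmem_cache_t			*cache;
	struct rxbuf			*rxb;

	/* fill in params.dip, params.buf_size, params.dma_attr_rx, ... */
	cache = kmem_cache_create("rxbuf_cache", sizeof (struct rxbuf), 0,
	    rxbuf_ctor, rxbuf_dtor, NULL, &params, NULL, 0);

	rxb = kmem_cache_alloc(cache, KM_SLEEP);
	/* ... post rxb->ba to the hardware, read packets at rxb->va ... */
	kmem_cache_free(cache, rxb);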
Example #13
static void
oce_wqm_dtor(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
{
	_NOTE(ARGUNUSED(wq));
	/* Free the DMA handle */
	if (wqmd->dma_handle != NULL)
		(void) ddi_dma_free_handle(&(wqmd->dma_handle));
	wqmd->dma_handle = NULL;
} /* oce_wqm_dtor */
Example #14
/*
 * Cleanup isochronous resources.
 */
void
ehci_isoc_cleanup(
	ehci_state_t		*ehcip)
{
	ehci_isoc_xwrapper_t	*itw;
	ehci_pipe_private_t	*pp;
	ehci_itd_t		*itd;
	int			i, ctrl, rval;

	/* Free all the buffers */
	if (ehcip->ehci_itd_pool_addr && ehcip->ehci_itd_pool_mem_handle) {
		for (i = 0; i < ehci_get_itd_pool_size(); i++) {
			itd = &ehcip->ehci_itd_pool_addr[i];
			ctrl = Get_ITD(ehcip->
			    ehci_itd_pool_addr[i].itd_state);

			if ((ctrl != EHCI_ITD_FREE) &&
			    (ctrl != EHCI_ITD_DUMMY) &&
			    (itd->itd_trans_wrapper)) {

				mutex_enter(&ehcip->ehci_int_mutex);

				itw = (ehci_isoc_xwrapper_t *)
				    EHCI_LOOKUP_ID((uint32_t)
				    Get_ITD(itd->itd_trans_wrapper));

				/* Obtain the pipe private structure */
				pp = itw->itw_pipe_private;

				ehci_deallocate_itd(ehcip, itw, itd);
				ehci_deallocate_itw(ehcip, pp, itw);

				mutex_exit(&ehcip->ehci_int_mutex);
			}
		}

		/*
		 * If EHCI_ITD_POOL_BOUND flag is set, then unbind
		 * the handle for ITD pools.
		 */
		if ((ehcip->ehci_dma_addr_bind_flag &
		    EHCI_ITD_POOL_BOUND) == EHCI_ITD_POOL_BOUND) {

			rval = ddi_dma_unbind_handle(
			    ehcip->ehci_itd_pool_dma_handle);

			ASSERT(rval == DDI_SUCCESS);
		}
		ddi_dma_mem_free(&ehcip->ehci_itd_pool_mem_handle);
	}

	/* Free the ITD pool */
	if (ehcip->ehci_itd_pool_dma_handle) {
		ddi_dma_free_handle(&ehcip->ehci_itd_pool_dma_handle);
	}
}
Example #15
static void
virtio_free_indirect(struct vq_entry *entry)
{

	(void) ddi_dma_unbind_handle(entry->qe_indirect_dma_handle);
	ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
	ddi_dma_free_handle(&entry->qe_indirect_dma_handle);

	entry->qe_indirect_descs = NULL;
}
Example #16
static void
virtionet_dma_teardown(virtionet_dma_t *dmap)
{
	if (dmap != NULL) {
		/* Release allocated system resources */
		(void) ddi_dma_unbind_handle(dmap->hdl);
		ddi_dma_mem_free(&dmap->acchdl);
		ddi_dma_free_handle(&dmap->hdl);
		kmem_free(dmap, sizeof (*dmap));
	}
}
Example #17
static void
bd_xfer_dtor(void *buf, void *arg)
{
	bd_xfer_impl_t	*xi = buf;

	_NOTE(ARGUNUSED(arg));

	if (xi->i_dmah)
		ddi_dma_free_handle(&xi->i_dmah);
	xi->i_dmah = NULL;
}
Example #18
/*
 * Free DMA-ble memory.
 */
void
vmxnet3_free_dma_mem(vmxnet3_dmabuf_t *dma)
{
	(void) ddi_dma_unbind_handle(dma->dmaHandle);
	ddi_dma_mem_free(&dma->dataHandle);
	ddi_dma_free_handle(&dma->dmaHandle);

	dma->buf = NULL;
	dma->bufPA = 0;
	dma->bufLen = 0;
}
Example #19
static void
pcn_destroybuf(pcn_buf_t *buf)
{
	if (buf == NULL)
		return;

	if (buf->pb_paddr)
		(void) ddi_dma_unbind_handle(buf->pb_dmah);
	if (buf->pb_acch)
		ddi_dma_mem_free(&buf->pb_acch);
	if (buf->pb_dmah)
		ddi_dma_free_handle(&buf->pb_dmah);
	kmem_free(buf, sizeof (*buf));
}
Example #20
static void
virtio_vq_teardown(virtionet_state_t *sp, virtqueue_t *vqp)
{
	if (vqp != NULL) {
		/* Clear the device notion of the virtqueue */
		VIRTIO_PUT16(sp, VIRTIO_QUEUE_SELECT, vqp->vq_num);
		VIRTIO_PUT32(sp, VIRTIO_QUEUE_ADDRESS, 0);

		/* Release allocated system resources */
		(void) ddi_dma_unbind_handle(vqp->vq_dma.hdl);
		ddi_dma_mem_free(&vqp->vq_dma.acchdl);
		ddi_dma_free_handle(&vqp->vq_dma.hdl);
		kmem_free(vqp, sizeof (*vqp));
	}
}
Example #21
/*
 * igb_free_tcb_lists - Release the memory allocated for
 * the transmit control blocks of one ring.
 */
static void
igb_free_tcb_lists(igb_tx_ring_t *tx_ring)
{
	int i;
	tx_control_block_t *tcb;

	tcb = tx_ring->tcb_area;
	if (tcb == NULL)
		return;

	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		/* Free the tx dma handle for dynamic binding */
		if (tcb->tx_dma_handle != NULL) {
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, then we don't
			 * have to check the remaining.
			 */
			break;
		}

		igb_free_dma_buffer(&tcb->tx_buf);
	}

	if (tx_ring->tcb_area != NULL) {
		kmem_free(tx_ring->tcb_area,
		    sizeof (tx_control_block_t) * tx_ring->free_list_size);
		tx_ring->tcb_area = NULL;
	}

	if (tx_ring->work_list != NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;
	}

	if (tx_ring->free_list != NULL) {
		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;
	}
}
Example #22
static void
pcn_freerxring(pcn_t *pcnp)
{
	int	i;

	if (pcnp->pcn_rxbufs) {
		for (i = 0; i < PCN_RXRING; i++)
			pcn_destroybuf(pcnp->pcn_rxbufs[i]);

		kmem_free(pcnp->pcn_rxbufs, PCN_RXRING * sizeof (pcn_buf_t *));
	}

	if (pcnp->pcn_rxdesc_paddr)
		(void) ddi_dma_unbind_handle(pcnp->pcn_rxdesc_dmah);
	if (pcnp->pcn_rxdesc_acch)
		ddi_dma_mem_free(&pcnp->pcn_rxdesc_acch);
	if (pcnp->pcn_rxdesc_dmah)
		ddi_dma_free_handle(&pcnp->pcn_rxdesc_dmah);
}
Example #23
File: dvma.c Project: andreiw/polaris
/*ARGSUSED*/
void
dvma_unload(ddi_dma_handle_t h, uint_t objindex, uint_t type)
{
	register ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	struct fast_dvma *nexus_private;
	struct dvma_ops *nexus_funcptr;

	if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
		nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
		nexus_funcptr = (struct dvma_ops *)nexus_private->ops;
		(void) (*nexus_funcptr->dvma_unload)(h, objindex, type);
	} else {
		ddi_dma_handle_t handle;

		handle = ((ddi_dma_handle_t *)mp->dmai_minfo)[objindex];
		(void) ddi_dma_unbind_handle(handle);
		(void) ddi_dma_free_handle(&handle);
	}
}
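
Note: this unload belongs to the fast-DVMA cycle from dvma_reserve(9F); a
sketch of the load/sync/unload sequence for object index 0:

	ddi_dma_cookie_t	cookie;

	dvma_kaddr_load(h, kaddr, len, 0, &cookie);	/* map object 0 */
	dvma_sync(h, 0, DDI_DMA_SYNC_FORDEV);
	/* ... device DMA to/from cookie.dmac_address ... */
	dvma_unload(h, 0, DDI_DMA_SYNC_FORCPU);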
Example #24
/*
 * function to delete a dma buffer
 *
 * dev - software handle to device
 * dbuf - dma obj to delete
 *
 * return none
 */
void
oce_free_dma_buffer(struct oce_dev *dev, oce_dma_buf_t *dbuf)
{
	_NOTE(ARGUNUSED(dev));

	if (dbuf == NULL) {
		return;
	}
	if (dbuf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(dbuf->dma_handle);
	}
	if (dbuf->acc_handle != NULL) {
		ddi_dma_mem_free(&dbuf->acc_handle);
	}
	if (dbuf->dma_handle != NULL) {
		ddi_dma_free_handle(&dbuf->dma_handle);
	}
	kmem_free(dbuf, sizeof (oce_dma_buf_t));
} /* oce_free_dma_buffer */
Example #25
void
efe_buf_free(efe_buf_t **bpp)
{
	efe_buf_t *bp = *bpp;

	ASSERT(bp != NULL);

	if (bp->b_kaddr != NULL) {
		(void) ddi_dma_unbind_handle(bp->b_dmah);
	}
	if (bp->b_acch != NULL) {
		ddi_dma_mem_free(&bp->b_acch);
	}
	if (bp->b_dmah != NULL) {
		ddi_dma_free_handle(&bp->b_dmah);
	}
	kmem_free(bp, sizeof (efe_buf_t));

	*bpp = NULL;
}
Example #26
/*
 * igb_free_rbd_ring - Free the rx descriptors of one ring.
 */
static void
igb_free_rbd_ring(igb_rx_data_t *rx_data)
{
	if (rx_data->rbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_data->rbd_area.dma_handle);
	}
	if (rx_data->rbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
		rx_data->rbd_area.acc_handle = NULL;
	}
	if (rx_data->rbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
		rx_data->rbd_area.dma_handle = NULL;
	}
	rx_data->rbd_area.address = NULL;
	rx_data->rbd_area.dma_address = 0;
	rx_data->rbd_area.size = 0;

	rx_data->rbd_ring = NULL;
}
Example #27
/*
 * igb_free_tbd_ring - Free the tx descriptors of one ring.
 */
static void
igb_free_tbd_ring(igb_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle);
	}
	if (tx_ring->tbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
		tx_ring->tbd_area.acc_handle = NULL;
	}
	if (tx_ring->tbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
		tx_ring->tbd_area.dma_handle = NULL;
	}
	tx_ring->tbd_area.address = NULL;
	tx_ring->tbd_area.dma_address = 0;
	tx_ring->tbd_area.size = 0;

	tx_ring->tbd_ring = NULL;
}
Example #28
void *
xen_alloc_pages(pgcnt_t cnt)
{
	size_t len;
	int a = xen_alloc_cnt++;
	caddr_t addr;

	ASSERT(xen_alloc_cnt < MAX_ALLOCATIONS);
	if (ddi_dma_alloc_handle(xpv_dip, &xpv_dma_attr, DDI_DMA_SLEEP, 0,
	    &xpv_dma_handle[a]) != DDI_SUCCESS)
		return (NULL);

	if (ddi_dma_mem_alloc(xpv_dma_handle[a], MMU_PAGESIZE * cnt,
	    &xpv_accattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    &addr, &len, &xpv_dma_acchandle[a]) != DDI_SUCCESS) {
		ddi_dma_free_handle(&xpv_dma_handle[a]);
		cmn_err(CE_WARN, "Couldn't allocate memory for xpv devices");
		return (NULL);
	}
	return (addr);
}
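
Note: xen_alloc_pages() only allocates and never binds, so the matching
teardown (not shown in the original) would skip ddi_dma_unbind_handle(); a
sketch:

	int	a;

	for (a = 0; a < xen_alloc_cnt; a++) {
		ddi_dma_mem_free(&xpv_dma_acchandle[a]);
		ddi_dma_free_handle(&xpv_dma_handle[a]);
	}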
Example #29
/**
 * Virtio Pci put queue routine. Unregisters the queue from the device and
 * frees its resources.
 *
 * @param pDevice           Pointer to the Virtio device instance.
 * @param pQueue            Pointer to the queue.
 */
static void VirtioPciPutQueue(PVIRTIODEVICE pDevice, PVIRTIOQUEUE pQueue)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciPutQueue pDevice=%p pQueue=%p\n", pDevice, pQueue));
    AssertReturnVoid(pDevice);
    AssertReturnVoid(pQueue);

    virtio_pci_t *pPci = pDevice->pvHyper;
    AssertReturnVoid(pPci);
    virtio_pci_queue_t *pPciQueue = pQueue->pvData;
    if (RT_UNLIKELY(!pPciQueue))
    {
        LogRel((VIRTIOLOGNAME ":VirtioPciPutQueue missing Pci queue.\n"));
        return;
    }

    ddi_put16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_SEL), pQueue->QueueIndex);
    ddi_put32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN), 0);

    ddi_dma_unbind_handle(pPciQueue->hDMA);
    ddi_dma_mem_free(&pPciQueue->hIO);
    ddi_dma_free_handle(&pPciQueue->hDMA);
    RTMemFree(pPciQueue);
}
Example #30
/*
 * igb_free_dma_buffer - Free one allocated area of dma memory and handle
 */
void
igb_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
		buf->dma_address = 0;
	} else {
		return;
	}

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}
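
Note: across all the examples the release order is invariant:
ddi_dma_unbind_handle(), then ddi_dma_mem_free(), then ddi_dma_free_handle().
Distilled into a generic guarded teardown (structure and field names
illustrative):

static void
dma_area_teardown(my_dma_area_t *a)
{
	if (a->paddr != 0)		/* bound? unbind the cookies first */
		(void) ddi_dma_unbind_handle(a->dmah);
	if (a->acch != NULL)		/* memory allocated? free it next */
		ddi_dma_mem_free(&a->acch);
	if (a->dmah != NULL)		/* handle allocated? release it last */
		ddi_dma_free_handle(&a->dmah);
	a->paddr = 0;
	a->acch = NULL;
	a->dmah = NULL;			/* a second call is now a no-op */
}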