Example 1
/*
 * audioixp_free_port()
 *
 * Description:
 *	This routine unbinds the DMA cookies, frees the DMA buffers,
 *	and deallocates the DMA handles.
 *
 * Arguments:
 *	audioixp_port_t	*port	The port structure for a DMA engine.
 */
static void
audioixp_free_port(audioixp_port_t *port)
{
	if (port == NULL)
		return;

	if (port->engine) {
		audio_dev_remove_engine(port->statep->adev, port->engine);
		audio_engine_free(port->engine);
	}
	if (port->bdl_paddr) {
		(void) ddi_dma_unbind_handle(port->bdl_dmah);
	}
	if (port->bdl_acch) {
		ddi_dma_mem_free(&port->bdl_acch);
	}
	if (port->bdl_dmah) {
		ddi_dma_free_handle(&port->bdl_dmah);
	}
	if (port->samp_paddr) {
		(void) ddi_dma_unbind_handle(port->samp_dmah);
	}
	if (port->samp_acch) {
		ddi_dma_mem_free(&port->samp_acch);
	}
	if (port->samp_dmah) {
		ddi_dma_free_handle(&port->samp_dmah);
	}
	kmem_free(port, sizeof (*port));
}
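
The pattern above recurs in every example below: unbind the DMA handle while a binding is still active, free the DMA memory through its access handle, and only then free the DMA handle itself, with each step guarded so a partially constructed object can be torn down safely. As a reference for what follows, here is a minimal sketch of that ordering against a hypothetical buffer structure; the dma_buf_t type and its field names are illustrative and are not taken from any driver shown here.

/*
 * Minimal sketch of the canonical DDI release order:
 * ddi_dma_unbind_handle() first, then ddi_dma_mem_free(),
 * then ddi_dma_free_handle().  A nonzero DMA address serves
 * as the "was bound" marker, as in audioixp_free_port() above.
 */
typedef struct dma_buf {
	ddi_dma_handle_t	db_dmah;	/* from ddi_dma_alloc_handle() */
	ddi_acc_handle_t	db_acch;	/* from ddi_dma_mem_alloc() */
	uint64_t		db_paddr;	/* nonzero once bound */
} dma_buf_t;

static void
dma_buf_teardown(dma_buf_t *db)
{
	if (db->db_paddr != 0)
		(void) ddi_dma_unbind_handle(db->db_dmah);
	if (db->db_acch != NULL)
		ddi_dma_mem_free(&db->db_acch);
	if (db->db_dmah != NULL)
		ddi_dma_free_handle(&db->db_dmah);
	db->db_paddr = 0;
}
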
Example 2
extern void
emlxs_pkt_free(fc_packet_t *pkt)
{
	emlxs_port_t *port = (emlxs_port_t *)pkt->pkt_ulp_private;

	(void) emlxs_fca_pkt_uninit((opaque_t)port, pkt);

	if (pkt->pkt_datalen) {
		(void) ddi_dma_unbind_handle(pkt->pkt_data_dma);
		(void) ddi_dma_mem_free(&pkt->pkt_data_acc);
		(void) ddi_dma_free_handle(&pkt->pkt_data_dma);
	}

	if (pkt->pkt_rsplen) {
		(void) ddi_dma_unbind_handle(pkt->pkt_resp_dma);
		(void) ddi_dma_mem_free(&pkt->pkt_resp_acc);
		(void) ddi_dma_free_handle(&pkt->pkt_resp_dma);
	}

	if (pkt->pkt_cmdlen) {
		(void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma);
		(void) ddi_dma_mem_free(&pkt->pkt_cmd_acc);
		(void) ddi_dma_free_handle(&pkt->pkt_cmd_dma);
	}
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	kmem_free(pkt, (sizeof (fc_packet_t) + sizeof (emlxs_buf_t) +
	    sizeof (emlxs_pkt_cookie_t)));
#else
	kmem_free(pkt, (sizeof (fc_packet_t) + sizeof (emlxs_buf_t)));
#endif /* >= EMLXS_MODREV3 */

	return;

} /* emlxs_pkt_free() */
Example 3
/* XXX Bogus to look at the supposedly opaque handles, even to look for NULL */
static void
SMCG_dma_unalloc(smcg_t *smcg)
{
	Adapter_Struc			*pAd = smcg->smcg_pAd;
	struct smcg_rx_buffer_desc	*bdesc;
	int				i, j;

	for (i = 0; i < pAd->num_of_tx_buffs; i++)
		for (j = 0; j < SMCG_MAX_TX_MBLKS; j++) {
			if (smcg->tx_info[i].dmahandle[j] != NULL)
				ddi_dma_free_handle(
				    &smcg->tx_info[i].dmahandle[j]);
			smcg->tx_info[i].dmahandle[j] = NULL;
		}

	ASSERT(smcg->rx_bufs_outstanding == 0);
	/* Free up rx buffers currently on freelist */
	for (bdesc = smcg->rx_freelist; bdesc; bdesc = bdesc->next) {
		if (bdesc->dmahandle != NULL)
			(void) ddi_dma_unbind_handle(bdesc->dmahandle);
		if (bdesc->acchandle != NULL)
			ddi_dma_mem_free(&bdesc->acchandle);
		if (bdesc->dmahandle != NULL)
			ddi_dma_free_handle(&bdesc->dmahandle);
	}

	/* Free up all rx buffers that are associated with rx descriptors */
	for (i = 0; i < pAd->num_of_rx_buffs; i++) {
		if (smcg->bdesc[i] == NULL)
			continue;
		if (smcg->bdesc[i]->dmahandle != NULL)
			(void) ddi_dma_unbind_handle(smcg->bdesc[i]->dmahandle);
		if (smcg->bdesc[i]->acchandle != NULL)
			ddi_dma_mem_free(&smcg->bdesc[i]->acchandle);
		if (smcg->bdesc[i]->dmahandle != NULL)
			ddi_dma_free_handle(&smcg->bdesc[i]->dmahandle);
	}

	kmem_free(smcg->rxbdesc_mem,
	    sizeof (struct smcg_rx_buffer_desc) * pAd->num_of_rx_buffs*2);

	/* Free resources associated with shared ram block */
	if (smcg->hostram_dmahandle != NULL)
		(void) ddi_dma_unbind_handle(smcg->hostram_dmahandle);
	if (smcg->hostram_acchandle != NULL)
		ddi_dma_mem_free(&smcg->hostram_acchandle);
	if (smcg->hostram_dmahandle != NULL)
		ddi_dma_free_handle(&smcg->hostram_dmahandle);
}
Example 4
void
efe_ring_free(efe_ring_t **rpp)
{
	efe_ring_t *rp = *rpp;

	ASSERT(rp != NULL);

	for (int i = 0; i < DESCLEN(rp); ++i) {
		efe_buf_t *bp = GETBUF(rp, i);
		if (bp != NULL) {
			efe_buf_free(&bp);
		}
	}
	kmem_free(rp->r_bufpp, BUFPSZ(DESCLEN(rp)));

	if (rp->r_descp != NULL) {
		(void) ddi_dma_unbind_handle(rp->r_dmah);
	}
	if (rp->r_acch != NULL) {
		ddi_dma_mem_free(&rp->r_acch);
	}
	if (rp->r_dmah != NULL) {
		ddi_dma_free_handle(&rp->r_dmah);
	}
	kmem_free(rp, sizeof (efe_ring_t));

	*rpp = NULL;
}
Example 5
void
atge_rx_desc_free(atge_t *atgep)
{
	atge_l1e_data_t *l1e;
	atge_dma_t *dma;
	int pages;

	l1e = (atge_l1e_data_t *)atgep->atge_private_data;
	if (l1e == NULL)
		return;

	if (l1e->atge_l1e_rx_page == NULL)
		return;

	for (pages = 0; pages < L1E_RX_PAGES; pages++) {
		dma = l1e->atge_l1e_rx_page[pages];
		if (dma != NULL) {
			(void) ddi_dma_unbind_handle(dma->hdl);
			ddi_dma_mem_free(&dma->acchdl);
			ddi_dma_free_handle(&dma->hdl);
			kmem_free(dma, sizeof (atge_dma_t));
		}
	}

	kmem_free(l1e->atge_l1e_rx_page, L1E_RX_PAGES * sizeof (atge_dma_t *));
	l1e->atge_l1e_rx_page = NULL;
}
Example 6
/* ARGSUSED */
i40e_status
i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	if (mem->pa != 0) {
		VERIFY(mem->idm_dma_handle != NULL);
		(void) ddi_dma_unbind_handle(mem->idm_dma_handle);
		mem->pa = 0;
		mem->size = 0;
	}

	if (mem->idm_acc_handle != NULL) {
		ddi_dma_mem_free(&mem->idm_acc_handle);
		mem->idm_acc_handle = NULL;
		mem->va = NULL;
	}

	if (mem->idm_dma_handle != NULL) {
		ddi_dma_free_handle(&mem->idm_dma_handle);
		mem->idm_dma_handle = NULL;
	}

	/*
	 * Watch out for sloppiness.
	 */
	ASSERT(mem->pa == 0);
	ASSERT(mem->va == NULL);
	ASSERT(mem->size == 0);
	mem->idm_alignment = UINT32_MAX;

	return (I40E_SUCCESS);
}
Example 7
void
virtio_free_vq(struct virtqueue *vq)
{
	struct virtio_softc *sc = vq->vq_owner;
	int i;

	/* tell device that there's no virtqueue any longer */
	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
	    vq->vq_index);
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);

	/* Free the indirect descriptors, if any. */
	for (i = 0; i < vq->vq_num; i++) {
		struct vq_entry *entry = &vq->vq_entries[i];
		if (entry->qe_indirect_descs)
			virtio_free_indirect(entry);
	}

	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);

	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
	ddi_dma_mem_free(&vq->vq_dma_acch);
	ddi_dma_free_handle(&vq->vq_dma_handle);

	mutex_destroy(&vq->vq_used_lock);
	mutex_destroy(&vq->vq_avail_lock);
	mutex_destroy(&vq->vq_freelist_lock);

	kmem_free(vq, sizeof (struct virtqueue));
}
Example 8
static int
rxbuf_ctor(void *arg1, void *arg2, int kmflag)
{
	struct rxbuf *rxb = arg1;
	struct rxbuf_cache_params *p = arg2;
	size_t real_len;
	ddi_dma_cookie_t cookie;
	uint_t ccount = 0;
	int (*callback)(caddr_t);
	int rc = ENOMEM;

	if (kmflag & KM_SLEEP)
		callback = DDI_DMA_SLEEP;
	else
		callback = DDI_DMA_DONTWAIT;

	rc = ddi_dma_alloc_handle(p->dip, &p->dma_attr_rx, callback, 0,
	    &rxb->dhdl);
	if (rc != DDI_SUCCESS)
		return (rc == DDI_DMA_BADATTR ? EINVAL : ENOMEM);

	rc = ddi_dma_mem_alloc(rxb->dhdl, p->buf_size, &p->acc_attr_rx,
	    DDI_DMA_STREAMING, callback, 0, &rxb->va, &real_len, &rxb->ahdl);
	if (rc != DDI_SUCCESS) {
		rc = ENOMEM;
		goto fail1;
	}

	rc = ddi_dma_addr_bind_handle(rxb->dhdl, NULL, rxb->va, p->buf_size,
	    DDI_DMA_READ | DDI_DMA_STREAMING, NULL, NULL, &cookie, &ccount);
	if (rc != DDI_DMA_MAPPED) {
		if (rc == DDI_DMA_INUSE)
			rc = EBUSY;
		else if (rc == DDI_DMA_TOOBIG)
			rc = E2BIG;
		else
			rc = ENOMEM;
		goto fail2;
	}

	if (ccount != 1) {
		rc = E2BIG;
		goto fail3;
	}

	rxb->ref_cnt = 0;
	rxb->buf_size = p->buf_size;
	rxb->freefunc.free_arg = (caddr_t)rxb;
	rxb->freefunc.free_func = rxbuf_free;
	rxb->ba = cookie.dmac_laddress;

	return (0);

fail3:	(void) ddi_dma_unbind_handle(rxb->dhdl);
fail2:	ddi_dma_mem_free(&rxb->ahdl);
fail1:	ddi_dma_free_handle(&rxb->dhdl);
	return (rc);
}
Example 9
/* ARGSUSED */
static void
rxbuf_dtor(void *arg1, void *arg2)
{
	struct rxbuf *rxb = arg1;

	(void) ddi_dma_unbind_handle(rxb->dhdl);
	ddi_dma_mem_free(&rxb->ahdl);
	ddi_dma_free_handle(&rxb->dhdl);
}
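
rxbuf_ctor() and rxbuf_dtor() above form a kmem cache constructor/destructor pair: the handle allocation, memory allocation, and binding are paid once per cached buffer, and the teardown runs only when the cache releases the object. A hedged sketch of how such a pair would be registered, assuming a populated struct rxbuf_cache_params named rxb_params and an arbitrary cache name:

/*
 * Illustrative wiring of rxbuf_ctor()/rxbuf_dtor() into a kmem
 * cache; "rxb_params" and the cache name are assumptions made
 * for this sketch.  Buffers are then obtained with
 * kmem_cache_alloc(rxb_cache, KM_SLEEP) and returned with
 * kmem_cache_free(rxb_cache, rxb).
 */
static kmem_cache_t *rxb_cache;

static void
rxb_cache_init(struct rxbuf_cache_params *rxb_params)
{
	rxb_cache = kmem_cache_create("rxbuf_cache",
	    sizeof (struct rxbuf), 0, rxbuf_ctor, rxbuf_dtor,
	    NULL, rxb_params, NULL, 0);
}
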
Example 10
/*
 * Function to free a WQE mapping descriptor.
 *
 * wq - pointer to WQ structure
 * wqmd - pointer to WQ mapping descriptor
 *
 * return none
 */
static void
oce_wqmd_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
{
	if (wqmd == NULL) {
		return;
	}
	(void) ddi_dma_unbind_handle(wqmd->dma_handle);
	oce_wqm_free(wq, wqmd);
}
Example 11
/*
 * Cleanup isochronous resources.
 */
void
ehci_isoc_cleanup(
	ehci_state_t		*ehcip)
{
	ehci_isoc_xwrapper_t	*itw;
	ehci_pipe_private_t	*pp;
	ehci_itd_t		*itd;
	int			i, ctrl, rval;

	/* Free all the buffers */
	if (ehcip->ehci_itd_pool_addr && ehcip->ehci_itd_pool_mem_handle) {
		for (i = 0; i < ehci_get_itd_pool_size(); i++) {
			itd = &ehcip->ehci_itd_pool_addr[i];
			ctrl = Get_ITD(ehcip->
			    ehci_itd_pool_addr[i].itd_state);

			if ((ctrl != EHCI_ITD_FREE) &&
			    (ctrl != EHCI_ITD_DUMMY) &&
			    (itd->itd_trans_wrapper)) {

				mutex_enter(&ehcip->ehci_int_mutex);

				itw = (ehci_isoc_xwrapper_t *)
					EHCI_LOOKUP_ID((uint32_t)
					Get_ITD(itd->itd_trans_wrapper));

				/* Obtain the pipe private structure */
				pp = itw->itw_pipe_private;

				ehci_deallocate_itd(ehcip, itw, itd);
				ehci_deallocate_itw(ehcip, pp, itw);

				mutex_exit(&ehcip->ehci_int_mutex);
			}
		}

		/*
		 * If EHCI_ITD_POOL_BOUND flag is set, then unbind
		 * the handle for ITD pools.
		 */
		if ((ehcip->ehci_dma_addr_bind_flag &
		    EHCI_ITD_POOL_BOUND) == EHCI_ITD_POOL_BOUND) {

			rval = ddi_dma_unbind_handle(
			    ehcip->ehci_itd_pool_dma_handle);

			ASSERT(rval == DDI_SUCCESS);
		}
		ddi_dma_mem_free(&ehcip->ehci_itd_pool_mem_handle);
	}

	/* Free the ITD pool */
	if (ehcip->ehci_itd_pool_dma_handle) {
		ddi_dma_free_handle(&ehcip->ehci_itd_pool_dma_handle);
	}
}
Example 12
static void
virtio_free_indirect(struct vq_entry *entry)
{

	(void) ddi_dma_unbind_handle(entry->qe_indirect_dma_handle);
	ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
	ddi_dma_free_handle(&entry->qe_indirect_dma_handle);

	entry->qe_indirect_descs = NULL;
}
Example 13
static void
virtionet_dma_teardown(virtionet_dma_t *dmap)
{
	if (dmap != NULL) {
		/* Release allocated system resources */
		(void) ddi_dma_unbind_handle(dmap->hdl);
		ddi_dma_mem_free(&dmap->acchdl);
		ddi_dma_free_handle(&dmap->hdl);
		kmem_free(dmap, sizeof (*dmap));
	}
}
Example 14
/*
 * Free DMA-ble memory.
 */
void
vmxnet3_free_dma_mem(vmxnet3_dmabuf_t *dma)
{
	(void) ddi_dma_unbind_handle(dma->dmaHandle);
	ddi_dma_mem_free(&dma->dataHandle);
	ddi_dma_free_handle(&dma->dmaHandle);

	dma->buf = NULL;
	dma->bufPA = NULL;
	dma->bufLen = 0;
}
Example 15
/*
 * SMCG_stop_board() -- stop board receiving
 */
static int
SMCG_stop_board(gld_mac_info_t *macinfo)
{
	smcg_t		*smcg = (smcg_t *)macinfo->gldm_private;
	Adapter_Struc	*pAd = smcg->smcg_pAd;
	int		rc, i;

#ifdef	DEBUG
	if (SMCG_debug & SMCGTRACE)
		cmn_err(CE_CONT, SMCG_NAME "_stop_board(0x%p)",
		    (void *)macinfo);
#endif
	mutex_enter(&smcg->txbuf_lock);
	mutex_enter(&smcg->lm_lock);

	if (smcg->tx_ring_head != smcg->tx_ring_tail)
		LM_Reap_Xmits(pAd);

	i = 20;
	while ((smcg->tx_ring_head != smcg->tx_ring_tail) && (i--)) {
		delay(drv_usectohz(10000));
		LM_Reap_Xmits(pAd);
	}

	rc = LM_Close_Adapter(pAd);

	while (smcg->tx_ring_head != smcg->tx_ring_tail) {
		ASSERT(mutex_owned(&smcg->txbuf_lock));
		freemsg(smcg->tx_info[smcg->tx_ring_tail].mptr);
		for (i = 0; i < smcg->tx_info[smcg->tx_ring_tail].handles_bound;
		    i++)
			(void) ddi_dma_unbind_handle(
			    smcg->tx_info[smcg->tx_ring_tail].dmahandle[i]);
		smcg->tx_ring_tail = (smcg->tx_ring_tail+1) %
		    pAd->num_of_tx_buffs;
	}

	/*
	 * The spec says we should wait for UM_Status_Change, but all LMs
	 * we currently support change the status prior to returning from
	 * LM_Close_Adapter().
	 */
	mutex_exit(&smcg->lm_lock);
	mutex_exit(&smcg->txbuf_lock);

#ifdef	DEBUG
	if (rc != SUCCESS)
		cmn_err(CE_WARN,
		    SMCG_NAME " LM_Close_Adapter failed %d", rc);
#endif

	return (rc == SUCCESS ? GLD_SUCCESS : GLD_FAILURE);
}
Example 16
static void
pcn_destroybuf(pcn_buf_t *buf)
{
	if (buf == NULL)
		return;

	if (buf->pb_paddr)
		(void) ddi_dma_unbind_handle(buf->pb_dmah);
	if (buf->pb_acch)
		ddi_dma_mem_free(&buf->pb_acch);
	if (buf->pb_dmah)
		ddi_dma_free_handle(&buf->pb_dmah);
	kmem_free(buf, sizeof (*buf));
}
Example 17
static void
virtio_vq_teardown(virtionet_state_t *sp, virtqueue_t *vqp)
{
	if (vqp != NULL) {
		/* Clear the device notion of the virtqueue */
		VIRTIO_PUT16(sp, VIRTIO_QUEUE_SELECT, vqp->vq_num);
		VIRTIO_PUT32(sp, VIRTIO_QUEUE_ADDRESS, 0);

		/* Release allocated system resources */
		(void) ddi_dma_unbind_handle(vqp->vq_dma.hdl);
		ddi_dma_mem_free(&vqp->vq_dma.acchdl);
		ddi_dma_free_handle(&vqp->vq_dma.hdl);
		kmem_free(vqp, sizeof (*vqp));
	}
}
Example 18
/* ARGSUSED */
int
UM_Send_Complete(int sstatus, Adapter_Struc *pAd)
{
	int	i;
	smcg_t	*smcg = (smcg_t *)pAd->sm_private;

	ASSERT(mutex_owned(&smcg->txbuf_lock));
	freemsg(smcg->tx_info[smcg->tx_ring_tail].mptr);
	for (i = 0; i < smcg->tx_info[smcg->tx_ring_tail].handles_bound; i++) {
		(void) ddi_dma_unbind_handle(
		    smcg->tx_info[smcg->tx_ring_tail].dmahandle[i]);
	}
	smcg->tx_ring_tail = (smcg->tx_ring_tail+1) % pAd->num_of_tx_buffs;

	return (SUCCESS);
}
Example 19
/*
 * Function to delete a DMA buffer.
 *
 * dev - software handle to device
 * dbuf - DMA object to delete
 *
 * return none
 */
void
oce_free_dma_buffer(struct oce_dev *dev, oce_dma_buf_t *dbuf)
{
	_NOTE(ARGUNUSED(dev));

	if (dbuf == NULL) {
		return;
	}
	if (dbuf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(dbuf->dma_handle);
	}
	if (dbuf->acc_handle != NULL) {
		ddi_dma_mem_free(&dbuf->acc_handle);
	}
	if (dbuf->dma_handle != NULL) {
		ddi_dma_free_handle(&dbuf->dma_handle);
	}
	kmem_free(dbuf, sizeof (oce_dma_buf_t));
} /* oce_free_dma_buffer */
Example 20
/*ARGSUSED*/
void
dvma_unload(ddi_dma_handle_t h, uint_t objindex, uint_t type)
{
	register ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	struct fast_dvma *nexus_private;
	struct dvma_ops *nexus_funcptr;

	if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
		nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
		nexus_funcptr = (struct dvma_ops *)nexus_private->ops;
		(void) (*nexus_funcptr->dvma_unload)(h, objindex, type);
	} else {
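		/*
		 * Not bypassing the nexus: the per-object DMA handles
		 * were stashed in dmai_minfo, so unbind and free the
		 * handle that backs this object.
		 */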
		ddi_dma_handle_t handle;

		handle = ((ddi_dma_handle_t *)mp->dmai_minfo)[objindex];
		(void) ddi_dma_unbind_handle(handle);
		(void) ddi_dma_free_handle(&handle);
	}
}
Example 21
static void
pcn_freerxring(pcn_t *pcnp)
{
	int	i;

	if (pcnp->pcn_rxbufs) {
		for (i = 0; i < PCN_RXRING; i++)
			pcn_destroybuf(pcnp->pcn_rxbufs[i]);

		kmem_free(pcnp->pcn_rxbufs, PCN_RXRING * sizeof (pcn_buf_t *));
	}

	if (pcnp->pcn_rxdesc_paddr)
		(void) ddi_dma_unbind_handle(pcnp->pcn_rxdesc_dmah);
	if (pcnp->pcn_rxdesc_acch)
		ddi_dma_mem_free(&pcnp->pcn_rxdesc_acch);
	if (pcnp->pcn_rxdesc_dmah)
		ddi_dma_free_handle(&pcnp->pcn_rxdesc_dmah);
}
Example 22
void
efe_buf_free(efe_buf_t **bpp)
{
	efe_buf_t *bp = *bpp;

	ASSERT(bp != NULL);

	if (bp->b_kaddr != NULL) {
		(void) ddi_dma_unbind_handle(bp->b_dmah);
	}
	if (bp->b_acch != NULL) {
		ddi_dma_mem_free(&bp->b_acch);
	}
	if (bp->b_dmah != NULL) {
		ddi_dma_free_handle(&bp->b_dmah);
	}
	kmem_free(bp, sizeof (efe_buf_t));

	*bpp = NULL;
}
Example 23
/*
 * igb_free_rbd_ring - Free the rx descriptors of one ring.
 */
static void
igb_free_rbd_ring(igb_rx_data_t *rx_data)
{
	if (rx_data->rbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_data->rbd_area.dma_handle);
	}
	if (rx_data->rbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
		rx_data->rbd_area.acc_handle = NULL;
	}
	if (rx_data->rbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
		rx_data->rbd_area.dma_handle = NULL;
	}
	rx_data->rbd_area.address = NULL;
	rx_data->rbd_area.dma_address = NULL;
	rx_data->rbd_area.size = 0;

	rx_data->rbd_ring = NULL;
}
Example 24
/*
 * igb_free_tbd_ring - Free the tx descriptors of one ring.
 */
static void
igb_free_tbd_ring(igb_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle);
	}
	if (tx_ring->tbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
		tx_ring->tbd_area.acc_handle = NULL;
	}
	if (tx_ring->tbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
		tx_ring->tbd_area.dma_handle = NULL;
	}
	tx_ring->tbd_area.address = NULL;
	tx_ring->tbd_area.dma_address = NULL;
	tx_ring->tbd_area.size = 0;

	tx_ring->tbd_ring = NULL;
}
Example 25
/**
 * Virtio Pci put queue routine. Detaches the queue from the device and frees
 * the associated queue resources.
 *
 * @param pDevice           Pointer to the Virtio device instance.
 * @param pQueue            Pointer to the queue.
 */
static void VirtioPciPutQueue(PVIRTIODEVICE pDevice, PVIRTIOQUEUE pQueue)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciPutQueue pDevice=%p pQueue=%p\n", pDevice, pQueue));
    AssertReturnVoid(pDevice);
    AssertReturnVoid(pQueue);

    virtio_pci_t *pPci = pDevice->pvHyper;
    AssertReturnVoid(pPci);
    virtio_pci_queue_t *pPciQueue = pQueue->pvData;
    if (RT_UNLIKELY(!pPciQueue))
    {
        LogRel((VIRTIOLOGNAME ":VirtioPciPutQueue missing Pci queue.\n"));
        return;
    }

    ddi_put16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_SEL), pQueue->QueueIndex);
    ddi_put32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN), 0);

    ddi_dma_unbind_handle(pPciQueue->hDMA);
    ddi_dma_mem_free(&pPciQueue->hIO);
    ddi_dma_free_handle(&pPciQueue->hDMA);
    RTMemFree(pPciQueue);
}
Example 26
/*
 * igb_free_dma_buffer - Free one allocated area of dma memory and handle
 */
void
igb_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
		buf->dma_address = NULL;
	} else {
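		/* No DMA handle means the buffer was never fully set up. */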
		return;
	}

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}
Example 27
/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx_one --
 *
 *    Map a msg into the Tx command ring of a vmxnet3 device.
 *
 * Results:
 *    VMXNET3_TX_OK if everything went well.
 *    VMXNET3_TX_RINGFULL if the ring is nearly full.
 *    VMXNET3_TX_PULLUP if the msg is overfragmented.
 *    VMXNET3_TX_FAILURE if there was a DMA or offload error.
 *
 * Side effects:
 *    The ring is filled if VMXNET3_TX_OK is returned.
 *
 *---------------------------------------------------------------------------
 */
static vmxnet3_txstatus
vmxnet3_tx_one(vmxnet3_softc_t *dp,
               vmxnet3_txqueue_t *txq,
               vmxnet3_offload_t *ol,
               mblk_t *mp,
               boolean_t retry)
{
   int ret = VMXNET3_TX_OK;
   unsigned int frags = 0, totLen = 0;
   vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
   Vmxnet3_TxQueueCtrl *txqCtrl = txq->sharedCtrl;
   Vmxnet3_GenericDesc *txDesc;
   uint16_t sopIdx, eopIdx;
   uint8_t sopGen, curGen;
   mblk_t *mblk;

   mutex_enter(&dp->txLock);

   sopIdx = eopIdx = cmdRing->next2fill;
   sopGen = cmdRing->gen;
   curGen = !cmdRing->gen;

   for (mblk = mp; mblk != NULL; mblk = mblk->b_cont) {
      unsigned int len = MBLKL(mblk);
      ddi_dma_cookie_t cookie;
      uint_t cookieCount;

      if (len) {
         totLen += len;
      } else {
         continue;
      }

      if (ddi_dma_addr_bind_handle(dp->txDmaHandle, NULL,
                                   (caddr_t) mblk->b_rptr, len,
                                   DDI_DMA_RDWR | DDI_DMA_STREAMING,
                                   DDI_DMA_DONTWAIT, NULL,
                                   &cookie, &cookieCount) != DDI_DMA_MAPPED) {
         VMXNET3_WARN(dp, "ddi_dma_addr_bind_handle() failed\n");
         ret = VMXNET3_TX_FAILURE;
         goto error;
      }

      ASSERT(cookieCount);

      do {
         uint64_t addr = cookie.dmac_laddress;
         size_t len = cookie.dmac_size;

         do {
            uint32_t dw2, dw3;
            size_t chunkLen;

            ASSERT(!txq->metaRing[eopIdx].mp);
            ASSERT(cmdRing->avail - frags);

            if (frags >= cmdRing->size - 1 ||
                (ol->om != VMXNET3_OM_TSO && frags >= VMXNET3_MAX_TXD_PER_PKT)) {

               if (retry) {
                  VMXNET3_DEBUG(dp, 2, "overfragmented, frags=%u ring=%hu om=%hu\n",
                                frags, cmdRing->size, ol->om);
               }
               ddi_dma_unbind_handle(dp->txDmaHandle);
               ret = VMXNET3_TX_PULLUP;
               goto error;
            }
            if (cmdRing->avail - frags <= 1) {
               dp->txMustResched = B_TRUE;
               ddi_dma_unbind_handle(dp->txDmaHandle);
               ret = VMXNET3_TX_RINGFULL;
               goto error;
            }

            if (len > VMXNET3_MAX_TX_BUF_SIZE) {
               chunkLen = VMXNET3_MAX_TX_BUF_SIZE;
            } else {
               chunkLen = len;
            }

            frags++;
            eopIdx = cmdRing->next2fill;

            txDesc = VMXNET3_GET_DESC(cmdRing, eopIdx);
            ASSERT(txDesc->txd.gen != cmdRing->gen);

            // txd.addr
            txDesc->txd.addr = addr;
            // txd.dw2
            dw2 = chunkLen == VMXNET3_MAX_TX_BUF_SIZE ? 0 : chunkLen;
            dw2 |= curGen << VMXNET3_TXD_GEN_SHIFT;
            txDesc->dword[2] = dw2;
            ASSERT(txDesc->txd.len == len || txDesc->txd.len == 0);
            // txd.dw3
            dw3 = 0;
            txDesc->dword[3] = dw3;

            VMXNET3_INC_RING_IDX(cmdRing, cmdRing->next2fill);
            curGen = cmdRing->gen;

            addr += chunkLen;
            len -= chunkLen;
         } while (len);

         if (--cookieCount) {
            ddi_dma_nextcookie(dp->txDmaHandle, &cookie);
         }
      } while (cookieCount);

      ddi_dma_unbind_handle(dp->txDmaHandle);
   }

   /* Update the EOP descriptor */
   txDesc = VMXNET3_GET_DESC(cmdRing, eopIdx);
   txDesc->dword[3] |= VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;

   /* Update the SOP descriptor. Must be done last */
   txDesc = VMXNET3_GET_DESC(cmdRing, sopIdx);
   if (ol->om == VMXNET3_OM_TSO &&
       txDesc->txd.len != 0 &&
       txDesc->txd.len < ol->hlen) {
      ret = VMXNET3_TX_FAILURE;
      goto error;
   }
   txDesc->txd.om = ol->om;
   txDesc->txd.hlen = ol->hlen;
   txDesc->txd.msscof = ol->msscof;
   membar_producer();
   txDesc->txd.gen = sopGen;

   /* Update the meta ring & metadata */
   txq->metaRing[sopIdx].mp = mp;
   txq->metaRing[eopIdx].sopIdx = sopIdx;
   txq->metaRing[eopIdx].frags = frags;
   cmdRing->avail -= frags;
   if (ol->om == VMXNET3_OM_TSO) {
      txqCtrl->txNumDeferred +=
         (totLen - ol->hlen + ol->msscof - 1) / ol->msscof;
   } else {
      txqCtrl->txNumDeferred++;
   }

   VMXNET3_DEBUG(dp, 3, "tx 0x%p on [%u;%u]\n", mp, sopIdx, eopIdx);

   goto done;

error:
   /* Reverse the generation bits */
   while (sopIdx != cmdRing->next2fill) {
      VMXNET3_DEC_RING_IDX(cmdRing, cmdRing->next2fill);
      txDesc = VMXNET3_GET_DESC(cmdRing, cmdRing->next2fill);
      txDesc->txd.gen = !cmdRing->gen;
   }

done:
   mutex_exit(&dp->txLock);

   return ret;
}
Example 28
/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx_prepare_offload --
 *
 *    Build the offload context of a msg.
 *
 * Results:
 *    0 if everything went well.
 *    +n if n bytes need to be pulled up.
 *    -1 in case of error (not used).
 *
 * Side effects:
 *    None.
 *
 *---------------------------------------------------------------------------
 */
static int
vmxnet3_tx_prepare_offload(vmxnet3_softc_t *dp,
                           vmxnet3_offload_t *ol,
                           mblk_t *mp)
{
   int ret = 0;
   uint32_t start, stuff, value, flags;
#if defined(OPEN_SOLARIS) || defined(SOL11)
   uint32_t lso_flag, mss;
#endif

   ol->om = VMXNET3_OM_NONE;
   ol->hlen = 0;
   ol->msscof = 0;

   hcksum_retrieve(mp, NULL, NULL, &start, &stuff, NULL, &value, &flags);
#if defined(OPEN_SOLARIS) || defined(SOL11)
   mac_lso_get(mp, &mss, &lso_flag);

   if (flags || lso_flag) {
#else
   if (flags) {
#endif
      struct ether_vlan_header *eth = (void *) mp->b_rptr;
      uint8_t ethLen;

      if (eth->ether_tpid == htons(ETHERTYPE_VLAN)) {
         ethLen = sizeof(struct ether_vlan_header);
      } else {
         ethLen = sizeof(struct ether_header);
      }

      VMXNET3_DEBUG(dp, 4, "flags=0x%x, ethLen=%u, start=%u, stuff=%u, value=%u\n",
                            flags,      ethLen,    start,    stuff,    value);

#if defined(OPEN_SOLARIS) || defined(SOL11)
      if (lso_flag & HW_LSO) {
#else
      if (flags & HCK_PARTIALCKSUM) {
         ol->om = VMXNET3_OM_CSUM;
         ol->hlen = start + ethLen;
         ol->msscof = stuff + ethLen;
      }
      if (flags & HW_LSO) {
#endif
         mblk_t *mblk = mp;
         uint8_t *ip, *tcp;
         uint8_t ipLen, tcpLen;

         /*
          * Copy e1000g's behavior:
          * - Do not assume all the headers are in the same mblk.
          * - Assume each header is always within one mblk.
          * - Assume the ethernet header is in the first mblk.
          */
         ip = mblk->b_rptr + ethLen;
         if (ip >= mblk->b_wptr) {
            mblk = mblk->b_cont;
            ip = mblk->b_rptr;
         }
         ipLen = IPH_HDR_LENGTH((ipha_t *) ip);
         tcp = ip + ipLen;
         if (tcp >= mblk->b_wptr) {
            mblk = mblk->b_cont;
            tcp = mblk->b_rptr;
         }
         tcpLen = TCP_HDR_LENGTH((tcph_t *) tcp);
         if (tcp + tcpLen > mblk->b_wptr) { // careful, '>' instead of '>=' here
            mblk = mblk->b_cont;
         }

         ol->om = VMXNET3_OM_TSO;
         ol->hlen = ethLen + ipLen + tcpLen;
#if defined(OPEN_SOLARIS) || defined(SOL11)
         ol->msscof = mss;
#else
         /* OpenSolaris fills 'value' with the MSS but Solaris doesn't. */
         ol->msscof = DB_LSOMSS(mp);
#endif
         if (mblk != mp) {
            ret = ol->hlen;
         }
      }
#if defined(OPEN_SOLARIS) || defined(SOL11)
      else if (flags & HCK_PARTIALCKSUM) {
         ol->om = VMXNET3_OM_CSUM;
         ol->hlen = start + ethLen;
         ol->msscof = stuff + ethLen;
      }
#endif
   }

   return ret;
}

/* vmxnet3_tx_one() is identical to the copy shown in Example 27. */

/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx --
 *
 *    Send packets on a vmxnet3 device.
 *
 * Results:
 *    NULL in case of success or failure.
 *    The mps to be retransmitted later if the ring is full.
 *
 * Side effects:
 *    None.
 *
 *---------------------------------------------------------------------------
 */
mblk_t *
vmxnet3_tx(void *data, mblk_t *mps)
{
   vmxnet3_softc_t *dp = data;
   vmxnet3_txqueue_t *txq = &dp->txQueue;
   vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
   Vmxnet3_TxQueueCtrl *txqCtrl = txq->sharedCtrl;
   vmxnet3_txstatus status = VMXNET3_TX_OK;
   mblk_t *mp;

   ASSERT(mps != NULL);

   do {
      vmxnet3_offload_t ol;
      int pullup;

      mp = mps;
      mps = mp->b_next;
      mp->b_next = NULL;

      if (DB_TYPE(mp) != M_DATA) {
         /*
          * PR #315560: Solaris might pass M_PROTO mblks for some reason.
          * Drop them because we don't understand them and because their
          * contents are not Ethernet frames anyway.
          */
         ASSERT(B_FALSE);
         freemsg(mp);
         continue;
      }

      /*
       * Prepare the offload while we're still handling the original
       * message -- msgpullup() discards the metadata afterwards.
       */
      pullup = vmxnet3_tx_prepare_offload(dp, &ol, mp);
      if (pullup) {
         mblk_t *new_mp = msgpullup(mp, pullup);
         freemsg(mp);
         if (new_mp) {
            mp = new_mp;
         } else {
            continue;
         }
      }

      /*
       * Try to map the message in the Tx ring.
       * This call might fail for non-fatal reasons.
       */
      status = vmxnet3_tx_one(dp, txq, &ol, mp, B_FALSE);
      if (status == VMXNET3_TX_PULLUP) {
         /*
          * Try one more time after flattening
          * the message with msgpullup().
          */
         if (mp->b_cont != NULL) {
            mblk_t *new_mp = msgpullup(mp, -1);
            freemsg(mp);
            if (new_mp) {
               mp = new_mp;
               status = vmxnet3_tx_one(dp, txq, &ol, mp, B_TRUE);
            } else {
               continue;
            }
         }
      }
      if (status != VMXNET3_TX_OK && status != VMXNET3_TX_RINGFULL) {
         /* Fatal failure, drop it */
         freemsg(mp);
      }
   } while (mps && status != VMXNET3_TX_RINGFULL);

   if (status == VMXNET3_TX_RINGFULL) {
      mp->b_next = mps;
      mps = mp;
   } else {
      ASSERT(!mps);
   }

   /* Notify the device */
   mutex_enter(&dp->txLock);
   if (txqCtrl->txNumDeferred >= txqCtrl->txThreshold) {
      txqCtrl->txNumDeferred = 0;
      VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_TXPROD, cmdRing->next2fill);
   }
   mutex_exit(&dp->txLock);

   return mps;
}
Example 29
/*
 * Allocate/free a vq.
 */
struct virtqueue *
virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
    unsigned int indirect_num, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize = 0;
	int ret;
	unsigned int ncookies;
	size_t len;
	struct virtqueue *vq;

	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
	vq_size = ddi_get16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
	if (vq_size == 0) {
		dev_err(sc->sc_dev, CE_WARN,
		    "virtqueue dest not exist, index %d for %s\n", index, name);
		goto out;
	}

	vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);

	/* size 0 => use native vq size, good for receive queues. */
	if (size)
		vq_size = MIN(vq_size, size);

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
	    sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
	    sizeof (struct vring_used_elem) * vq_size);

	allocsize = allocsize1 + allocsize2;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for vq %d", index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for vq %d", index);
		goto out_alloc;
	}

	ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
	    (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for vq %d", index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);
	/* and page-aligned buffers. */
	ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);

	(void) memset(vq->vq_vaddr, 0, allocsize);

	/* Make sure all zeros hit the buffer before we point the host to it */
	membar_producer();

	/* set the vq address */
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
	    (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_descs = vq->vq_vaddr;
	vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
	vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);

	ASSERT(indirect_num == 0 ||
	    virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
	vq->vq_indirect_num = indirect_num;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
	    KM_SLEEP);

	ret = virtio_init_vq(sc, vq);
	if (ret)
		goto out_init;

	dev_debug(sc->sc_dev, CE_NOTE,
	    "Allocated %d entries for vq %d:%s (%d indirect descs)",
	    vq_size, index, name, indirect_num * vq_size);

	return (vq);

out_init:
	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
out_bind:
	ddi_dma_mem_free(&vq->vq_dma_acch);
out_alloc:
	ddi_dma_free_handle(&vq->vq_dma_handle);
out_alloc_handle:
	kmem_free(vq, sizeof (struct virtqueue));
out:
	return (NULL);
}
Example 30
/*
 * hermon_check_iommu_bypass()
 *    Context: Only called from attach() path context
 *    XXX This is a DMA allocation routine outside the normal
 *	  path. FMA hardening will not like this.
 */
static void
hermon_check_iommu_bypass(hermon_state_t *state, hermon_cfg_profile_t *cp)
{
	ddi_dma_handle_t	dmahdl;
	ddi_dma_attr_t		dma_attr;
	int			status;
	ddi_acc_handle_t	acc_hdl;
	caddr_t			kaddr;
	size_t			actual_len;
	ddi_dma_cookie_t	cookie;
	uint_t			cookiecnt;

	hermon_dma_attr_init(state, &dma_attr);

	/* Try mapping for IOMMU bypass (Force Physical) */
	dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL |
	    DDI_DMA_RELAXED_ORDERING;

	/*
	 * Call ddi_dma_alloc_handle().  If this returns DDI_DMA_BADATTR then
	 * it is not possible to use IOMMU bypass with our PCI bridge parent.
	 * Since the function we are in can only be called if iommu bypass was
	 * requested in the config profile, we configure for bypass if the
	 * ddi_dma_alloc_handle() was successful.  Otherwise, we configure
	 * for non-bypass (ie: normal) mapping.
	 */
	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
	    DDI_DMA_SLEEP, NULL, &dmahdl);
	if (status == DDI_DMA_BADATTR) {
		cp->cp_iommu_bypass = HERMON_BINDMEM_NORMAL;
		return;
	} else if (status != DDI_SUCCESS) {	/* failed somehow */
		hermon_kernel_data_ro = HERMON_RO_DISABLED;
		hermon_user_data_ro = HERMON_RO_DISABLED;
		cp->cp_iommu_bypass = HERMON_BINDMEM_BYPASS;
		return;
	} else {
		cp->cp_iommu_bypass = HERMON_BINDMEM_BYPASS;
	}

	status = ddi_dma_mem_alloc(dmahdl, 256,
	    &state->hs_reg_accattr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, (caddr_t *)&kaddr, &actual_len, &acc_hdl);

	if (status != DDI_SUCCESS) {		/* failed somehow */
		hermon_kernel_data_ro = HERMON_RO_DISABLED;
		hermon_user_data_ro = HERMON_RO_DISABLED;
		ddi_dma_free_handle(&dmahdl);
		return;
	}

	status = ddi_dma_addr_bind_handle(dmahdl, NULL, kaddr, actual_len,
	    DDI_DMA_RDWR, DDI_DMA_SLEEP, NULL, &cookie, &cookiecnt);

	if (status == DDI_DMA_MAPPED) {
		(void) ddi_dma_unbind_handle(dmahdl);
	} else {
		hermon_kernel_data_ro = HERMON_RO_DISABLED;
		hermon_user_data_ro = HERMON_RO_DISABLED;
	}

	ddi_dma_mem_free(&acc_hdl);
	ddi_dma_free_handle(&dmahdl);
}