Example #1
static pcn_buf_t *
pcn_allocbuf(pcn_t *pcnp)
{
	pcn_buf_t		*buf;
	size_t			len;
	unsigned		ccnt;
	ddi_dma_cookie_t	dmac;

	buf = kmem_zalloc(sizeof (*buf), KM_SLEEP);

	if (ddi_dma_alloc_handle(pcnp->pcn_dip, &pcn_dma_attr, DDI_DMA_SLEEP,
	    NULL, &buf->pb_dmah) != DDI_SUCCESS) {
		kmem_free(buf, sizeof (*buf));
		return (NULL);
	}

	if (ddi_dma_mem_alloc(buf->pb_dmah, PCN_BUFSZ, &pcn_bufattr,
	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &buf->pb_buf, &len,
	    &buf->pb_acch) != DDI_SUCCESS) {
		pcn_destroybuf(buf);
		return (NULL);
	}

	if (ddi_dma_addr_bind_handle(buf->pb_dmah, NULL, buf->pb_buf, len,
	    DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &dmac,
	    &ccnt) != DDI_DMA_MAPPED) {
		pcn_destroybuf(buf);
		return (NULL);
	}
	buf->pb_paddr = dmac.dmac_address;

	return (buf);
}
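/*
 * pcn_destroybuf() is called on the failure paths above but is not shown.
 * The following is only a minimal sketch of what such a teardown typically
 * looks like, assuming the pb_* fields used in pcn_allocbuf(); it must
 * tolerate a partially constructed buffer, so each resource is checked
 * before being released in the reverse order of allocation.
 */
static void
pcn_destroybuf(pcn_buf_t *buf)
{
	if (buf == NULL)
		return;

	if (buf->pb_paddr != 0)
		(void) ddi_dma_unbind_handle(buf->pb_dmah);
	if (buf->pb_acch != NULL)
		ddi_dma_mem_free(&buf->pb_acch);
	if (buf->pb_dmah != NULL)
		ddi_dma_free_handle(&buf->pb_dmah);
	kmem_free(buf, sizeof (*buf));
}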
Example #2
/*
 * Allocate /size/ bytes of contiguous DMA-ble memory.
 *
 * Returns:
 *    0 on success, non-zero on failure.
 */
static int
vmxnet3_alloc_dma_mem(vmxnet3_softc_t *dp, vmxnet3_dmabuf_t *dma, size_t size,
    boolean_t canSleep, ddi_dma_attr_t *dma_attrs)
{
	ddi_dma_cookie_t cookie;
	uint_t cookieCount;
	int dmaerr, err = 0;
	int (*cb) (caddr_t) = canSleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	ASSERT(size != 0);

	/*
	 * Allocate a DMA handle
	 */
	if ((dmaerr = ddi_dma_alloc_handle(dp->dip, dma_attrs, cb, NULL,
	    &dma->dmaHandle)) != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "ddi_dma_alloc_handle() failed: %d", dmaerr);
		err = vmxnet3_dmaerr2errno(dmaerr);
		goto error;
	}

	/*
	 * Allocate memory
	 */
	if (ddi_dma_mem_alloc(dma->dmaHandle, size, &vmxnet3_dev_attr,
	    DDI_DMA_CONSISTENT, cb, NULL, &dma->buf, &dma->bufLen,
	    &dma->dataHandle) != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "ddi_dma_mem_alloc() failed");
		err = ENOMEM;
		goto error_dma_handle;
	}

	/*
	 * Map the memory
	 */
	if ((dmaerr = ddi_dma_addr_bind_handle(dma->dmaHandle, NULL, dma->buf,
	    dma->bufLen, DDI_DMA_RDWR | DDI_DMA_STREAMING, cb, NULL, &cookie,
	    &cookieCount)) != DDI_DMA_MAPPED) {
		VMXNET3_WARN(dp, "ddi_dma_addr_bind_handle() failed: %d",
		    dmaerr);
		err = vmxnet3_dmaerr2errno(dmaerr);
		goto error_dma_mem;
	}

	ASSERT(cookieCount == 1);
	dma->bufPA = cookie.dmac_laddress;

	return (0);

error_dma_mem:
	ddi_dma_mem_free(&dma->dataHandle);
error_dma_handle:
	ddi_dma_free_handle(&dma->dmaHandle);
error:
	dma->buf = NULL;
	dma->bufPA = NULL;
	dma->bufLen = 0;
	return (err);
}
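/*
 * vmxnet3_dmaerr2errno() is referenced above but not shown.  The sketch
 * below is only an illustration of such a mapping, reusing the DDI-to-errno
 * conversions seen in Example #3; the driver's actual table may differ.
 */
static int
vmxnet3_dmaerr2errno(int dmaerr)
{
	switch (dmaerr) {
	case DDI_DMA_BADATTR:
		return (EINVAL);
	case DDI_DMA_INUSE:
		return (EBUSY);
	case DDI_DMA_TOOBIG:
		return (E2BIG);
	case DDI_DMA_NORESOURCES:
	case DDI_DMA_NOMAPPING:
	default:
		return (ENOMEM);
	}
}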
Example #3
static int
rxbuf_ctor(void *arg1, void *arg2, int kmflag)
{
	struct rxbuf *rxb = arg1;
	struct rxbuf_cache_params *p = arg2;
	size_t real_len;
	ddi_dma_cookie_t cookie;
	uint_t ccount = 0;
	int (*callback)(caddr_t);
	int rc = ENOMEM;

	if (kmflag & KM_SLEEP)
		callback = DDI_DMA_SLEEP;
	else
		callback = DDI_DMA_DONTWAIT;

	rc = ddi_dma_alloc_handle(p->dip, &p->dma_attr_rx, callback, 0,
	    &rxb->dhdl);
	if (rc != DDI_SUCCESS)
		return (rc == DDI_DMA_BADATTR ? EINVAL : ENOMEM);

	rc = ddi_dma_mem_alloc(rxb->dhdl, p->buf_size, &p->acc_attr_rx,
	    DDI_DMA_STREAMING, callback, 0, &rxb->va, &real_len, &rxb->ahdl);
	if (rc != DDI_SUCCESS) {
		rc = ENOMEM;
		goto fail1;
	}

	rc = ddi_dma_addr_bind_handle(rxb->dhdl, NULL, rxb->va, p->buf_size,
	    DDI_DMA_READ | DDI_DMA_STREAMING, NULL, NULL, &cookie, &ccount);
	if (rc != DDI_DMA_MAPPED) {
		if (rc == DDI_DMA_INUSE)
			rc = EBUSY;
		else if (rc == DDI_DMA_TOOBIG)
			rc = E2BIG;
		else
			rc = ENOMEM;
		goto fail2;
	}

	if (ccount != 1) {
		rc = E2BIG;
		goto fail3;
	}

	rxb->ref_cnt = 0;
	rxb->buf_size = p->buf_size;
	rxb->freefunc.free_arg = (caddr_t)rxb;
	rxb->freefunc.free_func = rxbuf_free;
	rxb->ba = cookie.dmac_laddress;

	return (0);

fail3:	(void) ddi_dma_unbind_handle(rxb->dhdl);
fail2:	ddi_dma_mem_free(&rxb->ahdl);
fail1:	ddi_dma_free_handle(&rxb->dhdl);
	return (rc);
}
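/*
 * rxbuf_ctor() above has the (void *, void *, int) signature of a kmem
 * cache constructor.  A hedged sketch of how such a constructor is
 * typically registered: rxbuf_dtor(), rxbuf_cache_create() and the cache
 * name are illustrative assumptions; only rxbuf_ctor() and struct
 * rxbuf_cache_params come from the example above.
 */
static void rxbuf_dtor(void *arg1, void *arg2);	/* assumed destructor */

static kmem_cache_t *
rxbuf_cache_create(struct rxbuf_cache_params *p)
{
	/*
	 * The params structure is passed as the cache's private argument,
	 * so rxbuf_ctor() receives it as arg2 on every object construction.
	 */
	return (kmem_cache_create("rxbuf_cache", sizeof (struct rxbuf), 0,
	    rxbuf_ctor, rxbuf_dtor, NULL, p, NULL, 0));
}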
Example #4
static int
pcn_allocrxring(pcn_t *pcnp)
{
	int			rval;
	int			i;
	size_t			len;
	size_t			size;
	ddi_dma_cookie_t	dmac;
	unsigned		ncookies;
	caddr_t			kaddr;

	size = PCN_RXRING * sizeof (pcn_rx_desc_t);

	rval = ddi_dma_alloc_handle(pcnp->pcn_dip, &pcn_dmadesc_attr,
	    DDI_DMA_SLEEP, NULL, &pcnp->pcn_rxdesc_dmah);
	if (rval != DDI_SUCCESS) {
		pcn_error(pcnp->pcn_dip, "unable to allocate DMA handle for rx "
		    "descriptors");
		return (DDI_FAILURE);
	}

	rval = ddi_dma_mem_alloc(pcnp->pcn_rxdesc_dmah, size, &pcn_devattr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &kaddr, &len,
	    &pcnp->pcn_rxdesc_acch);
	if (rval != DDI_SUCCESS) {
		pcn_error(pcnp->pcn_dip, "unable to allocate DMA memory for rx "
		    "descriptors");
		return (DDI_FAILURE);
	}

	rval = ddi_dma_addr_bind_handle(pcnp->pcn_rxdesc_dmah, NULL, kaddr,
	    size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dmac,
	    &ncookies);
	if (rval != DDI_DMA_MAPPED) {
		pcn_error(pcnp->pcn_dip, "unable to bind DMA for rx "
		    "descriptors");
		return (DDI_FAILURE);
	}

	ASSERT(ncookies == 1);

	pcnp->pcn_rxdesc_paddr = dmac.dmac_address;
	pcnp->pcn_rxdescp = (void *)kaddr;

	pcnp->pcn_rxbufs = kmem_zalloc(PCN_RXRING * sizeof (pcn_buf_t *),
	    KM_SLEEP);

	for (i = 0; i < PCN_RXRING; i++) {
		pcn_buf_t *rxb = pcn_allocbuf(pcnp);
		if (rxb == NULL)
			return (DDI_FAILURE);
		pcnp->pcn_rxbufs[i] = rxb;
	}

	return (DDI_SUCCESS);
}
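Example #5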
static virtionet_dma_t *
virtionet_dma_setup(virtionet_state_t *sp, size_t len)
{
	virtionet_dma_t		*dmap;
	int			rc;

	dmap = kmem_zalloc(sizeof (*dmap), KM_SLEEP);

	vq_dma_attr.dma_attr_flags |= DDI_DMA_FORCE_PHYSICAL;

	rc = ddi_dma_alloc_handle(sp->dip, &vq_dma_attr, DDI_DMA_SLEEP,
	    NULL, &dmap->hdl);

	if (rc == DDI_DMA_BADATTR) {
		cmn_err(CE_NOTE, "Failed to allocate physical DMA; "
		    "falling back to virtual DMA");
		vq_dma_attr.dma_attr_flags &= (~DDI_DMA_FORCE_PHYSICAL);
		rc = ddi_dma_alloc_handle(sp->dip, &vq_dma_attr, DDI_DMA_SLEEP,
		    NULL, &dmap->hdl);
	}

	if (rc != DDI_SUCCESS) {
		kmem_free(dmap, sizeof (*dmap));
		return (NULL);
	}

	rc = ddi_dma_mem_alloc(dmap->hdl, len, &virtio_native_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dmap->addr,
	    &dmap->len, &dmap->acchdl);
	if (rc != DDI_SUCCESS) {
		ddi_dma_free_handle(&dmap->hdl);
		kmem_free(dmap, sizeof (*dmap));
		return (NULL);
	}

	bzero(dmap->addr, dmap->len);

	rc = ddi_dma_addr_bind_handle(dmap->hdl, NULL, dmap->addr,
	    dmap->len, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
	    NULL, &dmap->cookie, &dmap->ccount);
	if (rc != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&dmap->acchdl);
		ddi_dma_free_handle(&dmap->hdl);
		kmem_free(dmap, sizeof (*dmap));
		return (NULL);
	}
	ASSERT(dmap->ccount == 1);

	return (dmap);
}
Example #6
efe_ring_t *
efe_ring_alloc(dev_info_t *dip, size_t len)
{
	efe_ring_t *rp;
	size_t rlen;
	uint_t ccount;

	ASSERT(len > 1);

	rp = kmem_zalloc(sizeof (efe_ring_t), KM_SLEEP);
	rp->r_len = len;

	if (ddi_dma_alloc_handle(dip, &efe_dma_attr, DDI_DMA_SLEEP, NULL,
	    &rp->r_dmah) != DDI_SUCCESS) {
		efe_error(dip, "unable to allocate DMA handle!");
		goto failure;
	}

	if (ddi_dma_mem_alloc(rp->r_dmah, DESCSZ(len), &efe_buf_acc_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, (caddr_t *)&rp->r_descp,
	    &rlen, &rp->r_acch) != DDI_SUCCESS) {
		efe_error(dip, "unable to allocate descriptors!");
		goto failure;
	}

	if (ddi_dma_addr_bind_handle(rp->r_dmah, NULL, (caddr_t)rp->r_descp,
	    DESCSZ(len), DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &rp->r_dmac, &ccount) != DDI_DMA_MAPPED) {
		efe_error(dip, "unable to bind DMA handle to descriptors!");
		goto failure;
	}

	rp->r_bufpp = kmem_zalloc(BUFPSZ(len), KM_SLEEP);

	for (int i = 0; i < len; ++i) {
		efe_buf_t *bp = efe_buf_alloc(dip, BUFSZ);
		if (bp == NULL) {
			goto failure;
		}
		rp->r_bufpp[i] = bp;
	}

	return (rp);

failure:
	efe_ring_free(&rp);

	return (NULL);
}
Example #7
/*
 * function to copy the packet or dma map on the fly depending on size
 *
 * wq - pointer to WQ
 * wqed - Pointer to WQE descriptor
 * mp - Pointer to packet chain
 *
 * return 0 => success, ENOMEM => error
 */
static  int
oce_map_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
    uint32_t pkt_len)
{
	ddi_dma_cookie_t cookie;
	oce_wq_mdesc_t *wqmd;
	uint32_t ncookies;
	int ret;
	struct oce_dev *dev = wq->parent;

	wqmd = oce_wqm_alloc(wq);
	if (wqmd == NULL) {
		oce_log(dev, CE_WARN, MOD_TX, "%s",
		    "wqm pool empty");
		return (ENOMEM);
	}

	ret = ddi_dma_addr_bind_handle(wqmd->dma_handle,
	    (struct as *)0, (caddr_t)mp->b_rptr,
	    pkt_len, DDI_DMA_WRITE | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		oce_log(dev, CE_WARN, MOD_TX, "MAP FAILED %d",
		    ret);
		/* free the last one */
		oce_wqm_free(wq, wqmd);
		return (ENOMEM);
	}
	do {
		wqed->frag[wqed->frag_idx].u0.s.frag_pa_hi =
		    ADDR_HI(cookie.dmac_laddress);
		wqed->frag[wqed->frag_idx].u0.s.frag_pa_lo =
		    ADDR_LO(cookie.dmac_laddress);
		wqed->frag[wqed->frag_idx].u0.s.frag_len =
		    (uint32_t)cookie.dmac_size;
		wqed->frag_cnt++;
		wqed->frag_idx++;
		if (--ncookies > 0)
			ddi_dma_nextcookie(wqmd->dma_handle,
			    &cookie);
		else
			break;
	} while (ncookies > 0);

	wqed->hdesc[wqed->nhdl].hdl = (void *)wqmd;
	wqed->hdesc[wqed->nhdl].type = MAPPED_WQE;
	wqed->nhdl++;
	return (0);
} /* oce_map_wqe */
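Example #8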
/*
 * igb_tx_bind
 *
 * Bind the mblk fragment with DMA
 */
static int
igb_tx_bind(igb_tx_ring_t *tx_ring, tx_control_block_t *tcb, mblk_t *mp,
    uint32_t len)
{
	int status, i;
	ddi_dma_cookie_t dma_cookie;
	uint_t ncookies;
	int desc_num;

	/*
	 * Use DMA binding to process the mblk fragment
	 */
	status = ddi_dma_addr_bind_handle(tcb->tx_dma_handle, NULL,
	    (caddr_t)mp->b_rptr, len,
	    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
	    0, &dma_cookie, &ncookies);

	if (status != DDI_DMA_MAPPED) {
		IGB_DEBUG_STAT(tx_ring->stat_fail_dma_bind);
		return (-1);
	}

	tcb->frag_num++;
	tcb->tx_type = USE_DMA;
	/*
	 * Each fragment can span several cookies. One cookie will have
	 * one tx descriptor to transmit.
	 */
	desc_num = 0;
	for (i = ncookies; i > 0; i--) {
		/*
		 * Save the address and length to the private data structure
		 * of the tx control block, which will be used to fill the
		 * tx descriptor ring after all the fragments are processed.
		 */
		igb_save_desc(tcb,
		    dma_cookie.dmac_laddress,
		    dma_cookie.dmac_size);

		desc_num++;

		if (i > 1)
			ddi_dma_nextcookie(tcb->tx_dma_handle, &dma_cookie);
	}

	return (desc_num);
}
Example #9
void
dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
	ddi_dma_cookie_t *cp)
{
	register ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	struct fast_dvma *nexus_private;
	struct dvma_ops *nexus_funcptr;
	ddi_dma_attr_t dma_attr;
	uint_t ccnt;

	if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
		nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
		nexus_funcptr = (struct dvma_ops *)nexus_private->ops;
		(void) (*nexus_funcptr->dvma_kaddr_load)(h, a, len, index, cp);
	} else {
		ddi_dma_handle_t handle;
		ddi_dma_lim_t *limp;

		limp = (ddi_dma_lim_t *)mp->dmai_mapping;
		dma_attr.dma_attr_version = DMA_ATTR_V0;
		dma_attr.dma_attr_addr_lo = limp->dlim_addr_lo;
		dma_attr.dma_attr_addr_hi = limp->dlim_addr_hi;
		dma_attr.dma_attr_count_max = limp->dlim_cntr_max;
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_burstsizes = limp->dlim_burstsizes;
		dma_attr.dma_attr_minxfer = limp->dlim_minxfer;
		dma_attr.dma_attr_maxxfer = 0xFFFFFFFFull;
		dma_attr.dma_attr_seg = 0xFFFFFFFFull;
		dma_attr.dma_attr_sgllen = 1;
		dma_attr.dma_attr_granular = 1;
		dma_attr.dma_attr_flags = 0;
		(void) ddi_dma_alloc_handle(HD, &dma_attr, DDI_DMA_SLEEP, NULL,
		    &handle);
		(void) ddi_dma_addr_bind_handle(handle, NULL, a, len,
		    DDI_DMA_RDWR, DDI_DMA_SLEEP, NULL, cp, &ccnt);
		((ddi_dma_handle_t *)mp->dmai_minfo)[index] = handle;
	}
}
Example #10
efe_buf_t *
efe_buf_alloc(dev_info_t *dip, size_t len)
{
	efe_buf_t *bp;
	size_t rlen;
	uint_t ccount;

	bp = kmem_zalloc(sizeof (efe_buf_t), KM_SLEEP);
	bp->b_len = len;

	if (ddi_dma_alloc_handle(dip, &efe_dma_attr, DDI_DMA_SLEEP, NULL,
	    &bp->b_dmah) != DDI_SUCCESS) {
		efe_error(dip, "unable to allocate DMA handle!");
		goto failure;
	}

	if (ddi_dma_mem_alloc(bp->b_dmah, len, &efe_buf_acc_attr,
	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &bp->b_kaddr, &rlen,
	    &bp->b_acch) != DDI_SUCCESS) {
		efe_error(dip, "unable to allocate buffer!");
		goto failure;
	}

	if (ddi_dma_addr_bind_handle(bp->b_dmah, NULL, bp->b_kaddr,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
	    &bp->b_dmac, &ccount) != DDI_DMA_MAPPED) {
		efe_error(dip, "unable to bind DMA handle to buffer!");
		goto failure;
	}

	return (bp);

failure:
	efe_buf_free(&bp);

	return (NULL);
}
Example #11
/*
 * igb_alloc_dma_buffer - Allocate DMA resources for a DMA buffer
 */
static int
igb_alloc_dma_buffer(igb_t *igb,
    dma_buffer_t *buf, size_t size)
{
	int ret;
	dev_info_t *devinfo = igb->dip;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t cookie_num;

	ret = ddi_dma_alloc_handle(devinfo,
	    &igb_buf_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &buf->dma_handle);

	if (ret != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		igb_error(igb,
		    "Could not allocate dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &igb_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &buf->address,
	    &len, &buf->acc_handle);

	if (ret != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not allocate dma buffer memory: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		buf->dma_address = NULL;
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not bind dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (IGB_SUCCESS);
}
Example #12
/*
 * igb_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 */
static int
igb_alloc_rbd_ring(igb_rx_data_t *rx_data)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = rx_data->rx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_rx_desc) * rx_data->ring_size;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &rx_data->rbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma handle: %x", ret);
		rx_data->rbd_area.dma_handle = NULL;
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(rx_data->rbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&rx_data->rbd_area.address,
	    &len, &rx_data->rbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma memory: %x", ret);
		rx_data->rbd_area.acc_handle = NULL;
		rx_data->rbd_area.address = NULL;
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire receive buffer descriptor area to zero
	 */
	bzero(rx_data->rbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	ret = ddi_dma_addr_bind_handle(rx_data->rbd_area.dma_handle,
	    NULL, (caddr_t)rx_data->rbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind rbd dma resource: %x", ret);
		rx_data->rbd_area.dma_address = NULL;
		if (rx_data->rbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
			rx_data->rbd_area.acc_handle = NULL;
			rx_data->rbd_area.address = NULL;
		}
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	rx_data->rbd_area.dma_address = cookie.dmac_laddress;
	rx_data->rbd_area.size = len;

	rx_data->rbd_ring = (union e1000_adv_rx_desc *)(uintptr_t)
	    rx_data->rbd_area.address;

	return (IGB_SUCCESS);
}
Example #13
/*
 * igb_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 */
static int
igb_alloc_tbd_ring(igb_tx_ring_t *tx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = tx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_tx_desc) * tx_ring->ring_size;

	/*
	 * If tx head write-back is enabled, an extra tbd is allocated
	 * to save the head write-back value
	 */
	if (igb->tx_head_wb_enable) {
		size += sizeof (union e1000_adv_tx_desc);
	}

	/*
	 * Allocate a DMA handle for the transmit descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &tx_ring->tbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma handle: %x", ret);
		tx_ring->tbd_area.dma_handle = NULL;

		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tx_ring->tbd_area.address,
	    &len, &tx_ring->tbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma memory: %x", ret);
		tx_ring->tbd_area.acc_handle = NULL;
		tx_ring->tbd_area.address = NULL;
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	bzero(tx_ring->tbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound to
	 * the memory address.
	 */
	ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind tbd dma resource: %x", ret);
		tx_ring->tbd_area.dma_address = NULL;
		if (tx_ring->tbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
			tx_ring->tbd_area.acc_handle = NULL;
			tx_ring->tbd_area.address = NULL;
		}
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
	tx_ring->tbd_area.size = len;

	tx_ring->tbd_ring = (union e1000_adv_tx_desc *)(uintptr_t)
	    tx_ring->tbd_area.address;

	return (IGB_SUCCESS);
}
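/*
 * The descriptor area bound above with DDI_DMA_CONSISTENT still has to be
 * synchronized explicitly around hardware accesses.  A minimal sketch,
 * assuming the tbd_area fields from igb_alloc_tbd_ring(); the helper name
 * and its place in the transmit path are illustrative.
 */
static void
igb_tx_ring_sync_for_dev(igb_tx_ring_t *tx_ring)
{
	/*
	 * Flush the CPU view of the descriptor ring to memory before the
	 * hardware is told that new descriptors are available.  An offset
	 * and length of 0 sync the entire bound area.
	 */
	(void) ddi_dma_sync(tx_ring->tbd_area.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
}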
Example #14
/*
 * Allocate/free a vq.
 */
struct virtqueue *
virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
    unsigned int indirect_num, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize = 0;
	int ret;
	unsigned int ncookies;
	size_t len;
	struct virtqueue *vq;

	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
	vq_size = ddi_get16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
	if (vq_size == 0) {
		dev_err(sc->sc_dev, CE_WARN,
		    "virtqueue dest not exist, index %d for %s\n", index, name);
		goto out;
	}

	vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);

	/* size 0 => use native vq size, good for receive queues. */
	if (size)
		vq_size = MIN(vq_size, size);

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
	    sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
	    sizeof (struct vring_used_elem) * vq_size);

	allocsize = allocsize1 + allocsize2;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for vq %d", index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for vq %d", index);
		goto out_alloc;
	}

	ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
	    (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for vq %d", index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);
	/* and page-aligned buffers. */
	ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);

	(void) memset(vq->vq_vaddr, 0, allocsize);

	/* Make sure all zeros hit the buffer before we point the host to it */
	membar_producer();

	/* set the vq address */
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
	    (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_descs = vq->vq_vaddr;
	vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
	vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);

	ASSERT(indirect_num == 0 ||
	    virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
	vq->vq_indirect_num = indirect_num;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
	    KM_SLEEP);

	ret = virtio_init_vq(sc, vq);
	if (ret)
		goto out_init;

	dev_debug(sc->sc_dev, CE_NOTE,
	    "Allocated %d entries for vq %d:%s (%d indirect descs)",
	    vq_size, index, name, indirect_num * vq_size);

	return (vq);

out_init:
	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
out_bind:
	ddi_dma_mem_free(&vq->vq_dma_acch);
out_alloc:
	ddi_dma_free_handle(&vq->vq_dma_handle);
out_alloc_handle:
	kmem_free(vq, sizeof (struct virtqueue));
out:
	return (NULL);
}
Example #15
static int
virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
{
	int allocsize, num;
	size_t len;
	unsigned int ncookies;
	int ret;

	num = entry->qe_queue->vq_indirect_num;
	ASSERT(num > 1);

	allocsize = sizeof (struct vring_desc) * num;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
	    DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for indirect descriptors, "
		    "entry %d, vq %d", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle, allocsize,
	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&entry->qe_indirect_descs, &len,
	    &entry->qe_indirect_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for indirect descriptors, "
		    "entry %d, vq %d,", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_alloc;
	}

	(void) memset(entry->qe_indirect_descs, 0xff, allocsize);

	ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
	    (caddr_t)entry->qe_indirect_descs, len,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &entry->qe_indirect_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for indirect descriptors, "
		    "entry %d, vq %d", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);

	return (0);

out_bind:
	ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
out_alloc:
	ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
out_alloc_handle:

	return (ret);
}
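Example #16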
static virtqueue_t *
virtio_vq_setup(virtionet_state_t *sp, int queue)
{
	virtqueue_t		*vqp = NULL;
	size_t			len;
	size_t			desc_size;
	size_t			avail_size;
	size_t			used_size;
	size_t			part1;
	size_t			part2;
	int			rc;

	vqp = kmem_zalloc(sizeof (*vqp), KM_SLEEP);

	/* save the queue number */
	vqp->vq_num = queue;

	/* Get the queue size */
	VIRTIO_PUT16(sp, VIRTIO_QUEUE_SELECT, queue);
	vqp->vq_size = VIRTIO_GET16(sp, VIRTIO_QUEUE_SIZE);

	desc_size = VRING_DTABLE_SIZE(vqp->vq_size);
	avail_size = VRING_AVAIL_SIZE(vqp->vq_size);
	used_size = VRING_USED_SIZE(vqp->vq_size);

	part1 = VRING_ROUNDUP(desc_size + avail_size);
	part2 = VRING_ROUNDUP(used_size);

	len = part1 + part2;

	vq_dma_attr.dma_attr_flags |= DDI_DMA_FORCE_PHYSICAL;

	rc = ddi_dma_alloc_handle(sp->dip, &vq_dma_attr, DDI_DMA_SLEEP,
	    NULL, &vqp->vq_dma.hdl);

	if (rc == DDI_DMA_BADATTR) {
		cmn_err(CE_NOTE, "Failed to allocate physical DMA; "
		    "falling back to virtual DMA");
		vq_dma_attr.dma_attr_flags &= (~DDI_DMA_FORCE_PHYSICAL);
		rc = ddi_dma_alloc_handle(sp->dip, &vq_dma_attr, DDI_DMA_SLEEP,
		    NULL, &vqp->vq_dma.hdl);
	}

	if (rc != DDI_SUCCESS) {
		kmem_free(vqp, sizeof (*vqp));
		return (NULL);
	}

	rc = ddi_dma_mem_alloc(vqp->vq_dma.hdl, len, &virtio_native_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &vqp->vq_dma.addr,
	    &vqp->vq_dma.len, &vqp->vq_dma.acchdl);
	if (rc != DDI_SUCCESS) {
		ddi_dma_free_handle(&vqp->vq_dma.hdl);
		kmem_free(vqp, sizeof (*vqp));
		return (NULL);
	}

	bzero(vqp->vq_dma.addr, vqp->vq_dma.len);

	rc = ddi_dma_addr_bind_handle(vqp->vq_dma.hdl, NULL, vqp->vq_dma.addr,
	    vqp->vq_dma.len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &vqp->vq_dma.cookie, &vqp->vq_dma.ccount);
	if (rc != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&vqp->vq_dma.acchdl);
		ddi_dma_free_handle(&vqp->vq_dma.hdl);
		kmem_free(vqp, sizeof (*vqp));
		return (NULL);
	}
	ASSERT(vqp->vq_dma.ccount == 1);

	vqp->vr_desc = (vring_desc_t *)vqp->vq_dma.addr;
	vqp->vr_avail = (vring_avail_t *)(vqp->vq_dma.addr + desc_size);
	vqp->vr_used = (vring_used_t *)(vqp->vq_dma.addr + part1);

	VIRTIO_PUT32(sp, VIRTIO_QUEUE_ADDRESS,
	    vqp->vq_dma.cookie.dmac_address / VIRTIO_VQ_PCI_ALIGN);

	return (vqp);
}
Example #17
/**
 * Virtio Pci get queue routine. Allocates a PCI queue and DMA resources.
 *
 * @param pDevice           Pointer to the Virtio device instance.
 * @param pQueue            Where to store the queue.
 *
 * @return An allocated Virtio Pci queue, or NULL in case of errors.
 */
static void *VirtioPciGetQueue(PVIRTIODEVICE pDevice, PVIRTIOQUEUE pQueue)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciGetQueue pDevice=%p pQueue=%p\n", pDevice, pQueue));
    AssertReturn(pDevice, NULL);

    virtio_pci_t *pPci = pDevice->pvHyper;
    AssertReturn(pPci, NULL);

    /*
     * Select a Queue.
     */
    ddi_put16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_SEL), pQueue->QueueIndex);

    /*
     * Get the currently selected Queue's size.
     */
    pQueue->Ring.cDesc = ddi_get16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_NUM));
    if (RT_UNLIKELY(!pQueue->Ring.cDesc))
    {
        LogRel((VIRTIOLOGNAME ": VirtioPciGetQueue: Queue[%d] has no descriptors.\n", pQueue->QueueIndex));
        return NULL;
    }

    /*
     * Check if it's already active.
     */
    uint32_t QueuePFN = ddi_get32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN));
    if (QueuePFN != 0)
    {
        LogRel((VIRTIOLOGNAME ":VirtioPciGetQueue: Queue[%d] is already used.\n", pQueue->QueueIndex));
        return NULL;
    }

    LogFlow(("Queue[%d] has %d slots.\n", pQueue->QueueIndex, pQueue->Ring.cDesc));

    /*
     * Allocate and initialize Pci queue data.
     */
    virtio_pci_queue_t *pPciQueue = RTMemAllocZ(sizeof(virtio_pci_queue_t));
    if (pPciQueue)
    {
        /*
         * Setup DMA.
         */
        size_t cbQueue = VirtioRingSize(pQueue->Ring.cDesc, VIRTIO_PCI_RING_ALIGN);
        int rc = ddi_dma_alloc_handle(pDevice->pDip, &g_VirtioPciDmaAttrRing, DDI_DMA_SLEEP, 0 /* addr */, &pPciQueue->hDMA);
        if (rc == DDI_SUCCESS)
        {
            rc = ddi_dma_mem_alloc(pPciQueue->hDMA, cbQueue, &g_VirtioPciAccAttrRing, DDI_DMA_CONSISTENT,
                                   DDI_DMA_SLEEP, 0 /* addr */, &pQueue->pQueue, &pPciQueue->cbBuf,
                                   &pPciQueue->hIO);
            if (rc == DDI_SUCCESS)
            {
                AssertRelease(pPciQueue->cbBuf >= cbQueue);
                ddi_dma_cookie_t DmaCookie;
                uint_t cCookies;
                rc = ddi_dma_addr_bind_handle(pPciQueue->hDMA, NULL /* addrspace */, pQueue->pQueue, pPciQueue->cbBuf,
                                              DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
                                              0 /* addr */, &DmaCookie, &cCookies);
                if (rc == DDI_SUCCESS)
                {
                    pPciQueue->physBuf = DmaCookie.dmac_laddress;
                    pPciQueue->pageBuf = pPciQueue->physBuf >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;

                    LogFlow((VIRTIOLOGNAME ":VirtioPciGetQueue: Queue[%d]%p physBuf=%x pfn of Buf %#x\n", pQueue->QueueIndex,
                             pQueue->pQueue, pPciQueue->physBuf, pPciQueue->pageBuf));
                    cmn_err(CE_NOTE, ":VirtioPciGetQueue: Queue[%d]%p physBuf=%x pfn of Buf %x\n", pQueue->QueueIndex,
                             pQueue->pQueue, pPciQueue->physBuf, pPciQueue->pageBuf);

                    /*
                     * Activate the queue and initialize a ring for the queue.
                     */
                    memset(pQueue->pQueue, 0, pPciQueue->cbBuf);
                    ddi_put32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN), pPciQueue->pageBuf);
                    VirtioRingInit(pQueue, pQueue->Ring.cDesc, pQueue->pQueue, VIRTIO_PCI_RING_ALIGN);
                    return pPciQueue;
                }
                else
Example #18
/*
 * audio1575_alloc_port()
 *
 * Description:
 *	This routine allocates the DMA handles and the memory for the
 *	DMA engines to use.  It also configures the BDL lists properly
 *	for use.
 *
 * Arguments:
 *	dev_info_t	*dip	Pointer to the device's devinfo
 *	int		num	M1575_PLAY or M1575_REC
 *	uint8_t		nchan	Number of channels (2 = stereo, 6 = 5.1, etc.)
 *
 * Returns:
 *	DDI_SUCCESS		Port resources successfully allocated
 *	DDI_FAILURE		Port resources not successfully allocated
 */
static int
audio1575_alloc_port(audio1575_state_t *statep, int num, uint8_t nchan)
{
	ddi_dma_cookie_t	cookie;
	uint_t			count;
	int			dir;
	unsigned		caps;
	audio_dev_t		*adev;
	audio1575_port_t	*port;
	uint32_t		*kaddr;
	int			rc;
	dev_info_t		*dip;

	adev = statep->adev;
	dip = statep->dip;

	port = kmem_zalloc(sizeof (*port), KM_SLEEP);
	statep->ports[num] = port;
	port->num = num;
	port->statep = statep;
	port->nchan = nchan;

	if (num == M1575_REC) {
		dir = DDI_DMA_READ;
		caps = ENGINE_INPUT_CAP;
		port->sync_dir = DDI_DMA_SYNC_FORKERNEL;
	} else {
		dir = DDI_DMA_WRITE;
		caps = ENGINE_OUTPUT_CAP;
		port->sync_dir = DDI_DMA_SYNC_FORDEV;
	}

	/*
	 * We use one big sample area.  The sample area must be larger
	 * than about 1.5 framework fragment sizes.  (Currently 480 *
	 * 1.5 = 720 frames.)  This is necessary to ensure that we
	 * don't have to involve an interrupt service routine on our
	 * own, to keep the last valid index updated reasonably.
	 */
	port->nframes = 2048;
	port->samp_size = port->nframes * port->nchan * sizeof (int16_t);

	/* allocate dma handle */
	rc = ddi_dma_alloc_handle(dip, &sample_buf_dma_attr, DDI_DMA_SLEEP,
	    NULL, &port->samp_dmah);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_alloc_handle failed: %d", rc);
		return (DDI_FAILURE);
	}
	/* allocate DMA buffer */
	rc = ddi_dma_mem_alloc(port->samp_dmah, port->samp_size, &buf_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &port->samp_kaddr,
	    &port->samp_size, &port->samp_acch);
	if (rc == DDI_FAILURE) {
		audio_dev_warn(adev, "dma_mem_alloc failed");
		return (DDI_FAILURE);
	}

	/* bind DMA buffer */
	rc = ddi_dma_addr_bind_handle(port->samp_dmah, NULL,
	    port->samp_kaddr, port->samp_size, dir|DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &cookie, &count);
	if ((rc != DDI_DMA_MAPPED) || (count != 1)) {
		audio_dev_warn(adev,
		    "ddi_dma_addr_bind_handle failed: %d", rc);
		return (DDI_FAILURE);
	}
	port->samp_paddr = cookie.dmac_address;

	/*
	 * now, from here we allocate DMA memory for buffer descriptor list.
	 * we allocate adjacent DMA memory for all DMA engines.
	 */
	rc = ddi_dma_alloc_handle(dip, &bdlist_dma_attr, DDI_DMA_SLEEP,
	    NULL, &port->bdl_dmah);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_alloc_handle(bdlist) failed");
		return (DDI_FAILURE);
	}

	/*
	 * we allocate all buffer descriptor lists in contiguous dma memory.
	 */
	port->bdl_size = sizeof (m1575_bd_entry_t) * M1575_BD_NUMS;
	rc = ddi_dma_mem_alloc(port->bdl_dmah, port->bdl_size,
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &port->bdl_kaddr, &port->bdl_size, &port->bdl_acch);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_mem_alloc(bdlist) failed");
		return (DDI_FAILURE);
	}

	/*
	 * Wire up the BD list.  We do this *before* binding the BD list
	 * so that we don't have to do an extra ddi_dma_sync.
	 */
	kaddr = (void *)port->bdl_kaddr;
	for (int i = 0; i < M1575_BD_NUMS; i++) {

		/* set base address of buffer */
		ddi_put32(port->bdl_acch, kaddr, port->samp_paddr);
		kaddr++;

		/* set size in frames, and enable IOC interrupt */
		ddi_put32(port->bdl_acch, kaddr,
		    ((port->samp_size / sizeof (int16_t)) | (1U << 31)));
		kaddr++;
	}

	rc = ddi_dma_addr_bind_handle(port->bdl_dmah, NULL, port->bdl_kaddr,
	    port->bdl_size, DDI_DMA_WRITE|DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &cookie, &count);
	if ((rc != DDI_DMA_MAPPED) || (count != 1)) {
		audio_dev_warn(adev, "addr_bind_handle failed");
		return (DDI_FAILURE);
	}
	port->bdl_paddr = cookie.dmac_address;

	port->engine = audio_engine_alloc(&audio1575_engine_ops, caps);
	if (port->engine == NULL) {
		audio_dev_warn(adev, "audio_engine_alloc failed");
		return (DDI_FAILURE);
	}

	audio_engine_set_private(port->engine, port);
	audio_dev_add_engine(adev, port->engine);

	return (DDI_SUCCESS);
}
Example #19
/*
 * SMCG_dma_alloc assumes that either rlist_lock mutex is held or
 * that it is called from a point where no interrupts, sends, or receives
 * happen.
 */
static int
SMCG_dma_alloc(smcg_t *smcg)
{
	Adapter_Struc		*pAd = smcg->smcg_pAd;
	unsigned int		ramsize = LM_Get_Host_Ram_Size(pAd);
	uint_t			len, ncookies, i, j;
	ddi_dma_cookie_t	cookie;

	/* Allocate resources for shared memory block */
	if (ddi_dma_alloc_handle(smcg->smcg_devinfo, &host_ram_dma_attr,
	    DDI_DMA_SLEEP, 0, &smcg->hostram_dmahandle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (ddi_dma_mem_alloc(smcg->hostram_dmahandle, ramsize, &accattr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    (caddr_t *)&pAd->host_ram_virt_addr,
	    (size_t *)&len, &smcg->hostram_acchandle) != DDI_SUCCESS) {
		ddi_dma_free_handle(&smcg->hostram_dmahandle);
		return (DDI_FAILURE);
	}

	if (ddi_dma_addr_bind_handle(smcg->hostram_dmahandle, NULL,
	    (caddr_t)pAd->host_ram_virt_addr, ramsize,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    &cookie, &ncookies) != DDI_SUCCESS) {
		ddi_dma_mem_free(&smcg->hostram_acchandle);
		ddi_dma_free_handle(&smcg->hostram_dmahandle);
		return (DDI_FAILURE);
	}

	ASSERT(ncookies == 1 && cookie.dmac_size >= ramsize);
	pAd->host_ram_phy_addr = cookie.dmac_address;

	/* Allocate a list of receive buffers */
	smcg->rxbdesc_mem = kmem_zalloc(sizeof (struct smcg_rx_buffer_desc) *
	    pAd->num_of_rx_buffs*2, KM_SLEEP);
	smcg->rx_freelist = (struct smcg_rx_buffer_desc *)smcg->rxbdesc_mem;

	for (i = 0; i < pAd->num_of_rx_buffs * 2; i++) {
		if (ddi_dma_alloc_handle(smcg->smcg_devinfo, &buf_dma_attr,
		    DDI_DMA_SLEEP, 0, &smcg->rx_freelist[i].dmahandle)
		    != DDI_SUCCESS)
			goto failure;

		if (ddi_dma_mem_alloc(smcg->rx_freelist[i].dmahandle,
		    ETHERMAX + 4, &accattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
		    0, (caddr_t *)&smcg->rx_freelist[i].buf, (size_t *)&len,
		    &smcg->rx_freelist[i].acchandle) != DDI_SUCCESS) {
			ddi_dma_free_handle(&smcg->rx_freelist[i].dmahandle);
			goto failure;
		}

		if (ddi_dma_addr_bind_handle(smcg->rx_freelist[i].dmahandle,
		    NULL, (caddr_t)smcg->rx_freelist[i].buf, ETHERMAX + 4,
		    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
		    &cookie, &ncookies) != DDI_SUCCESS) {
			ddi_dma_mem_free(&smcg->rx_freelist[i].acchandle);
			ddi_dma_free_handle(&smcg->rx_freelist[i].dmahandle);
			goto failure;
		}

		ASSERT(ncookies == 1 && cookie.dmac_size >= ETHERMAX+4);
		smcg->rx_freelist[i].physaddr = cookie.dmac_address;
		smcg->rx_freelist[i].smcg = smcg;
		smcg->rx_freelist[i].free_rtn.free_func = SMCG_freertn;
		smcg->rx_freelist[i].free_rtn.free_arg =
		    (char *)&smcg->rx_freelist[i];

		smcg->rx_freelist[i].next = &smcg->rx_freelist[i+1];
	}

	smcg->rx_freelist[i-1].next = NULL;

	/*
	 * Remove one buffer from free list for each receive descriptor,
	 * and associate with an element in the receive ring
	 */
	for (i = 0; i < pAd->num_of_rx_buffs; i++) {
		/* Unlink from free list */
		smcg->bdesc[i] = smcg->rx_freelist;
		smcg->rx_freelist = smcg->bdesc[i]->next;
	}

	smcg->smc_dbuf.fragment_list[0].fragment_length =
	    (ETHERMAX + 4) | (unsigned long)PHYSICAL_ADDR;
	smcg->smc_dbuf.fragment_count = 1;

	/* Allocate the handles to which we bind outgoing data */
	for (i = 0; i < pAd->num_of_tx_buffs; i++)
		for (j = 0; j < SMCG_MAX_TX_MBLKS; j++)
			if (ddi_dma_alloc_handle(smcg->smcg_devinfo,
			    &txdata_attr, DDI_DMA_SLEEP, 0,
			    &smcg->tx_info[i].dmahandle[j]) != DDI_SUCCESS)
				goto failure;

	return (DDI_SUCCESS);

failure:
	SMCG_dma_unalloc(smcg);
	cmn_err(CE_WARN, SMCG_NAME ": could not allocate DMA resources");
	return (DDI_FAILURE);
}
Example #20
/* ARGSUSED */
i40e_status
i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
    enum i40e_memory_type type, u64 size, u32 alignment)
{
	int rc;
	i40e_t *i40e = OS_DEP(hw)->ios_i40e;
	dev_info_t *dip = i40e->i40e_dip;
	size_t len;
	ddi_dma_cookie_t cookie;
	uint_t cookie_num;
	ddi_dma_attr_t attr;

	/*
	 * Because we need to honor the specified alignment, we need to
	 * dynamically construct the attributes. We save the alignment for
	 * debugging purposes.
	 */
	bcopy(&i40e->i40e_static_dma_attr, &attr, sizeof (ddi_dma_attr_t));
	attr.dma_attr_align = alignment;
	mem->idm_alignment = alignment;
	rc = ddi_dma_alloc_handle(dip, &attr,
	    DDI_DMA_DONTWAIT, NULL, &mem->idm_dma_handle);
	if (rc != DDI_SUCCESS) {
		mem->idm_dma_handle = NULL;
		i40e_error(i40e, "failed to allocate DMA handle for common "
		    "code: %d", rc);

		/*
		 * Swallow unknown errors and treat them like we do
		 * DDI_DMA_NORESOURCES, in other words, a memory error.
		 */
		if (rc == DDI_DMA_BADATTR)
			return (I40E_ERR_PARAM);
		return (I40E_ERR_NO_MEMORY);
	}

	rc = ddi_dma_mem_alloc(mem->idm_dma_handle, size,
	    &i40e->i40e_buf_acc_attr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
	    NULL, (caddr_t *)&mem->va, &len, &mem->idm_acc_handle);
	if (rc != DDI_SUCCESS) {
		mem->idm_acc_handle = NULL;
		mem->va = NULL;
		ASSERT(mem->idm_dma_handle != NULL);
		ddi_dma_free_handle(&mem->idm_dma_handle);
		mem->idm_dma_handle = NULL;

		i40e_error(i40e, "failed to allocate %" PRIu64 " bytes of DMA "
		    "memory for common code", size);
		return (I40E_ERR_NO_MEMORY);
	}

	bzero(mem->va, len);

	rc = ddi_dma_addr_bind_handle(mem->idm_dma_handle, NULL, mem->va, len,
	    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
	    &cookie, &cookie_num);
	if (rc != DDI_DMA_MAPPED) {
		mem->pa = NULL;
		ASSERT(mem->idm_acc_handle != NULL);
		ddi_dma_mem_free(&mem->idm_acc_handle);
		mem->idm_acc_handle = NULL;
		mem->va = NULL;
		ASSERT(mem->idm_dma_handle != NULL);
		ddi_dma_free_handle(&mem->idm_dma_handle);
		mem->idm_dma_handle = NULL;

		i40e_error(i40e, "failed to bind %ld byte sized dma region: %d",
		    len, rc);
		switch (rc) {
		case DDI_DMA_INUSE:
			return (I40E_ERR_NOT_READY);
		case DDI_DMA_TOOBIG:
			return (I40E_ERR_INVALID_SIZE);
		case DDI_DMA_NOMAPPING:
		case DDI_DMA_NORESOURCES:
		default:
			return (I40E_ERR_NO_MEMORY);
		}
	}

	ASSERT(cookie_num == 1);
	mem->pa = cookie.dmac_laddress;
	/*
	 * Lint doesn't like this because the common code gives us a uint64_t as
	 * input, but the common code then asks us to assign it to a size_t. So
	 * lint's right, but in this case there isn't much we can do.
	 */
	mem->size = (size_t)size;

	return (I40E_SUCCESS);
}
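Example #21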
/*
 * Map a msg into the Tx command ring of a vmxnet3s device.
 */
static vmxnet3s_txstatus_t
vmxnet3s_tx_one(vmxnet3s_softc_t *dp, vmxnet3s_txq_t *txq,
    vmxnet3s_offload_t *ol, mblk_t *mp, int to_copy)
{
	int		ret = VMXNET3_TX_OK;
	uint_t	frags = 0, totlen = 0;
	vmxnet3s_cmdring_t *cmdring = &txq->cmdring;
	vmxnet3s_txqctrl_t *txqctrl = txq->sharedctrl;
	vmxnet3s_gendesc_t *txdesc;
	uint16_t	sopidx;
	uint16_t	eopidx;
	uint8_t		sopgen;
	uint8_t		curgen;
	mblk_t		*mblk;
	uint_t	len;
	size_t		offset = 0;

	mutex_enter(&dp->txlock);

	sopidx = eopidx = cmdring->next2fill;
	sopgen = cmdring->gen;
	curgen = !cmdring->gen;

	mblk = mp;
	len = MBLKL(mblk);

	if (to_copy) {
		uint32_t	dw2;
		uint32_t	dw3;

		ASSERT(len >= to_copy);

		if (cmdring->avail <= 1) {
			dp->txmustresched = B_TRUE;
			ret = VMXNET3_TX_RINGFULL;
			goto error;
		}
		totlen += to_copy;

		len -= to_copy;
		offset = to_copy;

		bcopy(mblk->b_rptr, dp->txcache.nodes[sopidx].va, to_copy);

		eopidx = cmdring->next2fill;
		txdesc = VMXNET3_GET_DESC(cmdring, eopidx);

		ASSERT(txdesc->txd.gen != cmdring->gen);

		txdesc->txd.addr = dp->txcache.nodes[sopidx].pa;
		dw2 = to_copy;
		dw2 |= curgen << VMXNET3_TXD_GEN_SHIFT;
		txdesc->dword[2] = dw2;
		ASSERT(txdesc->txd.len == to_copy || txdesc->txd.len == 0);
		dw3 = 0;
		txdesc->dword[3] = dw3;

		VMXNET3_INC_RING_IDX(cmdring, cmdring->next2fill);
		curgen = cmdring->gen;

		frags++;
	}

	for (; mblk != NULL; mblk = mblk->b_cont, len = mblk ? MBLKL(mblk) : 0,
	    offset = 0) {
		ddi_dma_cookie_t cookie;
		uint_t cookiecount;

		if (len)
			totlen += len;
		else
			continue;

		if (ddi_dma_addr_bind_handle(dp->txdmahdl, NULL,
		    (caddr_t)mblk->b_rptr + offset, len,
		    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
		    &cookie, &cookiecount) != DDI_DMA_MAPPED) {
			ret = VMXNET3_TX_FAILURE;
			goto error;
		}

		ASSERT(cookiecount);

		do {
			uint64_t	addr = cookie.dmac_laddress;
			size_t		len = cookie.dmac_size;

			do {
				uint32_t	dw2;
				uint32_t	dw3;
				size_t		chunklen;

				ASSERT(!txq->metaring[eopidx].mp);
				ASSERT(cmdring->avail - frags);

				if (frags >= cmdring->size - 1 || (ol->om !=
				    VMXNET3_OM_TSO &&
				    frags >= VMXNET3_MAX_TXD_PER_PKT)) {
					(void) ddi_dma_unbind_handle(
					    dp->txdmahdl);
					ret = VMXNET3_TX_PULLUP;
					goto error;
				}
				if (cmdring->avail - frags <= 1) {
					dp->txmustresched = B_TRUE;
					(void) ddi_dma_unbind_handle(
					    dp->txdmahdl);
					ret = VMXNET3_TX_RINGFULL;
					goto error;
				}

				if (len > VMXNET3_MAX_TX_BUF_SIZE)
					chunklen = VMXNET3_MAX_TX_BUF_SIZE;
				else
					chunklen = len;

				frags++;
				eopidx = cmdring->next2fill;

				txdesc = VMXNET3_GET_DESC(cmdring, eopidx);
				ASSERT(txdesc->txd.gen != cmdring->gen);

				/* txd.addr */
				txdesc->txd.addr = addr;
				/* txd.dw2 */
				dw2 = chunklen == VMXNET3_MAX_TX_BUF_SIZE ? 0 :
				    chunklen;
				dw2 |= curgen << VMXNET3_TXD_GEN_SHIFT;
				txdesc->dword[2] = dw2;
				ASSERT(txdesc->txd.len == len ||
				    txdesc->txd.len == 0);
				/* txd.dw3 */
				dw3 = 0;
				txdesc->dword[3] = dw3;

				VMXNET3_INC_RING_IDX(cmdring,
				    cmdring->next2fill);
				curgen = cmdring->gen;

				addr += chunklen;
				len -= chunklen;
			} while (len);

			if (--cookiecount)
				ddi_dma_nextcookie(dp->txdmahdl, &cookie);
		} while (cookiecount);

		(void) ddi_dma_unbind_handle(dp->txdmahdl);
	}

	/* Update the EOP descriptor */
	txdesc = VMXNET3_GET_DESC(cmdring, eopidx);
	txdesc->dword[3] |= VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;

	/* Update the SOP descriptor. Must be done last */
	txdesc = VMXNET3_GET_DESC(cmdring, sopidx);
	if (ol->om == VMXNET3_OM_TSO &&
	    txdesc->txd.len != 0 &&
	    txdesc->txd.len < ol->hlen) {
		ret = VMXNET3_TX_FAILURE;
		goto error;
	}
	txdesc->txd.om = ol->om;
	txdesc->txd.hlen = ol->hlen;
	txdesc->txd.msscof = ol->msscof;
	membar_producer();
	txdesc->txd.gen = sopgen;

	/* Update the meta ring & metadata */
	txq->metaring[sopidx].mp = mp;
	txq->metaring[eopidx].sopidx = sopidx;
	txq->metaring[eopidx].frags = frags;
	cmdring->avail -= frags;
	if (ol->om == VMXNET3_OM_TSO) {
		txqctrl->txnumdeferred += (totlen - ol->hlen + ol->msscof - 1) /
		    ol->msscof;
	} else {
		txqctrl->txnumdeferred++;
	}

	goto done;

error:
	/* Reverse the generation bits */
	while (sopidx != cmdring->next2fill) {
		VMXNET3_DEC_RING_IDX(cmdring, cmdring->next2fill);
		txdesc = VMXNET3_GET_DESC(cmdring, cmdring->next2fill);
		txdesc->txd.gen = !cmdring->gen;
	}

done:
	mutex_exit(&dp->txlock);

	return (ret);
}
Example #22
/*
 * SMCG_send() -- send a packet
 */
static int
SMCG_send(gld_mac_info_t *macinfo, mblk_t *mp)
{
	smcg_t			*smcg = (smcg_t *)macinfo->gldm_private;
	Adapter_Struc		*pAd = smcg->smcg_pAd;
	int			i = 0, j = 0, totlen = 0, msglen = 0, rc;
	mblk_t			*mptr = mp;
	Data_Buff_Structure	dbuf;
	ddi_dma_cookie_t	cookie;
	unsigned int		ncookies;

	for (; mptr != NULL; i++, mptr = mptr->b_cont) {
		if (i >= SMCG_MAX_TX_MBLKS) {
			if (pullupmsg(mp, -1) == 0) {
				smcg->smcg_need_gld_sched = 1;
				return (GLD_NORESOURCES); /* retry send */
			}
			msglen = (mp->b_wptr - mp->b_rptr);
			break;
		}
		msglen += (mptr->b_wptr - mptr->b_rptr);
	}

	if (msglen > ETHERMAX) {
		cmn_err(CE_WARN, SMCG_NAME "%d: dropping oversize packet (%d)",
		    macinfo->gldm_ppa, msglen);
		return (GLD_BADARG);
	}


	mutex_enter(&smcg->txbuf_lock);
	mutex_enter(&smcg->lm_lock);
	LM_Reap_Xmits(pAd);
	mutex_exit(&smcg->lm_lock);

	if ((smcg->tx_ring_head + 1) % pAd->num_of_tx_buffs
	    == smcg->tx_ring_tail) {
		smcg->smcg_need_gld_sched = 1;
		mutex_exit(&smcg->txbuf_lock);
		return (GLD_NORESOURCES); /* retry send */
	}

	for (mptr = mp, i = 0; mptr != NULL; mptr = mptr->b_cont) {
		int blocklen = mptr->b_wptr - mptr->b_rptr;

		if (blocklen == 0)
			continue;

		ASSERT(i < SMCG_MAX_TX_MBLKS);
		rc = ddi_dma_addr_bind_handle(
		    smcg->tx_info[smcg->tx_ring_head].dmahandle[i], NULL,
		    (caddr_t)mptr->b_rptr, (size_t)blocklen, DDI_DMA_WRITE,
		    DDI_DMA_DONTWAIT, 0, &cookie, &ncookies);
		if (rc != DDI_DMA_MAPPED) {
			while (--i >= 0)
				(void) ddi_dma_unbind_handle(
				    smcg->tx_info[smcg->tx_ring_head].
				    dmahandle[i]);
			if (rc == DDI_DMA_NORESOURCES) {
				smcg->smcg_need_gld_sched = 1;
				mutex_exit(&smcg->txbuf_lock);
				return (GLD_NORESOURCES);
			}
#ifdef	DEBUG
	if (SMCG_debug & SMCGTRACE)
		cmn_err(CE_WARN, SMCG_NAME
			"Send bind handle failure = 0x%x", rc);
#endif
			mutex_exit(&smcg->txbuf_lock);
			return (GLD_FAILURE);
		}

		/* CONSTANTCONDITION */
		while (1) {
			dbuf.fragment_list[j].fragment_length =
			    cookie.dmac_size | PHYSICAL_ADDR;
			dbuf.fragment_list[j].fragment_ptr =
			    (unsigned char *)(uintptr_t)cookie.dmac_address;
			j++;
			if (--ncookies == 0)
				break;
			ddi_dma_nextcookie(
			    smcg->tx_info[smcg->tx_ring_head].dmahandle[i],
			    &cookie);
		}
		i++;
		totlen += blocklen;
	}
	dbuf.fragment_count = j;
	smcg->tx_info[smcg->tx_ring_head].handles_bound = i;
	smcg->tx_info[smcg->tx_ring_head].mptr = mp;

	if (totlen < ETHERMIN)
		totlen = ETHERMIN;	/* pad if necessary */

	mutex_enter(&smcg->lm_lock);
	pAd->xmit_interrupts = (smcg->smcg_need_gld_sched) ? 1 : 0;
	rc = LM_Send(&dbuf, pAd, totlen);
	mutex_exit(&smcg->lm_lock);

	if (rc != SUCCESS) {
		for (i = 0;
		    i < smcg->tx_info[smcg->tx_ring_head].handles_bound; i++)
			(void) ddi_dma_unbind_handle(
			    smcg->tx_info[smcg->tx_ring_head].dmahandle[i]);
	} else
		smcg->tx_ring_head =
		    (smcg->tx_ring_head+1) % pAd->num_of_tx_buffs;

	mutex_exit(&smcg->txbuf_lock);

#ifdef	DEBUG
	if (rc != SUCCESS && rc != OUT_OF_RESOURCES)
		cmn_err(CE_WARN,
		    SMCG_NAME "_send: LM_Send failed %d", rc);
#endif
	if (rc == SUCCESS) {
		return (GLD_SUCCESS);
	} else if (rc == OUT_OF_RESOURCES) {
		smcg->smcg_need_gld_sched = 1;
		return (GLD_NORESOURCES);
	} else {
		return (GLD_FAILURE);
	}
}
Example #23
/*
 * hermon_check_iommu_bypass()
 *    Context: Only called from attach() path context
 *    XXX This is a DMA allocation routine outside the normal
 *	  path. FMA hardening will not like this.
 */
static void
hermon_check_iommu_bypass(hermon_state_t *state, hermon_cfg_profile_t *cp)
{
	ddi_dma_handle_t	dmahdl;
	ddi_dma_attr_t		dma_attr;
	int			status;
	ddi_acc_handle_t	acc_hdl;
	caddr_t			kaddr;
	size_t			actual_len;
	ddi_dma_cookie_t	cookie;
	uint_t			cookiecnt;

	hermon_dma_attr_init(state, &dma_attr);

	/* Try mapping for IOMMU bypass (Force Physical) */
	dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL |
	    DDI_DMA_RELAXED_ORDERING;

	/*
	 * Call ddi_dma_alloc_handle().  If this returns DDI_DMA_BADATTR then
	 * it is not possible to use IOMMU bypass with our PCI bridge parent.
	 * Since the function we are in can only be called if iommu bypass was
	 * requested in the config profile, we configure for bypass if the
	 * ddi_dma_alloc_handle() was successful.  Otherwise, we configure
	 * for non-bypass (ie: normal) mapping.
	 */
	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
	    DDI_DMA_SLEEP, NULL, &dmahdl);
	if (status == DDI_DMA_BADATTR) {
		cp->cp_iommu_bypass = HERMON_BINDMEM_NORMAL;
		return;
	} else if (status != DDI_SUCCESS) {	/* failed somehow */
		hermon_kernel_data_ro = HERMON_RO_DISABLED;
		hermon_user_data_ro = HERMON_RO_DISABLED;
		cp->cp_iommu_bypass = HERMON_BINDMEM_BYPASS;
		return;
	} else {
		cp->cp_iommu_bypass = HERMON_BINDMEM_BYPASS;
	}

	status = ddi_dma_mem_alloc(dmahdl, 256,
	    &state->hs_reg_accattr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, (caddr_t *)&kaddr, &actual_len, &acc_hdl);

	if (status != DDI_SUCCESS) {		/* failed somehow */
		hermon_kernel_data_ro = HERMON_RO_DISABLED;
		hermon_user_data_ro = HERMON_RO_DISABLED;
		ddi_dma_free_handle(&dmahdl);
		return;
	}

	status = ddi_dma_addr_bind_handle(dmahdl, NULL, kaddr, actual_len,
	    DDI_DMA_RDWR, DDI_DMA_SLEEP, NULL, &cookie, &cookiecnt);

	if (status == DDI_DMA_MAPPED) {
		(void) ddi_dma_unbind_handle(dmahdl);
	} else {
		hermon_kernel_data_ro = HERMON_RO_DISABLED;
		hermon_user_data_ro = HERMON_RO_DISABLED;
	}

	ddi_dma_mem_free(&acc_hdl);
	ddi_dma_free_handle(&dmahdl);
}
Example #24
/**
 * Virtio Net Xmit hook.
 *
 * @param pvArg             Pointer to private data.
 * @param pMsg              Pointer to the message.
 *
 * @return Pointer to message not Xmited.
 */
static mblk_t *VirtioNetXmit(void *pvArg, mblk_t *pMsg)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioNetXmit pMsg=%p\n", pMsg));
    cmn_err(CE_NOTE, "Xmit pMsg=%p\n", pMsg);

    PVIRTIODEVICE pDevice = pvArg;
    virtio_net_t *pNet    = pDevice->pvDevice;
    bool fNotify          = false;

    while (pMsg)
    {
        mblk_t *pNextMsg = pMsg->b_next;

#if 0
        mblk_t *pHdr = allocb(sizeof(virtio_net_header_t), BPRI_HI);
        if (RT_UNLIKELY(!pHdr))
            break;

        virtio_net_header_t *pNetHdr = pHdr->b_rptr;
        memset(pNetHdr, 0, sizeof(virtio_net_header_t));
        pNetHdr->u8Flags       = VIRTIO_NET_GUEST_CSUM;
        pNetHdr->u16HdrLen     = sizeof(virtio_net_header_t);

        pHdr->b_wptr += sizeof(virtio_net_header_t);
        pHdr->b_cont = pMsg;
#endif

        virtio_net_txbuf_t *pTxBuf = kmem_cache_alloc(pNet->pTxCache, KM_SLEEP);
        if (!pTxBuf)
            break;

        ddi_dma_cookie_t DmaCookie;
        uint_t cCookies;
        int rc = ddi_dma_addr_bind_handle(pTxBuf->hDMA, NULL /* addrspace */, (char *)pMsg->b_rptr, MBLKL(pMsg),
                                          DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, 0 /* addr */,
                                          &DmaCookie, &cCookies);
        cmn_err(CE_NOTE, "VirtioNetXmit: MBLKL pMsg=%u\n", MBLKL(pMsg));
        if (rc != DDI_DMA_MAPPED)
        {
            LogRel((VIRTIOLOGNAME ":VirtioNetXmit failed to map address to DMA handle. rc=%d\n", rc));
            kmem_cache_free(pNet->pTxCache, pTxBuf);
            break;
        }

        /** @todo get 'cCookies' slots from the ring. */

        for (uint_t i = 0; i < cCookies; i++)
        {
            uint16_t fFlags = 0;
            if (i < cCookies - 1)
                fFlags |= VIRTIO_FLAGS_RING_DESC_NEXT;

            rc = VirtioRingPush(pNet->pTxQueue, DmaCookie.dmac_laddress, DmaCookie.dmac_size, fFlags);
            if (RT_FAILURE(rc))
            {
                LogRel((VIRTIOLOGNAME ":VirtioNetXmit failed. rc=%Rrc\n", rc));
                break;
            }

            ddi_dma_nextcookie(pTxBuf->hDMA, &DmaCookie);
        }

        pMsg = pNextMsg;
        fNotify = true;
        if (RT_FAILURE(rc))
        {
            ddi_dma_unbind_handle(pTxBuf->hDMA);
            break;
        }
    }

    if (fNotify)
        pDevice->pHyperOps->pfnNotifyQueue(pDevice, pNet->pTxQueue);

    return pMsg;
}
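Example #25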
/*
 * audioixp_alloc_port()
 *
 * Description:
 *	This routine allocates the DMA handles and the memory for the
 *	DMA engines to use.  It also configures the BDL lists properly
 *	for use.
 *
 * Arguments:
 *	dev_info_t	*dip	Pointer to the device's devinfo
 *
 * Returns:
 *	DDI_SUCCESS		Port resources successfully allocated
 *	DDI_FAILURE		Port resources not successfully allocated
 */
static int
audioixp_alloc_port(audioixp_state_t *statep, int num)
{
	ddi_dma_cookie_t	cookie;
	uint_t			count;
	int			dir;
	unsigned		caps;
	char			*prop;
	audio_dev_t		*adev;
	audioixp_port_t		*port;
	uint32_t		paddr;
	int			rc;
	dev_info_t		*dip;
	audioixp_bd_entry_t	*bdentry;

	adev = statep->adev;
	dip = statep->dip;

	port = kmem_zalloc(sizeof (*port), KM_SLEEP);
	port->statep = statep;
	port->started = B_FALSE;
	port->num = num;

	switch (num) {
	case IXP_REC:
		statep->rec_port = port;
		prop = "record-interrupts";
		dir = DDI_DMA_READ;
		caps = ENGINE_INPUT_CAP;
		port->sync_dir = DDI_DMA_SYNC_FORKERNEL;
		port->nchan = 2;
		break;
	case IXP_PLAY:
		statep->play_port = port;
		prop = "play-interrupts";
		dir = DDI_DMA_WRITE;
		caps = ENGINE_OUTPUT_CAP;
		port->sync_dir = DDI_DMA_SYNC_FORDEV;
		/* This could possibly be conditionalized */
		port->nchan = 6;
		break;
	default:
		audio_dev_warn(adev, "bad port number (%d)!", num);
		return (DDI_FAILURE);
	}

	port->intrs = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, prop, IXP_INTS);

	/* make sure the values are good */
	if (port->intrs < IXP_MIN_INTS) {
		audio_dev_warn(adev, "%s too low, %d, resetting to %d",
		    prop, port->intrs, IXP_INTS);
		port->intrs = IXP_INTS;
	} else if (port->intrs > IXP_MAX_INTS) {
		audio_dev_warn(adev, "%s too high, %d, resetting to %d",
		    prop, port->intrs, IXP_INTS);
		port->intrs = IXP_INTS;
	}

	/*
	 * Figure out how much space we need.  Sample rate is 48kHz, and
	 * we need to store 8 chunks.  (Note that this means that low
	 * interrupt frequencies will require more RAM.)
	 */
	port->fragfr = 48000 / port->intrs;
	port->fragfr = IXP_ROUNDUP(port->fragfr, IXP_MOD_SIZE);
	port->fragsz = port->fragfr * port->nchan * 2;
	port->samp_size = port->fragsz * IXP_BD_NUMS;
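	/*
	 * Illustration: at 48 interrupts/sec, fragfr = 48000 / 48 = 1000
	 * frames (then rounded up to a multiple of IXP_MOD_SIZE); for the
	 * stereo record port (nchan = 2, 16-bit samples) fragsz =
	 * 1000 * 2 * 2 = 4000 bytes, and samp_size = fragsz * IXP_BD_NUMS.
	 */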

	/* allocate dma handle */
	rc = ddi_dma_alloc_handle(dip, &sample_buf_dma_attr, DDI_DMA_SLEEP,
	    NULL, &port->samp_dmah);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_alloc_handle failed: %d", rc);
		return (DDI_FAILURE);
	}
	/* allocate DMA buffer */
	rc = ddi_dma_mem_alloc(port->samp_dmah, port->samp_size, &buf_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &port->samp_kaddr,
	    &port->samp_size, &port->samp_acch);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "dma_mem_alloc failed");
		return (DDI_FAILURE);
	}

	/* bind DMA buffer */
	rc = ddi_dma_addr_bind_handle(port->samp_dmah, NULL,
	    port->samp_kaddr, port->samp_size, dir|DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &cookie, &count);
	if ((rc != DDI_DMA_MAPPED) || (count != 1)) {
		audio_dev_warn(adev,
		    "ddi_dma_addr_bind_handle failed: %d", rc);
		return (DDI_FAILURE);
	}
	port->samp_paddr = cookie.dmac_address;

	/*
	 * Now allocate DMA memory for the buffer descriptor lists.  We
	 * allocate adjacent DMA memory for all of the DMA engines.
	 */
	rc = ddi_dma_alloc_handle(dip, &bdlist_dma_attr, DDI_DMA_SLEEP,
	    NULL, &port->bdl_dmah);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_alloc_handle(bdlist) failed");
		return (DDI_FAILURE);
	}

	/*
	 * All buffer descriptor lists are placed in one contiguous chunk
	 * of DMA memory.
	 */
	port->bdl_size = sizeof (audioixp_bd_entry_t) * IXP_BD_NUMS;
	rc = ddi_dma_mem_alloc(port->bdl_dmah, port->bdl_size,
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &port->bdl_kaddr, &port->bdl_size, &port->bdl_acch);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_mem_alloc(bdlist) failed");
		return (DDI_FAILURE);
	}

	rc = ddi_dma_addr_bind_handle(port->bdl_dmah, NULL, port->bdl_kaddr,
	    port->bdl_size, DDI_DMA_WRITE|DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &cookie, &count);
	if ((rc != DDI_DMA_MAPPED) || (count != 1)) {
		audio_dev_warn(adev, "addr_bind_handle failed");
		return (DDI_FAILURE);
	}
	port->bdl_paddr = cookie.dmac_address;

	/*
	 * Wire up the BD list.
	 */
	paddr = port->samp_paddr;
	bdentry = (void *)port->bdl_kaddr;

	for (int i = 0; i < IXP_BD_NUMS; i++) {

		/* set base address of buffer */
		ddi_put32(port->bdl_acch, &bdentry->buf_base, paddr);
		ddi_put16(port->bdl_acch, &bdentry->status, 0);
		ddi_put16(port->bdl_acch, &bdentry->buf_len, port->fragsz / 4);
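		/*
		 * Chain to the next entry; the (i + 1) % IXP_BD_NUMS term
		 * points the last descriptor back at the first, so the
		 * hardware walks the BD list as a ring.
		 */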
		ddi_put32(port->bdl_acch, &bdentry->next, port->bdl_paddr +
		    (((i + 1) % IXP_BD_NUMS) * sizeof (audioixp_bd_entry_t)));
		paddr += port->fragsz;
		bdentry++;
	}
	(void) ddi_dma_sync(port->bdl_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);

	port->engine = audio_engine_alloc(&audioixp_engine_ops, caps);
	if (port->engine == NULL) {
		audio_dev_warn(adev, "audio_engine_alloc failed");
		return (DDI_FAILURE);
	}

	audio_engine_set_private(port->engine, port);
	audio_dev_add_engine(adev, port->engine);

	return (DDI_SUCCESS);
}
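
A port built by audioixp_alloc_port() ends up with two bound DMA mappings (the
sample buffer and the BD list), each with its own DMA handle and access handle.
The sketch below shows the matching teardown in the reverse order of allocation
(unbind, free the memory, free the handle), using only the field names visible
above; the helper name is hypothetical and the checks assume the port structure
was zero-allocated.

static void
audioixp_port_dma_free(audioixp_port_t *port)
{
	/* BD list: undo the bind, then release the memory and the handle */
	if (port->bdl_paddr != 0)
		(void) ddi_dma_unbind_handle(port->bdl_dmah);
	if (port->bdl_acch != NULL)
		ddi_dma_mem_free(&port->bdl_acch);
	if (port->bdl_dmah != NULL)
		ddi_dma_free_handle(&port->bdl_dmah);

	/* sample buffer: same order */
	if (port->samp_paddr != 0)
		(void) ddi_dma_unbind_handle(port->samp_dmah);
	if (port->samp_acch != NULL)
		ddi_dma_mem_free(&port->samp_acch);
	if (port->samp_dmah != NULL)
		ddi_dma_free_handle(&port->samp_dmah);

	kmem_free(port, sizeof (*port));
}
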
Exemple #26
0
/*
 * Function to allocate a DMA buffer and map its memory (VA to PA).
 *
 * dev - software handle to the device
 * size - size of the memory to map
 * dma_attr - DMA attributes to use, or NULL for the driver default
 * flags - DDI_DMA_CONSISTENT/DDI_DMA_STREAMING
 *
 * Returns a pointer to an oce_dma_buf_t structure describing the mapping,
 *      or NULL on failure.
 */
oce_dma_buf_t *
oce_alloc_dma_buffer(struct oce_dev *dev,
    uint32_t size, ddi_dma_attr_t *dma_attr, uint32_t flags)
{
	oce_dma_buf_t  *dbuf;
	ddi_dma_cookie_t cookie;
	uint32_t count;
	size_t actual_len;
	int ret = 0;

	ASSERT(size > 0);
	/* if NULL use default */
	if (dma_attr == NULL) {
		dma_attr = &oce_dma_buf_attr;
	}

	dbuf = kmem_zalloc(sizeof (oce_dma_buf_t), KM_NOSLEEP);
	if (dbuf == NULL) {
		return (NULL);
	}

	/* allocate dma handle */
	ret = ddi_dma_alloc_handle(dev->dip, dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dbuf->dma_handle);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to allocate DMA handle");
		goto handle_fail;
	}
	/* allocate the DMA-able memory */
	ret = ddi_dma_mem_alloc(dbuf->dma_handle, size, &oce_dma_buf_accattr,
	    flags, DDI_DMA_DONTWAIT, NULL, &dbuf->base,
	    &actual_len, &dbuf->acc_handle);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to allocate DMA memory");
		goto alloc_fail;
	}

	/* bind handle */
	ret = ddi_dma_addr_bind_handle(dbuf->dma_handle,
	    (struct as *)0, dbuf->base, actual_len,
	    DDI_DMA_RDWR | flags,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &count);
	if (ret != DDI_DMA_MAPPED) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to bind dma handle");
		goto bind_fail;
	}
	bzero(dbuf->base, actual_len);
	dbuf->addr = cookie.dmac_laddress;
	dbuf->size = actual_len;
	/* usable length */
	dbuf->len  = size;
	dbuf->num_pages = OCE_NUM_PAGES(size);
	return (dbuf);

bind_fail:
	ddi_dma_mem_free(&dbuf->acc_handle);
alloc_fail:
	ddi_dma_free_handle(&dbuf->dma_handle);
handle_fail:
	kmem_free(dbuf, sizeof (oce_dma_buf_t));
	return (NULL);
} /* oce_alloc_dma_buffer */
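
The allocator above has a natural inverse.  A minimal sketch, written here from
the structure fields visible in oce_alloc_dma_buffer(); the function name and
the absence of error handling are choices made for illustration, not taken from
the driver.

static void
oce_free_dma_buffer_sketch(oce_dma_buf_t *dbuf)
{
	if (dbuf == NULL)
		return;
	(void) ddi_dma_unbind_handle(dbuf->dma_handle);	/* undo the bind */
	ddi_dma_mem_free(&dbuf->acc_handle);		/* releases dbuf->base */
	ddi_dma_free_handle(&dbuf->dma_handle);
	kmem_free(dbuf, sizeof (oce_dma_buf_t));
}
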
Exemple #27
0
/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx_prepare_offload --
 *
 *    Build the offload context of a msg.
 *
 * Results:
 *    0 if everything went well.
 *    +n if n bytes need to be pulled up.
 *    -1 in case of error (not used).
 *
 * Side effects:
 *    None.
 *
 *---------------------------------------------------------------------------
 */
static int
vmxnet3_tx_prepare_offload(vmxnet3_softc_t *dp,
                           vmxnet3_offload_t *ol,
                           mblk_t *mp)
{
   int ret = 0;
   uint32_t start, stuff, value, flags;
#if defined(OPEN_SOLARIS) || defined(SOL11)
   uint32_t lso_flag, mss;
#endif

   ol->om = VMXNET3_OM_NONE;
   ol->hlen = 0;
   ol->msscof = 0;

   hcksum_retrieve(mp, NULL, NULL, &start, &stuff, NULL, &value, &flags);
#if defined(OPEN_SOLARIS) || defined(SOL11)
   mac_lso_get(mp, &mss, &lso_flag);

   if (flags || lso_flag) {
#else
   if (flags) {
#endif
      struct ether_vlan_header *eth = (void *) mp->b_rptr;
      uint8_t ethLen;

      if (eth->ether_tpid == htons(ETHERTYPE_VLAN)) {
         ethLen = sizeof(struct ether_vlan_header);
      } else {
         ethLen = sizeof(struct ether_header);
      }

      VMXNET3_DEBUG(dp, 4, "flags=0x%x, ethLen=%u, start=%u, stuff=%u, value=%u\n",
                            flags,      ethLen,    start,    stuff,    value);

#if defined(OPEN_SOLARIS) || defined(SOL11)
      if (lso_flag & HW_LSO) {
#else
      if (flags & HCK_PARTIALCKSUM) {
         ol->om = VMXNET3_OM_CSUM;
         ol->hlen = start + ethLen;
         ol->msscof = stuff + ethLen;
      }
      if (flags & HW_LSO) {
#endif
         mblk_t *mblk = mp;
         uint8_t *ip, *tcp;
         uint8_t ipLen, tcpLen;

         /*
          * Copy e1000g's behavior:
          * - Do not assume all the headers are in the same mblk.
          * - Assume each header is always within one mblk.
          * - Assume the ethernet header is in the first mblk.
          */
         ip = mblk->b_rptr + ethLen;
         if (ip >= mblk->b_wptr) {
            mblk = mblk->b_cont;
            ip = mblk->b_rptr;
         }
         ipLen = IPH_HDR_LENGTH((ipha_t *) ip);
         tcp = ip + ipLen;
         if (tcp >= mblk->b_wptr) {
            mblk = mblk->b_cont;
            tcp = mblk->b_rptr;
         }
         tcpLen = TCP_HDR_LENGTH((tcph_t *) tcp);
         if (tcp + tcpLen > mblk->b_wptr) { // careful, '>' instead of '>=' here
            mblk = mblk->b_cont;
         }

         ol->om = VMXNET3_OM_TSO;
         ol->hlen = ethLen + ipLen + tcpLen;
#if defined(OPEN_SOLARIS) || defined(SOL11)
         ol->msscof = mss;
#else
         /* OpenSolaris fills 'value' with the MSS but Solaris doesn't. */
         ol->msscof = DB_LSOMSS(mp);
#endif
         if (mblk != mp) {
            ret = ol->hlen;
         }
      }
#if defined(OPEN_SOLARIS) || defined(SOL11)
      else if (flags & HCK_PARTIALCKSUM) {
         ol->om = VMXNET3_OM_CSUM;
         ol->hlen = start + ethLen;
         ol->msscof = stuff + ethLen;
      }
#endif
   }

   return ret;
}

/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx_one --
 *
 *    Map a msg into the Tx command ring of a vmxnet3 device.
 *
 * Results:
 *    VMXNET3_TX_OK if everything went well.
 *    VMXNET3_TX_RINGFULL if the ring is nearly full.
 *    VMXNET3_TX_PULLUP if the msg is overfragmented.
 *    VMXNET3_TX_FAILURE if there was a DMA or offload error.
 *
 * Side effects:
 *    The ring is filled if VMXNET3_TX_OK is returned.
 *
 *---------------------------------------------------------------------------
 */
static vmxnet3_txstatus
vmxnet3_tx_one(vmxnet3_softc_t *dp,
               vmxnet3_txqueue_t *txq,
               vmxnet3_offload_t *ol,
               mblk_t *mp,
               boolean_t retry)
{
   int ret = VMXNET3_TX_OK;
   unsigned int frags = 0, totLen = 0;
   vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
   Vmxnet3_TxQueueCtrl *txqCtrl = txq->sharedCtrl;
   Vmxnet3_GenericDesc *txDesc;
   uint16_t sopIdx, eopIdx;
   uint8_t sopGen, curGen;
   mblk_t *mblk;

   mutex_enter(&dp->txLock);

   sopIdx = eopIdx = cmdRing->next2fill;
   sopGen = cmdRing->gen;
   curGen = !cmdRing->gen;

   for (mblk = mp; mblk != NULL; mblk = mblk->b_cont) {
      unsigned int len = MBLKL(mblk);
      ddi_dma_cookie_t cookie;
      uint_t cookieCount;

      if (len) {
         totLen += len;
      } else {
         continue;
      }

      if (ddi_dma_addr_bind_handle(dp->txDmaHandle, NULL,
                                   (caddr_t) mblk->b_rptr, len,
                                   DDI_DMA_RDWR | DDI_DMA_STREAMING,
                                   DDI_DMA_DONTWAIT, NULL,
                                   &cookie, &cookieCount) != DDI_DMA_MAPPED) {
         VMXNET3_WARN(dp, "ddi_dma_addr_bind_handle() failed\n");
         ret = VMXNET3_TX_FAILURE;
         goto error;
      }

      ASSERT(cookieCount);

      do {
         uint64_t addr = cookie.dmac_laddress;
         size_t len = cookie.dmac_size;

         do {
            uint32_t dw2, dw3;
            size_t chunkLen;

            ASSERT(!txq->metaRing[eopIdx].mp);
            ASSERT(cmdRing->avail - frags);

            if (frags >= cmdRing->size - 1 ||
                (ol->om != VMXNET3_OM_TSO && frags >= VMXNET3_MAX_TXD_PER_PKT)) {

               if (retry) {
                  VMXNET3_DEBUG(dp, 2, "overfragmented, frags=%u ring=%hu om=%hu\n",
                                frags, cmdRing->size, ol->om);
               }
               ddi_dma_unbind_handle(dp->txDmaHandle);
               ret = VMXNET3_TX_PULLUP;
               goto error;
            }
            if (cmdRing->avail - frags <= 1) {
               dp->txMustResched = B_TRUE;
               ddi_dma_unbind_handle(dp->txDmaHandle);
               ret = VMXNET3_TX_RINGFULL;
               goto error;
            }

            if (len > VMXNET3_MAX_TX_BUF_SIZE) {
               chunkLen = VMXNET3_MAX_TX_BUF_SIZE;
            } else {
               chunkLen = len;
            }

            frags++;
            eopIdx = cmdRing->next2fill;

            txDesc = VMXNET3_GET_DESC(cmdRing, eopIdx);
            ASSERT(txDesc->txd.gen != cmdRing->gen);

            // txd.addr
            txDesc->txd.addr = addr;
            // txd.dw2
            dw2 = chunkLen == VMXNET3_MAX_TX_BUF_SIZE ? 0 : chunkLen;
            dw2 |= curGen << VMXNET3_TXD_GEN_SHIFT;
            txDesc->dword[2] = dw2;
            ASSERT(txDesc->txd.len == len || txDesc->txd.len == 0);
            // txd.dw3
            dw3 = 0;
            txDesc->dword[3] = dw3;

            VMXNET3_INC_RING_IDX(cmdRing, cmdRing->next2fill);
            curGen = cmdRing->gen;

            addr += chunkLen;
            len -= chunkLen;
         } while (len);

         if (--cookieCount) {
            ddi_dma_nextcookie(dp->txDmaHandle, &cookie);
         }
      } while (cookieCount);

      ddi_dma_unbind_handle(dp->txDmaHandle);
   }

   /* Update the EOP descriptor */
   txDesc = VMXNET3_GET_DESC(cmdRing, eopIdx);
   txDesc->dword[3] |= VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;

   /* Update the SOP descriptor. Must be done last */
   txDesc = VMXNET3_GET_DESC(cmdRing, sopIdx);
   if (ol->om == VMXNET3_OM_TSO &&
       txDesc->txd.len != 0 &&
       txDesc->txd.len < ol->hlen) {
      ret = VMXNET3_TX_FAILURE;
      goto error;
   }
   txDesc->txd.om = ol->om;
   txDesc->txd.hlen = ol->hlen;
   txDesc->txd.msscof = ol->msscof;
   membar_producer();
   txDesc->txd.gen = sopGen;

   /* Update the meta ring & metadata */
   txq->metaRing[sopIdx].mp = mp;
   txq->metaRing[eopIdx].sopIdx = sopIdx;
   txq->metaRing[eopIdx].frags = frags;
   cmdRing->avail -= frags;
   if (ol->om == VMXNET3_OM_TSO) {
      txqCtrl->txNumDeferred +=
         (totLen - ol->hlen + ol->msscof - 1) / ol->msscof;
   } else {
      txqCtrl->txNumDeferred++;
   }

   VMXNET3_DEBUG(dp, 3, "tx 0x%p on [%u;%u]\n", mp, sopIdx, eopIdx);

   goto done;

error:
   /* Reverse the generation bits */
   while (sopIdx != cmdRing->next2fill) {
      VMXNET3_DEC_RING_IDX(cmdRing, cmdRing->next2fill);
      txDesc = VMXNET3_GET_DESC(cmdRing, cmdRing->next2fill);
      txDesc->txd.gen = !cmdRing->gen;
   }

done:
   mutex_exit(&dp->txLock);

   return ret;
}
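
vmxnet3_tx_one() binds and unbinds dp->txDmaHandle once per non-empty fragment,
so the handle itself must exist before any transmit runs.  Below is a minimal
sketch of that one-time setup, assuming the handle is created at attach time;
the dip field and the attribute-pointer parameter are assumptions, not taken
from the driver.

static int
vmxnet3_alloc_txDmaHandle_sketch(vmxnet3_softc_t *dp, ddi_dma_attr_t *attrs)
{
   /* One reusable handle for all per-fragment binds in vmxnet3_tx_one(). */
   int err = ddi_dma_alloc_handle(dp->dip, attrs, DDI_DMA_SLEEP, NULL,
                                  &dp->txDmaHandle);
   if (err != DDI_SUCCESS) {
      VMXNET3_WARN(dp, "ddi_dma_alloc_handle() failed: %d", err);
   }
   return err;
}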

/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx --
 *
 *    Send packets on a vmxnet3 device.
 *
 * Results:
 *    NULL in case of success or failure.
 *    The mps to be retransmitted later if the ring is full.
 *
 * Side effects:
 *    None.
 *
 *---------------------------------------------------------------------------
 */
mblk_t *
vmxnet3_tx(void *data, mblk_t *mps)
{
   vmxnet3_softc_t *dp = data;
   vmxnet3_txqueue_t *txq = &dp->txQueue;
   vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
   Vmxnet3_TxQueueCtrl *txqCtrl = txq->sharedCtrl;
   vmxnet3_txstatus status = VMXNET3_TX_OK;
   mblk_t *mp;

   ASSERT(mps != NULL);

   do {
      vmxnet3_offload_t ol;
      int pullup;

      mp = mps;
      mps = mp->b_next;
      mp->b_next = NULL;

      if (DB_TYPE(mp) != M_DATA) {
         /*
          * PR #315560: Solaris might pass M_PROTO mblks for some reason.
          * Drop them because we don't understand them and because their
          * contents are not Ethernet frames anyway.
          */
         ASSERT(B_FALSE);
         freemsg(mp);
         continue;
      }

      /*
       * Prepare the offload while we're still handling the original
       * message -- msgpullup() discards the metadata afterwards.
       */
      pullup = vmxnet3_tx_prepare_offload(dp, &ol, mp);
      if (pullup) {
         mblk_t *new_mp = msgpullup(mp, pullup);
         freemsg(mp);
         if (new_mp) {
            mp = new_mp;
         } else {
            continue;
         }
      }

      /*
       * Try to map the message in the Tx ring.
       * This call might fail for non-fatal reasons.
       */
      status = vmxnet3_tx_one(dp, txq, &ol, mp, B_FALSE);
      if (status == VMXNET3_TX_PULLUP) {
         /*
          * Try one more time after flattening
          * the message with msgpullup().
          */
         if (mp->b_cont != NULL) {
            mblk_t *new_mp = msgpullup(mp, -1);
            freemsg(mp);
            if (new_mp) {
               mp = new_mp;
               status = vmxnet3_tx_one(dp, txq, &ol, mp, B_TRUE);
            } else {
               continue;
            }
         }
      }
      if (status != VMXNET3_TX_OK && status != VMXNET3_TX_RINGFULL) {
         /* Fatal failure, drop it */
         freemsg(mp);
      }
   } while (mps && status != VMXNET3_TX_RINGFULL);

   if (status == VMXNET3_TX_RINGFULL) {
      mp->b_next = mps;
      mps = mp;
   } else {
      ASSERT(!mps);
   }

   /* Notify the device */
   mutex_enter(&dp->txLock);
   if (txqCtrl->txNumDeferred >= txqCtrl->txThreshold) {
      txqCtrl->txNumDeferred = 0;
      VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_TXPROD, cmdRing->next2fill);
   }
   mutex_exit(&dp->txLock);

   return mps;
}
Exemple #28
0
static int
hxge_start(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, p_mblk_t mp)
{
	int 			dma_status, status = 0;
	p_tx_desc_t 		tx_desc_ring_vp;
	hpi_handle_t		hpi_desc_handle;
	hxge_os_dma_handle_t 	tx_desc_dma_handle;
	p_tx_desc_t 		tx_desc_p;
	p_tx_msg_t 		tx_msg_ring;
	p_tx_msg_t 		tx_msg_p;
	tx_desc_t		tx_desc, *tmp_desc_p;
	tx_desc_t		sop_tx_desc, *sop_tx_desc_p;
	p_tx_pkt_header_t	hdrp;
	p_tx_pkt_hdr_all_t	pkthdrp;
	uint8_t			npads = 0;
	uint64_t 		dma_ioaddr;
	uint32_t		dma_flags;
	int			last_bidx;
	uint8_t 		*b_rptr;
	caddr_t 		kaddr;
	uint32_t		nmblks;
	uint32_t		ngathers;
	uint32_t		clen;
	int 			len;
	uint32_t		pkt_len, pack_len, min_len;
	uint32_t		bcopy_thresh;
	int 			i, cur_index, sop_index;
	uint16_t		tail_index;
	boolean_t		tail_wrap = B_FALSE;
	hxge_dma_common_t	desc_area;
	hxge_os_dma_handle_t 	dma_handle;
	ddi_dma_cookie_t 	dma_cookie;
	hpi_handle_t		hpi_handle;
	p_mblk_t 		nmp;
	p_mblk_t		t_mp;
	uint32_t 		ncookies;
	boolean_t 		good_packet;
	boolean_t 		mark_mode = B_FALSE;
	p_hxge_stats_t 		statsp;
	p_hxge_tx_ring_stats_t	tdc_stats;
	t_uscalar_t 		start_offset = 0;
	t_uscalar_t 		stuff_offset = 0;
	t_uscalar_t 		end_offset = 0;
	t_uscalar_t 		value = 0;
	t_uscalar_t 		cksum_flags = 0;
	boolean_t		cksum_on = B_FALSE;
	uint32_t		boff = 0;
	uint64_t		tot_xfer_len = 0, tmp_len = 0;
	boolean_t		header_set = B_FALSE;
	tdc_tdr_kick_t		kick;
	uint32_t		offset;
#ifdef HXGE_DEBUG
	p_tx_desc_t 		tx_desc_ring_pp;
	p_tx_desc_t 		tx_desc_pp;
	tx_desc_t		*save_desc_p;
	int			dump_len;
	int			sad_len;
	uint64_t		sad;
	int			xfer_len;
	uint32_t		msgsize;
#endif

	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "==> hxge_start: tx dma channel %d", tx_ring_p->tdc));
	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "==> hxge_start: Starting tdc %d desc pending %d",
	    tx_ring_p->tdc, tx_ring_p->descs_pending));

	statsp = hxgep->statsp;

	if (hxgep->statsp->port_stats.lb_mode == hxge_lb_normal) {
		if (!statsp->mac_stats.link_up) {
			freemsg(mp);
			HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: "
			    "link not up or LB mode"));
			goto hxge_start_fail1;
		}
	}

	mac_hcksum_get(mp, &start_offset, &stuff_offset, &end_offset, &value,
	    &cksum_flags);
	if (!HXGE_IS_VLAN_PACKET(mp->b_rptr)) {
		start_offset += sizeof (ether_header_t);
		stuff_offset += sizeof (ether_header_t);
	} else {
		start_offset += sizeof (struct ether_vlan_header);
		stuff_offset += sizeof (struct ether_vlan_header);
	}

	if (cksum_flags & HCK_PARTIALCKSUM) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "==> hxge_start: mp $%p len %d "
		    "cksum_flags 0x%x (partial checksum) ",
		    mp, MBLKL(mp), cksum_flags));
		cksum_on = B_TRUE;
	}

	MUTEX_ENTER(&tx_ring_p->lock);
start_again:
	ngathers = 0;
	sop_index = tx_ring_p->wr_index;
#ifdef	HXGE_DEBUG
	if (tx_ring_p->descs_pending) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "==> hxge_start: desc pending %d ",
		    tx_ring_p->descs_pending));
	}

	dump_len = (int)(MBLKL(mp));
	dump_len = (dump_len > 128) ? 128: dump_len;

	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "==> hxge_start: tdc %d: dumping ...: b_rptr $%p "
	    "(Before header reserve: ORIGINAL LEN %d)",
	    tx_ring_p->tdc, mp->b_rptr, dump_len));

	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "==> hxge_start: dump packets (IP ORIGINAL b_rptr $%p): %s",
	    mp->b_rptr, hxge_dump_packet((char *)mp->b_rptr, dump_len)));
#endif

	tdc_stats = tx_ring_p->tdc_stats;
	mark_mode = (tx_ring_p->descs_pending &&
	    ((tx_ring_p->tx_ring_size - tx_ring_p->descs_pending) <
	    hxge_tx_minfree));

	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "TX Descriptor ring is channel %d mark mode %d",
	    tx_ring_p->tdc, mark_mode));

	if (!hxge_txdma_reclaim(hxgep, tx_ring_p, hxge_tx_minfree)) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "TX Descriptor ring is full: channel %d", tx_ring_p->tdc));
		(void) atomic_cas_32((uint32_t *)&tx_ring_p->queueing, 0, 1);
		tdc_stats->tx_no_desc++;
		MUTEX_EXIT(&tx_ring_p->lock);
		status = 1;
		goto hxge_start_fail1;
	}

	nmp = mp;
	i = sop_index = tx_ring_p->wr_index;
	nmblks = 0;
	ngathers = 0;
	pkt_len = 0;
	pack_len = 0;
	clen = 0;
	last_bidx = -1;
	good_packet = B_TRUE;

	desc_area = tx_ring_p->tdc_desc;
	hpi_handle = desc_area.hpi_handle;
	hpi_desc_handle.regh = (hxge_os_acc_handle_t)
	    DMA_COMMON_ACC_HANDLE(desc_area);
	hpi_desc_handle.hxgep = hxgep;
	tx_desc_ring_vp = (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
#ifdef	HXGE_DEBUG
#if defined(__i386)
	tx_desc_ring_pp = (p_tx_desc_t)(uint32_t)DMA_COMMON_IOADDR(desc_area);
#else
	tx_desc_ring_pp = (p_tx_desc_t)DMA_COMMON_IOADDR(desc_area);
#endif
#endif
	tx_desc_dma_handle = (hxge_os_dma_handle_t)DMA_COMMON_HANDLE(desc_area);
	tx_msg_ring = tx_ring_p->tx_msg_ring;

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: wr_index %d i %d",
	    sop_index, i));

#ifdef	HXGE_DEBUG
	msgsize = msgdsize(nmp);
	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "==> hxge_start(1): wr_index %d i %d msgdsize %d",
	    sop_index, i, msgsize));
#endif
	/*
	 * The first 16 bytes of the premapped buffer are reserved
	 * for header. No padding will be used.
	 */
	pkt_len = pack_len = boff = TX_PKT_HEADER_SIZE;
	if (hxge_tx_use_bcopy) {
		bcopy_thresh = (hxge_bcopy_thresh - TX_PKT_HEADER_SIZE);
	} else {
		bcopy_thresh = (TX_BCOPY_SIZE - TX_PKT_HEADER_SIZE);
	}
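	/*
	 * Each fragment below takes one of two paths: short fragments are
	 * bcopy'd into the premapped per-descriptor buffer (USE_BCOPY),
	 * while longer ones are bound directly with
	 * ddi_dma_addr_bind_handle() (USE_DMA) and may span several
	 * cookies, i.e. several descriptor entries.
	 */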
	while (nmp) {
		good_packet = B_TRUE;
		b_rptr = nmp->b_rptr;
		len = MBLKL(nmp);
		if (len <= 0) {
			nmp = nmp->b_cont;
			continue;
		}
		nmblks++;

		HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start(1): nmblks %d "
		    "len %d pkt_len %d pack_len %d",
		    nmblks, len, pkt_len, pack_len));
		/*
		 * Hardware limits the transfer length to 4K.
		 * If len is more than 4K, we need to break
		 * nmp into two chunks: Make first chunk smaller
		 * than 4K. The second chunk will be broken into
		 * less than 4K (if needed) during the next pass.
		 */
		if (len > (TX_MAX_TRANSFER_LENGTH - TX_PKT_HEADER_SIZE)) {
			if ((t_mp = dupb(nmp)) != NULL) {
				nmp->b_wptr = nmp->b_rptr +
				    (TX_MAX_TRANSFER_LENGTH -
				    TX_PKT_HEADER_SIZE);
				t_mp->b_rptr = nmp->b_wptr;
				t_mp->b_cont = nmp->b_cont;
				nmp->b_cont = t_mp;
				len = MBLKL(nmp);
			} else {
				good_packet = B_FALSE;
				goto hxge_start_fail2;
			}
		}
		tx_desc.value = 0;
		tx_desc_p = &tx_desc_ring_vp[i];
#ifdef	HXGE_DEBUG
		tx_desc_pp = &tx_desc_ring_pp[i];
#endif
		tx_msg_p = &tx_msg_ring[i];
#if defined(__i386)
		hpi_desc_handle.regp = (uint32_t)tx_desc_p;
#else
		hpi_desc_handle.regp = (uint64_t)tx_desc_p;
#endif
		if (!header_set &&
		    ((!hxge_tx_use_bcopy && (len > TX_BCOPY_SIZE)) ||
		    (len >= bcopy_thresh))) {
			header_set = B_TRUE;
			bcopy_thresh += TX_PKT_HEADER_SIZE;
			boff = 0;
			pack_len = 0;
			kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
			hdrp = (p_tx_pkt_header_t)kaddr;
			clen = pkt_len;
			dma_handle = tx_msg_p->buf_dma_handle;
			dma_ioaddr = DMA_COMMON_IOADDR(tx_msg_p->buf_dma);
			offset = tx_msg_p->offset_index * hxge_bcopy_thresh;
			(void) ddi_dma_sync(dma_handle,
			    offset, hxge_bcopy_thresh, DDI_DMA_SYNC_FORDEV);

			tx_msg_p->flags.dma_type = USE_BCOPY;
			goto hxge_start_control_header_only;
		}

		pkt_len += len;
		pack_len += len;

		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "==> hxge_start(3): desc entry %d DESC IOADDR $%p "
		    "desc_vp $%p tx_desc_p $%p desc_pp $%p tx_desc_pp $%p "
		    "len %d pkt_len %d pack_len %d",
		    i,
		    DMA_COMMON_IOADDR(desc_area),
		    tx_desc_ring_vp, tx_desc_p,
		    tx_desc_ring_pp, tx_desc_pp,
		    len, pkt_len, pack_len));

		if (len < bcopy_thresh) {
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_start(4): USE BCOPY: "));
			if (hxge_tx_tiny_pack) {
				uint32_t blst = TXDMA_DESC_NEXT_INDEX(i, -1,
				    tx_ring_p->tx_wrap_mask);
				HXGE_DEBUG_MSG((hxgep, TX_CTL,
				    "==> hxge_start(5): pack"));
				if ((pack_len <= bcopy_thresh) &&
				    (last_bidx == blst)) {
					HXGE_DEBUG_MSG((hxgep, TX_CTL,
					    "==> hxge_start: pack(6) "
					    "(pkt_len %d pack_len %d)",
					    pkt_len, pack_len));
					i = blst;
					tx_desc_p = &tx_desc_ring_vp[i];
#ifdef	HXGE_DEBUG
					tx_desc_pp = &tx_desc_ring_pp[i];
#endif
					tx_msg_p = &tx_msg_ring[i];
					boff = pack_len - len;
					ngathers--;
				} else if (pack_len > bcopy_thresh &&
				    header_set) {
					pack_len = len;
					boff = 0;
					bcopy_thresh = hxge_bcopy_thresh;
					HXGE_DEBUG_MSG((hxgep, TX_CTL,
					    "==> hxge_start(7): > max NEW "
					    "bcopy thresh %d "
					    "pkt_len %d pack_len %d(next)",
					    bcopy_thresh, pkt_len, pack_len));
				}
				last_bidx = i;
			}
			kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
			if ((boff == TX_PKT_HEADER_SIZE) && (nmblks == 1)) {
				hdrp = (p_tx_pkt_header_t)kaddr;
				header_set = B_TRUE;
				HXGE_DEBUG_MSG((hxgep, TX_CTL,
				    "==> hxge_start(7_x2): "
				    "pkt_len %d pack_len %d (new hdrp $%p)",
				    pkt_len, pack_len, hdrp));
			}
			tx_msg_p->flags.dma_type = USE_BCOPY;
			kaddr += boff;
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_start(8): USE BCOPY: before bcopy "
			    "DESC IOADDR $%p entry %d bcopy packets %d "
			    "bcopy kaddr $%p bcopy ioaddr (SAD) $%p "
			    "bcopy clen %d bcopy boff %d",
			    DMA_COMMON_IOADDR(desc_area), i,
			    tdc_stats->tx_hdr_pkts, kaddr, dma_ioaddr,
			    clen, boff));
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_start: 1USE BCOPY: "));
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_start: 2USE BCOPY: "));
			HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: "
			    "last USE BCOPY: copy from b_rptr $%p "
			    "to KADDR $%p (len %d offset %d",
			    b_rptr, kaddr, len, boff));
			bcopy(b_rptr, kaddr, len);
#ifdef	HXGE_DEBUG
			dump_len = (len > 128) ? 128: len;
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_start: dump packets "
			    "(After BCOPY len %d)"
			    "(b_rptr $%p): %s", len, nmp->b_rptr,
			    hxge_dump_packet((char *)nmp->b_rptr,
			    dump_len)));
#endif
			dma_handle = tx_msg_p->buf_dma_handle;
			dma_ioaddr = DMA_COMMON_IOADDR(tx_msg_p->buf_dma);
			offset = tx_msg_p->offset_index * hxge_bcopy_thresh;
			(void) ddi_dma_sync(dma_handle,
			    offset, hxge_bcopy_thresh, DDI_DMA_SYNC_FORDEV);
			clen = len + boff;
			tdc_stats->tx_hdr_pkts++;
			HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start(9): "
			    "USE BCOPY: DESC IOADDR $%p entry %d "
			    "bcopy packets %d bcopy kaddr $%p "
			    "bcopy ioaddr (SAD) $%p bcopy clen %d "
			    "bcopy boff %d",
			    DMA_COMMON_IOADDR(desc_area), i,
			    tdc_stats->tx_hdr_pkts, kaddr, dma_ioaddr,
			    clen, boff));
		} else {
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_start(12): USE DVMA: len %d", len));
			tx_msg_p->flags.dma_type = USE_DMA;
			dma_flags = DDI_DMA_WRITE;
			if (len < hxge_dma_stream_thresh) {
				dma_flags |= DDI_DMA_CONSISTENT;
			} else {
				dma_flags |= DDI_DMA_STREAMING;
			}

			dma_handle = tx_msg_p->dma_handle;
			dma_status = ddi_dma_addr_bind_handle(dma_handle, NULL,
			    (caddr_t)b_rptr, len, dma_flags,
			    DDI_DMA_DONTWAIT, NULL,
			    &dma_cookie, &ncookies);
			if (dma_status == DDI_DMA_MAPPED) {
				dma_ioaddr = dma_cookie.dmac_laddress;
				len = (int)dma_cookie.dmac_size;
				clen = (uint32_t)dma_cookie.dmac_size;
				HXGE_DEBUG_MSG((hxgep, TX_CTL,
				    "==> hxge_start(12_1): "
				    "USE DVMA: len %d clen %d ngathers %d",
				    len, clen, ngathers));
#if defined(__i386)
				hpi_desc_handle.regp = (uint32_t)tx_desc_p;
#else
				hpi_desc_handle.regp = (uint64_t)tx_desc_p;
#endif
				while (ncookies > 1) {
					ngathers++;
					/*
					 * Each extra cookie consumes a
					 * descriptor entry of its own; the
					 * SOP bit and its related fields
					 * are not set on these entries.
					 */

					(void) hpi_txdma_desc_gather_set(
					    hpi_desc_handle, &tx_desc,
					    (ngathers -1), mark_mode,
					    ngathers, dma_ioaddr, clen);
					tx_msg_p->tx_msg_size = clen;
					HXGE_DEBUG_MSG((hxgep, TX_CTL,
					    "==> hxge_start:  DMA "
					    "ncookie %d ngathers %d "
					    "dma_ioaddr $%p len %d"
					    "desc $%p descp $%p (%d)",
					    ncookies, ngathers,
					    dma_ioaddr, clen,
					    *tx_desc_p, tx_desc_p, i));

					ddi_dma_nextcookie(dma_handle,
					    &dma_cookie);
					dma_ioaddr = dma_cookie.dmac_laddress;

					len = (int)dma_cookie.dmac_size;
					clen = (uint32_t)dma_cookie.dmac_size;
					HXGE_DEBUG_MSG((hxgep, TX_CTL,
					    "==> hxge_start(12_2): "
					    "USE DVMA: len %d clen %d ",
					    len, clen));

					i = TXDMA_DESC_NEXT_INDEX(i, 1,
					    tx_ring_p->tx_wrap_mask);
					tx_desc_p = &tx_desc_ring_vp[i];

					hpi_desc_handle.regp =
#if defined(__i386)
					    (uint32_t)tx_desc_p;
#else
					    (uint64_t)tx_desc_p;
#endif
					tx_msg_p = &tx_msg_ring[i];
					tx_msg_p->flags.dma_type = USE_NONE;
					tx_desc.value = 0;
					ncookies--;
				}
				tdc_stats->tx_ddi_pkts++;
				HXGE_DEBUG_MSG((hxgep, TX_CTL,
				    "==> hxge_start: DMA: ddi packets %d",
				    tdc_stats->tx_ddi_pkts));
			} else {
				HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
				    "dma mapping failed for %d "
				    "bytes addr $%p flags %x (%d)",
				    len, b_rptr, dma_flags, dma_status));
				good_packet = B_FALSE;
				tdc_stats->tx_dma_bind_fail++;
				tx_msg_p->flags.dma_type = USE_NONE;
				status = 1;
				goto hxge_start_fail2;
			}
		} /* ddi dvma */

		nmp = nmp->b_cont;
hxge_start_control_header_only:
#if defined(__i386)
		hpi_desc_handle.regp = (uint32_t)tx_desc_p;
#else
		hpi_desc_handle.regp = (uint64_t)tx_desc_p;
#endif
		ngathers++;

		if (ngathers == 1) {
#ifdef	HXGE_DEBUG
			save_desc_p = &sop_tx_desc;
#endif
			sop_tx_desc_p = &sop_tx_desc;
			sop_tx_desc_p->value = 0;
			sop_tx_desc_p->bits.tr_len = clen;
			sop_tx_desc_p->bits.sad = dma_ioaddr >> 32;
			sop_tx_desc_p->bits.sad_l = dma_ioaddr & 0xffffffff;
		} else {
Exemple #30
0
extern fc_packet_t *
emlxs_pkt_alloc(emlxs_port_t *port, uint32_t cmdlen, uint32_t rsplen,
    uint32_t datalen, int32_t sleep)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	int32_t(*cb) (caddr_t);
	unsigned long real_len;
	uint32_t pkt_size;
	emlxs_buf_t *sbp;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	emlxs_pkt_cookie_t *pkt_cookie;

	pkt_size =
	    sizeof (fc_packet_t) + sizeof (emlxs_buf_t) +
	    sizeof (emlxs_pkt_cookie_t);
#else
	uint32_t num_cookie;

	pkt_size = sizeof (fc_packet_t) + sizeof (emlxs_buf_t);
#endif /* >= EMLXS_MODREV3 */


	/* Allocate some space */
	if (!(pkt = (fc_packet_t *)kmem_alloc(pkt_size, sleep))) {
		return (NULL);
	}

	bzero(pkt, pkt_size);

	cb = (sleep == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	pkt->pkt_ulp_private = (opaque_t)port;
	pkt->pkt_fca_private =
	    (opaque_t)((uintptr_t)pkt + sizeof (fc_packet_t));
	pkt->pkt_comp = emlxs_pkt_callback;
	pkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
	pkt->pkt_cmdlen = cmdlen;
	pkt->pkt_rsplen = rsplen;
	pkt->pkt_datalen = datalen;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	pkt_cookie =
	    (emlxs_pkt_cookie_t *)((uintptr_t)pkt + sizeof (fc_packet_t) +
	    sizeof (emlxs_buf_t));
	pkt->pkt_cmd_cookie = &pkt_cookie->pkt_cmd_cookie;
	pkt->pkt_resp_cookie = &pkt_cookie->pkt_resp_cookie;
	pkt->pkt_data_cookie = &pkt_cookie->pkt_data_cookie;
#endif /* >= EMLXS_MODREV3 */

	if (cmdlen) {
		/* Allocate the cmd buf */
		if (ddi_dma_alloc_handle(hba->dip, &hba->dma_attr_1sg, cb,
		    NULL, &pkt->pkt_cmd_dma) != DDI_SUCCESS) {
			cmdlen = 0;
			rsplen = 0;
			datalen = 0;
			goto failed;
		}

		if (ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmdlen,
		    &emlxs_data_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
		    (caddr_t *)&pkt->pkt_cmd, &real_len,
		    &pkt->pkt_cmd_acc) != DDI_SUCCESS) {
			(void) ddi_dma_free_handle(&pkt->pkt_cmd_dma);

			cmdlen = 0;
			rsplen = 0;
			datalen = 0;
			goto failed;
		}

		if (real_len < cmdlen) {
			(void) ddi_dma_mem_free(&pkt->pkt_cmd_acc);
			(void) ddi_dma_free_handle(&pkt->pkt_cmd_dma);

			cmdlen = 0;
			rsplen = 0;
			datalen = 0;
			goto failed;
		}
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		if (ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL,
		    pkt->pkt_cmd, real_len,
		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb, NULL,
		    pkt->pkt_cmd_cookie,
		    &pkt->pkt_cmd_cookie_cnt) != DDI_DMA_MAPPED)
#else
		if (ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL,
		    pkt->pkt_cmd, real_len,
		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb, NULL,
		    &pkt->pkt_cmd_cookie, &num_cookie) != DDI_DMA_MAPPED)
#endif /* >= EMLXS_MODREV3 */
		{
			(void) ddi_dma_mem_free(&pkt->pkt_cmd_acc);
			(void) ddi_dma_free_handle(&pkt->pkt_cmd_dma);

			cmdlen = 0;
			rsplen = 0;
			datalen = 0;
			goto failed;
		}
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		if (pkt->pkt_cmd_cookie_cnt != 1)
#else
		if (num_cookie != 1)
#endif /* >= EMLXS_MODREV3 */
		{
			rsplen = 0;
			datalen = 0;
			goto failed;
		}

		bzero(pkt->pkt_cmd, cmdlen);

	}

	if (rsplen) {
		/* Allocate the rsp buf */
		if (ddi_dma_alloc_handle(hba->dip, &hba->dma_attr_1sg, cb,
		    NULL, &pkt->pkt_resp_dma) != DDI_SUCCESS) {
			rsplen = 0;
			datalen = 0;
			goto failed;

		}

		if (ddi_dma_mem_alloc(pkt->pkt_resp_dma, rsplen,
		    &emlxs_data_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
		    (caddr_t *)&pkt->pkt_resp, &real_len,
		    &pkt->pkt_resp_acc) != DDI_SUCCESS) {
			(void) ddi_dma_free_handle(&pkt->pkt_resp_dma);

			rsplen = 0;
			datalen = 0;
			goto failed;
		}

		if (real_len < rsplen) {
			(void) ddi_dma_mem_free(&pkt->pkt_resp_acc);
			(void) ddi_dma_free_handle(&pkt->pkt_resp_dma);

			rsplen = 0;
			datalen = 0;
			goto failed;
		}
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		if (ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL,
		    pkt->pkt_resp, real_len,
		    DDI_DMA_READ | DDI_DMA_CONSISTENT, cb, NULL,
		    pkt->pkt_resp_cookie,
		    &pkt->pkt_resp_cookie_cnt) != DDI_DMA_MAPPED)
#else
		if (ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL,
		    pkt->pkt_resp, real_len,
		    DDI_DMA_READ | DDI_DMA_CONSISTENT, cb, NULL,
		    &pkt->pkt_resp_cookie, &num_cookie) != DDI_DMA_MAPPED)
#endif /* >= EMLXS_MODREV3 */
		{
			(void) ddi_dma_mem_free(&pkt->pkt_resp_acc);
			(void) ddi_dma_free_handle(&pkt->pkt_resp_dma);

			rsplen = 0;
			datalen = 0;
			goto failed;
		}
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		if (pkt->pkt_resp_cookie_cnt != 1)
#else
		if (num_cookie != 1)
#endif /* >= EMLXS_MODREV3 */
		{
			datalen = 0;
			goto failed;
		}

		bzero(pkt->pkt_resp, rsplen);

	}

	if (datalen) {
		/* Allocate the data buf */
		if (ddi_dma_alloc_handle(hba->dip, &hba->dma_attr_1sg, cb,
		    NULL, &pkt->pkt_data_dma) != DDI_SUCCESS) {
			datalen = 0;
			goto failed;
		}

		if (ddi_dma_mem_alloc(pkt->pkt_data_dma, datalen,
		    &emlxs_data_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
		    (caddr_t *)&pkt->pkt_data, &real_len,
		    &pkt->pkt_data_acc) != DDI_SUCCESS) {
			(void) ddi_dma_free_handle(&pkt->pkt_data_dma);

			datalen = 0;
			goto failed;
		}

		if (real_len < datalen) {
			(void) ddi_dma_mem_free(&pkt->pkt_data_acc);
			(void) ddi_dma_free_handle(&pkt->pkt_data_dma);

			datalen = 0;
			goto failed;
		}
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		if (ddi_dma_addr_bind_handle(pkt->pkt_data_dma, NULL,
		    pkt->pkt_data, real_len,
		    DDI_DMA_READ | DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb,
		    NULL, pkt->pkt_data_cookie,
		    &pkt->pkt_data_cookie_cnt) != DDI_DMA_MAPPED)
#else
		if (ddi_dma_addr_bind_handle(pkt->pkt_data_dma, NULL,
		    pkt->pkt_data, real_len,
		    DDI_DMA_READ | DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb,
		    NULL, &pkt->pkt_data_cookie,
		    &num_cookie) != DDI_DMA_MAPPED)
#endif /* >= EMLXS_MODREV3 */
		{
			(void) ddi_dma_mem_free(&pkt->pkt_data_acc);
			(void) ddi_dma_free_handle(&pkt->pkt_data_dma);

			datalen = 0;
			goto failed;
		}
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		if (pkt->pkt_data_cookie_cnt != 1)
#else
		if (num_cookie != 1)
#endif /* >= EMLXS_MODREV3 */
		{
			goto failed;
		}

		bzero(pkt->pkt_data, datalen);
	}

	sbp = PKT2PRIV(pkt);
	bzero((void *)sbp, sizeof (emlxs_buf_t));

	mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, DDI_INTR_PRI(hba->intr_arg));
	sbp->pkt_flags = PACKET_VALID | PACKET_ULP_OWNED | PACKET_ALLOCATED;
	sbp->port = port;
	sbp->pkt = pkt;
	sbp->iocbq.sbp = sbp;

	return (pkt);

failed:

	if (datalen) {
		(void) ddi_dma_unbind_handle(pkt->pkt_data_dma);
		(void) ddi_dma_mem_free(&pkt->pkt_data_acc);
		(void) ddi_dma_free_handle(&pkt->pkt_data_dma);
	}

	if (rsplen) {
		(void) ddi_dma_unbind_handle(pkt->pkt_resp_dma);
		(void) ddi_dma_mem_free(&pkt->pkt_resp_acc);
		(void) ddi_dma_free_handle(&pkt->pkt_resp_dma);
	}

	if (cmdlen) {
		(void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma);
		(void) ddi_dma_mem_free(&pkt->pkt_cmd_acc);
		(void) ddi_dma_free_handle(&pkt->pkt_cmd_dma);
	}

	if (pkt) {
		kmem_free(pkt, pkt_size);
	}

	return (NULL);

} /* emlxs_pkt_alloc() */
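
Several of the examples above treat a cookie count other than one as a failure.
That constraint is normally encoded up front in the ddi_dma_attr_t handed to
ddi_dma_alloc_handle() by setting dma_attr_sgllen to 1, so the bind either
produces a single physically contiguous cookie or fails cleanly.  The template
below is a sketch: the numeric limits are placeholders, and only the sgllen
setting is the point being illustrated.

static ddi_dma_attr_t example_dma_attr_1cookie = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0x0000000000000000ull,	/* dma_attr_addr_lo */
	0xFFFFFFFFFFFFFFFFull,	/* dma_attr_addr_hi */
	0x00000000FFFFFFFFull,	/* dma_attr_count_max */
	0x0000000000001000ull,	/* dma_attr_align */
	0x00000FFF,		/* dma_attr_burstsizes */
	0x00000001,		/* dma_attr_minxfer */
	0x00000000FFFFFFFFull,	/* dma_attr_maxxfer */
	0xFFFFFFFFFFFFFFFFull,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen: request a single cookie */
	0x00000001,		/* dma_attr_granular */
	0			/* dma_attr_flags */
};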