Example #1
static virtionet_dma_t *
virtionet_dma_setup(virtionet_state_t *sp, size_t len)
{
	virtionet_dma_t		*dmap;
	int			rc;

	dmap = kmem_zalloc(sizeof (*dmap), KM_SLEEP);

	vq_dma_attr.dma_attr_flags |= DDI_DMA_FORCE_PHYSICAL;

	rc = ddi_dma_alloc_handle(sp->dip, &vq_dma_attr, DDI_DMA_SLEEP,
	    NULL, &dmap->hdl);

	if (rc == DDI_DMA_BADATTR) {
		cmn_err(CE_NOTE, "Failed to allocate physical DMA; "
		    "falling back to virtual DMA");
		vq_dma_attr.dma_attr_flags &= (~DDI_DMA_FORCE_PHYSICAL);
		rc = ddi_dma_alloc_handle(sp->dip, &vq_dma_attr, DDI_DMA_SLEEP,
		    NULL, &dmap->hdl);
	}

	if (rc != DDI_SUCCESS) {
		kmem_free(dmap, sizeof (*dmap));
		return (NULL);
	}

	rc = ddi_dma_mem_alloc(dmap->hdl, len, &virtio_native_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dmap->addr,
	    &dmap->len, &dmap->acchdl);
	if (rc != DDI_SUCCESS) {
		ddi_dma_free_handle(&dmap->hdl);
		kmem_free(dmap, sizeof (*dmap));
		return (NULL);
	}

	bzero(dmap->addr, dmap->len);

	rc = ddi_dma_addr_bind_handle(dmap->hdl, NULL, dmap->addr,
	    dmap->len, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
	    NULL, &dmap->cookie, &dmap->ccount);
	if (rc != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&dmap->acchdl);
		ddi_dma_free_handle(&dmap->hdl);
		kmem_free(dmap, sizeof (*dmap));
		return (NULL);
	}
	ASSERT(dmap->ccount == 1);

	return (dmap);
}
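
The matching teardown path is not part of this snippet. As a minimal sketch only, assuming a hypothetical helper name (virtionet_dma_teardown) and the same fields used above, the resources would be released in the reverse order of setup:

static void
virtionet_dma_teardown(virtionet_dma_t *dmap)	/* hypothetical helper */
{
	if (dmap == NULL)
		return;
	/* Reverse the setup order: unbind, free memory, free handle */
	(void) ddi_dma_unbind_handle(dmap->hdl);
	ddi_dma_mem_free(&dmap->acchdl);
	ddi_dma_free_handle(&dmap->hdl);
	kmem_free(dmap, sizeof (*dmap));
}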
Example #2
static pcn_buf_t *
pcn_allocbuf(pcn_t *pcnp)
{
	pcn_buf_t		*buf;
	size_t			len;
	unsigned		ccnt;
	ddi_dma_cookie_t	dmac;

	buf = kmem_zalloc(sizeof (*buf), KM_SLEEP);

	if (ddi_dma_alloc_handle(pcnp->pcn_dip, &pcn_dma_attr, DDI_DMA_SLEEP,
	    NULL, &buf->pb_dmah) != DDI_SUCCESS) {
		kmem_free(buf, sizeof (*buf));
		return (NULL);
	}

	if (ddi_dma_mem_alloc(buf->pb_dmah, PCN_BUFSZ, &pcn_bufattr,
	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &buf->pb_buf, &len,
	    &buf->pb_acch) != DDI_SUCCESS) {
		pcn_destroybuf(buf);
		return (NULL);
	}

	if (ddi_dma_addr_bind_handle(buf->pb_dmah, NULL, buf->pb_buf, len,
	    DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &dmac,
	    &ccnt) != DDI_DMA_MAPPED) {
		pcn_destroybuf(buf);
		return (NULL);
	}
	buf->pb_paddr = dmac.dmac_address;

	return (buf);
}
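
pcn_destroybuf() is used for cleanup above but not shown in this snippet. A sketch of what it plausibly does, assuming it must cope with partially constructed buffers (field names taken from the code above):

static void
pcn_destroybuf(pcn_buf_t *buf)	/* sketch; not the verbatim driver code */
{
	if (buf == NULL)
		return;
	if (buf->pb_paddr != 0)
		(void) ddi_dma_unbind_handle(buf->pb_dmah);
	if (buf->pb_acch != NULL)
		ddi_dma_mem_free(&buf->pb_acch);
	if (buf->pb_dmah != NULL)
		ddi_dma_free_handle(&buf->pb_dmah);
	kmem_free(buf, sizeof (*buf));
}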
Example #3
static int
bd_xfer_ctor(void *buf, void *arg, int kmflag)
{
	bd_xfer_impl_t	*xi;
	bd_t		*bd = arg;
	int		(*dcb)(caddr_t);

	if (kmflag == KM_PUSHPAGE || kmflag == KM_SLEEP) {
		dcb = DDI_DMA_SLEEP;
	} else {
		dcb = DDI_DMA_DONTWAIT;
	}

	xi = buf;
	bzero(xi, sizeof (*xi));
	xi->i_bd = bd;

	if (bd->d_use_dma) {
		if (ddi_dma_alloc_handle(bd->d_dip, &bd->d_dma, dcb, NULL,
		    &xi->i_dmah) != DDI_SUCCESS) {
			return (-1);
		}
	}

	return (0);
}
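
The corresponding kmem cache destructor is not included here; a minimal sketch, assuming the name bd_xfer_dtor, only has to release the handle allocated by the constructor:

/* ARGSUSED */
static void
bd_xfer_dtor(void *buf, void *arg)	/* sketch; name assumed */
{
	bd_xfer_impl_t	*xi = buf;

	if (xi->i_dmah != NULL) {
		ddi_dma_free_handle(&xi->i_dmah);
		xi->i_dmah = NULL;
	}
}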
Example #4
/*
 * Allocate /size/ bytes of contiguous DMA-able memory.
 *
 * Returns:
 *    0 on success, non-zero on failure.
 */
static int
vmxnet3_alloc_dma_mem(vmxnet3_softc_t *dp, vmxnet3_dmabuf_t *dma, size_t size,
    boolean_t canSleep, ddi_dma_attr_t *dma_attrs)
{
	ddi_dma_cookie_t cookie;
	uint_t cookieCount;
	int dmaerr, err = 0;
	int (*cb) (caddr_t) = canSleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	ASSERT(size != 0);

	/*
	 * Allocate a DMA handle
	 */
	if ((dmaerr = ddi_dma_alloc_handle(dp->dip, dma_attrs, cb, NULL,
	    &dma->dmaHandle)) != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "ddi_dma_alloc_handle() failed: %d", dmaerr);
		err = vmxnet3_dmaerr2errno(dmaerr);
		goto error;
	}

	/*
	 * Allocate memory
	 */
	if (ddi_dma_mem_alloc(dma->dmaHandle, size, &vmxnet3_dev_attr,
	    DDI_DMA_CONSISTENT, cb, NULL, &dma->buf, &dma->bufLen,
	    &dma->dataHandle) != DDI_SUCCESS) {
		VMXNET3_WARN(dp, "ddi_dma_mem_alloc() failed");
		err = ENOMEM;
		goto error_dma_handle;
	}

	/*
	 * Map the memory
	 */
	if ((dmaerr = ddi_dma_addr_bind_handle(dma->dmaHandle, NULL, dma->buf,
	    dma->bufLen, DDI_DMA_RDWR | DDI_DMA_STREAMING, cb, NULL, &cookie,
	    &cookieCount)) != DDI_DMA_MAPPED) {
		VMXNET3_WARN(dp, "ddi_dma_addr_bind_handle() failed: %d",
		    dmaerr);
		err = vmxnet3_dmaerr2errno(dmaerr);
		goto error_dma_mem;
	}

	ASSERT(cookieCount == 1);
	dma->bufPA = cookie.dmac_laddress;

	return (0);

error_dma_mem:
	ddi_dma_mem_free(&dma->dataHandle);
error_dma_handle:
	ddi_dma_free_handle(&dma->dmaHandle);
error:
	dma->buf = NULL;
	dma->bufPA = NULL;
	dma->bufLen = 0;
	return (err);
}
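
The free counterpart is not shown. As a sketch only, assuming a hypothetical vmxnet3_free_dma_mem() with the same vmxnet3_dmabuf_t layout, teardown mirrors the error labels above:

static void
vmxnet3_free_dma_mem(vmxnet3_dmabuf_t *dma)	/* hypothetical counterpart */
{
	/* Undo bind, memory allocation and handle allocation, in order */
	(void) ddi_dma_unbind_handle(dma->dmaHandle);
	ddi_dma_mem_free(&dma->dataHandle);
	ddi_dma_free_handle(&dma->dmaHandle);

	dma->buf = NULL;
	dma->bufPA = 0;
	dma->bufLen = 0;
}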
Example #5
static int
rxbuf_ctor(void *arg1, void *arg2, int kmflag)
{
	struct rxbuf *rxb = arg1;
	struct rxbuf_cache_params *p = arg2;
	size_t real_len;
	ddi_dma_cookie_t cookie;
	uint_t ccount = 0;
	int (*callback)(caddr_t);
	int rc = ENOMEM;

	if (kmflag & KM_SLEEP)
		callback = DDI_DMA_SLEEP;
	else
		callback = DDI_DMA_DONTWAIT;

	rc = ddi_dma_alloc_handle(p->dip, &p->dma_attr_rx, callback, 0,
	    &rxb->dhdl);
	if (rc != DDI_SUCCESS)
		return (rc == DDI_DMA_BADATTR ? EINVAL : ENOMEM);

	rc = ddi_dma_mem_alloc(rxb->dhdl, p->buf_size, &p->acc_attr_rx,
	    DDI_DMA_STREAMING, callback, 0, &rxb->va, &real_len, &rxb->ahdl);
	if (rc != DDI_SUCCESS) {
		rc = ENOMEM;
		goto fail1;
	}

	rc = ddi_dma_addr_bind_handle(rxb->dhdl, NULL, rxb->va, p->buf_size,
	    DDI_DMA_READ | DDI_DMA_STREAMING, NULL, NULL, &cookie, &ccount);
	if (rc != DDI_DMA_MAPPED) {
		if (rc == DDI_DMA_INUSE)
			rc = EBUSY;
		else if (rc == DDI_DMA_TOOBIG)
			rc = E2BIG;
		else
			rc = ENOMEM;
		goto fail2;
	}

	if (ccount != 1) {
		rc = E2BIG;
		goto fail3;
	}

	rxb->ref_cnt = 0;
	rxb->buf_size = p->buf_size;
	rxb->freefunc.free_arg = (caddr_t)rxb;
	rxb->freefunc.free_func = rxbuf_free;
	rxb->ba = cookie.dmac_laddress;

	return (0);

fail3:	(void) ddi_dma_unbind_handle(rxb->dhdl);
fail2:	ddi_dma_mem_free(&rxb->ahdl);
fail1:	ddi_dma_free_handle(&rxb->dhdl);
	return (rc);
}
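
The matching cache destructor is not part of this snippet; a minimal sketch, assuming the usual kmem_cache destructor signature and the fields above, simply replays the fail3/fail2/fail1 sequence:

/* ARGSUSED */
static void
rxbuf_dtor(void *arg1, void *arg2)	/* sketch; name assumed */
{
	struct rxbuf *rxb = arg1;

	(void) ddi_dma_unbind_handle(rxb->dhdl);
	ddi_dma_mem_free(&rxb->ahdl);
	ddi_dma_free_handle(&rxb->dhdl);
}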
Example #6
static int
pcn_allocrxring(pcn_t *pcnp)
{
	int			rval;
	int			i;
	size_t			len;
	size_t			size;
	ddi_dma_cookie_t	dmac;
	unsigned		ncookies;
	caddr_t			kaddr;

	size = PCN_RXRING * sizeof (pcn_rx_desc_t);

	rval = ddi_dma_alloc_handle(pcnp->pcn_dip, &pcn_dmadesc_attr,
	    DDI_DMA_SLEEP, NULL, &pcnp->pcn_rxdesc_dmah);
	if (rval != DDI_SUCCESS) {
		pcn_error(pcnp->pcn_dip, "unable to allocate DMA handle for rx "
		    "descriptors");
		return (DDI_FAILURE);
	}

	rval = ddi_dma_mem_alloc(pcnp->pcn_rxdesc_dmah, size, &pcn_devattr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &kaddr, &len,
	    &pcnp->pcn_rxdesc_acch);
	if (rval != DDI_SUCCESS) {
		pcn_error(pcnp->pcn_dip, "unable to allocate DMA memory for rx "
		    "descriptors");
		return (DDI_FAILURE);
	}

	rval = ddi_dma_addr_bind_handle(pcnp->pcn_rxdesc_dmah, NULL, kaddr,
	    size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dmac,
	    &ncookies);
	if (rval != DDI_DMA_MAPPED) {
		pcn_error(pcnp->pcn_dip, "unable to bind DMA for rx "
		    "descriptors");
		return (DDI_FAILURE);
	}

	ASSERT(ncookies == 1);

	pcnp->pcn_rxdesc_paddr = dmac.dmac_address;
	pcnp->pcn_rxdescp = (void *)kaddr;

	pcnp->pcn_rxbufs = kmem_zalloc(PCN_RXRING * sizeof (pcn_buf_t *),
	    KM_SLEEP);

	for (i = 0; i < PCN_RXRING; i++) {
		pcn_buf_t *rxb = pcn_allocbuf(pcnp);
		if (rxb == NULL)
			return (DDI_FAILURE);
		pcnp->pcn_rxbufs[i] = rxb;
	}

	return (DDI_SUCCESS);
}
Example #7
/*
 * WQ map handle constructor
 *
 * wqmd - pointer to WQE mapping handle descriptor
 * wq - Pointer to WQ structure
 *
 * return DDI_SUCCESS=>success, DDI_FAILURE=>error
 */
static int
oce_wqm_ctor(oce_wq_mdesc_t *wqmd, struct oce_wq *wq)
{
	struct oce_dev *dev;
	int ret;

	dev = wq->parent;
	/* Allocate DMA handle */
	ret = ddi_dma_alloc_handle(dev->dip, &tx_map_dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &wqmd->dma_handle);

	return (ret);
} /* oce_wqm_ctor */
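
Since the constructor only allocates a DMA handle, the destructor is correspondingly small. A sketch, with the signature assumed rather than taken from the driver:

static void
oce_wqm_dtor(oce_wq_mdesc_t *wqmd)	/* sketch; signature assumed */
{
	if (wqmd->dma_handle != NULL) {
		ddi_dma_free_handle(&wqmd->dma_handle);
		wqmd->dma_handle = NULL;
	}
}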
Example #8
/**
 * Virtio Net TX buffer constructor for kmem_cache_create().
 *
 * @param pvBuf             Pointer to the allocated buffer.
 * @param pvArg             Opaque private data.
 * @param fFlags            Propagated KM flag values.
 *
 * @return 0 on success, or -1 on failure.
 */
static int VirtioNetTxBufCreate(void *pvBuf, void *pvArg, int fFlags)
{
    virtio_net_txbuf_t *pTxBuf = pvBuf;
    PVIRTIODEVICE pDevice = pvArg;

    /* @todo ncookies handles? */
    int rc = ddi_dma_alloc_handle(pDevice->pDip, &g_VirtioNetBufDmaAttr,
                                  fFlags & KM_NOSLEEP ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP,
                                  0 /* Arg */, &pTxBuf->hDMA);
    if (rc == DDI_SUCCESS)
        return 0;
    return -1;
}
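
The paired kmem_cache destructor is not shown; a minimal sketch, assuming the hypothetical name VirtioNetTxBufDestroy, only frees the handle allocated above:

static void VirtioNetTxBufDestroy(void *pvBuf, void *pvArg)  /* hypothetical */
{
    virtio_net_txbuf_t *pTxBuf = pvBuf;
    NOREF(pvArg);
    ddi_dma_free_handle(&pTxBuf->hDMA);
}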
Example #9
efe_ring_t *
efe_ring_alloc(dev_info_t *dip, size_t len)
{
	efe_ring_t *rp;
	size_t rlen;
	uint_t ccount;

	ASSERT(len > 1);

	rp = kmem_zalloc(sizeof (efe_ring_t), KM_SLEEP);
	rp->r_len = len;

	if (ddi_dma_alloc_handle(dip, &efe_dma_attr, DDI_DMA_SLEEP, NULL,
	    &rp->r_dmah) != DDI_SUCCESS) {
		efe_error(dip, "unable to allocate DMA handle!");
		goto failure;
	}

	if (ddi_dma_mem_alloc(rp->r_dmah, DESCSZ(len), &efe_buf_acc_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, (caddr_t *)&rp->r_descp,
	    &rlen, &rp->r_acch) != DDI_SUCCESS) {
		efe_error(dip, "unable to allocate descriptors!");
		goto failure;
	}

	if (ddi_dma_addr_bind_handle(rp->r_dmah, NULL, (caddr_t)rp->r_descp,
	    DESCSZ(len), DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &rp->r_dmac, &ccount) != DDI_DMA_MAPPED) {
		efe_error(dip, "unable to bind DMA handle to descriptors!");
		goto failure;
	}

	rp->r_bufpp = kmem_zalloc(BUFPSZ(len), KM_SLEEP);

	for (int i = 0; i < len; ++i) {
		efe_buf_t *bp = efe_buf_alloc(dip, BUFSZ);
		if (bp == NULL) {
			goto failure;
		}
		rp->r_bufpp[i] = bp;
	}

	return (rp);

failure:
	efe_ring_free(&rp);

	return (NULL);
}
Example #10
void *
xen_alloc_pages(pgcnt_t cnt)
{
	size_t len;
	int a = xen_alloc_cnt++;
	caddr_t addr;

	ASSERT(xen_alloc_cnt < MAX_ALLOCATIONS);
	if (ddi_dma_alloc_handle(xpv_dip, &xpv_dma_attr, DDI_DMA_SLEEP, 0,
	    &xpv_dma_handle[a]) != DDI_SUCCESS)
		return (NULL);

	if (ddi_dma_mem_alloc(xpv_dma_handle[a], MMU_PAGESIZE * cnt,
	    &xpv_accattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    &addr, &len, &xpv_dma_acchandle[a]) != DDI_SUCCESS) {
		ddi_dma_free_handle(&xpv_dma_handle[a]);
		cmn_err(CE_WARN, "Couldn't allocate memory for xpv devices");
		return (NULL);
	}
	return (addr);
}
Example #11
void
dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
	ddi_dma_cookie_t *cp)
{
	register ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	struct fast_dvma *nexus_private;
	struct dvma_ops *nexus_funcptr;
	ddi_dma_attr_t dma_attr;
	uint_t ccnt;

	if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
		nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
		nexus_funcptr = (struct dvma_ops *)nexus_private->ops;
		(void) (*nexus_funcptr->dvma_kaddr_load)(h, a, len, index, cp);
	} else {
		ddi_dma_handle_t handle;
		ddi_dma_lim_t *limp;

		limp = (ddi_dma_lim_t *)mp->dmai_mapping;
		dma_attr.dma_attr_version = DMA_ATTR_V0;
		dma_attr.dma_attr_addr_lo = limp->dlim_addr_lo;
		dma_attr.dma_attr_addr_hi = limp->dlim_addr_hi;
		dma_attr.dma_attr_count_max = limp->dlim_cntr_max;
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_burstsizes = limp->dlim_burstsizes;
		dma_attr.dma_attr_minxfer = limp->dlim_minxfer;
		dma_attr.dma_attr_maxxfer = 0xFFFFFFFFull;
		dma_attr.dma_attr_seg = 0xFFFFFFFFull;
		dma_attr.dma_attr_sgllen = 1;
		dma_attr.dma_attr_granular = 1;
		dma_attr.dma_attr_flags = 0;
		(void) ddi_dma_alloc_handle(HD, &dma_attr, DDI_DMA_SLEEP, NULL,
		    &handle);
		(void) ddi_dma_addr_bind_handle(handle, NULL, a, len,
		    DDI_DMA_RDWR, DDI_DMA_SLEEP, NULL, cp, &ccnt);
		((ddi_dma_handle_t *)mp->dmai_minfo)[index] = handle;
	}
}
Example #12
efe_buf_t *
efe_buf_alloc(dev_info_t *dip, size_t len)
{
	efe_buf_t *bp;
	size_t rlen;
	uint_t ccount;

	bp = kmem_zalloc(sizeof (efe_buf_t), KM_SLEEP);
	bp->b_len = len;

	if (ddi_dma_alloc_handle(dip, &efe_dma_attr, DDI_DMA_SLEEP, NULL,
	    &bp->b_dmah) != DDI_SUCCESS) {
		efe_error(dip, "unable to allocate DMA handle!");
		goto failure;
	}

	if (ddi_dma_mem_alloc(bp->b_dmah, len, &efe_buf_acc_attr,
	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &bp->b_kaddr, &rlen,
	    &bp->b_acch) != DDI_SUCCESS) {
		efe_error(dip, "unable to allocate buffer!");
		goto failure;
	}

	if (ddi_dma_addr_bind_handle(bp->b_dmah, NULL, bp->b_kaddr,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
	    &bp->b_dmac, &ccount) != DDI_DMA_MAPPED) {
		efe_error(dip, "unable to bind DMA handle to buffer!");
		goto failure;
	}

	return (bp);

failure:
	efe_buf_free(&bp);

	return (NULL);
}
Example #13
/*
 * igb_alloc_tcb_lists - Memory allocation for the transmit control blocks
 * of one ring.
 */
static int
igb_alloc_tcb_lists(igb_tx_ring_t *tx_ring)
{
	int i;
	int ret;
	tx_control_block_t *tcb;
	dma_buffer_t *tx_buf;
	igb_t *igb = tx_ring->igb;
	dev_info_t *devinfo = igb->dip;

	/*
	 * Allocate memory for the work list.
	 */
	tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->ring_size, KM_NOSLEEP);

	if (tx_ring->work_list == NULL) {
		igb_error(igb,
		    "Could not allocate memory for tx work list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->free_list == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for tx free list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the tx control blocks of free list.
	 */
	tx_ring->tcb_area =
	    kmem_zalloc(sizeof (tx_control_block_t) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->tcb_area == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;

		igb_error(igb,
		    "Could not allocate memory for tx control blocks");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate dma memory for the tx control block of free list.
	 */
	tcb = tx_ring->tcb_area;
	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		tx_ring->free_list[i] = tcb;

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting.
		 */
		ret = ddi_dma_alloc_handle(devinfo,
		    &igb_tx_dma_attr,
		    DDI_DMA_DONTWAIT, NULL,
		    &tcb->tx_dma_handle);
		if (ret != DDI_SUCCESS) {
			tcb->tx_dma_handle = NULL;
			igb_error(igb,
			    "Could not allocate tx dma handle: %x", ret);
			goto alloc_tcb_lists_fail;
		}

		/*
		 * Pre-allocate transmit buffers for packets whose size
		 * is less than bcopy_thresh.
		 */
		tx_buf = &tcb->tx_buf;

		ret = igb_alloc_dma_buffer(igb,
		    tx_buf, igb->tx_buf_size);

		if (ret != IGB_SUCCESS) {
			ASSERT(tcb->tx_dma_handle != NULL);
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
			igb_error(igb, "Allocate tx dma buffer failed");
			goto alloc_tcb_lists_fail;
		}
		tcb->last_index = MAX_TX_RING_SIZE;
	}

	return (IGB_SUCCESS);

alloc_tcb_lists_fail:
	igb_free_tcb_lists(tx_ring);

	return (IGB_FAILURE);
}
Example #14
0
/*
 * igb_alloc_dma_buffer - Allocate DMA resources for a DMA buffer
 */
static int
igb_alloc_dma_buffer(igb_t *igb,
    dma_buffer_t *buf, size_t size)
{
	int ret;
	dev_info_t *devinfo = igb->dip;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t cookie_num;

	ret = ddi_dma_alloc_handle(devinfo,
	    &igb_buf_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &buf->dma_handle);

	if (ret != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		igb_error(igb,
		    "Could not allocate dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &igb_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &buf->address,
	    &len, &buf->acc_handle);

	if (ret != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not allocate dma buffer memory: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		buf->dma_address = NULL;
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_error(igb,
		    "Could not bind dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (IGB_SUCCESS);
}
Example #15
/*
 * igb_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 */
static int
igb_alloc_rbd_ring(igb_rx_data_t *rx_data)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = rx_data->rx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_rx_desc) * rx_data->ring_size;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &rx_data->rbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma handle: %x", ret);
		rx_data->rbd_area.dma_handle = NULL;
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(rx_data->rbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&rx_data->rbd_area.address,
	    &len, &rx_data->rbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate rbd dma memory: %x", ret);
		rx_data->rbd_area.acc_handle = NULL;
		rx_data->rbd_area.address = NULL;
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire receive buffer descriptor area to zero
	 */
	bzero(rx_data->rbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	ret = ddi_dma_addr_bind_handle(rx_data->rbd_area.dma_handle,
	    NULL, (caddr_t)rx_data->rbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind rbd dma resource: %x", ret);
		rx_data->rbd_area.dma_address = NULL;
		if (rx_data->rbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
			rx_data->rbd_area.acc_handle = NULL;
			rx_data->rbd_area.address = NULL;
		}
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	rx_data->rbd_area.dma_address = cookie.dmac_laddress;
	rx_data->rbd_area.size = len;

	rx_data->rbd_ring = (union e1000_adv_rx_desc *)(uintptr_t)
	    rx_data->rbd_area.address;

	return (IGB_SUCCESS);
}
Example #16
/*
 * igb_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 */
static int
igb_alloc_tbd_ring(igb_tx_ring_t *tx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = tx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_tx_desc) * tx_ring->ring_size;

	/*
	 * If tx head write-back is enabled, an extra tbd is allocated
	 * to save the head write-back value
	 */
	if (igb->tx_head_wb_enable) {
		size += sizeof (union e1000_adv_tx_desc);
	}

	/*
	 * Allocate a DMA handle for the transmit descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &tx_ring->tbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma handle: %x", ret);
		tx_ring->tbd_area.dma_handle = NULL;

		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tx_ring->tbd_area.address,
	    &len, &tx_ring->tbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_error(igb,
		    "Could not allocate tbd dma memory: %x", ret);
		tx_ring->tbd_area.acc_handle = NULL;
		tx_ring->tbd_area.address = NULL;
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	bzero(tx_ring->tbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound to
	 * the memory address.
	 */
	ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_error(igb,
		    "Could not bind tbd dma resource: %x", ret);
		tx_ring->tbd_area.dma_address = NULL;
		if (tx_ring->tbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
			tx_ring->tbd_area.acc_handle = NULL;
			tx_ring->tbd_area.address = NULL;
		}
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
	tx_ring->tbd_area.size = len;

	tx_ring->tbd_ring = (union e1000_adv_tx_desc *)(uintptr_t)
	    tx_ring->tbd_area.address;

	return (IGB_SUCCESS);
}
Example #17
/*
 * Allocate/free a vq.
 */
struct virtqueue *
virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
    unsigned int indirect_num, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize = 0;
	int ret;
	unsigned int ncookies;
	size_t len;
	struct virtqueue *vq;

	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
	vq_size = ddi_get16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
	if (vq_size == 0) {
		dev_err(sc->sc_dev, CE_WARN,
		    "virtqueue does not exist, index %d for %s\n", index, name);
		goto out;
	}

	vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);

	/* size 0 => use native vq size, good for receive queues. */
	if (size)
		vq_size = MIN(vq_size, size);

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
	    sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
	    sizeof (struct vring_used_elem) * vq_size);

	allocsize = allocsize1 + allocsize2;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for vq %d", index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for vq %d", index);
		goto out_alloc;
	}

	ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
	    (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for vq %d", index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);
	/* and page-aligned buffers. */
	ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);

	(void) memset(vq->vq_vaddr, 0, allocsize);

	/* Make sure all zeros hit the buffer before we point the host to it */
	membar_producer();

	/* set the vq address */
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
	    (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_descs = vq->vq_vaddr;
	vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
	vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);

	ASSERT(indirect_num == 0 ||
	    virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
	vq->vq_indirect_num = indirect_num;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
	    KM_SLEEP);

	ret = virtio_init_vq(sc, vq);
	if (ret)
		goto out_init;

	dev_debug(sc->sc_dev, CE_NOTE,
	    "Allocated %d entries for vq %d:%s (%d indirect descs)",
	    vq_size, index, name, indirect_num * vq_size);

	return (vq);

out_init:
	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
out_bind:
	ddi_dma_mem_free(&vq->vq_dma_acch);
out_alloc:
	ddi_dma_free_handle(&vq->vq_dma_handle);
out_alloc_handle:
	kmem_free(vq, sizeof (struct virtqueue));
out:
	return (NULL);
}
Example #18
static int
virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
{
	int allocsize, num;
	size_t len;
	unsigned int ncookies;
	int ret;

	num = entry->qe_queue->vq_indirect_num;
	ASSERT(num > 1);

	allocsize = sizeof (struct vring_desc) * num;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
	    DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for indirect descriptors, "
		    "entry %d, vq %d", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle, allocsize,
	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&entry->qe_indirect_descs, &len,
	    &entry->qe_indirect_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for indirect descriptors, "
		    "entry %d, vq %d", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_alloc;
	}

	(void) memset(entry->qe_indirect_descs, 0xff, allocsize);

	ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
	    (caddr_t)entry->qe_indirect_descs, len,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &entry->qe_indirect_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for indirect descriptors, "
		    "entry %d, vq %d", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);

	return (0);

out_bind:
	ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
out_alloc:
	ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
out_alloc_handle:

	return (ret);
}
Example #19
/*
 * SMCG_dma_alloc assumes that either rlist_lock mutex is held or
 * that it is called from a point where no interrupts, sends or receives
 * happen.
 */
static int
SMCG_dma_alloc(smcg_t *smcg)
{
	Adapter_Struc		*pAd = smcg->smcg_pAd;
	unsigned int		ramsize = LM_Get_Host_Ram_Size(pAd);
	uint_t			len, ncookies, i, j;
	ddi_dma_cookie_t	cookie;

	/* Allocate resources for shared memory block */
	if (ddi_dma_alloc_handle(smcg->smcg_devinfo, &host_ram_dma_attr,
	    DDI_DMA_SLEEP, 0, &smcg->hostram_dmahandle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (ddi_dma_mem_alloc(smcg->hostram_dmahandle, ramsize, &accattr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    (caddr_t *)&pAd->host_ram_virt_addr,
	    (size_t *)&len, &smcg->hostram_acchandle) != DDI_SUCCESS) {
		ddi_dma_free_handle(&smcg->hostram_dmahandle);
		return (DDI_FAILURE);
	}

	if (ddi_dma_addr_bind_handle(smcg->hostram_dmahandle, NULL,
	    (caddr_t)pAd->host_ram_virt_addr, ramsize,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    &cookie, &ncookies) != DDI_SUCCESS) {
		ddi_dma_mem_free(&smcg->hostram_acchandle);
		ddi_dma_free_handle(&smcg->hostram_dmahandle);
		return (DDI_FAILURE);
	}

	ASSERT(ncookies == 1 && cookie.dmac_size >= ramsize);
	pAd->host_ram_phy_addr = cookie.dmac_address;

	/* Allocate a list of receive buffers */
	smcg->rxbdesc_mem = kmem_zalloc(sizeof (struct smcg_rx_buffer_desc) *
	    pAd->num_of_rx_buffs*2, KM_SLEEP);
	smcg->rx_freelist = (struct smcg_rx_buffer_desc *)smcg->rxbdesc_mem;

	for (i = 0; i < pAd->num_of_rx_buffs * 2; i++) {
		if (ddi_dma_alloc_handle(smcg->smcg_devinfo, &buf_dma_attr,
		    DDI_DMA_SLEEP, 0, &smcg->rx_freelist[i].dmahandle)
		    != DDI_SUCCESS)
			goto failure;

		if (ddi_dma_mem_alloc(smcg->rx_freelist[i].dmahandle,
		    ETHERMAX + 4, &accattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
		    0, (caddr_t *)&smcg->rx_freelist[i].buf, (size_t *)&len,
		    &smcg->rx_freelist[i].acchandle) != DDI_SUCCESS) {
			ddi_dma_free_handle(&smcg->rx_freelist[i].dmahandle);
			goto failure;
		}

		if (ddi_dma_addr_bind_handle(smcg->rx_freelist[i].dmahandle,
		    NULL, (caddr_t)smcg->rx_freelist[i].buf, ETHERMAX + 4,
		    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
		    &cookie, &ncookies) != DDI_SUCCESS) {
			ddi_dma_mem_free(&smcg->rx_freelist[i].acchandle);
			ddi_dma_free_handle(&smcg->rx_freelist[i].dmahandle);
			goto failure;
		}

		ASSERT(ncookies == 1 && cookie.dmac_size >= ETHERMAX+4);
		smcg->rx_freelist[i].physaddr = cookie.dmac_address;
		smcg->rx_freelist[i].smcg = smcg;
		smcg->rx_freelist[i].free_rtn.free_func = SMCG_freertn;
		smcg->rx_freelist[i].free_rtn.free_arg =
		    (char *)&smcg->rx_freelist[i];

		smcg->rx_freelist[i].next = &smcg->rx_freelist[i+1];
	}

	smcg->rx_freelist[i-1].next = NULL;

	/*
	 * Remove one buffer from free list for each receive descriptor,
	 * and associate with an element in the receive ring
	 */
	for (i = 0; i < pAd->num_of_rx_buffs; i++) {
		/* Unlink from free list */
		smcg->bdesc[i] = smcg->rx_freelist;
		smcg->rx_freelist = smcg->bdesc[i]->next;
	}

	smcg->smc_dbuf.fragment_list[0].fragment_length =
	    (ETHERMAX + 4) | (unsigned long)PHYSICAL_ADDR;
	smcg->smc_dbuf.fragment_count = 1;

	/* Allocate the handles to which we bind outgoing data */
	for (i = 0; i < pAd->num_of_tx_buffs; i++)
		for (j = 0; j < SMCG_MAX_TX_MBLKS; j++)
			if (ddi_dma_alloc_handle(smcg->smcg_devinfo,
			    &txdata_attr, DDI_DMA_SLEEP, 0,
			    &smcg->tx_info[i].dmahandle[j]) != DDI_SUCCESS)
				goto failure;

	return (DDI_SUCCESS);

failure:
	SMCG_dma_unalloc(smcg);
	cmn_err(CE_WARN, SMCG_NAME ": could not allocate DMA resources");
	return (DDI_FAILURE);
}
Example #20
/*
 * hermon_check_iommu_bypass()
 *    Context: Only called from attach() path context
 *    XXX This is a DMA allocation routine outside the normal
 *	  path. FMA hardening will not like this.
 */
static void
hermon_check_iommu_bypass(hermon_state_t *state, hermon_cfg_profile_t *cp)
{
	ddi_dma_handle_t	dmahdl;
	ddi_dma_attr_t		dma_attr;
	int			status;
	ddi_acc_handle_t	acc_hdl;
	caddr_t			kaddr;
	size_t			actual_len;
	ddi_dma_cookie_t	cookie;
	uint_t			cookiecnt;

	hermon_dma_attr_init(state, &dma_attr);

	/* Try mapping for IOMMU bypass (Force Physical) */
	dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL |
	    DDI_DMA_RELAXED_ORDERING;

	/*
	 * Call ddi_dma_alloc_handle().  If this returns DDI_DMA_BADATTR then
	 * it is not possible to use IOMMU bypass with our PCI bridge parent.
	 * Since the function we are in can only be called if iommu bypass was
	 * requested in the config profile, we configure for bypass if the
	 * ddi_dma_alloc_handle() was successful.  Otherwise, we configure
	 * for non-bypass (ie: normal) mapping.
	 */
	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
	    DDI_DMA_SLEEP, NULL, &dmahdl);
	if (status == DDI_DMA_BADATTR) {
		cp->cp_iommu_bypass = HERMON_BINDMEM_NORMAL;
		return;
	} else if (status != DDI_SUCCESS) {	/* failed somehow */
		hermon_kernel_data_ro = HERMON_RO_DISABLED;
		hermon_user_data_ro = HERMON_RO_DISABLED;
		cp->cp_iommu_bypass = HERMON_BINDMEM_BYPASS;
		return;
	} else {
		cp->cp_iommu_bypass = HERMON_BINDMEM_BYPASS;
	}

	status = ddi_dma_mem_alloc(dmahdl, 256,
	    &state->hs_reg_accattr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, (caddr_t *)&kaddr, &actual_len, &acc_hdl);

	if (status != DDI_SUCCESS) {		/* failed somehow */
		hermon_kernel_data_ro = HERMON_RO_DISABLED;
		hermon_user_data_ro = HERMON_RO_DISABLED;
		ddi_dma_free_handle(&dmahdl);
		return;
	}

	status = ddi_dma_addr_bind_handle(dmahdl, NULL, kaddr, actual_len,
	    DDI_DMA_RDWR, DDI_DMA_SLEEP, NULL, &cookie, &cookiecnt);

	if (status == DDI_DMA_MAPPED) {
		(void) ddi_dma_unbind_handle(dmahdl);
	} else {
		hermon_kernel_data_ro = HERMON_RO_DISABLED;
		hermon_user_data_ro = HERMON_RO_DISABLED;
	}

	ddi_dma_mem_free(&acc_hdl);
	ddi_dma_free_handle(&dmahdl);
}
Example #21
static virtqueue_t *
virtio_vq_setup(virtionet_state_t *sp, int queue)
{
	virtqueue_t		*vqp = NULL;
	size_t			len;
	size_t			desc_size;
	size_t			avail_size;
	size_t			used_size;
	size_t			part1;
	size_t			part2;
	int			rc;

	vqp = kmem_zalloc(sizeof (*vqp), KM_SLEEP);

	/* save the queue number */
	vqp->vq_num = queue;

	/* Get the queue size */
	VIRTIO_PUT16(sp, VIRTIO_QUEUE_SELECT, queue);
	vqp->vq_size = VIRTIO_GET16(sp, VIRTIO_QUEUE_SIZE);

	desc_size = VRING_DTABLE_SIZE(vqp->vq_size);
	avail_size = VRING_AVAIL_SIZE(vqp->vq_size);
	used_size = VRING_USED_SIZE(vqp->vq_size);

	part1 = VRING_ROUNDUP(desc_size + avail_size);
	part2 = VRING_ROUNDUP(used_size);

	len = part1 + part2;

	vq_dma_attr.dma_attr_flags |= DDI_DMA_FORCE_PHYSICAL;

	rc = ddi_dma_alloc_handle(sp->dip, &vq_dma_attr, DDI_DMA_SLEEP,
	    NULL, &vqp->vq_dma.hdl);

	if (rc == DDI_DMA_BADATTR) {
		cmn_err(CE_NOTE, "Failed to allocate physical DMA; "
		    "falling back to virtual DMA");
		vq_dma_attr.dma_attr_flags &= (~DDI_DMA_FORCE_PHYSICAL);
		rc = ddi_dma_alloc_handle(sp->dip, &vq_dma_attr, DDI_DMA_SLEEP,
		    NULL, &vqp->vq_dma.hdl);
	}

	if (rc != DDI_SUCCESS) {
		kmem_free(vqp, sizeof (*vqp));
		return (NULL);
	}

	rc = ddi_dma_mem_alloc(vqp->vq_dma.hdl, len, &virtio_native_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &vqp->vq_dma.addr,
	    &vqp->vq_dma.len, &vqp->vq_dma.acchdl);
	if (rc != DDI_SUCCESS) {
		ddi_dma_free_handle(&vqp->vq_dma.hdl);
		kmem_free(vqp, sizeof (*vqp));
		return (NULL);
	}

	bzero(vqp->vq_dma.addr, vqp->vq_dma.len);

	rc = ddi_dma_addr_bind_handle(vqp->vq_dma.hdl, NULL, vqp->vq_dma.addr,
	    vqp->vq_dma.len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &vqp->vq_dma.cookie, &vqp->vq_dma.ccount);
	if (rc != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&vqp->vq_dma.acchdl);
		ddi_dma_free_handle(&vqp->vq_dma.hdl);
		kmem_free(vqp, sizeof (*vqp));
		return (NULL);
	}
	ASSERT(vqp->vq_dma.ccount == 1);

	vqp->vr_desc = (vring_desc_t *)vqp->vq_dma.addr;
	vqp->vr_avail = (vring_avail_t *)(vqp->vq_dma.addr + desc_size);
	vqp->vr_used = (vring_used_t *)(vqp->vq_dma.addr + part1);

	VIRTIO_PUT32(sp, VIRTIO_QUEUE_ADDRESS,
	    vqp->vq_dma.cookie.dmac_address / VIRTIO_VQ_PCI_ALIGN);

	return (vqp);
}
Example #22
/* ARGSUSED */
i40e_status
i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
    enum i40e_memory_type type, u64 size, u32 alignment)
{
	int rc;
	i40e_t *i40e = OS_DEP(hw)->ios_i40e;
	dev_info_t *dip = i40e->i40e_dip;
	size_t len;
	ddi_dma_cookie_t cookie;
	uint_t cookie_num;
	ddi_dma_attr_t attr;

	/*
	 * Because we need to honor the specified alignment, we need to
	 * dynamically construct the attributes. We save the alignment for
	 * debugging purposes.
	 */
	bcopy(&i40e->i40e_static_dma_attr, &attr, sizeof (ddi_dma_attr_t));
	attr.dma_attr_align = alignment;
	mem->idm_alignment = alignment;
	rc = ddi_dma_alloc_handle(dip, &attr,
	    DDI_DMA_DONTWAIT, NULL, &mem->idm_dma_handle);
	if (rc != DDI_SUCCESS) {
		mem->idm_dma_handle = NULL;
		i40e_error(i40e, "failed to allocate DMA handle for common "
		    "code: %d", rc);

		/*
		 * Swallow unknown errors and treat them like we do
		 * DDI_DMA_NORESOURCES, in other words, a memory error.
		 */
		if (rc == DDI_DMA_BADATTR)
			return (I40E_ERR_PARAM);
		return (I40E_ERR_NO_MEMORY);
	}

	rc = ddi_dma_mem_alloc(mem->idm_dma_handle, size,
	    &i40e->i40e_buf_acc_attr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
	    NULL, (caddr_t *)&mem->va, &len, &mem->idm_acc_handle);
	if (rc != DDI_SUCCESS) {
		mem->idm_acc_handle = NULL;
		mem->va = NULL;
		ASSERT(mem->idm_dma_handle != NULL);
		ddi_dma_free_handle(&mem->idm_dma_handle);
		mem->idm_dma_handle = NULL;

		i40e_error(i40e, "failed to allocate %" PRIu64 " bytes of DMA "
		    "memory for common code", size);
		return (I40E_ERR_NO_MEMORY);
	}

	bzero(mem->va, len);

	rc = ddi_dma_addr_bind_handle(mem->idm_dma_handle, NULL, mem->va, len,
	    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
	    &cookie, &cookie_num);
	if (rc != DDI_DMA_MAPPED) {
		mem->pa = NULL;
		ASSERT(mem->idm_acc_handle != NULL);
		ddi_dma_mem_free(&mem->idm_acc_handle);
		mem->idm_acc_handle = NULL;
		mem->va = NULL;
		ASSERT(mem->idm_dma_handle != NULL);
		ddi_dma_free_handle(&mem->idm_dma_handle);
		mem->idm_dma_handle = NULL;

		i40e_error(i40e, "failed to bind %ld byte sized dma region: %d",
		    len, rc);
		switch (rc) {
		case DDI_DMA_INUSE:
			return (I40E_ERR_NOT_READY);
		case DDI_DMA_TOOBIG:
			return (I40E_ERR_INVALID_SIZE);
		case DDI_DMA_NOMAPPING:
		case DDI_DMA_NORESOURCES:
		default:
			return (I40E_ERR_NO_MEMORY);
		}
	}

	ASSERT(cookie_num == 1);
	mem->pa = cookie.dmac_laddress;
	/*
	 * Lint doesn't like this because the common code gives us a uint64_t as
	 * input, but the common code then asks us to assign it to a size_t. So
	 * lint's right, but in this case there isn't much we can do.
	 */
	mem->size = (size_t)size;

	return (I40E_SUCCESS);
}
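
The common-code free hook is not included in this snippet. A sketch of the reverse path, assuming a hypothetical i40e_free_dma_mem() that must tolerate partially set up buffers (field names taken from the code above):

/* ARGSUSED */
i40e_status
i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)	/* sketch */
{
	if (mem->pa != 0) {
		(void) ddi_dma_unbind_handle(mem->idm_dma_handle);
		mem->pa = 0;
	}
	if (mem->idm_acc_handle != NULL) {
		ddi_dma_mem_free(&mem->idm_acc_handle);
		mem->idm_acc_handle = NULL;
		mem->va = NULL;
	}
	if (mem->idm_dma_handle != NULL) {
		ddi_dma_free_handle(&mem->idm_dma_handle);
		mem->idm_dma_handle = NULL;
	}
	mem->size = 0;
	return (I40E_SUCCESS);
}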
Example #23
/*
 * audioixp_alloc_port()
 *
 * Description:
 *	This routine allocates the DMA handles and the memory for the
 *	DMA engines to use.  It also configures the BDL lists properly
 *	for use.
 *
 * Arguments:
 *	audioixp_state_t	*statep	Pointer to the device soft state
 *	int			num	IXP_PLAY or IXP_REC
 *
 * Returns:
 *	DDI_SUCCESS		Port resources successfully allocated
 *	DDI_FAILURE		Port resources not allocated
 */
static int
audioixp_alloc_port(audioixp_state_t *statep, int num)
{
	ddi_dma_cookie_t	cookie;
	uint_t			count;
	int			dir;
	unsigned		caps;
	char			*prop;
	audio_dev_t		*adev;
	audioixp_port_t		*port;
	uint32_t		paddr;
	int			rc;
	dev_info_t		*dip;
	audioixp_bd_entry_t	*bdentry;

	adev = statep->adev;
	dip = statep->dip;

	port = kmem_zalloc(sizeof (*port), KM_SLEEP);
	port->statep = statep;
	port->started = B_FALSE;
	port->num = num;

	switch (num) {
	case IXP_REC:
		statep->rec_port = port;
		prop = "record-interrupts";
		dir = DDI_DMA_READ;
		caps = ENGINE_INPUT_CAP;
		port->sync_dir = DDI_DMA_SYNC_FORKERNEL;
		port->nchan = 2;
		break;
	case IXP_PLAY:
		statep->play_port = port;
		prop = "play-interrupts";
		dir = DDI_DMA_WRITE;
		caps = ENGINE_OUTPUT_CAP;
		port->sync_dir = DDI_DMA_SYNC_FORDEV;
		/* This could possibly be conditionalized */
		port->nchan = 6;
		break;
	default:
		audio_dev_warn(adev, "bad port number (%d)!", num);
		return (DDI_FAILURE);
	}

	port->intrs = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, prop, IXP_INTS);

	/* make sure the values are good */
	if (port->intrs < IXP_MIN_INTS) {
		audio_dev_warn(adev, "%s too low, %d, resetting to %d",
		    prop, port->intrs, IXP_INTS);
		port->intrs = IXP_INTS;
	} else if (port->intrs > IXP_MAX_INTS) {
		audio_dev_warn(adev, "%s too high, %d, resetting to %d",
		    prop, port->intrs, IXP_INTS);
		port->intrs = IXP_INTS;
	}

	/*
	 * Figure out how much space we need.  Sample rate is 48kHz, and
	 * we need to store 8 chunks.  (Note that this means that low
	 * interrupt frequencies will require more RAM.)
	 */
	port->fragfr = 48000 / port->intrs;
	port->fragfr = IXP_ROUNDUP(port->fragfr, IXP_MOD_SIZE);
	port->fragsz = port->fragfr * port->nchan * 2;
	port->samp_size = port->fragsz * IXP_BD_NUMS;

	/* allocate dma handle */
	rc = ddi_dma_alloc_handle(dip, &sample_buf_dma_attr, DDI_DMA_SLEEP,
	    NULL, &port->samp_dmah);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_alloc_handle failed: %d", rc);
		return (DDI_FAILURE);
	}
	/* allocate DMA buffer */
	rc = ddi_dma_mem_alloc(port->samp_dmah, port->samp_size, &buf_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &port->samp_kaddr,
	    &port->samp_size, &port->samp_acch);
	if (rc == DDI_FAILURE) {
		audio_dev_warn(adev, "dma_mem_alloc failed");
		return (DDI_FAILURE);
	}

	/* bind DMA buffer */
	rc = ddi_dma_addr_bind_handle(port->samp_dmah, NULL,
	    port->samp_kaddr, port->samp_size, dir|DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &cookie, &count);
	if ((rc != DDI_DMA_MAPPED) || (count != 1)) {
		audio_dev_warn(adev,
		    "ddi_dma_addr_bind_handle failed: %d", rc);
		return (DDI_FAILURE);
	}
	port->samp_paddr = cookie.dmac_address;

	/*
	 * now, from here we allocate DMA memory for buffer descriptor list.
	 * we allocate adjacent DMA memory for all DMA engines.
	 */
	rc = ddi_dma_alloc_handle(dip, &bdlist_dma_attr, DDI_DMA_SLEEP,
	    NULL, &port->bdl_dmah);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_alloc_handle(bdlist) failed");
		return (DDI_FAILURE);
	}

	/*
	 * we allocate all buffer descriptor lists in contiguous DMA memory.
	 */
	port->bdl_size = sizeof (audioixp_bd_entry_t) * IXP_BD_NUMS;
	rc = ddi_dma_mem_alloc(port->bdl_dmah, port->bdl_size,
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &port->bdl_kaddr, &port->bdl_size, &port->bdl_acch);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_mem_alloc(bdlist) failed");
		return (DDI_FAILURE);
	}

	rc = ddi_dma_addr_bind_handle(port->bdl_dmah, NULL, port->bdl_kaddr,
	    port->bdl_size, DDI_DMA_WRITE|DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &cookie, &count);
	if ((rc != DDI_DMA_MAPPED) || (count != 1)) {
		audio_dev_warn(adev, "addr_bind_handle failed");
		return (DDI_FAILURE);
	}
	port->bdl_paddr = cookie.dmac_address;

	/*
	 * Wire up the BD list.
	 */
	paddr = port->samp_paddr;
	bdentry = (void *)port->bdl_kaddr;

	for (int i = 0; i < IXP_BD_NUMS; i++) {

		/* set base address of buffer */
		ddi_put32(port->bdl_acch, &bdentry->buf_base, paddr);
		ddi_put16(port->bdl_acch, &bdentry->status, 0);
		ddi_put16(port->bdl_acch, &bdentry->buf_len, port->fragsz / 4);
		ddi_put32(port->bdl_acch, &bdentry->next, port->bdl_paddr +
		    (((i + 1) % IXP_BD_NUMS) * sizeof (audioixp_bd_entry_t)));
		paddr += port->fragsz;
		bdentry++;
	}
	(void) ddi_dma_sync(port->bdl_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);

	port->engine = audio_engine_alloc(&audioixp_engine_ops, caps);
	if (port->engine == NULL) {
		audio_dev_warn(adev, "audio_engine_alloc failed");
		return (DDI_FAILURE);
	}

	audio_engine_set_private(port->engine, port);
	audio_dev_add_engine(adev, port->engine);

	return (DDI_SUCCESS);
}
Example #24
/*
 * audio1575_alloc_port()
 *
 * Description:
 *	This routine allocates the DMA handles and the memory for the
 *	DMA engines to use.  It also configures the BDL lists properly
 *	for use.
 *
 * Arguments:
 *	audio1575_state_t	*statep	Pointer to the device soft state
 *	int		num	M1575_PLAY or M1575_REC
 *	uint8_t		nchan	Number of channels (2 = stereo, 6 = 5.1, etc.)
 *
 * Returns:
 *	DDI_SUCCESS		Port resources successfully allocated
 *	DDI_FAILURE		Port resources not allocated
 */
static int
audio1575_alloc_port(audio1575_state_t *statep, int num, uint8_t nchan)
{
	ddi_dma_cookie_t	cookie;
	uint_t			count;
	int			dir;
	unsigned		caps;
	audio_dev_t		*adev;
	audio1575_port_t	*port;
	uint32_t		*kaddr;
	int			rc;
	dev_info_t		*dip;

	adev = statep->adev;
	dip = statep->dip;

	port = kmem_zalloc(sizeof (*port), KM_SLEEP);
	statep->ports[num] = port;
	port->num = num;
	port->statep = statep;
	port->nchan = nchan;

	if (num == M1575_REC) {
		dir = DDI_DMA_READ;
		caps = ENGINE_INPUT_CAP;
		port->sync_dir = DDI_DMA_SYNC_FORKERNEL;
	} else {
		dir = DDI_DMA_WRITE;
		caps = ENGINE_OUTPUT_CAP;
		port->sync_dir = DDI_DMA_SYNC_FORDEV;
	}

	/*
	 * We use one big sample area.  The sample area must be larger
	 * than about 1.5 framework fragment sizes.  (Currently 480 *
	 * 1.5 = 720 frames.)  This is necessary to ensure that we
	 * don't have to involve an interrupt service routine on our
	 * own, to keep the last valid index updated reasonably.
	 */
	port->nframes = 2048;
	port->samp_size = port->nframes * port->nchan * sizeof (int16_t);

	/* allocate dma handle */
	rc = ddi_dma_alloc_handle(dip, &sample_buf_dma_attr, DDI_DMA_SLEEP,
	    NULL, &port->samp_dmah);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_alloc_handle failed: %d", rc);
		return (DDI_FAILURE);
	}
	/* allocate DMA buffer */
	rc = ddi_dma_mem_alloc(port->samp_dmah, port->samp_size, &buf_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &port->samp_kaddr,
	    &port->samp_size, &port->samp_acch);
	if (rc == DDI_FAILURE) {
		audio_dev_warn(adev, "dma_mem_alloc failed");
		return (DDI_FAILURE);
	}

	/* bind DMA buffer */
	rc = ddi_dma_addr_bind_handle(port->samp_dmah, NULL,
	    port->samp_kaddr, port->samp_size, dir|DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &cookie, &count);
	if ((rc != DDI_DMA_MAPPED) || (count != 1)) {
		audio_dev_warn(adev,
		    "ddi_dma_addr_bind_handle failed: %d", rc);
		return (DDI_FAILURE);
	}
	port->samp_paddr = cookie.dmac_address;

	/*
	 * now, from here we allocate DMA memory for buffer descriptor list.
	 * we allocate adjacent DMA memory for all DMA engines.
	 */
	rc = ddi_dma_alloc_handle(dip, &bdlist_dma_attr, DDI_DMA_SLEEP,
	    NULL, &port->bdl_dmah);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_alloc_handle(bdlist) failed");
		return (DDI_FAILURE);
	}

	/*
	 * we allocate all buffer descriptor lists in contiguous DMA memory.
	 */
	port->bdl_size = sizeof (m1575_bd_entry_t) * M1575_BD_NUMS;
	rc = ddi_dma_mem_alloc(port->bdl_dmah, port->bdl_size,
	    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &port->bdl_kaddr, &port->bdl_size, &port->bdl_acch);
	if (rc != DDI_SUCCESS) {
		audio_dev_warn(adev, "ddi_dma_mem_alloc(bdlist) failed");
		return (DDI_FAILURE);
	}

	/*
	 * Wire up the BD list.  We do this *before* binding the BD list
	 * so that we don't have to do an extra ddi_dma_sync.
	 */
	kaddr = (void *)port->bdl_kaddr;
	for (int i = 0; i < M1575_BD_NUMS; i++) {

		/* set base address of buffer */
		ddi_put32(port->bdl_acch, kaddr, port->samp_paddr);
		kaddr++;

		/* set size in frames, and enable IOC interrupt */
		ddi_put32(port->bdl_acch, kaddr,
		    ((port->samp_size / sizeof (int16_t)) | (1U << 31)));
		kaddr++;
	}

	rc = ddi_dma_addr_bind_handle(port->bdl_dmah, NULL, port->bdl_kaddr,
	    port->bdl_size, DDI_DMA_WRITE|DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &cookie, &count);
	if ((rc != DDI_DMA_MAPPED) || (count != 1)) {
		audio_dev_warn(adev, "addr_bind_handle failed");
		return (DDI_FAILURE);
	}
	port->bdl_paddr = cookie.dmac_address;

	port->engine = audio_engine_alloc(&audio1575_engine_ops, caps);
	if (port->engine == NULL) {
		audio_dev_warn(adev, "audio_engine_alloc failed");
		return (DDI_FAILURE);
	}

	audio_engine_set_private(port->engine, port);
	audio_dev_add_engine(adev, port->engine);

	return (DDI_SUCCESS);
}
Example #25
/*
 * function to allocate a dma buffer for mapping memory va-pa
 *
 * dev - software handle to device
 * size - size of the memory to map
 * flags - DDI_DMA_CONSISTENT/DDI_DMA_STREAMING
 *
 * return pointer to an oce_dma_buf_t structure handling the map
 *      NULL => failure
 */
oce_dma_buf_t *
oce_alloc_dma_buffer(struct oce_dev *dev,
    uint32_t size, ddi_dma_attr_t *dma_attr, uint32_t flags)
{
	oce_dma_buf_t  *dbuf;
	ddi_dma_cookie_t cookie;
	uint32_t count;
	size_t actual_len;
	int ret = 0;

	ASSERT(size > 0);
	/* if NULL use default */
	if (dma_attr == NULL) {
		dma_attr = &oce_dma_buf_attr;
	}

	dbuf = kmem_zalloc(sizeof (oce_dma_buf_t), KM_NOSLEEP);
	if (dbuf == NULL) {
		return (NULL);
	}

	/* allocate dma handle */
	ret = ddi_dma_alloc_handle(dev->dip, dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dbuf->dma_handle);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to allocate DMA handle");
		goto handle_fail;
	}
	/* allocate the DMA-able memory */
	ret = ddi_dma_mem_alloc(dbuf->dma_handle, size, &oce_dma_buf_accattr,
	    flags, DDI_DMA_DONTWAIT, NULL, &dbuf->base,
	    &actual_len, &dbuf->acc_handle);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to allocate DMA memory");
		goto alloc_fail;
	}

	/* bind handle */
	ret = ddi_dma_addr_bind_handle(dbuf->dma_handle,
	    (struct as *)0, dbuf->base, actual_len,
	    DDI_DMA_RDWR | flags,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &count);
	if (ret != DDI_DMA_MAPPED) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to bind dma handle");
		goto bind_fail;
	}
	bzero(dbuf->base, actual_len);
	dbuf->addr = cookie.dmac_laddress;
	dbuf->size = actual_len;
	/* usable length */
	dbuf->len  = size;
	dbuf->num_pages = OCE_NUM_PAGES(size);
	return (dbuf);

bind_fail:
	ddi_dma_mem_free(&dbuf->acc_handle);
alloc_fail:
	ddi_dma_free_handle(&dbuf->dma_handle);
handle_fail:
	kmem_free(dbuf, sizeof (oce_dma_buf_t));
	return (NULL);
} /* oce_alloc_dma_buffer */
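
The release path for the buffer is not shown here; a minimal sketch, assuming a hypothetical oce_free_dma_buffer() with the same oce_dma_buf_t layout, unwinds the three DDI steps and then frees the wrapper:

void
oce_free_dma_buffer(oce_dma_buf_t *dbuf)	/* sketch; signature assumed */
{
	if (dbuf == NULL)
		return;
	(void) ddi_dma_unbind_handle(dbuf->dma_handle);
	ddi_dma_mem_free(&dbuf->acc_handle);
	ddi_dma_free_handle(&dbuf->dma_handle);
	kmem_free(dbuf, sizeof (oce_dma_buf_t));
}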
Example #26
extern fc_packet_t *
emlxs_pkt_alloc(emlxs_port_t *port, uint32_t cmdlen, uint32_t rsplen,
    uint32_t datalen, int32_t sleep)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	int32_t(*cb) (caddr_t);
	unsigned long real_len;
	uint32_t pkt_size;
	emlxs_buf_t *sbp;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	emlxs_pkt_cookie_t *pkt_cookie;

	pkt_size =
	    sizeof (fc_packet_t) + sizeof (emlxs_buf_t) +
	    sizeof (emlxs_pkt_cookie_t);
#else
	uint32_t num_cookie;

	pkt_size = sizeof (fc_packet_t) + sizeof (emlxs_buf_t);
#endif /* >= EMLXS_MODREV3 */


	/* Allocate some space */
	if (!(pkt = (fc_packet_t *)kmem_alloc(pkt_size, sleep))) {
		return (NULL);
	}

	bzero(pkt, pkt_size);

	cb = (sleep == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	pkt->pkt_ulp_private = (opaque_t)port;
	pkt->pkt_fca_private =
	    (opaque_t)((uintptr_t)pkt + sizeof (fc_packet_t));
	pkt->pkt_comp = emlxs_pkt_callback;
	pkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
	pkt->pkt_cmdlen = cmdlen;
	pkt->pkt_rsplen = rsplen;
	pkt->pkt_datalen = datalen;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	pkt_cookie =
	    (emlxs_pkt_cookie_t *)((uintptr_t)pkt + sizeof (fc_packet_t) +
	    sizeof (emlxs_buf_t));
	pkt->pkt_cmd_cookie = &pkt_cookie->pkt_cmd_cookie;
	pkt->pkt_resp_cookie = &pkt_cookie->pkt_resp_cookie;
	pkt->pkt_data_cookie = &pkt_cookie->pkt_data_cookie;
#endif /* >= EMLXS_MODREV3 */

	if (cmdlen) {
		/* Allocate the cmd buf */
		if (ddi_dma_alloc_handle(hba->dip, &hba->dma_attr_1sg, cb,
		    NULL, &pkt->pkt_cmd_dma) != DDI_SUCCESS) {
			cmdlen = 0;
			rsplen = 0;
			datalen = 0;
			goto failed;
		}

		if (ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmdlen,
		    &emlxs_data_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
		    (caddr_t *)&pkt->pkt_cmd, &real_len,
		    &pkt->pkt_cmd_acc) != DDI_SUCCESS) {
			(void) ddi_dma_free_handle(&pkt->pkt_cmd_dma);

			cmdlen = 0;
			rsplen = 0;
			datalen = 0;
			goto failed;
		}

		if (real_len < cmdlen) {
			(void) ddi_dma_mem_free(&pkt->pkt_cmd_acc);
			(void) ddi_dma_free_handle(&pkt->pkt_cmd_dma);

			cmdlen = 0;
			rsplen = 0;
			datalen = 0;
			goto failed;
		}
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		if (ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL,
		    pkt->pkt_cmd, real_len,
		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb, NULL,
		    pkt->pkt_cmd_cookie,
		    &pkt->pkt_cmd_cookie_cnt) != DDI_DMA_MAPPED)
#else
		if (ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL,
		    pkt->pkt_cmd, real_len,
		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb, NULL,
		    &pkt->pkt_cmd_cookie, &num_cookie) != DDI_DMA_MAPPED)
#endif /* >= EMLXS_MODREV3 */
		{
			(void) ddi_dma_mem_free(&pkt->pkt_cmd_acc);
			(void) ddi_dma_free_handle(&pkt->pkt_cmd_dma);

			cmdlen = 0;
			rsplen = 0;
			datalen = 0;
			goto failed;
		}
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		if (pkt->pkt_cmd_cookie_cnt != 1)
#else
		if (num_cookie != 1)
#endif /* >= EMLXS_MODREV3 */
		{
			rsplen = 0;
			datalen = 0;
			goto failed;
		}

		bzero(pkt->pkt_cmd, cmdlen);

	}

	if (rsplen) {
		/* Allocate the rsp buf */
		if (ddi_dma_alloc_handle(hba->dip, &hba->dma_attr_1sg, cb,
		    NULL, &pkt->pkt_resp_dma) != DDI_SUCCESS) {
			rsplen = 0;
			datalen = 0;
			goto failed;

		}

		if (ddi_dma_mem_alloc(pkt->pkt_resp_dma, rsplen,
		    &emlxs_data_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
		    (caddr_t *)&pkt->pkt_resp, &real_len,
		    &pkt->pkt_resp_acc) != DDI_SUCCESS) {
			(void) ddi_dma_free_handle(&pkt->pkt_resp_dma);

			rsplen = 0;
			datalen = 0;
			goto failed;
		}

		if (real_len < rsplen) {
			(void) ddi_dma_mem_free(&pkt->pkt_resp_acc);
			(void) ddi_dma_free_handle(&pkt->pkt_resp_dma);

			rsplen = 0;
			datalen = 0;
			goto failed;
		}
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		if (ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL,
		    pkt->pkt_resp, real_len,
		    DDI_DMA_READ | DDI_DMA_CONSISTENT, cb, NULL,
		    pkt->pkt_resp_cookie,
		    &pkt->pkt_resp_cookie_cnt) != DDI_DMA_MAPPED)
#else
		if (ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL,
		    pkt->pkt_resp, real_len,
		    DDI_DMA_READ | DDI_DMA_CONSISTENT, cb, NULL,
		    &pkt->pkt_resp_cookie, &num_cookie) != DDI_DMA_MAPPED)
#endif /* >= EMLXS_MODREV3 */
		{
			(void) ddi_dma_mem_free(&pkt->pkt_resp_acc);
			(void) ddi_dma_free_handle(&pkt->pkt_resp_dma);

			rsplen = 0;
			datalen = 0;
			goto failed;
		}
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		if (pkt->pkt_resp_cookie_cnt != 1)
#else
		if (num_cookie != 1)
#endif /* >= EMLXS_MODREV3 */
		{
			datalen = 0;
			goto failed;
		}

		bzero(pkt->pkt_resp, rsplen);

	}

	/* Allocate the data buf */
	if (datalen) {
		if (ddi_dma_alloc_handle(hba->dip, &hba->dma_attr_1sg, cb,
		    NULL, &pkt->pkt_data_dma) != DDI_SUCCESS) {
			datalen = 0;
			goto failed;
		}

		if (ddi_dma_mem_alloc(pkt->pkt_data_dma, datalen,
		    &emlxs_data_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
		    (caddr_t *)&pkt->pkt_data, &real_len,
		    &pkt->pkt_data_acc) != DDI_SUCCESS) {
			(void) ddi_dma_free_handle(&pkt->pkt_data_dma);

			datalen = 0;
			goto failed;
		}

		if (real_len < datalen) {
			(void) ddi_dma_mem_free(&pkt->pkt_data_acc);
			(void) ddi_dma_free_handle(&pkt->pkt_data_dma);

			datalen = 0;
			goto failed;
		}
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		if (ddi_dma_addr_bind_handle(pkt->pkt_data_dma, NULL,
		    pkt->pkt_data, real_len,
		    DDI_DMA_READ | DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb,
		    NULL, pkt->pkt_data_cookie,
		    &pkt->pkt_data_cookie_cnt) != DDI_DMA_MAPPED)
#else
		if (ddi_dma_addr_bind_handle(pkt->pkt_data_dma, NULL,
		    pkt->pkt_data, real_len,
		    DDI_DMA_READ | DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb,
		    NULL, &pkt->pkt_data_cookie,
		    &num_cookie) != DDI_DMA_MAPPED)
#endif /* >= EMLXS_MODREV3 */
		{
			(void) ddi_dma_mem_free(&pkt->pkt_data_acc);
			(void) ddi_dma_free_handle(&pkt->pkt_data_dma);

			datalen = 0;
			goto failed;
		}
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		if (pkt->pkt_data_cookie_cnt != 1)
#else
		if (num_cookie != 1)
#endif /* >= EMLXS_MODREV3 */
		{
			goto failed;
		}

		bzero(pkt->pkt_data, datalen);
	}

	sbp = PKT2PRIV(pkt);
	bzero((void *)sbp, sizeof (emlxs_buf_t));

	mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, DDI_INTR_PRI(hba->intr_arg));
	sbp->pkt_flags = PACKET_VALID | PACKET_ULP_OWNED | PACKET_ALLOCATED;
	sbp->port = port;
	sbp->pkt = pkt;
	sbp->iocbq.sbp = sbp;

	return (pkt);

failed:

	if (datalen) {
		(void) ddi_dma_unbind_handle(pkt->pkt_data_dma);
		(void) ddi_dma_mem_free(&pkt->pkt_data_acc);
		(void) ddi_dma_free_handle(&pkt->pkt_data_dma);
	}

	if (rsplen) {
		(void) ddi_dma_unbind_handle(pkt->pkt_resp_dma);
		(void) ddi_dma_mem_free(&pkt->pkt_resp_acc);
		(void) ddi_dma_free_handle(&pkt->pkt_resp_dma);
	}

	if (cmdlen) {
		(void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma);
		(void) ddi_dma_mem_free(&pkt->pkt_cmd_acc);
		(void) ddi_dma_free_handle(&pkt->pkt_cmd_dma);
	}

	if (pkt) {
		kmem_free(pkt, pkt_size);
	}

	return (NULL);

} /* emlxs_pkt_alloc() */
Example #27
/**
 * Virtio Pci get queue routine. Allocates a PCI queue and DMA resources.
 *
 * @param pDevice           Pointer to the Virtio device instance.
 * @param pQueue            Where to store the queue.
 *
 * @return An allocated Virtio Pci queue, or NULL in case of errors.
 */
static void *VirtioPciGetQueue(PVIRTIODEVICE pDevice, PVIRTIOQUEUE pQueue)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciGetQueue pDevice=%p pQueue=%p\n", pDevice, pQueue));
    AssertReturn(pDevice, NULL);

    virtio_pci_t *pPci = pDevice->pvHyper;
    AssertReturn(pPci, NULL);

    /*
     * Select a Queue.
     */
    ddi_put16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_SEL), pQueue->QueueIndex);

    /*
     * Get the currently selected Queue's size.
     */
    pQueue->Ring.cDesc = ddi_get16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_NUM));
    if (RT_UNLIKELY(!pQueue->Ring.cDesc))
    {
        LogRel((VIRTIOLOGNAME ": VirtioPciGetQueue: Queue[%d] has no descriptors.\n", pQueue->QueueIndex));
        return NULL;
    }

    /*
     * Check if it's already active.
     */
    uint32_t QueuePFN = ddi_get32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN));
    if (QueuePFN != 0)
    {
        LogRel((VIRTIOLOGNAME ":VirtioPciGetQueue: Queue[%d] is already used.\n", pQueue->QueueIndex));
        return NULL;
    }

    LogFlow(("Queue[%d] has %d slots.\n", pQueue->QueueIndex, pQueue->Ring.cDesc));

    /*
     * Allocate and initialize Pci queue data.
     */
    virtio_pci_queue_t *pPciQueue = RTMemAllocZ(sizeof(virtio_pci_queue_t));
    if (pPciQueue)
    {
        /*
         * Setup DMA.
         */
        size_t cbQueue = VirtioRingSize(pQueue->Ring.cDesc, VIRTIO_PCI_RING_ALIGN);
        int rc = ddi_dma_alloc_handle(pDevice->pDip, &g_VirtioPciDmaAttrRing, DDI_DMA_SLEEP, 0 /* addr */, &pPciQueue->hDMA);
        if (rc == DDI_SUCCESS)
        {
            rc = ddi_dma_mem_alloc(pPciQueue->hDMA, cbQueue, &g_VirtioPciAccAttrRing, DDI_DMA_CONSISTENT,
                                   DDI_DMA_SLEEP, 0 /* addr */, &pQueue->pQueue, &pPciQueue->cbBuf,
                                   &pPciQueue->hIO);
            if (rc == DDI_SUCCESS)
            {
                AssertRelease(pPciQueue->cbBuf >= cbQueue);
                ddi_dma_cookie_t DmaCookie;
                uint_t cCookies;
                rc = ddi_dma_addr_bind_handle(pPciQueue->hDMA, NULL /* addrspace */, pQueue->pQueue, pPciQueue->cbBuf,
                                              DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
                                              0 /* addr */, &DmaCookie, &cCookies);
                if (rc == DDI_SUCCESS)
                {
                    pPciQueue->physBuf = DmaCookie.dmac_laddress;
                    pPciQueue->pageBuf = pPciQueue->physBuf >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;

                    LogFlow((VIRTIOLOGNAME ":VirtioPciGetQueue: Queue[%d]%p physBuf=%x pfn of Buf %#x\n", pQueue->QueueIndex,
                             pQueue->pQueue, pPciQueue->physBuf, pPciQueue->pageBuf));
                    cmn_err(CE_NOTE, ":VirtioPciGetQueue: Queue[%d]%p physBuf=%x pfn of Buf %x\n", pQueue->QueueIndex,
                             pQueue->pQueue, pPciQueue->physBuf, pPciQueue->pageBuf);

                    /*
                     * Activate the queue and initialize a ring for the queue.
                     */
                    memset(pQueue->pQueue, 0, pPciQueue->cbBuf);
                    ddi_put32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN), pPciQueue->pageBuf);
                    VirtioRingInit(pQueue, pQueue->Ring.cDesc, pQueue->pQueue, VIRTIO_PCI_RING_ALIGN);
                    return pPciQueue;
                }
                else