/*
 * Acquire an ISA DMA engine channel.  The channel number, wait callback
 * and callback argument are smuggled through the generic ddi_dma_mctl()
 * argument slots (decoded again by the nexus mctl handler).
 */
int
ddi_dmae_alloc(dev_info_t *dip, int chnl, int (*dmae_waitfp)(), caddr_t arg)
{
	int rval;

	rval = ddi_dma_mctl(dip, dip, 0, DDI_DMA_E_ACQUIRE,
	    (off_t *)dmae_waitfp, (size_t *)arg,
	    (caddr_t *)(uintptr_t)chnl, 0);
	return (rval);
}
int ddi_dmae_swsetup(dev_info_t *dip, struct ddi_dmae_req *dmaereqp, ddi_dma_cookie_t *cookiep, int chnl) { return (ddi_dma_mctl(dip, dip, 0, DDI_DMA_E_SWSETUP, (off_t *)dmaereqp, (size_t *)cookiep, (caddr_t *)(uintptr_t)chnl, 0)); }
/*
 * Release a DVMA reservation previously obtained via dvma_reserve().
 * Unless the nexus-bypass flag is set, first free the private copies of
 * the DMA limits structure and the per-page handle array that
 * dvma_reserve() allocated, then hand the handle back via ddi_dma_mctl().
 */
void
dvma_release(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	uint_t npages = hp->dmai_ndvmapages;

	if ((hp->dmai_rflags & DMP_BYPASSNEXUS) == 0) {
		/* dmai_mapping holds the kmem-allocated limits copy */
		kmem_free((void *)hp->dmai_mapping, sizeof (ddi_dma_lim_t));
		kmem_free(hp->dmai_minfo, npages * sizeof (ddi_dma_handle_t));
	}
	(void) ddi_dma_mctl(HD, HD, h, DDI_DMA_RELEASE, 0, 0, 0, 0);
}
/*
 * Reserve DVMA resources for fast DVMA.
 *
 * Validates and snapshots the caller's DMA limits, builds a
 * DDI_DMA_RESERVE request, and forwards it through ddi_dma_mctl().
 * On success, when the returned handle is not nexus-bypass, allocates
 * and stashes a private copy of the limits (in dmai_mapping) and a
 * per-page handle array (in dmai_minfo); dvma_release() frees both.
 *
 * Returns DDI_DMA_BADLIMITS when limp is NULL, otherwise the
 * ddi_dma_mctl() result.
 */
int
dvma_reserve(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t pages,
    ddi_dma_handle_t *handlep)
{
	/*
	 * "auto" storage-class specifiers removed: redundant in classic C
	 * and repurposed for type inference in C23.
	 */
	ddi_dma_lim_t dma_lim;
	ddi_dma_impl_t implhdl;
	struct ddi_dma_req dmareq;
	ddi_dma_handle_t reqhdl;
	ddi_dma_impl_t *mp;
	int ret;

	if (limp == NULL) {
		return (DDI_DMA_BADLIMITS);
	}
	dma_lim = *limp;

	bzero(&dmareq, sizeof (dmareq));
	dmareq.dmar_fp = DDI_DMA_DONTWAIT;
	dmareq.dmar_flags = DDI_DMA_RDWR | DDI_DMA_STREAMING;
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_object.dmao_size = pages;

	/*
	 * Pass in a dummy handle.  This avoids the problem when
	 * somebody is dereferencing the handle before checking
	 * the operation.  This can be avoided once we separate
	 * handle allocation and actual operation.
	 */
	bzero(&implhdl, sizeof (ddi_dma_impl_t));
	reqhdl = (ddi_dma_handle_t)&implhdl;

	ret = ddi_dma_mctl(dip, dip, reqhdl, DDI_DMA_RESERVE,
	    (off_t *)&dmareq, 0, (caddr_t *)handlep, 0);

	if (ret == DDI_SUCCESS) {
		mp = (ddi_dma_impl_t *)(*handlep);
		if (!(mp->dmai_rflags & DMP_BYPASSNEXUS)) {
			uint_t np = mp->dmai_ndvmapages;

			/* private copy of the limits, freed by dvma_release */
			mp->dmai_mapping = (ulong_t)kmem_alloc(
			    sizeof (ddi_dma_lim_t), KM_SLEEP);
			bcopy((char *)&dma_lim, (char *)mp->dmai_mapping,
			    sizeof (ddi_dma_lim_t));
			/* one handle slot per reserved DVMA page */
			mp->dmai_minfo = kmem_alloc(
			    np * sizeof (ddi_dma_handle_t), KM_SLEEP);
		}
	}
	return (ret);
}
static int isa_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags) { int rval; int arg = (int)(uintptr_t)objp; switch (request) { case DDI_DMA_E_PROG: return (i_dmae_prog(rdip, (struct ddi_dmae_req *)offp, (ddi_dma_cookie_t *)lenp, arg)); case DDI_DMA_E_ACQUIRE: return (i_dmae_acquire(rdip, arg, (int(*)(caddr_t))offp, (caddr_t)lenp)); case DDI_DMA_E_FREE: return (i_dmae_free(rdip, arg)); case DDI_DMA_E_STOP: i_dmae_stop(rdip, arg); return (DDI_SUCCESS); case DDI_DMA_E_ENABLE: i_dmae_enable(rdip, arg); return (DDI_SUCCESS); case DDI_DMA_E_DISABLE: i_dmae_disable(rdip, arg); return (DDI_SUCCESS); case DDI_DMA_E_GETCNT: i_dmae_get_chan_stat(rdip, arg, NULL, (int *)lenp); return (DDI_SUCCESS); case DDI_DMA_E_SWSETUP: return (i_dmae_swsetup(rdip, (struct ddi_dmae_req *)offp, (ddi_dma_cookie_t *)lenp, arg)); case DDI_DMA_E_SWSTART: i_dmae_swstart(rdip, arg); return (DDI_SUCCESS); case DDI_DMA_E_GETATTR: bcopy(&ISA_dma_attr, objp, sizeof (ddi_dma_attr_t)); return (DDI_SUCCESS); case DDI_DMA_E_1STPTY: { struct ddi_dmae_req req1stpty = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; if (arg == 0) { req1stpty.der_command = DMAE_CMD_TRAN; req1stpty.der_trans = DMAE_TRANS_DMND; } else { req1stpty.der_trans = DMAE_TRANS_CSCD; } return (i_dmae_prog(rdip, &req1stpty, NULL, arg)); } default: /* * We pass to rootnex, but it turns out that rootnex will just * return failure, as we don't use ddi_dma_mctl() except * for DMA engine (ISA) and DVMA (SPARC). Arguably we could * just return an error direclty here, instead. */ rval = ddi_dma_mctl(dip, rdip, handle, request, offp, lenp, objp, flags); } return (rval); }
/*
 * Start a previously software-set-up DMA transfer on the given channel.
 */
int
ddi_dmae_swstart(dev_info_t *dip, int chnl)
{
	return (ddi_dma_mctl(dip, dip, 0, DDI_DMA_E_SWSTART,
	    0, 0, (caddr_t *)(uintptr_t)chnl, 0));
}
/*
 * Configure the given channel for first-party (bus-master/cascade) DMA.
 */
int
ddi_dmae_1stparty(dev_info_t *dip, int chnl)
{
	return (ddi_dma_mctl(dip, dip, 0, DDI_DMA_E_1STPTY,
	    0, 0, (caddr_t *)(uintptr_t)chnl, 0));
}
/*
 * Copy the ISA DMA engine's DMA attributes into the caller's structure.
 */
int
ddi_dmae_getattr(dev_info_t *dip, ddi_dma_attr_t *attrp)
{
	return (ddi_dma_mctl(dip, dip, 0, DDI_DMA_E_GETATTR,
	    0, 0, (caddr_t *)attrp, 0));
}
/*
 * Copy the ISA DMA engine's DMA limits into the caller's structure.
 */
int
ddi_dmae_getlim(dev_info_t *dip, ddi_dma_lim_t *limitsp)
{
	return (ddi_dma_mctl(dip, dip, 0, DDI_DMA_E_GETLIM,
	    0, 0, (caddr_t *)limitsp, 0));
}
/*
 * Release a DMA engine channel acquired with ddi_dmae_alloc().
 */
int
ddi_dmae_release(dev_info_t *dip, int chnl)
{
	return (ddi_dma_mctl(dip, dip, 0, DDI_DMA_E_FREE,
	    0, 0, (caddr_t *)(uintptr_t)chnl, 0));
}
/*
 * Retrieve the remaining transfer count for the given channel into
 * *countp (passed through the mctl size_t* slot).
 */
int
ddi_dmae_getcnt(dev_info_t *dip, int chnl, int *countp)
{
	return (ddi_dma_mctl(dip, dip, 0, DDI_DMA_E_GETCNT,
	    0, (size_t *)countp, (caddr_t *)(uintptr_t)chnl, 0));
}
/*
 * Disable (mask) the given DMA engine channel.
 */
int
ddi_dmae_disable(dev_info_t *dip, int chnl)
{
	return (ddi_dma_mctl(dip, dip, 0, DDI_DMA_E_DISABLE,
	    0, 0, (caddr_t *)(uintptr_t)chnl, 0));
}
/*
 * Stop any transfer in progress on the given DMA engine channel.
 */
int
ddi_dmae_stop(dev_info_t *dip, int chnl)
{
	return (ddi_dma_mctl(dip, dip, 0, DDI_DMA_E_STOP,
	    0, 0, (caddr_t *)(uintptr_t)chnl, 0));
}