/*ARGSUSED*/
int
px_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *dmap = (ddi_dma_impl_t *)handle;
	px_t *px = DIP_TO_STATE(dip);
	px_mmu_t *mmu = px->px_mmu_p;

	DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);

	/* Refuse to unbind a handle that was never bound. */
	if ((dmap->dmai_flags & PX_DMAI_FLAGS_INUSE) == 0) {
		DBG(DBG_DMA_UNBINDH, dip, "handle not inuse\n");
		return (DDI_FAILURE);
	}

	/* Detach the handle from the error-handling path. */
	dmap->dmai_error.err_cf = NULL;

	/*
	 * Tear down whatever mapping resources the bind created; what
	 * needs undoing depends on how the handle was mapped.
	 */
	switch (PX_DMA_TYPE(dmap)) {
	case PX_DMAI_FLAGS_DVMA:
		/* IOMMU-backed: unload translations, then free DVMA. */
		px_mmu_unmap_window(mmu, dmap);
		px_dvma_unmap(mmu, dmap);
		px_dma_freepfn(dmap);
		break;
	case PX_DMAI_FLAGS_BYPASS:
	case PX_DMAI_FLAGS_PTP:
		px_dma_freewin(dmap);
		break;
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_unbindhdl:bad dma type %p",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), dmap);
		/*NOTREACHED*/
	}

	/* The freed DVMA space may unblock waiters queued on the MMU. */
	if (mmu->mmu_dvma_clid != 0) {
		DBG(DBG_DMA_UNBINDH, dip, "run dvma callback\n");
		ddi_run_callback(&mmu->mmu_dvma_clid);
	}

	/* Likewise for callers waiting on kernel memory. */
	if (px_kmem_clid) {
		DBG(DBG_DMA_UNBINDH, dip, "run handle callback\n");
		ddi_run_callback(&px_kmem_clid);
	}

	/* Drop all state bits except those that survive across binds. */
	dmap->dmai_flags &= PX_DMAI_FLAGS_PRESERVE;

	return (DDI_SUCCESS);
}
int
px_fdvma_release(dev_info_t *dip, px_t *px_p, ddi_dma_impl_t *mp)
{
	px_mmu_t *mmu = px_p->px_mmu_p;
	fdvma_t *fdvma = (fdvma_t *)mp->dmai_fdvma;
	size_t pgcnt;

	if (px_disable_fdvma)
		return (DDI_FAILURE);

	/* Only handles marked as fast DVMA may be released here. */
	if (!(mp->dmai_rflags & DMP_BYPASSNEXUS)) {
		DBG(DBG_DMA_CTL, dip, "DDI_DMA_RELEASE: not fast dma\n");
		return (DDI_FAILURE);
	}

	/* Unload every reserved translation from the IOMMU first. */
	px_mmu_unmap_window(mmu, mp);

	/* Return the DVMA range to the arena and the reserve pool. */
	pgcnt = mp->dmai_ndvmapages;
	vmem_xfree(mmu->mmu_dvma_map, (void *)mp->dmai_mapping,
	    MMU_PTOB(pgcnt));
	mmu->mmu_dvma_reserve += pgcnt;
	mp->dmai_ndvmapages = 0;

	/* Wake any thread blocked waiting for DVMA space. */
	if (mmu->mmu_dvma_clid != 0) {
		DBG(DBG_DMA_CTL, dip, "run dvma callback\n");
		ddi_run_callback(&mmu->mmu_dvma_clid);
	}

	/* Tear down the fast-DVMA bookkeeping and the handle itself. */
	kmem_free(fdvma->pagecnt, pgcnt * sizeof (uint_t));
	kmem_free(fdvma, sizeof (fdvma_t));
	kmem_free(mp, sizeof (px_dma_hdl_t));

	/* Wake any thread blocked waiting for kernel memory. */
	if (px_kmem_clid != 0) {
		DBG(DBG_DMA_CTL, dip, "run handle callback\n");
		ddi_run_callback(&px_kmem_clid);
	}

	return (DDI_SUCCESS);
}
/*ARGSUSED*/
int
px_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *dmap = (ddi_dma_impl_t *)handle;

	DBG(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);

	/* Release the handle's backing memory. */
	px_dma_freemp(dmap);

	/* The freed memory may satisfy a waiting handle allocation. */
	if (px_kmem_clid) {
		DBG(DBG_DMA_FREEH, dip, "run handle callback\n");
		ddi_run_callback(&px_kmem_clid);
	}
	return (DDI_SUCCESS);
}
void
impl_acc_hdl_free(ddi_acc_handle_t handle)
{
	/*
	 * The opaque (ddi_acc_handle_t) is really a (ddi_acc_impl_t *),
	 * since that is what impl_acc_hdl_alloc() handed out.
	 */
	ddi_acc_impl_t *hdlp = (ddi_acc_impl_t *)handle;

	if (hdlp == NULL)
		return;

	/* Free in reverse order of allocation: trap data, error, handle. */
	kmem_free(hdlp->ahi_err->err_ontrap, sizeof (on_trap_data_t));
	kmem_free(hdlp->ahi_err, sizeof (ndi_err_t));
	kmem_free(hdlp, sizeof (ddi_acc_impl_t));

	/* The freed memory may satisfy a waiting handle allocation. */
	if (impl_acc_hdl_id)
		ddi_run_callback(&impl_acc_hdl_id);
}