/**
 * cik_sdma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (CIK).
 */
void cik_sdma_fini(struct radeon_device *rdev)
{
	/* halt the MEs */
	cik_sdma_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
	/* XXX - compute dma queue tear down */
}

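/**
 * radeon_uvd_fini - tear down the UVD block
 *
 * @rdev: radeon_device pointer
 *
 * Unmap, unpin and free the UVD firmware buffer object, free the
 * UVD ring and release the firmware image.
 */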
void radeon_uvd_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->uvd.vcpu_bo == NULL)
		return;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (!r) {
		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
		radeon_bo_unpin(rdev->uvd.vcpu_bo);
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	}

	radeon_bo_unref(&rdev->uvd.vcpu_bo);

	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);

	release_firmware(rdev->uvd_fw);
}

/**
 * r600_dma_fini - tear down the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine and free the ring (r6xx-evergreen).
 */
void r600_dma_fini(struct radeon_device *rdev)
{
	r600_dma_stop(rdev);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
}

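/**
 * cayman_cp_fini - tear down the command processor
 *
 * @rdev: radeon_device pointer
 *
 * Disable the command processor and free the gfx ring (cayman).
 */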
static void cayman_cp_fini(struct radeon_device *rdev)
{
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}

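/*
 * Illustrative sketch only: the *_fini helpers above are normally chained
 * from a chip-level teardown routine that runs on driver unload, after the
 * hardware has been idled. The function name example_chip_fini() and the
 * exact ordering below are assumptions for illustration, not code from
 * this driver; a real chip mixes only the helpers for its own IP blocks.
 */
static void example_chip_fini(struct radeon_device *rdev)
{
	cayman_cp_fini(rdev);		/* stop the CP, free the gfx ring */
	cik_sdma_fini(rdev);		/* halt SDMA, free both dma rings */
	radeon_uvd_fini(rdev);		/* unpin/free the UVD bo, ring and fw */
	/* ... remaining blocks (irq, fence, gart, etc.) torn down here ... */
}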