/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	/* Hold ring_lock so no new fences can be emitted during teardown. */
	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		/* Drain every outstanding fence on this ring. */
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev);
		}
#ifdef __NetBSD__
		/*
		 * NetBSD DRM wait queues require their interlock
		 * (fence_lock) to be held across the wakeup.
		 */
		spin_lock(&rdev->fence_lock);
		DRM_SPIN_WAKEUP_ALL(&rdev->fence_queue, &rdev->fence_lock);
		spin_unlock(&rdev->fence_lock);
#else
		wake_up_all(&rdev->fence_queue);
#endif
		/* Release the scratch register used for fence writeback. */
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
#ifdef __NetBSD__
	/*
	 * All waiters have been woken and no ring is initialized any
	 * more; destroy the device-wide wait queue and its interlock
	 * exactly once, outside the loop and after dropping ring_lock.
	 */
	DRM_DESTROY_WAITQUEUE(&rdev->fence_queue);
	spin_lock_destroy(&rdev->fence_lock);
#endif
}
/** * radeon_fence_driver_fini - tear down the fence driver * for all possible rings. * * @rdev: radeon device pointer * * Tear down the fence driver for all possible rings (all asics). */ void radeon_fence_driver_fini(struct radeon_device *rdev) { int ring, r; mutex_lock(&rdev->ring_lock); for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { if (!rdev->fence_drv[ring].initialized) continue; r = radeon_fence_wait_empty(rdev, ring); if (r) { /* no need to trigger GPU reset as we are unloading */ radeon_fence_driver_force_completion(rdev, ring); } wake_up_all(&rdev->fence_queue); radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); rdev->fence_drv[ring].initialized = false; } mutex_unlock(&rdev->ring_lock); }
/** * radeon_fence_driver_fini - tear down the fence driver * for all possible rings. * * @rdev: radeon device pointer * * Tear down the fence driver for all possible rings (all asics). */ void radeon_fence_driver_fini(struct radeon_device *rdev) { int ring, r; sx_xlock(&rdev->ring_lock); for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { if (!rdev->fence_drv[ring].initialized) continue; r = radeon_fence_wait_empty_locked(rdev, ring); if (r) { /* no need to trigger GPU reset as we are unloading */ radeon_fence_driver_force_completion(rdev); } cv_broadcast(&rdev->fence_queue); radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); rdev->fence_drv[ring].initialized = false; cv_destroy(&rdev->fence_queue); } sx_xunlock(&rdev->ring_lock); }