/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		/* Skip rings whose fence driver was never brought up. */
		if (!rdev->fence_drv[ring].initialized)
			continue;
		/* Drain outstanding fences on this ring before freeing its state. */
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev);
		}
#ifdef __NetBSD__
		/*
		 * NetBSD waitqueue wakeups must be issued with the
		 * associated spin lock held.
		 */
		spin_lock(&rdev->fence_lock);
		DRM_SPIN_WAKEUP_ALL(&rdev->fence_queue, &rdev->fence_lock);
		spin_unlock(&rdev->fence_lock);
#else
		wake_up_all(&rdev->fence_queue);
#endif
		/* Release the scratch register backing this ring's fence value. */
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
#ifdef __NetBSD__
	/*
	 * All rings are quiesced and marked uninitialized, so no waiter
	 * can still be using the shared queue/lock; destroy them last.
	 */
	DRM_DESTROY_WAITQUEUE(&rdev->fence_queue);
	spin_lock_destroy(&rdev->fence_lock);
#endif
}
static void nv84_fence_destroy(struct nouveau_drm *drm) { struct nv84_fence_priv *priv = drm->fence; #ifdef __NetBSD__ spin_lock_destroy(&priv->base.waitlock); DRM_DESTROY_WAITQUEUE(&priv->base.waitqueue); #endif nouveau_bo_unmap(priv->bo_gart); if (priv->bo_gart) nouveau_bo_unpin(priv->bo_gart); nouveau_bo_ref(NULL, &priv->bo_gart); nouveau_bo_unmap(priv->bo); if (priv->bo) nouveau_bo_unpin(priv->bo); nouveau_bo_ref(NULL, &priv->bo); drm->fence = NULL; kfree(priv); }