/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s), indexed by ring id
 * @intr: use interruptible sleep
 *
 * Wait until at least one of the requested fences signals (all asics).
 * @intr selects interruptible (true) versus uninterruptible (false)
 * sleep while waiting. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned num_rings = 0;
	unsigned i;

	/* collect the target sequence number for every provided fence */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i])
			continue;

		seq[i] = fences[i]->seq;
		++num_rings;

		/* test if something was already signaled */
		if (seq[i] == RADEON_FENCE_SIGNALED_SEQ)
			return 0;
	}

	/* nothing to wait for ? */
	if (num_rings == 0)
		return -ENOENT;

	return radeon_fence_wait_seq(rdev, seq, intr);
}
/** * radeon_fence_wait_next - wait for the next fence to signal * * @rdev: radeon device pointer * @ring: ring index the fence is associated with * * Wait for the next fence on the requested ring to signal (all asics). * Returns 0 if the next fence has passed, error for all other cases. * Caller must hold ring lock. */ int radeon_fence_wait_next(struct radeon_device *rdev, int ring) { uint64_t seq[RADEON_NUM_RINGS] = {}; seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL; if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) { /* nothing to wait for, last_seq is already the last emited fence */ return -ENOENT; } return radeon_fence_wait_seq(rdev, seq, false); }
/** * radeon_fence_wait_next_locked - wait for the next fence to signal * * @rdev: radeon device pointer * @ring: ring index the fence is associated with * * Wait for the next fence on the requested ring to signal (all asics). * Returns 0 if the next fence has passed, error for all other cases. * Caller must hold ring lock. */ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) { uint64_t seq; seq = atomic_load_acq_64(&rdev->fence_drv[ring].last_seq) + 1ULL; if (seq >= rdev->fence_drv[ring].sync_seq[ring]) { /* nothing to wait for, last_seq is already the last emited fence */ return -ENOENT; } return radeon_fence_wait_seq(rdev, seq, ring, false, false); }
/** * radeon_fence_wait_empty_locked - wait for all fences to signal * * @rdev: radeon device pointer * @ring: ring index the fence is associated with * * Wait for all fences on the requested ring to signal (all asics). * Returns 0 if the fences have passed, error for all other cases. * Caller must hold ring lock. */ int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) { uint64_t seq = rdev->fence_drv[ring].sync_seq[ring]; int r; r = radeon_fence_wait_seq(rdev, seq, ring, false, false); if (r) { if (r == -EDEADLK) { return -EDEADLK; } dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n", ring, r); } return 0; }
/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects interruptible (true) versus uninterruptible (false)
 * sleep while waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	int r;

	if (!fence) {
		DRM_ERROR("Querying an invalid fence : %p !\n", fence);
		return -EINVAL;
	}

	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
				  fence->ring, intr, true);
	if (r)
		return r;

	/* mark the fence so later waits can short-circuit */
	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}
/** * radeon_fence_wait_empty - wait for all fences to signal * * @rdev: radeon device pointer * @ring: ring index the fence is associated with * * Wait for all fences on the requested ring to signal (all asics). * Returns 0 if the fences have passed, error for all other cases. * Caller must hold ring lock. */ int radeon_fence_wait_empty(struct radeon_device *rdev, int ring) { uint64_t seq[RADEON_NUM_RINGS] = {}; int r; seq[ring] = rdev->fence_drv[ring].sync_seq[ring]; if (!seq[ring]) return 0; r = radeon_fence_wait_seq(rdev, seq, false); if (r) { if (r == -EDEADLK) return -EDEADLK; dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n", ring, r); } return 0; }
/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects interruptible (true) versus uninterruptible (false)
 * sleep while waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	int r;

	if (!fence) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return -EINVAL;
	}

	/* short-circuit if this fence already signaled */
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ)
		return 0;

	seq[fence->ring] = fence->seq;
	r = radeon_fence_wait_seq(fence->rdev, seq, intr);
	if (r)
		return r;

	/* mark the fence so later waits can short-circuit */
	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}
/** * radeon_fence_wait_empty_locked - wait for all fences to signal * * @rdev: radeon device pointer * @ring: ring index the fence is associated with * * Wait for all fences on the requested ring to signal (all asics). * Returns 0 if the fences have passed, error for all other cases. * Caller must hold ring lock. */ void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) { uint64_t seq = rdev->fence_drv[ring].sync_seq[ring]; while(1) { int r; r = radeon_fence_wait_seq(rdev, seq, ring, false, false); if (r == -EDEADLK) { mutex_unlock(&rdev->ring_lock); r = radeon_gpu_reset(rdev); mutex_lock(&rdev->ring_lock); if (!r) continue; } if (r) { dev_err(rdev->dev, "error waiting for ring to become" " idle (%d)\n", r); } return; } }