/**
 * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence numbers.  Helper function
 * for radeon_fence_wait_*().
 * Returns the remaining time if the sequence numbers have passed, 0 when
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
					  u64 *target_seq, bool intr,
					  long timeout)
{
	long r;
	int i;

	if (radeon_fence_any_seq_signaled(rdev, target_seq))
		return timeout;

	/* enable IRQs and tracing */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
		radeon_irq_kms_sw_irq_get(rdev, i);
	}

	if (intr) {
		r = wait_event_interruptible_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	} else {
		r = wait_event_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	}

	if (rdev->needs_reset)
		r = -EDEADLK;

	/* disable IRQs and tracing again */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		radeon_irq_kms_sw_irq_put(rdev, i);
		trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
	}

	return r;
}
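/*
 * Illustrative caller (sketch, not part of the original file; the helper
 * name is hypothetical): a typical single-fence wait builds a per-ring
 * sequence array, sets only the entry for the fence's ring, and hands it
 * to radeon_fence_wait_seq_timeout() with an unbounded timeout.
 */
static long example_wait_single_fence(struct radeon_fence *fence, bool intr)
{
	u64 seq[RADEON_NUM_RINGS] = {};

	/* only the ring this fence belongs to carries a non-zero target */
	seq[fence->ring] = fence->seq;

	return radeon_fence_wait_seq_timeout(fence->rdev, seq, intr,
					     MAX_SCHEDULE_TIMEOUT);
}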
/**
 * radeon_fence_wait_seq - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence numbers.  Helper function
 * for radeon_fence_wait_*().
 * Returns 0 if the sequence numbers have passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
				 bool intr)
{
	uint64_t last_seq[RADEON_NUM_RINGS];
	bool signaled;
	int i, r;

	spin_lock(&rdev->fence_lock);
	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {

		/* Save current sequence values, used to check for GPU lockups */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (!target_seq[i])
				continue;

			last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
			trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
			radeon_irq_kms_sw_irq_get(rdev, i);
		}

#ifdef __NetBSD__
		if (intr)
			DRM_SPIN_TIMED_WAIT_UNTIL(r, &rdev->fence_queue,
			    &rdev->fence_lock,
			    RADEON_FENCE_JIFFIES_TIMEOUT,
			    ((signaled = radeon_fence_any_seq_signaled(rdev,
				    target_seq)) ||
				rdev->needs_reset));
		else
			DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(r, &rdev->fence_queue,
			    &rdev->fence_lock,
			    RADEON_FENCE_JIFFIES_TIMEOUT,
			    ((signaled = radeon_fence_any_seq_signaled(rdev,
				    target_seq)) ||
				rdev->needs_reset));
#else
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue, (
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
				 || rdev->needs_reset),
				RADEON_FENCE_JIFFIES_TIMEOUT);
		} else {
			r = wait_event_timeout(rdev->fence_queue, (
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
				 || rdev->needs_reset),
				RADEON_FENCE_JIFFIES_TIMEOUT);
		}
#endif

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (!target_seq[i])
				continue;

			radeon_irq_kms_sw_irq_put(rdev, i);
			trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
		}

		if (unlikely(r < 0))
			goto out;

		if (unlikely(!signaled)) {
			if (rdev->needs_reset) {
				r = -EDEADLK;
				goto out;
			}

			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r)
				continue;

			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
				if (!target_seq[i])
					continue;

				if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
					break;
			}

			if (i != RADEON_NUM_RINGS)
				continue;

			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
				if (!target_seq[i])
					continue;

				if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
					break;
			}

			if (i < RADEON_NUM_RINGS) {
				/* good news we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for "
					 "0x%016"PRIx64" last fence id 0x%016"PRIx64" on"
					 " ring %d)\n",
					 target_seq[i], last_seq[i], i);

				/* remember that we need a reset */
				rdev->needs_reset = true;
#ifdef __NetBSD__
				DRM_SPIN_WAKEUP_ALL(&rdev->fence_queue,
				    &rdev->fence_lock);
#else
				wake_up_all(&rdev->fence_queue);
#endif
				r = -EDEADLK;
				goto out;
			}
		}
	}
	r = 0;
out:	spin_unlock(&rdev->fence_lock);
	return r;
}
/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
				     u64 *target_seq, bool intr)
{
	unsigned long timeout, last_activity, tmp;
	unsigned i, ring = RADEON_NUM_RINGS;
	bool signaled;
	int r;

	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i]) {
			continue;
		}

		/* use the most recent one as indicator */
		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
			last_activity = rdev->fence_drv[i].last_activity;
		}

		/* For lockup detection just pick the lowest ring we are
		 * actively waiting for
		 */
		if (i < ring) {
			ring = i;
		}
	}

	/* nothing to wait for ? */
	if (ring == RADEON_NUM_RINGS) {
		return -ENOENT;
	}

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * either way, wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}

		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		}
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
			}
		}
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			mutex_lock(&rdev->ring_lock);
			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
					tmp = rdev->fence_drv[i].last_activity;
				}
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != tmp) {
				last_activity = tmp;
				mutex_unlock(&rdev->ring_lock);
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
					 target_seq[ring]);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				mutex_unlock(&rdev->ring_lock);
				return -EDEADLK;
			}
			mutex_unlock(&rdev->ring_lock);
		}
	}
	return 0;
}
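/*
 * Sketch (illustration only, not driver code; the helper and its names are
 * hypothetical): the timeout computation above sleeps only for the remainder
 * of RADEON_FENCE_JIFFIES_TIMEOUT measured from the last fence activity, so
 * the lockup check runs roughly once per interval of fence silence.
 */
static unsigned long example_wait_budget(unsigned long now,
					 unsigned long last_activity,
					 unsigned long check_interval)
{
	unsigned long cutoff = now - check_interval;

	if (time_after(last_activity, cutoff))
		return last_activity - cutoff;	/* jiffies left in this interval */

	/* interval already elapsed (or jiffies wrapped): poll almost immediately */
	return 1;
}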
/**
 * radeon_fence_wait_seq - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence numbers.  Helper function
 * for radeon_fence_wait_*().
 * Returns 0 if the sequence numbers have passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
				 bool intr)
{
	uint64_t last_seq[RADEON_NUM_RINGS];
	bool signaled;
	int i, r;

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {

		/* Save current sequence values, used to check for GPU lockups */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (!target_seq[i])
				continue;

			last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
			trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
			radeon_irq_kms_sw_irq_get(rdev, i);
		}

		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue, (
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
				 || rdev->needs_reset),
				RADEON_FENCE_JIFFIES_TIMEOUT);
		} else {
			r = wait_event_timeout(rdev->fence_queue, (
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
				 || rdev->needs_reset),
				RADEON_FENCE_JIFFIES_TIMEOUT);
		}

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (!target_seq[i])
				continue;

			radeon_irq_kms_sw_irq_put(rdev, i);
			trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
		}

		if (unlikely(r < 0))
			return r;

		if (unlikely(!signaled)) {
			if (rdev->needs_reset)
				return -EDEADLK;

			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r)
				continue;

			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
				if (!target_seq[i])
					continue;

				if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
					break;
			}

			if (i != RADEON_NUM_RINGS)
				continue;

			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
				if (!target_seq[i])
					continue;

				if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
					break;
			}

			if (i < RADEON_NUM_RINGS) {
				/* good news we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for "
					 "0x%016llx last fence id 0x%016llx on"
					 " ring %d)\n",
					 target_seq[i], last_seq[i], i);

				/* remember that we need a reset */
				rdev->needs_reset = true;
				wake_up_all(&rdev->fence_queue);
				return -EDEADLK;
			}
		}
	}
	return 0;
}
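/*
 * Sketch of a multi-ring caller (hypothetical name, illustration only):
 * radeon_fence_wait_any()-style usage collects one target sequence per ring
 * before handing the array to radeon_fence_wait_seq() above; rings without
 * a fence stay zero and are skipped by the helper.
 */
static int example_wait_any(struct radeon_device *rdev,
			    struct radeon_fence **fences, bool intr)
{
	u64 seq[RADEON_NUM_RINGS] = {};
	unsigned i;
	bool any = false;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!fences[i])
			continue;

		seq[i] = fences[i]->seq;
		any = true;
	}

	/* nothing to wait for */
	if (!any)
		return -ENOENT;

	return radeon_fence_wait_seq(rdev, seq, intr);
}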
/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
				 unsigned ring, bool intr, bool lock_ring)
{
	unsigned long timeout, last_activity;
	uint64_t seq;
	unsigned i;
	bool signaled;
	int r;

	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
		if (!rdev->ring[ring].ready) {
			return -EBUSY;
		}

		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * either way, wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}
		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
		/* Save current last activity value, used to check for GPU lockups */
		last_activity = rdev->fence_drv[ring].last_activity;

		trace_radeon_fence_wait_begin(rdev->ddev, seq);
		radeon_irq_kms_sw_irq_get(rdev, ring);
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		}
		radeon_irq_kms_sw_irq_put(rdev, ring);
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			/* check if sequence value has changed since last_activity */
			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
				continue;
			}

			if (lock_ring) {
				mutex_lock(&rdev->ring_lock);
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[ring].last_activity) {
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
					 target_seq, seq);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				return -EDEADLK;
			}

			if (lock_ring) {
				mutex_unlock(&rdev->ring_lock);
			}
		}
	}
	return 0;
}
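/*
 * Illustrative caller (sketch, hypothetical name): wait on one fence using
 * the single-sequence helper above.  Passing lock_ring = true lets the
 * helper take rdev->ring_lock itself when it needs to run lockup detection.
 */
static int example_wait_fence_locked(struct radeon_fence *fence, bool intr)
{
	return radeon_fence_wait_seq(fence->rdev, fence->seq,
				     fence->ring, intr, true);
}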