/* caller must hold ring lock */ int radeon_semaphore_sync_rings(struct radeon_device *rdev, struct radeon_semaphore *semaphore, int signaler, int waiter) { int r; /* no need to signal and wait on the same ring */ if (signaler == waiter) { return 0; } /* prevent GPU deadlocks */ if (!rdev->ring[signaler].ready) { dev_err(rdev->dev, "Trying to sync to a disabled ring!"); return -EINVAL; } r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8); if (r) { return r; } radeon_semaphore_emit_signal(rdev, signaler, semaphore); radeon_ring_commit(rdev, &rdev->ring[signaler]); /* we assume caller has already allocated space on waiters ring */ radeon_semaphore_emit_wait(rdev, waiter, semaphore); /* for debugging lockup only, used by sysfs debug files */ rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr; rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr; return 0; }
/**
 * radeon_ring_lock - lock the ring and allocate space on it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Takes the ring mutex and reserves @ndw dwords in the ring buffer
 * (all asics).  On success the mutex is left held for the caller to
 * release via the matching unlock path; on failure it is dropped here.
 *
 * Returns 0 on success, error on failure.
 */
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int ret;

	mutex_lock(&rdev->ring_lock);
	ret = radeon_ring_alloc(rdev, ring, ndw);
	if (ret)
		mutex_unlock(&rdev->ring_lock);
	return ret;
}
/**
 * radeon_ring_force_activity - add some nop packets to the ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Adds a nop packet to the ring to force activity (all asics).
 * Used for lockup detection to see if the rptr is advancing.
 * Only acts when the ring is idle (rptr caught up with wptr);
 * allocation failure is silently ignored (best effort).
 */
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_free_size(rdev, ring);

	/* ring still busy, no need to provoke activity */
	if (ring->rptr != ring->wptr)
		return;

	if (radeon_ring_alloc(rdev, ring, 1) == 0) {
		radeon_ring_write(ring, ring->nop);
		radeon_ring_commit(rdev, ring);
	}
}
/** * radeon_semaphore_sync_rings - sync ring to all registered fences * * @rdev: radeon_device pointer * @semaphore: semaphore object to use for sync * @ring: ring that needs sync * * Ensure that all registered fences are signaled before letting * the ring continue. The caller must hold the ring lock. */ int radeon_semaphore_sync_rings(struct radeon_device *rdev, struct radeon_semaphore *semaphore, int ring) { unsigned count = 0; int i, r; for (i = 0; i < RADEON_NUM_RINGS; ++i) { struct radeon_fence *fence = semaphore->sync_to[i]; /* check if we really need to sync */ if (!radeon_fence_need_sync(fence, ring)) continue; /* prevent GPU deadlocks */ if (!rdev->ring[i].ready) { dev_err(rdev->dev, "Syncing to a disabled ring!"); return -EINVAL; } if (++count > RADEON_NUM_SYNCS) { /* not enough room, wait manually */ r = radeon_fence_wait(fence, false); if (r) return r; continue; } /* allocate enough space for sync command */ r = radeon_ring_alloc(rdev, &rdev->ring[i], 16); if (r) { return r; } /* emit the signal semaphore */ if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) { /* signaling wasn't successful wait manually */ radeon_ring_undo(&rdev->ring[i]); r = radeon_fence_wait(fence, false); if (r) return r; continue; } /* we assume caller has already allocated space on waiters ring */ if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) { /* waiting wasn't successful wait manually */ radeon_ring_undo(&rdev->ring[i]); r = radeon_fence_wait(fence, false); if (r) return r; continue; } radeon_ring_commit(rdev, &rdev->ring[i], false); radeon_fence_note_sync(fence, ring); semaphore->gpu_addr += 8; } return 0; }