Example #1
/**
 * radeon_semaphore_sync_rings - sync ring to all registered fences
 *
 * @rdev: radeon_device pointer
 * @semaphore: semaphore object to use for sync
 * @ring: ring that needs sync
 *
 * Ensure that all registered fences are signaled before letting
 * the ring continue. The caller must hold the ring lock.
 */
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
				struct radeon_semaphore *semaphore,
				int ring)
{
	unsigned count = 0;
	int i, r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_fence *fence = semaphore->sync_to[i];

		/* check if we really need to sync */
		if (!radeon_fence_need_sync(fence, ring))
			continue;

		/* prevent GPU deadlocks */
		if (!rdev->ring[i].ready) {
			dev_err(rdev->dev, "Syncing to a disabled ring!\n");
			return -EINVAL;
		}

		if (++count > RADEON_NUM_SYNCS) {
			/* not enough room, wait manually */
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		/* allocate enough space for sync command */
		r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
		if (r)
			return r;

		/* emit the signal semaphore */
		if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
			/* signaling wasn't successful, wait manually */
			radeon_ring_undo(&rdev->ring[i]);
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		/* we assume the caller has already allocated space on the waiter's ring */
		if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
			/* waiting wasn't successful, wait manually */
			radeon_ring_undo(&rdev->ring[i]);
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		radeon_ring_commit(rdev, &rdev->ring[i]);
		radeon_fence_note_sync(fence, ring);

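		/* advance to the next 8-byte semaphore slot */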
		semaphore->gpu_addr += 8;
	}

	return 0;
}
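
For context, here is a minimal sketch of how a caller might drive radeon_semaphore_sync_rings(). The surrounding helpers (radeon_semaphore_create(), radeon_semaphore_sync_to(), radeon_ring_lock(), radeon_ring_unlock_commit(), radeon_semaphore_free()) are real radeon driver functions, assumed here with their pre-3.16 signatures to match the snippet above; the flow itself is illustrative, not lifted from the kernel:

/*
 * Illustrative only: sync a job on @ring against a fence from another ring
 * before emitting new commands. radeon_ring_lock() both takes
 * rdev->ring_lock and reserves dwords on the waiter's ring, which is what
 * the "caller has already allocated space" comment above relies on.
 */
static int example_sync_before_emit(struct radeon_device *rdev,
				    struct radeon_fence *other_fence,
				    int ring)
{
	struct radeon_semaphore *sem = NULL;
	int r;

	r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;

	/* register the fence the new commands must wait for */
	radeon_semaphore_sync_to(sem, other_fence);

	/* lock the waiter's ring and reserve space for the wait packets */
	r = radeon_ring_lock(rdev, &rdev->ring[ring], 64);
	if (r)
		goto out;

	/* emit signal/wait pairs, falling back to CPU waits as needed */
	r = radeon_semaphore_sync_rings(rdev, sem, ring);
	if (r) {
		radeon_ring_unlock_undo(rdev, &rdev->ring[ring]);
		goto out;
	}

	/* ... emit the actual commands here ... */

	radeon_ring_unlock_commit(rdev, &rdev->ring[ring]);
out:
	radeon_semaphore_free(rdev, &sem, NULL);
	return r;
}

In real code the semaphore would typically be freed against the job's fence rather than NULL, so its backing memory is not reused while the GPU may still be reading it.
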
Example #2
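/**
 * radeon_ring_unlock_undo - reset the wptr and unlock the ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Call radeon_ring_undo() then unlock the ring (all asics).
 */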
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_undo(ring);
	mutex_unlock(&rdev->ring_lock);
}
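
A typical call site for this helper, sketched under the same pre-3.16 API assumptions; some_emit_that_can_fail() is a hypothetical stand-in for whatever emit step might fail once the ring is locked:

/* Illustrative only: reserve space, emit packets, then either commit them
 * or roll the write pointer back and drop the lock on failure. */
static int example_emit_or_undo(struct radeon_device *rdev,
				struct radeon_ring *ring)
{
	int r;

	/* takes rdev->ring_lock and reserves 16 dwords */
	r = radeon_ring_lock(rdev, ring, 16);
	if (r)
		return r;

	r = some_emit_that_can_fail(rdev, ring); /* hypothetical helper */
	if (r) {
		/* discard the partially written packets and unlock */
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}

	/* make the new packets visible to the GPU and unlock */
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}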