Example #1
0
/*
 * sync_file_ioctl_fence_info - handle the "get fence info" ioctl
 * @sync_file: sync_file being queried
 * @arg: userspace pointer to a struct sync_file_info
 *
 * Copies a sync_file_info header from userspace, optionally fills a
 * caller-provided array of sync_fence_info (one entry per backing fence),
 * and writes the updated header (name, overall status, fence count) back.
 *
 * Returns 0 on success, -EFAULT on a failed user copy, -EINVAL on bad
 * flags/pad or an undersized user array, -ENOMEM on allocation failure.
 */
static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
				       unsigned long arg)
{
	struct sync_file_info info;
	struct sync_fence_info *fence_info = NULL;
	struct fence **fences;
	__u32 size;
	int num_fences, ret, i;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	/* flags and pad are reserved; reject nonzero values for forward compat */
	if (info.flags || info.pad)
		return -EINVAL;

	fences = get_fences(sync_file, &num_fences);

	/*
	 * Passing num_fences = 0 means that userspace doesn't want to
	 * retrieve any sync_fence_info. If num_fences = 0 we skip filling
	 * sync_fence_info and return the actual number of fences on
	 * info->num_fences.
	 */
	if (!info.num_fences)
		goto no_fences;

	/* userspace array must be large enough for every backing fence */
	if (info.num_fences < num_fences)
		return -EINVAL;

	size = num_fences * sizeof(*fence_info);
	fence_info = kzalloc(size, GFP_KERNEL);
	if (!fence_info)
		return -ENOMEM;

	for (i = 0; i < num_fences; i++)
		sync_fill_fence_info(fences[i], &fence_info[i]);

	if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
			 size)) {
		ret = -EFAULT;
		goto out;
	}

no_fences:
	/* write back the header: name, overall signal state, real fence count */
	strlcpy(info.name, sync_file->name, sizeof(info.name));
	info.status = fence_is_signaled(sync_file->fence);
	info.num_fences = num_fences;

	if (copy_to_user((void __user *)arg, &info, sizeof(info)))
		ret = -EFAULT;
	else
		ret = 0;

out:
	/* kfree(NULL) is a no-op, so the no_fences path is safe here */
	kfree(fence_info);

	return ret;
}
Example #2
0
/*
 * add_fence - append a fence to a collection array
 * @fences: destination array
 * @i: in/out index of the next free slot
 * @fence: fence to record
 *
 * The fence is always written to the current slot. Only unsignaled fences
 * keep their slot (and gain a reference); a signaled fence is left to be
 * overwritten by the next call, effectively collapsing signaled fences out
 * of the array. NOTE(review): a signaled fence stored in the last slot
 * carries no extra reference — callers appear to rely on that; confirm.
 */
static void add_fence(struct fence **fences, int *i, struct fence *fence)
{
	fences[*i] = fence;

	if (fence_is_signaled(fence))
		return;

	fence_get(fence);
	(*i)++;
}
Example #3
0
/*
 * describe_fence - print a one-line debugfs description of a fence
 * @fence: fence to describe
 * @type: label printed in front of the entry (e.g. exclusive/shared)
 * @m: seq_file receiving the output
 *
 * Signaled fences are skipped; only pending fences are listed.
 */
static void describe_fence(struct fence *fence, const char *type,
		struct seq_file *m)
{
	if (fence_is_signaled(fence))
		return;

	seq_printf(m, "\t%9s: %s %s seq %u\n", type,
			fence->ops->get_driver_name(fence),
			fence->ops->get_timeline_name(fence),
			fence->seqno);
}
Example #4
0
/*
 * sync_fill_fence_info - fill a userspace-visible info record for one fence
 * @fence: source fence
 * @info: record to populate
 *
 * status is 0 while pending; once signaled it is 1 on success or the
 * negative fence->status error code.
 */
static void sync_fill_fence_info(struct fence *fence,
				 struct sync_fence_info *info)
{
	strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
		sizeof(info->obj_name));
	strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
		sizeof(info->driver_name));

	if (!fence_is_signaled(fence))
		info->status = 0;
	else
		info->status = (fence->status >= 0) ? 1 : fence->status;

	info->timestamp_ns = ktime_to_ns(fence->timestamp);
}
Example #5
0
/*
 * sync_file_poll - poll/select support for a sync_file fd
 * @file: the sync_file's file
 * @wait: poll table from the core poll machinery
 *
 * Arms a signal callback on the backing fence the first time a real
 * (blocking-capable) poll happens, then reports POLLIN once the fence
 * has signaled.
 */
static unsigned int sync_file_poll(struct file *file, poll_table *wait)
{
	struct sync_file *sync_file = file->private_data;

	poll_wait(file, &sync_file->wq, wait);

	/*
	 * test_and_set_bit makes callback registration one-shot: only the
	 * first blocking poller installs fence_check_cb_func. If the fence
	 * cannot take the callback (already signaled), wake waiters now so
	 * nobody sleeps forever.
	 */
	if (!poll_does_not_wait(wait) &&
	    !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
		if (fence_add_callback(sync_file->fence, &sync_file->cb,
				       fence_check_cb_func) < 0)
			wake_up_all(&sync_file->wq);
	}

	return fence_is_signaled(sync_file->fence) ? POLLIN : 0;
}
Example #6
0
/*
 * reservation_object_test_signaled_single - test one fence under RCU
 * @passed_fence: fence to test (RCU-protected pointer)
 *
 * Returns 1 if the fence is signaled, 0 if it is still pending, or -1 if
 * the fence was concurrently released (refcount hit zero) and the caller
 * must retry its RCU read sequence.
 *
 * Fix: the unsignaled path previously declared a second local `int ret;`
 * that shadowed the outer one, so the !!fence_is_signaled() result was
 * written to the inner variable and discarded — the function then always
 * returned the stale outer value 1. The shadowing declaration is removed
 * so the real result (or -1 on a lost ref) propagates to the caller.
 */
static inline int
reservation_object_test_signaled_single(struct fence *passed_fence)
{
	struct fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		/* take a reference so the fence can't vanish while we test it */
		fence = fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!fence_is_signaled(fence);
		fence_put(fence);
	}
	return ret;
}
Example #7
0
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	/* allocate the next sequence number and bind it to this ring's context */
	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	fence_init(&fence->base, &amdgpu_fence_ops,
		   &ring->fence_drv.lock,
		   adev->fence_context + ring->idx,
		   seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, AMDGPU_FENCE_FLAG_INT);

	/* the fence ring buffer wraps: seq maps onto a fixed-size slot array */
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !fence_is_signaled(old)) {
		/* slot still holds an unsignaled fence: the ring buffer has
		 * wrapped faster than the GPU retired work — block until the
		 * old fence completes before reusing its slot.
		 */
		DRM_INFO("rcu slot is busy\n");
		fence_wait(old, false);
	}

	/* publish the new fence for RCU readers; the slot owns one reference */
	rcu_assign_pointer(*ptr, fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
Example #8
0
/*
 * reservation_object_wait_timeout_rcu - wait for the object's fences
 * @obj: reservation object to wait on
 * @wait_all: also wait on all shared fences, not just the exclusive one
 * @intr: make the wait interruptible
 * @timeout: jiffies to wait, passed through to fence_wait_timeout
 *
 * Locklessly snapshots the object's fences under RCU + seqcount, waits on
 * one pending fence at a time, and retries the whole snapshot whenever the
 * fence lists change underneath it or a fence reference is lost.
 *
 * Returns the remaining timeout on success (all requested fences signaled),
 * or the error/zero result from fence_wait_timeout.
 */
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
					 bool wait_all, bool intr,
					 unsigned long timeout)
{
	struct fence *fence;
	unsigned seq, shared_count, i = 0;
	long ret = timeout;

retry:
	fence = NULL;
	shared_count = 0;
	/* NOTE(review): seqcount is sampled before rcu_read_lock(); the retry
	 * checks below revalidate it, but confirm this ordering is intended. */
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (wait_all) {
		struct reservation_object_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		/* find the first still-pending shared fence we can pin */
		for (i = 0; i < shared_count; ++i) {
			struct fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
				continue;

			/* ref lost under us: the list is stale, resnapshot */
			if (!fence_get_rcu(lfence))
				goto unlock_retry;

			if (fence_is_signaled(lfence)) {
				fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	/* no pending shared fences (or !wait_all): check the exclusive fence */
	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl &&
		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
			if (!fence_get_rcu(fence_excl))
				goto unlock_retry;

			if (fence_is_signaled(fence_excl))
				fence_put(fence_excl);
			else
				fence = fence_excl;
		}
	}

	rcu_read_unlock();
	if (fence) {
		/* wait outside the RCU section; we hold our own reference */
		ret = fence_wait_timeout(fence, intr, ret);
		fence_put(fence);
		/* in wait_all mode, more shared fences may remain past index i */
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
Example #9
0
/*
 * amdgpu_test_ring_sync2 - three-ring semaphore sanity test
 * @adev: amdgpu device
 * @ringA: first ring blocked on the semaphore
 * @ringB: second ring blocked on the semaphore
 * @ringC: ring used to signal the semaphore
 *
 * Makes rings A and B each wait on one semaphore, verifies neither fence
 * signals prematurely, then signals twice from ring C and checks that
 * exactly one fence completes per signal.
 *
 * Fix: the two ring-C lock-failure messages wrongly said "ring B"
 * (copy-paste from the ring B path); they now name ring C.
 */
static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
			    struct amdgpu_ring *ringA,
			    struct amdgpu_ring *ringB,
			    struct amdgpu_ring *ringC)
{
	struct fence *fenceA = NULL, *fenceB = NULL;
	struct amdgpu_semaphore *semaphore = NULL;
	/* initialized defensively; assigned in the poll loop below */
	bool sigA = false, sigB = false;
	int i, r;

	r = amdgpu_semaphore_create(adev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	/* ring A: queue a semaphore wait, then a fence behind it */
	r = amdgpu_ring_lock(ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_wait(ringA, semaphore);
	amdgpu_ring_unlock_commit(ringA);

	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	/* ring B: same pattern, second waiter on the same semaphore */
	r = amdgpu_ring_lock(ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_wait(ringB, semaphore);
	amdgpu_ring_unlock_commit(ringB);
	r = amdgpu_test_create_and_emit_fence(adev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	/* neither fence may complete before the semaphore is signaled */
	if (fence_is_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (fence_is_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	/* first signal from ring C should release exactly one waiter */
	r = amdgpu_ring_lock(ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_signal(ringC, semaphore);
	amdgpu_ring_unlock_commit(ringC);

	/* poll up to 3s for one of the fences to signal */
	for (i = 0; i < 30; ++i) {
		mdelay(100);
		sigA = fence_is_signaled(fenceA);
		sigB = fence_is_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fence A and B has been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

	/* second signal releases the remaining waiter */
	r = amdgpu_ring_lock(ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_signal(ringC, semaphore);
	amdgpu_ring_unlock_commit(ringC);

	mdelay(1000);

	r = fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	amdgpu_semaphore_free(adev, &semaphore, NULL);

	if (fenceA)
		fence_put(fenceA);

	if (fenceB)
		fence_put(fenceB);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}
Example #10
0
/*
 * amdgpu_test_ring_sync - two-ring semaphore sanity test
 * @adev: amdgpu device
 * @ringA: ring that waits on the semaphore (twice)
 * @ringB: ring that signals the semaphore
 *
 * Queues two semaphore waits with fences on ring A, then releases them one
 * at a time from ring B, verifying each fence only signals after its
 * corresponding semaphore signal.
 *
 * Fix: the failure message for the second fence_wait wrongly said
 * "sync fence 1" (copy-paste); it now names fence 2.
 */
void amdgpu_test_ring_sync(struct amdgpu_device *adev,
			   struct amdgpu_ring *ringA,
			   struct amdgpu_ring *ringB)
{
	struct fence *fence1 = NULL, *fence2 = NULL;
	struct amdgpu_semaphore *semaphore = NULL;
	int r;

	r = amdgpu_semaphore_create(adev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	/* first wait + fence on ring A */
	r = amdgpu_ring_lock(ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_wait(ringA, semaphore);
	amdgpu_ring_unlock_commit(ringA);

	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence1);
	if (r)
		goto out_cleanup;

	/* second wait + fence queued behind the first on ring A */
	r = amdgpu_ring_lock(ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_wait(ringA, semaphore);
	amdgpu_ring_unlock_commit(ringA);

	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence2);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	/* fence 1 must still be pending before any signal */
	if (fence_is_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	/* first signal from ring B releases the first wait */
	r = amdgpu_ring_lock(ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_signal(ringB, semaphore);
	amdgpu_ring_unlock_commit(ringB);

	r = fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	mdelay(1000);

	/* fence 2 must still be pending until the second signal */
	if (fence_is_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = amdgpu_ring_lock(ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_signal(ringB, semaphore);
	amdgpu_ring_unlock_commit(ringB);

	r = fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 2\n");
		goto out_cleanup;
	}

out_cleanup:
	amdgpu_semaphore_free(adev, &semaphore, NULL);

	if (fence1)
		fence_put(fence1);

	if (fence2)
		fence_put(fence2);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}