Example 1
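/**
 * drm_sw_fence_new - allocate and initialise a software fence
 *
 * @context: fence context the new fence belongs to
 * @seqno: sequence number of the fence within that context
 *
 * Allocates a struct fence and initialises it through fence_init() with
 * the file-local sw_fence_ops and sw_fence_lock.
 * Returns the new fence on success, ERR_PTR(-ENOMEM) on failure.
 */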
struct fence *drm_sw_fence_new(unsigned int context, unsigned seqno)
{
	struct fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);
	fence_init(fence,
		   &sw_fence_ops,
		   &sw_fence_lock,
		   context, seqno);

	return fence;
}
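Because drm_sw_fence_new() reports allocation failure through ERR_PTR(), a caller checks the result with IS_ERR() before touching the fence. The sketch below is illustrative and not part of the original listing; example_use_sw_fence() is a hypothetical caller.

static int example_use_sw_fence(unsigned int context, unsigned int seqno)
{
	struct fence *fence;

	/* hypothetical caller, shown only to illustrate the ERR_PTR contract */
	fence = drm_sw_fence_new(context, seqno);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* ... use the fence ... */

	fence_put(fence);	/* drop the initial reference from fence_init() */
	return 0;
}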
Example 2
/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	u64 seq = ++rdev->fence_drv[ring].sync_seq[ring];

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	(*fence)->rdev = rdev;
	(*fence)->seq = seq;
	(*fence)->ring = ring;
	(*fence)->is_vm_update = false;
	fence_init(&(*fence)->base, &radeon_fence_ops,
		   &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
	return 0;
}
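A hedged caller-side sketch for the radeon variant (example_radeon_emit_and_wait() is hypothetical and not taken from the driver): the fence returned through the **fence argument is waited on with radeon_fence_wait() and released with radeon_fence_unref(); as the comment in radeon_fence_emit() notes, real callers also hold the ring emission lock around the emit.

static int example_radeon_emit_and_wait(struct radeon_device *rdev, int ring)
{
	struct radeon_fence *fence;
	int r;

	/* illustrative only: real callers hold the ring emission lock here */
	r = radeon_fence_emit(rdev, &fence, ring);
	if (r)
		return r;

	/* block until the GPU has passed the fence, then drop our reference */
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}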
Example 3
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	fence_init(&fence->base, &amdgpu_fence_ops,
		   &ring->fence_drv.lock,
		   adev->fence_context + ring->idx,
		   seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
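The amdgpu variant follows the same pattern but hands back a generic struct fence, so a caller can use the core fence API directly. The sketch below is likewise illustrative (example_amdgpu_emit_and_wait() is hypothetical); it waits with fence_wait() and drops the reference returned through @f with fence_put().

static int example_amdgpu_emit_and_wait(struct amdgpu_ring *ring)
{
	struct fence *f;
	int r;

	/* illustrative only: submission code would emit its commands first */
	r = amdgpu_fence_emit(ring, &f);
	if (r)
		return r;

	/* wait on the generic fence, then drop the reference returned in @f */
	r = fence_wait(f, false);
	fence_put(f);
	return r;
}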