Example #1
/**
 * radeon_vce_ring_test - test if VCE ring is working
 *
 * @rdev: radeon_device pointer
 * @ring: the engine to test on
 *
 * Submit a single VCE_CMD_END and verify that the ring's read pointer advances.
 */
int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
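	/* snapshot the read pointer; it should advance once the engine consumes our command */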
	uint32_t rptr = vce_v1_0_get_rptr(rdev, ring);
	unsigned i;
	int r;

	r = radeon_ring_lock(rdev, ring, 16);
	if (r) {
		DRM_ERROR("radeon: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, VCE_CMD_END);
	radeon_ring_unlock_commit(rdev, ring);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (vce_v1_0_get_rptr(rdev, ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}
Example #2
/**
 * radeon_test_create_and_emit_fence - emit a test fence on the given ring
 *
 * @rdev: radeon_device pointer
 * @ring: ring to emit on
 * @fence: resulting fence
 *
 * Emit a fence in the way the ring type requires: the UVD ring gets a
 * dummy create/destroy message pair, everything else a plain fence.
 */
static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
					     struct radeon_ring *ring,
					     struct radeon_fence **fence)
{
	int r;

	if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
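		/* UVD fences ride on session messages: create a dummy session, then destroy it */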
		r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}
	} else {
		r = radeon_ring_lock(rdev, ring, 64);
		if (r) {
			DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
			return r;
		}
		r = radeon_fence_emit(rdev, fence, ring->idx);
		if (r) {
			DRM_ERROR("Failed to emit fence\n");
			radeon_ring_unlock_undo(rdev, ring);
			return r;
		}
		radeon_ring_unlock_commit(rdev, ring);
	}
	return 0;
}
Example #3
/**
 * uvd_v1_0_ring_test - register write test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 *
 * Test if we can successfully write to the context register
 */
int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t tmp = 0;
	unsigned i;
	int r;

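	/* seed the scratch register, then ask the ring to overwrite it */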
	WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(UVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
Example #4
/**
 * uvd_v1_0_init - start and test UVD block
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
int uvd_v1_0_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	uint32_t tmp;
	int r;

	/* raise clocks while booting up the VCPU */
	if (rdev->family < CHIP_RV740)
		radeon_set_uvd_clocks(rdev, 10000, 10000);
	else
		radeon_set_uvd_clocks(rdev, 53300, 40000);

	r = uvd_v1_0_start(rdev);
	if (r)
		goto done;

	ring->ready = true;
	r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

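	/* 10 dwords: five register/value pairs configuring semaphore behaviour */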
	r = radeon_ring_lock(rdev, ring, 10);
	if (r) {
		DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
	radeon_ring_write(ring, 0x8);

	radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
	radeon_ring_write(ring, 3);

	radeon_ring_unlock_commit(rdev, ring);

done:
	/* lower clocks again */
	radeon_set_uvd_clocks(rdev, 0, 0);

	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}
Example #5
/**
 * cik_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (CIK).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
				  uint64_t src_offset, uint64_t dst_offset,
				  unsigned num_gpu_pages,
				  struct reservation_object *resv)
{
	struct radeon_semaphore *sem = NULL;
	struct radeon_fence *fence;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return ERR_PTR(r);
	}

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return ERR_PTR(r);
	}

	radeon_semaphore_sync_resv(rdev, sem, resv, false);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

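	/* a single SDMA copy packet moves at most 0x1fffff bytes, so chunk the transfer */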
	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0x1fffff)
			cur_size_in_bytes = 0x1fffff;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, cur_size_in_bytes);
		radeon_ring_write(ring, 0); /* src/dst endian swap */
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, upper_32_bits(src_offset));
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset));
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_semaphore_free(rdev, &sem, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_semaphore_free(rdev, &sem, fence);

	return fence;
}
Example #6
/**
 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 * @const_ib: Const IB to schedule (SI only)
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
		       struct radeon_ib *const_ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: Nothing in the IB to report. */
		dev_err(rdev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
	if (r) {
		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	/* grab a vm id if necessary */
	if (ib->vm) {
		struct radeon_fence *vm_id_fence;
		vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
		radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
	}

	/* sync with other rings */
	r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
	if (r) {
		dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}

	if (ib->vm)
		radeon_vm_flush(rdev, ib->vm, ib->ring);

	if (const_ib) {
		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
		radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
	}
	radeon_ring_ib_execute(rdev, ib->ring, ib);
	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
	if (r) {
		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}
	if (const_ib) {
		const_ib->fence = radeon_fence_ref(ib->fence);
	}

	if (ib->vm)
		radeon_vm_fence(rdev, ib->vm, ib->fence);

	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}
Example #7
/**
 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 * @const_ib: Const IB to schedule (SI only)
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
		       struct radeon_ib *const_ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	bool need_sync = false;
	int i, r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: Nothing in the IB to report. */
		dev_err(rdev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
	if (r) {
		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}
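	/* emit semaphore waits only for the rings we actually depend on */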
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_fence *fence = ib->sync_to[i];
		if (radeon_fence_need_sync(fence, ib->ring)) {
			need_sync = true;
			radeon_semaphore_sync_rings(rdev, ib->semaphore,
						    fence->ring, ib->ring);
			radeon_fence_note_sync(fence, ib->ring);
		}
	}
	/* immediately free semaphore when we don't need to sync */
	if (!need_sync) {
		radeon_semaphore_free(rdev, &ib->semaphore, NULL);
	}
	/* if we can't remember our last VM flush then flush now! */
	/* XXX figure out why we have to flush for every IB */
	if (ib->vm /*&& !ib->vm->last_flush*/) {
		radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
	}
	if (const_ib) {
		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
		radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
	}
	radeon_ring_ib_execute(rdev, ib->ring, ib);
	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
	if (r) {
		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}
	if (const_ib) {
		const_ib->fence = radeon_fence_ref(ib->fence);
	}
	/* we just flushed the VM, remember that */
	if (ib->vm && !ib->vm->last_flush) {
		ib->vm->last_flush = radeon_fence_ref(ib->fence);
	}
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}
Example #8
/**
 * r600_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (r6xx).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
				   uint64_t src_offset, uint64_t dst_offset,
				   unsigned num_gpu_pages,
				   struct reservation_object *resv)
{
	struct radeon_semaphore *sem = NULL;
	struct radeon_fence *fence;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return ERR_PTR(r);
	}

	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return ERR_PTR(r);
	}

	radeon_semaphore_sync_resv(rdev, sem, resv, false);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

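	/* an r6xx DMA copy packet moves at most 0xFFFE dwords per iteration */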
	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFE)
			cur_size_in_dw = 0xFFFE;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
					 (upper_32_bits(src_offset) & 0xff)));
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_semaphore_free(rdev, &sem, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_semaphore_free(rdev, &sem, fence);

	return fence;
}
Example #9
static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
					     struct radeon_ring *ring,
					     struct radeon_fence **fence)
{
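	/* derive a distinct dummy session handle per ring */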
	uint32_t handle = ring->idx ^ 0xdeafbeef;
	int r;

	if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
		r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
		   ring->idx == TN_RING_TYPE_VCE2_INDEX) {
		r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else {
		r = radeon_ring_lock(rdev, ring, 64);
		if (r) {
			DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
			return r;
		}
		r = radeon_fence_emit(rdev, fence, ring->idx);
		if (r) {
			DRM_ERROR("Failed to emit fence\n");
			radeon_ring_unlock_undo(rdev, ring);
			return r;
		}
		radeon_ring_unlock_commit(rdev, ring, false);
	}
	return 0;
}
Example #10
/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	unsigned index;
	u32 tmp;
	u64 gpu_addr;

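	/* each DMA ring gets its own writeback slot for the test */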
	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	tmp = 0xCAFEDEAD;
	rdev->wb.wb[index/4] = cpu_to_le32(tmp);

	r = radeon_ring_lock(rdev, ring, 5);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	radeon_ring_write(ring, lower_32_bits(gpu_addr));
	radeon_ring_write(ring, upper_32_bits(gpu_addr));
	radeon_ring_write(ring, 1); /* number of DWs to follow */
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
Example #11
/**
 * rv515_ring_start - program the initial CP ring state (RV515)
 *
 * @rdev: radeon_device pointer
 */
void rv515_ring_start(struct radeon_device *rdev)
{
	int r;

	r = radeon_ring_lock(rdev, 64);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  ISYNC_ANY2D_IDLE3D |
			  ISYNC_ANY3D_IDLE2D |
			  ISYNC_WAIT_IDLEGUI |
			  ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
	radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
	/* remaining pipeline state setup elided */
	radeon_ring_unlock_commit(rdev);
}
Example #12
/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
	u32 tmp;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

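	/* seed VRAM scratch with a dummy value for the engine to overwrite */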
	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ring_lock(rdev, ring, 5);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
	radeon_ring_write(ring, 1); /* number of DWs to follow */
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
Example #13
/**
 * radeon_ring_restore - append saved commands to the ring again
 *
 * @rdev: radeon_device pointer
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocate space on the ring and restore the previously saved commands.
 */
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
			unsigned size, uint32_t *data)
{
	int i, r;

	if (!size || !data)
		return 0;

	/* restore the saved ring content */
	r = radeon_ring_lock(rdev, ring, size);
	if (r)
		return r;

	for (i = 0; i < size; ++i) {
		radeon_ring_write(ring, data[i]);
	}

	radeon_ring_unlock_commit(rdev, ring);
	kfree(data);
	return 0;
}
Example #14
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: Nothing in the IB to report. */
		dev_err(rdev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
	radeon_fence_emit(rdev, ib->fence);
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}
Example #15
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict, int no_wait_reserve, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem,
			struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	int r, i;

	rdev = radeon_get_rdev(bo->bdev);
	r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev));
	if (unlikely(r)) {
		return r;
	}
	old_start = old_mem->start << PAGE_SHIFT;
	new_start = new_mem->start << PAGE_SHIFT;

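	/* translate TTM placements into MC (GPU) addresses */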
	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

	/* sync other rings */
	if (rdev->family >= CHIP_R600) {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			/* no need to sync to our own or unused rings */
			if (i == radeon_copy_ring_index(rdev) || !rdev->ring[i].ready)
				continue;

			if (!fence->semaphore) {
				r = radeon_semaphore_create(rdev, &fence->semaphore);
				/* FIXME: handle semaphore error */
				if (r)
					continue;
			}

			r = radeon_ring_lock(rdev, &rdev->ring[i], 3);
			/* FIXME: handle ring lock error */
			if (r)
				continue;
			radeon_semaphore_emit_signal(rdev, i, fence->semaphore);
			radeon_ring_unlock_commit(rdev, &rdev->ring[i]);

			r = radeon_ring_lock(rdev, &rdev->ring[radeon_copy_ring_index(rdev)], 3);
			/* FIXME: handle ring lock error */
			if (r)
				continue;
			radeon_semaphore_emit_wait(rdev, radeon_copy_ring_index(rdev), fence->semaphore);
			radeon_ring_unlock_commit(rdev, &rdev->ring[radeon_copy_ring_index(rdev)]);
		}
	}

	r = radeon_copy(rdev, old_start, new_start,
			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
			fence);
	/* FIXME: handle copy error */
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
				      evict, no_wait_reserve, no_wait_gpu, new_mem);
	radeon_fence_unref(&fence);
	return r;
}
Example #16
void radeon_test_ring_sync2(struct radeon_device *rdev,
			    struct radeon_ring *ringA,
			    struct radeon_ring *ringB,
			    struct radeon_ring *ringC)
{
	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
	struct radeon_semaphore *semaphore = NULL;
	int ridxA = radeon_ring_index(rdev, ringA);
	int ridxB = radeon_ring_index(rdev, ringB);
	int ridxC = radeon_ring_index(rdev, ringC);
	bool sigA, sigB;
	int i, r;

	r = radeon_fence_create(rdev, &fenceA, ridxA);
	if (r) {
		DRM_ERROR("Failed to create sync fence 1\n");
		goto out_cleanup;
	}
	r = radeon_fence_create(rdev, &fenceB, ridxB);
	if (r) {
		DRM_ERROR("Failed to create sync fence 2\n");
		goto out_cleanup;
	}

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

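	/* park rings A and B behind the same semaphore; ring C will release it twice */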
	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ridxA);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
	radeon_fence_emit(rdev, fenceA);
	radeon_ring_unlock_commit(rdev, ringA);

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ridxB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ridxB, semaphore);
	radeon_fence_emit(rdev, fenceB);
	radeon_ring_unlock_commit(rdev, ringB);

	mdelay(1000);

	if (radeon_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (radeon_fence_signaled(fenceB)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
	radeon_ring_unlock_commit(rdev, ringC);

	for (i = 0; i < 30; ++i) {
		mdelay(100);
		sigA = radeon_fence_signaled(fenceA);
		sigB = radeon_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fence A and B has been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
	radeon_ring_unlock_commit(rdev, ringC);

	mdelay(1000);

	r = radeon_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = radeon_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	if (semaphore)
		radeon_semaphore_free(rdev, semaphore);

	if (fenceA)
		radeon_fence_unref(&fenceA);

	if (fenceB)
		radeon_fence_unref(&fenceB);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}
Example #17
void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *ringA,
			   struct radeon_ring *ringB)
{
	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
	struct radeon_semaphore *semaphore = NULL;
	int ridxA = radeon_ring_index(rdev, ringA);
	int ridxB = radeon_ring_index(rdev, ringB);
	int r;

	r = radeon_fence_create(rdev, &fence1, ridxA);
	if (r) {
		DRM_ERROR("Failed to create sync fence 1\n");
		goto out_cleanup;
	}
	r = radeon_fence_create(rdev, &fence2, ridxA);
	if (r) {
		DRM_ERROR("Failed to create sync fence 2\n");
		goto out_cleanup;
	}

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

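	/* queue two fences on ring A, each gated behind a semaphore wait */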
	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ridxA);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
	radeon_fence_emit(rdev, fence1);
	radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
	radeon_fence_emit(rdev, fence2);
	radeon_ring_unlock_commit(rdev, ringA);

	mdelay(1000);

	if (radeon_fence_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
	radeon_ring_unlock_commit(rdev, ringB);

	r = radeon_fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	mdelay(1000);

	if (radeon_fence_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
	radeon_ring_unlock_commit(rdev, ringB);

	r = radeon_fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

out_cleanup:
	if (semaphore)
		radeon_semaphore_free(rdev, semaphore);

	if (fence1)
		radeon_fence_unref(&fence1);

	if (fence2)
		radeon_fence_unref(&fence2);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}
Example #18
static int cayman_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
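	/* initialize the micro engine (ME) before enabling the CP */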
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);

	cayman_cp_enable(rdev, true);

	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(ring, cayman_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	radeon_ring_unlock_commit(rdev, ring);

	/* XXX init other rings */

	return 0;
}
Example #19
void radeon_test_ring_sync(struct radeon_device *rdev,
                           struct radeon_ring *ringA,
                           struct radeon_ring *ringB)
{
    struct radeon_fence *fence1 = NULL, *fence2 = NULL;
    struct radeon_semaphore *semaphore = NULL;
    int r;

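    /* this variant uses the API where radeon_fence_emit() also allocates the fence */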
    r = radeon_semaphore_create(rdev, &semaphore);
    if (r) {
        DRM_ERROR("Failed to create semaphore\n");
        goto out_cleanup;
    }

    r = radeon_ring_lock(rdev, ringA, 64);
    if (r) {
        DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
        goto out_cleanup;
    }
    radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
    r = radeon_fence_emit(rdev, &fence1, ringA->idx);
    if (r) {
        DRM_ERROR("Failed to emit fence 1\n");
        radeon_ring_unlock_undo(rdev, ringA);
        goto out_cleanup;
    }
    radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
    r = radeon_fence_emit(rdev, &fence2, ringA->idx);
    if (r) {
        DRM_ERROR("Failed to emit fence 2\n");
        radeon_ring_unlock_undo(rdev, ringA);
        goto out_cleanup;
    }
    radeon_ring_unlock_commit(rdev, ringA);

    DRM_MDELAY(1000);

    if (radeon_fence_signaled(fence1)) {
        DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
        goto out_cleanup;
    }

    r = radeon_ring_lock(rdev, ringB, 64);
    if (r) {
        DRM_ERROR("Failed to lock ring B %p\n", ringB);
        goto out_cleanup;
    }
    radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
    radeon_ring_unlock_commit(rdev, ringB);

    r = radeon_fence_wait(fence1, false);
    if (r) {
        DRM_ERROR("Failed to wait for sync fence 1\n");
        goto out_cleanup;
    }

    DRM_MDELAY(1000);

    if (radeon_fence_signaled(fence2)) {
        DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
        goto out_cleanup;
    }

    r = radeon_ring_lock(rdev, ringB, 64);
    if (r) {
        DRM_ERROR("Failed to lock ring B %p\n", ringB);
        goto out_cleanup;
    }
    radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
    radeon_ring_unlock_commit(rdev, ringB);

    r = radeon_fence_wait(fence2, false);
    if (r) {
        DRM_ERROR("Failed to wait for sync fence 1\n");
        goto out_cleanup;
    }

out_cleanup:
    radeon_semaphore_free(rdev, &semaphore, NULL);

    if (fence1)
        radeon_fence_unref(&fence1);

    if (fence2)
        radeon_fence_unref(&fence2);

    if (r)
        DRM_ERROR("Error while testing ring sync (%d).\n", r);
}
Example #20
static void radeon_test_ring_sync2(struct radeon_device *rdev,
			    struct radeon_ring *ringA,
			    struct radeon_ring *ringB,
			    struct radeon_ring *ringC)
{
	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
	struct radeon_semaphore *semaphore = NULL;
	bool sigA, sigB;
	int i, r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);
	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	msleep(1000);

	if (radeon_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (radeon_fence_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

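	/* poll for up to three seconds; exactly one fence should have signaled */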
	for (i = 0; i < 30; ++i) {
		msleep(100);
		sigA = radeon_fence_signaled(fenceA);
		sigB = radeon_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fence A and B has been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	msleep(1000);

	r = radeon_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = radeon_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fenceA)
		radeon_fence_unref(&fenceA);

	if (fenceB)
		radeon_fence_unref(&fenceB);

	if (r)
		pr_warn("Error while testing ring sync (%d)\n", r);
}