Example No. 1
static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
		vm = &fpriv->vm;

		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib_idx != -1)) {
			ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
				return -EINVAL;
			}
			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					   vm, ib_chunk->length_dw * 4);
			if (r) {
				DRM_ERROR("Failed to get const ib !\n");
				return r;
			}
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (copy_from_user(parser->const_ib.ptr,
					       ib_chunk->user_ptr,
					       ib_chunk->length_dw * 4))
				return -EFAULT;
		}

		ib_chunk = &parser->chunks[parser->chunk_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
	}
	ib_chunk = &parser->chunks[parser->chunk_ib_idx];

	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			   vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	if (ib_chunk->kdata)
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
		return -EFAULT;
	return 0;
}
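For orientation, here is a minimal sketch of where radeon_cs_ib_fill sits in the command-submission path, modeled on the modern radeon_cs_ioctl; locking and the later parse/schedule steps are trimmed, and radeon_cs_parser_fini follows the two-argument form used in the examples further below.

/* A minimal sketch, assuming the modern radeon_cs.c flow; error handling
 * and locking are trimmed to show the ordering only. */
static int example_cs_submit(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	memset(&parser, 0, sizeof(parser));
	parser.filp = filp;
	parser.rdev = rdev;

	r = radeon_cs_parser_init(&parser, data);	/* map the user chunks */
	if (r)
		return r;

	r = radeon_cs_ib_fill(rdev, &parser);		/* copy user IB(s), as above */
	if (!r)
		r = radeon_cs_parser_relocs(&parser);	/* then resolve relocations */

	radeon_cs_parser_fini(&parser, r);
	return r;
}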
Example No. 2
/**
 * cik_sdma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	tmp = 0xCAFEDEAD;
	rdev->wb.wb[index/4] = cpu_to_le32(tmp);

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = 1;
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}
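In the driver, per-ring IB tests like this one are invoked through the ASIC dispatch table. A minimal sketch, modeled on radeon_ib_ring_tests, with the failure bookkeeping the real function does trimmed away:

/* A minimal sketch modeled on radeon_ib_ring_tests(); on failure the real
 * function also forces fence completion and reset bookkeeping. */
int example_ib_ring_tests(struct radeon_device *rdev)
{
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (!ring->ready)
			continue;

		/* dispatches through the ASIC table, e.g. to cik_sdma_ib_test */
		r = radeon_ib_test(rdev, i, ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}
	return 0;
}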
Example No. 3
/**
 * radeon_vce_get_create_msg - generate a VCE create msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct radeon_ib ib;
	uint64_t dummy;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib.gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000030; /* len */
	ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
	ib.ptr[ib.length_dw++] = 0x00000000;
	ib.ptr[ib.length_dw++] = 0x00000042;
	ib.ptr[ib.length_dw++] = 0x0000000a;
	ib.ptr[ib.length_dw++] = 0x00000001;
	ib.ptr[ib.length_dw++] = 0x00000080;
	ib.ptr[ib.length_dw++] = 0x00000060;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x0000000c;
	ib.ptr[ib.length_dw++] = 0x00000000;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = lower_32_bits(dummy);
	ib.ptr[ib.length_dw++] = 0x00000001;

	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = 0x0;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r)
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);

	return r;
}
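Each command in the message is framed as a byte length followed by a command id and payload dwords: 0x0000000c covers the three dwords of the session command, 0x00000030 the twelve dwords of the create command. A hypothetical helper, not part of the driver, makes that framing explicit:

/* Illustrative only: push_len_cmd is a hypothetical helper, not a driver
 * function. It shows the { len, cmd, payload... } framing used above. */
static void push_len_cmd(struct radeon_ib *ib, u32 len, u32 cmd)
{
	ib->ptr[ib->length_dw++] = len;	/* bytes in this command, incl. this header */
	ib->ptr[ib->length_dw++] = cmd;	/* VCE command id, e.g. 0x01000001 = create */
}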
Example No. 4
/**
 * cik_sdma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	int r;
	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
	u32 tmp = 0;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr);
	ib.ptr[3] = 1;
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}
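This variant differs from Example No. 2 only in where it polls for the sentinel: a VRAM scratch page behind an __iomem mapping rather than the GART write-back slot, hence readl/writel instead of plain array accesses. An illustrative side-by-side of the two completion checks (example_sdma_test_done is not a driver function):

/* Illustrative only: contrasts the two completion checks used above. */
static bool example_sdma_test_done(struct radeon_device *rdev, unsigned index)
{
	void __iomem *ptr = (void __iomem *)rdev->vram_scratch.ptr;

	/* Example No. 2: write-back slot in system memory, plain array read */
	u32 wb_val = le32_to_cpu(rdev->wb.wb[index / 4]);

	/* Example No. 4: VRAM scratch page, read through the __iomem mapping */
	u32 scratch_val = readl(ptr);

	return wb_val == 0xDEADBEEF || scratch_val == 0xDEADBEEF;
}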
Example No. 5
/**
 * radeon_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct radeon_ib ib;
	uint64_t dummy;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib.gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
	ib.ptr[ib.length_dw++] = cpu_to_le32(handle);

	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
	ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
	ib.ptr[ib.length_dw++] = cpu_to_le32(lower_32_bits(dummy));
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);

	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000008); /* len */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x02000001); /* destroy cmd */

	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = cpu_to_le32(0x0);

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r)
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);

	return r;
}
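A sketch of how the create/destroy pair is exercised together, modeled on the driver's radeon_vce_ib_test; the session handle and log text here are illustrative:

/* A minimal sketch modeled on the driver's radeon_vce_ib_test; the handle
 * value and log messages are illustrative. */
int example_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_fence *fence = NULL;
	int r;

	r = radeon_vce_get_create_msg(rdev, ring->idx, 1, NULL);
	if (r) {
		DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = radeon_vce_get_destroy_msg(rdev, ring->idx, 1, &fence);
	if (r) {
		DRM_ERROR("radeon: failed to get destroy msg (%d).\n", r);
		goto error;
	}

	r = radeon_fence_wait(fence, false);
	if (r)
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
	else
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
error:
	radeon_fence_unref(&fence);
	return r;
}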
Example No. 6
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	int r;

	mutex_lock(&rdev->cs_mutex);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_ib_get(rdev, &parser.ib);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_cs_parser_relocs(&parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib->length_dw = ib_chunk->length_dw;
	r = radeon_cs_parse(&parser);
	if (r || parser.parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_ib_schedule(rdev, parser.ib);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	radeon_cs_parser_fini(&parser, r);
	mutex_unlock(&rdev->cs_mutex);
	return r;
}
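From userspace, this ioctl is reached via DRM_IOCTL_RADEON_CS with a table of chunk pointers. A minimal sketch, assuming libdrm's radeon_drm.h and eliding the relocation chunk and buffer-object setup a real submission needs:

/* A minimal userspace sketch, assuming libdrm's radeon_drm.h; relocation
 * chunks and BO setup are elided, so a real submission needs more. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <radeon_drm.h>

static int example_submit_ib(int fd, uint32_t *ib, uint32_t len_dw)
{
	struct drm_radeon_cs_chunk chunk;
	uint64_t chunk_array[1];
	struct drm_radeon_cs cs;

	memset(&chunk, 0, sizeof(chunk));
	chunk.chunk_id = RADEON_CHUNK_ID_IB;	/* this chunk is the command IB */
	chunk.length_dw = len_dw;
	chunk.chunk_data = (uintptr_t)ib;	/* the user_ptr the kernel copies from */

	chunk_array[0] = (uintptr_t)&chunk;

	memset(&cs, 0, sizeof(cs));
	cs.num_chunks = 1;
	cs.chunks = (uintptr_t)chunk_array;	/* array of pointers to chunks */

	return ioctl(fd, DRM_IOCTL_RADEON_CS, &cs);
}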
Example No. 7
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	int r;

	mutex_lock(&rdev->cs_mutex);
	if (rdev->gpu_lockup) {
		mutex_unlock(&rdev->cs_mutex);
		return -EINVAL;
	}

	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_ib_get(rdev, &parser.ib);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_cs_parser_relocs(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}

	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib->length_dw = ib_chunk->length_dw;
	r = radeon_cs_parse(&parser);
	if (r || parser.parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		radeon_cs_parser_fini(&parser, r);
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}
	r = radeon_ib_schedule(rdev, parser.ib);
	if (r) {
		DRM_ERROR("Faild to schedule IB !\n");
	}
	radeon_cs_parser_fini(&parser, r);
	mutex_unlock(&rdev->cs_mutex);
	return r;
}