Example #1
static void draw_rect_render(int fd, struct cmd_data *cmd_data,
			     struct buf_data *buf, struct rect *rect,
			     uint32_t color)
{
	drm_intel_bo *src, *dst;
	uint32_t devid = intel_get_drm_devid(fd);
	igt_render_copyfunc_t rendercopy = igt_get_render_copyfunc(devid);
	struct igt_buf src_buf, dst_buf;
	struct intel_batchbuffer *batch;
	uint32_t tiling, swizzle;
	struct buf_data tmp;
	int pixel_size = buf->bpp / 8;
	unsigned adjusted_w, adjusted_dst_x;

	igt_skip_on(!rendercopy);

	/* Rendercopy works at 32bpp, so if you try to do copies on buffers with
	 * smaller bpps you won't succeed if you need to copy "half" of a 32bpp
	 * pixel or something similar. */
	igt_skip_on(rect->x % (32 / buf->bpp) != 0 ||
		    rect->y % (32 / buf->bpp) != 0 ||
		    rect->w % (32 / buf->bpp) != 0 ||
		    rect->h % (32 / buf->bpp) != 0);

	gem_get_tiling(fd, buf->handle, &tiling, &swizzle);

	/* We create a temporary buffer and copy from it using rendercopy. */
	tmp.size = rect->w * rect->h * pixel_size;
	tmp.handle = gem_create(fd, tmp.size);
	tmp.stride = rect->w * pixel_size;
	tmp.bpp = buf->bpp;
	draw_rect_mmap_cpu(fd, &tmp, &(struct rect){0, 0, rect->w, rect->h},
Example #2
static void draw_rect_mmap_wc(int fd, struct buf_data *buf, struct rect *rect,
			      uint32_t color)
{
	uint32_t *ptr;
	uint32_t tiling, swizzle;

	gem_set_domain(fd, buf->handle, I915_GEM_DOMAIN_GTT,
		       I915_GEM_DOMAIN_GTT);
	gem_get_tiling(fd, buf->handle, &tiling, &swizzle);

	/* We haven't implemented support for the older tiling methods yet. */
	if (tiling != I915_TILING_NONE)
		igt_require(intel_gen(intel_get_drm_devid(fd)) >= 5);

	ptr = gem_mmap__wc(fd, buf->handle, 0, buf->size,
			   PROT_READ | PROT_WRITE);

	switch (tiling) {
	case I915_TILING_NONE:
		draw_rect_ptr_linear(ptr, buf->stride, rect, color, buf->bpp);
		break;
	case I915_TILING_X:
		draw_rect_ptr_tiled(ptr, buf->stride, swizzle, rect, color,
				    buf->bpp);
		break;
	default:
		igt_assert(false);
		break;
	}

	igt_assert(munmap(ptr, buf->size) == 0);
}
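The helpers draw_rect_ptr_linear() and draw_rect_ptr_tiled() are not part of this excerpt. As a rough idea of the linear case, here is a minimal sketch, assuming a set_pixel() helper that stores one pixel at a pixel index and a stride given in bytes:

static void draw_rect_ptr_linear(void *ptr, uint32_t stride,
				 struct rect *rect, uint32_t color, int bpp)
{
	int x, y, line_begin;

	for (y = rect->y; y < rect->y + rect->h; y++) {
		/* pixel index of the first pixel in this line */
		line_begin = y * stride / (bpp / 8);
		for (x = rect->x; x < rect->x + rect->w; x++)
			set_pixel(ptr, line_begin + x, color, bpp);
	}
}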
Example #3
int main(int argc, char *argv[])
{
	int i;

	fd = drm_open_any();
	devid = intel_get_drm_devid(fd);

	parse(argc, argv);

	threads = calloc(num_contexts, sizeof(*threads));
	returns = calloc(num_contexts, sizeof(*returns));

	for (i = 0; i < num_contexts; i++)
		pthread_create(&threads[i], NULL, work, &i); /* XXX: every thread gets &i, racing with the loop counter */

	for (i = 0; i < num_contexts; i++) {
		int thread_status, ret;
		void *retval;

		ret = pthread_join(threads[i], &retval);
		if (!ret && retval) {
			thread_status = *(int *)retval;
			if (thread_status)
				exit(thread_status);
		}
	}

	free(returns);
	free(threads);
	close(fd);

	exit(EXIT_SUCCESS);
}
Example #4
static void
processes(void)
{
	int *all_fds;
	uint64_t aperture;
	struct rlimit rlim;
	int ppgtt_mode;
	int ctx_size;
	int obj_size;
	int n;

	igt_skip_on_simulation();

	fd = drm_open_driver_render(DRIVER_INTEL);
	devid = intel_get_drm_devid(fd);
	aperture = gem_aperture_size(fd);

	ppgtt_mode = uses_ppgtt(fd);
	igt_require(ppgtt_mode);

	render_copy = igt_get_render_copyfunc(devid);
	igt_require_f(render_copy, "no render-copy function\n");

	if (ppgtt_mode > 1)
		ctx_size = aperture >> 10; /* Assume full-ppgtt of maximum size */
	else
Example #5
int main(int argc, char **argv)
{
	uint32_t batch[2] = {MI_BATCH_BUFFER_END};
	uint32_t handle;
	uint32_t devid;
	int fd;

	drmtest_subtest_init(argc, argv);

	fd = drm_open_any();
	devid = intel_get_drm_devid(fd);

	handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, batch, sizeof(batch));

	if (drmtest_run_subtest("render"))
		loop(fd, handle, I915_EXEC_RENDER, "render");

	if (drmtest_run_subtest("bsd"))
		if (HAS_BSD_RING(devid))
			loop(fd, handle, I915_EXEC_BSD, "bsd");

	if (drmtest_run_subtest("blt"))
		if (HAS_BLT_RING(devid))
			loop(fd, handle, I915_EXEC_BLT, "blt");


	gem_close(fd, handle);

	close(fd);

	return skipped_all ? 77 : 0;
}
Example #6
static void run_test(int fd, unsigned ring, unsigned flags)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	const uint32_t bbe = MI_BATCH_BUFFER_END;
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_relocation_entry reloc[1024];
	struct drm_i915_gem_execbuffer2 execbuf;
	struct igt_hang_ring hang;
	uint32_t *batch, *b;
	int i;

	gem_require_ring(fd, ring);
	igt_skip_on_f(gen == 6 && (ring & ~(3<<13)) == I915_EXEC_BSD,
		      "MI_STORE_DATA broken on gen6 bsd\n");

	gem_quiescent_gpu(fd);

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)obj;
	execbuf.buffer_count = 2;
	execbuf.flags = ring | (1 << 11);
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE;

	memset(obj, 0, sizeof(obj));
	obj[0].handle = gem_create(fd, 4096);
	obj[0].flags |= EXEC_OBJECT_WRITE;
	obj[1].handle = gem_create(fd, 1024*16 + 4096);
	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
	igt_require(__gem_execbuf(fd, &execbuf) == 0);

	obj[1].relocs_ptr = (uintptr_t)reloc;
	obj[1].relocation_count = 1024;

	batch = gem_mmap__cpu(fd, obj[1].handle, 0, 16*1024 + 4096,
			      PROT_WRITE | PROT_READ);
	gem_set_domain(fd, obj[1].handle,
		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

	memset(reloc, 0, sizeof(reloc));
	b = batch;
	for (i = 0; i < 1024; i++) {
		uint64_t offset;

		reloc[i].target_handle = obj[0].handle;
		reloc[i].presumed_offset = obj[0].offset;
		reloc[i].offset = (b - batch + 1) * sizeof(*batch);
		reloc[i].delta = i * sizeof(uint32_t);
		reloc[i].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		reloc[i].write_domain = I915_GEM_DOMAIN_INSTRUCTION;

		offset = obj[0].offset + reloc[i].delta;
		*b++ = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
		if (gen >= 8) {
			*b++ = offset;
			*b++ = offset >> 32;
		} else if (gen >= 4) {
Example #7
static int gem_linear_blt(int fd,
                          uint32_t *batch,
                          uint32_t src,
                          uint32_t dst,
                          uint32_t length,
                          struct drm_i915_gem_relocation_entry *reloc)
{
    uint32_t *b = batch;

    *b++ = COPY_BLT_CMD | BLT_WRITE_ALPHA | BLT_WRITE_RGB;
    *b++ = 0x66 << 16 | 1 << 25 | 1 << 24 | (4*1024);
    *b++ = 0;
    *b++ = (length / (4*1024)) << 16 | 1024;
    *b++ = 0;
    reloc->offset = (b-batch-1) * sizeof(uint32_t);
    reloc->delta = 0;
    reloc->target_handle = dst;
    reloc->read_domains = I915_GEM_DOMAIN_RENDER;
    reloc->write_domain = I915_GEM_DOMAIN_RENDER;
    reloc->presumed_offset = 0;
    reloc++;
    if (intel_gen(intel_get_drm_devid(fd)) >= 8)
        *b++ = 0; /* FIXME */

    *b++ = 0;
    *b++ = 4*1024;
    *b++ = 0;
    reloc->offset = (b-batch-1) * sizeof(uint32_t);
    reloc->delta = 0;
    reloc->target_handle = src;
    reloc->read_domains = I915_GEM_DOMAIN_RENDER;
    reloc->write_domain = 0;
    reloc->presumed_offset = 0;
    reloc++;
    if (intel_gen(intel_get_drm_devid(fd)) >= 8)
        *b++ = 0; /* FIXME */

    *b++ = MI_BATCH_BUFFER_END;
    *b++ = 0;

    return (b - batch) * sizeof(uint32_t);
}
Example #8
int main(int argc, char **argv)
{
	int fd;
	int devid;

	igt_skip_on_simulation();

	if (argc != 1) {
		fprintf(stderr, "usage: %s\n", argv[0]);
		igt_fail(-1);
	}

	fd = drm_open_any();
	devid = intel_get_drm_devid(fd);
	if (!HAS_BLT_RING(devid)) {
		fprintf(stderr, "not (yet) implemented for pre-snb\n");
		return 77;
	}

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		fprintf(stderr, "failed to init libdrm\n");
		igt_fail(-1);
	}
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, devid);
	if (!batch) {
		fprintf(stderr, "failed to create batch buffer\n");
		igt_fail(-1);
	}

	target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
	if (!target_buffer) {
		fprintf(stderr, "failed to alloc target buffer\n");
		igt_fail(-1);
	}

	blt_bo = drm_intel_bo_alloc(bufmgr, "blt bo", 4*4096*4096, 4096);
	if (!blt_bo) {
		fprintf(stderr, "failed to alloc blt buffer\n");
		igt_fail(-1);
	}

	dummy_reloc_loop();

	drm_intel_bo_unreference(target_buffer);
	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #9
int main(int argc, char **argv)
{
	int fd, i;

	fd = drm_open_any();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		fprintf(stderr, "failed to init libdrm\n");
		exit(-1);
	}
	/* don't enable buffer reuse!! */
	//drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
	assert(batch);

	/* put some load onto the gpu to keep the light buffers active for long
	 * enough */
	for (i = 0; i < 1000; i++) {
		load_bo = drm_intel_bo_alloc(bufmgr, "target bo", 1024*4096, 4096);
		if (!load_bo) {
			fprintf(stderr, "failed to alloc target buffer\n");
			exit(-1);
		}

		BEGIN_BATCH(8);
		OUT_BATCH(XY_SRC_COPY_BLT_CMD |
			  XY_SRC_COPY_BLT_WRITE_ALPHA |
			  XY_SRC_COPY_BLT_WRITE_RGB);
		OUT_BATCH((3 << 24) | /* 32 bits */
			  (0xcc << 16) | /* copy ROP */
			  4096);
		OUT_BATCH(0); /* dst x1,y1 */
		OUT_BATCH((1024 << 16) | 512);
		OUT_RELOC(load_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
		OUT_BATCH((0 << 16) | 512); /* src x1, y1 */
		OUT_BATCH(4096);
		OUT_RELOC(load_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
		ADVANCE_BATCH();

		intel_batchbuffer_flush(batch);

		drm_intel_bo_disable_reuse(load_bo);
		drm_intel_bo_unreference(load_bo);
	}

	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #10
static void draw_rect_blt(int fd, struct cmd_data *cmd_data,
			  struct buf_data *buf, struct rect *rect,
			  uint32_t color)
{
	drm_intel_bo *dst;
	struct intel_batchbuffer *batch;
	int blt_cmd_len, blt_cmd_tiling, blt_cmd_depth;
	uint32_t devid = intel_get_drm_devid(fd);
	int gen = intel_gen(devid);
	uint32_t tiling, swizzle;
	int pitch;

	gem_get_tiling(fd, buf->handle, &tiling, &swizzle);

	dst = gem_handle_to_libdrm_bo(cmd_data->bufmgr, fd, "", buf->handle);
	igt_assert(dst);

	batch = intel_batchbuffer_alloc(cmd_data->bufmgr, devid);
	igt_assert(batch);

	switch (buf->bpp) {
	case 8:
		blt_cmd_depth = 0;
		break;
	case 16: /* we're assuming 565 */
		blt_cmd_depth = 1 << 24;
		break;
	case 32:
		blt_cmd_depth = 3 << 24;
		break;
	default:
		igt_assert(false);
	}

	blt_cmd_len = (gen >= 8) ?  0x5 : 0x4;
	blt_cmd_tiling = (tiling) ? XY_COLOR_BLT_TILED : 0;
	pitch = (tiling) ? buf->stride / 4 : buf->stride;

	BEGIN_BATCH(6, 1);
	OUT_BATCH(XY_COLOR_BLT_CMD_NOLEN | XY_COLOR_BLT_WRITE_ALPHA |
		  XY_COLOR_BLT_WRITE_RGB | blt_cmd_tiling | blt_cmd_len);
	OUT_BATCH(blt_cmd_depth | (0xF0 << 16) | pitch);
	OUT_BATCH((rect->y << 16) | rect->x);
	OUT_BATCH(((rect->y + rect->h) << 16) | (rect->x + rect->w));
	OUT_RELOC_FENCED(dst, 0, I915_GEM_DOMAIN_RENDER, 0);
	OUT_BATCH(color);
	ADVANCE_BATCH();

	intel_batchbuffer_flush(batch);
	intel_batchbuffer_free(batch);
}
Example #11
static void draw_rect_pwrite_tiled(int fd, struct buf_data *buf,
				   struct rect *rect, uint32_t color,
				   uint32_t swizzle)
{
	int i;
	int tiled_pos, x, y, pixel_size;
	uint8_t tmp[4096];
	int tmp_used = 0, tmp_size;
	bool flush_tmp = false;
	int tmp_start_pos = 0;
	int pixels_written = 0;

	/* We haven't implemented support for the older tiling methods yet. */
	igt_require(intel_gen(intel_get_drm_devid(fd)) >= 5);

	pixel_size = buf->bpp / 8;
	tmp_size = sizeof(tmp) / pixel_size;

	/* Instead of doing one pwrite per pixel, we try to group the maximum
	 * number of consecutive pixels we can in a single pwrite: that's why
	 * we use the "tmp" variables. */
	for (i = 0; i < tmp_size; i++)
		set_pixel(tmp, i, color, buf->bpp);

	for (tiled_pos = 0; tiled_pos < buf->size; tiled_pos += pixel_size) {
		tiled_pos_to_x_y_linear(tiled_pos, buf->stride, swizzle,
					buf->bpp, &x, &y);

		if (x >= rect->x && x < rect->x + rect->w &&
		    y >= rect->y && y < rect->y + rect->h) {
			if (tmp_used == 0)
				tmp_start_pos = tiled_pos;
			tmp_used++;
		} else {
			flush_tmp = true;
		}

		if (tmp_used == tmp_size || (flush_tmp && tmp_used > 0) ||
		    tiled_pos + pixel_size >= buf->size) {
			gem_write(fd, buf->handle, tmp_start_pos, tmp,
				  tmp_used * pixel_size);
			flush_tmp = false;
			pixels_written += tmp_used;
			tmp_used = 0;

			if (pixels_written == rect->w * rect->h)
				break;
		}
	}
}
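set_pixel() itself is not shown above. A minimal sketch of what such a helper could look like, assuming bpp is one of 8, 16 or 32 and the index is counted in pixels rather than bytes:

static void set_pixel(void *_ptr, int index, uint32_t color, int bpp)
{
	if (bpp == 8) {
		uint8_t *ptr = _ptr;
		ptr[index] = color;
	} else if (bpp == 16) {
		uint16_t *ptr = _ptr;
		ptr[index] = color;
	} else if (bpp == 32) {
		uint32_t *ptr = _ptr;
		ptr[index] = color;
	} else {
		igt_assert(false);
	}
}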
Example #12
static void make_busy(int fd, uint32_t handle)
{
    struct drm_i915_gem_execbuffer2 execbuf;
    struct drm_i915_gem_exec_object2 obj[2];
    struct drm_i915_gem_relocation_entry reloc[2];
    uint32_t batch[20];
    uint32_t tmp;
    int count;

    tmp = gem_create(fd, 1024*1024);

    obj[0].handle = tmp;
    obj[0].relocation_count = 0;
    obj[0].relocs_ptr = 0;
    obj[0].alignment = 0;
    obj[0].offset = 0;
    obj[0].flags = 0;
    obj[0].rsvd1 = 0;
    obj[0].rsvd2 = 0;

    obj[1].handle = handle;
    obj[1].relocation_count = 2;
    obj[1].relocs_ptr = (uintptr_t) reloc;
    obj[1].alignment = 0;
    obj[1].offset = 0;
    obj[1].flags = 0;
    obj[1].rsvd1 = 0;
    obj[1].rsvd2 = 0;

    execbuf.buffers_ptr = (uintptr_t)obj;
    execbuf.buffer_count = 2;
    execbuf.batch_start_offset = 0;
    execbuf.batch_len = gem_linear_blt(fd, batch, tmp, tmp, 1024*1024, reloc);
    execbuf.cliprects_ptr = 0;
    execbuf.num_cliprects = 0;
    execbuf.DR1 = 0;
    execbuf.DR4 = 0;
    execbuf.flags = 0;
    if (HAS_BLT_RING(intel_get_drm_devid(fd)))
        execbuf.flags |= I915_EXEC_BLT;
    i915_execbuffer2_set_context_id(execbuf, 0);
    execbuf.rsvd2 = 0;

    gem_write(fd, handle, 0, batch, execbuf.batch_len);
    for (count = 0; count < 10; count++)
        do_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
    gem_close(fd, tmp);
}
Example #13
int main(int argc, char **argv)
{
	int fd;
	int devid;

	if (argc != 1) {
		fprintf(stderr, "usage: %s\n", argv[0]);
		exit(-1);
	}

	fd = drm_open_any();
	devid = intel_get_drm_devid(fd);
	if (!HAS_BLT_RING(devid)) {
		fprintf(stderr, "inter ring check needs gen6+\n");
		return 77;
	}


	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		fprintf(stderr, "failed to init libdrm\n");
		exit(-1);
	}
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, devid);
	if (!batch) {
		fprintf(stderr, "failed to create batch buffer\n");
		exit(-1);
	}

	target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
	if (!target_buffer) {
		fprintf(stderr, "failed to alloc target buffer\n");
		exit(-1);
	}

	store_dword_loop(I915_EXEC_RENDER);

	drm_intel_bo_unreference(target_buffer);
	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #14
int main(int argc, char **argv)
{
	int fd;
	int devid;

	if (argc != 1) {
		fprintf(stderr, "usage: %s\n", argv[0]);
		igt_fail(-1);
	}

	fd = drm_open_any();
	devid = intel_get_drm_devid(fd);

	if (HAS_BSD_RING(devid))
		num_rings++;

	if (HAS_BLT_RING(devid))
		num_rings++;


	printf("num rings detected: %i\n", num_rings);

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		fprintf(stderr, "failed to init libdrm\n");
		igt_fail(-1);
	}
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, devid);
	if (!batch) {
		fprintf(stderr, "failed to create batch buffer\n");
		igt_fail(-1);
	}

	mi_lri_loop();
	gem_quiescent_gpu(fd);

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #15
int main(int argc, char **argv)
{
	int fd;
	int object_size = OBJECT_WIDTH * OBJECT_HEIGHT * 4;
	double start_time, end_time;
	drm_intel_bo *dst_bo;
	drm_intel_bufmgr *bufmgr;
	struct intel_batchbuffer *batch;
	int i;

	fd = drm_open_any();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	dst_bo = drm_intel_bo_alloc(bufmgr, "dst", object_size, 4096);

	/* Prep loop to get us warmed up. */
	for (i = 0; i < 60; i++) {
		do_render(bufmgr, batch, dst_bo, OBJECT_WIDTH, OBJECT_HEIGHT);
	}
	drm_intel_bo_wait_rendering(dst_bo);

	/* Do the actual timing. */
	start_time = get_time_in_secs();
	for (i = 0; i < 200; i++) {
		do_render(bufmgr, batch, dst_bo, OBJECT_WIDTH, OBJECT_HEIGHT);
	}
	drm_intel_bo_wait_rendering(dst_bo);
	end_time = get_time_in_secs();

	printf("%d iterations in %.03f secs: %.01f MB/sec\n", i,
	       end_time - start_time,
	       (double)i * OBJECT_WIDTH * OBJECT_HEIGHT * 4 / 1024.0 / 1024.0 /
	       (end_time - start_time));

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #16
int main(int argc, char **argv)
{
	int fd;
	int i;
	drm_intel_bo *src_bo, *dst_bo;

	fd = drm_open_any();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	src_bo = drm_intel_bo_alloc(bufmgr, "src bo", size, 4096);
	dst_bo = drm_intel_bo_alloc(bufmgr, "dst bo", size, 4096);

	/* The ring we've been using is 128k, and each rendering op
	 * will use at least 8 dwords:
	 *
	 * BATCH_START
	 * BATCH_START offset
	 * MI_FLUSH
	 * STORE_DATA_INDEX
	 * STORE_DATA_INDEX offset
	 * STORE_DATA_INDEX value
	 * MI_USER_INTERRUPT
	 * (padding)
	 *
	 * So iterate just a little more than that -- if we don't fill the ring
	 * doing this, we aren't likely to with this test.
	 */
	for (i = 0; i < 128 * 1024 / (8 * 4) * 1.25; i++) { /* 4096 * 1.25 = 5120 iterations */
		intel_copy_bo(batch, dst_bo, src_bo, width, height);
		intel_batchbuffer_flush(batch);
	}

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #17
static unsigned get_num_contexts(int fd)
{
	uint64_t ggtt_size;
	unsigned size;
	unsigned count;

	/* Compute the number of contexts we can allocate to fill the GGTT */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		ggtt_size = 1ull << 32;
	else
		ggtt_size = 1ull << 31;

	size = 64 << 10; /* Most gen require at least 64k for ctx */

	count = 3 * (ggtt_size / size) / 2;
	igt_info("Creating %lld contexts (assuming of size %lld)\n",
		 (long long)count, (long long)size);

	intel_require_memory(count, size, CHECK_RAM | CHECK_SWAP);
	return count;
}
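A hypothetical usage sketch, assuming igt's usual gem_context_create()/gem_context_destroy() ioctl wrappers, showing how the returned count could drive a fill-the-GGTT loop (fill_ggtt_with_contexts is an illustrative name, not from the source):

static void fill_ggtt_with_contexts(int fd)
{
	unsigned count = get_num_contexts(fd);
	uint32_t *ctx = calloc(count, sizeof(*ctx));
	unsigned i;

	igt_assert(ctx);
	for (i = 0; i < count; i++)
		ctx[i] = gem_context_create(fd); /* assumed thin ioctl wrapper */
	for (i = 0; i < count; i++)
		gem_context_destroy(fd, ctx[i]);
	free(ctx);
}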
Example #18
static void test_connector(const char *test_name,
			   struct kmstest_connector_config *cconf,
			   enum test_flags flags)
{
	const uint32_t *formats;
	int format_count;
	int i;

	igt_get_all_formats(&formats, &format_count);
	for (i = 0; i < format_count; i++) {
		if (intel_gen(intel_get_drm_devid(drm_fd)) < 4
		    && formats[i] == DRM_FORMAT_XRGB2101010) {
			igt_info("gen2/3 don't support 10bpc, skipping\n");
			continue;
		}

		test_format(test_name,
			    cconf, &cconf->connector->modes[0],
			    formats[i], flags);
	}
}
Example #19
static void write_dword(int fd,
			uint32_t target_handle,
			uint64_t target_offset,
			uint32_t value)
{
	int gen = intel_gen(intel_get_drm_devid(fd));
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_relocation_entry reloc;
	uint32_t buf[16];
	int i;

	memset(obj, 0, sizeof(obj));
	obj[0].handle = target_handle;
	obj[1].handle = gem_create(fd, 4096);

	i = 0;
	buf[i++] = MI_STORE_DWORD_IMM | (gen < 6 ? 1<<22 : 0);
	if (gen >= 8) {
		buf[i++] = target_offset;
		buf[i++] = target_offset >> 32;
	} else if (gen >= 4) {
Example #20
int main(int argc, char **argv)
{
	drm_intel_bo *src;
	int fd;

	fd = drm_open_any();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	src = drm_intel_bo_alloc(bufmgr, "src", 128 * 128, 4096);

	bad_blit(src, batch->devid);

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #21
int main(int argc, char **argv)
{
	drm_intel_bo *bo[4096];
	uint32_t bo_start_val[4096];
	uint32_t start = 0;
	int fd, i, count;

	igt_skip_on_simulation();

	fd = drm_open_any();
	count = 3 * gem_aperture_size(fd) / (1024*1024) / 2;
	if (count > intel_get_total_ram_mb() * 9 / 10) {
		count = intel_get_total_ram_mb() * 9 / 10;
		printf("not enough RAM to run test, reducing buffer count\n");
	}
	count |= 1;
	printf("Using %d 1MiB buffers\n", count);

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	for (i = 0; i < count; i++) {
		bo[i] = create_bo(fd, start);
		bo_start_val[i] = start;

		/*
		printf("Creating bo %d\n", i);
		check_bo(bo[i], bo_start_val[i]);
		*/

		start += 1024 * 1024 / 4;
	}

	for (i = 0; i < count; i++) {
		int src = count - i - 1;
		intel_copy_bo(batch, bo[i], bo[src], width, height);
		bo_start_val[i] = bo_start_val[src];
	}

	for (i = 0; i < count * 4; i++) {
		int src = random() % count;
		int dst = random() % count;

		if (src == dst)
			continue;

		intel_copy_bo(batch, bo[dst], bo[src], width, height);
		bo_start_val[dst] = bo_start_val[src];

		/*
		check_bo(bo[dst], bo_start_val[dst]);
		printf("%d: copy bo %d to %d\n", i, src, dst);
		*/
	}

	for (i = 0; i < count; i++) {
		/*
		printf("check %d\n", i);
		*/
		check_bo(fd, bo[i], bo_start_val[i]);

		drm_intel_bo_unreference(bo[i]);
		bo[i] = NULL;
	}

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #22
static void render_timeout(int fd)
{
	drm_intel_bufmgr *bufmgr;
	struct intel_batchbuffer *batch;
	int64_t timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;
	int64_t negative_timeout = -1;
	int ret;
	const bool do_signals = true; /* signals make the operation appear to
				       * use less process CPU time */
	bool done = false;
	int i, iter = 1;

	igt_skip_on_simulation();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	dst = drm_intel_bo_alloc(bufmgr, "dst", BUF_SIZE, 4096);
	dst2 = drm_intel_bo_alloc(bufmgr, "dst2", BUF_SIZE, 4096);

	igt_skip_on_f(gem_bo_wait_timeout(fd, dst->handle, &timeout) == -EINVAL,
		      "kernel doesn't support wait_timeout, skipping test\n");
	timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;

	/* Figure out a rough number of fills required to consume 1 second of
	 * GPU work.
	 */
	do {
		struct timespec start, end;
		long diff;

#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
#endif

		igt_assert(clock_gettime(CLOCK_MONOTONIC_RAW, &start) == 0);
		for (i = 0; i < iter; i++)
			blt_color_fill(batch, dst, BUF_PAGES);
		intel_batchbuffer_flush(batch);
		drm_intel_bo_wait_rendering(dst);
		igt_assert(clock_gettime(CLOCK_MONOTONIC_RAW, &end) == 0);

		diff = do_time_diff(&end, &start);
		igt_assert(diff >= 0);

		if ((diff / MSEC_PER_SEC) > ENOUGH_WORK_IN_SECONDS)
			done = true;
		else
			iter <<= 1;
	} while (!done && iter < 1000000);

	igt_assert_lt(iter, 1000000);

	igt_info("%d iters is enough work\n", iter);
	gem_quiescent_gpu(fd);
	if (do_signals)
		igt_fork_signal_helper();

	/* We should be able to do half as much work in the same amount of time,
	 * but because we might schedule almost twice as much as required, we
	 * might accidentally time out. Hence add some fudge. */
	for (i = 0; i < iter/3; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);
	igt_assert(gem_bo_busy(fd, dst2->handle) == true);

	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), 0);
	igt_assert(gem_bo_busy(fd, dst2->handle) == false);
	igt_assert_neq(timeout, 0);
	if (timeout == (ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC))
		igt_info("Buffer was already done!\n");
	else
		igt_info("Finished with %" PRIu64 " time remaining\n", timeout);

	/* check that polling with timeout=0 works. */
	timeout = 0;
	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), 0);
	igt_assert_eq(timeout, 0);

	/* Now check that we correctly time out, twice the auto-tune load should
	 * be good enough. */
	timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;
	for (i = 0; i < iter*2; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);

	ret = gem_bo_wait_timeout(fd, dst2->handle, &timeout);
	igt_assert_eq(ret, -ETIME);
	igt_assert_eq(timeout, 0);
	igt_assert(gem_bo_busy(fd, dst2->handle) == true);

	/* check that polling with timeout=0 works. */
	timeout = 0;
	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), -ETIME);
	igt_assert_eq(timeout, 0);


	/* Now check that we can pass negative (infinite) timeouts. */
	negative_timeout = -1;
	for (i = 0; i < iter; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);

	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &negative_timeout), 0);
	igt_assert_eq(negative_timeout, -1); /* infinity always remains */
	igt_assert(gem_bo_busy(fd, dst2->handle) == false);

	if (do_signals)
		igt_stop_signal_helper();
	drm_intel_bo_unreference(dst2);
	drm_intel_bo_unreference(dst);
	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);
}
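gem_bo_wait_timeout() is a wrapper that this excerpt does not define. A minimal sketch, assuming it maps straight onto DRM_IOCTL_I915_GEM_WAIT, whose timeout_ns field the kernel rewrites with the time remaining:

static int gem_bo_wait_timeout(int fd, uint32_t handle, int64_t *timeout_ns)
{
	struct drm_i915_gem_wait wait;
	int ret;

	memset(&wait, 0, sizeof(wait));
	wait.bo_handle = handle;
	wait.timeout_ns = *timeout_ns;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
	*timeout_ns = wait.timeout_ns; /* time remaining, or 0 on -ETIME */

	return ret ? -errno : 0;
}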
Example #23
static int negative_reloc_blt(int fd)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 gem_exec[1024][2];
	struct drm_i915_gem_relocation_entry gem_reloc;
	uint32_t buf[1024], *b;
	int i;

	memset(&gem_reloc, 0, sizeof(gem_reloc));
	gem_reloc.offset = 4 * sizeof(uint32_t);
	gem_reloc.presumed_offset = ~0ULL;
	gem_reloc.delta = -4096;
	gem_reloc.target_handle = 0;
	gem_reloc.read_domains = I915_GEM_DOMAIN_RENDER;
	gem_reloc.write_domain = I915_GEM_DOMAIN_RENDER;

	for (i = 0; i < 1024; i++) {
		memset(gem_exec[i], 0, sizeof(gem_exec[i]));

		gem_exec[i][0].handle = gem_create(fd, 4096);
		gem_exec[i][0].flags = EXEC_OBJECT_NEEDS_FENCE;

		b = buf;
		*b++ = XY_COLOR_BLT_CMD_NOLEN |
			((gen >= 8) ? 5 : 4) |
			COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB;
		*b++ = 0xf0 << 16 | 1 << 25 | 1 << 24 | 4096;
		*b++ = 1 << 16 | 0;
		*b++ = 2 << 16 | 1024;
		*b++ = ~0;
		if (gen >= 8)
			*b++ = ~0;
		*b++ = 0xc0ffee ^ i;
		*b++ = MI_BATCH_BUFFER_END;
		if ((b - buf) & 1)
			*b++ = 0;

		gem_exec[i][1].handle = gem_create(fd, 4096);
		gem_write(fd, gem_exec[i][1].handle, 0, buf, (b - buf) * sizeof(uint32_t));
		gem_exec[i][1].relocation_count = 1;
		gem_exec[i][1].relocs_ptr = (uintptr_t)&gem_reloc;
	}

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffer_count = 2;
	execbuf.batch_len = (b - buf) * sizeof(uint32_t);
	execbuf.flags = USE_LUT;
	if (gen >= 6)
		execbuf.flags |= I915_EXEC_BLT;

	for (i = 0; i < 1024; i++) {
		execbuf.buffers_ptr = (uintptr_t)gem_exec[i];
		gem_execbuf(fd, &execbuf);
	}

	for (i = 1024; i--;) {
		gem_read(fd, gem_exec[i][0].handle,
			 i*sizeof(uint32_t), buf + i, sizeof(uint32_t));
		gem_close(fd, gem_exec[i][0].handle);
		gem_close(fd, gem_exec[i][1].handle);
	}

	if (0) {
		for (i = 0; i < 1024; i += 8)
			igt_info("%08x %08x %08x %08x %08x %08x %08x %08x\n",
				 buf[i + 0], buf[i + 1], buf[i + 2], buf[i + 3],
				 buf[i + 4], buf[i + 5], buf[i + 6], buf[i + 7]);
	}
	for (i = 0; i < 1024; i++)
		igt_assert_eq(buf[i], 0xc0ffee ^ i);

	return 0;
}
Example #24
/* Simulates SNA behaviour using negative self-relocations for
 * STATE_BASE_ADDRESS command packets. If they wrap around (to values greater
 * than the total size of the GTT), the GPU will hang.
 * See https://bugs.freedesktop.org/show_bug.cgi?id=78533
 */
static int negative_reloc(int fd, unsigned flags)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 gem_exec[2];
	struct drm_i915_gem_relocation_entry gem_reloc[1000];
	uint64_t gtt_max = get_page_table_size(fd);
	uint32_t buf[1024] = {MI_BATCH_BUFFER_END};
	int i;

#define BIAS (256*1024)

	igt_require(intel_gen(intel_get_drm_devid(fd)) >= 7);

	memset(gem_exec, 0, sizeof(gem_exec));
	gem_exec[0].handle = gem_create(fd, 4096);
	gem_write(fd, gem_exec[0].handle, 0, buf, 8);

	memset(gem_reloc, 0, sizeof(gem_reloc));
	gem_reloc[0].offset = 1024;
	gem_reloc[0].delta = 0;
	gem_reloc[0].target_handle = gem_exec[0].handle;
	gem_reloc[0].read_domains = I915_GEM_DOMAIN_COMMAND;

	gem_exec[1].handle = gem_create(fd, 4096);
	gem_write(fd, gem_exec[1].handle, 0, buf, 8);
	gem_exec[1].relocation_count = 1;
	gem_exec[1].relocs_ptr = (uintptr_t)gem_reloc;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)gem_exec;
	execbuf.buffer_count = 2;
	execbuf.batch_len = 8;

	do_or_die(drmIoctl(fd,
			   DRM_IOCTL_I915_GEM_EXECBUFFER2,
			   &execbuf));
	gem_close(fd, gem_exec[1].handle);

	igt_info("Found offset %lld for 4k batch\n", (long long)gem_exec[0].offset);
	/*
	 * Ideally we'd like to be able to control where the kernel is going to
	 * place the buffer. We don't SKIP here because it causes the test
	 * to "randomly" flip-flop between the SKIP and PASS states.
	 */
	if (gem_exec[0].offset < BIAS) {
		igt_info("Offset is below BIAS, not testing anything\n");
		return 0;
	}

	memset(gem_reloc, 0, sizeof(gem_reloc));
	for (i = 0; i < sizeof(gem_reloc)/sizeof(gem_reloc[0]); i++) {
		gem_reloc[i].offset = 8 + 4*i;
		gem_reloc[i].delta = -BIAS*i/1024;
		gem_reloc[i].target_handle = flags & USE_LUT ? 0 : gem_exec[0].handle;
		gem_reloc[i].read_domains = I915_GEM_DOMAIN_COMMAND;
	}

	gem_exec[0].relocation_count = sizeof(gem_reloc)/sizeof(gem_reloc[0]);
	gem_exec[0].relocs_ptr = (uintptr_t)gem_reloc;

	execbuf.buffer_count = 1;
	execbuf.flags = flags & USE_LUT;
	do_or_die(drmIoctl(fd,
			   DRM_IOCTL_I915_GEM_EXECBUFFER2,
			   &execbuf));

	igt_info("Batch is now at offset %lld\n", (long long)gem_exec[0].offset);

	gem_read(fd, gem_exec[0].handle, 0, buf, sizeof(buf));
	gem_close(fd, gem_exec[0].handle);

	for (i = 0; i < sizeof(gem_reloc)/sizeof(gem_reloc[0]); i++)
		igt_assert(buf[2 + i] < gtt_max);

	return 0;
}
Example #25
int main(int argc, char **argv)
{
	uint32_t *handle, *tiling, *start_val;
	uint32_t start = 0;
	int i, fd, count;

	fd = drm_open_any();

	if (!IS_GEN3(intel_get_drm_devid(fd))) {
		printf("gen3-only test, doing nothing\n");
		return 77;
	}

	count = 0;
	if (argc > 1)
		count = atoi(argv[1]);
	if (count == 0)
		count = 3 * gem_aperture_size(fd) / (1024*1024) / 2;
	printf("Using %d 1MiB buffers\n", count);

	handle = malloc(sizeof(uint32_t)*count*3);
	tiling = handle + count;
	start_val = tiling + count;

	for (i = 0; i < count; i++) {
		handle[i] = create_bo(fd, start, tiling[i] = i % 3);
		start_val[i] = start;
		start += 1024 * 1024 / 4;
	}

	printf("Verifying initialisation..."); fflush(stdout);
	for (i = 0; i < count; i++)
		check_bo(fd, handle[i], start_val[i]);
	printf("done\n");

	printf("Cyclic blits, forward..."); fflush(stdout);
	for (i = 0; i < count * 32; i++) {
		int src = i % count;
		int dst = (i + 1) % count;

		copy(fd, handle[dst], tiling[dst], handle[src], tiling[src]);
		start_val[dst] = start_val[src];
	}
	printf("verifying..."); fflush(stdout);
	for (i = 0; i < count; i++)
		check_bo(fd, handle[i], start_val[i]);
	printf("done\n");

	printf("Cyclic blits, backward..."); fflush(stdout);
	for (i = 0; i < count * 32; i++) {
		int src = (i + 1) % count;
		int dst = i % count;

		copy(fd, handle[dst], tiling[dst], handle[src], tiling[src]);
		start_val[dst] = start_val[src];
	}
	printf("verifying..."); fflush(stdout);
	for (i = 0; i < count; i++)
		check_bo(fd, handle[i], start_val[i]);
	printf("done\n");

	printf("Random blits..."); fflush(stdout);
	for (i = 0; i < count * 32; i++) {
		int src = random() % count;
		int dst = random() % count;

		while (src == dst)
			dst = random() % count;

		copy(fd, handle[dst], tiling[dst], handle[src], tiling[src]);
		start_val[dst] = start_val[src];
	}
	printf("verifying..."); fflush(stdout);
	for (i = 0; i < count; i++)
		check_bo(fd, handle[i], start_val[i]);
	printf("done\n");

	return 0;
}
Example #26
static int
copy(int fd, uint32_t dst, uint32_t src, uint32_t *all_bo, int n_bo)
{
	uint32_t batch[12];
	struct drm_i915_gem_relocation_entry reloc[2];
	struct drm_i915_gem_exec_object2 *obj;
	struct drm_i915_gem_execbuffer2 exec;
	uint32_t handle;
	int n, ret, i=0;

	batch[i++] = (XY_SRC_COPY_BLT_CMD |
		    XY_SRC_COPY_BLT_WRITE_ALPHA |
		    XY_SRC_COPY_BLT_WRITE_RGB | 6);
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i - 1] += 2;
	batch[i++] = (3 << 24) | /* 32 bits */
		  (0xcc << 16) | /* copy ROP */
		  WIDTH*4;
	batch[i++] = 0; /* dst x1,y1 */
	batch[i++] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */
	batch[i++] = 0; /* dst reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0; /* FIXME */
	batch[i++] = 0; /* src x1,y1 */
	batch[i++] = WIDTH*4;
	batch[i++] = 0; /* src reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0; /* FIXME */
	batch[i++] = MI_BATCH_BUFFER_END;
	batch[i++] = MI_NOOP;

	handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, batch, sizeof(batch));

	reloc[0].target_handle = dst;
	reloc[0].delta = 0;
	reloc[0].offset = 4 * sizeof(batch[0]);
	reloc[0].presumed_offset = 0;
	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;

	reloc[1].target_handle = src;
	reloc[1].delta = 0;
	reloc[1].offset = 7 * sizeof(batch[0]);
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		reloc[1].offset += sizeof(batch[0]);
	reloc[1].presumed_offset = 0;
	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[1].write_domain = 0;

	obj = calloc(n_bo + 1, sizeof(*obj));
	for (n = 0; n < n_bo; n++)
		obj[n].handle = all_bo[n];
	obj[n].handle = handle;
	obj[n].relocation_count = 2;
	obj[n].relocs_ptr = (uintptr_t)reloc;

	exec.buffers_ptr = (uintptr_t)obj;
	exec.buffer_count = n_bo + 1;
	exec.batch_start_offset = 0;
	exec.batch_len = i * 4;
	exec.DR1 = exec.DR4 = 0;
	exec.num_cliprects = 0;
	exec.cliprects_ptr = 0;
	exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0;
	i915_execbuffer2_set_context_id(exec, 0);
	exec.rsvd2 = 0;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	if (ret)
		ret = errno;

	gem_close(fd, handle);
	free(obj);

	return ret;
}
Example #27
int main(int argc, char **argv)
{
	data_t data = {0, };
	struct intel_batchbuffer *batch = NULL;
	struct igt_buf src, dst;
	igt_render_copyfunc_t render_copy = NULL;
	int opt_dump_aub = igt_aub_dump_enabled();

	igt_simple_init_parse_opts(&argc, argv, "da", NULL, NULL,
				   opt_handler, NULL);

	igt_fixture {
		data.drm_fd = drm_open_any_render();
		data.devid = intel_get_drm_devid(data.drm_fd);

		data.bufmgr = drm_intel_bufmgr_gem_init(data.drm_fd, 4096);
		igt_assert(data.bufmgr);

		render_copy = igt_get_render_copyfunc(data.devid);
		igt_require_f(render_copy,
			      "no render-copy function\n");

		batch = intel_batchbuffer_alloc(data.bufmgr, data.devid);
		igt_assert(batch);
	}

	scratch_buf_init(&data, &src, WIDTH, HEIGHT, STRIDE, SRC_COLOR);
	scratch_buf_init(&data, &dst, WIDTH, HEIGHT, STRIDE, DST_COLOR);

	scratch_buf_check(&data, &src, WIDTH / 2, HEIGHT / 2, SRC_COLOR);
	scratch_buf_check(&data, &dst, WIDTH / 2, HEIGHT / 2, DST_COLOR);

	if (opt_dump_png) {
		scratch_buf_write_to_png(&src, "source.png");
		scratch_buf_write_to_png(&dst, "destination.png");
	}

	if (opt_dump_aub) {
		drm_intel_bufmgr_gem_set_aub_filename(data.bufmgr,
						      "rendercopy.aub");
		drm_intel_bufmgr_gem_set_aub_dump(data.bufmgr, true);
	}

	/* This will copy the src to the mid point of the dst buffer. Presumably
	 * the out of bounds accesses will get clipped.
	 * Resulting buffer should look like:
	 *	  _______
	 *	 |dst|dst|
	 *	 |dst|src|
	 *	  -------
	 */
	render_copy(batch, NULL,
		    &src, 0, 0, WIDTH, HEIGHT,
		    &dst, WIDTH / 2, HEIGHT / 2);

	if (opt_dump_png)
		scratch_buf_write_to_png(&dst, "result.png");

	if (opt_dump_aub) {
		drm_intel_gem_bo_aub_dump_bmp(dst.bo,
			0, 0, WIDTH, HEIGHT,
			AUB_DUMP_BMP_FORMAT_ARGB_8888,
			STRIDE, 0);
		drm_intel_bufmgr_gem_set_aub_dump(data.bufmgr, false);
	} else if (check_all_pixels) {
		uint32_t val;
		int i, j;
		gem_read(data.drm_fd, dst.bo->handle, 0,
			 data.linear, sizeof(data.linear));
		for (i = 0; i < WIDTH; i++) {
			for (j = 0; j < HEIGHT; j++) {
				uint32_t color = DST_COLOR;
				val = data.linear[j * WIDTH + i];
				if (j >= HEIGHT/2 && i >= WIDTH/2)
					color = SRC_COLOR;

				igt_assert_f(val == color,
					     "Expected 0x%08x, found 0x%08x at (%d,%d)\n",
					     color, val, i, j);
			}
		}
	} else {
		scratch_buf_check(&data, &dst, 10, 10, DST_COLOR);
		scratch_buf_check(&data, &dst, WIDTH - 10, HEIGHT - 10, SRC_COLOR);
	}

	igt_exit();
}
Example #28
int main(int argc, char **argv)
{
	uint32_t *handle, *start_val;
	uint32_t start = 0;
	int i, fd, count;

	igt_simple_init(argc, argv);

	fd = drm_open_any();

	igt_require(IS_GEN3(intel_get_drm_devid(fd)));

	count = 0;
	if (argc > 1)
		count = atoi(argv[1]);
	if (count == 0)
		count = 3 * gem_aperture_size(fd) / (1024*1024) / 2;
	igt_info("Using %d 1MiB buffers\n", count);

	handle = malloc(sizeof(uint32_t)*count*2);
	start_val = handle + count;

	for (i = 0; i < count; i++) {
		handle[i] = create_bo(fd, start);
		start_val[i] = start;
		start += 1024 * 1024 / 4;
	}

	igt_info("Verifying initialisation...\n");
	for (i = 0; i < count; i++)
		check_bo(fd, handle[i], start_val[i]);

	igt_info("Cyclic blits, forward...\n");
	for (i = 0; i < count * 4; i++) {
		int src = i % count;
		int dst = (i + 1) % count;

		copy(fd, handle[dst], handle[src]);
		start_val[dst] = start_val[src];
	}
	for (i = 0; i < count; i++)
		check_bo(fd, handle[i], start_val[i]);

	igt_info("Cyclic blits, backward...\n");
	for (i = 0; i < count * 4; i++) {
		int src = (i + 1) % count;
		int dst = i % count;

		copy(fd, handle[dst], handle[src]);
		start_val[dst] = start_val[src];
	}
	for (i = 0; i < count; i++)
		check_bo(fd, handle[i], start_val[i]);

	igt_info("Random blits...\n");
	for (i = 0; i < count * 4; i++) {
		int src = random() % count;
		int dst = random() % count;

		if (src == dst)
			continue;

		copy(fd, handle[dst], handle[src]);
		start_val[dst] = start_val[src];
	}
	for (i = 0; i < count; i++)
		check_bo(fd, handle[i], start_val[i]);

	igt_exit();
}
Example #29
int main(int argc, char **argv)
{
	drm_intel_bufmgr *bufmgr;
	struct intel_batchbuffer *batch;
	uint32_t *start_val;
	drm_intel_bo **bo;
	uint32_t start = 0;
	int i, j, fd, count;

	fd = drm_open_any();

	render_copy = get_render_copyfunc(intel_get_drm_devid(fd));
	if (render_copy == NULL) {
		printf("no render-copy function, doing nothing\n");
		return 77;
	}

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	count = 0;
	if (argc > 1)
		count = atoi(argv[1]);
	if (count == 0)
		count = 3 * gem_aperture_size(fd) / SIZE / 2;
	printf("Using %d 1MiB buffers\n", count);

	bo = malloc(sizeof(*bo)*count);
	start_val = malloc(sizeof(*start_val)*count);

	for (i = 0; i < count; i++) {
		bo[i] = drm_intel_bo_alloc(bufmgr, "", SIZE, 4096);
		start_val[i] = start;
		for (j = 0; j < WIDTH*HEIGHT; j++)
			linear[j] = start++;
		gem_write(fd, bo[i]->handle, 0, linear, sizeof(linear));
	}

	printf("Verifying initialisation...\n");
	for (i = 0; i < count; i++)
		check_bo(fd, bo[i]->handle, start_val[i]);

	printf("Cyclic blits, forward...\n");
	for (i = 0; i < count * 4; i++) {
		struct scratch_buf src, dst;

		src.bo = bo[i % count];
		src.stride = STRIDE;
		src.tiling = I915_TILING_NONE;
		src.size = SIZE;

		dst.bo = bo[(i + 1) % count];
		dst.stride = STRIDE;
		dst.tiling = I915_TILING_NONE;
		dst.size = SIZE;

		render_copy(batch, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
		start_val[(i + 1) % count] = start_val[i % count];
	}
	for (i = 0; i < count; i++)
		check_bo(fd, bo[i]->handle, start_val[i]);

	printf("Cyclic blits, backward...\n");
	for (i = 0; i < count * 4; i++) {
		struct scratch_buf src, dst;

		src.bo = bo[(i + 1) % count];
		src.stride = STRIDE;
		src.tiling = I915_TILING_NONE;
		src.size = SIZE;

		dst.bo = bo[i % count];
		dst.stride = STRIDE;
		dst.tiling = I915_TILING_NONE;
		dst.size = SIZE;

		render_copy(batch, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
		start_val[i % count] = start_val[(i + 1) % count];
	}
	for (i = 0; i < count; i++)
		check_bo(fd, bo[i]->handle, start_val[i]);

	printf("Random blits...\n");
	for (i = 0; i < count * 4; i++) {
		struct scratch_buf src, dst;
		int s = random() % count;
		int d = random() % count;

		if (s == d)
			continue;

		src.bo = bo[s];
		src.stride = STRIDE;
		src.tiling = I915_TILING_NONE;
		src.size = SIZE;

		dst.bo = bo[d];
		dst.stride = STRIDE;
		dst.tiling = I915_TILING_NONE;
		dst.size = SIZE;

		render_copy(batch, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
		start_val[d] = start_val[s];
	}
	for (i = 0; i < count; i++)
		check_bo(fd, bo[i]->handle, start_val[i]);

	return 0;
}
Example #30
static void
copy(int fd, uint32_t dst, uint32_t src, unsigned int error)
{
	uint32_t batch[12];
	struct drm_i915_gem_relocation_entry reloc[2];
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_execbuffer2 exec;
	uint32_t handle;
	int ret, i=0;

	batch[i++] = XY_SRC_COPY_BLT_CMD |
		  XY_SRC_COPY_BLT_WRITE_ALPHA |
		  XY_SRC_COPY_BLT_WRITE_RGB;
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i - 1] |= 8;
	else
		batch[i - 1] |= 6;

	batch[i++] = (3 << 24) | /* 32 bits */
		  (0xcc << 16) | /* copy ROP */
		  WIDTH*4;
	batch[i++] = 0; /* dst x1,y1 */
	batch[i++] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */
	batch[i++] = 0; /* dst reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0;
	batch[i++] = 0; /* src x1,y1 */
	batch[i++] = WIDTH*4;
	batch[i++] = 0; /* src reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0;
	batch[i++] = MI_BATCH_BUFFER_END;
	batch[i++] = MI_NOOP;

	handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, batch, sizeof(batch));

	reloc[0].target_handle = dst;
	reloc[0].delta = 0;
	reloc[0].offset = 4 * sizeof(batch[0]);
	reloc[0].presumed_offset = 0;
	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;

	reloc[1].target_handle = src;
	reloc[1].delta = 0;
	reloc[1].offset = 7 * sizeof(batch[0]);
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		reloc[1].offset += sizeof(batch[0]);
	reloc[1].presumed_offset = 0;
	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[1].write_domain = 0;

	memset(obj, 0, sizeof(obj));
	exec.buffer_count = 0;
	obj[exec.buffer_count++].handle = dst;
	if (src != dst)
		obj[exec.buffer_count++].handle = src;
	obj[exec.buffer_count].handle = handle;
	obj[exec.buffer_count].relocation_count = 2;
	obj[exec.buffer_count].relocs_ptr = (uintptr_t)reloc;
	exec.buffer_count++;
	exec.buffers_ptr = (uintptr_t)obj;

	exec.batch_start_offset = 0;
	exec.batch_len = i * 4;
	exec.DR1 = exec.DR4 = 0;
	exec.num_cliprects = 0;
	exec.cliprects_ptr = 0;
	exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0;
	i915_execbuffer2_set_context_id(exec, 0);
	exec.rsvd2 = 0;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	if (ret)
		ret = errno;

	if (error == ~0)
		igt_assert_neq(ret, 0);
	else
		igt_assert_eq(ret, error);

	gem_close(fd, handle);
}