static void *work(void *arg)
{
	struct intel_batchbuffer *batch;
	render_copyfunc_t rendercopy = get_render_copyfunc(devid);
	drm_intel_context *context;
	drm_intel_bufmgr *bufmgr;
	int thread_id = *(int *)arg;
	int td_fd;
	int i;

	if (multiple_fds)
		td_fd = fd = drm_open_any();
	else
		td_fd = fd;

	assert(td_fd >= 0);

	bufmgr = drm_intel_bufmgr_gem_init(td_fd, 4096);
	batch = intel_batchbuffer_alloc(bufmgr, devid);
	context = drm_intel_gem_context_create(bufmgr);

	if (!context) {
		returns[thread_id] = 77;
		goto out;
	}

	for (i = 0; i < iter; i++) {
		struct scratch_buf src, dst;

		init_buffer(bufmgr, &src, 4096);
		init_buffer(bufmgr, &dst, 4096);

		if (uncontexted) {
			assert(rendercopy);
			rendercopy(batch, &src, 0, 0, 0, 0, &dst, 0, 0);
		} else {
			int ret;
			ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer);
			assert(ret == 0);
			intel_batchbuffer_flush_with_context(batch, context);
		}
	}

out:
	if (context) /* may be NULL if creation failed above */
		drm_intel_gem_context_destroy(context);
	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	if (multiple_fds)
		close(td_fd);

	pthread_exit(&returns[thread_id]);
}
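
work() is a pthread entry point that reports its status through the global returns[] array and exits via pthread_exit(). The launcher side is not part of this excerpt; a minimal sketch (the thread count and the returns[] declaration here are assumptions mirroring how work() uses them) could look like this:

#include <pthread.h>
#include <stdio.h>

#define NUM_THREADS 8			/* hypothetical thread count */

static int returns[NUM_THREADS];	/* assumed global written by work() */

static void run_threads(void)
{
	pthread_t threads[NUM_THREADS];
	int ids[NUM_THREADS];
	int i;

	for (i = 0; i < NUM_THREADS; i++) {
		ids[i] = i;
		pthread_create(&threads[i], NULL, work, &ids[i]);
	}

	for (i = 0; i < NUM_THREADS; i++) {
		void *status;

		/* work() exits via pthread_exit(&returns[thread_id]) */
		pthread_join(threads[i], &status);
		if (*(int *)status == 77)
			fprintf(stderr, "thread %d skipped\n", i);
	}
}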
int main(int argc, char **argv)
{
	int fd;
	int devid;

	igt_skip_on_simulation();

	if (argc != 1) {
		fprintf(stderr, "usage: %s\n", argv[0]);
		igt_fail(-1);
	}

	fd = drm_open_any();
	devid = intel_get_drm_devid(fd);
	if (!HAS_BLT_RING(devid)) {
		fprintf(stderr, "not (yet) implemented for pre-snb\n");
		return 77;
	}

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		fprintf(stderr, "failed to init libdrm\n");
		igt_fail(-1);
	}
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, devid);
	if (!batch) {
		fprintf(stderr, "failed to create batch buffer\n");
		igt_fail(-1);
	}

	target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
	if (!target_buffer) {
		fprintf(stderr, "failed to alloc target buffer\n");
		igt_fail(-1);
	}

	blt_bo = drm_intel_bo_alloc(bufmgr, "blt bo", 4*4096*4096, 4096);
	if (!blt_bo) {
		fprintf(stderr, "failed to alloc blt buffer\n");
		igt_fail(-1);
	}

	dummy_reloc_loop();

	drm_intel_bo_unreference(target_buffer);
	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
int main(int argc, char **argv)
{
	int fd, i;

	fd = drm_open_any();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		fprintf(stderr, "failed to init libdrm\n");
		exit(-1);
	}
	/* don't enable buffer reuse!! */
	//drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
	assert(batch);

	/* put some load onto the gpu to keep the light buffers active for long
	 * enough */
	for (i = 0; i < 1000; i++) {
		load_bo = drm_intel_bo_alloc(bufmgr, "target bo", 1024*4096, 4096);
		if (!load_bo) {
			fprintf(stderr, "failed to alloc target buffer\n");
			exit(-1);
		}

		BEGIN_BATCH(8);
		OUT_BATCH(XY_SRC_COPY_BLT_CMD |
			  XY_SRC_COPY_BLT_WRITE_ALPHA |
			  XY_SRC_COPY_BLT_WRITE_RGB);
		OUT_BATCH((3 << 24) | /* 32 bits */
			  (0xcc << 16) | /* copy ROP */
			  4096); /* dst pitch */
		OUT_BATCH(0); /* dst x1,y1 */
		OUT_BATCH((1024 << 16) | 512); /* dst x2,y2 */
		OUT_RELOC(load_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
		OUT_BATCH((0 << 16) | 512); /* src x1, y1 */
		OUT_BATCH(4096); /* src pitch */
		OUT_RELOC(load_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
		ADVANCE_BATCH();

		intel_batchbuffer_flush(batch);

		drm_intel_bo_disable_reuse(load_bo);
		drm_intel_bo_unreference(load_bo);
	}

	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #4
static void draw_rect_blt(int fd, struct cmd_data *cmd_data,
			  struct buf_data *buf, struct rect *rect,
			  uint32_t color)
{
	drm_intel_bo *dst;
	struct intel_batchbuffer *batch;
	int blt_cmd_len, blt_cmd_tiling, blt_cmd_depth;
	uint32_t devid = intel_get_drm_devid(fd);
	int gen = intel_gen(devid);
	uint32_t tiling, swizzle;
	int pitch;

	gem_get_tiling(fd, buf->handle, &tiling, &swizzle);

	dst = gem_handle_to_libdrm_bo(cmd_data->bufmgr, fd, "", buf->handle);
	igt_assert(dst);

	batch = intel_batchbuffer_alloc(cmd_data->bufmgr, devid);
	igt_assert(batch);

	switch (buf->bpp) {
	case 8:
		blt_cmd_depth = 0;
		break;
	case 16: /* we're assuming 565 */
		blt_cmd_depth = 1 << 24;
		break;
	case 32:
		blt_cmd_depth = 3 << 24;
		break;
	default:
		igt_assert(false);
	}

	blt_cmd_len = (gen >= 8) ?  0x5 : 0x4;
	blt_cmd_tiling = (tiling) ? XY_COLOR_BLT_TILED : 0;
	pitch = (tiling) ? buf->stride / 4 : buf->stride;

	BEGIN_BATCH(6, 1);
	OUT_BATCH(XY_COLOR_BLT_CMD_NOLEN | XY_COLOR_BLT_WRITE_ALPHA |
		  XY_COLOR_BLT_WRITE_RGB | blt_cmd_tiling | blt_cmd_len);
	OUT_BATCH(blt_cmd_depth | (0xF0 << 16) /* PATCOPY ROP */ | pitch);
	OUT_BATCH((rect->y << 16) | rect->x);
	OUT_BATCH(((rect->y + rect->h) << 16) | (rect->x + rect->w));
	OUT_RELOC_FENCED(dst, 0, I915_GEM_DOMAIN_RENDER, 0);
	OUT_BATCH(color);
	ADVANCE_BATCH();

	intel_batchbuffer_flush(batch);
	intel_batchbuffer_free(batch);
}
int main(int argc, char **argv)
{
	int fd;
	int devid;

	if (argc != 1) {
		fprintf(stderr, "usage: %s\n", argv[0]);
		exit(-1);
	}

	fd = drm_open_any();
	devid = intel_get_drm_devid(fd);
	if (!HAS_BLT_RING(devid)) {
		fprintf(stderr, "inter ring check needs gen6+\n");
		return 77;
	}

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		fprintf(stderr, "failed to init libdrm\n");
		exit(-1);
	}
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, devid);
	if (!batch) {
		fprintf(stderr, "failed to create batch buffer\n");
		exit(-1);
	}

	target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
	if (!target_buffer) {
		fprintf(stderr, "failed to alloc target buffer\n");
		exit(-1);
	}

	store_dword_loop(I915_EXEC_RENDER);

	drm_intel_bo_unreference(target_buffer);
	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
int main(int argc, char **argv)
{
	int fd;
	int object_size = OBJECT_WIDTH * OBJECT_HEIGHT * 4;
	double start_time, end_time;
	drm_intel_bo *dst_bo;
	drm_intel_bufmgr *bufmgr;
	struct intel_batchbuffer *batch;
	int i;

	fd = drm_open_any();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	dst_bo = drm_intel_bo_alloc(bufmgr, "dst", object_size, 4096);

	/* Prep loop to get us warmed up. */
	for (i = 0; i < 60; i++) {
		do_render(bufmgr, batch, dst_bo, OBJECT_WIDTH, OBJECT_HEIGHT);
	}
	drm_intel_bo_wait_rendering(dst_bo);

	/* Do the actual timing. */
	start_time = get_time_in_secs();
	for (i = 0; i < 200; i++) {
		do_render(bufmgr, batch, dst_bo, OBJECT_WIDTH, OBJECT_HEIGHT);
	}
	drm_intel_bo_wait_rendering(dst_bo);
	end_time = get_time_in_secs();

	printf("%d iterations in %.03f secs: %.01f MB/sec\n", i,
	       end_time - start_time,
	       (double)i * OBJECT_WIDTH * OBJECT_HEIGHT * 4 / 1024.0 / 1024.0 /
	       (end_time - start_time));

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
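
get_time_in_secs() is not included in this excerpt; one plausible implementation (an assumption, not necessarily the benchmark's own helper) returns a monotonic timestamp in seconds:

#include <time.h>

static double get_time_in_secs(void)
{
	struct timespec ts;

	/* monotonic clock, so wall-clock adjustments don't skew the result */
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec + ts.tv_nsec / 1e9;
}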
int main(int argc, char **argv)
{
	int fd;
	int devid;

	if (argc != 1) {
		fprintf(stderr, "usage: %s\n", argv[0]);
		igt_fail(-1);
	}

	fd = drm_open_any();
	devid = intel_get_drm_devid(fd);

	if (HAS_BSD_RING(devid))
		num_rings++;

	if (HAS_BLT_RING(devid))
		num_rings++;

	printf("num rings detected: %i\n", num_rings);

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		fprintf(stderr, "failed to init libdrm\n");
		igt_fail(-1);
	}
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, devid);
	if (!batch) {
		fprintf(stderr, "failed to create batch buffer\n");
		igt_fail(-1);
	}

	mi_lri_loop();
	gem_quiescent_gpu(fd);

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #8
static void *thread(void *bufmgr)
{
	struct intel_batchbuffer *batch;
	dri_bo **bo;
	drm_intel_context **ctx;
	int c, b;

	batch = intel_batchbuffer_alloc(bufmgr, devid);

	bo = malloc(num_bo * sizeof(dri_bo *));
	igt_assert(bo);
	memcpy(bo, all_bo, num_bo * sizeof(dri_bo *));

	ctx = malloc(num_ctx * sizeof(drm_intel_context *));
	igt_assert(ctx);
	memcpy(ctx, all_ctx, num_ctx * sizeof(drm_intel_context *));
	igt_permute_array(ctx, num_ctx, xchg_ptr);

	for (c = 0; c < ctx_per_thread; c++) {
		igt_permute_array(bo, num_bo, xchg_ptr);
		for (b = 0; b < bo_per_ctx; b++) {
			struct igt_buf src, dst;

			src.bo = bo[b % num_bo];
			src.stride = 64;
			src.size = OBJECT_SIZE;
			src.tiling = I915_TILING_NONE;

			dst.bo = bo[(b+1) % num_bo];
			dst.stride = 64;
			dst.size = OBJECT_SIZE;
			dst.tiling = I915_TILING_NONE;

			render_copy(batch, ctx[c % num_ctx],
				    &src, 0, 0, 16, 16, &dst, 0, 0);
		}
	}

	free(ctx);
	free(bo);
	intel_batchbuffer_free(batch);

	return NULL;
}
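
igt_permute_array() shuffles an array through a caller-supplied exchange callback. xchg_ptr is not shown in this excerpt; for an array of pointers it would plausibly be a plain swap:

static void xchg_ptr(void *array, unsigned i, unsigned j)
{
	void **arr = array;
	void *tmp = arr[i];

	arr[i] = arr[j];
	arr[j] = tmp;
}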
int main(int argc, char **argv)
{
	int fd;
	int i;
	drm_intel_bo *src_bo, *dst_bo;

	fd = drm_open_any();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	src_bo = drm_intel_bo_alloc(bufmgr, "src bo", size, 4096);
	dst_bo = drm_intel_bo_alloc(bufmgr, "dst bo", size, 4096);

	/* The ring we've been using is 128k, and each rendering op
	 * will use at least 8 dwords:
	 *
	 * BATCH_START
	 * BATCH_START offset
	 * MI_FLUSH
	 * STORE_DATA_INDEX
	 * STORE_DATA_INDEX offset
	 * STORE_DATA_INDEX value
	 * MI_USER_INTERRUPT
	 * (padding)
	 *
	 * That is 128 KiB / (8 dwords * 4 bytes) = 4096 submissions to fill the
	 * ring, so iterate just a little more than that (1.25x, about 5120) --
	 * if we don't fill the ring doing this, we aren't likely to with this
	 * test.
	 */
	for (i = 0; i < 128 * 1024 / (8 * 4) * 1.25; i++) {
		intel_copy_bo(batch, dst_bo, src_bo, width, height);
		intel_batchbuffer_flush(batch);
	}

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #10
int main(int argc, char **argv)
{
	drm_intel_bo *src;
	int fd;

	fd = drm_open_any();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	src = drm_intel_bo_alloc(bufmgr, "src", 128 * 128, 4096);

	bad_blit(src, batch->devid);

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #11
static void init(void)
{
	int i;
	unsigned tmp;

	if (options.num_buffers == 0) {
		tmp = gem_aperture_size(drm_fd);
		tmp = tmp > 256*(1024*1024) ? 256*(1024*1024) : tmp;
		num_buffers = 2 * tmp / options.scratch_buf_size / 3;
		num_buffers /= 2;
		printf("using %u buffers\n", num_buffers);
	} else
		num_buffers = options.num_buffers;

	bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);
	num_fences = get_num_fences();
	batch = intel_batchbuffer_alloc(bufmgr, devid);

	busy_bo = drm_intel_bo_alloc(bufmgr, "tiled bo", BUSY_BUF_SIZE, 4096);
	if (options.forced_tiling >= 0) {
		tmp = options.forced_tiling;
		set_tiling(busy_bo, &tmp, 4096);
		assert(tmp == options.forced_tiling);
	}

	for (i = 0; i < num_buffers; i++) {
		init_buffer(&buffers[0][i], options.scratch_buf_size);
		init_buffer(&buffers[1][i], options.scratch_buf_size);

		num_total_tiles += buffers[0][i].num_tiles;
	}
	current_set = 0;

	/* just in case it helps reproducibility */
	srandom(0xdeadbeef);
}
static void run_test(int fd, int num_fences, int expected_errno,
		     unsigned flags)
{
	struct drm_i915_gem_execbuffer2 execbuf[2];
	struct drm_i915_gem_exec_object2 exec[2][2*MAX_FENCES+3];
	struct drm_i915_gem_relocation_entry reloc[2*MAX_FENCES+2];

	int i, n;
	int loop = 1000;

	if (flags & BUSY_LOAD) {
		bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
		batch = intel_batchbuffer_alloc(bufmgr, devid);

		/* Takes forever otherwise. */
		loop = 50;
	}

	if (flags & INTERRUPTIBLE)
		igt_fork_signal_helper();

	memset(execbuf, 0, sizeof(execbuf));
	memset(exec, 0, sizeof(exec));
	memset(reloc, 0, sizeof(reloc));

	for (n = 0; n < 2*num_fences; n++) {
		uint32_t handle = tiled_bo_create(fd);
		exec[1][2*num_fences - n-1].handle = exec[0][n].handle = handle;
		fill_reloc(&reloc[n], handle);
	}

	for (i = 0; i < 2; i++) {
		for (n = 0; n < num_fences; n++)
			exec[i][n].flags = EXEC_OBJECT_NEEDS_FENCE;

		exec[i][2*num_fences].handle = batch_create(fd);
		exec[i][2*num_fences].relocs_ptr = (uintptr_t)reloc;
		exec[i][2*num_fences].relocation_count = 2*num_fences;

		execbuf[i].buffers_ptr = (uintptr_t)exec[i];
		execbuf[i].buffer_count = 2*num_fences+1;
		execbuf[i].batch_len = 2*sizeof(uint32_t);
	}

	do {
		if (flags & BUSY_LOAD)
			emit_dummy_load();

		igt_assert_eq(__gem_execbuf(fd, &execbuf[0]), expected_errno);
		igt_assert_eq(__gem_execbuf(fd, &execbuf[1]), expected_errno);
	} while (--loop);

	if (flags & INTERRUPTIBLE)
		igt_stop_signal_helper();

	/* Cleanup */
	for (n = 0; n < 2*num_fences; n++)
		gem_close(fd, exec[0][n].handle);

	for (i = 0; i < 2; i++)
		gem_close(fd, exec[i][2*num_fences].handle);

	if (flags & BUSY_LOAD) {
		intel_batchbuffer_free(batch);
		drm_intel_bufmgr_destroy(bufmgr);
	}
}
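
batch_create() is not part of this excerpt. A common pattern in igt tests, and one consistent with the batch_len of two dwords set above, is a one-page object holding just MI_BATCH_BUFFER_END plus a padding noop; the sketch below assumes the igt gem_create()/gem_write() helpers:

static uint32_t batch_create(int fd)
{
	const uint32_t buf[] = { MI_BATCH_BUFFER_END, 0 };
	uint32_t handle = gem_create(fd, 4096);

	/* upload the two-dword batch into the new bo */
	gem_write(fd, handle, 0, buf, sizeof(buf));

	return handle;
}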
int main(int argc, char **argv)
{
	drm_intel_bo *bo[4096];
	uint32_t bo_start_val[4096];
	uint32_t start = 0;
	int fd, i, count;

	igt_skip_on_simulation();

	fd = drm_open_any();
	count = 3 * gem_aperture_size(fd) / (1024*1024) / 2;
	if (count > intel_get_total_ram_mb() * 9 / 10) {
		count = intel_get_total_ram_mb() * 9 / 10;
		printf("not enough RAM to run test, reducing buffer count\n");
	}
	count |= 1;
	printf("Using %d 1MiB buffers\n", count);

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	for (i = 0; i < count; i++) {
		bo[i] = create_bo(fd, start);
		bo_start_val[i] = start;

		/*
		printf("Creating bo %d\n", i);
		check_bo(bo[i], bo_start_val[i]);
		*/

		start += 1024 * 1024 / 4;
	}

	for (i = 0; i < count; i++) {
		int src = count - i - 1;
		intel_copy_bo(batch, bo[i], bo[src], width, height);
		bo_start_val[i] = bo_start_val[src];
	}

	for (i = 0; i < count * 4; i++) {
		int src = random() % count;
		int dst = random() % count;

		if (src == dst)
			continue;

		intel_copy_bo(batch, bo[dst], bo[src], width, height);
		bo_start_val[dst] = bo_start_val[src];

		/*
		check_bo(bo[dst], bo_start_val[dst]);
		printf("%d: copy bo %d to %d\n", i, src, dst);
		*/
	}

	for (i = 0; i < count; i++) {
		/*
		printf("check %d\n", i);
		*/
		check_bo(fd, bo[i], bo_start_val[i]);

		drm_intel_bo_unreference(bo[i]);
		bo[i] = NULL;
	}

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #14
	src = gem_handle_to_libdrm_bo(cmd_data->bufmgr, fd, "", tmp.handle);
	igt_assert(src);
	dst = gem_handle_to_libdrm_bo(cmd_data->bufmgr, fd, "", buf->handle);
	igt_assert(dst);

	src_buf.bo = src;
	src_buf.stride = tmp.stride;
	src_buf.tiling = I915_TILING_NONE;
	src_buf.size = tmp.size;
	dst_buf.bo = dst;
	dst_buf.stride = buf->stride;
	dst_buf.tiling = tiling;
	dst_buf.size = buf->size;

	batch = intel_batchbuffer_alloc(cmd_data->bufmgr, devid);
	igt_assert(batch);

	switch (buf->bpp) {
	case 16:
	case 32:
		adjusted_w = rect->w / (32 / buf->bpp);
		adjusted_dst_x = rect->x / (32 / buf->bpp);
		break;
	default:
		igt_assert(false);
	}

	rendercopy(batch, cmd_data->context, &src_buf, 0, 0, adjusted_w,
		   rect->h, &dst_buf, adjusted_dst_x, rect->y);
int main(int argc, char **argv)
{
	data_t data = {0, };
	struct intel_batchbuffer *batch = NULL;
	struct igt_buf src, dst;
	igt_render_copyfunc_t render_copy = NULL;
	int opt_dump_aub = igt_aub_dump_enabled();

	igt_simple_init_parse_opts(&argc, argv, "da", NULL, NULL,
				   opt_handler, NULL);

	igt_fixture {
		data.drm_fd = drm_open_any_render();
		data.devid = intel_get_drm_devid(data.drm_fd);

		data.bufmgr = drm_intel_bufmgr_gem_init(data.drm_fd, 4096);
		igt_assert(data.bufmgr);

		render_copy = igt_get_render_copyfunc(data.devid);
		igt_require_f(render_copy,
			      "no render-copy function\n");

		batch = intel_batchbuffer_alloc(data.bufmgr, data.devid);
		igt_assert(batch);
	}

	scratch_buf_init(&data, &src, WIDTH, HEIGHT, STRIDE, SRC_COLOR);
	scratch_buf_init(&data, &dst, WIDTH, HEIGHT, STRIDE, DST_COLOR);

	scratch_buf_check(&data, &src, WIDTH / 2, HEIGHT / 2, SRC_COLOR);
	scratch_buf_check(&data, &dst, WIDTH / 2, HEIGHT / 2, DST_COLOR);

	if (opt_dump_png) {
		scratch_buf_write_to_png(&src, "source.png");
		scratch_buf_write_to_png(&dst, "destination.png");
	}

	if (opt_dump_aub) {
		drm_intel_bufmgr_gem_set_aub_filename(data.bufmgr,
						      "rendercopy.aub");
		drm_intel_bufmgr_gem_set_aub_dump(data.bufmgr, true);
	}

	/* This will copy the src to the mid point of the dst buffer. Presumably
	 * the out of bounds accesses will get clipped.
	 * Resulting buffer should look like:
	 *	  _______
	 *	 |dst|dst|
	 *	 |dst|src|
	 *	  -------
	 */
	render_copy(batch, NULL,
		    &src, 0, 0, WIDTH, HEIGHT,
		    &dst, WIDTH / 2, HEIGHT / 2);

	if (opt_dump_png)
		scratch_buf_write_to_png(&dst, "result.png");

	if (opt_dump_aub) {
		drm_intel_gem_bo_aub_dump_bmp(dst.bo,
			0, 0, WIDTH, HEIGHT,
			AUB_DUMP_BMP_FORMAT_ARGB_8888,
			STRIDE, 0);
		drm_intel_bufmgr_gem_set_aub_dump(data.bufmgr, false);
	} else if (check_all_pixels) {
		uint32_t val;
		int i, j;
		gem_read(data.drm_fd, dst.bo->handle, 0,
			 data.linear, sizeof(data.linear));
		for (i = 0; i < WIDTH; i++) {
			for (j = 0; j < HEIGHT; j++) {
				uint32_t color = DST_COLOR;
				val = data.linear[j * WIDTH + i];
				if (j >= HEIGHT/2 && i >= WIDTH/2)
					color = SRC_COLOR;

				igt_assert_f(val == color,
					     "Expected 0x%08x, found 0x%08x at (%d,%d)\n",
					     color, val, i, j);
			}
		}
	} else {
		scratch_buf_check(&data, &dst, 10, 10, DST_COLOR);
		scratch_buf_check(&data, &dst, WIDTH - 10, HEIGHT - 10, SRC_COLOR);
	}

	igt_exit();
}
Example #16
static void render_timeout(int fd)
{
	drm_intel_bufmgr *bufmgr;
	struct intel_batchbuffer *batch;
	int64_t timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;
	int64_t negative_timeout = -1;
	int ret;
	const bool do_signals = true; /* signals will seem to make the operation
				       * use less process CPU time */
	bool done = false;
	int i, iter = 1;

	igt_skip_on_simulation();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	dst = drm_intel_bo_alloc(bufmgr, "dst", BUF_SIZE, 4096);
	dst2 = drm_intel_bo_alloc(bufmgr, "dst2", BUF_SIZE, 4096);

	igt_skip_on_f(gem_bo_wait_timeout(fd, dst->handle, &timeout) == -EINVAL,
		      "kernel doesn't support wait_timeout, skipping test\n");
	timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;

	/* Figure out a rough number of fills required to consume 1 second of
	 * GPU work.
	 */
	do {
		struct timespec start, end;
		long diff;

#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
#endif

		igt_assert(clock_gettime(CLOCK_MONOTONIC_RAW, &start) == 0);
		for (i = 0; i < iter; i++)
			blt_color_fill(batch, dst, BUF_PAGES);
		intel_batchbuffer_flush(batch);
		drm_intel_bo_wait_rendering(dst);
		igt_assert(clock_gettime(CLOCK_MONOTONIC_RAW, &end) == 0);

		diff = do_time_diff(&end, &start);
		igt_assert(diff >= 0);

		if ((diff / MSEC_PER_SEC) > ENOUGH_WORK_IN_SECONDS)
			done = true;
		else
			iter <<= 1;
	} while (!done && iter < 1000000);

	igt_assert_lt(iter, 1000000);

	igt_info("%d iters is enough work\n", iter);
	gem_quiescent_gpu(fd);
	if (do_signals)
		igt_fork_signal_helper();

	/* We should be able to do half as much work in the same amount of time,
	 * but because we might schedule almost twice as much as required, we
	 * might accidentally time out. Hence add some fudge. */
	for (i = 0; i < iter/3; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);
	igt_assert(gem_bo_busy(fd, dst2->handle) == true);

	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), 0);
	igt_assert(gem_bo_busy(fd, dst2->handle) == false);
	igt_assert_neq(timeout, 0);
	if (timeout == (ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC))
		igt_info("Buffer was already done!\n");
	else
		igt_info("Finished with %" PRIu64 " time remaining\n", timeout);

	/* check that polling with timeout=0 works. */
	timeout = 0;
	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), 0);
	igt_assert_eq(timeout, 0);

	/* Now check that we correctly time out, twice the auto-tune load should
	 * be good enough. */
	timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;
	for (i = 0; i < iter*2; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);

	ret = gem_bo_wait_timeout(fd, dst2->handle, &timeout);
	igt_assert_eq(ret, -ETIME);
	igt_assert_eq(timeout, 0);
	igt_assert(gem_bo_busy(fd, dst2->handle) == true);

	/* check that polling with timeout=0 works. */
	timeout = 0;
	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), -ETIME);
	igt_assert_eq(timeout, 0);

	/* Now check that we can pass negative (infinite) timeouts. */
	negative_timeout = -1;
	for (i = 0; i < iter; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);

	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &negative_timeout), 0);
	igt_assert_eq(negative_timeout, -1); /* infinity always remains */
	igt_assert(gem_bo_busy(fd, dst2->handle) == false);

	if (do_signals)
		igt_stop_signal_helper();
	drm_intel_bo_unreference(dst2);
	drm_intel_bo_unreference(dst);
	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);
}
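
gem_bo_wait_timeout() is not shown above; a plausible wrapper (an assumption, using drmIoctl() and the DRM_IOCTL_I915_GEM_WAIT ioctl from i915_drm.h) would look like the sketch below. The kernel writes the remaining time back into timeout_ns and the call fails with -ETIME when the wait expires, which is the behaviour render_timeout() checks for:

#include <errno.h>
#include <string.h>

static int gem_bo_wait_timeout(int fd, uint32_t handle, int64_t *timeout_ns)
{
	struct drm_i915_gem_wait wait;
	int ret;

	memset(&wait, 0, sizeof(wait));
	wait.bo_handle = handle;
	wait.timeout_ns = *timeout_ns;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
	if (ret)
		ret = -errno;

	/* report how much of the timeout is left (0 on -ETIME) */
	*timeout_ns = wait.timeout_ns;
	return ret;
}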
Example #17
GLboolean
intelInitContext(struct intel_context *intel,
                 const __GLcontextModes * mesaVis,
                 __DRIcontext * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions)
{
   GLcontext *ctx = &intel->ctx;
   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct intel_screen *intelScreen = sPriv->private;
   int bo_reuse_mode;

   /* we can't do anything without a connection to the device */
   if (intelScreen->bufmgr == NULL)
      return GL_FALSE;

   if (!_mesa_initialize_context(&intel->ctx, mesaVis, shareCtx,
                                 functions, (void *) intel)) {
      printf("%s: failed to init mesa context\n", __FUNCTION__);
      return GL_FALSE;
   }

   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driScreen = sPriv;
   intel->driContext = driContextPriv;
   intel->driFd = sPriv->fd;

   intel->has_xrgb_textures = GL_TRUE;
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      intel->gen = 6;
      intel->needs_ff_sync = GL_TRUE;
      intel->has_luminance_srgb = GL_TRUE;
   } else if (IS_GEN5(intel->intelScreen->deviceID)) {
      intel->gen = 5;
      intel->needs_ff_sync = GL_TRUE;
      intel->has_luminance_srgb = GL_TRUE;
   } else if (IS_965(intel->intelScreen->deviceID)) {
      intel->gen = 4;
      if (IS_G4X(intel->intelScreen->deviceID)) {
	  intel->has_luminance_srgb = GL_TRUE;
	  intel->is_g4x = GL_TRUE;
      }
   } else if (IS_9XX(intel->intelScreen->deviceID)) {
      intel->gen = 3;
      if (IS_945(intel->intelScreen->deviceID)) {
	 intel->is_945 = GL_TRUE;
      }
   } else {
      intel->gen = 2;
      if (intel->intelScreen->deviceID == PCI_CHIP_I830_M ||
	  intel->intelScreen->deviceID == PCI_CHIP_845_G) {
	 intel->has_xrgb_textures = GL_FALSE;
      }
   }

   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       intel->driScreen->myNum,
		       (intel->gen >= 4) ? "i965" : "i915");
   if (intelScreen->deviceID == PCI_CHIP_I865_G)
      intel->maxBatchSize = 4096;
   else
      intel->maxBatchSize = BATCH_SZ;

   intel->bufmgr = intelScreen->bufmgr;

   bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(intel->bufmgr);
      break;
   }

   /* This doesn't yet catch all non-conformant rendering, but it's a
    * start.
    */
   if (getenv("INTEL_STRICT_CONFORMANCE")) {
      unsigned int value = atoi(getenv("INTEL_STRICT_CONFORMANCE"));
      if (value > 0) {
         intel->conformance_mode = value;
      }
      else {
         intel->conformance_mode = 1;
      }
   }

   if (intel->conformance_mode > 0) {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 1.0;
      ctx->Const.MaxLineWidthAA = 1.0;
      ctx->Const.LineWidthGranularity = 1.0;
   }
   else {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 5.0;
      ctx->Const.MaxLineWidthAA = 5.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   /* reinitialize the context point state.
    * It depends on constants in __GLcontextRec::Const
    */
   _mesa_init_point(ctx);

   meta_init_metaops(ctx, &intel->meta);
   ctx->Const.MaxColorAttachments = 4;  /* XXX FBO: review this */
   if (intel->gen >= 4) {
      if (MAX_WIDTH > 8192)
	 ctx->Const.MaxRenderbufferSize = 8192;
   } else {
      if (MAX_WIDTH > 2048)
	 ctx->Const.MaxRenderbufferSize = 2048;
   }

   /* Initialize the software rasterizer and helper modules. */
   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx);
   _tnl_CreateContext(ctx);
   _swsetup_CreateContext(ctx);
 
   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog(ctx, GL_FALSE);
   _swrast_allow_vertex_fog(ctx, GL_TRUE);

   _mesa_meta_init(ctx);

   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   /* XXX FBO: this doesn't seem to be used anywhere */
   switch (mesaVis->depthBits) {
   case 0:                     /* what to do in this case? */
   case 16:
      intel->polygon_offset_scale = 1.0;
      break;
   case 24:
      intel->polygon_offset_scale = 2.0;     /* req'd to pass glean */
      break;
   default:
      assert(0);
      break;
   }

   if (intel->gen >= 4)
      intel->polygon_offset_scale /= 0xffff;

   intel->RenderIndex = ~0;

   intelInitExtensions(ctx);

   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);

   intel->batch = intel_batchbuffer_alloc(intel);

   intel_fbo_init(intel);

   if (intel->ctx.Mesa_DXTn) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
      _mesa_enable_extension(ctx, "GL_S3_s3tc");
   }
   else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
   }
   intel->use_texture_tiling = driQueryOptionb(&intel->optionCache,
					       "texture_tiling");
   intel->use_early_z = driQueryOptionb(&intel->optionCache, "early_z");

   intel->prim.primitive = ~0;

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      intel->always_flush_batch = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      intel->always_flush_cache = 1;
   }

   /* Disable all hardware rendering (skip emitting batches and fences/waits
    * to the kernel)
    */
   intel->no_hw = getenv("INTEL_NO_HW") != NULL;

   return GL_TRUE;
}
int main(int argc, char **argv)
{
	drm_intel_bufmgr *bufmgr;
	struct intel_batchbuffer *batch;
	uint32_t *start_val;
	drm_intel_bo **bo;
	uint32_t start = 0;
	int i, j, fd, count;

	fd = drm_open_any();

	render_copy = get_render_copyfunc(intel_get_drm_devid(fd));
	if (render_copy == NULL) {
		printf("no render-copy function, doing nothing\n");
		return 77;
	}

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	count = 0;
	if (argc > 1)
		count = atoi(argv[1]);
	if (count == 0)
		count = 3 * gem_aperture_size(fd) / SIZE / 2;
	printf("Using %d 1MiB buffers\n", count);

	bo = malloc(sizeof(*bo)*count);
	start_val = malloc(sizeof(*start_val)*count);

	for (i = 0; i < count; i++) {
		bo[i] = drm_intel_bo_alloc(bufmgr, "", SIZE, 4096);
		start_val[i] = start;
		for (j = 0; j < WIDTH*HEIGHT; j++)
			linear[j] = start++;
		gem_write(fd, bo[i]->handle, 0, linear, sizeof(linear));
	}

	printf("Verifying initialisation...\n");
	for (i = 0; i < count; i++)
		check_bo(fd, bo[i]->handle, start_val[i]);

	printf("Cyclic blits, forward...\n");
	for (i = 0; i < count * 4; i++) {
		struct scratch_buf src, dst;

		src.bo = bo[i % count];
		src.stride = STRIDE;
		src.tiling = I915_TILING_NONE;
		src.size = SIZE;

		dst.bo = bo[(i + 1) % count];
		dst.stride = STRIDE;
		dst.tiling = I915_TILING_NONE;
		dst.size = SIZE;

		render_copy(batch, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
		start_val[(i + 1) % count] = start_val[i % count];
	}
	for (i = 0; i < count; i++)
		check_bo(fd, bo[i]->handle, start_val[i]);

	printf("Cyclic blits, backward...\n");
	for (i = 0; i < count * 4; i++) {
		struct scratch_buf src, dst;

		src.bo = bo[(i + 1) % count];
		src.stride = STRIDE;
		src.tiling = I915_TILING_NONE;
		src.size = SIZE;

		dst.bo = bo[i % count];
		dst.stride = STRIDE;
		dst.tiling = I915_TILING_NONE;
		dst.size = SIZE;

		render_copy(batch, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
		start_val[i % count] = start_val[(i + 1) % count];
	}
	for (i = 0; i < count; i++)
		check_bo(fd, bo[i]->handle, start_val[i]);

	printf("Random blits...\n");
	for (i = 0; i < count * 4; i++) {
		struct scratch_buf src, dst;
		int s = random() % count;
		int d = random() % count;

		if (s == d)
			continue;

		src.bo = bo[s];
		src.stride = STRIDE;
		src.tiling = I915_TILING_NONE;
		src.size = SIZE;

		dst.bo = bo[d];
		dst.stride = STRIDE;
		dst.tiling = I915_TILING_NONE;
		dst.size = SIZE;

		render_copy(batch, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
		start_val[d] = start_val[s];
	}
	for (i = 0; i < count; i++)
		check_bo(fd, bo[i]->handle, start_val[i]);

	return 0;
}
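
check_bo() is not included in this excerpt. Since main() fills every buffer with an incrementing counter, a plausible verification helper (an assumption, reusing the same linear[]/WIDTH/HEIGHT globals and the igt gem_read() wrapper) reads the object back and checks each dword:

#include <stdlib.h>

static void check_bo(int fd, uint32_t handle, uint32_t val)
{
	int j;

	gem_read(fd, handle, 0, linear, sizeof(linear));
	for (j = 0; j < WIDTH*HEIGHT; j++) {
		if (linear[j] != val) {
			fprintf(stderr,
				"dword %d: expected 0x%08x, found 0x%08x\n",
				j, val, linear[j]);
			abort();
		}
		val++;
	}
}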