Example #1
static void test_llseek_bad(void)
{
	int fd;
	uint32_t handle;
	int dma_buf_fd;

	counter = 0;

	fd = drm_open_driver(DRIVER_INTEL);

	handle = gem_create(fd, BO_SIZE);
	dma_buf_fd = prime_handle_to_fd(fd, handle);

	gem_close(fd, handle);

	igt_require(lseek(dma_buf_fd, 0, SEEK_END) >= 0);

	igt_assert(lseek(dma_buf_fd, -1, SEEK_END) == -1 && errno == EINVAL);
	igt_assert(lseek(dma_buf_fd, 1, SEEK_SET) == -1 && errno == EINVAL);
	igt_assert(lseek(dma_buf_fd, BO_SIZE, SEEK_SET) == -1 && errno == EINVAL);
	igt_assert(lseek(dma_buf_fd, BO_SIZE + 1, SEEK_SET) == -1 && errno == EINVAL);
	igt_assert(lseek(dma_buf_fd, BO_SIZE - 1, SEEK_SET) == -1 && errno == EINVAL);

	close(dma_buf_fd);

	close(fd);
}
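The one seek test_llseek_bad() requires to succeed, lseek(dma_buf_fd, 0, SEEK_END), is also the usual way userspace queries a dma-buf's size: dma-buf fds only accept offset 0 for SEEK_SET and SEEK_END, which is exactly what the EINVAL asserts above verify. A minimal sketch built on that behaviour (the dma_buf_size name is ours, not from the test):

/* Sketch: report a dma-buf's size via lseek(SEEK_END, 0), then rewind.
 * Assumes only that seeks to offset 0 succeed, as asserted above. */
static off_t dma_buf_size(int dma_buf_fd)
{
	off_t size = lseek(dma_buf_fd, 0, SEEK_END);

	igt_assert(size >= 0);
	igt_assert(lseek(dma_buf_fd, 0, SEEK_SET) == 0);
	return size;
}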
Example #2
static void commit_crtc(data_t *data, igt_output_t *output, igt_plane_t *plane)
{
	igt_display_t *display = &data->display;
	enum igt_commit_style commit = COMMIT_LEGACY;
	igt_plane_t *primary;

	/*
	 * With igt_display_commit2 and COMMIT_UNIVERSAL, we call just the
	 * setplane without a modeset. So, to be able to call
	 * igt_display_commit and ultimately setcrtc to do the first modeset,
	 * we create an fb covering the crtc and call commit
	 */

	primary = igt_output_get_plane(output, IGT_PLANE_PRIMARY);
	igt_plane_set_fb(primary, &data->fb_modeset);
	igt_display_commit(display);

	igt_plane_set_fb(plane, &data->fb);

	if (!plane->is_cursor)
		igt_plane_set_position(plane, data->pos_x, data->pos_y);

	if (plane->is_primary || plane->is_cursor) {
		igt_require(data->display.has_universal_planes);
		commit = COMMIT_UNIVERSAL;
	}

	igt_display_commit2(display, commit);
}
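Example #3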
static void read_rc6_residency(int value[], const char *name_of_rc6_residency)
{
	unsigned int i;
	const int device = drm_get_card();
	char *path;
	int ret;
	FILE *file;

	/* For some reason my ivb isn't idle even after syncing up with the GPU.
	 * Let's add a sleep just to make it happy. */
	sleep(5);

	ret = asprintf(&path, "/sys/class/drm/card%d/power/rc6_enable", device);
	igt_assert_neq(ret, -1);

	file = fopen(path, "r");
	igt_require(file);
	fclose(file); /* the handle was only needed to probe for existence */

	/* Claim success if rc6 is not enabled. */
	if (readit(path) == 0)
		igt_success();
	free(path);

	for (i = 0; i < 2; i++) {
		sleep(SLEEP_DURATION / 1000);
		ret = asprintf(&path, "/sys/class/drm/card%d/power/%s_residency_ms",
			       device, name_of_rc6_residency);
		igt_assert_neq(ret, -1);
		value[i] = readit(path);
		free(path); /* asprintf allocates a new buffer every pass */
	}
}
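Example #4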
static void run_test(data_t *data, void (*testfunc)(data_t *), int cursor_w, int cursor_h)
{
	igt_display_t *display = &data->display;
	igt_output_t *output;
	enum pipe p;
	int valid_tests = 0;

	igt_require(cursor_w <= data->cursor_max_w &&
		    cursor_h <= data->cursor_max_h);

	for_each_connected_output(display, output) {
		data->output = output;
		for_each_pipe(display, p) {
			data->pipe = p;

			if (!prepare_crtc(data, output, cursor_w, cursor_h))
				continue;

			valid_tests++;

			igt_info("Beginning %s on pipe %s, connector %s\n",
				 igt_subtest_name(),
				 kmstest_pipe_name(data->pipe),
				 igt_output_name(output));

			testfunc(data);

			igt_info("\n%s on pipe %s, connector %s: PASSED\n\n",
				 igt_subtest_name(),
				 kmstest_pipe_name(data->pipe),
				 igt_output_name(output));

			/* cleanup what prepare_crtc() has done */
			cleanup_crtc(data, output);
		}
	}

	/* Reconstructed closing; the snippet is truncated at this point in
	 * the source. */
	igt_require_f(valid_tests, "no valid crtc/connector combinations found\n");
}
Example #5
static int swizzle_addr(int addr, int swizzle)
{
	int bit6;

	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		bit6 = BIT(addr, 6);
		break;
	case I915_BIT_6_SWIZZLE_9:
		bit6 = BIT(addr, 6) ^ BIT(addr, 9);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		bit6 = BIT(addr, 6) ^ BIT(addr, 9) ^ BIT(addr, 10);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		bit6 = BIT(addr, 6) ^ BIT(addr, 9) ^ BIT(addr, 11);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		bit6 = BIT(addr, 6) ^ BIT(addr, 9) ^ BIT(addr, 10) ^
		       BIT(addr, 11);
		break;
	case I915_BIT_6_SWIZZLE_UNKNOWN:
	case I915_BIT_6_SWIZZLE_9_17:
	case I915_BIT_6_SWIZZLE_9_10_17:
	default:
		/* If we hit this case, we need to implement support for the
		 * appropriate swizzling method. */
		igt_require(false);
		break;
	}

	addr &= ~(1 << 6);
	addr |= (bit6 << 6);
	return addr;
}
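swizzle_addr() only ever reads individual address bits and rewrites bit 6, so the BIT() helper it depends on just extracts bit n as 0 or 1. A definition consistent with that usage (assumed, not taken from the test source):

/* Assumed helper: extract bit n of x as 0 or 1, matching how
 * swizzle_addr() XORs single address bits above. */
#define BIT(x, n) (((x) >> (n)) & 1)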
Example #6
static void
processes(void)
{
	int *all_fds;
	uint64_t aperture;
	struct rlimit rlim;
	int ppgtt_mode;
	int ctx_size;
	int obj_size;
	int n;

	igt_skip_on_simulation();

	fd = drm_open_driver_render(DRIVER_INTEL);
	devid = intel_get_drm_devid(fd);
	aperture = gem_aperture_size(fd);

	ppgtt_mode = uses_ppgtt(fd);
	igt_require(ppgtt_mode);

	render_copy = igt_get_render_copyfunc(devid);
	igt_require_f(render_copy, "no render-copy function\n");

	if (ppgtt_mode > 1)
		ctx_size = aperture >> 10; /* Assume full-ppgtt of maximum size */
	else
		ctx_size = 64 << 10; /* assumed minimum for aliasing ppgtt */

	/* The remainder of the snippet is truncated in the source. */
}
Example #7
static void draw_rect_mmap_wc(int fd, struct buf_data *buf, struct rect *rect,
			      uint32_t color)
{
	uint32_t *ptr;
	uint32_t tiling, swizzle;

	gem_set_domain(fd, buf->handle, I915_GEM_DOMAIN_GTT,
		       I915_GEM_DOMAIN_GTT);
	gem_get_tiling(fd, buf->handle, &tiling, &swizzle);

	/* We haven't implemented support for the older tiling methods yet. */
	if (tiling != I915_TILING_NONE)
		igt_require(intel_gen(intel_get_drm_devid(fd)) >= 5);

	ptr = gem_mmap__wc(fd, buf->handle, 0, buf->size,
			   PROT_READ | PROT_WRITE);

	switch (tiling) {
	case I915_TILING_NONE:
		draw_rect_ptr_linear(ptr, buf->stride, rect, color, buf->bpp);
		break;
	case I915_TILING_X:
		draw_rect_ptr_tiled(ptr, buf->stride, swizzle, rect, color,
				    buf->bpp);
		break;
	default:
		igt_assert(false);
		break;
	}

	igt_assert(munmap(ptr, buf->size) == 0);
}
Example #8
static void run_test(int fd, unsigned ring, unsigned flags)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	const uint32_t bbe = MI_BATCH_BUFFER_END;
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_relocation_entry reloc[1024];
	struct drm_i915_gem_execbuffer2 execbuf;
	struct igt_hang_ring hang;
	uint32_t *batch, *b;
	int i;

	gem_require_ring(fd, ring);
	igt_skip_on_f(gen == 6 && (ring & ~(3<<13)) == I915_EXEC_BSD,
		      "MI_STORE_DATA broken on gen6 bsd\n");

	gem_quiescent_gpu(fd);

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)obj;
	execbuf.buffer_count = 2;
	execbuf.flags = ring | (1 << 11); /* I915_EXEC_NO_RELOC */
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE;

	memset(obj, 0, sizeof(obj));
	obj[0].handle = gem_create(fd, 4096);
	obj[0].flags |= EXEC_OBJECT_WRITE;
	obj[1].handle = gem_create(fd, 1024*16 + 4096);
	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
	igt_require(__gem_execbuf(fd, &execbuf) == 0);

	obj[1].relocs_ptr = (uintptr_t)reloc;
	obj[1].relocation_count = 1024;

	batch = gem_mmap__cpu(fd, obj[1].handle, 0, 16*1024 + 4096,
			      PROT_WRITE | PROT_READ);
	gem_set_domain(fd, obj[1].handle,
		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

	memset(reloc, 0, sizeof(reloc));
	b = batch;
	for (i = 0; i < 1024; i++) {
		uint64_t offset;

		reloc[i].target_handle = obj[0].handle;
		reloc[i].presumed_offset = obj[0].offset;
		reloc[i].offset = (b - batch + 1) * sizeof(*batch);
		reloc[i].delta = i * sizeof(uint32_t);
		reloc[i].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		reloc[i].write_domain = I915_GEM_DOMAIN_INSTRUCTION;

		offset = obj[0].offset + reloc[i].delta;
		*b++ = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
		if (gen >= 8) {
			*b++ = offset;
			*b++ = offset >> 32;
		} else if (gen >= 4) {
			*b++ = 0;
			*b++ = offset;
			reloc[i].offset += sizeof(*batch);
		} else {
			b[-1] -= 1;
			*b++ = offset;
		}
		*b++ = i;
	}

	/* Reconstructed from the usual IGT store-dword pattern; the rest of
	 * the snippet (batch termination, execbuf and hang check) is
	 * truncated in the source. */
}
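Example #9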
static void performance(void)
{
	int n, loop, count;
	int fd, num_fences;
	double linear[2], tiled[2];

	fd = drm_open_any();

	num_fences = gem_available_fences(fd);
	igt_require(num_fences > 0);

	for (count = 2; count < 4*num_fences; count *= 2) {
		struct timeval start, end;
		uint32_t handle[count];
		void *ptr[count];

		for (n = 0; n < count; n++) {
			handle[n] = gem_create(fd, OBJECT_SIZE);
			ptr[n] = gem_mmap(fd, handle[n], OBJECT_SIZE, PROT_READ | PROT_WRITE);
			igt_assert(ptr[n]);
		}

		gettimeofday(&start, NULL);
		for (loop = 0; loop < 1024; loop++) {
			for (n = 0; n < count; n++)
				memset(ptr[n], 0, OBJECT_SIZE);
		}
		gettimeofday(&end, NULL);

		/* index 0 keeps the count == 2 baseline; index 1 holds the latest run */
		linear[count != 2] = count * loop / elapsed(&start, &end);
		igt_info("Upload rate for %d linear surfaces:	%7.3fMiB/s\n", count, linear[count != 2]);

		for (n = 0; n < count; n++)
			gem_set_tiling(fd, handle[n], I915_TILING_X, 1024);

		gettimeofday(&start, NULL);
		for (loop = 0; loop < 1024; loop++) {
			for (n = 0; n < count; n++)
				memset(ptr[n], 0, OBJECT_SIZE);
		}
		gettimeofday(&end, NULL);

		tiled[count != 2] = count * loop / elapsed(&start, &end);
		igt_info("Upload rate for %d tiled surfaces:	%7.3fMiB/s\n", count, tiled[count != 2]);

		for (n = 0; n < count; n++) {
			munmap(ptr[n], OBJECT_SIZE);
			gem_close(fd, handle[n]);
		}

	}

	errno = 0;
	igt_assert(linear[1] > 0.75 * linear[0]);
	igt_assert(tiled[1] > 0.75 * tiled[0]);
}
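performance() (and the later threaded variants) rely on an elapsed() helper that must return wall-clock seconds as a double for the rate arithmetic to work, with OBJECT_SIZE evidently 1 MiB for the MiB/s label to hold. A sketch consistent with that usage, assuming the conventional IGT timeval difference:

/* Assumed helper: wall-clock seconds between two gettimeofday() samples,
 * so that count * loop / elapsed() is uploads per second. */
static double elapsed(const struct timeval *start, const struct timeval *end)
{
	return (end->tv_sec - start->tv_sec) +
	       1e-6 * (end->tv_usec - start->tv_usec);
}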
Example #10
static void plane_primary(struct kms_atomic_crtc_state *crtc,
			  struct kms_atomic_plane_state *plane_old)
{
	struct drm_mode_modeinfo *mode = crtc->mode.data;
	struct kms_atomic_plane_state plane = *plane_old;
	uint32_t format = plane_get_igt_format(&plane);
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	uint32_t *connectors;
	int num_connectors = 0;
	struct igt_fb fb;
	int i;

	connectors = calloc(crtc->state->num_connectors, sizeof(*connectors));
	igt_assert(connectors);

	for (i = 0; i < crtc->state->num_connectors; i++) {
		if (crtc->state->connectors[i].crtc_id == crtc->obj)
			connectors[num_connectors++] =
				crtc->state->connectors[i].obj;
	}

	igt_require(format != 0);

	plane.src_x = 0;
	plane.src_y = 0;
	plane.src_w = mode->hdisplay << 16; /* src_* are 16.16 fixed point */
	plane.src_h = mode->vdisplay << 16;
	plane.crtc_x = 0;
	plane.crtc_y = 0;
	plane.crtc_w = mode->hdisplay;
	plane.crtc_h = mode->vdisplay;
	plane.crtc_id = crtc->obj;
	plane.fb_id = igt_create_pattern_fb(plane.state->desc->fd,
					    plane.crtc_w, plane.crtc_h,
					    format, I915_TILING_NONE, &fb);

	/* Flip the primary plane using the atomic API, and double-check
	 * state is what we think it should be. */
	crtc_commit_atomic(crtc, &plane, req, ATOMIC_RELAX_NONE);

	/* Restore the primary plane and check the state matches the old. */
	crtc_commit_atomic(crtc, plane_old, req, ATOMIC_RELAX_NONE);

	/* Re-enable the plane through the legacy CRTC/primary-plane API, and
	 * verify through atomic. */
	crtc_commit_legacy(crtc, &plane, CRTC_RELAX_MODE);

	/* Restore the plane to its original settings through the legacy CRTC
	 * API, and verify through atomic. */
	crtc_commit_legacy(crtc, plane_old, CRTC_RELAX_MODE);

	/* Finally, restore to the original state. */
	crtc_commit_atomic(crtc, plane_old, req, ATOMIC_RELAX_NONE);

	free(connectors);
	drmModeAtomicFree(req);
}
Example #11
static void igt_prefault_control(bool enable)
{
	const char *name = PREFAULT_DEBUGFS;
	int fd;
	char buf[2] = {'Y', 'N'};
	int index;

	fd = open(name, O_RDWR);
	igt_require(fd >= 0);

	/* The knob behind PREFAULT_DEBUGFS is evidently a "disable" switch:
	 * writing 'N' enables prefaulting, 'Y' disables it. */
	if (enable)
		index = 1;
	else
		index = 0;

	igt_require(write(fd, &buf[index], 1) == 1);

	close(fd);
}
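Example #12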
static void wc_contention(void)
{
	const int loops = 4096;
	int n, count;
	int fd, num_fences;
	double linear[2], tiled[2];

	fd = drm_open_any();
	gem_require_mmap_wc(fd);

	num_fences = gem_available_fences(fd);
	igt_require(num_fences > 0);

	for (count = 1; count < 4*num_fences; count *= 2) {
		struct timeval start, end;
		struct thread_contention threads[count];

		for (n = 0; n < count; n++) {
			threads[n].handle = gem_create(fd, OBJECT_SIZE);
			threads[n].loops = loops;
			threads[n].fd = fd;
		}

		gettimeofday(&start, NULL);
		for (n = 0; n < count; n++)
			pthread_create(&threads[n].thread, NULL, wc_mmap, &threads[n]);
		for (n = 0; n < count; n++)
			pthread_join(threads[n].thread, NULL);
		gettimeofday(&end, NULL);

		linear[count != 2] = count * loops / elapsed(&start, &end) / (OBJECT_SIZE / 4096);
		igt_info("Contended upload rate for %d linear threads/wc:	%7.3fMiB/s\n", count, linear[count != 2]);

		for (n = 0; n < count; n++)
			gem_set_tiling(fd, threads[n].handle, I915_TILING_X, 1024);

		gettimeofday(&start, NULL);
		for (n = 0; n < count; n++)
			pthread_create(&threads[n].thread, NULL, wc_mmap, &threads[n]);
		for (n = 0; n < count; n++)
			pthread_join(threads[n].thread, NULL);
		gettimeofday(&end, NULL);

		tiled[count != 2] = count * loops / elapsed(&start, &end) / (OBJECT_SIZE / 4096);
		igt_info("Contended upload rate for %d tiled threads/wc:	%7.3fMiB/s\n", count, tiled[count != 2]);

		for (n = 0; n < count; n++) {
			gem_close(fd, threads[n].handle);
		}
	}

	errno = 0;
	igt_assert(linear[1] > 0.75 * linear[0]);
	igt_assert(tiled[1] > 0.75 * tiled[0]);
}
Example #13
static void plane_overlay(struct kms_atomic_crtc_state *crtc,
			  struct kms_atomic_plane_state *plane_old)
{
	struct drm_mode_modeinfo *mode = crtc->mode.data;
	struct kms_atomic_plane_state plane = *plane_old;
	uint32_t format = plane_get_igt_format(&plane);
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	struct igt_fb fb;

	igt_require(req);
	igt_require(format != 0);

	plane.src_x = 0;
	plane.src_y = 0;
	plane.src_w = (mode->hdisplay / 2) << 16;
	plane.src_h = (mode->vdisplay / 2) << 16;
	plane.crtc_x = mode->hdisplay / 4;
	plane.crtc_y = mode->vdisplay / 4;
	plane.crtc_w = mode->hdisplay / 2;
	plane.crtc_h = mode->vdisplay / 2;
	plane.crtc_id = crtc->obj;
	plane.fb_id = igt_create_pattern_fb(plane.state->desc->fd,
					    plane.crtc_w, plane.crtc_h,
					    format, I915_TILING_NONE, &fb);

	/* Enable the overlay plane using the atomic API, and double-check
	 * state is what we think it should be. */
	plane_commit_atomic(&plane, req, ATOMIC_RELAX_NONE);

	/* Disable the plane and check the state matches the old. */
	plane_commit_atomic(plane_old, req, ATOMIC_RELAX_NONE);

	/* Re-enable the plane through the legacy plane API, and verify through
	 * atomic. */
	plane_commit_legacy(&plane, ATOMIC_RELAX_NONE);

	/* Restore the plane to its original settings through the legacy plane
	 * API, and verify through atomic. */
	plane_commit_legacy(plane_old, ATOMIC_RELAX_NONE);

	drmModeAtomicFree(req);
}
Example #14
static void wedge_gpu(int fd)
{
    /* First idle the GPU then disable GPU resets before injecting a hang */
    gem_quiescent_gpu(fd);

    igt_require(i915_reset_control(false));

    igt_debug("Wedging GPU by injecting hang\n");
    igt_post_hang_ring(fd, igt_hang_ring(fd, I915_EXEC_DEFAULT));

    igt_assert(i915_reset_control(true));
}
Example #15
static void test_wait(int fd)
{
    igt_hang_ring_t hang;

    /* If the request we wait on completes due to a hang (even for
     * that request), the user expects the return value to be 0 (success).
     */
    hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
    igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
    igt_post_hang_ring(fd, hang);

    /* If the GPU is wedged during the wait, again we expect the return
     * value to be 0 (success).
     */
    igt_require(i915_reset_control(false));
    hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
    igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
    igt_post_hang_ring(fd, hang);
    igt_require(i915_reset_control(true));

    trigger_reset(fd);
}
Example #16
static bool i915_wedged_set(void)
{
    int fd, ret;

    igt_debug("Triggering GPU reset\n");

    fd = igt_debugfs_open("i915_wedged", O_RDWR);
    igt_require(fd >= 0);

    ret = write(fd, "1\n", 2) == 2;
    close(fd);

    return ret;
}
Example #17
static void draw_rect_pwrite_tiled(int fd, struct buf_data *buf,
				   struct rect *rect, uint32_t color,
				   uint32_t swizzle)
{
	int i;
	int tiled_pos, x, y, pixel_size;
	uint8_t tmp[4096];
	int tmp_used = 0, tmp_size;
	bool flush_tmp = false;
	int tmp_start_pos = 0;
	int pixels_written = 0;

	/* We haven't implemented support for the older tiling methods yet. */
	igt_require(intel_gen(intel_get_drm_devid(fd)) >= 5);

	pixel_size = buf->bpp / 8;
	tmp_size = sizeof(tmp) / pixel_size;

	/* Instead of doing one pwrite per pixel, we try to group the maximum
	 * amount of consecutive pixels we can in a single pwrite: that's why we
	 * use the "tmp" variables. */
	for (i = 0; i < tmp_size; i++)
		set_pixel(tmp, i, color, buf->bpp);

	for (tiled_pos = 0; tiled_pos < buf->size; tiled_pos += pixel_size) {
		tiled_pos_to_x_y_linear(tiled_pos, buf->stride, swizzle,
					buf->bpp, &x, &y);

		if (x >= rect->x && x < rect->x + rect->w &&
		    y >= rect->y && y < rect->y + rect->h) {
			if (tmp_used == 0)
				tmp_start_pos = tiled_pos;
			tmp_used++;
		} else {
			flush_tmp = true;
		}

		if (tmp_used == tmp_size || (flush_tmp && tmp_used > 0) ||
		    tiled_pos + pixel_size >= buf->size) {
			gem_write(fd, buf->handle, tmp_start_pos, tmp,
				  tmp_used * pixel_size);
			flush_tmp = false;
			pixels_written += tmp_used;
			tmp_used = 0;

			if (pixels_written == rect->w * rect->h)
				break;
		}
	}
}
Example #18
static bool i915_reset_control(bool enable)
{
    const char *path = "/sys/module/i915/parameters/reset";
    int fd, ret;

    igt_debug("%s GPU reset\n", enable ? "Enabling" : "Disabling");

    fd = open(path, O_RDWR);
    igt_require(fd >= 0);

    ret = write(fd, &"NY"[enable], 1) == 1; /* 'N' disables, 'Y' enables */
    close(fd);

    return ret;
}
Example #19
static void run_on_ring(int fd, unsigned ring_id, const char *ring_name)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 execobj;
	struct {
		uint32_t handle;
		uint32_t *batch;
	} obj[2];
	unsigned i;
	char buf[100];

	gem_require_ring(fd, ring_id);
	igt_require(has_softpin(fd));

	for (i = 0; i < 2; i++) {
		obj[i].handle = gem_create(fd, BATCH_SIZE);
		obj[i].batch = mmap_coherent(fd, obj[i].handle, BATCH_SIZE);
		memset(obj[i].batch, 0xff, BATCH_SIZE);
	}

	memset(&execobj, 0, sizeof(execobj));
	execobj.handle = obj[0].handle;
	obj[0].batch[0] = MI_BATCH_BUFFER_END;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&execobj;
	execbuf.buffer_count = 1;
	execbuf.flags = ring_id;

	/* Execute once to allocate a gtt-offset */
	gem_execbuf(fd, &execbuf);
	execobj.flags = EXEC_OBJECT_PINNED;

	sprintf(buf, "Testing %s cs tlb coherency: ", ring_name);
	for (i = 0; i < BATCH_SIZE/64; i++) {
		execobj.handle = obj[i&1].handle;
		obj[i&1].batch[i*64/4] = MI_BATCH_BUFFER_END;
		execbuf.batch_start_offset = i*64;

		gem_execbuf(fd, &execbuf);
	}

	for (i = 0; i < 2; i++) {
		gem_close(fd, obj[i].handle);
		munmap(obj[i].batch, BATCH_SIZE);
	}
}
Example #20
static void
get_tiling(int fd, uint32_t handle, uint32_t *tiling, uint32_t *swizzle)
{
	struct drm_i915_gem_get_tiling2 {
		uint32_t handle;
		uint32_t tiling_mode;
		uint32_t swizzle_mode;
		uint32_t phys_swizzle_mode;
	} arg;
#define DRM_IOCTL_I915_GEM_GET_TILING2	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling2)

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;

	do_or_die(drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_TILING2, &arg));
	igt_require(arg.phys_swizzle_mode == arg.swizzle_mode);

	*tiling = arg.tiling_mode;
	*swizzle = arg.swizzle_mode;
}
Example #21
static int setup(int in, int nonblock)
{
	int fd;

	alarm(0);

	fd = dup(in);
	if (nonblock) {
		int ret = -1;
		if (fd != -1)
			ret = fcntl(fd, F_GETFL);
		if (ret != -1) {
			ret |= O_NONBLOCK;
			ret = fcntl(fd, F_SETFL, ret);
		}
		igt_require(ret != -1);
	}

	assert_empty(fd);
	return fd;
}
Example #22
static void
test_write_cpu_read_gtt(int fd)
{
	uint32_t handle;
	uint32_t *src, *dst;

	igt_require(gem_has_llc(fd));

	handle = gem_create(fd, OBJECT_SIZE);

	dst = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ);

	src = gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);

	gem_close(fd, handle);

	memset(src, 0xaa, OBJECT_SIZE);
	igt_assert(memcmp(dst, src, OBJECT_SIZE) == 0);

	munmap(src, OBJECT_SIZE);
	munmap(dst, OBJECT_SIZE);
}
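Example #23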
int main(int argc, char **argv)
{
	uint32_t *handle, *start_val;
	uint32_t start = 0;
	int i, fd, count;

	igt_simple_init(argc, argv);

	fd = drm_open_any();

	igt_require(IS_GEN3(intel_get_drm_devid(fd)));

	count = 0;
	if (argc > 1)
		count = atoi(argv[1]);
	if (count == 0)
		count = 3 * gem_aperture_size(fd) / (1024*1024) / 2;
	igt_info("Using %d 1MiB buffers\n", count);

	handle = malloc(sizeof(uint32_t)*count*2);
	start_val = handle + count;

	for (i = 0; i < count; i++) {
		handle[i] = create_bo(fd, start);
		start_val[i] = start;
		start += 1024 * 1024 / 4;
	}

	igt_info("Verifying initialisation...\n");
	for (i = 0; i < count; i++)
		check_bo(fd, handle[i], start_val[i]);

	igt_info("Cyclic blits, forward...\n");
	for (i = 0; i < count * 4; i++) {
		int src = i % count;
		int dst = (i + 1) % count;

		copy(fd, handle[dst], handle[src]);
		start_val[dst] = start_val[src];
	}
	for (i = 0; i < count; i++)
		check_bo(fd, handle[i], start_val[i]);

	igt_info("Cyclic blits, backward...\n");
	for (i = 0; i < count * 4; i++) {
		int src = (i + 1) % count;
		int dst = i % count;

		copy(fd, handle[dst], handle[src]);
		start_val[dst] = start_val[src];
	}
	for (i = 0; i < count; i++)
		check_bo(fd, handle[i], start_val[i]);

	igt_info("Random blits...\n");
	for (i = 0; i < count * 4; i++) {
		int src = random() % count;
		int dst = random() % count;

		if (src == dst)
			continue;

		copy(fd, handle[dst], handle[src]);
		start_val[dst] = start_val[src];
	}
	for (i = 0; i < count; i++)
		check_bo(fd, handle[i], start_val[i]);

	igt_exit();
}
Example #24
static void edp_subtest(int drm_fd, drmModeResPtr drm_res,
			drmModeConnectorPtr *drm_connectors, uint32_t devid,
			bool use_panel_fitter)
{
	int i, rc;
	uint32_t connector_id = 0, crtc_id = 0, buffer_id = 0;
	drmModeModeInfoPtr mode = NULL;
	drmModeModeInfo std_1024_mode = {
		.clock = 65000,
		.hdisplay = 1024,
		.hsync_start = 1048,
		.hsync_end = 1184,
		.htotal = 1344,
		.hskew = 0,
		.vdisplay = 768,
		.vsync_start = 771,
		.vsync_end = 777,
		.vtotal = 806,
		.vscan = 0,
		.vrefresh = 60,
		.flags = 0xA,
		.type = 0x40,
		.name = "Custom 1024x768",
	};

	kmstest_unset_all_crtcs(drm_fd, drm_res);

	for (i = 0; i < drm_res->count_connectors; i++) {
		drmModeConnectorPtr c = drm_connectors[i];

		if (c->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;
		if (c->connection != DRM_MODE_CONNECTED)
			continue;

		if (!use_panel_fitter && c->count_modes) {
			connector_id = c->connector_id;
			mode = &c->modes[0];
			break;
		}
		if (use_panel_fitter) {
			connector_id = c->connector_id;

			/* This is one of the modes Xorg creates for panels, so
			 * it should work just fine. Notice that Gens that
			 * support LPSP are too new for panels with native
			 * 1024x768 resolution, so this should force the panel
			 * fitter. */
			igt_assert(c->count_modes &&
				   c->modes[0].hdisplay > 1024);
			igt_assert(c->count_modes &&
				   c->modes[0].vdisplay > 768);
			mode = &std_1024_mode;
			break;
		}
	}
	igt_require(connector_id);

	crtc_id = drm_res->crtcs[0];
	buffer_id = create_fb(drm_fd, mode->hdisplay, mode->vdisplay);

	igt_assert(crtc_id);
	igt_assert(buffer_id);
	igt_assert(connector_id);
	igt_assert(mode);

	rc = drmModeSetCrtc(drm_fd, crtc_id, buffer_id, 0, 0, &connector_id, 1,
			    mode);
	igt_assert_eq(rc, 0);

	if (use_panel_fitter) {
		if (IS_HASWELL(devid))
			igt_assert(!lpsp_is_enabled(drm_fd));
		else
			igt_assert(lpsp_is_enabled(drm_fd));
	} else {
		igt_assert(lpsp_is_enabled(drm_fd));
	}
}

static void non_edp_subtest(int drm_fd, drmModeResPtr drm_res,
			    drmModeConnectorPtr *drm_connectors)
{
	int i, rc;
	uint32_t connector_id = 0, crtc_id = 0, buffer_id = 0;
	drmModeModeInfoPtr mode = NULL;

	kmstest_unset_all_crtcs(drm_fd, drm_res);

	for (i = 0; i < drm_res->count_connectors; i++) {
		drmModeConnectorPtr c = drm_connectors[i];

		if (c->connector_type == DRM_MODE_CONNECTOR_eDP)
			continue;
		if (c->connection != DRM_MODE_CONNECTED)
			continue;

		if (c->count_modes) {
			connector_id = c->connector_id;
			mode = &c->modes[0];
			break;
		}
	}
	igt_require(connector_id);

	crtc_id = drm_res->crtcs[0];
	buffer_id = create_fb(drm_fd, mode->hdisplay, mode->vdisplay);

	igt_assert(crtc_id);
	igt_assert(buffer_id);
	igt_assert(connector_id);
	igt_assert(mode);

	rc = drmModeSetCrtc(drm_fd, crtc_id, buffer_id, 0, 0, &connector_id, 1,
			    mode);
	igt_assert_eq(rc, 0);

	igt_assert(!lpsp_is_enabled(drm_fd));
}
Example #25
static void plane_invalid_params(struct kms_atomic_crtc_state *crtc,
				 struct kms_atomic_plane_state *plane_old,
				 struct kms_atomic_connector_state *conn)
{
	struct drm_mode_modeinfo *mode = crtc->mode.data;
	struct kms_atomic_plane_state plane = *plane_old;
	uint32_t format = plane_get_igt_format(&plane);
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	struct igt_fb fb;

	/* Pass a series of invalid object IDs for the FB ID. */
	plane.fb_id = plane.obj;
	plane_commit_atomic_err(&plane, plane_old, req,
	                        ATOMIC_RELAX_NONE, EINVAL);

	plane.fb_id = crtc->obj;
	plane_commit_atomic_err(&plane, plane_old, req,
	                        ATOMIC_RELAX_NONE, EINVAL);

	plane.fb_id = conn->obj;
	plane_commit_atomic_err(&plane, plane_old, req,
	                        ATOMIC_RELAX_NONE, EINVAL);

	plane.fb_id = crtc->mode.id;
	plane_commit_atomic_err(&plane, plane_old, req,
	                        ATOMIC_RELAX_NONE, EINVAL);

	plane.fb_id = plane_old->fb_id;
	plane_commit_atomic(&plane, req, ATOMIC_RELAX_NONE);

	/* Pass a series of invalid object IDs for the CRTC ID. */
	plane.crtc_id = plane.obj;
	plane_commit_atomic_err(&plane, plane_old, req,
	                        ATOMIC_RELAX_NONE, EINVAL);

	plane.crtc_id = plane.fb_id;
	plane_commit_atomic_err(&plane, plane_old, req,
	                        ATOMIC_RELAX_NONE, EINVAL);

	plane.crtc_id = conn->obj;
	plane_commit_atomic_err(&plane, plane_old, req,
	                        ATOMIC_RELAX_NONE, EINVAL);

	plane.crtc_id = crtc->mode.id;
	plane_commit_atomic_err(&plane, plane_old, req,
	                        ATOMIC_RELAX_NONE, EINVAL);

	plane.crtc_id = plane_old->crtc_id;
	plane_commit_atomic(&plane, req, ATOMIC_RELAX_NONE);

	/* Create a framebuffer too small for the plane configuration. */
	igt_require(format != 0);

	plane.src_x = 0;
	plane.src_y = 0;
	plane.src_w = mode->hdisplay << 16;
	plane.src_h = mode->vdisplay << 16;
	plane.crtc_x = 0;
	plane.crtc_y = 0;
	plane.crtc_w = mode->hdisplay;
	plane.crtc_h = mode->vdisplay;
	plane.crtc_id = crtc->obj;
	plane.fb_id = igt_create_pattern_fb(plane.state->desc->fd,
					    plane.crtc_w - 1, plane.crtc_h - 1,
					    format, I915_TILING_NONE, &fb);

	plane_commit_atomic_err(&plane, plane_old, req,
	                        ATOMIC_RELAX_NONE, ENOSPC);

	/* Restore the primary plane and check the state matches the old. */
	plane_commit_atomic(plane_old, req, ATOMIC_RELAX_NONE);

	drmModeAtomicFree(req);
}
Example #26
/* Simulates SNA behaviour using negative self-relocations for
 * STATE_BASE_ADDRESS command packets. If they wrap around (to values greater
 * than the total size of the GTT), the GPU will hang.
 * See https://bugs.freedesktop.org/show_bug.cgi?id=78533
 */
static int negative_reloc(int fd, unsigned flags)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 gem_exec[2];
	struct drm_i915_gem_relocation_entry gem_reloc[1000];
	uint64_t gtt_max = get_page_table_size(fd);
	uint32_t buf[1024] = {MI_BATCH_BUFFER_END};
	int i;

#define BIAS (256*1024)

	igt_require(intel_gen(intel_get_drm_devid(fd)) >= 7);

	memset(gem_exec, 0, sizeof(gem_exec));
	gem_exec[0].handle = gem_create(fd, 4096);
	gem_write(fd, gem_exec[0].handle, 0, buf, 8);

	/* Zero the entry first: the kernel also reads the fields we don't
	 * set explicitly (write_domain, presumed_offset). */
	memset(gem_reloc, 0, sizeof(gem_reloc));
	gem_reloc[0].offset = 1024;
	gem_reloc[0].delta = 0;
	gem_reloc[0].target_handle = gem_exec[0].handle;
	gem_reloc[0].read_domains = I915_GEM_DOMAIN_COMMAND;

	gem_exec[1].handle = gem_create(fd, 4096);
	gem_write(fd, gem_exec[1].handle, 0, buf, 8);
	gem_exec[1].relocation_count = 1;
	gem_exec[1].relocs_ptr = (uintptr_t)gem_reloc;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)gem_exec;
	execbuf.buffer_count = 2;
	execbuf.batch_len = 8;

	do_or_die(drmIoctl(fd,
			   DRM_IOCTL_I915_GEM_EXECBUFFER2,
			   &execbuf));
	gem_close(fd, gem_exec[1].handle);

	igt_info("Found offset %lld for 4k batch\n", (long long)gem_exec[0].offset);
	/*
	 * Ideally we'd like to be able to control where the kernel is going to
	 * place the buffer. We don't SKIP here because it causes the test
	 * to "randomly" flip-flop between the SKIP and PASS states.
	 */
	if (gem_exec[0].offset < BIAS) {
		igt_info("Offset is below BIAS, not testing anything\n");
		return 0;
	}

	memset(gem_reloc, 0, sizeof(gem_reloc));
	for (i = 0; i < sizeof(gem_reloc)/sizeof(gem_reloc[0]); i++) {
		gem_reloc[i].offset = 8 + 4*i;
		gem_reloc[i].delta = -BIAS*i/1024;
		gem_reloc[i].target_handle = flags & USE_LUT ? 0 : gem_exec[0].handle;
		gem_reloc[i].read_domains = I915_GEM_DOMAIN_COMMAND;
	}

	gem_exec[0].relocation_count = sizeof(gem_reloc)/sizeof(gem_reloc[0]);
	gem_exec[0].relocs_ptr = (uintptr_t)gem_reloc;

	execbuf.buffer_count = 1;
	execbuf.flags = flags & USE_LUT;
	do_or_die(drmIoctl(fd,
			   DRM_IOCTL_I915_GEM_EXECBUFFER2,
			   &execbuf));

	igt_info("Batch is now at offset %lld\n", (long long)gem_exec[0].offset);

	gem_read(fd, gem_exec[0].handle, 0, buf, sizeof(buf));
	gem_close(fd, gem_exec[0].handle);

	for (i = 0; i < sizeof(gem_reloc)/sizeof(gem_reloc[0]); i++)
		igt_assert(buf[2 + i] < gtt_max);

	return 0;
}
Example #27
static void
test_huge_copy(int fd, int huge, int tiling_a, int tiling_b)
{
	uint64_t huge_object_size, i;
	uint32_t bo, *pattern_a, *pattern_b;
	char *a, *b;

	switch (huge) {
	case -2:
		huge_object_size = gem_mappable_aperture_size() / 4;
		break;
	case -1:
		huge_object_size = gem_mappable_aperture_size() / 2;
		break;
	case 0:
		huge_object_size = gem_mappable_aperture_size() + PAGE_SIZE;
		break;
	default:
		huge_object_size = gem_aperture_size(fd) + PAGE_SIZE;
		break;
	}
	intel_require_memory(2, huge_object_size, CHECK_RAM);

	pattern_a = malloc(PAGE_SIZE);
	igt_assert(pattern_a);
	for (i = 0; i < PAGE_SIZE/4; i++)
		pattern_a[i] = i;

	pattern_b = malloc(PAGE_SIZE);
	igt_assert(pattern_b);
	for (i = 0; i < PAGE_SIZE/4; i++)
		pattern_b[i] = ~i;

	bo = gem_create(fd, huge_object_size);
	if (tiling_a)
		gem_set_tiling(fd, bo, tiling_a,
			       tiling_a == I915_TILING_Y ? 128 : 512);
	a = __gem_mmap__gtt(fd, bo, huge_object_size, PROT_READ | PROT_WRITE);
	igt_require(a);
	gem_close(fd, bo);

	for (i = 0; i < huge_object_size / PAGE_SIZE; i++)
		memcpy(a + PAGE_SIZE*i, pattern_a, PAGE_SIZE);

	bo = gem_create(fd, huge_object_size);
	if (tiling_b)
		gem_set_tiling(fd, bo, tiling_b,
			       tiling_b == I915_TILING_Y ? 128 : 512);
	b = __gem_mmap__gtt(fd, bo, huge_object_size, PROT_READ | PROT_WRITE);
	igt_require(b);
	gem_close(fd, bo);

	for (i = 0; i < huge_object_size / PAGE_SIZE; i++)
		memcpy(b + PAGE_SIZE*i, pattern_b, PAGE_SIZE);

	for (i = 0; i < huge_object_size / PAGE_SIZE; i++) {
		if (i & 1)
			memcpy(a + i * PAGE_SIZE, b + i * PAGE_SIZE, PAGE_SIZE);
		else
			memcpy(b + i * PAGE_SIZE, a + i * PAGE_SIZE, PAGE_SIZE);
	}

	for (i = 0; i < huge_object_size / PAGE_SIZE; i++) {
		if (i & 1)
			igt_assert(memcmp(pattern_b, a + PAGE_SIZE*i, PAGE_SIZE) == 0);
		else
			igt_assert(memcmp(pattern_a, a + PAGE_SIZE*i, PAGE_SIZE) == 0);
	}
	munmap(a, huge_object_size);

	for (i = 0; i < huge_object_size / PAGE_SIZE; i++) {
		if (i & 1)
			igt_assert(memcmp(pattern_b, b + PAGE_SIZE*i, PAGE_SIZE) == 0);
		else
			igt_assert(memcmp(pattern_a, b + PAGE_SIZE*i, PAGE_SIZE) == 0);
	}
	munmap(b, huge_object_size);

	free(pattern_a);
	free(pattern_b);
}
Example #28
static void test_plane_scaling(data_t *d)
{
    igt_display_t *display = &d->display;
    igt_output_t *output;
    cairo_surface_t *image;
    enum pipe pipe;
    int valid_tests = 0;
    int primary_plane_scaling = 0; /* For now */

    igt_require(d->display.has_universal_planes);
    igt_require(d->num_scalers);

    for_each_connected_output(display, output) {
        drmModeModeInfo *mode;

        pipe = output->config.pipe;
        igt_output_set_pipe(output, pipe);

        mode = igt_output_get_mode(output);

        /* allocate fb2 with image size */
        image = cairo_image_surface_create_from_png(FILE_NAME);
        igt_assert(cairo_surface_status(image) == CAIRO_STATUS_SUCCESS);
        d->image_w = cairo_image_surface_get_width(image);
        d->image_h = cairo_image_surface_get_height(image);
        cairo_surface_destroy(image);

        d->fb_id2 = igt_create_fb(d->drm_fd,
                                  d->image_w, d->image_h,
                                  DRM_FORMAT_XRGB8888,
                                  LOCAL_I915_FORMAT_MOD_X_TILED, /* tiled */
                                  &d->fb2);
        igt_assert(d->fb_id2);
        paint_image(d, &d->fb2, d->fb2.width, d->fb2.height);

        d->fb_id3 = igt_create_fb(d->drm_fd,
                                  mode->hdisplay, mode->vdisplay,
                                  DRM_FORMAT_XRGB8888,
                                  LOCAL_I915_FORMAT_MOD_X_TILED, /* tiled */
                                  &d->fb3);
        igt_assert(d->fb_id3);
        paint_color(d, &d->fb3, mode->hdisplay, mode->vdisplay);

        /* Set up display with plane 1 */
        d->plane1 = igt_output_get_plane(output, IGT_PLANE_PRIMARY);
        prepare_crtc(d, output, pipe, d->plane1, mode, COMMIT_UNIVERSAL);

        if (primary_plane_scaling) {
            /* Primary plane upscaling */
            igt_fb_set_position(&d->fb1, d->plane1, 100, 100);
            igt_fb_set_size(&d->fb1, d->plane1, 500, 500);
            igt_plane_set_position(d->plane1, 0, 0);
            igt_plane_set_size(d->plane1, mode->hdisplay, mode->vdisplay);
            igt_display_commit2(display, COMMIT_UNIVERSAL);

            /* Primary plane 1:1 no scaling */
            igt_fb_set_position(&d->fb1, d->plane1, 0, 0);
            igt_fb_set_size(&d->fb1, d->plane1, d->fb1.width, d->fb1.height);
            igt_plane_set_position(d->plane1, 0, 0);
            igt_plane_set_size(d->plane1, mode->hdisplay, mode->vdisplay);
            igt_display_commit2(display, COMMIT_UNIVERSAL);
        }

        /* Set up fb2->plane2 mapping. */
        d->plane2 = igt_output_get_plane(output, IGT_PLANE_2);
        igt_plane_set_fb(d->plane2, &d->fb2);

        /* 2nd plane windowed */
        igt_fb_set_position(&d->fb2, d->plane2, 100, 100);
        igt_fb_set_size(&d->fb2, d->plane2, d->fb2.width-200, d->fb2.height-200);
        igt_plane_set_position(d->plane2, 100, 100);
        igt_plane_set_size(d->plane2, mode->hdisplay-200, mode->vdisplay-200);
        igt_display_commit2(display, COMMIT_UNIVERSAL);

        iterate_plane_scaling(d, mode);

        /* 2nd plane up scaling */
        igt_fb_set_position(&d->fb2, d->plane2, 100, 100);
        igt_fb_set_size(&d->fb2, d->plane2, 500, 500);
        igt_plane_set_position(d->plane2, 10, 10);
        igt_plane_set_size(d->plane2, mode->hdisplay-20, mode->vdisplay-20);
        igt_display_commit2(display, COMMIT_UNIVERSAL);

        /* 2nd plane downscaling */
        igt_fb_set_position(&d->fb2, d->plane2, 0, 0);
        igt_fb_set_size(&d->fb2, d->plane2, d->fb2.width, d->fb2.height);
        igt_plane_set_position(d->plane2, 10, 10);
        igt_plane_set_size(d->plane2, 500, 500 * d->fb2.height/d->fb2.width);
        igt_display_commit2(display, COMMIT_UNIVERSAL);

        if (primary_plane_scaling) {
            /* Primary plane up scaling */
            igt_fb_set_position(&d->fb1, d->plane1, 100, 100);
            igt_fb_set_size(&d->fb1, d->plane1, 500, 500);
            igt_plane_set_position(d->plane1, 0, 0);
            igt_plane_set_size(d->plane1, mode->hdisplay, mode->vdisplay);
            igt_display_commit2(display, COMMIT_UNIVERSAL);
        }

        /* Set up fb3->plane3 mapping. */
        d->plane3 = igt_output_get_plane(output, IGT_PLANE_3);
        igt_plane_set_fb(d->plane3, &d->fb3);

        /* 3rd plane windowed - no scaling */
        igt_fb_set_position(&d->fb3, d->plane3, 100, 100);
        igt_fb_set_size(&d->fb3, d->plane3, d->fb3.width-300, d->fb3.height-300);
        igt_plane_set_position(d->plane3, 100, 100);
        igt_plane_set_size(d->plane3, mode->hdisplay-300, mode->vdisplay-300);
        igt_display_commit2(display, COMMIT_UNIVERSAL);

        /* Switch scaler from plane 2 to plane 3 */
        igt_fb_set_position(&d->fb2, d->plane2, 100, 100);
        igt_fb_set_size(&d->fb2, d->plane2, d->fb2.width-200, d->fb2.height-200);
        igt_plane_set_position(d->plane2, 100, 100);
        igt_plane_set_size(d->plane2, d->fb2.width-200, d->fb2.height-200);

        igt_fb_set_position(&d->fb3, d->plane3, 100, 100);
        igt_fb_set_size(&d->fb3, d->plane3, d->fb3.width-400, d->fb3.height-400);
        igt_plane_set_position(d->plane3, 10, 10);
        igt_plane_set_size(d->plane3, mode->hdisplay-300, mode->vdisplay-300);
        igt_display_commit2(display, COMMIT_UNIVERSAL);

        if (primary_plane_scaling) {
            /* Switch scaler from plane 1 to plane 2 */
            igt_fb_set_position(&d->fb1, d->plane1, 0, 0);
            igt_fb_set_size(&d->fb1, d->plane1, d->fb1.width, d->fb1.height);
            igt_plane_set_position(d->plane1, 0, 0);
            igt_plane_set_size(d->plane1, mode->hdisplay, mode->vdisplay);

            igt_fb_set_position(&d->fb2, d->plane2, 100, 100);
            igt_fb_set_size(&d->fb2, d->plane2, d->fb2.width-500,d->fb2.height-500);
            igt_plane_set_position(d->plane2, 100, 100);
            igt_plane_set_size(d->plane2, mode->hdisplay-200, mode->vdisplay-200);
            igt_display_commit2(display, COMMIT_UNIVERSAL);
        }

        /* back to single plane mode */
        igt_plane_set_fb(d->plane2, NULL);
        igt_plane_set_fb(d->plane3, NULL);
        igt_display_commit2(display, COMMIT_UNIVERSAL);

        valid_tests++;
        cleanup_crtc(d, output, d->plane1);
    }

    /* Reconstructed closing; the snippet is truncated at this point in
     * the source. */
    igt_require_f(valid_tests, "no valid crtc/connector combinations found\n");
}
Example #29
static void big_exec(int fd, uint32_t handle, int ring)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 *gem_exec;
	uint32_t ctx_id1, ctx_id2;
	int num_buffers = gem_available_aperture_size(fd) / 4096;
	int i;

	/* Make sure we only fill half of RAM with gem objects. */
	igt_require(intel_get_total_ram_mb() * 1024 / 2 > num_buffers * 4);

	/* calloc already zeroes the array */
	gem_exec = calloc(num_buffers + 1, sizeof(*gem_exec));
	igt_assert(gem_exec);

	ctx_id1 = gem_context_create(fd);
	ctx_id2 = gem_context_create(fd);

	gem_exec[0].handle = handle;

	execbuf.buffers_ptr = (uintptr_t)gem_exec;
	execbuf.buffer_count = num_buffers + 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = 8;
	execbuf.cliprects_ptr = 0;
	execbuf.num_cliprects = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	execbuf.flags = ring;
	execbuf.rsvd2 = 0;

	execbuf.buffer_count = 1;
	i915_execbuffer2_set_context_id(execbuf, ctx_id1);
	do_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);

	for (i = 0; i < num_buffers; i++) {
		uint32_t tmp_handle = gem_create(fd, 4096);

		gem_exec[i].handle = tmp_handle;
	}
	gem_exec[i].handle = handle;
	execbuf.buffer_count = i + 1;

	/* figure out how many buffers we can exactly fit */
	while (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2,
			&execbuf) != 0) {
		i--;
		gem_close(fd, gem_exec[i].handle);
		gem_exec[i].handle = handle;
		execbuf.buffer_count--;
		igt_info("trying buffer count %i\n", i - 1);
	}

	igt_info("reduced buffer count to %i from %i\n",
	       i - 1, num_buffers);

	/* double check that it works */
	do_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);

	i915_execbuffer2_set_context_id(execbuf, ctx_id2);
	do_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
	gem_sync(fd, handle);
}
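Example #30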
static void thread_performance(unsigned mask)
{
	const int loops = 4096;
	int n, count;
	int fd, num_fences;
	double linear[2], tiled[2];

	fd = drm_open_any();

	num_fences = gem_available_fences(fd);
	igt_require(num_fences > 0);

	for (count = 2; count < 4*num_fences; count *= 2) {
		const int nthreads = (mask & READ ? count : 0) + (mask & WRITE ? count : 0);
		struct timeval start, end;
		struct thread_performance readers[count];
		struct thread_performance writers[count];
		uint32_t handle[count];
		void *ptr[count];

		for (n = 0; n < count; n++) {
			handle[n] = gem_create(fd, OBJECT_SIZE);
			ptr[n] = gem_mmap(fd, handle[n], OBJECT_SIZE, PROT_READ | PROT_WRITE);
			igt_assert(ptr[n]);

			if (mask & READ) {
				readers[n].id = n;
				readers[n].direction = READ;
				readers[n].ptr = ptr;
				readers[n].count = count;
				readers[n].loops = loops;
			}

			if (mask & WRITE) {
				writers[n].id = count - n - 1;
				writers[n].direction = WRITE;
				writers[n].ptr = ptr;
				writers[n].count = count;
				writers[n].loops = loops;
			}
		}

		gettimeofday(&start, NULL);
		for (n = 0; n < count; n++) {
			if (mask & READ)
				pthread_create(&readers[n].thread, NULL, read_thread_performance, &readers[n]);
			if (mask & WRITE)
				pthread_create(&writers[n].thread, NULL, write_thread_performance, &writers[n]);
		}
		for (n = 0; n < count; n++) {
			if (mask & READ)
				pthread_join(readers[n].thread, NULL);
			if (mask & WRITE)
				pthread_join(writers[n].thread, NULL);
		}
		gettimeofday(&end, NULL);

		linear[count != 2] = nthreads * loops / elapsed(&start, &end) / (OBJECT_SIZE / 4096);
		igt_info("%s rate for %d linear surfaces, %d threads:	%7.3fMiB/s\n", direction_string(mask), count, nthreads, linear[count != 2]);

		for (n = 0; n < count; n++)
			gem_set_tiling(fd, handle[n], I915_TILING_X, 1024);

		gettimeofday(&start, NULL);
		for (n = 0; n < count; n++) {
			if (mask & READ)
				pthread_create(&readers[n].thread, NULL, read_thread_performance, &readers[n]);
			if (mask & WRITE)
				pthread_create(&writers[n].thread, NULL, write_thread_performance, &writers[n]);
		}
		for (n = 0; n < count; n++) {
			if (mask & READ)
				pthread_join(readers[n].thread, NULL);
			if (mask & WRITE)
				pthread_join(writers[n].thread, NULL);
		}
		gettimeofday(&end, NULL);

		tiled[count != 2] = nthreads * loops / elapsed(&start, &end) / (OBJECT_SIZE / 4096);
		igt_info("%s rate for %d tiled surfaces, %d threads:	%7.3fMiB/s\n", direction_string(mask), count, nthreads, tiled[count != 2]);

		for (n = 0; n < count; n++) {
			munmap(ptr[n], OBJECT_SIZE);
			gem_close(fd, handle[n]);
		}
	}

	errno = 0;
	igt_assert(linear[1] > 0.75 * linear[0]);
	igt_assert(tiled[1] > 0.75 * tiled[0]);
}