Example #1
static int test_input_checking(int fd)
{
	struct local_i915_gem_userptr userptr;
	int ret;

	/* Invalid flags. */
	memset(&userptr, 0, sizeof(userptr));
	userptr.user_ptr = 0;
	userptr.user_size = 0;
	userptr.flags = ~0;
	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr);
	igt_assert_neq(ret, 0);

	/* Too big. */
	memset(&userptr, 0, sizeof(userptr));
	userptr.user_ptr = 0;
	userptr.user_size = ~0;
	userptr.flags = 0;
	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr);
	igt_assert_neq(ret, 0);

	/* Both wrong. */
	memset(&userptr, 0, sizeof(userptr));
	userptr.user_ptr = 0;
	userptr.user_size = ~0;
	userptr.flags = ~0;
	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr);
	igt_assert_neq(ret, 0);

	return 0;
}
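The local_i915_gem_userptr struct and LOCAL_IOCTL_I915_GEM_USERPTR number used above are private copies that these tests carry so they build even when the system headers predate the userptr uapi. A minimal sketch of what those local definitions typically look like, assuming the upstream drm_i915_gem_userptr layout and ioctl slot; the exact names and values here are illustrative, not taken from this example:

/* Sketch of the local userptr definitions the test above relies on; this
 * mirrors the upstream drm_i915_gem_userptr uapi (assumed, not copied from
 * the example itself). */
struct local_i915_gem_userptr {
	uint64_t user_ptr;	/* CPU address to wrap, page-aligned */
	uint64_t user_size;	/* size in bytes, a multiple of the page size */
	uint32_t flags;		/* e.g. read-only / unsynchronized bits */
	uint32_t handle;	/* GEM handle returned on success */
};

#define LOCAL_I915_GEM_USERPTR		0x33
#define LOCAL_IOCTL_I915_GEM_USERPTR \
	DRM_IOWR(DRM_COMMAND_BASE + LOCAL_I915_GEM_USERPTR, \
		 struct local_i915_gem_userptr)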
Example #2
static void read_rc6_residency(int value[], const char *name_of_rc6_residency)
{
	unsigned int i;
	const int device = drm_get_card();
	char *path;
	int ret;
	FILE *file;

	/* For some reason my ivb isn't idle even after syncing up with the gpu.
	 * Let's add a sleep just to make it happy. */
	sleep(5);

	ret = asprintf(&path, "/sys/class/drm/card%d/power/rc6_enable", device);
	igt_assert_neq(ret, -1);

	file = fopen(path, "r");
	igt_require(file);
	fclose(file);

	/* Claim success if RC6 is not enabled at all. */
	if (readit(path) == 0)
		igt_success();

	for (i = 0; i < 2; i++) {
		sleep(SLEEP_DURATION / 1000);
		free(path);
		ret = asprintf(&path, "/sys/class/drm/card%d/power/%s_residency_ms",
			       device, name_of_rc6_residency);
		igt_assert_neq(ret, -1);
		value[i] = readit(path);
	}
	free(path);
}
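readit() is a small helper defined elsewhere in this test; the sketch below shows the behaviour the example assumes (open a sysfs file, parse one integer, fail loudly otherwise). Only the name comes from the call sites above; the body is an assumption:

/* Assumed shape of readit(): read a single integer from a sysfs file and
 * abort the test if the file cannot be opened or parsed. */
static int readit(const char *path)
{
	int ret, scanned;
	FILE *file;

	file = fopen(path, "r");
	igt_assert(file);

	scanned = fscanf(file, "%d", &ret);
	igt_assert_eq(scanned, 1);

	fclose(file);
	return ret;
}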
Example #3
static void crtc_check_current_state(struct kms_atomic_crtc_state *crtc,
				     struct kms_atomic_plane_state *primary,
				     enum kms_atomic_check_relax relax)
{
	struct kms_atomic_crtc_state crtc_kernel;
	drmModeCrtcPtr legacy;

	legacy = drmModeGetCrtc(crtc->state->desc->fd, crtc->obj);
	igt_assert(legacy);

	igt_assert_eq_u32(legacy->crtc_id, crtc->obj);
	igt_assert_eq_u32(legacy->x, primary->src_x >> 16);
	igt_assert_eq_u32(legacy->y, primary->src_y >> 16);

	if (crtc->active)
		igt_assert_eq_u32(legacy->buffer_id, primary->fb_id);
	else
		igt_assert_eq_u32(legacy->buffer_id, 0);

	if (legacy->mode_valid) {
		igt_assert_neq(legacy->mode_valid, 0);
		igt_assert_eq(crtc->mode.len,
		              sizeof(struct drm_mode_modeinfo));
		do_or_die(memcmp(&legacy->mode, crtc->mode.data,
		                 crtc->mode.len));
		igt_assert_eq(legacy->width, legacy->mode.hdisplay);
		igt_assert_eq(legacy->height, legacy->mode.vdisplay);
	} else {
		igt_assert_eq(legacy->mode_valid, 0);
	}

	memcpy(&crtc_kernel, crtc, sizeof(crtc_kernel));
	crtc_get_current_state(&crtc_kernel);

	if (crtc_kernel.mode.id != 0)
		igt_assert_eq(crtc_kernel.mode.len,
		              sizeof(struct drm_mode_modeinfo));

	/* Optionally relax the check for MODE_ID: using the legacy SetCrtc
	 * API can potentially change MODE_ID even if the mode itself remains
	 * unchanged. */
	if (((relax & CRTC_RELAX_MODE) &&
	    (crtc_kernel.mode.id != crtc->mode.id &&
	     crtc_kernel.mode.id != 0 && crtc->mode.id != 0)) &&
	    memcmp(crtc_kernel.mode.data, crtc->mode.data,
		   sizeof(struct drm_mode_modeinfo)) == 0) {
		crtc_kernel.mode.id = crtc->mode.id;
		crtc_kernel.mode.data = crtc->mode.data;
	}

	do_or_die(memcmp(&crtc_kernel, crtc, sizeof(crtc_kernel)));

	drmModeFreeCrtc(legacy);
}
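The relax argument is a bitmask of checks the caller allows to be loosened; CRTC_RELAX_MODE is the only bit used above. A sketch of the enum this code assumes, with illustrative values:

/* Assumed relax flags for the state checks; only CRTC_RELAX_MODE is used
 * in the function above, and the values shown are illustrative. */
enum kms_atomic_check_relax {
	ATOMIC_RELAX_NONE = 0,
	CRTC_RELAX_MODE   = (1 << 0),
	PLANE_RELAX_FB    = (1 << 1)
};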
Example #4
static void
copy(int fd, uint32_t dst, uint32_t src, unsigned int error)
{
	uint32_t batch[12];
	struct drm_i915_gem_relocation_entry reloc[2];
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_execbuffer2 exec;
	uint32_t handle;
	int ret, i = 0;

	batch[i++] = XY_SRC_COPY_BLT_CMD |
		  XY_SRC_COPY_BLT_WRITE_ALPHA |
		  XY_SRC_COPY_BLT_WRITE_RGB;
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i - 1] |= 8; /* gen8+ blit: two extra dwords for 64-bit addresses */
	else
		batch[i - 1] |= 6;

	batch[i++] = (3 << 24) | /* 32 bits */
		  (0xcc << 16) | /* copy ROP */
		  WIDTH*4;
	batch[i++] = 0; /* dst x1,y1 */
	batch[i++] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */
	batch[i++] = 0; /* dst reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0; /* dst address upper dword (gen8+) */
	batch[i++] = 0; /* src x1,y1 */
	batch[i++] = WIDTH*4;
	batch[i++] = 0; /* src reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0; /* src address upper dword (gen8+) */
	batch[i++] = MI_BATCH_BUFFER_END;
	batch[i++] = MI_NOOP;

	handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, batch, sizeof(batch));

	reloc[0].target_handle = dst;
	reloc[0].delta = 0;
	reloc[0].offset = 4 * sizeof(batch[0]);
	reloc[0].presumed_offset = 0;
	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;

	reloc[1].target_handle = src;
	reloc[1].delta = 0;
	reloc[1].offset = 7 * sizeof(batch[0]);
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		reloc[1].offset += sizeof(batch[0]);
	reloc[1].presumed_offset = 0;
	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[1].write_domain = 0;

	memset(obj, 0, sizeof(obj));
	exec.buffer_count = 0;
	obj[exec.buffer_count++].handle = dst;
	if (src != dst)
		obj[exec.buffer_count++].handle = src;
	obj[exec.buffer_count].handle = handle;
	obj[exec.buffer_count].relocation_count = 2;
	obj[exec.buffer_count].relocs_ptr = (uintptr_t)reloc;
	exec.buffer_count++;
	exec.buffers_ptr = (uintptr_t)obj;

	exec.batch_start_offset = 0;
	exec.batch_len = i * 4;
	exec.DR1 = exec.DR4 = 0;
	exec.num_cliprects = 0;
	exec.cliprects_ptr = 0;
	exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0;
	i915_execbuffer2_set_context_id(exec, 0);
	exec.rsvd2 = 0;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	if (ret)
		ret = errno;

	if (error == ~0) /* ~0 means any nonzero errno is acceptable */
		igt_assert_neq(ret, 0);
	else
		igt_assert_eq(ret, error);

	gem_close(fd, handle);
}
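A call site for copy() in this style of test creates buffers of at least WIDTH * HEIGHT * 4 bytes and states the errno it expects the execbuffer to return, with ~0 meaning any nonzero error is acceptable. The helper below is hypothetical and only illustrates that contract, assuming gem_create()/gem_close() from the IGT library:

/* Hypothetical smoke test for copy(): one expected-success call and one call
 * that must fail with some nonzero errno (the source handle has been closed). */
static void copy_smoke(int fd)
{
	uint32_t src = gem_create(fd, WIDTH * HEIGHT * 4);
	uint32_t dst = gem_create(fd, WIDTH * HEIGHT * 4);

	copy(fd, dst, src, 0);	/* both handles valid: must succeed */

	gem_close(fd, src);
	copy(fd, dst, src, ~0);	/* stale src handle: any error accepted */

	gem_close(fd, dst);
}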
Example #5
static void render_timeout(int fd)
{
	drm_intel_bufmgr *bufmgr;
	struct intel_batchbuffer *batch;
	int64_t timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;
	int64_t negative_timeout = -1;
	int ret;
	const bool do_signals = true; /* signal delivery makes the wait appear
				       * to consume less process CPU time */
	bool done = false;
	int i, iter = 1;

	igt_skip_on_simulation();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	dst = drm_intel_bo_alloc(bufmgr, "dst", BUF_SIZE, 4096);
	dst2 = drm_intel_bo_alloc(bufmgr, "dst2", BUF_SIZE, 4096);

	igt_skip_on_f(gem_bo_wait_timeout(fd, dst->handle, &timeout) == -EINVAL,
		      "kernel doesn't support wait_timeout, skipping test\n");
	timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;

	/* Figure out a rough number of fills required to consume 1 second of
	 * GPU work.
	 */
	do {
		struct timespec start, end;
		long diff;

#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
#endif

		igt_assert(clock_gettime(CLOCK_MONOTONIC_RAW, &start) == 0);
		for (i = 0; i < iter; i++)
			blt_color_fill(batch, dst, BUF_PAGES);
		intel_batchbuffer_flush(batch);
		drm_intel_bo_wait_rendering(dst);
		igt_assert(clock_gettime(CLOCK_MONOTONIC_RAW, &end) == 0);

		diff = do_time_diff(&end, &start);
		igt_assert(diff >= 0);

		if ((diff / MSEC_PER_SEC) > ENOUGH_WORK_IN_SECONDS)
			done = true;
		else
			iter <<= 1;
	} while (!done && iter < 1000000);

	igt_assert_lt(iter, 1000000);

	igt_info("%d iters is enough work\n", iter);
	gem_quiescent_gpu(fd);
	if (do_signals)
		igt_fork_signal_helper();

	/* We should be able to do half as much work in the same amount of time,
	 * but because we might schedule almost twice as much as required, we
	 * might accidentally time out. Hence add some fudge. */
	for (i = 0; i < iter/3; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);
	igt_assert(gem_bo_busy(fd, dst2->handle) == true);

	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), 0);
	igt_assert(gem_bo_busy(fd, dst2->handle) == false);
	igt_assert_neq(timeout, 0);
	if (timeout == (ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC))
		igt_info("Buffer was already done!\n");
	else
		igt_info("Finished with %" PRId64 " time remaining\n", timeout);

	/* check that polling with timeout=0 works. */
	timeout = 0;
	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), 0);
	igt_assert_eq(timeout, 0);

	/* Now check that we correctly time out, twice the auto-tune load should
	 * be good enough. */
	timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;
	for (i = 0; i < iter*2; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);

	ret = gem_bo_wait_timeout(fd, dst2->handle, &timeout);
	igt_assert_eq(ret, -ETIME);
	igt_assert_eq(timeout, 0);
	igt_assert(gem_bo_busy(fd, dst2->handle) == true);

	/* check that polling with timeout=0 works. */
	timeout = 0;
	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), -ETIME);
	igt_assert_eq(timeout, 0);

	/* Now check that we can pass negative (infinite) timeouts. */
	negative_timeout = -1;
	for (i = 0; i < iter; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);

	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &negative_timeout), 0);
	igt_assert_eq(negative_timeout, -1); /* infinity always remains */
	igt_assert(gem_bo_busy(fd, dst2->handle) == false);

	if (do_signals)
		igt_stop_signal_helper();
	drm_intel_bo_unreference(dst2);
	drm_intel_bo_unreference(dst);
	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);
}
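gem_bo_wait_timeout() is the IGT wrapper around DRM_IOCTL_I915_GEM_WAIT that this test leans on: on success the kernel writes the remaining time back through the timeout pointer, an expired wait returns -ETIME with the timeout set to 0, and a negative timeout means wait forever. A minimal sketch of that wrapper, assuming the upstream drm_i915_gem_wait uapi; the -errno convention and exact signature are this sketch's assumptions:

/* Sketch of the wait wrapper used above: issue DRM_IOCTL_I915_GEM_WAIT and
 * report back both the result and the kernel-updated remaining time. */
static int gem_bo_wait_timeout_sketch(int fd, uint32_t handle, int64_t *timeout_ns)
{
	struct drm_i915_gem_wait wait = {
		.bo_handle = handle,
		.timeout_ns = *timeout_ns,	/* negative: wait indefinitely */
	};
	int ret = 0;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
		ret = -errno;	/* -ETIME when the timeout expires */

	*timeout_ns = wait.timeout_ns;	/* time remaining (0 after -ETIME) */
	return ret;
}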