Example #1
/**
 * igt_get_stable_obj_count:
 * @driver: fd to drm/i915 GEM driver
 *
 * This puts the driver into a stable (quiescent) state and then returns the
 * current number of GEM buffer objects as reported in the i915_gem_objects
 * debugfs interface.
 */
int igt_get_stable_obj_count(int driver)
{
	int obj_count;
	gem_quiescent_gpu(driver);
	obj_count = get_object_count();
	/* The test relies on the system being in the same state before and
	 * after the test, so any difference in the object count is a result of
	 * leaks during the test. gem_quiescent_gpu() mostly achieves this, but
	 * on Android obj_count can occasionally still change briefly.
	 * The loop below ensures obj_count has remained stable over several
	 * checks.
	 */
#ifdef ANDROID
	{
		int loop_count = 0;
		int prev_obj_count = obj_count;
		while (loop_count < 4) {
			usleep(200000);
			gem_quiescent_gpu(driver);
			obj_count = get_object_count();
			if (obj_count == prev_obj_count) {
				loop_count++;
			} else {
				igt_debug("loop_count=%d, obj_count=%d, prev_obj_count=%d\n",
					loop_count, obj_count, prev_obj_count);
				loop_count = 0;
				prev_obj_count = obj_count;
			}
		}
	}
#endif
	return obj_count;
}
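As a hedged illustration of how this helper is typically consumed, the sketch below compares the stable object count before and after some GPU work, mirroring the leak checks in the later examples. The IGT calls (drm_open_driver(), igt_info(), igt_assert_eq()) are real; exercise_gem() is a hypothetical stand-in for whatever the test actually does.

static void leak_check_sketch(void)
{
	int fd = drm_open_driver(DRIVER_INTEL);
	int before, after;

	/* Baseline taken once the driver is quiescent. */
	before = igt_get_stable_obj_count(fd);

	exercise_gem(fd); /* hypothetical workload that creates and frees BOs */

	/* After the workload the count should match the baseline;
	 * any difference is reported as leaked objects. */
	after = igt_get_stable_obj_count(fd);
	igt_info("leaked %d objects\n", after - before);
	igt_assert_eq(after - before, 0);

	close(fd);
}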
Example #2
static void test_reimport_close_race(void)
{
	pthread_t *threads;
	int r, i, num_threads;
	int fds[2];
	int obj_count;
	void *status;
	uint32_t handle;
	int fake;

	/* Allocate exit handler fds in here so that we don't screw
	 * up the counts */
	fake = drm_open_driver(DRIVER_INTEL);

	gem_quiescent_gpu(fake);
	obj_count = get_object_count();

	num_threads = sysconf(_SC_NPROCESSORS_ONLN);

	threads = calloc(num_threads, sizeof(pthread_t));

	fds[0] = drm_open_driver(DRIVER_INTEL);

	handle = gem_create(fds[0], BO_SIZE);

	fds[1] = prime_handle_to_fd(fds[0], handle);

	for (i = 0; i < num_threads; i++) {
		r = pthread_create(&threads[i], NULL,
				   thread_fn_reimport_vs_close,
				   (void *)(uintptr_t)fds);
		igt_assert_eq(r, 0);
	}

	sleep(5);

	pls_die = 1;

	for (i = 0; i < num_threads; i++) {
		pthread_join(threads[i], &status);
		igt_assert(status == 0);
	}

	close(fds[0]);
	close(fds[1]);

	gem_quiescent_gpu(fake);
	obj_count = get_object_count() - obj_count;

	igt_info("leaked %i objects\n", obj_count);

	close(fake);

	igt_assert_eq(obj_count, 0);
}
Example #3
static void gpu_blit(struct igt_fb *dst_fb, struct igt_fb *src_fb)
{
	drm_intel_bo *dst_bo;
	drm_intel_bo *src_bo;
	int bpp;

	igt_assert(dst_fb->drm_format == src_fb->drm_format);
	igt_assert(src_fb->drm_format == DRM_FORMAT_RGB565 ||
	       igt_drm_format_to_bpp(src_fb->drm_format) != 16);
	bpp = igt_drm_format_to_bpp(src_fb->drm_format);
	dst_bo = gem_handle_to_libdrm_bo(bufmgr, drm_fd, "destination",
					 dst_fb->gem_handle);
	igt_assert(dst_bo);
	src_bo = gem_handle_to_libdrm_bo(bufmgr, drm_fd, "source",
					 src_fb->gem_handle);
	igt_assert(src_bo);

	intel_blt_copy(batch,
		       src_bo, 0, 0, src_fb->width * bpp / 8,
		       dst_bo, 0, 0, dst_fb->width * bpp / 8,
		       src_fb->width, src_fb->height, bpp);
	intel_batchbuffer_flush(batch);
	gem_quiescent_gpu(drm_fd);

	drm_intel_bo_unreference(src_bo);
	drm_intel_bo_unreference(dst_bo);
}
Example #4
static void trigger_reset(int fd)
{
    igt_assert(i915_wedged_set());

    /* And just check the gpu is indeed running again */
    igt_debug("Checking that the GPU recovered\n");
    gem_quiescent_gpu(fd);
}
Example #5
static void run_test(int fd, unsigned ring, unsigned flags)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	const uint32_t bbe = MI_BATCH_BUFFER_END;
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_relocation_entry reloc[1024];
	struct drm_i915_gem_execbuffer2 execbuf;
	struct igt_hang_ring hang;
	uint32_t *batch, *b;
	int i;

	gem_require_ring(fd, ring);
	igt_skip_on_f(gen == 6 && (ring & ~(3<<13)) == I915_EXEC_BSD,
		      "MI_STORE_DATA broken on gen6 bsd\n");

	gem_quiescent_gpu(fd);

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)obj;
	execbuf.buffer_count = 2;
	execbuf.flags = ring | (1 << 11);
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE;

	memset(obj, 0, sizeof(obj));
	obj[0].handle = gem_create(fd, 4096);
	obj[0].flags |= EXEC_OBJECT_WRITE;
	obj[1].handle = gem_create(fd, 1024*16 + 4096);
	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
	igt_require(__gem_execbuf(fd, &execbuf) == 0);

	obj[1].relocs_ptr = (uintptr_t)reloc;
	obj[1].relocation_count = 1024;

	batch = gem_mmap__cpu(fd, obj[1].handle, 0, 16*1024 + 4096,
			      PROT_WRITE | PROT_READ);
	gem_set_domain(fd, obj[1].handle,
		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

	memset(reloc, 0, sizeof(reloc));
	b = batch;
	for (i = 0; i < 1024; i++) {
		uint64_t offset;

		reloc[i].target_handle = obj[0].handle;
		reloc[i].presumed_offset = obj[0].offset;
		reloc[i].offset = (b - batch + 1) * sizeof(*batch);
		reloc[i].delta = i * sizeof(uint32_t);
		reloc[i].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		reloc[i].write_domain = I915_GEM_DOMAIN_INSTRUCTION;

		offset = obj[0].offset + reloc[i].delta;
		*b++ = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
		if (gen >= 8) {
			*b++ = offset;
			*b++ = offset >> 32;
		} else if (gen >= 4) {
Example #6
static void wedge_gpu(int fd)
{
    /* First idle the GPU then disable GPU resets before injecting a hang */
    gem_quiescent_gpu(fd);

    igt_require(i915_reset_control(false));

    igt_debug("Wedging GPU by injecting hang\n");
    igt_post_hang_ring(fd, igt_hang_ring(fd, I915_EXEC_DEFAULT));

    igt_assert(i915_reset_control(true));
}
Example #7
int main(int argc, char **argv)
{
	int fd;
	int devid;

	if (argc != 1) {
		fprintf(stderr, "usage: %s\n", argv[0]);
		igt_fail(-1);
	}

	fd = drm_open_any();
	devid = intel_get_drm_devid(fd);

	if (HAS_BSD_RING(devid))
		num_rings++;

	if (HAS_BLT_RING(devid))
		num_rings++;

	printf("num rings detected: %i\n", num_rings);

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		fprintf(stderr, "failed to init libdrm\n");
		igt_fail(-1);
	}
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, devid);
	if (!batch) {
		fprintf(stderr, "failed to create batch buffer\n");
		igt_fail(-1);
	}

	mi_lri_loop();
	gem_quiescent_gpu(fd);

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #8
static void render_timeout(int fd)
{
	drm_intel_bufmgr *bufmgr;
	struct intel_batchbuffer *batch;
	int64_t timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;
	int64_t negative_timeout = -1;
	int ret;
	const bool do_signals = true; /* signals will seem to make the operation
				       * use less process CPU time */
	bool done = false;
	int i, iter = 1;

	igt_skip_on_simulation();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	dst = drm_intel_bo_alloc(bufmgr, "dst", BUF_SIZE, 4096);
	dst2 = drm_intel_bo_alloc(bufmgr, "dst2", BUF_SIZE, 4096);

	igt_skip_on_f(gem_bo_wait_timeout(fd, dst->handle, &timeout) == -EINVAL,
		      "kernel doesn't support wait_timeout, skipping test\n");
	timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;

	/* Figure out a rough number of fills required to consume 1 second of
	 * GPU work.
	 */
	do {
		struct timespec start, end;
		long diff;

#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
#endif

		igt_assert(clock_gettime(CLOCK_MONOTONIC_RAW, &start) == 0);
		for (i = 0; i < iter; i++)
			blt_color_fill(batch, dst, BUF_PAGES);
		intel_batchbuffer_flush(batch);
		drm_intel_bo_wait_rendering(dst);
		igt_assert(clock_gettime(CLOCK_MONOTONIC_RAW, &end) == 0);

		diff = do_time_diff(&end, &start);
		igt_assert(diff >= 0);

		if ((diff / MSEC_PER_SEC) > ENOUGH_WORK_IN_SECONDS)
			done = true;
		else
			iter <<= 1;
	} while (!done && iter < 1000000);

	igt_assert_lt(iter, 1000000);

	igt_info("%d iters is enough work\n", iter);
	gem_quiescent_gpu(fd);
	if (do_signals)
		igt_fork_signal_helper();

	/* We should be able to do half as much work in the same amount of time,
	 * but because we might schedule almost twice as much as required, we
	 * might accidentally time out. Hence add some fudge. */
	for (i = 0; i < iter/3; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);
	igt_assert(gem_bo_busy(fd, dst2->handle) == true);

	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), 0);
	igt_assert(gem_bo_busy(fd, dst2->handle) == false);
	igt_assert_neq(timeout, 0);
	if (timeout == (ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC))
		igt_info("Buffer was already done!\n");
	else
		igt_info("Finished with %" PRIu64 " time remaining\n", timeout);

	/* check that polling with timeout=0 works. */
	timeout = 0;
	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), 0);
	igt_assert_eq(timeout, 0);

	/* Now check that we correctly time out, twice the auto-tune load should
	 * be good enough. */
	timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;
	for (i = 0; i < iter*2; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);

	ret = gem_bo_wait_timeout(fd, dst2->handle, &timeout);
	igt_assert_eq(ret, -ETIME);
	igt_assert_eq(timeout, 0);
	igt_assert(gem_bo_busy(fd, dst2->handle) == true);

	/* check that polling with timeout=0 works. */
	timeout = 0;
	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), -ETIME);
	igt_assert_eq(timeout, 0);

	/* Now check that we can pass negative (infinite) timeouts. */
	negative_timeout = -1;
	for (i = 0; i < iter; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);

	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &negative_timeout), 0);
	igt_assert_eq(negative_timeout, -1); /* infinity always remains */
	igt_assert(gem_bo_busy(fd, dst2->handle) == false);

	if (do_signals)
		igt_stop_signal_helper();
	drm_intel_bo_unreference(dst2);
	drm_intel_bo_unreference(dst);
	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);
}
Example #9
static void wait_gpu(void)
{
    int fd = drm_open_driver(DRIVER_INTEL);
    gem_quiescent_gpu(fd);
    close(fd);
}