Example 1
#include "igt.h"

int main(int argc, char **argv)
{
	/* Stress the helper machinery itself: repeatedly fork the child
	 * that bombards this process with signals, then reap it. */
	for (int i = 0; i < 1000; i++) {
		igt_fork_signal_helper();
		igt_stop_signal_helper();
	}

	return 0;
}
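In a real test this pair usually brackets blocking work rather than being toggled back to back: the helper child keeps signalling the parent so that interruptible waits exercise their -EINTR/restart paths. A minimal usage sketch, assuming an open DRM fd and a GEM handle (both hypothetical here):

/* Hypothetical usage: gem_sync() blocks until the object is idle and
 * is repeatedly interrupted while the signal helper runs. */
static void interruptible_sync_loop(int fd, uint32_t handle)
{
	igt_fork_signal_helper();

	for (int i = 0; i < 1000; i++)
		gem_sync(fd, handle);

	igt_stop_signal_helper();
}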
Example 2
static void run_test(int fd, int num_fences, int expected_errno,
		     unsigned flags)
{
	struct drm_i915_gem_execbuffer2 execbuf[2];
	struct drm_i915_gem_exec_object2 exec[2][2*MAX_FENCES+3];
	struct drm_i915_gem_relocation_entry reloc[2*MAX_FENCES+2];

	int i, n;
	int loop = 1000;

	if (flags & BUSY_LOAD) {
		/* bufmgr, batch and devid are file-scope state, shared with
		 * emit_dummy_load(). */
		bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
		batch = intel_batchbuffer_alloc(bufmgr, devid);

		/* Takes forever otherwise. */
		loop = 50;
	}

	if (flags & INTERRUPTIBLE)
		igt_fork_signal_helper();

	memset(execbuf, 0, sizeof(execbuf));
	memset(exec, 0, sizeof(exec));
	memset(reloc, 0, sizeof(reloc));

	/* Create each tiled object once and share the handle between the
	 * two execbufs, which list the objects in opposite order. */
	for (n = 0; n < 2*num_fences; n++) {
		uint32_t handle = tiled_bo_create(fd);
		exec[1][2*num_fences - n - 1].handle = exec[0][n].handle = handle;
		fill_reloc(&reloc[n], handle);
	}

	for (i = 0; i < 2; i++) {
		/* Mark the first num_fences objects as needing a fence; the
		 * final slot holds the batch that relocates against all of
		 * the tiled objects. */
		for (n = 0; n < num_fences; n++)
			exec[i][n].flags = EXEC_OBJECT_NEEDS_FENCE;

		exec[i][2*num_fences].handle = batch_create(fd);
		exec[i][2*num_fences].relocs_ptr = (uintptr_t)reloc;
		exec[i][2*num_fences].relocation_count = 2*num_fences;

		execbuf[i].buffers_ptr = (uintptr_t)exec[i];
		execbuf[i].buffer_count = 2*num_fences+1;
		execbuf[i].batch_len = 2*sizeof(uint32_t);
	}

	do {
		if (flags & BUSY_LOAD)
			emit_dummy_load();

		/* Alternate between the two execbufs, which list the same
		 * objects in opposite order, to thrash fence assignment. */
		igt_assert_eq(__gem_execbuf(fd, &execbuf[0]), expected_errno);
		igt_assert_eq(__gem_execbuf(fd, &execbuf[1]), expected_errno);
	} while (--loop);

	if (flags & INTERRUPTIBLE)
		igt_stop_signal_helper();

	/* Cleanup */
	for (n = 0; n < 2*num_fences; n++)
		gem_close(fd, exec[0][n].handle);

	for (i = 0; i < 2; i++)
		gem_close(fd, exec[i][2*num_fences].handle);

	if (flags & BUSY_LOAD) {
		intel_batchbuffer_free(batch);
		drm_intel_bufmgr_destroy(bufmgr);
	}
}
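run_test() relies on several helpers defined elsewhere in the test file. Plausible sketches of two of them, inferred purely from how they are used above (the object size, tiling mode and stride are assumptions; gem_create(), gem_set_tiling() and gem_write() are the stock igt ioctl wrappers):

/* Sketches inferred from usage above; sizes, tiling mode and stride
 * are assumptions, not the test's actual values. */
static uint32_t tiled_bo_create(int fd)
{
	uint32_t handle = gem_create(fd, 4096);

	gem_set_tiling(fd, handle, I915_TILING_X, 512);
	return handle;
}

static uint32_t batch_create(int fd)
{
	const uint32_t buf[2] = { MI_BATCH_BUFFER_END, 0 };
	uint32_t handle = gem_create(fd, 4096);

	/* Matches execbuf[i].batch_len = 2*sizeof(uint32_t) above. */
	gem_write(fd, handle, 0, buf, sizeof(buf));
	return handle;
}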
Example 3
static void render_timeout(int fd)
{
	drm_intel_bufmgr *bufmgr;
	struct intel_batchbuffer *batch;
	int64_t timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;
	int64_t negative_timeout = -1;
	int ret;
	const bool do_signals = true; /* signals make the wait appear to
				       * consume less process CPU time */
	bool done = false;
	int i, iter = 1;

	igt_skip_on_simulation();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	/* dst and dst2 are file-scope buffer objects. */
	dst = drm_intel_bo_alloc(bufmgr, "dst", BUF_SIZE, 4096);
	dst2 = drm_intel_bo_alloc(bufmgr, "dst2", BUF_SIZE, 4096);

	igt_skip_on_f(gem_bo_wait_timeout(fd, dst->handle, &timeout) == -EINVAL,
		      "kernel doesn't support wait_timeout, skipping test\n");
	timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;

	/* Figure out a rough number of fills required to consume 1 second of
	 * GPU work.
	 */
	do {
		struct timespec start, end;
		long diff;

#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
#endif

		igt_assert(clock_gettime(CLOCK_MONOTONIC_RAW, &start) == 0);
		for (i = 0; i < iter; i++)
			blt_color_fill(batch, dst, BUF_PAGES);
		intel_batchbuffer_flush(batch);
		drm_intel_bo_wait_rendering(dst);
		igt_assert(clock_gettime(CLOCK_MONOTONIC_RAW, &end) == 0);

		diff = do_time_diff(&end, &start);
		igt_assert(diff >= 0);

		if ((diff / MSEC_PER_SEC) > ENOUGH_WORK_IN_SECONDS)
			done = true;
		else
			iter <<= 1;
	} while (!done && iter < 1000000);

	igt_assert_lt(iter, 1000000);

	igt_info("%d iters is enough work\n", iter);
	gem_quiescent_gpu(fd);
	if (do_signals)
		igt_fork_signal_helper();

	/* We should be able to do half as much work in the same amount of time,
	 * but because we might schedule almost twice as much as required, we
	 * might accidentally time out. Hence add some fudge. */
	for (i = 0; i < iter/3; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);
	igt_assert(gem_bo_busy(fd, dst2->handle) == true);

	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), 0);
	igt_assert(gem_bo_busy(fd, dst2->handle) == false);
	igt_assert_neq(timeout, 0);
	if (timeout == (ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC))
		igt_info("Buffer was already done!\n");
	else
		igt_info("Finished with %" PRId64 " ns remaining\n", timeout);

	/* check that polling with timeout=0 works. */
	timeout = 0;
	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), 0);
	igt_assert_eq(timeout, 0);

	/* Now check that we correctly time out; twice the auto-tuned load
	 * should be more than enough. */
	timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;
	for (i = 0; i < iter*2; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);

	ret = gem_bo_wait_timeout(fd, dst2->handle, &timeout);
	igt_assert_eq(ret, -ETIME);
	igt_assert_eq(timeout, 0);
	igt_assert(gem_bo_busy(fd, dst2->handle) == true);

	/* check that polling with timeout=0 works. */
	timeout = 0;
	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), -ETIME);
	igt_assert_eq(timeout, 0);

	/* Now check that we can pass negative (infinite) timeouts. */
	negative_timeout = -1;
	for (i = 0; i < iter; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);

	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &negative_timeout), 0);
	igt_assert_eq(negative_timeout, -1); /* infinity always remains */
	igt_assert(gem_bo_busy(fd, dst2->handle) == false);

	if (do_signals)
		igt_stop_signal_helper();
	drm_intel_bo_unreference(dst2);
	drm_intel_bo_unreference(dst);
	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);
}
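Two of the helpers above are worth sketching. gem_bo_wait_timeout() is presumably a thin wrapper around the i915 GEM_WAIT ioctl, whose defining property the test relies on: the kernel writes the remaining time back into timeout_ns. do_time_diff() must return milliseconds for the diff / MSEC_PER_SEC comparison above to yield seconds. Both bodies below are assumptions, not the test's actual code:

/* Assumed wrapper around DRM_IOCTL_I915_GEM_WAIT; the kernel updates
 * timeout_ns with the time remaining (0 on -ETIME, left alone for a
 * negative, i.e. infinite, timeout). */
static int gem_bo_wait_timeout(int fd, uint32_t handle, int64_t *timeout)
{
	struct drm_i915_gem_wait wait = {
		.bo_handle = handle,
		.timeout_ns = *timeout,
	};
	int ret = 0;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
		ret = -errno;

	*timeout = wait.timeout_ns;
	return ret;
}

/* Assumed timing helper, returning the delta in milliseconds. */
static long do_time_diff(struct timespec *end, struct timespec *start)
{
	return (end->tv_sec - start->tv_sec) * MSEC_PER_SEC +
	       (end->tv_nsec - start->tv_nsec) / (NSEC_PER_SEC / MSEC_PER_SEC);
}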