Code Example #1
static void
processes(void)
{
	int *all_fds;
	uint64_t aperture;
	struct rlimit rlim;
	int ppgtt_mode;
	int ctx_size;
	int obj_size;
	int n;

	igt_skip_on_simulation();

	fd = drm_open_driver_render(DRIVER_INTEL);
	devid = intel_get_drm_devid(fd);
	aperture = gem_aperture_size(fd);

	ppgtt_mode = uses_ppgtt(fd);
	igt_require(ppgtt_mode);

	render_copy = igt_get_render_copyfunc(devid);
	igt_require_f(render_copy, "no render-copy function\n");

	if (ppgtt_mode > 1)
		ctx_size = aperture >> 10; /* Assume full-ppgtt of maximum size */
	else
Code Example #2
File: igt_debugfs.c  Project: joshloo/intel-gpu-tools
/**
 * igt_require_pipe_crc:
 *
 * Convenience helper to check whether pipe CRC capturing is supported by the
 * kernel. Uses igt_skip to automatically skip the test/subtest if this isn't
 * the case.
 */
void igt_require_pipe_crc(void)
{
	const char *cmd = "pipe A none";
	FILE *ctl;
	size_t written;
	int ret;

	ctl = igt_debugfs_fopen("i915_display_crc_ctl", "r+");
	igt_require_f(ctl,
		      "No display_crc_ctl found, kernel too old\n");
	written = fwrite(cmd, 1, strlen(cmd), ctl);
	ret = fflush(ctl);
	igt_require_f((written == strlen(cmd) && ret == 0) || errno != ENODEV,
		      "CRCs not supported on this platform\n");

	fclose(ctl);
}
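
As the doc comment above notes, igt_require_pipe_crc() is a skip helper rather than an assertion: if the debugfs control file is missing or the platform rejects the command, the calling test or subtest is skipped instead of failed. Below is a minimal sketch of how a display test might call it from its fixture; the igt_main/igt_fixture/igt_subtest skeleton, the drm_open_driver() call, and the assumption of the usual IGT headers (igt.h) are illustrative choices, not part of the original example.

/* Hypothetical skeleton (assumed, not from the original source): bail out
 * of the whole test early when the kernel cannot capture pipe CRCs. */
igt_main
{
	int drm_fd = -1;

	igt_fixture {
		drm_fd = drm_open_driver(DRIVER_INTEL);
		igt_require_pipe_crc(); /* skips the test if CRCs are unsupported */
	}

	igt_subtest("pipe-crc-sanity") {
		/* ... configure a mode and compare captured CRCs here ... */
	}

	igt_fixture
		close(drm_fd);
}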
Code Example #3
static void setup_idle(void)
{
	uint64_t tsc, pc[NUM_PC_STATES], res, best_res;
	int pc_i, best_pc_i = 0, retries, consecutive_not_best;

	/* Warm up, then measure PC-state residency deltas against the TSC,
	 * repeating until the deepest reached state and its residency stop
	 * improving for a few consecutive passes. */
	for (retries = 0; ; retries++) {

		set_alarm(opts.res_warm_time, 0);
		while (!alarm_received)
			pause();

		set_alarm(opts.res_calc_time, 0);

		tsc = msr_read(IA32_TIME_STAMP_COUNTER);
		for (pc_i = best_pc_i; pc_i < NUM_PC_STATES; pc_i++)
			pc[pc_i] = msr_read(res_msr_addrs[pc_i]);

		while (!alarm_received)
			pause();

		for (pc_i = best_pc_i; pc_i < NUM_PC_STATES; pc_i++)
			pc[pc_i] = msr_read(res_msr_addrs[pc_i]) - pc[pc_i];
		tsc = msr_read(IA32_TIME_STAMP_COUNTER) - tsc;

		for (pc_i = NUM_PC_STATES - 1; pc_i >= best_pc_i; pc_i--)
			if (pc[pc_i] != 0)
				break;
		igt_require_f(pc_i >= 0, "We're not reaching any PC states!\n");

		/* Idle residency as a percentage of TSC ticks over the interval. */
		res = pc[pc_i] * 100 / tsc;

		if (retries == 0 || pc_i > best_pc_i || res > best_res) {
			best_pc_i = pc_i;
			best_res = res;
			consecutive_not_best = 0;
		} else {
			consecutive_not_best++;
			if (consecutive_not_best > 2)
				break;
		}
	}

	deepest_pc_state = res_msr_addrs[best_pc_i];
	idle_res = best_res;

	printf("Stable idle residency retries:\t%d\n", retries);
	printf("Deepest PC state reached when idle:\t%s\n",
	       res_msr_names[best_pc_i]);
	printf("Idle residency for this state:\t%02"PRIu64"%%\n", idle_res);
}
Code Example #4
static void
test_huge_bo(int fd, int huge, int tiling)
{
	uint32_t bo;
	char *ptr;
	char *tiled_pattern;
	char *linear_pattern;
	uint64_t size, last_offset;
	int pitch = tiling == I915_TILING_Y ? 128 : 512;
	int i;

	switch (huge) {
	case -1:
		size = gem_mappable_aperture_size() / 2;
		break;
	case 0:
		size = gem_mappable_aperture_size() + PAGE_SIZE;
		break;
	default:
		size = gem_aperture_size(fd) + PAGE_SIZE;
		break;
	}
	intel_require_memory(1, size, CHECK_RAM);

	last_offset = size - PAGE_SIZE;

	/* Create pattern */
	bo = gem_create(fd, PAGE_SIZE);
	if (tiling)
		gem_set_tiling(fd, bo, tiling, pitch);
	linear_pattern = gem_mmap__gtt(fd, bo, PAGE_SIZE,
				       PROT_READ | PROT_WRITE);
	for (i = 0; i < PAGE_SIZE; i++)
		linear_pattern[i] = i;
	tiled_pattern = gem_mmap__cpu(fd, bo, 0, PAGE_SIZE, PROT_READ);

	gem_set_domain(fd, bo, I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT, 0);
	gem_close(fd, bo);

	bo = gem_create(fd, size);
	if (tiling)
		gem_set_tiling(fd, bo, tiling, pitch);

	/* Initialise first/last page through CPU mmap */
	ptr = gem_mmap__cpu(fd, bo, 0, size, PROT_READ | PROT_WRITE);
	memcpy(ptr, tiled_pattern, PAGE_SIZE);
	memcpy(ptr + last_offset, tiled_pattern, PAGE_SIZE);
	munmap(ptr, size);

	/* Obtain mapping for the object through GTT. */
	ptr = __gem_mmap__gtt(fd, bo, size, PROT_READ | PROT_WRITE);
	igt_require_f(ptr, "Huge BO GTT mapping not supported.\n");

	set_domain_gtt(fd, bo);

	/* Access through GTT should still provide the CPU written values. */
	igt_assert(memcmp(ptr              , linear_pattern, PAGE_SIZE) == 0);
	igt_assert(memcmp(ptr + last_offset, linear_pattern, PAGE_SIZE) == 0);

	gem_set_tiling(fd, bo, I915_TILING_NONE, 0);

	igt_assert(memcmp(ptr              , tiled_pattern, PAGE_SIZE) == 0);
	igt_assert(memcmp(ptr + last_offset, tiled_pattern, PAGE_SIZE) == 0);

	munmap(ptr, size);

	gem_close(fd, bo);
	munmap(tiled_pattern, PAGE_SIZE);
	munmap(linear_pattern, PAGE_SIZE);
}
Code Example #5
int main(int argc, char **argv)
{
	data_t data = {0, };
	struct intel_batchbuffer *batch = NULL;
	struct igt_buf src, dst;
	igt_render_copyfunc_t render_copy = NULL;
	int opt_dump_aub = igt_aub_dump_enabled();

	igt_simple_init_parse_opts(&argc, argv, "da", NULL, NULL,
				   opt_handler, NULL);

	igt_fixture {
		data.drm_fd = drm_open_any_render();
		data.devid = intel_get_drm_devid(data.drm_fd);

		data.bufmgr = drm_intel_bufmgr_gem_init(data.drm_fd, 4096);
		igt_assert(data.bufmgr);

		render_copy = igt_get_render_copyfunc(data.devid);
		igt_require_f(render_copy,
			      "no render-copy function\n");

		batch = intel_batchbuffer_alloc(data.bufmgr, data.devid);
		igt_assert(batch);
	}

	scratch_buf_init(&data, &src, WIDTH, HEIGHT, STRIDE, SRC_COLOR);
	scratch_buf_init(&data, &dst, WIDTH, HEIGHT, STRIDE, DST_COLOR);

	scratch_buf_check(&data, &src, WIDTH / 2, HEIGHT / 2, SRC_COLOR);
	scratch_buf_check(&data, &dst, WIDTH / 2, HEIGHT / 2, DST_COLOR);

	if (opt_dump_png) {
		scratch_buf_write_to_png(&src, "source.png");
		scratch_buf_write_to_png(&dst, "destination.png");
	}

	if (opt_dump_aub) {
		drm_intel_bufmgr_gem_set_aub_filename(data.bufmgr,
						      "rendercopy.aub");
		drm_intel_bufmgr_gem_set_aub_dump(data.bufmgr, true);
	}

	/* This will copy the src to the mid point of the dst buffer. Presumably
	 * the out of bounds accesses will get clipped.
	 * Resulting buffer should look like:
	 *	  _______
	 *	 |dst|dst|
	 *	 |dst|src|
	 *	  -------
	 */
	render_copy(batch, NULL,
		    &src, 0, 0, WIDTH, HEIGHT,
		    &dst, WIDTH / 2, HEIGHT / 2);

	if (opt_dump_png)
		scratch_buf_write_to_png(&dst, "result.png");

	if (opt_dump_aub) {
		drm_intel_gem_bo_aub_dump_bmp(dst.bo,
			0, 0, WIDTH, HEIGHT,
			AUB_DUMP_BMP_FORMAT_ARGB_8888,
			STRIDE, 0);
		drm_intel_bufmgr_gem_set_aub_dump(data.bufmgr, false);
	} else if (check_all_pixels) {
		uint32_t val;
		int i, j;
		gem_read(data.drm_fd, dst.bo->handle, 0,
			 data.linear, sizeof(data.linear));
		for (i = 0; i < WIDTH; i++) {
			for (j = 0; j < HEIGHT; j++) {
				uint32_t color = DST_COLOR;
				val = data.linear[j * WIDTH + i];
				if (j >= HEIGHT/2 && i >= WIDTH/2)
					color = SRC_COLOR;

				igt_assert_f(val == color,
					     "Expected 0x%08x, found 0x%08x at (%d,%d)\n",
					     color, val, i, j);
			}
		}
	} else {
		scratch_buf_check(&data, &dst, 10, 10, DST_COLOR);
		scratch_buf_check(&data, &dst, WIDTH - 10, HEIGHT - 10, SRC_COLOR);
	}

	igt_exit();
}