Example #1
static void test_with_two_bos(void)
{
	int fd1, fd2;
	uint32_t handle1, handle2, handle_import;
	int dma_buf_fd;

	counter = 0;

	fd1 = drm_open_driver(DRIVER_INTEL);
	fd2 = drm_open_driver(DRIVER_INTEL);

	handle1 = gem_create(fd1, BO_SIZE);
	handle2 = gem_create(fd1, BO_SIZE);

	dma_buf_fd = prime_handle_to_fd(fd1, handle1);
	handle_import = prime_fd_to_handle(fd2, dma_buf_fd);

	/* Drop the first import completely before exporting the second bo. */
	close(dma_buf_fd);
	gem_close(fd1, handle1);

	dma_buf_fd = prime_handle_to_fd(fd1, handle2);
	handle_import = prime_fd_to_handle(fd2, dma_buf_fd);
	check_bo(fd1, handle2, fd2, handle_import);

	gem_close(fd1, handle2);
	close(dma_buf_fd);

	/* The import must survive the exporter closing its handle and fd. */
	check_bo(fd2, handle_import, fd2, handle_import);

	close(fd1);
	close(fd2);
}
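check_bo() is not shown in this excerpt. A minimal sketch of what the example assumes it does, reusing BO_SIZE and the counter global that the test resets: map both handles and verify they alias the same storage.

static void check_bo(int fd1, uint32_t handle1, int fd2, uint32_t handle2)
{
	char *ptr1, *ptr2;

	ptr1 = gem_mmap__gtt(fd1, handle1, BO_SIZE, PROT_READ | PROT_WRITE);
	ptr2 = gem_mmap__gtt(fd2, handle2, BO_SIZE, PROT_READ | PROT_WRITE);

	/* Write through the first mapping ... */
	memset(ptr1, counter, BO_SIZE);
	/* ... and check the second mapping sees the same pages. */
	igt_assert(memcmp(ptr1, ptr2, BO_SIZE) == 0);
	counter++;

	munmap(ptr1, BO_SIZE);
	munmap(ptr2, BO_SIZE);
}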
Example #2
static void run_test(int fd, unsigned ring, unsigned flags)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	const uint32_t bbe = MI_BATCH_BUFFER_END;
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_relocation_entry reloc[1024];
	struct drm_i915_gem_execbuffer2 execbuf;
	struct igt_hang_ring hang;
	uint32_t *batch, *b;
	int i;

	gem_require_ring(fd, ring);
	igt_skip_on_f(gen == 6 && (ring & ~(3<<13)) == I915_EXEC_BSD,
		      "MI_STORE_DATA broken on gen6 bsd\n");

	gem_quiescent_gpu(fd);

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)obj;
	execbuf.buffer_count = 2;
	execbuf.flags = ring | (1 << 11); /* I915_EXEC_NO_RELOC */
	/* Pre-gen6, MI_STORE_DWORD_IMM requires a secure batch. */
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE;

	memset(obj, 0, sizeof(obj));
	obj[0].handle = gem_create(fd, 4096);
	obj[0].flags |= EXEC_OBJECT_WRITE;
	obj[1].handle = gem_create(fd, 1024*16 + 4096);
	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
	igt_require(__gem_execbuf(fd, &execbuf) == 0);

	obj[1].relocs_ptr = (uintptr_t)reloc;
	obj[1].relocation_count = 1024;

	batch = gem_mmap__cpu(fd, obj[1].handle, 0, 16*1024 + 4096,
			      PROT_WRITE | PROT_READ);
	gem_set_domain(fd, obj[1].handle,
		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

	memset(reloc, 0, sizeof(reloc));
	b = batch;
	for (i = 0; i < 1024; i++) {
		uint64_t offset;

		reloc[i].target_handle = obj[0].handle;
		reloc[i].presumed_offset = obj[0].offset;
		reloc[i].offset = (b - batch + 1) * sizeof(*batch);
		reloc[i].delta = i * sizeof(uint32_t);
		reloc[i].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		reloc[i].write_domain = I915_GEM_DOMAIN_INSTRUCTION;

		offset = obj[0].offset + reloc[i].delta;
		*b++ = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
		if (gen >= 8) {
			*b++ = offset;
			*b++ = offset >> 32;
		} else if (gen >= 4) {
			*b++ = 0;
			*b++ = offset;
			reloc[i].offset += sizeof(*batch);
		} else {
			b[-1] -= 1;
			*b++ = offset;
		}
		*b++ = i;
	}
	*b++ = MI_BATCH_BUFFER_END;
	munmap(batch, 16*1024 + 4096);

	/* The remainder of this example (resubmitting the store batch until
	 * the ring is full, plus the optional hang injection that uses the
	 * igt_hang_ring declared above) was truncated in the source. */
}
Example #3
static void test_ring(int fd, unsigned ring, uint32_t flags)
{
	uint32_t bbe = MI_BATCH_BUFFER_END;
	uint32_t handle[3];
	uint32_t read, write;
	uint32_t active;
	unsigned i;

	gem_require_ring(fd, ring | flags);

	handle[TEST] = gem_create(fd, 4096);
	handle[BATCH] = gem_create(fd, 4096);
	gem_write(fd, handle[BATCH], 0, &bbe, sizeof(bbe));

	/* Create a long running batch which we can use to hog the GPU */
	handle[BUSY] = busy_blt(fd);

	/* Queue a batch after the busy, it should block and remain "busy" */
	igt_assert(exec_noop(fd, handle, ring | flags, false));
	igt_assert(still_busy(fd, handle[BUSY]));
	__gem_busy(fd, handle[TEST], &read, &write);
	igt_assert_eq(read, 1 << ring);
	igt_assert_eq(write, 0);

	/* Requeue with a write */
	igt_assert(exec_noop(fd, handle, ring | flags, true));
	igt_assert(still_busy(fd, handle[BUSY]));
	__gem_busy(fd, handle[TEST], &read, &write);
	igt_assert_eq(read, 1 << ring);
	igt_assert_eq(write, ring);

	/* Now queue it for a read across all available rings */
	active = 0;
	for (i = I915_EXEC_RENDER; i <= I915_EXEC_VEBOX; i++) {
		if (exec_noop(fd, handle, i | flags, false))
			active |= 1 << i;
	}
	igt_assert(still_busy(fd, handle[BUSY]));
	__gem_busy(fd, handle[TEST], &read, &write);
	igt_assert_eq(read, active);
	igt_assert_eq(write, ring); /* from the earlier write */

	/* Check that our long batch was long enough */
	igt_assert(still_busy(fd, handle[BUSY]));

	/* And make sure it becomes idle again */
	gem_sync(fd, handle[TEST]);
	__gem_busy(fd, handle[TEST], &read, &write);
	igt_assert_eq(read, 0);
	igt_assert_eq(write, 0);

	for (i = TEST; i <= BATCH; i++)
		gem_close(fd, handle[i]);
}
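The assertions above imply how __gem_busy() decodes the busy ioctl: the low 16 bits identify the engine writing (an id, hence write == ring), while the high 16 bits are a bitmask of readers (hence read == 1 << ring). A sketch under that assumption:

static void __gem_busy(int fd, uint32_t handle,
		       uint32_t *read, uint32_t *write)
{
	struct drm_i915_gem_busy busy;

	memset(&busy, 0, sizeof(busy));
	busy.handle = handle;

	do_ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);

	*write = busy.busy & 0xffff; /* engine id of the writer, if any */
	*read = busy.busy >> 16;     /* bitmask of engines reading */
}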
Example #4
static void run(data_t *data, int child)
{
	const int size = 4096 * (256 + child * child);
	const int tiling = child % 2;
	const int write = child % 2;
	uint32_t handle = gem_create(data->fd, size);
	uint32_t *ptr;
	uint32_t x;

	igt_assert(handle);

	if (tiling != I915_TILING_NONE)
		gem_set_tiling(data->fd, handle, tiling, 4096);

	/* load up the unfaulted bo */
	busy(data, handle, size, 100);

	/* Note that we ignore the API and rely on the implicit
	 * set-to-gtt-domain within the fault handler.
	 */
	if (write) {
		ptr = gem_mmap__gtt(data->fd, handle, size,
				    PROT_READ | PROT_WRITE);
		ptr[rand() % (size / 4)] = canary;
	} else {
		ptr = gem_mmap__gtt(data->fd, handle, size, PROT_READ);
	}
	x = ptr[rand() % (size / 4)];
	munmap(ptr, size);

	igt_assert_eq_u32(x, canary);
}
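Neither busy() nor canary is defined in this excerpt, and the final assert on a randomly chosen dword only holds if busy() fills the entire object with the canary value. A hypothetical stand-in using pwrite (the real helper presumably uses the blitter, keeping the bo unfaulted as the comment says):

static const uint32_t canary = 0xdeadbeef; /* arbitrary fill value */

static void busy(data_t *data, uint32_t handle, int size, int loops)
{
	uint32_t *tmp = malloc(size);
	int i;

	igt_assert(tmp);
	for (i = 0; i < size / 4; i++)
		tmp[i] = canary;

	/* Repeatedly upload the canary pattern without faulting a mmap. */
	while (loops--)
		gem_write(data->fd, handle, 0, tmp, size);

	free(tmp);
}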
Example #5
static void test_llseek_size(void)
{
	int fd, i;
	uint32_t handle;
	int dma_buf_fd;

	counter = 0;

	fd = drm_open_driver(DRIVER_INTEL);

	for (i = 0; i < 10; i++) {
		int bufsz = 4096 << i;

		handle = gem_create(fd, bufsz);
		dma_buf_fd = prime_handle_to_fd(fd, handle);

		gem_close(fd, handle);

		igt_assert(prime_get_size(dma_buf_fd) == bufsz);

		close(dma_buf_fd);
	}

	close(fd);
}
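prime_get_size() relies on dma-buf fds reporting their size through lseek(SEEK_END). A sketch of the helper, assuming that behaviour:

static off_t prime_get_size(int dma_buf_fd)
{
	off_t len;

	len = lseek(dma_buf_fd, 0, SEEK_END);
	igt_assert(len >= 0);
	/* Rewind so subsequent users of the fd start at offset zero. */
	igt_assert(lseek(dma_buf_fd, 0, SEEK_SET) == 0);

	return len;
}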
Example #6
int main(int argc, char **argv)
{
	uint32_t batch[2] = {MI_BATCH_BUFFER_END};
	uint32_t handle;
	uint32_t devid;
	int fd;

	drmtest_subtest_init(argc, argv);

	fd = drm_open_any();
	devid = intel_get_drm_devid(fd);

	handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, batch, sizeof(batch));

	if (drmtest_run_subtest("render"))
		loop(fd, handle, I915_EXEC_RENDER, "render");

	if (drmtest_run_subtest("bsd"))
		if (HAS_BSD_RING(devid))
			loop(fd, handle, I915_EXEC_BSD, "bsd");

	if (drmtest_run_subtest("blt"))
		if (HAS_BLT_RING(devid))
			loop(fd, handle, I915_EXEC_BLT, "blt");

	gem_close(fd, handle);

	close(fd);

	return skipped_all ? 77 : 0;
}
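This example predates the igt_subtest machinery (hence drmtest_run_subtest() and exit code 77 for "all subtests skipped"), and loop() is not shown. A hypothetical minimal loop() that submits the nop batch once on the requested ring, using today's IGT wrappers:

static void loop(int fd, uint32_t handle, unsigned ring, const char *name)
{
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 execbuf;

	(void)name; /* the real helper presumably uses this for logging */

	memset(&obj, 0, sizeof(obj));
	obj.handle = handle;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&obj;
	execbuf.buffer_count = 1;
	execbuf.batch_len = 8; /* MI_BATCH_BUFFER_END + padding dword */
	execbuf.flags = ring;

	gem_execbuf(fd, &execbuf);
	gem_sync(fd, handle); /* wait for the nop to retire */
}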
Example #7
static void
test_short(int fd)
{
	struct drm_i915_gem_mmap_gtt mmap_arg;
	int pages, p;

	mmap_arg.handle = gem_create(fd, OBJECT_SIZE);
	igt_assert(mmap_arg.handle);

	igt_assert(drmIoctl(fd,
			    DRM_IOCTL_I915_GEM_MMAP_GTT,
			    &mmap_arg) == 0);
	for (pages = 1; pages <= OBJECT_SIZE / PAGE_SIZE; pages <<= 1) {
		uint8_t *r, *w;

		w = mmap64(0, pages * PAGE_SIZE, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, mmap_arg.offset);
		igt_assert(w != MAP_FAILED);

		r = mmap64(0, pages * PAGE_SIZE, PROT_READ,
			   MAP_SHARED, fd, mmap_arg.offset);
		igt_assert(r != MAP_FAILED);

		for (p = 0; p < pages; p++) {
			w[p*PAGE_SIZE] = r[p*PAGE_SIZE];
			w[p*PAGE_SIZE+(PAGE_SIZE-1)] =
				r[p*PAGE_SIZE+(PAGE_SIZE-1)];
		}

		munmap(r, pages * PAGE_SIZE);
		munmap(w, pages * PAGE_SIZE);
	}
	gem_close(fd, mmap_arg.handle);
}
Example #8
static void
dontneed_after_mmap(void)
{
	int fd = drm_open_driver(DRIVER_INTEL);
	uint32_t handle;
	char *ptr;

	handle = gem_create(fd, OBJECT_SIZE);
	ptr = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
	igt_assert(ptr);
	gem_madvise(fd, handle, I915_MADV_DONTNEED);
	close(fd);

	signal(SIGBUS, sigtrap);
	switch (setjmp(jmp)) {
	case SIGBUS:
		/* The write below faulted as expected: a purged bo
		 * raises SIGBUS once its backing storage is gone. */
		break;
	case 0:
		*ptr = 0; /* should fault and longjmp back with SIGBUS */
		/* fallthrough: only reached if the write did not fault */
	default:
		igt_assert(!"reached");
		break;
	}
	munmap(ptr, OBJECT_SIZE);
	signal(SIGBUS, SIG_DFL);
}
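The recovery path assumes a jmp_buf named jmp and a handler named sigtrap that longjmps back with the signal number, roughly:

#include <setjmp.h>
#include <signal.h>

static jmp_buf jmp;

static void sigtrap(int sig)
{
	/* Return to the setjmp() site, making it yield the signal number. */
	longjmp(jmp, sig);
}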
Example #9
static void *thread(void *data)
{
	struct thread *t = data;
	uint32_t bbe = MI_BATCH_BUFFER_END;
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 obj;
	uint32_t *ctx;

	memset(&obj, 0, sizeof(obj));
	obj.handle = gem_create(t->fd, 4096);
	gem_write(t->fd, obj.handle, 0, &bbe, sizeof(bbe));

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&obj;
	execbuf.buffer_count = 1;

	ctx = malloc(t->num_ctx * sizeof(uint32_t));
	igt_assert(ctx);
	memcpy(ctx, t->all_ctx, t->num_ctx * sizeof(uint32_t));
	igt_permute_array(ctx, t->num_ctx, xchg_int);

	for (unsigned n = 0; n < t->num_ctx; n++) {
		execbuf.rsvd1 = ctx[n];
		gem_execbuf(t->fd, &execbuf);
	}

	free(ctx);
	gem_close(t->fd, obj.handle);

	return NULL;
}
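igt_permute_array() shuffles through a caller-supplied swap callback. Since the array holds 32-bit context ids, xchg_int is presumably a plain element swap:

static void xchg_int(void *array, unsigned i, unsigned j)
{
	uint32_t *a = array;
	uint32_t tmp;

	tmp = a[i];
	a[i] = a[j];
	a[j] = tmp;
}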
Example #10
static void
test_access(int fd)
{
	uint32_t handle, flink, handle2;
	struct drm_i915_gem_mmap_gtt mmap_arg;
	int fd2;

	handle = gem_create(fd, OBJECT_SIZE);
	igt_assert(handle);

	fd2 = drm_open_driver(DRIVER_INTEL);

	/* Check that fd1 can mmap. */
	mmap_arg.handle = handle;
	do_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);

	igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, mmap_arg.offset) != MAP_FAILED);

	/* Check that the same offset on the other fd doesn't work. */
	igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd2, mmap_arg.offset) == MAP_FAILED);
	igt_assert(errno == EACCES);

	flink = gem_flink(fd, handle);
	igt_assert(flink);
	handle2 = gem_open(fd2, flink);
	igt_assert(handle2);

	/* Recheck after flink: the same offset on the other fd must now work. */
	igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd2, mmap_arg.offset) != MAP_FAILED);
}
Example #11
static void test_llseek_bad(void)
{
	int fd;
	uint32_t handle;
	int dma_buf_fd;

	counter = 0;

	fd = drm_open_driver(DRIVER_INTEL);

	handle = gem_create(fd, BO_SIZE);
	dma_buf_fd = prime_handle_to_fd(fd, handle);

	gem_close(fd, handle);

	igt_require(lseek(dma_buf_fd, 0, SEEK_END) >= 0);

	igt_assert(lseek(dma_buf_fd, -1, SEEK_END) == -1 && errno == EINVAL);
	igt_assert(lseek(dma_buf_fd, 1, SEEK_SET) == -1 && errno == EINVAL);
	igt_assert(lseek(dma_buf_fd, BO_SIZE, SEEK_SET) == -1 && errno == EINVAL);
	igt_assert(lseek(dma_buf_fd, BO_SIZE + 1, SEEK_SET) == -1 && errno == EINVAL);
	igt_assert(lseek(dma_buf_fd, BO_SIZE - 1, SEEK_SET) == -1 && errno == EINVAL);

	close(dma_buf_fd);

	close(fd);
}
Example #12
static void
test_read_write2(int fd, enum test_read_write order)
{
	uint32_t handle;
	void *r, *w;
	volatile uint32_t val = 0;

	handle = gem_create(fd, OBJECT_SIZE);

	r = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ);

	w = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);

	if (order == READ_BEFORE_WRITE) {
		val = *(uint32_t *)r;
		*(uint32_t *)w = val;
	} else {
		*(uint32_t *)w = val;
		val = *(uint32_t *)r;
	}

	gem_close(fd, handle);
	munmap(r, OBJECT_SIZE);
	munmap(w, OBJECT_SIZE);
}
Example #13
static void *thread_fn_export_vs_close(void *p)
{
	struct drm_prime_handle prime_h2f;
	struct drm_gem_close close_bo;
	int fd = (uintptr_t)p;
	uint32_t handle;

	while (!pls_die) {
		/* We want to race gem close against prime export on handle one. */
		handle = gem_create(fd, 4096);
		if (handle != 1)
			gem_close(fd, handle);

		/* raw ioctl since we expect this to fail */

		/* WTF: for gem_flink_race I've unconditionally used handle == 1
		 * here, but with prime it seems to help a _lot_ to use
		 * something more random. */
		prime_h2f.handle = 1;
		prime_h2f.flags = DRM_CLOEXEC;
		prime_h2f.fd = -1;

		ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime_h2f);

		close_bo.handle = 1;
		ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_bo);

		close(prime_h2f.fd);
	}

	return (void *)0;
}
Example #14
static void test_with_one_bo_two_files(void)
{
	int fd1, fd2;
	uint32_t handle_import, handle_open, handle_orig, flink_name;
	int dma_buf_fd1, dma_buf_fd2;

	fd1 = drm_open_driver(DRIVER_INTEL);
	fd2 = drm_open_driver(DRIVER_INTEL);

	handle_orig = gem_create(fd1, BO_SIZE);
	dma_buf_fd1 = prime_handle_to_fd(fd1, handle_orig);

	flink_name = gem_flink(fd1, handle_orig);
	handle_open = gem_open(fd2, flink_name);

	dma_buf_fd2 = prime_handle_to_fd(fd2, handle_open);
	handle_import = prime_fd_to_handle(fd2, dma_buf_fd2);

	/* dma-buf self-importing a flink bo should give the same handle */
	igt_assert_eq_u32(handle_import, handle_open);

	close(fd1);
	close(fd2);
	close(dma_buf_fd1);
	close(dma_buf_fd2);
}
Example #15
static uint32_t batch(int fd)
{
	const uint32_t buf[] = {MI_BATCH_BUFFER_END};
	uint32_t handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, buf, sizeof(buf));
	return handle;
}
Example #16
static void draw_rect_render(int fd, struct cmd_data *cmd_data,
			     struct buf_data *buf, struct rect *rect,
			     uint32_t color)
{
	drm_intel_bo *src, *dst;
	uint32_t devid = intel_get_drm_devid(fd);
	igt_render_copyfunc_t rendercopy = igt_get_render_copyfunc(devid);
	struct igt_buf src_buf, dst_buf;
	struct intel_batchbuffer *batch;
	uint32_t tiling, swizzle;
	struct buf_data tmp;
	int pixel_size = buf->bpp / 8;
	unsigned adjusted_w, adjusted_dst_x;

	igt_skip_on(!rendercopy);

	/* Rendercopy works at 32bpp, so if you try to do copies on buffers with
	 * smaller bpps you won't succeed if you need to copy "half" of a 32bpp
	 * pixel or something similar. */
	igt_skip_on(rect->x % (32 / buf->bpp) != 0 ||
		    rect->y % (32 / buf->bpp) != 0 ||
		    rect->w % (32 / buf->bpp) != 0 ||
		    rect->h % (32 / buf->bpp) != 0);

	gem_get_tiling(fd, buf->handle, &tiling, &swizzle);

	/* We create a temporary buffer and copy from it using rendercopy. */
	tmp.size = rect->w * rect->h * pixel_size;
	tmp.handle = gem_create(fd, tmp.size);
	tmp.stride = rect->w * pixel_size;
	tmp.bpp = buf->bpp;
	draw_rect_mmap_cpu(fd, &tmp, &(struct rect){0, 0, rect->w, rect->h},
			   I915_TILING_NONE, I915_BIT_6_SWIZZLE_NONE, color);

	/* The remainder of this example (wrapping src/dst in igt_buf,
	 * performing the rendercopy into the target and cleaning up the
	 * temporary bo) was truncated in the source. */
}
Example #17
static void performance(void)
{
	int n, loop, count;
	int fd, num_fences;
	double linear[2], tiled[2];

	fd = drm_open_any();

	num_fences = gem_available_fences(fd);
	igt_require(num_fences > 0);

	for (count = 2; count < 4*num_fences; count *= 2) {
		struct timeval start, end;
		uint32_t handle[count];
		void *ptr[count];

		for (n = 0; n < count; n++) {
			handle[n] = gem_create(fd, OBJECT_SIZE);
			ptr[n] = gem_mmap(fd, handle[n], OBJECT_SIZE, PROT_READ | PROT_WRITE);
			igt_assert(ptr[n]);
		}

		gettimeofday(&start, NULL);
		for (loop = 0; loop < 1024; loop++) {
			for (n = 0; n < count; n++)
				memset(ptr[n], 0, OBJECT_SIZE);
		}
		gettimeofday(&end, NULL);

		linear[count != 2] = count * loop / elapsed(&start, &end);
		igt_info("Upload rate for %d linear surfaces:	%7.3fMiB/s\n", count, linear[count != 2]);

		for (n = 0; n < count; n++)
			gem_set_tiling(fd, handle[n], I915_TILING_X, 1024);

		gettimeofday(&start, NULL);
		for (loop = 0; loop < 1024; loop++) {
			for (n = 0; n < count; n++)
				memset(ptr[n], 0, OBJECT_SIZE);
		}
		gettimeofday(&end, NULL);

		tiled[count != 2] = count * loop / elapsed(&start, &end);
		igt_info("Upload rate for %d tiled surfaces:	%7.3fMiB/s\n", count, tiled[count != 2]);

		for (n = 0; n < count; n++) {
			munmap(ptr[n], OBJECT_SIZE);
			gem_close(fd, handle[n]);
		}

	}

	errno = 0;
	igt_assert(linear[1] > 0.75 * linear[0]);
	igt_assert(tiled[1] > 0.75 * tiled[0]);
}
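elapsed() is not shown; for the printed figures to come out as MiB/s it must return seconds, with each memset moving OBJECT_SIZE bytes (the MiB/s label further assumes OBJECT_SIZE is 1 MiB). A sketch assuming exactly that:

static double elapsed(const struct timeval *start, const struct timeval *end)
{
	return (end->tv_sec - start->tv_sec) +
	       1e-6 * (end->tv_usec - start->tv_usec);
}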
Example #18
static void
test_create_fd_close(int fd)
{
	igt_info("Testing closing with an object allocated.\n");

	gem_create(fd, 16*1024);
	/* leak it */

	close(fd);
}
Example #19
static void test_reimport_close_race(void)
{
	pthread_t *threads;
	int r, i, num_threads;
	int fds[2];
	int obj_count;
	void *status;
	uint32_t handle;
	int fake;

	/* Allocate exit handler fds in here so that we don't screw
	 * up the counts */
	fake = drm_open_driver(DRIVER_INTEL);

	gem_quiescent_gpu(fake);
	obj_count = get_object_count();

	num_threads = sysconf(_SC_NPROCESSORS_ONLN);

	threads = calloc(num_threads, sizeof(pthread_t));

	fds[0] = drm_open_driver(DRIVER_INTEL);

	handle = gem_create(fds[0], BO_SIZE);

	fds[1] = prime_handle_to_fd(fds[0], handle);

	for (i = 0; i < num_threads; i++) {
		r = pthread_create(&threads[i], NULL,
				   thread_fn_reimport_vs_close,
				   (void *)(uintptr_t)fds);
		igt_assert_eq(r, 0);
	}

	sleep(5);

	pls_die = 1;

	for (i = 0;  i < num_threads; i++) {
		pthread_join(threads[i], &status);
		igt_assert(status == 0);
	}

	close(fds[0]);
	close(fds[1]);

	gem_quiescent_gpu(fake);
	obj_count = get_object_count() - obj_count;

	igt_info("leaked %i objects\n", obj_count);

	close(fake);

	igt_assert_eq(obj_count, 0);
}
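The racing worker, thread_fn_reimport_vs_close(), is not shown. To provoke the race it presumably re-imports the exported dma-buf and immediately closes the resulting handle until pls_die is set:

static void *thread_fn_reimport_vs_close(void *p)
{
	struct drm_gem_close close_bo;
	int *fds = p;
	uint32_t handle;

	while (!pls_die) {
		handle = prime_fd_to_handle(fds[0], fds[1]);

		/* Raw ioctl: another thread may already have closed it. */
		close_bo.handle = handle;
		ioctl(fds[0], DRM_IOCTL_GEM_CLOSE, &close_bo);
	}

	return (void *)0;
}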
Example #20
static void wc_contention(void)
{
	const int loops = 4096;
	int n, count;
	int fd, num_fences;
	double linear[2], tiled[2];

	fd = drm_open_any();
	gem_require_mmap_wc(fd);

	num_fences = gem_available_fences(fd);
	igt_require(num_fences > 0);

	for (count = 1; count < 4*num_fences; count *= 2) {
		struct timeval start, end;
		struct thread_contention threads[count];

		for (n = 0; n < count; n++) {
			threads[n].handle = gem_create(fd, OBJECT_SIZE);
			threads[n].loops = loops;
			threads[n].fd = fd;
		}

		gettimeofday(&start, NULL);
		for (n = 0; n < count; n++)
			pthread_create(&threads[n].thread, NULL, wc_mmap, &threads[n]);
		for (n = 0; n < count; n++)
			pthread_join(threads[n].thread, NULL);
		gettimeofday(&end, NULL);

		linear[count != 2] = count * loops / elapsed(&start, &end) / (OBJECT_SIZE / 4096);
		igt_info("Contended upload rate for %d linear threads/wc:	%7.3fMiB/s\n", count, linear[count != 2]);

		for (n = 0; n < count; n++)
			gem_set_tiling(fd, threads[n].handle, I915_TILING_X, 1024);

		gettimeofday(&start, NULL);
		for (n = 0; n < count; n++)
			pthread_create(&threads[n].thread, NULL, wc_mmap, &threads[n]);
		for (n = 0; n < count; n++)
			pthread_join(threads[n].thread, NULL);
		gettimeofday(&end, NULL);

		tiled[count != 2] = count * loops / elapsed(&start, &end) / (OBJECT_SIZE / 4096);
		igt_info("Contended upload rate for %d tiled threads/wc:	%7.3fMiB/s\n", count, tiled[count != 2]);

		for (n = 0; n < count; n++) {
			gem_close(fd, threads[n].handle);
		}
	}

	errno = 0;
	igt_assert(linear[1] > 0.75 * linear[0]);
	igt_assert(tiled[1] > 0.75 * tiled[0]);
}
Example #21
static void
test_create_close(int fd)
{
	uint32_t handle;

	igt_info("Testing creating and closing an object.\n");

	handle = gem_create(fd, 16*1024);

	gem_close(fd, handle);
}
Example #22
static uint32_t
tiled_bo_create (int fd)
{
	uint32_t handle;

	handle = gem_create(fd, OBJECT_SIZE);

	gem_set_tiling(fd, handle, I915_TILING_X, WIDTH*4);

	return handle;
}
Example #23
/*
 * Creating an object with non-aligned size and trying to access it with an
 * offset, which is greater than the requested size but smaller than the
 * object's last page boundary. pwrite here must be successful.
 */
static void valid_nonaligned_size(int fd)
{
	int handle;
	char buf[PAGE_SIZE];

	handle = gem_create(fd, PAGE_SIZE / 2);

	gem_write(fd, handle, PAGE_SIZE / 2, buf, PAGE_SIZE / 2);

	gem_close(fd, handle);
}
Example #24
static void
test_huge_bo(int huge, int tiling)
{
	uint64_t huge_object_size, last_offset;
	char *ptr_cpu;
	char *cpu_pattern;
	uint32_t bo;
	int i;

	switch (huge) {
	case -1:
		huge_object_size = gem_mappable_aperture_size() / 2;
		break;
	case 0:
		huge_object_size = gem_mappable_aperture_size() + PAGE_SIZE;
		break;
	default:
		huge_object_size = gem_aperture_size(fd) + PAGE_SIZE;
		break;
	}
	intel_require_memory(1, huge_object_size, CHECK_RAM);

	last_offset = huge_object_size - PAGE_SIZE;

	cpu_pattern = malloc(PAGE_SIZE);
	igt_assert(cpu_pattern);
	for (i = 0; i < PAGE_SIZE; i++)
		cpu_pattern[i] = i;

	bo = gem_create(fd, huge_object_size);

	/* Obtain CPU mapping for the object. */
	ptr_cpu = gem_mmap__cpu(fd, bo, 0, huge_object_size,
				PROT_READ | PROT_WRITE);
	igt_assert(ptr_cpu);
	gem_set_domain(fd, bo, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
	/* Closing the handle is fine: the CPU mmap keeps the pages alive. */
	gem_close(fd, bo);

	/* Write first page through the mapping and assert reading it back
	 * works. */
	memcpy(ptr_cpu, cpu_pattern, PAGE_SIZE);
	igt_assert(memcmp(ptr_cpu, cpu_pattern, PAGE_SIZE) == 0);

	/* Write last page through the mapping and assert reading it back
	 * works. */
	memcpy(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE);
	igt_assert(memcmp(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE) == 0);

	/* Cross check that accessing two simultaneous pages works. */
	igt_assert(memcmp(ptr_cpu, ptr_cpu + last_offset, PAGE_SIZE) == 0);

	munmap(ptr_cpu, huge_object_size);
	free(cpu_pattern);
}
Example #25
static int test_can_pin(int fd)
{
    struct drm_i915_gem_pin pin;
    int ret;

    pin.handle = gem_create(fd, 4096);
    pin.alignment = 0;
    ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin);
    gem_close(fd, pin.handle);

    return ret == 0;
}
Example #26
static uint32_t
batch_create (int fd)
{
	uint32_t buf[] = { MI_BATCH_BUFFER_END, 0 };
	uint32_t batch_handle;

	batch_handle = gem_create(fd, BATCH_SIZE);

	gem_write(fd, batch_handle, 0, buf, sizeof(buf));

	return batch_handle;
}
Example #27
static void run_on_ring(int fd, unsigned ring_id, const char *ring_name)
{
	uint32_t handle, handle_new;
	uint64_t gtt_offset, gtt_offset_new;
	uint32_t *batch_ptr, *batch_ptr_old;
	unsigned split;
	char buf[100];
	int i;

	gem_require_ring(fd, ring_id);

	sprintf(buf, "testing %s cs tlb coherency: ", ring_name);

	/* Shut up gcc, too stupid. */
	batch_ptr_old = NULL;
	handle = 0;
	gtt_offset = 0;

	for (split = 0; split < BATCH_SIZE/8 - 1; split += 2) {
		igt_progress(buf, split, BATCH_SIZE/8 - 1);

		handle_new = gem_create(fd, BATCH_SIZE);
		batch_ptr = gem_mmap__cpu(fd, handle_new, 0, BATCH_SIZE,
					  PROT_READ | PROT_WRITE);
		batch_ptr[split*2] = MI_BATCH_BUFFER_END;

		for (i = split*2 + 2; i < BATCH_SIZE/8; i++)
			batch_ptr[i] = 0xffffffff;

		if (split > 0) {
			gem_sync(fd, handle);
			gem_close(fd, handle);
		}

		igt_assert_eq(exec(fd, handle_new, split, &gtt_offset_new, 0),
			      0);

		if (split > 0) {
			/* Check that we've managed to collide in the tlb. */
			igt_assert(gtt_offset == gtt_offset_new);

			/* We hang onto the storage of the old batch by keeping
			 * the cpu mmap around. */
			munmap(batch_ptr_old, BATCH_SIZE);
		}

		handle = handle_new;
		gtt_offset = gtt_offset_new;
		batch_ptr_old = batch_ptr;
	}

}
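exec() is not shown. For the TLB-collision check to work it must start the batch at the current split offset (two dwords per step, so split * 8 bytes) and report back the object's GTT offset; a hypothetical sketch:

static int exec(int fd, uint32_t handle, unsigned split,
		uint64_t *gtt_ofs, unsigned ring_id)
{
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 execbuf;
	int ret;

	memset(&obj, 0, sizeof(obj));
	obj.handle = handle;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&obj;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = split * 8; /* run just the new BBE */
	execbuf.batch_len = 8;
	execbuf.flags = ring_id;

	ret = __gem_execbuf(fd, &execbuf);
	*gtt_ofs = obj.offset; /* kernel writes back the object's offset */
	return ret;
}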
Example #28
static int prepare_primary_surface(int fd, int prim_width, int prim_height,
				   uint32_t *prim_handle, uint32_t *prim_stride,
				   uint32_t *prim_size, int tiled)
{
	uint32_t                        bytes_per_pixel = sizeof(uint32_t);
	uint32_t                        *prim_fb_ptr;

	if (bytes_per_pixel != sizeof(uint32_t)) {
		printf("Bad bytes_per_pixel for primary surface: %d\n",
			bytes_per_pixel);
		return -EINVAL;
	}

	if (tiled) {
		int                         v;

		/* Round the tiling up to the next power-of-two and the
		 * region up to the next pot fence size so that this works
		 * on all generations.
		 *
		 * This can still fail if the framebuffer is too large to
		 * be tiled. But then that failure is expected.
		 */

		v = prim_width * bytes_per_pixel;
		for (*prim_stride = 512; *prim_stride < v; *prim_stride *= 2)
			;

		v = *prim_stride * prim_height;
		for (*prim_size = 1024*1024; *prim_size < v; *prim_size *= 2)
			;
	} else {
		/* Scan-out has a 64 byte alignment restriction */
		*prim_stride = (prim_width * bytes_per_pixel + 63) & ~63;
		*prim_size = *prim_stride * prim_height;
	}

	*prim_handle = gem_create(fd, *prim_size);

	if (tiled)
		gem_set_tiling(fd, *prim_handle, I915_TILING_X, *prim_stride);

	prim_fb_ptr = gem_mmap(fd, *prim_handle, *prim_size, PROT_READ | PROT_WRITE);

	if (prim_fb_ptr != NULL) {
		/* Write primary surface with gray background */
		memset(prim_fb_ptr, 0x3f, *prim_size);
		munmap(prim_fb_ptr, *prim_size);
	}

	return 0;
}
Example #29
static void *
create_pointer(int fd)
{
	uint32_t handle;
	void *ptr;

	handle = gem_create(fd, OBJECT_SIZE);

	ptr = mmap_bo(fd, handle);

	gem_close(fd, handle);

	return ptr;
}
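mmap_bo() is presumably a thin wrapper around the GTT mmap of the whole object; the point of the example is that the mapping stays valid after gem_close():

static void *mmap_bo(int fd, uint32_t handle)
{
	void *ptr;

	ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
	igt_assert(ptr);

	return ptr;
}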
Example #30
static void
dontneed_before_mmap(void)
{
	int fd = drm_open_driver(DRIVER_INTEL);
	uint32_t handle;
	char *ptr;

	handle = gem_create(fd, OBJECT_SIZE);
	gem_madvise(fd, handle, I915_MADV_DONTNEED);
	ptr = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
	igt_assert(ptr == NULL);
	igt_assert(errno == EFAULT);
	close(fd);
}