/*
 * Self-import of a flink'ed object through dma-buf on a second drm fd:
 * importing the dma-buf must resolve to the handle already opened via
 * the flink name, not mint a fresh one.
 */
static void test_with_one_bo_two_files(void)
{
	int first_fd, second_fd;
	int prime_fd_a, prime_fd_b;
	uint32_t bo, opened, imported, name;

	first_fd = drm_open_driver(DRIVER_INTEL);
	second_fd = drm_open_driver(DRIVER_INTEL);

	/* Create the object on the first fd and export it both ways. */
	bo = gem_create(first_fd, BO_SIZE);
	prime_fd_a = prime_handle_to_fd(first_fd, bo);
	name = gem_flink(first_fd, bo);

	/* Open it on the second fd via flink, then round-trip via dma-buf. */
	opened = gem_open(second_fd, name);
	prime_fd_b = prime_handle_to_fd(second_fd, opened);
	imported = prime_fd_to_handle(second_fd, prime_fd_b);

	/* dma-buf selfimporting an flink bo should give the same handle */
	igt_assert_eq_u32(imported, opened);

	close(first_fd);
	close(second_fd);
	close(prime_fd_a);
	close(prime_fd_b);
}
static void test_access(int fd) { uint32_t handle, flink, handle2; struct drm_i915_gem_mmap_gtt mmap_arg; int fd2; handle = gem_create(fd, OBJECT_SIZE); igt_assert(handle); fd2 = drm_open_driver(DRIVER_INTEL); /* Check that fd1 can mmap. */ mmap_arg.handle = handle; do_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg); igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mmap_arg.offset)); /* Check that the same offset on the other fd doesn't work. */ igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd2, mmap_arg.offset) == MAP_FAILED); igt_assert(errno == EACCES); flink = gem_flink(fd, handle); igt_assert(flink); handle2 = gem_open(fd2, flink); igt_assert(handle2); /* Recheck that it works after flink. */ /* Check that the same offset on the other fd doesn't work. */ igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd2, mmap_arg.offset)); }
/*
 * Stress flink name lookup from many independent drm clients at once:
 * one fd per "context" executes a shared flink'ed batch from forked
 * children across every available engine.
 */
static void processes(void)
{
	const struct intel_execution_engine *e;
	unsigned engines[16];
	int num_engines;
	struct rlimit rlim;
	unsigned num_ctx;
	uint32_t name;
	int fd, *fds;

	fd = drm_open_driver(DRIVER_INTEL);
	num_ctx = get_num_contexts(fd);

	/* Collect the usable engines, capped at ARRAY_SIZE(engines). */
	num_engines = 0;
	for (e = intel_execution_engines; e->name; e++) {
		if (e->exec_id == 0)
			continue;

		if (!has_engine(fd, e))
			continue;

		/*
		 * Skip the BSD alias that doesn't match the hardware: with
		 * two BSD rings only the explicit ring selectors (flags != 0)
		 * are kept, otherwise only the plain I915_EXEC_BSD entry.
		 */
		if (e->exec_id == I915_EXEC_BSD) {
			int is_bsd2 = e->flags != 0;
			if (gem_has_bsd2(fd) != is_bsd2)
				continue;
		}

		engines[num_engines++] = e->exec_id | e->flags;
		if (num_engines == ARRAY_SIZE(engines))
			break;
	}

	/* tweak rlimits to allow us to create this many files */
	igt_assert(getrlimit(RLIMIT_NOFILE, &rlim) == 0);
	if (rlim.rlim_cur < ALIGN(num_ctx + 1024, 1024)) {
		rlim.rlim_cur = ALIGN(num_ctx + 1024, 1024);
		if (rlim.rlim_cur > rlim.rlim_max)
			rlim.rlim_max = rlim.rlim_cur;
		igt_assert(setrlimit(RLIMIT_NOFILE, &rlim) == 0);
	}

	/* One drm fd per simulated process/client. */
	fds = malloc(num_ctx * sizeof(int));
	igt_assert(fds);
	for (unsigned n = 0; n < num_ctx; n++) {
		fds[n] = drm_open_driver(DRIVER_INTEL);
		if (fds[n] == -1) {
			/* Unwind the fds opened so far before asserting. */
			int err = errno;
			for (unsigned i = n; i--; )
				close(fds[i]);
			free(fds);
			errno = err;
			igt_assert_f(0, "failed to create context %lld/%lld\n",
				     (long long)n, (long long)num_ctx);
		}
	}

	/* Build a trivial batch on fd and publish it via flink; note that
	 * `name' is reused: first as the gem handle, then as the flink name. */
	if (1) {
		uint32_t bbe = MI_BATCH_BUFFER_END;

		name = gem_create(fd, 4096);
		gem_write(fd, name, 0, &bbe, sizeof(bbe));
		name = gem_flink(fd, name);
	}

	igt_fork(child, NUM_THREADS) {
		struct drm_i915_gem_execbuffer2 execbuf;
		struct drm_i915_gem_exec_object2 obj;

		memset(&obj, 0, sizeof(obj));
		memset(&execbuf, 0, sizeof(execbuf));
		execbuf.buffers_ptr = (uintptr_t)&obj;
		execbuf.buffer_count = 1;

		/* Each child visits the fds in its own random order so that
		 * open/close of the shared name races across processes. */
		igt_permute_array(fds, num_ctx, xchg_int);
		for (unsigned n = 0; n < num_ctx; n++) {
			obj.handle = gem_open(fds[n], name);
			execbuf.flags = engines[n % num_engines];
			gem_execbuf(fds[n], &execbuf);
			gem_close(fds[n], obj.handle);
		}
	}
	igt_waitchildren();

	for (unsigned n = 0; n < num_ctx; n++)
		close(fds[n]);
	free(fds);
	close(fd);
}