/*
 * Export a bo, drop every original reference, and verify that a dup()ed
 * dma-buf fd alone keeps the underlying object alive and importable.
 */
static void test_with_fd_dup(void)
{
	int file1, file2;
	uint32_t bo, imported;
	int prime_fd, dup_fd;

	counter = 0;

	file1 = drm_open_driver(DRIVER_INTEL);
	file2 = drm_open_driver(DRIVER_INTEL);

	/* export and immediately drop the gem handle */
	bo = gem_create(file1, BO_SIZE);
	prime_fd = prime_handle_to_fd(file1, bo);
	gem_close(file1, bo);

	/* only the duplicate of the dma-buf fd remains */
	dup_fd = dup(prime_fd);
	close(prime_fd);

	imported = prime_fd_to_handle(file2, dup_fd);
	check_bo(file2, imported, file2, imported);

	/* the import must survive closing the last dma-buf fd too */
	close(dup_fd);
	check_bo(file2, imported, file2, imported);

	close(file1);
	close(file2);
}
/*
 * Share one bo between two drm fds via flink, then self-import its
 * dma-buf on the second fd: the import must resolve to the handle the
 * flink open already produced.
 */
static void test_with_one_bo_two_files(void)
{
	int file1, file2;
	int prime1, prime2;
	uint32_t orig_bo, opened_bo, imported_bo, name;

	file1 = drm_open_driver(DRIVER_INTEL);
	file2 = drm_open_driver(DRIVER_INTEL);

	orig_bo = gem_create(file1, BO_SIZE);
	prime1 = prime_handle_to_fd(file1, orig_bo);

	/* pull the same object into file2 through flink */
	name = gem_flink(file1, orig_bo);
	opened_bo = gem_open(file2, name);

	prime2 = prime_handle_to_fd(file2, opened_bo);
	imported_bo = prime_fd_to_handle(file2, prime2);

	/* dma-buf selfimporting an flink bo should give the same handle */
	igt_assert_eq_u32(imported_bo, opened_bo);

	close(file1);
	close(file2);
	close(prime1);
	close(prime2);
}
/*
 * Race many worker threads re-importing a dma-buf against its concurrent
 * close, then verify (via the kernel object count) that the race leaked
 * no GEM objects.
 */
static void test_reimport_close_race(void)
{
	pthread_t *threads;
	int r, i, num_threads;
	int fds[2];	/* fds[0]: drm fd, fds[1]: exported dma-buf fd */
	int obj_count;
	void *status;
	uint32_t handle;
	int fake;

	/* Allocate exit handler fds in here so that we dont screw
	 * up the counts */
	fake = drm_open_driver(DRIVER_INTEL);

	/* baseline object count with an idle GPU */
	gem_quiescent_gpu(fake);
	obj_count = get_object_count();

	num_threads = sysconf(_SC_NPROCESSORS_ONLN);

	threads = calloc(num_threads, sizeof(pthread_t));

	fds[0] = drm_open_driver(DRIVER_INTEL);

	handle = gem_create(fds[0], BO_SIZE);

	fds[1] = prime_handle_to_fd(fds[0], handle);

	/* each worker sees the same {drm fd, dma-buf fd} pair */
	for (i = 0; i < num_threads; i++) {
		r = pthread_create(&threads[i], NULL,
				   thread_fn_reimport_vs_close,
				   (void *)(uintptr_t)fds);
		igt_assert_eq(r, 0);
	}

	/* let the import-vs-close race run for a while */
	sleep(5);

	/* global stop flag polled by the workers */
	pls_die = 1;

	for (i = 0; i < num_threads; i++) {
		pthread_join(threads[i], &status);
		igt_assert(status == 0);
	}

	close(fds[0]);
	close(fds[1]);

	/* re-idle the GPU and compare against the baseline count */
	gem_quiescent_gpu(fake);
	obj_count = get_object_count() - obj_count;

	igt_info("leaked %i objects\n", obj_count);

	close(fake);

	igt_assert_eq(obj_count, 0);
}
/*
 * lseek(SEEK_END) on a dma-buf must report the exact size of the
 * backing bo, for a range of buffer sizes.
 */
static void test_llseek_size(void)
{
	int drm_fd, prime_fd;
	uint32_t bo;
	int step;

	counter = 0;

	drm_fd = drm_open_driver(DRIVER_INTEL);

	/* doubling sizes: 4KiB up to 2MiB */
	for (step = 0; step < 10; step++) {
		int sz = 4096 << step;

		bo = gem_create(drm_fd, sz);
		prime_fd = prime_handle_to_fd(drm_fd, bo);
		gem_close(drm_fd, bo);

		igt_assert(prime_get_size(prime_fd) == sz);

		close(prime_fd);
	}

	close(drm_fd);
}
static void test_llseek_bad(void) { int fd; uint32_t handle; int dma_buf_fd; counter = 0; fd = drm_open_driver(DRIVER_INTEL); handle = gem_create(fd, BO_SIZE); dma_buf_fd = prime_handle_to_fd(fd, handle); gem_close(fd, handle); igt_require(lseek(dma_buf_fd, 0, SEEK_END) >= 0); igt_assert(lseek(dma_buf_fd, -1, SEEK_END) == -1 && errno == EINVAL); igt_assert(lseek(dma_buf_fd, 1, SEEK_SET) == -1 && errno == EINVAL); igt_assert(lseek(dma_buf_fd, BO_SIZE, SEEK_SET) == -1 && errno == EINVAL); igt_assert(lseek(dma_buf_fd, BO_SIZE + 1, SEEK_SET) == -1 && errno == EINVAL); igt_assert(lseek(dma_buf_fd, BO_SIZE - 1, SEEK_SET) == -1 && errno == EINVAL); close(dma_buf_fd); close(fd); }
/*
 * Mark a bo I915_MADV_DONTNEED after mapping it (and after closing the
 * fd, so the pages may be purged) and check that touching the stale
 * mapping raises SIGBUS.
 */
static void dontneed_after_mmap(void)
{
	int fd = drm_open_driver(DRIVER_INTEL);
	uint32_t handle;
	char *ptr;

	handle = gem_create(fd, OBJECT_SIZE);
	ptr = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
	igt_assert(ptr);
	gem_madvise(fd, handle, I915_MADV_DONTNEED);
	close(fd);

	/* sigtrap longjmps back to the setjmp below with the signal number */
	signal(SIGBUS, sigtrap);
	switch (setjmp(jmp)) {
	case SIGBUS:
		/* expected: the write below faulted */
		break;
	case 0:
		*ptr = 0;
		/* intentional fallthrough: a non-faulting write is a failure */
	default:
		igt_assert(!"reached");
		break;
	}
	munmap(ptr, OBJECT_SIZE);
	signal(SIGBUS, SIG_DFL);
}
static void test_access(int fd) { uint32_t handle, flink, handle2; struct drm_i915_gem_mmap_gtt mmap_arg; int fd2; handle = gem_create(fd, OBJECT_SIZE); igt_assert(handle); fd2 = drm_open_driver(DRIVER_INTEL); /* Check that fd1 can mmap. */ mmap_arg.handle = handle; do_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg); igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mmap_arg.offset)); /* Check that the same offset on the other fd doesn't work. */ igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd2, mmap_arg.offset) == MAP_FAILED); igt_assert(errno == EACCES); flink = gem_flink(fd, handle); igt_assert(flink); handle2 = gem_open(fd2, flink); igt_assert(handle2); /* Recheck that it works after flink. */ /* Check that the same offset on the other fd doesn't work. */ igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd2, mmap_arg.offset)); }
/*
 * Light up @output on @pipe with the original master fd, hand DRM master
 * over to a freshly opened fd, and check the new master can queue a
 * pageflip.  Returns false if the output cannot be bound to @pipe.
 */
static bool test(data_t *data, enum pipe pipe, igt_output_t *output)
{
	igt_plane_t *primary;
	drmModeModeInfo *mode;
	struct igt_fb fb[2];
	int fd, ret;

	/* select the pipe we want to use */
	igt_output_set_pipe(output, pipe);
	igt_display_commit(&data->display);

	if (!output->valid) {
		/* unusable output: unbind the pipe again and bail */
		igt_output_set_pipe(output, PIPE_ANY);
		igt_display_commit(&data->display);
		return false;
	}

	primary = igt_output_get_plane(output, IGT_PLANE_PRIMARY);
	mode = igt_output_get_mode(output);

	/* modeset with a black fb on the original master */
	igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
			    DRM_FORMAT_XRGB8888,
			    LOCAL_I915_FORMAT_MOD_X_TILED,
			    0.0, 0.0, 0.0, &fb[0]);
	igt_plane_set_fb(primary, &fb[0]);
	igt_display_commit2(&data->display, COMMIT_LEGACY);

	/* transfer DRM master to the new fd */
	fd = drm_open_driver(DRIVER_INTEL);
	ret = drmDropMaster(data->drm_fd);
	igt_assert_eq(ret, 0);
	ret = drmSetMaster(fd);
	igt_assert_eq(ret, 0);

	/* the new master must be able to flip to its own fb */
	igt_create_color_fb(fd, mode->hdisplay, mode->vdisplay,
			    DRM_FORMAT_XRGB8888,
			    LOCAL_I915_FORMAT_MOD_X_TILED,
			    0.0, 0.0, 0.0, &fb[1]);
	ret = drmModePageFlip(fd, output->config.crtc->crtc_id,
			      fb[1].fb_id, DRM_MODE_PAGE_FLIP_EVENT, data);
	igt_assert_eq(ret, 0);

	ret = close(fd);
	igt_assert_eq(ret, 0);

	/* reclaim master on the original fd and restore the display */
	ret = drmSetMaster(data->drm_fd);
	igt_assert_eq(ret, 0);

	igt_plane_set_fb(primary, NULL);
	igt_output_set_pipe(output, PIPE_ANY);
	igt_display_commit(&data->display);

	igt_remove_fb(data->drm_fd, &fb[0]);

	return true;
}
/*
 * Core prime import/export checks with a single bo: import on a second
 * fd, re-import on both fds (handles must be identical), then verify the
 * object survives dropping the dma-buf fd, the gem handles, and finally
 * the whole exporting fd.
 */
static void test_with_one_bo(void)
{
	int fd1, fd2;
	uint32_t handle, handle_import1, handle_import2, handle_selfimport;
	int dma_buf_fd;

	fd1 = drm_open_driver(DRIVER_INTEL);
	fd2 = drm_open_driver(DRIVER_INTEL);

	handle = gem_create(fd1, BO_SIZE);

	dma_buf_fd = prime_handle_to_fd(fd1, handle);
	handle_import1 = prime_fd_to_handle(fd2, dma_buf_fd);
	check_bo(fd1, handle, fd2, handle_import1);

	/* reimport should give us the same handle so that userspace can check
	 * whether it has that bo already somewhere. */
	handle_import2 = prime_fd_to_handle(fd2, dma_buf_fd);
	igt_assert_eq_u32(handle_import1, handle_import2);

	/* Same for re-importing on the exporting fd. */
	handle_selfimport = prime_fd_to_handle(fd1, dma_buf_fd);
	igt_assert_eq_u32(handle, handle_selfimport);

	/* close dma_buf, check whether nothing disappears. */
	close(dma_buf_fd);
	check_bo(fd1, handle, fd2, handle_import1);

	gem_close(fd1, handle);
	check_bo(fd2, handle_import1, fd2, handle_import1);

	/* re-import into old exporter */
	dma_buf_fd = prime_handle_to_fd(fd2, handle_import1);
	/* but drop all references to the obj in between */
	gem_close(fd2, handle_import1);
	handle = prime_fd_to_handle(fd1, dma_buf_fd);
	handle_import1 = prime_fd_to_handle(fd2, dma_buf_fd);
	check_bo(fd1, handle, fd2, handle_import1);

	/* Completely rip out exporting fd. */
	close(fd1);
	check_bo(fd2, handle_import1, fd2, handle_import1);
}
/*
 * A bo madvised DONTNEED before ever being mapped must refuse the GTT
 * mmap: the kernel rejects it and the wrapper reports EFAULT.
 */
static void dontneed_before_mmap(void)
{
	int device = drm_open_driver(DRIVER_INTEL);
	char *map;
	uint32_t bo;

	bo = gem_create(device, OBJECT_SIZE);
	gem_madvise(device, bo, I915_MADV_DONTNEED);

	map = __gem_mmap__gtt(device, bo, OBJECT_SIZE,
			      PROT_READ | PROT_WRITE);
	igt_assert(map == NULL);
	igt_assert(errno == EFAULT);

	close(device);
}
static int loop(unsigned ring, int reps, unsigned flags) { struct drm_i915_gem_execbuffer2 execbuf; struct drm_i915_gem_exec_object2 gem_exec; int fd; fd = drm_open_driver(DRIVER_INTEL); memset(&gem_exec, 0, sizeof(gem_exec)); gem_exec.handle = batch(fd); memset(&execbuf, 0, sizeof(execbuf)); execbuf.buffers_ptr = (uintptr_t)&gem_exec; execbuf.buffer_count = 1; execbuf.flags = ring; execbuf.flags |= LOCAL_I915_EXEC_HANDLE_LUT; execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC; if (__gem_execbuf(fd, &execbuf)) { execbuf.flags = ring; if (__gem_execbuf(fd, &execbuf)) return 77; } while (reps--) { struct timespec start, end; unsigned count = 0; gem_set_domain(fd, gem_exec.handle, I915_GEM_DOMAIN_GTT, 0); sleep(1); /* wait for the hw to go back to sleep */ clock_gettime(CLOCK_MONOTONIC, &start); do { do_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf); count++; if (flags & SYNC) gem_sync(fd, gem_exec.handle); clock_gettime(CLOCK_MONOTONIC, &end); } while (elapsed(&start, &end) < 2.); gem_sync(fd, gem_exec.handle); clock_gettime(CLOCK_MONOTONIC, &end); printf("%7.3f\n", 1e6*elapsed(&start, &end)/count); } return 0; }
static void dontneed_before_pwrite(void) { int fd = drm_open_driver(DRIVER_INTEL); uint32_t buf[] = { MI_BATCH_BUFFER_END, 0 }; struct drm_i915_gem_pwrite gem_pwrite; gem_pwrite.handle = gem_create(fd, OBJECT_SIZE); gem_pwrite.offset = 0; gem_pwrite.size = sizeof(buf); gem_pwrite.data_ptr = (uintptr_t)buf; gem_madvise(fd, gem_pwrite.handle, I915_MADV_DONTNEED); igt_assert(drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &gem_pwrite)); igt_assert(errno == EFAULT); gem_close(fd, gem_pwrite.handle); close(fd); }
static void *gem_busyspin(void *arg) { const uint32_t bbe = MI_BATCH_BUFFER_END; struct gem_busyspin *bs = arg; struct drm_i915_gem_execbuffer2 execbuf; struct drm_i915_gem_exec_object2 obj; unsigned engines[16]; unsigned nengine; unsigned engine; int fd; fd = drm_open_driver(DRIVER_INTEL); nengine = 0; for_each_engine(fd, engine) if (!ignore_engine(fd, engine)) engines[nengine++] = engine; memset(&obj, 0, sizeof(obj)); obj.handle = gem_create(fd, 4096); gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe)); memset(&execbuf, 0, sizeof(execbuf)); execbuf.buffers_ptr = (uintptr_t)&obj; execbuf.buffer_count = 1; execbuf.flags |= LOCAL_I915_EXEC_HANDLE_LUT; execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC; if (__gem_execbuf(fd, &execbuf)) { execbuf.flags = 0; gem_execbuf(fd, &execbuf); } while (!done) { for (int n = 0; n < nengine; n++) { execbuf.flags &= ~ENGINE_FLAGS; execbuf.flags |= engines[n]; gem_execbuf(fd, &execbuf); } bs->count += nengine; } close(fd); return NULL; }
static void dontneed_before_exec(void) { int fd = drm_open_driver(DRIVER_INTEL); struct drm_i915_gem_execbuffer2 execbuf; struct drm_i915_gem_exec_object2 exec; uint32_t buf[] = { MI_BATCH_BUFFER_END, 0 }; memset(&execbuf, 0, sizeof(execbuf)); memset(&exec, 0, sizeof(exec)); exec.handle = gem_create(fd, OBJECT_SIZE); gem_write(fd, exec.handle, 0, buf, sizeof(buf)); gem_madvise(fd, exec.handle, I915_MADV_DONTNEED); execbuf.buffers_ptr = (uintptr_t)&exec; execbuf.buffer_count = 1; execbuf.batch_len = sizeof(buf); gem_execbuf(fd, &execbuf); gem_close(fd, exec.handle); close(fd); }
/*
 * Microbenchmark: time gem_read()/gem_write() of doubling sizes against
 * one bo and print the trimean per size.
 * Options: -D read|write (direction), -d cpu|gtt (set-domain target),
 *          -r N (repetitions per size).
 */
int main(int argc, char **argv)
{
	int fd = drm_open_driver(DRIVER_INTEL);
	int domain = I915_GEM_DOMAIN_GTT;
	enum dir { READ, WRITE } dir = READ;
	void *buf = malloc(OBJECT_SIZE);
	uint32_t handle;
	int reps = 13;
	int c, size;

	while ((c = getopt (argc, argv, "D:d:r:")) != -1) {
		switch (c) {
		case 'd':
			if (strcmp(optarg, "cpu") == 0)
				domain = I915_GEM_DOMAIN_CPU;
			else if (strcmp(optarg, "gtt") == 0)
				domain = I915_GEM_DOMAIN_GTT;
			break;

		case 'D':
			if (strcmp(optarg, "read") == 0)
				dir = READ;
			else if (strcmp(optarg, "write") == 0)
				dir = WRITE;
			else
				abort();
			break;

		case 'r':
			reps = atoi(optarg);
			if (reps < 1)
				reps = 1;
			break;

		default:
			break;
		}
	}

	handle = gem_create(fd, OBJECT_SIZE);

	/* doubling transfer sizes up to the full object */
	for (size = 1; size <= OBJECT_SIZE; size <<= 1) {
		igt_stats_t stats;
		int n;

		igt_stats_init_with_size(&stats, reps);

		for (n = 0; n < reps; n++) {
			struct timespec start, end;

			/* move to the target domain outside the timing */
			gem_set_domain(fd, handle, domain, domain);

			clock_gettime(CLOCK_MONOTONIC, &start);
			if (dir == READ)
				gem_read(fd, handle, 0, buf, size);
			else
				gem_write(fd, handle, 0, buf, size);
			clock_gettime(CLOCK_MONOTONIC, &end);

			igt_stats_push(&stats, elapsed(&start, &end));
		}

		printf("%7.3f\n", igt_stats_get_trimean(&stats)/1000);
		igt_stats_fini(&stats);
	}

	return 0;
}
/*
 * Throughput benchmark: stream memcpy/memset/pagefaults through a CPU,
 * GTT or WC mapping of one bo and print MiB/s per repetition.
 * Options: -m cpu|gtt|wc, -d read|write|clear|fault, -t x|y|none, -r N.
 */
int main(int argc, char **argv)
{
	int fd = drm_open_driver(DRIVER_INTEL);
	enum map {CPU, GTT, WC} map = CPU;
	enum dir {READ, WRITE, CLEAR, FAULT} dir = READ;
	int tiling = I915_TILING_NONE;
	struct timespec start, end;
	void *buf = malloc(OBJECT_SIZE);
	uint32_t handle;
	void *ptr, *src, *dst;
	int reps = 1;
	int loops;
	int c;

	while ((c = getopt (argc, argv, "m:d:r:t:")) != -1) {
		switch (c) {
		case 'm':
			if (strcmp(optarg, "cpu") == 0)
				map = CPU;
			else if (strcmp(optarg, "gtt") == 0)
				map = GTT;
			else if (strcmp(optarg, "wc") == 0)
				map = WC;
			else
				abort();
			break;
		case 'd':
			if (strcmp(optarg, "read") == 0)
				dir = READ;
			else if (strcmp(optarg, "write") == 0)
				dir = WRITE;
			else if (strcmp(optarg, "clear") == 0)
				dir = CLEAR;
			else if (strcmp(optarg, "fault") == 0)
				dir = FAULT;
			else
				abort();
			break;
		case 't':
			if (strcmp(optarg, "x") == 0)
				tiling = I915_TILING_X;
			else if (strcmp(optarg, "y") == 0)
				tiling = I915_TILING_Y;
			else if (strcmp(optarg, "none") == 0)
				tiling = I915_TILING_NONE;
			else
				abort();
			break;
		case 'r':
			reps = atoi(optarg);
			if (reps < 1)
				reps = 1;
			break;
		default:
			break;
		}
	}

	handle = gem_create(fd, OBJECT_SIZE);

	/* map the bo once up front and move it to the matching domain */
	switch (map) {
	case CPU:
		ptr = gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
		gem_set_domain(fd, handle,
			       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
		break;
	case GTT:
		ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_WRITE);
		gem_set_domain(fd, handle,
			       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
		break;
	case WC:
		ptr = gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
		gem_set_domain(fd, handle,
			       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
		break;
	default:
		abort();
	}

	gem_set_tiling(fd, handle, tiling, 512);

	if (dir == READ) {
		src = ptr;
		dst = buf;
	} else {
		src = buf;
		dst = ptr;
	}

	/* calibrate: time one pass, then size the loop count for ~2s */
	clock_gettime(CLOCK_MONOTONIC, &start);
	switch (dir) {
	case CLEAR:
	case FAULT:
		memset(dst, 0, OBJECT_SIZE);
		break;
	default:
		memcpy(dst, src, OBJECT_SIZE);
		break;
	}
	clock_gettime(CLOCK_MONOTONIC, &end);
	loops = 2 / elapsed(&start, &end);

	while (reps--) {
		clock_gettime(CLOCK_MONOTONIC, &start);

		for (c = 0; c < loops; c++) {
			int page;

			switch (dir) {
			case CLEAR:
				memset(dst, 0, OBJECT_SIZE);
				break;
			case FAULT:
				/* drop the mapping and fault in each page */
				munmap(ptr, OBJECT_SIZE);
				switch (map) {
				case CPU:
					ptr = gem_mmap__cpu(fd, handle, 0,
							    OBJECT_SIZE,
							    PROT_WRITE);
					break;
				case GTT:
					ptr = gem_mmap__gtt(fd, handle,
							    OBJECT_SIZE,
							    PROT_WRITE);
					break;
				case WC:
					ptr = gem_mmap__wc(fd, handle, 0,
							   OBJECT_SIZE,
							   PROT_WRITE);
					break;
				default:
					abort();
				}
				/* touch one word per page; the compiler
				 * barrier keeps the load alive */
				for (page = 0; page < OBJECT_SIZE; page += 4096) {
					uint32_t *x = (uint32_t *)ptr + page/4;
					__asm__ __volatile__("": : :"memory");
					page += *x; /* should be zero! */
				}
				break;
			default:
				memcpy(dst, src, OBJECT_SIZE);
				break;
			}
		}

		clock_gettime(CLOCK_MONOTONIC, &end);
		printf("%7.3f\n",
		       OBJECT_SIZE / elapsed(&start, &end) * loops / (1024*1024));
	}

	return 0;
}
/*
 * testdisplay entry point: parse mode-test options, set up the glib
 * mainloop with hotplug and stdin watches, run the display tests and
 * tear everything down via the goto cleanup chain.
 */
int main(int argc, char **argv)
{
	int c;
	int ret = 0;
	GIOChannel *stdinchannel;
	GMainLoop *mainloop;
	float force_clock;
	bool opt_dump_info = false;
	struct option long_opts[] = {
		{"list-subtests", 0, 0, SUBTEST_OPTS},
		{"run-subtest", 1, 0, SUBTEST_OPTS},
		{"help-description", 0, 0, HELP_DESCRIPTION},
		{"help", 0, 0, 'h'},
		{"yb", 0, 0, Yb_OPT},
		{"yf", 0, 0, Yf_OPT},
		{ 0, 0, 0, 0 }
	};

	igt_skip_on_simulation();

	enter_exec_path( argv );

	while ((c = getopt_long(argc, argv, optstr, long_opts, NULL)) != -1) {
		switch (c) {
		case '3':
			test_stereo_modes = 1;
			break;
		case 'i':
			opt_dump_info = true;
			break;
		case 'a':
			test_all_modes = 1;
			break;
		case 'f':
			force_mode = 1;
			/* "clock,hdisp,hss,hse,htot,vdisp,vss,vse,vtot" */
			if(sscanf(optarg,"%f,%hu,%hu,%hu,%hu,%hu,%hu,%hu,%hu",
				  &force_clock,&force_timing.hdisplay,
				  &force_timing.hsync_start,&force_timing.hsync_end,&force_timing.htotal,
				  &force_timing.vdisplay, &force_timing.vsync_start,
				  &force_timing.vsync_end, &force_timing.vtotal)!= 9)
				usage(argv[0], c);
			force_timing.clock = force_clock*1000;
			break;
		case 's':
			sleep_between_modes = atoi(optarg);
			break;
		case 'j':
			do_dpms = atoi(optarg);
			if (do_dpms == 0)
				do_dpms = DRM_MODE_DPMS_OFF;
			break;
		case 'd':
			depth = atoi(optarg);
			igt_info("using depth %d\n", depth);
			break;
		case 'p':
			if (sscanf(optarg, "%d,%d,%d,%d,%d,%d",
				   &plane_width, &plane_height,
				   &crtc_x, &crtc_y, &crtc_w, &crtc_h) != 6)
				usage(argv[0], c);
			test_plane = 1;
			break;
		case 'm':
			test_preferred_mode = 1;
			break;
		case 't':
			tiling = LOCAL_I915_FORMAT_MOD_X_TILED;
			break;
		case 'y':
		case Yb_OPT:
			tiling = LOCAL_I915_FORMAT_MOD_Y_TILED;
			break;
		case Yf_OPT:
			tiling = LOCAL_I915_FORMAT_MOD_Yf_TILED;
			break;
		case 'r':
			qr_code = 1;
			break;
		case 'o':
			sscanf(optarg, "%d,%d", &specified_disp_id,
			       &specified_mode_num);
			break;
		case SUBTEST_OPTS:
			/* invalid subtest options */
			exit(IGT_EXIT_INVALID);
			break;
		case HELP_DESCRIPTION:
			igt_info("Tests display functionality.");
			exit(0);
			break;
		default:
			/* fall through */
		case 'h':
			usage(argv[0], c);
			break;
		}
	}

	set_termio_mode();

	if (depth <= 8)
		bpp = 8;
	else if (depth <= 16)
		bpp = 16;
	else if (depth <= 32)
		bpp = 32;

	/* default to testing all modes when nothing specific was requested */
	if (!test_all_modes && !force_mode && !test_preferred_mode &&
	    specified_mode_num == -1 && !test_stereo_modes)
		test_all_modes = 1;

	drm_fd = drm_open_driver(DRIVER_INTEL);

	if (test_stereo_modes &&
	    drmSetClientCap(drm_fd, DRM_CLIENT_CAP_STEREO_3D, 1) < 0) {
		igt_warn("DRM_CLIENT_CAP_STEREO_3D failed\n");
		goto out_close;
	}

	if (opt_dump_info) {
		dump_info();
		goto out_close;
	}

	kmstest_set_vt_graphics_mode();

	mainloop = g_main_loop_new(NULL, FALSE);
	if (!mainloop) {
		igt_warn("failed to create glib mainloop\n");
		ret = -1;
		goto out_close;
	}

	if (!testdisplay_setup_hotplug()) {
		igt_warn("failed to initialize hotplug support\n");
		goto out_mainloop;
	}

	stdinchannel = g_io_channel_unix_new(0);
	if (!stdinchannel) {
		igt_warn("failed to create stdin GIO channel\n");
		goto out_hotplug;
	}

	ret = g_io_add_watch(stdinchannel, G_IO_IN | G_IO_ERR, input_event,
			     NULL);
	if (ret < 0) {
		igt_warn("failed to add watch on stdin GIO channel\n");
		goto out_stdio;
	}

	ret = 0;

	if (!update_display()) {
		ret = 1;
		goto out_stdio;
	}

	/* in test-all-modes mode everything already ran synchronously */
	if (test_all_modes)
		goto out_stdio;

	g_main_loop_run(mainloop);

out_stdio:
	g_io_channel_shutdown(stdinchannel, TRUE, NULL);
out_hotplug:
	testdisplay_cleanup_hotplug();
out_mainloop:
	g_main_loop_unref(mainloop);
out_close:
	close(drm_fd);

	igt_assert_eq(ret, 0);

	igt_exit();
}
/*
 * Copy-throughput benchmark: build a linear blit batch copying @object
 * bytes, optionally chain @batch copies into one submission, then print
 * the best sustained copy rate over @reps repetitions of ~@time ms each.
 */
static int run(int object, int batch, int time, int reps)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 exec[3];
	struct drm_i915_gem_relocation_entry *reloc;
	uint32_t *buf, handle, src, dst;
	int fd, len, gen, size, nreloc;
	int ring, count;

	size = ALIGN(batch * 64, 4096);
	reloc = malloc(sizeof(*reloc)*size/32*2);

	fd = drm_open_driver(DRIVER_INTEL);
	handle = gem_create(fd, size);
	buf = gem_mmap__cpu(fd, handle, 0, size, PROT_WRITE);

	gen = intel_gen(intel_get_drm_devid(fd));
	has_64bit_reloc = gen >= 8;

	src = gem_create(fd, object);
	dst = gem_create(fd, object);

	/* emit the batch once to learn its length and reloc count */
	len = gem_linear_blt(fd, buf, 0, 0, 1, object, reloc);
	if (has_64bit_reloc)
		nreloc = len > 56 ? 4 : 2;
	else
		nreloc = len > 40 ? 4 : 2;

	memset(exec, 0, sizeof(exec));
	exec[0].handle = src;
	exec[1].handle = dst;
	exec[2].handle = handle;
	exec[2].relocs_ptr = (uintptr_t)reloc;
	exec[2].relocation_count = nreloc;

	ring = 0;
	if (gen >= 6)
		ring = I915_EXEC_BLT;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)exec;
	execbuf.buffer_count = 3;
	execbuf.batch_len = len;
	execbuf.flags = ring;
	execbuf.flags |= LOCAL_I915_EXEC_HANDLE_LUT;

	if (__gem_execbuf(fd, &execbuf)) {
		/* no HANDLE_LUT support: rewrite the batch with real
		 * handles and retry without the flag */
		gem_set_domain(fd, handle,
			       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
		len = gem_linear_blt(fd, buf, 0, src, dst, object, reloc);
		igt_assert(len == execbuf.batch_len);
		execbuf.flags = ring;
		gem_execbuf(fd, &execbuf);
	}
	gem_sync(fd, handle);

	if (batch > 1) {
		/* with HANDLE_LUT the blt references table indices */
		if (execbuf.flags & LOCAL_I915_EXEC_HANDLE_LUT) {
			src = 0;
			dst = 1;
		}

		gem_set_domain(fd, handle,
			       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
		/* chain batch-1 more copies onto the same batch buffer */
		for (int i = 1; i < batch; i++) {
			len = gem_linear_blt(fd, buf, len - 8,
					     src, dst, object,
					     reloc + nreloc * i);
		}
		exec[2].relocation_count = nreloc * batch;
		execbuf.batch_len = len;

		gem_execbuf(fd, &execbuf);
		gem_sync(fd, handle);
	}
	if (execbuf.flags & LOCAL_I915_EXEC_HANDLE_LUT)
		execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC;

	/* Guess how many loops we need for 0.1s */
	count = baseline((uint64_t)object * batch, 100);

	while (reps--) {
		double min = HUGE_VAL;

		for (int s = 0; s <= time / 100; s++) {
			struct timespec start, end;
			double t;

			clock_gettime(CLOCK_MONOTONIC, &start);
			for (int loop = 0; loop < count; loop++)
				gem_execbuf(fd, &execbuf);
			gem_sync(fd, handle);
			clock_gettime(CLOCK_MONOTONIC, &end);

			t = elapsed(&start, &end);
			if (t < min)
				min = t;
		}

		printf("%7.3f\n", object/(1024*1024.)*batch*count/min);
	}

	close(fd);
	return 0;
}
/*
 * Stress flink sharing across many DRM fds: open as many fds as the
 * kernel allows contexts, publish one batch via flink, and execute it on
 * every fd from several forked children in randomized order.
 */
static void processes(void)
{
	const struct intel_execution_engine *e;
	unsigned engines[16];
	int num_engines;
	struct rlimit rlim;
	unsigned num_ctx;
	uint32_t name;
	int fd, *fds;

	fd = drm_open_driver(DRIVER_INTEL);
	num_ctx = get_num_contexts(fd);

	/* collect usable engines, skipping aliases and absent rings */
	num_engines = 0;
	for (e = intel_execution_engines; e->name; e++) {
		if (e->exec_id == 0)
			continue;

		if (!has_engine(fd, e))
			continue;

		/* only keep the BSD alias that matches the hardware */
		if (e->exec_id == I915_EXEC_BSD) {
			int is_bsd2 = e->flags != 0;
			if (gem_has_bsd2(fd) != is_bsd2)
				continue;
		}

		engines[num_engines++] = e->exec_id | e->flags;
		if (num_engines == ARRAY_SIZE(engines))
			break;
	}

	/* tweak rlimits to allow us to create this many files */
	igt_assert(getrlimit(RLIMIT_NOFILE, &rlim) == 0);
	if (rlim.rlim_cur < ALIGN(num_ctx + 1024, 1024)) {
		rlim.rlim_cur = ALIGN(num_ctx + 1024, 1024);
		if (rlim.rlim_cur > rlim.rlim_max)
			rlim.rlim_max = rlim.rlim_cur;
		igt_assert(setrlimit(RLIMIT_NOFILE, &rlim) == 0);
	}

	fds = malloc(num_ctx * sizeof(int));
	igt_assert(fds);
	for (unsigned n = 0; n < num_ctx; n++) {
		fds[n] = drm_open_driver(DRIVER_INTEL);
		if (fds[n] == -1) {
			/* out of fds: clean up before failing loudly */
			int err = errno;
			for (unsigned i = n; i--; )
				close(fds[i]);
			free(fds);
			errno = err;
			igt_assert_f(0, "failed to create context %lld/%lld\n",
				     (long long)n, (long long)num_ctx);
		}
	}

	if (1) {
		/* build a trivial batch and publish it via flink */
		uint32_t bbe = MI_BATCH_BUFFER_END;
		name = gem_create(fd, 4096);
		gem_write(fd, name, 0, &bbe, sizeof(bbe));
		name = gem_flink(fd, name);
	}

	igt_fork(child, NUM_THREADS) {
		struct drm_i915_gem_execbuffer2 execbuf;
		struct drm_i915_gem_exec_object2 obj;

		memset(&obj, 0, sizeof(obj));
		memset(&execbuf, 0, sizeof(execbuf));
		execbuf.buffers_ptr = (uintptr_t)&obj;
		execbuf.buffer_count = 1;

		/* each child walks the fds in its own random order */
		igt_permute_array(fds, num_ctx, xchg_int);
		for (unsigned n = 0; n < num_ctx; n++) {
			obj.handle = gem_open(fds[n], name);
			execbuf.flags = engines[n % num_engines];
			gem_execbuf(fds[n], &execbuf);
			gem_close(fds[n], obj.handle);
		}
	}
	igt_waitchildren();

	for (unsigned n = 0; n < num_ctx; n++)
		close(fds[n]);
	free(fds);
	close(fd);
}
/*
 * Inject a hang on the default ring and wait for the reset/recovery
 * to complete before closing the device.
 */
static void test_hang_gpu(void)
{
	int device;

	device = drm_open_driver(DRIVER_INTEL);
	igt_post_hang_ring(device,
			   igt_hang_ring(device, I915_EXEC_DEFAULT));
	close(device);
}
/*
 * Latency benchmark: time memcpy/memset/pagefaults through a CPU, GTT or
 * WC mapping for doubling transfer sizes and print the trimean per size.
 * Options: -m cpu|gtt|wc, -d read|write|clear|fault, -t x|y|none, -r N.
 */
int main(int argc, char **argv)
{
	int fd = drm_open_driver(DRIVER_INTEL);
	enum map {CPU, GTT, WC} map = CPU;
	enum dir {READ, WRITE, CLEAR, FAULT} dir = READ;
	int tiling = I915_TILING_NONE;
	void *buf = malloc(OBJECT_SIZE);
	uint32_t handle;
	void *ptr, *src, *dst;
	int reps = 13;
	int c, size;

	while ((c = getopt (argc, argv, "m:d:r:t:")) != -1) {
		switch (c) {
		case 'm':
			if (strcmp(optarg, "cpu") == 0)
				map = CPU;
			else if (strcmp(optarg, "gtt") == 0)
				map = GTT;
			else if (strcmp(optarg, "wc") == 0)
				map = WC;
			else
				abort();
			break;
		case 'd':
			if (strcmp(optarg, "read") == 0)
				dir = READ;
			else if (strcmp(optarg, "write") == 0)
				dir = WRITE;
			else if (strcmp(optarg, "clear") == 0)
				dir = CLEAR;
			else if (strcmp(optarg, "fault") == 0)
				dir = FAULT;
			else
				abort();
			break;
		case 't':
			if (strcmp(optarg, "x") == 0)
				tiling = I915_TILING_X;
			else if (strcmp(optarg, "y") == 0)
				tiling = I915_TILING_Y;
			else if (strcmp(optarg, "none") == 0)
				tiling = I915_TILING_NONE;
			else
				abort();
			break;
		case 'r':
			reps = atoi(optarg);
			if (reps < 1)
				reps = 1;
			break;
		default:
			break;
		}
	}

	handle = gem_create(fd, OBJECT_SIZE);

	/* map the bo once and move it to the matching domain */
	switch (map) {
	case CPU:
		ptr = gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
		gem_set_domain(fd, handle,
			       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
		break;
	case GTT:
		ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_WRITE);
		gem_set_domain(fd, handle,
			       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
		break;
	case WC:
		ptr = gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
		gem_set_domain(fd, handle,
			       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
		break;
	default:
		abort();
	}

	gem_set_tiling(fd, handle, tiling, 512);

	if (dir == READ) {
		src = ptr;
		dst = buf;
	} else {
		src = buf;
		dst = ptr;
	}

	/* doubling transfer sizes up to the full object */
	for (size = 1; size <= OBJECT_SIZE; size <<= 1) {
		igt_stats_t stats;
		int n;

		igt_stats_init_with_size(&stats, reps);

		for (n = 0; n < reps; n++) {
			struct timespec start, end;
			int page;

			clock_gettime(CLOCK_MONOTONIC, &start);
			switch (dir) {
			case CLEAR:
				memset(dst, 0, size);
				break;
			case FAULT:
				/* touch one word per page of the mapping */
				for (page = 0; page < OBJECT_SIZE; page += 4096) {
					uint32_t *x = (uint32_t *)ptr + page/4;
					page += *x; /* should be zero! */
				}
				break;
			default:
				memcpy(dst, src, size);
				break;
			}
			clock_gettime(CLOCK_MONOTONIC, &end);

			igt_stats_push(&stats, elapsed(&start, &end));
		}

		printf("%7.3f\n", igt_stats_get_trimean(&stats)/1000);
		igt_stats_fini(&stats);
	}

	return 0;
}
/*
 * gem_mmap benchmark: measure read/write/clear and pread/pwrite times
 * through CPU, GTT and WC mappings of one bo, for linear, X and Y
 * tiling.  argv[1] optionally overrides the object size in bytes.
 */
int main(int argc, char **argv)
{
	struct timeval start, end;
	uint8_t *buf;
	uint32_t handle;
	int size = OBJECT_SIZE;
	int loop, i, tiling;
	int fd;

	igt_simple_init(argc, argv);

	igt_skip_on_simulation();

	if (argc > 1)
		size = atoi(argv[1]);
	if (size == 0) {
		igt_warn("Invalid object size specified\n");
		return 1;
	}

	buf = malloc(size);
	memset(buf, 0, size);
	fd = drm_open_driver(DRIVER_INTEL);

	handle = gem_create(fd, size);
	igt_assert(handle);

	for (tiling = I915_TILING_NONE; tiling <= I915_TILING_Y; tiling++) {
		if (tiling != I915_TILING_NONE) {
			igt_info("\nSetting tiling mode to %s\n",
				 tiling == I915_TILING_X ? "X" : "Y");
			gem_set_tiling(fd, handle, tiling, 512);
		}

		/* CPU-map timings only run untiled */
		if (tiling == I915_TILING_NONE) {
			gem_set_domain(fd, handle,
				       I915_GEM_DOMAIN_CPU,
				       I915_GEM_DOMAIN_CPU);

			{
				/* prefault the cpu map before timing */
				uint32_t *base = gem_mmap__cpu(fd, handle, 0,
							       size,
							       PROT_READ | PROT_WRITE);
				volatile uint32_t *ptr = base;
				int x = 0;

				for (i = 0; i < size/sizeof(*ptr); i++)
					x += ptr[i];

				/* force overtly clever gcc to actually compute x */
				ptr[0] = x;

				munmap(base, size);

				/* mmap read */
				gettimeofday(&start, NULL);
				for (loop = 0; loop < 1000; loop++) {
					base = gem_mmap__cpu(fd, handle, 0,
							     size,
							     PROT_READ | PROT_WRITE);
					ptr = base;
					x = 0;

					for (i = 0; i < size/sizeof(*ptr); i++)
						x += ptr[i];

					/* force overtly clever gcc to actually compute x */
					ptr[0] = x;

					munmap(base, size);
				}
				gettimeofday(&end, NULL);
				igt_info("Time to read %dk through a CPU map: %7.3fµs\n",
					 size/1024, elapsed(&start, &end, loop));

				/* mmap write */
				gettimeofday(&start, NULL);
				for (loop = 0; loop < 1000; loop++) {
					base = gem_mmap__cpu(fd, handle, 0,
							     size,
							     PROT_READ | PROT_WRITE);
					ptr = base;

					for (i = 0; i < size/sizeof(*ptr); i++)
						ptr[i] = i;

					munmap(base, size);
				}
				gettimeofday(&end, NULL);
				igt_info("Time to write %dk through a CPU map: %7.3fµs\n",
					 size/1024, elapsed(&start, &end, loop));

				/* clear through a fresh map per pass */
				gettimeofday(&start, NULL);
				for (loop = 0; loop < 1000; loop++) {
					base = gem_mmap__cpu(fd, handle, 0,
							     size,
							     PROT_READ | PROT_WRITE);
					memset(base, 0, size);
					munmap(base, size);
				}
				gettimeofday(&end, NULL);
				igt_info("Time to clear %dk through a CPU map: %7.3fµs\n",
					 size/1024, elapsed(&start, &end, loop));

				/* clear through one long-lived (cached) map */
				gettimeofday(&start, NULL);
				base = gem_mmap__cpu(fd, handle, 0, size,
						     PROT_READ | PROT_WRITE);
				for (loop = 0; loop < 1000; loop++)
					memset(base, 0, size);
				munmap(base, size);
				gettimeofday(&end, NULL);
				igt_info("Time to clear %dk through a cached CPU map: %7.3fµs\n",
					 size/1024, elapsed(&start, &end, loop));
			}

			/* CPU pwrite */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++)
				gem_write(fd, handle, 0, buf, size);
			gettimeofday(&end, NULL);
			igt_info("Time to pwrite %dk through the CPU: %7.3fµs\n",
				 size/1024, elapsed(&start, &end, loop));

			/* CPU pread */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++)
				gem_read(fd, handle, 0, buf, size);
			gettimeofday(&end, NULL);
			igt_info("Time to pread %dk through the CPU: %7.3fµs\n",
				 size/1024, elapsed(&start, &end, loop));
		}

		/* prefault into gtt */
		{
			uint32_t *base = gem_mmap__gtt(fd, handle, size,
						       PROT_READ | PROT_WRITE);
			volatile uint32_t *ptr = base;
			int x = 0;

			for (i = 0; i < size/sizeof(*ptr); i++)
				x += ptr[i];

			/* force overtly clever gcc to actually compute x */
			ptr[0] = x;

			munmap(base, size);
		}
		/* mmap read */
		gettimeofday(&start, NULL);
		for (loop = 0; loop < 1000; loop++) {
			uint32_t *base = gem_mmap__gtt(fd, handle, size,
						       PROT_READ | PROT_WRITE);
			volatile uint32_t *ptr = base;
			int x = 0;

			for (i = 0; i < size/sizeof(*ptr); i++)
				x += ptr[i];

			/* force overtly clever gcc to actually compute x */
			ptr[0] = x;

			munmap(base, size);
		}
		gettimeofday(&end, NULL);
		igt_info("Time to read %dk through a GTT map: %7.3fµs\n",
			 size/1024, elapsed(&start, &end, loop));

		if (gem_mmap__has_wc(fd)) {
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++) {
				uint32_t *base = gem_mmap__wc(fd, handle, 0, size,
							      PROT_READ | PROT_WRITE);
				volatile uint32_t *ptr = base;
				int x = 0;

				for (i = 0; i < size/sizeof(*ptr); i++)
					x += ptr[i];

				/* force overtly clever gcc to actually compute x */
				ptr[0] = x;

				munmap(base, size);
			}
			gettimeofday(&end, NULL);
			igt_info("Time to read %dk through a WC map: %7.3fµs\n",
				 size/1024, elapsed(&start, &end, loop));
		}

		/* mmap write */
		gettimeofday(&start, NULL);
		for (loop = 0; loop < 1000; loop++) {
			uint32_t *base = gem_mmap__gtt(fd, handle, size,
						       PROT_READ | PROT_WRITE);
			volatile uint32_t *ptr = base;

			for (i = 0; i < size/sizeof(*ptr); i++)
				ptr[i] = i;

			munmap(base, size);
		}
		gettimeofday(&end, NULL);
		igt_info("Time to write %dk through a GTT map: %7.3fµs\n",
			 size/1024, elapsed(&start, &end, loop));

		if (gem_mmap__has_wc(fd)) {
			/* mmap write */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++) {
				uint32_t *base = gem_mmap__wc(fd, handle, 0, size,
							      PROT_READ | PROT_WRITE);
				volatile uint32_t *ptr = base;

				for (i = 0; i < size/sizeof(*ptr); i++)
					ptr[i] = i;

				munmap(base, size);
			}
			gettimeofday(&end, NULL);
			igt_info("Time to write %dk through a WC map: %7.3fµs\n",
				 size/1024, elapsed(&start, &end, loop));
		}

		/* mmap clear */
		gettimeofday(&start, NULL);
		for (loop = 0; loop < 1000; loop++) {
			uint32_t *base = gem_mmap__gtt(fd, handle, size,
						       PROT_READ | PROT_WRITE);
			memset(base, 0, size);
			munmap(base, size);
		}
		gettimeofday(&end, NULL);
		igt_info("Time to clear %dk through a GTT map: %7.3fµs\n",
			 size/1024, elapsed(&start, &end, loop));

		if (gem_mmap__has_wc(fd)) {
			/* mmap clear */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++) {
				uint32_t *base = gem_mmap__wc(fd, handle, 0, size,
							      PROT_READ | PROT_WRITE);
				memset(base, 0, size);
				munmap(base, size);
			}
			gettimeofday(&end, NULL);
			igt_info("Time to clear %dk through a WC map: %7.3fµs\n",
				 size/1024, elapsed(&start, &end, loop));
		}

		/* clear through one long-lived GTT map */
		gettimeofday(&start, NULL);{
			uint32_t *base = gem_mmap__gtt(fd, handle, size,
						       PROT_READ | PROT_WRITE);
			for (loop = 0; loop < 1000; loop++)
				memset(base, 0, size);
			munmap(base, size);
		} gettimeofday(&end, NULL);
		igt_info("Time to clear %dk through a cached GTT map: %7.3fµs\n",
			 size/1024, elapsed(&start, &end, loop));

		if (gem_mmap__has_wc(fd)) {
			/* clear through one long-lived WC map */
			gettimeofday(&start, NULL);{
				uint32_t *base = gem_mmap__wc(fd, handle, 0, size,
							      PROT_READ | PROT_WRITE);
				for (loop = 0; loop < 1000; loop++)
					memset(base, 0, size);
				munmap(base, size);
			} gettimeofday(&end, NULL);
			igt_info("Time to clear %dk through a cached WC map: %7.3fµs\n",
				 size/1024, elapsed(&start, &end, loop));
		}

		/* mmap read */
		gettimeofday(&start, NULL);
		for (loop = 0; loop < 1000; loop++) {
			uint32_t *base = gem_mmap__gtt(fd, handle, size,
						       PROT_READ | PROT_WRITE);
			volatile uint32_t *ptr = base;
			int x = 0;

			for (i = 0; i < size/sizeof(*ptr); i++)
				x += ptr[i];

			/* force overtly clever gcc to actually compute x */
			ptr[0] = x;

			munmap(base, size);
		}
		gettimeofday(&end, NULL);
		igt_info("Time to read %dk (again) through a GTT map: %7.3fµs\n",
			 size/1024, elapsed(&start, &end, loop));

		if (tiling == I915_TILING_NONE) {
			/* GTT pwrite */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++)
				gem_write(fd, handle, 0, buf, size);
			gettimeofday(&end, NULL);
			igt_info("Time to pwrite %dk through the GTT: %7.3fµs\n",
				 size/1024, elapsed(&start, &end, loop));

			/* GTT pread */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++)
				gem_read(fd, handle, 0, buf, size);
			gettimeofday(&end, NULL);
			igt_info("Time to pread %dk through the GTT: %7.3fµs\n",
				 size/1024, elapsed(&start, &end, loop));

			/* GTT pwrite, including clflush */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++) {
				gem_write(fd, handle, 0, buf, size);
				gem_sync(fd, handle);
			}
			gettimeofday(&end, NULL);
			igt_info("Time to pwrite %dk through the GTT (clflush): %7.3fµs\n",
				 size/1024, elapsed(&start, &end, loop));

			/* GTT pread, including clflush */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++) {
				gem_sync(fd, handle);
				gem_read(fd, handle, 0, buf, size);
			}
			gettimeofday(&end, NULL);
			igt_info("Time to pread %dk through the GTT (clflush): %7.3fµs\n",
				 size/1024, elapsed(&start, &end, loop));

			/* partial writes */
			igt_info("Now partial writes.\n");
			size /= 4;

			/* partial GTT pwrite, including clflush */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++) {
				gem_write(fd, handle, 0, buf, size);
				gem_sync(fd, handle);
			}
			gettimeofday(&end, NULL);
			igt_info("Time to pwrite %dk through the GTT (clflush): %7.3fµs\n",
				 size/1024, elapsed(&start, &end, loop));

			/* partial GTT pread, including clflush */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++) {
				gem_sync(fd, handle);
				gem_read(fd, handle, 0, buf, size);
			}
			gettimeofday(&end, NULL);
			igt_info("Time to pread %dk through the GTT (clflush): %7.3fµs\n",
				 size/1024, elapsed(&start, &end, loop));

			size *= 4;
		}
	}

	gem_close(fd, handle);
	close(fd);

	igt_exit();
}
/*
 * Exercise gen3 blits between a pool of 1MiB buffers: verify initial
 * contents, then run forward-cyclic, backward-cyclic and random
 * copy passes, re-checking every buffer after each pass.
 *
 * argv[1] (optional): number of buffers; defaults to 3/2 of the
 * aperture size in MiB.
 */
int main(int argc, char **argv)
{
	uint32_t *handle, *start_val;
	uint32_t start = 0;
	int i, fd, count;

	igt_simple_init(argc, argv);

	fd = drm_open_driver(DRIVER_INTEL);
	/* The blit command stream used by copy() is gen3-specific. */
	igt_require(IS_GEN3(intel_get_drm_devid(fd)));

	count = 0;
	if (argc > 1)
		count = atoi(argv[1]);
	if (count == 0)
		count = 3 * gem_aperture_size(fd) / (1024*1024) / 2;
	igt_info("Using %d 1MiB buffers\n", count);

	/*
	 * Single allocation holding two parallel arrays: bo handles in
	 * the first half, expected start values in the second half.
	 */
	handle = malloc(sizeof(*handle) * count * 2);
	igt_assert(handle); /* fix: allocation result was never checked */
	start_val = handle + count;

	/* Fill each bo with a distinct ascending dword pattern. */
	for (i = 0; i < count; i++) {
		handle[i] = create_bo(fd, start);
		start_val[i] = start;
		start += 1024 * 1024 / 4; /* dwords per 1MiB bo */
	}

	igt_info("Verifying initialisation...\n");
	for (i = 0; i < count; i++)
		check_bo(fd, handle[i], start_val[i]);

	igt_info("Cyclic blits, forward...\n");
	for (i = 0; i < count * 4; i++) {
		int src = i % count;
		int dst = (i + 1) % count;

		copy(fd, handle[dst], handle[src]);
		start_val[dst] = start_val[src];
	}
	for (i = 0; i < count; i++)
		check_bo(fd, handle[i], start_val[i]);

	igt_info("Cyclic blits, backward...\n");
	for (i = 0; i < count * 4; i++) {
		int src = (i + 1) % count;
		int dst = i % count;

		copy(fd, handle[dst], handle[src]);
		start_val[dst] = start_val[src];
	}
	for (i = 0; i < count; i++)
		check_bo(fd, handle[i], start_val[i]);

	igt_info("Random blits...\n");
	for (i = 0; i < count * 4; i++) {
		int src = random() % count;
		int dst = random() % count;

		/* Self-copies tell us nothing; skip them. */
		if (src == dst)
			continue;

		copy(fd, handle[dst], handle[src]);
		start_val[dst] = start_val[src];
	}
	for (i = 0; i < count; i++)
		check_bo(fd, handle[i], start_val[i]);

	free(handle); /* fix: was leaked (igt_exit does not return) */
	igt_exit();
}
unsigned int flags; } pass[] = { { .name = "relocation", .flags = 0 }, { .name = "cycle-relocation", .flags = CYCLE_BATCH }, { .name = "fault-relocation", .flags = FAULT }, { .name = "skip-relocs", .flags = SKIP_RELOC }, { .name = "no-relocs", .flags = SKIP_RELOC | NO_RELOC }, { .name = NULL }, }, *p; struct drm_i915_gem_relocation_entry *reloc; uint32_t reloc_handle; int size; igt_skip_on_simulation(); fd = drm_open_driver(DRIVER_INTEL); memset(gem_exec, 0, sizeof(gem_exec)); for (n = 0; n < MAX_NUM_EXEC; n++) gem_exec[n].handle = gem_create(fd, 4096); for (n = 0; n < 16; n++) { cycle[n] = gem_create(fd, 4096); gem_write(fd, cycle[n], 0, batch, sizeof(batch)); } gem_exec[MAX_NUM_EXEC].handle = cycle[0]; memset(mem_reloc, 0, sizeof(mem_reloc)); for (n = 0; n < MAX_NUM_RELOC; n++) { mem_reloc[n].offset = 1024; mem_reloc[n].read_domains = I915_GEM_DOMAIN_RENDER;
/* Block until the GPU is idle, using a throwaway device fd. */
static void wait_gpu(void)
{
	int drm_fd;

	drm_fd = drm_open_driver(DRIVER_INTEL);
	gem_quiescent_gpu(drm_fd);
	close(drm_fd);
}