static void accuracy(int fd)
{
	union drm_wait_vblank vbl;
	unsigned long target;
	int n;

	memset(&vbl, 0, sizeof(vbl));

	vbl.request.type = DRM_VBLANK_RELATIVE;
	vbl.request.sequence = 1;
	do_ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);

	/* Queue 60 events, one per vblank, all targeting the same absolute
	 * sequence 60 vblanks ahead. */
	target = vbl.reply.sequence + 60;
	for (n = 0; n < 60; n++) {
		vbl.request.type = DRM_VBLANK_RELATIVE;
		vbl.request.sequence = 1;
		do_ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);

		vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT;
		vbl.request.sequence = target;
		do_ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
	}

	/* Querying the current count (relative wait of 0) should now report
	 * exactly the target sequence. */
	vbl.request.type = DRM_VBLANK_RELATIVE;
	vbl.request.sequence = 0;
	do_ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
	igt_assert_eq(vbl.reply.sequence, target);

	/* Every queued event must have fired on the target vblank. */
	for (n = 0; n < 60; n++) {
		struct drm_event_vblank ev;

		igt_assert_eq(read(fd, &ev, sizeof(ev)), sizeof(ev));
		igt_assert_eq(ev.sequence, target);
	}
}
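/* Note: do_ioctl() is IGT's checked-ioctl wrapper. A simplified sketch of an
 * equivalent (assumption: the real macro additionally logs the failing
 * request and errno) could be: */
#define do_ioctl(fd, ioc, ioc_data) \
	igt_assert_eq(igt_ioctl((fd), (ioc), (ioc_data)), 0)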
static void exec1(int fd, uint32_t handle, uint64_t reloc_ofs,
		  unsigned flags, char *ptr)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 gem_exec[1];
	struct drm_i915_gem_relocation_entry gem_reloc[1];

	gem_reloc[0].offset = reloc_ofs;
	gem_reloc[0].delta = 0;
	gem_reloc[0].target_handle = handle;
	gem_reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
	gem_reloc[0].write_domain = 0;
	gem_reloc[0].presumed_offset = 0;

	gem_exec[0].handle = handle;
	gem_exec[0].relocation_count = 1;
	gem_exec[0].relocs_ptr = (uintptr_t)gem_reloc;
	gem_exec[0].alignment = 0;
	gem_exec[0].offset = 0;
	gem_exec[0].flags = 0;
	gem_exec[0].rsvd1 = 0;
	gem_exec[0].rsvd2 = 0;

	execbuf.buffers_ptr = (uintptr_t)gem_exec;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = 8;
	execbuf.cliprects_ptr = 0;
	execbuf.num_cliprects = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	execbuf.flags = flags;
	i915_execbuffer2_set_context_id(execbuf, 0);
	execbuf.rsvd2 = 0;

	/* Avoid hitting slowpaths in the reloc processing which might yield
	 * a presumed_offset of -1. Happens when the batch is still busy
	 * from the last round. */
	gem_sync(fd, handle);

	gem_execbuf(fd, &execbuf);
	igt_warn_on(gem_reloc[0].presumed_offset == -1);

	if (use_64bit_relocs) {
		uint64_t tmp;

		if (ptr)
			tmp = *(uint64_t *)(ptr + reloc_ofs);
		else
			gem_read(fd, handle, reloc_ofs, &tmp, sizeof(tmp));
		igt_assert_eq(tmp, gem_reloc[0].presumed_offset);
	} else {
		uint32_t tmp;

		if (ptr)
			tmp = *(uint32_t *)(ptr + reloc_ofs);
		else
			gem_read(fd, handle, reloc_ofs, &tmp, sizeof(tmp));
		igt_assert_eq(tmp, gem_reloc[0].presumed_offset);
	}
}
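/* Hypothetical usage sketch for exec1() (not part of the original test):
 * build a minimal batch that terminates immediately, then let exec1() patch
 * a self-referencing relocation at byte offset 64 of the same object. */
static void exec1_example(int fd)
{
	uint32_t bbe = MI_BATCH_BUFFER_END;
	uint32_t handle = gem_create(fd, 4096);

	gem_write(fd, handle, 0, &bbe, sizeof(bbe));
	exec1(fd, handle, 64, 0, NULL);
	gem_close(fd, handle);
}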
static bool test(data_t *data, enum pipe pipe, igt_output_t *output)
{
	igt_plane_t *primary;
	drmModeModeInfo *mode;
	struct igt_fb fb[2];
	int fd, ret;

	/* select the pipe we want to use */
	igt_output_set_pipe(output, pipe);
	igt_display_commit(&data->display);

	if (!output->valid) {
		igt_output_set_pipe(output, PIPE_ANY);
		igt_display_commit(&data->display);
		return false;
	}

	primary = igt_output_get_plane(output, IGT_PLANE_PRIMARY);
	mode = igt_output_get_mode(output);

	igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
			    DRM_FORMAT_XRGB8888,
			    LOCAL_I915_FORMAT_MOD_X_TILED,
			    0.0, 0.0, 0.0, &fb[0]);

	igt_plane_set_fb(primary, &fb[0]);
	igt_display_commit2(&data->display, COMMIT_LEGACY);

	/* Hand master over to a second fd and flip from there. */
	fd = drm_open_driver(DRIVER_INTEL);
	ret = drmDropMaster(data->drm_fd);
	igt_assert_eq(ret, 0);
	ret = drmSetMaster(fd);
	igt_assert_eq(ret, 0);

	igt_create_color_fb(fd, mode->hdisplay, mode->vdisplay,
			    DRM_FORMAT_XRGB8888,
			    LOCAL_I915_FORMAT_MOD_X_TILED,
			    0.0, 0.0, 0.0, &fb[1]);

	ret = drmModePageFlip(fd, output->config.crtc->crtc_id,
			      fb[1].fb_id, DRM_MODE_PAGE_FLIP_EVENT, data);
	igt_assert_eq(ret, 0);

	/* Close the fd with the flip event still pending; this is the
	 * situation the test exercises. */
	ret = close(fd);
	igt_assert_eq(ret, 0);

	ret = drmSetMaster(data->drm_fd);
	igt_assert_eq(ret, 0);

	igt_plane_set_fb(primary, NULL);
	igt_output_set_pipe(output, PIPE_ANY);
	igt_display_commit(&data->display);

	igt_remove_fb(data->drm_fd, &fb[0]);

	return true;
}
static void test_reimport_close_race(void)
{
	pthread_t *threads;
	int r, i, num_threads;
	int fds[2];
	int obj_count;
	void *status;
	uint32_t handle;
	int fake;

	/* Allocate exit handler fds in here so that we don't screw
	 * up the counts */
	fake = drm_open_driver(DRIVER_INTEL);

	gem_quiescent_gpu(fake);
	obj_count = get_object_count();

	num_threads = sysconf(_SC_NPROCESSORS_ONLN);

	threads = calloc(num_threads, sizeof(pthread_t));

	fds[0] = drm_open_driver(DRIVER_INTEL);
	handle = gem_create(fds[0], BO_SIZE);
	fds[1] = prime_handle_to_fd(fds[0], handle);

	for (i = 0; i < num_threads; i++) {
		r = pthread_create(&threads[i], NULL,
				   thread_fn_reimport_vs_close,
				   (void *)(uintptr_t)fds);
		igt_assert_eq(r, 0);
	}

	sleep(5);

	pls_die = 1;

	for (i = 0; i < num_threads; i++) {
		pthread_join(threads[i], &status);
		igt_assert(status == 0);
	}

	close(fds[0]);
	close(fds[1]);

	gem_quiescent_gpu(fake);
	obj_count = get_object_count() - obj_count;

	igt_info("leaked %i objects\n", obj_count);

	close(fake);

	igt_assert_eq(obj_count, 0);
}
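/* The worker spawned above is not shown in this section; presumably it races
 * re-importing the dma-buf against closing the resulting handle until
 * pls_die is set. A sketch under that assumption: */
static void *thread_fn_reimport_vs_close(void *p)
{
	struct drm_gem_close close_bo;
	int *fds = p;
	int fd = fds[0];
	int dma_buf_fd = fds[1];
	uint32_t handle;

	while (!pls_die) {
		handle = prime_fd_to_handle(fd, dma_buf_fd);

		close_bo.handle = handle;
		ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
	}

	return (void *)0;
}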
static void test_invalid_buffer(int in)
{
	int fd = setup(in, 0);

	alarm(1);

	igt_assert_eq(read(fd, (void *)-1, 4096), -1);
	igt_assert_eq(errno, EFAULT);

	teardown(fd);
}
static void test_empty(int in, int nonblock, int expected)
{
	char buffer[1024];
	int fd = setup(in, nonblock);

	alarm(1);

	/* Reading an empty event queue must fail; the caller passes the
	 * expected errno (e.g. EAGAIN for nonblocking reads, EINTR when the
	 * alarm interrupts a blocking one). */
	igt_assert_eq(read(fd, buffer, sizeof(buffer)), -1);
	igt_assert_eq(errno, expected);

	teardown(fd);
}
static void crtc_check_current_state(struct kms_atomic_crtc_state *crtc,
				     struct kms_atomic_plane_state *primary,
				     enum kms_atomic_check_relax relax)
{
	struct kms_atomic_crtc_state crtc_kernel;
	drmModeCrtcPtr legacy;

	legacy = drmModeGetCrtc(crtc->state->desc->fd, crtc->obj);
	igt_assert(legacy);

	igt_assert_eq_u32(legacy->crtc_id, crtc->obj);
	igt_assert_eq_u32(legacy->x, primary->src_x >> 16);
	igt_assert_eq_u32(legacy->y, primary->src_y >> 16);

	if (crtc->active)
		igt_assert_eq_u32(legacy->buffer_id, primary->fb_id);
	else
		igt_assert_eq_u32(legacy->buffer_id, 0);

	if (legacy->mode_valid) {
		igt_assert_neq(legacy->mode_valid, 0);
		igt_assert_eq(crtc->mode.len,
			      sizeof(struct drm_mode_modeinfo));
		do_or_die(memcmp(&legacy->mode, crtc->mode.data,
				 crtc->mode.len));
		igt_assert_eq(legacy->width, legacy->mode.hdisplay);
		igt_assert_eq(legacy->height, legacy->mode.vdisplay);
	} else {
		igt_assert_eq(legacy->mode_valid, 0);
	}

	memcpy(&crtc_kernel, crtc, sizeof(crtc_kernel));
	crtc_get_current_state(&crtc_kernel);

	if (crtc_kernel.mode.id != 0)
		igt_assert_eq(crtc_kernel.mode.len,
			      sizeof(struct drm_mode_modeinfo));

	/* Optionally relax the check for MODE_ID: using the legacy SetCrtc
	 * API can potentially change MODE_ID even if the mode itself remains
	 * unchanged. */
	if (((relax & CRTC_RELAX_MODE) &&
	     (crtc_kernel.mode.id != crtc->mode.id &&
	      crtc_kernel.mode.id != 0 && crtc->mode.id != 0)) &&
	    memcmp(crtc_kernel.mode.data, crtc->mode.data,
		   sizeof(struct drm_mode_modeinfo)) == 0) {
		crtc_kernel.mode.id = crtc->mode.id;
		crtc_kernel.mode.data = crtc->mode.data;
	}

	do_or_die(memcmp(&crtc_kernel, crtc, sizeof(crtc_kernel)));

	drmModeFreeCrtc(legacy);
}
static void test_and_verify(int val)
{
	int result;

	igt_assert_eq(backlight_write(val, "brightness"), 0);
	igt_assert_eq(backlight_read(&result, "brightness"), 0);
	/* Check that the exact value sticks */
	igt_assert_eq(result, val);

	igt_assert_eq(backlight_read(&result, "actual_brightness"), 0);
	/* Some rounding may happen depending on hw, so just check that the
	 * result is within TOLERANCE percent of the requested value (for
	 * TOLERANCE of 5, writing 100 would accept 95..105). */
	igt_assert(result <= val + val * TOLERANCE / 100 &&
		   result >= val - val * TOLERANCE / 100);
}
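/* backlight_read()/backlight_write() are sysfs helpers not shown in this
 * section. A minimal sketch, assuming a fixed BACKLIGHT_PATH such as
 * /sys/class/backlight/intel_backlight (path and error conventions are
 * assumptions, not taken from the original): */
#define BACKLIGHT_PATH "/sys/class/backlight/intel_backlight"

static int backlight_write(int value, const char *fname)
{
	char full[PATH_MAX], src[64];
	int fd, len;

	/* Write the decimal value into BACKLIGHT_PATH/<fname>. */
	snprintf(full, sizeof(full), "%s/%s", BACKLIGHT_PATH, fname);
	fd = open(full, O_WRONLY);
	if (fd < 0)
		return -errno;

	len = snprintf(src, sizeof(src), "%i", value);
	len = write(fd, src, len);
	close(fd);

	return len < 0 ? len : 0;
}

static int backlight_read(int *result, const char *fname)
{
	char full[PATH_MAX], dst[64];
	int fd, len;

	/* Read a decimal value back from BACKLIGHT_PATH/<fname>. */
	snprintf(full, sizeof(full), "%s/%s", BACKLIGHT_PATH, fname);
	fd = open(full, O_RDONLY);
	if (fd < 0)
		return -errno;

	len = read(fd, dst, sizeof(dst) - 1);
	close(fd);
	if (len < 0)
		return len;

	dst[len] = '\0';
	*result = atoi(dst);
	return 0;
}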
static void test_bad_command(data_t *data, const char *cmd)
{
	FILE *ctl;
	size_t written;

	ctl = igt_debugfs_fopen("i915_display_crc_ctl", "r+");
	written = fwrite(cmd, 1, strlen(cmd), ctl);
	fflush(ctl);
	igt_assert_eq(written, strlen(cmd));
	igt_assert(ferror(ctl));
	igt_assert_eq(errno, EINVAL);

	fclose(ctl);
}
/**
 * igt_pipe_crc_stop:
 * @pipe_crc: pipe CRC object
 *
 * Stops the CRC capture process on @pipe_crc.
 */
void igt_pipe_crc_stop(igt_pipe_crc_t *pipe_crc)
{
	char buf[32];

	sprintf(buf, "pipe %s none", kmstest_pipe_name(pipe_crc->pipe));
	igt_assert_eq(write(pipe_crc->ctl_fd, buf, strlen(buf)),
		      strlen(buf));
}
static void igt_pipe_crc_pipe_off(int fd, enum pipe pipe)
{
	char buf[32];

	sprintf(buf, "pipe %s none", kmstest_pipe_name(pipe));
	igt_assert_eq(write(fd, buf, strlen(buf)), strlen(buf));
}
static void dump_batch(struct intel_batchbuffer *batch)
{
	int fd = open("/tmp/i965-batchbuffers.dump",
		      O_WRONLY | O_CREAT, 0666);

	if (fd != -1) {
		igt_assert_eq(write(fd, batch->buffer, 4096), 4096);
		close(fd);
	}
}
static void test_bad_brightness(int max)
{
	int val;

	/* First write some sane value */
	backlight_write(max / 2, "brightness");

	/* Writing invalid values should fail and not change the value */
	igt_assert_lt(backlight_write(-1, "brightness"), 0);
	backlight_read(&val, "brightness");
	igt_assert_eq(val, max / 2);

	igt_assert_lt(backlight_write(max + 1, "brightness"), 0);
	backlight_read(&val, "brightness");
	igt_assert_eq(val, max / 2);

	igt_assert_lt(backlight_write(INT_MAX, "brightness"), 0);
	backlight_read(&val, "brightness");
	igt_assert_eq(val, max / 2);
}
/* We know that if we don't enable audio runtime PM, snd_hda_intel will never
 * release its power well refcount, and we'll never reach the LPSP state. OTOH
 * there's no guarantee that it will release the power well if we enable
 * runtime PM, but at least we can try. We don't have any assertions since the
 * user may not even have snd_hda_intel loaded, which is not a problem. */
static void disable_audio_runtime_pm(void)
{
	int fd;

	fd = open("/sys/module/snd_hda_intel/parameters/power_save",
		  O_WRONLY);
	if (fd >= 0) {
		igt_assert_eq(write(fd, "1\n", 2), 2);
		close(fd);
	}

	fd = open("/sys/bus/pci/devices/0000:00:03.0/power/control",
		  O_WRONLY);
	if (fd >= 0) {
		igt_assert_eq(write(fd, "auto\n", 5), 5);
		close(fd);
	}

	/* Give some time for it to react. */
	sleep(1);
}
static void test_throttle(int fd)
{
	wedge_gpu(fd);

	igt_assert_eq(__gem_throttle(fd), -EIO);

	trigger_reset(fd);
}
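/* __gem_throttle() is the unchecked variant asserted on above. A plausible
 * sketch, assuming it simply forwards the ioctl and converts failure into
 * -errno so the caller can check for -EIO from a wedged GPU: */
static int __gem_throttle(int fd)
{
	int err = 0;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL))
		err = -errno;

	return err;
}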
static void test_ring(int fd, unsigned ring, uint32_t flags)
{
	uint32_t bbe = MI_BATCH_BUFFER_END;
	uint32_t handle[3];
	uint32_t read, write;
	uint32_t active;
	unsigned i;

	gem_require_ring(fd, ring | flags);

	handle[TEST] = gem_create(fd, 4096);
	handle[BATCH] = gem_create(fd, 4096);
	gem_write(fd, handle[BATCH], 0, &bbe, sizeof(bbe));

	/* Create a long running batch which we can use to hog the GPU */
	handle[BUSY] = busy_blt(fd);

	/* Queue a batch after the busy, it should block and remain "busy" */
	igt_assert(exec_noop(fd, handle, ring | flags, false));
	igt_assert(still_busy(fd, handle[BUSY]));
	__gem_busy(fd, handle[TEST], &read, &write);
	igt_assert_eq(read, 1 << ring);
	igt_assert_eq(write, 0);

	/* Requeue with a write */
	igt_assert(exec_noop(fd, handle, ring | flags, true));
	igt_assert(still_busy(fd, handle[BUSY]));
	__gem_busy(fd, handle[TEST], &read, &write);
	igt_assert_eq(read, 1 << ring);
	igt_assert_eq(write, ring);

	/* Now queue it for a read across all available rings */
	active = 0;
	for (i = I915_EXEC_RENDER; i <= I915_EXEC_VEBOX; i++) {
		if (exec_noop(fd, handle, i | flags, false))
			active |= 1 << i;
	}
	igt_assert(still_busy(fd, handle[BUSY]));
	__gem_busy(fd, handle[TEST], &read, &write);
	igt_assert_eq(read, active);
	igt_assert_eq(write, ring); /* from the earlier write */

	/* Check that our long batch was long enough */
	igt_assert(still_busy(fd, handle[BUSY]));

	/* And make sure it becomes idle again */
	gem_sync(fd, handle[TEST]);
	__gem_busy(fd, handle[TEST], &read, &write);
	igt_assert_eq(read, 0);
	igt_assert_eq(write, 0);

	for (i = TEST; i <= BATCH; i++)
		gem_close(fd, handle[i]);
}
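/* Sketch of the busy-ioctl wrapper assumed by test_ring() above (assumption:
 * matches the usual IGT helper): the kernel reports the rings with
 * outstanding reads in the top 16 bits of busy.busy and the single writing
 * ring in the bottom 16 bits. */
static void __gem_busy(int fd, uint32_t handle,
		       uint32_t *read, uint32_t *write)
{
	struct drm_i915_gem_busy busy;

	memset(&busy, 0, sizeof(busy));
	busy.handle = handle;

	do_ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);

	*write = busy.busy & 0xffff;
	*read = busy.busy >> 16;
}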
/**
 * igt_crc_to_string:
 * @crc: pipe CRC value to print
 *
 * This formats @crc into a newly allocated string buffer. Ownership passes
 * to the caller, who is responsible for freeing it with free().
 *
 * This should only ever be used for diagnostic debug output.
 */
char *igt_crc_to_string(igt_crc_t *crc)
{
	char buf[128];

	igt_assert_eq(crc->n_words, 5);

	sprintf(buf, "%08x %08x %08x %08x %08x",
		crc->crc[0], crc->crc[1], crc->crc[2],
		crc->crc[3], crc->crc[4]);

	return strdup(buf);
}
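/* Usage sketch: since the string comes from strdup(), the caller owns it and
 * must free() it when done. */
static void crc_debug_example(igt_crc_t *crc)
{
	char *str = igt_crc_to_string(crc);

	igt_debug("CRC: %s\n", str);
	free(str);
}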
static void prepare_crtc(data_t *data, igt_output_t *output, enum pipe pipe,
			 igt_plane_t *plane, drmModeModeInfo *mode,
			 enum igt_commit_style s)
{
	igt_display_t *display = &data->display;

	igt_output_set_pipe(output, pipe);

	/* create the pipe_crc object for this pipe */
	igt_pipe_crc_free(data->pipe_crc);
	data->pipe_crc = igt_pipe_crc_new(pipe, INTEL_PIPE_CRC_SOURCE_AUTO);

	/* before allocating, free if any older fb */
	if (data->fb_id1) {
		igt_remove_fb(data->drm_fd, &data->fb1);
		data->fb_id1 = 0;
	}

	/* allocate fb for plane 1 */
	data->fb_id1 = igt_create_fb(data->drm_fd,
				     mode->hdisplay, mode->vdisplay,
				     DRM_FORMAT_XRGB8888,
				     LOCAL_I915_FORMAT_MOD_X_TILED, /* tiled */
				     &data->fb1);
	igt_assert(data->fb_id1);

	paint_color(data, &data->fb1, mode->hdisplay, mode->vdisplay);

	/*
	 * We always set the primary plane to actually enable the pipe as
	 * there's no way (that works) to light up a pipe with only a sprite
	 * plane enabled at the moment.
	 */
	if (!plane->is_primary) {
		igt_plane_t *primary;

		primary = igt_output_get_plane(output, IGT_PLANE_PRIMARY);
		igt_plane_set_fb(primary, &data->fb1);
	}

	igt_plane_set_fb(plane, &data->fb1);

	if (s == COMMIT_LEGACY) {
		int ret;

		ret = drmModeSetCrtc(data->drm_fd,
				     output->config.crtc->crtc_id,
				     data->fb_id1,
				     plane->pan_x, plane->pan_y,
				     &output->id, 1, mode);
		igt_assert_eq(ret, 0);
	} else {
		igt_display_commit2(display, s);
	}
}
static void check_bo(int fd, uint32_t handle)
{
	uint32_t *map;
	int i;

	igt_debug("Verifying result\n");
	map = gem_mmap__cpu(fd, handle, 0, 4096, PROT_READ);
	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, 0);
	for (i = 0; i < 1024; i++)
		igt_assert_eq(map[i], i);
	munmap(map, 4096);
}
static void run_on_ring(int fd, unsigned ring_id, const char *ring_name)
{
	uint32_t handle, handle_new;
	uint64_t gtt_offset, gtt_offset_new;
	uint32_t *batch_ptr, *batch_ptr_old;
	unsigned split;
	char buf[100];
	int i;

	gem_require_ring(fd, ring_id);

	sprintf(buf, "testing %s cs tlb coherency: ", ring_name);

	/* Shut up gcc, too stupid. */
	batch_ptr_old = NULL;
	handle = 0;
	gtt_offset = 0;

	for (split = 0; split < BATCH_SIZE/8 - 1; split += 2) {
		igt_progress(buf, split, BATCH_SIZE/8 - 1);

		handle_new = gem_create(fd, BATCH_SIZE);
		batch_ptr = gem_mmap__cpu(fd, handle_new, 0, BATCH_SIZE,
					  PROT_READ | PROT_WRITE);
		batch_ptr[split*2] = MI_BATCH_BUFFER_END;
		for (i = split*2 + 2; i < BATCH_SIZE/8; i++)
			batch_ptr[i] = 0xffffffff;

		if (split > 0) {
			gem_sync(fd, handle);
			gem_close(fd, handle);
		}

		igt_assert_eq(exec(fd, handle_new, split,
				   &gtt_offset_new, 0),
			      0);

		if (split > 0) {
			/* Check that we've managed to collide in the tlb. */
			igt_assert(gtt_offset == gtt_offset_new);

			/* We hang onto the storage of the old batch by
			 * keeping the cpu mmap around. */
			munmap(batch_ptr_old, BATCH_SIZE);
		}

		handle = handle_new;
		gtt_offset = gtt_offset_new;
		batch_ptr_old = batch_ptr;
	}
}
static void check_workarounds(enum operation op)
{
	igt_assert_eq(workaround_fail_count(), 0);

	switch (op) {
	case GPU_RESET:
		test_hang_gpu();
		break;

	case SUSPEND_RESUME:
		test_suspend_resume();
		break;

	case SIMPLE_READ:
		return;

	default:
		igt_assert(0);
	}

	igt_assert_eq(workaround_fail_count(), 0);
}
static void test_wait(int fd)
{
	igt_hang_ring_t hang;

	/* If the request we wait on completes due to a hang (even for
	 * that request), the user expects the return value to be 0
	 * (success). */
	hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
	igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
	igt_post_hang_ring(fd, hang);

	/* If the GPU is wedged during the wait, again we expect the return
	 * value to be 0 (success). */
	igt_require(i915_reset_control(false));
	hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
	igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
	igt_post_hang_ring(fd, hang);
	igt_require(i915_reset_control(true));

	trigger_reset(fd);
}
static int get_object_count(void)
{
	FILE *file;
	int ret, scanned;

	igt_drop_caches_set(DROP_RETIRE | DROP_ACTIVE);

	file = igt_debugfs_fopen("i915_gem_objects", "r");

	scanned = fscanf(file, "%i objects", &ret);
	igt_assert_eq(scanned, 1);

	fclose(file);

	return ret;
}
static unsigned int readit(const char *path)
{
	unsigned int ret;
	int scanned;
	FILE *file;

	file = fopen(path, "r");
	igt_assert(file);
	scanned = fscanf(file, "%u", &ret);
	igt_assert_eq(scanned, 1);

	fclose(file);
	return ret;
}
static bool igt_pipe_crc_do_start(igt_pipe_crc_t *pipe_crc)
{
	char buf[64];

	/* Stop first just to make sure we don't have lingering state left. */
	igt_pipe_crc_stop(pipe_crc);

	sprintf(buf, "pipe %s %s",
		kmstest_pipe_name(pipe_crc->pipe),
		pipe_crc_source_name(pipe_crc->source));
	errno = 0;
	igt_assert_eq(write(pipe_crc->ctl_fd, buf, strlen(buf)),
		      strlen(buf));
	if (errno != 0)
		return false;

	return true;
}
static uint32_t create_userptr(int fd, uint32_t val, uint32_t *ptr)
{
	uint32_t handle;
	int i, ret;

	ret = gem_userptr(fd, ptr, sizeof(linear), 0, &handle);
	igt_assert_eq(ret, 0);
	igt_assert(handle != 0);

	/* Fill the BO with dwords starting at val */
	for (i = 0; i < WIDTH*HEIGHT; i++)
		ptr[i] = val++;

	return handle;
}
static bool read_one_crc(igt_pipe_crc_t *pipe_crc, igt_crc_t *out)
{
	ssize_t bytes_read;
	char buf[pipe_crc->buffer_len];

	igt_set_timeout(5, "CRC reading");
	bytes_read = read(pipe_crc->crc_fd, &buf, pipe_crc->line_len);
	igt_reset_timeout();

	igt_assert_eq(bytes_read, pipe_crc->line_len);
	buf[bytes_read] = '\0';

	if (!pipe_crc_init_from_string(out, buf))
		return false;

	return true;
}
static void test_short_buffer(int in, int nonblock)
{
	char buffer[1024]; /* events are typically 32 bytes */
	int fd = setup(in, nonblock);

	generate_event(fd);
	generate_event(fd);

	wait_for_event(fd);

	alarm(3);

	/* A buffer too small to hold a single event returns 0, not an
	 * error; larger reads then drain the two events one at a time. */
	igt_assert_eq(read(fd, buffer, 4), 0);
	igt_assert(read(fd, buffer, 40) > 0);
	igt_assert(read(fd, buffer, 40) > 0);

	teardown(fd);
}
static uint32_t create_userptr_bo(int fd, uint64_t size)
{
	void *ptr;
	uint32_t handle;
	int ret;

	ptr = mmap(NULL, size,
		   PROT_READ | PROT_WRITE,
		   MAP_ANONYMOUS | MAP_SHARED,
		   -1, 0);
	igt_assert(ptr != MAP_FAILED);

	ret = gem_userptr(fd, (uint32_t *)ptr, size, 0, &handle);
	igt_assert_eq(ret, 0);
	add_handle_ptr(handle, ptr, size);

	return handle;
}
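/* Hypothetical teardown counterpart to create_userptr_bo(), assuming a
 * free_handle_ptr() helper that munmaps the pages recorded by
 * add_handle_ptr(): close the GEM handle, then release the mapping. */
static void free_userptr_bo(int fd, uint32_t handle)
{
	gem_close(fd, handle);
	free_handle_ptr(handle);
}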
static void enter_exec_path(char **argv)
{
	char *exec_path = NULL;
	char *pos = NULL;
	short len_path = 0;
	int ret;

	len_path = strlen(argv[0]);
	/* +1 for the terminating NUL, which strrchr() below relies on. */
	exec_path = (char *)malloc(len_path + 1);

	memcpy(exec_path, argv[0], len_path + 1);
	pos = strrchr(exec_path, '/');
	if (pos != NULL)
		*(pos + 1) = '\0';

	ret = chdir(exec_path);
	igt_assert_eq(ret, 0);
	free(exec_path);
}