/* Arm a one-shot ITIMER_REAL timer firing after @sec seconds plus @usec
 * microseconds, clearing the alarm_received flag beforehand. */
static void set_alarm(time_t sec, suseconds_t usec)
{
	struct itimerval val = {
		.it_interval = { 0, 0 },
		.it_value = { sec, usec },
	};

	alarm_received = false;
	igt_assert(setitimer(ITIMER_REAL, &val, NULL) == 0);
}
/* Return a format supported by @plane that is also one of the cairo-drawable
 * formats known to igt, or 0 when there is no overlap.  When several formats
 * match, the match for the last entry of the igt format list wins (this
 * preserves the original scan order). */
static uint32_t plane_get_igt_format(struct kms_atomic_plane_state *plane)
{
	drmModePlanePtr kms_plane;
	const uint32_t *igt_formats;
	int n_igt_formats;
	uint32_t found = 0;

	kms_plane = drmModeGetPlane(plane->state->desc->fd, plane->obj);
	igt_assert(kms_plane);

	igt_get_all_cairo_formats(&igt_formats, &n_igt_formats);
	for (int i = 0; i < n_igt_formats; i++) {
		for (int j = 0; j < kms_plane->count_formats; j++) {
			if (kms_plane->formats[j] == igt_formats[i]) {
				found = kms_plane->formats[j];
				break;
			}
		}
	}

	drmModeFreePlane(kms_plane);
	return found;
}
/* Cross-check the plane state the kernel reports through the legacy
 * (non-atomic) API against our shadow state @plane.
 *
 * @relax: PLANE_RELAX_FB skips the fb_id comparison, because legacy cursor
 *         ioctls create their own internal framebuffer whose id we cannot
 *         predict.
 */
static void plane_check_current_state(struct kms_atomic_plane_state *plane, enum kms_atomic_check_relax relax)
{
	drmModePlanePtr legacy;
	struct kms_atomic_plane_state plane_kernel;

	legacy = drmModeGetPlane(plane->state->desc->fd, plane->obj);
	igt_assert(legacy);

	igt_assert_eq_u32(legacy->crtc_id, plane->crtc_id);

	if (!(relax & PLANE_RELAX_FB))
		igt_assert_eq_u32(legacy->fb_id, plane->fb_id);

	/* Re-read the full kernel state into a byte-for-byte copy of *plane
	 * and compare wholesale.  NOTE(review): the memcmp also compares any
	 * struct padding — this is only safe because plane_kernel starts as a
	 * memcpy of *plane and plane_get_current_state() presumably writes
	 * named fields only; confirm. */
	memcpy(&plane_kernel, plane, sizeof(plane_kernel));
	plane_get_current_state(&plane_kernel);

	/* Legacy cursor ioctls create their own, unknowable, internal
	 * framebuffer which we can't reason about. */
	if (relax & PLANE_RELAX_FB)
		plane_kernel.fb_id = plane->fb_id;

	do_or_die(memcmp(&plane_kernel, plane, sizeof(plane_kernel)));

	drmModeFreePlane(legacy);
}
static void setup_modeset(void) { int i; for (i = 0; i < drm.res->count_connectors; i++) { drmModeConnectorPtr c = drm.connectors[i]; if (c->connection == DRM_MODE_CONNECTED && c->count_modes > 0) { modeset.connector_id = c->connector_id; modeset.mode = &c->modes[0]; break; } } igt_assert(i < drm.res->count_connectors); modeset.crtc_id = drm.res->crtcs[0]; for (i = 0; i < 2; i++) { igt_create_fb(drm.fd, modeset.mode->hdisplay, modeset.mode->vdisplay, DRM_FORMAT_XRGB8888, LOCAL_I915_FORMAT_MOD_X_TILED, &fbs[i]); igt_draw_fill_fb(drm.fd, &fbs[i], 0x80); } draw_rect(&fbs[1], IGT_DRAW_BLT, 0x800000); igt_create_fb(drm.fd, 64, 64, DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE, &cursor); igt_draw_fill_fb(drm.fd, &cursor, 0xFF008000); }
/* Creating a zero-sized object must fail; __gem_create() reports this by
 * returning a zero handle. */
static void invalid_size_test(int fd)
{
	igt_assert(!__gem_create(fd, 0));
}
/**
 * __igt_debugfs_read:
 * @filename: file name
 * @buf: buffer where the contents will be stored, allocated by the caller
 * @buf_size: size of the buffer
 *
 * This function opens the debugfs file, reads it, stores the content in the
 * provided buffer, then closes the file. Users should make sure that the buffer
 * provided is big enough to fit the whole file, plus one byte.
 */
void __igt_debugfs_read(const char *filename, char *buf, int buf_size)
{
	FILE *file;
	size_t n_read;

	file = igt_debugfs_fopen(filename, "r");
	igt_assert(file);

	/* Read at most buf_size - 1 bytes, leaving room for the NUL. */
	n_read = fread(buf, 1, buf_size - 1, file);
	igt_assert(n_read > 0);	/* an empty file (or buf_size <= 1) is an error */
	igt_assert(feof(file));	/* the whole file must have fit in the buffer */

	buf[n_read] = '\0';

	igt_assert(fclose(file) == 0);
}
/* Locate the debugfs mount point and the dri debugfs directory of an i915
 * device.
 *
 * Probes /debug/dri first, then /sys/kernel/debug/dri; if neither exists,
 * tries to mount debugfs at /sys/kernel/debug (the mount() result is
 * deliberately ignored — the scan below decides success).  Minors 0..15 are
 * then scanned for a directory containing i915_error_state, which marks an
 * i915 debugfs instance.
 *
 * Returns true (with debugfs->root and debugfs->dri_path filled in) on
 * success, false otherwise (dri_path is left empty).
 */
static bool __igt_debugfs_init(igt_debugfs_t *debugfs)
{
	const char *path = "/sys/kernel/debug";
	struct stat st;
	int n;

	if (stat("/debug/dri", &st) == 0) {
		path = "/debug/dri";
		/* NOTE(review): with this path the probe below builds
		 * "/debug/dri/dri/<n>" — looks like a doubled component,
		 * verify against the intended legacy layout. */
		goto find_minor;
	}

	if (stat("/sys/kernel/debug/dri", &st) == 0)
		goto find_minor;

	igt_assert(stat("/sys/kernel/debug", &st) == 0);

	mount("debug", "/sys/kernel/debug", "debugfs", 0, 0);

find_minor:
	strcpy(debugfs->root, path);
	for (n = 0; n < 16; n++) {
		int len = sprintf(debugfs->dri_path, "%s/dri/%d", path, n);

		/* Append a probe filename to check this minor is i915 ... */
		sprintf(debugfs->dri_path + len, "/i915_error_state");
		if (stat(debugfs->dri_path, &st) == 0) {
			/* ... then strip it again, keeping the directory. */
			debugfs->dri_path[len] = '\0';
			return true;
		}
	}

	debugfs->dri_path[0] = '\0';
	return false;
}
/* Per-child workload: create a bo whose size varies with the child index,
 * keep it busy, then fault it in through a GTT mmap and check that the
 * canary value is read back.
 *
 * NOTE(review): tiling and write both derive from child % 2, so they are
 * always equal (odd children are X-tiled AND write the canary).  If they
 * were meant to vary independently one of them should likely use child / 2
 * — verify against the intended test matrix.
 */
static void run(data_t *data, int child)
{
	const int size = 4096 * (256 + child * child);
	const int tiling = child % 2;
	const int write = child % 2;
	uint32_t handle = gem_create(data->fd, size);
	uint32_t *ptr;
	uint32_t x;

	igt_assert(handle);

	if (tiling != I915_TILING_NONE)
		gem_set_tiling(data->fd, handle, tiling, 4096);

	/* load up the unfaulted bo */
	busy(data, handle, size, 100);

	/* Note that we ignore the API and rely on the implict
	 * set-to-gtt-domain within the fault handler.
	 */
	if (write) {
		ptr = gem_mmap__gtt(data->fd, handle, size,
				    PROT_READ | PROT_WRITE);
		ptr[rand() % (size / 4)] = canary;
	} else {
		ptr = gem_mmap__gtt(data->fd, handle, size, PROT_READ);
	}
	/* Read back a random dword.  NOTE(review): this only equals canary
	 * if the whole bo already contains it — presumably filled by busy();
	 * confirm. */
	x = ptr[rand() % (size / 4)];
	munmap(ptr, size);

	igt_assert_eq_u32(x, canary);
}
/* Look up the property ids of the named properties of DRM object @id.
 *
 * @fd: DRM file descriptor
 * @id: object id to query
 * @type: DRM_MODE_OBJECT_* type of @id
 * @num_props: number of entries in @prop_names / @prop_ids
 * @prop_names: names to search for
 * @prop_ids: out array; prop_ids[j] is set when prop_names[j] is found
 *            (entries for names not found are left untouched)
 */
static void fill_obj_props(int fd, uint32_t id, int type, int num_props, const char **prop_names, uint32_t *prop_ids)
{
	drmModeObjectPropertiesPtr props;
	int i, j;

	props = drmModeObjectGetProperties(fd, id, type);
	igt_assert(props);

	for (i = 0; i < props->count_props; i++) {
		drmModePropertyPtr prop = drmModeGetProperty(fd, props->props[i]);

		/* Fix: drmModeGetProperty() can fail and return NULL; the
		 * original dereferenced prop->name unconditionally. */
		if (!prop)
			continue;

		for (j = 0; j < num_props; j++) {
			if (strcmp(prop->name, prop_names[j]) != 0)
				continue;
			prop_ids[j] = props->props[i];
			break;
		}

		drmModeFreeProperty(prop);
	}

	drmModeFreeObjectProperties(props);
}
static void connector_find_preferred_mode(uint32_t connector_id, unsigned long crtc_idx_mask, int mode_num, struct connector *c) { struct kmstest_connector_config config; if (!kmstest_get_connector_config(drm_fd, connector_id, crtc_idx_mask, &config)) { c->mode_valid = 0; return; } c->connector = config.connector; c->encoder = config.encoder; c->crtc = config.crtc->crtc_id; c->crtc_idx = config.crtc_idx; c->pipe = config.pipe; if (mode_num != -1) { igt_assert(mode_num < config.connector->count_modes); c->mode = config.connector->modes[mode_num]; } else { c->mode = config.default_mode; } c->mode_valid = 1; }
/* Disable all CRTCs and hide the cursor on the modeset CRTC. */
static void unset_mode(void)
{
	kmstest_unset_all_crtcs(drm.fd, drm.res);
	igt_assert(drmModeSetCursor(drm.fd, modeset.crtc_id, 0, 0, 0) == 0);
}
/* Force a GPU reset through the i915_wedged knob, then make sure the GPU
 * comes back to life by waiting for it to go idle. */
static void trigger_reset(int fd)
{
	igt_assert(i915_wedged_set());

	/* And just check the gpu is indeed running again */
	igt_debug("Checking that the GPU recovered\n");
	gem_quiescent_gpu(fd);
}
/**
 * igt_drop_caches_set:
 * @val: bitmask for DROP_* values
 *
 * This calls the debugfs interface the drm/i915 GEM driver exposes to drop or
 * evict certain classes of gem buffer objects.
 */
void igt_drop_caches_set(uint64_t val)
{
	int fd;
	char data[19];	/* "0x" + 16 hex digits + NUL */
	ssize_t nbytes;	/* fix: was size_t, but write() returns ssize_t and the
			 * -1 error check relied on unsigned wrap-around */

	sprintf(data, "0x%" PRIx64, val);

	fd = igt_debugfs_open("i915_gem_drop_caches", O_WRONLY);
	igt_assert(fd >= 0);
	/* Retry writes interrupted by a signal or transient EAGAIN. */
	do {
		nbytes = write(fd, data, strlen(data) + 1);
	} while (nbytes == -1 && (errno == EINTR || errno == EAGAIN));
	igt_assert(nbytes == (ssize_t)(strlen(data) + 1));
	close(fd);
}
/* Zero the object's backing storage through a temporary CPU mapping. */
static void clear(int fd, uint32_t handle, int size)
{
	void *map = gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);

	igt_assert(map != NULL);
	memset(map, 0, size);
	munmap(map, size);
}
/* Zero the object's backing storage through its cached userspace mapping. */
static void clear(int fd, uint32_t handle, int size)
{
	void *map = get_handle_ptr(handle);

	igt_assert(map != NULL);
	memset(map, 0, size);
}
/* export handle from intel driver - reimport to intel driver see if you get same object */
static void test_i915_self_import(void)
{
	drm_intel_bo *test_intel_bo, *test_intel_bo2;
	int prime_fd;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

	/* Fix: the export return value was ignored. */
	igt_assert(drm_intel_bo_gem_export_to_prime(test_intel_bo,
						    &prime_fd) == 0);

	test_intel_bo2 = drm_intel_bo_gem_create_from_prime(bufmgr,
							    prime_fd, BO_SIZE);
	close(prime_fd);
	igt_assert(test_intel_bo2);

	/* A self-import must hand back the very same gem object. */
	igt_assert(test_intel_bo->handle == test_intel_bo2->handle);

	/* Fix: test_intel_bo2 was leaked. */
	drm_intel_bo_unreference(test_intel_bo2);
	drm_intel_bo_unreference(test_intel_bo);
}
/* Open the i915 device as master, snapshot the mode resources and all
 * connectors, and set up a buffer manager with bo reuse enabled. */
static void setup_drm(void)
{
	int i;

	drm.fd = drm_open_driver_master(DRIVER_INTEL);

	drm.res = drmModeGetResources(drm.fd);
	/* Fix: drmModeGetResources() can return NULL (e.g. no KMS support);
	 * the original dereferenced it unchecked. */
	igt_assert(drm.res);
	igt_assert(drm.res->count_connectors <= MAX_CONNECTORS);
	for (i = 0; i < drm.res->count_connectors; i++)
		drm.connectors[i] = drmModeGetConnector(drm.fd,
							drm.res->connectors[i]);

	drm.bufmgr = drm_intel_bufmgr_gem_init(drm.fd, 4096);
	igt_assert(drm.bufmgr);
	drm_intel_bufmgr_gem_enable_reuse(drm.bufmgr);
}
static void test_access(int fd) { uint32_t handle, flink, handle2; struct drm_i915_gem_mmap_gtt mmap_arg; int fd2; handle = gem_create(fd, OBJECT_SIZE); igt_assert(handle); fd2 = drm_open_driver(DRIVER_INTEL); /* Check that fd1 can mmap. */ mmap_arg.handle = handle; do_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg); igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mmap_arg.offset)); /* Check that the same offset on the other fd doesn't work. */ igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd2, mmap_arg.offset) == MAP_FAILED); igt_assert(errno == EACCES); flink = gem_flink(fd, handle); igt_assert(flink); handle2 = gem_open(fd2, flink); igt_assert(handle2); /* Recheck that it works after flink. */ /* Check that the same offset on the other fd doesn't work. */ igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd2, mmap_arg.offset)); }
/* Export the same nouveau bo twice and import each prime fd into a
 * different intel bufmgr; the flink names of the two resulting intel bos
 * must be identical. */
static void test_nv_i915_reimport_twice_check_flink_name(void)
{
	drm_intel_bo *intel_bo = NULL, *intel_bo2 = NULL;
	int prime_fd;
	struct nouveau_bo *nvbo = NULL;
	uint32_t flink_name1, flink_name2;

	igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
				  0, BO_SIZE, NULL, &nvbo) == 0);
	igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);

	intel_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd,
						      BO_SIZE);
	igt_assert(intel_bo);
	close(prime_fd);

	/* Second export/import round, through an independent bufmgr. */
	igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);

	intel_bo2 = drm_intel_bo_gem_create_from_prime(bufmgr2, prime_fd,
						       BO_SIZE);
	igt_assert(intel_bo2);
	close(prime_fd);

	igt_assert(drm_intel_bo_flink(intel_bo, &flink_name1) == 0);
	igt_assert(drm_intel_bo_flink(intel_bo2, &flink_name2) == 0);

	igt_assert_eq_u32(flink_name1, flink_name2);

	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(intel_bo);
	drm_intel_bo_unreference(intel_bo2);
}
/* Export one intel bo and import the same prime fd into two separate
 * nouveau devices, then drop every reference again. */
static void test_i915_nv_import_twice(void)
{
	struct nouveau_bo *nvbo = NULL, *nvbo2 = NULL;
	drm_intel_bo *intel_bo;
	int prime_fd;

	intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);
	igt_assert(drm_intel_bo_gem_export_to_prime(intel_bo,
						    &prime_fd) == 0);

	igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo) == 0);
	igt_assert(nouveau_bo_prime_handle_ref(ndev2, prime_fd, &nvbo2) == 0);
	close(prime_fd);

	nouveau_bo_ref(NULL, &nvbo2);
	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(intel_bo);
}
/**
 * igt_create_color_fb:
 * @fd: open i915 drm file descriptor
 * @width: width of the framebuffer in pixel
 * @height: height of the framebuffer in pixel
 * @format: drm fourcc pixel format code
 * @tiling: tiling layout of the framebuffer
 * @r: red value to use as fill color
 * @g: green value to use as fill color
 * @b: blue value to use as fill color
 * @fb: pointer to an #igt_fb structure
 *
 * This function allocates a gem buffer object suitable to back a framebuffer
 * with the requested properties and then wraps it up in a drm framebuffer
 * object. All metadata is stored in @fb.
 *
 * Compared to igt_create_fb() this function also fills the entire framebuffer
 * with the given color, which is useful for some simple pipe crc based tests.
 *
 * Returns:
 * The kms id of the created framebuffer. Any failure is fatal (asserted),
 * so a valid id is always returned.
 */
unsigned int igt_create_color_fb(int fd, int width, int height, uint32_t format, uint64_t tiling, double r, double g, double b, struct igt_fb *fb /* out */)
{
	unsigned int fb_id;
	cairo_t *cr;

	fb_id = igt_create_fb(fd, width, height, format, tiling, fb);
	igt_assert(fb_id);

	cr = igt_get_cairo_ctx(fd, fb);
	igt_paint_color(cr, 0, 0, width, height, r, g, b);
	/* Make sure none of the cairo drawing operations failed. */
	igt_assert(cairo_status(cr) == 0);
	cairo_destroy(cr);

	return fb_id;
}
/* Check reads with a buffer shorter than one event: the short read must
 * return 0 without consuming anything, and the queued events must still be
 * readable afterwards with a big-enough buffer. */
static void test_short_buffer(int in, int nonblock)
{
	char buffer[1024]; /* events are typically 32 bytes */
	int fd = setup(in, nonblock);

	generate_event(fd);
	generate_event(fd);

	wait_for_event(fd);

	/* Watchdog: fail via SIGALRM instead of hanging if a read blocks. */
	alarm(3);

	igt_assert_eq(read(fd, buffer, 4), 0);	/* too small for an event */
	igt_assert(read(fd, buffer, 40) > 0);	/* first queued event */
	igt_assert(read(fd, buffer, 40) > 0);	/* second queued event */

	teardown(fd);
}
/* Passing unknown flag bits to GEM_CREATE must be rejected by the kernel. */
static void invalid_flag_test(int fd)
{
	int ret;

	gem_require_stolen_support(fd);

	create.handle = 0;
	create.size = PAGE_SIZE;
	create.flags = ~I915_CREATE_PLACEMENT_STOLEN;
	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CREATE, &create);
	/* Fix: drmIoctl() returns 0 on success and -1 on failure, so the
	 * original `ret <= 0` also accepted a (wrongly) successful ioctl. */
	igt_assert(ret == -1);

	create.flags = ~0;
	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CREATE, &create);
	igt_assert(ret == -1);
}
/* Light up the pipe with fbs[0] as the front buffer and show the cursor. */
static void set_mode(void)
{
	front_fb = &fbs[0];
	back_fb = &fbs[1];

	igt_assert(drmModeSetCrtc(drm.fd, modeset.crtc_id, front_fb->fb_id,
				  0, 0, &modeset.connector_id, 1,
				  modeset.mode) == 0);

	/* TODO: it seems we need a cursor in order to reach PC7 on BDW. Why? */
	igt_assert(drmModeMoveCursor(drm.fd, modeset.crtc_id, 0, 0) == 0);
	igt_assert(drmModeSetCursor(drm.fd, modeset.crtc_id,
				    cursor.gem_handle, cursor.width,
				    cursor.height) == 0);
}
/* Worker thread: blits between random pairs of shared bos using random
 * contexts, to exercise concurrent context/bo usage.
 *
 * Each thread shuffles private copies of the global bo and ctx arrays, so
 * the shared arrays themselves are never modified here. */
static void *thread(void *bufmgr)
{
	struct intel_batchbuffer *batch;
	dri_bo **bo;
	drm_intel_context **ctx;
	int c, b;

	batch = intel_batchbuffer_alloc(bufmgr, devid);

	bo = malloc(num_bo * sizeof(dri_bo *));
	igt_assert(bo);
	memcpy(bo, all_bo, num_bo * sizeof(dri_bo *));

	ctx = malloc(num_ctx * sizeof(drm_intel_context *));
	igt_assert(ctx);
	memcpy(ctx, all_ctx, num_ctx * sizeof(drm_intel_context *));

	igt_permute_array(ctx, num_ctx, xchg_ptr);

	for (c = 0; c < ctx_per_thread; c++) {
		igt_permute_array(bo, num_bo, xchg_ptr);
		for (b = 0; b < bo_per_ctx; b++) {
			struct igt_buf src, dst;

			/* NOTE(review): only bo/stride/size/tiling are set;
			 * any remaining igt_buf fields stay uninitialized —
			 * presumably render_copy() ignores them, verify. */
			src.bo = bo[b % num_bo];
			src.stride = 64;
			src.size = OBJECT_SIZE;
			src.tiling = I915_TILING_NONE;

			dst.bo = bo[(b+1) % num_bo];
			dst.stride = 64;
			dst.size = OBJECT_SIZE;
			dst.tiling = I915_TILING_NONE;

			render_copy(batch, ctx[c % num_ctx],
				    &src, 0, 0, 16, 16, &dst, 0, 0);
		}
	}

	free(ctx);
	free(bo);
	intel_batchbuffer_free(batch);

	return NULL;
}
static void plane_primary(struct kms_atomic_crtc_state *crtc, struct kms_atomic_plane_state *plane_old) { struct drm_mode_modeinfo *mode = crtc->mode.data; struct kms_atomic_plane_state plane = *plane_old; uint32_t format = plane_get_igt_format(&plane); drmModeAtomicReq *req = drmModeAtomicAlloc(); uint32_t *connectors; int num_connectors; struct igt_fb fb; int i; connectors = calloc(crtc->state->num_connectors, sizeof(*connectors)); igt_assert(connectors); for (i = 0; i < crtc->state->num_connectors; i++) { if (crtc->state->connectors[i].crtc_id == crtc->obj) connectors[num_connectors++] = crtc->state->connectors[i].obj; } igt_require(format != 0); plane.src_x = 0; plane.src_y = 0; plane.src_w = mode->hdisplay << 16; plane.src_h = mode->vdisplay << 16; plane.crtc_x = 0; plane.crtc_y = 0; plane.crtc_w = mode->hdisplay; plane.crtc_h = mode->vdisplay; plane.crtc_id = crtc->obj; plane.fb_id = igt_create_pattern_fb(plane.state->desc->fd, plane.crtc_w, plane.crtc_h, format, I915_TILING_NONE, &fb); /* Flip the primary plane using the atomic API, and double-check * state is what we think it should be. */ crtc_commit_atomic(crtc, &plane, req, ATOMIC_RELAX_NONE); /* Restore the primary plane and check the state matches the old. */ crtc_commit_atomic(crtc, plane_old, req, ATOMIC_RELAX_NONE); /* Re-enable the plane through the legacy CRTC/primary-plane API, and * verify through atomic. */ crtc_commit_legacy(crtc, &plane, CRTC_RELAX_MODE); /* Restore the plane to its original settings through the legacy CRTC * API, and verify through atomic. */ crtc_commit_legacy(crtc, plane_old, CRTC_RELAX_MODE); /* Finally, restore to the original state. */ crtc_commit_atomic(crtc, plane_old, req, ATOMIC_RELAX_NONE); drmModeAtomicFree(req); }
/* Open a fresh file descriptor to the DRM card node that @_fd refers to,
 * derived from the device minor number. */
static int reopen(int _fd)
{
	char path[128];
	struct stat st;

	igt_assert(fstat(_fd, &st) == 0);
	sprintf(path, "/dev/dri/card%u", (unsigned)(st.st_rdev & 0x7f));

	return open(path, O_RDWR);
}
static void dontneed_before_pwrite(void) { int fd = drm_open_driver(DRIVER_INTEL); uint32_t buf[] = { MI_BATCH_BUFFER_END, 0 }; struct drm_i915_gem_pwrite gem_pwrite; gem_pwrite.handle = gem_create(fd, OBJECT_SIZE); gem_pwrite.offset = 0; gem_pwrite.size = sizeof(buf); gem_pwrite.data_ptr = (uintptr_t)buf; gem_madvise(fd, gem_pwrite.handle, I915_MADV_DONTNEED); igt_assert(drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &gem_pwrite)); igt_assert(errno == EFAULT); gem_close(fd, gem_pwrite.handle); close(fd); }
/* Map the whole object read/write and return the pointer; asserts on
 * mapping failure. */
static void *mmap_bo(int fd, uint32_t handle)
{
	void *map = gem_mmap(fd, handle, OBJECT_SIZE,
			     PROT_READ | PROT_WRITE);

	igt_assert(map != MAP_FAILED);
	return map;
}
/* Read one 64-bit MSR through the msr device node; the file offset selects
 * the register address. */
static uint64_t msr_read(uint32_t addr)
{
	uint64_t value;
	int len;

	len = pread(msr_fd, &value, sizeof(uint64_t), addr);
	igt_assert(len == sizeof(value));

	return value;
}