/*
 * Verify that the kernel's current CRTC state matches @crtc, both through the
 * legacy GetCrtc ioctl and through atomic property reads.
 *
 * @crtc: the expected CRTC state (test-side bookkeeping)
 * @primary: the expected state of the primary plane on this CRTC
 * @relax: CRTC_RELAX_MODE may be set to tolerate a different MODE_ID blob
 *         whose contents are identical (legacy SetCrtc can create a new blob)
 */
static void crtc_check_current_state(struct kms_atomic_crtc_state *crtc,
				     struct kms_atomic_plane_state *primary,
				     enum kms_atomic_check_relax relax)
{
	struct kms_atomic_crtc_state crtc_kernel;
	drmModeCrtcPtr legacy;

	legacy = drmModeGetCrtc(crtc->state->desc->fd, crtc->obj);
	igt_assert(legacy);

	igt_assert_eq_u32(legacy->crtc_id, crtc->obj);
	/* Legacy x/y are in whole pixels; atomic src_x/src_y are 16.16 fixed
	 * point, hence the shift. */
	igt_assert_eq_u32(legacy->x, primary->src_x >> 16);
	igt_assert_eq_u32(legacy->y, primary->src_y >> 16);

	/* An inactive CRTC must not report a scanout buffer. */
	if (crtc->active)
		igt_assert_eq_u32(legacy->buffer_id, primary->fb_id);
	else
		igt_assert_eq_u32(legacy->buffer_id, 0);

	if (legacy->mode_valid) {
		/* NOTE(review): redundant given the branch condition; kept
		 * as-is to avoid changing behavior. */
		igt_assert_neq(legacy->mode_valid, 0);
		igt_assert_eq(crtc->mode.len,
			      sizeof(struct drm_mode_modeinfo));
		/* do_or_die asserts on a non-zero result, i.e. the two modes
		 * must compare byte-equal. */
		do_or_die(memcmp(&legacy->mode, crtc->mode.data,
				 crtc->mode.len));
		igt_assert_eq(legacy->width, legacy->mode.hdisplay);
		igt_assert_eq(legacy->height, legacy->mode.vdisplay);
	} else {
		igt_assert_eq(legacy->mode_valid, 0);
	}

	/* Snapshot our expected state, then overwrite it with what the kernel
	 * reports via atomic properties. memcpy (not struct assignment) so
	 * padding bytes match for the whole-struct memcmp below. */
	memcpy(&crtc_kernel, crtc, sizeof(crtc_kernel));
	crtc_get_current_state(&crtc_kernel);

	if (crtc_kernel.mode.id != 0)
		igt_assert_eq(crtc_kernel.mode.len,
			      sizeof(struct drm_mode_modeinfo));

	/* Optionally relax the check for MODE_ID: using the legacy SetCrtc
	 * API can potentially change MODE_ID even if the mode itself remains
	 * unchanged. Only relax when both IDs are valid and the blob contents
	 * are byte-identical. */
	if (((relax & CRTC_RELAX_MODE) &&
	     (crtc_kernel.mode.id != crtc->mode.id &&
	      crtc_kernel.mode.id != 0 &&
	      crtc->mode.id != 0)) &&
	    memcmp(crtc_kernel.mode.data, crtc->mode.data,
		   sizeof(struct drm_mode_modeinfo)) == 0) {
		crtc_kernel.mode.id = crtc->mode.id;
		crtc_kernel.mode.data = crtc->mode.data;
	}

	/* Kernel state must now be byte-identical to our expected state. */
	do_or_die(memcmp(&crtc_kernel, crtc, sizeof(crtc_kernel)));

	drmModeFreeCrtc(legacy);
}
/** * igt_create_fb_with_bo_size: * @fd: open i915 drm file descriptor * @width: width of the framebuffer in pixel * @height: height of the framebuffer in pixel * @format: drm fourcc pixel format code * @tiling: tiling layout of the framebuffer (as framebuffer modifier) * @fb: pointer to an #igt_fb structure * @bo_size: size of the backing bo (0 for minimum needed size) * * This function allocates a gem buffer object suitable to back a framebuffer * with the requested properties and then wraps it up in a drm framebuffer * object of the requested size. All metadata is stored in @fb. * * The backing storage of the framebuffer is filled with all zeros, i.e. black * for rgb pixel formats. * * Returns: * The kms id of the created framebuffer. */ unsigned int igt_create_fb_with_bo_size(int fd, int width, int height, uint32_t format, uint64_t tiling, struct igt_fb *fb, unsigned bo_size) { uint32_t fb_id; int bpp; memset(fb, 0, sizeof(*fb)); bpp = igt_drm_format_to_bpp(format); igt_debug("%s(width=%d, height=%d, format=0x%x [bpp=%d], tiling=0x%"PRIx64", size=%d)\n", __func__, width, height, format, bpp, tiling, bo_size); do_or_die(create_bo_for_fb(fd, width, height, bpp, tiling, bo_size, &fb->gem_handle, &fb->size, &fb->stride)); igt_debug("%s(handle=%d, pitch=%d)\n", __func__, fb->gem_handle, fb->stride); if (tiling != LOCAL_DRM_FORMAT_MOD_NONE && tiling != LOCAL_I915_FORMAT_MOD_X_TILED) { do_or_die(__kms_addfb(fd, fb->gem_handle, width, height, fb->stride, format, tiling, LOCAL_DRM_MODE_FB_MODIFIERS, &fb_id)); } else { uint32_t handles[4]; uint32_t pitches[4]; uint32_t offsets[4]; memset(handles, 0, sizeof(handles)); memset(pitches, 0, sizeof(pitches)); memset(offsets, 0, sizeof(offsets)); handles[0] = fb->gem_handle; pitches[0] = fb->stride; do_or_die(drmModeAddFB2(fd, width, height, format, handles, pitches, offsets, &fb_id, 0)); } fb->width = width; fb->height = height; fb->tiling = tiling; fb->drm_format = format; fb->fb_id = fb_id; return fb_id; }
static void crtc_invalid_params(struct kms_atomic_crtc_state *crtc_old, struct kms_atomic_plane_state *plane, struct kms_atomic_connector_state *conn) { struct kms_atomic_crtc_state crtc = *crtc_old; drmModeAtomicReq *req = drmModeAtomicAlloc(); igt_assert(req); /* Pass a series of invalid object IDs for the mode ID. */ crtc.mode.id = plane->obj; crtc_commit_atomic_err(&crtc, plane, crtc_old, plane, req, ATOMIC_RELAX_NONE, EINVAL); crtc.mode.id = crtc.obj; crtc_commit_atomic_err(&crtc, plane, crtc_old, plane, req, ATOMIC_RELAX_NONE, EINVAL); crtc.mode.id = conn->obj; crtc_commit_atomic_err(&crtc, plane, crtc_old, plane, req, ATOMIC_RELAX_NONE, EINVAL); crtc.mode.id = plane->fb_id; crtc_commit_atomic_err(&crtc, plane, crtc_old, plane, req, ATOMIC_RELAX_NONE, EINVAL); crtc.mode.id = crtc_old->mode.id; crtc_commit_atomic(&crtc, plane, req, ATOMIC_RELAX_NONE); /* Create a blob which is the wrong size to be a valid mode. */ do_or_die(drmModeCreatePropertyBlob(crtc.state->desc->fd, crtc.mode.data, sizeof(struct drm_mode_modeinfo) - 1, &crtc.mode.id)); crtc_commit_atomic_err(&crtc, plane, crtc_old, plane, req, ATOMIC_RELAX_NONE, EINVAL); do_or_die(drmModeCreatePropertyBlob(crtc.state->desc->fd, crtc.mode.data, sizeof(struct drm_mode_modeinfo) + 1, &crtc.mode.id)); crtc_commit_atomic_err(&crtc, plane, crtc_old, plane, req, ATOMIC_RELAX_NONE, EINVAL); /* Restore the CRTC and check the state matches the old. */ crtc_commit_atomic(crtc_old, plane, req, ATOMIC_RELAX_NONE); drmModeAtomicFree(req); }
static void plane_check_current_state(struct kms_atomic_plane_state *plane, enum kms_atomic_check_relax relax) { drmModePlanePtr legacy; struct kms_atomic_plane_state plane_kernel; legacy = drmModeGetPlane(plane->state->desc->fd, plane->obj); igt_assert(legacy); igt_assert_eq_u32(legacy->crtc_id, plane->crtc_id); if (!(relax & PLANE_RELAX_FB)) igt_assert_eq_u32(legacy->fb_id, plane->fb_id); memcpy(&plane_kernel, plane, sizeof(plane_kernel)); plane_get_current_state(&plane_kernel); /* Legacy cursor ioctls create their own, unknowable, internal * framebuffer which we can't reason about. */ if (relax & PLANE_RELAX_FB) plane_kernel.fb_id = plane->fb_id; do_or_die(memcmp(&plane_kernel, plane, sizeof(plane_kernel))); drmModeFreePlane(legacy); }
/* XXX: Checking this repeatedly actually hangs the GPU. I have literally no * idea why. */ static void connector_check_current_state(struct kms_atomic_connector_state *connector) { struct kms_atomic_connector_state connector_kernel; drmModeConnectorPtr legacy; uint32_t crtc_id; legacy = drmModeGetConnectorCurrent(connector->state->desc->fd, connector->obj); igt_assert(legacy); if (legacy->encoder_id) { drmModeEncoderPtr legacy_enc; legacy_enc = drmModeGetEncoder(connector->state->desc->fd, legacy->encoder_id); igt_assert(legacy_enc); crtc_id = legacy_enc->crtc_id; drmModeFreeEncoder(legacy_enc); } else { crtc_id = 0; } igt_assert_eq_u32(crtc_id, connector->crtc_id); memcpy(&connector_kernel, connector, sizeof(connector_kernel)); connector_get_current_state(&connector_kernel); do_or_die(memcmp(&connector_kernel, connector, sizeof(connector_kernel))); drmModeFreeConnector(legacy); }
/* Upload the accumulated batch contents and submit them for execution on
 * @ring, then reset the batchbuffer for reuse. A zero-length batch is a
 * no-op. */
void intel_batchbuffer_flush_on_ring(struct intel_batchbuffer *batch, int ring)
{
	unsigned int bytes_used = flush_on_ring_common(batch, ring);

	if (!bytes_used)
		return;

	/* Copy the CPU-side buffer into the bo backing the batch. */
	do_or_die(drm_intel_bo_subdata(batch->bo, 0, bytes_used,
				       batch->buffer));
	batch->ptr = NULL;

	do_or_die(drm_intel_bo_mrb_exec(batch->bo, bytes_used,
					NULL, 0, 0, ring));

	intel_batchbuffer_reset(batch);
}
static void crtc_commit_legacy(struct kms_atomic_crtc_state *crtc, struct kms_atomic_plane_state *plane, enum kms_atomic_check_relax relax) { drmModeObjectPropertiesPtr props; uint32_t *connectors; int num_connectors = 0; int i; if (!crtc->active) { do_or_die(drmModeSetCrtc(crtc->state->desc->fd, crtc->obj, 0, 0, 0, NULL, 0, NULL)); return; } connectors = calloc(crtc->state->num_connectors, sizeof(*connectors)); igt_assert(connectors); igt_assert_neq_u32(crtc->mode.id, 0); for (i = 0; i < crtc->state->num_connectors; i++) { struct kms_atomic_connector_state *connector = &crtc->state->connectors[i]; if (connector->crtc_id != crtc->obj) continue; connectors[num_connectors++] = connector->obj; } do_or_die(drmModeSetCrtc(crtc->state->desc->fd, crtc->obj, plane->fb_id, plane->src_x >> 16, plane->src_y >> 16, (num_connectors) ? connectors : NULL, num_connectors, crtc->mode.data)); /* When doing a legacy commit, the core may update MODE_ID to be a new * blob implicitly created by the legacy request. Hence we backfill * the value in the state object to ensure they match. */ props = drmModeObjectGetProperties(crtc->state->desc->fd, crtc->obj, DRM_MODE_OBJECT_CRTC); igt_assert(props); for (i = 0; i < props->count_props; i++) { if (props->props[i] != crtc->state->desc->props_crtc[CRTC_MODE_ID]) continue; crtc->mode.id = props->prop_values[i]; break; } drmModeFreeObjectProperties(props); crtc_check_current_state(crtc, plane, relax); plane_check_current_state(plane, relax); }
/* Make a fresh property blob with the same contents as @id_orig and return
 * its ID; the caller owns the new blob. Useful because blobs referenced only
 * by current state may vanish when the state changes. */
static uint32_t blob_duplicate(int fd, uint32_t id_orig)
{
	drmModePropertyBlobPtr src_blob;
	uint32_t id_copy;

	src_blob = drmModeGetPropertyBlob(fd, id_orig);
	igt_assert(src_blob);

	do_or_die(drmModeCreatePropertyBlob(fd, src_blob->data,
					    src_blob->length, &id_copy));
	drmModeFreePropertyBlob(src_blob);

	return id_copy;
}
/* Apply @plane's state through the legacy SetPlane ioctl, then verify the
 * kernel now reports exactly that state. */
static void plane_commit_legacy(struct kms_atomic_plane_state *plane,
				enum kms_atomic_check_relax relax)
{
	int fd = plane->state->desc->fd;

	do_or_die(drmModeSetPlane(fd, plane->obj, plane->crtc_id,
				  plane->fb_id, 0,
				  plane->crtc_x, plane->crtc_y,
				  plane->crtc_w, plane->crtc_h,
				  plane->src_x, plane->src_y,
				  plane->src_w, plane->src_h));
	plane_check_current_state(plane, relax);
}
/* Query the tiling and swizzle mode of the gem buffer object @handle,
 * skipping the test (igt_require) if the CPU-visible and physical swizzle
 * modes differ. */
static void get_tiling(int fd, uint32_t handle, uint32_t *tiling, uint32_t *swizzle)
{
	/* Local mirror of the kernel's extended get_tiling argument struct
	 * including phys_swizzle_mode — presumably defined here because the
	 * system libdrm headers may predate it; TODO confirm. */
	struct drm_i915_gem_get_tiling2 {
		uint32_t handle;
		uint32_t tiling_mode;
		uint32_t swizzle_mode;
		uint32_t phys_swizzle_mode;
	} arg;
#define DRM_IOCTL_I915_GEM_GET_TILING2 DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling2)
	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	do_or_die(drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_TILING2, &arg));

	/* Skip unless the reported swizzle matches the physical layout. */
	igt_require(arg.phys_swizzle_mode == arg.swizzle_mode);

	*tiling = arg.tiling_mode;
	*swizzle = arg.swizzle_mode;
}
int main(int argc, char **argv) { uint32_t batch[2] = {MI_BATCH_BUFFER_END}; uint32_t handle; int fd, i; fd = drm_open_any(); handle = gem_create(fd, 4096); gem_write(fd, handle, 0, batch, sizeof(batch)); do_or_die(exec(fd, handle, NORMAL)); fail(exec(fd, handle, BROKEN)); if (exec(fd, handle, USE_LUT)) return 77; do_or_die(exec(fd, handle, USE_LUT)); fail(exec(fd, handle, USE_LUT | BROKEN)); for (i = 2; i <= SLOW_QUICK(65536, 8); i *= 2) { if (many_exec(fd, handle, i+1, i+1, NORMAL) == -1 && errno == ENOSPC) break; pass(many_exec(fd, handle, i-1, i-1, NORMAL)); pass(many_exec(fd, handle, i-1, i, NORMAL)); pass(many_exec(fd, handle, i-1, i+1, NORMAL)); pass(many_exec(fd, handle, i, i-1, NORMAL)); pass(many_exec(fd, handle, i, i, NORMAL)); pass(many_exec(fd, handle, i, i+1, NORMAL)); pass(many_exec(fd, handle, i+1, i-1, NORMAL)); pass(many_exec(fd, handle, i+1, i, NORMAL)); pass(many_exec(fd, handle, i+1, i+1, NORMAL)); fail(many_exec(fd, handle, i-1, i-1, NORMAL | BROKEN)); fail(many_exec(fd, handle, i-1, i, NORMAL | BROKEN)); fail(many_exec(fd, handle, i-1, i+1, NORMAL | BROKEN)); fail(many_exec(fd, handle, i, i-1, NORMAL | BROKEN)); fail(many_exec(fd, handle, i, i, NORMAL | BROKEN)); fail(many_exec(fd, handle, i, i+1, NORMAL | BROKEN)); fail(many_exec(fd, handle, i+1, i-1, NORMAL | BROKEN)); fail(many_exec(fd, handle, i+1, i, NORMAL | BROKEN)); fail(many_exec(fd, handle, i+1, i+1, NORMAL | BROKEN)); pass(many_exec(fd, handle, i-1, i-1, USE_LUT)); pass(many_exec(fd, handle, i-1, i, USE_LUT)); pass(many_exec(fd, handle, i-1, i+1, USE_LUT)); pass(many_exec(fd, handle, i, i-1, USE_LUT)); pass(many_exec(fd, handle, i, i, USE_LUT)); pass(many_exec(fd, handle, i, i+1, USE_LUT)); pass(many_exec(fd, handle, i+1, i-1, USE_LUT)); pass(many_exec(fd, handle, i+1, i, USE_LUT)); pass(many_exec(fd, handle, i+1, i+1, USE_LUT)); fail(many_exec(fd, handle, i-1, i-1, USE_LUT | BROKEN)); fail(many_exec(fd, handle, i-1, i, USE_LUT | BROKEN)); fail(many_exec(fd, handle, i-1, i+1, 
USE_LUT | BROKEN)); fail(many_exec(fd, handle, i, i-1, USE_LUT | BROKEN)); fail(many_exec(fd, handle, i, i, USE_LUT | BROKEN)); fail(many_exec(fd, handle, i, i+1, USE_LUT | BROKEN)); fail(many_exec(fd, handle, i+1, i-1, USE_LUT | BROKEN)); fail(many_exec(fd, handle, i+1, i, USE_LUT | BROKEN)); fail(many_exec(fd, handle, i+1, i+1, USE_LUT | BROKEN)); } return 0; }
/* Assert that @fd has no pending input: a zero-timeout poll must return 0
 * ready descriptors (do_or_die asserts the result is zero). */
static void assert_empty(int fd)
{
	struct pollfd pfd = {
		.fd = fd,
		.events = POLLIN,
	};

	do_or_die(poll(&pfd, 1, 0));
}
/*
 * Run one render test: set a mode with a framebuffer of @format on the given
 * connector configuration, paint it either directly or via a GPU blit, and
 * report PASSED. Returns 0 on success, -1 when the configuration had to be
 * skipped (fb creation or modeset failed).
 */
static int test_format(const char *test_name,
		       struct kmstest_connector_config *cconf,
		       drmModeModeInfo *mode, uint32_t format,
		       enum test_flags flags)
{
	int width;
	int height;
	struct igt_fb fb[2];
	char *mode_format_str;
	char *cconf_str;
	int ret;

	ret = asprintf(&mode_format_str, "%s @ %dHz / %s", mode->name,
		       mode->vrefresh, igt_format_str(format));
	igt_assert_lt(0, ret);

	ret = asprintf(&cconf_str, "pipe %s, encoder %s, connector %s",
		       kmstest_pipe_name(cconf->pipe),
		       kmstest_encoder_type_str(cconf->encoder->encoder_type),
		       kmstest_connector_type_str(cconf->connector->connector_type));
	igt_assert_lt(0, ret);

	igt_info("Beginning test %s with %s on %s\n",
		 test_name, mode_format_str, cconf_str);

	width = mode->hdisplay;
	height = mode->vdisplay;

	if (!igt_create_fb(drm_fd, width, height, format,
			   LOCAL_DRM_FORMAT_MOD_NONE, &fb[0]))
		goto err1;

	if (!igt_create_fb(drm_fd, width, height, format,
			   LOCAL_DRM_FORMAT_MOD_NONE, &fb[1]))
		goto err2;

	/* Previously a SetCrtc failure jumped to err2 and leaked fb[1];
	 * unwind through err3 so both framebuffers are removed. */
	if (drmModeSetCrtc(drm_fd, cconf->crtc->crtc_id, fb[0].fb_id,
			   0, 0, &cconf->connector->connector_id, 1, mode))
		goto err3;
	do_or_die(drmModePageFlip(drm_fd, cconf->crtc->crtc_id,
				  fb[0].fb_id, 0, NULL));
	sleep(2);

	if (flags & TEST_DIRECT_RENDER) {
		paint_fb(&fb[0], test_name, mode_format_str, cconf_str);
	} else if (flags & TEST_GPU_BLIT) {
		/* Paint into the back fb, then blit it onto the scanout fb. */
		paint_fb(&fb[1], test_name, mode_format_str, cconf_str);
		gpu_blit(&fb[0], &fb[1]);
	}
	sleep(5);

	igt_info("Test %s with %s on %s: PASSED\n",
		 test_name, mode_format_str, cconf_str);
	free(mode_format_str);
	free(cconf_str);

	igt_remove_fb(drm_fd, &fb[1]);
	igt_remove_fb(drm_fd, &fb[0]);

	return 0;

err3:
	igt_remove_fb(drm_fd, &fb[1]);
err2:
	igt_remove_fb(drm_fd, &fb[0]);
err1:
	igt_info("Test %s with %s on %s: SKIPPED\n",
		 test_name, mode_format_str, cconf_str);
	free(mode_format_str);
	free(cconf_str);

	return -1;
}
/**
 * igt_remove_fb:
 * @fd: open i915 drm file descriptor
 * @fb: pointer to an #igt_fb structure
 *
 * This function releases all resources allocated in igt_create_fb() for @fb.
 * Note that if this framebuffer is still in use on a primary plane the kernel
 * will disable the corresponding crtc.
 */
void igt_remove_fb(int fd, struct igt_fb *fb)
{
	/* Drop the cairo wrapper first (if one was ever created for this fb —
	 * NOTE(review): assumes cairo_surface_destroy tolerates a NULL
	 * surface when no drawing happened; confirm against cairo docs). */
	cairo_surface_destroy(fb->cairo_surface);
	/* Remove the kms framebuffer object, then close the backing bo. */
	do_or_die(drmModeRmFB(fd, fb->fb_id));
	gem_close(fd, fb->gem_handle);
}
/* Simulates SNA behaviour using negative self-relocations for
 * STATE_BASE_ADDRESS command packets. If they wrap around (to values greater
 * than the total size of the GTT), the GPU will hang.
 * See https://bugs.freedesktop.org/show_bug.cgi?id=78533
 */
static int negative_reloc(int fd, unsigned flags)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 gem_exec[2];
	struct drm_i915_gem_relocation_entry gem_reloc[1000];
	uint64_t gtt_max = get_page_table_size(fd);
	uint32_t buf[1024] = {MI_BATCH_BUFFER_END};
	int i;

#define BIAS (256*1024)

	igt_require(intel_gen(intel_get_drm_devid(fd)) >= 7);

	/* First submission: execute two trivial batches so the kernel
	 * reports an offset for gem_exec[0] (via the reloc on gem_exec[1]
	 * that targets it). */
	memset(gem_exec, 0, sizeof(gem_exec));
	gem_exec[0].handle = gem_create(fd, 4096);
	gem_write(fd, gem_exec[0].handle, 0, buf, 8);

	gem_reloc[0].offset = 1024;
	gem_reloc[0].delta = 0;
	gem_reloc[0].target_handle = gem_exec[0].handle;
	gem_reloc[0].read_domains = I915_GEM_DOMAIN_COMMAND;

	gem_exec[1].handle = gem_create(fd, 4096);
	gem_write(fd, gem_exec[1].handle, 0, buf, 8);
	gem_exec[1].relocation_count = 1;
	gem_exec[1].relocs_ptr = (uintptr_t)gem_reloc;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)gem_exec;
	execbuf.buffer_count = 2;
	execbuf.batch_len = 8;

	do_or_die(drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf));
	gem_close(fd, gem_exec[1].handle);

	igt_info("Found offset %lld for 4k batch\n",
		 (long long)gem_exec[0].offset);
	/*
	 * Ideally we'd like to be able to control where the kernel is going to
	 * place the buffer. We don't SKIP here because it causes the test
	 * to "randomly" flip-flop between the SKIP and PASS states.
	 */
	if (gem_exec[0].offset < BIAS) {
		igt_info("Offset is below BIAS, not testing anything\n");
		return 0;
	}

	/* Second submission: fill the batch with negative self-relocations
	 * (delta stored as two's complement in the unsigned field) so each
	 * patched value lands below the batch's own offset. In LUT mode the
	 * target is addressed by buffer index 0 rather than handle. */
	memset(gem_reloc, 0, sizeof(gem_reloc));
	for (i = 0; i < sizeof(gem_reloc)/sizeof(gem_reloc[0]); i++) {
		gem_reloc[i].offset = 8 + 4*i;
		gem_reloc[i].delta = -BIAS*i/1024;
		gem_reloc[i].target_handle = flags & USE_LUT ?
			0 : gem_exec[0].handle;
		gem_reloc[i].read_domains = I915_GEM_DOMAIN_COMMAND;
	}
	gem_exec[0].relocation_count = sizeof(gem_reloc)/sizeof(gem_reloc[0]);
	gem_exec[0].relocs_ptr = (uintptr_t)gem_reloc;

	execbuf.buffer_count = 1;
	execbuf.flags = flags & USE_LUT;
	do_or_die(drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf));

	igt_info("Batch is now at offset %lld\n",
		 (long long)gem_exec[0].offset);

	/* Read back the patched batch and verify no relocated value wrapped
	 * around past the end of the GTT. */
	gem_read(fd, gem_exec[0].handle, 0, buf, sizeof(buf));
	gem_close(fd, gem_exec[0].handle);

	for (i = 0; i < sizeof(gem_reloc)/sizeof(gem_reloc[0]); i++)
		igt_assert(buf[2 + i] < gtt_max);

	return 0;
}
static void plane_cursor(struct kms_atomic_crtc_state *crtc, struct kms_atomic_plane_state *plane_old) { struct drm_mode_modeinfo *mode = crtc->mode.data; struct kms_atomic_plane_state plane = *plane_old; drmModeAtomicReq *req = drmModeAtomicAlloc(); struct igt_fb fb; uint64_t width, height; igt_assert(req); /* Any kernel new enough for atomic, also has the cursor size caps. */ do_or_die(drmGetCap(plane.state->desc->fd, DRM_CAP_CURSOR_WIDTH, &width)); do_or_die(drmGetCap(plane.state->desc->fd, DRM_CAP_CURSOR_HEIGHT, &height)); plane.src_x = 0; plane.src_y = 0; plane.src_w = width << 16; plane.src_h = height << 16; plane.crtc_x = mode->hdisplay / 2; plane.crtc_y = mode->vdisplay / 2; plane.crtc_w = width; plane.crtc_h = height; plane.crtc_id = crtc->obj; plane.fb_id = igt_create_color_fb(plane.state->desc->fd, width, height, DRM_FORMAT_ARGB8888, LOCAL_DRM_FORMAT_MOD_NONE, 0.0, 0.0, 0.0, &fb); igt_assert_neq_u32(plane.fb_id, 0); /* Flip the cursor plane using the atomic API, and double-check * state is what we think it should be. */ plane_commit_atomic(&plane, req, ATOMIC_RELAX_NONE); /* Restore the cursor plane and check the state matches the old. */ plane_commit_atomic(plane_old, req, ATOMIC_RELAX_NONE); /* Re-enable the plane through the legacy cursor API, and verify * through atomic. */ do_or_die(drmModeMoveCursor(plane.state->desc->fd, plane.crtc_id, plane.crtc_x, plane.crtc_y)); do_or_die(drmModeSetCursor(plane.state->desc->fd, plane.crtc_id, fb.gem_handle, width, height)); plane_check_current_state(&plane, PLANE_RELAX_FB); /* Wiggle. */ plane.crtc_x -= 16; plane.crtc_y -= 16; do_or_die(drmModeMoveCursor(plane.state->desc->fd, plane.crtc_id, plane.crtc_x, plane.crtc_y)); plane_check_current_state(&plane, PLANE_RELAX_FB); /* Restore the plane to its original settings through the legacy cursor * API, and verify through atomic. 
*/ do_or_die(drmModeSetCursor2(plane.state->desc->fd, plane.crtc_id, 0, 0, 0, 0, 0)); plane_check_current_state(plane_old, ATOMIC_RELAX_NONE); /* Finally, restore to the original state. */ plane_commit_atomic(plane_old, req, ATOMIC_RELAX_NONE); drmModeAtomicFree(req); }
static void atomic_setup(struct kms_atomic_state *state) { struct kms_atomic_desc *desc = state->desc; drmModeResPtr res; drmModePlaneResPtr res_plane; int i; desc->fd = drm_open_driver_master(DRIVER_INTEL); igt_assert_fd(desc->fd); do_or_die(drmSetClientCap(desc->fd, DRM_CLIENT_CAP_ATOMIC, 1)); res = drmModeGetResources(desc->fd); res_plane = drmModeGetPlaneResources(desc->fd); igt_assert(res); igt_assert(res_plane); igt_assert_lt(0, res->count_crtcs); state->num_crtcs = res->count_crtcs; state->crtcs = calloc(state->num_crtcs, sizeof(*state->crtcs)); igt_assert(state->crtcs); igt_assert_lt(0, res_plane->count_planes); state->num_planes = res_plane->count_planes; state->planes = calloc(state->num_planes, sizeof(*state->planes)); igt_assert(state->planes); igt_assert_lt(0, res->count_connectors); state->num_connectors = res->count_connectors; state->connectors = calloc(state->num_connectors, sizeof(*state->connectors)); igt_assert(state->connectors); fill_obj_props(desc->fd, res->crtcs[0], DRM_MODE_OBJECT_CRTC, NUM_CRTC_PROPS, crtc_prop_names, desc->props_crtc); fill_obj_props(desc->fd, res_plane->planes[0], DRM_MODE_OBJECT_PLANE, NUM_PLANE_PROPS, plane_prop_names, desc->props_plane); fill_obj_prop_map(desc->fd, res_plane->planes[0], DRM_MODE_OBJECT_PLANE, "type", NUM_PLANE_TYPE_PROPS, plane_type_prop_names, desc->props_plane_type); fill_obj_props(desc->fd, res->connectors[0], DRM_MODE_OBJECT_CONNECTOR, NUM_CONNECTOR_PROPS, connector_prop_names, desc->props_connector); for (i = 0; i < state->num_crtcs; i++) { struct kms_atomic_crtc_state *crtc = &state->crtcs[i]; crtc->state = state; crtc->obj = res->crtcs[i]; crtc->idx = i; crtc_get_current_state(crtc); /* The blob pointed to by MODE_ID could well be transient, * and lose its last reference as we switch away from it. * Duplicate the blob here so we have a reference we know we * own. 
*/ if (crtc->mode.id != 0) crtc->mode.id = blob_duplicate(desc->fd, crtc->mode.id); } for (i = 0; i < state->num_planes; i++) { drmModePlanePtr plane = drmModeGetPlane(desc->fd, res_plane->planes[i]); igt_assert(plane); state->planes[i].state = state; state->planes[i].obj = res_plane->planes[i]; state->planes[i].crtc_mask = plane->possible_crtcs; plane_get_current_state(&state->planes[i]); } for (i = 0; i < state->num_connectors; i++) { state->connectors[i].state = state; state->connectors[i].obj = res->connectors[i]; connector_get_current_state(&state->connectors[i]); } drmModeFreePlaneResources(res_plane); drmModeFreeResources(res); }