static void plane_check_current_state(struct kms_atomic_plane_state *plane,
                                      enum kms_atomic_check_relax relax)
{
        drmModePlanePtr legacy;
        struct kms_atomic_plane_state plane_kernel;

        legacy = drmModeGetPlane(plane->state->desc->fd, plane->obj);
        igt_assert(legacy);

        igt_assert_eq_u32(legacy->crtc_id, plane->crtc_id);

        if (!(relax & PLANE_RELAX_FB))
                igt_assert_eq_u32(legacy->fb_id, plane->fb_id);

        memcpy(&plane_kernel, plane, sizeof(plane_kernel));
        plane_get_current_state(&plane_kernel);

        /* Legacy cursor ioctls create their own, unknowable, internal
         * framebuffer which we can't reason about. */
        if (relax & PLANE_RELAX_FB)
                plane_kernel.fb_id = plane->fb_id;
        do_or_die(memcmp(&plane_kernel, plane, sizeof(plane_kernel)));

        drmModeFreePlane(legacy);
}
static void crtc_check_current_state(struct kms_atomic_crtc_state *crtc,
                                     struct kms_atomic_plane_state *primary,
                                     enum kms_atomic_check_relax relax)
{
        struct kms_atomic_crtc_state crtc_kernel;
        drmModeCrtcPtr legacy;

        legacy = drmModeGetCrtc(crtc->state->desc->fd, crtc->obj);
        igt_assert(legacy);

        igt_assert_eq_u32(legacy->crtc_id, crtc->obj);
        igt_assert_eq_u32(legacy->x, primary->src_x >> 16);
        igt_assert_eq_u32(legacy->y, primary->src_y >> 16);

        if (crtc->active)
                igt_assert_eq_u32(legacy->buffer_id, primary->fb_id);
        else
                igt_assert_eq_u32(legacy->buffer_id, 0);

        if (legacy->mode_valid) {
                igt_assert_neq(legacy->mode_valid, 0);
                igt_assert_eq(crtc->mode.len,
                              sizeof(struct drm_mode_modeinfo));
                do_or_die(memcmp(&legacy->mode, crtc->mode.data,
                                 crtc->mode.len));
                igt_assert_eq(legacy->width, legacy->mode.hdisplay);
                igt_assert_eq(legacy->height, legacy->mode.vdisplay);
        } else {
                igt_assert_eq(legacy->mode_valid, 0);
        }

        memcpy(&crtc_kernel, crtc, sizeof(crtc_kernel));
        crtc_get_current_state(&crtc_kernel);

        if (crtc_kernel.mode.id != 0)
                igt_assert_eq(crtc_kernel.mode.len,
                              sizeof(struct drm_mode_modeinfo));

        /* Optionally relax the check for MODE_ID: using the legacy SetCrtc
         * API can potentially change MODE_ID even if the mode itself remains
         * unchanged. */
        if (((relax & CRTC_RELAX_MODE) &&
             (crtc_kernel.mode.id != crtc->mode.id &&
              crtc_kernel.mode.id != 0 && crtc->mode.id != 0)) &&
            memcmp(crtc_kernel.mode.data, crtc->mode.data,
                   sizeof(struct drm_mode_modeinfo)) == 0) {
                crtc_kernel.mode.id = crtc->mode.id;
                crtc_kernel.mode.data = crtc->mode.data;
        }

        do_or_die(memcmp(&crtc_kernel, crtc, sizeof(crtc_kernel)));

        drmModeFreeCrtc(legacy);
}
static void test_with_one_bo_two_files(void)
{
        int fd1, fd2;
        uint32_t handle_import, handle_open, handle_orig, flink_name;
        int dma_buf_fd1, dma_buf_fd2;

        fd1 = drm_open_driver(DRIVER_INTEL);
        fd2 = drm_open_driver(DRIVER_INTEL);

        handle_orig = gem_create(fd1, BO_SIZE);
        dma_buf_fd1 = prime_handle_to_fd(fd1, handle_orig);

        flink_name = gem_flink(fd1, handle_orig);
        handle_open = gem_open(fd2, flink_name);

        dma_buf_fd2 = prime_handle_to_fd(fd2, handle_open);
        handle_import = prime_fd_to_handle(fd2, dma_buf_fd2);

        /* Self-importing a flink bo through dma-buf should give the same
         * handle. */
        igt_assert_eq_u32(handle_import, handle_open);

        close(fd1);
        close(fd2);
        close(dma_buf_fd1);
        close(dma_buf_fd2);
}
static void test_nv_i915_reimport_twice_check_flink_name(void)
{
        drm_intel_bo *intel_bo = NULL, *intel_bo2 = NULL;
        int prime_fd;
        struct nouveau_bo *nvbo = NULL;
        uint32_t flink_name1, flink_name2;

        igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                                  0, BO_SIZE, NULL, &nvbo) == 0);

        igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);

        intel_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd,
                                                      BO_SIZE);
        igt_assert(intel_bo);
        close(prime_fd);

        igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);

        intel_bo2 = drm_intel_bo_gem_create_from_prime(bufmgr2, prime_fd,
                                                       BO_SIZE);
        igt_assert(intel_bo2);
        close(prime_fd);

        igt_assert(drm_intel_bo_flink(intel_bo, &flink_name1) == 0);
        igt_assert(drm_intel_bo_flink(intel_bo2, &flink_name2) == 0);

        igt_assert_eq_u32(flink_name1, flink_name2);

        nouveau_bo_ref(NULL, &nvbo);
        drm_intel_bo_unreference(intel_bo);
        drm_intel_bo_unreference(intel_bo2);
}
static void test_i915_nv_reimport_twice_check_flink_name(void)
{
        drm_intel_bo *test_intel_bo;
        int prime_fd;
        struct nouveau_bo *nvbo = NULL, *nvbo2 = NULL;
        uint32_t flink_name1, flink_name2;

        test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

        igt_assert(drm_intel_bo_gem_export_to_prime(test_intel_bo,
                                                    &prime_fd) == 0);
        igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo) == 0);

        /* create a new dma-buf */
        close(prime_fd);
        igt_assert(drm_intel_bo_gem_export_to_prime(test_intel_bo,
                                                    &prime_fd) == 0);
        igt_assert(nouveau_bo_prime_handle_ref(ndev2, prime_fd, &nvbo2) == 0);
        close(prime_fd);

        igt_assert(nouveau_bo_name_get(nvbo, &flink_name1) == 0);
        igt_assert(nouveau_bo_name_get(nvbo2, &flink_name2) == 0);

        igt_assert_eq_u32(flink_name1, flink_name2);

        nouveau_bo_ref(NULL, &nvbo2);
        nouveau_bo_ref(NULL, &nvbo);
        drm_intel_bo_unreference(test_intel_bo);
}
/* XXX: Checking this repeatedly actually hangs the GPU. I have literally no
 * idea why. */
static void
connector_check_current_state(struct kms_atomic_connector_state *connector)
{
        struct kms_atomic_connector_state connector_kernel;
        drmModeConnectorPtr legacy;
        uint32_t crtc_id;

        legacy = drmModeGetConnectorCurrent(connector->state->desc->fd,
                                            connector->obj);
        igt_assert(legacy);

        if (legacy->encoder_id) {
                drmModeEncoderPtr legacy_enc;

                legacy_enc = drmModeGetEncoder(connector->state->desc->fd,
                                               legacy->encoder_id);
                igt_assert(legacy_enc);

                crtc_id = legacy_enc->crtc_id;
                drmModeFreeEncoder(legacy_enc);
        } else {
                crtc_id = 0;
        }

        igt_assert_eq_u32(crtc_id, connector->crtc_id);

        memcpy(&connector_kernel, connector, sizeof(connector_kernel));
        connector_get_current_state(&connector_kernel);
        do_or_die(memcmp(&connector_kernel, connector,
                         sizeof(connector_kernel)));

        drmModeFreeConnector(legacy);
}
/**
 * igt_assert_crc_equal:
 * @a: first pipe CRC value
 * @b: second pipe CRC value
 *
 * Compares two CRC values and fails the testcase with igt_fail() if they
 * don't match. Note that due to CRC collisions, CRC-based testcases can only
 * assert that CRCs match, never that they are different. Otherwise there
 * might be random testcase failures when different screen contents end up
 * with the same CRC by chance.
 */
void igt_assert_crc_equal(igt_crc_t *a, igt_crc_t *b)
{
        int i;

        for (i = 0; i < a->n_words; i++)
                igt_assert_eq_u32(a->crc[i], b->crc[i]);
}
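/*
 * A minimal usage sketch (not part of the original file): collect a
 * reference CRC, then a second CRC after a change that should leave screen
 * contents identical, and assert equality. Assumes an already-initialized
 * igt_pipe_crc_t and uses igt_pipe_crc_collect_crc(), the blocking
 * single-shot collection helper from igt_debugfs.h; the function name
 * example_compare_pipe_crcs is hypothetical.
 */
static void example_compare_pipe_crcs(igt_pipe_crc_t *pipe_crc)
{
        igt_crc_t ref_crc, crc;

        /* Reference CRC with the known-good framebuffer displayed. */
        igt_pipe_crc_collect_crc(pipe_crc, &ref_crc);

        /* ... apply a state change that must not alter the output ... */

        igt_pipe_crc_collect_crc(pipe_crc, &crc);

        /* Only equality is meaningful: collisions make "different" racy. */
        igt_assert_crc_equal(&ref_crc, &crc);
}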
static void run(data_t *data, int child)
{
        const int size = 4096 * (256 + child * child);
        const int tiling = child % 2;
        const int write = child % 2;
        uint32_t handle = gem_create(data->fd, size);
        uint32_t *ptr;
        uint32_t x;

        igt_assert(handle);

        if (tiling != I915_TILING_NONE)
                gem_set_tiling(data->fd, handle, tiling, 4096);

        /* load up the unfaulted bo */
        busy(data, handle, size, 100);

        /* Note that we ignore the API and rely on the implicit
         * set-to-gtt-domain within the fault handler. */
        if (write) {
                ptr = gem_mmap__gtt(data->fd, handle, size,
                                    PROT_READ | PROT_WRITE);
                ptr[rand() % (size / 4)] = canary;
        } else {
                ptr = gem_mmap__gtt(data->fd, handle, size, PROT_READ);
        }
        x = ptr[rand() % (size / 4)];
        munmap(ptr, size);

        igt_assert_eq_u32(x, canary);
}
static void test_with_one_bo(void)
{
        int fd1, fd2;
        uint32_t handle, handle_import1, handle_import2, handle_selfimport;
        int dma_buf_fd;

        fd1 = drm_open_driver(DRIVER_INTEL);
        fd2 = drm_open_driver(DRIVER_INTEL);

        handle = gem_create(fd1, BO_SIZE);

        dma_buf_fd = prime_handle_to_fd(fd1, handle);
        handle_import1 = prime_fd_to_handle(fd2, dma_buf_fd);
        check_bo(fd1, handle, fd2, handle_import1);

        /* Reimport should give us the same handle so that userspace can
         * check whether it already has that bo somewhere. */
        handle_import2 = prime_fd_to_handle(fd2, dma_buf_fd);
        igt_assert_eq_u32(handle_import1, handle_import2);

        /* Same for re-importing on the exporting fd. */
        handle_selfimport = prime_fd_to_handle(fd1, dma_buf_fd);
        igt_assert_eq_u32(handle, handle_selfimport);

        /* Close the dma-buf and check that nothing disappears. */
        close(dma_buf_fd);
        check_bo(fd1, handle, fd2, handle_import1);

        gem_close(fd1, handle);
        check_bo(fd2, handle_import1, fd2, handle_import1);

        /* re-import into old exporter */
        dma_buf_fd = prime_handle_to_fd(fd2, handle_import1);
        /* but drop all references to the obj in between */
        gem_close(fd2, handle_import1);
        handle = prime_fd_to_handle(fd1, dma_buf_fd);
        handle_import1 = prime_fd_to_handle(fd2, dma_buf_fd);
        check_bo(fd1, handle, fd2, handle_import1);

        /* Completely rip out exporting fd. */
        close(fd1);
        check_bo(fd2, handle_import1, fd2, handle_import1);
}
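/*
 * A hypothetical sketch of what a check_bo()-style helper could verify;
 * this is not the file's actual check_bo() implementation. The idea: if two
 * handles (possibly on different fds) name the same underlying bo, a write
 * through a GTT mapping of one must be visible through a mapping of the
 * other. gem_mmap__gtt() is used with the same signature as elsewhere in
 * this section; the name check_bo_sketch is an assumption.
 */
static void check_bo_sketch(int fd1, uint32_t handle1,
                            int fd2, uint32_t handle2)
{
        uint32_t *ptr1, *ptr2;

        ptr1 = gem_mmap__gtt(fd1, handle1, BO_SIZE,
                             PROT_READ | PROT_WRITE);
        ptr2 = gem_mmap__gtt(fd2, handle2, BO_SIZE, PROT_READ);

        /* Write through one mapping, read back through the other. */
        ptr1[0] = 0xdeadbeef;
        igt_assert_eq_u32(ptr2[0], 0xdeadbeef);

        munmap(ptr1, BO_SIZE);
        munmap(ptr2, BO_SIZE);
}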
static void crtc_get_current_state(struct kms_atomic_crtc_state *crtc)
{
        drmModeObjectPropertiesPtr props;
        int i;

        props = drmModeObjectGetProperties(crtc->state->desc->fd, crtc->obj,
                                           DRM_MODE_OBJECT_CRTC);
        igt_assert(props);

        for (i = 0; i < props->count_props; i++) {
                uint32_t *prop_ids = crtc->state->desc->props_crtc;

                if (props->props[i] == prop_ids[CRTC_MODE_ID]) {
                        drmModePropertyBlobPtr blob;

                        crtc->mode.id = props->prop_values[i];
                        if (!crtc->mode.id) {
                                crtc->mode.len = 0;
                                continue;
                        }

                        blob = drmModeGetPropertyBlob(crtc->state->desc->fd,
                                                      crtc->mode.id);
                        igt_assert(blob);
                        igt_assert_eq_u32(blob->length,
                                          sizeof(struct drm_mode_modeinfo));

                        if (!crtc->mode.data ||
                            memcmp(crtc->mode.data, blob->data,
                                   blob->length) != 0)
                                crtc->mode.data = blob->data;
                        crtc->mode.len = blob->length;
                } else if (props->props[i] == prop_ids[CRTC_ACTIVE]) {
                        crtc->active = props->prop_values[i];
                }
        }

        drmModeFreeObjectProperties(props);
}