static void test_write(int fd)
{
	void *src;
	uint32_t dst;

	/* copy from a fresh src to a fresh dst to force a pagefault on both */
	src = create_pointer(fd);
	dst = gem_create(fd, OBJECT_SIZE);

	gem_write(fd, dst, 0, src, OBJECT_SIZE);

	gem_close(fd, dst);
	munmap(src, OBJECT_SIZE);
}
static uint64_t submit_batch(int fd, unsigned ring_id)
{
	const uint32_t batch[] = { MI_NOOP, MI_BATCH_BUFFER_END };
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 exec;
	uint64_t presumed_offset;

	gem_require_ring(fd, ring_id);

	exec.handle = gem_create(fd, 4096);
	gem_write(fd, exec.handle, 0, batch, sizeof(batch));
	exec.relocation_count = 0;
	exec.relocs_ptr = 0;
	exec.alignment = 0;
	exec.offset = 0;
	exec.flags = 0;
	exec.rsvd1 = 0;
	exec.rsvd2 = 0;

	execbuf.buffers_ptr = (uintptr_t)&exec;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = sizeof(batch);
	execbuf.cliprects_ptr = 0;
	execbuf.num_cliprects = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	execbuf.flags = ring_id;
	i915_execbuffer2_set_context_id(execbuf, 0);
	execbuf.rsvd2 = 0;

	gem_execbuf(fd, &execbuf);
	gem_sync(fd, exec.handle);
	presumed_offset = exec.offset;

	igt_set_stop_rings(igt_to_stop_ring_flag(ring_id));

	gem_execbuf(fd, &execbuf);
	gem_sync(fd, exec.handle);

	igt_assert(igt_get_stop_rings() == STOP_RING_NONE);
	igt_assert(presumed_offset == exec.offset);

	gem_close(fd, exec.handle);

	return exec.offset;
}
static void make_busy(int fd, uint32_t handle)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_relocation_entry reloc[2];
	uint32_t batch[20];
	uint32_t tmp;
	int count;

	tmp = gem_create(fd, 1024*1024);

	obj[0].handle = tmp;
	obj[0].relocation_count = 0;
	obj[0].relocs_ptr = 0;
	obj[0].alignment = 0;
	obj[0].offset = 0;
	obj[0].flags = 0;
	obj[0].rsvd1 = 0;
	obj[0].rsvd2 = 0;

	obj[1].handle = handle;
	obj[1].relocation_count = 2;
	obj[1].relocs_ptr = (uintptr_t) reloc;
	obj[1].alignment = 0;
	obj[1].offset = 0;
	obj[1].flags = 0;
	obj[1].rsvd1 = 0;
	obj[1].rsvd2 = 0;

	execbuf.buffers_ptr = (uintptr_t)obj;
	execbuf.buffer_count = 2;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = gem_linear_blt(fd, batch, tmp, tmp,
					   1024*1024, reloc);
	execbuf.cliprects_ptr = 0;
	execbuf.num_cliprects = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	execbuf.flags = 0;
	if (HAS_BLT_RING(intel_get_drm_devid(fd)))
		execbuf.flags |= I915_EXEC_BLT;
	i915_execbuffer2_set_context_id(execbuf, 0);
	execbuf.rsvd2 = 0;

	gem_write(fd, handle, 0, batch, execbuf.batch_len);
	for (count = 0; count < 10; count++)
		do_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
	gem_close(fd, tmp);
}
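/*
 * A hedged usage sketch, not from the original file: make_busy() queues ten
 * blits behind the handle, so the object should report busy immediately
 * afterwards and idle again after a sync. gem_bo_busy() and gem_sync() are
 * the stock igt wrappers; the helper name is hypothetical.
 */
static void busy_sanity_check(int fd)
{
	uint32_t handle = gem_create(fd, 4096);

	make_busy(fd, handle);
	igt_assert(gem_bo_busy(fd, handle));

	gem_sync(fd, handle);
	igt_assert(!gem_bo_busy(fd, handle));

	gem_close(fd, handle);
}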
static void run_on_ring(int fd, unsigned ring_id, const char *ring_name)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 execobj;
	struct {
		uint32_t handle;
		uint32_t *batch;
	} obj[2];
	unsigned i;
	char buf[100];

	gem_require_ring(fd, ring_id);
	igt_require(has_softpin(fd));

	for (i = 0; i < 2; i++) {
		obj[i].handle = gem_create(fd, BATCH_SIZE);
		obj[i].batch = mmap_coherent(fd, obj[i].handle, BATCH_SIZE);
		memset(obj[i].batch, 0xff, BATCH_SIZE);
	}

	memset(&execobj, 0, sizeof(execobj));
	execobj.handle = obj[0].handle;
	obj[0].batch[0] = MI_BATCH_BUFFER_END;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&execobj;
	execbuf.buffer_count = 1;
	execbuf.flags = ring_id;

	/* Execute once to allocate a gtt-offset */
	gem_execbuf(fd, &execbuf);
	execobj.flags = EXEC_OBJECT_PINNED;

	sprintf(buf, "Testing %s cs tlb coherency: ", ring_name);
	for (i = 0; i < BATCH_SIZE/64; i++) {
		execobj.handle = obj[i&1].handle;
		obj[i&1].batch[i*64/4] = MI_BATCH_BUFFER_END;
		execbuf.batch_start_offset = i*64;

		gem_execbuf(fd, &execbuf);
	}

	for (i = 0; i < 2; i++) {
		gem_close(fd, obj[i].handle);
		munmap(obj[i].batch, BATCH_SIZE);
	}
}
/*
 * Create an object with a non-page-aligned size, then access it at an offset
 * that lies beyond both the requested size and the object's last page
 * boundary. The pwrite must fail.
 */
static void invalid_nonaligned_size(int fd)
{
	int handle;
	char buf[PAGE_SIZE];
	struct drm_i915_gem_pwrite gem_pwrite;

	handle = gem_create(fd, PAGE_SIZE / 2);

	CLEAR(gem_pwrite);
	gem_pwrite.handle = handle;
	gem_pwrite.offset = PAGE_SIZE / 2;
	gem_pwrite.size = PAGE_SIZE;
	gem_pwrite.data_ptr = (uintptr_t)buf;
	/* This should fail; hence we cannot use gem_write. */
	igt_assert(drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &gem_pwrite));

	gem_close(fd, handle);
}
static void dontneed_before_pwrite(void)
{
	int fd = drm_open_driver(DRIVER_INTEL);
	uint32_t buf[] = { MI_BATCH_BUFFER_END, 0 };
	struct drm_i915_gem_pwrite gem_pwrite;

	gem_pwrite.handle = gem_create(fd, OBJECT_SIZE);
	gem_pwrite.offset = 0;
	gem_pwrite.size = sizeof(buf);
	gem_pwrite.data_ptr = (uintptr_t)buf;

	gem_madvise(fd, gem_pwrite.handle, I915_MADV_DONTNEED);
	igt_assert(drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &gem_pwrite));
	igt_assert(errno == EFAULT);

	gem_close(fd, gem_pwrite.handle);
	close(fd);
}
static int has_engine(int fd, const struct intel_execution_engine *e)
{
	uint32_t bbe = MI_BATCH_BUFFER_END;
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 exec;
	int ret;

	memset(&exec, 0, sizeof(exec));
	exec.handle = gem_create(fd, 4096);
	gem_write(fd, exec.handle, 0, &bbe, sizeof(bbe));

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&exec;
	execbuf.buffer_count = 1;
	execbuf.flags = e->exec_id | e->flags;

	ret = __gem_execbuf(fd, &execbuf);
	gem_close(fd, exec.handle);

	return ret == 0;
}
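/*
 * A minimal usage sketch, not part of the original test: walk igt's
 * intel_execution_engines[] table (terminated by a NULL name) and report
 * which engines accept a trivial execbuf. Purely illustrative.
 */
static void report_engines(int fd)
{
	const struct intel_execution_engine *e;

	for (e = intel_execution_engines; e->name; e++) {
		if (has_engine(fd, e))
			igt_info("engine %s is available\n", e->name);
	}
}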
static void invalid_flags(int fd)
{
	struct drm_i915_gem_wait wait;
	int ret;
	uint32_t handle;

	handle = gem_create(fd, 4096);

	wait.bo_handle = handle;
	wait.timeout_ns = 1;
	/* NOTE: This test intentionally tests for just the next available
	 * flag. Don't "fix" this testcase without adding the ABI testcases
	 * for new flags first. */
	wait.flags = 1;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);

	igt_assert(ret != 0 && errno == EINVAL);

	gem_close(fd, handle);
}
static void test_execbuf(int fd)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 exec;
	uint32_t tmp[] = { MI_BATCH_BUFFER_END };

	memset(&exec, 0, sizeof(exec));
	memset(&execbuf, 0, sizeof(execbuf));

	exec.handle = gem_create(fd, 4096);
	gem_write(fd, exec.handle, 0, tmp, sizeof(tmp));

	execbuf.buffers_ptr = (uintptr_t)&exec;
	execbuf.buffer_count = 1;

	wedge_gpu(fd);

	igt_assert_eq(__gem_execbuf(fd, &execbuf), -EIO);

	gem_close(fd, exec.handle);

	trigger_reset(fd);
}
static void test_write_gtt(int fd)
{
	uint32_t dst;
	char *dst_gtt;
	void *src;

	dst = gem_create(fd, OBJECT_SIZE);

	/* prefault object into gtt */
	dst_gtt = mmap_bo(fd, dst);
	set_domain_gtt(fd, dst);
	memset(dst_gtt, 0, OBJECT_SIZE);
	munmap(dst_gtt, OBJECT_SIZE);

	src = create_pointer(fd);

	gem_write(fd, dst, 0, src, OBJECT_SIZE);

	gem_close(fd, dst);
	munmap(src, OBJECT_SIZE);
}
static void test_write_cpu_read_gtt(int fd)
{
	uint32_t handle;
	uint32_t *src, *dst;

	igt_require(gem_has_llc(fd));

	handle = gem_create(fd, OBJECT_SIZE);

	dst = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ);
	src = gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);

	gem_close(fd, handle);

	memset(src, 0xaa, OBJECT_SIZE);
	igt_assert(memcmp(dst, src, OBJECT_SIZE) == 0);

	munmap(src, OBJECT_SIZE);
	munmap(dst, OBJECT_SIZE);
}
static int has_userptr(int fd)
{
	uint32_t handle = 0;
	void *ptr;
	uint32_t oldflags;
	int ret;

	igt_assert(posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE) == 0);
	oldflags = userptr_flags;
	gem_userptr_test_unsynchronized();
	ret = gem_userptr(fd, ptr, PAGE_SIZE, 0, &handle);
	userptr_flags = oldflags;
	if (ret != 0) {
		free(ptr);
		return 0;
	}

	gem_close(fd, handle);
	free(ptr);

	return handle != 0;
}
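/*
 * Hedged usage sketch (not from the original file): gate userptr subtests on
 * kernel support with the stock igt_require_f() helper; the wrapper name is
 * hypothetical.
 */
static void require_userptr(int fd)
{
	igt_require_f(has_userptr(fd), "no userptr support detected\n");
}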
static void test_read_write(int fd, enum test_read_write order)
{
	uint32_t handle;
	void *ptr;
	volatile uint32_t val = 0;

	handle = gem_create(fd, OBJECT_SIZE);

	ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);

	if (order == READ_BEFORE_WRITE) {
		val = *(uint32_t *)ptr;
		*(uint32_t *)ptr = val;
	} else {
		*(uint32_t *)ptr = val;
		val = *(uint32_t *)ptr;
	}

	gem_close(fd, handle);
	munmap(ptr, OBJECT_SIZE);
}
static void exec0(int fd)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 exec[1];
	uint32_t buf[2] = { MI_BATCH_BUFFER_END, 0 };

	/* Just try executing with a zero-length bo.
	 * We expect the kernel to either accept the nop batch, or reject it
	 * for the zero-length buffer, but never crash.
	 */
	exec[0].handle = gem_create(fd, 4096);
	gem_write(fd, exec[0].handle, 0, buf, sizeof(buf));
	exec[0].relocation_count = 0;
	exec[0].relocs_ptr = 0;
	exec[0].alignment = 0;
	exec[0].offset = 0;
	exec[0].flags = 0;
	exec[0].rsvd1 = 0;
	exec[0].rsvd2 = 0;

	execbuf.buffers_ptr = (uintptr_t)exec;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = sizeof(buf);
	execbuf.cliprects_ptr = 0;
	execbuf.num_cliprects = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	execbuf.flags = 0;
	i915_execbuffer2_set_context_id(execbuf, 0);
	execbuf.rsvd2 = 0;

	igt_info("trying to run an empty batchbuffer\n");
	gem_exec(fd, &execbuf);

	gem_close(fd, exec[0].handle);
}
static void dontneed_before_exec(void)
{
	int fd = drm_open_driver(DRIVER_INTEL);
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 exec;
	uint32_t buf[] = { MI_BATCH_BUFFER_END, 0 };

	memset(&execbuf, 0, sizeof(execbuf));
	memset(&exec, 0, sizeof(exec));

	exec.handle = gem_create(fd, OBJECT_SIZE);
	gem_write(fd, exec.handle, 0, buf, sizeof(buf));
	gem_madvise(fd, exec.handle, I915_MADV_DONTNEED);

	execbuf.buffers_ptr = (uintptr_t)&exec;
	execbuf.buffer_count = 1;
	execbuf.batch_len = sizeof(buf);
	gem_execbuf(fd, &execbuf);

	gem_close(fd, exec.handle);
	close(fd);
}
static void test_huge_copy(int fd, int huge, int tiling_a, int tiling_b)
{
	uint64_t huge_object_size, i;
	uint32_t bo, *pattern_a, *pattern_b;
	char *a, *b;

	switch (huge) {
	case -2:
		huge_object_size = gem_mappable_aperture_size() / 4;
		break;
	case -1:
		huge_object_size = gem_mappable_aperture_size() / 2;
		break;
	case 0:
		huge_object_size = gem_mappable_aperture_size() + PAGE_SIZE;
		break;
	default:
		huge_object_size = gem_aperture_size(fd) + PAGE_SIZE;
		break;
	}
	intel_require_memory(2, huge_object_size, CHECK_RAM);

	pattern_a = malloc(PAGE_SIZE);
	for (i = 0; i < PAGE_SIZE/4; i++)
		pattern_a[i] = i;

	pattern_b = malloc(PAGE_SIZE);
	for (i = 0; i < PAGE_SIZE/4; i++)
		pattern_b[i] = ~i;

	bo = gem_create(fd, huge_object_size);
	if (tiling_a)
		gem_set_tiling(fd, bo, tiling_a,
			       tiling_a == I915_TILING_Y ? 128 : 512);
	a = __gem_mmap__gtt(fd, bo, huge_object_size, PROT_READ | PROT_WRITE);
	igt_require(a);
	gem_close(fd, bo);

	for (i = 0; i < huge_object_size / PAGE_SIZE; i++)
		memcpy(a + PAGE_SIZE*i, pattern_a, PAGE_SIZE);

	bo = gem_create(fd, huge_object_size);
	if (tiling_b)
		gem_set_tiling(fd, bo, tiling_b,
			       tiling_b == I915_TILING_Y ? 128 : 512);
	b = __gem_mmap__gtt(fd, bo, huge_object_size, PROT_READ | PROT_WRITE);
	igt_require(b);
	gem_close(fd, bo);

	for (i = 0; i < huge_object_size / PAGE_SIZE; i++)
		memcpy(b + PAGE_SIZE*i, pattern_b, PAGE_SIZE);

	for (i = 0; i < huge_object_size / PAGE_SIZE; i++) {
		if (i & 1)
			memcpy(a + i*PAGE_SIZE, b + i*PAGE_SIZE, PAGE_SIZE);
		else
			memcpy(b + i*PAGE_SIZE, a + i*PAGE_SIZE, PAGE_SIZE);
	}

	for (i = 0; i < huge_object_size / PAGE_SIZE; i++) {
		if (i & 1)
			igt_assert(memcmp(pattern_b, a + PAGE_SIZE*i, PAGE_SIZE) == 0);
		else
			igt_assert(memcmp(pattern_a, a + PAGE_SIZE*i, PAGE_SIZE) == 0);
	}
	munmap(a, huge_object_size);

	for (i = 0; i < huge_object_size / PAGE_SIZE; i++) {
		if (i & 1)
			igt_assert(memcmp(pattern_b, b + PAGE_SIZE*i, PAGE_SIZE) == 0);
		else
			igt_assert(memcmp(pattern_a, b + PAGE_SIZE*i, PAGE_SIZE) == 0);
	}
	munmap(b, huge_object_size);

	free(pattern_a);
	free(pattern_b);
}
static void test_huge_bo(int fd, int huge, int tiling)
{
	uint32_t bo;
	char *ptr;
	char *tiled_pattern;
	char *linear_pattern;
	uint64_t size, last_offset;
	int pitch = tiling == I915_TILING_Y ? 128 : 512;
	int i;

	switch (huge) {
	case -1:
		size = gem_mappable_aperture_size() / 2;
		break;
	case 0:
		size = gem_mappable_aperture_size() + PAGE_SIZE;
		break;
	default:
		size = gem_aperture_size(fd) + PAGE_SIZE;
		break;
	}
	intel_require_memory(1, size, CHECK_RAM);

	last_offset = size - PAGE_SIZE;

	/* Create pattern */
	bo = gem_create(fd, PAGE_SIZE);
	if (tiling)
		gem_set_tiling(fd, bo, tiling, pitch);
	linear_pattern = gem_mmap__gtt(fd, bo, PAGE_SIZE,
				       PROT_READ | PROT_WRITE);
	for (i = 0; i < PAGE_SIZE; i++)
		linear_pattern[i] = i;
	tiled_pattern = gem_mmap__cpu(fd, bo, 0, PAGE_SIZE, PROT_READ);

	gem_set_domain(fd, bo, I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT, 0);
	gem_close(fd, bo);

	bo = gem_create(fd, size);
	if (tiling)
		gem_set_tiling(fd, bo, tiling, pitch);

	/* Initialise first/last page through CPU mmap */
	ptr = gem_mmap__cpu(fd, bo, 0, size, PROT_READ | PROT_WRITE);
	memcpy(ptr, tiled_pattern, PAGE_SIZE);
	memcpy(ptr + last_offset, tiled_pattern, PAGE_SIZE);
	munmap(ptr, size);

	/* Obtain mapping for the object through GTT. */
	ptr = __gem_mmap__gtt(fd, bo, size, PROT_READ | PROT_WRITE);
	igt_require_f(ptr, "Huge BO GTT mapping not supported.\n");

	set_domain_gtt(fd, bo);

	/* Access through GTT should still provide the CPU written values. */
	igt_assert(memcmp(ptr, linear_pattern, PAGE_SIZE) == 0);
	igt_assert(memcmp(ptr + last_offset, linear_pattern, PAGE_SIZE) == 0);

	gem_set_tiling(fd, bo, I915_TILING_NONE, 0);

	igt_assert(memcmp(ptr, tiled_pattern, PAGE_SIZE) == 0);
	igt_assert(memcmp(ptr + last_offset, tiled_pattern, PAGE_SIZE) == 0);

	munmap(ptr, size);
	gem_close(fd, bo);
	munmap(tiled_pattern, PAGE_SIZE);
	munmap(linear_pattern, PAGE_SIZE);
}
static void blt_copy(int fd, uint32_t dst, uint32_t src)
{
	uint32_t batch[1024], *b = batch;
	struct drm_i915_gem_relocation_entry reloc[2], *r = reloc;
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_execbuffer2 exec;
	uint32_t handle;
	int ret;

	*b++ = (XY_SRC_COPY_BLT_CMD |
		XY_SRC_COPY_BLT_WRITE_ALPHA |
		XY_SRC_COPY_BLT_WRITE_RGB);
	*b++ = 3 << 24 | 0xcc << 16 | WIDTH * 4;
	*b++ = 0;
	*b++ = HEIGHT << 16 | WIDTH;
	*b = fill_reloc(r++, b-batch, dst,
			I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
	b++;
	*b++ = 0;
	*b++ = WIDTH*4;
	*b = fill_reloc(r++, b-batch, src, I915_GEM_DOMAIN_RENDER, 0);
	b++;

	*b++ = MI_BATCH_BUFFER_END;
	if ((b - batch) & 1)
		*b++ = 0;

	assert(b - batch <= 1024);
	handle = gem_create(fd, 4096);
	/* gem_write takes (fd, handle, offset, buf, length) */
	gem_write(fd, handle, 0, batch, (b-batch)*sizeof(batch[0]));

	assert(r-reloc == 2);

	obj[0].handle = dst;
	obj[0].relocation_count = 0;
	obj[0].relocs_ptr = 0;
	obj[0].alignment = 0;
	obj[0].offset = 0;
	obj[0].flags = EXEC_OBJECT_NEEDS_FENCE;
	obj[0].rsvd1 = 0;
	obj[0].rsvd2 = 0;

	obj[1].handle = src;
	obj[1].relocation_count = 0;
	obj[1].relocs_ptr = 0;
	obj[1].alignment = 0;
	obj[1].offset = 0;
	obj[1].flags = EXEC_OBJECT_NEEDS_FENCE;
	obj[1].rsvd1 = 0;
	obj[1].rsvd2 = 0;

	obj[2].handle = handle;
	obj[2].relocation_count = 2;
	obj[2].relocs_ptr = (uintptr_t)reloc;
	obj[2].alignment = 0;
	obj[2].offset = 0;
	obj[2].flags = 0;
	obj[2].rsvd1 = obj[2].rsvd2 = 0;

	exec.buffers_ptr = (uintptr_t)obj;
	exec.buffer_count = 3;
	exec.batch_start_offset = 0;
	exec.batch_len = (b-batch)*sizeof(batch[0]);
	exec.DR1 = exec.DR4 = 0;
	exec.num_cliprects = 0;
	exec.cliprects_ptr = 0;
	exec.flags = 0;
	exec.rsvd1 = exec.rsvd2 = 0;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	while (ret && errno == EBUSY) {
		drmCommandNone(fd, DRM_I915_GEM_THROTTLE);
		ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	}
	assert(ret == 0);

	gem_close(fd, handle);
}
static void big_exec(int fd, uint32_t handle, int ring)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 *gem_exec;
	uint32_t ctx_id1, ctx_id2;
	int num_buffers = gem_available_aperture_size(fd) / 4096;
	int i;

	/* Make sure we only fill half of RAM with gem objects. */
	igt_require(intel_get_total_ram_mb() * 1024 / 2 > num_buffers * 4);

	gem_exec = calloc(num_buffers + 1, sizeof(*gem_exec));
	igt_assert(gem_exec);
	memset(gem_exec, 0, (num_buffers + 1) * sizeof(*gem_exec));

	ctx_id1 = gem_context_create(fd);
	ctx_id2 = gem_context_create(fd);

	gem_exec[0].handle = handle;

	execbuf.buffers_ptr = (uintptr_t)gem_exec;
	execbuf.buffer_count = num_buffers + 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = 8;
	execbuf.cliprects_ptr = 0;
	execbuf.num_cliprects = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	execbuf.flags = ring;
	execbuf.rsvd2 = 0;

	execbuf.buffer_count = 1;
	i915_execbuffer2_set_context_id(execbuf, ctx_id1);
	do_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);

	for (i = 0; i < num_buffers; i++) {
		uint32_t tmp_handle = gem_create(fd, 4096);

		gem_exec[i].handle = tmp_handle;
	}
	gem_exec[i].handle = handle;
	execbuf.buffer_count = i + 1;

	/* figure out how many buffers we can exactly fit */
	while (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2,
			&execbuf) != 0) {
		i--;
		gem_close(fd, gem_exec[i].handle);
		gem_exec[i].handle = handle;
		execbuf.buffer_count--;
		igt_info("trying buffer count %i\n", i - 1);
	}

	igt_info("reduced buffer count to %i from %i\n",
		 i - 1, num_buffers);

	/* double check that it works */
	do_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);

	i915_execbuffer2_set_context_id(execbuf, ctx_id2);
	do_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);

	gem_sync(fd, handle);
}
/**
 * igt_remove_fb:
 * @fd: open i915 drm file descriptor
 * @fb: pointer to an #igt_fb structure
 *
 * This function releases all resources allocated in igt_create_fb() for @fb.
 * Note that if this framebuffer is still in use on a primary plane the kernel
 * will disable the corresponding crtc.
 */
void igt_remove_fb(int fd, struct igt_fb *fb)
{
	cairo_surface_destroy(fb->cairo_surface);
	do_or_die(drmModeRmFB(fd, fb->fb_id));
	gem_close(fd, fb->gem_handle);
}
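/*
 * Hedged usage sketch (not part of the library): pair igt_create_fb() with
 * igt_remove_fb() once the framebuffer is off-screen. The igt_create_fb()
 * tiling argument has changed type across igt versions, so treat this as
 * illustrative only.
 */
static void fb_lifecycle_example(int fd)
{
	struct igt_fb fb;

	igt_create_fb(fd, 1024, 768, DRM_FORMAT_XRGB8888,
		      I915_TILING_NONE, &fb);

	/* ... display the fb, run the test, disable the plane ... */

	igt_remove_fb(fd, &fb);
}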
static int copy(int fd, uint32_t dst, uint32_t src, uint32_t *all_bo, int n_bo)
{
	uint32_t batch[12];
	struct drm_i915_gem_relocation_entry reloc[2];
	struct drm_i915_gem_exec_object2 *obj;
	struct drm_i915_gem_execbuffer2 exec;
	uint32_t handle;
	int n, ret, i = 0;

	batch[i++] = (XY_SRC_COPY_BLT_CMD |
		      XY_SRC_COPY_BLT_WRITE_ALPHA |
		      XY_SRC_COPY_BLT_WRITE_RGB | 6);
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i - 1] += 2;
	batch[i++] = (3 << 24) | /* 32 bits */
		     (0xcc << 16) | /* copy ROP */
		     WIDTH*4;
	batch[i++] = 0; /* dst x1,y1 */
	batch[i++] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */
	batch[i++] = 0; /* dst reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0; /* FIXME */
	batch[i++] = 0; /* src x1,y1 */
	batch[i++] = WIDTH*4;
	batch[i++] = 0; /* src reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0; /* FIXME */
	batch[i++] = MI_BATCH_BUFFER_END;
	batch[i++] = MI_NOOP;

	handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, batch, sizeof(batch));

	reloc[0].target_handle = dst;
	reloc[0].delta = 0;
	reloc[0].offset = 4 * sizeof(batch[0]);
	reloc[0].presumed_offset = 0;
	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;

	reloc[1].target_handle = src;
	reloc[1].delta = 0;
	reloc[1].offset = 7 * sizeof(batch[0]);
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		reloc[1].offset += sizeof(batch[0]);
	reloc[1].presumed_offset = 0;
	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[1].write_domain = 0;

	obj = calloc(n_bo + 1, sizeof(*obj));
	for (n = 0; n < n_bo; n++)
		obj[n].handle = all_bo[n];
	obj[n].handle = handle;
	obj[n].relocation_count = 2;
	obj[n].relocs_ptr = (uintptr_t)reloc;

	exec.buffers_ptr = (uintptr_t)obj;
	exec.buffer_count = n_bo + 1;
	exec.batch_start_offset = 0;
	exec.batch_len = i * 4;
	exec.DR1 = exec.DR4 = 0;
	exec.num_cliprects = 0;
	exec.cliprects_ptr = 0;
	exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0;
	i915_execbuffer2_set_context_id(exec, 0);
	exec.rsvd2 = 0;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	if (ret)
		ret = errno;

	gem_close(fd, handle);
	free(obj);

	return ret;
}
static void ricochet(int tiled, int sprite_w, int sprite_h,
		     int out_w, int out_h, int dump_info)
{
	int ret;
	int gfx_fd;
	int keep_moving;
	const int num_surfaces = 3;
	uint32_t sprite_handles[num_surfaces];
	uint32_t sprite_fb_id[num_surfaces];
	int *sprite_x = NULL;
	int *sprite_y = NULL;
	uint32_t sprite_stride;
	uint32_t sprite_size;
	uint32_t handles[4], pitches[4], offsets[4]; /* we only use [0] */
	uint32_t prim_width, prim_height, prim_handle, prim_stride,
		 prim_size, prim_fb_id;
	struct drm_intel_sprite_colorkey set;
	struct connector curr_connector;
	drmModeRes *gfx_resources;
	struct termios orig_term, curr_term;
	int c_index;
	int sprite_index;
	unsigned int *sprite_plane_id = NULL;
	uint32_t plane_flags = 0;
	int *delta_x = NULL, *delta_y = NULL;
	struct timeval stTimeVal;
	long long currTime, prevFlipTime, prevMoveTime,
		  deltaFlipTime, deltaMoveTime, SleepTime;
	char key;
	int sprite_plane_count = 0;
	int i;

	// Open up I915 graphics device
	gfx_fd = drmOpen("i915", NULL);
	if (gfx_fd < 0) {
		printf("Failed to load i915 driver: %s\n", strerror(errno));
		return;
	}

	// Obtain pointer to struct containing graphics resources
	gfx_resources = drmModeGetResources(gfx_fd);
	if (!gfx_resources) {
		printf("drmModeGetResources failed: %s\n", strerror(errno));
		return;
	}

	if (dump_info != 0) {
		dump_connectors(gfx_fd, gfx_resources);
		dump_crtcs(gfx_fd, gfx_resources);
		dump_planes(gfx_fd, gfx_resources);
	}

	// Save previous terminal settings
	if (tcgetattr(0, &orig_term) != 0) {
		printf("tcgetattr failure: %s\n", strerror(errno));
		return;
	}

	// Set up input to return characters immediately
	curr_term = orig_term;
	curr_term.c_lflag &= ~(ICANON | ECHO | ECHONL);
	curr_term.c_cc[VMIN] = 0;	// No minimum number of characters
	curr_term.c_cc[VTIME] = 0;	// Return immediately, even if
					// nothing has been entered.
	if (tcsetattr(0, TCSANOW, &curr_term) != 0) {
		printf("tcsetattr failure: %s\n", strerror(errno));
		return;
	}

	// Cycle through all connectors and display the flying sprite
	// where there are displays attached and the hardware will support it.
	for (c_index = 0; c_index < gfx_resources->count_connectors; c_index++) {
		curr_connector.id = gfx_resources->connectors[c_index];

		// Find the native (preferred) display mode
		connector_find_preferred_mode(gfx_fd, gfx_resources,
					      &curr_connector);
		if (curr_connector.mode_valid == 0) {
			printf("No valid preferred mode detected\n");
			goto out;
		}

		// Determine if sprite hardware is available on the pipe
		// associated with this connector.
		sprite_plane_count = connector_find_plane(gfx_fd,
							  &curr_connector,
							  &sprite_plane_id);
		if (!sprite_plane_count) {
			printf("Failed to find sprite plane on crtc\n");
			goto out;
		}

		// Width and height of preferred mode
		prim_width = curr_connector.mode.hdisplay;
		prim_height = curr_connector.mode.vdisplay;

		// Allocate and fill memory for primary surface
		ret = prepare_primary_surface(gfx_fd, prim_width, prim_height,
					      &prim_handle, &prim_stride,
					      &prim_size, tiled);
		if (ret != 0) {
			printf("Failed to add primary fb (%dx%d): %s\n",
			       prim_width, prim_height, strerror(errno));
			goto out;
		}

		// Add the primary surface framebuffer
		ret = drmModeAddFB(gfx_fd, prim_width, prim_height, 24, 32,
				   prim_stride, prim_handle, &prim_fb_id);
		gem_close(gfx_fd, prim_handle);
		if (ret != 0) {
			printf("Failed to add primary fb (%dx%d): %s\n",
			       prim_width, prim_height, strerror(errno));
			goto out;
		}

		// Allocate and fill sprite surfaces
		ret = prepare_sprite_surfaces(gfx_fd, sprite_w, sprite_h,
					      num_surfaces, &sprite_handles[0],
					      &sprite_stride, &sprite_size,
					      tiled);
		if (ret != 0) {
			printf("Preparation of sprite surfaces failed %dx%d\n",
			       sprite_w, sprite_h);
			goto out;
		}

		// Add the sprite framebuffers
		for (sprite_index = 0; sprite_index < num_surfaces; sprite_index++) {
			handles[0] = sprite_handles[sprite_index];
			handles[1] = handles[0];
			handles[2] = handles[0];
			handles[3] = handles[0];
			pitches[0] = sprite_stride;
			pitches[1] = sprite_stride;
			pitches[2] = sprite_stride;
			pitches[3] = sprite_stride;
			memset(offsets, 0, sizeof(offsets));

			ret = drmModeAddFB2(gfx_fd, sprite_w, sprite_h,
					    DRM_FORMAT_XRGB8888,
					    handles, pitches, offsets,
					    &sprite_fb_id[sprite_index],
					    plane_flags);
			gem_close(gfx_fd, sprite_handles[sprite_index]);

			if (ret) {
				printf("Failed to add sprite fb (%dx%d): %s\n",
				       sprite_w, sprite_h, strerror(errno));

				sprite_index--;
				while (sprite_index >= 0) {
					drmModeRmFB(gfx_fd,
						    sprite_fb_id[sprite_index]);
					sprite_index--;
				}
				goto out;
			}
		}

		if (dump_info != 0) {
			printf("Displayed Mode Connector struct:\n"
			       "    .id = %d\n"
			       "    .mode_valid = %d\n"
			       "    .crtc = %d\n"
			       "    .pipe = %d\n"
			       "    drmModeModeInfo ...\n"
			       "        .name = %s\n"
			       "        .type = %d\n"
			       "        .flags = %08x\n"
			       "    drmModeEncoder ...\n"
			       "        .encoder_id = %d\n"
			       "        .encoder_type = %d (%s)\n"
			       "        .crtc_id = %d\n"
			       "        .possible_crtcs = %d\n"
			       "        .possible_clones = %d\n"
			       "    drmModeConnector ...\n"
			       "        .connector_id = %d\n"
			       "        .encoder_id = %d\n"
			       "        .connector_type = %d (%s)\n"
			       "        .connector_type_id = %d\n\n",
			       curr_connector.id,
			       curr_connector.mode_valid,
			       curr_connector.crtc,
			       curr_connector.pipe,
			       curr_connector.mode.name,
			       curr_connector.mode.type,
			       curr_connector.mode.flags,
			       curr_connector.encoder->encoder_id,
			       curr_connector.encoder->encoder_type,
			       kmstest_encoder_type_str(curr_connector.encoder->encoder_type),
			       curr_connector.encoder->crtc_id,
			       curr_connector.encoder->possible_crtcs,
			       curr_connector.encoder->possible_clones,
			       curr_connector.connector->connector_id,
			       curr_connector.connector->encoder_id,
			       curr_connector.connector->connector_type,
			       kmstest_connector_type_str(curr_connector.connector->connector_type),
			       curr_connector.connector->connector_type_id);

			printf("Sprite surface dimensions = %dx%d\n"
			       "Sprite output dimensions = %dx%d\n"
			       "Press any key to continue >\n",
			       sprite_w, sprite_h, out_w, out_h);

			// Wait for a key-press
			while (read(0, &key, 1) == 0)
				;
			// Purge unread characters
			tcflush(0, TCIFLUSH);
		}

		// Set up the primary display mode
		ret = drmModeSetCrtc(gfx_fd, curr_connector.crtc, prim_fb_id,
				     0, 0, &curr_connector.id, 1,
				     &curr_connector.mode);
		if (ret != 0) {
			printf("Failed to set mode (%dx%d@%dHz): %s\n",
			       prim_width, prim_height,
			       curr_connector.mode.vrefresh,
			       strerror(errno));
			continue;
		}

		// Set the sprite colorkey state
		for (i = 0; i < sprite_plane_count; i++) {
			set.plane_id = sprite_plane_id[i];
			set.min_value = 0;
			set.max_value = 0;
			set.flags = I915_SET_COLORKEY_NONE;
			ret = drmCommandWrite(gfx_fd,
					      DRM_I915_SET_SPRITE_COLORKEY,
					      &set, sizeof(set));
			assert(ret == 0);
		}

		// Set up sprite output dimensions, initial position, etc.
		if (out_w > prim_width / 2)
			out_w = prim_width / 2;
		if (out_h > prim_height / 2)
			out_h = prim_height / 2;

		delta_x = (int *)malloc(sprite_plane_count * sizeof(int));
		delta_y = (int *)malloc(sprite_plane_count * sizeof(int));
		sprite_x = (int *)malloc(sprite_plane_count * sizeof(int));
		sprite_y = (int *)malloc(sprite_plane_count * sizeof(int));

		/* Initializing the coordinates (x,y) of the available sprites
		 * on the connector, equally spaced along the diagonal of the
		 * rectangle {(0,0),(prim_width/2, prim_height/2)}.
		 */
		for (i = 0; i < sprite_plane_count; i++) {
			delta_x[i] = 3;
			delta_y[i] = 4;
			sprite_x[i] = i * (prim_width / (2 * sprite_plane_count));
			sprite_y[i] = i * (prim_height / (2 * sprite_plane_count));
		}

		currTime = 0;
		prevFlipTime = 0;	// Will force immediate sprite flip
		prevMoveTime = 0;	// Will force immediate sprite move
		deltaFlipTime = 500000;	// Flip sprite surface every 1/2 second
		deltaMoveTime = 100000;	// Move sprite every 100 ms
		sprite_index = num_surfaces - 1;
		keep_moving = 1;

		// Bounce sprite off the walls
		while (keep_moving) {
			// Obtain system time in usec.
			if (gettimeofday(&stTimeVal, NULL) != 0)
				printf("gettimeofday error: %s\n",
				       strerror(errno));
			else
				currTime = ((long long)stTimeVal.tv_sec * 1000000) +
					   stTimeVal.tv_usec;

			// Check if it's time to flip the sprite surface
			if (currTime - prevFlipTime > deltaFlipTime) {
				sprite_index = (sprite_index + 1) % num_surfaces;
				prevFlipTime = currTime;
			}

			// Move the sprite on the screen and flip
			// the surface if the index has changed
			// NB: sprite_w and sprite_h must be 16.16 fixed point,
			// hence the << 16
			for (i = 0; i < sprite_plane_count; i++) {
				if (drmModeSetPlane(gfx_fd, sprite_plane_id[i],
						    curr_connector.crtc,
						    sprite_fb_id[sprite_index],
						    plane_flags,
						    sprite_x[i], sprite_y[i],
						    out_w, out_h,
						    0, 0,
						    sprite_w << 16,
						    sprite_h << 16))
					printf("Failed to enable sprite plane: %s\n",
					       strerror(errno));
			}

			// Check if it's time to move the sprite surface
			if (currTime - prevMoveTime > deltaMoveTime) {
				// Compute the next position for sprite
				for (i = 0; i < sprite_plane_count; i++) {
					sprite_x[i] += delta_x[i];
					sprite_y[i] += delta_y[i];
					if (sprite_x[i] < 0) {
						sprite_x[i] = 0;
						delta_x[i] = -delta_x[i];
					} else if (sprite_x[i] > prim_width - out_w) {
						sprite_x[i] = prim_width - out_w;
						delta_x[i] = -delta_x[i];
					}
					if (sprite_y[i] < 0) {
						sprite_y[i] = 0;
						delta_y[i] = -delta_y[i];
					} else if (sprite_y[i] > prim_height - out_h) {
						sprite_y[i] = prim_height - out_h;
						delta_y[i] = -delta_y[i];
					}
				}
				prevMoveTime = currTime;
			}

			// Fetch a key from input (non-blocking)
			if (read(0, &key, 1) == 1) {
				switch (key) {
				case 'q':	// Kill the program
				case 'Q':
					goto out;
					break;
				case 's':	// Slow down sprite movement
					deltaMoveTime = (deltaMoveTime * 100) / 90;
					if (deltaMoveTime > 800000)
						deltaMoveTime = 800000;
					break;
				case 'S':	// Speed up sprite movement
					deltaMoveTime = (deltaMoveTime * 100) / 110;
					if (deltaMoveTime < 2000)
						deltaMoveTime = 2000;
					break;
				case 'f':	// Slow down sprite flipping
					deltaFlipTime = (deltaFlipTime * 100) / 90;
					if (deltaFlipTime > 1000000)
						deltaFlipTime = 1000000;
					break;
				case 'F':	// Speed up sprite flipping
					deltaFlipTime = (deltaFlipTime * 100) / 110;
					if (deltaFlipTime < 20000)
						deltaFlipTime = 20000;
					break;
				case 'n':	// Next connector
				case 'N':
					keep_moving = 0;
					break;
				default:
					break;
				}

				// Purge unread characters
				tcflush(0, TCIFLUSH);
			}

			// Wait for min of flip or move deltas
			SleepTime = (deltaFlipTime < deltaMoveTime) ?
				    deltaFlipTime : deltaMoveTime;
			usleep(SleepTime);
		}

		free(sprite_plane_id);
		free(sprite_x);
		free(sprite_y);
		free(delta_x);
		free(delta_y);
		sprite_plane_id = NULL;
		sprite_plane_count = 0;
		sprite_x = sprite_y = delta_x = delta_y = NULL;
	}

out:
	// Purge unread characters
	tcflush(0, TCIFLUSH);
	// Restore previous terminal settings
	if (tcsetattr(0, TCSANOW, &orig_term) != 0) {
		printf("tcsetattr failure: %s\n", strerror(errno));
		return;
	}

	drmModeFreeResources(gfx_resources);
}
static int prepare_sprite_surfaces(int fd, int sprite_width, int sprite_height,
				   uint32_t num_surfaces,
				   uint32_t *sprite_handles,
				   uint32_t *sprite_stride,
				   uint32_t *sprite_size, int tiled)
{
	uint32_t bytes_per_pixel = sizeof(uint32_t);
	uint32_t *sprite_fb_ptr;
	int i;

	if (bytes_per_pixel != sizeof(uint32_t)) {
		printf("Bad bytes_per_pixel for sprite: %d\n",
		       bytes_per_pixel);
		return -EINVAL;
	}

	if (tiled) {
		int v;

		/* Round the tiling up to the next power-of-two and the
		 * region up to the next pot fence size so that this works
		 * on all generations.
		 *
		 * This can still fail if the framebuffer is too large to
		 * be tiled. But then that failure is expected.
		 */
		v = sprite_width * bytes_per_pixel;
		for (*sprite_stride = 512; *sprite_stride < v; *sprite_stride *= 2)
			;

		v = *sprite_stride * sprite_height;
		for (*sprite_size = 1024*1024; *sprite_size < v; *sprite_size *= 2)
			;
	} else {
		/* Scan-out has a 64 byte alignment restriction */
		*sprite_stride = (sprite_width * bytes_per_pixel + 63) & ~63;
		*sprite_size = *sprite_stride * sprite_height;
	}

	for (i = 0; i < num_surfaces; i++) {
		// Create the sprite surface
		sprite_handles[i] = gem_create(fd, *sprite_size);

		if (tiled)
			gem_set_tiling(fd, sprite_handles[i], I915_TILING_X,
				       *sprite_stride);

		// Get pointer to the surface
		sprite_fb_ptr = gem_mmap(fd, sprite_handles[i], *sprite_size,
					 PROT_READ | PROT_WRITE);

		if (sprite_fb_ptr != NULL) {
			// Fill with checkerboard pattern
			fill_sprite(sprite_width, sprite_height,
				    *sprite_stride, i, sprite_fb_ptr);
			munmap(sprite_fb_ptr, *sprite_size);
		} else {
			i--;
			while (i >= 0) {
				gem_close(fd, sprite_handles[i]);
				i--;
			}
			// Mapping failed: clean up and report the error
			// instead of restarting the loop from i = 0.
			return -1;
		}
	}

	return 0;
}
int main(int argc, char **argv)
{
	struct timeval start, end;
	uint8_t *buf;
	uint32_t handle;
	int size = OBJECT_SIZE;
	int loop, i, tiling;
	int fd;

	if (argc > 1)
		size = atoi(argv[1]);
	if (size == 0) {
		fprintf(stderr, "Invalid object size specified\n");
		return 1;
	}

	buf = malloc(size);
	memset(buf, 0, size);
	fd = drm_open_any();

	handle = gem_create(fd, size);
	assert(handle);

	for (tiling = I915_TILING_NONE; tiling <= I915_TILING_Y; tiling++) {
		if (tiling != I915_TILING_NONE) {
			printf("\nSetting tiling mode to %s\n",
			       tiling == I915_TILING_X ? "X" : "Y");
			gem_set_tiling(fd, handle, tiling, 512);
		}

		if (tiling == I915_TILING_NONE) {
			gem_set_domain(fd, handle,
				       I915_GEM_DOMAIN_CPU,
				       I915_GEM_DOMAIN_CPU);

			{
				uint32_t *base = gem_mmap__cpu(fd, handle, size,
							       PROT_READ | PROT_WRITE);
				volatile uint32_t *ptr = base;
				int x = 0;

				for (i = 0; i < size/sizeof(*ptr); i++)
					x += ptr[i];

				/* force overtly clever gcc to actually compute x */
				ptr[0] = x;

				munmap(base, size);

				/* mmap read */
				gettimeofday(&start, NULL);
				for (loop = 0; loop < 1000; loop++) {
					base = gem_mmap__cpu(fd, handle, size,
							     PROT_READ | PROT_WRITE);
					ptr = base;
					x = 0;

					for (i = 0; i < size/sizeof(*ptr); i++)
						x += ptr[i];

					/* force overtly clever gcc to actually compute x */
					ptr[0] = x;

					munmap(base, size);
				}
				gettimeofday(&end, NULL);
				printf("Time to read %dk through a CPU map: %7.3fµs\n",
				       size/1024, elapsed(&start, &end, loop));

				/* mmap write */
				gettimeofday(&start, NULL);
				for (loop = 0; loop < 1000; loop++) {
					base = gem_mmap__cpu(fd, handle, size,
							     PROT_READ | PROT_WRITE);
					ptr = base;

					for (i = 0; i < size/sizeof(*ptr); i++)
						ptr[i] = i;

					munmap(base, size);
				}
				gettimeofday(&end, NULL);
				printf("Time to write %dk through a CPU map: %7.3fµs\n",
				       size/1024, elapsed(&start, &end, loop));

				gettimeofday(&start, NULL);
				for (loop = 0; loop < 1000; loop++) {
					base = gem_mmap__cpu(fd, handle, size,
							     PROT_READ | PROT_WRITE);
					memset(base, 0, size);
					munmap(base, size);
				}
				gettimeofday(&end, NULL);
				printf("Time to clear %dk through a CPU map: %7.3fµs\n",
				       size/1024, elapsed(&start, &end, loop));

				gettimeofday(&start, NULL);
				base = gem_mmap__cpu(fd, handle, size,
						     PROT_READ | PROT_WRITE);
				for (loop = 0; loop < 1000; loop++)
					memset(base, 0, size);
				munmap(base, size);
				gettimeofday(&end, NULL);
				printf("Time to clear %dk through a cached CPU map: %7.3fµs\n",
				       size/1024, elapsed(&start, &end, loop));
			}

			/* CPU pwrite */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++)
				gem_write(fd, handle, 0, buf, size);
			gettimeofday(&end, NULL);
			printf("Time to pwrite %dk through the CPU: %7.3fµs\n",
			       size/1024, elapsed(&start, &end, loop));

			/* CPU pread */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++)
				gem_read(fd, handle, 0, buf, size);
			gettimeofday(&end, NULL);
			printf("Time to pread %dk through the CPU: %7.3fµs\n",
			       size/1024, elapsed(&start, &end, loop));
		}

		/* prefault into gtt */
		{
			uint32_t *base = gem_mmap(fd, handle, size,
						  PROT_READ | PROT_WRITE);
			volatile uint32_t *ptr = base;
			int x = 0;

			for (i = 0; i < size/sizeof(*ptr); i++)
				x += ptr[i];

			/* force overtly clever gcc to actually compute x */
			ptr[0] = x;

			munmap(base, size);
		}

		/* mmap read */
		gettimeofday(&start, NULL);
		for (loop = 0; loop < 1000; loop++) {
			uint32_t *base = gem_mmap(fd, handle, size,
						  PROT_READ | PROT_WRITE);
			volatile uint32_t *ptr = base;
			int x = 0;

			for (i = 0; i < size/sizeof(*ptr); i++)
				x += ptr[i];

			/* force overtly clever gcc to actually compute x */
			ptr[0] = x;

			munmap(base, size);
		}
		gettimeofday(&end, NULL);
		printf("Time to read %dk through a GTT map: %7.3fµs\n",
		       size/1024, elapsed(&start, &end, loop));

		/* mmap write */
		gettimeofday(&start, NULL);
		for (loop = 0; loop < 1000; loop++) {
			uint32_t *base = gem_mmap(fd, handle, size,
						  PROT_READ | PROT_WRITE);
			volatile uint32_t *ptr = base;

			for (i = 0; i < size/sizeof(*ptr); i++)
				ptr[i] = i;

			munmap(base, size);
		}
		gettimeofday(&end, NULL);
		printf("Time to write %dk through a GTT map: %7.3fµs\n",
		       size/1024, elapsed(&start, &end, loop));

		/* mmap clear */
		gettimeofday(&start, NULL);
		for (loop = 0; loop < 1000; loop++) {
			uint32_t *base = gem_mmap(fd, handle, size,
						  PROT_READ | PROT_WRITE);
			memset(base, 0, size);
			munmap(base, size);
		}
		gettimeofday(&end, NULL);
		printf("Time to clear %dk through a GTT map: %7.3fµs\n",
		       size/1024, elapsed(&start, &end, loop));

		gettimeofday(&start, NULL);
		{
			uint32_t *base = gem_mmap(fd, handle, size,
						  PROT_READ | PROT_WRITE);

			for (loop = 0; loop < 1000; loop++)
				memset(base, 0, size);
			munmap(base, size);
		}
		gettimeofday(&end, NULL);
		printf("Time to clear %dk through a cached GTT map: %7.3fµs\n",
		       size/1024, elapsed(&start, &end, loop));

		/* mmap read */
		gettimeofday(&start, NULL);
		for (loop = 0; loop < 1000; loop++) {
			uint32_t *base = gem_mmap(fd, handle, size,
						  PROT_READ | PROT_WRITE);
			volatile uint32_t *ptr = base;
			int x = 0;

			for (i = 0; i < size/sizeof(*ptr); i++)
				x += ptr[i];

			/* force overtly clever gcc to actually compute x */
			ptr[0] = x;

			munmap(base, size);
		}
		gettimeofday(&end, NULL);
		printf("Time to read %dk (again) through a GTT map: %7.3fµs\n",
		       size/1024, elapsed(&start, &end, loop));

		if (tiling == I915_TILING_NONE) {
			/* GTT pwrite */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++)
				gem_write(fd, handle, 0, buf, size);
			gettimeofday(&end, NULL);
			printf("Time to pwrite %dk through the GTT: %7.3fµs\n",
			       size/1024, elapsed(&start, &end, loop));

			/* GTT pread */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++)
				gem_read(fd, handle, 0, buf, size);
			gettimeofday(&end, NULL);
			printf("Time to pread %dk through the GTT: %7.3fµs\n",
			       size/1024, elapsed(&start, &end, loop));

			/* GTT pwrite, including clflush */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++) {
				gem_write(fd, handle, 0, buf, size);
				gem_sync(fd, handle);
			}
			gettimeofday(&end, NULL);
			printf("Time to pwrite %dk through the GTT (clflush): %7.3fµs\n",
			       size/1024, elapsed(&start, &end, loop));

			/* GTT pread, including clflush */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++) {
				gem_sync(fd, handle);
				gem_read(fd, handle, 0, buf, size);
			}
			gettimeofday(&end, NULL);
			printf("Time to pread %dk through the GTT (clflush): %7.3fµs\n",
			       size/1024, elapsed(&start, &end, loop));

			/* partial writes */
			printf("Now partial writes.\n");
			size /= 4;

			/* partial GTT pwrite, including clflush */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++) {
				gem_write(fd, handle, 0, buf, size);
				gem_sync(fd, handle);
			}
			gettimeofday(&end, NULL);
			printf("Time to pwrite %dk through the GTT (clflush): %7.3fµs\n",
			       size/1024, elapsed(&start, &end, loop));

			/* partial GTT pread, including clflush */
			gettimeofday(&start, NULL);
			for (loop = 0; loop < 1000; loop++) {
				gem_sync(fd, handle);
				gem_read(fd, handle, 0, buf, size);
			}
			gettimeofday(&end, NULL);
			printf("Time to pread %dk through the GTT (clflush): %7.3fµs\n",
			       size/1024, elapsed(&start, &end, loop));

			size *= 4;
		}
	}

	gem_close(fd, handle);
	close(fd);

	return 0;
}
static void copy(int fd, uint32_t dst, uint32_t src, unsigned int error)
{
	uint32_t batch[12];
	struct drm_i915_gem_relocation_entry reloc[2];
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_execbuffer2 exec;
	uint32_t handle;
	int ret, i = 0;

	batch[i++] = XY_SRC_COPY_BLT_CMD |
		     XY_SRC_COPY_BLT_WRITE_ALPHA |
		     XY_SRC_COPY_BLT_WRITE_RGB;
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i - 1] |= 8;
	else
		batch[i - 1] |= 6;

	batch[i++] = (3 << 24) | /* 32 bits */
		     (0xcc << 16) | /* copy ROP */
		     WIDTH*4;
	batch[i++] = 0; /* dst x1,y1 */
	batch[i++] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */
	batch[i++] = 0; /* dst reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0;
	batch[i++] = 0; /* src x1,y1 */
	batch[i++] = WIDTH*4;
	batch[i++] = 0; /* src reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0;
	batch[i++] = MI_BATCH_BUFFER_END;
	batch[i++] = MI_NOOP;

	handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, batch, sizeof(batch));

	reloc[0].target_handle = dst;
	reloc[0].delta = 0;
	reloc[0].offset = 4 * sizeof(batch[0]);
	reloc[0].presumed_offset = 0;
	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;

	reloc[1].target_handle = src;
	reloc[1].delta = 0;
	reloc[1].offset = 7 * sizeof(batch[0]);
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		reloc[1].offset += sizeof(batch[0]);
	reloc[1].presumed_offset = 0;
	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[1].write_domain = 0;

	memset(obj, 0, sizeof(obj));
	exec.buffer_count = 0;
	obj[exec.buffer_count++].handle = dst;
	if (src != dst)
		obj[exec.buffer_count++].handle = src;
	obj[exec.buffer_count].handle = handle;
	obj[exec.buffer_count].relocation_count = 2;
	obj[exec.buffer_count].relocs_ptr = (uintptr_t)reloc;
	exec.buffer_count++;
	exec.buffers_ptr = (uintptr_t)obj;

	exec.batch_start_offset = 0;
	exec.batch_len = i * 4;
	exec.DR1 = exec.DR4 = 0;
	exec.num_cliprects = 0;
	exec.cliprects_ptr = 0;
	exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0;
	i915_execbuffer2_set_context_id(exec, 0);
	exec.rsvd2 = 0;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	if (ret)
		ret = errno;

	if (error == ~0)
		igt_assert_neq(ret, 0);
	else
		igt_assert(ret == error);

	gem_close(fd, handle);
}
static void copy(int fd, uint32_t dst, uint32_t src)
{
	uint32_t batch[1024], *b = batch;
	struct drm_i915_gem_relocation_entry reloc[2], *r = reloc;
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_execbuffer2 exec;
	uint32_t handle;
	int ret;

	/* invariant state */
	*b++ = (_3DSTATE_AA_CMD |
		AA_LINE_ECAAR_WIDTH_ENABLE |
		AA_LINE_ECAAR_WIDTH_1_0 |
		AA_LINE_REGION_WIDTH_ENABLE | AA_LINE_REGION_WIDTH_1_0);
	*b++ = (_3DSTATE_INDEPENDENT_ALPHA_BLEND_CMD |
		IAB_MODIFY_ENABLE |
		IAB_MODIFY_FUNC | (BLENDFUNC_ADD << IAB_FUNC_SHIFT) |
		IAB_MODIFY_SRC_FACTOR |
		(BLENDFACT_ONE << IAB_SRC_FACTOR_SHIFT) |
		IAB_MODIFY_DST_FACTOR |
		(BLENDFACT_ZERO << IAB_DST_FACTOR_SHIFT));
	*b++ = (_3DSTATE_DFLT_DIFFUSE_CMD);
	*b++ = (0);
	*b++ = (_3DSTATE_DFLT_SPEC_CMD);
	*b++ = (0);
	*b++ = (_3DSTATE_DFLT_Z_CMD);
	*b++ = (0);
	*b++ = (_3DSTATE_COORD_SET_BINDINGS |
		CSB_TCB(0, 0) |
		CSB_TCB(1, 1) |
		CSB_TCB(2, 2) |
		CSB_TCB(3, 3) |
		CSB_TCB(4, 4) |
		CSB_TCB(5, 5) | CSB_TCB(6, 6) | CSB_TCB(7, 7));
	*b++ = (_3DSTATE_RASTER_RULES_CMD |
		ENABLE_POINT_RASTER_RULE |
		OGL_POINT_RASTER_RULE |
		ENABLE_LINE_STRIP_PROVOKE_VRTX |
		ENABLE_TRI_FAN_PROVOKE_VRTX |
		LINE_STRIP_PROVOKE_VRTX(1) |
		TRI_FAN_PROVOKE_VRTX(2) | ENABLE_TEXKILL_3D_4D | TEXKILL_4D);
	*b++ = (_3DSTATE_MODES_4_CMD |
		ENABLE_LOGIC_OP_FUNC | LOGIC_OP_FUNC(LOGICOP_COPY) |
		ENABLE_STENCIL_WRITE_MASK | STENCIL_WRITE_MASK(0xff) |
		ENABLE_STENCIL_TEST_MASK | STENCIL_TEST_MASK(0xff));
	*b++ = (_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
		I1_LOAD_S(3) | I1_LOAD_S(4) | I1_LOAD_S(5) | 2);
	*b++ = (0x00000000);	/* Disable texture coordinate wrap-shortest */
	*b++ = ((1 << S4_POINT_WIDTH_SHIFT) |
		S4_LINE_WIDTH_ONE | S4_CULLMODE_NONE | S4_VFMT_XY);
	*b++ = (0x00000000);	/* Stencil. */
	*b++ = (_3DSTATE_SCISSOR_ENABLE_CMD | DISABLE_SCISSOR_RECT);
	*b++ = (_3DSTATE_SCISSOR_RECT_0_CMD);
	*b++ = (0);
	*b++ = (0);
	*b++ = (_3DSTATE_DEPTH_SUBRECT_DISABLE);
	*b++ = (_3DSTATE_LOAD_INDIRECT | 0);	/* disable indirect state */
	*b++ = (0);
	*b++ = (_3DSTATE_STIPPLE);
	*b++ = (0x00000000);
	*b++ = (_3DSTATE_BACKFACE_STENCIL_OPS |
		BFO_ENABLE_STENCIL_TWO_SIDE | 0);

	/* sampler state */
#define TEX_COUNT 1
	*b++ = (_3DSTATE_MAP_STATE | (3 * TEX_COUNT));
	*b++ = ((1 << TEX_COUNT) - 1);
	*b = fill_reloc(r++, b-batch, src, I915_GEM_DOMAIN_SAMPLER, 0);
	b++;
	*b++ = (MAPSURF_32BIT | MT_32BIT_ARGB8888 |
		MS3_TILED_SURFACE |
		(HEIGHT - 1) << MS3_HEIGHT_SHIFT |
		(WIDTH - 1) << MS3_WIDTH_SHIFT);
	*b++ = ((WIDTH-1) << MS4_PITCH_SHIFT);

	*b++ = (_3DSTATE_SAMPLER_STATE | (3 * TEX_COUNT));
	*b++ = ((1 << TEX_COUNT) - 1);
	*b++ = (MIPFILTER_NONE << SS2_MIP_FILTER_SHIFT |
		FILTER_NEAREST << SS2_MAG_FILTER_SHIFT |
		FILTER_NEAREST << SS2_MIN_FILTER_SHIFT);
	*b++ = (TEXCOORDMODE_WRAP << SS3_TCX_ADDR_MODE_SHIFT |
		TEXCOORDMODE_WRAP << SS3_TCY_ADDR_MODE_SHIFT |
		0 << SS3_TEXTUREMAP_INDEX_SHIFT);
	*b++ = (0x00000000);

	/* render target state */
	*b++ = (_3DSTATE_BUF_INFO_CMD);
	*b++ = (BUF_3D_ID_COLOR_BACK | BUF_3D_TILED_SURFACE | WIDTH*4);
	*b = fill_reloc(r++, b-batch, dst,
			I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
	b++;

	*b++ = (_3DSTATE_DST_BUF_VARS_CMD);
	*b++ = (COLR_BUF_ARGB8888 |
		DSTORG_HORT_BIAS(0x8) | DSTORG_VERT_BIAS(0x8));

	/* draw rect is unconditional */
	*b++ = (_3DSTATE_DRAW_RECT_CMD);
	*b++ = (0x00000000);
	*b++ = (0x00000000);	/* ymin, xmin */
	*b++ = (DRAW_YMAX(HEIGHT - 1) | DRAW_XMAX(WIDTH - 1));
	/* yorig, xorig (relate to color buffer?) */
	*b++ = (0x00000000);

	/* texfmt */
	*b++ = (_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
		I1_LOAD_S(1) | I1_LOAD_S(2) | I1_LOAD_S(6) | 2);
	*b++ = ((4 << S1_VERTEX_WIDTH_SHIFT) | (4 << S1_VERTEX_PITCH_SHIFT));
	*b++ = (~S2_TEXCOORD_FMT(0, TEXCOORDFMT_NOT_PRESENT) |
		S2_TEXCOORD_FMT(0, TEXCOORDFMT_2D));
	*b++ = (S6_CBUF_BLEND_ENABLE | S6_COLOR_WRITE_ENABLE |
		BLENDFUNC_ADD << S6_CBUF_BLEND_FUNC_SHIFT |
		BLENDFACT_ONE << S6_CBUF_SRC_BLEND_FACT_SHIFT |
		BLENDFACT_ZERO << S6_CBUF_DST_BLEND_FACT_SHIFT);

	/* pixel shader */
	*b++ = (_3DSTATE_PIXEL_SHADER_PROGRAM | (1 + 3*3 - 2));
	/* decl FS_T0 */
	*b++ = (D0_DCL |
		REG_TYPE(FS_T0) << D0_TYPE_SHIFT |
		REG_NR(FS_T0) << D0_NR_SHIFT |
		((REG_TYPE(FS_T0) != REG_TYPE_S) ? D0_CHANNEL_ALL : 0));
	*b++ = (0);
	*b++ = (0);
	/* decl FS_S0 */
	*b++ = (D0_DCL |
		(REG_TYPE(FS_S0) << D0_TYPE_SHIFT) |
		(REG_NR(FS_S0) << D0_NR_SHIFT) |
		((REG_TYPE(FS_S0) != REG_TYPE_S) ? D0_CHANNEL_ALL : 0));
	*b++ = (0);
	*b++ = (0);
	/* texld(FS_OC, FS_S0, FS_T0) */
	*b++ = (T0_TEXLD |
		(REG_TYPE(FS_OC) << T0_DEST_TYPE_SHIFT) |
		(REG_NR(FS_OC) << T0_DEST_NR_SHIFT) |
		(REG_NR(FS_S0) << T0_SAMPLER_NR_SHIFT));
	*b++ = ((REG_TYPE(FS_T0) << T1_ADDRESS_REG_TYPE_SHIFT) |
		(REG_NR(FS_T0) << T1_ADDRESS_REG_NR_SHIFT));
	*b++ = (0);

	*b++ = (PRIM3D_RECTLIST | (3*4 - 1));
	*b++ = pack_float(WIDTH);
	*b++ = pack_float(HEIGHT);
	*b++ = pack_float(WIDTH);
	*b++ = pack_float(HEIGHT);
	*b++ = pack_float(0);
	*b++ = pack_float(HEIGHT);
	*b++ = pack_float(0);
	*b++ = pack_float(HEIGHT);
	*b++ = pack_float(0);
	*b++ = pack_float(0);
	*b++ = pack_float(0);
	*b++ = pack_float(0);

	*b++ = MI_BATCH_BUFFER_END;
	if ((b - batch) & 1)
		*b++ = 0;

	igt_assert(b - batch <= 1024);
	handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, batch, (b-batch)*sizeof(batch[0]));

	igt_assert(r-reloc == 2);

	obj[0].handle = dst;
	obj[0].relocation_count = 0;
	obj[0].relocs_ptr = 0;
	obj[0].alignment = 0;
	obj[0].offset = 0;
	obj[0].flags = 0;
	obj[0].rsvd1 = 0;
	obj[0].rsvd2 = 0;

	obj[1].handle = src;
	obj[1].relocation_count = 0;
	obj[1].relocs_ptr = 0;
	obj[1].alignment = 0;
	obj[1].offset = 0;
	obj[1].flags = 0;
	obj[1].rsvd1 = 0;
	obj[1].rsvd2 = 0;

	obj[2].handle = handle;
	obj[2].relocation_count = 2;
	obj[2].relocs_ptr = (uintptr_t)reloc;
	obj[2].alignment = 0;
	obj[2].offset = 0;
	obj[2].flags = 0;
	obj[2].rsvd1 = obj[2].rsvd2 = 0;

	exec.buffers_ptr = (uintptr_t)obj;
	exec.buffer_count = 3;
	exec.batch_start_offset = 0;
	exec.batch_len = (b-batch)*sizeof(batch[0]);
	exec.DR1 = exec.DR4 = 0;
	exec.num_cliprects = 0;
	exec.cliprects_ptr = 0;
	exec.flags = 0;
	i915_execbuffer2_set_context_id(exec, 0);
	exec.rsvd2 = 0;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	while (ret && errno == EBUSY) {
		drmCommandNone(fd, DRM_I915_GEM_THROTTLE);
		ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	}
	igt_assert_eq(ret, 0);

	gem_close(fd, handle);
}
static void run_test(int fd, int num_fences, int expected_errno,
		     unsigned flags)
{
	struct drm_i915_gem_execbuffer2 execbuf[2];
	struct drm_i915_gem_exec_object2 exec[2][2*MAX_FENCES+3];
	struct drm_i915_gem_relocation_entry reloc[2*MAX_FENCES+2];
	int i, n;
	int loop = 1000;

	if (flags & BUSY_LOAD) {
		bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
		batch = intel_batchbuffer_alloc(bufmgr, devid);

		/* Takes forever otherwise. */
		loop = 50;
	}

	if (flags & INTERRUPTIBLE)
		igt_fork_signal_helper();

	memset(execbuf, 0, sizeof(execbuf));
	memset(exec, 0, sizeof(exec));
	memset(reloc, 0, sizeof(reloc));

	for (n = 0; n < 2*num_fences; n++) {
		uint32_t handle = tiled_bo_create(fd);

		exec[1][2*num_fences - n-1].handle = exec[0][n].handle = handle;
		fill_reloc(&reloc[n], handle);
	}

	for (i = 0; i < 2; i++) {
		for (n = 0; n < num_fences; n++)
			exec[i][n].flags = EXEC_OBJECT_NEEDS_FENCE;

		exec[i][2*num_fences].handle = batch_create(fd);
		exec[i][2*num_fences].relocs_ptr = (uintptr_t)reloc;
		exec[i][2*num_fences].relocation_count = 2*num_fences;

		execbuf[i].buffers_ptr = (uintptr_t)exec[i];
		execbuf[i].buffer_count = 2*num_fences+1;
		execbuf[i].batch_len = 2*sizeof(uint32_t);
	}

	do {
		if (flags & BUSY_LOAD)
			emit_dummy_load();

		igt_assert_eq(__gem_execbuf(fd, &execbuf[0]), expected_errno);
		igt_assert_eq(__gem_execbuf(fd, &execbuf[1]), expected_errno);
	} while (--loop);

	if (flags & INTERRUPTIBLE)
		igt_stop_signal_helper();

	/* Cleanup */
	for (n = 0; n < 2*num_fences; n++)
		gem_close(fd, exec[0][n].handle);

	for (i = 0; i < 2; i++)
		gem_close(fd, exec[i][2*num_fences].handle);

	if (flags & BUSY_LOAD) {
		intel_batchbuffer_free(batch);
		drm_intel_bufmgr_destroy(bufmgr);
	}
}
static void free_userptr_bo(int fd, uint32_t handle)
{
	gem_close(fd, handle);
	free_handle_ptr(handle);
}
static int many_exec(int fd, uint32_t batch, int num_exec, int num_reloc,
		     unsigned flags)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 *gem_exec;
	struct drm_i915_gem_relocation_entry *gem_reloc;
	unsigned max_handle = batch;
	int ret, n;

	gem_exec = calloc(num_exec+1, sizeof(*gem_exec));
	gem_reloc = calloc(num_reloc, sizeof(*gem_reloc));
	igt_assert(gem_exec && gem_reloc);

	for (n = 0; n < num_exec; n++) {
		gem_exec[n].handle = gem_create(fd, 4096);
		if (gem_exec[n].handle > max_handle)
			max_handle = gem_exec[n].handle;
		gem_exec[n].relocation_count = 0;
		gem_exec[n].relocs_ptr = 0;
		gem_exec[n].alignment = 0;
		gem_exec[n].offset = 0;
		gem_exec[n].flags = 0;
		gem_exec[n].rsvd1 = 0;
		gem_exec[n].rsvd2 = 0;
	}

	gem_exec[n].handle = batch;
	gem_exec[n].relocation_count = num_reloc;
	gem_exec[n].relocs_ptr = (uintptr_t) gem_reloc;

	if (flags & USE_LUT)
		max_handle = num_exec + 1;
	max_handle++;

	for (n = 0; n < num_reloc; n++) {
		uint32_t target;

		if (flags & BROKEN) {
			target = -(rand() % 4096) - 1;
		} else {
			target = rand() % (num_exec + 1);
			if ((flags & USE_LUT) == 0)
				target = gem_exec[target].handle;
		}

		gem_reloc[n].offset = 1024;
		gem_reloc[n].delta = 0;
		gem_reloc[n].target_handle = target;
		gem_reloc[n].read_domains = I915_GEM_DOMAIN_RENDER;
		gem_reloc[n].write_domain = 0;
		gem_reloc[n].presumed_offset = 0;
	}

	execbuf.buffers_ptr = (uintptr_t)gem_exec;
	execbuf.buffer_count = num_exec + 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = 8;
	execbuf.cliprects_ptr = 0;
	execbuf.num_cliprects = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	execbuf.flags = flags & USE_LUT ? LOCAL_I915_EXEC_HANDLE_LUT : 0;
	i915_execbuffer2_set_context_id(execbuf, 0);
	execbuf.rsvd2 = 0;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);

	for (n = 0; n < num_exec; n++)
		gem_close(fd, gem_exec[n].handle);

	free(gem_exec);
	free(gem_reloc);

	return ret;
}
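/*
 * Hedged caller sketch (hypothetical, not from the original test): exercise
 * both the valid and the intentionally broken relocation paths. many_exec()
 * returns the raw drmIoctl() result, so 0 means the kernel accepted the
 * execbuf and nonzero means it was rejected; the broken relocations must
 * never be silently accepted.
 */
static void sanity_check_relocs(int fd, uint32_t batch)
{
	igt_assert_eq(many_exec(fd, batch, 16, 16, 0), 0);
	igt_assert(many_exec(fd, batch, 16, 16, BROKEN) != 0);
}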
int main(int argc, char **argv)
{
	int fd = drm_open_any();
	int size = 0;
	int busy = 0;
	int reps = 13;
	int c, n, s;

	while ((c = getopt(argc, argv, "bs:r:")) != -1) {
		switch (c) {
		case 's':
			size = atoi(optarg);
			break;

		case 'r':
			reps = atoi(optarg);
			if (reps < 1)
				reps = 1;
			break;

		case 'b':
			busy = true;
			break;

		default:
			break;
		}
	}

	if (size == 0) {
		for (s = 4096; s <= OBJECT_SIZE; s <<= 1) {
			igt_stats_t stats;

			igt_stats_init_with_size(&stats, reps);
			for (n = 0; n < reps; n++) {
				struct timespec start, end;
				uint64_t count = 0;

				clock_gettime(CLOCK_MONOTONIC, &start);
				do {
					for (c = 0; c < 1000; c++) {
						uint32_t handle;

						handle = gem_create(fd, s);
						gem_set_domain(fd, handle,
							       I915_GEM_DOMAIN_GTT,
							       I915_GEM_DOMAIN_GTT);
						if (busy)
							make_busy(fd, handle);
						gem_close(fd, handle);
					}
					count += c;
					clock_gettime(CLOCK_MONOTONIC, &end);
				} while (end.tv_sec - start.tv_sec < 2);

				igt_stats_push_float(&stats,
						     count / elapsed(&start, &end));
			}
			printf("%f\n", igt_stats_get_trimean(&stats));
			igt_stats_fini(&stats);
		}
	} else {
		for (n = 0; n < reps; n++) {
			struct timespec start, end;
			uint64_t count = 0;

			clock_gettime(CLOCK_MONOTONIC, &start);
			do {
				for (c = 0; c < 1000; c++) {
					uint32_t handle;

					handle = gem_create(fd, size);
					gem_set_domain(fd, handle,
						       I915_GEM_DOMAIN_GTT,
						       I915_GEM_DOMAIN_GTT);
					if (busy)
						make_busy(fd, handle);
					gem_close(fd, handle);
				}
				count += c;
				clock_gettime(CLOCK_MONOTONIC, &end);
			} while (end.tv_sec - start.tv_sec < 2);

			printf("%f\n", count / elapsed(&start, &end));
		}
	}

	return 0;
}