int main(int argc, char **argv) { uint32_t batch[2] = {MI_BATCH_BUFFER_END}; uint32_t handle; uint32_t devid; int fd; drmtest_subtest_init(argc, argv); fd = drm_open_any(); devid = intel_get_drm_devid(fd); handle = gem_create(fd, 4096); gem_write(fd, handle, 0, batch, sizeof(batch)); if (drmtest_run_subtest("render")) loop(fd, handle, I915_EXEC_RENDER, "render"); if (drmtest_run_subtest("bsd")) if (HAS_BSD_RING(devid)) loop(fd, handle, I915_EXEC_BSD, "bsd"); if (drmtest_run_subtest("blt")) if (HAS_BLT_RING(devid)) loop(fd, handle, I915_EXEC_BLT, "blt"); gem_close(fd, handle); close(fd); return skipped_all ? 77 : 0; }
/*
 * Flush the batchbuffer, targeting the BLT ring when the device has one
 * and the default ring otherwise.
 */
void intel_batchbuffer_flush(struct intel_batchbuffer *batch)
{
	int target_ring = HAS_BLT_RING(batch->devid) ? I915_EXEC_BLT : 0;

	intel_batchbuffer_flush_on_ring(batch, target_ring);
}
int main(int argc, char **argv) { int fd; int devid; igt_skip_on_simulation(); if (argc != 1) { fprintf(stderr, "usage: %s\n", argv[0]); igt_fail(-1); } fd = drm_open_any(); devid = intel_get_drm_devid(fd); if (!HAS_BLT_RING(devid)) { fprintf(stderr, "not (yet) implemented for pre-snb\n"); return 77; } bufmgr = drm_intel_bufmgr_gem_init(fd, 4096); if (!bufmgr) { fprintf(stderr, "failed to init libdrm\n"); igt_fail(-1); } drm_intel_bufmgr_gem_enable_reuse(bufmgr); batch = intel_batchbuffer_alloc(bufmgr, devid); if (!batch) { fprintf(stderr, "failed to create batch buffer\n"); igt_fail(-1); } target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096); if (!target_buffer) { fprintf(stderr, "failed to alloc target buffer\n"); igt_fail(-1); } blt_bo = drm_intel_bo_alloc(bufmgr, "target bo", 4*4096*4096, 4096); if (!blt_bo) { fprintf(stderr, "failed to alloc blt buffer\n"); igt_fail(-1); } dummy_reloc_loop(); drm_intel_bo_unreference(target_buffer); intel_batchbuffer_free(batch); drm_intel_bufmgr_destroy(bufmgr); close(fd); return 0; }
/*
 * Keep @handle busy on the GPU: build a 1MiB self-copy blit with
 * gem_linear_blt() and submit it 10 times.  Note that @handle itself is
 * used as the batch buffer (the last object in the exec list), so it is
 * the object that ends up busy.
 */
static void make_busy(int fd, uint32_t handle)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_relocation_entry reloc[2];
	uint32_t batch[20];
	uint32_t tmp;
	int count;

	/* Scratch bo that the blit copies onto itself. */
	tmp = gem_create(fd, 1024*1024);

	obj[0].handle = tmp;
	obj[0].relocation_count = 0;
	obj[0].relocs_ptr = 0;
	obj[0].alignment = 0;
	obj[0].offset = 0;
	obj[0].flags = 0;
	obj[0].rsvd1 = 0;
	obj[0].rsvd2 = 0;

	/* The batch buffer proper; it carries the two blit relocations. */
	obj[1].handle = handle;
	obj[1].relocation_count = 2;
	obj[1].relocs_ptr = (uintptr_t) reloc;
	obj[1].alignment = 0;
	obj[1].offset = 0;
	obj[1].flags = 0;
	obj[1].rsvd1 = 0;
	obj[1].rsvd2 = 0;

	execbuf.buffers_ptr = (uintptr_t)obj;
	execbuf.buffer_count = 2;
	execbuf.batch_start_offset = 0;
	/* gem_linear_blt() emits the copy commands into batch[] and fills
	 * reloc[]; it returns the batch length in bytes. */
	execbuf.batch_len = gem_linear_blt(fd, batch, tmp, tmp, 1024*1024,reloc);
	execbuf.cliprects_ptr = 0;
	execbuf.num_cliprects = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	/* Run on the BLT ring when available, default ring otherwise. */
	execbuf.flags = 0;
	if (HAS_BLT_RING(intel_get_drm_devid(fd)))
		execbuf.flags |= I915_EXEC_BLT;
	i915_execbuffer2_set_context_id(execbuf, 0);
	execbuf.rsvd2 = 0;

	/* Upload the just-built commands into the batch bo. */
	gem_write(fd, handle, 0, batch, execbuf.batch_len);

	/* Queue the blit several times so @handle stays busy for a while. */
	for (count = 0; count < 10; count++)
		do_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);

	gem_close(fd, tmp);
}
int main(int argc, char **argv) { int fd; int devid; if (argc != 1) { fprintf(stderr, "usage: %s\n", argv[0]); exit(-1); } fd = drm_open_any(); devid = intel_get_drm_devid(fd); if (!HAS_BLT_RING(devid)) { fprintf(stderr, "inter ring check needs gen6+\n"); return 77; } bufmgr = drm_intel_bufmgr_gem_init(fd, 4096); if (!bufmgr) { fprintf(stderr, "failed to init libdrm\n"); exit(-1); } drm_intel_bufmgr_gem_enable_reuse(bufmgr); batch = intel_batchbuffer_alloc(bufmgr, devid); if (!batch) { fprintf(stderr, "failed to create batch buffer\n"); exit(-1); } target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096); if (!target_buffer) { fprintf(stderr, "failed to alloc target buffer\n"); exit(-1); } store_dword_loop(I915_EXEC_RENDER); drm_intel_bo_unreference(target_buffer); intel_batchbuffer_free(batch); drm_intel_bufmgr_destroy(bufmgr); close(fd); return 0; }
int main(int argc, char **argv) { int fd; int devid; if (argc != 1) { fprintf(stderr, "usage: %s\n", argv[0]); igt_fail(-1); } fd = drm_open_any(); devid = intel_get_drm_devid(fd); if (HAS_BSD_RING(devid)) num_rings++; if (HAS_BLT_RING(devid)) num_rings++; printf("num rings detected: %i\n", num_rings); bufmgr = drm_intel_bufmgr_gem_init(fd, 4096); if (!bufmgr) { fprintf(stderr, "failed to init libdrm\n"); igt_fail(-1); } drm_intel_bufmgr_gem_enable_reuse(bufmgr); batch = intel_batchbuffer_alloc(bufmgr, devid); if (!batch) { fprintf(stderr, "failed to create batch buffer\n"); igt_fail(-1); } mi_lri_loop(); gem_quiescent_gpu(fd); intel_batchbuffer_free(batch); drm_intel_bufmgr_destroy(bufmgr); close(fd); return 0; }
static int copy(int fd, uint32_t dst, uint32_t src, uint32_t *all_bo, int n_bo) { uint32_t batch[12]; struct drm_i915_gem_relocation_entry reloc[2]; struct drm_i915_gem_exec_object2 *obj; struct drm_i915_gem_execbuffer2 exec; uint32_t handle; int n, ret, i=0; batch[i++] = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA | XY_SRC_COPY_BLT_WRITE_RGB | 6); if (intel_gen(intel_get_drm_devid(fd)) >= 8) batch[i - 1] += 2; batch[i++] = (3 << 24) | /* 32 bits */ (0xcc << 16) | /* copy ROP */ WIDTH*4; batch[i++] = 0; /* dst x1,y1 */ batch[i++] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */ batch[i++] = 0; /* dst reloc */ if (intel_gen(intel_get_drm_devid(fd)) >= 8) batch[i++] = 0; /* FIXME */ batch[i++] = 0; /* src x1,y1 */ batch[i++] = WIDTH*4; batch[i++] = 0; /* src reloc */ if (intel_gen(intel_get_drm_devid(fd)) >= 8) batch[i++] = 0; /* FIXME */ batch[i++] = MI_BATCH_BUFFER_END; batch[i++] = MI_NOOP; handle = gem_create(fd, 4096); gem_write(fd, handle, 0, batch, sizeof(batch)); reloc[0].target_handle = dst; reloc[0].delta = 0; reloc[0].offset = 4 * sizeof(batch[0]); reloc[0].presumed_offset = 0; reloc[0].read_domains = I915_GEM_DOMAIN_RENDER; reloc[0].write_domain = I915_GEM_DOMAIN_RENDER; reloc[1].target_handle = src; reloc[1].delta = 0; reloc[1].offset = 7 * sizeof(batch[0]); if (intel_gen(intel_get_drm_devid(fd)) >= 8) reloc[1].offset += sizeof(batch[0]); reloc[1].presumed_offset = 0; reloc[1].read_domains = I915_GEM_DOMAIN_RENDER; reloc[1].write_domain = 0; obj = calloc(n_bo + 1, sizeof(*obj)); for (n = 0; n < n_bo; n++) obj[n].handle = all_bo[n]; obj[n].handle = handle; obj[n].relocation_count = 2; obj[n].relocs_ptr = (uintptr_t)reloc; exec.buffers_ptr = (uintptr_t)obj; exec.buffer_count = n_bo + 1; exec.batch_start_offset = 0; exec.batch_len = i * 4; exec.DR1 = exec.DR4 = 0; exec.num_cliprects = 0; exec.cliprects_ptr = 0; exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? 
I915_EXEC_BLT : 0; i915_execbuffer2_set_context_id(exec, 0); exec.rsvd2 = 0; ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec); if (ret) ret = errno; gem_close(fd, handle); free(obj); return ret; }
/*
 * Submit one XY_SRC_COPY blit of WIDTHxHEIGHT pixels from @src to @dst
 * and assert on the outcome of the execbuffer ioctl: @error == ~0
 * accepts any failure, otherwise errno must equal @error exactly
 * (0 meaning the submission must succeed).
 */
static void copy(int fd, uint32_t dst, uint32_t src, unsigned int error)
{
	uint32_t batch[12];
	struct drm_i915_gem_relocation_entry reloc[2];
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_execbuffer2 exec;
	uint32_t handle;
	int ret, i=0;

	batch[i++] = XY_SRC_COPY_BLT_CMD |
		  XY_SRC_COPY_BLT_WRITE_ALPHA |
		  XY_SRC_COPY_BLT_WRITE_RGB;
	/* Payload length field: 8 dwords on gen8+ (wider relocs), else 6. */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i - 1] |= 8;
	else
		batch[i - 1] |= 6;
	batch[i++] = (3 << 24) | /* 32 bits */
		  (0xcc << 16) | /* copy ROP */
		  WIDTH*4;
	batch[i++] = 0; /* dst x1,y1 */
	batch[i++] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */
	batch[i++] = 0; /* dst reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0; /* extra dst reloc dword on gen8+ */
	batch[i++] = 0; /* src x1,y1 */
	batch[i++] = WIDTH*4;
	batch[i++] = 0; /* src reloc */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		batch[i++] = 0; /* extra src reloc dword on gen8+ */
	batch[i++] = MI_BATCH_BUFFER_END;
	batch[i++] = MI_NOOP;

	handle = gem_create(fd, 4096);
	gem_write(fd, handle, 0, batch, sizeof(batch));

	/* dst is written by the blitter... */
	reloc[0].target_handle = dst;
	reloc[0].delta = 0;
	reloc[0].offset = 4 * sizeof(batch[0]);
	reloc[0].presumed_offset = 0;
	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;

	/* ...src is only read.  Its offset shifts by one dword on gen8+
	 * because of the wider dst reloc emitted above. */
	reloc[1].target_handle = src;
	reloc[1].delta = 0;
	reloc[1].offset = 7 * sizeof(batch[0]);
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		reloc[1].offset += sizeof(batch[0]);
	reloc[1].presumed_offset = 0;
	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[1].write_domain = 0;

	/* Exec list: dst, src (skipped when src == dst), batch last. */
	memset(obj, 0, sizeof(obj));
	exec.buffer_count = 0;
	obj[exec.buffer_count++].handle = dst;
	if (src != dst)
		obj[exec.buffer_count++].handle = src;
	obj[exec.buffer_count].handle = handle;
	obj[exec.buffer_count].relocation_count = 2;
	obj[exec.buffer_count].relocs_ptr = (uintptr_t)reloc;
	exec.buffer_count++;
	exec.buffers_ptr = (uintptr_t)obj;
	exec.batch_start_offset = 0;
	exec.batch_len = i * 4;
	exec.DR1 = exec.DR4 = 0;
	exec.num_cliprects = 0;
	exec.cliprects_ptr = 0;
	exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ?
		I915_EXEC_BLT : 0;
	i915_execbuffer2_set_context_id(exec, 0);
	exec.rsvd2 = 0;

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
	if (ret)
		ret = errno;

	/* error is unsigned, so ~0 compares as UINT_MAX: "any errno". */
	if (error == ~0)
		igt_assert_neq(ret, 0);
	else
		igt_assert(ret == error);

	gem_close(fd, handle);
}
/*
 * Keep @handle busy: build a batch of 20 XY_COLOR_BLT fills writing the
 * canary value into @handle (each 1024 pixels wide, size>>12 rows) and
 * submit it @loops times.  Submission is skipped entirely if uploading
 * the batch via pwrite fails (best-effort busy-maker).
 */
static void busy(data_t *data, uint32_t handle, int size, int loops)
{
	struct drm_i915_gem_relocation_entry reloc[20];
	struct drm_i915_gem_exec_object2 gem_exec[2];
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_pwrite gem_pwrite;
	struct drm_i915_gem_create create;
	uint32_t buf[170], *b;
	int i;

	memset(reloc, 0, sizeof(reloc));
	memset(gem_exec, 0, sizeof(gem_exec));
	memset(&execbuf, 0, sizeof(execbuf));

	b = buf;
	for (i = 0; i < 20; i++) {
		/* One solid-colour fill; the length field is 5 on gen8+
		 * (extra reloc dword) and 4 before. */
		*b++ = XY_COLOR_BLT_CMD_NOLEN |
			((data->intel_gen >= 8) ? 5 : 4) |
			COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB;
		/* NOTE(review): presumably ROP 0xf0, 32bpp (bits 25:24)
		 * and pitch 4096 — confirm field layout against the
		 * blitter docs. */
		*b++ = 0xf0 << 16 | 1 << 25 | 1 << 24 | 4096;
		*b++ = 0; /* dst x1,y1 */
		*b++ = size >> 12 << 16 | 1024; /* dst x2,y2 */
		/* Relocation points at the dst-address dword we emit next. */
		reloc[i].offset = (b - buf) * sizeof(uint32_t);
		reloc[i].target_handle = handle;
		reloc[i].read_domains = I915_GEM_DOMAIN_RENDER;
		reloc[i].write_domain = I915_GEM_DOMAIN_RENDER;
		*b++ = 0; /* dst reloc */
		if (data->intel_gen >= 8)
			*b++ = 0; /* extra reloc dword on gen8+ */
		*b++ = canary; /* fill colour */
	}
	*b++ = MI_BATCH_BUFFER_END;
	if ((b - buf) & 1)
		*b++ = 0; /* pad to an even dword count */

	/* The target bo needs a fence for the (tiled) blitter access. */
	gem_exec[0].handle = handle;
	gem_exec[0].flags = EXEC_OBJECT_NEEDS_FENCE;

	/* Fresh 4k bo to hold the batch we just built. */
	create.handle = 0;
	create.size = 4096;
	drmIoctl(data->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
	gem_exec[1].handle = create.handle;
	gem_exec[1].relocation_count = 20;
	gem_exec[1].relocs_ptr = (uintptr_t)reloc;

	execbuf.buffers_ptr = (uintptr_t)gem_exec;
	execbuf.buffer_count = 2;
	execbuf.batch_len = (b - buf) * sizeof(buf[0]);
	/* NOTE(review): bit 11 of the exec flags — presumably
	 * I915_EXEC_NO_RELOC; verify against i915_drm.h. */
	execbuf.flags = 1 << 11;
	if (HAS_BLT_RING(data->devid))
		execbuf.flags |= I915_EXEC_BLT;

	gem_pwrite.handle = gem_exec[1].handle;
	gem_pwrite.offset = 0;
	gem_pwrite.size = execbuf.batch_len;
	gem_pwrite.data_ptr = (uintptr_t)buf;
	if (drmIoctl(data->fd, DRM_IOCTL_I915_GEM_PWRITE, &gem_pwrite) == 0) {
		while (loops--)
			drmIoctl(data->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
	}

	drmIoctl(data->fd, DRM_IOCTL_GEM_CLOSE, &create.handle);
}
static int blit(int fd, uint32_t dst, uint32_t src, uint32_t *all_bo, int n_bo) { uint32_t batch[12]; struct drm_i915_gem_relocation_entry reloc[2]; struct drm_i915_gem_exec_object2 *obj; struct drm_i915_gem_execbuffer2 exec; uint32_t handle; int n, ret, i=0; batch[i++] = XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA | XY_SRC_COPY_BLT_WRITE_RGB; if (intel_gen(intel_get_drm_devid(fd)) >= 8) batch[i - 1] |= 8; else batch[i - 1] |= 6; batch[i++] = (3 << 24) | /* 32 bits */ (0xcc << 16) | /* copy ROP */ WIDTH*4; batch[i++] = 0; /* dst x1,y1 */ batch[i++] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */ batch[i++] = 0; /* dst reloc */ if (intel_gen(intel_get_drm_devid(fd)) >= 8) batch[i++] = 0; batch[i++] = 0; /* src x1,y1 */ batch[i++] = WIDTH*4; batch[i++] = 0; /* src reloc */ if (intel_gen(intel_get_drm_devid(fd)) >= 8) batch[i++] = 0; batch[i++] = MI_BATCH_BUFFER_END; batch[i++] = MI_NOOP; handle = gem_create(fd, 4096); gem_write(fd, handle, 0, batch, sizeof(batch)); reloc[0].target_handle = dst; reloc[0].delta = 0; reloc[0].offset = 4 * sizeof(batch[0]); reloc[0].presumed_offset = 0; reloc[0].read_domains = I915_GEM_DOMAIN_RENDER; reloc[0].write_domain = I915_GEM_DOMAIN_RENDER; reloc[1].target_handle = src; reloc[1].delta = 0; reloc[1].offset = 7 * sizeof(batch[0]); if (intel_gen(intel_get_drm_devid(fd)) >= 8) reloc[1].offset += sizeof(batch[0]); reloc[1].presumed_offset = 0; reloc[1].read_domains = I915_GEM_DOMAIN_RENDER; reloc[1].write_domain = 0; memset(&exec, 0, sizeof(exec)); obj = calloc(n_bo + 1, sizeof(*obj)); for (n = 0; n < n_bo; n++) obj[n].handle = all_bo[n]; obj[n].handle = handle; obj[n].relocation_count = 2; obj[n].relocs_ptr = (uintptr_t)reloc; exec.buffers_ptr = (uintptr_t)obj; exec.buffer_count = n_bo + 1; exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0; ret = __gem_execbuf(fd, &exec); gem_close(fd, handle); free(obj); return ret; }
/*
 * Benchmark linear blits of @object_size bytes: build a src->dst copy
 * batch with gem_linear_blt(), then submit it in exponentially growing
 * bursts (1 .. 2^17 submissions) and print the per-blit wall time and
 * throughput for each burst size.
 */
static void run(int object_size)
{
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 exec[3];
	struct drm_i915_gem_relocation_entry reloc[4];
	uint32_t buf[20];
	uint32_t handle, src, dst;
	int fd, len, count;
	int ring;

	fd = drm_open_any();

	handle = gem_create(fd, 4096);
	src = gem_create(fd, object_size);
	dst = gem_create(fd, object_size);

	/* Emit the copy commands into buf[] and fill reloc[]; returns the
	 * batch length in bytes. */
	len = gem_linear_blt(buf, src, dst, object_size, reloc);
	gem_write(fd, handle, 0, buf, len);

	exec[0].handle = src;
	exec[0].relocation_count = 0;
	exec[0].relocs_ptr = 0;
	exec[0].alignment = 0;
	exec[0].offset = 0;
	exec[0].flags = 0;
	exec[0].rsvd1 = 0;
	exec[0].rsvd2 = 0;

	exec[1].handle = dst;
	exec[1].relocation_count = 0;
	exec[1].relocs_ptr = 0;
	exec[1].alignment = 0;
	exec[1].offset = 0;
	exec[1].flags = 0;
	exec[1].rsvd1 = 0;
	exec[1].rsvd2 = 0;

	exec[2].handle = handle;
	/* NOTE(review): a batch longer than 40 bytes presumably contains a
	 * second blit command and hence four relocations — confirm against
	 * gem_linear_blt(). */
	exec[2].relocation_count = len > 40 ? 4 : 2;
	exec[2].relocs_ptr = (uintptr_t)reloc;
	exec[2].alignment = 0;
	exec[2].offset = 0;
	exec[2].flags = 0;
	exec[2].rsvd1 = 0;
	exec[2].rsvd2 = 0;

	/* Use the BLT ring when the device has one, default ring otherwise. */
	ring = 0;
	if (HAS_BLT_RING(intel_get_drm_devid(fd)))
		ring = I915_EXEC_BLT;

	execbuf.buffers_ptr = (uintptr_t)exec;
	execbuf.buffer_count = 3;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = len;
	execbuf.cliprects_ptr = 0;
	execbuf.num_cliprects = 0;
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	execbuf.flags = ring;
	i915_execbuffer2_set_context_id(execbuf, 0);
	execbuf.rsvd2 = 0;

	for (count = 1; count <= 1<<17; count <<= 1) {
		struct timeval start, end;

		gettimeofday(&start, NULL);
		if (gem_exec(fd, &execbuf, count))
			exit(1);
		/* Wait for the whole burst to retire before timing it. */
		gem_sync(fd, handle);
		gettimeofday(&end, NULL);
		printf("Time to blt %d bytes x %6d: %7.3fµs, %s\n",
		       object_size, count,
		       elapsed(&start, &end, count),
		       bytes_per_sec((char *)buf, object_size/elapsed(&start, &end, count)*1e6));
		fflush(stdout);
	}

	gem_close(fd, handle);
	close(fd);
}