static uint32_t batch(int fd)
{
    const uint32_t buf[] = { MI_BATCH_BUFFER_END };
    uint32_t handle = gem_create(fd, 4096);

    gem_write(fd, handle, 0, buf, sizeof(buf));
    return handle;
}
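For reference, gem_create() and gem_write(), used throughout these tests, are thin wrappers over the i915 GEM ioctls. A minimal sketch, assuming the standard i915_drm.h uAPI (the real IGT helpers additionally assert on failure):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <i915_drm.h>

/* Sketch: allocate a GEM object of the given size, return its handle. */
static uint32_t sketch_gem_create(int fd, uint64_t size)
{
    struct drm_i915_gem_create create;

    memset(&create, 0, sizeof(create));
    create.size = size;
    ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create); /* real helper checks errors */
    return create.handle;
}

/* Sketch: pwrite user data into a GEM object at a byte offset. */
static void sketch_gem_write(int fd, uint32_t handle, uint64_t offset,
                             const void *buf, uint64_t size)
{
    struct drm_i915_gem_pwrite pwrite;

    memset(&pwrite, 0, sizeof(pwrite));
    pwrite.handle = handle;
    pwrite.offset = offset;
    pwrite.size = size;
    pwrite.data_ptr = (uintptr_t)buf;
    ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite); /* real helper checks errors */
}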
static void *thread(void *data)
{
    struct thread *t = data;
    uint32_t bbe = MI_BATCH_BUFFER_END;
    struct drm_i915_gem_execbuffer2 execbuf;
    struct drm_i915_gem_exec_object2 obj;
    uint32_t *ctx;

    memset(&obj, 0, sizeof(obj));
    obj.handle = gem_create(t->fd, 4096);
    gem_write(t->fd, obj.handle, 0, &bbe, sizeof(bbe));

    memset(&execbuf, 0, sizeof(execbuf));
    execbuf.buffers_ptr = (uintptr_t)&obj;
    execbuf.buffer_count = 1;

    ctx = malloc(t->num_ctx * sizeof(uint32_t));
    igt_assert(ctx);
    memcpy(ctx, t->all_ctx, t->num_ctx * sizeof(uint32_t));
    igt_permute_array(ctx, t->num_ctx, xchg_int);

    for (unsigned n = 0; n < t->num_ctx; n++) {
        execbuf.rsvd1 = ctx[n];
        gem_execbuf(t->fd, &execbuf);
    }

    free(ctx);
    gem_close(t->fd, obj.handle);
    return NULL;
}
int main(int argc, char **argv)
{
    uint32_t batch[2] = { MI_BATCH_BUFFER_END };
    uint32_t handle;
    uint32_t devid;
    int fd;

    drmtest_subtest_init(argc, argv);

    fd = drm_open_any();
    devid = intel_get_drm_devid(fd);

    handle = gem_create(fd, 4096);
    gem_write(fd, handle, 0, batch, sizeof(batch));

    if (drmtest_run_subtest("render"))
        loop(fd, handle, I915_EXEC_RENDER, "render");

    if (drmtest_run_subtest("bsd"))
        if (HAS_BSD_RING(devid))
            loop(fd, handle, I915_EXEC_BSD, "bsd");

    if (drmtest_run_subtest("blt"))
        if (HAS_BLT_RING(devid))
            loop(fd, handle, I915_EXEC_BLT, "blt");

    gem_close(fd, handle);
    close(fd);

    return skipped_all ? 77 : 0;
}
static void test(data_t *data)
{
    igt_output_t *output = data->output;
    struct igt_fb *fb = &data->fb[1];
    drmModeModeInfo *mode;
    cairo_t *cr;
    uint32_t caching;
    void *buf;
    igt_crc_t crc;

    mode = igt_output_get_mode(output);

    /* create a non-white fb where we can pwrite later */
    igt_create_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
                  DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE, fb);

    cr = igt_get_cairo_ctx(data->drm_fd, fb);
    igt_paint_test_pattern(cr, fb->width, fb->height);
    cairo_destroy(cr);

    /* flip to it to make it UC/WC and fully flushed */
    drmModeSetPlane(data->drm_fd,
                    data->primary->drm_plane->plane_id,
                    output->config.crtc->crtc_id,
                    fb->fb_id, 0,
                    0, 0, fb->width, fb->height,
                    0, 0, fb->width << 16, fb->height << 16);

    /* flip back the original white buffer */
    drmModeSetPlane(data->drm_fd,
                    data->primary->drm_plane->plane_id,
                    output->config.crtc->crtc_id,
                    data->fb[0].fb_id, 0,
                    0, 0, fb->width, fb->height,
                    0, 0, fb->width << 16, fb->height << 16);

    /* make sure caching mode has become UC/WT */
    caching = gem_get_caching(data->drm_fd, fb->gem_handle);
    igt_assert(caching == I915_CACHING_NONE || caching == I915_CACHING_DISPLAY);

    /* use pwrite to make the other fb all white too */
    buf = malloc(fb->size);
    igt_assert(buf != NULL);
    memset(buf, 0xff, fb->size);
    gem_write(data->drm_fd, fb->gem_handle, 0, buf, fb->size);
    free(buf);

    /* and flip to it */
    drmModeSetPlane(data->drm_fd,
                    data->primary->drm_plane->plane_id,
                    output->config.crtc->crtc_id,
                    fb->fb_id, 0,
                    0, 0, fb->width, fb->height,
                    0, 0, fb->width << 16, fb->height << 16);

    /* check that the crc is as expected, which requires that caches got flushed */
    igt_pipe_crc_collect_crc(data->pipe_crc, &crc);
    igt_assert_crc_equal(&crc, &data->ref_crc);
}
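The gem_get_caching() call above queries the object's current caching mode. A minimal sketch of such a query, assuming the standard DRM_IOCTL_I915_GEM_GET_CACHING uAPI (the real IGT helper asserts on failure):

#include <string.h>
#include <sys/ioctl.h>
#include <i915_drm.h>

/* Sketch: return the object's caching mode (one of I915_CACHING_*). */
static uint32_t sketch_gem_get_caching(int fd, uint32_t handle)
{
    struct drm_i915_gem_caching arg;

    memset(&arg, 0, sizeof(arg));
    arg.handle = handle;
    ioctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg); /* real helper checks errors */
    return arg.caching;
}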
static void run_test(int fd, unsigned ring, unsigned flags)
{
    const int gen = intel_gen(intel_get_drm_devid(fd));
    const uint32_t bbe = MI_BATCH_BUFFER_END;
    struct drm_i915_gem_exec_object2 obj[2];
    struct drm_i915_gem_relocation_entry reloc[1024];
    struct drm_i915_gem_execbuffer2 execbuf;
    struct igt_hang_ring hang;
    uint32_t *batch, *b;
    int i;

    gem_require_ring(fd, ring);
    igt_skip_on_f(gen == 6 && (ring & ~(3 << 13)) == I915_EXEC_BSD,
                  "MI_STORE_DATA broken on gen6 bsd\n");

    gem_quiescent_gpu(fd);

    memset(&execbuf, 0, sizeof(execbuf));
    execbuf.buffers_ptr = (uintptr_t)obj;
    execbuf.buffer_count = 2;
    execbuf.flags = ring | (1 << 11);
    if (gen < 6)
        execbuf.flags |= I915_EXEC_SECURE;

    memset(obj, 0, sizeof(obj));
    obj[0].handle = gem_create(fd, 4096);
    obj[0].flags |= EXEC_OBJECT_WRITE;
    obj[1].handle = gem_create(fd, 1024*16 + 4096);
    gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
    igt_require(__gem_execbuf(fd, &execbuf) == 0);

    obj[1].relocs_ptr = (uintptr_t)reloc;
    obj[1].relocation_count = 1024;

    batch = gem_mmap__cpu(fd, obj[1].handle, 0, 16*1024 + 4096,
                          PROT_WRITE | PROT_READ);
    gem_set_domain(fd, obj[1].handle,
                   I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

    memset(reloc, 0, sizeof(reloc));
    b = batch;
    for (i = 0; i < 1024; i++) {
        uint64_t offset;

        reloc[i].target_handle = obj[0].handle;
        reloc[i].presumed_offset = obj[0].offset;
        reloc[i].offset = (b - batch + 1) * sizeof(*batch);
        reloc[i].delta = i * sizeof(uint32_t);
        reloc[i].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
        reloc[i].write_domain = I915_GEM_DOMAIN_INSTRUCTION;

        offset = obj[0].offset + reloc[i].delta;
        *b++ = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
        if (gen >= 8) {
            *b++ = offset;
            *b++ = offset >> 32;
        } else if (gen >= 4) {
static void test_ring(int fd, unsigned ring, uint32_t flags)
{
    uint32_t bbe = MI_BATCH_BUFFER_END;
    uint32_t handle[3];
    uint32_t read, write;
    uint32_t active;
    unsigned i;

    gem_require_ring(fd, ring | flags);

    handle[TEST] = gem_create(fd, 4096);
    handle[BATCH] = gem_create(fd, 4096);
    gem_write(fd, handle[BATCH], 0, &bbe, sizeof(bbe));

    /* Create a long running batch which we can use to hog the GPU */
    handle[BUSY] = busy_blt(fd);

    /* Queue a batch after the busy one; it should block and remain "busy" */
    igt_assert(exec_noop(fd, handle, ring | flags, false));
    igt_assert(still_busy(fd, handle[BUSY]));
    __gem_busy(fd, handle[TEST], &read, &write);
    igt_assert_eq(read, 1 << ring);
    igt_assert_eq(write, 0);

    /* Requeue with a write */
    igt_assert(exec_noop(fd, handle, ring | flags, true));
    igt_assert(still_busy(fd, handle[BUSY]));
    __gem_busy(fd, handle[TEST], &read, &write);
    igt_assert_eq(read, 1 << ring);
    igt_assert_eq(write, ring);

    /* Now queue it for a read across all available rings */
    active = 0;
    for (i = I915_EXEC_RENDER; i <= I915_EXEC_VEBOX; i++) {
        if (exec_noop(fd, handle, i | flags, false))
            active |= 1 << i;
    }
    igt_assert(still_busy(fd, handle[BUSY]));
    __gem_busy(fd, handle[TEST], &read, &write);
    igt_assert_eq(read, active);
    igt_assert_eq(write, ring); /* from the earlier write */

    /* Check that our long batch was long enough */
    igt_assert(still_busy(fd, handle[BUSY]));

    /* And make sure it becomes idle again */
    gem_sync(fd, handle[TEST]);
    __gem_busy(fd, handle[TEST], &read, &write);
    igt_assert_eq(read, 0);
    igt_assert_eq(write, 0);

    for (i = TEST; i <= BATCH; i++)
        gem_close(fd, handle[i]);
}
/*
 * Create an object with a non-page-aligned size and access it at an offset
 * that is greater than the requested size but still within the object's
 * last page boundary. pwrite here must succeed.
 */
static void valid_nonaligned_size(int fd)
{
    int handle;
    char buf[PAGE_SIZE];

    handle = gem_create(fd, PAGE_SIZE / 2);
    gem_write(fd, handle, PAGE_SIZE / 2, buf, PAGE_SIZE / 2);
    gem_close(fd, handle);
}
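The reason this must succeed is that GEM backs objects with whole pages, so the requested size is rounded up to page granularity. A sketch of the rounding arithmetic (standard power-of-two round-up; the 4KiB PAGE_SIZE is an assumption):

#include <assert.h>
#include <stdint.h>

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096 /* assumption: 4KiB pages, as on x86 */
#endif

/* Round a requested object size up to page granularity. */
static uint64_t round_up_to_page(uint64_t size)
{
    return (size + PAGE_SIZE - 1) & ~(uint64_t)(PAGE_SIZE - 1);
}

int main(void)
{
    /* A PAGE_SIZE/2 object is backed by one full page, so writes in
     * [PAGE_SIZE/2, PAGE_SIZE) still land in allocated storage. */
    assert(round_up_to_page(PAGE_SIZE / 2) == PAGE_SIZE);
    return 0;
}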
static uint32_t batch_create(int fd)
{
    uint32_t buf[] = { MI_BATCH_BUFFER_END, 0 };
    uint32_t batch_handle;

    batch_handle = gem_create(fd, BATCH_SIZE);
    gem_write(fd, batch_handle, 0, buf, sizeof(buf));

    return batch_handle;
}
static void draw_rect_pwrite_tiled(int fd, struct buf_data *buf,
                                   struct rect *rect, uint32_t color,
                                   uint32_t swizzle)
{
    int i;
    int tiled_pos, x, y, pixel_size;
    uint8_t tmp[4096];
    int tmp_used = 0, tmp_size;
    bool flush_tmp = false;
    int tmp_start_pos = 0;
    int pixels_written = 0;

    /* We didn't implement support for the older tiling methods yet. */
    igt_require(intel_gen(intel_get_drm_devid(fd)) >= 5);

    pixel_size = buf->bpp / 8;
    tmp_size = sizeof(tmp) / pixel_size;

    /* Instead of doing one pwrite per pixel, we try to group the maximum
     * amount of consecutive pixels we can in a single pwrite: that's why we
     * use the "tmp" variables. */
    for (i = 0; i < tmp_size; i++)
        set_pixel(tmp, i, color, buf->bpp);

    for (tiled_pos = 0; tiled_pos < buf->size; tiled_pos += pixel_size) {
        tiled_pos_to_x_y_linear(tiled_pos, buf->stride, swizzle, buf->bpp,
                                &x, &y);

        if (x >= rect->x && x < rect->x + rect->w &&
            y >= rect->y && y < rect->y + rect->h) {
            if (tmp_used == 0)
                tmp_start_pos = tiled_pos;
            tmp_used++;
        } else {
            flush_tmp = true;
        }

        if (tmp_used == tmp_size || (flush_tmp && tmp_used > 0) ||
            tiled_pos + pixel_size >= buf->size) {
            gem_write(fd, buf->handle, tmp_start_pos, tmp,
                      tmp_used * pixel_size);
            flush_tmp = false;
            pixels_written += tmp_used;
            tmp_used = 0;

            if (pixels_written == rect->w * rect->h)
                break;
        }
    }
}
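set_pixel() above is a helper from the same test whose definition is not shown here. A plausible sketch, under the assumption that it stores pixel index i of a linear byte buffer in the requested bits-per-pixel format (the name and body are hypothetical):

#include <stdint.h>
#include <string.h>

/* Hypothetical sketch of the set_pixel() helper used above. */
static void sketch_set_pixel(uint8_t *buf, int i, uint32_t color, int bpp)
{
    if (bpp == 16) {
        uint16_t c16 = color;
        memcpy(buf + i * 2, &c16, sizeof(c16));
    } else if (bpp == 32) {
        memcpy(buf + i * 4, &color, sizeof(color));
    }
}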
static void draw_rect_pwrite_untiled(int fd, struct buf_data *buf,
                                     struct rect *rect, uint32_t color)
{
    int i, y, offset;
    int pixel_size = buf->bpp / 8;
    uint8_t tmp[rect->w * pixel_size];

    /* Fill one row's worth of pixels, then pwrite it once per row. */
    for (i = 0; i < rect->w; i++)
        set_pixel(tmp, i, color, buf->bpp);

    for (y = rect->y; y < rect->y + rect->h; y++) {
        offset = (y * buf->stride) + (rect->x * pixel_size);
        gem_write(fd, buf->handle, offset, tmp, rect->w * pixel_size);
    }
}
static uint32_t create_bo(int fd, uint32_t val)
{
    uint32_t handle;
    int i;

    handle = gem_create(fd, sizeof(linear));

    /* Fill the BO with dwords starting at val */
    for (i = 0; i < WIDTH*HEIGHT; i++)
        linear[i] = val++;
    gem_write(fd, handle, 0, linear, sizeof(linear));

    return handle;
}
static void test_write(int fd)
{
    void *src;
    uint32_t dst;

    /* copy from a fresh src to fresh dst to force pagefault on both */
    src = create_pointer(fd);
    dst = gem_create(fd, OBJECT_SIZE);

    gem_write(fd, dst, 0, src, OBJECT_SIZE);

    gem_close(fd, dst);
    munmap(src, OBJECT_SIZE);
}
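create_pointer() is a helper from the surrounding mmap test; its definition is not included here. Given that the test later munmap()s the result, a plausible sketch is that it returns a fresh, unfaulted mapping of a new object (the name is real, the body is an assumption):

#include <stdint.h>
#include <sys/mman.h>

/* Plausible sketch of create_pointer(): allocate a fresh object and
 * return a writable mapping of it, so the gem_write() above faults the
 * source pages in while writing into the destination. */
static void *sketch_create_pointer(int fd)
{
    uint32_t handle = gem_create(fd, OBJECT_SIZE);
    void *ptr;

    ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
    gem_close(fd, handle); /* the mapping keeps the object alive */
    return ptr;
}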
static void make_busy(int fd, uint32_t handle)
{
    struct drm_i915_gem_execbuffer2 execbuf;
    struct drm_i915_gem_exec_object2 obj[2];
    struct drm_i915_gem_relocation_entry reloc[2];
    uint32_t batch[20];
    uint32_t tmp;
    int count;

    tmp = gem_create(fd, 1024*1024);

    obj[0].handle = tmp;
    obj[0].relocation_count = 0;
    obj[0].relocs_ptr = 0;
    obj[0].alignment = 0;
    obj[0].offset = 0;
    obj[0].flags = 0;
    obj[0].rsvd1 = 0;
    obj[0].rsvd2 = 0;

    obj[1].handle = handle;
    obj[1].relocation_count = 2;
    obj[1].relocs_ptr = (uintptr_t)reloc;
    obj[1].alignment = 0;
    obj[1].offset = 0;
    obj[1].flags = 0;
    obj[1].rsvd1 = 0;
    obj[1].rsvd2 = 0;

    execbuf.buffers_ptr = (uintptr_t)obj;
    execbuf.buffer_count = 2;
    execbuf.batch_start_offset = 0;
    execbuf.batch_len = gem_linear_blt(fd, batch, tmp, tmp, 1024*1024, reloc);
    execbuf.cliprects_ptr = 0;
    execbuf.num_cliprects = 0;
    execbuf.DR1 = 0;
    execbuf.DR4 = 0;
    execbuf.flags = 0;
    if (HAS_BLT_RING(intel_get_drm_devid(fd)))
        execbuf.flags |= I915_EXEC_BLT;
    i915_execbuffer2_set_context_id(execbuf, 0);
    execbuf.rsvd2 = 0;

    gem_write(fd, handle, 0, batch, execbuf.batch_len);
    for (count = 0; count < 10; count++)
        do_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
    gem_close(fd, tmp);
}
static uint64_t submit_batch(int fd, unsigned ring_id)
{
    const uint32_t batch[] = { MI_NOOP, MI_BATCH_BUFFER_END };
    struct drm_i915_gem_execbuffer2 execbuf;
    struct drm_i915_gem_exec_object2 exec;
    uint64_t presumed_offset;

    gem_require_ring(fd, ring_id);

    exec.handle = gem_create(fd, 4096);
    gem_write(fd, exec.handle, 0, batch, sizeof(batch));
    exec.relocation_count = 0;
    exec.relocs_ptr = 0;
    exec.alignment = 0;
    exec.offset = 0;
    exec.flags = 0;
    exec.rsvd1 = 0;
    exec.rsvd2 = 0;

    execbuf.buffers_ptr = (uintptr_t)&exec;
    execbuf.buffer_count = 1;
    execbuf.batch_start_offset = 0;
    execbuf.batch_len = sizeof(batch);
    execbuf.cliprects_ptr = 0;
    execbuf.num_cliprects = 0;
    execbuf.DR1 = 0;
    execbuf.DR4 = 0;
    execbuf.flags = ring_id;
    i915_execbuffer2_set_context_id(execbuf, 0);
    execbuf.rsvd2 = 0;

    gem_execbuf(fd, &execbuf);
    gem_sync(fd, exec.handle);
    presumed_offset = exec.offset;

    igt_set_stop_rings(igt_to_stop_ring_flag(ring_id));

    gem_execbuf(fd, &execbuf);
    gem_sync(fd, exec.handle);

    igt_assert(igt_get_stop_rings() == STOP_RING_NONE);
    igt_assert(presumed_offset == exec.offset);

    gem_close(fd, exec.handle);

    return exec.offset;
}
static void scratch_buf_init(data_t *data, struct igt_buf *buf,
                             int width, int height, int stride, uint8_t color)
{
    drm_intel_bo *bo;
    int i;

    bo = drm_intel_bo_alloc(data->bufmgr, "", SIZE, 4096);
    for (i = 0; i < width * height; i++)
        data->linear[i] = color;
    gem_write(data->drm_fd, bo->handle, 0, data->linear,
              sizeof(data->linear));

    buf->bo = bo;
    buf->stride = stride;
    buf->tiling = I915_TILING_NONE;
    buf->size = SIZE;
}
static void *gem_busyspin(void *arg)
{
    const uint32_t bbe = MI_BATCH_BUFFER_END;
    struct gem_busyspin *bs = arg;
    struct drm_i915_gem_execbuffer2 execbuf;
    struct drm_i915_gem_exec_object2 obj;
    unsigned engines[16];
    unsigned nengine;
    unsigned engine;
    int fd;

    fd = drm_open_driver(DRIVER_INTEL);

    nengine = 0;
    for_each_engine(fd, engine)
        if (!ignore_engine(fd, engine))
            engines[nengine++] = engine;

    memset(&obj, 0, sizeof(obj));
    obj.handle = gem_create(fd, 4096);
    gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));

    memset(&execbuf, 0, sizeof(execbuf));
    execbuf.buffers_ptr = (uintptr_t)&obj;
    execbuf.buffer_count = 1;
    execbuf.flags |= LOCAL_I915_EXEC_HANDLE_LUT;
    execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC;
    if (__gem_execbuf(fd, &execbuf)) {
        /* fall back if the kernel doesn't support the new flags */
        execbuf.flags = 0;
        gem_execbuf(fd, &execbuf);
    }

    while (!done) {
        for (int n = 0; n < nengine; n++) {
            execbuf.flags &= ~ENGINE_FLAGS;
            execbuf.flags |= engines[n];
            gem_execbuf(fd, &execbuf);
        }
        bs->count += nengine;
    }

    close(fd);
    return NULL;
}
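gem_busyspin() is written as a pthread entry point, looping until the shared 'done' flag is raised. A minimal usage sketch; the struct gem_busyspin layout beyond the count field it increments is an assumption:

#include <pthread.h>
#include <stdint.h>

/* Assumed minimal layout; the real struct may carry more state. */
struct gem_busyspin {
    uint64_t count;
};

static void spawn_busyspin(void)
{
    struct gem_busyspin bs = { .count = 0 };
    pthread_t thread;

    pthread_create(&thread, NULL, gem_busyspin, &bs);
    /* ... run the measured workload while the GPU is kept busy ... */
    done = true; /* 'done' is the shared flag polled by gem_busyspin() */
    pthread_join(thread, NULL);
}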
static drm_intel_bo *
create_bo(int fd, uint32_t start_val)
{
    drm_intel_bo *bo;
    uint32_t tiling = I915_TILING_X;
    int ret, i;

    bo = drm_intel_bo_alloc(bufmgr, "tiled bo", 1024 * 1024, 4096);
    ret = drm_intel_bo_set_tiling(bo, &tiling, width * 4);
    igt_assert(ret == 0);
    igt_assert(tiling == I915_TILING_X);

    /* Fill the BO with dwords starting at start_val */
    for (i = 0; i < 1024 * 1024 / 4; i++)
        linear[i] = start_val++;
    gem_write(fd, bo->handle, 0, linear, sizeof(linear));

    return bo;
}
static int has_engine(int fd, const struct intel_execution_engine *e)
{
    uint32_t bbe = MI_BATCH_BUFFER_END;
    struct drm_i915_gem_execbuffer2 execbuf;
    struct drm_i915_gem_exec_object2 exec;
    int ret;

    memset(&exec, 0, sizeof(exec));
    exec.handle = gem_create(fd, 4096);
    gem_write(fd, exec.handle, 0, &bbe, sizeof(bbe));

    memset(&execbuf, 0, sizeof(execbuf));
    execbuf.buffers_ptr = (uintptr_t)&exec;
    execbuf.buffer_count = 1;
    execbuf.flags = e->exec_id | e->flags;
    ret = __gem_execbuf(fd, &execbuf);

    gem_close(fd, exec.handle);

    return ret == 0;
}
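A typical way to use such a probe is to walk IGT's engine table and only exercise engines the kernel accepts. A sketch, assuming the conventional NULL-name-terminated intel_execution_engines[] list from lib/igt_gt:

/* Sketch: probe every engine in IGT's table and report which ones
 * accept a nop batch. */
static void list_present_engines(int fd)
{
    const struct intel_execution_engine *e;

    for (e = intel_execution_engines; e->name; e++) {
        if (has_engine(fd, e))
            igt_info("engine %s is present\n", e->name);
    }
}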
static void make_busy(int fd, uint32_t handle)
{
    struct drm_i915_gem_execbuffer2 execbuf;
    struct drm_i915_gem_exec_object2 gem_exec;
    const uint32_t buf[] = { MI_BATCH_BUFFER_END };

    gem_write(fd, handle, 0, buf, sizeof(buf));

    memset(&gem_exec, 0, sizeof(gem_exec));
    gem_exec.handle = handle;

    memset(&execbuf, 0, sizeof(execbuf));
    execbuf.buffers_ptr = (uintptr_t)&gem_exec;
    execbuf.buffer_count = 1;
    execbuf.flags |= LOCAL_I915_EXEC_HANDLE_LUT;
    execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC;
    if (__gem_execbuf(fd, &execbuf)) {
        execbuf.flags = 0;
        gem_execbuf(fd, &execbuf);
    }
}
static void test_execbuf(int fd)
{
    struct drm_i915_gem_execbuffer2 execbuf;
    struct drm_i915_gem_exec_object2 exec;
    uint32_t tmp[] = { MI_BATCH_BUFFER_END };

    memset(&exec, 0, sizeof(exec));
    memset(&execbuf, 0, sizeof(execbuf));

    exec.handle = gem_create(fd, 4096);
    gem_write(fd, exec.handle, 0, tmp, sizeof(tmp));

    execbuf.buffers_ptr = (uintptr_t)&exec;
    execbuf.buffer_count = 1;

    wedge_gpu(fd);

    igt_assert_eq(__gem_execbuf(fd, &execbuf), -EIO);
    gem_close(fd, exec.handle);

    trigger_reset(fd);
}
static void test_write_gtt(int fd)
{
    uint32_t dst;
    char *dst_gtt;
    void *src;

    dst = gem_create(fd, OBJECT_SIZE);

    /* prefault object into gtt */
    dst_gtt = mmap_bo(fd, dst);
    set_domain_gtt(fd, dst);
    memset(dst_gtt, 0, OBJECT_SIZE);
    munmap(dst_gtt, OBJECT_SIZE);

    src = create_pointer(fd);

    gem_write(fd, dst, 0, src, OBJECT_SIZE);

    gem_close(fd, dst);
    munmap(src, OBJECT_SIZE);
}
static void exec0(int fd)
{
    struct drm_i915_gem_execbuffer2 execbuf;
    struct drm_i915_gem_exec_object2 exec[1];
    uint32_t buf[2] = { MI_BATCH_BUFFER_END, 0 };

    /* Just try executing with a zero-length bo.
     * We expect the kernel to either accept the nop batch, or reject it
     * for the zero-length buffer, but never crash.
     */
    exec[0].handle = gem_create(fd, 4096);
    gem_write(fd, exec[0].handle, 0, buf, sizeof(buf));
    exec[0].relocation_count = 0;
    exec[0].relocs_ptr = 0;
    exec[0].alignment = 0;
    exec[0].offset = 0;
    exec[0].flags = 0;
    exec[0].rsvd1 = 0;
    exec[0].rsvd2 = 0;

    execbuf.buffers_ptr = (uintptr_t)exec;
    execbuf.buffer_count = 1;
    execbuf.batch_start_offset = 0;
    execbuf.batch_len = sizeof(buf);
    execbuf.cliprects_ptr = 0;
    execbuf.num_cliprects = 0;
    execbuf.DR1 = 0;
    execbuf.DR4 = 0;
    execbuf.flags = 0;
    i915_execbuffer2_set_context_id(execbuf, 0);
    execbuf.rsvd2 = 0;

    igt_info("trying to run an empty batchbuffer\n");
    gem_exec(fd, &execbuf);

    gem_close(fd, exec[0].handle);
}
static void dontneed_before_exec(void)
{
    int fd = drm_open_driver(DRIVER_INTEL);
    struct drm_i915_gem_execbuffer2 execbuf;
    struct drm_i915_gem_exec_object2 exec;
    uint32_t buf[] = { MI_BATCH_BUFFER_END, 0 };

    memset(&execbuf, 0, sizeof(execbuf));
    memset(&exec, 0, sizeof(exec));

    exec.handle = gem_create(fd, OBJECT_SIZE);
    gem_write(fd, exec.handle, 0, buf, sizeof(buf));
    gem_madvise(fd, exec.handle, I915_MADV_DONTNEED);

    execbuf.buffers_ptr = (uintptr_t)&exec;
    execbuf.buffer_count = 1;
    execbuf.batch_len = sizeof(buf);
    gem_execbuf(fd, &execbuf);

    gem_close(fd, exec.handle);
    close(fd);
}
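gem_madvise() marks the object's backing storage as disposable under memory pressure; the test checks that execbuf still works on such an object. A minimal sketch of the underlying call, assuming the standard DRM_IOCTL_I915_GEM_MADVISE uAPI (the real helper returns whether the pages were retained):

#include <string.h>
#include <sys/ioctl.h>
#include <i915_drm.h>

/* Sketch: tell the kernel the object's pages may be reclaimed
 * (I915_MADV_DONTNEED) or are needed again (I915_MADV_WILLNEED).
 * Returns non-zero if the backing pages are still resident. */
static int sketch_gem_madvise(int fd, uint32_t handle, uint32_t state)
{
    struct drm_i915_gem_madvise madv;

    memset(&madv, 0, sizeof(madv));
    madv.handle = handle;
    madv.madv = state;
    madv.retained = 1;
    ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv); /* real helper checks errors */
    return madv.retained;
}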
int main(int argc, char **argv)
{
    uint32_t batch[2] = { MI_BATCH_BUFFER_END };
    uint32_t handle;
    int fd, i;

    fd = drm_open_any();

    handle = gem_create(fd, 4096);
    gem_write(fd, handle, 0, batch, sizeof(batch));

    do_or_die(exec(fd, handle, NORMAL));
    fail(exec(fd, handle, BROKEN));

    if (exec(fd, handle, USE_LUT))
        return 77;

    do_or_die(exec(fd, handle, USE_LUT));
    fail(exec(fd, handle, USE_LUT | BROKEN));

    for (i = 2; i <= SLOW_QUICK(65536, 8); i *= 2) {
        if (many_exec(fd, handle, i+1, i+1, NORMAL) == -1 &&
            errno == ENOSPC)
            break;

        pass(many_exec(fd, handle, i-1, i-1, NORMAL));
        pass(many_exec(fd, handle, i-1, i, NORMAL));
        pass(many_exec(fd, handle, i-1, i+1, NORMAL));
        pass(many_exec(fd, handle, i, i-1, NORMAL));
        pass(many_exec(fd, handle, i, i, NORMAL));
        pass(many_exec(fd, handle, i, i+1, NORMAL));
        pass(many_exec(fd, handle, i+1, i-1, NORMAL));
        pass(many_exec(fd, handle, i+1, i, NORMAL));
        pass(many_exec(fd, handle, i+1, i+1, NORMAL));

        fail(many_exec(fd, handle, i-1, i-1, NORMAL | BROKEN));
        fail(many_exec(fd, handle, i-1, i, NORMAL | BROKEN));
        fail(many_exec(fd, handle, i-1, i+1, NORMAL | BROKEN));
        fail(many_exec(fd, handle, i, i-1, NORMAL | BROKEN));
        fail(many_exec(fd, handle, i, i, NORMAL | BROKEN));
        fail(many_exec(fd, handle, i, i+1, NORMAL | BROKEN));
        fail(many_exec(fd, handle, i+1, i-1, NORMAL | BROKEN));
        fail(many_exec(fd, handle, i+1, i, NORMAL | BROKEN));
        fail(many_exec(fd, handle, i+1, i+1, NORMAL | BROKEN));

        pass(many_exec(fd, handle, i-1, i-1, USE_LUT));
        pass(many_exec(fd, handle, i-1, i, USE_LUT));
        pass(many_exec(fd, handle, i-1, i+1, USE_LUT));
        pass(many_exec(fd, handle, i, i-1, USE_LUT));
        pass(many_exec(fd, handle, i, i, USE_LUT));
        pass(many_exec(fd, handle, i, i+1, USE_LUT));
        pass(many_exec(fd, handle, i+1, i-1, USE_LUT));
        pass(many_exec(fd, handle, i+1, i, USE_LUT));
        pass(many_exec(fd, handle, i+1, i+1, USE_LUT));

        fail(many_exec(fd, handle, i-1, i-1, USE_LUT | BROKEN));
        fail(many_exec(fd, handle, i-1, i, USE_LUT | BROKEN));
        fail(many_exec(fd, handle, i-1, i+1, USE_LUT | BROKEN));
        fail(many_exec(fd, handle, i, i-1, USE_LUT | BROKEN));
        fail(many_exec(fd, handle, i, i, USE_LUT | BROKEN));
        fail(many_exec(fd, handle, i, i+1, USE_LUT | BROKEN));
        fail(many_exec(fd, handle, i+1, i-1, USE_LUT | BROKEN));
        fail(many_exec(fd, handle, i+1, i, USE_LUT | BROKEN));
        fail(many_exec(fd, handle, i+1, i+1, USE_LUT | BROKEN));
    }

    return 0;
}
static int copy(int fd, uint32_t dst, uint32_t src, uint32_t *all_bo, int n_bo)
{
    uint32_t batch[12];
    struct drm_i915_gem_relocation_entry reloc[2];
    struct drm_i915_gem_exec_object2 *obj;
    struct drm_i915_gem_execbuffer2 exec;
    uint32_t handle;
    int n, ret, i = 0;

    batch[i++] = (XY_SRC_COPY_BLT_CMD |
                  XY_SRC_COPY_BLT_WRITE_ALPHA |
                  XY_SRC_COPY_BLT_WRITE_RGB | 6);
    if (intel_gen(intel_get_drm_devid(fd)) >= 8)
        batch[i - 1] += 2; /* gen8+ uses 64-bit addresses: two extra dwords */
    batch[i++] = (3 << 24) | /* 32 bits */
                 (0xcc << 16) | /* copy ROP */
                 WIDTH*4;
    batch[i++] = 0; /* dst x1,y1 */
    batch[i++] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */
    batch[i++] = 0; /* dst reloc */
    if (intel_gen(intel_get_drm_devid(fd)) >= 8)
        batch[i++] = 0; /* FIXME */
    batch[i++] = 0; /* src x1,y1 */
    batch[i++] = WIDTH*4;
    batch[i++] = 0; /* src reloc */
    if (intel_gen(intel_get_drm_devid(fd)) >= 8)
        batch[i++] = 0; /* FIXME */
    batch[i++] = MI_BATCH_BUFFER_END;
    batch[i++] = MI_NOOP;

    handle = gem_create(fd, 4096);
    gem_write(fd, handle, 0, batch, sizeof(batch));

    reloc[0].target_handle = dst;
    reloc[0].delta = 0;
    reloc[0].offset = 4 * sizeof(batch[0]);
    reloc[0].presumed_offset = 0;
    reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
    reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;

    reloc[1].target_handle = src;
    reloc[1].delta = 0;
    reloc[1].offset = 7 * sizeof(batch[0]);
    if (intel_gen(intel_get_drm_devid(fd)) >= 8)
        reloc[1].offset += sizeof(batch[0]);
    reloc[1].presumed_offset = 0;
    reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
    reloc[1].write_domain = 0;

    obj = calloc(n_bo + 1, sizeof(*obj));
    for (n = 0; n < n_bo; n++)
        obj[n].handle = all_bo[n];
    obj[n].handle = handle; /* the batch goes last */
    obj[n].relocation_count = 2;
    obj[n].relocs_ptr = (uintptr_t)reloc;

    exec.buffers_ptr = (uintptr_t)obj;
    exec.buffer_count = n_bo + 1;
    exec.batch_start_offset = 0;
    exec.batch_len = i * 4;
    exec.DR1 = exec.DR4 = 0;
    exec.num_cliprects = 0;
    exec.cliprects_ptr = 0;
    exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0;
    i915_execbuffer2_set_context_id(exec, 0);
    exec.rsvd2 = 0;

    ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
    if (ret)
        ret = errno;

    gem_close(fd, handle);
    free(obj);

    return ret;
}
static void copy(int fd, uint32_t dst, uint32_t src, unsigned int error)
{
    uint32_t batch[12];
    struct drm_i915_gem_relocation_entry reloc[2];
    struct drm_i915_gem_exec_object2 obj[3];
    struct drm_i915_gem_execbuffer2 exec;
    uint32_t handle;
    int ret, i = 0;

    batch[i++] = XY_SRC_COPY_BLT_CMD |
                 XY_SRC_COPY_BLT_WRITE_ALPHA |
                 XY_SRC_COPY_BLT_WRITE_RGB;
    if (intel_gen(intel_get_drm_devid(fd)) >= 8)
        batch[i - 1] |= 8;
    else
        batch[i - 1] |= 6;

    batch[i++] = (3 << 24) | /* 32 bits */
                 (0xcc << 16) | /* copy ROP */
                 WIDTH*4;
    batch[i++] = 0; /* dst x1,y1 */
    batch[i++] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */
    batch[i++] = 0; /* dst reloc */
    if (intel_gen(intel_get_drm_devid(fd)) >= 8)
        batch[i++] = 0;
    batch[i++] = 0; /* src x1,y1 */
    batch[i++] = WIDTH*4;
    batch[i++] = 0; /* src reloc */
    if (intel_gen(intel_get_drm_devid(fd)) >= 8)
        batch[i++] = 0;
    batch[i++] = MI_BATCH_BUFFER_END;
    batch[i++] = MI_NOOP;

    handle = gem_create(fd, 4096);
    gem_write(fd, handle, 0, batch, sizeof(batch));

    reloc[0].target_handle = dst;
    reloc[0].delta = 0;
    reloc[0].offset = 4 * sizeof(batch[0]);
    reloc[0].presumed_offset = 0;
    reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
    reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;

    reloc[1].target_handle = src;
    reloc[1].delta = 0;
    reloc[1].offset = 7 * sizeof(batch[0]);
    if (intel_gen(intel_get_drm_devid(fd)) >= 8)
        reloc[1].offset += sizeof(batch[0]);
    reloc[1].presumed_offset = 0;
    reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
    reloc[1].write_domain = 0;

    memset(obj, 0, sizeof(obj));
    exec.buffer_count = 0;
    obj[exec.buffer_count++].handle = dst;
    if (src != dst)
        obj[exec.buffer_count++].handle = src;
    obj[exec.buffer_count].handle = handle;
    obj[exec.buffer_count].relocation_count = 2;
    obj[exec.buffer_count].relocs_ptr = (uintptr_t)reloc;
    exec.buffer_count++;

    exec.buffers_ptr = (uintptr_t)obj;
    exec.batch_start_offset = 0;
    exec.batch_len = i * 4;
    exec.DR1 = exec.DR4 = 0;
    exec.num_cliprects = 0;
    exec.cliprects_ptr = 0;
    exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0;
    i915_execbuffer2_set_context_id(exec, 0);
    exec.rsvd2 = 0;

    ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
    if (ret)
        ret = errno;

    if (error == ~0)
        igt_assert_neq(ret, 0);
    else
        igt_assert(ret == error);

    gem_close(fd, handle);
}
static void copy(int fd, uint32_t dst, uint32_t src)
{
    uint32_t batch[1024], *b = batch;
    struct drm_i915_gem_relocation_entry reloc[2], *r = reloc;
    struct drm_i915_gem_exec_object2 obj[3];
    struct drm_i915_gem_execbuffer2 exec;
    uint32_t handle;
    int ret;

    /* invariant state */
    *b++ = (_3DSTATE_AA_CMD |
            AA_LINE_ECAAR_WIDTH_ENABLE |
            AA_LINE_ECAAR_WIDTH_1_0 |
            AA_LINE_REGION_WIDTH_ENABLE | AA_LINE_REGION_WIDTH_1_0);
    *b++ = (_3DSTATE_INDEPENDENT_ALPHA_BLEND_CMD |
            IAB_MODIFY_ENABLE |
            IAB_MODIFY_FUNC | (BLENDFUNC_ADD << IAB_FUNC_SHIFT) |
            IAB_MODIFY_SRC_FACTOR |
            (BLENDFACT_ONE << IAB_SRC_FACTOR_SHIFT) |
            IAB_MODIFY_DST_FACTOR |
            (BLENDFACT_ZERO << IAB_DST_FACTOR_SHIFT));
    *b++ = (_3DSTATE_DFLT_DIFFUSE_CMD);
    *b++ = (0);
    *b++ = (_3DSTATE_DFLT_SPEC_CMD);
    *b++ = (0);
    *b++ = (_3DSTATE_DFLT_Z_CMD);
    *b++ = (0);
    *b++ = (_3DSTATE_COORD_SET_BINDINGS |
            CSB_TCB(0, 0) | CSB_TCB(1, 1) |
            CSB_TCB(2, 2) | CSB_TCB(3, 3) |
            CSB_TCB(4, 4) | CSB_TCB(5, 5) |
            CSB_TCB(6, 6) | CSB_TCB(7, 7));
    *b++ = (_3DSTATE_RASTER_RULES_CMD |
            ENABLE_POINT_RASTER_RULE |
            OGL_POINT_RASTER_RULE |
            ENABLE_LINE_STRIP_PROVOKE_VRTX |
            ENABLE_TRI_FAN_PROVOKE_VRTX |
            LINE_STRIP_PROVOKE_VRTX(1) |
            TRI_FAN_PROVOKE_VRTX(2) |
            ENABLE_TEXKILL_3D_4D | TEXKILL_4D);
    *b++ = (_3DSTATE_MODES_4_CMD |
            ENABLE_LOGIC_OP_FUNC | LOGIC_OP_FUNC(LOGICOP_COPY) |
            ENABLE_STENCIL_WRITE_MASK | STENCIL_WRITE_MASK(0xff) |
            ENABLE_STENCIL_TEST_MASK | STENCIL_TEST_MASK(0xff));
    *b++ = (_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
            I1_LOAD_S(3) | I1_LOAD_S(4) | I1_LOAD_S(5) | 2);
    *b++ = (0x00000000); /* Disable texture coordinate wrap-shortest */
    *b++ = ((1 << S4_POINT_WIDTH_SHIFT) |
            S4_LINE_WIDTH_ONE |
            S4_CULLMODE_NONE |
            S4_VFMT_XY);
    *b++ = (0x00000000); /* Stencil. */
    *b++ = (_3DSTATE_SCISSOR_ENABLE_CMD | DISABLE_SCISSOR_RECT);
    *b++ = (_3DSTATE_SCISSOR_RECT_0_CMD);
    *b++ = (0);
    *b++ = (0);
    *b++ = (_3DSTATE_DEPTH_SUBRECT_DISABLE);
    *b++ = (_3DSTATE_LOAD_INDIRECT | 0); /* disable indirect state */
    *b++ = (0);
    *b++ = (_3DSTATE_STIPPLE);
    *b++ = (0x00000000);
    *b++ = (_3DSTATE_BACKFACE_STENCIL_OPS | BFO_ENABLE_STENCIL_TWO_SIDE | 0);

    /* sampler state */
#define TEX_COUNT 1
    *b++ = (_3DSTATE_MAP_STATE | (3 * TEX_COUNT));
    *b++ = ((1 << TEX_COUNT) - 1);
    *b = fill_reloc(r++, b-batch, src, I915_GEM_DOMAIN_SAMPLER, 0);
    b++;
    *b++ = (MAPSURF_32BIT | MT_32BIT_ARGB8888 |
            MS3_TILED_SURFACE |
            (HEIGHT - 1) << MS3_HEIGHT_SHIFT |
            (WIDTH - 1) << MS3_WIDTH_SHIFT);
    *b++ = ((WIDTH-1) << MS4_PITCH_SHIFT);

    *b++ = (_3DSTATE_SAMPLER_STATE | (3 * TEX_COUNT));
    *b++ = ((1 << TEX_COUNT) - 1);
    *b++ = (MIPFILTER_NONE << SS2_MIP_FILTER_SHIFT |
            FILTER_NEAREST << SS2_MAG_FILTER_SHIFT |
            FILTER_NEAREST << SS2_MIN_FILTER_SHIFT);
    *b++ = (TEXCOORDMODE_WRAP << SS3_TCX_ADDR_MODE_SHIFT |
            TEXCOORDMODE_WRAP << SS3_TCY_ADDR_MODE_SHIFT |
            0 << SS3_TEXTUREMAP_INDEX_SHIFT);
    *b++ = (0x00000000);

    /* render target state */
    *b++ = (_3DSTATE_BUF_INFO_CMD);
    *b++ = (BUF_3D_ID_COLOR_BACK | BUF_3D_TILED_SURFACE | WIDTH*4);
    *b = fill_reloc(r++, b-batch, dst,
                    I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
    b++;

    *b++ = (_3DSTATE_DST_BUF_VARS_CMD);
    *b++ = (COLR_BUF_ARGB8888 |
            DSTORG_HORT_BIAS(0x8) |
            DSTORG_VERT_BIAS(0x8));

    /* draw rect is unconditional */
    *b++ = (_3DSTATE_DRAW_RECT_CMD);
    *b++ = (0x00000000);
    *b++ = (0x00000000); /* ymin, xmin */
    *b++ = (DRAW_YMAX(HEIGHT - 1) | DRAW_XMAX(WIDTH - 1));
    *b++ = (0x00000000); /* yorig, xorig (relate to color buffer?) */

    /* texfmt */
    *b++ = (_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
            I1_LOAD_S(1) | I1_LOAD_S(2) | I1_LOAD_S(6) | 2);
    *b++ = ((4 << S1_VERTEX_WIDTH_SHIFT) | (4 << S1_VERTEX_PITCH_SHIFT));
    *b++ = (~S2_TEXCOORD_FMT(0, TEXCOORDFMT_NOT_PRESENT) |
            S2_TEXCOORD_FMT(0, TEXCOORDFMT_2D));
    *b++ = (S6_CBUF_BLEND_ENABLE | S6_COLOR_WRITE_ENABLE |
            BLENDFUNC_ADD << S6_CBUF_BLEND_FUNC_SHIFT |
            BLENDFACT_ONE << S6_CBUF_SRC_BLEND_FACT_SHIFT |
            BLENDFACT_ZERO << S6_CBUF_DST_BLEND_FACT_SHIFT);

    /* pixel shader */
    *b++ = (_3DSTATE_PIXEL_SHADER_PROGRAM | (1 + 3*3 - 2));
    /* decl FS_T0 */
    *b++ = (D0_DCL |
            REG_TYPE(FS_T0) << D0_TYPE_SHIFT |
            REG_NR(FS_T0) << D0_NR_SHIFT |
            ((REG_TYPE(FS_T0) != REG_TYPE_S) ? D0_CHANNEL_ALL : 0));
    *b++ = (0);
    *b++ = (0);
    /* decl FS_S0 */
    *b++ = (D0_DCL |
            (REG_TYPE(FS_S0) << D0_TYPE_SHIFT) |
            (REG_NR(FS_S0) << D0_NR_SHIFT) |
            ((REG_TYPE(FS_S0) != REG_TYPE_S) ? D0_CHANNEL_ALL : 0));
    *b++ = (0);
    *b++ = (0);
    /* texld(FS_OC, FS_S0, FS_T0) */
    *b++ = (T0_TEXLD |
            (REG_TYPE(FS_OC) << T0_DEST_TYPE_SHIFT) |
            (REG_NR(FS_OC) << T0_DEST_NR_SHIFT) |
            (REG_NR(FS_S0) << T0_SAMPLER_NR_SHIFT));
    *b++ = ((REG_TYPE(FS_T0) << T1_ADDRESS_REG_TYPE_SHIFT) |
            (REG_NR(FS_T0) << T1_ADDRESS_REG_NR_SHIFT));
    *b++ = (0);

    *b++ = (PRIM3D_RECTLIST | (3*4 - 1));
    *b++ = pack_float(WIDTH);
    *b++ = pack_float(HEIGHT);
    *b++ = pack_float(WIDTH);
    *b++ = pack_float(HEIGHT);
    *b++ = pack_float(0);
    *b++ = pack_float(HEIGHT);
    *b++ = pack_float(0);
    *b++ = pack_float(HEIGHT);
    *b++ = pack_float(0);
    *b++ = pack_float(0);
    *b++ = pack_float(0);
    *b++ = pack_float(0);

    *b++ = MI_BATCH_BUFFER_END;
    if ((b - batch) & 1)
        *b++ = 0;

    igt_assert(b - batch <= 1024);
    handle = gem_create(fd, 4096);
    gem_write(fd, handle, 0, batch, (b-batch)*sizeof(batch[0]));

    igt_assert(r-reloc == 2);

    obj[0].handle = dst;
    obj[0].relocation_count = 0;
    obj[0].relocs_ptr = 0;
    obj[0].alignment = 0;
    obj[0].offset = 0;
    obj[0].flags = 0;
    obj[0].rsvd1 = 0;
    obj[0].rsvd2 = 0;

    obj[1].handle = src;
    obj[1].relocation_count = 0;
    obj[1].relocs_ptr = 0;
    obj[1].alignment = 0;
    obj[1].offset = 0;
    obj[1].flags = 0;
    obj[1].rsvd1 = 0;
    obj[1].rsvd2 = 0;

    obj[2].handle = handle;
    obj[2].relocation_count = 2;
    obj[2].relocs_ptr = (uintptr_t)reloc;
    obj[2].alignment = 0;
    obj[2].offset = 0;
    obj[2].flags = 0;
    obj[2].rsvd1 = obj[2].rsvd2 = 0;

    exec.buffers_ptr = (uintptr_t)obj;
    exec.buffer_count = 3;
    exec.batch_start_offset = 0;
    exec.batch_len = (b-batch)*sizeof(batch[0]);
    exec.DR1 = exec.DR4 = 0;
    exec.num_cliprects = 0;
    exec.cliprects_ptr = 0;
    exec.flags = 0;
    i915_execbuffer2_set_context_id(exec, 0);
    exec.rsvd2 = 0;

    ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
    while (ret && errno == EBUSY) {
        drmCommandNone(fd, DRM_I915_GEM_THROTTLE);
        ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
    }
    igt_assert_eq(ret, 0);

    gem_close(fd, handle);
}
static void blt_copy(int fd, uint32_t dst, uint32_t src)
{
    uint32_t batch[1024], *b = batch;
    struct drm_i915_gem_relocation_entry reloc[2], *r = reloc;
    struct drm_i915_gem_exec_object2 obj[3];
    struct drm_i915_gem_execbuffer2 exec;
    uint32_t handle;
    int ret;

    *b++ = (XY_SRC_COPY_BLT_CMD |
            XY_SRC_COPY_BLT_WRITE_ALPHA |
            XY_SRC_COPY_BLT_WRITE_RGB);
    *b++ = 3 << 24 | 0xcc << 16 | WIDTH * 4;
    *b++ = 0;
    *b++ = HEIGHT << 16 | WIDTH;
    *b = fill_reloc(r++, b-batch, dst,
                    I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
    b++;
    *b++ = 0;
    *b++ = WIDTH*4;
    *b = fill_reloc(r++, b-batch, src, I915_GEM_DOMAIN_RENDER, 0);
    b++;

    *b++ = MI_BATCH_BUFFER_END;
    if ((b - batch) & 1)
        *b++ = 0;

    assert(b - batch <= 1024);
    handle = gem_create(fd, 4096);
    gem_write(fd, handle, 0, batch, (b-batch)*sizeof(batch[0]));

    assert(r-reloc == 2);

    obj[0].handle = dst;
    obj[0].relocation_count = 0;
    obj[0].relocs_ptr = 0;
    obj[0].alignment = 0;
    obj[0].offset = 0;
    obj[0].flags = EXEC_OBJECT_NEEDS_FENCE;
    obj[0].rsvd1 = 0;
    obj[0].rsvd2 = 0;

    obj[1].handle = src;
    obj[1].relocation_count = 0;
    obj[1].relocs_ptr = 0;
    obj[1].alignment = 0;
    obj[1].offset = 0;
    obj[1].flags = EXEC_OBJECT_NEEDS_FENCE;
    obj[1].rsvd1 = 0;
    obj[1].rsvd2 = 0;

    obj[2].handle = handle;
    obj[2].relocation_count = 2;
    obj[2].relocs_ptr = (uintptr_t)reloc;
    obj[2].alignment = 0;
    obj[2].offset = 0;
    obj[2].flags = 0;
    obj[2].rsvd1 = obj[2].rsvd2 = 0;

    exec.buffers_ptr = (uintptr_t)obj;
    exec.buffer_count = 3;
    exec.batch_start_offset = 0;
    exec.batch_len = (b-batch)*sizeof(batch[0]);
    exec.DR1 = exec.DR4 = 0;
    exec.num_cliprects = 0;
    exec.cliprects_ptr = 0;
    exec.flags = 0;
    exec.rsvd1 = exec.rsvd2 = 0;

    ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
    while (ret && errno == EBUSY) {
        drmCommandNone(fd, DRM_I915_GEM_THROTTLE);
        ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
    }
    assert(ret == 0);

    gem_close(fd, handle);
}
int main(int argc, char **argv)
{
    drm_intel_bufmgr *bufmgr;
    struct intel_batchbuffer *batch;
    uint32_t *start_val;
    drm_intel_bo **bo;
    uint32_t start = 0;
    int i, j, fd, count;

    fd = drm_open_any();

    render_copy = get_render_copyfunc(intel_get_drm_devid(fd));
    if (render_copy == NULL) {
        printf("no render-copy function, doing nothing\n");
        return 77;
    }

    bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
    batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

    count = 0;
    if (argc > 1)
        count = atoi(argv[1]);
    if (count == 0)
        count = 3 * gem_aperture_size(fd) / SIZE / 2;
    printf("Using %d 1MiB buffers\n", count);

    bo = malloc(sizeof(*bo)*count);
    start_val = malloc(sizeof(*start_val)*count);

    for (i = 0; i < count; i++) {
        bo[i] = drm_intel_bo_alloc(bufmgr, "", SIZE, 4096);
        start_val[i] = start;
        for (j = 0; j < WIDTH*HEIGHT; j++)
            linear[j] = start++;
        gem_write(fd, bo[i]->handle, 0, linear, sizeof(linear));
    }

    printf("Verifying initialisation...\n");
    for (i = 0; i < count; i++)
        check_bo(fd, bo[i]->handle, start_val[i]);

    printf("Cyclic blits, forward...\n");
    for (i = 0; i < count * 4; i++) {
        struct scratch_buf src, dst;

        src.bo = bo[i % count];
        src.stride = STRIDE;
        src.tiling = I915_TILING_NONE;
        src.size = SIZE;

        dst.bo = bo[(i + 1) % count];
        dst.stride = STRIDE;
        dst.tiling = I915_TILING_NONE;
        dst.size = SIZE;

        render_copy(batch, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
        start_val[(i + 1) % count] = start_val[i % count];
    }
    for (i = 0; i < count; i++)
        check_bo(fd, bo[i]->handle, start_val[i]);

    printf("Cyclic blits, backward...\n");
    for (i = 0; i < count * 4; i++) {
        struct scratch_buf src, dst;

        src.bo = bo[(i + 1) % count];
        src.stride = STRIDE;
        src.tiling = I915_TILING_NONE;
        src.size = SIZE;

        dst.bo = bo[i % count];
        dst.stride = STRIDE;
        dst.tiling = I915_TILING_NONE;
        dst.size = SIZE;

        render_copy(batch, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
        start_val[i % count] = start_val[(i + 1) % count];
    }
    for (i = 0; i < count; i++)
        check_bo(fd, bo[i]->handle, start_val[i]);

    printf("Random blits...\n");
    for (i = 0; i < count * 4; i++) {
        struct scratch_buf src, dst;
        int s = random() % count;
        int d = random() % count;

        if (s == d)
            continue;

        src.bo = bo[s];
        src.stride = STRIDE;
        src.tiling = I915_TILING_NONE;
        src.size = SIZE;

        dst.bo = bo[d];
        dst.stride = STRIDE;
        dst.tiling = I915_TILING_NONE;
        dst.size = SIZE;

        render_copy(batch, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
        start_val[d] = start_val[s];
    }
    for (i = 0; i < count; i++)
        check_bo(fd, bo[i]->handle, start_val[i]);

    return 0;
}
int main(int argc, char **argv)
{
    struct timeval start, end;
    uint8_t *buf;
    uint32_t handle;
    int size = OBJECT_SIZE;
    int loop, i, tiling;
    int fd;

    if (argc > 1)
        size = atoi(argv[1]);
    if (size == 0) {
        fprintf(stderr, "Invalid object size specified\n");
        return 1;
    }

    buf = malloc(size);
    memset(buf, 0, size);

    fd = drm_open_any();

    handle = gem_create(fd, size);
    assert(handle);

    for (tiling = I915_TILING_NONE; tiling <= I915_TILING_Y; tiling++) {
        if (tiling != I915_TILING_NONE) {
            printf("\nSetting tiling mode to %s\n",
                   tiling == I915_TILING_X ? "X" : "Y");
            gem_set_tiling(fd, handle, tiling, 512);
        }

        if (tiling == I915_TILING_NONE) {
            gem_set_domain(fd, handle,
                           I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

            {
                uint32_t *base = gem_mmap__cpu(fd, handle, size,
                                               PROT_READ | PROT_WRITE);
                volatile uint32_t *ptr = base;
                int x = 0;

                for (i = 0; i < size/sizeof(*ptr); i++)
                    x += ptr[i];

                /* force overtly clever gcc to actually compute x */
                ptr[0] = x;

                munmap(base, size);

                /* mmap read */
                gettimeofday(&start, NULL);
                for (loop = 0; loop < 1000; loop++) {
                    base = gem_mmap__cpu(fd, handle, size,
                                         PROT_READ | PROT_WRITE);
                    ptr = base;
                    x = 0;

                    for (i = 0; i < size/sizeof(*ptr); i++)
                        x += ptr[i];

                    /* force overtly clever gcc to actually compute x */
                    ptr[0] = x;

                    munmap(base, size);
                }
                gettimeofday(&end, NULL);
                printf("Time to read %dk through a CPU map: %7.3fµs\n",
                       size/1024, elapsed(&start, &end, loop));

                /* mmap write */
                gettimeofday(&start, NULL);
                for (loop = 0; loop < 1000; loop++) {
                    base = gem_mmap__cpu(fd, handle, size,
                                         PROT_READ | PROT_WRITE);
                    ptr = base;

                    for (i = 0; i < size/sizeof(*ptr); i++)
                        ptr[i] = i;

                    munmap(base, size);
                }
                gettimeofday(&end, NULL);
                printf("Time to write %dk through a CPU map: %7.3fµs\n",
                       size/1024, elapsed(&start, &end, loop));

                gettimeofday(&start, NULL);
                for (loop = 0; loop < 1000; loop++) {
                    base = gem_mmap__cpu(fd, handle, size,
                                         PROT_READ | PROT_WRITE);
                    memset(base, 0, size);
                    munmap(base, size);
                }
                gettimeofday(&end, NULL);
                printf("Time to clear %dk through a CPU map: %7.3fµs\n",
                       size/1024, elapsed(&start, &end, loop));

                gettimeofday(&start, NULL);
                base = gem_mmap__cpu(fd, handle, size,
                                     PROT_READ | PROT_WRITE);
                for (loop = 0; loop < 1000; loop++)
                    memset(base, 0, size);
                munmap(base, size);
                gettimeofday(&end, NULL);
                printf("Time to clear %dk through a cached CPU map: %7.3fµs\n",
                       size/1024, elapsed(&start, &end, loop));
            }

            /* CPU pwrite */
            gettimeofday(&start, NULL);
            for (loop = 0; loop < 1000; loop++)
                gem_write(fd, handle, 0, buf, size);
            gettimeofday(&end, NULL);
            printf("Time to pwrite %dk through the CPU: %7.3fµs\n",
                   size/1024, elapsed(&start, &end, loop));

            /* CPU pread */
            gettimeofday(&start, NULL);
            for (loop = 0; loop < 1000; loop++)
                gem_read(fd, handle, 0, buf, size);
            gettimeofday(&end, NULL);
            printf("Time to pread %dk through the CPU: %7.3fµs\n",
                   size/1024, elapsed(&start, &end, loop));
        }

        /* prefault into gtt */
        {
            uint32_t *base = gem_mmap(fd, handle, size,
                                      PROT_READ | PROT_WRITE);
            volatile uint32_t *ptr = base;
            int x = 0;

            for (i = 0; i < size/sizeof(*ptr); i++)
                x += ptr[i];

            /* force overtly clever gcc to actually compute x */
            ptr[0] = x;

            munmap(base, size);
        }

        /* mmap read */
        gettimeofday(&start, NULL);
        for (loop = 0; loop < 1000; loop++) {
            uint32_t *base = gem_mmap(fd, handle, size,
                                      PROT_READ | PROT_WRITE);
            volatile uint32_t *ptr = base;
            int x = 0;

            for (i = 0; i < size/sizeof(*ptr); i++)
                x += ptr[i];

            /* force overtly clever gcc to actually compute x */
            ptr[0] = x;

            munmap(base, size);
        }
        gettimeofday(&end, NULL);
        printf("Time to read %dk through a GTT map: %7.3fµs\n",
               size/1024, elapsed(&start, &end, loop));

        /* mmap write */
        gettimeofday(&start, NULL);
        for (loop = 0; loop < 1000; loop++) {
            uint32_t *base = gem_mmap(fd, handle, size,
                                      PROT_READ | PROT_WRITE);
            volatile uint32_t *ptr = base;

            for (i = 0; i < size/sizeof(*ptr); i++)
                ptr[i] = i;

            munmap(base, size);
        }
        gettimeofday(&end, NULL);
        printf("Time to write %dk through a GTT map: %7.3fµs\n",
               size/1024, elapsed(&start, &end, loop));

        /* mmap clear */
        gettimeofday(&start, NULL);
        for (loop = 0; loop < 1000; loop++) {
            uint32_t *base = gem_mmap(fd, handle, size,
                                      PROT_READ | PROT_WRITE);
            memset(base, 0, size);
            munmap(base, size);
        }
        gettimeofday(&end, NULL);
        printf("Time to clear %dk through a GTT map: %7.3fµs\n",
               size/1024, elapsed(&start, &end, loop));

        gettimeofday(&start, NULL);
        {
            uint32_t *base = gem_mmap(fd, handle, size,
                                      PROT_READ | PROT_WRITE);

            for (loop = 0; loop < 1000; loop++)
                memset(base, 0, size);
            munmap(base, size);
        }
        gettimeofday(&end, NULL);
        printf("Time to clear %dk through a cached GTT map: %7.3fµs\n",
               size/1024, elapsed(&start, &end, loop));

        /* mmap read */
        gettimeofday(&start, NULL);
        for (loop = 0; loop < 1000; loop++) {
            uint32_t *base = gem_mmap(fd, handle, size,
                                      PROT_READ | PROT_WRITE);
            volatile uint32_t *ptr = base;
            int x = 0;

            for (i = 0; i < size/sizeof(*ptr); i++)
                x += ptr[i];

            /* force overtly clever gcc to actually compute x */
            ptr[0] = x;

            munmap(base, size);
        }
        gettimeofday(&end, NULL);
        printf("Time to read %dk (again) through a GTT map: %7.3fµs\n",
               size/1024, elapsed(&start, &end, loop));

        if (tiling == I915_TILING_NONE) {
            /* GTT pwrite */
            gettimeofday(&start, NULL);
            for (loop = 0; loop < 1000; loop++)
                gem_write(fd, handle, 0, buf, size);
            gettimeofday(&end, NULL);
            printf("Time to pwrite %dk through the GTT: %7.3fµs\n",
                   size/1024, elapsed(&start, &end, loop));

            /* GTT pread */
            gettimeofday(&start, NULL);
            for (loop = 0; loop < 1000; loop++)
                gem_read(fd, handle, 0, buf, size);
            gettimeofday(&end, NULL);
            printf("Time to pread %dk through the GTT: %7.3fµs\n",
                   size/1024, elapsed(&start, &end, loop));

            /* GTT pwrite, including clflush */
            gettimeofday(&start, NULL);
            for (loop = 0; loop < 1000; loop++) {
                gem_write(fd, handle, 0, buf, size);
                gem_sync(fd, handle);
            }
            gettimeofday(&end, NULL);
            printf("Time to pwrite %dk through the GTT (clflush): %7.3fµs\n",
                   size/1024, elapsed(&start, &end, loop));

            /* GTT pread, including clflush */
            gettimeofday(&start, NULL);
            for (loop = 0; loop < 1000; loop++) {
                gem_sync(fd, handle);
                gem_read(fd, handle, 0, buf, size);
            }
            gettimeofday(&end, NULL);
            printf("Time to pread %dk through the GTT (clflush): %7.3fµs\n",
                   size/1024, elapsed(&start, &end, loop));

            /* partial writes */
            printf("Now partial writes.\n");
            size /= 4;

            /* partial GTT pwrite, including clflush */
            gettimeofday(&start, NULL);
            for (loop = 0; loop < 1000; loop++) {
                gem_write(fd, handle, 0, buf, size);
                gem_sync(fd, handle);
            }
            gettimeofday(&end, NULL);
            printf("Time to pwrite %dk through the GTT (clflush): %7.3fµs\n",
                   size/1024, elapsed(&start, &end, loop));

            /* partial GTT pread, including clflush */
            gettimeofday(&start, NULL);
            for (loop = 0; loop < 1000; loop++) {
                gem_sync(fd, handle);
                gem_read(fd, handle, 0, buf, size);
            }
            gettimeofday(&end, NULL);
            printf("Time to pread %dk through the GTT (clflush): %7.3fµs\n",
                   size/1024, elapsed(&start, &end, loop));

            size *= 4;
        }
    }

    gem_close(fd, handle);
    close(fd);

    return 0;
}
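elapsed() is the timing helper used throughout the benchmark above; its definition is not included here. Judging by the per-loop µs figures it prints, a matching sketch (an assumption; the real helper may differ in detail):

#include <sys/time.h>

/* Sketch matching the usage above: microseconds per loop iteration
 * between two gettimeofday() samples. */
static double sketch_elapsed(const struct timeval *start,
                             const struct timeval *end, int loop)
{
    double usec = (end->tv_sec - start->tv_sec) * 1e6 +
                  (end->tv_usec - start->tv_usec);
    return usec / loop;
}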