void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   assert(brw); /* should never be null */
   if (!brw)
      return;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);
   free(brw->curbe.last_buf);
   free(brw->curbe.next_buf);

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   intel_batchbuffer_free(brw);

   drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
   brw->first_post_swapbuffers_batch = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;

   assert(intel);               /* should never be null */
   if (intel) {
      GLboolean release_texture_heaps;

      INTEL_FIREVERTICES(intel);

      _mesa_meta_free(&intel->ctx);

      meta_destroy_metaops(&intel->meta);

      intel->vtbl.destroy(intel);

      release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
      _swsetup_DestroyContext(&intel->ctx);
      _tnl_DestroyContext(&intel->ctx);
      _vbo_DestroyContext(&intel->ctx);

      _swrast_DestroyContext(&intel->ctx);
      intel->Fallback = 0x0;    /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel->batch);
      intel->batch = NULL;

      free(intel->prim.vb);
      intel->prim.vb = NULL;
      dri_bo_unreference(intel->prim.vb_bo);
      intel->prim.vb_bo = NULL;

      dri_bo_unreference(intel->first_post_swapbuffers_batch);
      intel->first_post_swapbuffers_batch = NULL;

      if (release_texture_heaps) {
         /* Nothing is currently done here to free texture heaps;
          * but we're not using the texture heap utilities, so I
          * rather think we shouldn't.  I've taken a look, and can't
          * find any private texture data hanging around anywhere, but
          * I'm not yet certain there isn't any at all...
          */
         /* if (INTEL_DEBUG & DEBUG_TEXTURE)
          *    fprintf(stderr, "do something to free texture heaps\n");
          */
      }

      driDestroyOptionCache(&intel->optionCache);

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx);

      FREE(intel);
      driContextPriv->driverPrivate = NULL;
   }
}
static void
intel_encoder_context_destroy(void *hw_context)
{
    struct intel_encoder_context *encoder_context =
        (struct intel_encoder_context *)hw_context;

    encoder_context->mfc_context_destroy(encoder_context->mfc_context);
    encoder_context->vme_context_destroy(encoder_context->vme_context);
    intel_batchbuffer_free(encoder_context->base.batch);
    free(encoder_context);
}
static void
gen75_encoder_context_destroy(void *hw_context)
{
    struct gen6_encoder_context *gen6_encoder_context =
        (struct gen6_encoder_context *)hw_context;

    gen75_mfc_context_destroy(&gen6_encoder_context->mfc_context);
    gen75_vme_context_destroy(&gen6_encoder_context->vme_context);
    intel_batchbuffer_free(gen6_encoder_context->base.batch);
    free(gen6_encoder_context);
}
static void *work(void *arg)
{
    struct intel_batchbuffer *batch;
    render_copyfunc_t rendercopy = get_render_copyfunc(devid);
    drm_intel_context *context;
    drm_intel_bufmgr *bufmgr;
    int thread_id = *(int *)arg;
    int td_fd;
    int i;

    if (multiple_fds)
        td_fd = fd = drm_open_any();
    else
        td_fd = fd;

    assert(td_fd >= 0);

    bufmgr = drm_intel_bufmgr_gem_init(td_fd, 4096);
    batch = intel_batchbuffer_alloc(bufmgr, devid);
    context = drm_intel_gem_context_create(bufmgr);
    if (!context) {
        returns[thread_id] = 77;
        goto out;
    }

    for (i = 0; i < iter; i++) {
        struct scratch_buf src, dst;

        init_buffer(bufmgr, &src, 4096);
        init_buffer(bufmgr, &dst, 4096);

        if (uncontexted) {
            assert(rendercopy);
            rendercopy(batch, &src, 0, 0, 0, 0, &dst, 0, 0);
        } else {
            int ret;

            ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer);
            assert(ret == 0);
            intel_batchbuffer_flush_with_context(batch, context);
        }
    }

out:
    drm_intel_gem_context_destroy(context);
    intel_batchbuffer_free(batch);
    drm_intel_bufmgr_destroy(bufmgr);

    if (multiple_fds)
        close(td_fd);

    pthread_exit(&returns[thread_id]);
}
int main(int argc, char **argv)
{
    int fd;
    int devid;

    igt_skip_on_simulation();

    if (argc != 1) {
        fprintf(stderr, "usage: %s\n", argv[0]);
        igt_fail(-1);
    }

    fd = drm_open_any();
    devid = intel_get_drm_devid(fd);
    if (!HAS_BLT_RING(devid)) {
        fprintf(stderr, "not (yet) implemented for pre-snb\n");
        return 77;
    }

    bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
    if (!bufmgr) {
        fprintf(stderr, "failed to init libdrm\n");
        igt_fail(-1);
    }
    drm_intel_bufmgr_gem_enable_reuse(bufmgr);

    batch = intel_batchbuffer_alloc(bufmgr, devid);
    if (!batch) {
        fprintf(stderr, "failed to create batch buffer\n");
        igt_fail(-1);
    }

    target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
    if (!target_buffer) {
        fprintf(stderr, "failed to alloc target buffer\n");
        igt_fail(-1);
    }

    blt_bo = drm_intel_bo_alloc(bufmgr, "target bo", 4*4096*4096, 4096);
    if (!blt_bo) {
        fprintf(stderr, "failed to alloc blt buffer\n");
        igt_fail(-1);
    }

    dummy_reloc_loop();

    drm_intel_bo_unreference(target_buffer);
    intel_batchbuffer_free(batch);
    drm_intel_bufmgr_destroy(bufmgr);

    close(fd);

    return 0;
}
static void draw_rect_blt(int fd, struct cmd_data *cmd_data,
                          struct buf_data *buf, struct rect *rect,
                          uint32_t color)
{
    drm_intel_bo *dst;
    struct intel_batchbuffer *batch;
    int blt_cmd_len, blt_cmd_tiling, blt_cmd_depth;
    uint32_t devid = intel_get_drm_devid(fd);
    int gen = intel_gen(devid);
    uint32_t tiling, swizzle;
    int pitch;

    gem_get_tiling(fd, buf->handle, &tiling, &swizzle);

    dst = gem_handle_to_libdrm_bo(cmd_data->bufmgr, fd, "", buf->handle);
    igt_assert(dst);

    batch = intel_batchbuffer_alloc(cmd_data->bufmgr, devid);
    igt_assert(batch);

    switch (buf->bpp) {
    case 8:
        blt_cmd_depth = 0;
        break;
    case 16: /* we're assuming 565 */
        blt_cmd_depth = 1 << 24;
        break;
    case 32:
        blt_cmd_depth = 3 << 24;
        break;
    default:
        igt_assert(false);
    }

    blt_cmd_len = (gen >= 8) ? 0x5 : 0x4;
    blt_cmd_tiling = (tiling) ? XY_COLOR_BLT_TILED : 0;
    pitch = (tiling) ? buf->stride / 4 : buf->stride;

    BEGIN_BATCH(6, 1);
    OUT_BATCH(XY_COLOR_BLT_CMD_NOLEN | XY_COLOR_BLT_WRITE_ALPHA |
              XY_COLOR_BLT_WRITE_RGB | blt_cmd_tiling | blt_cmd_len);
    OUT_BATCH(blt_cmd_depth | (0xF0 << 16) | pitch);
    OUT_BATCH((rect->y << 16) | rect->x);
    OUT_BATCH(((rect->y + rect->h) << 16) | (rect->x + rect->w));
    OUT_RELOC_FENCED(dst, 0, I915_GEM_DOMAIN_RENDER, 0);
    OUT_BATCH(color);
    ADVANCE_BATCH();

    intel_batchbuffer_flush(batch);
    intel_batchbuffer_free(batch);
}
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &intel->ctx;

   assert(intel);               /* should never be null */
   if (intel) {
      INTEL_FIREVERTICES(intel);

      /* Dump a final BMP in case the application doesn't call SwapBuffers */
      if (INTEL_DEBUG & DEBUG_AUB) {
         intel_batchbuffer_flush(intel);
         aub_dump_bmp(&intel->ctx);
      }

      _mesa_meta_free(&intel->ctx);

      intel->vtbl.destroy(intel);

      if (ctx->swrast_context) {
         _swsetup_DestroyContext(&intel->ctx);
         _tnl_DestroyContext(&intel->ctx);
      }
      _vbo_DestroyContext(&intel->ctx);

      if (ctx->swrast_context)
         _swrast_DestroyContext(&intel->ctx);

      intel->Fallback = 0x0;    /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel);

      free(intel->prim.vb);
      intel->prim.vb = NULL;
      drm_intel_bo_unreference(intel->prim.vb_bo);
      intel->prim.vb_bo = NULL;

      drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
      intel->first_post_swapbuffers_batch = NULL;

      driDestroyOptionCache(&intel->optionCache);

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx);

      _math_matrix_dtr(&intel->ViewportMatrix);

      ralloc_free(intel);
      driContextPriv->driverPrivate = NULL;
   }
}
int main(int argc, char **argv)
{
    int fd;
    int devid;

    if (argc != 1) {
        fprintf(stderr, "usage: %s\n", argv[0]);
        exit(-1);
    }

    fd = drm_open_any();
    devid = intel_get_drm_devid(fd);
    if (!HAS_BLT_RING(devid)) {
        fprintf(stderr, "inter ring check needs gen6+\n");
        return 77;
    }

    bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
    if (!bufmgr) {
        fprintf(stderr, "failed to init libdrm\n");
        exit(-1);
    }
    drm_intel_bufmgr_gem_enable_reuse(bufmgr);

    batch = intel_batchbuffer_alloc(bufmgr, devid);
    if (!batch) {
        fprintf(stderr, "failed to create batch buffer\n");
        exit(-1);
    }

    target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
    if (!target_buffer) {
        fprintf(stderr, "failed to alloc target buffer\n");
        exit(-1);
    }

    store_dword_loop(I915_EXEC_RENDER);

    drm_intel_bo_unreference(target_buffer);
    intel_batchbuffer_free(batch);
    drm_intel_bufmgr_destroy(bufmgr);

    close(fd);

    return 0;
}
int main(int argc, char **argv)
{
    int fd;
    int devid;

    if (argc != 1) {
        fprintf(stderr, "usage: %s\n", argv[0]);
        igt_fail(-1);
    }

    fd = drm_open_any();
    devid = intel_get_drm_devid(fd);

    if (HAS_BSD_RING(devid))
        num_rings++;

    if (HAS_BLT_RING(devid))
        num_rings++;

    printf("num rings detected: %i\n", num_rings);

    bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
    if (!bufmgr) {
        fprintf(stderr, "failed to init libdrm\n");
        igt_fail(-1);
    }
    drm_intel_bufmgr_gem_enable_reuse(bufmgr);

    batch = intel_batchbuffer_alloc(bufmgr, devid);
    if (!batch) {
        fprintf(stderr, "failed to create batch buffer\n");
        igt_fail(-1);
    }

    mi_lri_loop();
    gem_quiescent_gpu(fd);

    intel_batchbuffer_free(batch);
    drm_intel_bufmgr_destroy(bufmgr);

    close(fd);

    return 0;
}
int main(int argc, char **argv)
{
    int fd;
    int object_size = OBJECT_WIDTH * OBJECT_HEIGHT * 4;
    double start_time, end_time;
    drm_intel_bo *dst_bo;
    drm_intel_bufmgr *bufmgr;
    struct intel_batchbuffer *batch;
    int i;

    fd = drm_open_any();

    bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
    drm_intel_bufmgr_gem_enable_reuse(bufmgr);

    batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

    dst_bo = drm_intel_bo_alloc(bufmgr, "dst", object_size, 4096);

    /* Prep loop to get us warmed up. */
    for (i = 0; i < 60; i++) {
        do_render(bufmgr, batch, dst_bo, OBJECT_WIDTH, OBJECT_HEIGHT);
    }
    drm_intel_bo_wait_rendering(dst_bo);

    /* Do the actual timing. */
    start_time = get_time_in_secs();
    for (i = 0; i < 200; i++) {
        do_render(bufmgr, batch, dst_bo, OBJECT_WIDTH, OBJECT_HEIGHT);
    }
    drm_intel_bo_wait_rendering(dst_bo);
    end_time = get_time_in_secs();

    printf("%d iterations in %.03f secs: %.01f MB/sec\n", i,
           end_time - start_time,
           (double)i * OBJECT_WIDTH * OBJECT_HEIGHT * 4 / 1024.0 / 1024.0 /
           (end_time - start_time));

    intel_batchbuffer_free(batch);
    drm_intel_bufmgr_destroy(bufmgr);

    close(fd);

    return 0;
}
static void *thread(void *bufmgr)
{
    struct intel_batchbuffer *batch;
    dri_bo **bo;
    drm_intel_context **ctx;
    int c, b;

    batch = intel_batchbuffer_alloc(bufmgr, devid);

    bo = malloc(num_bo * sizeof(dri_bo *));
    igt_assert(bo);
    memcpy(bo, all_bo, num_bo * sizeof(dri_bo *));

    ctx = malloc(num_ctx * sizeof(drm_intel_context *));
    igt_assert(ctx);
    memcpy(ctx, all_ctx, num_ctx * sizeof(drm_intel_context *));
    igt_permute_array(ctx, num_ctx, xchg_ptr);

    for (c = 0; c < ctx_per_thread; c++) {
        igt_permute_array(bo, num_bo, xchg_ptr);

        for (b = 0; b < bo_per_ctx; b++) {
            struct igt_buf src, dst;

            src.bo = bo[b % num_bo];
            src.stride = 64;
            src.size = OBJECT_SIZE;
            src.tiling = I915_TILING_NONE;

            dst.bo = bo[(b+1) % num_bo];
            dst.stride = 64;
            dst.size = OBJECT_SIZE;
            dst.tiling = I915_TILING_NONE;

            render_copy(batch, ctx[c % num_ctx],
                        &src, 0, 0, 16, 16, &dst, 0, 0);
        }
    }

    free(ctx);
    free(bo);
    intel_batchbuffer_free(batch);

    return NULL;
}
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   assert(brw); /* should never be null */
   if (brw) {
      /* Dump a final BMP in case the application doesn't call SwapBuffers */
      if (INTEL_DEBUG & DEBUG_AUB) {
         intel_batchbuffer_flush(brw);
         aub_dump_bmp(&brw->ctx);
      }

      _mesa_meta_free(&brw->ctx);

      brw->vtbl.destroy(brw);

      if (ctx->swrast_context) {
         _swsetup_DestroyContext(&brw->ctx);
         _tnl_DestroyContext(&brw->ctx);
      }
      _vbo_DestroyContext(&brw->ctx);

      if (ctx->swrast_context)
         _swrast_DestroyContext(&brw->ctx);

      intel_batchbuffer_free(brw);

      drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
      brw->first_post_swapbuffers_batch = NULL;

      driDestroyOptionCache(&brw->optionCache);

      /* free the Mesa context */
      _mesa_free_context_data(&brw->ctx);

      ralloc_free(brw);
      driContextPriv->driverPrivate = NULL;
   }
}
int main(int argc, char **argv)
{
    int fd;
    int i;
    drm_intel_bo *src_bo, *dst_bo;

    fd = drm_open_any();

    bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
    drm_intel_bufmgr_gem_enable_reuse(bufmgr);
    batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

    src_bo = drm_intel_bo_alloc(bufmgr, "src bo", size, 4096);
    dst_bo = drm_intel_bo_alloc(bufmgr, "src bo", size, 4096);

    /* The ring we've been using is 128k, and each rendering op
     * will use at least 8 dwords:
     *
     * BATCH_START
     * BATCH_START offset
     * MI_FLUSH
     * STORE_DATA_INDEX
     * STORE_DATA_INDEX offset
     * STORE_DATA_INDEX value
     * MI_USER_INTERRUPT
     * (padding)
     *
     * So iterate just a little more than that -- if we don't fill the ring
     * doing this, we aren't likely to with this test.
     */
    for (i = 0; i < 128 * 1024 / (8 * 4) * 1.25; i++) {
        intel_copy_bo(batch, dst_bo, src_bo, width, height);
        intel_batchbuffer_flush(batch);
    }

    intel_batchbuffer_free(batch);
    drm_intel_bufmgr_destroy(bufmgr);

    close(fd);

    return 0;
}
int main(int argc, char **argv)
{
    drm_intel_bo *src;
    int fd;

    fd = drm_open_any();

    bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
    drm_intel_bufmgr_gem_enable_reuse(bufmgr);
    batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

    src = drm_intel_bo_alloc(bufmgr, "src", 128 * 128, 4096);

    bad_blit(src, batch->devid);

    intel_batchbuffer_free(batch);
    drm_intel_bufmgr_destroy(bufmgr);

    close(fd);

    return 0;
}
int main(int argc, char **argv)
{
    int i, j;
    unsigned *current_permutation, *tmp_permutation;
    pid_t signal_helper = -1;

    drm_fd = drm_open_any();
    devid = intel_get_drm_devid(drm_fd);

    parse_options(argc, argv);

    /* start our little helper early before too many allocations occur */
    signal(SIGUSR1, SIG_IGN);
    if (options.use_signal_helper)
        signal_helper = fork_signal_helper();

    init();

    check_render_copyfunc();

    tile_permutation = malloc(num_total_tiles*sizeof(uint32_t));
    current_permutation = malloc(num_total_tiles*sizeof(uint32_t));
    tmp_permutation = malloc(num_total_tiles*sizeof(uint32_t));
    assert(tile_permutation);
    assert(current_permutation);
    assert(tmp_permutation);

    fan_out();

    for (i = 0; i < options.total_rounds; i++) {
        printf("round %i\n", i);

        if (i % 64 == 63) {
            fan_in_and_check();
            printf("everything correct after %i rounds\n", i + 1);
        }

        target_set = (current_set + 1) & 1;
        init_set(target_set);

        for (j = 0; j < num_total_tiles; j++)
            current_permutation[j] = j;
        permute_array(current_permutation, num_total_tiles, exchange_uint);

        copy_tiles(current_permutation);

        memcpy(tmp_permutation, tile_permutation,
               sizeof(unsigned)*num_total_tiles);

        /* accumulate the permutations */
        for (j = 0; j < num_total_tiles; j++)
            tile_permutation[j] = current_permutation[tmp_permutation[j]];

        current_set = target_set;
    }

    fan_in_and_check();

    fprintf(stderr, "num failed tiles %u, max incoherent bytes %lu\n",
            stats.num_failed, stats.max_failed_reads*sizeof(uint32_t));

    intel_batchbuffer_free(batch);
    drm_intel_bufmgr_destroy(bufmgr);

    close(drm_fd);

    if (signal_helper != -1)
        kill(signal_helper, SIGQUIT);

    return 0;
}
static void run_test(int fd, int num_fences, int expected_errno,
                     unsigned flags)
{
    struct drm_i915_gem_execbuffer2 execbuf[2];
    struct drm_i915_gem_exec_object2 exec[2][2*MAX_FENCES+3];
    struct drm_i915_gem_relocation_entry reloc[2*MAX_FENCES+2];

    int i, n;
    int loop = 1000;

    if (flags & BUSY_LOAD) {
        bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
        batch = intel_batchbuffer_alloc(bufmgr, devid);

        /* Takes forever otherwise. */
        loop = 50;
    }

    if (flags & INTERRUPTIBLE)
        igt_fork_signal_helper();

    memset(execbuf, 0, sizeof(execbuf));
    memset(exec, 0, sizeof(exec));
    memset(reloc, 0, sizeof(reloc));

    for (n = 0; n < 2*num_fences; n++) {
        uint32_t handle = tiled_bo_create(fd);
        exec[1][2*num_fences - n-1].handle = exec[0][n].handle = handle;
        fill_reloc(&reloc[n], handle);
    }

    for (i = 0; i < 2; i++) {
        for (n = 0; n < num_fences; n++)
            exec[i][n].flags = EXEC_OBJECT_NEEDS_FENCE;

        exec[i][2*num_fences].handle = batch_create(fd);
        exec[i][2*num_fences].relocs_ptr = (uintptr_t)reloc;
        exec[i][2*num_fences].relocation_count = 2*num_fences;

        execbuf[i].buffers_ptr = (uintptr_t)exec[i];
        execbuf[i].buffer_count = 2*num_fences+1;
        execbuf[i].batch_len = 2*sizeof(uint32_t);
    }

    do {
        if (flags & BUSY_LOAD)
            emit_dummy_load();

        igt_assert_eq(__gem_execbuf(fd, &execbuf[0]), expected_errno);
        igt_assert_eq(__gem_execbuf(fd, &execbuf[1]), expected_errno);
    } while (--loop);

    if (flags & INTERRUPTIBLE)
        igt_stop_signal_helper();

    /* Cleanup */
    for (n = 0; n < 2*num_fences; n++)
        gem_close(fd, exec[0][n].handle);

    for (i = 0; i < 2; i++)
        gem_close(fd, exec[i][2*num_fences].handle);

    if (flags & BUSY_LOAD) {
        intel_batchbuffer_free(batch);
        drm_intel_bufmgr_destroy(bufmgr);
    }
}
int main(int argc, char **argv)
{
    drm_intel_bo *bo[4096];
    uint32_t bo_start_val[4096];
    uint32_t start = 0;
    int fd, i, count;

    igt_skip_on_simulation();

    fd = drm_open_any();

    count = 3 * gem_aperture_size(fd) / (1024*1024) / 2;
    if (count > intel_get_total_ram_mb() * 9 / 10) {
        count = intel_get_total_ram_mb() * 9 / 10;
        printf("not enough RAM to run test, reducing buffer count\n");
    }
    count |= 1;
    printf("Using %d 1MiB buffers\n", count);

    bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
    drm_intel_bufmgr_gem_enable_reuse(bufmgr);
    batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

    for (i = 0; i < count; i++) {
        bo[i] = create_bo(fd, start);
        bo_start_val[i] = start;

        /*
        printf("Creating bo %d\n", i);
        check_bo(bo[i], bo_start_val[i]);
        */

        start += 1024 * 1024 / 4;
    }

    for (i = 0; i < count; i++) {
        int src = count - i - 1;

        intel_copy_bo(batch, bo[i], bo[src], width, height);
        bo_start_val[i] = bo_start_val[src];
    }

    for (i = 0; i < count * 4; i++) {
        int src = random() % count;
        int dst = random() % count;

        if (src == dst)
            continue;

        intel_copy_bo(batch, bo[dst], bo[src], width, height);
        bo_start_val[dst] = bo_start_val[src];

        /*
        check_bo(bo[dst], bo_start_val[dst]);
        printf("%d: copy bo %d to %d\n", i, src, dst);
        */
    }

    for (i = 0; i < count; i++) {
        /*
        printf("check %d\n", i);
        */
        check_bo(fd, bo[i], bo_start_val[i]);

        drm_intel_bo_unreference(bo[i]);
        bo[i] = NULL;
    }

    intel_batchbuffer_free(batch);
    drm_intel_bufmgr_destroy(bufmgr);

    close(fd);

    return 0;
}
static void render_timeout(int fd)
{
    drm_intel_bufmgr *bufmgr;
    struct intel_batchbuffer *batch;
    int64_t timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;
    int64_t negative_timeout = -1;
    int ret;
    const bool do_signals = true; /* signals will seem to make the operation
                                   * use less process CPU time */
    bool done = false;
    int i, iter = 1;

    igt_skip_on_simulation();

    bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
    drm_intel_bufmgr_gem_enable_reuse(bufmgr);

    batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

    dst = drm_intel_bo_alloc(bufmgr, "dst", BUF_SIZE, 4096);
    dst2 = drm_intel_bo_alloc(bufmgr, "dst2", BUF_SIZE, 4096);

    igt_skip_on_f(gem_bo_wait_timeout(fd, dst->handle, &timeout) == -EINVAL,
                  "kernel doesn't support wait_timeout, skipping test\n");
    timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;

    /* Figure out a rough number of fills required to consume 1 second of
     * GPU work.
     */
    do {
        struct timespec start, end;
        long diff;

#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
#endif

        igt_assert(clock_gettime(CLOCK_MONOTONIC_RAW, &start) == 0);
        for (i = 0; i < iter; i++)
            blt_color_fill(batch, dst, BUF_PAGES);
        intel_batchbuffer_flush(batch);
        drm_intel_bo_wait_rendering(dst);
        igt_assert(clock_gettime(CLOCK_MONOTONIC_RAW, &end) == 0);

        diff = do_time_diff(&end, &start);
        igt_assert(diff >= 0);

        if ((diff / MSEC_PER_SEC) > ENOUGH_WORK_IN_SECONDS)
            done = true;
        else
            iter <<= 1;
    } while (!done && iter < 1000000);

    igt_assert_lt(iter, 1000000);

    igt_info("%d iters is enough work\n", iter);
    gem_quiescent_gpu(fd);
    if (do_signals)
        igt_fork_signal_helper();

    /* We should be able to do half as much work in the same amount of time,
     * but because we might schedule almost twice as much as required, we
     * might accidentally time out. Hence add some fudge. */
    for (i = 0; i < iter/3; i++)
        blt_color_fill(batch, dst2, BUF_PAGES);

    intel_batchbuffer_flush(batch);
    igt_assert(gem_bo_busy(fd, dst2->handle) == true);

    igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), 0);
    igt_assert(gem_bo_busy(fd, dst2->handle) == false);
    igt_assert_neq(timeout, 0);
    if (timeout == (ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC))
        igt_info("Buffer was already done!\n");
    else {
        igt_info("Finished with %" PRIu64 " time remaining\n", timeout);
    }

    /* check that polling with timeout=0 works. */
    timeout = 0;
    igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), 0);
    igt_assert_eq(timeout, 0);

    /* Now check that we correctly time out, twice the auto-tune load should
     * be good enough. */
    timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;
    for (i = 0; i < iter*2; i++)
        blt_color_fill(batch, dst2, BUF_PAGES);

    intel_batchbuffer_flush(batch);

    ret = gem_bo_wait_timeout(fd, dst2->handle, &timeout);
    igt_assert_eq(ret, -ETIME);
    igt_assert_eq(timeout, 0);
    igt_assert(gem_bo_busy(fd, dst2->handle) == true);

    /* check that polling with timeout=0 works. */
    timeout = 0;
    igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), -ETIME);
    igt_assert_eq(timeout, 0);

    /* Now check that we can pass negative (infinite) timeouts. */
    negative_timeout = -1;
    for (i = 0; i < iter; i++)
        blt_color_fill(batch, dst2, BUF_PAGES);

    intel_batchbuffer_flush(batch);

    igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &negative_timeout), 0);
    igt_assert_eq(negative_timeout, -1); /* infinity always remains */
    igt_assert(gem_bo_busy(fd, dst2->handle) == false);

    if (do_signals)
        igt_stop_signal_helper();
    drm_intel_bo_unreference(dst2);
    drm_intel_bo_unreference(dst);
    intel_batchbuffer_free(batch);
    drm_intel_bufmgr_destroy(bufmgr);
}