/*
 * residency_accuracy - sanity-check an RC6-style residency counter.
 * @value: two counter samples taken SLEEP_DURATION apart
 *         (value[0] before the sleep, value[1] after)
 * @name_of_rc6_residency: human-readable counter name for messages
 *
 * Asserts the counter advanced by roughly the wall-clock sleep time and
 * that the derived residency ratio is sane (0.9 < ratio <= 1.0).  Skips
 * the test when the counter never moved (state unsupported on this
 * machine/configuration).
 */
static void residency_accuracy(int value[],
			       const char *name_of_rc6_residency)
{
	unsigned int diff;
	double counter;
	double counter_result = 0;
	int counter_ok;
	int supported;

	/* The raw delta must match the sleep within the allowed fudge. */
	diff = (value[1] - value[0]);
	igt_assert_f(diff <= (SLEEP_DURATION + RC6_FUDGE),
		     "Diff was too high. That is impossible\n");
	igt_assert_f(diff >= (SLEEP_DURATION - RC6_FUDGE),
		     "GPU was not in RC6 long enough. Check that "
		     "the GPU is as idle as possible (ie. no X, "
		     "and running no other tests)\n");

	/* Ratio of time spent in the state vs. elapsed wall-clock time. */
	counter = ((double)value[1] - (double)value[0]) /
		  (double)(SLEEP_DURATION + CODE_TIME);
	counter_ok = counter > 0.9;
	if (counter_ok)
		counter_result = counter;

	/* A counter stuck at zero means the state is not supported. */
	supported = value[1] != 0;
	if (!supported)
		igt_info("This machine/configuration doesn't support %s\n",
			 name_of_rc6_residency);

	igt_info("The residency counter: %f \n", counter_result);

	igt_skip_on_f(!supported, "This machine didn't enter %s state.\n",
		      name_of_rc6_residency);
	igt_assert_f(counter_ok && (counter_result <= 1),
		     "Sysfs RC6 residency counter is inaccurate.\n");
	igt_info("This machine entered %s state.\n", name_of_rc6_residency);
}
/** * igt_drm_format_to_bpp: * @drm_format: drm fourcc pixel format code * * Returns: * The bits per pixel for the given drm fourcc pixel format code. Fails hard if * no match was found. */ uint32_t igt_drm_format_to_bpp(uint32_t drm_format) { struct format_desc_struct *f; for_each_format(f) if (f->drm_id == drm_format) return f->bpp; igt_assert_f(0, "can't find a bpp format for %08x (%s)\n", drm_format, igt_format_str(drm_format)); }
static cairo_format_t drm_format_to_cairo(uint32_t drm_format) { struct format_desc_struct *f; for_each_format(f) if (f->drm_id == drm_format) return f->cairo_id; igt_assert_f(0, "can't find a cairo format for %08x (%s)\n", drm_format, igt_format_str(drm_format)); }
/*
 * Store @color into element @index of the pixel buffer @_ptr, using the
 * element width implied by @bpp (16 or 32 bits). Any other bpp is a bug.
 */
static void set_pixel(void *_ptr, int index, uint32_t color, int bpp)
{
	switch (bpp) {
	case 16: {
		uint16_t *px = _ptr;

		px[index] = color;
		break;
	}
	case 32: {
		uint32_t *px = _ptr;

		px[index] = color;
		break;
	}
	default:
		igt_assert_f(false, "bpp: %d\n", bpp);
	}
}
/** * igt_bpp_depth_to_drm_format: * @bpp: desired bits per pixel * @depth: desired depth * * Returns: * The rgb drm fourcc pixel format code corresponding to the given @bpp and * @depth values. Fails hard if no match was found. */ uint32_t igt_bpp_depth_to_drm_format(int bpp, int depth) { struct format_desc_struct *f; for_each_format(f) if (f->bpp == bpp && f->depth == depth) return f->drm_id; igt_assert_f(0, "can't find drm format with bpp=%d, depth=%d\n", bpp, depth); }
/*
 * Read back the whole buffer object and assert that the pixel at
 * (@x, @y) holds exactly @color.
 */
static void scratch_buf_check(data_t *data, struct igt_buf *buf, int x, int y,
			      uint8_t color)
{
	uint8_t actual;

	gem_read(data->drm_fd, buf->bo->handle, 0,
		 data->linear, sizeof(data->linear));

	actual = data->linear[y * WIDTH + x];
	igt_assert_f(actual == color,
		     "Expected 0x%02x, found 0x%02x at (%d,%d)\n",
		     color, actual, x, y);
}
/*
 * Verify that the mapped buffer holds the expected incrementing
 * sequence starting at @val.
 */
static void check_cpu(uint32_t *ptr, uint32_t val)
{
	int idx;

	for (idx = 0; idx < WIDTH*HEIGHT; idx++, val++) {
		igt_assert_f(ptr[idx] == val,
			     "Expected 0x%08x, found 0x%08x "
			     "at offset 0x%08x\n",
			     val, ptr[idx], idx * 4);
	}
}
/*
 * Read back buffer object @handle into the global staging buffer and
 * verify it holds the expected incrementing sequence starting at @val.
 */
static void check_bo(int fd, uint32_t handle, uint32_t val)
{
	int idx;

	gem_read(fd, handle, 0, linear, sizeof(linear));
	for (idx = 0; idx < WIDTH*HEIGHT; idx++, val++) {
		igt_assert_f(linear[idx] == val,
			     "Expected 0x%08x, found 0x%08x "
			     "at offset 0x%08x\n",
			     val, linear[idx], idx * 4);
	}
}
/*
 * Map buffer object @handle through the GTT and verify it holds the
 * expected incrementing sequence starting at @val.
 */
static void check_bo(int fd, uint32_t handle, uint32_t val)
{
	uint32_t *map;
	int idx;

	map = gem_mmap__gtt(fd, handle, WIDTH * HEIGHT * 4, PROT_READ);
	for (idx = 0; idx < WIDTH*HEIGHT; idx++, val++) {
		igt_assert_f(map[idx] == val,
			     "Expected 0x%08x, found 0x%08x "
			     "at offset 0x%08x\n",
			     val, map[idx], idx * 4);
	}
	munmap(map, WIDTH*HEIGHT*4);
}
/*
 * setup_msr - open the CPU0 MSR device for later msr_read() calls.
 *
 * Loads the msr kernel module (best effort) and opens /dev/cpu/0/msr
 * read-only into the file-scope msr_fd, failing the test if the device
 * cannot be opened.
 */
static void setup_msr(void)
{
#if 0
	uint64_t control;
	const char *limit;
#endif

	/* Make sure our Kernel supports MSR and the module is loaded. */
	igt_assert(system("modprobe -q msr > /dev/null 2>&1") != -1);

	msr_fd = open("/dev/cpu/0/msr", O_RDONLY);
	igt_assert_f(msr_fd >= 0, "Can't open /dev/cpu/0/msr.\n");

#if 0
	/* FIXME: why is this code not printing the truth? */
	/* NOTE(review): "0x016%" PRIx64 looks like it was meant to be
	 * "0x%016" PRIx64 — presumably the misplaced '%' is why this
	 * never printed sensible values; verify before re-enabling. */
	control = msr_read(MSR_PKG_CST_CONFIG_CONTROL);
	printf("Control: 0x016%" PRIx64 "\n", control);

	/* Decode the package C-state limit field for display. */
	switch (control & PKG_CST_LIMIT_MASK) {
	case PKG_CST_LIMIT_C0:
		limit = "C0";
		break;
	case PKG_CST_LIMIT_C2:
		limit = "C2";
		break;
	case PKG_CST_LIMIT_C3:
		limit = "C3";
		break;
	case PKG_CST_LIMIT_C6:
		limit = "C6";
		break;
	case PKG_CST_LIMIT_C7:
		limit = "C7";
		break;
	case PKG_CST_LIMIT_C7s:
		limit = "C7s";
		break;
	case PKG_CST_NO_LIMIT:
		limit = "no limit";
		break;
	default:
		limit = "unknown";
		break;
	}
	printf("Package C state limit: %s\n", limit);
#endif
}
/*
 * Render-copy smoke test: initialize two scratch buffers with distinct
 * solid colors, render-copy src into the lower-right quadrant of dst,
 * then verify the result (optionally dumping PNG images and/or an AUB
 * trace instead of checking pixels).
 */
int main(int argc, char **argv)
{
	data_t data = {0, };
	struct intel_batchbuffer *batch = NULL;
	struct igt_buf src, dst;
	igt_render_copyfunc_t render_copy = NULL;
	int opt_dump_aub = igt_aub_dump_enabled();

	/* "d" / "a" short options are handled by opt_handler
	 * (presumably png-dump / check-all toggles — defined elsewhere). */
	igt_simple_init_parse_opts(&argc, argv, "da", NULL, NULL,
				   opt_handler, NULL);

	igt_fixture {
		data.drm_fd = drm_open_any_render();
		data.devid = intel_get_drm_devid(data.drm_fd);

		data.bufmgr = drm_intel_bufmgr_gem_init(data.drm_fd, 4096);
		igt_assert(data.bufmgr);

		/* Not all GPU generations have a render-copy implementation. */
		render_copy = igt_get_render_copyfunc(data.devid);
		igt_require_f(render_copy, "no render-copy function\n");

		batch = intel_batchbuffer_alloc(data.bufmgr, data.devid);
		igt_assert(batch);
	}

	scratch_buf_init(&data, &src, WIDTH, HEIGHT, STRIDE, SRC_COLOR);
	scratch_buf_init(&data, &dst, WIDTH, HEIGHT, STRIDE, DST_COLOR);

	/* Sanity-check the initial fill before doing any copies. */
	scratch_buf_check(&data, &src, WIDTH / 2, HEIGHT / 2, SRC_COLOR);
	scratch_buf_check(&data, &dst, WIDTH / 2, HEIGHT / 2, DST_COLOR);

	if (opt_dump_png) {
		scratch_buf_write_to_png(&src, "source.png");
		scratch_buf_write_to_png(&dst, "destination.png");
	}

	if (opt_dump_aub) {
		drm_intel_bufmgr_gem_set_aub_filename(data.bufmgr,
						      "rendercopy.aub");
		drm_intel_bufmgr_gem_set_aub_dump(data.bufmgr, true);
	}

	/* This will copy the src to the mid point of the dst buffer. Presumably
	 * the out of bounds accesses will get clipped.
	 * Resulting buffer should look like:
	 *	 _______
	 *	|dst|dst|
	 *	|dst|src|
	 *	 -------
	 */
	render_copy(batch, NULL, &src, 0, 0, WIDTH, HEIGHT, &dst, WIDTH / 2,
		    HEIGHT / 2);
	if (opt_dump_png)
		scratch_buf_write_to_png(&dst, "result.png");

	if (opt_dump_aub) {
		/* When tracing, dump the result into the AUB file instead
		 * of verifying pixels. */
		drm_intel_gem_bo_aub_dump_bmp(dst.bo, 0, 0, WIDTH, HEIGHT,
					      AUB_DUMP_BMP_FORMAT_ARGB_8888,
					      STRIDE, 0);
		drm_intel_bufmgr_gem_set_aub_dump(data.bufmgr, false);
	} else if (check_all_pixels) {
		uint32_t val;
		int i, j;

		gem_read(data.drm_fd, dst.bo->handle, 0,
			 data.linear, sizeof(data.linear));
		/* Every pixel must be DST_COLOR except the lower-right
		 * quadrant, which received the copy of src. */
		for (i = 0; i < WIDTH; i++) {
			for (j = 0; j < HEIGHT; j++) {
				uint32_t color = DST_COLOR;
				val = data.linear[j * WIDTH + i];
				if (j >= HEIGHT/2 && i >= WIDTH/2)
					color = SRC_COLOR;
				igt_assert_f(val == color,
					     "Expected 0x%08x, found 0x%08x at (%d,%d)\n",
					     color, val, i, j);
			}
		}
	} else {
		/* Quick check: one pixel in the untouched region, one in
		 * the copied region. */
		scratch_buf_check(&data, &dst, 10, 10, DST_COLOR);
		scratch_buf_check(&data, &dst, WIDTH - 10, HEIGHT - 10,
				  SRC_COLOR);
	}

	igt_exit();
}
/*
 * processes - stress context handling across many open DRM file
 * descriptors.
 *
 * Opens as many DRM fds as the driver reports contexts for, shares one
 * trivial batch buffer between them via flink, then forks NUM_THREADS
 * children which each submit the batch on every fd across all available
 * engines in a random order.
 */
static void processes(void)
{
	const struct intel_execution_engine *e;
	unsigned engines[16];
	int num_engines;
	struct rlimit rlim;
	unsigned num_ctx;
	uint32_t name;
	int fd, *fds;

	fd = drm_open_driver(DRIVER_INTEL);
	num_ctx = get_num_contexts(fd);

	/* Collect the exec flags of every engine present on this device. */
	num_engines = 0;
	for (e = intel_execution_engines; e->name; e++) {
		if (e->exec_id == 0)
			continue;

		if (!has_engine(fd, e))
			continue;

		/* Skip the BSD alias that doesn't match the actual
		 * one-or-two-ring configuration of this device. */
		if (e->exec_id == I915_EXEC_BSD) {
			int is_bsd2 = e->flags != 0;
			if (gem_has_bsd2(fd) != is_bsd2)
				continue;
		}

		engines[num_engines++] = e->exec_id | e->flags;
		if (num_engines == ARRAY_SIZE(engines))
			break;
	}

	/* tweak rlimits to allow us to create this many files */
	igt_assert(getrlimit(RLIMIT_NOFILE, &rlim) == 0);
	if (rlim.rlim_cur < ALIGN(num_ctx + 1024, 1024)) {
		rlim.rlim_cur = ALIGN(num_ctx + 1024, 1024);
		if (rlim.rlim_cur > rlim.rlim_max)
			rlim.rlim_max = rlim.rlim_cur;
		igt_assert(setrlimit(RLIMIT_NOFILE, &rlim) == 0);
	}

	/* One DRM fd per context; clean up already-opened fds on failure. */
	fds = malloc(num_ctx * sizeof(int));
	igt_assert(fds);
	for (unsigned n = 0; n < num_ctx; n++) {
		fds[n] = drm_open_driver(DRIVER_INTEL);
		if (fds[n] == -1) {
			int err = errno;
			for (unsigned i = n; i--; )
				close(fds[i]);
			free(fds);
			errno = err;
			igt_assert_f(0, "failed to create context %lld/%lld\n",
				     (long long)n, (long long)num_ctx);
		}
	}

	if (1) {
		uint32_t bbe = MI_BATCH_BUFFER_END;

		/* Create a minimal batch and publish it via flink so the
		 * children can gem_open() it on their own fds.
		 * NOTE(review): `name` is reused for both the gem handle
		 * and the flink name, so the local handle is no longer
		 * addressable afterwards — presumably freed when fd is
		 * closed at the end; confirm that's intentional. */
		name = gem_create(fd, 4096);
		gem_write(fd, name, 0, &bbe, sizeof(bbe));
		name = gem_flink(fd, name);
	}

	igt_fork(child, NUM_THREADS) {
		struct drm_i915_gem_execbuffer2 execbuf;
		struct drm_i915_gem_exec_object2 obj;

		memset(&obj, 0, sizeof(obj));
		memset(&execbuf, 0, sizeof(execbuf));
		execbuf.buffers_ptr = (uintptr_t)&obj;
		execbuf.buffer_count = 1;

		/* Each child walks the fds in its own random order and
		 * round-robins across the discovered engines. */
		igt_permute_array(fds, num_ctx, xchg_int);
		for (unsigned n = 0; n < num_ctx; n++) {
			obj.handle = gem_open(fds[n], name);
			execbuf.flags = engines[n % num_engines];
			gem_execbuf(fds[n], &execbuf);
			gem_close(fds[n], obj.handle);
		}
	}
	igt_waitchildren();

	for (unsigned n = 0; n < num_ctx; n++)
		close(fds[n]);
	free(fds);
	close(fd);
}
/*
 * store_dword_loop - hammer MI_STORE_DWORD_IMM writes to a shared target.
 * @fd: DRM file descriptor
 * @ring: execbuf ring/engine flags
 * @count: unused here — TODO confirm against callers
 * @divider: number of batch buffers cycled through; the target value is
 *           read back and verified once per cycle (when j == 0)
 *
 * Builds @divider batches that each store an immediate dword into the
 * start of a shared target bo, then submits them round-robin, patching
 * the immediate value each iteration and checking the readback whenever
 * the cycle wraps.
 */
static void store_dword_loop(int fd, int ring, int count, int divider)
{
	int i, val = 0;
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_relocation_entry reloc[divider];
	uint32_t handle[divider];
	uint32_t *batch[divider];
	uint32_t *target;
	int gen = intel_gen(devid);

	memset(obj, 0, sizeof(obj));
	/* obj[0] is the shared write target, kept coherently mapped. */
	obj[0].handle = gem_create(fd, 4096);
	target = mmap_coherent(fd, obj[0].handle, 4096);

	memset(reloc, 0, sizeof(reloc));
	for (i = 0; i < divider; i++) {
		uint32_t *b;

		handle[i] = gem_create(fd, 4096);
		batch[i] = mmap_coherent(fd, handle[i], 4096);
		gem_set_domain(fd, handle[i], coherent_domain, coherent_domain);

		/* Batch layout: STORE_DWORD_IMM, address (one dword on
		 * gen<8 after an MBZ dword, two dwords on gen8+), then the
		 * immediate value at dword 3 in both layouts. */
		b = batch[i];
		*b++ = MI_STORE_DWORD_IMM;
		*b++ = 0;
		*b++ = 0;
		*b++ = 0;
		*b++ = MI_BATCH_BUFFER_END;

		/* Relocation patches the address dword: offset 4 on gen8+,
		 * offset 8 (4 + 4) on older gens. */
		reloc[i].target_handle = obj[0].handle;
		reloc[i].offset = 4;
		if (gen < 8)
			reloc[i].offset += 4;
		reloc[i].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		reloc[i].write_domain = I915_GEM_DOMAIN_INSTRUCTION;
		obj[1].relocation_count = 1;
	}

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)obj;
	execbuf.buffer_count = 2;
	execbuf.flags = ring;

	igt_info("running storedw loop on render with stall every %i batch\n",
		 divider);

	for (i = 0; i < SLOW_QUICK(0x2000, 0x10); i++) {
		int j = i % divider;

		/* Patch this batch's immediate with the current value. */
		gem_set_domain(fd, handle[j], coherent_domain, coherent_domain);
		batch[j][3] = val;
		obj[1].handle = handle[j];
		obj[1].relocs_ptr = (uintptr_t)&reloc[j];
		gem_execbuf(fd, &execbuf);

		/* Stall and verify the write once per divider cycle. */
		if (j == 0) {
			gem_set_domain(fd, obj[0].handle, coherent_domain, 0);
			igt_assert_f(*target == val,
				     "%d: value mismatch: stored 0x%08x, expected 0x%08x\n",
				     i, *target, val);
		}

		val++;
	}

	gem_set_domain(fd, obj[0].handle, coherent_domain, 0);
	igt_info("completed %d writes successfully, current value: 0x%08x\n",
		 i, target[0]);

	munmap(target, 4096);
	gem_close(fd, obj[0].handle);
	for (i = 0; i < divider; ++i) {
		munmap(batch[i], 4096);
		gem_close(fd, handle[i]);
	}
}