/**
 * Copy from a source image into a destination image of the specified
 * format and check the result.
 *
 * If \a strict_layout_qualifiers is false, uniform layout qualifiers
 * will be omitted where allowed by the spec.  If \a
 * strict_access_qualifiers is false, the "readonly" and "writeonly"
 * qualifiers will be omitted.  If \a strict_binding is false, the
 * image will be bound as READ_WRITE, otherwise only the required
 * access type will be used.
 */
static bool
run_test(const struct image_format_info *format,
         bool strict_layout_qualifiers,
         bool strict_access_qualifiers,
         bool strict_binding)
{
        const struct grid_info grid =
                grid_info(GL_FRAGMENT_SHADER,
                          image_base_internal_format(format), W, H);
        const struct image_info img =
                image_info(GL_TEXTURE_2D, format->format, W, H);
        GLuint prog = generate_program(
                grid, GL_FRAGMENT_SHADER,
                concat(image_hunk(img, ""),
                       test_hunk(strict_layout_qualifiers,
                                 strict_access_qualifiers),
                       hunk("SRC_IMAGE_Q uniform IMAGE_BARE_T src_img;\n"
                            "DST_IMAGE_Q uniform IMAGE_BARE_T dst_img;\n"
                            "\n"
                            "GRID_T op(ivec2 idx, GRID_T x) {\n"
                            "        imageStore(dst_img, IMAGE_ADDR(idx),"
                            "                   imageLoad(src_img, IMAGE_ADDR(idx)));\n"
                            "        return x;\n"
                            "}\n"),
                       NULL));
        bool ret = prog &&
                init_fb(grid) &&
                init_image(img, 0, strict_binding) &&
                init_image(img, 1, strict_binding) &&
                set_uniform_int(prog, "src_img", 0) &&
                set_uniform_int(prog, "dst_img", 1) &&
                draw_grid(grid, prog) &&
                check(grid, img);

        glDeleteProgram(prog);
        return ret;
}
static bool
run_test(const struct image_target_info *target,
         const struct image_extent size)
{
        const struct grid_info grid = {
                GL_FRAGMENT_SHADER_BIT,
                get_image_format(GL_RGBA32F),
                image_optimal_extent(size)
        };
        const struct image_info img = {
                target, grid.format, size,
                image_format_epsilon(grid.format)
        };
        GLuint prog = generate_program(
                grid, GL_FRAGMENT_SHADER,
                concat(image_hunk(img, ""),
                       hunk("readonly uniform IMAGE_T src_img;\n"
                            "writeonly uniform IMAGE_T dst_img;\n"
                            "\n"
                            "GRID_T op(ivec2 idx, GRID_T x) {\n"
                            "        imageStore(dst_img, IMAGE_ADDR(idx),"
                            "                   imageLoad(src_img, IMAGE_ADDR(idx)));\n"
                            "        return x;\n"
                            "}\n"),
                       NULL));
        bool ret = prog &&
                init_fb(grid) &&
                init_image(img, 0) &&
                init_image(img, 1) &&
                set_uniform_int(prog, "src_img", 0) &&
                set_uniform_int(prog, "dst_img", 1) &&
                draw_grid(grid, prog) &&
                check(img);

        glDeleteProgram(prog);
        return ret;
}
/**
 * Bind all image uniforms present in the program to the available
 * image units, re-using the same unit several times if necessary in
 * cyclical order.
 */
static bool
bind_images(const struct grid_info grid, GLuint prog)
{
        const unsigned m = max_image_units();
        const struct image_stage_info *stage;

        for (stage = image_stages(); stage->name; ++stage) {
                if (grid.stages & stage->bit) {
                        const unsigned first =
                                num_images_for_stages(grid, stage->bit - 1);
                        const unsigned n = num_images_for_stage(grid, stage);
                        const unsigned stage_idx = stage - image_stages();
                        int i;

                        for (i = 0; i < n; ++i) {
                                char *name = NULL;
                                bool ok;

                                asprintf(&name, "imgs_%d[%d]", stage_idx, i);
                                ok = set_uniform_int(prog, name,
                                                     (first + i) % m);
                                free(name);

                                if (!ok)
                                        return false;
                        }
                }
        }

        return true;
}
static bool
run_test(const struct image_op_info *op,
         unsigned w, unsigned h,
         bool (*check)(const struct grid_info grid,
                       const struct image_info img,
                       unsigned w, unsigned h),
         const char *body)
{
        const struct grid_info grid =
                grid_info(GL_FRAGMENT_SHADER, GL_R32UI, W, H);
        const struct image_info img = image_info_for_grid(grid);
        GLuint prog = generate_program(
                grid, GL_FRAGMENT_SHADER,
                concat(image_hunk(img, ""),
                       hunk("uniform IMAGE_T img;\n"),
                       hunk(op->hunk),
                       hunk(body),
                       NULL));
        bool ret = prog &&
                init_fb(grid) &&
                init_image(img) &&
                set_uniform_int(prog, "img", 0) &&
                draw_grid(set_grid_size(grid, w, h), prog) &&
                check(grid, img, w, h);

        glDeleteProgram(prog);
        return ret;
}
static bool
run_test(const struct image_qualifier_info *qual)
{
        const struct grid_info grid =
                grid_info(GL_FRAGMENT_SHADER, GL_R32UI, W, H);
        const struct image_info img =
                image_info(GL_TEXTURE_1D, GL_R32UI, W, H);
        GLuint prog = generate_program(
                grid,
                /**
                 * Write to consecutive locations of an image using
                 * the value read from a fixed location of a different
                 * image uniform which aliases the first image.  If
                 * the implementation incorrectly coalesces repeated
                 * loads from the fixed location the results of the
                 * test will be altered.
                 */
                GL_FRAGMENT_SHADER,
                concat(qualifier_hunk(qual),
                       image_hunk(img, ""),
                       hunk("IMAGE_Q IMAGE_UNIFORM_T src_img;\n"
                            "IMAGE_Q IMAGE_UNIFORM_T dst_img;\n"
                            "\n"
                            "GRID_T op(ivec2 idx, GRID_T x) {\n"
                            "        int i;\n"
                            "\n"
                            "        for (i = 0; i < N / 2; ++i) {\n"
                            "                imageStore(dst_img, 2 * i,"
                            "                           imageLoad(src_img, W) + 1u);\n"
                            "                imageStore(dst_img, 2 * i + 1,"
                            "                           imageLoad(src_img, W) - 1u);\n"
                            "        }\n"
                            "\n"
                            "        return x;\n"
                            "}\n"),
                       NULL));
        bool ret = prog &&
                init_fb(grid) &&
                init_image(img) &&
                set_uniform_int(prog, "src_img", 0) &&
                set_uniform_int(prog, "dst_img", 0) &&
                draw_grid(set_grid_size(grid, 1, 1), prog) &&
                (check(img) || qual->control_test);

        glDeleteProgram(prog);
        return ret;
}
static bool
run_test(const struct image_qualifier_info *qual,
         const struct image_stage_info *stage_w,
         const struct image_stage_info *stage_r,
         unsigned l)
{
        const struct grid_info grid = {
                stage_w->bit | stage_r->bit,
                get_image_format(GL_RGBA32UI),
                { l, l, 1, 1 }
        };
        const struct image_info img = image_info_for_grid(grid);
        GLuint prog = generate_program(
                grid,
                /*
                 * Write (11, 22, 33, 44) to some location on the
                 * image from the write stage.
                 */
                stage_w->stage,
                concat(qualifier_hunk(qual),
                       image_hunk(img, ""),
                       hunk("IMAGE_Q uniform IMAGE_T img;\n"
                            "\n"
                            "GRID_T op(ivec2 idx, GRID_T x) {\n"
                            "        imageStore(img, idx, DATA_T(11, 22, 33, 44));"
                            "        return x;"
                            "}\n"),
                       NULL),
                /*
                 * The same location will read back the expected value
                 * if image access is coherent, as the shader inputs
                 * of the read stage are dependent on the outputs of
                 * the write stage and consequently they are
                 * guaranteed to be executed sequentially.
                 */
                stage_r->stage,
                concat(qualifier_hunk(qual),
                       image_hunk(img, ""),
                       hunk("IMAGE_Q uniform IMAGE_T img;\n"
                            "\n"
                            "GRID_T op(ivec2 idx, GRID_T x) {\n"
                            "        DATA_T v = imageLoad(img, idx);"
                            "        if (v == DATA_T(11, 22, 33, 44))"
                            "                return GRID_T(33, 33, 33, 33);"
                            "        else"
                            "                return GRID_T(77, 77, 77, 77);"
                            "}\n"),
                       NULL));
        bool ret = prog &&
                init_fb(grid) &&
                init_image(img) &&
                set_uniform_int(prog, "img", 0) &&
                draw_grid(grid, prog) &&
                (check(grid, img) || qual->control_test);

        glDeleteProgram(prog);
        return ret;
}
/**
 * Test skeleton: Init image to \a init_value, run the provided shader
 * \a op and check that the resulting image pixels equal \a
 * check_value.
 */
static bool
run_test(uint32_t init_value, uint32_t check_value, const char *op)
{
        const struct grid_info grid =
                grid_info(GL_FRAGMENT_SHADER, GL_R32UI, W, H);
        const struct image_info img = image_info_for_grid(grid);
        GLuint prog = generate_program(
                grid, GL_FRAGMENT_SHADER,
                concat(image_hunk(img, ""),
                       hunk("uniform IMAGE_T img;\n"),
                       hunk(op),
                       NULL));
        bool ret = prog &&
                init_fb(grid) &&
                init_image(img, init_value) &&
                set_uniform_int(prog, "img", 0) &&
                draw_grid(grid, prog) &&
                check(img, check_value);

        glDeleteProgram(prog);
        return ret;
}
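A caller could drive this skeleton roughly as in the following sketch; the shader body, the use of imageAtomicAdd() and the expected value 1 are assumptions made for illustration rather than part of the original test:

/*
 * Hypothetical usage sketch: start with every texel at 0, have each
 * fragment atomically add 1 to its own texel of the R32UI image, and
 * expect to read back 1 everywhere.
 */
static bool
example_atomic_add_test(void)
{
        return run_test(0, 1,
                        "GRID_T op(ivec2 idx, GRID_T x) {\n"
                        "        imageAtomicAdd(img, IMAGE_ADDR(idx), 1u);\n"
                        "        return x;\n"
                        "}\n");
}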
static bool
run_test_fragment(void)
{
        const char *fs_source =
                "#version 140\n"
                "#extension GL_ARB_shader_atomic_counters : enable\n"
                "\n"
                "out ivec4 fcolor;\n"
                "uniform int index;\n"
                "layout(binding = 0, offset = 4) uniform atomic_uint x[3];\n"
                "\n"
                "void main() {\n"
                "        fcolor.x = int(atomicCounterIncrement(x[1 + index]));\n"
                "        fcolor.y = int(atomicCounterIncrement(x[0 + index]));\n"
                "        fcolor.z = int(atomicCounterIncrement(x[1 + index]));\n"
                "        fcolor.w = int(atomicCounterIncrement(x[0 + index]));\n"
                "}\n";
        const char *vs_source =
                "#version 140\n"
                "#extension GL_ARB_shader_atomic_counters : enable\n"
                "\n"
                "in vec4 position;\n"
                "\n"
                "void main() {\n"
                "        gl_Position = position;\n"
                "}\n";
        /*
         * The counter array is declared at offset 4, so x[0..2] alias
         * start[1..3] = { 2, 4, 8 }.  With index == 1 the single
         * fragment increments x[2], x[1], x[2], x[1] in that order and
         * writes out the pre-increment values 8, 4, 9 and 5.
         */
        const uint32_t start[] = { 1, 2, 4, 8 };
        const unsigned int expected[] = { 8, 4, 9, 5 };
        GLuint prog = glCreateProgram();
        bool ret = atomic_counters_compile(prog, GL_FRAGMENT_SHADER, fs_source) &&
                atomic_counters_compile(prog, GL_VERTEX_SHADER, vs_source) &&
                set_uniform_int(prog, "index", 1) &&
                atomic_counters_draw_point(prog, sizeof(start), start) &&
                piglit_probe_rect_rgba_uint(0, 0, 1, 1, expected);

        glDeleteProgram(prog);
        return ret;
}
/**
 * If \a layered is false, bind an individual layer of a texture to an
 * image unit, read its contents and write back a different value to
 * the same location.  If \a layered is true or the texture has a
 * single layer, the whole texture will be read and written back.
 *
 * For textures with a single layer, the arguments \a layered and \a
 * layer, which are passed through to the corresponding arguments of
 * glBindImageTexture(), should have no effect as required by the spec.
 */
static bool
run_test(const struct image_target_info *target,
         bool layered, unsigned layer)
{
        const struct image_info real_img = image_info(
                target->target, GL_RGBA32F, W, H);
        const unsigned slices = (layered ? 1 : image_num_layers(real_img));
        /*
         * "Slice" of the image that will be bound to the pipeline.
         */
        const struct image_info slice_img = image_info(
                (layered ? target->target : image_layer_target(target)),
                GL_RGBA32F, W, H / slices);
        /*
         * Grid with as many elements as the slice.
         */
        const struct grid_info grid = grid_info(
                GL_FRAGMENT_SHADER, GL_RGBA32F, W, H / slices);
        GLuint prog = generate_program(
                grid, GL_FRAGMENT_SHADER,
                concat(image_hunk(slice_img, ""),
                       hunk("IMAGE_UNIFORM_T img;\n"
                            "\n"
                            "GRID_T op(ivec2 idx, GRID_T x) {\n"
                            "        GRID_T v = imageLoad(img, IMAGE_ADDR(idx));\n"
                            "        imageStore(img, IMAGE_ADDR(idx), DATA_T(33));\n"
                            "        return v;\n"
                            "}\n"),
                       NULL));
        bool ret = prog &&
                init_fb(grid) &&
                init_image(real_img, layered, layer) &&
                set_uniform_int(prog, "img", 0) &&
                draw_grid(grid, prog) &&
                check(grid, real_img, (slices == 1 ? 0 : layer));

        glDeleteProgram(prog);
        return ret;
}
bool
download_image_levels(const struct image_info img, unsigned num_levels,
                      unsigned unit, uint32_t *r_pixels)
{
        const unsigned m = image_num_components(img.format);
        int i, l;

        glMemoryBarrier(GL_TEXTURE_UPDATE_BARRIER_BIT |
                        GL_BUFFER_UPDATE_BARRIER_BIT |
                        GL_PIXEL_BUFFER_BARRIER_BIT |
                        GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);

        glBindTexture(img.target->target, textures[unit]);

        switch (img.target->target) {
        case GL_TEXTURE_1D:
        case GL_TEXTURE_2D:
        case GL_TEXTURE_3D:
        case GL_TEXTURE_RECTANGLE:
        case GL_TEXTURE_1D_ARRAY:
        case GL_TEXTURE_2D_ARRAY:
        case GL_TEXTURE_CUBE_MAP_ARRAY:
                assert(img.target->target != GL_TEXTURE_RECTANGLE ||
                       num_levels == 1);

                for (l = 0; l < num_levels; ++l)
                        glGetTexImage(img.target->target, l,
                                      img.format->pixel_format,
                                      image_base_type(img.format),
                                      &r_pixels[m * image_level_offset(img, l)]);
                break;

        case GL_TEXTURE_CUBE_MAP:
                for (l = 0; l < num_levels; ++l) {
                        const unsigned offset = m * image_level_offset(img, l);
                        const unsigned face_sz =
                                m * product(image_level_size(img, l)) / 6;

                        for (i = 0; i < 6; ++i)
                                glGetTexImage(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, l,
                                              img.format->pixel_format,
                                              image_base_type(img.format),
                                              &r_pixels[offset + face_sz * i]);
                }
                break;

        case GL_TEXTURE_BUFFER: {
                /*
                 * glGetTexImage() isn't supposed to work with buffer
                 * textures.  We copy the packed pixels to a texture
                 * with the same internal format as the image to let
                 * the GL unpack them for us.
                 */
                const struct image_extent grid = image_optimal_extent(img.size);
                GLuint packed_tex;

                assert(num_levels == 1);

                glGenTextures(1, &packed_tex);
                glBindTexture(GL_TEXTURE_2D, packed_tex);
                glBindBuffer(GL_PIXEL_UNPACK_BUFFER, buffers[unit]);

                glTexImage2D(GL_TEXTURE_2D, 0, img.format->format,
                             grid.x, grid.y, 0, img.format->pixel_format,
                             img.format->pixel_type, NULL);
                glGetTexImage(GL_TEXTURE_2D, 0, img.format->pixel_format,
                              image_base_type(img.format), r_pixels);

                glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
                glDeleteTextures(1, &packed_tex);
                break;
        }
        case GL_TEXTURE_2D_MULTISAMPLE:
        case GL_TEXTURE_2D_MULTISAMPLE_ARRAY: {
                /*
                 * GL doesn't seem to provide any direct way to read
                 * back a multisample texture, so we use imageLoad()
                 * to copy its contents to a larger single-sample 2D
                 * texture from the fragment shader.
                 */
                const struct grid_info grid = {
                        get_image_stage(GL_FRAGMENT_SHADER)->bit,
                        img.format,
                        image_optimal_extent(img.size)
                };
                GLuint prog = generate_program(
                        grid, GL_FRAGMENT_SHADER,
                        concat(image_hunk(img, "SRC_"),
                               image_hunk(image_info_for_grid(grid), "DST_"),
                               hunk("readonly SRC_IMAGE_UNIFORM_T src_img;\n"
                                    "writeonly DST_IMAGE_UNIFORM_T dst_img;\n"
                                    "\n"
                                    "GRID_T op(ivec2 idx, GRID_T x) {\n"
                                    "        imageStore(dst_img, DST_IMAGE_ADDR(idx),\n"
                                    "                   imageLoad(src_img, SRC_IMAGE_ADDR(idx)));\n"
                                    "        return x;\n"
                                    "}\n"),
                               NULL));
                bool ret = prog && generate_fb(grid, 1);
                GLuint tmp_tex;

                assert(num_levels == 1);

                glGenTextures(1, &tmp_tex);
                glBindTexture(GL_TEXTURE_2D, tmp_tex);
                glTexImage2D(GL_TEXTURE_2D, 0, img.format->format,
                             grid.size.x, grid.size.y, 0,
                             img.format->pixel_format,
                             image_base_type(img.format), NULL);

                glBindImageTexture(unit, textures[unit], 0, GL_TRUE, 0,
                                   GL_READ_ONLY, img.format->format);
                glBindImageTexture(6, tmp_tex, 0, GL_TRUE, 0,
                                   GL_WRITE_ONLY, img.format->format);

                ret &= set_uniform_int(prog, "src_img", unit) &&
                        set_uniform_int(prog, "dst_img", 6) &&
                        draw_grid(grid, prog);

                glMemoryBarrier(GL_TEXTURE_UPDATE_BARRIER_BIT);
                glGetTexImage(GL_TEXTURE_2D, 0, img.format->pixel_format,
                              image_base_type(img.format), r_pixels);

                glDeleteProgram(prog);
                glDeleteTextures(1, &tmp_tex);
                glBindFramebuffer(GL_FRAMEBUFFER, fb[0]);
                glViewportIndexedfv(0, vp[0]);

                if (!ret)
                        return false;
                break;
        }
        default:
                abort();
        }

        return piglit_check_gl_error(GL_NO_ERROR);
}
bool
upload_image_levels(const struct image_info img, unsigned num_levels,
                    unsigned level, unsigned unit, const uint32_t *pixels)
{
        const unsigned m = image_num_components(img.format);
        int i, l;

        if (get_texture(unit)) {
                glDeleteTextures(1, &textures[unit]);
                textures[unit] = 0;
        }

        if (get_buffer(unit)) {
                glDeleteBuffers(1, &buffers[unit]);
                buffers[unit] = 0;
        }

        glGenTextures(1, &textures[unit]);
        glBindTexture(img.target->target, textures[unit]);

        switch (img.target->target) {
        case GL_TEXTURE_1D:
                for (l = 0; l < num_levels; ++l) {
                        const struct image_extent size = image_level_size(img, l);

                        glTexImage1D(GL_TEXTURE_1D, l, img.format->format,
                                     size.x, 0, img.format->pixel_format,
                                     image_base_type(img.format),
                                     &pixels[m * image_level_offset(img, l)]);
                }
                break;

        case GL_TEXTURE_2D:
                for (l = 0; l < num_levels; ++l) {
                        const struct image_extent size = image_level_size(img, l);

                        glTexImage2D(GL_TEXTURE_2D, l, img.format->format,
                                     size.x, size.y, 0,
                                     img.format->pixel_format,
                                     image_base_type(img.format),
                                     &pixels[m * image_level_offset(img, l)]);
                }
                break;

        case GL_TEXTURE_3D:
                for (l = 0; l < num_levels; ++l) {
                        const struct image_extent size = image_level_size(img, l);

                        glTexImage3D(GL_TEXTURE_3D, l, img.format->format,
                                     size.x, size.y, size.z, 0,
                                     img.format->pixel_format,
                                     image_base_type(img.format),
                                     &pixels[m * image_level_offset(img, l)]);
                }
                break;

        case GL_TEXTURE_RECTANGLE:
                assert(num_levels == 1);

                glTexImage2D(GL_TEXTURE_RECTANGLE, 0, img.format->format,
                             img.size.x, img.size.y, 0,
                             img.format->pixel_format,
                             image_base_type(img.format), pixels);
                break;

        case GL_TEXTURE_CUBE_MAP:
                for (l = 0; l < num_levels; ++l) {
                        const unsigned offset = m * image_level_offset(img, l);
                        const struct image_extent size = image_level_size(img, l);
                        const unsigned face_sz = m * product(size) / 6;

                        for (i = 0; i < 6; ++i)
                                glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, l,
                                             img.format->format,
                                             size.x, size.y, 0,
                                             img.format->pixel_format,
                                             image_base_type(img.format),
                                             &pixels[offset + face_sz * i]);
                }
                break;

        case GL_TEXTURE_BUFFER: {
                /*
                 * glTexImage*() isn't supposed to work with buffer
                 * textures.  We copy the unpacked pixels to a texture
                 * with the desired internal format to let the GL pack
                 * them for us.
                 */
                const struct image_extent grid = image_optimal_extent(img.size);
                GLuint packed_tex;

                assert(num_levels == 1);

                glGenBuffers(1, &buffers[unit]);
                glBindBuffer(GL_PIXEL_PACK_BUFFER, buffers[unit]);
                glBufferData(GL_PIXEL_PACK_BUFFER,
                             img.size.x * image_pixel_size(img.format) / 8,
                             NULL, GL_STATIC_DRAW);

                glGenTextures(1, &packed_tex);
                glBindTexture(GL_TEXTURE_2D, packed_tex);
                glTexImage2D(GL_TEXTURE_2D, 0, img.format->format,
                             grid.x, grid.y, 0, img.format->pixel_format,
                             image_base_type(img.format), pixels);
                glGetTexImage(GL_TEXTURE_2D, 0, img.format->pixel_format,
                              img.format->pixel_type, NULL);
                glDeleteTextures(1, &packed_tex);
                glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

                glTexBuffer(GL_TEXTURE_BUFFER,
                            image_compat_format(img.format), buffers[unit]);
                break;
        }
        case GL_TEXTURE_1D_ARRAY:
                for (l = 0; l < num_levels; ++l) {
                        const struct image_extent size = image_level_size(img, l);

                        glTexImage2D(GL_TEXTURE_1D_ARRAY, l, img.format->format,
                                     size.x, size.y, 0,
                                     img.format->pixel_format,
                                     image_base_type(img.format),
                                     &pixels[m * image_level_offset(img, l)]);
                }
                break;

        case GL_TEXTURE_2D_ARRAY:
                for (l = 0; l < num_levels; ++l) {
                        const struct image_extent size = image_level_size(img, l);

                        glTexImage3D(GL_TEXTURE_2D_ARRAY, l, img.format->format,
                                     size.x, size.y, size.z, 0,
                                     img.format->pixel_format,
                                     image_base_type(img.format),
                                     &pixels[m * image_level_offset(img, l)]);
                }
                break;

        case GL_TEXTURE_CUBE_MAP_ARRAY:
                for (l = 0; l < num_levels; ++l) {
                        const struct image_extent size = image_level_size(img, l);

                        glTexImage3D(GL_TEXTURE_CUBE_MAP_ARRAY, l, img.format->format,
                                     size.x, size.y, size.z, 0,
                                     img.format->pixel_format,
                                     image_base_type(img.format),
                                     &pixels[m * image_level_offset(img, l)]);
                }
                break;

        case GL_TEXTURE_2D_MULTISAMPLE:
        case GL_TEXTURE_2D_MULTISAMPLE_ARRAY: {
                /*
                 * GL doesn't seem to provide any direct way to
                 * initialize a multisample texture, so we use
                 * imageStore() to render to it from the fragment
                 * shader copying the contents of a larger
                 * single-sample 2D texture.
                 */
                const struct grid_info grid = {
                        get_image_stage(GL_FRAGMENT_SHADER)->bit,
                        img.format,
                        image_optimal_extent(img.size)
                };
                GLuint prog = generate_program(
                        grid, GL_FRAGMENT_SHADER,
                        concat(image_hunk(image_info_for_grid(grid), "SRC_"),
                               image_hunk(img, "DST_"),
                               hunk("readonly SRC_IMAGE_UNIFORM_T src_img;\n"
                                    "writeonly DST_IMAGE_UNIFORM_T dst_img;\n"
                                    "\n"
                                    "GRID_T op(ivec2 idx, GRID_T x) {\n"
                                    "        imageStore(dst_img, DST_IMAGE_ADDR(idx),\n"
                                    "                   imageLoad(src_img, SRC_IMAGE_ADDR(idx)));\n"
                                    "        return x;\n"
                                    "}\n"),
                               NULL));
                bool ret = prog && generate_fb(grid, 1);
                GLuint tmp_tex;

                assert(num_levels == 1);

                glGenTextures(1, &tmp_tex);
                glBindTexture(GL_TEXTURE_2D, tmp_tex);

                if (img.target->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
                        glTexImage3DMultisample(GL_TEXTURE_2D_MULTISAMPLE_ARRAY,
                                                img.size.x, img.format->format,
                                                img.size.y, img.size.z,
                                                img.size.w, GL_FALSE);
                } else {
                        glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE,
                                                img.size.x, img.format->format,
                                                img.size.y, img.size.z,
                                                GL_FALSE);
                }

                glTexImage2D(GL_TEXTURE_2D, 0, img.format->format,
                             grid.size.x, grid.size.y, 0,
                             img.format->pixel_format,
                             image_base_type(img.format), pixels);

                glBindImageTexture(unit, textures[unit], 0, GL_TRUE, 0,
                                   GL_WRITE_ONLY, img.format->format);
                glBindImageTexture(6, tmp_tex, 0, GL_TRUE, 0,
                                   GL_READ_ONLY, img.format->format);

                ret &= set_uniform_int(prog, "src_img", 6) &&
                        set_uniform_int(prog, "dst_img", unit) &&
                        draw_grid(grid, prog);

                glDeleteProgram(prog);
                glDeleteTextures(1, &tmp_tex);
                glBindFramebuffer(GL_FRAMEBUFFER, fb[0]);
                glViewportIndexedfv(0, vp[0]);
                glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);

                if (!ret)
                        return false;
                break;
        }
        default:
                abort();
        }

        glBindImageTexture(unit, textures[unit], level, GL_TRUE, 0,
                           GL_READ_WRITE, img.format->format);

        return piglit_check_gl_error(GL_NO_ERROR);
}
bool
draw_grid(const struct grid_info grid, GLuint prog)
{
        static GLuint lprog;

        if (lprog != prog) {
                glUseProgram(prog);
                lprog = prog;
        }

        if (grid.stages & GL_COMPUTE_SHADER_BIT) {
                set_uniform_int(prog, "ret_img", 7);
                glDispatchCompute(1, grid.size.y, 1);

        } else if (grid.stages & (GL_TESS_CONTROL_SHADER_BIT |
                                  GL_TESS_EVALUATION_SHADER_BIT)) {
                static struct image_extent size;
                static GLuint vao, vbo;

                if (size.x != grid.size.x || size.y != grid.size.y) {
                        size = grid.size;

                        if (!generate_grid_arrays(
                                    &vao, &vbo,
                                    1.0 / size.x - 1.0, 1.0 / size.y - 1.0,
                                    2.0 / size.x, 2.0 / size.y,
                                    size.x, size.y))
                                return false;
                }

                glBindVertexArray(vao);
                glPatchParameteri(GL_PATCH_VERTICES, 4);
                glDrawArrays(GL_PATCHES, 0, product(size));

        } else if (grid.stages & (GL_VERTEX_SHADER_BIT |
                                  GL_GEOMETRY_SHADER_BIT)) {
                static struct image_extent size;
                static GLuint vao, vbo;

                if (size.x != grid.size.x || size.y != grid.size.y) {
                        size = grid.size;

                        if (!generate_grid_arrays(
                                    &vao, &vbo,
                                    1.0 / size.x - 1.0, 1.0 / size.y - 1.0,
                                    2.0 / size.x, 2.0 / size.y,
                                    size.x, size.y))
                                return false;
                }

                glBindVertexArray(vao);
                glDrawArrays(GL_POINTS, 0, product(size));

        } else {
                static struct image_extent size;
                static GLuint vao, vbo;

                if (size.x != grid.size.x || size.y != grid.size.y) {
                        float vp[4];

                        glGetFloati_v(GL_VIEWPORT, 0, vp);
                        size = grid.size;

                        if (!generate_grid_arrays(
                                    &vao, &vbo, -1.0, -1.0,
                                    2.0 * size.x / vp[2], 2.0 * size.y / vp[3],
                                    2, 2))
                                return false;
                }

                glBindVertexArray(vao);
                glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
        }

        return piglit_check_gl_error(GL_NO_ERROR);
}
void FFTPassEffect::set_gl_state(GLuint glsl_program_num, const string &prefix, unsigned *sampler_num)
{
        Effect::set_gl_state(glsl_program_num, prefix, sampler_num);

        int input_size = (direction == VERTICAL) ? input_height : input_width;

        // See the comments on changes_output_size() in the .h file to see
        // why this is legal. It is _needed_ because it counteracts the
        // precision issues we get because we sample the input texture with
        // normalized coordinates (especially when the repeat count along
        // the axis is not a power of two); we very rapidly end up narrowly
        // missing a texel center, which causes precision loss to propagate
        // throughout the FFT.
        assert(*sampler_num == 1);
        glActiveTexture(GL_TEXTURE0);
        check_error();
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        check_error();
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        check_error();

        // The memory layout follows figure 5.2 on page 25 of
        // http://gpuwave.sesse.net/gpuwave.pdf -- it can be a bit confusing
        // at first, but is classically explained more or less as follows:
        //
        // The classic Cooley-Tukey decimation-in-time FFT algorithm works
        // by first splitting input data into odd and even elements
        // (e.g. bit-wise xxxxx0 and xxxxx1 for a size-32 FFT), then FFTing
        // them separately and combining them using twiddle factors.
        // So the outer pass (done _last_) looks only at the last bit,
        // and does one such merge pass of sub-size N/2 (FFT size N).
        //
        // FFT of the first part must then necessarily be split into xxxx00 and
        // xxxx10, and similarly xxxx01 and xxxx11 for the other part. Since
        // these two FFTs are handled identically, it means we split into xxxx0x
        // and xxxx1x, so that the second-outer pass (done second-to-last)
        // looks only at the second last bit, and so on. We do two such merge
        // passes of sub-size N/4 (sub-FFT size N/2).
        //
        // Thus, the inner, Nth pass (done first) splits at the first bit,
        // so 0 is paired with 16, 1 with 17 and so on, doing N/2 such merge
        // passes of sub-size 1 (sub-FFT size 2). We say that the stride is 16.
        // The second-inner, (N-1)th pass (done second) splits at the second
        // bit, so the stride is 8, and so on.
        assert((fft_size & (fft_size - 1)) == 0);  // Must be power of two.
        float *tmp = new float[fft_size * 4];
        int subfft_size = 1 << pass_number;
        double mulfac;
        if (inverse) {
                mulfac = 2.0 * M_PI;
        } else {
                mulfac = -2.0 * M_PI;
        }

        assert((fft_size & (fft_size - 1)) == 0);  // Must be power of two.
        assert(fft_size % subfft_size == 0);
        int stride = fft_size / subfft_size;
        for (int i = 0; i < fft_size; ++i) {
                int k = i / stride;       // Element number within this sub-FFT.
                int offset = i % stride;  // Sub-FFT number.
                double twiddle_real, twiddle_imag;

                if (k < subfft_size / 2) {
                        twiddle_real = cos(mulfac * (k / double(subfft_size)));
                        twiddle_imag = sin(mulfac * (k / double(subfft_size)));
                } else {
                        // This is mathematically equivalent to the twiddle factor calculations
                        // in the other branch of the if, but not numerically; the range
                        // reductions on x87 are not all that precise, and this keeps us within
                        // [0,pi).
                        k -= subfft_size / 2;
                        twiddle_real = -cos(mulfac * (k / double(subfft_size)));
                        twiddle_imag = -sin(mulfac * (k / double(subfft_size)));
                }

                // The support texture contains everything we need for the FFT:
                // Obviously, the twiddle factor (in the Z and W components), but also
                // which two samples to fetch. These are stored as normalized
                // X coordinate offsets (Y coordinate for a vertical FFT); the reason
                // for using offsets and not direct coordinates as in GPUwave
                // is that we can have multiple FFTs along the same line,
                // and want to reuse the support texture by repeating it.
                int base = k * stride * 2 + offset;
                int support_texture_index;
                if (direction == FFTPassEffect::VERTICAL) {
                        // Compensate for OpenGL's bottom-left convention.
                        support_texture_index = fft_size - i - 1;
                } else {
                        support_texture_index = i;
                }
                tmp[support_texture_index * 4 + 0] = (base - support_texture_index) / double(input_size);
                tmp[support_texture_index * 4 + 1] = (base + stride - support_texture_index) / double(input_size);
                tmp[support_texture_index * 4 + 2] = twiddle_real;
                tmp[support_texture_index * 4 + 3] = twiddle_imag;
        }

        glActiveTexture(GL_TEXTURE0 + *sampler_num);
        check_error();
        glBindTexture(GL_TEXTURE_1D, tex);
        check_error();
        glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        check_error();
        glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        check_error();
        glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_WRAP_S, GL_REPEAT);
        check_error();

        // Supposedly FFTs are very sensitive to inaccuracies in the twiddle factors,
        // at least according to a paper by Schatzman (see gpuwave.pdf reference [30]
        // for the full reference), so we keep them at 32-bit. However, for
        // small sizes, all components are exact anyway, so we can cheat there
        // (although noting that the source coordinates become somewhat less
        // accurate then, too).
        glTexImage1D(GL_TEXTURE_1D, 0, (subfft_size <= 4) ? GL_RGBA16F : GL_RGBA32F,
                     fft_size, 0, GL_RGBA, GL_FLOAT, tmp);
        check_error();

        delete[] tmp;

        set_uniform_int(glsl_program_num, prefix, "support_tex", *sampler_num);
        ++*sampler_num;

        assert(input_size % fft_size == 0);
        set_uniform_float(glsl_program_num, prefix, "num_repeats", input_size / fft_size);
}
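For reference, the butterfly that each output element of one merge pass ends up computing from the two fetched samples and the stored twiddle factor can be sketched on the CPU as follows. The "c1 + twiddle * c2" combination is an assumption about what the accompanying fragment shader does with the support texture data; the function name and the split real/imaginary arrays are invented for the example:

#include <math.h>

/*
 * Hypothetical CPU reference for one merge pass of sub-FFT size
 * subfft_size over fft_size elements, mirroring the pairing and
 * twiddle layout built for the support texture above
 * (stride = fft_size / subfft_size).
 */
static void
fft_merge_pass_reference(const float *in_re, const float *in_im,
                         float *out_re, float *out_im,
                         int fft_size, int subfft_size, int inverse)
{
        const int stride = fft_size / subfft_size;
        const double mulfac = (inverse ? 2.0 : -2.0) * M_PI;
        int i;

        for (i = 0; i < fft_size; ++i) {
                int k = i / stride;       /* Element number within this sub-FFT. */
                int offset = i % stride;  /* Sub-FFT number. */
                double sign = 1.0, tw_re, tw_im;
                int base;

                if (k >= subfft_size / 2) {
                        k -= subfft_size / 2;
                        sign = -1.0;
                }
                tw_re = sign * cos(mulfac * (k / (double)subfft_size));
                tw_im = sign * sin(mulfac * (k / (double)subfft_size));
                base = k * stride * 2 + offset;

                /* Complex c1 + twiddle * c2, assumed to match the shader. */
                out_re[i] = in_re[base] + tw_re * in_re[base + stride]
                                        - tw_im * in_im[base + stride];
                out_im[i] = in_im[base] + tw_re * in_im[base + stride]
                                        + tw_im * in_re[base + stride];
        }
}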