void app_initialize(App *app)
{
    glGenVertexArrays(1, &app->vao);
    glBindVertexArray(app->vao);
    glViewport(0, 0, app->window_width, app->window_height);

    app->vbo_cube = make_cube_mesh();
    app->vbo_quad = make_quad_mesh();

    get_attrib_location (mapping, position);
    get_uniform_location(mapping, height_scale);
    get_uniform_location(mapping, use_mip);
    get_uniform_location(mapping, max_lod_coverage);
    get_uniform_location(mapping, screen_size);
    get_uniform_location(mapping, diffusemap);
    get_uniform_location(mapping, heightmap);
    get_uniform_location(mapping, model);
    get_uniform_location(mapping, view);
    get_uniform_location(mapping, projection);

    get_attrib_location (backdrop, position);
    get_uniform_location(backdrop, sun_dir);
    get_uniform_location(backdrop, screen_size);
    get_uniform_location(backdrop, inv_tan_fov);
    get_uniform_location(backdrop, view);

    app->current_scene = 0;
}
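// The get_attrib_location/get_uniform_location calls above are written in a
// macro style: a shader name plus a variable name, with no string literals or
// explicit program handle. A minimal sketch of what such macros might look
// like is shown below; the program_<shader> and attrib_/uniform_<shader>_<var>
// fields on App are assumptions for illustration, not the actual App layout.
#define get_attrib_location(shader, name)                       \
    app->attrib_##shader##_##name =                             \
        glGetAttribLocation(app->program_##shader, #name)

#define get_uniform_location(shader, name)                      \
    app->uniform_##shader##_##name =                            \
        glGetUniformLocation(app->program_##shader, #name)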
// call this to bind attribute names
int bind_light_shader_attributes(GLuint program)
{
    int errors = 0;
    errors += get_attrib_location (&OGLAttr::lighting_shader.coord, "l_coord", program);
    errors += get_uniform_location(&OGLAttr::lighting_shader.mvp, "l_mvp", program);
    errors += get_uniform_location(&OGLAttr::lighting_shader.position_map, "g_position_map", program);
    errors += get_uniform_location(&OGLAttr::lighting_shader.color_map, "g_color_map", program);
    errors += get_uniform_location(&OGLAttr::lighting_shader.color_t_map, "g_color_t_map", program);
    //errors += get_uniform_location(&OGLAttr::lighting_shader.normal_map, "g_normal_map", program);
    errors += get_uniform_location(&OGLAttr::lighting_shader.shadow_map, "g_shadow_map", program);
    errors += get_uniform_location(&OGLAttr::lighting_shader.shadow_mvp, "l_shadow_mvp", program);
    errors += get_uniform_location(&OGLAttr::lighting_shader.screen_size, "l_screen_size", program);
    errors += get_uniform_location(&OGLAttr::lighting_shader.val, "l_val", program);
    errors += get_uniform_location(&OGLAttr::lighting_shader.properties, "l_properties", program);
    errors += get_uniform_location(&OGLAttr::lighting_shader.draw_mode, "l_draw_mode", program);
    if (errors) {
        printf("Could not bind one of the above variables. Aborting\n");
        return errors;
    }
    printf("Done loading lighting shaders: %d\n", glGetError());
    return 0;
}
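// The binders above rely on three-argument get_attrib_location/
// get_uniform_location helpers that store the queried location and return 1
// on failure, so the results can be summed into an error count. A minimal
// sketch of such helpers, assuming GLint location fields in OGLAttr:
static int get_attrib_location(GLint *location, const char *name, GLuint program)
{
    *location = glGetAttribLocation(program, name);
    if (*location < 0) {
        printf("Could not bind attribute %s\n", name);
        return 1;
    }
    return 0;
}

static int get_uniform_location(GLint *location, const char *name, GLuint program)
{
    *location = glGetUniformLocation(program, name);
    if (*location < 0) {
        printf("Could not bind uniform %s\n", name);
        return 1;
    }
    return 0;
}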
void create_font_renderer(FontRenderer *r)
{
    // First create the shader program
    create_program_from_files(&r->sp, 2, "../shaders/text.glslv", "../shaders/text.glslf");

    // Create the vertex array and buffer objects
    glGenVertexArrays(1, &r->vao);
    glBindVertexArray(r->vao);
    glGenBuffers(1, &r->vbo);

    // Set up the attribute arrays over the interleaved vertex data
    glBindBuffer(GL_ARRAY_BUFFER, r->vbo);

    unsigned int pos_attrib = get_attrib_location(&r->sp, "position");
    glEnableVertexAttribArray(pos_attrib);
    glVertexAttribPointer(pos_attrib, 3, GL_FLOAT, GL_FALSE, sizeof(BufferData), 0);

    unsigned int tex_attrib = get_attrib_location(&r->sp, "tex_coord");
    glEnableVertexAttribArray(tex_attrib);
    glVertexAttribPointer(tex_attrib, 2, GL_FLOAT, GL_FALSE, sizeof(BufferData), (const GLvoid *)sizeof(vec3));

    glBindVertexArray(0);
}
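// The stride and offset arguments above imply an interleaved vertex layout:
// sizeof(BufferData) is the stride and sizeof(vec3) is the byte offset of the
// texture coordinates. A sketch of the assumed layout (the field names are
// illustrative; only the vec3 + vec2 ordering is implied by the code):
struct BufferData
{
    vec3 position;   // consumed by the "position" attribute (3 floats)
    vec2 tex_coord;  // consumed by the "tex_coord" attribute (2 floats)
};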
// call this to bind attribute names
int bind_geometry_shader_attributes(GLuint program)
{
    int errors = 0;
    errors += get_attrib_location (&OGLAttr::geometry_shader.coord, "g_coord", program);
    errors += get_attrib_location (&OGLAttr::geometry_shader.texture_coord, "g_texturecoord", program);
    errors += get_uniform_location(&OGLAttr::geometry_shader.mvp, "g_mvp", program);
    errors += get_uniform_location(&OGLAttr::geometry_shader.world_transform, "g_worldtransform", program);
    errors += get_uniform_location(&OGLAttr::geometry_shader.draw_mode, "g_drawmode", program);
    errors += get_uniform_location(&OGLAttr::geometry_shader.intensity, "g_intensity", program);
    errors += get_uniform_location(&OGLAttr::geometry_shader.alphacutoff, "g_alphacutoff", program);
    errors += get_uniform_location(&OGLAttr::geometry_shader.enable_shadows, "g_enable_shadows", program);
    errors += get_uniform_location(&OGLAttr::geometry_shader.tile_texture, "tile_texture", program);
    if (errors) {
        printf("Could not bind one of the above variables. Aborting\n");
        return errors;
    }
    printf("Done loading geometry shaders: %d\n", glGetError());
    return 0;
}
// call this to bind attribute names
int bind_shadow_shader_attributes(GLuint program)
{
    int errors = 0;
    errors += get_attrib_location (&OGLAttr::shadow_shader.coord, "g_coord", program);
    errors += get_uniform_location(&OGLAttr::shadow_shader.mvp, "g_mvp", program);
    if (errors) {
        printf("Could not bind one of the above variables. Aborting\n");
        return errors;
    }
    printf("Done loading shadow shaders: %d\n", glGetError());
    return 0;
}
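// A hedged usage sketch: after each program is linked, the matching binder is
// called and its error count checked before rendering begins. The
// create_program(...) helper and shader file names here are assumptions for
// illustration, not part of the code above.
void load_shaders()
{
    GLuint geometry_program = create_program("geometry.vert", "geometry.frag"); // assumed helper
    GLuint lighting_program = create_program("lighting.vert", "lighting.frag"); // assumed helper
    GLuint shadow_program   = create_program("shadow.vert",   "shadow.frag");   // assumed helper
    if (bind_geometry_shader_attributes(geometry_program) ||
        bind_light_shader_attributes(lighting_program) ||
        bind_shadow_shader_attributes(shadow_program))
    {
        exit(EXIT_FAILURE); // at least one attribute or uniform failed to bind
    }
}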
void app_initialize(App *app)
{
    // Make sure the required extensions are present.
    const GLubyte *extensions = glGetString(GL_EXTENSIONS);
    char *found_multiview2_extension = strstr((const char *)extensions, "GL_OVR_multiview2");
    char *found_multisample_multiview_extension = strstr((const char *)extensions, "GL_OVR_multiview_multisampled_render_to_texture");
    char *found_border_clamp_extension = strstr((const char *)extensions, "GL_EXT_texture_border_clamp");

    if (found_multiview2_extension == NULL)
    {
        LOGI("OpenGL ES 3.0 implementation does not support GL_OVR_multiview2 extension.\n");
        exit(EXIT_FAILURE);
    }
    if (found_multisample_multiview_extension == NULL)
    {
        // If multisampled multiview is not supported, multisampling will not be used, so there is no need to exit here.
        LOGI("OpenGL ES 3.0 implementation does not support GL_OVR_multiview_multisampled_render_to_texture extension.\n");
    }
    if (found_border_clamp_extension == NULL)
    {
        LOGI("OpenGL ES 3.0 implementation does not support GL_EXT_texture_border_clamp extension.\n");
        exit(EXIT_FAILURE);
    }

    GL_CHECK(glGenVertexArrays(1, &app->vao));
    GL_CHECK(glBindVertexArray(app->vao));
    GL_CHECK(glViewport(0, 0, app->window_width, app->window_height));

    app->vbo_cube = make_cube();
    app->fb = make_eye_framebuffer(Eye_Fb_Resolution_X, Eye_Fb_Resolution_Y, Num_Views);

    // The coefficients below may be calibrated by photographing an image
    // containing straight lines, both vertical and horizontal, through the
    // lenses of the HMD, at the position where the viewer would be looking
    // through them. Ideally, the user would be allowed to calibrate them for
    // their own eyes through some calibration utility, and the application
    // would then load a stored user profile at runtime. For now, we hardcode
    // some values based on our calibration of the SM-R320 Gear VR lenses.

    // Left lens
    app->hmd.left.coefficients_red.k1 = 0.19f;
    app->hmd.left.coefficients_red.k2 = 0.21f;
    app->hmd.left.coefficients_red.k3 = 0.0f;
    app->hmd.left.coefficients_red.p1 = 0.0f;
    app->hmd.left.coefficients_red.p2 = 0.0f;

    app->hmd.left.coefficients_green.k1 = 0.22f;
    app->hmd.left.coefficients_green.k2 = 0.24f;
    app->hmd.left.coefficients_green.k3 = 0.0f;
    app->hmd.left.coefficients_green.p1 = 0.0f;
    app->hmd.left.coefficients_green.p2 = 0.0f;

    app->hmd.left.coefficients_blue.k1 = 0.24f;
    app->hmd.left.coefficients_blue.k2 = 0.26f;
    app->hmd.left.coefficients_blue.k3 = 0.0f;
    app->hmd.left.coefficients_blue.p1 = 0.0f;
    app->hmd.left.coefficients_blue.p2 = 0.0f;

    // Right lens
    app->hmd.right.coefficients_red.k1 = 0.19f;
    app->hmd.right.coefficients_red.k2 = 0.21f;
    app->hmd.right.coefficients_red.k3 = 0.0f;
    app->hmd.right.coefficients_red.p1 = 0.0f;
    app->hmd.right.coefficients_red.p2 = 0.0f;

    app->hmd.right.coefficients_green.k1 = 0.22f;
    app->hmd.right.coefficients_green.k2 = 0.24f;
    app->hmd.right.coefficients_green.k3 = 0.0f;
    app->hmd.right.coefficients_green.p1 = 0.0f;
    app->hmd.right.coefficients_green.p2 = 0.0f;

    app->hmd.right.coefficients_blue.k1 = 0.24f;
    app->hmd.right.coefficients_blue.k2 = 0.26f;
    app->hmd.right.coefficients_blue.k3 = 0.0f;
    app->hmd.right.coefficients_blue.p1 = 0.0f;
    app->hmd.right.coefficients_blue.p2 = 0.0f;

    // These may be computed by measuring the distance between the top of the
    // unscaled distorted image and the top of the screen. Denote this
    // distance by Delta. The normalized view coordinate of the distorted
    // image top is
    //     Y = 1 - 2 Delta / Screen_Size_Y
    // We want to scale this coordinate such that it maps to the top of the
    // view. That is,
    //     Y * fill_scale = 1
    // Solving for fill_scale gives the equations below.
    float delta = Centimeter(0.7f);
    app->hmd.left.fill_scale  = 1.0f / (1.0f - 2.0f * delta / Screen_Size_Y);
    app->hmd.right.fill_scale = 1.0f / (1.0f - 2.0f * delta / Screen_Size_Y);

    // These are computed such that the centres of the displayed framebuffers
    // on the device are separated by the viewer's IPD.
    app->hmd.left.image_centre  = vec2(+1.0f - Eye_IPD / (Screen_Size_X / 2.0f), 0.0f);
    app->hmd.right.image_centre = vec2(-1.0f + Eye_IPD / (Screen_Size_X / 2.0f), 0.0f);

    // These are computed such that the distortion takes place around an
    // offset determined by the difference between the lens separation and
    // the viewer's eye IPD. If the difference is zero, the distortion takes
    // place around the image centre.
    app->hmd.left.distort_centre  = vec2((Lens_IPD - Eye_IPD) / (Screen_Size_X / 2.0f), 0.0f);
    app->hmd.right.distort_centre = vec2((Eye_IPD - Lens_IPD) / (Screen_Size_X / 2.0f), 0.0f);

    app->warp_mesh[0] = make_warp_mesh(app->hmd.left);
    app->warp_mesh[1] = make_warp_mesh(app->hmd.right);

    get_attrib_location (distort, position);
    get_attrib_location (distort, uv_red_low_res);
    get_attrib_location (distort, uv_green_low_res);
    get_attrib_location (distort, uv_blue_low_res);
    get_attrib_location (distort, uv_red_high_res);
    get_attrib_location (distort, uv_green_high_res);
    get_attrib_location (distort, uv_blue_high_res);
    get_uniform_location(distort, layer_index);
    get_uniform_location(distort, framebuffer);

    get_attrib_location (cube, position);
    get_attrib_location (cube, normal);
    get_uniform_location(cube, projection);
    get_uniform_location(cube, view);
    get_uniform_location(cube, model);
}
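// The k1..k3 and p1, p2 fields suggest a Brown-Conrady style lens model:
// the k terms are radial coefficients and the p terms are tangential
// coefficients. A sketch of how make_warp_mesh might warp a mesh vertex
// around distort_centre with one coefficient set; the function and type
// names and the exact formulation are assumptions, not the sample's actual
// implementation.
static vec2 distort_point(vec2 p, vec2 centre, DistortionCoefficients c)
{
    vec2 d = vec2(p.x - centre.x, p.y - centre.y);
    float r2 = d.x * d.x + d.y * d.y;

    // Radial term: 1 + k1*r^2 + k2*r^4 + k3*r^6
    float radial = 1.0f + c.k1 * r2 + c.k2 * r2 * r2 + c.k3 * r2 * r2 * r2;

    // Tangential terms driven by p1 and p2 (zero with the values above)
    float tx = 2.0f * c.p1 * d.x * d.y + c.p2 * (r2 + 2.0f * d.x * d.x);
    float ty = c.p1 * (r2 + 2.0f * d.y * d.y) + 2.0f * c.p2 * d.x * d.y;

    return vec2(centre.x + d.x * radial + tx,
                centre.y + d.y * radial + ty);
}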