/* Creates the per-device OpenGL platform data on Windows (WGL).
 *
 * Flow: spin up a throwaway ("dummy") GL context, use it to query the real
 * pixel format via the WGL extension machinery, tear the dummy down, then
 * create the actual swap chain and rendering context.
 *
 * device: graphics device this platform belongs to (also passed to the
 *         debug-message callback).
 * info:   initialization data used to pick the pixel format / default swap.
 *
 * Returns the allocated platform on success, or NULL on failure (logs an
 * error and releases everything it allocated).
 */
struct gl_platform *gl_platform_create(device_t device,
		struct gs_init_data *info)
{
	struct gl_platform *plat = bmalloc(sizeof(struct gl_platform));
	struct dummy_context dummy;
	int pixel_format;
	PIXELFORMATDESCRIPTOR pfd;

	/* zero both so the shared `fail:` cleanup is safe no matter how far
	 * initialization got before bailing out */
	memset(plat, 0, sizeof(struct gl_platform));
	memset(&dummy, 0, sizeof(struct dummy_context));

	if (!gl_dummy_context_init(&dummy))
		goto fail;
	if (!gl_init_extensions(device))
		goto fail;

	/* you have to have a dummy context open before you can actually
	 * use wglChoosePixelFormatARB */
	if (!gl_getpixelformat(dummy.hdc, info, &pixel_format, &pfd))
		goto fail;

	/* dummy context has served its purpose; drop it before creating the
	 * real swap/context */
	gl_dummy_context_free(&dummy);

	if (!init_default_swap(plat, device, pixel_format, &pfd, info))
		goto fail;

	plat->hrc = gl_init_context(plat->swap.wi->hdc);
	if (!plat->hrc)
		goto fail;

	/* optional niceties: only enabled when the driver advertises them */
	if (GLEW_ARB_seamless_cube_map) {
		glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
		gl_success("GL_TEXTURE_CUBE_MAP_SEAMLESS");
	}

#ifdef _DEBUG
	/* debug builds: route AMD driver debug messages to our callback,
	 * with `device` as the user pointer */
	if (GLEW_AMD_debug_output) {
		glDebugMessageEnableAMD(0, 0, 0, NULL, true);
		glDebugMessageCallbackAMD(gl_debug_message_amd, device);
		gl_success("glDebugMessageCallback");
	}
#endif

	return plat;

fail:
	/* NOTE(review): if init_default_swap or gl_init_context fails, the
	 * dummy context was already freed on the success path above, and this
	 * frees it a second time — safe only if gl_dummy_context_free resets
	 * the struct (or tolerates an already-freed/zeroed dummy); confirm. */
	blog(LOG_ERROR, "gl_platform_create failed");
	gl_platform_destroy(plat);
	gl_dummy_context_free(&dummy);
	return NULL;
}
/// Queries driver limits and extension support, installs debug callbacks
/// (debug builds only), configures vsync, and binds a default VAO.
/// Must be called with a valid GL context current (the card profiler and
/// every glGet* below depend on it).
void GFXGLDevice::initGLState()
{
   // We don't currently need to sync device state with a known good place because we are
   // going to set everything in GFXGLStateBlock, but if we change our GFXGLStateBlock strategy, this may
   // need to happen.

   // Deal with the card profiler here when we know we have a valid context.
   mCardProfiler = new GFXGLCardProfiler();
   mCardProfiler->init();

   // Hardware limits: texture units available to shaders and MRT color
   // attachment count (clamped to what our render-target API can address).
   glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS, (GLint*)&mMaxShaderTextures);
   // JTH: Needs removed, ffp
   //glGetIntegerv(GL_MAX_TEXTURE_UNITS, (GLint*)&mMaxFFTextures);
   glGetIntegerv(GL_MAX_COLOR_ATTACHMENTS, (GLint*)&mMaxTRColors);
   mMaxTRColors = getMin( mMaxTRColors, (U32)(GFXTextureTarget::MaxRenderSlotId-1) );

   // Byte-tight row packing so texture uploads don't assume 4-byte alignment.
   glPixelStorei(GL_UNPACK_ALIGNMENT, 1);

   // [JTH 5/6/2016] GLSL 1.50 is really SM 4.0
   // Setting mPixelShaderVersion to 3.0 will allow Advanced Lighting to run.
   mPixelShaderVersion = 3.0;

   // Set capability extensions.
   mCapabilities.anisotropicFiltering = mCardProfiler->queryProfile("GL_EXT_texture_filter_anisotropic");
   mCapabilities.bufferStorage = mCardProfiler->queryProfile("GL_ARB_buffer_storage");
   mCapabilities.shaderModel5 = mCardProfiler->queryProfile("GL_ARB_gpu_shader5");
   mCapabilities.textureStorage = mCardProfiler->queryProfile("GL_ARB_texture_storage");
   mCapabilities.samplerObjects = mCardProfiler->queryProfile("GL_ARB_sampler_objects");
   mCapabilities.copyImage = mCardProfiler->queryProfile("GL_ARB_copy_image");
   mCapabilities.vertexAttributeBinding = mCardProfiler->queryProfile("GL_ARB_vertex_attrib_binding");

   // Vendor-specific workaround: disable glMap* on NVIDIA
   // (presumably a driver issue with mapped volatile buffers — see below).
   String vendorStr = (const char*)glGetString( GL_VENDOR );
   if( vendorStr.find("NVIDIA", 0, String::NoCase | String::Left) != String::NPos)
      mUseGlMap = false;

   // Workaround for all Mac's, has a problem using glMap* with volatile buffers
#ifdef TORQUE_OS_MAC
   mUseGlMap = false;
#endif

#if TORQUE_DEBUG
   // Prefer the ARB debug-output path; fall back to the older AMD one.
   if( gglHasExtension(ARB_debug_output) )
   {
      glEnable(GL_DEBUG_OUTPUT);
      glDebugMessageCallbackARB(glDebugCallback, NULL);
      // Synchronous output: callback fires on the erroring call's thread,
      // so a breakpoint in the callback shows the offending GL call.
      glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB);
      GLuint unusedIds = 0;
      glDebugMessageControlARB(GL_DONT_CARE, GL_DONT_CARE, GL_DONT_CARE, 0, &unusedIds, GL_TRUE);
   }
   else if(gglHasExtension(AMD_debug_output))
   {
      glEnable(GL_DEBUG_OUTPUT);
      glDebugMessageCallbackAMD(glAmdDebugCallback, NULL);
      //glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB);
      GLuint unusedIds = 0;
      // NOTE(review): AMD_debug_output's enable call takes (category,
      // severity, count, ids, enabled) and its spec uses 0 — not
      // GL_DONT_CARE — to mean "all categories/severities"; confirm these
      // arguments against the extension spec.
      glDebugMessageEnableAMD(GL_DONT_CARE, GL_DONT_CARE, 0,&unusedIds, GL_TRUE);
   }
#endif

   PlatformGL::setVSync(smDisableVSync ? 0 : 1);

   //install vsync callback
   Con::NotifyDelegate clbk(this, &GFXGLDevice::vsyncCallback);
   Con::addVariableNotify("$pref::Video::disableVerticalSync", clbk);

   //OpenGL 3 need a binded VAO for render
   // NOTE(review): the VAO handle is never stored or deleted — presumably
   // intentional (it stays bound for the device's lifetime), but it cannot
   // be freed later; confirm.
   GLuint vao;
   glGenVertexArrays(1, &vao);
   glBindVertexArray(vao);
}