extern "C" void initCL() { dumpCLinfo(); EGLDisplay mEglDisplay = eglGetCurrentDisplay(); if (mEglDisplay == EGL_NO_DISPLAY) LOGE("initCL: eglGetCurrentDisplay() returned 'EGL_NO_DISPLAY', error = %x", eglGetError()); EGLContext mEglContext = eglGetCurrentContext(); if (mEglContext == EGL_NO_CONTEXT) LOGE("initCL: eglGetCurrentContext() returned 'EGL_NO_CONTEXT', error = %x", eglGetError()); cl_context_properties props[] = { CL_GL_CONTEXT_KHR, (cl_context_properties) mEglContext, CL_EGL_DISPLAY_KHR, (cl_context_properties) mEglDisplay, CL_CONTEXT_PLATFORM, 0, 0 }; try { haveOpenCL = false; cl::Platform p = cl::Platform::getDefault(); std::string ext = p.getInfo<CL_PLATFORM_EXTENSIONS>(); if(ext.find("cl_khr_gl_sharing") == std::string::npos) LOGE("Warning: CL-GL sharing isn't supported by PLATFORM"); props[5] = (cl_context_properties) p(); theContext = cl::Context(CL_DEVICE_TYPE_GPU, props); std::vector<cl::Device> devs = theContext.getInfo<CL_CONTEXT_DEVICES>(); LOGD("Context returned %d devices, taking the 1st one", devs.size()); ext = devs[0].getInfo<CL_DEVICE_EXTENSIONS>(); if(ext.find("cl_khr_gl_sharing") == std::string::npos) LOGE("Warning: CL-GL sharing isn't supported by DEVICE"); theQueue = cl::CommandQueue(theContext, devs[0]); cl::Program::Sources src(1, std::make_pair(oclProgI2I, sizeof(oclProgI2I))); theProgI2I = cl::Program(theContext, src); theProgI2I.build(devs); cv::ocl::attachContext(p.getInfo<CL_PLATFORM_NAME>(), p(), theContext(), devs[0]()); if( cv::ocl::useOpenCL() ) LOGD("OpenCV+OpenCL works OK!"); else LOGE("Can't init OpenCV with OpenCL TAPI"); haveOpenCL = true; } catch(cl::Error& e) { LOGE("cl::Error: %s (%d)", e.what(), e.err()); } catch(std::exception& e) { LOGE("std::exception: %s", e.what()); } catch(...) { LOGE( "OpenCL info: unknown error while initializing OpenCL stuff" ); } LOGD("initCL completed"); }
bool EGLInteropResource::ensureSurface(int w, int h) { if (egl->surface && width == w && height == h) return true; releaseEGL(); // egl->dpy = eglGetCurrentDisplay(); qDebug("EGL version: %s, client api: %s", eglQueryString(egl->dpy, EGL_VERSION), eglQueryString(egl->dpy, EGL_CLIENT_APIS)); EGLint cfg_attribs[] = { EGL_RED_SIZE, 8, EGL_GREEN_SIZE, 8, EGL_BLUE_SIZE, 8, EGL_ALPHA_SIZE, 8, // EGL_BIND_TO_TEXTURE_RGBA, EGL_TRUE, //remove? EGL_SURFACE_TYPE, EGL_PBUFFER_BIT, EGL_NONE }; EGLint nb_cfgs; EGLConfig egl_cfg; if (!eglChooseConfig(egl->dpy, cfg_attribs, &egl_cfg, 1, &nb_cfgs)) { qWarning("Failed to create EGL configuration"); return false; } // check extensions QList<QByteArray> extensions = QByteArray(eglQueryString(egl->dpy, EGL_EXTENSIONS)).split(' '); // ANGLE_d3d_share_handle_client_buffer will be used if possible // TODO: strstr is enough const bool kEGL_ANGLE_d3d_share_handle_client_buffer = extensions.contains("EGL_ANGLE_d3d_share_handle_client_buffer"); const bool kEGL_ANGLE_query_surface_pointer = extensions.contains("EGL_ANGLE_query_surface_pointer"); if (!kEGL_ANGLE_d3d_share_handle_client_buffer && !kEGL_ANGLE_query_surface_pointer) { qWarning("EGL extension 'kEGL_ANGLE_query_surface_pointer' or 'ANGLE_d3d_share_handle_client_buffer' is required!"); return false; } GLint has_alpha = 1; //QOpenGLContext::currentContext()->format().hasAlpha() eglGetConfigAttrib(egl->dpy, egl_cfg, EGL_BIND_TO_TEXTURE_RGBA, &has_alpha); //EGL_ALPHA_SIZE qDebug("choose egl display:%p config: %p/%d, has alpha: %d", egl->dpy, egl_cfg, nb_cfgs, has_alpha); EGLint attribs[] = { EGL_WIDTH, w, EGL_HEIGHT, h, EGL_TEXTURE_FORMAT, has_alpha ? EGL_TEXTURE_RGBA : EGL_TEXTURE_RGB, EGL_TEXTURE_TARGET, EGL_TEXTURE_2D, EGL_NONE }; HANDLE share_handle = NULL; if (!kEGL_ANGLE_d3d_share_handle_client_buffer && kEGL_ANGLE_query_surface_pointer) { EGL_ENSURE((egl->surface = eglCreatePbufferSurface(egl->dpy, egl_cfg, attribs)) != EGL_NO_SURFACE, false); qDebug("pbuffer surface: %p", egl->surface); PFNEGLQUERYSURFACEPOINTERANGLEPROC eglQuerySurfacePointerANGLE = reinterpret_cast<PFNEGLQUERYSURFACEPOINTERANGLEPROC>(eglGetProcAddress("eglQuerySurfacePointerANGLE")); if (!eglQuerySurfacePointerANGLE) { qWarning("EGL_ANGLE_query_surface_pointer is not supported"); return false; } EGL_ENSURE(eglQuerySurfacePointerANGLE(egl->dpy, egl->surface, EGL_D3D_TEXTURE_2D_SHARE_HANDLE_ANGLE, &share_handle), false); } releaseDX(); // _A8 for a yuv plane /* * d3d resource share requires windows >= vista: https://msdn.microsoft.com/en-us/library/windows/desktop/bb219800(v=vs.85).aspx * from extension files: * d3d9: level must be 1, dimensions must match EGL surface's * d3d9ex or d3d10: */ DX_ENSURE_OK(d3ddev->CreateTexture(w, h, 1, D3DUSAGE_RENDERTARGET, has_alpha ? D3DFMT_A8R8G8B8 : D3DFMT_X8R8G8B8, D3DPOOL_DEFAULT, &dx_texture, &share_handle) , false); DX_ENSURE_OK(dx_texture->GetSurfaceLevel(0, &dx_surface), false); if (kEGL_ANGLE_d3d_share_handle_client_buffer) { // requires extension EGL_ANGLE_d3d_share_handle_client_buffer // egl surface size must match d3d texture's // d3d9ex or d3d10 is required EGL_ENSURE((egl->surface = eglCreatePbufferFromClientBuffer(egl->dpy, EGL_D3D_TEXTURE_2D_SHARE_HANDLE_ANGLE, share_handle, egl_cfg, attribs)), false); qDebug("pbuffer surface from client buffer: %p", egl->surface); } width = w; height = h; return true; }
/*!****************************************************************************
 @Function      InitView
 @Return        bool        true if no error occurred
 @Description   Code in InitView() will be called by PVRShell upon
                initialization or after a change in the rendering context.
                Used to initialize variables that are dependent on the
                rendering context (e.g. textures, vertex buffers, etc.)
******************************************************************************/
bool OGLES2MultiThreading::InitView()
{
    /*
        Retrieve pointers to EGL extension functions which are not exposed by default.
    */
    eglCreateSyncKHR     = (PFNEGLCREATESYNCKHRPROC)eglGetProcAddress("eglCreateSyncKHR");
    eglDestroySyncKHR    = (PFNEGLDESTROYSYNCKHRPROC)eglGetProcAddress("eglDestroySyncKHR");
    eglClientWaitSyncKHR = (PFNEGLCLIENTWAITSYNCKHRPROC)eglGetProcAddress("eglClientWaitSyncKHR");
    eglGetSyncAttribKHR  = (PFNEGLGETSYNCATTRIBKHRPROC)eglGetProcAddress("eglGetSyncAttribKHR");

    if(!eglCreateSyncKHR || !eglDestroySyncKHR || !eglClientWaitSyncKHR || !eglGetSyncAttribKHR)
    {
        PVRShellSet(prefExitMessage, "Error: Failed to retrieve function pointers for KHR_fence_sync extension functions.\nIt's possible that the host system does not support this extension.\n");
        return false;
    }

    // EGL and GL variables
    memset(&handles, 0, sizeof(handles));

    // Set the EGL sync object to invalid as we check the value of this object when determining the status
    // of the GL.
    handles.eglSync = EGL_NO_SYNC_KHR;

    // Retrieve EGL handles
    handles.eglDisplay     = eglGetCurrentDisplay();
    handles.eglMainContext = eglGetCurrentContext();

    // Create a mutex
    InitializeCriticalSection(&handles.mutex);

    /*
        At this point everything is initialized and we're ready to use
        OpenGL ES to draw something on the screen.
    */
    bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);
    int iW = PVRShellGet(prefWidth);
    int iH = PVRShellGet(prefHeight);

    // Initialise Print3D
    if(print3D.SetTextures(NULL, iW, iH, bRotate) != PVR_SUCCESS)
    {
        PVRShellSet(prefExitMessage, "ERROR: Failed to initialise Print3D.\n");
        return false;
    }
    if(loadingText.SetTextures(NULL, iW, iH, bRotate) != PVR_SUCCESS)
    {
        PVRShellSet(prefExitMessage, "ERROR: Failed to initialise Print3D.\n");
        return false;
    }

    // Load some shaders which will enable us to draw a splash screen.
    if(!LoadShaders(handles.uiLoadVertShader, handles.uiLoadFragShader, c_pszVertShader, c_pszFragShader))
    {
        PVRShellSet(prefExitMessage, "ERROR: Failed to load shaders.\n");
        return false;
    }
    if(!CreateProgram(handles.uiLoadProgram, handles.uiLoadVertShader, handles.uiLoadFragShader))
    {
        PVRShellSet(prefExitMessage, "ERROR: Failed to create a program.\n");
        return false;
    }
    if(!CreateLoadingGeometry(handles.uiLoadVbo))
    {
        PVRShellSet(prefExitMessage, "ERROR: Failed to create geometry.\n");
        return false;
    }

    // The colours are passed per channel (red,green,blue,alpha) as float values from 0.0 to 1.0
    glClearColor(0.6f, 0.8f, 1.0f, 1.0f); // clear blue

    iFrame   = 0;
    bLoading = true;

    /*
        Spawn a thread which will create its own context and load resources
        while we render a loading screen.
    */
    HANDLE r = CreateThread(NULL, 0, &ThreadFunc, (void *)&handles, 0, NULL);
    if(!r)
    {
        PVRShellSet(prefExitMessage, "ERROR: Failed to spawn a worker thread.\n");
        return false;
    }

    return true;
}
void piglit_init(int argc, char **argv)
{
    EGLDisplay egl_dpy = eglGetCurrentDisplay();
    piglit_require_egl_extension(egl_dpy, "EGL_EXT_image_dma_buf_import");
}
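// Added note: piglit_require_egl_extension() is a piglit test-framework helper. Outside of
// piglit the same check can be written with plain EGL; a minimal sketch (the function name
// is mine, not from the example):
#include <string.h>

static bool has_egl_extension(EGLDisplay dpy, const char *name)
{
    // strstr() is a simplification; a strict check would match whole space-separated tokens.
    const char *exts = eglQueryString(dpy, EGL_EXTENSIONS);
    return exts && strstr(exts, name) != NULL;
}
// e.g. has_egl_extension(eglGetCurrentDisplay(), "EGL_EXT_image_dma_buf_import")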
void CheckGLExtensions() { #if !PPSSPP_PLATFORM(UWP) // Make sure to only do this once. It's okay to call CheckGLExtensions from wherever. if (extensionsDone) return; extensionsDone = true; memset(&gl_extensions, 0, sizeof(gl_extensions)); gl_extensions.IsCoreContext = useCoreContext; #ifdef USING_GLES2 gl_extensions.IsGLES = !useCoreContext; #endif const char *renderer = (const char *)glGetString(GL_RENDERER); const char *versionStr = (const char *)glGetString(GL_VERSION); const char *glslVersionStr = (const char *)glGetString(GL_SHADING_LANGUAGE_VERSION); // Check vendor string to try and guess GPU const char *cvendor = (char *)glGetString(GL_VENDOR); // TODO: move this stuff to gpu_features.cpp if (cvendor) { const std::string vendor = StripSpaces(std::string(cvendor)); if (vendor == "NVIDIA Corporation" || vendor == "Nouveau" || vendor == "nouveau") { gl_extensions.gpuVendor = GPU_VENDOR_NVIDIA; } else if (vendor == "Advanced Micro Devices, Inc." || vendor == "ATI Technologies Inc.") { gl_extensions.gpuVendor = GPU_VENDOR_AMD; } else if (vendor == "Intel" || vendor == "Intel Inc." || vendor == "Intel Corporation" || vendor == "Tungsten Graphics, Inc") { // We'll assume this last one means Intel gl_extensions.gpuVendor = GPU_VENDOR_INTEL; } else if (vendor == "ARM") { gl_extensions.gpuVendor = GPU_VENDOR_ARM; } else if (vendor == "Imagination Technologies") { gl_extensions.gpuVendor = GPU_VENDOR_IMGTEC; } else if (vendor == "Qualcomm") { gl_extensions.gpuVendor = GPU_VENDOR_QUALCOMM; } else if (vendor == "Broadcom") { gl_extensions.gpuVendor = GPU_VENDOR_BROADCOM; // Just for reference: Galaxy Y has renderer == "VideoCore IV HW" } else if (vendor == "Vivante Corporation") { gl_extensions.gpuVendor = GPU_VENDOR_VIVANTE; } else { gl_extensions.gpuVendor = GPU_VENDOR_UNKNOWN; } } else { gl_extensions.gpuVendor = GPU_VENDOR_UNKNOWN; } ILOG("GPU Vendor : %s ; renderer: %s version str: %s ; GLSL version str: %s", cvendor, renderer ? renderer : "N/A", versionStr ? versionStr : "N/A", glslVersionStr ? glslVersionStr : "N/A"); if (renderer) { strncpy(gl_extensions.model, renderer, sizeof(gl_extensions.model)); gl_extensions.model[sizeof(gl_extensions.model) - 1] = 0; } // Start by assuming we're at 2.0. int parsed[2] = {2, 0}; { // Grab the version and attempt to parse. char buffer[128] = { 0 }; if (versionStr) { strncpy(buffer, versionStr, sizeof(buffer) - 1); } int len = (int)strlen(buffer); bool beforeDot = true; int lastDigit = 0; for (int i = 0; i < len; i++) { if (buffer[i] >= '0' && buffer[i] <= '9') { lastDigit = buffer[i] - '0'; if (!beforeDot) { parsed[1] = lastDigit; break; } } if (beforeDot && buffer[i] == '.' && lastDigit) { parsed[0] = lastDigit; beforeDot = false; } } if (beforeDot && lastDigit) { parsed[0] = lastDigit; } } #ifndef USING_GLES2 if (strstr(versionStr, "OpenGL ES") == versionStr) { // For desktops running GLES. gl_extensions.IsGLES = true; } #endif if (!gl_extensions.IsGLES) { // For desktop GL gl_extensions.ver[0] = parsed[0]; gl_extensions.ver[1] = parsed[1]; // If the GL version >= 4.3, we know it's a true superset of OpenGL ES 3.0 and can thus enable // all the same modern paths. // Most of it could be enabled on lower GPUs as well, but let's start this way. if (gl_extensions.VersionGEThan(4, 3, 0)) { gl_extensions.GLES3 = true; #ifdef USING_GLES2 // Try to load up the other funcs if we're not using glew. gl3stubInit(); #endif } } else { // Start by assuming we're at 2.0. 
gl_extensions.ver[0] = 2; #ifdef GL_MAJOR_VERSION // Before grabbing the values, reset the error. glGetError(); glGetIntegerv(GL_MAJOR_VERSION, &gl_extensions.ver[0]); glGetIntegerv(GL_MINOR_VERSION, &gl_extensions.ver[1]); // We check error here to detect if these properties were supported. if (glGetError() != GL_NO_ERROR) { // They weren't, reset to GLES 2.0. gl_extensions.ver[0] = 2; gl_extensions.ver[1] = 0; } else if (parsed[0] && (gl_extensions.ver[0] != parsed[0] || gl_extensions.ver[1] != parsed[1])) { // Something going wrong. Possible bug in GL ES drivers. See #9688 ILOG("GL ES version mismatch. Version string '%s' parsed as %d.%d but API return %d.%d. Fallback to GL ES 2.0.", versionStr ? versionStr : "N/A", parsed[0], parsed[1], gl_extensions.ver[0], gl_extensions.ver[1]); gl_extensions.ver[0] = 2; gl_extensions.ver[1] = 0; } #endif // If the above didn't give us a version, or gave us a crazy version, fallback. #ifdef USING_GLES2 if (gl_extensions.ver[0] < 3 || gl_extensions.ver[0] > 5) { // Try to load GLES 3.0 only if "3.0" found in version // This simple heuristic avoids issues on older devices where you can only call eglGetProcAddress a limited // number of times. Make sure to check for 3.0 in the shader version too to avoid false positives, see #5584. bool gl_3_0_in_string = strstr(versionStr, "3.0") && (glslVersionStr && strstr(glslVersionStr, "3.0")); bool gl_3_1_in_string = strstr(versionStr, "3.1") && (glslVersionStr && strstr(glslVersionStr, "3.1")); // intentionally left out .1 if ((gl_3_0_in_string || gl_3_1_in_string) && gl3stubInit()) { gl_extensions.ver[0] = 3; if (gl_3_1_in_string) { gl_extensions.ver[1] = 1; } gl_extensions.GLES3 = true; // Though, let's ban Mali from the GLES 3 path for now, see #4078 if (strstr(renderer, "Mali") != 0) { gl_extensions.GLES3 = false; } } else { // Just to be safe. gl_extensions.ver[0] = 2; gl_extensions.ver[1] = 0; } } else { // Otherwise, let's trust GL_MAJOR_VERSION. Note that Mali is intentionally not banned here. if (gl_extensions.ver[0] >= 3) { gl_extensions.GLES3 = gl3stubInit(); } } #else // If we have GLEW/similar, assume GLES3 loaded. gl_extensions.GLES3 = gl_extensions.ver[0] >= 3; #endif if (gl_extensions.GLES3) { if (gl_extensions.ver[1] >= 1) { ILOG("OpenGL ES 3.1 support detected!\n"); } else { ILOG("OpenGL ES 3.0 support detected!\n"); } } } const char *extString = nullptr; if (gl_extensions.ver[0] >= 3) { // Let's use the new way for OpenGL 3.x+, required in the core profile. GLint numExtensions = 0; glGetIntegerv(GL_NUM_EXTENSIONS, &numExtensions); g_all_gl_extensions.clear(); g_set_gl_extensions.clear(); for (GLint i = 0; i < numExtensions; ++i) { const char *ext = (const char *)glGetStringi(GL_EXTENSIONS, i); g_set_gl_extensions.insert(ext); g_all_gl_extensions += ext; g_all_gl_extensions += " "; } } else { extString = (const char *)glGetString(GL_EXTENSIONS); g_all_gl_extensions = extString ? extString : ""; ParseExtensionsString(g_all_gl_extensions, g_set_gl_extensions); } #ifdef WIN32 const char *wglString = 0; if (wglGetExtensionsStringEXT) wglString = wglGetExtensionsStringEXT(); g_all_egl_extensions = wglString ? 
wglString : ""; ParseExtensionsString(g_all_egl_extensions, g_set_egl_extensions); gl_extensions.EXT_swap_control_tear = g_set_egl_extensions.count("WGL_EXT_swap_control_tear") != 0; #elif !defined(USING_GLES2) // const char *glXString = glXQueryExtensionString(); // gl_extensions.EXT_swap_control_tear = strstr(glXString, "GLX_EXT_swap_control_tear") != 0; #endif // Check the desktop extension instead of the OES one. They are very similar. // Also explicitly check those ATI devices that claims to support npot if (renderer) { gl_extensions.OES_texture_npot = g_set_gl_extensions.count("GL_ARB_texture_non_power_of_two") != 0 && !(((strncmp(renderer, "ATI RADEON X", 12) == 0) || (strncmp(renderer, "ATI MOBILITY RADEON X", 21) == 0))); } gl_extensions.ARB_blend_func_extended = g_set_gl_extensions.count("GL_ARB_blend_func_extended") != 0; gl_extensions.EXT_blend_func_extended = g_set_gl_extensions.count("GL_EXT_blend_func_extended") != 0; gl_extensions.ARB_conservative_depth = g_set_gl_extensions.count("GL_ARB_conservative_depth") != 0; gl_extensions.ARB_shader_image_load_store = (g_set_gl_extensions.count("GL_ARB_shader_image_load_store") != 0) || (g_set_gl_extensions.count("GL_EXT_shader_image_load_store") != 0); gl_extensions.ARB_shading_language_420pack = (g_set_gl_extensions.count("GL_ARB_shading_language_420pack") != 0); gl_extensions.EXT_bgra = g_set_gl_extensions.count("GL_EXT_bgra") != 0; gl_extensions.EXT_gpu_shader4 = g_set_gl_extensions.count("GL_EXT_gpu_shader4") != 0; gl_extensions.NV_framebuffer_blit = g_set_gl_extensions.count("GL_NV_framebuffer_blit") != 0; gl_extensions.NV_copy_image = g_set_gl_extensions.count("GL_NV_copy_image") != 0; gl_extensions.OES_copy_image = g_set_gl_extensions.count("GL_OES_copy_image") != 0; gl_extensions.EXT_copy_image = g_set_gl_extensions.count("GL_EXT_copy_image") != 0; gl_extensions.ARB_copy_image = g_set_gl_extensions.count("GL_ARB_copy_image") != 0; gl_extensions.ARB_buffer_storage = g_set_gl_extensions.count("GL_ARB_buffer_storage") != 0; gl_extensions.ARB_vertex_array_object = g_set_gl_extensions.count("GL_ARB_vertex_array_object") != 0; gl_extensions.ARB_texture_float = g_set_gl_extensions.count("GL_ARB_texture_float") != 0; gl_extensions.EXT_texture_filter_anisotropic = g_set_gl_extensions.count("GL_EXT_texture_filter_anisotropic") != 0 || g_set_gl_extensions.count("GL_ARB_texture_filter_anisotropic") != 0; gl_extensions.EXT_draw_instanced = g_set_gl_extensions.count("GL_EXT_draw_instanced") != 0; gl_extensions.ARB_draw_instanced = g_set_gl_extensions.count("GL_ARB_draw_instanced") != 0; gl_extensions.ARB_cull_distance = g_set_gl_extensions.count("GL_ARB_cull_distance") != 0; if (gl_extensions.IsGLES) { gl_extensions.OES_texture_npot = g_set_gl_extensions.count("GL_OES_texture_npot") != 0; gl_extensions.OES_packed_depth_stencil = (g_set_gl_extensions.count("GL_OES_packed_depth_stencil") != 0) || gl_extensions.GLES3; gl_extensions.OES_depth24 = g_set_gl_extensions.count("GL_OES_depth24") != 0; gl_extensions.OES_depth_texture = g_set_gl_extensions.count("GL_OES_depth_texture") != 0; gl_extensions.OES_mapbuffer = g_set_gl_extensions.count("GL_OES_mapbuffer") != 0; gl_extensions.EXT_blend_minmax = g_set_gl_extensions.count("GL_EXT_blend_minmax") != 0; gl_extensions.EXT_unpack_subimage = g_set_gl_extensions.count("GL_EXT_unpack_subimage") != 0; gl_extensions.EXT_shader_framebuffer_fetch = g_set_gl_extensions.count("GL_EXT_shader_framebuffer_fetch") != 0; gl_extensions.NV_shader_framebuffer_fetch = 
g_set_gl_extensions.count("GL_NV_shader_framebuffer_fetch") != 0; gl_extensions.ARM_shader_framebuffer_fetch = g_set_gl_extensions.count("GL_ARM_shader_framebuffer_fetch") != 0; gl_extensions.OES_texture_float = g_set_gl_extensions.count("GL_OES_texture_float") != 0; gl_extensions.EXT_buffer_storage = g_set_gl_extensions.count("GL_EXT_buffer_storage") != 0; gl_extensions.EXT_clip_cull_distance = g_set_gl_extensions.count("GL_EXT_clip_cull_distance") != 0; #if defined(__ANDROID__) // On Android, incredibly, this is not consistently non-zero! It does seem to have the same value though. // https://twitter.com/ID_AA_Carmack/status/387383037794603008 #ifdef _DEBUG void *invalidAddress = (void *)eglGetProcAddress("InvalidGlCall1"); void *invalidAddress2 = (void *)eglGetProcAddress("AnotherInvalidGlCall2"); DLOG("Addresses returned for invalid extensions: %p %p", invalidAddress, invalidAddress2); #endif // These are all the same. Let's alias. if (!gl_extensions.OES_copy_image) { if (gl_extensions.NV_copy_image) { glCopyImageSubDataOES = (decltype(glCopyImageSubDataOES))eglGetProcAddress("glCopyImageSubDataNV"); } else if (gl_extensions.EXT_copy_image) { glCopyImageSubDataOES = (decltype(glCopyImageSubDataOES))eglGetProcAddress("glCopyImageSubDataEXT"); } } if (gl_extensions.NV_framebuffer_blit) { glBlitFramebufferNV = (PFNGLBLITFRAMEBUFFERNVPROC)eglGetProcAddress("glBlitFramebufferNV"); } gl_extensions.OES_vertex_array_object = g_set_gl_extensions.count("GL_OES_vertex_array_object") != 0; if (gl_extensions.OES_vertex_array_object) { glGenVertexArraysOES = (PFNGLGENVERTEXARRAYSOESPROC)eglGetProcAddress("glGenVertexArraysOES"); glBindVertexArrayOES = (PFNGLBINDVERTEXARRAYOESPROC)eglGetProcAddress("glBindVertexArrayOES"); glDeleteVertexArraysOES = (PFNGLDELETEVERTEXARRAYSOESPROC)eglGetProcAddress("glDeleteVertexArraysOES"); glIsVertexArrayOES = (PFNGLISVERTEXARRAYOESPROC)eglGetProcAddress("glIsVertexArrayOES"); } // Hm, this should be available on iOS too. gl_extensions.EXT_discard_framebuffer = g_set_gl_extensions.count("GL_EXT_discard_framebuffer") != 0; if (gl_extensions.EXT_discard_framebuffer) { glDiscardFramebufferEXT = (PFNGLDISCARDFRAMEBUFFEREXTPROC)eglGetProcAddress("glDiscardFramebufferEXT"); } #else gl_extensions.OES_vertex_array_object = false; gl_extensions.EXT_discard_framebuffer = false; #endif } else { // Desktops support minmax and subimage unpack (GL_UNPACK_ROW_LENGTH etc) gl_extensions.EXT_blend_minmax = true; gl_extensions.EXT_unpack_subimage = true; } // GLES 3 subsumes many ES2 extensions. if (gl_extensions.GLES3) { gl_extensions.EXT_blend_minmax = true; gl_extensions.EXT_unpack_subimage = true; } #if defined(__ANDROID__) if (gl_extensions.OES_mapbuffer) { glMapBuffer = (PFNGLMAPBUFFERPROC)eglGetProcAddress("glMapBufferOES"); } // Look for EGL extensions EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY); const char *eglString = eglQueryString(display, EGL_EXTENSIONS); g_all_egl_extensions = eglString ? 
eglString : ""; ParseExtensionsString(g_all_egl_extensions, g_set_egl_extensions); gl_extensions.EGL_NV_system_time = g_set_egl_extensions.count("EGL_NV_system_time") != 0; gl_extensions.EGL_NV_coverage_sample = g_set_egl_extensions.count("EGL_NV_coverage_sample") != 0; if (gl_extensions.EGL_NV_system_time) { eglGetSystemTimeNV = (PFNEGLGETSYSTEMTIMENVPROC)eglGetProcAddress("eglGetSystemTimeNV"); eglGetSystemTimeFrequencyNV = (PFNEGLGETSYSTEMTIMEFREQUENCYNVPROC)eglGetProcAddress("eglGetSystemTimeFrequencyNV"); } #elif defined(USING_GLES2) && defined(__linux__) const char *eglString = eglQueryString(NULL, EGL_EXTENSIONS); g_all_egl_extensions = eglString ? eglString : ""; if (eglString) { eglString = eglQueryString(eglGetCurrentDisplay(), EGL_EXTENSIONS); if (eglString) { g_all_egl_extensions.append(" "); g_all_egl_extensions.append(eglString); } } ParseExtensionsString(g_all_egl_extensions, g_set_egl_extensions); #endif glGetIntegerv(GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS, &gl_extensions.maxVertexTextureUnits); #ifdef GL_LOW_FLOAT // This is probably a waste of time, implementations lie. if (gl_extensions.IsGLES || g_set_gl_extensions.count("GL_ARB_ES2_compatibility") || gl_extensions.VersionGEThan(4, 1)) { const GLint precisions[6] = { GL_LOW_FLOAT, GL_MEDIUM_FLOAT, GL_HIGH_FLOAT, GL_LOW_INT, GL_MEDIUM_INT, GL_HIGH_INT }; GLint shaderTypes[2] = { GL_VERTEX_SHADER, GL_FRAGMENT_SHADER }; for (int st = 0; st < 2; st++) { for (int p = 0; p < 6; p++) { glGetShaderPrecisionFormat(shaderTypes[st], precisions[p], gl_extensions.range[st][p], &gl_extensions.precision[st][p]); } } // Now, Adreno lies. So let's override it. if (gl_extensions.gpuVendor == GPU_VENDOR_QUALCOMM) { WLOG("Detected Adreno - lowering int precision"); gl_extensions.range[1][5][0] = 15; gl_extensions.range[1][5][1] = 15; } } #endif gl_extensions.ARB_framebuffer_object = g_set_gl_extensions.count("GL_ARB_framebuffer_object") != 0; gl_extensions.EXT_framebuffer_object = g_set_gl_extensions.count("GL_EXT_framebuffer_object") != 0; gl_extensions.ARB_pixel_buffer_object = g_set_gl_extensions.count("GL_ARB_pixel_buffer_object") != 0; gl_extensions.NV_pixel_buffer_object = g_set_gl_extensions.count("GL_NV_pixel_buffer_object") != 0; if (!gl_extensions.IsGLES && gl_extensions.IsCoreContext) { // These are required, and don't need to be specified by the driver (they aren't on Apple.) gl_extensions.ARB_vertex_array_object = true; gl_extensions.ARB_framebuffer_object = true; } // Add any extensions that are included in core. May be elided. 
if (!gl_extensions.IsGLES) { if (gl_extensions.VersionGEThan(3, 0)) { gl_extensions.ARB_texture_float = true; } if (gl_extensions.VersionGEThan(3, 1)) { gl_extensions.ARB_draw_instanced = true; // ARB_uniform_buffer_object = true; } if (gl_extensions.VersionGEThan(3, 2)) { // ARB_depth_clamp = true; } if (gl_extensions.VersionGEThan(3, 3)) { gl_extensions.ARB_blend_func_extended = true; // ARB_explicit_attrib_location = true; } if (gl_extensions.VersionGEThan(4, 0)) { // ARB_gpu_shader5 = true; } if (gl_extensions.VersionGEThan(4, 1)) { // ARB_get_program_binary = true; // ARB_separate_shader_objects = true; // ARB_shader_precision = true; // ARB_viewport_array = true; } if (gl_extensions.VersionGEThan(4, 2)) { // ARB_texture_storage = true; } if (gl_extensions.VersionGEThan(4, 3)) { gl_extensions.ARB_copy_image = true; // ARB_explicit_uniform_location = true; // ARB_stencil_texturing = true; // ARB_texture_view = true; // ARB_vertex_attrib_binding = true; } if (gl_extensions.VersionGEThan(4, 4)) { gl_extensions.ARB_buffer_storage = true; } if (gl_extensions.VersionGEThan(4, 5)) { gl_extensions.ARB_cull_distance = true; } if (gl_extensions.VersionGEThan(4, 6)) { // Actually ARB, but they're basically the same. gl_extensions.EXT_texture_filter_anisotropic = true; } } #ifdef __APPLE__ if (!gl_extensions.IsGLES && !gl_extensions.IsCoreContext) { // Apple doesn't allow OpenGL 3.x+ in compatibility contexts. gl_extensions.ForceGL2 = true; } #endif ProcessGPUFeatures(); int error = glGetError(); if (error) ELOG("GL error in init: %i", error); #endif }
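// Added sketch (not PPSSPP's code): the version detection above walks GL_VERSION digit by
// digit; the same idea in a compact form is to skip any "OpenGL ES " style prefix and then
// read "major.minor" directly.
#include <cctype>
#include <cstdio>

static bool ParseGLVersionString(const char *versionStr, int *major, int *minor)
{
    if (!versionStr)
        return false;
    const char *p = versionStr;
    while (*p && !isdigit((unsigned char)*p))  // skip a vendor/API prefix such as "OpenGL ES "
        ++p;
    return sscanf(p, "%d.%d", major, minor) == 2;
}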
void getRenderBufferSize(GLint *width, GLint *height)
{
    eglQuerySurface(eglGetCurrentDisplay(), eglGetCurrentSurface(EGL_DRAW), EGL_WIDTH, width);
    eglQuerySurface(eglGetCurrentDisplay(), eglGetCurrentSurface(EGL_DRAW), EGL_HEIGHT, height);
}
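// Added usage sketch (the viewport call is my addition, not from the example): size the GL
// viewport to the current EGL draw surface.
GLint w = 0, h = 0;
getRenderBufferSize(&w, &h);
if (w > 0 && h > 0)
    glViewport(0, 0, w, h);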
bool EGLInteropResource::ensureD3D9EGL(int w, int h) { if (surface9 && res[0].w == w && res[0].h == h) return true; #if QTAV_HAVE(GUI_PRIVATE) QPlatformNativeInterface *nativeInterface = QGuiApplication::platformNativeInterface(); egl->dpy = static_cast<EGLDisplay>(nativeInterface->nativeResourceForContext("eglDisplay", QOpenGLContext::currentContext())); EGLConfig egl_cfg = static_cast<EGLConfig>(nativeInterface->nativeResourceForContext("eglConfig", QOpenGLContext::currentContext())); #else #ifdef Q_OS_WIN #if QT_VERSION < QT_VERSION_CHECK(5, 5, 0) #ifdef _MSC_VER #pragma message("ANGLE version in Qt<5.5 does not support eglQueryContext. You must upgrade your runtime ANGLE libraries") #else #warning "ANGLE version in Qt<5.5 does not support eglQueryContext. You must upgrade your runtime ANGLE libraries" #endif //_MSC_VER #endif #endif //Q_OS_WIN // eglQueryContext() added (Feb 2015): https://github.com/google/angle/commit/8310797003c44005da4143774293ea69671b0e2a egl->dpy = eglGetCurrentDisplay(); qDebug("EGL version: %s, client api: %s", eglQueryString(egl->dpy, EGL_VERSION), eglQueryString(egl->dpy, EGL_CLIENT_APIS)); // TODO: check runtime egl>=1.4 for eglGetCurrentContext() EGLint cfg_id = 0; EGL_ENSURE(eglQueryContext(egl->dpy, eglGetCurrentContext(), EGL_CONFIG_ID , &cfg_id) == EGL_TRUE, false); qDebug("egl config id: %d", cfg_id); EGLint nb_cfg = 0; EGL_ENSURE(eglGetConfigs(egl->dpy, NULL, 0, &nb_cfg) == EGL_TRUE, false); qDebug("eglGetConfigs number: %d", nb_cfg); QVector<EGLConfig> cfgs(nb_cfg); //check > 0 EGL_ENSURE(eglGetConfigs(egl->dpy, cfgs.data(), cfgs.size(), &nb_cfg) == EGL_TRUE, false); EGLConfig egl_cfg = NULL; for (int i = 0; i < nb_cfg; ++i) { EGLint id = 0; eglGetConfigAttrib(egl->dpy, cfgs[i], EGL_CONFIG_ID, &id); if (id == cfg_id) { egl_cfg = cfgs[i]; break; } } #endif qDebug("egl display:%p config: %p", egl->dpy, egl_cfg); // check extensions QList<QByteArray> extensions = QByteArray(eglQueryString(egl->dpy, EGL_EXTENSIONS)).split(' '); // ANGLE_d3d_share_handle_client_buffer will be used if possible const bool kEGL_ANGLE_d3d_share_handle_client_buffer = extensions.contains("EGL_ANGLE_d3d_share_handle_client_buffer"); const bool kEGL_ANGLE_query_surface_pointer = extensions.contains("EGL_ANGLE_query_surface_pointer"); if (!kEGL_ANGLE_d3d_share_handle_client_buffer && !kEGL_ANGLE_query_surface_pointer) { qWarning("EGL extension 'kEGL_ANGLE_query_surface_pointer' or 'ANGLE_d3d_share_handle_client_buffer' is required!"); return false; } GLint has_alpha = 1; //QOpenGLContext::currentContext()->format().hasAlpha() eglGetConfigAttrib(egl->dpy, egl_cfg, EGL_BIND_TO_TEXTURE_RGBA, &has_alpha); //EGL_ALPHA_SIZE EGLint attribs[] = { EGL_WIDTH, w, EGL_HEIGHT, h, EGL_TEXTURE_FORMAT, has_alpha ? 
EGL_TEXTURE_RGBA : EGL_TEXTURE_RGB, EGL_TEXTURE_TARGET, EGL_TEXTURE_2D, EGL_NONE }; HANDLE share_handle = NULL; if (!kEGL_ANGLE_d3d_share_handle_client_buffer && kEGL_ANGLE_query_surface_pointer) { EGL_ENSURE((egl->surface = eglCreatePbufferSurface(egl->dpy, egl_cfg, attribs)) != EGL_NO_SURFACE, false); qDebug("pbuffer surface: %p", egl->surface); PFNEGLQUERYSURFACEPOINTERANGLEPROC eglQuerySurfacePointerANGLE = reinterpret_cast<PFNEGLQUERYSURFACEPOINTERANGLEPROC>(eglGetProcAddress("eglQuerySurfacePointerANGLE")); if (!eglQuerySurfacePointerANGLE) { qWarning("EGL_ANGLE_query_surface_pointer is not supported"); return false; } EGL_ENSURE(eglQuerySurfacePointerANGLE(egl->dpy, egl->surface, EGL_D3D_TEXTURE_2D_SHARE_HANDLE_ANGLE, &share_handle), false); } SafeRelease(&surface9); SafeRelease(&texture9); // _A8 for a yuv plane /* * d3d resource share requires windows >= vista: https://msdn.microsoft.com/en-us/library/windows/desktop/bb219800(v=vs.85).aspx * from extension files: * d3d9: level must be 1, dimensions must match EGL surface's * d3d9ex or d3d10: */ DX_ENSURE(device9->CreateTexture(w, h, 1, D3DUSAGE_RENDERTARGET, has_alpha ? D3DFMT_A8R8G8B8 : D3DFMT_X8R8G8B8, D3DPOOL_DEFAULT, &texture9, &share_handle) , false); DX_ENSURE(texture9->GetSurfaceLevel(0, &surface9), false); if (kEGL_ANGLE_d3d_share_handle_client_buffer) { // requires extension EGL_ANGLE_d3d_share_handle_client_buffer // egl surface size must match d3d texture's // d3d9ex or d3d10 is required EGL_ENSURE((egl->surface = eglCreatePbufferFromClientBuffer(egl->dpy, EGL_D3D_TEXTURE_2D_SHARE_HANDLE_ANGLE, share_handle, egl_cfg, attribs)), false); qDebug("pbuffer surface from client buffer: %p", egl->surface); } return true; }
// Initialized EGL resources. bool Renderer::initEGL(ANativeWindow *window) { LOGI(">initEGL Got window %p", window); int samples = 0; Properties* config = Game::getInstance()->getConfig()->getNamespace( "window", true); if (config) { samples = std::max(config->getInt("samples"), 0); } LOGI(">initEGL samples=%d", samples); // Hard-coded to 32-bit/OpenGL ES 2.0. // NOTE: EGL_SAMPLE_BUFFERS, EGL_SAMPLES and EGL_DEPTH_SIZE MUST remain at the beginning of the attribute list // since they are expected to be at indices 0-5 in config fallback code later. // EGL_DEPTH_SIZE is also expected to EGLint eglConfigAttrs[] = { EGL_SAMPLE_BUFFERS, samples > 0 ? 1 : 0, EGL_SAMPLES, samples, EGL_DEPTH_SIZE, 24, EGL_RED_SIZE, 8, EGL_GREEN_SIZE, 8, EGL_BLUE_SIZE, 8, EGL_ALPHA_SIZE, 8, EGL_STENCIL_SIZE, 8, EGL_SURFACE_TYPE, EGL_WINDOW_BIT, EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, EGL_NONE }; EGLint eglConfigCount; const EGLint eglContextAttrs[] = { EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE }; const EGLint eglSurfaceAttrs[] = { EGL_RENDER_BUFFER, EGL_BACK_BUFFER, EGL_NONE }; if (_display == EGL_NO_DISPLAY && _context == EGL_NO_CONTEXT) { _display = eglGetCurrentDisplay(); LOGI(">initEGL _display=%p", _display); EGLSurface drawSurface = eglGetCurrentSurface(EGL_DRAW); LOGI(">initEGL drawSurface=%p", drawSurface); EGLSurface readSurface = eglGetCurrentSurface(EGL_READ); LOGI(">initEGL readSurface=%p", readSurface); EGLContext context = eglGetCurrentContext(); LOGI(">initEGL context=%p", context); // Get the EGL display and initialize. _display = eglGetDisplay(EGL_DEFAULT_DISPLAY); if (_display == EGL_NO_DISPLAY) { checkErrorEGL("eglGetDisplay"); goto error; } LOGI(">initEGL _display=%p", _display); eglMakeCurrent(_display, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); eglDestroyContext(_display, context); eglDestroySurface(_display, drawSurface); if (eglInitialize(_display, NULL, NULL) != EGL_TRUE) { checkErrorEGL("eglInitialize"); goto error; } // Try both 24 and 16-bit depth sizes since some hardware (i.e. Tegra) does not support 24-bit depth bool validConfig = false; EGLint depthSizes[] = { 24, 16 }; for (unsigned int i = 0; i < 2; ++i) { eglConfigAttrs[1] = samples > 0 ? 1 : 0; eglConfigAttrs[3] = samples; eglConfigAttrs[5] = depthSizes[i]; if (eglChooseConfig(_display, eglConfigAttrs, &__eglConfig, 1, &eglConfigCount) == EGL_TRUE && eglConfigCount > 0) { LOGI(">initEGL depthSizes[i]=%d", depthSizes[i]); validConfig = true; break; } if (samples) { // Try lowering the MSAA sample size until we find a config int sampleCount = samples; while (sampleCount) { GP_WARN( "No EGL config found for depth_size=%d and samples=%d. Trying samples=%d instead.", depthSizes[i], sampleCount, sampleCount / 2); sampleCount /= 2; eglConfigAttrs[1] = sampleCount > 0 ? 1 : 0; eglConfigAttrs[3] = sampleCount; if (eglChooseConfig(_display, eglConfigAttrs, &__eglConfig, 1, &eglConfigCount) == EGL_TRUE && eglConfigCount > 0) { validConfig = true; break; } } if (validConfig) break; } else { GP_WARN("No EGL config found for depth_size=%d.", depthSizes[i]); } } if (!validConfig) { checkErrorEGL("eglChooseConfig"); goto error; } _context = eglCreateContext(_display, __eglConfig, EGL_NO_CONTEXT, eglContextAttrs); if (_context == EGL_NO_CONTEXT) { checkErrorEGL("eglCreateContext"); goto error; } } // EGL_NATIVE_VISUAL_ID is an attribute of the EGLConfig that is // guaranteed to be accepted by ANativeWindow_setBuffersGeometry(). 
// As soon as we picked a EGLConfig, we can safely reconfigure the // ANativeWindow buffers to match, using EGL_NATIVE_VISUAL_ID. EGLint format; eglGetConfigAttrib(_display, __eglConfig, EGL_NATIVE_VISUAL_ID, &format); ANativeWindow_setBuffersGeometry(window, 0, 0, format); _surface = eglCreateWindowSurface(_display, __eglConfig, window, eglSurfaceAttrs); if (_surface == EGL_NO_SURFACE) { checkErrorEGL("eglCreateWindowSurface"); goto error; } if (eglMakeCurrent(_display, _surface, _surface, _context) != EGL_TRUE) { checkErrorEGL("eglMakeCurrent"); goto error; } eglQuerySurface(_display, _surface, EGL_WIDTH, &__width); eglQuerySurface(_display, _surface, EGL_HEIGHT, &__height); // __orientationAngle = getRotation() * 90; // Set vsync. eglSwapInterval(_display, WINDOW_VSYNC ? 1 : 0); // Initialize OpenGL ES extensions. __glExtensions = (const char*) glGetString(GL_EXTENSIONS); if (strstr(__glExtensions, "GL_OES_vertex_array_object") || strstr(__glExtensions, "GL_ARB_vertex_array_object")) { // Disable VAO extension for now. glBindVertexArray = (PFNGLBINDVERTEXARRAYOESPROC) eglGetProcAddress( "glBindVertexArrayOES"); glDeleteVertexArrays = (PFNGLDELETEVERTEXARRAYSOESPROC) eglGetProcAddress( "glDeleteVertexArraysOES"); glGenVertexArrays = (PFNGLGENVERTEXARRAYSOESPROC) eglGetProcAddress( "glGenVertexArraysOES"); glIsVertexArray = (PFNGLISVERTEXARRAYOESPROC) eglGetProcAddress( "glIsVertexArrayOES"); } LOGI("<initEGL true"); return true; error: LOGI("<initEGL false"); return false; }
void QEGLPlatformContext::updateFormatFromGL() { #ifndef QT_NO_OPENGL // Have to save & restore to prevent QOpenGLContext::currentContext() from becoming // inconsistent after QOpenGLContext::create(). EGLDisplay prevDisplay = eglGetCurrentDisplay(); if (prevDisplay == EGL_NO_DISPLAY) // when no context is current prevDisplay = m_eglDisplay; EGLContext prevContext = eglGetCurrentContext(); EGLSurface prevSurfaceDraw = eglGetCurrentSurface(EGL_DRAW); EGLSurface prevSurfaceRead = eglGetCurrentSurface(EGL_READ); // Rely on the surfaceless extension, if available. This is beneficial since we can // avoid creating an extra pbuffer surface which is apparently troublesome with some // drivers (Mesa) when certain attributes are present (multisampling). EGLSurface tempSurface = EGL_NO_SURFACE; if (!q_hasEglExtension(m_eglDisplay, "EGL_KHR_surfaceless_context")) tempSurface = createTemporaryOffscreenSurface(); if (eglMakeCurrent(m_eglDisplay, tempSurface, tempSurface, m_eglContext)) { if (m_format.renderableType() == QSurfaceFormat::OpenGL || m_format.renderableType() == QSurfaceFormat::OpenGLES) { const GLubyte *s = glGetString(GL_VERSION); if (s) { QByteArray version = QByteArray(reinterpret_cast<const char *>(s)); int major, minor; if (QPlatformOpenGLContext::parseOpenGLVersion(version, major, minor)) { m_format.setMajorVersion(major); m_format.setMinorVersion(minor); } } m_format.setProfile(QSurfaceFormat::NoProfile); m_format.setOptions(QSurfaceFormat::FormatOptions()); if (m_format.renderableType() == QSurfaceFormat::OpenGL) { // Check profile and options. if (m_format.majorVersion() < 3) { m_format.setOption(QSurfaceFormat::DeprecatedFunctions); } else { GLint value = 0; glGetIntegerv(GL_CONTEXT_FLAGS, &value); if (!(value & GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT)) m_format.setOption(QSurfaceFormat::DeprecatedFunctions); if (value & GL_CONTEXT_FLAG_DEBUG_BIT) m_format.setOption(QSurfaceFormat::DebugContext); if (m_format.version() >= qMakePair(3, 2)) { value = 0; glGetIntegerv(GL_CONTEXT_PROFILE_MASK, &value); if (value & GL_CONTEXT_CORE_PROFILE_BIT) m_format.setProfile(QSurfaceFormat::CoreProfile); else if (value & GL_CONTEXT_COMPATIBILITY_PROFILE_BIT) m_format.setProfile(QSurfaceFormat::CompatibilityProfile); } } } } eglMakeCurrent(prevDisplay, prevSurfaceDraw, prevSurfaceRead, prevContext); } else { qWarning("QEGLPlatformContext: Failed to make temporary surface current, format not updated"); } if (tempSurface != EGL_NO_SURFACE) destroyTemporaryOffscreenSurface(tempSurface); #endif // QT_NO_OPENGL }
SkNativeGLContext::AutoContextRestore::AutoContextRestore() {
    fOldEGLContext = eglGetCurrentContext();
    fOldDisplay    = eglGetCurrentDisplay();
    fOldSurface    = eglGetCurrentSurface(EGL_DRAW);
}
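// Added sketch: the constructor above only captures the current EGL state; the matching
// restore would typically happen in the destructor. This is an assumption about that
// destructor, not code taken from the example.
SkNativeGLContext::AutoContextRestore::~AutoContextRestore() {
    if (fOldDisplay != EGL_NO_DISPLAY) {
        eglMakeCurrent(fOldDisplay, fOldSurface, fOldSurface, fOldEGLContext);
    }
}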
EGLContext EGLDisplayOpenVG::contextForSurface(const EGLSurface& surface) { ASSERT(surface != EGL_NO_SURFACE); if (m_platformSurfaces.contains(surface)) return m_platformSurfaces.get(surface)->eglContext(); eglBindAPI(EGL_OPENVG_API); ASSERT_EGL_NO_ERROR(); if (!m_sharedPlatformSurface) // shared context has not been created yet sharedPlatformSurface(); // creates the shared surface & context EGLint surfaceConfigId; if (m_surfaceConfigIds.contains(surface)) surfaceConfigId = m_surfaceConfigIds.get(surface); else { // Retrieve the same EGL config for context creation that was used to // create the the EGL surface. EGLBoolean success = eglQuerySurface(m_display, surface, EGL_CONFIG_ID, &surfaceConfigId); ASSERT(success == EGL_TRUE); ASSERT(surfaceConfigId != EGL_BAD_ATTRIBUTE); m_surfaceConfigIds.set(surface, surfaceConfigId); } if (m_compatibleConfigIds.contains(surfaceConfigId)) surfaceConfigId = m_compatibleConfigIds.get(surfaceConfigId); if (m_contexts.contains(surfaceConfigId)) return m_contexts.get(surfaceConfigId); EGLDisplay currentDisplay = eglGetCurrentDisplay(); EGLSurface currentReadSurface = eglGetCurrentSurface(EGL_READ); EGLSurface currentDrawSurface = eglGetCurrentSurface(EGL_DRAW); EGLContext currentContext = eglGetCurrentContext(); // Before creating a new context, let's try whether an existing one // is compatible with the surface. EGL doesn't give us a different way // to check context/surface compatibility than trying it out, so let's // do just that. HashMap<EGLint, EGLContext>::iterator end = m_contexts.end(); for (HashMap<EGLint, EGLContext>::iterator it = m_contexts.begin(); it != end; ++it) { eglMakeCurrent(m_display, surface, surface, (*it).second); if (eglGetError() == EGL_SUCCESS) { // Restore previous surface/context. if (currentContext != EGL_NO_CONTEXT) { eglMakeCurrent(currentDisplay, currentReadSurface, currentDrawSurface, currentContext); ASSERT_EGL_NO_ERROR(); } // Cool, surface is compatible to one of our existing contexts. m_compatibleConfigIds.set(surfaceConfigId, (*it).first); return (*it).second; } } // Restore previous surface/context. if (currentContext != EGL_NO_CONTEXT) { eglMakeCurrent(currentDisplay, currentReadSurface, currentDrawSurface, currentContext); ASSERT_EGL_NO_ERROR(); } EGLConfig config; EGLint numConfigs; const EGLint configAttribs[] = { EGL_CONFIG_ID, surfaceConfigId, EGL_NONE }; eglChooseConfig(m_display, configAttribs, &config, 1, &numConfigs); ASSERT_EGL_NO_ERROR(); ASSERT(numConfigs == 1); // We share all of the images and paths amongst the different contexts, // so that they can be used in all of them. Resources that are created // while m_sharedPlatformSurface->context() is current will be // accessible from all other contexts, but are not restricted to the // lifetime of those contexts. EGLContext context = eglCreateContext(m_display, config, m_sharedPlatformSurface->eglContext(), 0); ASSERT_EGL_NO_ERROR(); ASSERT(!m_contexts.contains(surfaceConfigId)); m_contexts.set(surfaceConfigId, context); return context; }
static jlong jni_eglGetCurrentDisplay(JNIEnv *_env, jobject _this) {
    return reinterpret_cast<jlong>(eglGetCurrentDisplay());
}
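// Added sketch: how such a stub is usually bound to a Java-side "native long
// eglGetCurrentDisplay()" method via RegisterNatives. The class name is a placeholder,
// not taken from the example.
static const JNINativeMethod gEGLMethods[] = {
    { "eglGetCurrentDisplay", "()J", (void *) jni_eglGetCurrentDisplay },
};

static int registerEGLNatives(JNIEnv *env) {
    jclass clazz = env->FindClass("com/example/EGLBindings");  // placeholder class
    if (!clazz)
        return JNI_ERR;
    return env->RegisterNatives(clazz, gEGLMethods,
                                sizeof(gEGLMethods) / sizeof(gEGLMethods[0]));
}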
static int create(struct gl_hwdec *hw) { GL *gl = hw->gl; struct priv *p = talloc_zero(hw, struct priv); hw->priv = p; p->current_image.buf = p->current_image.image_id = VA_INVALID_ID; p->log = hw->log; if (hw->hwctx) return -1; if (!eglGetCurrentDisplay()) return -1; const char *exts = eglQueryString(eglGetCurrentDisplay(), EGL_EXTENSIONS); if (!exts) return -1; if (!strstr(exts, "EXT_image_dma_buf_import") || !strstr(exts, "EGL_KHR_image_base") || !strstr(gl->extensions, "GL_OES_EGL_image") || !(gl->mpgl_caps & MPGL_CAP_TEX_RG)) return -1; // EGL_KHR_image_base p->CreateImageKHR = (void *)eglGetProcAddress("eglCreateImageKHR"); p->DestroyImageKHR = (void *)eglGetProcAddress("eglDestroyImageKHR"); // GL_OES_EGL_image p->EGLImageTargetTexture2DOES = (void *)eglGetProcAddress("glEGLImageTargetTexture2DOES"); if (!p->CreateImageKHR || !p->DestroyImageKHR || !p->EGLImageTargetTexture2DOES) return -1; p->display = create_native_va_display(gl); if (!p->display) return -1; p->ctx = va_initialize(p->display, p->log, true); if (!p->ctx) { vaTerminate(p->display); return -1; } if (hw->probing && va_guess_if_emulated(p->ctx)) { destroy(hw); return -1; } MP_VERBOSE(p, "using VAAPI EGL interop\n"); insane_hack(hw); if (!test_format(hw)) { destroy(hw); return -1; } hw->hwctx = &p->ctx->hwctx; return 0; }
void nativeRecord::init(){ _eglCreateImageKHR = ( PFNEGLCREATEIMAGEKHRPROC) eglGetProcAddress ( "eglCreateImageKHR" ); _eglDestroyImageKHR = ( PFNEGLDESTROYIMAGEKHRPROC) eglGetProcAddress( "eglDestroyImageKHR" ); _glEGLImageTargetTexture2DOES = (PFNGLEGLIMAGETARGETTEXTURE2DOESPROC) eglGetProcAddress( "glEGLImageTargetTexture2DOES" ); if(_eglCreateImageKHR == NULL){ LOGD("_eglCreateImageKHR error"); exit(1); } if(_eglDestroyImageKHR == NULL){ LOGD("_eglDestroyImageKHR error"); exit(1); } if(_glEGLImageTargetTexture2DOES == NULL){ LOGD("_glEGLImageTargetTexture2DOES error"); exit(1); } buffer = new GraphicBuffer ( gWidth , gHeight , HAL_PIXEL_FORMAT_BGRA_8888, GraphicBuffer :: USAGE_SW_READ_OFTEN | GraphicBuffer :: USAGE_HW_TEXTURE ); if((buffer->initCheck ()) != NO_ERROR) exit (1); android_native_buffer_t * anb = buffer->getNativeBuffer(); const EGLint attrs [] = { EGL_IMAGE_PRESERVED_KHR , EGL_TRUE, EGL_NONE, EGL_NONE }; pEGLImage = _eglCreateImageKHR(eglGetCurrentDisplay(),EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID, (EGLClientBuffer)anb, attrs); if(EGL_SUCCESS != eglGetError()) { LOGI("_eglCreateImageKHR failed."); return; } glGenTextures(1 , &iFBOTex ); checkGlError("glGenTextures(1 , &iFBOTex );"); glBindTexture(GL_TEXTURE_2D , iFBOTex); checkGlError("glBindTexture(GL_TEXTURE_2D , iFBOTex);"); // glTexParameteri(GL_TEXTURE_2D , GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST ); // glTexParameteri(GL_TEXTURE_2D , GL_TEXTURE_MAG_FILTER, GL_LINEAR ); glTexImage2D(GL_TEXTURE_2D, 0, GL_BGRA_EXT, gWidth, gHeight, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE, NULL); if(GL_NO_ERROR != glGetError()) { LOGI("glTexImage2D failed."); return; } glGenFramebuffers (1, &iFBO); checkGlError("glGenFramebuffers (1, &iFBO);"); glBindFramebuffer (GL_FRAMEBUFFER, iFBO); checkGlError("glBindFramebuffer (GL_FRAMEBUFFER, iFBO)"); glFramebufferTexture2D (GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, iFBOTex, 0); GLuint status = glCheckFramebufferStatus(GL_FRAMEBUFFER); if(status != GL_FRAMEBUFFER_COMPLETE) { LOGI("glCheckFramebufferStatus %d", status); } checkGlError("glFramebufferTexture2D"); _glEGLImageTargetTexture2DOES (GL_TEXTURE_2D, pEGLImage); checkGlError("_glEGLImageTargetTexture2DOES"); glBindFramebuffer (GL_FRAMEBUFFER,0); checkGlError("glBindFramebuffer (GL_FRAMEBUFFER,0)"); pixels = (char*)malloc(720*1280*4); memset(pixels,0,720*1280*4); }
status_t SurfaceTexture::updateTexImage(BufferRejecter* rejecter, bool skipSync, bool isComposition) { #else status_t SurfaceTexture::updateTexImage(BufferRejecter* rejecter, bool skipSync) { #endif ATRACE_CALL(); ST_LOGV("updateTexImage"); Mutex::Autolock lock(mMutex); status_t err = NO_ERROR; if (mAbandoned) { ST_LOGE("updateTexImage: SurfaceTexture is abandoned!"); return NO_INIT; } if (!mAttached) { ST_LOGE("updateTexImage: SurfaceTexture is not attached to an OpenGL " "ES context"); return INVALID_OPERATION; } EGLDisplay dpy = eglGetCurrentDisplay(); EGLContext ctx = eglGetCurrentContext(); if ((mEglDisplay != dpy && mEglDisplay != EGL_NO_DISPLAY) || dpy == EGL_NO_DISPLAY) { ST_LOGE("updateTexImage: invalid current EGLDisplay"); return INVALID_OPERATION; } if ((mEglContext != ctx && mEglContext != EGL_NO_CONTEXT) || ctx == EGL_NO_CONTEXT) { ST_LOGE("updateTexImage: invalid current EGLContext"); return INVALID_OPERATION; } mEglDisplay = dpy; mEglContext = ctx; BufferQueue::BufferItem item; // In asynchronous mode the list is guaranteed to be one buffer // deep, while in synchronous mode we use the oldest buffer. err = acquireBufferLocked(&item); if (err == NO_ERROR) { int buf = item.mBuf; // we call the rejecter here, in case the caller has a reason to // not accept this buffer. this is used by SurfaceFlinger to // reject buffers which have the wrong size if (rejecter && rejecter->reject(mSlots[buf].mGraphicBuffer, item)) { releaseBufferLocked(buf, dpy, EGL_NO_SYNC_KHR); glBindTexture(mTexTarget, mTexName); return NO_ERROR; } #ifdef DECIDE_TEXTURE_TARGET // GPU is not efficient in handling GL_TEXTURE_EXTERNAL_OES // texture target. Depending on the image format, decide, // the texture target to be used if(isComposition){ switch (mSlots[buf].mGraphicBuffer->format) { case HAL_PIXEL_FORMAT_RGBA_8888: case HAL_PIXEL_FORMAT_RGBX_8888: case HAL_PIXEL_FORMAT_RGB_888: case HAL_PIXEL_FORMAT_RGB_565: case HAL_PIXEL_FORMAT_BGRA_8888: case HAL_PIXEL_FORMAT_RGBA_5551: case HAL_PIXEL_FORMAT_RGBA_4444: mTexTarget = GL_TEXTURE_2D; break; default: mTexTarget = GL_TEXTURE_EXTERNAL_OES; break; } } #endif GLint error; while ((error = glGetError()) != GL_NO_ERROR) { ST_LOGW("updateTexImage: clearing GL error: %#04x", error); } EGLImageKHR image = mEglSlots[buf].mEglImage; glBindTexture(mTexTarget, mTexName); glEGLImageTargetTexture2DOES(mTexTarget, (GLeglImageOES)image); while ((error = glGetError()) != GL_NO_ERROR) { ST_LOGE("updateTexImage: error binding external texture image %p " "(slot %d): %#04x", image, buf, error); err = UNKNOWN_ERROR; } if (err == NO_ERROR) { err = syncForReleaseLocked(dpy); } if (err != NO_ERROR) { // Release the buffer we just acquired. It's not safe to // release the old buffer, so instead we just drop the new frame. releaseBufferLocked(buf, dpy, EGL_NO_SYNC_KHR); return err; } ST_LOGV("updateTexImage: (slot=%d buf=%p) -> (slot=%d buf=%p)", mCurrentTexture, mCurrentTextureBuf != NULL ? mCurrentTextureBuf->handle : 0, buf, mSlots[buf].mGraphicBuffer->handle); // release old buffer if (mCurrentTexture != BufferQueue::INVALID_BUFFER_SLOT) { status_t status = releaseBufferLocked(mCurrentTexture, dpy, mEglSlots[mCurrentTexture].mEglFence); if (status != NO_ERROR && status != BufferQueue::STALE_BUFFER_SLOT) { ST_LOGE("updateTexImage: failed to release buffer: %s (%d)", strerror(-status), status); err = status; } } // Update the SurfaceTexture state. 
mCurrentTexture = buf; mCurrentTextureBuf = mSlots[buf].mGraphicBuffer; mCurrentCrop = item.mCrop; mCurrentTransform = item.mTransform; mCurrentScalingMode = item.mScalingMode; mCurrentTimestamp = item.mTimestamp; mCurrentFence = item.mFence; if (!skipSync) { // SurfaceFlinger needs to lazily perform GLES synchronization // only when it's actually going to use GLES for compositing. // Eventually SurfaceFlinger should have its own consumer class, // but for now we'll just hack it in to SurfaceTexture. // SurfaceFlinger is responsible for calling doGLFenceWait before // texturing from this SurfaceTexture. doGLFenceWaitLocked(); } computeCurrentTransformMatrixLocked(); } else { if (err < 0) { ST_LOGE("updateTexImage: acquire failed: %s (%d)", strerror(-err), err); return err; } // We always bind the texture even if we don't update its contents. glBindTexture(mTexTarget, mTexName); return OK; } return err; }
static int map_image(struct gl_hwdec *hw, struct mp_image *hw_image, GLuint *out_textures) { struct priv *p = hw->priv; GL *gl = hw->gl; VAStatus status; VAImage *va_image = &p->current_image; unref_image(hw); mp_image_setrefp(&p->current_ref, hw_image); va_lock(p->ctx); status = vaDeriveImage(p->display, va_surface_id(hw_image), va_image); if (!CHECK_VA_STATUS(p, "vaDeriveImage()")) goto err; int mpfmt = va_fourcc_to_imgfmt(va_image->format.fourcc); if (mpfmt != IMGFMT_NV12 && mpfmt != IMGFMT_420P) { MP_FATAL(p, "unsupported VA image format %s\n", VA_STR_FOURCC(va_image->format.fourcc)); goto err; } if (!hw->converted_imgfmt) { MP_VERBOSE(p, "format: %s %s\n", VA_STR_FOURCC(va_image->format.fourcc), mp_imgfmt_to_name(mpfmt)); hw->converted_imgfmt = mpfmt; } if (hw->converted_imgfmt != mpfmt) { MP_FATAL(p, "mid-stream hwdec format change (%s -> %s) not supported\n", mp_imgfmt_to_name(hw->converted_imgfmt), mp_imgfmt_to_name(mpfmt)); goto err; } VABufferInfo buffer_info = {.mem_type = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME}; status = vaAcquireBufferHandle(p->display, va_image->buf, &buffer_info); if (!CHECK_VA_STATUS(p, "vaAcquireBufferHandle()")) goto err; p->buffer_acquired = true; struct mp_image layout = {0}; mp_image_set_params(&layout, &hw_image->params); mp_image_setfmt(&layout, mpfmt); // (it would be nice if we could use EGL_IMAGE_INTERNAL_FORMAT_EXT) int drm_fmts[4] = {MP_FOURCC('R', '8', ' ', ' '), // DRM_FORMAT_R8 MP_FOURCC('G', 'R', '8', '8'), // DRM_FORMAT_GR88 MP_FOURCC('R', 'G', '2', '4'), // DRM_FORMAT_RGB888 MP_FOURCC('R', 'A', '2', '4')}; // DRM_FORMAT_RGBA8888 for (int n = 0; n < layout.num_planes; n++) { int attribs[20] = {EGL_NONE}; int num_attribs = 0; ADD_ATTRIB(EGL_LINUX_DRM_FOURCC_EXT, drm_fmts[layout.fmt.bytes[n] - 1]); ADD_ATTRIB(EGL_WIDTH, mp_image_plane_w(&layout, n)); ADD_ATTRIB(EGL_HEIGHT, mp_image_plane_h(&layout, n)); ADD_ATTRIB(EGL_DMA_BUF_PLANE0_FD_EXT, buffer_info.handle); ADD_ATTRIB(EGL_DMA_BUF_PLANE0_OFFSET_EXT, va_image->offsets[n]); ADD_ATTRIB(EGL_DMA_BUF_PLANE0_PITCH_EXT, va_image->pitches[n]); p->images[n] = p->CreateImageKHR(eglGetCurrentDisplay(), EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, attribs); if (!p->images[n]) goto err; gl->BindTexture(GL_TEXTURE_2D, p->gl_textures[n]); p->EGLImageTargetTexture2DOES(GL_TEXTURE_2D, p->images[n]); out_textures[n] = p->gl_textures[n]; } gl->BindTexture(GL_TEXTURE_2D, 0); if (va_image->format.fourcc == VA_FOURCC_YV12) MPSWAP(GLuint, out_textures[1], out_textures[2]); va_unlock(p->ctx); return 0; err: va_unlock(p->ctx); MP_FATAL(p, "mapping VAAPI EGL image failed\n"); unref_image(hw); return -1; }
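// Added note: ADD_ATTRIB is a local helper that is not shown in this snippet. A plausible
// reconstruction, based purely on how it is used above (an assumption, not necessarily
// mpv's exact macro), appends a name/value pair to 'attribs' and keeps the list terminated
// with EGL_NONE. Requires <assert.h>.
#define ADD_ATTRIB(name, value)                                                   \
    do {                                                                          \
        assert(num_attribs + 3 <= (int)(sizeof(attribs) / sizeof(attribs[0])));   \
        attribs[num_attribs++] = (name);                                          \
        attribs[num_attribs++] = (value);                                         \
        attribs[num_attribs]   = EGL_NONE;                                        \
    } while (0)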
status_t SurfaceTexture::attachToContext(GLuint tex) { ATRACE_CALL(); ST_LOGV("attachToContext"); Mutex::Autolock lock(mMutex); if (mAbandoned) { ST_LOGE("attachToContext: abandoned SurfaceTexture"); return NO_INIT; } if (mAttached) { ST_LOGE("attachToContext: SurfaceTexture is already attached to a " "context"); return INVALID_OPERATION; } EGLDisplay dpy = eglGetCurrentDisplay(); EGLContext ctx = eglGetCurrentContext(); if (dpy == EGL_NO_DISPLAY) { ST_LOGE("attachToContext: invalid current EGLDisplay"); return INVALID_OPERATION; } if (ctx == EGL_NO_CONTEXT) { ST_LOGE("attachToContext: invalid current EGLContext"); return INVALID_OPERATION; } // We need to bind the texture regardless of whether there's a current // buffer. glBindTexture(mTexTarget, tex); if (mCurrentTextureBuf != NULL) { // The EGLImageKHR that was associated with the slot was destroyed when // the SurfaceTexture was detached from the old context, so we need to // recreate it here. EGLImageKHR image = createImage(dpy, mCurrentTextureBuf); if (image == EGL_NO_IMAGE_KHR) { return UNKNOWN_ERROR; } // Attach the current buffer to the GL texture. glEGLImageTargetTexture2DOES(mTexTarget, (GLeglImageOES)image); GLint error; status_t err = OK; while ((error = glGetError()) != GL_NO_ERROR) { ST_LOGE("attachToContext: error binding external texture image %p " "(slot %d): %#04x", image, mCurrentTexture, error); err = UNKNOWN_ERROR; } // We destroy the EGLImageKHR here because the current buffer may no // longer be associated with one of the buffer slots, so we have // nowhere to to store it. If the buffer is still associated with a // slot then another EGLImageKHR will be created next time that buffer // gets acquired in updateTexImage. eglDestroyImageKHR(dpy, image); if (err != OK) { return err; } } mEglDisplay = dpy; mEglContext = ctx; mTexName = tex; mAttached = true; return OK; }
/*! Creates an OpenCL context that is compatible with the current QGLContext and \a platform. Returns false if there is no OpenGL context current or the OpenCL context could not be created for some reason. This function will first try to create a QCLDevice::GPU device, and will then fall back to QCLDevice::Default if a GPU is not found. If \a platform is null, then the first platform that has a GPU will be used. If there is no GPU, then the first platform with a default device will be used. \sa supportsObjectSharing() */ bool QCLContextGL::create(const QCLPlatform &platform) { Q_D(QCLContextGL); // Bail out if the context already exists. if (isCreated()) return true; // Bail out if we don't have an OpenGL context. if (!QGLContext::currentContext()) { qWarning() << "QCLContextGL::create: needs a current GL context"; setLastError(CL_INVALID_CONTEXT); return false; } // Find the first gpu device. QList<QCLDevice> devices; cl_device_type deviceType = CL_DEVICE_TYPE_GPU; devices = QCLDevice::devices(QCLDevice::GPU, platform); if (devices.isEmpty()) { // Find the first default device. devices = QCLDevice::devices(QCLDevice::Default, platform); deviceType = CL_DEVICE_TYPE_DEFAULT; } if (devices.isEmpty()) { qWarning() << "QCLContextGL::create: no gpu devices found"; setLastError(CL_DEVICE_NOT_FOUND); return false; } QCLDevice gpu = devices[0]; QVarLengthArray<cl_device_id> devs; foreach (QCLDevice dev, devices) devs.append(dev.deviceId()); // Add the platform identifier to the properties. QVarLengthArray<cl_context_properties> properties; properties.append(CL_CONTEXT_PLATFORM); properties.append(cl_context_properties(gpu.platform().platformId())); bool hasSharing = false; #ifndef QT_NO_CL_OPENGL // Determine what kind of OpenCL-OpenGL sharing we have and enable it. #if defined(__APPLE__) || defined(__MACOSX) bool appleSharing = gpu.hasExtension("cl_apple_gl_sharing"); if (appleSharing) { CGLContextObj cglContext = CGLGetCurrentContext(); CGLShareGroupObj cglShareGroup = CGLGetShareGroup(cglContext); properties.append(CL_CGL_SHAREGROUP_KHR); properties.append(cl_context_properties(cglShareGroup)); hasSharing = true; } #else bool khrSharing = gpu.hasExtension("cl_khr_gl_sharing"); #if defined(QT_OPENGL_ES_2) || defined(QT_OPENGL_ES) if (khrSharing) { properties.append(CL_EGL_DISPLAY_KHR); properties.append(cl_context_properties(eglGetCurrentDisplay())); #ifdef EGL_OPENGL_ES_API eglBindAPI(EGL_OPENGL_ES_API); #endif properties.append(CL_GL_CONTEXT_KHR); properties.append(cl_context_properties(eglGetCurrentContext())); hasSharing = true; } #elif defined(Q_WS_X11) if (khrSharing) { properties.append(CL_GLX_DISPLAY_KHR); properties.append(cl_context_properties(glXGetCurrentDisplay())); properties.append(CL_GL_CONTEXT_KHR); properties.append(cl_context_properties(glXGetCurrentContext())); hasSharing = true; } #else // Needs to be ported to other platforms. if (khrSharing) qWarning() << "QCLContextGL::create: do not know how to enable sharing"; #endif #endif #endif // !QT_NO_CL_OPENGL properties.append(0); #ifndef QT_NO_CL_OPENGL // Query the actual OpenCL devices we should use with the OpenGL context. 
typedef cl_int (*q_PFNCLGETGLCONTEXTINFOKHR) (const cl_context_properties *, cl_uint, size_t, void *, size_t *); q_PFNCLGETGLCONTEXTINFOKHR getGLContextInfo = (q_PFNCLGETGLCONTEXTINFOKHR)clGetExtensionFunctionAddress ("clGetGLContextInfoKHR"); if (getGLContextInfo && hasSharing) { size_t size; cl_device_id currentDev; if(getGLContextInfo(properties.data(), CL_DEVICES_FOR_GL_CONTEXT_KHR, 0, 0, &size) == CL_SUCCESS && size > 0) { QVarLengthArray<cl_device_id> buf(size / sizeof(cl_device_id)); getGLContextInfo(properties.data(), CL_DEVICES_FOR_GL_CONTEXT_KHR, size, buf.data(), 0); devs = buf; gpu = QCLDevice(devs[0]); } if (getGLContextInfo(properties.data(), CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR, sizeof(currentDev), &currentDev, 0) == CL_SUCCESS) { gpu = QCLDevice(currentDev); } } #endif // Create the OpenCL context. cl_context id; cl_int error; id = clCreateContext (properties.data(), devs.size(), devs.data(), qt_clgl_context_notify, 0, &error); if (!id && hasSharing) { // Try again without the sharing parameters. properties.resize(2); properties.append(0); hasSharing = false; id = clCreateContext (properties.data(), devs.size(), devs.data(), qt_clgl_context_notify, 0, &error); } setLastError(error); if (id == 0) { qWarning() << "QCLContextGL::create:" << errorName(error); d->supportsSharing = false; } else { setContextId(id); clReleaseContext(id); // setContextId() adds an extra reference. setDefaultDevice(gpu); d->supportsSharing = hasSharing; } return id != 0; }
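The clGetGLContextInfoKHR lookup above is the portable way to find the device that actually drives the current GL context. A stripped-down raw-OpenCL sketch of the same query follows; it assumes the zero-terminated properties list already carries the platform and GL/EGL sharing handles, and it uses the older clGetExtensionFunctionAddress entry point for brevity (clGetExtensionFunctionAddressForPlatform is preferred on OpenCL 1.2 and later).

#include <CL/cl.h>
#include <CL/cl_gl.h>

/* Sketch: resolve clGetGLContextInfoKHR at run time and ask which device owns
 * the GL context described by 'props'. Returns 0 if the extension is missing
 * or the query fails. */
typedef cl_int (CL_API_CALL *PFN_clGetGLContextInfoKHR)(
    const cl_context_properties *, cl_gl_context_info, size_t, void *, size_t *);

static cl_device_id interopDeviceFor(const cl_context_properties *props)
{
    PFN_clGetGLContextInfoKHR getInfo = (PFN_clGetGLContextInfoKHR)
        clGetExtensionFunctionAddress("clGetGLContextInfoKHR");
    cl_device_id dev = 0;
    if (getInfo &&
        getInfo(props, CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR,
                sizeof(dev), &dev, NULL) == CL_SUCCESS)
        return dev;
    return 0;
}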
void showRenderBuffer() { int i; EGLContext eglContext; EGLDisplay eglDisplay; EGLSurface eglSurface; grs_font *font; JNIEnv *env; jclass clazz; jmethodID method; if (Want_pause) { // Save this in case we need to destroy it later eglContext = eglGetCurrentContext(); eglDisplay = eglGetCurrentDisplay(); eglSurface = eglGetCurrentSurface(EGL_DRAW); // Close digi so another application can use the OpenSL ES objects digi_close_digi(); (*jvm)->GetEnv(jvm, (void **) &env, JNI_VERSION_1_6); clazz = (*env)->FindClass(env, "tuchsen/descent/DescentView"); // Pause this thread method = (*env)->GetMethodID(env, clazz, "pauseRenderThread", "()V"); (*env)->CallVoidMethod(env, Descent_view, method); digi_init_digi(); if (Surface_was_destroyed) { // Purge all texture assets, since the EGL context will be blown away for (i = 0; i < MAX_FONTS; ++i) { font = Gamefonts[i]; glDeleteTextures(font->ft_maxchar - font->ft_minchar, font->ft_ogles_texes); memset(font->ft_ogles_texes, 0, (font->ft_maxchar - font->ft_minchar) * sizeof(GLuint)); } for (i = 0; i < MAX_BITMAP_FILES; ++i) { glDeleteTextures(1, &GameBitmaps[i].bm_ogles_tex_id); GameBitmaps[i].bm_ogles_tex_id = 0; } texmerge_close(); texmerge_init(50); glDeleteTextures(1, &nm_background.bm_ogles_tex_id); nm_background.bm_ogles_tex_id = 0; // Blow away EGL surface and context eglMakeCurrent(eglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); eglDestroySurface(eglDisplay, eglSurface); eglDestroyContext(eglDisplay, eglContext); eglTerminate(eglDisplay); // Reset EGL context method = (*env)->GetMethodID(env, clazz, "initEgl", "()V"); (*env)->CallVoidMethod(env, Descent_view, method); (*env)->DeleteLocalRef(env, clazz); eglSurfaceAttrib(eglGetCurrentDisplay(), eglGetCurrentSurface(EGL_DRAW), EGL_SWAP_BEHAVIOR, EGL_BUFFER_PRESERVED); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Hack to show stuff like menus if (Game_mode != GM_NORMAL || In_screen) { mouse_handler(-1, -1, true); mouse_handler(-1, -1, false); } Surface_was_destroyed = false; } Want_pause = false; } else { draw_buttons(); eglSwapBuffers(eglGetCurrentDisplay(), eglGetCurrentSurface(EGL_READ)); can_save_screen = !can_save_screen; } }
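After the EGL context is rebuilt, the code above re-requests EGL_BUFFER_PRESERVED so partial redraws survive buffer swaps. A hedged stand-alone sketch of that step, including a verification query, might look like this (the function name is illustrative):

#include <EGL/egl.h>
#include <stdbool.h>

/* Sketch: ask for preserved-buffer swaps on the currently bound draw surface
 * and confirm the driver accepted it. The surface's config must expose
 * EGL_SWAP_BEHAVIOR_PRESERVED_BIT for the attribute change to stick. */
static bool enablePreservedSwap(void)
{
    EGLDisplay dpy = eglGetCurrentDisplay();
    EGLSurface surf = eglGetCurrentSurface(EGL_DRAW);
    if (dpy == EGL_NO_DISPLAY || surf == EGL_NO_SURFACE)
        return false;
    if (!eglSurfaceAttrib(dpy, surf, EGL_SWAP_BEHAVIOR, EGL_BUFFER_PRESERVED))
        return false;   /* config does not support preserved swaps */
    EGLint behavior = EGL_BUFFER_DESTROYED;
    eglQuerySurface(dpy, surf, EGL_SWAP_BEHAVIOR, &behavior);
    return behavior == EGL_BUFFER_PRESERVED;
}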
static jint jni_eglGetCurrentDisplay(JNIEnv *_env, jobject _this) { return (jint)eglGetCurrentDisplay(); }
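The JNI wrapper above narrows the EGLDisplay handle to jint, which silently truncates the pointer on 64-bit targets. A hedged 64-bit-safe variant (hypothetical name; the Java-side declaration would have to return long accordingly) is:

#include <jni.h>
#include <stdint.h>
#include <EGL/egl.h>

/* Sketch: EGLDisplay is a pointer-sized handle, so returning it as jlong
 * avoids truncation on LP64/LLP64 platforms. */
static jlong jni_eglGetCurrentDisplay64(JNIEnv *_env, jobject _this)
{
    return (jlong)(intptr_t)eglGetCurrentDisplay();
}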
static bool getDrawableBounds(GLint *width, GLint *height) { #if defined(__linux__) if (dlsym(RTLD_DEFAULT, "eglGetCurrentContext")) { EGLContext currentContext = eglGetCurrentContext(); if (currentContext == EGL_NO_CONTEXT) { return false; } EGLSurface currentSurface = eglGetCurrentSurface(EGL_DRAW); if (currentSurface == EGL_NO_SURFACE) { return false; } EGLDisplay currentDisplay = eglGetCurrentDisplay(); if (currentDisplay == EGL_NO_DISPLAY) { return false; } if (!eglQuerySurface(currentDisplay, currentSurface, EGL_WIDTH, width) || !eglQuerySurface(currentDisplay, currentSurface, EGL_HEIGHT, height)) { return false; } return true; } #endif #if defined(_WIN32) HDC hDC = wglGetCurrentDC(); if (!hDC) { return false; } HWND hWnd = WindowFromDC(hDC); RECT rect; if (!GetClientRect(hWnd, &rect)) { return false; } *width = rect.right - rect.left; *height = rect.bottom - rect.top; return true; #elif defined(__APPLE__) CGLContextObj ctx = CGLGetCurrentContext(); if (ctx == NULL) { return false; } CGSConnectionID cid; CGSWindowID wid; CGSSurfaceID sid; if (CGLGetSurface(ctx, &cid, &wid, &sid) != kCGLNoError) { return false; } CGRect rect; if (CGSGetSurfaceBounds(cid, wid, sid, &rect) != 0) { return false; } *width = rect.size.width; *height = rect.size.height; return true; #elif defined(HAVE_X11) Display *display; Drawable drawable; Window root; int x, y; unsigned int w, h, bw, depth; display = glXGetCurrentDisplay(); if (!display) { return false; } drawable = glXGetCurrentDrawable(); if (drawable == None) { return false; } if (!XGetGeometry(display, drawable, &root, &x, &y, &w, &h, &bw, &depth)) { return false; } *width = w; *height = h; return true; #else return false; #endif }
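A typical caller only needs the bounds to keep the viewport in sync with the drawable. A minimal usage sketch, assuming the getDrawableBounds() shown above is visible in the same translation unit and desktop GL headers are in use:

#include <GL/gl.h>

/* Usage sketch: refresh the GL viewport from whatever drawable is current. */
static void syncViewportToDrawable(void)
{
    GLint w = 0, h = 0;
    if (getDrawableBounds(&w, &h) && w > 0 && h > 0)
        glViewport(0, 0, w, h);
}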
void EGLPlatformContext::updateFormatFromGL() { #ifndef QT_NO_OPENGL // Have to save & restore to prevent QOpenGLContext::currentContext() from becoming // inconsistent after QOpenGLContext::create(). EGLDisplay prevDisplay = eglGetCurrentDisplay(); if (prevDisplay == EGL_NO_DISPLAY) // when no context is current prevDisplay = m_eglDisplay; EGLContext prevContext = eglGetCurrentContext(); EGLSurface prevSurfaceDraw = eglGetCurrentSurface(EGL_DRAW); EGLSurface prevSurfaceRead = eglGetCurrentSurface(EGL_READ); // Rely on the surfaceless extension, if available. This is beneficial since we can // avoid creating an extra pbuffer surface which is apparently troublesome with some // drivers (Mesa) when certain attributes are present (multisampling). EGLSurface tempSurface = EGL_NO_SURFACE; EGLContext tempContext = EGL_NO_CONTEXT; if (m_flags.testFlag(NoSurfaceless) || !EglUtils::hasEglExtension(m_eglDisplay, "EGL_KHR_surfaceless_context")) tempSurface = createTemporaryOffscreenSurface(); EGLBoolean ok = eglMakeCurrent(m_eglDisplay, tempSurface, tempSurface, m_eglContext); if (!ok) { EGLConfig config = EglUtils::configFromGLFormat(m_eglDisplay, m_format, false, EGL_PBUFFER_BIT); tempContext = eglCreateContext(m_eglDisplay, config, 0, m_contextAttrs.constData()); if (tempContext != EGL_NO_CONTEXT) ok = eglMakeCurrent(m_eglDisplay, tempSurface, tempSurface, tempContext); } if (ok) { if (m_format.renderableType() == QSurfaceFormat::OpenGL || m_format.renderableType() == QSurfaceFormat::OpenGLES) { const GLubyte *s = glGetString(GL_VERSION); if (s) { QByteArray version = QByteArray(reinterpret_cast<const char *>(s)); int major, minor; if (QPlatformOpenGLContext::parseOpenGLVersion(version, major, minor)) { #ifdef Q_OS_ANDROID // Some Android 4.2.2 devices report OpenGL ES 3.0 without the functions being available. static int apiLevel = QtAndroidPrivate::androidSdkVersion(); if (apiLevel <= 17 && major >= 3) { major = 2; minor = 0; } #endif m_format.setMajorVersion(major); m_format.setMinorVersion(minor); } } m_format.setProfile(QSurfaceFormat::NoProfile); m_format.setOptions(QSurfaceFormat::FormatOptions()); if (m_format.renderableType() == QSurfaceFormat::OpenGL) { // Check profile and options. if (m_format.majorVersion() < 3) { m_format.setOption(QSurfaceFormat::DeprecatedFunctions); } else { GLint value = 0; glGetIntegerv(GL_CONTEXT_FLAGS, &value); if (!(value & GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT)) m_format.setOption(QSurfaceFormat::DeprecatedFunctions); if (value & GL_CONTEXT_FLAG_DEBUG_BIT) m_format.setOption(QSurfaceFormat::DebugContext); if (m_format.version() >= qMakePair(3, 2)) { value = 0; glGetIntegerv(GL_CONTEXT_PROFILE_MASK, &value); if (value & GL_CONTEXT_CORE_PROFILE_BIT) m_format.setProfile(QSurfaceFormat::CoreProfile); else if (value & GL_CONTEXT_COMPATIBILITY_PROFILE_BIT) m_format.setProfile(QSurfaceFormat::CompatibilityProfile); } } } } runGLchecks(); eglMakeCurrent(prevDisplay, prevSurfaceDraw, prevSurfaceRead, prevContext); } else { qCWarning(lcEglConvenience, "Failed to make temporary surface current, format not updated (%x)", eglGetError()); } if (tempSurface != EGL_NO_SURFACE) destroyTemporaryOffscreenSurface(tempSurface); if (tempContext != EGL_NO_CONTEXT) eglDestroyContext(m_eglDisplay, tempContext); #endif // QT_NO_OPENGL }
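The save-and-restore dance around the temporary make-current above is easy to break when early returns are added later. One way to make it return-safe is a small RAII guard; the class below is a sketch under that assumption and is not part of the Qt code shown.

#include <EGL/egl.h>

// Sketch: capture the calling thread's current EGL bindings and restore them
// on scope exit, so a temporary eglMakeCurrent() cannot leak state.
class ScopedEglCurrentGuard
{
public:
    ScopedEglCurrentGuard()
        : m_display(eglGetCurrentDisplay()),
          m_context(eglGetCurrentContext()),
          m_draw(eglGetCurrentSurface(EGL_DRAW)),
          m_read(eglGetCurrentSurface(EGL_READ))
    {
    }
    ~ScopedEglCurrentGuard()
    {
        if (m_display != EGL_NO_DISPLAY)
            eglMakeCurrent(m_display, m_draw, m_read, m_context);
    }
private:
    EGLDisplay m_display;
    EGLContext m_context;
    EGLSurface m_draw;
    EGLSurface m_read;
};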