EglHwcomposerBackend::EglHwcomposerBackend(HwcomposerBackend *backend)
    : AbstractEglBackend()
    , m_backend(backend)
{
    // EGL is always direct rendering
    setIsDirectRendering(true);
    setSyncsToVBlank(true);
    setBlocksForRetrace(true);
}
void GlxBackend::present()
{
    if (lastDamage().isEmpty())
        return;

    const QSize &screenSize = screens()->size();
    const QRegion displayRegion(0, 0, screenSize.width(), screenSize.height());
    const bool fullRepaint = supportsBufferAge() || (lastDamage() == displayRegion);

    if (fullRepaint) {
        if (m_haveINTELSwapEvent)
            Compositor::self()->aboutToSwapBuffers();

        if (haveSwapInterval) {
            if (gs_tripleBufferNeedsDetection) {
                glXWaitGL();
                m_swapProfiler.begin();
            }
            glXSwapBuffers(display(), glxWindow);
            if (gs_tripleBufferNeedsDetection) {
                glXWaitGL();
                if (char result = m_swapProfiler.end()) {
                    gs_tripleBufferUndetected = gs_tripleBufferNeedsDetection = false;
                    if (result == 'd' && GLPlatform::instance()->driver() == Driver_NVidia) {
                        // TODO this is a workaround, we should get __GL_YIELD set before libGL checks it
                        if (qstrcmp(qgetenv("__GL_YIELD"), "USLEEP")) {
                            options->setGlPreferBufferSwap(0);
                            setSwapInterval(0);
                            result = 0; // hint proper behavior
                            qCWarning(KWIN_CORE) << "\nIt seems you are using the nvidia driver without triple buffering\n"
                                                    "You must export __GL_YIELD=\"USLEEP\" to prevent large CPU overhead on synced swaps\n"
                                                    "Preferably, enable the TripleBuffer Option in the xorg.conf Device\n"
                                                    "For this reason, the tearing prevention has been disabled.\n"
                                                    "See https://bugs.kde.org/show_bug.cgi?id=322060\n";
                        }
                    }
                    setBlocksForRetrace(result == 'd');
                }
            } else if (blocksForRetrace()) {
                // At least the nvidia blob manages to swap async, i.e. return immediately on double
                // buffering, which messes up our timing calculation and leads to laggy behavior (#346275)
                glXWaitGL();
            }
        } else {
            waitSync();
            glXSwapBuffers(display(), glxWindow);
        }
        if (supportsBufferAge()) {
            glXQueryDrawable(display(), glxWindow, GLX_BACK_BUFFER_AGE_EXT, (GLuint *) &m_bufferAge);
        }
    } else if (m_haveMESACopySubBuffer) {
        foreach (const QRect &r, lastDamage().rects()) {
            // convert to OpenGL coordinates
            int y = screenSize.height() - r.y() - r.height();
            glXCopySubBufferMESA(display(), glxWindow, r.x(), y, r.width(), r.height());
        }
    } else { // Copy Pixels (horribly slow on Mesa)
        // copy the damaged region from the back buffer to the front buffer
        glDrawBuffer(GL_FRONT);
        copyPixels(lastDamage());
        glDrawBuffer(GL_BACK);
    }

    setLastDamage(QRegion());
    if (!supportsBufferAge()) {
        glXWaitGL();
        XFlush(display());
    }
}
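Both present() implementations drive triple-buffer detection through m_swapProfiler, which times the swap call. KWin's real profiler averages over many frames before deciding; the following is only a minimal sketch, assuming a hypothetical single-sample threshold, of how timing glXSwapBuffers() can classify a swap as blocking ('d', double buffered) or non-blocking ('t', triple buffered):

#include <QElapsedTimer>

// Minimal sketch, not KWin's real SwapProfiler: a swap that takes a large
// fraction of a 60 Hz frame (~16.7 ms) is assumed to have blocked for the
// retrace ('d'); one that returns almost immediately is assumed to have
// queued into a third buffer ('t'). The threshold and the single-sample
// decision are simplifying assumptions.
class SwapProfilerSketch
{
public:
    void init() { m_timer.invalidate(); }
    void begin() { m_timer.start(); }
    char end()
    {
        if (!m_timer.isValid())
            return 0; // no measurement yet, keep detection running
        const qint64 elapsed = m_timer.nsecsElapsed();
        return elapsed > 8000000 ? 'd' : 't'; // ~half a frame at 60 Hz
    }
private:
    QElapsedTimer m_timer;
};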
void EglOnXBackend::present()
{
    if (lastDamage().isEmpty())
        return;

    const QRegion displayRegion(0, 0, displayWidth(), displayHeight());
    const bool fullRepaint = supportsBufferAge() || (lastDamage() == displayRegion);

    if (fullRepaint || !surfaceHasSubPost) {
        if (gs_tripleBufferNeedsDetection) {
            eglWaitGL();
            m_swapProfiler.begin();
        }
        // the entire screen changed, or we cannot do partial updates (which implies we enabled surface preservation)
        eglSwapBuffers(dpy, surface);
        if (gs_tripleBufferNeedsDetection) {
            eglWaitGL();
            if (char result = m_swapProfiler.end()) {
                gs_tripleBufferUndetected = gs_tripleBufferNeedsDetection = false;
                if (result == 'd' && GLPlatform::instance()->driver() == Driver_NVidia) {
                    // TODO this is a workaround, we should get __GL_YIELD set before libGL checks it
                    if (qstrcmp(qgetenv("__GL_YIELD"), "USLEEP")) {
                        options->setGlPreferBufferSwap(0);
                        eglSwapInterval(dpy, 0);
                        qWarning() << "\nIt seems you are using the nvidia driver without triple buffering\n"
                                      "You must export __GL_YIELD=\"USLEEP\" to prevent large CPU overhead on synced swaps\n"
                                      "Preferably, enable the TripleBuffer Option in the xorg.conf Device\n"
                                      "For this reason, the tearing prevention has been disabled.\n"
                                      "See https://bugs.kde.org/show_bug.cgi?id=322060\n";
                    }
                }
                setBlocksForRetrace(result == 'd');
            }
        }
        if (supportsBufferAge()) {
            eglQuerySurface(dpy, surface, EGL_BUFFER_AGE_EXT, &m_bufferAge);
        }
    } else {
        // a part of the screen changed, and we can use eglPostSubBufferNV to copy the updated area
        foreach (const QRect &r, lastDamage().rects()) {
            eglPostSubBufferNV(dpy, surface, r.left(), displayHeight() - r.bottom() - 1, r.width(), r.height());
        }
    }

    setLastDamage(QRegion());
    if (!supportsBufferAge()) {
        eglWaitGL();
        xcb_flush(connection());
    }
}
void GlxBackend::present()
{
    if (lastDamage().isEmpty())
        return;

    const QRegion displayRegion(0, 0, displayWidth(), displayHeight());
    const bool fullRepaint = supportsBufferAge() || (lastDamage() == displayRegion);

    if (fullRepaint) {
        if (haveSwapInterval) {
            if (gs_tripleBufferNeedsDetection) {
                glXWaitGL();
                m_swapProfiler.begin();
            }
            glXSwapBuffers(display(), glxWindow);
            if (gs_tripleBufferNeedsDetection) {
                glXWaitGL();
                if (char result = m_swapProfiler.end()) {
                    gs_tripleBufferUndetected = gs_tripleBufferNeedsDetection = false;
                    if (result == 'd' && GLPlatform::instance()->driver() == Driver_NVidia) {
                        // TODO this is a workaround, we should get __GL_YIELD set before libGL checks it
                        if (qstrcmp(qgetenv("__GL_YIELD"), "USLEEP")) {
                            options->setGlPreferBufferSwap(0);
                            setSwapInterval(0);
                            qWarning() << "\nIt seems you are using the nvidia driver without triple buffering\n"
                                          "You must export __GL_YIELD=\"USLEEP\" to prevent large CPU overhead on synced swaps\n"
                                          "Preferably, enable the TripleBuffer Option in the xorg.conf Device\n"
                                          "For this reason, the tearing prevention has been disabled.\n"
                                          "See https://bugs.kde.org/show_bug.cgi?id=322060\n";
                        }
                    }
                    setBlocksForRetrace(result == 'd');
                }
            }
        } else {
            waitSync();
            glXSwapBuffers(display(), glxWindow);
        }
        if (supportsBufferAge()) {
            glXQueryDrawable(display(), glxWindow, GLX_BACK_BUFFER_AGE_EXT, (GLuint *) &m_bufferAge);
        }
    } else if (glXCopySubBuffer) {
        foreach (const QRect &r, lastDamage().rects()) {
            // convert to OpenGL coordinates
            int y = displayHeight() - r.y() - r.height();
            glXCopySubBuffer(display(), glxWindow, r.x(), y, r.width(), r.height());
        }
    } else { // Copy Pixels (horribly slow on Mesa)
        // copy the damaged region from the back buffer to the front buffer
        glDrawBuffer(GL_FRONT);
        copyPixels(lastDamage());
        glDrawBuffer(GL_BACK);
    }

    setLastDamage(QRegion());
    if (!supportsBufferAge()) {
        glXWaitGL();
        XFlush(display());
    }
}
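The unsynchronized path above calls waitSync() before swapping. A minimal sketch of such a helper built on GLX_SGI_video_sync, assuming the extension functions were resolved at init time; this mirrors the haveWaitSync setup in init() below, but is not necessarily KWin's exact implementation:

// Sketch: block until the next vertical retrace using GLX_SGI_video_sync.
// glXWaitVideoSyncSGI(2, (sync + 1) % 2, &sync) returns once the retrace
// counter modulo 2 equals the requested remainder, i.e. on the next retrace.
void GlxBackend::waitSync()
{
    if (!haveWaitSync)
        return; // no usable video-sync extension; swap runs unsynchronized
    unsigned int sync;
    glXGetVideoSyncSGI(&sync);
    glXWaitVideoSyncSGI(2, (sync + 1) % 2, &sync);
}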
void EglOnXBackend::init()
{
    if (!initRenderingContext()) {
        setFailed(QStringLiteral("Could not initialize rendering context"));
        return;
    }

    initEGL();
    if (!hasGLExtension(QStringLiteral("EGL_KHR_image")) &&
        (!hasGLExtension(QStringLiteral("EGL_KHR_image_base")) ||
         !hasGLExtension(QStringLiteral("EGL_KHR_image_pixmap")))) {
        setFailed(QStringLiteral("Required support for binding pixmaps to EGLImages not found, disabling compositing"));
        return;
    }
    GLPlatform *glPlatform = GLPlatform::instance();
    glPlatform->detect(EglPlatformInterface);
    if (GLPlatform::instance()->driver() == Driver_Intel)
        options->setUnredirectFullscreen(false); // bug #252817
    options->setGlPreferBufferSwap(options->glPreferBufferSwap()); // resolve autosetting
    if (options->glPreferBufferSwap() == Options::AutoSwapStrategy)
        options->setGlPreferBufferSwap('e'); // for unknown drivers - should not happen
    glPlatform->printResults();
    initGL(EglPlatformInterface);
    if (!hasGLExtension(QStringLiteral("GL_OES_EGL_image"))) {
        setFailed(QStringLiteral("Required extension GL_OES_EGL_image not found, disabling compositing"));
        return;
    }

    // check for EGL_NV_post_sub_buffer and whether it can be used on the surface
    if (eglPostSubBufferNV) {
        if (eglQuerySurface(dpy, surface, EGL_POST_SUB_BUFFER_SUPPORTED_NV, &surfaceHasSubPost) == EGL_FALSE) {
            EGLint error = eglGetError();
            if (error != EGL_SUCCESS && error != EGL_BAD_ATTRIBUTE) {
                setFailed(QStringLiteral("query surface failed"));
                return;
            } else {
                surfaceHasSubPost = EGL_FALSE;
            }
        }
    }

    setSupportsBufferAge(false);
    if (hasGLExtension("EGL_EXT_buffer_age")) {
        const QByteArray useBufferAge = qgetenv("KWIN_USE_BUFFER_AGE");
        if (useBufferAge != "0")
            setSupportsBufferAge(true);
    }

    setSyncsToVBlank(false);
    setBlocksForRetrace(false);
    gs_tripleBufferNeedsDetection = false;
    m_swapProfiler.init();
    if (surfaceHasSubPost) {
        qDebug() << "EGL implementation and surface support eglPostSubBufferNV, let's use it";

        if (options->glPreferBufferSwap() != Options::NoSwapEncourage) {
            // check if swap interval 1 is supported
            EGLint val;
            eglGetConfigAttrib(dpy, config, EGL_MAX_SWAP_INTERVAL, &val);
            if (val >= 1) {
                if (eglSwapInterval(dpy, 1)) {
                    qDebug() << "Enabled v-sync";
                    setSyncsToVBlank(true);
                    const QByteArray tripleBuffer = qgetenv("KWIN_TRIPLE_BUFFER");
                    if (!tripleBuffer.isEmpty()) {
                        setBlocksForRetrace(qstrcmp(tripleBuffer, "0") == 0);
                        gs_tripleBufferUndetected = false;
                    }
                    gs_tripleBufferNeedsDetection = gs_tripleBufferUndetected;
                }
            } else {
                qWarning() << "Cannot enable v-sync as max. swap interval is" << val;
            }
        } else {
            // disable v-sync
            eglSwapInterval(dpy, 0);
        }
    } else {
        /* In the GLX backend, we fall back to using glCopyPixels if we have no extension providing support for partial screen updates.
         * However, that does not work in EGL - glCopyPixels with glDrawBuffer(GL_FRONT); does nothing.
         * Hence we need EGL to preserve the backbuffer for us, so that we can draw the partial updates on it and call
         * eglSwapBuffers() for each frame. eglSwapBuffers() then does the copy (no page flip possible in this mode),
         * which means it is slow and not synced to the v-blank.
         */
        qWarning() << "eglPostSubBufferNV not supported, have to enable buffer preservation - which breaks v-sync and performance";
        eglSurfaceAttrib(dpy, surface, EGL_SWAP_BEHAVIOR, EGL_BUFFER_PRESERVED);
    }
}
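For context on the buffer-age checks above (EGL_EXT_buffer_age here, GLX_EXT_buffer_age in the GLX backend): a queried age of n means the back buffer still holds the frame from n swaps ago, and 0 means its content is undefined. A minimal sketch of how a compositor can combine the age with a damage history to obtain the region it must repaint; the helper name and history container are illustrative assumptions, not KWin code:

#include <QRect>
#include <QRegion>
#include <QVector>

// Sketch: a valid age lets a partial repaint reuse old buffer content; the
// repaint region is the current damage plus the damage of every frame the
// stale back buffer missed. Age 0, or an age older than the recorded
// history, forces a full repaint.
QRegion accumulatedRepaint(const QRegion &currentDamage,
                           const QVector<QRegion> &damageHistory, // newest first
                           int bufferAge, const QRect &screen)
{
    if (bufferAge <= 0 || bufferAge - 1 > damageHistory.size())
        return QRegion(screen); // buffer content unusable: repaint everything
    QRegion repaint = currentDamage;
    for (int i = 0; i < bufferAge - 1; ++i)
        repaint |= damageHistory.at(i); // damage the stale buffer never received
    return repaint;
}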
void GlxBackend::init()
{
    initGLX();
    // require at least GLX 1.3
    if (!hasGLXVersion(1, 3)) {
        setFailed(QStringLiteral("Requires at least GLX 1.3"));
        return;
    }
    if (!initDrawableConfigs()) {
        setFailed(QStringLiteral("Could not initialize the drawable configs"));
        return;
    }
    if (!initBuffer()) {
        setFailed(QStringLiteral("Could not initialize the buffer"));
        return;
    }
    if (!initRenderingContext()) {
        setFailed(QStringLiteral("Could not initialize rendering context"));
        return;
    }

    // Initialize OpenGL
    GLPlatform *glPlatform = GLPlatform::instance();
    glPlatform->detect(GlxPlatformInterface);
    if (GLPlatform::instance()->driver() == Driver_Intel)
        options->setUnredirectFullscreen(false); // bug #252817
    options->setGlPreferBufferSwap(options->glPreferBufferSwap()); // resolve autosetting
    if (options->glPreferBufferSwap() == Options::AutoSwapStrategy)
        options->setGlPreferBufferSwap('e'); // for unknown drivers - should not happen
    glPlatform->printResults();
    initGL(GlxPlatformInterface);

    // Check whether certain features are supported
    haveSwapInterval = glXSwapIntervalMESA || glXSwapIntervalEXT || glXSwapIntervalSGI;

    setSupportsBufferAge(false);
    if (hasGLExtension("GLX_EXT_buffer_age")) {
        const QByteArray useBufferAge = qgetenv("KWIN_USE_BUFFER_AGE");
        if (useBufferAge != "0")
            setSupportsBufferAge(true);
    }

    setSyncsToVBlank(false);
    setBlocksForRetrace(false);
    haveWaitSync = false;
    gs_tripleBufferNeedsDetection = false;
    m_swapProfiler.init();
    const bool wantSync = options->glPreferBufferSwap() != Options::NoSwapEncourage;
    if (wantSync && glXIsDirect(display(), ctx)) {
        if (haveSwapInterval) { // glXSwapInterval is preferred being more reliable
            setSwapInterval(1);
            setSyncsToVBlank(true);
            const QByteArray tripleBuffer = qgetenv("KWIN_TRIPLE_BUFFER");
            if (!tripleBuffer.isEmpty()) {
                setBlocksForRetrace(qstrcmp(tripleBuffer, "0") == 0);
                gs_tripleBufferUndetected = false;
            }
            gs_tripleBufferNeedsDetection = gs_tripleBufferUndetected;
        } else if (glXGetVideoSync) {
            unsigned int sync;
            if (glXGetVideoSync(&sync) == 0 && glXWaitVideoSync(1, 0, &sync) == 0) {
                setSyncsToVBlank(true);
                setBlocksForRetrace(true);
                haveWaitSync = true;
            } else
                qWarning() << "NO VSYNC! glXSwapInterval is not supported, glXWaitVideoSync is supported but broken";
        } else
            qWarning() << "NO VSYNC! neither glSwapInterval nor glXWaitVideoSync are supported";
    } else {
        // disable v-sync (if possible)
        setSwapInterval(0);
    }
    if (glPlatform->isVirtualBox()) {
        // VirtualBox does not support glxQueryDrawable
        // this should actually be in kwinglutils_funcs, but QueryDrawable seems not to be provided by an extension
        // and the GLPlatform has not been initialized at the moment when initGLX() is called.
        glXQueryDrawable = NULL;
    }
    setIsDirectRendering(bool(glXIsDirect(display(), ctx)));
    qDebug() << "Direct rendering:" << isDirectRendering() << endl;
}
void GlxBackend::init()
{
    initGLX();
    // Require at least GLX 1.3
    if (!hasGLXVersion(1, 3)) {
        setFailed(QStringLiteral("Requires at least GLX 1.3"));
        return;
    }

    initVisualDepthHashTable();

    if (!initBuffer()) {
        setFailed(QStringLiteral("Could not initialize the buffer"));
        return;
    }
    if (!initRenderingContext()) {
        setFailed(QStringLiteral("Could not initialize rendering context"));
        return;
    }

    // Initialize OpenGL
    GLPlatform *glPlatform = GLPlatform::instance();
    glPlatform->detect(GlxPlatformInterface);
    if (GLPlatform::instance()->driver() == Driver_Intel)
        options->setUnredirectFullscreen(false); // bug #252817
    options->setGlPreferBufferSwap(options->glPreferBufferSwap()); // resolve autosetting
    if (options->glPreferBufferSwap() == Options::AutoSwapStrategy)
        options->setGlPreferBufferSwap('e'); // for unknown drivers - should not happen
    glPlatform->printResults();
    initGL(GlxPlatformInterface);

    // Check whether certain features are supported
    m_haveMESACopySubBuffer = hasGLExtension(QByteArrayLiteral("GLX_MESA_copy_sub_buffer"));
    m_haveMESASwapControl = hasGLExtension(QByteArrayLiteral("GLX_MESA_swap_control"));
    m_haveEXTSwapControl = hasGLExtension(QByteArrayLiteral("GLX_EXT_swap_control"));
    m_haveSGISwapControl = hasGLExtension(QByteArrayLiteral("GLX_SGI_swap_control"));
    // only enable Intel swap event if env variable is set, see BUG 342582
    m_haveINTELSwapEvent = hasGLExtension(QByteArrayLiteral("GLX_INTEL_swap_event"))
                            && qgetenv("KWIN_USE_INTEL_SWAP_EVENT") == QByteArrayLiteral("1");

    if (m_haveINTELSwapEvent) {
        m_swapEventFilter = std::make_unique<SwapEventFilter>(window, glxWindow);
        glXSelectEvent(display(), glxWindow, GLX_BUFFER_SWAP_COMPLETE_INTEL_MASK);
    }

    haveSwapInterval = m_haveMESASwapControl || m_haveEXTSwapControl || m_haveSGISwapControl;

    setSupportsBufferAge(false);
    if (hasGLExtension(QByteArrayLiteral("GLX_EXT_buffer_age"))) {
        const QByteArray useBufferAge = qgetenv("KWIN_USE_BUFFER_AGE");
        if (useBufferAge != "0")
            setSupportsBufferAge(true);
    }

    setSyncsToVBlank(false);
    setBlocksForRetrace(false);
    haveWaitSync = false;
    gs_tripleBufferNeedsDetection = false;
    m_swapProfiler.init();
    const bool wantSync = options->glPreferBufferSwap() != Options::NoSwapEncourage;
    if (wantSync && glXIsDirect(display(), ctx)) {
        if (haveSwapInterval) { // glXSwapInterval is preferred being more reliable
            setSwapInterval(1);
            setSyncsToVBlank(true);
            const QByteArray tripleBuffer = qgetenv("KWIN_TRIPLE_BUFFER");
            if (!tripleBuffer.isEmpty()) {
                setBlocksForRetrace(qstrcmp(tripleBuffer, "0") == 0);
                gs_tripleBufferUndetected = false;
            }
            gs_tripleBufferNeedsDetection = gs_tripleBufferUndetected;
        } else if (hasGLExtension(QByteArrayLiteral("GLX_SGI_video_sync"))) {
            unsigned int sync;
            if (glXGetVideoSyncSGI(&sync) == 0 && glXWaitVideoSyncSGI(1, 0, &sync) == 0) {
                setSyncsToVBlank(true);
                setBlocksForRetrace(true);
                haveWaitSync = true;
            } else
                qCWarning(KWIN_CORE) << "NO VSYNC! glXSwapInterval is not supported, glXWaitVideoSync is supported but broken";
        } else
            qCWarning(KWIN_CORE) << "NO VSYNC! neither glSwapInterval nor glXWaitVideoSync are supported";
    } else {
        // disable v-sync (if possible)
        setSwapInterval(0);
    }
    if (glPlatform->isVirtualBox()) {
        // VirtualBox does not support glxQueryDrawable
        // this should actually be in kwinglutils_funcs, but QueryDrawable seems not to be provided by an extension
        // and the GLPlatform has not been initialized at the moment when initGLX() is called.
        glXQueryDrawable = NULL;
    }
    setIsDirectRendering(bool(glXIsDirect(display(), ctx)));
    qCDebug(KWIN_CORE) << "Direct rendering:" << isDirectRendering();
}
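Both init() variants compute haveSwapInterval from three alternative swap-control extensions and then call setSwapInterval(). A minimal sketch of how such a helper can dispatch, assuming the member flags from the newer init() above; KWin's actual helper may differ in detail:

// Sketch: prefer GLX_EXT_swap_control (per-drawable), then the MESA and SGI
// variants, which apply to the current context. Note the SGI extension
// rejects an interval of 0, so v-sync cannot be disabled through it.
void GlxBackend::setSwapInterval(int interval)
{
    if (m_haveEXTSwapControl)
        glXSwapIntervalEXT(display(), glxWindow, interval);
    else if (m_haveMESASwapControl)
        glXSwapIntervalMESA(interval);
    else if (m_haveSGISwapControl)
        glXSwapIntervalSGI(interval);
}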