Beispiel #1
0
void GLCanvas::paintGL() {
    PROFILE_RANGE(__FUNCTION__);
    // When render throttling is active, skip this paint entirely.
    if (_throttleRendering) {
        return;
    }
    // Paint unless the window is minimized while FPS throttling is enabled.
    auto app = Application::getInstance();
    if (!app->getWindow()->isMinimized() || !app->isThrottleFPSEnabled()) {
        app->paintGL();
    }
}
void ApplicationOverlay::buildFramebufferObject() {
    PROFILE_RANGE(__FUNCTION__);

    QSize desiredSize = qApp->getDeviceSize();
    int currentWidth = _overlayFramebuffer ? _overlayFramebuffer->getWidth() : 0;
    int currentHeight = _overlayFramebuffer ? _overlayFramebuffer->getHeight() : 0;
    QSize frameBufferCurrentSize(currentWidth, currentHeight);
    
    if (_overlayFramebuffer && desiredSize == frameBufferCurrentSize) {
        // Already built
        return;
    }
    
    if (_overlayFramebuffer) {
        _overlayFramebuffer.reset();
        _overlayDepthTexture.reset();
        _overlayColorTexture.reset();
    }

    _overlayFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());

   auto colorFormat = gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA);
   auto width = desiredSize.width();
   auto height = desiredSize.height();

   auto defaultSampler = gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_LINEAR);
   _overlayColorTexture = gpu::TexturePointer(gpu::Texture::create2D(colorFormat, width, height, defaultSampler));
   _overlayFramebuffer->setRenderBuffer(0, _overlayColorTexture);

   auto depthFormat = gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::DEPTH);
   _overlayDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(depthFormat, width, height, defaultSampler));

   _overlayFramebuffer->setDepthStencilBuffer(_overlayDepthTexture, depthFormat);
}
// Renders the overlays either to a texture or to the screen
void ApplicationOverlay::renderOverlay(RenderArgs* renderArgs) {
    PROFILE_RANGE(render, __FUNCTION__);
    buildFramebufferObject();

    if (!_overlayFramebuffer) {
        return; // we can't do anything without our frame buffer.
    }

    // Record and execute a batch that renders every overlay layer into our framebuffer.
    doInBatch("ApplicationOverlay::render", renderArgs->_context, [&](gpu::Batch& batch) {
        PROFILE_RANGE_BATCH(batch, "ApplicationOverlayRender");
        renderArgs->_batch = &batch;
        batch.enableStereo(false);

        const int fbWidth = _overlayFramebuffer->getWidth();
        const int fbHeight = _overlayFramebuffer->getHeight();
        batch.setViewportTransform(glm::ivec4(0, 0, fbWidth, fbHeight));
        batch.setFramebuffer(_overlayFramebuffer);

        // Clear to fully transparent black, far depth (1.0), stencil 0.
        glm::vec4 clearColor { 0.0f, 0.0f, 0.0f, 0.0f };
        batch.clearFramebuffer(gpu::Framebuffer::BUFFER_COLOR0 | gpu::Framebuffer::BUFFER_DEPTH,
                               clearColor, 1.0f, 0);

        // Now render the overlay components together into a single texture
        renderDomainConnectionStatusBorder(renderArgs); // renders the connected domain line
        renderOverlays(renderArgs); // renders Scripts Overlay and AudioScope
#if !defined(DISABLE_QML)
        renderQmlUi(renderArgs); // renders a unit quad with the QML UI texture, and the text overlays from scripts
#endif
    });

    renderArgs->_batch = nullptr; // so future users of renderArgs don't try to use our batch
}
void ApplicationOverlay::renderQmlUi(RenderArgs* renderArgs) {
    PROFILE_RANGE(render, __FUNCTION__);

    // Lazily create the external texture that wraps the offscreen QML surface.
    if (!_uiTexture) {
        _uiTexture = gpu::Texture::createExternal(OffscreenQmlSurface::getDiscardLambda());
        _uiTexture->setSource(__FUNCTION__);
    }
    // Once we move UI rendering and screen rendering to different
    // threads, we need to use a sync object to determine when
    // the current UI texture is no longer being read from, and only
    // then release it back to the UI for re-use
    auto offscreenUi = DependencyManager::get<OffscreenUi>();

    // Pull the latest texture (and its fence) from the offscreen UI, if one is ready.
    OffscreenQmlSurface::TextureAndFence textureAndFence;
    if (offscreenUi->fetchTexture(textureAndFence)) {
        _uiTexture->setExternalTexture(textureAndFence.first, textureAndFence.second);
    }

    // Draw a unit quad textured with the UI, using identity transforms.
    gpu::Batch& batch = *renderArgs->_batch;
    auto geometryCache = DependencyManager::get<GeometryCache>();
    geometryCache->useSimpleDrawPipeline(batch);
    batch.setProjectionTransform(mat4());
    batch.setModelTransform(Transform());
    batch.resetViewTransform();
    batch.setResourceTexture(0, _uiTexture);
    geometryCache->renderUnitQuad(batch, glm::vec4(1), _qmlGeometryId);
    batch.setResourceTexture(0, nullptr);
}
 /// Renders the meta-model entity wrapped by this payload.
 /// A null args, payload, or entity is a no-op, so the nested checks
 /// collapse into a single short-circuiting condition.
 template <> void payloadRender(const RenderableModelEntityItemMeta::Pointer& payload, RenderArgs* args) {
     if (args && payload && payload->entity) {
         PROFILE_RANGE("MetaModelRender");
         payload->entity->render(args);
     }
 }
Beispiel #6
0
void GLCanvas::paintGL() {
    PROFILE_RANGE(__FUNCTION__);

    // FIXME - I'm not sure why this still remains, it appears as if this GLCanvas gets a single paintGL call near
    // the beginning of the application starting up. I'm not sure if we really need to call Application::paintGL()
    // in this case, since the display plugins eventually handle all the painting
    auto app = Application::getInstance();
    bool minimized = app->getWindow()->isMinimized();
    if (!minimized || !app->isThrottleFPSEnabled()) {
        app->paintGL();
    }
}
void ApplicationOverlay::renderQmlUi(RenderArgs* renderArgs) {
    PROFILE_RANGE(__FUNCTION__);
    // Nothing to draw until the QML UI texture has been created.
    if (!_uiTexture) {
        return;
    }

    gpu::Batch& batch = *renderArgs->_batch;
    auto geometryCache = DependencyManager::get<GeometryCache>();

    // Identity transforms: the unit quad maps directly onto the viewport.
    geometryCache->useSimpleDrawPipeline(batch);
    batch.setProjectionTransform(mat4());
    batch.setModelTransform(Transform());
    batch.setViewTransform(Transform());
    batch._glActiveBindTexture(GL_TEXTURE0, GL_TEXTURE_2D, _uiTexture);

    geometryCache->renderUnitQuad(batch, glm::vec4(1));
}
void ApplicationOverlay::renderOverlays(RenderArgs* renderArgs) {
    PROFILE_RANGE(render, __FUNCTION__);

    gpu::Batch& batch = *renderArgs->_batch;

    // Simple draw pipeline with a plain white texture bound to unit 0.
    DependencyManager::get<GeometryCache>()->useSimpleDrawPipeline(batch);
    batch.setResourceTexture(0, DependencyManager::get<TextureCache>()->getWhiteTexture());

    // Legacy 2D orthographic projection: origin at the top-left, y pointing down.
    const int viewportWidth = renderArgs->_viewport.z;
    const int viewportHeight = renderArgs->_viewport.w;
    batch.setProjectionTransform(glm::ortho<float>(0, viewportWidth, viewportHeight, 0, ORTHO_NEAR_CLIP, ORTHO_FAR_CLIP));
    batch.setModelTransform(Transform());
    batch.resetViewTransform();

    // Render all of the Script based "HUD" aka 2D overlays.
    qApp->getOverlays().render(renderArgs);
}
void ApplicationOverlay::renderAudioScope(RenderArgs* renderArgs) {
    PROFILE_RANGE(__FUNCTION__);

    gpu::Batch& batch = *renderArgs->_batch;

    // Simple draw pipeline with a plain white texture bound to unit 0.
    DependencyManager::get<GeometryCache>()->useSimpleDrawPipeline(batch);
    batch.setResourceTexture(0, DependencyManager::get<TextureCache>()->getWhiteTexture());

    // Legacy 2D orthographic projection: origin at the top-left, y pointing down.
    const int viewportWidth = renderArgs->_viewport.z;
    const int viewportHeight = renderArgs->_viewport.w;
    batch.setProjectionTransform(glm::ortho<float>(0, viewportWidth, viewportHeight, 0, ORTHO_NEAR_CLIP, ORTHO_FAR_CLIP));
    batch.setModelTransform(Transform());
    batch.setViewTransform(Transform());

    // Render the audio scope
    DependencyManager::get<AudioScope>()->render(renderArgs, viewportWidth, viewportHeight);
}
Beispiel #10
0
// Renders the overlays either to a texture or to the screen
void ApplicationOverlay::renderOverlay(RenderArgs* renderArgs) {
    PROFILE_RANGE(__FUNCTION__);
    CHECK_GL_ERROR();
    PerformanceWarning warn(Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings), "ApplicationOverlay::displayOverlay()");

    // TODO move to Application::idle()?
    Stats::getInstance()->updateStats();
    AvatarInputs::getInstance()->update();

    // Ensure the overlay framebuffer exists before recording into it.
    buildFramebufferObject();
    
    if (!_overlayFramebuffer) {
        return; // we can't do anything without our frame buffer.
    }

    // Execute the batch into our framebuffer
    gpu::Batch batch;
    renderArgs->_batch = &batch;

    int width = _overlayFramebuffer->getWidth();
    int height = _overlayFramebuffer->getHeight();

    batch.setViewportTransform(glm::ivec4(0, 0, width, height));
    batch.setFramebuffer(_overlayFramebuffer);

    // Clear to fully transparent black with far depth (1.0) so the overlay
    // composites over the 3D scene.
    glm::vec4 color { 0.0f, 0.0f, 0.0f, 0.0f };
    float depth = 1.0f;
    int stencil = 0;
    batch.clearFramebuffer(gpu::Framebuffer::BUFFER_COLOR0 | gpu::Framebuffer::BUFFER_DEPTH, color, depth, stencil);

    // Now render the overlay components together into a single texture
    // (back-to-front: later calls draw over earlier ones)
    renderDomainConnectionStatusBorder(renderArgs); // renders the connected domain line
    renderAudioScope(renderArgs); // audio scope in the very back
    renderRearView(renderArgs); // renders the mirror view selfie
    renderQmlUi(renderArgs); // renders a unit quad with the QML UI texture, and the text overlays from scripts
    renderOverlays(renderArgs); // renders Scripts Overlay and AudioScope
    renderStatsAndLogs(renderArgs);  // currently renders nothing

    // Submit the recorded batch for execution on the GPU context.
    renderArgs->_context->render(batch);

    renderArgs->_batch = nullptr; // so future users of renderArgs don't try to use our batch

    CHECK_GL_ERROR();
}
void Application::runRenderFrame(RenderArgs* renderArgs) {
    PROFILE_RANGE(render, __FUNCTION__);
    PerformanceTimer perfTimer("display");
    PerformanceWarning warn(Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings), "Application::runRenderFrame()");

    // Transaction that collects the pending scene changes made below
    render::Transaction transaction;

    if (DependencyManager::get<SceneScriptingInterface>()->shouldRenderEntities()) {
        // render models...
        PerformanceTimer perfTimer("entities");
        PerformanceWarning warn(Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings),
            "Application::runRenderFrame() ... entities...");

        RenderArgs::DebugFlags renderDebugFlags = RenderArgs::RENDER_DEBUG_NONE;

        // Optionally enable physics-hull debug rendering for this frame.
        if (Menu::getInstance()->isOptionChecked(MenuOption::PhysicsShowHulls)) {
            renderDebugFlags = static_cast<RenderArgs::DebugFlags>(renderDebugFlags |
                static_cast<int>(RenderArgs::RENDER_DEBUG_HULLS));
        }
        renderArgs->_debugFlags = renderDebugFlags;
    }

    // Make sure the WorldBox is in the scene
    // For the record, this one RenderItem is the first one we created and added to the scene.
    // We could move that code elsewhere but you know...
    if (!render::Item::isValidID(WorldBoxRenderData::_item)) {
        auto worldBoxRenderData = std::make_shared<WorldBoxRenderData>();
        auto worldBoxRenderPayload = std::make_shared<WorldBoxRenderData::Payload>(worldBoxRenderData);

        WorldBoxRenderData::_item = _main3DScene->allocateID();

        transaction.resetItem(WorldBoxRenderData::_item, worldBoxRenderPayload);
        _main3DScene->enqueueTransaction(transaction);
    }

    // Hand the render args to the engine and execute one frame of the render graph.
    {
        PerformanceTimer perfTimer("EngineRun");
        _renderEngine->getRenderContext()->args = renderArgs;
        _renderEngine->run();
    }
}
void ApplicationOverlay::renderOverlays(RenderArgs* renderArgs) {
    PROFILE_RANGE(__FUNCTION__);

    gpu::Batch& batch = *renderArgs->_batch;

    // Simple draw pipeline with a plain white texture bound to unit 0.
    DependencyManager::get<GeometryCache>()->useSimpleDrawPipeline(batch);
    batch.setResourceTexture(0, DependencyManager::get<TextureCache>()->getWhiteTexture());

    // Legacy 2D orthographic projection: origin at the top-left, y pointing down.
    const int viewportWidth = renderArgs->_viewport.z;
    const int viewportHeight = renderArgs->_viewport.w;
    batch.setProjectionTransform(glm::ortho<float>(0, viewportWidth, viewportHeight, 0, ORTHO_NEAR_CLIP, ORTHO_FAR_CLIP));
    batch.setModelTransform(Transform());
    batch.setViewTransform(Transform());

    // Render all of the Script based "HUD" aka 2D overlays.
    // note: we call them HUD, as opposed to 2D, only because there are some cases of 3D HUD overlays, like the
    // camera controls for the edit.js
    qApp->getOverlays().renderHUD(renderArgs);
}
Beispiel #13
0
void MetaModelPayload::setBlendedVertices(int blendNumber, const QVector<BlendshapeOffset>& blendshapeOffsets, const QVector<int>& blendedMeshSizes, const render::ItemIDs& subRenderItems) {
    PROFILE_RANGE(render, __FUNCTION__);
    // Drop stale results: only apply blends at least as new as the last applied one.
    if (blendNumber < _appliedBlendNumber) {
        return;
    }
    _appliedBlendNumber = blendNumber;

    // We have fewer meshes than before.  Invalidate everything
    if (blendedMeshSizes.length() < (int)_blendshapeBuffers.size()) {
        _blendshapeBuffers.clear();
    }

    // `index` walks through blendshapeOffsets; mesh i consumes blendedMeshSizes[i]
    // consecutive entries starting at `index`.
    int index = 0;
    for (int i = 0; i < blendedMeshSizes.size(); i++) {
        int numVertices = blendedMeshSizes.at(i);

        // This mesh isn't blendshaped
        if (numVertices == 0) {
            _blendshapeBuffers.erase(i);
            continue;
        }

        // Create (or refresh in place) the GPU buffer holding this mesh's offsets.
        const auto& buffer = _blendshapeBuffers.find(i);
        const auto blendShapeBufferSize = numVertices * sizeof(BlendshapeOffset);
        if (buffer == _blendshapeBuffers.end()) {
            _blendshapeBuffers[i] = std::make_shared<gpu::Buffer>(blendShapeBufferSize, (gpu::Byte*) blendshapeOffsets.constData() + index * sizeof(BlendshapeOffset), blendShapeBufferSize);
        } else {
            buffer->second->setData(blendShapeBufferSize, (gpu::Byte*) blendshapeOffsets.constData() + index * sizeof(BlendshapeOffset));
        }

        index += numVertices;
    }

    // Push the refreshed buffers to every sub render item via a scene transaction.
    render::Transaction transaction;
    for (auto& id : subRenderItems) {
        transaction.updateItem<ModelMeshPartPayload>(id, [this, blendedMeshSizes](ModelMeshPartPayload& data) {
            data.setBlendshapeBuffer(_blendshapeBuffers, blendedMeshSizes);
        });
    }
    AbstractViewStateInterface::instance()->getMain3DScene()->enqueueTransaction(transaction);
}
void ApplicationOverlay::buildFramebufferObject() {
    PROFILE_RANGE(render, __FUNCTION__);

    // Recreate the framebuffer whenever the UI size changes (or on first use).
    const auto uiSize = glm::uvec2(qApp->getUiSize());
    if (!_overlayFramebuffer || uiSize != _overlayFramebuffer->getSize()) {
        _overlayFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create("ApplicationOverlay"));
    }

    const auto width = uiSize.x;
    const auto height = uiSize.y;

    // Attach a depth/stencil buffer if the framebuffer doesn't have one yet.
    if (!_overlayFramebuffer->getDepthStencilBuffer()) {
        auto depthTexture = gpu::Texture::createRenderBuffer(DEPTH_FORMAT, width, height, gpu::Texture::SINGLE_MIP, DEFAULT_SAMPLER);
        _overlayFramebuffer->setDepthStencilBuffer(depthTexture, DEPTH_FORMAT);
    }

    // Attach a color buffer if the framebuffer doesn't have one yet.
    if (!_overlayFramebuffer->getRenderBuffer(0)) {
        const gpu::Sampler OVERLAY_SAMPLER(gpu::Sampler::FILTER_MIN_MAG_LINEAR, gpu::Sampler::WRAP_CLAMP);
        auto colorTexture = gpu::Texture::createRenderBuffer(COLOR_FORMAT, width, height, gpu::Texture::SINGLE_MIP, OVERLAY_SAMPLER);
        _overlayFramebuffer->setRenderBuffer(0, colorTexture);
    }
}
Beispiel #15
0
void GraphicsEngine::render_runRenderFrame(RenderArgs* renderArgs) {
    PROFILE_RANGE(render, __FUNCTION__);
    PerformanceTimer perfTimer("render");

    // Make sure the WorldBox is in the scene
    // For the record, this one RenderItem is the first one we created and added to the scene.
    // We could move that code elsewhere but you know...
    if (!render::Item::isValidID(WorldBoxRenderData::_item)) {
        auto renderData = std::make_shared<WorldBoxRenderData>();
        auto renderPayload = std::make_shared<WorldBoxRenderData::Payload>(renderData);

        WorldBoxRenderData::_item = _renderScene->allocateID();

        render::Transaction transaction;
        transaction.resetItem(WorldBoxRenderData::_item, renderPayload);
        _renderScene->enqueueTransaction(transaction);
    }

    // Hand the args to the render engine and run one frame of the render graph.
    {
        _renderEngine->getRenderContext()->args = renderArgs;
        _renderEngine->run();
    }
}
void ApplicationOverlay::buildFramebufferObject() {
    PROFILE_RANGE(__FUNCTION__);

    // Recreate the framebuffer whenever the UI size changes (or on first use).
    auto uiSize = qApp->getUiSize();
    if (!_overlayFramebuffer || uiSize != _overlayFramebuffer->getSize()) {
        _overlayFramebuffer = gpu::FramebufferPointer(gpu::Framebuffer::create());
    }

    auto width = uiSize.x;
    auto height = uiSize.y;
    // Attach a depth/stencil texture if the framebuffer doesn't have one yet.
    if (!_overlayFramebuffer->getDepthStencilBuffer()) {
        auto overlayDepthTexture = gpu::TexturePointer(gpu::Texture::create2D(DEPTH_FORMAT, width, height, DEFAULT_SAMPLER));
        _overlayFramebuffer->setDepthStencilBuffer(overlayDepthTexture, DEPTH_FORMAT);
    }

    // Prefer recycling a previously-used color texture before allocating a new one.
    if (!_overlayFramebuffer->getRenderBuffer(0)) {
        gpu::TexturePointer newColorAttachment;
        {
            // Guard the recycled-texture queue; presumably it is filled from
            // another thread when textures are released — TODO confirm.
            Lock lock(_textureGuard);
            if (!_availableTextures.empty()) {
                newColorAttachment = _availableTextures.front();
                _availableTextures.pop();
            }
        }
        if (newColorAttachment) {
            // A recycled texture may be the wrong size; resize to the current UI size.
            newColorAttachment->resize2D(width, height, newColorAttachment->getNumSamples());
            _overlayFramebuffer->setRenderBuffer(0, newColorAttachment);
        }
    }

    // If the overlay framebuffer still has no color attachment, no textures were available for rendering, so build a new one
    if (!_overlayFramebuffer->getRenderBuffer(0)) {
        const gpu::Sampler OVERLAY_SAMPLER(gpu::Sampler::FILTER_MIN_MAG_LINEAR, gpu::Sampler::WRAP_CLAMP);
        auto colorBuffer = gpu::TexturePointer(gpu::Texture::create2D(COLOR_FORMAT, width, height, OVERLAY_SAMPLER));
        _overlayFramebuffer->setRenderBuffer(0, colorBuffer);
    }
}
Beispiel #17
0
void GraphicsEngine::render_performFrame() {
    // Some plugins process message events, allowing paintGL to be called reentrantly.

    _renderFrameCount++;

    auto lastPaintBegin = usecTimestampNow();
    PROFILE_RANGE_EX(render, __FUNCTION__, 0xff0000ff, (uint64_t)_renderFrameCount);
    PerformanceTimer perfTimer("paintGL");

    DisplayPluginPointer displayPlugin;
    {
        PROFILE_RANGE(render, "/getActiveDisplayPlugin");
        displayPlugin = qApp->getActiveDisplayPlugin();
    }

    {
        PROFILE_RANGE(render, "/pluginBeginFrameRender");
        // If a display plugin loses its underlying support, it
        // needs to be able to signal us to not use it
        if (!displayPlugin->beginFrameRender(_renderFrameCount)) {
            QMetaObject::invokeMethod(qApp, "updateDisplayMode");
            return;
        }
    }

    RenderArgs renderArgs;
    glm::mat4  HMDSensorPose;
    glm::mat4  eyeToWorld;
    glm::mat4  sensorToWorld;
    ViewFrustum viewFrustum;

    bool isStereo;
    glm::mat4  stereoEyeOffsets[2];
    glm::mat4  stereoEyeProjections[2];

    // Snapshot the shared render state under the mutex so the rest of the
    // frame works from a consistent local copy.
    {
        QMutexLocker viewLocker(&_renderArgsMutex);
        renderArgs = _appRenderArgs._renderArgs;

        // don't render if there is no context.
        if (!_appRenderArgs._renderArgs._context) {
            return;
        }

        HMDSensorPose = _appRenderArgs._headPose;
        eyeToWorld = _appRenderArgs._eyeToWorld;
        sensorToWorld = _appRenderArgs._sensorToWorld;
        isStereo = _appRenderArgs._isStereo;
        for_each_eye([&](Eye eye) {
            stereoEyeOffsets[eye] = _appRenderArgs._eyeOffsets[eye];
            stereoEyeProjections[eye] = _appRenderArgs._eyeProjections[eye];
        });
        viewFrustum = _appRenderArgs._renderArgs.getViewFrustum();
    }

    {
        PROFILE_RANGE(render, "/gpuContextReset");
        getGPUContext()->beginFrame(_appRenderArgs._view, HMDSensorPose);
        // Reset the gpu::Context Stages
        // Back to the default framebuffer;
        gpu::doInBatch("Application_render::gpuContextReset", getGPUContext(), [&](gpu::Batch& batch) {
            batch.resetStages();
        });

        // Configure stereo projections/views for this frame if we're in HMD mode.
        if (isStereo) {
            renderArgs._context->enableStereo(true);
            renderArgs._context->setStereoProjections(stereoEyeProjections);
            renderArgs._context->setStereoViews(stereoEyeOffsets);
        }
    }

    gpu::FramebufferPointer finalFramebuffer;
    QSize finalFramebufferSize;
    {
        PROFILE_RANGE(render, "/getOutputFramebuffer");
        // Primary rendering pass
        auto framebufferCache = DependencyManager::get<FramebufferCache>();
        finalFramebufferSize = framebufferCache->getFrameBufferSize();
        // Final framebuffer that will be handed to the display-plugin
        finalFramebuffer = framebufferCache->getFramebuffer();
    }

    // While shader programs are still compiling, show the splash screen instead
    // of the scene.
    if (!_programsCompiled.load()) {
        gpu::doInBatch("splashFrame", _gpuContext, [&](gpu::Batch& batch) {
            batch.setFramebuffer(finalFramebuffer);
            batch.enableSkybox(true);
            batch.enableStereo(isStereo);
            batch.setViewportTransform({ 0, 0, finalFramebuffer->getSize() });
            _splashScreen->render(batch, viewFrustum);
        });
    } else {
        {
            PROFILE_RANGE(render, "/renderOverlay");
            PerformanceTimer perfTimer("renderOverlay");
            // NOTE: There is no batch associated with this renderArgs
            // the ApplicationOverlay class assumes its viewport is setup to be the device size
            renderArgs._viewport = glm::ivec4(0, 0, qApp->getDeviceSize());
            qApp->getApplicationOverlay().renderOverlay(&renderArgs);
        }

        {
            PROFILE_RANGE(render, "/updateCompositor");
            qApp->getApplicationCompositor().setFrameInfo(_renderFrameCount, eyeToWorld, sensorToWorld);
        }

        {
            PROFILE_RANGE(render, "/runRenderFrame");
            renderArgs._hudOperator = displayPlugin->getHUDOperator();
            renderArgs._hudTexture = qApp->getApplicationOverlay().getOverlayTexture();
            renderArgs._blitFramebuffer = finalFramebuffer;
            render_runRenderFrame(&renderArgs);
        }
    }

    auto frame = getGPUContext()->endFrame();
    frame->frameIndex = _renderFrameCount;
    frame->framebuffer = finalFramebuffer;
    // Return the framebuffer to the cache once the display plugin is done with it.
    frame->framebufferRecycler = [](const gpu::FramebufferPointer& framebuffer) {
        auto frameBufferCache = DependencyManager::get<FramebufferCache>();
        if (frameBufferCache) {
            frameBufferCache->releaseFramebuffer(framebuffer);
        }
    };
    // deliver final scene rendering commands to the display plugin
    {
        PROFILE_RANGE(render, "/pluginOutput");
        PerformanceTimer perfTimer("pluginOutput");
        _renderLoopCounter.increment();
        displayPlugin->submitFrame(frame);
    }

    // Reset the framebuffer and stereo state
    renderArgs._blitFramebuffer.reset();
    renderArgs._context->enableStereo(false);

#if !defined(DISABLE_QML)
    {
        auto stats = Stats::getInstance();
        if (stats) {
            stats->setRenderDetails(renderArgs._details);
        }
    }
#endif

    // Record how long this frame took for the frame-timings scripting interface.
    uint64_t lastPaintDuration = usecTimestampNow() - lastPaintBegin;
    _frameTimingsScriptingInterface.addValue(lastPaintDuration);
}
void Application::paintGL() {
    // Some plugins process message events, allowing paintGL to be called reentrantly.
    if (_aboutToQuit || _window->isMinimized()) {
        return;
    }

    _renderFrameCount++;
    _lastTimeRendered.start();

    auto lastPaintBegin = usecTimestampNow();
    PROFILE_RANGE_EX(render, __FUNCTION__, 0xff0000ff, (uint64_t)_renderFrameCount);
    PerformanceTimer perfTimer("paintGL");

    // No display plugin yet — nothing to paint to.
    if (nullptr == _displayPlugin) {
        return;
    }

    DisplayPluginPointer displayPlugin;
    {
        PROFILE_RANGE(render, "/getActiveDisplayPlugin");
        displayPlugin = getActiveDisplayPlugin();
    }

    {
        PROFILE_RANGE(render, "/pluginBeginFrameRender");
        // If a display plugin loses its underlying support, it
        // needs to be able to signal us to not use it
        if (!displayPlugin->beginFrameRender(_renderFrameCount)) {
            QMetaObject::invokeMethod(this, "updateDisplayMode");
            return;
        }
    }

    RenderArgs renderArgs;
    glm::mat4  HMDSensorPose;
    glm::mat4  eyeToWorld;
    glm::mat4  sensorToWorld;

    bool isStereo;
    glm::mat4  stereoEyeOffsets[2];
    glm::mat4  stereoEyeProjections[2];

    // Snapshot the shared render state under the mutex so the rest of the
    // frame works from a consistent local copy.
    {
        QMutexLocker viewLocker(&_renderArgsMutex);
        renderArgs = _appRenderArgs._renderArgs;

        // don't render if there is no context.
        if (!_appRenderArgs._renderArgs._context) {
            return;
        }

        HMDSensorPose = _appRenderArgs._headPose;
        eyeToWorld = _appRenderArgs._eyeToWorld;
        sensorToWorld = _appRenderArgs._sensorToWorld;
        isStereo = _appRenderArgs._isStereo;
        for_each_eye([&](Eye eye) {
            stereoEyeOffsets[eye] = _appRenderArgs._eyeOffsets[eye];
            stereoEyeProjections[eye] = _appRenderArgs._eyeProjections[eye];
        });
    }

    {
        PROFILE_RANGE(render, "/gpuContextReset");
        _gpuContext->beginFrame(HMDSensorPose);
        // Reset the gpu::Context Stages
        // Back to the default framebuffer;
        gpu::doInBatch(_gpuContext, [&](gpu::Batch& batch) {
            batch.resetStages();
        });
    }


    {
        PROFILE_RANGE(render, "/renderOverlay");
        PerformanceTimer perfTimer("renderOverlay");
        // NOTE: There is no batch associated with this renderArgs
        // the ApplicationOverlay class assumes its viewport is setup to be the device size
        renderArgs._viewport = glm::ivec4(0, 0, getDeviceSize());
        _applicationOverlay.renderOverlay(&renderArgs);
    }

    {
        PROFILE_RANGE(render, "/updateCompositor");
        getApplicationCompositor().setFrameInfo(_renderFrameCount, eyeToWorld, sensorToWorld);
    }

    gpu::FramebufferPointer finalFramebuffer;
    QSize finalFramebufferSize;
    {
        PROFILE_RANGE(render, "/getOutputFramebuffer");
        // Primary rendering pass
        auto framebufferCache = DependencyManager::get<FramebufferCache>();
        finalFramebufferSize = framebufferCache->getFrameBufferSize();
        // Final framebuffer that will be handed to the display-plugin
        finalFramebuffer = framebufferCache->getFramebuffer();
    }

    {
        // Configure stereo projections/views for this frame if we're in HMD mode.
        if (isStereo) {
            renderArgs._context->enableStereo(true);
            renderArgs._context->setStereoProjections(stereoEyeProjections);
            renderArgs._context->setStereoViews(stereoEyeOffsets);
        }

        renderArgs._hudOperator = displayPlugin->getHUDOperator();
        renderArgs._hudTexture = _applicationOverlay.getOverlayTexture();
        renderArgs._blitFramebuffer = finalFramebuffer;
        runRenderFrame(&renderArgs);
    }

    auto frame = _gpuContext->endFrame();
    frame->frameIndex = _renderFrameCount;
    frame->framebuffer = finalFramebuffer;
    // Return the framebuffer to the cache once the display plugin is done with it.
    frame->framebufferRecycler = [](const gpu::FramebufferPointer& framebuffer) {
        DependencyManager::get<FramebufferCache>()->releaseFramebuffer(framebuffer);
    };
    // deliver final scene rendering commands to the display plugin
    {
        PROFILE_RANGE(render, "/pluginOutput");
        PerformanceTimer perfTimer("pluginOutput");
        _renderLoopCounter.increment();
        displayPlugin->submitFrame(frame);
    }

    // Reset the framebuffer and stereo state
    renderArgs._blitFramebuffer.reset();
    renderArgs._context->enableStereo(false);

    {
        Stats::getInstance()->setRenderDetails(renderArgs._details);
    }

    // Record how long this frame took for the frame-timings scripting interface.
    uint64_t lastPaintDuration = usecTimestampNow() - lastPaintBegin;
    _frameTimingsScriptingInterface.addValue(lastPaintDuration);
}