Example #1
void DeferredLightingEffect::copyBack(RenderArgs* args) {
    auto textureCache = DependencyManager::get<TextureCache>();
    QSize framebufferSize = textureCache->getFrameBufferSize();

    auto freeFBO = DependencyManager::get<GlowEffect>()->getFreeFramebuffer();

    //freeFBO->release();
    glBindFramebuffer(GL_FRAMEBUFFER, 0);

    
    glDisable(GL_CULL_FACE);
    
    // now transfer the lit region to the primary fbo
    glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_CONSTANT_ALPHA, GL_ONE);
    glColorMask(true, true, true, false);
    
    auto primaryFBO = gpu::GLBackend::getFramebufferID(textureCache->getPrimaryFramebuffer());
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, primaryFBO);

    //primaryFBO->bind();
    
    glBindTexture(GL_TEXTURE_2D, gpu::GLBackend::getTextureID(freeFBO->getRenderBuffer(0)));
    glEnable(GL_TEXTURE_2D);
    
    glPushMatrix();
    glLoadIdentity();
    
    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();
    
    int viewport[4];
    glGetIntegerv(GL_VIEWPORT, viewport);
    const int VIEWPORT_X_INDEX = 0;
    const int VIEWPORT_Y_INDEX = 1;
    const int VIEWPORT_WIDTH_INDEX = 2;
    const int VIEWPORT_HEIGHT_INDEX = 3;

    float sMin = viewport[VIEWPORT_X_INDEX] / (float)framebufferSize.width();
    float sWidth = viewport[VIEWPORT_WIDTH_INDEX] / (float)framebufferSize.width();
    float tMin = viewport[VIEWPORT_Y_INDEX] / (float)framebufferSize.height();
    float tHeight = viewport[VIEWPORT_HEIGHT_INDEX] / (float)framebufferSize.height();

    renderFullscreenQuad(sMin, sMin + sWidth, tMin, tMin + tHeight);
    
    glBindTexture(GL_TEXTURE_2D, 0);
    glDisable(GL_TEXTURE_2D);
    
    glColorMask(true, true, true, true);
    glEnable(GL_LIGHTING);
    glEnable(GL_COLOR_MATERIAL);
    glEnable(GL_DEPTH_TEST);
    glDepthMask(true);
    
    glPopMatrix();
    
    glMatrixMode(GL_MODELVIEW);
    glPopMatrix();
}
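
A hedged aside: the viewport-to-texcoord normalization above (sMin, sWidth, tMin, tHeight) recurs in most of the examples below. A minimal sketch of it as a standalone helper, assuming a glm::ivec4 viewport laid out as (x, y, width, height); the helper name is illustrative and not part of the original code:

// Hypothetical helper: normalize a pixel-space viewport against the framebuffer size.
// Returns (sMin, tMin, sWidth, tHeight) as [0, 1] texture coordinates.
static glm::vec4 viewportToTexcoordRect(const glm::ivec4& viewport, const QSize& framebufferSize) {
    float sMin = viewport.x / (float)framebufferSize.width();
    float sWidth = viewport.z / (float)framebufferSize.width();
    float tMin = viewport.y / (float)framebufferSize.height();
    float tHeight = viewport.w / (float)framebufferSize.height();
    return glm::vec4(sMin, tMin, sWidth, tHeight);
}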
Example #2
void ToneMappingEffect::render(RenderArgs* args) {
    if (!_blitLightBuffer) {
        init();
    }
    auto framebufferCache = DependencyManager::get<FramebufferCache>();
    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
        batch.enableStereo(false);
        QSize framebufferSize = framebufferCache->getFrameBufferSize();

        auto lightingBuffer = framebufferCache->getLightingTexture();
        auto destFbo = framebufferCache->getPrimaryFramebuffer();
        batch.setFramebuffer(destFbo);

        // FIXME: Generate the Luminosity map
        //batch.generateTextureMips(lightingBuffer);

        batch.setViewportTransform(args->_viewport);
        batch.setProjectionTransform(glm::mat4());
        batch.setViewTransform(Transform());
        {
            float sMin = args->_viewport.x / (float)framebufferSize.width();
            float sWidth = args->_viewport.z / (float)framebufferSize.width();
            float tMin = args->_viewport.y / (float)framebufferSize.height();
            float tHeight = args->_viewport.w / (float)framebufferSize.height();
            Transform model;
            batch.setPipeline(_blitLightBuffer);
            model.setTranslation(glm::vec3(sMin, tMin, 0.0));
            model.setScale(glm::vec3(sWidth, tHeight, 1.0));
            batch.setModelTransform(model);
        }

        batch.setUniformBuffer(ToneMappingEffect_ParamsSlot, _parametersBuffer);
        batch.setResourceTexture(ToneMappingEffect_LightingMapSlot, lightingBuffer);
        batch.draw(gpu::TRIANGLE_STRIP, 4);
    });
}
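
For reference, the model transform above positions a unit quad over the viewport's sub-rectangle in normalized coordinates; using the hypothetical viewportToTexcoordRect helper sketched after Example #1, the same block could read (a sketch only, not the original code):

// Sketch: same transform as above, expressed with the hypothetical helper.
glm::vec4 rect = viewportToTexcoordRect(args->_viewport, framebufferSize); // (sMin, tMin, sWidth, tHeight)
Transform model;
model.setTranslation(glm::vec3(rect.x, rect.y, 0.0f));
model.setScale(glm::vec3(rect.z, rect.w, 1.0f));
batch.setModelTransform(model);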
Example #3
void DeferredLightingEffect::render(const render::RenderContextPointer& renderContext) {
    auto args = renderContext->args;
    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
        
        // Allocate the parameters buffer used by all the deferred shaders
        if (!_deferredTransformBuffer[0]._buffer) {
            DeferredTransform parameters;
            _deferredTransformBuffer[0] = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(DeferredTransform), (const gpu::Byte*) &parameters));
            _deferredTransformBuffer[1] = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(DeferredTransform), (const gpu::Byte*) &parameters));
        }

        // Framebuffer copy operations cannot function as multipass stereo operations.  
        batch.enableStereo(false);

        // perform deferred lighting, rendering to free fbo
        auto framebufferCache = DependencyManager::get<FramebufferCache>();
        auto textureCache = DependencyManager::get<TextureCache>();
    
        QSize framebufferSize = framebufferCache->getFrameBufferSize();
    
        // binding the first framebuffer
        auto lightingFBO = framebufferCache->getLightingFramebuffer();
        batch.setFramebuffer(lightingFBO);

        batch.setViewportTransform(args->_viewport);
        batch.setStateScissorRect(args->_viewport);

        // Bind the G-Buffer surfaces
        batch.setResourceTexture(DEFERRED_BUFFER_COLOR_UNIT, framebufferCache->getDeferredColorTexture());
        batch.setResourceTexture(DEFERRED_BUFFER_NORMAL_UNIT, framebufferCache->getDeferredNormalTexture());
        batch.setResourceTexture(DEFERRED_BUFFER_EMISSIVE_UNIT, framebufferCache->getDeferredSpecularTexture());
        batch.setResourceTexture(DEFERRED_BUFFER_DEPTH_UNIT, framebufferCache->getPrimaryDepthTexture());

        // FIXME: Different render modes should have different tasks
        if (args->_renderMode == RenderArgs::DEFAULT_RENDER_MODE && _ambientOcclusionEnabled) {
            batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, framebufferCache->getOcclusionTexture());
        } else {
            // need to assign the white texture if ao is off
            batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, textureCache->getWhiteTexture());
        }

        assert(_lightStage.lights.size() > 0);
        const auto& globalShadow = _lightStage.lights[0]->shadow;

        // Bind the shadow buffer
        batch.setResourceTexture(SHADOW_MAP_UNIT, globalShadow.map);

        // The main viewport is assumed to be the mono viewport (or the two stereo eyes laid out side by side within that viewport)
        auto monoViewport = args->_viewport;
        float sMin = args->_viewport.x / (float)framebufferSize.width();
        float sWidth = args->_viewport.z / (float)framebufferSize.width();
        float tMin = args->_viewport.y / (float)framebufferSize.height();
        float tHeight = args->_viewport.w / (float)framebufferSize.height();

        // The view frustum is the mono frustum base
        auto viewFrustum = args->_viewFrustum;

        // Eval the mono projection
        mat4 monoProjMat;
        viewFrustum->evalProjectionMatrix(monoProjMat);

        // The mono view transform
        Transform monoViewTransform;
        viewFrustum->evalViewTransform(monoViewTransform);

        // The mono view matrix coming from the mono view transform
        glm::mat4 monoViewMat;
        monoViewTransform.getMatrix(monoViewMat);

        // Are we running in stereo?
        bool isStereo = args->_context->isStereo();
        int numPasses = 1;

        mat4 projMats[2];
        Transform viewTransforms[2];
        ivec4 viewports[2];
        vec4 clipQuad[2];
        vec2 screenBottomLeftCorners[2];
        vec2 screenTopRightCorners[2];
        vec4 fetchTexcoordRects[2];

        DeferredTransform deferredTransforms[2];
        auto geometryCache = DependencyManager::get<GeometryCache>();

        if (isStereo) {
            numPasses = 2;

            mat4 eyeViews[2];
            args->_context->getStereoProjections(projMats);
            args->_context->getStereoViews(eyeViews);

            float halfWidth = 0.5f * sWidth;

            for (int i = 0; i < numPasses; i++) {
                // In stereo, the two sides are laid out side by side in the mono viewport, each at half width
                int sideWidth = monoViewport.z >> 1;
                viewports[i] = ivec4(monoViewport.x + (i * sideWidth), monoViewport.y, sideWidth, monoViewport.w);

                deferredTransforms[i].projection = projMats[i];

                auto sideViewMat = monoViewMat * glm::inverse(eyeViews[i]);
                viewTransforms[i].evalFromRawMatrix(sideViewMat);
                deferredTransforms[i].viewInverse = sideViewMat;

                deferredTransforms[i].stereoSide = (i == 0 ? -1.0f : 1.0f);

                clipQuad[i] = glm::vec4(sMin + i * halfWidth, tMin, halfWidth, tHeight);
                screenBottomLeftCorners[i] = glm::vec2(-1.0f + i * 1.0f, -1.0f);
                screenTopRightCorners[i] = glm::vec2(i * 1.0f, 1.0f);

                fetchTexcoordRects[i] = glm::vec4(sMin + i * halfWidth, tMin, halfWidth, tHeight);
            }
        } else {

            viewports[0] = monoViewport;
            projMats[0] = monoProjMat;

            deferredTransforms[0].projection = monoProjMat;
     
            deferredTransforms[0].viewInverse = monoViewMat;
            viewTransforms[0] = monoViewTransform;

            deferredTransforms[0].stereoSide = 0.0f;

            clipQuad[0] = glm::vec4(sMin, tMin, sWidth, tHeight);
            screenBottomLeftCorners[0] = glm::vec2(-1.0f, -1.0f);
            screenTopRightCorners[0] = glm::vec2(1.0f, 1.0f);

            fetchTexcoordRects[0] = glm::vec4(sMin, tMin, sWidth, tHeight);
        }

        auto eyePoint = viewFrustum->getPosition();
        float nearRadius = glm::distance(eyePoint, viewFrustum->getNearTopLeft());


        for (int side = 0; side < numPasses; side++) {
            // Render in this side's viewport
            batch.setViewportTransform(viewports[side]);
            batch.setStateScissorRect(viewports[side]);

            // Sync and Bind the correct DeferredTransform ubo
            _deferredTransformBuffer[side]._buffer->setSubData(0, sizeof(DeferredTransform), (const gpu::Byte*) &deferredTransforms[side]);
            batch.setUniformBuffer(_directionalLightLocations->deferredTransformBuffer, _deferredTransformBuffer[side]);

            glm::vec2 topLeft(-1.0f, -1.0f);
            glm::vec2 bottomRight(1.0f, 1.0f);
            glm::vec2 texCoordTopLeft(clipQuad[side].x, clipQuad[side].y);
            glm::vec2 texCoordBottomRight(clipQuad[side].x + clipQuad[side].z, clipQuad[side].y + clipQuad[side].w);

            // First Global directional light and ambient pass
            {
                auto& program = _shadowMapEnabled ? _directionalLightShadow : _directionalLight;
                LightLocationsPtr locations = _shadowMapEnabled ? _directionalLightShadowLocations : _directionalLightLocations;
                const auto& keyLight = _allocatedLights[_globalLights.front()];

                // Setup the global directional pass pipeline
                {
                    if (_shadowMapEnabled) {
                        if (keyLight->getAmbientMap()) {
                            program = _directionalSkyboxLightShadow;
                            locations = _directionalSkyboxLightShadowLocations;
                        } else {
                            program = _directionalAmbientSphereLightShadow;
                            locations = _directionalAmbientSphereLightShadowLocations;
                        }
                    } else {
                        if (keyLight->getAmbientMap()) {
                            program = _directionalSkyboxLight;
                            locations = _directionalSkyboxLightLocations;
                        } else {
                            program = _directionalAmbientSphereLight;
                            locations = _directionalAmbientSphereLightLocations;
                        }
                    }

                    if (locations->shadowTransformBuffer >= 0) {
                        batch.setUniformBuffer(locations->shadowTransformBuffer, globalShadow.getBuffer());
                    }
                    batch.setPipeline(program);
                }

                { // Setup the global lighting
                    setupKeyLightBatch(batch, locations->lightBufferUnit, SKYBOX_MAP_UNIT);
                }

                {
                    batch.setModelTransform(Transform());
                    batch.setProjectionTransform(glm::mat4());
                    batch.setViewTransform(Transform());

                    glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);
                    geometryCache->renderQuad(batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight, color);
                }

                if (keyLight->getAmbientMap()) {
                    batch.setResourceTexture(SKYBOX_MAP_UNIT, nullptr);
                }
            }

            auto texcoordMat = glm::mat4();
            // Previous single-viewport version:
            //   texcoordMat[0] = glm::vec4(sWidth / 2.0f, 0.0f, 0.0f, sMin + sWidth / 2.0f);
            //   texcoordMat[1] = glm::vec4(0.0f, tHeight / 2.0f, 0.0f, tMin + tHeight / 2.0f);
            texcoordMat[0] = glm::vec4(fetchTexcoordRects[side].z / 2.0f, 0.0f, 0.0f, fetchTexcoordRects[side].x + fetchTexcoordRects[side].z / 2.0f);
            texcoordMat[1] = glm::vec4(0.0f, fetchTexcoordRects[side].w / 2.0f, 0.0f, fetchTexcoordRects[side].y + fetchTexcoordRects[side].w / 2.0f);
            texcoordMat[2] = glm::vec4(0.0f, 0.0f, 1.0f, 0.0f);
            texcoordMat[3] = glm::vec4(0.0f, 0.0f, 0.0f, 1.0f);

            // enlarge the scales slightly to account for tessellation
            const float SCALE_EXPANSION = 0.05f;


            batch.setProjectionTransform(projMats[side]);
            batch.setViewTransform(viewTransforms[side]);

            // Splat Point lights
            if (!_pointLights.empty()) {
                batch.setPipeline(_pointLight);

                batch._glUniformMatrix4fv(_pointLightLocations->texcoordMat, 1, false, reinterpret_cast< const float* >(&texcoordMat));

                for (auto lightID : _pointLights) {
                    auto& light = _allocatedLights[lightID];
                    // IN DEBUG: light->setShowContour(true);
                    batch.setUniformBuffer(_pointLightLocations->lightBufferUnit, light->getSchemaBuffer());

                    float expandedRadius = light->getMaximumRadius() * (1.0f + SCALE_EXPANSION);
                    // TODO: We shouldn't have to do this test and use a different volume geometry when inside the light volume;
                    // we should be able to draw the same geometry using DepthClamp, but for an unknown reason it's not working...
                    if (glm::distance(eyePoint, glm::vec3(light->getPosition())) < expandedRadius + nearRadius) {
                        Transform model;
                        model.setTranslation(glm::vec3(0.0f, 0.0f, -1.0f));
                        batch.setModelTransform(model);
                        batch.setViewTransform(Transform());
                        batch.setProjectionTransform(glm::mat4());

                        glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);
                        DependencyManager::get<GeometryCache>()->renderQuad(batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight, color);
                
                        batch.setProjectionTransform(projMats[side]);
                        batch.setViewTransform(viewTransforms[side]);
                    } else {
                        Transform model;
                        model.setTranslation(glm::vec3(light->getPosition().x, light->getPosition().y, light->getPosition().z));
                        batch.setModelTransform(model.postScale(expandedRadius));
                        batch._glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
                        geometryCache->renderSphere(batch);
                    }
                }
            }
    
            // Splat spot lights
            if (!_spotLights.empty()) {
                batch.setPipeline(_spotLight);

                batch._glUniformMatrix4fv(_spotLightLocations->texcoordMat, 1, false, reinterpret_cast< const float* >(&texcoordMat));

                for (auto lightID : _spotLights) {
                    auto light = _allocatedLights[lightID];
                    // IN DEBUG: light->setShowContour(true);
                    batch.setUniformBuffer(_spotLightLocations->lightBufferUnit, light->getSchemaBuffer());

                    auto eyeLightPos = eyePoint - light->getPosition();
                    auto eyeHalfPlaneDistance = glm::dot(eyeLightPos, light->getDirection());

                    const float TANGENT_LENGTH_SCALE = 0.666f;
                    glm::vec4 coneParam(light->getSpotAngleCosSin(), TANGENT_LENGTH_SCALE * tanf(0.5f * light->getSpotAngle()), 1.0f);

                    float expandedRadius = light->getMaximumRadius() * (1.0f + SCALE_EXPANSION);
                    // TODO: We shouldn't have to do this test and use a different volume geometry when inside the light volume;
                    // we should be able to draw the same geometry using DepthClamp, but for an unknown reason it's not working...
                    if ((eyeHalfPlaneDistance > -nearRadius) &&
                        (glm::distance(eyePoint, glm::vec3(light->getPosition())) < expandedRadius + nearRadius)) {
                        coneParam.w = 0.0f;
                        batch._glUniform4fv(_spotLightLocations->coneParam, 1, reinterpret_cast< const float* >(&coneParam));

                        Transform model;
                        model.setTranslation(glm::vec3(0.0f, 0.0f, -1.0f));
                        batch.setModelTransform(model);
                        batch.setViewTransform(Transform());
                        batch.setProjectionTransform(glm::mat4());
                
                        glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);
                        DependencyManager::get<GeometryCache>()->renderQuad(batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight, color);
                
                        batch.setProjectionTransform(projMats[side]);
                        batch.setViewTransform(viewTransforms[side]);
                    } else {
                        coneParam.w = 1.0f;
                        batch._glUniform4fv(_spotLightLocations->coneParam, 1, reinterpret_cast< const float* >(&coneParam));

                        Transform model;
                        model.setTranslation(light->getPosition());
                        model.postRotate(light->getOrientation());
                        model.postScale(glm::vec3(expandedRadius, expandedRadius, expandedRadius));

                        batch.setModelTransform(model);
                        auto mesh = getSpotLightMesh();

                        batch.setIndexBuffer(mesh->getIndexBuffer());
                        batch.setInputBuffer(0, mesh->getVertexBuffer());
                        batch.setInputFormat(mesh->getVertexFormat());

                        auto& part = mesh->getPartBuffer().get<model::Mesh::Part>();

                        batch.drawIndexed(model::Mesh::topologyToPrimitive(part._topology), part._numIndices, part._startIndex);
                    }
                }
            }
        }

        // Probably not necessary in the long run because the gpu layer would unbind this texture if it is used as a render target
        batch.setResourceTexture(DEFERRED_BUFFER_COLOR_UNIT, nullptr);
        batch.setResourceTexture(DEFERRED_BUFFER_NORMAL_UNIT, nullptr);
        batch.setResourceTexture(DEFERRED_BUFFER_EMISSIVE_UNIT, nullptr);
        batch.setResourceTexture(DEFERRED_BUFFER_DEPTH_UNIT, nullptr);
        batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, nullptr);
        batch.setResourceTexture(SHADOW_MAP_UNIT, nullptr);
        batch.setResourceTexture(SKYBOX_MAP_UNIT, nullptr);

        batch.setUniformBuffer(_directionalLightLocations->deferredTransformBuffer, nullptr);
    });
}
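
The per-side texcoordMat packs the fetch rectangle into an affine matrix that the light shaders use to remap quad coordinates into G-buffer texcoords. A hedged sketch of the same assembly as a helper, taking rect = (sMin, tMin, sWidth, tHeight); the function name is illustrative only:

// Hypothetical helper mirroring the texcoordMat assembly above.
static glm::mat4 makeTexcoordMat(const glm::vec4& rect) {
    glm::mat4 m;
    m[0] = glm::vec4(rect.z / 2.0f, 0.0f, 0.0f, rect.x + rect.z / 2.0f);
    m[1] = glm::vec4(0.0f, rect.w / 2.0f, 0.0f, rect.y + rect.w / 2.0f);
    m[2] = glm::vec4(0.0f, 0.0f, 1.0f, 0.0f);
    m[3] = glm::vec4(0.0f, 0.0f, 0.0f, 1.0f);
    return m;
}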
Example #4
void AmbientOcclusion::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext) {
    assert(renderContext->getArgs());
    assert(renderContext->getArgs()->_viewFrustum);

    RenderArgs* args = renderContext->getArgs();
    gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
        auto framebufferCache = DependencyManager::get<FramebufferCache>();
        QSize framebufferSize = framebufferCache->getFrameBufferSize();
        float fbWidth = framebufferSize.width();
        float fbHeight = framebufferSize.height();
        float sMin = args->_viewport.x / fbWidth;
        float sWidth = args->_viewport.z / fbWidth;
        float tMin = args->_viewport.y / fbHeight;
        float tHeight = args->_viewport.w / fbHeight;


        glm::mat4 projMat;
        Transform viewMat;
        args->_viewFrustum->evalProjectionMatrix(projMat);
        args->_viewFrustum->evalViewTransform(viewMat);
        batch.setProjectionTransform(projMat);
        batch.setViewTransform(viewMat);
        batch.setModelTransform(Transform());

        // Occlusion step
        getOcclusionPipeline();
        batch.setResourceTexture(0, framebufferCache->getPrimaryDepthTexture());
        batch.setResourceTexture(1, framebufferCache->getDeferredNormalTexture());
        _occlusionBuffer->setRenderBuffer(0, _occlusionTexture);
        batch.setFramebuffer(_occlusionBuffer);

        // Occlusion uniforms
        g_scale = 1.0f;
        g_bias = 1.0f;
        g_sample_rad = 1.0f;
        g_intensity = 1.0f;

        // Bind the first gpu::Pipeline we need - for calculating occlusion buffer
        batch.setPipeline(getOcclusionPipeline());
        batch._glUniform1f(_gScaleLoc, g_scale);
        batch._glUniform1f(_gBiasLoc, g_bias);
        batch._glUniform1f(_gSampleRadiusLoc, g_sample_rad);
        batch._glUniform1f(_gIntensityLoc, g_intensity);

        // setup uniforms for unpacking a view-space position from the depth buffer
        // This is code taken from DeferredLightEffect.render() method in DeferredLightingEffect.cpp.
        // DeferredBuffer.slh shows how the unpacking is done and what variables are needed.

        // initialize the view-space unpacking uniforms using frustum data
        float left, right, bottom, top, nearVal, farVal;
        glm::vec4 nearClipPlane, farClipPlane;

        args->_viewFrustum->computeOffAxisFrustum(left, right, bottom, top, nearVal, farVal, nearClipPlane, farClipPlane);

        float depthScale = (farVal - nearVal) / farVal;
        float nearScale = -1.0f / nearVal;
        float depthTexCoordScaleS = (right - left) * nearScale / sWidth;
        float depthTexCoordScaleT = (top - bottom) * nearScale / tHeight;
        float depthTexCoordOffsetS = left * nearScale - sMin * depthTexCoordScaleS;
        float depthTexCoordOffsetT = bottom * nearScale - tMin * depthTexCoordScaleT;

        // now set the position-unpacking uniforms
        batch._glUniform1f(_nearLoc, nearVal);
        batch._glUniform1f(_depthScaleLoc, depthScale);
        batch._glUniform2f(_depthTexCoordOffsetLoc, depthTexCoordOffsetS, depthTexCoordOffsetT);
        batch._glUniform2f(_depthTexCoordScaleLoc, depthTexCoordScaleS, depthTexCoordScaleT);

        batch._glUniform2f(_renderTargetResLoc, fbWidth, fbHeight);
        batch._glUniform2f(_renderTargetResInvLoc, 1.0f / fbWidth, 1.0f / fbHeight);

        glm::vec4 color(0.0f, 0.0f, 0.0f, 1.0f);
        glm::vec2 bottomLeft(-1.0f, -1.0f);
        glm::vec2 topRight(1.0f, 1.0f);
        glm::vec2 texCoordTopLeft(0.0f, 0.0f);
        glm::vec2 texCoordBottomRight(1.0f, 1.0f);
        DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);

        // Vertical blur step
        getVBlurPipeline();
        batch.setResourceTexture(0, _occlusionTexture);
        _vBlurBuffer->setRenderBuffer(0, _vBlurTexture);
        batch.setFramebuffer(_vBlurBuffer);

        // Bind the second gpu::Pipeline we need - for calculating blur buffer
        batch.setPipeline(getVBlurPipeline());

        DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);

        // Horizontal blur step
        getHBlurPipeline();
        batch.setResourceTexture(0, _vBlurTexture);
        _hBlurBuffer->setRenderBuffer(0, _hBlurTexture);
        batch.setFramebuffer(_hBlurBuffer);

        // Bind the third gpu::Pipeline we need - for calculating blur buffer
        batch.setPipeline(getHBlurPipeline());

        DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);

        // Blend step
        getBlendPipeline();
        batch.setResourceTexture(0, _hBlurTexture);
        batch.setFramebuffer(framebufferCache->getDeferredFramebuffer());

        // Bind the fourth gpu::Pipeline we need - for blending the primary color buffer with blurred occlusion texture
        batch.setPipeline(getBlendPipeline());

        DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);
    });
}
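
The occlusion pass reproduces the depth-unpacking math also used by DeferredLightingEffect (see Example #6): depthScale = (far - near) / far, and the texcoord scale/offset terms map normalized screen coordinates back to the off-axis frustum extents at the near plane. A hedged sketch grouping those terms into one helper; the struct and function names are illustrative, not from the source:

// Hypothetical grouping of the depth-unpacking terms computed above.
struct DepthUnpack {
    float nearVal;            // near clip distance
    float depthScale;         // (far - near) / far
    glm::vec2 texCoordScale;  // (depthTexCoordScaleS, depthTexCoordScaleT)
    glm::vec2 texCoordOffset; // (depthTexCoordOffsetS, depthTexCoordOffsetT)
};

// rect = (sMin, tMin, sWidth, tHeight) of the viewport in normalized framebuffer coordinates.
static DepthUnpack computeDepthUnpack(float left, float right, float bottom, float top,
                                      float nearVal, float farVal, const glm::vec4& rect) {
    DepthUnpack d;
    d.nearVal = nearVal;
    d.depthScale = (farVal - nearVal) / farVal;
    float nearScale = -1.0f / nearVal;
    d.texCoordScale = glm::vec2((right - left) * nearScale / rect.z,
                                (top - bottom) * nearScale / rect.w);
    d.texCoordOffset = glm::vec2(left * nearScale - rect.x * d.texCoordScale.x,
                                 bottom * nearScale - rect.y * d.texCoordScale.y);
    return d;
}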
Example #5
void GraphicsEngine::render_performFrame() {
    // Some plugins process message events, allowing paintGL to be called reentrantly.

    _renderFrameCount++;

    auto lastPaintBegin = usecTimestampNow();
    PROFILE_RANGE_EX(render, __FUNCTION__, 0xff0000ff, (uint64_t)_renderFrameCount);
    PerformanceTimer perfTimer("paintGL");

    DisplayPluginPointer displayPlugin;
    {
        PROFILE_RANGE(render, "/getActiveDisplayPlugin");
        displayPlugin = qApp->getActiveDisplayPlugin();
    }

    {
        PROFILE_RANGE(render, "/pluginBeginFrameRender");
        // If a display plugin loses its underlying support, it
        // needs to be able to signal us to not use it
        if (!displayPlugin->beginFrameRender(_renderFrameCount)) {
            QMetaObject::invokeMethod(qApp, "updateDisplayMode");
            return;
        }
    }

    RenderArgs renderArgs;
    glm::mat4  HMDSensorPose;
    glm::mat4  eyeToWorld;
    glm::mat4  sensorToWorld;
    ViewFrustum viewFrustum;

    bool isStereo;
    glm::mat4  stereoEyeOffsets[2];
    glm::mat4  stereoEyeProjections[2];

    {
        QMutexLocker viewLocker(&_renderArgsMutex);
        renderArgs = _appRenderArgs._renderArgs;

        // don't render if there is no context.
        if (!_appRenderArgs._renderArgs._context) {
            return;
        }

        HMDSensorPose = _appRenderArgs._headPose;
        eyeToWorld = _appRenderArgs._eyeToWorld;
        sensorToWorld = _appRenderArgs._sensorToWorld;
        isStereo = _appRenderArgs._isStereo;
        for_each_eye([&](Eye eye) {
            stereoEyeOffsets[eye] = _appRenderArgs._eyeOffsets[eye];
            stereoEyeProjections[eye] = _appRenderArgs._eyeProjections[eye];
        });
        viewFrustum = _appRenderArgs._renderArgs.getViewFrustum();
    }

    {
        PROFILE_RANGE(render, "/gpuContextReset");
        getGPUContext()->beginFrame(_appRenderArgs._view, HMDSensorPose);
        // Reset the gpu::Context Stages
        // Back to the default framebuffer;
        gpu::doInBatch("Application_render::gpuContextReset", getGPUContext(), [&](gpu::Batch& batch) {
            batch.resetStages();
        });

        if (isStereo) {
            renderArgs._context->enableStereo(true);
            renderArgs._context->setStereoProjections(stereoEyeProjections);
            renderArgs._context->setStereoViews(stereoEyeOffsets);
        }
    }

    gpu::FramebufferPointer finalFramebuffer;
    QSize finalFramebufferSize;
    {
        PROFILE_RANGE(render, "/getOutputFramebuffer");
        // Primary rendering pass
        auto framebufferCache = DependencyManager::get<FramebufferCache>();
        finalFramebufferSize = framebufferCache->getFrameBufferSize();
        // Final framebuffer that will be handed to the display-plugin
        finalFramebuffer = framebufferCache->getFramebuffer();
    }

    if (!_programsCompiled.load()) {
        gpu::doInBatch("splashFrame", _gpuContext, [&](gpu::Batch& batch) {
            batch.setFramebuffer(finalFramebuffer);
            batch.enableSkybox(true);
            batch.enableStereo(isStereo);
            batch.setViewportTransform({ 0, 0, finalFramebuffer->getSize() });
            _splashScreen->render(batch, viewFrustum);
        });
    } else {
        {
            PROFILE_RANGE(render, "/renderOverlay");
            PerformanceTimer perfTimer("renderOverlay");
            // NOTE: There is no batch associated with this renderArgs
            // the ApplicationOverlay class assumes its viewport is set up to be the device size
            renderArgs._viewport = glm::ivec4(0, 0, qApp->getDeviceSize());
            qApp->getApplicationOverlay().renderOverlay(&renderArgs);
        }

        {
            PROFILE_RANGE(render, "/updateCompositor");
            qApp->getApplicationCompositor().setFrameInfo(_renderFrameCount, eyeToWorld, sensorToWorld);
        }

        {
            PROFILE_RANGE(render, "/runRenderFrame");
            renderArgs._hudOperator = displayPlugin->getHUDOperator();
            renderArgs._hudTexture = qApp->getApplicationOverlay().getOverlayTexture();
            renderArgs._blitFramebuffer = finalFramebuffer;
            render_runRenderFrame(&renderArgs);
        }
    }

    auto frame = getGPUContext()->endFrame();
    frame->frameIndex = _renderFrameCount;
    frame->framebuffer = finalFramebuffer;
    frame->framebufferRecycler = [](const gpu::FramebufferPointer& framebuffer) {
        auto frameBufferCache = DependencyManager::get<FramebufferCache>();
        if (frameBufferCache) {
            frameBufferCache->releaseFramebuffer(framebuffer);
        }
    };
    // deliver final scene rendering commands to the display plugin
    {
        PROFILE_RANGE(render, "/pluginOutput");
        PerformanceTimer perfTimer("pluginOutput");
        _renderLoopCounter.increment();
        displayPlugin->submitFrame(frame);
    }

    // Reset the framebuffer and stereo state
    renderArgs._blitFramebuffer.reset();
    renderArgs._context->enableStereo(false);

#if !defined(DISABLE_QML)
    {
        auto stats = Stats::getInstance();
        if (stats) {
            stats->setRenderDetails(renderArgs._details);
        }
    }
#endif

    uint64_t lastPaintDuration = usecTimestampNow() - lastPaintBegin;
    _frameTimingsScriptingInterface.addValue(lastPaintDuration);
}
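
Stripped of profiling, overlay, and stereo setup, the frame lifecycle this function implements is: begin the GPU frame, reset the context stages, record the scene into the final framebuffer, end the frame, and hand it to the display plugin. A condensed sketch using only calls that appear above (locking and error handling omitted; not a drop-in replacement):

// Condensed per-frame flow, for orientation only.
auto gpuContext = getGPUContext();
gpuContext->beginFrame(_appRenderArgs._view, HMDSensorPose);
gpu::doInBatch("gpuContextReset", gpuContext, [&](gpu::Batch& batch) {
    batch.resetStages();
});
renderArgs._blitFramebuffer = finalFramebuffer;
render_runRenderFrame(&renderArgs);   // records the scene into finalFramebuffer
auto frame = gpuContext->endFrame();
frame->frameIndex = _renderFrameCount;
frame->framebuffer = finalFramebuffer;
displayPlugin->submitFrame(frame);    // hand off to the display plugin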
Example #6
void DeferredLightingEffect::render(RenderArgs* args) {
    // perform deferred lighting, rendering to free fbo
    glDisable(GL_BLEND);
    glDisable(GL_LIGHTING);
    glDisable(GL_DEPTH_TEST);
    glDisable(GL_COLOR_MATERIAL);
    glDepthMask(false);

    auto textureCache = DependencyManager::get<TextureCache>();
    
    glBindFramebuffer(GL_FRAMEBUFFER, 0);

    QSize framebufferSize = textureCache->getFrameBufferSize();
    
    // binding the first framebuffer
    auto freeFBO = DependencyManager::get<GlowEffect>()->getFreeFramebuffer();
    glBindFramebuffer(GL_FRAMEBUFFER, gpu::GLBackend::getFramebufferID(freeFBO));
 
    glClear(GL_COLOR_BUFFER_BIT);
   // glEnable(GL_FRAMEBUFFER_SRGB);

   // glBindTexture(GL_TEXTURE_2D, primaryFBO->texture());
    glBindTexture(GL_TEXTURE_2D, textureCache->getPrimaryColorTextureID());
    
    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_2D, textureCache->getPrimaryNormalTextureID());
    
    glActiveTexture(GL_TEXTURE2);
    glBindTexture(GL_TEXTURE_2D, textureCache->getPrimarySpecularTextureID());
    
    glActiveTexture(GL_TEXTURE3);
    glBindTexture(GL_TEXTURE_2D, textureCache->getPrimaryDepthTextureID());
        
    // get the viewport side (left, right, both)
    int viewport[4];
    glGetIntegerv(GL_VIEWPORT, viewport);
    const int VIEWPORT_X_INDEX = 0;
    const int VIEWPORT_Y_INDEX = 1;
    const int VIEWPORT_WIDTH_INDEX = 2;
    const int VIEWPORT_HEIGHT_INDEX = 3;

    float sMin = viewport[VIEWPORT_X_INDEX] / (float)framebufferSize.width();
    float sWidth = viewport[VIEWPORT_WIDTH_INDEX] / (float)framebufferSize.width();
    float tMin = viewport[VIEWPORT_Y_INDEX] / (float)framebufferSize.height();
    float tHeight = viewport[VIEWPORT_HEIGHT_INDEX] / (float)framebufferSize.height();

    bool useSkyboxCubemap = (_skybox) && (_skybox->getCubemap());

    // Fetch the ViewMatrix;
    glm::mat4 invViewMat;
    _viewState->getViewTransform().getMatrix(invViewMat);

    ProgramObject* program = &_directionalLight;
    const LightLocations* locations = &_directionalLightLocations;
    bool shadowsEnabled = _viewState->getShadowsEnabled();
    if (shadowsEnabled) {
        glActiveTexture(GL_TEXTURE4);
        glBindTexture(GL_TEXTURE_2D, textureCache->getShadowDepthTextureID());
        
        program = &_directionalLightShadowMap;
        locations = &_directionalLightShadowMapLocations;
        if (_viewState->getCascadeShadowsEnabled()) {
            program = &_directionalLightCascadedShadowMap;
            locations = &_directionalLightCascadedShadowMapLocations;
            if (useSkyboxCubemap) {
                program = &_directionalSkyboxLightCascadedShadowMap;
                locations = &_directionalSkyboxLightCascadedShadowMapLocations;
            } else if (_ambientLightMode > -1) {
                program = &_directionalAmbientSphereLightCascadedShadowMap;
                locations = &_directionalAmbientSphereLightCascadedShadowMapLocations;
            }
            program->bind();
            program->setUniform(locations->shadowDistances, _viewState->getShadowDistances());
        
        } else {
            if (useSkyboxCubemap) {
                program = &_directionalSkyboxLightShadowMap;
                locations = &_directionalSkyboxLightShadowMapLocations;
            } else if (_ambientLightMode > -1) {
                program = &_directionalAmbientSphereLightShadowMap;
                locations = &_directionalAmbientSphereLightShadowMapLocations;
            }
            program->bind();
        }
        program->setUniformValue(locations->shadowScale,
            1.0f / textureCache->getShadowFramebuffer()->getWidth());
        
    } else {
        if (useSkyboxCubemap) {
                program = &_directionalSkyboxLight;
                locations = &_directionalSkyboxLightLocations;
        } else if (_ambientLightMode > -1) {
            program = &_directionalAmbientSphereLight;
            locations = &_directionalAmbientSphereLightLocations;
        }
        program->bind();
    }

    {
        auto globalLight = _allocatedLights[_globalLights.front()];
    
        if (locations->ambientSphere >= 0) {
            gpu::SphericalHarmonics sh = globalLight->getAmbientSphere();
            if (useSkyboxCubemap && _skybox->getCubemap()->getIrradiance()) {
                sh = (*_skybox->getCubemap()->getIrradiance());
            }
            for (int i = 0; i < gpu::SphericalHarmonics::NUM_COEFFICIENTS; i++) {
                program->setUniformValue(locations->ambientSphere + i, *(((QVector4D*) &sh) + i)); 
            }
        }
    
        if (useSkyboxCubemap) {
            glActiveTexture(GL_TEXTURE5);
            glBindTexture(GL_TEXTURE_CUBE_MAP, gpu::GLBackend::getTextureID(_skybox->getCubemap()));
        }

        if (locations->lightBufferUnit >= 0) {
            gpu::Batch batch;
            batch.setUniformBuffer(locations->lightBufferUnit, globalLight->getSchemaBuffer());
            gpu::GLBackend::renderBatch(batch);
        }
        
        if (_atmosphere && (locations->atmosphereBufferUnit >= 0)) {
            gpu::Batch batch;
            batch.setUniformBuffer(locations->atmosphereBufferUnit, _atmosphere->getDataBuffer());
            gpu::GLBackend::renderBatch(batch);
        }
        glUniformMatrix4fv(locations->invViewMat, 1, false, reinterpret_cast< const GLfloat* >(&invViewMat));
    }

    float left, right, bottom, top, nearVal, farVal;
    glm::vec4 nearClipPlane, farClipPlane;
    _viewState->computeOffAxisFrustum(left, right, bottom, top, nearVal, farVal, nearClipPlane, farClipPlane);
    program->setUniformValue(locations->nearLocation, nearVal);
    float depthScale = (farVal - nearVal) / farVal;
    program->setUniformValue(locations->depthScale, depthScale);
    float nearScale = -1.0f / nearVal;
    float depthTexCoordScaleS = (right - left) * nearScale / sWidth;
    float depthTexCoordScaleT = (top - bottom) * nearScale / tHeight;
    float depthTexCoordOffsetS = left * nearScale - sMin * depthTexCoordScaleS;
    float depthTexCoordOffsetT = bottom * nearScale - tMin * depthTexCoordScaleT;
    program->setUniformValue(locations->depthTexCoordOffset, depthTexCoordOffsetS, depthTexCoordOffsetT);
    program->setUniformValue(locations->depthTexCoordScale, depthTexCoordScaleS, depthTexCoordScaleT);
    
    renderFullscreenQuad(sMin, sMin + sWidth, tMin, tMin + tHeight);
    
    program->release();

    if (useSkyboxCubemap) {
        glBindTexture(GL_TEXTURE_CUBE_MAP, 0);
        if (!shadowsEnabled) {
            glActiveTexture(GL_TEXTURE3);
        }
    }

    if (shadowsEnabled) {
        glBindTexture(GL_TEXTURE_2D, 0);        
        glActiveTexture(GL_TEXTURE3);
    }
    
    // additive blending
    glEnable(GL_BLEND);
    glBlendFunc(GL_ONE, GL_ONE);
    
    glEnable(GL_CULL_FACE);
    
    glm::vec4 sCoefficients(sWidth / 2.0f, 0.0f, 0.0f, sMin + sWidth / 2.0f);
    glm::vec4 tCoefficients(0.0f, tHeight / 2.0f, 0.0f, tMin + tHeight / 2.0f);
    glTexGenfv(GL_S, GL_OBJECT_PLANE, (const GLfloat*)&sCoefficients);
    glTexGenfv(GL_T, GL_OBJECT_PLANE, (const GLfloat*)&tCoefficients);
    
    // enlarge the scales slightly to account for tessellation
    const float SCALE_EXPANSION = 0.05f;
    
    const glm::vec3& eyePoint = _viewState->getCurrentViewFrustum()->getPosition();
    float nearRadius = glm::distance(eyePoint, _viewState->getCurrentViewFrustum()->getNearTopLeft());

    auto geometryCache = DependencyManager::get<GeometryCache>();
    
    if (!_pointLights.empty()) {
        _pointLight.bind();
        _pointLight.setUniformValue(_pointLightLocations.nearLocation, nearVal);
        _pointLight.setUniformValue(_pointLightLocations.depthScale, depthScale);
        _pointLight.setUniformValue(_pointLightLocations.depthTexCoordOffset, depthTexCoordOffsetS, depthTexCoordOffsetT);
        _pointLight.setUniformValue(_pointLightLocations.depthTexCoordScale, depthTexCoordScaleS, depthTexCoordScaleT);

        for (auto lightID : _pointLights) {
            auto light = _allocatedLights[lightID];

            if (_pointLightLocations.lightBufferUnit >= 0) {
                gpu::Batch batch;
                batch.setUniformBuffer(_pointLightLocations.lightBufferUnit, light->getSchemaBuffer());
                gpu::GLBackend::renderBatch(batch);
            }
            glUniformMatrix4fv(_pointLightLocations.invViewMat, 1, false, reinterpret_cast< const GLfloat* >(&invViewMat));

            glPushMatrix();
            
            float expandedRadius = light->getMaximumRadius() * (1.0f + SCALE_EXPANSION);
            if (glm::distance(eyePoint, glm::vec3(light->getPosition())) < expandedRadius + nearRadius) {
                glLoadIdentity();
                glTranslatef(0.0f, 0.0f, -1.0f);
                
                glMatrixMode(GL_PROJECTION);
                glPushMatrix();
                glLoadIdentity();
                
                renderFullscreenQuad();
            
                glPopMatrix();
                glMatrixMode(GL_MODELVIEW);
                
            } else {
                glTranslatef(light->getPosition().x, light->getPosition().y, light->getPosition().z);   
                geometryCache->renderSphere(expandedRadius, 32, 32, glm::vec4(1.0f, 1.0f, 1.0f, 1.0f));
            }
            
            glPopMatrix();
        }
        _pointLights.clear();
        
        _pointLight.release();
    }
    
    if (!_spotLights.empty()) {
        _spotLight.bind();
        _spotLight.setUniformValue(_spotLightLocations.nearLocation, nearVal);
        _spotLight.setUniformValue(_spotLightLocations.depthScale, depthScale);
        _spotLight.setUniformValue(_spotLightLocations.depthTexCoordOffset, depthTexCoordOffsetS, depthTexCoordOffsetT);
        _spotLight.setUniformValue(_spotLightLocations.depthTexCoordScale, depthTexCoordScaleS, depthTexCoordScaleT);
        
        for (auto lightID : _spotLights) {
            auto light = _allocatedLights[lightID];

            if (_spotLightLocations.lightBufferUnit >= 0) {
                gpu::Batch batch;
                batch.setUniformBuffer(_spotLightLocations.lightBufferUnit, light->getSchemaBuffer());
                gpu::GLBackend::renderBatch(batch);
            }
            glUniformMatrix4fv(_spotLightLocations.invViewMat, 1, false, reinterpret_cast< const GLfloat* >(&invViewMat));

            glPushMatrix();
            
            float expandedRadius = light->getMaximumRadius() * (1.0f + SCALE_EXPANSION);
            float edgeRadius = expandedRadius / glm::cos(light->getSpotAngle());
            if (glm::distance(eyePoint, glm::vec3(light->getPosition())) < edgeRadius + nearRadius) {
                glLoadIdentity();
                glTranslatef(0.0f, 0.0f, -1.0f);
                
                glMatrixMode(GL_PROJECTION);
                glPushMatrix();
                glLoadIdentity();
                
                renderFullscreenQuad();
                
                glPopMatrix();
                glMatrixMode(GL_MODELVIEW);
                
            } else {
                glTranslatef(light->getPosition().x, light->getPosition().y, light->getPosition().z);
                glm::quat spotRotation = rotationBetween(glm::vec3(0.0f, 0.0f, -1.0f), light->getDirection());
                glm::vec3 axis = glm::axis(spotRotation);
                glRotatef(glm::degrees(glm::angle(spotRotation)), axis.x, axis.y, axis.z);   
                glTranslatef(0.0f, 0.0f, -light->getMaximumRadius() * (1.0f + SCALE_EXPANSION * 0.5f));  
                geometryCache->renderCone(expandedRadius * glm::tan(light->getSpotAngle()),
                    expandedRadius, 32, 1);
            }
            
            glPopMatrix();
        }
        _spotLights.clear();
        
        _spotLight.release();
    }
    
    glBindTexture(GL_TEXTURE_2D, 0);
        
    glActiveTexture(GL_TEXTURE2);
    glBindTexture(GL_TEXTURE_2D, 0);
    
    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_2D, 0);
    
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, 0);
  //  glDisable(GL_FRAMEBUFFER_SRGB);
    
    // End of the Lighting pass
}
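
The GL_OBJECT_PLANE coefficients above define an affine map from the quad's object-space x and y in [-1, 1] to the lit sub-rectangle of the framebuffer: s(x) = (sWidth / 2) * x + (sMin + sWidth / 2), so x = -1 yields sMin and x = +1 yields sMin + sWidth (and likewise for t). A sketch of the same mapping in plain code, for illustration only:

// Equivalent of the fixed-function texgen planes set above.
static glm::vec2 objectToTexcoord(float x, float y,
                                  float sMin, float sWidth, float tMin, float tHeight) {
    return glm::vec2(sWidth * 0.5f * x + (sMin + sWidth * 0.5f),
                     tHeight * 0.5f * y + (tMin + tHeight * 0.5f));
}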
Example #7
void Application::paintGL() {
    // Some plugins process message events, allowing paintGL to be called reentrantly.
    if (_aboutToQuit || _window->isMinimized()) {
        return;
    }

    _renderFrameCount++;
    _lastTimeRendered.start();

    auto lastPaintBegin = usecTimestampNow();
    PROFILE_RANGE_EX(render, __FUNCTION__, 0xff0000ff, (uint64_t)_renderFrameCount);
    PerformanceTimer perfTimer("paintGL");

    if (nullptr == _displayPlugin) {
        return;
    }

    DisplayPluginPointer displayPlugin;
    {
        PROFILE_RANGE(render, "/getActiveDisplayPlugin");
        displayPlugin = getActiveDisplayPlugin();
    }

    {
        PROFILE_RANGE(render, "/pluginBeginFrameRender");
        // If a display plugin loses its underlying support, it
        // needs to be able to signal us to not use it
        if (!displayPlugin->beginFrameRender(_renderFrameCount)) {
            QMetaObject::invokeMethod(this, "updateDisplayMode");
            return;
        }
    }

    RenderArgs renderArgs;
    glm::mat4  HMDSensorPose;
    glm::mat4  eyeToWorld;
    glm::mat4  sensorToWorld;

    bool isStereo;
    glm::mat4  stereoEyeOffsets[2];
    glm::mat4  stereoEyeProjections[2];

    {
        QMutexLocker viewLocker(&_renderArgsMutex);
        renderArgs = _appRenderArgs._renderArgs;

        // don't render if there is no context.
        if (!_appRenderArgs._renderArgs._context) {
            return;
        }

        HMDSensorPose = _appRenderArgs._headPose;
        eyeToWorld = _appRenderArgs._eyeToWorld;
        sensorToWorld = _appRenderArgs._sensorToWorld;
        isStereo = _appRenderArgs._isStereo;
        for_each_eye([&](Eye eye) {
            stereoEyeOffsets[eye] = _appRenderArgs._eyeOffsets[eye];
            stereoEyeProjections[eye] = _appRenderArgs._eyeProjections[eye];
        });
    }

    {
        PROFILE_RANGE(render, "/gpuContextReset");
        _gpuContext->beginFrame(HMDSensorPose);
        // Reset the gpu::Context Stages
        // Back to the default framebuffer;
        gpu::doInBatch(_gpuContext, [&](gpu::Batch& batch) {
            batch.resetStages();
        });
    }


    {
        PROFILE_RANGE(render, "/renderOverlay");
        PerformanceTimer perfTimer("renderOverlay");
        // NOTE: There is no batch associated with this renderArgs
        // the ApplicationOverlay class assumes its viewport is set up to be the device size
        renderArgs._viewport = glm::ivec4(0, 0, getDeviceSize());
        _applicationOverlay.renderOverlay(&renderArgs);
    }

    {
        PROFILE_RANGE(render, "/updateCompositor");
        getApplicationCompositor().setFrameInfo(_renderFrameCount, eyeToWorld, sensorToWorld);
    }

    gpu::FramebufferPointer finalFramebuffer;
    QSize finalFramebufferSize;
    {
        PROFILE_RANGE(render, "/getOutputFramebuffer");
        // Primary rendering pass
        auto framebufferCache = DependencyManager::get<FramebufferCache>();
        finalFramebufferSize = framebufferCache->getFrameBufferSize();
        // Final framebuffer that will be handed to the display-plugin
        finalFramebuffer = framebufferCache->getFramebuffer();
    }

    {
        if (isStereo) {
            renderArgs._context->enableStereo(true);
            renderArgs._context->setStereoProjections(stereoEyeProjections);
            renderArgs._context->setStereoViews(stereoEyeOffsets);
        }

        renderArgs._hudOperator = displayPlugin->getHUDOperator();
        renderArgs._hudTexture = _applicationOverlay.getOverlayTexture();
        renderArgs._blitFramebuffer = finalFramebuffer;
        runRenderFrame(&renderArgs);
    }

    auto frame = _gpuContext->endFrame();
    frame->frameIndex = _renderFrameCount;
    frame->framebuffer = finalFramebuffer;
    frame->framebufferRecycler = [](const gpu::FramebufferPointer& framebuffer) {
        DependencyManager::get<FramebufferCache>()->releaseFramebuffer(framebuffer);
    };
    // deliver final scene rendering commands to the display plugin
    {
        PROFILE_RANGE(render, "/pluginOutput");
        PerformanceTimer perfTimer("pluginOutput");
        _renderLoopCounter.increment();
        displayPlugin->submitFrame(frame);
    }

    // Reset the framebuffer and stereo state
    renderArgs._blitFramebuffer.reset();
    renderArgs._context->enableStereo(false);

    {
        Stats::getInstance()->setRenderDetails(renderArgs._details);
    }

    uint64_t lastPaintDuration = usecTimestampNow() - lastPaintBegin;
    _frameTimingsScriptingInterface.addValue(lastPaintDuration);
}
Example #8
void Antialiasing::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext, const gpu::FramebufferPointer& sourceBuffer) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());

    RenderArgs* args = renderContext->args;

    if (args->_renderMode == RenderArgs::MIRROR_RENDER_MODE) {
        return;
    }

    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
        batch.enableStereo(false);
        batch.setViewportTransform(args->_viewport);

        // FIXME: Need to simplify this code to avoid all the GeometryCache calls; this is purely pixel manipulation
        auto framebufferCache = DependencyManager::get<FramebufferCache>();
        QSize framebufferSize = framebufferCache->getFrameBufferSize();
        float fbWidth = framebufferSize.width();
        float fbHeight = framebufferSize.height();
        // float sMin = args->_viewport.x / fbWidth;
        // float sWidth = args->_viewport.z / fbWidth;
        // float tMin = args->_viewport.y / fbHeight;
        // float tHeight = args->_viewport.w / fbHeight;

        glm::mat4 projMat;
        Transform viewMat;
        args->getViewFrustum().evalProjectionMatrix(projMat);
        args->getViewFrustum().evalViewTransform(viewMat);
        batch.setProjectionTransform(projMat);
        batch.setViewTransform(viewMat, true);
        batch.setModelTransform(Transform());

        // FXAA step
        getAntialiasingPipeline();
        batch.setResourceTexture(0, sourceBuffer->getRenderBuffer(0));
        batch.setFramebuffer(_antialiasingBuffer);
        batch.setPipeline(getAntialiasingPipeline());

        // initialize the view-space unpacking uniforms using frustum data
        float left, right, bottom, top, nearVal, farVal;
        glm::vec4 nearClipPlane, farClipPlane;

        args->getViewFrustum().computeOffAxisFrustum(left, right, bottom, top, nearVal, farVal, nearClipPlane, farClipPlane);

        // float depthScale = (farVal - nearVal) / farVal;
        // float nearScale = -1.0f / nearVal;
        // float depthTexCoordScaleS = (right - left) * nearScale / sWidth;
        // float depthTexCoordScaleT = (top - bottom) * nearScale / tHeight;
        // float depthTexCoordOffsetS = left * nearScale - sMin * depthTexCoordScaleS;
        // float depthTexCoordOffsetT = bottom * nearScale - tMin * depthTexCoordScaleT;

        batch._glUniform2f(_texcoordOffsetLoc, 1.0f / fbWidth, 1.0f / fbHeight);

        glm::vec4 color(0.0f, 0.0f, 0.0f, 1.0f);
        glm::vec2 bottomLeft(-1.0f, -1.0f);
        glm::vec2 topRight(1.0f, 1.0f);
        glm::vec2 texCoordTopLeft(0.0f, 0.0f);
        glm::vec2 texCoordBottomRight(1.0f, 1.0f);
        DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);


        // Blend step
        getBlendPipeline();
        batch.setResourceTexture(0, _antialiasingTexture);
        batch.setFramebuffer(sourceBuffer);
        batch.setPipeline(getBlendPipeline());

        DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);
    });
}
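
The antialiasing pass ping-pongs: FXAA reads sourceBuffer's color attachment while rendering into _antialiasingBuffer, then the blend pipeline writes the result back over sourceBuffer. The only uniform it needs is the source texel size used for the FXAA neighborhood taps; restated as a brief sketch:

// One-pixel step in normalized texture coordinates, used to sample neighboring pixels.
glm::vec2 texcoordOffset(1.0f / fbWidth, 1.0f / fbHeight);
batch._glUniform2f(_texcoordOffsetLoc, texcoordOffset.x, texcoordOffset.y);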