Example #1
void RearMirrorTools::displayIcon(QRect bounds, QRect iconBounds, GLuint textureId, bool selected) {

    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();
    
    glOrtho(bounds.left(), bounds.right(), bounds.bottom(), bounds.top(), -1.0, 1.0);
    glDisable(GL_DEPTH_TEST);
    glDisable(GL_LIGHTING);
    glEnable(GL_TEXTURE_2D);

    if (selected) {
        glColor3f(.5f, .5f, .5f);
    } else {
        glColor3f(1, 1, 1);
    }
    
    glBindTexture(GL_TEXTURE_2D, textureId);
   
    glm::vec2 topLeft(iconBounds.left(), iconBounds.top());
    glm::vec2 bottomRight(iconBounds.right(), iconBounds.bottom());
    glm::vec2 texCoordTopLeft(0.0f, 1.0f);
    glm::vec2 texCoordBottomRight(1.0f, 0.0f);

    DependencyManager::get<GeometryCache>()->renderQuad(topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight);
    
    glPopMatrix();
    
    glMatrixMode(GL_MODELVIEW);
    
    glBindTexture(GL_TEXTURE_2D, 0);
    glDisable(GL_TEXTURE_2D);
}
Example #2
void Image3DOverlay::render(RenderArgs* args) {
    if (!_isLoaded) {
        _isLoaded = true;
        _texture = DependencyManager::get<TextureCache>()->getTexture(_url);
    }

    if (!_visible || !getParentVisible() || !_texture || !_texture->isLoaded()) {
        return;
    }

    Q_ASSERT(args->_batch);
    gpu::Batch* batch = args->_batch;

    float imageWidth = _texture->getWidth();
    float imageHeight = _texture->getHeight();

    QRect fromImage;
    if (_fromImage.isNull()) {
        fromImage.setX(0);
        fromImage.setY(0);
        fromImage.setWidth(imageWidth);
        fromImage.setHeight(imageHeight);
    } else {
        float scaleX = imageWidth / _texture->getOriginalWidth();
        float scaleY = imageHeight / _texture->getOriginalHeight();

        fromImage.setX(scaleX * _fromImage.x());
        fromImage.setY(scaleY * _fromImage.y());
        fromImage.setWidth(scaleX * _fromImage.width());
        fromImage.setHeight(scaleY * _fromImage.height());
    }

    float maxSize = glm::max(fromImage.width(), fromImage.height());
    float x = fromImage.width() / (2.0f * maxSize);
    float y = -fromImage.height() / (2.0f * maxSize);

    glm::vec2 topLeft(-x, -y);
    glm::vec2 bottomRight(x, y);
    glm::vec2 texCoordTopLeft(fromImage.x() / imageWidth, fromImage.y() / imageHeight);
    glm::vec2 texCoordBottomRight((fromImage.x() + fromImage.width()) / imageWidth,
                                  (fromImage.y() + fromImage.height()) / imageHeight);

    const float MAX_COLOR = 255.0f;
    xColor color = getColor();
    float alpha = getAlpha();

    applyTransformTo(_transform, true);
    Transform transform = _transform;
    transform.postScale(glm::vec3(getDimensions(), 1.0f));

    batch->setModelTransform(transform);
    batch->setResourceTexture(0, _texture->getGPUTexture());
    
    DependencyManager::get<GeometryCache>()->renderQuad(
        *batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight,
        glm::vec4(color.red / MAX_COLOR, color.green / MAX_COLOR, color.blue / MAX_COLOR, alpha)
    );

    batch->setResourceTexture(0, args->_whiteTexture); // restore default white color after me
}
Example #3
void renderFullscreenQuad(float sMin, float sMax, float tMin, float tMax) {
    glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);
    glm::vec2 topLeft(-1.0f, -1.0f);
    glm::vec2 bottomRight(1.0f, 1.0f);
    glm::vec2 texCoordTopLeft(sMin, tMin);
    glm::vec2 texCoordBottomRight(sMax, tMax);

    DependencyManager::get<GeometryCache>()->renderQuad(topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight, color);
}
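For comparison with the batch-based calls in Examples #2 and #6, a minimal sketch of the same fullscreen pass recorded into a gpu::Batch (the helper name and the geometryId parameter are illustrative, not from the original code; it assumes the renderQuad overload taking a batch, two corners, two texcoords, a color, and a cached ID previously obtained from GeometryCache::allocateID() as in Example #4):

void renderFullscreenQuadBatched(gpu::Batch& batch, int geometryId,
                                 float sMin, float sMax, float tMin, float tMax) {
    // Clip-space corners of the fullscreen quad and the requested texcoord window.
    glm::vec2 bottomLeft(-1.0f, -1.0f);
    glm::vec2 topRight(1.0f, 1.0f);
    glm::vec2 texCoordTopLeft(sMin, tMin);
    glm::vec2 texCoordBottomRight(sMax, tMax);
    glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);

    // Same quad as above, but recorded into the batch and reusing a cached geometry ID.
    DependencyManager::get<GeometryCache>()->renderQuad(
        batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color, geometryId);
}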
Example #4
void CameraToolBox::render(int x, int y, bool boxed) {
    glEnable(GL_TEXTURE_2D);
    
    if (!_enabledTexture) {
        _enabledTexture = DependencyManager::get<TextureCache>()->getImageTexture(PathUtils::resourcesPath() + "images/face.svg");
    }
    if (!_mutedTexture) {
        _mutedTexture = DependencyManager::get<TextureCache>()->getImageTexture(PathUtils::resourcesPath() + "images/face-mute.svg");
    }
    
    const int MUTE_ICON_SIZE = 24;
    _iconBounds = QRect(x, y, MUTE_ICON_SIZE, MUTE_ICON_SIZE);
    float iconColor = 1.0f;
    if (!Menu::getInstance()->isOptionChecked(MenuOption::MuteFaceTracking)) {
        glBindTexture(GL_TEXTURE_2D, gpu::GLBackend::getTextureID(_enabledTexture));
    } else {
        glBindTexture(GL_TEXTURE_2D, gpu::GLBackend::getTextureID(_mutedTexture));
        
        // Make muted icon pulsate
        static const float PULSE_MIN = 0.4f;
        static const float PULSE_MAX = 1.0f;
        static const float PULSE_FREQUENCY = 1.0f; // in Hz
        qint64 now = usecTimestampNow();
        if (now - _iconPulseTimeReference > (qint64)USECS_PER_SECOND) {
            // Prevents t from getting too big, which would diminish glm::cos precision
            _iconPulseTimeReference = now - ((now - _iconPulseTimeReference) % USECS_PER_SECOND);
        }
        float t = (float)(now - _iconPulseTimeReference) / (float)USECS_PER_SECOND;
        float pulseFactor = (glm::cos(t * PULSE_FREQUENCY * 2.0f * PI) + 1.0f) / 2.0f;
        iconColor = PULSE_MIN + (PULSE_MAX - PULSE_MIN) * pulseFactor;
    }
    
    glm::vec4 quadColor(iconColor, iconColor, iconColor, 1.0f);

    glm::vec2 topLeft(_iconBounds.left(), _iconBounds.top());
    glm::vec2 bottomRight(_iconBounds.right(), _iconBounds.bottom());
    glm::vec2 texCoordTopLeft(1,1);
    glm::vec2 texCoordBottomRight(0,0);
    
    if (_boxQuadID == GeometryCache::UNKNOWN_ID) {
        _boxQuadID = DependencyManager::get<GeometryCache>()->allocateID();
    }

    DependencyManager::get<GeometryCache>()->renderQuad(topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight, quadColor, _boxQuadID);
    
    glDisable(GL_TEXTURE_2D);
}
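The pulsation in Example #4 is a cosine wave remapped onto a brightness range; as an isolated sketch using the same constants (the function name is illustrative, and PI is assumed to be defined as in the original):

float pulseBrightness(float t) {
    // t is the elapsed time in seconds since the pulse reference point.
    static const float PULSE_MIN = 0.4f;
    static const float PULSE_MAX = 1.0f;
    static const float PULSE_FREQUENCY = 1.0f; // in Hz
    // cos() swings over [-1, 1]; remap to [0, 1], then to [PULSE_MIN, PULSE_MAX].
    float pulseFactor = (glm::cos(t * PULSE_FREQUENCY * 2.0f * PI) + 1.0f) / 2.0f;
    return PULSE_MIN + (PULSE_MAX - PULSE_MIN) * pulseFactor;
}

At t = 0 this returns PULSE_MAX (1.0), at t = 0.5 s it bottoms out at PULSE_MIN (0.4), and at t = 1 s the cycle repeats.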
Example #5
void DeferredLightingEffect::render(const render::RenderContextPointer& renderContext) {
    auto args = renderContext->args;
    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
        
        // Allocate the parameters buffer used by all the deferred shaders
        if (!_deferredTransformBuffer[0]._buffer) {
            DeferredTransform parameters;
            _deferredTransformBuffer[0] = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(DeferredTransform), (const gpu::Byte*) &parameters));
            _deferredTransformBuffer[1] = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(DeferredTransform), (const gpu::Byte*) &parameters));
        }

        // Framebuffer copy operations cannot function as multipass stereo operations.  
        batch.enableStereo(false);

        // perform deferred lighting, rendering to free fbo
        auto framebufferCache = DependencyManager::get<FramebufferCache>();
        auto textureCache = DependencyManager::get<TextureCache>();
    
        QSize framebufferSize = framebufferCache->getFrameBufferSize();
    
        // binding the first framebuffer
        auto lightingFBO = framebufferCache->getLightingFramebuffer();
        batch.setFramebuffer(lightingFBO);

        batch.setViewportTransform(args->_viewport);
        batch.setStateScissorRect(args->_viewport);

        // Bind the G-Buffer surfaces
        batch.setResourceTexture(DEFERRED_BUFFER_COLOR_UNIT, framebufferCache->getDeferredColorTexture());
        batch.setResourceTexture(DEFERRED_BUFFER_NORMAL_UNIT, framebufferCache->getDeferredNormalTexture());
        batch.setResourceTexture(DEFERRED_BUFFER_EMISSIVE_UNIT, framebufferCache->getDeferredSpecularTexture());
        batch.setResourceTexture(DEFERRED_BUFFER_DEPTH_UNIT, framebufferCache->getPrimaryDepthTexture());

        // FIXME: Different render modes should have different tasks
        if (args->_renderMode == RenderArgs::DEFAULT_RENDER_MODE && _ambientOcclusionEnabled) {
            batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, framebufferCache->getOcclusionTexture());
        } else {
            // need to assign the white texture if ao is off
            batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, textureCache->getWhiteTexture());
        }

        assert(_lightStage.lights.size() > 0);
        const auto& globalShadow = _lightStage.lights[0]->shadow;

        // Bind the shadow buffer
        batch.setResourceTexture(SHADOW_MAP_UNIT, globalShadow.map);

        // The main viewport is assumed to be the mono viewport (or the 2 stereo faces side by side within that viewport)
        auto monoViewport = args->_viewport;
        float sMin = args->_viewport.x / (float)framebufferSize.width();
        float sWidth = args->_viewport.z / (float)framebufferSize.width();
        float tMin = args->_viewport.y / (float)framebufferSize.height();
        float tHeight = args->_viewport.w / (float)framebufferSize.height();

        // The view frustum is the mono frustum base
        auto viewFrustum = args->_viewFrustum;

        // Eval the mono projection
        mat4 monoProjMat;
        viewFrustum->evalProjectionMatrix(monoProjMat);

        // The mono view transform
        Transform monoViewTransform;
        viewFrustum->evalViewTransform(monoViewTransform);

        // The mono view matrix coming from the mono view transform
        glm::mat4 monoViewMat;
        monoViewTransform.getMatrix(monoViewMat);

        // Running in stereo?
        bool isStereo = args->_context->isStereo();
        int numPasses = 1;

        mat4 projMats[2];
        Transform viewTransforms[2];
        ivec4 viewports[2];
        vec4 clipQuad[2];
        vec2 screenBottomLeftCorners[2];
        vec2 screenTopRightCorners[2];
        vec4 fetchTexcoordRects[2];

        DeferredTransform deferredTransforms[2];
        auto geometryCache = DependencyManager::get<GeometryCache>();

        if (isStereo) {
            numPasses = 2;

            mat4 eyeViews[2];
            args->_context->getStereoProjections(projMats);
            args->_context->getStereoViews(eyeViews);

            float halfWidth = 0.5f * sWidth;

            for (int i = 0; i < numPasses; i++) {
                // In stereo, the 2 sides are laid out side by side in the mono viewport and each is half the width
                int sideWidth = monoViewport.z >> 1;
                viewports[i] = ivec4(monoViewport.x + (i * sideWidth), monoViewport.y, sideWidth, monoViewport.w);

                deferredTransforms[i].projection = projMats[i];

                auto sideViewMat = monoViewMat * glm::inverse(eyeViews[i]);
                viewTransforms[i].evalFromRawMatrix(sideViewMat);
                deferredTransforms[i].viewInverse = sideViewMat;

                deferredTransforms[i].stereoSide = (i == 0 ? -1.0f : 1.0f);

                clipQuad[i] = glm::vec4(sMin + i * halfWidth, tMin, halfWidth, tHeight);
                screenBottomLeftCorners[i] = glm::vec2(-1.0f + i * 1.0f, -1.0f);
                screenTopRightCorners[i] = glm::vec2(i * 1.0f, 1.0f);

                fetchTexcoordRects[i] = glm::vec4(sMin + i * halfWidth, tMin, halfWidth, tHeight);
            }
        } else {

            viewports[0] = monoViewport;
            projMats[0] = monoProjMat;

            deferredTransforms[0].projection = monoProjMat;
     
            deferredTransforms[0].viewInverse = monoViewMat;
            viewTransforms[0] = monoViewTransform;

            deferredTransforms[0].stereoSide = 0.0f;

            clipQuad[0] = glm::vec4(sMin, tMin, sWidth, tHeight);
            screenBottomLeftCorners[0] = glm::vec2(-1.0f, -1.0f);
            screenTopRightCorners[0] = glm::vec2(1.0f, 1.0f);

            fetchTexcoordRects[0] = glm::vec4(sMin, tMin, sWidth, tHeight);
        }

        auto eyePoint = viewFrustum->getPosition();
        float nearRadius = glm::distance(eyePoint, viewFrustum->getNearTopLeft());


        for (int side = 0; side < numPasses; side++) {
            // Render in this side's viewport
            batch.setViewportTransform(viewports[side]);
            batch.setStateScissorRect(viewports[side]);

            // Sync and Bind the correct DeferredTransform ubo
            _deferredTransformBuffer[side]._buffer->setSubData(0, sizeof(DeferredTransform), (const gpu::Byte*) &deferredTransforms[side]);
            batch.setUniformBuffer(_directionalLightLocations->deferredTransformBuffer, _deferredTransformBuffer[side]);

            glm::vec2 topLeft(-1.0f, -1.0f);
            glm::vec2 bottomRight(1.0f, 1.0f);
            glm::vec2 texCoordTopLeft(clipQuad[side].x, clipQuad[side].y);
            glm::vec2 texCoordBottomRight(clipQuad[side].x + clipQuad[side].z, clipQuad[side].y + clipQuad[side].w);

            // First Global directional light and ambient pass
            {
                auto& program = _shadowMapEnabled ? _directionalLightShadow : _directionalLight;
                LightLocationsPtr locations = _shadowMapEnabled ? _directionalLightShadowLocations : _directionalLightLocations;
                const auto& keyLight = _allocatedLights[_globalLights.front()];

                // Setup the global directional pass pipeline
                {
                    if (_shadowMapEnabled) {
                        if (keyLight->getAmbientMap()) {
                            program = _directionalSkyboxLightShadow;
                            locations = _directionalSkyboxLightShadowLocations;
                        } else {
                            program = _directionalAmbientSphereLightShadow;
                            locations = _directionalAmbientSphereLightShadowLocations;
                        }
                    } else {
                        if (keyLight->getAmbientMap()) {
                            program = _directionalSkyboxLight;
                            locations = _directionalSkyboxLightLocations;
                        } else {
                            program = _directionalAmbientSphereLight;
                            locations = _directionalAmbientSphereLightLocations;
                        }
                    }

                    if (locations->shadowTransformBuffer >= 0) {
                        batch.setUniformBuffer(locations->shadowTransformBuffer, globalShadow.getBuffer());
                    }
                    batch.setPipeline(program);
                }

                { // Setup the global lighting
                    setupKeyLightBatch(batch, locations->lightBufferUnit, SKYBOX_MAP_UNIT);
                }

                {
                    batch.setModelTransform(Transform());
                    batch.setProjectionTransform(glm::mat4());
                    batch.setViewTransform(Transform());

                    glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);
                    geometryCache->renderQuad(batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight, color);
                }

                if (keyLight->getAmbientMap()) {
                    batch.setResourceTexture(SKYBOX_MAP_UNIT, nullptr);
                }
            }

            auto texcoordMat = glm::mat4();
            // texcoordMat[0] = glm::vec4(sWidth / 2.0f, 0.0f, 0.0f, sMin + sWidth / 2.0f);
            // texcoordMat[1] = glm::vec4(0.0f, tHeight / 2.0f, 0.0f, tMin + tHeight / 2.0f);
            texcoordMat[0] = glm::vec4(fetchTexcoordRects[side].z / 2.0f, 0.0f, 0.0f, fetchTexcoordRects[side].x + fetchTexcoordRects[side].z / 2.0f);
            texcoordMat[1] = glm::vec4(0.0f, fetchTexcoordRects[side].w / 2.0f, 0.0f, fetchTexcoordRects[side].y + fetchTexcoordRects[side].w / 2.0f);
            texcoordMat[2] = glm::vec4(0.0f, 0.0f, 1.0f, 0.0f);
            texcoordMat[3] = glm::vec4(0.0f, 0.0f, 0.0f, 1.0f);

            // enlarge the scales slightly to account for tessellation
            const float SCALE_EXPANSION = 0.05f;


            batch.setProjectionTransform(projMats[side]);
            batch.setViewTransform(viewTransforms[side]);

            // Splat Point lights
            if (!_pointLights.empty()) {
                batch.setPipeline(_pointLight);

                batch._glUniformMatrix4fv(_pointLightLocations->texcoordMat, 1, false, reinterpret_cast< const float* >(&texcoordMat));

                for (auto lightID : _pointLights) {
                    auto& light = _allocatedLights[lightID];
                    // IN DEBUG: light->setShowContour(true);
                    batch.setUniformBuffer(_pointLightLocations->lightBufferUnit, light->getSchemaBuffer());

                    float expandedRadius = light->getMaximumRadius() * (1.0f + SCALE_EXPANSION);
                    // TODO: We shouldn't have to do this test and use a different volume geometry when inside the light volume;
                    // we should be able to draw the same geometry using DepthClamp, but for an unknown reason it's not working...
                    if (glm::distance(eyePoint, glm::vec3(light->getPosition())) < expandedRadius + nearRadius) {
                        Transform model;
                        model.setTranslation(glm::vec3(0.0f, 0.0f, -1.0f));
                        batch.setModelTransform(model);
                        batch.setViewTransform(Transform());
                        batch.setProjectionTransform(glm::mat4());

                        glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);
                        DependencyManager::get<GeometryCache>()->renderQuad(batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight, color);
                
                        batch.setProjectionTransform(projMats[side]);
                        batch.setViewTransform(viewTransforms[side]);
                    } else {
                        Transform model;
                        model.setTranslation(glm::vec3(light->getPosition().x, light->getPosition().y, light->getPosition().z));
                        batch.setModelTransform(model.postScale(expandedRadius));
                        batch._glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
                        geometryCache->renderSphere(batch);
                    }
                }
            }
    
            // Splat spot lights
            if (!_spotLights.empty()) {
                batch.setPipeline(_spotLight);

                batch._glUniformMatrix4fv(_spotLightLocations->texcoordMat, 1, false, reinterpret_cast< const float* >(&texcoordMat));

                for (auto lightID : _spotLights) {
                    auto light = _allocatedLights[lightID];
                    // IN DEBUG: light->setShowContour(true);
                    batch.setUniformBuffer(_spotLightLocations->lightBufferUnit, light->getSchemaBuffer());

                    auto eyeLightPos = eyePoint - light->getPosition();
                    auto eyeHalfPlaneDistance = glm::dot(eyeLightPos, light->getDirection());

                    const float TANGENT_LENGTH_SCALE = 0.666f;
                    glm::vec4 coneParam(light->getSpotAngleCosSin(), TANGENT_LENGTH_SCALE * tanf(0.5f * light->getSpotAngle()), 1.0f);

                    float expandedRadius = light->getMaximumRadius() * (1.0f + SCALE_EXPANSION);
                    // TODO: We shouldn't have to do this test and use a different volume geometry when inside the light volume;
                    // we should be able to draw the same geometry using DepthClamp, but for an unknown reason it's not working...
                    if ((eyeHalfPlaneDistance > -nearRadius) &&
                        (glm::distance(eyePoint, glm::vec3(light->getPosition())) < expandedRadius + nearRadius)) {
                        coneParam.w = 0.0f;
                        batch._glUniform4fv(_spotLightLocations->coneParam, 1, reinterpret_cast< const float* >(&coneParam));

                        Transform model;
                        model.setTranslation(glm::vec3(0.0f, 0.0f, -1.0f));
                        batch.setModelTransform(model);
                        batch.setViewTransform(Transform());
                        batch.setProjectionTransform(glm::mat4());
                
                        glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);
                        DependencyManager::get<GeometryCache>()->renderQuad(batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight, color);
                
                        batch.setProjectionTransform(projMats[side]);
                        batch.setViewTransform(viewTransforms[side]);
                    } else {
                        coneParam.w = 1.0f;
                        batch._glUniform4fv(_spotLightLocations->coneParam, 1, reinterpret_cast< const float* >(&coneParam));

                        Transform model;
                        model.setTranslation(light->getPosition());
                        model.postRotate(light->getOrientation());
                        model.postScale(glm::vec3(expandedRadius, expandedRadius, expandedRadius));

                        batch.setModelTransform(model);
                        auto mesh = getSpotLightMesh();

                        batch.setIndexBuffer(mesh->getIndexBuffer());
                        batch.setInputBuffer(0, mesh->getVertexBuffer());
                        batch.setInputFormat(mesh->getVertexFormat());

                        auto& part = mesh->getPartBuffer().get<model::Mesh::Part>();

                        batch.drawIndexed(model::Mesh::topologyToPrimitive(part._topology), part._numIndices, part._startIndex);
                    }
                }
            }
        }

        // Probably not necessary in the long run because the gpu layer would unbind this texture if it is used as a render target
        batch.setResourceTexture(DEFERRED_BUFFER_COLOR_UNIT, nullptr);
        batch.setResourceTexture(DEFERRED_BUFFER_NORMAL_UNIT, nullptr);
        batch.setResourceTexture(DEFERRED_BUFFER_EMISSIVE_UNIT, nullptr);
        batch.setResourceTexture(DEFERRED_BUFFER_DEPTH_UNIT, nullptr);
        batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, nullptr);
        batch.setResourceTexture(SHADOW_MAP_UNIT, nullptr);
        batch.setResourceTexture(SKYBOX_MAP_UNIT, nullptr);

        batch.setUniformBuffer(_directionalLightLocations->deferredTransformBuffer, nullptr);
    });
}
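The texcoord matrix assembled inside the per-side loop of Example #5 can be written as a small helper; a sketch (the function name is an assumption; rect is an (x, y, width, height) rectangle in normalized texture space, exactly like fetchTexcoordRects[side], and the columns are filled in exactly as above):

glm::mat4 makeTexcoordMat(const glm::vec4& rect) {
    glm::mat4 m;
    // Half-extent scales with the rect center as offset, laid out as in Example #5
    // (uploaded as texcoordMat to the point/spot light pipelines).
    m[0] = glm::vec4(rect.z / 2.0f, 0.0f, 0.0f, rect.x + rect.z / 2.0f);
    m[1] = glm::vec4(0.0f, rect.w / 2.0f, 0.0f, rect.y + rect.w / 2.0f);
    m[2] = glm::vec4(0.0f, 0.0f, 1.0f, 0.0f);
    m[3] = glm::vec4(0.0f, 0.0f, 0.0f, 1.0f);
    return m;
}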
Example #6
void Antialiasing::run(const render::RenderContextPointer& renderContext, const gpu::FramebufferPointer& sourceBuffer) {
    assert(renderContext->args);
    assert(renderContext->args->hasViewFrustum());

    RenderArgs* args = renderContext->args;

    gpu::doInBatch("Antialiasing::run", args->_context, [&](gpu::Batch& batch) {
        batch.enableStereo(false);
        batch.setViewportTransform(args->_viewport);

        // FIXME: Need to simplify this code to avoid all the GeometryCache calls; this is purely pixel manipulation
        float fbWidth = renderContext->args->_viewport.z;
        float fbHeight = renderContext->args->_viewport.w;
        // float sMin = args->_viewport.x / fbWidth;
        // float sWidth = args->_viewport.z / fbWidth;
        // float tMin = args->_viewport.y / fbHeight;
        // float tHeight = args->_viewport.w / fbHeight;

        glm::mat4 projMat;
        Transform viewMat;
        args->getViewFrustum().evalProjectionMatrix(projMat);
        args->getViewFrustum().evalViewTransform(viewMat);
        batch.setProjectionTransform(projMat);
        batch.setViewTransform(viewMat, true);
        batch.setModelTransform(Transform());

        // FXAA step
        auto pipeline = getAntialiasingPipeline(renderContext->args);
        batch.setResourceTexture(0, sourceBuffer->getRenderBuffer(0));
        batch.setFramebuffer(_antialiasingBuffer);
        batch.setPipeline(pipeline);

        // initialize the view-space unpacking uniforms using frustum data
        float left, right, bottom, top, nearVal, farVal;
        glm::vec4 nearClipPlane, farClipPlane;

        args->getViewFrustum().computeOffAxisFrustum(left, right, bottom, top, nearVal, farVal, nearClipPlane, farClipPlane);

        // float depthScale = (farVal - nearVal) / farVal;
        // float nearScale = -1.0f / nearVal;
        // float depthTexCoordScaleS = (right - left) * nearScale / sWidth;
        // float depthTexCoordScaleT = (top - bottom) * nearScale / tHeight;
        // float depthTexCoordOffsetS = left * nearScale - sMin * depthTexCoordScaleS;
        // float depthTexCoordOffsetT = bottom * nearScale - tMin * depthTexCoordScaleT;

        batch._glUniform2f(_texcoordOffsetLoc, 1.0f / fbWidth, 1.0f / fbHeight);

        glm::vec4 color(0.0f, 0.0f, 0.0f, 1.0f);
        glm::vec2 bottomLeft(-1.0f, -1.0f);
        glm::vec2 topRight(1.0f, 1.0f);
        glm::vec2 texCoordTopLeft(0.0f, 0.0f);
        glm::vec2 texCoordBottomRight(1.0f, 1.0f);
        DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color, _geometryId);


        // Blend step
        batch.setResourceTexture(0, _antialiasingTexture);
        batch.setFramebuffer(sourceBuffer);
        batch.setPipeline(getBlendPipeline());

        DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color, _geometryId);
    });
}
Example #7
void ImageOverlay::render(RenderArgs* args) {
    if (!_isLoaded && _renderImage) {
        _isLoaded = true;
        _texture = DependencyManager::get<TextureCache>()->getTexture(_imageURL);
    }
    // If we are not visible or loaded, return.  If we are trying to render an
    // image but the texture hasn't loaded, return.
    if (!_visible || !_isLoaded || (_renderImage && !_texture->isLoaded())) {
        return;
    }

    auto geometryCache = DependencyManager::get<GeometryCache>();
    gpu::Batch& batch = *args->_batch;
    geometryCache->useSimpleDrawPipeline(batch);
    if (_renderImage) {
        batch.setResourceTexture(0, _texture->getGPUTexture());
    } else {
        batch.setResourceTexture(0, args->_whiteTexture);
    }

    const float MAX_COLOR = 255.0f;
    xColor color = getColor();
    float alpha = getAlpha();
    glm::vec4 quadColor(color.red / MAX_COLOR, color.green / MAX_COLOR, color.blue / MAX_COLOR, alpha);

    int left = _bounds.left();
    int right = _bounds.right() + 1;
    int top = _bounds.top();
    int bottom = _bounds.bottom() + 1;

    glm::vec2 topLeft(left, top);
    glm::vec2 bottomRight(right, bottom);

    batch.setModelTransform(Transform());

    // if for some reason our image has zero width or height, don't attempt to render it
    if (_renderImage) {
        float imageWidth = _texture->getWidth();
        float imageHeight = _texture->getHeight();
        if (imageWidth > 0 && imageHeight > 0) {
            QRect fromImage;
            if (_wantClipFromImage) {
                float scaleX = imageWidth / _texture->getOriginalWidth();
                float scaleY = imageHeight / _texture->getOriginalHeight();

                fromImage.setX(scaleX * _fromImage.x());
                fromImage.setY(scaleY * _fromImage.y());
                fromImage.setWidth(scaleX * _fromImage.width());
                fromImage.setHeight(scaleY * _fromImage.height());
            }
            else {
                fromImage.setX(0);
                fromImage.setY(0);
                fromImage.setWidth(imageWidth);
                fromImage.setHeight(imageHeight);
            }

            float x = fromImage.x() / imageWidth;
            float y = fromImage.y() / imageHeight;
            float w = fromImage.width() / imageWidth; // ?? is this what we want? not sure
            float h = fromImage.height() / imageHeight;

            glm::vec2 texCoordTopLeft(x, y);
            glm::vec2 texCoordBottomRight(x + w, y + h);
            glm::vec4 texcoordRect(texCoordTopLeft, w, h);

            DependencyManager::get<GeometryCache>()->renderQuad(batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight, quadColor);
        } else {
            DependencyManager::get<GeometryCache>()->renderQuad(batch, topLeft, bottomRight, quadColor);
        }
    } else {
        DependencyManager::get<GeometryCache>()->renderQuad(batch, topLeft, bottomRight, quadColor);
    }
}
Example #8
void AmbientOcclusion::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext) {
    assert(renderContext->getArgs());
    assert(renderContext->getArgs()->_viewFrustum);

    RenderArgs* args = renderContext->getArgs();
    gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
        auto framebufferCache = DependencyManager::get<FramebufferCache>();
        QSize framebufferSize = framebufferCache->getFrameBufferSize();
        float fbWidth = framebufferSize.width();
        float fbHeight = framebufferSize.height();
        float sMin = args->_viewport.x / fbWidth;
        float sWidth = args->_viewport.z / fbWidth;
        float tMin = args->_viewport.y / fbHeight;
        float tHeight = args->_viewport.w / fbHeight;


        glm::mat4 projMat;
        Transform viewMat;
        args->_viewFrustum->evalProjectionMatrix(projMat);
        args->_viewFrustum->evalViewTransform(viewMat);
        batch.setProjectionTransform(projMat);
        batch.setViewTransform(viewMat);
        batch.setModelTransform(Transform());

        // Occlusion step
        getOcclusionPipeline();
        batch.setResourceTexture(0, framebufferCache->getPrimaryDepthTexture());
        batch.setResourceTexture(1, framebufferCache->getDeferredNormalTexture());
        _occlusionBuffer->setRenderBuffer(0, _occlusionTexture);
        batch.setFramebuffer(_occlusionBuffer);

        // Occlusion uniforms
        g_scale = 1.0f;
        g_bias = 1.0f;
        g_sample_rad = 1.0f;
        g_intensity = 1.0f;

        // Bind the first gpu::Pipeline we need - for calculating occlusion buffer
        batch.setPipeline(getOcclusionPipeline());
        batch._glUniform1f(_gScaleLoc, g_scale);
        batch._glUniform1f(_gBiasLoc, g_bias);
        batch._glUniform1f(_gSampleRadiusLoc, g_sample_rad);
        batch._glUniform1f(_gIntensityLoc, g_intensity);

        // setup uniforms for unpacking a view-space position from the depth buffer
        // This is code taken from DeferredLightEffect.render() method in DeferredLightingEffect.cpp.
        // DeferredBuffer.slh shows how the unpacking is done and what variables are needed.

        // initialize the view-space unpacking uniforms using frustum data
        float left, right, bottom, top, nearVal, farVal;
        glm::vec4 nearClipPlane, farClipPlane;

        args->_viewFrustum->computeOffAxisFrustum(left, right, bottom, top, nearVal, farVal, nearClipPlane, farClipPlane);

        float depthScale = (farVal - nearVal) / farVal;
        float nearScale = -1.0f / nearVal;
        float depthTexCoordScaleS = (right - left) * nearScale / sWidth;
        float depthTexCoordScaleT = (top - bottom) * nearScale / tHeight;
        float depthTexCoordOffsetS = left * nearScale - sMin * depthTexCoordScaleS;
        float depthTexCoordOffsetT = bottom * nearScale - tMin * depthTexCoordScaleT;

        // now set the position-unpacking uniforms
        batch._glUniform1f(_nearLoc, nearVal);
        batch._glUniform1f(_depthScaleLoc, depthScale);
        batch._glUniform2f(_depthTexCoordOffsetLoc, depthTexCoordOffsetS, depthTexCoordOffsetT);
        batch._glUniform2f(_depthTexCoordScaleLoc, depthTexCoordScaleS, depthTexCoordScaleT);

        batch._glUniform2f(_renderTargetResLoc, fbWidth, fbHeight);
        batch._glUniform2f(_renderTargetResInvLoc, 1.0f / fbWidth, 1.0f / fbHeight);

        glm::vec4 color(0.0f, 0.0f, 0.0f, 1.0f);
        glm::vec2 bottomLeft(-1.0f, -1.0f);
        glm::vec2 topRight(1.0f, 1.0f);
        glm::vec2 texCoordTopLeft(0.0f, 0.0f);
        glm::vec2 texCoordBottomRight(1.0f, 1.0f);
        DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);

        // Vertical blur step
        getVBlurPipeline();
        batch.setResourceTexture(0, _occlusionTexture);
        _vBlurBuffer->setRenderBuffer(0, _vBlurTexture);
        batch.setFramebuffer(_vBlurBuffer);

        // Bind the second gpu::Pipeline we need - for calculating blur buffer
        batch.setPipeline(getVBlurPipeline());

        DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);

        // Horizontal blur step
        getHBlurPipeline();
        batch.setResourceTexture(0, _vBlurTexture);
        _hBlurBuffer->setRenderBuffer(0, _hBlurTexture);
        batch.setFramebuffer(_hBlurBuffer);

        // Bind the third gpu::Pipeline we need - for calculating blur buffer
        batch.setPipeline(getHBlurPipeline());

        DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);

        // Blend step
        getBlendPipeline();
        batch.setResourceTexture(0, _hBlurTexture);
        batch.setFramebuffer(framebufferCache->getDeferredFramebuffer());

        // Bind the fourth gpu::Pipeline we need - for blending the primary color buffer with blurred occlusion texture
        batch.setPipeline(getBlendPipeline());

        DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);
    });
}
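The view-space unpacking uniforms in Example #8 are computed from the off-axis frustum and the normalized viewport rect; as an isolated sketch of just that arithmetic (the struct and function names are assumptions; DeferredBuffer.slh, referenced in the original comments, defines how the shader consumes these values):

struct DepthUnpack {
    float depthScale;
    glm::vec2 texCoordScale;
    glm::vec2 texCoordOffset;
};

DepthUnpack computeDepthUnpack(float left, float right, float bottom, float top,
                               float nearVal, float farVal,
                               float sMin, float sWidth, float tMin, float tHeight) {
    DepthUnpack u;
    // Same arithmetic as Example #8: a depth range factor plus the scale/offset
    // that map a depth-buffer texcoord back to the near-plane extents.
    u.depthScale = (farVal - nearVal) / farVal;
    float nearScale = -1.0f / nearVal;
    u.texCoordScale = glm::vec2((right - left) * nearScale / sWidth,
                                (top - bottom) * nearScale / tHeight);
    u.texCoordOffset = glm::vec2(left * nearScale - sMin * u.texCoordScale.x,
                                 bottom * nearScale - tMin * u.texCoordScale.y);
    return u;
}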
Example #9
void Image3DOverlay::render(RenderArgs* args) {
    if (!_renderVisible || !getParentVisible() || !_texture || !_texture->isLoaded()) {
        return;
    }

    // Once the texture has loaded, check if we need to update the render item because of transparency
    if (!_textureIsLoaded && _texture && _texture->getGPUTexture()) {
        _textureIsLoaded = true;
        bool prevAlphaTexture = _alphaTexture;
        _alphaTexture = _texture->getGPUTexture()->getUsage().isAlpha();
        if (_alphaTexture != prevAlphaTexture) {
            auto itemID = getRenderItemID();
            if (render::Item::isValidID(itemID)) {
                render::ScenePointer scene = AbstractViewStateInterface::instance()->getMain3DScene();
                render::Transaction transaction;
                transaction.updateItem(itemID);
                scene->enqueueTransaction(transaction);
            }
        }
    }

    Q_ASSERT(args->_batch);
    gpu::Batch* batch = args->_batch;

    float imageWidth = _texture->getWidth();
    float imageHeight = _texture->getHeight();

    QRect fromImage;
    if (_fromImage.isNull()) {
        fromImage.setX(0);
        fromImage.setY(0);
        fromImage.setWidth(imageWidth);
        fromImage.setHeight(imageHeight);
    } else {
        float scaleX = imageWidth / _texture->getOriginalWidth();
        float scaleY = imageHeight / _texture->getOriginalHeight();

        fromImage.setX(scaleX * _fromImage.x());
        fromImage.setY(scaleY * _fromImage.y());
        fromImage.setWidth(scaleX * _fromImage.width());
        fromImage.setHeight(scaleY * _fromImage.height());
    }

    float maxSize = glm::max(fromImage.width(), fromImage.height());
    float x = _keepAspectRatio ? fromImage.width() / (2.0f * maxSize) : 0.5f;
    float y = _keepAspectRatio ? -fromImage.height() / (2.0f * maxSize) : -0.5f;

    glm::vec2 topLeft(-x, -y);
    glm::vec2 bottomRight(x, y);
    glm::vec2 texCoordTopLeft((fromImage.x() + 0.5f) / imageWidth, (fromImage.y() + 0.5f) / imageHeight);
    glm::vec2 texCoordBottomRight((fromImage.x() + fromImage.width() - 0.5f) / imageWidth,
                                  (fromImage.y() + fromImage.height() - 0.5f) / imageHeight);

    float alpha = getAlpha();
    glm::u8vec3 color = getColor();
    glm::vec4 imageColor(toGlm(color), alpha);

    batch->setModelTransform(getRenderTransform());
    batch->setResourceTexture(0, _texture->getGPUTexture());

    DependencyManager::get<GeometryCache>()->renderQuad(
        *batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight,
        imageColor, _geometryId
    );

    batch->setResourceTexture(0, nullptr); // restore default white color after me
}
Example #10
void ApplicationCompositor::renderControllerPointers(gpu::Batch& batch) {
    MyAvatar* myAvatar = DependencyManager::get<AvatarManager>()->getMyAvatar();

    //Static variables used for storing controller state
    static quint64 pressedTime[NUMBER_OF_RETICLES] = { 0ULL, 0ULL, 0ULL };
    static bool isPressed[NUMBER_OF_RETICLES] = { false, false, false };
    static bool stateWhenPressed[NUMBER_OF_RETICLES] = { false, false, false };

    const HandData* handData = DependencyManager::get<AvatarManager>()->getMyAvatar()->getHandData();

    for (unsigned int palmIndex = 2; palmIndex < 4; palmIndex++) {
        const int index = palmIndex - 1;

        const PalmData* palmData = NULL;

        if (palmIndex >= handData->getPalms().size()) {
            return;
        }

        if (handData->getPalms()[palmIndex].isActive()) {
            palmData = &handData->getPalms()[palmIndex];
        } else {
            continue;
        }

        int controllerButtons = palmData->getControllerButtons();

        //Check for if we should toggle or drag the magnification window
        if (controllerButtons & BUTTON_3) {
            if (isPressed[index] == false) {
                //We are now dragging the window
                isPressed[index] = true;
                //set the pressed time in us
                pressedTime[index] = usecTimestampNow();
                stateWhenPressed[index] = _magActive[index];
            }
        } else if (isPressed[index]) {
            isPressed[index] = false;
            //If the button was only pressed for < 250 ms
            //then disable it.

            const int MAX_BUTTON_PRESS_TIME = 250 * MSECS_TO_USECS;
            if (usecTimestampNow() < pressedTime[index] + MAX_BUTTON_PRESS_TIME) {
                _magActive[index] = !stateWhenPressed[index];
            }
        }

        //if we have the oculus, we should make the cursor smaller since it will be
        //magnified
        if (qApp->isHMDMode()) {

            QPoint point = getPalmClickLocation(palmData);

            _reticlePosition[index] = point;

            //When button 2 is pressed we drag the mag window
            if (isPressed[index]) {
                _magActive[index] = true;
            }

            // If oculus is enabled, we draw the crosshairs later
            continue;
        }

        auto canvasSize = qApp->getCanvasSize();
        int mouseX, mouseY;
        if (Menu::getInstance()->isOptionChecked(MenuOption::SixenseLasers)) {
            QPoint res = getPalmClickLocation(palmData);
            mouseX = res.x();
            mouseY = res.y();
        } else {
            // Get direction relative to avatar orientation
            glm::vec3 direction = glm::inverse(myAvatar->getOrientation()) * palmData->getFingerDirection();

            // Get the angles, scaled between (-0.5,0.5)
            float xAngle = (atan2(direction.z, direction.x) + M_PI_2);
            float yAngle = 0.5f - ((atan2(direction.z, direction.y) + M_PI_2));

            // Get the pixel range over which the xAngle and yAngle are scaled
            float cursorRange = canvasSize.x * SixenseManager::getInstance().getCursorPixelRangeMult();

            mouseX = (canvasSize.x / 2.0f + cursorRange * xAngle);
            mouseY = (canvasSize.y / 2.0f + cursorRange * yAngle);
        }

        //If the cursor is out of the screen then don't render it
        if (mouseX < 0 || mouseX >= (int)canvasSize.x || mouseY < 0 || mouseY >= (int)canvasSize.y) {
            _reticleActive[index] = false;
            continue;
        }
        _reticleActive[index] = true;


        const float reticleSize = 40.0f;

        mouseX -= reticleSize / 2.0f;
        mouseY += reticleSize / 2.0f;


        glm::vec2 topLeft(mouseX, mouseY);
        glm::vec2 bottomRight(mouseX + reticleSize, mouseY - reticleSize);
        glm::vec2 texCoordTopLeft(0.0f, 0.0f);
        glm::vec2 texCoordBottomRight(1.0f, 1.0f);

        DependencyManager::get<GeometryCache>()->renderQuad(topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight,
                                                            glm::vec4(RETICLE_COLOR[0], RETICLE_COLOR[1], RETICLE_COLOR[2], 1.0f));
        
    }
}
Example #11
void BillboardOverlay::render(RenderArgs* args) {
    if (!_isLoaded) {
        _isLoaded = true;
        _texture = DependencyManager::get<TextureCache>()->getTexture(_url);
    }

    if (!_visible || !_texture->isLoaded()) {
        return;
    }

    glm::quat rotation;
    if (_isFacingAvatar) {
        // rotate about vertical to face the camera
        rotation = args->_viewFrustum->getOrientation();
        rotation *= glm::angleAxis(glm::pi<float>(), IDENTITY_UP);
        rotation *= getRotation();
    } else {
        rotation = getRotation();
    }

    float imageWidth = _texture->getWidth();
    float imageHeight = _texture->getHeight();

    QRect fromImage;
    if (_fromImage.isNull()) {
        fromImage.setX(0);
        fromImage.setY(0);
        fromImage.setWidth(imageWidth);
        fromImage.setHeight(imageHeight);
    } else {
        float scaleX = imageWidth / _texture->getOriginalWidth();
        float scaleY = imageHeight / _texture->getOriginalHeight();

        fromImage.setX(scaleX * _fromImage.x());
        fromImage.setY(scaleY * _fromImage.y());
        fromImage.setWidth(scaleX * _fromImage.width());
        fromImage.setHeight(scaleY * _fromImage.height());
    }

    float maxSize = glm::max(fromImage.width(), fromImage.height());
    float x = fromImage.width() / (2.0f * maxSize);
    float y = -fromImage.height() / (2.0f * maxSize);

    glm::vec2 topLeft(-x, -y);
    glm::vec2 bottomRight(x, y);
    glm::vec2 texCoordTopLeft(fromImage.x() / imageWidth, fromImage.y() / imageHeight);
    glm::vec2 texCoordBottomRight((fromImage.x() + fromImage.width()) / imageWidth,
                                  (fromImage.y() + fromImage.height()) / imageHeight);

    const float MAX_COLOR = 255.0f;
    xColor color = getColor();
    float alpha = getAlpha();

    auto batch = args->_batch;

    if (batch) {
        Transform transform = _transform;
        transform.postScale(glm::vec3(getDimensions(), 1.0f));
        
        batch->setModelTransform(transform);
        batch->setUniformTexture(0, _texture->getGPUTexture());
        
        DependencyManager::get<GeometryCache>()->renderQuad(*batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight,
                                                            glm::vec4(color.red / MAX_COLOR, color.green / MAX_COLOR, color.blue / MAX_COLOR, alpha));
    
        batch->setUniformTexture(0, args->_whiteTexture); // restore default white color after me
    } else {
        glEnable(GL_ALPHA_TEST);
        glAlphaFunc(GL_GREATER, 0.5f);

        glEnable(GL_TEXTURE_2D);
        glDisable(GL_LIGHTING);

        glBindTexture(GL_TEXTURE_2D, _texture->getID());

        glPushMatrix(); {
            glTranslatef(getPosition().x, getPosition().y, getPosition().z);
            glm::vec3 axis = glm::axis(rotation);
            glRotatef(glm::degrees(glm::angle(rotation)), axis.x, axis.y, axis.z);
            glScalef(_dimensions.x, _dimensions.y, 1.0f);

            DependencyManager::get<GeometryCache>()->renderQuad(topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight,
                                                                glm::vec4(color.red / MAX_COLOR, color.green / MAX_COLOR, color.blue / MAX_COLOR, alpha));

        } glPopMatrix();

        glDisable(GL_TEXTURE_2D);
        glEnable(GL_LIGHTING);
        glDisable(GL_ALPHA_TEST);

        glBindTexture(GL_TEXTURE_2D, 0);
    }
}
Example #12
void AmbientOcclusion::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext) {
    assert(renderContext->args);
    assert(renderContext->args->_viewFrustum);
    RenderArgs* args = renderContext->args;

    gpu::Batch batch;

    glm::mat4 projMat;
    Transform viewMat;
    args->_viewFrustum->evalProjectionMatrix(projMat);
    args->_viewFrustum->evalViewTransform(viewMat);
    batch.setProjectionTransform(projMat);
    batch.setViewTransform(viewMat);
    batch.setModelTransform(Transform());

    // Occlusion step
    getOcclusionPipeline();
    batch.setResourceTexture(0, DependencyManager::get<FramebufferCache>()->getPrimaryDepthTexture());
    batch.setResourceTexture(1, DependencyManager::get<FramebufferCache>()->getPrimaryNormalTexture());
    _occlusionBuffer->setRenderBuffer(0, _occlusionTexture);
    batch.setFramebuffer(_occlusionBuffer);

    // Occlusion uniforms
    g_scale = 1.0f;
    g_bias = 1.0f;
    g_sample_rad = 1.0f;
    g_intensity = 1.0f;

    // Bind the first gpu::Pipeline we need - for calculating occlusion buffer
    batch.setPipeline(getOcclusionPipeline());
    batch._glUniform1f(_gScaleLoc, g_scale);
    batch._glUniform1f(_gBiasLoc, g_bias);
    batch._glUniform1f(_gSampleRadiusLoc, g_sample_rad);
    batch._glUniform1f(_gIntensityLoc, g_intensity);
    batch._glUniform1f(_bufferWidthLoc, DependencyManager::get<FramebufferCache>()->getFrameBufferSize().width());
    batch._glUniform1f(_bufferHeightLoc, DependencyManager::get<FramebufferCache>()->getFrameBufferSize().height());

    glm::vec4 color(0.0f, 0.0f, 0.0f, 1.0f);
    glm::vec2 bottomLeft(-1.0f, -1.0f);
    glm::vec2 topRight(1.0f, 1.0f);
    glm::vec2 texCoordTopLeft(0.0f, 0.0f);
    glm::vec2 texCoordBottomRight(1.0f, 1.0f);
    DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);

    // Vertical blur step
    getVBlurPipeline();
    batch.setResourceTexture(0, _occlusionTexture);
    _vBlurBuffer->setRenderBuffer(0, _vBlurTexture);
    batch.setFramebuffer(_vBlurBuffer);

    // Bind the second gpu::Pipeline we need - for calculating blur buffer
    batch.setPipeline(getVBlurPipeline());

    DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);

    // Horizontal blur step
    getHBlurPipeline();
    batch.setResourceTexture(0, _vBlurTexture);
    _hBlurBuffer->setRenderBuffer(0, _hBlurTexture);
    batch.setFramebuffer(_hBlurBuffer);

    // Bind the third gpu::Pipeline we need - for calculating blur buffer
    batch.setPipeline(getHBlurPipeline());

    DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);

    // Blend step
    getBlendPipeline();
    batch.setResourceTexture(0, _hBlurTexture);
    batch.setFramebuffer(DependencyManager::get<FramebufferCache>()->getPrimaryFramebuffer());

    // Bind the fourth gpu::Pipeline we need - for blending the primary color buffer with blurred occlusion texture
    batch.setPipeline(getBlendPipeline());

    DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);
    
    // Ready to render
    args->_context->render(batch);
}
Example #13
void BillboardOverlay::render(RenderArgs* args) {
    if (!_visible || !_isLoaded) {
        return;
    }
    
    if (!_billboard.isEmpty()) {
        if (_newTextureNeeded && _billboardTexture) {
            _billboardTexture.reset();
        }
        if (!_billboardTexture) {
            QImage image = QImage::fromData(_billboard);
            if (image.format() != QImage::Format_ARGB32) {
                image = image.convertToFormat(QImage::Format_ARGB32);
            }
            _size = image.size();
            if (_fromImage.x() == -1) {
                _fromImage.setRect(0, 0, _size.width(), _size.height());
            }
            _billboardTexture.reset(new Texture());
            _newTextureNeeded = false;
            glBindTexture(GL_TEXTURE_2D, _billboardTexture->getID());
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, _size.width(), _size.height(), 0,
                         GL_BGRA, GL_UNSIGNED_BYTE, image.constBits());
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
            
        } else {
            glBindTexture(GL_TEXTURE_2D, _billboardTexture->getID());
        }
    }
    
    glEnable(GL_ALPHA_TEST);
    glAlphaFunc(GL_GREATER, 0.5f);
    
    glEnable(GL_TEXTURE_2D);
    glDisable(GL_LIGHTING);
    
    glPushMatrix(); {
        glTranslatef(_position.x, _position.y, _position.z);
        glm::quat rotation;
        if (_isFacingAvatar) {
            // rotate about vertical to face the camera
            rotation = Application::getInstance()->getCamera()->getRotation();
            rotation *= glm::angleAxis(glm::pi<float>(), glm::vec3(0.0f, 1.0f, 0.0f));
        } else {
            rotation = getRotation();
        }
        glm::vec3 axis = glm::axis(rotation);
        glRotatef(glm::degrees(glm::angle(rotation)), axis.x, axis.y, axis.z);
        glScalef(_scale, _scale, _scale);
        
        if (_billboardTexture) {
            float maxSize = glm::max(_fromImage.width(), _fromImage.height());
            float x = _fromImage.width() / (2.0f * maxSize);
            float y = -_fromImage.height() / (2.0f * maxSize);
            
            const float MAX_COLOR = 255.0f;
            xColor color = getColor();
            float alpha = getAlpha();
            glColor4f(color.red / MAX_COLOR, color.green / MAX_COLOR, color.blue / MAX_COLOR, alpha);
            
            glm::vec2 topLeft(-x, -y);
            glm::vec2 bottomRight(x, y);
            glm::vec2 texCoordTopLeft((float)_fromImage.x() / (float)_size.width(), 
                                      (float)_fromImage.y() / (float)_size.height());
            glm::vec2 texCoordBottomRight(((float)_fromImage.x() + (float)_fromImage.width()) / (float)_size.width(),
                                          ((float)_fromImage.y() + (float)_fromImage.height()) / _size.height());

            DependencyManager::get<GeometryCache>()->renderQuad(topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight);
            
        }
    } glPopMatrix();
    
    glDisable(GL_TEXTURE_2D);
    glEnable(GL_LIGHTING);
    glDisable(GL_ALPHA_TEST);
    
    glBindTexture(GL_TEXTURE_2D, 0);
}
Example #14
void ImageOverlay::render(RenderArgs* args) {
    if (!_isLoaded && _renderImage) {
        _isLoaded = true;
        _texture = DependencyManager::get<TextureCache>()->getTexture(_imageURL);
    }

    // If we are not visible or loaded, return.  If we are trying to render an
    // image but the texture hasn't loaded, return.
    if (!_visible || !_isLoaded || (_renderImage && !_texture->isLoaded())) {
        return;
    }

    if (_renderImage) {
        glEnable(GL_TEXTURE_2D);
        glBindTexture(GL_TEXTURE_2D, _texture->getID());
    }

    const float MAX_COLOR = 255.0f;
    xColor color = getColor();
    float alpha = getAlpha();
    glm::vec4 quadColor(color.red / MAX_COLOR, color.green / MAX_COLOR, color.blue / MAX_COLOR, alpha);

    int left = _bounds.left();
    int right = _bounds.right() + 1;
    int top = _bounds.top();
    int bottom = _bounds.bottom() + 1;

    glm::vec2 topLeft(left, top);
    glm::vec2 bottomRight(right, bottom);

    // if for some reason our image has zero width or height, don't attempt to render it
    if (_renderImage) {
        float imageWidth = _texture->getWidth();
        float imageHeight = _texture->getHeight();
        if (imageWidth > 0 && imageHeight > 0) {
            QRect fromImage;
            if (_wantClipFromImage) {
                float scaleX = imageWidth / _texture->getOriginalWidth();
                float scaleY = imageHeight / _texture->getOriginalHeight();

                fromImage.setX(scaleX * _fromImage.x());
                fromImage.setY(scaleY * _fromImage.y());
                fromImage.setWidth(scaleX * _fromImage.width());
                fromImage.setHeight(scaleY * _fromImage.height());
            }
            else {
                fromImage.setX(0);
                fromImage.setY(0);
                fromImage.setWidth(imageWidth);
                fromImage.setHeight(imageHeight);
            }

            float x = fromImage.x() / imageWidth;
            float y = fromImage.y() / imageHeight;
            float w = fromImage.width() / imageWidth; // ?? is this what we want? not sure
            float h = fromImage.height() / imageHeight;

            glm::vec2 texCoordTopLeft(x, y);
            glm::vec2 texCoordBottomRight(x + w, y + h);

            DependencyManager::get<GeometryCache>()->renderQuad(topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight, quadColor);
        } else {
            DependencyManager::get<GeometryCache>()->renderQuad(topLeft, bottomRight, quadColor);
        }
        glDisable(GL_TEXTURE_2D);
    } else {
        DependencyManager::get<GeometryCache>()->renderQuad(topLeft, bottomRight, quadColor);
    }
}