Example #1
void StaticMesh::draw( void )
{
	UINT stride = mpVertexBuffer->getVertexStride();
	UINT offset = 0;
	horizon::gfx::getDeviceContext()->IASetVertexBuffers( 0, 1, mpVertexBuffer->getPtr(), &stride, &offset );

	// Set index buffer
	size_t typeSize = mpIndexBuffer->getIndexTypeSize();
	DXGI_FORMAT indexFormat = DXGI_FORMAT_UNKNOWN;
	switch( typeSize ) {
		case 1:
			indexFormat = DXGI_FORMAT_R8_UINT;
			break;
		case 2:
			indexFormat = DXGI_FORMAT_R16_UINT;
			break;
		case 4:
			indexFormat = DXGI_FORMAT_R32_UINT;
			break;
	}
	HORIZON_ASSERT( indexFormat != DXGI_FORMAT_UNKNOWN, "unknown index buffer format!" );
	horizon::gfx::getDeviceContext()->IASetIndexBuffer( mpIndexBuffer->get(), indexFormat, 0 );

	// Set primitive topology
	horizon::gfx::getDeviceContext()->IASetPrimitiveTopology( D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST );

	uint32_t indicesNum = getIndexBuffer()->getIndicesNum();
	horizon::gfx::getDeviceContext()->DrawIndexed( indicesNum, 0, 0 );
}
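A note on the formats above: D3D11 only accepts DXGI_FORMAT_R16_UINT and DXGI_FORMAT_R32_UINT for index buffers, so the 1-byte (R8_UINT) case is not a valid index buffer format. A minimal standalone sketch of the size-to-format mapping restricted to the supported sizes (the helper name is illustrative, not part of the project above):

#include <dxgiformat.h>
#include <cstddef>

// Illustrative helper: map an index element size in bytes to the DXGI format
// expected by ID3D11DeviceContext::IASetIndexBuffer. D3D11 index buffers only
// support 16- and 32-bit indices.
inline DXGI_FORMAT indexFormatFromSize(std::size_t typeSize)
{
    switch (typeSize) {
        case 2:  return DXGI_FORMAT_R16_UINT;
        case 4:  return DXGI_FORMAT_R32_UINT;
        default: return DXGI_FORMAT_UNKNOWN; // unsupported index size
    }
}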
Example #2
File: Mesh.cpp Project: luk2010/GRE
const HardwareIndexBuffer Mesh::getIndexBuffer() const
{
    auto ptr = lock();
    if ( ptr )
        return ptr->getIndexBuffer();
    return HardwareIndexBuffer::Null;
}
Example #3
void GLGraphicsContext::drawPrimitives(PrimitiveType primitiveType, std::size_t startIndex, std::size_t primitiveCount)
{
	if (!getVertexBuffer())
	{
		return;
	}
	
	GLenum mode = GLMapping::getPrimitiveType(primitiveType);
	
	GLsizei count = 0;
	switch (primitiveType)
	{
		case PrimitiveType::TRIANGLE_LIST:
			count = static_cast<GLsizei>(primitiveCount) * 3;
			break;
		
		case PrimitiveType::TRIANGLE_STRIP:
			count = static_cast<GLsizei>(primitiveCount) + 2;
			break;
		
		case PrimitiveType::LINE_LIST:
			count = static_cast<GLsizei>(primitiveCount) * 2;
			break;
		
		case PrimitiveType::LINE_STRIP:
			count = static_cast<GLsizei>(primitiveCount) + 1;
			break;
	}
	
	if (getIndexBuffer())
	{
		GLenum type = GLMapping::getIndexFormat(getIndexBuffer()->getIndexFormat());
		std::size_t typeSize = (getIndexBuffer()->getIndexFormat() == IndexFormat::UINT16) ? sizeof(std::uint16_t) : sizeof(std::uint32_t);
				
		glDrawElements(mode, count, type, (const GLvoid*)(startIndex * typeSize));
	}
	else
	{
		glDrawArrays(mode, static_cast<GLint>(startIndex), count);
	}
}
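The count computation above is the standard mapping from primitive count to vertex (or index) count for lists and strips. A standalone sketch of just that mapping, with illustrative names (not the engine's actual enum or API):

#include <cstddef>

// Illustrative sketch: how many vertices/indices a draw consumes for a given
// number of primitives, mirroring the switch in drawPrimitives above.
enum class PrimType { TRIANGLE_LIST, TRIANGLE_STRIP, LINE_LIST, LINE_STRIP };

inline std::size_t vertexCountFor(PrimType type, std::size_t primitiveCount)
{
    switch (type) {
        case PrimType::TRIANGLE_LIST:  return primitiveCount * 3;
        case PrimType::TRIANGLE_STRIP: return primitiveCount + 2;
        case PrimType::LINE_LIST:      return primitiveCount * 2;
        case PrimType::LINE_STRIP:     return primitiveCount + 1;
    }
    return 0; // unhandled primitive type
}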
Example #4
void VertexArrayNoVao::enableOpenGLVertexAttribArrays()
{
#ifndef OPENGLRENDERER_NO_STATE_CLEANUP
    // Backup the currently bound OpenGL array buffer
    // -> Using "GL_EXT_direct_state_access" this would not help in here because "glVertexAttribPointerARB" is not specified there :/
    GLint openGLArrayBufferBackup = 0;
    glGetIntegerv(GL_ARRAY_BUFFER_BINDING_ARB, &openGLArrayBufferBackup);
#endif

    // Loop through all attributes
    // -> We're using "glBindAttribLocationARB()" when linking the program so we have known attribute locations (the vertex array can't know about the program)
    GLuint attributeLocation = 0;
    const Renderer::VertexAttribute *attributeEnd = mAttributes + mNumberOfAttributes;
    for (const Renderer::VertexAttribute *attribute = mAttributes; attribute < attributeEnd; ++attribute, ++attributeLocation)
    {
        // Set the OpenGL vertex attribute pointer
        // TODO(co) Add security check: Is the given resource one of the currently used renderer?
        const Renderer::VertexArrayVertexBuffer& vertexArrayVertexBuffer = mVertexBuffers[attribute->inputSlot];
        glBindBufferARB(GL_ARRAY_BUFFER_ARB, static_cast<VertexBuffer*>(vertexArrayVertexBuffer.vertexBuffer)->getOpenGLArrayBuffer());
        glVertexAttribPointerARB(attributeLocation,
                                 Mapping::getOpenGLSize(attribute->vertexAttributeFormat),
                                 Mapping::getOpenGLType(attribute->vertexAttributeFormat),
                                 static_cast<GLboolean>(Mapping::isOpenGLVertexAttributeFormatNormalized(attribute->vertexAttributeFormat)),
                                 static_cast<GLsizei>(vertexArrayVertexBuffer.strideInBytes),
                                 reinterpret_cast<GLvoid*>(attribute->alignedByteOffset));

        // Per-instance instead of per-vertex requires "GL_ARB_instanced_arrays"
        if (attribute->instancesPerElement > 0 && mIsGL_ARB_instanced_arrays)
        {
            glVertexAttribDivisorARB(attributeLocation, attribute->instancesPerElement);
        }

        // Enable OpenGL vertex attribute array
        glEnableVertexAttribArrayARB(attributeLocation);
    }

#ifndef OPENGLRENDERER_NO_STATE_CLEANUP
    // Be polite and restore the previous bound OpenGL array buffer
    glBindBufferARB(GL_ARRAY_BUFFER_ARB, static_cast<GLuint>(openGLArrayBufferBackup));
#endif

    // Get the used index buffer
    // -> In case of no index buffer we don't bind buffer 0, there's not really a point in it
    const IndexBuffer *indexBuffer = getIndexBuffer();
    if (nullptr != indexBuffer)
    {
        // Bind OpenGL element array buffer
        glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, indexBuffer->getOpenGLElementArrayBuffer());
    }
}
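For comparison, a minimal sketch of the same per-attribute setup using core-profile entry points instead of the ARB-suffixed ones (all names and parameters here are placeholders, not the renderer's API; glVertexAttribDivisor requires GL 3.3 or ARB_instanced_arrays):

#include <cstdint>
// GL declarations come from your loader header (e.g. glad or GLEW); shown for illustration only.

void enableFloatAttribute(GLuint location, GLuint arrayBuffer, GLint components,
                          GLsizei strideInBytes, std::uintptr_t offsetInBytes,
                          GLuint instancesPerElement)
{
    // Bind the vertex buffer the attribute reads from
    glBindBuffer(GL_ARRAY_BUFFER, arrayBuffer);
    // Describe the attribute layout within that buffer
    glVertexAttribPointer(location, components, GL_FLOAT, GL_FALSE,
                          strideInBytes, reinterpret_cast<const GLvoid*>(offsetInBytes));
    // Advance per instance instead of per vertex, if requested
    if (instancesPerElement > 0) {
        glVertexAttribDivisor(location, instancesPerElement);
    }
    glEnableVertexAttribArray(location);
}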
Example #5
		// Export the mesh to a Wavefront OBJ file
		void writeWaveformOBJ(const std::string& filename, const std::string& objectName = "quickhull")
		{
			std::ofstream objFile;
			objFile.open (filename);
			objFile << "o " << objectName << "\n";
			for (const auto& v : getVertexBuffer()) {
				objFile << "v " << v.x << " " << v.y << " " << v.z << "\n";
			}
			const auto& indBuf = getIndexBuffer();
			size_t triangleCount = indBuf.size()/3;
			for (size_t i=0;i<triangleCount;i++) {
				objFile << "f " << indBuf[i*3]+1 << " " << indBuf[i*3+1]+1 << " " << indBuf[i*3+2]+1 << "\n";
			}
			objFile.close();
		}
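OBJ face indices are 1-based, which is why the loop above adds 1 to every index. A minimal standalone version of the same export for plain positions and triangle indices (type and function names are illustrative):

#include <array>
#include <cstddef>
#include <fstream>
#include <string>
#include <vector>

// Illustrative sketch: write a triangle mesh to a Wavefront OBJ file.
// OBJ uses 1-based vertex indices, hence the "+ 1" on every face index.
inline void writeObj(const std::string& filename,
                     const std::vector<std::array<float, 3>>& vertices,
                     const std::vector<std::size_t>& indices,
                     const std::string& objectName = "mesh")
{
    std::ofstream objFile(filename);
    objFile << "o " << objectName << "\n";
    for (const auto& v : vertices) {
        objFile << "v " << v[0] << " " << v[1] << " " << v[2] << "\n";
    }
    for (std::size_t i = 0; i + 2 < indices.size(); i += 3) {
        objFile << "f " << indices[i] + 1 << " "
                << indices[i + 1] + 1 << " "
                << indices[i + 2] + 1 << "\n";
    }
}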
Example #6
void Effect3DOutline::drawWithSprite(EffectSprite3D* sprite, const Mat4 &transform)
{
    auto mesh = sprite->getMesh();
    long offset = 0;
    for (auto i = 0; i < mesh->getMeshVertexAttribCount(); i++)
    {
        auto meshvertexattrib = mesh->getMeshVertexAttribute(i);
        
        _glProgramState->setVertexAttribPointer(s_attributeNames[meshvertexattrib.vertexAttrib],
                                                meshvertexattrib.size,
                                                meshvertexattrib.type,
                                                GL_FALSE,
                                                mesh->getVertexSizeInBytes(),
                                                (void*)offset);
        offset += meshvertexattrib.attribSizeBytes;
    }
    //draw
    {
        glEnable(GL_CULL_FACE);
        glCullFace(GL_FRONT);
        glEnable(GL_DEPTH_TEST);
        Color4F color(sprite->getDisplayedColor());
        color.a = sprite->getDisplayedOpacity() / 255.0f;
        
        _glProgramState->setUniformVec4("u_color", Vec4(color.r, color.g, color.b, color.a));
        
        // 'mesh' was already fetched at the top of the function; reuse it here
        glBindBuffer(GL_ARRAY_BUFFER, mesh->getVertexBuffer());
        _glProgramState->apply(transform);
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh->getIndexBuffer());
        glDrawElements((GLenum)mesh->getPrimitiveType(), (GLsizei)mesh->getIndexCount(), (GLenum)mesh->getIndexFormat(), 0);
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
        glBindBuffer(GL_ARRAY_BUFFER, 0);
        glDisable(GL_DEPTH_TEST);
        glCullFace(GL_BACK);
        glDisable(GL_CULL_FACE);
        CC_INCREMENT_GL_DRAWN_BATCHES_AND_VERTICES(1, mesh->getIndexCount());
    }
}
Example #7
void Effect3DOutline::draw(const Mat4 &transform)
{
    //draw
    if(_sprite && _sprite->getMesh())
    {
        // Check _sprite before dereferencing it to set the outline color
        Color4F color(_sprite->getDisplayedColor());
        color.a = _sprite->getDisplayedOpacity() / 255.0f;
        _glProgramState->setUniformVec4("u_color", Vec4(color.r, color.g, color.b, color.a));
        glEnable(GL_CULL_FACE);
        glCullFace(GL_FRONT);
        glEnable(GL_DEPTH_TEST);
        
        auto mesh = _sprite->getMesh();
        glBindBuffer(GL_ARRAY_BUFFER, mesh->getVertexBuffer());
        
        auto skin = _sprite->getMesh()->getSkin();
        if(_sprite && skin)
        {
            auto function = std::bind(MatrixPalleteCallBack, std::placeholders::_1, std::placeholders::_2,
                                      skin->getMatrixPaletteSize(), (float*)skin->getMatrixPalette());
            _glProgramState->setUniformCallback("u_matrixPalette", function);
        }
        
        if(_sprite)
            _glProgramState->apply(transform);
 
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh->getIndexBuffer());
        glDrawElements(mesh->getPrimitiveType(), (GLsizei)mesh->getIndexCount(), mesh->getIndexFormat(), 0);
        CC_INCREMENT_GL_DRAWN_BATCHES_AND_VERTICES(1, mesh->getIndexCount());
        
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
        glBindBuffer(GL_ARRAY_BUFFER, 0);
        glDisable(GL_DEPTH_TEST);
        glCullFace(GL_BACK);
        glDisable(GL_CULL_FACE);
    }
}
Example #8
void ViveControllerManager::renderHand(UserInputMapper::PoseValue pose, gpu::Batch& batch, int index) {
    auto userInputMapper = DependencyManager::get<UserInputMapper>();
    Transform transform(userInputMapper->getSensorToWorldMat());
    transform.postTranslate(pose.getTranslation() + pose.getRotation() * glm::vec3(0, 0, CONTROLLER_LENGTH_OFFSET));

    float sign = (index == LEFT_HAND) ? 1.0f : -1.0f;
    glm::quat rotation = pose.getRotation() * glm::angleAxis(PI, glm::vec3(1.0f, 0.0f, 0.0f)) * glm::angleAxis(sign * PI_OVER_TWO, glm::vec3(0.0f, 0.0f, 1.0f));
    transform.postRotate(rotation);

    batch.setModelTransform(transform);

    auto mesh = _modelGeometry.getMesh();
    batch.setInputBuffer(gpu::Stream::POSITION, mesh->getVertexBuffer());
    batch.setInputBuffer(gpu::Stream::NORMAL,
        mesh->getVertexBuffer()._buffer,
        sizeof(float) * 3,
        mesh->getVertexBuffer()._stride);
    //batch.setInputBuffer(gpu::Stream::TEXCOORD,
    //    mesh->getVertexBuffer()._buffer,
    //    2 * 3 * sizeof(float),
    //    mesh->getVertexBuffer()._stride);
    batch.setIndexBuffer(gpu::UINT16, mesh->getIndexBuffer()._buffer, 0);
    batch.drawIndexed(gpu::TRIANGLES, mesh->getNumIndices(), 0);
}
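The stream offsets above imply an interleaved position/normal vertex layout: the NORMAL stream is the same buffer as POSITION, read at an offset of three floats with the full per-vertex stride. A small illustrative struct of that assumed layout (not the actual model format):

// Assumed interleaved layout implied by the offsets above (illustrative only).
struct ControllerVertex {
    float position[3]; // byte offset 0
    float normal[3];   // byte offset sizeof(float) * 3
    // float texCoord[2]; // byte offset 2 * 3 * sizeof(float), if the commented-out stream were enabled
};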
Example #9
void DeferredLightingEffect::render(const render::RenderContextPointer& renderContext) {
    auto args = renderContext->args;
    gpu::doInBatch(args->_context, [&](gpu::Batch& batch) {
        
        // Allocate the parameters buffer used by all the deferred shaders
        if (!_deferredTransformBuffer[0]._buffer) {
            DeferredTransform parameters;
            _deferredTransformBuffer[0] = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(DeferredTransform), (const gpu::Byte*) &parameters));
            _deferredTransformBuffer[1] = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(DeferredTransform), (const gpu::Byte*) &parameters));
        }

        // Framebuffer copy operations cannot function as multipass stereo operations.  
        batch.enableStereo(false);

        // perform deferred lighting, rendering to free fbo
        auto framebufferCache = DependencyManager::get<FramebufferCache>();
        auto textureCache = DependencyManager::get<TextureCache>();
    
        QSize framebufferSize = framebufferCache->getFrameBufferSize();
    
        // binding the first framebuffer
        auto lightingFBO = framebufferCache->getLightingFramebuffer();
        batch.setFramebuffer(lightingFBO);

        batch.setViewportTransform(args->_viewport);
        batch.setStateScissorRect(args->_viewport);

        // Bind the G-Buffer surfaces
        batch.setResourceTexture(DEFERRED_BUFFER_COLOR_UNIT, framebufferCache->getDeferredColorTexture());
        batch.setResourceTexture(DEFERRED_BUFFER_NORMAL_UNIT, framebufferCache->getDeferredNormalTexture());
        batch.setResourceTexture(DEFERRED_BUFFER_EMISSIVE_UNIT, framebufferCache->getDeferredSpecularTexture());
        batch.setResourceTexture(DEFERRED_BUFFER_DEPTH_UNIT, framebufferCache->getPrimaryDepthTexture());

        // FIXME: Different render modes should have different tasks
        if (args->_renderMode == RenderArgs::DEFAULT_RENDER_MODE && _ambientOcclusionEnabled) {
            batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, framebufferCache->getOcclusionTexture());
        } else {
            // need to assign the white texture if ao is off
            batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, textureCache->getWhiteTexture());
        }

        assert(_lightStage.lights.size() > 0);
        const auto& globalShadow = _lightStage.lights[0]->shadow;

        // Bind the shadow buffer
        batch.setResourceTexture(SHADOW_MAP_UNIT, globalShadow.map);

        // The main viewport is assumed to be the mono viewport (or the 2 stereo faces side by side within that viewport)
        auto monoViewport = args->_viewport;
        float sMin = args->_viewport.x / (float)framebufferSize.width();
        float sWidth = args->_viewport.z / (float)framebufferSize.width();
        float tMin = args->_viewport.y / (float)framebufferSize.height();
        float tHeight = args->_viewport.w / (float)framebufferSize.height();

        // The view frustum is the mono frustum base
        auto viewFrustum = args->_viewFrustum;

        // Eval the mono projection
        mat4 monoProjMat;
        viewFrustum->evalProjectionMatrix(monoProjMat);

        // The mono view transform
        Transform monoViewTransform;
        viewFrustum->evalViewTransform(monoViewTransform);

        // The mono view matrix coming from the mono view transform
        glm::mat4 monoViewMat;
        monoViewTransform.getMatrix(monoViewMat);

        // Running in stereo?
        bool isStereo = args->_context->isStereo();
        int numPasses = 1;

        mat4 projMats[2];
        Transform viewTransforms[2];
        ivec4 viewports[2];
        vec4 clipQuad[2];
        vec2 screenBottomLeftCorners[2];
        vec2 screenTopRightCorners[2];
        vec4 fetchTexcoordRects[2];

        DeferredTransform deferredTransforms[2];
        auto geometryCache = DependencyManager::get<GeometryCache>();

        if (isStereo) {
            numPasses = 2;

            mat4 eyeViews[2];
            args->_context->getStereoProjections(projMats);
            args->_context->getStereoViews(eyeViews);

            float halfWidth = 0.5f * sWidth;

            for (int i = 0; i < numPasses; i++) {
                // In stereo, the 2 sides are laid out side by side in the mono viewport, each at half width
                int sideWidth = monoViewport.z >> 1;
                viewports[i] = ivec4(monoViewport.x + (i * sideWidth), monoViewport.y, sideWidth, monoViewport.w);

                deferredTransforms[i].projection = projMats[i];

                auto sideViewMat = monoViewMat * glm::inverse(eyeViews[i]);
                viewTransforms[i].evalFromRawMatrix(sideViewMat);
                deferredTransforms[i].viewInverse = sideViewMat;

                deferredTransforms[i].stereoSide = (i == 0 ? -1.0f : 1.0f);

                clipQuad[i] = glm::vec4(sMin + i * halfWidth, tMin, halfWidth, tHeight);
                screenBottomLeftCorners[i] = glm::vec2(-1.0f + i * 1.0f, -1.0f);
                screenTopRightCorners[i] = glm::vec2(i * 1.0f, 1.0f);

                fetchTexcoordRects[i] = glm::vec4(sMin + i * halfWidth, tMin, halfWidth, tHeight);
            }
        } else {

            viewports[0] = monoViewport;
            projMats[0] = monoProjMat;

            deferredTransforms[0].projection = monoProjMat;
     
            deferredTransforms[0].viewInverse = monoViewMat;
            viewTransforms[0] = monoViewTransform;

            deferredTransforms[0].stereoSide = 0.0f;

            clipQuad[0] = glm::vec4(sMin, tMin, sWidth, tHeight);
            screenBottomLeftCorners[0] = glm::vec2(-1.0f, -1.0f);
            screenTopRightCorners[0] = glm::vec2(1.0f, 1.0f);

            fetchTexcoordRects[0] = glm::vec4(sMin, tMin, sWidth, tHeight);
        }

        auto eyePoint = viewFrustum->getPosition();
        float nearRadius = glm::distance(eyePoint, viewFrustum->getNearTopLeft());


        for (int side = 0; side < numPasses; side++) {
            // Render in this side's viewport
            batch.setViewportTransform(viewports[side]);
            batch.setStateScissorRect(viewports[side]);

            // Sync and Bind the correct DeferredTransform ubo
            _deferredTransformBuffer[side]._buffer->setSubData(0, sizeof(DeferredTransform), (const gpu::Byte*) &deferredTransforms[side]);
            batch.setUniformBuffer(_directionalLightLocations->deferredTransformBuffer, _deferredTransformBuffer[side]);

            glm::vec2 topLeft(-1.0f, -1.0f);
            glm::vec2 bottomRight(1.0f, 1.0f);
            glm::vec2 texCoordTopLeft(clipQuad[side].x, clipQuad[side].y);
            glm::vec2 texCoordBottomRight(clipQuad[side].x + clipQuad[side].z, clipQuad[side].y + clipQuad[side].w);

            // First Global directional light and ambient pass
            {
                auto& program = _shadowMapEnabled ? _directionalLightShadow : _directionalLight;
                LightLocationsPtr locations = _shadowMapEnabled ? _directionalLightShadowLocations : _directionalLightLocations;
                const auto& keyLight = _allocatedLights[_globalLights.front()];

                // Setup the global directional pass pipeline
                {
                    if (_shadowMapEnabled) {
                        if (keyLight->getAmbientMap()) {
                            program = _directionalSkyboxLightShadow;
                            locations = _directionalSkyboxLightShadowLocations;
                        } else {
                            program = _directionalAmbientSphereLightShadow;
                            locations = _directionalAmbientSphereLightShadowLocations;
                        }
                    } else {
                        if (keyLight->getAmbientMap()) {
                            program = _directionalSkyboxLight;
                            locations = _directionalSkyboxLightLocations;
                        } else {
                            program = _directionalAmbientSphereLight;
                            locations = _directionalAmbientSphereLightLocations;
                        }
                    }

                    if (locations->shadowTransformBuffer >= 0) {
                        batch.setUniformBuffer(locations->shadowTransformBuffer, globalShadow.getBuffer());
                    }
                    batch.setPipeline(program);
                }

                { // Setup the global lighting
                    setupKeyLightBatch(batch, locations->lightBufferUnit, SKYBOX_MAP_UNIT);
                }

                {
                    batch.setModelTransform(Transform());
                    batch.setProjectionTransform(glm::mat4());
                    batch.setViewTransform(Transform());

                    glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);
                    geometryCache->renderQuad(batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight, color);
                }

                if (keyLight->getAmbientMap()) {
                    batch.setResourceTexture(SKYBOX_MAP_UNIT, nullptr);
                }
            }

            auto texcoordMat = glm::mat4();
            /*
            texcoordMat[0] = glm::vec4(sWidth / 2.0f, 0.0f, 0.0f, sMin + sWidth / 2.0f);
            texcoordMat[1] = glm::vec4(0.0f, tHeight / 2.0f, 0.0f, tMin + tHeight / 2.0f);
            */
            texcoordMat[0] = glm::vec4(fetchTexcoordRects[side].z / 2.0f, 0.0f, 0.0f, fetchTexcoordRects[side].x + fetchTexcoordRects[side].z / 2.0f);
            texcoordMat[1] = glm::vec4(0.0f, fetchTexcoordRects[side].w / 2.0f, 0.0f, fetchTexcoordRects[side].y + fetchTexcoordRects[side].w / 2.0f);
            texcoordMat[2] = glm::vec4(0.0f, 0.0f, 1.0f, 0.0f);
            texcoordMat[3] = glm::vec4(0.0f, 0.0f, 0.0f, 1.0f);

            // enlarge the scales slightly to account for tessellation
            const float SCALE_EXPANSION = 0.05f;


            batch.setProjectionTransform(projMats[side]);
            batch.setViewTransform(viewTransforms[side]);

            // Splat Point lights
            if (!_pointLights.empty()) {
                batch.setPipeline(_pointLight);

                batch._glUniformMatrix4fv(_pointLightLocations->texcoordMat, 1, false, reinterpret_cast< const float* >(&texcoordMat));

                for (auto lightID : _pointLights) {
                    auto& light = _allocatedLights[lightID];
                    // IN DEBUG: light->setShowContour(true);
                    batch.setUniformBuffer(_pointLightLocations->lightBufferUnit, light->getSchemaBuffer());

                    float expandedRadius = light->getMaximumRadius() * (1.0f + SCALE_EXPANSION);
                    // TODO: We shouldn't have to do this test and use a different volume geometry when inside the light volume;
                    // we should be able to draw the same geometry using DepthClamp, but for an unknown reason it's not working...
                    if (glm::distance(eyePoint, glm::vec3(light->getPosition())) < expandedRadius + nearRadius) {
                        Transform model;
                        model.setTranslation(glm::vec3(0.0f, 0.0f, -1.0f));
                        batch.setModelTransform(model);
                        batch.setViewTransform(Transform());
                        batch.setProjectionTransform(glm::mat4());

                        glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);
                        DependencyManager::get<GeometryCache>()->renderQuad(batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight, color);
                
                        batch.setProjectionTransform(projMats[side]);
                        batch.setViewTransform(viewTransforms[side]);
                    } else {
                        Transform model;
                        model.setTranslation(glm::vec3(light->getPosition().x, light->getPosition().y, light->getPosition().z));
                        batch.setModelTransform(model.postScale(expandedRadius));
                        batch._glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
                        geometryCache->renderSphere(batch);
                    }
                }
            }
    
            // Splat spot lights
            if (!_spotLights.empty()) {
                batch.setPipeline(_spotLight);

                batch._glUniformMatrix4fv(_spotLightLocations->texcoordMat, 1, false, reinterpret_cast< const float* >(&texcoordMat));

                for (auto lightID : _spotLights) {
                    auto light = _allocatedLights[lightID];
                    // IN DEBUG: light->setShowContour(true);
                    batch.setUniformBuffer(_spotLightLocations->lightBufferUnit, light->getSchemaBuffer());

                    auto eyeLightPos = eyePoint - light->getPosition();
                    auto eyeHalfPlaneDistance = glm::dot(eyeLightPos, light->getDirection());

                    const float TANGENT_LENGTH_SCALE = 0.666f;
                    glm::vec4 coneParam(light->getSpotAngleCosSin(), TANGENT_LENGTH_SCALE * tanf(0.5f * light->getSpotAngle()), 1.0f);

                    float expandedRadius = light->getMaximumRadius() * (1.0f + SCALE_EXPANSION);
                    // TODO: We shouldn't have to do this test and use a different volume geometry when inside the light volume;
                    // we should be able to draw the same geometry using DepthClamp, but for an unknown reason it's not working...
                    if ((eyeHalfPlaneDistance > -nearRadius) &&
                        (glm::distance(eyePoint, glm::vec3(light->getPosition())) < expandedRadius + nearRadius)) {
                        coneParam.w = 0.0f;
                        batch._glUniform4fv(_spotLightLocations->coneParam, 1, reinterpret_cast< const float* >(&coneParam));

                        Transform model;
                        model.setTranslation(glm::vec3(0.0f, 0.0f, -1.0f));
                        batch.setModelTransform(model);
                        batch.setViewTransform(Transform());
                        batch.setProjectionTransform(glm::mat4());
                
                        glm::vec4 color(1.0f, 1.0f, 1.0f, 1.0f);
                        DependencyManager::get<GeometryCache>()->renderQuad(batch, topLeft, bottomRight, texCoordTopLeft, texCoordBottomRight, color);
                
                        batch.setProjectionTransform( projMats[side]);
                        batch.setViewTransform(viewTransforms[side]);
                    } else {
                        coneParam.w = 1.0f;
                        batch._glUniform4fv(_spotLightLocations->coneParam, 1, reinterpret_cast< const float* >(&coneParam));

                        Transform model;
                        model.setTranslation(light->getPosition());
                        model.postRotate(light->getOrientation());
                        model.postScale(glm::vec3(expandedRadius, expandedRadius, expandedRadius));

                        batch.setModelTransform(model);
                        auto mesh = getSpotLightMesh();

                        batch.setIndexBuffer(mesh->getIndexBuffer());
                        batch.setInputBuffer(0, mesh->getVertexBuffer());
                        batch.setInputFormat(mesh->getVertexFormat());

                        auto& part = mesh->getPartBuffer().get<model::Mesh::Part>();

                        batch.drawIndexed(model::Mesh::topologyToPrimitive(part._topology), part._numIndices, part._startIndex);
                    }
                }
            }
        }

        // Probably not necessary in the long run because the gpu layer would unbind this texture if it is used as a render target
        batch.setResourceTexture(DEFERRED_BUFFER_COLOR_UNIT, nullptr);
        batch.setResourceTexture(DEFERRED_BUFFER_NORMAL_UNIT, nullptr);
        batch.setResourceTexture(DEFERRED_BUFFER_EMISSIVE_UNIT, nullptr);
        batch.setResourceTexture(DEFERRED_BUFFER_DEPTH_UNIT, nullptr);
        batch.setResourceTexture(DEFERRED_BUFFER_OBSCURANCE_UNIT, nullptr);
        batch.setResourceTexture(SHADOW_MAP_UNIT, nullptr);
        batch.setResourceTexture(SKYBOX_MAP_UNIT, nullptr);

        batch.setUniformBuffer(_directionalLightLocations->deferredTransformBuffer, nullptr);
    });
}
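In the stereo branch above, the mono viewport is split into two half-width, side-by-side viewports, one per eye. A small standalone sketch of that split (plain ints instead of the engine's ivec4, names illustrative):

// Illustrative sketch: split a mono viewport into two side-by-side eye viewports,
// as done with monoViewport.z >> 1 in the stereo pass above.
struct Viewport { int x, y, width, height; };

inline Viewport eyeViewport(const Viewport& mono, int eye /* 0 = left, 1 = right */)
{
    const int sideWidth = mono.width / 2;
    return { mono.x + eye * sideWidth, mono.y, sideWidth, mono.height };
}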
Example #10
BoxGeometry::BoxGeometry(
		String name,
		const Vector3D& halfExtends,
		bool addTangents,
		bool patchRepresentation,
		const Vector4D& texcoordScale)
: VertexBasedGeometry(name,
		patchRepresentation ? VERTEX_BASED_TRIANGLE_PATCHES : VERTEX_BASED_TRIANGLES),
		mHalfExtends(halfExtends)
  {
	BufferInfo bufferi(
			name + String("BoxGeometryPositionBuffer"),
			ContextTypeFlags(HOST_CONTEXT_TYPE_FLAG | OPEN_GL_CONTEXT_TYPE_FLAG),
			POSITION_SEMANTICS,
			TYPE_VEC4F,
			6 * 4, // 24 vertices (6 faces * 4 corners); because of per-face normals, the vertices cannot be shared ;(
			BufferElementInfo(4,GPU_DATA_TYPE_FLOAT,32,false),
			VERTEX_ATTRIBUTE_BUFFER_TYPE,
			NO_CONTEXT_TYPE
	);

	setAttributeBuffer(new Buffer(bufferi,false,0));

	bufferi.name = name + String("BoxGeometryNormalBuffer");
	bufferi.bufferSemantics = NORMAL_SEMANTICS;
	setAttributeBuffer(new Buffer(bufferi,false,0));

	if(addTangents)
	{
		bufferi.name = name + String("BoxGeometryTangentBuffer");
		bufferi.bufferSemantics = TANGENT_SEMANTICS;
		setAttributeBuffer( new Buffer( bufferi, false, 0 )	);
	}

	bufferi.name = name + String( "BoxGeometryTexCoordBuffer");
	bufferi.bufferSemantics = TEXCOORD_SEMANTICS;
	setAttributeBuffer(new Buffer(bufferi,false,0));

	setIndexBuffer(
		new Buffer(
			BufferInfo(
				name + String("BoxGeometryIndexBuffer"),
				ContextTypeFlags(HOST_CONTEXT_TYPE_FLAG | OPEN_GL_CONTEXT_TYPE_FLAG),
				INDEX_SEMANTICS,
				TYPE_UINT32,
				6 * 6, // 36 indices: 6 faces * 2 triangles * 3 vertices
				BufferElementInfo(1,GPU_DATA_TYPE_UINT,32,false),
				VERTEX_INDEX_BUFFER_TYPE,
				NO_CONTEXT_TYPE
			)
		)
	);


	Vector4D* posBuffer = 	reinterpret_cast<Vector4D*>(getAttributeBuffer(POSITION_SEMANTICS)->getCPUBufferHandle());
	Vector4D* normalBuffer =reinterpret_cast<Vector4D*>(getAttributeBuffer(NORMAL_SEMANTICS)->getCPUBufferHandle());
	Vector4D* tangentBuffer = 0;
	if(addTangents)	{
		tangentBuffer = 	reinterpret_cast<Vector4D*>(getAttributeBuffer(TANGENT_SEMANTICS)->getCPUBufferHandle());
	}
	Vector4D* tcBuffer = 	reinterpret_cast<Vector4D*>(getAttributeBuffer(TEXCOORD_SEMANTICS)->getCPUBufferHandle());
	unsigned int* indexBuffer = reinterpret_cast<unsigned int*>(getIndexBuffer()->getCPUBufferHandle());



	//iterate over the six faces:
	for(int axis=0; axis<3; axis++)
	{
		for(int side = 1; side >= -1; side -= 2)
		{
			Vector3D normal = Vector3D(axis==0?side:0,axis==1?side:0, axis==2?side:0);
			Vector3D left = Vector3D(
					((axis==2) || (axis ==1))	 ? 	-side 	:	0,
					0,
					axis==0?side:0
			);

			Vector3D down= glm::cross(normal,left);

			Vector3D tangent = left * (-1.0f);

			int vertexIndexBase = 8* axis + 4*(1-(side+1)/2);
			int indexIndexBase = 12* axis + 6*(1-(side+1)/2);

			//lower left
			posBuffer[vertexIndexBase + 0] 		= Vector4D( (normal + left + down)* halfExtends, 1 );
			normalBuffer[vertexIndexBase + 0]	= Vector4D( normal, 0 );
			if(addTangents){
				tangentBuffer[vertexIndexBase+0]= Vector4D( tangent, 0 );
			}
			tcBuffer[vertexIndexBase + 0]		= Vector4D(0,0,0, 0 ) * texcoordScale;
			//lower right
			posBuffer[vertexIndexBase + 1] 		= Vector4D( (normal - left + down)* halfExtends , 1 );
			normalBuffer[vertexIndexBase + 1]	= Vector4D( normal, 0 );
			if(addTangents){
				tangentBuffer[vertexIndexBase+1]= Vector4D( tangent, 0 );
			}
			tcBuffer[vertexIndexBase + 1]		= Vector4D(1,0,0, 0 )* texcoordScale;
			//upper right
			posBuffer[vertexIndexBase + 2] 		= Vector4D( (normal - left - down)* halfExtends , 1 );
			normalBuffer[vertexIndexBase + 2]	= Vector4D( normal, 0 );
			if(addTangents){
				tangentBuffer[vertexIndexBase+2]= Vector4D( tangent, 0 );
			}
			tcBuffer[vertexIndexBase + 2]		= Vector4D(1,1,0, 0 )* texcoordScale;
			//upper left
			posBuffer[vertexIndexBase + 3] 		= Vector4D( (normal + left - down)* halfExtends , 1 );
			normalBuffer[vertexIndexBase + 3]	= Vector4D( normal, 0 );
			if(addTangents){
				tangentBuffer[vertexIndexBase+3]= Vector4D( tangent, 0 );
			}
			tcBuffer[vertexIndexBase + 3]		= Vector4D(0,1,0, 0 )* texcoordScale;

			indexBuffer[indexIndexBase + 0]=  vertexIndexBase + 0;
			indexBuffer[indexIndexBase + 1]=  vertexIndexBase + 1;
			indexBuffer[indexIndexBase + 2]=  vertexIndexBase + 2;
			indexBuffer[indexIndexBase + 3]=  vertexIndexBase + 0;
			indexBuffer[indexIndexBase + 4]=  vertexIndexBase + 2;
			indexBuffer[indexIndexBase + 5]=  vertexIndexBase + 3;

//			LOG<<DEBUG_LOG_LEVEL<<"VERTICES OF BOX, face "<<
//					((side>0)?"positive":"negative") <<
//					(	(axis==0)
//							?" X"
//							:( (axis==1)
//								?" Y"
//								:" Z"	)    )
//
//					<< ": \n";
//			for(int i=0;i<4;i++)
//			{
//				LOG<<DEBUG_LOG_LEVEL
//						<<"Index of vertex :"<<vertexIndexBase+i
//						<<"; Vertex coords: "<<posBuffer[vertexIndexBase + i]
//						<<"; Normal: "<<normalBuffer[vertexIndexBase + i]
//						<<"; start index for face in index buffer: "<<indexIndexBase<<";\n"
//		 	    ;
//			}

		}
	}

	getAttributeBuffer(POSITION_SEMANTICS)->copyFromHostToGPU();
	getAttributeBuffer(NORMAL_SEMANTICS)->copyFromHostToGPU();
	if(addTangents){getAttributeBuffer(TANGENT_SEMANTICS)->copyFromHostToGPU();}
	getAttributeBuffer(TEXCOORD_SEMANTICS)->copyFromHostToGPU();
	getIndexBuffer()->copyFromHostToGPU();

}
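Each face above is written as two triangles over the quad's corners, (0, 1, 2) and (0, 2, 3). A minimal standalone sketch of that index pattern (the helper name is illustrative):

#include <vector>

// Illustrative sketch of the per-face index pattern used above: two triangles
// (0,1,2) and (0,2,3) over the four corners starting at 'base'.
inline void appendQuadIndices(std::vector<unsigned int>& indices, unsigned int base)
{
    const unsigned int pattern[6] = {0, 1, 2, 0, 2, 3};
    for (unsigned int k : pattern) {
        indices.push_back(base + k);
    }
}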