Example #1
bool ValidateUniformMatrix(gl::Context *context, GLenum matrixType, GLint location, GLsizei count,
                           GLboolean transpose)
{
    // Non-square matrix types and transposed uploads require the ES3 uniform entry points
    int rows = VariableRowCount(matrixType);
    int cols = VariableColumnCount(matrixType);
    if (rows != cols && context->getClientVersion() < 3)
    {
        return gl::error(GL_INVALID_OPERATION, false);
    }

    if (transpose != GL_FALSE && context->getClientVersion() < 3)
    {
        return gl::error(GL_INVALID_VALUE, false);
    }

    LinkedUniform *uniform = NULL;
    if (!ValidateUniformCommonBase(context, matrixType, location, count, &uniform))
    {
        return false;
    }

    if (uniform->type != matrixType)
    {
        return gl::error(GL_INVALID_OPERATION, false);
    }

    return true;
}
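
A minimal sketch of how a concrete entry point could reuse this validator; the wrapper below (ValidateUniformMatrix4fv) is an illustrative assumption, not ANGLE's actual entry-point code.

bool ValidateUniformMatrix4fv(gl::Context *context, GLint location, GLsizei count,
                              GLboolean transpose)
{
    // Delegate to the shared validator with the concrete matrix type. On an ES2 context this
    // rejects transpose != GL_FALSE; non-square matrix types would be rejected the same way.
    return ValidateUniformMatrix(context, GL_FLOAT_MAT4, location, count, transpose);
}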
Uniform::Uniform(GLenum type, GLenum precision, const std::string &name, unsigned int arraySize)
    : type(type), precision(precision), name(name), arraySize(arraySize)
{
    int bytes = gl::UniformInternalSize(type) * elementCount();
    data = new unsigned char[bytes];
    memset(data, 0, bytes);
    dirty = true;

    psRegisterIndex = -1;
    vsRegisterIndex = -1;
    registerCount = VariableRowCount(type) * elementCount();
}
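
A worked example of the sizing arithmetic, assuming elementCount() returns max(1u, arraySize) and gl::UniformInternalSize(GL_FLOAT_MAT4) is 64 bytes (four 16-byte registers); the uniform name is made up.

// Uniform u(GL_FLOAT_MAT4, GL_HIGH_FLOAT, "u_transform", 2);
//   elementCount() == 2
//   bytes          == 64 * 2 == 128                               // assumed internal size of a mat4
//   registerCount  == VariableRowCount(GL_FLOAT_MAT4) * 2 == 4 * 2 == 8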
Example #3
int VertexShader::getSemanticIndex(const std::string &attributeName)
{
    if (!attributeName.empty())
    {
        int semanticIndex = 0;
        for (AttributeArray::iterator attribute = mAttributes.begin(); attribute != mAttributes.end(); attribute++)
        {
            if (attribute->name == attributeName)
            {
                return semanticIndex;
            }

            semanticIndex += VariableRowCount(attribute->type);
        }
    }

    return -1;
}
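
A sketch of how the running semantic index behaves; the attribute names and types below are assumptions for illustration.

// mAttributes = { {"a_modelMatrix", GL_FLOAT_MAT4}, {"a_texCoord", GL_FLOAT_VEC2} }
// getSemanticIndex("a_modelMatrix") == 0   // a mat4 occupies indices 0..3, one per row
// getSemanticIndex("a_texCoord")    == 4   // starts after the four matrix rows
// getSemanticIndex("a_missing")     == -1  // unknown attribute names return -1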
bool DynamicHLSL::generateShaderLinkHLSL(const gl::Data &data,
                                         const gl::Program::Data &programData,
                                         const ProgramD3DMetadata &programMetadata,
                                         const VaryingPacking &varyingPacking,
                                         std::string *pixelHLSL,
                                         std::string *vertexHLSL) const
{
    ASSERT(pixelHLSL->empty() && vertexHLSL->empty());

    const gl::Shader *vertexShaderGL   = programData.getAttachedVertexShader();
    const gl::Shader *fragmentShaderGL = programData.getAttachedFragmentShader();
    const ShaderD3D *fragmentShader    = GetImplAs<ShaderD3D>(fragmentShaderGL);
    const int shaderModel              = mRenderer->getMajorShaderModel();

    // usesViewScale() isn't supported in the D3D9 renderer
    ASSERT(shaderModel >= 4 || !programMetadata.usesViewScale());

    bool useInstancedPointSpriteEmulation =
        programMetadata.usesPointSize() &&
        mRenderer->getWorkarounds().useInstancedPointSpriteEmulation;

    // Validation done in the compiler
    ASSERT(!fragmentShader->usesFragColor() || !fragmentShader->usesFragData());

    std::stringstream vertexStream;
    vertexStream << vertexShaderGL->getTranslatedSource();

    // Instanced PointSprite emulation requires additional entries originally generated in the
    // GeometryShader HLSL. These include pointsize clamp values.
    if (useInstancedPointSpriteEmulation)
    {
        vertexStream << "static float minPointSize = "
                     << static_cast<int>(data.caps->minAliasedPointSize) << ".0f;\n"
                     << "static float maxPointSize = "
                     << static_cast<int>(data.caps->maxAliasedPointSize) << ".0f;\n";
    }

    // Add stub string to be replaced when shader is dynamically defined by its layout
    vertexStream << "\n" << VERTEX_ATTRIBUTE_STUB_STRING + "\n";

    // Write the HLSL input/output declarations
    vertexStream << "struct VS_OUTPUT\n";
    generateVaryingLinkHLSL(SHADER_VERTEX, varyingPacking, vertexStream);
    vertexStream << "\n"
                 << "VS_OUTPUT main(VS_INPUT input)\n"
                 << "{\n"
                 << "    initAttributes(input);\n";

    vertexStream << "\n"
                 << "    gl_main();\n"
                 << "\n"
                 << "    VS_OUTPUT output;\n";

    const auto &vertexBuiltins = varyingPacking.builtins(SHADER_VERTEX);

    if (vertexBuiltins.glPosition.enabled)
    {
        vertexStream << "    output.gl_Position = gl_Position;\n";
    }

    // On D3D9 or D3D11 Feature Level 9 (the else branch below), we need to emulate large
    // viewports using dx_ViewAdjust; full D3D11 (shader model 4+, no suffix) needs no adjustment.
    if (shaderModel >= 4 && mRenderer->getShaderModelSuffix() == "")
    {
        vertexStream << "    output.dx_Position.x = gl_Position.x;\n";

        if (programMetadata.usesViewScale())
        {
            // This code assumes that dx_ViewScale.y = -1.0f when rendering to texture, and +1.0f
            // when rendering to the default framebuffer. No other values are valid.
            vertexStream << "    output.dx_Position.y = dx_ViewScale.y * gl_Position.y;\n";
        }
        else
        {
            vertexStream << "    output.dx_Position.y = - gl_Position.y;\n";
        }

        vertexStream << "    output.dx_Position.z = (gl_Position.z + gl_Position.w) * 0.5;\n"
                     << "    output.dx_Position.w = gl_Position.w;\n";
    }
    else
    {
        vertexStream << "    output.dx_Position.x = gl_Position.x * dx_ViewAdjust.z + "
                        "dx_ViewAdjust.x * gl_Position.w;\n";

        // If usesViewScale() is true and we're using the D3D11 renderer via Feature Level 9_*,
        // then we need to multiply the gl_Position.y by the viewScale.
        // usesViewScale() isn't supported when using the D3D9 renderer.
        if (programMetadata.usesViewScale() &&
            (shaderModel >= 4 && mRenderer->getShaderModelSuffix() != ""))
        {
            vertexStream << "    output.dx_Position.y = dx_ViewScale.y * (gl_Position.y * "
                            "dx_ViewAdjust.w + dx_ViewAdjust.y * gl_Position.w);\n";
        }
        else
        {
            vertexStream << "    output.dx_Position.y = -(gl_Position.y * dx_ViewAdjust.w + "
                            "dx_ViewAdjust.y * gl_Position.w);\n";
        }

        vertexStream << "    output.dx_Position.z = (gl_Position.z + gl_Position.w) * 0.5;\n"
                     << "    output.dx_Position.w = gl_Position.w;\n";
    }

    // We don't need to output gl_PointSize if we are emulating point sprites via instancing.
    if (vertexBuiltins.glPointSize.enabled)
    {
        vertexStream << "    output.gl_PointSize = gl_PointSize;\n";
    }

    if (vertexBuiltins.glFragCoord.enabled)
    {
        vertexStream << "    output.gl_FragCoord = gl_Position;\n";
    }

    for (const PackedVaryingRegister &registerInfo : varyingPacking.getRegisterList())
    {
        const auto &packedVarying = *registerInfo.packedVarying;
        const auto &varying = *packedVarying.varying;
        ASSERT(!varying.isStruct());

        vertexStream << "    output.v" << registerInfo.semanticIndex << " = ";

        if (packedVarying.isStructField())
        {
            vertexStream << decorateVariable(packedVarying.parentStructName) << ".";
        }

        vertexStream << decorateVariable(varying.name);

        if (varying.isArray())
        {
            WriteArrayString(vertexStream, registerInfo.varyingArrayIndex);
        }

        if (VariableRowCount(varying.type) > 1)
        {
            WriteArrayString(vertexStream, registerInfo.varyingRowIndex);
        }

        vertexStream << ";\n";
    }

    // Instanced PointSprite emulation requires additional entries to calculate
    // the final output vertex positions of the quad that represents each sprite.
    if (useInstancedPointSpriteEmulation)
    {
        vertexStream << "\n"
                     << "    gl_PointSize = clamp(gl_PointSize, minPointSize, maxPointSize);\n";

        vertexStream << "    output.dx_Position.x += (input.spriteVertexPos.x * gl_PointSize / "
                        "(dx_ViewCoords.x*2)) * output.dx_Position.w;";

        if (programMetadata.usesViewScale())
        {
            // Multiply by ViewScale to invert the rendering when appropriate
            vertexStream << "    output.dx_Position.y += (-dx_ViewScale.y * "
                            "input.spriteVertexPos.y * gl_PointSize / (dx_ViewCoords.y*2)) * "
                            "output.dx_Position.w;";
        }
        else
        {
            vertexStream << "    output.dx_Position.y += (input.spriteVertexPos.y * gl_PointSize / "
                            "(dx_ViewCoords.y*2)) * output.dx_Position.w;";
        }

        vertexStream
            << "    output.dx_Position.z += input.spriteVertexPos.z * output.dx_Position.w;\n";

        if (programMetadata.usesPointCoord())
        {
            vertexStream << "\n"
                         << "    output.gl_PointCoord = input.spriteTexCoord;\n";
        }
    }

    // Renderers that enable instanced point sprite emulation require the vertex shader output
    // member gl_PointCoord to be set to a default value if used without gl_PointSize.
    // (0.5, 0.5) is the same default value used in the generated pixel shader.
    if (programMetadata.usesInsertedPointCoordValue())
    {
        ASSERT(!useInstancedPointSpriteEmulation);
        vertexStream << "\n"
                     << "    output.gl_PointCoord = float2(0.5, 0.5);\n";
    }

    vertexStream << "\n"
                 << "    return output;\n"
                 << "}\n";

    std::stringstream pixelStream;
    pixelStream << fragmentShaderGL->getTranslatedSource();
    pixelStream << "struct PS_INPUT\n";
    generateVaryingLinkHLSL(SHADER_PIXEL, varyingPacking, pixelStream);
    pixelStream << "\n";

    pixelStream << PIXEL_OUTPUT_STUB_STRING + "\n";

    if (fragmentShader->usesFrontFacing())
    {
        if (shaderModel >= 4)
        {
            pixelStream << "PS_OUTPUT main(PS_INPUT input, bool isFrontFace : SV_IsFrontFace)\n"
                        << "{\n";
        }
        else
        {
            pixelStream << "PS_OUTPUT main(PS_INPUT input, float vFace : VFACE)\n"
                        << "{\n";
        }
    }
    else
    {
        pixelStream << "PS_OUTPUT main(PS_INPUT input)\n"
                    << "{\n";
    }

    const auto &pixelBuiltins = varyingPacking.builtins(SHADER_PIXEL);

    if (pixelBuiltins.glFragCoord.enabled)
    {
        pixelStream << "    float rhw = 1.0 / input.gl_FragCoord.w;\n";

        // Certain Shader Models (4_0+ and 3_0) allow reading from dx_Position in the pixel shader.
        // Other Shader Models (4_0_level_9_3 and 2_x) don't support this, so we emulate it using
        // dx_ViewCoords.
        if (shaderModel >= 4 && mRenderer->getShaderModelSuffix() == "")
        {
            pixelStream << "    gl_FragCoord.x = input.dx_Position.x;\n"
                        << "    gl_FragCoord.y = input.dx_Position.y;\n";
        }
        else if (shaderModel == 3)
        {
            pixelStream << "    gl_FragCoord.x = input.dx_Position.x + 0.5;\n"
                        << "    gl_FragCoord.y = input.dx_Position.y + 0.5;\n";
        }
        else
        {
            // dx_ViewCoords contains the viewport width/2, height/2, center.x and center.y. See
            // Renderer::setViewport()
            pixelStream << "    gl_FragCoord.x = (input.gl_FragCoord.x * rhw) * dx_ViewCoords.x + "
                           "dx_ViewCoords.z;\n"
                        << "    gl_FragCoord.y = (input.gl_FragCoord.y * rhw) * dx_ViewCoords.y + "
                           "dx_ViewCoords.w;\n";
        }

        if (programMetadata.usesViewScale())
        {
            // For Feature Level 9_3 and below, we need to correct gl_FragCoord.y to account
            // for dx_ViewScale. On Feature Level 10_0+, gl_FragCoord.y is calculated above using
            // dx_ViewCoords and is always correct irrespective of dx_ViewScale's value.
            // NOTE: usesViewScale() can only be true on D3D11 (i.e. Shader Model 4.0+).
            if (shaderModel >= 4 && mRenderer->getShaderModelSuffix() == "")
            {
                // Some assumptions:
                //  - dx_ViewScale.y = -1.0f when rendering to texture
                //  - dx_ViewScale.y = +1.0f when rendering to the default framebuffer
                //  - gl_FragCoord.y has been set correctly above.
                //
                // When rendering to the backbuffer, the code inverts gl_FragCoord's y coordinate.
                // This involves subtracting the y coordinate from the height of the area being
                // rendered to.
                //
                // First we calculate the height of the area being rendered to:
                //    render_area_height = (2.0f / (1.0f - input.gl_FragCoord.y * rhw)) *
                //    gl_FragCoord.y
                //
                // Note that when we're rendering to default FB, we want our output to be
                // equivalent to:
                //    "gl_FragCoord.y = render_area_height - gl_FragCoord.y"
                //
                // When we're rendering to a texture, we want our output to be equivalent to:
                //    "gl_FragCoord.y = gl_FragCoord.y;"
                //
                // If we set scale_factor = ((1.0f + dx_ViewScale.y) / 2.0f), then notice that
                //  - When rendering to default FB: scale_factor = 1.0f
                //  - When rendering to texture:    scale_factor = 0.0f
                //
                // Therefore, we can get our desired output by setting:
                //    "gl_FragCoord.y = scale_factor * render_area_height - dx_ViewScale.y *
                //    gl_FragCoord.y"
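                //
                // Substituting scale_factor and render_area_height gives:
                //    "gl_FragCoord.y = ((1.0f + dx_ViewScale.y) / 2.0f) *
                //    (2.0f / (1.0f - input.gl_FragCoord.y * rhw)) * gl_FragCoord.y -
                //    dx_ViewScale.y * gl_FragCoord.y"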
                //
                // Simplifying, this becomes:
                pixelStream
                    << "    gl_FragCoord.y = (1.0f + dx_ViewScale.y) * gl_FragCoord.y /"
                       "(1.0f - input.gl_FragCoord.y * rhw)  - dx_ViewScale.y * gl_FragCoord.y;\n";
            }
        }

        pixelStream << "    gl_FragCoord.z = (input.gl_FragCoord.z * rhw) * dx_DepthFront.x + "
                       "dx_DepthFront.y;\n"
                    << "    gl_FragCoord.w = rhw;\n";
    }

    if (pixelBuiltins.glPointCoord.enabled && shaderModel >= 3)
    {
        pixelStream << "    gl_PointCoord.x = input.gl_PointCoord.x;\n"
                    << "    gl_PointCoord.y = 1.0 - input.gl_PointCoord.y;\n";
    }

    if (fragmentShader->usesFrontFacing())
    {
        if (shaderModel <= 3)
        {
            pixelStream << "    gl_FrontFacing = (vFace * dx_DepthFront.z >= 0.0);\n";
        }
        else
        {
            pixelStream << "    gl_FrontFacing = isFrontFace;\n";
        }
    }

    for (const PackedVaryingRegister &registerInfo : varyingPacking.getRegisterList())
    {
        const auto &packedVarying = *registerInfo.packedVarying;
        const auto &varying = *packedVarying.varying;
        ASSERT(!varying.isBuiltIn() && !varying.isStruct());

        // Don't reference VS-only transform feedback varyings in the PS.
        if (registerInfo.packedVarying->vertexOnly)
            continue;

        pixelStream << "    ";

        if (packedVarying.isStructField())
        {
            pixelStream << decorateVariable(packedVarying.parentStructName) << ".";
        }

        pixelStream << decorateVariable(varying.name);

        if (varying.isArray())
        {
            WriteArrayString(pixelStream, registerInfo.varyingArrayIndex);
        }

        GLenum transposedType = TransposeMatrixType(varying.type);
        if (VariableRowCount(transposedType) > 1)
        {
            WriteArrayString(pixelStream, registerInfo.varyingRowIndex);
        }

        pixelStream << " = input.v" << registerInfo.semanticIndex;

        switch (VariableColumnCount(transposedType))
        {
            case 1:
                pixelStream << ".x";
                break;
            case 2:
                pixelStream << ".xy";
                break;
            case 3:
                pixelStream << ".xyz";
                break;
            case 4:
                break;
            default:
                UNREACHABLE();
        }
        pixelStream << ";\n";
    }

    pixelStream << "\n"
                << "    gl_main();\n"
                << "\n"
                << "    return generateOutput();\n"
                << "}\n";

    *vertexHLSL = vertexStream.str();
    *pixelHLSL  = pixelStream.str();

    return true;
}
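
For orientation, the vertex stream assembled above produces HLSL of roughly the following shape; this is a sketch only, and the exact members depend on the enabled builtins and the packed varyings.

// struct VS_OUTPUT { ... };                         // emitted by generateVaryingLinkHLSL
//
// VS_OUTPUT main(VS_INPUT input)
// {
//     initAttributes(input);
//     gl_main();
//     VS_OUTPUT output;
//     output.gl_Position = gl_Position;             // only if the builtin is enabled
//     output.dx_Position = ...;                     // viewport / view-scale adjusted position
//     output.vN = ...;                              // one assignment per PackedVaryingRegister
//     return output;
// }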
std::string DynamicHLSL::generateVertexShaderForInputLayout(
    const std::string &sourceShader,
    const InputLayout &inputLayout,
    const std::vector<sh::Attribute> &shaderAttributes) const
{
    std::stringstream structStream;
    std::stringstream initStream;

    structStream << "struct VS_INPUT\n"
                 << "{\n";

    int semanticIndex       = 0;
    unsigned int inputIndex = 0;

    // If gl_PointSize is used in the shader then pointsprites rendering is expected.
    // If the renderer does not support Geometry shaders then Instanced PointSprite emulation
    // must be used.
    bool usesPointSize = sourceShader.find("GL_USES_POINT_SIZE") != std::string::npos;
    bool useInstancedPointSpriteEmulation =
        usesPointSize && mRenderer->getWorkarounds().useInstancedPointSpriteEmulation;

    // Instanced PointSprite emulation requires additional entries in the
    // VS_INPUT structure to support the vertices that make up the quad vertices.
    // These values must be in sync with the corresponding values added during input layout creation
    // in InputLayoutCache::applyVertexBuffers().
    //
    // The additional entries must appear first in the VS_INPUT layout because
    // Windows Phone 8 era devices require per vertex data to physically come
    // before per instance data in the shader.
    if (useInstancedPointSpriteEmulation)
    {
        structStream << "    float3 spriteVertexPos : SPRITEPOSITION0;\n"
                     << "    float2 spriteTexCoord : SPRITETEXCOORD0;\n";
    }

    for (size_t attributeIndex = 0; attributeIndex < shaderAttributes.size(); ++attributeIndex)
    {
        const sh::Attribute &shaderAttribute = shaderAttributes[attributeIndex];
        if (!shaderAttribute.name.empty())
        {
            ASSERT(inputIndex < MAX_VERTEX_ATTRIBS);
            VertexFormatType vertexFormatType =
                inputIndex < inputLayout.size() ? inputLayout[inputIndex] : VERTEX_FORMAT_INVALID;

            // HLSL code for input structure
            if (IsMatrixType(shaderAttribute.type))
            {
                // Matrix types are always transposed
                structStream << "    "
                             << HLSLMatrixTypeString(TransposeMatrixType(shaderAttribute.type));
            }
            else
            {
                GLenum componentType = mRenderer->getVertexComponentType(vertexFormatType);

                if (shaderAttribute.name == "gl_InstanceID" ||
                    shaderAttribute.name == "gl_VertexID")
                {
                    // The input types of the instance ID and vertex ID in HLSL (uint) differ from
                    // the ones in ESSL (int).
                    structStream << " uint";
                }
                else
                {
                    structStream << "    " << HLSLComponentTypeString(
                                                  componentType,
                                                  VariableComponentCount(shaderAttribute.type));
                }
            }

            structStream << " " << decorateVariable(shaderAttribute.name) << " : ";

            if (shaderAttribute.name == "gl_InstanceID")
            {
                structStream << "SV_InstanceID";
            }
            else if (shaderAttribute.name == "gl_VertexID")
            {
                structStream << "SV_VertexID";
            }
            else
            {
                structStream << "TEXCOORD" << semanticIndex;
                semanticIndex += VariableRegisterCount(shaderAttribute.type);
            }

            structStream << ";\n";

            // HLSL code for initialization
            initStream << "    " << decorateVariable(shaderAttribute.name) << " = ";

            // Mismatched vertex attribute to vertex input may result in an undefined
            // data reinterpretation (e.g. for pure integer->float, float->pure integer)
            // TODO: issue warning with gl debug info extension, when supported
            if (IsMatrixType(shaderAttribute.type) ||
                (mRenderer->getVertexConversionType(vertexFormatType) & VERTEX_CONVERT_GPU) != 0)
            {
                initStream << generateAttributeConversionHLSL(vertexFormatType, shaderAttribute);
            }
            else
            {
                initStream << "input." << decorateVariable(shaderAttribute.name);
            }

            initStream << ";\n";

            inputIndex += VariableRowCount(TransposeMatrixType(shaderAttribute.type));
        }
    }

    structStream << "};\n"
                    "\n"
                    "void initAttributes(VS_INPUT input)\n"
                    "{\n"
                 << initStream.str() << "}\n";

    std::string vertexHLSL(sourceShader);

    size_t copyInsertionPos = vertexHLSL.find(VERTEX_ATTRIBUTE_STUB_STRING);
    vertexHLSL.replace(copyInsertionPos, VERTEX_ATTRIBUTE_STUB_STRING.length(), structStream.str());

    return vertexHLSL;
}
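
As a rough illustration: for a single ESSL attribute "position" of type vec4 with a plain float input layout, and assuming decorateVariable() prefixes user-defined names with an underscore, the text substituted for VERTEX_ATTRIBUTE_STUB_STRING would look approximately like the comment below; all names here are assumptions.

// struct VS_INPUT
// {
//     float4 _position : TEXCOORD0;
// };
//
// void initAttributes(VS_INPUT input)
// {
//     _position = input._position;
// }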
Example #6
int MatrixComponentCount(GLenum type, bool isRowMajorMatrix)
{
    ASSERT(IsMatrixType(type));
    return isRowMajorMatrix ? VariableColumnCount(type) : VariableRowCount(type);
}