Example #1
// Recursively figure out how many bytes of xfb buffer are used by the given type.
// Return the size of type, in bytes.
// Sets containsDouble to true if the type contains a double.
// N.B. Caller must set containsDouble to false before calling.
unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& containsDouble) const
{
    // "...if applied to an aggregate containing a double, the offset must also be a multiple of 8, 
    // and the space taken in the buffer will be a multiple of 8.
    // ...within the qualified entity, subsequent components are each 
    // assigned, in order, to the next available offset aligned to a multiple of
    // that component's size.  Aggregate types are flattened down to the component
    // level to get this sequence of components."

    if (type.isArray()) {        
        // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
        assert(type.isExplicitlySizedArray());
        TType elementType(type, 0);
        return type.getOuterArraySize() * computeTypeXfbSize(elementType, containsDouble);
    }

    if (type.isStruct()) {
        unsigned int size = 0;
        bool structContainsDouble = false;
        for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
            TType memberType(type, member);
            // "... if applied to 
            // an aggregate containing a double, the offset must also be a multiple of 8, 
            // and the space taken in the buffer will be a multiple of 8."
            bool memberContainsDouble = false;
            int memberSize = computeTypeXfbSize(memberType, memberContainsDouble);
            if (memberContainsDouble) {
                structContainsDouble = true;
                RoundToPow2(size, 8);
            }
            size += memberSize;
        }

        if (structContainsDouble) {
            containsDouble = true;
            RoundToPow2(size, 8);
        }
        return size;
    }

    int numComponents;
    if (type.isScalar())
        numComponents = 1;
    else if (type.isVector())
        numComponents = type.getVectorSize();
    else if (type.isMatrix())
        numComponents = type.getMatrixCols() * type.getMatrixRows();
    else {
        assert(0);
        numComponents = 1;
    }

    if (type.getBasicType() == EbtDouble) {
        containsDouble = true;
        return 8 * numComponents;
    } else
        return 4 * numComponents;
}
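
A hedged worked sketch of the sizing rules above; the caller and the struct declaration are hypothetical, not taken from the code:

// Sketch of a call site (assumed names 'intermediate' and 'blockType'):
//     bool containsDouble = false;   // caller must clear the flag first, per the N.B. above
//     unsigned int bytes = intermediate.computeTypeXfbSize(blockType, containsDouble);
//
// Worked example for a hypothetical "struct { float f; double d; }":
//     f   : 4 bytes at offset 0                                       -> running size 4
//     d   : member contains a double, so the running size is rounded
//           up to 8, then 8 bytes are added                           -> running size 16
//     end : the struct contains a double, so the total is rounded up
//           to a multiple of 8 (already 16)                           -> 16 bytes, containsDouble == true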
Example #2
// Accumulate locations used for inputs, outputs, and uniforms, and check for collisions
// as the accumulation is done.
//
// Returns < 0 if there is no collision; otherwise returns a value >= 0, which is one of the colliding locations.
//
// typeCollision is set to true if there is no direct collision, but the types in the same location
// are different.
//
int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& type, bool& typeCollision)
{
    typeCollision = false;

    int set;
    if (qualifier.isPipeInput())
        set = 0;
    else if (qualifier.isPipeOutput())
        set = 1;
    else if (qualifier.storage == EvqUniform)
        set = 2;
    else if (qualifier.storage == EvqBuffer)
        set = 3;
    else
        return -1;

    int size;
    if (qualifier.isUniformOrBuffer()) {
        if (type.isArray())
            size = type.getCumulativeArraySize();
        else
            size = 1;
    } else {
        // Strip off the outer array dimension for those having an extra one.
        if (type.isArray() && qualifier.isArrayedIo(language)) {
            TType elementType(type, 0);
            size = computeTypeLocationSize(elementType);
        } else
            size = computeTypeLocationSize(type);
    }

    TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation + size - 1);
    TRange componentRange(0, 3);
    if (qualifier.hasComponent()) {
        componentRange.start = qualifier.layoutComponent;
        componentRange.last = componentRange.start + type.getVectorSize() - 1;
    }
    TIoRange range(locationRange, componentRange, type.getBasicType(), qualifier.hasIndex() ? qualifier.layoutIndex : 0);

    // check for collisions, except for vertex inputs on desktop
    if (! (profile != EEsProfile && language == EShLangVertex && qualifier.isPipeInput())) {
        for (size_t r = 0; r < usedIo[set].size(); ++r) {
            if (range.overlap(usedIo[set][r])) {
                // there is a collision; pick one
                return std::max(locationRange.start, usedIo[set][r].location.start);
            } else if (locationRange.overlap(usedIo[set][r].location) && type.getBasicType() != usedIo[set][r].basicType) {
                // aliased-type mismatch
                typeCollision = true;
                return std::max(locationRange.start, usedIo[set][r].location.start);
            }
        }
    }

    usedIo[set].push_back(range);

    return -1; // no collision
}
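
A minimal usage sketch, assuming a glslang-style caller; the names 'intermediate' and 'symbol' and the handling shown are illustrative only:

//     bool typeCollision = false;
//     int collision = intermediate.addUsedLocation(symbol.getQualifier(), symbol.getType(), typeCollision);
//     if (collision >= 0) {
//         if (typeCollision)
//             ; // no direct range overlap, but another type aliases this location
//         else
//             ; // location/component/index ranges overlap; 'collision' holds a conflicting location
//     }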
Example #3
// Recursively figure out how many locations are used up by an input or output type.
// Return the size of type, as measured by "locations".
int TIntermediate::computeTypeLocationSize(const TType& type) const
{
    // "If the declared input is an array of size n and each element takes m locations, it will be assigned m * n 
    // consecutive locations..."
    if (type.isArray()) {
        // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
        TType elementType(type, 0);
        if (type.isImplicitlySizedArray()) {
            // TODO: are there valid cases of having an implicitly-sized array with a location?  If so, this code is being run too early.
            return computeTypeLocationSize(elementType);
        } else
            return type.getOuterArraySize() * computeTypeLocationSize(elementType);
    }

    // "The locations consumed by block and structure members are determined by applying the rules above 
    // recursively..."    
    if (type.isStruct()) {
        int size = 0;
        for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
            TType memberType(type, member);
            size += computeTypeLocationSize(memberType);
        }
        return size;
    }

    // ES: "If a shader input is any scalar or vector type, it will consume a single location."

    // Desktop: "If a vertex shader input is any scalar or vector type, it will consume a single location. If a non-vertex 
    // shader input is a scalar or vector type other than dvec3 or dvec4, it will consume a single location, while 
    // types dvec3 or dvec4 will consume two consecutive locations. Inputs of type double and dvec2 will 
    // consume only a single location, in all stages."
    if (type.isScalar())
        return 1;
    if (type.isVector()) {
        if (language == EShLangVertex && type.getQualifier().isPipeInput())
            return 1;
        if (type.getBasicType() == EbtDouble && type.getVectorSize() > 2)
            return 2;
        else
            return 1;
    }

    // "If the declared input is an n x m single- or double-precision matrix, ...
    // The number of locations assigned for each matrix will be the same as 
    // for an n-element array of m-component vectors..."
    if (type.isMatrix()) {
        TType columnType(type, 0);
        return type.getMatrixCols() * computeTypeLocationSize(columnType);
    }

    assert(0);
    return 1;
}
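
A few worked cases for the location rules above; the declarations are hypothetical:

// Worked examples (hypothetical declarations):
//     in vec4 a;                   -> 1 location (scalar/vector rule)
//     in dvec4 b;                  -> 2 locations in non-vertex stages, 1 as a vertex input
//     in vec2 c[4];                -> 4 * 1 = 4 locations (array rule)
//     in mat3 m;                   -> 3 columns * 1 location per vec3 column = 3 locations
//     struct { vec3 p; mat4 m; }   -> 1 + 4 = 5 locations (struct members summed)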
Example #4
// Ensure the index is in bounds, correcting it if necessary.
// Give an error if it was out of range.
void TParseContextBase::checkIndex(const TSourceLoc& loc, const TType& type, int& index)
{
    if (index < 0) {
        error(loc, "", "[", "index out of range '%d'", index);
        index = 0;
    } else if (type.isArray()) {
        if (type.isSizedArray() && index >= type.getOuterArraySize()) {
            error(loc, "", "[", "array index out of range '%d'", index);
            index = type.getOuterArraySize() - 1;
        }
    } else if (type.isVector()) {
        if (index >= type.getVectorSize()) {
            error(loc, "", "[", "vector index out of range '%d'", index);
            index = type.getVectorSize() - 1;
        }
    } else if (type.isMatrix()) {
        if (index >= type.getMatrixCols()) {
            error(loc, "", "[", "matrix index out of range '%d'", index);
            index = type.getMatrixCols() - 1;
        }
    }
}
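
A short illustration of the clamping behavior; the indexed types and values are made up:

// Illustration (hypothetical cases):
//     vec4 indexed with 7       -> error "vector index out of range '7'", index clamped to 3
//     mat3 indexed with 5       -> error "matrix index out of range '5'", index clamped to 2
//     float[8] indexed with -1  -> error "index out of range '-1'",       index clamped to 0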
Example #5
// Implement base-alignment and size rules from section 7.6.2.2 Standard Uniform Block Layout
// Operates recursively.
//
// If std140 is true, it does the rounding up to vec4 size required by std140, 
// otherwise it does not, yielding std430 rules.
//
// The size is returned in the 'size' parameter
//
// The stride is only non-0 for arrays or matrices, and is the stride of the
// top-level object nested within the type.  E.g., for an array of matrices,
// it is the distance needed between matrices, despite the rules saying the
// stride comes from the flattening down to vectors.
//
// Return value is the alignment of the type.
int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, bool std140, bool rowMajor)
{
    int alignment;

    // When using the std140 storage layout, structures will be laid out in buffer
    // storage with its members stored in monotonically increasing order based on their
    // location in the declaration. A structure and each structure member have a base
    // offset and a base alignment, from which an aligned offset is computed by rounding
    // the base offset up to a multiple of the base alignment. The base offset of the first
    // member of a structure is taken from the aligned offset of the structure itself. The
    // base offset of all other structure members is derived by taking the offset of the
    // last basic machine unit consumed by the previous member and adding one. Each
    // structure member is stored in memory at its aligned offset. The members of a top-
    // level uniform block are laid out in buffer storage by treating the uniform block as
    // a structure with a base offset of zero.
    //
    //   1. If the member is a scalar consuming N basic machine units, the base alignment is N.
    //
    //   2. If the member is a two- or four-component vector with components consuming N basic 
    //      machine units, the base alignment is 2N or 4N, respectively.
    //
    //   3. If the member is a three-component vector with components consuming N
    //      basic machine units, the base alignment is 4N.
    //
    //   4. If the member is an array of scalars or vectors, the base alignment and array
    //      stride are set to match the base alignment of a single array element, according
    //      to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. The
    //      array may have padding at the end; the base offset of the member following
    //      the array is rounded up to the next multiple of the base alignment.
    //
    //   5. If the member is a column-major matrix with C columns and R rows, the
    //      matrix is stored identically to an array of C column vectors with R 
    //      components each, according to rule (4).
    //
    //   6. If the member is an array of S column-major matrices with C columns and
    //      R rows, the matrix is stored identically to a row of S × C column vectors
    //      with R components each, according to rule (4).
    //
    //   7. If the member is a row-major matrix with C columns and R rows, the matrix
    //      is stored identically to an array of R row vectors with C components each,
    //      according to rule (4).
    //
    //   8. If the member is an array of S row-major matrices with C columns and R
    //      rows, the matrix is stored identically to a row of S × R row vectors with C
    //      components each, according to rule (4).
    //
    //   9. If the member is a structure, the base alignment of the structure is N, where
    //      N is the largest base alignment value of any of its members, and rounded
    //      up to the base alignment of a vec4. The individual members of this substructure 
    //      are then assigned offsets by applying this set of rules recursively,
    //      where the base offset of the first member of the sub-structure is equal to the
    //      aligned offset of the structure. The structure may have padding at the end;
    //      the base offset of the member following the sub-structure is rounded up to
    //      the next multiple of the base alignment of the structure.
    //
    //   10. If the member is an array of S structures, the S elements of the array are laid
    //       out in order, according to rule (9).
    //
    //   Assuming, for rule 10:  The stride is the same as the size of an element.
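    //
    //   A hedged worked illustration of rule 4 (the declaration below is hypothetical):
    //       float a[4];
    //         std140: the element alignment (4) is rounded up to the vec4 alignment (16),
    //                 so the array stride is 16 and the array occupies 4 * 16 = 64 bytes.
    //         std430: no vec4 rounding, so the stride is 4 and the array occupies 16 bytes.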

    stride = 0;
    int dummyStride;

    // rules 4, 6, 8, and 10
    if (type.isArray()) {
        // TODO: perf: this might be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
        TType derefType(type, 0);
        alignment = getBaseAlignment(derefType, size, dummyStride, std140, rowMajor);
        if (std140)
            alignment = std::max(baseAlignmentVec4Std140, alignment);
        RoundToPow2(size, alignment);
        stride = size;  // uses full matrix size for stride of an array of matrices (not quite what rule 6/8 says, but what's expected)
                        // uses the assumption for rule 10 in the comment above
        size = stride * type.getOuterArraySize();
        return alignment;
    }

    // rule 9
    if (type.getBasicType() == EbtStruct) {
        const TTypeList& memberList = *type.getStruct();

        size = 0;
        int maxAlignment = std140 ? baseAlignmentVec4Std140 : 0;
        for (size_t m = 0; m < memberList.size(); ++m) {
            int memberSize;
            // modify just the children's view of matrix layout, if there is one for this member
            TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
            int memberAlignment = getBaseAlignment(*memberList[m].type, memberSize, dummyStride, std140,
                                                   (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
            maxAlignment = std::max(maxAlignment, memberAlignment);
            RoundToPow2(size, memberAlignment);         
            size += memberSize;
        }

        // The structure may have padding at the end; the base offset of
        // the member following the sub-structure is rounded up to the next
        // multiple of the base alignment of the structure.
        RoundToPow2(size, maxAlignment);

        return maxAlignment;
    }

    // rule 1
    if (type.isScalar())
        return getBaseAlignmentScalar(type, size);

    // rules 2 and 3
    if (type.isVector()) {
        int scalarAlign = getBaseAlignmentScalar(type, size);
        switch (type.getVectorSize()) {
        case 2:
            size *= 2;
            return 2 * scalarAlign;
        default: 
            size *= type.getVectorSize();
            return 4 * scalarAlign;
        }
    }

    // rules 5 and 7
    if (type.isMatrix()) {
        // rule 5: deref to row, not to column, meaning the size of vector is num columns instead of num rows
        TType derefType(type, 0, rowMajor);
            
        alignment = getBaseAlignment(derefType, size, dummyStride, std140, rowMajor);
        if (std140)
            alignment = std::max(baseAlignmentVec4Std140, alignment);
        RoundToPow2(size, alignment);
        stride = size;  // use intra-matrix stride for the stride of just a matrix
        if (rowMajor)
            size = stride * type.getMatrixRows();
        else
            size = stride * type.getMatrixCols();

        return alignment;
    }

    assert(0);  // all cases should be covered above
    size = baseAlignmentVec4Std140;
    return baseAlignmentVec4Std140;
}
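
A hedged worked example of the std140 rules for a small struct; the caller and the declaration are hypothetical:

// Sketch of a call site (assumed names):
//     int size = 0, stride = 0;
//     int align = intermediate.getBaseAlignment(memberType, size, stride, /*std140*/ true, /*rowMajor*/ false);
//
// Worked example for a hypothetical "struct { float f; vec3 v; }" under std140:
//     f   : alignment 4, size 4, placed at offset 0                    -> running size 4
//     v   : vec3 alignment is 4N = 16 (rule 3); the offset is rounded
//           up from 4 to 16, then 12 bytes are added                   -> running size 28
//     end : rule 9 rounds the struct size up to its base alignment     -> size 32, alignment 16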