// Shared implementation behind the glVertexAttribPointer / glVertexAttribIPointer
// entry points: records the pointer/offset state for one attribute, refreshes its
// format, rebinds the attribute to its identically-numbered binding slot, and
// marks the attribute dirty so the back-end re-syncs it.
ANGLE_INLINE void VertexArray::setVertexAttribPointerImpl(const Context *context,
                                                          ComponentType componentType,
                                                          bool pureInteger,
                                                          size_t attribIndex,
                                                          Buffer *boundBuffer,
                                                          GLint size,
                                                          VertexAttribType type,
                                                          bool normalized,
                                                          GLsizei stride,
                                                          const void *pointer)
{
    ASSERT(attribIndex < getMaxAttribs());

    // When a buffer is bound, "pointer" is actually a byte offset into that buffer.
    GLintptr offset = boundBuffer ? reinterpret_cast<GLintptr>(pointer) : 0;

    VertexAttribute &attrib = mState.mVertexAttributes[attribIndex];

    attrib.pureInteger = pureInteger;
    SetComponentTypeMask(componentType, attribIndex, &mState.mVertexAttributesTypeMask);
    // relativeOffset is always 0 for these legacy (non-separate-format) entry points.
    setVertexAttribFormatImpl(&attrib, size, type, normalized, 0);
    // Legacy pointer entry points imply a 1:1 attribute-to-binding mapping.
    setVertexAttribBinding(context, attribIndex, static_cast<GLuint>(attribIndex));

    // A zero stride means tightly packed; derive the real stride from the format size.
    GLsizei effectiveStride =
        stride != 0 ? stride : static_cast<GLsizei>(ComputeVertexAttributeTypeSize(attrib));

    attrib.pointer = pointer;
    // Keep the caller-specified stride (possibly 0) separate from the effective stride.
    attrib.vertexAttribArrayStride = stride;

    bindVertexBufferImpl(context, attribIndex, boundBuffer, offset, effectiveStride);

    setDirtyAttribBit(attribIndex, DIRTY_ATTRIB_POINTER);

    // Track attributes that reference client memory through a null pointer.
    mState.mNullPointerClientMemoryAttribsMask.set(
        attribIndex, boundBuffer == nullptr && pointer == nullptr);
}
// Computes the total tightly-packed size needed to stream every client-memory
// attribute in activeAttributesMask, plus the largest single per-vertex element
// size (used by the caller to compute slack space at the start of the buffer).
//
// outStreamingDataSize / outMaxAttributeDataSize are always written, even when
// the mask selects no attributes.
void VertexArrayGL::computeStreamingAttributeSizes(const gl::AttributesMask &activeAttributesMask,
                                                   const gl::RangeUI &indexRange,
                                                   size_t *outStreamingDataSize,
                                                   size_t *outMaxAttributeDataSize) const
{
    *outStreamingDataSize    = 0;
    *outMaxAttributeDataSize = 0;

    ASSERT(mAttributesNeedStreaming.any());

    // The streamed vertex count is identical for every attribute; hoist the
    // loop-invariant computation out of the loop.
    const size_t streamedVertexCount = indexRange.end - indexRange.start + 1;

    const auto &attribs = mData.getVertexAttributes();
    for (unsigned int idx : angle::IterateBitSet(mAttributesNeedStreaming & activeAttributesMask))
    {
        const auto &attrib = attribs[idx];
        ASSERT(AttributeNeedsStreaming(attrib));

        // Accumulate this attribute's packed data size and track the largest
        // per-vertex element size across all streamed attributes.
        size_t typeSize = ComputeVertexAttributeTypeSize(attrib);
        *outStreamingDataSize += typeSize * streamedVertexCount;
        *outMaxAttributeDataSize = std::max(*outMaxAttributeDataSize, typeSize);
    }
}
// Determines how much buffer space is required to stream all client-memory
// attributes selected by activeAttributesMask, and the largest per-element
// size among them (the caller uses this for leading slack space).
void VertexArrayGL::computeStreamingAttributeSizes(const gl::AttributesMask &activeAttributesMask,
                                                   GLsizei instanceCount,
                                                   const gl::IndexRange &indexRange,
                                                   size_t *outStreamingDataSize,
                                                   size_t *outMaxAttributeDataSize) const
{
    ASSERT(mAttributesNeedStreaming.any());

    // Accumulate locally, then publish through the out-parameters once.
    size_t totalStreamingSize = 0;
    size_t largestTypeSize    = 0;

    const auto &attribs  = mState.getVertexAttributes();
    const auto &bindings = mState.getVertexBindings();

    for (auto idx : (mAttributesNeedStreaming & activeAttributesMask))
    {
        const auto &attrib  = attribs[idx];
        const auto &binding = bindings[attrib.bindingIndex];
        ASSERT(AttributeNeedsStreaming(attrib, binding));

        // Element count depends on the (multiview-adjusted) divisor: instanced
        // bindings advance per instance group rather than per vertex.
        const size_t typeSize        = ComputeVertexAttributeTypeSize(attrib);
        const GLuint adjustedDivisor = GetAdjustedDivisor(mAppliedNumViews, binding.getDivisor());
        totalStreamingSize += typeSize * ComputeVertexBindingElementCount(
                                             adjustedDivisor, indexRange.vertexCount(),
                                             instanceCount);
        largestTypeSize = std::max(largestTypeSize, typeSize);
    }

    *outStreamingDataSize    = totalStreamingSize;
    *outMaxAttributeDataSize = largestTypeSize;
}
// Returns the stride, in bytes, between consecutive elements of this attribute.
// Disabled attributes report a fixed 16-byte stride; enabled attributes with a
// zero stride are tightly packed, so the format's element size is used instead.
size_t ComputeVertexAttributeStride(const VertexAttribute &attrib)
{
    if (!attrib.enabled)
    {
        return 16;
    }
    if (attrib.stride != 0)
    {
        return attrib.stride;
    }
    return ComputeVertexAttributeTypeSize(attrib);
}
// Returns the number of complete attribute elements that fit in a buffer of
// "size" bytes, given the attribute's stride and its offset within the stride.
static int ElementsInBuffer(const gl::VertexAttribute &attrib, unsigned int size)
{
    // Size cannot be larger than a GLsizei
    if (size > static_cast<unsigned int>(std::numeric_limits<int>::max()))
    {
        size = static_cast<unsigned int>(std::numeric_limits<int>::max());
    }

    GLsizei stride = ComputeVertexAttributeStride(attrib);
    // Subtract the partial-stride offset into the first element, then add back
    // the tail slack (stride - element size): the last element only needs its
    // own data size, not a full stride of space after it.
    return (size - attrib.offset % stride + (stride - ComputeVertexAttributeTypeSize(attrib))) /
           stride;
}
// Streams all dynamic attributes selected by dynamicAttribsMask in three
// passes: (1) reserve space in the streaming buffers, (2) copy the data in
// (unmapping the streaming buffer when done or on failure), and (3) promote
// frequently-reused source buffers toward static usage.
gl::Error VertexDataManager::storeDynamicAttribs(
    std::vector<TranslatedAttribute> *translatedAttribs,
    const gl::AttributesMask &dynamicAttribsMask,
    GLint start,
    GLsizei count,
    GLsizei instances)
{
    // Reserve the required space for the dynamic buffers.
    for (auto attribIndex : angle::IterateBitSet(dynamicAttribsMask))
    {
        const auto &dynamicAttrib = (*translatedAttribs)[attribIndex];
        gl::Error error = reserveSpaceForAttrib(dynamicAttrib, count, instances);
        if (error.isError())
        {
            // NOTE(review): this early return does not call unmapStreamingBuffer() —
            // presumably nothing is mapped during reservation; confirm.
            return error;
        }
    }

    // Store dynamic attributes
    for (auto attribIndex : angle::IterateBitSet(dynamicAttribsMask))
    {
        auto *dynamicAttrib = &(*translatedAttribs)[attribIndex];
        gl::Error error = storeDynamicAttrib(dynamicAttrib, start, count, instances);
        if (error.isError())
        {
            // Storing may have mapped the streaming buffer; always unmap before returning.
            unmapStreamingBuffer();
            return error;
        }
    }

    unmapStreamingBuffer();

    // Promote static usage of dynamic buffers.
    for (auto attribIndex : angle::IterateBitSet(dynamicAttribsMask))
    {
        auto *dynamicAttrib = &(*translatedAttribs)[attribIndex];
        gl::Buffer *buffer = dynamicAttrib->attribute->buffer.get();
        if (buffer)
        {
            BufferD3D *bufferD3D = GetImplAs<BufferD3D>(buffer);
            size_t typeSize = ComputeVertexAttributeTypeSize(*dynamicAttrib->attribute);
            // NOTE(review): count * typeSize is truncated through int; could overflow for
            // very large draws — presumably bounded elsewhere, confirm.
            bufferD3D->promoteStaticUsage(count * static_cast<int>(typeSize));
        }
    }

    return gl::Error(GL_NO_ERROR);
}
// For every dynamic attribute backed by a GL buffer, nudges the backing D3D
// buffer toward static usage based on the amount of data the draw consumed.
void VertexDataManager::PromoteDynamicAttribs(
    const gl::Context *context,
    const std::vector<TranslatedAttribute> &translatedAttribs,
    const gl::AttributesMask &dynamicAttribsMask,
    size_t count)
{
    for (auto attribIndex : dynamicAttribsMask)
    {
        const TranslatedAttribute &translated = translatedAttribs[attribIndex];
        ASSERT(translated.attribute && translated.binding);

        gl::Buffer *buffer = translated.binding->getBuffer().get();
        if (!buffer)
        {
            // Client-memory attributes have no buffer to promote.
            continue;
        }

        // Note: this multiplication can overflow. It should not be a security problem.
        BufferD3D *bufferD3D  = GetImplAs<BufferD3D>(buffer);
        size_t elementSize    = ComputeVertexAttributeTypeSize(*translated.attribute);
        bufferD3D->promoteStaticUsage(context, count * elementSize);
    }
}
// Prepares all active vertex attributes for a draw call: classifies them as
// enabled (buffer/client data) or disabled (current value), reserves and fills
// streaming space, stores current values, commits static buffers, and promotes
// heavily-reused buffers toward static usage.
gl::Error VertexDataManager::prepareVertexData(const gl::State &state,
                                               GLint start,
                                               GLsizei count,
                                               std::vector<TranslatedAttribute> *translatedAttribs,
                                               GLsizei instances)
{
    if (!mStreamingBuffer)
    {
        return gl::Error(GL_OUT_OF_MEMORY, "Internal streaming vertex buffer is unexpectedly NULL.");
    }

    // Compute active enabled and active disable attributes, for speed.
    // TODO(jmadill): don't recompute if there was no state change
    const gl::VertexArray *vertexArray = state.getVertexArray();
    const gl::Program *program         = state.getProgram();
    const auto &vertexAttributes       = vertexArray->getVertexAttributes();

    mActiveEnabledAttributes.clear();
    mActiveDisabledAttributes.clear();
    translatedAttribs->clear();

    for (size_t attribIndex = 0; attribIndex < vertexAttributes.size(); ++attribIndex)
    {
        if (program->isAttribLocationActive(attribIndex))
        {
            // Resize automatically puts in empty attribs
            translatedAttribs->resize(attribIndex + 1);

            TranslatedAttribute *translated = &(*translatedAttribs)[attribIndex];

            // Record the attribute now
            translated->active = true;
            translated->attribute = &vertexAttributes[attribIndex];
            translated->currentValueType =
                state.getVertexAttribCurrentValue(static_cast<unsigned int>(attribIndex)).Type;
            translated->divisor = vertexAttributes[attribIndex].divisor;

            // Enabled attributes stream real data; disabled ones use the current value.
            if (vertexAttributes[attribIndex].enabled)
            {
                mActiveEnabledAttributes.push_back(translated);
            }
            else
            {
                mActiveDisabledAttributes.push_back(attribIndex);
            }
        }
    }

    // Reserve the required space in the buffers
    for (const TranslatedAttribute *activeAttrib : mActiveEnabledAttributes)
    {
        gl::Error error = reserveSpaceForAttrib(*activeAttrib, count, instances);
        if (error.isError())
        {
            return error;
        }
    }

    // Perform the vertex data translations
    for (TranslatedAttribute *activeAttrib : mActiveEnabledAttributes)
    {
        gl::Error error = storeAttribute(activeAttrib, start, count, instances);
        if (error.isError())
        {
            // Unmap any mapped buffers before propagating the failure.
            hintUnmapAllResources(vertexAttributes);
            return error;
        }
    }

    // Store the current value for disabled-but-active attributes, lazily creating
    // the per-attribute current-value streaming buffer on first use.
    for (size_t attribIndex : mActiveDisabledAttributes)
    {
        if (mCurrentValueCache[attribIndex].buffer == nullptr)
        {
            mCurrentValueCache[attribIndex].buffer =
                new StreamingVertexBufferInterface(mFactory, CONSTANT_VERTEX_BUFFER_SIZE);
        }

        gl::Error error = storeCurrentValue(
            state.getVertexAttribCurrentValue(static_cast<unsigned int>(attribIndex)),
            &(*translatedAttribs)[attribIndex], &mCurrentValueCache[attribIndex]);
        if (error.isError())
        {
            hintUnmapAllResources(vertexAttributes);
            return error;
        }
    }

    // Commit all the static vertex buffers. This fixes them in size/contents, and forces ANGLE
    // to use a new static buffer (or recreate the static buffers) next time
    for (size_t attribIndex = 0; attribIndex < vertexAttributes.size(); ++attribIndex)
    {
        const gl::VertexAttribute &attrib = vertexAttributes[attribIndex];
        gl::Buffer *buffer = attrib.buffer.get();
        BufferD3D *storage = buffer ? GetImplAs<BufferD3D>(buffer) : nullptr;
        StaticVertexBufferInterface *staticBuffer =
            storage ? storage->getStaticVertexBuffer(attrib) : nullptr;

        if (staticBuffer)
        {
            staticBuffer->commit();
        }
    }

    // Hint to unmap all the resources
    hintUnmapAllResources(vertexAttributes);

    // Promote static usage of buffers backing enabled attributes.
    // NOTE(review): count * typeSize is truncated through int — could overflow for very
    // large draws; presumably bounded elsewhere, confirm.
    for (const TranslatedAttribute *activeAttrib : mActiveEnabledAttributes)
    {
        gl::Buffer *buffer = activeAttrib->attribute->buffer.get();
        if (buffer)
        {
            BufferD3D *bufferD3D = GetImplAs<BufferD3D>(buffer);
            size_t typeSize = ComputeVertexAttributeTypeSize(*activeAttrib->attribute);
            bufferD3D->promoteStaticUsage(count * static_cast<int>(typeSize));
        }
    }

    return gl::Error(GL_NO_ERROR);
}
gl::Error VertexArrayGL::streamAttributes(const std::vector<GLuint> &activeAttribLocations, size_t streamingDataSize, size_t maxAttributeDataSize, const gl::RangeUI &indexRange) const { if (mStreamingArrayBuffer == 0) { mFunctions->genBuffers(1, &mStreamingArrayBuffer); mStreamingArrayBufferSize = 0; } // If first is greater than zero, a slack space needs to be left at the beginning of the buffer so that // the same 'first' argument can be passed into the draw call. const size_t bufferEmptySpace = maxAttributeDataSize * indexRange.start; const size_t requiredBufferSize = streamingDataSize + bufferEmptySpace; mStateManager->bindBuffer(GL_ARRAY_BUFFER, mStreamingArrayBuffer); if (requiredBufferSize > mStreamingArrayBufferSize) { mFunctions->bufferData(GL_ARRAY_BUFFER, requiredBufferSize, nullptr, GL_DYNAMIC_DRAW); mStreamingArrayBufferSize = requiredBufferSize; } // Unmapping a buffer can return GL_FALSE to indicate that the system has corrupted the data // somehow (such as by a screen change), retry writing the data a few times and return OUT_OF_MEMORY // if that fails. 
GLboolean unmapResult = GL_FALSE; size_t unmapRetryAttempts = 5; while (unmapResult != GL_TRUE && --unmapRetryAttempts > 0) { uint8_t *bufferPointer = reinterpret_cast<uint8_t*>(mFunctions->mapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY)); size_t curBufferOffset = bufferEmptySpace; const size_t streamedVertexCount = indexRange.end - indexRange.start + 1; const auto &attribs = mData.getVertexAttributes(); for (size_t activeAttrib = 0; activeAttrib < activeAttribLocations.size(); activeAttrib++) { GLuint idx = activeAttribLocations[activeAttrib]; const auto &attrib = attribs[idx]; if (attrib.enabled && attrib.buffer.get() == nullptr) { const size_t sourceStride = ComputeVertexAttributeStride(attrib); const size_t destStride = ComputeVertexAttributeTypeSize(attrib); const uint8_t *inputPointer = reinterpret_cast<const uint8_t*>(attrib.pointer); // Pack the data when copying it, user could have supplied a very large stride that would // cause the buffer to be much larger than needed. if (destStride == sourceStride) { // Can copy in one go, the data is packed memcpy(bufferPointer + curBufferOffset, inputPointer + (sourceStride * indexRange.start), destStride * streamedVertexCount); } else { // Copy each vertex individually for (size_t vertexIdx = indexRange.start; vertexIdx <= indexRange.end; vertexIdx++) { memcpy(bufferPointer + curBufferOffset + (destStride * vertexIdx), inputPointer + (sourceStride * vertexIdx), destStride); } } // Compute where the 0-index vertex would be. const size_t vertexStartOffset = curBufferOffset - (indexRange.start * destStride); mFunctions->vertexAttribPointer(idx, attrib.size, attrib.type, attrib.normalized, destStride, reinterpret_cast<const GLvoid*>(vertexStartOffset)); curBufferOffset += destStride * streamedVertexCount; // Mark the applied attribute as dirty by setting an invalid size so that if it doesn't // need to be streamed later, there is no chance that the caching will skip it. 
mAppliedAttributes[idx].size = static_cast<GLuint>(-1); } } unmapResult = mFunctions->unmapBuffer(GL_ARRAY_BUFFER); } if (unmapResult != GL_TRUE) { return gl::Error(GL_OUT_OF_MEMORY, "Failed to unmap the client data streaming buffer."); } return gl::Error(GL_NO_ERROR); }
// Syncs enable/divisor/pointer state for every active attribute and, for
// enabled attributes backed by client memory, accumulates the streaming-buffer
// size requirements into the two out-parameters.
gl::Error VertexArrayGL::syncAttributeState(const std::vector<GLuint> &activeAttribLocations,
                                            bool attributesNeedStreaming,
                                            const gl::RangeUI &indexRange,
                                            size_t *outStreamingDataSize,
                                            size_t *outMaxAttributeDataSize) const
{
    *outStreamingDataSize    = 0;
    *outMaxAttributeDataSize = 0;

    const auto &attribs = mData.getVertexAttributes();
    for (size_t activeAttrib = 0; activeAttrib < activeAttribLocations.size(); activeAttrib++)
    {
        GLuint idx         = activeAttribLocations[activeAttrib];
        const auto &attrib = attribs[idx];

        // Always sync the enabled and divisor state, they are required for both streaming and
        // buffered attributes
        if (mAppliedAttributes[idx].enabled != attrib.enabled)
        {
            if (attrib.enabled)
            {
                mFunctions->enableVertexAttribArray(idx);
            }
            else
            {
                mFunctions->disableVertexAttribArray(idx);
            }
            mAppliedAttributes[idx].enabled = attrib.enabled;
        }

        if (mAppliedAttributes[idx].divisor != attrib.divisor)
        {
            mFunctions->vertexAttribDivisor(idx, attrib.divisor);
            mAppliedAttributes[idx].divisor = attrib.divisor;
        }

        // Enabled attributes with no bound buffer source client memory and must be streamed.
        if (attribs[idx].enabled && attrib.buffer.get() == nullptr)
        {
            ASSERT(attributesNeedStreaming);

            const size_t streamedVertexCount = indexRange.end - indexRange.start + 1;

            // If streaming is going to be required, compute the size of the required buffer
            // and how much slack space at the beginning of the buffer will be required by
            // determining the attribute with the largest data size.
            size_t typeSize = ComputeVertexAttributeTypeSize(attrib);
            *outStreamingDataSize += typeSize * streamedVertexCount;
            *outMaxAttributeDataSize = std::max(*outMaxAttributeDataSize, typeSize);
        }
        else
        {
            // Sync the attribute with no translation
            if (mAppliedAttributes[idx] != attrib)
            {
                const gl::Buffer *arrayBuffer = attrib.buffer.get();
                if (arrayBuffer != nullptr)
                {
                    const BufferGL *arrayBufferGL = GetImplAs<BufferGL>(arrayBuffer);
                    mStateManager->bindBuffer(GL_ARRAY_BUFFER, arrayBufferGL->getBufferID());
                }
                else
                {
                    mStateManager->bindBuffer(GL_ARRAY_BUFFER, 0);
                }

                // Pure-integer attributes use the I-variant so values are not converted to float.
                if (attrib.pureInteger)
                {
                    mFunctions->vertexAttribIPointer(idx, attrib.size, attrib.type, attrib.stride,
                                                     attrib.pointer);
                }
                else
                {
                    mFunctions->vertexAttribPointer(idx, attrib.size, attrib.type,
                                                    attrib.normalized, attrib.stride,
                                                    attrib.pointer);
                }

                mAppliedAttributes[idx] = attrib;
            }
        }
    }

    return gl::Error(GL_NO_ERROR);
}
// Legacy prepareVertexData: walks every MAX_VERTEX_ATTRIBS slot, invalidating
// stale static data, reserving space, translating enabled attributes (or the
// current value for disabled ones), and finally promoting buffer usage.
gl::Error VertexDataManager::prepareVertexData(const gl::State &state,
                                               GLint start,
                                               GLsizei count,
                                               TranslatedAttribute *translated,
                                               GLsizei instances)
{
    if (!mStreamingBuffer)
    {
        return gl::Error(GL_OUT_OF_MEMORY, "Internal streaming vertex buffer is unexpectedly NULL.");
    }

    // Invalidate static buffers that don't contain matching attributes
    for (int attributeIndex = 0; attributeIndex < gl::MAX_VERTEX_ATTRIBS; attributeIndex++)
    {
        // An attribute is active when the program assigns it a semantic index.
        translated[attributeIndex].active =
            (state.getProgram()->getSemanticIndex(attributeIndex) != -1);
        const gl::VertexAttribute &curAttrib = state.getVertexAttribState(attributeIndex);

        if (translated[attributeIndex].active && curAttrib.enabled)
        {
            invalidateMatchingStaticData(curAttrib,
                                         state.getVertexAttribCurrentValue(attributeIndex));
        }
    }

    // Reserve the required space in the buffers
    for (int i = 0; i < gl::MAX_VERTEX_ATTRIBS; i++)
    {
        const gl::VertexAttribute &curAttrib = state.getVertexAttribState(i);
        if (translated[i].active && curAttrib.enabled)
        {
            gl::Error error = reserveSpaceForAttrib(curAttrib,
                                                    state.getVertexAttribCurrentValue(i), count,
                                                    instances);
            if (error.isError())
            {
                return error;
            }
        }
    }

    // Perform the vertex data translations
    for (int i = 0; i < gl::MAX_VERTEX_ATTRIBS; i++)
    {
        const gl::VertexAttribute &curAttrib = state.getVertexAttribState(i);
        if (translated[i].active)
        {
            if (curAttrib.enabled)
            {
                gl::Error error = storeAttribute(curAttrib, state.getVertexAttribCurrentValue(i),
                                                 &translated[i], start, count, instances);
                if (error.isError())
                {
                    return error;
                }
            }
            else
            {
                // Disabled attributes stream their constant current value; that buffer is
                // created lazily on first use.
                if (!mCurrentValueBuffer[i])
                {
                    mCurrentValueBuffer[i] =
                        new StreamingVertexBufferInterface(mRenderer, CONSTANT_VERTEX_BUFFER_SIZE);
                }
                gl::Error error = storeCurrentValue(curAttrib,
                                                    state.getVertexAttribCurrentValue(i),
                                                    &translated[i], &mCurrentValue[i],
                                                    &mCurrentValueOffsets[i],
                                                    mCurrentValueBuffer[i]);
                if (error.isError())
                {
                    return error;
                }
            }
        }
    }

    // Promote static usage of buffers backing enabled attributes.
    for (int i = 0; i < gl::MAX_VERTEX_ATTRIBS; i++)
    {
        const gl::VertexAttribute &curAttrib = state.getVertexAttribState(i);
        if (translated[i].active && curAttrib.enabled)
        {
            gl::Buffer *buffer = curAttrib.buffer.get();
            if (buffer)
            {
                BufferD3D *bufferImpl = BufferD3D::makeBufferD3D(buffer->getImplementation());
                bufferImpl->promoteStaticUsage(count * ComputeVertexAttributeTypeSize(curAttrib));
            }
        }
    }

    return gl::Error(GL_NO_ERROR);
}
// Streams every client-memory attribute that needs it into
// mStreamingArrayBuffer, packing the data tightly and updating the GL attribute
// pointers to reference the streamed copies.
gl::Error VertexArrayGL::streamAttributes(const gl::AttributesMask &activeAttributesMask,
                                          GLsizei instanceCount,
                                          const gl::IndexRange &indexRange) const
{
    // Sync the vertex attribute state and track what data needs to be streamed
    size_t streamingDataSize    = 0;
    size_t maxAttributeDataSize = 0;

    computeStreamingAttributeSizes(activeAttributesMask, instanceCount, indexRange,
                                   &streamingDataSize, &maxAttributeDataSize);

    if (streamingDataSize == 0)
    {
        return gl::NoError();
    }

    // Lazily create the streaming buffer object on first use.
    if (mStreamingArrayBuffer == 0)
    {
        mFunctions->genBuffers(1, &mStreamingArrayBuffer);
        mStreamingArrayBufferSize = 0;
    }

    // If first is greater than zero, a slack space needs to be left at the beginning of the buffer
    // so that the same 'first' argument can be passed into the draw call.
    const size_t bufferEmptySpace   = maxAttributeDataSize * indexRange.start;
    const size_t requiredBufferSize = streamingDataSize + bufferEmptySpace;

    mStateManager->bindBuffer(gl::BufferBinding::Array, mStreamingArrayBuffer);
    // Grow (never shrink) the streaming buffer's storage.
    if (requiredBufferSize > mStreamingArrayBufferSize)
    {
        mFunctions->bufferData(GL_ARRAY_BUFFER, requiredBufferSize, nullptr, GL_DYNAMIC_DRAW);
        mStreamingArrayBufferSize = requiredBufferSize;
    }

    // Unmapping a buffer can return GL_FALSE to indicate that the system has corrupted the data
    // somehow (such as by a screen change), retry writing the data a few times and return
    // OUT_OF_MEMORY if that fails.
    // NOTE(review): with the pre-decrement in the loop condition, "5" yields at most 4 map/unmap
    // attempts; bufferPointer is also not null-checked before writing — confirm
    // MapBufferRangeWithFallback cannot fail here.
    GLboolean unmapResult     = GL_FALSE;
    size_t unmapRetryAttempts = 5;
    while (unmapResult != GL_TRUE && --unmapRetryAttempts > 0)
    {
        uint8_t *bufferPointer = MapBufferRangeWithFallback(mFunctions, GL_ARRAY_BUFFER, 0,
                                                            requiredBufferSize, GL_MAP_WRITE_BIT);
        size_t curBufferOffset = bufferEmptySpace;

        const auto &attribs  = mState.getVertexAttributes();
        const auto &bindings = mState.getVertexBindings();

        gl::AttributesMask attribsToStream = (mAttributesNeedStreaming & activeAttributesMask);

        for (auto idx : attribsToStream)
        {
            const auto &attrib = attribs[idx];
            ASSERT(IsVertexAttribPointerSupported(idx, attrib));

            const auto &binding = bindings[attrib.bindingIndex];
            ASSERT(AttributeNeedsStreaming(attrib, binding));

            // The element count depends on the (multiview-adjusted) divisor: instanced
            // attributes advance per instance group instead of per vertex.
            GLuint adjustedDivisor = GetAdjustedDivisor(mAppliedNumViews, binding.getDivisor());
            const size_t streamedVertexCount = ComputeVertexBindingElementCount(
                adjustedDivisor, indexRange.vertexCount(), instanceCount);

            const size_t sourceStride = ComputeVertexAttributeStride(attrib, binding);
            const size_t destStride   = ComputeVertexAttributeTypeSize(attrib);

            // Vertices do not apply the 'start' offset when the divisor is non-zero even when doing
            // a non-instanced draw call
            const size_t firstIndex = adjustedDivisor == 0 ? indexRange.start : 0;

            // Attributes using client memory ignore the VERTEX_ATTRIB_BINDING state.
            // https://www.opengl.org/registry/specs/ARB/vertex_attrib_binding.txt
            const uint8_t *inputPointer = reinterpret_cast<const uint8_t *>(attrib.pointer);

            // Pack the data when copying it, user could have supplied a very large stride that
            // would cause the buffer to be much larger than needed.
            if (destStride == sourceStride)
            {
                // Can copy in one go, the data is packed
                memcpy(bufferPointer + curBufferOffset, inputPointer + (sourceStride * firstIndex),
                       destStride * streamedVertexCount);
            }
            else
            {
                // Copy each vertex individually
                for (size_t vertexIdx = 0; vertexIdx < streamedVertexCount; vertexIdx++)
                {
                    uint8_t *out = bufferPointer + curBufferOffset + (destStride * vertexIdx);
                    const uint8_t *in = inputPointer + sourceStride * (vertexIdx + firstIndex);
                    memcpy(out, in, destStride);
                }
            }

            // Compute where the 0-index vertex would be.
            const size_t vertexStartOffset = curBufferOffset - (firstIndex * destStride);

            callVertexAttribPointer(static_cast<GLuint>(idx), attrib,
                                    static_cast<GLsizei>(destStride),
                                    static_cast<GLintptr>(vertexStartOffset));

            curBufferOffset += destStride * streamedVertexCount;
        }

        unmapResult = mFunctions->unmapBuffer(GL_ARRAY_BUFFER);
    }

    if (unmapResult != GL_TRUE)
    {
        return gl::OutOfMemory() << "Failed to unmap the client data streaming buffer.";
    }
    return gl::NoError();
}