void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
    // Count what there is to draw.
    BatchList::Iter iter;
    iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
    int numGLInstances = 0;
    int numGLDrawCmds = 0;
    while (Batch* b = iter.get()) {
        GLBatch* batch = static_cast<GLBatch*>(b);
        iter.next();

        numGLInstances += batch->fNumDraws;
        numGLDrawCmds += batch->numGLCommands();
    }
    if (!numGLDrawCmds) {
        return;
    }
    SkASSERT(numGLInstances);

    // Lazily create a vertex array object.
    if (!fVertexArrayID) {
        GL_CALL(GenVertexArrays(1, &fVertexArrayID));
        if (!fVertexArrayID) {
            return;
        }
        this->glGpu()->bindVertexArray(fVertexArrayID);

        // Attach our index buffer to the vertex array.
        SkASSERT(!this->indexBuffer()->isCPUBacked());
        GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
                           static_cast<const GrGLBuffer*>(this->indexBuffer())->bufferID()));

        // Set up the non-instanced attribs.
        this->glGpu()->bindBuffer(kVertex_GrBufferType, this->vertexBuffer());
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeCoords));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeCoords, 2, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(ShapeVertex), (void*) offsetof(ShapeVertex, fX)));
        GL_CALL(EnableVertexAttribArray((int)Attrib::kVertexAttrs));
        GL_CALL(VertexAttribIPointer((int)Attrib::kVertexAttrs, 1, GR_GL_INT, sizeof(ShapeVertex),
                                     (void*) offsetof(ShapeVertex, fAttrs)));

        SkASSERT(SK_InvalidUniqueID == fInstanceAttribsBufferUniqueId);
    }

    // Create and map instance and draw-indirect buffers.
    SkASSERT(!fInstanceBuffer);
    fInstanceBuffer.reset(
        rp->createBuffer(sizeof(Instance) * numGLInstances, kVertex_GrBufferType,
                         kDynamic_GrAccessPattern,
                         GrResourceProvider::kNoPendingIO_Flag |
                         GrResourceProvider::kRequireGpuMemory_Flag));
    if (!fInstanceBuffer) {
        return;
    }

    SkASSERT(!fDrawIndirectBuffer);
    fDrawIndirectBuffer.reset(
        rp->createBuffer(sizeof(GrGLDrawElementsIndirectCommand) * numGLDrawCmds,
                         kDrawIndirect_GrBufferType, kDynamic_GrAccessPattern,
                         GrResourceProvider::kNoPendingIO_Flag |
                         GrResourceProvider::kRequireGpuMemory_Flag));
    if (!fDrawIndirectBuffer) {
        return;
    }

    Instance* glMappedInstances = static_cast<Instance*>(fInstanceBuffer->map());
    int glInstancesIdx = 0;

    auto* glMappedCmds = static_cast<GrGLDrawElementsIndirectCommand*>(fDrawIndirectBuffer->map());
    int glDrawCmdsIdx = 0;

    bool baseInstanceSupport = this->glGpu()->glCaps().baseInstanceSupport();

    if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
        fGLDrawCmdsInfo.reset(numGLDrawCmds);
    }

    // Generate the instance and draw-indirect buffer contents based on the tracked batches.
    iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
    while (Batch* b = iter.get()) {
        GLBatch* batch = static_cast<GLBatch*>(b);
        iter.next();

        batch->fEmulatedBaseInstance = baseInstanceSupport ? 0 : glInstancesIdx;
        batch->fGLDrawCmdsIdx = glDrawCmdsIdx;

        const Batch::Draw* draw = batch->fHeadDraw;
        SkASSERT(draw);
        do {
            int instanceCount = 0;
            IndexRange geometry = draw->fGeometry;
            SkASSERT(!geometry.isEmpty());

            do {
                glMappedInstances[glInstancesIdx + instanceCount++] = draw->fInstance;
                draw = draw->fNext;
            } while (draw && draw->fGeometry == geometry);

            GrGLDrawElementsIndirectCommand& glCmd = glMappedCmds[glDrawCmdsIdx];
            glCmd.fCount = geometry.fCount;
            glCmd.fInstanceCount = instanceCount;
            glCmd.fFirstIndex = geometry.fStart;
            glCmd.fBaseVertex = 0;
            glCmd.fBaseInstance = baseInstanceSupport ? glInstancesIdx : 0;

            if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
                fGLDrawCmdsInfo[glDrawCmdsIdx].fInstanceCount = instanceCount;
#if GR_GL_LOG_INSTANCED_BATCHES
                fGLDrawCmdsInfo[glDrawCmdsIdx].fGeometry = geometry;
#endif
            }

            glInstancesIdx += instanceCount;
            ++glDrawCmdsIdx;
        } while (draw);
    }

    SkASSERT(glDrawCmdsIdx == numGLDrawCmds);
    fDrawIndirectBuffer->unmap();

    SkASSERT(glInstancesIdx == numGLInstances);
    fInstanceBuffer->unmap();
}
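
// ---------------------------------------------------------------------------
// Illustrative-only sketch (not part of the Skia source above): how a
// draw-indirect buffer laid out like the one built in onBeginFlush is
// typically consumed at draw time. All names, the primitive mode, the index
// type, and the loader setup are assumptions made for this example; a GL
// 4.3+ context with its entry points already loaded (e.g. via glad) is
// assumed, and the struct below only mirrors the field layout implied by the
// GrGLDrawElementsIndirectCommand writes above.

struct DrawElementsIndirectCommand {
    GLuint fCount;
    GLuint fInstanceCount;
    GLuint fFirstIndex;
    GLuint fBaseVertex;
    GLuint fBaseInstance;
};

static void issueIndirectDraws(GLuint drawIndirectBufferID, int numDrawCmds,
                               bool baseInstanceSupport, const GLuint* instanceCounts) {
    glBindBuffer(GL_DRAW_INDIRECT_BUFFER, drawIndirectBufferID);
    if (baseInstanceSupport) {
        // Every command is submitted in one call; each command's fBaseInstance
        // selects its starting slot in the instance buffer.
        glMultiDrawElementsIndirect(GL_TRIANGLES, GL_UNSIGNED_BYTE, nullptr, numDrawCmds,
                                    sizeof(DrawElementsIndirectCommand));
    } else {
        // Without base-instance support the commands are replayed one at a time,
        // tracking an emulated base instance from the recorded per-command
        // instance counts (the role fGLDrawCmdsInfo plays above, where
        // fBaseInstance was written as 0).
        GLuint emulatedBaseInstance = 0;
        for (int i = 0; i < numDrawCmds; ++i) {
            // ...re-point the instanced vertex attribs at emulatedBaseInstance here...
            glDrawElementsIndirect(GL_TRIANGLES, GL_UNSIGNED_BYTE,
                                   (const void*)(i * sizeof(DrawElementsIndirectCommand)));
            emulatedBaseInstance += instanceCounts[i];
        }
    }
}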