void GrGLProgram::setRenderTargetState(const GrPrimitiveProcessor& primProc, const GrPipeline& pipeline) { // Load the RT height uniform if it is needed to y-flip gl_FragCoord. if (fBuiltinUniformHandles.fRTHeightUni.isValid() && fRenderTargetState.fRenderTargetSize.fHeight != pipeline.getRenderTarget()->height()) { fProgramDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni, SkIntToScalar(pipeline.getRenderTarget()->height())); } // set RT adjustment const GrRenderTarget* rt = pipeline.getRenderTarget(); SkISize size; size.set(rt->width(), rt->height()); if (!primProc.isPathRendering()) { if (fRenderTargetState.fRenderTargetOrigin != rt->origin() || fRenderTargetState.fRenderTargetSize != size) { fRenderTargetState.fRenderTargetSize = size; fRenderTargetState.fRenderTargetOrigin = rt->origin(); float rtAdjustmentVec[4]; fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec); fProgramDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec); } } else { SkASSERT(fGpu->glCaps().shaderCaps()->pathRenderingSupport()); const GrPathProcessor& pathProc = primProc.cast<GrPathProcessor>(); fGpu->glPathRendering()->setProjectionMatrix(pathProc.viewMatrix(), size, rt->origin()); } }
bool GrPipeline::AreEqual(const GrPipeline& a, const GrPipeline& b, bool ignoreCoordTransforms) { SkASSERT(&a != &b); if (a.getRenderTarget() != b.getRenderTarget() || a.fFragmentProcessors.count() != b.fFragmentProcessors.count() || a.fNumColorProcessors != b.fNumColorProcessors || a.fScissorState != b.fScissorState || a.fFlags != b.fFlags || a.fStencilSettings != b.fStencilSettings || a.fDrawFace != b.fDrawFace || a.fIgnoresCoverage != b.fIgnoresCoverage) { return false; } // Most of the time both are nullptr if (a.fXferProcessor.get() || b.fXferProcessor.get()) { if (!a.getXferProcessor().isEqual(b.getXferProcessor())) { return false; } } for (int i = 0; i < a.numFragmentProcessors(); i++) { if (!a.getFragmentProcessor(i).isEqual(b.getFragmentProcessor(i), ignoreCoordTransforms)) { return false; } } return true; }
void GrGLProgram::setRenderTargetState(const GrPrimitiveProcessor& primProc, const GrPipeline& pipeline) { // Load the RT height uniform if it is needed to y-flip gl_FragCoord. if (fBuiltinUniformHandles.fRTHeightUni.isValid() && fRenderTargetState.fRenderTargetSize.fHeight != pipeline.getRenderTarget()->height()) { fProgramDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni, SkIntToScalar(pipeline.getRenderTarget()->height())); } // call subclasses to set the actual view matrix this->onSetRenderTargetState(primProc, pipeline); }
bool GrPipeline::isEqual(const GrPipeline& that) const { if (this->getRenderTarget() != that.getRenderTarget() || this->fFragmentStages.count() != that.fFragmentStages.count() || this->fNumColorStages != that.fNumColorStages || this->fScissorState != that.fScissorState || this->fFlags != that.fFlags || this->fStencilSettings != that.fStencilSettings || this->fDrawFace != that.fDrawFace) { return false; } if (!this->getXferProcessor()->isEqual(*that.getXferProcessor())) { return false; } // The program desc comparison should have already assured that the stage counts match. SkASSERT(this->numFragmentStages() == that.numFragmentStages()); for (int i = 0; i < this->numFragmentStages(); i++) { if (this->getFragmentStage(i) != that.getFragmentStage(i)) { return false; } } return true; }
void GrVkPipelineState::BuildStateKey(const GrPipeline& pipeline, GrPrimitiveType primitiveType, SkTArray<uint8_t, true>* key) { // Save room for the key length and key header key->reset(); key->push_back_n(kData_StateKeyOffset); GrProcessorKeyBuilder b(key); GrVkRenderTarget* vkRT = (GrVkRenderTarget*)pipeline.getRenderTarget(); vkRT->simpleRenderPass()->genKey(&b); pipeline.getStencil().genKey(&b); SkASSERT(sizeof(GrPipelineBuilder::DrawFace) <= sizeof(uint32_t)); b.add32(pipeline.getDrawFace()); b.add32(get_blend_info_key(pipeline)); b.add32(primitiveType); // Set key length int keyLength = key->count(); SkASSERT(0 == (keyLength % 4)); *reinterpret_cast<uint32_t*>(key->begin()) = SkToU32(keyLength); }
void GrVkPipeline::SetDynamicState(GrVkGpu* gpu,
                                   GrVkCommandBuffer* cmdBuffer,
                                   const GrPipeline& pipeline) {
    // Scissor, viewport, and blend constants are recorded as dynamic state on
    // the command buffer rather than baked into the VkPipeline.
    const GrRenderTarget& rt = *pipeline.getRenderTarget();
    set_dynamic_scissor_state(gpu, cmdBuffer, pipeline, rt);
    set_dynamic_viewport_state(gpu, cmdBuffer, rt);
    set_dynamic_blend_constant_state(gpu, cmdBuffer, pipeline);
}
void GrGLPathProgram::onSetRenderTargetState(const GrPrimitiveProcessor& primProc, const GrPipeline& pipeline) { SkASSERT(!primProc.willUseGeoShader() && primProc.numAttribs() == 0); const GrRenderTarget* rt = pipeline.getRenderTarget(); SkISize size; size.set(rt->width(), rt->height()); const GrPathProcessor& pathProc = primProc.cast<GrPathProcessor>(); fGpu->glPathRendering()->setProjectionMatrix(pathProc.viewMatrix(), size, rt->origin()); }
void GrVkProgram::setRenderTargetState(const GrPipeline& pipeline) { // Load the RT height uniform if it is needed to y-flip gl_FragCoord. if (fBuiltinUniformHandles.fRTHeightUni.isValid() && fRenderTargetState.fRenderTargetSize.fHeight != pipeline.getRenderTarget()->height()) { fProgramDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni, SkIntToScalar(pipeline.getRenderTarget()->height())); } // set RT adjustment const GrRenderTarget* rt = pipeline.getRenderTarget(); SkISize size; size.set(rt->width(), rt->height()); SkASSERT(fBuiltinUniformHandles.fRTAdjustmentUni.isValid()); if (fRenderTargetState.fRenderTargetOrigin != rt->origin() || fRenderTargetState.fRenderTargetSize != size) { fRenderTargetState.fRenderTargetSize = size; fRenderTargetState.fRenderTargetOrigin = rt->origin(); float rtAdjustmentVec[4]; fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec); fProgramDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec); } }
void setup_multisample_state(const GrPipeline& pipeline, VkPipelineMultisampleStateCreateInfo* multisampleInfo) { memset(multisampleInfo, 0, sizeof(VkPipelineMultisampleStateCreateInfo)); multisampleInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisampleInfo->pNext = nullptr; multisampleInfo->flags = 0; int numSamples = pipeline.getRenderTarget()->numColorSamples(); SkAssertResult(GrSampleCountToVkSampleCount(numSamples, &multisampleInfo->rasterizationSamples)); multisampleInfo->sampleShadingEnable = VK_FALSE; multisampleInfo->minSampleShading = 0; multisampleInfo->pSampleMask = nullptr; multisampleInfo->alphaToCoverageEnable = VK_FALSE; multisampleInfo->alphaToOneEnable = VK_FALSE; }
void GrGLProgram::onSetRenderTargetState(const GrPrimitiveProcessor&, const GrPipeline& pipeline) { const GrRenderTarget* rt = pipeline.getRenderTarget(); SkISize size; size.set(rt->width(), rt->height()); if (fRenderTargetState.fRenderTargetOrigin != rt->origin() || fRenderTargetState.fRenderTargetSize != size) { fRenderTargetState.fRenderTargetSize = size; fRenderTargetState.fRenderTargetOrigin = rt->origin(); GrGLfloat rtAdjustmentVec[4]; fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec); fProgramDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec); } }
void setup_multisample_state(const GrPipeline& pipeline, const GrPrimitiveProcessor& primProc, const GrCaps* caps, VkPipelineMultisampleStateCreateInfo* multisampleInfo) { memset(multisampleInfo, 0, sizeof(VkPipelineMultisampleStateCreateInfo)); multisampleInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisampleInfo->pNext = nullptr; multisampleInfo->flags = 0; int numSamples = pipeline.getRenderTarget()->numColorSamples(); SkAssertResult(GrSampleCountToVkSampleCount(numSamples, &multisampleInfo->rasterizationSamples)); float sampleShading = primProc.getSampleShading(); SkASSERT(sampleShading == 0.0f || caps->sampleShadingSupport()); multisampleInfo->sampleShadingEnable = sampleShading > 0.0f; multisampleInfo->minSampleShading = sampleShading; multisampleInfo->pSampleMask = nullptr; multisampleInfo->alphaToCoverageEnable = VK_FALSE; multisampleInfo->alphaToOneEnable = VK_FALSE; }
void GrGLProgram::setData(const GrPrimitiveProcessor& primProc, const GrPipeline& pipeline) { this->setRenderTargetState(primProc, pipeline.getRenderTarget()); // we set the textures, and uniforms for installed processors in a generic way, but subclasses // of GLProgram determine how to set coord transforms int nextSamplerIdx = 0; fGeometryProcessor->setData(fProgramDataManager, primProc, GrFragmentProcessor::CoordTransformIter(pipeline)); this->bindTextures(primProc, pipeline.getAllowSRGBInputs(), &nextSamplerIdx); this->setFragmentData(primProc, pipeline, &nextSamplerIdx); const GrXferProcessor& xp = pipeline.getXferProcessor(); SkIPoint offset; GrTexture* dstTexture = pipeline.dstTexture(&offset); fXferProcessor->setData(fProgramDataManager, xp, dstTexture, offset); if (dstTexture) { fGpu->bindTexture(nextSamplerIdx++, GrSamplerParams::ClampNoFilter(), true, static_cast<GrGLTexture*>(dstTexture)); } }
// Builds the program descriptor that uniquely identifies (and caches) the
// generated Vulkan shader program for this primProc/pipeline combination.
// Returns false (with the key reset) if any processor fails to generate its
// meta key.
bool GrVkProgramDescBuilder::Build(GrProgramDesc* desc,
                                   const GrPrimitiveProcessor& primProc,
                                   const GrPipeline& pipeline,
                                   const GrGLSLCaps& glslCaps) {
    // The descriptor is used as a cache key. Thus when a field of the
    // descriptor will not affect program generation (because of the attribute
    // bindings in use or other descriptor field settings) it should be set
    // to a canonical value to avoid duplicate programs with different keys.

    GrVkProgramDesc* vkDesc = (GrVkProgramDesc*)desc;

    GR_STATIC_ASSERT(0 == kProcessorKeysOffset % sizeof(uint32_t));
    // Make room for everything up to the effect keys.
    vkDesc->key().reset();
    vkDesc->key().push_back_n(kProcessorKeysOffset);

    GrProcessorKeyBuilder b(&vkDesc->key());

    // Primitive processor key plus its meta key.
    primProc.getGLSLProcessorKey(glslCaps, &b);
    if (!gen_meta_key(primProc, glslCaps, 0, &b)) {
        vkDesc->key().reset();  // invalidate the partial key on failure
        return false;
    }
    GrProcessor::RequiredFeatures requiredFeatures = primProc.requiredFeatures();

    // Fragment processor keys; accumulate the features the shader will need.
    for (int i = 0; i < pipeline.numFragmentProcessors(); ++i) {
        const GrFragmentProcessor& fp = pipeline.getFragmentProcessor(i);
        if (!gen_frag_proc_and_meta_keys(primProc, fp, glslCaps, &b)) {
            vkDesc->key().reset();
            return false;
        }
        requiredFeatures |= fp.requiredFeatures();
    }

    // Xfer processor key.
    const GrXferProcessor& xp = pipeline.getXferProcessor();
    xp.getGLSLProcessorKey(glslCaps, &b);
    if (!gen_meta_key(xp, glslCaps, 0, &b)) {
        vkDesc->key().reset();
        return false;
    }
    requiredFeatures |= xp.requiredFeatures();

    // --------DO NOT MOVE HEADER ABOVE THIS LINE--------------------------------------------------
    // Because header is a pointer into the dynamic array, we can't push any new data into the key
    // below here.
    KeyHeader* header = vkDesc->atOffset<KeyHeader, kHeaderOffset>();

    // make sure any padding in the header is zeroed.
    memset(header, 0, kHeaderSize);

    GrRenderTarget* rt = pipeline.getRenderTarget();

    // Surface origin only matters to shaders that read the fragment position
    // or sample locations.
    if (requiredFeatures & (GrProcessor::kFragmentPosition_RequiredFeature |
                            GrProcessor::kSampleLocations_RequiredFeature)) {
        header->fSurfaceOriginKey = GrGLSLFragmentShaderBuilder::KeyForSurfaceOrigin(rt->origin());
    } else {
        header->fSurfaceOriginKey = 0;
    }

    // Sample locations additionally key on the RT's multisample pattern.
    if (requiredFeatures & GrProcessor::kSampleLocations_RequiredFeature) {
        SkASSERT(pipeline.isHWAntialiasState());
        header->fSamplePatternKey =
            rt->renderTargetPriv().getMultisampleSpecs(pipeline.getStencil()).fUniqueID;
    } else {
        header->fSamplePatternKey = 0;
    }

    header->fOutputSwizzle = glslCaps.configOutputSwizzle(rt->config()).asKey();

    if (pipeline.ignoresCoverage()) {
        header->fIgnoresCoverage = 1;
    } else {
        header->fIgnoresCoverage = 0;
    }

    header->fSnapVerticesToPixelCenters = pipeline.snapVerticesToPixelCenters();
    header->fColorEffectCnt = pipeline.numColorFragmentProcessors();
    header->fCoverageEffectCnt = pipeline.numCoverageFragmentProcessors();
    vkDesc->finalize();
    return true;
}
// Builds the program descriptor used as the GL shader-program cache key for
// this primProc/pipeline combination. Returns false (with the key reset) if
// any processor fails to generate its meta key.
bool GrGLProgramDescBuilder::Build(GrProgramDesc* desc,
                                   const GrPrimitiveProcessor& primProc,
                                   const GrPipeline& pipeline,
                                   const GrGLGpu* gpu,
                                   const GrBatchTracker& batchTracker) {
    // The descriptor is used as a cache key. Thus when a field of the
    // descriptor will not affect program generation (because of the attribute
    // bindings in use or other descriptor field settings) it should be set
    // to a canonical value to avoid duplicate programs with different keys.

    GrGLProgramDesc* glDesc = (GrGLProgramDesc*) desc;

    GR_STATIC_ASSERT(0 == kProcessorKeysOffset % sizeof(uint32_t));
    // Make room for everything up to the effect keys.
    glDesc->key().reset();
    glDesc->key().push_back_n(kProcessorKeysOffset);

    GrProcessorKeyBuilder b(&glDesc->key());

    // Primitive processor key plus its meta key.
    primProc.getGLProcessorKey(batchTracker, gpu->glCaps(), &b);
    if (!get_meta_key(primProc, gpu->glCaps(), 0, &b)) {
        glDesc->key().reset();  // invalidate the partial key on failure
        return false;
    }

    // Per-stage fragment processor keys; each meta key folds in the transform
    // key derived from the stage's coord transforms.
    for (int s = 0; s < pipeline.numFragmentStages(); ++s) {
        const GrPendingFragmentStage& fps = pipeline.getFragmentStage(s);
        const GrFragmentProcessor& fp = *fps.processor();
        fp.getGLProcessorKey(gpu->glCaps(), &b);
        if (!get_meta_key(fp, gpu->glCaps(),
                          primProc.getTransformKey(fp.coordTransforms()), &b)) {
            glDesc->key().reset();
            return false;
        }
    }

    // Xfer processor key.
    const GrXferProcessor& xp = *pipeline.getXferProcessor();
    xp.getGLProcessorKey(gpu->glCaps(), &b);
    if (!get_meta_key(xp, gpu->glCaps(), 0, &b)) {
        glDesc->key().reset();
        return false;
    }

    // --------DO NOT MOVE HEADER ABOVE THIS LINE--------------------------------------------------
    // Because header is a pointer into the dynamic array, we can't push any new data into the key
    // below here.
    KeyHeader* header = glDesc->atOffset<KeyHeader, kHeaderOffset>();

    // make sure any padding in the header is zeroed.
    memset(header, 0, kHeaderSize);

    // Fragment position handling only keys the descriptor when something in
    // the program actually reads it.
    if (pipeline.readsFragPosition()) {
        header->fFragPosKey =
            GrGLFragmentShaderBuilder::KeyForFragmentPosition(pipeline.getRenderTarget(),
                                                              gpu->glCaps());
    } else {
        header->fFragPosKey = 0;
    }

    header->fColorEffectCnt = pipeline.numColorFragmentStages();
    header->fCoverageEffectCnt = pipeline.numCoverageFragmentStages();
    glDesc->finalize();
    return true;
}
// Records draw commands for the given meshes into this command buffer,
// preparing sampled images and (re)building the pipeline state whenever the
// primitive type changes between sub-meshes.
void GrVkGpuCommandBuffer::onDraw(const GrPipeline& pipeline,
                                  const GrPrimitiveProcessor& primProc,
                                  const GrMesh* meshes,
                                  int meshCount) {
    if (!meshCount) {
        return;
    }
    GrRenderTarget* rt = pipeline.getRenderTarget();
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);

    // Ensure every texture sampled by any processor is ready before drawing.
    prepare_sampled_images(primProc, fGpu);
    GrFragmentProcessor::Iter iter(pipeline);
    while (const GrFragmentProcessor* fp = iter.next()) {
        prepare_sampled_images(*fp, fGpu);
    }
    prepare_sampled_images(pipeline.getXferProcessor(), fGpu);

    // The pipeline state is built for the first mesh's primitive type; it is
    // rebuilt below whenever a sub-mesh uses a different type.
    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    sk_sp<GrVkPipelineState> pipelineState = this->prepareDrawState(pipeline,
                                                                    primProc,
                                                                    primitiveType,
                                                                    *renderPass);
    if (!pipelineState) {
        return;
    }

    for (int i = 0; i < meshCount; ++i) {
        const GrMesh& mesh = meshes[i];
        // NOTE: this inner iterator shadows the fragment-processor iterator
        // above; it walks the non-instanced sub-meshes of one GrMesh.
        GrMesh::Iterator iter;
        const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh);
        do {
            if (nonIdxMesh->primitiveType() != primitiveType) {
                // Technically we don't have to call this here (since there is a safety check in
                // pipelineState:setData but this will allow for quicker freeing of resources if the
                // pipelineState sits in a cache for a while.
                pipelineState->freeTempResources(fGpu);
                SkDEBUGCODE(pipelineState = nullptr);
                primitiveType = nonIdxMesh->primitiveType();
                pipelineState = this->prepareDrawState(pipeline,
                                                       primProc,
                                                       primitiveType,
                                                       *renderPass);
                if (!pipelineState) {
                    return;
                }
            }
            SkASSERT(pipelineState);
            this->bindGeometry(primProc, *nonIdxMesh);

            if (nonIdxMesh->isIndexed()) {
                fCommandBuffer->drawIndexed(fGpu,
                                            nonIdxMesh->indexCount(),
                                            1,
                                            nonIdxMesh->startIndex(),
                                            nonIdxMesh->startVertex(),
                                            0);
            } else {
                fCommandBuffer->draw(fGpu,
                                     nonIdxMesh->vertexCount(),
                                     1,
                                     nonIdxMesh->startVertex(),
                                     0);
            }
            fIsEmpty = false;

            fGpu->stats()->incNumDraws();
        } while ((nonIdxMesh = iter.next()));
    }

    // Technically we don't have to call this here (since there is a safety check in
    // pipelineState:setData but this will allow for quicker freeing of resources if the
    // pipelineState sits in a cache for a while.
    pipelineState->freeTempResources(fGpu);
}