// Queries the driver for the post-link locations of every uniform, sampler,
// texel buffer, and image storage and caches them on the handler entries.
// Only needed when the driver cannot bind locations up front; when
// bindUniformLocationSupport() is available, bindUniformLocations() has
// already assigned them and this is a no-op.
void GrGLUniformHandler::getUniformLocations(GrGLuint programID, const GrGLCaps& caps) {
    if (caps.bindUniformLocationSupport()) {
        return;
    }
    int uniformCount = fUniforms.count();
    for (int u = 0; u < uniformCount; ++u) {
        GrGLint loc;
        GL_CALL_RET(loc, GetUniformLocation(programID, fUniforms[u].fVariable.c_str()));
        fUniforms[u].fLocation = loc;
    }
    for (int s = 0; s < fSamplers.count(); ++s) {
        GrGLint loc;
        GL_CALL_RET(loc, GetUniformLocation(programID, fSamplers[s].fVariable.c_str()));
        fSamplers[s].fLocation = loc;
    }
    for (int t = 0; t < fTexelBuffers.count(); ++t) {
        GrGLint loc;
        GL_CALL_RET(loc, GetUniformLocation(programID, fTexelBuffers[t].fVariable.c_str()));
        fTexelBuffers[t].fLocation = loc;
    }
    for (int im = 0; im < fImageStorages.count(); ++im) {
        GrGLint loc;
        GL_CALL_RET(loc, GetUniformLocation(programID, fImageStorages[im].fVariable.c_str()));
        fImageStorages[im].fLocation = loc;
    }
}
// Builds the key bits describing how the shader must read the destination.
// With framebuffer-fetch support only the "yes dst read" bit is needed;
// otherwise the key also records whether the dst-copy texture is alpha-only
// (when no texture swizzle is available to hide that) and its origin.
GrGLShaderBuilder::DstReadKey GrGLShaderBuilder::KeyForDstRead(const GrTexture* dstCopy,
                                                               const GrGLCaps& caps) {
    uint32_t key = kYesDstRead_DstReadKeyBit;
    if (caps.fbFetchSupport()) {
        // FB fetch reads the dst directly; no copy-texture details to encode.
        return key;
    }
    SkASSERT(NULL != dstCopy);
    if (!caps.textureSwizzleSupport() && GrPixelConfigIsAlphaOnly(dstCopy->config())) {
        // The fact that the config is alpha-only must be considered when generating code.
        key |= kUseAlphaConfig_DstReadKeyBit;
    }
    if (dstCopy->origin() == kTopLeft_GrSurfaceOrigin) {
        key |= kTopLeftOrigin_DstReadKeyBit;
    }
    // The key type must be wide enough to hold every bit we set.
    SkASSERT(static_cast<DstReadKey>(key) == key);
    return static_cast<DstReadKey>(key);
}
// Pre-assigns sequential uniform locations (0..N-1) before the program is
// linked, when the driver supports explicit binding. The chosen index is
// cached on each uniform entry so later uploads skip location queries.
void GrGLUniformHandler::bindUniformLocations(GrGLuint programID, const GrGLCaps& caps) {
    if (!caps.bindUniformLocationSupport()) {
        return;
    }
    const int n = fUniforms.count();
    for (int idx = 0; idx < n; ++idx) {
        GL_CALL(BindUniformLocation(programID, idx, fUniforms[idx].fVariable.c_str()));
        fUniforms[idx].fLocation = idx;
    }
}
/** * Do we need to either map r,g,b->a or a->r. configComponentMask indicates which channels are * present in the texture's config. swizzleComponentMask indicates the channels present in the * shader swizzle. */ static bool swizzle_requires_alpha_remapping(const GrGLCaps& caps, uint32_t configComponentMask, uint32_t swizzleComponentMask) { if (caps.textureSwizzleSupport()) { // Any remapping is handled using texture swizzling not shader modifications. return false; } // check if the texture is alpha-only if (kA_GrColorComponentFlag == configComponentMask) { if (caps.textureRedSupport() && (kA_GrColorComponentFlag & swizzleComponentMask)) { // we must map the swizzle 'a's to 'r'. return true; } if (kRGB_GrColorComponentFlags & swizzleComponentMask) { // The 'r', 'g', and/or 'b's must be mapped to 'a' according to our semantics that // alpha-only textures smear alpha across all four channels when read. return true; } } return false; }
// Pre-assigns uniform locations before link when explicit binding is
// supported: plain uniforms take indices 0..uniformCnt-1 and samplers take
// the indices immediately after them. Each assigned index is cached on the
// corresponding entry.
void GrGLUniformHandler::bindUniformLocations(GrGLuint programID, const GrGLCaps& caps) {
    if (!caps.bindUniformLocationSupport()) {
        return;
    }
    const int uniformCnt = fUniforms.count();
    for (int u = 0; u < uniformCnt; ++u) {
        GL_CALL(BindUniformLocation(programID, u, fUniforms[u].fVariable.c_str()));
        fUniforms[u].fLocation = u;
    }
    for (int s = 0; s < fSamplers.count(); ++s) {
        // Sampler locations follow directly after the last plain uniform.
        GrGLint samplerLoc = uniformCnt + s;
        GL_CALL(BindUniformLocation(programID, samplerLoc, fSamplers[s].fShaderVar.c_str()));
        fSamplers[s].fLocation = samplerLoc;
    }
}
// Computes the stage key bit for a texture access: 1 when the generated
// shader must be altered to emulate an alpha-to-red swizzle, 0 otherwise.
// Hardware texture-swizzle support makes any shader modification unnecessary.
GrCustomStage::StageKey GrGLShaderBuilder::KeyForTextureAccess(const GrTextureAccess& access,
                                                               const GrGLCaps& caps) {
    // Assume that swizzle support implies that we never have to modify a shader to adjust
    // for texture format/swizzle settings.
    if (!caps.textureSwizzleSupport() && texture_requires_alpha_to_red_swizzle(caps, access)) {
        return 1;
    }
    return 0;
}
/* * TODO: A better name for this function would be "compute" instead of "get". */ static bool get_frag_proc_and_meta_keys(const GrPrimitiveProcessor& primProc, const GrFragmentProcessor& fp, const GrGLCaps& caps, GrProcessorKeyBuilder* b) { for (int i = 0; i < fp.numChildProcessors(); ++i) { if (!get_frag_proc_and_meta_keys(primProc, fp.childProcessor(i), caps, b)) { return false; } } fp.getGLProcessorKey(*caps.glslCaps(), b); //**** use glslCaps here? return get_meta_key(fp, caps, primProc.getTransformKey(fp.coordTransforms(), fp.numTransformsExclChildren()), b); }
// Packs two key bits per texture accessed by `proc`: bit 0 of the pair is
// set when the texture's config needs a shader-side alpha remap, bit 1 when
// the texture is an external (GL_TEXTURE_EXTERNAL) texture.
static uint32_t gen_texture_key(const GrProcessor& proc, const GrGLCaps& caps) {
    uint32_t key = 0;
    const int numTextures = proc.numTextures();
    for (int t = 0; t < numTextures; ++t) {
        const GrTextureAccess& access = proc.textureAccess(t);
        const int shift = 2 * t; // two key bits per texture
        if (swizzle_requires_alpha_remapping(*caps.glslCaps(), access.getTexture()->config())) {
            key |= 1 << shift;
        }
        if (GR_GL_TEXTURE_EXTERNAL == static_cast<GrGLTexture*>(access.getTexture())->target()) {
            key |= 2 << shift;
        }
    }
    return key;
}
// Returns the effect key bit for a texture access: 1 iff the generated
// shader must be modified to implement the access's swizzle (no hardware
// texture-swizzle support and an alpha remap is required), else 0.
GrBackendEffectFactory::EffectKey GrGLShaderBuilder::KeyForTextureAccess(
                                                            const GrTextureAccess& access,
                                                            const GrGLCaps& caps) {
    // Assume that swizzle support implies that we never have to modify a shader to adjust
    // for texture format/swizzle settings.
    GrBackendEffectFactory::EffectKey key =
            (!caps.textureSwizzleSupport() && swizzle_requires_alpha_remapping(caps, access)) ? 1
                                                                                              : 0;
#if GR_DEBUG
    // Assert that key is set iff the swizzle will be modified.
    SkString unmodified(access.getSwizzle());
    unmodified.prepend(".");
    SkString modified;
    append_swizzle(&modified, access, caps);
    if (!modified.size()) {
        // An empty result means the default swizzle.
        modified = ".rgba";
    }
    GrAssert(SkToBool(key) == (modified != unmodified));
#endif
    return key;
}
// Pre-assigns a single contiguous range of uniform locations across all four
// uniform categories — plain uniforms, samplers, texel buffers, then image
// storages — when the driver supports explicit binding. The running counter
// guarantees the categories never collide, and each assigned index is cached
// on its entry.
void GrGLUniformHandler::bindUniformLocations(GrGLuint programID, const GrGLCaps& caps) {
    if (!caps.bindUniformLocationSupport()) {
        return;
    }
    int nextLoc = 0;
    for (int u = 0; u < fUniforms.count(); ++u) {
        GL_CALL(BindUniformLocation(programID, nextLoc, fUniforms[u].fVariable.c_str()));
        fUniforms[u].fLocation = nextLoc;
        ++nextLoc;
    }
    for (int s = 0; s < fSamplers.count(); ++s) {
        GL_CALL(BindUniformLocation(programID, nextLoc, fSamplers[s].fVariable.c_str()));
        fSamplers[s].fLocation = nextLoc;
        ++nextLoc;
    }
    for (int t = 0; t < fTexelBuffers.count(); ++t) {
        GL_CALL(BindUniformLocation(programID, nextLoc, fTexelBuffers[t].fVariable.c_str()));
        fTexelBuffers[t].fLocation = nextLoc;
        ++nextLoc;
    }
    for (int im = 0; im < fImageStorages.count(); ++im) {
        GL_CALL(BindUniformLocation(programID, nextLoc, fImageStorages[im].fVariable.c_str()));
        fImageStorages[im].fLocation = nextLoc;
        ++nextLoc;
    }
}
// Initializes the GLSL caps from the GL context info, interface, and GL caps.
// Resets all fields first, then probes extension strings / versions to fill
// in framebuffer-fetch, path-rendering, derivative/geometry-shader, blending,
// and precision capabilities. Returns false if ctxInfo is uninitialized.
bool GrGLSLCaps::init(const GrGLContextInfo& ctxInfo,
                      const GrGLInterface* gli,
                      const GrGLCaps& glCaps) {
    this->reset();
    if (!ctxInfo.isInitialized()) {
        return false;
    }

    GrGLStandard standard = ctxInfo.standard();
    GrGLVersion version = ctxInfo.version();

    /**************************************************************************
     * Caps specific to GrGLSLCaps
     **************************************************************************/

    // Framebuffer fetch: probe the three vendor extensions in priority order.
    // Each sets the variable name the shader reads the last frag value from.
    if (kGLES_GrGLStandard == standard) {
        if (ctxInfo.hasExtension("GL_EXT_shader_framebuffer_fetch")) {
            // On ES 3.0+ the EXT form requires declaring a custom output.
            fFBFetchNeedsCustomOutput = (version >= GR_GL_VER(3, 0));
            fFBFetchSupport = true;
            fFBFetchColorName = "gl_LastFragData[0]";
            fFBFetchExtensionString = "GL_EXT_shader_framebuffer_fetch";
        } else if (ctxInfo.hasExtension("GL_NV_shader_framebuffer_fetch")) {
            // Actually, we haven't seen an ES3.0 device with this extension yet, so we don't know
            fFBFetchNeedsCustomOutput = false;
            fFBFetchSupport = true;
            fFBFetchColorName = "gl_LastFragData[0]";
            fFBFetchExtensionString = "GL_NV_shader_framebuffer_fetch";
        } else if (ctxInfo.hasExtension("GL_ARM_shader_framebuffer_fetch")) {
            // The arm extension also requires an additional flag which we will set onResetContext
            fFBFetchNeedsCustomOutput = false;
            fFBFetchSupport = true;
            fFBFetchColorName = "gl_LastFragColorARM";
            fFBFetchExtensionString = "GL_ARM_shader_framebuffer_fetch";
        }
    }

    // Adreno GPUs have a tendency to drop tiles when there is a divide-by-zero in a shader
    fDropsTileOnZeroDivide = kQualcomm_GrGLVendor == ctxInfo.vendor();

    /**************************************************************************
     * GrShaderCaps fields
     **************************************************************************/
    fPathRenderingSupport = ctxInfo.hasExtension("GL_NV_path_rendering");

    if (fPathRenderingSupport) {
        if (kGL_GrGLStandard == standard) {
            // We only support v1.3+ of GL_NV_path_rendering which allows us to
            // set individual fragment inputs with ProgramPathFragmentInputGen. The API
            // additions are detected by checking the existence of the function.
            fPathRenderingSupport = ctxInfo.hasExtension("GL_EXT_direct_state_access") &&
                ((ctxInfo.version() >= GR_GL_VER(4, 3) ||
                  ctxInfo.hasExtension("GL_ARB_program_interface_query")) &&
                 gli->fFunctions.fProgramPathFragmentInputGen);
        } else {
            fPathRenderingSupport = ctxInfo.version() >= GR_GL_VER(3, 1);
        }
    }

    // For now these two are equivalent but we could have dst read in shader via some other method
    fDstReadInShaderSupport = fFBFetchSupport;

    // Enable supported shader-related caps
    if (kGL_GrGLStandard == standard) {
        fDualSourceBlendingSupport = ctxInfo.version() >= GR_GL_VER(3, 3) ||
            ctxInfo.hasExtension("GL_ARB_blend_func_extended");
        fShaderDerivativeSupport = true;
        // we don't support GL_ARB_geometry_shader4, just GL 3.2+ GS
        fGeometryShaderSupport = ctxInfo.version() >= GR_GL_VER(3, 2) &&
            ctxInfo.glslGeneration() >= k150_GrGLSLGeneration;
    } else {
        fShaderDerivativeSupport = ctxInfo.version() >= GR_GL_VER(3, 0) ||
            ctxInfo.hasExtension("GL_OES_standard_derivatives");
    }

    // Advanced blend equations: choose how the shader interacts with the
    // extension based on which NV variant (coherent or not) is exposed.
    if (glCaps.advancedBlendEquationSupport()) {
        bool coherent = glCaps.advancedCoherentBlendEquationSupport();
        if (ctxInfo.hasExtension(coherent ? "GL_NV_blend_equation_advanced_coherent"
                                          : "GL_NV_blend_equation_advanced")) {
            fAdvBlendEqInteraction = kAutomatic_AdvBlendEqInteraction;
        } else {
            fAdvBlendEqInteraction = kGeneralEnable_AdvBlendEqInteraction;
            // TODO: Use the following on any platform where "blend_support_all_equations" is slow.
            //fAdvBlendEqInteraction = kSpecificEnables_AdvBlendEqInteraction;
        }
    }

    this->initShaderPrecisionTable(ctxInfo, gli);
    return true;
}