void TranslatorESSL::translate(TIntermNode *root, int)
{
    TInfoSinkBase &sink = getInfoSink().obj;

    int shaderVer = getShaderVersion();
    if (shaderVer > 100)
    {
        sink << "#version " << shaderVer << " es\n";
    }

    writePragma();

    // Write built-in extension behaviors.
    writeExtensionBehavior();

    bool precisionEmulation =
        getResources().WEBGL_debug_shader_precision && getPragma().debugShaderPrecision;

    if (precisionEmulation)
    {
        EmulatePrecision emulatePrecision(getSymbolTable(), shaderVer);
        root->traverse(&emulatePrecision);
        emulatePrecision.updateTree();
        emulatePrecision.writeEmulationHelpers(sink, SH_ESSL_OUTPUT);
    }

    RecordConstantPrecision(root, getTemporaryIndex());

    // Write emulated built-in functions if needed.
    if (!getBuiltInFunctionEmulator().IsOutputEmpty())
    {
        sink << "// BEGIN: Generated code for built-in function emulation\n\n";
        if (getShaderType() == GL_FRAGMENT_SHADER)
        {
            sink << "#if defined(GL_FRAGMENT_PRECISION_HIGH)\n"
                 << "#define webgl_emu_precision highp\n"
                 << "#else\n"
                 << "#define webgl_emu_precision mediump\n"
                 << "#endif\n\n";
        }
        else
        {
            sink << "#define webgl_emu_precision highp\n";
        }
        getBuiltInFunctionEmulator().OutputEmulatedFunctions(sink);
        sink << "// END: Generated code for built-in function emulation\n\n";
    }

    // Write array bounds clamping emulation if needed.
    getArrayBoundsClamper().OutputClampingFunctionDefinition(sink);

    // Write translated shader.
    TOutputESSL outputESSL(sink, getArrayIndexClampingStrategy(), getHashFunction(), getNameMap(),
                           getSymbolTable(), shaderVer, precisionEmulation);
    root->traverse(&outputESSL);
}
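
// For reference, the preamble emitted above for a "#version 300 es" fragment shader that needs
// built-in function emulation looks roughly like this (pragma/extension lines and the emulated
// function bodies are elided; this is an illustrative sketch, not verbatim output):
//
//   #version 300 es
//   // BEGIN: Generated code for built-in function emulation
//
//   #if defined(GL_FRAGMENT_PRECISION_HIGH)
//   #define webgl_emu_precision highp
//   #else
//   #define webgl_emu_precision mediump
//   #endif
//
//   ...
//   // END: Generated code for built-in function emulation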

TIntermNode *TCompiler::compileTreeImpl(const char *const shaderStrings[],
                                        size_t numStrings,
                                        const int compileOptions)
{
    clearResults();

    ASSERT(numStrings > 0);
    ASSERT(GetGlobalPoolAllocator());

    // Reset the extension behavior for each compilation unit.
    ResetExtensionBehavior(extensionBehavior);

    // First string is path of source file if flag is set. The actual source follows.
    size_t firstSource = 0;
    if (compileOptions & SH_SOURCE_PATH)
    {
        mSourcePath = shaderStrings[0];
        ++firstSource;
    }

    TIntermediate intermediate(infoSink);
    TParseContext parseContext(symbolTable, extensionBehavior, intermediate, shaderType,
                               shaderSpec, compileOptions, true, infoSink, getResources());

    parseContext.setFragmentPrecisionHighOnESSL1(fragmentPrecisionHigh);
    SetGlobalParseContext(&parseContext);

    // We preserve symbols at the built-in level from compile-to-compile.
    // Start pushing the user-defined symbols at global level.
    TScopedSymbolTableLevel scopedSymbolLevel(&symbolTable);

    // Parse shader.
    bool success =
        (PaParseStrings(numStrings - firstSource, &shaderStrings[firstSource], nullptr,
                        &parseContext) == 0) &&
        (parseContext.getTreeRoot() != nullptr);

    shaderVersion = parseContext.getShaderVersion();
    if (success && MapSpecToShaderVersion(shaderSpec) < shaderVersion)
    {
        infoSink.info.prefix(EPrefixError);
        infoSink.info << "unsupported shader version";
        success = false;
    }

    TIntermNode *root = nullptr;

    if (success)
    {
        mPragma = parseContext.pragma();
        if (mPragma.stdgl.invariantAll)
        {
            symbolTable.setGlobalInvariant();
        }

        root = parseContext.getTreeRoot();
        root = intermediate.postProcess(root);

        // Highp might have been auto-enabled based on shader version.
        fragmentPrecisionHigh = parseContext.getFragmentPrecisionHigh();

        // Disallow expressions deemed too complex.
        if (success && (compileOptions & SH_LIMIT_EXPRESSION_COMPLEXITY))
            success = limitExpressionComplexity(root);

        // Create the function DAG and check there is no recursion.
        if (success)
            success = initCallDag(root);

        if (success && (compileOptions & SH_LIMIT_CALL_STACK_DEPTH))
            success = checkCallDepth();

        // Checks which functions are used and if "main" exists.
        if (success)
        {
            functionMetadata.clear();
            functionMetadata.resize(mCallDag.size());
            success = tagUsedFunctions();
        }

        if (success && !(compileOptions & SH_DONT_PRUNE_UNUSED_FUNCTIONS))
            success = pruneUnusedFunctions(root);

        // Prune empty declarations to work around driver bugs and to keep declaration output
        // simple.
        if (success)
            PruneEmptyDeclarations(root);

        if (success && shaderVersion == 300 && shaderType == GL_FRAGMENT_SHADER)
            success = validateOutputs(root);

        if (success && shouldRunLoopAndIndexingValidation(compileOptions))
            success = validateLimitations(root);

        if (success && (compileOptions & SH_TIMING_RESTRICTIONS))
            success = enforceTimingRestrictions(root, (compileOptions & SH_DEPENDENCY_GRAPH) != 0);

        if (success && shaderSpec == SH_CSS_SHADERS_SPEC)
            rewriteCSSShader(root);

        // Unroll for-loop markup needs to happen after validateLimitations pass.
        if (success && (compileOptions & SH_UNROLL_FOR_LOOP_WITH_INTEGER_INDEX))
        {
            ForLoopUnrollMarker marker(ForLoopUnrollMarker::kIntegerIndex,
                                       shouldRunLoopAndIndexingValidation(compileOptions));
            root->traverse(&marker);
        }
        if (success && (compileOptions & SH_UNROLL_FOR_LOOP_WITH_SAMPLER_ARRAY_INDEX))
        {
            ForLoopUnrollMarker marker(ForLoopUnrollMarker::kSamplerArrayIndex,
                                       shouldRunLoopAndIndexingValidation(compileOptions));
            root->traverse(&marker);
            if (marker.samplerArrayIndexIsFloatLoopIndex())
            {
                infoSink.info.prefix(EPrefixError);
                infoSink.info << "sampler array index is float loop index";
                success = false;
            }
        }

        // Built-in function emulation needs to happen after validateLimitations pass.
        if (success)
        {
            initBuiltInFunctionEmulator(&builtInFunctionEmulator, compileOptions);
            builtInFunctionEmulator.MarkBuiltInFunctionsForEmulation(root);
        }

        // Clamping uniform array bounds needs to happen after validateLimitations pass.
        if (success && (compileOptions & SH_CLAMP_INDIRECT_ARRAY_BOUNDS))
            arrayBoundsClamper.MarkIndirectArrayBoundsForClamping(root);

        // gl_Position is always written in compatibility output mode.
        if (success && shaderType == GL_VERTEX_SHADER &&
            ((compileOptions & SH_INIT_GL_POSITION) ||
             (outputType == SH_GLSL_COMPATIBILITY_OUTPUT)))
            initializeGLPosition(root);

        // This pass might emit short circuits, so keep it before the short circuit unfolding.
        if (success && (compileOptions & SH_REWRITE_DO_WHILE_LOOPS))
            RewriteDoWhile(root, getTemporaryIndex());

        if (success && (compileOptions & SH_UNFOLD_SHORT_CIRCUIT))
        {
            UnfoldShortCircuitAST unfoldShortCircuit;
            root->traverse(&unfoldShortCircuit);
            unfoldShortCircuit.updateTree();
        }

        if (success && (compileOptions & SH_REMOVE_POW_WITH_CONSTANT_EXPONENT))
        {
            RemovePow(root);
        }

        if (success && shouldCollectVariables(compileOptions))
        {
            collectVariables(root);
            if (compileOptions & SH_ENFORCE_PACKING_RESTRICTIONS)
            {
                success = enforcePackingRestrictions();
                if (!success)
                {
                    infoSink.info.prefix(EPrefixError);
                    infoSink.info << "too many uniforms";
                }
            }
            if (success && shaderType == GL_VERTEX_SHADER &&
                (compileOptions & SH_INIT_VARYINGS_WITHOUT_STATIC_USE))
                initializeVaryingsWithoutStaticUse(root);
        }

        if (success && (compileOptions & SH_SCALARIZE_VEC_AND_MAT_CONSTRUCTOR_ARGS))
        {
            ScalarizeVecAndMatConstructorArgs scalarizer(shaderType, fragmentPrecisionHigh);
            root->traverse(&scalarizer);
        }

        if (success && (compileOptions & SH_REGENERATE_STRUCT_NAMES))
        {
            RegenerateStructNames gen(symbolTable, shaderVersion);
            root->traverse(&gen);
        }
    }

    SetGlobalParseContext(nullptr);
    if (success)
        return root;

    return nullptr;
}
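
// A hypothetical sketch of how the option bits tested above might be combined by a caller
// inside TCompiler. The particular combination is illustrative only; every flag named here is
// one that compileTreeImpl actually checks:
//
//   const int compileOptions = SH_SOURCE_PATH | SH_LIMIT_CALL_STACK_DEPTH |
//                              SH_INIT_GL_POSITION | SH_UNFOLD_SHORT_CIRCUIT |
//                              SH_ENFORCE_PACKING_RESTRICTIONS;
//   TIntermNode *root = compileTreeImpl(shaderStrings, numStrings, compileOptions);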

void TranslatorHLSL::translate(TIntermNode *root, int compileOptions)
{
    const ShBuiltInResources &resources = getResources();
    int numRenderTargets = resources.EXT_draw_buffers ? resources.MaxDrawBuffers : 1;

    sh::AddDefaultReturnStatements(root);

    SeparateDeclarations(root);

    // TODO (oetuaho): Sequence operators should also be split in case there is dynamic indexing
    // of a vector or matrix as an l-value inside (the RemoveDynamicIndexing transformation step
    // generates statements in this case).
    SplitSequenceOperator(root,
                          IntermNodePatternMatcher::kExpressionReturningArray |
                              IntermNodePatternMatcher::kUnfoldedShortCircuitExpression |
                              IntermNodePatternMatcher::kDynamicIndexingOfVectorOrMatrixInLValue,
                          getTemporaryIndex(), getSymbolTable(), getShaderVersion());

    // Note that SeparateDeclarations needs to be run before UnfoldShortCircuitToIf.
    UnfoldShortCircuitToIf(root, getTemporaryIndex());

    SeparateExpressionsReturningArrays(root, getTemporaryIndex());

    // Note that SeparateDeclarations needs to be run before SeparateArrayInitialization.
    SeparateArrayInitialization(root);

    // HLSL doesn't support arrays as return values, so functions that return an array are
    // rewritten to transfer the array data through an out parameter instead.
    ArrayReturnValueToOutParameter(root, getTemporaryIndex());

    if (!shouldRunLoopAndIndexingValidation(compileOptions))
    {
        // HLSL doesn't support dynamic indexing of vectors and matrices.
        RemoveDynamicIndexing(root, getTemporaryIndex(), getSymbolTable(), getShaderVersion());
    }

    // Work around a D3D9 bug that would manifest in vertex shaders with selection blocks which
    // use a vertex attribute as a condition, and some related computation in the else block.
    if (getOutputType() == SH_HLSL_3_0_OUTPUT && getShaderType() == GL_VERTEX_SHADER)
    {
        sh::RewriteElseBlocks(root, getTemporaryIndex());
    }

    bool precisionEmulation =
        getResources().WEBGL_debug_shader_precision && getPragma().debugShaderPrecision;

    if (precisionEmulation)
    {
        EmulatePrecision emulatePrecision(getSymbolTable(), getShaderVersion());
        root->traverse(&emulatePrecision);
        emulatePrecision.updateTree();
        emulatePrecision.writeEmulationHelpers(getInfoSink().obj, getShaderVersion(),
                                               getOutputType());
    }

    if ((compileOptions & SH_EXPAND_SELECT_HLSL_INTEGER_POW_EXPRESSIONS) != 0)
    {
        sh::ExpandIntegerPowExpressions(root, getTemporaryIndex());
    }

    sh::OutputHLSL outputHLSL(getShaderType(), getShaderVersion(), getExtensionBehavior(),
                              getSourcePath(), getOutputType(), numRenderTargets, getUniforms(),
                              compileOptions);

    outputHLSL.output(root, getInfoSink().obj);

    mInterfaceBlockRegisterMap = outputHLSL.getInterfaceBlockRegisterMap();
    mUniformRegisterMap = outputHLSL.getUniformRegisterMap();
}
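
// Illustrative sketch of the array-return rewrite performed by ArrayReturnValueToOutParameter
// above. The shapes are hypothetical (the transformation chooses its own generated names), but
// the idea matches the comment in the code: an array return value becomes an out parameter.
//
//   Input (ESSL-style):                     Rewritten (out-parameter form):
//     float[2] getValues() { ... }            void getValues(out float values[2]) { ... }
//     float v[2] = getValues();               float v[2]; getValues(v);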