static bool tryDFGCompile(JSGlobalData* globalData, CodeBlock* codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck)
{
#if ENABLE(DFG_JIT)
#if ENABLE(DFG_JIT_RESTRICTIONS)
    // FIXME: No flow control yet supported, don't bother scanning the bytecode if there are any jump targets.
    // FIXME: temporarily disable property accesses until we fix regressions.
    if (codeBlock->numberOfJumpTargets() || codeBlock->numberOfStructureStubInfos())
        return false;
#endif
    DFG::Graph dfg(codeBlock->m_numParameters, codeBlock->m_numVars);
    if (!parse(dfg, globalData, codeBlock))
        return false;

    DFG::JITCompiler dataFlowJIT(globalData, dfg, codeBlock);
    dataFlowJIT.compileFunction(jitCode, jitCodeWithArityCheck);
    return true;
#else
    UNUSED_PARAM(globalData);
    UNUSED_PARAM(codeBlock);
    UNUSED_PARAM(jitCode);
    UNUSED_PARAM(jitCodeWithArityCheck);
    return false;
#endif
}
inline bool compile(CompileMode compileMode, JSGlobalData& globalData, CodeBlock* codeBlock, JITCode& jitCode, MacroAssemblerCodePtr* jitCodeWithArityCheck)
{
    SamplingRegion samplingRegion("DFG Compilation (Driver)");

    ASSERT(codeBlock);
    ASSERT(codeBlock->alternative());
    ASSERT(codeBlock->alternative()->getJITType() == JITCode::BaselineJIT);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("DFG compiling code block %p(%p), number of instructions = %u.\n", codeBlock, codeBlock->alternative(), codeBlock->instructionCount());
#endif

    Graph dfg(globalData, codeBlock);
    if (!parse(dfg))
        return false;

    if (compileMode == CompileFunction)
        dfg.predictArgumentTypes();

    performRedundantPhiElimination(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performCSE(dfg);
    performVirtualRegisterAllocation(dfg);
    performCFA(dfg);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("Graph after optimization:\n");
    dfg.dump();
#endif

    JITCompiler dataFlowJIT(dfg);
    bool result;
    if (compileMode == CompileFunction) {
        ASSERT(jitCodeWithArityCheck);
        result = dataFlowJIT.compileFunction(jitCode, *jitCodeWithArityCheck);
    } else {
        ASSERT(compileMode == CompileOther);
        ASSERT(!jitCodeWithArityCheck);
        result = dataFlowJIT.compile(jitCode);
    }

    return result;
}
MenuInfo::MenuInfo(const QString& desktopFile)
{
    d = new Private;

    KConfig df(KStandardDirs::locate("data", QString::fromLatin1("kicker/menuext/%1").arg(desktopFile)));
    KConfigGroup dfg(&df, "Desktop Entry");

    QStringList list = dfg.readEntry("X-KDE-AuthorizeAction", QStringList());
    if (kapp && !list.isEmpty()) {
        for (QStringList::ConstIterator it = list.begin(); it != list.end(); ++it) {
            if (!KAuthorized::authorize((*it).trimmed()))
                return;
        }
    }

    d->name = dfg.readEntry("Name");
    d->comment = dfg.readEntry("Comment");
    d->icon = dfg.readEntry("Icon");
    d->library = dfg.readEntry("X-KDE-Library");
    d->desktopfile = desktopFile;
}
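// Illustration (not taken from the KDE sources): the MenuInfo constructor above reads a kicker
// menu-extension .desktop file located via KStandardDirs under data/kicker/menuext/. A minimal
// file that this constructor could parse might look like the following; the "example" names are
// hypothetical, but the keys are exactly the ones read above:
//
//   [Desktop Entry]
//   Name=Example Menu
//   Comment=Adds an example submenu to the panel menu
//   Icon=folder
//   X-KDE-Library=kickermenu_example
//   X-KDE-AuthorizeAction=example_action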
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    if (verboseCompilationEnabled() && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }

    Graph dfg(vm, *this, longLivedState);
    if (!parse(dfg)) {
        finalizer = adoptPtr(new FailedFinalizer(*this));
        return FailPath;
    }

    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);

    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);

    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = adoptPtr(new FailedFinalizer(*this));
            return FailPath;
        }
        performCPSRethreading(dfg);
    }

    if (validationEnabled())
        validate(dfg);

    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performTypeCheckHoisting(dfg);

    unsigned count = 1;
    dfg.m_fixpointState = FixpointNotConverged;
    for (;; ++count) {
        if (logCompilationChanges())
            dataLogF("DFG beginning optimization fixpoint iteration #%u.\n", count);
        bool changed = false;
        if (validationEnabled())
            validate(dfg);
        performCFA(dfg);
        changed |= performConstantFolding(dfg);
        changed |= performArgumentsSimplification(dfg);
        changed |= performCFGSimplification(dfg);
        changed |= performCSE(dfg);
        if (!changed)
            break;
        performCPSRethreading(dfg);
    }
    if (logCompilationChanges())
        dataLogF("DFG optimization fixpoint converged in %u iterations.\n", count);

    dfg.m_fixpointState = FixpointConverged;

    performStoreElimination(dfg);

    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.m_dominators.computeIfNecessary(dfg);
        dfg.m_naturalLoops.computeIfNecessary(dfg);
    }

    switch (mode) {
    case DFGMode: {
        performTierUpCheckInjection(dfg);
        break;
    }

    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = adoptPtr(new FailedFinalizer(*this));
            return FailPath;
        }

        performCriticalEdgeBreaking(dfg);
        performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performLICM(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performDCE(dfg); // We rely on this to convert dead SetLocals into the appropriate hint, and to kill dead code that won't be recognized as dead by LLVM.
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performFlushLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);

        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:");

        initializeLLVM();

        FTL::State state(dfg);
        FTL::lowerDFGToLLVM(state);

        if (Options::reportCompileTimes())
            beforeFTL = currentTimeMS();

        if (Options::llvmAlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::compile(state);

        if (Options::llvmAlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::link(state);
        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        break;
#endif // ENABLE(FTL_JIT)
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    performCPSRethreading(dfg);
    performDCE(dfg);
    performStackLayout(dfg);
    performVirtualRegisterAllocation(dfg);
    dumpAndVerifyGraph(dfg, "Graph after optimization:");

    JITCompiler dataFlowJIT(dfg);
    if (codeBlock->codeType() == FunctionCode) {
        dataFlowJIT.compileFunction();
        dataFlowJIT.linkFunction();
    } else {
        dataFlowJIT.compile();
        dataFlowJIT.link();
    }

    return DFGPath;
}
REAL dfgdy(const REAL h, const REAL alpha, const Matrix& f, const Matrix& g, const unsigned i, const unsigned j)
{
    return dfg(h, alpha,
               f.at(i, j - 1), f.at(i, j), f.at(i, j + 1),
               g.at(i, j - 1), g.at(i, j), g.at(i + 1, j - 1), g.at(i + 1, j));
}
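// Illustration, not taken from the solver above: the argument pattern of dfgdy() -- f sampled at
// (i,j-1), (i,j), (i,j+1) and g sampled at (i,j-1), (i,j), (i+1,j-1), (i+1,j) -- matches the
// staggered-grid donor-cell discretization of d(f*g)/dy used in Griebel-style incompressible-flow
// codes. Under that assumption, a minimal self-contained sketch of what the dfg() helper could
// compute is given below; dfgSketch and its parameter names are hypothetical.
#include <cmath>

using REAL = double;

static REAL dfgSketch(REAL h, REAL alpha,
                      REAL fjm1, REAL fj, REAL fjp1,
                      REAL gjm1, REAL gj, REAL gip1jm1, REAL gip1j)
{
    const REAL gUpper = (gj + gip1j) / 2;      // g averaged onto the face between j and j+1
    const REAL gLower = (gjm1 + gip1jm1) / 2;  // g averaged onto the face between j-1 and j

    // Central (second-order) part of d(f*g)/dy over one cell of width h.
    const REAL central = (gUpper * (fj + fjp1) / 2 - gLower * (fjm1 + fj) / 2) / h;

    // Donor-cell (upwind) correction, blended in by alpha in [0,1] for stability at high velocities.
    const REAL upwind = (std::abs(gUpper) * (fj - fjp1) / 2 - std::abs(gLower) * (fjm1 - fj) / 2) / h;

    return central + alpha * upwind;
}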
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    cleanMustHandleValuesIfNecessary();

    if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }

    Graph dfg(*vm, *this, longLivedState);

    if (!parse(dfg)) {
        finalizer = std::make_unique<FailedFinalizer>(*this);
        return FailPath;
    }

    codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());

    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);

    if (Options::dumpGraphAfterParsing()) {
        dataLog("Graph after parsing:\n");
        dfg.dump();
    }

    performLiveCatchVariablePreservationPhase(dfg);

    if (Options::useMaximalFlushInsertionPhase())
        performMaximalFlushInsertion(dfg);

    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);

    performStaticExecutionCountEstimation(dfg);

    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        performCPSRethreading(dfg);
    }

    if (validationEnabled())
        validate(dfg);

    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performStructureRegistration(dfg);
    performInvalidationPointInjection(dfg);
    performTypeCheckHoisting(dfg);

    dfg.m_fixpointState = FixpointNotConverged;

    // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
    // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
    // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
    // that the compiler compiles more quickly. We want the third tier to compile quickly, which
    // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint.
    if (validationEnabled())
        validate(dfg);

    performStrengthReduction(dfg);
    performCPSRethreading(dfg);
    performCFA(dfg);
    performConstantFolding(dfg);
    bool changed = false;
    changed |= performCFGSimplification(dfg);
    changed |= performLocalCSE(dfg);

    if (validationEnabled())
        validate(dfg);

    performCPSRethreading(dfg);
    if (!isFTL(mode)) {
        // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
        // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
        // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
        // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases,
        // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an
        // escape for all of the arguments. This then disables object allocation sinking.
        //
        // So, for now, we just disable this phase for the FTL.
        //
        // If we wanted to enable it, we'd have to do any of the following:
        // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before
        //   PutStack sinking and object allocation sinking.
        // - Make VarargsForwarding emit a GetLocal+SetLocal sequence, that we can later turn into
        //   GetStack+PutStack.
        //
        // But, it's not super valuable to enable those optimizations, since the FTL
        // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this
        // pathology.
        changed |= performVarargsForwarding(dfg); // Do this after CFG simplification and CPS rethreading.
    }
    if (changed) {
        performCFA(dfg);
        performConstantFolding(dfg);
    }

    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.ensureDominators();
        dfg.ensureNaturalLoops();
        dfg.ensurePrePostNumbering();
    }

    switch (mode) {
    case DFGMode: {
        dfg.m_fixpointState = FixpointConverged;

        performTierUpCheckInjection(dfg);

        performFastStoreBarrierInsertion(dfg);
        performStoreBarrierClustering(dfg);
        performCleanUp(dfg);
        performCPSRethreading(dfg);
        performDCE(dfg);
        performPhantomInsertion(dfg);
        performStackLayout(dfg);
        performVirtualRegisterAllocation(dfg);
        performWatchpointCollection(dfg);
        dumpAndVerifyGraph(dfg, "Graph after optimization:");

        JITCompiler dataFlowJIT(dfg);
        if (codeBlock->codeType() == FunctionCode)
            dataFlowJIT.compileFunction();
        else
            dataFlowJIT.compile();

        return DFGPath;
    }

    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        performCleanUp(dfg); // Reduce the graph size a bit.
        performCriticalEdgeBreaking(dfg);
        if (Options::createPreHeaders())
            performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performSSALowering(dfg);

        // Ideally, these would be run to fixpoint with the object allocation sinking phase.
        performArgumentsElimination(dfg);
        if (Options::usePutStackSinking())
            performPutStackSinking(dfg);

        performConstantHoisting(dfg);
        performGlobalCSE(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performConstantFolding(dfg);
        performCleanUp(dfg); // Reduce the graph size a lot.
        changed = false;
        changed |= performStrengthReduction(dfg);
        if (Options::useObjectAllocationSinking()) {
            changed |= performCriticalEdgeBreaking(dfg);
            changed |= performObjectAllocationSinking(dfg);
        }
        if (changed) {
            // State-at-tail and state-at-head will be invalid if we did strength reduction since
            // it might increase live ranges.
            performLivenessAnalysis(dfg);
            performCFA(dfg);
            performConstantFolding(dfg);
        }

        // Currently, this relies on pre-headers still being valid. That precludes running CFG
        // simplification before it, unless we re-created the pre-headers. There wouldn't be anything
        // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point.
        // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that
        // then we'd need to do some simple SSA fix-up.
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performLICM(dfg);

        // FIXME: Currently, IntegerRangeOptimization *must* be run after LICM.
        //
        // IntegerRangeOptimization makes changes on nodes based on preceding blocks
        // and nodes. LICM moves nodes, which can invalidate assumptions used
        // by IntegerRangeOptimization.
        //
        // Ideally, the dependencies should be explicit. See https://bugs.webkit.org/show_bug.cgi?id=157534.
        performLivenessAnalysis(dfg);
        performIntegerRangeOptimization(dfg);

        performCleanUp(dfg);
        performIntegerCheckCombining(dfg);
        performGlobalCSE(dfg);

        // At this point we're not allowed to do any further code motion because our reasoning
        // about code motion assumes that it's OK to insert GC points in random places.
        dfg.m_fixpointState = FixpointConverged;

        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performGlobalStoreBarrierInsertion(dfg);
        performStoreBarrierClustering(dfg);
        if (Options::useMovHintRemoval())
            performMovHintRemoval(dfg);
        performCleanUp(dfg);
        performDCE(dfg); // We rely on this to kill dead code that won't be recognized as dead by B3.
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);
        performWatchpointCollection(dfg);

        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldDumpDisassembly(mode));

        // Flash a safepoint in case the GC wants some action.
        Safepoint::Result safepointResult;
        {
            GraphSafepoint safepoint(dfg, safepointResult);
        }
        if (safepointResult.didGetCancelled())
            return CancelPath;

        FTL::State state(dfg);
        FTL::lowerDFGToB3(state);

        if (UNLIKELY(computeCompileTimes()))
            m_timeBeforeFTL = monotonicallyIncreasingTimeMS();

        if (Options::b3AlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::compile(state, safepointResult);

        if (safepointResult.didGetCancelled())
            return CancelPath;

        if (Options::b3AlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }

        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::link(state);
        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }
        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
#endif // ENABLE(FTL_JIT)
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
    }
}
inline bool compile(CompileMode compileMode, ExecState* exec, CodeBlock* codeBlock, JITCode& jitCode, MacroAssemblerCodePtr* jitCodeWithArityCheck, unsigned osrEntryBytecodeIndex)
{
    SamplingRegion samplingRegion("DFG Compilation (Driver)");

    numCompilations++;

    ASSERT(codeBlock);
    ASSERT(codeBlock->alternative());
    ASSERT(codeBlock->alternative()->getJITType() == JITCode::BaselineJIT);
    ASSERT(osrEntryBytecodeIndex != UINT_MAX);

    if (!Options::useDFGJIT())
        return false;

    if (!Options::bytecodeRangeToDFGCompile().isInRange(codeBlock->instructionCount()))
        return false;

    if (logCompilationChanges())
        dataLog("DFG compiling ", *codeBlock, ", number of instructions = ", codeBlock->instructionCount(), "\n");

    // Derive our set of must-handle values. The compilation must be at least conservative
    // enough to allow for OSR entry with these values.
    unsigned numVarsWithValues;
    if (osrEntryBytecodeIndex)
        numVarsWithValues = codeBlock->m_numVars;
    else
        numVarsWithValues = 0;
    Operands<JSValue> mustHandleValues(codeBlock->numParameters(), numVarsWithValues);
    for (size_t i = 0; i < mustHandleValues.size(); ++i) {
        int operand = mustHandleValues.operandForIndex(i);
        if (operandIsArgument(operand)
            && !operandToArgument(operand)
            && compileMode == CompileFunction
            && codeBlock->specializationKind() == CodeForConstruct) {
            // Ugh. If we're in a constructor, the 'this' argument may hold garbage. It will
            // also never be used. It doesn't matter what we put into the value for this,
            // but it has to be an actual value that can be grokked by subsequent DFG passes,
            // so we sanitize it here by turning it into Undefined.
            mustHandleValues[i] = jsUndefined();
        } else
            mustHandleValues[i] = exec->uncheckedR(operand).jsValue();
    }

    Graph dfg(exec->vm(), codeBlock, osrEntryBytecodeIndex, mustHandleValues);
    if (!parse(exec, dfg))
        return false;

    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);

    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);

    if (validationEnabled())
        validate(dfg);

    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performTypeCheckHoisting(dfg);

    dfg.m_fixpointState = FixpointNotConverged;

    performCSE(dfg);
    performArgumentsSimplification(dfg);
    performCPSRethreading(dfg); // This should usually be a no-op since CSE rarely dethreads, and arguments simplification rarely does anything.
    performCFA(dfg);
    performConstantFolding(dfg);
    performCFGSimplification(dfg);

    dfg.m_fixpointState = FixpointConverged;

    performStoreElimination(dfg);
    performCPSRethreading(dfg);
    performDCE(dfg);
    performVirtualRegisterAllocation(dfg);

    GraphDumpMode modeForFinalValidate = DumpGraph;
    if (verboseCompilationEnabled()) {
        dataLogF("Graph after optimization:\n");
        dfg.dump();
        modeForFinalValidate = DontDumpGraph;
    }
    if (validationEnabled())
        validate(dfg, modeForFinalValidate);

    JITCompiler dataFlowJIT(dfg);
    bool result;
    if (compileMode == CompileFunction) {
        ASSERT(jitCodeWithArityCheck);
        result = dataFlowJIT.compileFunction(jitCode, *jitCodeWithArityCheck);
    } else {
        ASSERT(compileMode == CompileOther);
        ASSERT(!jitCodeWithArityCheck);
        result = dataFlowJIT.compile(jitCode);
    }

    return result;
}
inline bool compile(CompileMode compileMode, ExecState* exec, CodeBlock* codeBlock, JITCode& jitCode, MacroAssemblerCodePtr* jitCodeWithArityCheck)
{
    SamplingRegion samplingRegion("DFG Compilation (Driver)");

    ASSERT(codeBlock);
    ASSERT(codeBlock->alternative());
    ASSERT(codeBlock->alternative()->getJITType() == JITCode::BaselineJIT);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("DFG compiling code block %p(%p) for executable %p, number of instructions = %u.\n", codeBlock, codeBlock->alternative(), codeBlock->ownerExecutable(), codeBlock->instructionCount());
#endif

    Graph dfg(exec->globalData(), codeBlock);
    if (!parse(exec, dfg))
        return false;

    if (compileMode == CompileFunction)
        dfg.predictArgumentTypes();

    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    validate(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    unsigned cnt = 1;
    for (;; ++cnt) {
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("DFG beginning optimization fixpoint iteration #%u.\n", cnt);
#endif
        bool changed = false;
        performCFA(dfg);
        changed |= performConstantFolding(dfg);
        changed |= performArgumentsSimplification(dfg);
        changed |= performCFGSimplification(dfg);
        changed |= performCSE(dfg, FixpointNotConverged);
        if (!changed)
            break;
        dfg.resetExitStates();
        performFixup(dfg);
    }
    performCSE(dfg, FixpointConverged);
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("DFG optimization fixpoint converged in %u iterations.\n", cnt);
#endif
    performVirtualRegisterAllocation(dfg);

    GraphDumpMode modeForFinalValidate = DumpGraph;
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("Graph after optimization:\n");
    dfg.dump();
    modeForFinalValidate = DontDumpGraph;
#endif
    validate(dfg, modeForFinalValidate);

    JITCompiler dataFlowJIT(dfg);
    bool result;
    if (compileMode == CompileFunction) {
        ASSERT(jitCodeWithArityCheck);
        result = dataFlowJIT.compileFunction(jitCode, *jitCodeWithArityCheck);
    } else {
        ASSERT(compileMode == CompileOther);
        ASSERT(!jitCodeWithArityCheck);
        result = dataFlowJIT.compile(jitCode);
    }

    return result;
}
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    Graph dfg(vm, *this, longLivedState);

    if (!parse(dfg)) {
        finalizer = adoptPtr(new FailedFinalizer(*this));
        return FailPath;
    }

    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);

    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);

    if (validationEnabled())
        validate(dfg);

    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performTypeCheckHoisting(dfg);

    unsigned count = 1;
    dfg.m_fixpointState = FixpointNotConverged;
    for (;; ++count) {
        if (logCompilationChanges())
            dataLogF("DFG beginning optimization fixpoint iteration #%u.\n", count);
        bool changed = false;
        if (validationEnabled())
            validate(dfg);
        performCFA(dfg);
        changed |= performConstantFolding(dfg);
        changed |= performArgumentsSimplification(dfg);
        changed |= performCFGSimplification(dfg);
        changed |= performCSE(dfg);
        if (!changed)
            break;
        performCPSRethreading(dfg);
    }
    if (logCompilationChanges())
        dataLogF("DFG optimization fixpoint converged in %u iterations.\n", count);

    dfg.m_fixpointState = FixpointConverged;

    performStoreElimination(dfg);

    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.m_dominators.computeIfNecessary(dfg);
        dfg.m_naturalLoops.computeIfNecessary(dfg);
    }

#if ENABLE(FTL_JIT)
    if (Options::useExperimentalFTL() && compileMode == CompileFunction && FTL::canCompile(dfg)) {
        performCriticalEdgeBreaking(dfg);
        performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performLICM(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performDCE(dfg); // We rely on this to convert dead SetLocals into the appropriate hint, and to kill dead code that won't be recognized as dead by LLVM.
        performLivenessAnalysis(dfg);
        performFlushLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);

        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:");

        // FIXME: Support OSR entry.
        // https://bugs.webkit.org/show_bug.cgi?id=113625

        FTL::State state(dfg);
        FTL::lowerDFGToLLVM(state);

        if (Options::reportCompileTimes())
            beforeFTL = currentTimeMS();

        if (Options::llvmAlwaysFails()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::compile(state);
        FTL::link(state);
        return FTLPath;
    }
#endif // ENABLE(FTL_JIT)

    performCPSRethreading(dfg);
    performDCE(dfg);
    performVirtualRegisterAllocation(dfg);
    dumpAndVerifyGraph(dfg, "Graph after optimization:");

    JITCompiler dataFlowJIT(dfg);
    if (compileMode == CompileFunction) {
        dataFlowJIT.compileFunction();
        dataFlowJIT.linkFunction();
    } else {
        ASSERT(compileMode == CompileOther);
        dataFlowJIT.compile();
        dataFlowJIT.link();
    }

    return DFGPath;
}