bool foldConstants(BasicBlock* block)
{
    bool changed = false;
    m_state.beginBasicBlock(block);
    for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
        if (!m_state.isValid())
            break;

        Node* node = block->at(indexInBlock);

        bool alreadyHandled = false;
        bool eliminated = false;

        switch (node->op()) {
        case BooleanToNumber: {
            if (node->child1().useKind() == UntypedUse
                && !m_interpreter.needsTypeCheck(node->child1(), SpecBoolean))
                node->child1().setUseKind(BooleanUse);
            break;
        }

        case CheckArgumentsNotCreated: {
            if (!isEmptySpeculation(
                    m_state.variables().operand(
                        m_graph.argumentsRegisterFor(node->origin.semantic)).m_type))
                break;
            node->convertToPhantom();
            eliminated = true;
            break;
        }

        case CheckStructure:
        case ArrayifyToStructure: {
            AbstractValue& value = m_state.forNode(node->child1());
            StructureSet set;
            if (node->op() == ArrayifyToStructure)
                set = node->structure();
            else
                set = node->structureSet();
            if (value.m_structure.isSubsetOf(set)) {
                m_interpreter.execute(indexInBlock); // Catch the fact that we may filter on cell.
                node->convertToPhantom();
                eliminated = true;
                break;
            }
            break;
        }

        case CheckArray:
        case Arrayify: {
            if (!node->arrayMode().alreadyChecked(m_graph, node, m_state.forNode(node->child1())))
                break;
            node->convertToPhantom();
            eliminated = true;
            break;
        }

        case PutStructure: {
            if (m_state.forNode(node->child1()).m_structure.onlyStructure() != node->transition()->next)
                break;
            node->convertToPhantom();
            eliminated = true;
            break;
        }

        case CheckFunction: {
            if (m_state.forNode(node->child1()).value() != node->function()->value())
                break;
            node->convertToPhantom();
            eliminated = true;
            break;
        }

        case CheckInBounds: {
            JSValue left = m_state.forNode(node->child1()).value();
            JSValue right = m_state.forNode(node->child2()).value();
            // The unsigned casts fold the "index >= 0" and "index < length" checks into
            // one comparison: a negative int32 becomes a huge uint32, which can never be
            // below a non-negative length.
            if (left && right && left.isInt32() && right.isInt32()
                && static_cast<uint32_t>(left.asInt32()) < static_cast<uint32_t>(right.asInt32())) {
                node->convertToPhantom();
                eliminated = true;
                break;
            }
            break;
        }

        case MultiGetByOffset: {
            Edge baseEdge = node->child1();
            Node* base = baseEdge.node();
            MultiGetByOffsetData& data = node->multiGetByOffsetData();

            // First prune the variants, then check if the MultiGetByOffset can be
            // strength-reduced to a GetByOffset.

            AbstractValue baseValue = m_state.forNode(base);

            m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
            alreadyHandled = true; // Don't allow the default constant folder to do things to this.

            for (unsigned i = 0; i < data.variants.size(); ++i) {
                GetByIdVariant& variant = data.variants[i];
                variant.structureSet().filter(baseValue);
                if (variant.structureSet().isEmpty()) {
                    data.variants[i--] = data.variants.last();
                    data.variants.removeLast();
                    changed = true;
                }
            }

            if (data.variants.size() != 1)
                break;

            emitGetByOffset(
                indexInBlock, node, baseValue, data.variants[0], data.identifierNumber);
            changed = true;
            break;
        }

        case MultiPutByOffset: {
            Edge baseEdge = node->child1();
            Node* base = baseEdge.node();
            MultiPutByOffsetData& data = node->multiPutByOffsetData();

            AbstractValue baseValue = m_state.forNode(base);

            m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
            alreadyHandled = true; // Don't allow the default constant folder to do things to this.

            for (unsigned i = 0; i < data.variants.size(); ++i) {
                PutByIdVariant& variant = data.variants[i];
                variant.oldStructure().filter(baseValue);

                if (variant.oldStructure().isEmpty()) {
                    data.variants[i--] = data.variants.last();
                    data.variants.removeLast();
                    changed = true;
                    continue;
                }

                if (variant.kind() == PutByIdVariant::Transition
                    && variant.oldStructure().onlyStructure() == variant.newStructure()) {
                    variant = PutByIdVariant::replace(
                        variant.oldStructure(), variant.offset());
                    changed = true;
                }
            }

            if (data.variants.size() != 1)
                break;

            emitPutByOffset(
                indexInBlock, node, baseValue, data.variants[0], data.identifierNumber);
            changed = true;
            break;
        }

        case GetById:
        case GetByIdFlush: {
            Edge childEdge = node->child1();
            Node* child = childEdge.node();
            unsigned identifierNumber = node->identifierNumber();

            AbstractValue baseValue = m_state.forNode(child);

            m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
            alreadyHandled = true; // Don't allow the default constant folder to do things to this.

            if (baseValue.m_structure.isTop() || baseValue.m_structure.isClobbered()
                || (node->child1().useKind() == UntypedUse || (baseValue.m_type & ~SpecCell)))
                break;

            GetByIdStatus status = GetByIdStatus::computeFor(
                vm(), baseValue.m_structure.set(), m_graph.identifiers()[identifierNumber]);
            if (!status.isSimple())
                break;

            for (unsigned i = status.numVariants(); i--;) {
                if (!status[i].constantChecks().isEmpty()
                    || status[i].alternateBase()) {
                    // FIXME: We could handle prototype cases.
                    // https://bugs.webkit.org/show_bug.cgi?id=110386
                    break;
                }
            }

            if (status.numVariants() == 1) {
                emitGetByOffset(indexInBlock, node, baseValue, status[0], identifierNumber);
                changed = true;
                break;
            }

            if (!isFTL(m_graph.m_plan.mode))
                break;

            MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
            data->variants = status.variants();
            data->identifierNumber = identifierNumber;
            node->convertToMultiGetByOffset(data);
            changed = true;
            break;
        }

        case PutById:
        case PutByIdDirect:
        case PutByIdFlush: {
            NodeOrigin origin = node->origin;
            Edge childEdge = node->child1();
            Node* child = childEdge.node();
            unsigned identifierNumber = node->identifierNumber();

            ASSERT(childEdge.useKind() == CellUse);

            AbstractValue baseValue = m_state.forNode(child);

            m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
            alreadyHandled = true; // Don't allow the default constant folder to do things to this.

            if (baseValue.m_structure.isTop() || baseValue.m_structure.isClobbered())
                break;

            PutByIdStatus status = PutByIdStatus::computeFor(
                vm(),
                m_graph.globalObjectFor(origin.semantic),
                baseValue.m_structure.set(),
                m_graph.identifiers()[identifierNumber],
                node->op() == PutByIdDirect);

            if (!status.isSimple())
                break;

            ASSERT(status.numVariants());

            if (status.numVariants() > 1 && !isFTL(m_graph.m_plan.mode))
                break;

            changed = true;

            for (unsigned i = status.numVariants(); i--;)
                addChecks(origin, indexInBlock, status[i].constantChecks());

            if (status.numVariants() == 1) {
                emitPutByOffset(indexInBlock, node, baseValue, status[0], identifierNumber);
                break;
            }

            ASSERT(isFTL(m_graph.m_plan.mode));

            MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
            data->variants = status.variants();
            data->identifierNumber = identifierNumber;
            node->convertToMultiPutByOffset(data);
            break;
        }

        case ToPrimitive: {
            if (m_state.forNode(node->child1()).m_type & ~(SpecFullNumber | SpecBoolean | SpecString))
                break;

            node->convertToIdentity();
            changed = true;
            break;
        }

        case GetMyArgumentByVal: {
            InlineCallFrame* inlineCallFrame = node->origin.semantic.inlineCallFrame;
            JSValue value = m_state.forNode(node->child1()).m_value;
            if (inlineCallFrame && value && value.isInt32()) {
                int32_t index = value.asInt32();
                if (index >= 0
                    && static_cast<size_t>(index + 1) < inlineCallFrame->arguments.size()) {
                    // Roll the interpreter over this.
                    m_interpreter.execute(indexInBlock);
                    eliminated = true;

                    int operand =
                        inlineCallFrame->stackOffset +
                        m_graph.baselineCodeBlockFor(inlineCallFrame)->argumentIndexAfterCapture(index);

                    m_insertionSet.insertNode(
                        indexInBlock, SpecNone, CheckArgumentsNotCreated, node->origin);
                    m_insertionSet.insertNode(
                        indexInBlock, SpecNone, Phantom, node->origin, node->children);

                    node->convertToGetLocalUnlinked(VirtualRegister(operand));
                    break;
                }
            }

            break;
        }

        case Check: {
            alreadyHandled = true;
            m_interpreter.execute(indexInBlock);
            for (unsigned i = 0; i < AdjacencyList::Size; ++i) {
                Edge edge = node->children.child(i);
                if (!edge)
                    break;
                if (edge.isProved() || edge.willNotHaveCheck()) {
                    node->children.removeEdge(i--);
                    changed = true;
                }
            }
            break;
        }

        default:
            break;
        }

        if (eliminated) {
            changed = true;
            continue;
        }

        if (alreadyHandled)
            continue;

        m_interpreter.execute(indexInBlock);
        if (!m_state.isValid()) {
            // If we invalidated then we shouldn't attempt to constant-fold. Here's an
            // example:
            //
            //     c: JSConstant(4.2)
            //     x: ValueToInt32(Check:Int32:@const)
            //
            // It would be correct for an analysis to assume that execution cannot
            // proceed past @x. Therefore, constant-folding @x could be rather bad. But,
            // the CFA may report that it found a constant even though it also reported
            // that everything has been invalidated. This will only happen in a couple of
            // the constant folding cases; most of them are also separately defensive
            // about such things.
            break;
        }
        if (!node->shouldGenerate() || m_state.didClobber() || node->hasConstant())
            continue;

        // Interesting fact: this freezing that we do right here may turn a fragile value into
        // a weak value. See DFGValueStrength.h.
        FrozenValue* value = m_graph.freeze(m_state.forNode(node).value());
        if (!*value)
            continue;

        NodeOrigin origin = node->origin;
        AdjacencyList children = node->children;

        m_graph.convertToConstant(node, value);
        if (!children.isEmpty()) {
            m_insertionSet.insertNode(
                indexInBlock, SpecNone, Phantom, origin, children);
        }

        changed = true;
    }
    m_state.reset();
    m_insertionSet.execute(block);

    return changed;
}
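// Illustrative sketch (not verbatim from this source): foldConstants() above is the
// per-block worker of the constant folding phase. The phase's run() entry point, of
// which this is an assumed simplification, typically walks every block and ORs the
// per-block results together; that accumulated bool is what performConstantFolding(dfg)
// reports back to the pipeline so it can decide whether to re-run CFA.
bool run()
{
    bool changed = false;
    for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue; // Blocks can be null after CFG simplification.
        changed |= foldConstants(block);
    }
    return changed;
}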
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    cleanMustHandleValuesIfNecessary();

    if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }

    Graph dfg(*vm, *this, longLivedState);

    if (!parse(dfg)) {
        finalizer = std::make_unique<FailedFinalizer>(*this);
        return FailPath;
    }

    codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());

    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);

    if (Options::dumpGraphAfterParsing()) {
        dataLog("Graph after parsing:\n");
        dfg.dump();
    }

    performLiveCatchVariablePreservationPhase(dfg);

    if (Options::useMaximalFlushInsertionPhase())
        performMaximalFlushInsertion(dfg);

    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);

    performStaticExecutionCountEstimation(dfg);

    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        performCPSRethreading(dfg);
    }

    if (validationEnabled())
        validate(dfg);

    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performStructureRegistration(dfg);
    performInvalidationPointInjection(dfg);
    performTypeCheckHoisting(dfg);

    dfg.m_fixpointState = FixpointNotConverged;

    // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
    // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
    // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
    // that the compiler compiles more quickly. We want the third tier to compile quickly, which
    // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint.
    if (validationEnabled())
        validate(dfg);

    performStrengthReduction(dfg);
    performCPSRethreading(dfg);
    performCFA(dfg);
    performConstantFolding(dfg);
    bool changed = false;
    changed |= performCFGSimplification(dfg);
    changed |= performLocalCSE(dfg);

    if (validationEnabled())
        validate(dfg);

    performCPSRethreading(dfg);
    if (!isFTL(mode)) {
        // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
        // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
        // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
        // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases,
        // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an
        // escape for all of the arguments. This then disables object allocation sinking.
        //
        // So, for now, we just disable this phase for the FTL.
        //
        // If we wanted to enable it, we'd have to do any of the following:
        // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before
        //   PutStack sinking and object allocation sinking.
        // - Make VarargsForwarding emit a GetLocal+SetLocal sequence, that we can later turn into
        //   GetStack+PutStack.
        //
        // But, it's not super valuable to enable those optimizations, since the FTL
        // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this
        // pathology.

        changed |= performVarargsForwarding(dfg); // Do this after CFG simplification and CPS rethreading.
    }
    if (changed) {
        performCFA(dfg);
        performConstantFolding(dfg);
    }

    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.ensureDominators();
        dfg.ensureNaturalLoops();
        dfg.ensurePrePostNumbering();
    }

    switch (mode) {
    case DFGMode: {
        dfg.m_fixpointState = FixpointConverged;

        performTierUpCheckInjection(dfg);

        performFastStoreBarrierInsertion(dfg);
        performStoreBarrierClustering(dfg);
        performCleanUp(dfg);
        performCPSRethreading(dfg);
        performDCE(dfg);
        performPhantomInsertion(dfg);
        performStackLayout(dfg);
        performVirtualRegisterAllocation(dfg);
        performWatchpointCollection(dfg);
        dumpAndVerifyGraph(dfg, "Graph after optimization:");

        JITCompiler dataFlowJIT(dfg);
        if (codeBlock->codeType() == FunctionCode)
            dataFlowJIT.compileFunction();
        else
            dataFlowJIT.compile();

        return DFGPath;
    }

    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        performCleanUp(dfg); // Reduce the graph size a bit.
        performCriticalEdgeBreaking(dfg);
        if (Options::createPreHeaders())
            performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performSSALowering(dfg);

        // Ideally, these would be run to fixpoint with the object allocation sinking phase.
        performArgumentsElimination(dfg);
        if (Options::usePutStackSinking())
            performPutStackSinking(dfg);

        performConstantHoisting(dfg);
        performGlobalCSE(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performConstantFolding(dfg);
        performCleanUp(dfg); // Reduce the graph size a lot.
        changed = false;
        changed |= performStrengthReduction(dfg);
        if (Options::useObjectAllocationSinking()) {
            changed |= performCriticalEdgeBreaking(dfg);
            changed |= performObjectAllocationSinking(dfg);
        }
        if (changed) {
            // State-at-tail and state-at-head will be invalid if we did strength reduction since
            // it might increase live ranges.
            performLivenessAnalysis(dfg);
            performCFA(dfg);
            performConstantFolding(dfg);
        }

        // Currently, this relies on pre-headers still being valid. That precludes running CFG
        // simplification before it, unless we re-created the pre-headers. There wouldn't be anything
        // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point.
        // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that
        // then we'd need to do some simple SSA fix-up.
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performLICM(dfg);

        // FIXME: Currently: IntegerRangeOptimization *must* be run after LICM.
        //
        // IntegerRangeOptimization makes changes on nodes based on preceding blocks
        // and nodes. LICM moves nodes which can invalidate assumptions used
        // by IntegerRangeOptimization.
        //
        // Ideally, the dependencies should be explicit. See https://bugs.webkit.org/show_bug.cgi?id=157534.
        performLivenessAnalysis(dfg);
        performIntegerRangeOptimization(dfg);

        performCleanUp(dfg);
        performIntegerCheckCombining(dfg);
        performGlobalCSE(dfg);

        // At this point we're not allowed to do any further code motion because our reasoning
        // about code motion assumes that it's OK to insert GC points in random places.
        dfg.m_fixpointState = FixpointConverged;

        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performGlobalStoreBarrierInsertion(dfg);
        performStoreBarrierClustering(dfg);
        if (Options::useMovHintRemoval())
            performMovHintRemoval(dfg);
        performCleanUp(dfg);
        performDCE(dfg); // We rely on this to kill dead code that won't be recognized as dead by B3.
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);
        performWatchpointCollection(dfg);

        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldDumpDisassembly(mode));

        // Flash a safepoint in case the GC wants some action.
        Safepoint::Result safepointResult;
        {
            GraphSafepoint safepoint(dfg, safepointResult);
        }
        if (safepointResult.didGetCancelled())
            return CancelPath;

        FTL::State state(dfg);
        FTL::lowerDFGToB3(state);

        if (UNLIKELY(computeCompileTimes()))
            m_timeBeforeFTL = monotonicallyIncreasingTimeMS();

        if (Options::b3AlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::compile(state, safepointResult);
        if (safepointResult.didGetCancelled())
            return CancelPath;

        if (Options::b3AlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }

        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::link(state);
        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
#endif // ENABLE(FTL_JIT)
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
    }
}
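// Illustrative sketch (an assumption, simplified from the usual DFG phase pattern,
// not the verbatim implementation): each perform* call above is a thin wrapper that
// instantiates a phase object over the graph, runs it, and returns whether it changed
// the IR, which is why the pipeline can OR results into a `changed` flag and re-run
// CFA plus constant folding only when something moved. runPhase and phase.name() here
// are assumed names.
template<typename PhaseType>
bool runPhase(Graph& graph)
{
    PhaseType phase(graph);
    bool changed = phase.run();
    if (changed && logCompilationChanges(graph.m_plan.mode))
        dataLog("Phase ", phase.name(), " changed the IR.\n");
    return changed;
}

bool performConstantFolding(Graph& graph)
{
    return runPhase<ConstantFoldingPhase>(graph);
}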
void Plan::compileInThread(LongLivedState& longLivedState, ThreadData* threadData)
{
    this->threadData = threadData;

    double before = 0;
    CString codeBlockName;
    if (UNLIKELY(computeCompileTimes()))
        before = monotonicallyIncreasingTimeMS();
    if (UNLIKELY(reportCompileTimes()))
        codeBlockName = toCString(*codeBlock);

    CompilationScope compilationScope;

    if (logCompilationChanges(mode) || Options::reportDFGPhaseTimes())
        dataLog("DFG(Plan) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");

    CompilationPath path = compileInThreadImpl(longLivedState);

    RELEASE_ASSERT(path == CancelPath || finalizer);
    RELEASE_ASSERT((path == CancelPath) == (stage == Cancelled));

    double after = 0;
    if (UNLIKELY(computeCompileTimes())) {
        after = monotonicallyIncreasingTimeMS();

        if (Options::reportTotalCompileTimes()) {
            if (isFTL(mode)) {
                totalFTLCompileTime += after - before;
                totalFTLDFGCompileTime += m_timeBeforeFTL - before;
                totalFTLB3CompileTime += after - m_timeBeforeFTL;
            } else
                totalDFGCompileTime += after - before;
        }
    }
    const char* pathName = nullptr;
    switch (path) {
    case FailPath:
        pathName = "N/A (fail)";
        break;
    case DFGPath:
        pathName = "DFG";
        break;
    case FTLPath:
        pathName = "FTL";
        break;
    case CancelPath:
        pathName = "Cancelled";
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    if (codeBlock) { // codeBlock will be null if the compilation was cancelled.
        if (path == FTLPath)
            CODEBLOCK_LOG_EVENT(codeBlock, "ftlCompile", ("took ", after - before, " ms (DFG: ", m_timeBeforeFTL - before, ", B3: ", after - m_timeBeforeFTL, ") with ", pathName));
        else
            CODEBLOCK_LOG_EVENT(codeBlock, "dfgCompile", ("took ", after - before, " ms with ", pathName));
    }
    if (UNLIKELY(reportCompileTimes())) {
        dataLog("Optimized ", codeBlockName, " using ", mode, " with ", pathName, " into ", finalizer ? finalizer->codeSize() : 0, " bytes in ", after - before, " ms");
        if (path == FTLPath)
            dataLog(" (DFG: ", m_timeBeforeFTL - before, ", B3: ", after - m_timeBeforeFTL, ")");
        dataLog(".\n");
    }
}
bool Plan::reportCompileTimes() const
{
    return Options::reportCompileTimes()
        || Options::reportDFGCompileTimes()
        || (Options::reportFTLCompileTimes() && isFTL(mode));
}
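// Illustrative sketch (an assumption; the body is not shown in this source):
// computeCompileTimes(), used by compileInThread() above to decide whether to sample
// the clock at all, plausibly subsumes reportCompileTimes() and also fires when only
// the aggregate totals (Options::reportTotalCompileTimes()) are requested.
bool Plan::computeCompileTimes() const
{
    return reportCompileTimes()
        || Options::reportTotalCompileTimes();
}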
void Plan::compileInThread(LongLivedState& longLivedState, ThreadData* threadData)
{
    this->threadData = threadData;

    double before = 0;
    CString codeBlockName;
    if (computeCompileTimes())
        before = monotonicallyIncreasingTimeMS();
    if (reportCompileTimes())
        codeBlockName = toCString(*codeBlock);

    SamplingRegion samplingRegion("DFG Compilation (Plan)");
    CompilationScope compilationScope;

    if (logCompilationChanges(mode))
        dataLog("DFG(Plan) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");

    CompilationPath path = compileInThreadImpl(longLivedState);

    RELEASE_ASSERT(path == CancelPath || finalizer);
    RELEASE_ASSERT((path == CancelPath) == (stage == Cancelled));

    double after = 0;
    if (computeCompileTimes())
        after = monotonicallyIncreasingTimeMS();
    if (Options::reportTotalCompileTimes()) {
        if (isFTL(mode)) {
            totalFTLCompileTime += after - before;
            totalFTLDFGCompileTime += m_timeBeforeFTL - before;
            totalFTLB3CompileTime += after - m_timeBeforeFTL;
        } else
            totalDFGCompileTime += after - before;
    }

    if (reportCompileTimes()) {
        const char* pathName;
        switch (path) {
        case FailPath:
            pathName = "N/A (fail)";
            break;
        case DFGPath:
            pathName = "DFG";
            break;
        case FTLPath:
            pathName = "FTL";
            break;
        case CancelPath:
            pathName = "Cancelled";
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            pathName = "";
#endif
            break;
        }
        dataLog("Optimized ", codeBlockName, " using ", mode, " with ", pathName, " into ", finalizer ? finalizer->codeSize() : 0, " bytes in ", after - before, " ms");
        if (path == FTLPath)
            dataLog(" (DFG: ", m_timeBeforeFTL - before, ", B3: ", after - m_timeBeforeFTL, ")");
        dataLog(".\n");
    }
}
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }

    Graph dfg(vm, *this, longLivedState);

    if (!parse(dfg)) {
        finalizer = adoptPtr(new FailedFinalizer(*this));
        return FailPath;
    }

    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);

    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);

    if (isFTL(mode))
        performStaticExecutionCountEstimation(dfg);

    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = adoptPtr(new FailedFinalizer(*this));
            return FailPath;
        }
        performCPSRethreading(dfg);
    }

    if (validationEnabled())
        validate(dfg);

    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performInvalidationPointInjection(dfg);
    performTypeCheckHoisting(dfg);

    unsigned count = 1;
    dfg.m_fixpointState = FixpointNotConverged;
    for (;; ++count) {
        if (logCompilationChanges(mode))
            dataLogF("DFG beginning optimization fixpoint iteration #%u.\n", count);
        bool changed = false;

        if (validationEnabled())
            validate(dfg);

        changed |= performStrengthReduction(dfg);
        performCFA(dfg);
        changed |= performConstantFolding(dfg);
        changed |= performArgumentsSimplification(dfg);
        changed |= performCFGSimplification(dfg);
        changed |= performCSE(dfg);

        if (!changed)
            break;
        performCPSRethreading(dfg);
    }

    if (logCompilationChanges(mode))
        dataLogF("DFG optimization fixpoint converged in %u iterations.\n", count);

    dfg.m_fixpointState = FixpointConverged;

    performStoreBarrierElision(dfg);

    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.m_dominators.computeIfNecessary(dfg);
        dfg.m_naturalLoops.computeIfNecessary(dfg);
    }

    switch (mode) {
    case DFGMode: {
        performTierUpCheckInjection(dfg);

        performStoreElimination(dfg);
        performCPSRethreading(dfg);
        performDCE(dfg);
        performStackLayout(dfg);
        performVirtualRegisterAllocation(dfg);
        performWatchpointCollection(dfg);
        dumpAndVerifyGraph(dfg, "Graph after optimization:");

        JITCompiler dataFlowJIT(dfg);
        if (codeBlock->codeType() == FunctionCode) {
            dataFlowJIT.compileFunction();
            dataFlowJIT.linkFunction();
        } else {
            dataFlowJIT.compile();
            dataFlowJIT.link();
        }

        return DFGPath;
    }

    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = adoptPtr(new FailedFinalizer(*this));
            return FailPath;
        }

        performCriticalEdgeBreaking(dfg);
        performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performSSALowering(dfg);
        performCSE(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performLICM(dfg);
        performIntegerCheckCombining(dfg);
        performCSE(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        if (Options::validateFTLOSRExitLiveness())
            performResurrectionForValidation(dfg);
        performDCE(dfg); // We rely on this to convert dead SetLocals into the appropriate hint, and to kill dead code that won't be recognized as dead by LLVM.
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);
        performWatchpointCollection(dfg);

        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:");

        {
            GraphSafepoint safepoint(dfg);
            initializeLLVM();
        }

        FTL::State state(dfg);
        FTL::lowerDFGToLLVM(state);

        if (reportCompileTimes())
            beforeFTL = currentTimeMS();

        if (Options::llvmAlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::compile(state);

        if (Options::llvmAlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }

        if (state.jitCode->stackmaps.stackSize() > Options::llvmMaxStackSize()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::link(state);
        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
#endif // ENABLE(FTL_JIT)
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
    }
}