Example 1
void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
    if (shouldShowDisassembly())
        m_disassembler->dump(linkBuffer);
    
    if (m_graph.m_plan.compilation)
        m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
}
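A minimal sketch of the gating pattern above, where diagnostics are emitted only when a flag is set and profiler reporting happens only when a consumer was attached. DiagnosticsSink and Profiler are hypothetical stand-ins, not JavaScriptCore types:

#include <cstdio>
#include <memory>

struct Profiler {
    void report(const char* what) { std::printf("profiled: %s\n", what); }
};

struct DiagnosticsSink {
    bool showDisassembly = false;       // plays the role of shouldShowDisassembly()
    std::unique_ptr<Profiler> profiler; // plays the role of m_plan.compilation

    void finish(const char* artifact)
    {
        if (showDisassembly)
            std::printf("disassembly of %s\n", artifact); // cf. m_disassembler->dump()
        if (profiler)                                     // report only if attached
            profiler->report(artifact);
    }
};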
Example 2
JITCompiler::JITCompiler(Graph& dfg)
    : CCallHelpers(&dfg.m_vm, dfg.m_codeBlock)
    , m_graph(dfg)
    , m_jitCode(adoptRef(new JITCode()))
    , m_blockHeads(dfg.numBlocks())
{
    if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
        m_disassembler = adoptPtr(new Disassembler(dfg));
}
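The constructor above allocates the Disassembler only when someone will consume its output (the disassembly flag or the per-bytecode profiler), keeping the common path free of bookkeeping. A minimal sketch of that lazy-allocation idea, with hypothetical names:

#include <memory>

struct Disassembler { /* records label/offset metadata during codegen */ };

struct Compiler {
    std::unique_ptr<Disassembler> m_disassembler;

    Compiler(bool showDisassembly, bool profilerAttached)
    {
        // Both consumers share one recorder; when neither is active, no
        // metadata is collected at all.
        if (showDisassembly || profilerAttached)
            m_disassembler = std::make_unique<Disassembler>();
    }
};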
bool JITCompiler::compile(JITCode& entry)
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();
    SpeculativeJIT speculative(*this);
    compileBody(speculative);
    setEndOfMainPath();

    // Generate slow path code.
    speculative.runSlowPathGenerators();
    
    compileExceptionHandlers();
    linkOSRExits();
    
    // Create OSR entry trampolines if necessary.
    speculative.createOSREntries();
    setEndOfCode();

    LinkBuffer linkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate())
        return false;
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);

    if (shouldShowDisassembly())
        m_disassembler->dump(linkBuffer);
    if (m_graph.m_compilation)
        m_disassembler->reportToProfiler(m_graph.m_compilation.get(), linkBuffer);

    entry = JITCode(
        linkBuffer.finalizeCodeWithoutDisassembly(),
        JITCode::DFGJIT);
    return true;
}
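compile() passes JITCompilationCanFail to the LinkBuffer and bails out with false if executable memory could not be allocated, so a higher tier can fall back instead of crashing. A sketch of that fallible-finalization shape; allocateExecutable is a hypothetical, stubbed stand-in for the executable memory pool:

#include <cstddef>
#include <optional>

struct LinkResult { void* entryPoint; };

void* allocateExecutable(std::size_t) { return nullptr; } // stubbed out here

std::optional<LinkResult> tryLink(std::size_t codeSize)
{
    void* memory = allocateExecutable(codeSize); // may legitimately fail
    if (!memory)
        return std::nullopt;       // cf. linkBuffer.didFailToAllocate()
    return LinkResult { memory };  // caller proceeds to link and finalize
}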
Example 4
void compile(State& state, Safepoint::Result& safepointResult)
{
    char* error = 0;
    
    {
        GraphSafepoint safepoint(state.graph, safepointResult);
        
        LLVMMCJITCompilerOptions options;
        llvm->InitializeMCJITCompilerOptions(&options, sizeof(options));
        options.OptLevel = Options::llvmBackendOptimizationLevel();
        options.NoFramePointerElim = true;
        if (Options::useLLVMSmallCodeModel())
            options.CodeModel = LLVMCodeModelSmall;
        options.EnableFastISel = Options::enableLLVMFastISel();
        options.MCJMM = llvm->CreateSimpleMCJITMemoryManager(
            &state, mmAllocateCodeSection, mmAllocateDataSection, mmApplyPermissions, mmDestroy);
    
        LLVMExecutionEngineRef engine;
        
        if (isARM64())
#if OS(DARWIN)
            llvm->SetTarget(state.module, "arm64-apple-ios");
#elif OS(LINUX)
            llvm->SetTarget(state.module, "aarch64-linux-gnu");
#else
#error "Unrecognized OS"
#endif

        if (llvm->CreateMCJITCompilerForModule(&engine, state.module, &options, sizeof(options), &error)) {
            dataLog("FATAL: Could not create LLVM execution engine: ", error, "\n");
            CRASH();
        }

        // The data layout also has to be set in the module. Get the data layout from the MCJIT and apply
        // it to the module.
        LLVMTargetMachineRef targetMachine = llvm->GetExecutionEngineTargetMachine(engine);
        LLVMTargetDataRef targetData = llvm->GetExecutionEngineTargetData(engine);
        char* stringRepOfTargetData = llvm->CopyStringRepOfTargetData(targetData);
        llvm->SetDataLayout(state.module, stringRepOfTargetData);
        free(stringRepOfTargetData);

        LLVMPassManagerRef functionPasses = 0;
        LLVMPassManagerRef modulePasses;

        if (Options::llvmSimpleOpt()) {
            modulePasses = llvm->CreatePassManager();
            llvm->AddTargetData(targetData, modulePasses);
            llvm->AddAnalysisPasses(targetMachine, modulePasses);
            llvm->AddPromoteMemoryToRegisterPass(modulePasses);
            llvm->AddGlobalOptimizerPass(modulePasses);
            llvm->AddFunctionInliningPass(modulePasses);
            llvm->AddPruneEHPass(modulePasses);
            llvm->AddGlobalDCEPass(modulePasses);
            llvm->AddConstantPropagationPass(modulePasses);
            llvm->AddAggressiveDCEPass(modulePasses);
            llvm->AddInstructionCombiningPass(modulePasses);
            // BEGIN - DO NOT CHANGE THE ORDER OF THE ALIAS ANALYSIS PASSES
            llvm->AddTypeBasedAliasAnalysisPass(modulePasses);
            llvm->AddBasicAliasAnalysisPass(modulePasses);
            // END - DO NOT CHANGE THE ORDER OF THE ALIAS ANALYSIS PASSES
            llvm->AddGVNPass(modulePasses);
            llvm->AddCFGSimplificationPass(modulePasses);
            llvm->AddDeadStoreEliminationPass(modulePasses);

            llvm->RunPassManager(modulePasses, state.module);
        } else {
            LLVMPassManagerBuilderRef passBuilder = llvm->PassManagerBuilderCreate();
            llvm->PassManagerBuilderSetOptLevel(passBuilder, Options::llvmOptimizationLevel());
            llvm->PassManagerBuilderUseInlinerWithThreshold(passBuilder, 275);
            llvm->PassManagerBuilderSetSizeLevel(passBuilder, Options::llvmSizeLevel());
        
            functionPasses = llvm->CreateFunctionPassManagerForModule(state.module);
            modulePasses = llvm->CreatePassManager();
        
            llvm->AddTargetData(llvm->GetExecutionEngineTargetData(engine), modulePasses);
        
            llvm->PassManagerBuilderPopulateFunctionPassManager(passBuilder, functionPasses);
            llvm->PassManagerBuilderPopulateModulePassManager(passBuilder, modulePasses);
        
            llvm->PassManagerBuilderDispose(passBuilder);
        
            llvm->InitializeFunctionPassManager(functionPasses);
            for (LValue function = llvm->GetFirstFunction(state.module); function; function = llvm->GetNextFunction(function))
                llvm->RunFunctionPassManager(functionPasses, function);
            llvm->FinalizeFunctionPassManager(functionPasses);
        
            llvm->RunPassManager(modulePasses, state.module);
        }

        if (shouldShowDisassembly() || verboseCompilationEnabled())
            state.dumpState("after optimization");
    
        // FIXME: Need to add support for the case where JIT memory allocation failed.
        // https://bugs.webkit.org/show_bug.cgi?id=113620
        state.generatedFunction = reinterpret_cast<GeneratedFunction>(llvm->GetPointerToGlobal(engine, state.function));
        if (functionPasses)
            llvm->DisposePassManager(functionPasses);
        llvm->DisposePassManager(modulePasses);
        llvm->DisposeExecutionEngine(engine);
    }
    if (safepointResult.didGetCancelled())
        return;
    RELEASE_ASSERT(!state.graph.m_vm.heap.isCollecting());
    
    if (shouldShowDisassembly()) {
        for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
            ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
            dataLog(
                "Generated LLVM code for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                " #", i, ", ", state.codeSectionNames[i], ":\n");
            disassemble(
                MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                "    ", WTF::dataFile(), LLVMSubset);
        }
        
        for (unsigned i = 0; i < state.jitCode->dataSections().size(); ++i) {
            DataSection* section = state.jitCode->dataSections()[i].get();
            dataLog(
                "Generated LLVM data section for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                " #", i, ", ", state.dataSectionNames[i], ":\n");
            dumpDataSection(section, "    ");
        }
    }
    
    bool didSeeUnwindInfo = state.jitCode->unwindInfo.parse(
        state.unwindDataSection, state.unwindDataSectionSize,
        state.generatedFunction);
    if (shouldShowDisassembly()) {
        dataLog("Unwind info for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
        if (didSeeUnwindInfo)
            dataLog("    ", state.jitCode->unwindInfo, "\n");
        else
            dataLog("    <no unwind info>\n");
    }
    
    if (state.stackmapsSection && state.stackmapsSection->size()) {
        if (shouldShowDisassembly()) {
            dataLog(
                "Generated LLVM stackmaps section for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
            dataLog("    Raw data:\n");
            dumpDataSection(state.stackmapsSection.get(), "    ");
        }
        
        RefPtr<DataView> stackmapsData = DataView::create(
            ArrayBuffer::create(state.stackmapsSection->base(), state.stackmapsSection->size()));
        state.jitCode->stackmaps.parse(stackmapsData.get());
    
        if (shouldShowDisassembly()) {
            dataLog("    Structured data:\n");
            state.jitCode->stackmaps.dumpMultiline(WTF::dataFile(), "        ");
        }
        
        StackMaps::RecordMap recordMap = state.jitCode->stackmaps.computeRecordMap();
        fixFunctionBasedOnStackMaps(
            state, state.graph.m_codeBlock, state.jitCode.get(), state.generatedFunction,
            recordMap, didSeeUnwindInfo);
        
        if (shouldShowDisassembly()) {
            for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
                if (state.codeSectionNames[i] != SECTION_NAME("text"))
                    continue;
                
                ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
                dataLog(
                    "Generated LLVM code after stackmap-based fix-up for ",
                    CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                    " in ", state.graph.m_plan.mode, " #", i, ", ",
                    state.codeSectionNames[i], ":\n");
                disassemble(
                    MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                    "    ", WTF::dataFile(), LLVMSubset);
            }
        }
    }
    
    state.module = 0; // We no longer own the module.
}
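One ownership detail in the listing above: CopyStringRepOfTargetData returns a malloc'd C string that the caller must free, which the code does manually. A sketch of wrapping that contract in RAII so the free cannot be forgotten; copyTargetDataString is a hypothetical stand-in for the LLVM call:

#include <cstdlib>
#include <cstring>
#include <memory>

using CString = std::unique_ptr<char, decltype(&std::free)>;

char* copyTargetDataString() // stand-in: returns malloc'd storage, caller frees
{
    const char* s = "e-m:e-i64:64-n32:64"; // sample data-layout string
    char* copy = static_cast<char*>(std::malloc(std::strlen(s) + 1));
    if (copy)
        std::strcpy(copy, s);
    return copy;
}

int main()
{
    CString layout(copyTargetDataString(), &std::free);
    // ... hand layout.get() to SetDataLayout(...) ...
}   // std::free runs automatically, matching the manual free() above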
Example 5
void compileOSRExit(ExecState* exec)
{
    SamplingRegion samplingRegion("DFG OSR Exit Compilation");
    
    CodeBlock* codeBlock = exec->codeBlock();
    
    ASSERT(codeBlock);
    ASSERT(codeBlock->getJITType() == JITCode::DFGJIT);
    
    JSGlobalData* globalData = &exec->globalData();
    
    uint32_t exitIndex = globalData->osrExitIndex;
    OSRExit& exit = codeBlock->osrExit(exitIndex);
    
    // Make sure all code on our inline stack is JIT compiled. This is necessary since
    // we may opt to inline a code block even before it had ever been compiled by the
    // JIT, but our OSR exit infrastructure currently only works if the target of the
    // OSR exit is JIT code. This could be changed since there is nothing particularly
    // hard about doing an OSR exit into the interpreter, but for now this seems to make
    // sense in that if we're OSR exiting from inlined code of a DFG code block, then
    // probably it's a good sign that the thing we're exiting into is hot. Even more
    // interestingly, since the code was inlined, it may never otherwise get JIT
    // compiled since the act of inlining it may ensure that it otherwise never runs.
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        static_cast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())
            ->baselineCodeBlockFor(codeOrigin.inlineCallFrame->isCall ? CodeForCall : CodeForConstruct)
            ->jitCompile(exec);
    }
    
    // Compute the value recoveries.
    Operands<ValueRecovery> operands;
    codeBlock->variableEventStream().reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->minifiedDFG(), exit.m_streamIndex, operands);
    
    // There may be an override, for forward speculations.
    if (!!exit.m_valueRecoveryOverride) {
        operands.setOperand(
            exit.m_valueRecoveryOverride->operand, exit.m_valueRecoveryOverride->recovery);
    }
    
    SpeculationRecovery* recovery = 0;
    if (exit.m_recoveryIndex)
        recovery = &codeBlock->speculationRecovery(exit.m_recoveryIndex - 1);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog(
        "Generating OSR exit #", exitIndex, " (seq#", exit.m_streamIndex,
        ", bc#", exit.m_codeOrigin.bytecodeIndex, ", @", exit.m_nodeIndex, ", ",
        exit.m_kind, ") for ", *codeBlock, ".\n");
#endif

    {
        CCallHelpers jit(globalData, codeBlock);
        OSRExitCompiler exitCompiler(jit);

        jit.jitAssertHasValidCallFrame();
        
        if (globalData->m_perBytecodeProfiler && codeBlock->compilation()) {
            Profiler::Database& database = *globalData->m_perBytecodeProfiler;
            Profiler::Compilation* compilation = codeBlock->compilation();
            
            Profiler::OSRExit* profilerExit = compilation->addOSRExit(
                exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
                exit.m_kind,
                exit.m_watchpointIndex != std::numeric_limits<unsigned>::max());
            jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
        }
        
        exitCompiler.compileExit(exit, operands, recovery);
        
        LinkBuffer patchBuffer(*globalData, &jit, codeBlock);
        exit.m_code = FINALIZE_CODE_IF(
            shouldShowDisassembly(),
            patchBuffer,
            ("DFG OSR exit #%u (bc#%u, @%u, %s) from %s",
                exitIndex, exit.m_codeOrigin.bytecodeIndex, exit.m_nodeIndex,
                exitKindToString(exit.m_kind), toCString(*codeBlock).data()));
    }
    
    {
        RepatchBuffer repatchBuffer(codeBlock);
        repatchBuffer.relink(exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code()));
    }
    
    globalData->osrExitJumpDestination = exit.m_code.code().executableAddress();
}
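compileOSRExit is only reached the first time a given exit fires: it compiles a stub for that exit and repatches the original jump to target it, so subsequent exits bypass this slow path entirely. A sketch of that compile-once-then-repatch idea, with hypothetical stand-in types:

#include <cstdint>
#include <vector>

struct ExitStub { const void* code = nullptr; };

struct ExitTable {
    std::vector<ExitStub> stubs; // sized to the exit count at link time

    const void* codeFor(uint32_t exitIndex)
    {
        ExitStub& stub = stubs[exitIndex];
        if (!stub.code)
            stub.code = compileStub(exitIndex); // slow path, runs once
        return stub.code;                       // cf. repatched jump target
    }

    const void* compileStub(uint32_t)
    {
        // Stand-in for the CCallHelpers/OSRExitCompiler/LinkBuffer work above.
        static const char marker = 0;
        return &marker;
    }
};

The next listing appears to be a later revision of the same function, rewritten against the VM and DFG::JITCode APIs.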
void compileOSRExit(ExecState* exec)
{
    SamplingRegion samplingRegion("DFG OSR Exit Compilation");
    
    CodeBlock* codeBlock = exec->codeBlock();
    
    ASSERT(codeBlock);
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    
    VM* vm = &exec->vm();
    
    // It's preferable that we don't GC while in here; in any case, doing so wouldn't
    // really be profitable.
    DeferGCForAWhile deferGC(vm->heap);

    uint32_t exitIndex = vm->osrExitIndex;
    OSRExit& exit = codeBlock->jitCode()->dfg()->osrExit[exitIndex];
    
    prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
    
    // Compute the value recoveries.
    Operands<ValueRecovery> operands;
    codeBlock->jitCode()->dfg()->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->jitCode()->dfg()->minifiedDFG, exit.m_streamIndex, operands);
    
    // There may be an override, for forward speculations.
    if (!!exit.m_valueRecoveryOverride) {
        operands.setOperand(
            exit.m_valueRecoveryOverride->operand, exit.m_valueRecoveryOverride->recovery);
    }
    
    SpeculationRecovery* recovery = 0;
    if (exit.m_recoveryIndex != UINT_MAX)
        recovery = &codeBlock->jitCode()->dfg()->speculationRecovery[exit.m_recoveryIndex];

    {
        CCallHelpers jit(vm, codeBlock);
        OSRExitCompiler exitCompiler(jit);

        jit.jitAssertHasValidCallFrame();
        
        if (vm->m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation) {
            Profiler::Database& database = *vm->m_perBytecodeProfiler;
            Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();
            
            Profiler::OSRExit* profilerExit = compilation->addOSRExit(
                exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
                exit.m_kind, exit.m_kind == UncountableInvalidation);
            jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
        }
        
        exitCompiler.compileExit(exit, operands, recovery);
        
        LinkBuffer patchBuffer(*vm, jit, codeBlock);
        exit.m_code = FINALIZE_CODE_IF(
            shouldShowDisassembly() || Options::verboseOSR(),
            patchBuffer,
            ("DFG OSR exit #%u (%s, %s) from %s, with operands = %s",
                exitIndex, toCString(exit.m_codeOrigin).data(),
                exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
                toCString(ignoringContext<DumpContext>(operands)).data()));
    }
    
    {
        RepatchBuffer repatchBuffer(codeBlock);
        repatchBuffer.relink(exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code()));
    }
    
    vm->osrExitJumpDestination = exit.m_code.code().executableAddress();
}
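This revision adds DeferGCForAWhile, a scope guard that keeps the collector from running while exit compilation is in flight. A sketch of that deferral idea as a counter-based RAII guard; Heap and DeferGC here are hypothetical stand-ins, not the JavaScriptCore classes:

struct Heap {
    unsigned deferCount = 0;
    bool collectionAllowed() const { return !deferCount; }
};

class DeferGC {
public:
    explicit DeferGC(Heap& heap) : m_heap(heap) { ++m_heap.deferCount; }
    ~DeferGC() { --m_heap.deferCount; } // re-enables GC when the scope ends
    DeferGC(const DeferGC&) = delete;
    DeferGC& operator=(const DeferGC&) = delete;
private:
    Heap& m_heap;
};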
bool JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    SamplingRegion samplingRegion("DFG Backend");
    
    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check, we will already have moved the return address,
    // so we enter after this point.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackCheck = branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), GPRInfo::regT1);
    // Return here after stack check.
    Label fromStackCheck = label();


    // === Function body code generation ===
    SpeculativeJIT speculative(*this);
    compileBody(speculative);
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the slow stack check (if the fast one in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    stackCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    CallBeginToken token;
    beginCall(CodeOrigin(0), token);
    Call callStackCheck = call();
    notifyCall(callStackCheck, CodeOrigin(0), token);
    jump(fromStackCheck);
    
    // The fast entry point into a function does not check that the correct number of
    // arguments has been passed to the call (we only use the fast entry point where we can
    // statically determine that the correct number of arguments has been passed, or have
    // already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    beginCall(CodeOrigin(0), token);
    Call callArityCheck = call();
    notifyCall(callArityCheck, CodeOrigin(0), token);
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);
    
    // Generate slow path code.
    speculative.runSlowPathGenerators();
    
    compileExceptionHandlers();
    linkOSRExits();
    
    // Create OSR entry trampolines if necessary.
    speculative.createOSREntries();
    setEndOfCode();

    // === Link ===
    LinkBuffer linkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate())
        return false;
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);
    
    // FIXME: switch the stack check & arity check over to DFGOperation-style calls, not JIT stubs.
    linkBuffer.link(callStackCheck, cti_stack_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);
    
    if (shouldShowDisassembly())
        m_disassembler->dump(linkBuffer);
    if (m_graph.m_compilation)
        m_disassembler->reportToProfiler(m_graph.m_compilation.get(), linkBuffer);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = JITCode(
        linkBuffer.finalizeCodeWithoutDisassembly(),
        JITCode::DFGJIT);
    return true;
}
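compileFunction lays out two entry points: a fast entry for call sites that statically know the argument count, and an arity-checked entry that fixes up the frame before joining the fast path. A sketch of that shape, with hypothetical stand-ins:

#include <cstdio>

void functionBody() { std::puts("body"); }

void fastEntry() // cf. the label planted right after compileEntry()
{
    functionBody();
}

void checkedEntry(int argumentCount, int expectedParameters)
{
    if (argumentCount < expectedParameters)
        std::puts("fixing up arity"); // cf. the cti_op_*_arityCheck call
    fastEntry();                      // then join the common path
}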
Example 8
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }
    
    Graph dfg(vm, *this, longLivedState);
    
    if (!parse(dfg)) {
        finalizer = std::make_unique<FailedFinalizer>(*this);
        return FailPath;
    }

    codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());
    
    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);
    
    if (Options::dumpGraphAfterParsing()) {
        dataLog("Graph after parsing:\n");
        dfg.dump();
    }

    performLiveCatchVariablePreservationPhase(dfg);

    if (Options::enableMaximalFlushInsertionPhase())
        performMaximalFlushInsertion(dfg);
    
    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);
    
    performStaticExecutionCountEstimation(dfg);
    
    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        performCPSRethreading(dfg);
    }
    
    if (validationEnabled())
        validate(dfg);
    
    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performStructureRegistration(dfg);
    performInvalidationPointInjection(dfg);
    performTypeCheckHoisting(dfg);
    
    dfg.m_fixpointState = FixpointNotConverged;
    
    // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
    // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
    // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
    // that the compiler compiles more quickly. We want the third tier to compile quickly, which
    // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint.
    if (validationEnabled())
        validate(dfg);
        
    performStrengthReduction(dfg);
    performLocalCSE(dfg);
    performCPSRethreading(dfg);
    performCFA(dfg);
    performConstantFolding(dfg);
    bool changed = false;
    changed |= performCFGSimplification(dfg);
    changed |= performLocalCSE(dfg);
    
    if (validationEnabled())
        validate(dfg);
    
    performCPSRethreading(dfg);
    if (!isFTL(mode)) {
        // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
        // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
        // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
        // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases,
        // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an
        // escape for all of the arguments. This then disables object allocation sinking.
        //
        // So, for now, we just disable this phase for the FTL.
        //
        // If we wanted to enable it, we'd have to do any of the following:
        // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before
        //   PutStack sinking and object allocation sinking.
        // - Make VarargsForwarding emit a GetLocal+SetLocal sequence, that we can later turn into
        //   GetStack+PutStack.
        //
        // But, it's not super valuable to enable those optimizations, since the FTL
        // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this
        // pathology.
        
        changed |= performVarargsForwarding(dfg); // Do this after CFG simplification and CPS rethreading.
    }
    if (changed) {
        performCFA(dfg);
        performConstantFolding(dfg);
    }
    
    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.m_dominators.computeIfNecessary(dfg);
        dfg.m_naturalLoops.computeIfNecessary(dfg);
        dfg.m_prePostNumbering.computeIfNecessary(dfg);
    }

    switch (mode) {
    case DFGMode: {
        dfg.m_fixpointState = FixpointConverged;
    
        performTierUpCheckInjection(dfg);

        performFastStoreBarrierInsertion(dfg);
        performCleanUp(dfg);
        performCPSRethreading(dfg);
        performDCE(dfg);
        performPhantomInsertion(dfg);
        performStackLayout(dfg);
        performVirtualRegisterAllocation(dfg);
        performWatchpointCollection(dfg);
        dumpAndVerifyGraph(dfg, "Graph after optimization:");
        
        JITCompiler dataFlowJIT(dfg);
        if (codeBlock->codeType() == FunctionCode)
            dataFlowJIT.compileFunction();
        else
            dataFlowJIT.compile();
        
        return DFGPath;
    }
    
    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        
        performCleanUp(dfg); // Reduce the graph size a bit.
        performCriticalEdgeBreaking(dfg);
        if (Options::createPreHeaders())
            performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performSSALowering(dfg);
        
        // Ideally, these would be run to fixpoint with the object allocation sinking phase.
        performArgumentsElimination(dfg);
        performPutStackSinking(dfg);
        
        performConstantHoisting(dfg);
        performGlobalCSE(dfg);
        performLivenessAnalysis(dfg);
        performIntegerRangeOptimization(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performConstantFolding(dfg);
        performCleanUp(dfg); // Reduce the graph size a lot.
        changed = false;
        changed |= performStrengthReduction(dfg);
        if (Options::enableObjectAllocationSinking()) {
            changed |= performCriticalEdgeBreaking(dfg);
            changed |= performObjectAllocationSinking(dfg);
        }
        if (changed) {
            // State-at-tail and state-at-head will be invalid if we did strength reduction since
            // it might increase live ranges.
            performLivenessAnalysis(dfg);
            performCFA(dfg);
            performConstantFolding(dfg);
        }
        
        // Currently, this relies on pre-headers still being valid. That precludes running CFG
        // simplification before it, unless we re-created the pre-headers. There wouldn't be anything
        // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point.
        // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that
        // then we'd need to do some simple SSA fix-up.
        performLICM(dfg);
        
        performCleanUp(dfg);
        performIntegerCheckCombining(dfg);
        performGlobalCSE(dfg);
        
        // At this point we're not allowed to do any further code motion because our reasoning
        // about code motion assumes that it's OK to insert GC points in random places.
        dfg.m_fixpointState = FixpointConverged;
        
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performGlobalStoreBarrierInsertion(dfg);
        if (Options::enableMovHintRemoval())
            performMovHintRemoval(dfg);
        performCleanUp(dfg);
        performDCE(dfg); // We rely on this to kill dead code that won't be recognized as dead by LLVM.
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);
        performWatchpointCollection(dfg);
        
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldShowDisassembly(mode));
        
        bool haveLLVM;
        Safepoint::Result safepointResult;
        {
            GraphSafepoint safepoint(dfg, safepointResult);
            haveLLVM = initializeLLVM();
        }
        if (safepointResult.didGetCancelled())
            return CancelPath;
        
        if (!haveLLVM) {
            if (Options::ftlCrashesIfCantInitializeLLVM()) {
                dataLog("LLVM can't be initialized.\n");
                CRASH();
            }
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        FTL::State state(dfg);
        FTL::lowerDFGToLLVM(state);
        
        if (computeCompileTimes())
            m_timeBeforeFTL = monotonicallyIncreasingTimeMS();
        
        if (Options::llvmAlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }
        
        FTL::compile(state, safepointResult);
        if (safepointResult.didGetCancelled())
            return CancelPath;
        
        if (Options::llvmAlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }
        
        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        if (state.jitCode->stackmaps.stackSize() > Options::llvmMaxStackSize()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::link(state);
        
        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }
        
        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
#endif // ENABLE(FTL_JIT)
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
    }
}
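Throughout compileInThreadImpl, phases report whether they changed the graph, and analyses they may have invalidated (CFA, constant folding) are re-run only when needed. A sketch of that changed-flag pattern; Graph and the phase functions are hypothetical stand-ins:

struct Graph { /* IR under optimization */ };

bool performSimplification(Graph&) { return false; } // stubbed phases: each
bool performCSE(Graph&) { return false; }            // reports whether it changed the IR
void performCFA(Graph&) {}
void performConstantFolding(Graph&) {}

void optimize(Graph& graph)
{
    bool changed = false;
    changed |= performSimplification(graph);
    changed |= performCSE(graph);
    if (changed) {                 // state-at-head/tail may now be stale
        performCFA(graph);
        performConstantFolding(graph);
    }
}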