bool tryToDisassembleWithLLVM(
    const MacroAssemblerCodePtr& codePtr, size_t size, const char* prefix, PrintStream& out,
    InstructionSubsetHint)
{
    initializeLLVM();
    
    const char* triple;
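    // Pick the LLVM target triple that matches the CPU this build targets.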
#if CPU(X86_64)
    triple = "x86_64-apple-darwin";
#elif CPU(X86)
    triple = "x86-apple-darwin";
#elif CPU(ARM64)
    triple = "arm64-apple-darwin";
#else
#error "LLVM disassembler currently not supported on this CPU."
#endif

    char symbolString[symbolStringSize];
    
    LLVMDisasmContextRef disassemblyContext =
        llvm->CreateDisasm(triple, symbolString, 0, 0, symbolLookupCallback);
    RELEASE_ASSERT(disassemblyContext);
    
    char pcString[20];
    char instructionString[1000];
    
    uint8_t* pc = static_cast<uint8_t*>(codePtr.executableAddress());
    uint8_t* end = pc + size;
    
    while (pc < end) {
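        // Format the current PC, then ask LLVM to decode one instruction. If decoding
        // fails, emit the byte as raw data and advance by one so the loop always makes progress.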
        snprintf(
            pcString, sizeof(pcString), "0x%lx",
            static_cast<unsigned long>(bitwise_cast<uintptr_t>(pc)));

        size_t instructionSize = llvm->DisasmInstruction(
            disassemblyContext, pc, end - pc, bitwise_cast<uintptr_t>(pc),
            instructionString, sizeof(instructionString));
        
        if (!instructionSize)
            snprintf(instructionString, sizeof(instructionString), ".byte 0x%02x", *pc++);
        else
            pc += instructionSize;
        
        out.printf("%s%16s: %s\n", prefix, pcString, instructionString);
    }
    
    llvm->DisasmDispose(disassemblyContext);
    
    return true;
}
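For reference, the function above drives LLVM's C disassembler interface (llvm-c/Disassembler.h) through JSC's lazily loaded function table (the llvm->... calls). A minimal standalone sketch of the same interface, calling the LLVM-C entry points directly and with no symbol lookup callback, might look like the following; the helper name and buffer handling are illustrative, not part of WebKit:

#include <llvm-c/Disassembler.h>
#include <llvm-c/Target.h>
#include <cinttypes>
#include <cstdio>

// Illustrative helper: disassemble `size` bytes starting at `bytes`, printing one
// instruction per line and falling back to ".byte" for undecodable data, as above.
static void disassembleBuffer(const char* triple, uint8_t* bytes, size_t size, uint64_t pc)
{
    // LLVM requires the target, target-MC, and disassembler tables to be registered once.
    LLVMInitializeAllTargetInfos();
    LLVMInitializeAllTargetMCs();
    LLVMInitializeAllDisassemblers();

    LLVMDisasmContextRef context = LLVMCreateDisasm(triple, nullptr, 0, nullptr, nullptr);
    if (!context)
        return;

    char instruction[256];
    size_t offset = 0;
    while (offset < size) {
        size_t instructionSize = LLVMDisasmInstruction(
            context, bytes + offset, size - offset, pc + offset,
            instruction, sizeof(instruction));
        if (!instructionSize) {
            // Undecodable bytes are printed as raw data so the loop always advances.
            std::printf("0x%" PRIx64 ": .byte 0x%02x\n", pc + offset, bytes[offset]);
            offset++;
            continue;
        }
        std::printf("0x%" PRIx64 ": %s\n", pc + offset, instruction);
        offset += instructionSize;
    }

    LLVMDisasmDispose(context);
}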
Example #2
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    if (verboseCompilationEnabled() && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }
    
    Graph dfg(vm, *this, longLivedState);
    
    if (!parse(dfg)) {
        finalizer = adoptPtr(new FailedFinalizer(*this));
        return FailPath;
    }
    
    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);
    
    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);
    
    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = adoptPtr(new FailedFinalizer(*this));
            return FailPath;
        }
        performCPSRethreading(dfg);
    }
    
    if (validationEnabled())
        validate(dfg);
    
    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performTypeCheckHoisting(dfg);
    
    unsigned count = 1;
    dfg.m_fixpointState = FixpointNotConverged;
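    // Run the core optimizations to a fixed point: repeat CFA, constant folding, arguments
    // simplification, CFG simplification, and CSE until no phase reports a change.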
    for (;; ++count) {
        if (logCompilationChanges())
            dataLogF("DFG beginning optimization fixpoint iteration #%u.\n", count);
        bool changed = false;
        
        if (validationEnabled())
            validate(dfg);
        
        performCFA(dfg);
        changed |= performConstantFolding(dfg);
        changed |= performArgumentsSimplification(dfg);
        changed |= performCFGSimplification(dfg);
        changed |= performCSE(dfg);
        
        if (!changed)
            break;
        
        performCPSRethreading(dfg);
    }
    
    if (logCompilationChanges())
        dataLogF("DFG optimization fixpoint converged in %u iterations.\n", count);

    dfg.m_fixpointState = FixpointConverged;

    performStoreElimination(dfg);
    
    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.m_dominators.computeIfNecessary(dfg);
        dfg.m_naturalLoops.computeIfNecessary(dfg);
    }

    switch (mode) {
    case DFGMode: {
        performTierUpCheckInjection(dfg);
        break;
    }
    
    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = adoptPtr(new FailedFinalizer(*this));
            return FailPath;
        }
        
        performCriticalEdgeBreaking(dfg);
        performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performLICM(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performDCE(dfg); // We rely on this to convert dead SetLocals into the appropriate hint, and to kill dead code that won't be recognized as dead by LLVM.
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performFlushLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);
        
        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:");
        
        initializeLLVM();
        
        FTL::State state(dfg);
        FTL::lowerDFGToLLVM(state);
        
        if (Options::reportCompileTimes())
            beforeFTL = currentTimeMS();
        
        if (Options::llvmAlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }
        
        FTL::compile(state);

        if (Options::llvmAlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }
        
        FTL::link(state);
        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        break;
#endif // ENABLE(FTL_JIT)
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    
    performCPSRethreading(dfg);
    performDCE(dfg);
    performStackLayout(dfg);
    performVirtualRegisterAllocation(dfg);
    dumpAndVerifyGraph(dfg, "Graph after optimization:");

    JITCompiler dataFlowJIT(dfg);
    if (codeBlock->codeType() == FunctionCode) {
        dataFlowJIT.compileFunction();
        dataFlowJIT.linkFunction();
    } else {
        dataFlowJIT.compile();
        dataFlowJIT.link();
    }
    
    return DFGPath;
}
Example #3
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }
    
    Graph dfg(vm, *this, longLivedState);
    
    if (!parse(dfg)) {
        finalizer = std::make_unique<FailedFinalizer>(*this);
        return FailPath;
    }

    codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());
    
    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);
    
    if (Options::dumpGraphAfterParsing()) {
        dataLog("Graph after parsing:\n");
        dfg.dump();
    }

    performLiveCatchVariablePreservationPhase(dfg);

    if (Options::enableMaximalFlushInsertionPhase())
        performMaximalFlushInsertion(dfg);
    
    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);
    
    performStaticExecutionCountEstimation(dfg);
    
    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        performCPSRethreading(dfg);
    }
    
    if (validationEnabled())
        validate(dfg);
    
    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performStructureRegistration(dfg);
    performInvalidationPointInjection(dfg);
    performTypeCheckHoisting(dfg);
    
    dfg.m_fixpointState = FixpointNotConverged;
    
    // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
    // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
    // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
    // that the compiler compiles more quickly. We want the third tier to compile quickly, which
    // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint.
    if (validationEnabled())
        validate(dfg);
        
    performStrengthReduction(dfg);
    performLocalCSE(dfg);
    performCPSRethreading(dfg);
    performCFA(dfg);
    performConstantFolding(dfg);
    bool changed = false;
    changed |= performCFGSimplification(dfg);
    changed |= performLocalCSE(dfg);
    
    if (validationEnabled())
        validate(dfg);
    
    performCPSRethreading(dfg);
    if (!isFTL(mode)) {
        // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
        // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
        // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
        // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases,
        // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an
        // escape for all of the arguments. This then disables object allocation sinking.
        //
        // So, for now, we just disable this phase for the FTL.
        //
        // If we wanted to enable it, we'd have to do any of the following:
        // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before
        //   PutStack sinking and object allocation sinking.
        // - Make VarargsForwarding emit a GetLocal+SetLocal sequence, that we can later turn into
        //   GetStack+PutStack.
        //
        // But, it's not super valuable to enable those optimizations, since the FTL
        // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this
        // pathology.
        
        changed |= performVarargsForwarding(dfg); // Do this after CFG simplification and CPS rethreading.
    }
    if (changed) {
        performCFA(dfg);
        performConstantFolding(dfg);
    }
    
    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.m_dominators.computeIfNecessary(dfg);
        dfg.m_naturalLoops.computeIfNecessary(dfg);
        dfg.m_prePostNumbering.computeIfNecessary(dfg);
    }

    switch (mode) {
    case DFGMode: {
        dfg.m_fixpointState = FixpointConverged;
    
        performTierUpCheckInjection(dfg);

        performFastStoreBarrierInsertion(dfg);
        performCleanUp(dfg);
        performCPSRethreading(dfg);
        performDCE(dfg);
        performPhantomInsertion(dfg);
        performStackLayout(dfg);
        performVirtualRegisterAllocation(dfg);
        performWatchpointCollection(dfg);
        dumpAndVerifyGraph(dfg, "Graph after optimization:");
        
        JITCompiler dataFlowJIT(dfg);
        if (codeBlock->codeType() == FunctionCode)
            dataFlowJIT.compileFunction();
        else
            dataFlowJIT.compile();
        
        return DFGPath;
    }
    
    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        
        performCleanUp(dfg); // Reduce the graph size a bit.
        performCriticalEdgeBreaking(dfg);
        if (Options::createPreHeaders())
            performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performSSALowering(dfg);
        
        // Ideally, these would be run to fixpoint with the object allocation sinking phase.
        performArgumentsElimination(dfg);
        performPutStackSinking(dfg);
        
        performConstantHoisting(dfg);
        performGlobalCSE(dfg);
        performLivenessAnalysis(dfg);
        performIntegerRangeOptimization(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performConstantFolding(dfg);
        performCleanUp(dfg); // Reduce the graph size a lot.
        changed = false;
        changed |= performStrengthReduction(dfg);
        if (Options::enableObjectAllocationSinking()) {
            changed |= performCriticalEdgeBreaking(dfg);
            changed |= performObjectAllocationSinking(dfg);
        }
        if (changed) {
            // State-at-tail and state-at-head will be invalid if we did strength reduction since
            // it might increase live ranges.
            performLivenessAnalysis(dfg);
            performCFA(dfg);
            performConstantFolding(dfg);
        }
        
        // Currently, this relies on pre-headers still being valid. That precludes running CFG
        // simplification before it, unless we re-created the pre-headers. There wouldn't be anything
        // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point.
        // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that
        // then we'd need to do some simple SSA fix-up.
        performLICM(dfg);
        
        performCleanUp(dfg);
        performIntegerCheckCombining(dfg);
        performGlobalCSE(dfg);
        
        // At this point we're not allowed to do any further code motion because our reasoning
        // about code motion assumes that it's OK to insert GC points in random places.
        dfg.m_fixpointState = FixpointConverged;
        
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performGlobalStoreBarrierInsertion(dfg);
        if (Options::enableMovHintRemoval())
            performMovHintRemoval(dfg);
        performCleanUp(dfg);
        performDCE(dfg); // We rely on this to kill dead code that won't be recognized as dead by LLVM.
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);
        performWatchpointCollection(dfg);
        
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldShowDisassembly(mode));
        
        bool haveLLVM;
        Safepoint::Result safepointResult;
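        // Initializing LLVM can block for a while, so do it under a graph safepoint; if the
        // plan was cancelled while we were parked there, bail out of the compilation now.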
        {
            GraphSafepoint safepoint(dfg, safepointResult);
            haveLLVM = initializeLLVM();
        }
        if (safepointResult.didGetCancelled())
            return CancelPath;
        
        if (!haveLLVM) {
            if (Options::ftlCrashesIfCantInitializeLLVM()) {
                dataLog("LLVM can't be initialized.\n");
                CRASH();
            }
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        FTL::State state(dfg);
        FTL::lowerDFGToLLVM(state);
        
        if (computeCompileTimes())
            m_timeBeforeFTL = monotonicallyIncreasingTimeMS();
        
        if (Options::llvmAlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }
        
        FTL::compile(state, safepointResult);
        if (safepointResult.didGetCancelled())
            return CancelPath;
        
        if (Options::llvmAlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }
        
        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        if (state.jitCode->stackmaps.stackSize() > Options::llvmMaxStackSize()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::link(state);
        
        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }
        
        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
#endif // ENABLE(FTL_JIT)
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
    }
}