Example #1
MacroAssemblerCodeRef JITThunks::ctiStub(JSGlobalData* globalData, ThunkGenerator generator)
{
    // Key the cache on the generator itself; add() inserts an empty code ref
    // only when this generator has not been seen before.
    CTIStubMap::AddResult entry = m_ctiStubMap.add(generator, MacroAssemblerCodeRef());
    if (entry.isNewEntry)
        entry.iterator->value = generator(globalData);
    return entry.iterator->value;
}
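
Example 1 caches each generated thunk under its generator's function pointer, so a given thunk is compiled at most once per JITThunks instance. Below is a minimal standalone sketch of that memoization pattern in standard C++; CodeRef, Generator, and ThunkCache are illustrative stand-ins, not WebKit API.

#include <unordered_map>

struct CodeRef { void* entryPoint = nullptr; };  // stand-in for MacroAssemblerCodeRef
using Generator = CodeRef (*)();                 // stand-in for ThunkGenerator

class ThunkCache {
public:
    CodeRef stub(Generator generator)
    {
        // try_emplace mirrors HashMap::add: it inserts the default value only
        // if the key is absent, and reports whether the entry is new.
        auto [it, isNewEntry] = m_map.try_emplace(generator, CodeRef());
        if (isNewEntry)
            it->second = generator();            // generate the thunk exactly once
        return it->second;
    }

private:
    std::unordered_map<Generator, CodeRef> m_map;
};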
Example #2
MacroAssemblerCodeRef JITThunks::ctiStub(VM* vm, ThunkGenerator generator)
{
    LockHolder locker(m_lock);
    CTIStubMap::AddResult entry = m_ctiStubMap.add(generator, MacroAssemblerCodeRef());
    if (entry.isNewEntry) {
        // Compilation thread can only retrieve existing entries.
        ASSERT(!isCompilationThread());
        entry.iterator->value = generator(vm);
    }
    return entry.iterator->value;
}
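
Example 2 is the later VM-era version of the same function. The new LockHolder makes the cache safe to query from multiple threads, and the assertion documents that a concurrent compilation thread may only read entries that already exist, never generate new ones. A standalone sketch of the locked variant, reusing the same illustrative stand-in types:

#include <cassert>
#include <mutex>
#include <unordered_map>

struct CodeRef { void* entryPoint = nullptr; };  // stand-in for MacroAssemblerCodeRef
using Generator = CodeRef (*)();                 // stand-in for ThunkGenerator

class LockedThunkCache {
public:
    CodeRef stub(Generator generator, bool isCompilationThread)
    {
        std::lock_guard<std::mutex> locker(m_lock);  // plays the role of LockHolder
        auto [it, isNewEntry] = m_map.try_emplace(generator, CodeRef());
        if (isNewEntry) {
            // Mirrors ASSERT(!isCompilationThread()): only non-compilation
            // threads are allowed to populate the cache.
            assert(!isCompilationThread);
            it->second = generator();
        }
        return it->second;
    }

private:
    std::mutex m_lock;
    std::unordered_map<Generator, CodeRef> m_map;
};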
Example #3
NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, ThunkGenerator generator, Intrinsic intrinsic)
{
    if (NativeExecutable* nativeExecutable = m_hostFunctionStubMap->get(function))
        return nativeExecutable;

    // Prefer the specialized thunk when one exists and the JIT is available;
    // otherwise fall back to the generic native-call trampoline (or an empty
    // code ref when the JIT is off entirely).
    MacroAssemblerCodeRef code;
    if (generator) {
        if (globalData->canUseJIT())
            code = generator(globalData);
        else
            code = MacroAssemblerCodeRef();
    } else
        code = JIT::compileCTINativeCall(globalData, function);

    NativeExecutable* nativeExecutable = NativeExecutable::create(*globalData, code, function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), callHostFunctionAsConstructor, intrinsic);
    weakAdd(*m_hostFunctionStubMap, function, PassWeak<NativeExecutable>(nativeExecutable));
    return nativeExecutable;
}
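
Example 3 caches one NativeExecutable per host function, but via weakAdd, so the cache never keeps an executable alive against the garbage collector: a collected entry simply misses and is regenerated. A standalone analogue of that weak caching using std::weak_ptr, where shared_ptr ownership elsewhere plays the role of the GC; all names are illustrative.

#include <memory>
#include <unordered_map>

struct Executable {};            // stand-in for NativeExecutable
using HostFunction = void (*)(); // stand-in for NativeFunction

class HostStubCache {
public:
    std::shared_ptr<Executable> stub(HostFunction function)
    {
        // Return the cached executable if it is still alive.
        if (auto existing = m_map[function].lock())
            return existing;
        // Otherwise create a fresh one and remember it weakly, mirroring
        // weakAdd(*m_hostFunctionStubMap, function, PassWeak<...>(...)).
        auto fresh = std::make_shared<Executable>();
        m_map[function] = fresh;
        return fresh;
    }

private:
    std::unordered_map<HostFunction, std::weak_ptr<Executable>> m_map;
};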
Example #4
NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, ThunkGenerator generator, Intrinsic intrinsic)
{
    if (NativeExecutable* nativeExecutable = m_hostFunctionStubMap->get(std::make_pair(function, &callHostFunctionAsConstructor)))
        return nativeExecutable;

    MacroAssemblerCodeRef code;
    if (generator) {
        if (vm->canUseJIT())
            code = generator(vm);
        else
            code = MacroAssemblerCodeRef();
    } else
        code = JIT::compileCTINativeCall(vm, function);

    NativeExecutable* nativeExecutable = NativeExecutable::create(*vm, code, function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), callHostFunctionAsConstructor, intrinsic);
    weakAdd(*m_hostFunctionStubMap, std::make_pair(function, &callHostFunctionAsConstructor), PassWeak<NativeExecutable>(nativeExecutable));
    return nativeExecutable;
}
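
Relative to Example 3, this VM-era version widens the cache key from the bare function pointer to the pair (function, constructor trampoline), so one host function can map to distinct executables per constructor. A minimal sketch of pair keying; std::map is used here to avoid hand-writing a hash for the pair, and every name is illustrative rather than WebKit's.

#include <map>
#include <utility>

struct Executable {};            // stand-in for NativeExecutable
using HostFunction = void (*)(); // stand-in for NativeFunction
using Constructor = void (*)();  // stand-in for the constructor trampoline

// Keyed as in Example 4's std::make_pair(function, &callHostFunctionAsConstructor).
std::map<std::pair<HostFunction, Constructor>, Executable*> stubMap;

Executable* lookup(HostFunction function, Constructor constructor)
{
    auto it = stubMap.find(std::make_pair(function, constructor));
    return it != stubMap.end() ? it->second : nullptr;
}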
Example #5
void compile(State& state, Safepoint::Result& safepointResult)
{
    char* error = 0;
    
    {
        GraphSafepoint safepoint(state.graph, safepointResult);
        
        LLVMMCJITCompilerOptions options;
        llvm->InitializeMCJITCompilerOptions(&options, sizeof(options));
        options.OptLevel = Options::llvmBackendOptimizationLevel();
        options.NoFramePointerElim = true;
        if (Options::useLLVMSmallCodeModel())
            options.CodeModel = LLVMCodeModelSmall;
        options.EnableFastISel = enableLLVMFastISel;
        options.MCJMM = llvm->CreateSimpleMCJITMemoryManager(
            &state, mmAllocateCodeSection, mmAllocateDataSection, mmApplyPermissions, mmDestroy);
    
        LLVMExecutionEngineRef engine;
        
        if (isARM64()) {
#if OS(DARWIN)
            llvm->SetTarget(state.module, "arm64-apple-ios");
#elif OS(LINUX)
            llvm->SetTarget(state.module, "aarch64-linux-gnu");
#else
#error "Unrecognized OS"
#endif
        }

        if (llvm->CreateMCJITCompilerForModule(&engine, state.module, &options, sizeof(options), &error)) {
            dataLog("FATAL: Could not create LLVM execution engine: ", error, "\n");
            CRASH();
        }
        
        // At this point we no longer own the module.
        LModule module = state.module;
        state.module = nullptr;

        // The data layout also has to be set in the module. Get the data layout from the MCJIT and apply
        // it to the module.
        LLVMTargetMachineRef targetMachine = llvm->GetExecutionEngineTargetMachine(engine);
        LLVMTargetDataRef targetData = llvm->GetExecutionEngineTargetData(engine);
        char* stringRepOfTargetData = llvm->CopyStringRepOfTargetData(targetData);
        llvm->SetDataLayout(module, stringRepOfTargetData);
        free(stringRepOfTargetData);

        LLVMPassManagerRef functionPasses = 0;
        LLVMPassManagerRef modulePasses;

        if (Options::llvmSimpleOpt()) {
            modulePasses = llvm->CreatePassManager();
            llvm->AddTargetData(targetData, modulePasses);
            llvm->AddAnalysisPasses(targetMachine, modulePasses);
            llvm->AddPromoteMemoryToRegisterPass(modulePasses);
            llvm->AddGlobalOptimizerPass(modulePasses);
            llvm->AddFunctionInliningPass(modulePasses);
            llvm->AddPruneEHPass(modulePasses);
            llvm->AddGlobalDCEPass(modulePasses);
            llvm->AddConstantPropagationPass(modulePasses);
            llvm->AddAggressiveDCEPass(modulePasses);
            llvm->AddInstructionCombiningPass(modulePasses);
            // BEGIN - DO NOT CHANGE THE ORDER OF THE ALIAS ANALYSIS PASSES
            llvm->AddTypeBasedAliasAnalysisPass(modulePasses);
            llvm->AddBasicAliasAnalysisPass(modulePasses);
            // END - DO NOT CHANGE THE ORDER OF THE ALIAS ANALYSIS PASSES
            llvm->AddGVNPass(modulePasses);
            llvm->AddCFGSimplificationPass(modulePasses);
            llvm->AddDeadStoreEliminationPass(modulePasses);
            
            if (enableLLVMFastISel)
                llvm->AddLowerSwitchPass(modulePasses);

            llvm->RunPassManager(modulePasses, module);
        } else {
            LLVMPassManagerBuilderRef passBuilder = llvm->PassManagerBuilderCreate();
            llvm->PassManagerBuilderSetOptLevel(passBuilder, Options::llvmOptimizationLevel());
            llvm->PassManagerBuilderUseInlinerWithThreshold(passBuilder, 275);
            llvm->PassManagerBuilderSetSizeLevel(passBuilder, Options::llvmSizeLevel());
        
            functionPasses = llvm->CreateFunctionPassManagerForModule(module);
            modulePasses = llvm->CreatePassManager();
        
            llvm->AddTargetData(llvm->GetExecutionEngineTargetData(engine), modulePasses);
        
            llvm->PassManagerBuilderPopulateFunctionPassManager(passBuilder, functionPasses);
            llvm->PassManagerBuilderPopulateModulePassManager(passBuilder, modulePasses);
        
            llvm->PassManagerBuilderDispose(passBuilder);
        
            llvm->InitializeFunctionPassManager(functionPasses);
            for (LValue function = llvm->GetFirstFunction(module); function; function = llvm->GetNextFunction(function))
                llvm->RunFunctionPassManager(functionPasses, function);
            llvm->FinalizeFunctionPassManager(functionPasses);
        
            llvm->RunPassManager(modulePasses, module);
        }

        if (shouldShowDisassembly() || verboseCompilationEnabled())
            state.dumpState(module, "after optimization");
        
        // FIXME: Need to add support for the case where JIT memory allocation failed.
        // https://bugs.webkit.org/show_bug.cgi?id=113620
        state.generatedFunction = reinterpret_cast<GeneratedFunction>(llvm->GetPointerToGlobal(engine, state.function));
        if (functionPasses)
            llvm->DisposePassManager(functionPasses);
        llvm->DisposePassManager(modulePasses);
        llvm->DisposeExecutionEngine(engine);
    }

    if (safepointResult.didGetCancelled())
        return;
    RELEASE_ASSERT(!state.graph.m_vm.heap.isCollecting());
    
    if (state.allocationFailed)
        return;
    
    if (shouldShowDisassembly()) {
        for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
            ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
            dataLog(
                "Generated LLVM code for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                " #", i, ", ", state.codeSectionNames[i], ":\n");
            disassemble(
                MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                "    ", WTF::dataFile(), LLVMSubset);
        }
        
        for (unsigned i = 0; i < state.jitCode->dataSections().size(); ++i) {
            DataSection* section = state.jitCode->dataSections()[i].get();
            dataLog(
                "Generated LLVM data section for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                " #", i, ", ", state.dataSectionNames[i], ":\n");
            dumpDataSection(section, "    ");
        }
    }
    
    std::unique_ptr<RegisterAtOffsetList> registerOffsets = parseUnwindInfo(
        state.unwindDataSection, state.unwindDataSectionSize,
        state.generatedFunction);
    if (shouldShowDisassembly()) {
        dataLog("Unwind info for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
        dataLog("    ", *registerOffsets, "\n");
    }
    state.graph.m_codeBlock->setCalleeSaveRegisters(WTF::move(registerOffsets));
    
    if (state.stackmapsSection && state.stackmapsSection->size()) {
        if (shouldShowDisassembly()) {
            dataLog(
                "Generated LLVM stackmaps section for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
            dataLog("    Raw data:\n");
            dumpDataSection(state.stackmapsSection.get(), "    ");
        }
        
        RefPtr<DataView> stackmapsData = DataView::create(
            ArrayBuffer::create(state.stackmapsSection->base(), state.stackmapsSection->size()));
        state.jitCode->stackmaps.parse(stackmapsData.get());
    
        if (shouldShowDisassembly()) {
            dataLog("    Structured data:\n");
            state.jitCode->stackmaps.dumpMultiline(WTF::dataFile(), "        ");
        }
        
        StackMaps::RecordMap recordMap = state.jitCode->stackmaps.computeRecordMap();
        fixFunctionBasedOnStackMaps(
            state, state.graph.m_codeBlock, state.jitCode.get(), state.generatedFunction,
            recordMap);
        if (state.allocationFailed)
            return;
        
        if (shouldShowDisassembly() || Options::asyncDisassembly()) {
            for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
                if (state.codeSectionNames[i] != SECTION_NAME("text"))
                    continue;
                
                ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
                
                CString header = toCString(
                    "Generated LLVM code after stackmap-based fix-up for ",
                    CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                    " in ", state.graph.m_plan.mode, " #", i, ", ",
                    state.codeSectionNames[i], ":\n");
                
                if (Options::asyncDisassembly()) {
                    disassembleAsynchronously(
                        header, MacroAssemblerCodeRef(handle), handle->sizeInBytes(), "    ",
                        LLVMSubset);
                    continue;
                }
                
                dataLog(header);
                disassemble(
                    MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                    "    ", WTF::dataFile(), LLVMSubset);
            }
        }
    }
}
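
The heart of Example 5 is standard MCJIT bring-up done through WebKit's `llvm` wrapper object: configure LLVMMCJITCompilerOptions, hand the module to a freshly created execution engine (which takes ownership, hence state.module = nullptr), run either the hand-picked or the PassManagerBuilder-driven optimization pipeline, then pull out the compiled function pointer. Below is a reduced sketch of the engine-creation step written directly against the MCJIT-era LLVM-C API; error handling and option values are simplified relative to the example.

#include <llvm-c/Core.h>
#include <llvm-c/ExecutionEngine.h>
#include <llvm-c/Target.h>
#include <cstdio>
#include <cstdlib>

// Create an MCJIT execution engine for `module`, mirroring the options the
// example sets. On success the engine owns the module, which is why Example 5
// nulls out state.module immediately after this call succeeds.
LLVMExecutionEngineRef createEngine(LLVMModuleRef module)
{
    LLVMLinkInMCJIT();
    LLVMInitializeNativeTarget();
    LLVMInitializeNativeAsmPrinter();

    LLVMMCJITCompilerOptions options;
    LLVMInitializeMCJITCompilerOptions(&options, sizeof(options));
    options.OptLevel = 2;           // Example 5 reads this from Options::llvmBackendOptimizationLevel()
    options.NoFramePointerElim = 1; // keep frame pointers, as the FTL requires

    LLVMExecutionEngineRef engine;
    char* error = nullptr;
    if (LLVMCreateMCJITCompilerForModule(&engine, module, &options, sizeof(options), &error)) {
        std::fprintf(stderr, "Could not create LLVM execution engine: %s\n", error);
        LLVMDisposeMessage(error);
        std::abort();
    }
    return engine;
}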
Example #6
MacroAssemblerCodeRef generateRegisterPreservationWrapper(VM& vm, ExecutableBase* executable, MacroAssemblerCodePtr target)
{
#if ENABLE(FTL_JIT)
    // We shouldn't ever be generating wrappers for native functions.
    RegisterSet toSave = registersToPreserve();
    ptrdiff_t offset = registerPreservationOffset();
    
    AssemblyHelpers jit(&vm, 0);
    
    jit.preserveReturnAddressAfterCall(GPRInfo::regT1);
    jit.load32(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (JSStack::ArgumentCount - JSStack::CallerFrameAndPCSize) * sizeof(Register) + PayloadOffset),
        GPRInfo::regT2);
    
    // Place the stack pointer where we want it to be.
    jit.subPtr(AssemblyHelpers::TrustedImm32(offset), AssemblyHelpers::stackPointerRegister);
    
    // Compute the number of things we will be copying.
    jit.add32(
        AssemblyHelpers::TrustedImm32(
            JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize),
        GPRInfo::regT2);

    ASSERT(!toSave.get(GPRInfo::regT4));
    jit.move(AssemblyHelpers::stackPointerRegister, GPRInfo::regT4);
    
    AssemblyHelpers::Label loop = jit.label();
    jit.sub32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.load64(AssemblyHelpers::Address(GPRInfo::regT4, offset), GPRInfo::regT0);
    jit.store64(GPRInfo::regT0, GPRInfo::regT4);
    jit.addPtr(AssemblyHelpers::TrustedImm32(sizeof(Register)), GPRInfo::regT4);
    jit.branchTest32(AssemblyHelpers::NonZero, GPRInfo::regT2).linkTo(loop, &jit);

    // At this point regT4 + offset points to where we save things.
    ptrdiff_t currentOffset = 0;
    jit.storePtr(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT4, currentOffset));
    
    for (GPRReg gpr = AssemblyHelpers::firstRegister(); gpr <= AssemblyHelpers::lastRegister(); gpr = static_cast<GPRReg>(gpr + 1)) {
        if (!toSave.get(gpr))
            continue;
        currentOffset += sizeof(Register);
        jit.store64(gpr, AssemblyHelpers::Address(GPRInfo::regT4, currentOffset));
    }
    
    // Assume that there aren't any saved FP registers.
    
    // Restore the tag registers.
    jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
    jit.add64(AssemblyHelpers::TrustedImm32(TagMask - TagTypeNumber), GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
    
    jit.move(
        AssemblyHelpers::TrustedImmPtr(
            vm.getCTIStub(registerRestorationThunkGenerator).code().executableAddress()),
        GPRInfo::nonArgGPR0);
    jit.restoreReturnAddressBeforeReturn(GPRInfo::nonArgGPR0);
    AssemblyHelpers::Jump jump = jit.jump();
    
    LinkBuffer linkBuffer(vm, &jit, GLOBAL_THUNK_ID);
    linkBuffer.link(jump, CodeLocationLabel(target));

    if (Options::verboseFTLToJSThunk())
        dataLog("Need a thunk for calls from FTL to non-FTL version of ", *executable, "\n");
    
    return FINALIZE_DFG_CODE(linkBuffer, ("Register preservation wrapper for %s/%s, %p", toCString(executable->hashFor(CodeForCall)).data(), toCString(executable->hashFor(CodeForConstruct)).data(), target.executableAddress()));
#else // ENABLE(FTL_JIT)
    UNUSED_PARAM(vm);
    UNUSED_PARAM(executable);
    UNUSED_PARAM(target);
    // We don't support non-FTL builds for two reasons:
    // - It just so happens that currently only the FTL bottoms out in this code.
    // - The code above uses 64-bit instructions. It doesn't necessarily have to; it would be
    //   easy to change it so that it doesn't. But obviously making that change would be a
    //   prerequisite to removing this #if.
    UNREACHABLE_FOR_PLATFORM();
    return MacroAssemblerCodeRef();
#endif // ENABLE(FTL_JIT)
}
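
The emitted loop in Example 6 is easy to lose in the assembler calls. Restated in plain C++ below: the generated code slides the frame header and arguments down to the new, lower stack pointer one 8-byte slot at a time, which vacates the region in which the return address and the preserved GPRs are then stored. This is a hypothetical restatement of what the generated code computes, not JSC code.

#include <cstddef>
#include <cstdint>

// `newStackPointer` is where the stack pointer was moved to (old sp minus the
// register-preservation offset); `offsetInSlots` is that offset in 8-byte slots.
void shiftFrame(uint64_t* newStackPointer, size_t slotsToCopy, size_t offsetInSlots)
{
    // Copy the frame header and arguments down to the new stack pointer,
    // lowest address first, exactly as the load64/store64 loop does.
    for (size_t i = 0; i < slotsToCopy; ++i)
        newStackPointer[i] = newStackPointer[i + offsetInSlots];
    // After the loop, the wrapper saves the return address and the preserved
    // GPRs starting at (newStackPointer + slotsToCopy + offsetInSlots), i.e.
    // regT4 + offset in the assembly above.
}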