static void dumpAndVerifyGraph(Graph& graph, const char* text)
{
    GraphDumpMode modeForFinalValidate = DumpGraph;
    if (verboseCompilationEnabled()) {
        dataLog(text, "\n");
        graph.dump();
        modeForFinalValidate = DontDumpGraph;
    }
    if (validationEnabled())
        validate(graph, modeForFinalValidate);
}
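// Plan::compileInThreadImpl() below is the heart of a compilation plan: it runs the
// DFG phase pipeline on a compiler thread and reports which path the compilation
// took. A minimal sketch of how a caller might consume the result (the wrapper
// function is hypothetical; only CompilationPath and its enumerators come from
// this code):
//
//     void runPlan(Plan& plan, LongLivedState& longLivedState)
//     {
//         switch (plan.compileInThreadImpl(longLivedState)) {
//         case Plan::FailPath:
//             // finalizer is a FailedFinalizer; execution stays in the lower tiers.
//             break;
//         default:
//             // DFGPath/FTLPath: the finalizer installs the new code later.
//             break;
//         }
//     }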
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    if (verboseCompilationEnabled() && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }

    Graph dfg(vm, *this, longLivedState);

    if (!parse(dfg)) {
        finalizer = adoptPtr(new FailedFinalizer(*this));
        return FailPath;
    }

    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);

    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);

    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = adoptPtr(new FailedFinalizer(*this));
            return FailPath;
        }
        performCPSRethreading(dfg);
    }

    if (validationEnabled())
        validate(dfg);

    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performTypeCheckHoisting(dfg);

    unsigned count = 1;
    dfg.m_fixpointState = FixpointNotConverged;
    for (;; ++count) {
        if (logCompilationChanges())
            dataLogF("DFG beginning optimization fixpoint iteration #%u.\n", count);
        bool changed = false;

        if (validationEnabled())
            validate(dfg);

        performCFA(dfg);
        changed |= performConstantFolding(dfg);
        changed |= performArgumentsSimplification(dfg);
        changed |= performCFGSimplification(dfg);
        changed |= performCSE(dfg);

        if (!changed)
            break;
        performCPSRethreading(dfg);
    }

    if (logCompilationChanges())
        dataLogF("DFG optimization fixpoint converged in %u iterations.\n", count);

    dfg.m_fixpointState = FixpointConverged;

    performStoreElimination(dfg);

    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.m_dominators.computeIfNecessary(dfg);
        dfg.m_naturalLoops.computeIfNecessary(dfg);
    }

    switch (mode) {
    case DFGMode: {
        performTierUpCheckInjection(dfg);
        break;
    }

    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = adoptPtr(new FailedFinalizer(*this));
            return FailPath;
        }

        performCriticalEdgeBreaking(dfg);
        performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performLICM(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performDCE(dfg); // We rely on this to convert dead SetLocals into the appropriate hint, and to kill dead code that won't be recognized as dead by LLVM.
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performFlushLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);

        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:");

        initializeLLVM();

        FTL::State state(dfg);
        FTL::lowerDFGToLLVM(state);

        if (Options::reportCompileTimes())
            beforeFTL = currentTimeMS();

        if (Options::llvmAlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::compile(state);

        if (Options::llvmAlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::link(state);
        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        break;
#endif // ENABLE(FTL_JIT)
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    performCPSRethreading(dfg);
    performDCE(dfg);
    performStackLayout(dfg);
    performVirtualRegisterAllocation(dfg);
    dumpAndVerifyGraph(dfg, "Graph after optimization:");

    JITCompiler dataFlowJIT(dfg);
    if (codeBlock->codeType() == FunctionCode) {
        dataFlowJIT.compileFunction();
        dataFlowJIT.linkFunction();
    } else {
        dataFlowJIT.compile();
        dataFlowJIT.link();
    }

    return DFGPath;
}
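// FTL::compile() below hands the lowered module to LLVM's MCJIT through the llvm->
// API shim. The memory-manager callbacks it installs (mmAllocateCodeSection and
// friends) are not shown in this excerpt; a sketch of the code-section callback's
// likely shape, based on the LLVM-C simple-MCJIT-memory-manager contract (the body
// is an assumption; only the signature is dictated by the API):
//
//     static uint8_t* mmAllocateCodeSection(
//         void* opaqueState, uintptr_t size, unsigned alignment, unsigned sectionID,
//         const char* sectionName)
//     {
//         State& state = *static_cast<State*>(opaqueState);
//         // Allocate executable memory and record the handle and section name so
//         // that the disassembly and stackmap fix-up below can find them again.
//         ...
//     }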
void compile(State& state, Safepoint::Result& safepointResult)
{
    char* error = 0;

    {
        GraphSafepoint safepoint(state.graph, safepointResult);

        LLVMMCJITCompilerOptions options;
        llvm->InitializeMCJITCompilerOptions(&options, sizeof(options));
        options.OptLevel = Options::llvmBackendOptimizationLevel();
        options.NoFramePointerElim = true;
        if (Options::useLLVMSmallCodeModel())
            options.CodeModel = LLVMCodeModelSmall;
        options.EnableFastISel = Options::enableLLVMFastISel();
        options.MCJMM = llvm->CreateSimpleMCJITMemoryManager(
            &state, mmAllocateCodeSection, mmAllocateDataSection, mmApplyPermissions, mmDestroy);

        LLVMExecutionEngineRef engine;

        if (isARM64()) {
#if OS(DARWIN)
            llvm->SetTarget(state.module, "arm64-apple-ios");
#elif OS(LINUX)
            llvm->SetTarget(state.module, "aarch64-linux-gnu");
#else
#error "Unrecognized OS"
#endif
        }

        if (llvm->CreateMCJITCompilerForModule(&engine, state.module, &options, sizeof(options), &error)) {
            dataLog("FATAL: Could not create LLVM execution engine: ", error, "\n");
            CRASH();
        }

        // The data layout also has to be set in the module. Get the data layout from the MCJIT and apply
        // it to the module.
        LLVMTargetMachineRef targetMachine = llvm->GetExecutionEngineTargetMachine(engine);
        LLVMTargetDataRef targetData = llvm->GetExecutionEngineTargetData(engine);
        char* stringRepOfTargetData = llvm->CopyStringRepOfTargetData(targetData);
        llvm->SetDataLayout(state.module, stringRepOfTargetData);
        free(stringRepOfTargetData);

        LLVMPassManagerRef functionPasses = 0;
        LLVMPassManagerRef modulePasses;

        if (Options::llvmSimpleOpt()) {
            modulePasses = llvm->CreatePassManager();
            llvm->AddTargetData(targetData, modulePasses);
            llvm->AddAnalysisPasses(targetMachine, modulePasses);
            llvm->AddPromoteMemoryToRegisterPass(modulePasses);
            llvm->AddGlobalOptimizerPass(modulePasses);
            llvm->AddFunctionInliningPass(modulePasses);
            llvm->AddPruneEHPass(modulePasses);
            llvm->AddGlobalDCEPass(modulePasses);
            llvm->AddConstantPropagationPass(modulePasses);
            llvm->AddAggressiveDCEPass(modulePasses);
            llvm->AddInstructionCombiningPass(modulePasses);
            // BEGIN - DO NOT CHANGE THE ORDER OF THE ALIAS ANALYSIS PASSES
            llvm->AddTypeBasedAliasAnalysisPass(modulePasses);
            llvm->AddBasicAliasAnalysisPass(modulePasses);
            // END - DO NOT CHANGE THE ORDER OF THE ALIAS ANALYSIS PASSES
            llvm->AddGVNPass(modulePasses);
            llvm->AddCFGSimplificationPass(modulePasses);
            llvm->AddDeadStoreEliminationPass(modulePasses);

            llvm->RunPassManager(modulePasses, state.module);
        } else {
            LLVMPassManagerBuilderRef passBuilder = llvm->PassManagerBuilderCreate();
            llvm->PassManagerBuilderSetOptLevel(passBuilder, Options::llvmOptimizationLevel());
            llvm->PassManagerBuilderUseInlinerWithThreshold(passBuilder, 275);
            llvm->PassManagerBuilderSetSizeLevel(passBuilder, Options::llvmSizeLevel());

            functionPasses = llvm->CreateFunctionPassManagerForModule(state.module);
            modulePasses = llvm->CreatePassManager();

            llvm->AddTargetData(llvm->GetExecutionEngineTargetData(engine), modulePasses);

            llvm->PassManagerBuilderPopulateFunctionPassManager(passBuilder, functionPasses);
            llvm->PassManagerBuilderPopulateModulePassManager(passBuilder, modulePasses);

            llvm->PassManagerBuilderDispose(passBuilder);

            llvm->InitializeFunctionPassManager(functionPasses);
            for (LValue function = llvm->GetFirstFunction(state.module); function; function = llvm->GetNextFunction(function))
                llvm->RunFunctionPassManager(functionPasses, function);
            llvm->FinalizeFunctionPassManager(functionPasses);

            llvm->RunPassManager(modulePasses, state.module);
        }

        if (shouldShowDisassembly() || verboseCompilationEnabled())
            state.dumpState("after optimization");

        // FIXME: Need to add support for the case where JIT memory allocation failed.
        // https://bugs.webkit.org/show_bug.cgi?id=113620
        state.generatedFunction = reinterpret_cast<GeneratedFunction>(llvm->GetPointerToGlobal(engine, state.function));
        if (functionPasses)
            llvm->DisposePassManager(functionPasses);
        llvm->DisposePassManager(modulePasses);
        llvm->DisposeExecutionEngine(engine);
    }

    if (safepointResult.didGetCancelled())
        return;
    RELEASE_ASSERT(!state.graph.m_vm.heap.isCollecting());

    if (shouldShowDisassembly()) {
        for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
            ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
            dataLog(
                "Generated LLVM code for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                " #", i, ", ", state.codeSectionNames[i], ":\n");
            disassemble(
                MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                "    ", WTF::dataFile(), LLVMSubset);
        }

        for (unsigned i = 0; i < state.jitCode->dataSections().size(); ++i) {
            DataSection* section = state.jitCode->dataSections()[i].get();
            dataLog(
                "Generated LLVM data section for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                " #", i, ", ", state.dataSectionNames[i], ":\n");
            dumpDataSection(section, "    ");
        }
    }

    bool didSeeUnwindInfo = state.jitCode->unwindInfo.parse(
        state.unwindDataSection, state.unwindDataSectionSize,
        state.generatedFunction);
    if (shouldShowDisassembly()) {
        dataLog("Unwind info for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
        if (didSeeUnwindInfo)
            dataLog("    ", state.jitCode->unwindInfo, "\n");
        else
            dataLog("    <no unwind info>\n");
    }

    if (state.stackmapsSection && state.stackmapsSection->size()) {
        if (shouldShowDisassembly()) {
            dataLog(
                "Generated LLVM stackmaps section for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
            dataLog("    Raw data:\n");
            dumpDataSection(state.stackmapsSection.get(), "    ");
        }

        RefPtr<DataView> stackmapsData = DataView::create(
            ArrayBuffer::create(state.stackmapsSection->base(), state.stackmapsSection->size()));
        state.jitCode->stackmaps.parse(stackmapsData.get());

        if (shouldShowDisassembly()) {
            dataLog("    Structured data:\n");
            state.jitCode->stackmaps.dumpMultiline(WTF::dataFile(), "        ");
        }

        StackMaps::RecordMap recordMap = state.jitCode->stackmaps.computeRecordMap();
        fixFunctionBasedOnStackMaps(
            state, state.graph.m_codeBlock, state.jitCode.get(), state.generatedFunction,
            recordMap, didSeeUnwindInfo);

        if (shouldShowDisassembly()) {
            for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
                if (state.codeSectionNames[i] != SECTION_NAME("text"))
                    continue;

                ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
                dataLog(
                    "Generated LLVM code after stackmap-based fix-up for ",
                    CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                    " in ", state.graph.m_plan.mode,
                    " #", i, ", ", state.codeSectionNames[i], ":\n");
                disassemble(
                    MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                    "    ", WTF::dataFile(), LLVMSubset);
            }
        }
    }

    state.module = 0; // We no longer own the module.
}
static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, bool didSeeUnwindInfo)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps stackmaps = jitCode->stackmaps;

    int localsOffset = offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;

    int varargsSpillSlotsOffset;
    if (state.varargsSpillSlotsStackmapID != UINT_MAX)
        varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);
    else
        varargsSpillSlotsOffset = 0;

    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;

        if (inlineCallFrame->argumentsRegister.isValid())
            inlineCallFrame->argumentsRegister += localsOffset;

        if (inlineCallFrame->argumentCountRegister.isValid())
            inlineCallFrame->argumentCountRegister += localsOffset;

        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }

        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }
    }

    if (codeBlock->usesArguments()) {
        codeBlock->setArgumentsRegister(
            VirtualRegister(codeBlock->argumentsRegister().offset() + localsOffset));
    }

    MacroAssembler::Label stackOverflowException;

    {
        CCallHelpers checkJIT(&vm, codeBlock);

        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        stackOverflowException = checkJIT.label();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, checkJIT, codeBlock, JITCompilationMustSucceed);
        linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
        linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, FunctionPtr(lookupExceptionHandlerFromCallerFrame));

        state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
    }

    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());
        RELEASE_ASSERT(didSeeUnwindInfo);

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, exitThunkGenerator, codeBlock, JITCompilationMustSucceed);

        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());

        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];

            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");

            auto iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);

            for (unsigned j = exit.m_values.size(); j--;) {
                ExitValue value = exit.m_values[j];
                if (!value.isInJSStackSomehow())
                    continue;
                if (!value.virtualRegister().isLocal())
                    continue;
                exit.m_values[j] = value.withVirtualRegister(
                    VirtualRegister(value.virtualRegister().offset() + localsOffset));
            }

            if (verboseCompilationEnabled()) {
                DumpContext context;
                dataLog("    Exit values: ", inContext(exit.m_values, &context), "\n");
            }
        }

        state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
    }

    if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty() || !state.checkIns.isEmpty()) {
        CCallHelpers slowPathJIT(&vm, codeBlock);

        CCallHelpers::JumpList exceptionTarget;

        for (unsigned i = state.getByIds.size(); i--;) {
            GetByIdDescriptor& getById = state.getByIds[i];

            if (verboseCompilationEnabled())
                dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");

            auto iter = recordMap.find(getById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];

                RegisterSet usedRegisters = usedRegistersFor(record);

                GPRReg result = record.locations[0].directGPR();
                GPRReg base = record.locations[1].directGPR();

                JITGetByIdGenerator gen(
                    codeBlock, getById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(result), NeedToSpill);

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, getById.codeOrigin(), &exceptionTarget,
                    operationGetByIdOptimize, result, gen.stubInfo(), base, getById.uid());

                gen.reportSlowPathCall(begin, call);

                getById.m_slowPathDone.append(slowPathJIT.jump());
                getById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.putByIds.size(); i--;) {
            PutByIdDescriptor& putById = state.putByIds[i];

            if (verboseCompilationEnabled())
                dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");

            auto iter = recordMap.find(putById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];

                RegisterSet usedRegisters = usedRegistersFor(record);

                GPRReg base = record.locations[0].directGPR();
                GPRReg value = record.locations[1].directGPR();

                JITPutByIdGenerator gen(
                    codeBlock, putById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(value), GPRInfo::patchpointScratchRegister, NeedToSpill,
                    putById.ecmaMode(), putById.putKind());

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, putById.codeOrigin(), &exceptionTarget,
                    gen.slowPathFunction(), gen.stubInfo(), value, base, putById.uid());

                gen.reportSlowPathCall(begin, call);

                putById.m_slowPathDone.append(slowPathJIT.jump());
                putById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            CheckInDescriptor& checkIn = state.checkIns[i];

            if (verboseCompilationEnabled())
                dataLog("Handling checkIn stackmap #", checkIn.stackmapID(), "\n");

            auto iter = recordMap.find(checkIn.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                RegisterSet usedRegisters = usedRegistersFor(record);
                GPRReg result = record.locations[0].directGPR();
                GPRReg obj = record.locations[1].directGPR();
                StructureStubInfo* stubInfo = codeBlock->addStubInfo();
                stubInfo->codeOrigin = checkIn.codeOrigin();
                stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
                stubInfo->patch.valueGPR = static_cast<int8_t>(result);
                stubInfo->patch.usedRegisters = usedRegisters;
                stubInfo->patch.spillMode = NeedToSpill;

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call slowCall = callOperation(
                    state, usedRegisters, slowPathJIT, checkIn.codeOrigin(), &exceptionTarget,
                    operationInOptimize, result, stubInfo, obj, checkIn.m_id);

                checkIn.m_slowPathDone.append(slowPathJIT.jump());
                checkIn.m_generators.append(CheckInGenerator(stubInfo, slowCall, begin));
            }
        }

        exceptionTarget.link(&slowPathJIT);
        MacroAssembler::Jump exceptionJump = slowPathJIT.jump();

        state.finalizer->sideCodeLinkBuffer = std::make_unique<LinkBuffer>(vm, slowPathJIT, codeBlock, JITCompilationMustSucceed);
        state.finalizer->sideCodeLinkBuffer->link(
            exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());

        for (unsigned i = state.getByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
                sizeOfGetById());
        }
        for (unsigned i = state.putByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
                sizeOfPutById());
        }
        for (unsigned i = state.checkIns.size(); i--;) {
            generateCheckInICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
                sizeOfIn());
        }
    }

    adjustCallICsForStackmaps(state.jsCalls, recordMap);

    for (unsigned i = state.jsCalls.size(); i--;) {
        JSCall& call = state.jsCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;

        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfCall());
        if (!linkBuffer.isValid()) {
            dataLog("Failed to insert inline cache for call because we thought the size would be ", sizeOfCall(), " but it ended up being ", fastPathJIT.m_assembler.codeSize(), " prior to compaction.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }

        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfCall() - linkBuffer.size());

        call.link(vm, linkBuffer);
    }

    adjustCallICsForStackmaps(state.jsCallVarargses, recordMap);

    for (unsigned i = state.jsCallVarargses.size(); i--;) {
        JSCallVarargs& call = state.jsCallVarargses[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, graph, varargsSpillSlotsOffset);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = sizeOfICFor(call.node());

        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfIC);
        if (!linkBuffer.isValid()) {
            dataLog("Failed to insert inline cache for varargs call (specifically, ", Graph::opName(call.node()->op()), ") because we thought the size would be ", sizeOfIC, " but it ended up being ", fastPathJIT.m_assembler.codeSize(), " prior to compaction.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }

        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfIC - linkBuffer.size());

        call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
    }

    RepatchBuffer repatchBuffer(codeBlock);

    auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];

            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            RELEASE_ASSERT(stackOverflowException.isSet());

            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
        }
    }

    iter = recordMap.find(state.handleExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];

            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        }
    }

    for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
        OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
        OSRExit& exit = jitCode->osrExit[exitIndex];

        iter = recordMap.find(exit.m_stackmapID);

        Vector<const void*> codeAddresses;

        if (iter != recordMap.end()) {
            for (unsigned i = iter->value.size(); i--;) {
                StackMaps::Record& record = iter->value[i];

                CodeLocationLabel source = CodeLocationLabel(
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

                codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());

                if (info.m_isInvalidationPoint)
                    jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
                else
                    repatchBuffer.replaceWithJump(source, info.m_thunkAddress);
            }
        }

        if (graph.compilation())
            graph.compilation()->addOSRExitSite(codeAddresses);
    }
}
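// What follows is a later revision of Plan::compileInThreadImpl() from the same
// pipeline, after the FTL backend moved from LLVM to B3: the optimization fixpoint
// loop is gone (the comment inside explains the trade-off), failure paths use
// std::make_unique instead of adoptPtr, allocation failure is checked explicitly
// after FTL::compile() and FTL::link(), and a CancelPath result lets the GC cancel
// an in-flight compilation at a safepoint.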
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    cleanMustHandleValuesIfNecessary();

    if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }

    Graph dfg(*vm, *this, longLivedState);

    if (!parse(dfg)) {
        finalizer = std::make_unique<FailedFinalizer>(*this);
        return FailPath;
    }

    codeBlock->setCalleeSaveRegisters(RegisterSet::dfgCalleeSaveRegisters());

    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);

    if (Options::dumpGraphAfterParsing()) {
        dataLog("Graph after parsing:\n");
        dfg.dump();
    }

    performLiveCatchVariablePreservationPhase(dfg);

    if (Options::useMaximalFlushInsertionPhase())
        performMaximalFlushInsertion(dfg);

    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);

    performStaticExecutionCountEstimation(dfg);

    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        performCPSRethreading(dfg);
    }

    if (validationEnabled())
        validate(dfg);

    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performStructureRegistration(dfg);
    performInvalidationPointInjection(dfg);
    performTypeCheckHoisting(dfg);

    dfg.m_fixpointState = FixpointNotConverged;

    // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
    // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
    // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
    // that the compiler compiles more quickly. We want the third tier to compile quickly, which
    // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint.
    if (validationEnabled())
        validate(dfg);

    performStrengthReduction(dfg);
    performCPSRethreading(dfg);
    performCFA(dfg);
    performConstantFolding(dfg);
    bool changed = false;
    changed |= performCFGSimplification(dfg);
    changed |= performLocalCSE(dfg);

    if (validationEnabled())
        validate(dfg);

    performCPSRethreading(dfg);
    if (!isFTL(mode)) {
        // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
        // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
        // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
        // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases,
        // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an
        // escape for all of the arguments. This then disables object allocation sinking.
        //
        // So, for now, we just disable this phase for the FTL.
        //
        // If we wanted to enable it, we'd have to do any of the following:
        // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before
        //   PutStack sinking and object allocation sinking.
        // - Make VarargsForwarding emit a GetLocal+SetLocal sequence, that we can later turn into
        //   GetStack+PutStack.
        //
        // But, it's not super valuable to enable those optimizations, since the FTL
        // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this
        // pathology.

        changed |= performVarargsForwarding(dfg); // Do this after CFG simplification and CPS rethreading.
    }
    if (changed) {
        performCFA(dfg);
        performConstantFolding(dfg);
    }

    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.ensureDominators();
        dfg.ensureNaturalLoops();
        dfg.ensurePrePostNumbering();
    }

    switch (mode) {
    case DFGMode: {
        dfg.m_fixpointState = FixpointConverged;

        performTierUpCheckInjection(dfg);

        performFastStoreBarrierInsertion(dfg);
        performStoreBarrierClustering(dfg);
        performCleanUp(dfg);
        performCPSRethreading(dfg);
        performDCE(dfg);
        performPhantomInsertion(dfg);
        performStackLayout(dfg);
        performVirtualRegisterAllocation(dfg);
        performWatchpointCollection(dfg);
        dumpAndVerifyGraph(dfg, "Graph after optimization:");

        JITCompiler dataFlowJIT(dfg);
        if (codeBlock->codeType() == FunctionCode)
            dataFlowJIT.compileFunction();
        else
            dataFlowJIT.compile();

        return DFGPath;
    }

    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        performCleanUp(dfg); // Reduce the graph size a bit.
        performCriticalEdgeBreaking(dfg);
        if (Options::createPreHeaders())
            performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performSSALowering(dfg);

        // Ideally, these would be run to fixpoint with the object allocation sinking phase.
        performArgumentsElimination(dfg);
        if (Options::usePutStackSinking())
            performPutStackSinking(dfg);

        performConstantHoisting(dfg);
        performGlobalCSE(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performConstantFolding(dfg);
        performCleanUp(dfg); // Reduce the graph size a lot.
        changed = false;
        changed |= performStrengthReduction(dfg);
        if (Options::useObjectAllocationSinking()) {
            changed |= performCriticalEdgeBreaking(dfg);
            changed |= performObjectAllocationSinking(dfg);
        }
        if (changed) {
            // State-at-tail and state-at-head will be invalid if we did strength reduction since
            // it might increase live ranges.
            performLivenessAnalysis(dfg);
            performCFA(dfg);
            performConstantFolding(dfg);
        }

        // Currently, this relies on pre-headers still being valid. That precludes running CFG
        // simplification before it, unless we re-created the pre-headers. There wouldn't be anything
        // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point.
        // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that
        // then we'd need to do some simple SSA fix-up.
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performLICM(dfg);

        // FIXME: Currently: IntegerRangeOptimization *must* be run after LICM.
        //
        // IntegerRangeOptimization makes changes on nodes based on preceding blocks
        // and nodes. LICM moves nodes, which can invalidate assumptions used
        // by IntegerRangeOptimization.
        //
        // Ideally, the dependencies should be explicit. See https://bugs.webkit.org/show_bug.cgi?id=157534.
        performLivenessAnalysis(dfg);
        performIntegerRangeOptimization(dfg);

        performCleanUp(dfg);
        performIntegerCheckCombining(dfg);
        performGlobalCSE(dfg);

        // At this point we're not allowed to do any further code motion because our reasoning
        // about code motion assumes that it's OK to insert GC points in random places.
        dfg.m_fixpointState = FixpointConverged;

        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performGlobalStoreBarrierInsertion(dfg);
        performStoreBarrierClustering(dfg);
        if (Options::useMovHintRemoval())
            performMovHintRemoval(dfg);
        performCleanUp(dfg);
        performDCE(dfg); // We rely on this to kill dead code that won't be recognized as dead by B3.
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);
        performWatchpointCollection(dfg);

        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldDumpDisassembly(mode));

        // Flash a safepoint in case the GC wants some action.
        Safepoint::Result safepointResult;
        {
            GraphSafepoint safepoint(dfg, safepointResult);
        }
        if (safepointResult.didGetCancelled())
            return CancelPath;

        FTL::State state(dfg);
        FTL::lowerDFGToB3(state);

        if (UNLIKELY(computeCompileTimes()))
            m_timeBeforeFTL = monotonicallyIncreasingTimeMS();

        if (Options::b3AlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::compile(state, safepointResult);
        if (safepointResult.didGetCancelled())
            return CancelPath;

        if (Options::b3AlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }

        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::link(state);
        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }
        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
#endif // ENABLE(FTL_JIT)
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
    }
}
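// Like its earlier counterpart, the fixFunctionBasedOnStackMaps() below patches the
// generated code using the parsed stackmaps. Every fix-up it performs follows the
// same lookup pattern; a minimal sketch (patchOneSite is a hypothetical stand-in
// for the IC generation or jump replacement done per record):
//
//     auto iter = recordMap.find(someStackmapID);
//     if (iter == recordMap.end())
//         return; // The patchpoint was optimized out; there is nothing to patch.
//     for (unsigned i = iter->value.size(); i--;) {
//         StackMaps::Record& record = iter->value[i];
//         patchOneSite(bitwise_cast<char*>(generatedFunction) + record.instructionOffset, record);
//     }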
static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps stackmaps = jitCode->stackmaps;

    int localsOffset = offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;
    int varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);

    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;

        if (inlineCallFrame->argumentCountRegister.isValid())
            inlineCallFrame->argumentCountRegister += localsOffset;

        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }

        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }

        if (graph.hasDebuggerEnabled())
            codeBlock->setScopeRegister(codeBlock->scopeRegister() + localsOffset);
    }

    MacroAssembler::Label stackOverflowException;

    {
        CCallHelpers checkJIT(&vm, codeBlock);

        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        stackOverflowException = checkJIT.label();
        checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, checkJIT, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
        linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, FunctionPtr(lookupExceptionHandlerFromCallerFrame));

        state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
    }

    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, exitThunkGenerator, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }

        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());

        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];

            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");

            auto iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);

            for (unsigned j = exit.m_values.size(); j--;)
                exit.m_values[j] = exit.m_values[j].withLocalsOffset(localsOffset);
            for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
                materialization->accountForLocalsOffset(localsOffset);

            if (verboseCompilationEnabled()) {
                DumpContext context;
                dataLog("    Exit values: ", inContext(exit.m_values, &context), "\n");
                if (!exit.m_materializations.isEmpty()) {
                    dataLog("    Materializations: \n");
                    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
                        dataLog("        Materialize(", pointerDump(materialization), ")\n");
                }
            }
        }

        state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
    }

    if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty() || !state.checkIns.isEmpty()) {
        CCallHelpers slowPathJIT(&vm, codeBlock);

        CCallHelpers::JumpList exceptionTarget;

        for (unsigned i = state.getByIds.size(); i--;) {
            GetByIdDescriptor& getById = state.getByIds[i];

            if (verboseCompilationEnabled())
                dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");

            auto iter = recordMap.find(getById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            CodeOrigin codeOrigin = state.jitCode->common.codeOrigins[getById.callSiteIndex().bits()];
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];

                RegisterSet usedRegisters = usedRegistersFor(record);

                GPRReg result = record.locations[0].directGPR();
                GPRReg base = record.locations[1].directGPR();

                JITGetByIdGenerator gen(
                    codeBlock, codeOrigin, getById.callSiteIndex(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(result), NeedToSpill);

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    operationGetByIdOptimize, result, gen.stubInfo(), base, getById.uid());

                gen.reportSlowPathCall(begin, call);

                getById.m_slowPathDone.append(slowPathJIT.jump());
                getById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.putByIds.size(); i--;) {
            PutByIdDescriptor& putById = state.putByIds[i];

            if (verboseCompilationEnabled())
                dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");

            auto iter = recordMap.find(putById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            CodeOrigin codeOrigin = state.jitCode->common.codeOrigins[putById.callSiteIndex().bits()];
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];

                RegisterSet usedRegisters = usedRegistersFor(record);

                GPRReg base = record.locations[0].directGPR();
                GPRReg value = record.locations[1].directGPR();

                JITPutByIdGenerator gen(
                    codeBlock, codeOrigin, putById.callSiteIndex(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(value), GPRInfo::patchpointScratchRegister, NeedToSpill,
                    putById.ecmaMode(), putById.putKind());

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    gen.slowPathFunction(), gen.stubInfo(), value, base, putById.uid());

                gen.reportSlowPathCall(begin, call);

                putById.m_slowPathDone.append(slowPathJIT.jump());
                putById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            CheckInDescriptor& checkIn = state.checkIns[i];

            if (verboseCompilationEnabled())
                dataLog("Handling checkIn stackmap #", checkIn.stackmapID(), "\n");

            auto iter = recordMap.find(checkIn.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            CodeOrigin codeOrigin = state.jitCode->common.codeOrigins[checkIn.callSiteIndex().bits()];
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                RegisterSet usedRegisters = usedRegistersFor(record);
                GPRReg result = record.locations[0].directGPR();
                GPRReg obj = record.locations[1].directGPR();
                StructureStubInfo* stubInfo = codeBlock->addStubInfo(AccessType::In);
                stubInfo->codeOrigin = codeOrigin;
                stubInfo->callSiteIndex = checkIn.callSiteIndex();
                stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
                stubInfo->patch.valueGPR = static_cast<int8_t>(result);
                stubInfo->patch.usedRegisters = usedRegisters;
                stubInfo->patch.spillMode = NeedToSpill;

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call slowCall = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    operationInOptimize, result, stubInfo, obj, checkIn.m_uid);

                checkIn.m_slowPathDone.append(slowPathJIT.jump());
                checkIn.m_generators.append(CheckInGenerator(stubInfo, slowCall, begin));
            }
        }

        exceptionTarget.link(&slowPathJIT);
        MacroAssembler::Jump exceptionJump = slowPathJIT.jump();

        state.finalizer->sideCodeLinkBuffer = std::make_unique<LinkBuffer>(vm, slowPathJIT, codeBlock, JITCompilationCanFail);
        if (state.finalizer->sideCodeLinkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        state.finalizer->sideCodeLinkBuffer->link(
            exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());

        for (unsigned i = state.getByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
                sizeOfGetById());
        }
        for (unsigned i = state.putByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
                sizeOfPutById());
        }
        for (unsigned i = state.checkIns.size(); i--;) {
            generateCheckInICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
                sizeOfIn());
        }
    }

    adjustCallICsForStackmaps(state.jsCalls, recordMap);

    for (unsigned i = state.jsCalls.size(); i--;) {
        JSCall& call = state.jsCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, state.jitCode->stackmaps.stackSizeForLocals());

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfCall(), "JSCall inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer);
        });
    }

    adjustCallICsForStackmaps(state.jsCallVarargses, recordMap);

    for (unsigned i = state.jsCallVarargses.size(); i--;) {
        JSCallVarargs& call = state.jsCallVarargses[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, varargsSpillSlotsOffset);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = sizeOfICFor(call.node());

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "varargs call inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        });
    }

    adjustCallICsForStackmaps(state.jsTailCalls, recordMap);

    for (unsigned i = state.jsTailCalls.size(); i--;) {
        JSTailCall& call = state.jsTailCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(*state.jitCode.get(), fastPathJIT);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = call.estimatedSize();

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "tail call inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer);
        });
    }

    auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];

            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            RELEASE_ASSERT(stackOverflowException.isSet());

            MacroAssembler::replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
        }
    }

    iter = recordMap.find(state.handleExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];

            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            MacroAssembler::replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        }
    }

    for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
        OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
        OSRExit& exit = jitCode->osrExit[exitIndex];

        iter = recordMap.find(exit.m_stackmapID);

        Vector<const void*> codeAddresses;

        if (iter != recordMap.end()) {
            for (unsigned i = iter->value.size(); i--;) {
                StackMaps::Record& record = iter->value[i];

                CodeLocationLabel source = CodeLocationLabel(
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

                codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());

                if (info.m_isInvalidationPoint)
                    jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
                else
                    MacroAssembler::replaceWithJump(source, info.m_thunkAddress);
            }
        }

        if (graph.compilation())
            graph.compilation()->addOSRExitSite(codeAddresses);
    }
}
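// The final function is the much older, synchronous DFG driver entry point: it runs
// on the calling thread, derives the must-handle values for OSR entry itself, and
// invokes a fixed sequence of phases rather than the plan-based pipeline above.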
inline bool compile(CompileMode compileMode, ExecState* exec, CodeBlock* codeBlock, JITCode& jitCode, MacroAssemblerCodePtr* jitCodeWithArityCheck, unsigned osrEntryBytecodeIndex)
{
    SamplingRegion samplingRegion("DFG Compilation (Driver)");

    numCompilations++;

    ASSERT(codeBlock);
    ASSERT(codeBlock->alternative());
    ASSERT(codeBlock->alternative()->getJITType() == JITCode::BaselineJIT);

    ASSERT(osrEntryBytecodeIndex != UINT_MAX);

    if (!Options::useDFGJIT())
        return false;

    if (!Options::bytecodeRangeToDFGCompile().isInRange(codeBlock->instructionCount()))
        return false;

    if (logCompilationChanges())
        dataLog("DFG compiling ", *codeBlock, ", number of instructions = ", codeBlock->instructionCount(), "\n");

    // Derive our set of must-handle values. The compilation must be at least conservative
    // enough to allow for OSR entry with these values.
    unsigned numVarsWithValues;
    if (osrEntryBytecodeIndex)
        numVarsWithValues = codeBlock->m_numVars;
    else
        numVarsWithValues = 0;
    Operands<JSValue> mustHandleValues(codeBlock->numParameters(), numVarsWithValues);
    for (size_t i = 0; i < mustHandleValues.size(); ++i) {
        int operand = mustHandleValues.operandForIndex(i);
        if (operandIsArgument(operand)
            && !operandToArgument(operand)
            && compileMode == CompileFunction
            && codeBlock->specializationKind() == CodeForConstruct) {
            // Ugh. If we're in a constructor, the 'this' argument may hold garbage. It will
            // also never be used. It doesn't matter what we put into the value for this,
            // but it has to be an actual value that can be grokked by subsequent DFG passes,
            // so we sanitize it here by turning it into Undefined.
            mustHandleValues[i] = jsUndefined();
        } else
            mustHandleValues[i] = exec->uncheckedR(operand).jsValue();
    }

    Graph dfg(exec->vm(), codeBlock, osrEntryBytecodeIndex, mustHandleValues);
    if (!parse(exec, dfg))
        return false;

    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);

    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);

    if (validationEnabled())
        validate(dfg);

    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performTypeCheckHoisting(dfg);

    dfg.m_fixpointState = FixpointNotConverged;
    performCSE(dfg);
    performArgumentsSimplification(dfg);
    performCPSRethreading(dfg); // This should usually be a no-op since CSE rarely dethreads, and arguments simplification rarely does anything.
    performCFA(dfg);
    performConstantFolding(dfg);
    performCFGSimplification(dfg);

    dfg.m_fixpointState = FixpointConverged;
    performStoreElimination(dfg);
    performCPSRethreading(dfg);
    performDCE(dfg);
    performVirtualRegisterAllocation(dfg);

    GraphDumpMode modeForFinalValidate = DumpGraph;
    if (verboseCompilationEnabled()) {
        dataLogF("Graph after optimization:\n");
        dfg.dump();
        modeForFinalValidate = DontDumpGraph;
    }
    if (validationEnabled())
        validate(dfg, modeForFinalValidate);

    JITCompiler dataFlowJIT(dfg);
    bool result;
    if (compileMode == CompileFunction) {
        ASSERT(jitCodeWithArityCheck);

        result = dataFlowJIT.compileFunction(jitCode, *jitCodeWithArityCheck);
    } else {
        ASSERT(compileMode == CompileOther);
        ASSERT(!jitCodeWithArityCheck);

        result = dataFlowJIT.compile(jitCode);
    }

    return result;
}
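// A hedged sketch of how the driver above is typically wrapped (the wrapper names
// are assumptions; the argument shapes are inferred from the assertions inside
// compile()):
//
//     inline bool tryCompile(ExecState* exec, CodeBlock* codeBlock, JITCode& jitCode, unsigned bytecodeIndex)
//     {
//         return compile(CompileOther, exec, codeBlock, jitCode, 0, bytecodeIndex);
//     }
//
//     inline bool tryCompileFunction(
//         ExecState* exec, CodeBlock* codeBlock, JITCode& jitCode,
//         MacroAssemblerCodePtr& jitCodeWithArityCheck, unsigned bytecodeIndex)
//     {
//         return compile(CompileFunction, exec, codeBlock, jitCode, &jitCodeWithArityCheck, bytecodeIndex);
//     }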