static void generateCheckInICFastPath(
    State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, CheckInDescriptor& ic, size_t sizeOfIC)
{
    VM& vm = state.graph.m_vm;

    StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
    if (iter == recordMap.end()) {
        // It was optimized out.
        return;
    }

    Vector<StackMaps::Record>& records = iter->value;

    RELEASE_ASSERT(records.size() == ic.m_generators.size());

    for (unsigned i = records.size(); i--;) {
        StackMaps::Record& record = records[i];
        auto generator = ic.m_generators[i];

        StructureStubInfo& stubInfo = *generator.m_stub;
        auto call = generator.m_slowCall;
        auto slowPathBegin = generator.m_beginLabel;

        CCallHelpers fastPathJIT(&vm, codeBlock);

        auto jump = fastPathJIT.patchableJump();
        auto done = fastPathJIT.label();

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + record.instructionOffset;

        LinkBuffer fastPath(vm, fastPathJIT, startOfIC, sizeOfIC);
        LinkBuffer& slowPath = *state.finalizer->sideCodeLinkBuffer;

        // Note: we could handle the !isValid() case. We just don't appear to have a
        // reason to do so, yet.
        RELEASE_ASSERT(fastPath.isValid());

        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + fastPath.size(), sizeOfIC - fastPath.size());

        state.finalizer->sideCodeLinkBuffer->link(
            ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));

        CodeLocationLabel slowPathBeginLoc = slowPath.locationOf(slowPathBegin);
        fastPath.link(jump, slowPathBeginLoc);

        CodeLocationCall callReturnLocation = slowPath.locationOf(call);

        stubInfo.patch.deltaCallToDone = MacroAssembler::differenceBetweenCodePtr(
            callReturnLocation, fastPath.locationOf(done));

        stubInfo.patch.deltaCallToJump = MacroAssembler::differenceBetweenCodePtr(
            callReturnLocation, fastPath.locationOf(jump));
        stubInfo.callReturnLocation = callReturnLocation;
        stubInfo.patch.deltaCallToSlowCase = MacroAssembler::differenceBetweenCodePtr(
            callReturnLocation, slowPathBeginLoc);
    }
}
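// The variant below delegates code placement to generateInlineIfPossibleOutOfLineIfNot.
// Judging by its name and the arguments passed here, that helper presumably tries to link
// the fast path directly into the sizeOfIC-byte patchpoint region at startOfIC and falls
// back to an out-of-line placement if the emitted code does not fit; the unnamed bool
// parameter of the callback presumably reports which of the two happened. All of the
// StructureStubInfo offset patching is deferred to the postLink callback so that it runs
// against whichever LinkBuffer was actually used.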
static void generateCheckInICFastPath(
    State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, CheckInDescriptor& ic, size_t sizeOfIC)
{
    VM& vm = state.graph.m_vm;

    StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
    if (iter == recordMap.end()) {
        // It was optimized out.
        return;
    }

    Vector<StackMaps::Record>& records = iter->value;

    RELEASE_ASSERT(records.size() == ic.m_generators.size());

    for (unsigned i = records.size(); i--;) {
        StackMaps::Record& record = records[i];
        auto generator = ic.m_generators[i];

        StructureStubInfo& stubInfo = *generator.m_stub;
        auto call = generator.m_slowCall;
        auto slowPathBegin = generator.m_beginLabel;

        CCallHelpers fastPathJIT(&vm, codeBlock);

        auto jump = fastPathJIT.patchableJump();
        auto done = fastPathJIT.label();

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + record.instructionOffset;

        auto postLink = [&] (LinkBuffer& fastPath, CCallHelpers&, bool) {
            LinkBuffer& slowPath = *state.finalizer->sideCodeLinkBuffer;

            state.finalizer->sideCodeLinkBuffer->link(
                ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));

            CodeLocationLabel slowPathBeginLoc = slowPath.locationOf(slowPathBegin);
            fastPath.link(jump, slowPathBeginLoc);

            CodeLocationCall callReturnLocation = slowPath.locationOf(call);

            stubInfo.patch.deltaCallToDone = MacroAssembler::differenceBetweenCodePtr(
                callReturnLocation, fastPath.locationOf(done));

            stubInfo.patch.deltaCallToJump = MacroAssembler::differenceBetweenCodePtr(
                callReturnLocation, fastPath.locationOf(jump));
            stubInfo.callReturnLocation = callReturnLocation;
            stubInfo.patch.deltaCallToSlowCase = MacroAssembler::differenceBetweenCodePtr(
                callReturnLocation, slowPathBeginLoc);
        };

        generateInlineIfPossibleOutOfLineIfNot(
            state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC,
            "CheckIn inline cache", postLink);
    }
}
bool JITFinalizer::finalizeFunction()
{
    for (unsigned i = jitCode->handles().size(); i--;) {
        MacroAssembler::cacheFlush(
            jitCode->handles()[i]->start(), jitCode->handles()[i]->sizeInBytes());
    }

    if (exitThunksLinkBuffer) {
        StackMaps::RecordMap recordMap = jitCode->stackmaps.getRecordMap();

        for (unsigned i = 0; i < osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];
            StackMaps::RecordMap::iterator iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It's OK, it was optimized out.
                continue;
            }

            exitThunksLinkBuffer->link(
                info.m_thunkJump,
                CodeLocationLabel(
                    m_plan.vm.ftlThunks->getOSRExitGenerationThunk(
                        m_plan.vm, Location::forStackmaps(
                            jitCode->stackmaps, iter->value.locations[0])).code()));
        }

        jitCode->initializeExitThunks(
            FINALIZE_DFG_CODE(
                *exitThunksLinkBuffer,
                ("FTL exit thunks for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock.get(), JITCode::FTLJIT)).data())));
    } // else this function had no OSR exits, so no exit thunks.

    MacroAssemblerCodePtr withArityCheck;
    if (arityCheck.isSet())
        withArityCheck = entrypointLinkBuffer->locationOf(arityCheck);

    jitCode->initializeCode(
        FINALIZE_DFG_CODE(
            *entrypointLinkBuffer,
            ("FTL entrypoint thunk for %s with LLVM generated code at %p", toCString(CodeBlockWithJITType(m_plan.codeBlock.get(), JITCode::FTLJIT)).data(), function)));

    m_plan.codeBlock->setJITCode(jitCode, withArityCheck);

    return true;
}
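// generateICFastPath below emits the fast path of a GetById/PutById-style inline cache
// directly into the patchpoint slot that LLVM reserved for it (located via the stackmap
// record's instructionOffset), pads the remainder of the slot with NOPs, and wires the
// generator's slow-path jump to the shared slow-path code in sideCodeLinkBuffer. The second
// variant performs the same linking, but lets generateInlineIfPossibleOutOfLineIfNot decide
// whether the code lands inline in the slot or out of line.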
template<typename DescriptorType>
void generateICFastPath(
    State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, DescriptorType& ic, size_t sizeOfIC)
{
    VM& vm = state.graph.m_vm;

    StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
    if (iter == recordMap.end()) {
        // It was optimized out.
        return;
    }

    Vector<StackMaps::Record>& records = iter->value;

    RELEASE_ASSERT(records.size() == ic.m_generators.size());

    for (unsigned i = records.size(); i--;) {
        StackMaps::Record& record = records[i];
        auto generator = ic.m_generators[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        generator.generateFastPath(fastPathJIT);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + record.instructionOffset;

        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfIC);
        // Note: we could handle the !isValid() case. We just don't appear to have a
        // reason to do so, yet.
        RELEASE_ASSERT(linkBuffer.isValid());

        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfIC - linkBuffer.size());

        state.finalizer->sideCodeLinkBuffer->link(
            ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));

        linkBuffer.link(
            generator.slowPathJump(),
            state.finalizer->sideCodeLinkBuffer->locationOf(generator.slowPathBegin()));

        generator.finalize(linkBuffer, *state.finalizer->sideCodeLinkBuffer);
    }
}
template<typename DescriptorType>
void generateICFastPath(
    State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, DescriptorType& ic, size_t sizeOfIC)
{
    VM& vm = state.graph.m_vm;

    StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
    if (iter == recordMap.end()) {
        // It was optimized out.
        return;
    }

    Vector<StackMaps::Record>& records = iter->value;

    RELEASE_ASSERT(records.size() == ic.m_generators.size());

    for (unsigned i = records.size(); i--;) {
        StackMaps::Record& record = records[i];
        auto generator = ic.m_generators[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        generator.generateFastPath(fastPathJIT);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + record.instructionOffset;

        generateInlineIfPossibleOutOfLineIfNot(
            state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "inline cache fast path",
            [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
                state.finalizer->sideCodeLinkBuffer->link(
                    ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));

                linkBuffer.link(
                    generator.slowPathJump(),
                    state.finalizer->sideCodeLinkBuffer->locationOf(generator.slowPathBegin()));

                generator.finalize(linkBuffer, *state.finalizer->sideCodeLinkBuffer);
            });
    }
}
template<typename CallType>
void adjustCallICsForStackmaps(Vector<CallType>& calls, StackMaps::RecordMap& recordMap)
{
    // Handling JS calls is weird: we need to ensure that we sort them by the PC in LLVM
    // generated code. That implies first pruning the ones that LLVM didn't generate.

    Vector<CallType> oldCalls;
    oldCalls.swap(calls);

    for (unsigned i = 0; i < oldCalls.size(); ++i) {
        CallType& call = oldCalls[i];

        StackMaps::RecordMap::iterator iter = recordMap.find(call.stackmapID());
        if (iter == recordMap.end())
            continue;

        for (unsigned j = 0; j < iter->value.size(); ++j) {
            CallType copy = call;
            copy.m_instructionOffset = iter->value[j].instructionOffset;
            calls.append(copy);
        }
    }

    std::sort(calls.begin(), calls.end());
}
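// fixFunctionBasedOnStackMaps post-processes the LLVM-generated machine code using the
// stackmap records: it rebases local and inline-call-frame offsets by localsOffset, emits
// the out-of-line exception-handling stubs and OSR exit thunks, builds the shared slow-path
// code for the GetById/PutById/In inline caches and then patches their fast paths into the
// reserved patchpoint slots, links the JS call and varargs-call inline caches, and finally
// replaces the exception-check and OSR-exit patchpoints with jumps to the appropriate
// handlers or thunks.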
static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, bool didSeeUnwindInfo)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps stackmaps = jitCode->stackmaps;

    int localsOffset = offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;

    int varargsSpillSlotsOffset;
    if (state.varargsSpillSlotsStackmapID != UINT_MAX)
        varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);
    else
        varargsSpillSlotsOffset = 0;

    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;

        if (inlineCallFrame->argumentsRegister.isValid())
            inlineCallFrame->argumentsRegister += localsOffset;

        if (inlineCallFrame->argumentCountRegister.isValid())
            inlineCallFrame->argumentCountRegister += localsOffset;

        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }

        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }
    }

    if (codeBlock->usesArguments()) {
        codeBlock->setArgumentsRegister(
            VirtualRegister(codeBlock->argumentsRegister().offset() + localsOffset));
    }

    MacroAssembler::Label stackOverflowException;

    {
        CCallHelpers checkJIT(&vm, codeBlock);

        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        stackOverflowException = checkJIT.label();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, checkJIT, codeBlock, JITCompilationMustSucceed);
        linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
        linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, FunctionPtr(lookupExceptionHandlerFromCallerFrame));

        state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
    }

    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());
        RELEASE_ASSERT(didSeeUnwindInfo);

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, exitThunkGenerator, codeBlock, JITCompilationMustSucceed);

        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());

        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];

            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");

            auto iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);

            for (unsigned j = exit.m_values.size(); j--;) {
                ExitValue value = exit.m_values[j];
                if (!value.isInJSStackSomehow())
                    continue;
                if (!value.virtualRegister().isLocal())
                    continue;
                exit.m_values[j] = value.withVirtualRegister(
                    VirtualRegister(value.virtualRegister().offset() + localsOffset));
            }

            if (verboseCompilationEnabled()) {
                DumpContext context;
                dataLog(" Exit values: ", inContext(exit.m_values, &context), "\n");
            }
        }

        state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
    }

    if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty() || !state.checkIns.isEmpty()) {
        CCallHelpers slowPathJIT(&vm, codeBlock);

        CCallHelpers::JumpList exceptionTarget;

        for (unsigned i = state.getByIds.size(); i--;) {
            GetByIdDescriptor& getById = state.getByIds[i];

            if (verboseCompilationEnabled())
                dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");

            auto iter = recordMap.find(getById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];

                RegisterSet usedRegisters = usedRegistersFor(record);

                GPRReg result = record.locations[0].directGPR();
                GPRReg base = record.locations[1].directGPR();

                JITGetByIdGenerator gen(
                    codeBlock, getById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(result), NeedToSpill);

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, getById.codeOrigin(), &exceptionTarget,
                    operationGetByIdOptimize, result, gen.stubInfo(), base, getById.uid());

                gen.reportSlowPathCall(begin, call);

                getById.m_slowPathDone.append(slowPathJIT.jump());
                getById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.putByIds.size(); i--;) {
            PutByIdDescriptor& putById = state.putByIds[i];

            if (verboseCompilationEnabled())
                dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");

            auto iter = recordMap.find(putById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];

                RegisterSet usedRegisters = usedRegistersFor(record);

                GPRReg base = record.locations[0].directGPR();
                GPRReg value = record.locations[1].directGPR();

                JITPutByIdGenerator gen(
                    codeBlock, putById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(value), GPRInfo::patchpointScratchRegister, NeedToSpill,
                    putById.ecmaMode(), putById.putKind());

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, putById.codeOrigin(), &exceptionTarget,
                    gen.slowPathFunction(), gen.stubInfo(), value, base, putById.uid());

                gen.reportSlowPathCall(begin, call);

                putById.m_slowPathDone.append(slowPathJIT.jump());
                putById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            CheckInDescriptor& checkIn = state.checkIns[i];

            if (verboseCompilationEnabled())
                dataLog("Handling checkIn stackmap #", checkIn.stackmapID(), "\n");

            auto iter = recordMap.find(checkIn.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                RegisterSet usedRegisters = usedRegistersFor(record);
                GPRReg result = record.locations[0].directGPR();
                GPRReg obj = record.locations[1].directGPR();
                StructureStubInfo* stubInfo = codeBlock->addStubInfo();
                stubInfo->codeOrigin = checkIn.codeOrigin();
                stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
                stubInfo->patch.valueGPR = static_cast<int8_t>(result);
                stubInfo->patch.usedRegisters = usedRegisters;
                stubInfo->patch.spillMode = NeedToSpill;

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call slowCall = callOperation(
                    state, usedRegisters, slowPathJIT, checkIn.codeOrigin(), &exceptionTarget,
                    operationInOptimize, result, stubInfo, obj, checkIn.m_id);

                checkIn.m_slowPathDone.append(slowPathJIT.jump());

                checkIn.m_generators.append(CheckInGenerator(stubInfo, slowCall, begin));
            }
        }

        exceptionTarget.link(&slowPathJIT);
        MacroAssembler::Jump exceptionJump = slowPathJIT.jump();

        state.finalizer->sideCodeLinkBuffer =
            std::make_unique<LinkBuffer>(vm, slowPathJIT, codeBlock, JITCompilationMustSucceed);
        state.finalizer->sideCodeLinkBuffer->link(
            exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());

        for (unsigned i = state.getByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
                sizeOfGetById());
        }
        for (unsigned i = state.putByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
                sizeOfPutById());
        }
        for (unsigned i = state.checkIns.size(); i--;) {
            generateCheckInICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
                sizeOfIn());
        }
    }

    adjustCallICsForStackmaps(state.jsCalls, recordMap);

    for (unsigned i = state.jsCalls.size(); i--;) {
        JSCall& call = state.jsCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;

        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfCall());
        if (!linkBuffer.isValid()) {
            dataLog(
                "Failed to insert inline cache for call because we thought the size would be ",
                sizeOfCall(), " but it ended up being ", fastPathJIT.m_assembler.codeSize(),
                " prior to compaction.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }

        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfCall() - linkBuffer.size());

        call.link(vm, linkBuffer);
    }

    adjustCallICsForStackmaps(state.jsCallVarargses, recordMap);

    for (unsigned i = state.jsCallVarargses.size(); i--;) {
        JSCallVarargs& call = state.jsCallVarargses[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, graph, varargsSpillSlotsOffset);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = sizeOfICFor(call.node());

        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfIC);
        if (!linkBuffer.isValid()) {
            dataLog(
                "Failed to insert inline cache for varargs call (specifically, ",
                Graph::opName(call.node()->op()), ") because we thought the size would be ",
                sizeOfIC, " but it ended up being ", fastPathJIT.m_assembler.codeSize(),
                " prior to compaction.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }

        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfIC - linkBuffer.size());

        call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
    }

    RepatchBuffer repatchBuffer(codeBlock);

    auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];

            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            RELEASE_ASSERT(stackOverflowException.isSet());

            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
        }
    }

    iter = recordMap.find(state.handleExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];

            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        }
    }

    for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
        OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
        OSRExit& exit = jitCode->osrExit[exitIndex];
        iter = recordMap.find(exit.m_stackmapID);

        Vector<const void*> codeAddresses;

        if (iter != recordMap.end()) {
            for (unsigned i = iter->value.size(); i--;) {
                StackMaps::Record& record = iter->value[i];

                CodeLocationLabel source = CodeLocationLabel(
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

                codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());

                if (info.m_isInvalidationPoint)
                    jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
                else
                    repatchBuffer.replaceWithJump(source, info.m_thunkAddress);
            }
        }

        if (graph.compilation())
            graph.compilation()->addOSRExitSite(codeAddresses);
    }
}
static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps stackmaps = jitCode->stackmaps;

    int localsOffset = offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;
    int varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);

    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;

        if (inlineCallFrame->argumentCountRegister.isValid())
            inlineCallFrame->argumentCountRegister += localsOffset;

        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }

        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }

        if (graph.hasDebuggerEnabled())
            codeBlock->setScopeRegister(codeBlock->scopeRegister() + localsOffset);
    }

    MacroAssembler::Label stackOverflowException;

    {
        CCallHelpers checkJIT(&vm, codeBlock);

        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        stackOverflowException = checkJIT.label();
        checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, checkJIT, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
        linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, FunctionPtr(lookupExceptionHandlerFromCallerFrame));

        state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
    }

    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, exitThunkGenerator, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }

        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());

        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];

            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");

            auto iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);

            for (unsigned j = exit.m_values.size(); j--;)
                exit.m_values[j] = exit.m_values[j].withLocalsOffset(localsOffset);
            for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
                materialization->accountForLocalsOffset(localsOffset);

            if (verboseCompilationEnabled()) {
                DumpContext context;
                dataLog(" Exit values: ", inContext(exit.m_values, &context), "\n");
                if (!exit.m_materializations.isEmpty()) {
                    dataLog(" Materializations: \n");
                    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
                        dataLog(" Materialize(", pointerDump(materialization), ")\n");
                }
            }
        }

        state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
    }

    if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty() || !state.checkIns.isEmpty()) {
        CCallHelpers slowPathJIT(&vm, codeBlock);

        CCallHelpers::JumpList exceptionTarget;

        for (unsigned i = state.getByIds.size(); i--;) {
            GetByIdDescriptor& getById = state.getByIds[i];

            if (verboseCompilationEnabled())
                dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");

            auto iter = recordMap.find(getById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            CodeOrigin codeOrigin = state.jitCode->common.codeOrigins[getById.callSiteIndex().bits()];
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];

                RegisterSet usedRegisters = usedRegistersFor(record);

                GPRReg result = record.locations[0].directGPR();
                GPRReg base = record.locations[1].directGPR();

                JITGetByIdGenerator gen(
                    codeBlock, codeOrigin, getById.callSiteIndex(), usedRegisters,
                    JSValueRegs(base), JSValueRegs(result), NeedToSpill);

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    operationGetByIdOptimize, result, gen.stubInfo(), base, getById.uid());

                gen.reportSlowPathCall(begin, call);

                getById.m_slowPathDone.append(slowPathJIT.jump());
                getById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.putByIds.size(); i--;) {
            PutByIdDescriptor& putById = state.putByIds[i];

            if (verboseCompilationEnabled())
                dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");

            auto iter = recordMap.find(putById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            CodeOrigin codeOrigin = state.jitCode->common.codeOrigins[putById.callSiteIndex().bits()];
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];

                RegisterSet usedRegisters = usedRegistersFor(record);

                GPRReg base = record.locations[0].directGPR();
                GPRReg value = record.locations[1].directGPR();

                JITPutByIdGenerator gen(
                    codeBlock, codeOrigin, putById.callSiteIndex(), usedRegisters,
                    JSValueRegs(base), JSValueRegs(value), GPRInfo::patchpointScratchRegister,
                    NeedToSpill, putById.ecmaMode(), putById.putKind());

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    gen.slowPathFunction(), gen.stubInfo(), value, base, putById.uid());

                gen.reportSlowPathCall(begin, call);

                putById.m_slowPathDone.append(slowPathJIT.jump());
                putById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            CheckInDescriptor& checkIn = state.checkIns[i];

            if (verboseCompilationEnabled())
                dataLog("Handling checkIn stackmap #", checkIn.stackmapID(), "\n");

            auto iter = recordMap.find(checkIn.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            CodeOrigin codeOrigin = state.jitCode->common.codeOrigins[checkIn.callSiteIndex().bits()];
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                RegisterSet usedRegisters = usedRegistersFor(record);
                GPRReg result = record.locations[0].directGPR();
                GPRReg obj = record.locations[1].directGPR();
                StructureStubInfo* stubInfo = codeBlock->addStubInfo(AccessType::In);
                stubInfo->codeOrigin = codeOrigin;
                stubInfo->callSiteIndex = checkIn.callSiteIndex();
                stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
                stubInfo->patch.valueGPR = static_cast<int8_t>(result);
                stubInfo->patch.usedRegisters = usedRegisters;
                stubInfo->patch.spillMode = NeedToSpill;

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call slowCall = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    operationInOptimize, result, stubInfo, obj, checkIn.m_uid);

                checkIn.m_slowPathDone.append(slowPathJIT.jump());

                checkIn.m_generators.append(CheckInGenerator(stubInfo, slowCall, begin));
            }
        }

        exceptionTarget.link(&slowPathJIT);
        MacroAssembler::Jump exceptionJump = slowPathJIT.jump();

        state.finalizer->sideCodeLinkBuffer =
            std::make_unique<LinkBuffer>(vm, slowPathJIT, codeBlock, JITCompilationCanFail);
        if (state.finalizer->sideCodeLinkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        state.finalizer->sideCodeLinkBuffer->link(
            exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());

        for (unsigned i = state.getByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
                sizeOfGetById());
        }
        for (unsigned i = state.putByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
                sizeOfPutById());
        }
        for (unsigned i = state.checkIns.size(); i--;) {
            generateCheckInICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
                sizeOfIn());
        }
    }

    adjustCallICsForStackmaps(state.jsCalls, recordMap);

    for (unsigned i = state.jsCalls.size(); i--;) {
        JSCall& call = state.jsCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, state.jitCode->stackmaps.stackSizeForLocals());

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;

        generateInlineIfPossibleOutOfLineIfNot(
            state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfCall(), "JSCall inline cache",
            [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
                call.link(vm, linkBuffer);
            });
    }

    adjustCallICsForStackmaps(state.jsCallVarargses, recordMap);

    for (unsigned i = state.jsCallVarargses.size(); i--;) {
        JSCallVarargs& call = state.jsCallVarargses[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, varargsSpillSlotsOffset);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = sizeOfICFor(call.node());

        generateInlineIfPossibleOutOfLineIfNot(
            state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "varargs call inline cache",
            [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
                call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
            });
    }

    adjustCallICsForStackmaps(state.jsTailCalls, recordMap);

    for (unsigned i = state.jsTailCalls.size(); i--;) {
        JSTailCall& call = state.jsTailCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(*state.jitCode.get(), fastPathJIT);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = call.estimatedSize();

        generateInlineIfPossibleOutOfLineIfNot(
            state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "tail call inline cache",
            [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
                call.link(vm, linkBuffer);
            });
    }

    auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];

            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            RELEASE_ASSERT(stackOverflowException.isSet());

            MacroAssembler::replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
        }
    }

    iter = recordMap.find(state.handleExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];

            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            MacroAssembler::replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        }
    }

    for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
        OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
        OSRExit& exit = jitCode->osrExit[exitIndex];
        iter = recordMap.find(exit.m_stackmapID);

        Vector<const void*> codeAddresses;

        if (iter != recordMap.end()) {
            for (unsigned i = iter->value.size(); i--;) {
                StackMaps::Record& record = iter->value[i];

                CodeLocationLabel source = CodeLocationLabel(
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

                codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());

                if (info.m_isInvalidationPoint)
                    jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
                else
                    MacroAssembler::replaceWithJump(source, info.m_thunkAddress);
            }
        }

        if (graph.compilation())
            graph.compilation()->addOSRExitSite(codeAddresses);
    }
}
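// JITFinalizer::finalizeFunction installs everything built above: it flushes the
// instruction cache for every code handle, links the OSR exit thunk jumps, finalizes the
// exit-thunk, side-code, and exception-handler link buffers (registering their executable
// memory with the JITCode), finalizes the arity-check entrypoint thunk, and publishes the
// JITCode on the CodeBlock.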
bool JITFinalizer::finalizeFunction()
{
    for (unsigned i = jitCode->handles().size(); i--;) {
        MacroAssembler::cacheFlush(
            jitCode->handles()[i]->start(), jitCode->handles()[i]->sizeInBytes());
    }

    if (exitThunksLinkBuffer) {
        StackMaps::RecordMap recordMap = jitCode->stackmaps.computeRecordMap();

        for (unsigned i = 0; i < osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];
            StackMaps::RecordMap::iterator iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It's OK, it was optimized out.
                continue;
            }

            exitThunksLinkBuffer->link(
                info.m_thunkJump,
                CodeLocationLabel(m_plan.vm.getCTIStub(osrExitGenerationThunkGenerator).code()));
        }

        jitCode->initializeExitThunks(
            FINALIZE_DFG_CODE(
                *exitThunksLinkBuffer,
                ("FTL exit thunks for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock.get(), JITCode::FTLJIT)).data())));
    } // else this function had no OSR exits, so no exit thunks.

    if (sideCodeLinkBuffer) {
        // Side code is for special slow paths that we generate ourselves, like for inline
        // caches.

        for (unsigned i = slowPathCalls.size(); i--;) {
            SlowPathCall& call = slowPathCalls[i];
            sideCodeLinkBuffer->link(
                call.call(),
                CodeLocationLabel(m_plan.vm.ftlThunks->getSlowPathCallThunk(m_plan.vm, call.key()).code()));
        }

        jitCode->addHandle(FINALIZE_DFG_CODE(
            *sideCodeLinkBuffer,
            ("FTL side code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock.get(), JITCode::FTLJIT)).data()))
            .executableMemory());
    }

    if (handleExceptionsLinkBuffer) {
        jitCode->addHandle(FINALIZE_DFG_CODE(
            *handleExceptionsLinkBuffer,
            ("FTL exception handler for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock.get(), JITCode::FTLJIT)).data()))
            .executableMemory());
    }

    MacroAssemblerCodePtr withArityCheck;
    if (arityCheck.isSet())
        withArityCheck = entrypointLinkBuffer->locationOf(arityCheck);

    jitCode->initializeArityCheckEntrypoint(
        FINALIZE_DFG_CODE(
            *entrypointLinkBuffer,
            ("FTL entrypoint thunk for %s with LLVM generated code at %p", toCString(CodeBlockWithJITType(m_plan.codeBlock.get(), JITCode::FTLJIT)).data(), function)));

    m_plan.codeBlock->setJITCode(jitCode);

    if (m_plan.compilation)
        m_plan.vm.m_perBytecodeProfiler->addCompilation(m_plan.compilation);

    return true;
}