static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, bool didSeeUnwindInfo)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps stackmaps = jitCode->stackmaps;

    int localsOffset =
        offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;

    int varargsSpillSlotsOffset;
    if (state.varargsSpillSlotsStackmapID != UINT_MAX)
        varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);
    else
        varargsSpillSlotsOffset = 0;

    // Rebase every virtual register that the DFG recorded relative to the old frame layout.
    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;

        if (inlineCallFrame->argumentsRegister.isValid())
            inlineCallFrame->argumentsRegister += localsOffset;

        if (inlineCallFrame->argumentCountRegister.isValid())
            inlineCallFrame->argumentCountRegister += localsOffset;

        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }

        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }
    }

    if (codeBlock->usesArguments()) {
        codeBlock->setArgumentsRegister(
            VirtualRegister(codeBlock->argumentsRegister().offset() + localsOffset));
    }

    MacroAssembler::Label stackOverflowException;

    {
        CCallHelpers checkJIT(&vm, codeBlock);

        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        stackOverflowException = checkJIT.label();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, checkJIT, codeBlock, JITCompilationMustSucceed);
        linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
        linkBuffer->link(
            callLookupExceptionHandlerFromCallerFrame,
            FunctionPtr(lookupExceptionHandlerFromCallerFrame));

        state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
    }

    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());
        RELEASE_ASSERT(didSeeUnwindInfo);

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, exitThunkGenerator, codeBlock, JITCompilationMustSucceed);

        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());

        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];

            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");

            auto iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);

            for (unsigned j = exit.m_values.size(); j--;) {
                ExitValue value = exit.m_values[j];
                if (!value.isInJSStackSomehow())
                    continue;
                if (!value.virtualRegister().isLocal())
                    continue;
                exit.m_values[j] = value.withVirtualRegister(
                    VirtualRegister(value.virtualRegister().offset() + localsOffset));
            }

            if (verboseCompilationEnabled()) {
                DumpContext context;
                dataLog("    Exit values: ", inContext(exit.m_values, &context), "\n");
            }
        }

        state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
    }

    // Emit the shared slow paths for the GetById/PutById/In inline caches, then stitch
    // each fast path into the code that LLVM generated.
    if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty() || !state.checkIns.isEmpty()) {
        CCallHelpers slowPathJIT(&vm, codeBlock);

        CCallHelpers::JumpList exceptionTarget;

        for (unsigned i = state.getByIds.size(); i--;) {
            GetByIdDescriptor& getById = state.getByIds[i];

            if (verboseCompilationEnabled())
                dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");

            auto iter = recordMap.find(getById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            for (unsigned j = 0; j < iter->value.size(); ++j) {
                StackMaps::Record& record = iter->value[j];

                RegisterSet usedRegisters = usedRegistersFor(record);

                GPRReg result = record.locations[0].directGPR();
                GPRReg base = record.locations[1].directGPR();

                JITGetByIdGenerator gen(
                    codeBlock, getById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(result), NeedToSpill);

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, getById.codeOrigin(), &exceptionTarget,
                    operationGetByIdOptimize, result, gen.stubInfo(), base, getById.uid());

                gen.reportSlowPathCall(begin, call);

                getById.m_slowPathDone.append(slowPathJIT.jump());
                getById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.putByIds.size(); i--;) {
            PutByIdDescriptor& putById = state.putByIds[i];

            if (verboseCompilationEnabled())
                dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");

            auto iter = recordMap.find(putById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            for (unsigned j = 0; j < iter->value.size(); ++j) {
                StackMaps::Record& record = iter->value[j];

                RegisterSet usedRegisters = usedRegistersFor(record);

                GPRReg base = record.locations[0].directGPR();
                GPRReg value = record.locations[1].directGPR();

                JITPutByIdGenerator gen(
                    codeBlock, putById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(value), GPRInfo::patchpointScratchRegister, NeedToSpill,
                    putById.ecmaMode(), putById.putKind());

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, putById.codeOrigin(), &exceptionTarget,
                    gen.slowPathFunction(), gen.stubInfo(), value, base, putById.uid());

                gen.reportSlowPathCall(begin, call);

                putById.m_slowPathDone.append(slowPathJIT.jump());
                putById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            CheckInDescriptor& checkIn = state.checkIns[i];

            if (verboseCompilationEnabled())
                dataLog("Handling checkIn stackmap #", checkIn.stackmapID(), "\n");

            auto iter = recordMap.find(checkIn.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            for (unsigned j = 0; j < iter->value.size(); ++j) {
                StackMaps::Record& record = iter->value[j];

                RegisterSet usedRegisters = usedRegistersFor(record);

                GPRReg result = record.locations[0].directGPR();
                GPRReg obj = record.locations[1].directGPR();

                StructureStubInfo* stubInfo = codeBlock->addStubInfo();
                stubInfo->codeOrigin = checkIn.codeOrigin();
                stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
                stubInfo->patch.valueGPR = static_cast<int8_t>(result);
                stubInfo->patch.usedRegisters = usedRegisters;
                stubInfo->patch.spillMode = NeedToSpill;

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call slowCall = callOperation(
                    state, usedRegisters, slowPathJIT, checkIn.codeOrigin(), &exceptionTarget,
                    operationInOptimize, result, stubInfo, obj, checkIn.m_id);

                checkIn.m_slowPathDone.append(slowPathJIT.jump());
                checkIn.m_generators.append(CheckInGenerator(stubInfo, slowCall, begin));
            }
        }

        exceptionTarget.link(&slowPathJIT);
        MacroAssembler::Jump exceptionJump = slowPathJIT.jump();

        state.finalizer->sideCodeLinkBuffer = std::make_unique<LinkBuffer>(
            vm, slowPathJIT, codeBlock, JITCompilationMustSucceed);
        state.finalizer->sideCodeLinkBuffer->link(
            exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());

        for (unsigned i = state.getByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
                sizeOfGetById());
        }
        for (unsigned i = state.putByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
                sizeOfPutById());
        }
        for (unsigned i = state.checkIns.size(); i--;) {
            generateCheckInICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
                sizeOfIn());
        }
    }

    // Patch each JS call's fast path into the slot reserved at its patchpoint, padding
    // any leftover space with nops.
    adjustCallICsForStackmaps(state.jsCalls, recordMap);

    for (unsigned i = state.jsCalls.size(); i--;) {
        JSCall& call = state.jsCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;

        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfCall());
        if (!linkBuffer.isValid()) {
            dataLog("Failed to insert inline cache for call because we thought the size would be ",
                sizeOfCall(), " but it ended up being ", fastPathJIT.m_assembler.codeSize(),
                " prior to compaction.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }

        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfCall() - linkBuffer.size());

        call.link(vm, linkBuffer);
    }

    adjustCallICsForStackmaps(state.jsCallVarargses, recordMap);

    for (unsigned i = state.jsCallVarargses.size(); i--;) {
        JSCallVarargs& call = state.jsCallVarargses[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, graph, varargsSpillSlotsOffset);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = sizeOfICFor(call.node());

        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfIC);
        if (!linkBuffer.isValid()) {
            dataLog("Failed to insert inline cache for varargs call (specifically, ",
                Graph::opName(call.node()->op()), ") because we thought the size would be ",
                sizeOfIC, " but it ended up being ", fastPathJIT.m_assembler.codeSize(),
                " prior to compaction.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }

        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfIC - linkBuffer.size());

        call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
    }

    // Redirect the exception-check patchpoints to the handlers built above, and point
    // each OSR exit at its thunk.
    RepatchBuffer repatchBuffer(codeBlock);

    auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];

            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            RELEASE_ASSERT(stackOverflowException.isSet());

            repatchBuffer.replaceWithJump(
                source,
                state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
        }
    }

    iter = recordMap.find(state.handleExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];

            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            repatchBuffer.replaceWithJump(
                source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        }
    }

    for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
        OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
        OSRExit& exit = jitCode->osrExit[exitIndex];
        iter = recordMap.find(exit.m_stackmapID);

        Vector<const void*> codeAddresses;

        if (iter != recordMap.end()) {
            for (unsigned i = iter->value.size(); i--;) {
                StackMaps::Record& record = iter->value[i];

                CodeLocationLabel source = CodeLocationLabel(
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

                codeAddresses.append(
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset
                    + MacroAssembler::maxJumpReplacementSize());

                if (info.m_isInvalidationPoint)
                    jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
                else
                    repatchBuffer.replaceWithJump(source, info.m_thunkAddress);
            }
        }

        if (graph.compilation())
            graph.compilation()->addOSRExitSite(codeAddresses);
    }
}
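
// Illustrative sketch (not part of this file): the slot-reservation invariant that the
// call IC loops above enforce. Each patchpoint reserves a fixed-size slot in the
// generated code; the assembled fast path must fit in that slot, and the tail is
// nop-filled so the rest of the slot still decodes as valid instructions. The helper
// below just restates that arithmetic; its name is local to this sketch.
static size_t nopPaddingForICSlot(size_t reservedBytes, size_t emittedBytes)
{
    // Mirrors the linkBuffer.isValid() checks above: an oversized fast path is fatal.
    RELEASE_ASSERT(emittedBytes <= reservedBytes);
    return reservedBytes - emittedBytes; // the byte count handed to fillNops()
}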
static void compileStubWithoutOSRExitStackmap(
    unsigned exitID, OSRExit& exit, VM* vm, CodeBlock* codeBlock)
{
    CCallHelpers jit(vm, codeBlock);

    // Make ourselves look like a real C function.
    jit.push(MacroAssembler::framePointerRegister);
    jit.move(MacroAssembler::stackPointerRegister, MacroAssembler::framePointerRegister);

    // This is actually fairly easy, even though it is horribly gross. We know that
    // LLVM would have passed us all of the state via arguments. We know how to get
    // the arguments. And, we know how to pop stack back to the JIT stack frame, sort
    // of: we know that it's two frames beneath us. This is terrible and I feel
    // ashamed of it, but it will work for now.

    CArgumentGetter arguments(jit, 2);

    // First recover our call frame and tag thingies.
    arguments.loadNextPtr(GPRInfo::callFrameRegister);
    jit.move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
    jit.move(MacroAssembler::TrustedImm64(TagMask), GPRInfo::tagMaskRegister);

    // Do some value profiling.
    if (exit.m_profileValueFormat != InvalidValueFormat) {
        arguments.loadNextAndBox(exit.m_profileValueFormat, GPRInfo::nonArgGPR0);

        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                jit.loadPtr(MacroAssembler::Address(GPRInfo::nonArgGPR0, JSCell::structureOffset()), GPRInfo::nonArgGPR1);
                jit.storePtr(GPRInfo::nonArgGPR1, arrayProfile->addressOfLastSeenStructure());
                jit.load8(MacroAssembler::Address(GPRInfo::nonArgGPR1, Structure::indexingTypeOffset()), GPRInfo::nonArgGPR1);
                jit.move(MacroAssembler::TrustedImm32(1), GPRInfo::nonArgGPR2);
                jit.lshift32(GPRInfo::nonArgGPR1, GPRInfo::nonArgGPR2);
                jit.or32(GPRInfo::nonArgGPR2, MacroAssembler::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
            }
        }

        if (!!exit.m_valueProfile)
            jit.store64(GPRInfo::nonArgGPR0, exit.m_valueProfile.getSpecFailBucket(0));
    }

    // Use a scratch buffer to transfer all values.
    ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(sizeof(EncodedJSValue) * exit.m_values.size());
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;

    // Start by dumping all argument exit values to the stack.

    Vector<ExitArgumentForOperand, 16> sortedArguments;
    for (unsigned i = exit.m_values.size(); i--;) {
        ExitValue value = exit.m_values[i];
        int operand = exit.m_values.operandForIndex(i);

        if (!value.isArgument())
            continue;

        sortedArguments.append(ExitArgumentForOperand(value.exitArgument(), VirtualRegister(operand)));
    }
    std::sort(sortedArguments.begin(), sortedArguments.end(), lesserArgumentIndex);

    for (unsigned i = 0; i < sortedArguments.size(); ++i) {
        ExitArgumentForOperand argument = sortedArguments[i];

        arguments.loadNextAndBox(argument.exitArgument().format(), GPRInfo::nonArgGPR0);
        jit.store64(
            GPRInfo::nonArgGPR0, scratch + exit.m_values.indexForOperand(argument.operand()));
    }

    // All temp registers are free at this point.

    // Move anything on the stack into the appropriate place in the scratch buffer.
    for (unsigned i = exit.m_values.size(); i--;) {
        ExitValue value = exit.m_values[i];

        switch (value.kind()) {
        case ExitValueInJSStack:
            jit.load64(AssemblyHelpers::addressFor(value.virtualRegister()), GPRInfo::regT0);
            break;
        case ExitValueInJSStackAsInt32:
            jit.load32(
                AssemblyHelpers::addressFor(value.virtualRegister()).withOffset(
                    OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)),
                GPRInfo::regT0);
            jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
            break;
        case ExitValueInJSStackAsInt52:
            jit.load64(AssemblyHelpers::addressFor(value.virtualRegister()), GPRInfo::regT0);
            jit.rshift64(
                AssemblyHelpers::TrustedImm32(JSValue::int52ShiftAmount), GPRInfo::regT0);
            jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
            break;
        case ExitValueInJSStackAsDouble:
            jit.loadDouble(AssemblyHelpers::addressFor(value.virtualRegister()), FPRInfo::fpRegT0);
            jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
            break;
        case ExitValueDead:
        case ExitValueConstant:
        case ExitValueArgument:
            // Don't do anything for these.
            continue;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }

        jit.store64(GPRInfo::regT0, scratch + i);
    }

    // Move everything from the scratch buffer to the stack; this also reifies constants.
    for (unsigned i = exit.m_values.size(); i--;) {
        ExitValue value = exit.m_values[i];
        int operand = exit.m_values.operandForIndex(i);

        MacroAssembler::Address address = AssemblyHelpers::addressFor(operand);
        switch (value.kind()) {
        case ExitValueDead:
            jit.store64(MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), address);
            break;
        case ExitValueConstant:
            jit.store64(MacroAssembler::TrustedImm64(JSValue::encode(value.constant())), address);
            break;
        case ExitValueInJSStack:
        case ExitValueInJSStackAsInt32:
        case ExitValueInJSStackAsInt52:
        case ExitValueInJSStackAsDouble:
        case ExitValueArgument:
            jit.load64(scratch + i, GPRInfo::regT0);
            jit.store64(GPRInfo::regT0, address);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    handleExitCounts(jit, exit);
    reifyInlinedCallFrames(jit, exit);

    jit.pop(MacroAssembler::framePointerRegister);
    jit.move(MacroAssembler::framePointerRegister, MacroAssembler::stackPointerRegister);
    jit.pop(MacroAssembler::framePointerRegister);
    jit.pop(GPRInfo::nonArgGPR0); // ignore the result.

    if (exit.m_lastSetOperand.isValid()) {
        jit.load64(
            AssemblyHelpers::addressFor(exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
    }

    adjustAndJumpToTarget(jit, exit);

    LinkBuffer patchBuffer(*vm, &jit, codeBlock);
    exit.m_code = FINALIZE_CODE_IF(
        shouldShowDisassembly(),
        patchBuffer,
        ("FTL OSR exit #%u (bc#%u, %s) from %s, with operands = %s",
            exitID, exit.m_codeOrigin.bytecodeIndex,
            exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
            toCString(ignoringContext<DumpContext>(exit.m_values)).data()));
}
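
// Illustrative sketch (not part of the exit stubs above): why exit values are staged
// through a scratch buffer in two passes. If recovered values were written straight
// into their final frame slots, a later recovery could read a slot that an earlier
// write had already clobbered; reading every source first makes the writes
// order-independent. A plain C++ analogy; all names are local to this sketch.
static void restoreViaScratchSketch(
    uint64_t* frame, const size_t* sourceSlots, uint64_t* scratch, size_t count)
{
    for (size_t i = 0; i < count; ++i) // pass 1: read all sources
        scratch[i] = frame[sourceSlots[i]];
    for (size_t i = 0; i < count; ++i) // pass 2: write all destinations
        frame[i] = scratch[i];
}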
static void compileStubWithOSRExitStackmap(
    unsigned exitID, JITCode* jitCode, OSRExit& exit, VM* vm, CodeBlock* codeBlock)
{
    StackMaps::Record* record;

    for (unsigned i = jitCode->stackmaps.records.size(); i--;) {
        record = &jitCode->stackmaps.records[i];
        if (record->patchpointID == exit.m_stackmapID)
            break;
    }

    RELEASE_ASSERT(record->patchpointID == exit.m_stackmapID);

    CCallHelpers jit(vm, codeBlock);

    // We need scratch space to save all registers and to build up the JSStack.
    // Use a scratch buffer to transfer all values.
    ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(sizeof(EncodedJSValue) * exit.m_values.size() + requiredScratchMemorySizeInBytes());
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
    char* registerScratch = bitwise_cast<char*>(scratch + exit.m_values.size());

    // Make sure that saveAllRegisters() has a place on top of the stack to spill things. That
    // function expects to be able to use top of stack for scratch memory.
    jit.push(GPRInfo::regT0);
    saveAllRegisters(jit, registerScratch);

    // Bring the stack back into a sane form.
    jit.pop(GPRInfo::regT0);
    jit.pop(GPRInfo::regT0);

    // The remaining code assumes that SP/FP are in the same state that they were in the FTL's
    // call frame.

    // Get the call frame and tag thingies.
    record->locations[0].restoreInto(jit, jitCode->stackmaps, registerScratch, GPRInfo::callFrameRegister);
    jit.move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
    jit.move(MacroAssembler::TrustedImm64(TagMask), GPRInfo::tagMaskRegister);

    // Do some value profiling.
    if (exit.m_profileValueFormat != InvalidValueFormat) {
        record->locations[1].restoreInto(jit, jitCode->stackmaps, registerScratch, GPRInfo::regT0);
        reboxAccordingToFormat(
            exit.m_profileValueFormat, jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2);

        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                jit.loadPtr(MacroAssembler::Address(GPRInfo::regT0, JSCell::structureOffset()), GPRInfo::regT1);
                jit.storePtr(GPRInfo::regT1, arrayProfile->addressOfLastSeenStructure());
                jit.load8(MacroAssembler::Address(GPRInfo::regT1, Structure::indexingTypeOffset()), GPRInfo::regT1);
                jit.move(MacroAssembler::TrustedImm32(1), GPRInfo::regT2);
                jit.lshift32(GPRInfo::regT1, GPRInfo::regT2);
                jit.or32(GPRInfo::regT2, MacroAssembler::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
            }
        }

        if (!!exit.m_valueProfile)
            jit.store64(GPRInfo::regT0, exit.m_valueProfile.getSpecFailBucket(0));
    }

    // Save all state from wherever the exit data tells us it was, into the appropriate place in
    // the scratch buffer. This doesn't rebox any values yet.
    for (unsigned index = exit.m_values.size(); index--;) {
        ExitValue value = exit.m_values[index];

        switch (value.kind()) {
        case ExitValueDead:
            jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), GPRInfo::regT0);
            break;

        case ExitValueConstant:
            jit.move(MacroAssembler::TrustedImm64(JSValue::encode(value.constant())), GPRInfo::regT0);
            break;

        case ExitValueArgument:
            record->locations[value.exitArgument().argument()].restoreInto(
                jit, jitCode->stackmaps, registerScratch, GPRInfo::regT0);
            break;

        case ExitValueInJSStack:
        case ExitValueInJSStackAsInt32:
        case ExitValueInJSStackAsInt52:
        case ExitValueInJSStackAsDouble:
            jit.load64(AssemblyHelpers::addressFor(value.virtualRegister()), GPRInfo::regT0);
            break;

        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }

        jit.store64(GPRInfo::regT0, scratch + index);
    }

    // Now get state out of the scratch buffer and place it back into the stack. This part does
    // all reboxing.
    for (unsigned index = exit.m_values.size(); index--;) {
        int operand = exit.m_values.operandForIndex(index);
        ExitValue value = exit.m_values[index];

        jit.load64(scratch + index, GPRInfo::regT0);
        reboxAccordingToFormat(
            value.valueFormat(), jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2);
        jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
    }

    handleExitCounts(jit, exit);
    reifyInlinedCallFrames(jit, exit);

    jit.move(MacroAssembler::framePointerRegister, MacroAssembler::stackPointerRegister);
    jit.pop(MacroAssembler::framePointerRegister);
    jit.pop(GPRInfo::nonArgGPR0); // ignore the result.

    if (exit.m_lastSetOperand.isValid()) {
        jit.load64(
            AssemblyHelpers::addressFor(exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
    }

    adjustAndJumpToTarget(jit, exit);

    LinkBuffer patchBuffer(*vm, &jit, codeBlock);
    exit.m_code = FINALIZE_CODE_IF(
        shouldShowDisassembly(),
        patchBuffer,
        ("FTL OSR exit #%u (bc#%u, %s) from %s, with operands = %s, and record = %s",
            exitID, exit.m_codeOrigin.bytecodeIndex,
            exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
            toCString(ignoringContext<DumpContext>(exit.m_values)).data(),
            toCString(*record).data()));
}
static void compileRecovery(
    CCallHelpers& jit, const ExitValue& value, StackMaps::Record* record, StackMaps& stackmaps,
    char* registerScratch,
    const HashMap<ExitTimeObjectMaterialization*, EncodedJSValue*>& materializationToPointer)
{
    switch (value.kind()) {
    case ExitValueDead:
        jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), GPRInfo::regT0);
        break;

    case ExitValueConstant:
        jit.move(MacroAssembler::TrustedImm64(JSValue::encode(value.constant())), GPRInfo::regT0);
        break;

    case ExitValueArgument:
        record->locations[value.exitArgument().argument()].restoreInto(
            jit, stackmaps, registerScratch, GPRInfo::regT0);
        break;

    case ExitValueInJSStack:
    case ExitValueInJSStackAsInt32:
    case ExitValueInJSStackAsInt52:
    case ExitValueInJSStackAsDouble:
        jit.load64(AssemblyHelpers::addressFor(value.virtualRegister()), GPRInfo::regT0);
        break;

    case ExitValueRecovery:
        record->locations[value.rightRecoveryArgument()].restoreInto(
            jit, stackmaps, registerScratch, GPRInfo::regT1);
        record->locations[value.leftRecoveryArgument()].restoreInto(
            jit, stackmaps, registerScratch, GPRInfo::regT0);
        switch (value.recoveryOpcode()) {
        case AddRecovery:
            switch (value.recoveryFormat()) {
            case DataFormatInt32:
                jit.add32(GPRInfo::regT1, GPRInfo::regT0);
                break;
            case DataFormatInt52:
                jit.add64(GPRInfo::regT1, GPRInfo::regT0);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            break;
        case SubRecovery:
            switch (value.recoveryFormat()) {
            case DataFormatInt32:
                jit.sub32(GPRInfo::regT1, GPRInfo::regT0);
                break;
            case DataFormatInt52:
                jit.sub64(GPRInfo::regT1, GPRInfo::regT0);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        break;

    case ExitValueMaterializeNewObject:
        jit.loadPtr(materializationToPointer.get(value.objectMaterialization()), GPRInfo::regT0);
        break;

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    reboxAccordingToFormat(
        value.dataFormat(), jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2);
}
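
// Illustrative sketch (not part of this file): the 64-bit value encoding that
// reboxAccordingToFormat() emits as machine code in the recoveries above. An int32 is
// boxed by tagging its zero-extended payload with TagTypeNumber; a double is boxed by
// offsetting its raw bits so its top 16 bits can never collide with the number tag
// (0xffff...) or the pointer tag (0x0000...). Subtracting TagTypeNumber is the same as
// adding 2^48 mod 2^64. Host-side C++ equivalents; names are local to this sketch.
static inline uint64_t boxInt32Sketch(int32_t value)
{
    return static_cast<uint32_t>(value) | TagTypeNumber;
}

static inline uint64_t boxDoubleSketch(double value)
{
    uint64_t bits;
    memcpy(&bits, &value, sizeof(bits));
    return bits - TagTypeNumber;
}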