void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    // === Stage 1 - Function header code generation ===
    //
    // This code currently matches the old JIT. In the function header we need to
    // pop the return address (since we do not allow any recursion on the machine
    // stack), and perform a fast register file check.

    // This is the main entry point, without performing an arity check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the register file
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    preserveReturnAddressAfterCall(GPRInfo::regT2);
    emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);

    // Set up a pointer to the codeblock in the CallFrameHeader.
    emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);

    // Plant a check that sufficient space is available in the RegisterFile.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), GPRInfo::regT1);
    // Return here after register file check.
    Label fromRegisterFileCheck = label();


    // === Stage 2 - Function body code generation ===
    //
    // We generate the speculative code path, followed by the non-speculative
    // code for the function. Next we need to link the two together, making
    // bail-outs from the speculative path jump to the corresponding point on
    // the non-speculative one (and generating any code necessary to juggle
    // register values around, rebox values, and ensure values are spilled,
    // to match the non-speculative path's requirements).

#if DFG_JIT_BREAK_ON_EVERY_FUNCTION
    // Handy debug tool!
    breakpoint();
#endif

    // First generate the speculative path.
    Label speculativePathBegin = label();
    SpeculativeJIT speculative(*this);
#if !DFG_DEBUG_LOCAL_DISBALE_SPECULATIVE
    bool compiledSpeculative = speculative.compile();
#else
    bool compiledSpeculative = false;
#endif

    // Next, generate the non-speculative path. We pass this a SpeculationCheckIndexIterator
    // to allow it to check which nodes in the graph may bail out, and may need to reenter the
    // non-speculative path.
    if (compiledSpeculative) {
        SpeculationCheckIndexIterator checkIterator(speculative.speculationChecks());
        NonSpeculativeJIT nonSpeculative(*this);
        nonSpeculative.compile(checkIterator);

        // Link the bail-outs from the speculative path to the corresponding entry points into the non-speculative one.
        linkSpeculationChecks(speculative, nonSpeculative);
    } else {
        // If compilation through the SpeculativeJIT failed, throw away the code we generated.
        m_calls.clear();
        rewindToLabel(speculativePathBegin);

        SpeculationCheckVector noChecks;
        SpeculationCheckIndexIterator checkIterator(noChecks);
        NonSpeculativeJIT nonSpeculative(*this);
        nonSpeculative.compile(checkIterator);
    }

    // === Stage 3 - Function footer code generation ===
    //
    // Generate code to look up and jump to exception handlers, to perform the slow
    // register file check (if the fast one in the function header fails), and
    // generate the entry point with arity check.
    // Iterate over the m_calls vector, checking for exception checks,
    // and linking them to here.
    unsigned exceptionCheckCount = 0;
    for (unsigned i = 0; i < m_calls.size(); ++i) {
        Jump& exceptionCheck = m_calls[i].m_exceptionCheck;
        if (exceptionCheck.isSet()) {
            exceptionCheck.link(this);
            ++exceptionCheckCount;
        }
    }

    // If any exception checks were linked, generate code to look up a handler.
    if (exceptionCheckCount) {
        // lookupExceptionHandler is passed two arguments, exec (the CallFrame*), and
        // an identifier for the operation that threw the exception, which we can use
        // to look up handler information. The identifier we use is the return address
        // of the call out from JIT code that threw the exception; this is still
        // available on the stack, just below the stack pointer!
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
        peek(GPRInfo::argumentGPR1, -1);
        m_calls.append(CallRecord(call(), lookupExceptionHandler));
        // lookupExceptionHandler leaves the handler CallFrame* in the returnValueGPR,
        // and the address of the handler in returnValueGPR2.
        jump(GPRInfo::returnValueGPR2);
    }

    // Generate the register file check; if the fast check in the function header fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    registerFileCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callRegisterFileCheck = call();
    jump(fromRegisterFileCheck);

    // The fast entry point into a function does not check that the correct number of
    // arguments has been passed to the call (we only use the fast entry point where we
    // can statically determine that the correct number of arguments has been passed,
    // or have already checked). In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    preserveReturnAddressAfterCall(GPRInfo::regT2);
    emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
    branch32(Equal, GPRInfo::regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callArityCheck = call();
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);


    // === Stage 4 - Link ===
    //
    // Link the code, populate data in CodeBlock data structures.

    LinkBuffer linkBuffer(*m_globalData, this, m_globalData->executableAllocator);

#if DFG_DEBUG_VERBOSE
    fprintf(stderr, "JIT code start at %p\n", linkBuffer.debugAddress());
#endif

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    if (m_codeBlock->needsCallReturnIndices()) {
        m_codeBlock->callReturnIndexVector().reserveCapacity(exceptionCheckCount);
        for (unsigned i = 0; i < m_calls.size(); ++i) {
            if (m_calls[i].m_exceptionCheck.isSet()) {
                unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_calls[i].m_call);
                unsigned exceptionInfo = m_calls[i].m_exceptionInfo;
                m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
            }
        }
    }

    // FIXME: switch the register file check & arity check over to DFGOperation style calls, not JIT stubs.
    linkBuffer.link(callRegisterFileCheck, cti_register_file_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = linkBuffer.finalizeCode();
}
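
// ---------------------------------------------------------------------------
// Illustrative sketch (not JSC code): why compileFunction produces two entry
// points. A call site that can statically prove the argument count is correct
// may jump straight to `entry`; every other call site must route through
// `entryWithArityCheck`, which fixes the frame up before falling through into
// the shared body (the `fromArityCheck` label above). All names below are
// hypothetical stand-ins for illustration only.

#include <cstdio>

struct CompiledFunction {
    void (*entry)(int argc);               // fast path: argc known to be correct
    void (*entryWithArityCheck)(int argc); // slow path: validates argc first
    int expectedArgc;
};

static void body(int argc)
{
    std::printf("function body running with %d arguments\n", argc);
}

static void arityCheckedEntry(int argc)
{
    // Real JIT code would grow or shuffle the call frame here; this sketch
    // only reports the count before entering the shared body.
    std::printf("arity check entry: received %d arguments\n", argc);
    body(argc);
}

int main()
{
    CompiledFunction f = { body, arityCheckedEntry, 2 };
    int argc = 3;
    // A caller picks its entry point based on what it can prove statically.
    if (argc == f.expectedArgc)
        f.entry(argc);
    else
        f.entryWithArityCheck(argc);
    return 0;
}
// ---------------------------------------------------------------------------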
void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("JIT code for %p start at [%p, %p). Size = %zu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
#endif

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    m_codeBlock->callReturnIndexVector().reserveCapacity(m_exceptionChecks.size());
    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
        CodeOrigin codeOrigin = m_exceptionChecks[i].m_codeOrigin;
        while (codeOrigin.inlineCallFrame)
            codeOrigin = codeOrigin.inlineCallFrame->caller;
        unsigned exceptionInfo = codeOrigin.bytecodeIndex;
        m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
    }

    Vector<CodeOriginAtCallReturnOffset>& codeOrigins = m_codeBlock->codeOrigins();
    codeOrigins.resize(m_exceptionChecks.size());
    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        CallExceptionRecord& record = m_exceptionChecks[i];
        unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
        codeOrigins[i].codeOrigin = record.m_codeOrigin;
        codeOrigins[i].callReturnOffset = returnAddressOffset;
        record.m_token.assertCodeOriginIndex(i);
    }

    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccesses.size());
    for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->call());
        info.codeOrigin = m_propertyAccesses[i].m_codeOrigin;
        info.callReturnLocation = callReturnLocation;
        info.patch.dfg.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_structureImm), callReturnLocation);
        info.patch.dfg.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_structureCheck));
#if USE(JSVALUE64)
        info.patch.dfg.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_loadOrStore));
#else
        info.patch.dfg.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_tagLoadOrStore));
        info.patch.dfg.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_payloadLoadOrStore));
#endif
        info.patch.dfg.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->label()));
        info.patch.dfg.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_done));
        info.patch.dfg.baseGPR = m_propertyAccesses[i].m_baseGPR;
#if USE(JSVALUE64)
        info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#else
        info.patch.dfg.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
        info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#endif
        info.patch.dfg.scratchGPR = m_propertyAccesses[i].m_scratchGPR;
        info.patch.dfg.registersFlushed = m_propertyAccesses[i].m_registerMode == PropertyAccessRecord::RegistersFlushed;
    }

    m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_jsCalls[i].m_callType;
        info.isDFG = true;
        info.callReturnLocation = CodeLocationLabel(linkBuffer.locationOf(m_jsCalls[i].m_slowCall));
        info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
        info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
    }

    MacroAssemblerCodeRef osrExitThunk = globalData()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
        OSRExit& exit = codeBlock()->osrExit(i);
        linkBuffer.link(exit.m_check.lateJump(), target);
        exit.m_check.correctLateJump(linkBuffer);
        if (exit.m_watchpointIndex != std::numeric_limits<unsigned>::max())
            codeBlock()->watchpoint(exit.m_watchpointIndex).correctLabels(linkBuffer);
    }

    codeBlock()->shrinkToFit(CodeBlock::LateShrink);
}
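
// ---------------------------------------------------------------------------
// Illustrative sketch (not JSC code): the delta-based patch info recorded in
// StructureStubInfo above. Rather than absolute addresses, the stub stores
// signed byte offsets from the slow-path call's return location to each
// instruction it may later repatch; the offsets stay valid wherever the code
// blob is placed, since distances within it are preserved. All names below
// are hypothetical stand-ins.

#include <cstdint>
#include <cstdio>

static int32_t differenceBetween(const char* from, const char* to)
{
    return static_cast<int32_t>(to - from);
}

int main()
{
    char codeBlob[64] = {};                    // stands in for emitted machine code
    const char* structureImm = codeBlob + 4;   // structure immediate to repatch
    const char* callReturn = codeBlob + 20;    // return location of the slow call
    const char* doneLabel = codeBlob + 36;     // end of the fast path

    // Link time: record deltas, mirroring deltaCheckImmToCall / deltaCallToDone.
    int32_t deltaCheckImmToCall = differenceBetween(structureImm, callReturn);
    int32_t deltaCallToDone = differenceBetween(callReturn, doneLabel);

    // Repatch time: recover absolute locations from the call return address.
    const char* recoveredImm = callReturn - deltaCheckImmToCall;
    const char* recoveredDone = callReturn + deltaCallToDone;
    std::printf("imm recovered: %d, done recovered: %d\n",
        recoveredImm == structureImm, recoveredDone == doneLabel);
    return 0;
}
// ---------------------------------------------------------------------------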
void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("JIT code for %p start at [%p, %p). Size = %zu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
#endif

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    m_codeBlock->callReturnIndexVector().reserveCapacity(m_exceptionChecks.size());
    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
        CodeOrigin codeOrigin = m_exceptionChecks[i].m_codeOrigin;
        while (codeOrigin.inlineCallFrame)
            codeOrigin = codeOrigin.inlineCallFrame->caller;
        unsigned exceptionInfo = codeOrigin.bytecodeIndex;
        m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
    }

    Vector<CodeOriginAtCallReturnOffset, 0, UnsafeVectorOverflow>& codeOrigins = m_codeBlock->codeOrigins();
    codeOrigins.resize(m_exceptionChecks.size());
    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        CallExceptionRecord& record = m_exceptionChecks[i];
        unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
        codeOrigins[i].codeOrigin = record.m_codeOrigin;
        codeOrigins[i].callReturnOffset = returnAddressOffset;
    }

    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccesses.size());
    for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->call());
        info.codeOrigin = m_propertyAccesses[i].m_codeOrigin;
        info.callReturnLocation = callReturnLocation;
        info.patch.dfg.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_structureImm), callReturnLocation);
        info.patch.dfg.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_structureCheck));
#if USE(JSVALUE64)
        info.patch.dfg.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_loadOrStore));
#else
        info.patch.dfg.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_tagLoadOrStore));
        info.patch.dfg.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_payloadLoadOrStore));
#endif
        info.patch.dfg.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->label()));
        info.patch.dfg.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_done));
        info.patch.dfg.deltaCallToStorageLoad = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_propertyStorageLoad));
        info.patch.dfg.baseGPR = m_propertyAccesses[i].m_baseGPR;
#if USE(JSVALUE64)
        info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#else
        info.patch.dfg.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
        info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#endif
        m_propertyAccesses[i].m_usedRegisters.copyInfo(info.patch.dfg.usedRegisters);
        info.patch.dfg.registersFlushed = m_propertyAccesses[i].m_registerMode == PropertyAccessRecord::RegistersFlushed;
    }

    m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_jsCalls[i].m_callType;
        info.isDFG = true;
        info.codeOrigin = m_jsCalls[i].m_codeOrigin;
        linkBuffer.link(m_jsCalls[i].m_slowCall, FunctionPtr((m_vm->getCTIStub(info.callType == CallLinkInfo::Construct ? linkConstructThunkGenerator : linkCallThunkGenerator)).code().executableAddress()));
        info.callReturnLocation = linkBuffer.locationOfNearCall(m_jsCalls[i].m_slowCall);
        info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
        info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
        info.calleeGPR = static_cast<unsigned>(m_jsCalls[i].m_callee);
    }

    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
        OSRExit& exit = codeBlock()->osrExit(i);
        linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
        exit.correctJump(linkBuffer);
        if (exit.m_watchpointIndex != std::numeric_limits<unsigned>::max())
            codeBlock()->watchpoint(exit.m_watchpointIndex).correctLabels(linkBuffer);
    }

    if (m_graph.m_compilation) {
        ASSERT(m_exitSiteLabels.size() == codeBlock()->numberOfOSRExits());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.m_compilation->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    codeBlock()->saveCompilation(m_graph.m_compilation);

    codeBlock()->shrinkToFit(CodeBlock::LateShrink);
}
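
// ---------------------------------------------------------------------------
// Illustrative sketch (not JSC code): the OSR-exit linking pattern above.
// Every exit branch in the compiled code is linked to one shared thunk; the
// thunk identifies which exit fired (modeled here by an index) and dispatches
// to a common exit routine, so no per-exit code needs to be emitted up front.
// The real mechanism for identifying the exit differs; all names below are
// hypothetical stand-ins.

#include <cstdio>
#include <vector>

struct OSRExit {
    unsigned bytecodeIndex; // where baseline execution should resume
};

static std::vector<OSRExit> g_osrExits;

// The shared thunk: a single target servicing every exit site.
static void osrExitThunk(unsigned exitIndex)
{
    const OSRExit& exit = g_osrExits[exitIndex];
    std::printf("OSR exit %u -> resume at bytecode %u\n",
        exitIndex, exit.bytecodeIndex);
}

int main()
{
    g_osrExits = { { 10 }, { 42 }, { 77 } };
    // Mirrors the loop over numberOfOSRExits(): every exit jump is "linked"
    // to the same target and distinguished only by which site fired.
    for (unsigned i = 0; i < g_osrExits.size(); ++i)
        osrExitThunk(i);
    return 0;
}
// ---------------------------------------------------------------------------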
void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
#if DFG_ENABLE(DEBUG_VERBOSE)
    fprintf(stderr, "JIT code for %p start at [%p, %p). Size = %zu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
#endif

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    if (m_codeBlock->needsCallReturnIndices()) {
        m_codeBlock->callReturnIndexVector().reserveCapacity(m_exceptionChecks.size());
        for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
            unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
            CodeOrigin codeOrigin = m_exceptionChecks[i].m_codeOrigin;
            while (codeOrigin.inlineCallFrame)
                codeOrigin = codeOrigin.inlineCallFrame->caller;
            unsigned exceptionInfo = codeOrigin.bytecodeIndex;
            m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
        }
    }

    unsigned numCallsFromInlineCode = 0;
    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        if (m_exceptionChecks[i].m_codeOrigin.inlineCallFrame)
            numCallsFromInlineCode++;
    }

    if (numCallsFromInlineCode) {
        Vector<CodeOriginAtCallReturnOffset>& codeOrigins = m_codeBlock->codeOrigins();
        codeOrigins.resize(numCallsFromInlineCode);
        for (unsigned i = 0, j = 0; i < m_exceptionChecks.size(); ++i) {
            CallExceptionRecord& record = m_exceptionChecks[i];
            if (record.m_codeOrigin.inlineCallFrame) {
                unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
                codeOrigins[j].codeOrigin = record.m_codeOrigin;
                codeOrigins[j].callReturnOffset = returnAddressOffset;
                j++;
            }
        }
    }

    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccesses.size());
    for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_functionCall);
        info.callReturnLocation = callReturnLocation;
        info.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCheckImmToCall), callReturnLocation);
        info.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToStructCheck));
#if USE(JSVALUE64)
        info.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToLoadOrStore));
#else
        info.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToTagLoadOrStore));
        info.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToPayloadLoadOrStore));
#endif
        info.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToSlowCase));
        info.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToDone));
        info.baseGPR = m_propertyAccesses[i].m_baseGPR;
#if USE(JSVALUE64)
        info.valueGPR = m_propertyAccesses[i].m_valueGPR;
#else
        info.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
        info.valueGPR = m_propertyAccesses[i].m_valueGPR;
#endif
        info.scratchGPR = m_propertyAccesses[i].m_scratchGPR;
    }
    m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_jsCalls[i].m_callType;
        info.isDFG = true;
        info.callReturnLocation = CodeLocationLabel(linkBuffer.locationOf(m_jsCalls[i].m_slowCall));
        info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
        info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
    }

    MacroAssemblerCodeRef osrExitThunk = globalData()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
        OSRExit& exit = codeBlock()->osrExit(i);
        linkBuffer.link(exit.m_check.lateJump(), target);
        exit.m_check.correctLateJump(linkBuffer);
    }

    codeBlock()->shrinkWeakReferencesToFit();
    codeBlock()->shrinkWeakReferenceTransitionsToFit();
}
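
// ---------------------------------------------------------------------------
// Illustrative sketch (not JSC code): how the CallReturnOffsetToBytecodeOffset
// records built in the link() variants above are typically consumed. Given
// the return-address offset captured when an exception was thrown, a search
// over the vector (whose callReturnOffset values ascend, since records are
// appended in emission order) recovers the bytecode offset used to find the
// handler. All names below are hypothetical stand-ins under that assumption.

#include <algorithm>
#include <cstdio>
#include <vector>

struct CallReturnOffsetToBytecodeOffset {
    unsigned callReturnOffset;
    unsigned bytecodeOffset;
};

static unsigned bytecodeForReturnOffset(
    const std::vector<CallReturnOffsetToBytecodeOffset>& table, unsigned offset)
{
    // Ascending callReturnOffset lets a binary search locate the record.
    auto it = std::lower_bound(table.begin(), table.end(), offset,
        [](const CallReturnOffsetToBytecodeOffset& entry, unsigned o) {
            return entry.callReturnOffset < o;
        });
    return it != table.end() ? it->bytecodeOffset : 0;
}

int main()
{
    std::vector<CallReturnOffsetToBytecodeOffset> table = {
        { 16, 3 }, { 48, 9 }, { 96, 21 }
    };
    // An exception whose call returned at offset 48 maps back to bytecode 9.
    std::printf("bytecode offset = %u\n", bytecodeForReturnOffset(table, 48));
    return 0;
}
// ---------------------------------------------------------------------------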