void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));
    
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);
    
    AssemblyHelpers::Jump tooFewFails;
    
    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));
    
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));
    
    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()));
    
    reoptimizeNow.link(&jit);
    
    // Reoptimize as soon as possible.
#if !NUMBER_OF_ARGUMENT_REGISTERS
    jit.poke(GPRInfo::regT0);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    ASSERT(GPRInfo::argumentGPR0 != GPRInfo::regT1);
#endif
    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::regT1);
    jit.call(GPRInfo::regT1);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();
    
    tooFewFails.link(&jit);
    
    // Adjust the execution counter such that the target is to only optimize after a while.
    int32_t activeThreshold =
        jit.baselineCodeBlock()->adjustedCounterValue(
            Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue =
        ExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));
    
    doneAdjusting.link(&jit);
}

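// The decision logic this emits, restated as plain C++ for readability (a
// sketch only, not code from the tree; field names mirror the offsetOf*
// accessors above):
//
//     unsigned exitCount = ++codeBlock->osrExitCounter;
//     if (baselineCodeBlock->jitExecuteCounter >= 0
//         || exitCount > codeBlock->exitCountThresholdForReoptimization())
//         triggerReoptimizationNow(baselineCodeBlock);
//     else
//         /* push the baseline counters out to a long warm-up */;
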
void OSRExitCompiler::handleExitCounts(const OSRExit& exit)
{
    m_jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));
    
    m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.codeBlock()), GPRInfo::regT0);
    
    AssemblyHelpers::JumpList tooFewFails;
    
    if (exit.m_kind == InadequateCoverage) {
        // Proceed based on the assumption that we can profitably optimize this code once
        // it has executed enough times.
        
        m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfForcedOSRExitCounter()), GPRInfo::regT2);
        m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
        m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
        m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1);
        m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfForcedOSRExitCounter()));
        m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
        
        tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(Options::forcedOSRExitCountForReoptimization)));
    } else {
        // Proceed based on the assumption that we can handle these exits so long as they
        // don't get too frequent.
        
        m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2);
        m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
        m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
        m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1);
        m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()));
        m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
        
        m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0);
        
        tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(m_jit.codeBlock()->largeFailCountThreshold())));
        m_jit.mul32(AssemblyHelpers::TrustedImm32(Options::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2);
        tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1));
    }
    
    // Reoptimize as soon as possible.
    m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    AssemblyHelpers::Jump doneAdjusting = m_jit.jump();
    
    tooFewFails.link(&m_jit);
    
    // Adjust the execution counter such that the target is to only optimize after a while.
    int32_t targetValue = ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
        m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp(),
        m_jit.baselineCodeBlock());
    m_jit.store32(AssemblyHelpers::TrustedImm32(-targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    m_jit.store32(AssemblyHelpers::TrustedImm32(targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    m_jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(targetValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));
    
    doneAdjusting.link(&m_jit);
}

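// What the non-InadequateCoverage path computes, restated as plain C++ (a
// sketch only; counter names mirror the offsetOf* accessors used above, and
// wrap-around of the 32-bit counters is ignored):
//
//     unsigned fails = ++speculativeFailCounter;
//     unsigned successes = --speculativeSuccessCounter;
//     if (fails > codeBlock->largeFailCountThreshold()
//         && fails * Options::desiredSpeculativeSuccessFailRatio > successes) {
//         // Reoptimize as soon as possible: zero the execution counters.
//     } else {
//         // Too few fails: back off to a long warm-up before trying again.
//     }
//
// InadequateCoverage exits use the simpler test against
// Options::forcedOSRExitCountForReoptimization, since those exits indicate the
// profiling data was thin rather than wrong.
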
static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch)
{
    AssemblyHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(owner);

    // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
#if CPU(X86)
    jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    jit.setupArgumentsWithExecState(owner);
    jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch);
    jit.call(scratch);

#if CPU(X86)
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    ownerIsRememberedOrInEden.link(&jit);
}

static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2)
{
    AssemblyHelpers::Jump definitelyNotMarked = jit.genericWriteBarrier(owner, scratch1, scratch2);

    // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
#if CPU(X86)
    jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    jit.setupArgumentsWithExecState(owner);
    jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch1);
    jit.call(scratch1);

#if CPU(X86)
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    definitelyNotMarked.link(&jit);
}

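// Both osrWriteBarrier variants above emit the same shape of code: a fast-path
// check that proves the barrier is unnecessary (jumpIfIsRememberedOrInEden in
// the first, genericWriteBarrier in the second), then a slow-path call into
// the runtime. Schematically (a sketch; the fast-path predicate name is
// illustrative, not a real helper):
//
//     if (!ownerDefinitelyDoesNotNeedBarrier(owner))
//         operationOSRWriteBarrier(exec, owner); // slow path: record the store for the GC
//
// The x86-only stack adjustment reserves the slots that poke()-based argument
// setup in setupArgumentsWithExecState writes the outgoing C arguments into,
// since x86 passes arguments on the stack rather than in registers.
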
static void generateRegisterRestoration(AssemblyHelpers& jit)
{
#if ENABLE(FTL_JIT)
    RegisterSet toSave = registersToPreserve();
    ptrdiff_t offset = registerPreservationOffset();
    
    ASSERT(!toSave.get(GPRInfo::regT4));
    
    // We need to place the stack pointer back to where the caller thought they left it.
    // But also, in order to recover the registers, we need to figure out how big the
    // arguments area is.
    
    jit.load32(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (JSStack::ArgumentCount - JSStack::CallerFrameAndPCSize) * sizeof(Register) + PayloadOffset),
        GPRInfo::regT4);
    
    jit.move(GPRInfo::regT4, GPRInfo::regT2);
    jit.lshift32(AssemblyHelpers::TrustedImm32(3), GPRInfo::regT2);
    jit.addPtr(AssemblyHelpers::TrustedImm32(offset), AssemblyHelpers::stackPointerRegister);
    jit.addPtr(AssemblyHelpers::stackPointerRegister, GPRInfo::regT2);
    
    // We saved things at:
    //
    //     adjSP + (JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + NumArgs) * 8
    //
    // Where:
    //
    //     adjSP = origSP - offset
    //
    // regT2 now points at:
    //
    //     origSP + NumArgs * 8
    //     = adjSP + offset + NumArgs * 8
    //
    // So if we subtract offset and then add JSStack::CallFrameHeaderSize and subtract
    // JSStack::CallerFrameAndPCSize, we'll get the thing we want.
    
    ptrdiff_t currentOffset = -offset + sizeof(Register) * (
        JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize);
    jit.loadPtr(AssemblyHelpers::Address(GPRInfo::regT2, currentOffset), GPRInfo::regT1);
    
    for (GPRReg gpr = AssemblyHelpers::firstRegister(); gpr <= AssemblyHelpers::lastRegister(); gpr = static_cast<GPRReg>(gpr + 1)) {
        if (!toSave.get(gpr))
            continue;
        currentOffset += sizeof(Register);
        jit.load64(AssemblyHelpers::Address(GPRInfo::regT2, currentOffset), gpr);
    }
    
    // Thunks like this rely on the ArgumentCount being intact. Pay it forward.
    jit.store32(
        GPRInfo::regT4,
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (JSStack::ArgumentCount - JSStack::CallerFrameAndPCSize) * sizeof(Register) + PayloadOffset));
    
    if (!ASSERT_DISABLED) {
        AssemblyHelpers::Jump ok = jit.branchPtr(
            AssemblyHelpers::Above, GPRInfo::regT1, AssemblyHelpers::TrustedImmPtr(static_cast<size_t>(0x1000)));
        jit.breakpoint();
        ok.link(&jit);
    }
    
    jit.jump(GPRInfo::regT1);
#else // ENABLE(FTL_JIT)
    UNUSED_PARAM(jit);
    UNREACHABLE_FOR_PLATFORM();
#endif // ENABLE(FTL_JIT)
}

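// A worked instance of the offset arithmetic above, with illustrative numbers
// only (the real constants depend on the revision): suppose offset = 24 and
// (JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize) = 4 slots of
// 8 bytes each. Then currentOffset = -24 + 4 * 8 = 8, so the first loadPtr
// reads regT2 + 8, i.e. the first value the preservation thunk stashed: the
// return target that jit.jump(GPRInfo::regT1) resumes at. Each preserved GPR
// is then reloaded from the following 8-byte slots in register order.
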
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));
    
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);
    
    AssemblyHelpers::Jump tooFewFails;
    
    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));
    
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));
    
    // We want to figure out if there's a possibility that we're in a loop. For the outermost
    // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
    // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
    // problem is the inlined functions, which might also have loops, but whose baseline versions
    // don't know where to look for the exit count. Figure out if those loops are severe enough
    // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
    // Otherwise, we should use the normal reoptimization trigger.
    
    AssemblyHelpers::JumpList loopThreshold;
    
    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) {
        loopThreshold.append(
            jit.branchTest8(
                AssemblyHelpers::NonZero,
                AssemblyHelpers::AbsoluteAddress(
                    inlineCallFrame->executable->addressOfDidTryToEnterInLoop())));
    }
    
    jit.move(
        AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()),
        GPRInfo::regT1);
    
    if (!loopThreshold.empty()) {
        AssemblyHelpers::Jump done = jit.jump();
        
        loopThreshold.link(&jit);
        jit.move(
            AssemblyHelpers::TrustedImm32(
                jit.codeBlock()->exitCountThresholdForReoptimizationFromLoop()),
            GPRInfo::regT1);
        
        done.link(&jit);
    }
    
    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);
    
    reoptimizeNow.link(&jit);
    
    // Reoptimize as soon as possible.
#if !NUMBER_OF_ARGUMENT_REGISTERS
    jit.poke(GPRInfo::regT0);
    jit.poke(AssemblyHelpers::TrustedImmPtr(&exit), 1);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    jit.move(AssemblyHelpers::TrustedImmPtr(&exit), GPRInfo::argumentGPR1);
#endif
    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();
    
    tooFewFails.link(&jit);
    
    // Adjust the execution counter such that the target is to only optimize after a while.
    int32_t activeThreshold =
        jit.baselineCodeBlock()->adjustedCounterValue(
            Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue;
    switch (jit.codeBlock()->jitType()) {
    case JITCode::DFGJIT:
        clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    case JITCode::FTLJIT:
        clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        clippedValue = 0; // Make some compilers, and mhahnenberg, happy.
#endif
        break;
    }
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));
    
    doneAdjusting.link(&jit);
}

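// Relative to the earlier version of handleExitCounts, the new wrinkle is
// threshold selection: the exit count is compared against a loop-specific
// threshold whenever any function on the inline stack ever attempted OSR entry
// in a loop. As a sketch in plain C++ (the didTryToEnterInLoop() accessor name
// is an assumption inferred from addressOfDidTryToEnterInLoop above):
//
//     bool sawLoopEntry = false;
//     for (InlineCallFrame* f = exit.m_codeOrigin.inlineCallFrame; f; f = f->caller.inlineCallFrame)
//         sawLoopEntry |= f->executable->didTryToEnterInLoop();
//     unsigned threshold = sawLoopEntry
//         ? codeBlock->exitCountThresholdForReoptimizationFromLoop()
//         : codeBlock->exitCountThresholdForReoptimization();
//     if (baselineCodeBlock->jitExecuteCounter >= 0 || exitCount > threshold)
//         triggerReoptimizationNow(baselineCodeBlock, &exit);
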
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));
    
    CodeOrigin codeOrigin;
    for (codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
        unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex;
        CallLinkInfo& callLinkInfo = baselineCodeBlockForCaller->getCallLinkInfo(callBytecodeIndex);
        void* jumpTarget = callLinkInfo.callReturnLocation.executableAddress();
        
        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;
        
#if USE(JSVALUE64)
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        
        // Leave the captured arguments in regT3.
        if (baselineCodeBlock->usesArguments())
            jit.loadPtr(AssemblyHelpers::addressFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
#else // USE(JSVALUE64)
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex;
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        
        // Leave the captured arguments in regT3.
        if (baselineCodeBlock->usesArguments())
            jit.loadPtr(AssemblyHelpers::payloadFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
#endif // USE(JSVALUE64)
        
        if (baselineCodeBlock->usesArguments()) {
            AssemblyHelpers::Jump noArguments = jit.branchTestPtr(AssemblyHelpers::Zero, GPRInfo::regT3);
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
            jit.storePtr(GPRInfo::regT0, AssemblyHelpers::Address(GPRInfo::regT3, Arguments::offsetOfRegisters()));
            noArguments.link(&jit);
        }
    }
    
#if USE(JSVALUE64)
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
#else
    Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin.bytecodeIndex;
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
#endif
    jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
}

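// Each iteration of the loop above rebuilds one baseline call frame in place.
// Schematically, for an inline frame at stackOffset S it fills in (slot names
// per the JSStack constants used above; this is a summary, not additional
// emitted code):
//
//     frame[S + JSStack::CodeBlock]     = baselineCodeBlock
//     frame[S + JSStack::ScopeChain]    = calleeConstant()->scope()  (non-closure calls)
//     frame[S + JSStack::Callee]        = calleeConstant()           (non-closure calls)
//     frame[S + JSStack::ArgumentCount] = { tag: return-site location bits,
//                                           payload: inlineCallFrame->arguments.size() }
//     caller frame / return PC slots    = callerFrameGPR and jumpTarget
//
// Closure calls skip the Callee and ScopeChain stores because those values are
// not compile-time constants; they are filled in by OSR value recovery instead.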