// Emits the counting/back-off tail of an OSR exit. It increments the per-exit
// counter and the CodeBlock's aggregate OSR exit counter, then either calls
// triggerReoptimizationNow (when the baseline execute counter is already >= 0,
// or when the exit count exceeds the reoptimization threshold) or resets the
// baseline CodeBlock's execution-counter state so the next optimization
// attempt happens only after a longer warm-up.
//
// Register usage within the emitted code:
//   regT0 - scratch: holds the optimized CodeBlock, then the baseline CodeBlock.
//   regT2 - scratch: holds the incremented OSR exit count.
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    // Count this particular exit site.
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);

    AssemblyHelpers::Jump tooFewFails;

    // Bump the CodeBlock-wide OSR exit counter; the new value stays in regT2
    // for the threshold comparison below.
    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);

    // If the baseline execute counter has already counted up to (or past)
    // zero, reoptimize immediately.
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));

    // Otherwise only reoptimize once we have exited more than the threshold
    // number of times; below-or-equal means "too few fails so far".
    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()));

    reoptimizeNow.link(&jit);

    // Reoptimize as soon as possible.
#if !NUMBER_OF_ARGUMENT_REGISTERS
    // No argument registers on this target: pass the baseline CodeBlock on
    // the stack.
    jit.poke(GPRInfo::regT0);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    // regT1 is about to hold the callee; it must not alias the argument.
    ASSERT(GPRInfo::argumentGPR0 != GPRInfo::regT1);
#endif
    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::regT1);
    jit.call(GPRInfo::regT1);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();
    tooFewFails.link(&jit);

    // Adjust the execution counter such that the target is to only optimize after a while.
    int32_t activeThreshold = jit.baselineCodeBlock()->adjustedCounterValue( Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt( activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue = ExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
    // The execute counter counts up toward zero, so store the negated
    // threshold; also reset the active-threshold and formatted total-count
    // state so the counter machinery stays consistent.
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

    doneAdjusting.link(&jit);
}
// Emits the fast path that sets up a varargs call frame: it materializes the
// argument count, bounds-checks it, computes the new frame (via
// emitSetVarargsFrame), checks the stack limit, stores ArgumentCount, and
// copies the arguments (minus firstVarArgOffset) into the new frame with an
// emitted loop. Any failure (too many arguments, stack overflow) jumps to
// slowCase.
//
// Register usage within the emitted code:
//   scratchGPR1 - the (adjusted) argument count, then the copy-loop index.
//   scratchGPR2 - the new frame pointer produced by emitSetVarargsFrame.
//   scratchGPR3 - per-slot copy scratch.
void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, ValueRecovery argCountRecovery, VirtualRegister firstArgumentReg, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
{
    CCallHelpers::JumpList end;

    // Load the argument count into scratchGPR1, either as a compile-time
    // constant or from the recovered virtual register's payload.
    if (argCountRecovery.isConstant()) {
        // FIXME: We could constant-fold a lot of the computation below in this case.
        // https://bugs.webkit.org/show_bug.cgi?id=141486
        jit.move(CCallHelpers::TrustedImm32(argCountRecovery.constant().asInt32()), scratchGPR1);
    } else
        jit.load32(CCallHelpers::payloadFor(argCountRecovery.virtualRegister()), scratchGPR1);

    if (firstVarArgOffset) {
        // Skip the first firstVarArgOffset arguments; if there aren't enough
        // of them, the effective count is just 1 (the `this` slot).
        CCallHelpers::Jump sufficientArguments = jit.branch32(CCallHelpers::GreaterThan, scratchGPR1, CCallHelpers::TrustedImm32(firstVarArgOffset + 1));
        jit.move(CCallHelpers::TrustedImm32(1), scratchGPR1);
        CCallHelpers::Jump endVarArgs = jit.jump();
        sufficientArguments.link(&jit);
        jit.sub32(CCallHelpers::TrustedImm32(firstVarArgOffset), scratchGPR1);
        endVarArgs.link(&jit);
    }

    // Too many arguments for the fast path: bail to the slow case.
    slowCase.append(jit.branch32(CCallHelpers::Above, scratchGPR1, CCallHelpers::TrustedImm32(maxArguments + 1)));

    emitSetVarargsFrame(jit, scratchGPR1, true, numUsedSlotsGPR, scratchGPR2);

    // Stack-limit check on the new frame pointer.
    slowCase.append(jit.branchPtr(CCallHelpers::Above, CCallHelpers::AbsoluteAddress(jit.vm()->addressOfStackLimit()), scratchGPR2));

    // Initialize ArgumentCount.
    jit.store32(scratchGPR1, CCallHelpers::Address(scratchGPR2, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset));

    // Copy arguments.
    jit.signExtend32ToPtr(scratchGPR1, scratchGPR1);
    // Pre-decrement past the `this` slot; a count of zero means nothing to copy.
    CCallHelpers::Jump done = jit.branchSubPtr(CCallHelpers::Zero, CCallHelpers::TrustedImm32(1), scratchGPR1);
    // scratchGPR1: argumentCount

    // Copy loop: walks scratchGPR1 down to zero, moving one Register slot per
    // iteration from the caller's frame to the new frame.
    CCallHelpers::Label copyLoop = jit.label();
    int argOffset = (firstArgumentReg.offset() - 1 + firstVarArgOffset) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
    // 64-bit: each JSValue is a single 8-byte load/store.
    jit.load64(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset), scratchGPR3);
    jit.store64(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
#else // USE(JSVALUE64), so this begins the 32-bit case
    // 32-bit: copy tag and payload halves separately.
    jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + TagOffset), scratchGPR3);
    jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + TagOffset));
    jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + PayloadOffset), scratchGPR3);
    jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + PayloadOffset));
#endif // USE(JSVALUE64), end of 32-bit case
    jit.branchSubPtr(CCallHelpers::NonZero, CCallHelpers::TrustedImm32(1), scratchGPR1).linkTo(copyLoop, &jit);

    done.link(&jit);
}
// Emits the counting/back-off tail of an OSR exit (newer revision: the exit
// threshold is selected dynamically, distinguishing loopy inlined code, and
// triggerReoptimizationNow receives both the baseline CodeBlock and the exit).
// It increments the per-exit counter and the CodeBlock's aggregate OSR exit
// counter, then either calls triggerReoptimizationNow (baseline execute
// counter already >= 0, or exit count above the chosen threshold) or resets
// the baseline CodeBlock's execution-counter state to delay the next
// optimization attempt.
//
// Register usage within the emitted code:
//   regT0 - scratch: optimized CodeBlock, then baseline CodeBlock.
//   regT1 - scratch: the selected exit-count threshold.
//   regT2 - scratch: the incremented OSR exit count.
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    // Count this particular exit site.
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);

    AssemblyHelpers::Jump tooFewFails;

    // Bump the CodeBlock-wide OSR exit counter; the new value stays in regT2
    // for the threshold comparison below.
    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);

    // If the baseline execute counter has already counted up to (or past)
    // zero, reoptimize immediately.
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));

    // We want to figure out if there's a possibility that we're in a loop. For the outermost
    // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
    // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
    // problem is the inlined functions, which might also have loops, but whose baseline versions
    // don't know where to look for the exit count. Figure out if those loops are severe enough
    // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
    // Otherwise, we should use the normal reoptimization trigger.

    // Collect one jump per enclosing inlined frame whose executable recorded a
    // loop OSR-entry attempt.
    AssemblyHelpers::JumpList loopThreshold;

    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) {
        loopThreshold.append(
            jit.branchTest8(
                AssemblyHelpers::NonZero,
                AssemblyHelpers::AbsoluteAddress(
                    inlineCallFrame->executable->addressOfDidTryToEnterInLoop())));
    }

    // Default: the normal reoptimization threshold goes into regT1.
    jit.move(
        AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()),
        GPRInfo::regT1);

    if (!loopThreshold.empty()) {
        AssemblyHelpers::Jump done = jit.jump();

        // Any inlined frame that tried to loop-OSR-enter uses the (typically
        // different) loop reoptimization threshold instead.
        loopThreshold.link(&jit);
        jit.move(
            AssemblyHelpers::TrustedImm32(
                jit.codeBlock()->exitCountThresholdForReoptimizationFromLoop()),
            GPRInfo::regT1);

        done.link(&jit);
    }

    // Only reoptimize once the exit count exceeds the selected threshold;
    // below-or-equal means "too few fails so far".
    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);

    reoptimizeNow.link(&jit);

    // Reoptimize as soon as possible.
#if !NUMBER_OF_ARGUMENT_REGISTERS
    // No argument registers on this target: pass (baseline CodeBlock, exit)
    // on the stack.
    jit.poke(GPRInfo::regT0);
    jit.poke(AssemblyHelpers::TrustedImmPtr(&exit), 1);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    jit.move(AssemblyHelpers::TrustedImmPtr(&exit), GPRInfo::argumentGPR1);
#endif
    // nonArgGPR0 holds the callee so it cannot clobber either argument register.
    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();
    tooFewFails.link(&jit);

    // Adjust the execution counter such that the target is to only optimize after a while.
    int32_t activeThreshold = jit.baselineCodeBlock()->adjustedCounterValue( Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt( activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue;
    // The clipping policy depends on which tier we are exiting from.
    switch (jit.codeBlock()->jitType()) {
    case JITCode::DFGJIT:
        clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    case JITCode::FTLJIT:
        clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        clippedValue = 0; // Make some compilers, and mhahnenberg, happy.
#endif
        break;
    }
    // The execute counter counts up toward zero, so store the negated
    // threshold; also reset the active-threshold and formatted total-count
    // state so the counter machinery stays consistent.
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

    doneAdjusting.link(&jit);
}