void OSRExitCompiler::handleExitCounts(const OSRExit& exit)
{
    m_jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.codeBlock()), GPRInfo::regT0);

    AssemblyHelpers::JumpList tooFewFails;

    if (exit.m_kind == InadequateCoverage) {
        // Proceed based on the assumption that we can profitably optimize this code once
        // it has executed enough times.

        m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfForcedOSRExitCounter()), GPRInfo::regT2);
        m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
        m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
        m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1);
        m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfForcedOSRExitCounter()));
        m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));

        tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(Options::forcedOSRExitCountForReoptimization)));
    } else {
        // Proceed based on the assumption that we can handle these exits so long as they
        // don't get too frequent.

        m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2);
        m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
        m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
        m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1);
        m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()));
        m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));

        m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0);

        tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(m_jit.codeBlock()->largeFailCountThreshold())));
        m_jit.mul32(AssemblyHelpers::TrustedImm32(Options::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2);

        tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1));
    }

    // Reoptimize as soon as possible.
    m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    AssemblyHelpers::Jump doneAdjusting = m_jit.jump();

    tooFewFails.link(&m_jit);

    // Adjust the execution counter such that the target is to only optimize after a while.
    int32_t targetValue = ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
        m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp(),
        m_jit.baselineCodeBlock());
    m_jit.store32(AssemblyHelpers::TrustedImm32(-targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    m_jit.store32(AssemblyHelpers::TrustedImm32(targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    m_jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(targetValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

    doneAdjusting.link(&m_jit);
}
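
// For orientation, a minimal C++ sketch of the reoptimization policy that the
// emitted code above computes in registers on the non-InadequateCoverage path.
// shouldReoptimizeOnSpeculationFailure is an invented name for illustration;
// the counts and thresholds mirror the values loaded into regT1/regT2 above.
// This is a sketch of the heuristic, not JSC source.
static bool shouldReoptimizeOnSpeculationFailure(
    uint32_t failCount, uint32_t successCount,
    uint32_t largeFailCountThreshold, uint32_t desiredSuccessFailRatio)
{
    // First gate: require a large absolute number of speculation failures.
    if (failCount <= largeFailCountThreshold)
        return false;
    // Second gate: failures must also be frequent relative to successes. The
    // emitted code multiplies failCount by the desired ratio and compares it
    // against successCount, which avoids emitting a division.
    return failCount * desiredSuccessFailRatio > successCount;
}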
AssemblyHelpers::JumpList AssemblyHelpers::branchIfNotType(
    JSValueRegs regs, GPRReg tempGPR, const InferredType::Descriptor& descriptor,
    TagRegistersMode mode)
{
    AssemblyHelpers::JumpList result;

    switch (descriptor.kind()) {
    case InferredType::Bottom:
        result.append(jump());
        break;

    case InferredType::Boolean:
        result.append(branchIfNotBoolean(regs, tempGPR));
        break;

    case InferredType::Other:
        result.append(branchIfNotOther(regs, tempGPR));
        break;

    case InferredType::Int32:
        result.append(branchIfNotInt32(regs, mode));
        break;

    case InferredType::Number:
        result.append(branchIfNotNumber(regs, tempGPR, mode));
        break;

    case InferredType::String:
        result.append(branchIfNotCell(regs, mode));
        result.append(branchIfNotString(regs.payloadGPR()));
        break;

    case InferredType::ObjectWithStructure:
        result.append(branchIfNotCell(regs, mode));
        result.append(
            branchStructure(
                NotEqual,
                Address(regs.payloadGPR(), JSCell::structureIDOffset()),
                descriptor.structure()));
        break;

    case InferredType::ObjectWithStructureOrOther: {
        Jump ok = branchIfOther(regs, tempGPR);
        result.append(branchIfNotCell(regs, mode));
        result.append(
            branchStructure(
                NotEqual,
                Address(regs.payloadGPR(), JSCell::structureIDOffset()),
                descriptor.structure()));
        ok.link(this);
        break;
    }

    case InferredType::Object:
        result.append(branchIfNotCell(regs, mode));
        result.append(branchIfNotObject(regs.payloadGPR()));
        break;

    case InferredType::ObjectOrOther: {
        Jump ok = branchIfOther(regs, tempGPR);
        result.append(branchIfNotCell(regs, mode));
        result.append(branchIfNotObject(regs.payloadGPR()));
        ok.link(this);
        break;
    }

    case InferredType::Top:
        break;
    }

    return result;
}
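
// A minimal usage sketch (illustrative, not JSC source): the JumpList returned
// by branchIfNotType collects every path on which the value fails the
// inferred-type check, so a caller binds those jumps to a slow path and lets
// the fast path assume the type. emitTypeCheckedFastPath and slowPath are
// invented names for this sketch. Note the two degenerate cases above:
// InferredType::Top emits no branches (every value passes), while
// InferredType::Bottom emits an unconditional jump (no value passes).
static void emitTypeCheckedFastPath(
    AssemblyHelpers& jit, JSValueRegs regs, GPRReg tempGPR,
    const InferredType::Descriptor& descriptor, AssemblyHelpers::Label slowPath)
{
    AssemblyHelpers::JumpList failures =
        jit.branchIfNotType(regs, tempGPR, descriptor, HaveTagRegisters);
    // ... fast path that may assume the inferred type holds ...
    failures.linkTo(slowPath, &jit);
}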
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);

    AssemblyHelpers::Jump tooFewFails;

    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));

    // We want to figure out if there's a possibility that we're in a loop. For the outermost
    // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
    // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
    // problem is the inlined functions, which might also have loops, but whose baseline versions
    // don't know where to look for the exit count. Figure out if those loops are severe enough
    // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
    // Otherwise, we should use the normal reoptimization trigger.

    AssemblyHelpers::JumpList loopThreshold;

    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) {
        loopThreshold.append(
            jit.branchTest8(
                AssemblyHelpers::NonZero,
                AssemblyHelpers::AbsoluteAddress(
                    inlineCallFrame->executable->addressOfDidTryToEnterInLoop())));
    }

    jit.move(
        AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()),
        GPRInfo::regT1);

    if (!loopThreshold.empty()) {
        AssemblyHelpers::Jump done = jit.jump();

        loopThreshold.link(&jit);
        jit.move(
            AssemblyHelpers::TrustedImm32(
                jit.codeBlock()->exitCountThresholdForReoptimizationFromLoop()),
            GPRInfo::regT1);

        done.link(&jit);
    }

    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);

    reoptimizeNow.link(&jit);

    // Reoptimize as soon as possible.
#if !NUMBER_OF_ARGUMENT_REGISTERS
    jit.poke(GPRInfo::regT0);
    jit.poke(AssemblyHelpers::TrustedImmPtr(&exit), 1);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    jit.move(AssemblyHelpers::TrustedImmPtr(&exit), GPRInfo::argumentGPR1);
#endif
    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();

    tooFewFails.link(&jit);

    // Adjust the execution counter such that the target is to only optimize after a while.
    int32_t activeThreshold =
        jit.baselineCodeBlock()->adjustedCounterValue(
            Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue;
    switch (jit.codeBlock()->jitType()) {
    case JITCode::DFGJIT:
        clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    case JITCode::FTLJIT:
        clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        clippedValue = 0; // Make some compilers, and mhahnenberg, happy.
#endif
        break;
    }
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

    doneAdjusting.link(&jit);
}
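
// For orientation, a C++ sketch of the decision the function above emits as
// machine code (shouldReoptimizeNow is an invented name; the thresholds mirror
// the values loaded into regT1). JSC execution counters count up from a
// negative value toward zero, which is why the code above stores -clippedValue
// into the execute counter: a non-negative counter means the baseline already
// wants to tier up, so we reoptimize immediately instead of waiting for the
// exit count to cross a threshold. This is a sketch, not JSC source.
static bool shouldReoptimizeNow(
    int32_t baselineExecuteCounter, uint32_t osrExitCount,
    uint32_t exitCountThreshold, uint32_t loopExitCountThreshold,
    bool anyInlinedExecutableTriedLoopOSR)
{
    // Mirrors the reoptimizeNow branch: the counter has crossed zero.
    if (baselineExecuteCounter >= 0)
        return true;
    // Mirrors the loopThreshold JumpList: if any executable in the inline
    // stack tried to OSR-enter a loop, use the loop-specific threshold.
    uint32_t threshold = anyInlinedExecutableTriedLoopOSR
        ? loopExitCountThreshold : exitCountThreshold;
    return osrExitCount > threshold;
}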