Example 1
static void slowPathFor(
    CCallHelpers& jit, VM* vm, Sprt_JITOperation_ECli slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
#if OS(WINDOWS) && CPU(X86_64)
    // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
    // Other argument values are shifted over by one. Use space on the stack for our two return values.
    // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
    // and space for the 16 byte return area.
    jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.move(GPRInfo::regT2, GPRInfo::argumentGPR2);
    jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
    jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#else
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsWithExecState(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#endif

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) A thunk that returns the host call's return value.
    // 3) The function to call.
    // The second return value GPR will hold a non-zero value for tail calls.

    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();

    RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
    CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);

    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
    jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);

    doNotTrash.link(&jit);
    jit.jump(GPRInfo::returnValueGPR);
}
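
On most targets the slow path's two-word result comes back in returnValueGPR and returnValueGPR2; on Windows x86_64 it is read out of the 16-byte return area instead (the two loads above). A minimal standalone sketch of that convention, with hypothetical names (the real declarations live elsewhere in JSC and may differ):

// Sketch: the slow path returns two machine words. The first is the code
// address to jump to; the second is nonzero when the caller must perform a
// tail call, matching the branchTestPtr on returnValueGPR2 above.
struct SlowPathReturnTypeSketch {
    void* codeToJumpTo;
    void* tailCallFlag;
};

static SlowPathReturnTypeSketch encodeResultSketch(void* code, void* flag)
{
    return { code, flag };
}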
Example 2
    CCallHelpers::JumpList generateImpl(AccessGenerationState& state, const RegisterSet& usedRegistersBySnippet, CCallHelpers& jit, std::index_sequence<ArgumentsIndex...>)
    {
        CCallHelpers::JumpList exceptions;
        // We spill (1) the registers used by the IC and (2) the registers used by the snippet.
        AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall(usedRegistersBySnippet);

        jit.store32(
            CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
            CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));

        jit.makeSpaceOnStackForCCall();

        jit.setupArguments<FunctionType>(std::get<ArgumentsIndex>(m_arguments)...);

        CCallHelpers::Call operationCall = jit.call(OperationPtrTag);
        auto function = m_function;
        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
            linkBuffer.link(operationCall, FunctionPtr<OperationPtrTag>(function));
        });

        jit.setupResults(m_result);
        jit.reclaimSpaceOnStackForCCall();

        CCallHelpers::Jump noException = jit.emitExceptionCheck(state.m_vm, CCallHelpers::InvertedExceptionCheck);

        state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
        exceptions.append(jit.jump());

        noException.link(&jit);
        RegisterSet dontRestore;
        dontRestore.set(m_result);
        state.restoreLiveRegistersFromStackForCall(spillState, dontRestore);

        return exceptions;
    }
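
The addLinkTask call above defers patching the call target until the final location of the generated code is known. A minimal sketch of that deferred-link pattern, using hypothetical types rather than JSC's actual LinkBuffer API:

#include <functional>
#include <vector>

struct LinkBufferSketch {}; // stands in for the finalized code region

struct JITSketch {
    // Tasks run after the code has been copied to its final location, so
    // recorded call sites can then be patched with their real targets.
    std::vector<std::function<void(LinkBufferSketch&)>> linkTasks;

    void addLinkTask(std::function<void(LinkBufferSketch&)> task)
    {
        linkTasks.push_back(std::move(task));
    }

    void finalize(LinkBufferSketch& buffer)
    {
        for (auto& task : linkTasks)
            task(buffer);
    }
};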
Example 3
CCallHelpers::Jump CCallSpecial::generate(Inst& inst, CCallHelpers& jit, GenerationContext&)
{
    switch (inst.args[calleeArgOffset].kind()) {
    case Arg::Imm:
    case Arg::Imm64:
        jit.move(inst.args[calleeArgOffset].asTrustedImmPtr(), scratchRegister);
        jit.call(scratchRegister);
        break;
    case Arg::Tmp:
        jit.call(inst.args[calleeArgOffset].gpr());
        break;
    case Arg::Addr:
        jit.call(inst.args[calleeArgOffset].asAddress());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    return CCallHelpers::Jump();
}
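
A note on the Imm/Imm64 case above: call instructions cannot encode a full pointer-width immediate target (x86_64's call takes a rel32 displacement or a register/memory operand, and ARM64's BLR takes a register), so the constant callee is first materialized into scratchRegister and the call goes through the register.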
Example 4
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));
    
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);
    
    AssemblyHelpers::Jump tooFewFails;
    
    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));
    
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));
        
    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()));
    
    reoptimizeNow.link(&jit);
    
    // Reoptimize as soon as possible.
#if !NUMBER_OF_ARGUMENT_REGISTERS
    jit.poke(GPRInfo::regT0);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    ASSERT(GPRInfo::argumentGPR0 != GPRInfo::regT1);
#endif
    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::regT1);
    jit.call(GPRInfo::regT1);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();
    
    tooFewFails.link(&jit);
    
    // Adjust the execution counter so that the code is only considered for optimization again after a while.
    int32_t activeThreshold =
        jit.baselineCodeBlock()->adjustedCounterValue(
            Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue =
        ExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));
    
    doneAdjusting.link(&jit);
}
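
The stores above set up the usual counting trick: the execute counter is stored as a negative threshold and counted upward, so the branch32(GreaterThanOrEqual, ..., TrustedImm32(0)) at the top of the function fires once the threshold is crossed. The arithmetic as a standalone sketch (hypothetical names):

#include <cstdint>

struct ExecuteCounterSketch {
    int32_t counter; // set to -clippedValue by the code above

    // Called per execution; returns true once enough executions have
    // accumulated that reoptimization should be considered.
    bool countAndCheck()
    {
        return ++counter >= 0;
    }
};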
Example 5
static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch)
{
    AssemblyHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(owner);

    // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
#if CPU(X86)
    jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    jit.setupArgumentsWithExecState(owner);
    jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch);
    jit.call(scratch);

#if CPU(X86)
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    ownerIsRememberedOrInEden.link(&jit);
}
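
jumpIfIsRememberedOrInEden is the generational fast path: if the owner is still in eden or has already been logged in the remembered set, no barrier work is needed and the slow call is skipped. A hedged sketch of the decision in plain C++ (the real check is a single state-byte test; the names here are hypothetical):

enum class CellStateSketch : unsigned char {
    NewlyAllocated, // in eden; the next eden collection scans it anyway
    Remembered,     // already logged for re-scanning
    Old             // old and not remembered: needs the barrier slow path
};

static bool needsWriteBarrierSlowPath(CellStateSketch state)
{
    return state == CellStateSketch::Old;
}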
Example 6
static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2)
{
    AssemblyHelpers::Jump definitelyNotMarked = jit.genericWriteBarrier(owner, scratch1, scratch2);

    // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
#if CPU(X86)
    jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    jit.setupArgumentsWithExecState(owner);
    jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch1);
    jit.call(scratch1);

#if CPU(X86)
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    definitelyNotMarked.link(&jit);
}
Example 7
static void slowPathFor(
    CCallHelpers& jit, JSGlobalData* globalData, P_DFGOperation_E slowPathFunction)
{
    jit.preserveReturnAddressAfterCall(GPRInfo::nonArgGPR2);
    emitPointerValidation(jit, GPRInfo::nonArgGPR2);
    jit.storePtr(
        GPRInfo::nonArgGPR2,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ReturnPC));
    jit.storePtr(GPRInfo::callFrameRegister, &globalData->topCallFrame);
#if USE(JSVALUE64)
    jit.poke64(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
#else
    jit.poke(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX);
#endif
    jit.setupArgumentsExecState();
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    
    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) A thunk that returns the host call's return value.
    // 3) The function to call.
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ReturnPC),
        GPRInfo::nonPreservedNonReturnGPR);
    jit.storePtr(
        CCallHelpers::TrustedImmPtr(0),
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ReturnPC));
    emitPointerValidation(jit, GPRInfo::nonPreservedNonReturnGPR);
    jit.restoreReturnAddressBeforeReturn(GPRInfo::nonPreservedNonReturnGPR);
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.jump(GPRInfo::returnValueGPR);
}
Example 8
static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_JITOperation_E slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsExecState();
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    
    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) A thunk that returns the host call's return value.
    // 3) The function to call.
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();
    jit.jump(GPRInfo::returnValueGPR);
}
Example 9
static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_JITOperation_E slowPathFunction)
{
    jit.preserveReturnAddressAfterCall(GPRInfo::nonArgGPR2);
    emitPointerValidation(jit, GPRInfo::nonArgGPR2);
    jit.emitPutReturnPCToCallFrameHeader(GPRInfo::nonArgGPR2);
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
    jit.setupArgumentsExecState();
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    
    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) A thunk that returns the host call's return value.
    // 3) The function to call.
    jit.emitGetReturnPCFromCallFrameHeaderPtr(GPRInfo::nonPreservedNonReturnGPR);
    jit.emitPutReturnPCToCallFrameHeader(CCallHelpers::TrustedImmPtr(0));
    emitPointerValidation(jit, GPRInfo::nonPreservedNonReturnGPR);
    jit.restoreReturnAddressBeforeReturn(GPRInfo::nonPreservedNonReturnGPR);
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.jump(GPRInfo::returnValueGPR);
}
Example 10
void JSCallVarargs::emit(CCallHelpers& jit, State& state, int32_t spillSlotsOffset, int32_t osrExitFromGenericUnwindSpillSlots)
{
    // We are passed three pieces of information:
    // - The callee.
    // - The arguments object, if it's not a forwarding call.
    // - The "this" value, if it's a constructor call.

    CallVarargsData* data = m_node->callVarargsData();
    
    GPRReg calleeGPR = GPRInfo::argumentGPR0;
    
    GPRReg argumentsGPR = InvalidGPRReg;
    GPRReg thisGPR = InvalidGPRReg;
    
    bool forwarding = false;
    
    switch (m_node->op()) {
    case CallVarargs:
    case TailCallVarargs:
    case TailCallVarargsInlinedCaller:
    case ConstructVarargs:
        argumentsGPR = GPRInfo::argumentGPR1;
        thisGPR = GPRInfo::argumentGPR2;
        break;
    case CallForwardVarargs:
    case TailCallForwardVarargs:
    case TailCallForwardVarargsInlinedCaller:
    case ConstructForwardVarargs:
        thisGPR = GPRInfo::argumentGPR1;
        forwarding = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    
    const unsigned calleeSpillSlot = 0;
    const unsigned argumentsSpillSlot = 1;
    const unsigned thisSpillSlot = 2;
    const unsigned stackPointerSpillSlot = 3;
    
    // Get some scratch registers.
    RegisterSet usedRegisters;
    usedRegisters.merge(RegisterSet::stackRegisters());
    usedRegisters.merge(RegisterSet::reservedHardwareRegisters());
    usedRegisters.merge(RegisterSet::calleeSaveRegisters());
    usedRegisters.set(calleeGPR);
    if (argumentsGPR != InvalidGPRReg)
        usedRegisters.set(argumentsGPR);
    ASSERT(thisGPR != InvalidGPRReg);
    usedRegisters.set(thisGPR);
    ScratchRegisterAllocator allocator(usedRegisters);
    GPRReg scratchGPR1 = allocator.allocateScratchGPR();
    GPRReg scratchGPR2 = allocator.allocateScratchGPR();
    GPRReg scratchGPR3 = allocator.allocateScratchGPR();

    RELEASE_ASSERT(!allocator.numberOfReusedRegisters());
    
    auto computeUsedStack = [&] (GPRReg targetGPR, unsigned extra) {
        if (isARM64()) {
            // Have to do it this way because sp on ARM64 encodes as the zero register
            // when used as the second operand of a subtraction.
            jit.move(CCallHelpers::stackPointerRegister, targetGPR);
            jit.negPtr(targetGPR);
            jit.addPtr(GPRInfo::callFrameRegister, targetGPR);
        } else {
            jit.move(GPRInfo::callFrameRegister, targetGPR);
            jit.subPtr(CCallHelpers::stackPointerRegister, targetGPR);
        }
        if (extra)
            jit.subPtr(CCallHelpers::TrustedImm32(extra), targetGPR);
        jit.urshiftPtr(CCallHelpers::Imm32(3), targetGPR);
    };
    
    auto callWithExceptionCheck = [&] (void* callee) {
        jit.move(CCallHelpers::TrustedImmPtr(callee), GPRInfo::nonPreservedNonArgumentGPR);
        jit.call(GPRInfo::nonPreservedNonArgumentGPR);
        m_exceptions.append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
    };
    
    if (isARM64()) {
        jit.move(CCallHelpers::stackPointerRegister, scratchGPR1);
        jit.storePtr(scratchGPR1, CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot));
    } else
        jit.storePtr(CCallHelpers::stackPointerRegister, CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot));

    unsigned extraStack = sizeof(CallerFrameAndPC) +
        WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*));

    if (forwarding) {
        CCallHelpers::JumpList slowCase;
        computeUsedStack(scratchGPR2, 0);
        emitSetupVarargsFrameFastCase(jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, m_node->child2()->origin.semantic.inlineCallFrame, data->firstVarArgOffset, slowCase);
        
        CCallHelpers::Jump done = jit.jump();
        slowCase.link(&jit);
        jit.subPtr(CCallHelpers::TrustedImm32(extraStack), CCallHelpers::stackPointerRegister);
        jit.setupArgumentsExecState();
        callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
        jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
        
        done.link(&jit);
        jit.move(calleeGPR, GPRInfo::regT0);
    } else {
        // We must spill the callee, arguments, and this because we will need them
        // later, and the calls below clobber them.
        jit.store64(calleeGPR, CCallHelpers::addressFor(spillSlotsOffset + calleeSpillSlot));
        jit.store64(argumentsGPR, CCallHelpers::addressFor(spillSlotsOffset + argumentsSpillSlot));
        jit.store64(thisGPR, CCallHelpers::addressFor(spillSlotsOffset + thisSpillSlot));
    
        computeUsedStack(scratchGPR1, 0);
        jit.subPtr(CCallHelpers::TrustedImm32(extraStack), CCallHelpers::stackPointerRegister);
        jit.setupArgumentsWithExecState(argumentsGPR, scratchGPR1, CCallHelpers::TrustedImm32(data->firstVarArgOffset));
        callWithExceptionCheck(bitwise_cast<void*>(operationSizeFrameForVarargs));
    
        jit.move(GPRInfo::returnValueGPR, scratchGPR1);
        computeUsedStack(scratchGPR2, extraStack);
        jit.load64(CCallHelpers::addressFor(spillSlotsOffset + argumentsSpillSlot), argumentsGPR);
        emitSetVarargsFrame(jit, scratchGPR1, false, scratchGPR2, scratchGPR2);
        jit.addPtr(CCallHelpers::TrustedImm32(-extraStack), scratchGPR2, CCallHelpers::stackPointerRegister);
        jit.setupArgumentsWithExecState(scratchGPR2, argumentsGPR, CCallHelpers::TrustedImm32(data->firstVarArgOffset), scratchGPR1);
        callWithExceptionCheck(bitwise_cast<void*>(operationSetupVarargsFrame));
    
        jit.move(GPRInfo::returnValueGPR, scratchGPR2);

        jit.load64(CCallHelpers::addressFor(spillSlotsOffset + thisSpillSlot), thisGPR);
        jit.load64(CCallHelpers::addressFor(spillSlotsOffset + calleeSpillSlot), GPRInfo::regT0);
    }
    
    jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), scratchGPR2, CCallHelpers::stackPointerRegister);

    jit.store64(thisGPR, CCallHelpers::calleeArgumentSlot(0));
    
    // Now we make the call. The base FTL call machinery expects the callee in regT0
    // and the stack frame to already be set up, which it is.
    jit.store64(GPRInfo::regT0, CCallHelpers::calleeFrameSlot(JSStack::Callee));

    m_callBase.emit(jit, state, osrExitFromGenericUnwindSpillSlots);

    
    // Undo the damage we've done.
    if (isARM64()) {
        GPRReg scratchGPRAtReturn = CCallHelpers::selectScratchGPR(GPRInfo::returnValueGPR);
        jit.loadPtr(CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot), scratchGPRAtReturn);
        jit.move(scratchGPRAtReturn, CCallHelpers::stackPointerRegister);
    } else
        jit.loadPtr(CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot), CCallHelpers::stackPointerRegister);
}
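
The computeUsedStack lambda above materializes (callFrameRegister - stackPointer - extra) >> 3, i.e. the number of 8-byte slots in use between the call frame and the stack pointer; the ARM64 branch only exists because sp cannot appear as the second operand of the subtraction. The same arithmetic as plain code (assuming a 64-bit target, hence the shift by 3):

#include <cstdint>

static inline uintptr_t usedStackSlotsSketch(uintptr_t callFrame, uintptr_t stackPointer, unsigned extra)
{
    // The stack grows down, so callFrame >= stackPointer; subtracting
    // "extra" first accounts for bytes that are about to be claimed.
    return (callFrame - stackPointer - extra) >> 3;
}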
Example 11
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);

    AssemblyHelpers::Jump tooFewFails;

    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));

    // We want to figure out if there's a possibility that we're in a loop. For the outermost
    // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
    // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
    // problem is the inlined functions, which might also have loops, but whose baseline versions
    // don't know where to look for the exit count. Figure out if those loops are severe enough
    // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
    // Otherwise, we should use the normal reoptimization trigger.

    AssemblyHelpers::JumpList loopThreshold;

    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) {
        loopThreshold.append(
            jit.branchTest8(
                AssemblyHelpers::NonZero,
                AssemblyHelpers::AbsoluteAddress(
                    inlineCallFrame->executable->addressOfDidTryToEnterInLoop())));
    }

    jit.move(
        AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()),
        GPRInfo::regT1);

    if (!loopThreshold.empty()) {
        AssemblyHelpers::Jump done = jit.jump();

        loopThreshold.link(&jit);
        jit.move(
            AssemblyHelpers::TrustedImm32(
                jit.codeBlock()->exitCountThresholdForReoptimizationFromLoop()),
            GPRInfo::regT1);

        done.link(&jit);
    }

    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);

    reoptimizeNow.link(&jit);

    // Reoptimize as soon as possible.
#if !NUMBER_OF_ARGUMENT_REGISTERS
    jit.poke(GPRInfo::regT0);
    jit.poke(AssemblyHelpers::TrustedImmPtr(&exit), 1);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    jit.move(AssemblyHelpers::TrustedImmPtr(&exit), GPRInfo::argumentGPR1);
#endif
    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();

    tooFewFails.link(&jit);

    // Adjust the execution counter so that the code is only considered for optimization again after a while.
    int32_t activeThreshold =
        jit.baselineCodeBlock()->adjustedCounterValue(
            Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue;
    switch (jit.codeBlock()->jitType()) {
    case JITCode::DFGJIT:
        clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    case JITCode::FTLJIT:
        clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        clippedValue = 0; // Make some compilers, and mhahnenberg, happy.
#endif
        break;
    }
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

    doneAdjusting.link(&jit);
}
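
The loopThreshold jump list above boils down to choosing between two thresholds in registers, since the emitted code cannot call back into the compiler. The equivalent decision as a sketch (parameter names taken from the snippet):

static int32_t exitThresholdSketch(bool anyInlinedFrameTriedLoopOSREntry,
    int32_t thresholdForReoptimization, int32_t thresholdForReoptimizationFromLoop)
{
    // Mirrors the branchTest8 loop over the inline call frames: if any inlined
    // executable ever tried to OSR-enter in a loop, use the loop threshold.
    return anyInlinedFrameTriedLoopOSREntry
        ? thresholdForReoptimizationFromLoop
        : thresholdForReoptimization;
}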