CCallHelpers::JumpList generateImpl(AccessGenerationState& state, const RegisterSet& usedRegistersBySnippet, CCallHelpers& jit, std::index_sequence<ArgumentsIndex...>)
    {
        CCallHelpers::JumpList exceptions;
        // We spill (1) the registers used by the IC and (2) the registers used by the Snippet.
        AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall(usedRegistersBySnippet);

        jit.store32(
            CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
            CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));

        jit.makeSpaceOnStackForCCall();

        jit.setupArguments<FunctionType>(std::get<ArgumentsIndex>(m_arguments)...);

        CCallHelpers::Call operationCall = jit.call(OperationPtrTag);
        auto function = m_function;
        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
            linkBuffer.link(operationCall, FunctionPtr<OperationPtrTag>(function));
        });

        jit.setupResults(m_result);
        jit.reclaimSpaceOnStackForCCall();

        CCallHelpers::Jump noException = jit.emitExceptionCheck(state.m_vm, CCallHelpers::InvertedExceptionCheck);

        state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
        exceptions.append(jit.jump());

        noException.link(&jit);
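        // Do not restore over the register(s) that now hold the call's result.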
        RegisterSet dontRestore;
        dontRestore.set(m_result);
        state.restoreLiveRegistersFromStackForCall(spillState, dontRestore);

        return exceptions;
    }
CCallHelpers::JumpList AccessCaseSnippetParams::emitSlowPathCalls(AccessGenerationState& state, const RegisterSet& usedRegistersBySnippet, CCallHelpers& jit)
{
    CCallHelpers::JumpList exceptions;
    for (auto& generator : m_generators)
        exceptions.append(generator->generate(state, usedRegistersBySnippet, jit));
    return exceptions;
}
Example #3
void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, ValueRecovery argCountRecovery, VirtualRegister firstArgumentReg, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
{
    CCallHelpers::JumpList end;
    
    if (argCountRecovery.isConstant()) {
        // FIXME: We could constant-fold a lot of the computation below in this case.
        // https://bugs.webkit.org/show_bug.cgi?id=141486
        jit.move(CCallHelpers::TrustedImm32(argCountRecovery.constant().asInt32()), scratchGPR1);
    } else
        jit.load32(CCallHelpers::payloadFor(argCountRecovery.virtualRegister()), scratchGPR1);
    if (firstVarArgOffset) {
        CCallHelpers::Jump sufficientArguments = jit.branch32(CCallHelpers::GreaterThan, scratchGPR1, CCallHelpers::TrustedImm32(firstVarArgOffset + 1));
        jit.move(CCallHelpers::TrustedImm32(1), scratchGPR1);
        CCallHelpers::Jump endVarArgs = jit.jump();
        sufficientArguments.link(&jit);
        jit.sub32(CCallHelpers::TrustedImm32(firstVarArgOffset), scratchGPR1);
        endVarArgs.link(&jit);
    }
    slowCase.append(jit.branch32(CCallHelpers::Above, scratchGPR1, CCallHelpers::TrustedImm32(maxArguments + 1)));
    
    emitSetVarargsFrame(jit, scratchGPR1, true, numUsedSlotsGPR, scratchGPR2);

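    // If the new frame would dip below the stack limit, take the slow path.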
    slowCase.append(jit.branchPtr(CCallHelpers::Above, CCallHelpers::AbsoluteAddress(jit.vm()->addressOfStackLimit()), scratchGPR2));

    // Initialize ArgumentCount.
    jit.store32(scratchGPR1, CCallHelpers::Address(scratchGPR2, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset));

    // Copy arguments.
    jit.signExtend32ToPtr(scratchGPR1, scratchGPR1);
    CCallHelpers::Jump done = jit.branchSubPtr(CCallHelpers::Zero, CCallHelpers::TrustedImm32(1), scratchGPR1);
    // scratchGPR1: argumentCount

    CCallHelpers::Label copyLoop = jit.label();
    int argOffset = (firstArgumentReg.offset() - 1 + firstVarArgOffset) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
    jit.load64(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset), scratchGPR3);
    jit.store64(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
#else // USE(JSVALUE64), so this begins the 32-bit case
    jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + TagOffset), scratchGPR3);
    jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + TagOffset));
    jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + PayloadOffset), scratchGPR3);
    jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + PayloadOffset));
#endif // USE(JSVALUE64), end of 32-bit case
    jit.branchSubPtr(CCallHelpers::NonZero, CCallHelpers::TrustedImm32(1), scratchGPR1).linkTo(copyLoop, &jit);
    
    done.link(&jit);
}
Example #4
bool JITNegGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile* arithProfile, bool shouldEmitProfiling)
{
    ASSERT(m_scratchGPR != m_src.payloadGPR());
    ASSERT(m_scratchGPR != m_result.payloadGPR());
    ASSERT(m_scratchGPR != InvalidGPRReg);
#if USE(JSVALUE32_64)
    ASSERT(m_scratchGPR != m_src.tagGPR());
    ASSERT(m_scratchGPR != m_result.tagGPR());
#endif

    jit.moveValueRegs(m_src, m_result);
    CCallHelpers::Jump srcNotInt = jit.branchIfNotInt32(m_src);

    // -0 should produce a double, and hence cannot be negated as an int.
    // The negative int32 0x80000000 doesn't have a positive int32 representation, and hence cannot be negated as an int.
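    // Testing the payload against 0x7fffffff catches both cases with a single branch: the test is zero
    // exactly when the low 31 bits are all zero, i.e. when the payload is 0 or 0x80000000.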
    slowPathJumpList.append(jit.branchTest32(CCallHelpers::Zero, m_src.payloadGPR(), CCallHelpers::TrustedImm32(0x7fffffff)));

    jit.neg32(m_result.payloadGPR());
#if USE(JSVALUE64)
    jit.boxInt32(m_result.payloadGPR(), m_result);
#endif
    endJumpList.append(jit.jump());

    srcNotInt.link(&jit);
    slowPathJumpList.append(jit.branchIfNotNumber(m_src, m_scratchGPR));

    // For a double, all we need to do is to invert the sign bit.
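    // On 64-bit the boxed double's sign bit ends up at bit 63 of the payload GPR; on 32-bit the tag
    // register holds the double's high word, so its bit 31 is the sign bit.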
#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64((int64_t)(1ull << 63)), m_scratchGPR);
    jit.xor64(m_scratchGPR, m_result.payloadGPR());
#else
    jit.xor32(CCallHelpers::TrustedImm32(1 << 31), m_result.tagGPR());
#endif
    // The flags of ArithNegate are basic in DFG.
    // We only need to know if we ever produced a number.
    if (shouldEmitProfiling && arithProfile && !arithProfile->lhsObservedType().sawNumber() && !arithProfile->didObserveDouble())
        arithProfile->emitSetDouble(jit);
    return true;
}
Example #5
static void dispatch(CCallHelpers& jit, FTL::State* state, const B3::StackmapGenerationParams& params, DFG::Node* node, Box<CCallHelpers::JumpList> exceptions, CCallHelpers::JumpList from, OperationType operation, ResultType result, Arguments arguments, std::index_sequence<ArgumentsIndex...>)
{
    CCallHelpers::Label done = jit.label();
    params.addLatePath([=] (CCallHelpers& jit) {
        AllowMacroScratchRegisterUsage allowScratch(jit);

        from.link(&jit);
        callOperation(
            *state, params.unavailableRegisters(), jit, node->origin.semantic,
            exceptions.get(), operation, extractResult(result), std::get<ArgumentsIndex>(arguments)...);
        jit.jump().linkTo(done, &jit);
    });
}
Example #6
void JSCallVarargs::emit(CCallHelpers& jit, State& state, int32_t spillSlotsOffset, int32_t osrExitFromGenericUnwindSpillSlots)
{
    // We are passed three pieces of information:
    // - The callee.
    // - The arguments object, if it's not a forwarding call.
    // - The "this" value, if it's a constructor call.

    CallVarargsData* data = m_node->callVarargsData();
    
    GPRReg calleeGPR = GPRInfo::argumentGPR0;
    
    GPRReg argumentsGPR = InvalidGPRReg;
    GPRReg thisGPR = InvalidGPRReg;
    
    bool forwarding = false;
    
    switch (m_node->op()) {
    case CallVarargs:
    case TailCallVarargs:
    case TailCallVarargsInlinedCaller:
    case ConstructVarargs:
        argumentsGPR = GPRInfo::argumentGPR1;
        thisGPR = GPRInfo::argumentGPR2;
        break;
    case CallForwardVarargs:
    case TailCallForwardVarargs:
    case TailCallForwardVarargsInlinedCaller:
    case ConstructForwardVarargs:
        thisGPR = GPRInfo::argumentGPR1;
        forwarding = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    
    const unsigned calleeSpillSlot = 0;
    const unsigned argumentsSpillSlot = 1;
    const unsigned thisSpillSlot = 2;
    const unsigned stackPointerSpillSlot = 3;
    
    // Get some scratch registers.
    RegisterSet usedRegisters;
    usedRegisters.merge(RegisterSet::stackRegisters());
    usedRegisters.merge(RegisterSet::reservedHardwareRegisters());
    usedRegisters.merge(RegisterSet::calleeSaveRegisters());
    usedRegisters.set(calleeGPR);
    if (argumentsGPR != InvalidGPRReg)
        usedRegisters.set(argumentsGPR);
    ASSERT(thisGPR != InvalidGPRReg);
    usedRegisters.set(thisGPR);
    ScratchRegisterAllocator allocator(usedRegisters);
    GPRReg scratchGPR1 = allocator.allocateScratchGPR();
    GPRReg scratchGPR2 = allocator.allocateScratchGPR();
    GPRReg scratchGPR3 = allocator.allocateScratchGPR();

    RELEASE_ASSERT(!allocator.numberOfReusedRegisters());
    
    auto computeUsedStack = [&] (GPRReg targetGPR, unsigned extra) {
        if (isARM64()) {
            // Have to do this the weird way because $sp on ARM64 means zero when used in a subtraction.
            jit.move(CCallHelpers::stackPointerRegister, targetGPR);
            jit.negPtr(targetGPR);
            jit.addPtr(GPRInfo::callFrameRegister, targetGPR);
        } else {
            jit.move(GPRInfo::callFrameRegister, targetGPR);
            jit.subPtr(CCallHelpers::stackPointerRegister, targetGPR);
        }
        if (extra)
            jit.subPtr(CCallHelpers::TrustedImm32(extra), targetGPR);
        jit.urshiftPtr(CCallHelpers::Imm32(3), targetGPR);
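        // targetGPR now holds (callFrameRegister - stackPointerRegister - extra) >> 3, i.e. the used stack in 8-byte slots.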
    };
    
    auto callWithExceptionCheck = [&] (void* callee) {
        jit.move(CCallHelpers::TrustedImmPtr(callee), GPRInfo::nonPreservedNonArgumentGPR);
        jit.call(GPRInfo::nonPreservedNonArgumentGPR);
        m_exceptions.append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
    };
    
    if (isARM64()) {
        jit.move(CCallHelpers::stackPointerRegister, scratchGPR1);
        jit.storePtr(scratchGPR1, CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot));
    } else
        jit.storePtr(CCallHelpers::stackPointerRegister, CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot));

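    // Reserve room for a CallerFrameAndPC plus a stack-aligned scratch area of five pointer-sized slots.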
    unsigned extraStack = sizeof(CallerFrameAndPC) +
        WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*));

    if (forwarding) {
        CCallHelpers::JumpList slowCase;
        computeUsedStack(scratchGPR2, 0);
        emitSetupVarargsFrameFastCase(jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, m_node->child2()->origin.semantic.inlineCallFrame, data->firstVarArgOffset, slowCase);
        
        CCallHelpers::Jump done = jit.jump();
        slowCase.link(&jit);
        jit.subPtr(CCallHelpers::TrustedImm32(extraStack), CCallHelpers::stackPointerRegister);
        jit.setupArgumentsExecState();
        callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
        jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
        
        done.link(&jit);
        jit.move(calleeGPR, GPRInfo::regT0);
    } else {
        // Gotta spill the callee, arguments, and this because we will need them later and we will have some
        // calls that clobber them.
        jit.store64(calleeGPR, CCallHelpers::addressFor(spillSlotsOffset + calleeSpillSlot));
        jit.store64(argumentsGPR, CCallHelpers::addressFor(spillSlotsOffset + argumentsSpillSlot));
        jit.store64(thisGPR, CCallHelpers::addressFor(spillSlotsOffset + thisSpillSlot));
    
        computeUsedStack(scratchGPR1, 0);
        jit.subPtr(CCallHelpers::TrustedImm32(extraStack), CCallHelpers::stackPointerRegister);
        jit.setupArgumentsWithExecState(argumentsGPR, scratchGPR1, CCallHelpers::TrustedImm32(data->firstVarArgOffset));
        callWithExceptionCheck(bitwise_cast<void*>(operationSizeFrameForVarargs));
    
        jit.move(GPRInfo::returnValueGPR, scratchGPR1);
        computeUsedStack(scratchGPR2, extraStack);
        jit.load64(CCallHelpers::addressFor(spillSlotsOffset + argumentsSpillSlot), argumentsGPR);
        emitSetVarargsFrame(jit, scratchGPR1, false, scratchGPR2, scratchGPR2);
        jit.addPtr(CCallHelpers::TrustedImm32(-extraStack), scratchGPR2, CCallHelpers::stackPointerRegister);
        jit.setupArgumentsWithExecState(scratchGPR2, argumentsGPR, CCallHelpers::TrustedImm32(data->firstVarArgOffset), scratchGPR1);
        callWithExceptionCheck(bitwise_cast<void*>(operationSetupVarargsFrame));
    
        jit.move(GPRInfo::returnValueGPR, scratchGPR2);

        jit.load64(CCallHelpers::addressFor(spillSlotsOffset + thisSpillSlot), thisGPR);
        jit.load64(CCallHelpers::addressFor(spillSlotsOffset + calleeSpillSlot), GPRInfo::regT0);
    }
    
    jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), scratchGPR2, CCallHelpers::stackPointerRegister);

    jit.store64(thisGPR, CCallHelpers::calleeArgumentSlot(0));
    
    // Henceforth we make the call. The base FTL call machinery expects the callee in regT0 and for the
    // stack frame to already be set up, which it is.
    jit.store64(GPRInfo::regT0, CCallHelpers::calleeFrameSlot(JSStack::Callee));

    m_callBase.emit(jit, state, osrExitFromGenericUnwindSpillSlots);

    
    // Undo the damage we've done.
    if (isARM64()) {
        GPRReg scratchGPRAtReturn = CCallHelpers::selectScratchGPR(GPRInfo::returnValueGPR);
        jit.loadPtr(CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot), scratchGPRAtReturn);
        jit.move(scratchGPRAtReturn, CCallHelpers::stackPointerRegister);
    } else
        jit.loadPtr(CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot), CCallHelpers::stackPointerRegister);
}
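Example #7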
void GetterSetterAccessCase::emitDOMJITGetter(AccessGenerationState& state, const DOMJIT::GetterSetter* domJIT, GPRReg baseForGetGPR)
{
    CCallHelpers& jit = *state.jit;
    StructureStubInfo& stubInfo = *state.stubInfo;
    JSValueRegs valueRegs = state.valueRegs;
    GPRReg baseGPR = state.baseGPR;
    GPRReg scratchGPR = state.scratchGPR;

    // We construct the environment that can execute the DOMJIT::Snippet here.
    Ref<DOMJIT::CallDOMGetterSnippet> snippet = domJIT->compiler()();

    Vector<GPRReg> gpScratch;
    Vector<FPRReg> fpScratch;
    Vector<SnippetParams::Value> regs;

    ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
    allocator.lock(baseGPR);
#if USE(JSVALUE32_64)
    allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
#endif
    allocator.lock(valueRegs);
    allocator.lock(scratchGPR);

    GPRReg paramBaseGPR = InvalidGPRReg;
    GPRReg paramGlobalObjectGPR = InvalidGPRReg;
    JSValueRegs paramValueRegs = valueRegs;
    GPRReg remainingScratchGPR = InvalidGPRReg;

    // valueRegs and baseForGetGPR may be the same register. For example, the Baseline JIT passes the same regT0 for baseGPR and valueRegs.
    // In the FTL there is no constraint that baseForGetGPR interferes with the result. To keep the Snippet implementation simple,
    // the Snippet assumes that result registers always early-interfere with input registers, in this case baseForGetGPR.
    // So we move baseForGetGPR into another register when baseForGetGPR == valueRegs.payloadGPR().
    if (baseForGetGPR != valueRegs.payloadGPR()) {
        paramBaseGPR = baseForGetGPR;
        if (!snippet->requireGlobalObject)
            remainingScratchGPR = scratchGPR;
        else
            paramGlobalObjectGPR = scratchGPR;
    } else {
        jit.move(valueRegs.payloadGPR(), scratchGPR);
        paramBaseGPR = scratchGPR;
        if (snippet->requireGlobalObject)
            paramGlobalObjectGPR = allocator.allocateScratchGPR();
    }

    JSGlobalObject* globalObjectForDOMJIT = structure()->globalObject();

    regs.append(paramValueRegs);
    regs.append(paramBaseGPR);
    if (snippet->requireGlobalObject) {
        ASSERT(paramGlobalObjectGPR != InvalidGPRReg);
        regs.append(SnippetParams::Value(paramGlobalObjectGPR, globalObjectForDOMJIT));
    }

    if (snippet->numGPScratchRegisters) {
        unsigned i = 0;
        if (remainingScratchGPR != InvalidGPRReg) {
            gpScratch.append(remainingScratchGPR);
            ++i;
        }
        for (; i < snippet->numGPScratchRegisters; ++i)
            gpScratch.append(allocator.allocateScratchGPR());
    }

    for (unsigned i = 0; i < snippet->numFPScratchRegisters; ++i)
        fpScratch.append(allocator.allocateScratchFPR());

    // Let's store the reused registers to the stack. After that, we can use allocated scratch registers.
    ScratchRegisterAllocator::PreservedState preservedState =
        allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);

    if (verbose) {
        dataLog("baseGPR = ", baseGPR, "\n");
        dataLog("valueRegs = ", valueRegs, "\n");
        dataLog("scratchGPR = ", scratchGPR, "\n");
        dataLog("paramBaseGPR = ", paramBaseGPR, "\n");
        if (paramGlobalObjectGPR != InvalidGPRReg)
            dataLog("paramGlobalObjectGPR = ", paramGlobalObjectGPR, "\n");
        dataLog("paramValueRegs = ", paramValueRegs, "\n");
        for (unsigned i = 0; i < snippet->numGPScratchRegisters; ++i)
            dataLog("gpScratch[", i, "] = ", gpScratch[i], "\n");
    }

    if (snippet->requireGlobalObject)
        jit.move(CCallHelpers::TrustedImmPtr(globalObjectForDOMJIT), paramGlobalObjectGPR);

    // We spill only the registers used by the Snippet here. Registers that are not spilled here explicitly
    // must be included in the used-register set passed by the callers (Baseline, DFG, and FTL) if they need to be preserved.
    // Some registers can be locked without being in the used-register set: for example, the caller could pass the same
    // register for baseGPR and valueRegs and leave it out of the used registers because it is going to be overwritten.
    RegisterSet registersToSpillForCCall;
    for (auto& value : regs) {
        SnippetReg reg = value.reg();
        if (reg.isJSValueRegs())
            registersToSpillForCCall.set(reg.jsValueRegs());
        else if (reg.isGPR())
            registersToSpillForCCall.set(reg.gpr());
        else
            registersToSpillForCCall.set(reg.fpr());
    }
    for (GPRReg reg : gpScratch)
        registersToSpillForCCall.set(reg);
    for (FPRReg reg : fpScratch)
        registersToSpillForCCall.set(reg);
    registersToSpillForCCall.exclude(RegisterSet::registersToNotSaveForCCall());

    AccessCaseSnippetParams params(state.m_vm, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
    snippet->generator()->run(jit, params);
    allocator.restoreReusedRegistersByPopping(jit, preservedState);
    state.succeed();

    CCallHelpers::JumpList exceptions = params.emitSlowPathCalls(state, registersToSpillForCCall, jit);
    if (!exceptions.empty()) {
        exceptions.link(&jit);
        allocator.restoreReusedRegistersByPopping(jit, preservedState);
        state.emitExplicitExceptionHandler();
    }
}
Example #8
static MacroAssemblerCodeRef virtualForThunkGenerator(
    VM* vm, CodeSpecializationKind kind, RegisterPreservationMode registers)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);
    
    CCallHelpers::JumpList slowCase;

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.
    
#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT2);
    
    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT2));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
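    // Load the callee's Structure and check that its ClassInfo says this is a JSFunction.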
    AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT2, GPRInfo::regT1);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::regT2, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(JSFunction::info())));
    
    // Now we know we have a JSFunction.
    
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT2);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind, registers)),
        GPRInfo::regT2);
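    // A null pointer means there is no JIT code (with arity check) for this kind of call yet.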
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT2));
    
    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.
    
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfScopeChain()),
        GPRInfo::regT1);
#if USE(JSVALUE64)
    jit.emitPutToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
#else
    jit.emitPutPayloadToCallFrameHeaderBeforePrologue(GPRInfo::regT1, JSStack::ScopeChain);
    jit.emitPutTagToCallFrameHeaderBeforePrologue(CCallHelpers::TrustedImm32(JSValue::CellTag),
        JSStack::ScopeChain);
#endif
    
    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);

    slowCase.link(&jit);
    
    // Here we don't know anything, so revert to the full slow path.
    
    slowPathFor(jit, vm, operationVirtualFor(kind, registers));
    
    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s%s slow path thunk", kind == CodeForCall ? "call" : "construct", registers == MustPreserveRegisters ? " that preserves registers" : ""));
}
Example #9
static MacroAssemblerCodeRef virtualForThunkGenerator(
    JSGlobalData* globalData, CodeSpecializationKind kind)
{
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate DFG operation.

    CCallHelpers jit(globalData);
    
    CCallHelpers::JumpList slowCase;

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.
    
#if USE(JSVALUE64)
    slowCase.append(
        jit.branchTestPtr(
            CCallHelpers::NonZero, GPRInfo::nonArgGPR0, GPRInfo::tagMaskRegister));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::nonArgGPR1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
    jit.loadPtr(CCallHelpers::Address(GPRInfo::nonArgGPR0, JSCell::structureOffset()), GPRInfo::nonArgGPR2);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::nonArgGPR2, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(&JSFunction::s_info)));
    
    // Now we know we have a JSFunction.
    
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfExecutable()),
        GPRInfo::nonArgGPR2);
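    // A negative parameter count means this executable has not yet been compiled for this kind of call.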
    slowCase.append(
        jit.branch32(
            CCallHelpers::LessThan,
            CCallHelpers::Address(
                GPRInfo::nonArgGPR2, ExecutableBase::offsetOfNumParametersFor(kind)),
            CCallHelpers::TrustedImm32(0)));
    
    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.
    
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR0, JSFunction::offsetOfScopeChain()),
        GPRInfo::nonArgGPR1);
#if USE(JSVALUE64)
    jit.storePtr(
        GPRInfo::nonArgGPR1,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain));
#else
    jit.storePtr(
        GPRInfo::nonArgGPR1,
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain +
            OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    jit.store32(
        CCallHelpers::TrustedImm32(JSValue::CellTag),
        CCallHelpers::Address(
            GPRInfo::callFrameRegister,
            static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain +
            OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
#endif
    
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::nonArgGPR2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind)),
        GPRInfo::regT0);
    
    // Make a tail call. This will return back to DFG code.
    emitPointerValidation(jit, GPRInfo::regT0);
    jit.jump(GPRInfo::regT0);

    slowCase.link(&jit);
    
    // Here we don't know anything, so revert to the full slow path.
    
    slowPathFor(jit, globalData, kind == CodeForCall ? operationVirtualCall : operationVirtualConstruct);
    
    LinkBuffer patchBuffer(*globalData, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("DFG virtual %s slow path thunk", kind == CodeForCall ? "call" : "construct"));
}
Example #10
static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, bool didSeeUnwindInfo)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps stackmaps = jitCode->stackmaps;
    
    int localsOffset =
        offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;
    
    int varargsSpillSlotsOffset;
    if (state.varargsSpillSlotsStackmapID != UINT_MAX)
        varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);
    else
        varargsSpillSlotsOffset = 0;
    
    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;
        
        if (inlineCallFrame->argumentsRegister.isValid())
            inlineCallFrame->argumentsRegister += localsOffset;
        
        if (inlineCallFrame->argumentCountRegister.isValid())
            inlineCallFrame->argumentCountRegister += localsOffset;
        
        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }
        
        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }
    }
    
    if (codeBlock->usesArguments()) {
        codeBlock->setArgumentsRegister(
            VirtualRegister(codeBlock->argumentsRegister().offset() + localsOffset));
    }

    MacroAssembler::Label stackOverflowException;

    {
        CCallHelpers checkJIT(&vm, codeBlock);
        
        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        stackOverflowException = checkJIT.label();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, checkJIT, codeBlock, JITCompilationMustSucceed);
        linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
        linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, FunctionPtr(lookupExceptionHandlerFromCallerFrame));

        state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
    }

    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());
        RELEASE_ASSERT(didSeeUnwindInfo);
        
        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, exitThunkGenerator, codeBlock, JITCompilationMustSucceed);
        
        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());
        
        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");

            auto iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);
            
            for (unsigned j = exit.m_values.size(); j--;) {
                ExitValue value = exit.m_values[j];
                if (!value.isInJSStackSomehow())
                    continue;
                if (!value.virtualRegister().isLocal())
                    continue;
                exit.m_values[j] = value.withVirtualRegister(
                    VirtualRegister(value.virtualRegister().offset() + localsOffset));
            }
            
            if (verboseCompilationEnabled()) {
                DumpContext context;
                dataLog("    Exit values: ", inContext(exit.m_values, &context), "\n");
            }
        }
        
        state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
    }

    if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty() || !state.checkIns.isEmpty()) {
        CCallHelpers slowPathJIT(&vm, codeBlock);
        
        CCallHelpers::JumpList exceptionTarget;
        
        for (unsigned i = state.getByIds.size(); i--;) {
            GetByIdDescriptor& getById = state.getByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");
            
            auto iter = recordMap.find(getById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
            
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg result = record.locations[0].directGPR();
                GPRReg base = record.locations[1].directGPR();
                
                JITGetByIdGenerator gen(
                    codeBlock, getById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(result), NeedToSpill);
                
                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, getById.codeOrigin(), &exceptionTarget,
                    operationGetByIdOptimize, result, gen.stubInfo(), base, getById.uid());

                gen.reportSlowPathCall(begin, call);

                getById.m_slowPathDone.append(slowPathJIT.jump());
                getById.m_generators.append(gen);
            }
        }
        
        for (unsigned i = state.putByIds.size(); i--;) {
            PutByIdDescriptor& putById = state.putByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");
            
            auto iter = recordMap.find(putById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg base = record.locations[0].directGPR();
                GPRReg value = record.locations[1].directGPR();
                
                JITPutByIdGenerator gen(
                    codeBlock, putById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(value), GPRInfo::patchpointScratchRegister, NeedToSpill,
                    putById.ecmaMode(), putById.putKind());
                
                MacroAssembler::Label begin = slowPathJIT.label();
                
                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, putById.codeOrigin(), &exceptionTarget,
                    gen.slowPathFunction(), gen.stubInfo(), value, base, putById.uid());
                
                gen.reportSlowPathCall(begin, call);
                
                putById.m_slowPathDone.append(slowPathJIT.jump());
                putById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            CheckInDescriptor& checkIn = state.checkIns[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling checkIn stackmap #", checkIn.stackmapID(), "\n");
            
            auto iter = recordMap.find(checkIn.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                RegisterSet usedRegisters = usedRegistersFor(record);
                GPRReg result = record.locations[0].directGPR();
                GPRReg obj = record.locations[1].directGPR();
                StructureStubInfo* stubInfo = codeBlock->addStubInfo(); 
                stubInfo->codeOrigin = checkIn.codeOrigin();
                stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
                stubInfo->patch.valueGPR = static_cast<int8_t>(result);
                stubInfo->patch.usedRegisters = usedRegisters;
                stubInfo->patch.spillMode = NeedToSpill;

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call slowCall = callOperation(
                    state, usedRegisters, slowPathJIT, checkIn.codeOrigin(), &exceptionTarget,
                    operationInOptimize, result, stubInfo, obj, checkIn.m_id);

                checkIn.m_slowPathDone.append(slowPathJIT.jump());
                
                checkIn.m_generators.append(CheckInGenerator(stubInfo, slowCall, begin));
            }
        }
        
        exceptionTarget.link(&slowPathJIT);
        MacroAssembler::Jump exceptionJump = slowPathJIT.jump();
        
        state.finalizer->sideCodeLinkBuffer = std::make_unique<LinkBuffer>(vm, slowPathJIT, codeBlock, JITCompilationMustSucceed);
        state.finalizer->sideCodeLinkBuffer->link(
            exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        
        for (unsigned i = state.getByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
                sizeOfGetById());
        }
        for (unsigned i = state.putByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
                sizeOfPutById());
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            generateCheckInICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
                sizeOfIn()); 
        } 
    }
    
    adjustCallICsForStackmaps(state.jsCalls, recordMap);
    
    for (unsigned i = state.jsCalls.size(); i--;) {
        JSCall& call = state.jsCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT);
        
        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        
        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfCall());
        if (!linkBuffer.isValid()) {
            dataLog("Failed to insert inline cache for call because we thought the size would be ", sizeOfCall(), " but it ended up being ", fastPathJIT.m_assembler.codeSize(), " prior to compaction.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }
        
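        // Pad the rest of the reserved patchpoint area with nops so the IC occupies exactly sizeOfCall() bytes.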
        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfCall() - linkBuffer.size());
        
        call.link(vm, linkBuffer);
    }
    
    adjustCallICsForStackmaps(state.jsCallVarargses, recordMap);
    
    for (unsigned i = state.jsCallVarargses.size(); i--;) {
        JSCallVarargs& call = state.jsCallVarargses[i];
        
        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, graph, varargsSpillSlotsOffset);
        
        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = sizeOfICFor(call.node());

        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfIC);
        if (!linkBuffer.isValid()) {
            dataLog("Failed to insert inline cache for varargs call (specifically, ", Graph::opName(call.node()->op()), ") because we thought the size would be ", sizeOfIC, " but it ended up being ", fastPathJIT.m_assembler.codeSize(), " prior to compaction.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }
        
        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfIC - linkBuffer.size());
        
        call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
    }
    
    RepatchBuffer repatchBuffer(codeBlock);

    auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            RELEASE_ASSERT(stackOverflowException.isSet());

            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
        }
    }
    
    iter = recordMap.find(state.handleExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
            
            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        }
    }
    
    for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
        OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
        OSRExit& exit = jitCode->osrExit[exitIndex];
        iter = recordMap.find(exit.m_stackmapID);
        
        Vector<const void*> codeAddresses;
        
        if (iter != recordMap.end()) {
            for (unsigned i = iter->value.size(); i--;) {
                StackMaps::Record& record = iter->value[i];
                
                CodeLocationLabel source = CodeLocationLabel(
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
                
                codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());
                
                if (info.m_isInvalidationPoint)
                    jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
                else
                    repatchBuffer.replaceWithJump(source, info.m_thunkAddress);
            }
        }
        
        if (graph.compilation())
            graph.compilation()->addOSRExitSite(codeAddresses);
    }
}
Example #11
static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps stackmaps = jitCode->stackmaps;
    
    int localsOffset = offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;
    int varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);
    
    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;
        
        if (inlineCallFrame->argumentCountRegister.isValid())
            inlineCallFrame->argumentCountRegister += localsOffset;
        
        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }
        
        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }

        if (graph.hasDebuggerEnabled())
            codeBlock->setScopeRegister(codeBlock->scopeRegister() + localsOffset);
    }
    
    MacroAssembler::Label stackOverflowException;

    {
        CCallHelpers checkJIT(&vm, codeBlock);
        
        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        stackOverflowException = checkJIT.label();
        checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, checkJIT, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
        linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, FunctionPtr(lookupExceptionHandlerFromCallerFrame));

        state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
    }

    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());
        
        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, exitThunkGenerator, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        
        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());
        
        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");

            auto iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);
            
            for (unsigned j = exit.m_values.size(); j--;)
                exit.m_values[j] = exit.m_values[j].withLocalsOffset(localsOffset);
            for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
                materialization->accountForLocalsOffset(localsOffset);
            
            if (verboseCompilationEnabled()) {
                DumpContext context;
                dataLog("    Exit values: ", inContext(exit.m_values, &context), "\n");
                if (!exit.m_materializations.isEmpty()) {
                    dataLog("    Materializations: \n");
                    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
                        dataLog("        Materialize(", pointerDump(materialization), ")\n");
                }
            }
        }
        
        state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
    }

    if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty() || !state.checkIns.isEmpty()) {
        CCallHelpers slowPathJIT(&vm, codeBlock);
        
        CCallHelpers::JumpList exceptionTarget;
        
        for (unsigned i = state.getByIds.size(); i--;) {
            GetByIdDescriptor& getById = state.getByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");
            
            auto iter = recordMap.find(getById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            CodeOrigin codeOrigin = state.jitCode->common.codeOrigins[getById.callSiteIndex().bits()];
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
            
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg result = record.locations[0].directGPR();
                GPRReg base = record.locations[1].directGPR();
                
                JITGetByIdGenerator gen(
                    codeBlock, codeOrigin, getById.callSiteIndex(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(result), NeedToSpill);
                
                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    operationGetByIdOptimize, result, gen.stubInfo(), base, getById.uid());

                gen.reportSlowPathCall(begin, call);

                getById.m_slowPathDone.append(slowPathJIT.jump());
                getById.m_generators.append(gen);
            }
        }
        
        for (unsigned i = state.putByIds.size(); i--;) {
            PutByIdDescriptor& putById = state.putByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");
            
            auto iter = recordMap.find(putById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            CodeOrigin codeOrigin = state.jitCode->common.codeOrigins[putById.callSiteIndex().bits()];
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg base = record.locations[0].directGPR();
                GPRReg value = record.locations[1].directGPR();
                
                JITPutByIdGenerator gen(
                    codeBlock, codeOrigin, putById.callSiteIndex(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(value), GPRInfo::patchpointScratchRegister, NeedToSpill,
                    putById.ecmaMode(), putById.putKind());
                
                MacroAssembler::Label begin = slowPathJIT.label();
                
                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    gen.slowPathFunction(), gen.stubInfo(), value, base, putById.uid());
                
                gen.reportSlowPathCall(begin, call);
                
                putById.m_slowPathDone.append(slowPathJIT.jump());
                putById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            CheckInDescriptor& checkIn = state.checkIns[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling checkIn stackmap #", checkIn.stackmapID(), "\n");
            
            auto iter = recordMap.find(checkIn.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            CodeOrigin codeOrigin = state.jitCode->common.codeOrigins[checkIn.callSiteIndex().bits()];
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                RegisterSet usedRegisters = usedRegistersFor(record);
                GPRReg result = record.locations[0].directGPR();
                GPRReg obj = record.locations[1].directGPR();
                StructureStubInfo* stubInfo = codeBlock->addStubInfo(AccessType::In); 
                stubInfo->codeOrigin = codeOrigin;
                stubInfo->callSiteIndex = checkIn.callSiteIndex();
                stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
                stubInfo->patch.valueGPR = static_cast<int8_t>(result);
                stubInfo->patch.usedRegisters = usedRegisters;
                stubInfo->patch.spillMode = NeedToSpill;

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call slowCall = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    operationInOptimize, result, stubInfo, obj, checkIn.m_uid);

                checkIn.m_slowPathDone.append(slowPathJIT.jump());
                
                checkIn.m_generators.append(CheckInGenerator(stubInfo, slowCall, begin));
            }
        }
        
        exceptionTarget.link(&slowPathJIT);
        MacroAssembler::Jump exceptionJump = slowPathJIT.jump();
        
        state.finalizer->sideCodeLinkBuffer = std::make_unique<LinkBuffer>(vm, slowPathJIT, codeBlock, JITCompilationCanFail);
        if (state.finalizer->sideCodeLinkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        state.finalizer->sideCodeLinkBuffer->link(
            exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        
        for (unsigned i = state.getByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
                sizeOfGetById());
        }
        for (unsigned i = state.putByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
                sizeOfPutById());
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            generateCheckInICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
                sizeOfIn()); 
        } 
    }
    
    adjustCallICsForStackmaps(state.jsCalls, recordMap);
    
    for (unsigned i = state.jsCalls.size(); i--;) {
        JSCall& call = state.jsCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, state.jitCode->stackmaps.stackSizeForLocals());

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfCall(), "JSCall inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer);
        });
    }
    
    adjustCallICsForStackmaps(state.jsCallVarargses, recordMap);
    
    for (unsigned i = state.jsCallVarargses.size(); i--;) {
        JSCallVarargs& call = state.jsCallVarargses[i];
        
        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, varargsSpillSlotsOffset);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = sizeOfICFor(call.node());

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "varargs call inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        });
    }

    adjustCallICsForStackmaps(state.jsTailCalls, recordMap);

    for (unsigned i = state.jsTailCalls.size(); i--;) {
        JSTailCall& call = state.jsTailCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(*state.jitCode.get(), fastPathJIT);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = call.estimatedSize();

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "tail call inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer);
        });
    }
    
    auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            RELEASE_ASSERT(stackOverflowException.isSet());

            MacroAssembler::replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
        }
    }
    
    iter = recordMap.find(state.handleExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
            
            MacroAssembler::replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        }
    }
    
    for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
        OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
        OSRExit& exit = jitCode->osrExit[exitIndex];
        iter = recordMap.find(exit.m_stackmapID);
        
        Vector<const void*> codeAddresses;
        
        if (iter != recordMap.end()) {
            for (unsigned i = iter->value.size(); i--;) {
                StackMaps::Record& record = iter->value[i];
                
                CodeLocationLabel source = CodeLocationLabel(
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
                
                codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());
                
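                // Note (added): invalidation points are not patched now; we only record a
                // JumpReplacement so the jump to the exit thunk can be planted later, if the
                // code is ever invalidated. Ordinary OSR exits get their jump installed eagerly.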
                if (info.m_isInvalidationPoint)
                    jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
                else
                    MacroAssembler::replaceWithJump(source, info.m_thunkAddress);
            }
        }
        
        if (graph.compilation())
            graph.compilation()->addOSRExitSite(codeAddresses);
    }
}

Example No. 12
void link(State& state)
{
    Graph& graph = state.graph;
    CodeBlock* codeBlock = graph.m_codeBlock;
    VM& vm = graph.m_vm;

    // LLVM will create its own jump tables as needed.
    codeBlock->clearSwitchJumpTables();

#if !FTL_USES_B3
    // What LLVM's stackmaps call stackSizeForLocals and what we call frameRegisterCount have a simple
    // relationship, though it's not obvious from reading the code. The easiest way to understand them
    // is to look at stackOffset, i.e. what you have to add to FP to get SP. For LLVM that is just:
    //
    //     stackOffset == -state.jitCode->stackmaps.stackSizeForLocals()
    //
    // The way we define frameRegisterCount is that it satisfies this equality:
    //
    //     stackOffset == virtualRegisterForLocal(frameRegisterCount - 1).offset() * sizeof(Register)
    //
    // We can simplify this when we apply virtualRegisterForLocal():
    //
    //     stackOffset == (-1 - (frameRegisterCount - 1)) * sizeof(Register)
    //     stackOffset == (-1 - frameRegisterCount + 1) * sizeof(Register)
    //     stackOffset == -frameRegisterCount * sizeof(Register)
    //
    // Therefore we just have:
    //
    //     frameRegisterCount == -stackOffset / sizeof(Register)
    //
    // If we substitute what we have above, we get:
    //
    //     frameRegisterCount == -(-state.jitCode->stackmaps.stackSizeForLocals()) / sizeof(Register)
    //     frameRegisterCount == state.jitCode->stackmaps.stackSizeForLocals() / sizeof(Register)
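    //
    // As a purely illustrative (hypothetical) instance: if stackSizeForLocals() were 128 on a
    // 64-bit target, where sizeof(Register) == sizeof(void*) == 8, then stackOffset would be
    // -128 and frameRegisterCount would come out to 128 / 8 == 16.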
    state.jitCode->common.frameRegisterCount = state.jitCode->stackmaps.stackSizeForLocals() / sizeof(void*);
#endif

    state.jitCode->common.requiredRegisterCountForExit = graph.requiredRegisterCountForExit();

    if (!graph.m_plan.inlineCallFrames->isEmpty())
        state.jitCode->common.inlineCallFrames = graph.m_plan.inlineCallFrames;

    graph.registerFrozenValues();

    // Create the entrypoint. Note that we use this entrypoint totally differently
    // depending on whether we're doing OSR entry or not.
    CCallHelpers jit(&vm, codeBlock);

    std::unique_ptr<LinkBuffer> linkBuffer;

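    // Note (added): the entry code below reads the incoming frame (e.g. ArgumentCount) before
    // it has run its own prologue, so frame slots are addressed relative to the stack pointer,
    // offset by what the prologue would have pushed.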
    CCallHelpers::Address frame = CCallHelpers::Address(
                                      CCallHelpers::stackPointerRegister, -static_cast<int32_t>(AssemblyHelpers::prologueStackPointerDelta()));

    if (Profiler::Compilation* compilation = graph.compilation()) {
        compilation->addDescription(
            Profiler::OriginStack(),
            toCString("Generated FTL JIT code for ", CodeBlockWithJITType(codeBlock, JITCode::FTLJIT), ", instruction count = ", graph.m_codeBlock->instructionCount(), ":\n"));

        graph.ensureDominators();
        graph.ensureNaturalLoops();

        const char* prefix = "    ";

        DumpContext dumpContext;
        StringPrintStream out;
        Node* lastNode = 0;
        for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); ++blockIndex) {
            BasicBlock* block = graph.block(blockIndex);
            if (!block)
                continue;

            graph.dumpBlockHeader(out, prefix, block, Graph::DumpLivePhisOnly, &dumpContext);
            compilation->addDescription(Profiler::OriginStack(), out.toCString());
            out.reset();

            for (size_t nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                Profiler::OriginStack stack;

                if (node->origin.semantic.isSet()) {
                    stack = Profiler::OriginStack(
                                *vm.m_perBytecodeProfiler, codeBlock, node->origin.semantic);
                }

                if (graph.dumpCodeOrigin(out, prefix, lastNode, node, &dumpContext)) {
                    compilation->addDescription(stack, out.toCString());
                    out.reset();
                }

                graph.dump(out, prefix, node, &dumpContext);
                compilation->addDescription(stack, out.toCString());
                out.reset();

                if (node->origin.semantic.isSet())
                    lastNode = node;
            }
        }

        dumpContext.dump(out, prefix);
        compilation->addDescription(Profiler::OriginStack(), out.toCString());
        out.reset();

        out.print("    Disassembly:\n");
#if FTL_USES_B3
        out.print("        <not implemented yet>\n");
#else
        for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
            if (state.codeSectionNames[i] != SECTION_NAME("text"))
                continue;

            ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
            disassemble(
                MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                "      ", out, LLVMSubset);
        }
#endif
        compilation->addDescription(Profiler::OriginStack(), out.toCString());
        out.reset();

        state.jitCode->common.compilation = compilation;
    }

    switch (graph.m_plan.mode) {
    case FTLMode: {
        CCallHelpers::JumpList mainPathJumps;

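        // Note (added): fast path - if the caller already passed at least as many arguments as
        // the callee expects, skip the arity check machinery entirely and jump straight to the
        // FTL-generated entry point.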
        jit.load32(
            frame.withOffset(sizeof(Register) * JSStack::ArgumentCount),
            GPRInfo::regT1);
        mainPathJumps.append(jit.branch32(
                                 CCallHelpers::AboveOrEqual, GPRInfo::regT1,
                                 CCallHelpers::TrustedImm32(codeBlock->numParameters())));
        jit.emitFunctionPrologue();
        jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
        jit.store32(
            CCallHelpers::TrustedImm32(CallSiteIndex(0).bits()),
            CCallHelpers::tagFor(JSStack::ArgumentCount));
        jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
        CCallHelpers::Call callArityCheck = jit.call();
#if !ASSERT_DISABLED
        // FIXME: need to make this call register with exception handling somehow. This is
        // part of a bigger problem: FTL should be able to handle exceptions.
        // https://bugs.webkit.org/show_bug.cgi?id=113622
        // Until then, use a JIT ASSERT.
        jit.load64(vm.addressOfException(), GPRInfo::regT1);
        jit.jitAssertIsNull(GPRInfo::regT1);
#endif
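        // Note (added): a zero result from the arity check operation means no frame fixup is
        // needed, so we head straight for the main path; otherwise we re-establish the frame
        // and call the arity fixup thunk before entering the main path.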
        jit.move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
        jit.emitFunctionEpilogue();
        mainPathJumps.append(jit.branchTest32(CCallHelpers::Zero, GPRInfo::argumentGPR0));
        jit.emitFunctionPrologue();
        CCallHelpers::Call callArityFixup = jit.call();
        jit.emitFunctionEpilogue();
        mainPathJumps.append(jit.jump());

        linkBuffer = std::make_unique<LinkBuffer>(vm, jit, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        linkBuffer->link(callArityCheck, codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
        linkBuffer->link(callArityFixup, FunctionPtr((vm.getCTIStub(arityFixupGenerator)).code().executableAddress()));
        linkBuffer->link(mainPathJumps, CodeLocationLabel(bitwise_cast<void*>(state.generatedFunction)));

        state.jitCode->initializeAddressForCall(MacroAssemblerCodePtr(bitwise_cast<void*>(state.generatedFunction)));
        break;
    }

    case FTLForOSREntryMode: {
        // We jump to here straight from DFG code, after having boxed up all of the
        // values into the scratch buffer. Everything should be good to go - at this
        // point we've even done the stack check. Basically we just have to make the
        // call to the LLVM-generated code.
        CCallHelpers::Label start = jit.label();
        jit.emitFunctionEpilogue();
        CCallHelpers::Jump mainPathJump = jit.jump();

        linkBuffer = std::make_unique<LinkBuffer>(vm, jit, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        linkBuffer->link(mainPathJump, CodeLocationLabel(bitwise_cast<void*>(state.generatedFunction)));

        state.jitCode->initializeAddressForCall(linkBuffer->locationOf(start));
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    state.finalizer->entrypointLinkBuffer = WTFMove(linkBuffer);
    state.finalizer->function = state.generatedFunction;
    state.finalizer->jitCode = state.jitCode;
}
Example No. 13
// FIXME: We should distinguish between a megamorphic virtual call vs. a slow
// path virtual call so that we can enable fast tail calls for megamorphic
// virtual calls by using the shuffler.
// https://bugs.webkit.org/show_bug.cgi?id=148831
MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
{
    // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
    // The return address is on the stack, or in the link register. We will hence
    // jump to the callee, or save the return address to the call frame while we
    // make a C++ function call to the appropriate JIT operation.

    CCallHelpers jit(vm);
    
    CCallHelpers::JumpList slowCase;
    
    // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
    // slow path execution for the profiler.
    jit.add32(
        CCallHelpers::TrustedImm32(1),
        CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));

    // FIXME: we should have a story for eliminating these checks. In many cases,
    // the DFG knows that the value is definitely a cell, or definitely a function.
    
#if USE(JSVALUE64)
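    // Note (added): on 64-bit, a (non-empty) JSValue is a cell exactly when none of the
    // TagMask bits are set, so a single branchTest64 filters out numbers and other immediates.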
    jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4);
    
    slowCase.append(
        jit.branchTest64(
            CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4));
#else
    slowCase.append(
        jit.branch32(
            CCallHelpers::NotEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
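    // Note (added): load the callee's Structure and check its ClassInfo; only genuine
    // JSFunctions take the fast path, while other callable cells fall through to the slow path.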
    AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1);
    slowCase.append(
        jit.branchPtr(
            CCallHelpers::NotEqual,
            CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()),
            CCallHelpers::TrustedImmPtr(JSFunction::info())));
    
    // Now we know we have a JSFunction.
    
    jit.loadPtr(
        CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
        GPRInfo::regT4);
    jit.loadPtr(
        CCallHelpers::Address(
            GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(
                callLinkInfo.specializationKind())),
        GPRInfo::regT4);
    slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));
    
    // Now we know that we have a CodeBlock, and we're committed to making a fast
    // call.
    
    // Make a tail call. This will return back to JIT code.
    emitPointerValidation(jit, GPRInfo::regT4);
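    // Note (added): for tail call sites we first recover the return address and let
    // prepareForTailCallSlow replace the current frame with the callee's, so the callee
    // returns directly to the tail-calling function's own caller.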
    if (callLinkInfo.isTailCall()) {
        jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
        jit.prepareForTailCallSlow(GPRInfo::regT4);
    }
    jit.jump(GPRInfo::regT4);

    slowCase.link(&jit);
    
    // Here we don't know anything, so revert to the full slow path.
    
    slowPathFor(jit, vm, operationVirtualCall);
    
    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(
        patchBuffer,
        ("Virtual %s slow path thunk",
        callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct"));
}
Example No. 14
void link(State& state)
{
    Graph& graph = state.graph;
    CodeBlock* codeBlock = graph.m_codeBlock;
    VM& vm = graph.m_vm;
    
    // LLVM will create its own jump tables as needed.
    codeBlock->clearSwitchJumpTables();
    
    // FIXME: Need to know the real frame register count.
    // https://bugs.webkit.org/show_bug.cgi?id=125727
    state.jitCode->common.frameRegisterCount = 1000;
    
    state.jitCode->common.requiredRegisterCountForExit = graph.requiredRegisterCountForExit();
    
    if (!graph.m_plan.inlineCallFrames->isEmpty())
        state.jitCode->common.inlineCallFrames = graph.m_plan.inlineCallFrames;
    
    graph.registerFrozenValues();

    // Create the entrypoint. Note that we use this entrypoint totally differently
    // depending on whether we're doing OSR entry or not.
    CCallHelpers jit(&vm, codeBlock);
    
    std::unique_ptr<LinkBuffer> linkBuffer;

    CCallHelpers::Address frame = CCallHelpers::Address(
        CCallHelpers::stackPointerRegister, -static_cast<int32_t>(AssemblyHelpers::prologueStackPointerDelta()));
    
    if (Profiler::Compilation* compilation = graph.compilation()) {
        compilation->addDescription(
            Profiler::OriginStack(),
            toCString("Generated FTL JIT code for ", CodeBlockWithJITType(codeBlock, JITCode::FTLJIT), ", instruction count = ", graph.m_codeBlock->instructionCount(), ":\n"));
        
        graph.m_dominators.computeIfNecessary(graph);
        graph.m_naturalLoops.computeIfNecessary(graph);
        
        const char* prefix = "    ";
        
        DumpContext dumpContext;
        StringPrintStream out;
        Node* lastNode = 0;
        for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); ++blockIndex) {
            BasicBlock* block = graph.block(blockIndex);
            if (!block)
                continue;
            
            graph.dumpBlockHeader(out, prefix, block, Graph::DumpLivePhisOnly, &dumpContext);
            compilation->addDescription(Profiler::OriginStack(), out.toCString());
            out.reset();
            
            for (size_t nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                if (!node->willHaveCodeGenOrOSR() && !Options::showAllDFGNodes())
                    continue;
                
                Profiler::OriginStack stack;
                
                if (node->origin.semantic.isSet()) {
                    stack = Profiler::OriginStack(
                        *vm.m_perBytecodeProfiler, codeBlock, node->origin.semantic);
                }
                
                if (graph.dumpCodeOrigin(out, prefix, lastNode, node, &dumpContext)) {
                    compilation->addDescription(stack, out.toCString());
                    out.reset();
                }
                
                graph.dump(out, prefix, node, &dumpContext);
                compilation->addDescription(stack, out.toCString());
                out.reset();
                
                if (node->origin.semantic.isSet())
                    lastNode = node;
            }
        }
        
        dumpContext.dump(out, prefix);
        compilation->addDescription(Profiler::OriginStack(), out.toCString());
        out.reset();
        
        out.print("    Disassembly:\n");
        for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
            if (state.codeSectionNames[i] != SECTION_NAME("text"))
                continue;
            
            ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
            disassemble(
                MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                "      ", out, LLVMSubset);
        }
        compilation->addDescription(Profiler::OriginStack(), out.toCString());
        out.reset();
        
        state.jitCode->common.compilation = compilation;
    }
    
    switch (graph.m_plan.mode) {
    case FTLMode: {
        CCallHelpers::JumpList mainPathJumps;
    
        jit.load32(
            frame.withOffset(sizeof(Register) * JSStack::ArgumentCount),
            GPRInfo::regT1);
        mainPathJumps.append(jit.branch32(
            CCallHelpers::AboveOrEqual, GPRInfo::regT1,
            CCallHelpers::TrustedImm32(codeBlock->numParameters())));
        jit.emitFunctionPrologue();
        jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
        jit.store32(
            CCallHelpers::TrustedImm32(CallFrame::Location::encodeAsBytecodeOffset(0)),
            CCallHelpers::tagFor(JSStack::ArgumentCount));
        jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
        CCallHelpers::Call callArityCheck = jit.call();
#if !ASSERT_DISABLED
        // FIXME: need to make this call register with exception handling somehow. This is
        // part of a bigger problem: FTL should be able to handle exceptions.
        // https://bugs.webkit.org/show_bug.cgi?id=113622
        // Until then, use a JIT ASSERT.
        jit.load64(vm.addressOfException(), GPRInfo::regT1);
        jit.jitAssertIsNull(GPRInfo::regT1);
#endif
        jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0);
        jit.emitFunctionEpilogue();
        mainPathJumps.append(jit.branchTest32(CCallHelpers::Zero, GPRInfo::regT0));
        jit.emitFunctionPrologue();
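        // Note (added): older arity-fixup scheme - the arity check's result (in regT0) indexes
        // a table of return PCs supplied by ArityCheckFailReturnThunks; the selected entry is
        // left in regT7 for the arity fixup thunk.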
        jit.move(CCallHelpers::TrustedImmPtr(vm.arityCheckFailReturnThunks->returnPCsFor(vm, codeBlock->numParameters())), GPRInfo::regT7);
        jit.loadPtr(CCallHelpers::BaseIndex(GPRInfo::regT7, GPRInfo::regT0, CCallHelpers::timesPtr()), GPRInfo::regT7);
        CCallHelpers::Call callArityFixup = jit.call();
        jit.emitFunctionEpilogue();
        mainPathJumps.append(jit.jump());

        linkBuffer = std::make_unique<LinkBuffer>(vm, jit, codeBlock, JITCompilationMustSucceed);
        linkBuffer->link(callArityCheck, codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
        linkBuffer->link(callArityFixup, FunctionPtr((vm.getCTIStub(arityFixupGenerator)).code().executableAddress()));
        linkBuffer->link(mainPathJumps, CodeLocationLabel(bitwise_cast<void*>(state.generatedFunction)));

        state.jitCode->initializeAddressForCall(MacroAssemblerCodePtr(bitwise_cast<void*>(state.generatedFunction)));
        break;
    }
        
    case FTLForOSREntryMode: {
        // We jump to here straight from DFG code, after having boxed up all of the
        // values into the scratch buffer. Everything should be good to go - at this
        // point we've even done the stack check. Basically we just have to make the
        // call to the LLVM-generated code.
        CCallHelpers::Label start = jit.label();
        jit.emitFunctionEpilogue();
        CCallHelpers::Jump mainPathJump = jit.jump();
        
        linkBuffer = std::make_unique<LinkBuffer>(vm, jit, codeBlock, JITCompilationMustSucceed);
        linkBuffer->link(mainPathJump, CodeLocationLabel(bitwise_cast<void*>(state.generatedFunction)));

        state.jitCode->initializeAddressForCall(linkBuffer->locationOf(start));
        break;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    
    state.finalizer->entrypointLinkBuffer = WTF::move(linkBuffer);
    state.finalizer->function = state.generatedFunction;
    state.finalizer->jitCode = state.jitCode;
}