Example 1
void emitSetVarargsFrame(CCallHelpers& jit, GPRReg lengthGPR, bool lengthIncludesThis, GPRReg numUsedSlotsGPR, GPRReg resultGPR)
{
    jit.move(numUsedSlotsGPR, resultGPR);
    // We really want the size of the new call frame to be a multiple of
    // stackAlignmentRegisters(), but it is easier to accomplish this by
    // rounding numUsedSlotsGPR up to the next multiple of
    // stackAlignmentRegisters(). Together with the rounding below, this
    // ensures that the new call frame sits on a stackAlignmentRegisters()
    // boundary and is a multiple of stackAlignmentRegisters() in size.
    jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), resultGPR);
    jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), resultGPR);

    jit.addPtr(lengthGPR, resultGPR);
    jit.addPtr(CCallHelpers::TrustedImm32(JSStack::CallFrameHeaderSize + (lengthIncludesThis ? 0 : 1)), resultGPR);
    
    // resultGPR now holds the required frame size in Register units.
    // Round resultGPR up to the next multiple of stackAlignmentRegisters().
    jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), resultGPR);
    jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), resultGPR);
    
    // Now resultGPR has the right stack frame offset in Register units.
    jit.negPtr(resultGPR);
    jit.lshiftPtr(CCallHelpers::Imm32(3), resultGPR);
    jit.addPtr(GPRInfo::callFrameRegister, resultGPR);
}
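
Both roundings above use the same power-of-two round-up idiom. A minimal stand-alone sketch of that arithmetic, assuming only that the alignment is a power of two (which stackAlignmentRegisters() guarantees):

#include <cassert>
#include <cstdint>

// Round value up to the next multiple of alignment (a power of two).
static uint64_t roundUpToMultiple(uint64_t value, uint64_t alignment)
{
    assert(alignment && !(alignment & (alignment - 1))); // power of two
    return (value + alignment - 1) & ~(alignment - 1);
}

int main()
{
    assert(roundUpToMultiple(5, 2) == 6); // 5 used slots round up to 6
    assert(roundUpToMultiple(8, 2) == 8); // an aligned value is unchanged
    return 0;
}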
void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
{
#if ENABLE(GGC)
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()->ownerExecutable()), GPRInfo::nonArgGPR0);
    osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
    InlineCallFrameSet* inlineCallFrames = jit.codeBlock()->jitCode()->dfgCommon()->inlineCallFrames.get();
    if (inlineCallFrames) {
        for (InlineCallFrame* inlineCallFrame : *inlineCallFrames) {
            ScriptExecutable* ownerExecutable = inlineCallFrame->executable.get();
            jit.move(AssemblyHelpers::TrustedImmPtr(ownerExecutable), GPRInfo::nonArgGPR0);
            osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
        }
    }
#endif

    if (exit.m_codeOrigin.inlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);

    jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(baselineCodeBlock) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);

    jit.jitAssertTagsInPlace();

    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);
}
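
The decodedCodeMap consulted above is sorted by bytecode index, so the lookup is a plain binary search. A self-contained sketch over mock types (the real BytecodeAndMachineOffset entries are decoded from the baseline JIT's map; the names below are illustrative):

#include <algorithm>
#include <vector>

struct MockMapping {
    unsigned bytecodeIndex;
    unsigned machineCodeOffset;
};

// Returns the entry for bytecodeIndex, or nullptr if absent. The OSR exit
// code above asserts that the exit origin is always present in the map.
static const MockMapping* findMapping(const std::vector<MockMapping>& sortedMap, unsigned bytecodeIndex)
{
    auto it = std::lower_bound(sortedMap.begin(), sortedMap.end(), bytecodeIndex,
        [](const MockMapping& entry, unsigned key) { return entry.bytecodeIndex < key; });
    if (it == sortedMap.end() || it->bytecodeIndex != bytecodeIndex)
        return nullptr;
    return &*it;
}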
Example 3
void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
{
#if ENABLE(GGC) 
    // 11) Write barrier the owner executable because we're jumping into a different block.
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        jit.move(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock->ownerExecutable()), GPRInfo::nonArgGPR0); 
        osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1, GPRInfo::nonArgGPR2);
        if (!codeOrigin.inlineCallFrame)
            break;
    }
#endif

    if (exit.m_codeOrigin.inlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(baselineCodeBlock);
    
    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);
    
    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);
    
    void* jumpTarget = baselineCodeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);

    jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(baselineCodeBlock) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);
    
    jit.jitAssertTagsInPlace();

    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);
}
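
The barrier loop above visits the exit origin's own frame and then every inlined caller, stopping at the machine frame. The same traversal restated over hypothetical stand-ins for CodeOrigin and InlineCallFrame:

struct MockInlineCallFrame;

struct MockCodeOrigin {
    unsigned bytecodeIndex { 0 };
    MockInlineCallFrame* inlineCallFrame { nullptr }; // null in the machine frame
};

struct MockInlineCallFrame {
    MockCodeOrigin caller;
};

template<typename Visitor>
void forEachFrameOutward(MockCodeOrigin origin, const Visitor& visit)
{
    for (;;) {
        visit(origin); // e.g. emit a write barrier for this frame's executable
        if (!origin.inlineCallFrame)
            break; // reached the outermost, non-inlined frame
        origin = origin.inlineCallFrame->caller;
    }
}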
Example 4
static void slowPathFor(
    CCallHelpers& jit, VM* vm, Sprt_JITOperation_ECli slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
#if OS(WINDOWS) && CPU(X86_64)
    // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
    // Other argument values are shifted by one. Use space on the stack for our two return values.
    // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
    // and space for the 16 byte return area.
    jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.move(GPRInfo::regT2, GPRInfo::argumentGPR2);
    jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
    jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#else
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsWithExecState(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#endif

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    // The second return value GPR will hold a non-zero value for tail calls.

    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();

    RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
    CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);

    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
    jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);

    doNotTrash.link(&jit);
    jit.jump(GPRInfo::returnValueGPR);
}
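
The RELEASE_ASSERT and the branchTestPtr above depend on one encoding detail: the second return value is zero to keep the frame and non-zero to prepare a tail call. A hedged restatement of that contract; KeepTheFrame is the name from the source, the rest is illustrative:

#include <cstdint>

// Hypothetical rendering of the disposition returned in the second
// return-value GPR. Only KeepTheFrame == 0 matters to the thunk:
// branchTestPtr(Zero, returnValueGPR2) takes the doNotTrash path.
enum FrameDisposition : uintptr_t {
    KeepTheFrame = 0,  // ordinary call: jump without touching the frame
    ReuseTheFrame = 1, // tail call: preserve return address, shuffle frame
};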
Example 5
void emitSetVarargsFrame(CCallHelpers& jit, GPRReg lengthGPR, bool lengthIncludesThis, GPRReg numUsedSlotsGPR, GPRReg resultGPR)
{
    jit.move(numUsedSlotsGPR, resultGPR);
    jit.addPtr(lengthGPR, resultGPR);
    jit.addPtr(CCallHelpers::TrustedImm32(JSStack::CallFrameHeaderSize + (lengthIncludesThis ? 0 : 1)), resultGPR);
    
    // resultGPR now holds the required frame size in Register units.
    // Round resultGPR up to the next multiple of stackAlignmentRegisters().
    jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), resultGPR);
    jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), resultGPR);
    
    // Now resultGPR has the right stack frame offset in Register units.
    jit.negPtr(resultGPR);
    jit.lshiftPtr(CCallHelpers::Imm32(3), resultGPR);
    jit.addPtr(GPRInfo::callFrameRegister, resultGPR);
}
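
In both variants the closing negPtr/lshiftPtr/addPtr sequence converts a slot count into a byte offset below the frame pointer; the shift by 3 assumes an 8-byte Register. A one-liner making that assumption explicit:

#include <cstdint>

// sizeof(Register) == 8 == 1 << 3 on 64-bit targets, so a left shift by 3
// turns a count of Register-sized slots into a byte offset.
static inline intptr_t registersToBytes(intptr_t registerCount)
{
    return registerCount << 3;
}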
Example 6
static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_JITOperation_E slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsExecState();
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    
    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();
    jit.jump(GPRInfo::returnValueGPR);
}
static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch)
{
    AssemblyHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(owner);

    // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
#if CPU(X86)
    jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    jit.setupArgumentsWithExecState(owner);
    jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch);
    jit.call(scratch);

#if CPU(X86)
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    ownerIsRememberedOrInEden.link(&jit);
}
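
jumpIfIsRememberedOrInEden gives the thunk a fast path that skips the C call entirely. What the emitted code amounts to, restated as plain C++ over a mock cell (the one-flag state encoding is an assumption for illustration, not JSC's actual layout):

struct MockCell {
    bool rememberedOrInEden; // assumed flag summarizing the GC state
};

// Conceptual shape of the emitted barrier: bail out early when the owner
// is already remembered or was allocated in eden; otherwise call out.
inline void conceptualOSRWriteBarrier(MockCell* owner, void (*slowPath)(MockCell*))
{
    if (owner->rememberedOrInEden)
        return; // the linked ownerIsRememberedOrInEden jump lands here
    slowPath(owner); // operationOSRWriteBarrier in the real thunk
}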
Example 8
static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2)
{
    AssemblyHelpers::Jump definitelyNotMarked = jit.genericWriteBarrier(owner, scratch1, scratch2);

    // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
#if CPU(X86)
    jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    jit.setupArgumentsWithExecState(owner);
    jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch1);
    jit.call(scratch1);

#if CPU(X86)
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    definitelyNotMarked.link(&jit);
}
void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
{
    if (exit.m_codeOrigin.inlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(baselineCodeBlock);
    
    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);
    
    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);
    
    void* jumpTarget = baselineCodeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);
    
    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("   -> %p\n", jumpTarget);
#endif
}
Example 10
void JSCallVarargs::emit(CCallHelpers& jit, State& state, int32_t spillSlotsOffset, int32_t osrExitFromGenericUnwindSpillSlots)
{
    // We are passed three pieces of information:
    // - The callee.
    // - The arguments object, if it's not a forwarding call.
    // - The "this" value, if it's a constructor call.

    CallVarargsData* data = m_node->callVarargsData();
    
    GPRReg calleeGPR = GPRInfo::argumentGPR0;
    
    GPRReg argumentsGPR = InvalidGPRReg;
    GPRReg thisGPR = InvalidGPRReg;
    
    bool forwarding = false;
    
    switch (m_node->op()) {
    case CallVarargs:
    case TailCallVarargs:
    case TailCallVarargsInlinedCaller:
    case ConstructVarargs:
        argumentsGPR = GPRInfo::argumentGPR1;
        thisGPR = GPRInfo::argumentGPR2;
        break;
    case CallForwardVarargs:
    case TailCallForwardVarargs:
    case TailCallForwardVarargsInlinedCaller:
    case ConstructForwardVarargs:
        thisGPR = GPRInfo::argumentGPR1;
        forwarding = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    
    const unsigned calleeSpillSlot = 0;
    const unsigned argumentsSpillSlot = 1;
    const unsigned thisSpillSlot = 2;
    const unsigned stackPointerSpillSlot = 3;
    
    // Get some scratch registers.
    RegisterSet usedRegisters;
    usedRegisters.merge(RegisterSet::stackRegisters());
    usedRegisters.merge(RegisterSet::reservedHardwareRegisters());
    usedRegisters.merge(RegisterSet::calleeSaveRegisters());
    usedRegisters.set(calleeGPR);
    if (argumentsGPR != InvalidGPRReg)
        usedRegisters.set(argumentsGPR);
    ASSERT(thisGPR != InvalidGPRReg);
    usedRegisters.set(thisGPR);
    ScratchRegisterAllocator allocator(usedRegisters);
    GPRReg scratchGPR1 = allocator.allocateScratchGPR();
    GPRReg scratchGPR2 = allocator.allocateScratchGPR();
    GPRReg scratchGPR3 = allocator.allocateScratchGPR();

    RELEASE_ASSERT(!allocator.numberOfReusedRegisters());
    
    auto computeUsedStack = [&] (GPRReg targetGPR, unsigned extra) {
        if (isARM64()) {
            // Have to do this the weird way because $sp on ARM64 means zero when used in a subtraction.
            jit.move(CCallHelpers::stackPointerRegister, targetGPR);
            jit.negPtr(targetGPR);
            jit.addPtr(GPRInfo::callFrameRegister, targetGPR);
        } else {
            jit.move(GPRInfo::callFrameRegister, targetGPR);
            jit.subPtr(CCallHelpers::stackPointerRegister, targetGPR);
        }
        if (extra)
            jit.subPtr(CCallHelpers::TrustedImm32(extra), targetGPR);
        jit.urshiftPtr(CCallHelpers::Imm32(3), targetGPR);
    };
    
    auto callWithExceptionCheck = [&] (void* callee) {
        jit.move(CCallHelpers::TrustedImmPtr(callee), GPRInfo::nonPreservedNonArgumentGPR);
        jit.call(GPRInfo::nonPreservedNonArgumentGPR);
        m_exceptions.append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
    };
    
    if (isARM64()) {
        jit.move(CCallHelpers::stackPointerRegister, scratchGPR1);
        jit.storePtr(scratchGPR1, CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot));
    } else
        jit.storePtr(CCallHelpers::stackPointerRegister, CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot));

    unsigned extraStack = sizeof(CallerFrameAndPC) +
        WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*));

    if (forwarding) {
        CCallHelpers::JumpList slowCase;
        computeUsedStack(scratchGPR2, 0);
        emitSetupVarargsFrameFastCase(jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, m_node->child2()->origin.semantic.inlineCallFrame, data->firstVarArgOffset, slowCase);
        
        CCallHelpers::Jump done = jit.jump();
        slowCase.link(&jit);
        jit.subPtr(CCallHelpers::TrustedImm32(extraStack), CCallHelpers::stackPointerRegister);
        jit.setupArgumentsExecState();
        callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
        jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
        
        done.link(&jit);
        jit.move(calleeGPR, GPRInfo::regT0);
    } else {
        // Gotta spill the callee, arguments, and this because we will need them later and we will have some
        // calls that clobber them.
        jit.store64(calleeGPR, CCallHelpers::addressFor(spillSlotsOffset + calleeSpillSlot));
        jit.store64(argumentsGPR, CCallHelpers::addressFor(spillSlotsOffset + argumentsSpillSlot));
        jit.store64(thisGPR, CCallHelpers::addressFor(spillSlotsOffset + thisSpillSlot));
    
        computeUsedStack(scratchGPR1, 0);
        jit.subPtr(CCallHelpers::TrustedImm32(extraStack), CCallHelpers::stackPointerRegister);
        jit.setupArgumentsWithExecState(argumentsGPR, scratchGPR1, CCallHelpers::TrustedImm32(data->firstVarArgOffset));
        callWithExceptionCheck(bitwise_cast<void*>(operationSizeFrameForVarargs));
    
        jit.move(GPRInfo::returnValueGPR, scratchGPR1);
        computeUsedStack(scratchGPR2, extraStack);
        jit.load64(CCallHelpers::addressFor(spillSlotsOffset + argumentsSpillSlot), argumentsGPR);
        emitSetVarargsFrame(jit, scratchGPR1, false, scratchGPR2, scratchGPR2);
        jit.addPtr(CCallHelpers::TrustedImm32(-extraStack), scratchGPR2, CCallHelpers::stackPointerRegister);
        jit.setupArgumentsWithExecState(scratchGPR2, argumentsGPR, CCallHelpers::TrustedImm32(data->firstVarArgOffset), scratchGPR1);
        callWithExceptionCheck(bitwise_cast<void*>(operationSetupVarargsFrame));
    
        jit.move(GPRInfo::returnValueGPR, scratchGPR2);

        jit.load64(CCallHelpers::addressFor(spillSlotsOffset + thisSpillSlot), thisGPR);
        jit.load64(CCallHelpers::addressFor(spillSlotsOffset + calleeSpillSlot), GPRInfo::regT0);
    }
    
    jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), scratchGPR2, CCallHelpers::stackPointerRegister);

    jit.store64(thisGPR, CCallHelpers::calleeArgumentSlot(0));
    
    // Henceforth we make the call. The base FTL call machinery expects the callee in regT0 and for the
    // stack frame to already be set up, which it is.
    jit.store64(GPRInfo::regT0, CCallHelpers::calleeFrameSlot(JSStack::Callee));

    m_callBase.emit(jit, state, osrExitFromGenericUnwindSpillSlots);

    // Undo the damage we've done.
    if (isARM64()) {
        GPRReg scratchGPRAtReturn = CCallHelpers::selectScratchGPR(GPRInfo::returnValueGPR);
        jit.loadPtr(CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot), scratchGPRAtReturn);
        jit.move(scratchGPRAtReturn, CCallHelpers::stackPointerRegister);
    } else
        jit.loadPtr(CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot), CCallHelpers::stackPointerRegister);
}
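
The four spill-slot constants near the top of the function define a small fixed layout addressed via addressFor(spillSlotsOffset + slot). The same layout restated as an enum for readability (a hypothetical rendering; the original uses const unsigned locals):

enum VarargsSpillSlot : unsigned {
    CalleeSlot = 0,       // callee, reloaded into regT0 before the call
    ArgumentsSlot = 1,    // arguments object, reloaded for the setup call
    ThisSlot = 2,         // this value, stored into calleeArgumentSlot(0)
    StackPointerSlot = 3, // saved SP, restored once the call returns
};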
Example 11
void generate(Code& code, CCallHelpers& jit)
{
    TimingScope timingScope("Air::generate");
    
    // We don't expect the incoming code to have predecessors computed.
    code.resetReachability();
    
    if (shouldValidateIR())
        validate(code);

    // If we're doing super verbose dumping, the phase scope of any phase will already do a dump.
    if (shouldDumpIR() && !shouldDumpIRAtEachPhase()) {
        dataLog("Initial air:\n");
        dataLog(code);
    }

    // This is where we run our optimizations and transformations.
    // FIXME: Add Air optimizations.
    // https://bugs.webkit.org/show_bug.cgi?id=150456
    
    eliminateDeadCode(code);

    // This is where we would have a real register allocator. Then, we could use spillEverything()
    // in place of the register allocator only for testing.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=150457
    spillEverything(code);

    // Prior to this point the prologue and epilogue is implicit. This makes it explicit. It also
    // does things like identify which callee-saves we're using and saves them.
    handleCalleeSaves(code);

    // This turns all Stack and CallArg Args into Addr args that use the frame pointer. It does
    // this by first-fit allocating stack slots. It should be pretty darn close to optimal, so we
    // shouldn't have to worry about this very much.
    allocateStack(code);

    // If we coalesced moves then we can unbreak critical edges. This is the main reason for this
    // phase.
    simplifyCFG(code);

    // FIXME: We should really have a code layout optimization here.
    // https://bugs.webkit.org/show_bug.cgi?id=150478

    reportUsedRegisters(code);

    if (shouldValidateIR())
        validate(code);

    // Do a final dump of Air. Note that we have to do this even if we are doing per-phase dumping,
    // since the final generation is not a phase.
    if (shouldDumpIR()) {
        dataLog("Air after ", code.lastPhaseName(), ", before generation:\n");
        dataLog(code);
    }

    TimingScope codeGenTimingScope("Air::generate backend");

    // And now, we generate code.
    jit.emitFunctionPrologue();
    jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), MacroAssembler::stackPointerRegister);

    GenerationContext context;
    context.code = &code;
    IndexMap<BasicBlock, CCallHelpers::Label> blockLabels(code.size());
    IndexMap<BasicBlock, CCallHelpers::JumpList> blockJumps(code.size());

    auto link = [&] (CCallHelpers::Jump jump, BasicBlock* target) {
        if (blockLabels[target].isSet()) {
            jump.linkTo(blockLabels[target], &jit);
            return;
        }

        blockJumps[target].append(jump);
    };

    for (BasicBlock* block : code) {
        blockJumps[block].link(&jit);
        blockLabels[block] = jit.label();
        ASSERT(block->size() >= 1);
        for (unsigned i = 0; i < block->size() - 1; ++i) {
            CCallHelpers::Jump jump = block->at(i).generate(jit, context);
            ASSERT_UNUSED(jump, !jump.isSet());
        }

        if (block->last().opcode == Jump
            && block->successorBlock(0) == code.findNextBlock(block))
            continue;

        if (block->last().opcode == Ret) {
            // We currently don't represent the full prologue/epilogue in Air, so we need to
            // have this override.
            jit.emitFunctionEpilogue();
            jit.ret();
            continue;
        }
        
        CCallHelpers::Jump jump = block->last().generate(jit, context);
        switch (block->numSuccessors()) {
        case 0:
            ASSERT(!jump.isSet());
            break;
        case 1:
            link(jump, block->successorBlock(0));
            break;
        case 2:
            link(jump, block->successorBlock(0));
            if (block->successorBlock(1) != code.findNextBlock(block))
                link(jit.jump(), block->successorBlock(1));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    for (auto& latePath : context.latePaths)
        latePath->run(jit, context);
}
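
The link lambda implements the usual two-state forward-reference scheme: a jump to an already-emitted block links immediately, while a jump to a not-yet-emitted block is queued on the target and linked when that block's label is bound. A mock-type sketch of the protocol (not the MacroAssembler API):

#include <vector>

struct MockLabel { bool bound = false; };
struct MockJump { unsigned id = 0; };

struct MockBlockLinkState {
    MockLabel label;               // bound when the block is emitted
    std::vector<MockJump> pending; // jumps recorded before emission

    void addJump(const MockJump& jump)
    {
        if (label.bound) {
            // jump.linkTo(blockLabels[target], &jit) in the real code
            return;
        }
        pending.push_back(jump); // blockJumps[target].append(jump)
    }

    void emitBlock()
    {
        // blockJumps[block].link(&jit); blockLabels[block] = jit.label();
        pending.clear();
        label.bound = true;
    }
};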
void generate(Code& code, CCallHelpers& jit)
{
    TimingScope timingScope("Air::generate");

    DisallowMacroScratchRegisterUsage disallowScratch(jit);

    // And now, we generate code.
    jit.emitFunctionPrologue();
    if (code.frameSize())
        jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), MacroAssembler::stackPointerRegister);

    GenerationContext context;
    context.code = &code;
    IndexMap<BasicBlock, CCallHelpers::Label> blockLabels(code.size());
    IndexMap<BasicBlock, CCallHelpers::JumpList> blockJumps(code.size());

    auto link = [&] (CCallHelpers::Jump jump, BasicBlock* target) {
        if (blockLabels[target].isSet()) {
            jump.linkTo(blockLabels[target], &jit);
            return;
        }

        blockJumps[target].append(jump);
    };

    for (BasicBlock* block : code) {
        blockJumps[block].link(&jit);
        blockLabels[block] = jit.label();
        ASSERT(block->size() >= 1);
        for (unsigned i = 0; i < block->size() - 1; ++i) {
            CCallHelpers::Jump jump = block->at(i).generate(jit, context);
            ASSERT_UNUSED(jump, !jump.isSet());
        }

        if (block->last().opcode == Jump
            && block->successorBlock(0) == code.findNextBlock(block))
            continue;

        if (block->last().opcode == Ret) {
            // We currently don't represent the full prologue/epilogue in Air, so we need to
            // have this override.
            if (code.frameSize())
                jit.emitFunctionEpilogue();
            else
                jit.emitFunctionEpilogueWithEmptyFrame();
            jit.ret();
            continue;
        }
        
        CCallHelpers::Jump jump = block->last().generate(jit, context);
        switch (block->numSuccessors()) {
        case 0:
            ASSERT(!jump.isSet());
            break;
        case 1:
            link(jump, block->successorBlock(0));
            break;
        case 2:
            link(jump, block->successorBlock(0));
            if (block->successorBlock(1) != code.findNextBlock(block))
                link(jit.jump(), block->successorBlock(1));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    for (auto& latePath : context.latePaths)
        latePath->run(jit, context);
}
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

    CodeOrigin codeOrigin;
    for (codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
        void* jumpTarget = nullptr;
        void* trueReturnPC = nullptr;

        unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex;

        switch (inlineCallFrame->kind) {
        case InlineCallFrame::Call:
        case InlineCallFrame::Construct:
        case InlineCallFrame::CallVarargs:
        case InlineCallFrame::ConstructVarargs: {
            CallLinkInfo* callLinkInfo =
                baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
            RELEASE_ASSERT(callLinkInfo);

            jumpTarget = callLinkInfo->callReturnLocation().executableAddress();
            break;
        }

        case InlineCallFrame::GetterCall:
        case InlineCallFrame::SetterCall: {
            StructureStubInfo* stubInfo =
                baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
            RELEASE_ASSERT(stubInfo);

            switch (inlineCallFrame->kind) {
            case InlineCallFrame::GetterCall:
                jumpTarget = jit.vm()->getCTIStub(baselineGetterReturnThunkGenerator).code().executableAddress();
                break;
            case InlineCallFrame::SetterCall:
                jumpTarget = jit.vm()->getCTIStub(baselineSetterReturnThunkGenerator).code().executableAddress();
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            trueReturnPC = stubInfo->callReturnLocation.labelAtOffset(
                stubInfo->patch.deltaCallToDone).executableAddress();
            break;
        } }

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        if (trueReturnPC)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(trueReturnPC), AssemblyHelpers::addressFor(inlineCallFrame->stackOffset + virtualRegisterForArgument(inlineCallFrame->arguments.size()).offset()));

        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        if (!inlineCallFrame->isVarargs())
            jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
#if USE(JSVALUE64)
        jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
#else // USE(JSVALUE64) // so this is the 32-bit part
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex;
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
#endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part
    }

#if USE(JSVALUE64)
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
#else
    Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin.bytecodeIndex;
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
#endif
    jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
}
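
Every store in the reification loop above addresses an inlined frame slot at a fixed Register offset from the machine call frame. That addressing, as a hypothetical helper assuming the 8-byte Register/EncodedJSValue of 64-bit builds:

#include <cstddef>

// Byte offset, relative to GPRInfo::callFrameRegister, of a given slot of
// an inlined frame. E.g. the CodeBlock store above writes at
// inlineFrameSlotByteOffset(inlineCallFrame->stackOffset, JSStack::CodeBlock).
static inline std::ptrdiff_t inlineFrameSlotByteOffset(int frameStackOffset, int slot)
{
    const std::ptrdiff_t registerSize = 8; // sizeof(Register) on 64-bit targets
    return static_cast<std::ptrdiff_t>(frameStackOffset + slot) * registerSize;
}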
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

    CodeOrigin codeOrigin;
    for (codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
        unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex;
        CallLinkInfo& callLinkInfo = baselineCodeBlockForCaller->getCallLinkInfo(callBytecodeIndex);
        
        void* jumpTarget = callLinkInfo.callReturnLocation.executableAddress();

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;
        
#if USE(JSVALUE64)
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        
        // Leave the captured arguments in regT3.
        if (baselineCodeBlock->usesArguments())
            jit.loadPtr(AssemblyHelpers::addressFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
#else // USE(JSVALUE64) // so this is the 32-bit part
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex;
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));

        // Leave the captured arguments in regT3.
        if (baselineCodeBlock->usesArguments())
            jit.loadPtr(AssemblyHelpers::payloadFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
#endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part
        
        if (baselineCodeBlock->usesArguments()) {
            AssemblyHelpers::Jump noArguments = jit.branchTestPtr(AssemblyHelpers::Zero, GPRInfo::regT3);
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
            jit.storePtr(GPRInfo::regT0, AssemblyHelpers::Address(GPRInfo::regT3, Arguments::offsetOfRegisters()));
            noArguments.link(&jit);
        }
    }

#if USE(JSVALUE64)
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
#else
    Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin.bytecodeIndex;
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
#endif
    jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
}
Example 15
void JSTailCall::emit(JITCode& jitCode, CCallHelpers& jit)
{
    StackMaps::Record* record { nullptr };
    
    for (unsigned i = jitCode.stackmaps.records.size(); i--;) {
        record = &jitCode.stackmaps.records[i];
        if (record->patchpointID == m_stackmapID)
            break;
    }

    RELEASE_ASSERT(record->patchpointID == m_stackmapID);

    m_callLinkInfo = jit.codeBlock()->addCallLinkInfo();

    CallFrameShuffleData shuffleData;

    // The callee was the first passed argument, and must be in a GPR because
    // we used the "anyregcc" calling convention.
    auto calleeLocation =
        FTL::Location::forStackmaps(nullptr, record->locations[0]);
    GPRReg calleeGPR = calleeLocation.directGPR();
    shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatJS);

    // The tag type number was the second argument, if there was one.
    auto tagTypeNumberLocation =
        FTL::Location::forStackmaps(&jitCode.stackmaps, record->locations[1]);
    if (tagTypeNumberLocation.isGPR() && !tagTypeNumberLocation.addend())
        shuffleData.tagTypeNumber = tagTypeNumberLocation.directGPR();

    shuffleData.args.grow(numArguments());
    HashMap<Reg, Vector<std::pair<ValueRecovery*, int32_t>>> withAddend;
    size_t numAddends { 0 };
    for (size_t i = 0; i < numArguments(); ++i) {
        shuffleData.args[i] = recoveryFor(m_arguments[i], *record, jitCode.stackmaps);
        if (FTL::Location addend = getRegisterWithAddend(m_arguments[i], *record, jitCode.stackmaps)) {
            withAddend.add(
                addend.reg(),
                Vector<std::pair<ValueRecovery*, int32_t>>()).iterator->value.append(
                    std::make_pair(&shuffleData.args[i], addend.addend()));
            numAddends++;
        }
    }

    numAddends = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), numAddends);

    shuffleData.numLocals = static_cast<int64_t>(jitCode.stackmaps.stackSizeForLocals()) / sizeof(void*) + numAddends;

    ASSERT(!numAddends == withAddend.isEmpty());

    if (!withAddend.isEmpty()) {
        jit.subPtr(MacroAssembler::TrustedImm32(numAddends * sizeof(void*)), MacroAssembler::stackPointerRegister);
        VirtualRegister spillBase { 1 - static_cast<int>(shuffleData.numLocals) };
        for (auto entry : withAddend) {
            for (auto pair : entry.value) {
                ASSERT(numAddends > 0);
                VirtualRegister spillSlot { spillBase + --numAddends };
                ASSERT(entry.key.isGPR());
                jit.addPtr(MacroAssembler::TrustedImm32(pair.second), entry.key.gpr());
                jit.storePtr(entry.key.gpr(), CCallHelpers::addressFor(spillSlot));
                jit.subPtr(MacroAssembler::TrustedImm32(pair.second), entry.key.gpr());
                *pair.first = ValueRecovery::displacedInJSStack(spillSlot, pair.first->dataFormat());
            }
        }
        ASSERT(numAddends < stackAlignmentRegisters());
    }

    shuffleData.setupCalleeSaveRegisters(jit.codeBlock());

    CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
        CCallHelpers::NotEqual, calleeGPR, m_targetToCheck,
        CCallHelpers::TrustedImmPtr(0));

    m_callLinkInfo->setFrameShuffleData(shuffleData);
    CallFrameShuffler(jit, shuffleData).prepareForTailCall();

    m_fastCall = jit.nearTailCall();

    slowPath.link(&jit);

    CallFrameShuffler slowPathShuffler(jit, shuffleData);
    slowPathShuffler.setCalleeJSValueRegs(JSValueRegs { GPRInfo::regT0 });
    slowPathShuffler.prepareForSlowPath();

    jit.move(CCallHelpers::TrustedImmPtr(m_callLinkInfo), GPRInfo::regT2);

    m_slowCall = jit.nearCall();

    jit.abortWithReason(JITDidReturnFromTailCall);

    m_callLinkInfo->setUpCall(m_type, m_semanticeOrigin, calleeGPR);
}
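
The branchPtrWithPatch/nearTailCall/nearCall trio above forms a patchable inline call cache: compare the callee against a patchable constant, take the near tail call on a match, and otherwise fall through to the link stub, which later repatches both the constant and the near-call target. A conceptual dispatch over mock fields (not the real CallLinkInfo layout):

struct MockPatchableCallSite {
    void* expectedCallee { nullptr }; // patched by the link stub on first call
    void* fastTarget { nullptr };     // patched near-call destination
};

inline void* dispatch(MockPatchableCallSite& site, void* callee, void* linkStub)
{
    if (callee == site.expectedCallee)
        return site.fastTarget; // the m_fastCall path
    return linkStub;            // the m_slowCall path: links, then repatches
}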