CCallHelpers::JumpList generateImpl(AccessGenerationState& state, const RegisterSet& usedRegistersBySnippet, CCallHelpers& jit, std::index_sequence<ArgumentsIndex...>)
    {
        CCallHelpers::JumpList exceptions;
        // We spill (1) the registers used by the IC and (2) the registers used by the snippet.
        AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall(usedRegistersBySnippet);

        jit.store32(
            CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
            CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));

        jit.makeSpaceOnStackForCCall();

        jit.setupArguments<FunctionType>(std::get<ArgumentsIndex>(m_arguments)...);

        CCallHelpers::Call operationCall = jit.call(OperationPtrTag);
        auto function = m_function;
        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
            linkBuffer.link(operationCall, FunctionPtr<OperationPtrTag>(function));
        });

        jit.setupResults(m_result);
        jit.reclaimSpaceOnStackForCCall();

        CCallHelpers::Jump noException = jit.emitExceptionCheck(state.m_vm, CCallHelpers::InvertedExceptionCheck);

        state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
        exceptions.append(jit.jump());

        noException.link(&jit);
        RegisterSet dontRestore;
        dontRestore.set(m_result);
        state.restoreLiveRegistersFromStackForCall(spillState, dontRestore);

        return exceptions;
    }
Example #2
void BasicBlockLocation::emitExecuteCode(CCallHelpers& jit, MacroAssembler::RegisterID scratch) const
{
    static_assert(sizeof(size_t) == 4, "Assuming size_t is 32 bits on 32 bit platforms.");
    jit.load32(&m_executionCount, scratch);
    CCallHelpers::Jump done = jit.branchAdd32(CCallHelpers::Zero, scratch, CCallHelpers::TrustedImm32(1), scratch);
    jit.store32(scratch, bitwise_cast<void*>(&m_executionCount));
    done.link(&jit);
}
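
The emitted sequence implements a saturating execution counter: the increment is skipped when it would wrap the 32-bit count to zero, so the counter sticks at UINT32_MAX instead of rolling over. A minimal C++ sketch of the same semantics:

#include <cstdint>

// Saturating 32-bit counter: branchAdd32(Zero, ...) above takes the "done"
// path exactly when count + 1 wraps to zero, skipping the store.
inline void countExecution(uint32_t& executionCount)
{
    uint32_t next = executionCount + 1;
    if (next != 0)
        executionCount = next;
}
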
Example #3
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
#if !ASSERT_DISABLED
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.breakpoint();
    isNonZero.link(&jit);
    jit.push(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.pop(pointerGPR);
#else
    UNUSED_PARAM(jit);
    UNUSED_PARAM(pointerGPR);
#endif
}
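
In debug builds this traps immediately on a null pointer (the breakpoint is reached only when the non-zero branch is not taken), then probes the first byte so that a wild pointer faults here rather than at some later, harder-to-diagnose use. A hedged C++ sketch of the same checks:

#include <cassert>
#include <cstdint>

// Debug-only pointer probe: fail fast on null or unmapped pointers.
// The volatile load mirrors the load8 probe and won't be optimized away.
inline void validatePointer(const void* pointer)
{
#ifndef NDEBUG
    assert(pointer); // stands in for the breakpoint on the zero case
    (void)*static_cast<const volatile uint8_t*>(pointer);
#else
    (void)pointer;
#endif
}
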
Example #4
void ArithProfile::emitObserveResult(CCallHelpers& jit, JSValueRegs regs, TagRegistersMode mode)
{
    if (!shouldEmitSetDouble() && !shouldEmitSetNonNumber())
        return;

    CCallHelpers::Jump isInt32 = jit.branchIfInt32(regs, mode);
    CCallHelpers::Jump notDouble = jit.branchIfNotDoubleKnownNotInt32(regs, mode);
    emitSetDouble(jit);
    CCallHelpers::Jump done = jit.jump();
    notDouble.link(&jit);
    emitSetNonNumber(jit);
    done.link(&jit);
    isInt32.link(&jit);
}
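
The branch structure is a three-way dispatch on the result type: int32 results need no new profile bits, doubles set the saw-a-double flag, and everything else sets the saw-a-non-number flag. Modeled in plain C++ (the flag names are illustrative, not the real ArithProfile bit layout):

// Illustrative model of the dispatch in emitObserveResult; the real
// ArithProfile packs these observations into bit flags.
struct ObservedResults {
    bool sawDouble = false;
    bool sawNonNumber = false;

    void observe(bool isInt32, bool isDouble)
    {
        if (isInt32)
            return;              // isInt32 path: nothing to record
        if (isDouble)
            sawDouble = true;    // emitSetDouble path
        else
            sawNonNumber = true; // emitSetNonNumber path
    }
};
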
Example #5
static void slowPathFor(
    CCallHelpers& jit, VM* vm, Sprt_JITOperation_ECli slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
#if OS(WINDOWS) && CPU(X86_64)
    // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
    // Other argument values are shifted by 1. Use space on the stack for our two return values.
    // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
    // and space for the 16 byte return area.
    jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.move(GPRInfo::regT2, GPRInfo::argumentGPR2);
    jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
    jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#else
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsWithExecState(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#endif

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    // The second return value GPR will hold a non-zero value for tail calls.

    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();

    RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
    CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);

    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
    jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);

    doNotTrash.link(&jit);
    jit.jump(GPRInfo::returnValueGPR);
}
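
The slow-path call returns two machine words: returnValueGPR carries the code pointer to jump to, and returnValueGPR2 carries the keep-or-trash-the-frame flag that the RELEASE_ASSERT pins to zero for KeepTheFrame. An illustrative C++ model of that convention (the struct and its names are assumptions, not JSC's actual types):

#include <cstdint>

// Hypothetical model of the two-register return convention used above.
struct SlowPathResult {
    void* codeToJumpTo;  // exception thunk, host-call returner, or the callee
    uintptr_t frameFlag; // 0 == KeepTheFrame; non-zero == prepare a tail call
};
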
Example #6
void JSCallBase::emit(CCallHelpers& jit)
{
    m_callLinkInfo = jit.codeBlock()->addCallLinkInfo();
    
    CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
        CCallHelpers::NotEqual, GPRInfo::regT0, m_targetToCheck,
        CCallHelpers::TrustedImmPtr(0));
    
    m_fastCall = jit.nearCall();
    CCallHelpers::Jump done = jit.jump();
    
    slowPath.link(&jit);
    
    jit.move(CCallHelpers::TrustedImmPtr(m_callLinkInfo), GPRInfo::regT2);
    m_slowCall = jit.nearCall();
    
    done.link(&jit);
}
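
This is the monomorphic call inline cache in its simplest form: compare the callee against a patchable constant, take a patchable near call on a hit, and on a miss fall through to a slow path that receives the CallLinkInfo in regT2 and links the cache. A conceptual C++ sketch (the real JIT patches the compared pointer and the call target in the machine code itself, not data fields; all names here are illustrative):

// Conceptual call cache standing in for branchPtrWithPatch + nearCall.
struct CallCache {
    void* expectedCallee = nullptr; // the patchable compare constant
    void (*fastTarget)() = nullptr; // the patchable near-call destination
};

inline void cachedCall(CallCache& cache, void* callee,
    void (*slowPath)(CallCache&, void*))
{
    if (callee == cache.expectedCallee && cache.fastTarget)
        cache.fastTarget();      // fast path: one compare, one direct call
    else
        slowPath(cache, callee); // slow path: link the cache, then call
}
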
Example #7
void JSCallBase::emit(CCallHelpers& jit, State& /*state*/, int32_t osrExitFromGenericUnwindStackSpillSlot)
{
    RELEASE_ASSERT(!!m_callSiteIndex);

#if FTL_USES_B3
    UNUSED_PARAM(osrExitFromGenericUnwindStackSpillSlot);
#else // FTL_USES_B3
    if (m_correspondingGenericUnwindOSRExit)
        m_correspondingGenericUnwindOSRExit->spillRegistersToSpillSlot(jit, osrExitFromGenericUnwindStackSpillSlot);
#endif // FTL_USES_B3

    jit.store32(CCallHelpers::TrustedImm32(m_callSiteIndex.bits()), CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));

    m_callLinkInfo = jit.codeBlock()->addCallLinkInfo();
    
    if (CallLinkInfo::callModeFor(m_type) == CallMode::Tail)
        jit.emitRestoreCalleeSaves();

    CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
        CCallHelpers::NotEqual, GPRInfo::regT0, m_targetToCheck,
        CCallHelpers::TrustedImmPtr(0));

    CCallHelpers::Jump done;

    if (CallLinkInfo::callModeFor(m_type) == CallMode::Tail) {
        jit.prepareForTailCallSlow();
        m_fastCall = jit.nearTailCall();
    } else {
        m_fastCall = jit.nearCall();
        done = jit.jump();
    }

    slowPath.link(&jit);

    jit.move(CCallHelpers::TrustedImmPtr(m_callLinkInfo), GPRInfo::regT2);
    m_slowCall = jit.nearCall();

    if (CallLinkInfo::callModeFor(m_type) == CallMode::Tail)
        jit.abortWithReason(JITDidReturnFromTailCall);
    else
        done.link(&jit);

    m_callLinkInfo->setUpCall(m_type, m_semanticeOrigin, GPRInfo::regT0);
}
Example #8
CCallHelpers::Jump CheckSpecial::generate(Inst& inst, CCallHelpers& jit, GenerationContext& context)
{
    CCallHelpers::Jump fail = hiddenBranch(inst).generate(jit, context);
    ASSERT(fail.isSet());

    Value* value = inst.origin;

    Vector<ValueRep> reps;
    if (isCheckMath(value->opcode())) {
        if (value->opcode() == CheckMul)
            reps.append(ValueRep());
        else if (value->opcode() == CheckSub && value->child(0)->isInt(0))
            reps.append(ValueRep::constant(0));
        else
            reps.append(repForArg(*context.code, inst.args[3]));
        reps.append(repForArg(*context.code, inst.args[2]));
    } else {
        ASSERT(value->opcode() == Check);
        reps.append(ValueRep::constant(1));
    }

    appendRepsImpl(context, m_numCheckArgs + 1, inst, reps);
    
    context.latePaths.append(
        createSharedTask<GenerationContext::LatePathFunction>(
            [=] (CCallHelpers& jit, GenerationContext&) {
                fail.link(&jit);
                
                Stackmap* stackmap = value->stackmap();
                ASSERT(stackmap);

                Stackmap::GenerationParams params;
                params.value = value;
                params.stackmap = stackmap;
                params.reps = reps;
                params.usedRegisters = stackmap->m_usedRegisters;

                stackmap->m_generator->run(jit, params);
            }));

    return CCallHelpers::Jump(); // As far as Air thinks, we are not a terminal.
}
Example #9
bool JITNegGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile* arithProfile, bool shouldEmitProfiling)
{
    ASSERT(m_scratchGPR != m_src.payloadGPR());
    ASSERT(m_scratchGPR != m_result.payloadGPR());
    ASSERT(m_scratchGPR != InvalidGPRReg);
#if USE(JSVALUE32_64)
    ASSERT(m_scratchGPR != m_src.tagGPR());
    ASSERT(m_scratchGPR != m_result.tagGPR());
#endif

    jit.moveValueRegs(m_src, m_result);
    CCallHelpers::Jump srcNotInt = jit.branchIfNotInt32(m_src);

    // -0 should produce a double, and hence cannot be negated as an int.
    // The negative int32 0x80000000 doesn't have a positive int32 representation, and hence cannot be negated as an int.
    slowPathJumpList.append(jit.branchTest32(CCallHelpers::Zero, m_src.payloadGPR(), CCallHelpers::TrustedImm32(0x7fffffff)));

    jit.neg32(m_result.payloadGPR());
#if USE(JSVALUE64)
    jit.boxInt32(m_result.payloadGPR(), m_result);
#endif
    endJumpList.append(jit.jump());

    srcNotInt.link(&jit);
    slowPathJumpList.append(jit.branchIfNotNumber(m_src, m_scratchGPR));

    // For a double, all we need to do is to invert the sign bit.
#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64((int64_t)(1ull << 63)), m_scratchGPR);
    jit.xor64(m_scratchGPR, m_result.payloadGPR());
#else
    jit.xor32(CCallHelpers::TrustedImm32(1 << 31), m_result.tagGPR());
#endif
    // The flags of ArithNegate are basic in DFG.
    // We only need to know if we ever produced a number.
    if (shouldEmitProfiling && arithProfile && !arithProfile->lhsObservedType().sawNumber() && !arithProfile->didObserveDouble())
        arithProfile->emitSetDouble(jit);
    return true;
}
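
The two bail-out conditions follow from two's-complement arithmetic: negating 0 must yield -0 (a double), and negating INT32_MIN overflows int32; both inputs satisfy (payload & 0x7fffffff) == 0, which is exactly the branchTest32 above. Doubles negate by flipping the sign bit, as the xor64/xor32 does. Checked in plain C++:

#include <cstdint>
#include <cstring>

// 0 and INT32_MIN (0x80000000) are the only payloads that fail this test,
// so the fast path bails out for exactly the two unnegatable int32s.
inline bool canNegateAsInt32(int32_t payload)
{
    return (static_cast<uint32_t>(payload) & 0x7fffffff) != 0;
}

// Negating a double is flipping bit 63, matching the JSVALUE64 xor64 path.
inline double negateDouble(double value)
{
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    bits ^= 1ull << 63;
    std::memcpy(&value, &bits, sizeof(value));
    return value;
}
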
Example #10
void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, ValueRecovery argCountRecovery, VirtualRegister firstArgumentReg, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
{
    CCallHelpers::JumpList end;
    
    if (argCountRecovery.isConstant()) {
        // FIXME: We could constant-fold a lot of the computation below in this case.
        // https://bugs.webkit.org/show_bug.cgi?id=141486
        jit.move(CCallHelpers::TrustedImm32(argCountRecovery.constant().asInt32()), scratchGPR1);
    } else
        jit.load32(CCallHelpers::payloadFor(argCountRecovery.virtualRegister()), scratchGPR1);
    if (firstVarArgOffset) {
        CCallHelpers::Jump sufficientArguments = jit.branch32(CCallHelpers::GreaterThan, scratchGPR1, CCallHelpers::TrustedImm32(firstVarArgOffset + 1));
        jit.move(CCallHelpers::TrustedImm32(1), scratchGPR1);
        CCallHelpers::Jump endVarArgs = jit.jump();
        sufficientArguments.link(&jit);
        jit.sub32(CCallHelpers::TrustedImm32(firstVarArgOffset), scratchGPR1);
        endVarArgs.link(&jit);
    }
    slowCase.append(jit.branch32(CCallHelpers::Above, scratchGPR1, CCallHelpers::TrustedImm32(maxArguments + 1)));
    
    emitSetVarargsFrame(jit, scratchGPR1, true, numUsedSlotsGPR, scratchGPR2);

    slowCase.append(jit.branchPtr(CCallHelpers::Above, CCallHelpers::AbsoluteAddress(jit.vm()->addressOfStackLimit()), scratchGPR2));

    // Initialize ArgumentCount.
    jit.store32(scratchGPR1, CCallHelpers::Address(scratchGPR2, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset));

    // Copy arguments.
    jit.signExtend32ToPtr(scratchGPR1, scratchGPR1);
    CCallHelpers::Jump done = jit.branchSubPtr(CCallHelpers::Zero, CCallHelpers::TrustedImm32(1), scratchGPR1);
    // scratchGPR1: argumentCount

    CCallHelpers::Label copyLoop = jit.label();
    int argOffset = (firstArgumentReg.offset() - 1 + firstVarArgOffset) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
    jit.load64(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset), scratchGPR3);
    jit.store64(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
#else // USE(JSVALUE64), so this begins the 32-bit case
    jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + TagOffset), scratchGPR3);
    jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + TagOffset));
    jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + PayloadOffset), scratchGPR3);
    jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + PayloadOffset));
#endif // USE(JSVALUE64), end of 32-bit case
    jit.branchSubPtr(CCallHelpers::NonZero, CCallHelpers::TrustedImm32(1), scratchGPR1).linkTo(copyLoop, &jit);
    
    done.link(&jit);
}
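
The copy loop counts the index down so that one register serves as both loop counter and element index: slots argumentCount-1 down to 1 (relative to the this-slot base) are copied, and the initial branchSubPtr peels off the case where only 'this' remains. The same shape in plain C++ (the offsets are illustrative; the real code indexes off two different frame bases):

#include <cstddef>
#include <cstdint>

// Sketch of the reverse copy loop above: copy slots [1, count) from the
// caller's frame into the new varargs frame, counting down to zero.
inline void copyVarargs(uint64_t* newFrame, const uint64_t* callerFrame,
    size_t argumentCountIncludingThis, ptrdiff_t srcOffset, ptrdiff_t dstOffset)
{
    size_t i = argumentCountIncludingThis;
    if (--i == 0)
        return; // branchSubPtr(Zero, ...): nothing but 'this' to copy
    do {
        newFrame[dstOffset + i] = callerFrame[srcOffset + i];
    } while (--i); // branchSubPtr(NonZero, ...).linkTo(copyLoop, ...)
}
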
Example #11
void JITRightShiftGenerator::generateFastPath(CCallHelpers& jit)
{
    ASSERT(m_scratchGPR != InvalidGPRReg);
    ASSERT(m_scratchGPR != m_left.payloadGPR());
    ASSERT(m_scratchGPR != m_right.payloadGPR());
#if USE(JSVALUE32_64)
    ASSERT(m_scratchGPR != m_left.tagGPR());
    ASSERT(m_scratchGPR != m_right.tagGPR());
    ASSERT(m_scratchFPR != InvalidFPRReg);
#endif

    ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());

    m_didEmitFastPath = true;

    if (m_rightOperand.isConstInt32()) {
        // Try to do (intVar >> intConstant).
        CCallHelpers::Jump notInt = jit.branchIfNotInt32(m_left);

        jit.moveValueRegs(m_left, m_result);
        int32_t shiftAmount = m_rightOperand.asConstInt32() & 0x1f;
        if (shiftAmount) {
            if (m_shiftType == SignedShift)
                jit.rshift32(CCallHelpers::Imm32(shiftAmount), m_result.payloadGPR());
            else
                jit.urshift32(CCallHelpers::Imm32(shiftAmount), m_result.payloadGPR());
#if USE(JSVALUE64)
            jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
#endif
        }

        if (jit.supportsFloatingPointTruncate()) {
            m_endJumpList.append(jit.jump()); // Terminate the above case before emitting more code.

            // Try to do (doubleVar >> intConstant).
            notInt.link(&jit);

            m_slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));

            jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
            m_slowPathJumpList.append(jit.branchTruncateDoubleToInt32(m_leftFPR, m_scratchGPR));

            if (shiftAmount) {
                if (m_shiftType == SignedShift)
                    jit.rshift32(CCallHelpers::Imm32(shiftAmount), m_scratchGPR);
                else
                    jit.urshift32(CCallHelpers::Imm32(shiftAmount), m_scratchGPR);
            }
            jit.boxInt32(m_scratchGPR, m_result);

        } else
            m_slowPathJumpList.append(notInt);

    } else {
        // Try to do (intConstant >> intVar) or (intVar >> intVar).
        m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));

        CCallHelpers::Jump notInt;
        if (m_leftOperand.isConstInt32()) {
#if USE(JSVALUE32_64)
            jit.move(m_right.tagGPR(), m_result.tagGPR());
#endif
            jit.move(CCallHelpers::Imm32(m_leftOperand.asConstInt32()), m_result.payloadGPR());
        } else {
            notInt = jit.branchIfNotInt32(m_left);
            jit.moveValueRegs(m_left, m_result);
        }

        if (m_shiftType == SignedShift)
            jit.rshift32(m_right.payloadGPR(), m_result.payloadGPR());
        else
            jit.urshift32(m_right.payloadGPR(), m_result.payloadGPR());
#if USE(JSVALUE64)
        jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
#endif
        if (m_leftOperand.isConstInt32())
            return;

        if (jit.supportsFloatingPointTruncate()) {
            m_endJumpList.append(jit.jump()); // Terminate the above case before emitting more code.

            // Try to do (doubleVar >> intVar).
            notInt.link(&jit);

            m_slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
            jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
            m_slowPathJumpList.append(jit.branchTruncateDoubleToInt32(m_leftFPR, m_scratchGPR));

            if (m_shiftType == SignedShift)
                jit.rshift32(m_right.payloadGPR(), m_scratchGPR);
            else
                jit.urshift32(m_right.payloadGPR(), m_scratchGPR);
            jit.boxInt32(m_scratchGPR, m_result);

        } else
            m_slowPathJumpList.append(notInt);
    }
}
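
The `& 0x1f` mirrors ECMAScript shift semantics: only the low five bits of the shift count are used, which is also why the generator emits no shift at all when the masked constant is zero. In plain C++:

#include <cstdint>

// ECMAScript >> and >>> use only the low 5 bits of the count, matching
// the `& 0x1f` masking of the constant shift amount above.
inline int32_t jsSignedRightShift(int32_t lhs, int32_t rhs)
{
    return lhs >> (static_cast<uint32_t>(rhs) & 0x1f);
}

inline uint32_t jsUnsignedRightShift(uint32_t lhs, int32_t rhs)
{
    return lhs >> (static_cast<uint32_t>(rhs) & 0x1f);
}
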
Example #12
CCallHelpers::Jump CheckSpecial::generate(Inst& inst, CCallHelpers& jit, GenerationContext& context)
{
    CCallHelpers::Jump fail = hiddenBranch(inst).generate(jit, context);
    ASSERT(fail.isSet());

    StackmapValue* value = inst.origin->as<StackmapValue>();
    ASSERT(value);

    Vector<ValueRep> reps = repsImpl(context, numB3Args(inst), m_numCheckArgs + 1, inst);

    // Set aside the args that are relevant to undoing the operation. This is because we don't want to
    // capture all of inst in the closure below.
    Vector<Arg, 3> args;
    for (unsigned i = 0; i < m_numCheckArgs; ++i)
        args.append(inst.args[1 + i]);

    context.latePaths.append(
        createSharedTask<GenerationContext::LatePathFunction>(
            [=] (CCallHelpers& jit, GenerationContext& context) {
                fail.link(&jit);

                // If necessary, undo the operation.
                switch (m_checkKind.opcode) {
                case BranchAdd32:
                    if ((m_numCheckArgs == 4 && args[1] == args[2] && args[2] == args[3])
                        || (m_numCheckArgs == 3 && args[1] == args[2])) {
                        // This is ugly, but that's fine - we won't have to do this very often.
                        ASSERT(args[1].isGPR());
                        GPRReg valueGPR = args[1].gpr();
                        GPRReg scratchGPR = CCallHelpers::selectScratchGPR(valueGPR);
                        jit.pushToSave(scratchGPR);
                        jit.setCarry(scratchGPR);
                        jit.lshift32(CCallHelpers::TrustedImm32(31), scratchGPR);
                        jit.urshift32(CCallHelpers::TrustedImm32(1), valueGPR);
                        jit.or32(scratchGPR, valueGPR);
                        jit.popToRestore(scratchGPR);
                        break;
                    }
                    if (m_numCheckArgs == 4) {
                        if (args[1] == args[3])
                            Inst(Sub32, nullptr, args[2], args[3]).generate(jit, context);
                        else if (args[2] == args[3])
                            Inst(Sub32, nullptr, args[1], args[3]).generate(jit, context);
                    } else if (m_numCheckArgs == 3)
                        Inst(Sub32, nullptr, args[1], args[2]).generate(jit, context);
                    break;
                case BranchAdd64:
                    if ((m_numCheckArgs == 4 && args[1] == args[2] && args[2] == args[3])
                        || (m_numCheckArgs == 3 && args[1] == args[2])) {
                        // This is ugly, but that's fine - we won't have to do this very often.
                        ASSERT(args[1].isGPR());
                        GPRReg valueGPR = args[1].gpr();
                        GPRReg scratchGPR = CCallHelpers::selectScratchGPR(valueGPR);
                        jit.pushToSave(scratchGPR);
                        jit.setCarry(scratchGPR);
                        jit.lshift64(CCallHelpers::TrustedImm32(63), scratchGPR);
                        jit.urshift64(CCallHelpers::TrustedImm32(1), valueGPR);
                        jit.or64(scratchGPR, valueGPR);
                        jit.popToRestore(scratchGPR);
                        break;
                    }
                    if (m_numCheckArgs == 4) {
                        if (args[1] == args[3])
                            Inst(Sub64, nullptr, args[2], args[3]).generate(jit, context);
                        else if (args[2] == args[3])
                            Inst(Sub64, nullptr, args[1], args[3]).generate(jit, context);
                    } else if (m_numCheckArgs == 3)
                        Inst(Sub64, nullptr, args[1], args[2]).generate(jit, context);
                    break;
                case BranchSub32:
                    Inst(Add32, nullptr, args[1], args[2]).generate(jit, context);
                    break;
                case BranchSub64:
                    Inst(Add64, nullptr, args[1], args[2]).generate(jit, context);
                    break;
                case BranchNeg32:
                    Inst(Neg32, nullptr, args[1]).generate(jit, context);
                    break;
                case BranchNeg64:
                    Inst(Neg64, nullptr, args[1]).generate(jit, context);
                    break;
                default:
                    break;
                }
                
                value->m_generator->run(jit, StackmapGenerationParams(value, reps, context));
            }));

    return CCallHelpers::Jump(); // As far as Air thinks, we are not a terminal.
}
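
The "ugly" case is `x = x + x`, where the original operand can't be recovered by subtraction because it was overwritten by the doubled value. The setCarry/lshift/urshift/or sequence instead rebuilds it from the wrapped result: after a doubling, the carry flag is the lost top bit, so original = (carry << 31) | (result >> 1). A quick check of that identity in standalone C++:

#include <cassert>
#include <cstdint>

// After result = 2 * original (mod 2^32), the carry flag holds original's
// top bit; this reproduces the setCarry + shifts + or32 recovery above.
inline uint32_t recoverFromDoubling(uint32_t overflowedResult, bool carry)
{
    return (static_cast<uint32_t>(carry) << 31) | (overflowedResult >> 1);
}

int main()
{
    const uint64_t cases[] = { 1, 0x7fffffff, 0x80000000, 0xdeadbeef, 0xffffffff };
    for (uint64_t original : cases) {
        uint64_t wide = original * 2;
        assert(recoverFromDoubling(static_cast<uint32_t>(wide), wide >> 32) == original);
    }
}
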
Example #13
void generate(Code& code, CCallHelpers& jit)
{
    TimingScope timingScope("Air::generate");
    
    // We don't expect the incoming code to have predecessors computed.
    code.resetReachability();
    
    if (shouldValidateIR())
        validate(code);

    // If we're doing super verbose dumping, the phase scope of any phase will already do a dump.
    if (shouldDumpIR() && !shouldDumpIRAtEachPhase()) {
        dataLog("Initial air:\n");
        dataLog(code);
    }

    // This is where we run our optimizations and transformations.
    // FIXME: Add Air optimizations.
    // https://bugs.webkit.org/show_bug.cgi?id=150456
    
    eliminateDeadCode(code);

    // This is where we would have a real register allocator. Then, we could use spillEverything()
    // in place of the register allocator only for testing.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=150457
    spillEverything(code);

    // Prior to this point the prologue and epilogue are implicit. This makes them explicit. It also
    // does things like identify which callee-saves we're using and saves them.
    handleCalleeSaves(code);

    // This turns all Stack and CallArg Args into Addr args that use the frame pointer. It does
    // this by first-fit allocating stack slots. It should be pretty darn close to optimal, so we
    // shouldn't have to worry about this very much.
    allocateStack(code);

    // If we coalesced moves then we can unbreak critical edges. This is the main reason for this
    // phase.
    simplifyCFG(code);

    // FIXME: We should really have a code layout optimization here.
    // https://bugs.webkit.org/show_bug.cgi?id=150478

    reportUsedRegisters(code);

    if (shouldValidateIR())
        validate(code);

    // Do a final dump of Air. Note that we have to do this even if we are doing per-phase dumping,
    // since the final generation is not a phase.
    if (shouldDumpIR()) {
        dataLog("Air after ", code.lastPhaseName(), ", before generation:\n");
        dataLog(code);
    }

    TimingScope codeGenTimingScope("Air::generate backend");

    // And now, we generate code.
    jit.emitFunctionPrologue();
    jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), MacroAssembler::stackPointerRegister);

    GenerationContext context;
    context.code = &code;
    IndexMap<BasicBlock, CCallHelpers::Label> blockLabels(code.size());
    IndexMap<BasicBlock, CCallHelpers::JumpList> blockJumps(code.size());

    auto link = [&] (CCallHelpers::Jump jump, BasicBlock* target) {
        if (blockLabels[target].isSet()) {
            jump.linkTo(blockLabels[target], &jit);
            return;
        }

        blockJumps[target].append(jump);
    };

    for (BasicBlock* block : code) {
        blockJumps[block].link(&jit);
        blockLabels[block] = jit.label();
        ASSERT(block->size() >= 1);
        for (unsigned i = 0; i < block->size() - 1; ++i) {
            CCallHelpers::Jump jump = block->at(i).generate(jit, context);
            ASSERT_UNUSED(jump, !jump.isSet());
        }

        if (block->last().opcode == Jump
            && block->successorBlock(0) == code.findNextBlock(block))
            continue;

        if (block->last().opcode == Ret) {
            // We currently don't represent the full prologue/epilogue in Air, so we need to
            // have this override.
            jit.emitFunctionEpilogue();
            jit.ret();
            continue;
        }
        
        CCallHelpers::Jump jump = block->last().generate(jit, context);
        switch (block->numSuccessors()) {
        case 0:
            ASSERT(!jump.isSet());
            break;
        case 1:
            link(jump, block->successorBlock(0));
            break;
        case 2:
            link(jump, block->successorBlock(0));
            if (block->successorBlock(1) != code.findNextBlock(block))
                link(jit.jump(), block->successorBlock(1));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    for (auto& latePath : context.latePaths)
        latePath->run(jit, context);
}
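
The blockLabels/blockJumps pair is the standard one-pass trick for linking jumps whose targets may not be emitted yet: a jump to an already-bound label links immediately, while a forward jump is parked on the target block and flushed when that block's label is bound. A minimal data-structure model in C++ (integers stand in for Labels and Jumps; all names are illustrative):

#include <cstddef>
#include <utility>
#include <vector>

// Minimal model of the blockLabels/blockJumps scheme above.
struct ForwardLinker {
    std::vector<ptrdiff_t> labels;            // per-block offset, -1 if unbound
    std::vector<std::vector<size_t>> pending; // per-block unresolved jump sites
    std::vector<std::pair<size_t, ptrdiff_t>> patches; // (jump site, target)

    explicit ForwardLinker(size_t numBlocks)
        : labels(numBlocks, -1), pending(numBlocks) { }

    void link(size_t jumpSite, size_t targetBlock)
    {
        if (labels[targetBlock] != -1)
            patches.emplace_back(jumpSite, labels[targetBlock]); // backward jump
        else
            pending[targetBlock].push_back(jumpSite);            // forward jump
    }

    void bind(size_t block, ptrdiff_t offset)
    {
        labels[block] = offset;
        for (size_t site : pending[block])
            patches.emplace_back(site, offset); // flush parked forward jumps
        pending[block].clear();
    }
};
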
Example #14
void JSTailCall::emit(JITCode& jitCode, CCallHelpers& jit)
{
    StackMaps::Record* record { nullptr };
    
    for (unsigned i = jitCode.stackmaps.records.size(); i--;) {
        record = &jitCode.stackmaps.records[i];
        if (record->patchpointID == m_stackmapID)
            break;
    }

    RELEASE_ASSERT(record->patchpointID == m_stackmapID);

    m_callLinkInfo = jit.codeBlock()->addCallLinkInfo();

    CallFrameShuffleData shuffleData;

    // The callee was the first passed argument, and must be in a GPR because
    // we used the "anyregcc" calling convention
    auto calleeLocation =
        FTL::Location::forStackmaps(nullptr, record->locations[0]);
    GPRReg calleeGPR = calleeLocation.directGPR();
    shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatJS);

    // The tag type number was the second argument, if there was one
    auto tagTypeNumberLocation =
        FTL::Location::forStackmaps(&jitCode.stackmaps, record->locations[1]);
    if (tagTypeNumberLocation.isGPR() && !tagTypeNumberLocation.addend())
        shuffleData.tagTypeNumber = tagTypeNumberLocation.directGPR();

    shuffleData.args.grow(numArguments());
    HashMap<Reg, Vector<std::pair<ValueRecovery*, int32_t>>> withAddend;
    size_t numAddends { 0 };
    for (size_t i = 0; i < numArguments(); ++i) {
        shuffleData.args[i] = recoveryFor(m_arguments[i], *record, jitCode.stackmaps);
        if (FTL::Location addend = getRegisterWithAddend(m_arguments[i], *record, jitCode.stackmaps)) {
            withAddend.add(
                addend.reg(),
                Vector<std::pair<ValueRecovery*, int32_t>>()).iterator->value.append(
                    std::make_pair(&shuffleData.args[i], addend.addend()));
            numAddends++;
        }
    }

    numAddends = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), numAddends);

    shuffleData.numLocals = static_cast<int64_t>(jitCode.stackmaps.stackSizeForLocals()) / sizeof(void*) + numAddends;

    ASSERT(!numAddends == withAddend.isEmpty());

    if (!withAddend.isEmpty()) {
        jit.subPtr(MacroAssembler::TrustedImm32(numAddends * sizeof(void*)), MacroAssembler::stackPointerRegister);
        VirtualRegister spillBase { 1 - static_cast<int>(shuffleData.numLocals) };
        for (auto entry : withAddend) {
            for (auto pair : entry.value) {
                ASSERT(numAddends > 0);
                VirtualRegister spillSlot { spillBase + --numAddends };
                ASSERT(entry.key.isGPR());
                jit.addPtr(MacroAssembler::TrustedImm32(pair.second), entry.key.gpr());
                jit.storePtr(entry.key.gpr(), CCallHelpers::addressFor(spillSlot));
                jit.subPtr(MacroAssembler::TrustedImm32(pair.second), entry.key.gpr());
                *pair.first = ValueRecovery::displacedInJSStack(spillSlot, pair.first->dataFormat());
            }
        }
        ASSERT(numAddends < stackAlignmentRegisters());
    }

    shuffleData.args.resize(numArguments());
    for (size_t i = 0; i < numArguments(); ++i)
        shuffleData.args[i] = recoveryFor(m_arguments[i], *record, jitCode.stackmaps);

    shuffleData.setupCalleeSaveRegisters(jit.codeBlock());

    CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
        CCallHelpers::NotEqual, calleeGPR, m_targetToCheck,
        CCallHelpers::TrustedImmPtr(0));

    m_callLinkInfo->setFrameShuffleData(shuffleData);
    CallFrameShuffler(jit, shuffleData).prepareForTailCall();

    m_fastCall = jit.nearTailCall();

    slowPath.link(&jit);

    CallFrameShuffler slowPathShuffler(jit, shuffleData);
    slowPathShuffler.setCalleeJSValueRegs(JSValueRegs { GPRInfo::regT0 });
    slowPathShuffler.prepareForSlowPath();

    jit.move(CCallHelpers::TrustedImmPtr(m_callLinkInfo), GPRInfo::regT2);

    m_slowCall = jit.nearCall();

    jit.abortWithReason(JITDidReturnFromTailCall);

    m_callLinkInfo->setUpCall(m_type, m_semanticeOrigin, calleeGPR);
}
Example #15
void generate(Code& code, CCallHelpers& jit)
{
    TimingScope timingScope("Air::generate");

    DisallowMacroScratchRegisterUsage disallowScratch(jit);

    // And now, we generate code.
    jit.emitFunctionPrologue();
    if (code.frameSize())
        jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), MacroAssembler::stackPointerRegister);

    GenerationContext context;
    context.code = &code;
    IndexMap<BasicBlock, CCallHelpers::Label> blockLabels(code.size());
    IndexMap<BasicBlock, CCallHelpers::JumpList> blockJumps(code.size());

    auto link = [&] (CCallHelpers::Jump jump, BasicBlock* target) {
        if (blockLabels[target].isSet()) {
            jump.linkTo(blockLabels[target], &jit);
            return;
        }

        blockJumps[target].append(jump);
    };

    for (BasicBlock* block : code) {
        blockJumps[block].link(&jit);
        blockLabels[block] = jit.label();
        ASSERT(block->size() >= 1);
        for (unsigned i = 0; i < block->size() - 1; ++i) {
            CCallHelpers::Jump jump = block->at(i).generate(jit, context);
            ASSERT_UNUSED(jump, !jump.isSet());
        }

        if (block->last().opcode == Jump
            && block->successorBlock(0) == code.findNextBlock(block))
            continue;

        if (block->last().opcode == Ret) {
            // We currently don't represent the full prologue/epilogue in Air, so we need to
            // have this override.
            if (code.frameSize())
                jit.emitFunctionEpilogue();
            else
                jit.emitFunctionEpilogueWithEmptyFrame();
            jit.ret();
            continue;
        }
        
        CCallHelpers::Jump jump = block->last().generate(jit, context);
        switch (block->numSuccessors()) {
        case 0:
            ASSERT(!jump.isSet());
            break;
        case 1:
            link(jump, block->successorBlock(0));
            break;
        case 2:
            link(jump, block->successorBlock(0));
            if (block->successorBlock(1) != code.findNextBlock(block))
                link(jit.jump(), block->successorBlock(1));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    for (auto& latePath : context.latePaths)
        latePath->run(jit, context);
}
Example #16
void JSCallVarargs::emit(CCallHelpers& jit, State& state, int32_t spillSlotsOffset, int32_t osrExitFromGenericUnwindSpillSlots)
{
    // We are passed three pieces of information:
    // - The callee.
    // - The arguments object, if it's not a forwarding call.
    // - The "this" value, if it's a constructor call.

    CallVarargsData* data = m_node->callVarargsData();
    
    GPRReg calleeGPR = GPRInfo::argumentGPR0;
    
    GPRReg argumentsGPR = InvalidGPRReg;
    GPRReg thisGPR = InvalidGPRReg;
    
    bool forwarding = false;
    
    switch (m_node->op()) {
    case CallVarargs:
    case TailCallVarargs:
    case TailCallVarargsInlinedCaller:
    case ConstructVarargs:
        argumentsGPR = GPRInfo::argumentGPR1;
        thisGPR = GPRInfo::argumentGPR2;
        break;
    case CallForwardVarargs:
    case TailCallForwardVarargs:
    case TailCallForwardVarargsInlinedCaller:
    case ConstructForwardVarargs:
        thisGPR = GPRInfo::argumentGPR1;
        forwarding = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    
    const unsigned calleeSpillSlot = 0;
    const unsigned argumentsSpillSlot = 1;
    const unsigned thisSpillSlot = 2;
    const unsigned stackPointerSpillSlot = 3;
    
    // Get some scratch registers.
    RegisterSet usedRegisters;
    usedRegisters.merge(RegisterSet::stackRegisters());
    usedRegisters.merge(RegisterSet::reservedHardwareRegisters());
    usedRegisters.merge(RegisterSet::calleeSaveRegisters());
    usedRegisters.set(calleeGPR);
    if (argumentsGPR != InvalidGPRReg)
        usedRegisters.set(argumentsGPR);
    ASSERT(thisGPR != InvalidGPRReg);
    usedRegisters.set(thisGPR);
    ScratchRegisterAllocator allocator(usedRegisters);
    GPRReg scratchGPR1 = allocator.allocateScratchGPR();
    GPRReg scratchGPR2 = allocator.allocateScratchGPR();
    GPRReg scratchGPR3 = allocator.allocateScratchGPR();

    RELEASE_ASSERT(!allocator.numberOfReusedRegisters());
    
    auto computeUsedStack = [&] (GPRReg targetGPR, unsigned extra) {
        if (isARM64()) {
            // Have to do this the weird way because $sp on ARM64 means zero when used in a subtraction.
            jit.move(CCallHelpers::stackPointerRegister, targetGPR);
            jit.negPtr(targetGPR);
            jit.addPtr(GPRInfo::callFrameRegister, targetGPR);
        } else {
            jit.move(GPRInfo::callFrameRegister, targetGPR);
            jit.subPtr(CCallHelpers::stackPointerRegister, targetGPR);
        }
        if (extra)
            jit.subPtr(CCallHelpers::TrustedImm32(extra), targetGPR);
        jit.urshiftPtr(CCallHelpers::Imm32(3), targetGPR);
    };
    
    auto callWithExceptionCheck = [&] (void* callee) {
        jit.move(CCallHelpers::TrustedImmPtr(callee), GPRInfo::nonPreservedNonArgumentGPR);
        jit.call(GPRInfo::nonPreservedNonArgumentGPR);
        m_exceptions.append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
    };
    
    if (isARM64()) {
        jit.move(CCallHelpers::stackPointerRegister, scratchGPR1);
        jit.storePtr(scratchGPR1, CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot));
    } else
        jit.storePtr(CCallHelpers::stackPointerRegister, CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot));

    unsigned extraStack = sizeof(CallerFrameAndPC) +
        WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*));

    if (forwarding) {
        CCallHelpers::JumpList slowCase;
        computeUsedStack(scratchGPR2, 0);
        emitSetupVarargsFrameFastCase(jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, m_node->child2()->origin.semantic.inlineCallFrame, data->firstVarArgOffset, slowCase);
        
        CCallHelpers::Jump done = jit.jump();
        slowCase.link(&jit);
        jit.subPtr(CCallHelpers::TrustedImm32(extraStack), CCallHelpers::stackPointerRegister);
        jit.setupArgumentsExecState();
        callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
        jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
        
        done.link(&jit);
        jit.move(calleeGPR, GPRInfo::regT0);
    } else {
        // Gotta spill the callee, arguments, and this because we will need them later and we will have some
        // calls that clobber them.
        jit.store64(calleeGPR, CCallHelpers::addressFor(spillSlotsOffset + calleeSpillSlot));
        jit.store64(argumentsGPR, CCallHelpers::addressFor(spillSlotsOffset + argumentsSpillSlot));
        jit.store64(thisGPR, CCallHelpers::addressFor(spillSlotsOffset + thisSpillSlot));
    
        computeUsedStack(scratchGPR1, 0);
        jit.subPtr(CCallHelpers::TrustedImm32(extraStack), CCallHelpers::stackPointerRegister);
        jit.setupArgumentsWithExecState(argumentsGPR, scratchGPR1, CCallHelpers::TrustedImm32(data->firstVarArgOffset));
        callWithExceptionCheck(bitwise_cast<void*>(operationSizeFrameForVarargs));
    
        jit.move(GPRInfo::returnValueGPR, scratchGPR1);
        computeUsedStack(scratchGPR2, extraStack);
        jit.load64(CCallHelpers::addressFor(spillSlotsOffset + argumentsSpillSlot), argumentsGPR);
        emitSetVarargsFrame(jit, scratchGPR1, false, scratchGPR2, scratchGPR2);
        jit.addPtr(CCallHelpers::TrustedImm32(-extraStack), scratchGPR2, CCallHelpers::stackPointerRegister);
        jit.setupArgumentsWithExecState(scratchGPR2, argumentsGPR, CCallHelpers::TrustedImm32(data->firstVarArgOffset), scratchGPR1);
        callWithExceptionCheck(bitwise_cast<void*>(operationSetupVarargsFrame));
    
        jit.move(GPRInfo::returnValueGPR, scratchGPR2);

        jit.load64(CCallHelpers::addressFor(spillSlotsOffset + thisSpillSlot), thisGPR);
        jit.load64(CCallHelpers::addressFor(spillSlotsOffset + calleeSpillSlot), GPRInfo::regT0);
    }
    
    jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), scratchGPR2, CCallHelpers::stackPointerRegister);

    jit.store64(thisGPR, CCallHelpers::calleeArgumentSlot(0));
    
    // Henceforth we make the call. The base FTL call machinery expects the callee in regT0 and for the
    // stack frame to already be set up, which it is.
    jit.store64(GPRInfo::regT0, CCallHelpers::calleeFrameSlot(JSStack::Callee));

    m_callBase.emit(jit, state, osrExitFromGenericUnwindSpillSlots);

    // Undo the damage we've done.
    if (isARM64()) {
        GPRReg scratchGPRAtReturn = CCallHelpers::selectScratchGPR(GPRInfo::returnValueGPR);
        jit.loadPtr(CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot), scratchGPRAtReturn);
        jit.move(scratchGPRAtReturn, CCallHelpers::stackPointerRegister);
    } else
        jit.loadPtr(CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot), CCallHelpers::stackPointerRegister);
}
Example #17
void AccessCase::emitIntrinsicGetter(AccessGenerationState& state)
{
    CCallHelpers& jit = *state.jit;
    JSValueRegs valueRegs = state.valueRegs;
    GPRReg baseGPR = state.baseGPR;
    GPRReg valueGPR = valueRegs.payloadGPR();

    switch (intrinsic()) {
    case TypedArrayLengthIntrinsic: {
        jit.load32(MacroAssembler::Address(state.baseGPR, JSArrayBufferView::offsetOfLength()), valueGPR);
        jit.boxInt32(valueGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
        state.succeed();
        return;
    }

    case TypedArrayByteLengthIntrinsic: {
        TypedArrayType type = structure()->classInfo()->typedArrayStorageType;

        jit.load32(MacroAssembler::Address(state.baseGPR, JSArrayBufferView::offsetOfLength()), valueGPR);

        if (elementSize(type) > 1) {
            // We can use a bitshift here since typed arrays cannot have a byteLength that overflows an int32.
            jit.lshift32(valueGPR, Imm32(logElementSize(type)), valueGPR);
        }

        jit.boxInt32(valueGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
        state.succeed();
        return;
    }

    case TypedArrayByteOffsetIntrinsic: {
        GPRReg scratchGPR = state.scratchGPR;

        CCallHelpers::Jump emptyByteOffset = jit.branch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfMode()),
            TrustedImm32(WastefulTypedArray));

        jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
        jit.loadPtr(MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfVector()), valueGPR);
        jit.loadPtr(MacroAssembler::Address(scratchGPR, Butterfly::offsetOfArrayBuffer()), scratchGPR);
        jit.loadPtr(MacroAssembler::Address(scratchGPR, ArrayBuffer::offsetOfData()), scratchGPR);
        jit.subPtr(scratchGPR, valueGPR);

        CCallHelpers::Jump done = jit.jump();
        
        emptyByteOffset.link(&jit);
        jit.move(TrustedImmPtr(0), valueGPR);
        
        done.link(&jit);
        
        jit.boxInt32(valueGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
        state.succeed();
        return;
    }

    default:
        break;
    }
    RELEASE_ASSERT_NOT_REACHED();
}
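
Both typed-array fast paths reduce to simple arithmetic: byteLength is length << log2(elementSize), safe because a typed array's byte length fits in int32 range, and byteOffset for a wasteful view is the vector pointer minus the buffer's data pointer. In plain C++ (the field layout below is illustrative; the real classes are JSArrayBufferView, Butterfly, and ArrayBuffer):

#include <cstddef>
#include <cstdint>

// Illustrative layout standing in for JSArrayBufferView/ArrayBuffer.
struct BufferModel { uint8_t* data; };
struct ViewModel {
    BufferModel* buffer;
    uint8_t* vector; // start of this view's elements
    uint32_t length; // element count
};

// byteLength = length << logElementSize; no overflow, as the comment in
// the TypedArrayByteLengthIntrinsic case notes.
inline uint32_t byteLength(const ViewModel& view, unsigned logElementSize)
{
    return view.length << logElementSize;
}

// byteOffset = vector - buffer data, exactly the loadPtr/subPtr sequence.
inline ptrdiff_t byteOffset(const ViewModel& view)
{
    return view.vector - view.buffer->data;
}
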