Example 1
static void slowPathFor(
    CCallHelpers& jit, VM* vm, Sprt_JITOperation_ECli slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
#if OS(WINDOWS) && CPU(X86_64)
    // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
    // Other argument values are shifted by 1. Use space on the stack for our two return values.
    // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
    // and space for the 16 byte return area.
    jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.move(GPRInfo::regT2, GPRInfo::argumentGPR2);
    jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
    jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
    jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#else
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsWithExecState(GPRInfo::regT2);
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
#endif

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    // The second return value GPR will hold a non-zero value for tail calls.

    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();

    RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
    CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);

    jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
    jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);

    doNotTrash.link(&jit);
    jit.jump(GPRInfo::returnValueGPR);
}
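A brief aside on why the Windows branch above is shaped this way: the Microsoft x64 calling convention returns aggregates larger than 8 bytes through a hidden pointer passed in the first argument register, which shifts the visible arguments over by one, and it hands that same pointer back in RAX. Below is a minimal, self-contained sketch of the C++ side of such a call; the struct and function names are hypothetical stand-ins, not WebKit declarations.

#include <cstdint>
#include <cstdio>

// 16-byte aggregate: too large to come back in a register under the
// Microsoft x64 ABI, so it is returned through a hidden pointer argument.
struct TwoPointerResult {
    void* codeToJumpTo;  // what the thunk loads into returnValueGPR
    uintptr_t reframe;   // non-zero means "prepare a tail call"
};

// On Windows x64 the compiler lowers a call to this as roughly
//     slowPath(&hiddenResult, execState, callee)
// so the result pointer lands in argumentGPR0 and the real arguments
// shift into argumentGPR1/argumentGPR2, the same layout the hand-written
// register moves in the thunk arrange. RAX holds the hidden pointer on
// return, which is why the thunk can load both fields back through
// returnValueGPR.
TwoPointerResult slowPath(void* execState, void* callee)
{
    (void)execState;
    (void)callee;
    return { nullptr, 0 }; // placeholder body
}

int main()
{
    TwoPointerResult r = slowPath(nullptr, nullptr);
    std::printf("code=%p reframe=%llu\n", r.codeToJumpTo,
        static_cast<unsigned long long>(r.reframe));
}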
Example 2
static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_JITOperation_E slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsExecState();
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    
    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();
    jit.jump(GPRInfo::returnValueGPR);
}
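The control-flow contract here is simpler than in Example 1: the slow path is a C function that takes the ExecState and returns a pointer to the machine code to run next, and the thunk's epilogue plus jump transfers control there. The following portable C++ sketch models that contract with ordinary function pointers; the names are hypothetical illustrations, not WebKit's P_JITOperation_E machinery.

#include <cstdio>

using NextCode = void (*)();

void exceptionThunk()  { std::puts("would rethrow an exception"); }
void targetFunction()  { std::puts("would run the resolved callee"); }

// Stand-in for a P_JITOperation_E-style slow path: takes the "ExecState"
// and returns the address to continue executing at.
NextCode slowPathFunction(void* execState)
{
    (void)execState;
    return targetFunction; // could equally return exceptionThunk
}

int main()
{
    NextCode next = slowPathFunction(nullptr);
    next(); // models jit.jump(GPRInfo::returnValueGPR)
}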
Example 3
void generate(Code& code, CCallHelpers& jit)
{
    TimingScope timingScope("Air::generate");
    
    // We don't expect the incoming code to have predecessors computed.
    code.resetReachability();
    
    if (shouldValidateIR())
        validate(code);

    // If we're doing super verbose dumping, the phase scope of any phase will already do a dump.
    if (shouldDumpIR() && !shouldDumpIRAtEachPhase()) {
        dataLog("Initial air:\n");
        dataLog(code);
    }

    // This is where we run our optimizations and transformations.
    // FIXME: Add Air optimizations.
    // https://bugs.webkit.org/show_bug.cgi?id=150456
    
    eliminateDeadCode(code);

    // This is where we would have a real register allocator. Then, we could use spillEverything()
    // in place of the register allocator only for testing.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=150457
    spillEverything(code);

    // Prior to this point the prologue and epilogue are implicit. This phase makes them
    // explicit. It also identifies which callee-saves we're using and saves them.
    handleCalleeSaves(code);

    // This turns all Stack and CallArg Args into Addr args that use the frame pointer. It does
    // this by first-fit allocating stack slots. It should be pretty darn close to optimal, so we
    // shouldn't have to worry about this very much.
    allocateStack(code);

    // If we coalesced moves then we can unbreak critical edges. This is the main reason for this
    // phase.
    simplifyCFG(code);

    // FIXME: We should really have a code layout optimization here.
    // https://bugs.webkit.org/show_bug.cgi?id=150478

    reportUsedRegisters(code);

    if (shouldValidateIR())
        validate(code);

    // Do a final dump of Air. Note that we have to do this even if we are doing per-phase dumping,
    // since the final generation is not a phase.
    if (shouldDumpIR()) {
        dataLog("Air after ", code.lastPhaseName(), ", before generation:\n");
        dataLog(code);
    }

    TimingScope codeGenTimingScope("Air::generate backend");

    // And now, we generate code.
    jit.emitFunctionPrologue();
    jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), MacroAssembler::stackPointerRegister);

    GenerationContext context;
    context.code = &code;
    IndexMap<BasicBlock, CCallHelpers::Label> blockLabels(code.size());
    IndexMap<BasicBlock, CCallHelpers::JumpList> blockJumps(code.size());

    auto link = [&] (CCallHelpers::Jump jump, BasicBlock* target) {
        if (blockLabels[target].isSet()) {
            jump.linkTo(blockLabels[target], &jit);
            return;
        }

        blockJumps[target].append(jump);
    };

    for (BasicBlock* block : code) {
        blockJumps[block].link(&jit);
        blockLabels[block] = jit.label();
        ASSERT(block->size() >= 1);
        for (unsigned i = 0; i < block->size() - 1; ++i) {
            CCallHelpers::Jump jump = block->at(i).generate(jit, context);
            ASSERT_UNUSED(jump, !jump.isSet());
        }

        if (block->last().opcode == Jump
            && block->successorBlock(0) == code.findNextBlock(block))
            continue;

        if (block->last().opcode == Ret) {
            // We currently don't represent the full prologue/epilogue in Air, so we need to
            // have this override.
            jit.emitFunctionEpilogue();
            jit.ret();
            continue;
        }
        
        CCallHelpers::Jump jump = block->last().generate(jit, context);
        switch (block->numSuccessors()) {
        case 0:
            ASSERT(!jump.isSet());
            break;
        case 1:
            link(jump, block->successorBlock(0));
            break;
        case 2:
            link(jump, block->successorBlock(0));
            if (block->successorBlock(1) != code.findNextBlock(block))
                link(jit.jump(), block->successorBlock(1));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    for (auto& latePath : context.latePaths)
        latePath->run(jit, context);
}
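The blockLabels/blockJumps pair above is a one-pass forward-reference resolver: when a jump targets a block whose label is already bound, it is linked immediately (a backward branch); otherwise it is queued and patched when emission reaches the target. Here is a self-contained sketch of that idiom over a fake code buffer; the types and offsets are hypothetical, not the Air API.

#include <cstddef>
#include <cstdio>
#include <vector>

struct Patch { size_t jumpSite; };

int main()
{
    const size_t blockCount = 3;
    std::vector<long> labelOffset(blockCount, -1);       // blockLabels
    std::vector<std::vector<Patch>> pending(blockCount); // blockJumps
    std::vector<size_t> patchedTo(100, 0);               // fake code buffer
    size_t cursor = 0;

    auto link = [&](Patch jump, size_t target) {
        if (labelOffset[target] >= 0) {                  // label bound: link now
            patchedTo[jump.jumpSite] = static_cast<size_t>(labelOffset[target]);
            return;
        }
        pending[target].push_back(jump);                 // otherwise queue for later
    };

    // Layout: block 0 jumps forward to 2, block 2 jumps backward to 1.
    for (size_t block = 0; block < blockCount; ++block) {
        for (Patch jump : pending[block])                // resolve queued forward jumps
            patchedTo[jump.jumpSite] = cursor;
        pending[block].clear();
        labelOffset[block] = static_cast<long>(cursor);  // bind the label
        cursor += 4;                                     // pretend each block is 4 bytes
        if (block == 0) link(Patch{cursor++}, 2);        // forward reference
        if (block == 2) link(Patch{cursor++}, 1);        // backward reference
    }
    std::printf("jump at 4 -> %zu, jump at 13 -> %zu\n", patchedTo[4], patchedTo[13]);
}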
Example 4
void generate(Code& code, CCallHelpers& jit)
{
    TimingScope timingScope("Air::generate");

    DisallowMacroScratchRegisterUsage disallowScratch(jit);

    // And now, we generate code.
    jit.emitFunctionPrologue();
    if (code.frameSize())
        jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), MacroAssembler::stackPointerRegister);

    GenerationContext context;
    context.code = &code;
    IndexMap<BasicBlock, CCallHelpers::Label> blockLabels(code.size());
    IndexMap<BasicBlock, CCallHelpers::JumpList> blockJumps(code.size());

    auto link = [&] (CCallHelpers::Jump jump, BasicBlock* target) {
        if (blockLabels[target].isSet()) {
            jump.linkTo(blockLabels[target], &jit);
            return;
        }

        blockJumps[target].append(jump);
    };

    for (BasicBlock* block : code) {
        blockJumps[block].link(&jit);
        blockLabels[block] = jit.label();
        ASSERT(block->size() >= 1);
        for (unsigned i = 0; i < block->size() - 1; ++i) {
            CCallHelpers::Jump jump = block->at(i).generate(jit, context);
            ASSERT_UNUSED(jump, !jump.isSet());
        }

        if (block->last().opcode == Jump
            && block->successorBlock(0) == code.findNextBlock(block))
            continue;

        if (block->last().opcode == Ret) {
            // We currently don't represent the full prologue/epilogue in Air, so we need to
            // have this override.
            if (code.frameSize())
                jit.emitFunctionEpilogue();
            else
                jit.emitFunctionEpilogueWithEmptyFrame();
            jit.ret();
            continue;
        }
        
        CCallHelpers::Jump jump = block->last().generate(jit, context);
        switch (block->numSuccessors()) {
        case 0:
            ASSERT(!jump.isSet());
            break;
        case 1:
            link(jump, block->successorBlock(0));
            break;
        case 2:
            link(jump, block->successorBlock(0));
            if (block->successorBlock(1) != code.findNextBlock(block))
                link(jit.jump(), block->successorBlock(1));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    for (auto& latePath : context.latePaths)
        latePath->run(jit, context);
}
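Both versions of generate elide a terminator when the block ends in an unconditional Jump whose taken successor is the next block in layout order, since execution simply falls through; for two-way branches, only the not-taken successor that is not next in layout needs an extra emitted jump. A small sketch of that test, using hypothetical block and layout types rather than Air's:

#include <cstdio>
#include <vector>

struct Block { int taken; };  // index of successor 0; -1 if the block has none

int main()
{
    // Layout order: 0 -> 1 -> 2; block 0 jumps to 1 (fall-through),
    // block 1 jumps back to 0 (real branch), block 2 has no successor.
    std::vector<Block> layout = { {1}, {0}, {-1} };
    for (size_t i = 0; i < layout.size(); ++i) {
        int taken = layout[i].taken;
        bool isNextInLayout = taken == static_cast<int>(i) + 1; // code.findNextBlock(block)
        if (taken < 0)
            std::printf("block %zu: no terminator jump\n", i);
        else if (isNextInLayout)
            std::printf("block %zu: falls through to %d, emit nothing\n", i, taken);
        else
            std::printf("block %zu: emit jump to %d\n", i, taken);
    }
}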