Example #1
bool JITCompiler::compile(JITCode& entry)
{
    setStartOfCode();
    compileEntry();
    SpeculativeJIT speculative(*this);
    compileBody(speculative);
    setEndOfMainPath();

    // Generate slow path code.
    speculative.runSlowPathGenerators();
    
    compileExceptionHandlers();
    linkOSRExits();
    
    // Create OSR entry trampolines if necessary.
    speculative.createOSREntries();
    setEndOfCode();

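    // Copy the assembled code into executable memory. With JITCompilationCanFail,
    // an allocation failure is reported through didFailToAllocate() rather than
    // crashing, so the caller can bail out.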
    LinkBuffer linkBuffer(*m_globalData, this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate())
        return false;
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);

    if (m_disassembler)
        m_disassembler->dump(linkBuffer);

    entry = JITCode(
        linkBuffer.finalizeCodeWithoutDisassembly(),
        JITCode::DFGJIT);
    return true;
}
Example #2
MacroAssemblerCodeRef registerRestorationThunkGenerator(VM* vm)
{
    AssemblyHelpers jit(vm, 0);
    generateRegisterRestoration(jit);
    LinkBuffer linkBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(linkBuffer, ("Register restoration thunk"));
}
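Example #2 is the pattern at its smallest: emit code into an AssemblyHelpers buffer, move it into executable memory with a LinkBuffer, and finalize. The annotated sketch below restates that lifecycle; emitMyThunk and the thunk name are placeholder assumptions, while the AssemblyHelpers, LinkBuffer, and FINALIZE_CODE calls are the ones used in the example above.
// A minimal sketch of the thunk-generation lifecycle (emitMyThunk is hypothetical).
MacroAssemblerCodeRef myThunkGenerator(VM* vm)
{
    // 1. Assemble instructions into a buffer owned by the assembler; the null
    //    CodeBlock argument means the code is not tied to any particular function.
    AssemblyHelpers jit(vm, 0);
    emitMyThunk(jit); // hypothetical emitter standing in for generateRegisterRestoration()

    // 2. Allocate executable memory and copy the assembled code into it.
    LinkBuffer linkBuffer(*vm, &jit, GLOBAL_THUNK_ID);

    // 3. Resolve outstanding links and return a named, callable code reference.
    return FINALIZE_CODE(linkBuffer, ("My thunk"));
}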
Example #3
Compilation::Compilation(VM& vm, Procedure& proc, unsigned optLevel)
{
    CCallHelpers jit(&vm);
    generate(proc, jit, optLevel);
    LinkBuffer linkBuffer(vm, jit, nullptr);

    m_codeRef = FINALIZE_CODE(linkBuffer, ("B3::Compilation"));
    m_byproducts = proc.takeByproducts();
}
Example #4
Compilation compile(VM& vm, Procedure& proc, unsigned optLevel)
{
    TimingScope timingScope("Compilation");
    
    prepareForGeneration(proc, optLevel);
    
    CCallHelpers jit(&vm);
    generate(proc, jit);
    LinkBuffer linkBuffer(vm, jit, nullptr);

    return Compilation(FINALIZE_CODE(linkBuffer, ("B3::Compilation")), proc.releaseByproducts());
}
Example #5
void JITCompiler::compile(JITCode& entry)
{
    compileEntry();
    SpeculativeJIT speculative(*this);
    compileBody(speculative);

    LinkBuffer linkBuffer(*m_globalData, this);
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);

    entry = JITCode(linkBuffer.finalizeCode(), JITCode::DFGJIT);
}
Example #6
void generateICFastPath(
    State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, DescriptorType& ic, size_t sizeOfIC)
{
    VM& vm = state.graph.m_vm;

    StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
    if (iter == recordMap.end()) {
        // It was optimized out.
        return;
    }
    
    Vector<StackMaps::Record>& records = iter->value;
    
    RELEASE_ASSERT(records.size() == ic.m_generators.size());
    
    for (unsigned i = records.size(); i--;) {
        StackMaps::Record& record = records[i];
        auto generator = ic.m_generators[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        generator.generateFastPath(fastPathJIT);
        
        char* startOfIC =
            bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
        
        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfIC);
        // Note: we could handle the !isValid() case. We just don't appear to have a
        // reason to do so, yet.
        RELEASE_ASSERT(linkBuffer.isValid());
        
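        // Pad the remainder of the reserved inline-cache slot with nops so the
        // patchable region always occupies exactly sizeOfIC bytes.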
        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfIC - linkBuffer.size());
        
        state.finalizer->sideCodeLinkBuffer->link(
            ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));
        
        linkBuffer.link(
            generator.slowPathJump(),
            state.finalizer->sideCodeLinkBuffer->locationOf(generator.slowPathBegin()));
        
        generator.finalize(linkBuffer, *state.finalizer->sideCodeLinkBuffer);
    }
}
Example #7
void LazySlowPath::generate(CodeBlock* codeBlock)
{
    RELEASE_ASSERT(!m_stub);

    VM& vm = *codeBlock->vm();

    CCallHelpers jit(&vm, codeBlock);
    GenerationParams params;
    CCallHelpers::JumpList exceptionJumps;
    params.exceptionJumps = m_exceptionTarget ? &exceptionJumps : nullptr;
    params.lazySlowPath = this;

    unsigned bytesSaved = m_scratchRegisterAllocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
    // This is needed because LLVM may create a stackmap location that is the register SP.
    // But on arm64, SP is also the same register number as ZR, so LLVM is telling us that it has
    // proven something is zero. Our MASM isn't universally compatible with arm64's context dependent
    // notion of SP meaning ZR. We just make things easier by ensuring we do the necessary move of zero
    // into a non-SP register.
    if (m_newZeroValueRegister != InvalidGPRReg)
        jit.move(CCallHelpers::TrustedImm32(0), m_newZeroValueRegister);

    m_generator->run(jit, params);

    CCallHelpers::Label doneLabel;
    CCallHelpers::Jump jumpToEndOfPatchpoint;
    if (bytesSaved) {
        doneLabel = jit.label();
        m_scratchRegisterAllocator.restoreReusedRegistersByPopping(jit, bytesSaved, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
        jumpToEndOfPatchpoint = jit.jump();
    }

    LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationMustSucceed);
    if (bytesSaved) {
        linkBuffer.link(params.doneJumps, linkBuffer.locationOf(doneLabel));
        linkBuffer.link(jumpToEndOfPatchpoint, m_patchpoint.labelAtOffset(MacroAssembler::maxJumpReplacementSize()));
    } else
        linkBuffer.link(params.doneJumps, m_patchpoint.labelAtOffset(MacroAssembler::maxJumpReplacementSize()));
    if (m_exceptionTarget)
        linkBuffer.link(exceptionJumps, m_exceptionTarget);
    m_stub = FINALIZE_CODE_FOR(codeBlock, linkBuffer, ("Lazy slow path call stub"));

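    // Patch the patchpoint in the installed code so that it now jumps to the freshly
    // generated stub.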
    MacroAssembler::replaceWithJump(m_patchpoint, CodeLocationLabel(m_stub.code()));
}
Example #8
bool JITCompiler::compile(JITCode& entry)
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();
    SpeculativeJIT speculative(*this);
    compileBody(speculative);
    setEndOfMainPath();

    // Generate slow path code.
    speculative.runSlowPathGenerators();
    
    compileExceptionHandlers();
    linkOSRExits();
    
    // Create OSR entry trampolines if necessary.
    speculative.createOSREntries();
    setEndOfCode();

    LinkBuffer linkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate())
        return false;
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);

    if (shouldShowDisassembly())
        m_disassembler->dump(linkBuffer);
    if (m_graph.m_compilation)
        m_disassembler->reportToProfiler(m_graph.m_compilation.get(), linkBuffer);

    entry = JITCode(
        linkBuffer.finalizeCodeWithoutDisassembly(),
        JITCode::DFGJIT);
    return true;
}
Example #9
void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    // === Stage 1 - Function header code generation ===
    //
    // This code currently matches the old JIT. In the function header we need to
    // pop the return address (since we do not allow any recursion on the machine
    // stack), and perform a fast register file check.

    // This is the main entry point, without performing an arity check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the register file
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    preserveReturnAddressAfterCall(GPRInfo::regT2);
    emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);

    // Set up a pointer to the codeblock in the CallFrameHeader.
    emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);

    // Plant a check that sufficient space is available in the RegisterFile.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), GPRInfo::regT1);
    // Return here after register file check.
    Label fromRegisterFileCheck = label();


    // === Stage 2 - Function body code generation ===
    //
    // We generate the speculative code path, followed by the non-speculative
    // code for the function. Next we need to link the two together, making
    // bail-outs from the speculative path jump to the corresponding point on
    // the non-speculative one (and generating any code necessary to juggle
    // register values around, rebox values, and ensure values are spilled, to match the
    // non-speculative path's requirements).

#if DFG_JIT_BREAK_ON_EVERY_FUNCTION
    // Handy debug tool!
    breakpoint();
#endif

    // First generate the speculative path.
    Label speculativePathBegin = label();
    SpeculativeJIT speculative(*this);
#if !DFG_DEBUG_LOCAL_DISBALE_SPECULATIVE
    bool compiledSpeculative = speculative.compile();
#else
    bool compiledSpeculative = false;
#endif

    // Next, generate the non-speculative path. We pass this a SpeculationCheckIndexIterator
    // to allow it to check which nodes in the graph may bail out, and may need to reenter the
    // non-speculative path.
    if (compiledSpeculative) {
        SpeculationCheckIndexIterator checkIterator(speculative.speculationChecks());
        NonSpeculativeJIT nonSpeculative(*this);
        nonSpeculative.compile(checkIterator);

        // Link the bail-outs from the speculative path to the corresponding entry points into the non-speculative one.
        linkSpeculationChecks(speculative, nonSpeculative);
    } else {
        // If compilation through the SpeculativeJIT failed, throw away the code we generated.
        m_calls.clear();
        rewindToLabel(speculativePathBegin);

        SpeculationCheckVector noChecks;
        SpeculationCheckIndexIterator checkIterator(noChecks);
        NonSpeculativeJIT nonSpeculative(*this);
        nonSpeculative.compile(checkIterator);
    }

    // === Stage 3 - Function footer code generation ===
    //
    // Generate code to lookup and jump to exception handlers, to perform the slow
    // register file check (if the fast one in the function header fails), and
    // generate the entry point with arity check.

    // Iterate over the m_calls vector, checking for exception checks,
    // and linking them to here.
    unsigned exceptionCheckCount = 0;
    for (unsigned i = 0; i < m_calls.size(); ++i) {
        Jump& exceptionCheck = m_calls[i].m_exceptionCheck;
        if (exceptionCheck.isSet()) {
            exceptionCheck.link(this);
            ++exceptionCheckCount;
        }
    }
    // If any exception checks were linked, generate code to lookup a handler.
    if (exceptionCheckCount) {
        // lookupExceptionHandler is passed two arguments, exec (the CallFrame*), and
        // an identifier for the operation that threw the exception, which we can use
        // to look up handler information. The identifier we use is the return address
        // of the call out from JIT code that threw the exception; this is still
        // available on the stack, just below the stack pointer!
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
        peek(GPRInfo::argumentGPR1, -1);
        m_calls.append(CallRecord(call(), lookupExceptionHandler));
        // lookupExceptionHandler leaves the handler CallFrame* in the returnValueGPR,
        // and the address of the handler in returnValueGPR2.
        jump(GPRInfo::returnValueGPR2);
    }

    // Generate the register file check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    registerFileCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callRegisterFileCheck = call();
    jump(fromRegisterFileCheck);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    preserveReturnAddressAfterCall(GPRInfo::regT2);
    emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
    branch32(Equal, GPRInfo::regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callArityCheck = call();
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);


    // === Stage 4 - Link ===
    //
    // Link the code, populate data in CodeBlock data structures.

    LinkBuffer linkBuffer(*m_globalData, this, m_globalData->executableAllocator);

#if DFG_DEBUG_VERBOSE
    fprintf(stderr, "JIT code start at %p\n", linkBuffer.debugAddress());
#endif

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    if (m_codeBlock->needsCallReturnIndices()) {
        m_codeBlock->callReturnIndexVector().reserveCapacity(exceptionCheckCount);
        for (unsigned i = 0; i < m_calls.size(); ++i) {
            if (m_calls[i].m_exceptionCheck.isSet()) {
                unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_calls[i].m_call);
                unsigned exceptionInfo = m_calls[i].m_exceptionInfo;
                m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
            }
        }
    }

    // FIXME: switch the register file check & arity check over to DFGOperation style calls, not JIT stubs.
    linkBuffer.link(callRegisterFileCheck, cti_register_file_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = linkBuffer.finalizeCode();
}
Example #10
static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, bool didSeeUnwindInfo)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps stackmaps = jitCode->stackmaps;
    
    int localsOffset =
        offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;
    
    int varargsSpillSlotsOffset;
    if (state.varargsSpillSlotsStackmapID != UINT_MAX)
        varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);
    else
        varargsSpillSlotsOffset = 0;
    
    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;
        
        if (inlineCallFrame->argumentsRegister.isValid())
            inlineCallFrame->argumentsRegister += localsOffset;
        
        if (inlineCallFrame->argumentCountRegister.isValid())
            inlineCallFrame->argumentCountRegister += localsOffset;
        
        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }
        
        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }
    }
    
    if (codeBlock->usesArguments()) {
        codeBlock->setArgumentsRegister(
            VirtualRegister(codeBlock->argumentsRegister().offset() + localsOffset));
    }

    MacroAssembler::Label stackOverflowException;

    {
        CCallHelpers checkJIT(&vm, codeBlock);
        
        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        stackOverflowException = checkJIT.label();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

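        // JITCompilationMustSucceed: an executable-memory allocation failure here is
        // treated as fatal, so no didFailToAllocate() check is needed.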
        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, checkJIT, codeBlock, JITCompilationMustSucceed);
        linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
        linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, FunctionPtr(lookupExceptionHandlerFromCallerFrame));

        state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
    }

    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());
        RELEASE_ASSERT(didSeeUnwindInfo);
        
        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, exitThunkGenerator, codeBlock, JITCompilationMustSucceed);
        
        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());
        
        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");

            auto iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);
            
            for (unsigned j = exit.m_values.size(); j--;) {
                ExitValue value = exit.m_values[j];
                if (!value.isInJSStackSomehow())
                    continue;
                if (!value.virtualRegister().isLocal())
                    continue;
                exit.m_values[j] = value.withVirtualRegister(
                    VirtualRegister(value.virtualRegister().offset() + localsOffset));
            }
            
            if (verboseCompilationEnabled()) {
                DumpContext context;
                dataLog("    Exit values: ", inContext(exit.m_values, &context), "\n");
            }
        }
        
        state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
    }

    if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty() || !state.checkIns.isEmpty()) {
        CCallHelpers slowPathJIT(&vm, codeBlock);
        
        CCallHelpers::JumpList exceptionTarget;
        
        for (unsigned i = state.getByIds.size(); i--;) {
            GetByIdDescriptor& getById = state.getByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");
            
            auto iter = recordMap.find(getById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
            
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg result = record.locations[0].directGPR();
                GPRReg base = record.locations[1].directGPR();
                
                JITGetByIdGenerator gen(
                    codeBlock, getById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(result), NeedToSpill);
                
                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, getById.codeOrigin(), &exceptionTarget,
                    operationGetByIdOptimize, result, gen.stubInfo(), base, getById.uid());

                gen.reportSlowPathCall(begin, call);

                getById.m_slowPathDone.append(slowPathJIT.jump());
                getById.m_generators.append(gen);
            }
        }
        
        for (unsigned i = state.putByIds.size(); i--;) {
            PutByIdDescriptor& putById = state.putByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");
            
            auto iter = recordMap.find(putById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg base = record.locations[0].directGPR();
                GPRReg value = record.locations[1].directGPR();
                
                JITPutByIdGenerator gen(
                    codeBlock, putById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(value), GPRInfo::patchpointScratchRegister, NeedToSpill,
                    putById.ecmaMode(), putById.putKind());
                
                MacroAssembler::Label begin = slowPathJIT.label();
                
                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, putById.codeOrigin(), &exceptionTarget,
                    gen.slowPathFunction(), gen.stubInfo(), value, base, putById.uid());
                
                gen.reportSlowPathCall(begin, call);
                
                putById.m_slowPathDone.append(slowPathJIT.jump());
                putById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            CheckInDescriptor& checkIn = state.checkIns[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling checkIn stackmap #", checkIn.stackmapID(), "\n");
            
            auto iter = recordMap.find(checkIn.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                RegisterSet usedRegisters = usedRegistersFor(record);
                GPRReg result = record.locations[0].directGPR();
                GPRReg obj = record.locations[1].directGPR();
                StructureStubInfo* stubInfo = codeBlock->addStubInfo(); 
                stubInfo->codeOrigin = checkIn.codeOrigin();
                stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
                stubInfo->patch.valueGPR = static_cast<int8_t>(result);
                stubInfo->patch.usedRegisters = usedRegisters;
                stubInfo->patch.spillMode = NeedToSpill;

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call slowCall = callOperation(
                    state, usedRegisters, slowPathJIT, checkIn.codeOrigin(), &exceptionTarget,
                    operationInOptimize, result, stubInfo, obj, checkIn.m_id);

                checkIn.m_slowPathDone.append(slowPathJIT.jump());
                
                checkIn.m_generators.append(CheckInGenerator(stubInfo, slowCall, begin));
            }
        }
        
        exceptionTarget.link(&slowPathJIT);
        MacroAssembler::Jump exceptionJump = slowPathJIT.jump();
        
        state.finalizer->sideCodeLinkBuffer = std::make_unique<LinkBuffer>(vm, slowPathJIT, codeBlock, JITCompilationMustSucceed);
        state.finalizer->sideCodeLinkBuffer->link(
            exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        
        for (unsigned i = state.getByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
                sizeOfGetById());
        }
        for (unsigned i = state.putByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
                sizeOfPutById());
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            generateCheckInICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
                sizeOfIn()); 
        } 
    }
    
    adjustCallICsForStackmaps(state.jsCalls, recordMap);
    
    for (unsigned i = state.jsCalls.size(); i--;) {
        JSCall& call = state.jsCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT);
        
        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        
        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfCall());
        if (!linkBuffer.isValid()) {
            dataLog("Failed to insert inline cache for call because we thought the size would be ", sizeOfCall(), " but it ended up being ", fastPathJIT.m_assembler.codeSize(), " prior to compaction.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }
        
        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfCall() - linkBuffer.size());
        
        call.link(vm, linkBuffer);
    }
    
    adjustCallICsForStackmaps(state.jsCallVarargses, recordMap);
    
    for (unsigned i = state.jsCallVarargses.size(); i--;) {
        JSCallVarargs& call = state.jsCallVarargses[i];
        
        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, graph, varargsSpillSlotsOffset);
        
        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = sizeOfICFor(call.node());

        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfIC);
        if (!linkBuffer.isValid()) {
            dataLog("Failed to insert inline cache for varargs call (specifically, ", Graph::opName(call.node()->op()), ") because we thought the size would be ", sizeOfIC, " but it ended up being ", fastPathJIT.m_assembler.codeSize(), " prior to compaction.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }
        
        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfIC - linkBuffer.size());
        
        call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
    }
    
    RepatchBuffer repatchBuffer(codeBlock);

    auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            RELEASE_ASSERT(stackOverflowException.isSet());

            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
        }
    }
    
    iter = recordMap.find(state.handleExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
            
            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        }
    }
    
    for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
        OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
        OSRExit& exit = jitCode->osrExit[exitIndex];
        iter = recordMap.find(exit.m_stackmapID);
        
        Vector<const void*> codeAddresses;
        
        if (iter != recordMap.end()) {
            for (unsigned i = iter->value.size(); i--;) {
                StackMaps::Record& record = iter->value[i];
                
                CodeLocationLabel source = CodeLocationLabel(
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
                
                codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());
                
                if (info.m_isInvalidationPoint)
                    jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
                else
                    repatchBuffer.replaceWithJump(source, info.m_thunkAddress);
            }
        }
        
        if (graph.compilation())
            graph.compilation()->addOSRExitSite(codeAddresses);
    }
}
Example #11
bool JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    SamplingRegion samplingRegion("DFG Backend");
    
    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackCheck = branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), GPRInfo::regT1);
    // Return here after stack check.
    Label fromStackCheck = label();


    // === Function body code generation ===
    SpeculativeJIT speculative(*this);
    compileBody(speculative);
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the slow stack check (if the fast one in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    stackCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    CallBeginToken token;
    beginCall(CodeOrigin(0), token);
    Call callStackCheck = call();
    notifyCall(callStackCheck, CodeOrigin(0), token);
    jump(fromStackCheck);
    
    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    beginCall(CodeOrigin(0), token);
    Call callArityCheck = call();
    notifyCall(callArityCheck, CodeOrigin(0), token);
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);
    
    // Generate slow path code.
    speculative.runSlowPathGenerators();
    
    compileExceptionHandlers();
    linkOSRExits();
    
    // Create OSR entry trampolines if necessary.
    speculative.createOSREntries();
    setEndOfCode();

    // === Link ===
    LinkBuffer linkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate())
        return false;
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);
    
    // FIXME: switch the stack check & arity check over to DFGOperation style calls, not JIT stubs.
    linkBuffer.link(callStackCheck, cti_stack_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);
    
    if (shouldShowDisassembly())
        m_disassembler->dump(linkBuffer);
    if (m_graph.m_compilation)
        m_disassembler->reportToProfiler(m_graph.m_compilation.get(), linkBuffer);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = JITCode(
        linkBuffer.finalizeCodeWithoutDisassembly(),
        JITCode::DFGJIT);
    return true;
}
Example #12
MacroAssemblerCodeRef generateRegisterPreservationWrapper(VM& vm, ExecutableBase* executable, MacroAssemblerCodePtr target)
{
#if ENABLE(FTL_JIT)
    // We shouldn't ever be generating wrappers for native functions.
    RegisterSet toSave = registersToPreserve();
    ptrdiff_t offset = registerPreservationOffset();
    
    AssemblyHelpers jit(&vm, 0);
    
    jit.preserveReturnAddressAfterCall(GPRInfo::regT1);
    jit.load32(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (JSStack::ArgumentCount - JSStack::CallerFrameAndPCSize) * sizeof(Register) + PayloadOffset),
        GPRInfo::regT2);
    
    // Place the stack pointer where we want it to be.
    jit.subPtr(AssemblyHelpers::TrustedImm32(offset), AssemblyHelpers::stackPointerRegister);
    
    // Compute the number of things we will be copying.
    jit.add32(
        AssemblyHelpers::TrustedImm32(
            JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize),
        GPRInfo::regT2);

    ASSERT(!toSave.get(GPRInfo::regT4));
    jit.move(AssemblyHelpers::stackPointerRegister, GPRInfo::regT4);
    
    AssemblyHelpers::Label loop = jit.label();
    jit.sub32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.load64(AssemblyHelpers::Address(GPRInfo::regT4, offset), GPRInfo::regT0);
    jit.store64(GPRInfo::regT0, GPRInfo::regT4);
    jit.addPtr(AssemblyHelpers::TrustedImm32(sizeof(Register)), GPRInfo::regT4);
    jit.branchTest32(AssemblyHelpers::NonZero, GPRInfo::regT2).linkTo(loop, &jit);

    // At this point regT4 + offset points to where we save things.
    ptrdiff_t currentOffset = 0;
    jit.storePtr(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT4, currentOffset));
    
    for (GPRReg gpr = AssemblyHelpers::firstRegister(); gpr <= AssemblyHelpers::lastRegister(); gpr = static_cast<GPRReg>(gpr + 1)) {
        if (!toSave.get(gpr))
            continue;
        currentOffset += sizeof(Register);
        jit.store64(gpr, AssemblyHelpers::Address(GPRInfo::regT4, currentOffset));
    }
    
    // Assume that there aren't any saved FP registers.
    
    // Restore the tag registers.
    jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
    jit.add64(AssemblyHelpers::TrustedImm32(TagMask - TagTypeNumber), GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
    
    jit.move(
        AssemblyHelpers::TrustedImmPtr(
            vm.getCTIStub(registerRestorationThunkGenerator).code().executableAddress()),
        GPRInfo::nonArgGPR0);
    jit.restoreReturnAddressBeforeReturn(GPRInfo::nonArgGPR0);
    AssemblyHelpers::Jump jump = jit.jump();
    
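    // Link the wrapper into executable memory and point its terminal jump at the
    // entry point being wrapped.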
    LinkBuffer linkBuffer(vm, &jit, GLOBAL_THUNK_ID);
    linkBuffer.link(jump, CodeLocationLabel(target));

    if (Options::verboseFTLToJSThunk())
        dataLog("Need a thunk for calls from FTL to non-FTL version of ", *executable, "\n");
    
    return FINALIZE_DFG_CODE(linkBuffer, ("Register preservation wrapper for %s/%s, %p", toCString(executable->hashFor(CodeForCall)).data(), toCString(executable->hashFor(CodeForConstruct)).data(), target.executableAddress()));
#else // ENABLE(FTL_JIT)
    UNUSED_PARAM(vm);
    UNUSED_PARAM(executable);
    UNUSED_PARAM(target);
    // We don't support non-FTL builds for two reasons:
    // - It just so happens that currently only the FTL bottoms out in this code.
    // - The code above uses 64-bit instructions. It doesn't necessarily have to; it would be
    //   easy to change it so that it doesn't. But obviously making that change would be a
    //   prerequisite to removing this #if.
    UNREACHABLE_FOR_PLATFORM();
    return MacroAssemblerCodeRef();
#endif // ENABLE(FTL_JIT)
}
Example #13
// We are creating a bunch of threads that touch the main thread's stack. This will make ASAN unhappy.
// The reason this is OK is that we guarantee that the main thread doesn't continue until all threads
// that could touch its stack are done executing.
SUPPRESS_ASAN 
void Plan::run()
{
    if (!parseAndValidateModule())
        return;

    auto tryReserveCapacity = [this] (auto& vector, size_t size, const char* what) {
        if (UNLIKELY(!vector.tryReserveCapacity(size))) {
            StringBuilder builder;
            builder.appendLiteral("Failed allocating enough space for ");
            builder.appendNumber(size);
            builder.append(what);
            m_errorMessage = builder.toString();
            return false;
        }
        return true;
    };

    if (!tryReserveCapacity(m_wasmExitStubs, m_moduleInformation->importFunctionSignatureIndices.size(), " WebAssembly to JavaScript stubs")
        || !tryReserveCapacity(m_unlinkedWasmToWasmCalls, m_functionLocationInBinary.size(), " unlinked WebAssembly to WebAssembly calls")
        || !tryReserveCapacity(m_wasmInternalFunctions, m_functionLocationInBinary.size(), " WebAssembly functions")
        || !tryReserveCapacity(m_compilationContexts, m_functionLocationInBinary.size(), " compilation contexts"))
        return;

    m_unlinkedWasmToWasmCalls.resize(m_functionLocationInBinary.size());
    m_wasmInternalFunctions.resize(m_functionLocationInBinary.size());
    m_compilationContexts.resize(m_functionLocationInBinary.size());

    for (unsigned importIndex = 0; importIndex < m_moduleInformation->imports.size(); ++importIndex) {
        Import* import = &m_moduleInformation->imports[importIndex];
        if (import->kind != ExternalKind::Function)
            continue;
        unsigned importFunctionIndex = m_wasmExitStubs.size();
        if (verbose)
            dataLogLn("Processing import function number ", importFunctionIndex, ": ", import->module, ": ", import->field);
        SignatureIndex signatureIndex = m_moduleInformation->importFunctionSignatureIndices.at(import->kindIndex);
        m_wasmExitStubs.uncheckedAppend(exitStubGenerator(m_vm, m_callLinkInfos, signatureIndex, importFunctionIndex));
    }

    m_currentIndex = 0;

    auto doWork = [this] {
        while (true) {
            uint32_t functionIndex;
            {
                auto locker = holdLock(m_lock);
                if (m_currentIndex >= m_functionLocationInBinary.size())
                    return;
                functionIndex = m_currentIndex;
                ++m_currentIndex;
            }

            const uint8_t* functionStart = m_source + m_functionLocationInBinary[functionIndex].start;
            size_t functionLength = m_functionLocationInBinary[functionIndex].end - m_functionLocationInBinary[functionIndex].start;
            ASSERT(functionLength <= m_sourceLength);
            SignatureIndex signatureIndex = m_moduleInformation->internalFunctionSignatureIndices[functionIndex];
            const Signature* signature = SignatureInformation::get(m_vm, signatureIndex);
            unsigned functionIndexSpace = m_wasmExitStubs.size() + functionIndex;
            ASSERT_UNUSED(functionIndexSpace, m_moduleInformation->signatureIndexFromFunctionIndexSpace(functionIndexSpace) == signatureIndex);
            ASSERT(validateFunction(m_vm, functionStart, functionLength, signature, *m_moduleInformation, m_moduleSignatureIndicesToUniquedSignatureIndices));

            m_unlinkedWasmToWasmCalls[functionIndex] = Vector<UnlinkedWasmToWasmCall>();
            auto parseAndCompileResult = parseAndCompile(*m_vm, m_compilationContexts[functionIndex], functionStart, functionLength, signature, m_unlinkedWasmToWasmCalls[functionIndex], *m_moduleInformation, m_moduleSignatureIndicesToUniquedSignatureIndices);

            if (UNLIKELY(!parseAndCompileResult)) {
                auto locker = holdLock(m_lock);
                if (!m_errorMessage) {
                    // Multiple compiles could fail simultaneously. We arbitrarily choose the first.
                    m_errorMessage = makeString(parseAndCompileResult.error(), ", in function at index ", String::number(functionIndex)); // FIXME make this an Expected.
                }
                m_currentIndex = m_functionLocationInBinary.size();

                // We will terminate on the next execution.
                continue; 
            }

            m_wasmInternalFunctions[functionIndex] = WTFMove(*parseAndCompileResult);
        }
    };

    MonotonicTime startTime;
    if (verbose || Options::reportCompileTimes())
        startTime = MonotonicTime::now();

    uint32_t threadCount = Options::useConcurrentJIT() ? WTF::numberOfProcessorCores() : 1;
    uint32_t numWorkerThreads = threadCount - 1;
    Vector<ThreadIdentifier> threads;
    threads.reserveCapacity(numWorkerThreads);
    for (uint32_t i = 0; i < numWorkerThreads; i++)
        threads.uncheckedAppend(createThread("jsc.wasm-b3-compilation.thread", doWork));

    doWork(); // Let the main thread do some work too.

    for (uint32_t i = 0; i < numWorkerThreads; i++)
        waitForThreadCompletion(threads[i]);

    for (uint32_t functionIndex = 0; functionIndex < m_functionLocationInBinary.size(); functionIndex++) {
        {
            CompilationContext& context = m_compilationContexts[functionIndex];
            SignatureIndex signatureIndex = m_moduleInformation->internalFunctionSignatureIndices[functionIndex];
            String signatureDescription = SignatureInformation::get(m_vm, signatureIndex)->toString();
            {
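                // Move the B3-generated wasm entrypoint into executable memory and wrap
                // it in a B3::Compilation so its byproducts live as long as the code.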
                LinkBuffer linkBuffer(*m_vm, *context.wasmEntrypointJIT, nullptr);
                m_wasmInternalFunctions[functionIndex]->wasmEntrypoint.compilation =
                    std::make_unique<B3::Compilation>(FINALIZE_CODE(linkBuffer, ("WebAssembly function[%i] %s", functionIndex, signatureDescription.ascii().data())), WTFMove(context.wasmEntrypointByproducts));
            }

            {
                LinkBuffer linkBuffer(*m_vm, *context.jsEntrypointJIT, nullptr);
                linkBuffer.link(context.jsEntrypointToWasmEntrypointCall, FunctionPtr(m_wasmInternalFunctions[functionIndex]->wasmEntrypoint.compilation->code().executableAddress()));

                m_wasmInternalFunctions[functionIndex]->jsToWasmEntrypoint.compilation =
                    std::make_unique<B3::Compilation>(FINALIZE_CODE(linkBuffer, ("JavaScript->WebAssembly entrypoint[%i] %s", functionIndex, signatureDescription.ascii().data())), WTFMove(context.jsEntrypointByproducts));
            }
        }
    }

    if (verbose || Options::reportCompileTimes()) {
        dataLogLn("Took ", (MonotonicTime::now() - startTime).microseconds(),
            " us to compile and link the module");
    }

    // Patch the call sites for each WebAssembly function.
    for (auto& unlinked : m_unlinkedWasmToWasmCalls) {
        for (auto& call : unlinked) {
            void* executableAddress;
            if (m_moduleInformation->isImportedFunctionFromFunctionIndexSpace(call.functionIndex)) {
                // FIXME imports could have been linked in B3, instead of generating a patchpoint. This condition should be replaced by a RELEASE_ASSERT. https://bugs.webkit.org/show_bug.cgi?id=166462
                executableAddress = call.target == UnlinkedWasmToWasmCall::Target::ToJs
                    ? m_wasmExitStubs.at(call.functionIndex).wasmToJs.code().executableAddress()
                    : m_wasmExitStubs.at(call.functionIndex).wasmToWasm.code().executableAddress();
            } else {
                ASSERT(call.target != UnlinkedWasmToWasmCall::Target::ToJs);
                executableAddress = m_wasmInternalFunctions.at(call.functionIndex - m_wasmExitStubs.size())->wasmEntrypoint.compilation->code().executableAddress();
            }
            MacroAssembler::repatchCall(call.callLocation, CodeLocationLabel(executableAddress));
        }
    }

    m_failed = false;
}
Example #14
CodeLocationLabel* ArityCheckFailReturnThunks::returnPCsFor(
    VM& vm, unsigned numExpectedArgumentsIncludingThis)
{
    ASSERT(numExpectedArgumentsIncludingThis >= 1);
    
    numExpectedArgumentsIncludingThis = WTF::roundUpToMultipleOf(
        stackAlignmentRegisters(), numExpectedArgumentsIncludingThis);
    
    {
        ConcurrentJITLocker locker(m_lock);
        if (numExpectedArgumentsIncludingThis < m_nextSize)
            return m_returnPCArrays.last().get();
    }
    
    ASSERT(!isCompilationThread());
    
    numExpectedArgumentsIncludingThis = std::max(numExpectedArgumentsIncludingThis, m_nextSize * 2);
    
    AssemblyHelpers jit(&vm, 0);
    
    Vector<AssemblyHelpers::Label> labels;
    
    for (unsigned size = m_nextSize; size <= numExpectedArgumentsIncludingThis; size += stackAlignmentRegisters()) {
        labels.append(jit.label());
        
        jit.load32(
            AssemblyHelpers::Address(
                AssemblyHelpers::stackPointerRegister,
                (JSStack::ArgumentCount - JSStack::CallerFrameAndPCSize) * sizeof(Register) +
                PayloadOffset),
            GPRInfo::regT4);
        jit.add32(
            AssemblyHelpers::TrustedImm32(
                JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + size - 1),
            GPRInfo::regT4, GPRInfo::regT2);
        jit.lshift32(AssemblyHelpers::TrustedImm32(3), GPRInfo::regT2);
        jit.addPtr(AssemblyHelpers::stackPointerRegister, GPRInfo::regT2);
        jit.loadPtr(GPRInfo::regT2, GPRInfo::regT2);
        
        jit.addPtr(
            AssemblyHelpers::TrustedImm32(size * sizeof(Register)),
            AssemblyHelpers::stackPointerRegister);
        
        // Thunks like ours want to use the return PC to figure out where things
        // were saved. So, we pay it forward.
        jit.store32(
            GPRInfo::regT4,
            AssemblyHelpers::Address(
                AssemblyHelpers::stackPointerRegister,
                (JSStack::ArgumentCount - JSStack::CallerFrameAndPCSize) * sizeof(Register) +
                PayloadOffset));
        
        jit.jump(GPRInfo::regT2);
    }
    
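    // Every per-arity thunk was emitted into the same buffer; link it once, then
    // record one return PC per arity via locationOf() below.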
    LinkBuffer linkBuffer(vm, &jit, GLOBAL_THUNK_ID);
    
    unsigned returnPCsSize = numExpectedArgumentsIncludingThis / stackAlignmentRegisters() + 1;
    std::unique_ptr<CodeLocationLabel[]> returnPCs =
        std::make_unique<CodeLocationLabel[]>(returnPCsSize);
    for (unsigned size = 0; size <= numExpectedArgumentsIncludingThis; size += stackAlignmentRegisters()) {
        unsigned index = size / stackAlignmentRegisters();
        RELEASE_ASSERT(index < returnPCsSize);
        if (size < m_nextSize)
            returnPCs[index] = m_returnPCArrays.last()[index];
        else
            returnPCs[index] = linkBuffer.locationOf(labels[(size - m_nextSize) / stackAlignmentRegisters()]);
    }

    CodeLocationLabel* result = returnPCs.get();

    {
        ConcurrentJITLocker locker(m_lock);
        m_returnPCArrays.append(std::move(returnPCs));
        m_refs.append(FINALIZE_CODE(linkBuffer, ("Arity check fail return thunks for up to numArgs = %u", numExpectedArgumentsIncludingThis)));
        m_nextSize = numExpectedArgumentsIncludingThis + stackAlignmentRegisters();
    }
    
    return result;
}
Example #15
void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the RegisterFile.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), GPRInfo::regT1);
    // Return here after register file check.
    Label fromRegisterFileCheck = label();


    // === Function body code generation ===
    SpeculativeJIT speculative(*this);
    compileBody(speculative);

    // === Function footer code generation ===
    //
    // Generate code to perform the slow register file check (if the fast one in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the register file check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    registerFileCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callRegisterFileCheck = call();
    jump(fromRegisterFileCheck);
    
    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    compileEntry();

    load32(Address(GPRInfo::callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))), GPRInfo::regT1);
    branch32(Equal, GPRInfo::regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callArityCheck = call();
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);


    // === Link ===
    LinkBuffer linkBuffer(*m_globalData, this);
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);
    
    // FIXME: switch the register file check & arity check over to DFGOperation style calls, not JIT stubs.
    linkBuffer.link(callRegisterFileCheck, cti_register_file_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = JITCode(linkBuffer.finalizeCode(), JITCode::DFGJIT);
}