Example #1
static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, bool didSeeUnwindInfo)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps stackmaps = jitCode->stackmaps;
    
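    // LLVM placed our locals wherever it liked; the captured-variables stack region
    // plus the DFG's machine local count gives the delta for rebasing every virtual
    // register that names a local.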
    int localsOffset =
        offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;
    
    int varargsSpillSlotsOffset;
    if (state.varargsSpillSlotsStackmapID != UINT_MAX)
        varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);
    else
        varargsSpillSlotsOffset = 0;
    
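    // Rebase the per-inline-call-frame metadata (argument recoveries, callee
    // recovery, special registers) to the relocated locals.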
    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;
        
        if (inlineCallFrame->argumentsRegister.isValid())
            inlineCallFrame->argumentsRegister += localsOffset;
        
        if (inlineCallFrame->argumentCountRegister.isValid())
            inlineCallFrame->argumentCountRegister += localsOffset;
        
        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }
        
        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }
    }
    
    if (codeBlock->usesArguments()) {
        codeBlock->setArgumentsRegister(
            VirtualRegister(codeBlock->argumentsRegister().offset() + localsOffset));
    }

    MacroAssembler::Label stackOverflowException;

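    // Out-of-line stubs that abandon FTL state and ask the runtime for a handler.
    // The second entry, for stack overflow, unwinds from the caller's frame because
    // the current frame was never fully established.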
    {
        CCallHelpers checkJIT(&vm, codeBlock);
        
        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        stackOverflowException = checkJIT.label();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, checkJIT, codeBlock, JITCompilationMustSucceed);
        linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
        linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, FunctionPtr(lookupExceptionHandlerFromCallerFrame));

        state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
    }

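    // Compile the OSR exit thunks and record each exit's thunk address and
    // patchable offset. Exits whose stackmap LLVM optimized out are skipped.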
    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());
        RELEASE_ASSERT(didSeeUnwindInfo);
        
        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, exitThunkGenerator, codeBlock, JITCompilationMustSucceed);
        
        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());
        
        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");

            auto iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);
            
            for (unsigned j = exit.m_values.size(); j--;) {
                ExitValue value = exit.m_values[j];
                if (!value.isInJSStackSomehow())
                    continue;
                if (!value.virtualRegister().isLocal())
                    continue;
                exit.m_values[j] = value.withVirtualRegister(
                    VirtualRegister(value.virtualRegister().offset() + localsOffset));
            }
            
            if (verboseCompilationEnabled()) {
                DumpContext context;
                dataLog("    Exit values: ", inContext(exit.m_values, &context), "\n");
            }
        }
        
        state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
    }

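    // Emit one shared slow-path stub containing the slow cases for every GetById,
    // PutById, and In IC; all of their exception checks funnel into a single jump
    // to the common handler.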
    if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty() || !state.checkIns.isEmpty()) {
        CCallHelpers slowPathJIT(&vm, codeBlock);
        
        CCallHelpers::JumpList exceptionTarget;
        
        for (unsigned i = state.getByIds.size(); i--;) {
            GetByIdDescriptor& getById = state.getByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");
            
            auto iter = recordMap.find(getById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            for (unsigned j = 0; j < iter->value.size(); ++j) {
                StackMaps::Record& record = iter->value[j];
            
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg result = record.locations[0].directGPR();
                GPRReg base = record.locations[1].directGPR();
                
                JITGetByIdGenerator gen(
                    codeBlock, getById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(result), NeedToSpill);
                
                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, getById.codeOrigin(), &exceptionTarget,
                    operationGetByIdOptimize, result, gen.stubInfo(), base, getById.uid());

                gen.reportSlowPathCall(begin, call);

                getById.m_slowPathDone.append(slowPathJIT.jump());
                getById.m_generators.append(gen);
            }
        }
        
        for (unsigned i = state.putByIds.size(); i--;) {
            PutByIdDescriptor& putById = state.putByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");
            
            auto iter = recordMap.find(putById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            for (unsigned j = 0; j < iter->value.size(); ++j) {
                StackMaps::Record& record = iter->value[j];
                
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg base = record.locations[0].directGPR();
                GPRReg value = record.locations[1].directGPR();
                
                JITPutByIdGenerator gen(
                    codeBlock, putById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(value), GPRInfo::patchpointScratchRegister, NeedToSpill,
                    putById.ecmaMode(), putById.putKind());
                
                MacroAssembler::Label begin = slowPathJIT.label();
                
                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, putById.codeOrigin(), &exceptionTarget,
                    gen.slowPathFunction(), gen.stubInfo(), value, base, putById.uid());
                
                gen.reportSlowPathCall(begin, call);
                
                putById.m_slowPathDone.append(slowPathJIT.jump());
                putById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            CheckInDescriptor& checkIn = state.checkIns[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling checkIn stackmap #", checkIn.stackmapID(), "\n");
            
            auto iter = recordMap.find(checkIn.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            for (unsigned j = 0; j < iter->value.size(); ++j) {
                StackMaps::Record& record = iter->value[j];
                RegisterSet usedRegisters = usedRegistersFor(record);
                GPRReg result = record.locations[0].directGPR();
                GPRReg obj = record.locations[1].directGPR();
                StructureStubInfo* stubInfo = codeBlock->addStubInfo(); 
                stubInfo->codeOrigin = checkIn.codeOrigin();
                stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
                stubInfo->patch.valueGPR = static_cast<int8_t>(result);
                stubInfo->patch.usedRegisters = usedRegisters;
                stubInfo->patch.spillMode = NeedToSpill;

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call slowCall = callOperation(
                    state, usedRegisters, slowPathJIT, checkIn.codeOrigin(), &exceptionTarget,
                    operationInOptimize, result, stubInfo, obj, checkIn.m_id);

                checkIn.m_slowPathDone.append(slowPathJIT.jump());
                
                checkIn.m_generators.append(CheckInGenerator(stubInfo, slowCall, begin));
            }
        }
        
        exceptionTarget.link(&slowPathJIT);
        MacroAssembler::Jump exceptionJump = slowPathJIT.jump();
        
        state.finalizer->sideCodeLinkBuffer = std::make_unique<LinkBuffer>(vm, slowPathJIT, codeBlock, JITCompilationMustSucceed);
        state.finalizer->sideCodeLinkBuffer->link(
            exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        
        for (unsigned i = state.getByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
                sizeOfGetById());
        }
        for (unsigned i = state.putByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
                sizeOfPutById());
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            generateCheckInICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
                sizeOfIn()); 
        } 
    }
    
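    // The stackmap records say where each call patchpoint actually landed. After
    // adjusting the ICs, assemble each call's fast path directly over its
    // patchpoint and pad the remainder with nops.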
    adjustCallICsForStackmaps(state.jsCalls, recordMap);
    
    for (unsigned i = state.jsCalls.size(); i--;) {
        JSCall& call = state.jsCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT);
        
        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        
        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfCall());
        if (!linkBuffer.isValid()) {
            dataLog("Failed to insert inline cache for call because we thought the size would be ", sizeOfCall(), " but it ended up being ", fastPathJIT.m_assembler.codeSize(), " prior to compaction.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }
        
        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfCall() - linkBuffer.size());
        
        call.link(vm, linkBuffer);
    }
    
    adjustCallICsForStackmaps(state.jsCallVarargses, recordMap);
    
    for (unsigned i = state.jsCallVarargses.size(); i--;) {
        JSCallVarargs& call = state.jsCallVarargses[i];
        
        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, graph, varargsSpillSlotsOffset);
        
        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = sizeOfICFor(call.node());

        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfIC);
        if (!linkBuffer.isValid()) {
            dataLog("Failed to insert inline cache for varargs call (specifically, ", Graph::opName(call.node()->op()), ") because we thought the size would be ", sizeOfIC, " but it ended up being ", fastPathJIT.m_assembler.codeSize(), " prior to compaction.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }
        
        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfIC - linkBuffer.size());
        
        call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
    }
    
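    // From here on we patch machine code that LLVM already emitted.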
    RepatchBuffer repatchBuffer(codeBlock);

    auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            RELEASE_ASSERT(stackOverflowException.isSet());

            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
        }
    }
    
    iter = recordMap.find(state.handleExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
            
            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        }
    }
    
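    // Point each OSR exit patchpoint at its thunk. Invalidation points are not
    // patched now; they are recorded as jump replacements to be applied if this
    // code is ever invalidated.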
    for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
        OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
        OSRExit& exit = jitCode->osrExit[exitIndex];
        iter = recordMap.find(exit.m_stackmapID);
        
        Vector<const void*> codeAddresses;
        
        if (iter != recordMap.end()) {
            for (unsigned i = iter->value.size(); i--;) {
                StackMaps::Record& record = iter->value[i];
                
                CodeLocationLabel source = CodeLocationLabel(
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
                
                codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());
                
                if (info.m_isInvalidationPoint)
                    jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
                else
                    repatchBuffer.replaceWithJump(source, info.m_thunkAddress);
            }
        }
        
        if (graph.compilation())
            graph.compilation()->addOSRExitSite(codeAddresses);
    }
}
Example #2
void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
    m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
    m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();

    if (!m_graph.m_plan.inlineCallFrames->isEmpty())
        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;
    
#if USE(JSVALUE32_64)
    m_jitCode->common.doubleConstants = WTFMove(m_graph.m_doubleConstants);
#endif
    
    m_graph.registerFrozenValues();

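    // Resolve switch jump tables: every case points at its block's head, with
    // unmatched entries defaulting to the fall-through block.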
    BitVector usedJumpTables;
    for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
        SwitchData& data = **iter;
        if (!data.didUseJumpTable)
            continue;
        
        if (data.kind == SwitchString)
            continue;
        
        RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);
        
        usedJumpTables.set(data.switchTableIndex);
        SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        table.ctiOffsets.grow(table.branchOffsets.size());
        for (unsigned j = table.ctiOffsets.size(); j--;)
            table.ctiOffsets[j] = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
                linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }
    
    for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
        if (usedJumpTables.get(i))
            continue;
        
        m_codeBlock->switchJumpTable(i).clear();
    }

    // NOTE: we cannot clear string switch tables because (1) we're running concurrently
    // and we cannot deref StringImpl's and (2) it would be weird to deref those
    // StringImpl's since we refer to them.
    for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
        SwitchData& data = **switchDataIter;
        if (!data.didUseJumpTable)
            continue;
        
        if (data.kind != SwitchString)
            continue;
        
        StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        StringJumpTable::StringOffsetTable::iterator iter;
        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
        for (iter = table.offsetTable.begin(); iter != end; ++iter)
            iter->value.ctiOffset = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            iter = table.offsetTable.find(myCase.value.stringImpl());
            RELEASE_ASSERT(iter != end);
            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(linkBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(linkBuffer);

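    // 'In' ICs are finalized by hand: record the deltas from the slow-path call's
    // return address to the done label, the patchable jump, and the slow case.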
    for (unsigned i = 0; i < m_ins.size(); ++i) {
        StructureStubInfo& info = *m_ins[i].m_stubInfo;
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call());
        info.patch.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_done));
        info.patch.deltaCallToJump = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_jump));
        info.callReturnLocation = callReturnLocation;
        info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
    }
    
    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        JSCallRecord& record = m_jsCalls[i];
        CallLinkInfo& info = *record.m_info;
        linkBuffer.link(record.m_slowCall, FunctionPtr(m_vm->getCTIStub(linkCallThunkGenerator).code().executableAddress()));
        info.setCallLocations(linkBuffer.locationOfNearCall(record.m_slowCall),
            linkBuffer.locationOf(record.m_targetToCheck),
            linkBuffer.locationOfNearCall(record.m_fastCall));
    }
    
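    // Route every OSR exit's patchable jump through the shared exit-generation
    // thunk, and remember any jump replacements the exits need.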
    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
        exit.correctJump(linkBuffer);
        if (info.m_replacementSource.isSet()) {
            m_jitCode->common.jumpReplacements.append(JumpReplacement(
                linkBuffer.locationOf(info.m_replacementSource),
                linkBuffer.locationOf(info.m_replacementDestination)));
        }
    }
    
    if (m_graph.compilation()) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.compilation();
    
    // Link new DFG exception handlers and remove baseline JIT handlers.
    m_codeBlock->clearExceptionHandlers();
    for (unsigned i = 0; i < m_exceptionHandlerOSRExitCallSites.size(); i++) {
        OSRExitCompilationInfo& info = m_exceptionHandlerOSRExitCallSites[i].exitInfo;
        if (info.m_replacementDestination.isSet()) {
            // If this is *not* set, it means that we already jumped to the OSR exit in pure generated control flow.
            // i.e., we explicitly emitted an exceptionCheck that we know will be caught in this machine frame.
            // If this *is set*, it means we will be landing at this code location from genericUnwind from an
            // exception thrown in a child call frame.
            CodeLocationLabel catchLabel = linkBuffer.locationOf(info.m_replacementDestination);
            HandlerInfo newExceptionHandler = m_exceptionHandlerOSRExitCallSites[i].baselineExceptionHandler;
            CallSiteIndex callSite = m_exceptionHandlerOSRExitCallSites[i].callSiteIndex;
            newExceptionHandler.start = callSite.bits();
            newExceptionHandler.end = callSite.bits() + 1;
            newExceptionHandler.nativeCode = catchLabel;
            m_codeBlock->appendExceptionHandler(newExceptionHandler);
        }
    }

    if (m_pcToCodeOriginMapBuilder.didBuildMapping())
        m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), linkBuffer));
}
Example #3
static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps stackmaps = jitCode->stackmaps;
    
    int localsOffset = offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;
    int varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);
    
    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;
        
        if (inlineCallFrame->argumentCountRegister.isValid())
            inlineCallFrame->argumentCountRegister += localsOffset;
        
        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }
        
        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }

        if (graph.hasDebuggerEnabled())
            codeBlock->setScopeRegister(codeBlock->scopeRegister() + localsOffset);
    }
    
    MacroAssembler::Label stackOverflowException;

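    // Exception stubs as above, except that callee saves are first copied to the
    // VM's buffer, and link-buffer allocation is allowed to fail: on failure we
    // flag the state and bail out cleanly.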
    {
        CCallHelpers checkJIT(&vm, codeBlock);
        
        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        stackOverflowException = checkJIT.label();
        checkJIT.copyCalleeSavesToVMCalleeSavesBuffer();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, checkJIT, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
        linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, FunctionPtr(lookupExceptionHandlerFromCallerFrame));

        state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
    }

    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());
        
        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, exitThunkGenerator, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        
        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());
        
        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");

            auto iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);
            
            for (unsigned j = exit.m_values.size(); j--;)
                exit.m_values[j] = exit.m_values[j].withLocalsOffset(localsOffset);
            for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
                materialization->accountForLocalsOffset(localsOffset);
            
            if (verboseCompilationEnabled()) {
                DumpContext context;
                dataLog("    Exit values: ", inContext(exit.m_values, &context), "\n");
                if (!exit.m_materializations.isEmpty()) {
                    dataLog("    Materializations: \n");
                    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
                        dataLog("        Materialize(", pointerDump(materialization), ")\n");
                }
            }
        }
        
        state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
    }

    if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty() || !state.checkIns.isEmpty()) {
        CCallHelpers slowPathJIT(&vm, codeBlock);
        
        CCallHelpers::JumpList exceptionTarget;
        
        for (unsigned i = state.getByIds.size(); i--;) {
            GetByIdDescriptor& getById = state.getByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");
            
            auto iter = recordMap.find(getById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            CodeOrigin codeOrigin = state.jitCode->common.codeOrigins[getById.callSiteIndex().bits()];
            for (unsigned j = 0; j < iter->value.size(); ++j) {
                StackMaps::Record& record = iter->value[j];
            
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg result = record.locations[0].directGPR();
                GPRReg base = record.locations[1].directGPR();
                
                JITGetByIdGenerator gen(
                    codeBlock, codeOrigin, getById.callSiteIndex(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(result), NeedToSpill);
                
                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    operationGetByIdOptimize, result, gen.stubInfo(), base, getById.uid());

                gen.reportSlowPathCall(begin, call);

                getById.m_slowPathDone.append(slowPathJIT.jump());
                getById.m_generators.append(gen);
            }
        }
        
        for (unsigned i = state.putByIds.size(); i--;) {
            PutByIdDescriptor& putById = state.putByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");
            
            auto iter = recordMap.find(putById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            CodeOrigin codeOrigin = state.jitCode->common.codeOrigins[putById.callSiteIndex().bits()];
            for (unsigned j = 0; j < iter->value.size(); ++j) {
                StackMaps::Record& record = iter->value[j];
                
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg base = record.locations[0].directGPR();
                GPRReg value = record.locations[1].directGPR();
                
                JITPutByIdGenerator gen(
                    codeBlock, codeOrigin, putById.callSiteIndex(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(value), GPRInfo::patchpointScratchRegister, NeedToSpill,
                    putById.ecmaMode(), putById.putKind());
                
                MacroAssembler::Label begin = slowPathJIT.label();
                
                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    gen.slowPathFunction(), gen.stubInfo(), value, base, putById.uid());
                
                gen.reportSlowPathCall(begin, call);
                
                putById.m_slowPathDone.append(slowPathJIT.jump());
                putById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            CheckInDescriptor& checkIn = state.checkIns[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling checkIn stackmap #", checkIn.stackmapID(), "\n");
            
            auto iter = recordMap.find(checkIn.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            CodeOrigin codeOrigin = state.jitCode->common.codeOrigins[checkIn.callSiteIndex().bits()];
            for (unsigned j = 0; j < iter->value.size(); ++j) {
                StackMaps::Record& record = iter->value[j];
                RegisterSet usedRegisters = usedRegistersFor(record);
                GPRReg result = record.locations[0].directGPR();
                GPRReg obj = record.locations[1].directGPR();
                StructureStubInfo* stubInfo = codeBlock->addStubInfo(AccessType::In); 
                stubInfo->codeOrigin = codeOrigin;
                stubInfo->callSiteIndex = checkIn.callSiteIndex();
                stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
                stubInfo->patch.valueGPR = static_cast<int8_t>(result);
                stubInfo->patch.usedRegisters = usedRegisters;
                stubInfo->patch.spillMode = NeedToSpill;

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call slowCall = callOperation(
                    state, usedRegisters, slowPathJIT, codeOrigin, &exceptionTarget,
                    operationInOptimize, result, stubInfo, obj, checkIn.m_uid);

                checkIn.m_slowPathDone.append(slowPathJIT.jump());
                
                checkIn.m_generators.append(CheckInGenerator(stubInfo, slowCall, begin));
            }
        }
        
        exceptionTarget.link(&slowPathJIT);
        MacroAssembler::Jump exceptionJump = slowPathJIT.jump();
        
        state.finalizer->sideCodeLinkBuffer = std::make_unique<LinkBuffer>(vm, slowPathJIT, codeBlock, JITCompilationCanFail);
        if (state.finalizer->sideCodeLinkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        state.finalizer->sideCodeLinkBuffer->link(
            exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        
        for (unsigned i = state.getByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
                sizeOfGetById());
        }
        for (unsigned i = state.putByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
                sizeOfPutById());
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            generateCheckInICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
                sizeOfIn()); 
        } 
    }
    
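    // Each call IC is now emitted with generateInlineIfPossibleOutOfLineIfNot:
    // the fast path goes over the patchpoint when it fits, and out of line behind
    // a jump when it does not.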
    adjustCallICsForStackmaps(state.jsCalls, recordMap);
    
    for (unsigned i = state.jsCalls.size(); i--;) {
        JSCall& call = state.jsCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, state.jitCode->stackmaps.stackSizeForLocals());

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfCall(), "JSCall inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer);
        });
    }
    
    adjustCallICsForStackmaps(state.jsCallVarargses, recordMap);
    
    for (unsigned i = state.jsCallVarargses.size(); i--;) {
        JSCallVarargs& call = state.jsCallVarargses[i];
        
        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, varargsSpillSlotsOffset);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = sizeOfICFor(call.node());

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "varargs call inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        });
    }

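    // Tail calls get the same treatment, with a per-call size estimate instead of
    // a fixed IC size.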
    adjustCallICsForStackmaps(state.jsTailCalls, recordMap);

    for (unsigned i = state.jsTailCalls.size(); i--;) {
        JSTailCall& call = state.jsTailCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(*state.jitCode.get(), fastPathJIT);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = call.estimatedSize();

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "tail call inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer);
        });
    }
    
    auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            RELEASE_ASSERT(stackOverflowException.isSet());

            MacroAssembler::replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
        }
    }
    
    iter = recordMap.find(state.handleExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
            
            MacroAssembler::replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        }
    }
    
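    // Patch each OSR exit patchpoint to jump to its thunk, deferring invalidation
    // points to the jump-replacement list.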
    for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
        OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
        OSRExit& exit = jitCode->osrExit[exitIndex];
        iter = recordMap.find(exit.m_stackmapID);
        
        Vector<const void*> codeAddresses;
        
        if (iter != recordMap.end()) {
            for (unsigned i = iter->value.size(); i--;) {
                StackMaps::Record& record = iter->value[i];
                
                CodeLocationLabel source = CodeLocationLabel(
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
                
                codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());
                
                if (info.m_isInvalidationPoint)
                    jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
                else
                    MacroAssembler::replaceWithJump(source, info.m_thunkAddress);
            }
        }
        
        if (graph.compilation())
            graph.compilation()->addOSRExitSite(codeAddresses);
    }
}
Example #4
void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
    m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
    m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();

    if (!m_graph.m_inlineCallFrames->isEmpty())
        m_jitCode->common.inlineCallFrames = m_graph.m_inlineCallFrames.release();
    
    m_jitCode->common.machineCaptureStart = m_graph.m_machineCaptureStart;
    m_jitCode->common.slowArguments = std::move(m_graph.m_slowArguments);

    BitVector usedJumpTables;
    for (unsigned i = m_graph.m_switchData.size(); i--;) {
        SwitchData& data = m_graph.m_switchData[i];
        if (!data.didUseJumpTable)
            continue;
        
        if (data.kind == SwitchString)
            continue;
        
        RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);
        
        usedJumpTables.set(data.switchTableIndex);
        SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough->index]);
        table.ctiOffsets.grow(table.branchOffsets.size());
        for (unsigned j = table.ctiOffsets.size(); j--;)
            table.ctiOffsets[j] = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            table.ctiOffsets[myCase.value.switchLookupValue() - table.min] =
                linkBuffer.locationOf(m_blockHeads[myCase.target->index]);
        }
    }
    
    for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
        if (usedJumpTables.get(i))
            continue;
        
        m_codeBlock->switchJumpTable(i).clear();
    }

    // NOTE: we cannot clear string switch tables because (1) we're running concurrently
    // and we cannot deref StringImpl's and (2) it would be weird to deref those
    // StringImpl's since we refer to them.
    for (unsigned i = m_graph.m_switchData.size(); i--;) {
        SwitchData& data = m_graph.m_switchData[i];
        if (!data.didUseJumpTable)
            continue;
        
        if (data.kind != SwitchString)
            continue;
        
        StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough->index]);
        StringJumpTable::StringOffsetTable::iterator iter;
        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
        for (iter = table.offsetTable.begin(); iter != end; ++iter)
            iter->value.ctiOffset = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            iter = table.offsetTable.find(myCase.value.stringImpl());
            RELEASE_ASSERT(iter != end);
            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target->index]);
        }
    }

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(linkBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(linkBuffer);

    for (unsigned i = 0; i < m_ins.size(); ++i) {
        StructureStubInfo& info = *m_ins[i].m_stubInfo;
        CodeLocationLabel jump = linkBuffer.locationOf(m_ins[i].m_jump);
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call());
        info.hotPathBegin = jump;
        info.callReturnLocation = callReturnLocation;
        info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
    }
    
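    // Older-style call linking: CallLinkInfos live in a table owned by the
    // CodeBlock, so size the table first, then fill in each entry from the link
    // buffer.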
    m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_jsCalls[i].m_callType;
        info.isDFG = true;
        info.codeOrigin = m_jsCalls[i].m_codeOrigin;
        linkBuffer.link(m_jsCalls[i].m_slowCall, FunctionPtr((m_vm->getCTIStub(info.callType == CallLinkInfo::Construct ? linkConstructThunkGenerator : linkCallThunkGenerator)).code().executableAddress()));
        info.callReturnLocation = linkBuffer.locationOfNearCall(m_jsCalls[i].m_slowCall);
        info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
        info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
        info.calleeGPR = static_cast<unsigned>(m_jsCalls[i].m_callee);
    }
    
    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
        exit.correctJump(linkBuffer);
        if (info.m_replacementSource.isSet()) {
            m_jitCode->common.jumpReplacements.append(JumpReplacement(
                linkBuffer.locationOf(info.m_replacementSource),
                linkBuffer.locationOf(info.m_replacementDestination)));
        }
    }
    
    if (m_graph.compilation()) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());
    
    m_jitCode->common.compilation = m_graph.compilation();
}