Example #1
void InlineCallFrame::dumpInContext(PrintStream& out, DumpContext* context) const
{
    out.print(briefFunctionInformation(), ":<", RawPointer(executable.get()));
    if (executable->isStrictMode())
        out.print(" (StrictMode)");
    out.print(", bc#", caller.bytecodeIndex, ", ", kind);
    if (isClosureCall)
        out.print(", closure call");
    else
        out.print(", known callee: ", inContext(calleeRecovery.constant(), context));
    out.print(", numArgs+this = ", arguments.size());
    out.print(", stackOffset = ", stackOffset);
    out.print(" (", virtualRegisterForLocal(0), " maps to ", virtualRegisterForLocal(0) + stackOffset, ")>");
}
void InlineCallFrame::dumpInContext(PrintStream& out, DumpContext* context) const
{
    out.print(briefFunctionInformation(), ":<", RawPointer(baselineCodeBlock.get()));
    if (isStrictMode())
        out.print(" (StrictMode)");
    out.print(", bc#", directCaller.bytecodeIndex, ", ", static_cast<Kind>(kind));
    if (isClosureCall)
        out.print(", closure call");
    else
        out.print(", known callee: ", inContext(calleeRecovery.constant(), context));
    out.print(", numArgs+this = ", argumentCountIncludingThis);
    out.print(", numFixup = ", argumentsWithFixup.size() - argumentCountIncludingThis);
    out.print(", stackOffset = ", stackOffset);
    out.print(" (", virtualRegisterForLocal(0), " maps to ", virtualRegisterForLocal(0) + stackOffset, ")>");
}
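
A side note on the "maps to" arithmetic in both dumps: locals sit at negative offsets from the call frame, so adding an inline frame's stackOffset relocates the whole local range. A minimal standalone sketch of that convention (a paraphrase of the local-to-operand mapping behind virtualRegisterForLocal, not the production API):

#include <cassert>

// Locals occupy operands -1, -2, -3, ...; arguments and the call-frame
// header occupy non-negative operands.
inline int localToOperand(int local) { return -1 - local; }
inline int operandToLocal(int operand) { return -1 - operand; }

int main()
{
    assert(localToOperand(0) == -1);
    assert(operandToLocal(localToOperand(7)) == 7);
    // With a hypothetical inline-frame stackOffset of -10, local 0 of the
    // inlined callee lands at operand -11, which is exactly what
    // "virtualRegisterForLocal(0) + stackOffset" prints above.
    int stackOffset = -10;
    assert(localToOperand(0) + stackOffset == -11);
    return 0;
}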
    void treatRootBlock(BasicBlock* block, InsertionSet& insertionSet)
    {
        Operands<VariableAccessData*> initialAccessData(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr);
        Operands<Node*> initialAccessNodes(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr);
        for (unsigned i = 0; i < block->size(); i++) {
            Node* node = block->at(i);
            if (!node->hasVariableAccessData(m_graph))
                continue;

            VirtualRegister operand = node->local();
            if (initialAccessData.operand(operand))
                continue;

            DFG_ASSERT(m_graph, node, node->op() != SetLocal); // We should have inserted a Flush before this!
            initialAccessData.operand(operand) = node->variableAccessData();
            initialAccessNodes.operand(operand) = node;
        }

        // We want every Flush to be able to reach backwards to
        // a SetLocal. Doing this in the root block achieves this goal.
        NodeOrigin origin = block->at(0)->origin;
        Node* undefined = insertionSet.insertConstant(0, origin, jsUndefined());

        for (unsigned i = 0; i < block->variablesAtTail.numberOfLocals(); i++) {
            VirtualRegister operand = virtualRegisterForLocal(i);
            VariableAccessData* accessData;
            DFG_ASSERT(m_graph, nullptr, initialAccessNodes.operand(operand)->op() == Flush); // We should have inserted a Flush before any SetLocal/SetArgument for the local that we are analyzing now.
            accessData = initialAccessData.operand(operand);
            DFG_ASSERT(m_graph, nullptr, accessData);
            insertionSet.insertNode(0, SpecNone, 
                SetLocal, origin, OpInfo(accessData), Edge(undefined));
        }
    }
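
treatRootBlock addresses both of its tables through Operands<T>::operand(), which dispatches a VirtualRegister to either the argument or the local half of one combined table. A toy re-creation, assuming the negative-offsets-are-locals convention (kThisArgumentOffset below is a made-up placeholder, not the real header size):

#include <cassert>
#include <cstddef>
#include <vector>

// kThisArgumentOffset is a made-up placeholder; the real offset of
// argument 0 comes from CallFrame::thisArgumentOffset().
constexpr int kThisArgumentOffset = 6;

template<typename T>
class ToyOperands {
public:
    ToyOperands(std::size_t numArguments, std::size_t numLocals, T init)
        : m_arguments(numArguments, init), m_locals(numLocals, init) { }

    // Negative operands are locals; operands at or above the frame header
    // are arguments. Operands<T>::operand() in the code above dispatches a
    // VirtualRegister the same way.
    T& operand(int offset)
    {
        if (offset < 0)
            return m_locals[static_cast<std::size_t>(-1 - offset)];
        return m_arguments[static_cast<std::size_t>(offset - kThisArgumentOffset)];
    }

private:
    std::vector<T> m_arguments;
    std::vector<T> m_locals;
};

int main()
{
    ToyOperands<int> table(2, 3, 0);
    table.operand(-1) = 42;                 // local 0
    table.operand(kThisArgumentOffset) = 7; // argument 0 (this)
    assert(table.operand(-1) == 42 && table.operand(kThisArgumentOffset) == 7);
    return 0;
}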
FastBitVector BytecodeLivenessAnalysis::getLivenessInfoAtBytecodeOffset(unsigned bytecodeOffset)
{
    FastBitVector temp;
    FastBitVector result;

    getLivenessInfoForNonCapturedVarsAtBytecodeOffset(bytecodeOffset, temp);

    unsigned numCapturedVars = numberOfCapturedVariables(m_codeBlock);
    if (numCapturedVars) {
        int firstCapturedLocal = VirtualRegister(captureStart(m_codeBlock)).toLocal();
        result.resize(temp.numBits() + numCapturedVars);
        for (unsigned i = 0; i < numCapturedVars; ++i)
            result.set(firstCapturedLocal + i);
    } else
        result.resize(temp.numBits());

    int tempLength = temp.numBits();
    ASSERT(tempLength >= 0);
    for (int i = 0; i < tempLength; i++) {
        if (!temp.get(i))
            continue;

        if (!numCapturedVars) {
            result.set(i);
            continue;
        }

        if (virtualRegisterForLocal(i).offset() > captureStart(m_codeBlock))
            result.set(i);
        else 
            result.set(numCapturedVars + i);
    }
    return result;
}
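
The index juggling here is easy to misread: captured variables are unconditionally live, and the liveness bits computed into temp are placed around the captured range. A simplified model of the merge, assuming a single firstCapturedLocal boundary and plain std::vector<bool> in place of FastBitVector:

#include <cstddef>
#include <vector>

// Toy model of the merge above: captured variables are treated as
// unconditionally live, and the analysis bits from `temp` are slotted in
// around the captured range. Index handling is simplified; the caller
// must size things consistently.
std::vector<bool> mergeCapturedLiveness(const std::vector<bool>& temp,
                                        std::size_t numCapturedVars,
                                        std::size_t firstCapturedLocal)
{
    std::vector<bool> result(temp.size() + numCapturedVars, false);
    for (std::size_t i = 0; i < numCapturedVars; ++i)
        result[firstCapturedLocal + i] = true; // captured: always live
    for (std::size_t i = 0; i < temp.size(); ++i) {
        if (!temp[i])
            continue;
        if (i < firstCapturedLocal)
            result[i] = true;                   // below the captured range
        else
            result[numCapturedVars + i] = true; // shifted past it
    }
    return result;
}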
void JITCompiler::compile()
{
    setStartOfCode();
    compileEntry();
    m_speculative = std::make_unique<SpeculativeJIT>(*this);

    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);

    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();
    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();
    compileBody();
    setEndOfMainPath();

    // === Footer code generation ===
    //
    // Generate the stack overflow handling; if the stack check in the entry head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
    
    compileExceptionHandlers();
    linkOSRExits();
    
    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }
    
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    disassemble(*linkBuffer);
    
    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.release(), WTFMove(linkBuffer));
}
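
The stack check planted at the top of compile() reduces to pointer arithmetic: the frame grows downward, virtualRegisterForLocal(n - 1).offset() is -n, and the branch fires when the VM's stack limit lies above the lowest address the frame may touch. The same test as a scalar sketch (illustrative only, not JIT code):

#include <cstddef>
#include <cstdint>

// Scalar restatement of the stack check above: regT1 ends up holding
// callFrame - n * sizeof(Register), and branchPtr(Above, limit, regT1)
// fires when the limit lies above that lowest frame address.
bool wouldOverflow(std::uintptr_t callFrame, unsigned requiredRegisters,
                   std::uintptr_t stackLimit, std::size_t registerSize = 8)
{
    std::uintptr_t frameTop = callFrame - requiredRegisters * registerSize;
    return frameTop < stackLimit;
}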
void* prepareOSREntry(
    ExecState* exec, CodeBlock* dfgCodeBlock, CodeBlock* entryCodeBlock,
    unsigned bytecodeIndex, unsigned streamIndex)
{
    VM& vm = exec->vm();
    CodeBlock* baseline = dfgCodeBlock->baselineVersion();
    DFG::JITCode* dfgCode = dfgCodeBlock->jitCode()->dfg();
    ForOSREntryJITCode* entryCode = entryCodeBlock->jitCode()->ftlForOSREntry();
    
    if (Options::verboseOSR()) {
        dataLog(
            "FTL OSR from ", *dfgCodeBlock, " to ", *entryCodeBlock, " at bc#",
            bytecodeIndex, ".\n");
    }
    
    if (bytecodeIndex != entryCode->bytecodeIndex()) {
        if (Options::verboseOSR())
            dataLog("    OSR failed because we don't have an entrypoint for bc#", bytecodeIndex, "; ours is for bc#", entryCode->bytecodeIndex());
        return 0;
    }
    
    Operands<JSValue> values;
    dfgCode->reconstruct(
        exec, dfgCodeBlock, CodeOrigin(bytecodeIndex), streamIndex, values);
    
    if (Options::verboseOSR())
        dataLog("    Values at entry: ", values, "\n");
    
    for (int argument = values.numberOfArguments(); argument--;) {
        RELEASE_ASSERT(
            exec->r(virtualRegisterForArgument(argument).offset()).jsValue() == values.argument(argument));
    }
    
    RELEASE_ASSERT(
        static_cast<int>(values.numberOfLocals()) == baseline->m_numCalleeRegisters);
    
    EncodedJSValue* scratch = static_cast<EncodedJSValue*>(
        entryCode->entryBuffer()->dataBuffer());
    
    for (int local = values.numberOfLocals(); local--;)
        scratch[local] = JSValue::encode(values.local(local));
    
    int stackFrameSize = entryCode->common.requiredRegisterCountForExecutionAndExit();
    if (!vm.interpreter->stack().grow(&exec->registers()[virtualRegisterForLocal(stackFrameSize).offset()])) {
        if (Options::verboseOSR())
            dataLog("    OSR failed because stack growth failed.\n");
        return 0;
    }
    
    exec->setCodeBlock(entryCodeBlock);
    
    void* result = entryCode->addressForCall().executableAddress();
    if (Options::verboseOSR())
        dataLog("    Entry will succeed, going to address", RawPointer(result), "\n");
    
    return result;
}
 void jettisonBlock(BasicBlock* block, BasicBlock* jettisonedBlock, CodeOrigin boundaryCodeOrigin)
 {
     for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfArguments(); ++i)
         keepOperandAlive(block, jettisonedBlock, boundaryCodeOrigin, virtualRegisterForArgument(i));
     for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfLocals(); ++i)
         keepOperandAlive(block, jettisonedBlock, boundaryCodeOrigin, virtualRegisterForLocal(i));
     
     fixJettisonedPredecessors(block, jettisonedBlock);
 }
 VirtualRegister assign(const Vector<unsigned>& allocation, VirtualRegister src)
 {
     VirtualRegister result = src;
     if (result.isLocal()) {
         unsigned myAllocation = allocation[result.toLocal()];
         if (myAllocation == UINT_MAX)
             result = VirtualRegister();
         else
             result = virtualRegisterForLocal(myAllocation);
     }
     return result;
 }
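
A quick usage sketch of assign(): the allocation vector is the packed remap produced by this phase, with UINT_MAX marking a dead local. Toy types stand in for VirtualRegister here:

#include <cassert>
#include <climits>
#include <cstddef>
#include <vector>

// Toy restatement of assign(): UINT_MAX in the allocation table means the
// local is dead; anything else is its packed machine slot. -1 stands in
// for an invalid VirtualRegister.
int toyAssign(const std::vector<unsigned>& allocation, int local)
{
    unsigned slot = allocation[static_cast<std::size_t>(local)];
    return slot == UINT_MAX ? -1 : static_cast<int>(slot);
}

int main()
{
    std::vector<unsigned> allocation = { 0, UINT_MAX, 1 }; // local 1 is dead
    assert(toyAssign(allocation, 0) == 0);
    assert(toyAssign(allocation, 1) == -1);
    assert(toyAssign(allocation, 2) == 1);
    return 0;
}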
Example #9
void OSREntryData::dumpInContext(PrintStream& out, DumpContext* context) const
{
    out.print("bc#", m_bytecodeIndex, ", machine code offset = ", m_machineCodeOffset);
    out.print(", stack rules = [");
    
    auto printOperand = [&] (VirtualRegister reg) {
        out.print(inContext(m_expectedValues.operand(reg), context), " (");
        VirtualRegister toReg;
        bool overwritten = false;
        for (OSREntryReshuffling reshuffling : m_reshufflings) {
            if (reg == VirtualRegister(reshuffling.fromOffset)) {
                toReg = VirtualRegister(reshuffling.toOffset);
                break;
            }
            if (reg == VirtualRegister(reshuffling.toOffset))
                overwritten = true;
        }
        if (!overwritten && !toReg.isValid())
            toReg = reg;
        if (toReg.isValid()) {
            if (toReg.isLocal() && !m_machineStackUsed.get(toReg.toLocal()))
                out.print("ignored");
            else
                out.print("maps to ", toReg);
        } else
            out.print("overwritten");
        if (reg.isLocal() && m_localsForcedDouble.get(reg.toLocal()))
            out.print(", forced double");
        if (reg.isLocal() && m_localsForcedAnyInt.get(reg.toLocal()))
            out.print(", forced machine int");
        out.print(")");
    };
    
    CommaPrinter comma;
    for (size_t argumentIndex = m_expectedValues.numberOfArguments(); argumentIndex--;) {
        out.print(comma, "arg", argumentIndex, ":");
        printOperand(virtualRegisterForArgument(argumentIndex));
    }
    for (size_t localIndex = 0; localIndex < m_expectedValues.numberOfLocals(); ++localIndex) {
        out.print(comma, "loc", localIndex, ":");
        printOperand(virtualRegisterForLocal(localIndex));
    }
    
    out.print("], machine stack used = ", m_machineStackUsed);
}
    void treatRegularBlock(BasicBlock* block, InsertionSet& insertionSet)
    {
        Operands<VariableAccessData*> currentBlockAccessData(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr);
        // Insert a Flush before every SetLocal to properly pattern the graph such that 
        // any range between SetLocal and Flush has access to the local on the stack.
        {
            for (unsigned i = 0; i < block->size(); i++) {
                Node* node = block->at(i);
                bool isPrimordialSetArgument = node->op() == SetArgument && node->local().isArgument() && node == m_graph.m_arguments[node->local().toArgument()];
                if (node->op() == SetLocal || (node->op() == SetArgument && !isPrimordialSetArgument)) {
                    VirtualRegister operand = node->local();
                    VariableAccessData* flushAccessData = currentBlockAccessData.operand(operand);
                    if (!flushAccessData)
                        flushAccessData = newVariableAccessData(operand);

                    insertionSet.insertNode(i, SpecNone, 
                        Flush, node->origin, OpInfo(flushAccessData));
                }

                if (node->hasVariableAccessData(m_graph))
                    currentBlockAccessData.operand(node->local()) = node->variableAccessData();
            }
        }

        // Flush everything at the end of the block.
        {
            NodeOrigin origin = block->at(block->size() - 1)->origin;
            auto insertFlushAtEnd = [&] (VirtualRegister operand) {
                VariableAccessData* accessData = currentBlockAccessData.operand(operand);
                if (!accessData)
                    accessData = newVariableAccessData(operand);

                currentBlockAccessData.operand(operand) = accessData;

                insertionSet.insertNode(block->size(), SpecNone, 
                    Flush, origin, OpInfo(accessData));
            };

            for (unsigned i = 0; i < block->variablesAtTail.numberOfLocals(); i++)
                insertFlushAtEnd(virtualRegisterForLocal(i));
            for (unsigned i = 0; i < block->variablesAtTail.numberOfArguments(); i++)
                insertFlushAtEnd(virtualRegisterForArgument(i));
        }
    }
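
Both treat*Block helpers record insertions against the indices of the block as it was scanned and splice them in afterwards; inserting eagerly would shift every later index. A toy InsertionSet showing why that pattern works (strings stand in for nodes; this is a sketch, not the real DFG class):

#include <algorithm>
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

// Toy InsertionSet: record (index, node) pairs while scanning a block, then
// splice them in one pass so the indices recorded against the original
// block stay valid.
struct ToyInsertionSet {
    std::vector<std::pair<std::size_t, std::string>> insertions;

    void insert(std::size_t index, std::string node)
    {
        insertions.push_back({ index, std::move(node) });
    }

    void execute(std::vector<std::string>& block)
    {
        std::stable_sort(insertions.begin(), insertions.end(),
            [](const auto& a, const auto& b) { return a.first < b.first; });
        // Apply from the back so earlier indices are unaffected by the
        // elements already spliced in.
        for (std::size_t i = insertions.size(); i--;)
            block.insert(block.begin() + insertions[i].first, insertions[i].second);
        insertions.clear();
    }
};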
Example #11
void JITCompiler::compileFunction()
{
    SamplingRegion samplingRegion("DFG Backend");
    
    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit()).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), GPRInfo::regT1);
    // Return here after stack check.
    Label fromStackCheck = label();


    // === Function body code generation ===
    m_speculative = adoptPtr(new SpeculativeJIT(*this));
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the slow stack check (if the fast one in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    stackCheck.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));
    m_speculative->callOperationWithCallFrameRollbackOnException(operationStackCheck, m_codeBlock);
    jump(fromStackCheck);
    
    // The fast entry point into a function does not check that the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
    branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    m_callArityFixup = call();
    jump(fromArityCheck);
    
    // Generate slow path code.
    m_speculative->runSlowPathGenerators();
    
    compileExceptionHandlers();
    linkOSRExits();
    
    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();
}
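
The arity-check entry above only runs when the caller could not statically prove the argument count; conceptually its job is to pad the missing arguments before jumping back to fromArityCheck. A deliberately tiny model of the fixup (the real thunk relocates the call frame and return address rather than growing a vector):

#include <cstddef>
#include <string>
#include <vector>

// Toy arity fixup: pad the argument list with undefined until it matches
// the declared parameter count; only the idea survives here.
void arityFixup(std::vector<std::string>& argumentsIncludingThis, std::size_t numParameters)
{
    while (argumentsIncludingThis.size() < numParameters)
        argumentsIncludingThis.push_back("undefined");
}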
 bool run()
 {
     ASSERT(m_graph.m_form == SSA);
     
     for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
         BasicBlock* block = m_graph.block(blockIndex);
         if (!block)
             continue;
         block->ssa->availabilityAtHead.fill(Availability());
         block->ssa->availabilityAtTail.fill(Availability());
     }
     
     BasicBlock* root = m_graph.block(0);
     for (unsigned argument = root->ssa->availabilityAtHead.numberOfArguments(); argument--;) {
         root->ssa->availabilityAtHead.argument(argument) =
             Availability::unavailable().withFlush(
                 FlushedAt(FlushedJSValue, virtualRegisterForArgument(argument)));
     }
     for (unsigned local = root->ssa->availabilityAtHead.numberOfLocals(); local--;)
         root->ssa->availabilityAtHead.local(local) = Availability::unavailable();
     
     if (m_graph.m_plan.mode == FTLForOSREntryMode) {
         for (unsigned local = m_graph.m_profiledBlock->m_numCalleeRegisters; local--;) {
             root->ssa->availabilityAtHead.local(local) =
                 Availability::unavailable().withFlush(
                     FlushedAt(FlushedJSValue, virtualRegisterForLocal(local)));
         }
     }
     
     // This could be made more efficient by processing blocks in reverse postorder.
     Operands<Availability> availability;
     bool changed;
     do {
         changed = false;
         
         for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
             BasicBlock* block = m_graph.block(blockIndex);
             if (!block)
                 continue;
             
             availability = block->ssa->availabilityAtHead;
             
             for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                 Node* node = block->at(nodeIndex);
                 
                 switch (node->op()) {
                 case SetLocal: {
                     VariableAccessData* variable = node->variableAccessData();
                     availability.operand(variable->local()) =
                         Availability(node->child1().node(), variable->flushedAt());
                     break;
                 }
                     
                 case GetArgument: {
                     VariableAccessData* variable = node->variableAccessData();
                     availability.operand(variable->local()) =
                         Availability(node, variable->flushedAt());
                     break;
                 }
                     
                 case MovHint:
                 case MovHintAndCheck: {
                     VariableAccessData* variable = node->variableAccessData();
                     availability.operand(variable->local()) =
                         Availability(node->child1().node());
                     break;
                 }
                     
                 case ZombieHint: {
                     VariableAccessData* variable = node->variableAccessData();
                     availability.operand(variable->local()) = Availability::unavailable();
                     break;
                 }
                     
                 default:
                     break;
                 }
             }
             
             if (availability == block->ssa->availabilityAtTail)
                 continue;
             
             block->ssa->availabilityAtTail = availability;
             changed = true;
             
             for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) {
                 BasicBlock* successor = block->successor(successorIndex);
                 for (unsigned i = availability.size(); i--;) {
                     successor->ssa->availabilityAtHead[i] = availability[i].merge(
                         successor->ssa->availabilityAtHead[i]);
                 }
             }
         }
     } while (changed);
     
     return true;
 }
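
run() is a standard forward dataflow fixpoint. Stripped of the Availability lattice, the control skeleton looks like the sketch below, with booleans and logical-or standing in for Availability and merge(); only the loop structure is preserved, not the real transfer functions:

#include <cstddef>
#include <vector>

// Skeleton of the fixpoint above, reduced to booleans: keep merging each
// block's tail state into its successors' heads until nothing changes.
struct ToyBlock {
    std::vector<bool> atHead, atTail;
    std::vector<ToyBlock*> successors;
};

void solveToFixpoint(std::vector<ToyBlock*>& blocks)
{
    bool changed;
    do {
        changed = false;
        for (ToyBlock* block : blocks) {
            std::vector<bool> state = block->atHead;
            // (the per-node transfer function would update `state` here)
            if (state == block->atTail)
                continue;
            block->atTail = state;
            changed = true;
            for (ToyBlock* successor : block->successors) {
                for (std::size_t i = 0; i < state.size(); ++i)
                    successor->atHead[i] = successor->atHead[i] || state[i]; // merge
            }
        }
    } while (changed);
}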
void BytecodeGeneratorification::run()
{
    // We calculate the liveness at each merge point. This tells us which registers should be conservatively saved and resumed.

    {
        GeneratorLivenessAnalysis pass(*this);
        pass.run(m_codeBlock, m_instructions);
    }

    BytecodeRewriter rewriter(m_bytecodeGenerator, m_graph, m_codeBlock, m_instructions);

    // Set up the global switch for the generator.
    {
        auto nextToEnterPoint = enterPoint().next();
        unsigned switchTableIndex = m_codeBlock->numberOfSwitchJumpTables();
        VirtualRegister state = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::State));
        auto& jumpTable = m_codeBlock->addSwitchJumpTable();
        jumpTable.min = 0;
        jumpTable.branchOffsets.resize(m_yields.size() + 1);
        jumpTable.branchOffsets.fill(0);
        jumpTable.add(0, nextToEnterPoint.offset());
        for (unsigned i = 0; i < m_yields.size(); ++i)
            jumpTable.add(i + 1, m_yields[i].point);

        rewriter.insertFragmentBefore(nextToEnterPoint, [&](BytecodeRewriter::Fragment& fragment) {
            fragment.appendInstruction<OpSwitchImm>(switchTableIndex, BoundLabel(nextToEnterPoint.offset()), state);
        });
    }

    for (const YieldData& data : m_yields) {
        VirtualRegister scope = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::Frame));

        auto instruction = m_instructions.at(data.point);
        // Emit save sequence.
        rewriter.insertFragmentBefore(instruction, [&](BytecodeRewriter::Fragment& fragment) {
            data.liveness.forEachSetBit([&](size_t index) {
                VirtualRegister operand = virtualRegisterForLocal(index);
                Storage storage = storageForGeneratorLocal(index);

                fragment.appendInstruction<OpPutToScope>(
                    scope, // scope
                    storage.identifierIndex, // identifier
                    operand, // value
                    GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization), // info
                    m_generatorFrameSymbolTableIndex, // symbol table constant index
                    storage.scopeOffset.offset() // scope offset
                );
            });

            // Insert op_ret just after save sequence.
            fragment.appendInstruction<OpRet>(data.argument);
        });

        // Emit resume sequence.
        rewriter.insertFragmentAfter(instruction, [&](BytecodeRewriter::Fragment& fragment) {
            data.liveness.forEachSetBit([&](size_t index) {
                VirtualRegister operand = virtualRegisterForLocal(index);
                Storage storage = storageForGeneratorLocal(index);

                fragment.appendInstruction<OpGetFromScope>(
                    operand, // dst
                    scope, // scope
                    storage.identifierIndex, // identifier
                    GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization), // info
                    0, // local scope depth
                    storage.scopeOffset.offset() // scope offset
                );
            });
        });

        // Clip the unnecessary bytecodes.
        rewriter.removeBytecode(instruction);
    }

    rewriter.execute();
}
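
The jump table built above gives the resumed generator a single dispatch point: state 0 falls through to the original entry, and state i + 1 lands on the resume sequence emitted after the i-th yield. A scalar sketch of that dispatch (the real thing is an op_switch_imm over branch offsets):

#include <cstdio>

// Conceptual shape of the installed dispatch; two yields assumed here.
void resumeGenerator(int state)
{
    switch (state) {
    case 0:  std::puts("enter at the original entry point"); break;
    case 1:  std::puts("resume after yield #0");             break;
    case 2:  std::puts("resume after yield #1");             break;
    default: std::puts("unreachable");                       break;
    }
}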
    void handleBlockForTryCatch(BasicBlock* block, InsertionSet& insertionSet)
    {
        HandlerInfo* currentExceptionHandler = nullptr;
        FastBitVector liveAtCatchHead;
        liveAtCatchHead.resize(m_graph.block(0)->variablesAtTail.numberOfLocals());

        HandlerInfo* cachedHandlerResult;
        CodeOrigin cachedCodeOrigin;
        auto catchHandler = [&] (CodeOrigin origin) -> HandlerInfo* {
            ASSERT(origin);
            if (origin == cachedCodeOrigin)
                return cachedHandlerResult;

            unsigned bytecodeIndexToCheck = origin.bytecodeIndex;

            cachedCodeOrigin = origin;

            while (1) {
                InlineCallFrame* inlineCallFrame = origin.inlineCallFrame;
                CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
                if (HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeIndexToCheck)) {
                    liveAtCatchHead.clearAll();

                    unsigned catchBytecodeIndex = handler->target;
                    m_graph.forAllLocalsLiveInBytecode(CodeOrigin(catchBytecodeIndex, inlineCallFrame), [&] (VirtualRegister operand) {
                        liveAtCatchHead[operand.toLocal()] = true;
                    });

                    cachedHandlerResult = handler;
                    break;
                }

                if (!inlineCallFrame) {
                    cachedHandlerResult = nullptr;
                    break;
                }

                bytecodeIndexToCheck = inlineCallFrame->directCaller.bytecodeIndex;
                origin = inlineCallFrame->directCaller;
            }

            return cachedHandlerResult;
        };

        Operands<VariableAccessData*> currentBlockAccessData(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr);
        HashSet<InlineCallFrame*> seenInlineCallFrames;

        auto flushEverything = [&] (NodeOrigin origin, unsigned index) {
            RELEASE_ASSERT(currentExceptionHandler);
            auto flush = [&] (VirtualRegister operand, bool alwaysInsert) {
                if ((operand.isLocal() && liveAtCatchHead[operand.toLocal()]) 
                    || operand.isArgument()
                    || alwaysInsert) {

                    ASSERT(isValidFlushLocation(block, index, operand));

                    VariableAccessData* accessData = currentBlockAccessData.operand(operand);
                    if (!accessData)
                        accessData = newVariableAccessData(operand);

                    currentBlockAccessData.operand(operand) = accessData;

                    insertionSet.insertNode(index, SpecNone, 
                        Flush, origin, OpInfo(accessData));
                }
            };

            for (unsigned local = 0; local < block->variablesAtTail.numberOfLocals(); local++)
                flush(virtualRegisterForLocal(local), false);
            for (InlineCallFrame* inlineCallFrame : seenInlineCallFrames)
                flush(VirtualRegister(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset()), true);
            flush(VirtualRegister(CallFrame::thisArgumentOffset()), true);

            seenInlineCallFrames.clear();
        };

        for (unsigned nodeIndex = 0; nodeIndex < block->size(); nodeIndex++) {
            Node* node = block->at(nodeIndex);

            {
                HandlerInfo* newHandler = catchHandler(node->origin.semantic);
                if (newHandler != currentExceptionHandler && currentExceptionHandler)
                    flushEverything(node->origin, nodeIndex);
                currentExceptionHandler = newHandler;
            }

            if (currentExceptionHandler && (node->op() == SetLocal || node->op() == SetArgument)) {
                InlineCallFrame* inlineCallFrame = node->origin.semantic.inlineCallFrame;
                if (inlineCallFrame)
                    seenInlineCallFrames.add(inlineCallFrame);
                VirtualRegister operand = node->local();

                int stackOffset = inlineCallFrame ? inlineCallFrame->stackOffset : 0;
                if ((operand.isLocal() && liveAtCatchHead[operand.toLocal()])
                    || operand.isArgument()
                    || (operand.offset() == stackOffset + CallFrame::thisArgumentOffset())) {

                    ASSERT(isValidFlushLocation(block, nodeIndex, operand));

                    VariableAccessData* variableAccessData = currentBlockAccessData.operand(operand);
                    if (!variableAccessData)
                        variableAccessData = newVariableAccessData(operand);

                    insertionSet.insertNode(nodeIndex, SpecNone, 
                        Flush, node->origin, OpInfo(variableAccessData));
                }
            }

            if (node->accessesStack(m_graph))
                currentBlockAccessData.operand(node->local()) = node->variableAccessData();
        }

        if (currentExceptionHandler) {
            NodeOrigin origin = block->at(block->size() - 1)->origin;
            flushEverything(origin, block->size());
        }
    }
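
The catchHandler lambda walks outward through inline call frames until some frame's baseline CodeBlock covers the bytecode index. Isolated from the caching, the search is the loop below; lookupInBaselineCodeBlock is a hypothetical stand-in for codeBlock->handlerForBytecodeOffset, stubbed out to keep the sketch self-contained:

struct ToyHandler { };
struct ToyInlineFrame { ToyInlineFrame* caller; unsigned callerBytecodeIndex; };

// Hypothetical stub; the real lookup asks the frame's baseline CodeBlock.
static ToyHandler* lookupInBaselineCodeBlock(ToyInlineFrame*, unsigned) { return nullptr; }

ToyHandler* handlerFor(ToyInlineFrame* inlineFrame, unsigned bytecodeIndex)
{
    while (true) {
        if (ToyHandler* handler = lookupInBaselineCodeBlock(inlineFrame, bytecodeIndex))
            return handler;
        if (!inlineFrame)
            return nullptr; // already at the outermost (machine) frame
        bytecodeIndex = inlineFrame->callerBytecodeIndex;
        inlineFrame = inlineFrame->caller;
    }
}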
    void mergeBlocks(
        BasicBlock* firstBlock, BasicBlock* secondBlock,
        Vector<BasicBlock*, 1> jettisonedBlocks)
    {
        // This will add all of the nodes in secondBlock to firstBlock, but in so doing
        // it will also ensure that any GetLocals from the second block that refer to
        // SetLocals in the first block are relinked. For each jettisoned block,
        // Phantoms are inserted for anything that block would have kept alive.
        
        // Remove the terminal of firstBlock since we don't need it anymore. Well, we don't
        // really remove it; we actually turn it into a Phantom.
        ASSERT(firstBlock->last()->isTerminal());
        CodeOrigin boundaryCodeOrigin = firstBlock->last()->codeOrigin;
        firstBlock->last()->convertToPhantom();
        ASSERT(firstBlock->last()->refCount() == 1);
        
        for (unsigned i = jettisonedBlocks.size(); i--;) {
            BasicBlock* jettisonedBlock = jettisonedBlocks[i];
            
            // Time to insert ghosties for things that need to be kept alive in case we OSR
            // exit prior to hitting the firstBlock's terminal, and end up going down a
            // different path than secondBlock.
            
            for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfArguments(); ++i)
                keepOperandAlive(firstBlock, jettisonedBlock, boundaryCodeOrigin, virtualRegisterForArgument(i));
            for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfLocals(); ++i)
                keepOperandAlive(firstBlock, jettisonedBlock, boundaryCodeOrigin, virtualRegisterForLocal(i));
        }
        
        for (size_t i = 0; i < secondBlock->phis.size(); ++i)
            firstBlock->phis.append(secondBlock->phis[i]);

        for (size_t i = 0; i < secondBlock->size(); ++i)
            firstBlock->append(secondBlock->at(i));
        
        ASSERT(firstBlock->last()->isTerminal());
        
        // Fix the predecessors of my new successors. This is tricky, since we are going to reset
        // all predecessors anyway due to reachability analysis. But we need to fix the
        // predecessors eagerly to ensure that we know what they are in case the next block we
        // consider in this phase wishes to query the predecessors of one of the blocks we
        // affected.
        for (unsigned i = firstBlock->numSuccessors(); i--;) {
            BasicBlock* successor = firstBlock->successor(i);
            for (unsigned j = 0; j < successor->predecessors.size(); ++j) {
                if (successor->predecessors[j] == secondBlock)
                    successor->predecessors[j] = firstBlock;
            }
        }
        
        // Fix the predecessors of my former successors. Again, we'd rather not do this, but it's
        // an unfortunate necessity. See above comment.
        for (unsigned i = jettisonedBlocks.size(); i--;)
            fixJettisonedPredecessors(firstBlock, jettisonedBlocks[i]);
        
        firstBlock->valuesAtTail = secondBlock->valuesAtTail;
        firstBlock->cfaBranchDirection = secondBlock->cfaBranchDirection;
        
        m_graph.killBlock(secondBlock);
    }
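
Reduced to containers, the merge is a splice plus the eager predecessor fixup the comments insist on. A toy version (reachability recomputation, Phi handling, and the CFA state copy are out of scope here):

#include <algorithm>
#include <vector>

// Toy block merge: splice the second block's nodes onto the first, adopt
// its successors, then eagerly retarget any predecessor edge that pointed
// at the second block.
struct ToyBB {
    std::vector<int> nodes;
    std::vector<ToyBB*> successors;
    std::vector<ToyBB*> predecessors;
};

void mergeToyBlocks(ToyBB* first, ToyBB* second)
{
    first->nodes.insert(first->nodes.end(), second->nodes.begin(), second->nodes.end());
    first->successors = second->successors;
    for (ToyBB* successor : first->successors)
        std::replace(successor->predecessors.begin(), successor->predecessors.end(),
            second, first);
}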
void JITCompiler::compileFunction()
{
    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);

    // Move the stack pointer down to accommodate locals
    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();

    // === Function body code generation ===
    m_speculative = std::make_unique<SpeculativeJIT>(*this);
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the stack overflow handling (if the stack check in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack overflow handling; if the stack check in the function head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
    
    // The fast entry point into a function does not check that the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
    m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
    branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
    m_callArityFixup = call();
    jump(fromArityCheck);
    
    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
    
    compileExceptionHandlers();
    linkOSRExits();
    
    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    // === Link ===
    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);
    
    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);
    
    linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress()));
    
    disassemble(*linkBuffer);

    MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.release(), WTFMove(linkBuffer), withArityCheck);
}
Example #17
SUPPRESS_ASAN
void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIndex)
{
    ASSERT(JITCode::isOptimizingJIT(codeBlock->jitType()));
    ASSERT(codeBlock->alternative());
    ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);
    ASSERT(!codeBlock->jitCodeMap());

    if (!Options::useOSREntryToDFG())
        return 0;

    if (Options::verboseOSR()) {
        dataLog(
            "DFG OSR in ", *codeBlock->alternative(), " -> ", *codeBlock,
            " from bc#", bytecodeIndex, "\n");
    }
    
    VM* vm = &exec->vm();

    sanitizeStackForVM(vm);
    
    if (bytecodeIndex)
        codeBlock->ownerScriptExecutable()->setDidTryToEnterInLoop(true);
    
    if (codeBlock->jitType() != JITCode::DFGJIT) {
        RELEASE_ASSERT(codeBlock->jitType() == JITCode::FTLJIT);
        
        // When will this happen? We could have:
        //
        // - An exit from the FTL JIT into the baseline JIT followed by an attempt
        //   to reenter. We're fine with allowing this to fail. If it happens
        //   enough we'll just reoptimize. It basically means that the OSR exit cost
        //   us dearly and so reoptimizing is the right thing to do.
        //
        // - We have recursive code with hot loops. Consider that foo has a hot loop
        //   that calls itself. We have two foo's on the stack, let's call them foo1
        //   and foo2, with foo1 having called foo2 from foo's hot loop. foo2 gets
        //   optimized all the way into the FTL. Then it returns into foo1, and then
        //   foo1 wants to get optimized. It might reach this conclusion from its
        //   hot loop and attempt to OSR enter. And we'll tell it that it can't. It
        //   might be worth addressing this case, but I just think this case will
        //   be super rare. For now, if it does happen, it'll cause some compilation
        //   thrashing.
        
        if (Options::verboseOSR())
            dataLog("    OSR failed because the target code block is not DFG.\n");
        return 0;
    }
    
    JITCode* jitCode = codeBlock->jitCode()->dfg();
    OSREntryData* entry = jitCode->osrEntryDataForBytecodeIndex(bytecodeIndex);
    
    if (!entry) {
        if (Options::verboseOSR())
            dataLogF("    OSR failed because the entrypoint was optimized out.\n");
        return 0;
    }
    
    ASSERT(entry->m_bytecodeIndex == bytecodeIndex);
    
    // The code below checks if it is safe to perform OSR entry. It may find
    // that it is unsafe to do so, for any number of reasons, which are documented
    // below. If the code decides not to OSR then it returns 0, and it's the caller's
    // responsibility to patch up the state in such a way as to ensure that it's
    // both safe and efficient to continue executing baseline code for now. This
    // should almost certainly include calling either codeBlock->optimizeAfterWarmUp()
    // or codeBlock->dontOptimizeAnytimeSoon().
    
    // 1) Verify predictions. If the predictions are inconsistent with the actual
    //    values, then OSR entry is not possible at this time. It's tempting to
    //    assume that we could somehow avoid this case. We can certainly avoid it
    //    for first-time loop OSR - that is, OSR into a CodeBlock that we have just
    //    compiled. Then we are almost guaranteed that all of the predictions will
    //    check out. It would be pretty easy to make that a hard guarantee. But
    //    then there would still be the case where two call frames with the same
    //    baseline CodeBlock are on the stack at the same time. The top one
    //    triggers compilation and OSR. In that case, we may no longer have
    //    accurate value profiles for the one deeper in the stack. Hence, when we
    //    pop into the CodeBlock that is deeper on the stack, we might OSR and
    //    realize that the predictions are wrong. Probably, in most cases, this is
    //    just an anomaly in the sense that the older CodeBlock simply went off
    //    into a less-likely path. So, the wisest course of action is to simply not
    //    OSR at this time.
    
    for (size_t argument = 0; argument < entry->m_expectedValues.numberOfArguments(); ++argument) {
        if (argument >= exec->argumentCountIncludingThis()) {
            if (Options::verboseOSR()) {
                dataLogF("    OSR failed because argument %zu was not passed, expected ", argument);
                entry->m_expectedValues.argument(argument).dump(WTF::dataFile());
                dataLogF(".\n");
            }
            return 0;
        }
        
        JSValue value;
        if (!argument)
            value = exec->thisValue();
        else
            value = exec->argument(argument - 1);
        
        if (!entry->m_expectedValues.argument(argument).validate(value)) {
            if (Options::verboseOSR()) {
                dataLog(
                    "    OSR failed because argument ", argument, " is ", value,
                    ", expected ", entry->m_expectedValues.argument(argument), ".\n");
            }
            return 0;
        }
    }
    
    for (size_t local = 0; local < entry->m_expectedValues.numberOfLocals(); ++local) {
        int localOffset = virtualRegisterForLocal(local).offset();
        if (entry->m_localsForcedDouble.get(local)) {
            if (!exec->registers()[localOffset].asanUnsafeJSValue().isNumber()) {
                if (Options::verboseOSR()) {
                    dataLog(
                        "    OSR failed because variable ", localOffset, " is ",
                        exec->registers()[localOffset].asanUnsafeJSValue(), ", expected number.\n");
                }
                return 0;
            }
            continue;
        }
        if (entry->m_localsForcedAnyInt.get(local)) {
            if (!exec->registers()[localOffset].asanUnsafeJSValue().isAnyInt()) {
                if (Options::verboseOSR()) {
                    dataLog(
                        "    OSR failed because variable ", localOffset, " is ",
                        exec->registers()[localOffset].asanUnsafeJSValue(), ", expected ",
                        "machine int.\n");
                }
                return 0;
            }
            continue;
        }
        if (!entry->m_expectedValues.local(local).validate(exec->registers()[localOffset].asanUnsafeJSValue())) {
            if (Options::verboseOSR()) {
                dataLog(
                    "    OSR failed because variable ", localOffset, " is ",
                    exec->registers()[localOffset].asanUnsafeJSValue(), ", expected ",
                    entry->m_expectedValues.local(local), ".\n");
            }
            return 0;
        }
    }

    // 2) Check the stack height. The DFG JIT may require a taller stack than the
    //    baseline JIT, in some cases. If we can't grow the stack, then don't do
    //    OSR right now. That's the only option we have unless we want basic block
    //    boundaries to start throwing RangeErrors. Although that would be possible,
    //    it seems silly: you'd be diverting the program to error handling when it
    //    would have otherwise just kept running albeit less quickly.
    
    unsigned frameSizeForCheck = jitCode->common.requiredRegisterCountForExecutionAndExit();
    if (!vm->interpreter->stack().ensureCapacityFor(&exec->registers()[virtualRegisterForLocal(frameSizeForCheck - 1).offset()])) {
        if (Options::verboseOSR())
            dataLogF("    OSR failed because stack growth failed.\n");
        return 0;
    }
    
    if (Options::verboseOSR())
        dataLogF("    OSR should succeed.\n");

    // At this point we're committed to entering. We will do some work to set things up,
    // but we also rely on our caller recognizing that when we return a non-null pointer,
    // that means that we're already past the point of no return and we must succeed at
    // entering.
    
    // 3) Set up the data in the scratch buffer and perform data format conversions.

    unsigned frameSize = jitCode->common.frameRegisterCount;
    unsigned baselineFrameSize = entry->m_expectedValues.numberOfLocals();
    unsigned maxFrameSize = std::max(frameSize, baselineFrameSize);

    Register* scratch = bitwise_cast<Register*>(vm->scratchBufferForSize(sizeof(Register) * (2 + JSStack::CallFrameHeaderSize + maxFrameSize))->dataBuffer());
    
    *bitwise_cast<size_t*>(scratch + 0) = frameSize;
    
    void* targetPC = codeBlock->jitCode()->executableAddressAtOffset(entry->m_machineCodeOffset);
    if (Options::verboseOSR())
        dataLogF("    OSR using target PC %p.\n", targetPC);
    RELEASE_ASSERT(targetPC);
    *bitwise_cast<void**>(scratch + 1) = targetPC;
    
    Register* pivot = scratch + 2 + JSStack::CallFrameHeaderSize;
    
    for (int index = -JSStack::CallFrameHeaderSize; index < static_cast<int>(baselineFrameSize); ++index) {
        VirtualRegister reg(-1 - index);
        
        if (reg.isLocal()) {
            if (entry->m_localsForcedDouble.get(reg.toLocal())) {
                *bitwise_cast<double*>(pivot + index) = exec->registers()[reg.offset()].asanUnsafeJSValue().asNumber();
                continue;
            }
            
            if (entry->m_localsForcedAnyInt.get(reg.toLocal())) {
                *bitwise_cast<int64_t*>(pivot + index) = exec->registers()[reg.offset()].asanUnsafeJSValue().asAnyInt() << JSValue::int52ShiftAmount;
                continue;
            }
        }
        
        pivot[index] = exec->registers()[reg.offset()].asanUnsafeJSValue();
    }
    
    // 4) Reshuffle those registers that need reshuffling.
    Vector<JSValue> temporaryLocals(entry->m_reshufflings.size());
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        temporaryLocals[i] = pivot[VirtualRegister(entry->m_reshufflings[i].fromOffset).toLocal()].asanUnsafeJSValue();
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        pivot[VirtualRegister(entry->m_reshufflings[i].toOffset).toLocal()] = temporaryLocals[i];
    
    // 5) Clear those parts of the call frame that the DFG ain't using. This helps GC on
    //    some programs by eliminating some stale pointer pathologies.
    for (unsigned i = frameSize; i--;) {
        if (entry->m_machineStackUsed.get(i))
            continue;
        pivot[i] = JSValue();
    }

    // 6) Copy our callee saves to buffer.
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
    RegisterAtOffsetList* registerSaveLocations = codeBlock->calleeSaveRegisters();
    RegisterAtOffsetList* allCalleeSaves = vm->getAllCalleeSaveRegisterOffsets();
    RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());

    unsigned registerCount = registerSaveLocations->size();
    VMEntryRecord* record = vmEntryRecord(vm->topVMEntryFrame);
    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset currentEntry = registerSaveLocations->at(i);
        if (dontSaveRegisters.get(currentEntry.reg()))
            continue;
        RegisterAtOffset* calleeSavesEntry = allCalleeSaves->find(currentEntry.reg());
        
        *(bitwise_cast<intptr_t*>(pivot - 1) - currentEntry.offsetAsIndex()) = record->calleeSaveRegistersBuffer[calleeSavesEntry->offsetAsIndex()];
    }
#endif
    
    // 7) Fix the call frame to have the right code block.
    
    *bitwise_cast<CodeBlock**>(pivot - 1 - JSStack::CodeBlock) = codeBlock;
    
    if (Options::verboseOSR())
        dataLogF("    OSR returning data buffer %p.\n", scratch);
    return scratch;
}
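
Step 4's reshuffle deserves isolating: every source is read into a temporary before any destination is written, so overlapping from/to pairs cannot clobber one another. The same two-pass shape, minus JSValue and VirtualRegister:

#include <cstddef>
#include <vector>

// The two-pass reshuffle from step 4, isolated. Doubles stand in for
// JSValues and plain indices for VirtualRegisters.
struct ToyMove { std::size_t from, to; };

void reshuffle(std::vector<double>& slots, const std::vector<ToyMove>& moves)
{
    std::vector<double> temporaries(moves.size());
    for (std::size_t i = moves.size(); i--;)
        temporaries[i] = slots[moves[i].from];  // read all sources first
    for (std::size_t i = moves.size(); i--;)
        slots[moves[i].to] = temporaries[i];    // then write all destinations
}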
Example #18
SUPPRESS_ASAN
void* prepareOSREntry(
    ExecState* exec, CodeBlock* dfgCodeBlock, CodeBlock* entryCodeBlock,
    unsigned bytecodeIndex, unsigned streamIndex)
{
    VM& vm = exec->vm();
    CodeBlock* baseline = dfgCodeBlock->baselineVersion();
    ExecutableBase* executable = dfgCodeBlock->ownerExecutable();
    DFG::JITCode* dfgCode = dfgCodeBlock->jitCode()->dfg();
    ForOSREntryJITCode* entryCode = entryCodeBlock->jitCode()->ftlForOSREntry();
    
    if (Options::verboseOSR()) {
        dataLog(
            "FTL OSR from ", *dfgCodeBlock, " to ", *entryCodeBlock, " at bc#",
            bytecodeIndex, ".\n");
    }
    
    if (bytecodeIndex)
        jsCast<ScriptExecutable*>(executable)->setDidTryToEnterInLoop(true);

    if (bytecodeIndex != entryCode->bytecodeIndex()) {
        if (Options::verboseOSR())
            dataLog("    OSR failed because we don't have an entrypoint for bc#", bytecodeIndex, "; ours is for bc#", entryCode->bytecodeIndex(), "\n");
        return 0;
    }
    
    Operands<JSValue> values;
    dfgCode->reconstruct(
        exec, dfgCodeBlock, CodeOrigin(bytecodeIndex), streamIndex, values);
    
    if (Options::verboseOSR())
        dataLog("    Values at entry: ", values, "\n");
    
    for (int argument = values.numberOfArguments(); argument--;) {
        JSValue valueOnStack = exec->r(virtualRegisterForArgument(argument).offset()).asanUnsafeJSValue();
        JSValue reconstructedValue = values.argument(argument);
        if (valueOnStack == reconstructedValue || !argument)
            continue;
        dataLog("Mismatch between reconstructed values and the the value on the stack for argument arg", argument, " for ", *entryCodeBlock, " at bc#", bytecodeIndex, ":\n");
        dataLog("    Value on stack: ", valueOnStack, "\n");
        dataLog("    Reconstructed value: ", reconstructedValue, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }
    
    RELEASE_ASSERT(
        static_cast<int>(values.numberOfLocals()) == baseline->m_numCalleeRegisters);
    
    EncodedJSValue* scratch = static_cast<EncodedJSValue*>(
        entryCode->entryBuffer()->dataBuffer());
    
    for (int local = values.numberOfLocals(); local--;)
        scratch[local] = JSValue::encode(values.local(local));
    
    int stackFrameSize = entryCode->common.requiredRegisterCountForExecutionAndExit();
    if (!vm.interpreter->stack().ensureCapacityFor(&exec->registers()[virtualRegisterForLocal(stackFrameSize - 1).offset()])) {
        if (Options::verboseOSR())
            dataLog("    OSR failed because stack growth failed.\n");
        return 0;
    }
    
    exec->setCodeBlock(entryCodeBlock);
    
    void* result = entryCode->addressForCall(
        vm, executable, ArityCheckNotRequired,
        RegisterPreservationNotRequired).executableAddress();
    if (Options::verboseOSR())
        dataLog("    Entry will succeed, going to address", RawPointer(result), "\n");
    
    return result;
}
    bool run()
    {
        SharedSymbolTable* symbolTable = codeBlock()->symbolTable();

        // This enumerates the locals that we actually care about and packs them. So for example
        // if we use local 1, 3, 4, 5, 7, then we remap them: 1->0, 3->1, 4->2, 5->3, 7->4. We
        // treat a variable as being "used" if there exists an access to it (SetLocal, GetLocal,
        // Flush, PhantomLocal).
        
        BitVector usedLocals;
        
        // Collect those variables that are used from IR.
        bool hasGetLocalUnlinked = false;
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case GetLocal:
                case SetLocal:
                case Flush:
                case PhantomLocal: {
                    VariableAccessData* variable = node->variableAccessData();
                    if (variable->local().isArgument())
                        break;
                    usedLocals.set(variable->local().toLocal());
                    break;
                }
                    
                case GetLocalUnlinked: {
                    VirtualRegister operand = node->unlinkedLocal();
                    if (operand.isArgument())
                        break;
                    usedLocals.set(operand.toLocal());
                    hasGetLocalUnlinked = true;
                    break;
                }
                    
                default:
                    break;
                }
            }
        }
        
        // Ensure that captured variables and captured inline arguments are pinned down.
        // They should have been because of flushes, except that the flushes can be optimized
        // away.
        if (symbolTable) {
            for (int i = symbolTable->captureStart(); i > symbolTable->captureEnd(); i--)
                usedLocals.set(VirtualRegister(i).toLocal());
        }
        if (codeBlock()->usesArguments()) {
            usedLocals.set(codeBlock()->argumentsRegister().toLocal());
            usedLocals.set(unmodifiedArgumentsRegister(codeBlock()->argumentsRegister()).toLocal());
        }
        if (codeBlock()->uncheckedActivationRegister().isValid())
            usedLocals.set(codeBlock()->activationRegister().toLocal());
        for (InlineCallFrameSet::iterator iter = m_graph.m_inlineCallFrames->begin(); !!iter; ++iter) {
            InlineCallFrame* inlineCallFrame = *iter;
            if (!inlineCallFrame->executable->usesArguments())
                continue;
            
            VirtualRegister argumentsRegister = m_graph.argumentsRegisterFor(inlineCallFrame);
            usedLocals.set(argumentsRegister.toLocal());
            usedLocals.set(unmodifiedArgumentsRegister(argumentsRegister).toLocal());
            
            for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
                usedLocals.set(VirtualRegister(
                    virtualRegisterForArgument(argument).offset() +
                    inlineCallFrame->stackOffset).toLocal());
            }
        }
        
        Vector<unsigned> allocation(usedLocals.size());
        m_graph.m_nextMachineLocal = 0;
        for (unsigned i = 0; i < usedLocals.size(); ++i) {
            if (!usedLocals.get(i)) {
                allocation[i] = UINT_MAX;
                continue;
            }
            
            allocation[i] = m_graph.m_nextMachineLocal++;
        }
        
        for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
            VariableAccessData* variable = &m_graph.m_variableAccessData[i];
            if (!variable->isRoot())
                continue;
            
            if (variable->local().isArgument()) {
                variable->machineLocal() = variable->local();
                continue;
            }
            
            size_t local = variable->local().toLocal();
            if (local >= allocation.size())
                continue;
            
            if (allocation[local] == UINT_MAX)
                continue;
            
            variable->machineLocal() = virtualRegisterForLocal(
                allocation[variable->local().toLocal()]);
        }
        
        if (codeBlock()->usesArguments()) {
            VirtualRegister argumentsRegister = virtualRegisterForLocal(
                allocation[codeBlock()->argumentsRegister().toLocal()]);
            RELEASE_ASSERT(
                virtualRegisterForLocal(allocation[
                    unmodifiedArgumentsRegister(
                        codeBlock()->argumentsRegister()).toLocal()])
                == unmodifiedArgumentsRegister(argumentsRegister));
            codeBlock()->setArgumentsRegister(argumentsRegister);
        }
        
        if (codeBlock()->uncheckedActivationRegister().isValid()) {
            codeBlock()->setActivationRegister(
                virtualRegisterForLocal(allocation[codeBlock()->activationRegister().toLocal()]));
        }
        
        for (unsigned i = m_graph.m_inlineVariableData.size(); i--;) {
            InlineVariableData data = m_graph.m_inlineVariableData[i];
            InlineCallFrame* inlineCallFrame = data.inlineCallFrame;
            
            if (inlineCallFrame->executable->usesArguments()) {
                inlineCallFrame->argumentsRegister = virtualRegisterForLocal(
                    allocation[m_graph.argumentsRegisterFor(inlineCallFrame).toLocal()]);

                RELEASE_ASSERT(
                    virtualRegisterForLocal(allocation[unmodifiedArgumentsRegister(
                        m_graph.argumentsRegisterFor(inlineCallFrame)).toLocal()])
                    == unmodifiedArgumentsRegister(inlineCallFrame->argumentsRegister));
            }
            
            for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
                ArgumentPosition& position = m_graph.m_argumentPositions[
                    data.argumentPositionStart + argument];
                VariableAccessData* variable = position.someVariable();
                ValueSource source;
                if (!variable)
                    source = ValueSource(SourceIsDead);
                else {
                    source = ValueSource::forFlushFormat(
                        variable->machineLocal(), variable->flushFormat());
                }
                inlineCallFrame->arguments[argument] = source.valueRecovery();
            }
            
            RELEASE_ASSERT(inlineCallFrame->isClosureCall == !!data.calleeVariable);
            if (inlineCallFrame->isClosureCall) {
                ValueSource source = ValueSource::forFlushFormat(
                    data.calleeVariable->machineLocal(),
                    data.calleeVariable->flushFormat());
                inlineCallFrame->calleeRecovery = source.valueRecovery();
            } else
                RELEASE_ASSERT(inlineCallFrame->calleeRecovery.isConstant());
        }
        
        if (symbolTable) {
            if (symbolTable->captureCount()) {
                unsigned captureStartLocal = allocation[
                    VirtualRegister(codeBlock()->symbolTable()->captureStart()).toLocal()];
                ASSERT(captureStartLocal != UINT_MAX);
                m_graph.m_machineCaptureStart = virtualRegisterForLocal(captureStartLocal).offset();
            } else
                m_graph.m_machineCaptureStart = virtualRegisterForLocal(0).offset();
        
            // This is an abomination. If we had captured an argument then the argument ends
            // up being "slow", meaning that loads of the argument go through an extra lookup
            // table.
            if (const SlowArgument* slowArguments = symbolTable->slowArguments()) {
                auto newSlowArguments = std::make_unique<SlowArgument[]>(
                    symbolTable->parameterCount());
                for (size_t i = symbolTable->parameterCount(); i--;) {
                    newSlowArguments[i] = slowArguments[i];
                    VirtualRegister reg = VirtualRegister(slowArguments[i].index);
                    if (reg.isLocal())
                        newSlowArguments[i].index = virtualRegisterForLocal(allocation[reg.toLocal()]).offset();
                }
            
                m_graph.m_slowArguments = std::move(newSlowArguments);
            }
        }
        
        // Fix GetLocalUnlinked's variable references.
        if (hasGetLocalUnlinked) {
            for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
                BasicBlock* block = m_graph.block(blockIndex);
                if (!block)
                    continue;
                for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                    Node* node = block->at(nodeIndex);
                    switch (node->op()) {
                    case GetLocalUnlinked: {
                        VirtualRegister operand = node->unlinkedLocal();
                        if (operand.isLocal())
                            operand = virtualRegisterForLocal(allocation[operand.toLocal()]);
                        node->setUnlinkedMachineLocal(operand);
                        break;
                    }
                        
                    default:
                        break;
                    }
                }
            }
        }
        
        return true;
    }
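
The remap promised by the comment at the top of run() (use locals 1, 3, 4, 5, 7 and they become 0, 1, 2, 3, 4) is just a counting pass over the used-locals set. A self-contained version, assuming the same UINT_MAX dead-local marker that assign() consumes:

#include <climits>
#include <cstddef>
#include <vector>

// The packing described at the top of run(): each used local gets the next
// free machine slot; dead locals get UINT_MAX. E.g. used locals
// {1, 3, 4, 5, 7} out of 8 map to 1->0, 3->1, 4->2, 5->3, 7->4.
std::vector<unsigned> packLocals(const std::vector<bool>& usedLocals)
{
    std::vector<unsigned> allocation(usedLocals.size(), UINT_MAX);
    unsigned nextMachineLocal = 0;
    for (std::size_t i = 0; i < usedLocals.size(); ++i) {
        if (usedLocals[i])
            allocation[i] = nextMachineLocal++;
    }
    return allocation;
}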