Node* emitCodeToGetArgumentsArrayLength(
    InsertionSet& insertionSet, Node* arguments, unsigned nodeIndex, NodeOrigin origin)
{
    Graph& graph = insertionSet.graph();

    DFG_ASSERT(
        graph, arguments,
        arguments->op() == CreateDirectArguments || arguments->op() == CreateScopedArguments
        || arguments->op() == CreateClonedArguments || arguments->op() == PhantomDirectArguments
        || arguments->op() == PhantomClonedArguments);

    InlineCallFrame* inlineCallFrame = arguments->origin.semantic.inlineCallFrame;

    if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
        return insertionSet.insertConstant(
            nodeIndex, origin, jsNumber(inlineCallFrame->arguments.size() - 1));
    }

    Node* argumentCount;
    if (!inlineCallFrame)
        argumentCount = insertionSet.insertNode(nodeIndex, SpecInt32, GetArgumentCount, origin);
    else {
        VirtualRegister argumentCountRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount);

        argumentCount = insertionSet.insertNode(
            nodeIndex, SpecInt32, GetStack, origin,
            OpInfo(graph.m_stackAccessData.add(argumentCountRegister, FlushedInt32)));
    }

    return insertionSet.insertNode(
        nodeIndex, SpecInt32, ArithSub, origin, OpInfo(Arith::Unchecked),
        Edge(argumentCount, Int32Use),
        insertionSet.insertConstantForUse(
            nodeIndex, origin, jsNumber(1), Int32Use));
}
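
For intuition, the IR emitted above amounts to the following plain C++. The helper name and signature are invented for this sketch and are not JSC API.

#include <cstdint>

// Illustrative-only equivalent of the emitted IR: the argument count
// includes |this|, so the arguments array length is one less.
static int32_t argumentsArrayLength(int32_t argumentCountIncludingThis)
{
    return argumentCountIncludingThis - 1; // drop the |this| slot
}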
Example #2
void StackVisitor::readInlinedFrame(CallFrame* callFrame, CodeOrigin* codeOrigin)
{
    ASSERT(codeOrigin);

    int frameOffset = inlinedFrameOffset(codeOrigin);
    bool isInlined = !!frameOffset;
    if (isInlined) {
        InlineCallFrame* inlineCallFrame = codeOrigin->inlineCallFrame;

        m_frame.m_callFrame = callFrame;
        m_frame.m_inlineCallFrame = inlineCallFrame;
        if (inlineCallFrame->argumentCountRegister.isValid())
            m_frame.m_argumentCountIncludingThis = callFrame->r(inlineCallFrame->argumentCountRegister.offset()).unboxedInt32();
        else
            m_frame.m_argumentCountIncludingThis = inlineCallFrame->arguments.size();
        m_frame.m_codeBlock = inlineCallFrame->baselineCodeBlock();
        m_frame.m_bytecodeOffset = codeOrigin->bytecodeIndex;

        JSFunction* callee = inlineCallFrame->calleeForCallFrame(callFrame);
        m_frame.m_callee = callee;
        ASSERT(m_frame.callee());

        // The callerFrame just needs to be non-null to indicate that we
        // haven't reached the last frame yet. Setting it to the root
        // frame (i.e. the callFrame that this inlined frame is called from)
        // would work just fine.
        m_frame.m_callerFrame = callFrame;
        return;
    }

    readNonInlinedFrame(callFrame, codeOrigin);
}
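
The branch worth noting above is the argument-count selection. A sketch of that decision factored into a helper, assuming JSC's internal headers are in scope (the helper itself is illustrative, not part of JSC):

// Sketch: varargs inline frames carry a dynamic count in a machine
// register slot; non-varargs frames know their count statically.
static int32_t inlinedArgumentCountIncludingThis(InlineCallFrame* inlineCallFrame, CallFrame* callFrame)
{
    if (inlineCallFrame->argumentCountRegister.isValid())
        return callFrame->r(inlineCallFrame->argumentCountRegister.offset()).unboxedInt32();
    return static_cast<int32_t>(inlineCallFrame->arguments.size());
}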
Example #3
Node* emitCodeToGetArgumentsArrayLength(
    InsertionSet& insertionSet, Node* arguments, unsigned nodeIndex, NodeOrigin origin)
{
    Graph& graph = insertionSet.graph();

    DFG_ASSERT(
        graph, arguments,
        arguments->op() == CreateDirectArguments || arguments->op() == CreateScopedArguments
        || arguments->op() == CreateClonedArguments || arguments->op() == CreateRest
        || arguments->op() == PhantomDirectArguments || arguments->op() == PhantomClonedArguments || arguments->op() == PhantomCreateRest);
    
    InlineCallFrame* inlineCallFrame = arguments->origin.semantic.inlineCallFrame;

    unsigned numberOfArgumentsToSkip = 0;
    if (arguments->op() == CreateRest || arguments->op() == PhantomCreateRest)
        numberOfArgumentsToSkip = arguments->numberOfArgumentsToSkip();
    
    if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
        unsigned argumentsSize = inlineCallFrame->arguments.size() - 1;
        if (argumentsSize >= numberOfArgumentsToSkip)
            argumentsSize -= numberOfArgumentsToSkip;
        else
            argumentsSize = 0;
        return insertionSet.insertConstant(
            nodeIndex, origin, jsNumber(argumentsSize));
    }
    
    Node* argumentCount;
    if (!inlineCallFrame)
        argumentCount = insertionSet.insertNode(nodeIndex, SpecInt32Only, GetArgumentCountIncludingThis, origin);
    else {
        VirtualRegister argumentCountRegister(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount);
        
        argumentCount = insertionSet.insertNode(
            nodeIndex, SpecInt32Only, GetStack, origin,
            OpInfo(graph.m_stackAccessData.add(argumentCountRegister, FlushedInt32)));
    }

    Node* result = insertionSet.insertNode(
        nodeIndex, SpecInt32Only, ArithSub, origin, OpInfo(Arith::Unchecked),
        Edge(argumentCount, Int32Use),
        insertionSet.insertConstantForUse(
            nodeIndex, origin, jsNumber(1 + numberOfArgumentsToSkip), Int32Use));

    if (numberOfArgumentsToSkip) {
        // The subtraction above can go negative when numberOfArgumentsToSkip is non-zero; clamp the result to zero here.
        result = insertionSet.insertNode(
            nodeIndex, SpecInt32Only, ArithMax, origin, 
            Edge(result, Int32Use), 
            insertionSet.insertConstantForUse(nodeIndex, origin, jsNumber(0), Int32Use));
        result->setResult(NodeResultInt32);
    }

    return result;
}
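
With a rest parameter, the ArithSub/ArithMax pair emitted above computes max(count - 1 - skip, 0). A standalone sketch (illustrative helper name, not JSC API):

#include <algorithm>
#include <cstdint>

// Sketch of the value the emitted IR produces: subtract |this| and the
// skipped arguments, then clamp at zero.
static int32_t restLength(int32_t argumentCountIncludingThis, int32_t numberOfArgumentsToSkip)
{
    return std::max(argumentCountIncludingThis - 1 - numberOfArgumentsToSkip, 0);
}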
Example #4
unsigned CallFrame::bytecodeOffsetFromCodeOriginIndex()
{
    ASSERT(hasLocationAsCodeOriginIndex());
    CodeBlock* codeBlock = this->codeBlock();
    ASSERT(codeBlock);

    CodeOrigin codeOrigin;
    unsigned index = locationAsCodeOriginIndex();
    ASSERT(codeBlock->canGetCodeOrigin(index));
    codeOrigin = codeBlock->codeOrigin(index);

    for (InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame; inlineCallFrame;) {
        if (inlineCallFrame->baselineCodeBlock() == codeBlock)
            return codeOrigin.bytecodeIndex;

        codeOrigin = inlineCallFrame->caller;
        inlineCallFrame = codeOrigin.inlineCallFrame;
    }
    return codeOrigin.bytecodeIndex;
}
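
The loop above is the usual inline-stack walk. The same caller-hopping pattern, distilled into an illustrative helper (assumes JSC's internal headers; not part of JSC):

// Sketch: depth of a CodeOrigin's inline stack. Each hop moves to the
// call site that inlined the current frame, exactly as the loop above.
static unsigned inlineDepth(CodeOrigin codeOrigin)
{
    unsigned depth = 1;
    while (codeOrigin.inlineCallFrame) {
        codeOrigin = codeOrigin.inlineCallFrame->caller;
        ++depth;
    }
    return depth;
}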
Example #5
void StackVisitor::gotoNextFrame()
{
#if ENABLE(DFG_JIT)
    if (m_frame.isInlinedFrame()) {
        InlineCallFrame* inlineCallFrame = m_frame.inlineCallFrame();
        CodeOrigin* callerCodeOrigin = inlineCallFrame->getCallerSkippingDeadFrames();
        if (!callerCodeOrigin) {
            while (inlineCallFrame) {
                readInlinedFrame(m_frame.callFrame(), &inlineCallFrame->directCaller);
                inlineCallFrame = m_frame.inlineCallFrame();
            }
            m_frame.m_VMEntryFrame = m_frame.m_CallerVMEntryFrame;
            readFrame(m_frame.callerFrame());
        } else
            readInlinedFrame(m_frame.callFrame(), callerCodeOrigin);
        return;
    }
#endif // ENABLE(DFG_JIT)
    m_frame.m_VMEntryFrame = m_frame.m_CallerVMEntryFrame;
    readFrame(m_frame.callerFrame());
}
Example #6
void StackIterator::readInlinedFrame(CallFrame* callFrame, CodeOrigin* codeOrigin)
{
    ASSERT(codeOrigin);
    ASSERT(!callFrame->hasHostCallFrameFlag());

    unsigned frameOffset = inlinedFrameOffset(codeOrigin);
    bool isInlined = !!frameOffset;
    if (isInlined) {
        InlineCallFrame* inlineCallFrame = codeOrigin->inlineCallFrame;

        m_frame.m_callFrame = callFrame;
        m_frame.m_inlineCallFrame = inlineCallFrame;
        m_frame.m_argumentCountIncludingThis = inlineCallFrame->arguments.size();
        m_frame.m_codeBlock = inlineCallFrame->baselineCodeBlock();
        m_frame.m_bytecodeOffset = codeOrigin->bytecodeIndex;

        JSFunction* callee = inlineCallFrame->callee.get();
        if (callee) {
            m_frame.m_scope = callee->scope();
            m_frame.m_callee = callee;
        } else {
            CallFrame* inlinedFrame = callFrame + frameOffset;
            m_frame.m_scope = inlinedFrame->scope();
            m_frame.m_callee = inlinedFrame->callee();
        }
        ASSERT(m_frame.scope());
        ASSERT(m_frame.callee());

        // The callerFrame just needs to be non-null to indicate that we
        // haven't reached the last frame yet. Setting it to the root
        // frame (i.e. the callFrame that this inlined frame is called from)
        // would work just fine.
        m_frame.m_callerFrame = callFrame;
        return;
    }

    readNonInlinedFrame(callFrame, codeOrigin);
}
Example #7
void LocalOSRAvailabilityCalculator::executeNode(Node* node)
{
    switch (node->op()) {
    case PutStack: {
        StackAccessData* data = node->stackAccessData();
        m_availability.m_locals.operand(data->local).setFlush(data->flushedAt());
        break;
    }

    case KillStack: {
        m_availability.m_locals.operand(node->unlinkedLocal()).setFlush(FlushedAt(ConflictingFlush));
        break;
    }

    case GetStack: {
        StackAccessData* data = node->stackAccessData();
        m_availability.m_locals.operand(data->local) = Availability(node, data->flushedAt());
        break;
    }

    case MovHint: {
        m_availability.m_locals.operand(node->unlinkedLocal()).setNode(node->child1().node());
        break;
    }

    case ZombieHint: {
        m_availability.m_locals.operand(node->unlinkedLocal()).setNodeUnavailable();
        break;
    }

    case LoadVarargs:
    case ForwardVarargs: {
        LoadVarargsData* data = node->loadVarargsData();
        m_availability.m_locals.operand(data->count) =
            Availability(FlushedAt(FlushedInt32, data->machineCount));
        for (unsigned i = data->limit; i--;) {
            m_availability.m_locals.operand(VirtualRegister(data->start.offset() + i)) =
                Availability(FlushedAt(FlushedJSValue, VirtualRegister(data->machineStart.offset() + i)));
        }
        break;
    }

    case PhantomCreateRest:
    case PhantomDirectArguments:
    case PhantomClonedArguments: {
        InlineCallFrame* inlineCallFrame = node->origin.semantic.inlineCallFrame;
        if (!inlineCallFrame) {
            // We don't need to record anything about how the arguments are to be recovered. It's just a
            // given that we can read them from the stack.
            break;
        }

        unsigned numberOfArgumentsToSkip = 0;
        if (node->op() == PhantomCreateRest)
            numberOfArgumentsToSkip = node->numberOfArgumentsToSkip();

        if (inlineCallFrame->isVarargs()) {
            // Record how to read each argument and the argument count.
            Availability argumentCount =
                m_availability.m_locals.operand(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount);

            m_availability.m_heap.set(PromotedHeapLocation(ArgumentCountPLoc, node), argumentCount);
        }

        if (inlineCallFrame->isClosureCall) {
            Availability callee = m_availability.m_locals.operand(
                inlineCallFrame->stackOffset + CallFrameSlot::callee);
            m_availability.m_heap.set(PromotedHeapLocation(ArgumentsCalleePLoc, node), callee);
        }

        for (unsigned i = numberOfArgumentsToSkip; i < inlineCallFrame->arguments.size() - 1; ++i) {
            Availability argument = m_availability.m_locals.operand(
                inlineCallFrame->stackOffset + CallFrame::argumentOffset(i));

            m_availability.m_heap.set(PromotedHeapLocation(ArgumentPLoc, node, i), argument);
        }
        break;
    }

    case PutHint: {
        m_availability.m_heap.set(
            PromotedHeapLocation(node->child1().node(), node->promotedLocationDescriptor()),
            Availability(node->child2().node()));
        break;
    }

    case PhantomSpread:
        m_availability.m_heap.set(PromotedHeapLocation(SpreadPLoc, node), Availability(node->child1().node()));
        break;

    case PhantomNewArrayWithSpread:
        for (unsigned i = 0; i < node->numChildren(); i++) {
            Node* child = m_graph.varArgChild(node, i).node();
            m_availability.m_heap.set(PromotedHeapLocation(NewArrayWithSpreadArgumentPLoc, node, i), Availability(child));
        }
        break;

    default:
        break;
    }
}
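
A toy model of the per-local bookkeeping the switch above maintains; the types here are invented for illustration and are much simpler than JSC's Availability:

// Toy availability model: a local is recoverable from a flushed stack
// slot (PutStack/GetStack), from a live node (MovHint), or not at all
// (ZombieHint/KillStack).
struct ToyAvailability {
    enum Kind { Unavailable, FlushedToStack, LiveInNode };
    Kind kind = Unavailable;
    int where = 0; // machine stack slot or node index, depending on kind
};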
Example #8
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

    CodeOrigin codeOrigin;
    for (codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
        void* jumpTarget = nullptr;
        void* trueReturnPC = nullptr;

        unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex;

        switch (inlineCallFrame->kind) {
        case InlineCallFrame::Call:
        case InlineCallFrame::Construct:
        case InlineCallFrame::CallVarargs:
        case InlineCallFrame::ConstructVarargs: {
            CallLinkInfo* callLinkInfo =
                baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
            RELEASE_ASSERT(callLinkInfo);

            jumpTarget = callLinkInfo->callReturnLocation().executableAddress();
            break;
        }

        case InlineCallFrame::GetterCall:
        case InlineCallFrame::SetterCall: {
            StructureStubInfo* stubInfo =
                baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
            RELEASE_ASSERT(stubInfo);

            switch (inlineCallFrame->kind) {
            case InlineCallFrame::GetterCall:
                jumpTarget = jit.vm()->getCTIStub(baselineGetterReturnThunkGenerator).code().executableAddress();
                break;
            case InlineCallFrame::SetterCall:
                jumpTarget = jit.vm()->getCTIStub(baselineSetterReturnThunkGenerator).code().executableAddress();
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            trueReturnPC = stubInfo->callReturnLocation.labelAtOffset(
                stubInfo->patch.deltaCallToDone).executableAddress();
            break;
        } }

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        if (trueReturnPC)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(trueReturnPC), AssemblyHelpers::addressFor(inlineCallFrame->stackOffset + virtualRegisterForArgument(inlineCallFrame->arguments.size()).offset()));

        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        if (!inlineCallFrame->isVarargs())
            jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
#if USE(JSVALUE64)
        jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
#else // USE(JSVALUE64) // so this is the 32-bit part
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex;
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
#endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part
    }

#if USE(JSVALUE64)
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
#else
    Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin.bytecodeIndex;
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
#endif
    jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
}
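
The callerFrameGPR computation above works because an inlined frame's slots sit at a fixed offset from the machine frame. In plain arithmetic (sketch only; relies on EncodedJSValue being 8 bytes):

#include <cstddef>
#include <cstdint>

// Sketch: an inlined frame's base is the machine frame base plus its
// stackOffset, measured in EncodedJSValue-sized (8-byte) slots.
static uint8_t* inlinedFrameBase(uint8_t* machineFrameBase, std::ptrdiff_t stackOffset)
{
    return machineFrameBase + stackOffset * sizeof(int64_t);
}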
Example #9
void OSRExitCompiler::emitRestoreArguments(const Operands<ValueRecovery>& operands)
{
    HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);
        
        if (recovery.technique() != DirectArgumentsThatWereNotCreated
            && recovery.technique() != ClonedArgumentsThatWereNotCreated)
            continue;
        
        MinifiedID id = recovery.nodeID();
        auto iter = alreadyAllocatedArguments.find(id);
        if (iter != alreadyAllocatedArguments.end()) {
            JSValueRegs regs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT1);
            m_jit.loadValue(CCallHelpers::addressFor(iter->value), regs);
            m_jit.storeValue(regs, CCallHelpers::addressFor(operand));
            continue;
        }
        
        InlineCallFrame* inlineCallFrame =
            m_jit.codeBlock()->jitCode()->dfg()->minifiedDFG.at(id)->inlineCallFrame();

        int stackOffset;
        if (inlineCallFrame)
            stackOffset = inlineCallFrame->stackOffset;
        else
            stackOffset = 0;
        
        if (!inlineCallFrame || inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                AssemblyHelpers::addressFor(stackOffset + JSStack::Callee),
                GPRInfo::regT0);
        } else {
            m_jit.move(
                AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeRecovery.constant().asCell()),
                GPRInfo::regT0);
        }
        
        if (!inlineCallFrame || inlineCallFrame->isVarargs()) {
            m_jit.load32(
                AssemblyHelpers::payloadFor(stackOffset + JSStack::ArgumentCount),
                GPRInfo::regT1);
        } else {
            m_jit.move(
                AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()),
                GPRInfo::regT1);
        }
        
        m_jit.setupArgumentsWithExecState(
            AssemblyHelpers::TrustedImmPtr(inlineCallFrame), GPRInfo::regT0, GPRInfo::regT1);
        switch (recovery.technique()) {
        case DirectArgumentsThatWereNotCreated:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateDirectArgumentsDuringExit)), GPRInfo::nonArgGPR0);
            break;
        case ClonedArgumentsThatWereNotCreated:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateClonedArgumentsDuringExit)), GPRInfo::nonArgGPR0);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        m_jit.call(GPRInfo::nonArgGPR0);
        m_jit.storeCell(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(operand));
        
        alreadyAllocatedArguments.add(id, operand);
    }
}
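
The HashMap above is a materialize-once cache. The same pattern in miniature with standard containers (illustrative, not the JIT code path):

#include <unordered_map>

// Sketch: create each phantom arguments object once per node id, then
// reuse the operand that already holds it for later occurrences.
static int materializeOnce(std::unordered_map<int, int>& cache, int nodeID, int operand)
{
    auto it = cache.find(nodeID);
    if (it != cache.end())
        return it->second; // copy from the slot that already has the object
    // ... allocate the object and store it at `operand` ...
    cache.emplace(nodeID, operand);
    return operand;
}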
Example #10
    bool run()
    {
        // This enumerates the locals that we actually care about and packs them. So for example
        // if we use local 1, 3, 4, 5, 7, then we remap them: 1->0, 3->1, 4->2, 5->3, 7->4. We
        // treat a variable as being "used" if there exists an access to it (SetLocal, GetLocal,
        // Flush, PhantomLocal).
        
        BitVector usedLocals;
        
        // Collect those variables that are used from IR.
        bool hasNodesThatNeedFixup = false;
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case GetLocal:
                case SetLocal:
                case Flush:
                case PhantomLocal: {
                    VariableAccessData* variable = node->variableAccessData();
                    if (variable->local().isArgument())
                        break;
                    usedLocals.set(variable->local().toLocal());
                    break;
                }
                    
                case GetLocalUnlinked: {
                    VirtualRegister operand = node->unlinkedLocal();
                    if (operand.isArgument())
                        break;
                    usedLocals.set(operand.toLocal());
                    hasNodesThatNeedFixup = true;
                    break;
                }
                    
                case LoadVarargs:
                case ForwardVarargs: {
                    LoadVarargsData* data = node->loadVarargsData();
                    if (data->count.isLocal())
                        usedLocals.set(data->count.toLocal());
                    if (data->start.isLocal()) {
                        // This part really relies on the contiguity of stack layout
                        // assignments.
                        ASSERT(VirtualRegister(data->start.offset() + data->limit - 1).isLocal());
                        for (unsigned i = data->limit; i--;) 
                            usedLocals.set(VirtualRegister(data->start.offset() + i).toLocal());
                    } // the else case shouldn't happen.
                    hasNodesThatNeedFixup = true;
                    break;
                }
                    
                case PutStack:
                case GetStack: {
                    StackAccessData* stack = node->stackAccessData();
                    if (stack->local.isArgument())
                        break;
                    usedLocals.set(stack->local.toLocal());
                    break;
                }
                    
                default:
                    break;
                }
            }
        }
        
        for (InlineCallFrameSet::iterator iter = m_graph.m_plan.inlineCallFrames->begin(); !!iter; ++iter) {
            InlineCallFrame* inlineCallFrame = *iter;
            
            if (inlineCallFrame->isVarargs()) {
                usedLocals.set(VirtualRegister(
                    JSStack::ArgumentCount + inlineCallFrame->stackOffset).toLocal());
            }
            
            for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
                usedLocals.set(VirtualRegister(
                    virtualRegisterForArgument(argument).offset() +
                    inlineCallFrame->stackOffset).toLocal());
            }
        }
        
        Vector<unsigned> allocation(usedLocals.size());
        m_graph.m_nextMachineLocal = 0;
        for (unsigned i = 0; i < usedLocals.size(); ++i) {
            if (!usedLocals.get(i)) {
                allocation[i] = UINT_MAX;
                continue;
            }
            
            allocation[i] = m_graph.m_nextMachineLocal++;
        }
        
        for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
            VariableAccessData* variable = &m_graph.m_variableAccessData[i];
            if (!variable->isRoot())
                continue;
            
            if (variable->local().isArgument()) {
                variable->machineLocal() = variable->local();
                continue;
            }
            
            size_t local = variable->local().toLocal();
            if (local >= allocation.size())
                continue;
            
            if (allocation[local] == UINT_MAX)
                continue;
            
            variable->machineLocal() = assign(allocation, variable->local());
        }
        
        for (StackAccessData* data : m_graph.m_stackAccessData) {
            if (!data->local.isLocal()) {
                data->machineLocal = data->local;
                continue;
            }
            
            if (static_cast<size_t>(data->local.toLocal()) >= allocation.size())
                continue;
            if (allocation[data->local.toLocal()] == UINT_MAX)
                continue;
            
            data->machineLocal = assign(allocation, data->local);
        }
        
        // This register is never valid for DFG code blocks.
        codeBlock()->setActivationRegister(VirtualRegister());
        if (LIKELY(!m_graph.hasDebuggerEnabled()))
            codeBlock()->setScopeRegister(VirtualRegister());
        else
            codeBlock()->setScopeRegister(assign(allocation, codeBlock()->scopeRegister()));

        for (unsigned i = m_graph.m_inlineVariableData.size(); i--;) {
            InlineVariableData data = m_graph.m_inlineVariableData[i];
            InlineCallFrame* inlineCallFrame = data.inlineCallFrame;
            
            if (inlineCallFrame->isVarargs()) {
                inlineCallFrame->argumentCountRegister = assign(
                    allocation, VirtualRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount));
            }
            
            for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
                ArgumentPosition& position = m_graph.m_argumentPositions[
                    data.argumentPositionStart + argument];
                VariableAccessData* variable = position.someVariable();
                ValueSource source;
                if (!variable)
                    source = ValueSource(SourceIsDead);
                else {
                    source = ValueSource::forFlushFormat(
                        variable->machineLocal(), variable->flushFormat());
                }
                inlineCallFrame->arguments[argument] = source.valueRecovery();
            }
            
            RELEASE_ASSERT(inlineCallFrame->isClosureCall == !!data.calleeVariable);
            if (inlineCallFrame->isClosureCall) {
                VariableAccessData* variable = data.calleeVariable->find();
                ValueSource source = ValueSource::forFlushFormat(
                    variable->machineLocal(),
                    variable->flushFormat());
                inlineCallFrame->calleeRecovery = source.valueRecovery();
            } else
                RELEASE_ASSERT(inlineCallFrame->calleeRecovery.isConstant());
        }
        
        // Fix GetLocalUnlinked's variable references.
        if (hasNodesThatNeedFixup) {
            for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
                BasicBlock* block = m_graph.block(blockIndex);
                if (!block)
                    continue;
                for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                    Node* node = block->at(nodeIndex);
                    switch (node->op()) {
                    case GetLocalUnlinked: {
                        node->setUnlinkedMachineLocal(assign(allocation, node->unlinkedLocal()));
                        break;
                    }
                        
                    case LoadVarargs:
                    case ForwardVarargs: {
                        LoadVarargsData* data = node->loadVarargsData();
                        data->machineCount = assign(allocation, data->count);
                        data->machineStart = assign(allocation, data->start);
                        break;
                    }
                        
                    default:
                        break;
                    }
                }
            }
        }
        
        return true;
    }
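
The packing described in the opening comment (used locals 1, 3, 4, 5, 7 remap to 0, 1, 2, 3, 4) can be reproduced standalone; a sketch using std::vector<bool> in place of BitVector:

#include <climits>
#include <vector>

// Sketch of the allocation step: densely renumber used locals, marking
// unused ones with UINT_MAX exactly as the phase does.
static std::vector<unsigned> packLocals(const std::vector<bool>& usedLocals)
{
    std::vector<unsigned> allocation(usedLocals.size(), UINT_MAX);
    unsigned nextMachineLocal = 0;
    for (size_t i = 0; i < usedLocals.size(); ++i) {
        if (usedLocals[i])
            allocation[i] = nextMachineLocal++;
    }
    return allocation;
}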
Example #11
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

    CodeOrigin codeOrigin;
    for (codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
        unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex;
        CallLinkInfo& callLinkInfo = baselineCodeBlockForCaller->getCallLinkInfo(callBytecodeIndex);
        
        void* jumpTarget = callLinkInfo.callReturnLocation.executableAddress();

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;
        
#if USE(JSVALUE64)
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        
        // Leave the captured arguments in regT3.
        if (baselineCodeBlock->usesArguments())
            jit.loadPtr(AssemblyHelpers::addressFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
#else // USE(JSVALUE64) // so this is the 32-bit part
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex;
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));

        // Leave the captured arguments in regT3.
        if (baselineCodeBlock->usesArguments())
            jit.loadPtr(AssemblyHelpers::payloadFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
#endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part
        
        if (baselineCodeBlock->usesArguments()) {
            AssemblyHelpers::Jump noArguments = jit.branchTestPtr(AssemblyHelpers::Zero, GPRInfo::regT3);
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
            jit.storePtr(GPRInfo::regT0, AssemblyHelpers::Address(GPRInfo::regT3, Arguments::offsetOfRegisters()));
            noArguments.link(&jit);
        }
    }

#if USE(JSVALUE64)
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
#else
    Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin.bytecodeIndex;
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
#endif
    jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
}