bool
GreedyAllocator::informSafepoint(LSafepoint *safepoint)
{
    for (InlineListIterator<VirtualRegister> iter = liveSlots_.begin();
         iter != liveSlots_.end();
         iter++)
    {
        VirtualRegister *vr = *iter;
        if (vr->type() == LDefinition::OBJECT || vr->type() == LDefinition::BOX) {
            if (!safepoint->addGcSlot(vr->stackSlot()))
                return false;
            continue;
        }

#ifdef JS_NUNBOX32
        if (!IsNunbox(vr->type()))
            continue;

        VirtualRegister *other = otherHalfOfNunbox(vr);

        // Only bother if both halves are spilled.
        if (vr->hasStackSlot() && other->hasStackSlot()) {
            uint32 slot = BaseOfNunboxSlot(vr->type(), vr->stackSlot());
            if (!safepoint->addValueSlot(slot))
                return false;
        }
#endif
    }
    return true;
}
JSValue CallFrame::uncheckedActivation() const
{
    CodeBlock* codeBlock = this->codeBlock();
    RELEASE_ASSERT(codeBlock->needsActivation());
    VirtualRegister activationRegister = codeBlock->activationRegister();
    return registers()[activationRegister.offset()].jsValue();
}
void
GreedyAllocator::allocateStack(VirtualRegister *vr)
{
    if (vr->hasBackingStack())
        return;

    uint32 index;
#ifdef JS_NUNBOX32
    if (IsNunbox(vr->type())) {
        VirtualRegister *other = otherHalfOfNunbox(vr);
        unsigned stackSlot;
        if (!other->hasStackSlot())
            stackSlot = allocateSlotFor(vr);
        else
            stackSlot = BaseOfNunboxSlot(other->type(), other->stackSlot());
        index = stackSlot - OffsetOfNunboxSlot(vr->type());
    } else
#endif
    {
        index = allocateSlotFor(vr);
    }

    IonSpew(IonSpew_RegAlloc, "    assign vr%d := stack%d", vr->def->virtualRegister(), index);

    vr->setStackSlot(index);
}
JSLexicalEnvironment* CallFrame::lexicalEnvironment() const
{
    CodeBlock* codeBlock = this->codeBlock();
    RELEASE_ASSERT(codeBlock->needsActivation());
    VirtualRegister activationRegister = codeBlock->activationRegister();
    return registers()[activationRegister.offset()].Register::lexicalEnvironment();
}
void CallFrame::setActivation(JSLexicalEnvironment* lexicalEnvironment)
{
    CodeBlock* codeBlock = this->codeBlock();
    RELEASE_ASSERT(codeBlock->needsActivation());
    VirtualRegister activationRegister = codeBlock->activationRegister();
    registers()[activationRegister.offset()] = lexicalEnvironment;
}
bool
GreedyAllocator::prescanDefinition(LDefinition *def)
{
    // If the definition is a passthrough (i.e. a redefinition of an input),
    // ignore it entirely. It's not valid to kill it, and it doesn't matter if
    // an input uses the same register (thus it does not go into the disallow
    // set).
    if (def->policy() == LDefinition::PASSTHROUGH)
        return true;

    VirtualRegister *vr = getVirtualRegister(def);

    // Add its register to the free pool.
    killReg(vr);

    // If it has a register, prevent it from being allocated this round.
    if (vr->hasRegister())
        disallowed.add(vr->reg());

    if (def->policy() == LDefinition::PRESET) {
        const LAllocation *a = def->output();
        if (a->isRegister()) {
            // Evict fixed registers. Use the unchecked version of set-add
            // because the register does not reflect any allocation state, so
            // it may have already been added.
            AnyRegister reg = GetPresetRegister(def);
            disallowed.addUnchecked(reg);
            if (!maybeEvict(reg))
                return false;
        }
    }
    return true;
}
GreedyAllocator::VirtualRegister *
GreedyAllocator::otherHalfOfNunbox(VirtualRegister *vreg)
{
    signed offset = OffsetToOtherHalfOfNunbox(vreg->type());
    VirtualRegister *other = &vars[vreg->def->virtualRegister() + offset];
    AssertTypesFormANunbox(vreg->type(), other->type());
    return other;
}
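// For readers unfamiliar with NUNBOX32: on 32-bit targets a boxed Value is
// split into a type half and a payload half, which occupy adjacent virtual
// registers and adjacent stack slots. The helpers used above
// (BaseOfNunboxSlot, OffsetOfNunboxSlot, OffsetToOtherHalfOfNunbox) are not
// part of this excerpt; the following is a minimal self-contained sketch of
// the arithmetic they are assumed to perform, with invented offsets.
#include <cassert>
#include <cstdint>

enum class Half { Type, Payload };

// Assumed layout: the payload sits at the Value's base slot and the type one
// slot above it (this choice is illustrative, not SpiderMonkey's).
constexpr uint32_t OffsetOfNunboxSlot(Half h) {
    return h == Half::Payload ? 0 : 1;
}

// Recover the base slot of the whole Value from the slot of one half.
constexpr uint32_t BaseOfNunboxSlot(Half h, uint32_t halfSlot) {
    return halfSlot + OffsetOfNunboxSlot(h);
}

// The two halves are adjacent in vreg numbering as well, so the other half
// lives at a fixed signed offset from this one.
constexpr int OffsetToOtherHalfOfNunbox(Half h) {
    return h == Half::Type ? 1 : -1; // assumes the type half is numbered first
}

int main() {
    // If the payload half is spilled at slot 8, the Value's base is slot 8,
    // and the type half lands at base - OffsetOfNunboxSlot(Type) = 7,
    // mirroring "index = stackSlot - OffsetOfNunboxSlot(vr->type())" above.
    uint32_t base = BaseOfNunboxSlot(Half::Payload, 8);
    uint32_t typeSlot = base - OffsetOfNunboxSlot(Half::Type);
    assert(typeSlot == 7);
    assert(BaseOfNunboxSlot(Half::Type, typeSlot) == base);
    assert(OffsetToOtherHalfOfNunbox(Half::Payload) == -1);
    return 0;
}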
void
JSONSpewer::spewIntervals(LinearScanAllocator *regalloc)
{
    if (!fp_)
        return;

    beginObjectProperty("intervals");
    beginListProperty("blocks");

    for (size_t bno = 0; bno < regalloc->graph.numBlocks(); bno++) {
        beginObject();
        integerProperty("number", bno);
        beginListProperty("vregs");

        LBlock *lir = regalloc->graph.getBlock(bno);
        for (LInstructionIterator ins = lir->begin(); ins != lir->end(); ins++) {
            for (size_t k = 0; k < ins->numDefs(); k++) {
                VirtualRegister *vreg = &regalloc->vregs[ins->getDef(k)->virtualRegister()];

                beginObject();
                integerProperty("vreg", vreg->reg());
                beginListProperty("intervals");

                for (size_t i = 0; i < vreg->numIntervals(); i++) {
                    LiveInterval *live = vreg->getInterval(i);

                    if (live->numRanges()) {
                        beginObject();
                        property("allocation");
                        fprintf(fp_, "\"");
                        LAllocation::PrintAllocation(fp_, live->getAllocation());
                        fprintf(fp_, "\"");
                        beginListProperty("ranges");

                        for (size_t j = 0; j < live->numRanges(); j++) {
                            beginObject();
                            integerProperty("start", live->getRange(j)->from.pos());
                            integerProperty("end", live->getRange(j)->to.pos());
                            endObject();
                        }

                        endList();
                        endObject();
                    }
                }

                endList();
                endObject();
            }
        }

        endList();
        endObject();
    }

    endList();
    endObject();
}
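// Pieced together from the property names above, the JSON emitted by
// spewIntervals() has roughly the following shape (the concrete values here
// are invented for illustration):
//
//   {"intervals": {"blocks": [
//     {"number": 0, "vregs": [
//       {"vreg": 5, "intervals": [
//         {"allocation": "eax", "ranges": [{"start": 12, "end": 20}]}]}]}]}}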
 VirtualRegister assign(const Vector<unsigned>& allocation, VirtualRegister src)
 {
     VirtualRegister result = src;
     if (result.isLocal()) {
         unsigned myAllocation = allocation[result.toLocal()];
         if (myAllocation == UINT_MAX)
             result = VirtualRegister();
         else
             result = virtualRegisterForLocal(myAllocation);
     }
     return result;
 }
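// A rough, self-contained illustration of assign() in action, using a
// simplified stand-in for JSC's VirtualRegister (locals encoded as negative
// operand offsets). The encoding and names below are simplifications for the
// example, not the real classes.
#include <cassert>
#include <climits>
#include <vector>

struct VirtualRegister {
    int offset = INT_MAX;                        // INT_MAX marks "invalid"
    bool isValid() const { return offset != INT_MAX; }
    bool isLocal() const { return isValid() && offset < 0; }
    unsigned toLocal() const { return static_cast<unsigned>(-1 - offset); }
};

VirtualRegister virtualRegisterForLocal(unsigned local) {
    return VirtualRegister{-1 - static_cast<int>(local)};
}

// Same logic as assign() above: dead locals (UINT_MAX) become invalid,
// live locals are renumbered to their packed slot.
VirtualRegister assign(const std::vector<unsigned>& allocation, VirtualRegister src) {
    VirtualRegister result = src;
    if (result.isLocal()) {
        unsigned myAllocation = allocation[result.toLocal()];
        result = myAllocation == UINT_MAX
            ? VirtualRegister()
            : virtualRegisterForLocal(myAllocation);
    }
    return result;
}

int main() {
    // Locals 1 and 3 are live, 0 and 2 are dead: the packing is 1->0, 3->1.
    std::vector<unsigned> allocation = {UINT_MAX, 0, UINT_MAX, 1};
    assert(assign(allocation, virtualRegisterForLocal(3)).toLocal() == 1);
    assert(!assign(allocation, virtualRegisterForLocal(2)).isValid());
    return 0;
}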
void
GreedyAllocator::assertValidRegisterState()
{
#ifdef DEBUG
    // Assert that a register is in state.free exactly when no vr maps to it,
    // and that any mapped vr claims that same register.
    for (AnyRegisterIterator iter; iter.more(); iter++) {
        AnyRegister reg = *iter;
        VirtualRegister *vr = state[reg];
        JS_ASSERT(!vr == state.free.has(reg));
        JS_ASSERT_IF(vr, vr->reg() == reg);
    }
#endif
}
void
JSONSpewer::spewRanges(BacktrackingAllocator* regalloc)
{
    if (!fp_)
        return;

    beginObjectProperty("ranges");
    beginListProperty("blocks");

    for (size_t bno = 0; bno < regalloc->graph.numBlocks(); bno++) {
        beginObject();
        integerProperty("number", bno);
        beginListProperty("vregs");

        LBlock* lir = regalloc->graph.getBlock(bno);
        for (LInstructionIterator ins = lir->begin(); ins != lir->end(); ins++) {
            for (size_t k = 0; k < ins->numDefs(); k++) {
                uint32_t id = ins->getDef(k)->virtualRegister();
                VirtualRegister* vreg = &regalloc->vregs[id];

                beginObject();
                integerProperty("vreg", id);
                beginListProperty("ranges");

                for (LiveRange::RegisterLinkIterator iter = vreg->rangesBegin(); iter; iter++) {
                    LiveRange* range = LiveRange::get(*iter);

                    beginObject();
                    property("allocation");
                    fprintf(fp_, "\"%s\"", range->bundle()->allocation().toString());
                    integerProperty("start", range->from().bits());
                    integerProperty("end", range->to().bits());
                    endObject();
                }

                endList();
                endObject();
            }
        }

        endList();
        endObject();
    }

    endList();
    endObject();
}
void OSREntryData::dumpInContext(PrintStream& out, DumpContext* context) const
{
    out.print("bc#", m_bytecodeIndex, ", machine code offset = ", m_machineCodeOffset);
    out.print(", stack rules = [");
    
    auto printOperand = [&] (VirtualRegister reg) {
        out.print(inContext(m_expectedValues.operand(reg), context), " (");
        VirtualRegister toReg;
        bool overwritten = false;
        for (OSREntryReshuffling reshuffling : m_reshufflings) {
            if (reg == VirtualRegister(reshuffling.fromOffset)) {
                toReg = VirtualRegister(reshuffling.toOffset);
                break;
            }
            if (reg == VirtualRegister(reshuffling.toOffset))
                overwritten = true;
        }
        if (!overwritten && !toReg.isValid())
            toReg = reg;
        if (toReg.isValid()) {
            if (toReg.isLocal() && !m_machineStackUsed.get(toReg.toLocal()))
                out.print("ignored");
            else
                out.print("maps to ", toReg);
        } else
            out.print("overwritten");
        if (reg.isLocal() && m_localsForcedDouble.get(reg.toLocal()))
            out.print(", forced double");
        if (reg.isLocal() && m_localsForcedAnyInt.get(reg.toLocal()))
            out.print(", forced machine int");
        out.print(")");
    };
    
    CommaPrinter comma;
    for (size_t argumentIndex = m_expectedValues.numberOfArguments(); argumentIndex--;) {
        out.print(comma, "arg", argumentIndex, ":");
        printOperand(virtualRegisterForArgument(argumentIndex));
    }
    for (size_t localIndex = 0; localIndex < m_expectedValues.numberOfLocals(); ++localIndex) {
        out.print(comma, "loc", localIndex, ":");
        printOperand(virtualRegisterForLocal(localIndex));
    }
    
    out.print("], machine stack used = ", m_machineStackUsed);
}
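// Reading the print calls above, one OSREntryData dumps as a single line of
// roughly this form (the operand values and mappings are invented for
// illustration):
//
//   bc#42, machine code offset = 1312, stack rules = [arg1:None (maps to arg1),
//   loc0:Int32 (maps to loc2), loc1:None (ignored)], machine stack used = ...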
bool argumentsInvolveStackSlot(InlineCallFrame* inlineCallFrame, VirtualRegister reg)
{
    if (!inlineCallFrame)
        return (reg.isArgument() && reg.toArgument()) || reg.isHeader();
    
    if (inlineCallFrame->isClosureCall
        && reg == VirtualRegister(inlineCallFrame->stackOffset + CallFrameSlot::callee))
        return true;
    
    if (inlineCallFrame->isVarargs()
        && reg == VirtualRegister(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount))
        return true;
    
    unsigned numArguments = inlineCallFrame->arguments.size() - 1;
    VirtualRegister argumentStart =
        VirtualRegister(inlineCallFrame->stackOffset) + CallFrame::argumentOffset(0);
    return reg >= argumentStart && reg < argumentStart + numArguments;
}
void
GreedyAllocator::informSnapshot(LInstruction *ins)
{
    LSnapshot *snapshot = ins->snapshot();
    for (size_t i = 0; i < snapshot->numEntries(); i++) {
        LAllocation *a = snapshot->getEntry(i);
        if (!a->isUse())
            continue;

        // Every definition in a snapshot gets a stack slot. This
        // simplification means we can treat normal snapshots and LOsiPoint
        // snapshots (which follow calls) the same, without adding a special
        // exception to note that registers are spilled at the LOsiPoint.
        VirtualRegister *vr = getVirtualRegister(a->toUse());
        allocateStack(vr);
        *a = vr->backingStack();
    }
}
EncodedJSValue JSLexicalEnvironment::argumentsGetter(ExecState*, JSObject* slotBase, EncodedJSValue, PropertyName)
{
    JSLexicalEnvironment* lexicalEnvironment = jsCast<JSLexicalEnvironment*>(slotBase);
    CallFrame* callFrame = CallFrame::create(reinterpret_cast<Register*>(lexicalEnvironment->m_registers));
    // Note: this unconditional return makes the rest of the function dead
    // code; the getter as written always reports undefined.
    return JSValue::encode(jsUndefined());

    VirtualRegister argumentsRegister = callFrame->codeBlock()->argumentsRegister();
    if (JSValue arguments = callFrame->uncheckedR(argumentsRegister.offset()).jsValue())
        return JSValue::encode(arguments);
    int realArgumentsRegister = unmodifiedArgumentsRegister(argumentsRegister).offset();

    JSValue arguments = JSValue(Arguments::create(callFrame->vm(), callFrame));
    callFrame->uncheckedR(argumentsRegister.offset()) = arguments;
    callFrame->uncheckedR(realArgumentsRegister) = arguments;
    
    ASSERT(callFrame->uncheckedR(realArgumentsRegister).jsValue().inherits(Arguments::info()));
    return JSValue::encode(callFrame->uncheckedR(realArgumentsRegister).jsValue());
}
bool
GreedyAllocator::evict(AnyRegister reg)
{
    VirtualRegister *vr = state[reg];
    JS_ASSERT(vr->reg() == reg);

    // If the virtual register does not have a stack slot, allocate one now.
    allocateStack(vr);

    // We're allocating bottom-up, so eviction *restores* a register, otherwise
    // it could not be used downstream.
    if (!restore(vr->backingStack(), reg))
        return false;

    freeReg(reg);
    vr->unsetRegister();
    return true;
}
void
C1Spewer::spewRanges(GenericPrinter& out, BacktrackingAllocator* regalloc, LNode* ins)
{
    for (size_t k = 0; k < ins->numDefs(); k++) {
        uint32_t id = ins->getDef(k)->virtualRegister();
        VirtualRegister* vreg = &regalloc->vregs[id];

        for (LiveRange::RegisterLinkIterator iter = vreg->rangesBegin(); iter; iter++) {
            LiveRange* range = LiveRange::get(*iter);
            out.printf("%d object \"", id);
            out.printf("%s", range->bundle()->allocation().toString().get());
            out.printf("\" %d -1", id);
            out.printf(" [%u, %u[", range->from().bits(), range->to().bits());
            for (UsePositionIterator usePos(range->usesBegin()); usePos; usePos++)
                out.printf(" %u M", usePos->pos.bits());
            out.printf(" \"\"\n");
        }
    }
}
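// From the printf format strings above, each live range prints as one line of
// the C1 visualizer's interval format, e.g. (values invented):
//
//   7 object "rax" 7 -1 [10, 24[ 12 M ""
//
// where 7 is the vreg id, "rax" the bundle's allocation, [10, 24[ the
// half-open range, and "12 M" a use position.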
EncodedJSValue JSActivation::argumentsGetter(ExecState*, EncodedJSValue slotBase, EncodedJSValue, PropertyName)
{
    JSActivation* activation = jsCast<JSActivation*>(JSValue::decode(slotBase));
    CallFrame* callFrame = CallFrame::create(reinterpret_cast<Register*>(activation->m_registers));
    ASSERT(!activation->isTornOff() && (callFrame->codeBlock()->usesArguments() || callFrame->codeBlock()->usesEval()));
    if (activation->isTornOff() || !(callFrame->codeBlock()->usesArguments() || callFrame->codeBlock()->usesEval()))
        return JSValue::encode(jsUndefined());

    VirtualRegister argumentsRegister = callFrame->codeBlock()->argumentsRegister();
    if (JSValue arguments = callFrame->uncheckedR(argumentsRegister.offset()).jsValue())
        return JSValue::encode(arguments);
    int realArgumentsRegister = unmodifiedArgumentsRegister(argumentsRegister).offset();

    JSValue arguments = JSValue(Arguments::create(callFrame->vm(), callFrame));
    callFrame->uncheckedR(argumentsRegister.offset()) = arguments;
    callFrame->uncheckedR(realArgumentsRegister) = arguments;
    
    ASSERT(callFrame->uncheckedR(realArgumentsRegister).jsValue().inherits(Arguments::info()));
    return JSValue::encode(callFrame->uncheckedR(realArgumentsRegister).jsValue());
}
bool
GreedyAllocator::prescanUses(LInstruction *ins)
{
    for (size_t i = 0; i < ins->numOperands(); i++) {
        LAllocation *a = ins->getOperand(i);
        if (!a->isUse()) {
            JS_ASSERT(a->isConstant());
            continue;
        }

        LUse *use = a->toUse();
        VirtualRegister *vr = getVirtualRegister(use);
        if (use->policy() == LUse::FIXED) {
            // A def or temp may use the same register, so we have to use the
            // unchecked version.
            disallowed.addUnchecked(GetFixedRegister(vr->def, use));
        } else if (vr->hasRegister()) {
            discouraged.addUnchecked(vr->reg());
        }
    }
    return true;
}
bool
GreedyAllocator::buildPhiMoves(LBlock *block)
{
    IonSpew(IonSpew_RegAlloc, " Merging phi state."); 

    phiMoves = Mover();

    MBasicBlock *mblock = block->mir();
    if (!mblock->successorWithPhis())
        return true;

    // Insert moves from our state into our successor's phi.
    uint32 pos = mblock->positionInPhiSuccessor();
    LBlock *successor = mblock->successorWithPhis()->lir();
    for (size_t i = 0; i < successor->numPhis(); i++) {
        LPhi *phi = successor->getPhi(i);
        JS_ASSERT(phi->numDefs() == 1);

        VirtualRegister *phiReg = getVirtualRegister(phi->getDef(0));
        allocateStack(phiReg);

        LAllocation *in = phi->getOperand(pos);
        VirtualRegister *inReg = getVirtualRegister(in->toUse());
        allocateStack(inReg);

        // Try to get a register for the input.
        if (!inReg->hasRegister() && !allocatableRegs().empty(inReg->isDouble())) {
            if (!allocateReg(inReg))
                return false;
        }

        // Add a move from the input to the phi.
        if (inReg->hasRegister()) {
            if (!phiMoves.move(inReg->reg(), phiReg->backingStack()))
                return false;
        } else {
            if (!phiMoves.move(inReg->backingStack(), phiReg->backingStack()))
                return false;
        }
    }

    return true;
}
void
C1Spewer::spewIntervals(FILE *fp, LinearScanAllocator *regalloc, LInstruction *ins, size_t &nextId)
{
    for (size_t k = 0; k < ins->numDefs(); k++) {
        VirtualRegister *vreg = &regalloc->vregs[ins->getDef(k)->virtualRegister()];

        for (size_t i = 0; i < vreg->numIntervals(); i++) {
            LiveInterval *live = vreg->getInterval(i);
            if (live->numRanges()) {
                fprintf(fp, "%d object \"", (i == 0) ? vreg->id() : int32_t(nextId++));
                fprintf(fp, "%s", live->getAllocation()->toString());
                fprintf(fp, "\" %d -1", vreg->id());
                for (size_t j = 0; j < live->numRanges(); j++) {
                    fprintf(fp, " [%d, %d[", live->getRange(j)->from.pos(),
                            live->getRange(j)->to.pos());
                }
                for (UsePositionIterator usePos(live->usesBegin()); usePos != live->usesEnd(); usePos++)
                    fprintf(fp, " %d M", usePos->pos.pos());
                fprintf(fp, " \"\"\n");
            }
        }
    }
}
void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, ValueRecovery argCountRecovery, VirtualRegister firstArgumentReg, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
{
    CCallHelpers::JumpList end;
    
    if (argCountRecovery.isConstant()) {
        // FIXME: We could constant-fold a lot of the computation below in this case.
        // https://bugs.webkit.org/show_bug.cgi?id=141486
        jit.move(CCallHelpers::TrustedImm32(argCountRecovery.constant().asInt32()), scratchGPR1);
    } else
        jit.load32(CCallHelpers::payloadFor(argCountRecovery.virtualRegister()), scratchGPR1);
    if (firstVarArgOffset) {
        CCallHelpers::Jump sufficientArguments = jit.branch32(CCallHelpers::GreaterThan, scratchGPR1, CCallHelpers::TrustedImm32(firstVarArgOffset + 1));
        jit.move(CCallHelpers::TrustedImm32(1), scratchGPR1);
        CCallHelpers::Jump endVarArgs = jit.jump();
        sufficientArguments.link(&jit);
        jit.sub32(CCallHelpers::TrustedImm32(firstVarArgOffset), scratchGPR1);
        endVarArgs.link(&jit);
    }
    slowCase.append(jit.branch32(CCallHelpers::Above, scratchGPR1, CCallHelpers::TrustedImm32(maxArguments + 1)));
    
    emitSetVarargsFrame(jit, scratchGPR1, true, numUsedSlotsGPR, scratchGPR2);

    slowCase.append(jit.branchPtr(CCallHelpers::Above, CCallHelpers::AbsoluteAddress(jit.vm()->addressOfStackLimit()), scratchGPR2));

    // Initialize ArgumentCount.
    jit.store32(scratchGPR1, CCallHelpers::Address(scratchGPR2, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset));

    // Copy arguments.
    jit.signExtend32ToPtr(scratchGPR1, scratchGPR1);
    CCallHelpers::Jump done = jit.branchSubPtr(CCallHelpers::Zero, CCallHelpers::TrustedImm32(1), scratchGPR1);
    // scratchGPR1: argumentCount

    CCallHelpers::Label copyLoop = jit.label();
    int argOffset = (firstArgumentReg.offset() - 1 + firstVarArgOffset) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
    jit.load64(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset), scratchGPR3);
    jit.store64(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
#else // USE(JSVALUE64), so this begins the 32-bit case
    jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + TagOffset), scratchGPR3);
    jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + TagOffset));
    jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + PayloadOffset), scratchGPR3);
    jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + PayloadOffset));
#endif // USE(JSVALUE64), end of 32-bit case
    jit.branchSubPtr(CCallHelpers::NonZero, CCallHelpers::TrustedImm32(1), scratchGPR1).linkTo(copyLoop, &jit);
    
    done.link(&jit);
}
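// In scalar terms, the firstVarArgOffset adjustment emitted above computes
// something like the following (a simplified model of the generated code, not
// a JSC API; argument counts include `this`):
#include <cassert>

unsigned adjustedArgumentCount(unsigned argCountIncludingThis, unsigned firstVarArgOffset) {
    if (argCountIncludingThis > firstVarArgOffset + 1)
        return argCountIncludingThis - firstVarArgOffset; // skip leading args
    return 1; // too few arguments: only `this` survives
}

int main() {
    assert(adjustedArgumentCount(5, 2) == 3); // skip two leading arguments
    assert(adjustedArgumentCount(2, 2) == 1); // clamped to `this` alone
    return 0;
}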
    bool run()
    {
        SharedSymbolTable* symbolTable = codeBlock()->symbolTable();

        // This enumerates the locals that we actually care about and packs them. So for example
        // if we use local 1, 3, 4, 5, 7, then we remap them: 1->0, 3->1, 4->2, 5->3, 7->4. We
        // treat a variable as being "used" if there exists an access to it (SetLocal, GetLocal,
        // Flush, PhantomLocal).
        
        BitVector usedLocals;
        
        // Collect those variables that are used from IR.
        bool hasGetLocalUnlinked = false;
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case GetLocal:
                case SetLocal:
                case Flush:
                case PhantomLocal: {
                    VariableAccessData* variable = node->variableAccessData();
                    if (variable->local().isArgument())
                        break;
                    usedLocals.set(variable->local().toLocal());
                    break;
                }
                    
                case GetLocalUnlinked: {
                    VirtualRegister operand = node->unlinkedLocal();
                    if (operand.isArgument())
                        break;
                    usedLocals.set(operand.toLocal());
                    hasGetLocalUnlinked = true;
                    break;
                }
                    
                default:
                    break;
                }
            }
        }
        
        // Ensure that captured variables and captured inline arguments are pinned down.
        // They should have been because of flushes, except that the flushes can be optimized
        // away.
        if (symbolTable) {
            for (int i = symbolTable->captureStart(); i > symbolTable->captureEnd(); i--)
                usedLocals.set(VirtualRegister(i).toLocal());
        }
        if (codeBlock()->usesArguments()) {
            usedLocals.set(codeBlock()->argumentsRegister().toLocal());
            usedLocals.set(unmodifiedArgumentsRegister(codeBlock()->argumentsRegister()).toLocal());
        }
        if (codeBlock()->uncheckedActivationRegister().isValid())
            usedLocals.set(codeBlock()->activationRegister().toLocal());
        for (InlineCallFrameSet::iterator iter = m_graph.m_inlineCallFrames->begin(); !!iter; ++iter) {
            InlineCallFrame* inlineCallFrame = *iter;
            if (!inlineCallFrame->executable->usesArguments())
                continue;
            
            VirtualRegister argumentsRegister = m_graph.argumentsRegisterFor(inlineCallFrame);
            usedLocals.set(argumentsRegister.toLocal());
            usedLocals.set(unmodifiedArgumentsRegister(argumentsRegister).toLocal());
            
            for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
                usedLocals.set(VirtualRegister(
                    virtualRegisterForArgument(argument).offset() +
                    inlineCallFrame->stackOffset).toLocal());
            }
        }
        
        Vector<unsigned> allocation(usedLocals.size());
        m_graph.m_nextMachineLocal = 0;
        for (unsigned i = 0; i < usedLocals.size(); ++i) {
            if (!usedLocals.get(i)) {
                allocation[i] = UINT_MAX;
                continue;
            }
            
            allocation[i] = m_graph.m_nextMachineLocal++;
        }
        
        for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
            VariableAccessData* variable = &m_graph.m_variableAccessData[i];
            if (!variable->isRoot())
                continue;
            
            if (variable->local().isArgument()) {
                variable->machineLocal() = variable->local();
                continue;
            }
            
            size_t local = variable->local().toLocal();
            if (local >= allocation.size())
                continue;
            
            if (allocation[local] == UINT_MAX)
                continue;
            
            variable->machineLocal() = virtualRegisterForLocal(
                allocation[variable->local().toLocal()]);
        }
        
        if (codeBlock()->usesArguments()) {
            VirtualRegister argumentsRegister = virtualRegisterForLocal(
                allocation[codeBlock()->argumentsRegister().toLocal()]);
            RELEASE_ASSERT(
                virtualRegisterForLocal(allocation[
                    unmodifiedArgumentsRegister(
                        codeBlock()->argumentsRegister()).toLocal()])
                == unmodifiedArgumentsRegister(argumentsRegister));
            codeBlock()->setArgumentsRegister(argumentsRegister);
        }
        
        if (codeBlock()->uncheckedActivationRegister().isValid()) {
            codeBlock()->setActivationRegister(
                virtualRegisterForLocal(allocation[codeBlock()->activationRegister().toLocal()]));
        }
        
        for (unsigned i = m_graph.m_inlineVariableData.size(); i--;) {
            InlineVariableData data = m_graph.m_inlineVariableData[i];
            InlineCallFrame* inlineCallFrame = data.inlineCallFrame;
            
            if (inlineCallFrame->executable->usesArguments()) {
                inlineCallFrame->argumentsRegister = virtualRegisterForLocal(
                    allocation[m_graph.argumentsRegisterFor(inlineCallFrame).toLocal()]);

                RELEASE_ASSERT(
                    virtualRegisterForLocal(allocation[unmodifiedArgumentsRegister(
                        m_graph.argumentsRegisterFor(inlineCallFrame)).toLocal()])
                    == unmodifiedArgumentsRegister(inlineCallFrame->argumentsRegister));
            }
            
            for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
                ArgumentPosition& position = m_graph.m_argumentPositions[
                    data.argumentPositionStart + argument];
                VariableAccessData* variable = position.someVariable();
                ValueSource source;
                if (!variable)
                    source = ValueSource(SourceIsDead);
                else {
                    source = ValueSource::forFlushFormat(
                        variable->machineLocal(), variable->flushFormat());
                }
                inlineCallFrame->arguments[argument] = source.valueRecovery();
            }
            
            RELEASE_ASSERT(inlineCallFrame->isClosureCall == !!data.calleeVariable);
            if (inlineCallFrame->isClosureCall) {
                ValueSource source = ValueSource::forFlushFormat(
                    data.calleeVariable->machineLocal(),
                    data.calleeVariable->flushFormat());
                inlineCallFrame->calleeRecovery = source.valueRecovery();
            } else
                RELEASE_ASSERT(inlineCallFrame->calleeRecovery.isConstant());
        }
        
        if (symbolTable) {
            if (symbolTable->captureCount()) {
                unsigned captureStartLocal = allocation[
                    VirtualRegister(codeBlock()->symbolTable()->captureStart()).toLocal()];
                ASSERT(captureStartLocal != UINT_MAX);
                m_graph.m_machineCaptureStart = virtualRegisterForLocal(captureStartLocal).offset();
            } else
                m_graph.m_machineCaptureStart = virtualRegisterForLocal(0).offset();
        
            // This is an abomination. If we had captured an argument then the argument ends
            // up being "slow", meaning that loads of the argument go through an extra lookup
            // table.
            if (const SlowArgument* slowArguments = symbolTable->slowArguments()) {
                auto newSlowArguments = std::make_unique<SlowArgument[]>(
                    symbolTable->parameterCount());
                for (size_t i = symbolTable->parameterCount(); i--;) {
                    newSlowArguments[i] = slowArguments[i];
                    VirtualRegister reg = VirtualRegister(slowArguments[i].index);
                    if (reg.isLocal())
                        newSlowArguments[i].index = virtualRegisterForLocal(allocation[reg.toLocal()]).offset();
                }
            
                m_graph.m_slowArguments = std::move(newSlowArguments);
            }
        }
        
        // Fix GetLocalUnlinked's variable references.
        if (hasGetLocalUnlinked) {
            for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
                BasicBlock* block = m_graph.block(blockIndex);
                if (!block)
                    continue;
                for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                    Node* node = block->at(nodeIndex);
                    switch (node->op()) {
                    case GetLocalUnlinked: {
                        VirtualRegister operand = node->unlinkedLocal();
                        if (operand.isLocal())
                            operand = virtualRegisterForLocal(allocation[operand.toLocal()]);
                        node->setUnlinkedMachineLocal(operand);
                        break;
                    }
                        
                    default:
                        break;
                    }
                }
            }
        }
        
        return true;
    }
bool
GreedyAllocator::allocateRegisters()
{
    // Allocate registers bottom-up, such that we see all uses before their
    // definitions. (The unsigned counter wraps around past zero, which is
    // what terminates the loop.)
    for (size_t i = graph.numBlocks() - 1; i < graph.numBlocks(); i--) {
        LBlock *block = graph.getBlock(i);

        IonSpew(IonSpew_RegAlloc, "Allocating block %d", (uint32)i);

        // All registers should be free.
        JS_ASSERT(state.free == RegisterSet::All());

        // Allocate stack for any phis.
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi *phi = block->getPhi(j);
            VirtualRegister *vreg = getVirtualRegister(phi->getDef(0));
            allocateStack(vreg);
        }

        // Allocate registers.
        if (!allocateRegistersInBlock(block))
            return false;

        LMoveGroup *entrySpills = block->getEntryMoveGroup();

        // We've reached the top of the block. Spill all registers by inserting
        // moves from their stack locations.
        for (AnyRegisterIterator iter(RegisterSet::All()); iter.more(); iter++) {
            VirtualRegister *vreg = state[*iter];
            if (!vreg) {
                JS_ASSERT(state.free.has(*iter));
                continue;
            }

            JS_ASSERT(vreg->reg() == *iter);
            JS_ASSERT(!state.free.has(vreg->reg()));
            allocateStack(vreg);

            LAllocation *from = LAllocation::New(vreg->backingStack());
            LAllocation *to = LAllocation::New(vreg->reg());
            if (!entrySpills->add(from, to))
                return false;

            killReg(vreg);
            vreg->unsetRegister();
        }

        // Before killing phis, ensure that each phi input has its own stack
        // allocation. This ensures we won't allocate the same slot for any phi
        // as its input, which technically may be legal (since the phi becomes
        // the last use of the slot), but which we avoid for sanity.
        for (size_t i = 0; i < block->numPhis(); i++) {
            LPhi *phi = block->getPhi(i);
            for (size_t j = 0; j < phi->numOperands(); j++) {
                VirtualRegister *in = getVirtualRegister(phi->getOperand(j)->toUse());
                allocateStack(in);
            }
        }

        // Kill phis.
        for (size_t i = 0; i < block->numPhis(); i++) {
            LPhi *phi = block->getPhi(i);
            VirtualRegister *vr = getVirtualRegister(phi->getDef(0));
            JS_ASSERT(!vr->hasRegister());
            killStack(vr);
        }
    }
    return true;
}
    void handleBlockForTryCatch(BasicBlock* block, InsertionSet& insertionSet)
    {
        HandlerInfo* currentExceptionHandler = nullptr;
        FastBitVector liveAtCatchHead;
        liveAtCatchHead.resize(m_graph.block(0)->variablesAtTail.numberOfLocals());

        HandlerInfo* cachedHandlerResult;
        CodeOrigin cachedCodeOrigin;
        auto catchHandler = [&] (CodeOrigin origin) -> HandlerInfo* {
            ASSERT(origin);
            if (origin == cachedCodeOrigin)
                return cachedHandlerResult;

            unsigned bytecodeIndexToCheck = origin.bytecodeIndex;

            cachedCodeOrigin = origin;

            while (1) {
                InlineCallFrame* inlineCallFrame = origin.inlineCallFrame;
                CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
                if (HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeIndexToCheck)) {
                    liveAtCatchHead.clearAll();

                    unsigned catchBytecodeIndex = handler->target;
                    m_graph.forAllLocalsLiveInBytecode(CodeOrigin(catchBytecodeIndex, inlineCallFrame), [&] (VirtualRegister operand) {
                        liveAtCatchHead[operand.toLocal()] = true;
                    });

                    cachedHandlerResult = handler;
                    break;
                }

                if (!inlineCallFrame) {
                    cachedHandlerResult = nullptr;
                    break;
                }

                bytecodeIndexToCheck = inlineCallFrame->directCaller.bytecodeIndex;
                origin = inlineCallFrame->directCaller;
            }

            return cachedHandlerResult;
        };

        Operands<VariableAccessData*> currentBlockAccessData(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr);
        HashSet<InlineCallFrame*> seenInlineCallFrames;

        auto flushEverything = [&] (NodeOrigin origin, unsigned index) {
            RELEASE_ASSERT(currentExceptionHandler);
            auto flush = [&] (VirtualRegister operand, bool alwaysInsert) {
                if ((operand.isLocal() && liveAtCatchHead[operand.toLocal()]) 
                    || operand.isArgument()
                    || alwaysInsert) {

                    ASSERT(isValidFlushLocation(block, index, operand));

                    VariableAccessData* accessData = currentBlockAccessData.operand(operand);
                    if (!accessData)
                        accessData = newVariableAccessData(operand);

                    currentBlockAccessData.operand(operand) = accessData;

                    insertionSet.insertNode(index, SpecNone, 
                        Flush, origin, OpInfo(accessData));
                }
            };

            for (unsigned local = 0; local < block->variablesAtTail.numberOfLocals(); local++)
                flush(virtualRegisterForLocal(local), false);
            for (InlineCallFrame* inlineCallFrame : seenInlineCallFrames)
                flush(VirtualRegister(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset()), true);
            flush(VirtualRegister(CallFrame::thisArgumentOffset()), true);

            seenInlineCallFrames.clear();
        };

        for (unsigned nodeIndex = 0; nodeIndex < block->size(); nodeIndex++) {
            Node* node = block->at(nodeIndex);

            {
                HandlerInfo* newHandler = catchHandler(node->origin.semantic);
                if (newHandler != currentExceptionHandler && currentExceptionHandler)
                    flushEverything(node->origin, nodeIndex);
                currentExceptionHandler = newHandler;
            }

            if (currentExceptionHandler && (node->op() == SetLocal || node->op() == SetArgument)) {
                InlineCallFrame* inlineCallFrame = node->origin.semantic.inlineCallFrame;
                if (inlineCallFrame)
                    seenInlineCallFrames.add(inlineCallFrame);
                VirtualRegister operand = node->local();

                int stackOffset = inlineCallFrame ? inlineCallFrame->stackOffset : 0;
                if ((operand.isLocal() && liveAtCatchHead[operand.toLocal()])
                    || operand.isArgument()
                    || (operand.offset() == stackOffset + CallFrame::thisArgumentOffset())) {

                    ASSERT(isValidFlushLocation(block, nodeIndex, operand));

                    VariableAccessData* variableAccessData = currentBlockAccessData.operand(operand);
                    if (!variableAccessData)
                        variableAccessData = newVariableAccessData(operand);

                    insertionSet.insertNode(nodeIndex, SpecNone, 
                        Flush, node->origin, OpInfo(variableAccessData));
                }
            }

            if (node->accessesStack(m_graph))
                currentBlockAccessData.operand(node->local()) = node->variableAccessData();
        }

        if (currentExceptionHandler) {
            NodeOrigin origin = block->at(block->size() - 1)->origin;
            flushEverything(origin, block->size());
        }
    }
    bool run()
    {
        // This enumerates the locals that we actually care about and packs them. So for example
        // if we use local 1, 3, 4, 5, 7, then we remap them: 1->0, 3->1, 4->2, 5->3, 7->4. We
        // treat a variable as being "used" if there exists an access to it (SetLocal, GetLocal,
        // Flush, PhantomLocal).
        
        BitVector usedLocals;
        
        // Collect those variables that are used from IR.
        bool hasNodesThatNeedFixup = false;
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case GetLocal:
                case SetLocal:
                case Flush:
                case PhantomLocal: {
                    VariableAccessData* variable = node->variableAccessData();
                    if (variable->local().isArgument())
                        break;
                    usedLocals.set(variable->local().toLocal());
                    break;
                }
                    
                case GetLocalUnlinked: {
                    VirtualRegister operand = node->unlinkedLocal();
                    if (operand.isArgument())
                        break;
                    usedLocals.set(operand.toLocal());
                    hasNodesThatNeedFixup = true;
                    break;
                }
                    
                case LoadVarargs:
                case ForwardVarargs: {
                    LoadVarargsData* data = node->loadVarargsData();
                    if (data->count.isLocal())
                        usedLocals.set(data->count.toLocal());
                    if (data->start.isLocal()) {
                        // This part really relies on the contiguity of stack layout
                        // assignments.
                        ASSERT(VirtualRegister(data->start.offset() + data->limit - 1).isLocal());
                        for (unsigned i = data->limit; i--;) 
                            usedLocals.set(VirtualRegister(data->start.offset() + i).toLocal());
                    } // the else case shouldn't happen.
                    hasNodesThatNeedFixup = true;
                    break;
                }
                    
                case PutStack:
                case GetStack: {
                    StackAccessData* stack = node->stackAccessData();
                    if (stack->local.isArgument())
                        break;
                    usedLocals.set(stack->local.toLocal());
                    break;
                }
                    
                default:
                    break;
                }
            }
        }
        
        for (InlineCallFrameSet::iterator iter = m_graph.m_plan.inlineCallFrames->begin(); !!iter; ++iter) {
            InlineCallFrame* inlineCallFrame = *iter;
            
            if (inlineCallFrame->isVarargs()) {
                usedLocals.set(VirtualRegister(
                    JSStack::ArgumentCount + inlineCallFrame->stackOffset).toLocal());
            }
            
            for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
                usedLocals.set(VirtualRegister(
                    virtualRegisterForArgument(argument).offset() +
                    inlineCallFrame->stackOffset).toLocal());
            }
        }
        
        Vector<unsigned> allocation(usedLocals.size());
        m_graph.m_nextMachineLocal = 0;
        for (unsigned i = 0; i < usedLocals.size(); ++i) {
            if (!usedLocals.get(i)) {
                allocation[i] = UINT_MAX;
                continue;
            }
            
            allocation[i] = m_graph.m_nextMachineLocal++;
        }
        
        for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
            VariableAccessData* variable = &m_graph.m_variableAccessData[i];
            if (!variable->isRoot())
                continue;
            
            if (variable->local().isArgument()) {
                variable->machineLocal() = variable->local();
                continue;
            }
            
            size_t local = variable->local().toLocal();
            if (local >= allocation.size())
                continue;
            
            if (allocation[local] == UINT_MAX)
                continue;
            
            variable->machineLocal() = assign(allocation, variable->local());
        }
        
        for (StackAccessData* data : m_graph.m_stackAccessData) {
            if (!data->local.isLocal()) {
                data->machineLocal = data->local;
                continue;
            }
            
            if (static_cast<size_t>(data->local.toLocal()) >= allocation.size())
                continue;
            if (allocation[data->local.toLocal()] == UINT_MAX)
                continue;
            
            data->machineLocal = assign(allocation, data->local);
        }
        
        // This register is never valid for DFG code blocks.
        codeBlock()->setActivationRegister(VirtualRegister());
        if (LIKELY(!m_graph.hasDebuggerEnabled()))
            codeBlock()->setScopeRegister(VirtualRegister());
        else
            codeBlock()->setScopeRegister(assign(allocation, codeBlock()->scopeRegister()));

        for (unsigned i = m_graph.m_inlineVariableData.size(); i--;) {
            InlineVariableData data = m_graph.m_inlineVariableData[i];
            InlineCallFrame* inlineCallFrame = data.inlineCallFrame;
            
            if (inlineCallFrame->isVarargs()) {
                inlineCallFrame->argumentCountRegister = assign(
                    allocation, VirtualRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount));
            }
            
            for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
                ArgumentPosition& position = m_graph.m_argumentPositions[
                    data.argumentPositionStart + argument];
                VariableAccessData* variable = position.someVariable();
                ValueSource source;
                if (!variable)
                    source = ValueSource(SourceIsDead);
                else {
                    source = ValueSource::forFlushFormat(
                        variable->machineLocal(), variable->flushFormat());
                }
                inlineCallFrame->arguments[argument] = source.valueRecovery();
            }
            
            RELEASE_ASSERT(inlineCallFrame->isClosureCall == !!data.calleeVariable);
            if (inlineCallFrame->isClosureCall) {
                VariableAccessData* variable = data.calleeVariable->find();
                ValueSource source = ValueSource::forFlushFormat(
                    variable->machineLocal(),
                    variable->flushFormat());
                inlineCallFrame->calleeRecovery = source.valueRecovery();
            } else
                RELEASE_ASSERT(inlineCallFrame->calleeRecovery.isConstant());
        }
        
        // Fix GetLocalUnlinked's variable references.
        if (hasNodesThatNeedFixup) {
            for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
                BasicBlock* block = m_graph.block(blockIndex);
                if (!block)
                    continue;
                for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                    Node* node = block->at(nodeIndex);
                    switch (node->op()) {
                    case GetLocalUnlinked: {
                        node->setUnlinkedMachineLocal(assign(allocation, node->unlinkedLocal()));
                        break;
                    }
                        
                    case LoadVarargs:
                    case ForwardVarargs: {
                        LoadVarargsData* data = node->loadVarargsData();
                        data->machineCount = assign(allocation, data->count);
                        data->machineStart = assign(allocation, data->start);
                        break;
                    }
                        
                    default:
                        break;
                    }
                }
            }
        }
        
        return true;
    }
void BytecodeGeneratorification::run()
{
    // We calculate the liveness at each merge point. This tells us,
    // conservatively, which registers must be saved and restored.

    {
        GeneratorLivenessAnalysis pass(*this);
        pass.run();
    }

    UnlinkedCodeBlock* codeBlock = m_graph.codeBlock();
    BytecodeRewriter rewriter(m_graph);

    // Set up the global switch for the generator.
    {
        unsigned nextToEnterPoint = enterPoint() + opcodeLength(op_enter);
        unsigned switchTableIndex = m_graph.codeBlock()->numberOfSwitchJumpTables();
        VirtualRegister state = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::State));
        auto& jumpTable = m_graph.codeBlock()->addSwitchJumpTable();
        jumpTable.min = 0;
        jumpTable.branchOffsets.resize(m_yields.size() + 1);
        jumpTable.branchOffsets.fill(0);
        jumpTable.add(0, nextToEnterPoint);
        for (unsigned i = 0; i < m_yields.size(); ++i)
            jumpTable.add(i + 1, m_yields[i].point);

        rewriter.insertFragmentBefore(nextToEnterPoint, [&](BytecodeRewriter::Fragment& fragment) {
            fragment.appendInstruction(op_switch_imm, switchTableIndex, nextToEnterPoint, state.offset());
        });
    }

    for (const YieldData& data : m_yields) {
        VirtualRegister scope = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::Frame));

        // Emit save sequence.
        rewriter.insertFragmentBefore(data.point, [&](BytecodeRewriter::Fragment& fragment) {
            data.liveness.forEachSetBit([&](size_t index) {
                VirtualRegister operand = virtualRegisterForLocal(index);
                Storage storage = storageForGeneratorLocal(index);

                fragment.appendInstruction(
                    op_put_to_scope,
                    scope.offset(), // scope
                    storage.identifierIndex, // identifier
                    operand.offset(), // value
                    GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand(), // info
                    m_generatorFrameSymbolTableIndex, // symbol table constant index
                    storage.scopeOffset.offset() // scope offset
                );
            });

            // Insert op_ret just after save sequence.
            fragment.appendInstruction(op_ret, data.argument);
        });

        // Emit resume sequence.
        rewriter.insertFragmentAfter(data.point, [&](BytecodeRewriter::Fragment& fragment) {
            data.liveness.forEachSetBit([&](size_t index) {
                VirtualRegister operand = virtualRegisterForLocal(index);
                Storage storage = storageForGeneratorLocal(index);

                UnlinkedValueProfile profile = codeBlock->addValueProfile();
                fragment.appendInstruction(
                    op_get_from_scope,
                    operand.offset(), // dst
                    scope.offset(), // scope
                    storage.identifierIndex, // identifier
                    GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand(), // info
                    0, // local scope depth
                    storage.scopeOffset.offset(), // scope offset
                    profile // profile
                );
            });
        });

        // Clip the unnecessary bytecodes.
        rewriter.removeBytecode(data.point);
    }

    rewriter.execute();
}
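// Conceptually, the switch installed above dispatches on the generator's
// resume state: state 0 falls through to the original entry point, and state
// i + 1 jumps to the resume point of the i-th yield. A toy model of that
// dispatch (names and offsets invented):
#include <cstdio>
#include <vector>

struct JumpTable {
    int min = 0;
    std::vector<unsigned> branchOffsets;
    void add(int key, unsigned target) {
        branchOffsets[static_cast<size_t>(key - min)] = target;
    }
};

int main() {
    std::vector<unsigned> yieldPoints = {40, 72}; // hypothetical bytecode offsets
    unsigned nextToEnterPoint = 8;                // hypothetical: just after op_enter

    JumpTable table;
    table.branchOffsets.assign(yieldPoints.size() + 1, 0);
    table.add(0, nextToEnterPoint);
    for (unsigned i = 0; i < yieldPoints.size(); ++i)
        table.add(static_cast<int>(i) + 1, yieldPoints[i]);

    // Resuming with state 2 lands at the second yield's resume point (72).
    unsigned state = 2;
    std::printf("resume at bytecode offset %u\n", table.branchOffsets[state]);
    return 0;
}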
static void compileStub(
    unsigned exitID, JITCode* jitCode, OSRExit& exit, VM* vm, CodeBlock* codeBlock)
{
    StackMaps::Record* record = nullptr;
    
    for (unsigned i = jitCode->stackmaps.records.size(); i--;) {
        record = &jitCode->stackmaps.records[i];
        if (record->patchpointID == exit.m_stackmapID)
            break;
    }
    
    RELEASE_ASSERT(record->patchpointID == exit.m_stackmapID);
    
    // This code requires framePointerRegister is the same as callFrameRegister
    static_assert(MacroAssembler::framePointerRegister == GPRInfo::callFrameRegister, "MacroAssembler::framePointerRegister and GPRInfo::callFrameRegister must be the same");

    CCallHelpers jit(vm, codeBlock);
    
    // We need scratch space to save all registers, to build up the JS stack, to deal with unwind
    // fixup, pointers to all of the objects we materialize, and the elements inside those objects
    // that we materialize.
    
    // Figure out how much space we need for those object allocations.
    unsigned numMaterializations = 0;
    size_t maxMaterializationNumArguments = 0;
    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations) {
        numMaterializations++;
        
        maxMaterializationNumArguments = std::max(
            maxMaterializationNumArguments,
            materialization->properties().size());
    }
    
    ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(
        sizeof(EncodedJSValue) * (
            exit.m_values.size() + numMaterializations + maxMaterializationNumArguments) +
        requiredScratchMemorySizeInBytes() +
        codeBlock->calleeSaveRegisters()->size() * sizeof(uint64_t));
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
    EncodedJSValue* materializationPointers = scratch + exit.m_values.size();
    EncodedJSValue* materializationArguments = materializationPointers + numMaterializations;
    char* registerScratch = bitwise_cast<char*>(materializationArguments + maxMaterializationNumArguments);
    uint64_t* unwindScratch = bitwise_cast<uint64_t*>(registerScratch + requiredScratchMemorySizeInBytes());
    
    HashMap<ExitTimeObjectMaterialization*, EncodedJSValue*> materializationToPointer;
    unsigned materializationCount = 0;
    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations) {
        materializationToPointer.add(
            materialization, materializationPointers + materializationCount++);
    }
    
    // Note that when we come in here, the stack is as LLVM left it, except that
    // someone has called pushToSave(). We don't care about the value they saved,
    // but we do appreciate the fact that they did it, because we use that slot
    // for saveAllRegisters().

    saveAllRegisters(jit, registerScratch);
    
    // Bring the stack back into a sane form and assert that it's sane.
    jit.popToRestore(GPRInfo::regT0);
    jit.checkStackPointerAlignment();
    
    if (vm->m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation) {
        Profiler::Database& database = *vm->m_perBytecodeProfiler;
        Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();
        
        Profiler::OSRExit* profilerExit = compilation->addOSRExit(
            exitID, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
            exit.m_kind, exit.m_kind == UncountableInvalidation);
        jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
    }

    // The remaining code assumes that SP/FP are in the same state that they were in the FTL's
    // call frame.
    
    // Get the call frame and tag thingies.
    // Restore the exiting function's callFrame value into a regT4
    jit.move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
    jit.move(MacroAssembler::TrustedImm64(TagMask), GPRInfo::tagMaskRegister);
    
    // Do some value profiling.
    if (exit.m_profileDataFormat != DataFormatNone) {
        record->locations[0].restoreInto(jit, jitCode->stackmaps, registerScratch, GPRInfo::regT0);
        reboxAccordingToFormat(
            exit.m_profileDataFormat, jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2);
        
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                jit.load32(MacroAssembler::Address(GPRInfo::regT0, JSCell::structureIDOffset()), GPRInfo::regT1);
                jit.store32(GPRInfo::regT1, arrayProfile->addressOfLastSeenStructureID());
                jit.load8(MacroAssembler::Address(GPRInfo::regT0, JSCell::indexingTypeOffset()), GPRInfo::regT1);
                jit.move(MacroAssembler::TrustedImm32(1), GPRInfo::regT2);
                jit.lshift32(GPRInfo::regT1, GPRInfo::regT2);
                jit.or32(GPRInfo::regT2, MacroAssembler::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
            }
        }

        if (!!exit.m_valueProfile)
            jit.store64(GPRInfo::regT0, exit.m_valueProfile.getSpecFailBucket(0));
    }

    // Materialize all objects. Don't materialize an object until all
    // of the objects it needs have been materialized. We break cycles
    // by populating objects late - we only consider an object as
    // needing another object if the latter is needed for the
    // allocation of the former.

    HashSet<ExitTimeObjectMaterialization*> toMaterialize;
    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
        toMaterialize.add(materialization);

    while (!toMaterialize.isEmpty()) {
        unsigned previousToMaterializeSize = toMaterialize.size();

        Vector<ExitTimeObjectMaterialization*> worklist;
        worklist.appendRange(toMaterialize.begin(), toMaterialize.end());
        for (ExitTimeObjectMaterialization* materialization : worklist) {
            // Check if we can do anything about this right now.
            bool allGood = true;
            for (ExitPropertyValue value : materialization->properties()) {
                if (!value.value().isObjectMaterialization())
                    continue;
                if (!value.location().neededForMaterialization())
                    continue;
                if (toMaterialize.contains(value.value().objectMaterialization())) {
                    // Gotta skip this one, since it needs a
                    // materialization that hasn't been materialized.
                    allGood = false;
                    break;
                }
            }
            if (!allGood)
                continue;

            // All systems go for materializing the object. First we
            // recover the values of all of its fields and then we
            // call a function to actually allocate the beast.
            // We only recover the fields that are needed for the allocation.
            for (unsigned propertyIndex = materialization->properties().size(); propertyIndex--;) {
                const ExitPropertyValue& property = materialization->properties()[propertyIndex];
                const ExitValue& value = property.value();
                if (!property.location().neededForMaterialization())
                    continue;

                compileRecovery(
                    jit, value, record, jitCode->stackmaps, registerScratch,
                    materializationToPointer);
                jit.storePtr(GPRInfo::regT0, materializationArguments + propertyIndex);
            }
            
            // This call assumes that we don't pass arguments on the stack.
            jit.setupArgumentsWithExecState(
                CCallHelpers::TrustedImmPtr(materialization),
                CCallHelpers::TrustedImmPtr(materializationArguments));
            jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationMaterializeObjectInOSR)), GPRInfo::nonArgGPR0);
            jit.call(GPRInfo::nonArgGPR0);
            jit.storePtr(GPRInfo::returnValueGPR, materializationToPointer.get(materialization));

            // Let everyone know that we're done.
            toMaterialize.remove(materialization);
        }
        
        // We expect progress! This ensures that we crash rather than looping infinitely if there
        // is something broken about this fixpoint. Or, this could happen if we ever violate the
        // "materializations form a DAG" rule.
        RELEASE_ASSERT(toMaterialize.size() < previousToMaterializeSize);
    }

    // Now that all the objects have been allocated, we populate them
    // with the correct values. This time we can recover all the
    // fields, including those that are only needed for the allocation.
    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations) {
        for (unsigned propertyIndex = materialization->properties().size(); propertyIndex--;) {
            const ExitValue& value = materialization->properties()[propertyIndex].value();
            compileRecovery(
                jit, value, record, jitCode->stackmaps, registerScratch,
                materializationToPointer);
            jit.storePtr(GPRInfo::regT0, materializationArguments + propertyIndex);
        }

        // This call assumes that we don't pass arguments on the stack.
        jit.setupArgumentsWithExecState(
            CCallHelpers::TrustedImmPtr(materialization),
            CCallHelpers::TrustedImmPtr(materializationToPointer.get(materialization)),
            CCallHelpers::TrustedImmPtr(materializationArguments));
        jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationPopulateObjectInOSR)), GPRInfo::nonArgGPR0);
        jit.call(GPRInfo::nonArgGPR0);
    }

    // Save all state from wherever the exit data tells us it was, into the appropriate place in
    // the scratch buffer. This also does the reboxing.
    
    for (unsigned index = exit.m_values.size(); index--;) {
        compileRecovery(
            jit, exit.m_values[index], record, jitCode->stackmaps, registerScratch,
            materializationToPointer);
        jit.store64(GPRInfo::regT0, scratch + index);
    }
    
    // Henceforth we make it look like the exiting function was called through a register
    // preservation wrapper. This implies that FP must be nudged down by a certain amount. Then
    // we restore the various things according to either exit.m_values or by copying from the
    // old frame, and finally we save the various callee-save registers into where the
    // restoration thunk would restore them from.
    
    // Before we start messing with the frame, we need to set aside any registers that the
    // FTL code was preserving.
    for (unsigned i = codeBlock->calleeSaveRegisters()->size(); i--;) {
        RegisterAtOffset entry = codeBlock->calleeSaveRegisters()->at(i);
        jit.load64(
            MacroAssembler::Address(MacroAssembler::framePointerRegister, entry.offset()),
            GPRInfo::regT0);
        jit.store64(GPRInfo::regT0, unwindScratch + i);
    }
    
    jit.load32(CCallHelpers::payloadFor(JSStack::ArgumentCount), GPRInfo::regT2);
    
    // Let's say that the FTL function had failed its arity check. In that case, the stack will
    // contain some extra stuff.
    //
    // We compute the padded stack space:
    //
    //     paddedStackSpace = roundUp(codeBlock->numParameters - regT2 + 1)
    //
    // The stack currently holds regT2 + CallFrameHeaderSize slots.
    // We want to make the stack look like this, from higher addresses down:
    //
    //     - argument padding
    //     - actual arguments
    //     - call frame header
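    //
    // Illustrative numbers: with numParameters == 5, regT2 == 3, and
    // stackAlignmentRegisters() == 2, regT3 = 3 - 5 = -2 is negated to 2,
    // rounded up as roundUp(2 + 1) = 4, and regT2 becomes 3 + 4 = 7.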

    // This code assumes that we're dealing with FunctionCode.
    RELEASE_ASSERT(codeBlock->codeType() == FunctionCode);
    
    jit.add32(
        MacroAssembler::TrustedImm32(-codeBlock->numParameters()), GPRInfo::regT2,
        GPRInfo::regT3);
    MacroAssembler::Jump arityIntact = jit.branch32(
        MacroAssembler::GreaterThanOrEqual, GPRInfo::regT3, MacroAssembler::TrustedImm32(0));
    jit.neg32(GPRInfo::regT3);
    jit.add32(MacroAssembler::TrustedImm32(1 + stackAlignmentRegisters() - 1), GPRInfo::regT3);
    jit.and32(MacroAssembler::TrustedImm32(-stackAlignmentRegisters()), GPRInfo::regT3);
    jit.add32(GPRInfo::regT3, GPRInfo::regT2);
    arityIntact.link(&jit);

    CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);

    // First set up SP so that our data doesn't get clobbered by signals.
    unsigned conservativeStackDelta =
        (exit.m_values.numberOfLocals() + baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters()) * sizeof(Register) +
        maxFrameExtentForSlowPathCall;
    conservativeStackDelta = WTF::roundUpToMultipleOf(
        stackAlignmentBytes(), conservativeStackDelta);
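    // Illustrative numbers: 10 locals plus 3 callee-save virtual registers,
    // 8-byte Registers, and maxFrameExtentForSlowPathCall == 64 would give
    // 13 * 8 + 64 = 168, rounded up to 176 for 16-byte stack alignment.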
    jit.addPtr(
        MacroAssembler::TrustedImm32(-conservativeStackDelta),
        MacroAssembler::framePointerRegister, MacroAssembler::stackPointerRegister);
    jit.checkStackPointerAlignment();

    RegisterSet allFTLCalleeSaves = RegisterSet::ftlCalleeSaveRegisters();
    RegisterAtOffsetList* baselineCalleeSaves = baselineCodeBlock->calleeSaveRegisters();

    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        if (!allFTLCalleeSaves.get(reg))
            continue;
        unsigned unwindIndex = codeBlock->calleeSaveRegisters()->indexOf(reg);
        RegisterAtOffset* baselineRegisterOffset = baselineCalleeSaves->find(reg);

        if (reg.isGPR()) {
            GPRReg regToLoad = baselineRegisterOffset ? GPRInfo::regT0 : reg.gpr();

            if (unwindIndex == UINT_MAX) {
                // The FTL compilation didn't preserve this register. This means that it also
                // didn't use the register. So its value at the beginning of OSR exit should be
                // preserved by the thunk. Luckily, we saved all registers into the register
                // scratch buffer, so we can restore them from there.
                jit.load64(registerScratch + offsetOfReg(reg), regToLoad);
            } else {
                // The FTL compilation preserved the register. Its new value is therefore
                // irrelevant, but we can get the value that was preserved by using the unwind
                // data. We've already copied all unwind-able preserved registers into the unwind
                // scratch buffer, so we can get it from there.
                jit.load64(unwindScratch + unwindIndex, regToLoad);
            }

            if (baselineRegisterOffset)
                jit.store64(regToLoad, MacroAssembler::Address(MacroAssembler::framePointerRegister, baselineRegisterOffset->offset()));
        } else {
            FPRReg fpRegToLoad = baselineRegisterOffset ? FPRInfo::fpRegT0 : reg.fpr();

            if (unwindIndex == UINT_MAX)
                jit.loadDouble(MacroAssembler::TrustedImmPtr(registerScratch + offsetOfReg(reg)), fpRegToLoad);
            else
                jit.loadDouble(MacroAssembler::TrustedImmPtr(unwindScratch + unwindIndex), fpRegToLoad);

            if (baselineRegisterOffset)
                jit.storeDouble(fpRegToLoad, MacroAssembler::Address(MacroAssembler::framePointerRegister, baselineRegisterOffset->offset()));
        }
    }

    size_t baselineVirtualRegistersForCalleeSaves = baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters();

    // Now get state out of the scratch buffer and place it back into the stack. The values are
    // already reboxed so we just move them.
    for (unsigned index = exit.m_values.size(); index--;) {
        VirtualRegister reg = exit.m_values.virtualRegisterForIndex(index);

        if (reg.isLocal() && reg.toLocal() < static_cast<int>(baselineVirtualRegistersForCalleeSaves))
            continue;

        jit.load64(scratch + index, GPRInfo::regT0);
        jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(reg));
    }
    
    handleExitCounts(jit, exit);
    reifyInlinedCallFrames(jit, exit);
    adjustAndJumpToTarget(jit, exit, false);
    
    LinkBuffer patchBuffer(*vm, jit, codeBlock);
    exit.m_code = FINALIZE_CODE_IF(
        shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit(),
        patchBuffer,
        ("FTL OSR exit #%u (%s, %s) from %s, with operands = %s, and record = %s",
            exitID, toCString(exit.m_codeOrigin).data(),
            exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
            toCString(ignoringContext<DumpContext>(exit.m_values)).data(),
            toCString(*record).data()));
}
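The two-phase materialization loop above is easier to see in isolation. Below is a minimal, self-contained sketch of the same allocate-then-populate fixpoint; the Materialization and Property types here are hypothetical stand-ins for the ExitTimeObjectMaterialization machinery, not JSC API.
#include <cassert>
#include <cstdio>
#include <map>
#include <set>
#include <vector>

struct Materialization;

struct Property {
    Materialization* pointsTo;   // non-null if the recovered value is another object
    bool neededForAllocation;    // true for e.g. structure-like fields
};

struct Materialization {
    const char* name;
    std::vector<Property> properties;
};

int main()
{
    // a and b point at each other; neither edge is needed for allocation, so
    // the cycle is broken by writing the pointers in the populate phase.
    Materialization a{"a", {}}, b{"b", {}};
    a.properties.push_back({&b, false});
    b.properties.push_back({&a, false});

    std::set<Materialization*> toMaterialize{&a, &b};
    std::map<Materialization*, int> allocated; // stand-in for the real pointer map

    // Phase 1: allocate, deferring any object whose allocation-order
    // dependencies are still unallocated.
    while (!toMaterialize.empty()) {
        size_t previousSize = toMaterialize.size();
        std::vector<Materialization*> worklist(toMaterialize.begin(), toMaterialize.end());
        for (Materialization* m : worklist) {
            bool allGood = true;
            for (const Property& p : m->properties) {
                if (p.pointsTo && p.neededForAllocation && toMaterialize.count(p.pointsTo)) {
                    allGood = false;
                    break;
                }
            }
            if (!allGood)
                continue;
            allocated[m] = 1;       // models operationMaterializeObjectInOSR
            toMaterialize.erase(m);
        }
        assert(toMaterialize.size() < previousSize); // progress, or the DAG rule was broken
    }

    // Phase 2: populate every field, cycles included; everything exists now.
    for (Materialization* m : {&a, &b}) {
        for (const Property& p : m->properties)
            assert(!p.pointsTo || allocated.count(p.pointsTo)); // models operationPopulateObjectInOSR
        std::printf("%s populated\n", m->name);
    }
}
The point of the design is that only allocation-order edges constrain phase 1; arbitrary pointer cycles are fine because phase 2 runs once everything exists.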
bool
GreedyAllocator::allocateDefinition(LInstruction *ins, LDefinition *def)
{
    VirtualRegister *vr = getVirtualRegister(def);

    LAllocation output;
    switch (def->policy()) {
      case LDefinition::PASSTHROUGH:
        // This is purely passthru, so ignore it.
        return true;

      case LDefinition::DEFAULT:
      case LDefinition::MUST_REUSE_INPUT:
      {
        AnyRegister reg;
        // Either take the register requested, or allocate a new one.
        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
            ins->getOperand(def->getReusedInput())->toUse()->isFixedRegister())
        {
            LAllocation *a = ins->getOperand(def->getReusedInput());
            VirtualRegister *vuse = getVirtualRegister(a->toUse());
            reg = GetFixedRegister(vuse->def, a->toUse());
        } else if (vr->hasRegister()) {
            reg = vr->reg();
        } else {
            if (!allocate(vr->type(), DISALLOW, &reg))
                return false;
        }

        if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
            LUse *use = ins->getOperand(def->getReusedInput())->toUse();
            VirtualRegister *vuse = getVirtualRegister(use);
            // If the use already has the given register, we need to evict.
            if (vuse->hasRegister() && vuse->reg() == reg) {
                if (!evict(reg))
                    return false;
            }

            // Make sure our input is using a fixed register.
            if (reg.isFloat())
                *use = LUse(reg.fpu(), use->virtualRegister());
            else
                *use = LUse(reg.gpr(), use->virtualRegister());
        }
        output = LAllocation(reg);
        break;
      }

      case LDefinition::PRESET:
      {
        // Eviction and disallowing occurred during the definition
        // pre-scan pass.
        output = *def->output();
        break;
      }
    }

    if (output.isRegister()) {
        JS_ASSERT_IF(output.isFloatReg(), disallowed.has(output.toFloatReg()->reg()));
        JS_ASSERT_IF(output.isGeneralReg(), disallowed.has(output.toGeneralReg()->reg()));
    }

    // Finally, set the output.
    def->setOutput(output);
    return true;
}
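A compressed model of allocateDefinition()'s decision tree may also help. The Def and Policy types below are hypothetical stand-ins; the real code additionally evicts a conflicting use and rewrites the LUse to a fixed register, which this sketch omits.
#include <cstdio>
#include <optional>
#include <string>

enum class Policy { Passthrough, Default, MustReuseInput, Preset };

struct Def {
    Policy policy;
    std::optional<std::string> fixedInputReg; // the reused input's fixed register, if any
    std::optional<std::string> currentReg;    // register the vreg already holds, if any
    std::optional<std::string> presetReg;     // output decided during the pre-scan pass
};

// Mirrors the switch in allocateDefinition(): pick the output register.
std::optional<std::string> pickOutput(const Def& def)
{
    switch (def.policy) {
    case Policy::Passthrough:
        return std::nullopt;          // ignored entirely
    case Policy::MustReuseInput:
        if (def.fixedInputReg)
            return def.fixedInputReg; // pin the output to the input's register
        [[fallthrough]];
    case Policy::Default:
        if (def.currentReg)
            return def.currentReg;    // keep the register we already have
        return "rFresh";              // stand-in for allocate(type, DISALLOW, &reg)
    case Policy::Preset:
        return def.presetReg;         // eviction already happened in the pre-scan
    }
    return std::nullopt;
}

int main()
{
    Def reuse{Policy::MustReuseInput, "rax", std::nullopt, std::nullopt};
    Def keep{Policy::Default, std::nullopt, "rcx", std::nullopt};
    Def fresh{Policy::Default, std::nullopt, std::nullopt, std::nullopt};
    std::printf("%s %s %s\n", pickOutput(reuse)->c_str(),
        pickOutput(keep)->c_str(), pickOutput(fresh)->c_str());
}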
bool
GreedyAllocator::spillDefinition(LDefinition *def)
{
    if (def->policy() == LDefinition::PASSTHROUGH)
        return true;

    VirtualRegister *vr = getVirtualRegister(def);
    const LAllocation *output = def->output();

    if (output->isRegister()) {
        if (vr->hasRegister()) {
            // If the returned register is different from the output
            // register, a move is required.
            AnyRegister out = GetAllocatedRegister(output);
            if (out != vr->reg()) {
                if (!spill(*output, vr->reg()))
                    return false;
            }
        }

        // Spill to the stack if needed.
        if (vr->hasStackSlot() && vr->backingStackUsed()) {
            if (!spill(*output, vr->backingStack()))
                return false;
        }
    } else if (vr->hasRegister()) {
        // This definition has a canonical spill location, so make sure to
        // load it into the resulting register, if any.
        JS_ASSERT(!vr->hasStackSlot());
        JS_ASSERT(vr->hasBackingStack());
        if (!spill(*output, vr->reg()))
            return false;
    }

    return true;
}
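Finally, a hedged sketch of spillDefinition()'s move-emission logic, with the same caveat that the types are toy stand-ins. The invariant being maintained is that every live location of the vreg (its canonical register and any used backing stack slot) observes the freshly written output; registers are modeled here as names starting with 'r'.
#include <cstdio>
#include <optional>
#include <string>
#include <vector>

struct VReg {
    std::optional<std::string> reg;   // the vreg's canonical register, if any
    std::optional<int> usedStackSlot; // backing stack slot, if it was ever read
};

// Mirrors spillDefinition(): emit the copies that keep every live location of
// the vreg consistent with the freshly written output allocation.
std::vector<std::string> spillMoves(const std::string& output, const VReg& vr)
{
    std::vector<std::string> moves;
    bool outputIsRegister = !output.empty() && output[0] == 'r';
    if (outputIsRegister) {
        // A move is required if the canonical register differs from the output.
        if (vr.reg && *vr.reg != output)
            moves.push_back("mov " + *vr.reg + " <- " + output);
        // Spill to the backing stack slot if it is ever read.
        if (vr.usedStackSlot)
            moves.push_back("store [slot " + std::to_string(*vr.usedStackSlot) + "] <- " + output);
    } else if (vr.reg) {
        // The output went straight to memory; load it into the canonical register.
        moves.push_back("load " + *vr.reg + " <- " + output);
    }
    return moves;
}

int main()
{
    for (const std::string& m : spillMoves("rax", VReg{std::string("rcx"), 4}))
        std::printf("%s\n", m.c_str());
}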