Example #1
/*
 * Categorizes the lblock "line": annotates it with
 * ALWAYSMISS/ALWAYSHIT/FIRSTMISS/FIRSTHIT.
 * In the FIRSTMISS case, also annotates it with the loop header of the
 * innermost enclosing loop.
 */
void CATBuilder::worst(LBlock *line, ContextTree *node, LBlockSet *idset, int dec){
	int number = idset->count();
	BasicBlock *bb = line->bb();
	LBlock *cacheline;
	BitSet *in = IN(bb);

	bool nonconflitdetected = false;
	bool continu = false;
	unsigned long tagcachline, tagline;

	// Test whether the sole lblock in IN(bb) maps to the same memory block.

	/*
	 * If IN(line) = {LB} and cacheblock(line) == cacheblock(LB), then the
	 * access is non-conflicting (Always Hit).
	 */
	if (in->count() == 1){
		for (int i = 0; i < number; i++){
			if (in->contains(i)){
				cacheline = idset->lblock(i);
				tagcachline = ((unsigned long)cacheline->address()) >> dec;
				tagline = ((unsigned long)line->address()) >> dec;
				if (tagcachline == tagline)
					nonconflitdetected = true;
			}
		}
	}
	// ... (the remaining categorization cases are elided in this excerpt)
}
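As a quick aside, the tag test above reduces to a single shift-and-compare. A minimal, self-contained sketch, assuming dec is log2 of the cache block size (here 16-byte blocks, so dec = 4); the addresses are made up for illustration:

#include <cstdio>

int main() {
    const int dec = 4;                // log2(block size): 16-byte cache blocks
    unsigned long line = 0x1003;      // address of the lblock being categorized
    unsigned long cacheline = 0x100B; // address of the sole lblock in IN(bb)

    // Two addresses fall into the same memory block exactly when their
    // tags (addresses shifted right by dec) are equal.
    bool sameBlock = (line >> dec) == (cacheline >> dec);
    std::printf("tag %#lx vs %#lx: %s\n", line >> dec, cacheline >> dec,
                sameBlock ? "same block -> Always Hit" : "different blocks");
    return 0;
}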
void
CodeGeneratorMIPS64::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool)
{
    MTableSwitch* mir = ool->mir();

    masm.haltingAlign(sizeof(void*));
    masm.bind(ool->jumpLabel()->target());
    masm.addCodeLabel(*ool->jumpLabel());

    for (size_t i = 0; i < mir->numCases(); i++) {
        LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
        Label* caseheader = caseblock->label();
        uint32_t caseoffset = caseheader->offset();

        // The entries of the jump table need to be absolute addresses and thus
        // must be patched after codegen is finished. Each table entry uses 8
        // instructions (4 for load address, 2 for branch, and 2 padding).
        CodeLabel cl;
        masm.ma_li(ScratchRegister, cl.patchAt());
        masm.branch(ScratchRegister);
        masm.as_nop();
        masm.as_nop();
        cl.target()->bind(caseoffset);
        masm.addCodeLabel(cl);
    }
}
Example #3
void
JSONSpewer::spewLIR(MIRGraph *mir)
{
    if (!fp_)
        return;

    beginObjectProperty("lir");
    beginListProperty("blocks");

    for (MBasicBlockIterator i(mir->begin()); i != mir->end(); i++) {
        LBlock *block = i->lir();
        if (!block)
            continue;

        beginObject();
        integerProperty("number", i->id());

        beginListProperty("instructions");
        for (size_t p = 0; p < block->numPhis(); p++)
            spewLIns(block->getPhi(p));
        for (LInstructionIterator ins(block->begin()); ins != block->end(); ins++)
            spewLIns(*ins);
        endList();

        endObject();
    }

    endList();
    endObject();
}
bool
RegisterAllocator::init()
{
    if (!insData.init(mir, graph.numInstructions()))
        return false;

    if (!entryPositions.reserve(graph.numBlocks()) || !exitPositions.reserve(graph.numBlocks()))
        return false;

    for (size_t i = 0; i < graph.numBlocks(); i++) {
        LBlock* block = graph.getBlock(i);
        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++)
            insData[ins->id()] = *ins;
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi* phi = block->getPhi(j);
            insData[phi->id()] = phi;
        }

        CodePosition entry = block->numPhis() != 0
                             ? CodePosition(block->getPhi(0)->id(), CodePosition::INPUT)
                             : inputOf(block->firstInstructionWithId());
        CodePosition exit = outputOf(block->lastInstructionWithId());

        MOZ_ASSERT(block->mir()->id() == i);
        entryPositions.infallibleAppend(entry);
        exitPositions.infallibleAppend(exit);
    }

    return true;
}
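The entry/exit computation above leans on CodePosition packing an instruction id together with an INPUT/OUTPUT half into a single integer. A minimal sketch of such an encoding, assuming the low bit selects the half (the real class may differ in detail):

#include <cstdint>

// Sketch of a CodePosition-style encoding: instruction id in the high bits,
// the INPUT/OUTPUT subposition in the low bit (assumed layout, not the
// verbatim jit class). By construction, inputOf(n) < outputOf(n) < inputOf(n+1).
class CodePos {
    uint32_t bits_;
  public:
    enum SubPosition { INPUT = 0, OUTPUT = 1 };
    CodePos(uint32_t insId, SubPosition sub) : bits_((insId << 1) | sub) {}
    uint32_t bits() const { return bits_; }
    uint32_t insId() const { return bits_ >> 1; }
};

static inline CodePos inputOf(uint32_t id)  { return CodePos(id, CodePos::INPUT); }
static inline CodePos outputOf(uint32_t id) { return CodePos(id, CodePos::OUTPUT); }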
void
JSONSpewer::spewIntervals(LinearScanAllocator *regalloc)
{
    if (!fp_)
        return;

    beginObjectProperty("intervals");
    beginListProperty("blocks");

    for (size_t bno = 0; bno < regalloc->graph.numBlocks(); bno++) {
        beginObject();
        integerProperty("number", bno);
        beginListProperty("vregs");

        LBlock *lir = regalloc->graph.getBlock(bno);
        for (LInstructionIterator ins = lir->begin(); ins != lir->end(); ins++) {
            for (size_t k = 0; k < ins->numDefs(); k++) {
                VirtualRegister *vreg = &regalloc->vregs[ins->getDef(k)->virtualRegister()];

                beginObject();
                integerProperty("vreg", vreg->reg());
                beginListProperty("intervals");

                for (size_t i = 0; i < vreg->numIntervals(); i++) {
                    LiveInterval *live = vreg->getInterval(i);

                    if (live->numRanges()) {
                        beginObject();
                        property("allocation");
                        fprintf(fp_, "\"");
                        LAllocation::PrintAllocation(fp_, live->getAllocation());
                        fprintf(fp_, "\"");
                        beginListProperty("ranges");

                        for (size_t j = 0; j < live->numRanges(); j++) {
                            beginObject();
                            integerProperty("start", live->getRange(j)->from.pos());
                            integerProperty("end", live->getRange(j)->to.pos());
                            endObject();
                        }

                        endList();
                        endObject();
                    }
                }

                endList();
                endObject();
            }
        }

        endList();
        endObject();
    }

    endList();
    endObject();
}
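For orientation, the begin/end nesting above produces output shaped roughly like the following (an illustrative sketch with made-up values, not captured spewer output):

// Illustrative shape of the emitted JSON (made-up values):
const char* kExampleSpew = R"({
  "intervals": {
    "blocks": [
      { "number": 0,
        "vregs": [
          { "vreg": 5,
            "intervals": [
              { "allocation": "eax",
                "ranges": [ { "start": 10, "end": 14 } ]
              }
            ]
          }
        ]
      }
    ]
  }
})";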
template <typename VREG>
bool
LiveRangeAllocator<VREG>::init()
{
    if (!RegisterAllocator::init())
        return false;

    liveIn = lir->mir()->allocate<BitSet*>(graph.numBlockIds());
    if (!liveIn)
        return false;

    // Initialize fixed intervals.
    for (size_t i = 0; i < AnyRegister::Total; i++) {
        AnyRegister reg = AnyRegister::FromCode(i);
        LiveInterval *interval = new LiveInterval(0);
        interval->setAllocation(LAllocation(reg));
        fixedIntervals[i] = interval;
    }

    fixedIntervalsUnion = new LiveInterval(0);

    if (!vregs.init(lir->mir(), graph.numVirtualRegisters()))
        return false;

    // Build virtual register objects
    for (size_t i = 0; i < graph.numBlocks(); i++) {
        if (mir->shouldCancel("LSRA create data structures (main loop)"))
            return false;

        LBlock *block = graph.getBlock(i);
        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            for (size_t j = 0; j < ins->numDefs(); j++) {
                LDefinition *def = ins->getDef(j);
                if (def->policy() != LDefinition::PASSTHROUGH) {
                    uint32_t reg = def->virtualRegister();
                    if (!vregs[reg].init(reg, block, *ins, def, /* isTemp */ false))
                        return false;
                }
            }

            for (size_t j = 0; j < ins->numTemps(); j++) {
                LDefinition *def = ins->getTemp(j);
                if (def->isBogusTemp())
                    continue;
                if (!vregs[def].init(def->virtualRegister(), block, *ins, def, /* isTemp */ true))
                    return false;
            }
        }
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi *phi = block->getPhi(j);
            LDefinition *def = phi->getDef(0);
            if (!vregs[def].init(phi->id(), block, phi, def, /* isTemp */ false))
                return false;
        }
    }

    return true;
}
Example #7
void
C1Spewer::spewRanges(GenericPrinter& out, MBasicBlock* block, BacktrackingAllocator* regalloc)
{
    LBlock* lir = block->lir();
    if (!lir)
        return;

    for (size_t i = 0; i < lir->numPhis(); i++)
        spewRanges(out, regalloc, lir->getPhi(i));

    for (LInstructionIterator ins = lir->begin(); ins != lir->end(); ins++)
        spewRanges(out, regalloc, *ins);
}
Example #8
void
C1Spewer::spewIntervals(FILE *fp, MBasicBlock *block, LinearScanAllocator *regalloc, size_t &nextId)
{
    LBlock *lir = block->lir();
    if (!lir)
        return;

    for (size_t i = 0; i < lir->numPhis(); i++)
        spewIntervals(fp, regalloc, lir->getPhi(i), nextId);

    for (LInstructionIterator ins = lir->begin(); ins != lir->end(); ins++)
        spewIntervals(fp, regalloc, *ins, nextId);
}
Example #9
bool
StupidAllocator::init()
{
    if (!RegisterAllocator::init())
        return false;

    if (!virtualRegisters.reserve(graph.numVirtualRegisters()))
        return false;
    for (size_t i = 0; i < graph.numVirtualRegisters(); i++)
        virtualRegisters.infallibleAppend(NULL);

    for (size_t i = 0; i < graph.numBlocks(); i++) {
        LBlock *block = graph.getBlock(i);
        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            for (size_t j = 0; j < ins->numDefs(); j++) {
                LDefinition *def = ins->getDef(j);
                if (def->policy() != LDefinition::PASSTHROUGH)
                    virtualRegisters[def->virtualRegister()] = def;
            }

            for (size_t j = 0; j < ins->numTemps(); j++) {
                LDefinition *def = ins->getTemp(j);
                if (def->isBogusTemp())
                    continue;
                virtualRegisters[def->virtualRegister()] = def;
            }
        }
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi *phi = block->getPhi(j);
            LDefinition *def = phi->getDef(0);
            uint32 vreg = def->virtualRegister();

            virtualRegisters[vreg] = def;
        }
    }

    // Assign physical registers to the tracked allocation.
    {
        registerCount = 0;
        RegisterSet remainingRegisters(allRegisters_);
        while (!remainingRegisters.empty(/* float = */ false))
            registers[registerCount++].reg = AnyRegister(remainingRegisters.takeGeneral());
        while (!remainingRegisters.empty(/* float = */ true))
            registers[registerCount++].reg = AnyRegister(remainingRegisters.takeFloat());
        JS_ASSERT(registerCount <= MAX_REGISTERS);
    }

    return true;
}
Example #10
bool
StupidAllocator::init()
{
    if (!RegisterAllocator::init())
        return false;

    if (!virtualRegisters.appendN((LDefinition*)nullptr, graph.numVirtualRegisters()))
        return false;

    for (size_t i = 0; i < graph.numBlocks(); i++) {
        LBlock* block = graph.getBlock(i);
        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            for (size_t j = 0; j < ins->numDefs(); j++) {
                LDefinition* def = ins->getDef(j);
                virtualRegisters[def->virtualRegister()] = def;
            }

            for (size_t j = 0; j < ins->numTemps(); j++) {
                LDefinition* def = ins->getTemp(j);
                if (def->isBogusTemp())
                    continue;
                virtualRegisters[def->virtualRegister()] = def;
            }
        }
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi* phi = block->getPhi(j);
            LDefinition* def = phi->getDef(0);
            uint32_t vreg = def->virtualRegister();

            virtualRegisters[vreg] = def;
        }
    }

    // Assign physical registers to the tracked allocation.
    {
        registerCount = 0;
        LiveRegisterSet remainingRegisters(allRegisters_.asLiveSet());
        while (!remainingRegisters.emptyGeneral())
            registers[registerCount++].reg = AnyRegister(remainingRegisters.takeAnyGeneral());

        while (!remainingRegisters.emptyFloat())
            registers[registerCount++].reg = AnyRegister(remainingRegisters.takeAnyFloat());

        MOZ_ASSERT(registerCount <= MAX_REGISTERS);
    }

    return true;
}
void
StupidAllocator::syncForBlockEnd(LBlock *block, LInstruction *ins)
{
    // Sync any dirty registers, and update the synced state for phi nodes at
    // each successor of a block. We cannot conflate the storage for phis with
    // that of their inputs, as we cannot prove the live ranges of the phi and
    // its input do not overlap. The values for the two may additionally be
    // different, as the phi could be for the value of the input in a previous
    // loop iteration.

    for (size_t i = 0; i < registerCount; i++)
        syncRegister(ins, i);

    LMoveGroup *group = nullptr;

    MBasicBlock *successor = block->mir()->successorWithPhis();
    if (successor) {
        uint32_t position = block->mir()->positionInPhiSuccessor();
        LBlock *lirsuccessor = graph.getBlock(successor->id());
        for (size_t i = 0; i < lirsuccessor->numPhis(); i++) {
            LPhi *phi = lirsuccessor->getPhi(i);

            uint32_t sourcevreg = phi->getOperand(position)->toUse()->virtualRegister();
            uint32_t destvreg = phi->getDef(0)->virtualRegister();

            if (sourcevreg == destvreg)
                continue;

            LAllocation *source = stackLocation(sourcevreg);
            LAllocation *dest = stackLocation(destvreg);

            if (!group) {
                // The moves we insert here need to happen simultaneously with
                // each other, yet after any existing moves before the instruction.
                LMoveGroup *input = getInputMoveGroup(ins->id());
                if (input->numMoves() == 0) {
                    group = input;
                } else {
                    group = new LMoveGroup(alloc());
                    block->insertAfter(input, group);
                }
            }

            group->add(source, dest);
        }
    }
}
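The "simultaneously" requirement in the comment above is the classic parallel-copy problem: applying the phi moves one at a time can clobber a source that a later move still reads. A tiny self-contained illustration in plain C++ (no jit types):

#include <cstdio>

int main() {
    // Two phi moves that must execute as one group: a <- b and b <- a.
    int a = 1, b = 2;

    // Naive sequential application clobbers: after "a = b", the old a is gone.
    int sa = a, sb = b;
    sa = sb;            // sa = 2
    sb = sa;            // sb = 2, but it should have received the old a (1)
    std::printf("sequential: a=%d b=%d (old a lost)\n", sa, sb);

    // Parallel semantics: read every source before writing any destination,
    // which is what gathering the moves into a single LMoveGroup guarantees.
    int ta = b, tb = a;
    a = ta;
    b = tb;
    std::printf("parallel:   a=%d b=%d\n", a, b);
    return 0;
}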
Example #12
void
JSONSpewer::spewRanges(BacktrackingAllocator* regalloc)
{
    if (!fp_)
        return;

    beginObjectProperty("ranges");
    beginListProperty("blocks");

    for (size_t bno = 0; bno < regalloc->graph.numBlocks(); bno++) {
        beginObject();
        integerProperty("number", bno);
        beginListProperty("vregs");

        LBlock* lir = regalloc->graph.getBlock(bno);
        for (LInstructionIterator ins = lir->begin(); ins != lir->end(); ins++) {
            for (size_t k = 0; k < ins->numDefs(); k++) {
                uint32_t id = ins->getDef(k)->virtualRegister();
                VirtualRegister* vreg = &regalloc->vregs[id];

                beginObject();
                integerProperty("vreg", id);
                beginListProperty("ranges");

                for (LiveRange::RegisterLinkIterator iter = vreg->rangesBegin(); iter; iter++) {
                    LiveRange* range = LiveRange::get(*iter);

                    beginObject();
                    property("allocation");
                    fprintf(fp_, "\"%s\"", range->bundle()->allocation().toString());
                    integerProperty("start", range->from().bits());
                    integerProperty("end", range->to().bits());
                    endObject();
                }

                endList();
                endObject();
            }
        }

        endList();
        endObject();
    }

    endList();
    endObject();
}
Example #13
bool
RegisterAllocator::init()
{
    if (!insData.init(mir, graph.numInstructions()))
        return false;

    for (size_t i = 0; i < graph.numBlocks(); i++) {
        LBlock* block = graph.getBlock(i);
        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++)
            insData[ins->id()] = *ins;
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi* phi = block->getPhi(j);
            insData[phi->id()] = phi;
        }
    }

    return true;
}
Example #14
bool
RegisterAllocator::init()
{
    if (!insData.init(lir->mir(), graph.numInstructions()))
        return false;

    for (size_t i = 0; i < graph.numBlocks(); i++) {
        LBlock *block = graph.getBlock(i);
        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++)
            insData[*ins].init(*ins, block);
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi *phi = block->getPhi(j);
            insData[phi].init(phi, block);
        }
    }

    return true;
}
bool
GreedyAllocator::buildPhiMoves(LBlock *block)
{
    IonSpew(IonSpew_RegAlloc, " Merging phi state."); 

    phiMoves = Mover();

    MBasicBlock *mblock = block->mir();
    if (!mblock->successorWithPhis())
        return true;

    // Insert moves from our state into our successor's phi.
    uint32 pos = mblock->positionInPhiSuccessor();
    LBlock *successor = mblock->successorWithPhis()->lir();
    for (size_t i = 0; i < successor->numPhis(); i++) {
        LPhi *phi = successor->getPhi(i);
        JS_ASSERT(phi->numDefs() == 1);

        VirtualRegister *phiReg = getVirtualRegister(phi->getDef(0));
        allocateStack(phiReg);

        LAllocation *in = phi->getOperand(pos);
        VirtualRegister *inReg = getVirtualRegister(in->toUse());
        allocateStack(inReg);

        // Try to get a register for the input.
        if (!inReg->hasRegister() && !allocatableRegs().empty(inReg->isDouble())) {
            if (!allocateReg(inReg))
                return false;
        }

        // Add a move from the input to the phi.
        if (inReg->hasRegister()) {
            if (!phiMoves.move(inReg->reg(), phiReg->backingStack()))
                return false;
        } else {
            if (!phiMoves.move(inReg->backingStack(), phiReg->backingStack()))
                return false;
        }
    }

    return true;
}
bool
StupidAllocator::go()
{
    // This register allocator is intended to be as simple as possible, while
    // still being complicated enough to share properties with more complicated
    // allocators. Namely, physical registers may be used to carry virtual
    // registers across LIR instructions, but not across basic blocks.
    //
    // This algorithm does not pay any attention to liveness. It is performed
    // as a single forward pass through the basic blocks in the program. As
    // virtual registers and temporaries are defined they are assigned physical
    // registers, evicting existing allocations in an LRU fashion.

    // For virtual registers not carried in a register, a canonical spill
    // location is used. Each vreg has a different spill location; since we do
    // not track liveness we cannot determine that two vregs have disjoint
    // lifetimes. Thus, the maximum stack height is the number of vregs (scaled
    // by two on 32 bit platforms to allow storing double values).
    graph.setLocalSlotCount(DefaultStackSlot(graph.numVirtualRegisters() - 1) + 1);

    if (!init())
        return false;

    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock *block = graph.getBlock(blockIndex);
        JS_ASSERT(block->mir()->id() == blockIndex);

        for (size_t i = 0; i < registerCount; i++)
            registers[i].set(MISSING_ALLOCATION);

        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction *ins = *iter;

            if (ins == *block->rbegin())
                syncForBlockEnd(block, ins);

            allocateForInstruction(ins);
        }
    }

    return true;
}
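The setLocalSlotCount call above sizes the frame from the canonical spill slot of the highest-numbered vreg. A sketch of what a DefaultStackSlot-style mapping could look like under the rule the comment states (one slot per vreg, doubled on 32-bit platforms so doubles fit); the exact formula here is an assumption, not the verbatim helper:

#include <cstdint>

// Hypothetical canonical spill-slot mapping. Every vreg owns a distinct
// slot since liveness is not tracked; on 32-bit targets each vreg gets two
// word-sized slots so a double value always fits.
static inline uint32_t defaultStackSlot(uint32_t vreg) {
#if UINTPTR_MAX == 0xFFFFFFFFu        // 32-bit platform
    return vreg * 2 + 2;
#else                                 // 64-bit platform
    return vreg + 1;
#endif
}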
// Scan all instructions inside the loop. If any instruction has a use of a
// definition that is defined outside its containing loop, then stack space
// for that definition must be reserved ahead of time. Otherwise, we could
// re-use storage that has been temporarily allocated - see bug 694481.
bool
GreedyAllocator::findLoopCarriedUses(LBlock *backedge)
{
    Vector<LBlock *, 4, SystemAllocPolicy> worklist;
    MBasicBlock *mheader = backedge->mir()->loopHeaderOfBackedge();
    uint32 upperBound = backedge->lastId();
    uint32 lowerBound = mheader->lir()->firstId();

    IonSpew(IonSpew_RegAlloc, "  Finding loop-carried uses.");

    for (size_t i = 0; i < mheader->numContainedInLoop(); i++) {
        LBlock *block = mheader->getContainedInLoop(i)->lir();

        for (LInstructionIterator i = block->begin(); i != block->end(); i++)
            findLoopCarriedUses(*i, lowerBound, upperBound);
        for (size_t i = 0; i < block->numPhis(); i++)
            findLoopCarriedUses(block->getPhi(i), lowerBound, upperBound);
    }

    IonSpew(IonSpew_RegAlloc, "  Done finding loop-carried uses.");

    return true;
}
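To make the hazard concrete: a loop-carried use is a value defined outside the loop but read on every iteration. A contrived but runnable sketch of why its storage must stay reserved (a plain array stands in for spill storage; names are hypothetical):

#include <cstdio>

int main() {
    int slots[2] = { 100, 0 };      // slots[0] spills v1, defined before the loop

    for (int iter = 0; iter < 2; iter++) {
        int v2 = slots[0] + iter;   // loop-carried use of v1 from its slot
        std::printf("iter %d: v2 = %d\n", iter, v2);
        // If the allocator reused slots[0] for a temporary here, e.g.
        //   slots[0] = 0xdead;
        // the next iteration would read garbage -- hence the reservation.
    }
    return 0;
}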
void
CodeGeneratorX86Shared::emitBranch(Assembler::Condition cond, MBasicBlock *mirTrue,
                                   MBasicBlock *mirFalse, Assembler::NaNCond ifNaN)
{
    LBlock *ifTrue = mirTrue->lir();
    LBlock *ifFalse = mirFalse->lir();

    if (ifNaN == Assembler::NaN_IsFalse)
        masm.j(Assembler::Parity, ifFalse->label());
    else if (ifNaN == Assembler::NaN_IsTrue)
        masm.j(Assembler::Parity, ifTrue->label());

    if (isNextBlock(ifFalse)) {
        masm.j(cond, ifTrue->label());
    } else {
        masm.j(Assembler::InvertCondition(cond), ifFalse->label());
        if (!isNextBlock(ifTrue))
            masm.jmp(ifTrue->label());
    }
}
Example #19
void
AllocationIntegrityState::dump()
{
#ifdef DEBUG
    fprintf(stderr, "Register Allocation Integrity State:\n");

    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock* block = graph.getBlock(blockIndex);
        MBasicBlock* mir = block->mir();

        fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
        for (size_t i = 0; i < mir->numSuccessors(); i++)
            fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
        fprintf(stderr, "\n");

        for (size_t i = 0; i < block->numPhis(); i++) {
            const InstructionInfo& info = blocks[blockIndex].phis[i];
            LPhi* phi = block->getPhi(i);
            CodePosition input(block->getPhi(0)->id(), CodePosition::INPUT);
            CodePosition output(block->getPhi(block->numPhis() - 1)->id(), CodePosition::OUTPUT);

            fprintf(stderr, "[%u,%u Phi] [def %s] ",
                    input.bits(),
                    output.bits(),
                    phi->getDef(0)->toString());
            for (size_t j = 0; j < phi->numOperands(); j++)
                fprintf(stderr, " [use %s]", info.inputs[j].toString());
            fprintf(stderr, "\n");
        }

        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;
            const InstructionInfo& info = instructions[ins->id()];

            CodePosition input(ins->id(), CodePosition::INPUT);
            CodePosition output(ins->id(), CodePosition::OUTPUT);

            fprintf(stderr, "[");
            if (input != CodePosition::MIN)
                fprintf(stderr, "%u,%u ", input.bits(), output.bits());
            fprintf(stderr, "%s]", ins->opName());

            if (ins->isMoveGroup()) {
                LMoveGroup* group = ins->toMoveGroup();
                for (int i = group->numMoves() - 1; i >= 0; i--) {
                    // Use two printfs, as LAllocation::toString is not reentrant.
                    fprintf(stderr, " [%s", group->getMove(i).from()->toString());
                    fprintf(stderr, " -> %s]", group->getMove(i).to()->toString());
                }
                fprintf(stderr, "\n");
                continue;
            }

            for (size_t i = 0; i < ins->numDefs(); i++)
                fprintf(stderr, " [def %s]", ins->getDef(i)->toString());

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition* temp = ins->getTemp(i);
                if (!temp->isBogusTemp())
                    fprintf(stderr, " [temp v%u %s]", info.temps[i].virtualRegister(),
                           temp->toString());
            }

            size_t index = 0;
            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
                fprintf(stderr, " [use %s", info.inputs[index++].toString());
                if (!alloc->isConstant())
                    fprintf(stderr, " %s", alloc->toString());
                fprintf(stderr, "]");
            }

            fprintf(stderr, "\n");
        }
    }

    // Print discovered allocations at the ends of blocks, in the order they
    // were discovered.

    Vector<IntegrityItem, 20, SystemAllocPolicy> seenOrdered;
    seenOrdered.appendN(IntegrityItem(), seen.count());

    for (IntegrityItemSet::Enum iter(seen); !iter.empty(); iter.popFront()) {
        IntegrityItem item = iter.front();
        seenOrdered[item.index] = item;
    }

    if (!seenOrdered.empty()) {
        fprintf(stderr, "Intermediate Allocations:\n");

        for (size_t i = 0; i < seenOrdered.length(); i++) {
            IntegrityItem item = seenOrdered[i];
            fprintf(stderr, "  block %u reg v%u alloc %s\n",
                   item.block->mir()->id(), item.vreg, item.alloc.toString());
        }
    }

    fprintf(stderr, "\n");
#endif
}
Example #20
bool
AllocationIntegrityState::record()
{
    // Ignore repeated record() calls.
    if (!instructions.empty())
        return true;

    if (!instructions.appendN(InstructionInfo(), graph.numInstructions()))
        return false;

    if (!virtualRegisters.appendN((LDefinition*)nullptr, graph.numVirtualRegisters()))
        return false;

    if (!blocks.reserve(graph.numBlocks()))
        return false;
    for (size_t i = 0; i < graph.numBlocks(); i++) {
        blocks.infallibleAppend(BlockInfo());
        LBlock* block = graph.getBlock(i);
        MOZ_ASSERT(block->mir()->id() == i);

        BlockInfo& blockInfo = blocks[i];
        if (!blockInfo.phis.reserve(block->numPhis()))
            return false;

        for (size_t j = 0; j < block->numPhis(); j++) {
            blockInfo.phis.infallibleAppend(InstructionInfo());
            InstructionInfo& info = blockInfo.phis[j];
            LPhi* phi = block->getPhi(j);
            MOZ_ASSERT(phi->numDefs() == 1);
            uint32_t vreg = phi->getDef(0)->virtualRegister();
            virtualRegisters[vreg] = phi->getDef(0);
            if (!info.outputs.append(*phi->getDef(0)))
                return false;
            for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
                if (!info.inputs.append(*phi->getOperand(k)))
                    return false;
            }
        }

        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;
            InstructionInfo& info = instructions[ins->id()];

            for (size_t k = 0; k < ins->numTemps(); k++) {
                if (!ins->getTemp(k)->isBogusTemp()) {
                    uint32_t vreg = ins->getTemp(k)->virtualRegister();
                    virtualRegisters[vreg] = ins->getTemp(k);
                }
                if (!info.temps.append(*ins->getTemp(k)))
                    return false;
            }
            for (size_t k = 0; k < ins->numDefs(); k++) {
                if (!ins->getDef(k)->isBogusTemp()) {
                    uint32_t vreg = ins->getDef(k)->virtualRegister();
                    virtualRegisters[vreg] = ins->getDef(k);
                }
                if (!info.outputs.append(*ins->getDef(k)))
                    return false;
            }
            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
                if (!info.inputs.append(**alloc))
                    return false;
            }
        }
    }

    return seen.init();
}
template <typename VREG>
bool
LiveRangeAllocator<VREG>::buildLivenessInfo()
{
    if (!init())
        return false;

    Vector<MBasicBlock *, 1, SystemAllocPolicy> loopWorkList;
    BitSet *loopDone = BitSet::New(alloc(), graph.numBlockIds());
    if (!loopDone)
        return false;

    for (size_t i = graph.numBlocks(); i > 0; i--) {
        if (mir->shouldCancel("Build Liveness Info (main loop)"))
            return false;

        LBlock *block = graph.getBlock(i - 1);
        MBasicBlock *mblock = block->mir();

        BitSet *live = BitSet::New(alloc(), graph.numVirtualRegisters());
        if (!live)
            return false;
        liveIn[mblock->id()] = live;

        // Propagate liveIn from our successors to us
        for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
            MBasicBlock *successor = mblock->lastIns()->getSuccessor(i);
            // Skip backedges, as we fix them up at the loop header.
            if (mblock->id() < successor->id())
                live->insertAll(liveIn[successor->id()]);
        }

        // Add successor phis
        if (mblock->successorWithPhis()) {
            LBlock *phiSuccessor = mblock->successorWithPhis()->lir();
            for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
                LPhi *phi = phiSuccessor->getPhi(j);
                LAllocation *use = phi->getOperand(mblock->positionInPhiSuccessor());
                uint32_t reg = use->toUse()->virtualRegister();
                live->insert(reg);
            }
        }

        // Variables are assumed alive for the entire block, a define shortens
        // the interval to the point of definition.
        for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
            if (!vregs[*liveRegId].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                                  outputOf(block->lastId()).next()))
            {
                return false;
            }
        }

        // Shorten the front end of live intervals for live variables to their
        // point of definition, if found.
        for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
            // Calls may clobber registers, so force a spill and reload around the callsite.
            if (ins->isCall()) {
                for (AnyRegisterIterator iter(allRegisters_); iter.more(); iter++) {
                    if (forLSRA) {
                        if (!addFixedRangeAtHead(*iter, inputOf(*ins), outputOf(*ins)))
                            return false;
                    } else {
                        bool found = false;
                        for (size_t i = 0; i < ins->numDefs(); i++) {
                            if (ins->getDef(i)->isPreset() &&
                                *ins->getDef(i)->output() == LAllocation(*iter)) {
                                found = true;
                                break;
                            }
                        }
                        if (!found && !addFixedRangeAtHead(*iter, outputOf(*ins), outputOf(*ins).next()))
                            return false;
                    }
                }
            }

            for (size_t i = 0; i < ins->numDefs(); i++) {
                if (ins->getDef(i)->policy() != LDefinition::PASSTHROUGH) {
                    LDefinition *def = ins->getDef(i);

                    CodePosition from;
                    if (def->policy() == LDefinition::PRESET && def->output()->isRegister() && forLSRA) {
                        // The fixed range covers the current instruction so the
                        // interval for the virtual register starts at the next
                        // instruction. If the next instruction has a fixed use,
                        // this can lead to unnecessary register moves. To avoid
                        // special handling for this, assert the next instruction
                        // has no fixed uses. defineFixed guarantees this by inserting
                        // an LNop.
                        JS_ASSERT(!NextInstructionHasFixedUses(block, *ins));
                        AnyRegister reg = def->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins).next()))
                            return false;
                        from = outputOf(*ins).next();
                    } else {
                        from = forLSRA ? inputOf(*ins) : outputOf(*ins);
                    }

                    if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
                        // MUST_REUSE_INPUT is implemented by allocating an output
                        // register and moving the input to it. Register hints are
                        // used to avoid unnecessary moves. We give the input an
                        // LUse::ANY policy to avoid allocating a register for the
                        // input.
                        LUse *inputUse = ins->getOperand(def->getReusedInput())->toUse();
                        JS_ASSERT(inputUse->policy() == LUse::REGISTER);
                        JS_ASSERT(inputUse->usedAtStart());
                        *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
                    }

                    LiveInterval *interval = vregs[def].getInterval(0);
                    interval->setFrom(from);

                    // Ensure that if there aren't any uses, there's at least
                    // some interval for the output to go into.
                    if (interval->numRanges() == 0) {
                        if (!interval->addRangeAtHead(from, from.next()))
                            return false;
                    }
                    live->remove(def->virtualRegister());
                }
            }

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition *temp = ins->getTemp(i);
                if (temp->isBogusTemp())
                    continue;

                if (forLSRA) {
                    if (temp->policy() == LDefinition::PRESET) {
                        if (ins->isCall())
                            continue;
                        AnyRegister reg = temp->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                            return false;

                        // Fixed intervals are not added to safepoints, so do it
                        // here.
                        if (LSafepoint *safepoint = ins->safepoint())
                            AddRegisterToSafepoint(safepoint, reg, *temp);
                    } else {
                        JS_ASSERT(!ins->isCall());
                        if (!vregs[temp].getInterval(0)->addRangeAtHead(inputOf(*ins), outputOf(*ins)))
                            return false;
                    }
                } else {
                    // Normally temps are considered to cover both the input
                    // and output of the associated instruction. In some cases
                    // though we want to use a fixed register as both an input
                    // and clobbered register in the instruction, so watch for
                    // this and shorten the temp to cover only the output.
                    CodePosition from = inputOf(*ins);
                    if (temp->policy() == LDefinition::PRESET) {
                        AnyRegister reg = temp->output()->toRegister();
                        for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
                            if (alloc->isUse()) {
                                LUse *use = alloc->toUse();
                                if (use->isFixedRegister()) {
                                    if (GetFixedRegister(vregs[use].def(), use) == reg)
                                        from = outputOf(*ins);
                                }
                            }
                        }
                    }

                    CodePosition to =
                        ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
                    if (!vregs[temp].getInterval(0)->addRangeAtHead(from, to))
                        return false;
                }
            }

            DebugOnly<bool> hasUseRegister = false;
            DebugOnly<bool> hasUseRegisterAtStart = false;

            for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) {
                if (inputAlloc->isUse()) {
                    LUse *use = inputAlloc->toUse();

                    // The first instruction, LLabel, has no uses.
                    JS_ASSERT(inputOf(*ins) > outputOf(block->firstId()));

                    // Call uses should always be at-start or fixed, since the fixed intervals
                    // use all registers.
                    JS_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
                                 use->isFixedRegister() || use->usedAtStart());

#ifdef DEBUG
                    // Don't allow at-start call uses if there are temps of the same kind,
                    // so that we don't assign the same register.
                    if (ins->isCall() && use->usedAtStart()) {
                        for (size_t i = 0; i < ins->numTemps(); i++)
                            JS_ASSERT(vregs[ins->getTemp(i)].isDouble() != vregs[use].isDouble());
                    }

                    // If there are both useRegisterAtStart(x) and useRegister(y)
                    // uses, we may assign the same register to both operands due to
                    // interval splitting (bug 772830). Don't allow this for now.
                    if (use->policy() == LUse::REGISTER) {
                        if (use->usedAtStart()) {
                            if (!IsInputReused(*ins, use))
                                hasUseRegisterAtStart = true;
                        } else {
                            hasUseRegister = true;
                        }
                    }

                    JS_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
#endif

                    // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
                    if (use->policy() == LUse::RECOVERED_INPUT)
                        continue;

                    CodePosition to;
                    if (forLSRA) {
                        if (use->isFixedRegister()) {
                            AnyRegister reg = GetFixedRegister(vregs[use].def(), use);
                            if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                                return false;
                            to = inputOf(*ins);

                            // Fixed intervals are not added to safepoints, so do it
                            // here.
                            LSafepoint *safepoint = ins->safepoint();
                            if (!ins->isCall() && safepoint)
                                AddRegisterToSafepoint(safepoint, reg, *vregs[use].def());
                        } else {
                            to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
                        }
                    } else {
                        to = (use->usedAtStart() || ins->isCall())
                           ? inputOf(*ins) : outputOf(*ins);
                        if (use->isFixedRegister()) {
                            LAllocation reg(AnyRegister::FromCode(use->registerCode()));
                            for (size_t i = 0; i < ins->numDefs(); i++) {
                                LDefinition *def = ins->getDef(i);
                                if (def->policy() == LDefinition::PRESET && *def->output() == reg)
                                    to = inputOf(*ins);
                            }
                        }
                    }

                    LiveInterval *interval = vregs[use].getInterval(0);
                    if (!interval->addRangeAtHead(inputOf(block->firstId()), forLSRA ? to : to.next()))
                        return false;
                    interval->addUse(new(alloc()) UsePosition(use, to));

                    live->insert(use->virtualRegister());
                }
            }
        }

        // Phis have simultaneous assignment semantics at block begin, so at
        // the beginning of the block we can be sure that liveIn does not
        // contain any phi outputs.
        for (unsigned int i = 0; i < block->numPhis(); i++) {
            LDefinition *def = block->getPhi(i)->getDef(0);
            if (live->contains(def->virtualRegister())) {
                live->remove(def->virtualRegister());
            } else {
                // This is a dead phi, so add a dummy range over all phis. This
                // can go away if we have an earlier dead code elimination pass.
                if (!vregs[def].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                               outputOf(block->firstId())))
                {
                    return false;
                }
            }
        }

        if (mblock->isLoopHeader()) {
            // A divergence from the published algorithm is required here, as
            // our block order does not guarantee that blocks of a loop are
            // contiguous. As a result, a single live interval spanning the
            // loop is not possible. Additionally, we require liveIn in a later
            // pass for resolution, so that must also be fixed up here.
            MBasicBlock *loopBlock = mblock->backedge();
            while (true) {
                // Blocks must already have been visited to have a liveIn set.
                JS_ASSERT(loopBlock->id() >= mblock->id());

                // Add an interval for this entire loop block
                CodePosition from = inputOf(loopBlock->lir()->firstId());
                CodePosition to = outputOf(loopBlock->lir()->lastId()).next();

                for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
                    if (!vregs[*liveRegId].getInterval(0)->addRange(from, to))
                        return false;
                }

                // Fix up the liveIn set to account for the new interval
                liveIn[loopBlock->id()]->insertAll(live);

                // Make sure we don't visit this node again
                loopDone->insert(loopBlock->id());

                // If this is the loop header, any predecessors are either the
                // backedge or out of the loop, so skip any predecessors of
                // this block
                if (loopBlock != mblock) {
                    for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
                        MBasicBlock *pred = loopBlock->getPredecessor(i);
                        if (loopDone->contains(pred->id()))
                            continue;
                        if (!loopWorkList.append(pred))
                            return false;
                    }
                }

                // Terminate loop if out of work.
                if (loopWorkList.empty())
                    break;

                // Grab the next block off the work list, skipping any OSR block.
                while (!loopWorkList.empty()) {
                    loopBlock = loopWorkList.popCopy();
                    if (loopBlock->lir() != graph.osrBlock())
                        break;
                }

                // If end is reached without finding a non-OSR block, then no more work items were found.
                if (loopBlock->lir() == graph.osrBlock()) {
                    JS_ASSERT(loopWorkList.empty());
                    break;
                }
            }

            // Clear the done set for other loops
            loopDone->clear();
        }

        JS_ASSERT_IF(!mblock->numPredecessors(), live->empty());
    }

    validateVirtualRegisters();

    // If the script has an infinite loop, there may be no MReturn and therefore
    // no fixed intervals. Add a small range to fixedIntervalsUnion so that the
    // rest of the allocator can assume it has at least one range.
    if (fixedIntervalsUnion->numRanges() == 0) {
        if (!fixedIntervalsUnion->addRangeAtHead(CodePosition(0, CodePosition::INPUT),
                                                 CodePosition(0, CodePosition::OUTPUT)))
        {
            return false;
        }
    }

    return true;
}
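Stripped of the jit types and the loop fix-up, buildLivenessInfo is the textbook backward liveness recurrence: liveIn(b) = uses(b) ∪ (liveOut(b) − defs(b)), with liveOut(b) the union of the successors' liveIn. A compact generic sketch, iterated to a fixed point (uses here means upward-exposed uses):

#include <cstdio>
#include <set>
#include <vector>

struct Block {
    std::vector<int> succs;
    std::set<int> uses, defs;   // uses = upward-exposed uses (before any def)
};

int main() {
    // Tiny straight-line CFG: 0 -> 1 -> 2.
    std::vector<Block> cfg(3);
    cfg[0].succs = {1}; cfg[0].defs = {0};                     // v0 = ...
    cfg[1].succs = {2}; cfg[1].uses = {0}; cfg[1].defs = {1};  // v1 = f(v0)
    cfg[2].uses = {1};                                         // use v1

    std::vector<std::set<int>> liveIn(cfg.size());
    bool changed = true;
    while (changed) {                       // fixed point; loops need >1 pass
        changed = false;
        for (int b = (int)cfg.size() - 1; b >= 0; b--) {
            std::set<int> in;
            for (int s : cfg[b].succs)      // liveOut = union of succ liveIn
                in.insert(liveIn[s].begin(), liveIn[s].end());
            for (int d : cfg[b].defs)       // kill definitions
                in.erase(d);
            in.insert(cfg[b].uses.begin(), cfg[b].uses.end()); // gen uses
            if (in != liveIn[b]) { liveIn[b] = in; changed = true; }
        }
    }
    for (size_t b = 0; b < cfg.size(); b++) {
        std::printf("liveIn(%zu):", b);
        for (int v : liveIn[b]) std::printf(" v%d", v);
        std::printf("\n");
    }
    return 0;
}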
Example #22
void Builder::processLBlockSet(WorkSpace *fw, otawa::ccg::LBlockSet *lbset) {
	ASSERT(fw);
	ASSERT(lbset);
	const hard::Cache *cache = hard::CACHE_CONFIGURATION(fw)->instCache();

	// Create the CCG
	Collection *ccgs = Graph::GRAPHS(fw);
	if(!ccgs) {
		ccgs = new Collection(cache->rowCount());
		fw->addProp(new DeletableProperty<Collection *>(Graph::GRAPHS, ccgs));
	}
	Graph *ccg = new Graph;
	ccgs->ccgs[lbset->line()] = ccg;

	// Initialization
	for(LBlockSet::Iterator lblock(*lbset); lblock; lblock++) {
		Node *node = new Node(lblock);
		ccg->add(node);
		Graph::NODE(lblock) = node;
	}

	// Run the DFA
	Problem prob(lbset, lbset->count(), cache, fw);
	const CFGCollection *coll = INVOLVED_CFGS(fw);
	dfa::XCFGVisitor<Problem> visitor(*coll, prob);
	dfa::XIterativeDFA<dfa::XCFGVisitor<Problem> > engine(visitor);
	engine.process();

	// Add the annotations from the DFA result
	for (CFGCollection::Iterator cfg(coll); cfg; cfg++) {
		for (CFG::BBIterator block(*cfg); block; block++) {
			dfa::XCFGVisitor<Problem>::key_t pair(*cfg, *block);
			dfa::BitSet *bitset = engine.in(pair);
			block->addProp(new DeletableProperty<dfa::BitSet *>(IN, new dfa::BitSet(*bitset)));
		}
	}

	// Detect the non-conflict state of each lblock
	BasicBlock *BB;
	LBlock *line;
	int length = lbset->count();
	for(LBlockSet::Iterator lbloc(*lbset); lbloc; lbloc++)
		if(lbloc->id() != 0 && lbloc->id() != length - 1) {
			BB = lbloc->bb();
			dfa::BitSet *inid = IN(BB);
			for(dfa::BitSet::Iterator bit(*inid); bit; bit++) {
				line = lbset->lblock(*bit);
				if(cache->block(line->address()) == cache->block(lbloc->address())
					&& BB != line->bb())
					NON_CONFLICT(lbloc) = true;
			}
		}

	// Building the ccg edges using DFA
	length = lbset->count();
	address_t adinst;
	LBlock *aux;

	for (CFGCollection::Iterator cfg(coll); cfg; cfg++) {
		for (CFG::BBIterator bb(*cfg); bb; bb++) {
			if ((cfg != ENTRY_CFG(fw)) || (!bb->isEntry() && !bb->isExit())) {
				dfa::BitSet *info = IN(bb);
				ASSERT(info);
				bool test = false;
				bool visit;
				for(BasicBlock::InstIter inst(bb); inst; inst++) {
					visit = false;
					adinst = inst->address();
					for (LBlockSet::Iterator lbloc(*lbset); lbloc; lbloc++){
						address_t address = lbloc->address();
						// The first lblock in the BB is treated as a conflict
						if(adinst == address && !test && bb == lbloc->bb()) {
							for (int i = 0; i< length; i++)
								if (info->contains(i)) {
									LBlock *lblock = lbset->lblock(i);
									Node *node = Graph::NODE(lblock);
									new Edge (node, Graph::NODE(lbloc));
								}
							aux = lbloc;
							test = true;
							visit = true;
							break;
						}

						if(adinst == address && !visit && bb == lbloc->bb()) {
							new Edge(Graph::NODE(aux), Graph::NODE(lbloc));
							aux = lbloc;
							break;
						}
					}
				}
			}
		}
	}

	// Build edges to the end LBlock
	BasicBlock *exit = ENTRY_CFG(fw)->exit();
	LBlock *end = lbset->lblock(length-1);
	dfa::BitSet *info = IN(exit);
	for (int i = 0; i< length; i++)
		if (info->contains(i)) {
			LBlock *ccgnode1 = lbset->lblock(i);
			new Edge(Graph::NODE(ccgnode1), Graph::NODE(end));
		}

	// Build an edge from 'S' to 'end'
	LBlock *s = lbset->lblock(0);
	new Edge(Graph::NODE(s), Graph::NODE(end));

	// Cleanup the DFA annotations
	for (CFGCollection::Iterator cfg(coll); cfg; cfg++)
		for (CFG::BBIterator block(cfg); block; block++)
			block->removeProp(&IN);
}
Example #23
void
RegisterAllocator::dumpInstructions()
{
#ifdef DEBUG
    fprintf(stderr, "Instructions:\n");

    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock* block = graph.getBlock(blockIndex);
        MBasicBlock* mir = block->mir();

        fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
        for (size_t i = 0; i < mir->numSuccessors(); i++)
            fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
        fprintf(stderr, "\n");

        for (size_t i = 0; i < block->numPhis(); i++) {
            LPhi* phi = block->getPhi(i);

            fprintf(stderr, "[%u,%u Phi] [def %s]",
                    inputOf(phi).bits(),
                    outputOf(phi).bits(),
                    phi->getDef(0)->toString());
            for (size_t j = 0; j < phi->numOperands(); j++)
                fprintf(stderr, " [use %s]", phi->getOperand(j)->toString());
            fprintf(stderr, "\n");
        }

        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;

            fprintf(stderr, "[");
            if (ins->id() != 0)
                fprintf(stderr, "%u,%u ", inputOf(ins).bits(), outputOf(ins).bits());
            fprintf(stderr, "%s]", ins->opName());

            if (ins->isMoveGroup()) {
                LMoveGroup* group = ins->toMoveGroup();
                for (int i = group->numMoves() - 1; i >= 0; i--) {
                    // Use two printfs, as LAllocation::toString is not reentrant.
                    fprintf(stderr, " [%s", group->getMove(i).from()->toString());
                    fprintf(stderr, " -> %s]", group->getMove(i).to()->toString());
                }
                fprintf(stderr, "\n");
                continue;
            }

            for (size_t i = 0; i < ins->numDefs(); i++)
                fprintf(stderr, " [def %s]", ins->getDef(i)->toString());

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition* temp = ins->getTemp(i);
                if (!temp->isBogusTemp())
                    fprintf(stderr, " [temp %s]", temp->toString());
            }

            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
                if (!alloc->isBogus())
                    fprintf(stderr, " [use %s]", alloc->toString());
            }

            fprintf(stderr, "\n");
        }
    }
    fprintf(stderr, "\n");
#endif // DEBUG
}
Example #24
bool
AllocationIntegrityState::check(bool populateSafepoints)
{
    MOZ_ASSERT(!instructions.empty());

#ifdef DEBUG
    if (JitSpewEnabled(JitSpew_RegAlloc))
        dump();

    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock* block = graph.getBlock(blockIndex);

        // Check that all instruction inputs and outputs have been assigned an allocation.
        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;

            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next())
                MOZ_ASSERT(!alloc->isUse());

            for (size_t i = 0; i < ins->numDefs(); i++) {
                LDefinition* def = ins->getDef(i);
                MOZ_ASSERT(!def->output()->isUse());

                LDefinition oldDef = instructions[ins->id()].outputs[i];
                MOZ_ASSERT_IF(oldDef.policy() == LDefinition::MUST_REUSE_INPUT,
                              *def->output() == *ins->getOperand(oldDef.getReusedInput()));
            }

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition* temp = ins->getTemp(i);
                MOZ_ASSERT_IF(!temp->isBogusTemp(), temp->output()->isRegister());

                LDefinition oldTemp = instructions[ins->id()].temps[i];
                MOZ_ASSERT_IF(oldTemp.policy() == LDefinition::MUST_REUSE_INPUT,
                              *temp->output() == *ins->getOperand(oldTemp.getReusedInput()));
            }
        }
    }
#endif

    // Check that the register assignment and move groups preserve the original
    // semantics of the virtual registers. Each virtual register has a single
    // write (owing to the SSA representation), but the allocation may move the
    // written value around between registers and memory locations along
    // different paths through the script.
    //
    // For each use of an allocation, follow the physical value which is read
    // backward through the script, along all paths to the value's virtual
    // register's definition.
    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock* block = graph.getBlock(blockIndex);
        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;
            const InstructionInfo& info = instructions[ins->id()];

            LSafepoint* safepoint = ins->safepoint();
            if (safepoint) {
                for (size_t i = 0; i < ins->numTemps(); i++) {
                    if (ins->getTemp(i)->isBogusTemp())
                        continue;
                    uint32_t vreg = info.temps[i].virtualRegister();
                    LAllocation* alloc = ins->getTemp(i)->output();
                    if (!checkSafepointAllocation(ins, vreg, *alloc, populateSafepoints))
                        return false;
                }
                MOZ_ASSERT_IF(ins->isCall() && !populateSafepoints,
                              safepoint->liveRegs().emptyFloat() &&
                              safepoint->liveRegs().emptyGeneral());
            }

            size_t inputIndex = 0;
            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
                LAllocation oldInput = info.inputs[inputIndex++];
                if (!oldInput.isUse())
                    continue;

                uint32_t vreg = oldInput.toUse()->virtualRegister();

                if (safepoint && !oldInput.toUse()->usedAtStart()) {
                    if (!checkSafepointAllocation(ins, vreg, **alloc, populateSafepoints))
                        return false;
                }

                // Start checking at the previous instruction, in case this
                // instruction reuses its input register for an output.
                LInstructionReverseIterator riter = block->rbegin(ins);
                riter++;
                checkIntegrity(block, *riter, vreg, **alloc, populateSafepoints);

                while (!worklist.empty()) {
                    IntegrityItem item = worklist.popCopy();
                    checkIntegrity(item.block, *item.block->rbegin(), item.vreg, item.alloc, populateSafepoints);
                }
            }
        }
    }

    return true;
}
bool
GreedyAllocator::allocateRegisters()
{
    // Allocate registers bottom-up, such that we see all uses before their
    // definitions.
    for (size_t i = graph.numBlocks() - 1; i < graph.numBlocks(); i--) {
        LBlock *block = graph.getBlock(i);

        IonSpew(IonSpew_RegAlloc, "Allocating block %d", (uint32)i);

        // All registers should be free.
        JS_ASSERT(state.free == RegisterSet::All());

        // Allocate stack for any phis.
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi *phi = block->getPhi(j);
            VirtualRegister *vreg = getVirtualRegister(phi->getDef(0));
            allocateStack(vreg);
        }

        // Allocate registers.
        if (!allocateRegistersInBlock(block))
            return false;

        LMoveGroup *entrySpills = block->getEntryMoveGroup();

        // We've reached the top of the block. Spill all registers by inserting
        // moves from their stack locations.
        for (AnyRegisterIterator iter(RegisterSet::All()); iter.more(); iter++) {
            VirtualRegister *vreg = state[*iter];
            if (!vreg) {
                JS_ASSERT(state.free.has(*iter));
                continue;
            }

            JS_ASSERT(vreg->reg() == *iter);
            JS_ASSERT(!state.free.has(vreg->reg()));
            allocateStack(vreg);

            LAllocation *from = LAllocation::New(vreg->backingStack());
            LAllocation *to = LAllocation::New(vreg->reg());
            if (!entrySpills->add(from, to))
                return false;

            killReg(vreg);
            vreg->unsetRegister();
        }

        // Before killing phis, ensure that each phi input has its own stack
        // allocation. This ensures we won't allocate the same slot for any phi
        // as its input, which technically may be legal (since the phi becomes
        // the last use of the slot), but we avoid for sanity.
        for (size_t i = 0; i < block->numPhis(); i++) {
            LPhi *phi = block->getPhi(i);
            for (size_t j = 0; j < phi->numOperands(); j++) {
                VirtualRegister *in = getVirtualRegister(phi->getOperand(j)->toUse());
                allocateStack(in);
            }
        }

        // Kill phis.
        for (size_t i = 0; i < block->numPhis(); i++) {
            LPhi *phi = block->getPhi(i);
            VirtualRegister *vr = getVirtualRegister(phi->getDef(0));
            JS_ASSERT(!vr->hasRegister());
            killStack(vr);
        }
    }
    return true;
}