Code Example #1
File: LIR.cpp  Project: dadaa/gecko-dev
bool
LAllocation::aliases(const LAllocation& other) const
{
    if (isFloatReg() && other.isFloatReg())
        return toFloatReg()->reg().aliases(other.toFloatReg()->reg());
    return *this == other;
}
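
aliases() reports whether two allocations can refer to overlapping machine registers: equal allocations always alias, and float registers additionally need a register-level check because, on some targets, a wide float register overlaps narrower ones (for example a double register covering two singles on ARM). The sketch below is a minimal standalone model of that rule; the FloatReg type and its encoding are hypothetical simplifications, not the SpiderMonkey API.

#include <cassert>

// Hypothetical simplified model: a double register dN overlaps singles s(2N) and
// s(2N+1), roughly as on ARM VFP. Illustrates why isFloatReg() needs its own
// aliasing check instead of plain equality.
struct FloatReg {
    int code;       // register number
    bool isDouble;  // double-width register?

    bool aliases(const FloatReg& other) const {
        if (isDouble == other.isDouble)
            return code == other.code;          // same width: alias only if identical
        int d = isDouble ? code : other.code;   // the double register's number
        int s = isDouble ? other.code : code;   // the single register's number
        return s / 2 == d;                      // dN covers s(2N) and s(2N+1)
    }
};

int main() {
    FloatReg d0{0, true}, s0{0, false}, s1{1, false}, s2{2, false};
    assert(d0.aliases(s0) && d0.aliases(s1));   // d0 overlaps s0 and s1
    assert(!d0.aliases(s2));                    // but not s2
    return 0;
}
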
Code Example #2
bool
CodeGeneratorX86Shared::visitOutOfLineUndoALUOperation(OutOfLineUndoALUOperation *ool)
{
    LInstruction *ins = ool->ins();
    Register reg = ToRegister(ins->getDef(0));

    mozilla::DebugOnly<LAllocation *> lhs = ins->getOperand(0);
    LAllocation *rhs = ins->getOperand(1);

    JS_ASSERT(reg == ToRegister(lhs));
    JS_ASSERT_IF(rhs->isGeneralReg(), reg != ToRegister(rhs));

    // Undo the effect of the ALU operation, which was performed on the output
    // register and overflowed. Writing to the output register clobbered an
    // input reg, and the original value of the input needs to be recovered
    // to satisfy the constraint imposed by any RECOVERED_INPUT operands to
    // the bailout snapshot.

    if (rhs->isConstant()) {
        Imm32 constant(ToInt32(rhs));
        if (ins->isAddI())
            masm.subl(constant, reg);
        else
            masm.addl(constant, reg);
    } else {
        if (ins->isAddI())
            masm.subl(ToOperand(rhs), reg);
        else
            masm.addl(ToOperand(rhs), reg);
    }

    return bailout(ool->ins()->snapshot());
}
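
This out-of-line path runs when the add/sub that was performed directly on the output register overflowed; since the output register is also the left-hand input (asserted above), the original input value can be recovered by applying the inverse operation before bailing out. A minimal hedged sketch of that recovery idea, using plain C++ integers rather than the Gecko macro assembler:

#include <cassert>
#include <cstdint>

// Illustration only: two's-complement (wrapping) addition and subtraction are
// exact inverses modulo 2^32, so subtracting rhs from the wrapped result
// recovers the original lhs, mirroring the masm.subl/addl undo above.
int main() {
    uint32_t lhs = 0x7fffffffu;   // INT32_MAX as a bit pattern
    uint32_t rhs = 5;

    uint32_t clobbered = lhs + rhs;       // overflowed result now in the output register
    uint32_t recovered = clobbered - rhs; // undo the ALU operation

    assert(recovered == lhs);             // original input is available for RECOVERED_INPUT
    return 0;
}
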
Code Example #3
void
CodeGeneratorMIPS64::visitUnbox(LUnbox* unbox)
{
    MUnbox* mir = unbox->mir();

    if (mir->fallible()) {
        const ValueOperand value = ToValue(unbox, LUnbox::Input);
        masm.splitTag(value, SecondScratchReg);
        bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(MIRTypeToTag(mir->type())),
                     unbox->snapshot());
    }

    LAllocation* input = unbox->getOperand(LUnbox::Input);
    Register result = ToRegister(unbox->output());
    if (input->isRegister()) {
        Register inputReg = ToRegister(input);
        switch (mir->type()) {
          case MIRType::Int32:
            masm.unboxInt32(inputReg, result);
            break;
          case MIRType::Boolean:
            masm.unboxBoolean(inputReg, result);
            break;
          case MIRType::Object:
            masm.unboxObject(inputReg, result);
            break;
          case MIRType::String:
            masm.unboxString(inputReg, result);
            break;
          case MIRType::Symbol:
            masm.unboxSymbol(inputReg, result);
            break;
          default:
            MOZ_CRASH("Given MIRType cannot be unboxed.");
        }
        return;
    }

    Address inputAddr = ToAddress(input);
    switch (mir->type()) {
      case MIRType::Int32:
        masm.unboxInt32(inputAddr, result);
        break;
      case MIRType::Boolean:
        masm.unboxBoolean(inputAddr, result);
        break;
      case MIRType::Object:
        masm.unboxObject(inputAddr, result);
        break;
      case MIRType::String:
        masm.unboxString(inputAddr, result);
        break;
      case MIRType::Symbol:
        masm.unboxSymbol(inputAddr, result);
        break;
      default:
        MOZ_CRASH("Given MIRType cannot be unboxed.");
    }
}
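
On a 64-bit platform a boxed Value packs a type tag and a payload into one word; the fallible path above extracts the tag, compares it against the expected MIRType, and bails out on a mismatch before the payload is unboxed. The standalone sketch below shows the same check-tag-then-strip-it structure with a deliberately simplified, hypothetical encoding (tag in the top 16 bits); the real SpiderMonkey nan-boxing layout differs.

#include <cassert>
#include <cstdint>

constexpr uint64_t TAG_SHIFT = 48;
constexpr uint64_t PAYLOAD_MASK = (uint64_t(1) << TAG_SHIFT) - 1;
enum Tag : uint64_t { TagInt32 = 1, TagBoolean = 2, TagObject = 3 };

uint64_t box(Tag tag, uint64_t payload) {
    return (uint64_t(tag) << TAG_SHIFT) | (payload & PAYLOAD_MASK);
}

// Fallible unbox: fails (a bailout in the JIT) when the tag does not match.
bool unboxInt32(uint64_t value, int32_t* out) {
    if ((value >> TAG_SHIFT) != TagInt32)
        return false;
    *out = int32_t(value & PAYLOAD_MASK);   // strip the tag, keep the payload
    return true;
}

int main() {
    int32_t n = 0;
    assert(unboxInt32(box(TagInt32, 42), &n) && n == 42);
    assert(!unboxInt32(box(TagBoolean, 1), &n));   // wrong tag: fallible unbox fails
    return 0;
}
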
Code Example #4
void
GreedyAllocator::informSnapshot(LInstruction *ins)
{
    LSnapshot *snapshot = ins->snapshot();
    for (size_t i = 0; i < snapshot->numEntries(); i++) {
        LAllocation *a = snapshot->getEntry(i);
        if (!a->isUse())
            continue;

        // Every definition in a snapshot gets a stack slot. This
        // simplification means we can treat normal snapshots and LOsiPoint
        // snapshots (which follow calls) the same, without adding a special
        // exception to note that registers are spilled at the LOsiPoint.
        VirtualRegister *vr = getVirtualRegister(a->toUse());
        allocateStack(vr);
        *a = vr->backingStack();
    }
}
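
Every use mentioned in a snapshot is rewritten to the stack slot backing its virtual register, so snapshots never refer to registers and call-site (LOsiPoint) snapshots need no special spill handling. A small sketch of that rewrite loop over hypothetical simplified structures (not the GreedyAllocator types):

#include <cassert>
#include <vector>

// Hypothetical model of "every snapshot entry gets a stack slot": each virtual
// register lazily receives a backing slot, and the snapshot entry is rewritten
// to name that slot instead of whatever register the value may be in.
struct VirtualReg {
    int backingSlot = -1;   // -1 means no stack slot assigned yet
};

int main() {
    std::vector<VirtualReg> vregs(3);
    int nextSlot = 0;

    auto allocateStack = [&](VirtualReg& vr) {
        if (vr.backingSlot < 0)
            vr.backingSlot = nextSlot++;
    };

    std::vector<int> snapshotUses = {2, 0, 2};   // snapshot entries referring to vregs
    std::vector<int> rewritten;                  // entries after the rewrite

    for (int vreg : snapshotUses) {
        allocateStack(vregs[vreg]);              // ensure a backing slot exists
        rewritten.push_back(vregs[vreg].backingSlot);
    }

    assert(rewritten[0] == rewritten[2]);        // same vreg maps to the same slot
    assert(rewritten[0] != rewritten[1]);
    return 0;
}
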
Code Example #5
bool
GreedyAllocator::buildPhiMoves(LBlock *block)
{
    IonSpew(IonSpew_RegAlloc, " Merging phi state."); 

    phiMoves = Mover();

    MBasicBlock *mblock = block->mir();
    if (!mblock->successorWithPhis())
        return true;

    // Insert moves from our state into our successor's phi.
    uint32 pos = mblock->positionInPhiSuccessor();
    LBlock *successor = mblock->successorWithPhis()->lir();
    for (size_t i = 0; i < successor->numPhis(); i++) {
        LPhi *phi = successor->getPhi(i);
        JS_ASSERT(phi->numDefs() == 1);

        VirtualRegister *phiReg = getVirtualRegister(phi->getDef(0));
        allocateStack(phiReg);

        LAllocation *in = phi->getOperand(pos);
        VirtualRegister *inReg = getVirtualRegister(in->toUse());
        allocateStack(inReg);

        // Try to get a register for the input.
        if (!inReg->hasRegister() && !allocatableRegs().empty(inReg->isDouble())) {
            if (!allocateReg(inReg))
                return false;
        }

        // Add a move from the input to the phi.
        if (inReg->hasRegister()) {
            if (!phiMoves.move(inReg->reg(), phiReg->backingStack()))
                return false;
        } else {
            if (!phiMoves.move(inReg->backingStack(), phiReg->backingStack()))
                return false;
        }
    }

    return true;
}
Code Example #6
bool
GreedyAllocator::allocateInputs(LInstruction *ins)
{
    // First deal with fixed-register policies and policies that require
    // registers.
    for (size_t i = 0; i < ins->numOperands(); i++) {
        LAllocation *a = ins->getOperand(i);
        if (!a->isUse())
            continue;
        LUse *use = a->toUse();
        VirtualRegister *vr = getVirtualRegister(use);
        if (use->policy() == LUse::FIXED) {
            if (!allocateFixedOperand(a, vr))
                return false;
        } else if (use->policy() == LUse::REGISTER) {
            if (!allocateRegisterOperand(a, vr))
                return false;
        }
    }

    // Finally, deal with things that take either registers or memory.
    for (size_t i = 0; i < ins->numOperands(); i++) {
        LAllocation *a = ins->getOperand(i);
        if (!a->isUse())
            continue;

        VirtualRegister *vr = getVirtualRegister(a->toUse());
        if (!allocateAnyOperand(a, vr))
            return false;
    }

    return true;
}
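
The operands are walked in two passes: operands with hard constraints (a fixed register, or any register) are satisfied first, and only then are the flexible register-or-memory operands placed, so a flexible operand cannot grab a register that a constrained one still needs. A tiny sketch of that ordering with hypothetical types:

#include <cassert>
#include <string>
#include <vector>

// Illustration of the two-pass ordering in allocateInputs: constrained operands
// (FIXED / REGISTER policies) are processed before the "anything goes" ones.
enum class Policy { Fixed, Register, Any };

struct Operand {
    std::string name;
    Policy policy;
};

int main() {
    std::vector<Operand> operands = {
        {"a", Policy::Any}, {"b", Policy::Fixed}, {"c", Policy::Register},
    };

    std::vector<std::string> order;
    for (const Operand& op : operands)     // pass 1: constrained operands
        if (op.policy != Policy::Any)
            order.push_back(op.name);
    for (const Operand& op : operands)     // pass 2: register-or-memory operands
        if (op.policy == Policy::Any)
            order.push_back(op.name);

    assert((order == std::vector<std::string>{"b", "c", "a"}));
    return 0;
}
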
Code Example #7
bool
GreedyAllocator::prescanUses(LInstruction *ins)
{
    for (size_t i = 0; i < ins->numOperands(); i++) {
        LAllocation *a = ins->getOperand(i);
        if (!a->isUse()) {
            JS_ASSERT(a->isConstant());
            continue;
        }

        LUse *use = a->toUse();
        VirtualRegister *vr = getVirtualRegister(use);
        if (use->policy() == LUse::FIXED) {
            // A def or temp may use the same register, so we have to use the
            // unchecked version.
            disallowed.addUnchecked(GetFixedRegister(vr->def, use));
        } else if (vr->hasRegister()) {
            discouraged.addUnchecked(vr->reg());
        }
    }
    return true;
}
Code Example #8
bool
AllocationIntegrityState::check(bool populateSafepoints)
{
    MOZ_ASSERT(!instructions.empty());

#ifdef DEBUG
    if (JitSpewEnabled(JitSpew_RegAlloc))
        dump();

    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock* block = graph.getBlock(blockIndex);

        // Check that all instruction inputs and outputs have been assigned an allocation.
        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;

            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next())
                MOZ_ASSERT(!alloc->isUse());

            for (size_t i = 0; i < ins->numDefs(); i++) {
                LDefinition* def = ins->getDef(i);
                MOZ_ASSERT(!def->output()->isUse());

                LDefinition oldDef = instructions[ins->id()].outputs[i];
                MOZ_ASSERT_IF(oldDef.policy() == LDefinition::MUST_REUSE_INPUT,
                              *def->output() == *ins->getOperand(oldDef.getReusedInput()));
            }

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition* temp = ins->getTemp(i);
                MOZ_ASSERT_IF(!temp->isBogusTemp(), temp->output()->isRegister());

                LDefinition oldTemp = instructions[ins->id()].temps[i];
                MOZ_ASSERT_IF(oldTemp.policy() == LDefinition::MUST_REUSE_INPUT,
                              *temp->output() == *ins->getOperand(oldTemp.getReusedInput()));
            }
        }
    }
#endif

    // Check that the register assignment and move groups preserve the original
    // semantics of the virtual registers. Each virtual register has a single
    // write (owing to the SSA representation), but the allocation may move the
    // written value around between registers and memory locations along
    // different paths through the script.
    //
    // For each use of an allocation, follow the physical value which is read
    // backward through the script, along all paths to the value's virtual
    // register's definition.
    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock* block = graph.getBlock(blockIndex);
        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;
            const InstructionInfo& info = instructions[ins->id()];

            LSafepoint* safepoint = ins->safepoint();
            if (safepoint) {
                for (size_t i = 0; i < ins->numTemps(); i++) {
                    if (ins->getTemp(i)->isBogusTemp())
                        continue;
                    uint32_t vreg = info.temps[i].virtualRegister();
                    LAllocation* alloc = ins->getTemp(i)->output();
                    if (!checkSafepointAllocation(ins, vreg, *alloc, populateSafepoints))
                        return false;
                }
                MOZ_ASSERT_IF(ins->isCall() && !populateSafepoints,
                              safepoint->liveRegs().emptyFloat() &&
                              safepoint->liveRegs().emptyGeneral());
            }

            size_t inputIndex = 0;
            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
                LAllocation oldInput = info.inputs[inputIndex++];
                if (!oldInput.isUse())
                    continue;

                uint32_t vreg = oldInput.toUse()->virtualRegister();

                if (safepoint && !oldInput.toUse()->usedAtStart()) {
                    if (!checkSafepointAllocation(ins, vreg, **alloc, populateSafepoints))
                        return false;
                }

                // Start checking at the previous instruction, in case this
                // instruction reuses its input register for an output.
                LInstructionReverseIterator riter = block->rbegin(ins);
                riter++;
                if (!checkIntegrity(block, *riter, vreg, **alloc, populateSafepoints))
                    return false;

                while (!worklist.empty()) {
                    IntegrityItem item = worklist.popCopy();
                    if (!checkIntegrity(item.block, *item.block->rbegin(), item.vreg, item.alloc,
                                        populateSafepoints))
                        return false;
                }
            }
        }
    }

    return true;
}
Code Example #9
bool
AllocationIntegrityState::checkSafepointAllocation(LInstruction* ins,
                                                   uint32_t vreg, LAllocation alloc,
                                                   bool populateSafepoints)
{
    LSafepoint* safepoint = ins->safepoint();
    MOZ_ASSERT(safepoint);

    if (ins->isCall() && alloc.isRegister())
        return true;

    if (alloc.isRegister()) {
        AnyRegister reg = alloc.toRegister();
        if (populateSafepoints)
            safepoint->addLiveRegister(reg);

        MOZ_ASSERT(safepoint->liveRegs().has(reg));
    }

    // The |this| argument slot is implicitly included in all safepoints.
    if (alloc.isArgument() && alloc.toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value))
        return true;

    LDefinition::Type type = virtualRegisters[vreg]
                             ? virtualRegisters[vreg]->type()
                             : LDefinition::GENERAL;

    switch (type) {
      case LDefinition::OBJECT:
        if (populateSafepoints) {
            JitSpew(JitSpew_RegAlloc, "Safepoint object v%u i%u %s",
                    vreg, ins->id(), alloc.toString());
            if (!safepoint->addGcPointer(alloc))
                return false;
        }
        MOZ_ASSERT(safepoint->hasGcPointer(alloc));
        break;
      case LDefinition::SLOTS:
        if (populateSafepoints) {
            JitSpew(JitSpew_RegAlloc, "Safepoint slots v%u i%u %s",
                    vreg, ins->id(), alloc.toString());
            if (!safepoint->addSlotsOrElementsPointer(alloc))
                return false;
        }
        MOZ_ASSERT(safepoint->hasSlotsOrElementsPointer(alloc));
        break;
#ifdef JS_NUNBOX32
      // Do not assert that safepoint information for nunbox types is complete:
      // if the vregs for a value's components are copied in multiple places,
      // the safepoint information may not reflect all copies. All copies of
      // payloads must be reflected, however, for generational GC.
      case LDefinition::TYPE:
        if (populateSafepoints) {
            JitSpew(JitSpew_RegAlloc, "Safepoint type v%u i%u %s",
                    vreg, ins->id(), alloc.toString());
            if (!safepoint->addNunboxType(vreg, alloc))
                return false;
        }
        break;
      case LDefinition::PAYLOAD:
        if (populateSafepoints) {
            JitSpew(JitSpew_RegAlloc, "Safepoint payload v%u i%u %s",
                    vreg, ins->id(), alloc.toString());
            if (!safepoint->addNunboxPayload(vreg, alloc))
                return false;
        }
        MOZ_ASSERT(safepoint->hasNunboxPayload(alloc));
        break;
#else
      case LDefinition::BOX:
        if (populateSafepoints) {
            JitSpew(JitSpew_RegAlloc, "Safepoint boxed value v%u i%u %s",
                    vreg, ins->id(), alloc.toString());
            if (!safepoint->addBoxedValue(alloc))
                return false;
        }
        MOZ_ASSERT(safepoint->hasBoxedValue(alloc));
        break;
#endif
      default:
        break;
    }

    return true;
}
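
A safepoint records, for every value live across a possible GC, where that value sits and what kind of pointer it is (GC thing, slots pointer, nunbox half, or boxed Value), so the collector can locate and update those slots; the checker above either verifies that the record exists or, in populate mode, adds it. A minimal hedged sketch of that record-or-verify pattern with a hypothetical miniature safepoint (string-named locations instead of LAllocations):

#include <cassert>
#include <set>
#include <string>

// Hypothetical miniature safepoint: the set of locations currently holding GC
// pointers. Mirrors the addGcPointer/hasGcPointer pattern used above.
struct MiniSafepoint {
    std::set<std::string> gcPointers;

    void addGcPointer(const std::string& loc) { gcPointers.insert(loc); }
    bool hasGcPointer(const std::string& loc) const { return gcPointers.count(loc) != 0; }
};

bool checkOrPopulate(MiniSafepoint& sp, const std::string& loc, bool populate) {
    if (populate)
        sp.addGcPointer(loc);     // populate mode: record the location
    return sp.hasGcPointer(loc);  // check mode: it must already be recorded
}

int main() {
    MiniSafepoint sp;
    assert(checkOrPopulate(sp, "stack:8", /* populate = */ true));
    assert(checkOrPopulate(sp, "stack:8", /* populate = */ false));  // verifies cleanly now
    assert(!checkOrPopulate(sp, "eax", /* populate = */ false));     // unrecorded: check fails
    return 0;
}
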
Code Example #10
bool
LiveRangeAllocator<VREG>::buildLivenessInfo()
{
    if (!init())
        return false;

    Vector<MBasicBlock *, 1, SystemAllocPolicy> loopWorkList;
    BitSet *loopDone = BitSet::New(alloc(), graph.numBlockIds());
    if (!loopDone)
        return false;

    for (size_t i = graph.numBlocks(); i > 0; i--) {
        if (mir->shouldCancel("Build Liveness Info (main loop)"))
            return false;

        LBlock *block = graph.getBlock(i - 1);
        MBasicBlock *mblock = block->mir();

        BitSet *live = BitSet::New(alloc(), graph.numVirtualRegisters());
        if (!live)
            return false;
        liveIn[mblock->id()] = live;

        // Propagate liveIn from our successors to us
        for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
            MBasicBlock *successor = mblock->lastIns()->getSuccessor(i);
            // Skip backedges, as we fix them up at the loop header.
            if (mblock->id() < successor->id())
                live->insertAll(liveIn[successor->id()]);
        }

        // Add successor phis
        if (mblock->successorWithPhis()) {
            LBlock *phiSuccessor = mblock->successorWithPhis()->lir();
            for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
                LPhi *phi = phiSuccessor->getPhi(j);
                LAllocation *use = phi->getOperand(mblock->positionInPhiSuccessor());
                uint32_t reg = use->toUse()->virtualRegister();
                live->insert(reg);
            }
        }

        // Variables are assumed alive for the entire block; a define shortens
        // the interval to the point of definition.
        for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
            if (!vregs[*liveRegId].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                                  outputOf(block->lastId()).next()))
            {
                return false;
            }
        }

        // Shorten the front end of live intervals for live variables to their
        // point of definition, if found.
        for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
            // Calls may clobber registers, so force a spill and reload around the callsite.
            if (ins->isCall()) {
                for (AnyRegisterIterator iter(allRegisters_); iter.more(); iter++) {
                    if (forLSRA) {
                        if (!addFixedRangeAtHead(*iter, inputOf(*ins), outputOf(*ins)))
                            return false;
                    } else {
                        bool found = false;
                        for (size_t i = 0; i < ins->numDefs(); i++) {
                            if (ins->getDef(i)->isPreset() &&
                                *ins->getDef(i)->output() == LAllocation(*iter)) {
                                found = true;
                                break;
                            }
                        }
                        if (!found && !addFixedRangeAtHead(*iter, outputOf(*ins), outputOf(*ins).next()))
                            return false;
                    }
                }
            }

            for (size_t i = 0; i < ins->numDefs(); i++) {
                if (ins->getDef(i)->policy() != LDefinition::PASSTHROUGH) {
                    LDefinition *def = ins->getDef(i);

                    CodePosition from;
                    if (def->policy() == LDefinition::PRESET && def->output()->isRegister() && forLSRA) {
                        // The fixed range covers the current instruction so the
                        // interval for the virtual register starts at the next
                        // instruction. If the next instruction has a fixed use,
                        // this can lead to unnecessary register moves. To avoid
                        // special handling for this, assert the next instruction
                        // has no fixed uses. defineFixed guarantees this by inserting
                        // an LNop.
                        JS_ASSERT(!NextInstructionHasFixedUses(block, *ins));
                        AnyRegister reg = def->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins).next()))
                            return false;
                        from = outputOf(*ins).next();
                    } else {
                        from = forLSRA ? inputOf(*ins) : outputOf(*ins);
                    }

                    if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
                        // MUST_REUSE_INPUT is implemented by allocating an output
                        // register and moving the input to it. Register hints are
                        // used to avoid unnecessary moves. We give the input an
                        // LUse::ANY policy to avoid allocating a register for the
                        // input.
                        LUse *inputUse = ins->getOperand(def->getReusedInput())->toUse();
                        JS_ASSERT(inputUse->policy() == LUse::REGISTER);
                        JS_ASSERT(inputUse->usedAtStart());
                        *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
                    }

                    LiveInterval *interval = vregs[def].getInterval(0);
                    interval->setFrom(from);

                    // Ensure that if there aren't any uses, there's at least
                    // some interval for the output to go into.
                    if (interval->numRanges() == 0) {
                        if (!interval->addRangeAtHead(from, from.next()))
                            return false;
                    }
                    live->remove(def->virtualRegister());
                }
            }

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition *temp = ins->getTemp(i);
                if (temp->isBogusTemp())
                    continue;

                if (forLSRA) {
                    if (temp->policy() == LDefinition::PRESET) {
                        if (ins->isCall())
                            continue;
                        AnyRegister reg = temp->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                            return false;

                        // Fixed intervals are not added to safepoints, so do it
                        // here.
                        if (LSafepoint *safepoint = ins->safepoint())
                            AddRegisterToSafepoint(safepoint, reg, *temp);
                    } else {
                        JS_ASSERT(!ins->isCall());
                        if (!vregs[temp].getInterval(0)->addRangeAtHead(inputOf(*ins), outputOf(*ins)))
                            return false;
                    }
                } else {
                    // Normally temps are considered to cover both the input
                    // and output of the associated instruction. In some cases
                    // though we want to use a fixed register as both an input
                    // and clobbered register in the instruction, so watch for
                    // this and shorten the temp to cover only the output.
                    CodePosition from = inputOf(*ins);
                    if (temp->policy() == LDefinition::PRESET) {
                        AnyRegister reg = temp->output()->toRegister();
                        for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
                            if (alloc->isUse()) {
                                LUse *use = alloc->toUse();
                                if (use->isFixedRegister()) {
                                    if (GetFixedRegister(vregs[use].def(), use) == reg)
                                        from = outputOf(*ins);
                                }
                            }
                        }
                    }

                    CodePosition to =
                        ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
                    if (!vregs[temp].getInterval(0)->addRangeAtHead(from, to))
                        return false;
                }
            }

            DebugOnly<bool> hasUseRegister = false;
            DebugOnly<bool> hasUseRegisterAtStart = false;

            for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) {
                if (inputAlloc->isUse()) {
                    LUse *use = inputAlloc->toUse();

                    // The first instruction, LLabel, has no uses.
                    JS_ASSERT(inputOf(*ins) > outputOf(block->firstId()));

                    // Call uses should always be at-start or fixed, since the fixed intervals
                    // use all registers.
                    JS_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
                                 use->isFixedRegister() || use->usedAtStart());

#ifdef DEBUG
                    // Don't allow at-start call uses if there are temps of the same kind,
                    // so that we don't assign the same register.
                    if (ins->isCall() && use->usedAtStart()) {
                        for (size_t i = 0; i < ins->numTemps(); i++)
                            JS_ASSERT(vregs[ins->getTemp(i)].isDouble() != vregs[use].isDouble());
                    }

                    // If there are both useRegisterAtStart(x) and useRegister(y)
                    // uses, we may assign the same register to both operands due to
                    // interval splitting (bug 772830). Don't allow this for now.
                    if (use->policy() == LUse::REGISTER) {
                        if (use->usedAtStart()) {
                            if (!IsInputReused(*ins, use))
                                hasUseRegisterAtStart = true;
                        } else {
                            hasUseRegister = true;
                        }
                    }

                    JS_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
#endif

                    // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
                    if (use->policy() == LUse::RECOVERED_INPUT)
                        continue;

                    CodePosition to;
                    if (forLSRA) {
                        if (use->isFixedRegister()) {
                            AnyRegister reg = GetFixedRegister(vregs[use].def(), use);
                            if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                                return false;
                            to = inputOf(*ins);

                            // Fixed intervals are not added to safepoints, so do it
                            // here.
                            LSafepoint *safepoint = ins->safepoint();
                            if (!ins->isCall() && safepoint)
                                AddRegisterToSafepoint(safepoint, reg, *vregs[use].def());
                        } else {
                            to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
                        }
                    } else {
                        to = (use->usedAtStart() || ins->isCall())
                           ? inputOf(*ins) : outputOf(*ins);
                        if (use->isFixedRegister()) {
                            LAllocation reg(AnyRegister::FromCode(use->registerCode()));
                            for (size_t i = 0; i < ins->numDefs(); i++) {
                                LDefinition *def = ins->getDef(i);
                                if (def->policy() == LDefinition::PRESET && *def->output() == reg)
                                    to = inputOf(*ins);
                            }
                        }
                    }

                    LiveInterval *interval = vregs[use].getInterval(0);
                    if (!interval->addRangeAtHead(inputOf(block->firstId()), forLSRA ? to : to.next()))
                        return false;
                    interval->addUse(new(alloc()) UsePosition(use, to));

                    live->insert(use->virtualRegister());
                }
            }
        }

        // Phis have simultaneous assignment semantics at block begin, so at
        // the beginning of the block we can be sure that liveIn does not
        // contain any phi outputs.
        for (unsigned int i = 0; i < block->numPhis(); i++) {
            LDefinition *def = block->getPhi(i)->getDef(0);
            if (live->contains(def->virtualRegister())) {
                live->remove(def->virtualRegister());
            } else {
                // This is a dead phi, so add a dummy range over all phis. This
                // can go away if we have an earlier dead code elimination pass.
                if (!vregs[def].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                               outputOf(block->firstId())))
                {
                    return false;
                }
            }
        }

        if (mblock->isLoopHeader()) {
            // A divergence from the published algorithm is required here, as
            // our block order does not guarantee that blocks of a loop are
            // contiguous. As a result, a single live interval spanning the
            // loop is not possible. Additionally, we require liveIn in a later
            // pass for resolution, so that must also be fixed up here.
            MBasicBlock *loopBlock = mblock->backedge();
            while (true) {
                // Blocks must already have been visited to have a liveIn set.
                JS_ASSERT(loopBlock->id() >= mblock->id());

                // Add an interval for this entire loop block
                CodePosition from = inputOf(loopBlock->lir()->firstId());
                CodePosition to = outputOf(loopBlock->lir()->lastId()).next();

                for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
                    if (!vregs[*liveRegId].getInterval(0)->addRange(from, to))
                        return false;
                }

                // Fix up the liveIn set to account for the new interval
                liveIn[loopBlock->id()]->insertAll(live);

                // Make sure we don't visit this node again
                loopDone->insert(loopBlock->id());

                // If this is the loop header, any predecessors are either the
                // backedge or out of the loop, so skip any predecessors of
                // this block
                if (loopBlock != mblock) {
                    for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
                        MBasicBlock *pred = loopBlock->getPredecessor(i);
                        if (loopDone->contains(pred->id()))
                            continue;
                        if (!loopWorkList.append(pred))
                            return false;
                    }
                }

                // Terminate loop if out of work.
                if (loopWorkList.empty())
                    break;

                // Grab the next block off the work list, skipping any OSR block.
                while (!loopWorkList.empty()) {
                    loopBlock = loopWorkList.popCopy();
                    if (loopBlock->lir() != graph.osrBlock())
                        break;
                }

                // If the work list was exhausted without finding a non-OSR block, there is no more work.
                if (loopBlock->lir() == graph.osrBlock()) {
                    JS_ASSERT(loopWorkList.empty());
                    break;
                }
            }

            // Clear the done set for other loops
            loopDone->clear();
        }

        JS_ASSERT_IF(!mblock->numPredecessors(), live->empty());
    }

    validateVirtualRegisters();

    // If the script has an infinite loop, there may be no MReturn and therefore
    // no fixed intervals. Add a small range to fixedIntervalsUnion so that the
    // rest of the allocator can assume it has at least one range.
    if (fixedIntervalsUnion->numRanges() == 0) {
        if (!fixedIntervalsUnion->addRangeAtHead(CodePosition(0, CodePosition::INPUT),
                                                 CodePosition(0, CodePosition::OUTPUT)))
        {
            return false;
        }
    }

    return true;
}
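
The pass visits blocks in reverse order: a block's live set starts as the union of its successors' live-in sets plus its phi inputs, and instructions are then walked backward, removing each definition and inserting each use, which yields ranges covering exactly the region where a vreg's value may still be read. The compact standalone sketch below shows that backward walk for a single block, with std::set standing in for the BitSet and hypothetical simplified instructions:

#include <cassert>
#include <set>
#include <vector>

// One block's backward liveness: start from liveOut, then, walking instructions
// in reverse, kill the definition and add the uses. Mirrors the loop structure
// of buildLivenessInfo above.
struct Ins {
    int def;                 // defined vreg, or -1 if none
    std::vector<int> uses;   // used vregs
};

std::set<int> liveIn(const std::vector<Ins>& block, std::set<int> liveOut) {
    std::set<int> live = std::move(liveOut);
    for (auto it = block.rbegin(); it != block.rend(); ++it) {
        if (it->def >= 0)
            live.erase(it->def);   // a define ends the live range here
        for (int u : it->uses)
            live.insert(u);        // a use keeps the vreg live above this point
    }
    return live;
}

int main() {
    // v2 = v0 + v1; use v2
    std::vector<Ins> block = { {2, {0, 1}}, {-1, {2}} };
    std::set<int> in = liveIn(block, /* liveOut = */ {});
    assert(in == std::set<int>({0, 1}));   // v0, v1 live on entry; v2 is not
    return 0;
}
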
Code Example #11
bool
GreedyAllocator::allocateDefinition(LInstruction *ins, LDefinition *def)
{
    VirtualRegister *vr = getVirtualRegister(def);

    LAllocation output;
    switch (def->policy()) {
      case LDefinition::PASSTHROUGH:
        // This is purely passthru, so ignore it.
        return true;

      case LDefinition::DEFAULT:
      case LDefinition::MUST_REUSE_INPUT:
      {
        AnyRegister reg;
        // Either take the register requested, or allocate a new one.
        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
            ins->getOperand(def->getReusedInput())->toUse()->isFixedRegister())
        {
            LAllocation *a = ins->getOperand(def->getReusedInput());
            VirtualRegister *vuse = getVirtualRegister(a->toUse());
            reg = GetFixedRegister(vuse->def, a->toUse());
        } else if (vr->hasRegister()) {
            reg = vr->reg();
        } else {
            if (!allocate(vr->type(), DISALLOW, &reg))
                return false;
        }

        if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
            LUse *use = ins->getOperand(def->getReusedInput())->toUse();
            VirtualRegister *vuse = getVirtualRegister(use);
            // If the use already has the given register, we need to evict.
            if (vuse->hasRegister() && vuse->reg() == reg) {
                if (!evict(reg))
                    return false;
            }

            // Make sure our input is using a fixed register.
            if (reg.isFloat())
                *use = LUse(reg.fpu(), use->virtualRegister());
            else
                *use = LUse(reg.gpr(), use->virtualRegister());
        }
        output = LAllocation(reg);
        break;
      }

      case LDefinition::PRESET:
      {
        // Eviction and disallowing occurred during the definition
        // pre-scan pass.
        output = *def->output();
        break;
      }
    }

    if (output.isRegister()) {
        JS_ASSERT_IF(output.isFloatReg(), disallowed.has(output.toFloatReg()->reg()));
        JS_ASSERT_IF(output.isGeneralReg(), disallowed.has(output.toGeneralReg()->reg()));
    }

    // Finally, set the output.
    def->setOutput(output);
    return true;
}
Code Example #12
File: LIR.cpp  Project: AtulKumar2/gecko-dev
bool
LMoveGroup::add(LAllocation from, LAllocation to, LDefinition::Type type)
{
#ifdef DEBUG
    MOZ_ASSERT(from != to);
    for (size_t i = 0; i < moves_.length(); i++)
        MOZ_ASSERT(to != moves_[i].to());

    // Check that SIMD moves are aligned according to ABI requirements.
    if (LDefinition(type).isSimdType()) {
        MOZ_ASSERT(from.isMemory() || from.isFloatReg());
        if (from.isMemory()) {
            if (from.isArgument())
                MOZ_ASSERT(from.toArgument()->index() % SimdMemoryAlignment == 0);
            else
                MOZ_ASSERT(from.toStackSlot()->slot() % SimdMemoryAlignment == 0);
        }
        MOZ_ASSERT(to.isMemory() || to.isFloatReg());
        if (to.isMemory()) {
            if (to.isArgument())
                MOZ_ASSERT(to.toArgument()->index() % SimdMemoryAlignment == 0);
            else
                MOZ_ASSERT(to.toStackSlot()->slot() % SimdMemoryAlignment == 0);
        }
    }
#endif
    return moves_.append(LMove(from, to, type));
}
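
A move group collects the moves needed at a single point (for example at a block edge), and the debug assertions enforce two invariants: no move is a self-move, and no two moves in the group write the same destination, since the group is later resolved as a parallel copy. A tiny sketch enforcing the same invariants (here rejected at run time rather than asserted) on a hypothetical simplified move list, with ints in place of LAllocations:

#include <cassert>
#include <utility>
#include <vector>

// Hypothetical simplified move group: destinations must be unique and self-moves
// are rejected, matching the DEBUG assertions in LMoveGroup::add above.
struct MiniMoveGroup {
    std::vector<std::pair<int, int>> moves;   // (from, to)

    bool add(int from, int to) {
        if (from == to)
            return false;                     // no self-moves
        for (const auto& m : moves)
            if (m.second == to)
                return false;                 // no duplicate destinations
        moves.emplace_back(from, to);
        return true;
    }
};

int main() {
    MiniMoveGroup group;
    assert(group.add(1, 2));
    assert(group.add(2, 3));
    assert(!group.add(5, 2));   // destination 2 already written by this group
    assert(!group.add(4, 4));   // self-move rejected
    return 0;
}
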
Code Example #13
bool
AllocationIntegrityState::checkSafepointAllocation(LInstruction *ins,
                                                   uint32_t vreg, LAllocation alloc,
                                                   bool populateSafepoints)
{
    LSafepoint *safepoint = ins->safepoint();
    JS_ASSERT(safepoint);

    if (ins->isCall() && alloc.isRegister())
        return true;

    if (alloc.isRegister()) {
        AnyRegister reg = alloc.toRegister();
        if (populateSafepoints)
            safepoint->addLiveRegister(reg);
        JS_ASSERT(safepoint->liveRegs().has(reg));
    }

    LDefinition::Type type = virtualRegisters[vreg]
                             ? virtualRegisters[vreg]->type()
                             : LDefinition::GENERAL;

    switch (type) {
      case LDefinition::OBJECT:
        if (populateSafepoints) {
            IonSpew(IonSpew_RegAlloc, "Safepoint object v%u i%u %s",
                    vreg, ins->id(), alloc.toString());
            if (!safepoint->addGcPointer(alloc))
                return false;
        }
        JS_ASSERT(safepoint->hasGcPointer(alloc));
        break;
#ifdef JS_NUNBOX32
      // Do not assert that safepoint information for nunboxes is complete:
      // if the vregs for a value's components are copied in multiple places,
      // the safepoint information may not reflect all copies.
      // See SafepointWriter::writeNunboxParts.
      case LDefinition::TYPE:
        if (populateSafepoints) {
            IonSpew(IonSpew_RegAlloc, "Safepoint type v%u i%u %s",
                    vreg, ins->id(), alloc.toString());
            if (!safepoint->addNunboxType(vreg, alloc))
                return false;
        }
        break;
      case LDefinition::PAYLOAD:
        if (populateSafepoints) {
            IonSpew(IonSpew_RegAlloc, "Safepoint payload v%u i%u %s",
                    vreg, ins->id(), alloc.toString());
            if (!safepoint->addNunboxPayload(vreg, alloc))
                return false;
        }
        break;
#else
      case LDefinition::BOX:
        if (populateSafepoints) {
            IonSpew(IonSpew_RegAlloc, "Safepoint boxed value v%u i%u %s",
                    vreg, ins->id(), alloc.toString());
            if (!safepoint->addBoxedValue(alloc))
                return false;
        }
        JS_ASSERT(safepoint->hasBoxedValue(alloc));
        break;
#endif
      default:
        break;
    }

    return true;
}