// Discard |def| and mine its operands for any subsequently dead defs.
bool
ValueNumberer::discardDef(MDefinition* def)
{
#ifdef JS_JITSPEW
    JitSpew(JitSpew_GVN, "      Discarding %s %s%u",
            def->block()->isMarked() ? "unreachable" : "dead",
            def->opName(), def->id());
#endif
#ifdef DEBUG
    MOZ_ASSERT(def != nextDef_, "Invalidating the MDefinition iterator");
    if (def->block()->isMarked()) {
        MOZ_ASSERT(!def->hasUses(), "Discarding def that still has uses");
    } else {
        MOZ_ASSERT(IsDiscardable(def), "Discarding non-discardable definition");
        MOZ_ASSERT(!values_.has(def), "Discarding a definition still in the set");
    }
#endif

    MBasicBlock* block = def->block();
    if (def->isPhi()) {
        MPhi* phi = def->toPhi();
        if (!releaseAndRemovePhiOperands(phi))
            return false;
        block->discardPhi(phi);
    } else {
        MInstruction* ins = def->toInstruction();
        if (MResumePoint* resume = ins->resumePoint()) {
            if (!releaseResumePointOperands(resume))
                return false;
        }
        if (!releaseOperands(ins))
            return false;
        block->discardIgnoreOperands(ins);
    }

    // If that was the last definition in the block, it can be safely removed
    // from the graph.
    if (block->phisEmpty() && block->begin() == block->end()) {
        MOZ_ASSERT(block->isMarked(), "Reachable block lacks at least a control instruction");

        // As a special case, don't remove a block which is a dominator tree
        // root so that we don't invalidate the iterator in visitGraph. We'll
        // check for this and remove it later.
        if (block->immediateDominator() != block) {
            JitSpew(JitSpew_GVN, "      Block block%u is now empty; discarding", block->id());
            graph_.removeBlock(block);
            blocksRemoved_ = true;
        } else {
            JitSpew(JitSpew_GVN, "      Dominator root block%u is now empty; will discard later",
                    block->id());
        }
    }

    return true;
}
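A note on the comment at the top of discardDef: the function only releases the operands of one definition; a caller is expected to keep a worklist of defs whose use counts drop to zero and call discardDef on each of them. The sketch below illustrates that worklist pattern in isolation; Node, DiscardAndMineOperands, and DiscardDeadCode are hypothetical stand-ins, not the MDefinition/ValueNumberer API.

#include <vector>

// Hypothetical stand-in for an SSA definition: it tracks only its operands
// and how many uses it still has.
struct Node {
    std::vector<Node*> operands;
    unsigned useCount = 0;
    bool discarded = false;
};

// Discard |def| and push any operand whose last use this was, so the caller
// can keep draining the worklist until nothing further becomes dead.
static void DiscardAndMineOperands(Node* def, std::vector<Node*>& worklist)
{
    for (Node* op : def->operands) {
        // Releasing our use of the operand may make it dead in turn.
        if (op->useCount > 0 && --op->useCount == 0 && !op->discarded)
            worklist.push_back(op);
    }
    def->operands.clear();
    def->discarded = true;
}

// Typical driver: seed the worklist with a known-dead definition, then drain it.
static void DiscardDeadCode(Node* deadRoot)
{
    std::vector<Node*> worklist{deadRoot};
    while (!worklist.empty()) {
        Node* def = worklist.back();
        worklist.pop_back();
        DiscardAndMineOperands(def, worklist);
    }
}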
Example #2
bool
jit::LICM(MIRGenerator *mir, MIRGraph &graph)
{
    JitSpew(JitSpew_LICM, "Beginning LICM pass");

    // Iterate in RPO to visit outer loops before inner loops. We'd hoist the
    // same things either way, but outer first means we do a little less work.
    for (auto i(graph.rpoBegin()), e(graph.rpoEnd()); i != e; ++i) {
        MBasicBlock *header = *i;
        if (!header->isLoopHeader())
            continue;

        bool canOsr;
        MarkLoopBlocks(graph, header, &canOsr);

        // Hoisting out of a loop that has an entry from the OSR block in
        // addition to its normal entry is tricky. In theory we could clone
        // the instruction and insert phis.
        if (!canOsr)
            VisitLoop(graph, header);
        else
            JitSpew(JitSpew_LICM, "  Skipping loop with header block%u due to OSR", header->id());

        UnmarkLoopBlocks(graph, header);

        if (mir->shouldCancel("LICM (main loop)"))
            return false;
    }

    return true;
}
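MarkLoopBlocks and UnmarkLoopBlocks are not shown above. Conceptually, the blocks of a natural loop are the header plus everything that can reach the backedge without passing through the header, found with a backward walk over predecessors. A minimal sketch of that walk on a toy CFG follows; Block and MarkNaturalLoop are illustrative names, not the SpiderMonkey API.

#include <vector>

// Toy CFG block: just backward edges and a mark bit.
struct Block {
    std::vector<Block*> predecessors;
    bool marked = false;
};

// Mark every block of the natural loop defined by (header, backedge): walk
// predecessors backward from the backedge and stop at the header.
static void MarkNaturalLoop(Block* header, Block* backedge)
{
    header->marked = true;
    std::vector<Block*> worklist{backedge};
    while (!worklist.empty()) {
        Block* block = worklist.back();
        worklist.pop_back();
        if (block->marked)
            continue;
        block->marked = true;
        for (Block* pred : block->predecessors)
            worklist.push_back(pred);
    }
}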
Example #3
// Visit all the blocks dominated by dominatorRoot.
bool
ValueNumberer::visitDominatorTree(MBasicBlock* dominatorRoot)
{
    JitSpew(JitSpew_GVN, "  Visiting dominator tree (with %llu blocks) rooted at block%u%s",
            uint64_t(dominatorRoot->numDominated()), dominatorRoot->id(),
            dominatorRoot == graph_.entryBlock() ? " (normal entry block)" :
            dominatorRoot == graph_.osrBlock() ? " (OSR entry block)" :
            dominatorRoot->numPredecessors() == 0 ? " (odd unreachable block)" :
            " (merge point from normal entry and OSR entry)");
    MOZ_ASSERT(dominatorRoot->immediateDominator() == dominatorRoot,
            "root is not a dominator tree root");

    // Visit all blocks dominated by dominatorRoot, in RPO. This has the nice
    // property that we'll always visit a block before any block it dominates,
    // so we can make a single pass through the list and see every full
    // redundancy.
    size_t numVisited = 0;
    size_t numDiscarded = 0;
    for (ReversePostorderIterator iter(graph_.rpoBegin(dominatorRoot)); ; ) {
        MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information");
        MBasicBlock* block = *iter++;
        // We're only visiting blocks in dominatorRoot's tree right now.
        if (!dominatorRoot->dominates(block))
            continue;

        // If this is a loop backedge, remember the header, as we may not be able
        // to find it after we simplify the block.
        MBasicBlock* header = block->isLoopBackedge() ? block->loopHeaderOfBackedge() : nullptr;

        if (block->isMarked()) {
            // This block has become unreachable; handle it specially.
            if (!visitUnreachableBlock(block))
                return false;
            ++numDiscarded;
        } else {
            // Visit the block!
            if (!visitBlock(block, dominatorRoot))
                return false;
            ++numVisited;
        }

        // If the block is/was a loop backedge, check to see if the block that
        // is/was its header has optimizable phis, which would want a re-run.
        if (!rerun_ && header && loopHasOptimizablePhi(header)) {
            JitSpew(JitSpew_GVN, "    Loop phi in block%u can now be optimized; will re-run GVN!",
                    header->id());
            rerun_ = true;
            remainingBlocks_.clear();
        }

        MOZ_ASSERT(numVisited <= dominatorRoot->numDominated() - numDiscarded,
                   "Visited blocks too many times");
        if (numVisited >= dominatorRoot->numDominated() - numDiscarded)
            break;
    }

    totalNumVisited_ += numVisited;
    values_.clear();
    return true;
}
// A loop is about to be made reachable only through an OSR entry into one of
// its nested loops. Fix everything up.
bool
ValueNumberer::fixupOSROnlyLoop(MBasicBlock* block, MBasicBlock* backedge)
{
    // Create an empty and unreachable(!) block which jumps to |block|. This
    // allows |block| to remain marked as a loop header, so we don't have to
    // worry about moving a different block into place as the new loop header,
    // which is hard, especially if the OSR is into a nested loop. Doing all
    // that would produce slightly more optimal code, but this is so
    // extraordinarily rare that it isn't worth the complexity.
    MBasicBlock* fake = MBasicBlock::New(graph_, block->info(), nullptr, MBasicBlock::NORMAL);
    if (fake == nullptr)
        return false;

    graph_.insertBlockBefore(block, fake);
    fake->setImmediateDominator(fake);
    fake->addNumDominated(1);
    fake->setDomIndex(fake->id());
    fake->setUnreachable();

    // Create zero-input phis to use as inputs for any phis in |block|.
    // Again, this is a little odd, but it's the least-odd thing we can do
    // without significant complexity.
    for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ++iter) {
        MPhi* phi = *iter;
        MPhi* fakePhi = MPhi::New(graph_.alloc(), phi->type());
        fake->addPhi(fakePhi);
        if (!phi->addInputSlow(fakePhi))
            return false;
    }

    fake->end(MGoto::New(graph_.alloc(), block));

    if (!block->addPredecessorWithoutPhis(fake))
        return false;

    // Restore |backedge| as |block|'s loop backedge.
    block->clearLoopHeader();
    block->setLoopHeader(backedge);

    JitSpew(JitSpew_GVN, "        Created fake block%u", fake->id());
    hasOSRFixups_ = true;
    return true;
}
Example #5
void
ion::AssertExtendedGraphCoherency(MIRGraph &graph)
{
    // Checks the basic GraphCoherency but also other conditions that
    // do not hold immediately (such as the fact that critical edges
    // are split)

#ifdef DEBUG
    AssertGraphCoherency(graph);

    uint32_t idx = 0;
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        JS_ASSERT(block->id() == idx++);

        // No critical edges:
        if (block->numSuccessors() > 1)
            for (size_t i = 0; i < block->numSuccessors(); i++)
                JS_ASSERT(block->getSuccessor(i)->numPredecessors() == 1);

        if (block->isLoopHeader()) {
            JS_ASSERT(block->numPredecessors() == 2);
            MBasicBlock *backedge = block->getPredecessor(1);
            JS_ASSERT(backedge->id() >= block->id());
            JS_ASSERT(backedge->numSuccessors() == 1);
            JS_ASSERT(backedge->getSuccessor(0) == *block);
        }

        if (!block->phisEmpty()) {
            for (size_t i = 0; i < block->numPredecessors(); i++) {
                MBasicBlock *pred = block->getPredecessor(i);
                JS_ASSERT(pred->successorWithPhis() == *block);
                JS_ASSERT(pred->positionInPhiSuccessor() == i);
            }
        }

        uint32_t successorWithPhis = 0;
        for (size_t i = 0; i < block->numSuccessors(); i++)
            if (!block->getSuccessor(i)->phisEmpty())
                successorWithPhis++;

        JS_ASSERT(successorWithPhis <= 1);
        JS_ASSERT_IF(successorWithPhis, block->successorWithPhis() != NULL);

        // I'd like to assert this, but it's not necessarily true. Sometimes we set this
        // flag to non-NULL just because a successor has multiple preds, even if it
        // does not actually have any phis.
        //
        // JS_ASSERT_IF(!successorWithPhis, block->successorWithPhis() == NULL);
    }
#endif
}
Example #6
void
MIRGraph::removeBlocksAfter(MBasicBlock *start)
{
    MBasicBlockIterator iter(begin());
    iter++;
    while (iter != end()) {
        MBasicBlock *block = *iter;
        iter++;

        if (block->id() <= start->id())
            continue;

        removeBlock(block);
    }
}
void
StupidAllocator::syncForBlockEnd(LBlock *block, LInstruction *ins)
{
    // Sync any dirty registers, and update the synced state for phi nodes at
    // each successor of a block. We cannot conflate the storage for phis with
    // that of their inputs, as we cannot prove the live ranges of the phi and
    // its input do not overlap. The values for the two may additionally be
    // different, as the phi could be for the value of the input in a previous
    // loop iteration.

    for (size_t i = 0; i < registerCount; i++)
        syncRegister(ins, i);

    LMoveGroup *group = nullptr;

    MBasicBlock *successor = block->mir()->successorWithPhis();
    if (successor) {
        uint32_t position = block->mir()->positionInPhiSuccessor();
        LBlock *lirsuccessor = graph.getBlock(successor->id());
        for (size_t i = 0; i < lirsuccessor->numPhis(); i++) {
            LPhi *phi = lirsuccessor->getPhi(i);

            uint32_t sourcevreg = phi->getOperand(position)->toUse()->virtualRegister();
            uint32_t destvreg = phi->getDef(0)->virtualRegister();

            if (sourcevreg == destvreg)
                continue;

            LAllocation *source = stackLocation(sourcevreg);
            LAllocation *dest = stackLocation(destvreg);

            if (!group) {
                // The moves we insert here need to happen simultaneously with
                // each other, yet after any existing moves before the instruction.
                LMoveGroup *input = getInputMoveGroup(ins->id());
                if (input->numMoves() == 0) {
                    group = input;
                } else {
                    group = new LMoveGroup(alloc());
                    block->insertAfter(input, group);
                }
            }

            group->add(source, dest);
        }
    }
}
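The comment inside syncForBlockEnd stresses that the moves added to an LMoveGroup must happen simultaneously with each other. The reason is that two phi moves can form a cycle (slot 0 to slot 1 and slot 1 to slot 0); executing them one after another would clobber one of the values. The sketch below is a minimal, self-contained illustration of parallel-copy semantics over plain integer slots; it is not the real move-group resolver.

#include <cassert>
#include <cstddef>
#include <vector>

// One entry in a parallel move group: copy slots[src] into slots[dst].
struct Move { int src; int dst; };

// Execute a move group with parallel-copy semantics: every source is read
// before any destination is written. Snapshotting the sources first is the
// simplest correct strategy; real allocators instead break cycles with a
// scratch register or an exchange.
static void ExecuteParallel(std::vector<int>& slots, const std::vector<Move>& group)
{
    std::vector<int> snapshot;
    for (const Move& m : group)
        snapshot.push_back(slots[m.src]);
    for (std::size_t i = 0; i < group.size(); i++)
        slots[group[i].dst] = snapshot[i];
}

int main()
{
    // Two phi moves that swap each other's values: 0 -> 1 and 1 -> 0.
    std::vector<int> slots{10, 20};
    ExecuteParallel(slots, {{0, 1}, {1, 0}});
    assert(slots[0] == 20 && slots[1] == 10); // sequential copies would yield {10, 10}
    return 0;
}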
Example #8
void
MIRGraph::removeBlocksAfter(MBasicBlock *start)
{
    MBasicBlockIterator iter(begin());
    iter++;
    while (iter != end()) {
        MBasicBlock *block = *iter;
        iter++;

        if (block->id() <= start->id())
            continue;

        // removeBlock will not remove the resume points, since they can be
        // shared with outer blocks. So remove them now.
        block->discardAllResumePoints();
        removeBlock(block);
    }
}
Example #9
// Visit all the blocks in the graph.
bool
ValueNumberer::visitGraph()
{
    // Due to OSR blocks, the set of blocks dominated by a block may not be
    // contiguous in the RPO. Do a separate traversal for each dominator tree
    // root. There's always the main entry, and sometimes there's an OSR entry,
    // and then there are the roots formed where the OSR paths merge with the
    // main entry paths.
    for (ReversePostorderIterator iter(graph_.rpoBegin()); ; ) {
        MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information");
        MBasicBlock* block = *iter;
        if (block->immediateDominator() == block) {
            if (!visitDominatorTree(block))
                return false;

            // Normally unreachable blocks would be removed by now, but if this
            // block is a dominator tree root, it has been special-cased and left
            // in place in order to avoid invalidating our iterator. Now that
            // we've finished the tree, increment the iterator, and then if it's
            // marked for removal, remove it.
            ++iter;
            if (block->isMarked()) {
                JitSpew(JitSpew_GVN, "      Discarding dominator root block%u",
                        block->id());
                MOZ_ASSERT(block->begin() == block->end(),
                           "Unreachable dominator tree root has instructions after tree walk");
                MOZ_ASSERT(block->phisEmpty(),
                           "Unreachable dominator tree root has phis after tree walk");
                graph_.removeBlock(block);
                blocksRemoved_ = true;
            }

            MOZ_ASSERT(totalNumVisited_ <= graph_.numBlocks(), "Visited blocks too many times");
            if (totalNumVisited_ >= graph_.numBlocks())
                break;
        } else {
            // This block is not a dominator tree root. Proceed to the next one.
            ++iter;
        }
    }
    totalNumVisited_ = 0;
    return true;
}
Example #10
// Whether there might be a path from src to dest, excluding loop backedges. This is
// approximate and really ought to depend on precomputed reachability information.
static inline bool
BlockMightReach(MBasicBlock* src, MBasicBlock* dest)
{
    while (src->id() <= dest->id()) {
        if (src == dest)
            return true;
        switch (src->numSuccessors()) {
          case 0:
            return false;
          case 1: {
            MBasicBlock* successor = src->getSuccessor(0);
            if (successor->id() <= src->id())
                return true; // Don't iloop.
            src = successor;
            break;
          }
          default:
            return true;
        }
    }
    return false;
}
bool
LiveRangeAllocator<VREG>::buildLivenessInfo()
{
    if (!init())
        return false;

    Vector<MBasicBlock *, 1, SystemAllocPolicy> loopWorkList;
    BitSet *loopDone = BitSet::New(alloc(), graph.numBlockIds());
    if (!loopDone)
        return false;

    for (size_t i = graph.numBlocks(); i > 0; i--) {
        if (mir->shouldCancel("Build Liveness Info (main loop)"))
            return false;

        LBlock *block = graph.getBlock(i - 1);
        MBasicBlock *mblock = block->mir();

        BitSet *live = BitSet::New(alloc(), graph.numVirtualRegisters());
        if (!live)
            return false;
        liveIn[mblock->id()] = live;

        // Propagate liveIn from our successors to us
        for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
            MBasicBlock *successor = mblock->lastIns()->getSuccessor(i);
            // Skip backedges, as we fix them up at the loop header.
            if (mblock->id() < successor->id())
                live->insertAll(liveIn[successor->id()]);
        }

        // Add successor phis
        if (mblock->successorWithPhis()) {
            LBlock *phiSuccessor = mblock->successorWithPhis()->lir();
            for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
                LPhi *phi = phiSuccessor->getPhi(j);
                LAllocation *use = phi->getOperand(mblock->positionInPhiSuccessor());
                uint32_t reg = use->toUse()->virtualRegister();
                live->insert(reg);
            }
        }

        // Variables are assumed alive for the entire block; a definition
        // shortens the interval to the point of definition.
        for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
            if (!vregs[*liveRegId].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                                  outputOf(block->lastId()).next()))
            {
                return false;
            }
        }

        // Shorten the front end of live intervals for live variables to their
        // point of definition, if found.
        for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
            // Calls may clobber registers, so force a spill and reload around the callsite.
            if (ins->isCall()) {
                for (AnyRegisterIterator iter(allRegisters_); iter.more(); iter++) {
                    if (forLSRA) {
                        if (!addFixedRangeAtHead(*iter, inputOf(*ins), outputOf(*ins)))
                            return false;
                    } else {
                        bool found = false;
                        for (size_t i = 0; i < ins->numDefs(); i++) {
                            if (ins->getDef(i)->isPreset() &&
                                *ins->getDef(i)->output() == LAllocation(*iter)) {
                                found = true;
                                break;
                            }
                        }
                        if (!found && !addFixedRangeAtHead(*iter, outputOf(*ins), outputOf(*ins).next()))
                            return false;
                    }
                }
            }

            for (size_t i = 0; i < ins->numDefs(); i++) {
                if (ins->getDef(i)->policy() != LDefinition::PASSTHROUGH) {
                    LDefinition *def = ins->getDef(i);

                    CodePosition from;
                    if (def->policy() == LDefinition::PRESET && def->output()->isRegister() && forLSRA) {
                        // The fixed range covers the current instruction so the
                        // interval for the virtual register starts at the next
                        // instruction. If the next instruction has a fixed use,
                        // this can lead to unnecessary register moves. To avoid
                        // special handling for this, assert the next instruction
                        // has no fixed uses. defineFixed guarantees this by inserting
                        // an LNop.
                        JS_ASSERT(!NextInstructionHasFixedUses(block, *ins));
                        AnyRegister reg = def->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins).next()))
                            return false;
                        from = outputOf(*ins).next();
                    } else {
                        from = forLSRA ? inputOf(*ins) : outputOf(*ins);
                    }

                    if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
                        // MUST_REUSE_INPUT is implemented by allocating an output
                        // register and moving the input to it. Register hints are
                        // used to avoid unnecessary moves. We give the input an
                        // LUse::ANY policy to avoid allocating a register for the
                        // input.
                        LUse *inputUse = ins->getOperand(def->getReusedInput())->toUse();
                        JS_ASSERT(inputUse->policy() == LUse::REGISTER);
                        JS_ASSERT(inputUse->usedAtStart());
                        *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
                    }

                    LiveInterval *interval = vregs[def].getInterval(0);
                    interval->setFrom(from);

                    // Ensure that if there aren't any uses, there's at least
                    // some interval for the output to go into.
                    if (interval->numRanges() == 0) {
                        if (!interval->addRangeAtHead(from, from.next()))
                            return false;
                    }
                    live->remove(def->virtualRegister());
                }
            }

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition *temp = ins->getTemp(i);
                if (temp->isBogusTemp())
                    continue;

                if (forLSRA) {
                    if (temp->policy() == LDefinition::PRESET) {
                        if (ins->isCall())
                            continue;
                        AnyRegister reg = temp->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                            return false;

                        // Fixed intervals are not added to safepoints, so do it
                        // here.
                        if (LSafepoint *safepoint = ins->safepoint())
                            AddRegisterToSafepoint(safepoint, reg, *temp);
                    } else {
                        JS_ASSERT(!ins->isCall());
                        if (!vregs[temp].getInterval(0)->addRangeAtHead(inputOf(*ins), outputOf(*ins)))
                            return false;
                    }
                } else {
                    // Normally temps are considered to cover both the input
                    // and output of the associated instruction. In some cases
                    // though we want to use a fixed register as both an input
                    // and clobbered register in the instruction, so watch for
                    // this and shorten the temp to cover only the output.
                    CodePosition from = inputOf(*ins);
                    if (temp->policy() == LDefinition::PRESET) {
                        AnyRegister reg = temp->output()->toRegister();
                        for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
                            if (alloc->isUse()) {
                                LUse *use = alloc->toUse();
                                if (use->isFixedRegister()) {
                                    if (GetFixedRegister(vregs[use].def(), use) == reg)
                                        from = outputOf(*ins);
                                }
                            }
                        }
                    }

                    CodePosition to =
                        ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
                    if (!vregs[temp].getInterval(0)->addRangeAtHead(from, to))
                        return false;
                }
            }

            DebugOnly<bool> hasUseRegister = false;
            DebugOnly<bool> hasUseRegisterAtStart = false;

            for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) {
                if (inputAlloc->isUse()) {
                    LUse *use = inputAlloc->toUse();

                    // The first instruction, LLabel, has no uses.
                    JS_ASSERT(inputOf(*ins) > outputOf(block->firstId()));

                    // Call uses should always be at-start or fixed, since the fixed intervals
                    // use all registers.
                    JS_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
                                 use->isFixedRegister() || use->usedAtStart());

#ifdef DEBUG
                    // Don't allow at-start call uses if there are temps of the same kind,
                    // so that we don't assign the same register.
                    if (ins->isCall() && use->usedAtStart()) {
                        for (size_t i = 0; i < ins->numTemps(); i++)
                            JS_ASSERT(vregs[ins->getTemp(i)].isDouble() != vregs[use].isDouble());
                    }

                    // If there are both useRegisterAtStart(x) and useRegister(y)
                    // uses, we may assign the same register to both operands due to
                    // interval splitting (bug 772830). Don't allow this for now.
                    if (use->policy() == LUse::REGISTER) {
                        if (use->usedAtStart()) {
                            if (!IsInputReused(*ins, use))
                                hasUseRegisterAtStart = true;
                        } else {
                            hasUseRegister = true;
                        }
                    }

                    JS_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
#endif

                    // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
                    if (use->policy() == LUse::RECOVERED_INPUT)
                        continue;

                    CodePosition to;
                    if (forLSRA) {
                        if (use->isFixedRegister()) {
                            AnyRegister reg = GetFixedRegister(vregs[use].def(), use);
                            if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                                return false;
                            to = inputOf(*ins);

                            // Fixed intervals are not added to safepoints, so do it
                            // here.
                            LSafepoint *safepoint = ins->safepoint();
                            if (!ins->isCall() && safepoint)
                                AddRegisterToSafepoint(safepoint, reg, *vregs[use].def());
                        } else {
                            to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
                        }
                    } else {
                        to = (use->usedAtStart() || ins->isCall())
                           ? inputOf(*ins) : outputOf(*ins);
                        if (use->isFixedRegister()) {
                            LAllocation reg(AnyRegister::FromCode(use->registerCode()));
                            for (size_t i = 0; i < ins->numDefs(); i++) {
                                LDefinition *def = ins->getDef(i);
                                if (def->policy() == LDefinition::PRESET && *def->output() == reg)
                                    to = inputOf(*ins);
                            }
                        }
                    }

                    LiveInterval *interval = vregs[use].getInterval(0);
                    if (!interval->addRangeAtHead(inputOf(block->firstId()), forLSRA ? to : to.next()))
                        return false;
                    interval->addUse(new(alloc()) UsePosition(use, to));

                    live->insert(use->virtualRegister());
                }
            }
        }

        // Phis have simultaneous assignment semantics at block begin, so at
        // the beginning of the block we can be sure that liveIn does not
        // contain any phi outputs.
        for (unsigned int i = 0; i < block->numPhis(); i++) {
            LDefinition *def = block->getPhi(i)->getDef(0);
            if (live->contains(def->virtualRegister())) {
                live->remove(def->virtualRegister());
            } else {
                // This is a dead phi, so add a dummy range over all phis. This
                // can go away if we have an earlier dead code elimination pass.
                if (!vregs[def].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                               outputOf(block->firstId())))
                {
                    return false;
                }
            }
        }

        if (mblock->isLoopHeader()) {
            // A divergence from the published algorithm is required here, as
            // our block order does not guarantee that blocks of a loop are
            // contiguous. As a result, a single live interval spanning the
            // loop is not possible. Additionally, we require liveIn in a later
            // pass for resolution, so that must also be fixed up here.
            MBasicBlock *loopBlock = mblock->backedge();
            while (true) {
                // Blocks must already have been visited to have a liveIn set.
                JS_ASSERT(loopBlock->id() >= mblock->id());

                // Add an interval for this entire loop block
                CodePosition from = inputOf(loopBlock->lir()->firstId());
                CodePosition to = outputOf(loopBlock->lir()->lastId()).next();

                for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
                    if (!vregs[*liveRegId].getInterval(0)->addRange(from, to))
                        return false;
                }

                // Fix up the liveIn set to account for the new interval
                liveIn[loopBlock->id()]->insertAll(live);

                // Make sure we don't visit this node again
                loopDone->insert(loopBlock->id());

                // If this is the loop header, any predecessors are either the
                // backedge or out of the loop, so skip any predecessors of
                // this block
                if (loopBlock != mblock) {
                    for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
                        MBasicBlock *pred = loopBlock->getPredecessor(i);
                        if (loopDone->contains(pred->id()))
                            continue;
                        if (!loopWorkList.append(pred))
                            return false;
                    }
                }

                // Terminate loop if out of work.
                if (loopWorkList.empty())
                    break;

                // Grab the next block off the work list, skipping any OSR block.
                while (!loopWorkList.empty()) {
                    loopBlock = loopWorkList.popCopy();
                    if (loopBlock->lir() != graph.osrBlock())
                        break;
                }

                // If end is reached without finding a non-OSR block, then no more work items were found.
                if (loopBlock->lir() == graph.osrBlock()) {
                    JS_ASSERT(loopWorkList.empty());
                    break;
                }
            }

            // Clear the done set for other loops
            loopDone->clear();
        }

        JS_ASSERT_IF(!mblock->numPredecessors(), live->empty());
    }

    validateVirtualRegisters();

    // If the script has an infinite loop, there may be no MReturn and therefore
    // no fixed intervals. Add a small range to fixedIntervalsUnion so that the
    // rest of the allocator can assume it has at least one range.
    if (fixedIntervalsUnion->numRanges() == 0) {
        if (!fixedIntervalsUnion->addRangeAtHead(CodePosition(0, CodePosition::INPUT),
                                                 CodePosition(0, CodePosition::OUTPUT)))
        {
            return false;
        }
    }

    return true;
}
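buildLivenessInfo is large and allocator-specific, but its core is the textbook backward liveness dataflow: a block's live set starts as the union of its successors' live-in sets, and instructions are then walked in reverse so that a definition kills its virtual register and a use revives it. The sketch below shows just that core on toy types (std::set over integer vreg ids instead of BitSet), ignoring phis, fixed registers, and the loop fixup.

#include <set>
#include <vector>

// Toy instruction: which virtual registers it writes and reads.
struct Insn {
    std::vector<int> defs;
    std::vector<int> uses;
};

// Toy block: instructions plus CFG successors; liveIn is the output.
struct Block {
    std::vector<Insn> insns;
    std::vector<const Block*> successors;
    std::set<int> liveIn;
};

// Compute live-in for |block|, assuming each successor's liveIn is already
// known (blocks are processed bottom-up in the real pass, with a separate
// fixup for loop backedges).
static void ComputeLiveIn(Block& block)
{
    std::set<int> live;
    for (const Block* succ : block.successors)
        live.insert(succ->liveIn.begin(), succ->liveIn.end());

    // Walk instructions in reverse: a definition ends a live range here,
    // a use extends it upward toward the top of the block.
    for (auto it = block.insns.rbegin(); it != block.insns.rend(); ++it) {
        for (int d : it->defs)
            live.erase(d);
        for (int u : it->uses)
            live.insert(u);
    }
    block.liveIn = live;
}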
Example #12
bool
Sink(MIRGenerator* mir, MIRGraph& graph)
{
    TempAllocator& alloc = graph.alloc();
    bool sinkEnabled = mir->optimizationInfo().sinkEnabled();

    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Sink"))
            return false;

        for (MInstructionReverseIterator iter = block->rbegin(); iter != block->rend(); ) {
            MInstruction* ins = *iter++;

            // Only instructions which can be recovered on bailout can be moved
            // into the bailout paths.
            if (ins->isGuard() || ins->isGuardRangeBailouts() ||
                ins->isRecoveredOnBailout() || !ins->canRecoverOnBailout())
            {
                continue;
            }

            // Compute a common dominator for all uses of the current
            // instruction.
            bool hasLiveUses = false;
            bool hasUses = false;
            MBasicBlock* usesDominator = nullptr;
            for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; i++) {
                hasUses = true;
                MNode* consumerNode = (*i)->consumer();
                if (consumerNode->isResumePoint())
                    continue;

                MDefinition* consumer = consumerNode->toDefinition();
                if (consumer->isRecoveredOnBailout())
                    continue;

                hasLiveUses = true;

                // If the consumer is a Phi, then we should dominate the
                // predecessor from which the value comes.
                MBasicBlock* consumerBlock = consumer->block();
                if (consumer->isPhi())
                    consumerBlock = consumerBlock->getPredecessor(consumer->indexOf(*i));

                usesDominator = CommonDominator(usesDominator, consumerBlock);
                if (usesDominator == *block)
                    break;
            }

            // Leave this instruction for DCE.
            if (!hasUses)
                continue;

            // We have no live uses, so sink this instruction in all the
            // bailout paths.
            if (!hasLiveUses) {
                MOZ_ASSERT(!usesDominator);
                ins->setRecoveredOnBailout();
                JitSpewDef(JitSpew_Sink, "  No live uses, recover the instruction on bailout\n", ins);
                continue;
            }

            // This guard is temporarily placed here because the code above
            // handles dead code elimination, which was folded into this Sink
            // phase: dead code elimination used to move instructions with no
            // live uses to the bailout path.
            if (!sinkEnabled)
                continue;

            // To move an effectful instruction, we would have to verify that
            // the side effect is not observed. In the meantime, we just inhibit
            // this optimization on effectful instructions.
            if (ins->isEffectful())
                continue;

            // If all the uses are under a loop, we might not want to work
            // against LICM by moving everything back into the loop, but if the
            // loop is itself inside an if, then we still want to move the
            // computation under that if statement.
            while (block->loopDepth() < usesDominator->loopDepth()) {
                MOZ_ASSERT(usesDominator != usesDominator->immediateDominator());
                usesDominator = usesDominator->immediateDominator();
            }

            // Only move instructions if there is a branch between the dominator
            // of the uses and the original instruction. This prevents moving the
            // computation of the arguments into an inlined function if there is
            // no major win.
            MBasicBlock* lastJoin = usesDominator;
            while (*block != lastJoin && lastJoin->numPredecessors() == 1) {
                MOZ_ASSERT(lastJoin != lastJoin->immediateDominator());
                MBasicBlock* next = lastJoin->immediateDominator();
                if (next->numSuccessors() > 1)
                    break;
                lastJoin = next;
            }
            if (*block == lastJoin)
                continue;

            // Skip to the next instruction if we cannot find a common dominator
            // for all the uses of this instruction, or if the common dominator
            // corresponds to the block of the current instruction.
            if (!usesDominator || usesDominator == *block)
                continue;

            // Only instructions which can be recovered on bailout and which are
            // sinkable can be moved into blocks below, while filling the resume
            // points with a clone which is recovered on bailout.

            // If the instruction has live uses and if it is clonable, then we
            // can clone the instruction for all non-dominated uses and move the
            // instruction into the block which is dominating all live uses.
            if (!ins->canClone())
                continue;

            // If the block is a split-edge block, which is created for folding
            // test conditions, then the block has no resume point and has
            // multiple predecessors. In such cases, we cannot safely move
            // bailing instructions into these blocks, as we have no way to
            // bail out.
            if (!usesDominator->entryResumePoint() && usesDominator->numPredecessors() != 1)
                continue;

            JitSpewDef(JitSpew_Sink, "  Can Clone & Recover, sink instruction\n", ins);
            JitSpew(JitSpew_Sink, "  into Block %u", usesDominator->id());

            // Copy the arguments and clone the instruction.
            MDefinitionVector operands(alloc);
            for (size_t i = 0, end = ins->numOperands(); i < end; i++) {
                if (!operands.append(ins->getOperand(i)))
                    return false;
            }

            MInstruction* clone = ins->clone(alloc, operands);
            ins->block()->insertBefore(ins, clone);
            clone->setRecoveredOnBailout();

            // We should not update the producer of the entry resume point, as
            // it cannot refer to any instruction within the basic block except
            // for Phi nodes.
            MResumePoint* entry = usesDominator->entryResumePoint();

            // Replace the instruction by its clone in all the resume points /
            // recovered-on-bailout instructions which are not in blocks which
            // are dominated by the usesDominator block.
            for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; ) {
                MUse* use = *i++;
                MNode* consumer = use->consumer();

                // If the consumer is a Phi, then we look for the index of the
                // use to find the corresponding predecessor block, which is
                // then used as the consumer block.
                MBasicBlock* consumerBlock = consumer->block();
                if (consumer->isDefinition() && consumer->toDefinition()->isPhi()) {
                    consumerBlock = consumerBlock->getPredecessor(
                        consumer->toDefinition()->toPhi()->indexOf(use));
                }

                // Keep the current instruction for all dominated uses, except
                // for the entry resume point of the block into which the
                // instruction is moved.
                if (usesDominator->dominates(consumerBlock) &&
                    (!consumer->isResumePoint() || consumer->toResumePoint() != entry))
                {
                    continue;
                }

                use->replaceProducer(clone);
            }

            // As we move this instruction to a different block, we should
            // verify that we do not carry over a resume point which would refer
            // to an outdated state of the control flow.
            if (ins->resumePoint())
                ins->clearResumePoint();

            // Now that all uses which are not dominated by usesDominator are
            // using the cloned instruction, we can safely move the instruction
            // into the usesDominator block.
            MInstruction* at = usesDominator->safeInsertTop(nullptr, MBasicBlock::IgnoreRecover);
            block->moveBefore(at, ins);
        }
    }

    return true;
}
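Sink relies on a CommonDominator helper, not shown above, to find one block dominating every live use. The usual implementation walks the two candidates up their immediate-dominator chains until they meet, lifting the deeper one first. The sketch below assumes an explicit depth field on a hypothetical DomNode type; the real MBasicBlock code can use other bookkeeping, for instance dominator indices, to decide which side to lift.

// Minimal dominator-tree node: |idom| is the immediate dominator and |depth|
// the distance from the dominator-tree root (whose idom points at itself).
struct DomNode {
    DomNode* idom;
    int depth;
};

// Nearest common dominator of two nodes; either argument may be null,
// meaning "no block seen yet", mirroring how Sink seeds usesDominator.
static DomNode* CommonDominator(DomNode* a, DomNode* b)
{
    if (!a)
        return b;
    if (!b)
        return a;
    while (a != b) {
        // Lift whichever node is deeper until both meet.
        if (a->depth >= b->depth)
            a = a->idom;
        else
            b = b->idom;
    }
    return a;
}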
Example #13
void
C1Spewer::spewPass(FILE *fp, MBasicBlock *block)
{
    fprintf(fp, "  begin_block\n");
    fprintf(fp, "    name \"B%d\"\n", block->id());
    fprintf(fp, "    from_bci -1\n");
    fprintf(fp, "    to_bci -1\n");

    fprintf(fp, "    predecessors");
    for (uint32_t i = 0; i < block->numPredecessors(); i++) {
        MBasicBlock *pred = block->getPredecessor(i);
        fprintf(fp, " \"B%d\"", pred->id());
    }
    fprintf(fp, "\n");

    fprintf(fp, "    successors");
    for (uint32_t i = 0; i < block->numSuccessors(); i++) {
        MBasicBlock *successor = block->getSuccessor(i);
        fprintf(fp, " \"B%d\"", successor->id());
    }
    fprintf(fp, "\n");

    fprintf(fp, "    xhandlers\n");
    fprintf(fp, "    flags\n");

    if (block->lir() && block->lir()->begin() != block->lir()->end()) {
        fprintf(fp, "    first_lir_id %d\n", block->lir()->firstId());
        fprintf(fp, "    last_lir_id %d\n", block->lir()->lastId());
    }

    fprintf(fp, "    begin_states\n");

    if (block->entryResumePoint()) {
        fprintf(fp, "      begin_locals\n");
        fprintf(fp, "        size %d\n", (int)block->numEntrySlots());
        fprintf(fp, "        method \"None\"\n");
        for (uint32_t i = 0; i < block->numEntrySlots(); i++) {
            MDefinition *ins = block->getEntrySlot(i);
            fprintf(fp, "        ");
            fprintf(fp, "%d ", i);
            if (ins->isUnused())
                fprintf(fp, "unused");
            else
                ins->printName(fp);
            fprintf(fp, "\n");
        }
        fprintf(fp, "      end_locals\n");
    }
    fprintf(fp, "    end_states\n");

    fprintf(fp, "    begin_HIR\n");
    for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++)
        DumpDefinition(fp, *phi);
    for (MInstructionIterator i(block->begin()); i != block->end(); i++)
        DumpDefinition(fp, *i);
    fprintf(fp, "    end_HIR\n");

    if (block->lir()) {
        fprintf(fp, "    begin_LIR\n");
        for (size_t i = 0; i < block->lir()->numPhis(); i++)
            DumpLIR(fp, block->lir()->getPhi(i));
        for (LInstructionIterator i(block->lir()->begin()); i != block->lir()->end(); i++)
            DumpLIR(fp, *i);
        fprintf(fp, "    end_LIR\n");
    }

    fprintf(fp, "  end_block\n");
}
Example #14
bool
ValueNumberer::run(UpdateAliasAnalysisFlag updateAliasAnalysis)
{
    updateAliasAnalysis_ = updateAliasAnalysis == UpdateAliasAnalysis;

    JitSpew(JitSpew_GVN, "Running GVN on graph (with %llu blocks)",
            uint64_t(graph_.numBlocks()));

    // Top level non-sparse iteration loop. If an iteration performs a
    // significant change, such as discarding a block which changes the
    // dominator tree and may enable more optimization, this loop takes another
    // iteration.
    int runs = 0;
    for (;;) {
        if (!visitGraph())
            return false;

        // Test whether any block which was not removed but which had at least
        // one predecessor removed will have a new dominator parent.
        while (!remainingBlocks_.empty()) {
            MBasicBlock* block = remainingBlocks_.popCopy();
            if (!block->isDead() && IsDominatorRefined(block)) {
                JitSpew(JitSpew_GVN, "  Dominator for block%u can now be refined; will re-run GVN!",
                        block->id());
                rerun_ = true;
                remainingBlocks_.clear();
                break;
            }
        }

        if (blocksRemoved_) {
            if (!AccountForCFGChanges(mir_, graph_, dependenciesBroken_))
                return false;

            blocksRemoved_ = false;
            dependenciesBroken_ = false;
        }

        if (mir_->shouldCancel("GVN (outer loop)"))
            return false;

        // If no further opportunities have been discovered, we're done.
        if (!rerun_)
            break;

        rerun_ = false;

        // Enforce an arbitrary iteration limit. This is rarely reached, and
        // isn't even strictly necessary, as the algorithm is guaranteed to
        // terminate on its own in a finite amount of time (since every time we
        // re-run we discard the construct which triggered the re-run), but it
        // does help avoid slow compile times on pathological code.
        ++runs;
        if (runs == 6) {
            JitSpew(JitSpew_GVN, "Re-run cutoff of %d reached. Terminating GVN!", runs);
            break;
        }

        JitSpew(JitSpew_GVN, "Re-running GVN on graph (run %d, now with %llu blocks)",
                runs, uint64_t(graph_.numBlocks()));
    }

    return true;
}
bool
RangeAnalysis::addBetaNobes()
{
    IonSpew(IonSpew_Range, "Adding beta nobes");

    for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
        MBasicBlock *block = *i;
        IonSpew(IonSpew_Range, "Looking at block %d", block->id());

        BranchDirection branch_dir;
        MTest *test = block->immediateDominatorBranch(&branch_dir);

        if (!test || !test->getOperand(0)->isCompare())
            continue;

        MCompare *compare = test->getOperand(0)->toCompare();

        MDefinition *left = compare->getOperand(0);
        MDefinition *right = compare->getOperand(1);
        int32 bound;
        MDefinition *val = NULL;

        JSOp jsop = compare->jsop();

        if (branch_dir == FALSE_BRANCH)
            jsop = analyze::NegateCompareOp(jsop);

        if (left->isConstant() && left->toConstant()->value().isInt32()) {
            bound = left->toConstant()->value().toInt32();
            val = right;
            jsop = analyze::ReverseCompareOp(jsop);
        } else if (right->isConstant() && right->toConstant()->value().isInt32()) {
            bound = right->toConstant()->value().toInt32();
            val = left;
        } else {
            MDefinition *smaller = NULL;
            MDefinition *greater = NULL;
            if (jsop == JSOP_LT) {
                smaller = left;
                greater = right;
            } else if (jsop == JSOP_GT) {
                smaller = right;
                greater = left;
            }
            if (smaller && greater) {
                MBeta *beta;
                beta = MBeta::New(smaller, Range(JSVAL_INT_MIN, JSVAL_INT_MAX-1));
                block->insertBefore(*block->begin(), beta);
                replaceDominatedUsesWith(smaller, beta, block);
                beta = MBeta::New(greater, Range(JSVAL_INT_MIN+1, JSVAL_INT_MAX));
                block->insertBefore(*block->begin(), beta);
                replaceDominatedUsesWith(greater, beta, block);
            }
            continue;
        }

        JS_ASSERT(val);


        Range comp;
        switch (jsop) {
          case JSOP_LE:
            comp.setUpper(bound);
            break;
          case JSOP_LT:
            if (!SafeSub(bound, 1, &bound))
                break;
            comp.setUpper(bound);
            break;
          case JSOP_GE:
            comp.setLower(bound);
            break;
          case JSOP_GT:
            if (!SafeAdd(bound, 1, &bound))
                break;
            comp.setLower(bound);
            break;
          case JSOP_EQ:
            comp.setLower(bound);
            comp.setUpper(bound);
          default:
            break; // well, for neq we could have
                   // [-\inf, bound-1] U [bound+1, \inf] but we only use contiguous ranges.
        }

        IonSpew(IonSpew_Range, "Adding beta node for %d", val->id());
        MBeta *beta = MBeta::New(val, comp);
        block->insertBefore(*block->begin(), beta);
        replaceDominatedUsesWith(val, beta, block);
    }

    return true;
}
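The switch in addBetaNobes turns a comparison against an int32 constant into a one-sided range for the beta node, after negating the operator on the false branch. As a worked example, the true branch of "if (x < 10)" constrains x to at most 9, while the false branch, negated to "x >= 10", constrains it to at least 10. The sketch below redoes that bound arithmetic with a simplified stand-in range type; it is not the real Range class.

#include <cassert>
#include <cstdint>
#include <limits>

// Simplified stand-in for the range attached to a beta node: a closed
// interval over int32 values (the real Range class tracks more state).
struct SimpleRange {
    int32_t lower = std::numeric_limits<int32_t>::min();
    int32_t upper = std::numeric_limits<int32_t>::max();
};

enum class Op { LT, LE, GT, GE, EQ };

// Range implied for |val| when "val <op> bound" is known to hold.
static SimpleRange RangeFromCompare(Op op, int32_t bound)
{
    SimpleRange r;
    switch (op) {
      case Op::LE: r.upper = bound; break;
      case Op::LT: if (bound != std::numeric_limits<int32_t>::min()) r.upper = bound - 1; break;
      case Op::GE: r.lower = bound; break;
      case Op::GT: if (bound != std::numeric_limits<int32_t>::max()) r.lower = bound + 1; break;
      case Op::EQ: r.lower = bound; r.upper = bound; break;
    }
    return r;
}

int main()
{
    // True branch of "if (x < 10)": x is at most 9.
    assert(RangeFromCompare(Op::LT, 10).upper == 9);
    // False branch negates the comparison to "x >= 10": x is at least 10.
    assert(RangeFromCompare(Op::GE, 10).lower == 10);
    return 0;
}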
Example #16
bool
ValueNumberer::eliminateRedundancies()
{
    // A definition is 'redundant' iff it is dominated by another definition
    // with the same value number.
    //
    // So, we traverse the dominator tree in pre-order, maintaining a hashmap
    // from value numbers to instructions.
    //
    // For each definition d with value number v, we look up v in the hashmap.
    //
    // If there is a definition d' in the hashmap, and the current traversal
    // index is within that instruction's dominated range, then we eliminate d,
    // replacing all uses of d with uses of d'.
    //
    // If there is no valid definition in the hashtable (the current definition
    // is not in dominated scope), then we insert the current instruction,
    // since it is the most dominant instruction with the given value number.

    InstructionMap defs;

    if (!defs.init())
        return false;

    IonSpew(IonSpew_GVN, "Eliminating redundant instructions");

    // Stack for pre-order CFG traversal.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph_.begin()); i != graph_.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();

        IonSpew(IonSpew_GVN, "Looking at block %d", block->id());

        // Add all immediately dominated blocks to the front of the worklist.
        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }

        // For each instruction, attempt to look up a dominating definition.
        for (MDefinitionIterator iter(block); iter; ) {
            MDefinition *ins = simplify(*iter, true);

            // Instruction was replaced, and all uses have already been fixed.
            if (ins != *iter) {
                iter = block->discardDefAt(iter);
                continue;
            }

            // Instruction has side-effects and cannot be folded.
            if (!ins->isMovable() || ins->isEffectful()) {
                iter++;
                continue;
            }

            MDefinition *dom = findDominatingDef(defs, ins, index);
            if (!dom)
                return false; // Insertion failed.

            if (dom == ins || !dom->updateForReplacement(ins)) {
                iter++;
                continue;
            }

            IonSpew(IonSpew_GVN, "instruction %d is dominated by instruction %d (from block %d)",
                    ins->id(), dom->id(), dom->block()->id());

            ins->replaceAllUsesWith(dom);

            JS_ASSERT(!ins->hasUses());
            JS_ASSERT(ins->block() == block);
            JS_ASSERT(!ins->isEffectful());
            JS_ASSERT(ins->isMovable());

            iter = ins->block()->discardDefAt(iter);
        }
        index++;
    }

    JS_ASSERT(index == graph_.numBlocks());
    return true;
}
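The comment at the top of eliminateRedundancies describes the core GVN idea: keyed on a value number, remember the most dominant definition seen so far and replace later dominated definitions with it. The sketch below shows only the hashing half of that on a flat list of pure expressions (i.e. local value numbering); the dominator-tree bookkeeping that makes it global is omitted, and all names are illustrative.

#include <cstdint>
#include <cstdio>
#include <map>
#include <tuple>
#include <vector>

// A pure expression keyed by opcode and the value numbers of its operands.
using ExprKey = std::tuple<char, uint32_t, uint32_t>;

int main()
{
    // Expressions in program order; value numbers 1 and 2 are the inputs a, b.
    std::vector<ExprKey> exprs = {
        std::make_tuple('+', uint32_t(1), uint32_t(2)),  // v3 = a + b
        std::make_tuple('*', uint32_t(3), uint32_t(3)),  // v4 = v3 * v3
        std::make_tuple('+', uint32_t(1), uint32_t(2)),  // same key as v3 -> redundant
    };

    std::map<ExprKey, uint32_t> seen; // expression -> value number of first occurrence
    uint32_t nextValueNumber = 3;

    for (const ExprKey& e : exprs) {
        auto it = seen.find(e);
        if (it != seen.end()) {
            std::printf("redundant; reuse v%u\n", it->second);
            continue;
        }
        seen.emplace(e, nextValueNumber);
        std::printf("new value v%u\n", nextValueNumber);
        ++nextValueNumber;
    }
    return 0;
}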