Code example #1
bool
UnreachableCodeElimination::removeUnmarkedBlocksAndClearDominators()
{
    // Removes blocks that are not marked from the graph.  For blocks
    // that *are* marked, clears the mark and adjusts the id to its
    // new value.  Also adds blocks that are immediately reachable
    // from an unmarked block to the frontier.

    size_t id = marked_;
    for (PostorderIterator iter(graph_.poBegin()); iter != graph_.poEnd();) {
        if (mir_->shouldCancel("Eliminate Unreachable Code"))
            return false;

        MBasicBlock *block = *iter;
        iter++;

        // Unconditionally clear the dominators.  It's somewhat complex to
        // adjust the values and relatively fast to just recompute.
        block->clearDominatorInfo();

        if (block->isMarked()) {
            block->setId(--id);
            for (MPhiIterator iter(block->phisBegin()); iter != block->phisEnd(); iter++)
                checkDependencyAndRemoveUsesFromUnmarkedBlocks(*iter);
            for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++)
                checkDependencyAndRemoveUsesFromUnmarkedBlocks(*iter);
        } else {
            for (size_t i = 0, c = block->numSuccessors(); i < c; i++) {
                MBasicBlock *succ = block->getSuccessor(i);
                if (succ->isMarked()) {
                    // succ is on the frontier of blocks to be removed:
                    succ->removePredecessor(block);

                    if (!redundantPhis_) {
                        for (MPhiIterator iter(succ->phisBegin()); iter != succ->phisEnd(); iter++) {
                            if (iter->operandIfRedundant()) {
                                redundantPhis_ = true;
                                break;
                            }
                        }
                    }
                }
            }

            graph_.removeBlock(block);
        }
    }

    JS_ASSERT(id == 0);

    return true;
}
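
The id renumbering above is compact but easy to misread: the loop walks blocks in postorder while decrementing a counter that starts at marked_, so the surviving blocks end up numbered 0..marked_-1 in reverse postorder. The following standalone sketch (hypothetical toy data, not SpiderMonkey code) shows the same pattern in isolation:

#include <cstdio>
#include <string>
#include <vector>

int main()
{
    // Assume three marked blocks whose postorder is C, B, A
    // (so their reverse postorder is A, B, C).
    std::vector<std::string> postorder = {"C", "B", "A"};

    size_t id = postorder.size();   // plays the role of marked_
    for (const std::string &name : postorder)
        std::printf("block %s gets id %zu\n", name.c_str(), --id);

    // Prints C -> 2, B -> 1, A -> 0: ids increase along the reverse
    // postorder, which is what setId(--id) achieves in the pass above.
    return 0;
}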
Code example #2
// Visit the control instruction at the end of |block|.
bool
ValueNumberer::visitControlInstruction(MBasicBlock* block, const MBasicBlock* dominatorRoot)
{
    // Look for a simplified form of the control instruction.
    MControlInstruction* control = block->lastIns();
    MDefinition* rep = simplified(control);
    if (rep == control)
        return true;

    if (rep == nullptr)
        return false;

    MControlInstruction* newControl = rep->toControlInstruction();
    MOZ_ASSERT(!newControl->block(),
               "Control instruction replacement shouldn't already be in a block");
#ifdef DEBUG
    JitSpew(JitSpew_GVN, "      Folded control instruction %s%u to %s%u",
            control->opName(), control->id(), newControl->opName(), graph_.getNumInstructionIds());
#endif

    // If the simplification removes any CFG edges, update the CFG and remove
    // any blocks that become dead.
    size_t oldNumSuccs = control->numSuccessors();
    size_t newNumSuccs = newControl->numSuccessors();
    if (newNumSuccs != oldNumSuccs) {
        MOZ_ASSERT(newNumSuccs < oldNumSuccs, "New control instruction has too many successors");
        for (size_t i = 0; i != oldNumSuccs; ++i) {
            MBasicBlock* succ = control->getSuccessor(i);
            if (HasSuccessor(newControl, succ))
                continue;
            if (succ->isMarked())
                continue;
            if (!removePredecessorAndCleanUp(succ, block))
                return false;
            if (succ->isMarked())
                continue;
            if (!rerun_) {
                if (!remainingBlocks_.append(succ))
                    return false;
            }
        }
    }

    if (!releaseOperands(control))
        return false;
    block->discardIgnoreOperands(control);
    block->end(newControl);
    if (block->entryResumePoint() && newNumSuccs != oldNumSuccs)
        block->flagOperandsOfPrunedBranches(newControl);
    return processDeadDefs();
}
Code example #3
File: LICM.cpp  Project: martasect/gecko
static void
VisitLoop(MIRGraph &graph, MBasicBlock *header)
{
    MInstruction *hoistPoint = header->loopPredecessor()->lastIns();

    JitSpew(JitSpew_LICM, "  Visiting loop with header block%u, hoisting to %s%u",
            header->id(), hoistPoint->opName(), hoistPoint->id());

    MBasicBlock *backedge = header->backedge();

    // This indicates whether the loop contains calls or other things which
    // clobber most or all floating-point registers. In such loops,
    // floating-point constants should not be hoisted unless it enables further
    // hoisting.
    bool hasCalls = LoopContainsPossibleCall(graph, header, backedge);

    for (auto i(graph.rpoBegin(header)); ; ++i) {
        MOZ_ASSERT(i != graph.rpoEnd(), "Reached end of graph searching for blocks in loop");
        MBasicBlock *block = *i;
        if (!block->isMarked())
            continue;

        VisitLoopBlock(block, header, hoistPoint, hasCalls);

        if (block == backedge)
            break;
    }
}
Code example #4
// Visit all the blocks dominated by dominatorRoot.
bool
ValueNumberer::visitDominatorTree(MBasicBlock* dominatorRoot)
{
    JitSpew(JitSpew_GVN, "  Visiting dominator tree (with %llu blocks) rooted at block%u%s",
            uint64_t(dominatorRoot->numDominated()), dominatorRoot->id(),
            dominatorRoot == graph_.entryBlock() ? " (normal entry block)" :
            dominatorRoot == graph_.osrBlock() ? " (OSR entry block)" :
            dominatorRoot->numPredecessors() == 0 ? " (odd unreachable block)" :
            " (merge point from normal entry and OSR entry)");
    MOZ_ASSERT(dominatorRoot->immediateDominator() == dominatorRoot,
            "root is not a dominator tree root");

    // Visit all blocks dominated by dominatorRoot, in RPO. This has the nice
    // property that we'll always visit a block before any block it dominates,
    // so we can make a single pass through the list and see every full
    // redundance.
    size_t numVisited = 0;
    size_t numDiscarded = 0;
    for (ReversePostorderIterator iter(graph_.rpoBegin(dominatorRoot)); ; ) {
        MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information");
        MBasicBlock* block = *iter++;
        // We're only visiting blocks in dominatorRoot's tree right now.
        if (!dominatorRoot->dominates(block))
            continue;

        // If this is a loop backedge, remember the header, as we may not be able
        // to find it after we simplify the block.
        MBasicBlock* header = block->isLoopBackedge() ? block->loopHeaderOfBackedge() : nullptr;

        if (block->isMarked()) {
            // This block has become unreachable; handle it specially.
            if (!visitUnreachableBlock(block))
                return false;
            ++numDiscarded;
        } else {
            // Visit the block!
            if (!visitBlock(block, dominatorRoot))
                return false;
            ++numVisited;
        }

        // If the block is/was a loop backedge, check to see if the block that
        // is/was its header has optimizable phis, which would want a re-run.
        if (!rerun_ && header && loopHasOptimizablePhi(header)) {
            JitSpew(JitSpew_GVN, "    Loop phi in block%u can now be optimized; will re-run GVN!",
                    header->id());
            rerun_ = true;
            remainingBlocks_.clear();
        }

        MOZ_ASSERT(numVisited <= dominatorRoot->numDominated() - numDiscarded,
                   "Visited blocks too many times");
        if (numVisited >= dominatorRoot->numDominated() - numDiscarded)
            break;
    }

    totalNumVisited_ += numVisited;
    values_.clear();
    return true;
}
Code example #5
// |block| is unreachable. Mine it for opportunities to delete more dead
// code, and then discard it.
bool
ValueNumberer::visitUnreachableBlock(MBasicBlock* block)
{
    JitSpew(JitSpew_GVN, "    Visiting unreachable block%u%s%s%s", block->id(),
            block->isLoopHeader() ? " (loop header)" : "",
            block->isSplitEdge() ? " (split edge)" : "",
            block->immediateDominator() == block ? " (dominator root)" : "");

    MOZ_ASSERT(block->isMarked(), "Visiting unmarked (and therefore reachable?) block");
    MOZ_ASSERT(block->numPredecessors() == 0, "Block marked unreachable still has predecessors");
    MOZ_ASSERT(block != graph_.entryBlock(), "Removing normal entry block");
    MOZ_ASSERT(block != graph_.osrBlock(), "Removing OSR entry block");
    MOZ_ASSERT(deadDefs_.empty(), "deadDefs_ not cleared");

    // Disconnect all outgoing CFG edges.
    for (size_t i = 0, e = block->numSuccessors(); i < e; ++i) {
        MBasicBlock* succ = block->getSuccessor(i);
        if (succ->isDead() || succ->isMarked())
            continue;
        if (!removePredecessorAndCleanUp(succ, block))
            return false;
        if (succ->isMarked())
            continue;
        // |succ| is still reachable. Make a note of it so that we can scan
        // it for interesting dominator tree changes later.
        if (!rerun_) {
            if (!remainingBlocks_.append(succ))
                return false;
        }
    }

    // Discard any instructions with no uses. The remaining instructions will be
    // discarded when their last use is discarded.
    MOZ_ASSERT(nextDef_ == nullptr);
    for (MDefinitionIterator iter(block); iter; ) {
        MDefinition* def = *iter++;
        if (def->hasUses())
            continue;
        nextDef_ = *iter;
        if (!discardDefsRecursively(def))
            return false;
    }

    nextDef_ = nullptr;
    MControlInstruction* control = block->lastIns();
    return discardDefsRecursively(control);
}
Code example #6
// Discard |def| and mine its operands for any subsequently dead defs.
bool
ValueNumberer::discardDef(MDefinition* def)
{
#ifdef JS_JITSPEW
    JitSpew(JitSpew_GVN, "      Discarding %s %s%u",
            def->block()->isMarked() ? "unreachable" : "dead",
            def->opName(), def->id());
#endif
#ifdef DEBUG
    MOZ_ASSERT(def != nextDef_, "Invalidating the MDefinition iterator");
    if (def->block()->isMarked()) {
        MOZ_ASSERT(!def->hasUses(), "Discarding def that still has uses");
    } else {
        MOZ_ASSERT(IsDiscardable(def), "Discarding non-discardable definition");
        MOZ_ASSERT(!values_.has(def), "Discarding a definition still in the set");
    }
#endif

    MBasicBlock* block = def->block();
    if (def->isPhi()) {
        MPhi* phi = def->toPhi();
        if (!releaseAndRemovePhiOperands(phi))
             return false;
        block->discardPhi(phi);
    } else {
        MInstruction* ins = def->toInstruction();
        if (MResumePoint* resume = ins->resumePoint()) {
            if (!releaseResumePointOperands(resume))
                return false;
        }
        if (!releaseOperands(ins))
             return false;
        block->discardIgnoreOperands(ins);
    }

    // If that was the last definition in the block, it can be safely removed
    // from the graph.
    if (block->phisEmpty() && block->begin() == block->end()) {
        MOZ_ASSERT(block->isMarked(), "Reachable block lacks at least a control instruction");

        // As a special case, don't remove a block which is a dominator tree
        // root so that we don't invalidate the iterator in visitGraph. We'll
        // check for this and remove it later.
        if (block->immediateDominator() != block) {
            JitSpew(JitSpew_GVN, "      Block block%u is now empty; discarding", block->id());
            graph_.removeBlock(block);
            blocksRemoved_ = true;
        } else {
            JitSpew(JitSpew_GVN, "      Dominator root block%u is now empty; will discard later",
                    block->id());
        }
    }

    return true;
}
Code example #7
static void
AssertReversePostOrder(MIRGraph &graph)
{
    // Check that every block is visited after all its predecessors (except backedges).
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        JS_ASSERT(!block->isMarked());

        for (size_t i = 0; i < block->numPredecessors(); i++) {
            MBasicBlock *pred = block->getPredecessor(i);
            JS_ASSERT_IF(!pred->isLoopBackedge(), pred->isMarked());
        }

        block->mark();
    }

    graph.unmarkBlocks();
}
Code example #8
// Visit all the blocks in the graph.
bool
ValueNumberer::visitGraph()
{
    // Due to OSR blocks, the set of blocks dominated by a blocks may not be
    // contiguous in the RPO. Do a separate traversal for each dominator tree
    // root. There's always the main entry, and sometimes there's an OSR entry,
    // and then there are the roots formed where the OSR paths merge with the
    // main entry paths.
    for (ReversePostorderIterator iter(graph_.rpoBegin()); ; ) {
        MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information");
        MBasicBlock* block = *iter;
        if (block->immediateDominator() == block) {
            if (!visitDominatorTree(block))
                return false;

            // Normally unreachable blocks would be removed by now, but if this
            // block is a dominator tree root, it has been special-cased and left
            // in place in order to avoid invalidating our iterator. Now that
            // we've finished the tree, increment the iterator, and then if it's
            // marked for removal, remove it.
            ++iter;
            if (block->isMarked()) {
                JitSpew(JitSpew_GVN, "      Discarding dominator root block%u",
                        block->id());
                MOZ_ASSERT(block->begin() == block->end(),
                           "Unreachable dominator tree root has instructions after tree walk");
                MOZ_ASSERT(block->phisEmpty(),
                           "Unreachable dominator tree root has phis after tree walk");
                graph_.removeBlock(block);
                blocksRemoved_ = true;
            }

            MOZ_ASSERT(totalNumVisited_ <= graph_.numBlocks(), "Visited blocks too many times");
            if (totalNumVisited_ >= graph_.numBlocks())
                break;
        } else {
            // This block is not a dominator tree root. Proceed to the next one.
            ++iter;
        }
    }
    totalNumVisited_ = 0;
    return true;
}
Code example #9
File: LICM.cpp  Project: martasect/gecko
// Test whether any instruction in the loop possiblyCalls().
static bool
LoopContainsPossibleCall(MIRGraph &graph, MBasicBlock *header, MBasicBlock *backedge)
{
    for (auto i(graph.rpoBegin(header)); ; ++i) {
        MOZ_ASSERT(i != graph.rpoEnd(), "Reached end of graph searching for blocks in loop");
        MBasicBlock *block = *i;
        if (!block->isMarked())
            continue;

        for (auto insIter(block->begin()), insEnd(block->end()); insIter != insEnd; ++insIter) {
            MInstruction *ins = *insIter;
            if (ins->possiblyCalls()) {
                JitSpew(JitSpew_LICM, "    Possible call found at %s%u", ins->opName(), ins->id());
                return true;
            }
        }

        if (block == backedge)
            break;
    }
    return false;
}
Code example #10
File: LICM.cpp  Project: Nebelhom/mozilla-central
Loop::LoopReturn
Loop::init()
{
    IonSpew(IonSpew_LICM, "Loop identified, headed by block %d", header_->id());
    IonSpew(IonSpew_LICM, "footer is block %d", header_->backedge()->id());

    // The first predecessor of the loop header must dominate the header.
    JS_ASSERT(header_->id() > header_->getPredecessor(0)->id());

    // Loops from backedge to header and marks all visited blocks
    // as part of the loop. At the same time add all hoistable instructions
    // (in RPO order) to the instruction worklist.
    Vector<MBasicBlock *, 1, IonAllocPolicy> inlooplist;
    if (!inlooplist.append(header_->backedge()))
        return LoopReturn_Error;
    header_->backedge()->mark();

    while (!inlooplist.empty()) {
        MBasicBlock *block = inlooplist.back();

        // Hoisting requires more finesse if the loop contains a block that
        // self-dominates: there exists control flow that may enter the loop
        // without passing through the loop preheader.
        //
        // Rather than perform a complicated analysis of the dominance graph,
        // just return a soft error to ignore this loop.
        if (block->immediateDominator() == block) {
            while (!worklist_.empty())
                popFromWorklist();
            return LoopReturn_Skip;
        }

        // Add not yet visited predecessors to the inlooplist.
        if (block != header_) {
            for (size_t i = 0; i < block->numPredecessors(); i++) {
                MBasicBlock *pred = block->getPredecessor(i);
                if (pred->isMarked())
                    continue;

                if (!inlooplist.append(pred))
                    return LoopReturn_Error;
                pred->mark();
            }
        }

        // If any block was added, process them first.
        if (block != inlooplist.back())
            continue;

        // Add all instructions in this block (but the control instruction) to the worklist
        for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
            MInstruction *ins = *i;

            if (isHoistable(ins)) {
                if (!insertInWorklist(ins))
                    return LoopReturn_Error;
            }
        }

        // All successors of this block are visited.
        inlooplist.popBack();
    }

    return LoopReturn_Success;
}
Code example #11
// OSR fixups serve the purpose of representing the non-OSR entry into a loop
// when the only real entry is an OSR entry into the middle. However, if the
// entry into the middle is subsequently folded away, the loop may actually
// have become unreachable. Mark-and-sweep all blocks to remove all such code.
bool ValueNumberer::cleanupOSRFixups()
{
    // Mark.
    Vector<MBasicBlock*, 0, JitAllocPolicy> worklist(graph_.alloc());
    unsigned numMarked = 2;
    graph_.entryBlock()->mark();
    graph_.osrBlock()->mark();
    if (!worklist.append(graph_.entryBlock()) || !worklist.append(graph_.osrBlock()))
        return false;
    while (!worklist.empty()) {
        MBasicBlock* block = worklist.popCopy();
        for (size_t i = 0, e = block->numSuccessors(); i != e; ++i) {
            MBasicBlock* succ = block->getSuccessor(i);
            if (!succ->isMarked()) {
                ++numMarked;
                succ->mark();
                if (!worklist.append(succ))
                    return false;
            } else if (succ->isLoopHeader() &&
                       succ->loopPredecessor() == block &&
                       succ->numPredecessors() == 3)
            {
                // Unmark fixup blocks if the loop predecessor is marked after
                // the loop header.
                succ->getPredecessor(1)->unmarkUnchecked();
            }
        }

        // OSR fixup blocks are needed if and only if the loop header is
        // reachable from its backedge (via the OSR block) and not from its
        // original loop predecessor.
        //
        // Thus OSR fixup blocks are removed if the loop header is not
        // reachable, or if the loop header is reachable from both its backedge
        // and its original loop predecessor.
        if (block->isLoopHeader()) {
            MBasicBlock* maybeFixupBlock = nullptr;
            if (block->numPredecessors() == 2) {
                maybeFixupBlock = block->getPredecessor(0);
            } else {
                MOZ_ASSERT(block->numPredecessors() == 3);
                if (!block->loopPredecessor()->isMarked())
                    maybeFixupBlock = block->getPredecessor(1);
            }

            if (maybeFixupBlock &&
                !maybeFixupBlock->isMarked() &&
                maybeFixupBlock->numPredecessors() == 0)
            {
                MOZ_ASSERT(maybeFixupBlock->numSuccessors() == 1,
                           "OSR fixup block should have exactly one successor");
                MOZ_ASSERT(maybeFixupBlock != graph_.entryBlock(),
                           "OSR fixup block shouldn't be the entry block");
                MOZ_ASSERT(maybeFixupBlock != graph_.osrBlock(),
                           "OSR fixup block shouldn't be the OSR entry block");
                maybeFixupBlock->mark();
            }
        }
    }

    // And sweep.
    return RemoveUnmarkedBlocks(mir_, graph_, numMarked);
}
Code example #12
void
RangeAnalysis::analyzeLoopPhi(MBasicBlock *header, LoopIterationBound *loopBound, MPhi *phi)
{
    // Given a bound on the number of backedges taken, compute an upper and
    // lower bound for a phi node that may change by a constant amount each
    // iteration. Unlike for the case when computing the iteration bound
    // itself, the phi does not need to change the same amount every iteration,
    // but is required to change at most N and be either nondecreasing or
    // nonincreasing.

    if (phi->numOperands() != 2)
        return;

    MBasicBlock *preLoop = header->loopPredecessor();
    JS_ASSERT(!preLoop->isMarked() && preLoop->successorWithPhis() == header);

    MBasicBlock *backedge = header->backedge();
    JS_ASSERT(backedge->isMarked() && backedge->successorWithPhis() == header);

    MDefinition *initial = phi->getOperand(preLoop->positionInPhiSuccessor());
    if (initial->block()->isMarked())
        return;

    SimpleLinearSum modified = ExtractLinearSum(phi->getOperand(backedge->positionInPhiSuccessor()));

    if (modified.term != phi || modified.constant == 0)
        return;

    if (!phi->range())
        phi->setRange(new Range());

    LinearSum initialSum;
    if (!initialSum.add(initial, 1))
        return;

    // The phi may change by N each iteration, and is either nondecreasing or
    // nonincreasing. initial(phi) is either a lower or upper bound for the
    // phi, and initial(phi) + loopBound * N is either an upper or lower bound,
    // at all points within the loop, provided that loopBound >= 0.
    //
    // We are more interested, however, in the bound for phi at points
    // dominated by the loop bound's test; if the test dominates e.g. a bounds
    // check we want to hoist from the loop, using the value of the phi at the
    // head of the loop for this will usually be too imprecise to hoist the
    // check. These points will execute only if the backedge executes at least
    // one more time (as the test passed and the test dominates the backedge),
    // so we know both that loopBound >= 1 and that the phi's value has changed
    // at most loopBound - 1 times. Thus, another upper or lower bound for the
    // phi is initial(phi) + (loopBound - 1) * N, without requiring us to
    // ensure that loopBound >= 0.

    LinearSum limitSum(loopBound->sum);
    if (!limitSum.multiply(modified.constant) || !limitSum.add(initialSum))
        return;

    int32_t negativeConstant;
    if (!SafeSub(0, modified.constant, &negativeConstant) || !limitSum.add(negativeConstant))
        return;

    if (modified.constant > 0) {
        phi->range()->setSymbolicLower(new SymbolicBound(NULL, initialSum));
        phi->range()->setSymbolicUpper(new SymbolicBound(loopBound, limitSum));
    } else {
        phi->range()->setSymbolicUpper(new SymbolicBound(NULL, initialSum));
        phi->range()->setSymbolicLower(new SymbolicBound(loopBound, limitSum));
    }

    IonSpew(IonSpew_Range, "added symbolic range on %d", phi->id());
    SpewRange(phi);
}
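
The long comment in the middle of analyzeLoopPhi packs the whole derivation into prose, so a concrete instance may help. This is a hypothetical numeric check (assumed values, not code from the tree): for a nondecreasing phi with initial(phi) = 0 that changes by N = 1 per iteration under a backedge bound loopBound = 10, the symbolic range recorded at points dominated by the loop test is [initial, initial + (loopBound - 1) * N]:

#include <cstdio>

int main()
{
    // Assumed example values: the phi starts at 0, grows by 1 per iteration,
    // and the backedge is taken at most 10 times.
    int initial = 0, N = 1, loopBound = 10;

    int lower = initial;                        // nondecreasing phi: initial is the lower bound
    int upper = initial + (loopBound - 1) * N;  // matches limitSum = loopBound*N + initial - N

    std::printf("symbolic range of the phi at the loop test: [%d, %d]\n", lower, upper);  // [0, 9]
    return 0;
}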
Code example #13
void
RangeAnalysis::analyzeLoop(MBasicBlock *header)
{
    // Try to compute an upper bound on the number of times the loop backedge
    // will be taken. Look for tests that dominate the backedge and which have
    // an edge leaving the loop body.
    MBasicBlock *backedge = header->backedge();

    // Ignore trivial infinite loops.
    if (backedge == header)
        return;

    markBlocksInLoopBody(header, backedge);

    LoopIterationBound *iterationBound = NULL;

    MBasicBlock *block = backedge;
    do {
        BranchDirection direction;
        MTest *branch = block->immediateDominatorBranch(&direction);

        if (block == block->immediateDominator())
            break;

        block = block->immediateDominator();

        if (branch) {
            direction = NegateBranchDirection(direction);
            MBasicBlock *otherBlock = branch->branchSuccessor(direction);
            if (!otherBlock->isMarked()) {
                iterationBound =
                    analyzeLoopIterationCount(header, branch, direction);
                if (iterationBound)
                    break;
            }
        }
    } while (block != header);

    if (!iterationBound) {
        graph_.unmarkBlocks();
        return;
    }

#ifdef DEBUG
    if (IonSpewEnabled(IonSpew_Range)) {
        Sprinter sp(GetIonContext()->cx);
        sp.init();
        iterationBound->sum.print(sp);
        IonSpew(IonSpew_Range, "computed symbolic bound on backedges: %s",
                sp.string());
    }
#endif

    // Try to compute symbolic bounds for the phi nodes at the head of this
    // loop, expressed in terms of the iteration bound just computed.

    for (MDefinitionIterator iter(header); iter; iter++) {
        MDefinition *def = *iter;
        if (def->isPhi())
            analyzeLoopPhi(header, iterationBound, def->toPhi());
    }

    // Try to hoist any bounds checks from the loop using symbolic bounds.

    Vector<MBoundsCheck *, 0, IonAllocPolicy> hoistedChecks;

    for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) {
        MBasicBlock *block = *iter;
        if (!block->isMarked())
            continue;

        for (MDefinitionIterator iter(block); iter; iter++) {
            MDefinition *def = *iter;
            if (def->isBoundsCheck() && def->isMovable()) {
                if (tryHoistBoundsCheck(header, def->toBoundsCheck()))
                    hoistedChecks.append(def->toBoundsCheck());
            }
        }
    }

    // Note: replace all uses of the original bounds check with the
    // actual index. This is usually done during bounds check elimination,
    // but in this case it's safe to do it here since the load/store is
    // definitely not loop-invariant, so we will never move it before
    // one of the bounds checks we just added.
    for (size_t i = 0; i < hoistedChecks.length(); i++) {
        MBoundsCheck *ins = hoistedChecks[i];
        ins->replaceAllUsesWith(ins->index());
        ins->block()->discard(ins);
    }

    graph_.unmarkBlocks();
}
Code example #14
bool
RangeAnalysis::tryHoistBoundsCheck(MBasicBlock *header, MBoundsCheck *ins)
{
    // The bounds check's length must be loop invariant.
    if (ins->length()->block()->isMarked())
        return false;

    // The bounds check's index should not be loop invariant (else we would
    // already have hoisted it during LICM).
    SimpleLinearSum index = ExtractLinearSum(ins->index());
    if (!index.term || !index.term->block()->isMarked())
        return false;

    // Check for a symbolic lower and upper bound on the index. If either
    // condition depends on an iteration bound for the loop, only hoist if
    // the bounds check is dominated by the iteration bound's test.
    if (!index.term->range())
        return false;
    const SymbolicBound *lower = index.term->range()->symbolicLower();
    if (!lower || !SymbolicBoundIsValid(header, ins, lower))
        return false;
    const SymbolicBound *upper = index.term->range()->symbolicUpper();
    if (!upper || !SymbolicBoundIsValid(header, ins, upper))
        return false;

    MBasicBlock *preLoop = header->loopPredecessor();
    JS_ASSERT(!preLoop->isMarked());

    MDefinition *lowerTerm = ConvertLinearSum(preLoop, lower->sum);
    if (!lowerTerm)
        return false;

    MDefinition *upperTerm = ConvertLinearSum(preLoop, upper->sum);
    if (!upperTerm)
        return false;

    // We are checking that index + indexConstant >= 0, and know that
    // index >= lowerTerm + lowerConstant. Thus, check that:
    //
    // lowerTerm + lowerConstant + indexConstant >= 0
    // lowerTerm >= -lowerConstant - indexConstant

    int32_t lowerConstant = 0;
    if (!SafeSub(lowerConstant, index.constant, &lowerConstant))
        return false;
    if (!SafeSub(lowerConstant, lower->sum.constant(), &lowerConstant))
        return false;
    MBoundsCheckLower *lowerCheck = MBoundsCheckLower::New(lowerTerm);
    lowerCheck->setMinimum(lowerConstant);

    // We are checking that index < boundsLength, and know that
    // index <= upperTerm + upperConstant. Thus, check that:
    //
    // upperTerm + upperConstant < boundsLength

    int32_t upperConstant = index.constant;
    if (!SafeAdd(upper->sum.constant(), upperConstant, &upperConstant))
        return false;
    MBoundsCheck *upperCheck = MBoundsCheck::New(upperTerm, ins->length());
    upperCheck->setMinimum(upperConstant);
    upperCheck->setMaximum(upperConstant);

    // Hoist the loop invariant upper and lower bounds checks.
    preLoop->insertBefore(preLoop->lastIns(), lowerCheck);
    preLoop->insertBefore(preLoop->lastIns(), upperCheck);

    return true;
}
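
As a sanity check on the two constants computed above, here is a hypothetical numeric walk-through (assumed values, not code from the tree). Suppose the check inside the loop is on a[i + 2], so index.constant = 2, and the phi's symbolic bounds give lower->sum.constant() = 0 and upper->sum.constant() = 9:

#include <cstdio>

int main()
{
    // Assumed values for the constants appearing in tryHoistBoundsCheck.
    int indexConstant = 2;        // index.constant
    int lowerSumConstant = 0;     // lower->sum.constant()
    int upperSumConstant = 9;     // upper->sum.constant()

    // Lower check: lowerTerm >= -lowerSumConstant - indexConstant
    int lowerMinimum = 0 - indexConstant - lowerSumConstant;     // -2, passed to setMinimum()

    // Upper check: upperTerm + (upperSumConstant + indexConstant) < boundsLength
    int upperOffset = upperSumConstant + indexConstant;          // 11, used as minimum and maximum

    std::printf("MBoundsCheckLower minimum: %d\n", lowerMinimum);
    std::printf("MBoundsCheck offset: %d\n", upperOffset);
    return 0;
}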
Code example #15
bool
UnreachableCodeElimination::prunePointlessBranchesAndMarkReachableBlocks()
{
    BlockList worklist, optimizableBlocks;

    // Process everything reachable from the start block, ignoring any
    // OSR block.
    if (!enqueue(graph_.entryBlock(), worklist))
        return false;
    while (!worklist.empty()) {
        if (mir_->shouldCancel("Eliminate Unreachable Code"))
            return false;

        MBasicBlock *block = worklist.popCopy();

        // If this block is a test on a constant operand, only enqueue
        // the relevant successor. Also, remember the block for later.
        if (MBasicBlock *succ = optimizableSuccessor(block)) {
            if (!optimizableBlocks.append(block))
                return false;
            if (!enqueue(succ, worklist))
                return false;
        } else {
            // Otherwise just visit all successors.
            for (size_t i = 0; i < block->numSuccessors(); i++) {
                MBasicBlock *succ = block->getSuccessor(i);
                if (!enqueue(succ, worklist))
                    return false;
            }
        }
    }

    // Now, if there is an OSR block, check that all of its successors
    // were reachable (bug 880377). If not, we are in danger of
    // creating a CFG with two disjoint parts, so simply mark all
    // blocks as reachable. This generally occurs when the TI info for
    // stack types is incorrect or incomplete, due to operations that
    // have not yet executed in baseline.
    if (graph_.osrBlock()) {
        MBasicBlock *osrBlock = graph_.osrBlock();
        JS_ASSERT(!osrBlock->isMarked());
        if (!enqueue(osrBlock, worklist))
            return false;
        for (size_t i = 0; i < osrBlock->numSuccessors(); i++) {
            if (!osrBlock->getSuccessor(i)->isMarked()) {
                // OSR block has an otherwise unreachable successor, abort.
                for (MBasicBlockIterator iter(graph_.begin()); iter != graph_.end(); iter++)
                    iter->mark();
                marked_ = graph_.numBlocks();
                return true;
            }
        }
    }

    // Now that we know we will not abort due to OSR, go back and
    // transform any tests on constant operands into gotos.
    for (uint32_t i = 0; i < optimizableBlocks.length(); i++) {
        MBasicBlock *block = optimizableBlocks[i];
        MBasicBlock *succ = optimizableSuccessor(block);
        JS_ASSERT(succ);

        MGoto *gotoIns = MGoto::New(graph_.alloc(), succ);
        block->discardLastIns();
        block->end(gotoIns);
        MBasicBlock *successorWithPhis = block->successorWithPhis();
        if (successorWithPhis && successorWithPhis != succ)
            block->setSuccessorWithPhis(nullptr, 0);
    }

    return true;
}
Code example #16
bool
UnreachableCodeElimination::removeUnmarkedBlocksAndClearDominators()
{
    // Removes blocks that are not marked from the graph.  For blocks
    // that *are* marked, clears the mark and adjusts the id to its
    // new value.  Also adds blocks that are immediately reachable
    // from an unmarked block to the frontier.

    size_t id = marked_;
    for (PostorderIterator iter(graph_.poBegin()); iter != graph_.poEnd();) {
        if (mir_->shouldCancel("Eliminate Unreachable Code"))
            return false;

        MBasicBlock *block = *iter;
        iter++;

        // Unconditionally clear the dominators.  It's somewhat complex to
        // adjust the values and relatively fast to just recompute.
        block->clearDominatorInfo();

        if (block->isMarked()) {
            block->setId(--id);
            for (MPhiIterator iter(block->phisBegin()); iter != block->phisEnd(); iter++)
                checkDependencyAndRemoveUsesFromUnmarkedBlocks(*iter);
            for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++)
                checkDependencyAndRemoveUsesFromUnmarkedBlocks(*iter);
        } else {
            if (block->numPredecessors() > 1) {
                // If this block had phis, then any reachable
                // predecessors need to have the successorWithPhis
                // flag cleared.
                for (size_t i = 0; i < block->numPredecessors(); i++)
                    block->getPredecessor(i)->setSuccessorWithPhis(nullptr, 0);
            }

            if (block->isLoopBackedge()) {
                // NB. We have to update the loop header if we
                // eliminate the backedge. At first I thought this
                // check would be insufficient, because it would be
                // possible to have code like this:
                //
                //    while (true) {
                //       ...;
                //       if (1 == 1) break;
                //    }
                //
                // in which the backedge is removed as part of
                // rewriting the condition, but no actual blocks are
                // removed.  However, in all such cases, the backedge
                // would be a critical edge and hence the critical
                // edge block is being removed.
                block->loopHeaderOfBackedge()->clearLoopHeader();
            }

            for (size_t i = 0, c = block->numSuccessors(); i < c; i++) {
                MBasicBlock *succ = block->getSuccessor(i);
                if (succ->isMarked()) {
                    // succ is on the frontier of blocks to be removed:
                    succ->removePredecessor(block);

                    if (!redundantPhis_) {
                        for (MPhiIterator iter(succ->phisBegin()); iter != succ->phisEnd(); iter++) {
                            if (iter->operandIfRedundant()) {
                                redundantPhis_ = true;
                                break;
                            }
                        }
                    }
                }
            }

            // When we remove a call, we can't leave the corresponding MPassArg
            // in the graph, since lowering will fail. Replace it with the
            // argument for the exceptional case when it is kept alive in a
            // ResumePoint. DCE will remove the unused MPassArg instruction.
            for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
                if (iter->isCall()) {
                    MCall *call = iter->toCall();
                    for (size_t i = 0; i < call->numStackArgs(); i++) {
                        JS_ASSERT(call->getArg(i)->isPassArg());
                        JS_ASSERT(call->getArg(i)->hasOneDefUse());
                        MPassArg *arg = call->getArg(i)->toPassArg();
                        arg->replaceAllUsesWith(arg->getArgument());
                    }
                }
            }

            graph_.removeBlock(block);
        }
    }

    JS_ASSERT(id == 0);

    return true;
}