// Try to eliminate a redundant type barrier. A barrier whose only effect is to
// filter null and/or undefined out of the input's type set can be removed if a
// branch dominating the barrier has already tested for those values; the walk
// below visits every dominating branch and lets
// TryEliminateTypeBarrierFromTest decide. Returns false only on error (never
// here); *eliminated is set if the barrier was removed.
static bool
TryEliminateTypeBarrier(MTypeBarrier *barrier, bool *eliminated)
{
    JS_ASSERT(!*eliminated);

    const types::StackTypeSet *barrierTypes = barrier->typeSet();
    const types::StackTypeSet *inputTypes = barrier->input()->typeSet();

    // Without type information on both sides we cannot reason about the barrier.
    if (!barrierTypes || !inputTypes)
        return true;

    bool filtersNull = barrierTypes->filtersType(inputTypes, types::Type::NullType());
    bool filtersUndefined = barrierTypes->filtersType(inputTypes, types::Type::UndefinedType());

    // If the barrier filters neither null nor undefined, there is nothing a
    // dominating test could have already established; leave it alone.
    if (!filtersNull && !filtersUndefined)
        return true;

    // Walk up the dominator tree from the barrier's block, examining the
    // branch controlling entry to each dominating block.
    MBasicBlock *block = barrier->block();
    while (true) {
        BranchDirection direction;
        MTest *test = block->immediateDominatorBranch(&direction);

        if (test) {
            TryEliminateTypeBarrierFromTest(barrier, filtersNull, filtersUndefined,
                                            test, direction, eliminated);
        }

        // A self-dominating block is a root of the dominator tree; stop there.
        MBasicBlock *previous = block->immediateDominator();
        if (previous == block)
            break;
        block = previous;
    }

    return true;
}
// Compute the immediate dominator of every block by iterating over the graph
// in reverse postorder, intersecting the dominators of each block's
// predecessors, until a fixed point is reached. Roots (the start block and any
// OSR block) are marked as self-dominating; a block that can be reached from
// two roots ends up self-dominating as well, since its predecessors have no
// common dominator.
static void
ComputeImmediateDominators(MIRGraph &graph)
{
    // The default start block is a root and therefore only self-dominates.
    MBasicBlock *startBlock = *graph.begin();
    startBlock->setImmediateDominator(startBlock);

    // Any OSR block is a root and therefore only self-dominates.
    MBasicBlock *osrBlock = graph.osrBlock();
    if (osrBlock)
        osrBlock->setImmediateDominator(osrBlock);

    bool changed = true;

    while (changed) {
        changed = false;

        ReversePostorderIterator block = graph.rpoBegin();

        // For each block in RPO, intersect all dominators.
        for (; block != graph.rpoEnd(); block++) {
            // If a node has once been found to have no exclusive dominator,
            // it will never have an exclusive dominator, so it may be skipped.
            if (block->immediateDominator() == *block)
                continue;

            // Start from the first predecessor and fold in the rest.
            MBasicBlock *newIdom = block->getPredecessor(0);

            // Find the first common dominator.
            for (size_t i = 1; i < block->numPredecessors(); i++) {
                MBasicBlock *pred = block->getPredecessor(i);
                // Predecessors not yet processed (idom still NULL) are
                // skipped; the fixpoint loop will revisit this block later.
                if (pred->immediateDominator() != NULL)
                    newIdom = IntersectDominators(pred, newIdom);

                // If there is no common dominator, the block self-dominates.
                if (newIdom == NULL) {
                    block->setImmediateDominator(*block);
                    changed = true;
                    break;
                }
            }

            if (newIdom && block->immediateDominator() != newIdom) {
                block->setImmediateDominator(newIdom);
                changed = true;
            }
        }
    }

#ifdef DEBUG
    // Assert that all blocks have dominator information.
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        JS_ASSERT(block->immediateDominator() != NULL);
    }
#endif
}
// Discard |def| and mine its operands for any subsequently dead defs. bool ValueNumberer::discardDef(MDefinition* def) { #ifdef JS_JITSPEW JitSpew(JitSpew_GVN, " Discarding %s %s%u", def->block()->isMarked() ? "unreachable" : "dead", def->opName(), def->id()); #endif #ifdef DEBUG MOZ_ASSERT(def != nextDef_, "Invalidating the MDefinition iterator"); if (def->block()->isMarked()) { MOZ_ASSERT(!def->hasUses(), "Discarding def that still has uses"); } else { MOZ_ASSERT(IsDiscardable(def), "Discarding non-discardable definition"); MOZ_ASSERT(!values_.has(def), "Discarding a definition still in the set"); } #endif MBasicBlock* block = def->block(); if (def->isPhi()) { MPhi* phi = def->toPhi(); if (!releaseAndRemovePhiOperands(phi)) return false; block->discardPhi(phi); } else { MInstruction* ins = def->toInstruction(); if (MResumePoint* resume = ins->resumePoint()) { if (!releaseResumePointOperands(resume)) return false; } if (!releaseOperands(ins)) return false; block->discardIgnoreOperands(ins); } // If that was the last definition in the block, it can be safely removed // from the graph. if (block->phisEmpty() && block->begin() == block->end()) { MOZ_ASSERT(block->isMarked(), "Reachable block lacks at least a control instruction"); // As a special case, don't remove a block which is a dominator tree // root so that we don't invalidate the iterator in visitGraph. We'll // check for this and remove it later. if (block->immediateDominator() != block) { JitSpew(JitSpew_GVN, " Block block%u is now empty; discarding", block->id()); graph_.removeBlock(block); blocksRemoved_ = true; } else { JitSpew(JitSpew_GVN, " Dominator root block%u is now empty; will discard later", block->id()); } } return true; }
// Walk up the dominator tree from |now| to |old| and test for any defs which // look potentially interesting to GVN. static bool ScanDominatorsForDefs(MBasicBlock* now, MBasicBlock* old) { MOZ_ASSERT(old->dominates(now), "Refined dominator not dominated by old dominator"); for (MBasicBlock* i = now; i != old; i = i->immediateDominator()) { if (BlockHasInterestingDefs(i)) return true; } return false; }
// Whether bound is valid at the specified bounds check instruction in a loop, // and may be used to hoist ins. static inline bool SymbolicBoundIsValid(MBasicBlock *header, MBoundsCheck *ins, const SymbolicBound *bound) { if (!bound->loop) return true; if (ins->block() == header) return false; MBasicBlock *bb = ins->block()->immediateDominator(); while (bb != header && bb != bound->loop->test->block()) bb = bb->immediateDominator(); return bb == bound->loop->test->block(); }
// Walk up the dominator tree from |block| to the root and test for any defs // which look potentially interesting to GVN. static bool ScanDominatorsForDefs(MBasicBlock* block) { for (MBasicBlock* i = block;;) { if (BlockHasInterestingDefs(block)) return true; MBasicBlock* immediateDominator = i->immediateDominator(); if (immediateDominator == i) break; i = immediateDominator; } return false; }
// Visit all the blocks in the graph. bool ValueNumberer::visitGraph() { // Due to OSR blocks, the set of blocks dominated by a blocks may not be // contiguous in the RPO. Do a separate traversal for each dominator tree // root. There's always the main entry, and sometimes there's an OSR entry, // and then there are the roots formed where the OSR paths merge with the // main entry paths. for (ReversePostorderIterator iter(graph_.rpoBegin()); ; ) { MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information"); MBasicBlock* block = *iter; if (block->immediateDominator() == block) { if (!visitDominatorTree(block)) return false; // Normally unreachable blocks would be removed by now, but if this // block is a dominator tree root, it has been special-cased and left // in place in order to avoid invalidating our iterator. Now that // we've finished the tree, increment the iterator, and then if it's // marked for removal, remove it. ++iter; if (block->isMarked()) { JitSpew(JitSpew_GVN, " Discarding dominator root block%u", block->id()); MOZ_ASSERT(block->begin() == block->end(), "Unreachable dominator tree root has instructions after tree walk"); MOZ_ASSERT(block->phisEmpty(), "Unreachable dominator tree root has phis after tree walk"); graph_.removeBlock(block); blocksRemoved_ = true; } MOZ_ASSERT(totalNumVisited_ <= graph_.numBlocks(), "Visited blocks too many times"); if (totalNumVisited_ >= graph_.numBlocks()) break; } else { // This block a dominator tree root. Proceed to the next one. ++iter; } } totalNumVisited_ = 0; return true; }
// Given a block which has had predecessors removed but is still reachable, test // whether the block's new dominator will be closer than its old one and whether // it will expose potential optimization opportunities. static MBasicBlock* ComputeNewDominator(MBasicBlock* block, MBasicBlock* old) { MBasicBlock* now = block->getPredecessor(0); for (size_t i = 1, e = block->numPredecessors(); i < e; ++i) { MBasicBlock* pred = block->getPredecessor(i); // Note that dominators haven't been recomputed yet, so we have to check // whether now dominates pred, not block. while (!now->dominates(pred)) { MBasicBlock* next = now->immediateDominator(); if (next == old) return old; if (next == now) { MOZ_ASSERT(block == old, "Non-self-dominating block became self-dominating"); return block; } now = next; } } MOZ_ASSERT(old != block || old != now, "Missed self-dominating block staying self-dominating"); return now; }
bool ValueNumberer::insertOSRFixups() { ReversePostorderIterator end(graph_.end()); for (ReversePostorderIterator iter(graph_.begin()); iter != end; ) { MBasicBlock* block = *iter++; // Only add fixup block above for loops which can be reached from OSR. if (!block->isLoopHeader()) continue; // If the loop header is not self-dominated, then this loop does not // have to deal with a second entry point, so there is no need to add a // second entry point with a fixup block. if (block->immediateDominator() != block) continue; if (!fixupOSROnlyLoop(block, block->backedge())) return false; } return true; }
// Identify the blocks belonging to this loop by walking predecessors backward
// from the backedge to the header, marking each visited block, and collect all
// hoistable instructions (in RPO order) into the worklist. Returns
// LoopReturn_Skip (a soft error) if the loop contains a self-dominating block,
// LoopReturn_Error on OOM, and LoopReturn_Success otherwise.
Loop::LoopReturn
Loop::init()
{
    IonSpew(IonSpew_LICM, "Loop identified, headed by block %d", header_->id());
    IonSpew(IonSpew_LICM, "footer is block %d", header_->backedge()->id());

    // The first predecessor of the loop header must dominate the header.
    JS_ASSERT(header_->id() > header_->getPredecessor(0)->id());

    // Loops from backedge to header and marks all visited blocks
    // as part of the loop. At the same time add all hoistable instructions
    // (in RPO order) to the instruction worklist.
    Vector<MBasicBlock *, 1, IonAllocPolicy> inlooplist;
    if (!inlooplist.append(header_->backedge()))
        return LoopReturn_Error;
    header_->backedge()->mark();

    while (!inlooplist.empty()) {
        MBasicBlock *block = inlooplist.back();

        // Hoisting requires more finesse if the loop contains a block that
        // self-dominates: there exists control flow that may enter the loop
        // without passing through the loop preheader.
        //
        // Rather than perform a complicated analysis of the dominance graph,
        // just return a soft error to ignore this loop.
        if (block->immediateDominator() == block) {
            // Undo the partial worklist before giving up on this loop.
            while (!worklist_.empty())
                popFromWorklist();
            return LoopReturn_Skip;
        }

        // Add not yet visited predecessors to the inlooplist.
        if (block != header_) {
            for (size_t i = 0; i < block->numPredecessors(); i++) {
                MBasicBlock *pred = block->getPredecessor(i);
                if (pred->isMarked())
                    continue;
                if (!inlooplist.append(pred))
                    return LoopReturn_Error;
                pred->mark();
            }
        }

        // If any block was added, process them first.
        if (block != inlooplist.back())
            continue;

        // Add all instructions in this block (but the control instruction) to the worklist
        for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
            MInstruction *ins = *i;

            if (isHoistable(ins)) {
                if (!insertInWorklist(ins))
                    return LoopReturn_Error;
            }
        }

        // All successors of this block are visited.
        inlooplist.popBack();
    }

    return LoopReturn_Success;
}
// Sink pass: move (or clone) instructions which are only needed on some paths
// closer to their uses, and mark instructions whose only uses are in resume
// points / recovered-on-bailout consumers as recoverable on bailout. Only
// instructions which can be recovered on bailout are candidates, since the
// original value must still be reconstructible on the paths it is moved away
// from. Returns false on error or cancellation.
bool
Sink(MIRGenerator* mir, MIRGraph& graph)
{
    TempAllocator& alloc = graph.alloc();
    bool sinkEnabled = mir->optimizationInfo().sinkEnabled();

    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Sink"))
            return false;

        for (MInstructionReverseIterator iter = block->rbegin(); iter != block->rend(); ) {
            MInstruction* ins = *iter++;

            // Only instructions which can be recovered on bailout can be moved
            // into the bailout paths.
            if (ins->isGuard() || ins->isGuardRangeBailouts() ||
                ins->isRecoveredOnBailout() || !ins->canRecoverOnBailout())
            {
                continue;
            }

            // Compute a common dominator for all uses of the current
            // instruction. Resume-point and recovered-on-bailout consumers are
            // not "live" uses and do not constrain the dominator.
            bool hasLiveUses = false;
            bool hasUses = false;
            MBasicBlock* usesDominator = nullptr;
            for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; i++) {
                hasUses = true;
                MNode* consumerNode = (*i)->consumer();
                if (consumerNode->isResumePoint())
                    continue;
                MDefinition* consumer = consumerNode->toDefinition();
                if (consumer->isRecoveredOnBailout())
                    continue;
                hasLiveUses = true;

                // If the instruction is a Phi, then we should dominate the
                // predecessor from which the value is coming from.
                MBasicBlock* consumerBlock = consumer->block();
                if (consumer->isPhi())
                    consumerBlock = consumerBlock->getPredecessor(consumer->indexOf(*i));

                usesDominator = CommonDominator(usesDominator, consumerBlock);
                // The dominator can never get closer than the defining block
                // itself, so stop early.
                if (usesDominator == *block)
                    break;
            }

            // Leave this instruction for DCE.
            if (!hasUses)
                continue;

            // All uses are in resume points / bailout paths: no need to keep
            // the instruction on the main path, just recover it on bailout.
            if (!hasLiveUses) {
                MOZ_ASSERT(!usesDominator);
                ins->setRecoveredOnBailout();
                JitSpewDef(JitSpew_Sink, " No live uses, recover the instruction on bailout\n", ins);
                continue;
            }

            // This guard is temporarily moved here as the above code deals with
            // Dead Code elimination, which got moved into this Sink phase, as
            // the Dead Code elimination used to move instructions with no-live
            // uses to the bailout path.
            if (!sinkEnabled)
                continue;

            // To move an effectful instruction, we would have to verify that the
            // side-effect is not observed. In the mean time, we just inhibit
            // this optimization on effectful instructions.
            if (ins->isEffectful())
                continue;

            // If all the uses are under a loop, we might not want to work
            // against LICM by moving everything back into the loop, but if the
            // loop is it-self inside an if, then we still want to move the
            // computation under this if statement.
            while (block->loopDepth() < usesDominator->loopDepth()) {
                MOZ_ASSERT(usesDominator != usesDominator->immediateDominator());
                usesDominator = usesDominator->immediateDominator();
            }

            // Only move instructions if there is a branch between the dominator
            // of the uses and the original instruction. This prevent moving the
            // computation of the arguments into an inline function if there is
            // no major win.
            MBasicBlock* lastJoin = usesDominator;
            while (*block != lastJoin && lastJoin->numPredecessors() == 1) {
                MOZ_ASSERT(lastJoin != lastJoin->immediateDominator());
                MBasicBlock* next = lastJoin->immediateDominator();
                if (next->numSuccessors() > 1)
                    break;
                lastJoin = next;
            }
            if (*block == lastJoin)
                continue;

            // Skip to the next instruction if we cannot find a common dominator
            // for all the uses of this instruction, or if the common dominator
            // correspond to the block of the current instruction.
            if (!usesDominator || usesDominator == *block)
                continue;

            // Only instruction which can be recovered on bailout and which are
            // sinkable can be moved into blocks which are below while filling
            // the resume points with a clone which is recovered on bailout.

            // If the instruction has live uses and if it is clonable, then we
            // can clone the instruction for all non-dominated uses and move the
            // instruction into the block which is dominating all live uses.
            if (!ins->canClone())
                continue;

            // If the block is a split-edge block, which is created for folding
            // test conditions, then the block has no resume point and has
            // multiple predecessors. In such case, we cannot safely move
            // bailing instruction to these blocks as we have no way to bailout.
            if (!usesDominator->entryResumePoint() && usesDominator->numPredecessors() != 1)
                continue;

            JitSpewDef(JitSpew_Sink, " Can Clone & Recover, sink instruction\n", ins);
            JitSpew(JitSpew_Sink, " into Block %u", usesDominator->id());

            // Copy the arguments and clone the instruction.
            MDefinitionVector operands(alloc);
            for (size_t i = 0, end = ins->numOperands(); i < end; i++) {
                if (!operands.append(ins->getOperand(i)))
                    return false;
            }

            // The clone stays at the original position, recovered on bailout,
            // serving the uses that the moved instruction will no longer dominate.
            MInstruction* clone = ins->clone(alloc, operands);
            ins->block()->insertBefore(ins, clone);
            clone->setRecoveredOnBailout();

            // We should not update the producer of the entry resume point, as
            // it cannot refer to any instruction within the basic block excepts
            // for Phi nodes.
            MResumePoint* entry = usesDominator->entryResumePoint();

            // Replace the instruction by its clone in all the resume points /
            // recovered-on-bailout instructions which are not in blocks which
            // are dominated by the usesDominator block.
            for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; ) {
                MUse* use = *i++;
                MNode* consumer = use->consumer();

                // If the consumer is a Phi, then we look for the index of the
                // use to find the corresponding predecessor block, which is
                // then used as the consumer block.
                MBasicBlock* consumerBlock = consumer->block();
                if (consumer->isDefinition() && consumer->toDefinition()->isPhi()) {
                    consumerBlock = consumerBlock->getPredecessor(
                        consumer->toDefinition()->toPhi()->indexOf(use));
                }

                // Keep the current instruction for all dominated uses, except
                // for the entry resume point of the block in which the
                // instruction would be moved into.
                if (usesDominator->dominates(consumerBlock) &&
                    (!consumer->isResumePoint() || consumer->toResumePoint() != entry))
                {
                    continue;
                }

                use->replaceProducer(clone);
            }

            // As we move this instruction in a different block, we should
            // verify that we do not carry over a resume point which would refer
            // to an outdated state of the control flow.
            if (ins->resumePoint())
                ins->clearResumePoint();

            // Now, that all uses which are not dominated by usesDominator are
            // using the cloned instruction, we can safely move the instruction
            // into the usesDominator block.
            MInstruction* at = usesDominator->safeInsertTop(nullptr, MBasicBlock::IgnoreRecover);
            block->moveBefore(at, ins);
        }
    }

    return true;
}
// A bounds check is considered redundant if it's dominated by another bounds
// check with the same length and the indexes differ by only a constant amount.
// In this case we eliminate the redundant bounds check and update the other one
// to cover the ranges of both checks.
//
// Bounds checks are added to a hash map and since the hash function ignores
// differences in constant offset, this offers a fast way to find redundant
// checks.
bool
ion::EliminateRedundantBoundsChecks(MIRGraph &graph)
{
    BoundsCheckMap checks;

    if (!checks.init())
        return false;

    // Stack for pre-order CFG traversal.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();

        // Add all immediately dominated blocks to the worklist, continuing the
        // pre-order traversal of the dominator tree.
        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }

        for (MDefinitionIterator iter(block); iter; ) {
            if (!iter->isBoundsCheck()) {
                iter++;
                continue;
            }

            MBoundsCheck *check = iter->toBoundsCheck();

            // Replace all uses of the bounds check with the actual index.
            // This is (a) necessary, because we can coalesce two different
            // bounds checks and would otherwise use the wrong index and
            // (b) helps register allocation. Note that this is safe since
            // no other pass after bounds check elimination moves instructions.
            check->replaceAllUsesWith(check->index());

            if (!check->isMovable()) {
                iter++;
                continue;
            }

            // NULL here signals an OOM while inserting into the map.
            MBoundsCheck *dominating = FindDominatingBoundsCheck(checks, check, index);
            if (!dominating)
                return false;

            if (dominating == check) {
                // We didn't find a dominating bounds check.
                iter++;
                continue;
            }

            bool eliminated = false;
            if (!TryEliminateBoundsCheck(dominating, check, &eliminated))
                return false;

            if (eliminated)
                iter = check->block()->discardDefAt(iter);
            else
                iter++;
        }
        index++;
    }

    JS_ASSERT(index == graph.numBlocks());
    return true;
}
// Build the dominator tree: compute immediate dominators, link each block to
// its immediately dominated children, accumulate numDominated() counts, and
// annotate every block with its index in a pre-order traversal of the
// dominator tree. Returns false on OOM.
bool
ion::BuildDominatorTree(MIRGraph &graph)
{
    ComputeImmediateDominators(graph);

    // Traversing through the graph in post-order means that every use
    // of a definition is visited before the def itself. Since a def
    // dominates its uses, by the time we reach a particular
    // block, we have processed all of its dominated children, so
    // block->numDominated() is accurate.
    for (PostorderIterator i(graph.poBegin()); i != graph.poEnd(); i++) {
        MBasicBlock *child = *i;
        MBasicBlock *parent = child->immediateDominator();

        // If the block only self-dominates, it has no definite parent.
        if (child == parent)
            continue;

        if (!parent->addImmediatelyDominatedBlock(child))
            return false;

        // An additional +1 for the child block.
        parent->addNumDominated(child->numDominated() + 1);
    }

#ifdef DEBUG
    // If compiling with OSR, many blocks will self-dominate.
    // Without OSR, there is only one root block which dominates all.
    if (!graph.osrBlock())
        JS_ASSERT(graph.begin()->numDominated() == graph.numBlocks() - 1);
#endif

    // Now, iterate through the dominator tree and annotate every
    // block with its index in the pre-order traversal of the
    // dominator tree.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();
        block->setDomIndex(index);

        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }
        index++;
    }

    return true;
}
// Eliminate checks which are redundant given each other or other instructions.
//
// A type barrier is considered redundant if all missing types have been tested
// for by earlier control instructions.
//
// A bounds check is considered redundant if it's dominated by another bounds
// check with the same length and the indexes differ by only a constant amount.
// In this case we eliminate the redundant bounds check and update the other one
// to cover the ranges of both checks.
//
// Bounds checks are added to a hash map and since the hash function ignores
// differences in constant offset, this offers a fast way to find redundant
// checks.
bool
ion::EliminateRedundantChecks(MIRGraph &graph)
{
    BoundsCheckMap checks;

    if (!checks.init())
        return false;

    // Stack for pre-order CFG traversal.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();

        // Add all immediately dominated blocks to the worklist, continuing the
        // pre-order traversal of the dominator tree.
        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }

        for (MDefinitionIterator iter(block); iter; ) {
            bool eliminated = false;

            if (iter->isBoundsCheck()) {
                if (!TryEliminateBoundsCheck(checks, index, iter->toBoundsCheck(), &eliminated))
                    return false;
            } else if (iter->isTypeBarrier()) {
                if (!TryEliminateTypeBarrier(iter->toTypeBarrier(), &eliminated))
                    return false;
            } else if (iter->isConvertElementsToDoubles()) {
                // Now that code motion passes have finished, replace any
                // ConvertElementsToDoubles with the actual elements.
                MConvertElementsToDoubles *ins = iter->toConvertElementsToDoubles();
                ins->replaceAllUsesWith(ins->elements());
            }

            if (eliminated)
                iter = block->discardDefAt(iter);
            else
                iter++;
        }
        index++;
    }

    JS_ASSERT(index == graph.numBlocks());
    return true;
}
// Try to derive an upper bound on the number of backedges a loop will execute
// from the loop-exiting test |test| (taken in |direction| to stay in the
// loop). The test must compare a loop phi that changes by +1/-1 each
// iteration against a loop-invariant term. Returns NULL when no bound can be
// computed, or a new LoopIterationBound describing the symbolic bound.
LoopIterationBound *
RangeAnalysis::analyzeLoopIterationCount(MBasicBlock *header,
                                         MTest *test, BranchDirection direction)
{
    SimpleLinearSum lhs(NULL, 0);
    MDefinition *rhs;
    bool lessEqual;
    if (!ExtractLinearInequality(test, direction, &lhs, &rhs, &lessEqual))
        return NULL;

    // Ensure the rhs is a loop invariant term. (Marked blocks are the blocks
    // in the loop body.) If the rhs varies, swap the two sides, negating the
    // constant and the sense of the comparison.
    if (rhs && rhs->block()->isMarked()) {
        if (lhs.term && lhs.term->block()->isMarked())
            return NULL;
        MDefinition *temp = lhs.term;
        lhs.term = rhs;
        rhs = temp;
        if (!SafeSub(0, lhs.constant, &lhs.constant))
            return NULL;
        lessEqual = !lessEqual;
    }

    JS_ASSERT_IF(rhs, !rhs->block()->isMarked());

    // Ensure the lhs is a phi node from the start of the loop body.
    if (!lhs.term || !lhs.term->isPhi() || lhs.term->block() != header)
        return NULL;

    // Check that the value of the lhs changes by a constant amount with each
    // loop iteration. This requires that the lhs be written in every loop
    // iteration with a value that is a constant difference from its value at
    // the start of the iteration.
    if (lhs.term->toPhi()->numOperands() != 2)
        return NULL;

    // The first operand of the phi should be the lhs' value at the start of
    // the first executed iteration, and not a value written which could
    // replace the second operand below during the middle of execution.
    MDefinition *lhsInitial = lhs.term->toPhi()->getOperand(0);
    if (lhsInitial->block()->isMarked())
        return NULL;

    // The second operand of the phi should be a value written by an add/sub
    // in every loop iteration, i.e. in a block which dominates the backedge.
    MDefinition *lhsWrite = lhs.term->toPhi()->getOperand(1);
    if (lhsWrite->isBeta())
        lhsWrite = lhsWrite->getOperand(0);
    if (!lhsWrite->isAdd() && !lhsWrite->isSub())
        return NULL;
    if (!lhsWrite->block()->isMarked())
        return NULL;
    // Walk the dominator chain above the backedge; the write must be on it.
    MBasicBlock *bb = header->backedge();
    for (; bb != lhsWrite->block() && bb != header; bb = bb->immediateDominator()) {}
    if (bb != lhsWrite->block())
        return NULL;

    SimpleLinearSum lhsModified = ExtractLinearSum(lhsWrite);

    // Check that the value of the lhs at the backedge is of the form
    // 'old(lhs) + N'. We can be sure that old(lhs) is the value at the start
    // of the iteration, and not that written to lhs in a previous iteration,
    // as such a previous value could not appear directly in the addition:
    // it could not be stored in lhs as the lhs add/sub executes in every
    // iteration, and if it were stored in another variable its use here would
    // be as an operand to a phi node for that variable.
    if (lhsModified.term != lhs.term)
        return NULL;

    LinearSum bound;

    if (lhsModified.constant == 1 && !lessEqual) {
        // The value of lhs is 'initial(lhs) + iterCount' and this will end
        // execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound
        // on the number of backedges executed is:
        //
        // initial(lhs) + iterCount + lhsN == rhs
        // iterCount == rhsN - initial(lhs) - lhsN
        if (rhs) {
            if (!bound.add(rhs, 1))
                return NULL;
        }
        if (!bound.add(lhsInitial, -1))
            return NULL;

        int32_t lhsConstant;
        if (!SafeSub(0, lhs.constant, &lhsConstant))
            return NULL;
        if (!bound.add(lhsConstant))
            return NULL;
    } else if (lhsModified.constant == -1 && lessEqual) {
        // The value of lhs is 'initial(lhs) - iterCount'. Similar to the above
        // case, an upper bound on the number of backedges executed is:
        //
        // initial(lhs) - iterCount + lhsN == rhs
        // iterCount == initial(lhs) - rhs + lhsN
        if (!bound.add(lhsInitial, 1))
            return NULL;

        if (rhs) {
            if (!bound.add(rhs, -1))
                return NULL;
        }
        if (!bound.add(lhs.constant))
            return NULL;
    } else {
        // Only unit increments/decrements are handled.
        return NULL;
    }

    return new LoopIterationBound(header, test, bound);
}
// Analyze one loop: compute a symbolic bound on its backedge count, propagate
// that bound to the loop header's phis, and hoist bounds checks out of the
// loop where the symbolic bounds allow it. Blocks in the loop body are marked
// for the duration of the analysis and unmarked before returning.
void
RangeAnalysis::analyzeLoop(MBasicBlock *header)
{
    // Try to compute an upper bound on the number of times the loop backedge
    // will be taken. Look for tests that dominate the backedge and which have
    // an edge leaving the loop body.
    MBasicBlock *backedge = header->backedge();

    // Ignore trivial infinite loops.
    if (backedge == header)
        return;

    markBlocksInLoopBody(header, backedge);

    LoopIterationBound *iterationBound = NULL;

    // Walk the dominator chain above the backedge, examining the branch that
    // controls entry to each dominating block.
    MBasicBlock *block = backedge;
    do {
        BranchDirection direction;
        MTest *branch = block->immediateDominatorBranch(&direction);

        // A self-dominating block is a dominator tree root; stop there.
        if (block == block->immediateDominator())
            break;

        block = block->immediateDominator();

        if (branch) {
            // The negated direction is the edge that leaves the loop; only a
            // branch exiting the loop body (unmarked target) bounds iteration.
            direction = NegateBranchDirection(direction);
            MBasicBlock *otherBlock = branch->branchSuccessor(direction);
            if (!otherBlock->isMarked()) {
                iterationBound = analyzeLoopIterationCount(header, branch, direction);
                if (iterationBound)
                    break;
            }
        }
    } while (block != header);

    if (!iterationBound) {
        graph_.unmarkBlocks();
        return;
    }

#ifdef DEBUG
    if (IonSpewEnabled(IonSpew_Range)) {
        Sprinter sp(GetIonContext()->cx);
        sp.init();
        iterationBound->sum.print(sp);
        IonSpew(IonSpew_Range, "computed symbolic bound on backedges: %s",
                sp.string());
    }
#endif

    // Try to compute symbolic bounds for the phi nodes at the head of this
    // loop, expressed in terms of the iteration bound just computed.
    for (MDefinitionIterator iter(header); iter; iter++) {
        MDefinition *def = *iter;
        if (def->isPhi())
            analyzeLoopPhi(header, iterationBound, def->toPhi());
    }

    // Try to hoist any bounds checks from the loop using symbolic bounds.
    Vector<MBoundsCheck *, 0, IonAllocPolicy> hoistedChecks;

    for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) {
        MBasicBlock *block = *iter;
        if (!block->isMarked())
            continue;

        for (MDefinitionIterator iter(block); iter; iter++) {
            MDefinition *def = *iter;
            if (def->isBoundsCheck() && def->isMovable()) {
                // NOTE(review): the append's return value is ignored — on OOM
                // the check stays in the loop unhoisted; presumably acceptable
                // as a missed optimization, but confirm this is intentional.
                if (tryHoistBoundsCheck(header, def->toBoundsCheck()))
                    hoistedChecks.append(def->toBoundsCheck());
            }
        }
    }

    // Note: replace all uses of the original bounds check with the
    // actual index. This is usually done during bounds check elimination,
    // but in this case it's safe to do it here since the load/store is
    // definitely not loop-invariant, so we will never move it before
    // one of the bounds checks we just added.
    for (size_t i = 0; i < hoistedChecks.length(); i++) {
        MBoundsCheck *ins = hoistedChecks[i];
        ins->replaceAllUsesWith(ins->index());
        ins->block()->discard(ins);
    }

    graph_.unmarkBlocks();
}
// Hoist the queued loop-invariant instructions and bounds checks into the
// loop's preheader (preLoop_). Bounds checks are handled first so that
// hoistBoundsCheck can test for invariant instructions; the transformed
// checks are inserted at the very end to respect dependencies on other
// hoisted invariant instructions. Returns false on OOM.
bool
Loop::hoistInstructions(InstructionQueue &toHoist, InstructionQueue &boundsChecks)
{
    // Hoist bounds checks first, so that hoistBoundsCheck can test for
    // invariant instructions, but delay actual insertion until the end to
    // handle dependencies on loop invariant instructions.
    InstructionQueue hoistedChecks;
    for (size_t i = 0; i < boundsChecks.length(); i++) {
        MBoundsCheck *ins = boundsChecks[i]->toBoundsCheck();
        if (isLoopInvariant(ins) || !isInLoop(ins))
            continue;

        // Try to find a test dominating the bounds check which can be
        // transformed into a hoistable check. Stop after the first such check
        // which could be transformed (the one which will be the closest to the
        // access in the source).
        MBasicBlock *block = ins->block();
        while (true) {
            BranchDirection direction;
            MTest *branch = block->immediateDominatorBranch(&direction);
            if (branch) {
                MInstruction *upper, *lower;
                tryHoistBoundsCheck(ins, branch, direction, &upper, &lower);
                if (upper && !hoistedChecks.append(upper))
                    return false;
                if (lower && !hoistedChecks.append(lower))
                    return false;
                // The original check is subsumed by the hoisted ones.
                if (upper || lower) {
                    ins->block()->discard(ins);
                    break;
                }
            }
            // Stop at the dominator tree root (self-dominating block).
            MBasicBlock *dom = block->immediateDominator();
            if (dom == block)
                break;
            block = dom;
        }
    }

    // Move all instructions to the preLoop_ block just before the control instruction.
    for (size_t i = 0; i < toHoist.length(); i++) {
        MInstruction *ins = toHoist[i];

        // Loads may have an implicit dependency on either stores (effectful instructions) or
        // control instructions so we should never move these.
        JS_ASSERT(!ins->isControlInstruction());
        JS_ASSERT(!ins->isEffectful());
        JS_ASSERT(ins->isMovable());

        if (checkHotness(ins->block())) {
            ins->block()->moveBefore(preLoop_->lastIns(), ins);
            ins->setNotLoopInvariant();
        }
    }

    // Insert the transformed checks last, after the invariant instructions
    // they may depend on have been moved into the preheader.
    for (size_t i = 0; i < hoistedChecks.length(); i++) {
        MInstruction *ins = hoistedChecks[i];
        preLoop_->insertBefore(preLoop_->lastIns(), ins);
    }

    return true;
}
bool
ValueNumberer::eliminateRedundancies()
{
    // A definition is 'redundant' iff it is dominated by another definition
    // with the same value number.
    //
    // So, we traverse the dominator tree in pre-order, maintaining a hashmap
    // from value numbers to instructions.
    //
    // For each definition d with value number v, we look up v in the hashmap.
    //
    // If there is a definition d' in the hashmap, and the current traversal
    // index is within that instruction's dominated range, then we eliminate d,
    // replacing all uses of d with uses of d'.
    //
    // If there is no valid definition in the hashtable (the current definition
    // is not in dominated scope), then we insert the current instruction,
    // since it is the most dominant instruction with the given value number.
    InstructionMap defs;

    if (!defs.init())
        return false;

    IonSpew(IonSpew_GVN, "Eliminating redundant instructions");

    // Stack for pre-order CFG traversal.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph_.begin()); i != graph_.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();

        IonSpew(IonSpew_GVN, "Looking at block %d", block->id());

        // Add all immediately dominated blocks to the worklist, continuing the
        // pre-order traversal of the dominator tree.
        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }

        // For each instruction, attempt to look up a dominating definition.
        for (MDefinitionIterator iter(block); iter; ) {
            MDefinition *ins = simplify(*iter, true);

            // Instruction was replaced, and all uses have already been fixed.
            if (ins != *iter) {
                iter = block->discardDefAt(iter);
                continue;
            }

            // Instruction has side-effects and cannot be folded.
            if (!ins->isMovable() || ins->isEffectful()) {
                iter++;
                continue;
            }

            // NULL here signals an OOM while inserting into the map.
            MDefinition *dom = findDominatingDef(defs, ins, index);
            if (!dom)
                return false; // Insertion failed.

            if (dom == ins || !dom->updateForReplacement(ins)) {
                iter++;
                continue;
            }

            IonSpew(IonSpew_GVN, "instruction %d is dominated by instruction %d (from block %d)",
                    ins->id(), dom->id(), dom->block()->id());

            ins->replaceAllUsesWith(dom);

            JS_ASSERT(!ins->hasUses());
            JS_ASSERT(ins->block() == block);
            JS_ASSERT(!ins->isEffectful());
            JS_ASSERT(ins->isMovable());

            iter = ins->block()->discardDefAt(iter);
        }
        index++;
    }

    JS_ASSERT(index == graph_.numBlocks());
    return true;
}