void
ion::AssertGraphCoherency(MIRGraph &graph)
{
#ifdef DEBUG
    // Assert successor and predecessor list coherency.
    uint32_t count = 0;
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        count++;

        for (size_t i = 0; i < block->numSuccessors(); i++)
            JS_ASSERT(CheckSuccessorImpliesPredecessor(*block, block->getSuccessor(i)));

        for (size_t i = 0; i < block->numPredecessors(); i++)
            JS_ASSERT(CheckPredecessorImpliesSuccessor(*block, block->getPredecessor(i)));

        for (MInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            for (uint32_t i = 0; i < ins->numOperands(); i++)
                JS_ASSERT(CheckMarkedAsUse(*ins, ins->getOperand(i)));
        }
    }

    JS_ASSERT(graph.numBlocks() == count);

    AssertReversePostOrder(graph);
#endif
}
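// A minimal sketch of the coherency predicate asserted above: if B is a
// successor of A, then A must appear in B's predecessor list.
// (CheckPredecessorImpliesSuccessor is the mirror image.) This is an
// illustrative reconstruction, not necessarily the exact implementation; it
// assumes only the MBasicBlock accessors already used in AssertGraphCoherency.
static bool
CheckSuccessorImpliesPredecessor(MBasicBlock *A, MBasicBlock *B)
{
    // Assuming B = succ(A), verify A = pred(B).
    for (size_t i = 0; i < B->numPredecessors(); i++) {
        if (A == B->getPredecessor(i))
            return true;
    }
    return false;
}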
// A bounds check is considered redundant if it's dominated by another bounds
// check with the same length and the indexes differ by only a constant amount.
// In this case we eliminate the redundant bounds check and update the other one
// to cover the ranges of both checks.
//
// Bounds checks are added to a hash map and since the hash function ignores
// differences in constant offset, this offers a fast way to find redundant
// checks.
bool
ion::EliminateRedundantBoundsChecks(MIRGraph &graph)
{
    BoundsCheckMap checks;

    if (!checks.init())
        return false;

    // Stack for pre-order traversal of the dominator tree.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the dominator tree
    // in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();

        // Push all immediately dominated blocks onto the worklist.
        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }

        for (MDefinitionIterator iter(block); iter; ) {
            if (!iter->isBoundsCheck()) {
                iter++;
                continue;
            }

            MBoundsCheck *check = iter->toBoundsCheck();

            // Replace all uses of the bounds check with the actual index.
            // This is (a) necessary, because we can coalesce two different
            // bounds checks and would otherwise use the wrong index and
            // (b) helps register allocation. Note that this is safe since
            // no other pass after bounds check elimination moves instructions.
            check->replaceAllUsesWith(check->index());

            if (!check->isMovable()) {
                iter++;
                continue;
            }

            MBoundsCheck *dominating = FindDominatingBoundsCheck(checks, check, index);
            if (!dominating)
                return false;

            if (dominating == check) {
                // We didn't find a dominating bounds check.
                iter++;
                continue;
            }

            bool eliminated = false;
            if (!TryEliminateBoundsCheck(dominating, check, &eliminated))
                return false;

            if (eliminated)
                iter = check->block()->discardDefAt(iter);
            else
                iter++;
        }
        index++;
    }

    JS_ASSERT(index == graph.numBlocks());
    return true;
}
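// Illustrative sketch of the hash-map lookup used above. The names here
// (BoundsCheckInfo, validUntil, BoundsCheckHashIgnoreOffset) are assumptions
// for exposition, not confirmed API. The idea: the map is keyed on a hash that
// ignores the constant offset in the index, and a cached entry only dominates
// the current block while the pre-order index is still inside the entry's
// dominator subtree. Returning the input check itself signals "no dominating
// check found"; returning NULL signals OOM (matching the call site above).
static MBoundsCheck *
FindDominatingBoundsCheck(BoundsCheckMap &checks, MBoundsCheck *check, size_t index)
{
    HashNumber hash = BoundsCheckHashIgnoreOffset(check);
    BoundsCheckMap::Ptr p = checks.lookup(hash);
    if (!p || index > p->value.validUntil) {
        // No dominating check in the map, or the cached one no longer
        // dominates this block. Record this check; it stays valid for every
        // block in its dominator subtree, i.e. for pre-order indexes in
        // [index, index + numDominated].
        BoundsCheckInfo info;
        info.check = check;
        info.validUntil = index + check->block()->numDominated();
        if (!checks.put(hash, info))
            return NULL;   // OOM
        return check;
    }
    return p->value.check;
}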
bool
ion::BuildDominatorTree(MIRGraph &graph)
{
    ComputeImmediateDominators(graph);

    // Traversing the graph in post-order means that every block is visited
    // after all the blocks it dominates: a dominator is always an ancestor of
    // its dominated blocks in any depth-first traversal. So by the time we
    // reach a particular block, we have processed all of its dominated
    // children, and block->numDominated() is accurate.
    for (PostorderIterator i(graph.poBegin()); i != graph.poEnd(); i++) {
        MBasicBlock *child = *i;
        MBasicBlock *parent = child->immediateDominator();

        // If the block only self-dominates, it has no definite parent.
        if (child == parent)
            continue;

        if (!parent->addImmediatelyDominatedBlock(child))
            return false;

        // An additional +1 for the child block itself.
        parent->addNumDominated(child->numDominated() + 1);
    }

#ifdef DEBUG
    // If compiling with OSR, many blocks will self-dominate.
    // Without OSR, there is only one root block which dominates all.
    if (!graph.osrBlock())
        JS_ASSERT(graph.begin()->numDominated() == graph.numBlocks() - 1);
#endif

    // Now, iterate through the dominator tree and annotate every block with
    // its index in the pre-order traversal of the dominator tree.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the dominator tree
    // in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();
        block->setDomIndex(index);

        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }
        index++;
    }

    return true;
}
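// Sketch of what the pre-order indexing buys us, assuming a domIndex()
// accessor matching the setDomIndex() call above (an illustration, not the
// exact MBasicBlock method). Because a stack-based pre-order traversal assigns
// each dominator subtree a contiguous range of indexes, dominance becomes a
// constant-time interval test.
static bool
BlockDominates(MBasicBlock *dominator, MBasicBlock *dominated)
{
    // The subtree rooted at |dominator| spans pre-order indexes
    // [domIndex, domIndex + numDominated]; numDominated excludes the block
    // itself, per the DEBUG assertion in BuildDominatorTree.
    return dominator->domIndex() <= dominated->domIndex() &&
           dominated->domIndex() <= dominator->domIndex() + dominator->numDominated();
}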
// Eliminate checks which are redundant given each other or other instructions.
//
// A type barrier is considered redundant if all missing types have been tested
// for by earlier control instructions.
//
// A bounds check is considered redundant if it's dominated by another bounds
// check with the same length and the indexes differ by only a constant amount.
// In this case we eliminate the redundant bounds check and update the other one
// to cover the ranges of both checks.
//
// Bounds checks are added to a hash map and since the hash function ignores
// differences in constant offset, this offers a fast way to find redundant
// checks.
bool
ion::EliminateRedundantChecks(MIRGraph &graph)
{
    BoundsCheckMap checks;

    if (!checks.init())
        return false;

    // Stack for pre-order traversal of the dominator tree.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the dominator tree
    // in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();

        // Push all immediately dominated blocks onto the worklist.
        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }

        for (MDefinitionIterator iter(block); iter; ) {
            bool eliminated = false;

            if (iter->isBoundsCheck()) {
                if (!TryEliminateBoundsCheck(checks, index, iter->toBoundsCheck(), &eliminated))
                    return false;
            } else if (iter->isTypeBarrier()) {
                if (!TryEliminateTypeBarrier(iter->toTypeBarrier(), &eliminated))
                    return false;
            } else if (iter->isConvertElementsToDoubles()) {
                // Now that code motion passes have finished, replace any
                // ConvertElementsToDoubles with the actual elements.
                MConvertElementsToDoubles *ins = iter->toConvertElementsToDoubles();
                ins->replaceAllUsesWith(ins->elements());
            }

            if (eliminated)
                iter = block->discardDefAt(iter);
            else
                iter++;
        }
        index++;
    }

    JS_ASSERT(index == graph.numBlocks());
    return true;
}
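// Sketch of the range-merging step performed when a dominated bounds check is
// coalesced into its dominating check (the "update the other one to cover the
// ranges of both checks" case from the comment above). The helper name and
// the explicit offset parameters are assumptions for exposition: the two
// indexes are taken to share a base definition and differ only by the
// constant offsets offsetA and offsetB.
static void
MergeBoundsCheckRanges(MBoundsCheck *dominating, int32_t offsetA,
                       MBoundsCheck *dominated, int32_t offsetB)
{
    // Normalize both guarded ranges to the common base index by folding in
    // the constant offsets, take their union, then denormalize back to
    // |dominating|'s offset. A production version must also guard these
    // additions and subtractions against int32 overflow.
    int32_t minimum = Min(offsetA + dominating->minimum(), offsetB + dominated->minimum());
    int32_t maximum = Max(offsetA + dominating->maximum(), offsetB + dominated->maximum());
    dominating->setMinimum(minimum - offsetA);
    dominating->setMaximum(maximum - offsetA);
}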