bool RangeAnalysis::removeBetaNobes() { IonSpew(IonSpew_Range, "Removing beta nobes"); for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) { MBasicBlock *block = *i; for (MDefinitionIterator iter(*i); iter; ) { MDefinition *def = *iter; if (def->isBeta()) { MDefinition *op = def->getOperand(0); IonSpew(IonSpew_Range, "Removing beta node %d for %d", def->id(), op->id()); def->replaceAllUsesWith(op); iter = block->discardDefAt(iter); } else { // We only place Beta nodes at the beginning of basic // blocks, so if we see something else, we can move on // to the next block. break; } } } return true; }
// Global value numbering: remove definitions that are dominated by an
// equivalent (same value number) definition, rewriting uses to the
// dominating one.
bool
ValueNumberer::eliminateRedundancies()
{
    // A definition is 'redundant' iff it is dominated by another definition
    // with the same value number.
    //
    // So, we traverse the dominator tree in pre-order, maintaining a hashmap
    // from value numbers to instructions.
    //
    // For each definition d with value number v, we look up v in the hashmap.
    //
    // If there is a definition d' in the hashmap, and the current traversal
    // index is within that instruction's dominated range, then we eliminate d,
    // replacing all uses of d with uses of d'.
    //
    // If there is no valid definition in the hashtable (the current definition
    // is not in dominated scope), then we insert the current instruction,
    // since it is the most dominant instruction with the given value number.
    InstructionMap defs;

    if (!defs.init())
        return false;

    IonSpew(IonSpew_GVN, "Eliminating redundant instructions");

    // Stack for pre-order CFG traversal.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph_.begin()); i != graph_.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();

        IonSpew(IonSpew_GVN, "Looking at block %d", block->id());

        // Push this block's immediately dominated blocks onto the worklist,
        // so the traversal visits them after this block (pre-order over the
        // dominator tree).
        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }

        // For each instruction, attempt to look up a dominating definition.
        for (MDefinitionIterator iter(block); iter; ) {
            MDefinition *ins = simplify(*iter, true);

            // Instruction was replaced, and all uses have already been fixed.
            if (ins != *iter) {
                iter = block->discardDefAt(iter);
                continue;
            }

            // Instruction has side-effects and cannot be folded.
            if (!ins->isMovable() || ins->isEffectful()) {
                iter++;
                continue;
            }

            // Either returns the dominating equivalent definition, or records
            // |ins| in |defs| as the most dominant holder of its value number.
            MDefinition *dom = findDominatingDef(defs, ins, index);
            if (!dom)
                return false; // Insertion failed.

            if (dom == ins || !dom->updateForReplacement(ins)) {
                iter++;
                continue;
            }

            IonSpew(IonSpew_GVN, "instruction %d is dominated by instruction %d (from block %d)",
                    ins->id(), dom->id(), dom->block()->id());

            ins->replaceAllUsesWith(dom);

            JS_ASSERT(!ins->hasUses());
            JS_ASSERT(ins->block() == block);
            JS_ASSERT(!ins->isEffectful());
            JS_ASSERT(ins->isMovable());

            iter = ins->block()->discardDefAt(iter);
        }
        // |index| counts blocks in traversal order; findDominatingDef uses it
        // to test whether a candidate's dominated range covers this block.
        index++;
    }

    JS_ASSERT(index == graph_.numBlocks());
    return true;
}
// Eliminate checks which are redundant given each other or other instructions.
//
// A type barrier is considered redundant if all missing types have been tested
// for by earlier control instructions.
//
// A bounds check is considered redundant if it's dominated by another bounds
// check with the same length and the indexes differ by only a constant amount.
// In this case we eliminate the redundant bounds check and update the other one
// to cover the ranges of both checks.
//
// Bounds checks are added to a hash map and since the hash function ignores
// differences in constant offset, this offers a fast way to find redundant
// checks.
bool
ion::EliminateRedundantChecks(MIRGraph &graph)
{
    BoundsCheckMap checks;

    if (!checks.init())
        return false;

    // Stack for pre-order CFG traversal.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();

        // Push this block's immediately dominated blocks onto the worklist,
        // so the traversal visits them after this block (pre-order over the
        // dominator tree).
        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }

        for (MDefinitionIterator iter(block); iter; ) {
            bool eliminated = false;

            if (iter->isBoundsCheck()) {
                if (!TryEliminateBoundsCheck(checks, index, iter->toBoundsCheck(), &eliminated))
                    return false;
            } else if (iter->isTypeBarrier()) {
                if (!TryEliminateTypeBarrier(iter->toTypeBarrier(), &eliminated))
                    return false;
            } else if (iter->isConvertElementsToDoubles()) {
                // Now that code motion passes have finished, replace any
                // ConvertElementsToDoubles with the actual elements.
                // NOTE(review): |eliminated| is deliberately left false here,
                // so the instruction itself is kept in the block even though
                // its uses are rewritten — presumably a later pass removes it
                // if dead; confirm this is intentional.
                MConvertElementsToDoubles *ins = iter->toConvertElementsToDoubles();
                ins->replaceAllUsesWith(ins->elements());
            }

            if (eliminated)
                iter = block->discardDefAt(iter);
            else
                iter++;
        }
        // |index| counts blocks in traversal order; TryEliminateBoundsCheck
        // uses it to decide whether a recorded check dominates this block.
        index++;
    }

    JS_ASSERT(index == graph.numBlocks());
    return true;
}