// Try to replace |def| with a simplified/folded form produced by foldsTo().
// Returns the replacement definition when folding succeeded, or |def| itself
// when no simplification applies. On success, all uses of |def| are redirected
// to the replacement; the caller is responsible for discarding |def|.
MDefinition *
ValueNumberer::simplify(MDefinition *def, bool useValueNumbers)
{
    // Effectful instructions must never be folded away.
    if (def->isEffectful())
        return def;

    MDefinition *ins = def->foldsTo(useValueNumbers);

    // foldsTo() returning |def| means "no simplification found";
    // updateForFolding() failing means the fold turned out not to apply.
    if (ins == def || !ins->updateForFolding(def))
        return def;

    // ensure this instruction has a VN
    if (!ins->valueNumberData())
        ins->setValueNumberData(new ValueNumberData);

    if (!ins->block()) {
        // In this case, we made a new def by constant folding, for
        // example, we replaced add(#3,#4) with a new const(#7) node.

        // We will only fold a phi into one of its operands.
        JS_ASSERT(!def->isPhi());

        def->block()->insertAfter(def->toInstruction(), ins->toInstruction());
        // NOTE(review): lookupValue() appears fallible elsewhere (checked for 0
        // in computeValueNumbers); the result is not checked here — confirm a
        // zero value number is tolerated by downstream code.
        ins->setValueNumber(lookupValue(ins));
    }

    JS_ASSERT(ins->id() != 0);

    def->replaceAllUsesWith(ins);

    IonSpew(IonSpew_GVN, "Folding %d to be %d", def->id(), ins->id());
    return ins;
}
bool RangeAnalysis::removeBetaNobes() { IonSpew(IonSpew_Range, "Removing beta nobes"); for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) { MBasicBlock *block = *i; for (MDefinitionIterator iter(*i); iter; ) { MDefinition *def = *iter; if (def->isBeta()) { MDefinition *op = def->getOperand(0); IonSpew(IonSpew_Range, "Removing beta node %d for %d", def->id(), op->id()); def->replaceAllUsesWith(op); iter = block->discardDefAt(iter); } else { // We only place Beta nodes at the beginning of basic // blocks, so if we see something else, we can move on // to the next block. break; } } } return true; }
// Handle |def| leaving its congruence class after its value number changed.
// Two cases:
//  - |def| is the class representative (valueNumber() == id()): promote the
//    next member to representative and renumber/re-mark the remaining members.
//  - otherwise: simply unlink |def| from the doubly-linked class list.
void
ValueNumberer::breakClass(MDefinition *def)
{
    if (def->valueNumber() == def->id()) {
        IonSpew(IonSpew_GVN, "Breaking congruence with itself: %d", def->id());
        ValueNumberData *defdata = def->valueNumberData();
        // The representative is always the head of its class list.
        JS_ASSERT(defdata->classPrev == NULL);

        // If the def was the only member of the class, then there is nothing to do.
        if (defdata->classNext == NULL)
            return;

        // If upon closer inspection, we are still equivalent to this class
        // then there isn't anything for us to do.
        if (!needsSplit(def))
            return;

        // Get a new representative member
        MDefinition *newRep = defdata->classNext;

        // Chop off the head of the list (the old representative)
        newRep->valueNumberData()->classPrev = NULL;
        def->valueNumberData()->classNext = NULL;

        IonSpew(IonSpew_GVN, "Choosing a new representative: %d", newRep->id());

        // make the VN of every member in the class the VN of the new representative number.
        for (MDefinition *tmp = newRep; tmp != NULL; tmp = tmp->valueNumberData()->classNext) {
            // if this instruction is already scheduled to be processed, don't do anything.
            if (tmp->isInWorklist())
                continue;
            IonSpew(IonSpew_GVN, "Moving to a new congruence class: %d", tmp->id());
            tmp->setValueNumber(newRep->id());
            markConsumers(tmp);
            markDefinition(tmp);
        }

        // Insert the new representative => number mapping into the table
        // Logically, there should not be anything in the table currently, but
        // old values are never removed, so there's a good chance something will
        // already be there.
        values.put(newRep, newRep->id());
    } else {
        // The element that is breaking from the list isn't the representative element
        // just strip it from the list
        ValueNumberData *defdata = def->valueNumberData();
        if (defdata->classPrev)
            defdata->classPrev->valueNumberData()->classNext = defdata->classNext;
        if (defdata->classNext)
            defdata->classNext->valueNumberData()->classPrev = defdata->classPrev;

        // Make sure there is no nastiness accidentally linking elements into
        // the old list later.
        defdata->classPrev = NULL;
        defdata->classNext = NULL;
    }
}
// Walk the congruence-class list headed by |def| and return the first member
// that is no longer congruent to it (i.e. the point at which the class must be
// split), or NULL when every member still agrees with |def|.
MDefinition *
ValueNumberer::findSplit(MDefinition *def)
{
    MDefinition *member = def->valueNumberData()->classNext;
    while (member != NULL) {
        if (!def->congruentTo(member)) {
            IonSpew(IonSpew_GVN, "Proceeding with split because %d is not congruent to %d",
                    def->id(), member->id());
            return member;
        }
        member = member->valueNumberData()->classNext;
    }
    return NULL;
}
bool RangeAnalysis::analyze() { IonSpew(IonSpew_Range, "Doing range propagation"); for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) { MBasicBlock *block = *iter; for (MDefinitionIterator iter(block); iter; iter++) { MDefinition *def = *iter; def->computeRange(); IonSpew(IonSpew_Range, "computing range on %d", def->id()); SpewRange(def); } if (block->isLoopHeader()) analyzeLoop(block); } return true; }
// Visit |def|: collapse redundant Nops, then try local simplification
// (foldsTo) and finally dominator-based redundancy elimination (leader).
// Returns false only on (OOM-style) failure from a fallible callee.
bool
ValueNumberer::visitDefinition(MDefinition* def)
{
    // Nop does not fit in any of the previous optimization, as its only purpose
    // is to reduce the register pressure by keeping additional resume
    // point. Still, there is no need consecutive list of MNop instructions, and
    // this will slow down every other iteration on the Graph.
    if (def->isNop()) {
        MNop* nop = def->toNop();
        MBasicBlock* block = nop->block();

        // We look backward to know if we can remove the previous Nop, we do not
        // look forward as we would not benefit from the folding made by GVN.
        MInstructionReverseIterator iter = ++block->rbegin(nop);

        // This nop is at the beginning of the basic block, just replace the
        // resume point of the basic block by the one from the resume point.
        if (iter == block->rend()) {
            JitSpew(JitSpew_GVN, " Removing Nop%u", nop->id());
            nop->moveResumePointAsEntry();
            block->discard(nop);
            return true;
        }

        // The previous instruction is also a Nop, no need to keep it anymore.
        MInstruction* prev = *iter;
        if (prev->isNop()) {
            JitSpew(JitSpew_GVN, " Removing Nop%u", prev->id());
            block->discard(prev);
            return true;
        }

        // Nothing more to do for a Nop preceded by a non-Nop instruction.
        return true;
    }

    // If this instruction has a dependency() into an unreachable block, we'll
    // need to update AliasAnalysis.
    MInstruction* dep = def->dependency();
    if (dep != nullptr && (dep->isDiscarded() || dep->block()->isDead())) {
        JitSpew(JitSpew_GVN, " AliasAnalysis invalidated");
        if (updateAliasAnalysis_ && !dependenciesBroken_) {
            // TODO: Recomputing alias-analysis could theoretically expose more
            // GVN opportunities.
            JitSpew(JitSpew_GVN, " Will recompute!");
            dependenciesBroken_ = true;
        }

        // Temporarily clear its dependency, to protect foldsTo, which may
        // wish to use the dependency to do store-to-load forwarding.
        // NOTE(review): this points the dependency at |def| itself, which
        // presumably serves as the "no dependency" sentinel here — confirm.
        def->setDependency(def->toInstruction());
    } else {
        // Remember that there is nothing to restore below.
        dep = nullptr;
    }

    // Look for a simplified form of |def|.
    MDefinition* sim = simplified(def);
    if (sim != def) {
        if (sim == nullptr)
            return false;

        // If |sim| doesn't belong to a block, insert it next to |def|.
        if (sim->block() == nullptr)
            def->block()->insertAfter(def->toInstruction(), sim->toInstruction());

#ifdef DEBUG
        JitSpew(JitSpew_GVN, " Folded %s%u to %s%u",
                def->opName(), def->id(), sim->opName(), sim->id());
#endif
        MOZ_ASSERT(!sim->isDiscarded());
        ReplaceAllUsesWith(def, sim);

        // The node's foldsTo said |def| can be replaced by |rep|. If |def| is a
        // guard, then either |rep| is also a guard, or a guard isn't actually
        // needed, so we can clear |def|'s guard flag and let it be discarded.
        def->setNotGuardUnchecked();

        if (DeadIfUnused(def)) {
            if (!discardDefsRecursively(def))
                return false;

            // If that ended up discarding |sim|, then we're done here.
            if (sim->isDiscarded())
                return true;
        }

        // Otherwise, procede to optimize with |sim| in place of |def|.
        def = sim;
    }

    // Now that foldsTo is done, re-enable the original dependency. Even though
    // it may be pointing into a discarded block, it's still valid for the
    // purposes of detecting congruent loads.
    if (dep != nullptr)
        def->setDependency(dep);

    // Look for a dominating def which makes |def| redundant.
    MDefinition* rep = leader(def);
    if (rep != def) {
        if (rep == nullptr)
            return false;
        if (rep->updateForReplacement(def)) {
#ifdef DEBUG
            JitSpew(JitSpew_GVN, " Replacing %s%u with %s%u",
                    def->opName(), def->id(), rep->opName(), rep->id());
#endif
            ReplaceAllUsesWith(def, rep);

            // The node's congruentTo said |def| is congruent to |rep|, and it's
            // dominated by |rep|. If |def| is a guard, it's covered by |rep|,
            // so we can clear |def|'s guard flag and let it be discarded.
            def->setNotGuardUnchecked();

            if (DeadIfUnused(def)) {
                // discardDef should not add anything to the deadDefs, as the
                // redundant operation should have the same input operands.
                mozilla::DebugOnly<bool> r = discardDef(def);
                MOZ_ASSERT(r,
                           "discardDef shouldn't have tried to add anything to the worklist, "
                           "so it shouldn't have failed");
                MOZ_ASSERT(deadDefs_.empty(),
                           "discardDef shouldn't have added anything to the worklist");
            }
            def = rep;
        }
    }

    return true;
}
bool RangeAnalysis::analyze() { int numBlocks = 0; for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) { numBlocks++; MBasicBlock *curBlock = *i; if (!curBlock->isLoopHeader()) continue; for (MPhiIterator pi(curBlock->phisBegin()); pi != curBlock->phisEnd(); pi++) if (!pi->initCounts()) return false; } IonSpew(IonSpew_Range, "Doing range propagation"); MDefinitionVector worklist; for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) { for (MDefinitionIterator iter(*block); iter; iter++) { MDefinition *def = *iter; AddToWorklist(worklist, def); } } size_t iters = 0; while (!worklist.empty()) { MDefinition *def = PopFromWorklist(worklist); IonSpew(IonSpew_Range, "recomputing range on %d", def->id()); SpewRange(def); if (!def->earlyAbortCheck() && def->recomputeRange()) { JS_ASSERT(def->range()->lower() <= def->range()->upper()); IonSpew(IonSpew_Range, "Range changed; adding consumers"); IonSpew(IonSpew_Range, "New range for %d is: (%d, %d)", def->id(), def->range()->lower(), def->range()->upper()); for (MUseDefIterator use(def); use; use++) { if(!AddToWorklist(worklist, use.def())) return false; } } iters++; if (iters >= numBlocks * 100) return false; } // Cleanup (in case we stopped due to MAX_ITERS) for(size_t i = 0; i < worklist.length(); i++) worklist[i]->setNotInWorklist(); #ifdef DEBUG for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) { for (MDefinitionIterator iter(*block); iter; iter++) { MDefinition *def = *iter; SpewRange(def); JS_ASSERT(def->range()->lower() <= def->range()->upper()); JS_ASSERT(!def->isInWorklist()); } } #endif return true; }
bool RangeAnalysis::addBetaNobes() { IonSpew(IonSpew_Range, "Adding beta nobes"); for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) { MBasicBlock *block = *i; IonSpew(IonSpew_Range, "Looking at block %d", block->id()); BranchDirection branch_dir; MTest *test = block->immediateDominatorBranch(&branch_dir); if (!test || !test->getOperand(0)->isCompare()) continue; MCompare *compare = test->getOperand(0)->toCompare(); MDefinition *left = compare->getOperand(0); MDefinition *right = compare->getOperand(1); int32 bound; MDefinition *val = NULL; JSOp jsop = compare->jsop(); if (branch_dir == FALSE_BRANCH) jsop = analyze::NegateCompareOp(jsop); if (left->isConstant() && left->toConstant()->value().isInt32()) { bound = left->toConstant()->value().toInt32(); val = right; jsop = analyze::ReverseCompareOp(jsop); } else if (right->isConstant() && right->toConstant()->value().isInt32()) { bound = right->toConstant()->value().toInt32(); val = left; } else { MDefinition *smaller = NULL; MDefinition *greater = NULL; if (jsop == JSOP_LT) { smaller = left; greater = right; } else if (jsop == JSOP_GT) { smaller = right; greater = left; } if (smaller && greater) { MBeta *beta; beta = MBeta::New(smaller, Range(JSVAL_INT_MIN, JSVAL_INT_MAX-1)); block->insertBefore(*block->begin(), beta); replaceDominatedUsesWith(smaller, beta, block); beta = MBeta::New(greater, Range(JSVAL_INT_MIN+1, JSVAL_INT_MAX)); block->insertBefore(*block->begin(), beta); replaceDominatedUsesWith(greater, beta, block); } continue; } JS_ASSERT(val); Range comp; switch (jsop) { case JSOP_LE: comp.setUpper(bound); break; case JSOP_LT: if (!SafeSub(bound, 1, &bound)) break; comp.setUpper(bound); break; case JSOP_GE: comp.setLower(bound); break; case JSOP_GT: if (!SafeAdd(bound, 1, &bound)) break; comp.setLower(bound); break; case JSOP_EQ: comp.setLower(bound); comp.setUpper(bound); default: break; // well, for neq we could have // [-\inf, bound-1] U [bound+1, \inf] but we only use contiguous ranges. 
} IonSpew(IonSpew_Range, "Adding beta node for %d", val->id()); MBeta *beta = MBeta::New(val, comp); block->insertBefore(*block->begin(), beta); replaceDominatedUsesWith(val, beta, block); } return true; }
// Handle |def| leaving its congruence class after its value number changed.
// If |def| is the representative, use findSplit() to locate the first member
// that is no longer congruent; everything from that member onward becomes a
// new class with that member as representative, while |def| keeps the prefix.
// A non-representative |def| is simply unlinked from its class list.
void
ValueNumberer::breakClass(MDefinition *def)
{
    if (def->valueNumber() == def->id()) {
        IonSpew(IonSpew_GVN, "Breaking congruence with itself: %d", def->id());
        ValueNumberData *defdata = def->valueNumberData();
        // The representative is always the head of its class list.
        JS_ASSERT(defdata->classPrev == NULL);

        // If the def was the only member of the class, then there is nothing to do.
        if (defdata->classNext == NULL)
            return;

        // If upon closer inspection, we are still equivalent to this class
        // then there isn't anything for us to do.
        MDefinition *newRep = findSplit(def);
        if (!newRep)
            return;

        ValueNumberData *newdata = newRep->valueNumberData();

        // Right now, |defdata| is at the front of the list, and |newdata| is
        // somewhere in the middle.
        //
        // We want to move |defdata| and everything up to but excluding
        // |newdata| to a new list, with |defdata| still as the canonical
        // element.
        //
        // We then want to take |newdata| and everything after, and
        // mark them for processing (since |newdata| is now a new canonical
        // element).
        //
        MDefinition *lastOld = newdata->classPrev;

        JS_ASSERT(lastOld); // newRep is NOT the first element of the list.
        JS_ASSERT(lastOld->valueNumberData()->classNext == newRep);

        // lastOld is now the last element of the old list (congruent to
        // |def|).
        lastOld->valueNumberData()->classNext = NULL;

#ifdef DEBUG
        // Sanity-check the retained prefix: every member must still share
        // |def|'s value number and congruence, and must not include |newRep|.
        for (MDefinition *tmp = def; tmp != NULL; tmp = tmp->valueNumberData()->classNext) {
            JS_ASSERT(tmp->valueNumber() == def->valueNumber());
            JS_ASSERT(tmp->congruentTo(def));
            JS_ASSERT(tmp != newRep);
        }
#endif
        // |newRep| is now the first element of a new list, therefore it is the
        // new canonical element. Mark the remaining elements in the list
        // (including |newRep|).
        newdata->classPrev = NULL;

        IonSpew(IonSpew_GVN, "Choosing a new representative: %d", newRep->id());

        // make the VN of every member in the class the VN of the new representative number.
        for (MDefinition *tmp = newRep; tmp != NULL; tmp = tmp->valueNumberData()->classNext) {
            // if this instruction is already scheduled to be processed, don't do anything.
            if (tmp->isInWorklist())
                continue;
            IonSpew(IonSpew_GVN, "Moving to a new congruence class: %d", tmp->id());
            tmp->setValueNumber(newRep->id());
            markConsumers(tmp);
            markDefinition(tmp);
        }

        // Insert the new representative => number mapping into the table
        // Logically, there should not be anything in the table currently, but
        // old values are never removed, so there's a good chance something will
        // already be there.
        values.put(newRep, newRep->id());
    } else {
        // The element that is breaking from the list isn't the representative element
        // just strip it from the list
        ValueNumberData *defdata = def->valueNumberData();
        if (defdata->classPrev)
            defdata->classPrev->valueNumberData()->classNext = defdata->classNext;
        if (defdata->classNext)
            defdata->classNext->valueNumberData()->classPrev = defdata->classPrev;

        // Make sure there is no nastiness accidentally linking elements into
        // the old list later.
        defdata->classPrev = NULL;
        defdata->classNext = NULL;
    }
}
// Eliminate definitions that are dominated by another definition with the same
// value number, redirecting their uses to the dominating definition.
// Returns false on OOM (hashtable/worklist failures).
bool
ValueNumberer::eliminateRedundancies()
{
    // A definition is 'redundant' iff it is dominated by another definition
    // with the same value number.
    //
    // So, we traverse the dominator tree in pre-order, maintaining a hashmap
    // from value numbers to instructions.
    //
    // For each definition d with value number v, we look up v in the hashmap.
    //
    // If there is a definition d' in the hashmap, and the current traversal
    // index is within that instruction's dominated range, then we eliminate d,
    // replacing all uses of d with uses of d'.
    //
    // If there is no valid definition in the hashtable (the current definition
    // is not in dominated scope), then we insert the current instruction,
    // since it is the most dominant instruction with the given value number.
    InstructionMap defs;

    if (!defs.init())
        return false;

    IonSpew(IonSpew_GVN, "Eliminating redundant instructions");

    // Stack for pre-order CFG traversal.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph_.begin()); i != graph_.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();

        IonSpew(IonSpew_GVN, "Looking at block %d", block->id());

        // Add all immediate dominators to the front of the worklist.
        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }

        // For each instruction, attempt to look up a dominating definition.
        for (MDefinitionIterator iter(block); iter; ) {
            MDefinition *ins = simplify(*iter, true);

            // Instruction was replaced, and all uses have already been fixed.
            if (ins != *iter) {
                iter = block->discardDefAt(iter);
                continue;
            }

            // Instruction has side-effects and cannot be folded.
            if (!ins->isMovable() || ins->isEffectful()) {
                iter++;
                continue;
            }

            MDefinition *dom = findDominatingDef(defs, ins, index);
            if (!dom)
                return false; // Insertion failed.

            // No dominating equivalent found (or replacement rejected):
            // keep this instruction as the dominant one and move on.
            if (dom == ins || !dom->updateForReplacement(ins)) {
                iter++;
                continue;
            }

            IonSpew(IonSpew_GVN, "instruction %d is dominated by instruction %d (from block %d)",
                    ins->id(), dom->id(), dom->block()->id());

            ins->replaceAllUsesWith(dom);

            JS_ASSERT(!ins->hasUses());
            JS_ASSERT(ins->block() == block);
            JS_ASSERT(!ins->isEffectful());
            JS_ASSERT(ins->isMovable());

            iter = ins->block()->discardDefAt(iter);
        }
        index++;
    }

    JS_ASSERT(index == graph_.numBlocks());
    return true;
}
// Assign a value number to every definition (and control instruction) in the
// graph, iterating to a fixed point in the optimistic case. Returns false on
// OOM (hashtable failures).
bool
ValueNumberer::computeValueNumbers()
{
    // At the end of this function, we will have the value numbering stored in
    // each instruction.
    //
    // We also need an "optimistic" value number, for temporary use, which is
    // stored in a hashtable.
    //
    // For the instruction x := y op z, we map (op, VN[y], VN[z]) to a value
    // number, say v. If it is not in the map, we use the instruction id.
    //
    // If the instruction in question's value number is not already
    // v, we break the congruence and set it to v. We repeat until saturation.
    // This will take at worst O(d) time, where d is the loop connectedness
    // of the SSA def/use graph.
    //
    // The algorithm is the simple RPO-based algorithm from
    // "SCC-Based Value Numbering" by Cooper and Simpson.
    //
    // If we are performing a pessimistic pass, then we assume that every
    // definition is in its own congruence class, since we know nothing about
    // values that enter Phi nodes through back edges. We then make one pass
    // through the graph, ignoring back edges. This yields less congruences on
    // any graph with back-edges, but is much faster to perform.

    IonSpew(IonSpew_GVN, "Numbering instructions");

    if (!values.init())
        return false;

    // Stick a VN object onto every mdefinition
    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
        for (MDefinitionIterator iter(*block); iter; iter++)
            iter->setValueNumberData(new ValueNumberData);
        MControlInstruction *jump = block->lastIns();
        jump->setValueNumberData(new ValueNumberData);
    }

    // Assign unique value numbers if pessimistic.
    // It might be productive to do this in the MDefinition constructor or
    // possibly in a previous pass, if it seems reasonable.
    if (pessimisticPass_) {
        for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
            for (MDefinitionIterator iter(*block); iter; iter++)
                iter->setValueNumber(iter->id());
        }
    } else {
        // For each root block, add all of its instructions to the worklist.
        markBlock(*(graph_.begin()));
        if (graph_.osrBlock())
            markBlock(graph_.osrBlock());
    }

    // Iterate until no marked (to-be-processed) definitions remain.
    while (count_ > 0) {
#ifdef DEBUG
        if (!pessimisticPass_) {
            // Cross-check count_ against the actual number of marked
            // definitions in the graph.
            size_t debugCount = 0;
            IonSpew(IonSpew_GVN, "The following instructions require processing:");
            for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
                for (MDefinitionIterator iter(*block); iter; iter++) {
                    if (iter->isInWorklist()) {
                        IonSpew(IonSpew_GVN, "\t%d", iter->id());
                        debugCount++;
                    }
                }
                if (block->lastIns()->isInWorklist()) {
                    IonSpew(IonSpew_GVN, "\t%d", block->lastIns()->id());
                    debugCount++;
                }
            }
            if (!debugCount)
                IonSpew(IonSpew_GVN, "\tNone");
            JS_ASSERT(debugCount == count_);
        }
#endif
        for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
            for (MDefinitionIterator iter(*block); iter; ) {
                if (!isMarked(*iter)) {
                    iter++;
                    continue;
                }

                JS_ASSERT_IF(!pessimisticPass_, count_ > 0);

                unmarkDefinition(*iter);

                MDefinition *ins = simplify(*iter, false);

                // simplify() already redirected the uses; drop the folded
                // definition from the block.
                if (ins != *iter) {
                    iter = block->discardDefAt(iter);
                    continue;
                }

                uint32 value = lookupValue(ins);

                if (!value)
                    return false; // Hashtable insertion failed

                if (ins->valueNumber() != value) {
                    IonSpew(IonSpew_GVN,
                            "Broke congruence for instruction %d (%p) with VN %d (now using %d)",
                            ins->id(), (void *) ins, ins->valueNumber(), value);
                    ins->setValueNumber(value);
                    // Re-queue consumers: their value numbers may change too.
                    markConsumers(ins);
                }

                iter++;
            }

            // Process control flow instruction:
            MControlInstruction *jump = block->lastIns();

            // If we are pessimistic, then this will never get set.
            if (!jump->isInWorklist())
                continue;
            unmarkDefinition(jump);
            if (jump->valueNumber() == 0) {
                jump->setValueNumber(jump->id());
                for (size_t i = 0; i < jump->numSuccessors(); i++)
                    markBlock(jump->getSuccessor(i));
            }
        }

        // If we are doing a pessimistic pass, we only go once through the
        // instruction list.
        if (pessimisticPass_)
            break;
    }
#ifdef DEBUG
    // On exit, no definition may still be queued or left without a value
    // number.
    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
        for (MDefinitionIterator iter(*block); iter; iter++) {
            JS_ASSERT(!iter->isInWorklist());
            JS_ASSERT(iter->valueNumber() != 0);
        }
    }
#endif
    return true;
}
// Visit |def|: try local simplification (foldsTo) first, then dominator-based
// redundancy elimination (leader). Returns false only on failure from a
// fallible callee.
bool
ValueNumberer::visitDefinition(MDefinition *def)
{
    // If this instruction has a dependency() into an unreachable block, we'll
    // need to update AliasAnalysis.
    MDefinition *dep = def->dependency();
    if (dep != nullptr && (dep->isDiscarded() || dep->block()->isDead())) {
        JitSpew(JitSpew_GVN, " AliasAnalysis invalidated");
        if (updateAliasAnalysis_ && !dependenciesBroken_) {
            // TODO: Recomputing alias-analysis could theoretically expose more
            // GVN opportunities.
            JitSpew(JitSpew_GVN, " Will recompute!");
            dependenciesBroken_ = true;
        }

        // Temporarily clear its dependency, to protect foldsTo, which may
        // wish to use the dependency to do store-to-load forwarding.
        // NOTE(review): this points the dependency at |def| itself, which
        // presumably serves as the "no dependency" sentinel here — confirm.
        def->setDependency(def->toInstruction());
    } else {
        // Remember that there is nothing to restore below.
        dep = nullptr;
    }

    // Look for a simplified form of |def|.
    MDefinition *sim = simplified(def);
    if (sim != def) {
        if (sim == nullptr)
            return false;

        // If |sim| doesn't belong to a block, insert it next to |def|.
        if (sim->block() == nullptr)
            def->block()->insertAfter(def->toInstruction(), sim->toInstruction());

#ifdef DEBUG
        JitSpew(JitSpew_GVN, " Folded %s%u to %s%u",
                def->opName(), def->id(), sim->opName(), sim->id());
#endif
        ReplaceAllUsesWith(def, sim);

        // The node's foldsTo said |def| can be replaced by |rep|. If |def| is a
        // guard, then either |rep| is also a guard, or a guard isn't actually
        // needed, so we can clear |def|'s guard flag and let it be discarded.
        def->setNotGuardUnchecked();

        if (DeadIfUnused(def)) {
            if (!discardDefsRecursively(def))
                return false;
        }
        // Continue the visit with |sim| in place of |def|.
        def = sim;
    }

    // Now that foldsTo is done, re-enable the original dependency. Even though
    // it may be pointing into a discarded block, it's still valid for the
    // purposes of detecting congruent loads.
    if (dep != nullptr)
        def->setDependency(dep);

    // Look for a dominating def which makes |def| redundant.
    MDefinition *rep = leader(def);
    if (rep != def) {
        if (rep == nullptr)
            return false;
        if (rep->updateForReplacement(def)) {
#ifdef DEBUG
            JitSpew(JitSpew_GVN, " Replacing %s%u with %s%u",
                    def->opName(), def->id(), rep->opName(), rep->id());
#endif
            ReplaceAllUsesWith(def, rep);

            // The node's congruentTo said |def| is congruent to |rep|, and it's
            // dominated by |rep|. If |def| is a guard, it's covered by |rep|,
            // so we can clear |def|'s guard flag and let it be discarded.
            def->setNotGuardUnchecked();

            if (DeadIfUnused(def)) {
                // discardDef should not add anything to the deadDefs, as the
                // redundant operation should have the same input operands.
                mozilla::DebugOnly<bool> r = discardDef(def);
                MOZ_ASSERT(r,
                           "discardDef shouldn't have tried to add anything to the worklist, "
                           "so it shouldn't have failed");
                MOZ_ASSERT(deadDefs_.empty(),
                           "discardDef shouldn't have added anything to the worklist");
            }
            def = rep;
        }
    }

    return true;
}
// Visit |def|: collapse redundant Nops (including a Nop whose resume point
// keeps no extra operands live), skip recovered-on-bailout instructions, then
// try local simplification (foldsTo) and dominator-based redundancy
// elimination (leader). Returns false only on failure from a fallible callee.
bool
ValueNumberer::visitDefinition(MDefinition* def)
{
    // Nop does not fit in any of the previous optimization, as its only purpose
    // is to reduce the register pressure by keeping additional resume
    // point. Still, there is no need consecutive list of MNop instructions, and
    // this will slow down every other iteration on the Graph.
    if (def->isNop()) {
        MNop* nop = def->toNop();
        MBasicBlock* block = nop->block();

        // We look backward to know if we can remove the previous Nop, we do not
        // look forward as we would not benefit from the folding made by GVN.
        MInstructionReverseIterator iter = ++block->rbegin(nop);

        // This nop is at the beginning of the basic block, just replace the
        // resume point of the basic block by the one from the resume point.
        if (iter == block->rend()) {
            JitSpew(JitSpew_GVN, " Removing Nop%u", nop->id());
            nop->moveResumePointAsEntry();
            block->discard(nop);
            return true;
        }

        // The previous instruction is also a Nop, no need to keep it anymore.
        MInstruction* prev = *iter;
        if (prev->isNop()) {
            JitSpew(JitSpew_GVN, " Removing Nop%u", prev->id());
            block->discard(prev);
            return true;
        }

        // The Nop is introduced to capture the result and make sure the operands
        // are not live anymore when there are no further uses. Though when
        // all operands are still needed the Nop doesn't decrease the liveness
        // and can get removed.
        MResumePoint* rp = nop->resumePoint();
        if (rp && rp->numOperands() > 0 &&
            rp->getOperand(rp->numOperands() - 1) == prev &&
            !nop->block()->lastIns()->isThrow() &&
            !prev->isAssertRecoveredOnBailout())
        {
            // Count how many of |prev|'s operands are also kept alive by the
            // resume point itself.
            size_t numOperandsLive = 0;
            for (size_t j = 0; j < prev->numOperands(); j++) {
                for (size_t i = 0; i < rp->numOperands(); i++) {
                    if (prev->getOperand(j) == rp->getOperand(i)) {
                        numOperandsLive++;
                        break;
                    }
                }
            }

            // All of |prev|'s operands are live anyway, so the Nop does not
            // shorten any lifetime and can go.
            if (numOperandsLive == prev->numOperands()) {
                JitSpew(JitSpew_GVN, " Removing Nop%u", nop->id());
                block->discard(nop);
            }
        }

        return true;
    }

    // Skip optimizations on instructions which are recovered on bailout, to
    // avoid mixing instructions which are recovered on bailouts with
    // instructions which are not.
    if (def->isRecoveredOnBailout())
        return true;

    // If this instruction has a dependency() into an unreachable block, we'll
    // need to update AliasAnalysis.
    MDefinition* dep = def->dependency();
    if (dep != nullptr && (dep->isDiscarded() || dep->block()->isDead())) {
        JitSpew(JitSpew_GVN, " AliasAnalysis invalidated");
        if (updateAliasAnalysis_ && !dependenciesBroken_) {
            // TODO: Recomputing alias-analysis could theoretically expose more
            // GVN opportunities.
            JitSpew(JitSpew_GVN, " Will recompute!");
            dependenciesBroken_ = true;
        }

        // Temporarily clear its dependency, to protect foldsTo, which may
        // wish to use the dependency to do store-to-load forwarding.
        // NOTE(review): this points the dependency at |def| itself, which
        // presumably serves as the "no dependency" sentinel here — confirm.
        def->setDependency(def->toInstruction());
    } else {
        // Remember that there is nothing to restore below.
        dep = nullptr;
    }

    // Look for a simplified form of |def|.
    MDefinition* sim = simplified(def);
    if (sim != def) {
        if (sim == nullptr)
            return false;

        bool isNewInstruction = sim->block() == nullptr;

        // If |sim| doesn't belong to a block, insert it next to |def|.
        if (isNewInstruction)
            def->block()->insertAfter(def->toInstruction(), sim->toInstruction());

#ifdef JS_JITSPEW
        JitSpew(JitSpew_GVN, " Folded %s%u to %s%u",
                def->opName(), def->id(), sim->opName(), sim->id());
#endif
        MOZ_ASSERT(!sim->isDiscarded());
        ReplaceAllUsesWith(def, sim);

        // The node's foldsTo said |def| can be replaced by |rep|. If |def| is a
        // guard, then either |rep| is also a guard, or a guard isn't actually
        // needed, so we can clear |def|'s guard flag and let it be discarded.
        def->setNotGuardUnchecked();

        // Propagate the range-bailout guard flag onto the replacement.
        if (def->isGuardRangeBailouts())
            sim->setGuardRangeBailoutsUnchecked();

        if (DeadIfUnused(def)) {
            if (!discardDefsRecursively(def))
                return false;

            // If that ended up discarding |sim|, then we're done here.
            if (sim->isDiscarded())
                return true;
        }

        if (!rerun_ && def->isPhi() && !sim->isPhi()) {
            rerun_ = true;
            JitSpew(JitSpew_GVN, " Replacing phi%u may have enabled cascading optimisations; "
                    "will re-run", def->id());
        }

        // Otherwise, procede to optimize with |sim| in place of |def|.
        def = sim;

        // If the simplified instruction was already part of the graph, then we
        // probably already visited and optimized this instruction.
        if (!isNewInstruction)
            return true;
    }

    // Now that foldsTo is done, re-enable the original dependency. Even though
    // it may be pointing into a discarded block, it's still valid for the
    // purposes of detecting congruent loads.
    if (dep != nullptr)
        def->setDependency(dep);

    // Look for a dominating def which makes |def| redundant.
    MDefinition* rep = leader(def);
    if (rep != def) {
        if (rep == nullptr)
            return false;
        if (rep->updateForReplacement(def)) {
#ifdef JS_JITSPEW
            JitSpew(JitSpew_GVN, " Replacing %s%u with %s%u",
                    def->opName(), def->id(), rep->opName(), rep->id());
#endif
            ReplaceAllUsesWith(def, rep);

            // The node's congruentTo said |def| is congruent to |rep|, and it's
            // dominated by |rep|. If |def| is a guard, it's covered by |rep|,
            // so we can clear |def|'s guard flag and let it be discarded.
            def->setNotGuardUnchecked();

            if (DeadIfUnused(def)) {
                // discardDef should not add anything to the deadDefs, as the
                // redundant operation should have the same input operands.
                mozilla::DebugOnly<bool> r = discardDef(def);
                MOZ_ASSERT(r,
                           "discardDef shouldn't have tried to add anything to the worklist, "
                           "so it shouldn't have failed");
                MOZ_ASSERT(deadDefs_.empty(),
                           "discardDef shouldn't have added anything to the worklist");
            }
            def = rep;
        }
    }

    return true;
}