bool
EdgeCaseAnalysis::AllUsesTruncate(MInstruction *m)
{
    for (MUseIterator use = m->usesBegin(); use != m->usesEnd(); use++) {
        // See bug 809485 for why this is allowed.
        if (use->node()->isResumePoint())
            continue;

        MDefinition *def = use->node()->toDefinition();
        if (def->isTruncateToInt32())
            continue;
        if (def->isBitAnd())
            continue;
        if (def->isBitOr())
            continue;
        if (def->isBitXor())
            continue;
        if (def->isLsh())
            continue;
        if (def->isRsh())
            continue;
        if (def->isBitNot())
            continue;
        if (def->isAdd() && def->toAdd()->isTruncated())
            continue;
        if (def->isSub() && def->toSub()->isTruncated())
            continue;
        // Cannot use divide, since |truncate(int32(x/y) + int32(a/b)) != truncate(x/y + a/b)|.
        return false;
    }
    return true;
}
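// Illustrative aside (not from the original source): a standalone sketch of the
// divide counterexample referenced in the comment above, using only builtin
// types. With x/y == a/b == 4.5, truncating each quotient separately gives
// 4 + 4 == 8, while truncating the untruncated sum gives int32_t(9.0) == 9, so
// the two disagree and a divide use can never be treated as a truncating use.
static bool
DivideTruncationCounterexample()
{
    double x = 9, y = 2, a = 9, b = 2;
    int32_t perTerm = int32_t(x / y) + int32_t(a / b); // 4 + 4 == 8
    int32_t whole = int32_t(x / y + a / b);            // int32_t(9.0) == 9
    return perTerm != whole;                           // true: the results differ
}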
template<typename MAsmJSHeapAccessType>
void
EffectiveAddressAnalysis::analyzeAsmHeapAccess(MAsmJSHeapAccessType* ins)
{
    MDefinition* ptr = ins->ptr();

    if (ptr->isConstantValue()) {
        // Look for heap[i] where i is a constant offset, and fold the offset.
        // By doing the folding now, we simplify the task of codegen; the offset
        // is always the address mode immediate. This also allows it to avoid
        // a situation where the sum of a constant pointer value and a non-zero
        // offset doesn't actually fit into the address mode immediate.
        int32_t imm = ptr->constantValue().toInt32();
        if (imm != 0 && tryAddDisplacement(ins, imm)) {
            MInstruction* zero = MConstant::New(graph_.alloc(), Int32Value(0));
            ins->block()->insertBefore(ins, zero);
            ins->replacePtr(zero);
        }
    } else if (ptr->isAdd()) {
        // Look for heap[a+i] where i is a constant offset, and fold the offset.
        // Alignment masks have already been moved out of the way by the
        // Alignment Mask Analysis pass.
        MDefinition* op0 = ptr->toAdd()->getOperand(0);
        MDefinition* op1 = ptr->toAdd()->getOperand(1);
        if (op0->isConstantValue())
            mozilla::Swap(op0, op1);
        if (op1->isConstantValue()) {
            int32_t imm = op1->constantValue().toInt32();
            if (tryAddDisplacement(ins, imm))
                ins->replacePtr(op0);
        }
    }
}
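// Minimal sketch (assumed semantics, not the real helper): tryAddDisplacement
// above should only succeed when the folded displacement still fits in the
// addressing-mode immediate; otherwise the constant stays in the pointer. The
// |immediateMax| parameter here is hypothetical, standing in for whatever bound
// the target's address mode actually imposes.
static bool
TryAddDisplacementSketch(uint32_t* offset, int32_t imm, uint32_t immediateMax)
{
    // asm.js pointer arithmetic wraps modulo 2^32.
    uint32_t newOffset = *offset + uint32_t(imm);
    bool wrapped = imm >= 0 ? newOffset < *offset : newOffset > *offset;
    if (wrapped || newOffset > immediateMax)
        return false; // folded offset would not fit the immediate field
    *offset = newOffset;
    return true;
}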
// Determine whether start (a phi node within the loop) can never become
// smaller than the initial value at loop entry; i.e., return true only if
// the value is non-decreasing on every path through the loop.
bool
Loop::nonDecreasing(MDefinition *initial, MDefinition *start)
{
    MDefinitionVector worklist;
    MDefinitionVector seen;

    if (!worklist.append(start))
        return false;

    while (!worklist.empty()) {
        MDefinition *def = worklist.popCopy();

        bool duplicate = false;
        for (size_t i = 0; i < seen.length() && !duplicate; i++) {
            if (seen[i] == def)
                duplicate = true;
        }
        if (duplicate)
            continue;
        if (!seen.append(def))
            return false;

        if (def->type() != MIRType_Int32)
            return false;

        if (!isInLoop(def)) {
            if (def != initial)
                return false;
            continue;
        }

        if (def->isPhi()) {
            MPhi *phi = def->toPhi();
            for (size_t i = 0; i < phi->numOperands(); i++) {
                if (!worklist.append(phi->getOperand(i)))
                    return false;
            }
            continue;
        }

        if (def->isAdd()) {
            if (def->toAdd()->specialization() != MIRType_Int32)
                return false;

            MDefinition *lhs = def->toAdd()->getOperand(0);
            MDefinition *rhs = def->toAdd()->getOperand(1);
            if (!rhs->isConstant())
                return false;

            Value v = rhs->toConstant()->value();
            if (!v.isInt32() || v.toInt32() < 0)
                return false;

            if (!worklist.append(lhs))
                return false;
            continue;
        }

        return false;
    }

    return true;
}
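// Worked example (illustrative, not from the original source): for a loop like
// |for (i = x; cond; i = i + 2)|, the loop phi for i has operands {x, i + 2}.
// The walk visits the phi, then the add: its rhs is the constant 2 (an int32
// >= 0) and its lhs chains back to the already-seen phi, so
// nonDecreasing(x, phi) returns true. With |i = i + (-1)| the constant is
// negative, and |i = i - 1| is a subtraction the walk doesn't recognize, so in
// either form the function returns false.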
static void
AnalyzeAsmHeapAddress(MDefinition* ptr, MIRGraph& graph)
{
    // Fold (a+i)&m to (a&m)+i, provided that this doesn't change the result,
    // since the users of the BitAnd include heap accesses. This will expose
    // the redundancy for GVN when expressions like this:
    //   a&m
    //   (a+1)&m,
    //   (a+2)&m,
    // are transformed into this:
    //   a&m
    //   (a&m)+1
    //   (a&m)+2
    // and it will allow the constants to be folded by the
    // EffectiveAddressAnalysis pass.
    //
    // Putting the add on the outside might seem like it exposes other users of
    // the expression to the possibility of i32 overflow, if we aren't in wasm
    // and they aren't naturally truncating. However, since we use MAdd::New
    // with MIRType::Int32, we make sure that the value is truncated, just as it
    // would be by the MBitAnd.

    MOZ_ASSERT(IsCompilingWasm());

    if (!ptr->isBitAnd())
        return;

    MDefinition* lhs = ptr->toBitAnd()->getOperand(0);
    MDefinition* rhs = ptr->toBitAnd()->getOperand(1);
    if (lhs->isConstant())
        mozilla::Swap(lhs, rhs);
    if (!lhs->isAdd() || !rhs->isConstant())
        return;

    MDefinition* op0 = lhs->toAdd()->getOperand(0);
    MDefinition* op1 = lhs->toAdd()->getOperand(1);
    if (op0->isConstant())
        mozilla::Swap(op0, op1);
    if (!op1->isConstant())
        return;

    uint32_t i = op1->toConstant()->toInt32();
    uint32_t m = rhs->toConstant()->toInt32();
    if (!IsAlignmentMask(m) || (i & m) != i)
        return;

    // The pattern was matched! Produce the replacement expression.
    MInstruction* and_ = MBitAnd::New(graph.alloc(), op0, rhs, MIRType::Int32);
    ptr->block()->insertBefore(ptr->toBitAnd(), and_);
    MInstruction* add = MAdd::New(graph.alloc(), and_, op1, MIRType::Int32);
    ptr->block()->insertBefore(ptr->toBitAnd(), add);
    ptr->replaceAllUsesWith(add);
    ptr->block()->discard(ptr->toBitAnd());
}
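// Worked instance (illustrative, not from the original source): take
// m == ~7 (0xFFFFFFF8, leading ones and trailing zeros, so IsAlignmentMask(m)
// holds) and i == 16. Then (i & m) == i, and (a+16)&m safely rewrites to
// (a&m)+16 because 16 is a multiple of the 8-byte alignment. With i == 4 the
// guard fails: (4 & ~7) == 0 != 4, and indeed for a == 5 we get
// (5+4)&~7 == 8 but ((5&~7)+4) == 4, so the rewrite would change the result.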
template<typename AsmJSMemoryAccess>
void
EffectiveAddressAnalysis::analyzeAsmJSHeapAccess(AsmJSMemoryAccess* ins)
{
    MDefinition* base = ins->base();

    if (base->isConstant()) {
        // Look for heap[i] where i is a constant offset, and fold the offset.
        // By doing the folding now, we simplify the task of codegen; the offset
        // is always the address mode immediate. This also allows it to avoid
        // a situation where the sum of a constant pointer value and a non-zero
        // offset doesn't actually fit into the address mode immediate.
        int32_t imm = base->toConstant()->toInt32();
        if (imm != 0 && tryAddDisplacement(ins, imm)) {
            MInstruction* zero = MConstant::New(graph_.alloc(), Int32Value(0));
            ins->block()->insertBefore(ins, zero);
            ins->replaceBase(zero);
        }

        // If the index is within the minimum heap length, we can optimize
        // away the bounds check.
        if (imm >= 0) {
            int32_t end = (uint32_t)imm + ins->byteSize();
            if (end >= imm && (uint32_t)end <= mir_->minWasmHeapLength())
                ins->removeBoundsCheck();
        }
    } else if (base->isAdd()) {
        // Look for heap[a+i] where i is a constant offset, and fold the offset.
        // Alignment masks have already been moved out of the way by the
        // Alignment Mask Analysis pass.
        MDefinition* op0 = base->toAdd()->getOperand(0);
        MDefinition* op1 = base->toAdd()->getOperand(1);
        if (op0->isConstant())
            mozilla::Swap(op0, op1);
        if (op1->isConstant()) {
            int32_t imm = op1->toConstant()->toInt32();
            if (tryAddDisplacement(ins, imm))
                ins->replaceBase(op0);
        }
    }
}
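// Worked example (illustrative, not from the original source): for a 4-byte
// load at constant base 4, end == 4 + 4 == 8; if the minimum heap length is at
// least 8 bytes, the access can never be out of bounds, so the bounds check is
// dropped. The |end >= imm| test rejects the case where |imm + byteSize()|
// overflowed int32 and wrapped negative, which would otherwise pass the
// unsigned comparison against the minimum heap length.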
int
EdgeCaseAnalysis::AllUsesTruncate(MInstruction *m)
{
    // If all uses truncate, the return value is at least 1. If any use
    // doesn't truncate, 0 is explicitly returned.
    int ret = 1;
    for (MUseIterator use = m->usesBegin(); use != m->usesEnd(); use++) {
        // See bug 809485 for why this is allowed.
        if (use->node()->isResumePoint())
            continue;

        MDefinition *def = use->node()->toDefinition();
        if (def->isTruncateToInt32())
            continue;
        if (def->isBitAnd())
            continue;
        if (def->isBitOr())
            continue;
        if (def->isBitXor())
            continue;
        if (def->isLsh())
            continue;
        if (def->isRsh())
            continue;
        if (def->isBitNot())
            continue;
        if (def->isAdd() && def->toAdd()->isTruncated()) {
            ret = Max(ret, def->toAdd()->isTruncated() + 1);
            continue;
        }
        if (def->isSub() && def->toSub()->isTruncated()) {
            ret = Max(ret, def->toSub()->isTruncated() + 1);
            continue;
        }
        // Cannot use divide, since |truncate(int32(x/y) + int32(a/b)) != truncate(x/y + a/b)|.
        return 0;
    }
    return ret;
}
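// Illustrative reading (an interpretation, assuming isTruncated() here returns
// a truncation depth rather than a bool): bitop and explicit-truncate uses
// leave ret at 1, while a truncated add/sub use raises ret to that use's own
// depth plus one. E.g. if the only use is a truncated add with
// isTruncated() == 1, the function returns 2, telling the caller how many
// levels of truncated arithmetic separate this value from its final int32
// consumers.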
LoopIterationBound *
RangeAnalysis::analyzeLoopIterationCount(MBasicBlock *header,
                                         MTest *test, BranchDirection direction)
{
    SimpleLinearSum lhs(NULL, 0);
    MDefinition *rhs;
    bool lessEqual;
    if (!ExtractLinearInequality(test, direction, &lhs, &rhs, &lessEqual))
        return NULL;

    // Ensure the rhs is a loop invariant term.
    if (rhs && rhs->block()->isMarked()) {
        if (lhs.term && lhs.term->block()->isMarked())
            return NULL;
        MDefinition *temp = lhs.term;
        lhs.term = rhs;
        rhs = temp;
        if (!SafeSub(0, lhs.constant, &lhs.constant))
            return NULL;
        lessEqual = !lessEqual;
    }

    JS_ASSERT_IF(rhs, !rhs->block()->isMarked());

    // Ensure the lhs is a phi node from the start of the loop body.
    if (!lhs.term || !lhs.term->isPhi() || lhs.term->block() != header)
        return NULL;

    // Check that the value of the lhs changes by a constant amount with each
    // loop iteration. This requires that the lhs be written in every loop
    // iteration with a value that is a constant difference from its value at
    // the start of the iteration.

    if (lhs.term->toPhi()->numOperands() != 2)
        return NULL;

    // The first operand of the phi should be the lhs' value at the start of
    // the first executed iteration, and not a value written which could
    // replace the second operand below during the middle of execution.
    MDefinition *lhsInitial = lhs.term->toPhi()->getOperand(0);
    if (lhsInitial->block()->isMarked())
        return NULL;

    // The second operand of the phi should be a value written by an add/sub
    // in every loop iteration, i.e. in a block which dominates the backedge.
    MDefinition *lhsWrite = lhs.term->toPhi()->getOperand(1);
    if (lhsWrite->isBeta())
        lhsWrite = lhsWrite->getOperand(0);
    if (!lhsWrite->isAdd() && !lhsWrite->isSub())
        return NULL;
    if (!lhsWrite->block()->isMarked())
        return NULL;
    MBasicBlock *bb = header->backedge();
    for (; bb != lhsWrite->block() && bb != header; bb = bb->immediateDominator()) {}
    if (bb != lhsWrite->block())
        return NULL;

    SimpleLinearSum lhsModified = ExtractLinearSum(lhsWrite);

    // Check that the value of the lhs at the backedge is of the form
    // 'old(lhs) + N'. We can be sure that old(lhs) is the value at the start
    // of the iteration, and not that written to lhs in a previous iteration,
    // as such a previous value could not appear directly in the addition:
    // it could not be stored in lhs as the lhs add/sub executes in every
    // iteration, and if it were stored in another variable its use here would
    // be as an operand to a phi node for that variable.
    if (lhsModified.term != lhs.term)
        return NULL;

    LinearSum bound;

    if (lhsModified.constant == 1 && !lessEqual) {
        // The value of lhs is 'initial(lhs) + iterCount' and this will end
        // execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound
        // on the number of backedges executed is:
        //
        // initial(lhs) + iterCount + lhsN == rhs
        // iterCount == rhs - initial(lhs) - lhsN

        if (rhs) {
            if (!bound.add(rhs, 1))
                return NULL;
        }
        if (!bound.add(lhsInitial, -1))
            return NULL;

        int32_t lhsConstant;
        if (!SafeSub(0, lhs.constant, &lhsConstant))
            return NULL;
        if (!bound.add(lhsConstant))
            return NULL;
    } else if (lhsModified.constant == -1 && lessEqual) {
        // The value of lhs is 'initial(lhs) - iterCount'. Similar to the above
        // case, an upper bound on the number of backedges executed is:
        //
        // initial(lhs) - iterCount + lhsN == rhs
        // iterCount == initial(lhs) - rhs + lhsN

        if (!bound.add(lhsInitial, 1))
            return NULL;
        if (rhs) {
            if (!bound.add(rhs, -1))
                return NULL;
        }
        if (!bound.add(lhs.constant))
            return NULL;
    } else {
        return NULL;
    }

    return new LoopIterationBound(header, test, bound);
}
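// Worked example (illustrative, not from the original source): for
// |for (i = 0; i < n; i++)| with n loop invariant, the inequality extracts as
// lhs == {i, 0}, rhs == n, lessEqual == false. The phi for i has operands
// {0, i + 1}, so lhsModified.constant == 1 and the first case applies:
// bound == rhs - initial(lhs) - lhsN == n - 0 - 0 == n, i.e. the loop's
// backedge is taken at most n times.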