Example #1
File: Lint.cpp  Project: adiaaida/llvm
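// Lint check: report undefined behavior when the divisor of a urem can be
// shown to be zero.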
void Lint::visitURem(BinaryOperator &I) {
  Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
         "Undefined behavior: Division by zero", &I);
}
Example #2
bool LazyValueInfoCache::solveBlockValue(Value *Val, BasicBlock *BB) {
  if (isa<Constant>(Val))
    return true;

  ValueCacheEntryTy &Cache = lookup(Val);
  SeenBlocks.insert(BB);
  LVILatticeVal &BBLV = Cache[BB];
  
  // OverDefinedCacheUpdater is a helper object that will update
  // the OverDefinedCache for us when this method exits.  Make sure to
  // call markResult on it as we exit, passing a bool to indicate if the
  // cache needs updating, i.e. if we have solved a new value or not.
  OverDefinedCacheUpdater ODCacheUpdater(Val, BB, BBLV, this);

  // If we've already computed this block's value, return it.
  if (!BBLV.isUndefined()) {
    DEBUG(dbgs() << "  reuse BB '" << BB->getName() << "' val=" << BBLV <<'\n');
    
    // Since we're reusing a cached value here, we don't need to update the
    // OverDefinedCache.  The cache will have been properly updated
    // whenever the cached value was inserted.
    ODCacheUpdater.markResult(false);
    return true;
  }

  // Otherwise, this is the first time we're seeing this block.  Reset the
  // lattice value to overdefined, so that cycles will terminate and be
  // conservatively correct.
  BBLV.markOverdefined();
  
  Instruction *BBI = dyn_cast<Instruction>(Val);
  if (BBI == 0 || BBI->getParent() != BB) {
    return ODCacheUpdater.markResult(solveBlockValueNonLocal(BBLV, Val, BB));
  }

  if (PHINode *PN = dyn_cast<PHINode>(BBI)) {
    return ODCacheUpdater.markResult(solveBlockValuePHINode(BBLV, PN, BB));
  }

  if (AllocaInst *AI = dyn_cast<AllocaInst>(BBI)) {
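    // The address returned by an alloca is never null, so the best we can say
    // is "not equal to the null pointer".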
    BBLV = LVILatticeVal::getNot(ConstantPointerNull::get(AI->getType()));
    return ODCacheUpdater.markResult(true);
  }

  // We can only analyze the definitions of certain classes of instructions
  // (integral binops and casts at the moment), so bail if this isn't one.
  LVILatticeVal Result;
  if ((!isa<BinaryOperator>(BBI) && !isa<CastInst>(BBI)) ||
     !BBI->getType()->isIntegerTy()) {
    DEBUG(dbgs() << " compute BB '" << BB->getName()
                 << "' - overdefined because inst def found.\n");
    BBLV.markOverdefined();
    return ODCacheUpdater.markResult(true);
  }

  // FIXME: We're currently limited to binops with a constant RHS.  This should
  // be improved.
  BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI);
  if (BO && !isa<ConstantInt>(BO->getOperand(1))) { 
    DEBUG(dbgs() << " compute BB '" << BB->getName()
                 << "' - overdefined because inst def found.\n");

    BBLV.markOverdefined();
    return ODCacheUpdater.markResult(true);
  }

  return ODCacheUpdater.markResult(solveBlockValueConstantRange(BBLV, BBI, BB));
}
Example #3
/// HandleFloatingPointIV - If the loop has a floating point induction variable
/// then insert a corresponding integer induction variable if possible.
/// For example,
/// for(double i = 0; i < 10000; ++i)
///   bar(i)
/// is converted into
/// for(int i = 0; i < 10000; ++i)
///   bar((double)i);
///
void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
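  // PN is expected to have exactly two incoming values.  L->contains(...) is
  // 0 or 1, so IncomingEdge indexes the value coming from outside the loop
  // (the preheader) and BackEdge indexes the value from the loop latch.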
  unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
  unsigned BackEdge     = IncomingEdge^1;

  // Check incoming value.
  ConstantFP *InitValueVal =
    dyn_cast<ConstantFP>(PN->getIncomingValue(IncomingEdge));

  int64_t InitValue;
  if (!InitValueVal || !ConvertToSInt(InitValueVal->getValueAPF(), InitValue))
    return;

  // Check IV increment. Reject this PN if increment operation is not
  // an add or increment value can not be represented by an integer.
  BinaryOperator *Incr =
    dyn_cast<BinaryOperator>(PN->getIncomingValue(BackEdge));
  if (Incr == 0 || Incr->getOpcode() != Instruction::FAdd) return;

  // If this is not an add of the PHI with a constantfp, or if the constant fp
  // is not an integer, bail out.
  ConstantFP *IncValueVal = dyn_cast<ConstantFP>(Incr->getOperand(1));
  int64_t IncValue;
  if (IncValueVal == 0 || Incr->getOperand(0) != PN ||
      !ConvertToSInt(IncValueVal->getValueAPF(), IncValue))
    return;

  // Check Incr uses. One user is PN and the other user is an exit condition
  // used by the conditional terminator.
  Value::use_iterator IncrUse = Incr->use_begin();
  Instruction *U1 = cast<Instruction>(*IncrUse++);
  if (IncrUse == Incr->use_end()) return;
  Instruction *U2 = cast<Instruction>(*IncrUse++);
  if (IncrUse != Incr->use_end()) return;

  // Find exit condition, which is an fcmp.  If it doesn't exist, or if it isn't
  // only used by a branch, we can't transform it.
  FCmpInst *Compare = dyn_cast<FCmpInst>(U1);
  if (!Compare)
    Compare = dyn_cast<FCmpInst>(U2);
  if (Compare == 0 || !Compare->hasOneUse() ||
      !isa<BranchInst>(Compare->use_back()))
    return;

  BranchInst *TheBr = cast<BranchInst>(Compare->use_back());

  // We need to verify that the branch actually controls the iteration count
  // of the loop.  If not, the new IV can overflow and no one will notice.
  // The branch block must be in the loop and one of the successors must be out
  // of the loop.
  assert(TheBr->isConditional() && "Can't use fcmp if not conditional");
  if (!L->contains(TheBr->getParent()) ||
      (L->contains(TheBr->getSuccessor(0)) &&
       L->contains(TheBr->getSuccessor(1))))
    return;


  // If it isn't a comparison with an integer-as-fp (the exit value), we can't
  // transform it.
  ConstantFP *ExitValueVal = dyn_cast<ConstantFP>(Compare->getOperand(1));
  int64_t ExitValue;
  if (ExitValueVal == 0 ||
      !ConvertToSInt(ExitValueVal->getValueAPF(), ExitValue))
    return;

  // Find new predicate for integer comparison.
  CmpInst::Predicate NewPred = CmpInst::BAD_ICMP_PREDICATE;
  switch (Compare->getPredicate()) {
  default: return;  // Unknown comparison.
  case CmpInst::FCMP_OEQ:
  case CmpInst::FCMP_UEQ: NewPred = CmpInst::ICMP_EQ; break;
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UNE: NewPred = CmpInst::ICMP_NE; break;
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_UGT: NewPred = CmpInst::ICMP_SGT; break;
  case CmpInst::FCMP_OGE:
  case CmpInst::FCMP_UGE: NewPred = CmpInst::ICMP_SGE; break;
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_ULT: NewPred = CmpInst::ICMP_SLT; break;
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_ULE: NewPred = CmpInst::ICMP_SLE; break;
  }

  // We convert the floating point induction variable to a signed i32 value if
  // we can.  This is only safe if the comparison will not overflow in a way
  // that won't be trapped by the integer equivalent operations.  Check for this
  // now.
  // TODO: We could use i64 if it is native and the range requires it.

  // The start/stride/exit values must all fit in signed i32.
  if (!isInt<32>(InitValue) || !isInt<32>(IncValue) || !isInt<32>(ExitValue))
    return;

  // If not actually striding (add x, 0.0), avoid touching the code.
  if (IncValue == 0)
    return;

  // Positive and negative strides have different safety conditions.
  if (IncValue > 0) {
    // If we have a positive stride, we require the init to be less than the
    // exit value and an equality or less than comparison.
    if (InitValue >= ExitValue ||
        NewPred == CmpInst::ICMP_SGT || NewPred == CmpInst::ICMP_SGE)
      return;

    uint32_t Range = uint32_t(ExitValue-InitValue);
    if (NewPred == CmpInst::ICMP_SLE) {
      // Normalize SLE -> SLT, check for infinite loop.
      if (++Range == 0) return;  // Range overflows.
    }

    unsigned Leftover = Range % uint32_t(IncValue);

    // If this is an equality comparison, we require that the strided value
    // exactly land on the exit value, otherwise the IV condition will wrap
    // around and do things the fp IV wouldn't.
    if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) &&
        Leftover != 0)
      return;

    // If the stride would wrap around the i32 before exiting, we can't
    // transform the IV.
    if (Leftover != 0 && int32_t(ExitValue+IncValue) < ExitValue)
      return;

  } else {
    // If we have a negative stride, we require the init to be greater than the
    // exit value and an equality or greater than comparison.
    if (InitValue <= ExitValue ||
        NewPred == CmpInst::ICMP_SLT || NewPred == CmpInst::ICMP_SLE)
      return;

    uint32_t Range = uint32_t(InitValue-ExitValue);
    if (NewPred == CmpInst::ICMP_SGE) {
      // Normalize SGE -> SGT, check for infinite loop.
      if (++Range == 0) return;  // Range overflows.
    }

    unsigned Leftover = Range % uint32_t(-IncValue);

    // If this is an equality comparison, we require that the strided value
    // exactly land on the exit value, otherwise the IV condition will wrap
    // around and do things the fp IV wouldn't.
    if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) &&
        Leftover != 0)
      return;

    // If the stride would wrap around the i32 before exiting, we can't
    // transform the IV.
    if (Leftover != 0 && int32_t(ExitValue+IncValue) > ExitValue)
      return;
  }

  const IntegerType *Int32Ty = Type::getInt32Ty(PN->getContext());

  // Insert new integer induction variable.
  PHINode *NewPHI = PHINode::Create(Int32Ty, PN->getName()+".int", PN);
  NewPHI->addIncoming(ConstantInt::get(Int32Ty, InitValue),
                      PN->getIncomingBlock(IncomingEdge));

  Value *NewAdd =
    BinaryOperator::CreateAdd(NewPHI, ConstantInt::get(Int32Ty, IncValue),
                              Incr->getName()+".int", Incr);
  NewPHI->addIncoming(NewAdd, PN->getIncomingBlock(BackEdge));

  ICmpInst *NewCompare = new ICmpInst(TheBr, NewPred, NewAdd,
                                      ConstantInt::get(Int32Ty, ExitValue),
                                      Compare->getName());

  // In the following deletions, PN may become dead and may be deleted.
  // Use a WeakVH to observe whether this happens.
  WeakVH WeakPH = PN;

  // Delete the old floating point exit comparison.  The branch starts using the
  // new comparison.
  NewCompare->takeName(Compare);
  Compare->replaceAllUsesWith(NewCompare);
  RecursivelyDeleteTriviallyDeadInstructions(Compare);

  // Delete the old floating point increment.
  Incr->replaceAllUsesWith(UndefValue::get(Incr->getType()));
  RecursivelyDeleteTriviallyDeadInstructions(Incr);

  // If the FP induction variable still has uses, this is because something else
  // in the loop uses its value.  In order to canonicalize the induction
  // variable, we chose to eliminate the IV and rewrite it in terms of an
  // int->fp cast.
  //
  // We give preference to sitofp over uitofp because it is faster on most
  // platforms.
  if (WeakPH) {
    Value *Conv = new SIToFPInst(NewPHI, PN->getType(), "indvar.conv",
                                 PN->getParent()->getFirstNonPHI());
    PN->replaceAllUsesWith(Conv);
    RecursivelyDeleteTriviallyDeadInstructions(PN);
  }

  // Add a new IVUsers entry for the newly-created integer PHI.
  IU->AddUsersIfInteresting(NewPHI);
}
Example #4
void IndVarSimplify::EliminateIVRemainders() {
  SmallVector<WeakVH, 16> DeadInsts;

  // Look for SRem and URem users.
  for (IVUsers::iterator I = IU->begin(), E = IU->end(); I != E; ++I) {
    IVStrideUse &UI = *I;
    BinaryOperator *Rem = dyn_cast<BinaryOperator>(UI.getUser());
    if (!Rem) continue;

    bool isSigned = Rem->getOpcode() == Instruction::SRem;
    if (!isSigned && Rem->getOpcode() != Instruction::URem)
      continue;

    // We're only interested in the case where we know something about
    // the numerator.
    if (UI.getOperandValToReplace() != Rem->getOperand(0))
      continue;

    // Get the SCEVs for the Rem operands.
    const SCEV *S = SE->getSCEV(Rem->getOperand(0));
    const SCEV *X = SE->getSCEV(Rem->getOperand(1));

    // Simplify unnecessary loops away.
    const Loop *ICmpLoop = LI->getLoopFor(Rem->getParent());
    S = SE->getSCEVAtScope(S, ICmpLoop);
    X = SE->getSCEVAtScope(X, ICmpLoop);

    // i % n  -->  i  if i is in [0,n).
    if ((!isSigned || SE->isKnownNonNegative(S)) &&
        SE->isKnownPredicate(isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
                             S, X))
      Rem->replaceAllUsesWith(Rem->getOperand(0));
    else {
      // (i+1) % n  -->  (i+1)==n?0:(i+1)  if i is in [0,n).
      const SCEV *LessOne =
        SE->getMinusSCEV(S, SE->getConstant(S->getType(), 1));
      if ((!isSigned || SE->isKnownNonNegative(LessOne)) &&
          SE->isKnownPredicate(isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
                               LessOne, X)) {
        ICmpInst *ICmp = new ICmpInst(Rem, ICmpInst::ICMP_EQ,
                                      Rem->getOperand(0), Rem->getOperand(1),
                                      "tmp");
        SelectInst *Sel =
          SelectInst::Create(ICmp,
                             ConstantInt::get(Rem->getType(), 0),
                             Rem->getOperand(0), "tmp", Rem);
        Rem->replaceAllUsesWith(Sel);
      } else
        continue;
    }

    // Inform IVUsers about the new users.
    if (Instruction *I = dyn_cast<Instruction>(Rem->getOperand(0)))
      IU->AddUsersIfInteresting(I);

    DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
    DeadInsts.push_back(Rem);
  }

  // Now that we're done iterating through lists, clean up any instructions
  // which are now dead.
  while (!DeadInsts.empty())
    if (Instruction *Inst =
          dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val()))
      RecursivelyDeleteTriviallyDeadInstructions(Inst);
}
Example #5
// Insert an intrinsic for fast fdiv for safe math situations where we can
// reduce precision. Leave fdiv for situations where the generic node is
// expected to be optimized.
bool AMDGPUCodeGenPrepare::visitFDiv(BinaryOperator &FDiv) {
  Type *Ty = FDiv.getType();

  // TODO: Handle half
  if (!Ty->getScalarType()->isFloatTy())
    return false;

  MDNode *FPMath = FDiv.getMetadata(LLVMContext::MD_fpmath);
  if (!FPMath)
    return false;

  const FPMathOperator *FPOp = cast<const FPMathOperator>(&FDiv);
  float ULP = FPOp->getFPAccuracy();
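  // Only relax the division when the source allows at least 2.5 ulp of error;
  // anything stricter keeps the plain fdiv.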
  if (ULP < 2.5f)
    return false;

  FastMathFlags FMF = FPOp->getFastMathFlags();
  bool UnsafeDiv = HasUnsafeFPMath || FMF.unsafeAlgebra() ||
                                      FMF.allowReciprocal();
  if (ST->hasFP32Denormals() && !UnsafeDiv)
    return false;

  IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()), FPMath);
  Builder.setFastMathFlags(FMF);
  Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());

  const AMDGPUIntrinsicInfo *II = TM->getIntrinsicInfo();
  Function *Decl
    = II->getDeclaration(Mod, AMDGPUIntrinsic::amdgcn_fdiv_fast, {});

  Value *Num = FDiv.getOperand(0);
  Value *Den = FDiv.getOperand(1);

  Value *NewFDiv = nullptr;

  if (VectorType *VT = dyn_cast<VectorType>(Ty)) {
    NewFDiv = UndefValue::get(VT);

    // FIXME: Doesn't do the right thing for cases where the vector is partially
    // constant. This works when the scalarizer pass is run first.
    for (unsigned I = 0, E = VT->getNumElements(); I != E; ++I) {
      Value *NumEltI = Builder.CreateExtractElement(Num, I);
      Value *DenEltI = Builder.CreateExtractElement(Den, I);
      Value *NewElt;

      if (shouldKeepFDivF32(NumEltI, UnsafeDiv)) {
        NewElt = Builder.CreateFDiv(NumEltI, DenEltI);
      } else {
        NewElt = Builder.CreateCall(Decl, { NumEltI, DenEltI });
      }

      NewFDiv = Builder.CreateInsertElement(NewFDiv, NewElt, I);
    }
  } else {
    if (!shouldKeepFDivF32(Num, UnsafeDiv))
      NewFDiv = Builder.CreateCall(Decl, { Num, Den });
  }

  if (NewFDiv) {
    FDiv.replaceAllUsesWith(NewFDiv);
    NewFDiv->takeName(&FDiv);
    FDiv.eraseFromParent();
  }

  return true;
}
Example #6
/// GetShiftedValue - When CanEvaluateShifted returned true for an expression,
/// this function inserts the new computation that produces the shifted value.
static Value *GetShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
                              InstCombiner &IC) {
  // We can always evaluate constants shifted.
  if (Constant *C = dyn_cast<Constant>(V)) {
    if (isLeftShift)
      V = IC.Builder->CreateShl(C, NumBits);
    else
      V = IC.Builder->CreateLShr(C, NumBits);
    // If we got a constantexpr back, try to simplify it with TD info.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      V = ConstantFoldConstantExpression(CE, IC.getDataLayout(),
                                         IC.getTargetLibraryInfo());
    return V;
  }

  Instruction *I = cast<Instruction>(V);
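  // Re-queue the instruction so InstCombine revisits it once its operands
  // have been rewritten below.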
  IC.Worklist.Add(I);

  switch (I->getOpcode()) {
  default: llvm_unreachable("Inconsistency with CanEvaluateShifted");
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // Bitwise operators can all be evaluated shifted.
    I->setOperand(0, GetShiftedValue(I->getOperand(0), NumBits,isLeftShift,IC));
    I->setOperand(1, GetShiftedValue(I->getOperand(1), NumBits,isLeftShift,IC));
    return I;

  case Instruction::Shl: {
    BinaryOperator *BO = cast<BinaryOperator>(I);
    unsigned TypeWidth = BO->getType()->getScalarSizeInBits();

    // We only accept shifts-by-a-constant in CanEvaluateShifted.
    ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));

    // We can always fold shl(c1)+shl(c2) -> shl(c1+c2).
    if (isLeftShift) {
      // If this is an oversized composite shift, then unsigned shifts get 0.
      unsigned NewShAmt = NumBits+CI->getZExtValue();
      if (NewShAmt >= TypeWidth)
        return Constant::getNullValue(I->getType());

      BO->setOperand(1, ConstantInt::get(BO->getType(), NewShAmt));
      BO->setHasNoUnsignedWrap(false);
      BO->setHasNoSignedWrap(false);
      return I;
    }

    // We turn shl(c)+lshr(c) -> and(c2) if the input doesn't already have
    // zeros.
    if (CI->getValue() == NumBits) {
      APInt Mask(APInt::getLowBitsSet(TypeWidth, TypeWidth - NumBits));
      V = IC.Builder->CreateAnd(BO->getOperand(0),
                                ConstantInt::get(BO->getContext(), Mask));
      if (Instruction *VI = dyn_cast<Instruction>(V)) {
        VI->moveBefore(BO);
        VI->takeName(BO);
      }
      return V;
    }

    // We turn shl(c1)+shr(c2) -> shl(c3)+and(c4), but only when we know that
    // the and won't be needed.
    assert(CI->getZExtValue() > NumBits);
    BO->setOperand(1, ConstantInt::get(BO->getType(),
                                       CI->getZExtValue() - NumBits));
    BO->setHasNoUnsignedWrap(false);
    BO->setHasNoSignedWrap(false);
    return BO;
  }
  case Instruction::LShr: {
    BinaryOperator *BO = cast<BinaryOperator>(I);
    unsigned TypeWidth = BO->getType()->getScalarSizeInBits();
    // We only accept shifts-by-a-constant in CanEvaluateShifted.
    ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));

    // We can always fold lshr(c1)+lshr(c2) -> lshr(c1+c2).
    if (!isLeftShift) {
      // If this is an oversized composite shift, then unsigned shifts get 0.
      unsigned NewShAmt = NumBits+CI->getZExtValue();
      if (NewShAmt >= TypeWidth)
        return Constant::getNullValue(BO->getType());

      BO->setOperand(1, ConstantInt::get(BO->getType(), NewShAmt));
      BO->setIsExact(false);
      return I;
    }

    // We turn lshr(c)+shl(c) -> and(c2) if the input doesn't already have
    // zeros.
    if (CI->getValue() == NumBits) {
      APInt Mask(APInt::getHighBitsSet(TypeWidth, TypeWidth - NumBits));
      V = IC.Builder->CreateAnd(I->getOperand(0),
                                ConstantInt::get(BO->getContext(), Mask));
      if (Instruction *VI = dyn_cast<Instruction>(V)) {
        VI->moveBefore(I);
        VI->takeName(I);
      }
      return V;
    }

    // We turn lshr(c1)+shl(c2) -> lshr(c3)+and(c4), but only when we know that
    // the and won't be needed.
    assert(CI->getZExtValue() > NumBits);
    BO->setOperand(1, ConstantInt::get(BO->getType(),
                                       CI->getZExtValue() - NumBits));
    BO->setIsExact(false);
    return BO;
  }

  case Instruction::Select:
    I->setOperand(1, GetShiftedValue(I->getOperand(1), NumBits,isLeftShift,IC));
    I->setOperand(2, GetShiftedValue(I->getOperand(2), NumBits,isLeftShift,IC));
    return I;
  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      PN->setIncomingValue(i, GetShiftedValue(PN->getIncomingValue(i),
                                              NumBits, isLeftShift, IC));
    return PN;
  }
  }
}
Example #7
/*
BalanceTree(root I)
  worklist: set
  leaves: vector
  mark I visited
  Push(worklist, Ra, Rb)
  // find all the leaves of the tree rooted at I
  while worklist not empty
    // look backwards following def-use from use
    T = 'R1 <- op1, Ra1, Rb1' = Def(Pop(worklist))
    if T is a root
      // balance computes weight in this case
      if T not visited
         BalanceTree(T)
      SortedInsert(leaves, T, Weight(T))
    else if op(T) == op(I)
      // add uses to worklist
      Push(worklist, Ra1, Rb1)
*/
BinaryOperator* balanceTree(BinaryOperator* root, std::map<Instruction*,bool>& visitMap, std::vector<BinaryOperator*>& roots)
{
  assert(root);
  if(visitMap[root])
    return NULL;
  std::list<Value*> worklist;
  std::set<std::pair<int,Value*>,weight_less_than> leaves;
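  // 'leaves' stays ordered by weight (weight_less_than), so the two lightest
  // subtrees are always combined first when the balanced tree is rebuilt below.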
  visitMap[root] = true;
  worklist.push_back( root->getOperand(0) );
  worklist.push_back( root->getOperand(1) );
  while( !worklist.empty() )
  {
    Value* v = worklist.front();
    worklist.pop_front();
    assert(v);
    BinaryOperator* T = dynamic_cast<BinaryOperator*>(v);
    if( T and std::find(roots.begin(), roots.end(), T) != roots.end() ) // T is a binary operator that exists in the root list
    {
      if( !visitMap[T] ) // if we haven't visited it, replace it with its balanced version
      {
        T = balanceTree(T, visitMap, roots);
      }
      if( !T )
      {
        INTERNAL_ERROR("balanceTree(" << *root << ") failed while attempting to balance leaf node " << *v << "; balance returned NULL!\n");
      }
      assert( T and "Balancing operation that was a root resulted in NULL being returned from balance function!" );
      leaves.insert(std::pair<int,Instruction*>(calculateWeight(T, roots), T));
    }
    else if( T and !isDifferentOperation(T, root) ) // if T isn't a root and isn't a different operation than our root, we need to process it
    {
      worklist.push_back( T->getOperand(0) );
      worklist.push_back( T->getOperand(1) );
      //remove all of the signed, name, and size call uses
      for(Value::use_iterator UI = T->use_begin(); UI != T->use_end();)
      {
        CallInst* CI = dynamic_cast<CallInst*>(*UI);
        if( isROCCCFunctionCall(CI, ROCCCNames::VariableName) or
            isROCCCFunctionCall(CI, ROCCCNames::VariableSize) or
            isROCCCFunctionCall(CI, ROCCCNames::VariableSigned) )
        {
          CI->eraseFromParent();
          UI = T->use_begin();
        }
        else
          ++UI;
      }
    }
    else // T isn't a BinaryOperator, isn't a root, or is a different operation than our root - just add it as a single leaf
    {
      leaves.insert(std::pair<int,Value*>(1, v));
    }
  }
  /*
  // construct a balanced tree from leaves
  while size(leaves) > 1
    Ra1 = Dequeue(leaves)
    Rb1 = Dequeue(leaves)
    T = 'R1 <- op1, Ra1, Rb1'
    insert T before I
    Weight(R1) = Weight(Ra1) + Weight(Rb1)
    SortedInsert(leaves, R1, Weight(R1))
  */
  while( leaves.size() > 1 )
  {
    std::pair<int,Value*> Ra1 = *leaves.begin();
    leaves.erase(leaves.begin());
    std::pair<int,Value*> Rb1 = *leaves.begin();
    leaves.erase(leaves.begin());
    int weight = Ra1.first + Rb1.first;
    //workaround to create a binary instruction with different operand types; create with undefs, then replace
    BinaryOperator* T = BinaryOperator::create(root->getOpcode(), UndefValue::get(root->getType()), UndefValue::get(root->getType()), "tmp", root);
    T->setOperand(0, Ra1.second);
    T->setOperand(1, Rb1.second);
    setSizeInBits(T, getSizeInBits(root));
    setValueSigned(T, isValueSigned(root));
    leaves.insert(std::pair<int,Value*>(weight, T));
  }
  BinaryOperator* last_inserted = NULL;
  if(leaves.begin() != leaves.end())
    last_inserted = dynamic_cast<BinaryOperator*>(leaves.begin()->second);
  if( last_inserted )
  {
    setValueName(last_inserted, getValueName(root));
    root->uncheckedReplaceAllUsesWith(last_inserted);
    std::string name = root->getName();
    root->eraseFromParent();
    last_inserted->setName(name);
    roots.erase(std::find(roots.begin(), roots.end(), root));
    roots.push_back(last_inserted);
    visitMap[last_inserted] = true;
  }
  return last_inserted;
}
Example #8
/// SimplifyDivRemOfSelect - Try to fold a divide or remainder of a select
/// instruction.
bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) {
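  // Operand 1 is known to be a select here, hence the unchecked cast<>.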
  SelectInst *SI = cast<SelectInst>(I.getOperand(1));

  // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
  int NonNullOperand = -1;
  if (Constant *ST = dyn_cast<Constant>(SI->getOperand(1)))
    if (ST->isNullValue())
      NonNullOperand = 2;
  // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
  if (Constant *ST = dyn_cast<Constant>(SI->getOperand(2)))
    if (ST->isNullValue())
      NonNullOperand = 1;

  if (NonNullOperand == -1)
    return false;

  Value *SelectCond = SI->getOperand(0);

  // Change the div/rem to use 'Y' instead of the select.
  I.setOperand(1, SI->getOperand(NonNullOperand));

  // Okay, we know we can replace the operand of the div/rem with 'Y' with no
  // problem.  However, the select, or the condition of the select may have
  // multiple uses.  Based on our knowledge that the operand must be non-zero,
  // propagate the known value for the select into other uses of it, and
  // propagate a known value of the condition into its other users.

  // If the select and condition only have a single use, don't bother with this,
  // early exit.
  if (SI->use_empty() && SelectCond->hasOneUse())
    return true;

  // Scan the current block backward, looking for other uses of SI.
  BasicBlock::iterator BBI = &I, BBFront = I.getParent()->begin();

  while (BBI != BBFront) {
    --BBI;
    // If we found a call to a function, we can't assume it will return, so
    // information from below it cannot be propagated above it.
    if (isa<CallInst>(BBI) && !isa<IntrinsicInst>(BBI))
      break;

    // Replace uses of the select or its condition with the known values.
    for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end();
         I != E; ++I) {
      if (*I == SI) {
        *I = SI->getOperand(NonNullOperand);
        Worklist.Add(BBI);
      } else if (*I == SelectCond) {
        *I = Builder->getInt1(NonNullOperand == 1);
        Worklist.Add(BBI);
      }
    }

    // If we're past the instruction, quit looking for it.
    if (&*BBI == SI)
      SI = nullptr;
    if (&*BBI == SelectCond)
      SelectCond = nullptr;

    // If we ran out of things to eliminate, break out of the loop.
    if (!SelectCond && !SI)
      break;

  }
  return true;
}
Example #9
void Lint::visitURem(BinaryOperator &I) {
  Assert1(!isZero(I.getOperand(1), TD),
          "Undefined behavior: Division by zero", &I);
}
Example #10
bool LazyValueInfoCache::solveBlockValue(Value *Val, BasicBlock *BB) {
  if (isa<Constant>(Val))
    return true;

  if (hasCachedValueInfo(Val, BB)) {
    // If we have a cached value, use that.
    DEBUG(dbgs() << "  reuse BB '" << BB->getName()
                 << "' val=" << getCachedValueInfo(Val, BB) << '\n');

    // Since we're reusing a cached value, we don't need to update the
    // OverDefinedCache. The cache will have been properly updated whenever the
    // cached value was inserted.
    return true;
  }

  // Hold off inserting this value into the Cache in case we have to return
  // false and come back later.
  LVILatticeVal Res;

  Instruction *BBI = dyn_cast<Instruction>(Val);
  if (!BBI || BBI->getParent() != BB) {
    if (!solveBlockValueNonLocal(Res, Val, BB))
      return false;
    insertResult(Val, BB, Res);
    return true;
  }

  if (PHINode *PN = dyn_cast<PHINode>(BBI)) {
    if (!solveBlockValuePHINode(Res, PN, BB))
      return false;
    insertResult(Val, BB, Res);
    return true;
  }

  // If this value is a nonnull pointer, record its range and bail out.
  PointerType *PT = dyn_cast<PointerType>(BBI->getType());
  if (PT && isKnownNonNull(BBI)) {
    Res = LVILatticeVal::getNot(ConstantPointerNull::get(PT));
    insertResult(Val, BB, Res);
    return true;
  }

  // If this is an instruction which supports range metadata, return the
  // implied range.  TODO: This should be an intersection, not a union.
  Res.mergeIn(getFromRangeMetadata(BBI), DL);

  // We can only analyze the definitions of certain classes of instructions
  // (integral binops and casts at the moment), so bail if this isn't one.
  LVILatticeVal Result;
  if ((!isa<BinaryOperator>(BBI) && !isa<CastInst>(BBI)) ||
     !BBI->getType()->isIntegerTy()) {
    DEBUG(dbgs() << " compute BB '" << BB->getName()
                 << "' - overdefined because inst def found.\n");
    Res.markOverdefined();
    insertResult(Val, BB, Res);
    return true;
  }

  // FIXME: We're currently limited to binops with a constant RHS.  This should
  // be improved.
  BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI);
  if (BO && !isa<ConstantInt>(BO->getOperand(1))) { 
    DEBUG(dbgs() << " compute BB '" << BB->getName()
                 << "' - overdefined because inst def found.\n");

    Res.markOverdefined();
    insertResult(Val, BB, Res);
    return true;
  }

  if (!solveBlockValueConstantRange(Res, BBI, BB))
    return false;
  insertResult(Val, BB, Res);
  return true;
}
Example #11
Instruction *InstCombiner::visitMul(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return ReplaceInstUsesWith(I, V);

  if (Value *V = SimplifyMulInst(Op0, Op1, DL))
    return ReplaceInstUsesWith(I, V);

  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return ReplaceInstUsesWith(I, V);

  if (match(Op1, m_AllOnes()))  // X * -1 == 0 - X
    return BinaryOperator::CreateNeg(Op0, I.getName());

  // Also allow combining multiply instructions on vectors.
  {
    Value *NewOp;
    Constant *C1, *C2;
    const APInt *IVal;
    if (match(&I, m_Mul(m_Shl(m_Value(NewOp), m_Constant(C2)),
                        m_Constant(C1))) &&
        match(C1, m_APInt(IVal)))
      // ((X << C1)*C2) == (X * (C2 << C1))
      return BinaryOperator::CreateMul(NewOp, ConstantExpr::getShl(C1, C2));

    if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
      Constant *NewCst = nullptr;
      if (match(C1, m_APInt(IVal)) && IVal->isPowerOf2())
        // Replace X*(2^C) with X << C, where C is either a scalar or a splat.
        NewCst = ConstantInt::get(NewOp->getType(), IVal->logBase2());
      else if (ConstantDataVector *CV = dyn_cast<ConstantDataVector>(C1))
        // Replace X*(2^C) with X << C, where C is a vector of known
        // constant powers of 2.
        NewCst = getLogBase2Vector(CV);

      if (NewCst) {
        BinaryOperator *Shl = BinaryOperator::CreateShl(NewOp, NewCst);
        if (I.hasNoSignedWrap()) Shl->setHasNoSignedWrap();
        if (I.hasNoUnsignedWrap()) Shl->setHasNoUnsignedWrap();
        return Shl;
      }
    }
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    // (Y - X) * (-(2**n)) -> (X - Y) * (2**n), for positive nonzero n
    // (Y + const) * (-(2**n)) -> (-const - Y) * (2**n), for positive nonzero n
    // The "* (2**n)" thus becomes a potential shifting opportunity.
    {
      const APInt &   Val = CI->getValue();
      const APInt &PosVal = Val.abs();
      if (Val.isNegative() && PosVal.isPowerOf2()) {
        Value *X = nullptr, *Y = nullptr;
        if (Op0->hasOneUse()) {
          ConstantInt *C1;
          Value *Sub = nullptr;
          if (match(Op0, m_Sub(m_Value(Y), m_Value(X))))
            Sub = Builder->CreateSub(X, Y, "suba");
          else if (match(Op0, m_Add(m_Value(Y), m_ConstantInt(C1))))
            Sub = Builder->CreateSub(Builder->CreateNeg(C1), Y, "subc");
          if (Sub)
            return
              BinaryOperator::CreateMul(Sub,
                                        ConstantInt::get(Y->getType(), PosVal));
        }
      }
    }
  }

  // Simplify mul instructions with a constant RHS.
  if (isa<Constant>(Op1)) {
    // Try to fold constant mul into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

    if (isa<PHINode>(Op0))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;

    // Canonicalize (X+C1)*CI -> X*CI+C1*CI.
    {
      Value *X;
      Constant *C1;
      if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) {
        Value *Mul = Builder->CreateMul(C1, Op1);
        // Only go forward with the transform if C1*CI simplifies to a tidier
        // constant.
        if (!match(Mul, m_Mul(m_Value(), m_Value())))
          return BinaryOperator::CreateAdd(Builder->CreateMul(X, Op1), Mul);
      }
    }
  }

  if (Value *Op0v = dyn_castNegVal(Op0))     // -X * -Y = X*Y
    if (Value *Op1v = dyn_castNegVal(Op1))
      return BinaryOperator::CreateMul(Op0v, Op1v);

  // (X / Y) *  Y = X - (X % Y)
  // (X / Y) * -Y = (X % Y) - X
  {
    Value *Op1C = Op1;
    BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0);
    if (!BO ||
        (BO->getOpcode() != Instruction::UDiv &&
         BO->getOpcode() != Instruction::SDiv)) {
      Op1C = Op0;
      BO = dyn_cast<BinaryOperator>(Op1);
    }
    Value *Neg = dyn_castNegVal(Op1C);
    if (BO && BO->hasOneUse() &&
        (BO->getOperand(1) == Op1C || BO->getOperand(1) == Neg) &&
        (BO->getOpcode() == Instruction::UDiv ||
         BO->getOpcode() == Instruction::SDiv)) {
      Value *Op0BO = BO->getOperand(0), *Op1BO = BO->getOperand(1);

      // If the division is exact, X % Y is zero, so we end up with X or -X.
      if (PossiblyExactOperator *SDiv = dyn_cast<PossiblyExactOperator>(BO))
        if (SDiv->isExact()) {
          if (Op1BO == Op1C)
            return ReplaceInstUsesWith(I, Op0BO);
          return BinaryOperator::CreateNeg(Op0BO);
        }

      Value *Rem;
      if (BO->getOpcode() == Instruction::UDiv)
        Rem = Builder->CreateURem(Op0BO, Op1BO);
      else
        Rem = Builder->CreateSRem(Op0BO, Op1BO);
      Rem->takeName(BO);

      if (Op1BO == Op1C)
        return BinaryOperator::CreateSub(Op0BO, Rem);
      return BinaryOperator::CreateSub(Rem, Op0BO);
    }
  }

  // i1 mul -> i1 and.
  if (I.getType()->getScalarType()->isIntegerTy(1))
    return BinaryOperator::CreateAnd(Op0, Op1);

  // X*(1 << Y) --> X << Y
  // (1 << Y)*X --> X << Y
  {
    Value *Y;
    if (match(Op0, m_Shl(m_One(), m_Value(Y))))
      return BinaryOperator::CreateShl(Op1, Y);
    if (match(Op1, m_Shl(m_One(), m_Value(Y))))
      return BinaryOperator::CreateShl(Op0, Y);
  }

  // If one of the operands of the multiply is a cast from a boolean value, then
  // we know the bool is either zero or one, so this is a 'masking' multiply.
  //   X * Y (where Y is 0 or 1) -> X & (0-Y)
  if (!I.getType()->isVectorTy()) {
    // -2 is "-1 << 1" so it is all bits set except the low one.
    APInt Negative2(I.getType()->getPrimitiveSizeInBits(), (uint64_t)-2, true);

    Value *BoolCast = nullptr, *OtherOp = nullptr;
    if (MaskedValueIsZero(Op0, Negative2))
      BoolCast = Op0, OtherOp = Op1;
    else if (MaskedValueIsZero(Op1, Negative2))
      BoolCast = Op1, OtherOp = Op0;

    if (BoolCast) {
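      // 0 - BoolCast is either 0 or all-ones (BoolCast is known to be 0 or 1),
      // so the 'and' below yields either 0 or OtherOp, i.e. X * Y for Y in {0,1}.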
      Value *V = Builder->CreateSub(Constant::getNullValue(I.getType()),
                                    BoolCast);
      return BinaryOperator::CreateAnd(V, OtherOp);
    }
  }

  return Changed ? &I : nullptr;
}
Example #12
bool AlignmentFromAssumptions::extractAlignmentInfo(CallInst *I,
                                 Value *&AAPtr, const SCEV *&AlignSCEV,
                                 const SCEV *&OffSCEV) {
  // An alignment assume must be a statement about the least-significant
  // bits of the pointer being zero, possibly with some offset.
  ICmpInst *ICI = dyn_cast<ICmpInst>(I->getArgOperand(0));
  if (!ICI)
    return false;

  // This must be an expression of the form: x & m == 0.
  if (ICI->getPredicate() != ICmpInst::ICMP_EQ)
    return false;

  // Swap things around so that the RHS is 0.
  Value *CmpLHS = ICI->getOperand(0);
  Value *CmpRHS = ICI->getOperand(1);
  const SCEV *CmpLHSSCEV = SE->getSCEV(CmpLHS);
  const SCEV *CmpRHSSCEV = SE->getSCEV(CmpRHS);
  if (CmpLHSSCEV->isZero())
    std::swap(CmpLHS, CmpRHS);
  else if (!CmpRHSSCEV->isZero())
    return false;

  BinaryOperator *CmpBO = dyn_cast<BinaryOperator>(CmpLHS);
  if (!CmpBO || CmpBO->getOpcode() != Instruction::And)
    return false;

  // Swap things around so that the right operand of the and is a constant
  // (the mask); we cannot deal with variable masks.
  Value *AndLHS = CmpBO->getOperand(0);
  Value *AndRHS = CmpBO->getOperand(1);
  const SCEV *AndLHSSCEV = SE->getSCEV(AndLHS);
  const SCEV *AndRHSSCEV = SE->getSCEV(AndRHS);
  if (isa<SCEVConstant>(AndLHSSCEV)) {
    std::swap(AndLHS, AndRHS);
    std::swap(AndLHSSCEV, AndRHSSCEV);
  }

  const SCEVConstant *MaskSCEV = dyn_cast<SCEVConstant>(AndRHSSCEV);
  if (!MaskSCEV)
    return false;

  // The mask must have some trailing ones (otherwise the condition is
  // trivial and tells us nothing about the alignment of the left operand).
  unsigned TrailingOnes =
    MaskSCEV->getValue()->getValue().countTrailingOnes();
  if (!TrailingOnes)
    return false;
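  // A mask with TrailingOnes trailing one bits forces the low TrailingOnes bits
  // of the left operand to be zero, i.e. it is aligned to at least
  // 1 << TrailingOnes bytes.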

  // Cap the alignment at the maximum with which LLVM can deal (and make sure
  // we don't overflow the shift).
  uint64_t Alignment;
  TrailingOnes = std::min(TrailingOnes,
    unsigned(sizeof(unsigned) * CHAR_BIT - 1));
  Alignment = std::min(1u << TrailingOnes, +Value::MaximumAlignment);

  Type *Int64Ty = Type::getInt64Ty(I->getParent()->getParent()->getContext());
  AlignSCEV = SE->getConstant(Int64Ty, Alignment);

  // The LHS might be a ptrtoint instruction, or it might be the pointer
  // with an offset.
  AAPtr = nullptr;
  OffSCEV = nullptr;
  if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(AndLHS)) {
    AAPtr = PToI->getPointerOperand();
    OffSCEV = SE->getConstant(Int64Ty, 0);
  } else if (const SCEVAddExpr* AndLHSAddSCEV =
             dyn_cast<SCEVAddExpr>(AndLHSSCEV)) {
    // Try to find the ptrtoint; subtract it and the rest is the offset.
    for (SCEVAddExpr::op_iterator J = AndLHSAddSCEV->op_begin(),
         JE = AndLHSAddSCEV->op_end(); J != JE; ++J)
      if (const SCEVUnknown *OpUnk = dyn_cast<SCEVUnknown>(*J))
        if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(OpUnk->getValue())) {
          AAPtr = PToI->getPointerOperand();
          OffSCEV = SE->getMinusSCEV(AndLHSAddSCEV, *J);
          break;
        }
  }

  if (!AAPtr)
    return false;

  // Sign extend the offset to 64 bits (so that it is like all of the other
  // expressions). 
  unsigned OffSCEVBits = OffSCEV->getType()->getPrimitiveSizeInBits();
  if (OffSCEVBits < 64)
    OffSCEV = SE->getSignExtendExpr(OffSCEV, Int64Ty);
  else if (OffSCEVBits > 64)
    return false;

  AAPtr = AAPtr->stripPointerCasts();
  return true;
}
Example #13
/// HandleFloatingPointIV - If the loop has a floating point induction variable
/// then insert a corresponding integer induction variable if possible.
/// For example,
/// for(double i = 0; i < 10000; ++i)
///   bar(i)
/// is converted into
/// for(int i = 0; i < 10000; ++i)
///   bar((double)i);
///
void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PH) {

  unsigned IncomingEdge = L->contains(PH->getIncomingBlock(0));
  unsigned BackEdge     = IncomingEdge^1;

  // Check incoming value.
  ConstantFP *InitValue = dyn_cast<ConstantFP>(PH->getIncomingValue(IncomingEdge));
  if (!InitValue) return;
  uint64_t newInitValue =
              Type::getInt32Ty(PH->getContext())->getPrimitiveSizeInBits();
  if (!convertToInt(InitValue->getValueAPF(), &newInitValue))
    return;

  // Check IV increment. Reject this PH if increment operation is not
  // an add or increment value can not be represented by an integer.
  BinaryOperator *Incr =
    dyn_cast<BinaryOperator>(PH->getIncomingValue(BackEdge));
  if (!Incr) return;
  if (Incr->getOpcode() != Instruction::FAdd) return;
  ConstantFP *IncrValue = NULL;
  unsigned IncrVIndex = 1;
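  // The constant operand of the fadd may be on either side; pick whichever
  // operand is not the PHI itself.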
  if (Incr->getOperand(1) == PH)
    IncrVIndex = 0;
  IncrValue = dyn_cast<ConstantFP>(Incr->getOperand(IncrVIndex));
  if (!IncrValue) return;
  uint64_t newIncrValue =
              Type::getInt32Ty(PH->getContext())->getPrimitiveSizeInBits();
  if (!convertToInt(IncrValue->getValueAPF(), &newIncrValue))
    return;

  // Check Incr uses. One user is PH and the other user is the exit condition used
  // by the conditional terminator.
  Value::use_iterator IncrUse = Incr->use_begin();
  Instruction *U1 = cast<Instruction>(IncrUse++);
  if (IncrUse == Incr->use_end()) return;
  Instruction *U2 = cast<Instruction>(IncrUse++);
  if (IncrUse != Incr->use_end()) return;

  // Find exit condition.
  FCmpInst *EC = dyn_cast<FCmpInst>(U1);
  if (!EC)
    EC = dyn_cast<FCmpInst>(U2);
  if (!EC) return;

  if (BranchInst *BI = dyn_cast<BranchInst>(EC->getParent()->getTerminator())) {
    if (!BI->isConditional()) return;
    if (BI->getCondition() != EC) return;
  }

  // Find exit value. If exit value can not be represented as an integer then
  // do not handle this floating point PH.
  ConstantFP *EV = NULL;
  unsigned EVIndex = 1;
  if (EC->getOperand(1) == Incr)
    EVIndex = 0;
  EV = dyn_cast<ConstantFP>(EC->getOperand(EVIndex));
  if (!EV) return;
  uint64_t intEV = Type::getInt32Ty(PH->getContext())->getPrimitiveSizeInBits();
  if (!convertToInt(EV->getValueAPF(), &intEV))
    return;

  // Find new predicate for integer comparison.
  CmpInst::Predicate NewPred = CmpInst::BAD_ICMP_PREDICATE;
  switch (EC->getPredicate()) {
  case CmpInst::FCMP_OEQ:
  case CmpInst::FCMP_UEQ:
    NewPred = CmpInst::ICMP_EQ;
    break;
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_UGT:
    NewPred = CmpInst::ICMP_UGT;
    break;
  case CmpInst::FCMP_OGE:
  case CmpInst::FCMP_UGE:
    NewPred = CmpInst::ICMP_UGE;
    break;
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_ULT:
    NewPred = CmpInst::ICMP_ULT;
    break;
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_ULE:
    NewPred = CmpInst::ICMP_ULE;
    break;
  default:
    break;
  }
  if (NewPred == CmpInst::BAD_ICMP_PREDICATE) return;

  // Insert new integer induction variable.
  PHINode *NewPHI = PHINode::Create(Type::getInt32Ty(PH->getContext()),
                                    PH->getName()+".int", PH);
  NewPHI->addIncoming(ConstantInt::get(Type::getInt32Ty(PH->getContext()),
                                       newInitValue),
                      PH->getIncomingBlock(IncomingEdge));

  Value *NewAdd = BinaryOperator::CreateAdd(NewPHI,
                           ConstantInt::get(Type::getInt32Ty(PH->getContext()),
                                                             newIncrValue),
                                            Incr->getName()+".int", Incr);
  NewPHI->addIncoming(NewAdd, PH->getIncomingBlock(BackEdge));

  // The back edge is edge 1 of newPHI, whatever it may have been in the
  // original PHI.
  ConstantInt *NewEV = ConstantInt::get(Type::getInt32Ty(PH->getContext()),
                                        intEV);
  Value *LHS = (EVIndex == 1 ? NewPHI->getIncomingValue(1) : NewEV);
  Value *RHS = (EVIndex == 1 ? NewEV : NewPHI->getIncomingValue(1));
  ICmpInst *NewEC = new ICmpInst(EC->getParent()->getTerminator(),
                                 NewPred, LHS, RHS, EC->getName());

  // In the following deletions, PH may become dead and may be deleted.
  // Use a WeakVH to observe whether this happens.
  WeakVH WeakPH = PH;

  // Delete old, floating point, exit comparison instruction.
  NewEC->takeName(EC);
  EC->replaceAllUsesWith(NewEC);
  RecursivelyDeleteTriviallyDeadInstructions(EC);

  // Delete old, floating point, increment instruction.
  Incr->replaceAllUsesWith(UndefValue::get(Incr->getType()));
  RecursivelyDeleteTriviallyDeadInstructions(Incr);

  // Replace floating induction variable, if it isn't already deleted.
  // Give SIToFPInst preference over UIToFPInst because it is faster on
  // most platforms.
  if (WeakPH && !PH->use_empty()) {
    if (useSIToFPInst(*InitValue, *EV, newInitValue, intEV)) {
      SIToFPInst *Conv = new SIToFPInst(NewPHI, PH->getType(), "indvar.conv",
                                        PH->getParent()->getFirstNonPHI());
      PH->replaceAllUsesWith(Conv);
    } else {
      UIToFPInst *Conv = new UIToFPInst(NewPHI, PH->getType(), "indvar.conv",
                                        PH->getParent()->getFirstNonPHI());
      PH->replaceAllUsesWith(Conv);
    }
    RecursivelyDeleteTriviallyDeadInstructions(PH);
  }

  // Add a new IVUsers entry for the newly-created integer PHI.
  IU->AddUsersIfInteresting(NewPHI);
}
Example #14
Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
                                               BinaryOperator &I) {
  bool isLeftShift = I.getOpcode() == Instruction::Shl;


  // See if we can propagate this shift into the input, this covers the trivial
  // case of lshr(shl(x,c1),c2) as well as other more complex cases.
  if (I.getOpcode() != Instruction::AShr &&
      CanEvaluateShifted(Op0, Op1->getZExtValue(), isLeftShift, *this)) {
    DEBUG(dbgs() << "ICE: GetShiftedValue propagating shift through expression"
              " to eliminate shift:\n  IN: " << *Op0 << "\n  SH: " << I <<"\n");

    return ReplaceInstUsesWith(I,
                 GetShiftedValue(Op0, Op1->getZExtValue(), isLeftShift, *this));
  }


  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  uint32_t TypeBits = Op0->getType()->getScalarSizeInBits();

  // shl i32 X, 32 = 0 and srl i8 Y, 9 = 0, ... just don't eliminate
  // a signed shift.
  //
  if (Op1->uge(TypeBits)) {
    if (I.getOpcode() != Instruction::AShr)
      return ReplaceInstUsesWith(I, Constant::getNullValue(Op0->getType()));
    // ashr i32 X, 32 --> ashr i32 X, 31
    I.setOperand(1, ConstantInt::get(I.getType(), TypeBits-1));
    return &I;
  }

  // ((X*C1) << C2) == (X * (C1 << C2))
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0))
    if (BO->getOpcode() == Instruction::Mul && isLeftShift)
      if (Constant *BOOp = dyn_cast<Constant>(BO->getOperand(1)))
        return BinaryOperator::CreateMul(BO->getOperand(0),
                                        ConstantExpr::getShl(BOOp, Op1));

  // Try to fold constant and into select arguments.
  if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
    if (Instruction *R = FoldOpIntoSelect(I, SI))
      return R;
  if (isa<PHINode>(Op0))
    if (Instruction *NV = FoldOpIntoPhi(I))
      return NV;

  // Fold shift2(trunc(shift1(x,c1)), c2) -> trunc(shift2(shift1(x,c1),c2))
  if (TruncInst *TI = dyn_cast<TruncInst>(Op0)) {
    Instruction *TrOp = dyn_cast<Instruction>(TI->getOperand(0));
    // If 'shift2' is an ashr, we would have to get the sign bit into a funny
    // place.  Don't try to do this transformation in this case.  Also, we
    // require that the input operand is a shift-by-constant so that we have
    // confidence that the shifts will get folded together.  We could do this
    // xform in more cases, but it is unlikely to be profitable.
    if (TrOp && I.isLogicalShift() && TrOp->isShift() &&
        isa<ConstantInt>(TrOp->getOperand(1))) {
      // Okay, we'll do this xform.  Make the shift of shift.
      Constant *ShAmt = ConstantExpr::getZExt(Op1, TrOp->getType());
      // (shift2 (shift1 & 0x00FF), c2)
      Value *NSh = Builder->CreateBinOp(I.getOpcode(), TrOp, ShAmt,I.getName());

      // For logical shifts, the truncation has the effect of making the high
      // part of the register be zeros.  Emulate this by inserting an AND to
      // clear the top bits as needed.  This 'and' will usually be zapped by
      // other xforms later if dead.
      unsigned SrcSize = TrOp->getType()->getScalarSizeInBits();
      unsigned DstSize = TI->getType()->getScalarSizeInBits();
      APInt MaskV(APInt::getLowBitsSet(SrcSize, DstSize));

      // The mask we constructed says what the trunc would do if occurring
      // between the shifts.  We want to know the effect *after* the second
      // shift.  We know that it is a logical shift by a constant, so adjust the
      // mask as appropriate.
      if (I.getOpcode() == Instruction::Shl)
        MaskV <<= Op1->getZExtValue();
      else {
        assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift");
        MaskV = MaskV.lshr(Op1->getZExtValue());
      }

      // shift1 & 0x00FF
      Value *And = Builder->CreateAnd(NSh,
                                      ConstantInt::get(I.getContext(), MaskV),
                                      TI->getName());

      // Return the value truncated to the interesting size.
      return new TruncInst(And, I.getType());
    }
  }

  if (Op0->hasOneUse()) {
    if (BinaryOperator *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
      // Turn ((X >> C) + Y) << C  ->  (X + (Y << C)) & (~0 << C)
      Value *V1, *V2;
      ConstantInt *CC;
      switch (Op0BO->getOpcode()) {
      default: break;
      case Instruction::Add:
      case Instruction::And:
      case Instruction::Or:
      case Instruction::Xor: {
        // These operators commute.
        // Turn (Y + (X >> C)) << C  ->  (X + (Y << C)) & (~0 << C)
        if (isLeftShift && Op0BO->getOperand(1)->hasOneUse() &&
            match(Op0BO->getOperand(1), m_Shr(m_Value(V1),
                  m_Specific(Op1)))) {
          Value *YS =         // (Y << C)
            Builder->CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
          // (X + (Y << C))
          Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), YS, V1,
                                          Op0BO->getOperand(1)->getName());
          uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
          return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getContext(),
                     APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
        }

        // Turn (Y + ((X >> C) & CC)) << C  ->  ((X & (CC << C)) + (Y << C))
        Value *Op0BOOp1 = Op0BO->getOperand(1);
        if (isLeftShift && Op0BOOp1->hasOneUse() &&
            match(Op0BOOp1,
                  m_And(m_OneUse(m_Shr(m_Value(V1), m_Specific(Op1))),
                        m_ConstantInt(CC)))) {
          Value *YS =   // (Y << C)
            Builder->CreateShl(Op0BO->getOperand(0), Op1,
                                         Op0BO->getName());
          // X & (CC << C)
          Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
                                         V1->getName()+".mask");
          return BinaryOperator::Create(Op0BO->getOpcode(), YS, XM);
        }
      }

      // FALL THROUGH.
      case Instruction::Sub: {
        // Turn ((X >> C) + Y) << C  ->  (X + (Y << C)) & (~0 << C)
        if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
            match(Op0BO->getOperand(0), m_Shr(m_Value(V1),
                  m_Specific(Op1)))) {
          Value *YS =  // (Y << C)
            Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
          // (X + (Y << C))
          Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), V1, YS,
                                          Op0BO->getOperand(0)->getName());
          uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
          return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getContext(),
                     APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
        }

        // Turn (((X >> C)&CC) + Y) << C  ->  (X + (Y << C)) & (CC << C)
        if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
            match(Op0BO->getOperand(0),
                  m_And(m_OneUse(m_Shr(m_Value(V1), m_Value(V2))),
                        m_ConstantInt(CC))) && V2 == Op1) {
          Value *YS = // (Y << C)
            Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
          // X & (CC << C)
          Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
                                         V1->getName()+".mask");

          return BinaryOperator::Create(Op0BO->getOpcode(), XM, YS);
        }

        break;
      }
      }


      // If the operand is a bitwise operator with a constant RHS, and the
      // shift is the only use, we can pull it out of the shift.
      if (ConstantInt *Op0C = dyn_cast<ConstantInt>(Op0BO->getOperand(1))) {
        bool isValid = true;     // Valid only for And, Or, Xor
        bool highBitSet = false; // Transform if high bit of constant set?

        switch (Op0BO->getOpcode()) {
        default: isValid = false; break;   // Do not perform transform!
        case Instruction::Add:
          isValid = isLeftShift;
          break;
        case Instruction::Or:
        case Instruction::Xor:
          highBitSet = false;
          break;
        case Instruction::And:
          highBitSet = true;
          break;
        }

        // If this is a signed shift right, and the high bit is modified
        // by the logical operation, do not perform the transformation.
        // The highBitSet boolean indicates the value of the high bit of
        // the constant which would cause it to be modified for this
        // operation.
        //
        if (isValid && I.getOpcode() == Instruction::AShr)
          isValid = Op0C->getValue()[TypeBits-1] == highBitSet;

        if (isValid) {
          Constant *NewRHS = ConstantExpr::get(I.getOpcode(), Op0C, Op1);

          Value *NewShift =
            Builder->CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), Op1);
          NewShift->takeName(Op0BO);

          return BinaryOperator::Create(Op0BO->getOpcode(), NewShift,
                                        NewRHS);
        }
      }
    }
  }

  // Find out if this is a shift of a shift by a constant.
  BinaryOperator *ShiftOp = dyn_cast<BinaryOperator>(Op0);
  if (ShiftOp && !ShiftOp->isShift())
    ShiftOp = 0;

  if (ShiftOp && isa<ConstantInt>(ShiftOp->getOperand(1))) {

    // This is a constant shift of a constant shift. Be careful about hiding
    // shl instructions behind bit masks. They are used to represent multiplies
    // by a constant, and it is important that simple arithmetic expressions
    // are still recognizable by scalar evolution.
    //
    // The transforms applied to shl are very similar to the transforms applied
    // to mul by constant. We can be more aggressive about optimizing right
    // shifts.
    //
    // Combinations of right and left shifts will still be optimized in
    // DAGCombine where scalar evolution no longer applies.

    ConstantInt *ShiftAmt1C = cast<ConstantInt>(ShiftOp->getOperand(1));
    uint32_t ShiftAmt1 = ShiftAmt1C->getLimitedValue(TypeBits);
    uint32_t ShiftAmt2 = Op1->getLimitedValue(TypeBits);
    assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
    if (ShiftAmt1 == 0) return 0;  // Will be simplified in the future.
    Value *X = ShiftOp->getOperand(0);

    IntegerType *Ty = cast<IntegerType>(I.getType());

    // Check for (X << c1) << c2  and  (X >> c1) >> c2
    if (I.getOpcode() == ShiftOp->getOpcode()) {
      uint32_t AmtSum = ShiftAmt1+ShiftAmt2;   // Fold into one big shift.
      // If this is an oversized composite shift, then unsigned shifts get 0, ashr
      // saturates.
      if (AmtSum >= TypeBits) {
        if (I.getOpcode() != Instruction::AShr)
          return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
        AmtSum = TypeBits-1;  // Saturate to 31 for i32 ashr.
      }

      return BinaryOperator::Create(I.getOpcode(), X,
                                    ConstantInt::get(Ty, AmtSum));
    }

    if (ShiftAmt1 == ShiftAmt2) {
      // If we have ((X << C) >>u C), turn this into X & (-1 >>u C).
      if (I.getOpcode() == Instruction::LShr &&
          ShiftOp->getOpcode() == Instruction::Shl) {
        APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1));
        return BinaryOperator::CreateAnd(X,
                                        ConstantInt::get(I.getContext(), Mask));
      }
    } else if (ShiftAmt1 < ShiftAmt2) {
      uint32_t ShiftDiff = ShiftAmt2-ShiftAmt1;

      // (X >>?,exact C1) << C2 --> X << (C2-C1)
      // The inexact version is deferred to DAGCombine so we don't hide shl
      // behind a bit mask.
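      // (Illustration: (X >>u,exact 2) << 5 becomes X << 3; 'exact' guarantees
      //  the low two bits of X are zero, so nothing is discarded.)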
      if (I.getOpcode() == Instruction::Shl &&
          ShiftOp->getOpcode() != Instruction::Shl &&
          ShiftOp->isExact()) {
        assert(ShiftOp->getOpcode() == Instruction::LShr ||
               ShiftOp->getOpcode() == Instruction::AShr);
        ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
        BinaryOperator *NewShl = BinaryOperator::Create(Instruction::Shl,
                                                        X, ShiftDiffCst);
        NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
        NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
        return NewShl;
      }

      // (X << C1) >>u C2  --> X >>u (C2-C1) & (-1 >> C2)
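      // (Illustration: for i32, (X << 3) >>u 8 becomes (X >>u 5) & 0x00FFFFFF.)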
      if (I.getOpcode() == Instruction::LShr &&
          ShiftOp->getOpcode() == Instruction::Shl) {
        ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
        // (X <<nuw C1) >>u C2 --> X >>u (C2-C1)
        if (ShiftOp->hasNoUnsignedWrap()) {
          BinaryOperator *NewLShr = BinaryOperator::Create(Instruction::LShr,
                                                           X, ShiftDiffCst);
          NewLShr->setIsExact(I.isExact());
          return NewLShr;
        }
        Value *Shift = Builder->CreateLShr(X, ShiftDiffCst);

        APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
        return BinaryOperator::CreateAnd(Shift,
                                         ConstantInt::get(I.getContext(),Mask));
      }

      // We can't handle (X << C1) >>s C2 because it shifts arbitrary bits in.
      // However, we can handle (X <<nsw C1) >>s C2 since it only shifts in
      // sign bits.
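      // (Illustration: (X <<nsw 2) >>s 5 becomes X >>s 3.)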
      if (I.getOpcode() == Instruction::AShr &&
          ShiftOp->getOpcode() == Instruction::Shl) {
        if (ShiftOp->hasNoSignedWrap()) {
          // (X <<nsw C1) >>s C2 --> X >>s (C2-C1)
          ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
          BinaryOperator *NewAShr = BinaryOperator::Create(Instruction::AShr,
                                                           X, ShiftDiffCst);
          NewAShr->setIsExact(I.isExact());
          return NewAShr;
        }
      }
    } else {
      assert(ShiftAmt2 < ShiftAmt1);
      uint32_t ShiftDiff = ShiftAmt1-ShiftAmt2;

      // (X >>?exact C1) << C2 --> X >>?exact (C1-C2)
      // The inexact version is deferred to DAGCombine so we don't hide shl
      // behind a bit mask.
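      // (Illustration: (X >>u,exact 6) << 2 becomes X >>u,exact 4.)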
      if (I.getOpcode() == Instruction::Shl &&
          ShiftOp->getOpcode() != Instruction::Shl &&
          ShiftOp->isExact()) {
        ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
        BinaryOperator *NewShr = BinaryOperator::Create(ShiftOp->getOpcode(),
                                                        X, ShiftDiffCst);
        NewShr->setIsExact(true);
        return NewShr;
      }

      // (X << C1) >>u C2  --> X << (C1-C2) & (-1 >> C2)
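      // (Illustration: for i32, (X << 8) >>u 3 becomes (X << 5) & 0x1FFFFFFF.)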
      if (I.getOpcode() == Instruction::LShr &&
          ShiftOp->getOpcode() == Instruction::Shl) {
        ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
        if (ShiftOp->hasNoUnsignedWrap()) {
          // (X <<nuw C1) >>u C2 --> X <<nuw (C1-C2)
          BinaryOperator *NewShl = BinaryOperator::Create(Instruction::Shl,
                                                          X, ShiftDiffCst);
          NewShl->setHasNoUnsignedWrap(true);
          return NewShl;
        }
        Value *Shift = Builder->CreateShl(X, ShiftDiffCst);

        APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
        return BinaryOperator::CreateAnd(Shift,
                                         ConstantInt::get(I.getContext(),Mask));
      }

      // We can't handle (X << C1) >>s C2 because it shifts arbitrary bits in.
      // However, we can handle (X <<nsw C1) >>s C2 since it only shifts in
      // sign bits.
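      // (Illustration: (X <<nsw 7) >>s 2 becomes X <<nsw 5.)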
      if (I.getOpcode() == Instruction::AShr &&
          ShiftOp->getOpcode() == Instruction::Shl) {
        if (ShiftOp->hasNoSignedWrap()) {
          // (X <<nsw C1) >>s C2 --> X <<nsw (C1-C2)
          ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
          BinaryOperator *NewShl = BinaryOperator::Create(Instruction::Shl,
                                                          X, ShiftDiffCst);
          NewShl->setHasNoSignedWrap(true);
          return NewShl;
        }
      }
    }
  }
  return 0;
}
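
A minimal, self-contained C++ sketch (not from the LLVM sources; the constant, the sample values, and the driver are made up for illustration) checking the equal-shift-amount identity folded above, i.e. that ((X << C) >>u C) equals X & (-1 >>u C) for 32-bit values:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t C = 5;
  const uint32_t Mask = UINT32_MAX >> C;  // -1 >>u C: the low (32 - C) bits set
  const uint32_t Samples[] = {0u, 1u, 0xDEADBEEFu, 0x80000001u, UINT32_MAX};
  for (uint32_t X : Samples)
    assert(((X << C) >> C) == (X & Mask));  // ((X << C) >>u C) == X & (-1 >>u C)
  return 0;
}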
Example #15
0
void Lint::visitSub(BinaryOperator &I) {
  Assert1(!isa<UndefValue>(I.getOperand(0)) ||
          !isa<UndefValue>(I.getOperand(1)),
          "Undefined result: sub(undef, undef)", &I);
}
Example #16
0
LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) {
  // See if we already have a value for this block.
  LVILatticeVal BBLV = getCachedEntryForBlock(BB);
  
  // If we've already computed this block's value, return it.
  if (!BBLV.isUndefined()) {
    DEBUG(dbgs() << "  reuse BB '" << BB->getName() << "' val=" << BBLV <<'\n');
    return BBLV;
  }

  // Otherwise, this is the first time we're seeing this block.  Reset the
  // lattice value to overdefined, so that cycles will terminate and be
  // conservatively correct.
  BBLV.markOverdefined();
  Cache[BB] = BBLV;
  
  Instruction *BBI = dyn_cast<Instruction>(Val);
  if (BBI == 0 || BBI->getParent() != BB) {
    LVILatticeVal Result;  // Start Undefined.
    
    // If this is a pointer, and there's a load from that pointer in this BB,
    // then we know that the pointer can't be NULL.
    bool NotNull = false;
    if (Val->getType()->isPointerTy()) {
      for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();BI != BE;++BI){
        LoadInst *L = dyn_cast<LoadInst>(BI);
        if (L && L->getPointerAddressSpace() == 0 &&
            L->getPointerOperand()->getUnderlyingObject() ==
              Val->getUnderlyingObject()) {
          NotNull = true;
          break;
        }
      }
    }
    
    unsigned NumPreds = 0;    
    // Loop over all of our predecessors, merging what we know from them into
    // result.
    for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
      Result.mergeIn(getEdgeValue(*PI, BB));
      
      // If we hit overdefined, exit early.  The BlockVals entry is already set
      // to overdefined.
      if (Result.isOverdefined()) {
        DEBUG(dbgs() << " compute BB '" << BB->getName()
                     << "' - overdefined because of pred.\n");
        // If we previously determined that this is a pointer that can't be null
        // then return that rather than giving up entirely.
        if (NotNull) {
          const PointerType *PTy = cast<PointerType>(Val->getType());
          Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
        }
        
        return Result;
      }
      ++NumPreds;
    }
    
    
    // If this is the entry block, we must be asking about an argument.  The
    // value is overdefined.
    if (NumPreds == 0 && BB == &BB->getParent()->front()) {
      assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
      Result.markOverdefined();
      return Result;
    }
    
    // Return the merged value, which is more precise than 'overdefined'.
    assert(!Result.isOverdefined());
    return Cache[BB] = Result;
  }
  
  // If this value is defined by an instruction in this block, we have to
  // process it here somehow or return overdefined.
  if (PHINode *PN = dyn_cast<PHINode>(BBI)) {
    LVILatticeVal Result;  // Start Undefined.
    
    // Loop over all of our predecessors, merging what we know from them into
    // result.
    for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
      Value* PhiVal = PN->getIncomingValueForBlock(*PI);
      Result.mergeIn(Parent.getValueOnEdge(PhiVal, *PI, BB));
      
      // If we hit overdefined, exit early.  The BlockVals entry is already set
      // to overdefined.
      if (Result.isOverdefined()) {
        DEBUG(dbgs() << " compute BB '" << BB->getName()
                     << "' - overdefined because of pred.\n");
        return Result;
      }
    }
    
    // Return the merged value, which is more precise than 'overdefined'.
    assert(!Result.isOverdefined());
    return Cache[BB] = Result;
  }

  assert(Cache[BB].isOverdefined() && "Recursive query changed our cache?");

  // We can only analyze the definitions of certain classes of instructions
  // (integral binops and casts at the moment), so bail if this isn't one.
  LVILatticeVal Result;
  if ((!isa<BinaryOperator>(BBI) && !isa<CastInst>(BBI)) ||
     !BBI->getType()->isIntegerTy()) {
    DEBUG(dbgs() << " compute BB '" << BB->getName()
                 << "' - overdefined because inst def found.\n");
    Result.markOverdefined();
    return Result;
  }
   
  // FIXME: We're currently limited to binops with a constant RHS.  This should
  // be improved.
  BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI);
  if (BO && !isa<ConstantInt>(BO->getOperand(1))) { 
    DEBUG(dbgs() << " compute BB '" << BB->getName()
                 << "' - overdefined because inst def found.\n");

    Result.markOverdefined();
    return Result;
  }  

  // Figure out the range of the LHS.  If that fails, bail.
  LVILatticeVal LHSVal = Parent.getValueInBlock(BBI->getOperand(0), BB);
  if (!LHSVal.isConstantRange()) {
    Result.markOverdefined();
    return Result;
  }
  
  ConstantInt *RHS = 0;
  ConstantRange LHSRange = LHSVal.getConstantRange();
  ConstantRange RHSRange(1);
  const IntegerType *ResultTy = cast<IntegerType>(BBI->getType());
  if (isa<BinaryOperator>(BBI)) {
    RHS = dyn_cast<ConstantInt>(BBI->getOperand(1));
    if (!RHS) {
      Result.markOverdefined();
      return Result;
    }
    
    RHSRange = ConstantRange(RHS->getValue(), RHS->getValue()+1);
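    // e.g. for 'add i32 %x, 3' this is the singleton half-open range [3, 4).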
  }
      
  // NOTE: We're currently limited by the set of operations that ConstantRange
  // can evaluate symbolically.  Enhancing that set will allow us to analyze
  // more definitions.
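  // For example, if the LHS range is [0, 10) and the RHS is the constant 3
  // (the singleton range [3, 4)), the Add case below produces [3, 13).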
  switch (BBI->getOpcode()) {
  case Instruction::Add:
    Result.markConstantRange(LHSRange.add(RHSRange));
    break;
  case Instruction::Sub:
    Result.markConstantRange(LHSRange.sub(RHSRange));
    break;
  case Instruction::Mul:
    Result.markConstantRange(LHSRange.multiply(RHSRange));
    break;
  case Instruction::UDiv:
    Result.markConstantRange(LHSRange.udiv(RHSRange));
    break;
  case Instruction::Shl:
    Result.markConstantRange(LHSRange.shl(RHSRange));
    break;
  case Instruction::LShr:
    Result.markConstantRange(LHSRange.lshr(RHSRange));
    break;
  case Instruction::Trunc:
    Result.markConstantRange(LHSRange.truncate(ResultTy->getBitWidth()));
    break;
  case Instruction::SExt:
    Result.markConstantRange(LHSRange.signExtend(ResultTy->getBitWidth()));
    break;
  case Instruction::ZExt:
    Result.markConstantRange(LHSRange.zeroExtend(ResultTy->getBitWidth()));
    break;
  case Instruction::BitCast:
    Result.markConstantRange(LHSRange);
    break;
  case Instruction::And:
    Result.markConstantRange(LHSRange.binaryAnd(RHSRange));
    break;
  case Instruction::Or:
    Result.markConstantRange(LHSRange.binaryOr(RHSRange));
    break;
  
  // Unhandled instructions are overdefined.
  default:
    DEBUG(dbgs() << " compute BB '" << BB->getName()
                 << "' - overdefined because inst def found.\n");
    Result.markOverdefined();
    break;
  }
  
  return Cache[BB] = Result;
}
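
A hypothetical, self-contained sketch (SimpleRange and addRanges are made-up stand-ins, not LLVM's ConstantRange) of the half-open range arithmetic the Add case above relies on, ignoring wraparound: [a, b) + [c, d) covers [a+c, b+d-1).

#include <cassert>
#include <cstdint>

// Made-up stand-in for a half-open [Lower, Upper) range of 32-bit values.
struct SimpleRange {
  uint32_t Lower, Upper;
};

// [a, b) + [c, d) covers sums from a+c up to (b-1)+(d-1), i.e. [a+c, b+d-1).
// Wraparound is ignored here; the real ConstantRange has to handle it.
static SimpleRange addRanges(SimpleRange L, SimpleRange R) {
  return {L.Lower + R.Lower, L.Upper + R.Upper - 1};
}

int main() {
  SimpleRange LHS{0, 10};  // e.g. %x known to lie in [0, 10)
  SimpleRange RHS{3, 4};   // the constant 3 as the singleton range [3, 4)
  SimpleRange Sum = addRanges(LHS, RHS);
  assert(Sum.Lower == 3 && Sum.Upper == 13);  // add i32 %x, 3 lies in [3, 13)
  return 0;
}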
Example #17
0
void Lint::visitShl(BinaryOperator &I) {
  if (ConstantInt *CI =
          dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
    Assert1(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
            "Undefined result: Shift count out of range", &I);
}
Example #18
0
bool LazyValueInfoCache::solveBlockValue(Value *Val, BasicBlock *BB) {
  if (isa<Constant>(Val))
    return true;

  if (lookup(Val).count(BB)) {
    // If we have a cached value, use that.
    DEBUG(dbgs() << "  reuse BB '" << BB->getName()
                 << "' val=" << lookup(Val)[BB] << '\n');

    // Since we're reusing a cached value, we don't need to update the
    // OverDefinedCache. The cache will have been properly updated whenever the
    // cached value was inserted.
    return true;
  }

  // Hold off inserting this value into the Cache in case we have to return
  // false and come back later.
  LVILatticeVal Res;

  Instruction *BBI = dyn_cast<Instruction>(Val);
  if (!BBI || BBI->getParent() != BB) {
    if (!solveBlockValueNonLocal(Res, Val, BB))
      return false;
    insertResult(Val, BB, Res);
    return true;
  }

  if (PHINode *PN = dyn_cast<PHINode>(BBI)) {
    if (!solveBlockValuePHINode(Res, PN, BB))
      return false;
    insertResult(Val, BB, Res);
    return true;
  }

  if (AllocaInst *AI = dyn_cast<AllocaInst>(BBI)) {
    Res = LVILatticeVal::getNot(ConstantPointerNull::get(AI->getType()));
    insertResult(Val, BB, Res);
    return true;
  }

  // We can only analyze the definitions of certain classes of instructions
  // (integral binops and casts at the moment), so bail if this isn't one.
  if ((!isa<BinaryOperator>(BBI) && !isa<CastInst>(BBI)) ||
     !BBI->getType()->isIntegerTy()) {
    DEBUG(dbgs() << " compute BB '" << BB->getName()
                 << "' - overdefined because inst def found.\n");
    Res.markOverdefined();
    insertResult(Val, BB, Res);
    return true;
  }

  // FIXME: We're currently limited to binops with a constant RHS.  This should
  // be improved.
  BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI);
  if (BO && !isa<ConstantInt>(BO->getOperand(1))) {
    DEBUG(dbgs() << " compute BB '" << BB->getName()
                 << "' - overdefined because inst def found.\n");

    Res.markOverdefined();
    insertResult(Val, BB, Res);
    return true;
  }

  if (!solveBlockValueConstantRange(Res, BBI, BB))
    return false;
  insertResult(Val, BB, Res);
  return true;
}
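
A hypothetical toy (not the LLVM implementation; the dependency rule and all names are invented) showing the "return false, queue the dependency, retry later" pattern that solveBlockValue participates in: the caller keeps a worklist and re-runs the solver until the requested value lands in the cache.

#include <cassert>
#include <map>
#include <vector>

// Toy model: value N depends on N-1; solve(N) fails until the dependency is
// cached, mirroring how solveBlockValue defers work instead of recursing.
static std::map<int, int> Cache;
static std::vector<int> Worklist;

static bool solve(int N) {
  if (Cache.count(N))
    return true;                  // already solved
  if (N == 0) {
    Cache[0] = 0;                 // base case solves immediately
    return true;
  }
  if (!Cache.count(N - 1)) {
    Worklist.push_back(N - 1);    // dependency first; N will be retried later
    return false;
  }
  Cache[N] = Cache[N - 1] + 1;
  return true;
}

int main() {
  Worklist.push_back(3);
  while (!Worklist.empty()) {
    int N = Worklist.back();
    if (solve(N))
      Worklist.pop_back();        // solved: retire it
    // otherwise N stays on the worklist; its dependency was pushed above
  }
  assert(Cache[3] == 3);
  return 0;
}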