Example No. 1
void checkEraseAndIterators(SmallPtrSetImpl<int*> &S) {
  int buf[3];

  S.insert(&buf[0]);
  S.insert(&buf[1]);
  S.insert(&buf[2]);

  // Iterators must still be valid after erase() calls.
  auto B = S.begin();
  auto M = std::next(B);
  auto E = S.end();
  EXPECT_TRUE(*B == &buf[0] || *B == &buf[1] || *B == &buf[2]);
  EXPECT_TRUE(*M == &buf[0] || *M == &buf[1] || *M == &buf[2]);
  EXPECT_TRUE(*B != *M);
  int *Removable = *std::next(M);
  // Neither B nor M points to Removable.
  EXPECT_TRUE(Removable == &buf[0] || Removable == &buf[1] ||
              Removable == &buf[2]);
  EXPECT_TRUE(Removable != *B && Removable != *M);

  S.erase(Removable);

  // B, M, and E iterators should still be valid.
  EXPECT_EQ(B, S.begin());
  EXPECT_EQ(M, std::next(B));
  EXPECT_EQ(E, S.end());
  EXPECT_EQ(std::next(M), E);
}
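A minimal sketch, not part of the snippet above, of how such a helper is typically driven from a gtest TEST: the test name and the inline capacity of 4 are illustrative assumptions, and any llvm::SmallPtrSet<int *, N> works because it derives from SmallPtrSetImpl<int *>.

#include "gtest/gtest.h"
#include "llvm/ADT/SmallPtrSet.h"

// Hypothetical driver for the helper above (names are illustrative).
TEST(SmallPtrSetTest, EraseAndIterators) {
  llvm::SmallPtrSet<int *, 4> S; // passed by reference as SmallPtrSetImpl<int *>
  checkEraseAndIterators(S);     // runs the erase/iterator checks shown above
}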
Example No. 2
/// Collect all blocks from \p CurLoop which lie on all possible paths from
/// the header of \p CurLoop (inclusive) to BB (exclusive) into the set
/// \p Predecessors. If \p BB is the header, \p Predecessors will be empty.
static void collectTransitivePredecessors(
    const Loop *CurLoop, const BasicBlock *BB,
    SmallPtrSetImpl<const BasicBlock *> &Predecessors) {
  assert(Predecessors.empty() && "Garbage in predecessors set?");
  assert(CurLoop->contains(BB) && "Should only be called for loop blocks!");
  if (BB == CurLoop->getHeader())
    return;
  SmallVector<const BasicBlock *, 4> WorkList;
  for (auto *Pred : predecessors(BB)) {
    Predecessors.insert(Pred);
    WorkList.push_back(Pred);
  }
  while (!WorkList.empty()) {
    auto *Pred = WorkList.pop_back_val();
    assert(CurLoop->contains(Pred) && "Should only reach loop blocks!");
    // We are not interested in backedges and we don't want to leave the loop.
    if (Pred == CurLoop->getHeader())
      continue;
    // TODO: If BB lies in an inner loop of CurLoop, this will traverse over all
    // blocks of this inner loop, even those that are always executed AFTER the
    // BB. It may make our analysis more conservative than it could be, see test
    // @nested and @nested_no_throw in test/Analysis/MustExecute/loop-header.ll.
    // We can ignore backedges of all loops containing BB to get a slightly more
    // optimistic result.
    for (auto *PredPred : predecessors(Pred))
      if (Predecessors.insert(PredPred).second)
        WorkList.push_back(PredPred);
  }
}
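A minimal sketch of a hypothetical caller, assuming the same file context as the function above (the helper name is illustrative): collect the predecessor set once, then query membership for a candidate block.

// Hypothetical helper: is Candidate among the blocks collected for BB?
static bool isCollectedPredecessor(const Loop *CurLoop, const BasicBlock *BB,
                                   const BasicBlock *Candidate) {
  SmallPtrSet<const BasicBlock *, 8> Predecessors; // concrete set behind SmallPtrSetImpl
  collectTransitivePredecessors(CurLoop, BB, Predecessors);
  return Predecessors.count(Candidate);
}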
Example No. 3
void LTOCodeGenerator::
applyRestriction(GlobalValue &GV,
                 ArrayRef<StringRef> Libcalls,
                 std::vector<const char*> &MustPreserveList,
                 SmallPtrSetImpl<GlobalValue*> &AsmUsed,
                 Mangler &Mangler) {
  // There are no restrictions to apply to declarations.
  if (GV.isDeclaration())
    return;

  // There is nothing more restrictive than private linkage.
  if (GV.hasPrivateLinkage())
    return;

  SmallString<64> Buffer;
  TargetMach->getNameWithPrefix(Buffer, &GV, Mangler);

  if (MustPreserveSymbols.count(Buffer))
    MustPreserveList.push_back(GV.getName().data());
  if (AsmUndefinedRefs.count(Buffer))
    AsmUsed.insert(&GV);

  // Conservatively append user-supplied runtime library functions to
  // llvm.compiler.used.  These could be internalized and deleted by
  // optimizations like -globalopt, causing problems when later optimizations
  // add new library calls (e.g., llvm.memset => memset and printf => puts).
  // Leave it to the linker to remove any dead code (e.g. with -dead_strip).
  if (isa<Function>(GV) &&
      std::binary_search(Libcalls.begin(), Libcalls.end(), GV.getName()))
    AsmUsed.insert(&GV);
}
Example No. 4
/// Walk up the CFG from StartPos (which is in StartBB) and find local and
/// non-local dependencies on Arg.
///
/// TODO: Cache results?
void
llvm::objcarc::FindDependencies(DependenceKind Flavor,
                                const Value *Arg,
                                BasicBlock *StartBB, Instruction *StartInst,
                                SmallPtrSetImpl<Instruction *> &DependingInsts,
                                SmallPtrSetImpl<const BasicBlock *> &Visited,
                                ProvenanceAnalysis &PA) {
  BasicBlock::iterator StartPos = StartInst;

  SmallVector<std::pair<BasicBlock *, BasicBlock::iterator>, 4> Worklist;
  Worklist.push_back(std::make_pair(StartBB, StartPos));
  do {
    std::pair<BasicBlock *, BasicBlock::iterator> Pair =
      Worklist.pop_back_val();
    BasicBlock *LocalStartBB = Pair.first;
    BasicBlock::iterator LocalStartPos = Pair.second;
    BasicBlock::iterator StartBBBegin = LocalStartBB->begin();
    for (;;) {
      if (LocalStartPos == StartBBBegin) {
        pred_iterator PI(LocalStartBB), PE(LocalStartBB, false);
        if (PI == PE)
          // If we've reached the function entry, produce a null dependence.
          DependingInsts.insert(nullptr);
        else
          // Add the predecessors to the worklist.
          do {
            BasicBlock *PredBB = *PI;
            if (Visited.insert(PredBB))
              Worklist.push_back(std::make_pair(PredBB, PredBB->end()));
          } while (++PI != PE);
        break;
      }

      Instruction *Inst = --LocalStartPos;
      if (Depends(Flavor, Inst, Arg, PA)) {
        DependingInsts.insert(Inst);
        break;
      }
    }
  } while (!Worklist.empty());

  // Determine whether the original StartBB post-dominates all of the blocks we
  // visited. If not, insert a sentinel indicating that most optimizations are
  // not safe.
  for (SmallPtrSet<const BasicBlock *, 4>::const_iterator I = Visited.begin(),
       E = Visited.end(); I != E; ++I) {
    const BasicBlock *BB = *I;
    if (BB == StartBB)
      continue;
    const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
    for (succ_const_iterator SI(TI), SE(TI, false); SI != SE; ++SI) {
      const BasicBlock *Succ = *SI;
      if (Succ != StartBB && !Visited.count(Succ)) {
        DependingInsts.insert(reinterpret_cast<Instruction *>(-1));
        return;
      }
    }
  }
}
Example No. 5
/// MarkBlocksLiveIn - Insert BB and all of its predecessors into LiveBBs until
/// we reach blocks we've already seen.
static void MarkBlocksLiveIn(BasicBlock *BB,
                             SmallPtrSetImpl<BasicBlock *> &LiveBBs) {
  if (!LiveBBs.insert(BB).second)
    return; // already been here.

  df_iterator_default_set<BasicBlock*> Visited;

  for (BasicBlock *B : inverse_depth_first_ext(BB, Visited))
    LiveBBs.insert(B);
}
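A minimal sketch of a hypothetical caller, assuming the same file context as above (function and variable names are illustrative): compute the set of blocks that can reach a use and test a candidate block against it.

// Hypothetical helper: can control flow starting in Candidate reach UseBB?
static bool canReachUse(BasicBlock *UseBB, BasicBlock *Candidate) {
  SmallPtrSet<BasicBlock *, 64> LiveBBs; // concrete set behind SmallPtrSetImpl
  MarkBlocksLiveIn(UseBB, LiveBBs);      // UseBB plus all of its transitive predecessors
  return LiveBBs.count(Candidate);
}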
Example No. 6
/// Find values that are marked as llvm.used.
static void findUsedValues(GlobalVariable *LLVMUsed,
                           SmallPtrSetImpl<const GlobalValue*> &UsedValues) {
    if (!LLVMUsed) return;
    UsedValues.insert(LLVMUsed);

    ConstantArray *Inits = cast<ConstantArray>(LLVMUsed->getInitializer());

    for (unsigned i = 0, e = Inits->getNumOperands(); i != e; ++i)
        if (GlobalValue *GV =
                    dyn_cast<GlobalValue>(Inits->getOperand(i)->stripPointerCasts()))
            UsedValues.insert(GV);
}
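Example No. 7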
void AMDGPUAlwaysInline::recursivelyVisitUsers(
  GlobalValue &GV,
  SmallPtrSetImpl<Function *> &FuncsToAlwaysInline) {
  SmallVector<User *, 16> Stack;

  SmallPtrSet<const Value *, 8> Visited;

  for (User *U : GV.users())
    Stack.push_back(U);

  while (!Stack.empty()) {
    User *U = Stack.pop_back_val();
    if (!Visited.insert(U).second)
      continue;

    if (Instruction *I = dyn_cast<Instruction>(U)) {
      Function *F = I->getParent()->getParent();
      if (!AMDGPU::isEntryFunctionCC(F->getCallingConv())) {
        FuncsToAlwaysInline.insert(F);
        Stack.push_back(F);
      }

      // No need to look at further users, but we do need to inline any callers.
      continue;
    }

    for (User *UU : U->users())
      Stack.push_back(UU);
  }
}
Example No. 8
bool RecurrenceDescriptor::getSourceExtensionKind(
    Instruction *Start, Instruction *Exit, Type *RT, bool &IsSigned,
    SmallPtrSetImpl<Instruction *> &Visited,
    SmallPtrSetImpl<Instruction *> &CI) {

  SmallVector<Instruction *, 8> Worklist;
  bool FoundOneOperand = false;
  unsigned DstSize = RT->getPrimitiveSizeInBits();
  Worklist.push_back(Exit);

  // Traverse the instructions in the reduction expression, beginning with the
  // exit value.
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (Use &U : I->operands()) {

      // Terminate the traversal if the operand is not an instruction, or we
      // reach the starting value.
      Instruction *J = dyn_cast<Instruction>(U.get());
      if (!J || J == Start)
        continue;

      // Otherwise, investigate the operation if it is also in the expression.
      if (Visited.count(J)) {
        Worklist.push_back(J);
        continue;
      }

      // If the operand is not in Visited, it is not a reduction operation, but
      // it does feed into one. Make sure it is either a single-use sign- or
      // zero-extend instruction.
      CastInst *Cast = dyn_cast<CastInst>(J);
      bool IsSExtInst = isa<SExtInst>(J);
      if (!Cast || !Cast->hasOneUse() || !(isa<ZExtInst>(J) || IsSExtInst))
        return false;

      // Ensure the source type of the extend is no larger than the reduction
      // type. It is not necessary for the types to be identical.
      unsigned SrcSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
      if (SrcSize > DstSize)
        return false;

      // Furthermore, ensure that all such extends are of the same kind.
      if (FoundOneOperand) {
        if (IsSigned != IsSExtInst)
          return false;
      } else {
        FoundOneOperand = true;
        IsSigned = IsSExtInst;
      }

      // Lastly, if the source type of the extend matches the reduction type,
      // add the extend to CI so that we can avoid accounting for it in the
      // cost model.
      if (SrcSize == DstSize)
        CI.insert(Cast);
    }
  }
  return true;
}
Example No. 9
static bool isSafeToMove(Instruction *Inst, AliasAnalysis &AA,
                         SmallPtrSetImpl<Instruction *> &Stores) {

  if (Inst->mayWriteToMemory()) {
    Stores.insert(Inst);
    return false;
  }

  if (LoadInst *L = dyn_cast<LoadInst>(Inst)) {
    MemoryLocation Loc = MemoryLocation::get(L);
    for (Instruction *S : Stores)
      if (isModSet(AA.getModRefInfo(S, Loc)))
        return false;
  }

  if (Inst->isTerminator() || isa<PHINode>(Inst) || Inst->isEHPad() ||
      Inst->mayThrow())
    return false;

  if (auto *Call = dyn_cast<CallBase>(Inst)) {
    // Convergent operations cannot be made control-dependent on additional
    // values.
    if (Call->hasFnAttr(Attribute::Convergent))
      return false;

    for (Instruction *S : Stores)
      if (isModSet(AA.getModRefInfo(S, Call)))
        return false;
  }

  return true;
}
Example No. 10
static void collectMDInDomain(const MDNode *List, const MDNode *Domain,
                              SmallPtrSetImpl<const MDNode *> &Nodes) {
  for (const MDOperand &MDOp : List->operands())
    if (const MDNode *MD = dyn_cast<MDNode>(MDOp))
      if (AliasScopeNode(MD).getDomain() == Domain)
        Nodes.insert(MD);
}
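A minimal sketch of a hypothetical caller, assuming the same file context as above (the helper name is illustrative): gather the scopes of an alias-scope list that belong to one domain, then check whether a particular scope is among them.

// Hypothetical helper: does List contain Scope within the given Domain?
static bool listHasScopeInDomain(const MDNode *List, const MDNode *Domain,
                                 const MDNode *Scope) {
  SmallPtrSet<const MDNode *, 16> Nodes; // concrete set behind SmallPtrSetImpl
  collectMDInDomain(List, Domain, Nodes);
  return Nodes.count(Scope);
}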
Example No. 11
static bool isSafeToMove(Instruction *Inst, AliasAnalysis *AA,
                         SmallPtrSetImpl<Instruction *> &Stores) {

  if (Inst->mayWriteToMemory()) {
    Stores.insert(Inst);
    return false;
  }

  if (LoadInst *L = dyn_cast<LoadInst>(Inst)) {
    MemoryLocation Loc = MemoryLocation::get(L);
    for (Instruction *S : Stores)
      if (AA->getModRefInfo(S, Loc) & MRI_Mod)
        return false;
  }

  if (isa<TerminatorInst>(Inst) || isa<PHINode>(Inst))
    return false;

  // Convergent operations cannot be made control-dependent on additional
  // values.
  if (auto CS = CallSite(Inst)) {
    if (CS.hasFnAttr(Attribute::Convergent))
      return false;
  }

  return true;
}
Example No. 12
/// Collect cast instructions that can be ignored in the vectorizer's cost
/// model, given a reduction exit value and the minimal type in which the
/// reduction can be represented.
static void collectCastsToIgnore(Loop *TheLoop, Instruction *Exit,
                                 Type *RecurrenceType,
                                 SmallPtrSetImpl<Instruction *> &Casts) {

  SmallVector<Instruction *, 8> Worklist;
  SmallPtrSet<Instruction *, 8> Visited;
  Worklist.push_back(Exit);

  while (!Worklist.empty()) {
    Instruction *Val = Worklist.pop_back_val();
    Visited.insert(Val);
    if (auto *Cast = dyn_cast<CastInst>(Val))
      if (Cast->getSrcTy() == RecurrenceType) {
        // If the source type of a cast instruction is equal to the recurrence
        // type, it will be eliminated, and should be ignored in the vectorizer
        // cost model.
        Casts.insert(Cast);
        continue;
      }

    // Add all operands to the work list if they are loop-varying values that
    // we haven't yet visited.
    for (Value *O : cast<User>(Val)->operands())
      if (auto *I = dyn_cast<Instruction>(O))
        if (TheLoop->contains(I) && !Visited.count(I))
          Worklist.push_back(I);
  }
}
Example No. 13
void ScopedNoAliasAAResult::collectMDInDomain(
    const MDNode *List, const MDNode *Domain,
    SmallPtrSetImpl<const MDNode *> &Nodes) const {
  for (unsigned i = 0, ie = List->getNumOperands(); i != ie; ++i)
    if (const MDNode *MD = dyn_cast<MDNode>(List->getOperand(i)))
      if (AliasScopeNode(MD).getDomain() == Domain)
        Nodes.insert(MD);
}
Example No. 14
/// getMachineBasicBlocks - Populate the given set with machine basic blocks
/// that contain machine instructions belonging to the lexical scope identified
/// by DebugLoc.
void LexicalScopes::getMachineBasicBlocks(
    const DILocation *DL, SmallPtrSetImpl<const MachineBasicBlock *> &MBBs) {
  MBBs.clear();
  LexicalScope *Scope = getOrCreateLexicalScope(DL);
  if (!Scope)
    return;

  if (Scope == CurrentFnLexicalScope) {
    for (const auto &MBB : *MF)
      MBBs.insert(&MBB);
    return;
  }

  SmallVectorImpl<InsnRange> &InsnRanges = Scope->getRanges();
  for (auto &R : InsnRanges)
    MBBs.insert(R.first->getParent());
}
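A minimal sketch of a hypothetical caller (the helper name is illustrative; LexicalScopes::getMachineBasicBlocks is the member shown above): ask whether a machine basic block belongs to the lexical scope of a debug location.

// Hypothetical helper: does the scope identified by DL cover MBB?
static bool scopeCoversBlock(LexicalScopes &LS, const DILocation *DL,
                             const MachineBasicBlock *MBB) {
  SmallPtrSet<const MachineBasicBlock *, 4> MBBs; // concrete set behind SmallPtrSetImpl
  LS.getMachineBasicBlocks(DL, MBBs);             // clears MBBs and repopulates it
  return MBBs.count(MBB);
}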
Example No. 15
    Action walkToTypePre(Type ty) override {
      if (ty->is<DependentMemberType>())
        return Action::SkipChildren;

      if (auto typeVar = ty->getAs<TypeVariableType>())
        typeVars.insert(typeVar);
      return Action::Continue;
    }
Example No. 16
/// MarkBlocksLiveIn - Insert BB and all of its predecessors into LiveBBs until
/// we reach blocks we've already seen.
static void MarkBlocksLiveIn(BasicBlock *BB,
                             SmallPtrSetImpl<BasicBlock *> &LiveBBs) {
  if (!LiveBBs.insert(BB).second)
    return; // already been here.

  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
    MarkBlocksLiveIn(*PI, LiveBBs);
}
Example No. 17
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
                            SmallPtrSetImpl<Constant *> &SimpleConstants,
                            const DataLayout &DL) {
  // If we already checked this constant, we win.
  if (!SimpleConstants.insert(C).second)
    return true;
  // Check the constant.
  return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL);
}
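A minimal sketch of a hypothetical call site (the wrapper name is illustrative): the caller owns the memoization set, so repeated queries over the operands of one expression share results.

// Hypothetical wrapper: are all operands of CE simple enough to commit?
static bool allOperandsSimpleEnough(ConstantExpr *CE, const DataLayout &DL) {
  SmallPtrSet<Constant *, 8> SimpleConstants; // memoization shared across the loop
  for (Value *Op : CE->operands())
    if (!isSimpleEnoughValueToCommit(cast<Constant>(Op), SimpleConstants, DL))
      return false;
  return true;
}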
Example No. 18
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, const DominatorTree *DT,
    SmallPtrSetImpl<const Value *> &Visited) {
  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
    return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  bool CheckForNonNull = false;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull));
  if (KnownDerefBytes.getBoolValue()) {
    if (KnownDerefBytes.uge(Size))
      if (!CheckForNonNull || isKnownNonNullAt(V, CtxI, DT))
        return isAligned(V, Align, DL);
  }

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getPointerTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Align)).isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes.  If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    return Visited.insert(Base).second &&
           isDereferenceableAndAlignedPointer(Base, Align, Offset + Size, DL,
                                              CtxI, DT, Visited);
  }

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(
        RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  // If we don't know, assume the worst.
  return false;
}
Example No. 19
/// getMachineBasicBlocks - Populate the given set with machine basic blocks
/// that contain machine instructions belonging to the lexical scope identified
/// by DebugLoc.
void LexicalScopes::getMachineBasicBlocks(
    const MDLocation *DL, SmallPtrSetImpl<const MachineBasicBlock *> &MBBs) {
  MBBs.clear();
  LexicalScope *Scope = getOrCreateLexicalScope(DL);
  if (!Scope)
    return;

  if (Scope == CurrentFnLexicalScope) {
    for (const auto &MBB : *MF)
      MBBs.insert(&MBB);
    return;
  }

  SmallVectorImpl<InsnRange> &InsnRanges = Scope->getRanges();
  for (SmallVectorImpl<InsnRange>::iterator I = InsnRanges.begin(),
                                            E = InsnRanges.end();
       I != E; ++I) {
    InsnRange &R = *I;
    MBBs.insert(R.first->getParent());
  }
}
Example No. 20
/// Compute the set of GlobalValues that depend on V.
/// The recursion stops as soon as a GlobalValue is met.
void GlobalDCEPass::ComputeDependencies(Value *V,
                                        SmallPtrSetImpl<GlobalValue *> &Deps) {
  if (auto *I = dyn_cast<Instruction>(V)) {
    Function *Parent = I->getParent()->getParent();
    Deps.insert(Parent);
  } else if (auto *GV = dyn_cast<GlobalValue>(V)) {
    Deps.insert(GV);
  } else if (auto *CE = dyn_cast<Constant>(V)) {
    // Avoid walking the whole tree of a big ConstantExpr multiple times.
    auto Where = ConstantDependenciesCache.find(CE);
    if (Where != ConstantDependenciesCache.end()) {
      auto const &K = Where->second;
      Deps.insert(K.begin(), K.end());
    } else {
      SmallPtrSetImpl<GlobalValue *> &LocalDeps = ConstantDependenciesCache[CE];
      for (User *CEUser : CE->users())
        ComputeDependencies(CEUser, LocalDeps);
      Deps.insert(LocalDeps.begin(), LocalDeps.end());
    }
  }
}
Example No. 21
static bool contains(SmallPtrSetImpl<ConstantExpr *> &Cache, ConstantExpr *Expr,
                     Constant *C) {
  if (!Cache.insert(Expr).second)
    return false;

  for (auto &O : Expr->operands()) {
    if (O == C)
      return true;
    auto *CE = dyn_cast<ConstantExpr>(O);
    if (!CE)
      continue;
    if (contains(Cache, CE, C))
      return true;
  }
  return false;
}
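A minimal sketch of a hypothetical convenience wrapper (not from the original source): start each query with a fresh cycle-guard cache.

// Hypothetical wrapper: does Expr transitively use the constant C?
static bool exprUsesConstant(ConstantExpr *Expr, Constant *C) {
  SmallPtrSet<ConstantExpr *, 8> Cache; // guards against revisiting expressions
  return contains(Cache, Expr, C);
}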
Example No. 22
/// Visit each register class belonging to the given register bank.
///
/// A class belongs to the bank iff any of these apply:
/// * It is explicitly specified
/// * It is a subclass of a class that is a member.
/// * It is a class containing subregisters of the registers of a class that
///   is a member. This is known as a subreg-class.
///
/// This function must be called for each explicitly specified register class.
///
/// \param RC The register class to search.
/// \param Kind A debug string containing the path the visitor took to reach RC.
/// \param VisitFn The action to take for each class visited. It may be called
///                multiple times for a given class if there are multiple paths
///                to the class.
static void visitRegisterBankClasses(
    CodeGenRegBank &RegisterClassHierarchy, const CodeGenRegisterClass *RC,
    const Twine Kind,
    std::function<void(const CodeGenRegisterClass *, StringRef)> VisitFn,
    SmallPtrSetImpl<const CodeGenRegisterClass *> &VisitedRCs) {

  // Make sure we only visit each class once to avoid infinite loops.
  if (VisitedRCs.count(RC))
    return;
  VisitedRCs.insert(RC);

  // Visit each explicitly named class.
  VisitFn(RC, Kind.str());

  for (const auto &PossibleSubclass : RegisterClassHierarchy.getRegClasses()) {
    std::string TmpKind =
        (Twine(Kind) + " (" + PossibleSubclass.getName() + ")").str();

    // Visit each subclass of an explicitly named class.
    if (RC != &PossibleSubclass && RC->hasSubClass(&PossibleSubclass))
      visitRegisterBankClasses(RegisterClassHierarchy, &PossibleSubclass,
                               TmpKind + " " + RC->getName() + " subclass",
                               VisitFn, VisitedRCs);

    // Visit each class that contains only subregisters of RC with a common
    // subregister-index.
    //
    // More precisely, PossibleSubclass is a subreg-class iff Reg:SubIdx is in
    // PossibleSubclass for all registers Reg from RC using any
    // subregister-index SubReg
    for (const auto &SubIdx : RegisterClassHierarchy.getSubRegIndices()) {
      BitVector BV(RegisterClassHierarchy.getRegClasses().size());
      PossibleSubclass.getSuperRegClasses(&SubIdx, BV);
      if (BV.test(RC->EnumValue)) {
        std::string TmpKind2 = (Twine(TmpKind) + " " + RC->getName() +
                                " class-with-subregs: " + RC->getName())
                                   .str();
        VisitFn(&PossibleSubclass, TmpKind2);
      }
    }
  }
}
Example No. 23
bool GuardWideningImpl::isAvailableAt(Value *V, Instruction *Loc,
                                      SmallPtrSetImpl<Instruction *> &Visited) {
  auto *Inst = dyn_cast<Instruction>(V);
  if (!Inst || DT.dominates(Inst, Loc) || Visited.count(Inst))
    return true;

  if (!isSafeToSpeculativelyExecute(Inst, Loc, &DT) ||
      Inst->mayReadFromMemory())
    return false;

  Visited.insert(Inst);

  // We only want to go _up_ the dominance chain when recursing.
  assert(!isa<PHINode>(Loc) &&
         "PHIs should return false for isSafeToSpeculativelyExecute");
  assert(DT.isReachableFromEntry(Inst->getParent()) &&
         "We did a DFS from the block entry!");
  return all_of(Inst->operands(),
                [&](Value *Op) { return isAvailableAt(Op, Loc, Visited); });
}
Example No. 24
static bool isSafeToMove(Instruction *Inst, AliasAnalysis *AA,
                         SmallPtrSetImpl<Instruction *> &Stores) {

  if (Inst->mayWriteToMemory()) {
    Stores.insert(Inst);
    return false;
  }

  if (LoadInst *L = dyn_cast<LoadInst>(Inst)) {
    AliasAnalysis::Location Loc = AA->getLocation(L);
    for (Instruction *S : Stores)
      if (AA->getModRefInfo(S, Loc) & AliasAnalysis::Mod)
        return false;
  }

  if (isa<TerminatorInst>(Inst) || isa<PHINode>(Inst))
    return false;

  return true;
}
Example No. 25
bool GuardWideningImpl::parseRangeChecks(
    Value *CheckCond, SmallVectorImpl<GuardWideningImpl::RangeCheck> &Checks,
    SmallPtrSetImpl<Value *> &Visited) {
  if (!Visited.insert(CheckCond).second)
    return true;

  using namespace llvm::PatternMatch;

  {
    Value *AndLHS, *AndRHS;
    if (match(CheckCond, m_And(m_Value(AndLHS), m_Value(AndRHS))))
      return parseRangeChecks(AndLHS, Checks) &&
             parseRangeChecks(AndRHS, Checks);
  }

  auto *IC = dyn_cast<ICmpInst>(CheckCond);
  if (!IC || !IC->getOperand(0)->getType()->isIntegerTy() ||
      (IC->getPredicate() != ICmpInst::ICMP_ULT &&
       IC->getPredicate() != ICmpInst::ICMP_UGT))
    return false;

  Value *CmpLHS = IC->getOperand(0), *CmpRHS = IC->getOperand(1);
  if (IC->getPredicate() == ICmpInst::ICMP_UGT)
    std::swap(CmpLHS, CmpRHS);

  auto &DL = IC->getModule()->getDataLayout();

  GuardWideningImpl::RangeCheck Check(
      CmpLHS, cast<ConstantInt>(ConstantInt::getNullValue(CmpRHS->getType())),
      CmpRHS, IC);

  if (!isKnownNonNegative(Check.getLength(), DL))
    return false;

  // What we have in \c Check now is a correct interpretation of \p CheckCond.
  // Try to see if we can move some constant offsets into the \c Offset field.

  bool Changed;
  auto &Ctx = CheckCond->getContext();

  do {
    Value *OpLHS;
    ConstantInt *OpRHS;
    Changed = false;

#ifndef NDEBUG
    auto *BaseInst = dyn_cast<Instruction>(Check.getBase());
    assert((!BaseInst || DT.isReachableFromEntry(BaseInst->getParent())) &&
           "Unreachable instruction?");
#endif

    if (match(Check.getBase(), m_Add(m_Value(OpLHS), m_ConstantInt(OpRHS)))) {
      Check.setBase(OpLHS);
      APInt NewOffset = Check.getOffsetValue() + OpRHS->getValue();
      Check.setOffset(ConstantInt::get(Ctx, NewOffset));
      Changed = true;
    } else if (match(Check.getBase(),
                     m_Or(m_Value(OpLHS), m_ConstantInt(OpRHS)))) {
      unsigned BitWidth = OpLHS->getType()->getScalarSizeInBits();
      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
      computeKnownBits(OpLHS, KnownZero, KnownOne, DL);
      if ((OpRHS->getValue() & KnownZero) == OpRHS->getValue()) {
        Check.setBase(OpLHS);
        APInt NewOffset = Check.getOffsetValue() + OpRHS->getValue();
        Check.setOffset(ConstantInt::get(Ctx, NewOffset));
        Changed = true;
      }
    }
  } while (Changed);

  Checks.push_back(Check);
  return true;
}
Example No. 26
// Compute the unlikely successors to the block BB in the loop L, specifically
// those that are unlikely because this is a loop, and add them to the
// UnlikelyBlocks set.
static void
computeUnlikelySuccessors(const BasicBlock *BB, Loop *L,
                          SmallPtrSetImpl<const BasicBlock*> &UnlikelyBlocks) {
  // Sometimes in a loop we have a branch whose condition is made false by
  // taking it. This is typically something like
  //  int n = 0;
  //  while (...) {
  //    if (++n >= MAX) {
  //      n = 0;
  //    }
  //  }
  // In this sort of situation taking the branch means that at the very least it
  // won't be taken again in the next iteration of the loop, so we should
  // consider it less likely than a typical branch.
  //
  // We detect this by looking back through the graph of PHI nodes that sets the
  // value that the condition depends on, and seeing if we can reach a successor
  // block which can be determined to make the condition false.
  //
  // FIXME: We currently consider unlikely blocks to be half as likely as other
  // blocks, but if we consider the example above the likelihood is actually
  // 1/MAX. We could therefore be more precise in how unlikely we consider
  // blocks to be, but it would require more careful examination of the form
  // of the comparison expression.
  const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return;

  // Check if the branch is based on an instruction compared with a constant
  CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition());
  if (!CI || !isa<Instruction>(CI->getOperand(0)) ||
      !isa<Constant>(CI->getOperand(1)))
    return;

  // Either the instruction must be a PHI, or a chain of operations involving
  // constants that ends in a PHI which we can then collapse into a single value
  // if the PHI value is known.
  Instruction *CmpLHS = dyn_cast<Instruction>(CI->getOperand(0));
  PHINode *CmpPHI = dyn_cast<PHINode>(CmpLHS);
  Constant *CmpConst = dyn_cast<Constant>(CI->getOperand(1));
  // Collect the instructions until we hit a PHI
  SmallVector<BinaryOperator *, 1> InstChain;
  while (!CmpPHI && CmpLHS && isa<BinaryOperator>(CmpLHS) &&
         isa<Constant>(CmpLHS->getOperand(1))) {
    // Stop if the chain extends outside of the loop
    if (!L->contains(CmpLHS))
      return;
    InstChain.push_back(cast<BinaryOperator>(CmpLHS));
    CmpLHS = dyn_cast<Instruction>(CmpLHS->getOperand(0));
    if (CmpLHS)
      CmpPHI = dyn_cast<PHINode>(CmpLHS);
  }
  if (!CmpPHI || !L->contains(CmpPHI))
    return;

  // Trace the phi node to find all values that come from successors of BB
  SmallPtrSet<PHINode*, 8> VisitedInsts;
  SmallVector<PHINode*, 8> WorkList;
  WorkList.push_back(CmpPHI);
  VisitedInsts.insert(CmpPHI);
  while (!WorkList.empty()) {
    PHINode *P = WorkList.back();
    WorkList.pop_back();
    for (BasicBlock *B : P->blocks()) {
      // Skip blocks that aren't part of the loop
      if (!L->contains(B))
        continue;
      Value *V = P->getIncomingValueForBlock(B);
      // If the source is a PHI add it to the work list if we haven't
      // already visited it.
      if (PHINode *PN = dyn_cast<PHINode>(V)) {
        if (VisitedInsts.insert(PN).second)
          WorkList.push_back(PN);
        continue;
      }
      // If this incoming value is a constant and B is a successor of BB, then
      // we can constant-evaluate the compare to see if it makes the branch be
      // taken or not.
      Constant *CmpLHSConst = dyn_cast<Constant>(V);
      if (!CmpLHSConst ||
          std::find(succ_begin(BB), succ_end(BB), B) == succ_end(BB))
        continue;
      // First collapse InstChain
      for (Instruction *I : llvm::reverse(InstChain)) {
        CmpLHSConst = ConstantExpr::get(I->getOpcode(), CmpLHSConst,
                                        cast<Constant>(I->getOperand(1)), true);
        if (!CmpLHSConst)
          break;
      }
      if (!CmpLHSConst)
        continue;
      // Now constant-evaluate the compare
      Constant *Result = ConstantExpr::getCompare(CI->getPredicate(),
                                                  CmpLHSConst, CmpConst, true);
      // If the result means we don't branch to the block then that block is
      // unlikely.
      if (Result &&
          ((Result->isZeroValue() && B == BI->getSuccessor(0)) ||
           (Result->isOneValue() && B == BI->getSuccessor(1))))
        UnlikelyBlocks.insert(B);
    }
  }
}
Example No. 27
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, unsigned Align, const DataLayout &DL,
    const Instruction *CtxI, const DominatorTree *DT,
    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited) {
    // Note that it is not safe to speculate into a malloc'd region because
    // malloc may return null.

    // These are obviously ok if aligned.
    if (isa<AllocaInst>(V))
        return isAligned(V, Align, DL);

    // It's not always safe to follow a bitcast, for example:
    //   bitcast i8* (alloca i8) to i32*
    // would result in a 4-byte load from a 1-byte alloca. However,
    // if we're casting from a pointer from a type of larger size
    // to a type of smaller size (or the same size), and the alignment
    // is at least as large as for the resulting pointer type, then
    // we can look through the bitcast.
    if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
        Type *STy = BC->getSrcTy()->getPointerElementType(),
              *DTy = BC->getDestTy()->getPointerElementType();
        if (STy->isSized() && DTy->isSized() &&
                (DL.getTypeStoreSize(STy) >= DL.getTypeStoreSize(DTy)) &&
                (DL.getABITypeAlignment(STy) >= DL.getABITypeAlignment(DTy)))
            return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, DL,
                    CtxI, DT, TLI, Visited);
    }

    // Global variables which can't collapse to null are ok.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
        if (!GV->hasExternalWeakLinkage())
            return isAligned(V, Align, DL);

    // byval arguments are okay.
    if (const Argument *A = dyn_cast<Argument>(V))
        if (A->hasByValAttr())
            return isAligned(V, Align, DL);

    if (isDereferenceableFromAttribute(V, DL, CtxI, DT, TLI))
        return isAligned(V, Align, DL);

    // For GEPs, determine if the indexing lands within the allocated object.
    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
        Type *Ty = GEP->getResultElementType();
        const Value *Base = GEP->getPointerOperand();

        // Conservatively require that the base pointer be fully dereferenceable
        // and aligned.
        if (!Visited.insert(Base).second)
            return false;
        if (!isDereferenceableAndAlignedPointer(Base, Align, DL, CtxI, DT, TLI,
                                                Visited))
            return false;

        APInt Offset(DL.getPointerTypeSizeInBits(GEP->getType()), 0);
        if (!GEP->accumulateConstantOffset(DL, Offset))
            return false;

        // Check if the load is within the bounds of the underlying object
        // and offset is aligned.
        uint64_t LoadSize = DL.getTypeStoreSize(Ty);
        Type *BaseType = GEP->getSourceElementType();
        assert(isPowerOf2_32(Align) && "must be a power of 2!");
        return (Offset + LoadSize).ule(DL.getTypeAllocSize(BaseType)) &&
               !(Offset & APInt(Offset.getBitWidth(), Align-1));
    }

    // For gc.relocate, look through relocations
    if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
        return isDereferenceableAndAlignedPointer(
                   RelocateInst->getDerivedPtr(), Align, DL, CtxI, DT, TLI, Visited);

    if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
        return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, DL,
                CtxI, DT, TLI, Visited);

    // If we don't know, assume the worst.
    return false;
}
Example No. 28
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, const DominatorTree *DT,
    SmallPtrSetImpl<const Value *> &Visited) {
  // Already visited?  Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
    return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  bool CheckForNonNull = false;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull));
  if (KnownDerefBytes.getBoolValue()) {
    if (KnownDerefBytes.uge(Size))
      if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT))
        return isAligned(V, Align, DL);
  }

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Align)).isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes.  If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Align, Offset + Size.sextOrTrunc(Offset.getBitWidth()),
        DL, CtxI, DT, Visited);
  }

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(
        RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  if (auto CS = ImmutableCallSite(V))
    if (auto *RP = getArgumentAliasingToReturnedPointer(CS))
      return isDereferenceableAndAlignedPointer(RP, Align, Size, DL, CtxI, DT,
                                                Visited);

  // If we don't know, assume the worst.
  return false;
}
Example No. 29
/// isDereferenceablePointer - Test if this value is always a pointer to
/// allocated and suitably aligned memory for a simple load or store.
static bool isDereferenceablePointer(const Value *V, const DataLayout *DL,
                                     SmallPtrSetImpl<const Value *> &Visited) {
  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // These are obviously ok.
  if (isa<AllocaInst>(V)) return true;

  // It's not always safe to follow a bitcast, for example:
  //   bitcast i8* (alloca i8) to i32*
  // would result in a 4-byte load from a 1-byte alloca. However,
  // if we're casting from a pointer from a type of larger size
  // to a type of smaller size (or the same size), and the alignment
  // is at least as large as for the resulting pointer type, then
  // we can look through the bitcast.
  if (DL)
    if (const BitCastInst* BC = dyn_cast<BitCastInst>(V)) {
      Type *STy = BC->getSrcTy()->getPointerElementType(),
           *DTy = BC->getDestTy()->getPointerElementType();
      if (STy->isSized() && DTy->isSized() &&
          (DL->getTypeStoreSize(STy) >=
           DL->getTypeStoreSize(DTy)) &&
          (DL->getABITypeAlignment(STy) >=
           DL->getABITypeAlignment(DTy)))
        return isDereferenceablePointer(BC->getOperand(0), DL, Visited);
    }

  // Global variables which can't collapse to null are ok.
  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return !GV->hasExternalWeakLinkage();

  // byval arguments are okay. Arguments specifically marked as
  // dereferenceable are okay too.
  if (const Argument *A = dyn_cast<Argument>(V)) {
    if (A->hasByValAttr())
      return true;
    else if (uint64_t Bytes = A->getDereferenceableBytes()) {
      Type *Ty = V->getType()->getPointerElementType();
      if (Ty->isSized() && DL && DL->getTypeStoreSize(Ty) <= Bytes)
        return true;
    }

    return false;
  }

  // Return values from call sites specifically marked as dereferenceable are
  // also okay.
  if (ImmutableCallSite CS = V) {
    if (uint64_t Bytes = CS.getDereferenceableBytes(0)) {
      Type *Ty = V->getType()->getPointerElementType();
      if (Ty->isSized() && DL && DL->getTypeStoreSize(Ty) <= Bytes)
        return true;
    }
  }

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    // Conservatively require that the base pointer be fully dereferenceable.
    if (!Visited.insert(GEP->getOperand(0)))
      return false;
    if (!isDereferenceablePointer(GEP->getOperand(0), DL, Visited))
      return false;
    // Check the indices.
    gep_type_iterator GTI = gep_type_begin(GEP);
    for (User::const_op_iterator I = GEP->op_begin()+1,
         E = GEP->op_end(); I != E; ++I) {
      Value *Index = *I;
      Type *Ty = *GTI++;
      // Struct indices can't be out of bounds.
      if (isa<StructType>(Ty))
        continue;
      ConstantInt *CI = dyn_cast<ConstantInt>(Index);
      if (!CI)
        return false;
      // Zero is always ok.
      if (CI->isZero())
        continue;
      // Check to see that it's within the bounds of an array.
      ArrayType *ATy = dyn_cast<ArrayType>(Ty);
      if (!ATy)
        return false;
      if (CI->getValue().getActiveBits() > 64)
        return false;
      if (CI->getZExtValue() >= ATy->getNumElements())
        return false;
    }
    // Indices check out; this is dereferenceable.
    return true;
  }

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return isDereferenceablePointer(ASC->getOperand(0), DL, Visited);

  // If we don't know, assume the worst.
  return false;
}
Example No. 30
/// Determine which blocks the value is live in.
///
/// These are blocks which lead to uses.  Knowing this allows us to avoid
/// inserting PHI nodes into blocks which don't lead to uses (thus, the
/// inserted phi nodes would be dead).
void PromoteMem2Reg::ComputeLiveInBlocks(
    AllocaInst *AI, AllocaInfo &Info,
    const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
    SmallPtrSetImpl<BasicBlock *> &LiveInBlocks) {
  // To determine liveness, we must iterate through the predecessors of blocks
  // where the def is live.  Blocks are added to the worklist if we need to
  // check their predecessors.  Start with all the using blocks.
  SmallVector<BasicBlock *, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
                                                    Info.UsingBlocks.end());

  // If any of the using blocks is also a definition block, check to see if the
  // definition occurs before or after the use.  If it happens before the use,
  // the value isn't really live-in.
  for (unsigned i = 0, e = LiveInBlockWorklist.size(); i != e; ++i) {
    BasicBlock *BB = LiveInBlockWorklist[i];
    if (!DefBlocks.count(BB))
      continue;

    // Okay, this is a block that both uses and defines the value.  If the first
    // reference to the alloca is a def (store), then we know it isn't live-in.
    for (BasicBlock::iterator I = BB->begin();; ++I) {
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        if (SI->getOperand(1) != AI)
          continue;

        // We found a store to the alloca before a load.  The alloca is not
        // actually live-in here.
        LiveInBlockWorklist[i] = LiveInBlockWorklist.back();
        LiveInBlockWorklist.pop_back();
        --i;
        --e;
        break;
      }

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        if (LI->getOperand(0) != AI)
          continue;

        // Okay, we found a load before a store to the alloca.  It is actually
        // live into this block.
        break;
      }
    }
  }

  // Now that we have a set of blocks where the phi is live-in, recursively add
  // their predecessors until we find the full region the value is live.
  while (!LiveInBlockWorklist.empty()) {
    BasicBlock *BB = LiveInBlockWorklist.pop_back_val();

    // The block really is live in here, insert it into the set.  If already in
    // the set, then it has already been processed.
    if (!LiveInBlocks.insert(BB).second)
      continue;

    // Since the value is live into BB, it is either defined in a predecessor or
    // live into it too.  Add the preds to the worklist unless they are a
    // defining block.
    for (BasicBlock *P : predecessors(BB)) {
      // The value is not live into a predecessor if it defines the value.
      if (DefBlocks.count(P))
        continue;

      // Otherwise it is, add to the worklist.
      LiveInBlockWorklist.push_back(P);
    }
  }
}