Example no. 1
/// RemoveFactorFromExpression - If V is an expression tree that is a 
/// multiplication sequence, and if this sequence contains a multiply by Factor,
/// remove Factor from the tree and return the new tree.
Value *Reassociate::RemoveFactorFromExpression(Value *V, Value *Factor) {
  BinaryOperator *BO = isReassociableOp(V, Instruction::Mul);
  if (!BO) return 0;
  
  SmallVector<ValueEntry, 8> Factors;
  LinearizeExprTree(BO, Factors);

  bool FoundFactor = false;
  bool NeedsNegate = false;
  for (unsigned i = 0, e = Factors.size(); i != e; ++i) {
    if (Factors[i].Op == Factor) {
      FoundFactor = true;
      Factors.erase(Factors.begin()+i);
      break;
    }
    
    // If this is a negative version of this factor, remove it.
    if (ConstantInt *FC1 = dyn_cast<ConstantInt>(Factor))
      if (ConstantInt *FC2 = dyn_cast<ConstantInt>(Factors[i].Op))
        if (FC1->getValue() == -FC2->getValue()) {
          FoundFactor = NeedsNegate = true;
          Factors.erase(Factors.begin()+i);
          break;
        }
  }
  
  if (!FoundFactor) {
    // Make sure to restore the operands to the expression tree.
    RewriteExprTree(BO, Factors);
    return 0;
  }
  
  BasicBlock::iterator InsertPt = BO; ++InsertPt;
  
  // If this was just a single multiply, remove the multiply and return the only
  // remaining operand.
  if (Factors.size() == 1) {
    ValueRankMap.erase(BO);
    DeadInsts.push_back(BO);
    V = Factors[0].Op;
  } else {
    RewriteExprTree(BO, Factors);
    V = BO;
  }
  
  if (NeedsNegate)
    V = BinaryOperator::CreateNeg(V, "neg", InsertPt);
  
  return V;
}
Example no. 2
/// removeDeadFunctions - Remove dead functions that are not included in
/// DNR (Do Not Remove) list.
bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode*, 16> FunctionsToRemove;

  // Scan for all of the functions, looking for ones that should now be removed
  // from the program.  Insert the dead ones in the FunctionsToRemove set.
  for (CallGraph::iterator I = CG.begin(), E = CG.end(); I != E; ++I) {
    CallGraphNode *CGN = I->second;
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions. This is a bit of a hack to share code
    // between here and the InlineAlways pass.
    if (AlwaysInlineOnly &&
        !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                         Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;
    
    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node.  These edges might have been made irrelevant due to
    // optimization of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Mark the callee's call graph node for removal; the node is deleted below.
    FunctionsToRemove.push_back(CGN);
  }
  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so.  We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that it doesn't matter that we are iterating over a non-stable order
  // here to do this, it doesn't matter which order the functions are deleted
  // in.
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(std::unique(FunctionsToRemove.begin(),
                                      FunctionsToRemove.end()),
                          FunctionsToRemove.end());
  for (SmallVectorImpl<CallGraphNode *>::iterator I = FunctionsToRemove.begin(),
                                                  E = FunctionsToRemove.end();
       I != E; ++I) {
    delete CG.removeFunctionFromModule(*I);
    ++NumDeleted;
  }
  return true;
}
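
The array_pod_sort / std::unique / erase sequence above is the standard in-place deduplication idiom: sorting groups equal elements together, unique shifts one representative of each group to the front, and erase trims the tail. A minimal self-contained sketch with std::vector and std::sort instead of the LLVM types, which behave the same way here:

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  std::vector<int> v = {3, 1, 3, 2, 1};

  // Sort so duplicates are adjacent; std::unique then moves one copy of each
  // value to the front and returns an iterator past the last unique element.
  std::sort(v.begin(), v.end());
  v.erase(std::unique(v.begin(), v.end()), v.end());

  assert((v == std::vector<int>{1, 2, 3}));
}
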
Example no. 3
 ArrayRef<Identifier> getNames() {
   llvm::array_pod_sort(names.begin(), names.end(),
                        [](const Identifier *lhs, const Identifier *rhs) {
     return lhs->compare(*rhs);
   });
   names.erase(std::unique(names.begin(), names.end()), names.end());
   return names;
 }
Example no. 4
Counter CounterExpressionBuilder::simplify(Counter ExpressionTree) {
  // Gather constant terms.
  SmallVector<Term, 32> Terms;
  extractTerms(ExpressionTree, +1, Terms);

  // If there are no terms, this is just a zero. The algorithm below assumes at
  // least one term.
  if (Terms.size() == 0)
    return Counter::getZero();

  // Group the terms by counter ID.
  std::sort(Terms.begin(), Terms.end(), [](const Term &LHS, const Term &RHS) {
    return LHS.CounterID < RHS.CounterID;
  });

  // Combine terms by counter ID to eliminate counters that sum to zero.
  auto Prev = Terms.begin();
  for (auto I = Prev + 1, E = Terms.end(); I != E; ++I) {
    if (I->CounterID == Prev->CounterID) {
      Prev->Factor += I->Factor;
      continue;
    }
    ++Prev;
    *Prev = *I;
  }
  Terms.erase(++Prev, Terms.end());

  Counter C;
  // Create additions. We do this before subtractions to avoid constructs like
  // ((0 - X) + Y), as opposed to (Y - X).
  for (auto T : Terms) {
    if (T.Factor <= 0)
      continue;
    for (int I = 0; I < T.Factor; ++I)
      if (C.isZero())
        C = Counter::getCounter(T.CounterID);
      else
        C = get(CounterExpression(CounterExpression::Add, C,
                                  Counter::getCounter(T.CounterID)));
  }

  // Create subtractions.
  for (auto T : Terms) {
    if (T.Factor >= 0)
      continue;
    for (int I = 0; I < -T.Factor; ++I)
      C = get(CounterExpression(CounterExpression::Subtract, C,
                                Counter::getCounter(T.CounterID)));
  }
  return C;
}
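
The Prev/I loop above is a sorted-run compaction: entries with an equal key fold into the Prev slot, distinct entries are shifted up behind it, and a single erase call trims the leftover tail. A standalone sketch of the same compaction over a hypothetical Term struct (plain ints standing in for counter IDs and factors); like the original, it assumes at least one element:

#include <cassert>
#include <vector>

struct Term { int Key; int Factor; };

int main() {
  // Assumed sorted by Key; runs of equal keys will be merged in place.
  std::vector<Term> Terms = {{1, 2}, {1, -1}, {2, 5}, {2, 1}, {3, 4}};

  // Prev points at the last merged element: equal keys fold into it,
  // distinct keys are copied up into the slot just behind it.
  auto Prev = Terms.begin();
  for (auto I = Prev + 1, E = Terms.end(); I != E; ++I) {
    if (I->Key == Prev->Key) {
      Prev->Factor += I->Factor;
      continue;
    }
    ++Prev;
    *Prev = *I;
  }
  Terms.erase(++Prev, Terms.end());

  assert(Terms.size() == 3);
  assert(Terms[0].Factor == 1 && Terms[1].Factor == 6 && Terms[2].Factor == 4);
}
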
Example no. 5
PreservedAnalyses AlwaysInlinerPass::run(Module &M, ModuleAnalysisManager &) {
  InlineFunctionInfo IFI;
  SmallSetVector<CallSite, 16> Calls;
  bool Changed = false;
  SmallVector<Function *, 16> InlinedFunctions;
  for (Function &F : M)
    if (!F.isDeclaration() && F.hasFnAttribute(Attribute::AlwaysInline) &&
        isInlineViable(F)) {
      Calls.clear();

      for (User *U : F.users())
        if (auto CS = CallSite(U))
          if (CS.getCalledFunction() == &F)
            Calls.insert(CS);

      for (CallSite CS : Calls)
        // FIXME: We really shouldn't be able to fail to inline at this point!
        // We should do something to log or check the inline failures here.
        Changed |= InlineFunction(CS, IFI);

      // Remember to try and delete this function afterward. This both avoids
      // re-walking the rest of the module and avoids dealing with any iterator
      // invalidation issues while deleting functions.
      InlinedFunctions.push_back(&F);
    }

  // Remove any live functions.
  erase_if(InlinedFunctions, [&](Function *F) {
    F->removeDeadConstantUsers();
    return !F->isDefTriviallyDead();
  });

  // Delete the non-comdat ones from the module and also from our vector.
  auto NonComdatBegin = partition(
      InlinedFunctions, [&](Function *F) { return F->hasComdat(); });
  for (Function *F : make_range(NonComdatBegin, InlinedFunctions.end()))
    M.getFunctionList().erase(F);
  InlinedFunctions.erase(NonComdatBegin, InlinedFunctions.end());

  if (!InlinedFunctions.empty()) {
    // Now we just have the comdat functions. Filter out the ones whose comdats
    // are not actually dead.
    filterDeadComdatFunctions(M, InlinedFunctions);
    // The remaining functions are actually dead.
    for (Function *F : InlinedFunctions)
      M.getFunctionList().erase(F);
  }

  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}
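
erase_if and partition above are llvm::STLExtras conveniences over the standard algorithms. A reduced sketch of the partition-then-erase split using plain std::partition, with a hypothetical Fn type standing in for Function and a HasComdat flag standing in for hasComdat():

#include <algorithm>
#include <cassert>
#include <vector>

struct Fn { bool HasComdat; };

int main() {
  Fn A{true}, B{false}, C{true}, D{false};
  std::vector<Fn *> Fns = {&A, &B, &C, &D};

  // Move elements satisfying the predicate to the front; the returned
  // iterator marks the first element of the second group.
  auto NonComdatBegin = std::partition(
      Fns.begin(), Fns.end(), [](const Fn *F) { return F->HasComdat; });

  // Process the second group (the real pass erases these functions from the
  // module), then drop that group from the vector.
  for (auto I = NonComdatBegin; I != Fns.end(); ++I) {
    // delete *I in the real pass
  }
  Fns.erase(NonComdatBegin, Fns.end());

  assert(Fns.size() == 2);
}
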
Example no. 6
static void bindExtensions(SourceFile &SF, TypeChecker &TC) {
  // Utility function to try and resolve the extended type without diagnosing.
  // If we succeed, we go ahead and bind the extension. Otherwise, return false.
  auto tryBindExtension = [&](ExtensionDecl *ext) -> bool {
    if (auto nominal = ext->getExtendedNominal()) {
      bindExtensionToNominal(ext, nominal);
      return true;
    }

    return false;
  };

  // Phase 1 - try to bind each extension, adding those whose type cannot be
  // resolved to a worklist.
  SmallVector<ExtensionDecl *, 8> worklist;

  // FIXME: The current source file needs to be handled specially, because of
  // private extensions.
  SF.forAllVisibleModules([&](ModuleDecl::ImportedModule import) {
    // FIXME: Respect the access path?
    for (auto file : import.second->getFiles()) {
      auto SF = dyn_cast<SourceFile>(file);
      if (!SF)
        continue;

      for (auto D : SF->Decls) {
        if (auto ED = dyn_cast<ExtensionDecl>(D))
          if (!tryBindExtension(ED))
            worklist.push_back(ED);
      }
    }
  });

  // Phase 2 - repeatedly go through the worklist and attempt to bind each
  // extension there, removing it from the worklist if we succeed.
  bool changed;
  do {
    changed = false;

    auto last = std::remove_if(worklist.begin(), worklist.end(),
                               tryBindExtension);
    if (last != worklist.end()) {
      worklist.erase(last, worklist.end());
      changed = true;
    }
  } while(changed);

  // Any remaining extensions are invalid. They will be diagnosed later by
  // typeCheckDecl().
}
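
Phase 2 above is a fixpoint sweep: std::remove_if applies its predicate exactly once to each element (the standard guarantees exactly last-first applications), which is what makes a side-effecting predicate like tryBindExtension workable, and the loop repeats until a whole sweep removes nothing. A reduced sketch with a hypothetical tryResolve standing in for extension binding; each item resolves only after its dependency has, forcing several sweeps:

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  // Item 0 has no dependency; item k depends on item k-1.
  std::vector<int> worklist = {2, 1, 0};
  std::vector<bool> resolved(3, false);

  auto tryResolve = [&](int item) -> bool {
    if (item == 0 || resolved[item - 1]) {
      resolved[item] = true;
      return true;
    }
    return false;
  };

  bool changed;
  do {
    changed = false;
    auto last = std::remove_if(worklist.begin(), worklist.end(), tryResolve);
    if (last != worklist.end()) {
      worklist.erase(last, worklist.end());
      changed = true;
    }
  } while (changed);

  assert(worklist.empty()); // drained after three productive sweeps
}
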
Example no. 7
/// \brief Computes the set of declarations referenced by these base
/// paths.
void CXXBasePaths::ComputeDeclsFound() {
  assert(NumDeclsFound == 0 && !DeclsFound &&
         "Already computed the set of declarations");

  SmallVector<NamedDecl *, 8> Decls;
  for (paths_iterator Path = begin(), PathEnd = end(); Path != PathEnd; ++Path)
    Decls.push_back(*Path->Decls.first);

  // Eliminate duplicated decls.
  llvm::array_pod_sort(Decls.begin(), Decls.end());
  Decls.erase(std::unique(Decls.begin(), Decls.end()), Decls.end());

  NumDeclsFound = Decls.size();
  DeclsFound = new NamedDecl * [NumDeclsFound];
  std::copy(Decls.begin(), Decls.end(), DeclsFound);
}
Example no. 8
/// Print only DIEs that have a certain name.
static void filterByAccelName(ArrayRef<std::string> Names, DWARFContext &DICtx,
                              raw_ostream &OS) {
  SmallVector<uint64_t, 4> Offsets;
  for (const auto &Name : Names) {
    getDIEOffset(DICtx.getAppleNames(), Name, Offsets);
    getDIEOffset(DICtx.getAppleTypes(), Name, Offsets);
    getDIEOffset(DICtx.getAppleNamespaces(), Name, Offsets);
    getDIEOffset(DICtx.getDebugNames(), Name, Offsets);
  }
  llvm::sort(Offsets.begin(), Offsets.end());
  Offsets.erase(std::unique(Offsets.begin(), Offsets.end()), Offsets.end());

  for (uint64_t Off: Offsets) {
    DWARFDie Die = DICtx.getDIEForOffset(Off);
    Die.dump(OS, 0, getDumpOpts());
  }
}
Example no. 9
void Job::printSummary(raw_ostream &os) const {
  // Deciding how to describe our inputs is a bit subtle; if we are a Job built
  // from a JobAction that itself has InputActions sources, then we collect
  // those up. Otherwise it's more correct to talk about our inputs as the
  // outputs of our input-jobs.
  SmallVector<std::string, 4> Inputs;

  for (const Action *A : getSource().getInputs())
    if (const InputAction *IA = dyn_cast<InputAction>(A))
      Inputs.push_back(IA->getInputArg().getValue());

  for (const Job *J : getInputs())
    for (const std::string &f : J->getOutput().getPrimaryOutputFilenames())
      Inputs.push_back(f);

  size_t limit = 3;
  size_t actual = Inputs.size();
  if (actual > limit) {
    Inputs.erase(Inputs.begin() + limit, Inputs.end());
  }

  os << "{" << getSource().getClassName() << ": ";
  interleave(getOutput().getPrimaryOutputFilenames(),
             [&](const std::string &Arg) {
               os << llvm::sys::path::filename(Arg);
             },
             [&] { os << ' '; });
  os << " <= ";
  interleave(Inputs,
             [&](const std::string &Arg) {
               os << llvm::sys::path::filename(Arg);
             },
             [&] { os << ' '; });
  if (actual > limit) {
    os << " ... " << (actual-limit) << " more";
  }
  os << "}";
}
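
The input-list truncation above is erase(begin() + limit, end()), with the pre-truncation size saved first so the summary can report how many entries were dropped. A tiny self-contained sketch:

#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> inputs = {"a.o", "b.o", "c.o", "d.o", "e.o"};

  // Remember the original size before trimming to the display limit.
  const size_t limit = 3;
  const size_t actual = inputs.size();
  if (actual > limit)
    inputs.erase(inputs.begin() + limit, inputs.end());

  assert(inputs.size() == 3);
  // A summary would print "... 2 more" using (actual - limit).
  assert(actual - limit == 2);
}
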
Example no. 10
bool CallingConvention_AnyArch_AnyCC::analyzeFunction(ParameterRegistry &registry, CallInformation &fillOut, llvm::Function &func)
{
	if (!isFullDisassembly() || md::isPrototype(func))
	{
		return false;
	}
	
	auto regs = &*func.arg_begin();
	unordered_map<const TargetRegisterInfo*, ModRefInfo> resultMap;
	
	// Find all GEPs
	const auto& target = registry.getTargetInfo();
	unordered_multimap<const TargetRegisterInfo*, User*> registerUsers;
	for (User* user : regs->users())
	{
		if (const TargetRegisterInfo* maybeRegister = target.registerInfo(*user))
		{
			const TargetRegisterInfo& registerInfo = target.largestOverlappingRegister(*maybeRegister);
			registerUsers.insert({&registerInfo, user});
		}
	}
	
	// Find all users of these GEPs
	DominatorsPerRegister gepUsers;
	for (auto iter = registerUsers.begin(); iter != registerUsers.end(); iter++)
	{
		addAllUsers(*iter->second, iter->first, gepUsers);
	}
	
	DominatorTree& preDom = registry.getAnalysis<DominatorTreeWrapperPass>(func).getDomTree();
	PostDominatorTree& postDom = registry.getAnalysis<PostDominatorTreeWrapperPass>(func).getPostDomTree();
	
	// Add calls
	SmallVector<CallInst*, 8> calls;
	CallGraph& cg = registry.getAnalysis<CallGraphWrapperPass>().getCallGraph();
	CallGraphNode* thisFunc = cg[&func];
	for (const auto& pair : *thisFunc)
	{
		Function* callee = pair.second->getFunction();
		if (const CallInformation* callInfo = registry.getCallInfo(*callee))
		if (callInfo->getStage() == CallInformation::Completed)
		{
			// pair.first is a weak value handle and has a cast operator to get the pointee
			CallInst* caller = cast<CallInst>((Value*)pair.first);
			calls.push_back(caller);
			
			for (const auto& vi : *callInfo)
			{
				if (vi.type == ValueInformation::IntegerRegister)
				{
					gepUsers[vi.registerInfo].insert(caller);
				}
			}
		}
	}
	
	// Start out resultMap based on call dominance. Weed out calls until dominant call set has been established.
	// This map will be refined by results from mod/ref instruction analysis. The purpose is mainly to define
	// mod/ref behavior for registers that are used in callees of this function, but not in this function
	// directly.
	while (calls.size() > 0)
	{
		unordered_map<const TargetRegisterInfo*, unsigned> callResult;
		auto dominant = findDominantValues(preDom, calls);
		for (CallInst* call : dominant)
		{
			Function* callee = call->getCalledFunction();
			for (const auto& pair : translateToModRef(*registry.getCallInfo(*callee)))
			{
				callResult[pair.first] |= pair.second;
			}
			
			calls.erase(find(calls.begin(), calls.end(), call));
		}
		
		for (const auto& pair : callResult)
		{
			resultMap[pair.first] = static_cast<ModRefInfo>(pair.second);
		}
	}
	
	// Find the dominant use(s)
	auto preDominatingUses = gepUsers;
	for (auto& pair : preDominatingUses)
	{
		pair.second = findDominantValues(preDom, pair.second);
	}
	
	// Fill out ModRef use dictionary
	// (Ref info is incomplete)
	for (auto& pair : preDominatingUses)
	{
		ModRefInfo& r = resultMap[pair.first];
		r = IncompleteRef;
		for (auto inst : pair.second)
		{
			if (isa<StoreInst>(inst))
			{
				// If we see a dominant store, then the register is modified.
				r = MRI_Mod;
				break;
			}
			if (CallInst* call = dyn_cast<CallInst>(inst))
			{
				// If the first user is a call, propagate its ModRef value.
				r = registry.getCallInfo(*call->getCalledFunction())->getRegisterModRef(*pair.first);
				break;
			}
		}
	}
	
	// Find post-dominating stores
	auto postDominatingUses = gepUsers;
	for (auto& pair : postDominatingUses)
	{
		const TargetRegisterInfo* key = pair.first;
		auto& set = pair.second;
		// remove non-Mod instructions
		for (auto iter = set.begin(); iter != set.end(); )
		{
			if (isa<StoreInst>(*iter))
			{
				iter++;
				continue;
			}
			else if (CallInst* call = dyn_cast<CallInst>(*iter))
			{
				auto callee = call->getCalledFunction();
				const auto& info = *registry.getCallInfo(*callee);
				if ((info.getRegisterModRef(*key) & MRI_Mod) == MRI_Mod)
				{
					iter++;
					continue;
				}
			}
			iter = set.erase(iter);
		}
		
		set = findDominantValues(postDom, set);
	}
	
	MemorySSA& mssa = *registry.getMemorySSA(func);
	
	// Walk up post-dominating uses until we get to liveOnEntry.
	for (auto& pair : postDominatingUses)
	{
		walkUpPostDominatingUse(target, mssa, preDominatingUses, postDominatingUses, resultMap, pair.first);
	}
	
	// Use resultMap to build call information. First, sort registers by their pointer order; this ensures stable
	// parameter order.
	
	// We have authoritative information on used parameters, but not on return values. Only register parameters in this
	// step.
	SmallVector<pair<const TargetRegisterInfo*, ModRefInfo>, 16> registers;
	// std::copy does not grow its destination: writing through begin() of an
	// empty vector is undefined behavior, so go through a back inserter.
	copy(resultMap.begin(), resultMap.end(), back_inserter(registers));
	sort(registers.begin(), registers.end());
	
	vector<const TargetRegisterInfo*> returns;
	// Iterate the sorted vector rather than the unordered map so parameters
	// are registered in a stable order, as the comment above intends.
	for (const auto& pair : registers)
	{
		if (pair.second & MRI_Ref)
		{
			fillOut.addParameter(ValueInformation::IntegerRegister, pair.first);
		}
		if (pair.second & MRI_Mod)
		{
			returns.push_back(pair.first);
		}
	}
	
	// Check for used returns.
	for (const TargetRegisterInfo* reg : ipaFindUsedReturns(registry, func, returns))
	{
		fillOut.addReturn(ValueInformation::IntegerRegister, reg);
	}
	return true;
}
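
As the comment in the fix above notes, std::copy writes through whatever output iterator it is given and never grows the destination, so draining a map into an empty vector must go through std::back_inserter (or the vector's range constructor). A minimal sketch, also showing the usual sort-for-determinism step after emptying an unordered container:

#include <algorithm>
#include <cassert>
#include <iterator>
#include <unordered_map>
#include <vector>

int main() {
  std::unordered_map<int, int> resultMap = {{3, 30}, {1, 10}, {2, 20}};

  // back_inserter turns every assignment into a push_back, growing the
  // vector; writing through registers.begin() here would be out of bounds.
  std::vector<std::pair<int, int>> registers;
  std::copy(resultMap.begin(), resultMap.end(), std::back_inserter(registers));

  // Sort to get a deterministic order out of the unordered container.
  std::sort(registers.begin(), registers.end());

  assert(registers.size() == 3 && registers.front().first == 1);
}
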
Example no. 11
PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
                                   CGSCCAnalysisManager &AM, LazyCallGraph &CG,
                                   CGSCCUpdateResult &UR) {
  const ModuleAnalysisManager &MAM =
      AM.getResult<ModuleAnalysisManagerCGSCCProxy>(InitialC, CG).getManager();
  bool Changed = false;

  assert(InitialC.size() > 0 && "Cannot handle an empty SCC!");
  Module &M = *InitialC.begin()->getFunction().getParent();
  ProfileSummaryInfo *PSI = MAM.getCachedResult<ProfileSummaryAnalysis>(M);

  if (!ImportedFunctionsStats &&
      InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) {
    ImportedFunctionsStats =
        llvm::make_unique<ImportedFunctionsInliningStatistics>();
    ImportedFunctionsStats->setModuleInfo(M);
  }

  // We use a single common worklist for calls across the entire SCC. We
  // process these in-order and append new calls introduced during inlining to
  // the end.
  //
  // Note that this particular order of processing is actually critical to
  // avoid very bad behaviors. Consider *highly connected* call graphs where
  // each function contains a small amount of code and a couple of calls to
  // other functions. Because the LLVM inliner is fundamentally a bottom-up
  // inliner, it can handle gracefully the fact that these all appear to be
  // reasonable inlining candidates as it will flatten things until they become
  // too big to inline, and then move on and flatten another batch.
  //
  // However, when processing call edges *within* an SCC we cannot rely on this
  // bottom-up behavior. As a consequence, with heavily connected *SCCs* of
  // functions we can end up incrementally inlining N calls into each of
  // N functions because each incremental inlining decision looks good and we
  // don't have a topological ordering to prevent explosions.
  //
  // To compensate for this, we don't process transitive edges made immediate
  // by inlining until we've done one pass of inlining across the entire SCC.
  // Large, highly connected SCCs still lead to some amount of code bloat in
  // this model, but it is uniformly spread across all the functions in the SCC
  // and eventually they all become too large to inline, rather than
  // incrementally making a single function grow in a super linear fashion.
  SmallVector<std::pair<CallSite, int>, 16> Calls;

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(InitialC, CG)
          .getManager();

  // Populate the initial list of calls in this SCC.
  for (auto &N : InitialC) {
    auto &ORE =
        FAM.getResult<OptimizationRemarkEmitterAnalysis>(N.getFunction());
    // We want to generally process call sites top-down in order for
    // simplifications stemming from replacing the call with the returned value
    // after inlining to be visible to subsequent inlining decisions.
    // FIXME: Using instructions sequence is a really bad way to do this.
    // Instead we should do an actual RPO walk of the function body.
    for (Instruction &I : instructions(N.getFunction()))
      if (auto CS = CallSite(&I))
        if (Function *Callee = CS.getCalledFunction()) {
          if (!Callee->isDeclaration())
            Calls.push_back({CS, -1});
          else if (!isa<IntrinsicInst>(I)) {
            using namespace ore;
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CS.getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
          }
        }
  }
  if (Calls.empty())
    return PreservedAnalyses::all();

  // Capture updatable variables for the current SCC and RefSCC.
  auto *C = &InitialC;
  auto *RC = &C->getOuterRefSCC();

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee.  This allows us to avoid
  // infinite inlining in some obscure cases.  To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 16> InlineHistory;

  // Track a set vector of inlined callees so that we can augment the caller
  // with all of their edges in the call graph before pruning out the ones that
  // got simplified away.
  SmallSetVector<Function *, 4> InlinedCallees;

  // Track the dead functions to delete once finished with inlining calls. We
  // defer deleting these to make it easier to handle the call graph updates.
  SmallVector<Function *, 4> DeadFunctions;

  // Loop forward over all of the calls. Note that we cannot cache the size as
  // inlining can introduce new calls that need to be processed.
  for (int i = 0; i < (int)Calls.size(); ++i) {
    // We expect the calls to typically be batched with sequences of calls that
    // have the same caller, so we first set up some shared infrastructure for
    // this caller. We also do any pruning we can at this layer on the caller
    // alone.
    Function &F = *Calls[i].first.getCaller();
    LazyCallGraph::Node &N = *CG.lookup(F);
    if (CG.lookupSCC(N) != C)
      continue;
    if (F.hasFnAttribute(Attribute::OptimizeNone))
      continue;

    LLVM_DEBUG(dbgs() << "Inlining calls in: " << F.getName() << "\n");

    // Get a FunctionAnalysisManager via a proxy for this particular node. We
    // do this each time we visit a node as the SCC may have changed and as
    // we're going to mutate this particular function we want to make sure the
    // proxy is in place to forward any invalidation events. We can use the
    // manager we get here for looking up results for functions other than this
    // node however because those functions aren't going to be mutated by this
    // pass.
    FunctionAnalysisManager &FAM =
        AM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, CG)
            .getManager();

    // Get the remarks emission analysis for the caller.
    auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);

    std::function<AssumptionCache &(Function &)> GetAssumptionCache =
        [&](Function &F) -> AssumptionCache & {
      return FAM.getResult<AssumptionAnalysis>(F);
    };
    auto GetBFI = [&](Function &F) -> BlockFrequencyInfo & {
      return FAM.getResult<BlockFrequencyAnalysis>(F);
    };

    auto GetInlineCost = [&](CallSite CS) {
      Function &Callee = *CS.getCalledFunction();
      auto &CalleeTTI = FAM.getResult<TargetIRAnalysis>(Callee);
      return getInlineCost(CS, Params, CalleeTTI, GetAssumptionCache, {GetBFI},
                           PSI, &ORE);
    };

    // Now process as many calls as we have within this caller in the sequence.
    // We bail out as soon as the caller has to change so we can update the
    // call graph and prepare the context of that new caller.
    bool DidInline = false;
    for (; i < (int)Calls.size() && Calls[i].first.getCaller() == &F; ++i) {
      int InlineHistoryID;
      CallSite CS;
      std::tie(CS, InlineHistoryID) = Calls[i];
      Function &Callee = *CS.getCalledFunction();

      if (InlineHistoryID != -1 &&
          InlineHistoryIncludes(&Callee, InlineHistoryID, InlineHistory))
        continue;

      // Check if this inlining may repeat breaking an SCC apart that has
      // already been split once before. In that case, inlining here may
      // trigger infinite inlining, much like is prevented within the inliner
      // itself by the InlineHistory above, but spread across CGSCC iterations
      // and thus hidden from the full inline history.
      if (CG.lookupSCC(*CG.lookup(Callee)) == C &&
          UR.InlinedInternalEdges.count({&N, C})) {
        LLVM_DEBUG(dbgs() << "Skipping inlining internal SCC edge from a node "
                             "previously split out of this SCC by inlining: "
                          << F.getName() << " -> " << Callee.getName() << "\n");
        continue;
      }

      Optional<InlineCost> OIC = shouldInline(CS, GetInlineCost, ORE);
      // Check whether we want to inline this callsite.
      if (!OIC)
        continue;

      // Setup the data structure used to plumb customization into the
      // `InlineFunction` routine.
      InlineFunctionInfo IFI(
          /*cg=*/nullptr, &GetAssumptionCache, PSI,
          &FAM.getResult<BlockFrequencyAnalysis>(*(CS.getCaller())),
          &FAM.getResult<BlockFrequencyAnalysis>(Callee));

      // Get DebugLoc to report. CS will be invalid after Inliner.
      DebugLoc DLoc = CS->getDebugLoc();
      BasicBlock *Block = CS.getParent();

      using namespace ore;

      if (!InlineFunction(CS, IFI)) {
        ORE.emit([&]() {
          return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc, Block)
                 << NV("Callee", &Callee) << " will not be inlined into "
                 << NV("Caller", &F);
        });
        continue;
      }
      DidInline = true;
      InlinedCallees.insert(&Callee);

      ORE.emit([&]() {
        bool AlwaysInline = OIC->isAlways();
        StringRef RemarkName = AlwaysInline ? "AlwaysInline" : "Inlined";
        OptimizationRemark R(DEBUG_TYPE, RemarkName, DLoc, Block);
        R << NV("Callee", &Callee) << " inlined into ";
        R << NV("Caller", &F);
        if (AlwaysInline)
          R << " with cost=always";
        else {
          R << " with cost=" << NV("Cost", OIC->getCost());
          R << " (threshold=" << NV("Threshold", OIC->getThreshold());
          R << ")";
        }
        return R;
      });

      // Add any new callsites to defined functions to the worklist.
      if (!IFI.InlinedCallSites.empty()) {
        int NewHistoryID = InlineHistory.size();
        InlineHistory.push_back({&Callee, InlineHistoryID});
        for (CallSite &CS : reverse(IFI.InlinedCallSites))
          if (Function *NewCallee = CS.getCalledFunction())
            if (!NewCallee->isDeclaration())
              Calls.push_back({CS, NewHistoryID});
      }

      if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
        ImportedFunctionsStats->recordInline(F, Callee);

      // Merge the attributes based on the inlining.
      AttributeFuncs::mergeAttributesForInlining(F, Callee);

      // For local functions, check whether this makes the callee trivially
      // dead. In that case, we can drop the body of the function eagerly
      // which may reduce the number of callers of other functions to one,
      // changing inline cost thresholds.
      if (Callee.hasLocalLinkage()) {
        // To check this we also need to nuke any dead constant uses (perhaps
        // made dead by this operation on other functions).
        Callee.removeDeadConstantUsers();
        if (Callee.use_empty() && !CG.isLibFunction(Callee)) {
          Calls.erase(
              std::remove_if(Calls.begin() + i + 1, Calls.end(),
                             [&Callee](const std::pair<CallSite, int> &Call) {
                               return Call.first.getCaller() == &Callee;
                             }),
              Calls.end());
          // Clear the body and queue the function itself for deletion when we
          // finish inlining and call graph updates.
          // Note that after this point, it is an error to do anything other
          // than use the callee's address or delete it.
          Callee.dropAllReferences();
          assert(find(DeadFunctions, &Callee) == DeadFunctions.end() &&
                 "Cannot put cause a function to become dead twice!");
          DeadFunctions.push_back(&Callee);
        }
      }
    }

    // Back the call index up by one to put us in a good position to go around
    // the outer loop.
    --i;

    if (!DidInline)
      continue;
    Changed = true;

    // Add all the inlined callees' edges as ref edges to the caller. These are
    // by definition trivial edges as we always have *some* transitive ref edge
    // chain. While in some cases these edges are direct calls inside the
    // callee, they have to be modeled in the inliner as reference edges as
    // there may be a reference edge anywhere along the chain from the current
    // caller to the callee that causes the whole thing to appear like
    // a (transitive) reference edge that will require promotion to a call edge
    // below.
    for (Function *InlinedCallee : InlinedCallees) {
      LazyCallGraph::Node &CalleeN = *CG.lookup(*InlinedCallee);
      for (LazyCallGraph::Edge &E : *CalleeN)
        RC->insertTrivialRefEdge(N, E.getNode());
    }

    // At this point, since we have made changes we have at least removed
    // a call instruction. However, in the process we do some incremental
    // simplification of the surrounding code. This simplification can
    // essentially do all of the same things as a function pass and we can
    // re-use the exact same logic for updating the call graph to reflect the
    // change.
    LazyCallGraph::SCC *OldC = C;
    C = &updateCGAndAnalysisManagerForFunctionPass(CG, *C, N, AM, UR);
    LLVM_DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n");
    RC = &C->getOuterRefSCC();

    // If this causes an SCC to split apart into multiple smaller SCCs, there
    // is a subtle risk we need to prepare for. Other transformations may
    // expose an "infinite inlining" opportunity later, and because of the SCC
    // mutation, we will revisit this function and potentially re-inline. If we
    // do, and that re-inlining also has the potentially to mutate the SCC
    // structure, the infinite inlining problem can manifest through infinite
    // SCC splits and merges. To avoid this, we capture the originating caller
    // node and the SCC containing the call edge. This is a slight over
    // approximation of the possible inlining decisions that must be avoided,
    // but is relatively efficient to store.
    // FIXME: This seems like a very heavyweight way of retaining the inline
    // history, we should look for a more efficient way of tracking it.
    if (C != OldC && llvm::any_of(InlinedCallees, [&](Function *Callee) {
          return CG.lookupSCC(*CG.lookup(*Callee)) == OldC;
        })) {
      LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, "
                           "retaining this to avoid infinite inlining.\n");
      UR.InlinedInternalEdges.insert({&N, OldC});
    }
    InlinedCallees.clear();
  }

  // Now that we've finished inlining all of the calls across this SCC, delete
  // all of the trivially dead functions, updating the call graph and the CGSCC
  // pass manager in the process.
  //
  // Note that this walks a pointer set which has non-deterministic order but
  // that is OK as all we do is delete things and add pointers to unordered
  // sets.
  for (Function *DeadF : DeadFunctions) {
    // Get the necessary information out of the call graph and nuke the
    // function there. Also, clear out any cached analyses.
    auto &DeadC = *CG.lookupSCC(*CG.lookup(*DeadF));
    FunctionAnalysisManager &FAM =
        AM.getResult<FunctionAnalysisManagerCGSCCProxy>(DeadC, CG)
            .getManager();
    FAM.clear(*DeadF, DeadF->getName());
    AM.clear(DeadC, DeadC.getName());
    auto &DeadRC = DeadC.getOuterRefSCC();
    CG.removeDeadFunction(*DeadF);

    // Mark the relevant parts of the call graph as invalid so we don't visit
    // them.
    UR.InvalidatedSCCs.insert(&DeadC);
    UR.InvalidatedRefSCCs.insert(&DeadRC);

    // And delete the actual function from the module.
    M.getFunctionList().erase(DeadF);
  }

  if (!Changed)
    return PreservedAnalyses::all();

  // Even if we change the IR, we update the core CGSCC data structures and so
  // can preserve the proxy to the function analysis manager.
  PreservedAnalyses PA;
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  return PA;
}
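
The pruning of Calls above applies the erase-remove idiom to a subrange: only the not-yet-processed tail [i+1, end) is filtered, while entries at or before the current index are left untouched. A reduced sketch with ints standing in for pending call sites:

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  std::vector<int> calls = {7, 1, 7, 2, 7, 3};
  size_t i = 1; // entries up to and including index i are already processed

  // Filter only the unprocessed tail [i+1, end); the prefix stays as-is,
  // matching how the inliner prunes pending calls into a now-dead callee.
  calls.erase(std::remove_if(calls.begin() + i + 1, calls.end(),
                             [](int c) { return c == 7; }),
              calls.end());

  assert((calls == std::vector<int>{7, 1, 2, 3}));
}
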
Example no. 12
/// \brief Recursively emit notes for each macro expansion and caret
/// diagnostics where appropriate.
///
/// Walks up the macro expansion stack printing expansion notes, the code
/// snippet, caret, underlines and FixItHint display as appropriate at each
/// level.
///
/// \param Loc The location for this caret.
/// \param Level The diagnostic level currently being emitted.
/// \param Ranges The underlined ranges for this code snippet.
/// \param Hints The FixIt hints active for this diagnostic.
void DiagnosticRenderer::emitMacroExpansions(SourceLocation Loc,
                                             DiagnosticsEngine::Level Level,
                                             ArrayRef<CharSourceRange> Ranges,
                                             ArrayRef<FixItHint> Hints,
                                             const SourceManager &SM) {
  assert(!Loc.isInvalid() && "must have a valid source location here");

  // Produce a stack of macro backtraces.
  SmallVector<SourceLocation, 8> LocationStack;
  unsigned IgnoredEnd = 0;
  while (Loc.isMacroID()) {
    // If this is the expansion of a macro argument, point the caret at the
    // use of the argument in the definition of the macro, not the expansion.
    if (SM.isMacroArgExpansion(Loc))
      LocationStack.push_back(SM.getImmediateExpansionRange(Loc).first);
    else
      LocationStack.push_back(Loc);

    if (checkRangesForMacroArgExpansion(Loc, Ranges, SM))
      IgnoredEnd = LocationStack.size();

    Loc = SM.getImmediateMacroCallerLoc(Loc);

    // Once the location no longer points into a macro, try stepping through
    // the last found location.  This sometimes produces additional useful
    // backtraces.
    if (Loc.isFileID())
      Loc = SM.getImmediateMacroCallerLoc(LocationStack.back());
    assert(!Loc.isInvalid() && "must have a valid source location here");
  }

  LocationStack.erase(LocationStack.begin(),
                      LocationStack.begin() + IgnoredEnd);

  unsigned MacroDepth = LocationStack.size();
  unsigned MacroLimit = DiagOpts->MacroBacktraceLimit;
  if (MacroDepth <= MacroLimit || MacroLimit == 0) {
    for (auto I = LocationStack.rbegin(), E = LocationStack.rend();
         I != E; ++I)
      emitSingleMacroExpansion(*I, Level, Ranges, SM);
    return;
  }

  unsigned MacroStartMessages = MacroLimit / 2;
  unsigned MacroEndMessages = MacroLimit / 2 + MacroLimit % 2;

  for (auto I = LocationStack.rbegin(),
            E = LocationStack.rbegin() + MacroStartMessages;
       I != E; ++I)
    emitSingleMacroExpansion(*I, Level, Ranges, SM);

  SmallString<200> MessageStorage;
  llvm::raw_svector_ostream Message(MessageStorage);
  Message << "(skipping " << (MacroDepth - MacroLimit)
          << " expansions in backtrace; use -fmacro-backtrace-limit=0 to "
             "see all)";
  emitBasicNote(Message.str());

  for (auto I = LocationStack.rend() - MacroEndMessages,
            E = LocationStack.rend();
       I != E; ++I)
    emitSingleMacroExpansion(*I, Level, Ranges, SM);
}
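
When the macro backtrace exceeds the limit, the code above prints MacroLimit/2 frames from each end (the tail gets the extra frame when the limit is odd) around a "skipping N expansions" note. A sketch of just that head/tail arithmetic on a plain vector:

#include <cassert>
#include <vector>

int main() {
  std::vector<int> stack = {1, 2, 3, 4, 5, 6, 7};
  const size_t limit = 4;
  const size_t depth = stack.size();

  // Head gets limit/2 entries, tail gets the remainder (one more when the
  // limit is odd); everything in between is summarized, not printed.
  const size_t head = limit / 2;
  const size_t tail = limit / 2 + limit % 2;

  std::vector<int> shown(stack.begin(), stack.begin() + head);
  // The "(skipping depth - limit expansions ...)" note would be emitted here.
  shown.insert(shown.end(), stack.end() - tail, stack.end());

  assert((shown == std::vector<int>{1, 2, 6, 7}));
  assert(depth - limit == 3); // three frames skipped
}
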
Example no. 13
/// RemoveBlockIfDead - If the specified block is dead, remove it, update loop
/// information, and remove any dead successors it has.
///
void LoopUnswitch::RemoveBlockIfDead(BasicBlock *BB,
                                     std::vector<Instruction*> &Worklist,
                                     Loop *L) {
  if (pred_begin(BB) != pred_end(BB)) {
    // This block isn't dead, since an edge to BB was just removed, see if there
    // are any easy simplifications we can do now.
    if (BasicBlock *Pred = BB->getSinglePredecessor()) {
      // If it has one pred, fold phi nodes in BB.
      while (isa<PHINode>(BB->begin()))
        ReplaceUsesOfWith(BB->begin(), 
                          cast<PHINode>(BB->begin())->getIncomingValue(0), 
                          Worklist, L, LPM);
      
      // If this is the header of a loop and the only pred is the latch, we now
      // have an unreachable loop.
      if (Loop *L = LI->getLoopFor(BB))
        if (loopHeader == BB && L->contains(Pred)) {
          // Remove the branch from the latch to the header block, this makes
          // the header dead, which will make the latch dead (because the header
          // dominates the latch).
          LPM->deleteSimpleAnalysisValue(Pred->getTerminator(), L);
          Pred->getTerminator()->eraseFromParent();
          new UnreachableInst(BB->getContext(), Pred);
          
          // The loop is now broken, remove it from LI.
          RemoveLoopFromHierarchy(L);
          
          // Reprocess the header, which now IS dead.
          RemoveBlockIfDead(BB, Worklist, L);
          return;
        }
      
      // If pred ends in an uncond branch, add the uncond branch to the
      // worklist so that the two blocks will get merged.
      if (BranchInst *BI = dyn_cast<BranchInst>(Pred->getTerminator()))
        if (BI->isUnconditional())
          Worklist.push_back(BI);
    }
    return;
  }

  DEBUG(dbgs() << "Nuking dead block: " << *BB);
  
  // Remove the instructions in the basic block from the worklist.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    RemoveFromWorklist(I, Worklist);
    
    // Anything that uses the instructions in this basic block should have their
    // uses replaced with undefs.
    // If I is not void type then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust itself.
    if (!I->getType()->isVoidTy())
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
  }
  
  // If this is the edge to the header block for a loop, remove the loop and
  // promote all subloops.
  if (Loop *BBLoop = LI->getLoopFor(BB)) {
    if (BBLoop->getLoopLatch() == BB)
      RemoveLoopFromHierarchy(BBLoop);
  }

  // Remove the block from the loop info, which removes it from any loops it
  // was in.
  LI->removeBlock(BB);
  
  
  // Remove phi node entries in successors for this block.
  TerminatorInst *TI = BB->getTerminator();
  SmallVector<BasicBlock*, 4> Succs;
  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
    Succs.push_back(TI->getSuccessor(i));
    TI->getSuccessor(i)->removePredecessor(BB);
  }
  
  // Unique the successors, dropping any duplicate entries.
  array_pod_sort(Succs.begin(), Succs.end());
  Succs.erase(std::unique(Succs.begin(), Succs.end()), Succs.end());
  
  // Remove the basic block, including all of the instructions contained in it.
  LPM->deleteSimpleAnalysisValue(BB, L);  
  BB->eraseFromParent();
  // Remove successor blocks here that are not dead, so that we know we only
  // have dead blocks in this list.  Nondead blocks have a way of becoming dead,
  // then getting removed before we revisit them, which is badness.
  //
  for (unsigned i = 0; i != Succs.size(); ++i)
    if (pred_begin(Succs[i]) != pred_end(Succs[i])) {
      // One exception is loop headers.  If this block was the preheader for a
      // loop, then we DO want to visit the loop so the loop gets deleted.
      // We know that if the successor is a loop header, that this loop had to
      // be the preheader: the case where this was the latch block was handled
      // above and headers can only have two predecessors.
      if (!LI->isLoopHeader(Succs[i])) {
        Succs.erase(Succs.begin()+i);
        --i;
      }
    }
  
  for (unsigned i = 0, e = Succs.size(); i != e; ++i)
    RemoveBlockIfDead(Succs[i], Worklist, L);
}
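
The final Succs loop above erases by index while iterating, stepping the index back after each erase so the element that shifts into the vacated slot is not skipped; this relies on the loop condition re-reading size() every iteration. A minimal sketch of the pattern:

#include <cassert>
#include <vector>

int main() {
  std::vector<int> v = {1, 2, 2, 3, 2, 4};

  // Erase every 2. After erase(begin() + i) the next element shifts into
  // slot i, so decrement i; the condition re-reads v.size() each pass.
  for (size_t i = 0; i != v.size(); ++i)
    if (v[i] == 2) {
      v.erase(v.begin() + i);
      --i;
    }

  assert((v == std::vector<int>{1, 3, 4}));
}
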
Example no. 14
Value* LoopTripCount::insertTripCount(Loop* L, Instruction* InsertPos)
{
	// inspired by Loop::getCanonicalInductionVariable
	BasicBlock *H = L->getHeader();
	BasicBlock* LoopPred = L->getLoopPredecessor();
	BasicBlock* startBB = NULL;//which basic block stores the start value
	int OneStep = 0;// the extra add/sub step applied when computing the count

   Assert(LoopPred, "Require Loop has a Pred");
	DEBUG(errs()<<"loop  depth:"<<L->getLoopDepth()<<"\n");
	/** what's the difference between using the predecessor and the preheader? */
	//RET_ON_FAIL(self->getLoopLatch()&&self->getLoopPreheader());
	//assert(self->getLoopLatch() && self->getLoopPreheader() && "need loop simplify form" );
	ret_null_fail(L->getLoopLatch(), "need loop simplify form");

	BasicBlock* TE = NULL;//True Exit
	SmallVector<BasicBlock*,4> Exits;
	L->getExitingBlocks(Exits);

	if(Exits.size()==1) TE = Exits.front();
	else{
		if(std::find(Exits.begin(),Exits.end(),L->getLoopLatch())!=Exits.end()) TE = L->getLoopLatch();
		else{
			SmallVector<llvm::Loop::Edge,4> ExitEdges;
			L->getExitEdges(ExitEdges);
			//STL idiom: first move every element matching the condition (exits whose terminator is unreachable) to the end of the vector, then erase them all at once
			ExitEdges.erase(std::remove_if(ExitEdges.begin(), ExitEdges.end(), 
						[](llvm::Loop::Edge& I){
						return isa<UnreachableInst>(I.second->getTerminator());
						}), ExitEdges.end());
			if(ExitEdges.size()==1) TE = const_cast<BasicBlock*>(ExitEdges.front().first);
		}
	}

	//process true exit
	ret_null_fail(TE, "need have a true exit");

	Instruction* IndOrNext = NULL;
	Value* END = NULL;
   //the exiting block's terminator: handle the BranchInst and SwitchInst cases separately;
   //branch instruction: br bool a1, a2; condition <--> bool
	if(isa<BranchInst>(TE->getTerminator())){
		const BranchInst* EBR = cast<BranchInst>(TE->getTerminator());
		Assert(EBR->isConditional(), "end branch is not conditional");
		ICmpInst* EC = dyn_cast<ICmpInst>(EBR->getCondition());
		if(EC->getPredicate() == EC->ICMP_SGT){
         Assert(!L->contains(EBR->getSuccessor(0)), *EBR<<":abnormal exit with greater than");//the exiting block's terminator ----> jumps out to code outside the loop
         OneStep += 1;
      } else if(EC->getPredicate() == EC->ICMP_EQ)
         Assert(!L->contains(EBR->getSuccessor(0)), *EBR<<":abnormal exit with equal");
      else if(EC->getPredicate() == EC->ICMP_SLT) {
         ret_null_fail(!L->contains(EBR->getSuccessor(1)), *EBR<<":abnormal exit with less than");
      } else {
         ret_null_fail(0, *EC<<" unknown combination of end condition");
      }
		IndOrNext = dyn_cast<Instruction>(castoff(EC->getOperand(0)));//strip the type cast
		END = EC->getOperand(1);
		DEBUG(errs()<<"end   value:"<<*EC<<"\n");
	}else if(isa<SwitchInst>(TE->getTerminator())){
		SwitchInst* ESW = const_cast<SwitchInst*>(cast<SwitchInst>(TE->getTerminator()));
		IndOrNext = dyn_cast<Instruction>(castoff(ESW->getCondition()));
		for(auto I = ESW->case_begin(),E = ESW->case_end();I!=E;++I){
			if(!L->contains(I.getCaseSuccessor())){
				ret_null_fail(!END,"");
				assert(!END && "shouldn't have two ends");
				END = I.getCaseValue();
			}
		}
		DEBUG(errs()<<"end   value:"<<*ESW<<"\n");
	}else{
		assert(0 && "unknow terminator type");
	}

	ret_null_fail(L->isLoopInvariant(END), "end value should be loop invariant");//at this point we have the END value

	Value* start = NULL;
	Value* ind = NULL;
	Instruction* next = NULL;
	bool addfirst = false;//whether the add executes before the icmp

	DISABLE(errs()<<*IndOrNext<<"\n");
	if(isa<LoadInst>(IndOrNext)){
		//memory depend analysis
		Value* PSi = IndOrNext->getOperand(0);//pointer-typed Step.i

		int SICount[2] = {0};//[0]: stores in the predecessor; [1]: stores in the loop body
		for( auto I = PSi->use_begin(),E = PSi->use_end();I!=E;++I){
			DISABLE(errs()<<**I<<"\n");
			StoreInst* SI = dyn_cast<StoreInst>(*I);
			if(!SI || SI->getOperand(1) != PSi) continue;
			if(!start&&L->isLoopInvariant(SI->getOperand(0))) {
				if(SI->getParent() != LoopPred)
					if(std::find(pred_begin(LoopPred),pred_end(LoopPred),SI->getParent()) == pred_end(LoopPred)) continue;
				start = SI->getOperand(0);
				startBB = SI->getParent();
				++SICount[0];
			}
			Instruction* SI0 = dyn_cast<Instruction>(SI->getOperand(0));
			if(L->contains(SI) && SI0 && SI0->getOpcode() == Instruction::Add){
				next = SI0;
				++SICount[1];
			}

		}
		Assert(SICount[0]==1 && SICount[1]==1, "");
		ind = IndOrNext;
	}else{
		if(isa<PHINode>(IndOrNext)){
			PHINode* PHI = cast<PHINode>(IndOrNext);
			ind = IndOrNext;
			if(castoff(PHI->getIncomingValue(0)) == castoff(PHI->getIncomingValue(1)) && PHI->getParent() != H)
				ind = castoff(PHI->getIncomingValue(0));
			addfirst = false;
		}else if(IndOrNext->getOpcode() == Instruction::Add){
			next = IndOrNext;
			addfirst = true;
		}else{
			Assert(0 ,"unknow how to analysis");
		}

		for(auto I = H->begin();isa<PHINode>(I);++I){
			PHINode* P = cast<PHINode>(I);
			if(ind && P == ind){
				//start = P->getIncomingValueForBlock(L->getLoopPredecessor());
				start = tryFindStart(P, L, startBB);
				next = dyn_cast<Instruction>(P->getIncomingValueForBlock(L->getLoopLatch()));
			}else if(next && P->getIncomingValueForBlock(L->getLoopLatch()) == next){
				//start = P->getIncomingValueForBlock(L->getLoopPredecessor());
				start = tryFindStart(P, L, startBB);
				ind = P;
			}
		}
	}


	Assert(start ,"couldn't find a start value");
	//process complex loops later
	//DEBUG(if(L->getLoopDepth()>1 || !L->getSubLoops().empty()) return NULL);
	DEBUG(errs()<<"start value:"<<*start<<"\n");
	DEBUG(errs()<<"ind   value:"<<*ind<<"\n");
	DEBUG(errs()<<"next  value:"<<*next<<"\n");


	//process non add later
	unsigned next_phi_idx = 0;
	ConstantInt* Step = NULL,*PrevStep = NULL;/*only used if next is phi node*/
   ret_null_fail(next, "");
	PHINode* next_phi = dyn_cast<PHINode>(next);
	do{
		if(next_phi) {
			next = dyn_cast<Instruction>(next_phi->getIncomingValue(next_phi_idx));
			ret_null_fail(next, "");
			DEBUG(errs()<<"next phi "<<next_phi_idx<<":"<<*next<<"\n");
			if(Step&&PrevStep){
				Assert(Step->getSExtValue() == PrevStep->getSExtValue(),"");
			}
			PrevStep = Step;
		}
		Assert(next->getOpcode() == Instruction::Add , "why induction increment is not Add");
		Assert(next->getOperand(0) == ind ,"why induction increment is not an add of the induction variable itself");
		Step = dyn_cast<ConstantInt>(next->getOperand(1));
		Assert(Step,"");
	}while(next_phi && ++next_phi_idx<next_phi->getNumIncomingValues());
	//RET_ON_FAIL(Step->equalsInt(1));
	//assert(VERBOSE(Step->equalsInt(1),Step) && "why induction increment number is not 1");


	Value* RES = NULL;
	//if there is no predecessor, we can insert code into the start value's basic block
	IRBuilder<> Builder(InsertPos);
	Assert(start->getType()->isIntegerTy() && END->getType()->isIntegerTy() , " why increment is not integer type");
	if(start->getType() != END->getType()){
		start = Builder.CreateCast(CastInst::getCastOpcode(start, false,
					END->getType(), false),start,END->getType());
	}
   if(Step->getType() != END->getType()){
      //Because Step is a Constant, its cast result is also a Constant
		Step = dyn_cast<ConstantInt>(Builder.CreateCast(CastInst::getCastOpcode(Step, false,
					END->getType(), false),Step,END->getType()));
      AssertRuntime(Step);
   }
	if(Step->isMinusOne())
		RES = Builder.CreateSub(start,END);
	else//Step Couldn't be zero
		RES = Builder.CreateSub(END, start);
	if(addfirst) OneStep -= 1;
	if(Step->isMinusOne()) OneStep*=-1;
	assert(OneStep<=1 && OneStep>=-1);
	RES = (OneStep==1)?Builder.CreateAdd(RES,Step):(OneStep==-1)?Builder.CreateSub(RES, Step):RES;
	if(!Step->isMinusOne()&&!Step->isOne())
		RES = Builder.CreateSDiv(RES, Step);
	RES->setName(H->getName()+".tc");

	return RES;
}
Example no. 15
/// ParseMicrosoftAsmStatement. When -fms-extensions/-fasm-blocks is enabled,
/// this routine is called to collect the tokens for an MS asm statement.
///
/// [MS]  ms-asm-statement:
///         ms-asm-block
///         ms-asm-block ms-asm-statement
///
/// [MS]  ms-asm-block:
///         '__asm' ms-asm-line '\n'
///         '__asm' '{' ms-asm-instruction-block[opt] '}' ';'[opt]
///
/// [MS]  ms-asm-instruction-block
///         ms-asm-line
///         ms-asm-line '\n' ms-asm-instruction-block
///
StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
  SourceManager &SrcMgr = PP.getSourceManager();
  SourceLocation EndLoc = AsmLoc;
  SmallVector<Token, 4> AsmToks;

  bool SingleLineMode = true;
  unsigned BraceNesting = 0;
  unsigned short savedBraceCount = BraceCount;
  bool InAsmComment = false;
  FileID FID;
  unsigned LineNo = 0;
  unsigned NumTokensRead = 0;
  SmallVector<SourceLocation, 4> LBraceLocs;
  bool SkippedStartOfLine = false;

  if (Tok.is(tok::l_brace)) {
    // Braced inline asm: consume the opening brace.
    SingleLineMode = false;
    BraceNesting = 1;
    EndLoc = ConsumeBrace();
    LBraceLocs.push_back(EndLoc);
    ++NumTokensRead;
  } else {
    // Single-line inline asm; compute which line it is on.
    std::pair<FileID, unsigned> ExpAsmLoc =
        SrcMgr.getDecomposedExpansionLoc(EndLoc);
    FID = ExpAsmLoc.first;
    LineNo = SrcMgr.getLineNumber(FID, ExpAsmLoc.second);
    LBraceLocs.push_back(SourceLocation());
  }

  SourceLocation TokLoc = Tok.getLocation();
  do {
    // If we hit EOF, we're done, period.
    if (isEofOrEom())
      break;

    if (!InAsmComment && Tok.is(tok::l_brace)) {
      // Consume the opening brace.
      SkippedStartOfLine = Tok.isAtStartOfLine();
      EndLoc = ConsumeBrace();
      BraceNesting++;
      LBraceLocs.push_back(EndLoc);
      TokLoc = Tok.getLocation();
      ++NumTokensRead;
      continue;
    } else if (!InAsmComment && Tok.is(tok::semi)) {
      // A semicolon in an asm is the start of a comment.
      InAsmComment = true;
      if (!SingleLineMode) {
        // Compute which line the comment is on.
        std::pair<FileID, unsigned> ExpSemiLoc =
            SrcMgr.getDecomposedExpansionLoc(TokLoc);
        FID = ExpSemiLoc.first;
        LineNo = SrcMgr.getLineNumber(FID, ExpSemiLoc.second);
      }
    } else if (SingleLineMode || InAsmComment) {
      // If end-of-line is significant, check whether this token is on a
      // new line.
      std::pair<FileID, unsigned> ExpLoc =
          SrcMgr.getDecomposedExpansionLoc(TokLoc);
      if (ExpLoc.first != FID ||
          SrcMgr.getLineNumber(ExpLoc.first, ExpLoc.second) != LineNo) {
        // If this is a single-line __asm, we're done, except if the next
        // line begins with an __asm too, in which case we finish a comment
        // if needed and then keep processing the next line as a single
        // line __asm.
        bool isAsm = Tok.is(tok::kw_asm);
        if (SingleLineMode && !isAsm)
          break;
        // We're no longer in a comment.
        InAsmComment = false;
        if (isAsm) {
          LineNo = SrcMgr.getLineNumber(ExpLoc.first, ExpLoc.second);
          SkippedStartOfLine = Tok.isAtStartOfLine();
        }
      } else if (!InAsmComment && Tok.is(tok::r_brace)) {
        // In MSVC mode, braces only participate in brace matching and
        // separating the asm statements.  This is an intentional
        // departure from the Apple gcc behavior.
        if (!BraceNesting)
          break;
      }
    }
    if (!InAsmComment && BraceNesting && Tok.is(tok::r_brace) &&
        BraceCount == (savedBraceCount + BraceNesting)) {
      // Consume the closing brace.
      SkippedStartOfLine = Tok.isAtStartOfLine();
      EndLoc = ConsumeBrace();
      BraceNesting--;
      // Finish if all of the opened braces in the inline asm section were
      // consumed.
      if (BraceNesting == 0 && !SingleLineMode)
        break;
      else {
        LBraceLocs.pop_back();
        TokLoc = Tok.getLocation();
        ++NumTokensRead;
        continue;
      }
    }

    // Consume the next token; make sure we don't modify the brace count etc.
    // if we are in a comment.
    EndLoc = TokLoc;
    if (InAsmComment)
      PP.Lex(Tok);
    else {
      // Set the token as the start of line if we skipped the original start
      // of line token in case it was a nested brace.
      if (SkippedStartOfLine)
        Tok.setFlag(Token::StartOfLine);
      AsmToks.push_back(Tok);
      ConsumeAnyToken();
    }
    TokLoc = Tok.getLocation();
    ++NumTokensRead;
    SkippedStartOfLine = false;
  } while (1);

  if (BraceNesting && BraceCount != savedBraceCount) {
    // __asm without closing brace (this can happen at EOF).
    for (unsigned i = 0; i < BraceNesting; ++i) {
      Diag(Tok, diag::err_expected) << tok::r_brace;
      Diag(LBraceLocs.back(), diag::note_matching) << tok::l_brace;
      LBraceLocs.pop_back();
    }
    return StmtError();
  } else if (NumTokensRead == 0) {
    // Empty __asm.
    Diag(Tok, diag::err_expected) << tok::l_brace;
    return StmtError();
  }

  // Okay, prepare to use MC to parse the assembly.
  SmallVector<StringRef, 4> ConstraintRefs;
  SmallVector<Expr *, 4> Exprs;
  SmallVector<StringRef, 4> ClobberRefs;

  // We need an actual supported target.
  const llvm::Triple &TheTriple = Actions.Context.getTargetInfo().getTriple();
  llvm::Triple::ArchType ArchTy = TheTriple.getArch();
  const std::string &TT = TheTriple.getTriple();
  const llvm::Target *TheTarget = nullptr;
  bool UnsupportedArch =
      (ArchTy != llvm::Triple::x86 && ArchTy != llvm::Triple::x86_64);
  if (UnsupportedArch) {
    Diag(AsmLoc, diag::err_msasm_unsupported_arch) << TheTriple.getArchName();
  } else {
    std::string Error;
    TheTarget = llvm::TargetRegistry::lookupTarget(TT, Error);
    if (!TheTarget)
      Diag(AsmLoc, diag::err_msasm_unable_to_create_target) << Error;
  }

  assert(!LBraceLocs.empty() && "Should have at least one location here");

  // If we don't support assembly, or the assembly is empty, we don't
  // need to instantiate the AsmParser, etc.
  if (!TheTarget || AsmToks.empty()) {
    return Actions.ActOnMSAsmStmt(AsmLoc, LBraceLocs[0], AsmToks, StringRef(),
                                  /*NumOutputs*/ 0, /*NumInputs*/ 0,
                                  ConstraintRefs, ClobberRefs, Exprs, EndLoc);
  }

  // Expand the tokens into a string buffer.
  SmallString<512> AsmString;
  SmallVector<unsigned, 8> TokOffsets;
  if (buildMSAsmString(PP, AsmLoc, AsmToks, TokOffsets, AsmString))
    return StmtError();

  std::unique_ptr<llvm::MCRegisterInfo> MRI(TheTarget->createMCRegInfo(TT));
  std::unique_ptr<llvm::MCAsmInfo> MAI(TheTarget->createMCAsmInfo(*MRI, TT));
  // Get the instruction descriptor.
  std::unique_ptr<llvm::MCInstrInfo> MII(TheTarget->createMCInstrInfo());
  std::unique_ptr<llvm::MCObjectFileInfo> MOFI(new llvm::MCObjectFileInfo());
  std::unique_ptr<llvm::MCSubtargetInfo> STI(
      TheTarget->createMCSubtargetInfo(TT, "", ""));

  llvm::SourceMgr TempSrcMgr;
  llvm::MCContext Ctx(MAI.get(), MRI.get(), MOFI.get(), &TempSrcMgr);
  MOFI->InitMCObjectFileInfo(TheTriple, llvm::Reloc::Default,
                             llvm::CodeModel::Default, Ctx);
  std::unique_ptr<llvm::MemoryBuffer> Buffer =
      llvm::MemoryBuffer::getMemBuffer(AsmString, "<MS inline asm>");

  // Tell SrcMgr about this buffer, which is what the parser will pick up.
  TempSrcMgr.AddNewSourceBuffer(std::move(Buffer), llvm::SMLoc());

  std::unique_ptr<llvm::MCStreamer> Str(createNullStreamer(Ctx));
  std::unique_ptr<llvm::MCAsmParser> Parser(
      createMCAsmParser(TempSrcMgr, Ctx, *Str.get(), *MAI));

  // FIXME: init MCOptions from sanitizer flags here.
  llvm::MCTargetOptions MCOptions;
  std::unique_ptr<llvm::MCTargetAsmParser> TargetParser(
      TheTarget->createMCAsmParser(*STI, *Parser, *MII, MCOptions));

  std::unique_ptr<llvm::MCInstPrinter> IP(
      TheTarget->createMCInstPrinter(llvm::Triple(TT), 1, *MAI, *MII, *MRI));

  // Change to the Intel dialect.
  Parser->setAssemblerDialect(1);
  Parser->setTargetParser(*TargetParser.get());
  Parser->setParsingInlineAsm(true);
  TargetParser->setParsingInlineAsm(true);

  ClangAsmParserCallback Callback(*this, AsmLoc, AsmString, AsmToks,
                                  TokOffsets);
  TargetParser->setSemaCallback(&Callback);
  TempSrcMgr.setDiagHandler(ClangAsmParserCallback::DiagHandlerCallback,
                            &Callback);

  unsigned NumOutputs;
  unsigned NumInputs;
  std::string AsmStringIR;
  SmallVector<std::pair<void *, bool>, 4> OpExprs;
  SmallVector<std::string, 4> Constraints;
  SmallVector<std::string, 4> Clobbers;
  if (Parser->parseMSInlineAsm(AsmLoc.getPtrEncoding(), AsmStringIR, NumOutputs,
                               NumInputs, OpExprs, Constraints, Clobbers,
                               MII.get(), IP.get(), Callback))
    return StmtError();

  // Filter out "fpsw".  Clang doesn't accept it, and it always lists flags and
  // fpsr as clobbers.
  auto End = std::remove(Clobbers.begin(), Clobbers.end(), "fpsw");
  Clobbers.erase(End, Clobbers.end());

  // Build the vector of clobber StringRefs.
  ClobberRefs.insert(ClobberRefs.end(), Clobbers.begin(), Clobbers.end());

  // Recast the void pointers and build the vector of constraint StringRefs.
  unsigned NumExprs = NumOutputs + NumInputs;
  ConstraintRefs.resize(NumExprs);
  Exprs.resize(NumExprs);
  for (unsigned i = 0, e = NumExprs; i != e; ++i) {
    Expr *OpExpr = static_cast<Expr *>(OpExprs[i].first);
    if (!OpExpr)
      return StmtError();

    // Need address of variable.
    if (OpExprs[i].second)
      OpExpr =
          Actions.BuildUnaryOp(getCurScope(), AsmLoc, UO_AddrOf, OpExpr).get();

    ConstraintRefs[i] = StringRef(Constraints[i]);
    Exprs[i] = OpExpr;
  }

  // FIXME: We should be passing source locations for better diagnostics.
  return Actions.ActOnMSAsmStmt(AsmLoc, LBraceLocs[0], AsmToks, AsmStringIR,
                                NumOutputs, NumInputs, ConstraintRefs,
                                ClobberRefs, Exprs, EndLoc);
}
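
The "fpsw" filtering above is the standard erase-remove idiom: std::remove shifts the kept elements forward, and the erase call trims the leftover tail. A minimal standalone sketch of the same pattern, using hypothetical clobber names:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

int main() {
  // Hypothetical clobber list; every "fpsw" entry is dropped, as above.
  std::vector<std::string> Clobbers = {"eax", "fpsw", "flags", "fpsw"};
  auto End = std::remove(Clobbers.begin(), Clobbers.end(), "fpsw");
  Clobbers.erase(End, Clobbers.end()); // shrink to the kept prefix
  for (const std::string &C : Clobbers)
    std::cout << C << '\n'; // prints eax, then flags
  return 0;
}
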
Example no. 16
0
LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
    LazyCallGraph &G, LazyCallGraph::SCC &InitialC, LazyCallGraph::Node &N,
    CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR) {
  using Node = LazyCallGraph::Node;
  using Edge = LazyCallGraph::Edge;
  using SCC = LazyCallGraph::SCC;
  using RefSCC = LazyCallGraph::RefSCC;

  RefSCC &InitialRC = InitialC.getOuterRefSCC();
  SCC *C = &InitialC;
  RefSCC *RC = &InitialRC;
  Function &F = N.getFunction();

  // Walk the function body and build up the set of retained, promoted, and
  // demoted edges.
  SmallVector<Constant *, 16> Worklist;
  SmallPtrSet<Constant *, 16> Visited;
  SmallPtrSet<Node *, 16> RetainedEdges;
  SmallSetVector<Node *, 4> PromotedRefTargets;
  SmallSetVector<Node *, 4> DemotedCallTargets;

  // First walk the function and handle all called functions. We do this first
  // because if there is a single call edge, whether there are ref edges is
  // irrelevant.
  for (Instruction &I : instructions(F))
    if (auto CS = CallSite(&I))
      if (Function *Callee = CS.getCalledFunction())
        if (Visited.insert(Callee).second && !Callee->isDeclaration()) {
          Node &CalleeN = *G.lookup(*Callee);
          Edge *E = N->lookup(CalleeN);
          // FIXME: We should really handle adding new calls. While it will
          // make downstream usage more complex, there is no fundamental
          // limitation and it will allow passes within the CGSCC to be a bit
          // more flexible in what transforms they can do. Until then, we
          // verify that new calls haven't been introduced.
          assert(E && "No function transformations should introduce *new* "
                      "call edges! Any new calls should be modeled as "
                      "promoted existing ref edges!");
          bool Inserted = RetainedEdges.insert(&CalleeN).second;
          (void)Inserted;
          assert(Inserted && "We should never visit a function twice.");
          if (!E->isCall())
            PromotedRefTargets.insert(&CalleeN);
        }

  // Now walk all references.
  for (Instruction &I : instructions(F))
    for (Value *Op : I.operand_values())
      if (auto *C = dyn_cast<Constant>(Op))
        if (Visited.insert(C).second)
          Worklist.push_back(C);

  auto VisitRef = [&](Function &Referee) {
    Node &RefereeN = *G.lookup(Referee);
    Edge *E = N->lookup(RefereeN);
    // FIXME: Similarly to new calls, we also currently preclude
    // introducing new references. See above for details.
    assert(E && "No function transformations should introduce *new* ref "
                "edges! Any new ref edges would require IPO which "
                "function passes aren't allowed to do!");
    bool Inserted = RetainedEdges.insert(&RefereeN).second;
    (void)Inserted;
    assert(Inserted && "We should never visit a function twice.");
    if (E->isCall())
      DemotedCallTargets.insert(&RefereeN);
  };
  LazyCallGraph::visitReferences(Worklist, Visited, VisitRef);

  // Include synthetic reference edges to known, defined lib functions.
  for (auto *F : G.getLibFunctions())
    // While the list of lib functions doesn't have repeats, don't re-visit
    // anything handled above.
    if (!Visited.count(F))
      VisitRef(*F);

  // First remove all of the edges that are no longer present in this function.
  // The first step makes these edges uniformly ref edges and accumulates them
  // into a separate data structure so removal doesn't invalidate anything.
  SmallVector<Node *, 4> DeadTargets;
  for (Edge &E : *N) {
    if (RetainedEdges.count(&E.getNode()))
      continue;

    SCC &TargetC = *G.lookupSCC(E.getNode());
    RefSCC &TargetRC = TargetC.getOuterRefSCC();
    if (&TargetRC == RC && E.isCall()) {
      if (C != &TargetC) {
        // For separate SCCs this is trivial.
        RC->switchTrivialInternalEdgeToRef(N, E.getNode());
      } else {
        // Now update the call graph.
        C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, E.getNode()),
                                   G, N, C, AM, UR);
      }
    }

    // Now that this is ready for actual removal, put it into our list.
    DeadTargets.push_back(&E.getNode());
  }
  // Remove the easy cases quickly and actually pull them out of our list.
  DeadTargets.erase(
      llvm::remove_if(DeadTargets,
                      [&](Node *TargetN) {
                        SCC &TargetC = *G.lookupSCC(*TargetN);
                        RefSCC &TargetRC = TargetC.getOuterRefSCC();

                        // We can't trivially remove internal targets, so skip
                        // those.
                        if (&TargetRC == RC)
                          return false;

                        RC->removeOutgoingEdge(N, *TargetN);
                        LLVM_DEBUG(dbgs() << "Deleting outgoing edge from '"
                                          << N << "' to '" << TargetN << "'\n");
                        return true;
                      }),
      DeadTargets.end());

  // Now do a batch removal of the internal ref edges left.
  auto NewRefSCCs = RC->removeInternalRefEdge(N, DeadTargets);
  if (!NewRefSCCs.empty()) {
    // The old RefSCC is dead, mark it as such.
    UR.InvalidatedRefSCCs.insert(RC);

    // Note that we don't bother to invalidate analyses as ref-edge
    // connectivity is not really observable in any way and is intended
    // exclusively to be used for ordering of transforms rather than for
    // analysis conclusions.

    // Update RC to the "bottom".
    assert(G.lookupSCC(N) == C && "Changed the SCC when splitting RefSCCs!");
    RC = &C->getOuterRefSCC();
    assert(G.lookupRefSCC(N) == RC && "Failed to update current RefSCC!");

    // The RC worklist is in reverse postorder, so we enqueue the new ones in
    // RPO except for the one which contains the source node as that is the
    // "bottom" we will continue processing in the bottom-up walk.
    assert(NewRefSCCs.front() == RC &&
           "New current RefSCC not first in the returned list!");
    for (RefSCC *NewRC : llvm::reverse(make_range(std::next(NewRefSCCs.begin()),
                                                  NewRefSCCs.end()))) {
      assert(NewRC != RC && "Should not encounter the current RefSCC further "
                            "in the postorder list of new RefSCCs.");
      UR.RCWorklist.insert(NewRC);
      LLVM_DEBUG(dbgs() << "Enqueuing a new RefSCC in the update worklist: "
                        << *NewRC << "\n");
    }
  }

  // Next demote all the call edges that are now ref edges. This helps make
  // the SCCs small, which should minimize the work below, as we don't want
  // to form cycles that this would break.
  for (Node *RefTarget : DemotedCallTargets) {
    SCC &TargetC = *G.lookupSCC(*RefTarget);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    // The easy case is when the target RefSCC is not this RefSCC. This is
    // only supported when the target RefSCC is a child of this RefSCC.
    if (&TargetRC != RC) {
      assert(RC->isAncestorOf(TargetRC) &&
             "Cannot potentially form RefSCC cycles here!");
      RC->switchOutgoingEdgeToRef(N, *RefTarget);
      LLVM_DEBUG(dbgs() << "Switch outgoing call edge to a ref edge from '" << N
                        << "' to '" << *RefTarget << "'\n");
      continue;
    }

    // We are switching an internal call edge to a ref edge. This may split up
    // some SCCs.
    if (C != &TargetC) {
      // For separate SCCs this is trivial.
      RC->switchTrivialInternalEdgeToRef(N, *RefTarget);
      continue;
    }

    // Now update the call graph.
    C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, *RefTarget), G, N,
                               C, AM, UR);
  }

  // Now promote ref edges into call edges.
  for (Node *CallTarget : PromotedRefTargets) {
    SCC &TargetC = *G.lookupSCC(*CallTarget);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    // The easy case is when the target RefSCC is not this RefSCC. This is
    // only supported when the target RefSCC is a child of this RefSCC.
    if (&TargetRC != RC) {
      assert(RC->isAncestorOf(TargetRC) &&
             "Cannot potentially form RefSCC cycles here!");
      RC->switchOutgoingEdgeToCall(N, *CallTarget);
      LLVM_DEBUG(dbgs() << "Switch outgoing ref edge to a call edge from '" << N
                        << "' to '" << *CallTarget << "'\n");
      continue;
    }
    LLVM_DEBUG(dbgs() << "Switch an internal ref edge to a call edge from '"
                      << N << "' to '" << *CallTarget << "'\n");

    // Otherwise we are switching an internal ref edge to a call edge. This
    // may merge away some SCCs, and we add those to the UpdateResult. We also
    // need to make sure to update the worklist in the event SCCs have moved
    // before the current one in the post-order sequence.
    bool HasFunctionAnalysisProxy = false;
    auto InitialSCCIndex = RC->find(*C) - RC->begin();
    bool FormedCycle = RC->switchInternalEdgeToCall(
        N, *CallTarget, [&](ArrayRef<SCC *> MergedSCCs) {
          for (SCC *MergedC : MergedSCCs) {
            assert(MergedC != &TargetC && "Cannot merge away the target SCC!");

            HasFunctionAnalysisProxy |=
                AM.getCachedResult<FunctionAnalysisManagerCGSCCProxy>(
                    *MergedC) != nullptr;

            // Mark that this SCC will no longer be valid.
            UR.InvalidatedSCCs.insert(MergedC);

            // FIXME: We should really do a 'clear' here to forcibly release
            // memory, but we don't have a good way of doing that and
            // preserving the function analyses.
            auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
            PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
            AM.invalidate(*MergedC, PA);
          }
        });

    // If we formed a cycle by creating this call, we need to update more data
    // structures.
    if (FormedCycle) {
      C = &TargetC;
      assert(G.lookupSCC(N) == C && "Failed to update current SCC!");

      // If one of the invalidated SCCs had a cached proxy to a function
      // analysis manager, we need to create a proxy in the new current SCC as
      // the invalidated SCCs had their functions moved.
      if (HasFunctionAnalysisProxy)
        AM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, G);

      // Any analyses cached for this SCC are no longer precise as the shape
      // has changed by introducing this cycle. However, we have taken care to
      // update the proxies so it remains valid.
      auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
      PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
      AM.invalidate(*C, PA);
    }
    auto NewSCCIndex = RC->find(*C) - RC->begin();
    // If we have actually moved an SCC to be topologically "below" the current
    // one due to merging, we will need to revisit the current SCC after
    // visiting those moved SCCs.
    //
    // It is critical that we *do not* revisit the current SCC unless we
    // actually move SCCs in the process of merging because otherwise we may
    // form a cycle where an SCC is split apart, merged, split, merged and so
    // on infinitely.
    if (InitialSCCIndex < NewSCCIndex) {
      // Put our current SCC back onto the worklist as we'll visit other SCCs
      // that are now definitively ordered prior to the current one in the
      // post-order sequence, and may end up observing more precise context to
      // optimize the current SCC.
      UR.CWorklist.insert(C);
      LLVM_DEBUG(dbgs() << "Enqueuing the existing SCC in the worklist: " << *C
                        << "\n");
      // Enqueue in reverse order as we pop off the back of the worklist.
      for (SCC &MovedC : llvm::reverse(make_range(RC->begin() + InitialSCCIndex,
                                                  RC->begin() + NewSCCIndex))) {
        UR.CWorklist.insert(&MovedC);
        LLVM_DEBUG(dbgs() << "Enqueuing a newly earlier in post-order SCC: "
                          << MovedC << "\n");
      }
    }
  }

  assert(!UR.InvalidatedSCCs.count(C) && "Invalidated the current SCC!");
  assert(!UR.InvalidatedRefSCCs.count(RC) && "Invalidated the current RefSCC!");
  assert(&C->getOuterRefSCC() == RC && "Current SCC not in current RefSCC!");

  // Record the current RefSCC and SCC for higher layers of the CGSCC pass
  // manager now that all the updates have been applied.
  if (RC != &InitialRC)
    UR.UpdatedRC = RC;
  if (C != &InitialC)
    UR.UpdatedC = C;

  return *C;
}
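
The DeadTargets pruning above leans on a useful guarantee of remove_if: the predicate is applied exactly once per element, so a side-effecting predicate (here, the removeOutgoingEdge call) is safe. A minimal sketch of the same erase/remove_if shape, with even numbers standing in for targets that can be removed on the spot:

#include <algorithm>
#include <iostream>
#include <vector>

int main() {
  // Even numbers stand in for edges to other RefSCCs that are removable
  // immediately; odd numbers for internal targets that must be kept.
  std::vector<int> DeadTargets = {1, 2, 3, 4};
  DeadTargets.erase(std::remove_if(DeadTargets.begin(), DeadTargets.end(),
                                   [](int T) {
                                     // remove_if applies the predicate exactly
                                     // once per element, which is what makes a
                                     // side effect safe in the code above.
                                     return T % 2 == 0;
                                   }),
                    DeadTargets.end());
  for (int T : DeadTargets)
    std::cout << T << '\n'; // prints 1 and 3
  return 0;
}
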
Example no. 17
0
bool PPCQPXLoadSplat::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  bool MadeChange = false;
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  for (auto MFI = MF.begin(), MFIE = MF.end(); MFI != MFIE; ++MFI) {
    MachineBasicBlock *MBB = &*MFI;
    SmallVector<MachineInstr *, 4> Splats;

    for (auto MBBI = MBB->rbegin(); MBBI != MBB->rend(); ++MBBI) {
      MachineInstr *MI = &*MBBI;

      if (MI->hasUnmodeledSideEffects() || MI->isCall()) {
        Splats.clear();
        continue;
      }

      // We're looking for a sequence like this:
      // %F0<def> = LFD 0, %X3<kill>, %QF0<imp-def>; mem:LD8[%a](tbaa=!2)
      // %QF1<def> = QVESPLATI %QF0<kill>, 0, %RM<imp-use>

      for (auto SI = Splats.begin(); SI != Splats.end();) {
        MachineInstr *SMI = *SI;
        unsigned SplatReg = SMI->getOperand(0).getReg();
        unsigned SrcReg = SMI->getOperand(1).getReg();

        if (MI->modifiesRegister(SrcReg, TRI)) {
          switch (MI->getOpcode()) {
          default:
            SI = Splats.erase(SI);
            continue;
          case PPC::LFS:
          case PPC::LFD:
          case PPC::LFSU:
          case PPC::LFDU:
          case PPC::LFSUX:
          case PPC::LFDUX:
          case PPC::LFSX:
          case PPC::LFDX:
          case PPC::LFIWAX:
          case PPC::LFIWZX:
            if (SplatReg != SrcReg) {
              // We need to change the load to define the scalar subregister of
              // the QPX splat source register.
              unsigned SubRegIndex =
                TRI->getSubRegIndex(SrcReg, MI->getOperand(0).getReg());
              unsigned SplatSubReg = TRI->getSubReg(SplatReg, SubRegIndex);

              // Substitute both the explicit defined register, and also the
              // implicit def of the containing QPX register.
              MI->getOperand(0).setReg(SplatSubReg);
              MI->substituteRegister(SrcReg, SplatReg, 0, *TRI);
            }

            SI = Splats.erase(SI);

            // If SMI is directly after MI, then MBBI's base iterator is
            // pointing at SMI.  Adjust MBBI around the call to erase SMI to
            // avoid invalidating MBBI.
            ++MBBI;
            SMI->eraseFromParent();
            --MBBI;

            ++NumSimplified;
            MadeChange = true;
            continue;
          }
        }

        // If this instruction defines the splat register, then we cannot move
        // the previous definition above it. If it reads from the splat
        // register, then it must already be alive from some previous
        // definition, and if the splat register is different from the source
        // register, then this definition must not be the load for which we're
        // searching.
        if (MI->modifiesRegister(SplatReg, TRI) ||
            (SrcReg != SplatReg &&
             MI->readsRegister(SplatReg, TRI))) {
          SI = Splats.erase(SI);
          continue;
        }

        ++SI;
      }

      if (MI->getOpcode() != PPC::QVESPLATI &&
          MI->getOpcode() != PPC::QVESPLATIs &&
          MI->getOpcode() != PPC::QVESPLATIb)
        continue;
      if (MI->getOperand(2).getImm() != 0)
        continue;

      // If there are other uses of the scalar value after this, replacing
      // those uses might be non-trivial.
      if (!MI->getOperand(1).isKill())
        continue;

      Splats.push_back(MI);
    }
  }

  return MadeChange;
}
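
The splat worklist above erases entries mid-iteration with SI = Splats.erase(SI). A minimal sketch of that pattern on a plain std::vector (the values are hypothetical):

#include <iostream>
#include <vector>

int main() {
  std::vector<int> Worklist = {5, 8, 12, 7};
  for (auto It = Worklist.begin(); It != Worklist.end();) {
    if (*It % 2 == 0) {
      // erase() invalidates It but returns the next valid iterator,
      // so this path must not also increment.
      It = Worklist.erase(It);
      continue;
    }
    ++It;
  }
  for (int V : Worklist)
    std::cout << V << '\n'; // prints 5 and 7
  return 0;
}
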
Example no. 18
0
/// describeAliasSet - Return TBAA metadata describing what a load from or store
/// to the given tree may alias.
MDNode *describeAliasSet(tree t) {
  alias_set_type alias_set = get_alias_set(t);
  // Alias set 0 is the root of the alias graph and can alias anything.  A
  // negative value represents an unknown alias set, which as far as we know
  // may also alias anything.
  if (alias_set <= 0)
    return 0;

  // The difficulty here is that GCC's alias sets are the nodes of a directed
  // acyclic graph (DAG) rooted at 0, and in complicated cases it really is a
  // DAG and not a tree.  This is due to record types: the DAG has a node for
  // each record type with, for every field, an edge from the node to the node
  // for the field's type.  As a result the leaves of the DAG are usually scalar
  // types, with an incoming edge from every record type with a field with that
  // scalar type.  On the other hand, LLVM requires TBAA nodes to form a tree.
  // In short we need to come up with a tree and a graph map (i.e. a map that
  // takes nodes to nodes and edges to edges) from GCC's DAG to this tree.  (An
  // alternative is to complicate LLVM so that it too uses DAGs for TBAA).  An
  // additional difficulty is that we don't actually know the edges in the DAG:
  // GCC's alias analysis interface does not expose them.  All that we have is
  // alias_set_subset_of(s, t) which returns true iff there is a path from t to
  // s in the DAG.  Finally, we don't know the nodes of the DAG either!  We only
  // discover them progressively as we convert functions.
  // For the moment we take a very simple approach: we only use the leaf nodes
  // of GCC's DAG.  This means that we do a good job for scalars and a poor job
  // for record types, including complex types.
  static std::map<alias_set_type, MDNode *> NodeTags; // Node -> metadata map.
  static SmallVector<alias_set_type, 8> LeafNodes;    // Current set of leaves.

  std::map<alias_set_type, MDNode *>::iterator I = NodeTags.find(alias_set);
  if (I != NodeTags.end())
    return I->second;

  if (LeafNodes.empty())
    // Check for a GCC special case: a node can have an edge to the root node.
    // This is handled automatically (below) except when there are not yet any
    // known leaf nodes.
    if (alias_set_subset_of(0, alias_set)) {
      NodeTags[alias_set] = 0;
      return 0;
    }

  // If there is a path from this node to any leaf node then it is not a leaf
  // node and can be discarded.
  for (unsigned i = 0, e = (unsigned) LeafNodes.size(); i != e; ++i)
    if (alias_set_subset_of(LeafNodes[i], alias_set)) {
      NodeTags[alias_set] = 0;
      return 0;
    }
  assert(!alias_set_subset_of(0, alias_set) && "'May alias' not transitive?");

  // If there is a path from any leaf node to this one then no longer consider
  // that node to be a leaf.
  for (unsigned i = (unsigned) LeafNodes.size(); i;) {
    alias_set_type leaf_set = LeafNodes[--i];
    if (alias_set_subset_of(alias_set, leaf_set)) {
      LeafNodes.erase(LeafNodes.begin() + i);
      MDNode *&LeafTag = NodeTags[leaf_set];
      // It would be neat to strip the tbaa tag from any instructions using it
      // but it is simpler to just replace it with the root tag everywhere.
      LeafTag->replaceAllUsesWith(getTBAARoot());
      LeafTag = 0;
    }
  }

  // Create metadata describing the new node hanging off root.  The name doesn't
  // matter much but needs to be unique for the compilation unit.
  tree type =
      TYPE_CANONICAL(TYPE_MAIN_VARIANT(isa<TYPE>(t) ? t : TREE_TYPE(t)));
  std::string TreeName =
      ("alias set " + Twine(alias_set) + ": " + getDescriptiveName(type)).str();
  MDBuilder MDHelper(Context);

  MDNode *AliasTag = MDHelper.createTBAANode(TreeName, getTBAARoot());
  NodeTags[alias_set] = AliasTag;
  LeafNodes.push_back(alias_set);
  return AliasTag;
}
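
The static NodeTags map gives describeAliasSet its memoization: look the key up first, compute and store only on a miss. The same shape in miniature, with a Fibonacci function as a stand-in for the expensive computation:

#include <iostream>
#include <map>

// Memoized recursion: the cache plays the role of NodeTags above.
long fib(int n) {
  static std::map<int, long> Cache;
  auto I = Cache.find(n);
  if (I != Cache.end())
    return I->second; // cache hit, mirroring the NodeTags early return
  long R = n < 2 ? n : fib(n - 1) + fib(n - 2);
  Cache[n] = R;
  return R;
}

int main() {
  std::cout << fib(40) << '\n'; // 102334155, computed with one call per n
  return 0;
}
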
Example no. 19
0
/// If there were any appending global variables, link them together now.
Expected<Constant *>
IRLinker::linkAppendingVarProto(GlobalVariable *DstGV,
                                const GlobalVariable *SrcGV) {
  Type *EltTy = cast<ArrayType>(TypeMap.get(SrcGV->getValueType()))
                    ->getElementType();

  // FIXME: This upgrade is done during linking to support the C API.  Once the
  // old form is deprecated, we should move this upgrade to
  // llvm::UpgradeGlobalVariable() and simplify the logic here and in
  // Mapper::mapAppendingVariable() in ValueMapper.cpp.
  StringRef Name = SrcGV->getName();
  bool IsNewStructor = false;
  bool IsOldStructor = false;
  if (Name == "llvm.global_ctors" || Name == "llvm.global_dtors") {
    if (cast<StructType>(EltTy)->getNumElements() == 3)
      IsNewStructor = true;
    else
      IsOldStructor = true;
  }

  PointerType *VoidPtrTy = Type::getInt8Ty(SrcGV->getContext())->getPointerTo();
  if (IsOldStructor) {
    auto &ST = *cast<StructType>(EltTy);
    Type *Tys[3] = {ST.getElementType(0), ST.getElementType(1), VoidPtrTy};
    EltTy = StructType::get(SrcGV->getContext(), Tys, false);
  }

  uint64_t DstNumElements = 0;
  if (DstGV) {
    ArrayType *DstTy = cast<ArrayType>(DstGV->getValueType());
    DstNumElements = DstTy->getNumElements();

    if (!SrcGV->hasAppendingLinkage() || !DstGV->hasAppendingLinkage())
      return stringErr(
          "Linking globals named '" + SrcGV->getName() +
          "': can only link appending global with another appending "
          "global!");

    // Check to see that the two arrays agree on type.
    if (EltTy != DstTy->getElementType())
      return stringErr("Appending variables with different element types!");
    if (DstGV->isConstant() != SrcGV->isConstant())
      return stringErr("Appending variables linked with different const'ness!");

    if (DstGV->getAlignment() != SrcGV->getAlignment())
      return stringErr(
          "Appending variables with different alignment need to be linked!");

    if (DstGV->getVisibility() != SrcGV->getVisibility())
      return stringErr(
          "Appending variables with different visibility need to be linked!");

    if (DstGV->hasGlobalUnnamedAddr() != SrcGV->hasGlobalUnnamedAddr())
      return stringErr(
          "Appending variables with different unnamed_addr need to be linked!");

    if (DstGV->getSection() != SrcGV->getSection())
      return stringErr(
          "Appending variables with different section name need to be linked!");
  }

  SmallVector<Constant *, 16> SrcElements;
  getArrayElements(SrcGV->getInitializer(), SrcElements);

  if (IsNewStructor) {
    auto It = remove_if(SrcElements, [this](Constant *E) {
      auto *Key =
          dyn_cast<GlobalValue>(E->getAggregateElement(2)->stripPointerCasts());
      if (!Key)
        return false;
      GlobalValue *DGV = getLinkedToGlobal(Key);
      return !shouldLink(DGV, *Key);
    });
    SrcElements.erase(It, SrcElements.end());
  }
  uint64_t NewSize = DstNumElements + SrcElements.size();
  ArrayType *NewType = ArrayType::get(EltTy, NewSize);

  // Create the new global variable.
  GlobalVariable *NG = new GlobalVariable(
      DstM, NewType, SrcGV->isConstant(), SrcGV->getLinkage(),
      /*init*/ nullptr, /*name*/ "", DstGV, SrcGV->getThreadLocalMode(),
      SrcGV->getType()->getAddressSpace());

  NG->copyAttributesFrom(SrcGV);
  forceRenaming(NG, SrcGV->getName());

  Constant *Ret = ConstantExpr::getBitCast(NG, TypeMap.get(SrcGV->getType()));

  Mapper.scheduleMapAppendingVariable(*NG,
                                      DstGV ? DstGV->getInitializer() : nullptr,
                                      IsOldStructor, SrcElements);

  // Replace any uses of the two global variables with uses of the new
  // global.
  if (DstGV) {
    DstGV->replaceAllUsesWith(ConstantExpr::getBitCast(NG, DstGV->getType()));
    DstGV->eraseFromParent();
  }

  return Ret;
}
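
The IsNewStructor branch above drops ctor/dtor entries whose key will not be linked in. A sketch of the same remove_if filtering against a membership set; the key names here are invented for illustration:

#include <algorithm>
#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
  // Hypothetical structor keys; entries whose key won't be linked are
  // dropped with the same remove_if/erase shape as above.
  std::set<std::string> WillLink = {"init_a"};
  std::vector<std::string> SrcElements = {"init_a", "init_b", "init_c"};
  auto It = std::remove_if(SrcElements.begin(), SrcElements.end(),
                           [&](const std::string &Key) {
                             return WillLink.count(Key) == 0;
                           });
  SrcElements.erase(It, SrcElements.end());
  std::cout << SrcElements.size() << '\n'; // 1: only "init_a" survives
  return 0;
}
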
Example no. 20
0
bool Inliner::runOnSCC(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  AssumptionCacheTracker *ACT = &getAnalysis<AssumptionCacheTracker>();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  const TargetLibraryInfo *TLI = TLIP ? &TLIP->getTLI() : nullptr;
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();

  SmallPtrSet<Function*, 8> SCCFunctions;
  DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (F) SCCFunctions.insert(F);
    DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallSite, int>, 16> CallSites;
  
  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee.  This allows us to avoid
  // infinite inlining in some obscure cases.  To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function*, int>, 8> InlineHistory;

  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (!F) continue;
    
    for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
        CallSite CS(cast<Value>(I));
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CS || isa<IntrinsicInst>(I))
          continue;
        
        // If this is a direct call to an external function, we can never inline
        // it.  If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (CS.getCalledFunction() && CS.getCalledFunction()->isDeclaration())
          continue;
        
        CallSites.push_back(std::make_pair(CS, -1));
      }
  }

  DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this function, exit early.
  if (CallSites.empty())
    return false;
  
  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned i = 0; i < FirstCallInSCC; ++i)
    if (Function *F = CallSites[i].first.getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);

  
  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, AA, ACT);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // We keep iterating in this outer loop because inlining functions can
    // cause indirect calls to become direct calls.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      CallSite CS = CallSites[CSi].first;
      
      Function *Caller = CS.getCaller();
      Function *Callee = CS.getCalledFunction();

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size.  This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (isInstructionTriviallyDead(CS.getInstruction(), TLI)) {
        DEBUG(dbgs() << "    -> Deleting dead call: "
                     << *CS.getInstruction() << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        CG[Caller]->removeCallEdgeFor(CS);
        CS.getInstruction()->eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // We can only inline direct calls to non-declarations.
        if (!Callee || Callee->isDeclaration()) continue;
      
        // If this call site was obtained by inlining another function, verify
        // that the include path for the function did not include the callee
        // itself.  If so, we'd be recursively inlining the same function,
        // which would provide the same callsites, which would cause us to
        // infinitely inline.
        int InlineHistoryID = CallSites[CSi].second;
        if (InlineHistoryID != -1 &&
            InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory))
          continue;
        
        LLVMContext &CallerCtx = Caller->getContext();

        // Get DebugLoc to report. CS will be invalid after Inliner.
        DebugLoc DLoc = CS.getInstruction()->getDebugLoc();

        // If the policy determines that we should inline this function,
        // try to do so.
        if (!shouldInline(CS)) {
          emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
                                       Twine(Callee->getName() +
                                             " will not be inlined into " +
                                             Caller->getName()));
          continue;
        }

        // Attempt to inline the function.
        if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
                                  InlineHistoryID, InsertLifetime)) {
          emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
                                       Twine(Callee->getName() +
                                             " will not be inlined into " +
                                             Caller->getName()));
          continue;
        }
        ++NumInlined;

        // Report the inline decision.
        emitOptimizationRemark(
            CallerCtx, DEBUG_TYPE, *Caller, DLoc,
            Twine(Callee->getName() + " inlined into " + Caller->getName()));

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process.  They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

          for (unsigned i = 0, e = InlineInfo.InlinedCalls.size();
               i != e; ++i) {
            Value *Ptr = InlineInfo.InlinedCalls[i];
            CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
          }
        }
      }
      
      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&
          
          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet; doing
          // so could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        DEBUG(dbgs() << "    -> Deleting dead function: "
              << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];
        
        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();
        
        // Remove the node for the callee from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list.  If possible, use 
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin()+CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}
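
The swap loop that moves intra-SCC call sites behind the FirstCallInSCC barrier is an unstable in-place partition. std::stable_partition expresses the same split (and additionally preserves relative order); a sketch with even numbers standing in for calls into the current SCC:

#include <algorithm>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> CallSites = {1, 2, 3, 4, 5, 6};
  // Keep "calls to functions outside the SCC" (odd, here) first and move
  // the rest to the end; the boundary plays the role of FirstCallInSCC.
  auto Barrier = std::stable_partition(CallSites.begin(), CallSites.end(),
                                       [](int CS) { return CS % 2 != 0; });
  std::cout << "FirstCallInSCC = " << (Barrier - CallSites.begin()) << '\n';
  // FirstCallInSCC = 3; CallSites is now {1, 3, 5, 2, 4, 6}.
  return 0;
}
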
Example no. 21
0
/// Remove dead functions that are not included in DNR (Do Not Remove) list.
bool LegacyInlinerBase::removeDeadFunctions(CallGraph &CG,
                                            bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode *, 16> FunctionsToRemove;
  SmallVector<Function *, 16> DeadFunctionsInComdats;

  auto RemoveCGN = [&](CallGraphNode *CGN) {
    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node.  These edges might have been made irrelevant due to
    // optimization of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Queue the callee's node; it is removed from the call graph and
    // deleted below.
    FunctionsToRemove.push_back(CGN);
  };

  // Scan for all of the functions, looking for ones that should now be removed
  // from the program.  Insert the dead ones in the FunctionsToRemove set.
  for (const auto &I : CG) {
    CallGraphNode *CGN = I.second.get();
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions. This is a bit of a hack to share code
    // between here and the InlineAlways pass.
    if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // It is unsafe to drop a function with discardable linkage from a COMDAT
    // without also dropping the other members of the COMDAT.
    // The inliner doesn't visit non-function entities which are in COMDAT
    // groups so it is unsafe to do so *unless* the linkage is local.
    if (!F->hasLocalLinkage()) {
      if (F->hasComdat()) {
        DeadFunctionsInComdats.push_back(F);
        continue;
      }
    }

    RemoveCGN(CGN);
  }
  if (!DeadFunctionsInComdats.empty()) {
    // Filter out the functions whose comdats remain alive.
    filterDeadComdatFunctions(CG.getModule(), DeadFunctionsInComdats);
    // Remove the rest.
    for (Function *F : DeadFunctionsInComdats)
      RemoveCGN(CG[F]);
  }

  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so.  We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that it doesn't matter that we are iterating over a non-stable order
  // here to do this, it doesn't matter which order the functions are deleted
  // in.
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(
      std::unique(FunctionsToRemove.begin(), FunctionsToRemove.end()),
      FunctionsToRemove.end());
  for (CallGraphNode *CGN : FunctionsToRemove) {
    delete CG.removeFunctionFromModule(CGN);
    ++NumDeleted;
  }
  return true;
}
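
The tail of removeDeadFunctions dedupes the node list with the classic sort + unique + erase sequence. The same steps on a plain std::vector, with hypothetical node ids:

#include <algorithm>
#include <iostream>
#include <vector>

int main() {
  // Hypothetical node ids with duplicates; sort + unique + erase is the
  // same dedup sequence as the array_pod_sort/std::unique pair above.
  std::vector<int> FunctionsToRemove = {3, 1, 3, 2, 1};
  std::sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(
      std::unique(FunctionsToRemove.begin(), FunctionsToRemove.end()),
      FunctionsToRemove.end());
  for (int N : FunctionsToRemove)
    std::cout << N << ' '; // prints 1 2 3
  std::cout << '\n';
  return 0;
}
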
Example no. 22
0
static bool
inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG,
                std::function<AssumptionCache &(Function &)> GetAssumptionCache,
                ProfileSummaryInfo *PSI, TargetLibraryInfo &TLI,
                bool InsertLifetime,
                function_ref<InlineCost(CallSite CS)> GetInlineCost,
                function_ref<AAResults &(Function &)> AARGetter,
                ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  SmallPtrSet<Function *, 8> SCCFunctions;
  LLVM_DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (F)
      SCCFunctions.insert(F);
    LLVM_DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallSite, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee.  This allows us to avoid
  // infinite inlining in some obscure cases.  To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 8> InlineHistory;

  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (!F || F->isDeclaration())
      continue;

    OptimizationRemarkEmitter ORE(F);
    for (BasicBlock &BB : *F)
      for (Instruction &I : BB) {
        CallSite CS(cast<Value>(&I));
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CS || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never inline
        // it.  If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (Function *Callee = CS.getCalledFunction())
          if (Callee->isDeclaration()) {
            using namespace ore;

            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CS.getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
            continue;
          }

        CallSites.push_back(std::make_pair(CS, -1));
      }
  }

  LLVM_DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this function, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned i = 0; i < FirstCallInSCC; ++i)
    if (Function *F = CallSites[i].first.getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);

  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, &GetAssumptionCache, PSI);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // We keep iterating in this outer loop because inlining functions can
    // cause indirect calls to become direct calls.
    // CallSites may be modified inside the loop, so a range-based for loop
    // cannot be used.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      CallSite CS = CallSites[CSi].first;

      Function *Caller = CS.getCaller();
      Function *Callee = CS.getCalledFunction();

      // We can only inline direct calls to non-declarations.
      if (!Callee || Callee->isDeclaration())
        continue;

      Instruction *Instr = CS.getInstruction();

      bool IsTriviallyDead = isInstructionTriviallyDead(Instr, &TLI);

      int InlineHistoryID;
      if (!IsTriviallyDead) {
        // If this call site was obtained by inlining another function, verify
        // that the include path for the function did not include the callee
        // itself.  If so, we'd be recursively inlining the same function,
        // which would provide the same callsites, which would cause us to
        // infinitely inline.
        InlineHistoryID = CallSites[CSi].second;
        if (InlineHistoryID != -1 &&
            InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory))
          continue;
      }

      // FIXME for new PM: because of the old PM we currently generate ORE and
      // in turn BFI on demand.  With the new PM, the ORE dependency should
      // just become a regular analysis dependency.
      OptimizationRemarkEmitter ORE(Caller);

      Optional<InlineCost> OIC = shouldInline(CS, GetInlineCost, ORE);
      // If the policy determines that we should not inline this call site,
      // move on to the next one.
      if (!OIC)
        continue;

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size.  This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (IsTriviallyDead) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead call: " << *Instr << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        CG[Caller]->removeCallEdgeFor(CS);
        Instr->eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // Get DebugLoc to report. CS will be invalid after Inliner.
        DebugLoc DLoc = CS->getDebugLoc();
        BasicBlock *Block = CS.getParent();

        // Attempt to inline the function.
        using namespace ore;

        if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
                                  InlineHistoryID, InsertLifetime, AARGetter,
                                  ImportedFunctionsStats)) {
          ORE.emit([&]() {
            return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc,
                                            Block)
                   << NV("Callee", Callee) << " will not be inlined into "
                   << NV("Caller", Caller);
          });
          continue;
        }
        ++NumInlined;

        ORE.emit([&]() {
          bool AlwaysInline = OIC->isAlways();
          StringRef RemarkName = AlwaysInline ? "AlwaysInline" : "Inlined";
          OptimizationRemark R(DEBUG_TYPE, RemarkName, DLoc, Block);
          R << NV("Callee", Callee) << " inlined into ";
          R << NV("Caller", Caller);
          if (AlwaysInline)
            R << " with cost=always";
          else {
            R << " with cost=" << NV("Cost", OIC->getCost());
            R << " (threshold=" << NV("Threshold", OIC->getThreshold());
            R << ")";
          }
          return R;
        });

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process.  They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

          for (Value *Ptr : InlineInfo.InlinedCalls)
            CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&
          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet; doing
          // so could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead function: "
                          << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the node for the callee from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list.  If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin() + CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}
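
InlineHistoryIncludes itself is not shown on this page, but from how InlineHistory entries are built above (the inlined callee plus the index of the entry it was inlined under, with -1 marking a root call site) the check must walk a parent chain. A hypothetical reconstruction under that assumption:

#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Hypothetical reconstruction: walk up the inline-history parent chain
// looking for F. This is an assumption based on the entry layout above,
// not the verbatim LLVM helper.
static bool historyIncludes(
    const std::string &F, int ID,
    const std::vector<std::pair<std::string, int>> &History) {
  while (ID != -1) {
    if (History[ID].first == F)
      return true; // F is already on this inline path: recursion risk
    ID = History[ID].second;
  }
  return false;
}

int main() {
  // "a" was inlined at a root site, "b" under "a", "c" under "b".
  std::vector<std::pair<std::string, int>> History = {
      {"a", -1}, {"b", 0}, {"c", 1}};
  std::cout << historyIncludes("a", 2, History) << '\n'; // 1: would recurse
  std::cout << historyIncludes("d", 2, History) << '\n'; // 0: safe to inline
  return 0;
}
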
Example no. 23
0
/// Remove dead functions that are not included in DNR (Do Not Remove) list.
bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode*, 16> FunctionsToRemove;
  SmallVector<CallGraphNode *, 16> DeadFunctionsInComdats;
  SmallDenseMap<const Comdat *, int, 16> ComdatEntriesAlive;

  auto RemoveCGN = [&](CallGraphNode *CGN) {
    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node.  These edges might have been made irrelevant due to
    // optimization of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Queue the callee's node; it is removed from the call graph and
    // deleted below.
    FunctionsToRemove.push_back(CGN);
  };

  // Scan for all of the functions, looking for ones that should now be removed
  // from the program.  Insert the dead ones in the FunctionsToRemove set.
  for (CallGraph::iterator I = CG.begin(), E = CG.end(); I != E; ++I) {
    CallGraphNode *CGN = I->second;
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions. This is a bit of a hack to share code
    // between here and the InlineAlways pass.
    if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // It is unsafe to drop a function with discardable linkage from a COMDAT
    // without also dropping the other members of the COMDAT.
    // The inliner doesn't visit non-function entities which are in COMDAT
    // groups so it is unsafe to do so *unless* the linkage is local.
    if (!F->hasLocalLinkage()) {
      if (const Comdat *C = F->getComdat()) {
        --ComdatEntriesAlive[C];
        DeadFunctionsInComdats.push_back(CGN);
        continue;
      }
    }

    RemoveCGN(CGN);
  }
  if (!DeadFunctionsInComdats.empty()) {
    // Count up all the entities in COMDAT groups
    auto ComdatGroupReferenced = [&](const Comdat *C) {
      auto I = ComdatEntriesAlive.find(C);
      if (I != ComdatEntriesAlive.end())
        ++(I->getSecond());
    };
    for (const Function &F : CG.getModule())
      if (const Comdat *C = F.getComdat())
        ComdatGroupReferenced(C);
    for (const GlobalVariable &GV : CG.getModule().globals())
      if (const Comdat *C = GV.getComdat())
        ComdatGroupReferenced(C);
    for (const GlobalAlias &GA : CG.getModule().aliases())
      if (const Comdat *C = GA.getComdat())
        ComdatGroupReferenced(C);
    for (CallGraphNode *CGN : DeadFunctionsInComdats) {
      Function *F = CGN->getFunction();
      const Comdat *C = F->getComdat();
      int NumAlive = ComdatEntriesAlive[C];
      // We can remove functions in a COMDAT group if the entire group is dead.
      assert(NumAlive >= 0);
      if (NumAlive > 0)
        continue;

      RemoveCGN(CGN);
    }
  }

  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so.  We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that it doesn't matter that we are iterating over a non-stable order
  // here to do this, it doesn't matter which order the functions are deleted
  // in.
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(std::unique(FunctionsToRemove.begin(),
                                      FunctionsToRemove.end()),
                          FunctionsToRemove.end());
  for (SmallVectorImpl<CallGraphNode *>::iterator I = FunctionsToRemove.begin(),
                                                  E = FunctionsToRemove.end();
       I != E; ++I) {
    delete CG.removeFunctionFromModule(*I);
    ++NumDeleted;
  }
  return true;
}
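
The ComdatEntriesAlive bookkeeping above counts down once per dead function and back up once per module entity in a tracked group, so a group is removable exactly when its net count reaches zero. The same arithmetic in miniature, with invented group names:

#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
  std::map<std::string, int> ComdatEntriesAlive;
  // Hypothetical module: group "g1" has two members, both dead; group "g2"
  // has two members, only one of which is dead.
  std::vector<std::string> DeadFunctionGroups = {"g1", "g1", "g2"};
  std::vector<std::string> AllMemberGroups = {"g1", "g1", "g2", "g2"};
  for (const auto &G : DeadFunctionGroups)
    --ComdatEntriesAlive[G]; // one down-tick per dead function
  for (const auto &G : AllMemberGroups) {
    auto I = ComdatEntriesAlive.find(G);
    if (I != ComdatEntriesAlive.end())
      ++I->second; // one up-tick per member, dead or alive
  }
  for (const auto &KV : ComdatEntriesAlive)
    std::cout << KV.first << ": "
              << (KV.second > 0 ? "keep" : "removable") << '\n';
  // g1: removable (all members dead); g2: keep (a live member remains).
  return 0;
}
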
Example no. 24
0
/// If there are any -verify errors (e.g. differences between expectations
/// and actual diagnostics produced), apply fixits to the original source
/// file and drop it back in place.
void DiagnosticVerifier::autoApplyFixes(unsigned BufferID,
                                        ArrayRef<llvm::SMDiagnostic> diags) {
  // Walk the list of diagnostics, pulling out any fixits into an array of just
  // them.
  SmallVector<llvm::SMFixIt, 4> FixIts;
  for (auto &diag : diags)
    FixIts.append(diag.getFixIts().begin(), diag.getFixIts().end());

  // If we have no fixits to apply, avoid touching the file.
  if (FixIts.empty())
    return;
  
  // Sort the fixits by their start location.
  std::sort(FixIts.begin(), FixIts.end(),
            [&](const llvm::SMFixIt &lhs, const llvm::SMFixIt &rhs) -> bool {
              return lhs.getRange().Start.getPointer()
                   < rhs.getRange().Start.getPointer();
            });
  // Coalesce identical fix-its. This happens most often with "expected-error 2"
  // syntax.
  FixIts.erase(std::unique(FixIts.begin(), FixIts.end(),
                           [](const llvm::SMFixIt &lhs,
                              const llvm::SMFixIt &rhs) -> bool {
                 return lhs.getRange().Start == rhs.getRange().Start &&
                        lhs.getRange().End == rhs.getRange().End &&
                        lhs.getText() == rhs.getText();
               }), FixIts.end());
  // Filter out overlapping fix-its. This allows the compiler to apply changes
  // to the easy parts of the file, and leave in the tricky cases for the
  // developer to handle manually.
  FixIts.erase(swift::removeAdjacentIf(FixIts.begin(), FixIts.end(),
                                       [](const llvm::SMFixIt &lhs,
                                          const llvm::SMFixIt &rhs) {
    return lhs.getRange().End.getPointer() > rhs.getRange().Start.getPointer();
  }), FixIts.end());

  // Get the contents of the original source file.
  auto memBuffer = SM.getLLVMSourceMgr().getMemoryBuffer(BufferID);
  auto bufferRange = memBuffer->getBuffer();

  // Apply the fixes, building up a new buffer as an std::string.
  const char *LastPos = bufferRange.begin();
  std::string Result;
  
  for (auto &fix : FixIts) {
    // We cannot handle overlapping fixits, so assert that they don't happen.
    assert(LastPos <= fix.getRange().Start.getPointer() &&
           "Cannot handle overlapping fixits");
    
    // Keep anything from the last spot we've checked to the start of the fixit.
    Result.append(LastPos, fix.getRange().Start.getPointer());
    
    // Replace the content covered by the fixit with the replacement text.
    Result.append(fix.getText().begin(), fix.getText().end());
    
    // Next character to consider is at the end of the fixit.
    LastPos = fix.getRange().End.getPointer();
  }
  
  // Retain the end of the file.
  Result.append(LastPos, bufferRange.end());

  std::error_code error;
  llvm::raw_fd_ostream outs(memBuffer->getBufferIdentifier(), error,
                            llvm::sys::fs::OpenFlags::F_None);
  if (!error)
    outs << Result;
}
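
The apply loop above splices sorted, non-overlapping replacements into a fresh buffer: copy the untouched prefix, append the replacement text, then resume after the replaced range. A self-contained sketch with byte offsets standing in for SMFixIt ranges (the offsets and text are invented):

#include <iostream>
#include <string>
#include <vector>

// A byte-range replacement, standing in for llvm::SMFixIt.
struct FixIt {
  size_t Begin, End;  // half-open [Begin, End) range in the buffer
  std::string Text;   // replacement text
};

int main() {
  std::string Buffer = "expected-error 2";
  // Sorted, non-overlapping fix-its with invented offsets.
  std::vector<FixIt> FixIts = {{0, 8, "found"}, {15, 16, "3"}};
  std::string Result;
  size_t LastPos = 0;
  for (const FixIt &Fix : FixIts) {
    Result.append(Buffer, LastPos, Fix.Begin - LastPos); // untouched prefix
    Result += Fix.Text;                                  // splice replacement
    LastPos = Fix.End;                                   // resume after range
  }
  Result.append(Buffer, LastPos, std::string::npos);     // retain the tail
  std::cout << Result << '\n'; // "found-error 3"
  return 0;
}
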
Example no. 25
0
static void lookupInModule(Module *module, Module::AccessPathTy accessPath,
                           SmallVectorImpl<ValueDecl *> &decls,
                           ResolutionKind resolutionKind, bool canReturnEarly,
                           LazyResolver *typeResolver,
                           ModuleLookupCache &cache,
                           const DeclContext *moduleScopeContext,
                           bool respectAccessControl,
                           ArrayRef<Module::ImportedModule> extraImports,
                           CallbackTy callback) {
  assert(module);
  assert(std::none_of(extraImports.begin(), extraImports.end(),
                      [](Module::ImportedModule import) -> bool {
    return !import.second;
  }));

  ModuleLookupCache::iterator iter;
  bool isNew;
  std::tie(iter, isNew) = cache.insert({{accessPath, module}, {}});
  if (!isNew) {
    decls.append(iter->second.begin(), iter->second.end());
    return;
  }

  size_t initialCount = decls.size();

  SmallVector<ValueDecl *, 4> localDecls;
  callback(module, accessPath, localDecls);
  if (respectAccessControl) {
    auto newEndIter = std::remove_if(localDecls.begin(), localDecls.end(),
                                    [=](ValueDecl *VD) {
      if (typeResolver) {
        typeResolver->resolveAccessibility(VD);
      }
      if (!VD->hasAccessibility())
        return false;
      return !VD->isAccessibleFrom(moduleScopeContext);
    });
    localDecls.erase(newEndIter, localDecls.end());

    // This only applies to immediate imports of the top-level module.
    if (moduleScopeContext && moduleScopeContext->getParentModule() != module)
      moduleScopeContext = nullptr;
  }

  OverloadSetTy overloads;
  resolutionKind = recordImportDecls(typeResolver, decls, localDecls,
                                     overloads, resolutionKind);

  bool foundDecls = decls.size() > initialCount;
  if (!foundDecls || !canReturnEarly ||
      resolutionKind == ResolutionKind::Overloadable) {
    SmallVector<Module::ImportedModule, 8> reexports;
    module->getImportedModulesForLookup(reexports);
    assert(std::none_of(reexports.begin(), reexports.end(),
                        [](Module::ImportedModule import) -> bool {
      return !import.second;
    }));
    reexports.append(extraImports.begin(), extraImports.end());

    // Prefer scoped imports (import func Swift.max) to whole-module imports.
    SmallVector<ValueDecl *, 8> unscopedValues;
    SmallVector<ValueDecl *, 8> scopedValues;
    for (auto next : reexports) {
      // Filter any whole-module imports, and skip specific-decl imports if the
      // import path doesn't match exactly.
      Module::AccessPathTy combinedAccessPath;
      if (accessPath.empty()) {
        combinedAccessPath = next.first;
      } else if (!next.first.empty() &&
                 !Module::isSameAccessPath(next.first, accessPath)) {
        // If we ever allow importing non-top-level decls, it's possible the
        // rule above isn't what we want.
        assert(next.first.size() == 1 && "import of non-top-level decl");
        continue;
      } else {
        combinedAccessPath = accessPath;
      }

      auto &resultSet = next.first.empty() ? unscopedValues : scopedValues;
      lookupInModule<OverloadSetTy>(next.second, combinedAccessPath,
                                    resultSet, resolutionKind, canReturnEarly,
                                    typeResolver, cache, moduleScopeContext,
                                    respectAccessControl, {}, callback);
    }

    // Add the results from scoped imports.
    resolutionKind = recordImportDecls(typeResolver, decls, scopedValues,
                                       overloads, resolutionKind);

    // Add the results from unscoped imports.
    foundDecls = decls.size() > initialCount;
    if (!foundDecls || !canReturnEarly ||
        resolutionKind == ResolutionKind::Overloadable) {
      resolutionKind = recordImportDecls(typeResolver, decls, unscopedValues,
                                         overloads, resolutionKind);
    }
  }

  // Remove duplicated declarations.
  llvm::SmallPtrSet<ValueDecl *, 4> knownDecls;
  decls.erase(std::remove_if(decls.begin() + initialCount, decls.end(),
                             [&](ValueDecl *d) -> bool { 
                               return !knownDecls.insert(d).second;
                             }),
              decls.end());

  auto &cachedValues = cache[{accessPath, module}];
  cachedValues.insert(cachedValues.end(),
                      decls.begin() + initialCount,
                      decls.end());
}
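
The de-duplication step near the end of this example is the standard erase/remove_if idiom keyed on SmallPtrSet::insert, whose returned .second is true only for the first insertion of a given pointer. A self-contained sketch of the same idiom (the helper name is hypothetical):

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include <algorithm>

// Remove later duplicates from 'values' in place, keeping first-seen order.
template <typename T>
static void erasePointerDuplicates(llvm::SmallVectorImpl<T *> &values) {
  llvm::SmallPtrSet<T *, 4> known;
  values.erase(std::remove_if(values.begin(), values.end(),
                              [&](T *v) { return !known.insert(v).second; }),
               values.end());
}
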
Example No. 26
/// If there were any appending global variables, link them together now.
/// Return nullptr on error.
Constant *IRLinker::linkAppendingVarProto(GlobalVariable *DstGV,
                                          const GlobalVariable *SrcGV) {
  Type *EltTy = cast<ArrayType>(TypeMap.get(SrcGV->getType()->getElementType()))
                    ->getElementType();

  StringRef Name = SrcGV->getName();
  bool IsNewStructor = false;
  bool IsOldStructor = false;
  if (Name == "llvm.global_ctors" || Name == "llvm.global_dtors") {
    if (cast<StructType>(EltTy)->getNumElements() == 3)
      IsNewStructor = true;
    else
      IsOldStructor = true;
  }

  PointerType *VoidPtrTy = Type::getInt8Ty(SrcGV->getContext())->getPointerTo();
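  // Upgrade the old two-field ctor/dtor element type to the three-field
  // form by appending an i8* slot for the associated-data field.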
  if (IsOldStructor) {
    auto &ST = *cast<StructType>(EltTy);
    Type *Tys[3] = {ST.getElementType(0), ST.getElementType(1), VoidPtrTy};
    EltTy = StructType::get(SrcGV->getContext(), Tys, false);
  }

  if (DstGV) {
    ArrayType *DstTy = cast<ArrayType>(DstGV->getType()->getElementType());

    if (!SrcGV->hasAppendingLinkage() || !DstGV->hasAppendingLinkage()) {
      emitError(
          "Linking globals named '" + SrcGV->getName() +
          "': can only link appending global with another appending global!");
      return nullptr;
    }

    // Check to see that the two arrays agree on type.
    if (EltTy != DstTy->getElementType()) {
      emitError("Appending variables with different element types!");
      return nullptr;
    }
    if (DstGV->isConstant() != SrcGV->isConstant()) {
      emitError("Appending variables linked with different const'ness!");
      return nullptr;
    }

    if (DstGV->getAlignment() != SrcGV->getAlignment()) {
      emitError(
          "Appending variables with different alignment need to be linked!");
      return nullptr;
    }

    if (DstGV->getVisibility() != SrcGV->getVisibility()) {
      emitError(
          "Appending variables with different visibility need to be linked!");
      return nullptr;
    }

    if (DstGV->hasUnnamedAddr() != SrcGV->hasUnnamedAddr()) {
      emitError(
          "Appending variables with different unnamed_addr need to be linked!");
      return nullptr;
    }

    if (StringRef(DstGV->getSection()) != SrcGV->getSection()) {
      emitError(
          "Appending variables with different section name need to be linked!");
      return nullptr;
    }
  }

  SmallVector<Constant *, 16> DstElements;
  if (DstGV)
    getArrayElements(DstGV->getInitializer(), DstElements);

  SmallVector<Constant *, 16> SrcElements;
  getArrayElements(SrcGV->getInitializer(), SrcElements);

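  // For three-field structor entries, drop any entry whose associated-data
  // field (element 2) names a global that will not be linked in.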
  if (IsNewStructor)
    SrcElements.erase(
        std::remove_if(SrcElements.begin(), SrcElements.end(),
                       [this](Constant *E) {
                         auto *Key = dyn_cast<GlobalValue>(
                             E->getAggregateElement(2)->stripPointerCasts());
                         if (!Key)
                           return false;
                         GlobalValue *DGV = getLinkedToGlobal(Key);
                         return !shouldLink(DGV, *Key);
                       }),
        SrcElements.end());
  uint64_t NewSize = DstElements.size() + SrcElements.size();
  ArrayType *NewType = ArrayType::get(EltTy, NewSize);

  // Create the new global variable.
  GlobalVariable *NG = new GlobalVariable(
      DstM, NewType, SrcGV->isConstant(), SrcGV->getLinkage(),
      /*init*/ nullptr, /*name*/ "", DstGV, SrcGV->getThreadLocalMode(),
      SrcGV->getType()->getAddressSpace());

  NG->copyAttributesFrom(SrcGV);
  forceRenaming(NG, SrcGV->getName());

  Constant *Ret = ConstantExpr::getBitCast(NG, TypeMap.get(SrcGV->getType()));

  // Stop recursion.
  ValueMap[SrcGV] = Ret;

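  // Map each source element into the destination module. Old two-field
  // entries are widened to the three-field form with a null third operand.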
  for (auto *V : SrcElements) {
    Constant *NewV;
    if (IsOldStructor) {
      auto *S = cast<ConstantStruct>(V);
      auto *E1 = MapValue(S->getOperand(0), ValueMap, RF_MoveDistinctMDs,
                          &TypeMap, &GValMaterializer);
      auto *E2 = MapValue(S->getOperand(1), ValueMap, RF_MoveDistinctMDs,
                          &TypeMap, &GValMaterializer);
      Value *Null = Constant::getNullValue(VoidPtrTy);
      NewV =
          ConstantStruct::get(cast<StructType>(EltTy), E1, E2, Null, nullptr);
    } else {
      NewV = MapValue(V, ValueMap, RF_MoveDistinctMDs, &TypeMap,
                      &GValMaterializer);
    }
    DstElements.push_back(NewV);
  }

  NG->setInitializer(ConstantArray::get(NewType, DstElements));

  // Replace any uses of the two global variables with uses of the new
  // global.
  if (DstGV) {
    DstGV->replaceAllUsesWith(ConstantExpr::getBitCast(NG, DstGV->getType()));
    DstGV->eraseFromParent();
  }

  return Ret;
}
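
The getArrayElements helper used above is not reproduced in this example; in the linker it is a small wrapper that flattens an array initializer into a vector of its element constants. A sketch of what it does (not the verbatim source):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"

using namespace llvm;

// Append every element of the array constant 'C' to 'Dest'.
static void getArrayElements(const Constant *C,
                             SmallVectorImpl<Constant *> &Dest) {
  unsigned NumElements = cast<ArrayType>(C->getType())->getNumElements();
  for (unsigned i = 0; i != NumElements; ++i)
    Dest.push_back(C->getAggregateElement(i));
}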