Example #1
0
/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made.  Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == 0)
      continue;
    
    // If the call was inlined, but then constant folded, there is no edge to
    // add.  Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (NewCall == 0) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer.  If this
    // happens, set the callee of the new call site to a more precise
    // destination.  This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (I->second->getFunction() == 0)
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }
  
  // Update the call graph by deleting the edge from Callee to Caller.  We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}
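A brief, hedged sketch of how a client of InlineFunction might consume the IFI.InlinedCalls list that the code above fills in; the requeueInlinedCalls helper and its worklist are illustrative names, not part of the original sources.

static void requeueInlinedCalls(InlineFunctionInfo &IFI,
                                std::vector<CallSite> &Worklist) {
  // Each entry was pushed by UpdateCallGraphAfterInlining above; revisit it so
  // the inliner can consider the newly exposed call sites for further inlining.
  for (unsigned i = 0, e = IFI.InlinedCalls.size(); i != e; ++i) {
    Value *V = IFI.InlinedCalls[i];
    if (Instruction *Call = dyn_cast_or_null<Instruction>(V))
      Worklist.push_back(CallSite(Call));
  }
}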
Example #2
0
// doFinalization - Remove now-dead linkonce functions at the end of
// processing to avoid breaking the SCC traversal.
bool Inliner::doFinalization(CallGraph &CG) {
  std::set<CallGraphNode*> FunctionsToRemove;

  // Scan for all of the functions, looking for ones that should now be removed
  // from the program.  Insert the dead ones in the FunctionsToRemove set.
  for (CallGraph::iterator I = CG.begin(), E = CG.end(); I != E; ++I) {
    CallGraphNode *CGN = I->second;
    if (Function *F = CGN ? CGN->getFunction() : 0) {
      // If the only remaining users of the function are dead constants, remove
      // them.
      F->removeDeadConstantUsers();

      if ((F->hasLinkOnceLinkage() || F->hasInternalLinkage()) &&
          F->use_empty()) {

        // Remove any call graph edges from the function to its callees.
        while (!CGN->empty())
          CGN->removeCallEdgeTo((CGN->end()-1)->second);

        // Remove any edges from the external node to the function's call graph
        // node.  These edges might have been made irrelevant due to
        // optimization of the program.
        CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

        // Mark the callee's node for removal; it is deleted from the call
        // graph below.
        FunctionsToRemove.insert(CGN);
      }
    }
  }

  // Now that we know which functions to delete, do so.  We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  bool Changed = false;
  for (std::set<CallGraphNode*>::iterator I = FunctionsToRemove.begin(),
         E = FunctionsToRemove.end(); I != E; ++I) {
    delete CG.removeFunctionFromModule(*I);
    ++NumDeleted;
    Changed = true;
  }

  return Changed;
}
Function * StructuredModuleEditor::cloneFunc(Function * Original) {
	if (Original == NULL)
		return NULL;

	ValueMap<const Value*, WeakVH> VMap;

// Creates a clone of the original function
	Function *Clone = CloneFunction(Original, VMap, false);
	Clone->setName(Original->getName() + "-cloned");

// Adds the clone to the Module
	M->getFunctionList().push_back(Clone);

// Adds the clone to the call graph and grabs its node
	CallGraphNode *CloneNode = CG->getOrInsertFunction(Clone);

// Adds an out-edge from the clone's call graph node for every call site in
// the clone. Calls that targeted the original function (recursion) are
// redirected to the clone instead. The clone has no interprocedural in-edges
// as it was just created.
	for (Function::iterator BBI = Clone->begin(), BBE = Clone->end();
			BBI != BBE; ++BBI) {
		for (BasicBlock::iterator II = BBI->begin(), IE = BBI->end(); II != IE;
				++II) {
			CallSite CS(cast<Value>(II));
// If this isn't a call, or it is a call to an intrinsic...
			if (!CS || isa<IntrinsicInst>(II))
				continue;

			Function *Callee = CS.getCalledFunction();
			if (Callee == Original) {
				Callee = Clone;
				CS.setCalledFunction(Clone);
			}

			CloneNode->addCalledFunction(CS, CG->getOrInsertFunction(Callee));
		}
	}

	return Clone;
}
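A minimal usage sketch (the checkClone method is hypothetical, not part of the original editor): clone a function with cloneFunc above and report how many out-edges its new call graph node received, using the same CG and OS members as the surrounding StructuredModuleEditor code.

void StructuredModuleEditor::checkClone(Function *Original) {
	Function *Clone = cloneFunc(Original);
	if (Clone == NULL)
		return;

	// The clone was registered with the call graph inside cloneFunc, so its
	// node can be looked up directly.
	CallGraphNode *CloneNode = (*CG)[Clone];
	OS << "Cloned " << Original->getName() << " into " << Clone->getName()
			<< " with " << CloneNode->size() << " out-edges\n";
}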
Example #4
0
void CallGraph::addToCallGraph(Function *F) {
  CallGraphNode *Node = getOrInsertFunction(F);

  // If this function has external linkage, anything could call it.
  if (!F->hasLocalLinkage()) {
    ExternalCallingNode->addCalledFunction(CallSite(), Node);

    // Found the entry point?
    if (F->getName() == "main") {
      if (Root) // Found multiple external mains?  Don't pick one.
        Root = ExternalCallingNode;
      else
        Root = Node; // Found a main, keep track of it!
    }
  }

  // If this function has its address taken, anything could call it.
  if (F->hasAddressTaken())
    ExternalCallingNode->addCalledFunction(CallSite(), Node);

  // If this function is not defined in this translation unit, it could call
  // anything.
  if (F->isDeclaration() && !F->isIntrinsic())
    Node->addCalledFunction(CallSite(), CallsExternalNode.get());

  // Look for calls by this function.
  for (Function::iterator BB = F->begin(), BBE = F->end(); BB != BBE; ++BB)
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;
         ++II) {
      CallSite CS(cast<Value>(II));
      if (CS) {
        const Function *Callee = CS.getCalledFunction();
        if (!Callee || !Intrinsic::isLeaf(Callee->getIntrinsicID()))
          // Indirect calls of intrinsics are not allowed so no need to check.
          // We can be more precise here by using TargetArg returned by
          // Intrinsic::isLeaf.
          Node->addCalledFunction(CS, CallsExternalNode.get());
        else if (!Callee->isIntrinsic())
          Node->addCalledFunction(CS, getOrInsertFunction(Callee));
      }
    }
}
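For orientation, a small sketch that walks the graph addToCallGraph builds. The printCallGraph helper is not from the original sources, and it assumes the legacy iterator API used in example #2, where I->second is a CallGraphNode* and each node entry pairs a call site with its callee node.

static void printCallGraph(CallGraph &CG, raw_ostream &OS) {
  for (CallGraph::iterator I = CG.begin(), E = CG.end(); I != E; ++I) {
    CallGraphNode *CGN = I->second;
    Function *Caller = CGN ? CGN->getFunction() : 0;
    if (!Caller)
      continue;   // Skip nodes with no function, e.g. the external node.
    for (CallGraphNode::iterator CI = CGN->begin(), CE = CGN->end();
         CI != CE; ++CI) {
      Function *Callee = CI->second->getFunction();
      if (Callee)
        OS << Caller->getName() << " -> " << Callee->getName() << "\n";
      else
        OS << Caller->getName() << " -> <external or indirect>\n";
    }
  }
}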
Example #5
0
void CallGraph::verify() const {
#ifndef NDEBUG
  // For every function in the module, add it to our SILFunction set.
  llvm::DenseSet<SILFunction *> Functions;
  for (auto &F : M)
    Functions.insert(&F);

  // For every pair (SILFunction, CallGraphNode) in the
  // function-to-node map, verify:
  //
  //    a. The function is in the current module.
  //    b. The call graph node is for that same function.
  //
  // In addition, call the verify method for the function.
  unsigned numEdges = 0;
  for (auto &P : FunctionToNodeMap) {
    SILFunction *F = P.first;
    CallGraphNode *Node = P.second;
    assert(Functions.count(F) &&
           "Function in call graph but not in module!?");
    assert(Node->getFunction() == F &&
           "Func mapped to node, but node has different Function inside?!");
    verify(F);
    numEdges += Node->getCalleeEdges().size();
  }

  assert(InstToEdgeMap.size() == numEdges &&
         "Some edges in InstToEdgeMap are not contained in any node");

  // Verify the callee sets.
  for (auto Iter : CalleeSetCache) {
    auto *CalleeSet = Iter.second.getPointer();
    for (CallGraphNode *Node : *CalleeSet) {
      SILFunction *F = Node->getFunction();
      assert(tryGetCallGraphNode(F) &&
             "Callee set contains dangling node poiners");
    }
  }
#endif
}
Example #6
0
/// DeleteBasicBlock - remove the specified basic block from the program,
/// updating the callgraph to reflect any now-obsolete edges due to calls that
/// exist in the BB.
void PruneEH::DeleteBasicBlock(BasicBlock *BB) {
  assert(pred_empty(BB) && "BB is not dead!");
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

  Instruction *TokenInst = nullptr;

  CallGraphNode *CGN = CG[BB->getParent()];
  for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; ) {
    --I;

    if (I->getType()->isTokenTy()) {
      TokenInst = &*I;
      break;
    }

    if (auto CS = CallSite(&*I)) {
      const Function *Callee = CS.getCalledFunction();
      if (!Callee || !Intrinsic::isLeaf(Callee->getIntrinsicID()))
        CGN->removeCallEdgeFor(CS);
      else if (!Callee->isIntrinsic())
        CGN->removeCallEdgeFor(CS);
    }

    if (!I->use_empty())
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
  }

  if (TokenInst) {
    if (!isa<TerminatorInst>(TokenInst))
      changeToUnreachable(TokenInst->getNextNode(), /*UseLLVMTrap=*/false);
  } else {
    // Get the list of successors of this block.
    std::vector<BasicBlock *> Succs(succ_begin(BB), succ_end(BB));

    for (unsigned i = 0, e = Succs.size(); i != e; ++i)
      Succs[i]->removePredecessor(BB);

    BB->eraseFromParent();
  }
}
/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made.  Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(const Function *Caller,
                                         const Function *Callee,
                                         Function::iterator FirstNewBlock,
                                       DenseMap<const Value*, Value*> &ValueMap,
                                         CallGraph &CG) {
  // Update the call graph by deleting the edge from Callee to Caller
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];
  CallerNode->removeCallEdgeTo(CalleeNode);
  
  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  for (CallGraphNode::iterator I = CalleeNode->begin(),
       E = CalleeNode->end(); I != E; ++I) {
    const Instruction *OrigCall = I->first.getInstruction();
    
    DenseMap<const Value*, Value*>::iterator VMI = ValueMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI != ValueMap.end() && VMI->second) {
      // If the call was inlined, but then constant folded, there is no edge to
      // add.  Check for this case.
      if (Instruction *NewCall = dyn_cast<Instruction>(VMI->second))
        CallerNode->addCalledFunction(CallSite::get(NewCall), I->second);
    }
  }
}
bool CallGraphCFG::findBBPath(CallGraphNode *n, std::vector<BasicBlock*> &path, std::string srcFile, int srcLine)
{
    if (n == NULL) return false;

    Function *F = n->getFunction();

    std::cerr << "Processing " << F->getNameStr() << "\n";

    // Are we on a leaf?
    if (n->size() == 0) {
        BasicBlock *bb=NULL;
        if (findLineInFunction(F,&bb,srcFile,srcLine)) {
            path.push_back(bb);
            return true;
        }
    }

    for (CallGraphNode::iterator it = n->begin(); it != n->end(); ++it) {
        CallSite cs = it->first;
        CallGraphNode *tCGN = it->second;
        Instruction *tI = cs.getInstruction();
        if (tI == NULL) return false;
        BasicBlock *bb = tI->getParent();
        Function *tF = tCGN->getFunction();

        path.push_back(bb);
        if (findLineInBB(bb,srcFile,srcLine))
            return true;

        if (tF != F) {    // Don't get stuck in recursion
            if (findBBPath(tCGN,path,srcFile,srcLine))
                return true;
        }

        std::cerr << " Dead end, reverting...\n";  // FIX: This is misleading, not really correct.
        path.pop_back();
    }
    return false;
}
Example #9
0
// InlineCallIfPossible - If it is possible to inline the specified call site,
// do so and update the CallGraph for this operation.
static bool InlineCallIfPossible(CallSite CS, CallGraph &CG,
                                 const std::set<Function*> &SCCFunctions,
                                 const TargetData &TD) {
  Function *Callee = CS.getCalledFunction();
  if (!InlineFunction(CS, &CG, &TD)) return false;

  // If we inlined the last possible call site to the function, delete the
  // function body now.
  if (Callee->use_empty() && Callee->hasInternalLinkage() &&
      !SCCFunctions.count(Callee)) {
    DOUT << "    -> Deleting dead function: " << Callee->getName() << "\n";

    // Remove any call graph edges from the callee to its callees.
    CallGraphNode *CalleeNode = CG[Callee];
    while (!CalleeNode->empty())
      CalleeNode->removeCallEdgeTo((CalleeNode->end()-1)->second);

    // Remove the node for the callee from the call graph and delete it.
    delete CG.removeFunctionFromModule(CalleeNode);
    ++NumDeleted;
  }
  return true;
}
Example #10
0
/// DeleteBasicBlock - remove the specified basic block from the program,
/// updating the callgraph to reflect any now-obsolete edges due to calls that
/// exist in the BB.
void PruneEH::DeleteBasicBlock(BasicBlock *BB) {
  assert(pred_begin(BB) == pred_end(BB) && "BB is not dead!");
  CallGraph &CG = getAnalysis<CallGraph>();

  CallGraphNode *CGN = CG[BB->getParent()];
  for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; ) {
    --I;
    if (CallInst *CI = dyn_cast<CallInst>(I)) {
      if (!isa<IntrinsicInst>(I))
        CGN->removeCallEdgeFor(CI);
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(I))
      CGN->removeCallEdgeFor(II);
    if (!I->use_empty())
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
  }

  // Get the list of successors of this block.
  std::vector<BasicBlock*> Succs(succ_begin(BB), succ_end(BB));

  for (unsigned i = 0, e = Succs.size(); i != e; ++i)
    Succs[i]->removePredecessor(BB);

  BB->eraseFromParent();
}
bool StructuredModuleEditor::removeFunc(Function *FunctionToRemove) {
// Checks to make sure the function we are trying to remove
// actually exists in the call graph
	if (FunctionToRemove == NULL) {
		OS << "Function does not exist in the call graph!\n";
		return false;
	}

	CallGraphNode *NodeToRemove = (*CG)[FunctionToRemove];

	// We cannot remove a node if it has any interprocedural in-edges
	for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I) {
		CallGraphNode *CallingNode = (*CG)[I];
		for (CallGraphNode::iterator CGNI = CallingNode->begin(), CGNE =
				CallingNode->end(); CGNI != CGNE; ++CGNI) {
			Function *Caller = I;
			Function *Callee = CGNI->second->getFunction();
			if (Callee == FunctionToRemove && Caller != Callee) {
				OS << "Cannot remove " << FunctionToRemove->getName()
						<< " because it has at least one interprocedural edge!\n";
				OS << "It is called by " << Caller->getName() << "\n";
				return false;
			}
		}
	}

// Removes all call graph edges from the node we are removing to its callees.
	NodeToRemove->removeAllCalledFunctions();
	CG->getExternalCallingNode()->removeAnyCallEdgeTo(NodeToRemove);

// Removes all call graph edges from callees to the node we are removing
	for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I) {
		CallGraphNode *CallingNode = (*CG)[I];
		CallingNode->removeAnyCallEdgeTo(NodeToRemove);
	}
	NodeToRemove->removeAnyCallEdgeTo(CG->getCallsExternalNode());

// Removes the function from the module and the call graph
	FunctionToRemove->dropAllReferences();

	// Remove the function from the module
	CG->removeFunctionFromModule(NodeToRemove);

	return true;
}
Example #12
0
bool CallGraphChecker::existsInCallGraph(Instruction *Call, Function *Callee) {
  CallGraph &CG = getAnalysis<CallGraph>();

  assert(Call && Callee);
  CallGraphNode *CallerNode = CG[Call->getParent()->getParent()];
  CallGraphNode *CalleeNode = CG[Callee];
  assert(CallerNode && CalleeNode);

  if (find(CallerNode->begin(), CallerNode->end(),
           CallGraphNode::CallRecord(Call, CalleeNode))
      != CallerNode->end()) {
    return true;
  }

  // An instruction conservatively calls all functions by calling
  // CallsExternalNode.
  if (find(CallerNode->begin(), CallerNode->end(),
           CallGraphNode::CallRecord(Call, CG.getCallsExternalNode()))
      != CallerNode->end()) {
    return true;
  }

  return false;
}
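A minimal usage sketch for the check above. The verifyAllCallSites helper is hypothetical, not part of the original pass: it walks every non-intrinsic call site in the module and asserts that each direct call is represented in the call graph, either by an edge to the callee's node or by the conservative edge to CallsExternalNode.

void CallGraphChecker::verifyAllCallSites(Module &M) {
  for (Module::iterator F = M.begin(), FE = M.end(); F != FE; ++F)
    for (Function::iterator BB = F->begin(), BE = F->end(); BB != BE; ++BB)
      for (BasicBlock::iterator I = BB->begin(), IE = BB->end(); I != IE; ++I) {
        CallSite CS(cast<Value>(I));
        // Skip non-call instructions and intrinsic calls.
        if (!CS || isa<IntrinsicInst>(I))
          continue;
        if (Function *Callee = CS.getCalledFunction())
          assert(existsInCallGraph(CS.getInstruction(), Callee) &&
                 "Direct call site missing from the call graph!");
      }
}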
Example #13
0
/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made.  Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                       DenseMap<const Value*, Value*> &ValueMap,
                                         CallGraph &CG) {
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Instruction *OrigCall = I->first.getInstruction();

    DenseMap<const Value*, Value*>::iterator VMI = ValueMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI != ValueMap.end() && VMI->second) {
      // If the call was inlined, but then constant folded, there is no edge to
      // add.  Check for this case.
      if (Instruction *NewCall = dyn_cast<Instruction>(VMI->second))
        CallerNode->addCalledFunction(CallSite::get(NewCall), I->second);
    }
  }
  // Update the call graph by deleting the edge from Callee to Caller.  We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}
Example #14
0
bool InternalizePass::runOnModule(Module &M) {
  CallGraph *CG = getAnalysisIfAvailable<CallGraph>();
  CallGraphNode *ExternalNode = CG ? CG->getExternalCallingNode() : 0;
  bool Changed = false;

  // Never internalize functions which code-gen might insert.
  // FIXME: We should probably add this (and the __stack_chk_guard) via some
  // type of call-back in CodeGen.
  ExternalNames.insert("__stack_chk_fail");

  // Mark all functions not in the api as internal.
  // FIXME: maybe use private linkage?
  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
    if (!I->isDeclaration() &&         // Function must be defined here
        // Available externally is really just a "declaration with a body".
        !I->hasAvailableExternallyLinkage() &&
        !I->hasLocalLinkage() &&  // Can't already have internal linkage
        !ExternalNames.count(I->getName())) {// Not marked to keep external?
      I->setLinkage(GlobalValue::InternalLinkage);
      // Remove a callgraph edge from the external node to this function.
      if (ExternalNode) ExternalNode->removeOneAbstractEdgeTo((*CG)[I]);
      Changed = true;
      ++NumFunctions;
      DEBUG(dbgs() << "Internalizing func " << I->getName() << "\n");
    }

  // Never internalize the llvm.used symbol.  It is used to implement
  // attribute((used)).
  // FIXME: Shouldn't this just filter on llvm.metadata section??
  ExternalNames.insert("llvm.used");
  ExternalNames.insert("llvm.compiler.used");

  // Never internalize anchors used by the machine module info, else the info
  // won't find them.  (see MachineModuleInfo.)
  ExternalNames.insert("llvm.global_ctors");
  ExternalNames.insert("llvm.global_dtors");
  ExternalNames.insert("llvm.global.annotations");

  // Never internalize symbols code-gen inserts.
  ExternalNames.insert("__stack_chk_guard");

  // Mark all global variables with initializers that are not in the api as
  // internal as well.
  // FIXME: maybe use private linkage?
  for (Module::global_iterator I = M.global_begin(), E = M.global_end();
       I != E; ++I)
    if (!I->isDeclaration() && !I->hasLocalLinkage() &&
        // Available externally is really just a "declaration with a body".
        !I->hasAvailableExternallyLinkage() &&
        !ExternalNames.count(I->getName())) {
      I->setLinkage(GlobalValue::InternalLinkage);
      Changed = true;
      ++NumGlobals;
      DEBUG(dbgs() << "Internalized gvar " << I->getName() << "\n");
    }

  // Mark all aliases that are not in the api as internal as well.
  for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
       I != E; ++I)
    if (!I->isDeclaration() && !I->hasInternalLinkage() &&
        // Available externally is really just a "declaration with a body".
        !I->hasAvailableExternallyLinkage() &&
        !ExternalNames.count(I->getName())) {
      I->setLinkage(GlobalValue::InternalLinkage);
      Changed = true;
      ++NumAliases;
      DEBUG(dbgs() << "Internalized alias " << I->getName() << "\n");
    }

  return Changed;
}
Example #15
0
// InlineFunction - This function inlines the called function into the basic
// block of the caller.  This returns false if it is not possible to inline this
// call.  The program is still in a well defined state if this occurs though.
//
// Note that this only does one level of inlining.  For example, if the
// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
// exists in the instruction stream.  Similarly this will inline a recursive
// function by one level.
//
bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||          // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;


  // If the call to the callee is not a tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  //
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  std::vector<ReturnInst*> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy ValueMap after cloning.
    DenseMap<const Value*, Value*> ValueMap;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy implied
      // by them explicit.  However, we don't do this if the callee is readonly
      // or readnone, because the copy would be unneeded: the callee doesn't
      // modify the struct.
      if (CalledFunc->paramHasAttr(ArgNo+1, Attribute::ByVal) &&
          !CalledFunc->onlyReadsMemory()) {
        const Type *AggTy = cast<PointerType>(I->getType())->getElementType();
        const Type *VoidPtrTy = PointerType::getUnqual(Type::Int8Ty);

        // Create the alloca.  If we have TargetData, use nice alignment.
        unsigned Align = 1;
        if (TD) Align = TD->getPrefTypeAlignment(AggTy);
        Value *NewAlloca = new AllocaInst(AggTy, 0, Align, I->getName(),
                                          Caller->begin()->begin());
        // Emit a memcpy.
        const Type *Tys[] = { Type::Int64Ty };
        Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                       Intrinsic::memcpy, 
                                                       Tys, 1);
        Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
        Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall);

        Value *Size;
        if (TD == 0)
          Size = ConstantExpr::getSizeOf(AggTy);
        else
          Size = ConstantInt::get(Type::Int64Ty, TD->getTypeStoreSize(AggTy));

        // Always generate a memcpy of alignment 1 here because we don't know
        // the alignment of the src pointer.  Other optimizations can infer
        // better alignment.
        Value *CallArgs[] = {
          DestCast, SrcCast, Size, ConstantInt::get(Type::Int32Ty, 1)
        };
        CallInst *TheMemCpy =
          CallInst::Create(MemCpyFn, CallArgs, CallArgs+4, "", TheCall);

        // If we have a call graph, update it.
        if (CG) {
          CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
          CallGraphNode *CallerNode = (*CG)[Caller];
          CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
        }

        // Uses of the argument in the function should use our new alloca
        // instead.
        ActualArg = NewAlloca;
      }

      ValueMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, ValueMap, Returns, ".i",
                              &InlinedFunctionInfo, TD);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, ValueMap, *CG);
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller.  First
  // calculate which instruction they should be inserted before.  We insert the
  // instructions at the end of the current alloca list.
  //
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
           E = FirstNewBlock->end(); I != E; )
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I++)) {
        // If the alloca is now dead, remove it.  This often occurs due to code
        // specialization.
        if (AI->use_empty()) {
          AI->eraseFromParent();
          continue;
        }

        if (isa<Constant>(AI->getArraySize())) {
          // Scan for the block of allocas that we can move over, and move them
          // all at once.
          while (isa<AllocaInst>(I) &&
                 isa<Constant>(cast<AllocaInst>(I)->getArraySize()))
            ++I;

          // Transfer all of the allocas over in a block.  Using splice means
          // that the instructions aren't removed from the symbol table, then
          // reinserted.
          Caller->getEntryBlock().getInstList().splice(
              InsertPoint,
              FirstNewBlock->getInstList(),
              AI, I);
        }
      }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Constant *StackSave, *StackRestore;
    StackSave    = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    StackRestore = Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // If we are preserving the callgraph, add edges to the stacksave/restore
    // functions for the calls we insert.
    CallGraphNode *StackSaveCGN = 0, *StackRestoreCGN = 0, *CallerNode = 0;
    if (CG) {
      // We know that StackSave/StackRestore are Function*'s, because they are
      // intrinsics which must have the right types.
      StackSaveCGN    = CG->getOrInsertFunction(cast<Function>(StackSave));
      StackRestoreCGN = CG->getOrInsertFunction(cast<Function>(StackRestore));
      CallerNode = (*CG)[Caller];
    }

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = CallInst::Create(StackSave, "savedstack",
                                          FirstNewBlock->begin());
    if (CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", Returns[i]);
      if (CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
    }

    // Count the number of StackRestore calls we insert.
    unsigned NumStackRestores = Returns.size();

    // If we are inlining an invoke instruction, insert restores before each
    // unwind.  These unwinds will be rewritten into branches later.
    if (InlinedFunctionInfo.ContainsUnwinds && isa<InvokeInst>(TheCall)) {
      for (Function::iterator BB = FirstNewBlock, E = Caller->end();
           BB != E; ++BB)
        if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
          CallInst::Create(StackRestore, SavedPtr, "", UI);
          ++NumStackRestores;
        }
    }
  }

  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code.  Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }

  // If we are inlining through a 'nounwind' call site then any inlined 'unwind'
  // instructions are unreachable.
  if (InlinedFunctionInfo.ContainsUnwinds && MarkNoUnwind)
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      TerminatorInst *Term = BB->getTerminator();
      if (isa<UnwindInst>(Term)) {
        new UnreachableInst(Term);
        BB->getInstList().erase(Term);
      }
    }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any inlined 'unwind' instructions into branches to the invoke exception
  // destination, and call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
      BranchInst::Create(II->getNormalDest(), TheCall);

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and make the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(NewBr,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);


  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  const Type *RTy = CalledFunc->getReturnType();

  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    PHINode *PHI = 0;
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst::Create(AfterCallBB, RI);
      RI->eraseFromParent();
    }
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty())
      TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  return true;
}
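A minimal usage sketch of the InlineFunction overload above (inlineAllCallsTo is a hypothetical helper, not part of the original sources): collect the direct call sites of a function first, then inline each one, passing the CallGraph so UpdateCallGraphAfterInlining keeps it consistent.

static bool inlineAllCallsTo(Function *Callee, CallGraph &CG,
                             const TargetData &TD) {
  // Collect the call sites up front; inlining erases the call instructions we
  // would otherwise still be iterating over.
  std::vector<CallSite> Sites;
  for (Value::use_iterator UI = Callee->use_begin(), UE = Callee->use_end();
       UI != UE; ++UI) {
    CallSite CS = CallSite::get(*UI);
    if (CS.getInstruction() && CS.getCalledFunction() == Callee)
      Sites.push_back(CS);
  }

  bool Changed = false;
  for (unsigned i = 0, e = Sites.size(); i != e; ++i)
    Changed |= InlineFunction(Sites[i], &CG, &TD);
  return Changed;
}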
Example #16
0
/// updateCallSites - Update all sites that call F to use NF.
CallGraphNode *SRETPromotion::updateCallSites(Function *F, Function *NF) {
  CallGraph &CG = getAnalysis<CallGraph>();
  SmallVector<Value*, 16> Args;

  // Attributes - Keep track of the parameter attributes for the arguments.
  SmallVector<AttributeWithIndex, 8> ArgAttrsVec;

  // Get a new callgraph node for NF.
  CallGraphNode *NF_CGN = CG.getOrInsertFunction(NF);

  while (!F->use_empty()) {
    CallSite CS(*F->use_begin());
    Instruction *Call = CS.getInstruction();

    const AttrListPtr &PAL = F->getAttributes();
    // Add any return attributes.
    if (Attributes attrs = PAL.getRetAttributes())
      ArgAttrsVec.push_back(AttributeWithIndex::get(0, attrs));

    // Copy the arguments, but skip the first one.
    CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
    Value *FirstCArg = *AI;
    ++AI;
    // The 0th parameter attribute slot is reserved for the return type.
    // The 1st slot is for the first (sret) argument.
    unsigned ParamIndex = 2; 
    while (AI != AE) {
      Args.push_back(*AI); 
      if (Attributes Attrs = PAL.getParamAttributes(ParamIndex))
        ArgAttrsVec.push_back(AttributeWithIndex::get(ParamIndex - 1, Attrs));
      ++ParamIndex;
      ++AI;
    }

    // Add any function attributes.
    if (Attributes attrs = PAL.getFnAttributes())
      ArgAttrsVec.push_back(AttributeWithIndex::get(~0, attrs));
    
    AttrListPtr NewPAL = AttrListPtr::get(ArgAttrsVec.begin(), ArgAttrsVec.end());
    
    // Build new call instruction.
    Instruction *New;
    if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
                               Args.begin(), Args.end(), "", Call);
      cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
      cast<InvokeInst>(New)->setAttributes(NewPAL);
    } else {
      New = CallInst::Create(NF, Args.begin(), Args.end(), "", Call);
      cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
      cast<CallInst>(New)->setAttributes(NewPAL);
      if (cast<CallInst>(Call)->isTailCall())
        cast<CallInst>(New)->setTailCall();
    }
    Args.clear();
    ArgAttrsVec.clear();
    New->takeName(Call);

    // Update the callgraph to know that the callsite has been transformed.
    CallGraphNode *CalleeNode = CG[Call->getParent()->getParent()];
    CalleeNode->removeCallEdgeFor(Call);
    CalleeNode->addCalledFunction(New, NF_CGN);
    
    // Update all users of the sret parameter to extract the value using
    // extractvalue.
    for (Value::use_iterator UI = FirstCArg->use_begin(), 
           UE = FirstCArg->use_end(); UI != UE; ) {
      User *U2 = *UI++;
      CallInst *C2 = dyn_cast<CallInst>(U2);
      if (C2 && (C2 == Call))
        continue;
      
      GetElementPtrInst *UGEP = cast<GetElementPtrInst>(U2);
      ConstantInt *Idx = cast<ConstantInt>(UGEP->getOperand(2));
      Value *GR = ExtractValueInst::Create(New, Idx->getZExtValue(),
                                           "evi", UGEP);
      while(!UGEP->use_empty()) {
        // isSafeToUpdateAllCallers has checked that all GEP uses are
        // LoadInsts
        LoadInst *L = cast<LoadInst>(*UGEP->use_begin());
        L->replaceAllUsesWith(GR);
        L->eraseFromParent();
      }
      UGEP->eraseFromParent();
      continue;
    }
    Call->eraseFromParent();
  }
  
  return NF_CGN;
}
Example #17
0
bool InternalizePass::runOnModule(Module &M) {
  CallGraphWrapperPass *CGPass = getAnalysisIfAvailable<CallGraphWrapperPass>();
  CallGraph *CG = CGPass ? &CGPass->getCallGraph() : 0;
  CallGraphNode *ExternalNode = CG ? CG->getExternalCallingNode() : 0;
  bool Changed = false;

  SmallPtrSet<GlobalValue *, 8> Used;
  collectUsedGlobalVariables(M, Used, false);

  // We must assume that globals in llvm.used have a reference that not even
  // the linker can see, so we don't internalize them.
  // For llvm.compiler.used the situation is a bit fuzzy. The assembler and
  // linker can drop those symbols. If this pass is running as part of LTO,
  // one might think that it could just drop llvm.compiler.used. The problem
  // is that even in LTO llvm doesn't see every reference. For example,
  // we don't see references from function local inline assembly. To be
  // conservative, we internalize symbols in llvm.compiler.used, but we
  // keep llvm.compiler.used so that the symbol is not deleted by llvm.
  for (SmallPtrSet<GlobalValue *, 8>::iterator I = Used.begin(), E = Used.end();
       I != E; ++I) {
    GlobalValue *V = *I;
    ExternalNames.insert(V->getName());
  }

  // Mark all functions not in the api as internal.
  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
    if (!shouldInternalize(*I, ExternalNames, OnlyHidden))
      continue;

    I->setLinkage(GlobalValue::InternalLinkage);

    if (ExternalNode)
      // Remove a callgraph edge from the external node to this function.
      ExternalNode->removeOneAbstractEdgeTo((*CG)[I]);

    Changed = true;
    ++NumFunctions;
    DEBUG(dbgs() << "Internalizing func " << I->getName() << "\n");
  }

  // Never internalize the llvm.used symbol.  It is used to implement
  // attribute((used)).
  // FIXME: Shouldn't this just filter on llvm.metadata section??
  ExternalNames.insert("llvm.used");
  ExternalNames.insert("llvm.compiler.used");

  // Never internalize anchors used by the machine module info, else the info
  // won't find them.  (see MachineModuleInfo.)
  ExternalNames.insert("llvm.global_ctors");
  ExternalNames.insert("llvm.global_dtors");
  ExternalNames.insert("llvm.global.annotations");

  // Never internalize symbols code-gen inserts.
  // FIXME: We should probably add this (and the __stack_chk_guard) via some
  // type of call-back in CodeGen.
  ExternalNames.insert("__stack_chk_fail");
  ExternalNames.insert("__stack_chk_guard");

  // Mark all global variables with initializers that are not in the api as
  // internal as well.
  for (Module::global_iterator I = M.global_begin(), E = M.global_end();
       I != E; ++I) {
    if (!shouldInternalize(*I, ExternalNames, OnlyHidden))
      continue;

    I->setLinkage(GlobalValue::InternalLinkage);
    Changed = true;
    ++NumGlobals;
    DEBUG(dbgs() << "Internalized gvar " << I->getName() << "\n");
  }

  // Mark all aliases that are not in the api as internal as well.
  for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
       I != E; ++I) {
    if (!shouldInternalize(*I, ExternalNames, OnlyHidden))
      continue;

    I->setLinkage(GlobalValue::InternalLinkage);
    Changed = true;
    ++NumAliases;
    DEBUG(dbgs() << "Internalized alias " << I->getName() << "\n");
  }

  return Changed;
}
void StructuredModuleEditor::instrumentCallsToFunction(Function *Callee) {
	if (Callee == NULL) {
		OS << "Function not found!\n";
		return;
	}

	InstList Calls = getCallsToFunction(Callee);

	FuncList Callers;
	for (InstList::iterator II = Calls.begin(), IE = Calls.end(); II != IE;
			++II) {
		Function *Caller = (*II)->getParent()->getParent();
		if (std::find(Callers.begin(), Callers.end(), Caller) == Callers.end())
			Callers.push_back(Caller);
	}

	OS << Callers.size() << " functions call '" << Callee->getName()
			<< "'...\n";
	OS << "=================================\n";
	for (FuncList::iterator FI = Callers.begin(), FE = Callers.end(); FI != FE;
			++FI) {
		OS << (*FI)->getName() << "\n";
	}
	OS << "=================================\n";

	std::vector<Value*> PreArgs;
	std::vector<Type*> PreArgTypes;
	for (Function::arg_iterator I = Callee->arg_begin(), E = Callee->arg_end();
			I != E; ++I) {
		PreArgTypes.push_back(I->getType());
		PreArgs.push_back(I);
	}

	std::vector<Type*> PostArgTypes;
	if (!Callee->getReturnType()->isVoidTy()) {
		PostArgTypes.push_back(Callee->getReturnType());
	}

	FuncList Clones;

	Clones.push_back(Callee);
	for (uint64_t i = 0; i < Callers.size() - 1; i++) {
		Function *Clone = cloneFunc(Callee);
		Clones.push_back(Clone);
	}

	for (uint64_t i = 0; i < Clones.size(); i++) {
		Constant *PreConst = M->getOrInsertFunction("",
				FunctionType::get(Type::getVoidTy(getGlobalContext()),
						PreArgTypes, false));
		Function *Pre = cast<Function>(PreConst);
		Pre->setName("pre");
		CG->getOrInsertFunction(Pre);

		Constant *PostConst = M->getOrInsertFunction("",
				FunctionType::get(Type::getVoidTy(getGlobalContext()),
						PostArgTypes, false));
		Function *Post = cast<Function>(PostConst);
		Post->setName("post");
		CG->getOrInsertFunction(Post);

		/*
		 OS << "\n";
		 OS << "Wrapping '" << Clones.at(i)->getName() << "'...\n\n";
		 OS << "Pre-invocation function = " << Pre->getName() << "\n";
		 OS << *Pre;
		 OS << "Post-invocation function = " << Post->getName() << "\n";
		 OS << *Post;
		 OS << "**************************************\n";*/

		Function *Wrapper = wrapFunc(Clones.at(i), Pre, Post);
		if (i == 0)
			Callee = Wrapper;

		Function *Caller = Callers.at(i);
		for (Function::iterator BBI = Caller->begin(), BBE = Caller->end();
				BBI != BBE; ++BBI) {
			for (BasicBlock::iterator II = BBI->begin(), IE = BBI->end();
					II != IE; ++II) {
				CallSite CS(cast<Value>(II));
				// If this isn't a call, or it is a call to an intrinsic...
				if (!CS || isa<IntrinsicInst>(II))
					continue;

				if (Callee == CS.getCalledFunction()) {
					CS.setCalledFunction(Wrapper);

					// Creates an edge from the calling node to its new destination node
					CallGraphNode *CallingNode = (*CG)[CS.getCaller()];
					CallGraphNode *NewCalleeNode = (*CG)[Wrapper];
					CallingNode->replaceCallEdge(CS, CS, NewCalleeNode);
				}
			}
		}
	}

	OS << "Functions successfully wrapped!\n";
}
Example #19
0
/// DoPromotion - This method actually performs the promotion of the specified
/// arguments, and returns the new function.  At this point, we know that it's
/// safe to do so.
CallGraphNode *ArgPromotion::DoPromotion(Function *F,
                               SmallPtrSet<Argument*, 8> &ArgsToPromote,
                              SmallPtrSet<Argument*, 8> &ByValArgsToTransform) {

  // Start by computing a new prototype for the function, which is the same as
  // the old function, but has modified arguments.
  const FunctionType *FTy = F->getFunctionType();
  std::vector<const Type*> Params;

  typedef std::set<IndicesVector> ScalarizeTable;

  // ScalarizedElements - If we are promoting a pointer that has elements
  // accessed out of it, keep track of which elements are accessed so that we
  // can add one argument for each.
  //
  // Arguments that are directly loaded will have a zero element value here, to
  // handle cases where there are both a direct load and GEP accesses.
  //
  std::map<Argument*, ScalarizeTable> ScalarizedElements;

  // OriginalLoads - Keep track of a representative load instruction from the
  // original function so that we can tell the alias analysis implementation
  // what the new GEP/Load instructions we are inserting look like.
  std::map<IndicesVector, LoadInst*> OriginalLoads;

  // Attributes - Keep track of the parameter attributes for the arguments
  // that we are *not* promoting. For the ones that we do promote, the
  // parameter attributes are lost.
  SmallVector<AttributeWithIndex, 8> AttributesVec;
  const AttrListPtr &PAL = F->getAttributes();

  // Add any return attributes.
  if (Attributes attrs = PAL.getRetAttributes())
    AttributesVec.push_back(AttributeWithIndex::get(0, attrs));

  // First, determine the new argument list
  unsigned ArgIndex = 1;
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
       ++I, ++ArgIndex) {
    if (ByValArgsToTransform.count(I)) {
      // Simple byval argument? Just add all the struct element types.
      const Type *AgTy = cast<PointerType>(I->getType())->getElementType();
      const StructType *STy = cast<StructType>(AgTy);
      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
        Params.push_back(STy->getElementType(i));
      ++NumByValArgsPromoted;
    } else if (!ArgsToPromote.count(I)) {
      // Unchanged argument
      Params.push_back(I->getType());
      if (Attributes attrs = PAL.getParamAttributes(ArgIndex))
        AttributesVec.push_back(AttributeWithIndex::get(Params.size(), attrs));
    } else if (I->use_empty()) {
      // Dead arguments (which are always marked as promotable) are simply dropped.
      ++NumArgumentsDead;
    } else {
      // Okay, this is being promoted. This means that the only uses are loads
      // or GEPs which are only used by loads

      // In this table, we will track which indices are loaded from the argument
      // (where direct loads are tracked as no indices).
      ScalarizeTable &ArgIndices = ScalarizedElements[I];
      for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
           ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        assert(isa<LoadInst>(User) || isa<GetElementPtrInst>(User));
        IndicesVector Indices;
        Indices.reserve(User->getNumOperands() - 1);
        // Since loads will only have a single operand, and GEPs only a single
        // non-index operand, this will record direct loads without any indices,
        // and gep+loads with the GEP indices.
        for (User::op_iterator II = User->op_begin() + 1, IE = User->op_end();
             II != IE; ++II)
          Indices.push_back(cast<ConstantInt>(*II)->getSExtValue());
        // GEPs with a single 0 index can be merged with direct loads
        if (Indices.size() == 1 && Indices.front() == 0)
          Indices.clear();
        ArgIndices.insert(Indices);
        LoadInst *OrigLoad;
        if (LoadInst *L = dyn_cast<LoadInst>(User))
          OrigLoad = L;
        else
          // Take any load, we will use it only to update Alias Analysis
          OrigLoad = cast<LoadInst>(User->use_back());
        OriginalLoads[Indices] = OrigLoad;
      }

      // Add a parameter to the function for each element passed in.
      for (ScalarizeTable::iterator SI = ArgIndices.begin(),
             E = ArgIndices.end(); SI != E; ++SI) {
        // not allowed to dereference ->begin() if size() is 0
        Params.push_back(GetElementPtrInst::getIndexedType(I->getType(),
                                                           SI->begin(),
                                                           SI->end()));
        assert(Params.back());
      }

      if (ArgIndices.size() == 1 && ArgIndices.begin()->empty())
        ++NumArgumentsPromoted;
      else
        ++NumAggregatesPromoted;
    }
  }

  // Add any function attributes.
  if (Attributes attrs = PAL.getFnAttributes())
    AttributesVec.push_back(AttributeWithIndex::get(~0, attrs));

  const Type *RetTy = FTy->getReturnType();

  // Work around LLVM bug PR56: the CWriter cannot emit varargs functions which
  // have zero fixed arguments.
  bool ExtraArgHack = false;
  if (Params.empty() && FTy->isVarArg()) {
    ExtraArgHack = true;
    Params.push_back(Type::getInt32Ty(F->getContext()));
  }

  // Construct the new function type using the new arguments.
  FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg());

  // Create the new function body and insert it into the module.
  Function *NF = Function::Create(NFTy, F->getLinkage(), F->getName());
  NF->copyAttributesFrom(F);

  
  DEBUG(dbgs() << "ARG PROMOTION:  Promoting to:" << *NF << "\n"
        << "From: " << *F);
  
  // Recompute the parameter attributes list based on the new arguments for
  // the function.
  NF->setAttributes(AttrListPtr::get(AttributesVec.begin(),
                                     AttributesVec.end()));
  AttributesVec.clear();

  F->getParent()->getFunctionList().insert(F, NF);
  NF->takeName(F);

  // Get the alias analysis information that we need to update to reflect our
  // changes.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Get the callgraph information that we need to update to reflect our
  // changes.
  CallGraph &CG = getAnalysis<CallGraph>();
  
  // Get a new callgraph node for NF.
  CallGraphNode *NF_CGN = CG.getOrInsertFunction(NF);
  

  // Loop over all of the callers of the function, transforming the call sites
  // to pass in the loaded pointers.
  //
  SmallVector<Value*, 16> Args;
  while (!F->use_empty()) {
    CallSite CS = CallSite::get(F->use_back());
    assert(CS.getCalledFunction() == F);
    Instruction *Call = CS.getInstruction();
    const AttrListPtr &CallPAL = CS.getAttributes();

    // Add any return attributes.
    if (Attributes attrs = CallPAL.getRetAttributes())
      AttributesVec.push_back(AttributeWithIndex::get(0, attrs));

    // Loop over the operands, inserting GEP and loads in the caller as
    // appropriate.
    CallSite::arg_iterator AI = CS.arg_begin();
    ArgIndex = 1;
    for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
         I != E; ++I, ++AI, ++ArgIndex)
      if (!ArgsToPromote.count(I) && !ByValArgsToTransform.count(I)) {
        Args.push_back(*AI);          // Unmodified argument

        if (Attributes Attrs = CallPAL.getParamAttributes(ArgIndex))
          AttributesVec.push_back(AttributeWithIndex::get(Args.size(), Attrs));

      } else if (ByValArgsToTransform.count(I)) {
        // Emit a GEP and load for each element of the struct.
        const Type *AgTy = cast<PointerType>(I->getType())->getElementType();
        const StructType *STy = cast<StructType>(AgTy);
        Value *Idxs[2] = {
              ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), 0 };
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
          Value *Idx = GetElementPtrInst::Create(*AI, Idxs, Idxs+2,
                                                 (*AI)->getName()+"."+utostr(i),
                                                 Call);
          // TODO: Tell AA about the new values?
          Args.push_back(new LoadInst(Idx, Idx->getName()+".val", Call));
        }
      } else if (!I->use_empty()) {
        // Non-dead argument: insert GEPs and loads as appropriate.
        ScalarizeTable &ArgIndices = ScalarizedElements[I];
        // Store the Value* version of the indices in here, but declare it now
        // for reuse.
        std::vector<Value*> Ops;
        for (ScalarizeTable::iterator SI = ArgIndices.begin(),
               E = ArgIndices.end(); SI != E; ++SI) {
          Value *V = *AI;
          LoadInst *OrigLoad = OriginalLoads[*SI];
          if (!SI->empty()) {
            Ops.reserve(SI->size());
            const Type *ElTy = V->getType();
            for (IndicesVector::const_iterator II = SI->begin(),
                 IE = SI->end(); II != IE; ++II) {
              // Use i32 to index structs, and i64 for others (pointers/arrays).
              // This satisfies GEP constraints.
              const Type *IdxTy = (ElTy->isStructTy() ?
                    Type::getInt32Ty(F->getContext()) : 
                    Type::getInt64Ty(F->getContext()));
              Ops.push_back(ConstantInt::get(IdxTy, *II));
              // Keep track of the type we're currently indexing.
              ElTy = cast<CompositeType>(ElTy)->getTypeAtIndex(*II);
            }
            // And create a GEP to extract those indices.
            V = GetElementPtrInst::Create(V, Ops.begin(), Ops.end(),
                                          V->getName()+".idx", Call);
            Ops.clear();
            AA.copyValue(OrigLoad->getOperand(0), V);
          }
          // Since we're replacing a load make sure we take the alignment
          // of the previous load.
          LoadInst *newLoad = new LoadInst(V, V->getName()+".val", Call);
          newLoad->setAlignment(OrigLoad->getAlignment());
          Args.push_back(newLoad);
          AA.copyValue(OrigLoad, Args.back());
        }
      }

    if (ExtraArgHack)
      Args.push_back(Constant::getNullValue(Type::getInt32Ty(F->getContext())));

    // Push any varargs arguments on the list.
    for (; AI != CS.arg_end(); ++AI, ++ArgIndex) {
      Args.push_back(*AI);
      if (Attributes Attrs = CallPAL.getParamAttributes(ArgIndex))
        AttributesVec.push_back(AttributeWithIndex::get(Args.size(), Attrs));
    }

    // Add any function attributes.
    if (Attributes attrs = CallPAL.getFnAttributes())
      AttributesVec.push_back(AttributeWithIndex::get(~0, attrs));

    Instruction *New;
    if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
                               Args.begin(), Args.end(), "", Call);
      cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
      cast<InvokeInst>(New)->setAttributes(AttrListPtr::get(AttributesVec.begin(),
                                                          AttributesVec.end()));
    } else {
      New = CallInst::Create(NF, Args.begin(), Args.end(), "", Call);
      cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
      cast<CallInst>(New)->setAttributes(AttrListPtr::get(AttributesVec.begin(),
                                                        AttributesVec.end()));
      if (cast<CallInst>(Call)->isTailCall())
        cast<CallInst>(New)->setTailCall();
    }
    Args.clear();
    AttributesVec.clear();

    // Update the alias analysis implementation to know that we are replacing
    // the old call with a new one.
    AA.replaceWithNewValue(Call, New);

    // Update the callgraph to know that the callsite has been transformed.
    CallGraphNode *CalleeNode = CG[Call->getParent()->getParent()];
    CalleeNode->replaceCallEdge(Call, New, NF_CGN);

    if (!Call->use_empty()) {
      Call->replaceAllUsesWith(New);
      New->takeName(Call);
    }

    // Finally, remove the old call from the program, reducing the use-count of
    // F.
    Call->eraseFromParent();
  }

  // Since we have now created the new function, splice the body of the old
  // function right into the new function, leaving the old rotting hulk of the
  // function empty.
  NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList());

  // Loop over the argument list, transferring uses of the old arguments over to
  // the new arguments, also transferring over the names as well.
  //
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(),
       I2 = NF->arg_begin(); I != E; ++I) {
    if (!ArgsToPromote.count(I) && !ByValArgsToTransform.count(I)) {
      // If this is an unmodified argument, move the name and users over to the
      // new version.
      I->replaceAllUsesWith(I2);
      I2->takeName(I);
      AA.replaceWithNewValue(I, I2);
      ++I2;
      continue;
    }

    if (ByValArgsToTransform.count(I)) {
      // In the callee, we create an alloca, and store each of the new incoming
      // arguments into the alloca.
      Instruction *InsertPt = NF->begin()->begin();

      // Just add all the struct element types.
      const Type *AgTy = cast<PointerType>(I->getType())->getElementType();
      Value *TheAlloca = new AllocaInst(AgTy, 0, "", InsertPt);
      const StructType *STy = cast<StructType>(AgTy);
      Value *Idxs[2] = {
            ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), 0 };

      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
        Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
        Value *Idx = 
          GetElementPtrInst::Create(TheAlloca, Idxs, Idxs+2,
                                    TheAlloca->getName()+"."+Twine(i), 
                                    InsertPt);
        I2->setName(I->getName()+"."+Twine(i));
        new StoreInst(I2++, Idx, InsertPt);
      }

      // Anything that used the arg should now use the alloca.
      I->replaceAllUsesWith(TheAlloca);
      TheAlloca->takeName(I);
      AA.replaceWithNewValue(I, TheAlloca);
      continue;
    }

    if (I->use_empty()) {
      AA.deleteValue(I);
      continue;
    }

    // Otherwise, if we promoted this argument, then all users are load
    // instructions (or GEPs with only load users), and all loads should be
    // using the new argument that we added.
    ScalarizeTable &ArgIndices = ScalarizedElements[I];

    while (!I->use_empty()) {
      if (LoadInst *LI = dyn_cast<LoadInst>(I->use_back())) {
        assert(ArgIndices.begin()->empty() &&
               "Load element should sort to front!");
        I2->setName(I->getName()+".val");
        LI->replaceAllUsesWith(I2);
        AA.replaceWithNewValue(LI, I2);
        LI->eraseFromParent();
        DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName()
              << "' in function '" << F->getName() << "'\n");
      } else {
        GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->use_back());
        IndicesVector Operands;
        Operands.reserve(GEP->getNumIndices());
        for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
             II != IE; ++II)
          Operands.push_back(cast<ConstantInt>(*II)->getSExtValue());

        // GEPs with a single 0 index can be merged with direct loads
        if (Operands.size() == 1 && Operands.front() == 0)
          Operands.clear();

        Function::arg_iterator TheArg = I2;
        for (ScalarizeTable::iterator It = ArgIndices.begin();
             *It != Operands; ++It, ++TheArg) {
          assert(It != ArgIndices.end() && "GEP not handled??");
        }

        std::string NewName = I->getName();
        for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
            NewName += "." + utostr(Operands[i]);
        }
        NewName += ".val";
        TheArg->setName(NewName);

        DEBUG(dbgs() << "*** Promoted agg argument '" << TheArg->getName()
              << "' of function '" << NF->getName() << "'\n");

        // All of the uses must be load instructions.  Replace them all with
        // the argument specified by ArgNo.
        while (!GEP->use_empty()) {
          LoadInst *L = cast<LoadInst>(GEP->use_back());
          L->replaceAllUsesWith(TheArg);
          AA.replaceWithNewValue(L, TheArg);
          L->eraseFromParent();
        }
        AA.deleteValue(GEP);
        GEP->eraseFromParent();
      }
    }

    // Increment I2 past all of the arguments added for this promoted pointer.
    for (unsigned i = 0, e = ArgIndices.size(); i != e; ++i)
      ++I2;
  }

  // Notify the alias analysis implementation that we inserted a new argument.
  if (ExtraArgHack)
    AA.copyValue(Constant::getNullValue(Type::getInt32Ty(F->getContext())), 
                 NF->arg_begin());


  // Tell the alias analysis that the old function is about to disappear.
  AA.replaceWithNewValue(F, NF);

  
  NF_CGN->stealCalledFunctionsFrom(CG[F]);
  
  // Now that the old function is dead, delete it.  If there is a dangling
  // reference to the CallgraphNode, just leave the dead function around for
  // someone else to nuke.
  CallGraphNode *CGN = CG[F];
  if (CGN->getNumReferences() == 0)
    delete CG.removeFunctionFromModule(CGN);
  else
    F->setLinkage(Function::ExternalLinkage);
  
  return NF_CGN;
}
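The routine above ends by returning the call graph node for the promoted function. In an old-pass-manager ArgumentPromotion-style pass, that return value is consumed by the SCC driver; a minimal sketch of that wiring, assuming the code above is wrapped in a PromoteArguments(CallGraphNode*) helper, could look like this:

// Sketch only (PromoteArguments is assumed to wrap the routine ending above).
bool ArgPromotion::runOnSCC(CallGraphSCC &SCC) {
  bool Changed = false, LocalChange;
  // Iterate until a fixed point: promoting one function can expose more work.
  do {
    LocalChange = false;
    for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
      if (CallGraphNode *NewNode = PromoteArguments(*I)) {
        LocalChange = true;
        // Swap the new node into the SCC so the CGSCC iterator stays valid.
        SCC.ReplaceNode(*I, NewNode);
      }
    Changed |= LocalChange;
  } while (LocalChange);
  return Changed;
}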
Example #20
0
File: Inliner.cpp Project: 8l/SPIRV-LLVM
bool Inliner::runOnSCC(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  AssumptionCacheTracker *ACT = &getAnalysis<AssumptionCacheTracker>();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  const TargetLibraryInfo *TLI = TLIP ? &TLIP->getTLI() : nullptr;
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();

  SmallPtrSet<Function*, 8> SCCFunctions;
  DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (F) SCCFunctions.insert(F);
    DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallSite, int>, 16> CallSites;
  
  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee.  This allows us to avoid
  // infinite inlining in some obscure cases.  To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function*, int>, 8> InlineHistory;

  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (!F) continue;
    
    for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
        CallSite CS(cast<Value>(I));
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CS || isa<IntrinsicInst>(I))
          continue;
        
        // If this is a direct call to an external function, we can never inline
        // it.  If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (CS.getCalledFunction() && CS.getCalledFunction()->isDeclaration())
          continue;
        
        CallSites.push_back(std::make_pair(CS, -1));
      }
  }

  DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this function, exit early.
  if (CallSites.empty())
    return false;
  
  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned i = 0; i < FirstCallInSCC; ++i)
    if (Function *F = CallSites[i].first.getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);

  
  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, AA, ACT);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Iterate over the outer loop because inlining functions can cause indirect
    // calls to become direct calls.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      CallSite CS = CallSites[CSi].first;
      
      Function *Caller = CS.getCaller();
      Function *Callee = CS.getCalledFunction();

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size.  This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (isInstructionTriviallyDead(CS.getInstruction(), TLI)) {
        DEBUG(dbgs() << "    -> Deleting dead call: "
                     << *CS.getInstruction() << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        CG[Caller]->removeCallEdgeFor(CS);
        CS.getInstruction()->eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // We can only inline direct calls to non-declarations.
        if (!Callee || Callee->isDeclaration()) continue;
      
        // If this call site was obtained by inlining another function, verify
        // that the include path for the function did not include the callee
        // itself.  If so, we'd be recursively inlining the same function,
        // which would provide the same callsites, which would cause us to
        // infinitely inline.
        int InlineHistoryID = CallSites[CSi].second;
        if (InlineHistoryID != -1 &&
            InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory))
          continue;
        
        LLVMContext &CallerCtx = Caller->getContext();

        // Get DebugLoc to report. CS will be invalid after Inliner.
        DebugLoc DLoc = CS.getInstruction()->getDebugLoc();

        // If the policy determines that we should inline this function,
        // try to do so.
        if (!shouldInline(CS)) {
          emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
                                       Twine(Callee->getName() +
                                             " will not be inlined into " +
                                             Caller->getName()));
          continue;
        }

        // Attempt to inline the function.
        if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
                                  InlineHistoryID, InsertLifetime)) {
          emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
                                       Twine(Callee->getName() +
                                             " will not be inlined into " +
                                             Caller->getName()));
          continue;
        }
        ++NumInlined;

        // Report the inline decision.
        emitOptimizationRemark(
            CallerCtx, DEBUG_TYPE, *Caller, DLoc,
            Twine(Callee->getName() + " inlined into " + Caller->getName()));

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process.  They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

          for (unsigned i = 0, e = InlineInfo.InlinedCalls.size();
               i != e; ++i) {
            Value *Ptr = InlineInfo.InlinedCalls[i];
            CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
          }
        }
      }
      
      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&
          
          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet, this
          // could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        DEBUG(dbgs() << "    -> Deleting dead function: "
              << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];
        
        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();
        
        // Removing the node for callee from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list.  If possible, use 
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin()+CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}
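To obtain the analyses queried at the top of runOnSCC above, the legacy pass must declare them. A plausible getAnalysisUsage for this style of inliner (inferred from the getAnalysis calls above, not copied from the project) would be:

// Sketch: dependencies inferred from the getAnalysis calls in runOnSCC above.
void Inliner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<AssumptionCacheTracker>();
  // TargetLibraryInfoWrapperPass is only queried via getAnalysisIfAvailable,
  // so it is not marked as required here.
  // CallGraphSCCPass adds the CallGraphWrapperPass requirement itself.
  CallGraphSCCPass::getAnalysisUsage(AU);
}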
Example #21
0
bool PathList::runOnModule(Module &M) {
	module = &M;
	
	llvm::dbgs() << "[runOnModule]: Moduel M has " << M.getFunctionList().size() << " Functions in all.\n";
	
	// for test
	Function *f1 = M.getFunction("fprintf");
	if (!f1)
		dbgs() << "[Test]: can not find function fprintf.\n";
	else
		dbgs() << "[Test]: find function fprintf.\n";
	  
	CallGraph &CG = getAnalysis<CallGraph>();
//	CG.dump();
	
	CallGraphNode *cgNode = CG.getRoot();
	cgNode->dump();
//	errs()<<node->getFunction()->getName()<<'\n';
	
	Function *startFunc;
	Function *endFunc;
	startFunc = M.getFunction("__user_main");
	
	//std::string fileName("/home/xqx/data/xqx/projects/benckmarks-klee/texinfo-4.8/build-shit/makeinfo/../../makeinfo/insertion.c");
	//int lineNo = 407;
	
	BB = getBB(fileName, lineNo);
	*targetBbpp = getBB(fileName, lineNo);
	if (BB) {
		endFunc = BB->getParent();
		if (!endFunc) {
			errs()<<"Error: get endFunc failed.\n";
			return false;
		}
		if (!startFunc) {
		  	errs()<<"Error: get startFunc failed.\n";
			return false;
		}
		errs()<<startFunc->getName()<<'\n';
	}
	else {
		errs()<<"Error: get BB failed.\n";
		return false;
	}
	
	
	
	//read start and end from xml files
//	defectList enStart, enEnd;
//	getEntryList("/tmp/entrys.xml", &enStart, "start");
//	getEntryList("/tmp/entrys.xml", &enEnd, "end");
//	getEntryList("/tmp/entrys.xml", &dl, "end");
//	dumpEntryList(&enStart);
//	dumpEntryList(&enEnd);
//	dumpEntryList(&dl);
	
	//read bug information from xml file
/*	for (defectList::iterator dit = dl.begin(); dit != dl.end(); dit++) {
		StringRef file(dit->first.c_str());
		std::vector<int> lines = dit->second;
		BasicBlock *BB = getBB(file, *(lines.begin()));
		if (BB) {
			endFunc = BB->getParent();
		}
	}
*/	
	//to store temporary path
	std::vector<BasicBlock*> p;
	// a counter
	int map_count = 0;
	
	for (Module::iterator i = M.begin(), e = M.end(); i != e; ++i) {
		Function *F = i;
		if (!F) {
			llvm::errs() << "***NULL Function***\n";
			continue;
		}
		cgNode = CG.getOrInsertFunction(F);
		F = cgNode->getFunction();
//		
		for (CallGraphNode::iterator I = cgNode->begin(), E = cgNode->end();
				I != E; ++I){
			CallGraphNode::CallRecord *cr = &*I;
//			llvm::errs() << "\tCS<" << cr->first << "> calls";
			// check whether the CallInst exists
			if(cr->first){
				Instruction *TmpIns = dyn_cast<Instruction>(cr->first);
				if(TmpIns) {
//					errs() << "\t" << *TmpIns << "\n";
					//unsigned int l, c;
					//std::string cfi_path = getInstPath(TmpIns, l, c);
					//if (!cfi_path.empty()) {
					//	if (cfi_path.find("uclibc") != std::string::npos) {
					//		dbgs() << "[Filter Uclib]: find an instruction from uclibc.\n";
					//		continue;
					//	} else if (cfi_path.find("POSIX") != std::string::npos) {
					//		dbgs() << "[Filter Uclib]: find an instruction from POSIX.\n";
					//		continue;
					//	}
					//}
				} else
					continue;
			}
			// get the function pointer which is called by the current CallRecord cr
			Function *FI = cr->second->getFunction();
			if (!FI)
				continue;
			
			// create a new CalledFunctions element and push it into calledFunctionMap.
			calledFunctionMap[FI].push_back(std::make_pair(F, dyn_cast<Instruction>(cr->first)));
			// for debugging
			map_count++;			
		}

	}
	
	dbgs() << "[Count Number of calledFunctionMap]: "<< calledFunctionMap.size() <<'\n';
	
	// analyze the global function pointer table
	if(function_pointer_analysis()) {
		errs() << "[Analyze global function pointer table success]\n";
	} else {
		errs() << "[Analyze global function pointer table failed]\n";
	}
	
	dbgs() << "[Count Number of calledFunctionMap]: "<< calledFunctionMap.size() <<'\n';
	
	// filter the instructions from uclibc
	//filter_uclibc();

	llvm::errs() << "=================================hh\n";
	llvm::errs() << "get Function Path: " << endFunc->getName() 
		<< " to " << startFunc->getName() << " \n";
	
//	printCalledFuncAndCFGPath(endFunc, startFunc, BB, p);
		
	// modification by wh
	evo_paths = new entire_path;
	//filter_paths = new func_bbs_type;
	//BB_paths_map = new std::map<std::pair<Function*, BasicBlock*>, std::vector<BasicBlock*> >;
	std::vector<std::pair< Function*, Instruction*> > tmp_func_path;
//	std::vector<BasicBlock*> tmp_bb_path;
//	explore_function_paths(endFunc, startFunc, bug_Inst, &tmp_func_path);
	collect_funcitons(endFunc, startFunc, bug_Inst, &tmp_func_path);
//	dbgs() << "++++++Found " << evo_paths->size() << " function paths.\n";
	
//	for (entire_path::iterator ep_it = evo_paths->begin(); ep_it != evo_paths->end(); ep_it++) {
//		for (std::vector<std::pair< Function*, Instruction*> >::iterator pair_it = ep_it->begin(); pair_it != ep_it->end(); pair_it++) {
//			if (filter_paths->size() != 0) {
//				std::vector<Instruction*>::iterator inst_it = std::find((*filter_paths)[pair_it->first].begin(), (*filter_paths)[pair_it->first].end(), pair_it->second);
//				if (inst_it != (*filter_paths)[pair_it->first].end()) {
//					continue;
//				}
//			}
//			(*filter_paths)[pair_it->first].push_back(pair_it->second);
//		}
//	}
	dbgs() << "[filter_paths]: contain " << filter_paths->size() << " functions in all.\n";
	
	for (func_bbs_type::iterator fbs_it = filter_paths->begin(); fbs_it != filter_paths->end(); fbs_it++) {
		for (std::vector<Instruction*>::iterator bb_it2 = fbs_it->second.begin(); bb_it2 != fbs_it->second.end(); bb_it2++) {
			dbgs() << "^^^^^^ " << fbs_it->first->getName() << ": " << (*bb_it2)->getParent()->getName() << '\n';
			// to expand functions
			call_insts.push_back((*bb_it2));
			
			explore_basicblock_paths(fbs_it->first, (*bb_it2)->getParent(), &(*BB_paths_map)[std::make_pair(fbs_it->first, *bb_it2)]);
			dbgs() << "^^^^^^ found " << (*BB_paths_map)[std::make_pair(fbs_it->first, *bb_it2)].size() << " basicblocks.\n";
		}
	}
	
	llvm::dbgs() << "!!!!!!!! Found " << call_insts.size() << " call instructions.\n";
	llvm::dbgs() << "!!!!!!!! Found " << path_basicblocks.size() << " path basicblocks.\n";
	
	// expand functions
	for (std::vector<Instruction*>::iterator ci_it = call_insts.begin(); ci_it != call_insts.end(); ci_it++) {
		BasicBlock *call_bb = (*ci_it)->getParent();
		if (!call_bb) {
			continue;
		}
		for (BasicBlock::iterator inst = call_bb->begin(); inst != call_bb->end(); inst++) {
			if (&*inst == *ci_it) {
				break;
			}
			if (isa<CallInst>(&*inst)) {
				std::vector<Instruction*>::iterator ci = std::find(path_call_insts.begin(), path_call_insts.end(), &*inst);
				if (ci != path_call_insts.end())
					continue;
				path_call_insts.push_back(&*inst);
			}
		}
	}
	llvm::dbgs() << "@@@@@@@@ After search call_insts, found " << path_call_insts.size() << " call instructions.\n";
	for (std::vector<BasicBlock*>::iterator p_bb_it = path_basicblocks.begin(); p_bb_it != path_basicblocks.end(); p_bb_it++) {
		for (BasicBlock::iterator inst = (*p_bb_it)->begin(); inst != (*p_bb_it)->end(); inst++) {
			if (isa<CallInst>(&*inst)) {
				std::vector<Instruction*>::iterator ci = std::find(path_call_insts.begin(), path_call_insts.end(), &*inst);
				if (ci != path_call_insts.end())
					continue;
				path_call_insts.push_back(&*inst);
			}
		}
	}
	llvm::dbgs() << "@@@@@@@@ After search path_basicblocks, found " << path_call_insts.size() << " call instructions.\n";
	for (std::vector<Instruction*>::iterator iit = path_call_insts.begin(); iit != path_call_insts.end(); iit++) {
		CallInst *ci = dyn_cast<CallInst>(*iit);
		if (!ci)
			continue;
		Function *ff = ci->getCalledFunction();
		if (!ff) {
			//ci->dump();
			//dbgs() << "\t[called value] " << ci->getOperand(0)->getName() << '\n'; 
			
			continue;
		}
		std::vector<Function*>::iterator fit = std::find(otherCalledFuncs->begin(), otherCalledFuncs->end(), ff);
		if (fit == otherCalledFuncs->end())
			otherCalledFuncs->push_back(ff);
	}
	llvm::dbgs() << "((((((((( Found " << otherCalledFuncs->size() << " functions.\n";
	
	for (int index = 0; index < otherCalledFuncs->size(); index++) {
		Function *f = otherCalledFuncs->at(index);
/*		if (!f) {
			//f->dump();
			llvm::dbgs() << "?????? index = " << index << " size = " << otherCalledFuncs->size()<< '\n';
			continue;
		}
*/		for (inst_iterator f_it = inst_begin(f); f_it != inst_end(f); f_it++) {
			CallInst *ci = dyn_cast<CallInst>(&*f_it);
			if (!ci)
				continue;
			if (!ci->getCalledFunction()) {
				//ci->dump();
				continue;
			}
			std::vector<Function*>::iterator fit = std::find(otherCalledFuncs->begin(), otherCalledFuncs->end(), ci->getCalledFunction());
			if (fit == otherCalledFuncs->end())
				otherCalledFuncs->push_back(ci->getCalledFunction());
		}
	}
	llvm::dbgs() << "((((((((( Found " << otherCalledFuncs->size() << " functions.\n";
	
	// This is just for statistics.
	int tmp_funcNum_in_filter_notIn_other = 0;
	for (func_bbs_type::iterator fbs_it = filter_paths->begin(); fbs_it != filter_paths->end(); fbs_it++) {
		if (!fbs_it->first) {
			llvm::dbgs() << "[Warning]: Found a null Function pointer in filter_paths.\n";
			continue;
		}
		std::vector<Function*>::iterator fit = std::find(otherCalledFuncs->begin(), otherCalledFuncs->end(), fbs_it->first);
		if (fit == otherCalledFuncs->end())
			//otherCalledFuncs->push_back(fbs_it->first);
			tmp_funcNum_in_filter_notIn_other ++;
	}
	llvm::dbgs() << "<><><><> After searching filter_paths, found " << otherCalledFuncs->size() + tmp_funcNum_in_filter_notIn_other << " functions.\n";
/*	for (entire_path::iterator ep_it = evo_paths->begin(); ep_it != evo_paths->end(); ep_it++) {
		dbgs() << "Path length is: " << ep_it->size() << '\n';
		for (std::vector<std::pair< Function*, BasicBlock*> >::iterator pair_it = ep_it->begin(); pair_it != ep_it->end(); pair_it++) {
			 dbgs() << "^^^^^^ " << pair_it->first->getName() << ": " << pair_it->second->getName() << '\n';
			 explore_basicblock_paths(pair_it->first, pair_it->second, &(*BB_paths_map)[*pair_it]);
			 dbgs() << "^^^^^^ found " << (*BB_paths_map)[*pair_it].size() << " basicblocks.\n";
		}
	}
*/		
	llvm::errs() << "on-end\n";
	llvm::errs() << "=================================\n";
	
	// output all of the paths
/*	errs()<<"Find "<<paths_found->size()<<" paths in all.\n";
	for(paths::iterator ips = paths_found->begin();ips != paths_found->end();ips++) {
//		std::vector<BasicBlock*> *tmpP = dyn_cast<std::vector<BasicBlock*>*>(&*ips);
		dbgs() << "=========A Path Start============\n";
		for(std::vector<BasicBlock*>::iterator ps = ips->begin(), pe = ips->end(); ps != pe; ps++) {
			BasicBlock *tmpStr = *ps;
			errs()<<"\t"<<tmpStr->getParent()->getName()<<": "<<tmpStr->getName()<<" -> \n";
		}
		errs()<<"=================================\n";
	}
*/	
	return false;
}
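calledFunctionMap built above maps each callee to its (caller, call instruction) pairs, which is what makes it possible to walk from endFunc back towards startFunc. A hypothetical helper under that assumption (the map typedef and function name are illustrative, not from the project):

// Hypothetical helper: breadth-first walk over calledFunctionMap, from the
// function containing the target block back towards the entry function.
typedef std::map<Function*, std::vector<std::pair<Function*, Instruction*> > > CalledFunctionMap;

static bool callerPathExists(Function *endFunc, Function *startFunc,
                             CalledFunctionMap &calledFunctionMap) {
	std::set<Function*> visited;
	std::vector<Function*> worklist(1, endFunc);
	while (!worklist.empty()) {
		Function *F = worklist.back();
		worklist.pop_back();
		if (F == startFunc)
			return true;
		if (!visited.insert(F).second)
			continue;
		// Enqueue every function that contains a call site reaching F.
		std::vector<std::pair<Function*, Instruction*> > &callers = calledFunctionMap[F];
		for (unsigned i = 0, e = callers.size(); i != e; ++i)
			worklist.push_back(callers[i].first);
	}
	return false;
}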
Example #22
0
CallGraphNode* ArgumentRecovery::recoverArguments(llvm::CallGraphNode *node)
{
	Function* fn = node->getFunction();
	if (fn == nullptr)
	{
		// "theoretical nodes", whatever that is
		return nullptr;
	}
	
	// quick exit if there isn't exactly one argument
	if (fn->arg_size() != 1)
	{
		return nullptr;
	}
	
	Argument* fnArg = fn->arg_begin();
	if (!isStructType(fnArg))
	{
		return nullptr;
	}
	
	// This is a nasty NASTY hack that relies on the AA pass being RegisterUse.
	// The data should be moved to a separate helper pass that can be queried from both the AA pass and this one.
	RegisterUse& regUse = getAnalysis<RegisterUse>();
	CallGraph& cg = getAnalysis<CallGraphWrapperPass>().getCallGraph();
	
	const auto* modRefInfo = regUse.getModRefInfo(fn);
	assert(modRefInfo != nullptr);
	
	// At this point we pretty much know that we're going to modify the function, so start doing that.
	// Get register offsets from the old function before we start mutilating it.
	auto& registerMap = exposeAllRegisters(fn);
	
	// Create a new function prototype, asking RegisterUse for which registers should be passed in, and how.
	
	LLVMContext& ctx = fn->getContext();
	SmallVector<pair<const char*, Type*>, 16> parameters;
	Type* int64 = Type::getInt64Ty(ctx);
	Type* int64ptr = Type::getInt64PtrTy(ctx);
	for (const auto& pair : *modRefInfo)
	{
		if (pair.second != RegisterUse::NoModRef)
		{
			Type* paramType = (pair.second & RegisterUse::Mod) == RegisterUse::Mod ? int64ptr : int64;
			parameters.push_back({pair.first, paramType});
		}
	}
	
	// Order parameters.
	// FIXME: This could use an ABI-specific sort routine. For now, use a lexicographical sort.
	sort(parameters.begin(), parameters.end(), [](const pair<const char*, Type*>& a, const pair<const char*, Type*>& b) {
		return strcmp(a.first, b.first) < 0;
	});
	
	// Extract parameter types.
	SmallVector<Type*, 16> parameterTypes;
	for (const auto& pair : parameters)
	{
		parameterTypes.push_back(pair.second);
	}
	
	// Ideally, we would also do caller analysis here to figure out which output registers are never read, such that
	// we can either eliminate them from the parameter list or pass them by value instead of by address.
	// We would also pick a return value.
	FunctionType* newFunctionType = FunctionType::get(Type::getVoidTy(ctx), parameterTypes, false);

	Function* newFunc = Function::Create(newFunctionType, fn->getLinkage());
	newFunc->copyAttributesFrom(fn);
	fn->getParent()->getFunctionList().insert(fn, newFunc);
	newFunc->takeName(fn);
	fn->setName("__hollow_husk__" + newFunc->getName());
	
	// Set argument names
	size_t i = 0;
	
	for (Argument& arg : newFunc->args())
	{
		arg.setName(parameters[i].first);
		i++;
	}
	
	// update call graph
	CallGraphNode* newFuncNode = cg.getOrInsertFunction(newFunc);
	CallGraphNode* oldFuncNode = cg[fn];
	
	// loop over callers and transform call sites.
	while (!fn->use_empty())
	{
		CallSite cs(fn->user_back());
		Instruction* call = cast<CallInst>(cs.getInstruction());
		Function* caller = call->getParent()->getParent();
		
		auto& registerPositions = exposeAllRegisters(caller);
		SmallVector<Value*, 16> callParameters;
		for (const auto& pair : parameters)
		{
			// HACKHACK: find a pointer to a 64-bit int in the set.
			Value* registerPointer = nullptr;
			auto range = registerPositions.equal_range(pair.first);
			for (auto iter = range.first; iter != range.second; iter++)
			{
				if (auto gep = dyn_cast<GetElementPtrInst>(iter->second))
				if (gep->getResultElementType() == int64)
				{
					registerPointer = gep;
					break;
				}
			}
			
			assert(registerPointer != nullptr);
			
			if (isa<PointerType>(pair.second))
			{
				callParameters.push_back(registerPointer);
			}
			else
			{
				// Create a load instruction. GVN will get rid of it if it's unnecessary.
				LoadInst* load = new LoadInst(registerPointer, pair.first, call);
				callParameters.push_back(load);
			}
		}
		
		CallInst* newCall = CallInst::Create(newFunc, callParameters, "", call);
		
		// Update AA
		regUse.replaceWithNewValue(call, newCall);
		
		// Update call graph
		CallGraphNode* callerNode = cg[caller];
		callerNode->replaceCallEdge(cs, CallSite(newCall), newFuncNode);
		
		// Finish replacing
		if (!call->use_empty())
		{
			call->replaceAllUsesWith(newCall);
			newCall->takeName(call);
		}
		
		call->eraseFromParent();
	}
	
	// Do not fix functions without a body.
	if (!fn->isDeclaration())
	{
		// Fix up function code. Start by moving everything into the new function.
		newFunc->getBasicBlockList().splice(newFunc->begin(), fn->getBasicBlockList());
		newFuncNode->stealCalledFunctionsFrom(oldFuncNode);
		
		// Change register uses
		size_t argIndex = 0;
		auto& argList = newFunc->getArgumentList();
		
		// Create a temporary insertion point. We don't want an existing instruction since chances are that we'll remove it.
		Instruction* insertionPoint = BinaryOperator::CreateAdd(ConstantInt::get(int64, 0), ConstantInt::get(int64, 0), "noop", newFunc->begin()->begin());
		for (auto iter = argList.begin(); iter != argList.end(); iter++, argIndex++)
		{
			Value* replaceWith = iter;
			const auto& paramTuple = parameters[argIndex];
			if (!isa<PointerType>(paramTuple.second))
			{
				// Create an alloca, copy value from parameter, replace GEP with alloca.
				// This is ugly code gen, but it will optimize easily, and still work if
				// we need a pointer reference to the register.
				auto alloca = new AllocaInst(paramTuple.second, paramTuple.first, insertionPoint);
				new StoreInst(iter, alloca, insertionPoint);
				replaceWith = alloca;
			}
			
			// Replace all uses with new instance.
			auto iterPair = registerMap.equal_range(paramTuple.first);
			for (auto registerMapIter = iterPair.first; registerMapIter != iterPair.second; registerMapIter++)
			{
				auto& registerValue = registerMapIter->second;
				registerValue->replaceAllUsesWith(replaceWith);
				cast<Instruction>(registerValue)->eraseFromParent();
				registerValue = replaceWith;
			}
		}
		
		// At this point, the uses of the argument struct left should be:
		// * preserved registers
		// * indirect jumps
		const auto& target = getAnalysis<TargetInfo>();
		while (!fnArg->use_empty())
		{
			auto lastUser = fnArg->user_back();
			if (auto user = dyn_cast<GetElementPtrInst>(lastUser))
			{
				// Promote register to alloca.
				const char* maybeName = target.registerName(*user);
				const char* regName = target.largestOverlappingRegister(maybeName);
				assert(regName != nullptr);
				
				auto alloca = new AllocaInst(user->getResultElementType(), regName, insertionPoint);
				user->replaceAllUsesWith(alloca);
				user->eraseFromParent();
			}
			else
			{
				auto call = cast<CallInst>(lastUser);
				
				Function* intrin = nullptr;
				StringRef intrinName = call->getCalledFunction()->getName();
				if (intrinName == "x86_jump_intrin")
				{
					intrin = indirectJump;
				}
				else if (intrinName == "x86_call_intrin")
				{
					intrin = indirectCall;
				}
				else
				{
					assert(false);
					// Can't decompile this function. Delete its body.
					newFunc->deleteBody();
					insertionPoint = nullptr;
					break;
				}
				
				// Replace intrinsic with another intrinsic.
				Value* jumpTarget = call->getOperand(2);
				SmallVector<Value*, 16> callArgs;
				callArgs.push_back(jumpTarget);
				for (Argument& arg : argList)
				{
					callArgs.push_back(&arg);
				}
				
				CallInst* varargCall = CallInst::Create(intrin, callArgs, "", call);
				newFuncNode->replaceCallEdge(CallSite(call), CallSite(varargCall), cg[intrin]);
				regUse.replaceWithNewValue(call, varargCall);
				
				varargCall->takeName(call);
				call->eraseFromParent();
			}
		}
		if (insertionPoint != nullptr)
		{
			// no longer needed
			insertionPoint->eraseFromParent();
		}
	}
	
	// At this point nothing should be using the old register argument anymore. (Pray!)
	// Leave the hollow husk of the old function in place to be erased by global DCE.
	registerAddresses[newFunc] = move(registerMap);
	registerAddresses.erase(fn);
	
	// Should be all.
	return newFuncNode;
}
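recoverArguments bails out unless the single argument passes isStructType; a plausible sketch of that helper (an assumption, not the project's actual implementation) is:

// Assumed helper, shown only to make the early-exit check above concrete:
// the lone argument is expected to be a pointer to the register struct.
static bool isStructType(Value* value)
{
	if (auto ptrType = dyn_cast<PointerType>(value->getType()))
	{
		return isa<StructType>(ptrType->getElementType());
	}
	return false;
}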
Example #23
0
File: tinline.cpp Project: ragnard/terra
//Inliner handles erasing functions since it also maintains a copy of the callgraph
//that needs to be kept up to date with the functions in the module
void ManualInliner::eraseFunction(Function * F) {
    CallGraphNode * n = CG->getOrInsertFunction(F);
    n->removeAllCalledFunctions();
    CG->removeFunctionFromModule(n);
    delete F;
}
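A caller of eraseFunction is expected to make sure nothing still references the function; a minimal usage sketch (assumed, not from the project) looks like:

// Usage sketch (assumed): only erase once every call site has been inlined
// away and the function can no longer be referenced from outside the module.
void ManualInliner::eraseIfDead(Function * F) {
    if (F->use_empty() && F->hasLocalLinkage())
        eraseFunction(F);
}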
Example #24
0
bool InlineModule::runOnModule( Module & M ) {
  
  std::vector<std::string> leafNames;
  //File *file = fopen("inline_info.txt", "r");
  //if (!file) {
  //  errs() << "Error: Could not open inline_info file.\n";
  //  retrun true;
  //}
  std::string line;
  std::ifstream file ("inline_info.txt");
  if(file.is_open()) {
    while(std::getline(file, line))
      leafNames.push_back(line);
    file.close();
  }
  else
    errs() << "Error: Could not open inline_info file.\n";

  //makeLeaf.push_back(M.getFunction("ORACLE_0"));
  //makeLeaf.push_back(M.getFunction("ORACLE_1"));
  //makeLeaf.push_back(M.getFunction("ORACLE_2"));
  //makeLeaf.push_back(M.getFunction("ORACLE_3"));

  for (std::vector<std::string>::iterator i = leafNames.begin(), e = leafNames.end();
      i!=e; ++i) {
    if (debugInlining)
      errs() << "inline_info: " << *i << "\n";
    makeLeaf.push_back(M.getFunction(*i));
  }
  

  // First, get a pointer to previous analysis results
  CallGraph & CG = getAnalysis<CallGraph>();

  CallGraphNode * entry = CG.getRoot();
  if( entry && entry->getFunction() && debugInlining)
    errs() << "Entry is function: " << entry->getFunction()->getName() << "\n";

  // Iterate over all SCCs in the module in bottom-up order
  for( scc_iterator<CallGraph*>
   si=scc_begin( &CG ), se=scc_end( &CG ); si != se; ++si ) {
    runOnSCC( *si );
  }

  //reverse the vector for preorder
  std::reverse(vectPostOrder.begin(),vectPostOrder.end());

  for(std::vector<Function*>::iterator vit = vectPostOrder.begin(), vitE = vectPostOrder.end();
      vit!=vitE; ++vit) { 
    Function *f = *vit;      
    runOnFunction(*f);    
  }

  
  // now we have all the call sites which need to be inlined
  // inline from the leaves all the way up
  const TargetData *TD = getAnalysisIfAvailable<TargetData>();
  InlineFunctionInfo InlineInfo(&CG, TD);  

  std::reverse(inlineCallInsts.begin(),inlineCallInsts.end());
  for (std::vector<CallInst*>::iterator i = inlineCallInsts.begin(), e = inlineCallInsts.end();
      i!=e; ++i) {
    CallInst* CI = *i;
    bool success = InlineFunction(CI, InlineInfo, false);
    if(!success) {
      if (debugInlining)
        errs() << "Error: Could not inline callee function " << CI->getCalledFunction()->getName()
                 << " into caller function " << "\n";
      continue;
    }
    if (debugInlining)    
      errs() << "Successfully inlined callee function " << CI->getCalledFunction()->getName()
                 << "into caller function " << "\n";
  }  
  
  return false;
}
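The pass above fetches CallGraph with getAnalysis, so it has to declare that requirement; a plausible getAnalysisUsage (assumed, matching the old-style CallGraph analysis used here) is:

// Sketch: requirement inferred from the getAnalysis<CallGraph>() call above.
void InlineModule::getAnalysisUsage( AnalysisUsage & AU ) const {
  AU.addRequired<CallGraph>();
  // TargetData is only fetched with getAnalysisIfAvailable, so it is not
  // declared as required.
}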
Example #25
0
bool Strator::runOnModule(Module &m) {
	errs() << "Strator started!\n";
	/// Get previous analysis results
	// multithreadedFunctionMap = &(getAnalysis<MultithreadFinder>().multithreadedFunctionMap);
//	if(UseLocalValueInfo)
//		localValueInfo = &(getAnalysis<LocalIdentifier>().localValueInfo);
	if(UseAliasAnalysis || DebugAliasAnalysis)
		aa = &getAnalysis<AliasAnalysis>();
		//aa = getAnalysis<LocalIdentifier>().aa;
	//useDefMap = &(getAnalysis<UseDefBuilder>().useDefMap);
	CallGraph& callGraph = getAnalysis<CallGraph>();

	GEPFactory = new GEPValueFactory(&m);

	if(PrintPrevPasses){
		printLocalModificationInfo();
	}

	CallGraphNode* externalNode = callGraph.getExternalCallingNode();

	if (DirectedRaceDetection){
		/// This is the first and default race detection strategy
		/// that only tracks a main function (like the
		/// unit test case). We call this directed race detection
		/// There is no thread pool in this case but a single worker
		threadPoolSize = 0;

		/// We use this simply to mark all the external entry point functions as initially not analyzed
		CallGraphNode::iterator it;
		for(it = externalNode->begin(); it != externalNode->end(); ++it){
			Function* f = 0;
			if((f = it->second->getFunction()))
				if(f->size() > 0){
					functionMap[f->getName().str()] = false;
					++functionCount;
				}
		}

		// cerr << "Total entry point count: " << functionCount << endl;

		CallGraphNode* root = callGraph.getRoot();
		assert(root->size() && "Size of the call graph root is 0! Something is wrong");
		cerr << "Root calls: " << root->size() << " functions"<< endl;

		if(!root->getFunction()){
			cerr << "The root represents an external node" << endl;
			assert(false && "You need to switch to global race detection!");
		}
		// cerr << root->getFunction()->getName().str() << endl;
		/// Initialize all functions to not-multithreaded
		workers.push_back(new StratorWorker(this));
		for(it = root->begin(); it != root->end(); ++it){
			Function* f = 0;
			if((f = it->second->getFunction())){
				if(f->size() > 0){
					workers[0]->multithreadedFunctionMap[it->second->getFunction()->getName().str()] = false;
				}
			}
		}
		Strator::StratorWorker::LockSet* lockSet = new Strator::StratorWorker::LockSet();
		workers[0]->traverseFunction(*(root->getFunction()), *lockSet);
	} else {
		/// This is the second variant of race detection. Here all the external
		/// functions with definitions are considered as root and race detection
		/// is performed over all their children. This variant is called global
		/// race detection
		pthread_t tids[50];
		threadPoolSize = 50;

		if(externalNode){
			/// TODO: We should investigate the following: Some static functions (thus
			/// having internal linkage) are considered as external nodes as they are
			/// used as parameters to some functions (like pthread_create). We should
			/// understand if it is necessary to add such functions as external nodes.
			CallGraphNode::iterator it;
			for(it = externalNode->begin(); it != externalNode->end(); ++it){
				Function* f = 0;
				if((f = it->second->getFunction()))
					if(f->size() > 0){
						cerr << "adding function \"" << it->second->getFunction()->getName().str() << "\" to task list" << endl;
						tasks.push_back(f);
						++functionCount;
					}
			}
			/// Create the thread pool
			threadPoolSize = (threadPoolSize < functionCount) ? threadPoolSize : functionCount;
			/// Create as many workers as the pool size
			// threadPoolSize = 1;
			cerr << "Thread pool size:" << threadPoolSize << endl;
			for(unsigned i=0; i<threadPoolSize; ++i)
				workers.push_back(new StratorWorker(this));
			/// trigger the worker threads
			for(unsigned i=0; i<threadPoolSize; ++i){
				int retVal = pthread_create(&tids[i], 0, stratorWorkerHelper, workers[i]);
				assert(retVal == 0 && "Problem with creating the threads");
			}

			/// Synchronize the threads
			for(unsigned i=0; i<threadPoolSize; ++i){
				int retVal = pthread_join(tids[i], 0);
				assert(retVal == 0 && "Problem with joining the threads");
			}
		}
	}

	if(ReportUnprocessed){
		/// list all the unprocessed entry point functions
		cerr << "Following entry point functions were not processed: " << endl;
		FunctionMap::iterator fMIt;
		for(fMIt = functionMap.begin(); fMIt != functionMap.end(); ++fMIt){
			if(!fMIt->second)
				cerr << fMIt->first << endl;
		}
	}

	/// We have gathered race detection data, now report
	reportRaces();
	/// We did not modify the module, return false
	return false;
}
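The global variant hands workers[i] to pthread_create through stratorWorkerHelper; a minimal sketch of that trampoline (the work() entry point is an assumption) could be:

/// Sketch only: pthread entry point assumed by the pthread_create call above.
/// The worker is expected to pop functions off the shared task list itself;
/// work() stands in for that per-worker loop.
static void* stratorWorkerHelper(void* arg){
	Strator::StratorWorker* worker = static_cast<Strator::StratorWorker*>(arg);
	worker->work();
	return 0;
}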
Example #26
0
File: Inliner.cpp Project: 8l/SPIRV-LLVM
/// Remove dead functions that are not included in DNR (Do Not Remove) list.
bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode*, 16> FunctionsToRemove;
  SmallVector<CallGraphNode *, 16> DeadFunctionsInComdats;
  SmallDenseMap<const Comdat *, int, 16> ComdatEntriesAlive;

  auto RemoveCGN = [&](CallGraphNode *CGN) {
    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node.  These edges might have been made irrelevant due to
    // optimization of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Removing the node for callee from the call graph and delete it.
    FunctionsToRemove.push_back(CGN);
  };

  // Scan for all of the functions, looking for ones that should now be removed
  // from the program.  Insert the dead ones in the FunctionsToRemove set.
  for (CallGraph::iterator I = CG.begin(), E = CG.end(); I != E; ++I) {
    CallGraphNode *CGN = I->second;
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions. This is a bit of a hack to share code
    // between here and the InlineAlways pass.
    if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // It is unsafe to drop a function with discardable linkage from a COMDAT
    // without also dropping the other members of the COMDAT.
    // The inliner doesn't visit non-function entities which are in COMDAT
    // groups so it is unsafe to do so *unless* the linkage is local.
    if (!F->hasLocalLinkage()) {
      if (const Comdat *C = F->getComdat()) {
        --ComdatEntriesAlive[C];
        DeadFunctionsInComdats.push_back(CGN);
        continue;
      }
    }

    RemoveCGN(CGN);
  }
  if (!DeadFunctionsInComdats.empty()) {
    // Count up all the entities in COMDAT groups
    auto ComdatGroupReferenced = [&](const Comdat *C) {
      auto I = ComdatEntriesAlive.find(C);
      if (I != ComdatEntriesAlive.end())
        ++(I->getSecond());
    };
    for (const Function &F : CG.getModule())
      if (const Comdat *C = F.getComdat())
        ComdatGroupReferenced(C);
    for (const GlobalVariable &GV : CG.getModule().globals())
      if (const Comdat *C = GV.getComdat())
        ComdatGroupReferenced(C);
    for (const GlobalAlias &GA : CG.getModule().aliases())
      if (const Comdat *C = GA.getComdat())
        ComdatGroupReferenced(C);
    for (CallGraphNode *CGN : DeadFunctionsInComdats) {
      Function *F = CGN->getFunction();
      const Comdat *C = F->getComdat();
      int NumAlive = ComdatEntriesAlive[C];
      // We can remove functions in a COMDAT group if the entire group is dead.
      assert(NumAlive >= 0);
      if (NumAlive > 0)
        continue;

      RemoveCGN(CGN);
    }
  }

  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so.  We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that it doesn't matter that we are iterating over a non-stable order
  // here to do this, it doesn't matter which order the functions are deleted
  // in.
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(std::unique(FunctionsToRemove.begin(),
                                      FunctionsToRemove.end()),
                          FunctionsToRemove.end());
  for (SmallVectorImpl<CallGraphNode *>::iterator I = FunctionsToRemove.begin(),
                                                  E = FunctionsToRemove.end();
       I != E; ++I) {
    delete CG.removeFunctionFromModule(*I);
    ++NumDeleted;
  }
  return true;
}
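removeDeadFunctions is normally driven from a pass's finalization hook; for an always-inline-only client, the AlwaysInlineOnly flag is what keeps ordinary dead functions alive. A sketch of such a caller (class name assumed):

// Usage sketch (assumed caller): an always-inline-only pass removes only
// functions it was allowed to inline, so it passes AlwaysInlineOnly = true.
bool AlwaysInliner::doFinalization(CallGraph &CG) {
  return removeDeadFunctions(CG, /*AlwaysInlineOnly=*/ true);
}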
Example #27
0
static bool
inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG,
                std::function<AssumptionCache &(Function &)> GetAssumptionCache,
                ProfileSummaryInfo *PSI, TargetLibraryInfo &TLI,
                bool InsertLifetime,
                function_ref<InlineCost(CallSite CS)> GetInlineCost,
                function_ref<AAResults &(Function &)> AARGetter,
                ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  SmallPtrSet<Function *, 8> SCCFunctions;
  LLVM_DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (F)
      SCCFunctions.insert(F);
    LLVM_DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallSite, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee.  This allows us to avoid
  // infinite inlining in some obscure cases.  To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 8> InlineHistory;

  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (!F || F->isDeclaration())
      continue;

    OptimizationRemarkEmitter ORE(F);
    for (BasicBlock &BB : *F)
      for (Instruction &I : BB) {
        CallSite CS(cast<Value>(&I));
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CS || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never inline
        // it.  If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (Function *Callee = CS.getCalledFunction())
          if (Callee->isDeclaration()) {
            using namespace ore;

            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CS.getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
            continue;
          }

        CallSites.push_back(std::make_pair(CS, -1));
      }
  }

  LLVM_DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this function, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned i = 0; i < FirstCallInSCC; ++i)
    if (Function *F = CallSites[i].first.getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);

  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, &GetAssumptionCache, PSI);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Iterate over the outer loop because inlining functions can cause indirect
    // calls to become direct calls.
    // CallSites may be modified inside so ranged for loop can not be used.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      CallSite CS = CallSites[CSi].first;

      Function *Caller = CS.getCaller();
      Function *Callee = CS.getCalledFunction();

      // We can only inline direct calls to non-declarations.
      if (!Callee || Callee->isDeclaration())
        continue;

      Instruction *Instr = CS.getInstruction();

      bool IsTriviallyDead = isInstructionTriviallyDead(Instr, &TLI);

      int InlineHistoryID;
      if (!IsTriviallyDead) {
        // If this call site was obtained by inlining another function, verify
        // that the include path for the function did not include the callee
        // itself.  If so, we'd be recursively inlining the same function,
        // which would provide the same callsites, which would cause us to
        // infinitely inline.
        InlineHistoryID = CallSites[CSi].second;
        if (InlineHistoryID != -1 &&
            InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory))
          continue;
      }

      // FIXME for new PM: because of the old PM we currently generate ORE and
      // in turn BFI on demand.  With the new PM, the ORE dependency should
      // just become a regular analysis dependency.
      OptimizationRemarkEmitter ORE(Caller);

      Optional<InlineCost> OIC = shouldInline(CS, GetInlineCost, ORE);
      // If the policy determines that we should not inline this call site,
      // skip it.
      if (!OIC)
        continue;

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size.  This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (IsTriviallyDead) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead call: " << *Instr << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        CG[Caller]->removeCallEdgeFor(CS);
        Instr->eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // Get DebugLoc to report. CS will be invalid after Inliner.
        DebugLoc DLoc = CS->getDebugLoc();
        BasicBlock *Block = CS.getParent();

        // Attempt to inline the function.
        using namespace ore;

        if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
                                  InlineHistoryID, InsertLifetime, AARGetter,
                                  ImportedFunctionsStats)) {
          ORE.emit([&]() {
            return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc,
                                            Block)
                   << NV("Callee", Callee) << " will not be inlined into "
                   << NV("Caller", Caller);
          });
          continue;
        }
        ++NumInlined;

        ORE.emit([&]() {
          bool AlwaysInline = OIC->isAlways();
          StringRef RemarkName = AlwaysInline ? "AlwaysInline" : "Inlined";
          OptimizationRemark R(DEBUG_TYPE, RemarkName, DLoc, Block);
          R << NV("Callee", Callee) << " inlined into ";
          R << NV("Caller", Caller);
          if (AlwaysInline)
            R << " with cost=always";
          else {
            R << " with cost=" << NV("Cost", OIC->getCost());
            R << " (threshold=" << NV("Threshold", OIC->getThreshold());
            R << ")";
          }
          return R;
        });

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process.  They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

          for (Value *Ptr : InlineInfo.InlinedCalls)
            CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&
          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet, this
          // could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead function: "
                          << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Removing the node for callee from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list.  If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin() + CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}
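// Note: the inline-history mechanism used above (NewHistoryID /
// InlineHistoryID) keeps a chain of (Callee, ParentHistoryID) entries so the
// inliner can tell whether a candidate callee was already inlined on the
// current path, which is what bounds recursive and mutually recursive
// inlining.  Below is a minimal standalone sketch of that chain walk,
// assuming -1 marks "no history"; the helper name is illustrative, not the
// LLVM API.
#include <utility>
#include <vector>

struct Function; // opaque stand-in for llvm::Function

// Return true if F already appears anywhere on the history chain that
// starts at CurrentID.
static bool inlineHistoryIncludes(
    const Function *F, int CurrentID,
    const std::vector<std::pair<const Function *, int>> &InlineHistory) {
  while (CurrentID != -1) {
    const auto &Entry = InlineHistory[CurrentID];
    if (Entry.first == F)
      return true;            // F was already inlined along this path.
    CurrentID = Entry.second; // Walk up to the parent history entry.
  }
  return false;
}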
Example #28
bool InternalizePass::runOnModule(Module &M) {
  CallGraph *CG = getAnalysisIfAvailable<CallGraph>();
  CallGraphNode *ExternalNode = CG ? CG->getExternalCallingNode() : 0;
  
  if (ExternalNames.empty()) {
    // Return if we're not in 'all but main' mode and have no external api
    if (!AllButMain)
      return false;
    // If no list or file of symbols was specified, check to see if there is a
    // "main" symbol defined in the module.  If so, use it, otherwise do not
    // internalize the module, it must be a library or something.
    //
    Function *MainFunc = M.getFunction("main");
    if (MainFunc == 0 || MainFunc->isDeclaration())
      return false;  // No main found, must be a library...

    // Preserve main, internalize all else.
    ExternalNames.insert(MainFunc->getName());
  }

  bool Changed = false;

  // Never internalize functions which code-gen might insert.
  ExternalNames.insert("__stack_chk_fail");

  // Mark all functions not in the api as internal.
  // FIXME: maybe use private linkage?
  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
    if (!I->isDeclaration() &&         // Function must be defined here
        // Available externally is really just a "declaration with a body".
        !I->hasAvailableExternallyLinkage() &&
        !I->hasLocalLinkage() &&  // Can't already have internal linkage
        !ExternalNames.count(I->getName())) {// Not marked to keep external?
      I->setLinkage(GlobalValue::InternalLinkage);
      // Remove a callgraph edge from the external node to this function.
      if (ExternalNode) ExternalNode->removeOneAbstractEdgeTo((*CG)[I]);
      Changed = true;
      ++NumFunctions;
      DEBUG(dbgs() << "Internalizing func " << I->getName() << "\n");
    }

  // Never internalize the llvm.used symbol.  It is used to implement
  // attribute((used)).
  // FIXME: Shouldn't this just filter on llvm.metadata section??
  ExternalNames.insert("llvm.used");
  ExternalNames.insert("llvm.compiler.used");

  // Never internalize anchors used by the machine module info, else the info
  // won't find them.  (see MachineModuleInfo.)
  ExternalNames.insert("llvm.global_ctors");
  ExternalNames.insert("llvm.global_dtors");
  ExternalNames.insert("llvm.global.annotations");

  // Never internalize symbols code-gen inserts.
  ExternalNames.insert("__stack_chk_guard");

  // Mark all global variables with initializers that are not in the api as
  // internal as well.
  // FIXME: maybe use private linkage?
  for (Module::global_iterator I = M.global_begin(), E = M.global_end();
       I != E; ++I)
    if (!I->isDeclaration() && !I->hasLocalLinkage() &&
        // Available externally is really just a "declaration with a body".
        !I->hasAvailableExternallyLinkage() &&
        !ExternalNames.count(I->getName())) {
      I->setLinkage(GlobalValue::InternalLinkage);
      Changed = true;
      ++NumGlobals;
      DEBUG(dbgs() << "Internalized gvar " << I->getName() << "\n");
    }

  // Mark all aliases that are not in the api as internal as well.
  for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
       I != E; ++I)
    if (!I->isDeclaration() && !I->hasInternalLinkage() &&
        // Available externally is really just a "declaration with a body".
        !I->hasAvailableExternallyLinkage() &&
        !ExternalNames.count(I->getName())) {
      I->setLinkage(GlobalValue::InternalLinkage);
      Changed = true;
      ++NumAliases;
      DEBUG(dbgs() << "Internalized alias " << I->getName() << "\n");
    }

  return Changed;
}
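// Note: the pass above applies the same test to functions, global variables
// and aliases: internalize only symbols that are defined in this module, are
// not available_externally, do not already have internal (local) linkage, and
// are not on the keep list (ExternalNames).  Below is a standalone sketch of
// that predicate with simplified stand-in types; it is illustrative only, not
// the LLVM API.
#include <set>
#include <string>

struct SymbolInfo {
  std::string Name;
  bool IsDeclaration;         // true if the symbol has no body in this module
  bool HasLocalLinkage;       // already internal/private
  bool IsAvailableExternally; // really just a declaration with a body
};

static bool shouldInternalize(const SymbolInfo &Sym,
                              const std::set<std::string> &ExternalNames) {
  return !Sym.IsDeclaration &&
         !Sym.IsAvailableExternally &&
         !Sym.HasLocalLinkage &&
         ExternalNames.count(Sym.Name) == 0; // not explicitly kept external
}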
Example #29
/// Remove dead functions that are not included in DNR (Do Not Remove) list.
bool LegacyInlinerBase::removeDeadFunctions(CallGraph &CG,
                                            bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode *, 16> FunctionsToRemove;
  SmallVector<Function *, 16> DeadFunctionsInComdats;

  auto RemoveCGN = [&](CallGraphNode *CGN) {
    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node.  These edges might have been made irrelevant due to
    // optimization of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Remove the node for the callee from the call graph and delete it.
    FunctionsToRemove.push_back(CGN);
  };

  // Scan for all of the functions, looking for ones that should now be removed
  // from the program.  Insert the dead ones in the FunctionsToRemove set.
  for (const auto &I : CG) {
    CallGraphNode *CGN = I.second.get();
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions. This is a bit of a hack to share code
    // between here and the InlineAlways pass.
    if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // It is unsafe to drop a function with discardable linkage from a COMDAT
    // without also dropping the other members of the COMDAT.
    // The inliner doesn't visit non-function entities which are in COMDAT
    // groups so it is unsafe to do so *unless* the linkage is local.
    if (!F->hasLocalLinkage()) {
      if (F->hasComdat()) {
        DeadFunctionsInComdats.push_back(F);
        continue;
      }
    }

    RemoveCGN(CGN);
  }
  if (!DeadFunctionsInComdats.empty()) {
    // Filter out the functions whose comdats remain alive.
    filterDeadComdatFunctions(CG.getModule(), DeadFunctionsInComdats);
    // Remove the rest.
    for (Function *F : DeadFunctionsInComdats)
      RemoveCGN(CG[F]);
  }

  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so.  We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that it doesn't matter that we are iterating over a non-stable order
  // here to do this, it doesn't matter which order the functions are deleted
  // in.
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(
      std::unique(FunctionsToRemove.begin(), FunctionsToRemove.end()),
      FunctionsToRemove.end());
  for (CallGraphNode *CGN : FunctionsToRemove) {
    delete CG.removeFunctionFromModule(CGN);
    ++NumDeleted;
  }
  return true;
}
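// Note: FunctionsToRemove is deduplicated with the sort + unique + erase
// idiom before deletion, so no call graph node is freed twice even if it was
// queued more than once (array_pod_sort is LLVM's qsort-based helper for
// plain-old-data element types such as pointers).  The same idiom expressed
// with only standard-library calls, as an illustrative sketch:
#include <algorithm>
#include <vector>

template <typename T>
static void sortAndDeduplicate(std::vector<T *> &Items) {
  std::sort(Items.begin(), Items.end());               // order by pointer value
  Items.erase(std::unique(Items.begin(), Items.end()), // drop adjacent duplicates
              Items.end());
}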