// Pad a direct call to a vararg function with one extra trailing i8 0
// argument, replacing the original call/invoke instruction in place.
// Indirect calls and calls to non-vararg functions are left untouched.
void Preparer::expandCallSite(CallSite CS) {
  if (!CS.getCalledFunction()) return;
  Function *F = CS.getCalledFunction();
  if (!F->isVarArg()) return;

  // Copy the existing arguments, then append the i8 0 pad.
  vector<Value *> Args;
  for (CallSite::arg_iterator ArgI = CS.arg_begin(); ArgI != CS.arg_end(); ArgI++) {
    Args.push_back(*ArgI);
  }
  Args.push_back(ConstantInt::get(
      IntegerType::get(CS.getInstruction()->getContext(), 8), 0));

  // Preserve the original value name (suffixed) when there is one.
  string InstName = "";
  if (CS.getInstruction()->getName() != "")
    InstName = CS.getInstruction()->getName().str() + ".padded";

  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    CallInst *NewCI = CallInst::Create(F, Args, InstName, CI);
    NewCI->setAttributes(CI->getAttributes());
    // FIX: the original code copied only the attribute list, silently
    // dropping the calling convention and tail-call marker of the replaced
    // call; a call whose convention disagrees with the callee's is UB.
    NewCI->setCallingConv(CI->getCallingConv());
    NewCI->setTailCall(CI->isTailCall());
    CI->replaceAllUsesWith(NewCI);
    CI->eraseFromParent();
  } else if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
    InvokeInst *NewII = InvokeInst::Create(F, II->getNormalDest(),
                                           II->getUnwindDest(), Args,
                                           InstName, II);
    NewII->setAttributes(II->getAttributes());
    // FIX: propagate the calling convention for the invoke form as well.
    NewII->setCallingConv(II->getCallingConv());
    II->replaceAllUsesWith(NewII);
    II->eraseFromParent();
  }
}
// Split an objc_msgSend(-ish) call into an explicit two-step sequence:
// first call objc_msg_lookup_sender to fetch a slot, then call the IMP
// loaded from that slot. Returns the lookup call site (so the caller can
// cache the slot); aborts on a call site that is not one of the three
// recognized msgSend entry points.
CallSite GNUstep::IMPCacher::SplitSend(CallSite msgSend) {
  BasicBlock *lookupBB = msgSend->getParent();
  Function *F = lookupBB->getParent();
  Module *M = F->getParent();
  Function *send = M->getFunction("objc_msgSend");
  Function *send_stret = M->getFunction("objc_msgSend_stret");
  Function *send_fpret = M->getFunction("objc_msgSend_fpret");
  Value *self;
  Value *cmd;
  int selfIndex = 0;
  // For the stret variant the hidden sret pointer occupies argument 0, so
  // self/cmd shift up by one.
  if ((msgSend.getCalledFunction() == send) ||
      (msgSend.getCalledFunction() == send_fpret)) {
    self = msgSend.getArgument(0);
    cmd = msgSend.getArgument(1);
  } else if (msgSend.getCalledFunction() == send_stret) {
    selfIndex = 1;
    self = msgSend.getArgument(1);
    cmd = msgSend.getArgument(2);
  } else {
    abort();
    return CallSite();
  }
  // Alloca in the entry block (so it's a static alloca), then store self
  // into it right before the send; the lookup takes self by pointer.
  CGBuilder B(&F->getEntryBlock(), F->getEntryBlock().begin());
  Value *selfPtr = B.CreateAlloca(self->getType());
  B.SetInsertPoint(msgSend.getInstruction());
  B.CreateStore(self, selfPtr, true);
  LLVMType *impTy = msgSend.getCalledValue()->getType();
  // Slot layout: the IMP lives at struct index 4 (see the CreateStructGEP
  // below, which must stay in sync with this type).
  LLVMType *slotTy = PointerType::getUnqual(StructType::get(PtrTy, PtrTy,
      PtrTy, IntTy, impTy, PtrTy, NULL));
  Value *slot;
  Constant *lookupFn = M->getOrInsertFunction("objc_msg_lookup_sender",
      slotTy, selfPtr->getType(), cmd->getType(), PtrTy, NULL);
  if (msgSend.isCall()) {
    slot = B.CreateCall3(lookupFn, selfPtr, cmd, Constant::getNullValue(PtrTy));
  } else {
    // For an invoke we must split the block: the lookup becomes an invoke in
    // the original block and the send moves to the new (normal-dest) block.
    InvokeInst *inv = cast<InvokeInst>(msgSend.getInstruction());
    BasicBlock *callBB = SplitBlock(lookupBB, msgSend.getInstruction(), Owner);
    removeTerminator(lookupBB);
    B.SetInsertPoint(lookupBB);
    slot = B.CreateInvoke3(lookupFn, callBB, inv->getUnwindDest(), selfPtr,
        cmd, Constant::getNullValue(PtrTy));
    // After the split, msgSend->getParent() is callBB, not lookupBB.
    addPredecssor(inv->getUnwindDest(), msgSend->getParent(), lookupBB);
    B.SetInsertPoint(msgSend.getInstruction());
  }
  // Load the IMP out of the slot and retarget the original send at it; self
  // is reloaded because the lookup may have rewritten it through selfPtr.
  Value *imp = B.CreateLoad(B.CreateStructGEP(slot, 4));
  msgSend.setArgument(selfIndex, B.CreateLoad(selfPtr, true));
  msgSend.setCalledFunction(imp);
  return CallSite(slot);
}
// InlineCallIfPossible - If it is possible to inline the specified call site, // do so and update the CallGraph for this operation. static bool InlineCallIfPossible(CallSite CS, CallGraph &CG, const std::set<Function*> &SCCFunctions, const TargetData &TD) { Function *Callee = CS.getCalledFunction(); Function *Caller = CS.getCaller(); if (!InlineFunction(CS, &CG, &TD)) return false; // If the inlined function had a higher stack protection level than the // calling function, then bump up the caller's stack protection level. if (Callee->hasFnAttr(Attribute::StackProtectReq)) Caller->addFnAttr(Attribute::StackProtectReq); else if (Callee->hasFnAttr(Attribute::StackProtect) && !Caller->hasFnAttr(Attribute::StackProtectReq)) Caller->addFnAttr(Attribute::StackProtect); // If we inlined the last possible call site to the function, delete the // function body now. if (Callee->use_empty() && Callee->hasLocalLinkage() && !SCCFunctions.count(Callee)) { DOUT << " -> Deleting dead function: " << Callee->getName() << "\n"; CallGraphNode *CalleeNode = CG[Callee]; // Remove any call graph edges from the callee to its callees. CalleeNode->removeAllCalledFunctions(); // Removing the node for callee from the call graph and delete it. delete CG.removeFunctionFromModule(CalleeNode); ++NumDeleted; } return true; }
// Compute the inline-cost threshold for this call site: start from the
// -inline-threshold value (or the opt-level default) and adjust it downward
// for optsize/cold and upward for inlinehint, based on caller/callee
// function attributes.
unsigned Inliner::getInlineThreshold(CallSite CS) const {
  int thres = InlineThreshold; // -inline-threshold or else selected by
                               // overall opt level

  // If -inline-threshold is not given, listen to the optsize attribute when it
  // would decrease the threshold.
  Function *Caller = CS.getCaller();
  bool OptSize = Caller && !Caller->isDeclaration() &&
                 Caller->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                                      Attribute::OptimizeForSize);
  if (!(InlineLimit.getNumOccurrences() > 0) && OptSize &&
      OptSizeThreshold < thres)
    thres = OptSizeThreshold;

  // Listen to the inlinehint attribute when it would increase the threshold
  // and the caller does not need to minimize its size.
  Function *Callee = CS.getCalledFunction();
  bool InlineHint = Callee && !Callee->isDeclaration() &&
                    Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                                         Attribute::InlineHint);
  if (InlineHint && HintThreshold > thres &&
      !Caller->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                            Attribute::MinSize))
    thres = HintThreshold;

  // Listen to the cold attribute when it would decrease the threshold.
  bool ColdCallee = Callee && !Callee->isDeclaration() &&
                    Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                                         Attribute::Cold);
  if (ColdCallee && ColdThreshold < thres)
    thres = ColdThreshold;

  return thres;
}
// Compute the inline-cost threshold for this call site. Starts from
// -inline-threshold (or the opt-level default), then adjusts for optsize on
// the caller, inlinehint/cold attributes on the callee, and — when PGO entry
// counts are available — profile-derived hotness/coldness of the callee.
unsigned Inliner::getInlineThreshold(CallSite CS) const {
  int Threshold = InlineThreshold; // -inline-threshold or else selected by
                                   // overall opt level

  // If -inline-threshold is not given, listen to the optsize attribute when it
  // would decrease the threshold.
  Function *Caller = CS.getCaller();
  bool OptSize = Caller && !Caller->isDeclaration() &&
                 // FIXME: Use Function::optForSize().
                 Caller->hasFnAttribute(Attribute::OptimizeForSize);
  if (!(InlineLimit.getNumOccurrences() > 0) && OptSize &&
      OptSizeThreshold < Threshold)
    Threshold = OptSizeThreshold;

  // Indirect calls and declarations get no callee-based adjustments.
  Function *Callee = CS.getCalledFunction();
  if (!Callee || Callee->isDeclaration())
    return Threshold;

  // If profile information is available, use that to adjust threshold of hot
  // and cold functions.
  // FIXME: The heuristic used below for determining hotness and coldness are
  // based on preliminary SPEC tuning and may not be optimal. Replace this with
  // a well-tuned heuristic based on *callsite* hotness and not callee hotness.
  uint64_t FunctionCount = 0, MaxFunctionCount = 0;
  bool HasPGOCounts = false;
  if (Callee->getEntryCount() &&
      Callee->getParent()->getMaximumFunctionCount()) {
    HasPGOCounts = true;
    FunctionCount = Callee->getEntryCount().getValue();
    MaxFunctionCount =
        Callee->getParent()->getMaximumFunctionCount().getValue();
  }

  // Listen to the inlinehint attribute or profile based hotness information
  // when it would increase the threshold and the caller does not need to
  // minimize its size. "Hot" here means entry count >= 30% of the module max.
  bool InlineHint =
      Callee->hasFnAttribute(Attribute::InlineHint) ||
      (HasPGOCounts &&
       FunctionCount >= (uint64_t)(0.3 * (double)MaxFunctionCount));
  if (InlineHint && HintThreshold > Threshold &&
      !Caller->hasFnAttribute(Attribute::MinSize))
    Threshold = HintThreshold;

  // Listen to the cold attribute or profile based coldness information
  // when it would decrease the threshold. "Cold" means entry count <= 1% of
  // the module max.
  bool ColdCallee =
      Callee->hasFnAttribute(Attribute::Cold) ||
      (HasPGOCounts &&
       FunctionCount <= (uint64_t)(0.01 * (double)MaxFunctionCount));
  // Command line argument for InlineLimit will override the default
  // ColdThreshold. If we have -inline-threshold but no -inlinecold-threshold,
  // do not use the default cold threshold even if it is smaller.
  if ((InlineLimit.getNumOccurrences() == 0 ||
       ColdThreshold.getNumOccurrences() > 0) &&
      ColdCallee && ColdThreshold < Threshold)
    Threshold = ColdThreshold;

  return Threshold;
}
/// If it is possible to inline the specified call site, /// do so and update the CallGraph for this operation. /// /// This function also does some basic book-keeping to update the IR. The /// InlinedArrayAllocas map keeps track of any allocas that are already /// available from other functions inlined into the caller. If we are able to /// inline this call site we attempt to reuse already available allocas or add /// any new allocas to the set if not possible. static bool InlineCallIfPossible( CallSite CS, InlineFunctionInfo &IFI, InlinedArrayAllocasTy &InlinedArrayAllocas, int InlineHistory, bool InsertLifetime, function_ref<AAResults &(Function &)> &AARGetter, ImportedFunctionsInliningStatistics &ImportedFunctionsStats) { Function *Callee = CS.getCalledFunction(); Function *Caller = CS.getCaller(); AAResults &AAR = AARGetter(*Callee); // Try to inline the function. Get the list of static allocas that were // inlined. if (!InlineFunction(CS, IFI, &AAR, InsertLifetime)) return false; if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) ImportedFunctionsStats.recordInline(*Caller, *Callee); AttributeFuncs::mergeAttributesForInlining(*Caller, *Callee); if (!DisableInlinedAllocaMerging) mergeInlinedArrayAllocas(Caller, IFI, InlinedArrayAllocas, InlineHistory); return true; }
// Inline every call to gcmalloc whose size argument is a compile-time
// constant. Returns true if the function was modified.
bool InlineMalloc::runOnFunction(Function& F) {
  // Without a gcmalloc definition in the module there is nothing to inline.
  Function* Malloc = F.getParent()->getFunction("gcmalloc");
  if (!Malloc || Malloc->isDeclaration())
    return false;

  bool Changed = false;
  for (Function::iterator BlockIt = F.begin(), BlockEnd = F.end();
       BlockIt != BlockEnd; BlockIt++) {
    BasicBlock *Block = BlockIt;
    for (BasicBlock::iterator InstIt = Block->begin(), InstEnd = Block->end();
         InstIt != InstEnd;) {
      // Advance before possibly inlining: InlineFunction would otherwise
      // invalidate the iterator we are standing on.
      Instruction *Inst = InstIt;
      InstIt++;
      CallSite Call = CallSite::get(Inst);
      if (!Call.getInstruction())
        continue;
      if (Call.getCalledFunction() != Malloc)
        continue;
      // Only inline when the requested size is a constant.
      if (!dyn_cast<Constant>(Call.getArgument(0)))
        continue;
      InlineFunctionInfo IFI(NULL, mvm::MvmModule::TheTargetData);
      Changed |= InlineFunction(Call, IFI);
      // Inlining rewrote this block's layout; resume with the next block.
      break;
    }
  }
  return Changed;
}
// Redirect the CallSiteIndex'th call site in Caller to Destination, keeping
// the call graph in sync. Fails (returns false) when either endpoint is
// missing, the call site cannot be located, or the signatures differ.
bool StructuredModuleEditor::replaceEdge(Function *Caller,
    uint64_t CallSiteIndex, Function *Destination) {
  // Both endpoints must be present in the CFG.
  if (Caller == NULL || Destination == NULL)
    return false;

  // Locate the call site we are asked to redirect.
  CallSite Site;
  if (!getCallSite(Caller, CallSiteIndex, Site))
    return false;

  // The new callee must be signature-compatible with the old one.
  Function *PrevCallee = Site.getCalledFunction();
  if (!signaturesMatch(PrevCallee, Destination))
    return false;

  // Retarget the IR call, then mirror the change on the call-graph edge.
  Site.setCalledFunction(Destination);
  CallGraphNode *DestinationNode = CG->getOrInsertFunction(Destination);
  CG->getOrInsertFunction(Caller)->replaceCallEdge(Site, Site,
                                                   DestinationNode);
  return true;
}
// deleteValue method - This method is used to remove a pointer value from the // AliasSetTracker entirely. It should be used when an instruction is deleted // from the program to update the AST. If you don't use this, you would have // dangling pointers to deleted instructions. // void AliasSetTracker::deleteValue(Value *PtrVal) { // Notify the alias analysis implementation that this value is gone. AA.deleteValue(PtrVal); // If this is a call instruction, remove the callsite from the appropriate // AliasSet. CallSite CS = CallSite::get(PtrVal); if (CS.getInstruction()) { Function *F = CS.getCalledFunction(); if (!F || !AA.doesNotAccessMemory(F)) { if (AliasSet *AS = findAliasSetForCallSite(CS)) AS->removeCallSite(CS); } } // First, look up the PointerRec for this pointer. hash_map<Value*, AliasSet::PointerRec>::iterator I = PointerMap.find(PtrVal); if (I == PointerMap.end()) return; // Noop // If we found one, remove the pointer from the alias set it is in. AliasSet::HashNodePair &PtrValEnt = *I; AliasSet *AS = PtrValEnt.second.getAliasSet(*this); // Unlink from the list of values... PtrValEnt.second.removeFromList(); // Stop using the alias set AS->dropRef(*this); PointerMap.erase(I); }
// Compute the inline-cost threshold for this call site, adjusting the base
// -inline-threshold value for optsize on the caller and the inlinehint/cold
// attributes on the callee.
unsigned Inliner::getInlineThreshold(CallSite CS) const {
  int Threshold = InlineThreshold; // -inline-threshold or else selected by
                                   // overall opt level

  // optsize on the caller lowers the threshold, but only when the user did
  // not pass -inline-threshold explicitly.
  Function *Caller = CS.getCaller();
  bool CallerOptSize = Caller && !Caller->isDeclaration() &&
                       Caller->hasFnAttribute(Attribute::OptimizeForSize);
  if (CallerOptSize && InlineLimit.getNumOccurrences() == 0 &&
      OptSizeThreshold < Threshold)
    Threshold = OptSizeThreshold;

  // inlinehint on a defined callee raises the threshold, unless the caller
  // must minimize its own size.
  Function *Callee = CS.getCalledFunction();
  bool CalleeDefined = Callee && !Callee->isDeclaration();
  if (CalleeDefined && Callee->hasFnAttribute(Attribute::InlineHint) &&
      HintThreshold > Threshold &&
      !Caller->hasFnAttribute(Attribute::MinSize))
    Threshold = HintThreshold;

  // cold on a defined callee lowers the threshold. An explicit
  // -inline-threshold suppresses the default cold threshold unless an
  // explicit -inlinecold-threshold was also given.
  bool CalleeIsCold = CalleeDefined && Callee->hasFnAttribute(Attribute::Cold);
  if (CalleeIsCold && ColdThreshold < Threshold &&
      (InlineLimit.getNumOccurrences() == 0 ||
       ColdThreshold.getNumOccurrences() > 0))
    Threshold = ColdThreshold;

  return Threshold;
}
/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made. Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode. Iterating the node's
  // own edge list while appending to it would be unsafe, so snapshot the
  // edges into CallCache and iterate the copy instead.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == 0)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (NewCall == 0) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (I->second->getFunction() == 0)
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}
// Emit a non-volatile memset(Dst, Val, Len) at the builder's current
// position and register the resulting call in the call graph (when one is
// being maintained).
void EmitMemSet(IRBuilder<>& B, Value* Dst, Value* Val, Value* Len,
                const Analysis& A) {
  // The memset intrinsic expects an i8* destination.
  Value *BytePtr = B.CreateBitCast(Dst, PointerType::getUnqual(B.getInt8Ty()));
  CallSite CS = B.CreateMemSet(BytePtr, Val, Len, 1 /*Align*/,
                               false /*isVolatile*/);
  // Keep the call graph consistent with the call we just materialized.
  if (A.CGNode)
    A.CGNode->addCalledFunction(CS,
        A.CG->getOrInsertFunction(CS.getCalledFunction()));
}
// Remove the alias set containing this call site, if any. Returns true when
// a set was found and removed.
bool AliasSetTracker::remove(CallSite CS) {
  // Calls proven not to access memory were never placed in any set.
  Function *Callee = CS.getCalledFunction();
  if (Callee && AA.doesNotAccessMemory(Callee))
    return false; // doesn't alias anything

  AliasSet *AS = findAliasSetForCallSite(CS);
  if (!AS)
    return false;
  remove(*AS);
  return true;
}
// Instrument direct calls to free(): insert a call to FreeCheckFunction,
// passing the pointer being freed, immediately before each free call.
void InstrumentFreeCalls::visitCallSite(CallSite CS) {
  Function *F = CS.getCalledFunction();
  if (!F || !F->hasName() || F->getName() != "free")
    return;
  // FIX: a function merely *named* "free" may not have the libc signature;
  // skip anything that does not take exactly one argument rather than
  // reading a non-existent operand.
  if (CS.arg_size() != 1)
    return;

  // Insert the check right before the free call.
  Instruction *I = CS.getInstruction();
  IRBuilder<> Builder(I);
  // FIX: use the call site's first *argument* instead of I->getOperand(0).
  // The operand index of the first argument depends on the CallInst operand
  // layout (the callee operand's position has moved across LLVM versions),
  // while CallSite::getArgument(0) is always the pointer being freed.
  Builder.CreateCall(FreeCheckFunction, CS.getArgument(0));
  ++FreeChecksInserted;
}
// Determine the mod/ref behavior of a call site, combining call-site
// attributes with what is known about the callee itself.
AliasAnalysis::ModRefBehavior
AliasAnalysis::getModRefBehavior(CallSite CS,
                                 std::vector<PointerAccessInfo> *Info) {
  // Can't do better than this.
  if (CS.doesNotAccessMemory())
    return DoesNotAccessMemory;

  ModRefBehavior Behavior = getModRefBehavior(CS.getCalledFunction(), Info);

  // The call site may carry a stronger guarantee than the callee advertises.
  if (Behavior != DoesNotAccessMemory && CS.onlyReadsMemory())
    return OnlyReadsMemory;
  return Behavior;
}
// Collect the known possible targets of a call site into Output. Succeeds
// only for direct calls; indirect calls currently yield no targets.
static bool getPossibleTargets(CallSite CS,
                               SmallVectorImpl<Function *> &Output) {
  Function *Direct = CS.getCalledFunction();
  if (!Direct) {
    // TODO: If the call is indirect, we might be able to enumerate all
    // potential targets of the call and return them, rather than just
    // failing.
    return false;
  }
  Output.push_back(Direct);
  return true;
}
/// \brief Get the inline cost for the always-inliner. /// /// The always inliner *only* handles functions which are marked with the /// attribute to force inlining. As such, it is dramatically simpler and avoids /// using the powerful (but expensive) inline cost analysis. Instead it uses /// a very simple and boring direct walk of the instructions looking for /// impossible-to-inline constructs. /// /// Note, it would be possible to go to some lengths to cache the information /// computed here, but as we only expect to do this for relatively few and /// small functions which have the explicit attribute to force inlining, it is /// likely not worth it in practice. InlineCost AlwaysInlinerLegacyPass::getInlineCost(CallSite CS) { Function *Callee = CS.getCalledFunction(); // Only inline direct calls to functions with always-inline attributes // that are viable for inlining. FIXME: We shouldn't even get here for // declarations. if (Callee && !Callee->isDeclaration() && CS.hasFnAttr(Attribute::AlwaysInline) && isInlineViable(*Callee)) return InlineCost::getAlways(); return InlineCost::getNever(); }
void MemoryInstrumenter::instrumentFork(const CallSite &CS) { Instruction *Ins = CS.getInstruction(); assert(!Ins->isTerminator()); Function *Callee = CS.getCalledFunction(); StringRef CalleeName = Callee->getName(); assert(CalleeName == "fork" || CalleeName == "vfork"); BasicBlock::iterator Loc = Ins; CallInst::Create(BeforeForkHook, "", Loc); ++Loc; CallInst::Create(AfterForkHook, Ins, "", Loc); }
// Resolve the set of possible callees of a call site: trivially for direct
// calls (including direct calls hidden behind a bitcast), and via the
// DSCallGraph / DSGraph for indirect calls.
FunctionList DSNodeEquivs::getCallees(CallSite &CS) {
  const Function *CalledFunc = CS.getCalledFunction();

  // If the called function is casted from one function type to another, peer
  // into the cast instruction and pull out the actual function being called.
  if (ConstantExpr *CExpr = dyn_cast<ConstantExpr>(CS.getCalledValue())) {
    if (CExpr->getOpcode() == Instruction::BitCast &&
        isa<Function>(CExpr->getOperand(0)))
      CalledFunc = cast<Function>(CExpr->getOperand(0));
  }

  FunctionList Callees;

  // Direct calls are simple.
  if (CalledFunc) {
    Callees.push_back(CalledFunc);
    return Callees;
  }

  // Okay, indirect call.
  // Ask the DSCallGraph what this calls...
  TDDataStructures &TDDS = getAnalysis<TDDataStructures>();
  const DSCallGraph &DSCG = TDDS.getCallGraph();

  DSCallGraph::callee_iterator CalleeIt = DSCG.callee_begin(CS);
  DSCallGraph::callee_iterator CalleeItEnd = DSCG.callee_end(CS);
  for (; CalleeIt != CalleeItEnd; ++CalleeIt)
    Callees.push_back(*CalleeIt);

  // If the callgraph doesn't give us what we want, query the DSGraph
  // ourselves.
  if (Callees.empty()) {
    Instruction *Inst = CS.getInstruction();
    Function *Parent = Inst->getParent()->getParent();
    Value *CalledValue = CS.getCalledValue();
    DSNodeHandle &NH = TDDS.getDSGraph(*Parent)->getNodeForValue(CalledValue);

    if (!NH.isNull()) {
      DSNode *Node = NH.getNode();
      Node->addFullFunctionList(Callees);
    }
  }

  // For debugging, dump out the callsites we are unable to get callees for.
  DEBUG(
  if (Callees.empty()) {
    errs() << "Failed to get callees for callsite:\n";
    CS.getInstruction()->dump();
  });
  // NOTE(review): the visible text of this function ends here — the trailing
  // `return Callees;` and closing brace appear to have been truncated in
  // this chunk; confirm against the full source file.
// For direct calls to malloc/valloc, grow the requested allocation size by
// one byte by rewriting the size argument in place.
void Preparer::expandMalloc(CallSite CS) {
  Function *Callee = CS.getCalledFunction();
  assert(Callee);
  StringRef CalleeName = Callee->getName();
  // Anything other than malloc/valloc is left untouched.
  if (CalleeName != "malloc" && CalleeName != "valloc")
    return;

  // Build `size + 1` right before the call and swap it in as argument 0.
  Value *Size = CS.getArgument(0);
  Value *One = ConstantInt::get(cast<IntegerType>(Size->getType()), 1);
  Value *ExpandedSize = BinaryOperator::Create(Instruction::Add, Size, One,
                                               "expanded.size",
                                               CS.getInstruction());
  CS.setArgument(0, ExpandedSize);
}
// Checks to see if a given CallSite is making an indirect call, including // cases where the indirect call is made through a bitcast. static bool isIndirectCall(CallSite &CS) { if (CS.getCalledFunction()) return false; // Check the value to see if it is merely a bitcast of a function. In // this case, it will translate to a direct function call in the resulting // assembly, so we won't treat it as an indirect call here. const Value *V = CS.getCalledValue(); if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) { return !(CE->isCast() && isa<Function>(CE->getOperand(0))); } // Otherwise, since we know it's a call, it must be an indirect call return true; }
// Add a call site to the tracker. Returns true when no merging with an
// existing alias set was needed (the call touches no memory, or a new set
// was created for it); false when it was folded into an existing set.
bool AliasSetTracker::add(CallSite CS) {
  // Calls proven not to access memory belong to no alias set at all.
  if (Function *Callee = CS.getCalledFunction())
    if (AA.doesNotAccessMemory(Callee))
      return true; // doesn't alias anything

  // Fold into an existing set when one already aliases this call.
  if (AliasSet *AS = findAliasSetForCallSite(CS)) {
    AS->addCallSite(CS, AA);
    return false;
  }

  // Otherwise start a brand-new set for it.
  AliasSets.push_back(new AliasSet());
  AliasSet *AS = &AliasSets.back();
  AS->addCallSite(CS, AA);
  return true;
}
// Answer whether the call site can modify/reference the memory at P,
// refining the conservative answer with per-function records about
// non-address-taken globals before delegating to the base analysis.
AliasAnalysis::ModRefResult
GlobalsModRef::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
  unsigned Known = ModRef;

  // If we are asking for mod/ref info of a direct call with a pointer to a
  // global we are tracking, return information if we have it.
  GlobalValue *GV = dyn_cast<GlobalValue>(P->getUnderlyingObject());
  if (GV && GV->hasLocalLinkage() && NonAddressTakenGlobals.count(GV))
    if (Function *F = CS.getCalledFunction())
      if (FunctionRecord *FR = getFunctionInfo(F))
        Known = FR->getInfoForGlobal(GV);

  if (Known == NoModRef)
    return NoModRef; // No need to query other mod/ref analyses
  return ModRefResult(Known & AliasAnalysis::getModRefInfo(CS, P, Size));
}
// Compute the (possibly negative) bonus to apply to the inline cost of this
// call site: last-call-to-static bonus, noreturn penalty, coldcc penalty,
// and per-argument constant bonuses.
int InlineCostAnalyzer::getInlineBonuses(CallSite CS, Function *Callee) {
  // Get information about the callee.
  FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
  if (CalleeFI->Metrics.NumBlocks == 0)
    CalleeFI->analyzeFunction(Callee, TD);

  bool isDirectCall = CS.getCalledFunction() == Callee;
  Instruction *TheCall = CS.getInstruction();
  int Bonus = 0;

  // If there is only one call of the function, and it has internal linkage,
  // make it almost guaranteed to be inlined.
  //
  if (Callee->hasLocalLinkage() && Callee->hasOneUse() && isDirectCall)
    Bonus += InlineConstants::LastCallToStaticBonus;

  // If the instruction after the call, or if the normal destination of the
  // invoke is an unreachable instruction, the function is noreturn. As such,
  // there is little point in inlining this.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      Bonus += InlineConstants::NoreturnPenalty;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(TheCall)))
    Bonus += InlineConstants::NoreturnPenalty;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (Callee->getCallingConv() == CallingConv::Cold)
    Bonus += InlineConstants::ColdccPenalty;

  // Add to the inline quality for properties that make the call valuable to
  // inline. This includes factors that indicate that the result of inlining
  // the function will be optimizable. Currently this just looks at arguments
  // passed into the function.
  //
  // Note: I and FI advance in lockstep, pairing each actual argument with
  // its formal parameter; the loop bound is the formal list.
  CallSite::arg_iterator I = CS.arg_begin();
  for (Function::arg_iterator FI = Callee->arg_begin(), FE = Callee->arg_end();
       FI != FE; ++I, ++FI)
    // Compute any constant bonus due to inlining we want to give here.
    if (isa<Constant>(I))
      Bonus += CountBonusForConstant(FI, cast<Constant>(I));

  return Bonus;
}
// getModRefInfo - Check to see if the specified callsite can clobber the // specified memory object. // AliasAnalysis::ModRefResult LibCallAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) { ModRefResult MRInfo = ModRef; // If this is a direct call to a function that LCI knows about, get the // information about the runtime function. if (LCI) { if (Function *F = CS.getCalledFunction()) { if (const LibCallFunctionInfo *FI = LCI->getFunctionInfo(F)) { MRInfo = ModRefResult(MRInfo & AnalyzeLibCallDetails(FI, CS, P, Size)); if (MRInfo == NoModRef) return NoModRef; } } } // The AliasAnalysis base class has some smarts, lets use them. return (ModRefResult)(MRInfo | AliasAnalysis::getModRefInfo(CS, P, Size)); }
// Record a call site in this alias set and update the set's alias/access
// classification according to the call's mod/ref behavior.
void AliasSet::addCallSite(CallSite CS, AliasAnalysis &AA) {
  CallSites.push_back(CS);

  if (Function *Callee = CS.getCalledFunction()) {
    AliasAnalysis::ModRefBehavior Behavior = AA.getModRefBehavior(Callee, CS);
    // A call that touches no memory leaves the set's properties unchanged.
    if (Behavior == AliasAnalysis::DoesNotAccessMemory)
      return;
    // A read-only call may alias, but contributes only a Ref access.
    if (Behavior == AliasAnalysis::OnlyReadsMemory) {
      AliasTy = MayAlias;
      AccessTy |= Refs;
      return;
    }
  }

  // FIXME: This should use mod/ref information to make this not suck so bad
  AliasTy = MayAlias;
  AccessTy = ModRef;
}
// Compute the inline-cost threshold for this call site from the base
// -inline-threshold, lowered for optsize callers and raised for inlinehint
// callees.
unsigned Inliner::getInlineThreshold(CallSite CS) const {
  int Threshold = InlineThreshold;

  // Listen to optsize when -inline-limit is not given.
  Function *Caller = CS.getCaller();
  bool CallerOptSize = Caller && !Caller->isDeclaration() &&
                       Caller->hasFnAttr(Attribute::OptimizeForSize);
  if (CallerOptSize && InlineLimit.getNumOccurrences() == 0)
    Threshold = OptSizeThreshold;

  // Listen to inlinehint when it would increase the threshold.
  Function *Callee = CS.getCalledFunction();
  bool CalleeHasHint = Callee && !Callee->isDeclaration() &&
                       Callee->hasFnAttr(Attribute::InlineHint);
  if (CalleeHasHint && HintThreshold > Threshold)
    Threshold = HintThreshold;

  return Threshold;
}
// addToCallGraph - Add a function to the call graph, and link the node to all // of the functions that it calls. // void addToCallGraph(Function *F) { CallGraphNode *Node = getOrInsertFunction(F); // If this function has external linkage, anything could call it. if (!F->hasLocalLinkage()) { ExternalCallingNode->addCalledFunction(CallSite(), Node); // Found the entry point? if (F->getName() == "main") { if (Root) // Found multiple external mains? Don't pick one. Root = ExternalCallingNode; else Root = Node; // Found a main, keep track of it! } } // Loop over all of the users of the function, looking for non-call uses. for (Value::use_iterator I = F->use_begin(), E = F->use_end(); I != E; ++I) if ((!isa<CallInst>(I) && !isa<InvokeInst>(I)) || !CallSite(cast<Instruction>(I)).isCallee(I)) { // Not a call, or being used as a parameter rather than as the callee. ExternalCallingNode->addCalledFunction(CallSite(), Node); break; } // If this function is not defined in this translation unit, it could call // anything. if (F->isDeclaration() && !F->isIntrinsic()) Node->addCalledFunction(CallSite(), CallsExternalNode); // Look for calls by this function. for (Function::iterator BB = F->begin(), BBE = F->end(); BB != BBE; ++BB) for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE; ++II) { CallSite CS = CallSite::get(II); if (CS.getInstruction() && !isa<DbgInfoIntrinsic>(II)) { const Function *Callee = CS.getCalledFunction(); if (Callee) Node->addCalledFunction(CS, getOrInsertFunction(Callee)); else Node->addCalledFunction(CS, CallsExternalNode); } } }
// Determine whether this call site may interfere with anything in this alias
// set — either with a call site already recorded in the set, or with one of
// the tracked pointers.
bool AliasSet::aliasesCallSite(CallSite CS, AliasAnalysis &AA) const {
  // A call that never touches memory can't alias anything here.
  if (Function *Callee = CS.getCalledFunction())
    if (AA.doesNotAccessMemory(Callee))
      return false;

  // Without mod/ref information for calls we must be conservative.
  if (AA.hasNoModRefInfoForCalls())
    return true;

  // Compare against every call site in the set, in both directions.
  for (unsigned Idx = 0, NumCS = CallSites.size(); Idx != NumCS; ++Idx) {
    if (AA.getModRefInfo(CallSites[Idx], CS) != AliasAnalysis::NoModRef)
      return true;
    if (AA.getModRefInfo(CS, CallSites[Idx]) != AliasAnalysis::NoModRef)
      return true;
  }

  // Compare against every pointer the set tracks.
  for (iterator It = begin(), End = end(); It != End; ++It)
    if (AA.getModRefInfo(CS, It.getPointer(), It.getSize()) !=
        AliasAnalysis::NoModRef)
      return true;

  return false;
}
/// \brief Get the inline cost for the always-inliner. /// /// The always inliner *only* handles functions which are marked with the /// attribute to force inlining. As such, it is dramatically simpler and avoids /// using the powerful (but expensive) inline cost analysis. Instead it uses /// a very simple and boring direct walk of the instructions looking for /// impossible-to-inline constructs. /// /// Note, it would be possible to go to some lengths to cache the information /// computed here, but as we only expect to do this for relatively few and /// small functions which have the explicit attribute to force inlining, it is /// likely not worth it in practice. InlineCost AlwaysInliner::getInlineCost(CallSite CS) { Function *Callee = CS.getCalledFunction(); // We assume indirect calls aren't calling an always-inline function. if (!Callee) return InlineCost::getNever(); // We can't inline calls to external functions. // FIXME: We shouldn't even get here. if (Callee->isDeclaration()) return InlineCost::getNever(); // Return never for anything not marked as always inline. if (!Callee->getFnAttributes().hasAttribute(Attributes::AlwaysInline)) return InlineCost::getNever(); // Do some minimal analysis to preclude non-viable functions. if (!isInlineViable(*Callee)) return InlineCost::getNever(); // Otherwise, force inlining. return InlineCost::getAlways(); }