// Compute the mod/ref dependence of call site CS1 on call site CS2 by
// combining each call's declared memory behavior with per-argument queries,
// then merging with the next alias analysis in the chain.  The result starts
// at the conservative top (ModRef) and is only ever narrowed.
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
  // Don't assert AA because BasicAA calls us in order to make use of the
  // logic here.

  // If CS1 or CS2 are readnone, they don't interact.
  ModRefBehavior CS1B = getModRefBehavior(CS1);
  if (CS1B == DoesNotAccessMemory) return NoModRef;

  ModRefBehavior CS2B = getModRefBehavior(CS2);
  if (CS2B == DoesNotAccessMemory) return NoModRef;

  // If they both only read from memory, there is no dependence.
  if (CS1B == OnlyReadsMemory && CS2B == OnlyReadsMemory)
    return NoModRef;

  // Conservative starting point; refined below.
  AliasAnalysis::ModRefResult Mask = ModRef;

  // If CS1 only reads memory, the only dependence on CS2 can be
  // from CS1 reading memory written by CS2.
  if (CS1B == OnlyReadsMemory)
    Mask = ModRefResult(Mask & Ref);

  // If CS2 only access memory through arguments, accumulate the mod/ref
  // information from CS1's references to the memory referenced by
  // CS2's arguments.
  if (CS2B == AccessesArguments) {
    AliasAnalysis::ModRefResult R = NoModRef;
    for (ImmutableCallSite::arg_iterator
         I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
      // Union in CS1's effect on each pointer argument of CS2; once R has
      // saturated to Mask no further argument can change the answer.
      R = ModRefResult((R | getModRefInfo(CS1, *I, UnknownSize)) & Mask);
      if (R == Mask)
        break;
    }
    return R;
  }

  // If CS1 only accesses memory through arguments, check if CS2 references
  // any of the memory referenced by CS1's arguments.  If not, return NoModRef.
  if (CS1B == AccessesArguments) {
    AliasAnalysis::ModRefResult R = NoModRef;
    for (ImmutableCallSite::arg_iterator
         I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I)
      if (getModRefInfo(CS2, *I, UnknownSize) != NoModRef) {
        // Some argument memory of CS1 is touched by CS2; keep the current
        // (possibly already narrowed) mask and stop scanning.
        R = Mask;
        break;
      }
    if (R == NoModRef) return R;
  }

  // If this is BasicAA, don't forward.
  if (!AA) return Mask;

  // Otherwise, fall back to the next AA in the chain. But we can merge
  // in any mask we've managed to compute.
  return ModRefResult(AA->getModRefInfo(CS1, CS2) & Mask);
}
/// Test whether the given instruction can result in a reference count /// modification (positive or negative) for the pointer's object. bool llvm::objcarc::CanAlterRefCount(const Instruction *Inst, const Value *Ptr, ProvenanceAnalysis &PA, InstructionClass Class) { switch (Class) { case IC_Autorelease: case IC_AutoreleaseRV: case IC_IntrinsicUser: case IC_User: // These operations never directly modify a reference count. return false; default: break; } ImmutableCallSite CS = static_cast<const Value *>(Inst); assert(CS && "Only calls can alter reference counts!"); // See if AliasAnalysis can help us with the call. AliasAnalysis::ModRefBehavior MRB = PA.getAA()->getModRefBehavior(CS); if (AliasAnalysis::onlyReadsMemory(MRB)) return false; if (AliasAnalysis::onlyAccessesArgPointees(MRB)) { for (ImmutableCallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); I != E; ++I) { const Value *Op = *I; if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) && PA.related(Ptr, Op)) return true; } return false; } // Assume the worst. return true; }
// Emit COPY constraints binding each pointer-typed formal parameter of f to
// the corresponding actual at call site cs; pointer actuals in the varargs
// tail are additionally copied into f's vararg node.
void Andersen::addArgumentConstraintForCall(ImmutableCallSite cs,
                                            const Function* f) {
  Function::const_arg_iterator formalItr = f->arg_begin();
  ImmutableCallSite::arg_iterator actualItr = cs.arg_begin();

  // Walk formals and actuals in lockstep until either list runs out.
  for (; formalItr != f->arg_end() && actualItr != cs.arg_end();
       ++formalItr, ++actualItr) {
    const Argument* formal = &*formalItr;
    const Value* actual = *actualItr;

    // Non-pointer formals contribute nothing to the points-to graph.
    if (!formal->getType()->isPointerTy())
      continue;

    NodeIndex fIndex = nodeFactory.getValueNodeFor(formal);
    assert(fIndex != AndersNodeFactory::InvalidIndex &&
           "Failed to find formal arg node!");
    if (actual->getType()->isPointerTy()) {
      NodeIndex aIndex = nodeFactory.getValueNodeFor(actual);
      assert(aIndex != AndersNodeFactory::InvalidIndex &&
             "Failed to find actual arg node!");
      constraints.emplace_back(AndersConstraint::COPY, fIndex, aIndex);
    } else {
      // Pointer formal fed by a non-pointer actual: be conservative and
      // let it point to anything.
      constraints.emplace_back(AndersConstraint::COPY, fIndex,
                               nodeFactory.getUniversalPtrNode());
    }
  }

  // Copy all pointers passed through the varargs section to the varargs node
  if (f->getFunctionType()->isVarArg()) {
    for (; actualItr != cs.arg_end(); ++actualItr) {
      const Value* actual = *actualItr;
      if (actual->getType()->isPointerTy()) {
        NodeIndex aIndex = nodeFactory.getValueNodeFor(actual);
        assert(aIndex != AndersNodeFactory::InvalidIndex &&
               "Failed to find actual arg node!");
        NodeIndex vaIndex = nodeFactory.getVarargNodeFor(f);
        assert(vaIndex != AndersNodeFactory::InvalidIndex &&
               "Failed to find vararg node!");
        constraints.emplace_back(AndersConstraint::COPY, vaIndex, aIndex);
      }
    }
  }
}
// Compute what call site CS may do (Mod/Ref) to the memory location Loc by
// intersecting the answers of all registered AAs, then further refining the
// result using the aggregate mod/ref behavior of the call.
ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
                                    const MemoryLocation &Loc) {
  ModRefInfo Result = MRI_ModRef;

  for (const auto &AA : AAs) {
    Result = ModRefInfo(Result & AA->getModRefInfo(CS, Loc));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == MRI_NoModRef)
      return Result;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.
  auto MRB = getModRefBehavior(CS);
  if (MRB == FMRB_DoesNotAccessMemory ||
      MRB == FMRB_OnlyAccessesInaccessibleMem)
    return MRI_NoModRef;

  // Narrow to Ref or Mod when the call is known read-only or write-only.
  if (onlyReadsMemory(MRB))
    Result = ModRefInfo(Result & MRI_Ref);
  else if (doesNotReadMemory(MRB))
    Result = ModRefInfo(Result & MRI_Mod);

  if (onlyAccessesArgPointees(MRB) || onlyAccessesInaccessibleOrArgMem(MRB)) {
    bool DoesAlias = false;
    ModRefInfo AllArgsMask = MRI_NoModRef;
    if (doesAccessArgPointees(MRB)) {
      // Union the per-argument mod/ref effects over arguments whose pointee
      // memory may alias Loc.
      for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) {
        const Value *Arg = *AI;
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
        MemoryLocation ArgLoc = MemoryLocation::getForArgument(CS, ArgIdx, TLI);
        AliasResult ArgAlias = alias(ArgLoc, Loc);
        if (ArgAlias != NoAlias) {
          ModRefInfo ArgMask = getArgModRefInfo(CS, ArgIdx);
          DoesAlias = true;
          AllArgsMask = ModRefInfo(AllArgsMask | ArgMask);
        }
      }
    }
    // No argument pointee can alias Loc, so the call cannot touch it.
    if (!DoesAlias)
      return MRI_NoModRef;
    Result = ModRefInfo(Result & AllArgsMask);
  }

  // If Loc is a constant memory location, the call definitely could not
  // modify the memory location.
  if ((Result & MRI_Mod) && pointsToConstantMemory(Loc, /*OrLocal*/ false))
    Result = ModRefInfo(Result & ~MRI_Mod);

  return Result;
}
// Determine what call site CS may do to location Loc, refining the result
// with the call's declared behavior and per-argument locations, then merging
// with the next alias analysis in the chain.
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(ImmutableCallSite CS, const Location &Loc) {
  assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");

  ModRefBehavior MRB = getModRefBehavior(CS);
  if (MRB == DoesNotAccessMemory)
    return NoModRef;

  // Conservative starting point, narrowed below.
  ModRefResult Mask = ModRef;
  if (onlyReadsMemory(MRB))
    Mask = Ref;

  if (onlyAccessesArgPointees(MRB)) {
    bool doesAlias = false;
    ModRefResult AllArgsMask = NoModRef;
    if (doesAccessArgPointees(MRB)) {
      for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(),
             AE = CS.arg_end(); AI != AE; ++AI) {
        const Value *Arg = *AI;
        if (!Arg->getType()->isPointerTy())
          continue;
        // getArgLocation also reports, via ArgMask, what the call may do to
        // this argument's pointee memory.
        ModRefResult ArgMask;
        Location CSLoc =
          getArgLocation(CS, (unsigned) std::distance(CS.arg_begin(), AI),
                         ArgMask);
        if (!isNoAlias(CSLoc, Loc)) {
          doesAlias = true;
          AllArgsMask = ModRefResult(AllArgsMask | ArgMask);
        }
      }
    }
    // No argument pointee aliases Loc: the call cannot touch Loc at all.
    if (!doesAlias)
      return NoModRef;
    Mask = ModRefResult(Mask & AllArgsMask);
  }

  // If Loc is a constant memory location, the call definitely could not
  // modify the memory location.
  if ((Mask & Mod) && pointsToConstantMemory(Loc))
    Mask = ModRefResult(Mask & ~Mod);

  // If this is the end of the chain, don't forward.
  if (!AA) return Mask;

  // Otherwise, fall back to the next AA in the chain. But we can merge
  // in any mask we've managed to compute.
  return ModRefResult(AA->getModRefInfo(CS, Loc) & Mask);
}
// Older variant: determine what call site CS may do to Loc using only the
// coarse ModRefBehavior categories, then merge with the chained AA.
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(ImmutableCallSite CS, const Location &Loc) {
  // Don't assert AA because BasicAA calls us in order to make use of the
  // logic here.
  ModRefBehavior MRB = getModRefBehavior(CS);
  if (MRB == DoesNotAccessMemory)
    return NoModRef;

  ModRefResult Mask = ModRef;
  if (MRB == OnlyReadsMemory)
    Mask = Ref;
  else if (MRB == AliasAnalysis::AccessesArguments) {
    // The call only touches memory via its pointer arguments: if none of
    // them aliases Loc, the call cannot affect Loc.
    bool doesAlias = false;
    for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
         AI != AE; ++AI)
      if (!isNoAlias(Location(*AI), Loc)) {
        doesAlias = true;
        break;
      }

    if (!doesAlias)
      return NoModRef;
  }

  // If Loc is a constant memory location, the call definitely could not
  // modify the memory location.
  if ((Mask & Mod) && pointsToConstantMemory(Loc))
    Mask = ModRefResult(Mask & ~Mod);

  // If this is BasicAA, don't forward.
  if (!AA) return Mask;

  // Otherwise, fall back to the next AA in the chain. But we can merge
  // in any mask we've managed to compute.
  return ModRefResult(AA->getModRefInfo(CS, Loc) & Mask);
}
std::vector<FlowRecord> TaintReachable::process(const ContextID ctxt, const ImmutableCallSite cs) const { DEBUG(errs() << "Using taint reachable signature for: " << *cs.getInstruction() << "\n"); FlowRecord exp(false,ctxt,ctxt); FlowRecord imp(true,ctxt,ctxt); // implicit from the pc of the call site and the function pointer imp.addSourceValue(*cs->getParent()); imp.addSourceValue(*cs.getCalledValue()); // Sources and sinks of the args for (ImmutableCallSite::arg_iterator arg = cs.arg_begin(), end = cs.arg_end(); arg != end; ++arg) { // every argument's value is a source exp.addSourceValue(**arg); // if the argument is a pointer, everything it reaches is a source // and everything it reaches is a sink if ((*arg)->getType()->isPointerTy()) { exp.addSourceReachablePtr(**arg); imp.addSourceValue(**arg); exp.addSinkReachablePtr(**arg); imp.addSinkReachablePtr(**arg); } } // if the function has a return value it is a sink if (!cs->getType()->isVoidTy()) { imp.addSinkValue(*cs.getInstruction()); exp.addSinkValue(*cs.getInstruction()); } std::vector<FlowRecord> flows; flows.push_back(imp); flows.push_back(exp); return flows; }
std::vector<FlowRecord> ArgsToRet::process(const ContextID ctxt, const ImmutableCallSite cs) const { DEBUG(errs() << "Using ArgsToRet reachable signature for: " << *cs.getInstruction() << "\n"); std::vector<FlowRecord> flows; if (!cs->getType()->isVoidTy()) { FlowRecord exp(false,ctxt,ctxt); // Sources and sinks of the args for (ImmutableCallSite::arg_iterator arg = cs.arg_begin(), end = cs.arg_end(); arg != end; ++arg) { // every argument's value is a source exp.addSourceValue(**arg); } // if the function has a return value it is a sink exp.addSinkValue(*cs.getInstruction()); flows.push_back(exp); } return flows; }
std::vector<FlowRecord> OverflowChecks::process(const ContextID ctxt, const ImmutableCallSite cs) const { DEBUG(errs() << "Using OverflowChecks signature...\n"); FlowRecord exp(false,ctxt,ctxt); FlowRecord imp(true,ctxt,ctxt); imp.addSourceValue(*cs->getParent()); // Add all argument values as sources for (ImmutableCallSite::arg_iterator arg = cs.arg_begin(), end = cs.arg_end(); arg != end; ++arg) exp.addSourceValue(**arg); assert(!cs->getType()->isVoidTy() && "Found 'void' overflow check?"); // And the return value as a sink exp.addSinkValue(*cs.getInstruction()); imp.addSinkValue(*cs.getInstruction()); std::vector<FlowRecord> flows; flows.push_back(imp); flows.push_back(exp); return flows; }
// Compute the mod/ref dependence of call site CS1 on call site CS2 by
// intersecting all registered AAs' answers, then refining via the declared
// memory behavior of each call and per-argument MemoryLocation queries.
ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
                                    ImmutableCallSite CS2) {
  ModRefInfo Result = MRI_ModRef;

  for (const auto &AA : AAs) {
    Result = ModRefInfo(Result & AA->getModRefInfo(CS1, CS2));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == MRI_NoModRef)
      return Result;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.

  // If CS1 or CS2 are readnone, they don't interact.
  auto CS1B = getModRefBehavior(CS1);
  if (CS1B == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  auto CS2B = getModRefBehavior(CS2);
  if (CS2B == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  // If they both only read from memory, there is no dependence.
  if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
    return MRI_NoModRef;

  // If CS1 only reads memory, the only dependence on CS2 can be
  // from CS1 reading memory written by CS2.
  if (onlyReadsMemory(CS1B))
    Result = ModRefInfo(Result & MRI_Ref);
  else if (doesNotReadMemory(CS1B))
    Result = ModRefInfo(Result & MRI_Mod);

  // If CS2 only access memory through arguments, accumulate the mod/ref
  // information from CS1's references to the memory referenced by
  // CS2's arguments.
  if (onlyAccessesArgPointees(CS2B)) {
    ModRefInfo R = MRI_NoModRef;
    if (doesAccessArgPointees(CS2B)) {
      for (auto I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I);
        auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, TLI);

        // ArgMask indicates what CS2 might do to CS2ArgLoc, and the dependence
        // of CS1 on that location is the inverse.
        ModRefInfo ArgMask = getArgModRefInfo(CS2, CS2ArgIdx);
        if (ArgMask == MRI_Mod)
          ArgMask = MRI_ModRef;
        else if (ArgMask == MRI_Ref)
          ArgMask = MRI_Mod;

        ArgMask = ModRefInfo(ArgMask & getModRefInfo(CS1, CS2ArgLoc));

        // Accumulate; stop once R saturates to the already-computed Result.
        R = ModRefInfo((R | ArgMask) & Result);
        if (R == Result)
          break;
      }
    }
    return R;
  }

  // If CS1 only accesses memory through arguments, check if CS2 references
  // any of the memory referenced by CS1's arguments. If not, return NoModRef.
  if (onlyAccessesArgPointees(CS1B)) {
    ModRefInfo R = MRI_NoModRef;
    if (doesAccessArgPointees(CS1B)) {
      for (auto I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I);
        auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, TLI);

        // ArgMask indicates what CS1 might do to CS1ArgLoc; if CS1 might Mod
        // CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If CS1
        // might Ref, then we care only about a Mod by CS2.
        ModRefInfo ArgMask = getArgModRefInfo(CS1, CS1ArgIdx);
        ModRefInfo ArgR = getModRefInfo(CS2, CS1ArgLoc);
        if (((ArgMask & MRI_Mod) != MRI_NoModRef &&
             (ArgR & MRI_ModRef) != MRI_NoModRef) ||
            ((ArgMask & MRI_Ref) != MRI_NoModRef &&
             (ArgR & MRI_Mod) != MRI_NoModRef))
          R = ModRefInfo((R | ArgMask) & Result);

        if (R == Result)
          break;
      }
    }
    return R;
  }

  return Result;
}
// Chained-AA variant: compute the mod/ref dependence of CS1 on CS2 using the
// calls' declared behaviors and getArgLocation per-argument queries, then
// intersect with the next alias analysis in the chain.
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
  assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");

  // If CS1 or CS2 are readnone, they don't interact.
  ModRefBehavior CS1B = getModRefBehavior(CS1);
  if (CS1B == DoesNotAccessMemory) return NoModRef;

  ModRefBehavior CS2B = getModRefBehavior(CS2);
  if (CS2B == DoesNotAccessMemory) return NoModRef;

  // If they both only read from memory, there is no dependence.
  if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
    return NoModRef;

  AliasAnalysis::ModRefResult Mask = ModRef;

  // If CS1 only reads memory, the only dependence on CS2 can be
  // from CS1 reading memory written by CS2.
  if (onlyReadsMemory(CS1B))
    Mask = ModRefResult(Mask & Ref);

  // If CS2 only access memory through arguments, accumulate the mod/ref
  // information from CS1's references to the memory referenced by
  // CS2's arguments.
  if (onlyAccessesArgPointees(CS2B)) {
    AliasAnalysis::ModRefResult R = NoModRef;
    if (doesAccessArgPointees(CS2B)) {
      for (ImmutableCallSite::arg_iterator
           I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        if (!Arg->getType()->isPointerTy())
          continue;
        ModRefResult ArgMask;
        Location CS2Loc =
          getArgLocation(CS2, (unsigned) std::distance(CS2.arg_begin(), I),
                         ArgMask);
        // ArgMask indicates what CS2 might do to CS2Loc, and the dependence of
        // CS1 on that location is the inverse.
        if (ArgMask == Mod)
          ArgMask = ModRef;
        else if (ArgMask == Ref)
          ArgMask = Mod;

        // Accumulate; once R saturates to Mask no argument can widen it.
        R = ModRefResult((R | (getModRefInfo(CS1, CS2Loc) & ArgMask)) & Mask);
        if (R == Mask)
          break;
      }
    }
    return R;
  }

  // If CS1 only accesses memory through arguments, check if CS2 references
  // any of the memory referenced by CS1's arguments. If not, return NoModRef.
  if (onlyAccessesArgPointees(CS1B)) {
    AliasAnalysis::ModRefResult R = NoModRef;
    if (doesAccessArgPointees(CS1B)) {
      for (ImmutableCallSite::arg_iterator
           I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        if (!Arg->getType()->isPointerTy())
          continue;
        ModRefResult ArgMask;
        Location CS1Loc = getArgLocation(
            CS1, (unsigned)std::distance(CS1.arg_begin(), I), ArgMask);
        // ArgMask indicates what CS1 might do to CS1Loc; if CS1 might Mod
        // CS1Loc, then we care about either a Mod or a Ref by CS2. If CS1
        // might Ref, then we care only about a Mod by CS2.
        ModRefResult ArgR = getModRefInfo(CS2, CS1Loc);
        if (((ArgMask & Mod) != NoModRef && (ArgR & ModRef) != NoModRef) ||
            ((ArgMask & Ref) != NoModRef && (ArgR & Mod) != NoModRef))
          R = ModRefResult((R | ArgMask) & Mask);

        if (R == Mask)
          break;
      }
    }
    return R;
  }

  // If this is the end of the chain, don't forward.
  if (!AA) return Mask;

  // Otherwise, fall back to the next AA in the chain. But we can merge
  // in any mask we've managed to compute.
  return ModRefResult(AA->getModRefInfo(CS1, CS2) & Mask);
}
// TBAA-tag variant: compute the mod/ref dependence of CS1 on CS2; argument
// locations are built directly from the argument value, UnknownSize, and the
// call instruction's !tbaa metadata node.
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
  assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");

  // If CS1 or CS2 are readnone, they don't interact.
  ModRefBehavior CS1B = getModRefBehavior(CS1);
  if (CS1B == DoesNotAccessMemory) return NoModRef;

  ModRefBehavior CS2B = getModRefBehavior(CS2);
  if (CS2B == DoesNotAccessMemory) return NoModRef;

  // If they both only read from memory, there is no dependence.
  if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
    return NoModRef;

  AliasAnalysis::ModRefResult Mask = ModRef;

  // If CS1 only reads memory, the only dependence on CS2 can be
  // from CS1 reading memory written by CS2.
  if (onlyReadsMemory(CS1B))
    Mask = ModRefResult(Mask & Ref);

  // If CS2 only access memory through arguments, accumulate the mod/ref
  // information from CS1's references to the memory referenced by
  // CS2's arguments.
  if (onlyAccessesArgPointees(CS2B)) {
    AliasAnalysis::ModRefResult R = NoModRef;
    if (doesAccessArgPointees(CS2B)) {
      // The call's TBAA tag applies to every argument location it accesses.
      MDNode *CS2Tag = CS2.getInstruction()->getMetadata(LLVMContext::MD_tbaa);
      for (ImmutableCallSite::arg_iterator
           I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        if (!Arg->getType()->isPointerTy())
          continue;
        Location CS2Loc(Arg, UnknownSize, CS2Tag);
        // Union in CS1's effect on this argument; stop once saturated.
        R = ModRefResult((R | getModRefInfo(CS1, CS2Loc)) & Mask);
        if (R == Mask)
          break;
      }
    }
    return R;
  }

  // If CS1 only accesses memory through arguments, check if CS2 references
  // any of the memory referenced by CS1's arguments. If not, return NoModRef.
  if (onlyAccessesArgPointees(CS1B)) {
    AliasAnalysis::ModRefResult R = NoModRef;
    if (doesAccessArgPointees(CS1B)) {
      MDNode *CS1Tag = CS1.getInstruction()->getMetadata(LLVMContext::MD_tbaa);
      for (ImmutableCallSite::arg_iterator
           I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        if (!Arg->getType()->isPointerTy())
          continue;
        Location CS1Loc(Arg, UnknownSize, CS1Tag);
        // Any interaction at all forces the current conservative mask.
        if (getModRefInfo(CS2, CS1Loc) != NoModRef) {
          R = Mask;
          break;
        }
      }
    }
    if (R == NoModRef) return R;
  }

  // If this is the end of the chain, don't forward.
  if (!AA) return Mask;

  // Otherwise, fall back to the next AA in the chain. But we can merge
  // in any mask we've managed to compute.
  return ModRefResult(AA->getModRefInfo(CS1, CS2) & Mask);
}
// There are two types of constraints to add for a function call:
// - ValueNode(callsite) = ReturnNode(call target)
// - ValueNode(formal arg) = ValueNode(actual arg)
//
// Direct calls are resolved against the single known callee (with special
// handling for external library functions); indirect calls conservatively
// consider every address-taken function with a compatible arity.
void Andersen::addConstraintForCall(ImmutableCallSite cs) {
  if (const Function *f = cs.getCalledFunction()) // Direct call
  {
    if (f->isDeclaration() || f->isIntrinsic()) // External library call
    {
      // Handle libraries separately
      if (addConstraintForExternalLibrary(cs, f))
        return;
      else // Unresolved library call: ruin everything!
      {
        errs() << "Unresolved ext function: " << f->getName() << "\n";
        // Unknown callee: the return and every pointer argument may point to
        // anything, so copy from/to the universal pointer node.
        if (cs.getType()->isPointerTy()) {
          NodeIndex retIndex = nodeFactory.getValueNodeFor(cs.getInstruction());
          assert(retIndex != AndersNodeFactory::InvalidIndex &&
                 "Failed to find ret node!");
          constraints.emplace_back(AndersConstraint::COPY, retIndex,
                                   nodeFactory.getUniversalPtrNode());
        }
        for (ImmutableCallSite::arg_iterator itr = cs.arg_begin(),
                                             ite = cs.arg_end();
             itr != ite; ++itr) {
          Value *argVal = *itr;
          if (argVal->getType()->isPointerTy()) {
            NodeIndex argIndex = nodeFactory.getValueNodeFor(argVal);
            assert(argIndex != AndersNodeFactory::InvalidIndex &&
                   "Failed to find arg node!");
            constraints.emplace_back(AndersConstraint::COPY, argIndex,
                                     nodeFactory.getUniversalPtrNode());
          }
        }
      }
    } else // Non-external function call
    {
      // Bind the call site's value node to the callee's return node.
      if (cs.getType()->isPointerTy()) {
        NodeIndex retIndex = nodeFactory.getValueNodeFor(cs.getInstruction());
        assert(retIndex != AndersNodeFactory::InvalidIndex &&
               "Failed to find ret node!");
        // errs() << f->getName() << "\n";
        NodeIndex fRetIndex = nodeFactory.getReturnNodeFor(f);
        assert(fRetIndex != AndersNodeFactory::InvalidIndex &&
               "Failed to find function ret node!");
        constraints.emplace_back(AndersConstraint::COPY, retIndex, fRetIndex);
      }
      // The argument constraints
      addArgumentConstraintForCall(cs, f);
    }
  } else // Indirect call
  {
    // We do the simplest thing here: just assume the returned value can be
    // anything :)
    if (cs.getType()->isPointerTy()) {
      NodeIndex retIndex = nodeFactory.getValueNodeFor(cs.getInstruction());
      assert(retIndex != AndersNodeFactory::InvalidIndex &&
             "Failed to find ret node!");
      constraints.emplace_back(AndersConstraint::COPY, retIndex,
                               nodeFactory.getUniversalPtrNode());
    }

    // For argument constraints, search through all addr-taken functions:
    // any address-taken function whose arity is compatible with this call
    // site is a potential callee.
    const Module *M =
        cs.getInstruction()->getParent()->getParent()->getParent();
    for (auto const &f : *M) {
      // Functions without a value node were never address-taken, so they
      // cannot be the target of an indirect call.
      NodeIndex funPtrIndex = nodeFactory.getValueNodeFor(&f);
      if (funPtrIndex == AndersNodeFactory::InvalidIndex)
        // Not an addr-taken function
        continue;

      if (!f.getFunctionType()->isVarArg() && f.arg_size() != cs.arg_size())
        // #arg mismatch
        continue;

      if (f.isDeclaration() || f.isIntrinsic()) // External library call
      {
        if (addConstraintForExternalLibrary(cs, &f))
          continue;
        else {
          // Pollute everything
          for (ImmutableCallSite::arg_iterator itr = cs.arg_begin(),
                                               ite = cs.arg_end();
               itr != ite; ++itr) {
            Value *argVal = *itr;
            if (argVal->getType()->isPointerTy()) {
              NodeIndex argIndex = nodeFactory.getValueNodeFor(argVal);
              assert(argIndex != AndersNodeFactory::InvalidIndex &&
                     "Failed to find arg node!");
              constraints.emplace_back(AndersConstraint::COPY, argIndex,
                                       nodeFactory.getUniversalPtrNode());
            }
          }
        }
      } else
        addArgumentConstraintForCall(cs, &f);
    }
  }
}