void AliasSetTracker::add(const AliasSetTracker &AST) {
  assert(&AA == &AST.AA &&
         "Merging AliasSetTracker objects with different Alias Analyses!");

  // Loop over all of the alias sets in AST, adding the pointers contained
  // therein into the current alias sets. This can cause alias sets to be
  // merged together in the current AST.
  for (const_iterator I = AST.begin(), E = AST.end(); I != E; ++I) {
    if (I->Forward)
      continue; // Ignore forwarding alias sets

    AliasSet &AS = const_cast<AliasSet&>(*I);

    // If there are any call sites in the alias set, add them to this AST.
    for (unsigned i = 0, e = AS.UnknownInsts.size(); i != e; ++i)
      add(AS.UnknownInsts[i]);

    // Loop over all of the pointers in this alias set.
    bool X;
    for (AliasSet::iterator ASI = AS.begin(), E = AS.end(); ASI != E; ++ASI) {
      AliasSet &NewAS = addPointer(ASI.getPointer(), ASI.getSize(),
                                   ASI.getTBAAInfo(),
                                   (AliasSet::AccessType)AS.AccessTy, X);
      if (AS.isVolatile())
        NewAS.setVolatile();
    }
  }
}
void AliasSetTracker::add(const AliasSetTracker &AST) {
  assert(&AA == &AST.AA &&
         "Merging AliasSetTracker objects with different Alias Analyses!");

  // Loop over all of the alias sets in AST, adding the pointers contained
  // therein into the current alias sets. This can cause alias sets to be
  // merged together in the current AST.
  for (const AliasSet &AS : AST) {
    if (AS.Forward)
      continue; // Ignore forwarding alias sets

    // If there are any call sites in the alias set, add them to this AST.
    for (unsigned i = 0, e = AS.UnknownInsts.size(); i != e; ++i)
      if (auto *Inst = AS.getUnknownInst(i))
        add(Inst);

    // Loop over all of the pointers in this alias set.
    for (AliasSet::iterator ASI = AS.begin(), E = AS.end(); ASI != E; ++ASI)
      addPointer(
          MemoryLocation(ASI.getPointer(), ASI.getSize(), ASI.getAAInfo()),
          (AliasSet::AccessLattice)AS.Access);
  }
}
void AliasSetTracker::add(const AliasSetTracker &AST) {
  assert(&AA == &AST.AA &&
         "Merging AliasSetTracker objects with different Alias Analyses!");

  // Loop over all of the alias sets in AST, adding the pointers contained
  // therein into the current alias sets. This can cause alias sets to be
  // merged together in the current AST.
  for (const_iterator I = AST.begin(), E = AST.end(); I != E; ++I)
    if (!I->Forward) { // Ignore forwarding alias sets
      AliasSet &AS = const_cast<AliasSet&>(*I);

      // If there are any call sites in the alias set, add them to this AST.
      for (unsigned i = 0, e = AS.CallSites.size(); i != e; ++i)
        add(AS.CallSites[i]);

      // Loop over all of the pointers in this alias set...
      AliasSet::iterator I = AS.begin(), E = AS.end();
      bool X;
      for (; I != E; ++I)
        addPointer(I.getPointer(), I.getSize(),
                   (AliasSet::AccessType)AS.AccessTy, X);
    }
}
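For orientation, here is a minimal, hypothetical driver for the add() overloads above: build a tracker per basic block, then fold each one into an aggregate tracker, which may merge alias sets as it goes. The function name mergeBlockTrackers is an illustrative assumption, not code from any of the revisions shown here; only the AliasSetTracker interface already used above is assumed.

// Hedged sketch; assumes the classic AliasSetTracker interface shown above.
void mergeBlockTrackers(AliasAnalysis &AA, Function &F) {
  AliasSetTracker Merged(AA);
  for (Function::iterator BB = F.begin(), FE = F.end(); BB != FE; ++BB) {
    AliasSetTracker Local(AA);
    Local.add(*BB);     // classify the memory effects of every instruction
    Merged.add(Local);  // fold per-block sets in; sets in Merged may merge
  }
}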
/// FindPromotableValuesInLoop - Check the current loop for stores to definite
/// pointers, which are not loaded and stored through may aliases and are safe
/// for promotion. If these are found, create an alloca for the value, add it
/// to the PromotedValues list, and keep track of the mapping from value to
/// alloca.
void LICM::FindPromotableValuesInLoop(
    std::vector<std::pair<AllocaInst*, Value*> > &PromotedValues,
    std::map<Value*, AllocaInst*> &ValueToAllocaMap) {
  Instruction *FnStart = CurLoop->getHeader()->getParent()->begin()->begin();

  // Loop over all of the alias sets in the tracker object.
  for (AliasSetTracker::iterator I = CurAST->begin(), E = CurAST->end();
       I != E; ++I) {
    AliasSet &AS = *I;
    // We can promote this alias set if it has a store, if it is a "Must"
    // alias set, if the pointer is loop invariant, and if we are not
    // eliminating any volatile loads or stores.
    if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
        AS.isVolatile() || !CurLoop->isLoopInvariant(AS.begin()->getValue()))
      continue;

    assert(!AS.empty() &&
           "Must alias set should have at least one pointer element in it!");
    Value *V = AS.begin()->getValue();

    // Check that all of the pointers in the alias set have the same type. We
    // cannot (yet) promote a memory location that is loaded and stored in
    // different sizes.
    {
      bool PointerOk = true;
      for (AliasSet::iterator I = AS.begin(), E = AS.end(); I != E; ++I)
        if (V->getType() != I->getValue()->getType()) {
          PointerOk = false;
          break;
        }
      if (!PointerOk)
        continue;
    }

    // It isn't safe to promote a load/store from the loop if the load/store
    // is conditional. For example, turning:
    //
    //    for () { if (c) *P += 1; }
    //
    // into:
    //
    //    tmp = *P;  for () { if (c) tmp += 1; } *P = tmp;
    //
    // is not safe, because *P may only be valid to access if 'c' is true.
    //
    // It is safe to promote P if all uses are direct load/stores and if at
    // least one is guaranteed to be executed.
    bool GuaranteedToExecute = false;
    bool InvalidInst = false;
    for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
         UI != UE; ++UI) {
      // Ignore instructions not in this loop.
      Instruction *Use = dyn_cast<Instruction>(*UI);
      if (!Use || !CurLoop->contains(Use->getParent()))
        continue;

      if (!isa<LoadInst>(Use) && !isa<StoreInst>(Use)) {
        InvalidInst = true;
        break;
      }

      if (!GuaranteedToExecute)
        GuaranteedToExecute = isSafeToExecuteUnconditionally(*Use);
    }

    // If there is a non-load/store instruction in the loop, we can't promote
    // it. If there isn't a guaranteed-to-execute instruction, we can't
    // promote.
    if (InvalidInst || !GuaranteedToExecute)
      continue;

    const Type *Ty = cast<PointerType>(V->getType())->getElementType();
    AllocaInst *AI = new AllocaInst(Ty, 0, V->getName()+".tmp", FnStart);
    PromotedValues.push_back(std::make_pair(AI, V));

    // Update the AST and alias analysis.
    CurAST->copyValue(V, AI);

    for (AliasSet::iterator I = AS.begin(), E = AS.end(); I != E; ++I)
      ValueToAllocaMap.insert(std::make_pair(I->getValue(), AI));

    DEBUG(errs() << "LICM: Promoting value: " << *V << "\n");
  }
}
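A hedged sketch of how a caller might consume the two out-parameters. The historical consumer in LICM loads each promoted pointer in the preheader and seeds the corresponding alloca; the exact insertion points below are illustrative assumptions, not the pass's verbatim code.

std::vector<std::pair<AllocaInst*, Value*> > PromotedValues;
std::map<Value*, AllocaInst*> ValueToAllocaMap;
FindPromotableValuesInLoop(PromotedValues, ValueToAllocaMap);

for (unsigned i = 0, e = PromotedValues.size(); i != e; ++i) {
  // Load the promoted location once before the loop and seed the alloca
  // that stands in for the memory location inside the loop body.
  Value *Ptr = PromotedValues[i].second;
  LoadInst *LI = new LoadInst(Ptr, Ptr->getName()+".val",
                              Preheader->getTerminator());
  new StoreInst(LI, PromotedValues[i].first, Preheader->getTerminator());
}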
/// PromoteAliasSet - Try to promote memory values to scalars by sinking
/// stores out of the loop and moving loads to before the loop. We do this by
/// looping over the stores in the loop, looking for stores to Must pointers
/// which are loop invariant.
///
void LICM::PromoteAliasSet(AliasSet &AS) {
  // We can promote this alias set if it has a store, if it is a "Must" alias
  // set, if the pointer is loop invariant, and if we are not eliminating any
  // volatile loads or stores.
  if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
      AS.isVolatile() || !CurLoop->isLoopInvariant(AS.begin()->getValue()))
    return;

  assert(!AS.empty() &&
         "Must alias set should have at least one pointer element in it!");

  Value *SomePtr = AS.begin()->getValue();

  // It isn't safe to promote a load/store from the loop if the load/store is
  // conditional. For example, turning:
  //
  //    for () { if (c) *P += 1; }
  //
  // into:
  //
  //    tmp = *P;  for () { if (c) tmp += 1; } *P = tmp;
  //
  // is not safe, because *P may only be valid to access if 'c' is true.
  //
  // It is safe to promote P if all uses are direct load/stores and if at
  // least one is guaranteed to be executed.
  bool GuaranteedToExecute = false;

  SmallVector<Instruction*, 64> LoopUses;
  SmallPtrSet<Value*, 4> PointerMustAliases;

  // We start with an alignment of one and try to find instructions that
  // allow us to prove better alignment.
  unsigned Alignment = 1;

  // Check that all of the pointers in the alias set have the same type. We
  // cannot (yet) promote a memory location that is loaded and stored in
  // different sizes.
  for (AliasSet::iterator ASI = AS.begin(), E = AS.end(); ASI != E; ++ASI) {
    Value *ASIV = ASI->getValue();
    PointerMustAliases.insert(ASIV);

    // Check that all of the pointers in the alias set have the same type. We
    // cannot (yet) promote a memory location that is loaded and stored in
    // different sizes.
    if (SomePtr->getType() != ASIV->getType())
      return;

    for (Value::use_iterator UI = ASIV->use_begin(), UE = ASIV->use_end();
         UI != UE; ++UI) {
      // Ignore instructions that are outside the loop.
      Instruction *Use = dyn_cast<Instruction>(*UI);
      if (!Use || !CurLoop->contains(Use))
        continue;

      // If there is a non-load/store instruction in the loop, we can't
      // promote it.
      if (LoadInst *load = dyn_cast<LoadInst>(Use)) {
        assert(!load->isVolatile() && "AST broken");
        if (!load->isSimple())
          return;
      } else if (StoreInst *store = dyn_cast<StoreInst>(Use)) {
        // Stores *of* the pointer are not interesting, only stores *to* the
        // pointer.
        if (Use->getOperand(1) != ASIV)
          continue;
        assert(!store->isVolatile() && "AST broken");
        if (!store->isSimple())
          return;

        // Note that we only check GuaranteedToExecute inside the store case
        // so that we do not introduce stores where they did not exist before
        // (which would break the LLVM concurrency model).

        // If the alignment of this instruction allows us to specify a more
        // restrictive (and performant) alignment and if we are sure this
        // instruction will be executed, update the alignment.
        // Larger is better, with the exception of 0 being the best alignment.
        unsigned InstAlignment = store->getAlignment();
        if ((InstAlignment > Alignment || InstAlignment == 0) &&
            (Alignment != 0))
          if (isGuaranteedToExecute(*Use)) {
            GuaranteedToExecute = true;
            Alignment = InstAlignment;
          }

        if (!GuaranteedToExecute)
          GuaranteedToExecute = isGuaranteedToExecute(*Use);
      } else
        return; // Not a load or store.

      LoopUses.push_back(Use);
    }
  }

  // If there isn't a guaranteed-to-execute instruction, we can't promote.
  if (!GuaranteedToExecute)
    return;

  // Otherwise, this is safe to promote, let's do it!
  DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
               << '\n');
  Changed = true;
  ++NumPromoted;

  // Grab a debug location for the inserted loads/stores; given that the
  // inserted loads/stores have little relation to the original loads/stores,
  // this code just arbitrarily picks a location from one, since any debug
  // location is better than none.
  DebugLoc DL = LoopUses[0]->getDebugLoc();

  SmallVector<BasicBlock*, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  // We use the SSAUpdater interface to insert phi nodes as required.
  SmallVector<PHINode*, 16> NewPHIs;
  SSAUpdater SSA(&NewPHIs);
  LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases,
                        ExitBlocks, *CurAST, DL, Alignment);

  // Set up the preheader to have a definition of the value. It is the
  // live-out value from the preheader that uses in the loop will use.
  LoadInst *PreheaderLoad =
      new LoadInst(SomePtr, SomePtr->getName()+".promoted",
                   Preheader->getTerminator());
  PreheaderLoad->setAlignment(Alignment);
  PreheaderLoad->setDebugLoc(DL);
  SSA.AddAvailableValue(Preheader, PreheaderLoad);

  // Rewrite all the loads in the loop and remember all the definitions from
  // stores in the loop.
  Promoter.run(LoopUses);

  // If the SSAUpdater didn't use the load in the preheader, just zap it now.
  if (PreheaderLoad->use_empty())
    PreheaderLoad->eraseFromParent();
}
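PromoteAliasSet operates on a single alias set; the surrounding pass walks every set in the loop's tracker. A plausible driver, hedged but consistent with how LICM of this era iterates CurAST after hoisting and sinking:

// After hoisting and sinking, try to promote each alias set in the loop.
for (AliasSetTracker::iterator I = CurAST->begin(), E = CurAST->end();
     I != E; ++I)
  PromoteAliasSet(*I);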
/// PromoteAliasSet - Try to promote memory values to scalars by sinking
/// stores out of the loop and moving loads to before the loop. We do this by
/// looping over the stores in the loop, looking for stores to Must pointers
/// which are loop invariant.
///
void LICM::PromoteAliasSet(AliasSet &AS) {
  // We can promote this alias set if it has a store, if it is a "Must" alias
  // set, if the pointer is loop invariant, and if we are not eliminating any
  // volatile loads or stores.
  if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
      AS.isVolatile() || !CurLoop->isLoopInvariant(AS.begin()->getValue()))
    return;

  assert(!AS.empty() &&
         "Must alias set should have at least one pointer element in it!");

  Value *SomePtr = AS.begin()->getValue();

  // It isn't safe to promote a load/store from the loop if the load/store is
  // conditional. For example, turning:
  //
  //    for () { if (c) *P += 1; }
  //
  // into:
  //
  //    tmp = *P;  for () { if (c) tmp += 1; } *P = tmp;
  //
  // is not safe, because *P may only be valid to access if 'c' is true.
  //
  // It is safe to promote P if all uses are direct load/stores and if at
  // least one is guaranteed to be executed.
  bool GuaranteedToExecute = false;

  SmallVector<Instruction*, 64> LoopUses;
  SmallPtrSet<Value*, 4> PointerMustAliases;

  // Check that all of the pointers in the alias set have the same type. We
  // cannot (yet) promote a memory location that is loaded and stored in
  // different sizes.
  for (AliasSet::iterator ASI = AS.begin(), E = AS.end(); ASI != E; ++ASI) {
    Value *ASIV = ASI->getValue();
    PointerMustAliases.insert(ASIV);

    // Check that all of the pointers in the alias set have the same type. We
    // cannot (yet) promote a memory location that is loaded and stored in
    // different sizes.
    if (SomePtr->getType() != ASIV->getType())
      return;

    for (Value::use_iterator UI = ASIV->use_begin(), UE = ASIV->use_end();
         UI != UE; ++UI) {
      // Ignore instructions that are outside the loop.
      Instruction *Use = dyn_cast<Instruction>(*UI);
      if (!Use || !CurLoop->contains(Use))
        continue;

      // If there is a non-load/store instruction in the loop, we can't
      // promote it.
      if (isa<LoadInst>(Use))
        assert(!cast<LoadInst>(Use)->isVolatile() && "AST broken");
      else if (isa<StoreInst>(Use)) {
        assert(!cast<StoreInst>(Use)->isVolatile() && "AST broken");
        if (Use->getOperand(0) == ASIV)
          return; // Stores *of* the pointer defeat promotion.
      } else
        return; // Not a load or store.

      if (!GuaranteedToExecute)
        GuaranteedToExecute = isSafeToExecuteUnconditionally(*Use);

      LoopUses.push_back(Use);
    }
  }

  // If there isn't a guaranteed-to-execute instruction, we can't promote.
  if (!GuaranteedToExecute)
    return;

  // Otherwise, this is safe to promote, let's do it!
  DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
               << '\n');
  Changed = true;
  ++NumPromoted;

  // We use the SSAUpdater interface to insert phi nodes as required.
  SmallVector<PHINode*, 16> NewPHIs;
  SSAUpdater SSA(&NewPHIs);

  // It wants to know some value of the same type as what we'll be inserting.
  Value *SomeValue;
  if (isa<LoadInst>(LoopUses[0]))
    SomeValue = LoopUses[0];
  else
    SomeValue = cast<StoreInst>(LoopUses[0])->getOperand(0);
  SSA.Initialize(SomeValue->getType(), SomeValue->getName());

  // First step: bucket up uses of the pointers by the block they occur in.
  // This is important because we have to handle multiple defs/uses in a block
  // ourselves: SSAUpdater is purely for cross-block references.
  // FIXME: Want a TinyVector<Instruction*> since there is usually 0/1 element.
  DenseMap<BasicBlock*, std::vector<Instruction*> > UsesByBlock;
  for (unsigned i = 0, e = LoopUses.size(); i != e; ++i) {
    Instruction *User = LoopUses[i];
    UsesByBlock[User->getParent()].push_back(User);
  }

  // Okay, now we can iterate over all the blocks in the loop with uses,
  // processing them. Keep track of which loads are loading a live-in value.
  SmallVector<LoadInst*, 32> LiveInLoads;
  DenseMap<Value*, Value*> ReplacedLoads;

  for (unsigned LoopUse = 0, e = LoopUses.size(); LoopUse != e; ++LoopUse) {
    Instruction *User = LoopUses[LoopUse];
    std::vector<Instruction*> &BlockUses = UsesByBlock[User->getParent()];

    // If this block has already been processed, ignore this repeat use.
    if (BlockUses.empty())
      continue;

    // Okay, this is the first use in the block. If this block just has a
    // single user in it, we can rewrite it trivially.
    if (BlockUses.size() == 1) {
      // If it is a store, it is a trivial def of the value in the block.
      if (isa<StoreInst>(User)) {
        SSA.AddAvailableValue(User->getParent(),
                              cast<StoreInst>(User)->getOperand(0));
      } else {
        // Otherwise it is a load, queue it to rewrite as a live-in load.
        LiveInLoads.push_back(cast<LoadInst>(User));
      }
      BlockUses.clear();
      continue;
    }

    // Otherwise, check to see if this block is all loads. If so, we can
    // queue them all as live in loads.
    bool HasStore = false;
    for (unsigned i = 0, e = BlockUses.size(); i != e; ++i) {
      if (isa<StoreInst>(BlockUses[i])) {
        HasStore = true;
        break;
      }
    }

    if (!HasStore) {
      for (unsigned i = 0, e = BlockUses.size(); i != e; ++i)
        LiveInLoads.push_back(cast<LoadInst>(BlockUses[i]));
      BlockUses.clear();
      continue;
    }

    // Otherwise, we have mixed loads and stores (or just a bunch of stores).
    // Since SSAUpdater is purely for cross-block values, we need to determine
    // the order of these instructions in the block. If the first use in the
    // block is a load, then it uses the live in value. The last store defines
    // the live out value. We handle this by doing a linear scan of the block.
    BasicBlock *BB = User->getParent();
    Value *StoredValue = 0;
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
      if (LoadInst *L = dyn_cast<LoadInst>(II)) {
        // If this is a load from an unrelated pointer, ignore it.
        if (!PointerMustAliases.count(L->getOperand(0)))
          continue;

        // If we haven't seen a store yet, this is a live in use, otherwise
        // use the stored value.
        if (StoredValue) {
          L->replaceAllUsesWith(StoredValue);
          ReplacedLoads[L] = StoredValue;
        } else {
          LiveInLoads.push_back(L);
        }
        continue;
      }

      if (StoreInst *S = dyn_cast<StoreInst>(II)) {
        // If this is a store to an unrelated pointer, ignore it.
        if (!PointerMustAliases.count(S->getOperand(1)))
          continue;

        // Remember that this is the active value in the block.
        StoredValue = S->getOperand(0);
      }
    }

    // The last stored value that happened is the live-out for the block.
    assert(StoredValue && "Already checked that there is a store in block");
    SSA.AddAvailableValue(BB, StoredValue);
    BlockUses.clear();
  }

  // Now that all the intra-loop values are classified, set up the preheader.
  // It gets a load of the pointer we're promoting, and it is the live-out
  // value from the preheader.
  LoadInst *PreheaderLoad =
      new LoadInst(SomePtr, SomePtr->getName()+".promoted",
                   Preheader->getTerminator());
  SSA.AddAvailableValue(Preheader, PreheaderLoad);

  // Now that the preheader is good to go, set up the exit blocks. Each exit
  // block gets a store of the live-out values that feed them. Since we've
  // already told the SSA updater about the defs in the loop and the preheader
  // definition, it is all set and we can start using it.
  SmallVector<BasicBlock*, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
    BasicBlock *ExitBlock = ExitBlocks[i];
    Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
    Instruction *InsertPos = ExitBlock->getFirstNonPHI();
    new StoreInst(LiveInValue, SomePtr, InsertPos);
  }

  // Okay, now we rewrite all loads that use live-in values in the loop,
  // inserting PHI nodes as necessary.
  for (unsigned i = 0, e = LiveInLoads.size(); i != e; ++i) {
    LoadInst *ALoad = LiveInLoads[i];
    Value *NewVal = SSA.GetValueInMiddleOfBlock(ALoad->getParent());
    ALoad->replaceAllUsesWith(NewVal);
    CurAST->copyValue(ALoad, NewVal);
    ReplacedLoads[ALoad] = NewVal;
  }

  // If the preheader load is itself a pointer, we need to tell alias analysis
  // about the new pointer we created in the preheader block and about any PHI
  // nodes that just got inserted.
  if (PreheaderLoad->getType()->isPointerTy()) {
    // Copy any value stored to or loaded from a must-alias of the pointer.
    CurAST->copyValue(SomeValue, PreheaderLoad);
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      CurAST->copyValue(SomeValue, NewPHIs[i]);
  }

  // Now that everything is rewritten, delete the old instructions from the
  // body of the loop. They should all be dead now.
  for (unsigned i = 0, e = LoopUses.size(); i != e; ++i) {
    Instruction *User = LoopUses[i];

    // If this is a load that still has uses, then the load must have been
    // added as a live value in the SSAUpdate data structure for a block (e.g.
    // because the loaded value was stored later). In this case, we need to
    // recursively propagate the updates until we get to the real value.
    if (!User->use_empty()) {
      Value *NewVal = ReplacedLoads[User];
      assert(NewVal && "not a replaced load?");

      // Propagate down to the ultimate replacee. The intermediate loads
      // could theoretically already have been deleted, so we don't want to
      // dereference the Value*'s.
      DenseMap<Value*, Value*>::iterator RLI = ReplacedLoads.find(NewVal);
      while (RLI != ReplacedLoads.end()) {
        NewVal = RLI->second;
        RLI = ReplacedLoads.find(NewVal);
      }

      User->replaceAllUsesWith(NewVal);
      CurAST->copyValue(User, NewVal);
    }

    CurAST->deleteValue(User);
    User->eraseFromParent();
  }

  // Phew, we're done!
}
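This older version hand-rolls the intra-block bookkeeping around SSAUpdater, so the updater's contract is worth isolating. A stripped-down sketch using only the calls already seen above; V, DefBB, DefValue, and UseBB are placeholder names, not variables from the function:

SmallVector<PHINode*, 8> NewPHIs;
SSAUpdater SSA(&NewPHIs);
SSA.Initialize(V->getType(), V->getName()); // type/name of the rewritten value
SSA.AddAvailableValue(DefBB, DefValue);     // one live-out definition per block
// Querying a block makes SSAUpdater insert whatever PHI nodes are needed
// to merge the registered definitions reaching that point.
Value *Live = SSA.GetValueInMiddleOfBlock(UseBB);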
InsInfo::InsInfo(const Instruction *i, AliasAnalysis &aa, AliasSetTracker &ast)
    : AA(&aa), AST(&ast), ins(i), sliced(true) {
  DEBUG(errs() << "new InsInfo for ");
  DEBUG(i->print(errs()));
  DEBUG(errs() << "\n");
  //typedef ptr::PointsToSets::PointsToSet PTSet;

  if (const LoadInst *LI = dyn_cast<const LoadInst>(i)) {
    addDEF(Pointee(i, -1));

    const Value *op = elimConstExpr(LI->getPointerOperand());
    if (isa<ConstantPointerNull>(op)) {
      errs() << "ERROR in analysed code -- reading from address 0 at "
             << i->getParent()->getParent()->getName() << ":\n";
      i->print(errs());
    } else if (isa<ConstantInt>(op)) {
      // Reading through a constant integer address; nothing to record.
    } else {
      addREF(Pointee(op, -1));
      /*if (!hasExtraReference(op)) {
        const PTSet &S = getPointsToSet(op, PS);
        for (PTSet::const_iterator I = S.begin(), E = S.end(); I != E; ++I)
          addREF(*I);
      }*/
      if (!hasExtraReference(op)) {
        uint64_t Size = 0;
        if (op->getType()->isSized())
          Size = AA->getTypeStoreSize(op->getType());
        Value *temp = const_cast<Value*>(op);
        const AliasSet *S = AST->getAliasSetForPointerIfExists(
            temp, Size, LI->getMetadata(LLVMContext::MD_tbaa));
        if (S != NULL) {
          for (AliasSet::iterator I = S->begin(), E = S->end(); I != E; ++I)
            addREF(Pointee(I.getPointer(), -1));
        }
        addREF(Pointee(op, -1));
      }
    }
  } else if (const StoreInst *SI = dyn_cast<const StoreInst>(i)) {
    const Value *l = elimConstExpr(SI->getPointerOperand());
    if (isa<ConstantPointerNull>(l)) {
      errs() << "ERROR in analysed code -- writing to address 0 at "
             << i->getParent()->getParent()->getName() << ":\n";
      i->print(errs());
    } else if (isa<ConstantInt>(l)) {
      // Writing through a constant integer address; nothing to record.
    } else {
      if (hasExtraReference(l)) {
        addDEF(Pointee(l, -1));
      } else {
        uint64_t Size = 0;
        if (l->getType()->isSized())
          Size = AA->getTypeStoreSize(l->getType());
        Value *temp = const_cast<Value*>(l);
        const AliasSet *S = AST->getAliasSetForPointerIfExists(
            temp, Size, SI->getMetadata(LLVMContext::MD_tbaa));
        if (S != NULL) {
          for (AliasSet::iterator I = S->begin(), E = S->end(); I != E; ++I)
            addDEF(Pointee(I.getPointer(), -1));
        }
        addDEF(Pointee(l, -1));
        /*const PTSet &S = getPointsToSet(l, PS);
        for (PTSet::const_iterator I = S.begin(), E = S.end(); I != E; ++I)
          addDEF(*I);*/
      }

      if (!l->getType()->isIntegerTy())
        addREF(Pointee(l, -1));
      const Value *r = elimConstExpr(SI->getValueOperand());
      if (!hasExtraReference(r) && !isConstantValue(r))
        addREF(Pointee(r, -1));
    }
  } else if (const GetElementPtrInst *gep =
                 dyn_cast<const GetElementPtrInst>(i)) {
    addDEF(Pointee(i, -1));
    addREF(Pointee(gep->getPointerOperand(), -1));
    for (unsigned i = 1, e = gep->getNumOperands(); i != e; ++i) {
      Value *op = gep->getOperand(i);
      if (!isa<ConstantInt>(op))
        addREF(Pointee(op, -1));
    }
  } else if (CallInst const* const C = dyn_cast<const CallInst>(i)) {
    const Value *cv = C->getCalledValue();

    if (isInlineAssembly(C)) {
      DEBUG(errs() << "ERROR: Inline assembler detected in "
                   << i->getParent()->getParent()->getName()
                   << ", ignoring\n");
    } else if (isMemoryAllocation(cv)) {
      addDEF(Pointee(i, -1));
    } else if (isMemoryDeallocation(cv)) {
      // Deallocation neither defines nor references a sliced value.
    } else if (isMemoryCopy(cv) || isMemoryMove(cv)) {
      const Value *l = elimConstExpr(C->getOperand(0));
      if (isPointerValue(l)) {
        uint64_t Size = 0;
        if (l->getType()->isSized())
          Size = AA->getTypeStoreSize(l->getType());
        Value *temp = const_cast<Value*>(l);
        const AliasSet *S = AST->getAliasSetForPointerIfExists(
            temp, Size, C->getMetadata(LLVMContext::MD_tbaa));
        if (S != NULL) {
          for (AliasSet::iterator I = S->begin(), E = S->end(); I != E; ++I)
            addDEF(Pointee(I.getPointer(), -1));
        }
        addDEF(Pointee(l, -1));
        /*const PTSet &L = getPointsToSet(l, PS);
        for (PTSet::const_iterator p = L.begin(); p != L.end(); ++p)
          addDEF(*p);*/
      }
      const Value *r = elimConstExpr(C->getOperand(1));
      const Value *len = elimConstExpr(C->getOperand(2));
      addREF(Pointee(l, -1));
      addREF(Pointee(r, -1));
      /* memcpy/memset wouldn't work with len being 'undef' */
      addREF(Pointee(len, -1));
      if (isPointerValue(r)) {
        uint64_t Size = 0;
        if (r->getType()->isSized())
          Size = AA->getTypeStoreSize(r->getType());
        Value *temp = const_cast<Value*>(r);
        const AliasSet *S = AST->getAliasSetForPointerIfExists(
            temp, Size, C->getMetadata(LLVMContext::MD_tbaa));
        if (S != NULL) {
          for (AliasSet::iterator I = S->begin(), E = S->end(); I != E; ++I)
            addREF(Pointee(I.getPointer(), -1));
        }
        addREF(Pointee(r, -1));
        /*const PTSet &R = getPointsToSet(r, PS);
        for (PTSet::const_iterator p = R.begin(); p != R.end(); ++p)
          addREF(*p);*/
      }
    } else if (!memoryManStuff(C)) {
      //typedef std::vector<const llvm::Function *> CalledVec;
      //CalledVec CV;
      //getCalledFunctions(C, PS, std::back_inserter(CV));
      const Value *callie = C->getCalledValue();
      if (!isa<Function>(callie))
        addREF(Pointee(callie, -1));

      /*for (CalledVec::const_iterator f = CV.begin(); f != CV.end(); ++f) {
        mods::Modifies::mapped_type const& M = getModSet(*f, MOD);
        for (mods::Modifies::mapped_type::const_iterator v = M.begin();
             v != M.end(); ++v)
          addDEF(Pointee(*v, -1));
      }*/

      if (!callToVoidFunction(C))
        addDEF(Pointee(C, -1));

      // Add all the non-constant arguments to REF.
      for (unsigned i = 0; i < C->getNumArgOperands(); i++) {
        const Value *r = C->getArgOperand(i);
        if (!isa<ConstantInt>(r))
          addREF(Pointee(r, -1));
      }
    }
  } else if (isa<const ReturnInst>(i)) {
    // Nothing to record for a return.
  } else if (const BinaryOperator *BO = dyn_cast<const BinaryOperator>(i)) {
    addDEF(Pointee(i, -1));
    if (!isConstantValue(BO->getOperand(0)))
      addREF(Pointee(BO->getOperand(0), -1));
    if (!isConstantValue(BO->getOperand(1)))
      addREF(Pointee(BO->getOperand(1), -1));
  } else if (const CastInst *CI = dyn_cast<const CastInst>(i)) {
    addDEF(Pointee(i, -1));
    //if (!hasExtraReference(CI->getOperand(0)))
    addREF(Pointee(CI->getOperand(0), -1));
  } else if (const AllocaInst *AI = dyn_cast<const AllocaInst>(i)) {
    addDEF(Pointee(AI, -1));
  } else if (const CmpInst *CI = dyn_cast<const CmpInst>(i)) {
    addDEF(Pointee(i, -1));
    if (!isConstantValue(CI->getOperand(0)))
      addREF(Pointee(CI->getOperand(0), -1));
    if (!isConstantValue(CI->getOperand(1)))
      addREF(Pointee(CI->getOperand(1), -1));
  } else if (const BranchInst *BI = dyn_cast<const BranchInst>(i)) {
    if (BI->isConditional() && !isConstantValue(BI->getCondition()))
      addREF(Pointee(BI->getCondition(), -1));
  } else if (const PHINode *phi = dyn_cast<const PHINode>(i)) {
    addDEF(Pointee(i, -1));
    for (unsigned k = 0; k < phi->getNumIncomingValues(); ++k)
      if (!isConstantValue(phi->getIncomingValue(k)))
        addREF(Pointee(phi->getIncomingValue(k), -1));
  } else if (const SwitchInst *SI = dyn_cast<SwitchInst>(i)) {
    if (!isConstantValue(SI->getCondition()))
      addREF(Pointee(SI->getCondition(), -1));
  } else if (const SelectInst *SI = dyn_cast<const SelectInst>(i)) {
    addDEF(Pointee(i, -1));
    if (!isConstantValue(SI->getCondition()))
      addREF(Pointee(SI->getCondition(), -1));
    if (!isConstantValue(SI->getTrueValue()))
      addREF(Pointee(SI->getTrueValue(), -1));
    if (!isConstantValue(SI->getFalseValue()))
      addREF(Pointee(SI->getFalseValue(), -1));
  } else if (isa<const UnreachableInst>(i)) {
    // Nothing to record for unreachable.
  } else if (const ExtractValueInst *EV = dyn_cast<const ExtractValueInst>(i)) {
    addDEF(Pointee(i, -1));
    addREF(Pointee(EV->getAggregateOperand(), -1));
  } else if (const InsertValueInst *IV = dyn_cast<const InsertValueInst>(i)) {
    const Value *r = IV->getInsertedValueOperand();
    addDEF(Pointee(IV->getAggregateOperand(), -1));
    if (!isConstantValue(r))
      addREF(Pointee(r, -1));
  } else {
    errs() << "ERROR: Unsupported instruction reached\n";
    i->print(errs());
  }
}
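A hedged sketch of how a slicer might build InsInfo for every instruction in a function; the map name, ownership, and iteration style are illustrative assumptions rather than the slicer's actual code:

std::map<const Instruction*, InsInfo*> InsInfoMap;
for (Function::const_iterator BB = F.begin(), FE = F.end(); BB != FE; ++BB)
  for (BasicBlock::const_iterator I = BB->begin(), BE = BB->end();
       I != BE; ++I)
    InsInfoMap[&*I] = new InsInfo(&*I, AA, AST); // DEF/REF sets per instruction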