bool RegisterStackPools::runOnFunction(Function &F) {
  TargetData *TD = &getAnalysis<TargetData>();
  SmallVector<Value*, 16> Objects;
  Module &M = *F.getParent();
  Function *RegisterStackPoolFunction = M.getFunction("__pool_register_stack");
  assert(RegisterStackPoolFunction &&
         "__pool_register_stack function has disappeared!\n");

  // Collect alloca instructions
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB)
    for (BasicBlock::iterator I = BB->begin(), IE = BB->end(); I != IE; ++I)
      if (isa<AllocaInst>(I))
        Objects.push_back(I);

  // Collect ByVal arguments
  if (RegByval) {
    for (Function::arg_iterator Arg = F.arg_begin(), E = F.arg_end();
         Arg != E; ++Arg) {
      if (Arg->hasByValAttr())
        Objects.push_back(Arg);
    }
  }

  IRBuilder<> Builder(F.getContext());
  ObjectSizeOffsetEvaluator ObjSizeEval(TD, F.getContext());

  // Add the registration calls.
  for (size_t i = 0, N = Objects.size(); i < N; i++) {
    Value *V = Objects[i];
    if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
      Builder.SetInsertPoint(++BasicBlock::iterator(AI));
    } else {
      Builder.SetInsertPoint(&F.getEntryBlock().front());
    }

    SizeOffsetEvalType SizeOffset = ObjSizeEval.compute(V);
    assert(ObjSizeEval.bothKnown(SizeOffset));
    assert(dyn_cast<ConstantInt>(SizeOffset.second)->isZero());
    Value *Size = SizeOffset.first;
    Size = Builder.CreateIntCast(Size, SizeTy, false,
                                 Size->getName() + ".casted");
    Value *VoidPtr = Builder.CreatePointerCast(V, VoidPtrTy,
                                               V->getName() + ".casted");
    Builder.CreateCall2(RegisterStackPoolFunction, VoidPtr, Size);
    ++StackPoolsRegistered;
  }

  return !Objects.empty();
}
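// Illustrative sketch only: the pass above assumes a runtime entry point
// named __pool_register_stack; the (void*, size_t) signature is inferred
// from the VoidPtr/Size operands built for CreateCall2, and the real pool
// runtime's bookkeeping is not shown here.
#include <cstddef>

extern "C" void __pool_register_stack(void *Start, size_t Size) {
  // Record [Start, Start + Size) as a live stack object so later pool or
  // bounds checks can recognize pointers into this alloca or byval argument.
}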
void MemoryInstrumenter::instrumentPointerParameters(Function *F) {
  assert(F && !F->isDeclaration());

  DataLayout &TD = getAnalysis<DataLayout>();
  Instruction *Entry = F->begin()->getFirstInsertionPt();

  for (Function::arg_iterator AI = F->arg_begin(); AI != F->arg_end(); ++AI) {
    if (PointerType *ArgType = dyn_cast<PointerType>(AI->getType())) {
      // If an argument is marked as byval, add an implicit allocation.
      // FIXME: still broken. We need to allocate one extra byte for it. We'd
      // better do it in the backend.
      if (AI->hasByValAttr()) {
        uint64_t TypeSize = TD.getTypeStoreSize(ArgType->getElementType());
        instrumentMemoryAllocation(AI,
                                   ConstantInt::get(LongType, TypeSize),
                                   NULL,
                                   Entry);
      }
      instrumentPointer(AI, NULL, Entry);
    }
  }
}
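// Hypothetical source-level example of the case handled above: a large
// aggregate passed by value is often lowered by the frontend so the callee
// receives a byval pointer to a caller-side copy; it is that implicit copy
// which the instrumentation reports as an allocation of getTypeStoreSize()
// bytes. The struct and function names here are made up for illustration.
struct Packet {
  char Payload[64];
};

int checksum(Packet P) {   // often lowered to: i32 @checksum(%struct.Packet* byval %P)
  int Sum = 0;
  for (unsigned i = 0; i != sizeof(P.Payload); ++i)
    Sum += P.Payload[i];
  return Sum;
}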
/// PropagateConstantsIntoArguments - Look at all uses of the specified
/// function.  If all uses are direct call sites, and all pass a particular
/// constant in for an argument, propagate that constant in as the argument.
///
bool IPCP::PropagateConstantsIntoArguments(Function &F) {
  if (F.arg_empty() || F.use_empty()) return false; // No arguments? Early exit.

  // For each argument, keep track of its constant value and whether it is a
  // constant or not.  The bool is driven to true when found to be non-constant.
  SmallVector<std::pair<Constant*, bool>, 16> ArgumentConstants;
  ArgumentConstants.resize(F.arg_size());

  unsigned NumNonconstant = 0;
  for (Value::use_iterator UI = F.use_begin(), E = F.use_end(); UI != E; ++UI) {
    User *U = *UI;
    // Ignore blockaddress uses.
    if (isa<BlockAddress>(U)) continue;

    // Used by a non-instruction, or not the callee of a function, do not
    // transform.
    if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
      return false;

    CallSite CS(cast<Instruction>(U));
    if (!CS.isCallee(UI))
      return false;

    // Check out all of the potentially constant arguments.  Note that we don't
    // inspect varargs here.
    CallSite::arg_iterator AI = CS.arg_begin();
    Function::arg_iterator Arg = F.arg_begin();
    for (unsigned i = 0, e = ArgumentConstants.size(); i != e;
         ++i, ++AI, ++Arg) {

      // If this argument is known non-constant, ignore it.
      if (ArgumentConstants[i].second)
        continue;

      Constant *C = dyn_cast<Constant>(*AI);
      if (C && ArgumentConstants[i].first == 0) {
        ArgumentConstants[i].first = C;   // First constant seen.
      } else if (C && ArgumentConstants[i].first == C) {
        // Still the constant value we think it is.
      } else if (*AI == &*Arg) {
        // Ignore recursive calls passing argument down.
      } else {
        // Argument became non-constant.  If all arguments are non-constant now,
        // give up on this function.
        if (++NumNonconstant == ArgumentConstants.size())
          return false;
        ArgumentConstants[i].second = true;
      }
    }
  }

  // If we got to this point, there is a constant argument!
  assert(NumNonconstant != ArgumentConstants.size());
  bool MadeChange = false;
  Function::arg_iterator AI = F.arg_begin();
  for (unsigned i = 0, e = ArgumentConstants.size(); i != e; ++i, ++AI) {
    // Do we have a constant argument?
    if (ArgumentConstants[i].second || AI->use_empty() ||
        (AI->hasByValAttr() && !F.onlyReadsMemory()))
      continue;

    Value *V = ArgumentConstants[i].first;
    if (V == 0) V = UndefValue::get(AI->getType());
    AI->replaceAllUsesWith(V);
    ++NumArgumentsProped;
    MadeChange = true;
  }
  return MadeChange;
}
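// Hypothetical source-level analogue of the situation the pass exploits:
// every direct call passes 10 for 'Factor', so uses of 'Factor' inside
// scale() can be replaced with the constant 10, while 'X', which varies per
// call site, is left untouched. Names are invented for illustration.
static int scale(int X, int Factor) { return X * Factor; }

int callerA() { return scale(3, 10); }
int callerB() { return scale(7, 10); }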
/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallPtrSet<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      DeadStackObjects.insert(AI);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyways.
    CallInst *CI = extractMallocCall(I);
    if (!CI)
      CI = extractCallocCall(I);
    if (CI && !PointerMayBeCaptured(CI, true, true))
      DeadStackObjects.insert(CI);
  }

  // Treat byval arguments the same, stores to them are dead at the end of the
  // function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValAttr())
      DeadStackObjects.insert(AI);

  // Scan the basic block backwards
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ) {
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
           E = Pointers.end(); I != E; ++I)
        if (!DeadStackObjects.count(*I)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = BBI++;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Objects: ";
              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                   E = Pointers.end(); I != E; ++I) {
                dbgs() << **I;
                if (llvm::next(I) != E)
                  dbgs() << ", ";
              }
              dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        DeleteDeadInstruction(Dead, *MD, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI)) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (AllocaInst *A = dyn_cast<AllocaInst>(BBI)) {
      DeadStackObjects.erase(A);
      continue;
    }

    if (CallInst *CI = extractMallocCall(BBI)) {
      DeadStackObjects.erase(CI);
      continue;
    }

    if (CallInst *CI = extractCallocCall(BBI)) {
      DeadStackObjects.erase(CI);
      continue;
    }

    if (CallSite CS = cast<Value>(BBI)) {
      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      SmallVector<Value*, 8> LiveAllocas;
      for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
           E = DeadStackObjects.end(); I != E; ++I) {
        // See if the call site touches it.
        AliasAnalysis::ModRefResult A =
          AA->getModRefInfo(CS, *I, getPointerSize(*I, *AA));

        if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref)
          LiveAllocas.push_back(*I);
      }

      for (SmallVector<Value*, 8>::iterator I = LiveAllocas.begin(),
           E = LiveAllocas.end(); I != E; ++I)
        DeadStackObjects.erase(*I);

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        return MadeChange;

      continue;
    }

    AliasAnalysis::Location LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load
        break;
      LoadedLoc = AA->getLocation(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = AA->getLocation(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = AA->getLocationForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't removed
      // above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}
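// Hypothetical C++ analogue of the pattern this routine removes: the local
// never escapes and is never read again before the function returns, so the
// final store to it is dead. Function and variable names are invented.
void endOfFunction() {
  int Local;
  Local = 1;   // dead store: nothing can observe this write before 'ret'
}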
/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallPtrSet<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      DeadStackObjects.insert(AI);

  // Treat byval arguments the same, stores to them are dead at the end of the
  // function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValAttr())
      DeadStackObjects.insert(AI);

  // Scan the basic block backwards
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ) {
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts
      Value *Pointer = getStoredPointerOperand(BBI)->getUnderlyingObject();

      // Stores to stack values are valid candidates for removal.
      if (DeadStackObjects.count(Pointer)) {
        Instruction *Dead = BBI++;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Object: " << *Pointer << '\n');

        // DCE instructions only used to calculate that store.
        DeleteDeadInstruction(Dead, *MD, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI)) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (AllocaInst *A = dyn_cast<AllocaInst>(BBI)) {
      DeadStackObjects.erase(A);
      continue;
    }

    if (CallSite CS = cast<Value>(BBI)) {
      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      unsigned NumModRef = 0, NumOther = 0;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      SmallVector<Value*, 8> LiveAllocas;
      for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
           E = DeadStackObjects.end(); I != E; ++I) {
        // If we detect that our AA is imprecise, it's not worth it to scan the
        // rest of the DeadPointers set.  Just assume that the AA will return
        // ModRef for everything, and go ahead and bail out.
        if (NumModRef >= 16 && NumOther == 0)
          return MadeChange;

        // See if the call site touches it.
        AliasAnalysis::ModRefResult A =
          AA->getModRefInfo(CS, *I, getPointerSize(*I, *AA));

        if (A == AliasAnalysis::ModRef)
          ++NumModRef;
        else
          ++NumOther;

        if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref)
          LiveAllocas.push_back(*I);
      }

      for (SmallVector<Value*, 8>::iterator I = LiveAllocas.begin(),
           E = LiveAllocas.end(); I != E; ++I)
        DeadStackObjects.erase(*I);

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        return MadeChange;

      continue;
    }

    AliasAnalysis::Location LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      LoadedLoc = AA->getLocation(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = AA->getLocation(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = AA->getLocationForSource(MTI);
    } else {
      // Not a loading instruction.
      continue;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}
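// Hypothetical contrasting case for the call handling above: the address of
// 'Local' escapes to an opaque call, so alias analysis may report Ref or
// ModRef for it and the store before the call must be kept alive. 'sink' is
// an assumed external function used only for illustration.
void sink(int *P);   // may read *P

void notDead() {
  int Local;
  Local = 1;     // live: sink() may read 'Local' through the escaped pointer
  sink(&Local);
}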