/// Merge an autorelease with a retain into a fused call. bool ObjCARCContract::ContractAutorelease(Function &F, Instruction *Autorelease, InstructionClass Class, SmallPtrSet<Instruction *, 4> &DependingInstructions, SmallPtrSet<const BasicBlock *, 4> &Visited) { const Value *Arg = GetObjCArg(Autorelease); // Check that there are no instructions between the retain and the autorelease // (such as an autorelease_pop) which may change the count. CallInst *Retain = 0; if (Class == IC_AutoreleaseRV) FindDependencies(RetainAutoreleaseRVDep, Arg, Autorelease->getParent(), Autorelease, DependingInstructions, Visited, PA); else FindDependencies(RetainAutoreleaseDep, Arg, Autorelease->getParent(), Autorelease, DependingInstructions, Visited, PA); Visited.clear(); if (DependingInstructions.size() != 1) { DependingInstructions.clear(); return false; } Retain = dyn_cast_or_null<CallInst>(*DependingInstructions.begin()); DependingInstructions.clear(); if (!Retain || GetBasicInstructionClass(Retain) != IC_Retain || GetObjCArg(Retain) != Arg) return false; Changed = true; ++NumPeeps; DEBUG(dbgs() << "ObjCARCContract::ContractAutorelease: Fusing " "retain/autorelease. Erasing: " << *Autorelease << "\n" " Old Retain: " << *Retain << "\n"); if (Class == IC_AutoreleaseRV) Retain->setCalledFunction(getRetainAutoreleaseRVCallee(F.getParent())); else Retain->setCalledFunction(getRetainAutoreleaseCallee(F.getParent())); DEBUG(dbgs() << " New Retain: " << *Retain << "\n"); EraseInstruction(Autorelease); return true; }
// Rewrite every call of the i64-length flavour of a memory intrinsic
// (memcpy/memmove/memset) to use the i32-length flavour, truncating the
// length operand at each call site.  Returns true if the module changed,
// i.e. the old intrinsic declaration existed and was erased.
static bool expandIntrinsic(Module *M, Intrinsic::ID ID) {
  LLVMContext &Ctx = M->getContext();

  // Overload signature of the old intrinsic: dest ptr, (src ptr unless
  // memset), i64 length.
  SmallVector<Type *, 3> ParamTys;
  ParamTys.push_back(Type::getInt8PtrTy(Ctx));
  if (ID != Intrinsic::memset)
    ParamTys.push_back(Type::getInt8PtrTy(Ctx));
  unsigned LenPos = ParamTys.size();
  ParamTys.push_back(Type::getInt64Ty(Ctx));

  std::string OldName = Intrinsic::getName(ID, ParamTys);
  Function *OldIntrinsic = M->getFunction(OldName);
  if (!OldIntrinsic)
    return false; // Nothing to rewrite.

  // Same signature but with an i32 length.
  ParamTys[LenPos] = Type::getInt32Ty(Ctx);
  Function *NewIntrinsic = Intrinsic::getDeclaration(M, ID, ParamTys);

  // Walk the use list, advancing past each use before rewriting it.
  for (Value::use_iterator UI = OldIntrinsic->use_begin(),
                           UE = OldIntrinsic->use_end();
       UI != UE;) {
    CallInst *Call = dyn_cast<CallInst>(*UI++);
    if (!Call) {
      report_fatal_error("CanonicalizeMemIntrinsics: Taking the address of an "
                         "intrinsic is not allowed: " + OldName);
    }
    // This temporarily leaves Call non-well-typed.
    Call->setCalledFunction(NewIntrinsic);
    // Truncate the "len" argument.  No overflow check.
    IRBuilder<> Builder(Call);
    Value *ShortLen = Builder.CreateTrunc(Call->getArgOperand(2),
                                          Type::getInt32Ty(Ctx),
                                          "mem_len_truncate");
    Call->setArgOperand(2, ShortLen);
  }
  OldIntrinsic->eraseFromParent();
  return true;
}
/*
 * Replace the called function of a given call site.
 *
 * caller - the call or invoke instruction whose callee is replaced;
 *          instructions of any other kind are silently ignored.
 * fn     - the function that should be called instead.
 */
void DeadStoreEliminationPass::replaceCallingInst(Instruction* caller,
                                                  Function* fn) {
  // dyn_cast already performs the isa<> test, so the original
  // isa<> + dyn_cast<> double check is collapsed into one cast per kind.
  if (CallInst *callInst = dyn_cast<CallInst>(caller)) {
    callInst->setCalledFunction(fn);
  } else if (InvokeInst *invokeInst = dyn_cast<InvokeInst>(caller)) {
    invokeInst->setCalledFunction(fn);
  }
}
/// Merge an autorelease with a retain into a fused call. bool ObjCARCContract::contractAutorelease( Function &F, Instruction *Autorelease, ARCInstKind Class, SmallPtrSetImpl<Instruction *> &DependingInstructions, SmallPtrSetImpl<const BasicBlock *> &Visited) { const Value *Arg = GetArgRCIdentityRoot(Autorelease); // Check that there are no instructions between the retain and the autorelease // (such as an autorelease_pop) which may change the count. CallInst *Retain = nullptr; if (Class == ARCInstKind::AutoreleaseRV) FindDependencies(RetainAutoreleaseRVDep, Arg, Autorelease->getParent(), Autorelease, DependingInstructions, Visited, PA); else FindDependencies(RetainAutoreleaseDep, Arg, Autorelease->getParent(), Autorelease, DependingInstructions, Visited, PA); Visited.clear(); if (DependingInstructions.size() != 1) { DependingInstructions.clear(); return false; } Retain = dyn_cast_or_null<CallInst>(*DependingInstructions.begin()); DependingInstructions.clear(); if (!Retain || GetBasicARCInstKind(Retain) != ARCInstKind::Retain || GetArgRCIdentityRoot(Retain) != Arg) return false; Changed = true; ++NumPeeps; LLVM_DEBUG(dbgs() << " Fusing retain/autorelease!\n" " Autorelease:" << *Autorelease << "\n" " Retain: " << *Retain << "\n"); Function *Decl = EP.get(Class == ARCInstKind::AutoreleaseRV ? ARCRuntimeEntryPointKind::RetainAutoreleaseRV : ARCRuntimeEntryPointKind::RetainAutorelease); Retain->setCalledFunction(Decl); LLVM_DEBUG(dbgs() << " New RetainAutorelease: " << *Retain << "\n"); EraseInstruction(Autorelease); return true; }
// For every direct call to F, insert a cloned call that targets cF instead,
// placed immediately after the original, redirect all uses of the original
// call's result to the clone, and hand both calls to insertCallToPAPI.
//
// BUG FIX: the original loop only advanced the user iterator inside the
// isa<CallInst> branch, so any non-call user of F (e.g. the function's
// address being taken) made the loop spin forever.  The iterator is now
// advanced unconditionally at the top of each iteration, which also keeps it
// valid while the use list is mutated below (cloning I briefly adds a new
// user of F; setCalledFunction then removes it).
void insertCallToAccessFunctionSequential(Function *F, Function *cF) {
  Value::user_iterator i = F->user_begin(), e = F->user_end();
  while (i != e) {
    User *U = *i;
    ++i; // Advance before any mutation of F's use list.

    if (CallInst *I = dyn_cast<CallInst>(U)) {
      BasicBlock *b = I->getParent();
      BasicBlock::iterator helper(I);

      // Clone the call and retarget the clone at cF before inserting it.
      CallInst *ci = cast<CallInst>(I->clone());
      ci->setCalledFunction(cF);
      b->getInstList().insertAfter(helper, ci);

      // Consumers of the original result now read the clone's result.
      I->replaceAllUsesWith(ci);
      insertCallToPAPI(I, ci);
    }
  }
}
// // Method: runOnModule() // // Description: // Entry point for this LLVM pass. Search for functions which could be called // indirectly and create clones for them which are only called by direct // calls. // // Inputs: // M - A reference to the LLVM module to transform. // // Outputs: // M - The transformed LLVM module. // // Return value: // true - The module was modified. // false - The module was not modified. // bool IndClone::runOnModule(Module& M) { // Set of functions to clone std::vector<Function*> toClone; // // Check all of the functions in the module. If the function could be called // by an indirect function call, add it to our worklist of functions to // clone. // for (Module::iterator I = M.begin(); I != M.end(); ++I) { // Flag whether the function should be cloned bool pleaseCloneTheFunction = false; // // Only clone functions which are defined and cannot be replaced by another // function by the linker. // if (!I->isDeclaration() && !I->mayBeOverridden()) { for (Value::use_iterator ui = I->use_begin(), ue = I->use_end(); ui != ue; ++ui) { if (!isa<CallInst>(*ui) && !isa<InvokeInst>(*ui)) { if(!ui->use_empty()) // // If this function is used for anything other than a direct function // call, then we want to clone it. // pleaseCloneTheFunction = true; } else { // // This is a call instruction, but hold up ranger! We need to make // sure that the function isn't passed as an argument to *another* // function. That would make the function usable in an indirect // function call. // for (unsigned index = 1; index < ui->getNumOperands(); ++index) { if (ui->getOperand(index)->stripPointerCasts() == I) { pleaseCloneTheFunction = true; break; } } } // // If we've discovered that the function could be used by an indirect // call site, schedule it for cloning. // if (pleaseCloneTheFunction) { toClone.push_back(I); break; } } } } // // Update the statistics on the number of functions we'll be cloning. 
// We only update the statistic if we want to clone one or more functions; // due to the magic of how statistics work, avoiding assignment prevents it // from needlessly showing up. // if (toClone.size()) numCloned += toClone.size(); // // Go through the worklist and clone each function. After cloning a // function, change all direct calls to use the clone instead of using the // original function. // for (unsigned index = 0; index < toClone.size(); ++index) { // // Clone the function and give it a name indicating that it is a clone to // be used for direct function calls. // Function * Original = toClone[index]; Function* DirectF = CloneFunction(Original); DirectF->setName(Original->getName() + "_DIRECT"); // // Make the clone internal; external code can use the original function. // DirectF->setLinkage(GlobalValue::InternalLinkage); // // Link the cloned function into the set of functions belonging to the // module. // Original->getParent()->getFunctionList().push_back(DirectF); // // Find all uses of the function that use it as a direct call. Change // them to use the clone. // for (Value::use_iterator ui = Original->use_begin(), ue = Original->use_end(); ui != ue; ) { CallInst *CI = dyn_cast<CallInst>(*ui); ui++; if (CI) { if (CI->getCalledFunction() == Original) { ++numReplaced; CI->setCalledFunction(DirectF); } } } } // // Assume that we've cloned at least one function. // return true; }
// Try to promote a static, non-array alloca out of private (scratch) memory.
//
// First attempts to rewrite the alloca as a vector; failing that, and if the
// remaining LDS budget permits, replaces it with a module-scope array in the
// LOCAL address space with one slot per workitem of the maximum work group,
// indexed by a linearized workitem id.  All pointer-typed users collected by
// collectUsesWithPtrTypes are then retyped/rewritten for the new address
// space, and known intrinsic users are replaced or dropped.
void AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector
  Type *AllocaTy = I.getAllocatedType();

  DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I))
    return;

  DEBUG(dbgs() << " alloca is not a candidate for vectorization.\n");

  const Function &ContainingFunction = *I.getParent()->getParent();

  // FIXME: We should also try to get this value from the reqd_work_group_size
  // function attribute if it is available.
  unsigned WorkGroupSize = AMDGPU::getMaximumWorkGroupSize(ContainingFunction);

  // Every workitem needs its own copy, so the LDS footprint is the per-item
  // size multiplied by the maximum work group size.
  int AllocaSize =
      WorkGroupSize * Mod->getDataLayout().getTypeAllocSize(AllocaTy);

  if (AllocaSize > LocalMemAvailable) {
    DEBUG(dbgs() << " Not enough local memory to promote alloca.\n");
    return;
  }

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, WorkList)) {
    DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return;
  }

  DEBUG(dbgs() << "Promoting alloca to local memory\n");
  LocalMemAvailable -= AllocaSize;

  Function *F = I.getParent()->getParent();

  // One array element per workitem, named "<function>.<alloca-name>".
  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(true);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

  // Linearize the 3D workitem id:
  //   TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  // Replace the alloca with this workitem's slot of the LDS array.
  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  // Fix up the users whose pointer types now live in the LOCAL address space.
  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

      // The operand's value should be corrected on its own.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);
      continue;
    }

    IntrinsicInst *Intr = dyn_cast<IntrinsicInst>(Call);
    if (!Intr) {
      // FIXME: What is this for? It doesn't make sense to promote arbitrary
      // function calls. If the call is to a defined function that can also be
      // promoted, we should be able to do this once that function is also
      // rewritten.

      // Re-declare the callee with the caller's (now retyped) argument types
      // under the name "<callee>.local" and redirect the call to it.
      std::vector<Type*> ArgTypes;
      for (unsigned ArgIdx = 0, ArgEnd = Call->getNumArgOperands();
                    ArgIdx != ArgEnd; ++ArgIdx) {
        ArgTypes.push_back(Call->getArgOperand(ArgIdx)->getType());
      }
      Function *F = Call->getCalledFunction();
      FunctionType *NewType = FunctionType::get(Call->getType(), ArgTypes,
                                                F->isVarArg());
      Constant *C = Mod->getOrInsertFunction((F->getName() + ".local").str(),
                                             NewType, F->getAttributes());
      Function *NewF = cast<Function>(C);
      Call->setCalledFunction(NewF);
      continue;
    }

    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      // Rebuild so the intrinsic is re-overloaded on the new pointer types.
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getRawSource(),
                           MemCpy->getLength(), MemCpy->getAlignment(),
                           MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getRawSource(),
                            MemMove->getLength(), MemMove->getAlignment(),
                            MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::invariant_group_barrier:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      // Re-declare objectsize overloaded on the LOCAL-space pointer type.
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(Mod,
        Intrinsic::objectsize,
        { Intr->getType(), PointerType::get(SrcTy, AMDGPUAS::LOCAL_ADDRESS) }
      );

      CallInst *NewCall
        = Builder.CreateCall(ObjectSize, { Src, Intr->getOperand(1) });
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->dump();
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }
}
// Try to promote an alloca out of private (scratch) memory.
//
// First attempts to rewrite the alloca as a vector; failing that, and if the
// remaining LDS budget permits, replaces it with a module-scope array in the
// LOCAL address space with one slot per workitem (assumed maximum work group
// of 256), indexed by a linearized workitem id built from the r600 read
// intrinsics.  Pointer-typed users are then retyped, and known intrinsic
// users are rebuilt or dropped.
void AMDGPUPromoteAlloca::visitAlloca(AllocaInst &I) {
  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector
  Type *AllocaTy = I.getAllocatedType();

  DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I))
    return;

  DEBUG(dbgs() << " alloca is not a candidate for vectorization.\n");

  // FIXME: This is the maximum work group size.  We should try to get
  // value from the reqd_work_group_size function attribute if it is
  // available.
  unsigned WorkGroupSize = 256;
  int AllocaSize = WorkGroupSize *
      Mod->getDataLayout()->getTypeAllocSize(AllocaTy);

  if (AllocaSize > LocalMemAvailable) {
    DEBUG(dbgs() << " Not enough local memory to promote alloca.\n");
    return;
  }

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, WorkList)) {
    DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return;
  }

  DEBUG(dbgs() << "Promoting alloca to local memory\n");
  LocalMemAvailable -= AllocaSize;

  // One array element per workitem in the (assumed maximal) work group.
  GlobalVariable *GV = new GlobalVariable(
      *Mod, ArrayType::get(I.getAllocatedType(), 256), false,
      GlobalValue::ExternalLinkage, 0, I.getName(), 0,
      GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);

  FunctionType *FTy = FunctionType::get(
      Type::getInt32Ty(Mod->getContext()), false);
  AttributeSet AttrSet;
  // BUG FIX: AttributeSet::addAttribute is const and returns a *new* set;
  // the original code discarded the result, so the intrinsics below were
  // declared without ReadNone.  Keep the returned set.
  AttrSet = AttrSet.addAttribute(Mod->getContext(), 0, Attribute::ReadNone);

  // Declarations for the r600 workgroup-size and workitem-id intrinsics.
  Value *ReadLocalSizeY = Mod->getOrInsertFunction(
      "llvm.r600.read.local.size.y", FTy, AttrSet);
  Value *ReadLocalSizeZ = Mod->getOrInsertFunction(
      "llvm.r600.read.local.size.z", FTy, AttrSet);
  Value *ReadTIDIGX = Mod->getOrInsertFunction(
      "llvm.r600.read.tidig.x", FTy, AttrSet);
  Value *ReadTIDIGY = Mod->getOrInsertFunction(
      "llvm.r600.read.tidig.y", FTy, AttrSet);
  Value *ReadTIDIGZ = Mod->getOrInsertFunction(
      "llvm.r600.read.tidig.z", FTy, AttrSet);

  Value *TCntY = Builder.CreateCall(ReadLocalSizeY);
  Value *TCntZ = Builder.CreateCall(ReadLocalSizeZ);
  Value *TIdX = Builder.CreateCall(ReadTIDIGX);
  Value *TIdY = Builder.CreateCall(ReadTIDIGY);
  Value *TIdZ = Builder.CreateCall(ReadTIDIGZ);

  // Linearize the 3D workitem id:
  //   TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  std::vector<Value*> Indices;
  Indices.push_back(Constant::getNullValue(Type::getInt32Ty(Mod->getContext())));
  Indices.push_back(TID);

  // Replace the alloca with this workitem's slot of the LDS array.
  Value *Offset = Builder.CreateGEP(GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  // Fix up the users whose pointer types now live in the LOCAL address space.
  for (std::vector<Value*>::iterator i = WorkList.begin(),
                                     e = WorkList.end(); i != e; ++i) {
    Value *V = *i;
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

      // The operand's value should be corrected on its own.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);
      continue;
    }

    IntrinsicInst *Intr = dyn_cast<IntrinsicInst>(Call);
    if (!Intr) {
      // Re-declare the callee with the caller's (now retyped) argument types
      // under the name "<callee>.local" and redirect the call to it.
      std::vector<Type*> ArgTypes;
      for (unsigned ArgIdx = 0, ArgEnd = Call->getNumArgOperands();
                    ArgIdx != ArgEnd; ++ArgIdx) {
        ArgTypes.push_back(Call->getArgOperand(ArgIdx)->getType());
      }
      Function *F = Call->getCalledFunction();
      FunctionType *NewType = FunctionType::get(Call->getType(), ArgTypes,
                                                F->isVarArg());
      Constant *C = Mod->getOrInsertFunction(StringRef(F->getName().str()
                                             + ".local"), NewType,
                                             F->getAttributes());
      Function *NewF = cast<Function>(C);
      Call->setCalledFunction(NewF);
      continue;
    }

    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      // Rebuild so the intrinsic is re-overloaded on the new pointer types.
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getRawSource(),
                           MemCpy->getLength(), MemCpy->getAlignment(),
                           MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->dump();
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }
}
// Entry point of the pass: removes or downgrades type-tracking runtime calls
// (inserted by earlier instrumentation) at locations that the DSA type-safety
// analysis proves type-safe.
//
// For each known runtime hook (trackGlobal, checkType, trackStoreInst,
// trackUnInitInst, malloc, copyTypeInfo, setTypeInfo, getTypeTag) the pass
// walks its call sites and, when the pointer involved is type-safe, either
// deletes the call outright or replaces it with a cheaper trackInitInst
// call.  Deletions are deferred through the toDelete worklist so that the
// use-list iterations stay valid.
//
// Returns true if any call was optimized away.
//
// NOTE(review): argument indexing below (getOperand(1), (2), ...) assumes the
// callee occupies operand 0 of a CallInst in this LLVM version -- confirm for
// the toolchain this builds against.
bool TypeChecksOpt::runOnModule(Module &M) {
  TS = &getAnalysis<dsa::TypeSafety<TDDataStructures> >();

  // Create the necessary prototypes
  VoidTy = IntegerType::getVoidTy(M.getContext());
  Int8Ty = IntegerType::getInt8Ty(M.getContext());
  Int32Ty = IntegerType::getInt32Ty(M.getContext());
  Int64Ty = IntegerType::getInt64Ty(M.getContext());
  VoidPtrTy = PointerType::getUnqual(Int8Ty);
  TypeTagTy = Int8Ty;
  TypeTagPtrTy = PointerType::getUnqual(TypeTagTy);

  // memset(ptr, value, size, align) -- used to scrub type metadata below.
  Constant *memsetF = M.getOrInsertFunction ("llvm.memset.i64", VoidTy,
                                             VoidPtrTy, Int8Ty, Int64Ty,
                                             Int32Ty, NULL);
  trackGlobal = M.getOrInsertFunction("trackGlobal",
                                      VoidTy,
                                      VoidPtrTy,/*ptr*/
                                      TypeTagTy,/*type*/
                                      Int64Ty,/*size*/
                                      Int32Ty,/*tag*/
                                      NULL);
  trackInitInst = M.getOrInsertFunction("trackInitInst",
                                        VoidTy,
                                        VoidPtrTy,/*ptr*/
                                        Int64Ty,/*size*/
                                        Int32Ty,/*tag*/
                                        NULL);
  trackUnInitInst = M.getOrInsertFunction("trackUnInitInst",
                                          VoidTy,
                                          VoidPtrTy,/*ptr*/
                                          Int64Ty,/*size*/
                                          Int32Ty,/*tag*/
                                          NULL);
  trackStoreInst = M.getOrInsertFunction("trackStoreInst",
                                         VoidTy,
                                         VoidPtrTy,/*ptr*/
                                         TypeTagTy,/*type*/
                                         Int64Ty,/*size*/
                                         Int32Ty,/*tag*/
                                         NULL);
  checkTypeInst = M.getOrInsertFunction("checkType",
                                        VoidTy,
                                        TypeTagTy,/*type*/
                                        Int64Ty,/*size*/
                                        TypeTagPtrTy,
                                        VoidPtrTy,/*ptr*/
                                        Int32Ty,/*tag*/
                                        NULL);
  copyTypeInfo = M.getOrInsertFunction("copyTypeInfo",
                                       VoidTy,
                                       VoidPtrTy,/*dest ptr*/
                                       VoidPtrTy,/*src ptr*/
                                       Int64Ty,/*size*/
                                       Int32Ty,/*tag*/
                                       NULL);
  setTypeInfo = M.getOrInsertFunction("setTypeInfo",
                                      VoidTy,
                                      VoidPtrTy,/*dest ptr*/
                                      TypeTagPtrTy,/*metadata*/
                                      Int64Ty,/*size*/
                                      TypeTagTy,
                                      VoidPtrTy,
                                      Int32Ty,/*tag*/
                                      NULL);
  trackStringInput = M.getOrInsertFunction("trackStringInput",
                                           VoidTy,
                                           VoidPtrTy,
                                           Int32Ty,
                                           NULL);
  getTypeTag = M.getOrInsertFunction("getTypeTag",
                                     VoidTy,
                                     VoidPtrTy, /*ptr*/
                                     Int64Ty, /*size*/
                                     TypeTagPtrTy, /*dest for type tag*/
                                     Int32Ty, /*tag*/
                                     NULL);
  MallocFunc = M.getFunction("malloc");

  // trackGlobal(ptr, type, size, tag): for type-safe globals the type tag is
  // redundant, so record plain initialized memory instead.  New
  // trackInitInst calls do not appear in trackGlobal's use list and deletion
  // is deferred, so ++User stays valid.
  for(Value::use_iterator User = trackGlobal->use_begin();
      User != trackGlobal->use_end(); ++User) {
    CallInst *CI = dyn_cast<CallInst>(*User);
    assert(CI);
    if(TS->isTypeSafe(CI->getOperand(1)->stripPointerCasts(),
                      CI->getParent()->getParent())) {
      std::vector<Value*> Args;
      Args.push_back(CI->getOperand(1));
      Args.push_back(CI->getOperand(3));
      Args.push_back(CI->getOperand(4));
      CallInst::Create(trackInitInst, Args, "", CI);
      toDelete.push_back(CI);
    }
  }

  // checkType(type, size, tags, ptr, tag): the check is unnecessary on
  // type-safe pointers (operand 4 is the checked pointer).
  for(Value::use_iterator User = checkTypeInst->use_begin();
      User != checkTypeInst->use_end(); ++User) {
    CallInst *CI = dyn_cast<CallInst>(*User);
    assert(CI);
    if(TS->isTypeSafe(CI->getOperand(4)->stripPointerCasts(),
                      CI->getParent()->getParent())) {
      toDelete.push_back(CI);
    }
  }

  // trackStoreInst(ptr, type, size, tag): no metadata update needed for
  // stores through type-safe pointers.
  for(Value::use_iterator User = trackStoreInst->use_begin();
      User != trackStoreInst->use_end(); ++User) {
    CallInst *CI = dyn_cast<CallInst>(*User);
    assert(CI);
    if(TS->isTypeSafe(CI->getOperand(1)->stripPointerCasts(),
                      CI->getParent()->getParent())) {
      toDelete.push_back(CI);
    }
  }

  // for alloca's if they are type known
  // assume initialized with TOP
  //
  // The iterator is advanced *before* the body because setCalledFunction
  // below unlinks CI from trackUnInitInst's use list.
  for(Value::use_iterator User = trackUnInitInst->use_begin();
      User != trackUnInitInst->use_end(); ) {
    CallInst *CI = dyn_cast<CallInst>(*(User++));
    assert(CI);

    // check if operand is an alloca inst.
    if(TS->isTypeSafe(CI->getOperand(1)->stripPointerCasts(),
                      CI->getParent()->getParent())) {
      // Downgrade: treat the memory as initialized rather than uninitialized.
      CI->setCalledFunction(trackInitInst);

      if(AllocaInst *AI =
           dyn_cast<AllocaInst>(CI->getOperand(1)->stripPointerCasts())) {
        // Initialize the allocation to NULL
        std::vector<Value *> Args2;
        Args2.push_back(CI->getOperand(1));
        Args2.push_back(ConstantInt::get(Int8Ty, 0));
        Args2.push_back(CI->getOperand(2));
        Args2.push_back(ConstantInt::get(Int32Ty, AI->getAlignment()));
        CallInst::Create(memsetF, Args2, "", CI);
      }
    }
  }

  if(MallocFunc) {
    // For type-safe malloc results, record the fresh allocation as
    // initialized memory (cast to i8* and sign-extend the size to i64).
    for(Value::use_iterator User = MallocFunc->use_begin();
        User != MallocFunc->use_end(); User ++) {
      CallInst *CI = dyn_cast<CallInst>(*User);
      if(!CI)
        continue;
      if(TS->isTypeSafe(CI, CI->getParent()->getParent())){
        CastInst *BCI = BitCastInst::CreatePointerCast(CI, VoidPtrTy);
        CastInst *Size = CastInst::CreateSExtOrBitCast(CI->getOperand(1), Int64Ty);
        Size->insertAfter(CI);
        BCI->insertAfter(Size);
        std::vector<Value *> Args;
        Args.push_back(BCI);
        Args.push_back(Size);
        Args.push_back(ConstantInt::get(Int32Ty, 0));
        CallInst *CINew = CallInst::Create(trackInitInst, Args);
        CINew->insertAfter(BCI);
      }
    }
  }

  // also do for mallocs/calloc/other allocators???
  // other allocators??

  // copyTypeInfo(dest, src, size, tag): for a type-safe destination, copying
  // metadata degenerates to marking the destination initialized.
  for(Value::use_iterator User = copyTypeInfo->use_begin();
      User != copyTypeInfo->use_end(); ++User) {
    CallInst *CI = dyn_cast<CallInst>(*User);
    assert(CI);
    if(TS->isTypeSafe(CI->getOperand(1)->stripPointerCasts(),
                      CI->getParent()->getParent())) {
      std::vector<Value*> Args;
      Args.push_back(CI->getOperand(1));
      Args.push_back(CI->getOperand(3)); // size
      Args.push_back(CI->getOperand(4));
      CallInst::Create(trackInitInst, Args, "", CI);
      toDelete.push_back(CI);
    }
  }

  // setTypeInfo(dest, metadata, size, type, ptr, tag): likewise reduced to a
  // plain initialization record on type-safe destinations.
  for(Value::use_iterator User = setTypeInfo->use_begin();
      User != setTypeInfo->use_end(); ++User) {
    CallInst *CI = dyn_cast<CallInst>(*User);
    assert(CI);
    if(TS->isTypeSafe(CI->getOperand(1)->stripPointerCasts(),
                      CI->getParent()->getParent())) {
      std::vector<Value*> Args;
      Args.push_back(CI->getOperand(1));
      Args.push_back(CI->getOperand(3)); // size
      Args.push_back(CI->getOperand(6));
      CallInst::Create(trackInitInst, Args, "", CI);
      toDelete.push_back(CI);
    }
  }

  // getTypeTag(ptr, size, dest, tag): instead of reading the tags at runtime,
  // fill the destination buffer (which must be an alloca) with the constant
  // 255.  NOTE(review): 255 presumably encodes the "unknown/TOP" type tag --
  // confirm against the runtime's tag encoding.
  for(Value::use_iterator User = getTypeTag->use_begin();
      User != getTypeTag->use_end(); ++User) {
    CallInst *CI = dyn_cast<CallInst>(*User);
    assert(CI);
    if(TS->isTypeSafe(CI->getOperand(1)->stripPointerCasts(),
                      CI->getParent()->getParent())) {
      AllocaInst *AI =
        dyn_cast<AllocaInst>(CI->getOperand(3)->stripPointerCasts());
      assert(AI);
      std::vector<Value*> Args;
      Args.push_back(CI->getOperand(3));
      Args.push_back(ConstantInt::get(Int8Ty, 255));
      Args.push_back(CI->getOperand(2));
      Args.push_back(ConstantInt::get(Int32Ty, AI->getAlignment()));
      CallInst::Create(memsetF, Args, "", CI);
      toDelete.push_back(CI);
    }
  }

  // All use-list walks are done; it is now safe to erase the collected calls.
  numSafe += toDelete.size();

  while(!toDelete.empty()) {
    Instruction *I = toDelete.back();
    toDelete.pop_back();
    I->eraseFromParent();
  }

  return (numSafe > 0);
}