/// \brief Check whether a GEP's indices are all constant. /// /// Respects any simplified values known during the analysis of this callsite. bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) { for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I) if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I)) return false; return true; }
void visitGetElementPtrInst(GetElementPtrInst &GEP) {
  // Look up the DSA node for the GEP's base pointer in the top-down graph of
  // the function containing this instruction.
  Value *Base = GEP.getPointerOperand();
  DSGraph *Graph = budsPass->getDSGraph(*(GEP.getParent()->getParent()));
  DSNode *Node = Graph->getNodeForValue(Base).getNode();

  // FIXME: Do we really need this?
  markReachableAllocas(Node);

  // Record alloca nodes whose structure is still precise; completely folded
  // nodes have lost their field layout and are not tracked here.
  if (Node && Node->isAllocaNode() && !Node->isNodeCompletelyFolded())
    unsafeAllocaNodes.push_back(Node);
}
// findLoopCarriedMemoryAccesses - Collect every load/store in BB that targets
// a RAM of interest, classifying each access either as a constant offset from
// the loop's canonical induction variable (MEM_ACCESS::InductionOffset) or as
// an opaque address (MEM_ACCESS::Address).  Results are recorded per-RAM in
// `memoryAccesses` and per-instruction in `memAccessMap`.
//
// globalRAM      - fallback RAM used when LOCAL_RAMS is disabled.
// memAccessMap   - out: instruction -> access descriptor.
// memoryAccesses - out: RAM -> all accesses touching it.
void ModuloScheduler::findLoopCarriedMemoryAccesses(
    RAM *globalRAM, std::map<Instruction *, MEM_ACCESS> &memAccessMap,
    std::map<RAM *, std::vector<MEM_ACCESS>> &memoryAccesses) {
  assert(alloc);
  // add additional memory constraints for local memory read/writes
  for (BasicBlock::iterator I = BB->begin(), ie = BB->end(); I != ie; I++) {
    // Only loads and stores carry a pointer operand we care about.
    Value *addr = NULL;
    std::string memtype;
    if (LoadInst *L = dyn_cast<LoadInst>(I)) {
      addr = L->getPointerOperand();
      memtype = "load";
    } else if (StoreInst *S = dyn_cast<StoreInst>(I)) {
      addr = S->getPointerOperand();
      memtype = "store";
    } else {
      continue;
    }

    // Resolve which RAM this address maps to; with LOCAL_RAMS off everything
    // goes to the shared global RAM.
    RAM *ram;
    if (LEGUP_CONFIG->getParameterInt("LOCAL_RAMS")) {
      ram = alloc->getLocalRamFromValue(addr);
    } else {
      ram = globalRAM;
    }
    if (!ram)
      continue;

    // Only direct GEP addresses are analyzable here.
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(addr);
    if (!GEP)
      continue;

    // NOTE(review): assumes the GEP's third operand is the element index --
    // TODO confirm for multi-dimensional accesses.
    Value *offset = GEP->getOperand(2);

    MEM_ACCESS access;
    access.I = I;
    access.ram = ram;
    int indexOffset = 0;
    if (findInductionOffset(offset, ram, loop->getCanonicalInductionVariable(),
                            memtype, &indexOffset)) {
      // found an offset to the induction variable
      access.type = MEM_ACCESS::InductionOffset;
      access.offset = indexOffset;
    } else {
      // Not expressible as induction + constant; keep the raw pointer.
      access.type = MEM_ACCESS::Address;
      access.ptr = GEP;
    }

    memoryAccesses[ram].push_back(access);
    memAccessMap[I] = access;
  }
}
TEST(CloneInstruction, Inbounds) {
  LLVMContext context;
  Value *Ptr = new Argument(Type::getInt32PtrTy(context));
  Constant *Zero = Constant::getNullValue(Type::getInt32Ty(context));
  std::vector<Value *> Indices(1, Zero);
  GetElementPtrInst *GEP =
      GetElementPtrInst::Create(Ptr, Indices.begin(), Indices.end());

  // A freshly created GEP is not inbounds, and neither should its clone be.
  EXPECT_FALSE(cast<GetElementPtrInst>(GEP->clone())->isInBounds());

  // Once the flag is set, clones must carry it along.
  GEP->setIsInBounds();
  EXPECT_TRUE(cast<GetElementPtrInst>(GEP->clone())->isInBounds());
}
/// Diagnostic walk over every instruction in the module: prints each load
/// (with its result and pointer-operand names) and each GEP.  Purely
/// informational; mutates nothing.
void smtit::performTest1() {
  for (Module::iterator FI = Mod->begin(), FE = Mod->end(); FI != FE; ++FI) {
    Function *Func = &*FI;
    // DEBUG(errs() << *Func << "\n");
    for (Function::iterator BI = Func->begin(), BE = Func->end(); BI != BE;
         ++BI) {
      BasicBlock *BB = &*BI;
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
        Instruction *BBI = &*I;
        //if (true == isa<StoreInst>(BBI)) {
        if (true == isa<LoadInst>(BBI)) {
          LoadInst *li = dyn_cast<LoadInst>(BBI);
          Value *ptrOp = li->getPointerOperand();
          DEBUG(errs() << *li << "\t Result Name: " << li->getName()
                       << "\t Pointer Name: " << ptrOp->getName() << "\n");
          // DEBUG(errs() << "\tStore Instruction: " << *BBI << " \n");
          // DEBUG(errs() << "\t\tPointerType: " << isLLVMPAPtrTy(SI->getType())
          // << " \n");
          // Instruction* V = cast<Instruction>(SI->getOperand(1));
          // DEBUG(errs() << "\tOperand : " << *V << " \n");
          // DEBUG(errs() << "\t\tPointerType: " << isLLVMPAPtrTy(V->getType())
          // << " \n");
        } else if(true == isa<GetElementPtrInst>(BBI)) {
          GetElementPtrInst *gep = dyn_cast<GetElementPtrInst>(BBI);
          DEBUG(errs() << *gep << "\t Result Name: " << gep->getName() << "\n");
          // DEBUG(errs() << "\tInstruction: " << *BBI << " \n");
          // DEBUG(errs() << "\t\tPointerType: " <<
          // isLLVMPAPtrTy(BBI->getType()) << " \n");
        }
        // For def-use chains: All the uses of the definition
        //DEBUG(errs() << *BBI << "\n");
        /* for (User *U : BBI->users()) {
          if (Instruction *Inst = dyn_cast<Instruction>(U)) {
            DEBUG(errs()<< "  " << *Inst << "\n");
          }
        }

        for (Value::user_iterator i = BBI->user_begin(), e = BBI->user_end();
             i != e; ++i) {
          if (Instruction *user_inst = dyn_cast<Instruction>(*i)) {
            DEBUG(errs()<< " " << *user_inst << "\n");
          }
        } */
      }
    }
  }
}
// Returns the register-address map for `fn`, computing and caching it on the
// first request.  Each entry maps a register name to a GEP into the register
// struct that is passed as the function's first argument.  Declarations get
// an empty map; missing-but-ModRef'ed registers get synthesized GEPs.
unordered_multimap<const char*, Value*>& ArgumentRecovery::exposeAllRegisters(llvm::Function* fn)
{
	// Cached result from a previous call?
	auto iter = registerAddresses.find(fn);
	if (iter != registerAddresses.end())
	{
		return iter->second;
	}

	auto& addresses = registerAddresses[fn];
	if (fn->isDeclaration())
	{
		// If a function has no body, it doesn't need a register map.
		return addresses;
	}

	// The register context is the first argument; it must be a struct.
	Argument* firstArg = fn->arg_begin();
	assert(isStructType(firstArg));

	// Get explicitly-used GEPs
	const auto& target = getAnalysis<TargetInfo>();
	for (User* user : firstArg->users())
	{
		if (auto gep = dyn_cast<GetElementPtrInst>(user))
		{
			// Key by the largest register overlapping the accessed one
			// (e.g. a sub-register access is recorded under its parent).
			const char* name = target.registerName(*gep);
			const char* largestRegister = target.largestOverlappingRegister(name);
			addresses.insert({largestRegister, gep});
		}
	}

	// Synthesize GEPs for implicitly-used registers.
	// Implicit uses are when a function callee uses a register without there being a reference in the caller.
	// This happens either because the parameter is passed through, or because the register is a scratch register that
	// the caller doesn't use itself.
	auto insertionPoint = fn->begin()->begin();
	auto& regUse = getAnalysis<RegisterUse>();
	const auto& modRefInfo = *regUse.getModRefInfo(fn);
	for (const auto& pair : modRefInfo)
	{
		if ((pair.second & RegisterUse::ModRef) != 0 && addresses.find(pair.first) == addresses.end())
		{
			// Need a GEP here, because the function ModRefs the register implicitly.
			GetElementPtrInst* synthesizedGep = target.getRegister(firstArg, pair.first);
			synthesizedGep->insertBefore(insertionPoint);
			addresses.insert({pair.first, synthesizedGep});
		}
	}

	return addresses;
}
/// If the argument is a GEP, then returns the operand identified by /// getGEPInductionOperand. However, if there is some other non-loop-invariant /// operand, it returns that instead. Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) { GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr); if (!GEP) return Ptr; unsigned InductionOperand = getGEPInductionOperand(GEP); // Check that all of the gep indices are uniform except for our induction // operand. for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) if (i != InductionOperand && !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp)) return Ptr; return GEP->getOperand(InductionOperand); }
// // Method: preprocess() // // Description: // %p = bitcast %p1 to T1 // gep(%p) ... // -> // gep (bitcast %p1 to T1), ... // // Inputs: // M - A reference to the LLVM module to process // // Outputs: // M - The transformed LLVM module. // static void preprocess(Module& M) { for (Module::iterator F = M.begin(); F != M.end(); ++F){ for (Function::iterator B = F->begin(), FE = F->end(); B != FE; ++B) { for (BasicBlock::iterator I = B->begin(), BE = B->end(); I != BE; I++) { if(!(isa<GetElementPtrInst>(I))) continue; GetElementPtrInst *GEP = cast<GetElementPtrInst>(I); if(BitCastInst *BI = dyn_cast<BitCastInst>(GEP->getOperand(0))) { if(Constant *C = dyn_cast<Constant>(BI->getOperand(0))) { GEP->setOperand(0, ConstantExpr::getBitCast(C, BI->getType())); } } } } } }
/// Cost-model a GEP at this call site.  Returns true when the GEP is modeled
/// as free (all-constant indices, or a constant offset from a known base),
/// false when it will require runtime math.  Maintains ConstantOffsetPtrs and
/// the SROA candidate bookkeeping for the result pointer.
bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  bool SROACandidate = lookupSROAArgAndCost(I.getPointerOperand(),
                                            SROAArg, CostIt);

  // Try to fold GEPs of constant-offset call site argument pointers. This
  // requires target data and inbounds GEPs.
  if (TD && I.isInBounds()) {
    // Check if we have a base + offset for the pointer.
    Value *Ptr = I.getPointerOperand();
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
    if (BaseAndOffset.first) {
      // Check if the offset of this GEP is constant, and if so accumulate it
      // into Offset.
      if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and disable SROA.
        if (SROACandidate)
          disableSROA(CostIt);
        return false;
      }

      // Add the result as a new mapping to Base + Offset.
      ConstantOffsetPtrs[&I] = BaseAndOffset;

      // Also handle SROA candidates here, we already know that the GEP is
      // all-constant indexed.
      if (SROACandidate)
        SROAArgValues[&I] = SROAArg;

      return true;
    }
  }

  // No known base+offset; a GEP with all-constant indices is still free.
  if (isGEPOffsetConstant(I)) {
    if (SROACandidate)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROACandidate)
    disableSROA(CostIt);
  return false;
}
// -- handle GetElementPtr instruction -- void UnsafeTypeCastingCheck::handleGetElementPtrInstruction (Instruction *inst) { GetElementPtrInst * ginst = dyn_cast<GetElementPtrInst>(inst); if (ginst == NULL) utccAbort("handleGetElementPtrInstruction cannot process with a non-getelementptr instruction"); Value *pt = ginst->getPointerOperand(); UTCC_TYPE pt_ut_self = UH_UT; UTCC_TYPE pt_ut_base = UH_UT; UTCC_TYPE pt_ut_element = llvmT2utccT(ginst->getType()->getPointerElementType(), ginst); if (isVisitedPointer(ginst)) pt_ut_self = queryPointedType(ginst); if (isVisitedPointer(pt)) pt_ut_base = queryPointedType(pt); setPointedType(ginst, utSaturate(pt_ut_element, utSaturate(pt_ut_self, pt_ut_base))); setExprType(ginst, llvmT2utccT(ginst->getType(), ginst)); }
// Scalarize a vector-typed GEP: build one scalar GEP per vector lane from the
// per-lane components of the base pointer and each index.
bool Scalarizer::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  VectorType *VT = dyn_cast<VectorType>(GEPI.getType());
  if (!VT)
    return false;

  IRBuilder<> Builder(&GEPI);
  unsigned NumElems = VT->getNumElements();
  unsigned NumIndices = GEPI.getNumIndices();

  // Scatter the base pointer and every index into per-lane values.
  Scatterer Base = scatter(&GEPI, GEPI.getOperand(0));
  SmallVector<Scatterer, 8> Ops;
  Ops.resize(NumIndices);
  for (unsigned Idx = 0; Idx != NumIndices; ++Idx)
    Ops[Idx] = scatter(&GEPI, GEPI.getOperand(Idx + 1));

  // Emit one scalar GEP per lane, preserving the inbounds flag.
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Lane = 0; Lane != NumElems; ++Lane) {
    SmallVector<Value *, 8> LaneIndices;
    LaneIndices.resize(NumIndices);
    for (unsigned Idx = 0; Idx != NumIndices; ++Idx)
      LaneIndices[Idx] = Ops[Idx][Lane];
    Res[Lane] = Builder.CreateGEP(GEPI.getSourceElementType(), Base[Lane],
                                  LaneIndices,
                                  GEPI.getName() + ".i" + Twine(Lane));
    if (GEPI.isInBounds())
      if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[Lane]))
        NewGEPI->setIsInBounds();
  }

  gather(&GEPI, Res);
  return true;
}
// Verifies that a GEP's index is constant: visits the base pointer and all
// indices, then checks the address version recorded for this GEP.  Throws
// ArrayIndexIsNotConstant when the address has been rewritten (version != 0).
void ArrayIndexChecker::visitGetElementPtrInst(GetElementPtrInst& I) {
  DEBUG(dbgs() << "ArrayIndexChecker: visiting GEP " << I << "\n");

  // Register the base pointer and every index before querying our own entry.
  visitValue(*I.getPointerOperand());
  for (auto Idx = I.idx_begin(), E = I.idx_end(); Idx != E; ++Idx) {
    visitValue(**Idx);
  }

  // This GEP must already have a slot in ptr_value_vec_ and a matching
  // address entry.
  auto pos = std::find(ptr_value_vec_.begin(), ptr_value_vec_.end(), &I);
  assert(pos != ptr_value_vec_.end());
  index_t varIdx = pos - ptr_value_vec_.begin();
  assert(idx2addr_.find(varIdx) != idx2addr_.end());

  // A non-zero version means the address was rewritten, i.e. the index is
  // not provably constant.  (Removed a stray second semicolon here.)
  if (addr2version_[idx2addr_[varIdx]] != 0)
    throw ArrayIndexIsNotConstant;

  DEBUG(dbgs() << "ArrayIndexChecker: visited GEP\n");
}
// Returns a clone of `I` with its operands converted to those specified in
// ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
// operand whose address space needs to be modified might not exist in
// ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
// adds that operand use to UndefUsesToFix so that caller can fix them later.
//
// Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
// from a pointer whose type already matches. Therefore, this function returns a
// Value* instead of an Instruction*.
static Value *cloneInstructionWithNewAddressSpace(
    Instruction *I, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  // The clone's type: same pointee, new address space.
  Type *NewPtrType =
      I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (I->getOpcode() == Instruction::AddrSpaceCast) {
    Value *Src = I->getOperand(0);
    // Because `I` is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space, according
    // to our algorithm.
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    if (Src->getType() != NewPtrType)
      return new BitCastInst(Src, NewPtrType);
    return Src;
  }

  // Computes the converted pointer operands.  Non-pointer operands keep a
  // nullptr placeholder so indices still line up with operand numbers.
  SmallVector<Value *, 4> NewPointerOperands;
  for (const Use &OperandUse : I->operands()) {
    if (!OperandUse.get()->getType()->isPointerTy())
      NewPointerOperands.push_back(nullptr);
    else
      NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
          OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
  }

  // Clone per-opcode; only pointer-typed operands were rewritten above.
  switch (I->getOpcode()) {
  case Instruction::BitCast:
    return new BitCastInst(NewPointerOperands[0], NewPtrType);
  case Instruction::PHI: {
    assert(I->getType()->isPointerTy());
    PHINode *PHI = cast<PHINode>(I);
    PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
    for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
      unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
      NewPHI->addIncoming(NewPointerOperands[OperandNo],
                          PHI->getIncomingBlock(Index));
    }
    return NewPHI;
  }
  case Instruction::GetElementPtr: {
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
    GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
        GEP->getSourceElementType(), NewPointerOperands[0],
        SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end()));
    NewGEP->setIsInBounds(GEP->isInBounds());
    return NewGEP;
  }
  case Instruction::Select: {
    assert(I->getType()->isPointerTy());
    return SelectInst::Create(I->getOperand(0), NewPointerOperands[1],
                              NewPointerOperands[2], "", nullptr, I);
  }
  default:
    llvm_unreachable("Unexpected opcode");
  }
}
/*
 * Very sloppy implementation for quick prototyping
 * // TODO Assumption is that the first field contains the number of iterations -- if not, then modify source for now
 */
// Scans the uses of the OpenMP context structure for a GEP addressing its
// first field; the value stored through that GEP is taken to be the loop's
// upper bound.  Returns NULL when no such store is found.
Value *HeteroOMPTransform::find_loop_upper_bound(Value *context) {
	for (Value::use_iterator i = context->use_begin(), e = context->use_end(); i != e; ++i) {
		// A use need not be an instruction (e.g. a constant expression).
		// The original code passed a possibly-null pointer to dyn_cast,
		// which requires a non-null argument; guard explicitly instead.
		Instruction *insn = dyn_cast<Instruction>(*i);
		if (!insn)
			continue;
		GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(insn);
		// Require at least three operands (ptr, idx0, idx1) before reading
		// operand 2, then check that it selects field zero.
		if (GEP != NULL && GEP->getNumOperands() > 2 &&
		    isa<ConstantInt>(GEP->getOperand(2)) &&
		    cast<ConstantInt>(GEP->getOperand(2))->equalsInt(0)) {
			/// README:NOTE THE ASSUMPTION THAT THE FIRST ELEMENT IN THE CONTEXT IS MAX ITERATION OF PARALLEL LOOP
			for (Value::use_iterator I = insn->use_begin(), E = insn->use_end(); I != E; ++I) {
				if (StoreInst *SI = dyn_cast<StoreInst>(*I)) {
					// The stored value is the loop bound.
					return SI->getOperand(0);
				}
			}
		}
	}
	return NULL;
}
/// Determines whether a phi corresponds to an inbounds recurrence where the /// base is not a known nonnull-or-poison value. Returns the base value, or /// null if the phi doesn't correspond to such a recurrence. Value *NullCheckElimination::isNontrivialInBoundsRecurrence(PHINode *PN) { if (PN->getNumOperands() != 2) return nullptr; Value *BaseV; GetElementPtrInst *SuccessorI; if (auto *GEP = castToInBoundsGEP(PN->getOperand(0))) { BaseV = PN->getOperand(1); SuccessorI = GEP; } else if (auto *GEP = castToInBoundsGEP(PN->getOperand(1))) { BaseV = PN->getOperand(0); SuccessorI = GEP; } else { return nullptr; } if (NonNullOrPoisonValues.count(BaseV) || SuccessorI->getOperand(0) != PN) return nullptr; return BaseV; }
// Hoists an addrspacecast out of a GEP's pointer operand:
//   gep (addrspacecast X), indices  ->  addrspacecast (gep X, indices)
// Returns the new addrspacecast, or nullptr when the pointer operand has no
// eliminable cast.  Handles both GEP instructions and GEP constant exprs.
Value *NVPTXFavorNonGenericAddrSpaces::hoistAddrSpaceCastFromGEP(
    GEPOperator *GEP, int Depth) {
  Value *NewOperand =
      hoistAddrSpaceCastFrom(GEP->getPointerOperand(), Depth + 1);
  if (NewOperand == nullptr)
    return nullptr;

  // hoistAddrSpaceCastFrom returns an eliminable addrspacecast or nullptr.
  assert(isEliminableAddrSpaceCast(NewOperand));
  Operator *Cast = cast<Operator>(NewOperand);

  SmallVector<Value *, 8> Indices(GEP->idx_begin(), GEP->idx_end());
  Value *NewASC;
  if (Instruction *GEPI = dyn_cast<Instruction>(GEP)) {
    // GEP = gep (addrspacecast X), indices
    // =>
    // NewGEP = gep X, indices
    // NewASC = addrspacecast NewGEP
    GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
        GEP->getSourceElementType(), Cast->getOperand(0), Indices, "", GEPI);
    NewGEP->setIsInBounds(GEP->isInBounds());
    NewASC = new AddrSpaceCastInst(NewGEP, GEP->getType(), "", GEPI);
    NewASC->takeName(GEP);
    // Without RAUWing GEP, the compiler would visit GEP again and emit
    // redundant instructions. This is exercised in test @rauw in
    // access-non-generic.ll.
    GEP->replaceAllUsesWith(NewASC);
  } else {
    // GEP is a constant expression.
    Constant *NewGEP = ConstantExpr::getGetElementPtr(
        GEP->getSourceElementType(), cast<Constant>(Cast->getOperand(0)),
        Indices, GEP->isInBounds());
    NewASC = ConstantExpr::getAddrSpaceCast(NewGEP, GEP->getType());
  }
  return NewASC;
}
// Constant-folds a load of an object's "flavor" attribute: when the constant
// GEP addresses the kind-id field, replaces all uses of the load with the
// flavor's kind_id constant.  Returns true when the load was folded.
bool handleFlavor(LoadInst *li, ConstantExpr *gepce) {
    if (VERBOSITY("opt") >= 1) {
        errs() << "\nFound this load of a flavor attr:\n" << *li << '\n';
    }

    // Materialize the constant GEP as a (never-inserted) instruction just to
    // compute its byte offset, then free the temporary.
    GetElementPtrInst *gep = cast<GetElementPtrInst>(gepce->getAsInstruction());
    APInt ap_offset(64, 0, true);
    bool success = gep->accumulateConstantOffset(*g.tm->getDataLayout(), ap_offset);
    delete gep;
    assert(success);
    int64_t offset = ap_offset.getSExtValue();

    if (offset == FLAVOR_KINDID_OFFSET) {
        // The GEP's base is the flavor global; fold the load to its kind id.
        ObjectFlavor* flavor = getFlavorFromGV(cast<GlobalVariable>(gepce->getOperand(0)));
        replaceUsesWithConstant(li, flavor->kind_id);
        return true;
    } else {
        // Unknown offset into the flavor struct; flag it loudly.
        ASSERT(0, "%ld", offset);
        return false;
    }

    // Unreachable: both branches above return.
    assert(0);
    return false;
}
// Symbolically execute a GEP: when the index is symbolic the pointed-to
// location becomes indefinite (AllLocDefinite = false); otherwise the pointer
// is removed from the points-to map P.
//
// i - the GEP instruction to process (must be a GetElementPtrInst).
void Executor::executeGep(Instruction *i) {
    if (DisabledSymbolicExeCurRun) {
        return;
    }
    assert(i && "Expecting an instruction!");
    // Use cast<> instead of a C-style cast so debug builds verify the type.
    GetElementPtrInst *gep = cast<GetElementPtrInst>(i);
    assert(gep->getNumIndices()<=2 && "Unsupported gep instruction");

    if(AllocaInst *a = dyn_cast<AllocaInst>(gep->getPointerOperand())) {
        Type *ty = a->getAllocatedType();
        if (ty->isIntegerTy()) {
            // Incompleteness: pointer dereferencement on symbolic value
            AllLocDefinite = false;
            return;
        }
    }

    // Pick which operand holds the interesting index: when the pointee is an
    // aggregate (has contained types) the first index steps over the pointer
    // and the second selects the element.
    unsigned idx = 0;
    Type *subTy = NULL;
    Type *ptrOpTy = gep->getPointerOperandType();
    if (ptrOpTy && ptrOpTy->getNumContainedTypes()==1) {
        subTy = ptrOpTy->getContainedType(0);
    }
    if (subTy && subTy->getNumContainedTypes()>0) {
        idx = 1;
    } else {
        // BUGFIX: assert("...") is a string literal and is always true, so
        // the original check could never fire.
        assert(0 && "Unsupported gep instruction!");
    }

    Value *ptr = i;
    if(gep->hasIndices()) {
        Value *index = gep->getOperand(idx+1);
        if (SMAP->contains(index)) {
            // P[v->sv]
            // sv is a new symbolic value
            // Value *a = gep->getPointerOperand();
            // ptr = (a index)
            //SymbolPtr Sindex = SMAP->get(index);
            //PMAP->createGepOp(ptr, a, Sindex);

            // Incompleteness: pointer dereferencement on symbolic value
            AllLocDefinite = false;
        } else {
            // P = P - ptr
            PMAP->remove(ptr);
        }
    }
    // Global allocation
    /*else if(isa<GlobalVariable>(ptr)) {
        // ptr in domain(P)
        if (PMAP->contains(ptr)) {
            SymbolPtr Pptr = PMAP->get(ptr);
            // P' = P[ptr->S(val)]
            PMAP->update(ptr, Pptr);
        } else {
            // P = P - ptr
            PMAP->remove(ptr);
        }
    }*/
}
// Instrument a GEP with struct-field / struct-array access counters.
// Returns true when at least one counter update was inserted; bumps the
// instrumented/ignored statistics either way.
bool EfficiencySanitizer::instrumentGetElementPtr(Instruction *I, Module &M) {
  GetElementPtrInst *GepInst = dyn_cast<GetElementPtrInst>(I);
  bool Res = false;
  // A single-index GEP cannot select a struct field; nothing to count.
  if (GepInst == nullptr || GepInst->getNumIndices() == 1) {
    ++NumIgnoredGEPs;
    return false;
  }
  Type *SourceTy = GepInst->getSourceElementType();
  StructType *StructTy = nullptr;
  ConstantInt *Idx;
  // Check if GEP calculates address from a struct array.
  if (isa<StructType>(SourceTy)) {
    StructTy = cast<StructType>(SourceTy);
    Idx = dyn_cast<ConstantInt>(GepInst->getOperand(1));
    // A non-zero (or non-constant) first index means we stepped through an
    // array of structs; count it with the struct's array counter.
    if ((Idx == nullptr || Idx->getSExtValue() != 0) &&
        !shouldIgnoreStructType(StructTy) && StructTyMap.count(StructTy) != 0)
      Res |= insertCounterUpdate(I, StructTy, getArrayCounterIdx(StructTy));
  }
  // Iterate all (except the first and the last) idx within each GEP instruction
  // for possible nested struct field address calculation.
  for (unsigned i = 1; i < GepInst->getNumIndices(); ++i) {
    // Type reached after applying the first i indices.
    SmallVector<Value *, 8> IdxVec(GepInst->idx_begin(),
                                   GepInst->idx_begin() + i);
    Type *Ty = GetElementPtrInst::getIndexedType(SourceTy, IdxVec);
    unsigned CounterIdx = 0;
    if (isa<ArrayType>(Ty)) {
      ArrayType *ArrayTy = cast<ArrayType>(Ty);
      StructTy = dyn_cast<StructType>(ArrayTy->getElementType());
      if (shouldIgnoreStructType(StructTy) || StructTyMap.count(StructTy) == 0)
        continue;
      // The last counter for struct array access.
      CounterIdx = getArrayCounterIdx(StructTy);
    } else if (isa<StructType>(Ty)) {
      StructTy = cast<StructType>(Ty);
      if (shouldIgnoreStructType(StructTy) || StructTyMap.count(StructTy) == 0)
        continue;
      // Get the StructTy's subfield index.
      Idx = cast<ConstantInt>(GepInst->getOperand(i+1));
      assert(Idx->getSExtValue() >= 0 &&
             Idx->getSExtValue() < StructTy->getNumElements());
      CounterIdx = getFieldCounterIdx(StructTy) + Idx->getSExtValue();
    }
    Res |= insertCounterUpdate(I, StructTy, CounterIdx);
  }
  if (Res)
    ++NumInstrumentedGEPs;
  else
    ++NumIgnoredGEPs;
  return Res;
}
// // Method: visitGetElementPtrInst() // // Description: // This method checks to see if the specified GEP is safe. If it cannot prove // it safe, it then adds a run-time check for it. // void InsertGEPChecks::visitGetElementPtrInst (GetElementPtrInst & GEP) { // // Don't insert a check if GEP only indexes into a structure and the // user doesn't want to do structure index checking. // if (DisableStructChecks && indexesStructsOnly (&GEP)) { return; } // // Get the function in which the GEP instruction lives. // Value * PH = ConstantPointerNull::get (getVoidPtrType(GEP.getContext())); BasicBlock::iterator InsertPt = &GEP; ++InsertPt; Instruction * ResultPtr = castTo (&GEP, getVoidPtrType(GEP.getContext()), GEP.getName() + ".cast", InsertPt); // // Make this an actual cast instruction; it will make it easier to update // DSA. // Value * SrcPtr = castTo (GEP.getPointerOperand(), getVoidPtrType(GEP.getContext()), GEP.getName()+".cast", InsertPt); // // Create the call to the run-time check. // std::vector<Value *> args(1, PH); args.push_back (SrcPtr); args.push_back (ResultPtr); CallInst * CI = CallInst::Create (PoolCheckArrayUI, args, "", InsertPt); // // Add debugging info metadata to the run-time check. // if (MDNode * MD = GEP.getMetadata ("dbg")) CI->setMetadata ("dbg", MD); // // Update the statistics. // ++GEPChecks; return; }
// Verify that no GC-tracked pointer appears as a GEP base in a special
// address space; only enforced in Strong mode (see comment below).
void GCInvariantVerifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
    Type *Ty = GEP.getType();
    if (!Ty->isPointerTy())
        return;
    unsigned AS = cast<PointerType>(Ty)->getAddressSpace();
    if (!isSpecialAS(AS))
        return;
    /* We're actually ok with GEPs here, as long as they don't feed into any
       uses. Upstream is currently still debating whether CAST(GEP) == GEP(CAST).
       In the frontend, we always perform CAST(GEP), so while we can enforce
       this invariant when we run directly after the frontend (Strong == 1),
       the optimizer will introduce the other form. Thus, we need to allow it
       while upstream hasn't decided whether the optimizer is allowed to
       introduce these.
     */
    if (!Strong)
        return;
    Check(AS != AddressSpace::Tracked,
          "GC tracked values may not appear in GEP expressions."
          " You may have to decay the value first", &GEP);
}
// FIXME: Merge with llvm::isConsecutiveAccess
// Returns true when memory access B immediately follows access A (i.e. B's
// address equals A's address plus A's store size).  Tries constant-offset
// stripping first, then SCEV, and finally a manual look-through of matching
// GEPs whose last index differs by one behind a sign/zero extension.
bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getPointerAddressSpace(A);
  unsigned ASB = getPointerAddressSpace(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers of the same size type.
  unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
  Type *PtrATy = PtrA->getType()->getPointerElementType();
  Type *PtrBTy = PtrB->getType()->getPointerElementType();
  if (PtrA == PtrB ||
      DL.getTypeStoreSize(PtrATy) != DL.getTypeStoreSize(PtrBTy) ||
      DL.getTypeStoreSize(PtrATy->getScalarType()) !=
      DL.getTypeStoreSize(PtrBTy->getScalarType()))
    return false;

  APInt Size(PtrBitWidth, DL.getTypeStoreSize(PtrATy));

  // First attempt: strip constant inbounds offsets from both pointers; if
  // they share a base, the offsets decide the answer directly.
  unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the size.
  APInt BaseDelta = Size - OffsetDelta;

  // Compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
  const SCEV *C = SE.getConstant(BaseDelta);
  const SCEV *X = SE.getAddExpr(PtrSCEVA, C);
  if (X == PtrSCEVB)
    return true;

  // Sometimes even this doesn't work, because SCEV can't always see through
  // patterns that look like (gep (ext (add (shl X, C1), C2))). Try checking
  // things the hard way.

  // Look through GEPs after checking they're the same except for the last
  // index.
  GetElementPtrInst *GEPA = getSourceGEP(A);
  GetElementPtrInst *GEPB = getSourceGEP(B);
  if (!GEPA || !GEPB || GEPA->getNumOperands() != GEPB->getNumOperands())
    return false;
  unsigned FinalIndex = GEPA->getNumOperands() - 1;
  for (unsigned i = 0; i < FinalIndex; i++)
    if (GEPA->getOperand(i) != GEPB->getOperand(i))
      return false;

  // Both final indices must be the same kind of instruction of the same type.
  Instruction *OpA = dyn_cast<Instruction>(GEPA->getOperand(FinalIndex));
  Instruction *OpB = dyn_cast<Instruction>(GEPB->getOperand(FinalIndex));
  if (!OpA || !OpB || OpA->getOpcode() != OpB->getOpcode() ||
      OpA->getType() != OpB->getType())
    return false;

  // Only look through a ZExt/SExt.
  if (!isa<SExtInst>(OpA) && !isa<ZExtInst>(OpA))
    return false;

  bool Signed = isa<SExtInst>(OpA);

  OpA = dyn_cast<Instruction>(OpA->getOperand(0));
  OpB = dyn_cast<Instruction>(OpB->getOperand(0));
  if (!OpA || !OpB || OpA->getType() != OpB->getType())
    return false;

  // Now we need to prove that adding 1 to OpA won't overflow.
  bool Safe = false;
  // First attempt: if OpB is an add with NSW/NUW, and OpB is 1 added to OpA,
  // we're okay.
  if (OpB->getOpcode() == Instruction::Add &&
      isa<ConstantInt>(OpB->getOperand(1)) &&
      cast<ConstantInt>(OpB->getOperand(1))->getSExtValue() > 0) {
    if (Signed)
      Safe = cast<BinaryOperator>(OpB)->hasNoSignedWrap();
    else
      Safe = cast<BinaryOperator>(OpB)->hasNoUnsignedWrap();
  }

  unsigned BitWidth = OpA->getType()->getScalarSizeInBits();

  // Second attempt:
  // If any bits are known to be zero other than the sign bit in OpA, we can
  // add 1 to it while guaranteeing no overflow of any sort.
  if (!Safe) {
    KnownBits Known(BitWidth);
    computeKnownBits(OpA, Known, DL, 0, nullptr, OpA, &DT);
    if (Known.countMaxTrailingOnes() < (BitWidth - 1))
      Safe = true;
  }

  if (!Safe)
    return false;

  // Finally, check with SCEV that OpB == OpA + 1.
  const SCEV *OffsetSCEVA = SE.getSCEV(OpA);
  const SCEV *OffsetSCEVB = SE.getSCEV(OpB);
  const SCEV *One = SE.getConstant(APInt(BitWidth, 1));
  const SCEV *X2 = SE.getAddExpr(OffsetSCEVA, One);
  return X2 == OffsetSCEVB;
}
bool handleCls(LoadInst *li, GlobalVariable *gv) { bool changed = true; if (VERBOSITY("opt") >= 1) { errs() << "\nFound load of class-typed global variable:\n" << *li << '\n'; } BoxedClass *cls = getClassFromGV(gv); if (!cls->is_constant) { assert(0 && "what globally-resolved classes are not constant??"); if (VERBOSITY("opt") >= 1) { errs() << gv->getName() << " is not constant; moving on\n"; } return false; } std::vector<Instruction*> to_remove; for (User* user : li->users()) { if (CallInst *call = dyn_cast<CallInst>(user)) { if (call->getCalledFunction()->getName() == "_maybeDecrefCls") { errs() << "Found decrefcls call: " << *call << '\n'; if (!isUserDefined(cls)) { // Don't delete right away; I think that invalidates the iterator // we're currently iterating over to_remove.push_back(call); } } continue; } GetElementPtrInst *gep = dyn_cast<GetElementPtrInst>(user); if (!gep) { //errs() << "Not a gep: " << *user << '\n'; continue; } APInt ap_offset(64, 0, true); bool success = gep->accumulateConstantOffset(*g.tm->getDataLayout(), ap_offset); assert(success); int64_t offset = ap_offset.getSExtValue(); errs() << "Found a gep at offset " << offset << ": " << *gep << '\n'; for (User* gep_user : gep->users()) { LoadInst *gep_load = dyn_cast<LoadInst>(gep_user); if (!gep_load) { //errs() << "Not a load: " << *gep_user << '\n'; continue; } errs() << "Found a load: " << *gep_load << '\n'; if (offset == CLS_DTOR_OFFSET) { errs() << "Dtor; replacing with " << cls->dtor << "\n"; replaceUsesWithConstant(gep_load, (uintptr_t)cls->dtor); changed = true; } else if (offset == CLS_HASATTRS_OFFSET) { errs() << "Hasattrs; replacing with " << cls->hasattrs << "\n"; replaceUsesWithConstant(gep_load, cls->hasattrs); changed = true; } } } for (int i = 0; i < to_remove.size(); i++) { to_remove[i]->eraseFromParent(); changed = true; } if (VERBOSITY()) { llvm::errs() << "Constant-folding this load: " << *li << '\n'; } li->replaceAllUsesWith(embedConstantPtr(cls, g.llvm_class_type_ptr)); 
changed = true; return changed; }
//
// Method: runOnModule()
//
// Description:
//  Entry point for this LLVM pass.
//  Find all GEPs, and simplify them.
//
// Inputs:
//  M - A reference to the LLVM module to transform
//
// Outputs:
//  M - The transformed LLVM module.
//
// Return value:
//  true  - The module was modified.
//  false - The module was not modified.
//
bool SimplifyGEP::runOnModule(Module& M) {
  TD = &getAnalysis<TargetData>();
  preprocess(M);
  for (Module::iterator F = M.begin(); F != M.end(); ++F){
    for (Function::iterator B = F->begin(), FE = F->end(); B != FE; ++B) {
      for (BasicBlock::iterator I = B->begin(), BE = B->end(); I != BE; I++) {
        if(!(isa<GetElementPtrInst>(I)))
          continue;
        GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
        Value *PtrOp = GEP->getOperand(0);
        Value *StrippedPtr = PtrOp->stripPointerCasts();
        // Check if the GEP base pointer is enclosed in a cast
        if (StrippedPtr != PtrOp) {
          const PointerType *StrippedPtrTy =cast<PointerType>(StrippedPtr->getType());
          bool HasZeroPointerIndex = false;
          if (ConstantInt *C = dyn_cast<ConstantInt>(GEP->getOperand(1)))
            HasZeroPointerIndex = C->isZero();
          // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
          // into     : GEP [10 x i8]* X, i32 0, ...
          //
          // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
          //           into     : GEP i8* X, ...
          //
          // This occurs when the program declares an array extern like "int X[];"
          if (HasZeroPointerIndex) {
            const PointerType *CPTy = cast<PointerType>(PtrOp->getType());
            if (const ArrayType *CATy =
                dyn_cast<ArrayType>(CPTy->getElementType())) {
              // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
              if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
                // -> GEP i8* X, ...
                SmallVector<Value*, 8> Idx(GEP->idx_begin()+1, GEP->idx_end());
                GetElementPtrInst *Res =
                  GetElementPtrInst::Create(StrippedPtr, Idx, GEP->getName(), GEP);
                Res->setIsInBounds(GEP->isInBounds());
                GEP->replaceAllUsesWith(Res);
                continue;
              }

              if (const ArrayType *XATy =
                  dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){
                // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
                if (CATy->getElementType() == XATy->getElementType()) {
                  // -> GEP [10 x i8]* X, i32 0, ...
                  // At this point, we know that the cast source type is a pointer
                  // to an array of the same type as the destination pointer
                  // array.  Because the array type is never stepped over (there
                  // is a leading zero) we can fold the cast into this GEP.
                  GEP->setOperand(0, StrippedPtr);
                  continue;
                }
              }
            }
          } else if (GEP->getNumOperands() == 2) {
            // Transform things like:
            // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
            // into:  %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
            Type *SrcElTy = StrippedPtrTy->getElementType();
            Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
            if (TD && SrcElTy->isArrayTy() &&
                TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
                TD->getTypeAllocSize(ResElTy)) {
              Value *Idx[2];
              Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP->getContext()));
              Idx[1] = GEP->getOperand(1);
              Value *NewGEP = GetElementPtrInst::Create(StrippedPtr, Idx,
                                                        GEP->getName(), GEP);
              // V and GEP are both pointer types --> BitCast
              GEP->replaceAllUsesWith(new BitCastInst(NewGEP, GEP->getType(),
                                                      GEP->getName(), GEP));
              continue;
            }

            // Transform things like:
            // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
            //   (where tmp = 8*tmp2) into:
            // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
            if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) {
              uint64_t ArrayEltSize =
                TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());
              // Check to see if "tmp" is a scale by a multiple of ArrayEltSize.
              // We allow either a mul, shift, or constant here.
              Value *NewIdx = 0;
              ConstantInt *Scale = 0;
              if (ArrayEltSize == 1) {
                NewIdx = GEP->getOperand(1);
                Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
              } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(1))) {
                NewIdx = ConstantInt::get(CI->getType(), 1);
                Scale = CI;
              } else if (Instruction *Inst =dyn_cast<Instruction>(GEP->getOperand(1))){
                if (Inst->getOpcode() == Instruction::Shl &&
                    isa<ConstantInt>(Inst->getOperand(1))) {
                  ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
                  uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
                  Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
                                           1ULL << ShAmtVal);
                  NewIdx = Inst->getOperand(0);
                } else if (Inst->getOpcode() == Instruction::Mul &&
                           isa<ConstantInt>(Inst->getOperand(1))) {
                  Scale = cast<ConstantInt>(Inst->getOperand(1));
                  NewIdx = Inst->getOperand(0);
                }
              }

              // If the index will be to exactly the right offset with the scale taken
              // out, perform the transformation. Note, we don't know whether Scale is
              // signed or not. We'll use unsigned version of division/modulo
              // operation after making sure Scale doesn't have the sign bit set.
              if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
                  Scale->getZExtValue() % ArrayEltSize == 0) {
                Scale = ConstantInt::get(Scale->getType(),
                                         Scale->getZExtValue() / ArrayEltSize);
                if (Scale->getZExtValue() != 1) {
                  Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
                                                             false /*ZExt*/);
                  NewIdx = BinaryOperator::Create(BinaryOperator::Mul, NewIdx, C, "idxscale");
                }

                // Insert the new GEP instruction.
                Value *Idx[2];
                Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP->getContext()));
                Idx[1] = NewIdx;
                Value *NewGEP = GetElementPtrInst::Create(StrippedPtr, Idx,
                                                          GEP->getName(), GEP);
                GEP->replaceAllUsesWith(new BitCastInst(NewGEP, GEP->getType(),
                                                        GEP->getName(), GEP));
                continue;
              }
            }
          }
        }
      }
    }
  }

  return true;
}
/// updateCallSites - Update all sites that call F to use NF.
///
/// F is the original function whose first parameter is an sret pointer; NF is
/// the rewritten function that returns the aggregate directly.  Every call of
/// F is replaced with a call of NF that drops the sret argument, and every
/// (GEP+load) user of the sret pointer is rewritten to an extractvalue on the
/// new call's result.  Returns the callgraph node for NF.
CallGraphNode *SRETPromotion::updateCallSites(Function *F, Function *NF) {
  CallGraph &CG = getAnalysis<CallGraph>();
  SmallVector<Value*, 16> Args;

  // Attributes - Keep track of the parameter attributes for the arguments.
  SmallVector<AttributeWithIndex, 8> ArgAttrsVec;

  // Get a new callgraph node for NF.
  CallGraphNode *NF_CGN = CG.getOrInsertFunction(NF);

  // Each iteration rewrites (and erases) one call site, so the use list of F
  // shrinks until it is empty.
  while (!F->use_empty()) {
    CallSite CS(*F->use_begin());
    Instruction *Call = CS.getInstruction();
    const AttrListPtr &PAL = F->getAttributes();

    // Add any return attributes.
    if (Attributes attrs = PAL.getRetAttributes())
      ArgAttrsVec.push_back(AttributeWithIndex::get(0, attrs));

    // Copy arguments, however skip first one.  The first argument is the sret
    // pointer, which no longer exists in NF's signature.
    CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
    Value *FirstCArg = *AI;
    ++AI;

    // Attribute index 0 is reserved for the return value; index 1 belonged to
    // the (now removed) sret argument, so the remaining parameters start at
    // index 2 in the old list and are shifted down by one in the new list.
    unsigned ParamIndex = 2;
    while (AI != AE) {
      Args.push_back(*AI);
      if (Attributes Attrs = PAL.getParamAttributes(ParamIndex))
        ArgAttrsVec.push_back(AttributeWithIndex::get(ParamIndex - 1, Attrs));
      ++ParamIndex;
      ++AI;
    }

    // Add any function attributes.
    if (Attributes attrs = PAL.getFnAttributes())
      ArgAttrsVec.push_back(AttributeWithIndex::get(~0, attrs));

    AttrListPtr NewPAL = AttrListPtr::get(ArgAttrsVec.begin(),
                                          ArgAttrsVec.end());

    // Build new call instruction, preserving calling convention, attributes
    // and (for direct calls) the tail-call flag.
    Instruction *New;
    if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
                               Args.begin(), Args.end(), "", Call);
      cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
      cast<InvokeInst>(New)->setAttributes(NewPAL);
    } else {
      New = CallInst::Create(NF, Args.begin(), Args.end(), "", Call);
      cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
      cast<CallInst>(New)->setAttributes(NewPAL);
      if (cast<CallInst>(Call)->isTailCall())
        cast<CallInst>(New)->setTailCall();
    }
    // Args/ArgAttrsVec are reused across the outer while loop.
    Args.clear();
    ArgAttrsVec.clear();
    New->takeName(Call);

    // Update the callgraph to know that the callsite has been transformed.
    CallGraphNode *CalleeNode = CG[Call->getParent()->getParent()];
    CalleeNode->removeCallEdgeFor(Call);
    CalleeNode->addCalledFunction(New, NF_CGN);

    // Update all users of sret parameter to extract value using extractvalue.
    // Note the post-increment of UI: each iteration may erase the user, which
    // would otherwise invalidate the iterator.
    for (Value::use_iterator UI = FirstCArg->use_begin(),
           UE = FirstCArg->use_end(); UI != UE; ) {
      User *U2 = *UI++;
      CallInst *C2 = dyn_cast<CallInst>(U2);
      // Skip the old call itself — it uses the sret pointer as its first
      // argument and is erased at the bottom of the outer loop.
      if (C2 && (C2 == Call))
        continue;
      // All other users are GEPs (guaranteed by isSafeToUpdateAllCallers).
      // NOTE(review): operand 2 is assumed to be the struct field index,
      // i.e. GEPs of the form (ptr, 0, field) — confirm against the caller
      // safety check.
      GetElementPtrInst *UGEP = cast<GetElementPtrInst>(U2);
      ConstantInt *Idx = cast<ConstantInt>(UGEP->getOperand(2));
      Value *GR = ExtractValueInst::Create(New, Idx->getZExtValue(),
                                           "evi", UGEP);
      while(!UGEP->use_empty()) {
        // isSafeToUpdateAllCallers has checked that all GEP uses are
        // LoadInsts
        LoadInst *L = cast<LoadInst>(*UGEP->use_begin());
        L->replaceAllUsesWith(GR);
        L->eraseFromParent();
      }
      UGEP->eraseFromParent();
      continue;
    }

    // Finally remove the rewritten call, shrinking F's use list.
    Call->eraseFromParent();
  }

  return NF_CGN;
}
/// parseName - Build a human-readable symbolic name for \p value by walking
/// the chain of defining values recorded in the 'parents' map.
///
/// Results are memoized in the 'names' map so each value is named only once.
/// Branches that fully describe the value (GEPs, calls, icmps) 'break' out of
/// the walk; the others fall through and continue with parents[current].
/// The walk terminates when parents[current] is null (note: operator[]
/// default-constructs a null entry for unseen values).
///
/// BUGFIX: the GetElementPtrInst branch previously tested
/// dyn_cast<ConstantInt>(current) — the GEP itself — instead of the index
/// operand 'v', so constant indices were never rendered as constants and
/// always took the recursive parseName path.  The parallel GEPOperator
/// branch below (which correctly uses 'v') confirms the intended behavior.
string esp::parseName(Value *value){
    // Already computed?  Reuse the cached name.
    if(names.find(value) != names.end())
        return names[value];

    string name = "";
    Value *current = value;

    do {
        if (isa<LoadInst > (current)) {
            // A load dereferences its pointer operand.
            name += "*";
            // With no recorded parent, fall back to the operand's own name.
            if (parents[current] == NULL)
                name += (((LoadInst*) current)->getOperand(0))->getNameStr();
            if (((LoadInst*) current)->isVolatile())
                name += std::string("@VolatileLoad");
        } else if (dyn_cast<GetElementPtrInst > (current)) {
            GetElementPtrInst * gep = dyn_cast<GetElementPtrInst > (current);
            unsigned ops = gep->getNumOperands();
            name += "[";
            for (unsigned i = 1; i < ops; i++) {
                Value *v = gep->getOperand(i);
                // Inspect the index operand 'v' (was: 'current' — see BUGFIX
                // note in the function comment).
                if (ConstantInt * ci = dyn_cast<ConstantInt > (v)) {
                    // Skip the conventional leading zero index.
                    if (i == 1 && ci->equalsInt(0))
                        continue;
                    name += ".";
                    name += ci->getValue().toString(10, false);
                } else {
                    name += ".";
                    name += parseName(v);
                }
            }
            name += "]";
            name += parseName(gep->getOperand(0));
            break;
        } else if (isa<AllocaInst > (current)) {
            name += current->getNameStr();
        } else if (isa<Argument > (current)) {
            // Only arguments registered in 'arguments' are named.
            if (arguments.find(current) != arguments.end())
                name += std::string("$") + current->getNameStr();
        } else if (isa<GlobalValue > (current)) {
            name += std::string("@") + current->getNameStr();
        } else if (isa<CallInst > (current)) {
            CallInst *callinst = (CallInst*) current;
            if (((CallInst*) current)->getCalledFunction() != NULL) {
                name += std::string("@")+((CallInst*) current)->getCalledFunction()->getNameStr() + "(";
            } else {
                // Indirect call through a function pointer.
                name += std::string("@[funcPTR](");
                name += ((CallInst*) current)->getCalledValue()->getNameStr();
            }
            for (unsigned i = 1; i < callinst->getNumOperands(); i++) {
                name += parseName(callinst->getOperand(i));
            }
            name += std::string(")");
            break;
        } else if (isa<CastInst > (current)) {
            // Casts are transparent: keep walking through 'parents'.
        } else if (isa<PHINode > (current)) {
            // PHI nodes are intentionally not expanded (the old recursive
            // implementation rendered them as PHI[...]); the walk simply
            // continues through 'parents'.
        } else if (isa<BinaryOperator > (current)) {
            BinaryOperator *bo = dyn_cast<BinaryOperator > (current);
            Instruction::BinaryOps opcode = bo->getOpcode();
            if (opcode == Instruction::Add) {
                name.append("+");
            } else if (opcode == Instruction::Sub) {
                name.append("-");
            } else if (opcode == Instruction::Or) {
                name.append("||");
            } else if (opcode == Instruction::Mul) {
                name.append("*");
            } else if (opcode == Instruction::Xor) {
                name.append("^");
            } else if (opcode == Instruction::And) {
                name.append("&&");
            } else if (opcode == Instruction::Shl) {
                name.append("<<");
            } else if (opcode == Instruction::AShr) {
                name.append(">>");
            } else if (opcode == Instruction::LShr) {
                name.append(">>>");
            }
            // Append whichever operand is a constant; mixed non-constant
            // operations are only reported, not rendered.
            Value *v0 = bo->getOperand(0);
            Value *v1 = bo->getOperand(1);
            if (isa<ConstantInt > (v0)) {
                name += ((ConstantInt*) v0)->getValue().toString(10, false);
            } else if (isa<ConstantInt > (v1)) {
                name += ((ConstantInt*) v1)->getValue().toString(10, false);
            } else {
                printDebugMsg("Binary Operation between non-constants\n");
            }
        } else if (dyn_cast<GEPOperator > (current)) {
            // Constant-expression GEP: same rendering as the instruction
            // form above.
            GEPOperator * gep = dyn_cast<GEPOperator > (current);
            unsigned ops = gep->getNumOperands();
            name += "[";
            for (unsigned i = 1; i < ops; i++) {
                Value *v = gep->getOperand(i);
                if (ConstantInt * ci = dyn_cast<ConstantInt > (v)) {
                    if (i == 1 && ci->equalsInt(0))
                        continue;
                    name += ".";
                    name += ci->getValue().toString(10, false);
                }
                // NOTE(review): non-constant indices are silently skipped
                // here, unlike the GetElementPtrInst branch — confirm this
                // asymmetry is intentional.
            }
            name += "]";
            name += parseName(gep->getOperand(0));
            break;
        } else if (dyn_cast<ICmpInst > (current)) {
            // Name the non-constant side of the comparison.
            ICmpInst * icmp = dyn_cast<ICmpInst > (current);
            if (isa<Constant > (icmp->getOperand(0))) {
                name += parseName(icmp->getOperand(1));
                break;
            } else {
                name += parseName(icmp->getOperand(0));
                break;
            }
        } else if (dyn_cast<ConstantInt > (current)) {
            ConstantInt * cint = dyn_cast<ConstantInt > (current);
            name += cint->getValue().toString(10, true);
        } else {
            name += current->getNameStr(); // might not work
        }
    } while ((current = parents[current]));

    // Memoize before returning so recursive calls benefit too.
    names[value] = name;
    return name;
}
void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) { ExecutionContext &SF = ECStack.back(); SetValue(&I, TheEE->executeGEPOperation(I.getPointerOperand(), gep_type_begin(I), gep_type_end(I), SF), SF); }
/// Attempt to reassociate GEP by rewriting its I-th index.  Searches for the
/// closest dominator whose SCEV matches GEP with the I-th index replaced by
/// LHS; if one exists, builds an equivalent GEP off that dominator scaled by
/// RHS.  IndexedType is the type stepped over by the I-th index.  Returns the
/// new GEP, or nullptr if no profitable rewrite exists.
GetElementPtrInst *
NaryReassociate::tryReassociateGEPAtIndex(GetElementPtrInst *GEP, unsigned I,
                                          Value *LHS, Value *RHS,
                                          Type *IndexedType) {
  // Build the SCEV for GEP with the I-th index replaced by LHS, then look
  // for GEP's closest dominator with that same expression.
  SmallVector<const SCEV *, 4> IdxExprs;
  for (auto Idx = GEP->idx_begin(), E = GEP->idx_end(); Idx != E; ++Idx)
    IdxExprs.push_back(SE->getSCEV(*Idx));
  IdxExprs[I] = SE->getSCEV(LHS);
  const SCEV *WantedExpr =
      SE->getGEPExpr(GEP->getSourceElementType(),
                     SE->getSCEV(GEP->getPointerOperand()), IdxExprs,
                     GEP->isInBounds());

  auto *Dominator = findClosestMatchingDominator(WantedExpr, GEP);
  if (Dominator == nullptr)
    return nullptr;

  // Pretty rare but theoretically possible: a non-pointer value may happen
  // to share WantedExpr; we cannot GEP off such a value.
  auto *DomPtrTy = dyn_cast<PointerType>(Dominator->getType());
  if (DomPtrTy == nullptr)
    return nullptr;

  // NewGEP = (char *)Dominator + RHS * sizeof(IndexedType)
  uint64_t IndexedSize = DL->getTypeAllocSize(IndexedType);
  Type *DomElemTy = DomPtrTy->getElementType();
  uint64_t ElementSize = DL->getTypeAllocSize(DomElemTy);
  // Because I is not necessarily the last index of the GEP, IndexedSize need
  // not be divisible by ElementSize (e.g. packed structs mixing int and
  // int64 members).  We could emit an ugly byte-wise GEP in that case;
  // TODO: for now just bail out.
  if (IndexedSize % ElementSize != 0)
    return nullptr;

  // NewGEP = &Dominator[RHS * (sizeof(IndexedType) / sizeof(Dominator[0]))]
  IRBuilder<> Builder(GEP);
  Type *IntPtrTy = DL->getIntPtrType(DomPtrTy);
  if (RHS->getType() != IntPtrTy)
    RHS = Builder.CreateSExtOrTrunc(RHS, IntPtrTy);
  if (IndexedSize != ElementSize)
    RHS = Builder.CreateMul(
        RHS, ConstantInt::get(IntPtrTy, IndexedSize / ElementSize));

  auto *NewGEP = cast<GetElementPtrInst>(Builder.CreateGEP(Dominator, RHS));
  NewGEP->setIsInBounds(GEP->isInBounds());
  NewGEP->takeName(GEP);
  return NewGEP;
}
/// InsertPHITranslatedPointer - Insert a computation of the PHI translated
/// version of 'V' for the edge PredBB->CurBB into the end of the PredBB
/// block.  All newly created instructions are added to the NewInsts list.
/// This returns null on failure.
///
/// Handles three cases: a value already available in PredBB, a cast of a
/// translatable value, and a getelementptr whose operands are translatable.
Value *PHITransAddr::
InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
                           BasicBlock *PredBB, const DominatorTree &DT,
                           SmallVectorImpl<Instruction*> &NewInsts) {
  // See if we have a version of this value already available and dominating
  // PredBB.  If so, there is no need to insert a new instance of it.
  // (PHITranslateValue apparently returns false on success — hence the '!'.)
  PHITransAddr Tmp(InVal, DL, AC);
  if (!Tmp.PHITranslateValue(CurBB, PredBB, &DT, /*MustDominate=*/true))
    return Tmp.getAddr();

  // We don't need to PHI translate values which aren't instructions.
  auto *Inst = dyn_cast<Instruction>(InVal);
  if (!Inst)
    return nullptr;

  // Handle cast of PHI translatable value.
  if (CastInst *Cast = dyn_cast<CastInst>(Inst)) {
    // Only speculatable casts may be hoisted into the predecessor.
    if (!isSafeToSpeculativelyExecute(Cast))
      return nullptr;
    Value *OpVal = InsertPHITranslatedSubExpr(Cast->getOperand(0),
                                              CurBB, PredBB, DT, NewInsts);
    if (!OpVal) return nullptr;

    // Otherwise insert a cast at the end of PredBB.
    CastInst *New = CastInst::Create(Cast->getOpcode(), OpVal,
                                     InVal->getType(),
                                     InVal->getName() + ".phi.trans.insert",
                                     PredBB->getTerminator());
    New->setDebugLoc(Inst->getDebugLoc());
    NewInsts.push_back(New);
    return New;
  }

  // Handle getelementptr with at least one PHI operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
    SmallVector<Value*, 8> GEPOps;
    // NOTE(review): this local deliberately shadows the CurBB parameter —
    // operand translation is performed relative to the GEP's own block.
    // Confirm the shadowing is intentional before touching it.
    BasicBlock *CurBB = GEP->getParent();
    // Translate every operand (pointer and all indices); fail if any fails.
    for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
      Value *OpVal = InsertPHITranslatedSubExpr(GEP->getOperand(i),
                                                CurBB, PredBB, DT, NewInsts);
      if (!OpVal) return nullptr;
      GEPOps.push_back(OpVal);
    }

    GetElementPtrInst *Result = GetElementPtrInst::Create(
        GEP->getSourceElementType(), GEPOps[0], makeArrayRef(GEPOps).slice(1),
        InVal->getName() + ".phi.trans.insert", PredBB->getTerminator());
    Result->setDebugLoc(Inst->getDebugLoc());
    Result->setIsInBounds(GEP->isInBounds());
    NewInsts.push_back(Result);
    return Result;
  }

#if 0
  // FIXME: This code works, but it is unclear that we actually want to insert
  // a big chain of computation in order to make a value available in a block.
  // This needs to be evaluated carefully to consider its cost trade offs.

  // Handle add with a constant RHS.
  if (Inst->getOpcode() == Instruction::Add &&
      isa<ConstantInt>(Inst->getOperand(1))) {
    // PHI translate the LHS.
    Value *OpVal = InsertPHITranslatedSubExpr(Inst->getOperand(0),
                                              CurBB, PredBB, DT, NewInsts);
    if (OpVal == 0)
      return 0;

    BinaryOperator *Res = BinaryOperator::CreateAdd(OpVal, Inst->getOperand(1),
                                           InVal->getName()+".phi.trans.insert",
                                                    PredBB->getTerminator());
    Res->setHasNoSignedWrap(cast<BinaryOperator>(Inst)->hasNoSignedWrap());
    Res->setHasNoUnsignedWrap(cast<BinaryOperator>(Inst)->hasNoUnsignedWrap());
    NewInsts.push_back(Res);
    return Res;
  }
#endif

  return nullptr;
}
/// DoPromotion - This method actually performs the promotion of the specified
/// arguments, and returns the new function.  At this point, we know that it's
/// safe to do so.
///
/// Three kinds of arguments are handled: byval struct arguments (expanded to
/// one scalar parameter per struct element), promotable pointer arguments
/// (replaced by the loaded values, one parameter per distinct load/GEP index
/// path), and everything else (passed through unchanged).
CallGraphNode *ArgPromotion::DoPromotion(Function *F,
                             SmallPtrSet<Argument*, 8> &ArgsToPromote,
                             SmallPtrSet<Argument*, 8> &ByValArgsToTransform) {
  // Start by computing a new prototype for the function, which is the same as
  // the old function, but has modified arguments.
  const FunctionType *FTy = F->getFunctionType();
  std::vector<const Type*> Params;

  typedef std::set<IndicesVector> ScalarizeTable;

  // ScalarizedElements - If we are promoting a pointer that has elements
  // accessed out of it, keep track of which elements are accessed so that we
  // can add one argument for each.
  //
  // Arguments that are directly loaded will have a zero element value here, to
  // handle cases where there are both a direct load and GEP accesses.
  //
  std::map<Argument*, ScalarizeTable> ScalarizedElements;

  // OriginalLoads - Keep track of a representative load instruction from the
  // original function so that we can tell the alias analysis implementation
  // what the new GEP/Load instructions we are inserting look like.
  std::map<IndicesVector, LoadInst*> OriginalLoads;

  // Attributes - Keep track of the parameter attributes for the arguments
  // that we are *not* promoting. For the ones that we do promote, the parameter
  // attributes are lost
  SmallVector<AttributeWithIndex, 8> AttributesVec;
  const AttrListPtr &PAL = F->getAttributes();

  // Add any return attributes.
  if (Attributes attrs = PAL.getRetAttributes())
    AttributesVec.push_back(AttributeWithIndex::get(0, attrs));

  // First, determine the new argument list.  ArgIndex tracks the 1-based
  // attribute index of the argument in the OLD function.
  unsigned ArgIndex = 1;
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
       ++I, ++ArgIndex) {
    if (ByValArgsToTransform.count(I)) {
      // Simple byval argument? Just add all the struct element types.
      const Type *AgTy = cast<PointerType>(I->getType())->getElementType();
      const StructType *STy = cast<StructType>(AgTy);
      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
        Params.push_back(STy->getElementType(i));
      ++NumByValArgsPromoted;
    } else if (!ArgsToPromote.count(I)) {
      // Unchanged argument
      Params.push_back(I->getType());
      if (Attributes attrs = PAL.getParamAttributes(ArgIndex))
        AttributesVec.push_back(AttributeWithIndex::get(Params.size(), attrs));
    } else if (I->use_empty()) {
      // Dead argument (which are always marked as promotable)
      ++NumArgumentsDead;
    } else {
      // Okay, this is being promoted. This means that the only uses are loads
      // or GEPs which are only used by loads

      // In this table, we will track which indices are loaded from the argument
      // (where direct loads are tracked as no indices).
      ScalarizeTable &ArgIndices = ScalarizedElements[I];
      for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
           ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        assert(isa<LoadInst>(User) || isa<GetElementPtrInst>(User));
        IndicesVector Indices;
        Indices.reserve(User->getNumOperands() - 1);
        // Since loads will only have a single operand, and GEPs only a single
        // non-index operand, this will record direct loads without any indices,
        // and gep+loads with the GEP indices.
        for (User::op_iterator II = User->op_begin() + 1, IE = User->op_end();
             II != IE; ++II)
          Indices.push_back(cast<ConstantInt>(*II)->getSExtValue());
        // GEPs with a single 0 index can be merged with direct loads
        if (Indices.size() == 1 && Indices.front() == 0)
          Indices.clear();
        ArgIndices.insert(Indices);
        LoadInst *OrigLoad;
        if (LoadInst *L = dyn_cast<LoadInst>(User))
          OrigLoad = L;
        else
          // Take any load, we will use it only to update Alias Analysis
          OrigLoad = cast<LoadInst>(User->use_back());
        OriginalLoads[Indices] = OrigLoad;
      }

      // Add a parameter to the function for each element passed in.
      for (ScalarizeTable::iterator SI = ArgIndices.begin(),
             E = ArgIndices.end(); SI != E; ++SI) {
        // not allowed to dereference ->begin() if size() is 0
        Params.push_back(GetElementPtrInst::getIndexedType(I->getType(),
                                                           SI->begin(),
                                                           SI->end()));
        assert(Params.back());
      }

      if (ArgIndices.size() == 1 && ArgIndices.begin()->empty())
        ++NumArgumentsPromoted;
      else
        ++NumAggregatesPromoted;
    }
  }

  // Add any function attributes.  (~0 is the function-attribute index.)
  if (Attributes attrs = PAL.getFnAttributes())
    AttributesVec.push_back(AttributeWithIndex::get(~0, attrs));

  const Type *RetTy = FTy->getReturnType();

  // Work around LLVM bug PR56: the CWriter cannot emit varargs functions which
  // have zero fixed arguments.
  bool ExtraArgHack = false;
  if (Params.empty() && FTy->isVarArg()) {
    ExtraArgHack = true;
    Params.push_back(Type::getInt32Ty(F->getContext()));
  }

  // Construct the new function type using the new arguments.
  FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg());

  // Create the new function body and insert it into the module.
  Function *NF = Function::Create(NFTy, F->getLinkage(), F->getName());
  NF->copyAttributesFrom(F);

  DEBUG(dbgs() << "ARG PROMOTION: Promoting to:" << *NF << "\n"
        << "From: " << *F);

  // Recompute the parameter attributes list based on the new arguments for
  // the function.
  NF->setAttributes(AttrListPtr::get(AttributesVec.begin(),
                                     AttributesVec.end()));
  AttributesVec.clear();

  F->getParent()->getFunctionList().insert(F, NF);
  NF->takeName(F);

  // Get the alias analysis information that we need to update to reflect our
  // changes.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Get the callgraph information that we need to update to reflect our
  // changes.
  CallGraph &CG = getAnalysis<CallGraph>();

  // Get a new callgraph node for NF.
  CallGraphNode *NF_CGN = CG.getOrInsertFunction(NF);

  // Loop over all of the callers of the function, transforming the call sites
  // to pass in the loaded pointers.
  //
  SmallVector<Value*, 16> Args;
  while (!F->use_empty()) {
    CallSite CS = CallSite::get(F->use_back());
    assert(CS.getCalledFunction() == F);
    Instruction *Call = CS.getInstruction();
    const AttrListPtr &CallPAL = CS.getAttributes();

    // Add any return attributes.
    if (Attributes attrs = CallPAL.getRetAttributes())
      AttributesVec.push_back(AttributeWithIndex::get(0, attrs));

    // Loop over the operands, inserting GEP and loads in the caller as
    // appropriate.
    CallSite::arg_iterator AI = CS.arg_begin();
    ArgIndex = 1;
    for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
         I != E; ++I, ++AI, ++ArgIndex)
      if (!ArgsToPromote.count(I) && !ByValArgsToTransform.count(I)) {
        Args.push_back(*AI);          // Unmodified argument
        if (Attributes Attrs = CallPAL.getParamAttributes(ArgIndex))
          AttributesVec.push_back(AttributeWithIndex::get(Args.size(), Attrs));
      } else if (ByValArgsToTransform.count(I)) {
        // Emit a GEP and load for each element of the struct.
        const Type *AgTy = cast<PointerType>(I->getType())->getElementType();
        const StructType *STy = cast<StructType>(AgTy);
        Value *Idxs[2] = {
          ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), 0 };
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
          Value *Idx = GetElementPtrInst::Create(*AI, Idxs, Idxs+2,
                                                 (*AI)->getName()+"."+utostr(i),
                                                 Call);
          // TODO: Tell AA about the new values?
          Args.push_back(new LoadInst(Idx, Idx->getName()+".val", Call));
        }
      } else if (!I->use_empty()) {
        // Non-dead argument: insert GEPs and loads as appropriate.
        ScalarizeTable &ArgIndices = ScalarizedElements[I];
        // Store the Value* version of the indices in here, but declare it now
        // for reuse.
        std::vector<Value*> Ops;
        for (ScalarizeTable::iterator SI = ArgIndices.begin(),
               E = ArgIndices.end(); SI != E; ++SI) {
          Value *V = *AI;
          LoadInst *OrigLoad = OriginalLoads[*SI];
          if (!SI->empty()) {
            Ops.reserve(SI->size());
            const Type *ElTy = V->getType();
            for (IndicesVector::const_iterator II = SI->begin(),
                 IE = SI->end(); II != IE; ++II) {
              // Use i32 to index structs, and i64 for others (pointers/arrays).
              // This satisfies GEP constraints.
              const Type *IdxTy = (ElTy->isStructTy() ?
                    Type::getInt32Ty(F->getContext()) :
                    Type::getInt64Ty(F->getContext()));
              Ops.push_back(ConstantInt::get(IdxTy, *II));
              // Keep track of the type we're currently indexing.
              ElTy = cast<CompositeType>(ElTy)->getTypeAtIndex(*II);
            }
            // And create a GEP to extract those indices.
            V = GetElementPtrInst::Create(V, Ops.begin(), Ops.end(),
                                          V->getName()+".idx", Call);
            Ops.clear();
            AA.copyValue(OrigLoad->getOperand(0), V);
          }
          // Since we're replacing a load make sure we take the alignment
          // of the previous load.
          LoadInst *newLoad = new LoadInst(V, V->getName()+".val", Call);
          newLoad->setAlignment(OrigLoad->getAlignment());
          Args.push_back(newLoad);
          AA.copyValue(OrigLoad, Args.back());
        }
      }

    if (ExtraArgHack)
      Args.push_back(Constant::getNullValue(Type::getInt32Ty(F->getContext())));

    // Push any varargs arguments on the list.
    for (; AI != CS.arg_end(); ++AI, ++ArgIndex) {
      Args.push_back(*AI);
      if (Attributes Attrs = CallPAL.getParamAttributes(ArgIndex))
        AttributesVec.push_back(AttributeWithIndex::get(Args.size(), Attrs));
    }

    // Add any function attributes.
    if (Attributes attrs = CallPAL.getFnAttributes())
      AttributesVec.push_back(AttributeWithIndex::get(~0, attrs));

    // Build the replacement call/invoke, preserving calling convention,
    // attributes, and the tail-call flag.
    Instruction *New;
    if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
                               Args.begin(), Args.end(), "", Call);
      cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
      cast<InvokeInst>(New)->setAttributes(AttrListPtr::get(AttributesVec.begin(),
                                                            AttributesVec.end()));
    } else {
      New = CallInst::Create(NF, Args.begin(), Args.end(), "", Call);
      cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
      cast<CallInst>(New)->setAttributes(AttrListPtr::get(AttributesVec.begin(),
                                                          AttributesVec.end()));
      if (cast<CallInst>(Call)->isTailCall())
        cast<CallInst>(New)->setTailCall();
    }
    Args.clear();
    AttributesVec.clear();

    // Update the alias analysis implementation to know that we are replacing
    // the old call with a new one.
    AA.replaceWithNewValue(Call, New);

    // Update the callgraph to know that the callsite has been transformed.
    CallGraphNode *CalleeNode = CG[Call->getParent()->getParent()];
    CalleeNode->replaceCallEdge(Call, New, NF_CGN);

    if (!Call->use_empty()) {
      Call->replaceAllUsesWith(New);
      New->takeName(Call);
    }

    // Finally, remove the old call from the program, reducing the use-count of
    // F.
    Call->eraseFromParent();
  }

  // Since we have now created the new function, splice the body of the old
  // function right into the new function, leaving the old rotting hulk of the
  // function empty.
  NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList());

  // Loop over the argument list, transfering uses of the old arguments over to
  // the new arguments, also transfering over the names as well.
  // I2 walks NF's arguments in lock-step; promoted arguments advance it by
  // one slot per scalarized element.
  //
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(),
       I2 = NF->arg_begin(); I != E; ++I) {
    if (!ArgsToPromote.count(I) && !ByValArgsToTransform.count(I)) {
      // If this is an unmodified argument, move the name and users over to the
      // new version.
      I->replaceAllUsesWith(I2);
      I2->takeName(I);
      AA.replaceWithNewValue(I, I2);
      ++I2;
      continue;
    }

    if (ByValArgsToTransform.count(I)) {
      // In the callee, we create an alloca, and store each of the new incoming
      // arguments into the alloca.
      Instruction *InsertPt = NF->begin()->begin();

      // Just add all the struct element types.
      const Type *AgTy = cast<PointerType>(I->getType())->getElementType();
      Value *TheAlloca = new AllocaInst(AgTy, 0, "", InsertPt);
      const StructType *STy = cast<StructType>(AgTy);
      Value *Idxs[2] = {
        ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), 0 };

      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
        Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
        Value *Idx = GetElementPtrInst::Create(TheAlloca, Idxs, Idxs+2,
                                               TheAlloca->getName()+"."+Twine(i),
                                               InsertPt);
        I2->setName(I->getName()+"."+Twine(i));
        new StoreInst(I2++, Idx, InsertPt);
      }

      // Anything that used the arg should now use the alloca.
      I->replaceAllUsesWith(TheAlloca);
      TheAlloca->takeName(I);
      AA.replaceWithNewValue(I, TheAlloca);
      continue;
    }

    if (I->use_empty()) {
      AA.deleteValue(I);
      continue;
    }

    // Otherwise, if we promoted this argument, then all users are load
    // instructions (or GEPs with only load users), and all loads should be
    // using the new argument that we added.
    ScalarizeTable &ArgIndices = ScalarizedElements[I];

    while (!I->use_empty()) {
      if (LoadInst *LI = dyn_cast<LoadInst>(I->use_back())) {
        assert(ArgIndices.begin()->empty() &&
               "Load element should sort to front!");
        I2->setName(I->getName()+".val");
        LI->replaceAllUsesWith(I2);
        AA.replaceWithNewValue(LI, I2);
        LI->eraseFromParent();
        DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName()
              << "' in function '" << F->getName() << "'\n");
      } else {
        GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->use_back());
        IndicesVector Operands;
        Operands.reserve(GEP->getNumIndices());
        for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
             II != IE; ++II)
          Operands.push_back(cast<ConstantInt>(*II)->getSExtValue());

        // GEPs with a single 0 index can be merged with direct loads
        if (Operands.size() == 1 && Operands.front() == 0)
          Operands.clear();

        // Walk ArgIndices (sorted set) and I2 together to find the new
        // argument slot that corresponds to this GEP's index path.
        Function::arg_iterator TheArg = I2;
        for (ScalarizeTable::iterator It = ArgIndices.begin();
             *It != Operands; ++It, ++TheArg) {
          assert(It != ArgIndices.end() && "GEP not handled??");
        }

        std::string NewName = I->getName();
        for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
            NewName += "." + utostr(Operands[i]);
        }
        NewName += ".val";
        TheArg->setName(NewName);

        DEBUG(dbgs() << "*** Promoted agg argument '" << TheArg->getName()
              << "' of function '" << NF->getName() << "'\n");

        // All of the uses must be load instructions.  Replace them all with
        // the argument specified by ArgNo.
        while (!GEP->use_empty()) {
          LoadInst *L = cast<LoadInst>(GEP->use_back());
          L->replaceAllUsesWith(TheArg);
          AA.replaceWithNewValue(L, TheArg);
          L->eraseFromParent();
        }
        AA.deleteValue(GEP);
        GEP->eraseFromParent();
      }
    }

    // Increment I2 past all of the arguments added for this promoted pointer.
    for (unsigned i = 0, e = ArgIndices.size(); i != e; ++i)
      ++I2;
  }

  // Notify the alias analysis implementation that we inserted a new argument.
  if (ExtraArgHack)
    AA.copyValue(Constant::getNullValue(Type::getInt32Ty(F->getContext())),
                 NF->arg_begin());

  // Tell the alias analysis that the old function is about to disappear.
  AA.replaceWithNewValue(F, NF);

  NF_CGN->stealCalledFunctionsFrom(CG[F]);

  // Now that the old function is dead, delete it.  If there is a dangling
  // reference to the CallgraphNode, just leave the dead function around for
  // someone else to nuke.
  CallGraphNode *CGN = CG[F];
  if (CGN->getNumReferences() == 0)
    delete CG.removeFunctionFromModule(CGN);
  else
    F->setLinkage(Function::ExternalLinkage);

  return NF_CGN;
}