void MemoryInstrumenter::instrumentPointerInstruction(Instruction *I) {
  BasicBlock::iterator Loc;
  if (isa<PHINode>(I)) {
    // Cannot insert hooks right after a PHI, because PHINodes have to be
    // grouped together.
    Loc = I->getParent()->getFirstNonPHI();
  } else if (!I->isTerminator()) {
    Loc = I;
    ++Loc;
  } else {
    assert(isa<InvokeInst>(I));
    InvokeInst *II = cast<InvokeInst>(I);
    BasicBlock *NormalDest = II->getNormalDest();
    // It's not always OK to insert HookTopLevel simply at the beginning of the
    // normal destination, because the normal destination may be shared by
    // multiple InvokeInsts. In that case, we create a critical-edge block and
    // add the HookTopLevel there.
    if (NormalDest->getUniquePredecessor()) {
      Loc = NormalDest->getFirstNonPHI();
    } else {
      BasicBlock *CritEdge = BasicBlock::Create(I->getContext(), "crit_edge",
                                                I->getParent()->getParent());
      Loc = BranchInst::Create(NormalDest, CritEdge);
      // Now that CritEdge becomes the new predecessor of NormalDest, replace
      // all phi uses of I->getParent() with CritEdge.
      for (auto J = NormalDest->begin(); NormalDest->getFirstNonPHI() != J; ++J) {
        PHINode *Phi = cast<PHINode>(J);
        int i;
        while ((i = Phi->getBasicBlockIndex(I->getParent())) >= 0)
          Phi->setIncomingBlock(i, CritEdge);
      }
      II->setNormalDest(CritEdge);
    }
  }
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    instrumentPointer(I, LI->getPointerOperand(), Loc);
  else
    instrumentPointer(I, NULL, Loc);
}
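// The helper below is NOT part of the original pass; it is a minimal sketch of
// what the instrumentPointer() call above might emit, assuming the runtime
// exposes a hypothetical hook `void HookTopLevel(char *Value, char *Pointer)`.
// The helper name, the hook name/signature, and the i8* casts are illustrative
// assumptions only (needs llvm/IR/IRBuilder.h and llvm/IR/Module.h).
static void insertTopLevelHookSketch(Module &M, Value *V, Value *PtrOperand,
                                     Instruction *Loc) {
  LLVMContext &Ctx = M.getContext();
  Type *CharStarTy = Type::getInt8PtrTy(Ctx);
  Type *VoidTy = Type::getVoidTy(Ctx);

  // Declare (or reuse) the hypothetical runtime hook.
  std::vector<Type *> Params(2, CharStarTy);
  Constant *Hook = M.getOrInsertFunction(
      "HookTopLevel", FunctionType::get(VoidTy, Params, /*isVarArg=*/false));

  // Insert the hook call at the location computed by
  // instrumentPointerInstruction() above.
  IRBuilder<> Builder(Loc);
  Value *CastV = Builder.CreatePointerCast(V, CharStarTy);
  Value *CastP = PtrOperand ? Builder.CreatePointerCast(PtrOperand, CharStarTy)
                            : Constant::getNullValue(CharStarTy);
  std::vector<Value *> Args;
  Args.push_back(CastV);
  Args.push_back(CastP);
  Builder.CreateCall(Hook, Args);
}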
/// Replaces the given call site (Call or Invoke) with a gc.statepoint
/// intrinsic with an empty deoptimization arguments list. This does
/// NOT do explicit relocation for GC support.
static Value *ReplaceWithStatepoint(const CallSite &CS /* to replace */) {
  assert(CS.getInstruction()->getModule() && "must be set");

  // TODO: technically, a pass is not allowed to get functions from within a
  // function pass since it might trigger a new function addition.  Refactor
  // this logic out to the initialization of the pass.  Doesn't appear to
  // matter in practice.

  // Then go ahead and use the builder to actually do the inserts.  We insert
  // immediately before the previous instruction under the assumption that all
  // arguments will be available here.  We can't insert afterwards since we may
  // be replacing a terminator.
  IRBuilder<> Builder(CS.getInstruction());

  // Note: The gc args are not filled in at this time, that's handled by
  // RewriteStatepointsForGC (which is currently under review).

  // Create the statepoint given all the arguments.
  Instruction *Token = nullptr;

  uint64_t ID;
  uint32_t NumPatchBytes;

  AttributeSet OriginalAttrs = CS.getAttributes();
  Attribute AttrID =
      OriginalAttrs.getAttribute(AttributeSet::FunctionIndex, "statepoint-id");
  Attribute AttrNumPatchBytes = OriginalAttrs.getAttribute(
      AttributeSet::FunctionIndex, "statepoint-num-patch-bytes");

  AttrBuilder AttrsToRemove;
  bool HasID = AttrID.isStringAttribute() &&
               !AttrID.getValueAsString().getAsInteger(10, ID);

  if (HasID)
    AttrsToRemove.addAttribute("statepoint-id");
  else
    ID = 0xABCDEF00;

  bool HasNumPatchBytes =
      AttrNumPatchBytes.isStringAttribute() &&
      !AttrNumPatchBytes.getValueAsString().getAsInteger(10, NumPatchBytes);

  if (HasNumPatchBytes)
    AttrsToRemove.addAttribute("statepoint-num-patch-bytes");
  else
    NumPatchBytes = 0;

  OriginalAttrs = OriginalAttrs.removeAttributes(
      CS.getInstruction()->getContext(), AttributeSet::FunctionIndex,
      AttrsToRemove);

  if (CS.isCall()) {
    CallInst *ToReplace = cast<CallInst>(CS.getInstruction());
    CallInst *Call = Builder.CreateGCStatepointCall(
        ID, NumPatchBytes, CS.getCalledValue(),
        makeArrayRef(CS.arg_begin(), CS.arg_end()), None, None,
        "safepoint_token");
    Call->setTailCall(ToReplace->isTailCall());
    Call->setCallingConv(ToReplace->getCallingConv());

    // If we can handle this set of attributes, set up the function attributes
    // directly on the statepoint and return the attributes later for the
    // gc_result intrinsic.
    Call->setAttributes(OriginalAttrs.getFnAttributes());

    Token = Call;

    // Put the following gc_result and gc_relocate calls immediately after
    // the old call (which we're about to delete).
    assert(ToReplace->getNextNode() && "not a terminator, must have next");
    Builder.SetInsertPoint(ToReplace->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getNextNode()->getDebugLoc());

  } else if (CS.isInvoke()) {
    InvokeInst *ToReplace = cast<InvokeInst>(CS.getInstruction());

    // Insert the new invoke into the old block. We'll remove the old one in a
    // moment at which point this will become the new terminator for the
    // original block.
    Builder.SetInsertPoint(ToReplace->getParent());
    InvokeInst *Invoke = Builder.CreateGCStatepointInvoke(
        ID, NumPatchBytes, CS.getCalledValue(), ToReplace->getNormalDest(),
        ToReplace->getUnwindDest(), makeArrayRef(CS.arg_begin(), CS.arg_end()),
        None, None, "safepoint_token");

    Invoke->setCallingConv(ToReplace->getCallingConv());

    // If we can handle this set of attributes, set up the function attributes
    // directly on the statepoint and return the attributes later for the
    // gc_result intrinsic.
    Invoke->setAttributes(OriginalAttrs.getFnAttributes());

    Token = Invoke;

    // We'll insert the gc.result into the normal block.
    BasicBlock *NormalDest = ToReplace->getNormalDest();
    // Cannot insert the gc.result if phi nodes are present; these cases
    // should have been removed prior to running this function.
    assert(!isa<PHINode>(NormalDest->begin()));
    Instruction *IP = &*(NormalDest->getFirstInsertionPt());
    Builder.SetInsertPoint(IP);
  } else {
    llvm_unreachable("unexpect type of CallSite");
  }
  assert(Token);

  // Handle the return value of the original call - update all uses to use a
  // gc_result hanging off the statepoint node we just inserted.

  // Only add the gc_result iff there is actually a used result.
  if (!CS.getType()->isVoidTy() && !CS.getInstruction()->use_empty()) {
    std::string TakenName =
        CS.getInstruction()->hasName() ? CS.getInstruction()->getName() : "";
    CallInst *GCResult = Builder.CreateGCResult(Token, CS.getType(), TakenName);
    GCResult->setAttributes(OriginalAttrs.getRetAttributes());
    return GCResult;
  } else {
    // No return value for the call.
    return nullptr;
  }
}
// First thing we need to do is scan the whole function for values that are
// live across unwind edges.  Each value that is live across an unwind edge
// we spill into a stack location, guaranteeing that there is nothing live
// across the unwind edge.  This process also splits all critical edges
// coming out of invokes.
void LowerInvoke::
splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes) {
  // First step, split all critical edges from invoke instructions.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    InvokeInst *II = Invokes[i];
    SplitCriticalEdge(II, 0, this);
    SplitCriticalEdge(II, 1, this);
    assert(!isa<PHINode>(II->getNormalDest()) &&
           !isa<PHINode>(II->getUnwindDest()) &&
           "critical edge splitting left single entry phi nodes?");
  }

  Function *F = Invokes.back()->getParent()->getParent();

  // To avoid having to handle incoming arguments specially, we lower each arg
  // to a copy instruction in the entry block.  This ensures that the argument
  // value itself cannot be live across the entry block.
  BasicBlock::iterator AfterAllocaInsertPt = F->begin()->begin();
  while (isa<AllocaInst>(AfterAllocaInsertPt) &&
         isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsertPt)->getArraySize()))
    ++AfterAllocaInsertPt;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI) {
    // This is always a no-op cast because we're casting AI to AI->getType(),
    // so the source and destination types are identical.  BitCast is the only
    // possibility.
    CastInst *NC = new BitCastInst(
        AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
    AI->replaceAllUsesWith(NC);
    // Normally it is forbidden to replace a CastInst's operand because it
    // could cause the opcode to reflect an illegal conversion.  However, we're
    // replacing it here with the same value it was constructed with, simply to
    // make NC its user.
    NC->setOperand(0, AI);
  }

  // Finally, scan the code looking for instructions with bad live ranges.
  for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
      // Ignore obvious cases we don't have to handle.  In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block.  Ignore them quickly.
      Instruction *Inst = II;
      if (Inst->use_empty()) continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->use_back())->getParent() == BB &&
          !isa<PHINode>(Inst->use_back())) continue;

      // If this is an alloca in the entry block, it's not a real register
      // value.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F->begin())
          continue;

      // Avoid iterator invalidation by copying users to a temporary vector.
      std::vector<Instruction*> Users;
      for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
           UI != E; ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        if (User->getParent() != BB || isa<PHINode>(User))
          Users.push_back(User);
      }

      // Scan all of the uses and see if the live range is live across an
      // unwind edge.  If we find a use live across an invoke edge, create an
      // alloca and spill the value.
      std::set<InvokeInst*> InvokesWithStoreInserted;

      // Find all of the blocks that this value is live in.
      std::set<BasicBlock*> LiveBBs;
      LiveBBs.insert(Inst->getParent());
      while (!Users.empty()) {
        Instruction *U = Users.back();
        Users.pop_back();

        if (!isa<PHINode>(U)) {
          MarkBlocksLiveIn(U->getParent(), LiveBBs);
        } else {
          // Uses for a PHI node occur in their predecessor block.
          PHINode *PN = cast<PHINode>(U);
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
        }
      }

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
          NeedsSpill = true;
        }
      }

      // If we decided we need a spill, do it.
      if (NeedsSpill) {
        ++NumSpilled;
        DemoteRegToStack(*Inst, true);
      }
    }
}
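// MarkBlocksLiveIn() is used above but not defined in this excerpt.  The
// sketch below shows the usual backward flood fill over predecessors (it
// mirrors the helper of the same name in LLVM's LowerInvoke/SjLj preparation
// passes); the exact signature here is an assumption (needs llvm/IR/CFG.h or
// llvm/Support/CFG.h on older releases).
static void MarkBlocksLiveIn(BasicBlock *BB, std::set<BasicBlock*> &LiveBBs) {
  // Stop once this block has already been marked live.
  if (!LiveBBs.insert(BB).second)
    return;
  // The value is live on entry to BB, so it is also live out of every
  // predecessor; walk backwards through the CFG.
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
    MarkBlocksLiveIn(*PI, LiveBBs);
}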
bool CSDataRando::processCallSite(CallSite CS, FuncInfo &FI,
                                  PointerEquivalenceAnalysis &P, DSGraph *G) {
  bool IndirectCall = !isa<Function>(CS.getCalledValue()->stripPointerCasts());
  if (IndirectCall) {
    NumIndirectCalls++;
  }

  CallSite OriginalCS = originalCallSite(FI, CS);
  if (!DSA->canEncryptCall(OriginalCS)) {
    if (IndirectCall) {
      NumIndirectCantEncrypt++;
    }
    return false;
  }

  DSCallSite DSCS = G->getDSCallSiteForCallSite(OriginalCS);
  const Function *Callee = getEffectiveCallee(DSCS, FI, G);
  if (!Callee) {
    if (IndirectCall) {
      NumIndirectCantEncrypt++;
    }
    return false;
  }
  FuncInfo &CalleeInfo = FunctionInfo[Callee];
  Value *Clone = getCloneCalledValue(CS, CalleeInfo);
  if (!Clone || CalleeInfo.ArgNodes.empty()) {
    if (IndirectCall) {
      NumIndirectCantEncrypt++;
    }
    return false;
  }

  // We create a mapping of the formal argument nodes in the callee function
  // and actual argument nodes in the caller function's graph.
  DSGraph::NodeMapTy NodeMap;
  DSGraph *CalleeG = DSA->getDSGraph(*Callee);

  // getArgNodesForCall places the return node and the vanode in the
  // first two slots of the vector, followed by the nodes for the regular
  // pointer arguments.
  std::vector<DSNodeHandle> ArgNodes;
  getArgNodesForCall(CalleeG, DSCS, ArgNodes);

  // First the return value.
  DSNodeHandle CalleeRetNode = ArgNodes[0];
  DSGraph::computeNodeMapping(CalleeRetNode, DSCS.getRetVal(), NodeMap);

  // Then VarArgs.
  DSNodeHandle CalleeVarArgNode = ArgNodes[1];
  DSGraph::computeNodeMapping(CalleeVarArgNode, DSCS.getVAVal(), NodeMap);

  // And last the regular arguments.
  for (unsigned int i = 0;
       i < DSCS.getNumPtrArgs() && i + 2 < ArgNodes.size(); i++) {
    DSGraph::computeNodeMapping(ArgNodes[i + 2], DSCS.getPtrArg(i), NodeMap);
  }

  // Collect the arguments and masks to pass to the call.
  SmallVector<Value*, 8> Args;
  unsigned int i = 0;
  for (unsigned int e = CS.getFunctionType()->getNumParams(); i < e; i++) {
    Args.push_back(CS.getArgOperand(i));
  }
  for (const DSNode *N : CalleeInfo.ArgNodes) {
    Value *Mask = P.getMaskForNode(NodeMap[N]);
    Args.push_back(Mask);
  }
  // VarArgs go after masks.
  for (unsigned int e = CS.arg_size(); i < e; i++) {
    Args.push_back(CS.getArgOperand(i));
  }

  // Do the replacement.
  Instruction *CI = CS.getInstruction();
  Value *Call;
  if (CS.isCall()) {
    Call = CallInst::Create(Clone, Args, "", CI);
  } else {
    InvokeInst *II = cast<InvokeInst>(CI);
    Call = InvokeInst::Create(Clone, II->getNormalDest(), II->getUnwindDest(),
                              Args, "", II);
  }

  CallSite NewCS(Call);
  NewCS.setCallingConv(CS.getCallingConv());

  CI->replaceAllUsesWith(Call);
  P.replace(CI, Call);
  CI->eraseFromParent();

  return true;
}
/// Replaces the given call site (Call or Invoke) with a gc.statepoint
/// intrinsic with an empty deoptimization arguments list. This does
/// NOT do explicit relocation for GC support.
static Value *ReplaceWithStatepoint(const CallSite &CS, /* to replace */
                                    Pass *P) {
  BasicBlock *BB = CS.getInstruction()->getParent();
  Function *F = BB->getParent();
  Module *M = F->getParent();
  assert(M && "must be set");

  // TODO: technically, a pass is not allowed to get functions from within a
  // function pass since it might trigger a new function addition.  Refactor
  // this logic out to the initialization of the pass.  Doesn't appear to
  // matter in practice.

  // Fill in the one generic typed argument (the function is also vararg).
  std::vector<Type *> argTypes;
  argTypes.push_back(CS.getCalledValue()->getType());

  Function *gc_statepoint_decl = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_statepoint, argTypes);

  // Then go ahead and use the builder to actually do the inserts.  We insert
  // immediately before the previous instruction under the assumption that all
  // arguments will be available here.  We can't insert afterwards since we may
  // be replacing a terminator.
  Instruction *insertBefore = CS.getInstruction();
  IRBuilder<> Builder(insertBefore);

  // First, create the statepoint (with all live ptrs as arguments).
  std::vector<llvm::Value *> args;
  // target, #args, unused, args
  Value *Target = CS.getCalledValue();
  args.push_back(Target);
  int callArgSize = CS.arg_size();
  args.push_back(
      ConstantInt::get(Type::getInt32Ty(M->getContext()), callArgSize));
  // TODO: add a 'Needs GC-rewrite' later flag
  args.push_back(ConstantInt::get(Type::getInt32Ty(M->getContext()), 0));

  // Copy all the arguments of the original call.
  args.insert(args.end(), CS.arg_begin(), CS.arg_end());

  // Create the statepoint given all the arguments.
  Instruction *token = nullptr;
  AttributeSet return_attributes;
  if (CS.isCall()) {
    CallInst *toReplace = cast<CallInst>(CS.getInstruction());
    CallInst *call =
        Builder.CreateCall(gc_statepoint_decl, args, "safepoint_token");
    call->setTailCall(toReplace->isTailCall());
    call->setCallingConv(toReplace->getCallingConv());

    // Before we have to worry about GC semantics, all attributes are legal.
    AttributeSet new_attrs = toReplace->getAttributes();
    // If we can handle this set of attributes, set up the function attributes
    // directly on the statepoint and return the attributes later for the
    // gc_result intrinsic.
    call->setAttributes(new_attrs.getFnAttributes());
    return_attributes = new_attrs.getRetAttributes();
    // TODO: handle param attributes

    token = call;

    // Put the following gc_result and gc_relocate calls immediately after
    // the old call (which we're about to delete).
    BasicBlock::iterator next(toReplace);
    assert(BB->end() != next && "not a terminator, must have next");
    next++;
    Instruction *IP = &*(next);
    Builder.SetInsertPoint(IP);
    Builder.SetCurrentDebugLocation(IP->getDebugLoc());

  } else if (CS.isInvoke()) {
    InvokeInst *toReplace = cast<InvokeInst>(CS.getInstruction());

    // Insert the new invoke into the old block. We'll remove the old one in a
    // moment at which point this will become the new terminator for the
    // original block.
    InvokeInst *invoke = InvokeInst::Create(
        gc_statepoint_decl, toReplace->getNormalDest(),
        toReplace->getUnwindDest(), args, "", toReplace->getParent());
    invoke->setCallingConv(toReplace->getCallingConv());

    // Currently we will fail on parameter attributes and on certain
    // function attributes.
    AttributeSet new_attrs = toReplace->getAttributes();
    // If we can handle this set of attributes, set up the function attributes
    // directly on the statepoint and return the attributes later for the
    // gc_result intrinsic.
    invoke->setAttributes(new_attrs.getFnAttributes());
    return_attributes = new_attrs.getRetAttributes();

    token = invoke;

    // We'll insert the gc.result into the normal block.
    BasicBlock *normalDest = normalizeBBForInvokeSafepoint(
        toReplace->getNormalDest(), invoke->getParent());
    Instruction *IP = &*(normalDest->getFirstInsertionPt());
    Builder.SetInsertPoint(IP);
  } else {
    llvm_unreachable("unexpect type of CallSite");
  }
  assert(token);

  // Handle the return value of the original call - update all uses to use a
  // gc_result hanging off the statepoint node we just inserted.

  // Only add the gc_result iff there is actually a used result.
  if (!CS.getType()->isVoidTy() && !CS.getInstruction()->use_empty()) {
    Instruction *gc_result = nullptr;
    std::vector<Type *> types;     // one per 'any' type
    types.push_back(CS.getType()); // result type
    auto get_gc_result_id = [&](Type &Ty) {
      if (Ty.isIntegerTy()) {
        return Intrinsic::experimental_gc_result_int;
      } else if (Ty.isFloatingPointTy()) {
        return Intrinsic::experimental_gc_result_float;
      } else if (Ty.isPointerTy()) {
        return Intrinsic::experimental_gc_result_ptr;
      } else {
        llvm_unreachable("non java type encountered");
      }
    };
    Intrinsic::ID Id = get_gc_result_id(*CS.getType());
    Value *gc_result_func = Intrinsic::getDeclaration(M, Id, types);

    std::vector<Value *> args;
    args.push_back(token);
    gc_result = Builder.CreateCall(
        gc_result_func, args,
        CS.getInstruction()->hasName() ? CS.getInstruction()->getName() : "");

    cast<CallInst>(gc_result)->setAttributes(return_attributes);
    return gc_result;
  } else {
    // No return value for the call.
    return nullptr;
  }
}
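// normalizeBBForInvokeSafepoint() is referenced above (and in the later
// variant of ReplaceWithStatepoint) but not included in this excerpt.  The
// sketch below shows the general shape of such a helper: give the invoke's
// normal destination a unique predecessor so the gc.result can be inserted
// there safely.  The use of SplitBlockPredecessors and FoldSingleEntryPHINodes
// (from llvm/Transforms/Utils/BasicBlockUtils.h) is an assumption based on
// standard LLVM utilities; the real helper may differ.
static BasicBlock *normalizeBBForInvokeSafepoint(BasicBlock *BB,
                                                 BasicBlock *InvokeParent) {
  BasicBlock *Ret = BB;
  // If other invokes or branches also target BB, peel off a block that is
  // reached only from InvokeParent.
  if (!BB->getUniquePredecessor())
    Ret = SplitBlockPredecessors(BB, InvokeParent, "");
  // With a single predecessor, any single-entry phi nodes can be folded away,
  // so getFirstInsertionPt() lands past the (now absent) phis.
  FoldSingleEntryPHINodes(Ret);
  return Ret;
}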
// Specialize F by replacing the arguments (keys) in replacements with the
// constants (values).  Replace all calls to F with those constants with
// a call to the specialized function.  Returns the specialized function.
static Function*
SpecializeFunction(Function* F,
                   ValueMap<const Value*, Value*>& replacements) {
  // arg numbers of deleted arguments
  DenseMap<unsigned, const Argument*> deleted;
  for (ValueMap<const Value*, Value*>::iterator
         repb = replacements.begin(), repe = replacements.end();
       repb != repe; ++repb) {
    Argument const *arg = cast<const Argument>(repb->first);
    deleted[arg->getArgNo()] = arg;
  }

  Function* NF = CloneFunction(F, replacements, /*ModuleLevelChanges=*/false);
  NF->setLinkage(GlobalValue::InternalLinkage);
  F->getParent()->getFunctionList().push_back(NF);

  for (Value::use_iterator ii = F->use_begin(), ee = F->use_end();
       ii != ee; ) {
    Value::use_iterator i = ii;
    ++ii;
    User *U = *i;
    CallSite CS(U);
    if (CS) {
      if (CS.getCalledFunction() == F) {
        SmallVector<Value*, 6> args;
        // Assemble the non-specialized arguments for the updated callsite.
        // In the process, make sure that the specialized arguments are
        // constant and match the specialization.  If that's not the case,
        // this callsite needs to call the original or some other
        // specialization; don't change it here.
        CallSite::arg_iterator as = CS.arg_begin(), ae = CS.arg_end();
        for (CallSite::arg_iterator ai = as; ai != ae; ++ai) {
          DenseMap<unsigned, const Argument*>::iterator delit =
              deleted.find(std::distance(as, ai));
          if (delit == deleted.end())
            args.push_back(cast<Value>(ai));
          else {
            Constant *ci = dyn_cast<Constant>(ai);
            if (!(ci && ci == replacements[delit->second]))
              goto next_use;
          }
        }
        Value* NCall;
        if (CallInst *CI = dyn_cast<CallInst>(U)) {
          NCall = CallInst::Create(NF, args.begin(), args.end(),
                                   CI->getName(), CI);
          cast<CallInst>(NCall)->setTailCall(CI->isTailCall());
          cast<CallInst>(NCall)->setCallingConv(CI->getCallingConv());
        } else {
          InvokeInst *II = cast<InvokeInst>(U);
          NCall = InvokeInst::Create(NF, II->getNormalDest(),
                                     II->getUnwindDest(),
                                     args.begin(), args.end(),
                                     II->getName(), II);
          cast<InvokeInst>(NCall)->setCallingConv(II->getCallingConv());
        }
        CS.getInstruction()->replaceAllUsesWith(NCall);
        CS.getInstruction()->eraseFromParent();
        ++numReplaced;
      }
    }
    next_use:;
  }
  return NF;
}
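// A minimal usage sketch for SpecializeFunction(), not taken from the original
// pass: build the replacements map that binds one formal argument to a
// constant, then ask for the specialized clone.  The assumption here is that
// F's first argument is an integer; the driver name is hypothetical.
static Function *specializeFirstArgTo42(Function *F) {
  ValueMap<const Value*, Value*> replacements;
  Argument *A = &*F->arg_begin();                    // the argument to pin
  Constant *C = ConstantInt::get(A->getType(), 42);  // the value to pin it to
  replacements[A] = C;
  // Calls to F whose first argument is the constant 42 are rewritten to call
  // the returned specialization.
  return SpecializeFunction(F, replacements);
}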
/// Replaces the given call site (Call or Invoke) with a gc.statepoint
/// intrinsic with an empty deoptimization arguments list. This does
/// NOT do explicit relocation for GC support.
static Value *ReplaceWithStatepoint(const CallSite &CS, /* to replace */
                                    Pass *P) {
  assert(CS.getInstruction()->getParent()->getParent()->getParent() &&
         "must be set");

  // TODO: technically, a pass is not allowed to get functions from within a
  // function pass since it might trigger a new function addition.  Refactor
  // this logic out to the initialization of the pass.  Doesn't appear to
  // matter in practice.

  // Then go ahead and use the builder to actually do the inserts.  We insert
  // immediately before the previous instruction under the assumption that all
  // arguments will be available here.  We can't insert afterwards since we may
  // be replacing a terminator.
  IRBuilder<> Builder(CS.getInstruction());

  // Note: The gc args are not filled in at this time, that's handled by
  // RewriteStatepointsForGC (which is currently under review).

  // Create the statepoint given all the arguments.
  Instruction *Token = nullptr;
  AttributeSet OriginalAttrs;

  if (CS.isCall()) {
    CallInst *ToReplace = cast<CallInst>(CS.getInstruction());
    CallInst *Call = Builder.CreateGCStatepointCall(
        CS.getCalledValue(), makeArrayRef(CS.arg_begin(), CS.arg_end()), None,
        None, "safepoint_token");
    Call->setTailCall(ToReplace->isTailCall());
    Call->setCallingConv(ToReplace->getCallingConv());

    // Before we have to worry about GC semantics, all attributes are legal.
    // TODO: handle param attributes
    OriginalAttrs = ToReplace->getAttributes();

    // If we can handle this set of attributes, set up the function attributes
    // directly on the statepoint and return the attributes later for the
    // gc_result intrinsic.
    Call->setAttributes(OriginalAttrs.getFnAttributes());

    Token = Call;

    // Put the following gc_result and gc_relocate calls immediately after
    // the old call (which we're about to delete).
    assert(ToReplace->getNextNode() && "not a terminator, must have next");
    Builder.SetInsertPoint(ToReplace->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getNextNode()->getDebugLoc());

  } else if (CS.isInvoke()) {
    InvokeInst *ToReplace = cast<InvokeInst>(CS.getInstruction());

    // Insert the new invoke into the old block. We'll remove the old one in a
    // moment at which point this will become the new terminator for the
    // original block.
    Builder.SetInsertPoint(ToReplace->getParent());
    InvokeInst *Invoke = Builder.CreateGCStatepointInvoke(
        CS.getCalledValue(), ToReplace->getNormalDest(),
        ToReplace->getUnwindDest(), makeArrayRef(CS.arg_begin(), CS.arg_end()),
        Builder.getInt32(0), None, "safepoint_token");

    // Currently we will fail on parameter attributes and on certain
    // function attributes.
    OriginalAttrs = ToReplace->getAttributes();

    // If we can handle this set of attributes, set up the function attributes
    // directly on the statepoint and return the attributes later for the
    // gc_result intrinsic.
    Invoke->setAttributes(OriginalAttrs.getFnAttributes());

    Token = Invoke;

    // We'll insert the gc.result into the normal block.
    BasicBlock *NormalDest = normalizeBBForInvokeSafepoint(
        ToReplace->getNormalDest(), Invoke->getParent());
    Builder.SetInsertPoint(NormalDest->getFirstInsertionPt());
  } else {
    llvm_unreachable("unexpect type of CallSite");
  }
  assert(Token);

  // Handle the return value of the original call - update all uses to use a
  // gc_result hanging off the statepoint node we just inserted.

  // Only add the gc_result iff there is actually a used result.
  if (!CS.getType()->isVoidTy() && !CS.getInstruction()->use_empty()) {
    std::string TakenName =
        CS.getInstruction()->hasName() ? CS.getInstruction()->getName() : "";
    CallInst *GCResult = Builder.CreateGCResult(Token, CS.getType(), TakenName);
    GCResult->setAttributes(OriginalAttrs.getRetAttributes());
    return GCResult;
  } else {
    // No return value for the call.
    return nullptr;
  }
}
bool ICFGBuilder::runOnModule(Module &M) {
  MicroBasicBlockBuilder &MBBB = getAnalysis<MicroBasicBlockBuilder>();

  forallbb(M, bb) {
    for (mbb_iterator mi = MBBB.begin(bb), E = MBBB.end(bb); mi != E; ++mi)
      getOrInsertMBB(mi);
  }

  forallbb(M, bb) {
    for (mbb_iterator mi = MBBB.begin(bb), E = MBBB.end(bb); mi != E; ++mi) {
      // The ICFG will not contain any inter-thread edge.
      // It's also difficult to handle them. How to deal with the return
      // edges? They are supposed to go to the pthread_join sites.
      if (mi->end() != bb->end() && !is_pthread_create(mi->end())) {
        FPCallGraph &CG = getAnalysis<FPCallGraph>();
        FuncList callees = CG.getCalledFunctions(mi->end());
        bool calls_decl = false;
        for (size_t i = 0; i < callees.size(); ++i) {
          Function *callee = callees[i];
          if (callee->isDeclaration()) {
            calls_decl = true;
          } else {
            MicroBasicBlock *entry_mbb = MBBB.begin(callee->begin());
            addEdge(mi, entry_mbb);
          }
        }
        if (calls_decl) {
          mbb_iterator next_mbb = mi;
          ++next_mbb;
          addEdge(mi, next_mbb);
        }
      } else {
        for (succ_iterator si = succ_begin(bb); si != succ_end(bb); ++si) {
          MicroBasicBlock *succ_mbb = MBBB.begin(*si);
          addEdge(mi, succ_mbb);
        }
        TerminatorInst *ti = bb->getTerminator();
        if (is_ret(ti)) {
          FPCallGraph &CG = getAnalysis<FPCallGraph>();
          InstList call_sites = CG.getCallSites(bb->getParent());
          for (size_t i = 0; i < call_sites.size(); ++i) {
            Instruction *call_site = call_sites[i];
            // Ignore inter-thread edges.
            if (is_pthread_create(call_site))
              continue;
            MicroBasicBlock *next_mbb;
            if (isa<CallInst>(call_site)) {
              BasicBlock::iterator next = call_site;
              ++next;
              next_mbb = MBBB.parent(next);
            } else {
              assert(isa<InvokeInst>(call_site));
              InvokeInst *inv = dyn_cast<InvokeInst>(call_site);
              if (isa<ReturnInst>(ti)) {
                next_mbb = MBBB.begin(inv->getNormalDest());
              } else {
                next_mbb = MBBB.begin(inv->getUnwindDest());
              }
            }
            addEdge(mi, next_mbb);
          }
        }
      }
    }
  }
  return false;
}
void MemoryInstrumenter::instrumentMalloc(const CallSite &CS) {
  DataLayout &TD = getAnalysis<DataLayout>();
  TargetLibraryInfo &TLI = getAnalysis<TargetLibraryInfo>();

  Function *Callee = CS.getCalledFunction();
  assert(DynAAUtils::IsMalloc(Callee));

  Instruction *Ins = CS.getInstruction();

  // Calculate where to insert.
  // <Loc> will be the next instruction executed.
  BasicBlock::iterator Loc;
  if (!Ins->isTerminator()) {
    Loc = Ins;
    ++Loc;
  } else {
    assert(isa<InvokeInst>(Ins));
    InvokeInst *II = cast<InvokeInst>(Ins);
    assert(II->getNormalDest()->getUniquePredecessor());
    Loc = II->getNormalDest()->getFirstInsertionPt();
  }

  IRBuilder<> Builder(Loc);
  Value *Start = NULL;
  Value *Size = NULL;
  Value *Success = NULL; // Indicates whether the allocation succeeded.

  StringRef CalleeName = Callee->getName();
  if (CalleeName == "malloc" || CalleeName == "valloc") {
    Start = Ins;
    Size = UndefValue::get(LongType);
    Success = Builder.CreateICmpNE(Ins, ConstantPointerNull::get(CharStarType));
  } else if (CalleeName.startswith("_Zn")) {
    Start = Ins;
    Size = CS.getArgument(0);
  } else if (CalleeName == "calloc") {
    // calloc() takes two size_t, i.e. i64.
    // Therefore, no need to worry that Mul will have two operands with
    // different types. Also, Size will always be of type i64.
    Start = Ins;
    assert(CS.getArgument(0)->getType() == LongType);
    assert(CS.getArgument(1)->getType() == LongType);
    Size = BinaryOperator::Create(Instruction::Mul,
                                  CS.getArgument(0),
                                  CS.getArgument(1),
                                  "",
                                  Loc);
    Success = Builder.CreateICmpNE(Ins, ConstantPointerNull::get(CharStarType));
  } else if (CalleeName == "memalign" || CalleeName == "realloc") {
    Start = Ins;
    Size = CS.getArgument(1);
    Success = Builder.CreateICmpNE(Ins, ConstantPointerNull::get(CharStarType));
  } else if (CalleeName == "strdup" || CalleeName == "__strdup") {
    Start = Ins;
    // Use strlen to compute the length of the allocated memory.
    Value *StrLen = EmitStrLen(Ins, Builder, &TD, &TLI);
    // size = strlen(result) + 1
    Size = Builder.CreateAdd(StrLen, ConstantInt::get(LongType, 1));
    Success = Builder.CreateICmpNE(Ins, ConstantPointerNull::get(CharStarType));
  } else if (CalleeName == "getline") {
    // getline(char **lineptr, size_t *n, FILE *stream)
    // start = *lineptr
    // size = *n
    // succ = (<rv> != -1)
    Start = Builder.CreateLoad(CS.getArgument(0));
    Size = Builder.CreateLoad(CS.getArgument(1));
    Success = Builder.CreateICmpNE(Ins, ConstantInt::get(Ins->getType(), -1));
  } else {
    assert(false && "Unhandled malloc function call");
  }

  // start = malloc(size)
  // if (success)
  //   HookMemAlloc
  // Loc:
  instrumentMemoryAllocation(Start, Size, Success, Loc);
}
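// instrumentMemoryAllocation() is defined elsewhere in the pass.  The sketch
// below shows one way the guarded hook call described by the comment above
// ("if (success) HookMemAlloc") could be emitted, assuming a hypothetical
// runtime function `void HookMemAlloc(i8 *Start, i64 Size)`.  The helper name,
// the hook signature, and the SplitBlockAndInsertIfThen(Value*, Instruction*,
// bool) overload from llvm/Transforms/Utils/BasicBlockUtils.h are illustrative
// assumptions, not the pass's actual implementation.
static void instrumentMemoryAllocationSketch(Module &M, Value *Start,
                                             Value *Size, Value *Success,
                                             Instruction *Loc) {
  LLVMContext &Ctx = M.getContext();
  Type *CharStarTy = Type::getInt8PtrTy(Ctx);
  Type *LongTy = Type::getInt64Ty(Ctx);

  std::vector<Type *> Params;
  Params.push_back(CharStarTy);
  Params.push_back(LongTy);
  Constant *Hook = M.getOrInsertFunction(
      "HookMemAlloc",
      FunctionType::get(Type::getVoidTy(Ctx), Params, /*isVarArg=*/false));

  // If the allocation can fail, only call the hook when Success is true;
  // SplitBlockAndInsertIfThen creates the guarded "then" block just before Loc.
  Instruction *InsertPt = Loc;
  if (Success)
    InsertPt = SplitBlockAndInsertIfThen(Success, Loc, /*Unreachable=*/false);

  IRBuilder<> Builder(InsertPt);
  Value *StartPtr = Builder.CreatePointerCast(Start, CharStarTy);
  Value *SizeVal = Builder.CreateZExtOrTrunc(Size, LongTy);
  std::vector<Value *> Args;
  Args.push_back(StartPtr);
  Args.push_back(SizeVal);
  Builder.CreateCall(Hook, Args);
}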
/// splitLiveRangesAcrossInvokes - Each value that is live across an unwind
/// edge we spill into a stack location, guaranteeing that there is nothing
/// live across the unwind edge.  This process also splits all critical edges
/// coming out of invokes.
/// FIXME: Move this function to a common utility file (Local.cpp?) so
/// both SjLj and LowerInvoke can use it.
void SjLjEHPass::
splitLiveRangesAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
  // First step, split all critical edges from invoke instructions.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    InvokeInst *II = Invokes[i];
    SplitCriticalEdge(II, 0, this);

    // FIXME: New EH - This if-condition will be always true in the new scheme.
    if (II->getUnwindDest()->isLandingPad()) {
      SmallVector<BasicBlock*, 2> NewBBs;
      SplitLandingPadPredecessors(II->getUnwindDest(), II->getParent(),
                                  ".1", ".2", this, NewBBs);
      LPadSuccMap[II] = *succ_begin(NewBBs[0]);
    } else {
      SplitCriticalEdge(II, 1, this);
    }

    assert(!isa<PHINode>(II->getNormalDest()) &&
           !isa<PHINode>(II->getUnwindDest()) &&
           "Critical edge splitting left single entry phi nodes?");
  }

  Function *F = Invokes.back()->getParent()->getParent();

  // To avoid having to handle incoming arguments specially, we lower each arg
  // to a copy instruction in the entry block.  This ensures that the argument
  // value itself cannot be live across the entry block.
  BasicBlock::iterator AfterAllocaInsertPt = F->begin()->begin();
  while (isa<AllocaInst>(AfterAllocaInsertPt) &&
         isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsertPt)->getArraySize()))
    ++AfterAllocaInsertPt;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI) {
    Type *Ty = AI->getType();
    // Aggregate types can't be cast, but are legal argument types, so we have
    // to handle them differently. We use an extract/insert pair as a
    // lightweight method to achieve the same goal.
    if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
      Instruction *EI = ExtractValueInst::Create(AI, 0, "", AfterAllocaInsertPt);
      Instruction *NI = InsertValueInst::Create(AI, EI, 0);
      NI->insertAfter(EI);
      AI->replaceAllUsesWith(NI);
      // Set the operand of the instructions back to the AllocaInst.
      EI->setOperand(0, AI);
      NI->setOperand(0, AI);
    } else {
      // This is always a no-op cast because we're casting AI to AI->getType(),
      // so the source and destination types are identical.  BitCast is the
      // only possibility.
      CastInst *NC = new BitCastInst(
          AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
      AI->replaceAllUsesWith(NC);

      // Set the operand of the cast instruction back to the AllocaInst.
      // Normally it's forbidden to replace a CastInst's operand because it
      // could cause the opcode to reflect an illegal conversion.  However,
      // we're replacing it here with the same value it was constructed with.
      // We do this because the above replaceAllUsesWith() clobbered the
      // operand, but we want this one to remain.
      NC->setOperand(0, AI);
    }
  }

  // Finally, scan the code looking for instructions with bad live ranges.
  for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
      // Ignore obvious cases we don't have to handle.  In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block.  Ignore them quickly.
      Instruction *Inst = II;
      if (Inst->use_empty()) continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->use_back())->getParent() == BB &&
          !isa<PHINode>(Inst->use_back())) continue;

      // If this is an alloca in the entry block, it's not a real register
      // value.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F->begin())
          continue;

      // Avoid iterator invalidation by copying users to a temporary vector.
      SmallVector<Instruction*,16> Users;
      for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
           UI != E; ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        if (User->getParent() != BB || isa<PHINode>(User))
          Users.push_back(User);
      }

      // Find all of the blocks that this value is live in.
      std::set<BasicBlock*> LiveBBs;
      LiveBBs.insert(Inst->getParent());
      while (!Users.empty()) {
        Instruction *U = Users.back();
        Users.pop_back();

        if (!isa<PHINode>(U)) {
          MarkBlocksLiveIn(U->getParent(), LiveBBs);
        } else {
          // Uses for a PHI node occur in their predecessor block.
          PHINode *PN = cast<PHINode>(U);
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
        }
      }

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
          NeedsSpill = true;
        }
      }

      // If we decided we need a spill, do it.
      // FIXME: Spilling this way is overkill, as it forces all uses of
      // the value to be reloaded from the stack slot, even those that aren't
      // in the unwind blocks. We should be more selective.
      if (NeedsSpill) {
        ++NumSpilled;
        DemoteRegToStack(*Inst, true);
      }
    }
}