static bool ExpandOpForIntSize(Module *M, unsigned Bits, bool Mul) {
  IntegerType *IntTy = IntegerType::get(M->getContext(), Bits);
  SmallVector<Type *, 1> Types;
  Types.push_back(IntTy);
  Intrinsic::ID ID = (Mul ? Intrinsic::umul_with_overflow
                          : Intrinsic::uadd_with_overflow);
  std::string Name = Intrinsic::getName(ID, Types);
  Function *Intrinsic = M->getFunction(Name);
  if (!Intrinsic)
    return false;

  for (Value::use_iterator CallIter = Intrinsic->use_begin(),
         E = Intrinsic->use_end(); CallIter != E; ) {
    CallInst *Call = dyn_cast<CallInst>(*CallIter++);
    if (!Call) {
      report_fatal_error("ExpandArithWithOverflow: Taking the address of a "
                         "*.with.overflow intrinsic is not allowed");
    }

    Value *VariableArg;
    ConstantInt *ConstantArg;
    if (ConstantInt *C = dyn_cast<ConstantInt>(Call->getArgOperand(0))) {
      VariableArg = Call->getArgOperand(1);
      ConstantArg = C;
    } else if (ConstantInt *C = dyn_cast<ConstantInt>(Call->getArgOperand(1))) {
      VariableArg = Call->getArgOperand(0);
      ConstantArg = C;
    } else {
      errs() << "Use: " << *Call << "\n";
      report_fatal_error("ExpandArithWithOverflow: At least one argument of "
                         "*.with.overflow must be a constant");
    }

    Value *ArithResult = BinaryOperator::Create(
        (Mul ? Instruction::Mul : Instruction::Add), VariableArg, ConstantArg,
        Call->getName() + ".arith", Call);

    uint64_t ArgMax;
    if (Mul) {
      ArgMax = UintTypeMax(Bits) / ConstantArg->getZExtValue();
    } else {
      ArgMax = UintTypeMax(Bits) - ConstantArg->getZExtValue();
    }
    Value *OverflowResult = new ICmpInst(
        Call, CmpInst::ICMP_UGT, VariableArg, ConstantInt::get(IntTy, ArgMax),
        Call->getName() + ".overflow");

    // Construct the struct result.
    Value *NewStruct = UndefValue::get(Call->getType());
    NewStruct = CreateInsertValue(NewStruct, 0, ArithResult, Call);
    NewStruct = CreateInsertValue(NewStruct, 1, OverflowResult, Call);
    Call->replaceAllUsesWith(NewStruct);
    Call->eraseFromParent();
  }

  Intrinsic->eraseFromParent();
  return true;
}
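// Note: the expansion above relies on two helpers, UintTypeMax() and
// CreateInsertValue(), that are not shown in this excerpt. The sketch below is
// a plausible reconstruction based only on how they are called; the actual
// definitions may differ.

// Largest value representable in an unsigned integer of the given bit width.
// Assumes Bits <= 64, which is all the *.with.overflow expansion handles.
static uint64_t UintTypeMax(unsigned Bits) {
  if (Bits == 64)
    return ~(uint64_t)0;
  return (((uint64_t)1) << Bits) - 1;
}

// Insert Field into the struct value StructVal at position Index, placing the
// new insertvalue instruction before BaseInst.
static Value *CreateInsertValue(Value *StructVal, unsigned Index, Value *Field,
                                Instruction *BaseInst) {
  SmallVector<unsigned, 1> Indexes;
  Indexes.push_back(Index);
  return InsertValueInst::Create(StructVal, Field, Indexes,
                                 BaseInst->getName() + ".insert", BaseInst);
}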
/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes. This function analyzes BB to see if there are any calls, and if
/// so, it rewrites them to be invokes that jump to InvokeDest and fills in the
/// PHI nodes in that block with the values specified in InvokeDestPHIValues.
///
/// Returns true to indicate that the next block should be skipped.
static bool HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  LandingPadInst *LPI = Invoke.getLandingPadInst();

  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    if (LandingPadInst *L = dyn_cast<LandingPadInst>(I)) {
      unsigned NumClauses = LPI->getNumClauses();
      L->reserveClauses(NumClauses);
      for (unsigned i = 0; i != NumClauses; ++i)
        L->addClause(LPI->getClause(i));
    }

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    // Inline asm calls cannot throw.
    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // Convert this function call into an invoke instruction. First, split the
    // basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock.
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
                                        Invoke.getOuterResumeDest(),
                                        InvokeArgs, CI->getName(), BB);
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call.
    Split->getInstList().pop_front();

    // Update any PHI nodes in the exceptional block to indicate that there is
    // now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return false;
  }

  return false;
}
/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes. This function analyzes BB to see if there are any calls, and if
/// so, it rewrites them to be invokes that jump to InvokeDest and fills in the
/// PHI nodes in that block with the values specified in InvokeDestPHIValues.
///
static void HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *InvokeDest,
    const SmallVectorImpl<Value*> &InvokeDestPHIValues) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);
    if (CI == 0) continue;

    // If this call cannot unwind, don't convert it to an invoke.
    if (CI->doesNotThrow()) continue;

    // Convert this function call into an invoke instruction.
    // First, split the basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Next, create the new invoke instruction, inserting it at the end
    // of the old basic block.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II =
      InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
                         InvokeArgs.begin(), InvokeArgs.end(),
                         CI->getName(), BB->getTerminator());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the unconditional branch inserted by splitBasicBlock.
    BB->getInstList().pop_back();
    Split->getInstList().pop_front();  // Delete the original call

    // Update any PHI nodes in the exceptional block to indicate that
    // there is now a new entry in them.
    unsigned i = 0;
    for (BasicBlock::iterator I = InvokeDest->begin();
         isa<PHINode>(I); ++I, ++i)
      cast<PHINode>(I)->addIncoming(InvokeDestPHIValues[i], BB);

    // This basic block is now complete, the caller will continue scanning the
    // next one.
    return;
  }
}
// Return the copied value if the value is a bs copy, otherwise null.
Value *getBSCopyValue(Value *v) {
  CallInst *call = dyn_cast<CallInst>(v);
  if (!call) return nullptr;
  // This is kind of dubious
  return call->getName().find(".__rmc_bs_copy") != StringRef::npos ?
      call->getOperand(0) : nullptr;
}
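// For illustration only: a hypothetical convenience wrapper (not part of the
// original code) that uses getBSCopyValue() to peel away a chain of bs copies
// and reach the underlying value.
Value *stripBSCopies(Value *v) {
  while (Value *inner = getBSCopyValue(v))
    v = inner;
  return v;
}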
// visitCallInst - This converts all LLVM call instructions into invoke
// instructions. The except part of the invoke goes to the "LongJmpBlkPre"
// that grabs the exception and proceeds to determine if it's a longjmp
// exception or not.
void LowerSetJmp::visitCallInst(CallInst& CI) {
  if (CI.getCalledFunction())
    if (!IsTransformableFunction(CI.getCalledFunction()->getName()) ||
        CI.getCalledFunction()->isIntrinsic()) return;

  BasicBlock* OldBB = CI.getParent();

  // If not reachable from a setjmp call, don't transform.
  if (!DFSBlocks.count(OldBB)) return;

  BasicBlock* NewBB = OldBB->splitBasicBlock(CI);
  assert(NewBB && "Couldn't split BB of \"call\" instruction!!");
  DFSBlocks.insert(NewBB);
  NewBB->setName("Call2Invoke");

  Function* Func = OldBB->getParent();

  // Construct the new "invoke" instruction.
  TerminatorInst* Term = OldBB->getTerminator();
  std::vector<Value*> Params(CI.op_begin() + 1, CI.op_end());
  InvokeInst* II =
    InvokeInst::Create(CI.getCalledValue(), NewBB, PrelimBBMap[Func],
                       Params.begin(), Params.end(), CI.getName(), Term);
  II->setCallingConv(CI.getCallingConv());
  II->setParamAttrs(CI.getParamAttrs());

  // Replace the old call inst with the invoke inst and remove the call.
  CI.replaceAllUsesWith(II);
  CI.getParent()->getInstList().erase(&CI);

  // The old terminator is useless now that we have the invoke inst.
  Term->getParent()->getInstList().erase(Term);
  ++CallsTransformed;
}
void visitCallInst(CallInst &I) {
  string intrinsic = I.getCalledFunction()->getName().str();

  if (intrinsic.find("modmul") != string::npos) {
    CallInst *enterMontpro1 =
        enterMontgomery(I.getOperand(0), I.getOperand(2), &I);
    CallInst *enterMontpro2 =
        enterMontgomery(I.getOperand(1), I.getOperand(2), &I);
    CallInst *mulMontpro = mulMontgomery(I.getName().str(), enterMontpro1,
                                         enterMontpro2, I.getOperand(2), &I);
    CallInst *exitMontpro = leaveMontgomery(mulMontpro, I.getOperand(2), &I);

    I.replaceAllUsesWith(exitMontpro);
    // Erase the original call now that its uses have been rewired (mirrors the
    // modexp branch below; removeFromParent alone would leak the instruction).
    I.eraseFromParent();
  } else if (intrinsic.find("modexp") != string::npos) {
    CallInst *enterMontpro1 =
        enterMontgomery(I.getOperand(0), I.getOperand(2), &I);
    CallInst *expMontpro = expMontgomery(I.getName().str(), enterMontpro1,
                                         I.getOperand(1), I.getOperand(2), &I);
    CallInst *exitMontpro = leaveMontgomery(expMontpro, I.getOperand(2), &I);

    I.replaceAllUsesWith(exitMontpro);
    I.eraseFromParent();
  }
}
/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes and turn unwind
/// instructions into branches to the invoke unwind dest.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();
  std::vector<Value*> InvokeDestPHIValues;

  // If there are PHI nodes in the unwind destination block, we need to
  // keep track of which values came into them from this invoke, then remove
  // the entry for this block.
  BasicBlock *InvokeBlock = II->getParent();
  for (BasicBlock::iterator I = InvokeDest->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    // Save the value to use for this edge.
    InvokeDestPHIValues.push_back(PN->getIncomingValueForBlock(InvokeBlock));
  }

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  if (InlinedCodeInfo.ContainsCalls || InlinedCodeInfo.ContainsUnwinds) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      if (InlinedCodeInfo.ContainsCalls) {
        for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ){
          Instruction *I = BBI++;

          // We only need to check for function calls: inlined invoke
          // instructions require no special handling.
          if (!isa<CallInst>(I)) continue;
          CallInst *CI = cast<CallInst>(I);

          // If this call cannot unwind, don't convert it to an invoke.
          if (CI->doesNotThrow())
            continue;

          // Convert this function call into an invoke instruction.
          // First, split the basic block.
          BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

          // Next, create the new invoke instruction, inserting it at the end
          // of the old basic block.
          SmallVector<Value*, 8> InvokeArgs(CI->op_begin()+1, CI->op_end());
          InvokeInst *II =
            InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
                               InvokeArgs.begin(), InvokeArgs.end(),
                               CI->getName(), BB->getTerminator());
          II->setCallingConv(CI->getCallingConv());
          II->setAttributes(CI->getAttributes());

          // Make sure that anything using the call now uses the invoke!
          CI->replaceAllUsesWith(II);

          // Delete the unconditional branch inserted by splitBasicBlock.
          BB->getInstList().pop_back();
          Split->getInstList().pop_front();  // Delete the original call

          // Update any PHI nodes in the exceptional block to indicate that
          // there is now a new entry in them.
          unsigned i = 0;
          for (BasicBlock::iterator I = InvokeDest->begin();
               isa<PHINode>(I); ++I, ++i) {
            PHINode *PN = cast<PHINode>(I);
            PN->addIncoming(InvokeDestPHIValues[i], BB);
          }

          // This basic block is now complete, start scanning the next one.
          break;
        }
      }

      if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
        // An UnwindInst requires special handling when it gets inlined into an
        // invoke site.  Once this happens, we know that the unwind would cause
        // a control transfer to the invoke exception destination, so we can
        // transform it into a direct branch to the exception destination.
        BranchInst::Create(InvokeDest, UI);

        // Delete the unwind instruction!
        UI->eraseFromParent();

        // Update any PHI nodes in the exceptional block to indicate that
        // there is now a new entry in them.
        unsigned i = 0;
        for (BasicBlock::iterator I = InvokeDest->begin();
             isa<PHINode>(I); ++I, ++i) {
          PHINode *PN = cast<PHINode>(I);
          PN->addIncoming(InvokeDestPHIValues[i], BB);
        }
      }
    }
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}
// Replace "packW" and "unpackW" intrinsics by insert/extract operations and // update the uses accordingly. void FunctionVectorizer::generatePackUnpackCode(Function* f, const WFVInfo& info) { assert (f); SmallVector<CallInst*, 16> eraseVec; for (auto &BB : *f) { Instruction* allocPos = BB.getFirstInsertionPt(); for (auto &I : BB) { Instruction* inst = &I; if (isUnpackWFunctionCall(inst)) { DEBUG_WFV( outs() << "generateUnpackCode(" << *inst << " )\n"; ); CallInst* unpackCall = cast<CallInst>(inst); Value* value = unpackCall->getArgOperand(0); Value* indexVal = unpackCall->getArgOperand(1); // Extract scalar values. Value* extract = generateHorizontalExtract(value, indexVal, unpackCall->getName(), allocPos, unpackCall, info); // If the type only matches structurally, create an additional bitcast. Type* oldType = unpackCall->getType(); Type* newType = extract->getType(); if (oldType != newType) { assert (newType->canLosslesslyBitCastTo(oldType) || WFV::typesMatch(oldType, newType)); Instruction* bc = new BitCastInst(extract, oldType, "", unpackCall); // Copy properties from unpackCall. WFV::copyMetadata(bc, *unpackCall); extract = bc; } // Rewire the use. assert (unpackCall->getNumUses() == 1); Value* use = *unpackCall->use_begin(); assert (isa<Instruction>(use)); Instruction* scalarUse = cast<Instruction>(use); scalarUse->replaceUsesOfWith(unpackCall, extract); // Erase now unused unpack call. eraseVec.push_back(unpackCall); // If the returned extract operation is an alloca, we have to // make sure that all changes to that memory location are // correctly written back to the original memory from which // the sub-element was extracted. // This means we have to insert merge and store operations // after every use of this value (including "forwarded" uses // via casts, phis, and GEPs). // However, we must only merge back those values that were // modified. This is not only for efficiency, but also for // correctness, since there may be uninitialized pointers in // a structure, which we must not load/store from/to (see // test_struct_extra05 with all analyses disabled). if (isa<AllocaInst>(extract) || (isa<BitCastInst>(extract) && isa<AllocaInst>(cast<BitCastInst>(extract)->getOperand(0)))) { generateWriteBackOperations(cast<Instruction>(extract), cast<Instruction>(extract), value, indexVal, info); } } else if (isPackWFunctionCall(inst)) { DEBUG_WFV( outs() << "generatePackCode(" << *inst << " )\n"; ); CallInst* packCall = cast<CallInst>(inst); assert (WFV::isVectorizedType(*packCall->getType()) && "packCall should have vector return type after inst vectorization!"); SmallVector<Value*, 8> scalarVals(info.mVectorizationFactor); // Get scalar results for merge. for (unsigned i=0; i<info.mVectorizationFactor; ++i) { scalarVals[i] = packCall->getArgOperand(i); } // Merge scalar results. Instruction* merge = generateHorizontalMerge(scalarVals, packCall->getType(), "", packCall, info); // Rewire the uses. packCall->replaceAllUsesWith(merge); // Copy properties from packCall. WFV::copyMetadata(merge, *packCall); // Erase now unused pack call. eraseVec.push_back(packCall); }
bool TracingNoGiri::visitSpecialCall(CallInst &CI) {
  Function *CalledFunc = CI.getCalledFunction();

  // We do not support indirect calls to special functions.
  if (CalledFunc == nullptr)
    return false;

  // Do not consider a function special if it has a function body; in this
  // case, the programmer has supplied his or her version of the function, and
  // we will instrument it.
  if (!CalledFunc->isDeclaration())
    return false;

  // Check the name of the function against a list of known special functions.
  std::string name = CalledFunc->getName().str();

  if (name.substr(0,12) == "llvm.memset.") {
    instrumentLock(&CI);

    // Get the destination pointer and cast it to a void pointer.
    Value *dstPointer = CI.getOperand(0);
    dstPointer = castTo(dstPointer, VoidPtrType, dstPointer->getName(), &CI);
    // Get the number of bytes that will be written into the buffer.
    Value *NumElts = CI.getOperand(2);
    // Get the ID of the external function call instruction.
    Value *CallID = ConstantInt::get(Int32Type, lsNumPass->getID(&CI));
    // Create the call to the run-time to record the external call instruction.
    std::vector<Value *> args = make_vector(CallID, dstPointer, NumElts, 0);
    CallInst::Create(RecordStore, args, "", &CI);

    instrumentUnlock(&CI);
    ++NumExtFuns; // Update statistics
    return true;
  } else if (name.substr(0,12) == "llvm.memcpy." ||
             name.substr(0,13) == "llvm.memmove." ||
             name == "strcpy") {
    instrumentLock(&CI);

    /* Record Load src, [CI] Load dst [CI] */
    // Get the destination and source pointers and cast them to void pointers.
    Value *dstPointer = CI.getOperand(0);
    Value *srcPointer = CI.getOperand(1);
    dstPointer = castTo(dstPointer, VoidPtrType, dstPointer->getName(), &CI);
    srcPointer = castTo(srcPointer, VoidPtrType, srcPointer->getName(), &CI);
    // Get the ID of the ext fun call instruction.
    Value *CallID = ConstantInt::get(Int32Type, lsNumPass->getID(&CI));
    // Create the call to the run-time to record the loads and stores of
    // external call instruction.
    if (name == "strcpy") {
      // FIXME: Should the tracer function be inserted before or after the call?
      std::vector<Value *> args = make_vector(CallID, srcPointer, 0);
      CallInst::Create(RecordStrLoad, args, "", &CI);

      args = make_vector(CallID, dstPointer, 0);
      CallInst *recStore = CallInst::Create(RecordStrStore, args, "", &CI);
      CI.moveBefore(recStore);
    } else {
      // Get the number of elements to be transferred.
      Value *NumElts = CI.getOperand(2);
      std::vector<Value *> args = make_vector(CallID, srcPointer, NumElts, 0);
      CallInst::Create(RecordLoad, args, "", &CI);

      args = make_vector(CallID, dstPointer, NumElts, 0);
      CallInst::Create(RecordStore, args, "", &CI);
    }

    instrumentUnlock(&CI);
    ++NumExtFuns; // Update statistics
    return true;
  } else if (name == "strcat") {
    /* Record Load dst, Load Src, Store dst-end before call inst */
    instrumentLock(&CI);

    // Get the destination and source pointers and cast them to void pointers.
    Value *dstPointer = CI.getOperand(0);
    Value *srcPointer = CI.getOperand(1);
    dstPointer = castTo(dstPointer, VoidPtrType, dstPointer->getName(), &CI);
    srcPointer = castTo(srcPointer, VoidPtrType, srcPointer->getName(), &CI);
    // Get the ID of the ext fun call instruction.
    Value *CallID = ConstantInt::get(Int32Type, lsNumPass->getID(&CI));
    // Create the call to the run-time to record the loads and stores of
    // external call instruction.
    // CHECK: Should the tracer function be inserted before or after the call?
    std::vector<Value *> args = make_vector(CallID, dstPointer, 0);
    CallInst::Create(RecordStrLoad, args, "", &CI);

    args = make_vector(CallID, srcPointer, 0);
    CallInst::Create(RecordStrLoad, args, "", &CI);

    // Record the addresses before concat as they will be lost after concat.
    args = make_vector(CallID, dstPointer, srcPointer, 0);
    CallInst::Create(RecordStrcatStore, args, "", &CI);

    instrumentUnlock(&CI);
    ++NumExtFuns; // Update statistics
    return true;
  } else if (name == "strlen") {
    /* Record Load */
    instrumentLock(&CI);

    // Get the source pointer and cast it to a void pointer.
    Value *srcPointer = CI.getOperand(0);
    srcPointer = castTo(srcPointer, VoidPtrType, srcPointer->getName(), &CI);
    // Get the ID of the ext fun call instruction.
    Value *CallID = ConstantInt::get(Int32Type, lsNumPass->getID(&CI));
    std::vector<Value *> args = make_vector(CallID, srcPointer, 0);
    CallInst::Create(RecordStrLoad, args, "", &CI);

    instrumentUnlock(&CI);
    ++NumExtFuns; // Update statistics
    return true;
  } else if (name == "calloc") {
    instrumentLock(&CI);

    // Get the number of bytes that will be written into the buffer.
    Value *NumElts = BinaryOperator::Create(BinaryOperator::Mul,
                                            CI.getOperand(0),
                                            CI.getOperand(1),
                                            "calloc par1 * par2",
                                            &CI);
    // Get the destination pointer and cast it to a void pointer.
    // Instruction * dstPointerInst;
    Value *dstPointer = castTo(&CI, VoidPtrType, CI.getName(), &CI);

    /*
    // To move after call inst, we need to know if cast is a constant expr or inst
    if ((dstPointerInst = dyn_cast<Instruction>(dstPointer))) {
      CI.moveBefore(dstPointerInst);
      // dstPointerInst->insertAfter(&CI);
      // ((Instruction *)NumElts)->insertAfter(dstPointerInst);
    } else {
      CI.moveBefore((Instruction *)NumElts);
      // ((Instruction *)NumElts)->insertAfter(&CI);
    }
    dstPointer = dstPointerInst; // Assign to dstPointer for instrn or non-instrn values
    */

    // Get the ID of the external function call instruction.
    Value *CallID = ConstantInt::get(Int32Type, lsNumPass->getID(&CI));

    //
    // Create the call to the run-time to record the external call instruction.
    //
    std::vector<Value *> args = make_vector(CallID, dstPointer, NumElts, 0);
    CallInst *recStore = CallInst::Create(RecordStore, args, "", &CI);
    CI.moveBefore(recStore); //recStore->insertAfter((Instruction *)NumElts);

    // Move the cast, #byte computation and store to after the call inst.
    CI.moveBefore(cast<Instruction>(NumElts));

    instrumentUnlock(&CI);
    ++NumExtFuns; // Update statistics
    return true;
  } else if (name == "tolower" || name == "toupper") {
    // Not needed as there are no loads and stores
  /* } else if (name == "strncpy/itoa/stdarg/scanf/fscanf/sscanf/fread/complex/strftime/strptime/asctime/ctime") { */
  } else if (name == "fscanf") {
    // TODO
    // Instead of parsing the format string, can we use the types of the arguments?
  } else if (name == "sscanf") {
    // TODO
  } else if (name == "sprintf") {
    instrumentLock(&CI);

    // Get the pointer to the destination buffer.
    Value *dstPointer = CI.getOperand(0);
    dstPointer = castTo(dstPointer, VoidPtrType, dstPointer->getName(), &CI);

    // Get the ID of the call instruction.
    Value *CallID = ConstantInt::get(Int32Type, lsNumPass->getID(&CI));

    // Scan through the arguments looking for what appears to be a character
    // string.  Generate load records for each of these strings.
    for (unsigned index = 2; index < CI.getNumOperands(); ++index) {
      if (CI.getOperand(index)->getType() == VoidPtrType) {
        // Create the call to the run-time to record the load from the string.
        // What about other loads?
        Value *Ptr = CI.getOperand(index);
        std::vector<Value *> args = make_vector(CallID, Ptr, 0);
        CallInst::Create(RecordStrLoad, args, "", &CI);

        ++NumLoadStrings; // Update statistics
      }
    }

    // Create the call to the run-time to record the external call instruction.
    std::vector<Value *> args = make_vector(CallID, dstPointer, 0);
    CallInst *recStore = CallInst::Create(RecordStrStore, args, "", &CI);
    CI.moveBefore(recStore);

    instrumentUnlock(&CI);
    ++NumStoreStrings; // Update statistics
    return true;
  } else if (name == "fgets") {
    instrumentLock(&CI);

    // Get the pointer to the destination buffer.
    Value *dstPointer = CI.getOperand(0);
    dstPointer = castTo(dstPointer, VoidPtrType, dstPointer->getName(), &CI);
    // Get the ID of the ext fun call instruction.
    Value *CallID = ConstantInt::get(Int32Type, lsNumPass->getID(&CI));

    // Create the call to the run-time to record the external call instruction.
    std::vector<Value *> args = make_vector(CallID, dstPointer, 0);
    CallInst *recStore = CallInst::Create(RecordStrStore, args, "", &CI);
    CI.moveBefore(recStore);

    instrumentUnlock(&CI);
    ++NumStoreStrings; // Update statistics
    return true;
  }

  return false;
}
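// Note: the instrumentation above builds its argument lists with a
// make_vector() helper that is not part of this excerpt. The trailing 0 in
// every call suggests a sentinel-terminated variadic helper; the sketch below
// is an assumed reconstruction of that idea, not necessarily the original.
#include <cstdarg>
#include <vector>

// Collect arguments of type T into a std::vector until a null sentinel is hit.
template <typename T>
static std::vector<T> make_vector(T A, ...) {
  va_list Args;
  va_start(Args, A);
  std::vector<T> Result;
  Result.push_back(A);
  while (T Val = va_arg(Args, T))
    Result.push_back(Val);
  va_end(Args);
  return Result;
}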
void insertCallToAccessFunction(Function *F, Function *cF) {
  CallInst *I;
  Instruction *bI;
  std::vector<Value *> Args;
  std::vector<Type *> ArgsTy;
  Module *M = F->getParent();
  std::string name;
  Function *nF, *tF;
  FunctionType *FTy;
  std::stringstream out;

  Value::user_iterator i = F->user_begin(), e = F->user_end();
  while (i != e) {
    Args.clear();
    ArgsTy.clear();

    /************* C codes ***********/
    if (isa<CallInst>(*i)) {
      I = dyn_cast<CallInst>(*i);

      // call to the access function F
      Args.push_back(I->getArgOperand(0));
      ArgsTy.push_back(I->getArgOperand(0)->getType());

      // call to the execute function cF
      Args.push_back(cF);
      ArgsTy.push_back(PointerType::get(cF->getFunctionType(), 0));

      unsigned int t;
      for (t = 1; t < I->getNumArgOperands(); t++) {
        Args.push_back(I->getArgOperand(t));
        ArgsTy.push_back(I->getArgOperand(t)->getType());
        // errs() << *(I->getArgOperand(t)) << " is or not "
        //        << isa<GlobalVariable>(I->getArgOperand(t)) << "\n";
      }

      tF = dyn_cast<Function>(I->getCalledFunction());
      FTy = FunctionType::get(tF->getReturnType(), ArgsTy, 0);

      out.str(std::string());
      out << "task_DAE_" << I->getNumArgOperands() - 1;
      nF = (Function *)M->getOrInsertFunction(out.str(), FTy);

      CallInst *ci = CallInst::Create(nF, Args, I->getName(), I);
      i++;
      I->replaceAllUsesWith(ci);
      I->eraseFromParent();
    }
    /************* C++ codes ***********/
    else {
      Value::user_iterator bit = (*i)->user_begin(), bite = (*i)->user_end();
      Type *iTy = (*i)->getType();
      i++;

      while (bit != bite) {
        Args.clear();
        ArgsTy.clear();

        I = dyn_cast<CallInst>(*bit);
        bit++;

        // call to the access function F
        Args.push_back(I->getArgOperand(0));
        ArgsTy.push_back(I->getArgOperand(0)->getType());

        // call to the execute function cF
        bI = new BitCastInst(cF, (iTy), "_TPR", I);
        Args.push_back(bI);
        ArgsTy.push_back(bI->getType());

        unsigned int t;
        for (t = 1; t < I->getNumArgOperands(); t++) {
          Args.push_back(I->getArgOperand(t));
          ArgsTy.push_back(I->getArgOperand(t)->getType());
        }

        tF = dyn_cast<Function>(I->getCalledFunction());
        FTy = FunctionType::get(tF->getReturnType(), ArgsTy, 0);

        out.str(std::string());
        out << "task_DAE_" << I->getNumArgOperands() - 1;
        nF = (Function *)M->getOrInsertFunction(out.str(), FTy);

        CallInst *ci = CallInst::Create(nF, Args, I->getName(), I);
        I->replaceAllUsesWith(ci);
        I->eraseFromParent();
      }
    }
  }
}
void DSWP::insertConsume(Instruction *u, Instruction *v, DType dtype,
                         int channel, int uthread, int vthread) {
  Instruction *oldu = dyn_cast<Instruction>(newToOld[u]);
  Instruction *insPos = placeEquivalents[vthread][oldu];
  if (insPos == NULL) {
    insPos = dyn_cast<Instruction>(instMap[vthread][oldu]);
    if (insPos == NULL) {
      error("no valid insertion point for the consume call");
    }
  }

  // call sync_consume(channel)
  Function *fun = module->getFunction("sync_consume");
  vector<Value *> args;
  args.push_back(ConstantInt::get(Type::getInt32Ty(*context), channel));
  CallInst *call = CallInst::Create(fun, args, "c" + itoa(channel), insPos);

  if (dtype == REG) {
    CastInst *cast;
    string name = call->getName().str() + "_val";

    if (u->getType()->isIntegerTy()) {
      cast = new TruncInst(call, u->getType(), name);
    } else if (u->getType()->isFloatingPointTy()) {
      if (u->getType()->isFloatTy())
        error("cannot deal with float");
      cast = new BitCastInst(call, u->getType(), name);
    } else if (u->getType()->isPointerTy()) {
      cast = new IntToPtrInst(call, u->getType(), name);
    } else {
      error("unexpected value type");
    }
    cast->insertBefore(insPos);

    // replace the uses
    for (Instruction::use_iterator ui = oldu->use_begin(), ue = oldu->use_end();
         ui != ue; ++ui) {
      Instruction *user = dyn_cast<Instruction>(*ui);
      if (user == NULL) {
        error("used by a non-instruction?");
      }

      // make sure it's in the same function...
      if (user->getParent()->getParent() != v->getParent()->getParent()) {
        continue;
      }

      // call replaceUses so that it handles phi nodes
      map<Value *, Value *> reps;
      reps[oldu] = cast;
      replaceUses(user, reps);
    }
  }
  /* TODO: need to handle true memory dependences more than just syncing?
  else if (dtype == DTRUE) { // READ after WRITE
    error("check mem dep!!");

    if (!isa<LoadInst>(v)) {
      error("not true dependency");
    }
    BitCastInst *cast =
        new BitCastInst(call, v->getType(), call->getName().str() + "_ptr");
    cast->insertBefore(v);

    // replace the v with 'cast' in v's thread:
    // (other threads will be handled through their own dependences)
    for (Instruction::use_iterator ui = v->use_begin(), ue = v->use_end();
         ui != ue; ui++) {
      Instruction *user = dyn_cast<Instruction>(*ui);
      if (user == NULL) {
        error("how could it be NULL");
      }

      // int userthread = this->getNewInstAssigned(user);
      if (user->getParent()->getParent() != v->getParent()->getParent()) {
        continue;
      }

      for (unsigned i = 0; i < user->getNumOperands(); i++) {
        Value *op = user->getOperand(i);
        if (op == v) {
          user->setOperand(i, cast);
        }
      }
    }
  }
  */
  else {
    // nothing to do
  }
}
/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes. This function analyzes BB to see if there are any calls, and if
/// so, it rewrites them to be invokes that jump to InvokeDest and fills in the
/// PHI nodes in that block with the values specified in InvokeDestPHIValues.
///
/// Returns true to indicate that the next block should be skipped.
static bool HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);
    if (CI == 0) continue;

    // LIBUNWIND: merge selector instructions.
    if (EHSelectorInst *Inner = dyn_cast<EHSelectorInst>(CI)) {
      EHSelectorInst *Outer = Invoke.getOuterSelector();
      if (!Outer) continue;

      bool innerIsOnlyCleanup = isCleanupOnlySelector(Inner);
      bool outerIsOnlyCleanup = isCleanupOnlySelector(Outer);

      // If both selectors contain only cleanups, we don't need to do
      // anything.  TODO: this is really just a very specific instance
      // of a much more general optimization.
      if (innerIsOnlyCleanup && outerIsOnlyCleanup) continue;

      // Otherwise, we just append the outer selector to the inner selector.
      SmallVector<Value*, 16> NewSelector;
      for (unsigned i = 0, e = Inner->getNumArgOperands(); i != e; ++i)
        NewSelector.push_back(Inner->getArgOperand(i));
      for (unsigned i = 2, e = Outer->getNumArgOperands(); i != e; ++i)
        NewSelector.push_back(Outer->getArgOperand(i));

      CallInst *NewInner =
        IRBuilder<>(Inner).CreateCall(Inner->getCalledValue(), NewSelector);
      // No need to copy attributes, calling convention, etc.
      NewInner->takeName(Inner);
      Inner->replaceAllUsesWith(NewInner);
      Inner->eraseFromParent();
      continue;
    }

    // If this call cannot unwind, don't convert it to an invoke.
    if (CI->doesNotThrow())
      continue;

    // Convert this function call into an invoke instruction.
    // First, split the basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock.
    BB->getInstList().pop_back();

    // LIBUNWIND: If this is a call to @llvm.eh.resume, just branch
    // directly to the new landing pad.
    if (Invoke.forwardEHResume(CI, BB)) {
      // TODO: 'Split' is now unreachable; clean it up.

      // We want to leave the original call intact so that the call
      // graph and other structures won't get misled.  We also have to
      // avoid processing the next block, or we'll iterate here forever.
      return true;
    }

    // Otherwise, create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II =
      InvokeInst::Create(CI->getCalledValue(), Split,
                         Invoke.getOuterUnwindDest(),
                         InvokeArgs, CI->getName(), BB);
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    Split->getInstList().pop_front();  // Delete the original call

    // Update any PHI nodes in the exceptional block to indicate that
    // there is now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return false;
  }

  return false;
}
// Convert the given call to use normalized argument/return types.
template <class T>
static bool ConvertCall(T *Call, Pass *P) {
  // Don't try to change calls to intrinsics.
  if (isa<IntrinsicInst>(Call))
    return false;

  FunctionType *FTy = cast<FunctionType>(
      Call->getCalledValue()->getType()->getPointerElementType());
  FunctionType *NFTy = NormalizeFunctionType(FTy);
  if (NFTy == FTy)
    return false; // No change needed.

  // Convert arguments.
  SmallVector<Value *, 8> Args;
  for (unsigned I = 0; I < Call->getNumArgOperands(); ++I) {
    Value *Arg = Call->getArgOperand(I);
    if (NFTy->getParamType(I) != FTy->getParamType(I)) {
      Instruction::CastOps CastType =
          Call->getAttributes().hasAttribute(I + 1, Attribute::SExt)
              ? Instruction::SExt
              : Instruction::ZExt;
      Arg = CopyDebug(CastInst::Create(CastType, Arg, NFTy->getParamType(I),
                                       "arg_ext", Call),
                      Call);
    }
    Args.push_back(Arg);
  }

  Value *CastFunc =
      CopyDebug(new BitCastInst(Call->getCalledValue(), NFTy->getPointerTo(),
                                Call->getName() + ".arg_cast", Call),
                Call);

  Value *Result = NULL;
  if (CallInst *OldCall = dyn_cast<CallInst>(Call)) {
    CallInst *NewCall =
        CopyDebug(CallInst::Create(CastFunc, Args, "", OldCall), OldCall);
    NewCall->takeName(OldCall);
    NewCall->setAttributes(OldCall->getAttributes());
    NewCall->setCallingConv(OldCall->getCallingConv());
    NewCall->setTailCall(OldCall->isTailCall());
    Result = NewCall;

    if (FTy->getReturnType() != NFTy->getReturnType()) {
      Result = CopyDebug(new TruncInst(NewCall, FTy->getReturnType(),
                                       NewCall->getName() + ".ret_trunc",
                                       Call),
                         Call);
    }
  } else if (InvokeInst *OldInvoke = dyn_cast<InvokeInst>(Call)) {
    BasicBlock *Parent = OldInvoke->getParent();
    BasicBlock *NormalDest = OldInvoke->getNormalDest();
    BasicBlock *UnwindDest = OldInvoke->getUnwindDest();

    if (FTy->getReturnType() != NFTy->getReturnType()) {
      if (BasicBlock *SplitDest = SplitCriticalEdge(Parent, NormalDest)) {
        NormalDest = SplitDest;
      }
    }

    InvokeInst *New =
        CopyDebug(InvokeInst::Create(CastFunc, NormalDest, UnwindDest, Args,
                                     "", OldInvoke),
                  OldInvoke);
    New->takeName(OldInvoke);

    if (FTy->getReturnType() != NFTy->getReturnType()) {
      Result = CopyDebug(new TruncInst(New, FTy->getReturnType(),
                                       New->getName() + ".ret_trunc",
                                       NormalDest->getTerminator()),
                         OldInvoke);
    } else {
      Result = New;
    }

    New->setAttributes(OldInvoke->getAttributes());
    New->setCallingConv(OldInvoke->getCallingConv());
  }

  Call->replaceAllUsesWith(Result);
  Call->eraseFromParent();
  return true;
}
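// ConvertCall assumes a NormalizeFunctionType() helper that is not shown in
// this excerpt. Judging only from how its result is used (arguments are
// sign-/zero-extended to the new parameter types and results are truncated
// back to the old return type), it widens small integer types. The sketch
// below illustrates that idea under the assumption that "small" means
// narrower than 32 bits; the real helper may apply different rules.
static Type *NormalizeType(Type *Ty) {
  if (IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return IntegerType::get(Ty->getContext(), 32);
  }
  return Ty;
}

static FunctionType *NormalizeFunctionType(FunctionType *FTy) {
  SmallVector<Type *, 8> Params;
  for (unsigned I = 0, E = FTy->getNumParams(); I != E; ++I)
    Params.push_back(NormalizeType(FTy->getParamType(I)));
  return FunctionType::get(NormalizeType(FTy->getReturnType()), Params,
                           FTy->isVarArg());
}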