Example #1
bool SpDefUseInstrumenter::runOnModule(Module &M) {

  cerr << "instrument: --- Def-Use pair Spectrum ---\n";

  Function *Main = M.getFunction("main");
  LLVMContext &C = M.getContext();
  
  if (Main == 0) {
    cerr << "WARNING: cannot insert def-use instrumentation into a module"
         << " with no main function!\n";
    return false;  // No main, no instrumentation!
  }

  // Add library function prototype
  Constant *SpFn = M.getOrInsertFunction("_updateSpectrum", 
                          Type::getVoidTy(C), 
                          Type::getInt32Ty(C),  // spectrum index
                          Type::getInt32Ty(C),  // component index
                          NULL);


  unsigned spectrumIndex = IndexManager::getSpectrumIndex();
  unsigned nDefs = 0;
  unsigned nUses = 0;

  // Loop through all functions within module
  for (Module::iterator F = M.begin(), ME = M.end(); F != ME; ++F) {

    // skip function declarations
    if(F->isDeclaration()) 
      continue;

    // skip the _registerAll function
    if(F->getName()=="_registerAll")
      continue;
    
    // Loop through all basic blocks within function
    for (Function::iterator B = F->begin(), FE = F->end(); B != FE; ++B) {
      // Skip dead blocks (non-entry blocks with no predecessors).
      // Is this really safe??
      BasicBlock *bb = B;
      if (B != F->begin() && pred_begin(bb) == pred_end(bb)) continue;

      // Loop through all instructions within basic block
      for (BasicBlock::iterator I = B->begin(), BE = B->end(); I != BE; I++) {
    
        if(isa<DbgDeclareInst>(*I)) {

          // extract source file information from debug intrinsic
          DbgDeclareInst &DDI = cast<DbgDeclareInst>(*I);
          std::string file, dir;
          std::string name;
          GlobalVariable *gv = cast<GlobalVariable>(DDI.getVariable());
          if(!gv->hasInitializer()) continue;
          ConstantStruct *cs = cast<ConstantStruct>(gv->getInitializer());
          llvm::GetConstantStringInfo(cs->getOperand(2), name);
          unsigned int line = unsigned(cast<ConstantInt>(cs->getOperand(4))->getZExtValue());
          Value *V = cast<Value>(cs->getOperand(3));
          GlobalVariable *gv2 = cast<GlobalVariable>(cast<ConstantExpr>(V)->getOperand(0));
          if(!gv2->hasInitializer()) continue;
          ConstantStruct *cs2 = cast<ConstantStruct>(gv2->getInitializer());
          llvm::GetConstantStringInfo(cs2->getOperand(3), file);
          llvm::GetConstantStringInfo(cs2->getOperand(4), dir);

          // get the allocation instruction of the variable definition
          AllocaInst *AI;
          if(isa<AllocaInst>(DDI.getAddress())) {
            AI = cast<AllocaInst>(DDI.getAddress());
          } else if (isa<BitCastInst>(DDI.getAddress())) {
            AI = cast<AllocaInst>(cast<BitCastInst>(DDI.getAddress())->getOperand(0));
          } else {
            continue;
          }

          nDefs++;

          // add calls to lib function for each use of the variable
          int currUses = 0;
          for(AllocaInst::use_iterator U = AI->use_begin(), UE = AI->use_end(); U != UE; ++U) {
            if(isa<Instruction>(*U)) {

              User *user = *U;
              Instruction *insertInst = (Instruction*)user;

              // find most likely context location of use
              int useline = line;
              std::string usefile = file, usedir = dir;
              BasicBlock *parent = insertInst->getParent();
              BasicBlock::iterator inst = parent->begin();
              while(((Instruction *)inst) != insertInst  &&  inst != parent->end()) {
            	  /*TODO: solve DbgStopPointInst problem*/
            	/*if(isa<DbgStopPointInst>(*inst)) {
                  DbgStopPointInst &DSPI = cast<DbgStopPointInst>(*inst);
                  llvm::GetConstantStringInfo(DSPI.getDirectory(), usedir);
                  llvm::GetConstantStringInfo(DSPI.getFileName(), usefile);
                  useline = DSPI.getLine();
                }*/
                inst++;
              }

              std::stringstream usename;
              usename << name << "(use_" << currUses << ")";
              // add source context of this use to the context file
              ContextManager::addSpectrumContext(
                spectrumIndex,  // spectrumIndex
                nUses,          // componentIndex
                usedir,         // path
                usefile,        // file
                useline,        // line
                usename.str()); // name
              currUses++;

              std::vector<Value*> Args(2);
              Args[0] = ConstantInt::get(Type::getInt32Ty(C), spectrumIndex);
              Args[1] = ConstantInt::get(Type::getInt32Ty(C), nUses++);

              CallInst::Create(SpFn, Args.begin(), Args.end(), "", insertInst);
            }
          }
        }
      }
    }
  }

  // add the registration of the instrumented spectrum points in the _registerAll() function
  addSpectrumRegistration(M, spectrumIndex, nUses, "Def-Use_Pairs");
  
  std::cerr << "instrument: " << nDefs << " defines with a total number of " << nUses << " uses instrumented\n";

  // notify change of program 
  return true;
}
Example #2
bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
  SmallVector<ReturnInst*,16> Returns;
  SmallVector<UnwindInst*,16> Unwinds;
  SmallVector<InvokeInst*,16> Invokes;

  // Look through the terminators of the basic blocks to find invokes, returns
  // and unwinds.
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
      // Remember all return instructions in case we insert an invoke into this
      // function.
      Returns.push_back(RI);
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
      Invokes.push_back(II);
    } else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
      Unwinds.push_back(UI);
    }
  }

  NumInvokes += Invokes.size();
  NumUnwinds += Unwinds.size();

  // If we don't have any invokes, there's nothing to do.
  if (Invokes.empty()) return false;

  // Find the eh.selector.*, eh.exception and alloca calls.
  //
  // Remember any allocas() that aren't in the entry block, as the
  // jmpbuf saved SP will need to be updated for them.
  //
  // We'll use the first eh.selector to determine the right personality
  // function to use. For SJLJ, we always use the same personality for the
  // whole function, not on a per-selector basis.
  // FIXME: That's a bit ugly. Better way?
  SmallVector<CallInst*,16> EH_Selectors;
  SmallVector<CallInst*,16> EH_Exceptions;
  SmallVector<Instruction*,16> JmpbufUpdatePoints;

  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    // Note: Skip the entry block since there's nothing there that interests
    // us. eh.selector and eh.exception shouldn't ever be there, and we
    // want to disregard any allocas that are there.
    // 
    // FIXME: This is awkward. The new EH scheme won't need to skip the entry
    //        block.
    if (BB == F.begin()) {
      if (InvokeInst *II = dyn_cast<InvokeInst>(F.begin()->getTerminator())) {
        // FIXME: This will always be non-NULL in the new EH.
        if (LandingPadInst *LPI = II->getUnwindDest()->getLandingPadInst())
          if (!PersonalityFn) PersonalityFn = LPI->getPersonalityFn();
      }

      continue;
    }

    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
      if (CallInst *CI = dyn_cast<CallInst>(I)) {
        if (CI->getCalledFunction() == SelectorFn) {
          if (!PersonalityFn) PersonalityFn = CI->getArgOperand(1);
          EH_Selectors.push_back(CI);
        } else if (CI->getCalledFunction() == ExceptionFn) {
          EH_Exceptions.push_back(CI);
        } else if (CI->getCalledFunction() == StackRestoreFn) {
          JmpbufUpdatePoints.push_back(CI);
        }
      } else if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
        JmpbufUpdatePoints.push_back(AI);
      } else if (InvokeInst *II = dyn_cast<InvokeInst>(I)) {
        // FIXME: This will always be non-NULL in the new EH.
        if (LandingPadInst *LPI = II->getUnwindDest()->getLandingPadInst())
          if (!PersonalityFn) PersonalityFn = LPI->getPersonalityFn();
      }
    }
  }

  // If we don't have any eh.selector calls, we can't determine the personality
  // function. Without a personality function, we can't process exceptions.
  if (!PersonalityFn) return false;

  // We have invokes, so we need to add register/unregister calls to get this
  // function onto the global unwind stack.
  //
  // First thing we need to do is scan the whole function for values that are
  // live across unwind edges.  Each value that is live across an unwind edge we
  // spill into a stack location, guaranteeing that there is nothing live across
  // the unwind edge.  This process also splits all critical edges coming out of
  // invokes.
  splitLiveRangesAcrossInvokes(Invokes);


  SmallVector<LandingPadInst*, 16> LandingPads;
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator()))
      // FIXME: This will always be non-NULL in the new EH.
      if (LandingPadInst *LPI = II->getUnwindDest()->getLandingPadInst())
        LandingPads.push_back(LPI);
  }


  BasicBlock *EntryBB = F.begin();
  // Create an alloca for the incoming jump buffer ptr and the new jump buffer
  // that needs to be restored on all exits from the function.  This is an
  // alloca because the value needs to be added to the global context list.
  unsigned Align = 4; // FIXME: Should be a TLI check?
  AllocaInst *FunctionContext =
    new AllocaInst(FunctionContextTy, 0, Align,
                   "fcn_context", F.begin()->begin());

  Value *Idxs[2];
  Type *Int32Ty = Type::getInt32Ty(F.getContext());
  Value *Zero = ConstantInt::get(Int32Ty, 0);
  // We need to also keep around a reference to the call_site field
  Idxs[0] = Zero;
  Idxs[1] = ConstantInt::get(Int32Ty, 1);
  CallSite = GetElementPtrInst::Create(FunctionContext, Idxs, "call_site",
                                       EntryBB->getTerminator());

  // The exception selector comes back in context->data[1]
  Idxs[1] = ConstantInt::get(Int32Ty, 2);
  Value *FCData = GetElementPtrInst::Create(FunctionContext, Idxs, "fc_data",
                                            EntryBB->getTerminator());
  Idxs[1] = ConstantInt::get(Int32Ty, 1);
  Value *SelectorAddr = GetElementPtrInst::Create(FCData, Idxs,
                                                  "exc_selector_gep",
                                                  EntryBB->getTerminator());
  // The exception value comes back in context->data[0]
  Idxs[1] = Zero;
  Value *ExceptionAddr = GetElementPtrInst::Create(FCData, Idxs,
                                                   "exception_gep",
                                                   EntryBB->getTerminator());

  // The result of the eh.selector call will be replaced with a reference to
  // the selector value returned in the function context. We leave the selector
  // itself so the EH analysis later can use it.
  for (int i = 0, e = EH_Selectors.size(); i < e; ++i) {
    CallInst *I = EH_Selectors[i];
    Value *SelectorVal = new LoadInst(SelectorAddr, "select_val", true, I);
    I->replaceAllUsesWith(SelectorVal);
  }

  // eh.exception calls are replaced with references to the proper location in
  // the context. Unlike eh.selector, the eh.exception calls are removed
  // entirely.
  for (int i = 0, e = EH_Exceptions.size(); i < e; ++i) {
    CallInst *I = EH_Exceptions[i];
    // Possible for there to be duplicates, so check to make sure the
    // instruction hasn't already been removed.
    if (!I->getParent()) continue;
    Value *Val = new LoadInst(ExceptionAddr, "exception", true, I);
    Type *Ty = Type::getInt8PtrTy(F.getContext());
    Val = CastInst::Create(Instruction::IntToPtr, Val, Ty, "", I);

    I->replaceAllUsesWith(Val);
    I->eraseFromParent();
  }

  for (unsigned i = 0, e = LandingPads.size(); i != e; ++i)
    ReplaceLandingPadVal(F, LandingPads[i], ExceptionAddr, SelectorAddr);

  // The entry block changes to have the eh.sjlj.setjmp, with a conditional
  // branch to a dispatch block for non-zero returns. If we return normally,
  // we're not handling an exception and just register the function context and
  // continue.

  // Create the dispatch block.  The dispatch block is basically a big switch
  // statement that goes to all of the invoke landing pads.
  BasicBlock *DispatchBlock =
    BasicBlock::Create(F.getContext(), "eh.sjlj.setjmp.catch", &F);

  // Insert a load of the callsite in the dispatch block, and a switch on its
  // value. By default, we issue a trap statement.
  BasicBlock *TrapBlock =
    BasicBlock::Create(F.getContext(), "trapbb", &F);
  CallInst::Create(Intrinsic::getDeclaration(F.getParent(), Intrinsic::trap),
                   "", TrapBlock);
  new UnreachableInst(F.getContext(), TrapBlock);

  Value *DispatchLoad = new LoadInst(CallSite, "invoke.num", true,
                                     DispatchBlock);
  SwitchInst *DispatchSwitch =
    SwitchInst::Create(DispatchLoad, TrapBlock, Invokes.size(),
                       DispatchBlock);
  // Split the entry block to insert the conditional branch for the setjmp.
  BasicBlock *ContBlock = EntryBB->splitBasicBlock(EntryBB->getTerminator(),
                                                   "eh.sjlj.setjmp.cont");

  // Populate the Function Context
  //   1. LSDA address
  //   2. Personality function address
  //   3. jmpbuf (save SP, FP and call eh.sjlj.setjmp)

  // LSDA address
  Idxs[0] = Zero;
  Idxs[1] = ConstantInt::get(Int32Ty, 4);
  Value *LSDAFieldPtr =
    GetElementPtrInst::Create(FunctionContext, Idxs, "lsda_gep",
                              EntryBB->getTerminator());
  Value *LSDA = CallInst::Create(LSDAAddrFn, "lsda_addr",
                                 EntryBB->getTerminator());
  new StoreInst(LSDA, LSDAFieldPtr, true, EntryBB->getTerminator());

  Idxs[1] = ConstantInt::get(Int32Ty, 3);
  Value *PersonalityFieldPtr =
    GetElementPtrInst::Create(FunctionContext, Idxs, "lsda_gep",
                              EntryBB->getTerminator());
  new StoreInst(PersonalityFn, PersonalityFieldPtr, true,
                EntryBB->getTerminator());

  // Save the frame pointer.
  Idxs[1] = ConstantInt::get(Int32Ty, 5);
  Value *JBufPtr
    = GetElementPtrInst::Create(FunctionContext, Idxs, "jbuf_gep",
                                EntryBB->getTerminator());
  Idxs[1] = ConstantInt::get(Int32Ty, 0);
  Value *FramePtr =
    GetElementPtrInst::Create(JBufPtr, Idxs, "jbuf_fp_gep",
                              EntryBB->getTerminator());

  Value *Val = CallInst::Create(FrameAddrFn,
                                ConstantInt::get(Int32Ty, 0),
                                "fp",
                                EntryBB->getTerminator());
  new StoreInst(Val, FramePtr, true, EntryBB->getTerminator());

  // Save the stack pointer.
  Idxs[1] = ConstantInt::get(Int32Ty, 2);
  Value *StackPtr =
    GetElementPtrInst::Create(JBufPtr, Idxs, "jbuf_sp_gep",
                              EntryBB->getTerminator());

  Val = CallInst::Create(StackAddrFn, "sp", EntryBB->getTerminator());
  new StoreInst(Val, StackPtr, true, EntryBB->getTerminator());

  // Call the setjmp intrinsic. It fills in the rest of the jmpbuf.
  Value *SetjmpArg =
    CastInst::Create(Instruction::BitCast, JBufPtr,
                     Type::getInt8PtrTy(F.getContext()), "",
                     EntryBB->getTerminator());
  Value *DispatchVal = CallInst::Create(BuiltinSetjmpFn, SetjmpArg,
                                        "dispatch",
                                        EntryBB->getTerminator());

  // Add a call to dispatch_setup after the setjmp call. This is expanded to any
  // target-specific setup that needs to be done.
  CallInst::Create(DispatchSetupFn, DispatchVal, "", EntryBB->getTerminator());

  // Check the return value of the setjmp; non-zero goes to the dispatcher.
  Value *IsNormal = new ICmpInst(EntryBB->getTerminator(),
                                 ICmpInst::ICMP_EQ, DispatchVal, Zero,
                                 "notunwind");
  // Nuke the uncond branch.
  EntryBB->getTerminator()->eraseFromParent();

  // Put in a new condbranch in its place.
  BranchInst::Create(ContBlock, DispatchBlock, IsNormal, EntryBB);

  // Register the function context and make sure it's known to not throw
  CallInst *Register =
    CallInst::Create(RegisterFn, FunctionContext, "",
                     ContBlock->getTerminator());
  Register->setDoesNotThrow();

  // At this point, we are all set up, update the invoke instructions to mark
  // their call_site values, and fill in the dispatch switch accordingly.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i)
    markInvokeCallSite(Invokes[i], i+1, CallSite, DispatchSwitch);

  // Mark call instructions that aren't nounwind as no-action (call_site ==
  // -1). Skip the entry block, as prior to then, no function context has been
  // created for this function and any unexpected exceptions thrown will go
  // directly to the caller's context, which is what we want anyway, so no need
  // to do anything here.
  for (Function::iterator BB = F.begin(), E = F.end(); ++BB != E;) {
    for (BasicBlock::iterator I = BB->begin(), end = BB->end(); I != end; ++I)
      if (CallInst *CI = dyn_cast<CallInst>(I)) {
        // Ignore calls to the EH builtins (eh.selector, eh.exception)
        Constant *Callee = CI->getCalledFunction();
        if (Callee != SelectorFn && Callee != ExceptionFn
            && !CI->doesNotThrow())
          insertCallSiteStore(CI, -1, CallSite);
      } else if (ResumeInst *RI = dyn_cast<ResumeInst>(I)) {
        insertCallSiteStore(RI, -1, CallSite);
      }
  }

  // Replace all unwinds with a branch to the unwind handler.
  // ??? Should this ever happen with sjlj exceptions?
  for (unsigned i = 0, e = Unwinds.size(); i != e; ++i) {
    BranchInst::Create(TrapBlock, Unwinds[i]);
    Unwinds[i]->eraseFromParent();
  }

  // Following any allocas not in the entry block, update the saved SP in the
  // jmpbuf to the new value.
  for (unsigned i = 0, e = JmpbufUpdatePoints.size(); i != e; ++i) {
    Instruction *AI = JmpbufUpdatePoints[i];
    Instruction *StackAddr = CallInst::Create(StackAddrFn, "sp");
    StackAddr->insertAfter(AI);
    Instruction *StoreStackAddr = new StoreInst(StackAddr, StackPtr, true);
    StoreStackAddr->insertAfter(StackAddr);
  }

  // Finally, for any returns from this function, if this function contains an
  // invoke, add a call to unregister the function context.
  for (unsigned i = 0, e = Returns.size(); i != e; ++i)
    CallInst::Create(UnregisterFn, FunctionContext, "", Returns[i]);

  return true;
}
Example #3
/*
 * Rewrite OpenMP call sites and their associated kernel functions -- the following pattern:
 *   call void @GOMP_parallel_start(void (i8*)* @_Z20initialize_variablesiPfS_.omp_fn.4, i8* %.omp_data_o.5571, i32 0) nounwind
 *   call void @_Z20initialize_variablesiPfS_.omp_fn.4(i8* %.omp_data_o.5571) nounwind
 *   call void @GOMP_parallel_end() nounwind
 */
void HeteroOMPTransform::rewrite_omp_call_sites(Module &M) {
	SmallVector<Instruction *, 16> toDelete;
	DenseMap<Value *, Value *> ValueMap;
	
	for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I){
		if (!I->isDeclaration()) {
			
			for (Function::iterator BBI = I->begin(), BBE = I->end(); BBI != BBE; ++BBI) {
				bool match = false;
				for (BasicBlock::iterator INSNI = BBI->begin(), INSNE = BBI->end(); INSNI != INSNE; ++INSNI) {
					if (isa<CallInst>(INSNI)) {
						CallSite CI(cast<Instruction>(INSNI));
						if (CI.getCalledFunction() != NULL){ 
							string called_func_name = CI.getCalledFunction()->getName();
							if (called_func_name == OMP_PARALLEL_START_NAME && CI.arg_size() == 3) {
								// change alloc to malloc_shared
								// %5 = call i8* @_Z13malloc_sharedm(i64 20)       ; <i8*> [#uses=5]
								// %6 = bitcast i8* %5 to float*                   ; <float*> [#uses=2]
								AllocaInst *AllocCall;
								Value *arg_0 = CI.getArgument(0); // function
								Value *arg_1 = CI.getArgument(1);  // context
								Value *loop_ub = NULL;
								Function *function;
								BitCastInst* BCI;
								Function *kernel_function;
								BasicBlock::iterator iI(*INSNI); 
								//BasicBlock::iterator iJ = iI+1; 
								iI++; iI++;
								//BasicBlock::iterator iK = iI;  
								CallInst /**next,*/ *next_next; 
								if (arg_0 != NULL && arg_1 != NULL /*&& (next = dyn_cast<CallInst>(*iJ))*/ 
									&& (next_next = dyn_cast<CallInst>(iI)) && (next_next->getCalledFunction() != NULL) 
									&& (next_next->getCalledFunction()->getName() == OMP_PARALLEL_END_NAME)
									&& (BCI = dyn_cast<BitCastInst>(arg_1)) && (AllocCall = dyn_cast<AllocaInst>(BCI->getOperand(0))) 
									&& (function = dyn_cast<Function>(arg_0)) && (loop_ub = find_loop_upper_bound (AllocCall)) 
									&& (kernel_function=convert_to_kernel_function (M, function))){
									
										SmallVector<Value*, 16> Args;
										Args.push_back(AllocCall->getArraySize());
										Instruction *MallocCall = CallInst::Create(mallocFnTy, Args, "", AllocCall);
										CastInst *MallocCast = CastInst::Create(Instruction::BitCast, MallocCall, AllocCall->getType(), "", AllocCall);
										ValueMap[AllocCall] = MallocCast;
										//AllocCall->replaceAllUsesWith(MallocCall);
										// Add offload function
										Args.clear();
										Args.push_back(loop_ub);
										Args.push_back(BCI);
										Args.push_back(kernel_function);
										if (offloadFnTy == NULL) {
											init_offload_type(M, kernel_function);
										}
										
										Instruction *call = CallInst::Create(offloadFnTy, Args, "", INSNI);
										
										if (find(toDelete.begin(), toDelete.end(), AllocCall) == toDelete.end()){
											toDelete.push_back(AllocCall);
										}
										toDelete.push_back(&(*INSNI));
										match = true;
								}
							}
							else if (called_func_name == OMP_PARALLEL_END_NAME && CI.arg_size() == 0 && match) {
								toDelete.push_back(&(*INSNI));
								match = false;
							}
							else if (match) {
								toDelete.push_back(&(*INSNI));
							}
						}
					}
				}
			}
		}

	}

	/* Replace AllocCalls by MallocCalls */
	for(DenseMap<Value *, Value *>::iterator I = ValueMap.begin(), E = ValueMap.end(); I != E; I++) {
		I->first->replaceAllUsesWith(I->second);
	}

	/* Delete the replaced allocas and the rewritten OpenMP call-site instructions */
	while(!toDelete.empty()) {
		Instruction *g = toDelete.back();
		toDelete.pop_back();
		g->eraseFromParent();
	}

}
Example #4
bool PruneEH::runOnSCC(CallGraphSCC &SCC) {
  SmallPtrSet<CallGraphNode *, 8> SCCNodes;
  CallGraph &CG = getAnalysis<CallGraph>();
  bool MadeChange = false;

  // Fill SCCNodes with the elements of the SCC.  Used for quickly
  // looking up whether a given CallGraphNode is in this SCC.
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
    SCCNodes.insert(*I);

  // First pass, scan all of the functions in the SCC, simplifying them
  // according to what we know.
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
    if (Function *F = (*I)->getFunction())
      MadeChange |= SimplifyFunction(F);

  // Next, check to see if any callees might throw or if there are any external
  // functions in this SCC: if so, we cannot prune any functions in this SCC.
  // Definitions that are weak and not declared non-throwing might be 
  // overridden at linktime with something that throws, so assume that.
  // If this SCC includes the unwind instruction, we KNOW it throws, so
  // obviously the SCC might throw.
  //
  bool SCCMightUnwind = false, SCCMightReturn = false;
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); 
       (!SCCMightUnwind || !SCCMightReturn) && I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (F == 0) {
      SCCMightUnwind = true;
      SCCMightReturn = true;
    } else if (F->isDeclaration() || F->mayBeOverridden()) {
      SCCMightUnwind |= !F->doesNotThrow();
      SCCMightReturn |= !F->doesNotReturn();
    } else {
      bool CheckUnwind = !SCCMightUnwind && !F->doesNotThrow();
      bool CheckReturn = !SCCMightReturn && !F->doesNotReturn();

      if (!CheckUnwind && !CheckReturn)
        continue;

      // Check to see if this function performs an unwind or calls an
      // unwinding function.
      for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
        if (CheckUnwind && isa<UnwindInst>(BB->getTerminator())) {
          // Uses unwind!
          SCCMightUnwind = true;
        } else if (CheckReturn && isa<ReturnInst>(BB->getTerminator())) {
          SCCMightReturn = true;
        }

        // Invoke instructions don't allow unwinding to continue, so we are
        // only interested in call instructions.
        if (CheckUnwind && !SCCMightUnwind)
          for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
            if (CallInst *CI = dyn_cast<CallInst>(I)) {
              if (CI->doesNotThrow()) {
                // This call cannot throw.
              } else if (Function *Callee = CI->getCalledFunction()) {
                CallGraphNode *CalleeNode = CG[Callee];
                // If the callee is outside our current SCC then we may
                // throw because it might.
                if (!SCCNodes.count(CalleeNode)) {
                  SCCMightUnwind = true;
                  break;
                }
              } else {
                // Indirect call, it might throw.
                SCCMightUnwind = true;
                break;
              }
            }
        if (SCCMightUnwind && SCCMightReturn) break;
      }
    }
  }

  // If the SCC doesn't unwind or doesn't return, note this fact.
  if (!SCCMightUnwind || !SCCMightReturn)
    for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
      Attributes NewAttributes = Attribute::None;

      if (!SCCMightUnwind)
        NewAttributes |= Attribute::NoUnwind;
      if (!SCCMightReturn)
        NewAttributes |= Attribute::NoReturn;

      Function *F = (*I)->getFunction();
      const AttrListPtr &PAL = F->getAttributes();
      const AttrListPtr &NPAL = PAL.addAttr(~0, NewAttributes);
      if (PAL != NPAL) {
        MadeChange = true;
        F->setAttributes(NPAL);
      }
    }

  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    // Convert any invoke instructions to non-throwing functions in this node
    // into call instructions with a branch.  This makes the exception blocks
    // dead.
    if (Function *F = (*I)->getFunction())
      MadeChange |= SimplifyFunction(F);
  }

  return MadeChange;
}
Example #5
/// InputFilename is a LLVM bitcode file. Read it using bitcode reader.
/// Collect global functions and symbol names in symbols vector.
/// Collect external references in references vector.
/// Return LTO_READ_SUCCESS if there is no error.
enum LTOStatus
LTO::readLLVMObjectFile(const std::string &InputFilename,
                        NameToSymbolMap &symbols,
                        std::set<std::string> &references)
{
  Module *m = getModule(InputFilename);
  if (!m)
    return LTO_READ_FAILURE;

  // Collect Target info
  getTarget(m);

  if (!Target)
    return LTO_READ_FAILURE;
  
  // Use mangler to add GlobalPrefix to names to match linker names.
  // FIXME : Instead of hard coding "-" use GlobalPrefix.
  Mangler mangler(*m, Target->getTargetAsmInfo()->getGlobalPrefix());
  modules.push_back(m);
  
  for (Module::iterator f = m->begin(), e = m->end(); f != e; ++f) {
    LTOLinkageTypes lt = getLTOLinkageType(f);
    LTOVisibilityTypes vis = getLTOVisibilityType(f);
    if (!f->isDeclaration() && lt != LTOInternalLinkage
        && strncmp (f->getName().c_str(), "llvm.", 5)) {
      int alignment = ( 16 > f->getAlignment() ? 16 : f->getAlignment());
      LLVMSymbol *newSymbol = new LLVMSymbol(lt, vis, f, f->getName(), 
                                             mangler.getValueName(f),
                                             Log2_32(alignment));
      symbols[newSymbol->getMangledName()] = newSymbol;
      allSymbols[newSymbol->getMangledName()] = newSymbol;
    }

    // Collect external symbols referenced by this function.
    for (Function::iterator b = f->begin(), fe = f->end(); b != fe; ++b) 
      for (BasicBlock::iterator i = b->begin(), be = b->end(); 
           i != be; ++i) {
        for (unsigned count = 0, total = i->getNumOperands(); 
             count != total; ++count)
          findExternalRefs(i->getOperand(count), references, mangler);
      }
  }
    
  for (Module::global_iterator v = m->global_begin(), e = m->global_end();
       v !=  e; ++v) {
    LTOLinkageTypes lt = getLTOLinkageType(v);
    LTOVisibilityTypes vis = getLTOVisibilityType(v);
    if (!v->isDeclaration() && lt != LTOInternalLinkage
        && strncmp (v->getName().c_str(), "llvm.", 5)) {
      const TargetData *TD = Target->getTargetData();
      LLVMSymbol *newSymbol = new LLVMSymbol(lt, vis, v, v->getName(), 
                                             mangler.getValueName(v),
                                             TD->getPreferredAlignmentLog(v));
      symbols[newSymbol->getMangledName()] = newSymbol;
      allSymbols[newSymbol->getMangledName()] = newSymbol;

      for (unsigned count = 0, total = v->getNumOperands(); 
           count != total; ++count)
        findExternalRefs(v->getOperand(count), references, mangler);

    }
  }
  
  return LTO_READ_SUCCESS;
}
Example #6
bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
  SmallVector<ReturnInst*,16> Returns;
  SmallVector<InvokeInst*,16> Invokes;
  UnreachableInst* UnreachablePlaceholder = 0;

  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
      // Remember all return instructions in case we insert an invoke into this
      // function.
      Returns.push_back(RI);
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
      Invokes.push_back(II);
    }

  if (Invokes.empty()) return false;

  NumInvokes += Invokes.size();

  // TODO: This is not an optimal way to do this.  In particular, this always
  // inserts setjmp calls into the entries of functions with invoke instructions
  // even though there are possibly paths through the function that do not
  // execute any invokes.  In particular, for functions with early exits, e.g.
  // the 'addMove' method in hexxagon, it would be nice to not have to do the
  // setjmp stuff on the early exit path.  This requires a bit of dataflow, but
  // would not be too hard to do.

  // If we have an invoke instruction, insert a setjmp that dominates all
  // invokes.  After the setjmp, use a cond branch that goes to the original
  // code path on zero, and to a designated 'catch' block on nonzero.
  Value *OldJmpBufPtr = 0;
  if (!Invokes.empty()) {
    // First thing we need to do is scan the whole function for values that are
    // live across unwind edges.  Each value that is live across an unwind edge
    // we spill into a stack location, guaranteeing that there is nothing live
    // across the unwind edge.  This process also splits all critical edges
    // coming out of invokes.
    splitLiveRangesLiveAcrossInvokes(Invokes);

    BasicBlock *EntryBB = F.begin();

    // Create an alloca for the incoming jump buffer ptr and the new jump buffer
    // that needs to be restored on all exits from the function.  This is an
    // alloca because the value needs to be live across invokes.
    unsigned Align = TLI ? TLI->getJumpBufAlignment() : 0;
    AllocaInst *JmpBuf =
      new AllocaInst(JBLinkTy, 0, Align,
                     "jblink", F.begin()->begin());

    Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
                     ConstantInt::get(Type::getInt32Ty(F.getContext()), 1) };
    OldJmpBufPtr = GetElementPtrInst::Create(JmpBuf, Idx, "OldBuf",
                                             EntryBB->getTerminator());

    // Copy the JBListHead to the alloca.
    Value *OldBuf = new LoadInst(JBListHead, "oldjmpbufptr", true,
                                 EntryBB->getTerminator());
    new StoreInst(OldBuf, OldJmpBufPtr, true, EntryBB->getTerminator());

    // Add the new jumpbuf to the list.
    new StoreInst(JmpBuf, JBListHead, true, EntryBB->getTerminator());

    // Create the catch block.  The catch block is basically a big switch
    // statement that goes to all of the invoke catch blocks.
    BasicBlock *CatchBB =
            BasicBlock::Create(F.getContext(), "setjmp.catch", &F);

    // Create an alloca which keeps track of the stack pointer before every
    // invoke, this allows us to properly restore the stack pointer after
    // long jumping.
    AllocaInst *StackPtr = new AllocaInst(Type::getInt8PtrTy(F.getContext()), 0,
                                          "stackptr", EntryBB->begin());

    // Create an alloca which keeps track of which invoke is currently
    // executing.  For normal calls it contains zero.
    AllocaInst *InvokeNum = new AllocaInst(Type::getInt32Ty(F.getContext()), 0,
                                           "invokenum",EntryBB->begin());
    new StoreInst(ConstantInt::get(Type::getInt32Ty(F.getContext()), 0), 
                  InvokeNum, true, EntryBB->getTerminator());

    // Insert a load in the Catch block, and a switch on its value.  By default,
    // we go to a block that just does an unwind (which is the correct action
    // for a standard call). We insert an unreachable instruction here and
    // modify the block to jump to the correct unwinding pad later.
    BasicBlock *UnwindBB = BasicBlock::Create(F.getContext(), "unwindbb", &F);
    UnreachablePlaceholder = new UnreachableInst(F.getContext(), UnwindBB);

    Value *CatchLoad = new LoadInst(InvokeNum, "invoke.num", true, CatchBB);
    SwitchInst *CatchSwitch =
      SwitchInst::Create(CatchLoad, UnwindBB, Invokes.size(), CatchBB);

    // Now that things are set up, insert the setjmp call itself.

    // Split the entry block to insert the conditional branch for the setjmp.
    BasicBlock *ContBlock = EntryBB->splitBasicBlock(EntryBB->getTerminator(),
                                                     "setjmp.cont");

    Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 0);
    Value *JmpBufPtr = GetElementPtrInst::Create(JmpBuf, Idx, "TheJmpBuf",
                                                 EntryBB->getTerminator());
    JmpBufPtr = new BitCastInst(JmpBufPtr,
                        Type::getInt8PtrTy(F.getContext()),
                                "tmp", EntryBB->getTerminator());
    Value *SJRet = CallInst::Create(SetJmpFn, JmpBufPtr, "sjret",
                                    EntryBB->getTerminator());

    // Compare the return value to zero.
    Value *IsNormal = new ICmpInst(EntryBB->getTerminator(),
                                   ICmpInst::ICMP_EQ, SJRet,
                                   Constant::getNullValue(SJRet->getType()),
                                   "notunwind");
    // Nuke the uncond branch.
    EntryBB->getTerminator()->eraseFromParent();

    // Put in a new condbranch in its place.
    BranchInst::Create(ContBlock, CatchBB, IsNormal, EntryBB);

    // At this point, we are all set up, rewrite each invoke instruction.
    for (unsigned i = 0, e = Invokes.size(); i != e; ++i)
      rewriteExpensiveInvoke(Invokes[i], i+1, InvokeNum, StackPtr, CatchSwitch);
  }

  // We know that there is at least one unwind.

  // Create three new blocks, the block to load the jmpbuf ptr and compare
  // against null, the block to do the longjmp, and the error block for if it
  // is null.  Add them at the end of the function because they are not hot.
  BasicBlock *UnwindHandler = BasicBlock::Create(F.getContext(),
                                                "dounwind", &F);
  BasicBlock *UnwindBlock = BasicBlock::Create(F.getContext(), "unwind", &F);
  BasicBlock *TermBlock = BasicBlock::Create(F.getContext(), "unwinderror", &F);

  // If this function contains an invoke, restore the old jumpbuf ptr.
  Value *BufPtr;
  if (OldJmpBufPtr) {
    // Before the return, insert a copy from the saved value to the new value.
    BufPtr = new LoadInst(OldJmpBufPtr, "oldjmpbufptr", UnwindHandler);
    new StoreInst(BufPtr, JBListHead, UnwindHandler);
  } else {
    BufPtr = new LoadInst(JBListHead, "ehlist", UnwindHandler);
  }

  // Load the JBList, if it's null, then there was no catch!
  Value *NotNull = new ICmpInst(*UnwindHandler, ICmpInst::ICMP_NE, BufPtr,
                                Constant::getNullValue(BufPtr->getType()),
                                "notnull");
  BranchInst::Create(UnwindBlock, TermBlock, NotNull, UnwindHandler);

  // Create the block to do the longjmp.
  // Get a pointer to the jmpbuf and longjmp.
  Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
                   ConstantInt::get(Type::getInt32Ty(F.getContext()), 0) };
  Idx[0] = GetElementPtrInst::Create(BufPtr, Idx, "JmpBuf", UnwindBlock);
  Idx[0] = new BitCastInst(Idx[0],
             Type::getInt8PtrTy(F.getContext()),
                           "tmp", UnwindBlock);
  Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 1);
  CallInst::Create(LongJmpFn, Idx, "", UnwindBlock);
  new UnreachableInst(F.getContext(), UnwindBlock);

  // Set up the term block ("throw without a catch").
  new UnreachableInst(F.getContext(), TermBlock);

  // Insert a call to abort()
  CallInst::Create(AbortFn, "",
                   TermBlock->getTerminator())->setTailCall();

  // Replace the inserted unreachable with a branch to the unwind handler.
  if (UnreachablePlaceholder) {
    BranchInst::Create(UnwindHandler, UnreachablePlaceholder);
    UnreachablePlaceholder->eraseFromParent();
  }

  // Finally, for any returns from this function, if this function contains an
  // invoke, restore the old jmpbuf pointer to its input value.
  if (OldJmpBufPtr) {
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *R = Returns[i];

      // Before the return, insert a copy from the saved value to the new value.
      Value *OldBuf = new LoadInst(OldJmpBufPtr, "oldjmpbufptr", true, R);
      new StoreInst(OldBuf, JBListHead, true, R);
    }
  }

  return true;
}
Example #7
// UnifyAllExitNodes - Unify all exit nodes of the CFG by creating a new
// BasicBlock, and converting all returns to unconditional branches to this
// new basic block.  The singular exit node is returned.
//
// If there are no return stmts in the Function, a null pointer is returned.
//
bool UnifyFunctionExitNodes::runOnFunction(Function &F) {
  // Loop over all of the blocks in a function, tracking all of the blocks that
  // return.
  //
  std::vector<BasicBlock*> ReturningBlocks;
  std::vector<BasicBlock*> UnwindingBlocks;
  std::vector<BasicBlock*> UnreachableBlocks;
  for(Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
    if (isa<ReturnInst>(I->getTerminator()))
      ReturningBlocks.push_back(I);
    else if (isa<UnwindInst>(I->getTerminator()))
      UnwindingBlocks.push_back(I);
    else if (isa<UnreachableInst>(I->getTerminator()))
      UnreachableBlocks.push_back(I);

  // Handle unwinding blocks first.
  if (UnwindingBlocks.empty()) {
    UnwindBlock = 0;
  } else if (UnwindingBlocks.size() == 1) {
    UnwindBlock = UnwindingBlocks.front();
  } else {
    UnwindBlock = BasicBlock::Create("UnifiedUnwindBlock", &F);
    new UnwindInst(UnwindBlock);

    for (std::vector<BasicBlock*>::iterator I = UnwindingBlocks.begin(),
           E = UnwindingBlocks.end(); I != E; ++I) {
      BasicBlock *BB = *I;
      BB->getInstList().pop_back();  // Remove the unwind insn
      BranchInst::Create(UnwindBlock, BB);
    }
  }

  // Then unreachable blocks.
  if (UnreachableBlocks.empty()) {
    UnreachableBlock = 0;
  } else if (UnreachableBlocks.size() == 1) {
    UnreachableBlock = UnreachableBlocks.front();
  } else {
    UnreachableBlock = BasicBlock::Create("UnifiedUnreachableBlock", &F);
    new UnreachableInst(UnreachableBlock);

    for (std::vector<BasicBlock*>::iterator I = UnreachableBlocks.begin(),
           E = UnreachableBlocks.end(); I != E; ++I) {
      BasicBlock *BB = *I;
      BB->getInstList().pop_back();  // Remove the unreachable inst.
      BranchInst::Create(UnreachableBlock, BB);
    }
  }

  // Now handle return blocks.
  if (ReturningBlocks.empty()) {
    ReturnBlock = 0;
    return false;                          // No blocks return
  } else if (ReturningBlocks.size() == 1) {
    ReturnBlock = ReturningBlocks.front(); // Already has a single return block
    return false;
  }

  // Otherwise, we need to insert a new basic block into the function, add a PHI
  // nodes (if the function returns values), and convert all of the return
  // instructions into unconditional branches.
  //
  BasicBlock *NewRetBlock = BasicBlock::Create("UnifiedReturnBlock", &F);

  SmallVector<Value *, 4> Phis;
  unsigned NumRetVals = ReturningBlocks[0]->getTerminator()->getNumOperands();
  if (NumRetVals == 0)
    ReturnInst::Create(NULL, NewRetBlock);
  else if (const StructType *STy = dyn_cast<StructType>(F.getReturnType())) {
    Instruction *InsertPt = NULL;
    if (NumRetVals == 0)
      InsertPt = NewRetBlock->getFirstNonPHI();
    PHINode *PN = NULL;
    for (unsigned i = 0; i < NumRetVals; ++i) {
      if (InsertPt)
        PN = PHINode::Create(STy->getElementType(i), "UnifiedRetVal." 
                         + utostr(i), InsertPt);
      else
        PN = PHINode::Create(STy->getElementType(i), "UnifiedRetVal." 
                         + utostr(i), NewRetBlock);
      Phis.push_back(PN);
      InsertPt = PN;
    }
    ReturnInst::Create(&Phis[0], NumRetVals, NewRetBlock);
  }
  else {
    // If the function doesn't return void... add a PHI node to the block...
    PHINode *PN = PHINode::Create(F.getReturnType(), "UnifiedRetVal");
    NewRetBlock->getInstList().push_back(PN);
    Phis.push_back(PN);
    ReturnInst::Create(PN, NewRetBlock);
  }

  // Loop over all of the blocks, replacing the return instruction with an
  // unconditional branch.
  //
  for (std::vector<BasicBlock*>::iterator I = ReturningBlocks.begin(),
         E = ReturningBlocks.end(); I != E; ++I) {
    BasicBlock *BB = *I;

    // Add an incoming element to the PHI node for every return instruction that
    // is merging into this new block...
    if (!Phis.empty()) {
      for (unsigned i = 0; i < NumRetVals; ++i) 
        cast<PHINode>(Phis[i])->addIncoming(BB->getTerminator()->getOperand(i), 
                                            BB);
    }

    BB->getInstList().pop_back();  // Remove the return insn
    BranchInst::Create(NewRetBlock, BB);
  }
  ReturnBlock = NewRetBlock;
  return true;
}
Example #8
//
// Method: runOnModule()
//
// Description:
//  Entry point for this LLVM pass.
//  Clone functions that take LoadInsts as arguments
//
// Inputs:
//  M - A reference to the LLVM module to transform
//
// Outputs:
//  M - The transformed LLVM module.
//
// Return value:
//  true  - The module was modified.
//  false - The module was not modified.
//
bool LoadArgs::runOnModule(Module& M) {
  std::map<std::pair<Function*, const Type * > , Function* > fnCache;
  bool changed;
  do { 
    changed = false;
    for (Module::iterator Func = M.begin(); Func != M.end(); ++Func) {
      for (Function::iterator B = Func->begin(), FE = Func->end(); B != FE; ++B) {
        for (BasicBlock::iterator I = B->begin(), BE = B->end(); I != BE;) {
          CallInst *CI = dyn_cast<CallInst>(I++);
          if(!CI)
            continue;

          if(CI->hasByValArgument())
            continue;
          // if the CallInst calls a function that is externally defined
          // or might be overridden, ignore this call site.
          Function *F = CI->getCalledFunction();
          if (!F || (F->isDeclaration() || F->mayBeOverridden())) 
            continue;
          if(F->hasStructRetAttr())
            continue;
          if(F->isVarArg())
            continue;

          // find the argument we must replace
          Function::arg_iterator ai = F->arg_begin(), ae = F->arg_end();
          unsigned argNum = 0;
          for(; argNum < CI->getNumArgOperands();argNum++, ++ai) {
            // do not care about dead arguments
            if(ai->use_empty())
              continue;
            if(F->getAttributes().getParamAttributes(argNum).hasAttrSomewhere(Attribute::SExt) ||
               F->getAttributes().getParamAttributes(argNum).hasAttrSomewhere(Attribute::ZExt))
              continue;
            if (isa<LoadInst>(CI->getArgOperand(argNum)))
              break;
          }

          // if no argument was a load to be changed
          if(ai == ae)
            continue;

          LoadInst *LI = dyn_cast<LoadInst>(CI->getArgOperand(argNum));
          Instruction * InsertPt = &(Func->getEntryBlock().front());
          AllocaInst *NewVal = new AllocaInst(LI->getType(), "",InsertPt);

          StoreInst *Copy = new StoreInst(LI, NewVal);
          Copy->insertAfter(LI);
          /*if(LI->getParent() != CI->getParent())
            continue;
          // Also check that there is no store after the load.
          // TODO: Check if the load/store do not alias.
          BasicBlock::iterator bii = LI->getParent()->begin();
          Instruction *BII = bii;
          while(BII != LI) {
            ++bii;
            BII = bii;
          }
          while(BII != CI) {
            if(isa<StoreInst>(BII))
              break;
            ++bii;
            BII = bii;
          }
          if(isa<StoreInst>(bii)){
            continue;
          }*/

          // Construct the new function type: insert the type of the load's
          // pointer operand just before the argument being replaced
          std::vector<Type*>TP;
          for(unsigned c = 0; c < CI->getNumArgOperands();c++) {
            if(c == argNum)
              TP.push_back(LI->getPointerOperand()->getType());
            TP.push_back(CI->getArgOperand(c)->getType());
          }

          //return type is same as that of original instruction
          FunctionType *NewFTy = FunctionType::get(CI->getType(), TP, false);
          numSimplified++;
          //if(numSimplified > 1000)
          //return true;

          Function *NewF;
          std::map<std::pair<Function*, const Type* > , Function* >::iterator Test;
          Test = fnCache.find(std::make_pair(F, NewFTy));
          if(Test != fnCache.end()) {
            NewF = Test->second;
          } else {
            NewF = Function::Create(NewFTy,
                                    GlobalValue::InternalLinkage,
                                    F->getName().str() + ".TEST",
                                    &M);

            fnCache[std::make_pair(F, NewFTy)] = NewF;
            Function::arg_iterator NI = NewF->arg_begin();

            ValueToValueMapTy ValueMap;

            unsigned count = 0;
            for (Function::arg_iterator II = F->arg_begin(); NI != NewF->arg_end(); ++count, ++NI) {
              if(count == argNum) {
                NI->setName("LDarg");
                continue;
              }
              ValueMap[II] = NI;
              NI->setName(II->getName());
              NI->addAttr(F->getAttributes().getParamAttributes(II->getArgNo() + 1));
              ++II;
            }
            // Perform the cloning.
            SmallVector<ReturnInst*,100> Returns;
            CloneFunctionInto(NewF, F, ValueMap, false, Returns);
            std::vector<Value*> fargs;
            for(Function::arg_iterator ai = NewF->arg_begin(), 
                ae= NewF->arg_end(); ai != ae; ++ai) {
              fargs.push_back(ai);
            }

            NewF->setAttributes(NewF->getAttributes().addAttributes(
                F->getContext(), 0, F->getAttributes().getRetAttributes()));
            NewF->setAttributes(NewF->getAttributes().addAttributes(
                F->getContext(), ~0, F->getAttributes().getFnAttributes()));
            //Get the point to insert the new load: right after the leading allocas in the entry block.
            Instruction *InsertPoint;
            for (BasicBlock::iterator insrt = NewF->front().begin(); isa<AllocaInst>(InsertPoint = insrt); ++insrt) {;}
            LoadInst *LI_new = new LoadInst(fargs.at(argNum), "", InsertPoint);
            fargs.at(argNum+1)->replaceAllUsesWith(LI_new);
          }
          
          //this does not seem to be a good idea
          AttributeSet NewCallPAL=AttributeSet();
	  
          // Get the initial attributes of the call
          AttributeSet CallPAL = CI->getAttributes();
          AttributeSet RAttrs = CallPAL.getRetAttributes();
          AttributeSet FnAttrs = CallPAL.getFnAttributes();
          if (!RAttrs.isEmpty())
            NewCallPAL=NewCallPAL.addAttributes(F->getContext(),0, RAttrs);

          SmallVector<Value*, 8> Args;
          for(unsigned j =0;j<CI->getNumArgOperands();j++) {
            if(j == argNum) {
              Args.push_back(NewVal);
            }
            Args.push_back(CI->getArgOperand(j));
            // position in the NewCallPAL
            AttributeSet Attrs = CallPAL.getParamAttributes(j+1);
            if (!Attrs.isEmpty())
              NewCallPAL=NewCallPAL.addAttributes(F->getContext(),Args.size(), Attrs);
          }
          // Create the new attributes vec.
          if (!FnAttrs.isEmpty())
            NewCallPAL=NewCallPAL.addAttributes(F->getContext(),~0, FnAttrs);

          CallInst *CallI = CallInst::Create(NewF,Args,"", CI);
          CallI->setCallingConv(CI->getCallingConv());
          CallI->setAttributes(NewCallPAL);
          CI->replaceAllUsesWith(CallI);
          CI->eraseFromParent();
          changed = true;
        }
      }
    }
  } while(changed);
  return true;
}
Example #9
bool TailCallElim::runOnFunction(Function &F) {
  // If this function is a varargs function, we won't be able to PHI the args
  // right, so don't even try to convert it...
  if (F.getFunctionType()->isVarArg()) return false;

  BasicBlock *OldEntry = 0;
  bool TailCallsAreMarkedTail = false;
  SmallVector<PHINode*, 8> ArgumentPHIs;
  bool MadeChange = false;
  bool FunctionContainsEscapingAllocas = false;

  // CannotTCETailMarkedCall - If true, we cannot perform TCE on tail calls
  // marked with the 'tail' attribute, because doing so would cause the stack
  // size to increase (real TCE would deallocate variable sized allocas, TCE
  // doesn't).
  bool CannotTCETailMarkedCall = false;

  // Loop over the function, looking for any returning blocks, and keeping track
  // of whether this function has any non-trivially used allocas.
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    if (FunctionContainsEscapingAllocas && CannotTCETailMarkedCall)
      break;

    FunctionContainsEscapingAllocas |=
      CheckForEscapingAllocas(BB, CannotTCETailMarkedCall);
  }
  
  /// FIXME: The code generator produces really bad code when an 'escaping
  /// alloca' is changed from being a static alloca to being a dynamic alloca.
  /// Until this is resolved, disable this transformation if that would ever
  /// happen.  This bug is PR962.
  if (FunctionContainsEscapingAllocas)
    return false;

  // Second pass, change any tail calls to loops.
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB->getTerminator())) {
      bool Change = ProcessReturningBlock(Ret, OldEntry, TailCallsAreMarkedTail,
                                          ArgumentPHIs,CannotTCETailMarkedCall);
      if (!Change && BB->getFirstNonPHIOrDbg() == Ret)
        Change = FoldReturnAndProcessPred(BB, Ret, OldEntry,
                                          TailCallsAreMarkedTail, ArgumentPHIs,
                                          CannotTCETailMarkedCall);
      MadeChange |= Change;
    }
  }

  // If we eliminated any tail recursions, it's possible that we inserted some
  // silly PHI nodes which just merge an initial value (the incoming operand)
  // with themselves.  Check to see if we did and clean up our mess if so.  This
  // occurs when a function passes an argument straight through to its tail
  // call.
  if (!ArgumentPHIs.empty()) {
    for (unsigned i = 0, e = ArgumentPHIs.size(); i != e; ++i) {
      PHINode *PN = ArgumentPHIs[i];

      // If the PHI Node is a dynamic constant, replace it with the value it is.
      if (Value *PNV = SimplifyInstruction(PN)) {
        PN->replaceAllUsesWith(PNV);
        PN->eraseFromParent();
      }
    }
  }

  // Finally, if this function contains no non-escaping allocas and does not
  // call a function that returns twice (such as setjmp), mark all calls in the
  // function as eligible for tail calls (there is no stack memory for them to
  // access).
  if (!FunctionContainsEscapingAllocas && !F.callsFunctionThatReturnsTwice())
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          CI->setTailCall();
          MadeChange = true;
        }

  return MadeChange;
}
Example #10
bool llvm::InputValues::runOnModule(Module& M) {

	module = &M;

	initializeWhiteList();

	collectMainArguments();
	int one = 0;
	errs() << "\n------------------------------------------------\n";
	for (Module::iterator Fit = M.begin(), Fend = M.end(); Fit != Fend; Fit++) {

		for (Function::iterator BBit = Fit->begin(), BBend = Fit->end(); BBit != BBend; BBit++) {

			for (BasicBlock::iterator Iit = BBit->begin(), Iend = BBit->end(); Iit != Iend; Iit++) {

				if (one < 2) {
					Value* V_in;
					//FIXME: Temporary assignment of the tainted source for testing...
					if (BBit->getName() == "BB_146") {
						// errs() << "Inside BB_0";
						if (LoadInst *LI = dyn_cast<LoadInst>(Iit)) {
							// LI->dump();
							V_in = LI->getPointerOperand();
							errs() << "\ntaint source " << LI->getName() << " ";
							errs() << V_in;
							insertInInputDepValues(V_in);
							//NumInputValues++;
							one++;
						}
					}
				}



				if (CallInst *CI = dyn_cast<CallInst>(Iit)) {

					if (isMarkedCallInst(CI)){

						//Values returned by marked instructions
						insertInInputDepValues(CI);

						for(unsigned int i = 0; i < CI->getNumOperands(); i++){

							if (CI->getOperand(i)->getType()->isPointerTy()){
								//Arguments with pointer type of marked functions
						//		insertInInputDepValues(CI->getOperand(i));
							}

						}

					}

				}

			}

		}

	}

 errs()<<"\n------------------------------------------------\n";
	NumInputValues = inputValues.size();

	//We don't modify anything, so we must return false;
	return false;
}
Example #11
/// splitLiveRangesLiveAcrossInvokes - Each value that is live across an unwind
/// edge is spilled into a stack location, guaranteeing that there is nothing
/// live across the unwind edge.  This process also splits all critical edges
/// coming out of invokes.
void SjLjEHPass::
splitLiveRangesLiveAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
  // First step, split all critical edges from invoke instructions.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    InvokeInst *II = Invokes[i];
    SplitCriticalEdge(II, 0, this);
    SplitCriticalEdge(II, 1, this);
    assert(!isa<PHINode>(II->getNormalDest()) &&
           !isa<PHINode>(II->getUnwindDest()) &&
           "critical edge splitting left single entry phi nodes?");
  }

  Function *F = Invokes.back()->getParent()->getParent();

  // To avoid having to handle incoming arguments specially, we lower each arg
  // to a copy instruction in the entry block.  This ensures that the argument
  // value itself cannot be live across the entry block.
  BasicBlock::iterator AfterAllocaInsertPt = F->begin()->begin();
  while (isa<AllocaInst>(AfterAllocaInsertPt) &&
        isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsertPt)->getArraySize()))
    ++AfterAllocaInsertPt;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI) {
    // This is always a no-op cast because we're casting AI to AI->getType() so
    // src and destination types are identical. BitCast is the only possibility.
    CastInst *NC = new BitCastInst(
      AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
    AI->replaceAllUsesWith(NC);
    // Normally it is forbidden to replace a CastInst's operand because it
    // could cause the opcode to reflect an illegal conversion. However, we're
    // replacing it here with the same value it was constructed with to simply
    // make NC its user.
    NC->setOperand(0, AI);
  }

  // Finally, scan the code looking for instructions with bad live ranges.
  for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
      // Ignore obvious cases we don't have to handle.  In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block.  Ignore them quickly.
      Instruction *Inst = II;
      if (Inst->use_empty()) continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->use_back())->getParent() == BB &&
          !isa<PHINode>(Inst->use_back())) continue;

      // If this is an alloca in the entry block, it's not a real register
      // value.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F->begin())
          continue;

      // Avoid iterator invalidation by copying users to a temporary vector.
      SmallVector<Instruction*,16> Users;
      for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
           UI != E; ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        if (User->getParent() != BB || isa<PHINode>(User))
          Users.push_back(User);
      }

      // Find all of the blocks that this value is live in.
      std::set<BasicBlock*> LiveBBs;
      LiveBBs.insert(Inst->getParent());
      while (!Users.empty()) {
        Instruction *U = Users.back();
        Users.pop_back();

        if (!isa<PHINode>(U)) {
          MarkBlocksLiveIn(U->getParent(), LiveBBs);
        } else {
          // Uses for a PHI node occur in their predecessor block.
          PHINode *PN = cast<PHINode>(U);
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
        }
      }

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
          NeedsSpill = true;
        }
      }

      // If we decided we need a spill, do it.
      if (NeedsSpill) {
        ++NumSpilled;
        DemoteRegToStack(*Inst, true);
      }
    }
}
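The liveness walk above relies on a MarkBlocksLiveIn helper that is not shown. Its assumed shape is a simple reverse DFS over predecessors that stops at blocks already known to be live; a hedged sketch:

// Hedged sketch of the MarkBlocksLiveIn helper used above (assumed shape):
// mark BB live, then walk predecessors until an already-marked block is hit.
static void MarkBlocksLiveIn(BasicBlock *BB, std::set<BasicBlock*> &LiveBBs) {
  if (!LiveBBs.insert(BB).second)
    return; // already marked live in this set
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
    MarkBlocksLiveIn(*PI, LiveBBs);
}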
Ejemplo n.º 12
0
// RemoveDeadStuffFromFunction - Remove any arguments and return values from F
// that are not in LiveValues. Transform the function and all of the callees of
// the function to not have these arguments and return values.
//
bool DAE::RemoveDeadStuffFromFunction(Function *F) {
  // Don't modify fully live functions
  if (LiveFunctions.count(F))
    return false;

  // Start by computing a new prototype for the function, which is the same as
  // the old function, but has fewer arguments and a different return type.
  FunctionType *FTy = F->getFunctionType();
  std::vector<Type*> Params;

  // Keep track of if we have a live 'returned' argument
  bool HasLiveReturnedArg = false;

  // Set up to build a new list of parameter attributes.
  SmallVector<AttributeSet, 8> AttributesVec;
  const AttributeSet &PAL = F->getAttributes();

  // Remember which arguments are still alive.
  SmallVector<bool, 10> ArgAlive(FTy->getNumParams(), false);
  // Construct the new parameter list from non-dead arguments. Also construct
  // a new set of parameter attributes to correspond. Skip the first parameter
  // attribute, since that belongs to the return value.
  unsigned i = 0;
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; ++I, ++i) {
    RetOrArg Arg = CreateArg(F, i);
    if (LiveValues.erase(Arg)) {
      Params.push_back(I->getType());
      ArgAlive[i] = true;

      // Get the original parameter attributes (skipping the first one, which is
      // for the return value).
      if (PAL.hasAttributes(i + 1)) {
        AttrBuilder B(PAL, i + 1);
        if (B.contains(Attribute::Returned))
          HasLiveReturnedArg = true;
        AttributesVec.
          push_back(AttributeSet::get(F->getContext(), Params.size(), B));
      }
    } else {
      ++NumArgumentsEliminated;
      DEBUG(dbgs() << "DAE - Removing argument " << i << " (" << I->getName()
            << ") from " << F->getName() << "\n");
    }
  }

  // Find out the new return value.
  Type *RetTy = FTy->getReturnType();
  Type *NRetTy = NULL;
  unsigned RetCount = NumRetVals(F);

  // -1 means unused, other numbers are the new index
  SmallVector<int, 5> NewRetIdxs(RetCount, -1);
  std::vector<Type*> RetTypes;

  // If there is a function with a live 'returned' argument but a dead return
  // value, then there are two possible actions:
  // 1) Eliminate the return value and take off the 'returned' attribute on the
  //    argument.
  // 2) Retain the 'returned' attribute and treat the return value (but not the
  //    entire function) as live so that it is not eliminated.
  // 
  // It's not clear in the general case which option is more profitable because,
  // even in the absence of explicit uses of the return value, code generation
  // is free to use the 'returned' attribute to do things like eliding
  // save/restores of registers across calls. Whether or not this happens is
  // target and ABI-specific as well as depending on the amount of register
  // pressure, so there's no good way for an IR-level pass to figure this out.
  //
  // Fortunately, the only places where 'returned' is currently generated by
  // the FE are places where 'returned' is basically free and almost always a
  // performance win, so the second option can just be used always for now.
  //
  // This should be revisited if 'returned' is ever applied more liberally.
  if (RetTy->isVoidTy() || HasLiveReturnedArg) {
    NRetTy = RetTy;
  } else {
    StructType *STy = dyn_cast<StructType>(RetTy);
    if (STy)
      // Look at each of the original return values individually.
      for (unsigned i = 0; i != RetCount; ++i) {
        RetOrArg Ret = CreateRet(F, i);
        if (LiveValues.erase(Ret)) {
          RetTypes.push_back(STy->getElementType(i));
          NewRetIdxs[i] = RetTypes.size() - 1;
        } else {
          ++NumRetValsEliminated;
          DEBUG(dbgs() << "DAE - Removing return value " << i << " from "
                << F->getName() << "\n");
        }
      }
    else
      // We used to return a single value.
      if (LiveValues.erase(CreateRet(F, 0))) {
        RetTypes.push_back(RetTy);
        NewRetIdxs[0] = 0;
      } else {
        DEBUG(dbgs() << "DAE - Removing return value from " << F->getName()
              << "\n");
        ++NumRetValsEliminated;
      }
    if (RetTypes.size() > 1)
      // More than one return type? Return a struct with them. Also, if we used
      // to return a struct and didn't change the number of return values,
      // return a struct again. This prevents changing {something} into
      // something and {} into void.
      // Make the new struct packed if we used to return a packed struct
      // already.
      NRetTy = StructType::get(STy->getContext(), RetTypes, STy->isPacked());
    else if (RetTypes.size() == 1)
      // One return type? Just a simple value then, but only if we didn't use to
      // return a struct with that simple value before.
      NRetTy = RetTypes.front();
    else if (RetTypes.size() == 0)
      // No return types? Make it void, but only if we didn't use to return {}.
      NRetTy = Type::getVoidTy(F->getContext());
  }

  assert(NRetTy && "No new return type found?");

  // The existing function return attributes.
  AttributeSet RAttrs = PAL.getRetAttributes();

  // Remove any incompatible attributes, but only if we removed all return
  // values. Otherwise, ensure that we don't have any conflicting attributes
  // here. Currently, this should not be possible, but special handling might be
  // required when new return value attributes are added.
  if (NRetTy->isVoidTy())
    RAttrs =
      AttributeSet::get(NRetTy->getContext(), AttributeSet::ReturnIndex,
                        AttrBuilder(RAttrs, AttributeSet::ReturnIndex).
         removeAttributes(AttributeFuncs::
                          typeIncompatible(NRetTy, AttributeSet::ReturnIndex),
                          AttributeSet::ReturnIndex));
  else
    assert(!AttrBuilder(RAttrs, AttributeSet::ReturnIndex).
             hasAttributes(AttributeFuncs::
                           typeIncompatible(NRetTy, AttributeSet::ReturnIndex),
                           AttributeSet::ReturnIndex) &&
           "Return attributes no longer compatible?");

  if (RAttrs.hasAttributes(AttributeSet::ReturnIndex))
    AttributesVec.push_back(AttributeSet::get(NRetTy->getContext(), RAttrs));

  if (PAL.hasAttributes(AttributeSet::FunctionIndex))
    AttributesVec.push_back(AttributeSet::get(F->getContext(),
                                              PAL.getFnAttributes()));

  // Reconstruct the AttributesList based on the vector we constructed.
  AttributeSet NewPAL = AttributeSet::get(F->getContext(), AttributesVec);

  // Create the new function type based on the recomputed parameters.
  FunctionType *NFTy = FunctionType::get(NRetTy, Params, FTy->isVarArg());

  // No change?
  if (NFTy == FTy)
    return false;

  // Create the new function body and insert it into the module...
  Function *NF = Function::Create(NFTy, F->getLinkage());
  NF->copyAttributesFrom(F);
  NF->setAttributes(NewPAL);
  // Insert the new function before the old function, so we won't be processing
  // it again.
  F->getParent()->getFunctionList().insert(F, NF);
  NF->takeName(F);

  // Loop over all of the callers of the function, transforming the call sites
  // to pass in a smaller number of arguments into the new function.
  //
  std::vector<Value*> Args;
  while (!F->use_empty()) {
    CallSite CS(F->use_back());
    Instruction *Call = CS.getInstruction();

    AttributesVec.clear();
    const AttributeSet &CallPAL = CS.getAttributes();

    // The call return attributes.
    AttributeSet RAttrs = CallPAL.getRetAttributes();

    // Adjust in case the function was changed to return void.
    RAttrs =
      AttributeSet::get(NF->getContext(), AttributeSet::ReturnIndex,
                        AttrBuilder(RAttrs, AttributeSet::ReturnIndex).
        removeAttributes(AttributeFuncs::
                         typeIncompatible(NF->getReturnType(),
                                          AttributeSet::ReturnIndex),
                         AttributeSet::ReturnIndex));
    if (RAttrs.hasAttributes(AttributeSet::ReturnIndex))
      AttributesVec.push_back(AttributeSet::get(NF->getContext(), RAttrs));

    // Declare these outside of the loops, so we can reuse them for the second
    // loop, which loops the varargs.
    CallSite::arg_iterator I = CS.arg_begin();
    unsigned i = 0;
    // Loop over those operands, corresponding to the normal arguments to the
    // original function, and add those that are still alive.
    for (unsigned e = FTy->getNumParams(); i != e; ++I, ++i)
      if (ArgAlive[i]) {
        Args.push_back(*I);
        // Get original parameter attributes, but skip return attributes.
        if (CallPAL.hasAttributes(i + 1)) {
          AttrBuilder B(CallPAL, i + 1);
          // If the return type has changed, then get rid of 'returned' on the
          // call site. The alternative is to make all 'returned' attributes on
          // call sites keep the return value alive just like 'returned'
          // attributes on function declarations, but it's less clearly a win
          // and this is not an expected case anyway.
          if (NRetTy != RetTy && B.contains(Attribute::Returned))
            B.removeAttribute(Attribute::Returned);
          AttributesVec.
            push_back(AttributeSet::get(F->getContext(), Args.size(), B));
        }
      }

    // Push any varargs arguments on the list. Don't forget their attributes.
    for (CallSite::arg_iterator E = CS.arg_end(); I != E; ++I, ++i) {
      Args.push_back(*I);
      if (CallPAL.hasAttributes(i + 1)) {
        AttrBuilder B(CallPAL, i + 1);
        AttributesVec.
          push_back(AttributeSet::get(F->getContext(), Args.size(), B));
      }
    }

    if (CallPAL.hasAttributes(AttributeSet::FunctionIndex))
      AttributesVec.push_back(AttributeSet::get(Call->getContext(),
                                                CallPAL.getFnAttributes()));

    // Reconstruct the AttributesList based on the vector we constructed.
    AttributeSet NewCallPAL = AttributeSet::get(F->getContext(), AttributesVec);

    Instruction *New;
    if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
                               Args, "", Call);
      cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
      cast<InvokeInst>(New)->setAttributes(NewCallPAL);
    } else {
      New = CallInst::Create(NF, Args, "", Call);
      cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
      cast<CallInst>(New)->setAttributes(NewCallPAL);
      if (cast<CallInst>(Call)->isTailCall())
        cast<CallInst>(New)->setTailCall();
    }
    New->setDebugLoc(Call->getDebugLoc());

    Args.clear();

    if (!Call->use_empty()) {
      if (New->getType() == Call->getType()) {
        // Return type not changed? Just replace users then.
        Call->replaceAllUsesWith(New);
        New->takeName(Call);
      } else if (New->getType()->isVoidTy()) {
        // Our return value has uses, but they will get removed later on.
        // Replace by null for now.
        if (!Call->getType()->isX86_MMXTy())
          Call->replaceAllUsesWith(Constant::getNullValue(Call->getType()));
      } else {
        assert(RetTy->isStructTy() &&
               "Return type changed, but not into a void. The old return type"
               " must have been a struct!");
        Instruction *InsertPt = Call;
        if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
          BasicBlock::iterator IP = II->getNormalDest()->begin();
          while (isa<PHINode>(IP)) ++IP;
          InsertPt = IP;
        }

        // We used to return a struct. Instead of doing smart stuff with all the
        // uses of this struct, we will just rebuild it using
        // extract/insertvalue chaining and let instcombine clean that up.
        //
        // Start out building up our return value from undef
        Value *RetVal = UndefValue::get(RetTy);
        for (unsigned i = 0; i != RetCount; ++i)
          if (NewRetIdxs[i] != -1) {
            Value *V;
            if (RetTypes.size() > 1)
              // We are still returning a struct, so extract the value from our
              // return value
              V = ExtractValueInst::Create(New, NewRetIdxs[i], "newret",
                                           InsertPt);
            else
              // We are now returning a single element, so just insert that
              V = New;
            // Insert the value at the old position
            RetVal = InsertValueInst::Create(RetVal, V, i, "oldret", InsertPt);
          }
        // Now, replace all uses of the old call instruction with the return
        // struct we built
        Call->replaceAllUsesWith(RetVal);
        New->takeName(Call);
      }
    }

    // Finally, remove the old call from the program, reducing the use-count of
    // F.
    Call->eraseFromParent();
  }

  // Since we have now created the new function, splice the body of the old
  // function right into the new function, leaving the old rotting hulk of the
  // function empty.
  NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList());

  // Loop over the argument list, transferring uses of the old arguments over to
  // the new arguments, also transferring over the names as well.
  i = 0;
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(),
       I2 = NF->arg_begin(); I != E; ++I, ++i)
    if (ArgAlive[i]) {
      // If this is a live argument, move the name and users over to the new
      // version.
      I->replaceAllUsesWith(I2);
      I2->takeName(I);
      ++I2;
    } else {
      // If this argument is dead, replace any uses of it with null constants
      // (these are guaranteed to become unused later on).
      if (!I->getType()->isX86_MMXTy())
        I->replaceAllUsesWith(Constant::getNullValue(I->getType()));
    }

  // If we change the return value of the function we must rewrite any return
  // instructions.  Check this now.
  if (F->getReturnType() != NF->getReturnType())
    for (Function::iterator BB = NF->begin(), E = NF->end(); BB != E; ++BB)
      if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
        Value *RetVal;

        if (NFTy->getReturnType()->isVoidTy()) {
          RetVal = 0;
        } else {
          assert (RetTy->isStructTy());
          // The original return value was a struct, insert
          // extractvalue/insertvalue chains to extract only the values we need
          // to return and insert them into our new result.
          // This does generate messy code, but we'll leave it to instcombine to
          // clean that up.
          Value *OldRet = RI->getOperand(0);
          // Start out building up our return value from undef
          RetVal = UndefValue::get(NRetTy);
          for (unsigned i = 0; i != RetCount; ++i)
            if (NewRetIdxs[i] != -1) {
              ExtractValueInst *EV = ExtractValueInst::Create(OldRet, i,
                                                              "oldret", RI);
              if (RetTypes.size() > 1) {
                // We're still returning a struct, so reinsert the value into
                // our new return value at the new index

                RetVal = InsertValueInst::Create(RetVal, EV, NewRetIdxs[i],
                                                 "newret", RI);
              } else {
                // We are now only returning a simple value, so just return the
                // extracted value.
                RetVal = EV;
              }
            }
        }
        // Replace the return instruction with one returning the new return
        // value (possibly 0 if we became void).
        ReturnInst::Create(F->getContext(), RetVal, RI);
        BB->getInstList().erase(RI);
      }

  // Patch the pointer to LLVM function in debug info descriptor.
  FunctionDIMap::iterator DI = FunctionDIs.find(F);
  if (DI != FunctionDIs.end())
    DI->second.replaceFunction(NF);

  // Now that the old function is dead, delete it.
  F->eraseFromParent();

  return true;
}
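RemoveDeadStuffFromFunction treats a struct return as several independent return values through the NumRetVals, CreateRet, and CreateArg helpers, none of which are shown. A hedged sketch of what NumRetVals is assumed to compute:

// Hedged sketch of the NumRetVals helper assumed above: one "return value"
// per struct element, zero for void, one for any other return type.
static unsigned NumRetVals(const Function *F) {
  Type *RetTy = F->getReturnType();
  if (RetTy->isVoidTy())
    return 0;
  if (StructType *STy = dyn_cast<StructType>(RetTy))
    return STy->getNumElements();
  return 1;
}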
Ejemplo n.º 13
0
/// DeleteDeadVarargs - If this is a function that takes a ... list, and if
/// llvm.vastart is never called, the varargs list is dead for the function.
bool DAE::DeleteDeadVarargs(Function &Fn) {
  assert(Fn.getFunctionType()->isVarArg() && "Function isn't varargs!");
  if (Fn.isDeclaration() || !Fn.hasLocalLinkage()) return false;

  // Ensure that the function is only directly called.
  if (Fn.hasAddressTaken())
    return false;

  // Okay, we know we can transform this function if safe.  Scan its body
  // looking for calls to llvm.vastart.
  for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::vastart)
          return false;
      }
    }
  }

  // If we get here, there are no calls to llvm.vastart in the function body,
  // remove the "..." and adjust all the calls.

  // Start by computing a new prototype for the function, which is the same as
  // the old function, but doesn't have isVarArg set.
  FunctionType *FTy = Fn.getFunctionType();

  std::vector<Type*> Params(FTy->param_begin(), FTy->param_end());
  FunctionType *NFTy = FunctionType::get(FTy->getReturnType(),
                                                Params, false);
  unsigned NumArgs = Params.size();

  // Create the new function body and insert it into the module...
  Function *NF = Function::Create(NFTy, Fn.getLinkage());
  NF->copyAttributesFrom(&Fn);
  Fn.getParent()->getFunctionList().insert(&Fn, NF);
  NF->takeName(&Fn);

  // Loop over all of the callers of the function, transforming the call sites
  // to pass in a smaller number of arguments into the new function.
  //
  std::vector<Value*> Args;
  for (Value::use_iterator I = Fn.use_begin(), E = Fn.use_end(); I != E; ) {
    CallSite CS(*I++);
    if (!CS)
      continue;
    Instruction *Call = CS.getInstruction();

    // Pass all the same arguments.
    Args.assign(CS.arg_begin(), CS.arg_begin() + NumArgs);

    // Drop any attributes that were on the vararg arguments.
    AttributeSet PAL = CS.getAttributes();
    if (!PAL.isEmpty() && PAL.getSlotIndex(PAL.getNumSlots() - 1) > NumArgs) {
      SmallVector<AttributeSet, 8> AttributesVec;
      for (unsigned i = 0; PAL.getSlotIndex(i) <= NumArgs; ++i)
        AttributesVec.push_back(PAL.getSlotAttributes(i));
      if (PAL.hasAttributes(AttributeSet::FunctionIndex))
        AttributesVec.push_back(AttributeSet::get(Fn.getContext(),
                                                  PAL.getFnAttributes()));
      PAL = AttributeSet::get(Fn.getContext(), AttributesVec);
    }

    Instruction *New;
    if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
                               Args, "", Call);
      cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
      cast<InvokeInst>(New)->setAttributes(PAL);
    } else {
      New = CallInst::Create(NF, Args, "", Call);
      cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
      cast<CallInst>(New)->setAttributes(PAL);
      if (cast<CallInst>(Call)->isTailCall())
        cast<CallInst>(New)->setTailCall();
    }
    New->setDebugLoc(Call->getDebugLoc());

    Args.clear();

    if (!Call->use_empty())
      Call->replaceAllUsesWith(New);

    New->takeName(Call);

    // Finally, remove the old call from the program, reducing the use-count of
    // F.
    Call->eraseFromParent();
  }

  // Since we have now created the new function, splice the body of the old
  // function right into the new function, leaving the old rotting hulk of the
  // function empty.
  NF->getBasicBlockList().splice(NF->begin(), Fn.getBasicBlockList());

  // Loop over the argument list, transferring uses of the old arguments over to
  // the new arguments, also transferring over the names as well.  While we're at
  // it, remove the dead arguments from the DeadArguments list.
  //
  for (Function::arg_iterator I = Fn.arg_begin(), E = Fn.arg_end(),
       I2 = NF->arg_begin(); I != E; ++I, ++I2) {
    // Move the name and users over to the new version.
    I->replaceAllUsesWith(I2);
    I2->takeName(I);
  }

  // Patch the pointer to LLVM function in debug info descriptor.
  FunctionDIMap::iterator DI = FunctionDIs.find(&Fn);
  if (DI != FunctionDIs.end())
    DI->second.replaceFunction(NF);

  // Fix up any BlockAddresses that refer to the function.
  Fn.replaceAllUsesWith(ConstantExpr::getBitCast(NF, Fn.getType()));
  // Delete the bitcast that we just created, so that NF does not
  // appear to be address-taken.
  NF->removeDeadConstantUsers();
  // Finally, nuke the old function.
  Fn.eraseFromParent();
  return true;
}
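DeleteDeadVarargs erases the function it is handed, so any driver must advance its module iterator before the call. A reduced, hedged sketch of such a driver (only the varargs part; the surrounding pass logic is an assumption, not shown in the example):

// Hedged sketch of a runOnModule-style driver for DeleteDeadVarargs. The
// iterator is advanced before the call because Fn is erased on success.
bool DAE::runOnModule(Module &M) {
  bool Changed = false;
  for (Module::iterator I = M.begin(), E = M.end(); I != E; ) {
    Function &F = *I++;
    if (F.getFunctionType()->isVarArg())
      Changed |= DeleteDeadVarargs(F);
  }
  return Changed;
}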
Ejemplo n.º 14
0
bool GCOVProfiler::emitProfileArcs() {
  NamedMDNode *CU_Nodes = M->getNamedMetadata("llvm.dbg.cu");
  if (!CU_Nodes) return false;

  bool Result = false;  
  bool InsertIndCounterIncrCode = false;
  for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i) {
    DICompileUnit CU(CU_Nodes->getOperand(i));
    DIArray SPs = CU.getSubprograms();
    SmallVector<std::pair<GlobalVariable *, MDNode *>, 8> CountersBySP;
    for (unsigned i = 0, e = SPs.getNumElements(); i != e; ++i) {
      DISubprogram SP(SPs.getElement(i));
      if (!SP.Verify()) continue;
      Function *F = SP.getFunction();
      if (!F) continue;
      if (!Result) Result = true;
      unsigned Edges = 0;
      for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
        TerminatorInst *TI = BB->getTerminator();
        if (isa<ReturnInst>(TI))
          ++Edges;
        else
          Edges += TI->getNumSuccessors();
      }
      
      ArrayType *CounterTy =
        ArrayType::get(Type::getInt64Ty(*Ctx), Edges);
      GlobalVariable *Counters =
        new GlobalVariable(*M, CounterTy, false,
                           GlobalValue::InternalLinkage,
                           Constant::getNullValue(CounterTy),
                           "__llvm_gcov_ctr");
      CountersBySP.push_back(std::make_pair(Counters, (MDNode*)SP));
      
      UniqueVector<BasicBlock *> ComplexEdgePreds;
      UniqueVector<BasicBlock *> ComplexEdgeSuccs;
      
      unsigned Edge = 0;
      for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
        TerminatorInst *TI = BB->getTerminator();
        int Successors = isa<ReturnInst>(TI) ? 1 : TI->getNumSuccessors();
        if (Successors) {
          IRBuilder<> Builder(TI);
          
          if (Successors == 1) {
            Value *Counter = Builder.CreateConstInBoundsGEP2_64(Counters, 0,
                                                                Edge);
            Value *Count = Builder.CreateLoad(Counter);
            Count = Builder.CreateAdd(Count, Builder.getInt64(1));
            Builder.CreateStore(Count, Counter);
          } else if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
            Value *Sel = Builder.CreateSelect(BI->getCondition(),
                                              Builder.getInt64(Edge),
                                              Builder.getInt64(Edge + 1));
            SmallVector<Value *, 2> Idx;
            Idx.push_back(Builder.getInt64(0));
            Idx.push_back(Sel);
            Value *Counter = Builder.CreateInBoundsGEP(Counters, Idx);
            Value *Count = Builder.CreateLoad(Counter);
            Count = Builder.CreateAdd(Count, Builder.getInt64(1));
            Builder.CreateStore(Count, Counter);
          } else {
            ComplexEdgePreds.insert(BB);
            for (int i = 0; i != Successors; ++i)
              ComplexEdgeSuccs.insert(TI->getSuccessor(i));
          }
          Edge += Successors;
        }
      }
      
      if (!ComplexEdgePreds.empty()) {
        GlobalVariable *EdgeTable =
          buildEdgeLookupTable(F, Counters,
                               ComplexEdgePreds, ComplexEdgeSuccs);
        GlobalVariable *EdgeState = getEdgeStateValue();
        
        for (int i = 0, e = ComplexEdgePreds.size(); i != e; ++i) {
          IRBuilder<> Builder(ComplexEdgePreds[i+1]->getTerminator());
          Builder.CreateStore(Builder.getInt32(i), EdgeState);
        }
        for (int i = 0, e = ComplexEdgeSuccs.size(); i != e; ++i) {
          // call runtime to perform increment
          BasicBlock::iterator InsertPt =
            ComplexEdgeSuccs[i+1]->getFirstInsertionPt();
          IRBuilder<> Builder(InsertPt);
          Value *CounterPtrArray =
            Builder.CreateConstInBoundsGEP2_64(EdgeTable, 0,
                                               i * ComplexEdgePreds.size());

          // Build code to increment the counter.
          InsertIndCounterIncrCode = true;
          Builder.CreateCall2(getIncrementIndirectCounterFunc(),
                              EdgeState, CounterPtrArray);
        }
      }
    }

    Function *WriteoutF = insertCounterWriteout(CountersBySP);
    Function *FlushF = insertFlush(CountersBySP);

    // Create a small bit of code that registers the "__llvm_gcov_writeout" to
    // be executed at exit and the "__llvm_gcov_flush" function to be executed
    // when "__gcov_flush" is called.
    FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
    Function *F = Function::Create(FTy, GlobalValue::InternalLinkage,
                                   "__llvm_gcov_init", M);
    F->setUnnamedAddr(true);
    F->setLinkage(GlobalValue::InternalLinkage);
    F->addFnAttr(Attribute::NoInline);
    if (Options.NoRedZone)
      F->addFnAttr(Attribute::NoRedZone);

    BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F);
    IRBuilder<> Builder(BB);

    FTy = FunctionType::get(Type::getVoidTy(*Ctx), false);
    Type *Params[] = {
      PointerType::get(FTy, 0),
      PointerType::get(FTy, 0)
    };
    FTy = FunctionType::get(Builder.getVoidTy(), Params, false);

    // Initialize the environment and register the local writeout and flush
    // functions.
    Constant *GCOVInit = M->getOrInsertFunction("llvm_gcov_init", FTy);
    Builder.CreateCall2(GCOVInit, WriteoutF, FlushF);
    Builder.CreateRetVoid();

    appendToGlobalCtors(*M, F, 0);
  }

  if (InsertIndCounterIncrCode)
    insertIndirectCounterIncrement();

  return Result;
}
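The complex-edge handling above stores the index of the taken predecessor into a shared global returned by getEdgeStateValue. A hedged sketch of that helper's assumed shape: lazily create an internal i32 global (the symbol name below is an assumption).

// Hedged sketch of getEdgeStateValue as assumed above: get or create an
// internal i32 global that records which predecessor edge was taken.
GlobalVariable *getEdgeStateValueSketch(Module *M, LLVMContext &Ctx) {
  GlobalVariable *GV = M->getGlobalVariable("__llvm_gcov_global_state_pred");
  if (!GV) {
    GV = new GlobalVariable(*M, Type::getInt32Ty(Ctx), false,
                            GlobalValue::InternalLinkage,
                            ConstantInt::get(Type::getInt32Ty(Ctx), 0xffffffff),
                            "__llvm_gcov_global_state_pred");
    GV->setUnnamedAddr(true);
  }
  return GV;
}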
Ejemplo n.º 15
0
//
// Method: runOnFunction()
//
// Description:
//  Entry point for this LLVM pass.
//
// Return value:
//  true  - The function was modified.
//  false - The function was not modified.
//
bool
BreakConstantGEPs::runOnFunction (Function & F) {

  if (!pocl::Workgroup::isKernelToProcess(F)) return false;   

  bool modified = false;

  // Worklist of values to check for constant GEP expressions
  std::vector<Instruction *> Worklist;

  //
  // Initialize the worklist by finding all instructions that have one or more
  // operands containing a constant GEP expression.
  //
  for (Function::iterator BB = F.begin(); BB != F.end(); ++BB) {
    for (BasicBlock::iterator i = BB->begin(); i != BB->end(); ++i) {
      //
      // Scan through the operands of this instruction.  If it is a constant
      // expression GEP, insert an instruction GEP before the instruction.
      //
      Instruction * I = &*i;
      for (unsigned index = 0; index < I->getNumOperands(); ++index) {
        if (hasConstantGEP (I->getOperand(index))) {
          Worklist.push_back (I);
        }
      }
    }
  }

  //
  // Determine whether we will modify anything.
  //
  if (Worklist.size()) modified = true;

  //
  // While the worklist is not empty, take an item from it, convert the
  // operands into instructions if necessary, and determine if the newly
  // added instructions need to be processed as well.
  //
  while (Worklist.size()) {
    Instruction * I = Worklist.back();
    Worklist.pop_back();

    //
    // Scan through the operands of this instruction and convert each into an
    // instruction.  Note that this works a little differently for phi
    // instructions because the new instruction must be added to the
    // appropriate predecessor block.
    //
    if (PHINode * PHI = dyn_cast<PHINode>(I)) {
      for (unsigned index = 0; index < PHI->getNumIncomingValues(); ++index) {
        //
        // For PHI Nodes, if an operand is a constant expression with a GEP, we
        // want to insert the new instructions in the predecessor basic block.
        //
        // Note: It seems that it's possible for a phi to have the same
        // incoming basic block listed multiple times; this seems okay as long
        // as the same value is listed for the incoming block.
        //
        Instruction * InsertPt = PHI->getIncomingBlock(index)->getTerminator();
        if (ConstantExpr * CE = hasConstantGEP (PHI->getIncomingValue(index))) {
          Instruction * NewInst = convertExpression (CE, InsertPt);
          for (unsigned i2 = index; i2 < PHI->getNumIncomingValues(); ++i2) {
            if ((PHI->getIncomingBlock (i2)) == PHI->getIncomingBlock (index))
              PHI->setIncomingValue (i2, NewInst);
          }
          Worklist.push_back (NewInst);
        }
      }
    } else {
      for (unsigned index = 0; index < I->getNumOperands(); ++index) {
        //
        // For other instructions, we want to insert instructions replacing
        // constant expressions immediately before the instruction using the
        // constant expression.
        //
        if (ConstantExpr * CE = hasConstantGEP (I->getOperand(index))) {
          Instruction * NewInst = convertExpression (CE, I);
          I->replaceUsesOfWith (CE, NewInst);
          Worklist.push_back (NewInst);
        }
      }
    }
  }

  return modified;
}
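The worklist above is seeded by hasConstantGEP, which is not shown. Its assumed shape recursively checks whether an operand is, or transitively contains, a GEP constant expression; a hedged sketch:

// Hedged sketch of the hasConstantGEP helper assumed above: return the
// enclosing ConstantExpr if it is (or contains) a GEP constant expression.
static ConstantExpr *hasConstantGEP(Value *V) {
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::GetElementPtr)
      return CE;
    for (unsigned i = 0; i < CE->getNumOperands(); ++i)
      if (hasConstantGEP(CE->getOperand(i)))
        return CE;
  }
  return 0;
}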
Ejemplo n.º 16
0
bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
                                       BasicBlock *&OldEntry,
                                       bool &TailCallsAreMarkedTail,
                                       SmallVector<PHINode*, 8> &ArgumentPHIs,
                                       bool CannotTailCallElimCallsMarkedTail) {
  // If we are introducing accumulator recursion to eliminate operations after
  // the call instruction that are both associative and commutative, the initial
  // value for the accumulator is placed in this variable.  If this value is set
  // then we actually perform accumulator recursion elimination instead of
  // simple tail recursion elimination.  If the operation is an LLVM instruction
  // (e.g. "add") then it is recorded in AccumulatorRecursionInstr.  If not, then
  // we are handling the case when the return instruction returns a constant C
  // which is different to the constant returned by other return instructions
  // (which is recorded in AccumulatorRecursionEliminationInitVal).  This is a
  // special case of accumulator recursion, the operation being "return C".
  Value *AccumulatorRecursionEliminationInitVal = 0;
  Instruction *AccumulatorRecursionInstr = 0;

  // Ok, we found a potential tail call.  We can currently only transform the
  // tail call if all of the instructions between the call and the return are
  // movable to above the call itself, leaving the call next to the return.
  // Check that this is the case now.
  BasicBlock::iterator BBI = CI;
  for (++BBI; &*BBI != Ret; ++BBI) {
    if (CanMoveAboveCall(BBI, CI)) continue;
    
    // If we can't move the instruction above the call, it might be because it
    // is an associative and commutative operation that could be transformed
    // using accumulator recursion elimination.  Check to see if this is the
    // case, and if so, remember the initial accumulator value for later.
    if ((AccumulatorRecursionEliminationInitVal =
                           CanTransformAccumulatorRecursion(BBI, CI))) {
      // Yes, this is accumulator recursion.  Remember which instruction
      // accumulates.
      AccumulatorRecursionInstr = BBI;
    } else {
      return false;   // Otherwise, we cannot eliminate the tail recursion!
    }
  }

  // We can only transform call/return pairs that either ignore the return value
  // of the call and return void, ignore the value of the call and return a
  // constant, return the value returned by the tail call, or that are being
  // accumulator recursion variable eliminated.
  if (Ret->getNumOperands() == 1 && Ret->getReturnValue() != CI &&
      !isa<UndefValue>(Ret->getReturnValue()) &&
      AccumulatorRecursionEliminationInitVal == 0 &&
      !getCommonReturnValue(0, CI)) {
    // One case remains that we are able to handle: the current return
    // instruction returns a constant, and all other return instructions
    // return a different constant.
    if (!isDynamicConstant(Ret->getReturnValue(), CI, Ret))
      return false; // Current return instruction does not return a constant.
    // Check that all other return instructions return a common constant.  If
    // so, record it in AccumulatorRecursionEliminationInitVal.
    AccumulatorRecursionEliminationInitVal = getCommonReturnValue(Ret, CI);
    if (!AccumulatorRecursionEliminationInitVal)
      return false;
  }

  BasicBlock *BB = Ret->getParent();
  Function *F = BB->getParent();

  // OK! We can transform this tail call.  If this is the first one found,
  // create the new entry block, allowing us to branch back to the old entry.
  if (OldEntry == 0) {
    OldEntry = &F->getEntryBlock();
    BasicBlock *NewEntry = BasicBlock::Create(F->getContext(), "", F, OldEntry);
    NewEntry->takeName(OldEntry);
    OldEntry->setName("tailrecurse");
    BranchInst::Create(OldEntry, NewEntry);

    // If this tail call is marked 'tail' and if there are any allocas in the
    // entry block, move them up to the new entry block.
    TailCallsAreMarkedTail = CI->isTailCall();
    if (TailCallsAreMarkedTail)
      // Move all fixed sized allocas from OldEntry to NewEntry.
      for (BasicBlock::iterator OEBI = OldEntry->begin(), E = OldEntry->end(),
             NEBI = NewEntry->begin(); OEBI != E; )
        if (AllocaInst *AI = dyn_cast<AllocaInst>(OEBI++))
          if (isa<ConstantInt>(AI->getArraySize()))
            AI->moveBefore(NEBI);

    // Now that we have created a new block, which jumps to the entry
    // block, insert a PHI node for each argument of the function.
    // For now, we initialize each PHI to only have the real arguments
    // which are passed in.
    Instruction *InsertPos = OldEntry->begin();
    for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
         I != E; ++I) {
      PHINode *PN = PHINode::Create(I->getType(), 2,
                                    I->getName() + ".tr", InsertPos);
      I->replaceAllUsesWith(PN); // Everyone use the PHI node now!
      PN->addIncoming(I, NewEntry);
      ArgumentPHIs.push_back(PN);
    }
  }

  // If this function has self recursive calls in the tail position where some
  // are marked tail and some are not, only transform one flavor or another.  We
  // have to choose whether we move allocas in the entry block to the new entry
  // block or not, so we can't make a good choice for both.  NOTE: We could do
  // slightly better here in the case that the function has no entry block
  // allocas.
  if (TailCallsAreMarkedTail && !CI->isTailCall())
    return false;

  // Ok, now that we know we have a pseudo-entry block WITH all of the
  // required PHI nodes, add entries into the PHI node for the actual
  // parameters passed into the tail-recursive call.
  for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
    ArgumentPHIs[i]->addIncoming(CI->getArgOperand(i), BB);

  // If we are introducing an accumulator variable to eliminate the recursion,
  // do so now.  Note that we _know_ that no subsequent tail recursion
  // eliminations will happen on this function because of the way the
  // accumulator recursion predicate is set up.
  //
  if (AccumulatorRecursionEliminationInitVal) {
    Instruction *AccRecInstr = AccumulatorRecursionInstr;
    // Start by inserting a new PHI node for the accumulator.
    pred_iterator PB = pred_begin(OldEntry), PE = pred_end(OldEntry);
    PHINode *AccPN =
      PHINode::Create(AccumulatorRecursionEliminationInitVal->getType(),
                      std::distance(PB, PE) + 1,
                      "accumulator.tr", OldEntry->begin());

    // Loop over all of the predecessors of the tail recursion block.  For the
    // real entry into the function we seed the PHI with the initial value,
    // computed earlier.  For any other existing branches to this block (due to
    // other tail recursions eliminated) the accumulator is not modified.
    // Because we haven't added the branch in the current block to OldEntry yet,
    // it will not show up as a predecessor.
    for (pred_iterator PI = PB; PI != PE; ++PI) {
      BasicBlock *P = *PI;
      if (P == &F->getEntryBlock())
        AccPN->addIncoming(AccumulatorRecursionEliminationInitVal, P);
      else
        AccPN->addIncoming(AccPN, P);
    }

    if (AccRecInstr) {
      // Add an incoming argument for the current block, which is computed by
      // our associative and commutative accumulator instruction.
      AccPN->addIncoming(AccRecInstr, BB);

      // Next, rewrite the accumulator recursion instruction so that it does not
      // use the result of the call anymore, instead, use the PHI node we just
      // inserted.
      AccRecInstr->setOperand(AccRecInstr->getOperand(0) != CI, AccPN);
    } else {
      // Add an incoming argument for the current block, which is just the
      // constant returned by the current return instruction.
      AccPN->addIncoming(Ret->getReturnValue(), BB);
    }

    // Finally, rewrite any return instructions in the program to return the PHI
    // node instead of the "initval" that they do currently.  This loop will
    // actually rewrite the return value we are destroying, but that's ok.
    for (Function::iterator BBI = F->begin(), E = F->end(); BBI != E; ++BBI)
      if (ReturnInst *RI = dyn_cast<ReturnInst>(BBI->getTerminator()))
        RI->setOperand(0, AccPN);
    ++NumAccumAdded;
  }

  // Now that all of the PHI nodes are in place, remove the call and
  // ret instructions, replacing them with an unconditional branch.
  BranchInst *NewBI = BranchInst::Create(OldEntry, Ret);
  NewBI->setDebugLoc(CI->getDebugLoc());

  BB->getInstList().erase(Ret);  // Remove return.
  BB->getInstList().erase(CI);   // Remove call.
  ++NumEliminated;
  return true;
}
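The transformation above bails out through CanMoveAboveCall whenever an instruction between the call and the return cannot be hoisted past the call. A simplified, hedged sketch of that check (the real pass additionally allows some non-volatile loads under extra conditions):

// Hedged, simplified sketch of the CanMoveAboveCall check assumed above.
static bool canMoveAboveCallSketch(Instruction *I, CallInst *CI) {
  if (I->mayHaveSideEffects())            // also rejects volatile loads
    return false;
  // A side-effect-free instruction can be hoisted only if it does not use
  // the result of the call it would be moved above.
  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
    if (I->getOperand(i) == CI)
      return false;
  return true;
}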
Ejemplo n.º 17
0
bool Inliner::runOnSCC(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraph>();
  const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
  const TargetLibraryInfo *TLI = getAnalysisIfAvailable<TargetLibraryInfo>();

  SmallPtrSet<Function*, 8> SCCFunctions;
  DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (F) SCCFunctions.insert(F);
    DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallSite, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee.  This allows us to avoid
  // infinite inlining in some obscure cases.  To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function*, int>, 8> InlineHistory;

  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (!F) continue;

    for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
        CallSite CS(cast<Value>(I));
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CS || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never inline
        // it.  If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (CS.getCalledFunction() && CS.getCalledFunction()->isDeclaration())
          continue;

        CallSites.push_back(std::make_pair(CS, -1));
      }
  }

  DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this function, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned i = 0; i < FirstCallInSCC; ++i)
    if (Function *F = CallSites[i].first.getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);


  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, TD);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Iterate over the outer loop because inlining functions can cause indirect
    // calls to become direct calls.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      CallSite CS = CallSites[CSi].first;

      Function *Caller = CS.getCaller();
      Function *Callee = CS.getCalledFunction();

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size.  This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (isInstructionTriviallyDead(CS.getInstruction(), TLI)) {
        DEBUG(dbgs() << "    -> Deleting dead call: "
                     << *CS.getInstruction() << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        CG[Caller]->removeCallEdgeFor(CS);
        CS.getInstruction()->eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // We can only inline direct calls to non-declarations.
        if (Callee == 0 || Callee->isDeclaration()) continue;

        // If this call site was obtained by inlining another function, verify
        // that the include path for the function did not include the callee
        // itself.  If so, we'd be recursively inlining the same function,
        // which would provide the same callsites, which would cause us to
        // infinitely inline.
        int InlineHistoryID = CallSites[CSi].second;
        if (InlineHistoryID != -1 &&
            InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory))
          continue;


        // If the policy determines that we should inline this function,
        // try to do so.
        if (!shouldInline(CS))
          continue;

        // Attempt to inline the function.
        if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
                                  InlineHistoryID, InsertLifetime))
          continue;
        ++NumInlined;

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process.  They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

          for (unsigned i = 0, e = InlineInfo.InlinedCalls.size();
               i != e; ++i) {
            Value *Ptr = InlineInfo.InlinedCalls[i];
            CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
          }
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&

          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet, this
          // could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        DEBUG(dbgs() << "    -> Deleting dead function: "
              << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the node for the callee from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list.  If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin()+CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}
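The infinite-inlining guard above depends on InlineHistoryIncludes, which walks the chain of (callee, parent-id) history entries. A hedged sketch of its assumed shape:

// Hedged sketch of InlineHistoryIncludes as assumed above: follow the parent
// links in the history chain and report whether F already appears in it.
static bool InlineHistoryIncludes(
    Function *F, int InlineHistoryID,
    const SmallVectorImpl<std::pair<Function*, int> > &InlineHistory) {
  while (InlineHistoryID != -1) {
    assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
           "Invalid inline history ID");
    if (InlineHistory[InlineHistoryID].first == F)
      return true;
    InlineHistoryID = InlineHistory[InlineHistoryID].second;
  }
  return false;
}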
Ejemplo n.º 18
0
// insertFastDiv - Substitutes the div/rem instruction with code that checks the
// value of the operands and uses a shorter-faster div/rem instruction when
// possible and the longer-slower div/rem instruction otherwise.
static bool insertFastDiv(Function &F,
                          Function::iterator &I,
                          BasicBlock::iterator &J,
                          IntegerType *BypassType,
                          bool UseDivOp,
                          bool UseSignedOp,
                          DivCacheTy &PerBBDivCache) {
  // Get instruction operands
  Instruction *Instr = J;
  Value *Dividend = Instr->getOperand(0);
  Value *Divisor = Instr->getOperand(1);

  if (isa<ConstantInt>(Divisor) ||
      (isa<ConstantInt>(Dividend) && isa<ConstantInt>(Divisor))) {
    // Operations with immediate values should have
    // been solved and replaced during compile time.
    return false;
  }

  // Basic Block is split before divide
  BasicBlock *MainBB = I;
  BasicBlock *SuccessorBB = I->splitBasicBlock(J);
  ++I; //advance iterator I to successorBB

  // Add new basic block for slow divide operation
  BasicBlock *SlowBB = BasicBlock::Create(F.getContext(), "",
                                          MainBB->getParent(), SuccessorBB);
  SlowBB->moveBefore(SuccessorBB);
  IRBuilder<> SlowBuilder(SlowBB, SlowBB->begin());
  Value *SlowQuotientV;
  Value *SlowRemainderV;
  if (UseSignedOp) {
    SlowQuotientV = SlowBuilder.CreateSDiv(Dividend, Divisor);
    SlowRemainderV = SlowBuilder.CreateSRem(Dividend, Divisor);
  } else {
    SlowQuotientV = SlowBuilder.CreateUDiv(Dividend, Divisor);
    SlowRemainderV = SlowBuilder.CreateURem(Dividend, Divisor);
  }
  SlowBuilder.CreateBr(SuccessorBB);

  // Add new basic block for fast divide operation
  BasicBlock *FastBB = BasicBlock::Create(F.getContext(), "",
                                          MainBB->getParent(), SuccessorBB);
  FastBB->moveBefore(SlowBB);
  IRBuilder<> FastBuilder(FastBB, FastBB->begin());
  Value *ShortDivisorV = FastBuilder.CreateCast(Instruction::Trunc, Divisor,
                                                BypassType);
  Value *ShortDividendV = FastBuilder.CreateCast(Instruction::Trunc, Dividend,
                                                 BypassType);

  // udiv/urem because optimization only handles positive numbers
  Value *ShortQuotientV = FastBuilder.CreateExactUDiv(ShortDividendV,
                                                      ShortDivisorV);
  Value *ShortRemainderV = FastBuilder.CreateURem(ShortDividendV,
                                                  ShortDivisorV);
  Value *FastQuotientV = FastBuilder.CreateCast(Instruction::ZExt,
                                                ShortQuotientV,
                                                Dividend->getType());
  Value *FastRemainderV = FastBuilder.CreateCast(Instruction::ZExt,
                                                 ShortRemainderV,
                                                 Dividend->getType());
  FastBuilder.CreateBr(SuccessorBB);

  // Phi nodes for result of div and rem
  IRBuilder<> SuccessorBuilder(SuccessorBB, SuccessorBB->begin());
  PHINode *QuoPhi = SuccessorBuilder.CreatePHI(Instr->getType(), 2);
  QuoPhi->addIncoming(SlowQuotientV, SlowBB);
  QuoPhi->addIncoming(FastQuotientV, FastBB);
  PHINode *RemPhi = SuccessorBuilder.CreatePHI(Instr->getType(), 2);
  RemPhi->addIncoming(SlowRemainderV, SlowBB);
  RemPhi->addIncoming(FastRemainderV, FastBB);

  // Replace Instr with appropriate phi node
  if (UseDivOp)
    Instr->replaceAllUsesWith(QuoPhi);
  else
    Instr->replaceAllUsesWith(RemPhi);
  Instr->eraseFromParent();

  // Combine operands into a single value with OR for value testing below
  MainBB->getInstList().back().eraseFromParent();
  IRBuilder<> MainBuilder(MainBB, MainBB->end());
  Value *OrV = MainBuilder.CreateOr(Dividend, Divisor);

  // BitMask is inverted to check if the operands are
  // larger than the bypass type
  uint64_t BitMask = ~BypassType->getBitMask();
  Value *AndV = MainBuilder.CreateAnd(OrV, BitMask);

  // Compare operand values and branch
  Value *ZeroV = MainBuilder.getInt32(0);
  Value *CmpV = MainBuilder.CreateICmpEQ(AndV, ZeroV);
  MainBuilder.CreateCondBr(CmpV, FastBB, SlowBB);

  // point iterator J at first instruction of successorBB
  J = I->begin();

  // Cache phi nodes to be used later in place of other instances
  // of div or rem with the same sign, dividend, and divisor
  DivOpInfo Key(UseSignedOp, Dividend, Divisor);
  DivPhiNodes Value(QuoPhi, RemPhi);
  PerBBDivCache.insert(std::pair<DivOpInfo, DivPhiNodes>(Key, Value));
  return true;
}
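insertFastDiv is meant to be driven block by block. A hedged sketch of such a driver follows; it is an assumption (the real bypass pass also checks operand bit widths and reuses cached phi nodes for repeated dividend/divisor pairs). Note that on success insertFastDiv splits the block, advances I to the successor, and resets J.

// Hedged sketch of a per-block driver for insertFastDiv (assumed shape).
static bool bypassSlowDivisionSketch(Function &F, Function::iterator &I,
                                     IntegerType *BypassType,
                                     DivCacheTy &PerBBDivCache) {
  bool MadeChange = false;
  for (BasicBlock::iterator J = I->begin(); J != I->end(); ++J) {
    unsigned Opcode = J->getOpcode();
    bool UseDivOp = Opcode == Instruction::SDiv || Opcode == Instruction::UDiv;
    bool UseRemOp = Opcode == Instruction::SRem || Opcode == Instruction::URem;
    if (!UseDivOp && !UseRemOp)
      continue;   // only div/rem instructions are candidates
    bool UseSignedOp = Opcode == Instruction::SDiv ||
                       Opcode == Instruction::SRem;
    MadeChange |= insertFastDiv(F, I, J, BypassType, UseDivOp, UseSignedOp,
                                PerBBDivCache);
  }
  return MadeChange;
}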
Ejemplo n.º 19
0
/*
 * Main args are always input
 * Functions currently considered as input functions:
 * scanf
 * fscanf
 * gets
 * fgets
 * fread
 * fgetc
 * getc
 * getchar
 * recv
 * recvmsg
 * read
 * recvfrom
 */
bool InputDep::runOnModule(Module &M) {
	//	DEBUG (errs() << "Function " << F.getName() << "\n";);
	NumInputValues = 0;
	bool inserted;
	Function* main = M.getFunction("main");
	if (main) {
		MDNode *mdn = main->begin()->begin()->getMetadata("dbg");
		for (Function::arg_iterator Arg = main->arg_begin(), aEnd =
				main->arg_end(); Arg != aEnd; Arg++) {
			inputDepValues.insert(Arg);
			NumInputValues++;
			if (mdn) {
				DILocation Loc(mdn);
				unsigned Line = Loc.getLineNumber();
				lineNo[Arg] = Line-1; //educated guess (can only get line numbers from insts; suppose main is declared one line before the 1st inst)
			}
		}


	}
	for (Module::iterator F = M.begin(), eM = M.end(); F != eM; ++F) {
		for (Function::iterator BB = F->begin(), e = F->end(); BB != e; ++BB) {
			for (BasicBlock::iterator I = BB->begin(), ee = BB->end(); I != ee; ++I) {
				if (CallInst *CI = dyn_cast<CallInst>(I)) {
					Function *Callee = CI->getCalledFunction();
					if (Callee) {
						Value* V;
						inserted = false;
						StringRef Name = Callee->getName();
						if (Name.equals("main")) {
							errs() << "main\n";
							V = CI->getArgOperand(1); //char* argv[]
							inputDepValues.insert(V);
							inserted = true;
							//errs() << "Input    " << *V << "\n";
						}
						if (Name.equals("__isoc99_scanf") || Name.equals(
								"scanf")) {
							for (unsigned i = 1, eee = CI->getNumArgOperands(); i
									!= eee; ++i) { // skip format string (i=1)
								V = CI->getArgOperand(i);
								if (V->getType()->isPointerTy()) {
									inputDepValues.insert(V);
									inserted = true;
									//errs() << "Input    " << *V << "\n";
								}
							}
						} else if (Name.equals("__isoc99_fscanf")
								|| Name.equals("fscanf")) {
							for (unsigned i = 2, eee = CI->getNumArgOperands(); i
									!= eee; ++i) { // skip file pointer and format string (i=2)
								V = CI->getArgOperand(i);
								if (V->getType()->isPointerTy()) {
									inputDepValues.insert(V);
									inserted = true;
									//errs() << "Input    " << *V << "\n";
								}
							}
						} else if ((Name.equals("gets") || Name.equals("fgets")
								|| Name.equals("fread"))
								|| Name.equals("getwd")
								|| Name.equals("getcwd")) {
							V = CI->getArgOperand(0); //the first argument receives the input for these functions
							if (V->getType()->isPointerTy()) {
								inputDepValues.insert(V);
								inserted = true;
								//errs() << "Input    " << *V << "\n";
							}
						} else if ((Name.equals("fgetc") || Name.equals("getc")
								|| Name.equals("getchar"))) {
							inputDepValues.insert(CI);
							inserted = true;
							//errs() << "Input    " << *CI << "\n";
						} else if (Name.equals("recv")
								|| Name.equals("recvmsg")
								|| Name.equals("read")) {
							Value* V = CI->getArgOperand(1);
							if (V->getType()->isPointerTy()) {
								inputDepValues.insert(V);
								inserted = true;
								//errs() << "Input    " << *V << "\n";
							}
						} else if (Name.equals("recvfrom")) {
							V = CI->getArgOperand(1);
							if (V->getType()->isPointerTy()) {
								inputDepValues.insert(V);
								inserted = true;
								//errs() << "Input    " << *V << "\n";
							}
							V = CI->getArgOperand(4);
							if (V->getType()->isPointerTy()) {
								inputDepValues.insert(V);
								inserted = true;
								//errs() << "Input    " << *V << "\n";
							}
						}
						if (inserted) {
							if (MDNode *mdn = I->getMetadata("dbg")) {
								NumInputValues++;
								DILocation Loc(mdn);
								unsigned Line = Loc.getLineNumber();
								lineNo[V] = Line;
							}
						}
					}
				}
			}
		}
	}
	DEBUG(printer());
	return false;
}
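For context, a minimal consumer sketch follows: it shows how another legacy-pass-manager pass could request this analysis and read its results. The getInputDepValues() accessor and the include for the pass's header are assumptions for illustration; the snippet above only exposes the inputDepValues member and the printer() debug helper.

// Hedged sketch of an InputDep client (illustrative only).
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"
// #include "InputDep.h"   // wherever the pass above is declared

using namespace llvm;

namespace {
  struct InputDepClient : public ModulePass {
    static char ID;
    InputDepClient() : ModulePass(ID) {}

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<InputDep>();   // run the input collection first
      AU.setPreservesAll();
    }

    virtual bool runOnModule(Module &M) {
      InputDep &Inputs = getAnalysis<InputDep>();
      // getInputDepValues() is a hypothetical accessor over inputDepValues.
      errs() << "input-dependent values: "
             << Inputs.getInputDepValues().size() << "\n";
      return false;   // pure consumer, the module is unchanged
    }
  };
}

char InputDepClient::ID = 0;
static RegisterPass<InputDepClient> X("inputdep-client",
                                      "Print number of input-dependent values");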
Example No. 20
// Check to see if this function returns one or more constants. If so, replace
// all callers that use those return values with the constant value. This will
// leave in the actual return values and instructions, but deadargelim will
// clean that up.
//
// Additionally if a function always returns one of its arguments directly,
// callers will be updated to use the value they pass in directly instead of
// using the return value.
bool IPCP::PropagateConstantReturn(Function &F) {
  if (F.getReturnType()->isVoidTy())
    return false; // No return value.

  // If this function could be overridden later in the link stage, we can't
  // propagate information about its results into callers.
  if (F.mayBeOverridden())
    return false;
    
  // Check to see if this function returns a constant.
  SmallVector<Value *,4> RetVals;
  StructType *STy = dyn_cast<StructType>(F.getReturnType());
  if (STy)
    for (unsigned i = 0, e = STy->getNumElements(); i < e; ++i) 
      RetVals.push_back(UndefValue::get(STy->getElementType(i)));
  else
    RetVals.push_back(UndefValue::get(F.getReturnType()));

  unsigned NumNonConstant = 0;
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
      for (unsigned i = 0, e = RetVals.size(); i != e; ++i) {
        // Already found conflicting return values?
        Value *RV = RetVals[i];
        if (!RV)
          continue;

        // Find the returned value
        Value *V;
        if (!STy)
          V = RI->getOperand(0);
        else
          V = FindInsertedValue(RI->getOperand(0), i);

        if (V) {
          // Ignore undefs, we can change them into anything
          if (isa<UndefValue>(V))
            continue;
          
          // Try to see if all the rets return the same constant or argument.
          if (isa<Constant>(V) || isa<Argument>(V)) {
            if (isa<UndefValue>(RV)) {
              // No value found yet? Try the current one.
              RetVals[i] = V;
              continue;
            }
            // Returning the same value? Good.
            if (RV == V)
              continue;
          }
        }
        // Different or no known return value? Don't propagate this return
        // value.
        RetVals[i] = 0;
        // All values non-constant? Stop looking.
        if (++NumNonConstant == RetVals.size())
          return false;
      }
    }

  // If we got here, the function returns at least one constant value.  Loop
  // over all users, replacing any uses of the return value with the returned
  // constant.
  bool MadeChange = false;
  for (Value::use_iterator UI = F.use_begin(), E = F.use_end(); UI != E; ++UI) {
    CallSite CS(*UI);
    Instruction* Call = CS.getInstruction();

    // Not a call instruction or a call instruction that's not calling F
    // directly?
    if (!Call || !CS.isCallee(UI))
      continue;
    
    // Call result not used?
    if (Call->use_empty())
      continue;

    MadeChange = true;

    if (STy == 0) {
      Value* New = RetVals[0];
      if (Argument *A = dyn_cast<Argument>(New))
        // Was an argument returned? Then find the corresponding argument in
        // the call instruction and use that.
        New = CS.getArgument(A->getArgNo());
      Call->replaceAllUsesWith(New);
      continue;
    }
   
    for (Value::use_iterator I = Call->use_begin(), E = Call->use_end();
         I != E;) {
      Instruction *Ins = cast<Instruction>(*I);

      // Increment now, so we can remove the use
      ++I;

      // Find the index of the retval to replace with
      int index = -1;
      if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Ins))
        if (EV->hasIndices())
          index = *EV->idx_begin();

      // If this use uses a specific return value, and we have a replacement,
      // replace it.
      if (index != -1) {
        Value *New = RetVals[index];
        if (New) {
          if (Argument *A = dyn_cast<Argument>(New))
            // Was an argument returned? Then find the corresponding argument in
            // the call instruction and use that.
            New = CS.getArgument(A->getArgNo());
          Ins->replaceAllUsesWith(New);
          Ins->eraseFromParent();
        }
      }
    }
  }

  if (MadeChange) ++NumReturnValProped;
  return MadeChange;
}
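As a usage note, here is a hedged sketch of driving this transformation over a module with the legacy PassManager; it assumes the usual createIPConstantPropagationPass() factory from llvm/Transforms/IPO.h in this era of the API (PropagateConstantReturn is one half of that pass, alongside argument propagation).

// Hedged driver sketch (illustrative only).
#include "llvm/Module.h"              // llvm/IR/Module.h on newer trees
#include "llvm/PassManager.h"
#include "llvm/Transforms/IPO.h"

using namespace llvm;

static void runIPConstProp(Module &M) {
  PassManager PM;
  PM.add(createIPConstantPropagationPass());   // includes return-value propagation
  PM.run(M);
}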
Example No. 21
/*
 * Clone a given function removing dead stores
 */
Function* DeadStoreEliminationPass::cloneFunctionWithoutDeadStore(Function *Fn,
    Instruction* caller, std::string suffix) {

  Function *NF = Function::Create(Fn->getFunctionType(), Fn->getLinkage());
  NF->copyAttributesFrom(Fn);

  // Copy the parameter names, to ease function inspection afterwards.
  Function::arg_iterator NFArg = NF->arg_begin();
  for (Function::arg_iterator Arg = Fn->arg_begin(), ArgEnd = Fn->arg_end();
      Arg != ArgEnd; ++Arg, ++NFArg) {
    NFArg->setName(Arg->getName());
  }

  // To avoid name collision, we should select another name.
  NF->setName(Fn->getName() + suffix);

  // Fill clone content
  ValueToValueMapTy VMap;
  SmallVector<ReturnInst*, 8> Returns;
  Function::arg_iterator NI = NF->arg_begin();
  for (Function::arg_iterator I = Fn->arg_begin();
      NI != NF->arg_end(); ++I, ++NI) {
    VMap[I] = NI;
  }
  CloneAndPruneFunctionInto(NF, Fn, VMap, false, Returns);

  // Remove dead stores
  std::set<Value*> deadArgs = deadArguments[caller];
  std::set<Value*> removeStoresTo;
  Function::arg_iterator NFArgIter = NF->arg_begin();
  for (Function::arg_iterator FnArgIter = Fn->arg_begin(); FnArgIter !=
      Fn->arg_end(); ++FnArgIter, ++NFArgIter) {
    Value *FnArg = FnArgIter;
    if (deadArgs.count(FnArg)) {
      removeStoresTo.insert(NFArgIter);
    }
  }
  std::vector<Instruction*> toRemove;
  for (Function::iterator BB = NF->begin(); BB != NF->end(); ++BB) {
    for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
      Instruction *inst = I;
      if (!isa<StoreInst>(inst)) continue;
      StoreInst *SI = dyn_cast<StoreInst>(inst);
      Value *ptrOp = SI->getPointerOperand();
      if (removeStoresTo.count(ptrOp)) {
        DEBUG(errs() << "will remove this store: " << *inst << "\n");
        toRemove.push_back(inst);
      }
    }
  }
  for (std::vector<Instruction*>::iterator it = toRemove.begin();
      it != toRemove.end(); ++it) {
    Instruction* inst = *it;
    inst->eraseFromParent();
    RemovedStores++;
  }

  // Insert the clone function before the original
  Fn->getParent()->getFunctionList().insert(Fn, NF);

  return NF;
}
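Since the clone keeps the original signature, redirecting a call site to it is just a matter of swapping the callee. A hedged sketch of what the surrounding pass presumably does with the caller after cloning:

// Hedged sketch: point an existing call at the store-pruned clone.
// Illustrative only; the real pass may also update attributes or metadata.
static void redirectCallToClone(CallInst *CI, Function *Clone) {
  CI->setCalledFunction(Clone);   // same FunctionType, so the arguments remain valid
}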
Example No. 22
/// IsFunctionMallocLike - A function is malloc-like if it returns either null
/// or a pointer that doesn't alias any other pointer visible to the caller.
bool FunctionAttrs::IsFunctionMallocLike(Function *F,
                              SmallPtrSet<Function*, 8> &SCCNodes) const {
  SmallSetVector<Value *, 8> FlowsToReturn;
  for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I)
    if (ReturnInst *Ret = dyn_cast<ReturnInst>(I->getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    if (Constant *C = dyn_cast<Constant>(RetVal)) {
      if (!C->isNullValue() && !isa<UndefValue>(C))
        return false;

      continue;
    }

    if (isa<Argument>(RetVal))
      return false;

    if (Instruction *RVI = dyn_cast<Instruction>(RetVal))
      switch (RVI->getOpcode()) {
        // Extend the analysis by looking upwards.
        case Instruction::BitCast:
        case Instruction::GetElementPtr:
          FlowsToReturn.insert(RVI->getOperand(0));
          continue;
        case Instruction::Select: {
          SelectInst *SI = cast<SelectInst>(RVI);
          FlowsToReturn.insert(SI->getTrueValue());
          FlowsToReturn.insert(SI->getFalseValue());
          continue;
        }
        case Instruction::PHI: {
          PHINode *PN = cast<PHINode>(RVI);
          for (int i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            FlowsToReturn.insert(PN->getIncomingValue(i));
          continue;
        }

        // Check whether the pointer came from an allocation.
        case Instruction::Alloca:
          break;
        case Instruction::Call:
        case Instruction::Invoke: {
          CallSite CS(RVI);
          if (CS.paramHasAttr(0, Attribute::NoAlias))
            break;
          if (CS.getCalledFunction() &&
              SCCNodes.count(CS.getCalledFunction()))
            break;
        } // fall-through
        default:
          return false;  // Did not come from an allocation.
      }

    if (PointerMayBeCaptured(RetVal, false, /*StoreCaptures=*/false))
      return false;
  }

  return true;
}
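The typical caller of a check like this marks the return value noalias when every function in the SCC qualifies. A hedged sketch of that step, using the era-appropriate Function::doesNotAlias/setDoesNotAlias calls (illustrative wiring, not the pass's exact code):

// Hedged sketch: tag a malloc-like function's return value as noalias.
static bool markNoAliasReturn(Function *F) {
  if (!F->getReturnType()->isPointerTy())
    return false;                 // only pointer returns can be noalias
  if (F->doesNotAlias(0))
    return false;                 // index 0 is the return value; already marked
  F->setDoesNotAlias(0);
  return true;
}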
Example No. 23
//
// Returns of float, double and complex need to be handled with a helper
// function.
//
static bool fixupFPReturnAndCall
  (Function &F, Module *M,  const MipsSubtarget &Subtarget) {
  bool Modified = false;
  LLVMContext &C = M->getContext();
  Type *MyVoid = Type::getVoidTy(C);
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      Instruction &Inst = *I;
      if (const ReturnInst *RI = dyn_cast<ReturnInst>(I)) {
        Value *RVal = RI->getReturnValue();
        if (!RVal) continue;
        //
        // If there is a return value and it needs a helper function,
        // figure out which one and add a call before the actual
        // return to this helper. The purpose of the helper is to move
        // floating point values from their soft float return mapping to
        // where they would have been mapped to in floating point registers.
        //
        Type *T = RVal->getType();
        FPReturnVariant RV = whichFPReturnVariant(T);
        if (RV == NoFPRet) continue;
        static const char* Helper[NoFPRet] =
          {"__mips16_ret_sf", "__mips16_ret_df", "__mips16_ret_sc",
           "__mips16_ret_dc"};
        const char *Name = Helper[RV];
        AttributeSet A;
        Value *Params[] = {RVal};
        Modified = true;
        //
        // These helper functions have a different calling ABI so
        // this __Mips16RetHelper indicates that so that later
        // during call setup, the proper call lowering to the helper
        // functions will take place.
        //
        A = A.addAttribute(C, AttributeSet::FunctionIndex,
                           "__Mips16RetHelper");
        A = A.addAttribute(C, AttributeSet::FunctionIndex,
                           Attribute::ReadNone);
        A = A.addAttribute(C, AttributeSet::FunctionIndex,
                           Attribute::NoInline);
        Value *F = (M->getOrInsertFunction(Name, A, MyVoid, T, NULL));
        CallInst::Create(F, Params, "", &Inst );
      } else if (const CallInst *CI = dyn_cast<CallInst>(I)) {
          const Value* V = CI->getCalledValue();
          const Type* T = nullptr;
          if (V) T = V->getType();
          const PointerType *PFT=nullptr;
          if (T) PFT = dyn_cast<PointerType>(T);
          const FunctionType *FT=nullptr;
          if (PFT) FT = dyn_cast<FunctionType>(PFT->getElementType());
          Function *F_ =  CI->getCalledFunction();
          if (FT && needsFPReturnHelper(*FT) &&
              !(F_ && isIntrinsicInline(F_))) {
            Modified=true;
            F.addFnAttr("saveS2");
          }
          if (F_ && !isIntrinsicInline(F_)) {
          // pic mode calls are handled by already defined
          // helper functions
            if (needsFPReturnHelper(*F_)) {
              Modified=true;
              F.addFnAttr("saveS2");
            }
            if (Subtarget.getRelocationModel() != Reloc::PIC_ ) {
              if (needsFPHelperFromSig(*F_)) {
                assureFPCallStub(*F_, M, Subtarget);
                Modified=true;
              }
            }
          }
      }
    }
  return Modified;
}
Example No. 24
bool EdgeProfiler::runOnModule(Module &M) {
  Function *Main = M.getFunction("main");
  if (Main == 0) {
    errs() << "WARNING: cannot insert edge profiling into a module"
           << " with no main function!\n";
    return false;  // No main, no instrumentation!
  }

  std::set<BasicBlock*> BlocksToInstrument;
  unsigned NumEdges = 0;
  for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
    if (F->isDeclaration()) continue;
    // Reserve space for (0,entry) edge.
    ++NumEdges;
    for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
      // Keep track of which blocks need to be instrumented.  We don't want to
      // instrument blocks that are added as the result of breaking critical
      // edges!
      BlocksToInstrument.insert(BB);
      NumEdges += BB->getTerminator()->getNumSuccessors();
    }
  }

  Type *ATy = ArrayType::get(Type::getInt32Ty(M.getContext()), NumEdges);
  GlobalVariable *Counters =
    new GlobalVariable(M, ATy, false, GlobalValue::InternalLinkage,
                       Constant::getNullValue(ATy), "EdgeProfCounters");
  NumEdgesInserted = NumEdges;

  // Instrument all of the edges...
  unsigned i = 0;
  for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
    if (F->isDeclaration()) continue;
    // Create counter for (0,entry) edge.
    IncrementCounterInBlock(&F->getEntryBlock(), i++, Counters);
    for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
      if (BlocksToInstrument.count(BB)) {  // Don't instrument inserted blocks
        // Okay, we have to add a counter of each outgoing edge.  If the
        // outgoing edge is not critical don't split it, just insert the counter
        // in the source or destination of the edge.
        TerminatorInst *TI = BB->getTerminator();
        for (unsigned s = 0, e = TI->getNumSuccessors(); s != e; ++s) {
          // If the edge is critical, split it.
          SplitCriticalEdge(TI, s, this);

          // Okay, we are guaranteed that the edge is no longer critical.  If we
          // only have a single successor, insert the counter in this block,
          // otherwise insert it in the successor block.
          if (TI->getNumSuccessors() == 1) {
            // Insert counter at the start of the block
            IncrementCounterInBlock(BB, i++, Counters, false);
          } else {
            // Insert counter at the start of the block
            IncrementCounterInBlock(TI->getSuccessor(s), i++, Counters);
          }
        }
      }
  }

  // Add the initialization call to main.
  InsertProfilingInitCall(Main, "llvm_start_edge_profiling", Counters);
  return true;
}
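IncrementCounterInBlock comes from the profiling utilities; conceptually it emits a load of Counters[i], an add of 1, and a store back, at either the start of the block or just before its terminator. A hedged reimplementation sketch against a ~3.x era API (illustrative, not the library helper itself):

// Hedged sketch of the counter bump that IncrementCounterInBlock performs.
static void incrementEdgeCounter(BasicBlock *BB, unsigned CounterNum,
                                 GlobalValue *Counters, bool AtBeginning = true) {
  LLVMContext &C = BB->getContext();
  Instruction *InsertPos =
      AtBeginning ? BB->getFirstNonPHI() : BB->getTerminator();

  // Constant GEP computing &Counters[0][CounterNum].
  std::vector<Constant*> Indices(2);
  Indices[0] = Constant::getNullValue(Type::getInt32Ty(C));
  Indices[1] = ConstantInt::get(Type::getInt32Ty(C), CounterNum);
  Constant *ElementPtr = ConstantExpr::getGetElementPtr(Counters, Indices);

  // Counters[CounterNum] += 1
  Value *OldVal = new LoadInst(ElementPtr, "OldEdgeCounter", InsertPos);
  Value *NewVal = BinaryOperator::Create(Instruction::Add, OldVal,
                                         ConstantInt::get(Type::getInt32Ty(C), 1),
                                         "NewEdgeCounter", InsertPos);
  new StoreInst(NewVal, ElementPtr, InsertPos);
}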
Example No. 25
/// splitLiveRangesAcrossInvokes - Each value that is live across an unwind edge
/// we spill into a stack location, guaranteeing that there is nothing live
/// across the unwind edge.  This process also splits all critical edges
/// coming out of invoke's.
/// FIXME: Move this function to a common utility file (Local.cpp?) so
/// both SjLj and LowerInvoke can use it.
void SjLjEHPass::
splitLiveRangesAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
  // First step, split all critical edges from invoke instructions.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    InvokeInst *II = Invokes[i];
    SplitCriticalEdge(II, 0, this);

    // FIXME: New EH - This if-condition will be always true in the new scheme.
    if (II->getUnwindDest()->isLandingPad()) {
      SmallVector<BasicBlock*, 2> NewBBs;
      SplitLandingPadPredecessors(II->getUnwindDest(), II->getParent(),
                                  ".1", ".2", this, NewBBs);
      LPadSuccMap[II] = *succ_begin(NewBBs[0]);
    } else {
      SplitCriticalEdge(II, 1, this);
    }

    assert(!isa<PHINode>(II->getNormalDest()->begin()) &&
           !isa<PHINode>(II->getUnwindDest()->begin()) &&
           "Critical edge splitting left single entry phi nodes?");
  }

  Function *F = Invokes.back()->getParent()->getParent();

  // To avoid having to handle incoming arguments specially, we lower each arg
  // to a copy instruction in the entry block.  This ensures that the argument
  // value itself cannot be live across the entry block.
  BasicBlock::iterator AfterAllocaInsertPt = F->begin()->begin();
  while (isa<AllocaInst>(AfterAllocaInsertPt) &&
        isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsertPt)->getArraySize()))
    ++AfterAllocaInsertPt;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI) {
    Type *Ty = AI->getType();
    // Aggregate types can't be cast, but are legal argument types, so we have
    // to handle them differently. We use an extract/insert pair as a
    // lightweight method to achieve the same goal.
    if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
      Instruction *EI = ExtractValueInst::Create(AI, 0, "",AfterAllocaInsertPt);
      Instruction *NI = InsertValueInst::Create(AI, EI, 0);
      NI->insertAfter(EI);
      AI->replaceAllUsesWith(NI);
      // Set the operand of the instructions back to the AllocaInst.
      EI->setOperand(0, AI);
      NI->setOperand(0, AI);
    } else {
      // This is always a no-op cast because we're casting AI to AI->getType()
      // so src and destination types are identical. BitCast is the only
      // possibility.
      CastInst *NC = new BitCastInst(
        AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
      AI->replaceAllUsesWith(NC);
      // Set the operand of the cast instruction back to the AllocaInst.
      // Normally it's forbidden to replace a CastInst's operand because it
      // could cause the opcode to reflect an illegal conversion. However,
      // we're replacing it here with the same value it was constructed with.
      // We do this because the above replaceAllUsesWith() clobbered the
      // operand, but we want this one to remain.
      NC->setOperand(0, AI);
    }
  }

  // Finally, scan the code looking for instructions with bad live ranges.
  for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
      // Ignore obvious cases we don't have to handle.  In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block.  Ignore them quickly.
      Instruction *Inst = II;
      if (Inst->use_empty()) continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->use_back())->getParent() == BB &&
          !isa<PHINode>(Inst->use_back())) continue;

      // If this is an alloca in the entry block, it's not a real register
      // value.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F->begin())
          continue;

      // Avoid iterator invalidation by copying users to a temporary vector.
      SmallVector<Instruction*,16> Users;
      for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
           UI != E; ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        if (User->getParent() != BB || isa<PHINode>(User))
          Users.push_back(User);
      }

      // Find all of the blocks that this value is live in.
      std::set<BasicBlock*> LiveBBs;
      LiveBBs.insert(Inst->getParent());
      while (!Users.empty()) {
        Instruction *U = Users.back();
        Users.pop_back();

        if (!isa<PHINode>(U)) {
          MarkBlocksLiveIn(U->getParent(), LiveBBs);
        } else {
          // Uses for a PHI node occur in their predecessor block.
          PHINode *PN = cast<PHINode>(U);
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
        }
      }

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
          NeedsSpill = true;
        }
      }

      // If we decided we need a spill, do it.
      // FIXME: Spilling this way is overkill, as it forces all uses of
      // the value to be reloaded from the stack slot, even those that aren't
      // in the unwind blocks. We should be more selective.
      if (NeedsSpill) {
        ++NumSpilled;
        DemoteRegToStack(*Inst, true);
      }
    }
}
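The MarkBlocksLiveIn helper used above is a backwards flood fill over predecessors. A minimal sketch matching the shape of the upstream helper (pred_begin/pred_end come from llvm/Support/CFG.h in this era):

// Hedged sketch: add BB and every block that can reach it to LiveBBs,
// stopping as soon as we hit a block we have already recorded.
static void MarkBlocksLiveIn(BasicBlock *BB, std::set<BasicBlock*> &LiveBBs) {
  if (!LiveBBs.insert(BB).second)
    return;   // already been here
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
    MarkBlocksLiveIn(*PI, LiveBBs);
}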
Example No. 26
bool klee::PhiCleanerPass::runOnFunction(Function &f) {
    bool changed = false;

    for (Function::iterator b = f.begin(), be = f.end(); b != be; ++b) {
        BasicBlock::iterator it = b->begin();

        if (it->getOpcode() == Instruction::PHI) {
            PHINode *reference = cast<PHINode>(it);

            std::set<Value*> phis;
            phis.insert(reference);

            unsigned numBlocks = reference->getNumIncomingValues();
            for (++it; isa<PHINode>(*it); ++it) {
                PHINode *pi = cast<PHINode>(it);

                assert(numBlocks == pi->getNumIncomingValues());

                // see if it is out of order
                unsigned i;
                for (i=0; i<numBlocks; i++)
                    if (pi->getIncomingBlock(i) != reference->getIncomingBlock(i))
                        break;

                if (i!=numBlocks) {
                    std::vector<Value*> values;
                    values.resize(numBlocks); // resize (not reserve): we assign by index below
                    for (unsigned i=0; i<numBlocks; i++)
                        values[i] = pi->getIncomingValueForBlock(reference->getIncomingBlock(i));
                    for (unsigned i=0; i<numBlocks; i++) {
                        pi->setIncomingBlock(i, reference->getIncomingBlock(i));
                        pi->setIncomingValue(i, values[i]);
                    }
                    changed = true;
                }

                // see if it uses any previously defined phi nodes
                for (i=0; i<numBlocks; i++) {
                    Value *value = pi->getIncomingValue(i);

                    if (phis.find(value) != phis.end()) {
                        // fix by making a "move" at the end of the incoming block
                        // to a new temporary, which is thus known not to be a phi
                        // result. we could be somewhat more efficient about this
                        // by sharing temps and by reordering phi instructions so
                        // this isn't completely necessary, but in the end this is
                        // just a pathological case which does not occur very
                        // often.
                        Instruction *tmp =
                            new BitCastInst(value,
                                            value->getType(),
                                            value->getName() + ".phiclean",
                                            pi->getIncomingBlock(i)->getTerminator());
                        pi->setIncomingValue(i, tmp);
                        changed = true;
                    }
                }

                phis.insert(pi);
            }
        }
    }

    return changed;
}
Example No. 27
bool PruneEH::runOnSCC(const std::vector<CallGraphNode *> &SCC) {
  CallGraph &CG = getAnalysis<CallGraph>();
  bool MadeChange = false;

  // First pass, scan all of the functions in the SCC, simplifying them
  // according to what we know.
  for (unsigned i = 0, e = SCC.size(); i != e; ++i)
    if (Function *F = SCC[i]->getFunction())
      MadeChange |= SimplifyFunction(F);

  // Next, check to see if any callees might throw or if there are any external
  // functions in this SCC: if so, we cannot prune any functions in this SCC.
  // If this SCC includes the unwind instruction, we KNOW it throws, so
  // obviously the SCC might throw.
  //
  bool SCCMightUnwind = false, SCCMightReturn = false;
  for (unsigned i = 0, e = SCC.size();
       (!SCCMightUnwind || !SCCMightReturn) && i != e; ++i) {
    Function *F = SCC[i]->getFunction();
    if (F == 0 || (F->isDeclaration() && !F->getIntrinsicID())) {
      SCCMightUnwind = true;
      SCCMightReturn = true;
    } else {
      if (F->isDeclaration())
        SCCMightReturn = true;

      // Check to see if this function performs an unwind or calls an
      // unwinding function.
      for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
        if (isa<UnwindInst>(BB->getTerminator())) {  // Uses unwind!
          SCCMightUnwind = true;
        } else if (isa<ReturnInst>(BB->getTerminator())) {
          SCCMightReturn = true;
        }

        // Invoke instructions don't allow unwinding to continue, so we are
        // only interested in call instructions.
        if (!SCCMightUnwind)
          for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
            if (CallInst *CI = dyn_cast<CallInst>(I)) {
              if (Function *Callee = CI->getCalledFunction()) {
                CallGraphNode *CalleeNode = CG[Callee];
                // If the callee is outside our current SCC, or if it is not
                // known to throw, then we might throw also.
                if (std::find(SCC.begin(), SCC.end(), CalleeNode) == SCC.end()&&
                    !DoesNotUnwind.count(CalleeNode)) {
                  SCCMightUnwind = true;
                  break;
                }
              } else {
                // Indirect call, it might throw.
                SCCMightUnwind = true;
                break;
              }
            }
        if (SCCMightUnwind && SCCMightReturn) break;
      }
    }
  }

  // If the SCC doesn't unwind or doesn't throw, note this fact.
  if (!SCCMightUnwind)
    for (unsigned i = 0, e = SCC.size(); i != e; ++i)
      DoesNotUnwind.insert(SCC[i]);
  if (!SCCMightReturn)
    for (unsigned i = 0, e = SCC.size(); i != e; ++i)
      DoesNotReturn.insert(SCC[i]);

  for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
    // Convert any invoke instructions to non-throwing functions in this node
    // into call instructions with a branch.  This makes the exception blocks
    // dead.
    if (Function *F = SCC[i]->getFunction())
      MadeChange |= SimplifyFunction(F);
  }

  return MadeChange;
}
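The invoke-to-call rewrite that SimplifyFunction performs for callees now known not to unwind looks roughly like LLVM's generic changeToCall utility. A hedged sketch (illustrative; the real pass also keeps the call graph up to date):

// Hedged sketch: lower an invoke of a non-unwinding callee to a plain call.
static void lowerInvokeToCall(InvokeInst *II) {
  // Drop the three non-argument operands (callee and the two destination blocks).
  SmallVector<Value*, 8> Args(II->op_begin(), II->op_end() - 3);
  CallInst *Call = CallInst::Create(II->getCalledValue(), Args, "", II);
  Call->takeName(II);
  Call->setCallingConv(II->getCallingConv());
  Call->setAttributes(II->getAttributes());
  II->replaceAllUsesWith(Call);

  // Branch to the normal destination and make the unwind edge dead.
  BasicBlock *UnwindDest = II->getUnwindDest();
  BranchInst::Create(II->getNormalDest(), II);
  UnwindDest->removePredecessor(II->getParent());
  II->eraseFromParent();
}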
Example No. 28
Function* Decompiler::decompileFunction(unsigned Address) {
  // Check that Address is inside the current section.
  // TODO: Find a better way to do this check. What we really care about is
  // avoiding reads to library calls and areas of memory we can't "see".
  const object::SectionRef Sect = Dis->getCurrentSection();
  uint64_t SectStart, SectEnd;
  Sect.getAddress(SectStart);
  Sect.getSize(SectEnd);
  SectEnd += SectStart;
  if (Address < SectStart || Address > SectEnd) {
    errs() << "Address out of bounds for section (is this a library call?): "
           << format("%1" PRIx64, Address) << "\n";
    return NULL;
  }

  MachineFunction *MF = Dis->disassemble(Address);

  // Get Function Name
  // TODO: Determine Function Type
  FunctionType *FType = FunctionType::get(Type::getPrimitiveType(*Context,
      Type::VoidTyID), false);
  Function *F =
    cast<Function>(Mod->getOrInsertFunction(MF->getName(), FType));

  if (!F->empty()) {
    return F;
  }

  // Create a basic block to hold entry point (alloca) information
  BasicBlock *entry = getOrCreateBasicBlock("entry", F);

  // For each basic block
  MachineFunction::iterator BI = MF->begin(), BE = MF->end();
  while (BI != BE) {
    // Add branch from "entry"
    if (BI == MF->begin()) {
      entry->getInstList().push_back(
        BranchInst::Create(getOrCreateBasicBlock(BI->getName(), F)));
    } else {
      getOrCreateBasicBlock(BI->getName(), F);
    }
    ++BI;
  }

  BI = MF->begin();
  while (BI != BE) {
    if (decompileBasicBlock(BI, F) == NULL) {
      printError("Unable to decompile basic block!");
    }
    ++BI;
  }

  // During Decompilation, did any "in-between" basic blocks get created?
  // Nothing ever splits the entry block, so we skip it.
  for (Function::iterator I = ++F->begin(), E = F->end(); I != E; ++I) {
    if (!(I->empty())) {
      continue;
    }
    // Right now, the only way to get the right offset is to parse its name;
    // it sucks, but it works.
    StringRef Name = I->getName();
    if (Name == "end" || Name == "entry") continue; // these can be empty

    size_t Off = F->getName().size() + 1;
    size_t Size = Name.size() - Off;
    StringRef BBAddrStr = Name.substr(Off, Size);
    unsigned long long BBAddr;
    getAsUnsignedInteger(BBAddrStr, 10, BBAddr);
    BBAddr += Address;
    DEBUG(errs() << "Split Target: " << Name << "\t Address: "
                 << BBAddr << "\n");
    // split Block at AddrStr
    Function::iterator SB;      // Split basic block
    BasicBlock::iterator SI, SE;    // Split instruction
    // Note the ++, nothing ever splits the entry block.
    for (SB = ++F->begin(); SB != E; ++SB) {
      DEBUG(outs() << "SB: " << SB->getName()
        << "\tRange: " << Dis->getDebugOffset(SB->begin()->getDebugLoc())
        << " " << Dis->getDebugOffset(SB->getTerminator()->getDebugLoc())
        << "\n");
      if (SB->empty() || BBAddr < getBasicBlockAddress(SB)
        || BBAddr > Dis->getDebugOffset(SB->getTerminator()->getDebugLoc())) {
        continue;
      }
      // Reorder instructions based on Debug Location
      sortBasicBlock(SB);
      DEBUG(errs() << "Found Split Block: " << SB->getName() << "\n");
      // Find iterator to split on.
      for (SI = SB->begin(), SE = SB->end(); SI != SE; ++SI) {
        // outs() << "SI: " << SI->getDebugLoc().getLine() << "\n";
        if (Dis->getDebugOffset(SI->getDebugLoc()) == BBAddr) break;
        if (Dis->getDebugOffset(SI->getDebugLoc()) > BBAddr) {
          errs() << "Could not find address inside basic block!\n"
                 << "SI: " << Dis->getDebugOffset(SI->getDebugLoc()) << "\n"
                 << "BBAddr: " << BBAddr << "\n";
          break;
        }
      }
      break;
    }
    if (!SB || SI == SE || SB == E) {
      errs() << "Decompiler: Failed to find instruction offset in function!\n";
      continue;
    }
    // outs() << SB->getName() << " " << SI->getName() << "\n";
    // outs() << "Creating Block...";
    splitBasicBlockIntoBlock(SB, SI, I);
  }

  // Clean up unnecessary stores and loads
  FunctionPassManager FPM(Mod);
  // FPM.add(createPromoteMemoryToRegisterPass()); // See Scalar.h for more.
  FPM.add(createTypeRecoveryPass());
  FPM.run(*F);

  return F;
}
Example No. 29
/**
 * Generate code for the new function NF by cloning the body of F and
 * remapping the OpenMP thread-count/thread-ID queries onto NF's arguments.
 */
void HeteroOMPTransform::gen_code_per_f (Function* NF, Function* F, Instruction *max_threads){
	
	Function::arg_iterator FI = F->arg_begin();
	Argument *ctxname = &*FI;

	Function::arg_iterator DestI = NF->arg_begin();
	DestI->setName(ctxname->getName()); 
	Argument *ctx_name = &(*DestI);
	DestI++;
	DestI->setName("tid");
	Argument *num_iters = &(*DestI);

#ifdef EXPLICIT_REWRITE
	DenseMap<const Value*, Value *> ValueMap;
#else
	ValueToValueMapTy ValueMap;
#endif

	//get the old basic block and create a new one
	Function::const_iterator BI = F->begin();
	const BasicBlock &FB = *BI;
	BasicBlock *NFBB = BasicBlock::Create(FB.getContext(), "", NF);
	if (FB.hasName()){
		NFBB->setName(FB.getName());
	}
	ValueMap[&FB] = NFBB;

	//ValueMap[numiters] = num_iters;
	ValueMap[ctxname] = ctx_name;

#if EXPLICIT_REWRITE
	for (BasicBlock::const_iterator II = FB.begin(), IE = FB.end(); II != IE; ++II) {
		Instruction *NFInst = II->clone(/*F->getContext()*/);
		//	DEBUG(dbgs()<<*II<<"\n");
		if (II->hasName()) NFInst->setName(II->getName());
		const Instruction *FInst = &(*II);
		rewrite_instruction((Instruction *)FInst, NFInst, ValueMap);
		NFBB->getInstList().push_back(NFInst);
		ValueMap[II] = NFInst;
	}
	BI++;

	for (Function::const_iterator /*BI=F->begin(),*/BE = F->end();BI != BE; ++BI) {
		const BasicBlock &FBB = *BI;
		BasicBlock *NFBB = BasicBlock::Create(FBB.getContext(), "", NF);
		ValueMap[&FBB] = NFBB;
		if (FBB.hasName()){
			NFBB->setName(FBB.getName());
			//DEBUG(dbgs()<<NFBB->getName()<<"\n");
		}
		for (BasicBlock::const_iterator II = FBB.begin(), IE = FBB.end(); II != IE; ++II) {
			Instruction *NFInst = II->clone(/*F->getContext()*/);
			if (II->hasName()) NFInst->setName(II->getName());
			const Instruction *FInst = &(*II);
			rewrite_instruction((Instruction *)FInst, NFInst, ValueMap);
			NFBB->getInstList().push_back(NFInst);
			ValueMap[II] = NFInst;
		}
	}
	// Remap the instructions again to take care of forward jumps
	for (Function::iterator BB = NF->begin(), BE=NF->end(); BB != BE; ++ BB) {
		for (BasicBlock::iterator II = BB->begin(); II != BB->end(); ++II){
			int opIdx = 0;
			//DEBUG(dbgs()<<*II<<"\n");
			for (User::op_iterator i = II->op_begin(), e = II->op_end(); i != e; ++i, opIdx++) {
				Value *V = *i;
				if (ValueMap[V] != NULL) {
					II->setOperand(opIdx, ValueMap[V]);
				}
			}
		}
	}
#else
	SmallVector<ReturnInst*, 8> Returns;  // Ignore returns cloned.
	CloneFunctionInto(NF, F, ValueMap, false, Returns, "");
#endif

	//max_threads->dump();
	/* Remap openmp omp_num_threads() and omp_thread_num() */ 
	/*
	 * define internal void @_Z20initialize_variablesiPfS_.omp_fn.4(i8* nocapture %.omp_data_i) nounwind ssp {
     * entry:
     * %0 = bitcast i8* %.omp_data_i to i32*           ; <i32*> [#uses=1]
     * %1 = load i32* %0, align 8                      ; <i32> [#uses=2]
     * %2 = tail call i32 @omp_get_num_threads() nounwind readnone ; <i32> [#uses=2]
     * %3 = tail call i32 @omp_get_thread_num() nounwind readnone ; <i32> [#uses=2]
	   %4 = sdiv i32 %1, %2
	   %5 = mul nsw i32 %4, %2
       %6 = icmp ne i32 %5, %1
       %7 = zext i1 %6 to i32
	 */
	vector<Instruction *> toDelete;
	for (Function::iterator BB = NF->begin(), BE=NF->end(); BB != BE; ++ BB) {
		for (BasicBlock::iterator II = BB->begin(); II != BB->end(); ++II){
			if (isa<CallInst>(II)) {
				CallSite CI(cast<Instruction>(II));
				if (CI.getCalledFunction() != NULL){ 
					string called_func_name = CI.getCalledFunction()->getName();
					if (called_func_name == OMP_GET_NUM_THREADS_NAME && CI.arg_size() == 0) {
						II->replaceAllUsesWith(ValueMap[max_threads]);
						toDelete.push_back(II);
					}
					else if (called_func_name == OMP_GET_THREAD_NUM_NAME && CI.arg_size() == 0) {
						II->replaceAllUsesWith(num_iters);
						toDelete.push_back(II);
					}
				}
			}
		}
	}


	/* Delete the last branch instruction of the first basic block -- Assuming it is safe */
	Function::iterator nfBB = NF->begin();
	TerminatorInst *lastI = nfBB->getTerminator();
	BranchInst *bI;
	BasicBlock *returnBlock;
	if ((bI = dyn_cast<BranchInst>(lastI)) && bI->isConditional() && 
		(returnBlock = bI->getSuccessor(1)) && 
		(returnBlock->getName() == "return")) {
		/* modify to a unconditional branch to next basic block and not return */
		Instruction *bbI = BranchInst::Create(bI->getSuccessor(0),lastI);
		bbI->dump();
		toDelete.push_back(lastI);
	}

	//NF->dump();
	while(!toDelete.empty()) {
		Instruction *g = toDelete.back();
		//g->replaceAllUsesWith(UndefValue::get(g->getType()));
		toDelete.pop_back();
		g->eraseFromParent();
	}

	//NF->dump();
}
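rewrite_instruction itself is not shown here; judging from the explicit forward-jump remapping loop above, it presumably patches each operand of the cloned instruction through the old-to-new value map. A hedged sketch (the name and signature below are assumptions):

// Hedged sketch of the operand remapping that rewrite_instruction presumably does.
static void rewrite_instruction_sketch(Instruction *Old, Instruction *New,
                                       DenseMap<const Value*, Value*> &ValueMap) {
  for (unsigned OpIdx = 0, E = Old->getNumOperands(); OpIdx != E; ++OpIdx) {
    Value *Op = Old->getOperand(OpIdx);
    DenseMap<const Value*, Value*>::iterator It = ValueMap.find(Op);
    if (It != ValueMap.end())
      New->setOperand(OpIdx, It->second);   // operand was defined earlier in the clone
  }
}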
Example No. 30
void CallTargetFinder<dsa>::findIndTargets(Module &M)
{
  dsa* T = &getAnalysis<dsa>();
  const DSCallGraph & callgraph = T->getCallGraph();
  DSGraph* G = T->getGlobalsGraph();
  DSGraph::ScalarMapTy& SM = G->getScalarMap();
  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
    if (!I->isDeclaration())
      for (Function::iterator F = I->begin(), FE = I->end(); F != FE; ++F)
        for (BasicBlock::iterator B = F->begin(), BE = F->end(); B != BE; ++B)
          if (isa<CallInst>(B) || isa<InvokeInst>(B)) {
            CallSite cs(B);
            AllSites.push_back(cs);
            Function* CF = cs.getCalledFunction();

            if (isa<UndefValue>(cs.getCalledValue())) continue;
            if (isa<InlineAsm>(cs.getCalledValue())) continue;

            //
            // If the called function is casted from one function type to
            // another, peer into the cast instruction and pull out the actual
            // function being called.
            //
            if (!CF)
              CF = dyn_cast<Function>(cs.getCalledValue()->stripPointerCasts());

            if (!CF) {
              Value * calledValue = cs.getCalledValue()->stripPointerCasts();
              if (isa<ConstantPointerNull>(calledValue)) {
                ++DirCall;
                CompleteSites.insert(cs);
              } else {
                IndCall++;

                DSCallGraph::callee_iterator csi = callgraph.callee_begin(cs),
                                   cse = callgraph.callee_end(cs);
                while(csi != cse) {
                  const Function *F = *csi;
                  DSCallGraph::scc_iterator sccii = callgraph.scc_begin(F),
                    sccee = callgraph.scc_end(F);
                  for(;sccii != sccee; ++sccii) {
                    DSGraph::ScalarMapTy::const_iterator I = SM.find(SM.getLeaderForGlobal(*sccii));
                    if (I != SM.end()) {
                      IndMap[cs].push_back (*sccii);
                    }
                  }
                  ++csi;
                }
                const Function *F1 = (cs).getInstruction()->getParent()->getParent();
                F1 = callgraph.sccLeader(&*F1);
                
                DSCallGraph::scc_iterator sccii = callgraph.scc_begin(F1),
                  sccee = callgraph.scc_end(F1);
                for(;sccii != sccee; ++sccii) {
                  DSGraph::ScalarMapTy::const_iterator I = SM.find(SM.getLeaderForGlobal(*sccii));
                  if (I != SM.end()) {
                    IndMap[cs].push_back (*sccii);
                  }
                }

                DSNode* N = T->getDSGraph(*cs.getCaller())
                  ->getNodeForValue(cs.getCalledValue()).getNode();
                assert (N && "CallTarget: findIndTargets: No DSNode!");

                if (!N->isIncompleteNode() && !N->isExternalNode() && IndMap[cs].size()) {
                  CompleteSites.insert(cs);
                  ++CompleteInd;
                } 
                if (!N->isIncompleteNode() && !N->isExternalNode() && !IndMap[cs].size()) {
                  ++CompleteEmpty;
                  DEBUG(errs() << "Call site empty: '"
                                << cs.getInstruction()->getName()
                                << "' In '"
                                << cs.getInstruction()->getParent()->getParent()->getName()
                                << "'\n");
                }
              }
            } else {
              ++DirCall;
              IndMap[cs].push_back(CF);
              CompleteSites.insert(cs);
            }
          }
}