Code Example #1
File: InputDep.cpp Project: dtzWill/ecosoc
/*
 * Main args are always input
 * Functions currently considered as input functions:
 * scanf
 * fscanf
 * gets
 * fgets
 * fread
 * fgetc
 * getc
 * getchar
 * recv
 * recvmsg
 * read
 * recvfrom
 * fread
 */
bool InputDep::runOnModule(Module &M) {
	//	DEBUG (errs() << "Function " << F.getName() << "\n";);
	NumInputValues = 0;
	bool inserted;
	Function* main = M.getFunction("main");
	if (main) {
		MDNode *mdn = main->begin()->begin()->getMetadata("dbg");
		for (Function::arg_iterator Arg = main->arg_begin(), aEnd =
				main->arg_end(); Arg != aEnd; Arg++) {
			inputDepValues.insert(Arg);
			NumInputValues++;
			if (mdn) {
				DILocation Loc(mdn);
				unsigned Line = Loc.getLineNumber();
				lineNo[Arg] = Line-1; // educated guess: line numbers only come from instructions, so assume main is declared one line before its first instruction
			}
		}


	}
	for (Module::iterator F = M.begin(), eM = M.end(); F != eM; ++F) {
		for (Function::iterator BB = F->begin(), e = F->end(); BB != e; ++BB) {
			for (BasicBlock::iterator I = BB->begin(), ee = BB->end(); I != ee; ++I) {
				if (CallInst *CI = dyn_cast<CallInst>(I)) {
					Function *Callee = CI->getCalledFunction();
					if (Callee) {
						Value* V;
						inserted = false;
						StringRef Name = Callee->getName();
						if (Name.equals("main")) {
							errs() << "main\n";
							V = CI->getArgOperand(1); //char* argv[]
							inputDepValues.insert(V);
							inserted = true;
							//errs() << "Input    " << *V << "\n";
						}
						if (Name.equals("__isoc99_scanf") || Name.equals(
								"scanf")) {
							for (unsigned i = 1, eee = CI->getNumArgOperands(); i
									!= eee; ++i) { // skip format string (i=1)
								V = CI->getArgOperand(i);
								if (V->getType()->isPointerTy()) {
									inputDepValues.insert(V);
									inserted = true;
									//errs() << "Input    " << *V << "\n";
								}
							}
						} else if (Name.equals("__isoc99_fscanf")
								|| Name.equals("fscanf")) {
							for (unsigned i = 2, eee = CI->getNumArgOperands(); i
									!= eee; ++i) { // skip file pointer and format string (i=2)
								V = CI->getArgOperand(i);
								if (V->getType()->isPointerTy()) {
									inputDepValues.insert(V);
									inserted = true;
									//errs() << "Input    " << *V << "\n";
								}
							}
						} else if ((Name.equals("gets") || Name.equals("fgets")
								|| Name.equals("fread"))
								|| Name.equals("getwd")
								|| Name.equals("getcwd")) {
							V = CI->getArgOperand(0); //the first argument receives the input for these functions
							if (V->getType()->isPointerTy()) {
								inputDepValues.insert(V);
								inserted = true;
								//errs() << "Input    " << *V << "\n";
							}
						} else if ((Name.equals("fgetc") || Name.equals("getc")
								|| Name.equals("getchar"))) {
							inputDepValues.insert(CI);
							inserted = true;
							//errs() << "Input    " << *CI << "\n";
						} else if (Name.equals("recv")
								|| Name.equals("recvmsg")
								|| Name.equals("read")) {
							Value* V = CI->getArgOperand(1);
							if (V->getType()->isPointerTy()) {
								inputDepValues.insert(V);
								inserted = true;
								//errs() << "Input    " << *V << "\n";
							}
						} else if (Name.equals("recvfrom")) {
							V = CI->getArgOperand(1);
							if (V->getType()->isPointerTy()) {
								inputDepValues.insert(V);
								inserted = true;
								//errs() << "Input    " << *V << "\n";
							}
							V = CI->getArgOperand(4);
							if (V->getType()->isPointerTy()) {
								inputDepValues.insert(V);
								inserted = true;
								//errs() << "Input    " << *V << "\n";
							}
						}
						if (inserted) {
							if (MDNode *mdn = I->getMetadata("dbg")) {
								NumInputValues++;
								DILocation Loc(mdn);
								unsigned Line = Loc.getLineNumber();
								lineNo[V] = Line;
							}
						}
					}
				}
			}
		}
	}
	DEBUG(printer());
	return false;
}
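A minimal sketch (not part of the dtzWill/ecosoc sources) of just the name-matching step that the branches above repeat, using only calls that already appear in runOnModule. The helper name isInputFunctionCall is invented here, and header paths and StringRef details vary slightly across LLVM versions.

#include "llvm/IR/Instructions.h"  // "llvm/Instructions.h" on pre-3.3 LLVM

// True if CI calls one of the input routines the pass treats as a taint source.
static bool isInputFunctionCall(llvm::CallInst *CI) {
  llvm::Function *Callee = CI->getCalledFunction();
  if (!Callee)
    return false; // indirect call: no name to match
  llvm::StringRef Name = Callee->getName();
  return Name.equals("__isoc99_scanf") || Name.equals("scanf") ||
         Name.equals("__isoc99_fscanf") || Name.equals("fscanf") ||
         Name.equals("gets") || Name.equals("fgets") || Name.equals("fread") ||
         Name.equals("fgetc") || Name.equals("getc") || Name.equals("getchar") ||
         Name.equals("recv") || Name.equals("recvmsg") || Name.equals("read") ||
         Name.equals("recvfrom") || Name.equals("getwd") || Name.equals("getcwd");
}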
Code Example #2
File: StatsTracker.cpp Project: Icefroge/cloud9
void StatsTracker::computeReachableUncovered() {
  KModule *km = executor.kmodule;
  Module *m = km->module;
  static bool init = true;
  const InstructionInfoTable &infos = *km->infos;
  StatisticManager &sm = *theStatisticManager;
  
  if (init) {
    init = false;

    // Compute call targets. It would be nice to use alias information
    // instead of assuming all indirect calls hit all escaping
    // functions, eh?
    for (Module::iterator fnIt = m->begin(), fn_ie = m->end(); 
         fnIt != fn_ie; ++fnIt) {
      for (Function::iterator bbIt = fnIt->begin(), bb_ie = fnIt->end(); 
           bbIt != bb_ie; ++bbIt) {
        for (BasicBlock::iterator it = bbIt->begin(), ie = bbIt->end(); 
             it != ie; ++it) {
          if (isa<CallInst>(it) || isa<InvokeInst>(it)) {
            if (isa<InlineAsm>(it->getOperand(0))) {
              // We can never call through here so assume no targets
              // (which should be correct anyhow).
              callTargets.insert(std::make_pair(it,
                                                std::vector<Function*>()));
            } else if (Function *target = getDirectCallTarget(it)) {
              callTargets[it].push_back(target);
            } else {
              callTargets[it] = 
                std::vector<Function*>(km->escapingFunctions.begin(),
                                       km->escapingFunctions.end());
            }
          }
        }
      }
    }

    // Compute function callers as the reflection of callTargets.
    for (calltargets_ty::iterator it = callTargets.begin(), 
           ie = callTargets.end(); it != ie; ++it)
      for (std::vector<Function*>::iterator fit = it->second.begin(), 
             fie = it->second.end(); fit != fie; ++fit) 
        functionCallers[*fit].push_back(it->first);

    // Initialize minDistToReturn to shortest paths through
    // functions. 0 is unreachable.
    std::vector<Instruction *> instructions;
    for (Module::iterator fnIt = m->begin(), fn_ie = m->end(); 
         fnIt != fn_ie; ++fnIt) {
      if (fnIt->isDeclaration()) {
        if (fnIt->doesNotReturn()) {
          functionShortestPath[fnIt] = 0;
        } else {
          functionShortestPath[fnIt] = 1; // whatever
        }
        continue;
      } else {
        functionShortestPath[fnIt] = 0;
      }

      KFunction *kf = km->functionMap[fnIt];

      for (unsigned i = 0; i < kf->numInstructions; ++i) {
        Instruction *inst = kf->instrPostOrder[i]->inst;
        instructions.push_back(inst);
        sm.setIndexedValue(stats::minDistToReturn,
                           kf->instrPostOrder[i]->info->id,
                           isa<ReturnInst>(inst));
      }
    }
    
    // I'm so lazy it's not even worklisted.
    bool changed;
    do {
      changed = false;
      for (std::vector<Instruction*>::iterator it = instructions.begin(),
             ie = instructions.end(); it != ie; ++it) {
        Instruction *inst = *it;
        unsigned bestThrough = 0;

        if (isa<CallInst>(inst) || isa<InvokeInst>(inst)) {
          std::vector<Function*> &targets = callTargets[inst];
          for (std::vector<Function*>::iterator fnIt = targets.begin(),
                 ie = targets.end(); fnIt != ie; ++fnIt) {
            uint64_t dist = functionShortestPath[*fnIt];
            if (dist) {
              dist = 1+dist; // count instruction itself
              if (bestThrough==0 || dist<bestThrough)
                bestThrough = dist;
            }
          }
        } else {
          bestThrough = 1;
        }
       
        if (bestThrough) {
          unsigned id = infos.getInfo(*it).id;
          uint64_t best, cur = best = sm.getIndexedValue(stats::minDistToReturn, id);
          std::vector<Instruction*> succs = getSuccs(*it);
          for (std::vector<Instruction*>::iterator it2 = succs.begin(),
                 ie = succs.end(); it2 != ie; ++it2) {
            uint64_t dist = sm.getIndexedValue(stats::minDistToReturn,
                                               infos.getInfo(*it2).id);
            if (dist) {
              uint64_t val = bestThrough + dist;
              if (best==0 || val<best)
                best = val;
            }
          }
          if (best != cur) {
            sm.setIndexedValue(stats::minDistToReturn, id, best);
            changed = true;

            // Update shortest path if this is the entry point.
            Function *f = inst->getParent()->getParent();
            if (inst==f->begin()->begin())
              functionShortestPath[f] = best;
          }
        }
      }
    } while (changed);
  }

  // compute minDistToUncovered, 0 is unreachable
  std::vector<Instruction *> instructions;
  std::vector<unsigned> ids;

  for (Module::iterator fnIt = m->begin(), fn_ie = m->end(); 
       fnIt != fn_ie; ++fnIt) {
    if (fnIt->isDeclaration())
      continue;

    KFunction *kf = km->functionMap[fnIt];

    for (unsigned i = 0; i < kf->numInstructions; ++i) {
      Instruction *inst = kf->instrPostOrder[i]->inst;
      unsigned id = kf->instrPostOrder[i]->info->id;
      instructions.push_back(inst);
      ids.push_back(id);
      sm.setIndexedValue(stats::minDistToGloballyUncovered,
                         id,
                         sm.getIndexedValue(stats::globallyUncoveredInstructions, id));
    }
  }
  
  // I'm so lazy it's not even worklisted.
  bool changed;
  do {
    changed = false;
    for (unsigned i = 0; i < instructions.size(); ++i) {
      Instruction *inst = instructions[i];
      unsigned id = ids[i];

      uint64_t best, cur = best = sm.getIndexedValue(stats::minDistToGloballyUncovered, 
                                                     id);
      unsigned bestThrough = 0;
      
      if (isa<CallInst>(inst) || isa<InvokeInst>(inst)) {
        std::vector<Function*> &targets = callTargets[inst];
        for (std::vector<Function*>::iterator fnIt = targets.begin(),
               ie = targets.end(); fnIt != ie; ++fnIt) {
          uint64_t dist = functionShortestPath[*fnIt];
          if (dist) {
            dist = 1+dist; // count instruction itself
            if (bestThrough==0 || dist<bestThrough)
              bestThrough = dist;
          }

          if (!(*fnIt)->isDeclaration()) {
            uint64_t calleeDist = sm.getIndexedValue(stats::minDistToGloballyUncovered,
                                                     infos.getFunctionInfo(*fnIt).id);
            if (calleeDist) {
              calleeDist = 1+calleeDist; // count instruction itself
              if (best==0 || calleeDist<best)
                best = calleeDist;
            }
          }
        }
      } else {
        bestThrough = 1;
      }
      
      if (bestThrough) {
        std::vector<Instruction*> succs = getSuccs(inst);
        for (std::vector<Instruction*>::iterator it2 = succs.begin(),
               ie = succs.end(); it2 != ie; ++it2) {
          uint64_t dist = sm.getIndexedValue(stats::minDistToGloballyUncovered,
                                             infos.getInfo(*it2).id);
          if (dist) {
            uint64_t val = bestThrough + dist;
            if (best==0 || val<best)
              best = val;
          }
        }
      }

      if (best != cur) {
        sm.setIndexedValue(stats::minDistToGloballyUncovered, 
                           infos.getInfo(inst).id, 
                           best);
        changed = true;
      }
    }
  } while (changed);

  for (std::set<ExecutionState*>::iterator it = executor.states.begin(),
         ie = executor.states.end(); it != ie; ++it) {
    ExecutionState *es = *it;
    uint64_t currentFrameMinDist = 0;
    for (ExecutionState::stack_ty::iterator sfIt = es->stack().begin(),
           sf_ie = es->stack().end(); sfIt != sf_ie; ++sfIt) {
      ExecutionState::stack_ty::iterator next = sfIt + 1;
      KInstIterator kii;

      if (next==es->stack().end()) {
        kii = es->pc();
      } else {
        kii = next->caller;
        ++kii;
      }
      
      sfIt->minDistToUncoveredOnReturn = currentFrameMinDist;
      
      currentFrameMinDist = computeMinDistToUncovered(kii, currentFrameMinDist);
    }
  }

  LOG(INFO) << "Processed " << instructions.size() << " instructions in static analysis";
}
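Both do/while passes above are the same relaxation: a distance of 0 means unreachable, an instruction pays bestThrough to get through itself, and the value is re-minimized over successors until nothing changes. A standalone plain-C++ sketch of that shape (toy graph and names invented here, no KLEE/cloud9 types; the seeded node stands in for a return or an uncovered instruction):

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // succ[i] lists the successors of node i; dist[i] == 0 means unreachable.
  // Node 3 is seeded with 1, like a ReturnInst in the code above.
  std::vector<std::vector<int>> succ = {{1, 2}, {3}, {3}, {}};
  std::vector<uint64_t> dist = {0, 0, 0, 1};
  bool changed;
  do {
    changed = false;
    for (size_t i = 0; i < succ.size(); ++i) {
      const uint64_t bestThrough = 1;       // cost of the instruction itself
      uint64_t best = dist[i];
      for (int s : succ[i]) {
        if (dist[s]) {                      // skip unreachable successors
          uint64_t val = bestThrough + dist[s];
          if (best == 0 || val < best)
            best = val;
        }
      }
      if (best != dist[i]) {
        dist[i] = best;
        changed = true;
      }
    }
  } while (changed);
  for (size_t i = 0; i < dist.size(); ++i)
    std::printf("node %zu: distance %llu\n", i, (unsigned long long)dist[i]);
  return 0;
}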
Code Example #3
File: SjLjEHPrepare.cpp Project: PhongNgo/llvm
/// lowerAcrossUnwindEdges - Find all variables which are alive across an unwind
/// edge and spill them.
void SjLjEHPass::lowerAcrossUnwindEdges(Function &F,
                                        ArrayRef<InvokeInst*> Invokes) {
  // Finally, scan the code looking for instructions with bad live ranges.
  for (Function::iterator
         BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator
           II = BB->begin(), IIE = BB->end(); II != IIE; ++II) {
      // Ignore obvious cases we don't have to handle. In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block. Ignore them quickly.
      Instruction *Inst = II;
      if (Inst->use_empty()) continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->use_back())->getParent() == BB &&
          !isa<PHINode>(Inst->use_back())) continue;

      // If this is an alloca in the entry block, it's not a real register
      // value.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F.begin())
          continue;

      // Avoid iterator invalidation by copying users to a temporary vector.
      SmallVector<Instruction*, 16> Users;
      for (Value::use_iterator
             UI = Inst->use_begin(), E = Inst->use_end(); UI != E; ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        if (User->getParent() != BB || isa<PHINode>(User))
          Users.push_back(User);
      }

      // Find all of the blocks that this value is live in.
      SmallPtrSet<BasicBlock*, 64> LiveBBs;
      LiveBBs.insert(Inst->getParent());
      while (!Users.empty()) {
        Instruction *U = Users.back();
        Users.pop_back();

        if (!isa<PHINode>(U)) {
          MarkBlocksLiveIn(U->getParent(), LiveBBs);
        } else {
          // Uses for a PHI node occur in their predecessor block.
          PHINode *PN = cast<PHINode>(U);
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
        }
      }

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
          DEBUG(dbgs() << "SJLJ Spill: " << *Inst << " around "
                << UnwindBlock->getName() << "\n");
          NeedsSpill = true;
          break;
        }
      }

      // If we decided we need a spill, do it.
      // FIXME: Spilling this way is overkill, as it forces all uses of
      // the value to be reloaded from the stack slot, even those that aren't
      // in the unwind blocks. We should be more selective.
      if (NeedsSpill) {
        DemoteRegToStack(*Inst, true);
        ++NumSpilled;
      }
    }
  }

  // Go through the landing pads and remove any PHIs there.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
    LandingPadInst *LPI = UnwindBlock->getLandingPadInst();

    // Place PHIs into a set to avoid invalidating the iterator.
    SmallPtrSet<PHINode*, 8> PHIsToDemote;
    for (BasicBlock::iterator
           PN = UnwindBlock->begin(); isa<PHINode>(PN); ++PN)
      PHIsToDemote.insert(cast<PHINode>(PN));
    if (PHIsToDemote.empty()) continue;

    // Demote the PHIs to the stack.
    for (SmallPtrSet<PHINode*, 8>::iterator
           I = PHIsToDemote.begin(), E = PHIsToDemote.end(); I != E; ++I)
      DemotePHIToStack(*I);

    // Move the landingpad instruction back to the top of the landing pad block.
    LPI->moveBefore(UnwindBlock->begin());
  }
}
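MarkBlocksLiveIn itself is not part of this excerpt; the liveness set above only needs it to flood backwards from each use block over predecessor edges, stopping at blocks already recorded (the defining block is inserted first, so the walk stops there too). A plain-C++ sketch under that assumption, with integer IDs standing in for BasicBlock pointers and the helper name invented here:

#include <set>
#include <vector>

// Record BB and, transitively, its predecessors in LiveBBs; a block that is
// already present ends the walk along that path.
static void markBlocksLiveIn(int BB, const std::vector<std::vector<int>> &Preds,
                             std::set<int> &LiveBBs) {
  if (!LiveBBs.insert(BB).second)
    return;
  for (int Pred : Preds[BB])
    markBlocksLiveIn(Pred, Preds, LiveBBs);
}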
Code Example #4
File: CallTargets.cpp Project: brills/pfpa
void CallTargetFinder<dsa>::findIndTargets(Module &M)
{
  dsa* T = &getAnalysis<dsa>();
  const DSCallGraph & callgraph = T->getCallGraph();
  DSGraph* G = T->getGlobalsGraph();
  DSGraph::ScalarMapTy& SM = G->getScalarMap();
  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
    if (!I->isDeclaration())
      for (Function::iterator F = I->begin(), FE = I->end(); F != FE; ++F)
        for (BasicBlock::iterator B = F->begin(), BE = F->end(); B != BE; ++B)
          if (isa<CallInst>(B) || isa<InvokeInst>(B)) {
            CallSite cs(B);
            AllSites.push_back(cs);
            Function* CF = cs.getCalledFunction();

            //
            // If the called function is casted from one function type to
            // another, peer into the cast instruction and pull out the actual
            // function being called.
            //
            if (!CF)
              CF = dyn_cast<Function>(cs.getCalledValue()->stripPointerCasts());

            if (!CF) {
              Value * calledValue = cs.getCalledValue()->stripPointerCasts();
              if (isa<ConstantPointerNull>(calledValue)) {
                ++DirCall;
                CompleteSites.insert(cs);
              } else {
                IndCall++;

                DSCallGraph::callee_iterator csi = callgraph.callee_begin(cs),
                                   cse = callgraph.callee_end(cs);
                while(csi != cse) {
                  const Function *F = *csi;
                  DSCallGraph::scc_iterator sccii = callgraph.scc_begin(F),
                    sccee = callgraph.scc_end(F);
                  for(;sccii != sccee; ++sccii) {
                    DSGraph::ScalarMapTy::const_iterator I = SM.find(SM.getLeaderForGlobal(*sccii));
                    if (I != SM.end()) {
                      IndMap[cs].push_back (*sccii);
                    }
                  }
                  ++csi;
                }
                const Function *F1 = (cs).getInstruction()->getParent()->getParent();
                F1 = callgraph.sccLeader(&*F1);
                
                DSCallGraph::scc_iterator sccii = callgraph.scc_begin(F1),
                  sccee = callgraph.scc_end(F1);
                for(;sccii != sccee; ++sccii) {
                  DSGraph::ScalarMapTy::const_iterator I = SM.find(SM.getLeaderForGlobal(*sccii));
                  if (I != SM.end()) {
                    IndMap[cs].push_back (*sccii);
                  }
                }

                DSNode* N = T->getDSGraph(*cs.getCaller())
                  ->getNodeForValue(cs.getCalledValue()).getNode();
                assert (N && "CallTarget: findIndTargets: No DSNode!\n");

                if (!N->isIncompleteNode() && !N->isExternalNode() && IndMap[cs].size()) {
                  CompleteSites.insert(cs);
                  ++CompleteInd;
                } 
                if (!N->isIncompleteNode() && !N->isExternalNode() && !IndMap[cs].size()) {
                  ++CompleteEmpty;
                  DEBUG(errs() << "Call site empty: '"
                                << cs.getInstruction()->getName()
                                << "' In '"
                                << cs.getInstruction()->getParent()->getParent()->getName()
                                << "'\n");
                }
              }
            } else {
              ++DirCall;
              IndMap[cs].push_back(CF);
              CompleteSites.insert(cs);
            }
          }
}
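The cast-peering described in the comment above is compact enough to read as its own helper. A sketch against the same DSA-era LLVM API used in this file (getCastedCallee is an invented name, and CallSite has since been replaced by CallBase in modern LLVM):

#include "llvm/Support/CallSite.h"  // "llvm/IR/CallSite.h" on later releases

// Return the statically known callee, looking through constant pointer casts.
// A null result means the call really is indirect (or targets a null pointer).
static llvm::Function *getCastedCallee(llvm::CallSite CS) {
  if (llvm::Function *F = CS.getCalledFunction())
    return F;
  return llvm::dyn_cast<llvm::Function>(
      CS.getCalledValue()->stripPointerCasts());
}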
Code Example #5
File: StatsTracker.cpp Project: Icefroge/cloud9
// TODO(sbucur): Break this into multiple methods
void StatsTracker::getCallgraphProfile(data::GlobalProfile &globalProfile) {
  Module *m = executor.kmodule->module;
  uint64_t istatsMask = 0;

  StatisticManager &sm = *theStatisticManager;
  unsigned nStats = sm.getNumStatistics();

  istatsMask |= 1<<sm.getStatisticID("Queries");
  istatsMask |= 1<<sm.getStatisticID("QueriesValid");
  istatsMask |= 1<<sm.getStatisticID("QueriesInvalid");
  istatsMask |= 1<<sm.getStatisticID("QueryTime");
  istatsMask |= 1<<sm.getStatisticID("ResolveTime");
  istatsMask |= 1<<sm.getStatisticID("Instructions");
  istatsMask |= 1<<sm.getStatisticID("InstructionTimes");
  istatsMask |= 1<<sm.getStatisticID("InstructionRealTimes");
  istatsMask |= 1<<sm.getStatisticID("Forks");
  istatsMask |= 1<<sm.getStatisticID("GloballyCoveredInstructions");
  istatsMask |= 1<<sm.getStatisticID("GloballyUncoveredInstructions");
  istatsMask |= 1<<sm.getStatisticID("States");
  istatsMask |= 1<<sm.getStatisticID("MinDistToUncovered");

  for (unsigned i=0; i<nStats; i++) {
    if (istatsMask & (1<<i)) {
      Statistic &s = sm.getStatistic(i);
      globalProfile.add_cost_label(s.getName());
    }
  }

  globalProfile.set_time_stamp(::time(NULL));

  // set state counts, decremented after we process so that we don't
  // have to zero all records each time.
  if (istatsMask & (1<<stats::states.getID()))
    updateStateStatistics(1);

  CallSiteSummaryTable callSiteStats;
  if (UseCallPaths)
    callPathManager.getSummaryStatistics(callSiteStats);

  for (Module::iterator fnIt = m->begin(), fn_ie = m->end();
       fnIt != fn_ie; ++fnIt) {
    if (fnIt->isDeclaration())
      continue;

    data::FunctionProfile *functionProfile = globalProfile.add_function_profile();
    functionProfile->set_function_id(executor.kmodule->functionMap[&(*fnIt)]->nameID);

    for (Function::iterator bbIt = fnIt->begin(), bb_ie = fnIt->end();
         bbIt != bb_ie; ++bbIt) {
      for (BasicBlock::iterator it = bbIt->begin(), ie = bbIt->end();
           it != ie; ++it) {
        Instruction *instr = &*it;
        const InstructionInfo &ii = executor.kmodule->infos->getInfo(instr);
        unsigned index = ii.id;

        data::LineProfile *lineProfile = functionProfile->add_line_profile();
        executor.kmodule->fillInstructionDebugInfo(
            instr, *lineProfile->mutable_debug_info());

        for (unsigned i=0; i<nStats; i++) {
          if (istatsMask&(1<<i)) {
            lineProfile->add_cost_value(
                sm.getIndexedValue(sm.getStatistic(i), index));
          }
        }

        if (UseCallPaths &&
            (isa<CallInst>(instr) || isa<InvokeInst>(instr))) {
          CallSiteSummaryTable::iterator it = callSiteStats.find(instr);
          if (it!=callSiteStats.end()) {
            for (std::map<llvm::Function*, CallSiteInfo>::iterator
                   fit = it->second.begin(), fie = it->second.end();
                 fit != fie; ++fit) {
              Function *f = fit->first;
              CallSiteInfo &csi = fit->second;

              data::CallSiteProfile *callsiteProfile = lineProfile->add_call_site_profile();
              executor.kmodule->fillFunctionDebugInfo(
                  f, *callsiteProfile->mutable_debug_info());

              callsiteProfile->set_call_count(csi.count);

              for (unsigned i=0; i<nStats; i++) {
                if (istatsMask&(1<<i)) {
                  Statistic &s = sm.getStatistic(i);
                  uint64_t value;

                  // Hack, ignore things that don't make sense on
                  // call paths.
                  if (&s == &stats::globallyUncoveredInstructions) {
                    value = 0;
                  } else {
                    value = csi.statistics.getValue(s);
                  }

                  callsiteProfile->add_cost_value(value);
                }
              }
            }
          }
        }
      }
    }
  }

  if (istatsMask & (1<<stats::states.getID()))
    updateStateStatistics((uint64_t)-1);
}
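istatsMask above is just a bit set keyed by statistic ID, so every later loop tests membership with a shift instead of a string comparison. A standalone plain-C++ sketch of that pattern (names invented here; like the code, it assumes the selected IDs fit in the 64-bit mask):

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> statNames = {"Queries", "Forks", "Instructions", "States"};
  uint64_t mask = 0;
  mask |= UINT64_C(1) << 1;  // select "Forks"
  mask |= UINT64_C(1) << 2;  // select "Instructions"
  for (unsigned i = 0; i < statNames.size(); ++i)
    if (mask & (UINT64_C(1) << i))
      std::printf("cost label: %s\n", statNames[i].c_str());
  return 0;
}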
Code Example #6
/// InlineFunction - This function inlines the called function into the basic
/// block of the caller.  This returns false if it is not possible to inline
/// this call.  The program is still in a well defined state if this occurs
/// though.
///
/// Note that this only does one level of inlining.  For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream.  Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();
  
  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||          // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee is not a tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Value *CalleePersonality = 0;
  for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
       I != E; ++I)
    if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
      const BasicBlock *BB = II->getUnwindDest();
      const LandingPadInst *LP = BB->getLandingPadInst();
      CalleePersonality = LP->getPersonalityFn();
      break;
    }

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  if (CalleePersonality) {
    for (Function::const_iterator I = Caller->begin(), E = Caller->end();
         I != E; ++I)
      if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
        const BasicBlock *BB = II->getUnwindDest();
        const LandingPadInst *LP = BB->getLandingPadInst();

        // If the personality functions match, then we can perform the
        // inlining. Otherwise, we can't inline.
        // TODO: This isn't 100% true. Some personality functions are proper
        //       supersets of others and can be used in place of the other.
        if (LP->getPersonalityFn() != CalleePersonality)
          return false;

        break;
      }
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy implied
      // by them explicit.  However, we don't do this if the callee is readonly
      // or readnone, because the copy would be unneeded: the callee doesn't
      // modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));
 
        // Calls that we inline may use the new alloca, so we need to clear
        // their 'tail' flags if HandleByValArgument introduced a new alloca and
        // the callee has calls.
        MustClearTailCallFlags |= ActualArg != *AI;
      }

      VMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap, 
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, IFI.TD, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller.  First
  // calculate which instruction they should be inserted before.  We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (AI == 0) continue;
      
      // If the alloca is now dead, remove it.  This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;
      
      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);
      
      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block.  Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  // Leave lifetime markers for the static alloca's, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      builder.CreateLifetimeStart(AI);
      for (unsigned ri = 0, re = Returns.size(); ri != re; ++ri) {
        IRBuilder<> builder(Returns[ri]);
        builder.CreateLifetimeEnd(AI);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
      .CreateCall(StackSave, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      IRBuilder<>(Returns[i]).CreateCall(StackRestore, SavedPtr);
    }
  }

  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code.  Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
      BranchInst::Create(II->getNormalDest(), TheCall);

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and make the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(NewBr,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);


  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = 0;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }


    // Add a branch to the merge points and remove return instructions.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst::Create(AfterCallBB, RI);
      RI->eraseFromParent();
    }
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef).  If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    if (Value *V = SimplifyInstruction(PHI, IFI.TD)) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}
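A usage sketch for the entry point above (not taken from the LLVM sources): wrap the call in a CallSite, pass a fresh InlineFunctionInfo, and check the boolean result. tryInlineCall is an invented name, and the header paths and signature match the same LLVM generation as this code; both CallSite and InlineFunction have changed in later releases.

#include "llvm/Transforms/Utils/Cloning.h"  // InlineFunction, InlineFunctionInfo
#include "llvm/Support/CallSite.h"          // moved under llvm/IR/ in later releases

// Attempt to inline one direct call in place; returns false when the callee is
// indirect, a declaration, varargs, or has an incompatible GC or personality.
static bool tryInlineCall(llvm::CallInst *CI) {
  llvm::InlineFunctionInfo IFI;  // no call graph or target data to keep updated
  return llvm::InlineFunction(llvm::CallSite(CI), IFI, /*InsertLifetime=*/true);
}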
Code Example #7
File: NVPTXGenericToNVVM.cpp Project: Drup/llvm
bool GenericToNVVM::runOnModule(Module &M) {
  // Create a clone of each global variable that has the default address space.
  // The clone is created with the global address space  specifier, and the pair
  // of original global variable and its clone is placed in the GVMap for later
  // use.

  for (Module::global_iterator I = M.global_begin(), E = M.global_end();
       I != E;) {
    GlobalVariable *GV = I++;
    if (GV->getType()->getAddressSpace() == llvm::ADDRESS_SPACE_GENERIC &&
        !llvm::isTexture(*GV) && !llvm::isSurface(*GV) &&
        !llvm::isSampler(*GV) && !GV->getName().startswith("llvm.")) {
      GlobalVariable *NewGV = new GlobalVariable(
          M, GV->getType()->getElementType(), GV->isConstant(),
          GV->getLinkage(),
          GV->hasInitializer() ? GV->getInitializer() : nullptr,
          "", GV, GV->getThreadLocalMode(), llvm::ADDRESS_SPACE_GLOBAL);
      NewGV->copyAttributesFrom(GV);
      GVMap[GV] = NewGV;
    }
  }

  // Return immediately if every global variable already has a specific address space
  // specifier.
  if (GVMap.empty()) {
    return false;
  }

  // Walk through the instructions in function definitions, and replace any use
  // of original global variables in GVMap with a use of the corresponding
  // copies in GVMap.  If necessary, promote constants to instructions.
  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
    if (I->isDeclaration()) {
      continue;
    }
    IRBuilder<> Builder(I->getEntryBlock().getFirstNonPHIOrDbg());
    for (Function::iterator BBI = I->begin(), BBE = I->end(); BBI != BBE;
         ++BBI) {
      for (BasicBlock::iterator II = BBI->begin(), IE = BBI->end(); II != IE;
           ++II) {
        for (unsigned i = 0, e = II->getNumOperands(); i < e; ++i) {
          Value *Operand = II->getOperand(i);
          if (isa<Constant>(Operand)) {
            II->setOperand(
                i, remapConstant(&M, I, cast<Constant>(Operand), Builder));
          }
        }
      }
    }
    ConstantToValueMap.clear();
  }

  // Walk through the metadata section and update the debug information
  // associated with the global variables in the default address space.
  for (Module::named_metadata_iterator I = M.named_metadata_begin(),
                                       E = M.named_metadata_end();
       I != E; I++) {
    remapNamedMDNode(&M, I);
  }

  // Walk through the global variable  initializers, and replace any use of
  // original global variables in GVMap with a use of the corresponding copies
  // in GVMap.  The copies need to be bitcast to the original global variable
  // types, as we cannot use cvta in global variable initializers.
  for (GVMapTy::iterator I = GVMap.begin(), E = GVMap.end(); I != E;) {
    GlobalVariable *GV = I->first;
    GlobalVariable *NewGV = I->second;

    // Remove GV from the map so that it can be RAUWed.  Note that
    // DenseMap::erase() won't invalidate any iterators but this one.
    auto Next = std::next(I);
    GVMap.erase(I);
    I = Next;

    Constant *BitCastNewGV = ConstantExpr::getPointerCast(NewGV, GV->getType());
    // At this point, the remaining uses of GV should be found only in global
    // variable initializers, as other uses have already been removed
    // while walking through the instructions in function definitions.
    GV->replaceAllUsesWith(BitCastNewGV);
    std::string Name = GV->getName();
    GV->eraseFromParent();
    NewGV->setName(Name);
  }
  assert(GVMap.empty() && "Expected it to be empty by now");

  return true;
}
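The last loop above leans on two details: the entry must leave the map before replaceAllUsesWith runs, and DenseMap::erase invalidates only the erased iterator, so grabbing std::next first is enough. The same advance-then-erase idiom in standalone C++ with std::map, stripped of the LLVM specifics:

#include <cstdio>
#include <iterator>
#include <map>
#include <string>

int main() {
  std::map<std::string, std::string> gvMap = {
      {"gv_a", "gv_a.global"}, {"gv_b", "gv_b.global"}};
  for (auto I = gvMap.begin(), E = gvMap.end(); I != E;) {
    auto Next = std::next(I);   // remember the successor before erasing
    std::string Old = I->first;
    std::string New = I->second;
    gvMap.erase(I);             // only this iterator is invalidated
    I = Next;
    std::printf("replace %s with %s\n", Old.c_str(), New.c_str());
  }
  return 0;
}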
Code Example #8
bool PruneEH::runOnSCC(const std::vector<CallGraphNode *> &SCC) {
  CallGraph &CG = getAnalysis<CallGraph>();
  bool MadeChange = false;

  // First pass, scan all of the functions in the SCC, simplifying them
  // according to what we know.
  for (unsigned i = 0, e = SCC.size(); i != e; ++i)
    if (Function *F = SCC[i]->getFunction())
      MadeChange |= SimplifyFunction(F);

  // Next, check to see if any callees might throw or if there are any external
  // functions in this SCC: if so, we cannot prune any functions in this SCC.
  // If this SCC includes the unwind instruction, we KNOW it throws, so
  // obviously the SCC might throw.
  //
  bool SCCMightUnwind = false, SCCMightReturn = false;
  for (unsigned i = 0, e = SCC.size();
       (!SCCMightUnwind || !SCCMightReturn) && i != e; ++i) {
    Function *F = SCC[i]->getFunction();
    if (F == 0 || (F->isDeclaration() && !F->getIntrinsicID())) {
      SCCMightUnwind = true;
      SCCMightReturn = true;
    } else {
      if (F->isDeclaration())
        SCCMightReturn = true;

      // Check to see if this function performs an unwind or calls an
      // unwinding function.
      for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
        if (isa<UnwindInst>(BB->getTerminator())) {  // Uses unwind!
          SCCMightUnwind = true;
        } else if (isa<ReturnInst>(BB->getTerminator())) {
          SCCMightReturn = true;
        }

        // Invoke instructions don't allow unwinding to continue, so we are
        // only interested in call instructions.
        if (!SCCMightUnwind)
          for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
            if (CallInst *CI = dyn_cast<CallInst>(I)) {
              if (Function *Callee = CI->getCalledFunction()) {
                CallGraphNode *CalleeNode = CG[Callee];
                // If the callee is outside our current SCC, or if it is not
                // known to throw, then we might throw also.
                if (std::find(SCC.begin(), SCC.end(), CalleeNode) == SCC.end()&&
                    !DoesNotUnwind.count(CalleeNode)) {
                  SCCMightUnwind = true;
                  break;
                }
              } else {
                // Indirect call, it might throw.
                SCCMightUnwind = true;
                break;
              }
            }
        if (SCCMightUnwind && SCCMightReturn) break;
      }
    }
  }

  // If the SCC doesn't unwind or doesn't throw, note this fact.
  if (!SCCMightUnwind)
    for (unsigned i = 0, e = SCC.size(); i != e; ++i)
      DoesNotUnwind.insert(SCC[i]);
  if (!SCCMightReturn)
    for (unsigned i = 0, e = SCC.size(); i != e; ++i)
      DoesNotReturn.insert(SCC[i]);

  for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
    // Convert any invoke instructions to non-throwing functions in this node
    // into call instructions with a branch.  This makes the exception blocks
    // dead.
    if (Function *F = SCC[i]->getFunction())
      MadeChange |= SimplifyFunction(F);
  }

  return MadeChange;
}
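The nested loops above implement one conservative rule: the whole SCC might unwind as soon as any member makes a call that is neither inside the SCC nor already proven non-unwinding, and every indirect call counts. A standalone plain-C++ sketch of that rule, with integer IDs in place of CallGraphNode pointers and an invented function name:

#include <set>
#include <vector>

// Callees[F] lists the known callees of F; an indirect call can be modeled as
// a callee ID that is never inserted into DoesNotUnwind.
static bool sccMightUnwind(const std::vector<int> &SCC,
                           const std::vector<std::vector<int>> &Callees,
                           const std::set<int> &DoesNotUnwind) {
  std::set<int> InSCC(SCC.begin(), SCC.end());
  for (int F : SCC)
    for (int Callee : Callees[F])
      if (!InSCC.count(Callee) && !DoesNotUnwind.count(Callee))
        return true;
  return false;
}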
Code Example #9
File: HeteroOMPTransform.cpp Project: IntelLabs/iHRC
/*
 * Rewrite OpenMP call sites and their associated kernel functions -- the following pattern
   call void @GOMP_parallel_start(void (i8*)* @_Z20initialize_variablesiPfS_.omp_fn.4, i8* %.omp_data_o.5571, i32 0) nounwind
  call void @_Z20initialize_variablesiPfS_.omp_fn.4(i8* %.omp_data_o.5571) nounwind
  call void @GOMP_parallel_end() nounwind
 */
void HeteroOMPTransform::rewrite_omp_call_sites(Module &M) {
	SmallVector<Instruction *, 16> toDelete;
	DenseMap<Value *, Value *> ValueMap;
	
	for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I){
		if (!I->isDeclaration()) {
			
			for (Function::iterator BBI = I->begin(), BBE = I->end(); BBI != BBE; ++BBI) {
				bool match = false;
				for (BasicBlock::iterator INSNI = BBI->begin(), INSNE = BBI->end(); INSNI != INSNE; ++INSNI) {
					if (isa<CallInst>(INSNI)) {
						CallSite CI(cast<Instruction>(INSNI));
						if (CI.getCalledFunction() != NULL){ 
							string called_func_name = CI.getCalledFunction()->getName();
							if (called_func_name == OMP_PARALLEL_START_NAME && CI.arg_size() == 3) {
								// change alloc to malloc_shared
								// %5 = call i8* @_Z13malloc_sharedm(i64 20)       ; <i8*> [#uses=5]
								// %6 = bitcast i8* %5 to float*                   ; <float*> [#uses=2]
								AllocaInst *AllocCall;
								Value *arg_0 = CI.getArgument(0); // function
								Value *arg_1 = CI.getArgument(1);  // context
								Value *loop_ub = NULL;
								Function *function;
								BitCastInst* BCI;
								Function *kernel_function;
								BasicBlock::iterator iI(*INSNI); 
								//BasicBlock::iterator iJ = iI+1; 
								iI++; iI++;
								//BasicBlock::iterator iK = iI;  
								CallInst /**next,*/ *next_next; 
								if (arg_0 != NULL && arg_1 != NULL /*&& (next = dyn_cast<CallInst>(*iJ))*/ 
									&& (next_next = dyn_cast<CallInst>(iI)) && (next_next->getCalledFunction() != NULL) 
									&& (next_next->getCalledFunction()->getName() == OMP_PARALLEL_END_NAME)
									&& (BCI = dyn_cast<BitCastInst>(arg_1)) && (AllocCall = dyn_cast<AllocaInst>(BCI->getOperand(0))) 
									&& (function = dyn_cast<Function>(arg_0)) && (loop_ub = find_loop_upper_bound (AllocCall)) 
									&& (kernel_function=convert_to_kernel_function (M, function))){
									
										SmallVector<Value*, 16> Args;
										Args.push_back(AllocCall->getArraySize());
										Instruction *MallocCall = CallInst::Create(mallocFnTy, Args, "", AllocCall);
										CastInst *MallocCast = CastInst::Create(Instruction::BitCast, MallocCall, AllocCall->getType(), "", AllocCall);
										ValueMap[AllocCall] = MallocCast;
										//AllocCall->replaceAllUsesWith(MallocCall);
										// Add offload function
										Args.clear();
										Args.push_back(loop_ub);
										Args.push_back(BCI);
										Args.push_back(kernel_function);
										if (offloadFnTy == NULL) {
											init_offload_type(M, kernel_function);
										}
										
										Instruction *call = CallInst::Create(offloadFnTy, Args, "", INSNI);
										
										if (find(toDelete.begin(), toDelete.end(), AllocCall) == toDelete.end()){
											toDelete.push_back(AllocCall);
										}
										toDelete.push_back(&(*INSNI));
										match = true;
								}
							}
							else if (called_func_name == OMP_PARALLEL_END_NAME && CI.arg_size() == 0 && match) {
								toDelete.push_back(&(*INSNI));
								match = false;
							}
							else if (match) {
								toDelete.push_back(&(*INSNI));
							}
						}
					}
				}
			}
		}

	}

	/* Replace AllocCalls by MallocCalls */
	for(DenseMap<Value *, Value *>::iterator I = ValueMap.begin(), E = ValueMap.end(); I != E; I++) {
		I->first->replaceAllUsesWith(I->second);
	}

	/* delete the instructions for get_omp_num_thread and get_omp_thread_num */
	while(!toDelete.empty()) {
		Instruction *g = toDelete.back();
		toDelete.pop_back();
		g->eraseFromParent();
	}

}
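Structurally, the function above defers every mutation: uses are only remapped and instructions only erased after the walk over the basic blocks has finished, so no iterator is invalidated mid-scan. A small sketch of the deletion half of that shape, using the same LLVM-era types (eraseRecorded is an invented helper):

#include "llvm/ADT/SmallVector.h"
#include "llvm/Instruction.h"  // "llvm/IR/Instruction.h" on later releases

// Erase the instructions recorded during the walk, once the walk is done.
static void eraseRecorded(llvm::SmallVectorImpl<llvm::Instruction *> &ToDelete) {
  while (!ToDelete.empty()) {
    llvm::Instruction *I = ToDelete.back();
    ToDelete.pop_back();
    I->eraseFromParent();  // safe now: no live iterator still points at I
  }
}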
Code Example #10
/// splitLiveRangesAcrossInvokes - Each value that is live across an unwind edge
/// we spill into a stack location, guaranteeing that there is nothing live
/// across the unwind edge.  This process also splits all critical edges
/// coming out of invoke's.
/// FIXME: Move this function to a common utility file (Local.cpp?) so
/// both SjLj and LowerInvoke can use it.
void SjLjEHPass::
splitLiveRangesAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
  // First step, split all critical edges from invoke instructions.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    InvokeInst *II = Invokes[i];
    SplitCriticalEdge(II, 0, this);

    // FIXME: New EH - This if-condition will be always true in the new scheme.
    if (II->getUnwindDest()->isLandingPad()) {
      SmallVector<BasicBlock*, 2> NewBBs;
      SplitLandingPadPredecessors(II->getUnwindDest(), II->getParent(),
                                  ".1", ".2", this, NewBBs);
      LPadSuccMap[II] = *succ_begin(NewBBs[0]);
    } else {
      SplitCriticalEdge(II, 1, this);
    }

    assert(!isa<PHINode>(II->getNormalDest()) &&
           !isa<PHINode>(II->getUnwindDest()) &&
           "Critical edge splitting left single entry phi nodes?");
  }

  Function *F = Invokes.back()->getParent()->getParent();

  // To avoid having to handle incoming arguments specially, we lower each arg
  // to a copy instruction in the entry block.  This ensures that the argument
  // value itself cannot be live across the entry block.
  BasicBlock::iterator AfterAllocaInsertPt = F->begin()->begin();
  while (isa<AllocaInst>(AfterAllocaInsertPt) &&
        isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsertPt)->getArraySize()))
    ++AfterAllocaInsertPt;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI) {
    Type *Ty = AI->getType();
    // Aggregate types can't be cast, but are legal argument types, so we have
    // to handle them differently. We use an extract/insert pair as a
    // lightweight method to achieve the same goal.
    if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
      Instruction *EI = ExtractValueInst::Create(AI, 0, "",AfterAllocaInsertPt);
      Instruction *NI = InsertValueInst::Create(AI, EI, 0);
      NI->insertAfter(EI);
      AI->replaceAllUsesWith(NI);
      // Set the operand of the instructions back to the AllocaInst.
      EI->setOperand(0, AI);
      NI->setOperand(0, AI);
    } else {
      // This is always a no-op cast because we're casting AI to AI->getType()
      // so src and destination types are identical. BitCast is the only
      // possibility.
      CastInst *NC = new BitCastInst(
        AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
      AI->replaceAllUsesWith(NC);
      // Set the operand of the cast instruction back to the AllocaInst.
      // Normally it's forbidden to replace a CastInst's operand because it
      // could cause the opcode to reflect an illegal conversion. However,
      // we're replacing it here with the same value it was constructed with.
      // We do this because the above replaceAllUsesWith() clobbered the
      // operand, but we want this one to remain.
      NC->setOperand(0, AI);
    }
  }

  // Finally, scan the code looking for instructions with bad live ranges.
  for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
      // Ignore obvious cases we don't have to handle.  In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block.  Ignore them quickly.
      Instruction *Inst = II;
      if (Inst->use_empty()) continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->use_back())->getParent() == BB &&
          !isa<PHINode>(Inst->use_back())) continue;

      // If this is an alloca in the entry block, it's not a real register
      // value.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F->begin())
          continue;

      // Avoid iterator invalidation by copying users to a temporary vector.
      SmallVector<Instruction*,16> Users;
      for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
           UI != E; ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        if (User->getParent() != BB || isa<PHINode>(User))
          Users.push_back(User);
      }

      // Find all of the blocks that this value is live in.
      std::set<BasicBlock*> LiveBBs;
      LiveBBs.insert(Inst->getParent());
      while (!Users.empty()) {
        Instruction *U = Users.back();
        Users.pop_back();

        if (!isa<PHINode>(U)) {
          MarkBlocksLiveIn(U->getParent(), LiveBBs);
        } else {
          // Uses for a PHI node occur in their predecessor block.
          PHINode *PN = cast<PHINode>(U);
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
        }
      }

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
          NeedsSpill = true;
        }
      }

      // If we decided we need a spill, do it.
      // FIXME: Spilling this way is overkill, as it forces all uses of
      // the value to be reloaded from the stack slot, even those that aren't
      // in the unwind blocks. We should be more selective.
      if (NeedsSpill) {
        ++NumSpilled;
        DemoteRegToStack(*Inst, true);
      }
    }
}
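For reference, here is a rough sketch (an illustration, not output from this pass; value names are invented) of what the DemoteRegToStack(*Inst, true) call above does to a value that is live across an invoke's unwind edge:

; Before: %v is live into the landing pad.
;   %v = add i32 %a, %b
;   invoke void @f() to label %ok unwind label %lp
; lp:
;   ... use of %v ...
;
; After demotion every use goes through a stack slot (with volatile reloads,
; because the second argument is true), so nothing stays live in a register
; across the unwind edge:
;   %v.slot = alloca i32
;   %v = add i32 %a, %b
;   store i32 %v, i32* %v.slot
;   invoke void @f() to label %ok unwind label %lp
; lp:
;   %v.reload = load volatile i32* %v.slot
;   ... use of %v.reload ...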
Code example #11
bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
  SmallVector<ReturnInst*,16> Returns;
  SmallVector<UnwindInst*,16> Unwinds;
  SmallVector<InvokeInst*,16> Invokes;

  // Look through the terminators of the basic blocks to find invokes, returns
  // and unwinds.
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
      // Remember all return instructions in case we insert an invoke into this
      // function.
      Returns.push_back(RI);
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
      Invokes.push_back(II);
    } else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
      Unwinds.push_back(UI);
    }
  }

  NumInvokes += Invokes.size();
  NumUnwinds += Unwinds.size();

  // If we don't have any invokes, there's nothing to do.
  if (Invokes.empty()) return false;

  // Find the eh.selector.*, eh.exception and alloca calls.
  //
  // Remember any allocas() that aren't in the entry block, as the
  // jmpbuf saved SP will need to be updated for them.
  //
  // We'll use the first eh.selector to determine the right personality
  // function to use. For SJLJ, we always use the same personality for the
  // whole function, not on a per-selector basis.
  // FIXME: That's a bit ugly. Better way?
  SmallVector<CallInst*,16> EH_Selectors;
  SmallVector<CallInst*,16> EH_Exceptions;
  SmallVector<Instruction*,16> JmpbufUpdatePoints;

  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    // Note: Skip the entry block since there's nothing there that interests
    // us. eh.selector and eh.exception shouldn't ever be there, and we
    // want to disregard any allocas that are there.
    // 
    // FIXME: This is awkward. The new EH scheme won't need to skip the entry
    //        block.
    if (BB == F.begin()) {
      if (InvokeInst *II = dyn_cast<InvokeInst>(F.begin()->getTerminator())) {
        // FIXME: This will be always non-NULL in the new EH.
        if (LandingPadInst *LPI = II->getUnwindDest()->getLandingPadInst())
          if (!PersonalityFn) PersonalityFn = LPI->getPersonalityFn();
      }

      continue;
    }

    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
      if (CallInst *CI = dyn_cast<CallInst>(I)) {
        if (CI->getCalledFunction() == SelectorFn) {
          if (!PersonalityFn) PersonalityFn = CI->getArgOperand(1);
          EH_Selectors.push_back(CI);
        } else if (CI->getCalledFunction() == ExceptionFn) {
          EH_Exceptions.push_back(CI);
        } else if (CI->getCalledFunction() == StackRestoreFn) {
          JmpbufUpdatePoints.push_back(CI);
        }
      } else if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
        JmpbufUpdatePoints.push_back(AI);
      } else if (InvokeInst *II = dyn_cast<InvokeInst>(I)) {
        // FIXME: This will be always non-NULL in the new EH.
        if (LandingPadInst *LPI = II->getUnwindDest()->getLandingPadInst())
          if (!PersonalityFn) PersonalityFn = LPI->getPersonalityFn();
      }
    }
  }

  // If we don't have any eh.selector calls, we can't determine the personality
  // function. Without a personality function, we can't process exceptions.
  if (!PersonalityFn) return false;

  // We have invokes, so we need to add register/unregister calls to get this
  // function onto the global unwind stack.
  //
  // First thing we need to do is scan the whole function for values that are
  // live across unwind edges.  Each value that is live across an unwind edge we
  // spill into a stack location, guaranteeing that there is nothing live across
  // the unwind edge.  This process also splits all critical edges coming out of
  // invokes.
  splitLiveRangesAcrossInvokes(Invokes);


  SmallVector<LandingPadInst*, 16> LandingPads;
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator()))
      // FIXME: This will be always non-NULL in the new EH.
      if (LandingPadInst *LPI = II->getUnwindDest()->getLandingPadInst())
        LandingPads.push_back(LPI);
  }


  BasicBlock *EntryBB = F.begin();
  // Create an alloca for the incoming jump buffer ptr and the new jump buffer
  // that needs to be restored on all exits from the function.  This is an
  // alloca because the value needs to be added to the global context list.
  unsigned Align = 4; // FIXME: Should be a TLI check?
  AllocaInst *FunctionContext =
    new AllocaInst(FunctionContextTy, 0, Align,
                   "fcn_context", F.begin()->begin());

  Value *Idxs[2];
  Type *Int32Ty = Type::getInt32Ty(F.getContext());
  Value *Zero = ConstantInt::get(Int32Ty, 0);
  // We need to also keep around a reference to the call_site field
  Idxs[0] = Zero;
  Idxs[1] = ConstantInt::get(Int32Ty, 1);
  CallSite = GetElementPtrInst::Create(FunctionContext, Idxs, "call_site",
                                       EntryBB->getTerminator());

  // The exception selector comes back in context->data[1]
  Idxs[1] = ConstantInt::get(Int32Ty, 2);
  Value *FCData = GetElementPtrInst::Create(FunctionContext, Idxs, "fc_data",
                                            EntryBB->getTerminator());
  Idxs[1] = ConstantInt::get(Int32Ty, 1);
  Value *SelectorAddr = GetElementPtrInst::Create(FCData, Idxs,
                                                  "exc_selector_gep",
                                                  EntryBB->getTerminator());
  // The exception value comes back in context->data[0]
  Idxs[1] = Zero;
  Value *ExceptionAddr = GetElementPtrInst::Create(FCData, Idxs,
                                                   "exception_gep",
                                                   EntryBB->getTerminator());

  // The result of the eh.selector call will be replaced with a reference to
  // the selector value returned in the function context. We leave the selector
  // itself so the EH analysis later can use it.
  for (int i = 0, e = EH_Selectors.size(); i < e; ++i) {
    CallInst *I = EH_Selectors[i];
    Value *SelectorVal = new LoadInst(SelectorAddr, "select_val", true, I);
    I->replaceAllUsesWith(SelectorVal);
  }

  // eh.exception calls are replaced with references to the proper location in
  // the context. Unlike eh.selector, the eh.exception calls are removed
  // entirely.
  for (int i = 0, e = EH_Exceptions.size(); i < e; ++i) {
    CallInst *I = EH_Exceptions[i];
    // Possible for there to be duplicates, so check to make sure the
    // instruction hasn't already been removed.
    if (!I->getParent()) continue;
    Value *Val = new LoadInst(ExceptionAddr, "exception", true, I);
    Type *Ty = Type::getInt8PtrTy(F.getContext());
    Val = CastInst::Create(Instruction::IntToPtr, Val, Ty, "", I);

    I->replaceAllUsesWith(Val);
    I->eraseFromParent();
  }

  for (unsigned i = 0, e = LandingPads.size(); i != e; ++i)
    ReplaceLandingPadVal(F, LandingPads[i], ExceptionAddr, SelectorAddr);

  // The entry block changes to have the eh.sjlj.setjmp, with a conditional
  // branch to a dispatch block for non-zero returns. If we return normally,
  // we're not handling an exception and just register the function context and
  // continue.

  // Create the dispatch block.  The dispatch block is basically a big switch
  // statement that goes to all of the invoke landing pads.
  BasicBlock *DispatchBlock =
    BasicBlock::Create(F.getContext(), "eh.sjlj.setjmp.catch", &F);

  // Insert a load of the callsite in the dispatch block, and a switch on its
  // value. By default, we issue a trap statement.
  BasicBlock *TrapBlock =
    BasicBlock::Create(F.getContext(), "trapbb", &F);
  CallInst::Create(Intrinsic::getDeclaration(F.getParent(), Intrinsic::trap),
                   "", TrapBlock);
  new UnreachableInst(F.getContext(), TrapBlock);

  Value *DispatchLoad = new LoadInst(CallSite, "invoke.num", true,
                                     DispatchBlock);
  SwitchInst *DispatchSwitch =
    SwitchInst::Create(DispatchLoad, TrapBlock, Invokes.size(),
                       DispatchBlock);
  // Split the entry block to insert the conditional branch for the setjmp.
  BasicBlock *ContBlock = EntryBB->splitBasicBlock(EntryBB->getTerminator(),
                                                   "eh.sjlj.setjmp.cont");

  // Populate the Function Context
  //   1. LSDA address
  //   2. Personality function address
  //   3. jmpbuf (save SP, FP and call eh.sjlj.setjmp)

  // LSDA address
  Idxs[0] = Zero;
  Idxs[1] = ConstantInt::get(Int32Ty, 4);
  Value *LSDAFieldPtr =
    GetElementPtrInst::Create(FunctionContext, Idxs, "lsda_gep",
                              EntryBB->getTerminator());
  Value *LSDA = CallInst::Create(LSDAAddrFn, "lsda_addr",
                                 EntryBB->getTerminator());
  new StoreInst(LSDA, LSDAFieldPtr, true, EntryBB->getTerminator());

  Idxs[1] = ConstantInt::get(Int32Ty, 3);
  Value *PersonalityFieldPtr =
    GetElementPtrInst::Create(FunctionContext, Idxs, "lsda_gep",
                              EntryBB->getTerminator());
  new StoreInst(PersonalityFn, PersonalityFieldPtr, true,
                EntryBB->getTerminator());

  // Save the frame pointer.
  Idxs[1] = ConstantInt::get(Int32Ty, 5);
  Value *JBufPtr
    = GetElementPtrInst::Create(FunctionContext, Idxs, "jbuf_gep",
                                EntryBB->getTerminator());
  Idxs[1] = ConstantInt::get(Int32Ty, 0);
  Value *FramePtr =
    GetElementPtrInst::Create(JBufPtr, Idxs, "jbuf_fp_gep",
                              EntryBB->getTerminator());

  Value *Val = CallInst::Create(FrameAddrFn,
                                ConstantInt::get(Int32Ty, 0),
                                "fp",
                                EntryBB->getTerminator());
  new StoreInst(Val, FramePtr, true, EntryBB->getTerminator());

  // Save the stack pointer.
  Idxs[1] = ConstantInt::get(Int32Ty, 2);
  Value *StackPtr =
    GetElementPtrInst::Create(JBufPtr, Idxs, "jbuf_sp_gep",
                              EntryBB->getTerminator());

  Val = CallInst::Create(StackAddrFn, "sp", EntryBB->getTerminator());
  new StoreInst(Val, StackPtr, true, EntryBB->getTerminator());

  // Call the setjmp intrinsic. It fills in the rest of the jmpbuf.
  Value *SetjmpArg =
    CastInst::Create(Instruction::BitCast, JBufPtr,
                     Type::getInt8PtrTy(F.getContext()), "",
                     EntryBB->getTerminator());
  Value *DispatchVal = CallInst::Create(BuiltinSetjmpFn, SetjmpArg,
                                        "dispatch",
                                        EntryBB->getTerminator());

  // Add a call to dispatch_setup after the setjmp call. This is expanded to any
  // target-specific setup that needs to be done.
  CallInst::Create(DispatchSetupFn, DispatchVal, "", EntryBB->getTerminator());

  // Check the return value of the setjmp; a non-zero value goes to the dispatch block.
  Value *IsNormal = new ICmpInst(EntryBB->getTerminator(),
                                 ICmpInst::ICMP_EQ, DispatchVal, Zero,
                                 "notunwind");
  // Nuke the uncond branch.
  EntryBB->getTerminator()->eraseFromParent();

  // Put in a new condbranch in its place.
  BranchInst::Create(ContBlock, DispatchBlock, IsNormal, EntryBB);

  // Register the function context and make sure it's known to not throw
  CallInst *Register =
    CallInst::Create(RegisterFn, FunctionContext, "",
                     ContBlock->getTerminator());
  Register->setDoesNotThrow();

  // At this point, we are all set up, update the invoke instructions to mark
  // their call_site values, and fill in the dispatch switch accordingly.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i)
    markInvokeCallSite(Invokes[i], i+1, CallSite, DispatchSwitch);

  // Mark call instructions that aren't nounwind as no-action (call_site ==
  // -1). Skip the entry block: before that point no function context has been
  // created for this function, and any unexpected exceptions thrown will go
  // directly to the caller's context, which is what we want anyway, so there
  // is no need to do anything here.
  for (Function::iterator BB = F.begin(), E = F.end(); ++BB != E;) {
    for (BasicBlock::iterator I = BB->begin(), end = BB->end(); I != end; ++I)
      if (CallInst *CI = dyn_cast<CallInst>(I)) {
        // Ignore calls to the EH builtins (eh.selector, eh.exception)
        Constant *Callee = CI->getCalledFunction();
        if (Callee != SelectorFn && Callee != ExceptionFn
            && !CI->doesNotThrow())
          insertCallSiteStore(CI, -1, CallSite);
      } else if (ResumeInst *RI = dyn_cast<ResumeInst>(I)) {
        insertCallSiteStore(RI, -1, CallSite);
      }
  }

  // Replace all unwinds with a branch to the unwind handler.
  // ??? Should this ever happen with sjlj exceptions?
  for (unsigned i = 0, e = Unwinds.size(); i != e; ++i) {
    BranchInst::Create(TrapBlock, Unwinds[i]);
    Unwinds[i]->eraseFromParent();
  }

  // Following any allocas not in the entry block, update the saved SP in the
  // jmpbuf to the new value.
  for (unsigned i = 0, e = JmpbufUpdatePoints.size(); i != e; ++i) {
    Instruction *AI = JmpbufUpdatePoints[i];
    Instruction *StackAddr = CallInst::Create(StackAddrFn, "sp");
    StackAddr->insertAfter(AI);
    Instruction *StoreStackAddr = new StoreInst(StackAddr, StackPtr, true);
    StoreStackAddr->insertAfter(StackAddr);
  }

  // Finally, for any returns from this function, if this function contains an
  // invoke, add a call to unregister the function context.
  for (unsigned i = 0, e = Returns.size(); i != e; ++i)
    CallInst::Create(UnregisterFn, FunctionContext, "", Returns[i]);

  return true;
}
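To keep the overall shape in view, here is a rough sketch of the rewritten entry and dispatch blocks (an illustration only; the context type and the runtime routine bound to RegisterFn are assumptions, while the block and value names come from the code above):

entry:
  %fcn_context = alloca %FunctionContextTy        ; per-function context
  ; ... volatile stores of the LSDA, personality, FP and SP into the context ...
  %dispatch = call i32 @llvm.eh.sjlj.setjmp(i8* %jbuf)
  ; (target-specific dispatch_setup call omitted)
  %notunwind = icmp eq i32 %dispatch, 0
  br i1 %notunwind, label %eh.sjlj.setjmp.cont, label %eh.sjlj.setjmp.catch

eh.sjlj.setjmp.cont:                              ; normal path
  call void @_Unwind_SjLj_Register(...)           ; RegisterFn (assumed name)
  ; original entry code continues here

eh.sjlj.setjmp.catch:                             ; dispatch block
  %invoke.num = load volatile i32* %call_site
  switch i32 %invoke.num, label %trapbb [
    ; one case per invoke, filled in by markInvokeCallSite
  ]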
Code example #12
File: lto.cpp Project: marnen/rubinius
/// InputFilename is an LLVM bitcode file. Read it using the bitcode reader.
/// Collect global functions and symbol names in symbols vector.
/// Collect external references in references vector.
/// Return LTO_READ_SUCCESS if there is no error.
enum LTOStatus
LTO::readLLVMObjectFile(const std::string &InputFilename,
                        NameToSymbolMap &symbols,
                        std::set<std::string> &references)
{
  Module *m = getModule(InputFilename);
  if (!m)
    return LTO_READ_FAILURE;

  // Collect Target info
  getTarget(m);

  if (!Target)
    return LTO_READ_FAILURE;
  
  // Use mangler to add GlobalPrefix to names to match linker names.
  // FIXME : Instead of hard coding "-" use GlobalPrefix.
  Mangler mangler(*m, Target->getTargetAsmInfo()->getGlobalPrefix());
  modules.push_back(m);
  
  for (Module::iterator f = m->begin(), e = m->end(); f != e; ++f) {
    LTOLinkageTypes lt = getLTOLinkageType(f);
    LTOVisibilityTypes vis = getLTOVisibilityType(f);
    if (!f->isDeclaration() && lt != LTOInternalLinkage
        && strncmp (f->getName().c_str(), "llvm.", 5)) {
      int alignment = ( 16 > f->getAlignment() ? 16 : f->getAlignment());
      LLVMSymbol *newSymbol = new LLVMSymbol(lt, vis, f, f->getName(), 
                                             mangler.getValueName(f),
                                             Log2_32(alignment));
      symbols[newSymbol->getMangledName()] = newSymbol;
      allSymbols[newSymbol->getMangledName()] = newSymbol;
    }

    // Collect external symbols referenced by this function.
    for (Function::iterator b = f->begin(), fe = f->end(); b != fe; ++b) 
      for (BasicBlock::iterator i = b->begin(), be = b->end(); 
           i != be; ++i) {
        for (unsigned count = 0, total = i->getNumOperands(); 
             count != total; ++count)
          findExternalRefs(i->getOperand(count), references, mangler);
      }
  }
    
  for (Module::global_iterator v = m->global_begin(), e = m->global_end();
       v !=  e; ++v) {
    LTOLinkageTypes lt = getLTOLinkageType(v);
    LTOVisibilityTypes vis = getLTOVisibilityType(v);
    if (!v->isDeclaration() && lt != LTOInternalLinkage
        && strncmp (v->getName().c_str(), "llvm.", 5)) {
      const TargetData *TD = Target->getTargetData();
      LLVMSymbol *newSymbol = new LLVMSymbol(lt, vis, v, v->getName(), 
                                             mangler.getValueName(v),
                                             TD->getPreferredAlignmentLog(v));
      symbols[newSymbol->getMangledName()] = newSymbol;
      allSymbols[newSymbol->getMangledName()] = newSymbol;

      for (unsigned count = 0, total = v->getNumOperands(); 
           count != total; ++count)
        findExternalRefs(v->getOperand(count), references, mangler);

    }
  }
  
  return LTO_READ_SUCCESS;
}
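A minimal, hypothetical driver sketch for this entry point (the LTO instance, the input path and the error handling are placeholders; only the signature shown above is assumed):

  // Hypothetical caller; assumes an LTO object `lto` is already constructed.
  NameToSymbolMap symbols;            // mangled name -> LLVMSymbol*
  std::set<std::string> references;   // mangled names of referenced externals
  if (lto.readLLVMObjectFile("input.bc", symbols, references) != LTO_READ_SUCCESS) {
    // bail out: the bitcode could not be parsed or no target was found
  }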
Code example #13
File: PruneEH.cpp Project: Sciumo/llvm
bool PruneEH::runOnSCC(CallGraphSCC &SCC) {
  SmallPtrSet<CallGraphNode *, 8> SCCNodes;
  CallGraph &CG = getAnalysis<CallGraph>();
  bool MadeChange = false;

  // Fill SCCNodes with the elements of the SCC.  Used for quickly
  // looking up whether a given CallGraphNode is in this SCC.
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
    SCCNodes.insert(*I);

  // First pass, scan all of the functions in the SCC, simplifying them
  // according to what we know.
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
    if (Function *F = (*I)->getFunction())
      MadeChange |= SimplifyFunction(F);

  // Next, check to see if any callees might throw or if there are any external
  // functions in this SCC: if so, we cannot prune any functions in this SCC.
  // Definitions that are weak and not declared non-throwing might be
  // overridden at link time with something that throws, so assume that.
  // If this SCC includes the unwind instruction, we KNOW it throws, so
  // obviously the SCC might throw.
  //
  bool SCCMightUnwind = false, SCCMightReturn = false;
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); 
       (!SCCMightUnwind || !SCCMightReturn) && I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (F == 0) {
      SCCMightUnwind = true;
      SCCMightReturn = true;
    } else if (F->isDeclaration() || F->mayBeOverridden()) {
      SCCMightUnwind |= !F->doesNotThrow();
      SCCMightReturn |= !F->doesNotReturn();
    } else {
      bool CheckUnwind = !SCCMightUnwind && !F->doesNotThrow();
      bool CheckReturn = !SCCMightReturn && !F->doesNotReturn();

      if (!CheckUnwind && !CheckReturn)
        continue;

      // Check to see if this function performs an unwind or calls an
      // unwinding function.
      for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
        if (CheckUnwind && isa<UnwindInst>(BB->getTerminator())) {
          // Uses unwind!
          SCCMightUnwind = true;
        } else if (CheckReturn && isa<ReturnInst>(BB->getTerminator())) {
          SCCMightReturn = true;
        }

        // Invoke instructions don't allow unwinding to continue, so we are
        // only interested in call instructions.
        if (CheckUnwind && !SCCMightUnwind)
          for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
            if (CallInst *CI = dyn_cast<CallInst>(I)) {
              if (CI->doesNotThrow()) {
                // This call cannot throw.
              } else if (Function *Callee = CI->getCalledFunction()) {
                CallGraphNode *CalleeNode = CG[Callee];
                // If the callee is outside our current SCC then we may
                // throw because it might.
                if (!SCCNodes.count(CalleeNode)) {
                  SCCMightUnwind = true;
                  break;
                }
              } else {
                // Indirect call, it might throw.
                SCCMightUnwind = true;
                break;
              }
            }
        if (SCCMightUnwind && SCCMightReturn) break;
      }
    }
  }

  // If the SCC can't unwind or can't return, note this fact.
  if (!SCCMightUnwind || !SCCMightReturn)
    for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
      Attributes NewAttributes = Attribute::None;

      if (!SCCMightUnwind)
        NewAttributes |= Attribute::NoUnwind;
      if (!SCCMightReturn)
        NewAttributes |= Attribute::NoReturn;

      Function *F = (*I)->getFunction();
      const AttrListPtr &PAL = F->getAttributes();
      const AttrListPtr &NPAL = PAL.addAttr(~0, NewAttributes);
      if (PAL != NPAL) {
        MadeChange = true;
        F->setAttributes(NPAL);
      }
    }

  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    // Convert any invoke instructions to non-throwing functions in this node
    // into call instructions with a branch.  This makes the exception blocks
    // dead.
    if (Function *F = (*I)->getFunction())
      MadeChange |= SimplifyFunction(F);
  }

  return MadeChange;
}
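A small before/after illustration (a sketch, not actual pass output) of the attributes inferred when the analysis above proves that functions cannot unwind or cannot return:

; Before: two singleton SCCs, neither of which calls outside itself.
define void @leaf() {
entry:
  ret void
}
define void @spin() {
entry:
  br label %loop
loop:
  br label %loop
}

; After PruneEH:
;   @leaf gets nounwind            (no unwind, no calls that might throw)
;   @spin gets nounwind noreturn   (additionally, it has no ReturnInst)
; SimplifyFunction can then turn invokes of @leaf in callers into plain calls
; followed by a branch, making their exception blocks dead.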
Code example #14
/*
 * Clone a given function removing dead stores
 */
Function* DeadStoreEliminationPass::cloneFunctionWithoutDeadStore(Function *Fn,
    Instruction* caller, std::string suffix) {

  Function *NF = Function::Create(Fn->getFunctionType(), Fn->getLinkage());
  NF->copyAttributesFrom(Fn);

  // Copy the parameter names, to ease function inspection afterwards.
  Function::arg_iterator NFArg = NF->arg_begin();
  for (Function::arg_iterator Arg = Fn->arg_begin(), ArgEnd = Fn->arg_end();
      Arg != ArgEnd; ++Arg, ++NFArg) {
    NFArg->setName(Arg->getName());
  }

  // To avoid name collision, we should select another name.
  NF->setName(Fn->getName() + suffix);

  // Fill clone content
  ValueToValueMapTy VMap;
  SmallVector<ReturnInst*, 8> Returns;
  Function::arg_iterator NI = NF->arg_begin();
  for (Function::arg_iterator I = Fn->arg_begin();
      NI != NF->arg_end(); ++I, ++NI) {
    VMap[I] = NI;
  }
  CloneAndPruneFunctionInto(NF, Fn, VMap, false, Returns);

  // Remove dead stores
  std::set<Value*> deadArgs = deadArguments[caller];
  std::set<Value*> removeStoresTo;
  Function::arg_iterator NFArgIter = NF->arg_begin();
  for (Function::arg_iterator FnArgIter = Fn->arg_begin(); FnArgIter !=
      Fn->arg_end(); ++FnArgIter, ++NFArgIter) {
    Value *FnArg = FnArgIter;
    if (deadArgs.count(FnArg)) {
      removeStoresTo.insert(NFArgIter);
    }
  }
  std::vector<Instruction*> toRemove;
  for (Function::iterator BB = NF->begin(); BB != NF->end(); ++BB) {
    for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
      Instruction *inst = I;
      if (!isa<StoreInst>(inst)) continue;
      StoreInst *SI = dyn_cast<StoreInst>(inst);
      Value *ptrOp = SI->getPointerOperand();
      if (removeStoresTo.count(ptrOp)) {
        DEBUG(errs() << "will remove this store: " << *inst << "\n");
        toRemove.push_back(inst);
      }
    }
  }
  for (std::vector<Instruction*>::iterator it = toRemove.begin();
      it != toRemove.end(); ++it) {
    Instruction* inst = *it;
    inst->eraseFromParent();
    RemovedStores++;
  }

  // Insert the clone function before the original
  Fn->getParent()->getFunctionList().insert(Fn, NF);

  return NF;
}
Code example #15
bool TailCallElim::runOnFunction(Function &F) {
  // If this function is a varargs function, we won't be able to PHI the args
  // right, so don't even try to convert it...
  if (F.getFunctionType()->isVarArg()) return false;

  TTI = &getAnalysis<TargetTransformInfo>();
  BasicBlock *OldEntry = 0;
  bool TailCallsAreMarkedTail = false;
  SmallVector<PHINode*, 8> ArgumentPHIs;
  bool MadeChange = false;

  // CanTRETailMarkedCall - If false, we cannot perform TRE on tail calls
  // marked with the 'tail' attribute, because doing so would cause the stack
  // size to increase (real TRE would deallocate variable sized allocas, TRE
  // doesn't).
  bool CanTRETailMarkedCall = true;

  // Find calls that can be marked tail.
  AllocaCaptureTracker ACT;
  for (Function::iterator BB = F.begin(), EE = F.end(); BB != EE; ++BB) {
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
        CanTRETailMarkedCall &= CanTRE(AI);
        PointerMayBeCaptured(AI, &ACT);
        // If any allocas are captured, exit.
        if (ACT.Captured)
          return false;
      }
    }
  }

  // Second pass, change any tail recursive calls to loops.
  //
  // FIXME: The code generator produces really bad code when an 'escaping
  // alloca' is changed from being a static alloca to being a dynamic alloca.
  // Until this is resolved, disable this transformation if that would ever
  // happen.  This bug is PR962.
  if (ACT.UsesAlloca.empty()) {
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
      if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB->getTerminator())) {
        bool Change = ProcessReturningBlock(Ret, OldEntry, TailCallsAreMarkedTail,
                                            ArgumentPHIs, !CanTRETailMarkedCall);
        if (!Change && BB->getFirstNonPHIOrDbg() == Ret)
          Change = FoldReturnAndProcessPred(BB, Ret, OldEntry,
                                            TailCallsAreMarkedTail, ArgumentPHIs,
                                            !CanTRETailMarkedCall);
        MadeChange |= Change;
      }
    }
  }

  // If we eliminated any tail recursions, it's possible that we inserted some
  // silly PHI nodes which just merge an initial value (the incoming operand)
  // with themselves.  Check to see if we did and clean up our mess if so.  This
  // occurs when a function passes an argument straight through to its tail
  // call.
  if (!ArgumentPHIs.empty()) {
    for (unsigned i = 0, e = ArgumentPHIs.size(); i != e; ++i) {
      PHINode *PN = ArgumentPHIs[i];

      // If the PHI Node is a dynamic constant, replace it with the value it is.
      if (Value *PNV = SimplifyInstruction(PN)) {
        PN->replaceAllUsesWith(PNV);
        PN->eraseFromParent();
      }
    }
  }

  // At this point, we know that the function does not have any captured
  // allocas. If additionally the function does not call setjmp, mark all calls
  // in the function that do not access stack memory with the tail keyword. This
  // implies ensuring that there does not exist any path between a call that
  // takes in an alloca but does not capture it and the call which we wish to
  // mark with "tail".
  if (!F.callsFunctionThatReturnsTwice()) {
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (!ACT.UsesAlloca.count(CI)) {
            CI->setTailCall();
            MadeChange = true;
          }
        }
      }
    }
  }

  return MadeChange;
}
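For orientation, a sketch of what ProcessReturningBlock does to an accumulating recursive call (block and value names are invented, not the exact ones the pass emits):

; Before: the recursive call is in tail position.
define i32 @fact(i32 %n, i32 %acc) {
entry:
  %done = icmp eq i32 %n, 0
  br i1 %done, label %ret, label %rec
rec:
  %n1 = sub i32 %n, 1
  %acc1 = mul i32 %acc, %n
  %call = tail call i32 @fact(i32 %n1, i32 %acc1)
  ret i32 %call
ret:
  ret i32 %acc
}

; After: the call and its ret become a branch back to the old entry block,
; and the arguments turn into the PHI nodes collected in ArgumentPHIs.
define i32 @fact(i32 %n, i32 %acc) {
newentry:
  br label %entry
entry:
  %n.ph = phi i32 [ %n, %newentry ], [ %n1, %rec ]
  %acc.ph = phi i32 [ %acc, %newentry ], [ %acc1, %rec ]
  %done = icmp eq i32 %n.ph, 0
  br i1 %done, label %ret, label %rec
rec:
  %n1 = sub i32 %n.ph, 1
  %acc1 = mul i32 %acc.ph, %n.ph
  br label %entry
ret:
  ret i32 %acc.ph
}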
Code example #16
File: HeteroOMPTransform.cpp Project: IntelLabs/iHRC
/**
 * Generate code for the new worker function: clone F's body into NF and remap
 * the OpenMP thread queries (omp_get_num_threads / omp_get_thread_num).
 */
void HeteroOMPTransform::gen_code_per_f (Function* NF, Function* F, Instruction *max_threads){
	
	Function::arg_iterator FI = F->arg_begin();
	Argument *ctxname = &*FI;

	Function::arg_iterator DestI = NF->arg_begin();
	DestI->setName(ctxname->getName()); 
	Argument *ctx_name = &(*DestI);
	DestI++;
	DestI->setName("tid");
	Argument *num_iters = &(*DestI);

#ifdef EXPLICIT_REWRITE
	DenseMap<const Value*, Value *> ValueMap;
#else
	ValueToValueMapTy ValueMap;
#endif

	//get the old basic block and create a new one
	Function::const_iterator BI = F->begin();
	const BasicBlock &FB = *BI;
	BasicBlock *NFBB = BasicBlock::Create(FB.getContext(), "", NF);
	if (FB.hasName()){
		NFBB->setName(FB.getName());
	}
	ValueMap[&FB] = NFBB;

	//ValueMap[numiters] = num_iters;
	ValueMap[ctxname] = ctx_name;

#if EXPLICIT_REWRITE
	for (BasicBlock::const_iterator II = FB.begin(), IE = FB.end(); II != IE; ++II) {
		Instruction *NFInst = II->clone(/*F->getContext()*/);
		//	DEBUG(dbgs()<<*II<<"\n");
		if (II->hasName()) NFInst->setName(II->getName());
		const Instruction *FInst = &(*II);
		rewrite_instruction((Instruction *)FInst, NFInst, ValueMap);
		NFBB->getInstList().push_back(NFInst);
		ValueMap[II] = NFInst;
	}
	BI++;

	for (Function::const_iterator /*BI=F->begin(),*/BE = F->end();BI != BE; ++BI) {
		const BasicBlock &FBB = *BI;
		BasicBlock *NFBB = BasicBlock::Create(FBB.getContext(), "", NF);
		ValueMap[&FBB] = NFBB;
		if (FBB.hasName()){
			NFBB->setName(FBB.getName());
			//DEBUG(dbgs()<<NFBB->getName()<<"\n");
		}
		for (BasicBlock::const_iterator II = FBB.begin(), IE = FBB.end(); II != IE; ++II) {
			Instruction *NFInst = II->clone(/*F->getContext()*/);
			if (II->hasName()) NFInst->setName(II->getName());
			const Instruction *FInst = &(*II);
			rewrite_instruction((Instruction *)FInst, NFInst, ValueMap);
			NFBB->getInstList().push_back(NFInst);
			ValueMap[II] = NFInst;
		}
	}
	// Remap the instructions again to take care of forward jumps
	for (Function::iterator BB = NF->begin(), BE=NF->end(); BB != BE; ++ BB) {
		for (BasicBlock::iterator II = BB->begin(); II != BB->end(); ++II){
			int opIdx = 0;
			//DEBUG(dbgs()<<*II<<"\n");
			for (User::op_iterator i = II->op_begin(), e = II->op_end(); i != e; ++i, opIdx++) {
				Value *V = *i;
				if (ValueMap[V] != NULL) {
					II->setOperand(opIdx, ValueMap[V]);
				}
			}
		}
	}
#else
	SmallVector<ReturnInst*, 8> Returns;  // Ignore returns cloned.
	CloneFunctionInto(NF, F, ValueMap, false, Returns, "");
#endif

	//max_threads->dump();
	/* Remap openmp omp_num_threads() and omp_thread_num() */ 
	/*
	 * define internal void @_Z20initialize_variablesiPfS_.omp_fn.4(i8* nocapture %.omp_data_i) nounwind ssp {
     * entry:
     * %0 = bitcast i8* %.omp_data_i to i32*           ; <i32*> [#uses=1]
     * %1 = load i32* %0, align 8                      ; <i32> [#uses=2]
     * %2 = tail call i32 @omp_get_num_threads() nounwind readnone ; <i32> [#uses=2]
     * %3 = tail call i32 @omp_get_thread_num() nounwind readnone ; <i32> [#uses=2]
	   %4 = sdiv i32 %1, %2
	   %5 = mul nsw i32 %4, %2
       %6 = icmp ne i32 %5, %1
       %7 = zext i1 %6 to i32
	 */
	vector<Instruction *> toDelete;
	for (Function::iterator BB = NF->begin(), BE=NF->end(); BB != BE; ++ BB) {
		for (BasicBlock::iterator II = BB->begin(); II != BB->end(); ++II){
			if (isa<CallInst>(II)) {
				CallSite CI(cast<Instruction>(II));
				if (CI.getCalledFunction() != NULL){ 
					string called_func_name = CI.getCalledFunction()->getName();
					if (called_func_name == OMP_GET_NUM_THREADS_NAME && CI.arg_size() == 0) {
						II->replaceAllUsesWith(ValueMap[max_threads]);
						toDelete.push_back(II);
					}
					else if (called_func_name == OMP_GET_THREAD_NUM_NAME && CI.arg_size() == 0) {
						II->replaceAllUsesWith(num_iters);
						toDelete.push_back(II);
					}
				}
			}
		}
	}


	/* Delete the last branch instruction of the first basic block -- Assuming it is safe */
	Function::iterator nfBB = NF->begin();
	TerminatorInst *lastI = nfBB->getTerminator();
	BranchInst *bI;
	BasicBlock *returnBlock;
	if ((bI = dyn_cast<BranchInst>(lastI)) && bI->isConditional() && 
		(returnBlock = bI->getSuccessor(1)) && 
		(returnBlock->getName() == "return")) {
		/* modify it to an unconditional branch to the next basic block instead of returning */
		Instruction *bbI = BranchInst::Create(bI->getSuccessor(0),lastI);
		bbI->dump();
		toDelete.push_back(lastI);
	}

	//NF->dump();
	while(!toDelete.empty()) {
		Instruction *g = toDelete.back();
		//g->replaceAllUsesWith(UndefValue::get(g->getType()));
		toDelete.pop_back();
		g->eraseFromParent();
	}

	//NF->dump();
}
Code example #17
//
// Method: runOnModule()
//
// Description:
//  Entry point for this LLVM pass. Search for insert/extractvalue instructions
//  that can be simplified.
//
// Inputs:
//  M - A reference to the LLVM module to transform.
//
// Outputs:
//  M - The transformed LLVM module.
//
// Return value:
// true  - The module was modified.
// false - The module was not modified.
//
bool SimplifyEV::runOnModule(Module& M) {
    // Repeat till no change
    bool changed;
    do {
        changed = false;
        for (Module::iterator F = M.begin(); F != M.end(); ++F) {
            for (Function::iterator B = F->begin(), FE = F->end(); B != FE; ++B) {
                for (BasicBlock::iterator I = B->begin(), BE = B->end(); I != BE;) {
                    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(I++);
                    if(!EV)
                        continue;
                    Value *Agg = EV->getAggregateOperand();
                    if (!EV->hasIndices()) {
                        EV->replaceAllUsesWith(Agg);
                        DEBUG(errs() << "EV:");
                        DEBUG(errs() << "ERASE:");
                        DEBUG(EV->dump());
                        EV->eraseFromParent();
                        numErased++;
                        changed = true;
                        continue;
                    }
                    if (Constant *C = dyn_cast<Constant>(Agg)) {
                        if (isa<UndefValue>(C)) {
                            EV->replaceAllUsesWith(UndefValue::get(EV->getType()));
                            DEBUG(errs() << "EV:");
                            DEBUG(errs() << "ERASE:");
                            DEBUG(EV->dump());
                            EV->eraseFromParent();
                            numErased++;
                            changed = true;
                            continue;
                        }
                        if (isa<ConstantAggregateZero>(C)) {
                            EV->replaceAllUsesWith(Constant::getNullValue(EV->getType()));
                            DEBUG(errs() << "EV:");
                            DEBUG(errs() << "ERASE:");
                            DEBUG(EV->dump());
                            EV->eraseFromParent();
                            numErased++;
                            changed = true;
                            continue;
                        }
                        if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
                            // Extract the element indexed by the first index out of the constant
                            Value *V = C->getOperand(*EV->idx_begin());
                            if (EV->getNumIndices() > 1) {
                                // Extract the remaining indices out of the constant indexed by the
                                // first index
                                ExtractValueInst *EV_new = ExtractValueInst::Create(V,
                                                           EV->getIndices().slice(1),
                                                           "", EV);
                                EV->replaceAllUsesWith(EV_new);
                                DEBUG(errs() << "EV:");
                                DEBUG(errs() << "ERASE:");
                                DEBUG(EV->dump());
                                EV->eraseFromParent();
                                numErased++;
                                changed = true;
                                continue;
                            }  else {
                                EV->replaceAllUsesWith(V);
                                DEBUG(errs() << "EV:");
                                DEBUG(errs() << "ERASE:");
                                DEBUG(EV->dump());
                                EV->eraseFromParent();
                                numErased++;
                                changed = true;
                                continue;
                            }
                        }
                        continue;
                    }
                    if (LoadInst * LI = dyn_cast<LoadInst>(Agg)) {
                        // If the Agg value came from a load instruction,
                        // replace the extractvalue instruction with
                        // a GEP and a load.
                        SmallVector<Value*, 8> Indices;
                        Type *Int32Ty = Type::getInt32Ty(M.getContext());
                        Indices.push_back(Constant::getNullValue(Int32Ty));
                        for (ExtractValueInst::idx_iterator I = EV->idx_begin(), E = EV->idx_end();
                                I != E; ++I) {
                            Indices.push_back(ConstantInt::get(Int32Ty, *I));
                        }

                        GetElementPtrInst *GEP = GetElementPtrInst::CreateInBounds(LI->getOperand(0), Indices,
                                                 LI->getName(), LI) ;
                        LoadInst *LINew = new LoadInst(GEP, "", LI);
                        EV->replaceAllUsesWith(LINew);
                        EV->eraseFromParent();
                        changed = true;
                        numErased++;
                        continue;
                    }
                    if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
                        bool done = false;
                        // We're extracting from an insertvalue instruction, compare the indices
                        const unsigned *exti, *exte, *insi, *inse;
                        for (exti = EV->idx_begin(), insi = IV->idx_begin(),
                                exte = EV->idx_end(), inse = IV->idx_end();
                                exti != exte && insi != inse;
                                ++exti, ++insi) {
                            if (*insi != *exti) {
                                // The insert and extract both reference distinctly different elements.
                                // This means the extract is not influenced by the insert, and we can
                                // replace the aggregate operand of the extract with the aggregate
                                // operand of the insert. i.e., replace
                                // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
                                // %E = extractvalue { i32, { i32 } } %I, 0
                                // with
                                // %E = extractvalue { i32, { i32 } } %A, 0
                                ExtractValueInst *EV_new = ExtractValueInst::Create(IV->getAggregateOperand(),
                                                           EV->getIndices(), "", EV);
                                EV->replaceAllUsesWith(EV_new);
                                DEBUG(errs() << "EV:");
                                DEBUG(errs() << "ERASE:");
                                DEBUG(EV->dump());
                                EV->eraseFromParent();
                                numErased++;
                                done = true;
                                changed = true;
                                break;
                            }
                        }
                        if(done)
                            continue;
                        if (exti == exte && insi == inse) {
                            // Both iterators are at the end: Index lists are identical. Replace
                            // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
                            // %C = extractvalue { i32, { i32 } } %B, 1, 0
                            // with "i32 42"
                            EV->replaceAllUsesWith(IV->getInsertedValueOperand());
                            DEBUG(errs() << "EV:");
                            DEBUG(errs() << "ERASE:");
                            DEBUG(EV->dump());
                            EV->eraseFromParent();
                            numErased++;
                            changed = true;
                            continue;
                        }
                        if (exti == exte) {
                            // The extract list is a prefix of the insert list. i.e. replace
                            // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
                            // %E = extractvalue { i32, { i32 } } %I, 1
                            // with
                            // %X = extractvalue { i32, { i32 } } %A, 1
                            // %E = insertvalue { i32 } %X, i32 42, 0
                            // by switching the order of the insert and extract (though the
                            // insertvalue should be left in, since it may have other uses).
                            Value *NewEV = ExtractValueInst::Create(IV->getAggregateOperand(),
                                                                    EV->getIndices(), "", EV);
                            Value *NewIV = InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
                                                                   makeArrayRef(insi, inse), "", EV);
                            EV->replaceAllUsesWith(NewIV);
                            DEBUG(errs() << "EV:");
                            DEBUG(errs() << "ERASE:");
                            DEBUG(EV->dump());
                            EV->eraseFromParent();
                            numErased++;
                            changed = true;
                            continue;
                        }
                        if (insi == inse) {
                            // The insert list is a prefix of the extract list
                            // We can simply remove the common indices from the extract and make it
                            // operate on the inserted value instead of the insertvalue result.
                            // i.e., replace
                            // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
                            // %E = extractvalue { i32, { i32 } } %I, 1, 0
                            // with
                            // %E = extractvalue { i32 } { i32 42 }, 0
                            ExtractValueInst *EV_new = ExtractValueInst::Create(IV->getInsertedValueOperand(),
                                                       makeArrayRef(exti, exte), "", EV);
                            EV->replaceAllUsesWith(EV_new);
                            DEBUG(errs() << "EV:");
                            DEBUG(errs() << "ERASE:");
                            DEBUG(EV->dump());
                            EV->eraseFromParent();
                            numErased++;
                            changed = true;
                            continue;
                        }
                    }
                }
            }
        }
    } while(changed);
    return (numErased > 0);
}
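As a concrete illustration of the load case handled above (a sketch in the old typed-pointer syntax; names are invented):

; Before: an extractvalue fed by a whole-aggregate load.
%agg = load { i32, i32 }* %p
%x   = extractvalue { i32, i32 } %agg, 1

; After: a GEP of the load's pointer operand and a scalar load are inserted
; just before the original load, and the extractvalue is erased (the wide
; load itself stays around for any other users).
%gep = getelementptr inbounds { i32, i32 }* %p, i32 0, i32 1
%x   = load i32* %gep
%agg = load { i32, i32 }* %p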
Code example #18
// insertFastDiv - Substitutes the div/rem instruction with code that checks
// the value of the operands and uses a shorter, faster div/rem instruction when
// possible, falling back to the longer, slower div/rem instruction otherwise.
static bool insertFastDiv(Function &F,
                          Function::iterator &I,
                          BasicBlock::iterator &J,
                          IntegerType *BypassType,
                          bool UseDivOp,
                          bool UseSignedOp,
                          DivCacheTy &PerBBDivCache) {
  // Get instruction operands
  Instruction *Instr = &*J;
  Value *Dividend = Instr->getOperand(0);
  Value *Divisor = Instr->getOperand(1);

  if (isa<ConstantInt>(Divisor) ||
      (isa<ConstantInt>(Dividend) && isa<ConstantInt>(Divisor))) {
    // Operations with immediate values should have
    // been solved and replaced during compile time.
    return false;
  }

  // Basic Block is split before divide
  BasicBlock *MainBB = &*I;
  BasicBlock *SuccessorBB = I->splitBasicBlock(J);
  ++I; //advance iterator I to successorBB

  // Add new basic block for slow divide operation
  BasicBlock *SlowBB = BasicBlock::Create(F.getContext(), "",
                                          MainBB->getParent(), SuccessorBB);
  SlowBB->moveBefore(SuccessorBB);
  IRBuilder<> SlowBuilder(SlowBB, SlowBB->begin());
  Value *SlowQuotientV;
  Value *SlowRemainderV;
  if (UseSignedOp) {
    SlowQuotientV = SlowBuilder.CreateSDiv(Dividend, Divisor);
    SlowRemainderV = SlowBuilder.CreateSRem(Dividend, Divisor);
  } else {
    SlowQuotientV = SlowBuilder.CreateUDiv(Dividend, Divisor);
    SlowRemainderV = SlowBuilder.CreateURem(Dividend, Divisor);
  }
  SlowBuilder.CreateBr(SuccessorBB);

  // Add new basic block for fast divide operation
  BasicBlock *FastBB = BasicBlock::Create(F.getContext(), "",
                                          MainBB->getParent(), SuccessorBB);
  FastBB->moveBefore(SlowBB);
  IRBuilder<> FastBuilder(FastBB, FastBB->begin());
  Value *ShortDivisorV = FastBuilder.CreateCast(Instruction::Trunc, Divisor,
                                                BypassType);
  Value *ShortDividendV = FastBuilder.CreateCast(Instruction::Trunc, Dividend,
                                                 BypassType);

  // udiv/urem because optimization only handles positive numbers
  Value *ShortQuotientV = FastBuilder.CreateExactUDiv(ShortDividendV,
                                                      ShortDivisorV);
  Value *ShortRemainderV = FastBuilder.CreateURem(ShortDividendV,
                                                  ShortDivisorV);
  Value *FastQuotientV = FastBuilder.CreateCast(Instruction::ZExt,
                                                ShortQuotientV,
                                                Dividend->getType());
  Value *FastRemainderV = FastBuilder.CreateCast(Instruction::ZExt,
                                                 ShortRemainderV,
                                                 Dividend->getType());
  FastBuilder.CreateBr(SuccessorBB);

  // Phi nodes for result of div and rem
  IRBuilder<> SuccessorBuilder(SuccessorBB, SuccessorBB->begin());
  PHINode *QuoPhi = SuccessorBuilder.CreatePHI(Instr->getType(), 2);
  QuoPhi->addIncoming(SlowQuotientV, SlowBB);
  QuoPhi->addIncoming(FastQuotientV, FastBB);
  PHINode *RemPhi = SuccessorBuilder.CreatePHI(Instr->getType(), 2);
  RemPhi->addIncoming(SlowRemainderV, SlowBB);
  RemPhi->addIncoming(FastRemainderV, FastBB);

  // Replace Instr with appropriate phi node
  if (UseDivOp)
    Instr->replaceAllUsesWith(QuoPhi);
  else
    Instr->replaceAllUsesWith(RemPhi);
  Instr->eraseFromParent();

  // Combine operands into a single value with OR for value testing below
  MainBB->getInstList().back().eraseFromParent();
  IRBuilder<> MainBuilder(MainBB, MainBB->end());
  Value *OrV = MainBuilder.CreateOr(Dividend, Divisor);

  // BitMask is inverted to check if the operands are
  // larger than the bypass type
  uint64_t BitMask = ~BypassType->getBitMask();
  Value *AndV = MainBuilder.CreateAnd(OrV, BitMask);

  // Compare operand values and branch
  Value *ZeroV = ConstantInt::getSigned(Dividend->getType(), 0);
  Value *CmpV = MainBuilder.CreateICmpEQ(AndV, ZeroV);
  MainBuilder.CreateCondBr(CmpV, FastBB, SlowBB);

  // point iterator J at first instruction of successorBB
  J = I->begin();

  // Cache phi nodes to be used later in place of other instances
  // of div or rem with the same sign, dividend, and divisor
  DivOpInfo Key(UseSignedOp, Dividend, Divisor);
  DivPhiNodes Value(QuoPhi, RemPhi);
  PerBBDivCache.insert(std::pair<DivOpInfo, DivPhiNodes>(Key, Value));
  return true;
}
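The control flow this builds for a 64-bit division bypassed through a 32-bit type looks roughly like this (a sketch with BypassType = i32; value and block names are invented):

; MainBB: test whether both operands fit in the bypass type.
  %or   = or i64 %dividend, %divisor
  %and  = and i64 %or, -4294967296       ; ~BypassType->getBitMask()
  %fits = icmp eq i64 %and, 0
  br i1 %fits, label %fast, label %slow

fast:                                     ; truncate, divide narrow, widen back
  %d32 = trunc i64 %dividend to i32
  %v32 = trunc i64 %divisor to i32
  %q32 = udiv i32 %d32, %v32
  %r32 = urem i32 %d32, %v32
  %qf  = zext i32 %q32 to i64
  %rf  = zext i32 %r32 to i64
  br label %join

slow:                                     ; full-width divide and remainder
  %qs = udiv i64 %dividend, %divisor
  %rs = urem i64 %dividend, %divisor
  br label %join

join:                                     ; SuccessorBB: the PHIs that replace
  %quo = phi i64 [ %qs, %slow ], [ %qf, %fast ]   ; the original div
  %rem = phi i64 [ %rs, %slow ], [ %rf, %fast ]   ; and rem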
Code example #19
File: StatsTracker.cpp Project: yangke/KATCH
void StatsTracker::computeReachableUncovered() {
  KModule *km = executor.kmodule;
  Module *m = km->module;
  static bool init = true;
  const InstructionInfoTable &infos = *km->infos;
  StatisticManager &sm = *theStatisticManager;
  
  if (init) {
    init = false;

    // Compute call targets. It would be nice to use alias information
    // instead of assuming all indirect calls hit all escaping
    // functions, eh?
    for (Module::iterator fnIt = m->begin(), fn_ie = m->end(); 
         fnIt != fn_ie; ++fnIt) {
      for (Function::iterator bbIt = fnIt->begin(), bb_ie = fnIt->end(); 
           bbIt != bb_ie; ++bbIt) {
        for (BasicBlock::iterator it = bbIt->begin(), ie = bbIt->end(); 
             it != ie; ++it) {
          if (isa<CallInst>(it) || isa<InvokeInst>(it)) {
            CallSite cs(it);
            if (isa<InlineAsm>(cs.getCalledValue())) {
              // We can never call through here so assume no targets
              // (which should be correct anyhow).
              callTargets.insert(std::make_pair(it,
                                                std::vector<Function*>()));
            } else if (Function *target = getDirectCallTarget(cs)) {
              callTargets[it].push_back(target);
            } else {
              /* disable the escape analysis since it overapproximates
               * too much the possible aliases
              callTargets[it] = 
                std::vector<Function*>(km->escapingFunctions.begin(),
                                       km->escapingFunctions.end());
              */
            }
          }
        }
      }
    }

    // Compute function callers as reflexion of callTargets.
    for (calltargets_ty::iterator it = callTargets.begin(), 
           ie = callTargets.end(); it != ie; ++it)
      for (std::vector<Function*>::iterator fit = it->second.begin(), 
             fie = it->second.end(); fit != fie; ++fit) 
        functionCallers[*fit].push_back(it->first);

    // Initialize minDistToReturn to shortest paths through
    // functions. 0 is unreachable.
    std::vector<Instruction *> instructions;
    for (Module::iterator fnIt = m->begin(), fn_ie = m->end(); 
         fnIt != fn_ie; ++fnIt) {
      if (fnIt->isDeclaration()) {
        if (fnIt->doesNotReturn()) {
          functionShortestPath[fnIt] = 0;
        } else {
          functionShortestPath[fnIt] = 1; // whatever
        }
      } else {
        functionShortestPath[fnIt] = 0;
      }

      // Not sure if I should bother to preorder here. XXX I should.
      for (Function::iterator bbIt = fnIt->begin(), bb_ie = fnIt->end(); 
           bbIt != bb_ie; ++bbIt) {
        for (BasicBlock::iterator it = bbIt->begin(), ie = bbIt->end(); 
             it != ie; ++it) {
          instructions.push_back(it);
          unsigned id = infos.getInfo(it).id;
          sm.setIndexedValue(stats::minDistToReturn, 
                             id, 
                             isa<ReturnInst>(it)
#if LLVM_VERSION_CODE < LLVM_VERSION(3, 1)
                             || isa<UnwindInst>(it)
#endif
                             );
        }
      }
    }
  
    std::reverse(instructions.begin(), instructions.end());
    
    // I'm so lazy it's not even worklisted.
    bool changed;
    do {
      changed = false;
      for (std::vector<Instruction*>::iterator it = instructions.begin(),
             ie = instructions.end(); it != ie; ++it) {
        Instruction *inst = *it;
        unsigned bestThrough = 0;

        if (isa<CallInst>(inst) || isa<InvokeInst>(inst)) {
          std::vector<Function*> &targets = callTargets[inst];
          for (std::vector<Function*>::iterator fnIt = targets.begin(),
                 ie = targets.end(); fnIt != ie; ++fnIt) {
            uint64_t dist = functionShortestPath[*fnIt];
            if (dist) {
              dist = 1+dist; // count instruction itself
              if (bestThrough==0 || dist<bestThrough)
                bestThrough = dist;
            }
          }
        } else {
          bestThrough = 1;
        }
       
        if (bestThrough) {
          unsigned id = infos.getInfo(*it).id;
          uint64_t best, cur = best = sm.getIndexedValue(stats::minDistToReturn, id);
          std::vector<Instruction*> succs = getSuccs(*it);
          for (std::vector<Instruction*>::iterator it2 = succs.begin(),
                 ie = succs.end(); it2 != ie; ++it2) {
            uint64_t dist = sm.getIndexedValue(stats::minDistToReturn,
                                               infos.getInfo(*it2).id);
            if (dist) {
              uint64_t val = bestThrough + dist;
              if (best==0 || val<best)
                best = val;
            }
          }
          // there's a corner case here when a function only includes a single
          // instruction (a ret). in that case, we MUST update
          // functionShortestPath, or it will remain 0 (erroneously indicating
          // that no return instructions are reachable)
          Function *f = inst->getParent()->getParent();
          if (best != cur
              || (inst == f->begin()->begin()
                  && functionShortestPath[f] != best)) {
            sm.setIndexedValue(stats::minDistToReturn, id, best);
            changed = true;

            // Update shortest path if this is the entry point.
            if (inst==f->begin()->begin())
              functionShortestPath[f] = best;
          }
        }
      }
    } while (changed);
  }

  // compute minDistToUncovered, 0 is unreachable
  std::vector<Instruction *> instructions;
  for (Module::iterator fnIt = m->begin(), fn_ie = m->end(); 
       fnIt != fn_ie; ++fnIt) {
    // Not sure if I should bother to preorder here.
    for (Function::iterator bbIt = fnIt->begin(), bb_ie = fnIt->end(); 
         bbIt != bb_ie; ++bbIt) {
      for (BasicBlock::iterator it = bbIt->begin(), ie = bbIt->end(); 
           it != ie; ++it) {
        unsigned id = infos.getInfo(it).id;
        instructions.push_back(&*it);
        const InstructionInfo& ii = km->infos->getInfo(&*it);
        uint64_t initial = executor.patchContainer.inPatch(ii.file, ii.line, ii.assemblyLine) &&
          sm.getIndexedValue(stats::uncoveredInstructions, id);
        sm.setIndexedValue(stats::minDistToUncovered, 
                           id, 
                           initial);
      }
    }
  }
  
  std::reverse(instructions.begin(), instructions.end());
  
  // I'm so lazy it's not even worklisted.
  bool changed;
  do {
    changed = false;
    for (std::vector<Instruction*>::iterator it = instructions.begin(),
           ie = instructions.end(); it != ie; ++it) {
      Instruction *inst = *it;
      uint64_t best, cur = best = sm.getIndexedValue(stats::minDistToUncovered, 
                                                     infos.getInfo(inst).id);
      unsigned bestThrough = 0;
      
      /* KATCH: don't 'step into' function calls when computing
       * the minimum distance.
       * This heuristic seems to perform better overall but
       * can hurt us if the result of the function call is relevant
       * for reaching the target.
       * A better solution would be to first determine whether the
       * call is relevant.
       */
#ifdef KATCH_USE_FUNCTION_CALLS
      if (isa<CallInst>(inst) || isa<InvokeInst>(inst)) {
        std::vector<Function*> &targets = callTargets[inst];
        for (std::vector<Function*>::iterator fnIt = targets.begin(),
               ie = targets.end(); fnIt != ie; ++fnIt) {
          uint64_t dist = functionShortestPath[*fnIt];
          if (dist) {
            dist = 1+dist; // count instruction itself
            if (bestThrough==0 || dist<bestThrough)
              bestThrough = dist;
          }

          if (!(*fnIt)->isDeclaration()) {
            uint64_t calleeDist = sm.getIndexedValue(stats::minDistToUncovered,
                                                     infos.getFunctionInfo(*fnIt).id);
            if (calleeDist) {
              calleeDist = 1+calleeDist; // count instruction itself
              if (best==0 || calleeDist<best)
                best = calleeDist;
            }
          }
        }
      } else
#endif
      {
        TerminatorInst* tinst;
        if ((tinst = dyn_cast<TerminatorInst>(inst))) {
          if (tinst->getNumSuccessors() > 1)
            bestThrough = 1;
        }
      }

      std::vector<Instruction*> succs = getSuccsAndCalls(m, inst);
      for (std::vector<Instruction*>::iterator it2 = succs.begin(),
           ie = succs.end(); it2 != ie; ++it2) {
        uint64_t dist = sm.getIndexedValue(stats::minDistToUncovered,
                                           infos.getInfo(*it2).id);
        if (dist) {
          uint64_t val = bestThrough + dist;
          if (best==0 || val<best)
            best = val;
        }
      }

      if (best != cur) {
        sm.setIndexedValue(stats::minDistToUncovered, 
                           infos.getInfo(inst).id, 
                           best);
        changed = true;
      }
    }
  } while (changed);

  for (std::set<ExecutionState*>::iterator it = executor.states.begin(),
         ie = executor.states.end(); it != ie; ++it) {
    ExecutionState *es = *it;
    uint64_t currentFrameMinDist = 0;
    for (ExecutionState::stack_ty::iterator sfIt = es->stack.begin(),
           sf_ie = es->stack.end(); sfIt != sf_ie; ++sfIt) {
      ExecutionState::stack_ty::iterator next = sfIt + 1;
      KInstIterator kii;

      if (next==es->stack.end()) {
        kii = es->pc;
      } else {
        kii = next->caller;
        ++kii;
      }
      
      sfIt->minDistToUncoveredOnReturn = currentFrameMinDist;
      
      currentFrameMinDist = computeMinDistToUncovered(kii, currentFrameMinDist);
    }
  }
}
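
The sweep above is a plain iterate-to-fixpoint shortest-distance computation: an instruction's value becomes the best successor value plus the cost of stepping through it, repeated until nothing changes. The standalone C++ sketch below (toy graph and names invented for illustration; the pass-through cost is simplified to a constant 1, unlike the bestThrough logic above) reproduces that propagation rule, keeping the convention that a value of 0 means "cannot reach uncovered code".

// Minimal sketch: fixed-point propagation of "min distance to an uncovered
// instruction" over a toy successor graph. A distance of 0 means unknown or
// unreachable, matching the convention used by the pass above.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // succ[i] lists the successors of instruction i.
  std::vector<std::vector<int>> succ = {{1}, {2, 3}, {}, {}};
  // Seed: uncovered instructions start at distance 1, everything else at 0.
  std::vector<uint64_t> dist = {0, 0, 0, 1};

  bool changed;
  do {
    changed = false;
    for (int i = static_cast<int>(succ.size()) - 1; i >= 0; --i) {
      uint64_t best = dist[i];
      for (int s : succ[i]) {
        if (dist[s]) {                  // successor can reach uncovered code
          uint64_t val = 1 + dist[s];   // +1 for stepping through this node
          if (best == 0 || val < best)
            best = val;
        }
      }
      if (best != dist[i]) { dist[i] = best; changed = true; }
    }
  } while (changed);

  for (std::size_t i = 0; i < dist.size(); ++i)
    std::cout << "inst " << i << ": minDistToUncovered = " << dist[i] << "\n";
}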
Code example #20
///
/// runOnFunction 
///
bool LongLongMemAccessLowering::runOnFunction(Function &F) {
  std::vector<StoreInst *> storeInstV;
  std::vector<LoadInst *> loadInstV;

  for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
    for (BasicBlock::iterator BI = I->begin(), BE = I->end(); BI != BE; ++BI) {
      Instruction *inst = BI;
      if (StoreInst *storeInst = dyn_cast<StoreInst>(inst)) {
        if (!storeInst->getValueOperand()->getType()->isIntegerTy(64)) {
          continue;
        }
        assert(cast<PointerType>(storeInst->getPointerOperand()->getType())
            ->getElementType()->isIntegerTy(64));
        storeInstV.push_back(storeInst);
      }
      else if (LoadInst *loadInst = dyn_cast<LoadInst>(inst)) {
        if (!loadInst->getType()->isIntegerTy(64)) {
          continue;
        }
        assert(cast<PointerType>(loadInst->getPointerOperand()->getType())
            ->getElementType()->isIntegerTy(64));
        loadInstV.push_back(loadInst);
      }
    }
  }

  for (unsigned i = 0; i < storeInstV.size(); ++i) {
    StoreInst *inst = storeInstV.at(i);
    Value *storeVal = inst->getValueOperand();
    Value *storePointer = inst->getPointerOperand();

    // Insert new instructions
    BitCastInst *storeAddrLo = new BitCastInst(storePointer, Type::getInt32PtrTy(F.getContext()), "", inst);
    ConstantInt *offsetConst = ConstantInt::getSigned(Type::getInt32Ty(F.getContext()), 1);
    std::vector<Value *> gepIdxList(1, offsetConst);
    GetElementPtrInst *storeAddrHi = GetElementPtrInst::Create(storeAddrLo, gepIdxList, "", inst);
    TruncInst *storeValLo = new TruncInst(storeVal, Type::getInt32Ty(F.getContext()), "", inst);
    Value *aShrOffset = ConstantInt::getSigned(Type::getInt64Ty(F.getContext()), 32);
    BinaryOperator *storeValAShr = BinaryOperator::Create(Instruction::AShr, storeVal,
        aShrOffset, "", inst);
    TruncInst *storeValHi = new TruncInst(storeValAShr, Type::getInt32Ty(F.getContext()), "", inst);

    StoreInst *storeLo = new StoreInst(storeValLo, storeAddrLo, inst);
    StoreInst *storeHi = new StoreInst(storeValHi, storeAddrHi, inst);
    storeLo->setAlignment(4);
    storeHi->setAlignment(4);
    
    // Remove inst
    inst->eraseFromParent();
  }

  for (unsigned i = 0; i < loadInstV.size(); ++i) {
    LoadInst *inst = loadInstV.at(i);
    Value *loadPointer = inst->getPointerOperand();

    // Insert new instructions
    BitCastInst *loadAddrLo = new BitCastInst(loadPointer, Type::getInt32PtrTy(F.getContext()), "", inst);
    ConstantInt *offsetConst = ConstantInt::getSigned(Type::getInt32Ty(F.getContext()), 1);
    std::vector<Value *> gepIdxList(1, offsetConst);
    GetElementPtrInst *loadAddrHi = GetElementPtrInst::Create(loadAddrLo, gepIdxList, "", inst);

    LoadInst *loadLo = new LoadInst(loadAddrLo, "", inst);
    LoadInst *loadHi = new LoadInst(loadAddrHi, "", inst);

    ZExtInst *loadLoLL = new ZExtInst(loadLo, Type::getInt64Ty(F.getContext()), "", inst);
    ZExtInst *loadHiLL = new ZExtInst(loadHi, Type::getInt64Ty(F.getContext()), "", inst);

    Value *shlOffset = ConstantInt::getSigned(Type::getInt64Ty(F.getContext()), 32);
    BinaryOperator *loadHiLLShl = BinaryOperator::Create(Instruction::Shl, loadHiLL, shlOffset, "", inst);
    BinaryOperator *loadValue = BinaryOperator::Create(Instruction::Or, loadLoLL, loadHiLLShl, "");

    // Replace inst with new "loaded" value, the old value is deleted
    ReplaceInstWithInst(inst, loadValue);
  }

  return true; // function is modified
}
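
At memory level the rewrite above stores the low 32-bit half at word offset 0 and the shifted high half at word offset 1, and reassembles a load with two zero-extends, a shift and an or. The plain-C++ sketch below mirrors that behaviour; it assumes a little-endian layout with the low word first, which is what the word-0/word-1 split above implies, and is only an illustration of the semantics, not the pass itself.

// Sketch: split a 64-bit store into two 32-bit stores and rebuild a 64-bit
// load from two 32-bit loads, as the lowering above does in IR.
#include <cassert>
#include <cstdint>

void store64(uint32_t *p, int64_t v) {
  uint64_t u = static_cast<uint64_t>(v);
  p[0] = static_cast<uint32_t>(u);         // trunc         -> low half, word 0
  p[1] = static_cast<uint32_t>(u >> 32);   // shift + trunc -> high half, word 1
}

int64_t load64(const uint32_t *p) {
  uint64_t lo = p[0];                      // zext of the low word
  uint64_t hi = p[1];                      // zext of the high word
  return static_cast<int64_t>(lo | (hi << 32));  // shl + or
}

int main() {
  uint32_t buf[2];
  store64(buf, -0x123456789abcdef0LL);
  assert(load64(buf) == -0x123456789abcdef0LL);
}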
Code example #21
File: Mips16HardFloat.cpp Project: c-ong/llvm
//
// Returns of float, double and complex need to be handled with a helper
// function.
//
static bool fixupFPReturnAndCall
  (Function &F, Module *M,  const MipsSubtarget &Subtarget) {
  bool Modified = false;
  LLVMContext &C = M->getContext();
  Type *MyVoid = Type::getVoidTy(C);
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end();
         I != E; ++I) {
      Instruction &Inst = *I;
      if (const ReturnInst *RI = dyn_cast<ReturnInst>(I)) {
        Value *RVal = RI->getReturnValue();
        if (!RVal) continue;
        //
        // If there is a return value and it needs a helper function,
        // figure out which one and add a call before the actual
        // return to this helper. The purpose of the helper is to move
        // floating point values from their soft float return mapping to
        // where they would have been mapped to in floating point registers.
        //
        Type *T = RVal->getType();
        FPReturnVariant RV = whichFPReturnVariant(T);
        if (RV == NoFPRet) continue;
        static const char* Helper[NoFPRet] =
          {"__mips16_ret_sf", "__mips16_ret_df", "__mips16_ret_sc",
           "__mips16_ret_dc"};
        const char *Name = Helper[RV];
        AttributeSet A;
        Value *Params[] = {RVal};
        Modified = true;
        //
        // These helper functions have a different calling ABI so
        // this __Mips16RetHelper indicates that so that later
        // during call setup, the proper call lowering to the helper
        // functions will take place.
        //
        A = A.addAttribute(C, AttributeSet::FunctionIndex,
                           "__Mips16RetHelper");
        A = A.addAttribute(C, AttributeSet::FunctionIndex,
                           Attribute::ReadNone);
        A = A.addAttribute(C, AttributeSet::FunctionIndex,
                           Attribute::NoInline);
        Value *F = (M->getOrInsertFunction(Name, A, MyVoid, T, NULL));
        CallInst::Create(F, Params, "", &Inst );
      } else if (const CallInst *CI = dyn_cast<CallInst>(I)) {
          const Value* V = CI->getCalledValue();
          const Type* T = 0;
          if (V) T = V->getType();
          const PointerType *PFT=0;
          if (T) PFT = dyn_cast<PointerType>(T);
          const FunctionType *FT=0;
          if (PFT) FT = dyn_cast<FunctionType>(PFT->getElementType());
          Function *F_ =  CI->getCalledFunction();
          if (FT && needsFPReturnHelper(*FT) &&
              !(F_ && isIntrinsicInline(F_))) {
            Modified=true;
            F.addFnAttr("saveS2");
          }
          if (F_ && !isIntrinsicInline(F_)) {
            // PIC-mode calls are handled by already-defined
            // helper functions.
            if (needsFPReturnHelper(*F_)) {
              Modified=true;
              F.addFnAttr("saveS2");
            }
            if (Subtarget.getRelocationModel() != Reloc::PIC_ ) {
              if (needsFPHelperFromSig(*F_)) {
                assureFPCallStub(*F_, M, Subtarget);
                Modified=true;
              }
            }
          }
      }
    }
  return Modified;
}
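
The helper call above is driven by a small table from FP return variant to a runtime symbol. The snippet below restates that mapping as a standalone function; the enum ordering is an assumption inferred from the Helper[] array above (FRet, DRet, CFRet, CDRet), not a copy of the pass's whichFPReturnVariant logic.

// Sketch: map an FP return variant to its __mips16_ret_* helper name.
// The enum order here is assumed to match the Helper[] table above.
#include <cstdio>

enum FPReturnVariant { FRet, DRet, CFRet, CDRet, NoFPRet };

const char *retHelperName(FPReturnVariant RV) {
  static const char *Helper[NoFPRet] = {
      "__mips16_ret_sf", "__mips16_ret_df",
      "__mips16_ret_sc", "__mips16_ret_dc"};
  return RV == NoFPRet ? nullptr : Helper[RV];
}

int main() {
  std::printf("%s\n", retHelperName(DRet)); // __mips16_ret_df
  return 0;
}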
Code example #22
int compile(list<string> args, list<string> kgen_args,
            string merge, list<string> merge_args,
            string input, string output, int arch,
            string host_compiler, string fileprefix)
{
    //
    // The LLVM compiler to emit IR.
    //
    const char* llvm_compiler = "kernelgen-gfortran";

    //
    // Interpret kernelgen compile options.
    //
    for (list<string>::iterator iarg = kgen_args.begin(),
            iearg = kgen_args.end(); iarg != iearg; iarg++)
    {
        const char* arg = (*iarg).c_str();
        if (!strncmp(arg, "-Wk,--llvm-compiler=", 20))
            llvm_compiler = arg + 20;
    }

    //
    // Generate a temporary output file.
    // If the command line already specifies an output file, redirect it
    // to the temporary file; otherwise append "-o <tempfile>".
    //
    cfiledesc tmp_output = cfiledesc::mktemp(fileprefix);
    bool output_specified = false;
    for (list<string>::iterator iarg = args.begin(),
            iearg = args.end(); iarg != iearg; iarg++)
    {
        const char* arg = (*iarg).c_str();
        if (!strcmp(arg, "-o"))
        {
            iarg++;
            *iarg = tmp_output.getFilename();
            output_specified = true;
            break;
        }
    }
    if (!output_specified)
    {
        args.push_back("-o");
        args.push_back(tmp_output.getFilename());
    }

    //
    // 1) Compile source code using regular host compiler.
    //
    {
        if (verbose)
        {
            cout << host_compiler;
            for (list<string>::iterator iarg = args.begin(),
                    iearg = args.end(); iarg != iearg; iarg++)
                cout << " " << *iarg;
            cout << endl;
        }
        int status = execute(host_compiler, args, "", NULL, NULL);
        if (status) return status;
    }

    //
    // 2) Emit LLVM IR.
    //
    string out = "";
    {
        list<string> emit_ir_args;
        for (list<string>::iterator iarg = args.begin(),
                iearg = args.end(); iarg != iearg; iarg++)
        {
            const char* arg = (*iarg).c_str();
            if (!strcmp(arg, "-c") || !strcmp(arg, "-o"))
            {
                iarg++;
                continue;
            }
            if (!strcmp(arg, "-g"))
            {
                continue;
            }
            emit_ir_args.push_back(*iarg);
        }
        emit_ir_args.push_back("-fplugin=/opt/kernelgen/lib/dragonegg.so");
        emit_ir_args.push_back("-fplugin-arg-dragonegg-emit-ir");
        emit_ir_args.push_back("-S");
        emit_ir_args.push_back(input);
        emit_ir_args.push_back("-o");
        emit_ir_args.push_back("-");
        if (verbose)
        {
            cout << llvm_compiler;
            for (list<string>::iterator iarg = emit_ir_args.begin(),
                    iearg = emit_ir_args.end(); iarg != iearg; iarg++)
                cout << " " << *iarg;
            cout << endl;
        }
        int status = execute(llvm_compiler, emit_ir_args, "", &out, NULL);
        if (status) return status;
    }

    //
    // 3) Record existing module functions.
    //
    LLVMContext &context = getGlobalContext();
    SMDiagnostic diag;
    MemoryBuffer* buffer1 = MemoryBuffer::getMemBuffer(out);
    auto_ptr<Module> m1;
    m1.reset(ParseIR(buffer1, diag, context));

    //m1.get()->dump();

    //
    // 4) Inline calls and extract loops into new functions.
    //
    MemoryBuffer* buffer2 = MemoryBuffer::getMemBuffer(out);
    auto_ptr<Module> m2;
    m2.reset(ParseIR(buffer2, diag, context));
    {
        PassManager manager;
        manager.add(createInstructionCombiningPass());
        manager.run(*m2.get());
    }
    std::vector<CallInst *> LoopFuctionCalls;
    {
        PassManager manager;
        manager.add(createBranchedLoopExtractorPass(LoopFuctionCalls));
        manager.run(*m2.get());
    }

    //m2.get()->dump();

    //
    // 5) Replace calls to loop functions with calls to the launcher.
    // Append the "always inline" attribute to all other functions.
    //
    Type* int32Ty = Type::getInt32Ty(context);
    Function* launch = Function::Create(
                           TypeBuilder<types::i<32>(types::i<8>*, types::i<64>, types::i<32>*), true>::get(context),
                           GlobalValue::ExternalLinkage, "kernelgen_launch", m2.get());
    for (Module::iterator f1 = m2.get()->begin(), fe1 = m2.get()->end(); f1 != fe1; f1++)
    {
        Function* func = f1;
        if (func->isDeclaration()) continue;

        // Search for the current function in the original module's
        // function list. If it is not there, it was generated by the
        // loop extractor. Functions that do exist in the original
        // module simply get the "always inline" attribute.
        if (m1.get()->getFunction(func->getName()))
        {
            const AttrListPtr attr = func->getAttributes();
            const AttrListPtr attr_new = attr.addAttr(~0U, Attribute::AlwaysInline);
            func->setAttributes(attr_new);
            continue;
        }

        // Each such function must be extracted into a standalone
        // module and packed into the resulting object file's
        // data section.
        if (verbose)
            cout << "Preparing loop function " << func->getName().data() <<
                 " ..." << endl;

        // Reset to default visibility.
        func->setVisibility(GlobalValue::DefaultVisibility);

        // Reset to default linkage.
        func->setLinkage(GlobalValue::ExternalLinkage);

        // Replace call to this function in module with call to launcher.
        bool found = false;
        for (Module::iterator f2 = m2->begin(), fe2 = m2->end(); (f2 != fe2) && !found; f2++)
            for (Function::iterator bb = f2->begin(); (bb != f2->end()) && !found; bb++)
                for (BasicBlock::iterator i = bb->begin(); i != bb->end(); i++)
                {
                    // Check if instruction in focus is a call.
                    CallInst* call = dyn_cast<CallInst>(cast<Value>(i));
                    if (!call) continue;

                    // Check if function is called (needs -instcombine pass).
                    Function* callee = call->getCalledFunction();
                    if (!callee) continue;
                    if (callee->isDeclaration()) continue;
                    if (callee->getName() != func->getName()) continue;

                    // Create a constant array holding original called
                    // function name.
                    Constant* name = ConstantArray::get(
                                         context, callee->getName(), true);

                    // Create and initialize the memory buffer for name.
                    ArrayType* nameTy = cast<ArrayType>(name->getType());
                    AllocaInst* nameAlloc = new AllocaInst(nameTy, "", call);
                    StoreInst* nameInit = new StoreInst(name, nameAlloc, call); // non-volatile store of the name
                    Value* Idx[2];
                    Idx[0] = Constant::getNullValue(Type::getInt32Ty(context));
                    Idx[1] = ConstantInt::get(Type::getInt32Ty(context), 0);
                    GetElementPtrInst* namePtr = GetElementPtrInst::Create(nameAlloc, Idx, "", call);

                    // Add pointer to the original function string name.
                    SmallVector<Value*, 16> call_args;
                    call_args.push_back(namePtr);

                    // Add size of the aggregated arguments structure.
                    {
                        BitCastInst* BC = new BitCastInst(
                            call->getArgOperand(0), Type::getInt64PtrTy(context),
                            "", call);

                        LoadInst* LI = new LoadInst(BC, "", call);
                        call_args.push_back(LI);
                    }

                    // Add original aggregated structure argument.
                    call_args.push_back(call->getArgOperand(0));

                    // Create new function call with new call arguments
                    // and copy old call properties.
                    CallInst* newcall = CallInst::Create(launch, call_args, "", call);
                    //newcall->takeName(call);
                    newcall->setCallingConv(call->getCallingConv());
                    newcall->setAttributes(call->getAttributes());
                    newcall->setDebugLoc(call->getDebugLoc());

                    // Replace old call with new one.
                    call->replaceAllUsesWith(newcall);
                    call->eraseFromParent();

                    found = true;
                    break;
                }
    }

    //m2.get()->dump();

    //
    // 6) Apply optimization passes to the resulting common
    // module.
    //
    {
        PassManager manager;
        manager.add(createLowerSetJmpPass());
        PassManagerBuilder builder;
        builder.Inliner = createFunctionInliningPass();
        builder.OptLevel = 3;
        builder.DisableSimplifyLibCalls = true;
        builder.populateModulePassManager(manager);
        manager.run(*m2.get());
    }

    //m2.get()->dump();

    //
    // 7) Embed the resulting module into object file.
    //
    {
        string ir_string;
        raw_string_ostream ir(ir_string);
        ir << (*m2.get());
        celf e(tmp_output.getFilename(), output);
        e.getSection(".data")->addSymbol(
            "__kernelgen_" + string(input),
            ir_string.c_str(), ir_string.size() + 1);
    }

    return 0;
}
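
Steps 1 and 2 lean on an execute() helper that runs an external compiler and, for step 2, captures its standard output into the out string. A minimal way to provide that in C++ is a POSIX popen-based capture like the sketch below; this is an assumption about what such a helper could look like, not kernelgen's actual execute() implementation.

// Sketch: run a command line and capture its stdout into a string, roughly
// the service the execute(llvm_compiler, emit_ir_args, "", &out, NULL) call
// above depends on. Uses POSIX popen/pclose.
#include <cstdio>
#include <iostream>
#include <string>

int runAndCapture(const std::string &cmdline, std::string &out) {
  FILE *pipe = popen(cmdline.c_str(), "r");
  if (!pipe) return -1;
  char buf[4096];
  size_t n;
  while ((n = fread(buf, 1, sizeof(buf), pipe)) > 0)
    out.append(buf, n);
  return pclose(pipe);   // exit status of the child (0 on success)
}

int main() {
  std::string out;
  int status = runAndCapture("echo hello", out);
  std::cout << "status=" << status << " captured: " << out;
}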
Code example #23
bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
  SmallVector<LoadInst *, 4> aggrLoads;
  SmallVector<MemTransferInst *, 4> aggrMemcpys;
  SmallVector<MemSetInst *, 4> aggrMemsets;

  const DataLayout &DL = F.getParent()->getDataLayout();
  LLVMContext &Context = F.getParent()->getContext();

  //
  // Collect all the aggrLoads, aggrMemcpys and aggrMemsets.
  //
  //const BasicBlock *firstBB = &F.front();  // first BB in F
  for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
    //BasicBlock *bb = BI;
    for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
         ++II) {
      if (LoadInst *load = dyn_cast<LoadInst>(II)) {

        if (!load->hasOneUse())
          continue;

        if (DL.getTypeStoreSize(load->getType()) < MaxAggrCopySize)
          continue;

        User *use = load->user_back();
        if (StoreInst *store = dyn_cast<StoreInst>(use)) {
          if (store->getOperand(0) != load) //getValueOperand
            continue;
          aggrLoads.push_back(load);
        }
      } else if (MemTransferInst *intr = dyn_cast<MemTransferInst>(II)) {
        Value *len = intr->getLength();
        // If the number of elements being copied is greater
        // than MaxAggrCopySize, lower it to a loop
        if (ConstantInt *len_int = dyn_cast<ConstantInt>(len)) {
          if (len_int->getZExtValue() >= MaxAggrCopySize) {
            aggrMemcpys.push_back(intr);
          }
        } else {
          // turn variable length memcpy/memmov into loop
          aggrMemcpys.push_back(intr);
        }
      } else if (MemSetInst *memsetintr = dyn_cast<MemSetInst>(II)) {
        Value *len = memsetintr->getLength();
        if (ConstantInt *len_int = dyn_cast<ConstantInt>(len)) {
          if (len_int->getZExtValue() >= MaxAggrCopySize) {
            aggrMemsets.push_back(memsetintr);
          }
        } else {
          // turn variable length memset into loop
          aggrMemsets.push_back(memsetintr);
        }
      }
    }
  }
  if ((aggrLoads.size() == 0) && (aggrMemcpys.size() == 0) &&
      (aggrMemsets.size() == 0))
    return false;

  //
  // Do the transformation of an aggr load/copy/set to a loop
  //
  for (unsigned i = 0, e = aggrLoads.size(); i != e; ++i) {
    LoadInst *load = aggrLoads[i];
    StoreInst *store = dyn_cast<StoreInst>(*load->user_begin());
    Value *srcAddr = load->getOperand(0);
    Value *dstAddr = store->getOperand(1);
    unsigned numLoads = DL.getTypeStoreSize(load->getType());
    Value *len = ConstantInt::get(Type::getInt32Ty(Context), numLoads);

    convertTransferToLoop(store, srcAddr, dstAddr, len, load->isVolatile(),
                          store->isVolatile(), Context, F);

    store->eraseFromParent();
    load->eraseFromParent();
  }

  for (unsigned i = 0, e = aggrMemcpys.size(); i != e; ++i) {
    MemTransferInst *cpy = aggrMemcpys[i];
    Value *len = cpy->getLength();
    // The llvm 2.7 version of memcpy does not have a volatile
    // operand yet, so optimistically treat the copy as non-volatile
    // to avoid emitting unnecessary st.volatile in PTX.
    convertTransferToLoop(cpy, cpy->getSource(), cpy->getDest(), len, false,
                          false, Context, F);
    cpy->eraseFromParent();
  }

  for (unsigned i = 0, e = aggrMemsets.size(); i != e; ++i) {
    MemSetInst *memsetinst = aggrMemsets[i];
    Value *len = memsetinst->getLength();
    Value *val = memsetinst->getValue();
    convertMemSetToLoop(memsetinst, memsetinst->getDest(), len, val, Context,
                        F);
    memsetinst->eraseFromParent();
  }

  return true;
}
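
convertTransferToLoop and convertMemSetToLoop replace one large memory intrinsic with an explicit per-element loop in IR. Semantically, the generated code behaves like the plain-C++ byte loops below; this is only an illustration of what the lowering produces, not the helpers themselves.

// Sketch: the loops that large memcpy/memmove/memset intrinsics are lowered to.
#include <cstddef>
#include <cstdint>

void copyAsLoop(uint8_t *dst, const uint8_t *src, std::size_t len) {
  for (std::size_t i = 0; i != len; ++i)   // one load and one store per element
    dst[i] = src[i];
}

void setAsLoop(uint8_t *dst, uint8_t val, std::size_t len) {
  for (std::size_t i = 0; i != len; ++i)   // one store per element
    dst[i] = val;
}

int main() {
  uint8_t src[8] = {1, 2, 3, 4, 5, 6, 7, 8}, dst[8] = {0};
  copyAsLoop(dst, src, sizeof(dst));
  setAsLoop(src, 0xFF, sizeof(src));
  return dst[7] == 8 ? 0 : 1;
}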
Code example #24
//
// Method: runOnFunction()
//
// Description:
//  This is the entry point for this LLVM function pass.  The pass manager will
//  call this method for every function in the Module that will be transformed.
//
// Inputs:
//  F - A reference to the function to transform.
//
// Outputs:
//  F - The function will be modified to register and unregister stack objects.
//
// Return value:
//  true  - The function was modified.
//  false - The function was not modified.
//
bool
RegisterStackObjPass::runOnFunction(Function & F) {
  //
  // Get prerequisite analysis information.
  //
  TD = &F.getParent()->getDataLayout();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  DF = &getAnalysis<DominanceFrontier>();

  //
  // Get pointers to the functions for registering and unregistering pointers.
  //
  PoolRegister = F.getParent()->getFunction ("pool_register_stack");
  StackFree    = F.getParent()->getFunction ("pool_unregister_stack");
  assert (PoolRegister);
  assert (StackFree);

  // The set of registered stack objects
  std::vector<CallInst *> PoolRegisters;

  // The set of stack objects within the function.
  std::vector<AllocaInst *> AllocaList;

  // The set of instructions that can cause the function to return to its
  // caller.
  std::vector<Instruction *> ExitPoints;

  //
  // Scan the function to register allocas and find locations where registered
  // allocas to be deregistered.
  //
  for (Function::iterator BI = F.begin(); BI != F.end(); ++BI) {
    //
    // Create a list of alloca instructions to register.  Note that we create
    // the list ahead of time because registerAllocaInst() will create new
    // alloca instructions.
    //
    for (BasicBlock::iterator I = BI->begin(); I != BI->end(); ++I) {
      if (AllocaInst * AI = dyn_cast<AllocaInst>(I)) {
        //
        // Ensure that the alloca is not within a loop; we don't support that yet.
        //
#if 0
        if (LI->getLoopFor (BI)) {
          assert (0 &&
                  "Register Stack Objects: No support for alloca in loop!\n");
          abort();
        }
        AllocaList.push_back (AI);
#else
        if (!(LI->getLoopFor (BI))) {
          AllocaList.push_back (AI);
        }
#endif
      }
    }

    //
    // Add calls to register the allocated stack objects.
    //
    while (AllocaList.size()) {
      AllocaInst * AI = AllocaList.back();
      AllocaList.pop_back();
      if (CallInst * CI = registerAllocaInst (AI))
        PoolRegisters.push_back(CI);
    }

    //
    // If the terminator instruction of this basic block can return control
    // flow back to the caller, mark it as a place where a deregistration
    // is needed.
    //
    Instruction * Terminator = BI->getTerminator();
    if ((isa<ReturnInst>(Terminator)) || (isa<ResumeInst>(Terminator))) {
      ExitPoints.push_back (Terminator);
    }
  }

  //
  // Insert poolunregister calls for all of the registered allocas.
  //
  insertPoolFrees (PoolRegisters, ExitPoints, &F.getContext());

  //
  // Conservatively assume that we've changed the function.
  //
  return true;
}
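
The net effect of the pass is to bracket each registered alloca with a pool_register_stack call right after the allocation and a pool_unregister_stack call at every return or resume point. The C++ sketch below shows that shape at source level; the pool_* signatures and bodies are assumptions made purely for illustration, not the SAFECode runtime's actual interface.

// Sketch: what a function looks like after stack-object registration.
// The pool_* functions stand in for the runtime calls the pass inserts;
// their signatures here are assumptions for illustration only.
#include <cstddef>

void pool_register_stack(void *obj, std::size_t size) { (void)obj; (void)size; }
void pool_unregister_stack(void *obj) { (void)obj; }

int instrumented(int n) {
  int buf[16];                            // the original alloca
  pool_register_stack(buf, sizeof(buf));  // inserted right after the alloca

  int sum = 0;
  for (int i = 0; i < 16 && i < n; ++i)
    sum += (buf[i] = i);

  pool_unregister_stack(buf);             // inserted before every exit point
  return sum;
}

int main() { return instrumented(4) == 6 ? 0 : 1; }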
Code example #25
File: StatsTracker.cpp Project: Icefroge/cloud9
void StatsTracker::writeIStats() {
  Module *m = executor.kmodule->module;
  uint64_t istatsMask = 0;
  std::ostream &of = *istatsFile;
  
  of.seekp(0, std::ios::end);
  unsigned istatsSize = of.tellp();
  of.seekp(0);

  of << "version: 1\n";
  of << "creator: klee\n";
  of << "pid: " << sys::Process::GetCurrentUserId() << "\n";
  of << "cmd: " << m->getModuleIdentifier() << "\n\n";
  of << "\n";
  
  StatisticManager &sm = *theStatisticManager;
  unsigned nStats = sm.getNumStatistics();

  // Max is 13, sadly
  istatsMask |= 1<<sm.getStatisticID("Queries");
  istatsMask |= 1<<sm.getStatisticID("QueriesValid");
  istatsMask |= 1<<sm.getStatisticID("QueriesInvalid");
  istatsMask |= 1<<sm.getStatisticID("QueryTime");
  istatsMask |= 1<<sm.getStatisticID("ResolveTime");
  istatsMask |= 1<<sm.getStatisticID("Instructions");
  istatsMask |= 1<<sm.getStatisticID("InstructionTimes");
  istatsMask |= 1<<sm.getStatisticID("InstructionRealTimes");
  istatsMask |= 1<<sm.getStatisticID("Forks");
  istatsMask |= 1<<sm.getStatisticID("CoveredInstructions");
  istatsMask |= 1<<sm.getStatisticID("UncoveredInstructions");
  istatsMask |= 1<<sm.getStatisticID("States");
  istatsMask |= 1<<sm.getStatisticID("MinDistToUncovered");

  of << "positions: instr line\n";

  for (unsigned i=0; i<nStats; i++) {
    if (istatsMask & (1<<i)) {
      Statistic &s = sm.getStatistic(i);
      of << "event: " << s.getShortName() << " : " 
         << s.getName() << "\n";
    }
  }

  of << "events: ";
  for (unsigned i=0; i<nStats; i++) {
    if (istatsMask & (1<<i))
      of << sm.getStatistic(i).getShortName() << " ";
  }
  of << "\n";
  
  // set state counts, decremented after we process so that we don't
  // have to zero all records each time.
  if (istatsMask & (1<<stats::states.getID()))
    updateStateStatistics(1);

  std::string sourceFile = "";

  CallSiteSummaryTable callSiteStats;
  if (UseCallPaths)
    callPathManager.getSummaryStatistics(callSiteStats);

  of << "ob=" << objectFilename << "\n";

  for (Module::iterator fnIt = m->begin(), fn_ie = m->end(); 
       fnIt != fn_ie; ++fnIt) {
    if (!fnIt->isDeclaration()) {
      of << "fn=" << CXXDemangle(fnIt->getName().str()) << "\n";
      for (Function::iterator bbIt = fnIt->begin(), bb_ie = fnIt->end(); 
           bbIt != bb_ie; ++bbIt) {
        for (BasicBlock::iterator it = bbIt->begin(), ie = bbIt->end(); 
             it != ie; ++it) {
          Instruction *instr = &*it;
          const InstructionInfo &ii = executor.kmodule->infos->getInfo(instr);
          unsigned index = ii.id;
          if (ii.file!=sourceFile) {
            of << "fl=" << ii.file << "\n";
            sourceFile = ii.file;
          }
          of << ii.assemblyLine << " ";
          of << ii.line << " ";
          for (unsigned i=0; i<nStats; i++)
            if (istatsMask&(1<<i))
              of << sm.getIndexedValue(sm.getStatistic(i), index) << " ";
          of << "\n";

          if (UseCallPaths && 
              (isa<CallInst>(instr) || isa<InvokeInst>(instr))) {
            CallSiteSummaryTable::iterator it = callSiteStats.find(instr);
            if (it!=callSiteStats.end()) {
              for (std::map<llvm::Function*, CallSiteInfo>::iterator
                     fit = it->second.begin(), fie = it->second.end(); 
                   fit != fie; ++fit) {
                Function *f = fit->first;
                CallSiteInfo &csi = fit->second;
                const InstructionInfo &fii = 
                  executor.kmodule->infos->getFunctionInfo(f);
  
                if (fii.file!="" && fii.file!=sourceFile)
                  of << "cfl=" << fii.file << "\n";
                of << "cfn=" << CXXDemangle(f->getName().str()) << "\n";
                of << "calls=" << csi.count << " ";
                of << fii.assemblyLine << " ";
                of << fii.line << "\n";

                of << ii.assemblyLine << " ";
                of << ii.line << " ";
                for (unsigned i=0; i<nStats; i++) {
                  if (istatsMask&(1<<i)) {
                    Statistic &s = sm.getStatistic(i);
                    uint64_t value;

                    // Hack, ignore things that don't make sense on
                    // call paths.
                    if (&s == &stats::locallyUncoveredInstructions) {
                      value = 0;
                    } else {
                      value = csi.statistics.getValue(s);
                    }

                    of << value << " ";
                  }
                }
                of << "\n";
              }
            }
          }
        }
      }
    }
  }

  if (istatsMask & (1<<stats::states.getID()))
    updateStateStatistics((uint64_t)-1);
  
  // Clear the end of the file if necessary (no truncate op?).
  unsigned pos = of.tellp();
  for (unsigned i=pos; i<istatsSize; ++i)
    of << '\n';
  
  of.flush();
}
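
The file produced above is in the Callgrind/KCachegrind text format: header lines, an events: declaration, then one record per position whose columns follow the declared event order. The hedged sketch below writes a toy file with the same layout (two invented event names, made-up numbers) just to make the structure explicit.

// Sketch: minimal callgrind-style output with the same layout writeIStats()
// emits: header, event declarations, then "assemblyLine line value..." rows.
#include <fstream>

int main() {
  std::ofstream of("toy.istats");
  of << "version: 1\n";
  of << "creator: toy\n";
  of << "positions: instr line\n";
  of << "events: Icov Iuncov\n\n";   // invented event names for illustration
  of << "ob=program.bc\n";
  of << "fn=main\n";
  of << "fl=main.c\n";
  of << "12 3 1 0\n";                // instr 12, source line 3, Icov=1, Iuncov=0
  of << "13 4 0 1\n";
  return 0;
}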
Code example #26
//
// Method: runOnFunction()
//
// Description:
//  Entry point for this LLVM pass.
//
// Return value:
//  true  - The function was modified.
//  false - The function was not modified.
//
bool
BreakConstantGEPs::runOnFunction (Function & F) {

    if (!pocl::Workgroup::isKernelToProcess(F)) return false;

    bool modified = false;

    // Worklist of values to check for constant GEP expressions
    std::vector<Instruction *> Worklist;

    //
    // Initialize the worklist by finding all instructions that have one or more
    // operands containing a constant GEP expression.
    //
    for (Function::iterator BB = F.begin(); BB != F.end(); ++BB) {
        for (BasicBlock::iterator i = BB->begin(); i != BB->end(); ++i) {
            //
            // Scan through the operands of this instruction.  If it is a constant
            // expression GEP, insert an instruction GEP before the instruction.
            //
            Instruction * I = i;
            for (unsigned index = 0; index < I->getNumOperands(); ++index) {
                if (hasConstantGEP (I->getOperand(index))) {
                    Worklist.push_back (I);
                }
            }
        }
    }

    //
    // Determine whether we will modify anything.
    //
    if (Worklist.size()) modified = true;

    //
    // While the worklist is not empty, take an item from it, convert the
    // operands into instructions if necessary, and determine if the newly
    // added instructions need to be processed as well.
    //
    while (Worklist.size()) {
        Instruction * I = Worklist.back();
        Worklist.pop_back();

        //
        // Scan through the operands of this instruction and convert each into an
        // instruction.  Note that this works a little differently for phi
        // instructions because the new instruction must be added to the
        // appropriate predecessor block.
        //
        if (PHINode * PHI = dyn_cast<PHINode>(I)) {
            for (unsigned index = 0; index < PHI->getNumIncomingValues(); ++index) {
                //
                // For PHI Nodes, if an operand is a constant expression with a GEP, we
                // want to insert the new instructions in the predecessor basic block.
                //
                // Note: It seems possible for a phi to have the same
                // incoming basic block listed multiple times; this seems okay
                // as long as the same value is listed for the incoming block.
                //
                Instruction * InsertPt = PHI->getIncomingBlock(index)->getTerminator();
                if (ConstantExpr * CE = hasConstantGEP (PHI->getIncomingValue(index))) {
                    Instruction * NewInst = convertExpression (CE, InsertPt);
                    for (unsigned i2 = index; i2 < PHI->getNumIncomingValues(); ++i2) {
                        if ((PHI->getIncomingBlock (i2)) == PHI->getIncomingBlock (index))
                            PHI->setIncomingValue (i2, NewInst);
                    }
                    Worklist.push_back (NewInst);
                }
            }
        } else {
            for (unsigned index = 0; index < I->getNumOperands(); ++index) {
                //
                // For other instructions, we want to insert instructions replacing
                // constant expressions immediately before the instruction using the
                // constant expression.
                //
                if (ConstantExpr * CE = hasConstantGEP (I->getOperand(index))) {
                    Instruction * NewInst = convertExpression (CE, I);
                    I->replaceUsesOfWith (CE, NewInst);
                    Worklist.push_back (NewInst);
                }
            }
        }
    }

    return modified;
}
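
The key detail above is that convertExpression() may produce an instruction whose operands still contain constant GEP expressions, so the new instruction goes back on the worklist. The toy C++ below reproduces just that worklist discipline on an invented Expr type; it illustrates the control flow only, not LLVM's constant-expression API.

// Toy sketch of the worklist pattern above: converting a nested "constant GEP"
// yields a node whose own operands may need converting, so it is re-queued.
#include <iostream>
#include <vector>

struct Expr { bool isConstGEP; std::vector<Expr*> ops; };

static int converted = 0;

Expr *convertExpression(Expr *e) {   // stands in for convertExpression(CE, InsertPt)
  e->isConstGEP = false;             // toy: the node now counts as an instruction
  ++converted;
  return e;
}

int main() {
  Expr leaf{false, {}};
  Expr inner{true, {&leaf}};
  Expr outer{true, {&inner}};
  Expr inst{false, {&outer}};        // an "instruction" using a constant GEP operand

  std::vector<Expr*> worklist{&inst};
  while (!worklist.empty()) {
    Expr *I = worklist.back(); worklist.pop_back();
    for (Expr *op : I->ops)
      if (op->isConstGEP)
        worklist.push_back(convertExpression(op));
  }
  std::cout << "converted " << converted << " nested expressions\n"; // prints 2
}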
Code example #27
File: SimplifyGEP.cpp Project: cschreiner/smack
//
// Method: runOnModule()
//
// Description:
//  Entry point for this LLVM pass.
//  Find all GEPs, and simplify them.
//
// Inputs:
//  M - A reference to the LLVM module to transform
//
// Outputs:
//  M - The transformed LLVM module.
//
// Return value:
//  true  - The module was modified.
//  false - The module was not modified.
//
bool SimplifyGEP::runOnModule(Module& M) {
  TD = &getAnalysis<DataLayout>();
  preprocess(M);
  for (Module::iterator F = M.begin(); F != M.end(); ++F){
    for (Function::iterator B = F->begin(), FE = F->end(); B != FE; ++B) {      
      for (BasicBlock::iterator I = B->begin(), BE = B->end(); I != BE; I++) {
        if(!(isa<GetElementPtrInst>(I)))
          continue;
        GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
        Value *PtrOp = GEP->getOperand(0);
        Value *StrippedPtr = PtrOp->stripPointerCasts();
        // Check if the GEP base pointer is enclosed in a cast
        if (StrippedPtr != PtrOp) {
          const PointerType *StrippedPtrTy =cast<PointerType>(StrippedPtr->getType());
          bool HasZeroPointerIndex = false;
          if (ConstantInt *C = dyn_cast<ConstantInt>(GEP->getOperand(1)))
            HasZeroPointerIndex = C->isZero();
          // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
          // into     : GEP [10 x i8]* X, i32 0, ...
          //
          // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
          //           into     : GEP i8* X, ...
          // 
          // This occurs when the program declares an array extern like "int X[];"
          if (HasZeroPointerIndex) {
            const PointerType *CPTy = cast<PointerType>(PtrOp->getType());
            if (const ArrayType *CATy =
                dyn_cast<ArrayType>(CPTy->getElementType())) {
              // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
              if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
                // -> GEP i8* X, ...
                SmallVector<Value*, 8> Idx(GEP->idx_begin()+1, GEP->idx_end());
                GetElementPtrInst *Res =
                  GetElementPtrInst::Create(StrippedPtr, Idx, GEP->getName(), GEP);
                Res->setIsInBounds(GEP->isInBounds());
                GEP->replaceAllUsesWith(Res);
                continue;
              }

              if (const ArrayType *XATy =
                  dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){
                // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
                if (CATy->getElementType() == XATy->getElementType()) {
                  // -> GEP [10 x i8]* X, i32 0, ...
                  // At this point, we know that the cast source type is a pointer
                  // to an array of the same type as the destination pointer
                  // array.  Because the array type is never stepped over (there
                  // is a leading zero) we can fold the cast into this GEP.
                  GEP->setOperand(0, StrippedPtr);
                  continue;
                }
              }
            }   
          } else if (GEP->getNumOperands() == 2) {
            // Transform things like:
            // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
            // into:  %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
            Type *SrcElTy = StrippedPtrTy->getElementType();
            Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
            if (TD && SrcElTy->isArrayTy() &&
                TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
                TD->getTypeAllocSize(ResElTy)) {
              Value *Idx[2];
              Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP->getContext()));
              Idx[1] = GEP->getOperand(1);
              Value *NewGEP = GetElementPtrInst::Create(StrippedPtr, Idx,
                                                        GEP->getName(), GEP);
              // V and GEP are both pointer types --> BitCast
              GEP->replaceAllUsesWith(new BitCastInst(NewGEP, GEP->getType(), GEP->getName(), GEP));
              continue;
            }

            // Transform things like:
            // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
            //   (where tmp = 8*tmp2) into:
            // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast

            if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) {
              uint64_t ArrayEltSize =
                TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());

              // Check to see if "tmp" is a scale by a multiple of ArrayEltSize.  We
              // allow either a mul, shift, or constant here.
              Value *NewIdx = 0;
              ConstantInt *Scale = 0;
              if (ArrayEltSize == 1) {
                NewIdx = GEP->getOperand(1);
                Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
              } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(1))) {
                NewIdx = ConstantInt::get(CI->getType(), 1);
                Scale = CI;
              } else if (Instruction *Inst =dyn_cast<Instruction>(GEP->getOperand(1))){
                if (Inst->getOpcode() == Instruction::Shl &&
                    isa<ConstantInt>(Inst->getOperand(1))) {
                  ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
                  uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
                  Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
                                           1ULL << ShAmtVal);
                  NewIdx = Inst->getOperand(0);
                } else if (Inst->getOpcode() == Instruction::Mul &&
                           isa<ConstantInt>(Inst->getOperand(1))) {
                  Scale = cast<ConstantInt>(Inst->getOperand(1));
                  NewIdx = Inst->getOperand(0);
                }
              }

              // If the index will be to exactly the right offset with the scale taken
              // out, perform the transformation. Note, we don't know whether Scale is
              // signed or not. We'll use unsigned version of division/modulo
              // operation after making sure Scale doesn't have the sign bit set.
              if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
                  Scale->getZExtValue() % ArrayEltSize == 0) {
                Scale = ConstantInt::get(Scale->getType(),
                                         Scale->getZExtValue() / ArrayEltSize);
                if (Scale->getZExtValue() != 1) {
                  Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
                                                             false /*ZExt*/);
                  NewIdx = BinaryOperator::Create(BinaryOperator::Mul, NewIdx, C, "idxscale");
                }

                // Insert the new GEP instruction.
                Value *Idx[2];
                Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP->getContext()));
                Idx[1] = NewIdx;
                Value *NewGEP = GetElementPtrInst::Create(StrippedPtr, Idx,
                                                          GEP->getName(), GEP);
                GEP->replaceAllUsesWith(new BitCastInst(NewGEP, GEP->getType(), GEP->getName(), GEP));
                continue;
              }
            }
          }
        }
      }
    }
  }

  return true;
}
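
The last branch's rewrite is pure integer arithmetic: an i8 GEP with byte offset Scale * NewIdx can become an element GEP only when Scale is a non-negative multiple of the element size, and the surviving factor is Scale / ArrayEltSize. The standalone check below reproduces that test; the function name and signature are invented for illustration.

// Sketch: the divisibility/rescaling check behind the i8-GEP-to-array-GEP
// transformation above.
#include <cassert>
#include <cstdint>

bool foldScale(uint64_t ArrayEltSize, int64_t Scale, uint64_t &NewScale) {
  if (ArrayEltSize == 0 || Scale < 0)                // sign bit must not be set
    return false;
  if (static_cast<uint64_t>(Scale) % ArrayEltSize != 0)
    return false;                                    // offset is not element-aligned
  NewScale = static_cast<uint64_t>(Scale) / ArrayEltSize;
  return true;                                       // new index = NewScale * NewIdx
}

int main() {
  uint64_t s;
  assert(foldScale(8, 24, s) && s == 3);  // byte offset 24 over 8-byte elements -> index 3
  assert(!foldScale(8, 12, s));           // 12 bytes is not a whole element
  return 0;
}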
Code example #28
bool AddressSanitizer::handleFunction(Module &M, Function &F) {
  if (BL->isIn(F)) return false;
  if (&F == AsanCtorFunction) return false;

  // If needed, insert __asan_init before checking for AddressSafety attr.
  maybeInsertAsanInitAtFunctionEntry(F);

  if (!F.hasFnAttr(Attribute::AddressSafety)) return false;

  if (!ClDebugFunc.empty() && ClDebugFunc != F.getName())
    return false;
  // We want to instrument every address only once per basic block
  // (unless there are calls between uses).
  SmallSet<Value*, 16> TempsToInstrument;
  SmallVector<Instruction*, 16> ToInstrument;
  SmallVector<Instruction*, 8> NoReturnCalls;
  bool IsWrite;

  // Fill the set of memory operations to instrument.
  for (Function::iterator FI = F.begin(), FE = F.end();
       FI != FE; ++FI) {
    TempsToInstrument.clear();
    int NumInsnsPerBB = 0;
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      if (LooksLikeCodeInBug11395(BI)) return false;
      if (Value *Addr = isInterestingMemoryAccess(BI, &IsWrite)) {
        if (ClOpt && ClOptSameTemp) {
          if (!TempsToInstrument.insert(Addr))
            continue;  // We've seen this temp in the current BB.
        }
      } else if (isa<MemIntrinsic>(BI) && ClMemIntrin) {
        // ok, take it.
      } else {
        if (CallInst *CI = dyn_cast<CallInst>(BI)) {
          // A call inside BB.
          TempsToInstrument.clear();
          if (CI->doesNotReturn()) {
            NoReturnCalls.push_back(CI);
          }
        }
        continue;
      }
      ToInstrument.push_back(BI);
      NumInsnsPerBB++;
      if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB)
        break;
    }
  }

  AsanFunctionContext AFC(F);

  // Instrument.
  int NumInstrumented = 0;
  for (size_t i = 0, n = ToInstrument.size(); i != n; i++) {
    Instruction *Inst = ToInstrument[i];
    if (ClDebugMin < 0 || ClDebugMax < 0 ||
        (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
      if (isInterestingMemoryAccess(Inst, &IsWrite))
        instrumentMop(AFC, Inst);
      else
        instrumentMemIntrinsic(AFC, cast<MemIntrinsic>(Inst));
    }
    NumInstrumented++;
  }

  DEBUG(dbgs() << F);

  bool ChangedStack = poisonStackInFunction(M, F);

  // We must unpoison the stack before every NoReturn call (throw, _exit, etc).
  // See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
  for (size_t i = 0, n = NoReturnCalls.size(); i != n; i++) {
    Instruction *CI = NoReturnCalls[i];
    IRBuilder<> IRB(CI);
    IRB.CreateCall(M.getOrInsertFunction(kAsanHandleNoReturnName,
                                         IRB.getVoidTy(), NULL));
  }

  return NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty();
}
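
The TempsToInstrument set implements the comment near the top: instrument each address at most once per basic block, but start over after any call, because the callee may have changed memory. The toy C++ below reproduces just that filtering decision with a std::set; the string-based "instructions" are invented for illustration.

// Sketch: decide which accesses in one basic block to instrument, deduplicating
// repeated addresses but resetting the dedup set at every call.
#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
  // A toy basic block: "addr:<name>" is a memory access, "call" is a call.
  std::vector<std::string> bb = {"addr:p", "addr:p", "call", "addr:p", "addr:q"};

  std::set<std::string> tempsToInstrument;
  std::vector<std::string> toInstrument;
  for (const std::string &inst : bb) {
    if (inst == "call") {
      tempsToInstrument.clear();         // memory may have changed; re-check temps
      continue;
    }
    if (!tempsToInstrument.insert(inst).second)
      continue;                          // already instrumented in this BB
    toInstrument.push_back(inst);
  }
  for (const std::string &i : toInstrument)
    std::cout << "instrument " << i << "\n";  // p, p (after the call), q
}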
Code example #29
File: SjLjEHPrepare.cpp Project: PhongNgo/llvm
/// setupEntryBlockAndCallSites - Setup the entry block by creating and filling
/// the function context and marking the call sites with the appropriate
/// values. These values are used by the DWARF EH emitter.
bool SjLjEHPass::setupEntryBlockAndCallSites(Function &F) {
  SmallVector<ReturnInst*,     16> Returns;
  SmallVector<InvokeInst*,     16> Invokes;
  SmallSetVector<LandingPadInst*, 16> LPads;

  // Look through the terminators of the basic blocks to find invokes.
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
      Invokes.push_back(II);
      LPads.insert(II->getUnwindDest()->getLandingPadInst());
    } else if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
      Returns.push_back(RI);
    }

  if (Invokes.empty()) return false;

  NumInvokes += Invokes.size();

  lowerIncomingArguments(F);
  lowerAcrossUnwindEdges(F, Invokes);

  Value *FuncCtx =
    setupFunctionContext(F, makeArrayRef(LPads.begin(), LPads.end()));
  BasicBlock *EntryBB = F.begin();
  Type *Int32Ty = Type::getInt32Ty(F.getContext());

  Value *Idxs[2] = {
    ConstantInt::get(Int32Ty, 0), 0
  };

  // Get a reference to the jump buffer.
  Idxs[1] = ConstantInt::get(Int32Ty, 5);
  Value *JBufPtr = GetElementPtrInst::Create(FuncCtx, Idxs, "jbuf_gep",
                                             EntryBB->getTerminator());

  // Save the frame pointer.
  Idxs[1] = ConstantInt::get(Int32Ty, 0);
  Value *FramePtr = GetElementPtrInst::Create(JBufPtr, Idxs, "jbuf_fp_gep",
                                              EntryBB->getTerminator());

  Value *Val = CallInst::Create(FrameAddrFn,
                                ConstantInt::get(Int32Ty, 0),
                                "fp",
                                EntryBB->getTerminator());
  new StoreInst(Val, FramePtr, true, EntryBB->getTerminator());

  // Save the stack pointer.
  Idxs[1] = ConstantInt::get(Int32Ty, 2);
  Value *StackPtr = GetElementPtrInst::Create(JBufPtr, Idxs, "jbuf_sp_gep",
                                              EntryBB->getTerminator());

  Val = CallInst::Create(StackAddrFn, "sp", EntryBB->getTerminator());
  new StoreInst(Val, StackPtr, true, EntryBB->getTerminator());

  // Call the setjmp intrinsic. It fills in the rest of the jmpbuf.
  Value *SetjmpArg = CastInst::Create(Instruction::BitCast, JBufPtr,
                                      Type::getInt8PtrTy(F.getContext()), "",
                                      EntryBB->getTerminator());
  CallInst::Create(BuiltinSetjmpFn, SetjmpArg, "", EntryBB->getTerminator());

  // Store a pointer to the function context so that the back-end will know
  // where to look for it.
  Value *FuncCtxArg = CastInst::Create(Instruction::BitCast, FuncCtx,
                                       Type::getInt8PtrTy(F.getContext()), "",
                                       EntryBB->getTerminator());
  CallInst::Create(FuncCtxFn, FuncCtxArg, "", EntryBB->getTerminator());

  // At this point, we are all set up, update the invoke instructions to mark
  // their call_site values.
  for (unsigned I = 0, E = Invokes.size(); I != E; ++I) {
    insertCallSiteStore(Invokes[I], I + 1);

    ConstantInt *CallSiteNum =
      ConstantInt::get(Type::getInt32Ty(F.getContext()), I + 1);

    // Record the call site value for the back end so it stays associated with
    // the invoke.
    CallInst::Create(CallSiteFn, CallSiteNum, "", Invokes[I]);
  }

  // Mark call instructions that aren't nounwind as no-action (call_site ==
  // -1). Skip the entry block, as prior to then, no function context has been
  // created for this function and any unexpected exceptions thrown will go
  // directly to the caller's context, which is what we want anyway, so no need
  // to do anything here.
  for (Function::iterator BB = F.begin(), E = F.end(); ++BB != E;)
    for (BasicBlock::iterator I = BB->begin(), end = BB->end(); I != end; ++I)
      if (CallInst *CI = dyn_cast<CallInst>(I)) {
        if (!CI->doesNotThrow())
          insertCallSiteStore(CI, -1);
      } else if (ResumeInst *RI = dyn_cast<ResumeInst>(I)) {
        insertCallSiteStore(RI, -1);
      }

  // Register the function context and make sure it's known to not throw
  CallInst *Register = CallInst::Create(RegisterFn, FuncCtx, "",
                                        EntryBB->getTerminator());
  Register->setDoesNotThrow();

  // Following any allocas not in the entry block, update the saved SP in the
  // jmpbuf to the new value.
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    if (BB == F.begin())
      continue;
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
      if (CallInst *CI = dyn_cast<CallInst>(I)) {
        if (CI->getCalledFunction() != StackRestoreFn)
          continue;
      } else if (!isa<AllocaInst>(I)) {
        continue;
      }
      Instruction *StackAddr = CallInst::Create(StackAddrFn, "sp");
      StackAddr->insertAfter(I);
      Instruction *StoreStackAddr = new StoreInst(StackAddr, StackPtr, true);
      StoreStackAddr->insertAfter(StackAddr);
    }
  }

  // Finally, for any returns from this function, if this function contains an
  // invoke, add a call to unregister the function context.
  for (unsigned I = 0, E = Returns.size(); I != E; ++I)
    CallInst::Create(UnregisterFn, FuncCtx, "", Returns[I]);

  return true;
}
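
Stripped of the function-context layout, the scheme above is the classic setjmp-based EH shape: save a context once in the entry block, store a call-site number before each potentially-unwinding call, and dispatch on that number when control comes back through the saved context. The sketch below shows that control flow with the standard setjmp/longjmp; the call_site bookkeeping is an invented illustration of the idea, not the SjLj runtime's actual protocol.

// Sketch: setjmp/longjmp control flow with a per-call "call_site" tag,
// mirroring the builtin-setjmp setup and call-site stores inserted above.
#include <csetjmp>
#include <cstdio>

static std::jmp_buf ctx;
static volatile int call_site = -1;   // -1 == "no-action", like plain calls

static void mayThrow(int site, bool doThrow) {
  call_site = site;                   // analogue of insertCallSiteStore
  if (doThrow)
    std::longjmp(ctx, 1);             // unwind back to the saved context
}

int main() {
  if (setjmp(ctx) == 0) {             // normal path: context saved in the entry block
    mayThrow(1, false);
    mayThrow(2, true);                // this one "throws"
    std::puts("not reached");
  } else {
    std::printf("landed: call_site=%d\n", call_site);  // prints 2
  }
}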
Code example #30
bool Inliner::runOnSCC(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraph>();
  const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
  const TargetLibraryInfo *TLI = getAnalysisIfAvailable<TargetLibraryInfo>();

  SmallPtrSet<Function*, 8> SCCFunctions;
  DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (F) SCCFunctions.insert(F);
    DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallSite, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee.  This allows us to avoid
  // infinite inlining in some obscure cases.  To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function*, int>, 8> InlineHistory;

  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (!F) continue;

    for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
        CallSite CS(cast<Value>(I));
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CS || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never inline
        // it.  If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (CS.getCalledFunction() && CS.getCalledFunction()->isDeclaration())
          continue;

        CallSites.push_back(std::make_pair(CS, -1));
      }
  }

  DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this function, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned i = 0; i < FirstCallInSCC; ++i)
    if (Function *F = CallSites[i].first.getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);


  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, TD);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Iterate over the outer loop because inlining functions can cause indirect
    // calls to become direct calls.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      CallSite CS = CallSites[CSi].first;

      Function *Caller = CS.getCaller();
      Function *Callee = CS.getCalledFunction();

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size.  This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (isInstructionTriviallyDead(CS.getInstruction(), TLI)) {
        DEBUG(dbgs() << "    -> Deleting dead call: "
                     << *CS.getInstruction() << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        CG[Caller]->removeCallEdgeFor(CS);
        CS.getInstruction()->eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // We can only inline direct calls to non-declarations.
        if (Callee == 0 || Callee->isDeclaration()) continue;

        // If this call site was obtained by inlining another function, verify
        // that the inline path for the function did not include the callee
        // itself.  If so, we'd be recursively inlining the same function,
        // which would provide the same callsites, which would cause us to
        // infinitely inline.
        int InlineHistoryID = CallSites[CSi].second;
        if (InlineHistoryID != -1 &&
            InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory))
          continue;


        // If the policy determines that we should inline this function,
        // try to do so.
        if (!shouldInline(CS))
          continue;

        // Attempt to inline the function.
        if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
                                  InlineHistoryID, InsertLifetime))
          continue;
        ++NumInlined;

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process.  They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

          for (unsigned i = 0, e = InlineInfo.InlinedCalls.size();
               i != e; ++i) {
            Value *Ptr = InlineInfo.InlinedCalls[i];
            CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
          }
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&

          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet, this
          // could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        DEBUG(dbgs() << "    -> Deleting dead function: "
              << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the node for the callee from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list.  If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin()+CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}
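
InlineHistoryIncludes is what stops the "A inlined into B inlined into A ..." loop: each InlineHistory entry records a callee together with the index of the entry it was inlined under, with -1 terminating the chain. The standalone sketch below walks that chain; it is an assumption about the helper's behaviour inferred from how InlineHistory is built above, not a copy of LLVM's implementation.

// Sketch: walk the InlineHistory chain to see whether Callee already occurs
// on the path that produced this call site. The vector stores
// (function, index-of-parent-entry) pairs, with -1 meaning "no parent".
#include <cassert>
#include <utility>
#include <vector>

using Function = const char;   // stand-in: identify functions by address

bool inlineHistoryIncludes(Function *Callee, int ID,
                           const std::vector<std::pair<Function *, int>> &History) {
  while (ID != -1) {
    assert(static_cast<unsigned>(ID) < History.size());
    if (History[ID].first == Callee)
      return true;             // Callee is already on this inline path: stop
    ID = History[ID].second;   // climb to the parent entry
  }
  return false;
}

int main() {
  Function A = 'A', B = 'B';
  // Entry 0: A was inlined at top level. Entry 1: while doing so, B was inlined.
  std::vector<std::pair<Function *, int>> history = {{&A, -1}, {&B, 0}};
  assert(inlineHistoryIncludes(&A, 1, history));   // A is on B's inline path
  assert(!inlineHistoryIncludes(&B, 0, history));  // B is not on A's path
  return 0;
}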