Example No. 1
/// Handle a rare case where the disintegrated node's instructions
/// no longer dominate all their uses. Not sure if this is really necessary.
void StructurizeCFG::rebuildSSA() {
  SSAUpdater Updater;
  for (const auto &BB : ParentRegion->blocks())
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end();
         II != IE; ++II) {

      bool Initialized = false;
      for (auto I = II->use_begin(), E = II->use_end(); I != E;) {
        Use &U = *I++;
        Instruction *User = cast<Instruction>(U.getUser());
        if (User->getParent() == BB) {
          continue;

        } else if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
          if (UserPN->getIncomingBlock(U) == BB)
            continue;
        }

        if (DT->dominates(II, User))
          continue;

        if (!Initialized) {
          Value *Undef = UndefValue::get(II->getType());
          Updater.Initialize(II->getType(), "");
          Updater.AddAvailableValue(&Func->getEntryBlock(), Undef);
          Updater.AddAvailableValue(BB, II);
          Initialized = true;
        }
        Updater.RewriteUseAfterInsertions(U);
      }
    }
}
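For context, the SSAUpdater pattern the function relies on can be sketched in isolation. This is a minimal, illustrative sketch: V, DefBB, F, and U are hypothetical stand-ins for a value, its defining block, the enclosing function, and a use the value no longer dominates.

SSAUpdater Updater;
Updater.Initialize(V->getType(), V->getName());
// Seed the updater: V is available at the end of DefBB; everywhere else
// the value starts out undefined.
Updater.AddAvailableValue(&F->getEntryBlock(), UndefValue::get(V->getType()));
Updater.AddAvailableValue(DefBB, V);
// Rewrite U so it reads a dominating definition; the updater inserts any
// PHI nodes needed to merge the available values.
Updater.RewriteUseAfterInsertions(U);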
Example No. 2
void MemoryInstrumenter::checkFeatures(Module &M) {
  // Check whether any memory allocation function can
  // potentially be pointed to by function pointers.
  // Also, all intrinsic functions will be called directly,
  // i.e. not via function pointers.
  for (Module::iterator F = M.begin(); F != M.end(); ++F) {
    if (DynAAUtils::IsMalloc(F) || F->isIntrinsic()) {
      for (Value::use_iterator UI = F->use_begin(); UI != F->use_end(); ++UI) {
        User *Usr = *UI;
        assert(isa<CallInst>(Usr) || isa<InvokeInst>(Usr));
        CallSite CS(cast<Instruction>(Usr));
        for (unsigned i = 0; i < CS.arg_size(); ++i)
          assert(CS.getArgument(i) != F);
      }
    }
  }

  // Check whether memory allocation functions are captured.
  for (Module::iterator F = M.begin(); F != M.end(); ++F) {
    // 0 is the return, 1 is the first parameter.
    if (F->isDeclaration() && F->doesNotAlias(0) && !DynAAUtils::IsMalloc(F)) {
      errs().changeColor(raw_ostream::RED);
      errs() << F->getName() << "'s return value is marked noalias, ";
      errs() << "but the function is not treated as malloc.\n";
      errs().resetColor();
    }
  }

  // Sequential types except pointer types shouldn't be used as the type of
  // an instruction, a function parameter, or a global variable.
  for (Module::global_iterator GI = M.global_begin(), E = M.global_end();
       GI != E; ++GI) {
    if (isa<SequentialType>(GI->getType()))
      assert(GI->getType()->isPointerTy());
  }
  for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
    for (Function::arg_iterator AI = F->arg_begin(); AI != F->arg_end(); ++AI) {
      if (isa<SequentialType>(AI->getType()))
        assert(AI->getType()->isPointerTy());
    }
  }
  for (Module::iterator F = M.begin(); F != M.end(); ++F) {
    for (Function::iterator BB = F->begin(); BB != F->end(); ++BB) {
      for (BasicBlock::iterator Ins = BB->begin(); Ins != BB->end(); ++Ins) {
        if (isa<SequentialType>(Ins->getType()))
          assert(Ins->getType()->isPointerTy());
      }
    }
  }

  // We don't support multi-process programs for now.
  if (!HookFork)
    assert(M.getFunction("fork") == NULL);
}
Example No. 3
 virtual bool runOnFunction(Function& f)
 {
   CurrentFile::set(__FILE__);
   bool changed = false;
   // Make sure this is a function that we can use
   if (f.isDeclaration() /*|| !f.isDFFunction()*/ )
   {
     return changed ;
   }
   for(Function::iterator BB = f.begin(); BB != f.end(); ++BB)
   {
     begin:
     for(BasicBlock::iterator II = BB->begin(); II != BB->end(); ++II)
     {
       if( !isa<TerminatorInst>(&*II) ) // use LLVM's isa<>; IR classes don't support RTTI
       {
         II->replaceAllUsesWith(UndefValue::get(II->getType()));
         II->eraseFromParent();
         goto begin;
       }
     }
   }
   changed = true;
   return changed;
 }
Example No. 4
/// DeleteBasicBlock - remove the specified basic block from the program,
/// updating the callgraph to reflect any now-obsolete edges due to calls that
/// exist in the BB.
void PruneEH::DeleteBasicBlock(BasicBlock *BB) {
  assert(pred_begin(BB) == pred_end(BB) && "BB is not dead!");
  CallGraph &CG = getAnalysis<CallGraph>();

  CallGraphNode *CGN = CG[BB->getParent()];
  for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; ) {
    --I;
    if (CallInst *CI = dyn_cast<CallInst>(I)) {
      if (Function *Callee = CI->getCalledFunction())
        CGN->removeCallEdgeTo(CG[Callee]);
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(I)) {
      if (Function *Callee = II->getCalledFunction())
        CGN->removeCallEdgeTo(CG[Callee]);
    }
    if (!I->use_empty())
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
  }

  // Get the list of successors of this block.
  std::vector<BasicBlock*> Succs(succ_begin(BB), succ_end(BB));

  for (unsigned i = 0, e = Succs.size(); i != e; ++i)
    Succs[i]->removePredecessor(BB);

  BB->eraseFromParent();
}
Example No. 5
void IntTest::pbzip2_like(Module &M) {
	TestBanner X("pbzip2-like");

	vector<StoreInst *> writes;
	Function *f_rand = M.getFunction("rand");
	assert(f_rand);
	Function *f_producer = M.getFunction("_Z8producerPv.SLICER");
	assert(f_producer);
	// Search along the CFG. We need to make sure reads and writes are in
	// a consistent order. 
	for (Function::iterator bb = f_producer->begin();
			bb != f_producer->end(); ++bb) {
		for (BasicBlock::iterator ins = bb->begin(); ins != bb->end(); ++ins) {
			if (CallInst *ci = dyn_cast<CallInst>(ins)) {
				if (ci->getCalledFunction() == f_rand) {
					for (BasicBlock::iterator j = bb->begin(); j != bb->end(); ++j) {
						if (StoreInst *si = dyn_cast<StoreInst>(j))
							writes.push_back(si);
					}
				}
			}
		}
	}
	errs() << "=== writes ===\n";
	for (size_t i = 0; i < writes.size(); ++i) {
		errs() << *writes[i] << "\n";
	}

	vector<LoadInst *> reads;
	Function *f_consumer = M.getFunction("_Z8consumerPv.SLICER");
	assert(f_consumer);
	for (Function::iterator bb = f_consumer->begin();
			bb != f_consumer->end(); ++bb) {
		for (BasicBlock::iterator ins = bb->begin(); ins != bb->end(); ++ins) {
			if (ins->getOpcode() == Instruction::Add &&
					ins->getType()->isIntegerTy(8)) {
				LoadInst *li = dyn_cast<LoadInst>(ins->getOperand(0));
				assert(li);
				reads.push_back(li);
			}
		}
	}
	errs() << "=== reads ===\n";
	for (size_t i = 0; i < reads.size(); ++i) {
		errs() << *reads[i] << "\n";
	}

	assert(writes.size() == reads.size());
	AliasAnalysis &AA = getAnalysis<AdvancedAlias>();
	for (size_t i = 0; i < writes.size(); ++i) {
		for (size_t j = i + 1; j < reads.size(); ++j) {
			errs() << "i = " << i << ", j = " << j << "... ";
			AliasAnalysis::AliasResult res = AA.alias(
					writes[i]->getPointerOperand(),
					reads[j]->getPointerOperand());
			assert(res == AliasAnalysis::NoAlias);
			print_pass(errs());
		}
	}
}
Example No. 6
// Collect the list of loop induction variables with respect to which it might
// be possible to reroll the loop.
void LoopReroll::collectPossibleIVs(Loop *L,
                                    SmallInstructionVector &PossibleIVs) {
  BasicBlock *Header = L->getHeader();
  for (BasicBlock::iterator I = Header->begin(),
       IE = Header->getFirstInsertionPt(); I != IE; ++I) {
    if (!isa<PHINode>(I))
      continue;
    if (!I->getType()->isIntegerTy())
      continue;

    if (const SCEVAddRecExpr *PHISCEV =
        dyn_cast<SCEVAddRecExpr>(SE->getSCEV(I))) {
      if (PHISCEV->getLoop() != L)
        continue;
      if (!PHISCEV->isAffine())
        continue;
      if (const SCEVConstant *IncSCEV =
          dyn_cast<SCEVConstant>(PHISCEV->getStepRecurrence(*SE))) {
        if (!IncSCEV->getValue()->getValue().isStrictlyPositive())
          continue;
        if (IncSCEV->getValue()->uge(MaxInc))
          continue;

        DEBUG(dbgs() << "LRR: Possible IV: " << *I << " = " <<
              *PHISCEV << "\n");
        PossibleIVs.push_back(I);
      }
    }
  }
}
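For intuition, here is the kind of loop whose header PHI passes the checks above (illustrative): in the function below, ScalarEvolution models i as the affine add-recurrence {0,+,3}, whose step is a strictly positive constant well under any reasonable MaxInc.

void example(int *a, int n) {
  for (int i = 0; i < n; i += 3) // i's header PHI is a possible IV
    a[i] = 0;
}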
Example No. 7
/// ChangeToUnreachable - Insert an unreachable instruction before the specified
/// instruction, making it and the rest of the code in the block dead.
static void ChangeToUnreachable(Instruction *I, bool UseLLVMTrap) {
  BasicBlock *BB = I->getParent();
  // Loop over all of the successors, removing BB's entry from any PHI
  // nodes.
  for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
    (*SI)->removePredecessor(BB);
  
  // Insert a call to llvm.trap right before this.  This turns the undefined
  // behavior into a hard fail instead of falling through into random code.
  if (UseLLVMTrap) {
    Function *TrapFn =
      Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
    CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
    CallTrap->setDebugLoc(I->getDebugLoc());
  }
  new UnreachableInst(I->getContext(), I);
  
  // All instructions after this are dead.
  BasicBlock::iterator BBI = I, BBE = BB->end();
  while (BBI != BBE) {
    if (!BBI->use_empty())
      BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
    BB->getInstList().erase(BBI++);
  }
}
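A hedged sketch of a typical call site: once a call is known not to return, everything after it is dead, so the instruction following the call is a natural insertion point (CI is a hypothetical CallInst* discovered while scanning a block).

if (Function *Callee = CI->getCalledFunction())
  if (Callee->doesNotReturn() && !isa<UnreachableInst>(CI->getNextNode()))
    ChangeToUnreachable(CI->getNextNode(), /*UseLLVMTrap=*/false);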
Example No. 8
/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible to inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB,
                                SmallPtrSetImpl<const Value *> &EphValues) {
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    // FIXME: Currently, the number of instructions in a function regardless of
    // our ability to simplify them during inline to constants or dead code,
    // are actually used by the vector bonus heuristic. As long as that's true,
    // we have to special case debug intrinsics here to prevent differences in
    // inlining due to debug symbols. Eventually, the number of unsimplified
    // instructions shouldn't factor into the cost computation, but until then,
    // hack around it here.
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    // Skip ephemeral values.
    if (EphValues.count(I))
      continue;

    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

    // If visiting this instruction detected an uninlinable pattern, abort.
    if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
        HasIndirectBr)
      return false;

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
      return false;

    if (NumVectorInstructions > NumInstructions/2)
      VectorBonus = FiftyPercentVectorBonus;
    else if (NumVectorInstructions > NumInstructions/10)
      VectorBonus = TenPercentVectorBonus;
    else
      VectorBonus = 0;

    // Check if we've passed the threshold so we don't spin in huge basic
    // blocks that will never inline.
    if (Cost > (Threshold + VectorBonus))
      return false;
  }

  return true;
}
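A minimal sketch of the caller side, from inside the analyzer, assuming F is the callee being analyzed and EphValues has already been populated (in contemporary trees this is done with CodeMetrics::collectEphemeralValues; treat the details as illustrative):

SmallPtrSet<const Value *, 32> EphValues;
// ... populate EphValues for F ...
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
  if (!analyzeBlock(BB, EphValues))
    return false; // inlining this callee is not viable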
Example No. 9
BasicBlock *llvm::SplitBlockPredecessors(BasicBlock *BB,
                                         ArrayRef<BasicBlock *> Preds,
                                         const char *Suffix, DominatorTree *DT,
                                         LoopInfo *LI, bool PreserveLCSSA) {
  // Do not attempt to split that which cannot be split.
  if (!BB->canSplitPredecessors())
    return nullptr;

  // For the landingpads we need to act a bit differently.
  // Delegate this work to SplitLandingPadPredecessors.
  if (BB->isLandingPad()) {
    SmallVector<BasicBlock*, 2> NewBBs;
    std::string NewName = std::string(Suffix) + ".split-lp";

    SplitLandingPadPredecessors(BB, Preds, Suffix, NewName.c_str(), NewBBs, DT,
                                LI, PreserveLCSSA);
    return NewBBs[0];
  }

  // Create new basic block, insert right before the original block.
  BasicBlock *NewBB = BasicBlock::Create(
      BB->getContext(), BB->getName() + Suffix, BB->getParent(), BB);

  // The new block unconditionally branches to the old block.
  BranchInst *BI = BranchInst::Create(BB, NewBB);
  BI->setDebugLoc(BB->getFirstNonPHIOrDbg()->getDebugLoc());

  // Move the edges from Preds to point to NewBB instead of BB.
  for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
    // This is slightly more strict than necessary; the minimum requirement
    // is that there be no more than one indirectbr branching to BB. And
    // all BlockAddress uses would need to be updated.
    assert(!isa<IndirectBrInst>(Preds[i]->getTerminator()) &&
           "Cannot split an edge from an IndirectBrInst");
    Preds[i]->getTerminator()->replaceUsesOfWith(BB, NewBB);
  }

  // Insert a new PHI node into NewBB for every PHI node in BB and that new PHI
  // node becomes an incoming value for BB's phi node.  However, if the Preds
  // list is empty, we need to insert dummy entries into the PHI nodes in BB to
  // account for the newly created predecessor.
  if (Preds.empty()) {
    // Insert dummy values as the incoming value.
    for (BasicBlock::iterator I = BB->begin(); isa<PHINode>(I); ++I)
      cast<PHINode>(I)->addIncoming(UndefValue::get(I->getType()), NewBB);
    return NewBB;
  }

  // Update DominatorTree, LoopInfo, and LCSSA analysis information.
  bool HasLoopExit = false;
  UpdateAnalysisInformation(BB, NewBB, Preds, DT, LI, PreserveLCSSA,
                            HasLoopExit);

  // Update the PHI nodes in BB with the values coming from NewBB.
  UpdatePHINodes(BB, NewBB, Preds, BI, HasLoopExit);
  return NewBB;
}
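A hedged usage sketch: LoopSimplify-style passes use this to give a loop header a dedicated preheader by splitting off the predecessors that lie outside the loop (Header, L, DT, and LI are assumed to be in scope).

SmallVector<BasicBlock *, 8> OutsideBlocks;
for (pred_iterator PI = pred_begin(Header), PE = pred_end(Header);
     PI != PE; ++PI)
  if (!L->contains(*PI)) // only split off the out-of-loop edges
    OutsideBlocks.push_back(*PI);
BasicBlock *Preheader =
    SplitBlockPredecessors(Header, OutsideBlocks, ".preheader", DT, LI,
                           /*PreserveLCSSA=*/true);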
Example No. 10
void MemoryInstrumenter::checkFeatures(Module &M) {
    // Check whether any memory allocation function can
    // potentially be pointed to by function pointers.
    // Also, all intrinsic functions will be called directly,
    // i.e. not via function pointers.
    for (Module::iterator F = M.begin(); F != M.end(); ++F) {
        if (DynAAUtils::IsMalloc(F) || F->isIntrinsic()) {
            for (Value::use_iterator UI = F->use_begin(); UI != F->use_end(); ++UI) {
                User *Usr = *UI;
                assert(isa<CallInst>(Usr) || isa<InvokeInst>(Usr));
                CallSite CS(cast<Instruction>(Usr));
                for (unsigned i = 0; i < CS.arg_size(); ++i)
                    assert(CS.getArgument(i) != F);
            }
        }
    }

    // Check whether memory allocation functions are captured.
    for (Module::iterator F = M.begin(); F != M.end(); ++F) {
        // 0 is the return, 1 is the first parameter.
        if (F->isDeclaration() && F->doesNotAlias(0) && !DynAAUtils::IsMalloc(F)) {
            errs().changeColor(raw_ostream::RED);
            errs() << F->getName() << "'s return value is marked noalias, ";
            errs() << "but the function is not treated as malloc.\n";
            errs().resetColor();
        }
    }

    // Global variables shouldn't be of the array type.
    for (Module::global_iterator GI = M.global_begin(), E = M.global_end();
            GI != E; ++GI) {
        assert(!GI->getType()->isArrayTy());
    }
    // A function parameter or an instruction can be an array, but we don't
    // instrument such constructs for now. Issue a warning on such cases.
    for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
        for (Function::arg_iterator AI = F->arg_begin(); AI != F->arg_end(); ++AI) {
            if (AI->getType()->isArrayTy()) {
                errs().changeColor(raw_ostream::RED);
                errs() << F->getName() << ":" << *AI << " is an array\n";
                errs().resetColor();
            }
        }
    }
    for (Module::iterator F = M.begin(); F != M.end(); ++F) {
        for (Function::iterator BB = F->begin(); BB != F->end(); ++BB) {
            for (BasicBlock::iterator Ins = BB->begin(); Ins != BB->end(); ++Ins) {
                if (Ins->getType()->isArrayTy()) {
                    errs().changeColor(raw_ostream::RED);
                    errs() << F->getName() << ":" << *Ins << " is an array\n";
                    errs().resetColor();
                }
            }
        }
    }
}
Example No. 11
// bypassSlowDivision - This optimization identifies DIV instructions that can
// be profitably bypassed and carried out with a shorter, faster divide.
bool llvm::bypassSlowDivision(Function &F,
                              Function::iterator &I,
                              const DenseMap<unsigned int, unsigned int> &BypassWidths) {
  DivCacheTy DivCache;

  bool MadeChange = false;
  for (BasicBlock::iterator J = I->begin(); J != I->end(); J++) {

    // Get instruction details
    unsigned Opcode = J->getOpcode();
    bool UseDivOp = Opcode == Instruction::SDiv || Opcode == Instruction::UDiv;
    bool UseRemOp = Opcode == Instruction::SRem || Opcode == Instruction::URem;
    bool UseSignedOp = Opcode == Instruction::SDiv ||
                       Opcode == Instruction::SRem;

    // Only optimize div or rem ops
    if (!UseDivOp && !UseRemOp)
      continue;

    // Skip division on vector types, only optimize integer instructions
    if (!J->getType()->isIntegerTy())
      continue;

    // Get bitwidth of div/rem instruction
    IntegerType *T = cast<IntegerType>(J->getType());
    unsigned int bitwidth = T->getBitWidth();

    // Continue if bitwidth is not bypassed
    DenseMap<unsigned int, unsigned int>::const_iterator BI = BypassWidths.find(bitwidth);
    if (BI == BypassWidths.end())
      continue;

    // Get type for div/rem instruction with bypass bitwidth
    IntegerType *BT = IntegerType::get(J->getContext(), BI->second);

    MadeChange |= reuseOrInsertFastDiv(F, I, J, BT, UseDivOp,
                                       UseSignedOp, DivCache);
  }

  return MadeChange;
}
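A sketch of the caller side (the widths are illustrative): a target that finds 64-bit divides slow can ask for them to be bypassed with 32-bit divides when both operands fit, walking the function block by block.

DenseMap<unsigned int, unsigned int> BypassWidths;
BypassWidths[64] = 32; // try an i32 divide when an i64 divide's operands fit
bool Changed = false;
for (Function::iterator I = F.begin(); I != F.end(); ++I)
  Changed |= bypassSlowDivision(F, I, BypassWidths);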
Example No. 12
/// DeleteBasicBlock - remove the specified basic block from the program,
/// updating the callgraph to reflect any now-obsolete edges due to calls that
/// exist in the BB.
void PruneEH::DeleteBasicBlock(BasicBlock *BB) {
  assert(pred_empty(BB) && "BB is not dead!");
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

  Instruction *TokenInst = nullptr;

  CallGraphNode *CGN = CG[BB->getParent()];
  for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; ) {
    --I;

    if (I->getType()->isTokenTy()) {
      TokenInst = &*I;
      break;
    }

    if (auto CS = CallSite(&*I)) {
      const Function *Callee = CS.getCalledFunction();
      if (!Callee || !Intrinsic::isLeaf(Callee->getIntrinsicID()))
        CGN->removeCallEdgeFor(CS);
      else if (!Callee->isIntrinsic())
        CGN->removeCallEdgeFor(CS);
    }

    if (!I->use_empty())
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
  }

  if (TokenInst) {
    if (!isa<TerminatorInst>(TokenInst))
      changeToUnreachable(TokenInst->getNextNode(), /*UseLLVMTrap=*/false);
  } else {
    // Get the list of successors of this block.
    std::vector<BasicBlock *> Succs(succ_begin(BB), succ_end(BB));

    for (unsigned i = 0, e = Succs.size(); i != e; ++i)
      Succs[i]->removePredecessor(BB);

    BB->eraseFromParent();
  }
}
Example No. 13
/// Handle a rare case where the disintegrated node's instructions
/// no longer dominate all their uses. Not sure if this is really necessary.
void StructurizeCFG::rebuildSSA() {
  SSAUpdater Updater;
  for (Region::block_iterator I = ParentRegion->block_begin(),
                              E = ParentRegion->block_end();
       I != E; ++I) {

    BasicBlock *BB = *I;
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end();
         II != IE; ++II) {

      bool Initialized = false;
      for (Use *I = &II->use_begin().getUse(), *Next; I; I = Next) {

        Next = I->getNext();

        Instruction *User = cast<Instruction>(I->getUser());
        if (User->getParent() == BB) {
          continue;

        } else if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
          if (UserPN->getIncomingBlock(*I) == BB)
            continue;
        }

        if (DT->dominates(II, User))
          continue;

        if (!Initialized) {
          Value *Undef = UndefValue::get(II->getType());
          Updater.Initialize(II->getType(), "");
          Updater.AddAvailableValue(&Func->getEntryBlock(), Undef);
          Updater.AddAvailableValue(BB, II);
          Initialized = true;
        }
        Updater.RewriteUseAfterInsertions(*I);
      }
    }
  }
}
Example No. 14
bool SSIEverything::runOnFunction(Function &F) {
  SmallVector<Instruction *, 16> Insts;
  SSI &ssi = getAnalysis<SSI>();

  if (F.isDeclaration() || F.isIntrinsic()) return false;

  for (Function::iterator B = F.begin(), BE = F.end(); B != BE; ++B)
    for (BasicBlock::iterator I = B->begin(), E = B->end(); I != E; ++I)
      if (!I->getType()->isVoidTy())
        Insts.push_back(I);

  ssi.createSSI(Insts);
  return true;
}
Example No. 15
/// ChangeToUnreachable - Insert an unreachable instruction before the specified
/// instruction, making it and the rest of the code in the block dead.
static void ChangeToUnreachable(Instruction *I) {
    BasicBlock *BB = I->getParent();
    // Loop over all of the successors, removing BB's entry from any PHI
    // nodes.
    for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
        (*SI)->removePredecessor(BB);

    new UnreachableInst(I);

    // All instructions after this are dead.
    BasicBlock::iterator BBI = I, BBE = BB->end();
    while (BBI != BBE) {
        if (!BBI->use_empty())
            BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
        BB->getInstList().erase(BBI++);
    }
}
Example No. 16
/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible to inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB) {
  for (BasicBlock::iterator I = BB->begin(), E = llvm::prior(BB->end());
       I != E; ++I) {
    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

    // If visiting this instruction detected an uninlinable pattern, abort.
    if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
      return false;

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
      return false;

    if (NumVectorInstructions > NumInstructions/2)
      VectorBonus = FiftyPercentVectorBonus;
    else if (NumVectorInstructions > NumInstructions/10)
      VectorBonus = TenPercentVectorBonus;
    else
      VectorBonus = 0;

    // Check if we've passed the threshold so we don't spin in huge basic
    // blocks that will never inline.
    if (Cost > (Threshold + VectorBonus))
      return false;
  }

  return true;
}
Example No. 17
// Collect the vector of possible reduction variables.
void LoopReroll::collectPossibleReductions(Loop *L,
  ReductionTracker &Reductions) {
  BasicBlock *Header = L->getHeader();
  for (BasicBlock::iterator I = Header->begin(),
       IE = Header->getFirstInsertionPt(); I != IE; ++I) {
    if (!isa<PHINode>(I))
      continue;
    if (!I->getType()->isSingleValueType())
      continue;

    SimpleLoopReduction SLR(I, L);
    if (!SLR.valid())
      continue;

    DEBUG(dbgs() << "LRR: Possible reduction: " << *I << " (with " <<
          SLR.size() << " chained instructions)\n");
    Reductions.addSLR(SLR);
  }
}
Example No. 18
/// SplitBlockPredecessors - This method transforms BB by introducing a new
/// basic block into the function, and moving some of the predecessors of BB to
/// be predecessors of the new block.  The new predecessors are indicated by the
/// Preds array, which has NumPreds elements in it.  The new block is given a
/// suffix of 'Suffix'.
///
/// This currently updates the LLVM IR, AliasAnalysis, DominatorTree,
/// LoopInfo, and LCSSA but no other analyses. In particular, it does not
/// preserve LoopSimplify (because it's complicated to handle the case where one
/// of the edges being split is an exit of a loop with other exits).
///
BasicBlock *llvm::SplitBlockPredecessors(BasicBlock *BB, 
                                         BasicBlock *const *Preds,
                                         unsigned NumPreds, const char *Suffix,
                                         Pass *P) {
  // Create new basic block, insert right before the original block.
  BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), BB->getName()+Suffix,
                                         BB->getParent(), BB);
  
  // The new block unconditionally branches to the old block.
  BranchInst *BI = BranchInst::Create(BB, NewBB);
  
  // Move the edges from Preds to point to NewBB instead of BB.
  for (unsigned i = 0; i != NumPreds; ++i) {
    // This is slightly more strict than necessary; the minimum requirement
    // is that there be no more than one indirectbr branching to BB. And
    // all BlockAddress uses would need to be updated.
    assert(!isa<IndirectBrInst>(Preds[i]->getTerminator()) &&
           "Cannot split an edge from an IndirectBrInst");
    Preds[i]->getTerminator()->replaceUsesOfWith(BB, NewBB);
  }

  // Insert a new PHI node into NewBB for every PHI node in BB and that new PHI
  // node becomes an incoming value for BB's phi node.  However, if the Preds
  // list is empty, we need to insert dummy entries into the PHI nodes in BB to
  // account for the newly created predecessor.
  if (NumPreds == 0) {
    // Insert dummy values as the incoming value.
    for (BasicBlock::iterator I = BB->begin(); isa<PHINode>(I); ++I)
      cast<PHINode>(I)->addIncoming(UndefValue::get(I->getType()), NewBB);
    return NewBB;
  }

  // Update DominatorTree, LoopInfo, and LCSSA analysis information.
  bool HasLoopExit = false;
  UpdateAnalysisInformation(BB, NewBB, ArrayRef<BasicBlock*>(Preds, NumPreds),
                            P, HasLoopExit);

  // Update the PHI nodes in BB with the values coming from NewBB.
  UpdatePHINodes(BB, NewBB, ArrayRef<BasicBlock*>(Preds, NumPreds), BI,
                 P, HasLoopExit);
  return NewBB;
}
Example No. 19
void IntroduceControlFlow(Function *F) {
  std::set<Instruction*> BoolInst;
  for (BasicBlock::iterator it = F->begin()->begin(),
       e = F->begin()->end(); it != e; ++it) {
    if (it->getType() == IntegerType::getInt1Ty(F->getContext()))
      BoolInst.insert(it);
  }

  for (std::set<Instruction*>::iterator it = BoolInst.begin(),
       e = BoolInst.end(); it != e; ++it) {
    Instruction *Instr = *it;
    BasicBlock *Curr = Instr->getParent();
    BasicBlock::iterator Loc= Instr;
    BasicBlock *Next = Curr->splitBasicBlock(Loc, "CF");
    Instr->moveBefore(Curr->getTerminator());
    if (Curr != &F->getEntryBlock()) {
      BranchInst::Create(Curr, Next, Instr, Curr->getTerminator());
      Curr->getTerminator()->eraseFromParent();
    }
  }
}
Example No. 20
/// isSafeToClone - Return true if the loop body is safe to clone in practice.
/// Routines that reform the loop CFG and split edges often fail on indirectbr.
bool Loop::isSafeToClone() const {
  // Return false if any loop blocks contain indirectbrs, or there are any calls
  // to noduplicate functions.
  for (Loop::block_iterator I = block_begin(), E = block_end(); I != E; ++I) {
    if (isa<IndirectBrInst>((*I)->getTerminator()))
      return false;

    if (const InvokeInst *II = dyn_cast<InvokeInst>((*I)->getTerminator()))
      if (II->cannotDuplicate())
        return false;

    for (BasicBlock::iterator BI = (*I)->begin(), BE = (*I)->end(); BI != BE; ++BI) {
      if (const CallInst *CI = dyn_cast<CallInst>(BI)) {
        if (CI->cannotDuplicate())
          return false;
      }
      if (BI->getType()->isTokenTy() && BI->isUsedOutsideOfBlock(*I))
        return false;
    }
  }
  return true;
}
Example No. 21
static void ComputeNumbering(Function *F, DenseMap<Value*,unsigned> &Numbering){
  unsigned IN = 0;

  // Arguments get the first numbers.
  for (Function::arg_iterator
         AI = F->arg_begin(), AE = F->arg_end(); AI != AE; ++AI)
    if (!AI->hasName())
      Numbering[&*AI] = IN++;

  // Walk the basic blocks in order.
  for (Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI) {
    if (!FI->hasName())
      Numbering[&*FI] = IN++;

    // Walk the instructions in order.
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
      // void instructions don't get numbers.
      if (!BI->hasName() && !BI->getType()->isVoidTy())
        Numbering[&*BI] = IN++;
  }

  assert(!Numbering.empty() && "asked for numbering but numbering was no-op");
}
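A minimal usage sketch (LHS and RHS are hypothetical Function pointers): number the unnamed values of two structurally similar functions so they can be matched up by position.

DenseMap<Value *, unsigned> LNumbering, RNumbering;
ComputeNumbering(LHS, LNumbering);
ComputeNumbering(RHS, RNumbering);
// An unnamed value in LHS corresponds to the RHS value with the same number.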
Example No. 22
void MutationGen::genMutationFile(Function & F){
	int index = 0;
	
	for(Function::iterator FI = F.begin(); FI != F.end(); ++FI){
		BasicBlock *BB = FI;

		#if NEED_LOOP_INFO
		bool isLoop = LI->getLoopFor(BB);
		#endif
		
		for(BasicBlock::iterator BI = BB->begin(); BI != BB->end(); ++BI, index++){
			
			unsigned opc = BI->getOpcode();
			if( !((opc >= 14 && opc <= 31) || opc == 34 || opc == 52 || opc == 55) ){ // only consider the opcodes handled by the switch below; skip the rest
				continue;
			}

			int idxtmp = index;

			#if NEED_LOOP_INFO
			if(isLoop){
				assert(idxtmp != 0);
				idxtmp = 0 - idxtmp;
			}
			#endif
			
			switch(opc){
				case Instruction::Add:
				case Instruction::Sub:
				case Instruction::Mul:
				case Instruction::UDiv:
				case Instruction::SDiv:
				case Instruction::URem:
				case Instruction::SRem:{
					
					// TODO: add for i1, i8. Support i32 and i64 first
					if(! (BI->getType()->isIntegerTy(32) || BI->getType()->isIntegerTy(64))){
						continue;
					}
					
					genLVR(BI, F.getName(), idxtmp);
					genUOI(BI, F.getName(), idxtmp);
					genROV(BI, F.getName(), idxtmp);
					genABV(BI, F.getName(), idxtmp);					
					genAOR(BI, F.getName(), idxtmp);
					break;
				}
				case Instruction::ICmp:{
					if(! (BI->getOperand(0)->getType()->isIntegerTy(32) ||
						BI->getOperand(0)->getType()->isIntegerTy(64)) ){
						continue;
					}

					genLVR(BI, F.getName(), idxtmp);
					genUOI(BI, F.getName(), idxtmp);	
					genROV(BI, F.getName(), idxtmp);
					genABV(BI, F.getName(), idxtmp);			
					genROR(BI, F.getName(), idxtmp);
					break;
				}
				case Instruction::Shl:
				case Instruction::LShr:
				case Instruction::AShr:
				case Instruction::And:
				case Instruction::Or:
				case Instruction::Xor:{
					// TODO: add for i1, i8. Support i32 and i64 first
					if(! (BI->getType()->isIntegerTy(32) || BI->getType()->isIntegerTy(64))){
						continue;
					}
					genLVR(BI, F.getName(), idxtmp);
					genUOI(BI, F.getName(), idxtmp);
					genROV(BI, F.getName(), idxtmp);
					genABV(BI, F.getName(), idxtmp);					
					genLOR(BI, F.getName(), idxtmp);
					break;
				}			
				case Instruction::Call:
				{
					CallInst* call = cast<CallInst>(BI);

					// TODO: omit function-pointer
					if(call->getCalledFunction() == NULL){
						continue;
					}
					/*Value* callee = dyn_cast<Value>(&*(call->op_end() - 1));
					if(callee->getType()->isPointerTy()){
						continue;
					}*/
					
					StringRef name = call->getCalledFunction()->getName();
					if(name.startswith("llvm")){ // omit llvm.* internal functions
						continue;
					}

					// TODO: add for omitting i8. Support i32 and i64 first
					if(! ( isSupportedType(BI->getType())|| BI->getType()->isVoidTy() ) ){
						continue;
					}

					genLVR(BI, F.getName(), idxtmp);
					genUOI(BI, F.getName(), idxtmp);
					genROV(BI, F.getName(), idxtmp);
					genABV(BI, F.getName(), idxtmp);					
					genSTDCall(BI, F.getName(), idxtmp);
					break;
				}
				case Instruction::Store:{

					auto addr = BI->op_begin() + 1;// the pointer of the storeinst
					
					if( ! (dyn_cast<LoadInst>(&*addr) || 
							dyn_cast<AllocaInst>(&*addr) || 
							dyn_cast<Constant>(&*addr) || 
							dyn_cast<GetElementPtrInst>(&*addr)
						   ) 
					   ){
						continue;
					}

					// TODO: add for i8
					Value* tobestore = dyn_cast<Value>(BI->op_begin());
					if(! isSupportedType(tobestore->getType())){
						continue;
					}
					
					genLVR(BI, F.getName(), idxtmp);
					genUOI(BI, F.getName(), idxtmp);
					genABV(BI, F.getName(), idxtmp);	
					genSTDStore(BI, F.getName(), idxtmp);
					break;
				}	
				case Instruction::GetElementPtr:{
					// TODO:
					break;
				}
				default:{
					
				}					
			}
			
		}
	}
	ofresult.flush();
}
Example No. 23
/// SplitBlockPredecessors - This method transforms BB by introducing a new
/// basic block into the function, and moving some of the predecessors of BB to
/// be predecessors of the new block.  The new predecessors are indicated by the
/// Preds array, which has NumPreds elements in it.  The new block is given a
/// suffix of 'Suffix'.
///
/// This currently updates the LLVM IR, AliasAnalysis, DominatorTree and
/// DominanceFrontier, but no other analyses.
BasicBlock *llvm::SplitBlockPredecessors(BasicBlock *BB, 
                                         BasicBlock *const *Preds,
                                         unsigned NumPreds, const char *Suffix,
                                         Pass *P) {
  // Create new basic block, insert right before the original block.
  BasicBlock *NewBB =
    BasicBlock::Create(BB->getName()+Suffix, BB->getParent(), BB);
  
  // The new block unconditionally branches to the old block.
  BranchInst *BI = BranchInst::Create(BB, NewBB);
  
  // Move the edges from Preds to point to NewBB instead of BB.
  for (unsigned i = 0; i != NumPreds; ++i)
    Preds[i]->getTerminator()->replaceUsesOfWith(BB, NewBB);
  
  // Update dominator tree and dominator frontier if available.
  DominatorTree *DT = P ? P->getAnalysisIfAvailable<DominatorTree>() : 0;
  if (DT)
    DT->splitBlock(NewBB);
  if (DominanceFrontier *DF = P ? P->getAnalysisIfAvailable<DominanceFrontier>():0)
    DF->splitBlock(NewBB);
  AliasAnalysis *AA = P ? P->getAnalysisIfAvailable<AliasAnalysis>() : 0;
  
  
  // Insert a new PHI node into NewBB for every PHI node in BB and that new PHI
  // node becomes an incoming value for BB's phi node.  However, if the Preds
  // list is empty, we need to insert dummy entries into the PHI nodes in BB to
  // account for the newly created predecessor.
  if (NumPreds == 0) {
    // Insert dummy values as the incoming value.
    for (BasicBlock::iterator I = BB->begin(); isa<PHINode>(I); ++I)
      cast<PHINode>(I)->addIncoming(UndefValue::get(I->getType()), NewBB);
    return NewBB;
  }
  
  // Otherwise, create a new PHI node in NewBB for each PHI node in BB.
  for (BasicBlock::iterator I = BB->begin(); isa<PHINode>(I); ) {
    PHINode *PN = cast<PHINode>(I++);
    
    // Check to see if all of the values coming in are the same.  If so, we
    // don't need to create a new PHI node.
    Value *InVal = PN->getIncomingValueForBlock(Preds[0]);
    for (unsigned i = 1; i != NumPreds; ++i)
      if (InVal != PN->getIncomingValueForBlock(Preds[i])) {
        InVal = 0;
        break;
      }
    
    if (InVal) {
      // If all incoming values for the new PHI would be the same, just don't
      // make a new PHI.  Instead, just remove the incoming values from the old
      // PHI.
      for (unsigned i = 0; i != NumPreds; ++i)
        PN->removeIncomingValue(Preds[i], false);
    } else {
      // If the values coming into the block are not the same, we need a PHI.
      // Create the new PHI node, insert it into NewBB at the end of the block
      PHINode *NewPHI =
        PHINode::Create(PN->getType(), PN->getName()+".ph", BI);
      if (AA) AA->copyValue(PN, NewPHI);
      
      // Move all of the PHI values for 'Preds' to the new PHI.
      for (unsigned i = 0; i != NumPreds; ++i) {
        Value *V = PN->removeIncomingValue(Preds[i], false);
        NewPHI->addIncoming(V, Preds[i]);
      }
      InVal = NewPHI;
    }
    
    // Add an incoming value to the PHI node in the loop for the preheader
    // edge.
    PN->addIncoming(InVal, NewBB);
    
    // Check to see if we can eliminate this phi node.
    if (Value *V = PN->hasConstantValue(DT != 0)) {
      Instruction *I = dyn_cast<Instruction>(V);
      if (!I || DT == 0 || DT->dominates(I, PN)) {
        PN->replaceAllUsesWith(V);
        if (AA) AA->deleteValue(PN);
        PN->eraseFromParent();
      }
    }
  }
  
  return NewBB;
}
Example No. 24
/// RemoveBlockIfDead - If the specified block is dead, remove it, update loop
/// information, and remove any dead successors it has.
///
void LoopUnswitch::RemoveBlockIfDead(BasicBlock *BB,
                                     std::vector<Instruction*> &Worklist,
                                     Loop *L) {
  if (pred_begin(BB) != pred_end(BB)) {
    // This block isn't dead, since an edge to BB was just removed, see if there
    // are any easy simplifications we can do now.
    if (BasicBlock *Pred = BB->getSinglePredecessor()) {
      // If it has one pred, fold phi nodes in BB.
      while (isa<PHINode>(BB->begin()))
        ReplaceUsesOfWith(BB->begin(), 
                          cast<PHINode>(BB->begin())->getIncomingValue(0), 
                          Worklist, L, LPM);
      
      // If this is the header of a loop and the only pred is the latch, we now
      // have an unreachable loop.
      if (Loop *L = LI->getLoopFor(BB))
        if (loopHeader == BB && L->contains(Pred)) {
          // Remove the branch from the latch to the header block, this makes
          // the header dead, which will make the latch dead (because the header
          // dominates the latch).
          LPM->deleteSimpleAnalysisValue(Pred->getTerminator(), L);
          Pred->getTerminator()->eraseFromParent();
          new UnreachableInst(BB->getContext(), Pred);
          
          // The loop is now broken, remove it from LI.
          RemoveLoopFromHierarchy(L);
          
          // Reprocess the header, which now IS dead.
          RemoveBlockIfDead(BB, Worklist, L);
          return;
        }
      
      // If pred ends in a uncond branch, add uncond branch to worklist so that
      // the two blocks will get merged.
      if (BranchInst *BI = dyn_cast<BranchInst>(Pred->getTerminator()))
        if (BI->isUnconditional())
          Worklist.push_back(BI);
    }
    return;
  }

  DEBUG(dbgs() << "Nuking dead block: " << *BB);
  
  // Remove the instructions in the basic block from the worklist.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    RemoveFromWorklist(I, Worklist);
    
    // Anything that uses the instructions in this basic block should have their
    // uses replaced with undefs.
    // If I is not void type then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!I->getType()->isVoidTy())
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
  }
  
  // If this is the edge to the header block for a loop, remove the loop and
  // promote all subloops.
  if (Loop *BBLoop = LI->getLoopFor(BB)) {
    if (BBLoop->getLoopLatch() == BB)
      RemoveLoopFromHierarchy(BBLoop);
  }

  // Remove the block from the loop info, which removes it from any loops it
  // was in.
  LI->removeBlock(BB);
  
  
  // Remove phi node entries in successors for this block.
  TerminatorInst *TI = BB->getTerminator();
  SmallVector<BasicBlock*, 4> Succs;
  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
    Succs.push_back(TI->getSuccessor(i));
    TI->getSuccessor(i)->removePredecessor(BB);
  }
  
  // Unique the successors, remove anything with multiple uses.
  array_pod_sort(Succs.begin(), Succs.end());
  Succs.erase(std::unique(Succs.begin(), Succs.end()), Succs.end());
  
  // Remove the basic block, including all of the instructions contained in it.
  LPM->deleteSimpleAnalysisValue(BB, L);  
  BB->eraseFromParent();
  // Remove successor blocks here that are not dead, so that we know we only
  // have dead blocks in this list.  Nondead blocks have a way of becoming dead,
  // then getting removed before we revisit them, which is badness.
  //
  for (unsigned i = 0; i != Succs.size(); ++i)
    if (pred_begin(Succs[i]) != pred_end(Succs[i])) {
      // One exception is loop headers.  If this block was the preheader for a
      // loop, then we DO want to visit the loop so the loop gets deleted.
      // We know that if the successor is a loop header, that this loop had to
      // be the preheader: the case where this was the latch block was handled
      // above and headers can only have two predecessors.
      if (!LI->isLoopHeader(Succs[i])) {
        Succs.erase(Succs.begin()+i);
        --i;
      }
    }
  
  for (unsigned i = 0, e = Succs.size(); i != e; ++i)
    RemoveBlockIfDead(Succs[i], Worklist, L);
}
Example No. 25
/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // if so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop)
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case: there is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI==OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // If the two stores had the same TBAA tag, preserve it.
  if (MDNode *TBAATag = SI.getMetadata(LLVMContext::MD_tbaa))
    if ((TBAATag = MDNode::getMostGenericTBAA(TBAATag,
                               OtherStore->getMetadata(LLVMContext::MD_tbaa))))
      NewSI->setMetadata(LLVMContext::MD_tbaa, TBAATag);


  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}
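What the transform does, expressed in plain C++ rather than IR (illustrative only): the two conditional stores collapse into one merged value and a single store, where the merged value is the PHI node built above.

void before(bool c, int v1, int v2, int *P) {
  if (c) *P = v1; else *P = v2;  // a store on each path
}
void after(bool c, int v1, int v2, int *P) {
  int t = c ? v1 : v2;           // the merged value: a PHI in IR
  *P = t;                        // single store in the common successor
}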
Example No. 26
Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Attempt to improve the alignment.
  if (DL) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Ptr, DL->getPrefTypeAlignment(Val->getType()),
                                 DL);
    unsigned StoreAlign = SI.getAlignment();
    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
      DL->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      SI.setAlignment(EffectiveStoreAlign);
  }

  // Don't hack volatile/atomic stores.
  // FIXME: Some bits are legal for atomic stores; needs refactoring.
  if (!SI.isSimple()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return EraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return EraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                                                        SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is from
    // the pointer we're loading and is producing the pointer we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          LI->isSimple())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If the pointer destination is a cast, see if we can fold the cast into the
  // source instead.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->isCast())
      if (Instruction *Res = InstCombineStoreToCast(*this, SI))
        return Res;


  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr;  // xform done!

  return nullptr;
}
Example No. 27
/// SplitBlockPredecessors - This method transforms BB by introducing a new
/// basic block into the function, and moving some of the predecessors of BB to
/// be predecessors of the new block.  The new predecessors are indicated by the
/// Preds array, which has NumPreds elements in it.  The new block is given a
/// suffix of 'Suffix'.
///
/// This currently updates the LLVM IR, AliasAnalysis, DominatorTree,
/// DominanceFrontier, LoopInfo, and LCSSA but no other analyses.
/// In particular, it does not preserve LoopSimplify (because it's
/// complicated to handle the case where one of the edges being split
/// is an exit of a loop with other exits).
///
BasicBlock *llvm::SplitBlockPredecessors(BasicBlock *BB, 
                                         BasicBlock *const *Preds,
                                         unsigned NumPreds, const char *Suffix,
                                         Pass *P) {
  // Create new basic block, insert right before the original block.
  BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), BB->getName()+Suffix,
                                         BB->getParent(), BB);
  
  // The new block unconditionally branches to the old block.
  BranchInst *BI = BranchInst::Create(BB, NewBB);
  
  LoopInfo *LI = P ? P->getAnalysisIfAvailable<LoopInfo>() : 0;
  Loop *L = LI ? LI->getLoopFor(BB) : 0;
  bool PreserveLCSSA = P->mustPreserveAnalysisID(LCSSAID);

  // Move the edges from Preds to point to NewBB instead of BB.
  // While here, if we need to preserve loop analyses, collect
  // some information about how this split will affect loops.
  bool HasLoopExit = false;
  bool IsLoopEntry = !!L;
  bool SplitMakesNewLoopHeader = false;
  for (unsigned i = 0; i != NumPreds; ++i) {
    // This is slightly more strict than necessary; the minimum requirement
    // is that there be no more than one indirectbr branching to BB. And
    // all BlockAddress uses would need to be updated.
    assert(!isa<IndirectBrInst>(Preds[i]->getTerminator()) &&
           "Cannot split an edge from an IndirectBrInst");

    Preds[i]->getTerminator()->replaceUsesOfWith(BB, NewBB);

    if (LI) {
      // If we need to preserve LCSSA, determine if any of
      // the preds is a loop exit.
      if (PreserveLCSSA)
        if (Loop *PL = LI->getLoopFor(Preds[i]))
          if (!PL->contains(BB))
            HasLoopExit = true;
      // If we need to preserve LoopInfo, note whether any of the
      // preds crosses an interesting loop boundary.
      if (L) {
        if (L->contains(Preds[i]))
          IsLoopEntry = false;
        else
          SplitMakesNewLoopHeader = true;
      }
    }
  }

  // Update dominator tree and dominator frontier if available.
  DominatorTree *DT = P ? P->getAnalysisIfAvailable<DominatorTree>() : 0;
  if (DT)
    DT->splitBlock(NewBB);
  if (DominanceFrontier *DF = P ? P->getAnalysisIfAvailable<DominanceFrontier>():0)
    DF->splitBlock(NewBB);

  // Insert a new PHI node into NewBB for every PHI node in BB and that new PHI
  // node becomes an incoming value for BB's phi node.  However, if the Preds
  // list is empty, we need to insert dummy entries into the PHI nodes in BB to
  // account for the newly created predecessor.
  if (NumPreds == 0) {
    // Insert dummy values as the incoming value.
    for (BasicBlock::iterator I = BB->begin(); isa<PHINode>(I); ++I)
      cast<PHINode>(I)->addIncoming(UndefValue::get(I->getType()), NewBB);
    return NewBB;
  }

  AliasAnalysis *AA = P ? P->getAnalysisIfAvailable<AliasAnalysis>() : 0;

  if (L) {
    if (IsLoopEntry) {
      // Add the new block to the nearest enclosing loop (and not an
      // adjacent loop). To find this, examine each of the predecessors and
      // determine which loops enclose them, and select the most-nested loop
      // which contains the loop containing the block being split.
      Loop *InnermostPredLoop = 0;
      for (unsigned i = 0; i != NumPreds; ++i)
        if (Loop *PredLoop = LI->getLoopFor(Preds[i])) {
          // Seek a loop which actually contains the block being split (to
          // avoid adjacent loops).
          while (PredLoop && !PredLoop->contains(BB))
            PredLoop = PredLoop->getParentLoop();
          // Select the most-nested of these loops which contains the block.
          if (PredLoop &&
              PredLoop->contains(BB) &&
              (!InnermostPredLoop ||
               InnermostPredLoop->getLoopDepth() < PredLoop->getLoopDepth()))
            InnermostPredLoop = PredLoop;
        }
      if (InnermostPredLoop)
        InnermostPredLoop->addBasicBlockToLoop(NewBB, LI->getBase());
    } else {
      L->addBasicBlockToLoop(NewBB, LI->getBase());
      if (SplitMakesNewLoopHeader)
        L->moveToHeader(NewBB);
    }
  }
  
  // Otherwise, create a new PHI node in NewBB for each PHI node in BB.
  for (BasicBlock::iterator I = BB->begin(); isa<PHINode>(I); ) {
    PHINode *PN = cast<PHINode>(I++);
    
    // Check to see if all of the values coming in are the same.  If so, we
    // don't need to create a new PHI node, unless it's needed for LCSSA.
    Value *InVal = 0;
    if (!HasLoopExit) {
      InVal = PN->getIncomingValueForBlock(Preds[0]);
      for (unsigned i = 1; i != NumPreds; ++i)
        if (InVal != PN->getIncomingValueForBlock(Preds[i])) {
          InVal = 0;
          break;
        }
    }

    if (InVal) {
      // If all incoming values for the new PHI would be the same, just don't
      // make a new PHI.  Instead, just remove the incoming values from the old
      // PHI.
      for (unsigned i = 0; i != NumPreds; ++i)
        PN->removeIncomingValue(Preds[i], false);
    } else {
      // If the values coming into the block are not the same, we need a PHI.
      // Create the new PHI node and insert it into NewBB at the end of the
      // block.
      PHINode *NewPHI =
        PHINode::Create(PN->getType(), PN->getName()+".ph", BI);
      if (AA) AA->copyValue(PN, NewPHI);
      
      // Move all of the PHI values for 'Preds' to the new PHI.
      for (unsigned i = 0; i != NumPreds; ++i) {
        Value *V = PN->removeIncomingValue(Preds[i], false);
        NewPHI->addIncoming(V, Preds[i]);
      }
      InVal = NewPHI;
    }
    
    // Add an incoming value to the PHI node in the loop for the preheader
    // edge.
    PN->addIncoming(InVal, NewBB);
  }
  
  return NewBB;
}
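
A hedged usage sketch: the body above matches LLVM's old-style SplitBlockPredecessors(BB, Preds, NumPreds, Suffix, Pass*) helper, so a caller might funnel all of a block's predecessors through one new block as below. The wrapper name is hypothetical; only the splitting call itself is taken from that API.

static BasicBlock *funnelPredecessors(BasicBlock *BB, Pass *P) {
  // Snapshot the current predecessor list before the CFG is edited.
  SmallVector<BasicBlock*, 8> Preds(pred_begin(BB), pred_end(BB));
  // PHI nodes in BB are rewritten as shown above: the per-predecessor
  // incoming values migrate to a new ".split" PHI in the new block.
  return SplitBlockPredecessors(BB, Preds.data(), Preds.size(),
                                ".split", P);
}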
Exemplo n.º 28
0
bool PPCCTRLoops::mightUseCTR(BasicBlock *BB) {
  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      // Inline ASM is okay, unless it clobbers the ctr register.
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
        if (asmClobbersCTR(IA))
          return true;
        continue;
      }

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to ppc_is_decremented_ctr_nonzero, or ppc_mtctr
          // we're definitely using CTR.
          case Intrinsic::ppc_is_decremented_ctr_nonzero:
          case Intrinsic::ppc_mtctr:
            return true;

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
                       !defined(setjmp_undefined_for_msvc)
#  pragma push_macro("setjmp")
#  undef setjmp
#  define setjmp_undefined_for_msvc
#endif

          case Intrinsic::setjmp:

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
 // let's return it to _setjmp state
#  pragma pop_macro("setjmp")
#  undef setjmp_undefined_for_msvc
#endif

          case Intrinsic::longjmp:

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::sqrt:               Opcode = ISD::FSQRT;      break;
          case Intrinsic::floor:              Opcode = ISD::FFLOOR;     break;
          case Intrinsic::ceil:               Opcode = ISD::FCEIL;      break;
          case Intrinsic::trunc:              Opcode = ISD::FTRUNC;     break;
          case Intrinsic::rint:               Opcode = ISD::FRINT;      break;
          case Intrinsic::nearbyint:          Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round:              Opcode = ISD::FROUND;     break;
          case Intrinsic::minnum:             Opcode = ISD::FMINNUM;    break;
          case Intrinsic::maxnum:             Opcode = ISD::FMAXNUM;    break;
          case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO;      break;
          case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO;      break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc_copysign:
          case LibFunc_copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc_copysignl:
            return true;
          case LibFunc_fabs:
          case LibFunc_fabsf:
          case LibFunc_fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc_sqrt:
          case LibFunc_sqrtf:
          case LibFunc_sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc_floor:
          case LibFunc_floorf:
          case LibFunc_floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc_nearbyint:
          case LibFunc_nearbyintf:
          case LibFunc_nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc_ceil:
          case LibFunc_ceilf:
          case LibFunc_ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc_rint:
          case LibFunc_rintf:
          case LibFunc_rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc_round:
          case LibFunc_roundf:
          case LibFunc_roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc_trunc:
          case LibFunc_truncf:
          case LibFunc_truncl:
            Opcode = ISD::FTRUNC; break;
          case LibFunc_fmin:
          case LibFunc_fminf:
          case LibFunc_fminl:
            Opcode = ISD::FMINNUM; break;
          case LibFunc_fmax:
          case LibFunc_fmaxf:
          case LibFunc_fmaxl:
            Opcode = ISD::FMAXNUM; break;
          }
        }

        if (Opcode) {
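          // A legal or custom-lowered operation stays a machine instruction;
          // anything else is expanded into a library call, which clobbers
          // the counter register.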
          EVT EVTy =
              TLI->getValueType(*DL, CI->getArgOperand(0)->getType(), true);

          if (EVTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
            continue;
          else if (EVTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
            continue;

          return true;
        }
      }

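      // Any other call (indirect, or a direct call not proven safe above)
      // is a real function call and may clobber the counter register.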
      return true;
    } else if (isa<BinaryOperator>(J) &&
               J->getType()->getScalarType()->isPPC_FP128Ty()) {
      // Most operations on ppc_f128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(!TM->isPPC64(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(!TM->isPPC64(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(!TM->isPPC64(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (!TM->isPPC64() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
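      // A switch large enough to become a jump table lowers to an indirect
      // branch, which on PowerPC goes through the counter register.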
      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    // FREM is always a call.
    if (J->getOpcode() == Instruction::FRem)
      return true;

    if (STI->useSoftFloat()) {
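      // Under soft float, each of these FP operations becomes a library call.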
      switch (J->getOpcode()) {
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FPTrunc:
      case Instruction::FPExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::UIToFP:
      case Instruction::SIToFP:
      case Instruction::FCmp:
        return true;
      }
    }

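    // Finally, an operand's address computation may itself require the
    // counter register (see memAddrUsesCTR).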
    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(*TM, Operand))
        return true;
  }

  return false;
}
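
asmClobbersCTR is not defined in this excerpt; a plausible sketch, mirroring the inline-asm constraint scan that Exemplo n.º 30 below performs directly, would be:

static bool asmClobbersCTR(InlineAsm *IA) {
  InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
  for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
    InlineAsm::ConstraintInfo &C = CIV[i];
    // An output or clobber constraint naming {ctr} taints the loop.
    if (C.Type != InlineAsm::isInput)
      for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
        if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
          return true;
  }
  return false;
}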
Exemplo n.º 29
0
    virtual bool runOnFunction(Function &F) {
      DEBUG(errs() << "Running on " << F.getName() << "\n");
      DEBUG(F.dump());
      Changed = false;
      BaseMap.clear();
      BoundsMap.clear();
      AbrtBB = 0;
      valid = true;

      if (!rootNode) {
        rootNode = getAnalysis<CallGraph>().getRoot();
        // No recursive functions for now.
        // In the future we may insert runtime checks for stack depth.
        for (scc_iterator<CallGraphNode*> SCCI = scc_begin(rootNode),
             E = scc_end(rootNode); SCCI != E; ++SCCI) {
          const std::vector<CallGraphNode*> &nextSCC = *SCCI;
          if (nextSCC.size() > 1 || SCCI.hasLoop()) {
            errs() << "INVALID: Recursion detected, callgraph SCC components: ";
            for (std::vector<CallGraphNode*>::const_iterator I = nextSCC.begin(),
                 E = nextSCC.end(); I != E; ++I) {
              Function *FF = (*I)->getFunction();
              if (FF) {
                errs() << FF->getName() << ", ";
                badFunctions.insert(FF);
              }
            }
            if (SCCI.hasLoop())
              errs() << "(self-loop)";
            errs() << "\n";
          }
          // we could also have recursion via function pointers, but we don't
          // allow calls to unknown functions, see runOnFunction() below
        }
      }

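      // Skip over leading allocas and PHI nodes; EP marks the first ordinary
      // instruction of the entry block.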
      BasicBlock::iterator It = F.getEntryBlock().begin();
      while (isa<AllocaInst>(It) || isa<PHINode>(It)) ++It;
      EP = &*It;

      TD = &getAnalysis<TargetData>();
      SE = &getAnalysis<ScalarEvolution>();
      PT = &getAnalysis<PointerTracking>();
      DT = &getAnalysis<DominatorTree>();

      std::vector<Instruction*> insns;

      BasicBlock *LastBB = 0;
      bool skip = false;
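      // Collect loads, stores, memory intrinsics, and calls to external
      // functions for validation; blocks without a dominator-tree node are
      // unreachable and are skipped.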
      for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
        Instruction *II = &*I;
        if (II->getParent() != LastBB) {
          LastBB = II->getParent();
          skip = DT->getNode(LastBB) == 0;
        }
        if (skip)
          continue;
        if (isa<LoadInst>(II) || isa<StoreInst>(II) || isa<MemIntrinsic>(II))
          insns.push_back(II);
        if (CallInst *CI = dyn_cast<CallInst>(II)) {
          Value *V = CI->getCalledValue()->stripPointerCasts();
          Function *Callee = dyn_cast<Function>(V);
          if (!Callee) {
            printLocation(CI, true);
            errs() << "Could not determine call target\n";
            valid = false;
            continue;
          }
          if (!Callee->isDeclaration())
            continue;
          insns.push_back(CI);
        }
      }
      while (!insns.empty()) {
        Instruction *II = insns.back();
        insns.pop_back();
        DEBUG(dbgs() << "checking " << *II << "\n");
        if (LoadInst *LI = dyn_cast<LoadInst>(II)) {
          const Type *Ty = LI->getType();
          valid &= validateAccess(LI->getPointerOperand(),
                                  TD->getTypeAllocSize(Ty), LI);
        } else if (StoreInst *SI = dyn_cast<StoreInst>(II)) {
          const Type *Ty = SI->getOperand(0)->getType();
          valid &= validateAccess(SI->getPointerOperand(),
                                  TD->getTypeAllocSize(Ty), SI);
        } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
          valid &= validateAccess(MI->getDest(), MI->getLength(), MI);
          if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
            valid &= validateAccess(MTI->getSource(), MI->getLength(), MI);
          }
        } else if (CallInst *CI = dyn_cast<CallInst>(II)) {
          Value *V = CI->getCalledValue()->stripPointerCasts();
          Function *Callee = cast<Function>(V);
          const FunctionType *FTy = Callee->getFunctionType();
          CallSite CS(CI);

          if (Callee->getName().equals("memcmp") && FTy->getNumParams() == 3) {
            valid &= validateAccess(CS.getArgument(0), CS.getArgument(2), CI);
            valid &= validateAccess(CS.getArgument(1), CS.getArgument(2), CI);
            continue;
          }
          unsigned i;
#ifdef CLAMBC_COMPILER
          i = 0;
#else
          i = 1; // skip the hidden ctx* parameter
#endif
          for (; i < FTy->getNumParams(); i++) {
            if (isa<PointerType>(FTy->getParamType(i))) {
              Value *Ptr = CS.getArgument(i);
              if (i+1 >= FTy->getNumParams()) {
                printLocation(CI, false);
                errs() << "Cannot analyze call to external function whose last parameter is a pointer\n";
                errs() << *CI << "\n";
                valid = false;
                break;
              }
              Value *Size = CS.getArgument(i+1);
              if (!Size->getType()->isIntegerTy()) {
                printLocation(CI, false);
                errs() << "Pointer argument must be followed by an integer argument giving its size\n";
                errs() << *CI << "\n";
                valid = false;
                break;
              }
              valid &= validateAccess(Ptr, Size, CI);
            }
          }
        }
      }
      if (badFunctions.count(&F))
        valid = false;

      if (!valid) {
        DEBUG(F.dump());
        ClamBCModule::stop("Verification found errors!", &F);
        // Replace the function body with a call to abort().
        std::vector<const Type*> args;
        FunctionType *abrtTy = FunctionType::get(
          Type::getVoidTy(F.getContext()), args, false);
        Constant *func_abort =
          F.getParent()->getOrInsertFunction("abort", abrtTy);

        BasicBlock *BB = &F.getEntryBlock();
        Instruction *I = &*BB->begin();
        Instruction *UI = new UnreachableInst(F.getContext(), I);
        CallInst *AbrtC = CallInst::Create(func_abort, "", UI);
        AbrtC->setCallingConv(CallingConv::C);
        AbrtC->setTailCall(true);
        AbrtC->setDoesNotReturn(true);
        AbrtC->setDoesNotThrow(true);
        // Remove all remaining instructions from the entry block.
        BasicBlock::iterator BBI = I, BBE = BB->end();
        while (BBI != BBE) {
          if (!BBI->use_empty())
            BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
          BB->getInstList().erase(BBI++);
        }
      }
      return Changed;
    }
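
The pointer-then-size convention enforced above means an external API callable from verified code must pair every pointer parameter with a following integer size. A hypothetical conforming declaration (names and types are illustrative, assuming <cstdint>):

// Each pointer parameter is immediately followed by its size in bytes,
// which lets the checker bound the access via validateAccess().
extern "C" int32_t copy_bytes(uint8_t *dst, uint32_t dst_size,
                              const uint8_t *src, uint32_t src_size);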
Exemplo n.º 30
0
bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
        // Inline ASM is okay, unless it clobbers the ctr register.
        InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
        for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
          InlineAsm::ConstraintInfo &C = CIV[i];
          if (C.Type != InlineAsm::isInput)
            for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
              if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
                return true;
        }

        continue;
      }

      if (!TM)
        return true;
      const TargetLowering *TLI = TM->getTargetLowering();

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
                       !defined(setjmp_undefined_for_msvc)
#  pragma push_macro("setjmp")
#  undef setjmp
#  define setjmp_undefined_for_msvc
#endif

          case Intrinsic::setjmp:

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
 // let's return it to _setjmp state
#  pragma pop_macro("setjmp")
#  undef setjmp_undefined_for_msvc
#endif

          case Intrinsic::longjmp:

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
          case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
          case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
          case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
          case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
          case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round:     Opcode = ISD::FROUND;     break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc::Func Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc::copysign:
          case LibFunc::copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc::copysignl:
            return true;
          case LibFunc::fabs:
          case LibFunc::fabsf:
          case LibFunc::fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc::sqrt:
          case LibFunc::sqrtf:
          case LibFunc::sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc::floor:
          case LibFunc::floorf:
          case LibFunc::floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc::nearbyint:
          case LibFunc::nearbyintf:
          case LibFunc::nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc::ceil:
          case LibFunc::ceilf:
          case LibFunc::ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc::rint:
          case LibFunc::rintf:
          case LibFunc::rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc::round:
          case LibFunc::roundf:
          case LibFunc::roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc::trunc:
          case LibFunc::truncf:
          case LibFunc::truncl:
            Opcode = ISD::FTRUNC; break;
          }

          MVT VTy =
            TLI->getSimpleValueType(CI->getArgOperand(0)->getType(), true);
          if (VTy == MVT::Other)
            return true;
          
          if (TLI->isOperationLegalOrCustom(Opcode, VTy))
            continue;
          else if (VTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, VTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               J->getType()->getScalarType()->isPPC_FP128Ty()) {
      // Most operations on ppc_f128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          (TT.isArch32Bit() &&
           (CI->getSrcTy()->getScalarType()->isIntegerTy(64) ||
            CI->getDestTy()->getScalarType()->isIntegerTy(64))
          ))
        return true;
    } else if (TT.isArch32Bit() &&
               J->getType()->getScalarType()->isIntegerTy(64) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (!TM)
        return true;
      const TargetLowering *TLI = TM->getTargetLowering();

      if (TLI->supportJumpTables() &&
          SI->getNumCases()+1 >= (unsigned) TLI->getMinimumJumpTableEntries())
        return true;
    }
  }

  return false;
}
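
A hedged usage sketch (the helper name is hypothetical and would need a matching declaration in the pass class): a pass like this converts a loop into a CTR loop only when no block in it might touch the counter register.

bool PPCCTRLoops::loopIsCTRSafe(const Triple &TT, Loop *L) {
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(TT, *I))
      return false; // a single tainted block disqualifies the whole loop
  return true;
}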