Example 1
 CallSite* CallSite::empty(STATE, Symbol* name, Executable* executable, int ip) {
   CallSite* cache =
     state->new_object_dirty<CallSite>(G(call_site));
   cache->name_ = name;
   cache->executor_ = empty_cache;
   cache->fallback_ = empty_cache;
   cache->updater_  = empty_cache_updater;
   cache->executable(state, executable);
   cache->ip_ = ip;
   return cache;
 }
Example 2
void EmitMemSet(IRBuilder<> &B, Value *Dst, Value *Val, Value *Len,
                const Analysis &A) {
  Dst = B.CreateBitCast(Dst, PointerType::getUnqual(B.getInt8Ty()));

  CallSite CS =
      B.CreateMemSet(Dst, Val, Len, 1 /*Align*/, false /*isVolatile*/);
  if (A.CGNode) {
    A.CGNode->addCalledFunction(
        CS, A.CG->getOrInsertFunction(CS.getCalledFunction()));
  }
}
Example 3
void MemoryInstrumenter::instrumentFork(const CallSite &CS) {
    Instruction *Ins = CS.getInstruction();
    assert(!Ins->isTerminator());
    Function *Callee = CS.getCalledFunction();
    StringRef CalleeName = Callee->getName();
    assert(CalleeName == "fork" || CalleeName == "vfork");

    BasicBlock::iterator Loc = Ins;
    CallInst::Create(BeforeForkHook, "", Loc);
    ++Loc;
    CallInst::Create(AfterForkHook, Ins, "", Loc);
}
Example 4
// Calculate the state a call-site is in.
int WinEHStatePass::getStateForCallSite(
    DenseMap<BasicBlock *, ColorVector> &BlockColors, WinEHFuncInfo &FuncInfo,
    CallSite CS) {
  if (auto *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
    // Look up the state number of the EH pad this unwinds to.
    assert(FuncInfo.InvokeStateMap.count(II) && "invoke has no state!");
    return FuncInfo.InvokeStateMap[II];
  }
  // Possibly throwing call instructions have no actions to take after
  // an unwind. Ensure they are in the -1 state.
  return getBaseStateForBB(BlockColors, FuncInfo, CS.getParent());
}
Example 5
// Check if it is ok to perform this promotion.
bool SRETPromotion::isSafeToUpdateAllCallers(Function *F) {

  if (F->use_empty())
    // No users. OK to modify signature.
    return true;

  for (Value::use_iterator FnUseI = F->use_begin(), FnUseE = F->use_end();
       FnUseI != FnUseE; ++FnUseI) {
    // The function is passed in as an argument to (possibly) another function,
    // we can't change it!
    CallSite CS = CallSite::get(*FnUseI);
    Instruction *Call = CS.getInstruction();
    // The function is used by something other than a call or invoke instruction,
    // we can't change it!
    if (!Call || !CS.isCallee(FnUseI))
      return false;
    CallSite::arg_iterator AI = CS.arg_begin();
    Value *FirstArg = *AI;

    if (!isa<AllocaInst>(FirstArg))
      return false;

    // Check FirstArg's users.
    for (Value::use_iterator ArgI = FirstArg->use_begin(), 
           ArgE = FirstArg->use_end(); ArgI != ArgE; ++ArgI) {

      // If FirstArg user is a CallInst that does not correspond to current
      // call site then this function F is not suitable for sret promotion.
      if (CallInst *CI = dyn_cast<CallInst>(ArgI)) {
        if (CI != Call)
          return false;
      }
      // If the FirstArg user is a GEP and not all of its users are LoadInsts,
      // then this function F is not suitable for sret promotion.
      else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(ArgI)) {
        // TODO : Use dom info and insert PHINodes to collect get results
        // from multiple call sites for this GEP.
        if (GEP->getParent() != Call->getParent())
          return false;
        for (Value::use_iterator GEPI = GEP->use_begin(), GEPE = GEP->use_end();
             GEPI != GEPE; ++GEPI) 
          if (!isa<LoadInst>(GEPI))
            return false;
      } 
      // Any other FirstArg users make this function unsuitable for sret 
      // promotion.
      else
        return false;
    }
  }

  return true;
}
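For context, here is a purely illustrative C++ fragment (my own sketch of how large aggregate returns are typically lowered on common targets, not code from the pass): the caller allocates the result object, passes its address as a hidden first "sret" argument, and reads the fields back through GEP + load, which is exactly the caller-side shape isSafeToUpdateAllCallers() accepts.

// Illustrative only: typical sret lowering assumed, not taken from the pass.
struct Big { long v[8]; };

Big makeBig() {            // usually lowered to: void makeBig(Big* sret %result)
  Big B = {};
  B.v[0] = 42;
  return B;
}

long useBig() {
  Big B = makeBig();       // the caller's alloca for B is passed as the sret argument
  return B.v[0];           // subsequent field reads become GEP + load on that alloca
}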
Example 6
/// \brief Simplify arguments going into a particular callsite.
///
/// This is important to do each time we add a callsite due to inlining so that
/// constants and other entities which feed into inline cost estimation are
/// properly recognized when analyzing the new callsite. Consider:
///   void outer(int x) {
///     if (x < 42)
///       return inner(42 - x);
///     ...
///   }
///   void inner(int x) {
///     ...
///   }
///
/// The inliner gives calls to 'outer' with a constant argument a bonus because
/// it will delete one side of a branch. But the resulting call to 'inner'
/// will, after inlining, also have a constant operand. We need to do just
/// enough constant folding to expose this for callsite arguments. The rest
/// will be taken care of after the inliner finishes running.
static void simplifyCallSiteArguments(const TargetData *TD, CallSite CS) {
  // FIXME: It would be nice to avoid this smallvector if RAUW doesn't
  // invalidate operand iterators in any cases.
  SmallVector<std::pair<Value *, Value*>, 4> SimplifiedArgs;
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I)
    if (Instruction *Inst = dyn_cast<Instruction>(*I))
      if (Value *SimpleArg = SimplifyInstruction(Inst, TD))
        SimplifiedArgs.push_back(std::make_pair(Inst, SimpleArg));
  for (unsigned Idx = 0, Size = SimplifiedArgs.size(); Idx != Size; ++Idx)
    SimplifiedArgs[Idx].first->replaceAllUsesWith(SimplifiedArgs[Idx].second);
}
Example 7
/// \brief Get the inline cost for the always-inliner.
///
/// The always inliner *only* handles functions which are marked with the
/// attribute to force inlining. As such, it is dramatically simpler and avoids
/// using the powerful (but expensive) inline cost analysis. Instead it uses
/// a very simple and boring direct walk of the instructions looking for
/// impossible-to-inline constructs.
///
/// Note, it would be possible to go to some lengths to cache the information
/// computed here, but as we only expect to do this for relatively few and
/// small functions which have the explicit attribute to force inlining, it is
/// likely not worth it in practice.
InlineCost AlwaysInlinerLegacyPass::getInlineCost(CallSite CS) {
  Function *Callee = CS.getCalledFunction();

  // Only inline direct calls to functions with always-inline attributes
  // that are viable for inlining. FIXME: We shouldn't even get here for
  // declarations.
  if (Callee && !Callee->isDeclaration() &&
      CS.hasFnAttr(Attribute::AlwaysInline) && isInlineViable(*Callee))
    return InlineCost::getAlways();

  return InlineCost::getNever();
}
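A minimal consumption sketch (assuming the classic InlineCost interface with isAlways(), isNever(), and an explicit operator bool that compares cost against the threshold; shouldInline is a hypothetical helper, not part of the pass above):

#include "llvm/Analysis/InlineCost.h"

using namespace llvm;

// Sketch of how an inliner driver might act on the returned InlineCost.
static bool shouldInline(InlineCost IC) {
  if (IC.isAlways())
    return true;                  // forced, e.g. by always_inline
  if (IC.isNever())
    return false;                 // inlining is impossible or forbidden
  return static_cast<bool>(IC);   // variable cost: inline iff cost < threshold
}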
Example 8
extern "C" void LLVMRustAddDereferenceableCallSiteAttr(LLVMValueRef Instr,
                                                      unsigned index,
                                                      uint64_t bytes)
{
  CallSite Call = CallSite(unwrap<Instruction>(Instr));
  AttrBuilder B;
  B.addDereferenceableAttr(bytes);
  Call.setAttributes(
    Call.getAttributes().addAttributes(Call->getContext(), index,
                                       AttributeSet::get(Call->getContext(),
                                                         index, B)));
}
Example 9
bool WinEHStatePass::isStateStoreNeeded(EHPersonality Personality,
                                        CallSite CS) {
  if (!CS)
    return false;

  // If the function touches memory, it needs a state store.
  if (isAsynchronousEHPersonality(Personality))
    return !CS.doesNotAccessMemory();

  // If the function throws, it needs a state store.
  return !CS.doesNotThrow();
}
Example 10
/// visitStrdupCall - Handle strdup().
///
void FuncTransform::visitStrdupCall(CallSite CS) {
  assert(CS.arg_end()-CS.arg_begin() == 1 && "strdup takes one argument!");
  Instruction *I = CS.getInstruction();
  assert (getDSNodeHFor(I).getNode() && "strdup has NULL DSNode!\n");
  Value *PH = getPoolHandle(I);

  Type* Int8Type = Type::getInt8Ty(CS.getInstruction()->getContext());


#if 0
  assert (PH && "PH for strdup is null!\n");
#else
  if (!PH) {
    errs() << "strdup: NoPH\n";
    return;
  }
#endif
  Value *OldPtr = CS.getArgument(0);

  static Type *VoidPtrTy = PointerType::getUnqual(Int8Type);
  if (OldPtr->getType() != VoidPtrTy)
    OldPtr = CastInst::CreatePointerCast(OldPtr, VoidPtrTy, OldPtr->getName(), I);

  std::string Name = I->getName(); I->setName("");
  Value* Opts[3] = {PH, OldPtr, 0};
  Instruction *V = CallInst::Create(PAInfo.PoolStrdup, Opts, Name, I);
  Instruction *Casted = V;
  if (V->getType() != I->getType())
    Casted = CastInst::CreatePointerCast(V, I->getType(), V->getName(), I);

  // Update def-use info
  I->replaceAllUsesWith(Casted);

  // If we are modifying the original function, update the DSGraph.
  if (!FI.Clone) {
    // V and Casted now point to whatever the original allocation did.
    G->getScalarMap().replaceScalar(I, V);
    if (V != Casted)
      G->getScalarMap()[Casted] = G->getScalarMap()[V];
  } else {             // Otherwise, update the NewToOldValueMap
    UpdateNewToOldValueMap(I, V, V != Casted ? Casted : 0);
  }

  // If this was an invoke, fix up the CFG.
  if (InvokeInst *II = dyn_cast<InvokeInst>(I)) {
    BranchInst::Create (II->getNormalDest(), I);
    II->getUnwindDest()->removePredecessor(II->getParent(), true);
  }

  // Remove old allocation instruction.
  I->eraseFromParent();
}
Example 11
// look for a very simple pattern
//    coro.save
//    no other calls
//    resume or destroy call
//    coro.suspend
//
// If there are other calls between coro.save and coro.suspend, they can
// potentially resume or destroy the coroutine, so it is unsafe to eliminate a
// suspend point.
static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
                                 CoroBeginInst *CoroBegin) {
  auto *Save = Suspend->getCoroSave();
  auto *BB = Suspend->getParent();
  if (BB != Save->getParent())
    return false;

  CallSite SingleCallSite;

  // Check that we have only one CallSite.
  for (Instruction *I = Save->getNextNode(); I != Suspend;
       I = I->getNextNode()) {
    if (isa<CoroFrameInst>(I))
      continue;
    if (isa<CoroSubFnInst>(I))
      continue;
    if (CallSite CS = CallSite(I)) {
      if (SingleCallSite)
        return false;
      else
        SingleCallSite = CS;
    }
  }
  auto *CallInstr = SingleCallSite.getInstruction();
  if (!CallInstr)
    return false;

  auto *Callee = SingleCallSite.getCalledValue()->stripPointerCasts();

  // See if the callsite is for resumption or destruction of the coroutine.
  auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
  if (!SubFn)
    return false;

  // Does not refer to the current coroutine, we cannot do anything with it.
  if (SubFn->getFrame() != CoroBegin)
    return false;

  // Replace llvm.coro.suspend with the value that results in resumption over
  // the resume or cleanup path.
  Suspend->replaceAllUsesWith(SubFn->getRawIndex());
  Suspend->eraseFromParent();
  Save->eraseFromParent();

  // No longer need a call to coro.resume or coro.destroy.
  CallInstr->eraseFromParent();

  if (SubFn->user_empty())
    SubFn->eraseFromParent();

  return true;
}
Example 12
/// replaceCallEdge - This method replaces the edge in the node for the
/// specified call site with a new one.  Note that this method takes linear
/// time, so it should be used sparingly.
void CallGraphNode::replaceCallEdge(CallSite CS,
                                    CallSite NewCS, CallGraphNode *NewNode){
  for (CalledFunctionsVector::iterator I = CalledFunctions.begin(); ; ++I) {
    assert(I != CalledFunctions.end() && "Cannot find callsite to remove!");
    if (I->first == CS.getInstruction()) {
      I->second->DropRef();
      I->first = NewCS.getInstruction();
      I->second = NewNode;
      NewNode->AddRef();
      return;
    }
  }
}
Example 13
static bool needsStatepoint(const CallSite &CS) {
  if (callsGCLeafFunction(CS))
    return false;
  if (CS.isCall()) {
    CallInst *call = cast<CallInst>(CS.getInstruction());
    if (call->isInlineAsm())
      return false;
  }
  if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS)) {
    return false;
  }
  return true;
}
Example 14
FunctionList DSNodeEquivs::getCallees(CallSite &CS) {
  const Function *CalledFunc = CS.getCalledFunction();

  // If the called function is casted from one function type to another, peer
  // into the cast instruction and pull out the actual function being called.
  if (ConstantExpr *CExpr = dyn_cast<ConstantExpr>(CS.getCalledValue())) {
    if (CExpr->getOpcode() == Instruction::BitCast &&
        isa<Function>(CExpr->getOperand(0)))
      CalledFunc = cast<Function>(CExpr->getOperand(0));
  }

  FunctionList Callees;

  // Direct calls are simple.
  if (CalledFunc) {
    Callees.push_back(CalledFunc);
    return Callees;
  }

  // Okay, indirect call.
  // Ask the DSCallGraph what this calls...

  TDDataStructures &TDDS = getAnalysis<TDDataStructures>();
  const DSCallGraph &DSCG = TDDS.getCallGraph();

  DSCallGraph::callee_iterator CalleeIt = DSCG.callee_begin(CS);
  DSCallGraph::callee_iterator CalleeItEnd = DSCG.callee_end(CS);
  for (; CalleeIt != CalleeItEnd; ++CalleeIt)
    Callees.push_back(*CalleeIt);

  // If the callgraph doesn't give us what we want, query the DSGraph
  // ourselves.
  if (Callees.empty()) {
    Instruction *Inst = CS.getInstruction();
    Function *Parent = Inst->getParent()->getParent();
    Value *CalledValue = CS.getCalledValue();
    DSNodeHandle &NH = TDDS.getDSGraph(*Parent)->getNodeForValue(CalledValue);

    if (!NH.isNull()) {
      DSNode *Node = NH.getNode();
      Node->addFullFunctionList(Callees);
    }
  }

  // For debugging, dump out the callsites we are unable to get callees for.
  DEBUG(
  if (Callees.empty()) {
    errs() << "Failed to get callees for callsite:\n";
    CS.getInstruction()->dump();
  });

  return Callees;
}
Example 15
/*!
 * Issue a warning if a function that has indirect call sites cannot be reached from the program entry.
 */
void PTACallGraph::vefityCallGraph()
{
    CallEdgeMap::const_iterator it = indirectCallMap.begin();
    CallEdgeMap::const_iterator eit = indirectCallMap.end();
    for (; it != eit; ++it) {
        const FunctionSet& targets = it->second;
        if (targets.empty() == false) {
            CallSite cs = it->first;
            const Function* func = cs.getInstruction()->getParent()->getParent();
            if (getCallGraphNode(func)->isReachableFromProgEntry() == false)
                wrnMsg(func->getName().str() + " has indirect call site but not reachable from main");
        }
    }
}
Example 16
SizeOffsetType ObjectSizeOffsetVisitor::visitCallSite(CallSite CS) {
  const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc,
                                               TLI);
  if (!FnData)
    return unknown();

  // handle strdup-like functions separately
  if (FnData->AllocTy == StrDupLike) {
    APInt Size(IntTyBits, GetStringLength(CS.getArgument(0)));
    if (!Size)
      return unknown();

    // strndup limits strlen
    if (FnData->FstParam > 0) {
      ConstantInt *Arg= dyn_cast<ConstantInt>(CS.getArgument(FnData->FstParam));
      if (!Arg)
        return unknown();

      APInt MaxSize = Arg->getValue().zextOrSelf(IntTyBits);
      if (Size.ugt(MaxSize))
        Size = MaxSize + 1;
    }
    return std::make_pair(Size, Zero);
  }

  ConstantInt *Arg = dyn_cast<ConstantInt>(CS.getArgument(FnData->FstParam));
  if (!Arg)
    return unknown();

  APInt Size = Arg->getValue().zextOrSelf(IntTyBits);
  // size determined by just 1 parameter
  if (FnData->SndParam < 0)
    return std::make_pair(Size, Zero);

  Arg = dyn_cast<ConstantInt>(CS.getArgument(FnData->SndParam));
  if (!Arg)
    return unknown();

  Size *= Arg->getValue().zextOrSelf(IntTyBits);
  return std::make_pair(Size, Zero);

  // TODO: handle more standard functions (+ wchar cousins):
  // - strdup / strndup
  // - strcpy / strncpy
  // - strcat / strncat
  // - memcpy / memmove
  // - memset
}
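A few worked examples may help (my reading of the code above, assuming constant arguments and a POSIX environment for strdup/strndup; the (Size, Offset) results in the comments are not part of the original file):

#include <cstdlib>
#include <cstring>

// Allocation calls and the (Size, Offset) pairs the visitor would compute.
void objectSizeExamples() {
  void *a = std::malloc(32);       // (32, 0): size taken from the single parameter
  void *b = std::calloc(4, 8);     // (32, 0): the two size parameters are multiplied
  char *c = strdup("abc");         // (4, 0): GetStringLength = strlen + 1 (StrDupLike path)
  char *d = strndup("abcdef", 2);  // (3, 0): clamped to the strndup bound + 1
  std::free(a); std::free(b); std::free(c); std::free(d);
}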
Example 17
/*
 * Verify if a given value has references after a call site.
 */
bool DeadStoreEliminationPass::isRefAfterCallSite(Value* v, CallSite &CS) {
  BasicBlock* CSBB = CS.getInstruction()->getParent();

  // Collect basic blocks to inspect
  std::vector<BasicBlock*> BBToInspect;
  std::set<BasicBlock*> BBToInspectSet;
  BBToInspect.push_back(CSBB);
  BBToInspectSet.insert(CSBB);
  for (unsigned int i = 0; i < BBToInspect.size(); ++i) {
    BasicBlock* BB = BBToInspect.at(i);
    TerminatorInst* terminator = BB->getTerminator();
    if (terminator && terminator->getNumSuccessors() > 0) {
      unsigned numSuccessors = terminator->getNumSuccessors();
      for (unsigned i = 0; i < numSuccessors; ++i) {
        // Collect successors
        BasicBlock* successor = terminator->getSuccessor(i);
        if (!BBToInspectSet.count(successor)) {
          BBToInspect.push_back(successor);
          BBToInspectSet.insert(successor);
        }
      }
    }
  }

  // Inspect if any instruction after CS references v
  AliasAnalysis::Location loc(v, getPointerSize(v, *AA), NULL);
  for (unsigned int i = 0; i < BBToInspect.size(); ++i) {
    BasicBlock* BB = BBToInspect.at(i);
    BasicBlock::iterator I = BB->begin();
    if (BB == CSBB) {
      Instruction* callInst = CS.getInstruction();
      Instruction* inst;
      do {
        inst = I;
        ++I;
      } while (inst != callInst);
    }
    for (BasicBlock::iterator IE = BB->end(); I != IE; ++I) {
      Instruction* inst = I;
      DEBUG(errs() << "Verifying if instruction " << *inst << " refs " << *v << ": ");
      AliasAnalysis::ModRefResult mrf = AA->getModRefInfo(inst, loc);
      DEBUG(errs() << mrf << "\n");
      if (mrf == AliasAnalysis::Ref || mrf == AliasAnalysis::ModRef) {
        return true;
      }
    }
  }
  return false;
}
Example 18
static bool isCondRelevantToAnyCallArgument(ICmpInst *Cmp, CallSite CS) {
  assert(isa<Constant>(Cmp->getOperand(1)) && "Expected a constant operand.");
  Value *Op0 = Cmp->getOperand(0);
  unsigned ArgNo = 0;
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); I != E;
       ++I, ++ArgNo) {
    // Don't consider constants or arguments that are already known non-null.
    if (isa<Constant>(*I) || CS.paramHasAttr(ArgNo, Attribute::NonNull))
      continue;

    if (*I == Op0)
      return true;
  }
  return false;
}
Example 19
// Checks to see if a given CallSite is making an indirect call, including
// cases where the indirect call is made through a bitcast.
static bool isIndirectCall(CallSite &CS) {
  if (CS.getCalledFunction())
    return false;

  // Check the value to see if it is merely a bitcast of a function. In
  // this case, it will translate to a direct function call in the resulting
  // assembly, so we won't treat it as an indirect call here.
  const Value *V = CS.getCalledValue();
  if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    return !(CE->isCast() && isa<Function>(CE->getOperand(0)));
  }

  // Otherwise, since we know it's a call, it must be an indirect call
  return true;
}
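The helper above follows the usual CallSite idiom: construct a CallSite from an arbitrary Instruction, rely on its boolean conversion to skip non-calls, and then distinguish direct from indirect calls. A minimal sketch (assuming the pre-LLVM-10 CallSite API and that it lives in the same file as isIndirectCall above; countIndirectCalls is a hypothetical helper):

#include "llvm/IR/CallSite.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"

using namespace llvm;

// Count the indirect calls in a function using the classification above.
static unsigned countIndirectCalls(Function &F) {
  unsigned Count = 0;
  for (Instruction &I : instructions(F)) {
    CallSite CS(&I);          // evaluates to false unless I is a CallInst/InvokeInst
    if (!CS)
      continue;
    if (isIndirectCall(CS))   // bitcasts of functions are treated as direct calls
      ++Count;
  }
  return Count;
}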
Example 20
void Preparer::expandMalloc(CallSite CS) {
  Function *Callee = CS.getCalledFunction();
  assert(Callee);
  StringRef CalleeName = Callee->getName();
  if (CalleeName == "malloc" || CalleeName == "valloc") {
    Value *Size = CS.getArgument(0);
    Value *ExpandedSize = BinaryOperator::Create(
        Instruction::Add,
        Size,
        ConstantInt::get(cast<IntegerType>(Size->getType()), 1),
        "expanded.size",
        CS.getInstruction());
    CS.setArgument(0, ExpandedSize);
  }
}
Example 21
InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
                                             int Threshold) {
  // Cannot inline indirect calls.
  if (!Callee)
    return llvm::InlineCost::getNever();

  // Calls to functions with always-inline attributes should be inlined
  // whenever possible.
  if (Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                           Attribute::AlwaysInline)) {
    if (isInlineViable(*Callee))
      return llvm::InlineCost::getAlways();
    return llvm::InlineCost::getNever();
  }

  // Don't inline functions which can be redefined at link-time to mean
  // something else.  Don't inline functions marked noinline or call sites
  // marked noinline.
  if (Callee->mayBeOverridden() ||
      Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                           Attribute::NoInline) ||
      CS.isNoInline())
    return llvm::InlineCost::getNever();

  //DUETTO: Do not inline server/client methods called from the other side
  const Function* caller=CS.getCaller();
  if((caller->hasFnAttribute(Attribute::Client) && Callee->hasFnAttribute(Attribute::Server)) ||
     (caller->hasFnAttribute(Attribute::Server) && Callee->hasFnAttribute(Attribute::Client)))
  {
    return llvm::InlineCost::getNever();
  }

  DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
        << "...\n");

  CallAnalyzer CA(TD, *TTI, *Callee, Threshold);
  bool ShouldInline = CA.analyzeCall(CS);

  DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever();
  if (ShouldInline && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways();

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}
Example 22
static bool canSplitCallSite(CallSite CS, TargetTransformInfo &TTI) {
  // FIXME: As of now we handle only CallInst. InvokeInst could be handled
  // without too much effort.
  Instruction *Instr = CS.getInstruction();
  if (!isa<CallInst>(Instr))
    return false;

  BasicBlock *CallSiteBB = Instr->getParent();
  // Allow splitting a call-site only when the CodeSize cost of the
  // instructions before the call is less than DuplicationThreshold. The
  // instructions before the call will be duplicated in the split blocks and
  // corresponding uses will be updated.
  unsigned Cost = 0;
  for (auto &InstBeforeCall :
       llvm::make_range(CallSiteBB->begin(), Instr->getIterator())) {
    Cost += TTI.getInstructionCost(&InstBeforeCall,
                                   TargetTransformInfo::TCK_CodeSize);
    if (Cost >= DuplicationThreshold)
      return false;
  }

  // Need 2 predecessors and cannot split an edge from an IndirectBrInst.
  SmallVector<BasicBlock *, 2> Preds(predecessors(CallSiteBB));
  if (Preds.size() != 2 || isa<IndirectBrInst>(Preds[0]->getTerminator()) ||
      isa<IndirectBrInst>(Preds[1]->getTerminator()))
    return false;

  return CallSiteBB->canSplitPredecessors();
}
Example 23
/*
 * Check if a given instruction has its address taken.
 */
bool DeadStoreEliminationPass::hasAddressTaken(const Instruction *AI, CallSite& CS) {
  const Instruction* callInst = CS.getInstruction();
  for (Value::const_use_iterator UI = AI->use_begin(), UE = AI->use_end();
      UI != UE; ++UI) {
    const User *U = *UI;
    if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (AI == SI->getValueOperand())
        return true;
    } else if (const PtrToIntInst *SI = dyn_cast<PtrToIntInst>(U)) {
      if (AI == SI->getOperand(0))
        return true;
    } else if (isa<CallInst>(U) && dyn_cast<Instruction>(U) != callInst) {
      return true;
    } else if (isa<InvokeInst>(U) && dyn_cast<Instruction>(U) != callInst) {
      return true;
    } else if (const SelectInst *SI = dyn_cast<SelectInst>(U)) {
      if (hasAddressTaken(SI, CS))
        return true;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // Keep track of what PHI nodes we have already visited to ensure
      // they are only visited once.
      if (VisitedPHIs.insert(PN))
        if (hasAddressTaken(PN, CS))
          return true;
    } else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      if (hasAddressTaken(GEP, CS))
        return true;
    } else if (const BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
      if (hasAddressTaken(BI, CS))
        return true;
    }
  }
  return false;
}
Example 24
void OptimizeReturned::visitCallSite(CallSite CS) {
  for (unsigned i = 0, e = CS.getNumArgOperands(); i < e; ++i)
    if (CS.paramHasAttr(1 + i, Attribute::Returned)) {
      Instruction *Inst = CS.getInstruction();
      Value *Arg = CS.getArgOperand(i);
      // Ignore constants, globals, undef, etc.
      if (isa<Constant>(Arg))
        continue;
      // Like replaceDominatedUsesWith but using Instruction/Use dominance.
      for (auto UI = Arg->use_begin(), UE = Arg->use_end(); UI != UE;) {
        Use &U = *UI++;
        if (DT->dominates(Inst, U))
          U.set(Inst);
      }
    }
}
Example 25
InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee,
                                             int Threshold) {
  // Don't inline functions which can be redefined at link-time to mean
  // something else.  Don't inline functions marked noinline or call sites
  // marked noinline.
  if (!Callee || Callee->mayBeOverridden() ||
      Callee->getFnAttributes().hasAttribute(Attributes::NoInline) ||
      CS.isNoInline())
    return llvm::InlineCost::getNever();

  DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
        << "...\n");

  CallAnalyzer CA(TD, *Callee, Threshold);
  bool ShouldInline = CA.analyzeCall(CS);

  DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever();
  if (ShouldInline && (CA.isAlwaysInline() ||
                       CA.getCost() >= CA.getThreshold()))
    return InlineCost::getAlways();

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}
Example 26
InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
                                             int Threshold) {
  // Cannot inline indirect calls.
  if (!Callee)
    return llvm::InlineCost::getNever();

  // Calls to functions with always-inline attributes should be inlined
  // whenever possible.
  if (CS.hasFnAttr(Attribute::AlwaysInline)) {
    if (isInlineViable(*Callee))
      return llvm::InlineCost::getAlways();
    return llvm::InlineCost::getNever();
  }

  // Never inline functions with conflicting attributes (unless callee has
  // always-inline attribute).
  if (!functionsHaveCompatibleAttributes(CS.getCaller(), Callee))
    return llvm::InlineCost::getNever();

  // Don't inline this call if the caller has the optnone attribute.
  if (CS.getCaller()->hasFnAttribute(Attribute::OptimizeNone))
    return llvm::InlineCost::getNever();

  // Don't inline functions which can be redefined at link-time to mean
  // something else.  Don't inline functions marked noinline or call sites
  // marked noinline.
  if (Callee->mayBeOverridden() ||
      Callee->hasFnAttribute(Attribute::NoInline) || CS.isNoInline())
    return llvm::InlineCost::getNever();

  DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
        << "...\n");

  CallAnalyzer CA(Callee->getDataLayout(), TTIWP->getTTI(*Callee),
                  ACT->getAssumptionCache(*Callee), *Callee, Threshold);
  bool ShouldInline = CA.analyzeCall(CS);

  DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever();
  if (ShouldInline && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways();

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}
Example 27
int InlineCostAnalyzer::getInlineBonuses(CallSite CS, Function *Callee) {
  // Get information about the callee.
  FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
  if (CalleeFI->Metrics.NumBlocks == 0)
    CalleeFI->analyzeFunction(Callee, TD);

  bool isDirectCall = CS.getCalledFunction() == Callee;
  Instruction *TheCall = CS.getInstruction();
  int Bonus = 0;

  // If there is only one call of the function, and it has internal linkage,
  // make it almost guaranteed to be inlined.
  //
  if (Callee->hasLocalLinkage() && Callee->hasOneUse() && isDirectCall)
    Bonus += InlineConstants::LastCallToStaticBonus;

  // If the instruction after the call, or if the normal destination of the
  // invoke is an unreachable instruction, the function is noreturn.  As such,
  // there is little point in inlining this.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      Bonus += InlineConstants::NoreturnPenalty;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(TheCall)))
    Bonus += InlineConstants::NoreturnPenalty;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (Callee->getCallingConv() == CallingConv::Cold)
    Bonus += InlineConstants::ColdccPenalty;

  // Add to the inline quality for properties that make the call valuable to
  // inline.  This includes factors that indicate that the result of inlining
  // the function will be optimizable.  Currently this just looks at arguments
  // passed into the function.
  //
  CallSite::arg_iterator I = CS.arg_begin();
  for (Function::arg_iterator FI = Callee->arg_begin(), FE = Callee->arg_end();
       FI != FE; ++I, ++FI)
    // Compute any constant bonus due to inlining we want to give here.
    if (isa<Constant>(I))
      Bonus += CountBonusForConstant(FI, cast<Constant>(I));

  return Bonus;
}
Example 28
/// OptimizeInlineAsmInst - If there are any memory operands, use
/// OptimizeMemoryInst to sink their address computing into the block when
/// possible / profitable.
bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS,
                                           DenseMap<Value*,Value*> &SunkAddrs) {
  bool MadeChange = false;
  InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());

  // Do a prepass over the constraints, canonicalizing them, and building up the
  // ConstraintOperands list.
  std::vector<InlineAsm::ConstraintInfo>
    ConstraintInfos = IA->ParseConstraints();

  /// ConstraintOperands - Information about all of the constraints.
  std::vector<TargetLowering::AsmOperandInfo> ConstraintOperands;
  unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
    ConstraintOperands.
      push_back(TargetLowering::AsmOperandInfo(ConstraintInfos[i]));
    TargetLowering::AsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.isIndirect)
        OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
      break;
    case InlineAsm::isInput:
      OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
      break;
    case InlineAsm::isClobber:
      // Nothing to do.
      break;
    }

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue(),
                             OpInfo.ConstraintType == TargetLowering::C_Memory);

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = OpInfo.CallOperandVal;
      MadeChange |= OptimizeMemoryInst(I, OpVal, OpVal->getType(), SunkAddrs);
    }
  }

  return MadeChange;
}
Example 29
/// Infer nonnull attributes for the arguments at the specified callsite.
static bool processCallSite(CallSite CS, LazyValueInfo *LVI) {
  SmallVector<unsigned, 4> Indices;
  unsigned ArgNo = 0;

  for (Value *V : CS.args()) {
    PointerType *Type = dyn_cast<PointerType>(V->getType());
    // Try to mark pointer typed parameters as non-null.  We skip the
    // relatively expensive analysis for constants which are obviously either
    // null or non-null to start with.
    if (Type && !CS.paramHasAttr(ArgNo + 1, Attribute::NonNull) &&
        !isa<Constant>(V) && 
        LVI->getPredicateAt(ICmpInst::ICMP_EQ, V,
                            ConstantPointerNull::get(Type),
                            CS.getInstruction()) == LazyValueInfo::False)
      Indices.push_back(ArgNo + 1);
    ArgNo++;
  }

  assert(ArgNo == CS.arg_size() && "sanity check");

  if (Indices.empty())
    return false;

  AttributeSet AS = CS.getAttributes();
  LLVMContext &Ctx = CS.getInstruction()->getContext();
  AS = AS.addAttribute(Ctx, Indices, Attribute::get(Ctx, Attribute::NonNull));
  CS.setAttributes(AS);

  return true;
}
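As a source-level illustration (my own example, not from the pass): when a null check dominates the call, LazyValueInfo can prove the pointer is non-null at the callsite, and processCallSite() then attaches the nonnull attribute to that argument of the call.

void sink(char *p) { *p = 0; }  // stands in for any callee taking a pointer

void caller(char *p) {
  if (!p)
    return;
  sink(p);   // LVI proves p != null at this callsite, so the argument gets 'nonnull'
}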
Example 30
/// AllCalleesPassInValidPointerForArgument - Return true if we can prove that
/// all callees pass in a valid pointer for the specified function argument.
static bool AllCalleesPassInValidPointerForArgument(Argument *Arg) {
  Function *Callee = Arg->getParent();

  unsigned ArgNo = std::distance(Callee->arg_begin(),
                                 Function::arg_iterator(Arg));

  // Look at all call sites of the function.  At this point we know we only
  // have direct callees.
  for (Value::use_iterator UI = Callee->use_begin(), E = Callee->use_end();
       UI != E; ++UI) {
    CallSite CS = CallSite::get(*UI);
    assert(CS.getInstruction() && "Should only have direct calls!");

    if (!IsAlwaysValidPointer(CS.getArgument(ArgNo)))
      return false;
  }
  return true;
}