Example #1
// getSpecializationBonus - The heuristic used to determine the per-call
// performance boost for using a specialization of Callee with the
// arguments in SpecializedArgNos replaced by constants.
int InlineCostAnalyzer::getSpecializationBonus(Function *Callee,
         SmallVectorImpl<unsigned> &SpecializedArgNos)
{
  if (Callee->mayBeOverridden())
    return 0;
  
  int Bonus = 0;
  // If this function uses the coldcc calling convention, prefer not to
  // specialize it.
  if (Callee->getCallingConv() == CallingConv::Cold)
    Bonus -= InlineConstants::ColdccPenalty;
  
  // Get information about the callee.
  FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
  
  // If we haven't calculated this information yet, do so now.
  if (CalleeFI->Metrics.NumBlocks == 0)
    CalleeFI->analyzeFunction(Callee);

  // Walk the formal arguments; SpecializedArgNos is assumed to be sorted in
  // ascending order. Guard the index so we never read past the end of it.
  unsigned ArgNo = 0;
  unsigned i = 0;
  for (Function::arg_iterator I = Callee->arg_begin(), E = Callee->arg_end();
       I != E && i != SpecializedArgNos.size(); ++I, ++ArgNo)
    if (ArgNo == SpecializedArgNos[i]) {
      ++i;
      Bonus += CountBonusForConstant(I);
    }

  // Calls usually take a long time, so they make the specialization gain 
  // smaller.
  Bonus -= CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;

  return Bonus;
}
Example #2
// getSpecializationBonus - The heuristic used to determine the per-call
// performance boost for using a specialization of Callee with the
// arguments in SpecializedArgNos replaced by constants.
int InlineCostAnalyzer::getSpecializationBonus(Function *Callee,
         SmallVectorImpl<unsigned> &SpecializedArgNos)
{
  if (Callee->mayBeOverridden())
    return 0;
  
  int Bonus = 0;
  // If this function uses the coldcc calling convention, prefer not to
  // specialize it.
  if (Callee->getCallingConv() == CallingConv::Cold)
    Bonus -= InlineConstants::ColdccPenalty;
  
  // Get information about the callee.
  FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
  
  // If we haven't calculated this information yet, do so now.
  if (CalleeFI->Metrics.NumBlocks == 0)
    CalleeFI->analyzeFunction(Callee);


  // Sum the precomputed constant bonuses for the specialized arguments.
  for (unsigned i = 0, e = SpecializedArgNos.size(); i != e; ++i)
    Bonus += CalleeFI->ArgumentWeights[SpecializedArgNos[i]].ConstantBonus;
  // Calls usually take a long time, so they make the specialization gain 
  // smaller.
  Bonus -= CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;

  return Bonus;
}
Example #3
// Output for a pure analysis pass should happen in the print method.
// It is called automatically after the analysis pass has finished collecting
// its information.
void
WeightedCallGraphPass::print(raw_ostream &out, const Module *m) const {
  auto &cgPass = getAnalysis<CallGraphPass>();

  
  // Print one row per function: name, weight, then (siteID, filename, line)
  // for each direct call site.
  for (const auto &kvPair : cgPass.funcs) {
    const FunctionInfo &fi = kvPair.second;
    out << fi.getFunction()->getName() << "," << fi.weight;

    unsigned siteID = 0;
    for (const auto &ci : fi.directCalls) {
      out << "," << siteID << "," << ci.filename << "," << ci.lineNum;
      ++siteID;
    }
    out << "\n";
  }
  

  // Separate functions and edges by a blank line
  out << "\n";

  for (const auto &kvPair : cgPass.funcs) {
    const FunctionInfo &fi = kvPair.second;
    for (const auto &ci : fi.directCalls) {
      out << fi.getFunction()->getName() << "," << ci.callSiteNum << 
          "," << ci.getFunction()->getName() << "\n";
    }
  }
}
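The print method above emits plain comma-separated rows. For a hypothetical module with two functions (the names, weights, files, and line numbers below are illustrative only), the output would look like:

    main,42,0,main.cpp,10,1,main.cpp,14
    helper,7

    main,0,helper
    main,1,helper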
Example #4
void GenericFunctionEffectAnalysis<FunctionEffects>::analyzeCall(
    FunctionInfo *functionInfo, FullApplySite fullApply,
    FunctionOrder &bottomUpOrder, int recursionDepth) {

  FunctionEffects applyEffects;
  if (applyEffects.summarizeCall(fullApply)) {
    functionInfo->functionEffects.mergeFromApply(applyEffects, fullApply);
    return;
  }

  if (recursionDepth >= MaxRecursionDepth) {
    functionInfo->functionEffects.setWorstEffects();
    return;
  }
  CalleeList callees = BCA->getCalleeList(fullApply);
  if (!callees.allCalleesVisible() ||
      // @callee_owned function calls implicitly release the context, which
      // may call deinits of boxed values.
      // TODO: be less conservative about what destructors might be called.
      fullApply.getOrigCalleeType()->isCalleeConsumed()) {
    functionInfo->functionEffects.setWorstEffects();
    return;
  }
  // Derive the effects of the apply from the known callees.
  // Defer merging the callee effects until each callee has been scheduled.
  for (SILFunction *callee : callees) {
    FunctionInfo *calleeInfo = getFunctionInfo(callee);
    calleeInfo->addCaller(functionInfo, fullApply);
    if (!calleeInfo->isVisited()) {
      // Recursively visit the called function.
      analyzeFunction(calleeInfo, bottomUpOrder, recursionDepth + 1);
      bottomUpOrder.tryToSchedule(calleeInfo);
    }
  }
}
Example #5
// getSpecializationCost - The heuristic used to determine the code-size
// impact of creating a specialized version of Callee with the arguments
// in SpecializedArgNos replaced by constants.
InlineCost InlineCostAnalyzer::getSpecializationCost(Function *Callee,
                               SmallVectorImpl<unsigned> &SpecializedArgNos)
{
  // Don't specialize functions which can be redefined at link-time to mean
  // something else.
  if (Callee->mayBeOverridden())
    return llvm::InlineCost::getNever();
  
  // Get information about the callee.
  FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
  
  // If we haven't calculated this information yet, do so now.
  if (CalleeFI->Metrics.NumBlocks == 0)
    CalleeFI->analyzeFunction(Callee);

  int Cost = 0;
  
  // Look at the original size of the callee.  Each instruction counts as 5.
  Cost += CalleeFI->Metrics.NumInsts * InlineConstants::InstrCost;

  // Offset that with the amount of code that can be constant-folded
  // away with the given arguments replaced by constants.
  for (SmallVectorImpl<unsigned>::iterator an = SpecializedArgNos.begin(),
       ae = SpecializedArgNos.end(); an != ae; ++an)
    Cost -= CalleeFI->ArgumentWeights[*an].ConstantWeight;

  return llvm::InlineCost::get(Cost);
}
Example #6
const AccessSummaryAnalysis::FunctionSummary &
AccessSummaryAnalysis::getOrCreateSummary(SILFunction *fn) {
  FunctionInfo *info = getFunctionInfo(fn);
  if (!info->isValid())
    recompute(info);

  return info->getSummary();
}
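A minimal caller sketch for the lazily cached summary above; the checkApply helper is hypothetical, while getCalleeFunction mirrors the accessor used in Example #21 below:

static void checkApply(AccessSummaryAnalysis *ASA, FullApplySite FAS) {
  // getOrCreateSummary only triggers recompute() the first time (or after
  // invalidation clears isValid()); subsequent calls are cache hits.
  if (SILFunction *Callee = FAS.getCalleeFunction()) {
    const AccessSummaryAnalysis::FunctionSummary &Summary =
        ASA->getOrCreateSummary(Callee);
    (void)Summary; // inspect per-argument access summaries here
  }
}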
Example #7
InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee) {
  Instruction *TheCall = CS.getInstruction();
  Function *Caller = TheCall->getParent()->getParent();

  // Don't inline functions which can be redefined at link-time to mean
  // something else.  Don't inline functions marked noinline or call sites
  // marked noinline.
  if (Callee->mayBeOverridden() || Callee->hasFnAttr(Attribute::NoInline) ||
      CS.isNoInline())
    return llvm::InlineCost::getNever();

  // Get information about the callee.
  FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
  if (CalleeFI->Metrics.NumBlocks == 0)
    CalleeFI->analyzeFunction(Callee, TD);

  // If we should never inline this, return a huge cost.
  if (CalleeFI->NeverInline())
    return InlineCost::getNever();

  // FIXME: It would be nice to kill off CalleeFI->NeverInline. Then we
  // could move this up and avoid computing the FunctionInfo for
  // things we are going to just return always inline for. This
  // requires handling setjmp somewhere else, however.
  if (!Callee->isDeclaration() && Callee->hasFnAttr(Attribute::AlwaysInline))
    return InlineCost::getAlways();

  if (CalleeFI->Metrics.usesDynamicAlloca) {
    // Get information about the caller.
    FunctionInfo &CallerFI = CachedFunctionInfo[Caller];

    // If we haven't calculated this information yet, do so now.
    if (CallerFI.Metrics.NumBlocks == 0) {
      CallerFI.analyzeFunction(Caller, TD);

      // Recompute the CalleeFI pointer, getting Caller could have invalidated
      // it.
      CalleeFI = &CachedFunctionInfo[Callee];
    }

    // Don't inline a callee with dynamic alloca into a caller without them.
    // Functions containing dynamic allocas are inefficient in various ways;
    // don't create more inefficiency.
    if (!CallerFI.Metrics.usesDynamicAlloca)
      return InlineCost::getNever();
  }

  // InlineCost - This value measures how good of an inline candidate this call
  // site is.  A lower inline cost makes it more likely for the call to be
  // inlined.  The value may go negative because bonuses are negative numbers.
  //
  int InlineCost = getInlineSize(CS, Callee) + getInlineBonuses(CS, Callee);
  return llvm::InlineCost::get(InlineCost);
}
Example #8
int
FunctionInfo::Compare(const FunctionInfo& a, const FunctionInfo& b)
{
    int result = ConstString::Compare(a.GetName(), b.GetName());
    if (result)
        return result;

    return Declaration::Compare(a.m_declaration, b.m_declaration);
}
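Because Compare returns a negative, zero, or positive three-way result, it composes directly into a strict-weak-ordering predicate. A minimal usage sketch (the vector and the sorting helper are hypothetical, not part of the original source):

#include <algorithm>
#include <vector>

// Sort FunctionInfo records by name, breaking ties by declaration.
void sortFunctionInfos(std::vector<FunctionInfo> &infos) {
  std::sort(infos.begin(), infos.end(),
            [](const FunctionInfo &a, const FunctionInfo &b) {
              return FunctionInfo::Compare(a, b) < 0;
            });
}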
Example #9
  /// Add mod/ref info from another function into ours, saturating towards
  /// MRI_ModRef.
  void addFunctionInfo(const FunctionInfo &FI) {
    addModRefInfo(FI.getModRefInfo());

    if (FI.mayReadAnyGlobal())
      setMayReadAnyGlobal();

    if (AlignedMap *P = FI.Info.getPointer())
      for (const auto &G : P->Map)
        addModRefInfoForGlobal(*G.first, G.second);
  }
Example #10
    Var CrossSite::ProfileThunk(RecyclableObject* callable, CallInfo callInfo, ...)
    {
        JavascriptFunction* function = JavascriptFunction::FromVar(callable);
        Assert(function->GetTypeId() == TypeIds_Function);
        Assert(function->GetEntryPoint() == CrossSite::ProfileThunk);
        RUNTIME_ARGUMENTS(args, callInfo);
        ScriptContext * scriptContext = function->GetScriptContext();
        // It is not safe to access the function body if the script context is not alive.
        scriptContext->VerifyAliveWithHostContext(!function->IsExternal(),
            scriptContext->GetThreadContext()->GetPreviousHostScriptContext());

        JavascriptMethod entryPoint;
        FunctionInfo *funcInfo = function->GetFunctionInfo();

        TTD_XSITE_LOG(callable->GetScriptContext(), "DefaultOrProfileThunk", callable);

#ifdef ENABLE_WASM
        if (WasmScriptFunction::Is(function))
        {
            AsmJsFunctionInfo* asmInfo = funcInfo->GetFunctionBody()->GetAsmJsFunctionInfo();
            Assert(asmInfo);
            if (asmInfo->IsWasmDeferredParse())
            {
                entryPoint = WasmLibrary::WasmDeferredParseExternalThunk;
            }
            else
            {
                entryPoint = Js::AsmJsExternalEntryPoint;
            }
        } else
#endif
        if (funcInfo->HasBody())
        {
#if ENABLE_DEBUG_CONFIG_OPTIONS
            char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
#endif
            entryPoint = ScriptFunction::FromVar(function)->GetEntryPointInfo()->jsMethod;
            if (funcInfo->IsDeferred() && scriptContext->IsProfiling())
            {
                // if the current entrypoint is deferred parse we need to update it appropriately for the profiler mode.
                entryPoint = Js::ScriptContext::GetProfileModeThunk(entryPoint);
            }
            OUTPUT_TRACE(Js::ScriptProfilerPhase, _u("CrossSite::ProfileThunk FunctionNumber : %s, Entrypoint : 0x%08X\n"), funcInfo->GetFunctionProxy()->GetDebugNumberSet(debugStringBuffer), entryPoint);
        }
        else
        {
            entryPoint = ProfileEntryThunk;
        }

        return CommonThunk(function, entryPoint, args);
    }
Example #11
bool AccessSummaryAnalysis::propagateFromCalleeToCaller(
    FunctionInfo *callerInfo, ArgumentFlow flow) {
  // For a given flow from a caller's argument to a callee's argument,
  // propagate the argument summary information to the caller.

  FunctionInfo *calleeInfo = flow.CalleeFunctionInfo;
  const auto &calleeArgument =
      calleeInfo->getSummary().getAccessForArgument(flow.CalleeArgumentIndex);
  auto &callerArgument =
      callerInfo->getSummary().getAccessForArgument(flow.CallerArgumentIndex);

  bool changed = callerArgument.mergeWith(calleeArgument);
  return changed;
}
Example #12
void DebugInfo::recordPerfMap(DwarfChunk* chunk) {
  if (!m_perfMap) return;
  if (RuntimeOption::EvalProfileBC) return;
  for (FuncPtrDB::const_iterator it = chunk->m_functions.begin();
      it != chunk->m_functions.end();
      ++it) {
    FunctionInfo* fi = *it;
    if (!fi->perfSynced()) {
      fprintf(m_perfMap, "%lx %x %s\n",
              reinterpret_cast<uintptr_t>(fi->range.begin()),
              fi->range.size(),
              fi->name.c_str());
      fi->setPerfSynced();
    }
  }
  fflush(m_perfMap);
}
Example #13
void AccessSummaryAnalysis::processCall(FunctionInfo *callerInfo,
                                        unsigned callerArgumentIndex,
                                        SILFunction *callee,
                                        unsigned argumentIndex,
                                        FunctionOrder &order) {
  // Record the flow of an argument from the caller to the callee so that
  // the interprocedural analysis can iterate to a fixpoint.
  FunctionInfo *calleeInfo = getFunctionInfo(callee);
  ArgumentFlow flow = {callerArgumentIndex, argumentIndex, calleeInfo};
  callerInfo->recordFlow(flow);
  if (!calleeInfo->isVisited()) {
    processFunction(calleeInfo, order);
    order.tryToSchedule(calleeInfo);
  }

  propagateFromCalleeToCaller(callerInfo, flow);
}
Example #14
int InlineCostAnalyzer::getInlineBonuses(CallSite CS, Function *Callee) {
  // Get information about the callee.
  FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
  if (CalleeFI->Metrics.NumBlocks == 0)
    CalleeFI->analyzeFunction(Callee, TD);

  bool isDirectCall = CS.getCalledFunction() == Callee;
  Instruction *TheCall = CS.getInstruction();
  int Bonus = 0;

  // If there is only one call of the function, and it has internal linkage,
  // make it almost guaranteed to be inlined.
  //
  if (Callee->hasLocalLinkage() && Callee->hasOneUse() && isDirectCall)
    Bonus += InlineConstants::LastCallToStaticBonus;

  // If the instruction after the call is unreachable, or the normal
  // destination of the invoke begins with an unreachable instruction, the
  // function is noreturn.  As such, there is little point in inlining this.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      Bonus += InlineConstants::NoreturnPenalty;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(TheCall)))
    Bonus += InlineConstants::NoreturnPenalty;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (Callee->getCallingConv() == CallingConv::Cold)
    Bonus += InlineConstants::ColdccPenalty;

  // Add to the inline quality for properties that make the call valuable to
  // inline.  This includes factors that indicate that the result of inlining
  // the function will be optimizable.  Currently this just looks at arguments
  // passed into the function.
  //
  CallSite::arg_iterator I = CS.arg_begin();
  for (Function::arg_iterator FI = Callee->arg_begin(), FE = Callee->arg_end();
       FI != FE; ++I, ++FI)
    // Compute any constant bonus due to inlining we want to give here.
    if (isa<Constant>(I))
      Bonus += CountBonusForConstant(FI, cast<Constant>(I));

  return Bonus;
}
Example #15
/** Pushes a function onto the profiler's shadow stack, updating call and
    recursion counts for both the function itself and the caller's
    per-callee record (so recursive entries are not double-booked). */
void StackInfo::PushFunction( FunctionInfo* pFunctionInfo, INT64 llCycleCount )
{
	if ( _sFunctionStack.size() > 0 )
	{
		// Increment the recursive count of this callee function info so we don't double-book entries
		FunctionInfo* pCallerFunctionInfo = _sFunctionStack.top().pFunctionInfo;
		FunctionID fidCallee = pFunctionInfo->fid;
		
		CalleeFunctionInfo* pCalleeFunctionInfo = pCallerFunctionInfo->GetCalleeFunctionInfo( fidCallee );
		pCalleeFunctionInfo->nRecursiveCount++;
		pCalleeFunctionInfo->nCalls++;
	}

	// Increment the recursive count of this function info so we don't double-book entries
	pFunctionInfo->nRecursiveCount++;
	pFunctionInfo->nCalls++;

	_sFunctionStack.push( StackEntryInfo( pFunctionInfo, llCycleCount ) );
}
Example #16
void AccessSummaryAnalysis::recompute(FunctionInfo *initial) {
  allocNewUpdateID();

  FunctionOrder order(getCurrentUpdateID());

  // Summarize the function and its callees.
  processFunction(initial, order);

  // Build the bottom-up order.
  order.tryToSchedule(initial);
  order.finishScheduling();

  // Iterate the interprocedural analysis to a fixed point.
  bool needAnotherIteration;
  do {
    needAnotherIteration = false;
    for (FunctionInfo *calleeInfo : order) {
      for (const auto &callerEntry : calleeInfo->getCallers()) {
        assert(callerEntry.isValid());
        if (!order.wasRecomputedWithCurrentUpdateID(calleeInfo))
          continue;

        FunctionInfo *callerInfo = callerEntry.Caller;

        // Propagate from callee to caller.
        for (const auto &argumentFlow : callerInfo->getArgumentFlows()) {
          if (argumentFlow.CalleeFunctionInfo != calleeInfo)
            continue;

          bool changed = propagateFromCalleeToCaller(callerInfo, argumentFlow);
          if (changed && !callerInfo->isScheduledAfter(calleeInfo)) {
            needAnotherIteration = true;
          }
        }
      }
    }
  } while (needAnotherIteration);
}
Example #17
    Var CrossSite::DefaultThunk(RecyclableObject* callable, CallInfo callInfo, ...)
    {
        JavascriptFunction* function = JavascriptFunction::FromVar(callable);
        Assert(function->GetTypeId() == TypeIds_Function);
        Assert(function->GetEntryPoint() == CrossSite::DefaultThunk);
        RUNTIME_ARGUMENTS(args, callInfo);

        // It is not safe to access the function body if the script context is not alive.
        function->GetScriptContext()->VerifyAliveWithHostContext(!function->IsExternal(),
            ThreadContext::GetContextForCurrentThread()->GetPreviousHostScriptContext());

        JavascriptMethod entryPoint;
        FunctionInfo *funcInfo = function->GetFunctionInfo();

        TTD_XSITE_LOG(callable->GetScriptContext(), "DefaultOrProfileThunk", callable);

        if (funcInfo->HasBody())
        {
#ifdef ASMJS_PLAT
            if (funcInfo->GetFunctionProxy()->IsFunctionBody() &&
                funcInfo->GetFunctionBody()->GetIsAsmJsFunction())
            {
#ifdef ENABLE_WASM
                AsmJsFunctionInfo* asmInfo = funcInfo->GetFunctionBody()->GetAsmJsFunctionInfo();
                if (asmInfo && asmInfo->IsWasmDeferredParse())
                {
                    entryPoint = WasmLibrary::WasmDeferredParseExternalThunk;
                }
                else
#endif
                {
                    entryPoint = Js::AsmJsExternalEntryPoint;
                }
            }
            else
#endif
            {
                entryPoint = ScriptFunction::FromVar(function)->GetEntryPointInfo()->jsMethod;
            }
        }
        else
        {
            entryPoint = funcInfo->GetOriginalEntryPoint();
        }
        return CommonThunk(function, entryPoint, args);
    }
Example #18
MinimizationResult Minimizer::minimize2(const Function & f, const FunctionInfo & info, const ParValues & fixed_parameters){
    dynamic_cast<const DefFunctionInfo&>(info); // throws std::bad_cast if info is not a DefFunctionInfo
    ParIds pids = fixed_parameters.get_parameters();
    if(pids.size()==0){
        return minimize(f, info.get_start(), info.get_step(), info.get_ranges());
    }
    else{
        ParValues start(info.get_start());
        ParValues step(info.get_step());
        Ranges ranges(info.get_ranges());
        const ParIds & info_fixed = info.get_fixed_parameters();
        for(ParIds::const_iterator pit = pids.begin(); pit!=pids.end(); ++pit){
            if(!info_fixed.contains(*pit)){
                throw invalid_argument("fixed parameter in minimize2 which is not fixed in info. This is not allowed.");
            }
            double val = fixed_parameters.get(*pit);
            start.set(*pit, val);
            step.set(*pit, 0.0);
            ranges.set(*pit, make_pair(val, val));
        }
        return minimize(f, start, step, ranges);
    }
}
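A minimal usage sketch of minimize2; the mass parameter id, its value, and the minimizer instance are hypothetical, while ParValues::set is taken from the body above. Note that any parameter fixed here must also appear in info.get_fixed_parameters(), or minimize2 throws invalid_argument:

ParValues fixed_parameters;
fixed_parameters.set(mass, 125.0); // pin 'mass' to a constant
MinimizationResult res = minimizer.minimize2(f, info, fixed_parameters);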
Example #19
File: dwarf.cpp  Project: shixiao/hhvm
DwarfChunk* DwarfInfo::addTracelet(TCRange range,
                                   folly::Optional<std::string> name,
                                   const Func *func,
                                   const Op* instr,
                                   bool exit,
                                   bool inPrologue) {
  DwarfChunk* chunk = nullptr;
  FunctionInfo* f = new FunctionInfo(range, exit);
  const Unit* unit = func ? func->unit(): nullptr;
  if (name) {
    f->name = *name;
  } else {
    assert(func != nullptr);
    f->name = lookupFunction(func, exit, inPrologue, true);
    auto names = func->localNames();
    for (int i = 0; i < func->numNamedLocals(); i++) {
      f->m_namedLocals.push_back(names[i]->toCppString());
    }
  }
  f->file = lookupFile(unit);

  TCA start = range.begin();
  const TCA end = range.end();

  Lock lock(s_lock);
  auto const it = m_functions.lower_bound(range.begin());
  // Only dereference the iterator after checking that it is valid.
  auto const fi = it != m_functions.end() ? it->second : nullptr;
  if (fi != nullptr && fi->name == f->name &&
      fi->file == f->file &&
      start > fi->range.begin() &&
      end > fi->range.end()) {
    // XXX: verify that overlapping addresses come from jmp fixups
    start = fi->range.end();
    fi->range.extend(end);
    m_functions[end] = fi;
    m_functions.erase(it);
    delete f;
    f = m_functions[end];
    assert(f->m_chunk != nullptr);
    f->m_chunk->clearSynced();
    f->clearPerfSynced();
  } else {
    m_functions[end] = f;
  }

  addLineEntries(TCRange(start, end, range.isAcold()), unit, instr, f);

  if (f->m_chunk == nullptr) {
    if (m_dwarfChunks.size() == 0 || m_dwarfChunks[0] == nullptr) {
      // new chunk of base size
      chunk = new DwarfChunk();
      m_dwarfChunks.push_back(chunk);
    } else if (m_dwarfChunks[0]->m_functions.size()
                 < RuntimeOption::EvalGdbSyncChunks) {
      // reuse first chunk
      chunk = m_dwarfChunks[0];
      chunk->clearSynced();
    } else {
      // compact chunks
      compactChunks();
      m_dwarfChunks[0] = chunk = new DwarfChunk();
    }
    chunk->m_functions.push_back(f);
    f->m_chunk = chunk;
  }

#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__CYGWIN__)
  if (f->m_chunk->m_functions.size() >= RuntimeOption::EvalGdbSyncChunks) {
    // Constructing the ElfWriter emits the chunk's debug info as a side effect.
    ElfWriter e = ElfWriter(f->m_chunk);
  }
#endif

  return f->m_chunk;
}
Example #20
InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
                               Function *Callee,
                               SmallPtrSet<const Function*, 16> &NeverInline) {
  Instruction *TheCall = CS.getInstruction();
  Function *Caller = TheCall->getParent()->getParent();
  bool isDirectCall = CS.getCalledFunction() == Callee;

  // Don't inline functions which can be redefined at link-time to mean
  // something else.  Don't inline functions marked noinline or call sites
  // marked noinline.
  if (Callee->mayBeOverridden() ||
      Callee->hasFnAttr(Attribute::NoInline) || NeverInline.count(Callee) ||
      CS.isNoInline())
    return llvm::InlineCost::getNever();

  // Get information about the callee.
  FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
  
  // If we haven't calculated this information yet, do so now.
  if (CalleeFI->Metrics.NumBlocks == 0)
    CalleeFI->analyzeFunction(Callee);

  // If we should never inline this, return a huge cost.
  if (CalleeFI->NeverInline())
    return InlineCost::getNever();

  // FIXME: It would be nice to kill off CalleeFI->NeverInline. Then we
  // could move this up and avoid computing the FunctionInfo for
  // things we are going to just return always inline for. This
  // requires handling setjmp somewhere else, however.
  if (!Callee->isDeclaration() && Callee->hasFnAttr(Attribute::AlwaysInline))
    return InlineCost::getAlways();
    
  if (CalleeFI->Metrics.usesDynamicAlloca) {
    // Get information about the caller.
    FunctionInfo &CallerFI = CachedFunctionInfo[Caller];

    // If we haven't calculated this information yet, do so now.
    if (CallerFI.Metrics.NumBlocks == 0) {
      CallerFI.analyzeFunction(Caller);
     
      // Recompute the CalleeFI pointer, getting Caller could have invalidated
      // it.
      CalleeFI = &CachedFunctionInfo[Callee];
    }

    // Don't inline a callee with dynamic alloca into a caller without them.
    // Functions containing dynamic allocas are inefficient in various ways;
    // don't create more inefficiency.
    if (!CallerFI.Metrics.usesDynamicAlloca)
      return InlineCost::getNever();
  }

  // InlineCost - This value measures how good of an inline candidate this call
  // site is.  A lower inline cost makes it more likely for the call to be
  // inlined.  This value may go negative.
  //
  int InlineCost = 0;

  // Add to the inline quality for properties that make the call valuable to
  // inline.  This includes factors that indicate that the result of inlining
  // the function will be optimizable.  Currently this just looks at arguments
  // passed into the function.
  //
  unsigned ArgNo = 0;
  CallSite::arg_iterator I = CS.arg_begin();
  for (Function::arg_iterator FI = Callee->arg_begin(), FE = Callee->arg_end();
       FI != FE; ++I, ++FI, ++ArgNo) {

    // If an alloca is passed in, inlining this function is likely to allow
    // significant future optimization possibilities (like scalar promotion, and
    // scalarization), so encourage the inlining of the function.
    //
    if (isa<AllocaInst>(I))
      InlineCost -= CalleeFI->ArgumentWeights[ArgNo].AllocaWeight;

    // If this is a constant being passed into the function, use the argument
    // weights calculated for the callee to determine how much will be folded
    // away with this information.
    else if (isa<Constant>(I)) {
      InlineCost -= CalleeFI->ArgumentWeights[ArgNo].ConstantWeight;
       
      // Compute any constant bonus due to inlining we want to give here.
      InlineCost -= CountBonusForConstant(FI);
    }
  }
  
  // Each argument passed in has a cost at both the caller and the callee
  // sides.  Measurements show that each argument costs about the same as an
  // instruction.
  InlineCost -= (CS.arg_size() * InlineConstants::InstrCost);

  // If there is only one call of the function, and it has internal linkage,
  // make it almost guaranteed to be inlined.
  //
  if (Callee->hasLocalLinkage() && Callee->hasOneUse() && isDirectCall)
    InlineCost += InlineConstants::LastCallToStaticBonus;

  // Now that we have considered all of the factors that make the call site more
  // likely to be inlined, look at factors that make us not want to inline it.

  // If the instruction after the call, or if the normal destination of the
  // invoke is an unreachable instruction, the function is noreturn.  As such,
  // there is little point in inlining this.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      InlineCost += InlineConstants::NoreturnPenalty;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(TheCall)))
    InlineCost += InlineConstants::NoreturnPenalty;
  
  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (Callee->getCallingConv() == CallingConv::Cold)
    InlineCost += InlineConstants::ColdccPenalty;

  // Calls usually take a long time, so they make the inlining gain smaller.
  InlineCost += CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;

  // Look at the size of the callee. Each instruction counts as 5.
  InlineCost += CalleeFI->Metrics.NumInsts*InlineConstants::InstrCost;

  return llvm::InlineCost::get(InlineCost);
}
Example #21
void SideEffectAnalysis::analyzeInstruction(FunctionInfo *FInfo,
                                            SILInstruction *I,
                                            FunctionOrder &BottomUpOrder,
                                            int RecursionDepth) {
  if (FullApplySite FAS = FullApplySite::isa(I)) {
    // Is this a call to a semantics function?
    ArraySemanticsCall ASC(I);
    if (ASC && ASC.hasSelf()) {
      FunctionEffects ApplyEffects(FAS.getNumArguments());
      if (getSemanticEffects(ApplyEffects, ASC)) {
        FInfo->FE.mergeFromApply(ApplyEffects, FAS);
        return;
      }
    }

    if (SILFunction *SingleCallee = FAS.getCalleeFunction()) {
      // Does the function have any @effects?
      if (getDefinedEffects(FInfo->FE, SingleCallee))
        return;
    }

    if (RecursionDepth < MaxRecursionDepth) {
      CalleeList Callees = BCA->getCalleeList(FAS);
      if (Callees.allCalleesVisible() &&
          // @callee_owned function calls implicitly release the context, which
          // may call deinits of boxed values.
          // TODO: be less conservative about what destructors might be called.
          !FAS.getOrigCalleeType()->isCalleeConsumed()) {
        // Derive the effects of the apply from the known callees.
        for (SILFunction *Callee : Callees) {
          FunctionInfo *CalleeInfo = getFunctionInfo(Callee);
          CalleeInfo->addCaller(FInfo, FAS);
          if (!CalleeInfo->isVisited()) {
            // Recursively visit the called function.
            analyzeFunction(CalleeInfo, BottomUpOrder, RecursionDepth + 1);
            BottomUpOrder.tryToSchedule(CalleeInfo);
          }
        }
        return;
      }
    }
    // Be conservative for everything else.
    FInfo->FE.setWorstEffects();
    return;
  }
  // Handle some kinds of instructions specially.
  switch (I->getKind()) {
    case ValueKind::FixLifetimeInst:
      // A fix_lifetime instruction acts like a read on the operand: retains
      // can move after it, but the last release can't move before it.
      FInfo->FE.getEffectsOn(I->getOperand(0))->Reads = true;
      return;
    case ValueKind::AllocStackInst:
    case ValueKind::DeallocStackInst:
      return;
    case ValueKind::StrongRetainInst:
    case ValueKind::StrongRetainUnownedInst:
    case ValueKind::RetainValueInst:
    case ValueKind::UnownedRetainInst:
      FInfo->FE.getEffectsOn(I->getOperand(0))->Retains = true;
      return;
    case ValueKind::StrongReleaseInst:
    case ValueKind::ReleaseValueInst:
    case ValueKind::UnownedReleaseInst:
      FInfo->FE.getEffectsOn(I->getOperand(0))->Releases = true;
      
      // TODO: Check the call graph to be less conservative about what
      // destructors might be called.
      FInfo->FE.setWorstEffects();
      return;
    case ValueKind::LoadInst:
      FInfo->FE.getEffectsOn(cast<LoadInst>(I)->getOperand())->Reads = true;
      return;
    case ValueKind::StoreInst:
      FInfo->FE.getEffectsOn(cast<StoreInst>(I)->getDest())->Writes = true;
      return;
    case ValueKind::CondFailInst:
      FInfo->FE.Traps = true;
      return;
    case ValueKind::PartialApplyInst:
      FInfo->FE.AllocsObjects = true;
      return;
    case ValueKind::BuiltinInst: {
      auto &BI = cast<BuiltinInst>(I)->getBuiltinInfo();
      switch (BI.ID) {
        case BuiltinValueKind::IsUnique:
          // TODO: derive this information in a more general way, e.g. add it
          // to Builtins.def
          FInfo->FE.ReadsRC = true;
          break;
        default:
          break;
      }
      // Detailed memory effects of builtins are handled below by checking the
      // memory behavior of the instruction.
      break;
      }
    default:
      break;
  }

  if (isa<AllocationInst>(I)) {
    // Excluding AllocStackInst (which is handled above).
    FInfo->FE.AllocsObjects = true;
  }
  
  // Check the general memory behavior for instructions we didn't handle above.
  switch (I->getMemoryBehavior()) {
    case MemoryBehavior::None:
      break;
    case MemoryBehavior::MayRead:
      FInfo->FE.GlobalEffects.Reads = true;
      break;
    case MemoryBehavior::MayWrite:
      FInfo->FE.GlobalEffects.Writes = true;
      break;
    case MemoryBehavior::MayReadWrite:
      FInfo->FE.GlobalEffects.Reads = true;
      FInfo->FE.GlobalEffects.Writes = true;
      break;
    case MemoryBehavior::MayHaveSideEffects:
      FInfo->FE.setWorstEffects();
      break;
  }
  if (I->mayTrap())
    FInfo->FE.Traps = true;
}
Example #22
DwarfChunk* DwarfInfo::addTracelet(TCRange range, const char* name,
  const Unit *unit, const Opcode *instr, bool exit, bool inPrologue) {
  DwarfChunk* chunk = NULL;
  FunctionInfo* f = new FunctionInfo(range, exit);
  if (name) {
    f->name = std::string(name);
  } else {
    f->name = lookupFunction(unit, instr, exit, inPrologue, true);
  }
  f->file = lookupFile(unit);

  TCA start = range.begin();
  const TCA end = range.end();
  {
    Lock lock(s_lock);
    FuncDB::iterator it = m_functions.lower_bound(range.begin());
    // Only dereference the iterator after checking that it is valid.
    FunctionInfo* fi = it != m_functions.end() ? it->second : NULL;
    if (fi != NULL && fi->name == f->name &&
        fi->file == f->file &&
        start > fi->range.begin() &&
        end > fi->range.end()) {
      // XXX: verify that overlapping addresses come from jmp fixups
      start = fi->range.end();
      fi->range.truncate(end);
      m_functions[end] = fi;
      m_functions.erase(it);
      delete(f);
      f = m_functions[end];
      ASSERT(f->m_chunk != NULL);
      f->m_chunk->clearSynced();
      f->clearPerfSynced();
    } else {
      m_functions[end] = f;
    }
  }

  addLineEntries(TCRange(start, end, range.isAstubs()), unit, instr, f);
  if (f->m_chunk == NULL) {
    Lock lock(s_lock);
    if (m_dwarfChunks.size() == 0 || m_dwarfChunks[0] == NULL) {
      // new chunk of base size
      chunk = new DwarfChunk();
      m_dwarfChunks.push_back(chunk);
    } else if (m_dwarfChunks[0]->m_functions.size()
                 < RuntimeOption::EvalGdbSyncChunks) {
      // reuse first chunk
      chunk = m_dwarfChunks[0];
      chunk->clearSynced();
    } else {
      // compact chunks
      compactChunks();
      m_dwarfChunks[0] = chunk = new DwarfChunk();
    }
    chunk->m_functions.push_back(f);
    f->m_chunk = chunk;
  }

  if (f->m_chunk->m_functions.size() >= RuntimeOption::EvalGdbSyncChunks) {
    Lock lock(s_lock);
    ElfWriter e = ElfWriter(f->m_chunk);
  }

  return f->m_chunk;
}
Example #23
int InlineCostAnalyzer::getInlineSize(CallSite CS, Function *Callee) {
  // Get information about the callee.
  FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
  if (CalleeFI->Metrics.NumBlocks == 0)
    CalleeFI->analyzeFunction(Callee, TD);

  // InlineCost - This value measures how good of an inline candidate this call
  // site is.  A lower inline cost makes it more likely for the call to be
  // inlined.  This value may go negative.
  //
  int InlineCost = 0;

  // Compute any size reductions we can expect due to arguments being passed into
  // the function.
  //
  unsigned ArgNo = 0;
  CallSite::arg_iterator I = CS.arg_begin();
  for (Function::arg_iterator FI = Callee->arg_begin(), FE = Callee->arg_end();
       FI != FE; ++I, ++FI, ++ArgNo) {

    // If an alloca is passed in, inlining this function is likely to allow
    // significant future optimization possibilities (like scalar promotion, and
    // scalarization), so encourage the inlining of the function.
    //
    if (isa<AllocaInst>(I))
      InlineCost -= CalleeFI->ArgumentWeights[ArgNo].AllocaWeight;

    // If this is a constant being passed into the function, use the argument
    // weights calculated for the callee to determine how much will be folded
    // away with this information.
    else if (isa<Constant>(I))
      InlineCost -= CalleeFI->ArgumentWeights[ArgNo].ConstantWeight;
  }

  const DenseMap<std::pair<unsigned, unsigned>, unsigned> &ArgPairWeights
    = CalleeFI->PointerArgPairWeights;
  for (DenseMap<std::pair<unsigned, unsigned>, unsigned>::const_iterator I
         = ArgPairWeights.begin(), E = ArgPairWeights.end();
       I != E; ++I)
    if (CS.getArgument(I->first.first)->stripInBoundsConstantOffsets() ==
        CS.getArgument(I->first.second)->stripInBoundsConstantOffsets())
      InlineCost -= I->second;

  // Each argument passed in has a cost at both the caller and the callee
  // sides.  Measurements show that each argument costs about the same as an
  // instruction.
  InlineCost -= (CS.arg_size() * InlineConstants::InstrCost);

  // Now that we have considered all of the factors that make the call site more
  // likely to be inlined, look at factors that make us not want to inline it.

  // Calls usually take a long time, so they make the inlining gain smaller.
  InlineCost += CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;

  // Look at the size of the callee. Each instruction counts as 5.
  InlineCost += CalleeFI->Metrics.NumInsts * InlineConstants::InstrCost;

  return InlineCost;
}