Example No. 1
void DeclContext::localUncachedLookup(DeclarationName Name, 
                                  llvm::SmallVectorImpl<NamedDecl *> &Results) {
  Results.clear();
  
  // If there's no external storage, just perform a normal lookup and copy
  // the results.
  if (!hasExternalVisibleStorage() && !hasExternalLexicalStorage()) {
    lookup_result LookupResults = lookup(Name);
    Results.insert(Results.end(), LookupResults.first, LookupResults.second);
    return;
  }

  // If we have a lookup table, check there first. Maybe we'll get lucky.
  if (LookupPtr) {
    StoredDeclsMap::iterator Pos = LookupPtr->find(Name);
    if (Pos != LookupPtr->end()) {
      Results.insert(Results.end(), 
                     Pos->second.getLookupResult().first,
                     Pos->second.getLookupResult().second);
      return;
    }
  }
  
  // Slow case: grovel through the declarations in our chain looking for 
  // matches.
  for (Decl *D = FirstDecl; D; D = D->getNextDeclInContext()) {
    if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
      if (ND->getDeclName() == Name)
        Results.push_back(ND);
  }
}
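
A minimal caller-side sketch (not from the original source) of how this out-parameter idiom is typically consumed: the caller picks a concrete inline capacity with SmallVector, while the callee only sees the size-agnostic SmallVectorImpl reference. The helper name dumpLocalDecls and the assumption that DC and Name are valid are mine.

#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper: print every declaration named Name that this
// context can find locally. The caller owns the storage (inline
// capacity 8 here); localUncachedLookup only sees SmallVectorImpl.
void dumpLocalDecls(clang::DeclContext *DC, clang::DeclarationName Name) {
  llvm::SmallVector<clang::NamedDecl *, 8> Results;
  DC->localUncachedLookup(Name, Results);
  for (clang::NamedDecl *ND : Results)
    llvm::errs() << ND->getNameAsString() << "\n";
}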
Example No. 2
// Devirtualize and specialize a group of applies, returning a
// worklist of newly exposed function references that should be
// considered for inlining before continuing with the caller that has
// the passed-in applies.
//
// The returned worklist is ordered like a stack: entries we want to
// process last appear earliest in the list.
//
// Returns true if any changes were made.
bool SILPerformanceInliner::devirtualizeAndSpecializeApplies(
                                  llvm::SmallVectorImpl<ApplySite> &Applies,
                                  SILModuleTransform *MT,
                                  ClassHierarchyAnalysis *CHA,
                               llvm::SmallVectorImpl<SILFunction *> &WorkList) {
  assert(WorkList.empty() && "Expected empty worklist for return results!");

  bool ChangedAny = false;

  // The set of all new function references generated by
  // devirtualization and specialization.
  llvm::SetVector<SILFunction *> NewRefs;

  // Process all applies passed in, plus any new ones that are pushed
  // on as a result of specializing the referenced functions.
  while (!Applies.empty()) {
    auto Apply = Applies.back();
    Applies.pop_back();

    bool ChangedApply = false;
    if (auto FullApply = FullApplySite::isa(Apply.getInstruction())) {
      if (auto NewApply = devirtualize(FullApply, CHA)) {
        ChangedApply = true;

        Apply = ApplySite(NewApply.getInstruction());
      }
    }

    llvm::SmallVector<ApplySite, 4> NewApplies;
    if (auto NewApply = specializeGeneric(Apply, NewApplies)) {
      ChangedApply = true;

      Apply = NewApply;
      Applies.insert(Applies.end(), NewApplies.begin(), NewApplies.end());
    }

    if (ChangedApply) {
      ChangedAny = true;

      auto *NewCallee = Apply.getCalleeFunction();
      assert(NewCallee && "Expected directly referenced function!");

      // Track all new references to function definitions.
      if (NewCallee->isDefinition())
        NewRefs.insert(NewCallee);
      // TODO: Do we need to invalidate everything at this point?
      // What about side-effects analysis? What about type analysis?
      MT->invalidateAnalysis(Apply.getFunction(),
                             SILAnalysis::InvalidationKind::Everything);
    }
  }

  // Copy out all the new function references gathered.
  if (ChangedAny)
    WorkList.insert(WorkList.end(), NewRefs.begin(), NewRefs.end());

  return ChangedAny;
}
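
A stand-alone sketch of the drain-and-append worklist idiom used above; the Item type and the depth-based expansion rule are hypothetical, standing in for the new applies that specialization pushes onto Applies.

#include "llvm/ADT/SmallVector.h"

// Hypothetical work item; Depth stands in for whatever state decides
// whether processing an item exposes more work.
struct Item { int Depth; };

// Drain the vector from the back, appending any newly created work as
// we go -- the same shape as the Applies loop above.
static void drainWorklist(llvm::SmallVectorImpl<Item> &Work) {
  while (!Work.empty()) {
    Item Current = Work.back();
    Work.pop_back();
    if (Current.Depth < 3) {
      // Appending is safe here because no iterators are held across the
      // push_backs; the next iteration simply re-reads Work.back().
      Work.push_back({Current.Depth + 1});
      Work.push_back({Current.Depth + 1});
    }
  }
}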
Example No. 3
// OutputPossibleOverflows - We've found a possible overflow earlier;
// now check whether Body contains a comparison that could be
// preventing the overflow.
// This doesn't do flow analysis, range analysis, or points-to analysis;
// it's just a dumb "is there a comparison" scan. The aim here is to
// detect the most blatant cases of overflow and educate the
// programmer.
void MallocOverflowSecurityChecker::OutputPossibleOverflows(
  llvm::SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
  const Decl *D, BugReporter &BR, AnalysisManager &mgr) const {
  // By far the most common case: nothing to check.
  if (PossibleMallocOverflows.empty())
    return;

  // Delete any possible overflows which have a comparison.
  CheckOverflowOps c(PossibleMallocOverflows, BR.getContext());
  c.Visit(mgr.getAnalysisContext(D)->getBody());

  // Output warnings for all overflows that are left.
  for (CheckOverflowOps::theVecType::iterator
       i = PossibleMallocOverflows.begin(),
       e = PossibleMallocOverflows.end();
       i != e;
       ++i) {
    SourceRange R = i->mulop->getSourceRange();
    BR.EmitBasicReport("MallocOverflowSecurityChecker",
      "the computation of the size of the memory allocation may overflow",
      PathDiagnosticLocation::createOperatorLoc(i->mulop,
                                                BR.getSourceManager()),
      &R, 1);
  }
}
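
For illustration, a simplified, hypothetical version of the "is there a comparison" scan using Clang's RecursiveASTVisitor; the checker's real CheckOverflowOps visitor is more targeted (it cares which values take part in the comparison), but the traversal shape is the same.

#include "clang/AST/Expr.h"
#include "clang/AST/RecursiveASTVisitor.h"

// Hypothetical, simplified scan: does the body contain any comparison
// operator at all?
class ComparisonFinder
    : public clang::RecursiveASTVisitor<ComparisonFinder> {
public:
  bool FoundComparison = false;

  bool VisitBinaryOperator(clang::BinaryOperator *BO) {
    if (BO->isComparisonOp())
      FoundComparison = true;
    return true; // keep walking the rest of the body
  }
};

bool bodyContainsComparison(clang::Stmt *Body) {
  ComparisonFinder Finder;
  Finder.TraverseStmt(Body);
  return Finder.FoundComparison;
}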
Example No. 4
static void orderEdges(const llvm::SmallPtrSetImpl<CallGraphEdge *> &Edges,
                       llvm::SmallVectorImpl<CallGraphEdge *> &OrderedEdges) {
  for (auto *Edge : Edges)
    OrderedEdges.push_back(Edge);

  std::sort(OrderedEdges.begin(), OrderedEdges.end(),
            [](CallGraphEdge *left, CallGraphEdge *right) {
              return left->getOrdinal() < right->getOrdinal();
            });
}
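
The copy-then-sort step exists because SmallPtrSet iteration order is not deterministic, so a stable key (the ordinal) is used to restore a reproducible order. Below is a stand-alone sketch with a hypothetical Node type (Ordinal plays the role of CallGraphEdge::getOrdinal()), including the caller side where the concrete set and vector sizes are chosen.

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include <algorithm>

// Hypothetical node carrying a stable ordinal assigned at creation.
struct Node { unsigned Ordinal; };

static void orderNodes(const llvm::SmallPtrSetImpl<Node *> &Nodes,
                       llvm::SmallVectorImpl<Node *> &Ordered) {
  // Copy out of the unordered pointer set, then impose a stable order.
  Ordered.append(Nodes.begin(), Nodes.end());
  std::sort(Ordered.begin(), Ordered.end(),
            [](Node *L, Node *R) { return L->Ordinal < R->Ordinal; });
}

// Caller side: concrete inline sizes are picked here and passed through
// the SmallPtrSetImpl / SmallVectorImpl references.
void visitInOrder(Node *A, Node *B, Node *C) {
  llvm::SmallPtrSet<Node *, 4> Set;
  Set.insert(A);
  Set.insert(B);
  Set.insert(C);
  llvm::SmallVector<Node *, 4> Ordered;
  orderNodes(Set, Ordered);
  // Ordered now holds A, B, and C sorted by their Ordinal fields.
}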
Example No. 5
static SILValue allIncomingValuesEqual(
    llvm::SmallVectorImpl<std::pair<SILBasicBlock *,
                                    SILValue >> &IncomingValues) {
  SILValue First = stripRCIdentityPreservingInsts(IncomingValues[0].second);
  if (std::all_of(std::next(IncomingValues.begin()), IncomingValues.end(),
                     [&First](std::pair<SILBasicBlock *, SILValue> P) -> bool {
                       return stripRCIdentityPreservingInsts(P.second) == First;
                     }))
    return First;
  return SILValue();
}
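
The same skip-the-first, compare-against-the-first shape in a stand-alone sketch with plain values; project is a hypothetical stand-in for stripRCIdentityPreservingInsts, and 0 stands in for the null SILValue sentinel. Note that a single-element list passes trivially because the range handed to std::all_of is then empty.

#include "llvm/ADT/SmallVector.h"
#include <algorithm>
#include <iterator>
#include <utility>

// Hypothetical projection; the real code strips RC-identity-preserving
// instructions before comparing SILValues.
static int project(int V) { return V; }

// Return the common projected value of all (block, value) pairs, or 0
// when they disagree. Assumes at least one incoming pair, like the
// original.
static int allIncomingEqual(
    llvm::SmallVectorImpl<std::pair<int, int>> &Incoming) {
  int First = project(Incoming[0].second);
  // std::next skips the first element; for a single-element list the
  // remaining range is empty and all_of vacuously returns true.
  if (std::all_of(std::next(Incoming.begin()), Incoming.end(),
                  [&First](const std::pair<int, int> &P) {
                    return project(P.second) == First;
                  }))
    return First;
  return 0;
}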
Example No. 6
void
DeclContext::collectAllContexts(llvm::SmallVectorImpl<DeclContext *> &Contexts) {
  Contexts.clear();
  
  if (DeclKind != Decl::Namespace) {
    Contexts.push_back(this);
    return;
  }
  
  NamespaceDecl *Self = static_cast<NamespaceDecl *>(this);
  for (NamespaceDecl *N = Self->getMostRecentDecl(); N;
       N = N->getPreviousDecl())
    Contexts.push_back(N);
  
  std::reverse(Contexts.begin(), Contexts.end());
}
Example No. 7
/// \brief Attempt to inline all calls smaller than our threshold.
/// Returns true if a function was inlined.
bool SILPerformanceInliner::inlineCallsIntoFunction(SILFunction *Caller,
                                                    DominanceAnalysis *DA,
                                                    SILLoopAnalysis *LA,
                             llvm::SmallVectorImpl<FullApplySite> &NewApplies) {
  // Don't optimize functions that are marked with the opt.never attribute.
  if (!Caller->shouldOptimize())
    return false;

  // Construct a log of all of the names of the functions that we've inlined
  // in the current iteration.
  SmallVector<StringRef, 16> InlinedFunctionNames;
  StringRef CallerName = Caller->getName();

  DEBUG(llvm::dbgs() << "Visiting Function: " << CallerName << "\n");

  assert(NewApplies.empty() && "Expected empty vector to store results in!");

  // First step: collect all the functions we want to inline.  We
  // don't change anything yet so that the dominator information
  // remains valid.
  SmallVector<FullApplySite, 8> AppliesToInline;
  collectAppliesToInline(Caller, AppliesToInline, DA, LA);

  if (AppliesToInline.empty())
    return false;

  // Second step: do the actual inlining.
  for (auto AI : AppliesToInline) {
    SILFunction *Callee = AI.getCalleeFunction();
    assert(Callee && "apply_inst does not have a direct callee anymore");

    DEBUG(llvm::dbgs() << "    Inline:" <<  *AI.getInstruction());

    if (!Callee->shouldOptimize()) {
      DEBUG(llvm::dbgs() << "    Cannot inline function " << Callee->getName()
                         << " marked to be excluded from optimizations.\n");
      continue;
    }
    
    SmallVector<SILValue, 8> Args;
    for (const auto &Arg : AI.getArguments())
      Args.push_back(Arg);

    // As we inline and clone we need to collect new applies.
    auto Filter = [](SILInstruction *I) -> bool {
      return bool(FullApplySite::isa(I));
    };

    CloneCollector Collector(Filter);

    // Notice that we will skip all of the newly inlined ApplyInsts. That's
    // okay because we will visit them in our next invocation of the inliner.
    TypeSubstitutionMap ContextSubs;
    SILInliner Inliner(*Caller, *Callee,
                       SILInliner::InlineKind::PerformanceInline,
                       ContextSubs, AI.getSubstitutions(),
                       Collector.getCallback());

    // Record the name of the inlined function (for cycle detection).
    InlinedFunctionNames.push_back(Callee->getName());

    auto Success = Inliner.inlineFunction(AI, Args);
    (void) Success;
    // We've already determined we should be able to inline this, so
    // we expect it to have happened.
    assert(Success && "Expected inliner to inline this function!");
    llvm::SmallVector<FullApplySite, 4> AppliesFromInlinee;
    for (auto &P : Collector.getInstructionPairs())
      AppliesFromInlinee.push_back(FullApplySite(P.first));

    recursivelyDeleteTriviallyDeadInstructions(AI.getInstruction(), true);

    NewApplies.insert(NewApplies.end(), AppliesFromInlinee.begin(),
                      AppliesFromInlinee.end());
    DA->invalidate(Caller, SILAnalysis::InvalidationKind::Everything);
    NumFunctionsInlined++;
  }

  // Record the names of the functions that we inlined.
  // We'll use this list to detect cycles in future iterations of
  // the inliner.
  for (auto CalleeName : InlinedFunctionNames) {
    InlinedFunctions.insert(std::make_pair(CallerName, CalleeName));
  }

  DEBUG(llvm::dbgs() << "\n");
  return true;
}