Example 1
/// Runs <code>xcrun -f clang</code> in order to find the location of Clang for
/// the currently active Xcode.
///
/// We get the "currently active" part by passing through the DEVELOPER_DIR
/// environment variable (along with the rest of the environment).
static bool findXcodeClangPath(llvm::SmallVectorImpl<char> &path) {
  assert(path.empty());

  auto xcrunPath = llvm::sys::findProgramByName("xcrun");
  if (!xcrunPath.getError()) {
    const char *args[] = {"-f", "clang", nullptr};
    sys::TaskQueue queue;
    queue.addTask(xcrunPath->c_str(), args, /*Env=*/llvm::None,
                  /*Context=*/nullptr,
                  /*SeparateErrors=*/true);
    queue.execute(nullptr,
                  [&path](sys::ProcessId PID, int returnCode, StringRef output,
                          StringRef errors,
                          sys::TaskProcessInformation ProcInfo,
                          void *unused) -> sys::TaskFinishedResponse {
                    if (returnCode == 0) {
                      output = output.rtrim();
                      path.append(output.begin(), output.end());
                    }
                    return sys::TaskFinishedResponse::ContinueExecution;
                  });
  }

  return !path.empty();
}
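A hypothetical caller (a sketch; the names are illustrative, and SmallString is a SmallVectorImpl<char>, so it satisfies the out-parameter type):

// Hypothetical caller (sketch): the out-parameter is filled only on success.
llvm::SmallString<128> clangPath;
if (findXcodeClangPath(clangPath)) {
  llvm::StringRef clangLoc(clangPath.data(), clangPath.size());
  // ... invoke the discovered clang for the active Xcode ...
}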
Example 2
// Devirtualize and specialize a group of applies, returning a
// worklist of newly exposed function references that should be
// considered for inlining before continuing with the caller that has
// the passed-in applies.
//
// The returned worklist is stacked such that the last things we want
// to process are earlier on the list.
//
// Returns true if any changes were made.
bool SILPerformanceInliner::devirtualizeAndSpecializeApplies(
                                  llvm::SmallVectorImpl<ApplySite> &Applies,
                                  SILModuleTransform *MT,
                                  ClassHierarchyAnalysis *CHA,
                               llvm::SmallVectorImpl<SILFunction *> &WorkList) {
  assert(WorkList.empty() && "Expected empty worklist for return results!");

  bool ChangedAny = false;

  // The set of all new function references generated by
  // devirtualization and specialization.
  llvm::SetVector<SILFunction *> NewRefs;

  // Process all applies passed in, plus any new ones that are pushed
  // on as a result of specializing the referenced functions.
  while (!Applies.empty()) {
    auto Apply = Applies.back();
    Applies.pop_back();

    bool ChangedApply = false;
    if (auto FullApply = FullApplySite::isa(Apply.getInstruction())) {
      if (auto NewApply = devirtualize(FullApply, CHA)) {
        ChangedApply = true;

        Apply = ApplySite(NewApply.getInstruction());
      }
    }

    llvm::SmallVector<ApplySite, 4> NewApplies;
    if (auto NewApply = specializeGeneric(Apply, NewApplies)) {
      ChangedApply = true;

      Apply = NewApply;
      Applies.insert(Applies.end(), NewApplies.begin(), NewApplies.end());
    }

    if (ChangedApply) {
      ChangedAny = true;

      auto *NewCallee = Apply.getCalleeFunction();
      assert(NewCallee && "Expected directly referenced function!");

      // Track all new references to function definitions.
      if (NewCallee->isDefinition())
        NewRefs.insert(NewCallee);

      // TODO: Do we need to invalidate everything at this point?
      // What about side-effects analysis? What about type analysis?
      MT->invalidateAnalysis(Apply.getFunction(),
                             SILAnalysis::InvalidationKind::Everything);
    }
  }

  // Copy out all the new function references gathered.
  if (ChangedAny)
    WorkList.insert(WorkList.end(), NewRefs.begin(), NewRefs.end());

  return ChangedAny;
}
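A hypothetical driver loop (a sketch, not the actual pass pipeline; F, MT, and CHA are assumed in scope, and collectAllAppliesInFunction is the helper shown in Example 5). Popping from the back matches the stacking order described in the comment above:

llvm::SmallVector<ApplySite, 8> Applies;
llvm::SmallVector<SILFunction *, 8> WorkList;
collectAllAppliesInFunction(F, Applies);
if (devirtualizeAndSpecializeApplies(Applies, MT, CHA, WorkList)) {
  while (!WorkList.empty()) {
    SILFunction *Next = WorkList.pop_back_val(); // last pushed, first considered
    // ... consider Next for inlining before returning to F's applies ...
  }
}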
Example 3
// OutputPossibleOverflows - We've found a possible overflow earlier,
// now check whether Body might contain a comparison which might be
// preventing the overflow.
// This doesn't do flow analysis, range analysis, or points-to analysis; it's
// just a dumb "is there a comparison" scan.  The aim here is to
// detect the most blatant cases of overflow and educate the
// programmer.
void MallocOverflowSecurityChecker::OutputPossibleOverflows(
  llvm::SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
  const Decl *D, BugReporter &BR, AnalysisManager &mgr) const {
  // By far the most common case: nothing to check.
  if (PossibleMallocOverflows.empty())
    return;

  // Delete any possible overflows which have a comparison.
  CheckOverflowOps c(PossibleMallocOverflows, BR.getContext());
  c.Visit(mgr.getAnalysisContext(D)->getBody());

  // Output warnings for all overflows that are left.
  for (CheckOverflowOps::theVecType::iterator
       i = PossibleMallocOverflows.begin(),
       e = PossibleMallocOverflows.end();
       i != e;
       ++i) {
    SourceRange R = i->mulop->getSourceRange();
    BR.EmitBasicReport("MallocOverflowSecurityChecker",
      "the computation of the size of the memory allocation may overflow",
      PathDiagnosticLocation::createOperatorLoc(i->mulop,
                                                BR.getSourceManager()),
      &R, 1);
  }
}
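Illustrative inputs (not from the checker's tests) showing what the "is there a comparison" scan catches: the first allocation is reported because n never appears in a comparison, while the second is suppressed:

#include <cstdlib>

void *bad(size_t n) {
  return malloc(n * sizeof(int)); // reported: size computation may overflow
}

void *ok(size_t n) {
  if (n > 1024)                   // a comparison on n suppresses the report
    return nullptr;
  return malloc(n * sizeof(int));
}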
Example 4
static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
                    llvm::SmallVectorImpl<llvm::Type*> &elementTypes) {
  ASTContext &C = CGM.getContext();

  // The header is basically a 'struct { void *; int; int; void *; void *; }'.
  CharUnits ptrSize, ptrAlign, intSize, intAlign;
  llvm::tie(ptrSize, ptrAlign) = C.getTypeInfoInChars(C.VoidPtrTy);
  llvm::tie(intSize, intAlign) = C.getTypeInfoInChars(C.IntTy);

  // Are there crazy embedded platforms where this isn't true?
  assert(intSize <= ptrSize && "layout assumptions horribly violated");

  CharUnits headerSize = ptrSize;
  if (2 * intSize < ptrAlign) headerSize += ptrSize;
  else headerSize += 2 * intSize;
  headerSize += 2 * ptrSize;

  info.BlockAlign = ptrAlign;
  info.BlockSize = headerSize;

  assert(elementTypes.empty());
  llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
  llvm::Type *intTy = CGM.getTypes().ConvertType(C.IntTy);
  elementTypes.push_back(i8p);
  elementTypes.push_back(intTy);
  elementTypes.push_back(intTy);
  elementTypes.push_back(i8p);
  elementTypes.push_back(CGM.getBlockDescriptorType());

  assert(elementTypes.size() == BlockHeaderSize);
}
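Spelled out as a plain struct (a sketch; the field names follow the blocks ABI documentation and do not appear in this code), the five element types correspond to:

struct BlockHeader {
  void *isa;        // i8p
  int flags;        // intTy
  int reserved;     // intTy
  void *invoke;     // i8p
  void *descriptor; // CGM.getBlockDescriptorType()
};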
Example 5
static void collectAllAppliesInFunction(SILFunction *F,
                                llvm::SmallVectorImpl<ApplySite> &Applies) {
  assert(Applies.empty() && "Expected empty vector to store into!");

  for (auto &B : *F)
    for (auto &I : B)
      if (auto Apply = ApplySite::isa(&I))
        Applies.push_back(Apply);
}
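Because of the assert, a caller reusing one vector across functions must clear it first; a minimal sketch, assuming Module is a SILModule *:

llvm::SmallVector<ApplySite, 8> Applies;
for (SILFunction &F : *Module) {
  Applies.clear(); // satisfy the empty-vector contract
  collectAllAppliesInFunction(&F, Applies);
  // ... process Applies ...
}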
Example 6
ApplySite SILPerformanceInliner::specializeGeneric(
    ApplySite Apply, llvm::SmallVectorImpl<ApplySite> &NewApplies) {
  assert(NewApplies.empty() && "Expected out parameter for new applies!");

  if (!Apply.hasSubstitutions())
    return ApplySite();

  auto *Callee = Apply.getCalleeFunction();

  if (!Callee || Callee->isExternalDeclaration())
    return ApplySite();

  auto Filter = [](SILInstruction *I) -> bool {
    return ApplySite::isa(I) != ApplySite();
  };

  CloneCollector Collector(Filter);

  SILFunction *SpecializedFunction;
  auto Specialized = trySpecializeApplyOfGeneric(Apply,
                                                 SpecializedFunction,
                                                 Collector);

  if (!Specialized)
    return ApplySite();

  // Track the new applies from the specialization.
  for (auto NewCallSite : Collector.getInstructionPairs())
    NewApplies.push_back(ApplySite(NewCallSite.first));

  auto FullApply = FullApplySite::isa(Apply.getInstruction());

  if (!FullApply) {
    assert(!FullApplySite::isa(Specialized.getInstruction()) &&
           "Unexpected full apply generated!");

    // Replace the old apply with the new and delete the old.
    replaceDeadApply(Apply, Specialized.getInstruction());

    return ApplySite(Specialized);
  }

  // Replace the old apply with the new and delete the old.
  replaceDeadApply(Apply, Specialized.getInstruction());

  return Specialized;
}
Example 7
bool Popen(const std::string& Cmd, llvm::SmallVectorImpl<char>& Buf, bool RdE) {
  if (FILE *PF = ::popen(RdE ? (Cmd + " 2>&1").c_str() : Cmd.c_str(), "r")) {
    Buf.resize(0);
    const size_t Chunk = Buf.capacity_in_bytes();
    while (true) {
      const size_t Len = Buf.size();
      Buf.resize(Len + Chunk);
      const size_t R = ::fread(&Buf[Len], sizeof(char), Chunk, PF);
      if (R < Chunk) {
        Buf.resize(Len + R);
        break;
      }
    }
    ::pclose(PF);
    return !Buf.empty();
  }
  return false;
}
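A minimal caller (a sketch; assumes a POSIX host, and the command string is illustrative):

llvm::SmallString<1024> Out; // SmallString is a SmallVectorImpl<char>
if (Popen("uname -a", Out, /*RdE=*/true))
  llvm::errs() << llvm::StringRef(Out.data(), Out.size());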
Example 8
void ConstantAggregateBuilderBase::getGEPIndicesTo(
                               llvm::SmallVectorImpl<llvm::Constant*> &indices,
                               size_t position) const {
  // Recurse on the parent builder if present.
  if (Parent) {
    Parent->getGEPIndicesTo(indices, Begin);

  // Otherwise, add an index to drill into the first level of pointer. 
  } else {
    assert(indices.empty());
    indices.push_back(llvm::ConstantInt::get(Builder.CGM.Int32Ty, 0));
  }

  assert(position >= Begin);
  // We have to use i32 here because struct GEPs demand i32 indices.
  // It's rather unlikely to matter in practice.
  indices.push_back(llvm::ConstantInt::get(Builder.CGM.Int32Ty,
                                           position - Begin));
}
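An illustrative trace (hypothetical positions, not from the sources): the recursion bottoms out at the root builder, which pushes the leading index that drills through the pointer, and each level on the way back appends its relative offset:

// root (no parent): indices = [i32 0]
// parent level:     indices = [i32 0, i32 (childBegin - parentBegin)]
// this level:       indices = [i32 0, ..., i32 (position - Begin)]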
Example 9
/// If AI is the version of an initializer where we pass in either an apply or
/// an alloc_ref to initialize in place, validate that we are able to continue
/// optimizing and, if so, return true after filling ToDestroy with the extra
/// instructions that must be deleted along with the initializer.
static bool getDeadInstsAfterInitializerRemoved(
    ApplyInst *AI, llvm::SmallVectorImpl<SILInstruction *> &ToDestroy) {
  assert(ToDestroy.empty() && "We assume that ToDestroy is empty, so on "
                              "failure we can clear without worrying about the "
                              "caller accumulating and thus our eliminating "
                              "passed in state.");
  SILValue Arg0 = AI->getArgument(0);

  if (Arg0->getType().isExistentialType()) {
    // This is a version of the initializer which receives a pre-allocated
    // buffer as first argument. To completely eliminate the allocation, we must
    // destroy the extra allocations as well as the initializer.
    if (auto *Result = dyn_cast<ApplyInst>(Arg0)) {
      ToDestroy.emplace_back(Result);
      return true;
    }

    return false;
  }

  if (auto *ARI = dyn_cast<AllocRefInst>(Arg0)) {
    if (all_of(ARI->getUses(), [&](Operand *Op) -> bool {
          if (Op->getUser() == AI)
            return true;
          if (auto *SRI = dyn_cast<StrongReleaseInst>(Op->getUser())) {
            ToDestroy.emplace_back(SRI);
            return true;
          }
          return false;
        })) {
      return true;
    }
  }

  // We may have added elements to the array before we failed. To avoid such a
  // problem, we clear the out array here. We assert at the beginning that the
  // out array is empty, so this is safe.
  ToDestroy.clear();
  return false;
}
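A hypothetical caller (a sketch; real cleanup may be sequenced differently):

llvm::SmallVector<SILInstruction *, 4> ToDestroy;
if (getDeadInstsAfterInitializerRemoved(AI, ToDestroy)) {
  for (SILInstruction *I : ToDestroy)
    I->eraseFromParent(); // drop the extra apply/strong_release instructions
}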
Example 10
void MicrosoftCXXABI::
GetNullMemberPointerFields(const MemberPointerType *MPT,
                           llvm::SmallVectorImpl<llvm::Constant *> &fields) {
  assert(fields.empty());
  const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
  MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
  if (MPT->isMemberFunctionPointer()) {
    // FunctionPointerOrVirtualThunk
    fields.push_back(llvm::Constant::getNullValue(CGM.VoidPtrTy));
  } else {
    if (nullFieldOffsetIsZero(Inheritance))
      fields.push_back(getZeroInt());  // FieldOffset
    else
      fields.push_back(getAllOnesInt());  // FieldOffset
  }

  if (hasVBPtrOffsetField(Inheritance))
    fields.push_back(getZeroInt());
  if (hasNonVirtualBaseAdjustmentField(MPT, Inheritance))
    fields.push_back(getZeroInt());
  if (hasVirtualBaseAdjustmentField(Inheritance))
    fields.push_back(getAllOnesInt());
}
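For orientation, the null member-function-pointer shapes this produces per inheritance model (a hedged summary, not from the sources):

// single:      { nullptr }
// multiple:    { nullptr, /*NonVirtualBaseAdjustment=*/0 }
// virtual:     { nullptr, /*NonVirtualBaseAdjustment=*/0, /*VirtualBaseAdjustment=*/-1 }
// unspecified: adds a zero vbptr-offset field before the adjustments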
Example 11
/// \brief Attempt to inline all calls smaller than our threshold.
/// Returns true if a function was inlined.
bool SILPerformanceInliner::inlineCallsIntoFunction(SILFunction *Caller,
                                                    DominanceAnalysis *DA,
                                                    SILLoopAnalysis *LA,
                             llvm::SmallVectorImpl<FullApplySite> &NewApplies) {
  // Don't optimize functions that are marked with the opt.never attribute.
  if (!Caller->shouldOptimize())
    return false;

  // Construct a log of all of the names of the functions that we've inlined
  // in the current iteration.
  SmallVector<StringRef, 16> InlinedFunctionNames;
  StringRef CallerName = Caller->getName();

  DEBUG(llvm::dbgs() << "Visiting Function: " << CallerName << "\n");

  assert(NewApplies.empty() && "Expected empty vector to store results in!");

  // First step: collect all the functions we want to inline.  We
  // don't change anything yet so that the dominator information
  // remains valid.
  SmallVector<FullApplySite, 8> AppliesToInline;
  collectAppliesToInline(Caller, AppliesToInline, DA, LA);

  if (AppliesToInline.empty())
    return false;

  // Second step: do the actual inlining.
  for (auto AI : AppliesToInline) {
    SILFunction *Callee = AI.getCalleeFunction();
    assert(Callee && "apply_inst does not have a direct callee anymore");

    DEBUG(llvm::dbgs() << "    Inline:" <<  *AI.getInstruction());

    if (!Callee->shouldOptimize()) {
      DEBUG(llvm::dbgs() << "    Cannot inline function " << Callee->getName()
                         << " marked to be excluded from optimizations.\n");
      continue;
    }
    
    SmallVector<SILValue, 8> Args;
    for (const auto &Arg : AI.getArguments())
      Args.push_back(Arg);

    // As we inline and clone we need to collect new applies.
    auto Filter = [](SILInstruction *I) -> bool {
      return bool(FullApplySite::isa(I));
    };

    CloneCollector Collector(Filter);

    // Notice that we will skip all of the newly inlined ApplyInsts. That's
    // okay because we will visit them in our next invocation of the inliner.
    TypeSubstitutionMap ContextSubs;
    SILInliner Inliner(*Caller, *Callee,
                       SILInliner::InlineKind::PerformanceInline,
                       ContextSubs, AI.getSubstitutions(),
                       Collector.getCallback());

    // Record the name of the inlined function (for cycle detection).
    InlinedFunctionNames.push_back(Callee->getName());

    auto Success = Inliner.inlineFunction(AI, Args);
    (void) Success;
    // We've already determined we should be able to inline this, so
    // we expect it to have happened.
    assert(Success && "Expected inliner to inline this function!");
    llvm::SmallVector<FullApplySite, 4> AppliesFromInlinee;
    for (auto &P : Collector.getInstructionPairs())
      AppliesFromInlinee.push_back(FullApplySite(P.first));

    recursivelyDeleteTriviallyDeadInstructions(AI.getInstruction(), true);

    NewApplies.insert(NewApplies.end(), AppliesFromInlinee.begin(),
                      AppliesFromInlinee.end());
    DA->invalidate(Caller, SILAnalysis::InvalidationKind::Everything);
    NumFunctionsInlined++;
  }

  // Record the names of the functions that we inlined.
  // We'll use this list to detect cycles in future iterations of
  // the inliner.
  for (auto CalleeName : InlinedFunctionNames) {
    InlinedFunctions.insert(std::make_pair(CallerName, CalleeName));
  }

  DEBUG(llvm::dbgs() << "\n");
  return true;
}
Example 12
Store BasicStoreManager::RemoveDeadBindings(Store store, Stmt* Loc,
        SymbolReaper& SymReaper,
        llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
{
    BindingsTy B = GetBindings(store);
    typedef SVal::symbol_iterator symbol_iterator;

    // Iterate over the variable bindings.
    for (BindingsTy::iterator I=B.begin(), E=B.end(); I!=E ; ++I) {
        if (const VarRegion *VR = dyn_cast<VarRegion>(I.getKey())) {
            if (SymReaper.isLive(Loc, VR))
                RegionRoots.push_back(VR);
            else
                continue;
        }
        else if (isa<ObjCIvarRegion>(I.getKey())) {
            RegionRoots.push_back(I.getKey());
        }
        else
            continue;

        // Mark the bindings in the data as live.
        SVal X = I.getData();
        for (symbol_iterator SI=X.symbol_begin(), SE=X.symbol_end(); SI!=SE; ++SI)
            SymReaper.markLive(*SI);
    }

    // Scan for live variables and live symbols.
    llvm::SmallPtrSet<const MemRegion*, 10> Marked;

    while (!RegionRoots.empty()) {
        const MemRegion* MR = RegionRoots.back();
        RegionRoots.pop_back();

        while (MR) {
            if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(MR)) {
                SymReaper.markLive(SymR->getSymbol());
                break;
            }
            else if (isa<VarRegion>(MR) || isa<ObjCIvarRegion>(MR)) {
                if (Marked.count(MR))
                    break;

                Marked.insert(MR);
                SVal X = Retrieve(store, loc::MemRegionVal(MR));

                // FIXME: We need to handle symbols nested in region definitions.
                for (symbol_iterator SI=X.symbol_begin(),SE=X.symbol_end(); SI!=SE; ++SI)
                    SymReaper.markLive(*SI);

                if (!isa<loc::MemRegionVal>(X))
                    break;

                const loc::MemRegionVal& LVD = cast<loc::MemRegionVal>(X);
                RegionRoots.push_back(LVD.getRegion());
                break;
            }
            else if (const SubRegion* R = dyn_cast<SubRegion>(MR))
                MR = R->getSuperRegion();
            else
                break;
        }
    }

    // Remove dead variable bindings.
    for (BindingsTy::iterator I=B.begin(), E=B.end(); I!=E ; ++I) {
        const MemRegion* R = I.getKey();

        if (!Marked.count(R)) {
            store = Remove(store, ValMgr.makeLoc(R));
            SVal X = I.getData();

            for (symbol_iterator SI=X.symbol_begin(), SE=X.symbol_end(); SI!=SE; ++SI)
                SymReaper.maybeDead(*SI);
        }
    }

    return store;
}