/// Checks whether any of the arguments to the apply are closures and diagnoses
/// if any of the @inout_aliasable captures passed to those closures have
/// in-progress accesses that would conflict with any access the summary
/// says the closure would perform.
///
/// TODO: We currently fail to statically diagnose non-escaping closures passed
/// via @block_storage convention. To enforce this case, we should statically
/// recognize when the apply takes a block argument that has been initialized to
/// a non-escaping closure.
static void checkForViolationsInNoEscapeClosures(
    const StorageMap &Accesses, FullApplySite FAS, AccessSummaryAnalysis *ASA,
    llvm::SmallVectorImpl<ConflictingAccess> &ConflictingAccesses) {

  SILFunction *Callee = FAS.getCalleeFunction();
  if (Callee && !Callee->empty()) {
    // Check for violation with directly called closure
    checkForViolationWithCall(Accesses, Callee, 0, FAS.getArguments(), ASA,
                              ConflictingAccesses);
  }

  // Check for violation with closures passed as arguments
  for (SILValue Argument : FAS.getArguments()) {
    auto *PAI = lookThroughForPartialApply(Argument);
    if (!PAI)
      continue;

    SILFunction *Closure = PAI->getCalleeFunction();
    if (!Closure || Closure->empty())
      continue;

    // Check the closure's captures, which are a suffix of the closure's
    // parameters.
    unsigned StartIndex =
        Closure->getArguments().size() - PAI->getNumCallArguments();
    checkForViolationWithCall(Accesses, Closure, StartIndex,
                              PAI->getArguments(), ASA, ConflictingAccesses);
  }
}
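
For illustration, a minimal Swift sketch (hypothetical names) of the kind of conflict this check diagnoses: a closure passed at the apply writes to a variable that the same call already accesses via inout.

func modifyTwice(_ x: inout Int, _ f: () -> Void) { f() }

func caller() {
  var i = 0
  // Conflict: the closure's captured write to 'i' overlaps the exclusive
  // inout access to 'i' that lasts for the duration of the call.
  modifyTwice(&i) { i += 1 }
}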
Example #2
// Analyzing the body of this class destructor is valid because the object is
// dead. This means that the object is never passed to objc_setAssociatedObject,
// so its destructor cannot be extended at runtime.
static SILFunction *getDestructor(AllocRefInst *ARI) {
  // We only support classes.
  ClassDecl *ClsDecl = ARI->getType().getClassOrBoundGenericClass();
  if (!ClsDecl)
    return nullptr;

  // Look up the destructor of ClsDecl.
  DestructorDecl *Destructor = ClsDecl->getDestructor();
  assert(Destructor && "getDestructor() should never return a nullptr.");

  // Find the destructor name via SILDeclRef.
  // FIXME: When destructors get moved into vtables, update this to use the
  // vtable for the class.
  SILDeclRef Ref(Destructor);
  SILFunction *Fn = ARI->getModule().lookUpFunction(Ref);
  if (!Fn || Fn->empty()) {
    DEBUG(llvm::dbgs() << "    Could not find destructor.\n");
    return nullptr;
  }

  DEBUG(llvm::dbgs() << "    Found destructor!\n");

  // If the destructor has an objc_method calling convention, we cannot
  // analyze it since it could be swapped out from under us at runtime.
  if (Fn->getRepresentation() == SILFunctionTypeRepresentation::ObjCMethod) {
    DEBUG(llvm::dbgs() << "        Found objective-c destructor. Can't "
          "analyze!\n");
    return nullptr;
  }

  return Fn;
}
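
As a rough illustration (hypothetical class), the lookup above maps an alloc_ref of a Swift class to the SIL function for its deinit, provided that deinit does not use the objc_method convention.

final class Widget {
  deinit { print("widget destroyed") }
}
// For 'alloc_ref $Widget', getDestructor would return the SIL function
// emitted for Widget.deinit.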
Example #3
void AccessSummaryAnalysis::processPartialApply(FunctionInfo *callerInfo,
                                                unsigned callerArgumentIndex,
                                                PartialApplyInst *apply,
                                                Operand *applyArgumentOperand,
                                                FunctionOrder &order) {
  SILFunction *calleeFunction = apply->getCalleeFunction();
  assert(calleeFunction && !calleeFunction->empty() &&
         "Missing definition of noescape closure?");

  // Make sure the partial_apply is not calling the result of another
  // partial_apply.
  assert(isa<FunctionRefInst>(apply->getCallee()) &&
         "Noescape partial apply of non-functionref?");

  assert(llvm::all_of(apply->getUses(),
                      hasExpectedUsesOfNoEscapePartialApply) &&
         "noescape partial_apply has unexpected use!");

  // The argument index in the called function.
  ApplySite site(apply);
  unsigned calleeArgumentIndex = site.getCalleeArgIndex(*applyArgumentOperand);

  processCall(callerInfo, callerArgumentIndex, calleeFunction,
              calleeArgumentIndex, order);
}
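
For context, a hedged Swift sketch of where such a noescape partial_apply arises: capturing an inout parameter in a non-escaping closure lowers the capture to an @inout_aliasable argument of the closure's partial_apply.

func callTwice(_ f: () -> Void) { f(); f() }

func increment(_ x: inout Int) {
  // The capture of 'x' becomes an @inout_aliasable argument of the
  // partial_apply that processPartialApply summarizes.
  callTwice { x += 1 }
}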
Example #4
/// Could this operand to an apply escape that function by being
/// stored or returned?
static bool applyArgumentEscapes(FullApplySite Apply, Operand *O) {
  SILFunction *F = Apply.getReferencedFunction();
  // If we cannot examine the function body, assume the worst.
  if (!F || F->empty())
    return true;

  // Check the uses of the operand, but do not recurse down into other
  // apply instructions.
  auto calleeArg = F->getArgument(Apply.getCalleeArgIndex(*O));
  return partialApplyEscapes(calleeArg, /* examineApply = */ false);
}
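
A small Swift sketch (illustrative only) of the distinction this helper draws between an argument that escapes its callee and one that does not:

var stash: (() -> Void)?

func storesArgument(_ f: @escaping () -> Void) {
  stash = f   // the argument escapes by being stored
}

func onlyCalls(_ f: () -> Void) {
  f()         // the argument is neither stored nor returned
}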
Example #5
/// checkPartialApplyBody - Check the body of a partial apply to see
/// if the box pointer argument passed to it has uses that would
/// disqualify it from being promoted to a stack location.  Return
/// true if this partial apply will not block our promoting the box.
static bool checkPartialApplyBody(Operand *O) {
  SILFunction *F = ApplySite(O->getUser()).getReferencedFunction();
  // If we cannot examine the function body, assume the worst.
  if (!F || F->empty())
    return false;

  // We don't actually use these because we're not recursively
  // rewriting the partial applies we find.
  llvm::SmallVector<Operand *, 1> PromotedOperands;
  auto calleeArg = F->getArgument(ApplySite(O->getUser()).getCalleeArgIndex(*O));
  return !findUnexpectedBoxUse(calleeArg, /* examinePartialApply = */ false,
                               /* inAppliedFunction = */ true,
                               PromotedOperands);
}
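
As a hedged illustration of the distinction being checked: a box that is only read and written inside a non-escaping closure is a promotion candidate, while a box captured by an escaping closure is not.

func promotable() -> Int {
  var x = 0
  let bump = { x += 1 }   // 'x' is only mutated locally; its box can
  bump()                  // plausibly live on the stack
  return x
}

var escaped: (() -> Void)?
func notPromotable() {
  var y = 0
  escaped = { y += 1 }    // the closure escapes, so the box for 'y'
}                         // must stay on the heap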
void AccessEnforcementDom::run() {
  SILFunction *func = getFunction();
  if (func->empty())
    return;

  PostOrderFunctionInfo *PO = getAnalysis<PostOrderAnalysis>()->get(func);
  auto DAA = DominatedAccessAnalysis(PO).analyze();

  DominanceAnalysis *domAnalysis = getAnalysis<DominanceAnalysis>();
  DominanceInfo *domInfo = domAnalysis->get(func);
  SILLoopAnalysis *loopAnalysis = PM->getAnalysis<SILLoopAnalysis>();
  SILLoopInfo *loopInfo = loopAnalysis->get(func);

  DominatedAccessRemoval eliminationPass(*func, domInfo, loopInfo, DAA);
  if (eliminationPass.optimize())
    invalidateAnalysis(SILAnalysis::InvalidationKind::Instructions);
}
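
A hedged source-level sketch of the redundancy this pass targets: when one dynamic access check dominates an identical one on the same storage, the dominated check can be removed.

var global = 0

func touchTwice() {
  global += 1   // dynamic [modify] access check on 'global'
  global += 1   // dominated by the identical check above; a candidate
}               // for removal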
Example #7
void AccessSummaryAnalysis::processFullApply(FunctionInfo *callerInfo,
                                             unsigned callerArgumentIndex,
                                             FullApplySite apply,
                                             Operand *argumentOperand,
                                             FunctionOrder &order) {
  unsigned operandNumber = argumentOperand->getOperandNumber();
  assert(operandNumber > 0 && "Summarizing apply for non-argument?");

  unsigned calleeArgumentIndex = operandNumber - 1;
  SILFunction *callee = apply.getCalleeFunction();
  // We can't apply a summary for a function whose body we can't see. Since
  // user-provided closures are always in the same module as their callee,
  // this likely indicates a missing begin_access before an open-coded
  // call.
  if (!callee || callee->empty())
    return;

  processCall(callerInfo, callerArgumentIndex, callee, calleeArgumentIndex,
              order);
}
/// For each argument in the range of the callee arguments being applied at the
/// given apply site, use the summary analysis to determine whether the
/// arguments will be accessed in a way that conflicts with any currently in
/// progress accesses. If so, diagnose.
static void checkCaptureAccess(ApplySite Apply, AccessState &State) {
  SILFunction *Callee = Apply.getCalleeFunction();
  if (!Callee || Callee->empty())
    return;

  const AccessSummaryAnalysis::FunctionSummary &FS =
      State.ASA->getOrCreateSummary(Callee);

  for (unsigned ArgumentIndex : range(Apply.getNumArguments())) {

    unsigned CalleeIndex =
        Apply.getCalleeArgIndexOfFirstAppliedArg() + ArgumentIndex;

    const AccessSummaryAnalysis::ArgumentSummary &AS =
        FS.getAccessForArgument(CalleeIndex);

    const auto &SubAccesses = AS.getSubAccesses();

    // Is the capture accessed in the callee?
    if (SubAccesses.empty())
      continue;

    SILValue Argument = Apply.getArgument(ArgumentIndex);
    assert(Argument->getType().isAddress());

    // A valid AccessedStorage should always be found because Unsafe accesses
    // are not tracked by AccessSummaryAnalysis.
    const AccessedStorage &Storage = findValidAccessedStorage(Argument);
    auto AccessIt = State.Accesses->find(Storage);

    // Are there any accesses in progress at the time of the call?
    if (AccessIt == State.Accesses->end())
      continue;

    const AccessInfo &Info = AccessIt->getSecond();
    if (auto Conflict = findConflictingArgumentAccess(AS, Storage, Info))
      State.ConflictingAccesses.push_back(*Conflict);
  }
}
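
To make the sub-access logic concrete, a hypothetical Swift example: accesses to distinct stored properties have disjoint subpaths and do not conflict, while overlapping accesses to the same property are diagnosed.

struct Pair { var a = 0; var b = 0 }

func mutate(_ x: inout Int, _ f: () -> Void) { f() }

func caller() {
  var p = Pair()
  mutate(&p.a) { _ = p.b }   // OK: disjoint subpaths within 'p'
  mutate(&p.a) { _ = p.a }   // conflict: read of 'p.a' during its
}                            // exclusive modification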
Example #9
/// Returns the callee SILFunction called at a call site, in the case
/// that the call is transparent (as in, both that the call is marked
/// with the transparent flag and that the callee function is actually transparently
/// determinable from the SIL) or nullptr otherwise. This assumes that the SIL
/// is already in SSA form.
///
/// In the case that a non-null value is returned, FullArgs contains effective
/// argument operands for the callee function.
static SILFunction *getCalleeFunction(
    SILFunction *F, FullApplySite AI, bool &IsThick,
    SmallVectorImpl<std::pair<SILValue, ParameterConvention>> &CaptureArgs,
    SmallVectorImpl<SILValue> &FullArgs, PartialApplyInst *&PartialApply) {
  IsThick = false;
  PartialApply = nullptr;
  CaptureArgs.clear();
  FullArgs.clear();

  for (const auto &Arg : AI.getArguments())
    FullArgs.push_back(Arg);
  SILValue CalleeValue = AI.getCallee();

  if (auto *LI = dyn_cast<LoadInst>(CalleeValue)) {
    // Conservatively only see through alloc_box; we assume this pass is run
    // immediately after SILGen
    auto *PBI = dyn_cast<ProjectBoxInst>(LI->getOperand());
    if (!PBI)
      return nullptr;
    auto *ABI = dyn_cast<AllocBoxInst>(PBI->getOperand());
    if (!ABI)
      return nullptr;
    // Ensure there are no other uses of alloc_box than the project_box and
    // retains, releases.
    for (Operand *ABIUse : ABI->getUses())
      if (ABIUse->getUser() != PBI &&
          !isa<StrongRetainInst>(ABIUse->getUser()) &&
          !isa<StrongReleaseInst>(ABIUse->getUser()))
        return nullptr;

    // Scan forward from the alloc box to find the first store, which
    // (conservatively) must be in the same basic block as the alloc box
    StoreInst *SI = nullptr;
    for (auto I = SILBasicBlock::iterator(ABI), E = I->getParent()->end();
         I != E; ++I) {
      // If we find the load instruction first, then the load is loading from
      // a non-initialized alloc; this shouldn't really happen but I'm not
      // making any assumptions
      if (&*I == LI)
        return nullptr;
      if ((SI = dyn_cast<StoreInst>(I)) && SI->getDest() == PBI) {
        // We found a store that we know dominates the load; now ensure there
        // are no other uses of the project_box except loads.
        for (Operand *PBIUse : PBI->getUses())
          if (PBIUse->getUser() != SI && !isa<LoadInst>(PBIUse->getUser()))
            return nullptr;
        // We can conservatively see through the store
        break;
      }
    }
    if (!SI)
      return nullptr;
    CalleeValue = SI->getSrc();
  }

  // PartialApply/ThinToThick -> ConvertFunction patterns are generated
  // by @noescape closures.
  //
  // FIXME: We don't currently handle mismatched return types, however, this
  // would be a good optimization to handle and would be as simple as inserting
  // a cast.
  auto skipFuncConvert = [](SILValue CalleeValue) {
    // We can also allow a thin @escape to noescape conversion, such as:
    // %1 = function_ref @thin_closure_impl : $@convention(thin) () -> ()
    // %2 = convert_function %1 :
    //      $@convention(thin) () -> () to $@convention(thin) @noescape () -> ()
    // %3 = thin_to_thick_function %2 :
    //  $@convention(thin) @noescape () -> () to
    //            $@noescape @callee_guaranteed () -> ()
    // %4 = apply %3() : $@noescape @callee_guaranteed () -> ()
    if (auto *ThinToNoescapeCast = dyn_cast<ConvertFunctionInst>(CalleeValue)) {
      auto FromCalleeTy =
          ThinToNoescapeCast->getOperand()->getType().castTo<SILFunctionType>();
      if (FromCalleeTy->getExtInfo().hasContext())
        return CalleeValue;
      auto ToCalleeTy = ThinToNoescapeCast->getType().castTo<SILFunctionType>();
      auto EscapingCalleeTy = ToCalleeTy->getWithExtInfo(
          ToCalleeTy->getExtInfo().withNoEscape(false));
      if (FromCalleeTy != EscapingCalleeTy)
        return CalleeValue;
      return ThinToNoescapeCast->getOperand();
    }

    auto *CFI = dyn_cast<ConvertEscapeToNoEscapeInst>(CalleeValue);
    if (!CFI)
      return CalleeValue;

    // TODO: Handle argument conversion. All the code in this file needs to be
    // cleaned up and generalized. The argument conversion handling in
    // optimizeApplyOfConvertFunctionInst should apply to any combine
    // involving an apply, not just a specific pattern.
    //
    // For now, just handle conversion that doesn't affect argument types,
    // return types, or throws. We could trivially handle any other
    // representation change, but the only one that doesn't affect the ABI and
    // matters here is @noescape, so just check for that.
    auto FromCalleeTy = CFI->getOperand()->getType().castTo<SILFunctionType>();
    auto ToCalleeTy = CFI->getType().castTo<SILFunctionType>();
    auto EscapingCalleeTy =
      ToCalleeTy->getWithExtInfo(ToCalleeTy->getExtInfo().withNoEscape(false));
    if (FromCalleeTy != EscapingCalleeTy)
      return CalleeValue;

    return CFI->getOperand();
  };

  // Look through an escape to @noescape conversion.
  CalleeValue = skipFuncConvert(CalleeValue);

  // We are allowed to see through exactly one "partial apply" instruction or
  // one "thin to thick function" instructions, since those are the patterns
  // generated when using auto closures.
  if (auto *PAI = dyn_cast<PartialApplyInst>(CalleeValue)) {

    // Collect the applied arguments and their convention.
    collectPartiallyAppliedArguments(PAI, CaptureArgs, FullArgs);

    CalleeValue = PAI->getCallee();
    IsThick = true;
    PartialApply = PAI;
  } else if (auto *TTTFI = dyn_cast<ThinToThickFunctionInst>(CalleeValue)) {
    CalleeValue = TTTFI->getOperand();
    IsThick = true;
  }

  CalleeValue = skipFuncConvert(CalleeValue);

  auto *FRI = dyn_cast<FunctionRefInst>(CalleeValue);
  if (!FRI)
    return nullptr;

  SILFunction *CalleeFunction = FRI->getReferencedFunction();

  switch (CalleeFunction->getRepresentation()) {
  case SILFunctionTypeRepresentation::Thick:
  case SILFunctionTypeRepresentation::Thin:
  case SILFunctionTypeRepresentation::Method:
  case SILFunctionTypeRepresentation::Closure:
  case SILFunctionTypeRepresentation::WitnessMethod:
    break;
    
  case SILFunctionTypeRepresentation::CFunctionPointer:
  case SILFunctionTypeRepresentation::ObjCMethod:
  case SILFunctionTypeRepresentation::Block:
    return nullptr;
  }

  // If CalleeFunction is a definition that is not transparent, we cannot
  // process it.
  if (CalleeFunction->isTransparent() == IsNotTransparent)
    return nullptr;

  // If CalleeFunction is a declaration, see if we can load it.
  if (CalleeFunction->empty())
    AI.getModule().loadFunction(CalleeFunction);

  // If we fail to load it, bail.
  if (CalleeFunction->empty())
    return nullptr;

  if (F->isSerialized() &&
      !CalleeFunction->hasValidLinkageForFragileInline()) {
    if (!CalleeFunction->hasValidLinkageForFragileRef()) {
      llvm::errs() << "caller: " << F->getName() << "\n";
      llvm::errs() << "callee: " << CalleeFunction->getName() << "\n";
      llvm_unreachable("Should never be inlining a resilient function into "
                       "a fragile function");
    }
    return nullptr;
  }

  return CalleeFunction;
}
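
As a rough sketch of the load-through-box case handled above (for unoptimized SIL right after SILGen): a mutable local closure variable is stored into an alloc_box, so the apply's callee is a load that the scan traces back through the dominating store to the function_ref.

func caller() {
  var f = { print("hello") }  // SILGen boxes the mutable closure variable
  f()                         // the callee here is a load from the box
}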
Example #10
/// \brief Returns the callee SILFunction called at a call site, in the case
/// that the call is transparent (as in, both that the call is marked
/// with the transparent flag and that the callee function is actually transparently
/// determinable from the SIL) or nullptr otherwise. This assumes that the SIL
/// is already in SSA form.
///
/// In the case that a non-null value is returned, FullArgs contains effective
/// argument operands for the callee function.
static SILFunction *
getCalleeFunction(FullApplySite AI, bool &IsThick,
                  SmallVectorImpl<SILValue>& CaptureArgs,
                  SmallVectorImpl<SILValue>& FullArgs,
                  PartialApplyInst *&PartialApply,
                  SILModule::LinkingMode Mode) {
  IsThick = false;
  PartialApply = nullptr;
  CaptureArgs.clear();
  FullArgs.clear();

  for (const auto &Arg : AI.getArguments())
    FullArgs.push_back(Arg);
  SILValue CalleeValue = AI.getCallee();

  if (LoadInst *LI = dyn_cast<LoadInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);
    // Conservatively only see through alloc_box; we assume this pass is run
    // immediately after SILGen
    SILInstruction *ABI = dyn_cast<AllocBoxInst>(LI->getOperand());
    if (!ABI)
      return nullptr;
    assert(LI->getOperand().getResultNumber() == 1);

    // Scan forward from the alloc box to find the first store, which
    // (conservatively) must be in the same basic block as the alloc box
    StoreInst *SI = nullptr;
    for (auto I = SILBasicBlock::iterator(ABI), E = I->getParent()->end();
         I != E; ++I) {
      // If we find the load instruction first, then the load is loading from
      // a non-initialized alloc; this shouldn't really happen but I'm not
      // making any assumptions
      if (static_cast<SILInstruction*>(I) == LI)
        return nullptr;
      if ((SI = dyn_cast<StoreInst>(I)) && SI->getDest().getDef() == ABI) {
        // We found a store that we know dominates the load; now ensure there
        // are no other uses of the alloc other than loads, retains, releases
        // and dealloc stacks
        for (auto UI = ABI->use_begin(), UE = ABI->use_end(); UI != UE;
             ++UI)
          if (UI.getUser() != SI && !isa<LoadInst>(UI.getUser()) &&
              !isa<StrongRetainInst>(UI.getUser()) &&
              !isa<StrongReleaseInst>(UI.getUser()))
            return nullptr;
        // We can conservatively see through the store
        break;
      }
    }
    if (!SI)
      return nullptr;
    CalleeValue = SI->getSrc();
  }

  // We are allowed to see through exactly one "partial apply" instruction or
  // one "thin to thick function" instructions, since those are the patterns
  // generated when using auto closures.
  if (PartialApplyInst *PAI =
        dyn_cast<PartialApplyInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);

    for (const auto &Arg : PAI->getArguments()) {
      CaptureArgs.push_back(Arg);
      FullArgs.push_back(Arg);
    }

    CalleeValue = PAI->getCallee();
    IsThick = true;
    PartialApply = PAI;
  } else if (ThinToThickFunctionInst *TTTFI =
               dyn_cast<ThinToThickFunctionInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);
    CalleeValue = TTTFI->getOperand();
    IsThick = true;
  }

  FunctionRefInst *FRI = dyn_cast<FunctionRefInst>(CalleeValue);

  if (!FRI)
    return nullptr;

  SILFunction *CalleeFunction = FRI->getReferencedFunction();

  switch (CalleeFunction->getRepresentation()) {
  case SILFunctionTypeRepresentation::Thick:
  case SILFunctionTypeRepresentation::Thin:
  case SILFunctionTypeRepresentation::Method:
  case SILFunctionTypeRepresentation::WitnessMethod:
    break;
    
  case SILFunctionTypeRepresentation::CFunctionPointer:
  case SILFunctionTypeRepresentation::ObjCMethod:
  case SILFunctionTypeRepresentation::Block:
    return nullptr;
  }

  // If CalleeFunction is a declaration, see if we can load it. If we fail to
  // load it, bail.
  if (CalleeFunction->empty()
      && !AI.getModule().linkFunction(CalleeFunction, Mode))
    return nullptr;
  return CalleeFunction;
}
Example #11
static bool removeUnreachableBlocks(SILFunction &F, SILModule &M,
                                    UnreachableUserCodeReportingState *State) {
  if (F.empty())
    return false;

  SILBasicBlockSet Reachable;
  SmallVector<SILBasicBlock*, 128> Worklist;
  Worklist.push_back(&F.front());
  Reachable.insert(&F.front());

  // Collect all reachable blocks by walking the successors.
  do {
    SILBasicBlock *BB = Worklist.pop_back_val();
    for (auto SI = BB->succ_begin(), SE = BB->succ_end(); SI != SE; ++SI) {
      if (Reachable.insert(*SI).second)
        Worklist.push_back(*SI);
    }
  } while (!Worklist.empty());
  assert(Reachable.size() <= F.size());

  // If everything is reachable, we are done.
  if (Reachable.size() == F.size())
    return false;

  // Diagnose user written unreachable code.
  if (State) {
    for (auto BI = State->PossiblyUnreachableBlocks.begin(),
              BE = State->PossiblyUnreachableBlocks.end(); BI != BE; ++BI) {
      const SILBasicBlock *BB = *BI;
      if (!Reachable.count(BB)) {
        llvm::SmallPtrSet<const SILBasicBlock *, 1> visited;
        diagnoseUnreachableBlock(**BI, M, Reachable, State, BB, visited);
      }
    }
  }

  // Remove references from the dead blocks.
  for (auto I = F.begin(), E = F.end(); I != E; ++I) {
    SILBasicBlock *BB = &*I;
    if (Reachable.count(BB))
      continue;

    // Drop references to other blocks.
    recursivelyDeleteTriviallyDeadInstructions(BB->getTerminator(), true);
    NumInstructionsRemoved++;
  }

  // Delete dead instructions and everything that could become dead after
  // their deletion.
  llvm::SmallVector<SILInstruction*, 32> ToBeDeleted;
  for (auto BI = F.begin(), BE = F.end(); BI != BE; ++BI)
    if (!Reachable.count(&*BI))
      for (auto I = BI->begin(), E = BI->end(); I != E; ++I)
        ToBeDeleted.push_back(&*I);
  recursivelyDeleteTriviallyDeadInstructions(ToBeDeleted, true);
  NumInstructionsRemoved += ToBeDeleted.size();

  // Delete the dead blocks.
  for (auto I = F.begin(), E = F.end(); I != E;)
    if (!Reachable.count(&*I)) {
      I = F.getBlocks().erase(I);
      NumBlocksRemoved++;
    } else
      ++I;

  return true;
}
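
For illustration, the kind of user-written unreachable code that the diagnostic path above reports before the dead blocks are deleted:

func alwaysOne() -> Int {
  return 1
  print("never runs")   // diagnosed: code after 'return' will never
}                       // be executed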
static void checkStaticExclusivity(SILFunction &Fn, PostOrderFunctionInfo *PO,
                                   AccessSummaryAnalysis *ASA) {
  // The implementation relies on the following SIL invariants:
  //    - All incoming edges to a block must have the same in-progress
  //      accesses. This enables the analysis to not perform a data flow merge
  //      on incoming edges.
  //    - Further, for a given address, the in-progress accesses must have
  //      begun in the same order on all edges. This ensures
  //      consistent diagnostics across changes to the exploration of the CFG.
  //    - On return from a function there are no in-progress accesses. This
  //      enables a sanity check for lean analysis state at function exit.
  //    - Each end_access instruction corresponds to exactly one begin access
  //      instruction. (This is encoded in the EndAccessInst itself)
  //    - begin_access arguments cannot be basic block arguments.
  //      This enables the analysis to look back to find the *single* storage
  //      location accessed.

  if (Fn.empty())
    return;

  AccessState State(ASA);

  // For each basic block, track the stack of current accesses on
  // exit from that block.
  llvm::SmallDenseMap<SILBasicBlock *, Optional<StorageMap>, 32>
      BlockOutAccesses;

  BlockOutAccesses[Fn.getEntryBlock()] = StorageMap();

  for (auto *BB : PO->getReversePostOrder()) {
    Optional<StorageMap> &BBState = BlockOutAccesses[BB];

    // Because we use a reverse post-order traversal, unless this is the entry
    // block, at least one of its predecessors must have been reached. Use the
    // out state for that predecessor as our in state. The SIL verifier
    // guarantees that all incoming edges have the same current accesses.
    for (auto *Pred : BB->getPredecessorBlocks()) {
      auto it = BlockOutAccesses.find(Pred);
      if (it == BlockOutAccesses.end())
        continue;

      const Optional<StorageMap> &PredAccesses = it->getSecond();
      if (PredAccesses) {
        BBState = PredAccesses;
        break;
      }
    }

    // The in-progress accesses for the current program point, represented
    // as a map from storage locations to the accesses in progress for the
    // location.
    State.Accesses = BBState.getPointer();
    for (auto &I : *BB)
      checkForViolationsAtInstruction(I, State);
  }

  // Now that we've collected violations and suppressed calls, emit
  // diagnostics.
  for (auto &Violation : State.ConflictingAccesses) {
    diagnoseExclusivityViolation(Violation, State.CallsToSwap,
                                 Fn.getASTContext());
  }
}
Example #13
// Returns the callee of an apply_inst if it is basically inlinable.
SILFunction *swift::getEligibleFunction(FullApplySite AI,
                                        InlineSelection WhatToInline) {
  SILFunction *Callee = AI.getReferencedFunction();

  if (!Callee) {
    return nullptr;
  }

  // Not all apply sites can be inlined, even if they're direct.
  if (!SILInliner::canInline(AI))
    return nullptr;

  ModuleDecl *SwiftModule = Callee->getModule().getSwiftModule();
  bool IsInStdlib = (SwiftModule->isStdlibModule() ||
                     SwiftModule->isOnoneSupportModule());

  // Don't inline functions that are marked with the @_semantics or @_effects
  // attribute if the inliner is asked not to inline them.
  if (Callee->hasSemanticsAttrs() || Callee->hasEffectsKind()) {
    if (WhatToInline == InlineSelection::NoSemanticsAndGlobalInit) {
      if (shouldSkipApplyDuringEarlyInlining(AI))
        return nullptr;
      if (Callee->hasSemanticsAttr("inline_late"))
        return nullptr;
    }
    // The "availability" semantics attribute is treated like global-init.
    if (Callee->hasSemanticsAttrs() &&
        WhatToInline != InlineSelection::Everything &&
        (Callee->hasSemanticsAttrThatStartsWith("availability") ||
         (Callee->hasSemanticsAttrThatStartsWith("inline_late")))) {
      return nullptr;
    }
    if (Callee->hasSemanticsAttrs() &&
        WhatToInline == InlineSelection::Everything) {
      if (Callee->hasSemanticsAttrThatStartsWith("inline_late") && IsInStdlib) {
        return nullptr;
      }
    }

  } else if (Callee->isGlobalInit()) {
    if (WhatToInline != InlineSelection::Everything) {
      return nullptr;
    }
  }

  // We can't inline external declarations.
  if (Callee->empty() || Callee->isExternalDeclaration()) {
    return nullptr;
  }

  // Explicitly disabled inlining.
  if (Callee->getInlineStrategy() == NoInline) {
    return nullptr;
  }

  if (!Callee->shouldOptimize()) {
    return nullptr;
  }

  SILFunction *Caller = AI.getFunction();

  // We don't support inlining a function that binds dynamic self because we
  // have no mechanism to preserve the original function's local self metadata.
  if (mayBindDynamicSelf(Callee)) {
    // Check if passed Self is the same as the Self of the caller.
    // In this case, it is safe to inline because both functions
    // use the same Self.
    if (AI.hasSelfArgument() && Caller->hasSelfParam()) {
      auto CalleeSelf = stripCasts(AI.getSelfArgument());
      auto CallerSelf = Caller->getSelfArgument();
      if (CalleeSelf != SILValue(CallerSelf))
        return nullptr;
    } else
      return nullptr;
  }

  // Detect self-recursive calls.
  if (Caller == Callee) {
    return nullptr;
  }

  // A non-fragile function may not be inlined into a fragile function.
  if (Caller->isSerialized() &&
      !Callee->hasValidLinkageForFragileInline()) {
    if (!Callee->hasValidLinkageForFragileRef()) {
      llvm::errs() << "caller: " << Caller->getName() << "\n";
      llvm::errs() << "callee: " << Callee->getName() << "\n";
      llvm_unreachable("Should never be inlining a resilient function into "
                       "a fragile function");
    }
    return nullptr;
  }

  // Inlining self-recursive functions into other functions can result
  // in excessive code duplication since we run the inliner multiple
  // times in our pipeline
  if (calleeIsSelfRecursive(Callee)) {
    return nullptr;
  }

  if (!EnableSILInliningOfGenerics && AI.hasSubstitutions()) {
    // Inlining of generics is not allowed unless it is an @inline(__always)
    // or transparent function.
    if (Callee->getInlineStrategy() != AlwaysInline && !Callee->isTransparent())
      return nullptr;
  }

  // We cannot inline function with layout constraints on its generic types
  // if the corresponding substitution type does not have the same constraints.
  // The reason for this restriction is that we'd need to be able to express
  // in SIL something like casting a value of generic type T into a value of
  // generic type T: _LayoutConstraint, which is impossible currently.
  if (EnableSILInliningOfGenerics && AI.hasSubstitutions()) {
    if (!isCallerAndCalleeLayoutConstraintsCompatible(AI))
      return nullptr;
  }

  // IRGen cannot handle partial_applies containing opened_existentials
  // in its substitutions list.
  if (calleeHasPartialApplyWithOpenedExistentials(AI)) {
    return nullptr;
  }

  return Callee;
}
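
A brief source-level sketch of two of the eligibility rules above (illustrative attribute usage):

@inline(never)
func keepOutOfLine() {}     // NoInline strategy: never eligible

@inline(__always)
func forceInline<T>(_ x: T) -> T { x }  // AlwaysInline: eligible even when
                                        // inlining of generics is disabled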
Example #14
// Returns the callee of an apply_inst if it is basically inlineable.
SILFunction *SILPerformanceInliner::getEligibleFunction(FullApplySite AI) {

  SILFunction *Callee = AI.getCalleeFunction();
  
  if (!Callee) {
    DEBUG(llvm::dbgs() << "        FAIL: Cannot find inlineable callee.\n");
    return nullptr;
  }

  // Don't inline functions that are marked with the @_semantics or @_effects
  // attribute if the inliner is asked not to inline them.
  if (Callee->hasSemanticsAttrs() || Callee->hasEffectsKind()) {
    if (WhatToInline == InlineSelection::NoSemanticsAndGlobalInit) {
      DEBUG(llvm::dbgs() << "        FAIL: Function " << Callee->getName()
            << " has special semantics or effects attribute.\n");
      return nullptr;
    }
    // The "availability" semantics attribute is treated like global-init.
    if (Callee->hasSemanticsAttrs() &&
        WhatToInline != InlineSelection::Everything &&
        Callee->hasSemanticsAttrThatStartsWith("availability")) {
      return nullptr;
    }
  } else if (Callee->isGlobalInit()) {
    if (WhatToInline != InlineSelection::Everything) {
      DEBUG(llvm::dbgs() << "        FAIL: Function " << Callee->getName()
            << " has the global-init attribute.\n");
      return nullptr;
    }
  }

  // We can't inline external declarations.
  if (Callee->empty() || Callee->isExternalDeclaration()) {
    DEBUG(llvm::dbgs() << "        FAIL: Cannot inline external " <<
          Callee->getName() << ".\n");
    return nullptr;
  }

  // Explicitly disabled inlining.
  if (Callee->getInlineStrategy() == NoInline) {
    DEBUG(llvm::dbgs() << "        FAIL: noinline attribute on " <<
          Callee->getName() << ".\n");
    return nullptr;
  }
  
  if (!Callee->shouldOptimize()) {
    DEBUG(llvm::dbgs() << "        FAIL: optimizations disabled on " <<
          Callee->getName() << ".\n");
    return nullptr;
  }

  // We don't support this yet.
  if (AI.hasSubstitutions()) {
    DEBUG(llvm::dbgs() << "        FAIL: Generic substitutions on " <<
          Callee->getName() << ".\n");
    return nullptr;
  }

  // We don't support inlining a function that binds dynamic self because we
  // have no mechanism to preserve the original function's local self metadata.
  if (computeMayBindDynamicSelf(Callee)) {
    DEBUG(llvm::dbgs() << "        FAIL: Binding dynamic Self in " <<
          Callee->getName() << ".\n");
    return nullptr;
  }

  SILFunction *Caller = AI.getFunction();

  // Detect inlining cycles.
  if (hasInliningCycle(Caller, Callee)) {
    DEBUG(llvm::dbgs() << "        FAIL: Detected a recursion inlining " <<
          Callee->getName() << ".\n");
    return nullptr;
  }

  // A non-fragile function may not be inlined into a fragile function.
  if (Caller->isFragile() && !Callee->isFragile()) {
    DEBUG(llvm::dbgs() << "        FAIL: Can't inline fragile " <<
          Callee->getName() << ".\n");
    return nullptr;
  }

  // Inlining self-recursive functions into other functions can result
  // in excessive code duplication since we run the inliner multiple
  // times in our pipeline
  if (calleeIsSelfRecursive(Callee)) {
    DEBUG(llvm::dbgs() << "        FAIL: Callee is self-recursive in "
                       << Callee->getName() << ".\n");
    return nullptr;
  }

  DEBUG(llvm::dbgs() << "        Eligible callee: " <<
        Callee->getName() << "\n");
  
  return Callee;
}
static void checkStaticExclusivity(SILFunction &Fn, PostOrderFunctionInfo *PO,
                                   AccessSummaryAnalysis *ASA) {
  // The implementation relies on the following SIL invariants:
  //    - All incoming edges to a block must have the same in-progress
  //      accesses. This enables the analysis to not perform a data flow merge
  //      on incoming edges.
  //    - Further, for a given address, the in-progress accesses must have
  //      begun in the same order on all edges. This ensures
  //      consistent diagnostics across changes to the exploration of the CFG.
  //    - On return from a function there are no in-progress accesses. This
  //      enables a sanity check for lean analysis state at function exit.
  //    - Each end_access instruction corresponds to exactly one begin access
  //      instruction. (This is encoded in the EndAccessInst itself)
  //    - begin_access arguments cannot be basic block arguments.
  //      This enables the analysis to look back to find the *single* storage
  //      location accessed.

  if (Fn.empty())
    return;

  // Collects calls to the Standard Library swap() for Fix-Its.
  llvm::SmallVector<ApplyInst *, 8> CallsToSwap;

  // Stores the accesses that have been found to conflict. Used to defer
  // emitting diagnostics until we can determine whether they should
  // be suppressed.
  llvm::SmallVector<ConflictingAccess, 4> ConflictingAccesses;

  // For each basic block, track the stack of current accesses on
  // exit from that block.
  llvm::SmallDenseMap<SILBasicBlock *, Optional<StorageMap>, 32>
      BlockOutAccesses;

  BlockOutAccesses[Fn.getEntryBlock()] = StorageMap();

  for (auto *BB : PO->getReversePostOrder()) {
    Optional<StorageMap> &BBState = BlockOutAccesses[BB];

    // Because we use a reverse post-order traversal, unless this is the entry
    // block, at least one of its predecessors must have been reached. Use the
    // out state for that predecessor as our in state. The SIL verifier
    // guarantees that all incoming edges have the same current accesses.
    for (auto *Pred : BB->getPredecessorBlocks()) {
      auto it = BlockOutAccesses.find(Pred);
      if (it == BlockOutAccesses.end())
        continue;

      const Optional<StorageMap> &PredAccesses = it->getSecond();
      if (PredAccesses) {
        BBState = PredAccesses;
        break;
      }
    }

    // The in-progress accesses for the current program point, represented
    // as a map from storage locations to the accesses in progress for the
    // location.
    StorageMap &Accesses = *BBState;

    for (auto &I : *BB) {
      // Apply transfer functions. Beginning an access increments the read
      // or write count for the storage location; ending one decrements it.
      if (auto *BAI = dyn_cast<BeginAccessInst>(&I)) {
        SILAccessKind Kind = BAI->getAccessKind();
        const AccessedStorage &Storage = findAccessedStorage(BAI->getSource());
        AccessInfo &Info = Accesses[Storage];
        const IndexTrieNode *SubPath = ASA->findSubPathAccessed(BAI);
        if (auto Conflict = shouldReportAccess(Info, Kind, SubPath)) {
          ConflictingAccesses.emplace_back(Storage, *Conflict,
                                           RecordedAccess(BAI, SubPath));
        }

        Info.beginAccess(BAI, SubPath);
        continue;
      }

      if (auto *EAI = dyn_cast<EndAccessInst>(&I)) {
        auto It = Accesses.find(findAccessedStorage(EAI->getSource()));
        AccessInfo &Info = It->getSecond();

        BeginAccessInst *BAI = EAI->getBeginAccess();
        const IndexTrieNode *SubPath = ASA->findSubPathAccessed(BAI);
        Info.endAccess(EAI, SubPath);

        // If the storage location has no more in-progress accesses, remove
        // it to keep the StorageMap lean.
        if (!Info.hasAccessesInProgress())
          Accesses.erase(It);
        continue;
      }

      if (auto *AI = dyn_cast<ApplyInst>(&I)) {
        // Record calls to swap() for potential Fix-Its.
        if (isCallToStandardLibrarySwap(AI, Fn.getASTContext()))
          CallsToSwap.push_back(AI);
        else
          checkForViolationsInNoEscapeClosures(Accesses, AI, ASA,
                                               ConflictingAccesses);
        continue;
      }

      if (auto *TAI = dyn_cast<TryApplyInst>(&I)) {
        checkForViolationsInNoEscapeClosures(Accesses, TAI, ASA,
                                             ConflictingAccesses);
        continue;
      }
      // Sanity check to make sure entries are properly removed.
      assert((!isa<ReturnInst>(&I) || Accesses.size() == 0) &&
             "Entries were not properly removed?!");
    }
  }

  // Now that we've collected violations and suppressed calls, emit
  // diagnostics.
  for (auto &Violation : ConflictingAccesses) {
    diagnoseExclusivityViolation(Violation, CallsToSwap, Fn.getASTContext());
  }
}
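
For reference, a sketch of the Standard Library swap() pattern collected above; rather than reporting the conflict directly, the diagnostic can suggest the non-conflicting swapAt alternative.

var numbers = [1, 2, 3]
// Two simultaneous inout accesses to 'numbers'; the fix-it suggests
// 'numbers.swapAt(0, 1)' instead.
swap(&numbers[0], &numbers[1])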
Example #16
// Returns the callee of an apply_inst if it is basically inlineable.
SILFunction *SILPerformanceInliner::getEligibleFunction(FullApplySite AI) {

  SILFunction *Callee = AI.getReferencedFunction();

  if (!Callee) {
    return nullptr;
  }

  // Don't inline functions that are marked with the @_semantics or @_effects
  // attribute if the inliner is asked not to inline them.
  if (Callee->hasSemanticsAttrs() || Callee->hasEffectsKind()) {
    if (WhatToInline == InlineSelection::NoSemanticsAndGlobalInit) {
      return nullptr;
    }
    // The "availability" semantics attribute is treated like global-init.
    if (Callee->hasSemanticsAttrs() &&
        WhatToInline != InlineSelection::Everything &&
        Callee->hasSemanticsAttrThatStartsWith("availability")) {
      return nullptr;
    }
  } else if (Callee->isGlobalInit()) {
    if (WhatToInline != InlineSelection::Everything) {
      return nullptr;
    }
  }

  // We can't inline external declarations.
  if (Callee->empty() || Callee->isExternalDeclaration()) {
    return nullptr;
  }

  // Explicitly disabled inlining.
  if (Callee->getInlineStrategy() == NoInline) {
    return nullptr;
  }
  
  if (!Callee->shouldOptimize()) {
    return nullptr;
  }

  // We don't support this yet.
  if (AI.hasSubstitutions())
    return nullptr;

  SILFunction *Caller = AI.getFunction();

  // We don't support inlining a function that binds dynamic self because we
  // have no mechanism to preserve the original function's local self metadata.
  if (mayBindDynamicSelf(Callee)) {
    // Check if passed Self is the same as the Self of the caller.
    // In this case, it is safe to inline because both functions
    // use the same Self.
    if (AI.hasSelfArgument() && Caller->hasSelfParam()) {
      auto CalleeSelf = stripCasts(AI.getSelfArgument());
      auto CallerSelf = Caller->getSelfArgument();
      if (CalleeSelf != SILValue(CallerSelf))
        return nullptr;
    } else
      return nullptr;
  }

  // Detect self-recursive calls.
  if (Caller == Callee) {
    return nullptr;
  }

  // A non-fragile function may not be inlined into a fragile function.
  if (Caller->isFragile() &&
      !Callee->hasValidLinkageForFragileInline()) {
    if (!Callee->hasValidLinkageForFragileRef()) {
      llvm::errs() << "caller: " << Caller->getName() << "\n";
      llvm::errs() << "callee: " << Callee->getName() << "\n";
      llvm_unreachable("Should never be inlining a resilient function into "
                       "a fragile function");
    }
    return nullptr;
  }

  // Inlining self-recursive functions into other functions can result
  // in excessive code duplication since we run the inliner multiple
  // times in our pipeline
  if (calleeIsSelfRecursive(Callee)) {
    return nullptr;
  }

  return Callee;
}