Code Example #1
File: CopyForwarding.cpp Project: AaronPelzer/swift
/// Check that all users of the destination address of the copy are dominated by
/// the copy. There is no path around the copy that could initialize %dest with a
/// different value.
bool CopyForwarding::areCopyDestUsersDominatedBy(
    CopyAddrInst *Copy, SmallVectorImpl<Operand *> &DestUses) {

  SILValue CopyDest = Copy->getDest();
  DominanceInfo *DT = nullptr;

  for (auto *Use : CopyDest.getUses()) {
    auto *UserInst = Use->getUser();
    if (UserInst == Copy)
      continue;

    // Initialize the dominator tree info.
    if (!DT)
      DT = DomAnalysis->get(Copy->getFunction());

    // Check dominance of the parent blocks.
    if (!DT->dominates(Copy->getParent(), UserInst->getParent()))
      return false;

    bool CheckDominanceInBlock = Copy->getParent() == UserInst->getParent();
    // Check whether Copy is before UserInst.
    if (CheckDominanceInBlock) {
      auto SI = Copy->getIterator(), SE = Copy->getParent()->end();
      for (++SI; SI != SE; ++SI)
        if (&*SI == UserInst)
          break;
      if (SI == SE)
        return false;
    }

    // We can forward to this use.
    DestUses.push_back(Use);
  }
  return true;
}
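A quick note on the in-block check above: when the copy and the user share a basic block, the dominator tree alone cannot order them, so the code scans forward from the copy. A minimal standalone sketch of that ordering scan (Instruction and the list type are hypothetical stand-ins, not SIL classes):

#include <iterator>
#include <list>

struct Instruction { int id; };

// Returns true if `second` appears strictly after `first` within `block`,
// mirroring the forward scan from Copy->getIterator() above.
static bool comesBefore(const std::list<Instruction> &block,
                        std::list<Instruction>::const_iterator first,
                        const Instruction *second) {
  for (auto it = std::next(first); it != block.end(); ++it)
    if (&*it == second)
      return true;
  return false;
}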
Code Example #2
File: ARCAnalysis.cpp Project: 150vb/swift
/// TODO: Refactor this code so the decision on whether or not to accept an
/// instruction is made in a single place.
bool swift::getFinalReleasesForValue(SILValue V, ReleaseTracker &Tracker) {
  llvm::SmallPtrSet<SILBasicBlock *, 16> LiveIn;
  llvm::SmallPtrSet<SILBasicBlock *, 16> UseBlocks;

  // First attempt to get the BB where this value resides.
  auto *DefBB = V->getParentBB();
  if (!DefBB)
    return false;

  bool seenRelease = false;
  SILInstruction *OneRelease = nullptr;

  // We'll treat this like a liveness problem where the value is the def. Each
  // block that has a use of the value has the value live-in unless it is the
  // block with the value.
  for (auto *UI : V->getUses()) {
    auto *User = UI->getUser();
    auto *BB = User->getParent();

    if (!Tracker.isUserAcceptable(User))
      return false;
    Tracker.trackUser(User);

    if (BB != DefBB)
      LiveIn.insert(BB);

    // Also keep track of the blocks with uses.
    UseBlocks.insert(BB);

    // Try to speed up the trivial case of single release/dealloc.
    if (isa<StrongReleaseInst>(User) || isa<DeallocBoxInst>(User)) {
      if (!seenRelease)
        OneRelease = User;
      else
        OneRelease = nullptr;

      seenRelease = true;
    }
  }

  // Only a single release/dealloc? We're done!
  if (OneRelease) {
    Tracker.trackLastRelease(OneRelease);
    return true;
  }

  propagateLiveness(LiveIn, DefBB);

  // Now examine each block we saw a use in. If it has no successors
  // that are in LiveIn, then the last use in the block is the final
  // release/dealloc.
  for (auto *BB : UseBlocks)
    if (!successorHasLiveIn(BB, LiveIn))
      if (!addLastUse(V, BB, Tracker))
        return false;

  return true;
}
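The propagateLiveness helper is not shown on this page. Under the liveness formulation the comments describe, a plausible sketch walks predecessor edges from every use block, marking blocks live-in until the defining block is reached (Block is a hypothetical stand-in for SILBasicBlock):

#include <unordered_set>
#include <vector>

struct Block { std::vector<Block *> preds; };

// Starting from the blocks known to have the value live-in (the use blocks),
// walk predecessor edges and mark each reached block live-in, stopping at
// the block that defines the value.
static void propagateLivenessSketch(std::unordered_set<Block *> &liveIn,
                                    Block *defBlock) {
  std::vector<Block *> worklist(liveIn.begin(), liveIn.end());
  while (!worklist.empty()) {
    Block *bb = worklist.back();
    worklist.pop_back();
    for (Block *pred : bb->preds) {
      if (pred == defBlock)
        continue; // the value is defined here, not live-in
      if (liveIn.insert(pred).second)
        worklist.push_back(pred);
    }
  }
}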
Code Example #3
File: LoopRotate.cpp Project: uygar/swift
static void updateSSAForUseOfValue(
    SILSSAUpdater &Updater, SmallVectorImpl<SILPhiArgument *> &InsertedPHIs,
    const llvm::DenseMap<ValueBase *, SILValue> &ValueMap,
    SILBasicBlock *Header, SILBasicBlock *EntryCheckBlock,
    SILValue Res) {
  // Find the mapped instruction.
  assert(ValueMap.count(Res) && "Expected to find value in map!");
  SILValue MappedValue = ValueMap.find(Res)->second;
  assert(MappedValue);
  assert(Res->getType() == MappedValue->getType() && "The types must match");

  InsertedPHIs.clear();
  Updater.Initialize(Res->getType());
  Updater.AddAvailableValue(Header, Res);
  Updater.AddAvailableValue(EntryCheckBlock, MappedValue);

  // Because of the way that phi nodes are represented, we have to collect all
  // uses before we update SSA. Modifying one phi node can invalidate another,
  // unrelated phi node's operands through the common branch instruction (which
  // has to be modified). This would invalidate a plain ValueUseIterator.
  // Instead, we collect the uses, wrapping uses in branches specially, so
  // that we can reconstruct each use even after the branch has been modified.
  SmallVector<UseWrapper, 8> StoredUses;
  for (auto *U : Res->getUses())
    StoredUses.push_back(UseWrapper(U));
  for (auto U : StoredUses) {
    Operand *Use = U;
    SILInstruction *User = Use->getUser();
    assert(User && "Missing user");

    // Ignore uses in the same basic block.
    if (User->getParent() == Header)
      continue;

    assert(User->getParent() != EntryCheckBlock &&
           "The entry check block should dominate the header");
    Updater.RewriteUse(*Use);
  }
  // Canonicalize inserted phis to avoid extra BB Args.
  for (SILPhiArgument *Arg : InsertedPHIs) {
    if (SILValue Inst = replaceBBArgWithCast(Arg)) {
      Arg->replaceAllUsesWith(Inst);
      // DCE+SimplifyCFG runs as a post-pass cleanup.
      // DCE replaces dead arg values with undef.
      // SimplifyCFG deletes the dead BB arg.
    }
  }
}
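The snapshot-before-rewrite pattern described in the comment is independent of SIL: mutating the IR while iterating a live use list would invalidate the iterator, so the uses are copied into stable storage first. A generic sketch with hypothetical Use/Value types:

#include <vector>

struct Use { int slot; };
struct Value { std::vector<Use *> uses; };

// Snapshot the use list first: the copy in `stored` stays valid even if the
// rewrite callback edits v.uses while we iterate.
template <typename RewriteFn>
static void rewriteAllUses(Value &v, RewriteFn rewrite) {
  std::vector<Use *> stored(v.uses.begin(), v.uses.end());
  for (Use *u : stored)
    rewrite(*u);
}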
Code Example #4
File: CopyForwarding.cpp Project: adrfer/swift
/// Given an address defined by 'Def', find the object root and all direct uses,
/// not including:
/// - 'Def' itself
/// - Transitive uses of 'Def' (listed elsewhere in DestUserInsts)
/// 
/// If the returned root is not 'Def' itself, then 'Def' must be an address
/// projection that can be trivially rematerialized with the root as its
/// operand.
static ValueBase *
findAddressRootAndUsers(ValueBase *Def,
                        SmallPtrSetImpl<SILInstruction*> &RootUserInsts) {
  if (isa<InitEnumDataAddrInst>(Def) || isa<InitExistentialAddrInst>(Def)) {
    SILValue InitRoot = cast<SILInstruction>(Def)->getOperand(0);
    for (auto *Use : InitRoot.getUses()) {
      auto *UserInst = Use->getUser();
      if (UserInst == Def)
        continue;
      RootUserInsts.insert(UserInst);
    }
    return InitRoot.getDef();
  }
  return Def;
}
Code Example #5
File: AliasAnalysis.cpp Project: adrfer/swift
/// Look at the origin/user ValueBase of V to see if any of them is a
/// TypedAccessOracle, which enables one to ascertain via undefined behavior
/// the "true" type of the instruction.
static SILType findTypedAccessType(SILValue V) {
  // First look at the origin of V and see if we have any instruction that is a
  // typed oracle.
  if (auto *I = dyn_cast<SILInstruction>(V))
    if (isTypedAccessOracle(I))
      return V.getType();

  // Then look at any uses of V that potentially could act as a typed access
  // oracle.
  for (auto Use : V.getUses())
    if (isTypedAccessOracle(Use->getUser()))
      return V.getType();

  // Otherwise, return an empty SILType.
  return SILType();
}
Code Example #6
/// Return all recursive users of V, looking through users which propagate
/// RCIdentity. *NOTE* This ignores obvious ARC escapes where a potential
/// user of the RC is not managed by ARC.
///
/// We only use the instruction analysis here.
void RCIdentityFunctionInfo::getRCUsers(
    SILValue InputValue, llvm::SmallVectorImpl<SILInstruction *> &Users) {
  // Add V to the worklist.
  llvm::SmallVector<SILValue, 8> Worklist;
  Worklist.push_back(InputValue);

  // A set used to ensure we only visit users once.
  llvm::SmallPtrSet<SILInstruction *, 8> VisitedInsts;

  // Then until we finish the worklist...
  while (!Worklist.empty()) {
    // Pop off the top value.
    SILValue V = Worklist.pop_back_val();

    // For each user of V...
    for (auto *Op : V.getUses()) {
      SILInstruction *User = Op->getUser();

      // If we have already visited this user, continue.
      if (!VisitedInsts.insert(User).second)
        continue;

      // Otherwise attempt to strip off one layer of RC identical instructions
      // from User.
      SILValue StrippedRCID = stripRCIdentityPreservingInsts(User);

      // If StrippedRCID is not V, then we know that User's result is
      // conservatively not RCIdentical to V.
      if (StrippedRCID != V) {
        // If the user is extracting a trivial field of an aggregate structure
        // that does not overlap with the ref counted part of the aggregate, we
        // can ignore it.
        if (isNonOverlappingTrivialAccess(User))
          continue;

        // Otherwise, it is an RC user that our user wants.
        Users.push_back(User);
        continue;
      }

      // Otherwise, add all of User's uses to our list to continue searching.
      for (unsigned i = 0, e = User->getNumTypes(); i != e; ++i) {
        Worklist.push_back(SILValue(User, i));
      }
    }
  }
}
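The VisitedInsts.insert(User).second test above does double duty: it records the visit and reports whether the user was already present, in one call. A tiny standalone demonstration of the idiom:

#include <cassert>
#include <set>

int main() {
  std::set<int> visited;
  assert(visited.insert(42).second);   // first insertion: newly added
  assert(!visited.insert(42).second);  // second insertion: already present
  return 0;
}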
Code Example #7
File: AllocBoxToStack.cpp Project: AaronTKD/swift
static bool partialApplyEscapes(SILValue V, bool examineApply) {
  for (auto UI : V->getUses()) {
    auto *User = UI->getUser();

    // These instructions do not cause the address to escape.
    if (!useCaptured(UI))
      continue;

    if (auto apply = dyn_cast<ApplyInst>(User)) {
      // Applying a function does not cause the function to escape.
      if (UI->getOperandNumber() == 0)
        continue;

      // apply instructions do not capture the pointer when it is passed
      // indirectly.
      if (isIndirectConvention(
            apply->getArgumentConvention(UI->getOperandNumber()-1)))
        continue;

      // Optionally drill down into an apply to see if the operand is
      // captured in or returned from the apply.
      if (examineApply && !partialApplyArgumentEscapes(UI))
        continue;
    }

    // partial_apply instructions do not allow the pointer to escape
    // when it is passed indirectly, unless the partial_apply itself
    // escapes.
    if (auto partialApply = dyn_cast<PartialApplyInst>(User)) {
      auto args = partialApply->getArguments();
      auto params = partialApply->getSubstCalleeType()
        ->getParameters();
      params = params.slice(params.size() - args.size(), args.size());
      if (params[UI->getOperandNumber()-1].isIndirect()) {
        if (partialApplyEscapes(partialApply, /*examineApply = */ true))
          return true;
        continue;
      }
    }

    return true;
  }

  return false;
}
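The control flow above follows the usual conservative shape of an escape scan: every use must be positively recognized as non-capturing, and anything left unclassified counts as an escape. A generic sketch of that shape (UseKind and GUse are hypothetical stand-ins):

#include <vector>

enum class UseKind { NonCapturing, IndirectArg, Unknown };
struct GUse { UseKind kind; };

static bool escapesSketch(const std::vector<GUse> &uses) {
  for (const GUse &u : uses) {
    switch (u.kind) {
    case UseKind::NonCapturing:
    case UseKind::IndirectArg:
      continue; // positively recognized as safe: keep scanning
    case UseKind::Unknown:
      return true; // anything unrecognized is treated as an escape
    }
  }
  return false; // every use was accounted for
}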
Code Example #8
File: CopyForwarding.cpp Project: AaronPelzer/swift
/// Attempt to forward, then backward propagate this copy.
///
/// The caller has already proven that the lifetime of the value being copied ends
/// at the copy. (Either it is a [take] or is immediately destroyed).
///
/// If the forwarded copy is not an [init], then insert a destroy of the copy's
/// dest.
bool CopyForwarding::propagateCopy(CopyAddrInst *CopyInst) {
  if (!EnableCopyForwarding)
    return false;

  SILValue CopyDest = CopyInst->getDest();
  SILBasicBlock *BB = CopyInst->getParent();

  // Gather a list of CopyDest users in this block.
  SmallPtrSet<SILInstruction*, 16> DestUserInsts;
  for (auto UI : CopyDest.getUses()) {
    SILInstruction *UserInst = UI->getUser();
    if (UserInst != CopyInst && UserInst->getParent() == BB)
      DestUserInsts.insert(UserInst);
  }
  // Note that DestUserInsts is likely empty when the dest is an 'out' argument,
  // allowing us to go straight to backward propagation.
  if (forwardPropagateCopy(CopyInst, DestUserInsts)) {
    DEBUG(llvm::dbgs() << "  Forwarding Copy:" << *CopyInst);
    if (!CopyInst->isInitializationOfDest()) {
      // Replace the original copy with a destroy. We may be able to hoist it
      // more in another pass but don't currently iterate.
      SILBuilderWithScope(CopyInst)
          .createDestroyAddr(CopyInst->getLoc(), CopyInst->getDest());
    }
    CopyInst->eraseFromParent();
    HasChanged = true;
    ++NumCopyForward;
    return true;
  }
  // Forward propagation failed. Attempt to backward propagate.
  if (CopyInst->isInitializationOfDest()
      && backwardPropagateCopy(CopyInst, DestUserInsts)) {
    DEBUG(llvm::dbgs() << "  Reversing Copy:" << *CopyInst);
    CopyInst->eraseFromParent();
    HasChanged = true;
    ++NumCopyBackward;
    return true;
  }
  return false;
}
Code Example #9
// Attempt to remove the array allocated at NewArrayValue and release its
// refcounted elements.
//
// This is tightly coupled with the implementation of array.uninitialized.
// The call to allocate an uninitialized array returns two values:
// (Array<E> ArrayBase, UnsafeMutable<E> ArrayElementStorage)
//
// TODO: This relies on the lowest level array.uninitialized not being
// inlined. To do better we could either run this pass before semantic inlining,
// or we could also handle calls to array.init.
static bool removeAndReleaseArray(SILValue NewArrayValue) {
  TupleExtractInst *ArrayDef = nullptr;
  TupleExtractInst *StorageAddress = nullptr;
  for (auto *Op : NewArrayValue->getUses()) {
    auto *TupleElt = dyn_cast<TupleExtractInst>(Op->getUser());
    if (!TupleElt)
      return false;
    switch (TupleElt->getFieldNo()) {
    default:
      return false;
    case 0:
      ArrayDef = TupleElt;
      break;
    case 1:
      StorageAddress = TupleElt;
      break;
    }
  }
  if (!ArrayDef)
    return false; // No Array object to delete.

  assert(!ArrayDef->getType().isTrivial(ArrayDef->getModule()) &&
         "Array initialization should produce the proper tuple type.");

  // Analyze the array object uses.
  DeadObjectAnalysis DeadArray(ArrayDef);
  if (!DeadArray.analyze())
    return false;

  // Require all stores to be into the array storage not the array object,
  // otherwise bail.
  bool HasStores = false;
  DeadArray.visitStoreLocations([&](ArrayRef<StoreInst*>){ HasStores = true; });
  if (HasStores)
    return false;

  // Remove references to empty arrays.
  if (!StorageAddress) {
    removeInstructions(DeadArray.getAllUsers());
    return true;
  }
  assert(StorageAddress->getType().isTrivial(ArrayDef->getModule()) &&
         "Array initialization should produce the proper tuple type.");

  // Analyze the array storage uses.
  DeadObjectAnalysis DeadStorage(StorageAddress);
  if (!DeadStorage.analyze())
    return false;

  // Find array object lifetime.
  ValueLifetimeAnalysis VLA(ArrayDef);
  ValueLifetime Lifetime = VLA.computeFromUserList(DeadArray.getAllUsers());

  // Check that all storage users are in the Array's live blocks and never the
  // last user.
  for (auto *User : DeadStorage.getAllUsers()) {
    auto *BB = User->getParent();
    if (!VLA.successorHasLiveIn(BB)
        && VLA.findLastSpecifiedUseInBlock(BB) == User) {
      return false;
    }
  }
  // For each store location, insert releases.
  // This makes a strong assumption that the allocated object is released on all
  // paths in which some object initialization occurs.
  SILSSAUpdater SSAUp;
  DeadStorage.visitStoreLocations([&] (ArrayRef<StoreInst*> Stores) {
      insertReleases(Stores, Lifetime.getLastUsers(), SSAUp);
    });

  // Delete all uses of the dead array and its storage address.
  removeInstructions(DeadArray.getAllUsers());
  removeInstructions(DeadStorage.getAllUsers());

  return true;
}
Code Example #10
File: DeadObjectElimination.cpp Project: apple/swift
// Attempt to remove the array allocated at NewArrayValue and release its
// refcounted elements.
//
// This is tightly coupled with the implementation of array.uninitialized.
// The call to allocate an uninitialized array returns two values:
// (Array<E> ArrayBase, UnsafeMutable<E> ArrayElementStorage)
//
// TODO: This relies on the lowest level array.uninitialized not being
// inlined. To do better we could either run this pass before semantic inlining,
// or we could also handle calls to array.init.
static bool removeAndReleaseArray(SILValue NewArrayValue, bool &CFGChanged) {
  TupleExtractInst *ArrayDef = nullptr;
  TupleExtractInst *StorageAddress = nullptr;
  for (auto *Op : NewArrayValue->getUses()) {
    auto *TupleElt = dyn_cast<TupleExtractInst>(Op->getUser());
    if (!TupleElt)
      return false;
    if (TupleElt->getFieldNo() == 0 && !ArrayDef) {
      ArrayDef = TupleElt;
    } else if (TupleElt->getFieldNo() == 1 && !StorageAddress) {
      StorageAddress = TupleElt;
    } else {
      return false;
    }
  }
  if (!ArrayDef)
    return false; // No Array object to delete.

  assert(!ArrayDef->getType().isTrivial(ArrayDef->getModule()) &&
         "Array initialization should produce the proper tuple type.");

  // Analyze the array object uses.
  DeadObjectAnalysis DeadArray(ArrayDef);
  if (!DeadArray.analyze())
    return false;

  // Require all stores to be into the array storage not the array object,
  // otherwise bail.
  bool HasStores = false;
  DeadArray.visitStoreLocations([&](ArrayRef<StoreInst*>){ HasStores = true; });
  if (HasStores)
    return false;

  // Remove references to empty arrays.
  if (!StorageAddress) {
    removeInstructions(DeadArray.getAllUsers());
    return true;
  }
  assert(StorageAddress->getType().isTrivial(ArrayDef->getModule()) &&
         "Array initialization should produce the proper tuple type.");

  // Analyze the array storage uses.
  DeadObjectAnalysis DeadStorage(StorageAddress);
  if (!DeadStorage.analyze())
    return false;

  // Find array object lifetime.
  ValueLifetimeAnalysis VLA(NewArrayValue, DeadArray.getAllUsers());

  // Check that all storage users are in the Array's live blocks.
  for (auto *User : DeadStorage.getAllUsers()) {
    if (!VLA.isWithinLifetime(User))
      return false;
  }
  // For each store location, insert releases.
  // This makes a strong assumption that the allocated object is released on all
  // paths in which some object initialization occurs.
  SILSSAUpdater SSAUp;
  ValueLifetimeAnalysis::Frontier ArrayFrontier;
  CFGChanged |= !VLA.computeFrontier(ArrayFrontier,
                                     ValueLifetimeAnalysis::IgnoreExitEdges);

  DeadStorage.visitStoreLocations([&] (ArrayRef<StoreInst*> Stores) {
      insertReleases(Stores, ArrayFrontier, SSAUp);
    });

  // Delete all uses of the dead array and its storage address.
  removeInstructions(DeadArray.getAllUsers());
  removeInstructions(DeadStorage.getAllUsers());

  return true;
}
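Relative to example #9, this later revision of the same function asks ValueLifetimeAnalysis for a frontier instead of tracking last users by hand. Loosely, the frontier is the set of first program points past the value's live region; a block-granularity sketch of the idea follows (the real analysis works at instruction granularity and may have to split critical edges, which is why CFGChanged is reported):

#include <unordered_set>
#include <vector>

struct Node { std::vector<Node *> succs; };

// The frontier: successors of live blocks that are not themselves live,
// i.e. the first blocks reached after the value's lifetime has ended.
static std::vector<Node *>
frontierSketch(const std::unordered_set<Node *> &liveBlocks) {
  std::unordered_set<Node *> seen;
  std::vector<Node *> frontier;
  for (Node *bb : liveBlocks)
    for (Node *succ : bb->succs)
      if (!liveBlocks.count(succ) && seen.insert(succ).second)
        frontier.push_back(succ);
  return frontier;
}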
Code Example #11
File: PMOMemoryUseCollector.cpp Project: karwa/swift
bool ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
  assert(Pointer->getType().isAddress() &&
         "Walked through the pointer to the value?");
  SILType PointeeType = Pointer->getType().getObjectType();

  /// This keeps track of instructions in the use list that touch multiple tuple
  /// elements and should be scalarized.  This is done as a second phase to
  /// avoid invalidating the use iterator.
  ///
  SmallVector<SILInstruction *, 4> UsesToScalarize;

  for (auto *UI : Pointer->getUses()) {
    auto *User = UI->getUser();

    // struct_element_addr P, #field indexes into the current element.
    if (auto *SEAI = dyn_cast<StructElementAddrInst>(User)) {
      if (!collectStructElementUses(SEAI, BaseEltNo))
        return false;
      continue;
    }

    // Instructions that compute a subelement are handled by a helper.
    if (auto *TEAI = dyn_cast<TupleElementAddrInst>(User)) {
      if (!collectTupleElementUses(TEAI, BaseEltNo))
        return false;
      continue;
    }

    // Look through begin_access.
    if (auto I = dyn_cast<BeginAccessInst>(User)) {
      if (!collectUses(I, BaseEltNo))
        return false;
      continue;
    }

    // Ignore end_access.
    if (isa<EndAccessInst>(User)) {
      continue;
    }

    // Loads are a use of the value.
    if (isa<LoadInst>(User)) {
      if (PointeeType.is<TupleType>())
        UsesToScalarize.push_back(User);
      else
        addElementUses(BaseEltNo, PointeeType, User, PMOUseKind::Load);
      continue;
    }

#define NEVER_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
    if (isa<Load##Name##Inst>(User)) { \
      Uses.push_back(PMOMemoryUse(User, PMOUseKind::Load, BaseEltNo, 1)); \
      continue; \
    }
#include "swift/AST/ReferenceStorage.def"

    // Stores *to* the allocation are writes.
    if (isa<StoreInst>(User) && UI->getOperandNumber() == 1) {
      if (PointeeType.is<TupleType>()) {
        UsesToScalarize.push_back(User);
        continue;
      }

      // Coming out of SILGen, we assume that raw stores are initializations,
      // unless they have trivial type (which we classify as InitOrAssign).
      PMOUseKind Kind;
      if (InStructSubElement)
        Kind = PMOUseKind::PartialStore;
      else if (PointeeType.isTrivial(User->getModule()))
        Kind = PMOUseKind::InitOrAssign;
      else
        Kind = PMOUseKind::Initialization;

      addElementUses(BaseEltNo, PointeeType, User, Kind);
      continue;
    }

#define NEVER_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
    if (auto *SI = dyn_cast<Store##Name##Inst>(User)) { \
      if (UI->getOperandNumber() == 1) { \
        PMOUseKind Kind; \
        if (InStructSubElement) \
          Kind = PMOUseKind::PartialStore; \
        else if (SI->isInitializationOfDest()) \
          Kind = PMOUseKind::Initialization; \
        else \
          Kind = PMOUseKind::Assign; \
        Uses.push_back(PMOMemoryUse(User, Kind, BaseEltNo, 1)); \
        continue; \
      } \
    }
#include "swift/AST/ReferenceStorage.def"

    if (auto *CAI = dyn_cast<CopyAddrInst>(User)) {
      // If this is a copy of a tuple, we should scalarize it so that we don't
      // have an access that crosses elements.
      if (PointeeType.is<TupleType>()) {
        UsesToScalarize.push_back(CAI);
        continue;
      }

      // If this is the source of the copy_addr, then this is a load.  If it is
      // the destination, then this is an unknown assignment.  Note that we'll
      // revisit this instruction and add it to Uses twice if it is both a load
      // and store to the same aggregate.
      PMOUseKind Kind;
      if (UI->getOperandNumber() == 0)
        Kind = PMOUseKind::Load;
      else if (InStructSubElement)
        Kind = PMOUseKind::PartialStore;
      else if (CAI->isInitializationOfDest())
        Kind = PMOUseKind::Initialization;
      else
        Kind = PMOUseKind::Assign;

      addElementUses(BaseEltNo, PointeeType, User, Kind);
      continue;
    }

    // The apply instruction does not capture the pointer when it is passed
    // through 'inout' arguments or for indirect returns.  InOut arguments are
    // treated as uses and may-store's, but an indirect return is treated as a
    // full store.
    //
    // Note that partial_apply instructions always close over their argument.
    //
    if (auto *Apply = dyn_cast<ApplyInst>(User)) {
      auto substConv = Apply->getSubstCalleeConv();
      unsigned ArgumentNumber = UI->getOperandNumber() - 1;

      // If this is an out-parameter, it is like a store.
      unsigned NumIndirectResults = substConv.getNumIndirectSILResults();
      if (ArgumentNumber < NumIndirectResults) {
        // We do not support initializing sub members. This is an old
        // restriction from when this code was used by Definite
        // Initialization. With proper code review, we can remove this, but for
        // now, let's be conservative.
        if (InStructSubElement) {
          return false;
        }
        addElementUses(BaseEltNo, PointeeType, User,
                       PMOUseKind::Initialization);
        continue;

        // Otherwise, adjust the argument index.
      } else {
        ArgumentNumber -= NumIndirectResults;
      }

      auto ParamConvention =
          substConv.getParameters()[ArgumentNumber].getConvention();

      switch (ParamConvention) {
      case ParameterConvention::Direct_Owned:
      case ParameterConvention::Direct_Unowned:
      case ParameterConvention::Direct_Guaranteed:
        llvm_unreachable("address value passed to indirect parameter");

      // If this is an in-parameter, it is like a load.
      case ParameterConvention::Indirect_In:
      case ParameterConvention::Indirect_In_Constant:
      case ParameterConvention::Indirect_In_Guaranteed:
        addElementUses(BaseEltNo, PointeeType, User, PMOUseKind::IndirectIn);
        continue;

      // If this is an @inout parameter, it is like both a load and store.
      case ParameterConvention::Indirect_Inout:
      case ParameterConvention::Indirect_InoutAliasable: {
        // If we're in the initializer for a struct, and this is a call to a
        // mutating method, we model that as an escape of self.  If an
        // individual sub-member is passed as inout, then we model that as an
        // inout use.
        addElementUses(BaseEltNo, PointeeType, User, PMOUseKind::InOutUse);
        continue;
      }
      }
      llvm_unreachable("bad parameter convention");
    }

    // init_existential_addr is modeled as an initialization store.
    if (isa<InitExistentialAddrInst>(User)) {
      // init_existential_addr should not apply to struct subelements.
      if (InStructSubElement) {
        return false;
      }
      Uses.push_back(
          PMOMemoryUse(User, PMOUseKind::Initialization, BaseEltNo, 1));
      continue;
    }

    // open_existential_addr is a use of the protocol value,
    // so it is modeled as a load.
    if (isa<OpenExistentialAddrInst>(User)) {
      Uses.push_back(PMOMemoryUse(User, PMOUseKind::Load, BaseEltNo, 1));
      // TODO: Is it safe to ignore all uses of the open_existential_addr?
      continue;
    }

    // We model destroy_addr as a release of the entire value.
    if (isa<DestroyAddrInst>(User)) {
      Releases.push_back(User);
      continue;
    }

    if (isa<DeallocStackInst>(User)) {
      continue;
    }

    // Sanitizer instrumentation is not user visible, so it should not
    // count as a use and must not affect compile-time diagnostics.
    if (isSanitizerInstrumentation(User))
      continue;

    // Otherwise, the use is something complicated, it escapes.
    addElementUses(BaseEltNo, PointeeType, User, PMOUseKind::Escape);
  }

  // Now that we've walked all of the immediate uses, scalarize any operations
  // working on tuples if we need to for canonicalization or analysis reasons.
  if (!UsesToScalarize.empty()) {
    SILInstruction *PointerInst = Pointer->getDefiningInstruction();
    SmallVector<SILValue, 4> ElementAddrs;
    SILBuilderWithScope AddrBuilder(++SILBasicBlock::iterator(PointerInst),
                                    PointerInst);
    getScalarizedElementAddresses(Pointer, AddrBuilder, PointerInst->getLoc(),
                                  ElementAddrs);

    SmallVector<SILValue, 4> ElementTmps;
    for (auto *User : UsesToScalarize) {
      ElementTmps.clear();

      LLVM_DEBUG(llvm::errs() << "  *** Scalarizing: " << *User << "\n");

      // Scalarize LoadInst
      if (auto *LI = dyn_cast<LoadInst>(User)) {
        SILValue Result = scalarizeLoad(LI, ElementAddrs);
        LI->replaceAllUsesWith(Result);
        LI->eraseFromParent();
        continue;
      }

      // Scalarize StoreInst
      if (auto *SI = dyn_cast<StoreInst>(User)) {
        SILBuilderWithScope B(User, SI);
        getScalarizedElements(SI->getOperand(0), ElementTmps, SI->getLoc(), B);

        for (unsigned i = 0, e = ElementAddrs.size(); i != e; ++i)
          B.createStore(SI->getLoc(), ElementTmps[i], ElementAddrs[i],
                        StoreOwnershipQualifier::Unqualified);
        SI->eraseFromParent();
        continue;
      }

      // Scalarize CopyAddrInst.
      auto *CAI = cast<CopyAddrInst>(User);
      SILBuilderWithScope B(User, CAI);

      // Determine if this is a copy *from* or *to* "Pointer".
      if (CAI->getSrc() == Pointer) {
        // Copy from pointer.
        getScalarizedElementAddresses(CAI->getDest(), B, CAI->getLoc(),
                                      ElementTmps);
        for (unsigned i = 0, e = ElementAddrs.size(); i != e; ++i)
          B.createCopyAddr(CAI->getLoc(), ElementAddrs[i], ElementTmps[i],
                           CAI->isTakeOfSrc(), CAI->isInitializationOfDest());

      } else {
        getScalarizedElementAddresses(CAI->getSrc(), B, CAI->getLoc(),
                                      ElementTmps);
        for (unsigned i = 0, e = ElementAddrs.size(); i != e; ++i)
          B.createCopyAddr(CAI->getLoc(), ElementTmps[i], ElementAddrs[i],
                           CAI->isTakeOfSrc(), CAI->isInitializationOfDest());
      }
      CAI->eraseFromParent();
    }

    // Now that we've scalarized some stuff, recurse down into the newly created
    // element address computations to recursively process it.  This can cause
    // further scalarization.
    if (llvm::any_of(ElementAddrs, [&](SILValue V) {
          return !collectTupleElementUses(cast<TupleElementAddrInst>(V),
                                          BaseEltNo);
        })) {
      return false;
    }
  }

  return true;
}
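Scalarization, as performed above, rewrites one tuple-level memory operation into per-element operations so that no recorded use crosses tuple elements. Reduced to plain C++ over hypothetical Val/Addr stand-ins, the store case looks like this:

#include <cstddef>
#include <vector>

struct Val { int payload; };
struct Addr { Val *slot; };

// One scalar store per element; afterwards no single access spans the tuple.
static void scalarizeTupleStore(const std::vector<Val> &elements,
                                const std::vector<Addr> &elementAddrs) {
  for (std::size_t i = 0, e = elementAddrs.size(); i != e; ++i)
    *elementAddrs[i].slot = elements[i];
}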
Code Example #12
File: DIMemoryUseCollector.cpp Project: khour/swift
void ElementUseCollector::collectUses(SILValue Pointer, unsigned BaseEltNo) {
  assert(Pointer->getType().isAddress() &&
         "Walked through the pointer to the value?");
  SILType PointeeType = Pointer->getType().getObjectType();

  /// This keeps track of instructions in the use list that touch multiple tuple
  /// elements and should be scalarized.  This is done as a second phase to
  /// avoid invalidating the use iterator.
  ///
  SmallVector<SILInstruction*, 4> UsesToScalarize;

  for (auto *UI : Pointer->getUses()) {
    auto *User = UI->getUser();

    // struct_element_addr P, #field indexes into the current element.
    if (auto *SEAI = dyn_cast<StructElementAddrInst>(User)) {
      collectStructElementUses(SEAI, BaseEltNo);
      continue;
    }

    // Instructions that compute a subelement are handled by a helper.
    if (auto *TEAI = dyn_cast<TupleElementAddrInst>(User)) {
      collectTupleElementUses(TEAI, BaseEltNo);
      continue;
    }

    // Look through begin_access.
    if (auto I = dyn_cast<BeginAccessInst>(User)) {
      collectUses(I, BaseEltNo);
      continue;
    }

    // Ignore end_access.
    if (isa<EndAccessInst>(User)) {
      continue;
    }
    
    // Loads are a use of the value.
    if (isa<LoadInst>(User)) {
      if (PointeeType.is<TupleType>())
        UsesToScalarize.push_back(User);
      else
        addElementUses(BaseEltNo, PointeeType, User, DIUseKind::Load);
      continue;
    }

    if (isa<LoadWeakInst>(User)) {
      Uses.push_back(DIMemoryUse(User, DIUseKind::Load, BaseEltNo, 1));
      continue;
    }

    // Stores *to* the allocation are writes.
    if ((isa<StoreInst>(User) || isa<AssignInst>(User)) &&
        UI->getOperandNumber() == 1) {
      if (PointeeType.is<TupleType>()) {
        UsesToScalarize.push_back(User);
        continue;
      }
      
      // Coming out of SILGen, we assume that raw stores are initializations,
      // unless they have trivial type (which we classify as InitOrAssign).
      DIUseKind Kind;
      if (InStructSubElement)
        Kind = DIUseKind::PartialStore;
      else if (isa<AssignInst>(User))
        Kind = DIUseKind::InitOrAssign;
      else if (PointeeType.isTrivial(User->getModule()))
        Kind = DIUseKind::InitOrAssign;
      else
        Kind = DIUseKind::Initialization;
      
      addElementUses(BaseEltNo, PointeeType, User, Kind);
      continue;
    }

    if (auto *SWI = dyn_cast<StoreWeakInst>(User))
      if (UI->getOperandNumber() == 1) {
        DIUseKind Kind;
        if (InStructSubElement)
          Kind = DIUseKind::PartialStore;
        else if (SWI->isInitializationOfDest())
          Kind = DIUseKind::Initialization;
        else
          Kind = DIUseKind::Assign;
        Uses.push_back(DIMemoryUse(User, Kind, BaseEltNo, 1));
        continue;
      }

    if (auto *SUI = dyn_cast<StoreUnownedInst>(User))
      if (UI->getOperandNumber() == 1) {
        DIUseKind Kind;
        if (InStructSubElement)
          Kind = DIUseKind::PartialStore;
        else if (SUI->isInitializationOfDest())
          Kind = DIUseKind::Initialization;
        else
          Kind = DIUseKind::Assign;
        Uses.push_back(DIMemoryUse(User, Kind, BaseEltNo, 1));
        continue;
      }

    if (auto *CAI = dyn_cast<CopyAddrInst>(User)) {
      // If this is a copy of a tuple, we should scalarize it so that we don't
      // have an access that crosses elements.
      if (PointeeType.is<TupleType>()) {
        UsesToScalarize.push_back(CAI);
        continue;
      }
      
      // If this is the source of the copy_addr, then this is a load.  If it is
      // the destination, then this is an unknown assignment.  Note that we'll
      // revisit this instruction and add it to Uses twice if it is both a load
      // and store to the same aggregate.
      DIUseKind Kind;
      if (UI->getOperandNumber() == 0)
        Kind = DIUseKind::Load;
      else if (InStructSubElement)
        Kind = DIUseKind::PartialStore;
      else if (CAI->isInitializationOfDest())
        Kind = DIUseKind::Initialization;
      else
        Kind = DIUseKind::Assign;

      addElementUses(BaseEltNo, PointeeType, User, Kind);
      continue;
    }
    
    // The apply instruction does not capture the pointer when it is passed
    // through 'inout' arguments or for indirect returns.  InOut arguments are
    // treated as uses and may-store's, but an indirect return is treated as a
    // full store.
    //
    // Note that partial_apply instructions always close over their argument.
    //
    if (auto *Apply = dyn_cast<ApplyInst>(User)) {
      auto substConv = Apply->getSubstCalleeConv();
      unsigned ArgumentNumber = UI->getOperandNumber()-1;

      // If this is an out-parameter, it is like a store.
      unsigned NumIndirectResults = substConv.getNumIndirectSILResults();
      if (ArgumentNumber < NumIndirectResults) {
        assert(!InStructSubElement && "We're initializing sub-members?");
        addElementUses(BaseEltNo, PointeeType, User,
                       DIUseKind::Initialization);
        continue;

      // Otherwise, adjust the argument index.      
      } else {
        ArgumentNumber -= NumIndirectResults;
      }

      auto ParamConvention =
          substConv.getParameters()[ArgumentNumber].getConvention();

      switch (ParamConvention) {
      case ParameterConvention::Direct_Owned:
      case ParameterConvention::Direct_Unowned:
      case ParameterConvention::Direct_Guaranteed:
        llvm_unreachable("address value passed to indirect parameter");

      // If this is an in-parameter, it is like a load.
      case ParameterConvention::Indirect_In:
      case ParameterConvention::Indirect_In_Constant:
      case ParameterConvention::Indirect_In_Guaranteed:
        addElementUses(BaseEltNo, PointeeType, User, DIUseKind::IndirectIn);
        continue;

      // If this is an @inout parameter, it is like both a load and store.
      case ParameterConvention::Indirect_Inout:
      case ParameterConvention::Indirect_InoutAliasable: {
        // If we're in the initializer for a struct, and this is a call to a
        // mutating method, we model that as an escape of self.  If an
        // individual sub-member is passed as inout, then we model that as an
        // inout use.
        addElementUses(BaseEltNo, PointeeType, User, DIUseKind::InOutUse);
        continue;
      }
      }
      llvm_unreachable("bad parameter convention");
    }
    
    // init_enum_data_addr is treated like a tuple_element_addr or other instruction
    // that is looking into the memory object (i.e., the memory object needs to
    // be explicitly initialized by a copy_addr or some other use of the
    // projected address).
    if (auto I = dyn_cast<InitEnumDataAddrInst>(User)) {
      assert(!InStructSubElement &&
             "init_enum_data_addr shouldn't apply to struct subelements");
      // Keep track of the fact that we're inside of an enum.  This informs our
      // recursion that tuple stores are not scalarized outside, and that stores
      // should not be treated as partial stores.
      llvm::SaveAndRestore<bool> X(InEnumSubElement, true);
      collectUses(I, BaseEltNo);
      continue;
    }

    // init_existential_addr is modeled as an initialization store.
    if (isa<InitExistentialAddrInst>(User)) {
      assert(!InStructSubElement &&
             "init_existential_addr should not apply to struct subelements");
      Uses.push_back(DIMemoryUse(User, DIUseKind::Initialization,
                                 BaseEltNo, 1));
      continue;
    }
    
    // inject_enum_addr is modeled as an initialization store.
    if (isa<InjectEnumAddrInst>(User)) {
      assert(!InStructSubElement &&
             "inject_enum_addr the subelement of a struct unless in a ctor");
      Uses.push_back(DIMemoryUse(User, DIUseKind::Initialization,
                                 BaseEltNo, 1));
      continue;
    }

    // open_existential_addr is a use of the protocol value,
    // so it is modeled as a load.
    if (isa<OpenExistentialAddrInst>(User)) {
      Uses.push_back(DIMemoryUse(User, DIUseKind::Load, BaseEltNo, 1));
      // TODO: Is it safe to ignore all uses of the open_existential_addr?
      continue;
    }

    // We model destroy_addr as a release of the entire value.
    if (isa<DestroyAddrInst>(User)) {
      Releases.push_back(User);
      continue;
    }

    if (isa<DeallocStackInst>(User)) {
      continue;
    }

    // Sanitizer instrumentation is not user visible, so it should not
    // count as a use and must not affect compile-time diagnostics.
    if (isSanitizerInstrumentation(User, Module.getASTContext()))
      continue;

    // Otherwise, the use is something complicated, it escapes.
    addElementUses(BaseEltNo, PointeeType, User, DIUseKind::Escape);
  }

  // Now that we've walked all of the immediate uses, scalarize any operations
  // working on tuples if we need to for canonicalization or analysis reasons.
  if (!UsesToScalarize.empty()) {
    SILInstruction *PointerInst = Pointer->getDefiningInstruction();
    SmallVector<SILValue, 4> ElementAddrs;
    SILBuilderWithScope AddrBuilder(++SILBasicBlock::iterator(PointerInst),
                                    PointerInst);
    getScalarizedElementAddresses(Pointer, AddrBuilder, PointerInst->getLoc(),
                                  ElementAddrs);
    
    SmallVector<SILValue, 4> ElementTmps;
    for (auto *User : UsesToScalarize) {
      ElementTmps.clear();

      DEBUG(llvm::errs() << "  *** Scalarizing: " << *User << "\n");

      // Scalarize LoadInst
      if (auto *LI = dyn_cast<LoadInst>(User)) {
        SILValue Result = scalarizeLoad(LI, ElementAddrs);
        LI->replaceAllUsesWith(Result);
        LI->eraseFromParent();
        continue;
      }

      // Scalarize AssignInst
      if (auto *AI = dyn_cast<AssignInst>(User)) {
        SILBuilderWithScope B(User, AI);
        getScalarizedElements(AI->getOperand(0), ElementTmps, AI->getLoc(), B);

        for (unsigned i = 0, e = ElementAddrs.size(); i != e; ++i)
          B.createAssign(AI->getLoc(), ElementTmps[i], ElementAddrs[i]);
        AI->eraseFromParent();
        continue;
      }
      
      // Scalarize StoreInst
      if (auto *SI = dyn_cast<StoreInst>(User)) {
        SILBuilderWithScope B(User, SI);
        getScalarizedElements(SI->getOperand(0), ElementTmps, SI->getLoc(), B);
        
        for (unsigned i = 0, e = ElementAddrs.size(); i != e; ++i)
          B.createStore(SI->getLoc(), ElementTmps[i], ElementAddrs[i],
                        StoreOwnershipQualifier::Unqualified);
        SI->eraseFromParent();
        continue;
      }
      
      // Scalarize CopyAddrInst.
      auto *CAI = cast<CopyAddrInst>(User);
      SILBuilderWithScope B(User, CAI);

      // Determine if this is a copy *from* or *to* "Pointer".
      if (CAI->getSrc() == Pointer) {
        // Copy from pointer.
        getScalarizedElementAddresses(CAI->getDest(), B, CAI->getLoc(),
                                      ElementTmps);
        for (unsigned i = 0, e = ElementAddrs.size(); i != e; ++i)
          B.createCopyAddr(CAI->getLoc(), ElementAddrs[i], ElementTmps[i],
                           CAI->isTakeOfSrc(), CAI->isInitializationOfDest());
        
      } else {
        getScalarizedElementAddresses(CAI->getSrc(), B, CAI->getLoc(),
                                      ElementTmps);
        for (unsigned i = 0, e = ElementAddrs.size(); i != e; ++i)
          B.createCopyAddr(CAI->getLoc(), ElementTmps[i], ElementAddrs[i],
                           CAI->isTakeOfSrc(), CAI->isInitializationOfDest());
      }
      CAI->eraseFromParent();
    }
    
    // Now that we've scalarized some stuff, recurse down into the newly created
    // element address computations to recursively process it.  This can cause
    // further scalarization.
    for (auto EltPtr : ElementAddrs)
      collectTupleElementUses(cast<TupleElementAddrInst>(EltPtr), BaseEltNo);
  }
}
Code Example #13
static void checkNoEscapePartialApplyUse(Operand *oper, FollowUse followUses) {
  SILInstruction *user = oper->getUser();

  // Ignore uses that are totally uninteresting.
  if (isIncidentalUse(user) || onlyAffectsRefCount(user))
    return;

  // Before checking conversions in general below (getSingleValueCopyOrCast),
  // check for convert_function to [without_actually_escaping]. Assume such
  // conversions are not actually escaping without following their uses.
  if (auto *CFI = dyn_cast<ConvertFunctionInst>(user)) {
    if (CFI->withoutActuallyEscaping())
      return;
  }

  // Look through copies, borrows, and conversions.
  //
  // Note: This handles ConversionInst, which already includes everything in
  // swift::stripConvertFunctions.
  if (SingleValueInstruction *copy = getSingleValueCopyOrCast(user)) {
    // Only follow the copied operand. Other operands are incidental,
    // as in the second operand of mark_dependence.
    if (oper->getOperandNumber() == 0)
      followUses(copy);

    return;
  }

  switch (user->getKind()) {
  default:
    break;

  // Look through Optionals.
  case SILInstructionKind::EnumInst:
    // @noescape block storage can be passed as an Optional (Nullable).
    followUses(cast<EnumInst>(user));
    return;

  // Look through Phis.
  case SILInstructionKind::BranchInst: {
    const SILPhiArgument *arg = cast<BranchInst>(user)->getArgForOperand(oper);
    followUses(arg);
    return;
  }
  case SILInstructionKind::CondBranchInst: {
    const SILPhiArgument *arg =
        cast<CondBranchInst>(user)->getArgForOperand(oper);
    if (arg) // If the use isn't the branch condition, follow it.
      followUses(arg);
    return;
  }
  // Look through ObjC closures.
  case SILInstructionKind::StoreInst:
    if (oper->getOperandNumber() == StoreInst::Src) {
      if (auto *PBSI = dyn_cast<ProjectBlockStorageInst>(
            cast<StoreInst>(user)->getDest())) {
        SILValue storageAddr = PBSI->getOperand();
        // The closure is stored to block storage. Recursively visit all
        // uses of any initialized block storage values derived from this
        // storage address.
        for (Operand *oper : storageAddr->getUses()) {
          if (auto *IBS = dyn_cast<InitBlockStorageHeaderInst>(oper->getUser()))
            followUses(IBS);
        }
        return;
      }
    }
    break;

  case SILInstructionKind::IsEscapingClosureInst:
    // May be generated by withoutActuallyEscaping.
    return;

  case SILInstructionKind::PartialApplyInst: {
    // Recurse through partial_apply to handle special cases before handling
    // ApplySites in general below.
    PartialApplyInst *PAI = cast<PartialApplyInst>(user);
    // Use the same logic as checkForViolationAtApply applied to a def-use
    // traversal.
    //
    // checkForViolationAtApply recurses through partial_apply chains.
    if (oper->get() == PAI->getCallee()) {
      followUses(PAI);
      return;
    }
    // checkForViolationAtApply also uses findClosuresForAppliedArg which in
    // turn checks isPartialApplyOfReabstractionThunk.
    //
    // A closure with @inout_aliasable arguments may be applied to a
    // thunk as "escaping", but as long as the thunk is only used as a
    // '@noescape" type then it is safe.
    if (isPartialApplyOfReabstractionThunk(PAI)) {
      // Don't follow thunks that were generated by withoutActuallyEscaping.
      SILFunction *thunkDef = PAI->getReferencedFunction();
      if (!thunkDef->isWithoutActuallyEscapingThunk())
        followUses(PAI);
      return;
    }
    // Handle this use like a normal applied argument.
    break;
  }
  }

  // Handle ApplySites in general after checking PartialApply above.
  if (isa<ApplySite>(user)) {
    SILValue arg = oper->get();
    auto argumentFnType = getSILFunctionTypeForValue(arg);
    if (argumentFnType && argumentFnType->isNoEscape()) {
      // Verify that the inverse operation, finding a partial_apply from a
      // @noescape argument, is consistent.
      TinyPtrVector<PartialApplyInst *> partialApplies;
      findClosuresForFunctionValue(arg, partialApplies);
      assert(!partialApplies.empty()
             && "cannot find partial_apply from @noescape function argument");
      return;
    }
    llvm::dbgs() << "Applied argument must be @noescape function type: " << *arg;
  } else {
    llvm::dbgs() << "Unexpected partial_apply use: " << *user;
  }

  llvm_unreachable("A partial_apply with @inout_aliasable may only be "
                   "used as a @noescape function type argument.");
}
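The followUses callback drives a def-use closure: each user that merely forwards the closure value (a copy, conversion, enum wrap, or phi) is re-enqueued so its own uses face the same check. A generic sketch of that traversal shape (GValue and the transparent flag are hypothetical):

#include <functional>
#include <unordered_set>
#include <vector>

struct GValue { std::vector<GValue *> users; bool transparent; };

static void visitClosure(GValue *root,
                         const std::function<void(GValue *)> &check) {
  std::unordered_set<GValue *> visited{root};
  std::vector<GValue *> worklist{root};
  while (!worklist.empty()) {
    GValue *v = worklist.back();
    worklist.pop_back();
    for (GValue *user : v->users) {
      check(user);
      // "Look through" users that merely forward the value; phis can form
      // cycles, hence the visited set.
      if (user->transparent && visited.insert(user).second)
        worklist.push_back(user);
    }
  }
}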
Code Example #14
/// Simplify the following two frontend patterns:
///
///   %payload_addr = init_enum_data_addr %payload_allocation
///   store %payload to %payload_addr
///   inject_enum_addr %payload_allocation, $EnumType.case
///
///   inject_enum_addr %nopayload_allocation, $EnumType.case
///
/// for a concrete enum type $EnumType.case to:
///
///   %1 = enum $EnumType, $EnumType.case, %payload
///   store %1 to %payload_addr
///
///   %1 = enum $EnumType, $EnumType.case
///   store %1 to %nopayload_addr
///
/// We leave the cleaning up to mem2reg.
SILInstruction *
SILCombiner::visitInjectEnumAddrInst(InjectEnumAddrInst *IEAI) {
  // Given an inject_enum_addr of a concrete type without payload, promote it to
  // a store of an enum. Mem2reg/load forwarding will clean things up for us. We
  // can't handle the payload case here due to the flow problems caused by the
  // dependency in between the enum and its data.

  assert(IEAI->getOperand()->getType().isAddress() && "Must be an address");
  Builder.setCurrentDebugScope(IEAI->getDebugScope());

  if (IEAI->getOperand()->getType().isAddressOnly(IEAI->getModule())) {
    // Check for the following pattern inside the current basic block:
    // inject_enum_addr %payload_allocation, $EnumType.case1
    // ... no insns storing anything into %payload_allocation
    // select_enum_addr  %payload_allocation,
    //                   case $EnumType.case1: %Result1,
    //                   case $EnumType.case2: %Result2
    //                   ...
    //
    // Replace the select_enum_addr by %Result1

    auto *Term = IEAI->getParent()->getTerminator();
    if (isa<CondBranchInst>(Term) || isa<SwitchValueInst>(Term)) {
      auto BeforeTerm = std::prev(std::prev(IEAI->getParent()->end()));
      auto *SEAI = dyn_cast<SelectEnumAddrInst>(BeforeTerm);
      if (!SEAI)
        return nullptr;

      if (SEAI->getOperand() != IEAI->getOperand())
        return nullptr;

      SILBasicBlock::iterator II = IEAI->getIterator();
      StoreInst *SI = nullptr;
      for (;;) {
        SILInstruction *CI = &*II;
        if (CI == SEAI)
          break;
        ++II;
        SI = dyn_cast<StoreInst>(CI);
        if (SI) {
          if (SI->getDest() == IEAI->getOperand())
            return nullptr;
        }
        // Allow all instructions in between that don't have any dependency
        // on the store.
        if (AA->mayWriteToMemory(&*II, IEAI->getOperand()))
          return nullptr;
      }

      auto *InjectedEnumElement = IEAI->getElement();
      auto Result = SEAI->getCaseResult(InjectedEnumElement);

      // Replace select_enum_addr by the result
      replaceInstUsesWith(*SEAI, Result);
      return nullptr;
    }

    // Check for the following pattern inside the current basic block:
    // inject_enum_addr %payload_allocation, $EnumType.case1
    // ... no insns storing anything into %payload_allocation
    // switch_enum_addr  %payload_allocation,
    //                   case $EnumType.case1: %bbX,
    //                   case $EnumType.case2: %bbY
    //                   ...
    //
    // Replace the switch_enum_addr by select_enum_addr, switch_value.
    if (auto *SEI = dyn_cast<SwitchEnumAddrInst>(Term)) {
      if (SEI->getOperand() != IEAI->getOperand())
        return nullptr;

      SILBasicBlock::iterator II = IEAI->getIterator();
      StoreInst *SI = nullptr;
      for (;;) {
        SILInstruction *CI = &*II;
        if (CI == SEI)
          break;
        ++II;
        SI = dyn_cast<StoreInst>(CI);
        if (SI) {
          if (SI->getDest() == IEAI->getOperand())
            return nullptr;
        }
        // Allow all instructions in between that don't have any dependency
        // on the store.
        if (AA->mayWriteToMemory(&*II, IEAI->getOperand()))
          return nullptr;
      }

      // Replace switch_enum_addr by a branch instruction.
      SILBuilderWithScope B(SEI);
      SmallVector<std::pair<EnumElementDecl *, SILValue>, 8> CaseValues;
      SmallVector<std::pair<SILValue, SILBasicBlock *>, 8> CaseBBs;

      auto IntTy = SILType::getBuiltinIntegerType(32, B.getASTContext());

      for (int i = 0, e = SEI->getNumCases(); i < e; ++i) {
        auto Pair = SEI->getCase(i);
        auto *IL = B.createIntegerLiteral(SEI->getLoc(), IntTy, APInt(32, i, false));
        SILValue ILValue = SILValue(IL);
        CaseValues.push_back(std::make_pair(Pair.first, ILValue));
        CaseBBs.push_back(std::make_pair(ILValue, Pair.second));
      }

      SILValue DefaultValue;
      SILBasicBlock *DefaultBB = nullptr;

      if (SEI->hasDefault()) {
        auto *IL = B.createIntegerLiteral(
          SEI->getLoc(), IntTy,
          APInt(32, static_cast<uint64_t>(SEI->getNumCases()), false));
        DefaultValue = SILValue(IL);
        DefaultBB = SEI->getDefaultBB();
      }

      auto *SEAI = B.createSelectEnumAddr(SEI->getLoc(), SEI->getOperand(), IntTy, DefaultValue, CaseValues);

      B.createSwitchValue(SEI->getLoc(), SILValue(SEAI), DefaultBB, CaseBBs);

      return eraseInstFromFunction(*SEI);
    }

    return nullptr;
  }

  // If the enum does not have a payload create the enum/store since we don't
  // need to worry about payloads.
  if (!IEAI->getElement()->hasArgumentType()) {
    EnumInst *E =
      Builder.createEnum(IEAI->getLoc(), SILValue(), IEAI->getElement(),
                          IEAI->getOperand()->getType().getObjectType());
    Builder.createStore(IEAI->getLoc(), E, IEAI->getOperand(),
                        StoreOwnershipQualifier::Unqualified);
    return eraseInstFromFunction(*IEAI);
  }

  // Ok, we have a payload enum, make sure that we have a store previous to
  // us...
  SILValue ASO = IEAI->getOperand();
  if (!isa<AllocStackInst>(ASO)) {
    return nullptr;
  }
  InitEnumDataAddrInst *DataAddrInst = nullptr;
  InjectEnumAddrInst *EnumAddrIns = nullptr;
  llvm::SmallPtrSet<SILInstruction *, 32> WriteSet;
  for (auto UsersIt : ASO->getUses()) {
    SILInstruction *CurrUser = UsersIt->getUser();
    if (CurrUser->isDeallocatingStack()) {
      // We don't care about the dealloc_stack instructions.
      continue;
    }
    if (isDebugInst(CurrUser) || isa<LoadInst>(CurrUser)) {
      // These instructions are non-risky uses we can ignore.
      continue;
    }
    if (auto *CurrInst = dyn_cast<InitEnumDataAddrInst>(CurrUser)) {
      if (DataAddrInst) {
        return nullptr;
      }
      DataAddrInst = CurrInst;
      continue;
    }
    if (auto *CurrInst = dyn_cast<InjectEnumAddrInst>(CurrUser)) {
      if (EnumAddrIns) {
        return nullptr;
      }
      EnumAddrIns = CurrInst;
      continue;
    }
    if (isa<StoreInst>(CurrUser)) {
      // The only may-write instruction we can safely handle.
      WriteSet.insert(CurrUser);
      continue;
    }
    // It is too risky to continue if it is any other instruction.
    return nullptr;
  }

  if (!DataAddrInst || !EnumAddrIns) {
    return nullptr;
  }
  assert((EnumAddrIns == IEAI) &&
         "Found InjectEnumAddrInst differs from IEAI");
  // Found the DataAddrInst to this enum payload. Check if it has a single use.
  if (!hasOneNonDebugUse(DataAddrInst))
    return nullptr;

  StoreInst *SI = dyn_cast<StoreInst>(getSingleNonDebugUser(DataAddrInst));
  ApplyInst *AI = dyn_cast<ApplyInst>(getSingleNonDebugUser(DataAddrInst));
  if (!SI && !AI) {
    return nullptr;
  }

  // Make sure the enum pattern instructions are the only ones which write to
  // this location
  if (!WriteSet.empty()) {
    // Analyze the instructions (implicit dominator analysis)
    // If we find any of MayWriteSet, return nullptr
    SILBasicBlock *InitEnumBB = DataAddrInst->getParent();
    assert(InitEnumBB && "DataAddrInst is not in a valid Basic Block");
    llvm::SmallVector<SILInstruction *, 64> Worklist;
    Worklist.push_back(IEAI);
    llvm::SmallPtrSet<SILBasicBlock *, 16> Preds;
    Preds.insert(IEAI->getParent());
    while (!Worklist.empty()) {
      SILInstruction *CurrIns = Worklist.pop_back_val();
      SILBasicBlock *CurrBB = CurrIns->getParent();

      if (CurrBB->isEntry() && CurrBB != InitEnumBB) {
        // reached prologue without encountering the init bb
        return nullptr;
      }

      for (auto InsIt = ++CurrIns->getIterator().getReverse();
           InsIt != CurrBB->rend(); ++InsIt) {
        SILInstruction *Ins = &*InsIt;
        if (Ins == DataAddrInst) {
          // don't care about what comes before init enum in the basic block
          break;
        }
        if (WriteSet.count(Ins) != 0) {
          return nullptr;
        }
      }

      if (CurrBB == InitEnumBB) {
        continue;
      }

      // Go to predecessors and do all that again
      for (SILBasicBlock *Pred : CurrBB->getPredecessorBlocks()) {
        // If it's already in the set, then we've already queued and/or
        // processed the predecessors.
        if (Preds.insert(Pred).second) {
          Worklist.push_back(&*Pred->rbegin());
        }
      }
    }
  }

  if (SI) {
    assert((SI->getDest() == DataAddrInst) &&
           "Can't find StoreInst with DataAddrInst as its destination");
    // In that case, create the payload enum/store.
    EnumInst *E = Builder.createEnum(
        DataAddrInst->getLoc(), SI->getSrc(), DataAddrInst->getElement(),
        DataAddrInst->getOperand()->getType().getObjectType());
    Builder.createStore(DataAddrInst->getLoc(), E, DataAddrInst->getOperand(),
                        StoreOwnershipQualifier::Unqualified);
    // Cleanup.
    eraseInstFromFunction(*SI);
    eraseInstFromFunction(*DataAddrInst);
    return eraseInstFromFunction(*IEAI);
  }

  // Check whether we have an apply initializing the enum.
  //  %iedai = init_enum_data_addr %enum_addr
  //         = apply(%iedai,...)
  //  inject_enum_addr %enum_addr
  //
  // We can localize the store to an alloc_stack.
  // Allowing us to perform the same optimization as for the store.
  //
  //  %alloca = alloc_stack
  //            apply(%alloca,...)
  //  %load = load %alloca
  //  %1 = enum $EnumType, $EnumType.case, %load
  //  store %1 to %nopayload_addr
  //
  assert(AI && "Must have an apply");
  unsigned ArgIdx = 0;
  Operand *EnumInitOperand = nullptr;
  for (auto &Opd : AI->getArgumentOperands()) {
    // Found an apply that initializes the enum. We can optimize this by
    // localizing the initialization to an alloc_stack and loading from it.
    DataAddrInst = dyn_cast<InitEnumDataAddrInst>(Opd.get());
    if (DataAddrInst && DataAddrInst->getOperand() == IEAI->getOperand() &&
        ArgIdx < AI->getSubstCalleeType()->getNumIndirectResults()) {
      EnumInitOperand = &Opd;
      break;
    }
    ++ArgIdx;
  }

  if (!EnumInitOperand) {
    return nullptr;
  }

  // Localize the address access.
  Builder.setInsertionPoint(AI);
  auto *AllocStack = Builder.createAllocStack(DataAddrInst->getLoc(),
                                              EnumInitOperand->get()->getType());
  EnumInitOperand->set(AllocStack);
  Builder.setInsertionPoint(std::next(SILBasicBlock::iterator(AI)));
  SILValue Load(Builder.createLoad(DataAddrInst->getLoc(), AllocStack,
                                   LoadOwnershipQualifier::Unqualified));
  EnumInst *E = Builder.createEnum(
      DataAddrInst->getLoc(), Load, DataAddrInst->getElement(),
      DataAddrInst->getOperand()->getType().getObjectType());
  Builder.createStore(DataAddrInst->getLoc(), E, DataAddrInst->getOperand(),
                      StoreOwnershipQualifier::Unqualified);
  Builder.createDeallocStack(DataAddrInst->getLoc(), AllocStack);
  eraseInstFromFunction(*DataAddrInst);
  return eraseInstFromFunction(*IEAI);
}
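Both rewrites above hinge on the same local safety scan: between the instruction that initializes the enum memory and the instruction that consumes it, nothing may write that location. A standalone sketch of that scan over a hypothetical flat instruction block:

#include <cstddef>
#include <vector>

struct Inst { bool writesWatchedLocation; };

// True if no instruction strictly between indices `from` and `to` may write
// the watched location; any may-write makes the rewrite unsafe.
static bool noInterveningWrite(const std::vector<Inst> &block,
                               std::size_t from, std::size_t to) {
  for (std::size_t i = from + 1; i < to; ++i)
    if (block[i].writesWatchedLocation)
      return false;
  return true;
}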