Example #1
bool RecurrenceDescriptor::getSourceExtensionKind(
    Instruction *Start, Instruction *Exit, Type *RT, bool &IsSigned,
    SmallPtrSetImpl<Instruction *> &Visited,
    SmallPtrSetImpl<Instruction *> &CI) {

  SmallVector<Instruction *, 8> Worklist;
  bool FoundOneOperand = false;
  unsigned DstSize = RT->getPrimitiveSizeInBits();
  Worklist.push_back(Exit);

  // Traverse the instructions in the reduction expression, beginning with the
  // exit value.
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (Use &U : I->operands()) {

      // Terminate the traversal if the operand is not an instruction, or we
      // reach the starting value.
      Instruction *J = dyn_cast<Instruction>(U.get());
      if (!J || J == Start)
        continue;

      // Otherwise, investigate the operation if it is also in the expression.
      if (Visited.count(J)) {
        Worklist.push_back(J);
        continue;
      }

      // If the operand is not in Visited, it is not a reduction operation, but
      // it does feed into one. Make sure it is either a single-use sign- or
      // zero-extend instruction.
      CastInst *Cast = dyn_cast<CastInst>(J);
      bool IsSExtInst = isa<SExtInst>(J);
      if (!Cast || !Cast->hasOneUse() || !(isa<ZExtInst>(J) || IsSExtInst))
        return false;

      // Ensure the source type of the extend is no larger than the reduction
      // type. It is not necessary for the types to be identical.
      unsigned SrcSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
      if (SrcSize > DstSize)
        return false;

      // Furthermore, ensure that all such extends are of the same kind.
      if (FoundOneOperand) {
        if (IsSigned != IsSExtInst)
          return false;
      } else {
        FoundOneOperand = true;
        IsSigned = IsSExtInst;
      }

      // Lastly, if the source type of the extend matches the reduction type,
      // add the extend to CI so that we can avoid accounting for it in the
      // cost model.
      if (SrcSize == DstSize)
        CI.insert(Cast);
    }
  }
  return true;
}
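The function above pairs a SmallVector worklist with SmallPtrSetImpl membership tests to walk a reduction expression without revisiting nodes. A minimal, self-contained sketch of that idiom follows; the helper name collectExpression and its exact semantics are hypothetical, not part of RecurrenceDescriptor.

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Collect every instruction reachable from Exit through operands that are
// already members of Visited, stopping at Start. Expr doubles as the
// "already enqueued" check, so each instruction is processed exactly once.
static void collectExpression(Instruction *Start, Instruction *Exit,
                              const SmallPtrSetImpl<Instruction *> &Visited,
                              SmallPtrSetImpl<Instruction *> &Expr) {
  SmallVector<Instruction *, 8> Worklist;
  Worklist.push_back(Exit);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Expr.insert(I).second)
      continue; // already handled via another path
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get()))
        if (J != Start && Visited.count(J))
          Worklist.push_back(J);
  }
}
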
Example #2
bool RecurrenceDescriptor::areAllUsesIn(Instruction *I,
                                        SmallPtrSetImpl<Instruction *> &Set) {
  for (User::op_iterator Use = I->op_begin(), E = I->op_end(); Use != E; ++Use)
    if (!Set.count(dyn_cast<Instruction>(*Use)))
      return false;
  return true;
}
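The loop walks I's operands and asks whether each is already in Set. In current LLVM style the same predicate would usually be written with llvm::all_of; a hedged sketch under that assumption, with a hypothetical name to avoid colliding with the original:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// True iff every operand of I is an Instruction already contained in Set.
// count(nullptr) is simply 0, so non-instruction operands fail the test just
// as they do in the iterator-based loop above.
static bool areAllOperandsIn(Instruction *I,
                             const SmallPtrSetImpl<Instruction *> &Set) {
  return all_of(I->operands(), [&](Value *Op) {
    return Set.count(dyn_cast<Instruction>(Op)) != 0;
  });
}
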
Example #3
void MemorySSAUpdater::removeBlocks(
    const SmallPtrSetImpl<BasicBlock *> &DeadBlocks) {
  // First delete all uses of BB in MemoryPhis.
  for (BasicBlock *BB : DeadBlocks) {
    TerminatorInst *TI = BB->getTerminator();
    assert(TI && "Basic block expected to have a terminator instruction");
    for (BasicBlock *Succ : successors(TI))
      if (!DeadBlocks.count(Succ))
        if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
          MP->unorderedDeleteIncomingBlock(BB);
          if (MP->getNumIncomingValues() == 1)
            removeMemoryAccess(MP);
        }
    // Drop all references of all accesses in BB
    if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
      for (MemoryAccess &MA : *Acc)
        MA.dropAllReferences();
  }

  // Next, delete all memory accesses in each block
  for (BasicBlock *BB : DeadBlocks) {
    MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
    if (!Acc)
      continue;
    for (auto AB = Acc->begin(), AE = Acc->end(); AB != AE;) {
      MemoryAccess *MA = &*AB;
      ++AB;
      MSSA->removeFromLookups(MA);
      MSSA->removeFromLists(MA);
    }
  }
}
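Two details above are worth isolating: references are dropped in a first pass so later deletions never see dangling uses, and the deletion loop advances its iterator before removing the current access. A minimal sketch of the same two-phase shape for ordinary IR instructions, assuming the block is dead and none of its values are used outside it:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"

// Phase 1: sever every use edge so later erasures cannot leave dangling uses.
// Phase 2: advance the iterator before erasing, since erasure invalidates it.
static void eraseAllInstructions(llvm::BasicBlock &BB) {
  for (llvm::Instruction &I : BB)
    I.dropAllReferences();
  for (auto It = BB.begin(), End = BB.end(); It != End;) {
    llvm::Instruction &I = *It;
    ++It;
    I.eraseFromParent();
  }
}
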
Example #4
/// Walk up the CFG from StartPos (which is in StartBB) and find local and
/// non-local dependencies on Arg.
///
/// TODO: Cache results?
void
llvm::objcarc::FindDependencies(DependenceKind Flavor,
                                const Value *Arg,
                                BasicBlock *StartBB, Instruction *StartInst,
                                SmallPtrSetImpl<Instruction *> &DependingInsts,
                                SmallPtrSetImpl<const BasicBlock *> &Visited,
                                ProvenanceAnalysis &PA) {
  BasicBlock::iterator StartPos = StartInst;

  SmallVector<std::pair<BasicBlock *, BasicBlock::iterator>, 4> Worklist;
  Worklist.push_back(std::make_pair(StartBB, StartPos));
  do {
    std::pair<BasicBlock *, BasicBlock::iterator> Pair =
      Worklist.pop_back_val();
    BasicBlock *LocalStartBB = Pair.first;
    BasicBlock::iterator LocalStartPos = Pair.second;
    BasicBlock::iterator StartBBBegin = LocalStartBB->begin();
    for (;;) {
      if (LocalStartPos == StartBBBegin) {
        pred_iterator PI(LocalStartBB), PE(LocalStartBB, false);
        if (PI == PE)
          // If we've reached the function entry, produce a null dependence.
          DependingInsts.insert(nullptr);
        else
          // Add the predecessors to the worklist.
          do {
            BasicBlock *PredBB = *PI;
            if (Visited.insert(PredBB))
              Worklist.push_back(std::make_pair(PredBB, PredBB->end()));
          } while (++PI != PE);
        break;
      }

      Instruction *Inst = --LocalStartPos;
      if (Depends(Flavor, Inst, Arg, PA)) {
        DependingInsts.insert(Inst);
        break;
      }
    }
  } while (!Worklist.empty());

  // Determine whether the original StartBB post-dominates all of the blocks we
  // visited. If not, insert a sentinel indicating that most optimizations are
  // not safe.
  for (SmallPtrSet<const BasicBlock *, 4>::const_iterator I = Visited.begin(),
       E = Visited.end(); I != E; ++I) {
    const BasicBlock *BB = *I;
    if (BB == StartBB)
      continue;
    const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
    for (succ_const_iterator SI(TI), SE(TI, false); SI != SE; ++SI) {
      const BasicBlock *Succ = *SI;
      if (Succ != StartBB && !Visited.count(Succ)) {
        DependingInsts.insert(reinterpret_cast<Instruction *>(-1));
        return;
      }
    }
  }
}
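One detail that dates this snippet: the check if (Visited.insert(PredBB)) relies on the old SmallPtrSet::insert that returned a plain bool. Current SmallPtrSetImpl::insert returns std::pair<iterator, bool>, so the check-and-enqueue step would be keyed on the second member, as in this small sketch:

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/BasicBlock.h"

using namespace llvm;

// Returns true only when BB was newly inserted (not seen before); the caller
// would then push the predecessor onto the worklist as above.
static bool markVisited(SmallPtrSetImpl<const BasicBlock *> &Visited,
                        const BasicBlock *BB) {
  return Visited.insert(BB).second;
}
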
Example #5
/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible to inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB,
                                SmallPtrSetImpl<const Value *> &EphValues) {
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    // FIXME: Currently, the number of instructions in a function, regardless of
    // our ability to simplify them during inlining to constants or dead code,
    // is actually used by the vector bonus heuristic. As long as that's true,
    // we have to special case debug intrinsics here to prevent differences in
    // inlining due to debug symbols. Eventually, the number of unsimplified
    // instructions shouldn't factor into the cost computation, but until then,
    // hack around it here.
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    // Skip ephemeral values.
    if (EphValues.count(I))
      continue;

    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

    // If visiting this instruction detected an uninlinable pattern, abort.
    if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
        HasIndirectBr)
      return false;

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
      return false;

    if (NumVectorInstructions > NumInstructions/2)
      VectorBonus = FiftyPercentVectorBonus;
    else if (NumVectorInstructions > NumInstructions/10)
      VectorBonus = TenPercentVectorBonus;
    else
      VectorBonus = 0;

    // Check if we've passed the threshold so we don't spin in huge basic
    // blocks that will never inline.
    if (Cost > (Threshold + VectorBonus))
      return false;
  }

  return true;
}
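Example #6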
void VPlanHCFGTransforms::VPInstructionsToVPRecipes(
    VPlanPtr &Plan,
    LoopVectorizationLegality::InductionList *Inductions,
    SmallPtrSetImpl<Instruction *> &DeadInstructions) {

  VPRegionBlock *TopRegion = dyn_cast<VPRegionBlock>(Plan->getEntry());
  ReversePostOrderTraversal<VPBlockBase *> RPOT(TopRegion->getEntry());
  for (VPBlockBase *Base : RPOT) {
    // Do not widen instructions in pre-header and exit blocks.
    if (Base->getNumPredecessors() == 0 || Base->getNumSuccessors() == 0)
      continue;

    VPBasicBlock *VPBB = Base->getEntryBasicBlock();
    VPRecipeBase *LastRecipe = nullptr;
    // Introduce each ingredient into VPlan.
    for (auto I = VPBB->begin(), E = VPBB->end(); I != E;) {
      VPRecipeBase *Ingredient = &*I++;
      // Can only handle VPInstructions.
      VPInstruction *VPInst = cast<VPInstruction>(Ingredient);
      Instruction *Inst = cast<Instruction>(VPInst->getUnderlyingValue());
      if (DeadInstructions.count(Inst)) {
        Ingredient->eraseFromParent();
        continue;
      }

      VPRecipeBase *NewRecipe = nullptr;
      // Create VPWidenMemoryInstructionRecipe for loads and stores.
      if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
        NewRecipe = new VPWidenMemoryInstructionRecipe(*Inst, nullptr /*Mask*/);
      else if (PHINode *Phi = dyn_cast<PHINode>(Inst)) {
        InductionDescriptor II = Inductions->lookup(Phi);
        if (II.getKind() == InductionDescriptor::IK_IntInduction ||
            II.getKind() == InductionDescriptor::IK_FpInduction) {
          NewRecipe = new VPWidenIntOrFpInductionRecipe(Phi);
        } else
          NewRecipe = new VPWidenPHIRecipe(Phi);
      } else {
        // If the last recipe is a VPWidenRecipe, add Inst to it instead of
        // creating a new recipe.
        if (VPWidenRecipe *WidenRecipe =
                dyn_cast_or_null<VPWidenRecipe>(LastRecipe)) {
          WidenRecipe->appendInstruction(Inst);
          Ingredient->eraseFromParent();
          continue;
        }
        NewRecipe = new VPWidenRecipe(Inst);
      }

      NewRecipe->insertBefore(Ingredient);
      LastRecipe = NewRecipe;
      Ingredient->eraseFromParent();
    }
  }
}
Example #7
bool RecurrenceDescriptor::hasMultipleUsesOf(
    Instruction *I, SmallPtrSetImpl<Instruction *> &Insts) {
  unsigned NumUses = 0;
  for (User::op_iterator Use = I->op_begin(), E = I->op_end(); Use != E;
       ++Use) {
    if (Insts.count(dyn_cast<Instruction>(*Use)))
      ++NumUses;
    if (NumUses > 1)
      return true;
  }

  return false;
}
Example #8
static SmallPtrSet<Instruction *, 8>
getNotRelocatableInstructions(CoroBeginInst *CoroBegin,
                              SmallPtrSetImpl<BasicBlock *> &RelocBlocks) {
  SmallPtrSet<Instruction *, 8> DoNotRelocate;
  // Collect all instructions that we should not relocate
  SmallVector<Instruction *, 8> Work;

  // Start with CoroBegin and terminators of all preceding blocks.
  Work.push_back(CoroBegin);
  BasicBlock *CoroBeginBB = CoroBegin->getParent();
  for (BasicBlock *BB : RelocBlocks)
    if (BB != CoroBeginBB)
      Work.push_back(BB->getTerminator());

  // For every instruction in the Work list, place its operands in the
  // DoNotRelocate set.
  do {
    Instruction *Current = Work.pop_back_val();
    LLVM_DEBUG(dbgs() << "CoroSplit: Will not relocate: " << *Current << "\n");
    DoNotRelocate.insert(Current);
    for (Value *U : Current->operands()) {
      auto *I = dyn_cast<Instruction>(U);
      if (!I)
        continue;

      if (auto *A = dyn_cast<AllocaInst>(I)) {
        // Stores to alloca instructions that occur before the coroutine frame
        // is allocated should not be moved; the stored values may be used by
        // the coroutine frame allocator. The operands to those stores must also
        // remain in place.
        for (const auto &User : A->users())
          if (auto *SI = dyn_cast<llvm::StoreInst>(User))
            if (RelocBlocks.count(SI->getParent()) != 0 &&
                DoNotRelocate.count(SI) == 0) {
              Work.push_back(SI);
              DoNotRelocate.insert(SI);
            }
        continue;
      }

      if (DoNotRelocate.count(I) == 0) {
        Work.push_back(I);
        DoNotRelocate.insert(I);
      }
    }
  } while (!Work.empty());
  return DoNotRelocate;
}
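The helper above follows the usual ADT convention: parameters take the size-erased SmallPtrSetImpl<T> so callers can pass any small size, while locals and the return value use a concrete SmallPtrSet<T, N>. It also pairs every worklist push with a set insertion so no instruction is enqueued twice; that pairing in isolation, as a hedged sketch with a hypothetical helper name:

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Enqueue I only the first time it is seen: Seen is the membership test,
// Work is the pending list, mirroring the count-then-insert checks above.
static void enqueueOnce(Instruction *I,
                        SmallPtrSetImpl<Instruction *> &Seen,
                        SmallVectorImpl<Instruction *> &Work) {
  if (Seen.insert(I).second)
    Work.push_back(I);
}

Example #9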
/// Visit each register class belonging to the given register bank.
///
/// A class belongs to the bank iff any of these apply:
/// * It is explicitly specified
/// * It is a subclass of a class that is a member.
/// * It is a class containing subregisters of the registers of a class that
///   is a member. This is known as a subreg-class.
///
/// This function must be called for each explicitly specified register class.
///
/// \param RC The register class to search.
/// \param Kind A debug string containing the path the visitor took to reach RC.
/// \param VisitFn The action to take for each class visited. It may be called
///                multiple times for a given class if there are multiple paths
///                to the class.
static void visitRegisterBankClasses(
    CodeGenRegBank &RegisterClassHierarchy, const CodeGenRegisterClass *RC,
    const Twine Kind,
    std::function<void(const CodeGenRegisterClass *, StringRef)> VisitFn,
    SmallPtrSetImpl<const CodeGenRegisterClass *> &VisitedRCs) {

  // Make sure we only visit each class once to avoid infinite loops.
  if (VisitedRCs.count(RC))
    return;
  VisitedRCs.insert(RC);

  // Visit each explicitly named class.
  VisitFn(RC, Kind.str());

  for (const auto &PossibleSubclass : RegisterClassHierarchy.getRegClasses()) {
    std::string TmpKind =
        (Twine(Kind) + " (" + PossibleSubclass.getName() + ")").str();

    // Visit each subclass of an explicitly named class.
    if (RC != &PossibleSubclass && RC->hasSubClass(&PossibleSubclass))
      visitRegisterBankClasses(RegisterClassHierarchy, &PossibleSubclass,
                               TmpKind + " " + RC->getName() + " subclass",
                               VisitFn, VisitedRCs);

    // Visit each class that contains only subregisters of RC with a common
    // subregister-index.
    //
    // More precisely, PossibleSubclass is a subreg-class iff Reg:SubIdx is in
    // PossibleSubclass for all registers Reg from RC using any
    // subregister-index SubIdx.
    for (const auto &SubIdx : RegisterClassHierarchy.getSubRegIndices()) {
      BitVector BV(RegisterClassHierarchy.getRegClasses().size());
      PossibleSubclass.getSuperRegClasses(&SubIdx, BV);
      if (BV.test(RC->EnumValue)) {
        std::string TmpKind2 = (Twine(TmpKind) + " " + RC->getName() +
                                " class-with-subregs: " + RC->getName())
                                   .str();
        VisitFn(&PossibleSubclass, TmpKind2);
      }
    }
  }
}
Example #10
bool GuardWideningImpl::isAvailableAt(Value *V, Instruction *Loc,
                                      SmallPtrSetImpl<Instruction *> &Visited) {
  auto *Inst = dyn_cast<Instruction>(V);
  if (!Inst || DT.dominates(Inst, Loc) || Visited.count(Inst))
    return true;

  if (!isSafeToSpeculativelyExecute(Inst, Loc, &DT) ||
      Inst->mayReadFromMemory())
    return false;

  Visited.insert(Inst);

  // We only want to go _up_ the dominance chain when recursing.
  assert(!isa<PHINode>(Loc) &&
         "PHIs should return false for isSafeToSpeculativelyExecute");
  assert(DT.isReachableFromEntry(Inst->getParent()) &&
         "We did a DFS from the block entry!");
  return all_of(Inst->operands(),
                [&](Value *Op) { return isAvailableAt(Op, Loc, Visited); });
}
Example #11
/// Perform backward copy-propagation. Find the initialization point of the
/// copy's source and replace the initializer's address with the copy's dest.
bool CopyForwarding::backwardPropagateCopy(
  CopyAddrInst *CopyInst,
  SmallPtrSetImpl<SILInstruction*> &DestUserInsts) {

  SILValue CopySrc = CopyInst->getSrc();
  ValueBase *CopyDestDef = CopyInst->getDest().getDef();

  // Scan backward recording all operands that use CopySrc until we see the
  // most recent init of CopySrc.
  bool seenInit = false;
  SmallVector<Operand*, 16> ValueUses;
  SmallVector<DebugValueAddrInst*, 4> DebugValueInstsToDelete;
  auto SI = CopyInst->getIterator(), SE = CopyInst->getParent()->begin();
  while (SI != SE) {
    --SI;
    SILInstruction *UserInst = &*SI;

    // If we see another use of Dest, then Dest is live after the Src location
    // is initialized, so we really need the copy.
    if (DestUserInsts.count(UserInst) || UserInst == CopyDestDef) {
      if (auto *DVAI = dyn_cast<DebugValueAddrInst>(UserInst)) {
        DebugValueInstsToDelete.push_back(DVAI);
        continue;
      }
      DEBUG(llvm::dbgs() << "  Skipping copy" << *CopyInst
            << "    dest used by " << *UserInst);
      return false;
    }
    // Early check to avoid scanning unrelated instructions.
    if (!SrcUserInsts.count(UserInst)
        && !(isa<DebugValueAddrInst>(UserInst)
             && SrcDebugValueInsts.count(cast<DebugValueAddrInst>(UserInst))))
      continue;

    AnalyzeBackwardUse AnalyzeUse(CopySrc);
    seenInit = AnalyzeUse.visit(UserInst);
    // If this use cannot be analyzed, then abort.
    if (!AnalyzeUse.Oper)
      return false;
    // Otherwise record the operand.
    ValueUses.push_back(AnalyzeUse.Oper);
    // If this is an init, we're done searching.
    if (seenInit)
      break;
  }
  if (!seenInit)
    return false;

  for (auto *DVAI : DebugValueInstsToDelete)
    DVAI->eraseFromParent();
  
  // Convert a reinitialization of this address into a destroy, followed by an
  // initialization. Replacing a copy with a destroy+init is not by itself
  // profitable. However, it does allow us to eliminate the later copy, and the
  // init copy may be eliminated later.
  if (auto Copy = dyn_cast<CopyAddrInst>(&*SI)) {
    if (Copy->getDest() == CopySrc && !Copy->isInitializationOfDest()) {
      SILBuilderWithScope(Copy).createDestroyAddr(Copy->getLoc(), CopySrc);
      Copy->setIsInitializationOfDest(IsInitialization);
    }
  }
  // Now that an init was found, it is safe to substitute all recorded uses
  // with the copy's dest.
  for (auto *Oper : ValueUses) {
    Oper->set(CopyInst->getDest());
    if (isa<CopyAddrInst>(Oper->getUser()))
      HasForwardedToCopy = true;
  }
  return true;
}
Example #12
/// Perform forward copy-propagation. Find a set of uses that the given copy can
/// forward to and replace them with the copy's source.
///
/// We must only replace uses of this copy's value. To do this, we search
/// forward in the current block from the copy that initializes the value to the
/// point of deinitialization. Typically, this will be a point at which the
/// value is passed as an 'in' argument:
/// \code
/// %copy = alloc_stack $T
/// ...
/// CurrentBlock:
/// copy_addr %arg to [initialization] %copy#1 : $*T
/// ...
/// %ret = apply %callee<T>(%copy#1) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> ()
/// \endcode
///
/// If the last use (deinit) is a copy, replace it with a destroy+copy[init].
///
/// The caller has already guaranteed that the lifetime of the copy's source
/// ends at this copy. Either the copy is a [take] or a destroy can be hoisted
/// to the copy.
bool CopyForwarding::forwardPropagateCopy(
  CopyAddrInst *CopyInst,
  SmallPtrSetImpl<SILInstruction*> &DestUserInsts) {

  // Looking at
  //
  //    copy_addr %Src, [init] %Dst
  //
  // We can reuse %Src if it is destroyed at %Src and not initialized again. To
  // know that we can safely replace all uses of %Dst with source we must know
  // that it is uniquely named and cannot be accessed outside of the function
  // (an alloc_stack instruction qualifies for this, an inout parameter does
  // not).  Additionally, we must know that all accesses to %Dst further on must
  // have had this copy on their path (there might be reinitialization of %Dst
  // later, but there must not be a path around this copy that reads from %Dst).
  SmallVector<Operand *, 16> DestUses;
  if (isa<AllocStackInst>(CopyInst->getDest()) && /* Uniquely identified name */
      isSourceDeadAtCopy(CopyInst) &&
      areCopyDestUsersDominatedBy(CopyInst, DestUses)) {

    // Replace all uses of Dest with a use of Src.
    for (auto *Oper : DestUses) {
      Oper->set(CopyInst->getSrc());
      if (isa<CopyAddrInst>(Oper->getUser()))
        HasForwardedToCopy = true;
    }

    // The caller will remove the destroy_addr of %src.
    assert((DestroyPoints.empty() ||
            (!CopyInst->isTakeOfSrc() && DestroyPoints.size() == 1)) &&
           "Must only have one destroy");

    // The caller will remove the copy_addr.
    return true;
  }

  SILValue CopyDest = CopyInst->getDest();
  SILInstruction *DefDealloc = nullptr;
  if (isa<AllocStackInst>(CurrentDef)) {
    SILValue StackAddr(CurrentDef.getDef(), 0);
    if (!StackAddr.hasOneUse()) {
      DEBUG(llvm::dbgs() << "  Skipping copy" << *CopyInst
            << "  stack address has multiple uses.\n");
      return false;
    }
    DefDealloc = StackAddr.use_begin()->getUser();
  }

  // Scan forward recording all operands that use CopyDest until we see the
  // next deinit of CopyDest.
  SmallVector<Operand*, 16> ValueUses;
  auto SI = CopyInst->getIterator(), SE = CopyInst->getParent()->end();
  for (++SI; SI != SE; ++SI) {
    SILInstruction *UserInst = &*SI;
    // If we see another use of Src, then the source location is reinitialized
    // before the Dest location is deinitialized. So we really need the copy.
    if (SrcUserInsts.count(UserInst)) {
      DEBUG(llvm::dbgs() << "  Skipping copy" << *CopyInst
            << "  source used by" << *UserInst);
      return false;
    }
    if (UserInst == DefDealloc) {
      DEBUG(llvm::dbgs() << "  Skipping copy" << *CopyInst
            << "    dealloc_stack before dest use.\n");
      return false;
    }
    // Early check to avoid scanning unrelated instructions.
    if (!DestUserInsts.count(UserInst))
      continue;

    AnalyzeForwardUse AnalyzeUse(CopyDest);
    bool seenDeinit = AnalyzeUse.visit(UserInst);
    // If this use cannot be analyzed, then abort.
    if (!AnalyzeUse.Oper)
      return false;
    // Otherwise record the operand.
    ValueUses.push_back(AnalyzeUse.Oper);
    // If this is a deinit, we're done searching.
    if (seenDeinit)
      break;
  }
  if (SI == SE)
    return false;

  // Convert a reinitialization of this address into a destroy, followed by an
  // initialization. Replacing a copy with a destroy+init is not by itself
  // profitable. However, it does allow eliminating the earlier copy, and we may
  // later be able to eliminate this initialization copy.
  if (auto Copy = dyn_cast<CopyAddrInst>(&*SI)) {
    if (Copy->getDest() == CopyDest) {
      assert(!Copy->isInitializationOfDest() && "expected a deinit");

      DestroyAddrInst *Destroy =
          SILBuilderWithScope(Copy).createDestroyAddr(Copy->getLoc(), CopyDest);
      Copy->setIsInitializationOfDest(IsInitialization);

      assert(ValueUses.back()->getUser() == Copy && "bad value use");
      ValueUses.back() = &Destroy->getOperandRef();
    }
  }
  // Now that a deinit was found, it is safe to substitute all recorded uses
  // with the copy's source.
  for (auto *Oper : ValueUses) {
    Oper->set(CopyInst->getSrc());
    if (isa<CopyAddrInst>(Oper->getUser()))
      HasForwardedToCopy = true;
  }
  return true;
}
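Example #13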
/// Determine which blocks the value is live in.
///
/// These are blocks which lead to uses.  Knowing this allows us to avoid
/// inserting PHI nodes into blocks which don't lead to uses (thus, the
/// inserted phi nodes would be dead).
void PromoteMem2Reg::ComputeLiveInBlocks(
    AllocaInst *AI, AllocaInfo &Info,
    const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
    SmallPtrSetImpl<BasicBlock *> &LiveInBlocks) {
  // To determine liveness, we must iterate through the predecessors of blocks
  // where the def is live.  Blocks are added to the worklist if we need to
  // check their predecessors.  Start with all the using blocks.
  SmallVector<BasicBlock *, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
                                                    Info.UsingBlocks.end());

  // If any of the using blocks is also a definition block, check to see if the
  // definition occurs before or after the use.  If it happens before the use,
  // the value isn't really live-in.
  for (unsigned i = 0, e = LiveInBlockWorklist.size(); i != e; ++i) {
    BasicBlock *BB = LiveInBlockWorklist[i];
    if (!DefBlocks.count(BB))
      continue;

    // Okay, this is a block that both uses and defines the value.  If the first
    // reference to the alloca is a def (store), then we know it isn't live-in.
    for (BasicBlock::iterator I = BB->begin();; ++I) {
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        if (SI->getOperand(1) != AI)
          continue;

        // We found a store to the alloca before a load.  The alloca is not
        // actually live-in here.
        LiveInBlockWorklist[i] = LiveInBlockWorklist.back();
        LiveInBlockWorklist.pop_back();
        --i;
        --e;
        break;
      }

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        if (LI->getOperand(0) != AI)
          continue;

        // Okay, we found a load before a store to the alloca.  It is actually
        // live into this block.
        break;
      }
    }
  }

  // Now that we have a set of blocks where the phi is live-in, recursively add
  // their predecessors until we find the full region the value is live.
  while (!LiveInBlockWorklist.empty()) {
    BasicBlock *BB = LiveInBlockWorklist.pop_back_val();

    // The block really is live in here, insert it into the set.  If already in
    // the set, then it has already been processed.
    if (!LiveInBlocks.insert(BB).second)
      continue;

    // Since the value is live into BB, it is either defined in a predecessor or
    // live into it too.  Add the preds to the worklist unless they are a
    // defining block.
    for (BasicBlock *P : predecessors(BB)) {
      // The value is not live into a predecessor if it defines the value.
      if (DefBlocks.count(P))
        continue;

      // Otherwise it is, add to the worklist.
      LiveInBlockWorklist.push_back(P);
    }
  }
}
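The first loop above deletes from LiveInBlockWorklist with the swap-with-back-and-pop idiom: O(1) removal from a SmallVector during an index-based scan, at the cost of reordering the remaining entries. A minimal standalone sketch of just that idiom, using a hypothetical predicate:

#include "llvm/ADT/SmallVector.h"

// Remove every even element from V without shifting the tail: overwrite the
// current slot with the last element, shrink the vector, and revisit the same
// index on the next iteration.
static void eraseEvens(llvm::SmallVectorImpl<int> &V) {
  for (unsigned i = 0, e = V.size(); i != e; ++i) {
    if (V[i] % 2 != 0)
      continue;
    V[i] = V.back();
    V.pop_back();
    --i;
    --e;
  }
}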