static SILValue getIncomingValueForPred(SILBasicBlock *BB, SILBasicBlock *Pred,
                                        unsigned Index) {
  TermInst *TI = Pred->getTerminator();
  switch (TI->getTermKind()) {
  // TODO: This list is conservative. I think we can probably handle more of
  // these.
  case TermKind::UnreachableInst:
  case TermKind::ReturnInst:
  case TermKind::ThrowInst:
  case TermKind::TryApplyInst:
  case TermKind::SwitchValueInst:
  case TermKind::SwitchEnumAddrInst:
  case TermKind::CheckedCastAddrBranchInst:
  case TermKind::DynamicMethodBranchInst:
    return SILValue();
  case TermKind::BranchInst:
    return cast<BranchInst>(TI)->getArg(Index);
  case TermKind::CondBranchInst:
    return cast<CondBranchInst>(TI)->getArgForDestBB(BB, Index);
  case TermKind::CheckedCastBranchInst:
    return cast<CheckedCastBranchInst>(TI)->getOperand();
  case TermKind::SwitchEnumInst:
    return cast<SwitchEnumInst>(TI)->getOperand();
  }
  llvm_unreachable("Unhandled TermKind?!");
}
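// Illustrative sketch (not part of the original pass): one way a caller might
// use getIncomingValueForPred to collect every incoming value for the block
// argument at `Index`, giving up if any predecessor has an unsupported
// terminator. The helper name `collectIncomingValues` is hypothetical.
static bool collectIncomingValues(SILBasicBlock *BB, unsigned Index,
                                  SmallVectorImpl<SILValue> &Incoming) {
  for (SILBasicBlock *Pred : BB->getPredecessorBlocks()) {
    SILValue V = getIncomingValueForPred(BB, Pred, Index);
    if (!V)
      return false; // Conservatively bail on unhandled terminators.
    Incoming.push_back(V);
  }
  return true;
}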
void StackAllocationPromoter::fixPhiPredBlock(BlockSet &PhiBlocks,
                                              SILBasicBlock *Dest,
                                              SILBasicBlock *Pred) {
  TermInst *TI = Pred->getTerminator();
  DEBUG(llvm::dbgs() << "*** Fixing the terminator " << TI << ".\n");

  SILValue Def = getLiveOutValue(PhiBlocks, Pred);

  DEBUG(llvm::dbgs() << "*** Found the definition: " << *Def);

  addArgumentToBranch(Def, Dest, TI);
  TI->eraseFromParent();
}
/// Emit dealloc_stack for all temporaries.
void PartialApplyCombiner::deallocateTemporaries() {
  // Insert dealloc_stack instructions at all function exit points.
  for (SILBasicBlock &BB : *PAI->getFunction()) {
    TermInst *Term = BB.getTerminator();
    if (!Term->isFunctionExiting())
      continue;

    for (auto Op : Tmps) {
      Builder.setInsertionPoint(Term);
      Builder.createDeallocStack(PAI->getLoc(), Op);
    }
  }
}
/// \brief Adds a new argument to an edge between a branch and a destination
/// block.
///
/// \param Branch The terminator to add the argument to.
/// \param Dest The destination block of the edge.
/// \param Val The value to add to the arguments of the branch.
/// \return The created branch. The old branch is deleted.
/// The argument is appended at the end of the argument tuple.
TermInst *swift::addNewEdgeValueToBranch(TermInst *Branch, SILBasicBlock *Dest,
                                         SILValue Val) {
  SILBuilderWithScope Builder(Branch);
  TermInst *NewBr = nullptr;

  if (CondBranchInst *CBI = dyn_cast<CondBranchInst>(Branch)) {
    SmallVector<SILValue, 8> TrueArgs;
    SmallVector<SILValue, 8> FalseArgs;

    for (auto A : CBI->getTrueArgs())
      TrueArgs.push_back(A);

    for (auto A : CBI->getFalseArgs())
      FalseArgs.push_back(A);

    if (Dest == CBI->getTrueBB()) {
      TrueArgs.push_back(Val);
      assert(TrueArgs.size() == Dest->getNumBBArg());
    }
    if (Dest == CBI->getFalseBB()) {
      FalseArgs.push_back(Val);
      assert(FalseArgs.size() == Dest->getNumBBArg());
    }

    NewBr = Builder.createCondBranch(CBI->getLoc(), CBI->getCondition(),
                                     CBI->getTrueBB(), TrueArgs,
                                     CBI->getFalseBB(), FalseArgs);
  } else if (BranchInst *BI = dyn_cast<BranchInst>(Branch)) {
    SmallVector<SILValue, 8> Args;

    for (auto A : BI->getArgs())
      Args.push_back(A);

    Args.push_back(Val);
    assert(Args.size() == Dest->getNumBBArg());
    NewBr = Builder.createBranch(BI->getLoc(), BI->getDestBB(), Args);
  } else {
    Branch->dump();
    // At the moment we can only add arguments to br and cond_br.
    llvm_unreachable("Can't add argument to terminator");
  }

  Branch->dropAllReferences();
  Branch->eraseFromParent();

  return NewBr;
}
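// Illustrative sketch (not part of the original utility): after a new
// argument has been appended to `Dest`, every predecessor edge has to pass a
// value for it. `getValueAvailableIn` is a hypothetical callback returning
// the value that is live in a given predecessor. Predecessors are copied
// first because addNewEdgeValueToBranch replaces each terminator.
static void passNewValueOnAllEdges(
    SILBasicBlock *Dest,
    llvm::function_ref<SILValue(SILBasicBlock *)> getValueAvailableIn) {
  SmallVector<SILBasicBlock *, 8> Preds;
  for (SILBasicBlock *Pred : Dest->getPredecessorBlocks())
    Preds.push_back(Pred);

  for (SILBasicBlock *Pred : Preds)
    addNewEdgeValueToBranch(Pred->getTerminator(), Dest,
                            getValueAvailableIn(Pred));
}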
bool StackPromoter::promote() {
  llvm::SetVector<SILBasicBlock *> ReachableBlocks;

  // First step: find blocks which end up in a no-return block (terminated by
  // an unreachable instruction).
  // Search for function-exiting blocks, i.e. return and throw.
  for (SILBasicBlock &BB : *F) {
    TermInst *TI = BB.getTerminator();
    if (TI->isFunctionExiting())
      ReachableBlocks.insert(&BB);
  }
  // Propagate the reachability up the control flow graph.
  unsigned Idx = 0;
  while (Idx < ReachableBlocks.size()) {
    SILBasicBlock *BB = ReachableBlocks[Idx++];
    for (SILBasicBlock *Pred : BB->getPredecessorBlocks())
      ReachableBlocks.insert(Pred);
  }

  bool Changed = false;
  // Search the whole function for stack promotable allocations.
  for (SILBasicBlock &BB : *F) {
    // Don't stack promote any allocation inside a code region which ends up
    // in a no-return block. Such allocations may be missing their final
    // release. We would insert the deallocation too early, which may result
    // in a use-after-free problem.
    if (ReachableBlocks.count(&BB) == 0)
      continue;

    for (auto Iter = BB.begin(); Iter != BB.end();) {
      // The allocation instruction may be moved, so increment Iter prior to
      // doing the optimization.
      SILInstruction *I = &*Iter++;
      if (auto *ARI = dyn_cast<AllocRefInst>(I)) {
        Changed |= tryPromoteAlloc(ARI);
      }
    }
  }
  return Changed;
}
/// We rotate a loop if it has the following properties.
///
/// * It has an exiting header with a conditional branch.
/// * It has a preheader (the function will try to create one for critical
///   edges from cond_br).
///
/// We will rotate at most up to the basic block passed as an argument.
/// We will not rotate a loop where the header is equal to the latch except
/// if RotateSingleBlockLoops is true.
///
/// Note: The code relies on the 'UpTo' basic block to stay within the rotate
/// loop for termination.
bool swift::rotateLoop(SILLoop *L, DominanceInfo *DT, SILLoopInfo *LI,
                       bool RotateSingleBlockLoops, SILBasicBlock *UpTo,
                       bool ShouldVerify) {
  assert(L != nullptr && DT != nullptr && LI != nullptr &&
         "Missing loop information");

  auto *Header = L->getHeader();
  if (!Header)
    return false;

  // We need a preheader - this is also a canonicalization for follow-up
  // passes.
  auto *Preheader = L->getLoopPreheader();
  if (!Preheader) {
    LLVM_DEBUG(llvm::dbgs() << *L << " no preheader\n");
    LLVM_DEBUG(L->getHeader()->getParent()->dump());
    return false;
  }

  if (!RotateSingleBlockLoops && (Header == UpTo || isSingleBlockLoop(L)))
    return false;

  assert(RotateSingleBlockLoops || L->getBlocks().size() != 1);

  // Need a conditional branch that guards the entry into the loop.
  auto *LoopEntryBranch = dyn_cast<CondBranchInst>(Header->getTerminator());
  if (!LoopEntryBranch)
    return false;

  // The header needs to exit the loop.
  if (!L->isLoopExiting(Header)) {
    LLVM_DEBUG(llvm::dbgs() << *L << " not an exiting header\n");
    LLVM_DEBUG(L->getHeader()->getParent()->dump());
    return false;
  }

  // We need a single backedge and the latch must not exit the loop if it is
  // also the header.
  auto *Latch = L->getLoopLatch();
  if (!Latch) {
    LLVM_DEBUG(llvm::dbgs() << *L << " no single latch\n");
    return false;
  }

  // Make sure we can duplicate the header.
  SmallVector<SILInstruction *, 8> MoveToPreheader;
  if (!canDuplicateOrMoveToPreheader(L, Preheader, Header, MoveToPreheader)) {
    LLVM_DEBUG(llvm::dbgs()
               << *L << " instructions in header preventing rotating\n");
    return false;
  }

  auto *NewHeader = LoopEntryBranch->getTrueBB();
  auto *Exit = LoopEntryBranch->getFalseBB();
  if (L->contains(Exit))
    std::swap(NewHeader, Exit);
  assert(L->contains(NewHeader) && !L->contains(Exit) &&
         "Could not find loop header and exit block");

  // We don't want to rotate such that we merge two headers of separate loops
  // into one. This can be turned into an assert again once we have guaranteed
  // preheader insertions.
  if (!NewHeader->getSinglePredecessorBlock() && Header != Latch)
    return false;

  // Now that we know we can perform the rotation - move the instructions that
  // need moving.
  for (auto *Inst : MoveToPreheader)
    Inst->moveBefore(Preheader->getTerminator());

  LLVM_DEBUG(llvm::dbgs() << " Rotating " << *L);

  // Map the values for the duplicated header block. We are duplicating the
  // header instructions into the end of the preheader.
  llvm::DenseMap<ValueBase *, SILValue> ValueMap;

  // The original 'phi' argument values are just the values coming from the
  // preheader edge.
  ArrayRef<SILArgument *> PHIs = Header->getArguments();
  OperandValueArrayRef PreheaderArgs =
      cast<BranchInst>(Preheader->getTerminator())->getArgs();
  assert(PHIs.size() == PreheaderArgs.size() &&
         "Basic block arguments and incoming edge mismatch");

  // Here we also store the value index to use into the value map (versus
  // non-argument values where the operand use decides which value index to
  // use).
  for (unsigned Idx = 0, E = PHIs.size(); Idx != E; ++Idx)
    ValueMap[PHIs[Idx]] = PreheaderArgs[Idx];

  // The other instructions are just cloned to the preheader.
  TermInst *PreheaderBranch = Preheader->getTerminator();
  for (auto &Inst : *Header) {
    if (SILInstruction *cloned = Inst.clone(PreheaderBranch)) {
      mapOperands(cloned, ValueMap);

      // The actual operand will sort out which result idx to use.
      auto instResults = Inst.getResults();
      auto clonedResults = cloned->getResults();
      assert(instResults.size() == clonedResults.size());
      for (auto i : indices(instResults))
        ValueMap[instResults[i]] = clonedResults[i];
    }
  }

  PreheaderBranch->dropAllReferences();
  PreheaderBranch->eraseFromParent();

  // If there were any uses of instructions in the duplicated loop entry check
  // block rewrite them using the ssa updater.
  rewriteNewLoopEntryCheckBlock(Header, Preheader, ValueMap);

  L->moveToHeader(NewHeader);

  // Now the original preheader dominates all of the header's children and the
  // original latch dominates the header.
  updateDomTree(DT, Preheader, Latch, Header);

  assert(DT->getNode(NewHeader)->getIDom() == DT->getNode(Preheader));
  assert(!DT->dominates(Header, Exit) ||
         DT->getNode(Exit)->getIDom() == DT->getNode(Preheader));
  assert(DT->getNode(Header)->getIDom() == DT->getNode(Latch) ||
         ((Header == Latch) &&
          DT->getNode(Header)->getIDom() == DT->getNode(Preheader)));

  // Beautify the IR. Move the old header to after the old latch as it is now
  // the latch.
  Header->moveAfter(Latch);

  // Merge the old latch with the old header if possible.
  mergeBasicBlockWithSuccessor(Latch, DT, LI);

  // Create a new preheader.
  splitIfCriticalEdge(Preheader, NewHeader, DT, LI);

  if (ShouldVerify) {
    DT->verify();
    LI->verify();
    Latch->getParent()->verify();
  }

  LLVM_DEBUG(llvm::dbgs() << " to " << *L);
  LLVM_DEBUG(L->getHeader()->getParent()->dump());
  return true;
}
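// Illustrative sketch (a hypothetical driver, not necessarily the pass's
// actual entry point): rotateLoop is typically re-applied until the loop is
// exited from the latch, using the original latch as the 'UpTo' bound so the
// iteration terminates, as described in the doc comment above.
static bool rotateLoopUpToLatch(SILLoop *L, DominanceInfo *DT, SILLoopInfo *LI,
                                bool ShouldVerify) {
  SILBasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  bool DidRotate = rotateLoop(L, DT, LI, /*RotateSingleBlockLoops=*/false,
                              Latch, ShouldVerify);

  // Keep rotating at most until we hit the original latch.
  if (DidRotate)
    while (rotateLoop(L, DT, LI, /*RotateSingleBlockLoops=*/false, Latch,
                      ShouldVerify)) {
    }

  return DidRotate;
}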
static bool constantFoldTerminator(SILBasicBlock &BB,
                                   UnreachableUserCodeReportingState *State) {
  TermInst *TI = BB.getTerminator();

  // Process conditional branches with constant conditions.
  if (CondBranchInst *CBI = dyn_cast<CondBranchInst>(TI)) {
    SILValue V = CBI->getCondition();
    SILInstruction *CondI = dyn_cast<SILInstruction>(V);
    SILLocation Loc = CBI->getLoc();

    if (IntegerLiteralInst *ConstCond =
            dyn_cast_or_null<IntegerLiteralInst>(CondI)) {
      SILBuilderWithScope B(&BB, CBI);

      // Determine which of the successors is unreachable and create a new
      // terminator that only branches to the reachable successor.
      SILBasicBlock *UnreachableBlock = nullptr;
      bool CondIsTrue = false;
      if (ConstCond->getValue() == APInt(1, /*value*/ 0, false)) {
        B.createBranch(Loc, CBI->getFalseBB(), CBI->getFalseArgs());
        UnreachableBlock = CBI->getTrueBB();
      } else {
        assert(ConstCond->getValue() == APInt(1, /*value*/ 1, false) &&
               "Our representation of true/false does not match.");
        B.createBranch(Loc, CBI->getTrueBB(), CBI->getTrueArgs());
        UnreachableBlock = CBI->getFalseBB();
        CondIsTrue = true;
      }
      recursivelyDeleteTriviallyDeadInstructions(TI, true);
      NumInstructionsRemoved++;

      // Produce an unreachable code warning for this basic block if it
      // contains user code (only if we are not within an inlined function or
      // a template instantiation).
      // FIXME: Do not report if we are within a template instantiation.
      if (Loc.is<RegularLocation>() && State &&
          !State->PossiblyUnreachableBlocks.count(UnreachableBlock)) {
        // If this is the first time we see this unreachable block, store it
        // along with the folded branch info.
        State->PossiblyUnreachableBlocks.insert(UnreachableBlock);
        State->MetaMap.insert(
            std::pair<const SILBasicBlock *, UnreachableInfo>(
                UnreachableBlock,
                UnreachableInfo{UnreachableKind::FoldedBranch, Loc,
                                CondIsTrue}));
      }

      NumTerminatorsFolded++;
      return true;
    }
  }

  // Constant fold switch enum.
  //   %1 = enum $Bool, #Bool.false!unionelt
  //   switch_enum %1 : $Bool, case #Bool.true!unionelt: bb1,
  //                           case #Bool.false!unionelt: bb2
  // =>
  //   br bb2
  if (SwitchEnumInst *SUI = dyn_cast<SwitchEnumInst>(TI)) {
    if (EnumInst *TheEnum = dyn_cast<EnumInst>(SUI->getOperand())) {
      const EnumElementDecl *TheEnumElem = TheEnum->getElement();
      SILBasicBlock *TheSuccessorBlock = nullptr;
      int ReachableBlockIdx = -1;
      for (unsigned Idx = 0; Idx < SUI->getNumCases(); ++Idx) {
        const EnumElementDecl *EI;
        SILBasicBlock *BI;
        std::tie(EI, BI) = SUI->getCase(Idx);
        if (EI == TheEnumElem) {
          TheSuccessorBlock = BI;
          ReachableBlockIdx = Idx;
          break;
        }
      }

      if (!TheSuccessorBlock)
        if (SUI->hasDefault()) {
          SILBasicBlock *DB = SUI->getDefaultBB();
          if (!isa<UnreachableInst>(DB->getTerminator())) {
            TheSuccessorBlock = DB;
            ReachableBlockIdx = SUI->getNumCases();
          }
        }

      // Switches that are not fully covered will be diagnosed later. SILGen
      // represents them with a default basic block containing an unreachable
      // instruction. We are going to produce an error on all unreachable
      // instructions not eliminated by DCE.
      if (!TheSuccessorBlock)
        return false;

      // Replace the switch with a branch to TheSuccessorBlock.
      SILBuilderWithScope B(&BB, TI);
      SILLocation Loc = TI->getLoc();
      if (!TheSuccessorBlock->bbarg_empty()) {
        assert(TheEnum->hasOperand());
        B.createBranch(Loc, TheSuccessorBlock, TheEnum->getOperand());
      } else
        B.createBranch(Loc, TheSuccessorBlock);

      // Produce diagnostic info if we are not within an inlined function or
      // template instantiation.
      // FIXME: Do not report if we are within a template instantiation.
      assert(ReachableBlockIdx >= 0);
      if (Loc.is<RegularLocation>() && State) {
        // Find the first unreachable block in the switch so that we could use
        // it for better diagnostics.
        SILBasicBlock *UnreachableBlock = nullptr;
        if (SUI->getNumCases() > 1) {
          // More than one case.
          UnreachableBlock = (ReachableBlockIdx == 0) ? SUI->getCase(1).second
                                                      : SUI->getCase(0).second;
        } else {
          if (SUI->getNumCases() == 1 && SUI->hasDefault()) {
            // One case and a default.
            UnreachableBlock = (ReachableBlockIdx == 0)
                                   ? SUI->getDefaultBB()
                                   : SUI->getCase(0).second;
          }
        }

        // Generate diagnostic info.
        if (UnreachableBlock &&
            !State->PossiblyUnreachableBlocks.count(UnreachableBlock)) {
          State->PossiblyUnreachableBlocks.insert(UnreachableBlock);
          State->MetaMap.insert(
              std::pair<const SILBasicBlock *, UnreachableInfo>(
                  UnreachableBlock,
                  UnreachableInfo{UnreachableKind::FoldedSwitchEnum, Loc,
                                  true}));
        }
      }

      recursivelyDeleteTriviallyDeadInstructions(TI, true);
      NumTerminatorsFolded++;
      return true;
    }
  }

  // Constant fold switch int.
  //   %1 = integer_literal $Builtin.Int64, 2
  //   switch_value %1 : $Builtin.Int64, case 1: bb1, case 2: bb2
  // =>
  //   br bb2
  if (SwitchValueInst *SUI = dyn_cast<SwitchValueInst>(TI)) {
    if (IntegerLiteralInst *SwitchVal =
            dyn_cast<IntegerLiteralInst>(SUI->getOperand())) {
      SILBasicBlock *TheSuccessorBlock = nullptr;
      for (unsigned Idx = 0; Idx < SUI->getNumCases(); ++Idx) {
        APInt AI;
        SILValue EI;
        SILBasicBlock *BI;
        std::tie(EI, BI) = SUI->getCase(Idx);
        // TODO: Check that EI is really an IntegerLiteralInst.
        AI = dyn_cast<IntegerLiteralInst>(EI)->getValue();
        if (AI == SwitchVal->getValue())
          TheSuccessorBlock = BI;
      }

      if (!TheSuccessorBlock)
        if (SUI->hasDefault())
          TheSuccessorBlock = SUI->getDefaultBB();

      // Add the branch instruction with the block.
      if (TheSuccessorBlock) {
        SILBuilderWithScope B(&BB, TI);
        B.createBranch(TI->getLoc(), TheSuccessorBlock);
        recursivelyDeleteTriviallyDeadInstructions(TI, true);
        NumTerminatorsFolded++;
        return true;
      }

      // TODO: Warn on unreachable user code here as well.
    }
  }

  return false;
}
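// Illustrative sketch (hypothetical driver, not the original pass pipeline):
// constantFoldTerminator folds a single terminator, so a caller would
// typically walk every block and repeat until a fixed point, since folding
// one branch can expose constant conditions reaching other terminators.
static bool constantFoldTerminators(SILFunction &F,
                                    UnreachableUserCodeReportingState *State) {
  bool EverChanged = false;
  bool Changed;
  do {
    Changed = false;
    for (SILBasicBlock &BB : F)
      Changed |= constantFoldTerminator(BB, State);
    EverChanged |= Changed;
  } while (Changed);
  return EverChanged;
}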
/// \brief Populate the body of the cloned closure, modifying instructions as
/// necessary. This is where we create the actual specialized BB Arguments.
void ClosureSpecCloner::populateCloned() {
  SILFunction *Cloned = getCloned();
  SILFunction *ClosureUser = CallSiteDesc.getApplyCallee();

  // Create arguments for the entry block.
  SILBasicBlock *ClosureUserEntryBB = &*ClosureUser->begin();
  SILBasicBlock *ClonedEntryBB = Cloned->createBasicBlock();

  SmallVector<SILValue, 4> entryArgs;
  entryArgs.reserve(ClosureUserEntryBB->getArguments().size());

  // Remove the closure argument.
  SILArgument *ClosureArg = nullptr;
  for (size_t i = 0, e = ClosureUserEntryBB->args_size(); i != e; ++i) {
    SILArgument *Arg = ClosureUserEntryBB->getArgument(i);
    if (i == CallSiteDesc.getClosureIndex()) {
      ClosureArg = Arg;
      entryArgs.push_back(SILValue());
      continue;
    }

    // Otherwise, create a new argument which copies the original argument.
    SILValue MappedValue =
        ClonedEntryBB->createFunctionArgument(Arg->getType(), Arg->getDecl());
    entryArgs.push_back(MappedValue);
  }

  // Next we need to add in any arguments that are not captured as arguments
  // to the cloned function.
  //
  // We do not insert the new mapped arguments into the value map since by
  // definition there is nothing in the partial apply user function that
  // references such arguments. After this pass is done the only thing that
  // will reference the arguments is the partial apply that we will create.
  SILFunction *ClosedOverFun = CallSiteDesc.getClosureCallee();
  auto ClosedOverFunConv = ClosedOverFun->getConventions();
  unsigned NumTotalParams = ClosedOverFunConv.getNumParameters();
  unsigned NumNotCaptured = NumTotalParams - CallSiteDesc.getNumArguments();
  llvm::SmallVector<SILValue, 4> NewPAIArgs;
  for (auto &PInfo : ClosedOverFunConv.getParameters().slice(NumNotCaptured)) {
    auto paramTy = ClosedOverFunConv.getSILType(PInfo);
    SILValue MappedValue = ClonedEntryBB->createFunctionArgument(paramTy);
    NewPAIArgs.push_back(MappedValue);
  }

  SILBuilder &Builder = getBuilder();
  Builder.setInsertionPoint(ClonedEntryBB);

  // Clone FRI and PAI, and replace usage of the removed closure argument
  // with the result of the cloned PAI.
  SILValue FnVal =
      Builder.createFunctionRef(CallSiteDesc.getLoc(), ClosedOverFun);
  auto *NewClosure = CallSiteDesc.createNewClosure(Builder, FnVal, NewPAIArgs);

  // Clone a chain of ConvertFunctionInsts. This can create further
  // reabstraction partial_apply instructions.
  SmallVector<PartialApplyInst *, 4> NeedsRelease;
  SILValue ConvertedCallee = cloneCalleeConversion(
      CallSiteDesc.getClosureCallerArg(), NewClosure, Builder, NeedsRelease);

  // Make sure that we actually emit the releases for reabstraction thunks. We
  // have guaranteed earlier that we only allow reabstraction thunks if the
  // closure was passed trivially.
  assert(NeedsRelease.empty() || CallSiteDesc.isTrivialNoEscapeParameter());

  entryArgs[CallSiteDesc.getClosureIndex()] = ConvertedCallee;

  // Visit original BBs in depth-first preorder, starting with the
  // entry block, cloning all instructions and terminators.
  cloneFunctionBody(ClosureUser, ClonedEntryBB, entryArgs);

  // Then insert a release in all non-failure exit BBs if our partial apply
  // was guaranteed. This is because it was passed at +0 originally and we
  // need to balance the initial increment of the newly created closure(s).
  bool ClosureHasRefSemantics = CallSiteDesc.closureHasRefSemanticContext();
  if ((CallSiteDesc.isClosureGuaranteed() ||
       CallSiteDesc.isTrivialNoEscapeParameter()) &&
      (ClosureHasRefSemantics || !NeedsRelease.empty())) {
    for (SILBasicBlock *BB : CallSiteDesc.getNonFailureExitBBs()) {
      SILBasicBlock *OpBB = getOpBasicBlock(BB);

      TermInst *TI = OpBB->getTerminator();
      auto Loc = CleanupLocation::get(NewClosure->getLoc());

      // If we have an exit, we place the release right before it so we know
      // that it will be executed at the end of the epilogue.
      if (TI->isFunctionExiting()) {
        Builder.setInsertionPoint(TI);
        if (ClosureHasRefSemantics)
          Builder.createReleaseValue(Loc, SILValue(NewClosure),
                                     Builder.getDefaultAtomicity());
        for (auto PAI : NeedsRelease)
          Builder.createReleaseValue(Loc, SILValue(PAI),
                                     Builder.getDefaultAtomicity());
        continue;
      }

      // We use casts where findAllNonFailureExitBBs should have made sure
      // that this is true. This will ensure that the code is updated when we
      // hit the cast failure in debug builds.
      auto *Unreachable = cast<UnreachableInst>(TI);
      auto PrevIter = std::prev(SILBasicBlock::iterator(Unreachable));
      auto NoReturnApply = FullApplySite::isa(&*PrevIter);

      // We insert the release value right before the no-return apply so that
      // if the partial apply is passed into the no-return function as an
      // @owned value, we will retain the partial apply before we release it
      // and potentially eliminate it.
      Builder.setInsertionPoint(NoReturnApply.getInstruction());
      if (ClosureHasRefSemantics)
        Builder.createReleaseValue(Loc, SILValue(NewClosure),
                                   Builder.getDefaultAtomicity());
      for (auto PAI : NeedsRelease)
        Builder.createReleaseValue(Loc, SILValue(PAI),
                                   Builder.getDefaultAtomicity());
    }
  }
}
// Attempt to insert a new access in the loop preheader. If successful, insert
// the new access in DominatedAccessAnalysis so it can be used to dominate
// other accesses. Also convert the current access to static and update the
// current storageToDomMap since the access may already have been recorded
// (when it was still dynamic).
//
// This function cannot add or remove instructions in the current block, but
// may add instructions to the current loop's preheader.
//
// The required conditions for inserting a new dominating access are:
//
// 1. The new preheader access is not enclosed in another scope that doesn't
// also enclose the current scope.
//
// This is inferred from the loop structure; any scope that encloses the
// preheader must also enclose the entire loop.
//
// 2. The current access is not enclosed in another scope that doesn't also
// enclose the preheader.
//
// As before, it is sufficient to check this access' isInner flags in
// DominatedAccessAnalysis; if this access isn't enclosed by any scope within
// the function, then it can't be enclosed within a scope inside the loop.
//
// 3. The current access has no nested conflict within its scope.
//
// 4. The access' source operand is available in the loop preheader.
void DominatedAccessRemoval::tryInsertLoopPreheaderAccess(
    BeginAccessInst *BAI, DomAccessedStorage currAccessInfo) {
  // 2. The current access may be enclosed.
  if (currAccessInfo.isInner())
    return;

  // 3. The current access must be instantaneous.
  if (!BAI->hasNoNestedConflict())
    return;

  SILLoop *currLoop = loopInfo->getLoopFor(BAI->getParent());
  if (!currLoop)
    return;
  SILBasicBlock *preheader = currLoop->getLoopPreheader();
  if (!preheader)
    return;

  // 4. The source operand must be available in the preheader.
  auto sourceOperand = BAI->getOperand();
  auto *sourceBB = sourceOperand->getParentBlock();
  if (!domInfo->dominates(sourceBB, preheader))
    return;

  // Insert a new access scope immediately before the preheader's terminator.
  TermInst *preheaderTerm = preheader->getTerminator();
  SILBuilderWithScope scopeBuilder(preheaderTerm);
  BeginAccessInst *newBegin = scopeBuilder.createBeginAccess(
      preheaderTerm->getLoc(), sourceOperand, BAI->getAccessKind(),
      SILAccessEnforcement::Dynamic, true /*no nested conflict*/,
      BAI->isFromBuiltin());
  scopeBuilder.createEndAccess(preheaderTerm->getLoc(), newBegin,
                               false /*aborted*/);
  LLVM_DEBUG(llvm::dbgs() << "Created loop preheader access: " << *newBegin
                          << "\n"
                          << "dominating: " << *BAI << "\n");
  BAI->setEnforcement(SILAccessEnforcement::Static);
  hasChanged = true;

  // Insert the new dominating instruction in both DominatedAccessAnalysis and
  // storageToDomMap if it has uniquely identifiable storage.
  if (!currAccessInfo.isUniquelyIdentifiedOrClass())
    return;

  AccessedStorage storage = static_cast<AccessedStorage>(currAccessInfo);
  storage.resetSubclassData();

  // Create a DomAccessedStorage for the new access with no flags set.
  DAA.accessMap.try_emplace(newBegin, DomAccessedStorage(storage));

  // Track the new access as long as no other accesses from the same storage
  // are already tracked. This also necessarily replaces the current access,
  // which was just made static.
  DominatingAccess newDomAccess(newBegin, domInfo->getNode(preheader));
  auto iterAndInserted = storageToDomMap.try_emplace(storage, newDomAccess);
  if (!iterAndInserted.second) {
    DominatingAccess &curDomAccess = iterAndInserted.first->second;
    if (curDomAccess.beginAccess == BAI)
      curDomAccess = newDomAccess;
  }
}
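// Illustrative before/after for tryInsertLoopPreheaderAccess above
// (schematic, simplified SIL; block and value names are made up). A dynamic,
// instantaneous access inside the loop is dominated by a new dynamic access
// in the preheader, and the loop-body access is downgraded to static:
//
//   preheader:
//     br loop_header
//   loop_body:
//     %a = begin_access [read] [dynamic] [no_nested_conflict] %addr
//     ...
//     end_access %a
// =>
//   preheader:
//     %p = begin_access [read] [dynamic] [no_nested_conflict] %addr
//     end_access %p
//     br loop_header
//   loop_body:
//     %a = begin_access [read] [static] [no_nested_conflict] %addr
//     ...
//     end_access %a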