/// Clean up the alloc_box / project_box / load pattern that produced a
/// callee value, once the load \p LI is no longer needed after inlining.
///
/// On success, returns the value stored into the box (the underlying
/// callee); returns a null SILValue if the pattern has unexpected extra
/// uses and must be left in place.  On success the load, the store, the
/// project_box, the alloc_box, and any strong_release of the box are all
/// erased.
static SILValue cleanupLoadedCalleeValue(SILValue CalleeValue, LoadInst *LI) {
  auto *PBI = cast<ProjectBoxInst>(LI->getOperand());
  auto *ABI = cast<AllocBoxInst>(PBI->getOperand());

  // The load instruction must have no more uses left to erase it.
  if (!LI->use_empty())
    return SILValue();
  LI->eraseFromParent();

  // Look through uses of the alloc box the load is loading from to find up to
  // one store and up to one strong release.
  StrongReleaseInst *SRI = nullptr;
  for (Operand *ABIUse : ABI->getUses()) {
    if (SRI == nullptr && isa<StrongReleaseInst>(ABIUse->getUser())) {
      SRI = cast<StrongReleaseInst>(ABIUse->getUser());
      continue;
    }
    if (ABIUse->getUser() == PBI)
      continue;
    // Any other user of the box (second release, escape, etc.) blocks
    // the cleanup.
    return SILValue();
  }

  // The project_box may have at most one remaining user, and it must be a
  // store (the load was already erased above).
  StoreInst *SI = nullptr;
  for (Operand *PBIUse : PBI->getUses()) {
    if (SI == nullptr && isa<StoreInst>(PBIUse->getUser())) {
      SI = cast<StoreInst>(PBIUse->getUser());
      continue;
    }
    return SILValue();
  }

  // If we found a store, record its source and erase it.
  if (SI) {
    CalleeValue = SI->getSrc();
    SI->eraseFromParent();
  } else {
    CalleeValue = SILValue();
  }

  // If we found a strong release, replace it with a strong release of the
  // source of the store and erase it.
  if (SRI) {
    if (CalleeValue)
      SILBuilderWithScope(SRI).emitStrongReleaseAndFold(SRI->getLoc(),
                                                        CalleeValue);
    SRI->eraseFromParent();
  }

  // Everything feeding the box has been removed; the box itself must now
  // be dead.  Erase project_box before alloc_box (use before def).
  assert(PBI->use_empty());
  PBI->eraseFromParent();
  assert(ABI->use_empty());
  ABI->eraseFromParent();
  return CalleeValue;
}
SILInstruction* SILCombiner::visitAllocExistentialBoxInst(AllocExistentialBoxInst *AEBI) {
  // Fold away an existential box whose only non-debug users are a single
  // store of the payload and a single strong_release of the box.  This
  // pattern appears when an error value is created but, after inlining,
  // turns out never to be thrown:
  //
  //   %6 = alloc_existential_box $ErrorType, $ColorError
  //   %7 = enum $VendingMachineError, #ColorError.Red
  //   store %7 to %6#1 : $*ColorError
  //   debug_value %6#0 : $ErrorType
  //   strong_release %6#0 : $ErrorType
  StoreInst *TheStore = nullptr;
  StrongReleaseInst *TheRelease = nullptr;

  // Classify every non-debug user of the box.
  for (auto Use : getNonDebugUses(*AEBI)) {
    SILInstruction *User = Use->getUser();
    if (auto *Store = dyn_cast<StoreInst>(User)) {
      // A second store into the box defeats the pattern.
      if (TheStore)
        return nullptr;
      TheStore = Store;
    } else if (auto *Release = dyn_cast<StrongReleaseInst>(User)) {
      // A second release of the box defeats the pattern.
      if (TheRelease)
        return nullptr;
      TheRelease = Release;
    } else {
      // Any other user means the box may escape; leave it alone.
      return nullptr;
    }
  }

  // Only rewrite when the box is initialized exactly once and released
  // exactly once.
  if (!TheStore || !TheRelease)
    return nullptr;

  // The box is going away, so release the stored payload now, at the point
  // where the store used to initialize the box.
  Builder.setInsertionPoint(TheStore);
  Builder.createReleaseValue(AEBI->getLoc(), TheStore->getSrc());

  // Remove the release of the box, the store into it, and finally the box
  // allocation itself.
  eraseInstFromFunction(*TheRelease);
  eraseInstFromFunction(*TheStore);
  return eraseInstFromFunction(*AEBI);
}
/// Returns the callee SILFunction called at a call site, in the case
/// that the call is transparent (as in, both that the call is marked
/// with the transparent flag and that callee function is actually transparently
/// determinable from the SIL) or nullptr otherwise. This assumes that the SIL
/// is already in SSA form.
///
/// In the case that a non-null value is returned, FullArgs contains effective
/// argument operands for the callee function.
///
/// \param F            The caller function (used for serialization checks).
/// \param AI           The full apply site being examined.
/// \param IsThick      Out: set when the callee was reached through a
///                     partial_apply or thin_to_thick_function.
/// \param CaptureArgs  Out: arguments captured by a partial_apply, with
///                     their conventions.
/// \param FullArgs     Out: the apply's arguments plus any captures.
/// \param PartialApply Out: the partial_apply we looked through, if any.
static SILFunction *getCalleeFunction(
    SILFunction *F, FullApplySite AI, bool &IsThick,
    SmallVectorImpl<std::pair<SILValue, ParameterConvention>> &CaptureArgs,
    SmallVectorImpl<SILValue> &FullArgs, PartialApplyInst *&PartialApply) {
  // Reset all out-parameters before any analysis.
  IsThick = false;
  PartialApply = nullptr;
  CaptureArgs.clear();
  FullArgs.clear();

  for (const auto &Arg : AI.getArguments())
    FullArgs.push_back(Arg);

  SILValue CalleeValue = AI.getCallee();

  if (auto *LI = dyn_cast<LoadInst>(CalleeValue)) {
    // Conservatively only see through alloc_box; we assume this pass is run
    // immediately after SILGen
    auto *PBI = dyn_cast<ProjectBoxInst>(LI->getOperand());
    if (!PBI)
      return nullptr;
    auto *ABI = dyn_cast<AllocBoxInst>(PBI->getOperand());
    if (!ABI)
      return nullptr;
    // Ensure there are no other uses of alloc_box than the project_box and
    // retains, releases.
    for (Operand *ABIUse : ABI->getUses())
      if (ABIUse->getUser() != PBI &&
          !isa<StrongRetainInst>(ABIUse->getUser()) &&
          !isa<StrongReleaseInst>(ABIUse->getUser()))
        return nullptr;

    // Scan forward from the alloc box to find the first store, which
    // (conservatively) must be in the same basic block as the alloc box
    StoreInst *SI = nullptr;
    for (auto I = SILBasicBlock::iterator(ABI), E = I->getParent()->end();
         I != E; ++I) {
      // If we find the load instruction first, then the load is loading from
      // a non-initialized alloc; this shouldn't really happen but I'm not
      // making any assumptions
      if (&*I == LI)
        return nullptr;
      if ((SI = dyn_cast<StoreInst>(I)) && SI->getDest() == PBI) {
        // We found a store that we know dominates the load; now ensure there
        // are no other uses of the project_box except loads.
        for (Operand *PBIUse : PBI->getUses())
          if (PBIUse->getUser() != SI && !isa<LoadInst>(PBIUse->getUser()))
            return nullptr;
        // We can conservatively see through the store
        break;
      }
    }
    if (!SI)
      return nullptr;
    CalleeValue = SI->getSrc();
  }

  // PartialApply/ThinToThick -> ConvertFunction patterns are generated
  // by @noescape closures.
  //
  // FIXME: We don't currently handle mismatched return types, however, this
  // would be a good optimization to handle and would be as simple as inserting
  // a cast.
  auto skipFuncConvert = [](SILValue CalleeValue) {
    // We can also allow a thin @escape to noescape conversion as such:
    // %1 = function_ref @thin_closure_impl : $@convention(thin) () -> ()
    // %2 = convert_function %1 :
    //   $@convention(thin) () -> () to $@convention(thin) @noescape () -> ()
    // %3 = thin_to_thick_function %2 :
    //   $@convention(thin) @noescape () -> () to
    //   $@noescape @callee_guaranteed () -> ()
    // %4 = apply %3() : $@noescape @callee_guaranteed () -> ()
    if (auto *ThinToNoescapeCast = dyn_cast<ConvertFunctionInst>(CalleeValue)) {
      auto FromCalleeTy =
          ThinToNoescapeCast->getOperand()->getType().castTo<SILFunctionType>();
      if (FromCalleeTy->getExtInfo().hasContext())
        return CalleeValue;
      auto ToCalleeTy = ThinToNoescapeCast->getType().castTo<SILFunctionType>();
      auto EscapingCalleeTy = ToCalleeTy->getWithExtInfo(
          ToCalleeTy->getExtInfo().withNoEscape(false));
      if (FromCalleeTy != EscapingCalleeTy)
        return CalleeValue;
      return ThinToNoescapeCast->getOperand();
    }

    auto *CFI = dyn_cast<ConvertEscapeToNoEscapeInst>(CalleeValue);
    if (!CFI)
      return CalleeValue;

    // TODO: Handle argument conversion. All the code in this file needs to be
    // cleaned up and generalized. The argument conversion handling in
    // optimizeApplyOfConvertFunctionInst should apply to any combine
    // involving an apply, not just a specific pattern.
    //
    // For now, just handle conversion that doesn't affect argument types,
    // return types, or throws. We could trivially handle any other
    // representation change, but the only one that doesn't affect the ABI and
    // matters here is @noescape, so just check for that.
    auto FromCalleeTy = CFI->getOperand()->getType().castTo<SILFunctionType>();
    auto ToCalleeTy = CFI->getType().castTo<SILFunctionType>();
    auto EscapingCalleeTy =
        ToCalleeTy->getWithExtInfo(ToCalleeTy->getExtInfo().withNoEscape(false));
    if (FromCalleeTy != EscapingCalleeTy)
      return CalleeValue;
    return CFI->getOperand();
  };

  // Look through a escape to @noescape conversion.
  CalleeValue = skipFuncConvert(CalleeValue);

  // We are allowed to see through exactly one "partial apply" instruction or
  // one "thin to thick function" instructions, since those are the patterns
  // generated when using auto closures.
  if (auto *PAI = dyn_cast<PartialApplyInst>(CalleeValue)) {
    // Collect the applied arguments and their convention.
    collectPartiallyAppliedArguments(PAI, CaptureArgs, FullArgs);

    CalleeValue = PAI->getCallee();
    IsThick = true;
    PartialApply = PAI;
  } else if (auto *TTTFI = dyn_cast<ThinToThickFunctionInst>(CalleeValue)) {
    CalleeValue = TTTFI->getOperand();
    IsThick = true;
  }

  // A convert may appear on either side of the partial_apply/thin_to_thick.
  CalleeValue = skipFuncConvert(CalleeValue);

  auto *FRI = dyn_cast<FunctionRefInst>(CalleeValue);
  if (!FRI)
    return nullptr;

  SILFunction *CalleeFunction = FRI->getReferencedFunction();

  switch (CalleeFunction->getRepresentation()) {
  case SILFunctionTypeRepresentation::Thick:
  case SILFunctionTypeRepresentation::Thin:
  case SILFunctionTypeRepresentation::Method:
  case SILFunctionTypeRepresentation::Closure:
  case SILFunctionTypeRepresentation::WitnessMethod:
    break;

  // Foreign conventions cannot be inlined here.
  case SILFunctionTypeRepresentation::CFunctionPointer:
  case SILFunctionTypeRepresentation::ObjCMethod:
  case SILFunctionTypeRepresentation::Block:
    return nullptr;
  }

  // If the CalleeFunction is a not-transparent definition, we can not process
  // it.
  if (CalleeFunction->isTransparent() == IsNotTransparent)
    return nullptr;

  // If CalleeFunction is a declaration, see if we can load it.
  if (CalleeFunction->empty())
    AI.getModule().loadFunction(CalleeFunction);

  // If we fail to load it, bail.
  if (CalleeFunction->empty())
    return nullptr;

  if (F->isSerialized() &&
      !CalleeFunction->hasValidLinkageForFragileInline()) {
    if (!CalleeFunction->hasValidLinkageForFragileRef()) {
      llvm::errs() << "caller: " << F->getName() << "\n";
      llvm::errs() << "callee: " << CalleeFunction->getName() << "\n";
      llvm_unreachable("Should never be inlining a resilient function into "
                       "a fragile function");
    }
    return nullptr;
  }

  return CalleeFunction;
}
/// \brief Removes instructions that create the callee value if they are no
/// longer necessary after inlining.
///
/// Walks backward through the chain that produced \p CalleeValue
/// (load-from-alloc_box, partial_apply, thin_to_thick_function,
/// function_ref), erasing each link that has become dead.  Stops at the
/// first link that still has uses.  \p FullArgs is scanned first so that
/// trivially dead argument-producing instructions are cleaned up as well.
static void cleanupCalleeValue(SILValue CalleeValue,
                               ArrayRef<SILValue> CaptureArgs,
                               ArrayRef<SILValue> FullArgs) {
  SmallVector<SILInstruction*, 16> InstsToDelete;
  for (SILValue V : FullArgs) {
    // Don't delete the callee-producing instruction here; it is handled
    // by the dedicated cases below.
    if (SILInstruction *I = dyn_cast<SILInstruction>(V))
      if (I != CalleeValue.getDef() &&
          isInstructionTriviallyDead(I))
        InstsToDelete.push_back(I);
  }
  recursivelyDeleteTriviallyDeadInstructions(InstsToDelete, true);

  // Handle the case where the callee of the apply is a load instruction.
  if (LoadInst *LI = dyn_cast<LoadInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);
    SILInstruction *ABI = dyn_cast<AllocBoxInst>(LI->getOperand());
    // alloc_box result #1 is the address; the load must read from it.
    assert(ABI && LI->getOperand().getResultNumber() == 1);

    // The load instruction must have no more uses left to erase it.
    if (!LI->use_empty())
      return;
    LI->eraseFromParent();

    // Look through uses of the alloc box the load is loading from to find up to
    // one store and up to one strong release.
    StoreInst *SI = nullptr;
    StrongReleaseInst *SRI = nullptr;
    for (auto UI = ABI->use_begin(), UE = ABI->use_end(); UI != UE; ++UI) {
      if (SI == nullptr && isa<StoreInst>(UI.getUser())) {
        SI = cast<StoreInst>(UI.getUser());
        assert(SI->getDest() == SILValue(ABI, 1));
      } else if (SRI == nullptr && isa<StrongReleaseInst>(UI.getUser())) {
        SRI = cast<StrongReleaseInst>(UI.getUser());
        assert(SRI->getOperand() == SILValue(ABI, 0));
      } else
        // Any other user means the box cannot be removed.
        return;
    }

    // If we found a store, record its source and erase it.
    if (SI) {
      CalleeValue = SI->getSrc();
      SI->eraseFromParent();
    } else {
      CalleeValue = SILValue();
    }

    // If we found a strong release, replace it with a strong release of the
    // source of the store and erase it.
    if (SRI) {
      if (CalleeValue.isValid())
        SILBuilderWithScope(SRI)
            .emitStrongReleaseAndFold(SRI->getLoc(), CalleeValue);
      SRI->eraseFromParent();
    }

    assert(ABI->use_empty());
    ABI->eraseFromParent();
    // No store found: nothing further to chase.
    if (!CalleeValue.isValid())
      return;
  }

  // A dead partial_apply: continue cleanup with its callee.
  if (auto *PAI = dyn_cast<PartialApplyInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);

    SILValue Callee = PAI->getCallee();
    if (!tryDeleteDeadClosure(PAI))
      return;
    CalleeValue = Callee;
  }

  // A dead thin_to_thick_function: continue cleanup with its callee.
  if (auto *TTTFI = dyn_cast<ThinToThickFunctionInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);

    SILValue Callee = TTTFI->getCallee();
    if (!tryDeleteDeadClosure(TTTFI))
      return;
    CalleeValue = Callee;
  }

  // Finally, erase the function_ref itself if nothing uses it anymore.
  if (FunctionRefInst *FRI = dyn_cast<FunctionRefInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);

    if (!FRI->use_empty())
      return;
    FRI->eraseFromParent();
  }
}
/// \brief Returns the callee SILFunction called at a call site, in the case
/// that the call is transparent (as in, both that the call is marked
/// with the transparent flag and that callee function is actually transparently
/// determinable from the SIL) or nullptr otherwise. This assumes that the SIL
/// is already in SSA form.
///
/// In the case that a non-null value is returned, FullArgs contains effective
/// argument operands for the callee function.
///
/// \param AI           The full apply site being examined.
/// \param IsThick      Out: set when the callee was reached through a
///                     partial_apply or thin_to_thick_function.
/// \param CaptureArgs  Out: arguments captured by a partial_apply.
/// \param FullArgs     Out: the apply's arguments plus any captures.
/// \param PartialApply Out: the partial_apply we looked through, if any.
/// \param Mode         Linking mode used when deserializing the callee body.
static SILFunction *
getCalleeFunction(FullApplySite AI, bool &IsThick,
                  SmallVectorImpl<SILValue>& CaptureArgs,
                  SmallVectorImpl<SILValue>& FullArgs,
                  PartialApplyInst *&PartialApply,
                  SILModule::LinkingMode Mode) {
  // Reset all out-parameters before any analysis.
  IsThick = false;
  PartialApply = nullptr;
  CaptureArgs.clear();
  FullArgs.clear();

  for (const auto &Arg : AI.getArguments())
    FullArgs.push_back(Arg);
  SILValue CalleeValue = AI.getCallee();

  if (LoadInst *LI = dyn_cast<LoadInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);
    // Conservatively only see through alloc_box; we assume this pass is run
    // immediately after SILGen
    SILInstruction *ABI = dyn_cast<AllocBoxInst>(LI->getOperand());
    if (!ABI)
      return nullptr;
    // alloc_box result #1 is the payload address.
    assert(LI->getOperand().getResultNumber() == 1);

    // Scan forward from the alloc box to find the first store, which
    // (conservatively) must be in the same basic block as the alloc box
    StoreInst *SI = nullptr;
    for (auto I = SILBasicBlock::iterator(ABI), E = I->getParent()->end();
         I != E; ++I) {
      // If we find the load instruction first, then the load is loading from
      // a non-initialized alloc; this shouldn't really happen but I'm not
      // making any assumptions
      if (static_cast<SILInstruction*>(I) == LI)
        return nullptr;
      if ((SI = dyn_cast<StoreInst>(I)) && SI->getDest().getDef() == ABI) {
        // We found a store that we know dominates the load; now ensure there
        // are no other uses of the alloc other than loads, retains, releases
        // and dealloc stacks
        for (auto UI = ABI->use_begin(), UE = ABI->use_end(); UI != UE; ++UI)
          if (UI.getUser() != SI
              && !isa<LoadInst>(UI.getUser())
              && !isa<StrongRetainInst>(UI.getUser())
              && !isa<StrongReleaseInst>(UI.getUser()))
            return nullptr;
        // We can conservatively see through the store
        break;
      }
    }
    if (!SI)
      return nullptr;
    CalleeValue = SI->getSrc();
  }

  // We are allowed to see through exactly one "partial apply" instruction or
  // one "thin to thick function" instructions, since those are the patterns
  // generated when using auto closures.
  if (PartialApplyInst *PAI = dyn_cast<PartialApplyInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);

    // Applied arguments are both captures and effective call arguments.
    for (const auto &Arg : PAI->getArguments()) {
      CaptureArgs.push_back(Arg);
      FullArgs.push_back(Arg);
    }

    CalleeValue = PAI->getCallee();
    IsThick = true;
    PartialApply = PAI;
  } else if (ThinToThickFunctionInst *TTTFI =
                 dyn_cast<ThinToThickFunctionInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);
    CalleeValue = TTTFI->getOperand();
    IsThick = true;
  }

  FunctionRefInst *FRI = dyn_cast<FunctionRefInst>(CalleeValue);
  if (!FRI)
    return nullptr;

  SILFunction *CalleeFunction = FRI->getReferencedFunction();

  switch (CalleeFunction->getRepresentation()) {
  case SILFunctionTypeRepresentation::Thick:
  case SILFunctionTypeRepresentation::Thin:
  case SILFunctionTypeRepresentation::Method:
  case SILFunctionTypeRepresentation::WitnessMethod:
    break;

  // Foreign conventions cannot be inlined here.
  case SILFunctionTypeRepresentation::CFunctionPointer:
  case SILFunctionTypeRepresentation::ObjCMethod:
  case SILFunctionTypeRepresentation::Block:
    return nullptr;
  }

  // If CalleeFunction is a declaration, see if we can load it. If we fail to
  // load it, bail.
  if (CalleeFunction->empty()
      && !AI.getModule().linkFunction(CalleeFunction, Mode))
    return nullptr;
  return CalleeFunction;
}
/// Simplify the following two frontend patterns:
///
///   %payload_addr = init_enum_data_addr %payload_allocation
///   store %payload to %payload_addr
///   inject_enum_addr %payload_allocation, $EnumType.case
///
///   inject_enum_add %nopayload_allocation, $EnumType.case
///
/// for a concrete enum type $EnumType.case to:
///
///   %1 = enum $EnumType, $EnumType.case, %payload
///   store %1 to %payload_addr
///
///   %1 = enum $EnumType, $EnumType.case
///   store %1 to %nopayload_addr
///
/// We leave the cleaning up to mem2reg.
SILInstruction *
SILCombiner::visitInjectEnumAddrInst(InjectEnumAddrInst *IEAI) {
  // Given an inject_enum_addr of a concrete type without payload, promote it to
  // a store of an enum. Mem2reg/load forwarding will clean things up for us. We
  // can't handle the payload case here due to the flow problems caused by the
  // dependency in between the enum and its data.

  assert(IEAI->getOperand().getType().isAddress() && "Must be an address");
  Builder.setCurrentDebugScope(IEAI->getDebugScope());

  if (IEAI->getOperand().getType().isAddressOnly(IEAI->getModule())) {
    // Check for the following pattern inside the current basic block:
    // inject_enum_addr %payload_allocation, $EnumType.case1
    // ... no insns storing anything into %payload_allocation
    // select_enum_addr %payload_allocation,
    //       case $EnumType.case1: %Result1,
    //       case case $EnumType.case2: %bResult2
    //       ...
    //
    // Replace the select_enum_addr by %Result1

    auto *Term = IEAI->getParent()->getTerminator();
    if (isa<CondBranchInst>(Term) || isa<SwitchValueInst>(Term)) {
      // The select_enum_addr must be the instruction right before the
      // terminator.
      auto BeforeTerm = prev(prev(IEAI->getParent()->end()));
      auto *SEAI = dyn_cast<SelectEnumAddrInst>(BeforeTerm);
      if (!SEAI)
        return nullptr;

      if (SEAI->getOperand() != IEAI->getOperand())
        return nullptr;

      // Walk from the inject down to the select, bailing on anything that
      // could overwrite the enum allocation.
      SILBasicBlock::iterator II = IEAI->getIterator();
      StoreInst *SI = nullptr;
      for (;;) {
        SILInstruction *CI = &*II;
        if (CI == SEAI)
          break;
        ++II;
        SI = dyn_cast<StoreInst>(CI);
        if (SI) {
          if (SI->getDest() == IEAI->getOperand())
            return nullptr;
        }
        // Allow all instructions inbetween, which don't have any dependency to
        // the store.
        if (AA->mayWriteToMemory(&*II, IEAI->getOperand()))
          return nullptr;
      }

      // The injected case is statically known, so the select's answer is too.
      auto *InjectedEnumElement = IEAI->getElement();
      auto Result = SEAI->getCaseResult(InjectedEnumElement);

      // Replace select_enum_addr by the result
      replaceInstUsesWith(*SEAI, Result.getDef());
      return nullptr;
    }

    // Check for the following pattern inside the current basic block:
    // inject_enum_addr %payload_allocation, $EnumType.case1
    // ... no insns storing anything into %payload_allocation
    // switch_enum_addr %payload_allocation,
    //       case $EnumType.case1: %bbX,
    //       case case $EnumType.case2: %bbY
    //       ...
    //
    // Replace the switch_enum_addr by select_enum_addr, switch_value.
    if (auto *SEI = dyn_cast<SwitchEnumAddrInst>(Term)) {
      if (SEI->getOperand() != IEAI->getOperand())
        return nullptr;

      // Same scan as above, this time down to the switch terminator.
      SILBasicBlock::iterator II = IEAI->getIterator();
      StoreInst *SI = nullptr;
      for (;;) {
        SILInstruction *CI = &*II;
        if (CI == SEI)
          break;
        ++II;
        SI = dyn_cast<StoreInst>(CI);
        if (SI) {
          if (SI->getDest() == IEAI->getOperand())
            return nullptr;
        }
        // Allow all instructions inbetween, which don't have any dependency to
        // the store.
        if (AA->mayWriteToMemory(&*II, IEAI->getOperand()))
          return nullptr;
      }

      // Replace switch_enum_addr by a branch instruction.
      SILBuilderWithScope B(SEI);
      SmallVector<std::pair<EnumElementDecl *, SILValue>, 8> CaseValues;
      SmallVector<std::pair<SILValue, SILBasicBlock *>, 8> CaseBBs;

      // Map each enum case to a distinct i32 tag used by switch_value.
      auto IntTy = SILType::getBuiltinIntegerType(32, B.getASTContext());

      for (int i = 0, e = SEI->getNumCases(); i < e; ++i) {
        auto Pair = SEI->getCase(i);
        auto *IL = B.createIntegerLiteral(SEI->getLoc(), IntTy,
                                          APInt(32, i, false));
        SILValue ILValue = SILValue(IL);
        CaseValues.push_back(std::make_pair(Pair.first, ILValue));
        CaseBBs.push_back(std::make_pair(ILValue, Pair.second));
      }

      SILValue DefaultValue;
      SILBasicBlock *DefaultBB = nullptr;

      if (SEI->hasDefault()) {
        // The default gets the first tag past all explicit cases.
        auto *IL = B.createIntegerLiteral(SEI->getLoc(), IntTy,
                                          APInt(32, SEI->getNumCases(), false));
        DefaultValue = SILValue(IL);
        DefaultBB = SEI->getDefaultBB();
      }

      auto *SEAI = B.createSelectEnumAddr(SEI->getLoc(), SEI->getOperand(),
                                          IntTy, DefaultValue, CaseValues);

      B.createSwitchValue(SEI->getLoc(), SILValue(SEAI), DefaultBB, CaseBBs);

      return eraseInstFromFunction(*SEI);
    }

    return nullptr;
  }

  // If the enum does not have a payload create the enum/store since we don't
  // need to worry about payloads.
  if (!IEAI->getElement()->hasArgumentType()) {
    EnumInst *E =
        Builder.createEnum(IEAI->getLoc(), SILValue(), IEAI->getElement(),
                           IEAI->getOperand().getType().getObjectType());
    Builder.createStore(IEAI->getLoc(), E, IEAI->getOperand());
    return eraseInstFromFunction(*IEAI);
  }

  // Ok, we have a payload enum, make sure that we have a store previous to
  // us...
  SILBasicBlock::iterator II = IEAI->getIterator();
  StoreInst *SI = nullptr;
  InitEnumDataAddrInst *DataAddrInst = nullptr;
  for (;;) {
    if (II == IEAI->getParent()->begin())
      return nullptr;
    --II;
    SI = dyn_cast<StoreInst>(&*II);
    if (SI) {
      // Find a Store whose destination is taken from an init_enum_data_addr
      // whose address is same allocation as our inject_enum_addr.
      DataAddrInst = dyn_cast<InitEnumDataAddrInst>(SI->getDest().getDef());
      if (DataAddrInst && DataAddrInst->getOperand() == IEAI->getOperand())
        break;
    }
    // Allow all instructions inbetween, which don't have any dependency to
    // the store.
    if (AA->mayWriteToMemory(&*II, IEAI->getOperand()))
      return nullptr;
  }

  // Found the store to this enum payload. Check if the store is the only use.
  if (!DataAddrInst->hasOneUse())
    return nullptr;

  // In that case, create the payload enum/store.
  EnumInst *E =
      Builder.createEnum(DataAddrInst->getLoc(), SI->getSrc(),
                         DataAddrInst->getElement(),
                         DataAddrInst->getOperand().getType().getObjectType());
  Builder.createStore(DataAddrInst->getLoc(), E, DataAddrInst->getOperand());

  // Cleanup.
  eraseInstFromFunction(*SI);
  eraseInstFromFunction(*DataAddrInst);
  return eraseInstFromFunction(*IEAI);
}
/// Simplify the following two frontend patterns:
///
///   %payload_addr = init_enum_data_addr %payload_allocation
///   store %payload to %payload_addr
///   inject_enum_addr %payload_allocation, $EnumType.case
///
///   inject_enum_add %nopayload_allocation, $EnumType.case
///
/// for a concrete enum type $EnumType.case to:
///
///   %1 = enum $EnumType, $EnumType.case, %payload
///   store %1 to %payload_addr
///
///   %1 = enum $EnumType, $EnumType.case
///   store %1 to %nopayload_addr
///
/// We leave the cleaning up to mem2reg.
SILInstruction *
SILCombiner::visitInjectEnumAddrInst(InjectEnumAddrInst *IEAI) {
  // Given an inject_enum_addr of a concrete type without payload, promote it to
  // a store of an enum. Mem2reg/load forwarding will clean things up for us. We
  // can't handle the payload case here due to the flow problems caused by the
  // dependency in between the enum and its data.

  assert(IEAI->getOperand()->getType().isAddress() && "Must be an address");
  Builder.setCurrentDebugScope(IEAI->getDebugScope());

  if (IEAI->getOperand()->getType().isAddressOnly(IEAI->getModule())) {
    // Check for the following pattern inside the current basic block:
    // inject_enum_addr %payload_allocation, $EnumType.case1
    // ... no insns storing anything into %payload_allocation
    // select_enum_addr %payload_allocation,
    //       case $EnumType.case1: %Result1,
    //       case case $EnumType.case2: %bResult2
    //       ...
    //
    // Replace the select_enum_addr by %Result1

    auto *Term = IEAI->getParent()->getTerminator();
    if (isa<CondBranchInst>(Term) || isa<SwitchValueInst>(Term)) {
      // The select_enum_addr must be the instruction right before the
      // terminator.
      auto BeforeTerm = std::prev(std::prev(IEAI->getParent()->end()));
      auto *SEAI = dyn_cast<SelectEnumAddrInst>(BeforeTerm);
      if (!SEAI)
        return nullptr;

      if (SEAI->getOperand() != IEAI->getOperand())
        return nullptr;

      // Walk from the inject down to the select, bailing on anything that
      // could overwrite the enum allocation.
      SILBasicBlock::iterator II = IEAI->getIterator();
      StoreInst *SI = nullptr;
      for (;;) {
        SILInstruction *CI = &*II;
        if (CI == SEAI)
          break;
        ++II;
        SI = dyn_cast<StoreInst>(CI);
        if (SI) {
          if (SI->getDest() == IEAI->getOperand())
            return nullptr;
        }
        // Allow all instructions in between, which don't have any dependency to
        // the store.
        if (AA->mayWriteToMemory(&*II, IEAI->getOperand()))
          return nullptr;
      }

      // The injected case is statically known, so the select's answer is too.
      auto *InjectedEnumElement = IEAI->getElement();
      auto Result = SEAI->getCaseResult(InjectedEnumElement);

      // Replace select_enum_addr by the result
      replaceInstUsesWith(*SEAI, Result);
      return nullptr;
    }

    // Check for the following pattern inside the current basic block:
    // inject_enum_addr %payload_allocation, $EnumType.case1
    // ... no insns storing anything into %payload_allocation
    // switch_enum_addr %payload_allocation,
    //       case $EnumType.case1: %bbX,
    //       case case $EnumType.case2: %bbY
    //       ...
    //
    // Replace the switch_enum_addr by select_enum_addr, switch_value.
    if (auto *SEI = dyn_cast<SwitchEnumAddrInst>(Term)) {
      if (SEI->getOperand() != IEAI->getOperand())
        return nullptr;

      // Same scan as above, this time down to the switch terminator.
      SILBasicBlock::iterator II = IEAI->getIterator();
      StoreInst *SI = nullptr;
      for (;;) {
        SILInstruction *CI = &*II;
        if (CI == SEI)
          break;
        ++II;
        SI = dyn_cast<StoreInst>(CI);
        if (SI) {
          if (SI->getDest() == IEAI->getOperand())
            return nullptr;
        }
        // Allow all instructions in between, which don't have any dependency to
        // the store.
        if (AA->mayWriteToMemory(&*II, IEAI->getOperand()))
          return nullptr;
      }

      // Replace switch_enum_addr by a branch instruction.
      SILBuilderWithScope B(SEI);
      SmallVector<std::pair<EnumElementDecl *, SILValue>, 8> CaseValues;
      SmallVector<std::pair<SILValue, SILBasicBlock *>, 8> CaseBBs;

      // Map each enum case to a distinct i32 tag used by switch_value.
      auto IntTy = SILType::getBuiltinIntegerType(32, B.getASTContext());

      for (int i = 0, e = SEI->getNumCases(); i < e; ++i) {
        auto Pair = SEI->getCase(i);
        auto *IL = B.createIntegerLiteral(SEI->getLoc(), IntTy,
                                          APInt(32, i, false));
        SILValue ILValue = SILValue(IL);
        CaseValues.push_back(std::make_pair(Pair.first, ILValue));
        CaseBBs.push_back(std::make_pair(ILValue, Pair.second));
      }

      SILValue DefaultValue;
      SILBasicBlock *DefaultBB = nullptr;

      if (SEI->hasDefault()) {
        // The default gets the first tag past all explicit cases.
        auto *IL = B.createIntegerLiteral(
            SEI->getLoc(), IntTy,
            APInt(32, static_cast<uint64_t>(SEI->getNumCases()), false));
        DefaultValue = SILValue(IL);
        DefaultBB = SEI->getDefaultBB();
      }

      auto *SEAI = B.createSelectEnumAddr(SEI->getLoc(), SEI->getOperand(),
                                          IntTy, DefaultValue, CaseValues);

      B.createSwitchValue(SEI->getLoc(), SILValue(SEAI), DefaultBB, CaseBBs);

      return eraseInstFromFunction(*SEI);
    }

    return nullptr;
  }

  // If the enum does not have a payload create the enum/store since we don't
  // need to worry about payloads.
  if (!IEAI->getElement()->hasArgumentType()) {
    EnumInst *E =
        Builder.createEnum(IEAI->getLoc(), SILValue(), IEAI->getElement(),
                           IEAI->getOperand()->getType().getObjectType());
    Builder.createStore(IEAI->getLoc(), E, IEAI->getOperand(),
                        StoreOwnershipQualifier::Unqualified);
    return eraseInstFromFunction(*IEAI);
  }

  // Ok, we have a payload enum, make sure that we have a store previous to
  // us...
  SILValue ASO = IEAI->getOperand();
  // Only handle enums that live in an alloc_stack, so all users are visible.
  if (!isa<AllocStackInst>(ASO)) {
    return nullptr;
  }
  InitEnumDataAddrInst *DataAddrInst = nullptr;
  InjectEnumAddrInst *EnumAddrIns = nullptr;
  llvm::SmallPtrSet<SILInstruction *, 32> WriteSet;
  // Classify every user of the stack slot; find exactly one
  // init_enum_data_addr and exactly one inject_enum_addr.
  for (auto UsersIt : ASO->getUses()) {
    SILInstruction *CurrUser = UsersIt->getUser();
    if (CurrUser->isDeallocatingStack()) {
      // we don't care about the dealloc stack instructions
      continue;
    }
    if (isDebugInst(CurrUser) || isa<LoadInst>(CurrUser)) {
      // These Instructions are a non-risky use we can ignore
      continue;
    }
    if (auto *CurrInst = dyn_cast<InitEnumDataAddrInst>(CurrUser)) {
      if (DataAddrInst) {
        return nullptr;
      }
      DataAddrInst = CurrInst;
      continue;
    }
    if (auto *CurrInst = dyn_cast<InjectEnumAddrInst>(CurrUser)) {
      if (EnumAddrIns) {
        return nullptr;
      }
      EnumAddrIns = CurrInst;
      continue;
    }
    if (isa<StoreInst>(CurrUser)) {
      // The only MayWrite Instruction we can safely handle
      WriteSet.insert(CurrUser);
      continue;
    }
    // It is too risky to continue if it is any other instruction.
    return nullptr;
  }
  if (!DataAddrInst || !EnumAddrIns) {
    return nullptr;
  }
  assert((EnumAddrIns == IEAI) &&
         "Found InitEnumDataAddrInst differs from IEAI");
  // Found the DataAddrInst to this enum payload. Check if it has only use.
  if (!hasOneNonDebugUse(DataAddrInst))
    return nullptr;

  // The single user must be either a store of the payload or an apply that
  // initializes it indirectly.
  StoreInst *SI = dyn_cast<StoreInst>(getSingleNonDebugUser(DataAddrInst));
  ApplyInst *AI = dyn_cast<ApplyInst>(getSingleNonDebugUser(DataAddrInst));
  if (!SI && !AI) {
    return nullptr;
  }

  // Make sure the enum pattern instructions are the only ones which write to
  // this location
  if (!WriteSet.empty()) {
    // Analyze the instructions (implicit dominator analysis)
    // If we find any of MayWriteSet, return nullptr
    SILBasicBlock *InitEnumBB = DataAddrInst->getParent();
    assert(InitEnumBB && "DataAddrInst is not in a valid Basic Block");
    llvm::SmallVector<SILInstruction *, 64> Worklist;
    Worklist.push_back(IEAI);
    llvm::SmallPtrSet<SILBasicBlock *, 16> Preds;
    Preds.insert(IEAI->getParent());
    // Walk backward from the inject toward the init, making sure no
    // WriteSet store can appear on any path in between.
    while (!Worklist.empty()) {
      SILInstruction *CurrIns = Worklist.pop_back_val();
      SILBasicBlock *CurrBB = CurrIns->getParent();

      if (CurrBB->isEntry() && CurrBB != InitEnumBB) {
        // reached prologue without encountering the init bb
        return nullptr;
      }

      for (auto InsIt = ++CurrIns->getIterator().getReverse();
           InsIt != CurrBB->rend(); ++InsIt) {
        SILInstruction *Ins = &*InsIt;
        if (Ins == DataAddrInst) {
          // don't care about what comes before init enum in the basic block
          break;
        }
        if (WriteSet.count(Ins) != 0) {
          return nullptr;
        }
      }

      if (CurrBB == InitEnumBB) {
        continue;
      }

      // Go to predecessors and do all that again
      for (SILBasicBlock *Pred : CurrBB->getPredecessorBlocks()) {
        // If it's already in the set, then we've already queued and/or
        // processed the predecessors.
        if (Preds.insert(Pred).second) {
          Worklist.push_back(&*Pred->rbegin());
        }
      }
    }
  }

  if (SI) {
    assert((SI->getDest() == DataAddrInst) &&
           "Can't find StoreInst with DataAddrInst as its destination");

    // In that case, create the payload enum/store.
    EnumInst *E = Builder.createEnum(
        DataAddrInst->getLoc(), SI->getSrc(), DataAddrInst->getElement(),
        DataAddrInst->getOperand()->getType().getObjectType());
    Builder.createStore(DataAddrInst->getLoc(), E, DataAddrInst->getOperand(),
                        StoreOwnershipQualifier::Unqualified);
    // Cleanup.
    eraseInstFromFunction(*SI);
    eraseInstFromFunction(*DataAddrInst);
    return eraseInstFromFunction(*IEAI);
  }

  // Check whether we have an apply initializing the enum.
  //  %iedai = init_enum_data_addr %enum_addr
  //         = apply(%iedai,...)
  //  inject_enum_addr %enum_addr
  //
  // We can localize the store to an alloc_stack.
  // Allowing us to perform the same optimization as for the store.
  //
  //  %alloca = alloc_stack
  //            apply(%alloca,...)
  //  %load = load %alloca
  //  %1 = enum $EnumType, $EnumType.case, %load
  //  store %1 to %nopayload_addr
  //
  assert(AI && "Must have an apply");
  unsigned ArgIdx = 0;
  Operand *EnumInitOperand = nullptr;
  for (auto &Opd : AI->getArgumentOperands()) {
    // Found an apply that initializes the enum. We can optimize this by
    // localizing the initialization to an alloc_stack and loading from it.
    DataAddrInst = dyn_cast<InitEnumDataAddrInst>(Opd.get());
    if (DataAddrInst && DataAddrInst->getOperand() == IEAI->getOperand()
        && ArgIdx < AI->getSubstCalleeType()->getNumIndirectResults()) {
      EnumInitOperand = &Opd;
      break;
    }
    ++ArgIdx;
  }

  if (!EnumInitOperand) {
    return nullptr;
  }

  // Localize the address access: give the apply its own alloc_stack as the
  // indirect result slot, then load from it and build the enum directly.
  Builder.setInsertionPoint(AI);
  auto *AllocStack = Builder.createAllocStack(DataAddrInst->getLoc(),
                                              EnumInitOperand->get()->getType());
  EnumInitOperand->set(AllocStack);
  Builder.setInsertionPoint(std::next(SILBasicBlock::iterator(AI)));
  SILValue Load(Builder.createLoad(DataAddrInst->getLoc(), AllocStack,
                                   LoadOwnershipQualifier::Unqualified));
  EnumInst *E = Builder.createEnum(
      DataAddrInst->getLoc(), Load, DataAddrInst->getElement(),
      DataAddrInst->getOperand()->getType().getObjectType());
  Builder.createStore(DataAddrInst->getLoc(), E, DataAddrInst->getOperand(),
                      StoreOwnershipQualifier::Unqualified);
  Builder.createDeallocStack(DataAddrInst->getLoc(), AllocStack);
  eraseInstFromFunction(*DataAddrInst);
  return eraseInstFromFunction(*IEAI);
}