/// We analyze the body of globalinit_func to see if it can be statically /// initialized. If yes, we set the initial value of the SILGlobalVariable and /// remove the "once" call to globalinit_func from the addressor. void SILGlobalOpt::optimizeInitializer(SILFunction *AddrF, GlobalInitCalls &Calls) { if (UnhandledOnceCallee) return; // Find the initializer and the SILGlobalVariable. BuiltinInst *CallToOnce; // If the addressor contains a single "once" call, it calls globalinit_func, // and the globalinit_func is called by "once" from a single location, // continue; otherwise bail. auto *InitF = findInitializer(Module, AddrF, CallToOnce); if (!InitF || !InitF->getName().startswith("globalinit_") || InitializerCount[InitF] > 1) return; // If the globalinit_func is trivial, continue; otherwise bail. auto *SILG = SILGlobalVariable::getVariableOfStaticInitializer(InitF); if (!SILG || !SILG->isDefinition()) return; DEBUG(llvm::dbgs() << "GlobalOpt: use static initializer for " << SILG->getName() << '\n'); // Remove "once" call from the addressor. if (!isAssignedOnlyOnceInInitializer(SILG) || !SILG->getDecl()) { removeToken(CallToOnce->getOperand(0)); CallToOnce->eraseFromParent(); SILG->setInitializer(InitF); HasChanged = true; return; } replaceLoadsByKnownValue(CallToOnce, AddrF, InitF, SILG, Calls); HasChanged = true; }
/// Fold a chain of identical bit operations with constant operands.
///
/// \p combine   accumulates a constant operand into the running constant.
/// \p isNeutral detects a constant that makes the operation a no-op.
/// \p isZero    detects a constant that forces a constant result.
/// Returns a replacement instruction, or nullptr if nothing was folded.
SILInstruction *optimizeBitOp(BuiltinInst *BI, CombineFunc combine,
                              NeutralFunc isNeutral, ZeroFunc isZero,
                              SILBuilder &Builder, SILCombiner *C) {
  SILValue rootOp;
  APInt bits;
  if (!getBitOpArgs(BI, rootOp, bits))
    return nullptr;

  // Walk up a chain of bit operations of the same kind and accumulate all of
  // their constant operands, e.g. ((op & c1) & c2) & c3.
  SILValue current = rootOp;
  for (;;) {
    auto *chained = dyn_cast<BuiltinInst>(current);
    if (!chained || chained->getBuiltinInfo().ID != BI->getBuiltinInfo().ID)
      break;
    APInt chainedBits;
    if (!getBitOpArgs(chained, current, chainedBits))
      break;
    combine(bits, chainedBits);
  }

  if (isNeutral(bits)) {
    // The bit operation has no effect, e.g. x | 0 -> x.
    C->replaceInstUsesWith(*BI, current);
    return BI;
  }

  if (isZero(bits))
    // The bit operation yields to a constant, e.g. x & 0 -> 0.
    return Builder.createIntegerLiteral(BI->getLoc(), BI->getType(), bits);

  if (current != rootOp) {
    // We combined multiple bit operations to a single one,
    // e.g. (x & c1) & c2 -> x & (c1 & c2).
    auto *foldedLiteral =
        Builder.createIntegerLiteral(BI->getLoc(), BI->getType(), bits);
    return Builder.createBuiltin(BI->getLoc(), BI->getName(), BI->getType(),
                                 BI->getSubstitutions(),
                                 { current, foldedLiteral });
  }
  return nullptr;
}
/// Checks whether the cond_br in the preheader's predecessor ensures that the /// loop is only executed if "Start < End". static bool isLessThanCheck(SILValue Start, SILValue End, CondBranchInst *CondBr, SILBasicBlock *Preheader) { BuiltinInst *BI = dyn_cast<BuiltinInst>(CondBr->getCondition()); if (!BI) return false; BuiltinValueKind Id = BI->getBuiltinInfo().ID; if (BI->getNumOperands() != 2) return false; SILValue LeftArg = BI->getOperand(0); SILValue RightArg = BI->getOperand(1); if (RightArg == Start) { std::swap(LeftArg, RightArg); Id = swapCmpID(Id); } if (LeftArg != Start || RightArg != End) return false; if (CondBr->getTrueBB() != Preheader) { assert(CondBr->getFalseBB() == Preheader); Id = invertCmpID(Id); } switch (Id) { case BuiltinValueKind::ICMP_SLT: case BuiltinValueKind::ICMP_ULT: return true; case BuiltinValueKind::ICMP_NE: // Special case: if it is a 0-to-count loop, we know that the count cannot // be negative. In this case the 'Start < End' check can also be done with // 'count != 0'. if (getZeroToCountArray(Start, End)) return true; return false; default: return false; } }
/// If necessary insert an overflow for this induction variable. /// If we compare for equality we need to make sure that the range does wrap. /// We would have trapped either when overflowing or when accessing an array /// out of bounds in the original loop. void checkOverflow(SILBuilder &Builder) { if (IsOverflowCheckInserted || Cmp != BuiltinValueKind::ICMP_EQ) return; auto Loc = Inc->getLoc(); auto ResultTy = SILType::getBuiltinIntegerType(1, Builder.getASTContext()); auto *CmpSGE = Builder.createBuiltinBinaryFunction( Loc, "cmp_sge", Start->getType(), ResultTy, {Start, End}); Builder.createCondFail(Loc, CmpSGE); IsOverflowCheckInserted = true; // We can now remove the cond fail on the increment the above comparison // guarantees that the addition won't overflow. auto *CondFail = isOverflowChecked(cast<BuiltinInst>(Inc)); if (CondFail) CondFail->eraseFromParent(); }
/// Simplify pointer_to_address:
///  * collapse (pointer_to_address (address_to_pointer %x)) round trips, and
///  * turn strided index_raw_pointer arithmetic into index_addr.
///
/// Fix: removed a dead function-scope 'SILValue TruncOrBitCast' declaration
/// that was immediately shadowed by the inner local of the same name and
/// never used (clang -Wshadow would flag it).
SILInstruction *
SILCombiner::
visitPointerToAddressInst(PointerToAddressInst *PTAI) {
  Builder.setCurrentDebugScope(PTAI->getDebugScope());
  // If we reach this point, we know that the types must be different since
  // otherwise simplifyInstruction would have handled the identity case. This is
  // always legal to do since address-to-pointer pointer-to-address implies
  // layout compatibility.
  //
  // (pointer-to-address (address-to-pointer %x)) -> (unchecked_addr_cast %x)
  if (auto *ATPI = dyn_cast<AddressToPointerInst>(PTAI->getOperand())) {
    return Builder.createUncheckedAddrCast(PTAI->getLoc(), ATPI->getOperand(),
                                           PTAI->getType());
  }

  // Turn this also into a index_addr. We generate this pattern after switching
  // the Word type to an explicit Int32 or Int64 in the stdlib.
  //
  // %101 = builtin "strideof_nonzero"<Int>(%84 : $@thick Int.Type) :
  //         $Builtin.Word
  // %102 = builtin "zextOrBitCast_Word_Int64"(%101 : $Builtin.Word) :
  //         $Builtin.Int64
  // %111 = builtin "smul_with_overflow_Int64"(%108 : $Builtin.Int64,
  //                               %102 : $Builtin.Int64, %20 : $Builtin.Int1) :
  //         $(Builtin.Int64, Builtin.Int1)
  // %112 = tuple_extract %111 : $(Builtin.Int64, Builtin.Int1), 0
  // %113 = builtin "truncOrBitCast_Int64_Word"(%112 : $Builtin.Int64) :
  //         $Builtin.Word
  // %114 = index_raw_pointer %100 : $Builtin.RawPointer, %113 : $Builtin.Word
  // %115 = pointer_to_address %114 : $Builtin.RawPointer to $*Int
  SILValue Distance;
  MetatypeInst *Metatype;
  IndexRawPointerInst *IndexRawPtr;
  BuiltinInst *StrideMul;
  if (match(PTAI->getOperand(), m_IndexRawPointerInst(IndexRawPtr))) {
    SILValue Ptr = IndexRawPtr->getOperand(0);
    SILValue TruncOrBitCast = IndexRawPtr->getOperand(1);
    // The index operand must be the truncation of the 0-element of an
    // smul_with_overflow whose other factor is the (extended) stride.
    if (match(TruncOrBitCast,
              m_ApplyInst(BuiltinValueKind::TruncOrBitCast,
                          m_TupleExtractInst(m_BuiltinInst(StrideMul), 0)))) {
      // Accept the distance on either side of the multiplication.
      if (match(StrideMul,
                m_ApplyInst(
                    BuiltinValueKind::SMulOver, m_SILValue(Distance),
                    m_ApplyInst(BuiltinValueKind::ZExtOrBitCast,
                                m_ApplyInst(BuiltinValueKind::StrideofNonZero,
                                            m_MetatypeInst(Metatype))))) ||
          match(StrideMul,
                m_ApplyInst(
                    BuiltinValueKind::SMulOver,
                    m_ApplyInst(BuiltinValueKind::ZExtOrBitCast,
                                m_ApplyInst(BuiltinValueKind::StrideofNonZero,
                                            m_MetatypeInst(Metatype))),
                    m_SILValue(Distance)))) {
        SILType InstanceType =
            Metatype->getType().getMetatypeInstanceType(PTAI->getModule());
        auto *Trunc = cast<BuiltinInst>(TruncOrBitCast);

        // Make sure that the type of the metatype matches the type that we are
        // casting to so we stride by the correct amount.
        if (InstanceType.getAddressType() != PTAI->getType()) {
          return nullptr;
        }

        auto *NewPTAI = Builder.createPointerToAddress(PTAI->getLoc(), Ptr,
                                                       PTAI->getType());
        // Re-emit the truncation on the raw distance so the index has the
        // expected word type.
        auto DistanceAsWord = Builder.createBuiltin(
            PTAI->getLoc(), Trunc->getName(), Trunc->getType(), {}, Distance);
        return Builder.createIndexAddr(PTAI->getLoc(), NewPTAI, DistanceAsWord);
      }
    }
  }

  // Turn:
  //
  //   %stride = Builtin.strideof(T) * %distance
  //   %ptr' = index_raw_pointer %ptr, %stride
  //   %result = pointer_to_address %ptr, $T'
  //
  // To:
  //
  //   %addr = pointer_to_address %ptr, $T
  //   %result = index_addr %addr, %distance
  //
  BuiltinInst *Bytes;
  if (match(PTAI->getOperand(),
            m_IndexRawPointerInst(
                m_ValueBase(),
                m_TupleExtractInst(m_BuiltinInst(Bytes), 0)))) {
    if (match(Bytes, m_ApplyInst(BuiltinValueKind::SMulOver, m_ValueBase(),
                                 m_ApplyInst(BuiltinValueKind::Strideof,
                                             m_MetatypeInst(Metatype)),
                                 m_ValueBase())) ||
        match(Bytes, m_ApplyInst(BuiltinValueKind::SMulOver, m_ValueBase(),
                                 m_ApplyInst(BuiltinValueKind::StrideofNonZero,
                                             m_MetatypeInst(Metatype)),
                                 m_ValueBase()))) {
      SILType InstanceType =
          Metatype->getType().getMetatypeInstanceType(PTAI->getModule());

      // Make sure that the type of the metatype matches the type that we are
      // casting to so we stride by the correct amount.
      if (InstanceType.getAddressType() != PTAI->getType())
        return nullptr;

      auto IRPI = cast<IndexRawPointerInst>(PTAI->getOperand().getDef());
      SILValue Ptr = IRPI->getOperand(0);
      SILValue Distance = Bytes->getArguments()[0];
      auto *NewPTAI =
          Builder.createPointerToAddress(PTAI->getLoc(), Ptr, PTAI->getType());
      return Builder.createIndexAddr(PTAI->getLoc(), NewPTAI, Distance);
    }
  }

  return nullptr;
}
/// Remove retain/release pairs around builtin "unsafeGuaranteed" instruction /// sequences. static bool removeGuaranteedRetainReleasePairs(SILFunction &F) { bool Changed = false; for (auto &BB : F) { auto It = BB.begin(), End = BB.end(); llvm::DenseMap<SILValue, SILInstruction *> LastRetain; while (It != End) { auto *CurInst = &*It; ++It; // Memorize the last retain. if (isa<StrongRetainInst>(CurInst) || isa<RetainValueInst>(CurInst)) { LastRetain[CurInst->getOperand(0)] = CurInst; continue; } // Look for a builtin "unsafeGuaranteed" instruction. auto *UnsafeGuaranteedI = dyn_cast<BuiltinInst>(CurInst); if (!UnsafeGuaranteedI || !UnsafeGuaranteedI->getBuiltinKind() || *UnsafeGuaranteedI->getBuiltinKind() != BuiltinValueKind::UnsafeGuaranteed) continue; auto Opd = UnsafeGuaranteedI->getOperand(0); if (!LastRetain.count(Opd)) { DEBUG(llvm::dbgs() << "LastRetain failed\n"); continue; } // This code is very conservative. Check that there is a matching retain // before the unsafeGuaranteed builtin with only retains inbetween. auto *LastRetainInst = LastRetain[Opd]; auto NextInstIter = std::next(SILBasicBlock::iterator(LastRetainInst)); while (NextInstIter != BB.end() && &*NextInstIter != CurInst && (isa<RetainValueInst>(*NextInstIter) || isa<StrongRetainInst>(*NextInstIter))) ++NextInstIter; if (&*NextInstIter != CurInst) { DEBUG(llvm::dbgs() << "Last retain right before match failed\n"); continue; } DEBUG(llvm::dbgs() << "Saw " << *UnsafeGuaranteedI); DEBUG(llvm::dbgs() << " with operand " << *Opd); // Match the reference and token result. 
// %4 = builtin "unsafeGuaranteed"<Foo>(%0 : $Foo) // %5 = tuple_extract %4 : $(Foo, Builtin.Int8), 0 // %6 = tuple_extract %4 : $(Foo, Builtin.Int8), 1 SILInstruction *UnsafeGuaranteedValue; SILInstruction *UnsafeGuaranteedToken; std::tie(UnsafeGuaranteedValue, UnsafeGuaranteedToken) = getSingleUnsafeGuaranteedValueResult(UnsafeGuaranteedI); if (!UnsafeGuaranteedValue) { DEBUG(llvm::dbgs() << " no single unsafeGuaranteed value use\n"); continue; } // Look for a builtin "unsafeGuaranteedEnd" instruction that uses the // token. // builtin "unsafeGuaranteedEnd"(%6 : $Builtin.Int8) : $() BuiltinInst *UnsafeGuaranteedEndI = nullptr; for (auto *Operand : getNonDebugUses(UnsafeGuaranteedToken)) { if (UnsafeGuaranteedEndI) { DEBUG(llvm::dbgs() << " multiple unsafeGuaranteedEnd users\n"); UnsafeGuaranteedEndI = nullptr; break; } auto *BI = dyn_cast<BuiltinInst>(Operand->getUser()); if (!BI || !BI->getBuiltinKind() || *BI->getBuiltinKind() != BuiltinValueKind::UnsafeGuaranteedEnd) { DEBUG(llvm::dbgs() << " wrong unsafeGuaranteed token user " << *Operand->getUser()); break; } UnsafeGuaranteedEndI = BI; } if (!UnsafeGuaranteedEndI) { DEBUG(llvm::dbgs() << " no single unsafeGuaranteedEnd use found\n"); continue; } if (SILBasicBlock::iterator(UnsafeGuaranteedEndI) == UnsafeGuaranteedEndI->getParent()->end()) continue; // Find the release to match with the unsafeGuaranteedValue. auto &UnsafeGuaranteedEndBB = *UnsafeGuaranteedEndI->getParent(); auto LastReleaseIt = findReleaseToMatchUnsafeGuaranteedValue( UnsafeGuaranteedEndI, UnsafeGuaranteedValue, UnsafeGuaranteedEndBB); if (LastReleaseIt == UnsafeGuaranteedEndBB.end()) { DEBUG(llvm::dbgs() << " no release before unsafeGuaranteedEnd found\n"); continue; } SILInstruction *LastRelease = &*LastReleaseIt; // Restart iteration before the earliest instruction we remove. 
bool RestartAtBeginningOfBlock = false; auto LastRetainIt = SILBasicBlock::iterator(LastRetainInst); if (LastRetainIt != BB.begin()) { It = std::prev(LastRetainIt); } else RestartAtBeginningOfBlock = true; // Okay we found a post dominating release. Let's remove the // retain/unsafeGuaranteed/release combo. // LastRetainInst->eraseFromParent(); LastRelease->eraseFromParent(); UnsafeGuaranteedEndI->eraseFromParent(); deleteAllDebugUses(UnsafeGuaranteedValue); deleteAllDebugUses(UnsafeGuaranteedToken); deleteAllDebugUses(UnsafeGuaranteedI); UnsafeGuaranteedValue->replaceAllUsesWith(Opd); UnsafeGuaranteedValue->eraseFromParent(); UnsafeGuaranteedToken->eraseFromParent(); UnsafeGuaranteedI->eraseFromParent(); if (RestartAtBeginningOfBlock) ++It = BB.begin(); Changed = true; } } return Changed; }