/// Given an aggregate value and an access path, extract the value indicated by /// the path. static SILValue ExtractSubElement(SILValue Val, unsigned SubElementNumber, SILBuilder &B, SILLocation Loc) { SILType ValTy = Val->getType(); // Extract tuple elements. if (auto TT = ValTy.getAs<TupleType>()) { for (unsigned EltNo : indices(TT.getElementTypes())) { // Keep track of what subelement is being referenced. SILType EltTy = ValTy.getTupleElementType(EltNo); unsigned NumSubElt = getNumSubElements(EltTy, B.getModule()); if (SubElementNumber < NumSubElt) { Val = B.emitTupleExtract(Loc, Val, EltNo, EltTy); return ExtractSubElement(Val, SubElementNumber, B, Loc); } SubElementNumber -= NumSubElt; } llvm_unreachable("Didn't find field"); } // Extract struct elements. if (auto *SD = getFullyReferenceableStruct(ValTy)) { for (auto *D : SD->getStoredProperties()) { auto fieldType = ValTy.getFieldType(D, B.getModule()); unsigned NumSubElt = getNumSubElements(fieldType, B.getModule()); if (SubElementNumber < NumSubElt) { Val = B.emitStructExtract(Loc, Val, D); return ExtractSubElement(Val, SubElementNumber, B, Loc); } SubElementNumber -= NumSubElt; } llvm_unreachable("Didn't find field"); } // Otherwise, we're down to a scalar. assert(SubElementNumber == 0 && "Miscalculation indexing subelements"); return Val; }
// Simplify: // %1 = unchecked_enum_data %0 : $Optional<C>, #Optional.Some!enumelt.1 // %2 = enum $Optional<C>, #Optional.Some!enumelt.1, %1 : $C // to %0 since we are building the same enum. static SILValue simplifyEnumFromUncheckedEnumData(EnumInst *EI) { assert(EI->hasOperand() && "Expected an enum with an operand!"); auto *UEDI = dyn_cast<UncheckedEnumDataInst>(EI->getOperand()); if (!UEDI || UEDI->getElement() != EI->getElement()) return SILValue(); SILValue EnumOp = UEDI->getOperand(); // Same enum elements don't necessarily imply same enum types. // Enum types may be different if the enum is generic, e.g. // E<Int>.Case and E<Double>.Case. SILType OriginalEnum = EnumOp->getType(); SILType NewEnum = EI->getType(); if (OriginalEnum != NewEnum) return SILValue(); return EnumOp; }
/// Check if we can pass/convert all arguments of the original apply /// as required by the found devirtualized method. static bool canPassOrConvertAllArguments(ApplySite AI, CanSILFunctionType SubstCalleeCanType) { for (unsigned ArgN = 0, ArgE = AI.getNumArguments(); ArgN != ArgE; ++ArgN) { SILValue A = AI.getArgument(ArgN); auto ParamType = SubstCalleeCanType->getSILArgumentType( SubstCalleeCanType->getNumSILArguments() - AI.getNumArguments() + ArgN); // Check if we can cast the provided argument into the required // parameter type. auto FromTy = A->getType(); auto ToTy = ParamType; // If types are the same, no conversion will be required. if (FromTy == ToTy) continue; // Otherwise, it should be possible to upcast the arguments. if (!isLegalUpcast(FromTy, ToTy)) return false; } return true; }
/// Strip off upcasts, single-predecessor block arguments, and ownership
/// instructions from \p v until a fixed point is reached.
SILValue swift::stripUpCasts(SILValue v) {
  assert(v->getType().isClassOrClassMetatype() &&
         "Expected class or class metatype!");

  for (v = stripSinglePredecessorArgs(v);;) {
    // Peel off one upcast at a time.
    if (auto *upcast = dyn_cast<UpcastInst>(v)) {
      v = upcast->getOperand();
      continue;
    }

    // Otherwise strip block args and ownership insts; stop once nothing
    // changes.
    SILValue stripped = stripOwnershipInsts(stripSinglePredecessorArgs(v));
    if (stripped == v)
      return v;
    v = stripped;
  }
}
/// Create an address projection of \p Base for this projection, or return
/// null if this projection cannot be represented as a single
/// address-projection instruction on \p Base.
NullablePtr<SILInstruction> Projection::
createAddrProjection(SILBuilder &B, SILLocation Loc, SILValue Base) const {
  // Grab Base's type.
  SILType BaseTy = Base.getType();

  // If BaseTy is not an address type, bail.
  if (!BaseTy.isAddress())
    return nullptr;

  // If this projection is associated with an object type, convert its type to
  // an address type.
  //
  // *NOTE* We purposely do not handle local storage types here since we want
  // to always fail in such a case. That is handled by checking that Ty is an
  // address.
  SILType Ty = Type.isObject() ? Type.getAddressType() : Type;

  if (!Ty.isAddress())
    return nullptr;

  // Ok, we now know that the type of Base and the type represented by the
  // base of this projection match and that this projection can be represented
  // as value. Create the instruction if we can. Otherwise, return nullptr.
  switch (getKind()) {
  case ProjectionKind::Struct:
    return B.createStructElementAddr(Loc, Base, cast<VarDecl>(getDecl()));
  case ProjectionKind::Tuple:
    return B.createTupleElementAddr(Loc, Base, getIndex());
  case ProjectionKind::Index: {
    // Named IntTy (not Ty) so we do not shadow the address type computed
    // above.
    auto IntTy = SILType::getBuiltinIntegerType(32, B.getASTContext());
    auto *IntLiteral = B.createIntegerLiteral(Loc, IntTy, getIndex());
    return B.createIndexAddr(Loc, Base, IntLiteral);
  }
  case ProjectionKind::Enum:
    return B.createUncheckedTakeEnumDataAddr(Loc, Base,
                                             cast<EnumElementDecl>(getDecl()));
  case ProjectionKind::Class:
    return B.createRefElementAddr(Loc, Base, cast<VarDecl>(getDecl()));
  }

  // Fully covered switch: reaching here means a corrupted ProjectionKind.
  llvm_unreachable("Unhandled ProjectionKind in switch.");
}
/// Assign a flattened list of values into (possibly nested tuple) storage at
/// \p destAddr, consuming values from the front of \p srcValues as leaves are
/// reached.
static void assignRecursive(SILGenFunction &gen, SILLocation loc,
                            CanType type, ArrayRef<ManagedValue> &srcValues,
                            SILValue destAddr) {
  auto srcTupleType = dyn_cast<TupleType>(type);

  // Leaf case: consume the next flattened value and assign it in place.
  if (!srcTupleType) {
    ManagedValue leaf = srcValues.front();
    srcValues = srcValues.slice(1);
    leaf.assignInto(gen, loc, destAddr);
    return;
  }

  // Tuple case: project each element address and recurse.
  assert(destAddr->getType().castTo<TupleType>()->getNumElements() ==
         srcTupleType->getNumElements());
  for (auto idx : indices(srcTupleType.getElementTypes())) {
    auto eltAddr = gen.B.createTupleElementAddr(loc, destAddr, idx);
    assignRecursive(gen, loc, srcTupleType.getElementType(idx), srcValues,
                    eltAddr);
  }
}
/// Bridge an NSString value to the native Swift String type by applying the
/// NSString-to-String bridging function.
///
/// \param nsstr the (possibly optional) NSString value; ownership is
///        forwarded into the bridging call.
/// \returns the bridged String with a fresh cleanup.
static ManagedValue emitBridgeNSStringToString(SILGenFunction &gen,
                                               SILLocation loc,
                                               ManagedValue nsstr) {
  SILValue bridgeFn =
      gen.emitGlobalFunctionRef(loc, gen.SGM.getNSStringToStringFn());

  Type inputType = nsstr.getType().getSwiftRValueType();
  // The bridging function takes an optional; wrap a non-optional input in
  // '.some' first.
  if (!inputType->getOptionalObjectType()) {
    SILType loweredOptTy = gen.SGM.getLoweredType(OptionalType::get(inputType));
    auto *someDecl = gen.getASTContext().getOptionalSomeDecl();
    auto *enumInst = gen.B.createEnum(loc, nsstr.getValue(), someDecl,
                                      loweredOptTy);
    // Keep the original cleanup: the enum just wraps the same payload value.
    nsstr = ManagedValue(enumInst, nsstr.getCleanup());
  }

  SILType nativeTy = gen.getLoweredType(gen.SGM.Types.getStringType());
  // Forward ownership of the NSString into the call; the returned String
  // gets its own cleanup below.
  SILValue str = gen.B.createApply(loc, bridgeFn, bridgeFn.getType(),
                                   nativeTy, {}, { nsstr.forward(gen) });

  return gen.emitManagedRValueWithCleanup(str);
}
/// Expand a release_value into its fully-lowered destroy sequence.
/// Returns true to indicate the instruction was expanded.
static bool expandReleaseValue(ReleaseValueInst *DV) {
  SILModule &Mod = DV->getModule();
  SILBuilderWithScope B(DV);

  // The operand's type drives how the destroy is lowered.
  SILValue Op = DV->getOperand();
  SILType OpTy = Op->getType();
  assert(OpTy.isLoadable(Mod) &&
         "release_value should never be called on a non-loadable type.");

  // Let the type lowering emit the expanded destroy.
  Mod.getTypeLowering(OpTy).emitLoweredDestroyValue(
      B, DV->getLoc(), Op, TypeLowering::LoweringStyle::Deep);

  DEBUG(llvm::dbgs() << " Expanding Destroy Value: " << *DV);

  ++NumExpand;
  return true;
}
/// Use the summary analysis to check whether a call to the given /// function would conflict with any in progress accesses. The starting /// index indicates what index into the the callee's parameters the /// arguments array starts at -- this is useful for partial_apply functions, /// which pass only a suffix of the callee's arguments at the apply site. static void checkForViolationWithCall( const StorageMap &Accesses, SILFunction *Callee, unsigned StartingAtIndex, OperandValueArrayRef Arguments, AccessSummaryAnalysis *ASA, llvm::SmallVectorImpl<ConflictingAccess> &ConflictingAccesses) { const AccessSummaryAnalysis::FunctionSummary &FS = ASA->getOrCreateSummary(Callee); // For each argument in the suffix of the callee arguments being passed // at this call site, determine whether the arguments will be accessed // in a way that conflicts with any currently in progress accesses. // If so, diagnose. for (unsigned ArgumentIndex : indices(Arguments)) { unsigned CalleeIndex = StartingAtIndex + ArgumentIndex; const AccessSummaryAnalysis::ArgumentSummary &AS = FS.getAccessForArgument(CalleeIndex); const auto &SubAccesses = AS.getSubAccesses(); // Is the capture accessed in the callee? if (SubAccesses.size() == 0) continue; SILValue Argument = Arguments[ArgumentIndex]; assert(Argument->getType().isAddress()); const AccessedStorage &Storage = findAccessedStorage(Argument); auto AccessIt = Accesses.find(Storage); // Are there any accesses in progress at the time of the call? if (AccessIt == Accesses.end()) continue; const AccessInfo &Info = AccessIt->getSecond(); if (auto Conflict = findConflictingArgumentAccess(AS, Storage, Info)) { ConflictingAccesses.push_back(*Conflict); } } }
/// Wrap \p value in Optional.some, emitting it directly into the optional
/// storage at \p dest (whose type is the address form of the optional).
void SILGenFunction::emitInjectOptionalValueInto(SILLocation loc,
                                                 ArgumentSource &&value,
                                                 SILValue dest,
                                                 const TypeLowering &optTL) {
  SILType optType = optTL.getLoweredType();
  assert(dest->getType() == optType.getAddressType());

  auto loweredPayloadTy = optType.getAnyOptionalObjectType();
  assert(loweredPayloadTy);

  auto someDecl = getASTContext().getOptionalSomeDecl();

  // Project out the payload area.
  auto payloadAddr = B.createInitEnumDataAddr(
      loc, dest, someDecl, loweredPayloadTy.getAddressType());

  // Emit the value into the payload area.
  TemporaryInitialization payloadInit(payloadAddr, CleanupHandle::invalid());
  std::move(value).forwardInto(*this, &payloadInit);

  // Inject the tag.
  B.createInjectEnumAddr(loc, dest, someDecl);
}
/// Try to replace a switch_value on a 1-bit builtin integer with a cond_br.
/// Returns the replacement instruction, or null if the pattern does not
/// match (non-boolean condition, non-literal case values, or a missing
/// destination).
SILInstruction *SILCombiner::visitSwitchValueInst(SwitchValueInst *SVI) {
  SILValue Cond = SVI->getOperand();
  // Only booleans (fixed-width-1 builtin integers) are handled.
  BuiltinIntegerType *CondTy = Cond->getType().getAs<BuiltinIntegerType>();
  if (!CondTy || !CondTy->isFixedWidth(1))
    return nullptr;

  // Collect the destinations for the literal values 0 and 1.
  SILBasicBlock *FalseBB = nullptr;
  SILBasicBlock *TrueBB = nullptr;
  for (unsigned Idx = 0, Num = SVI->getNumCases(); Idx < Num; ++Idx) {
    auto Case = SVI->getCase(Idx);
    // Every case value must be an integer literal for the rewrite to apply.
    auto *CaseVal = dyn_cast<IntegerLiteralInst>(Case.first);
    if (!CaseVal)
      return nullptr;
    SILBasicBlock *DestBB = Case.second;
    assert(DestBB->args_empty() &&
           "switch_value case destination cannot take arguments");
    if (CaseVal->getValue() == 0) {
      assert(!FalseBB && "double case value 0 in switch_value");
      FalseBB = DestBB;
    } else {
      assert(!TrueBB && "double case value 1 in switch_value");
      TrueBB = DestBB;
    }
  }

  // A default destination stands in for whichever of the two values had no
  // explicit case.
  if (SVI->hasDefault()) {
    assert(SVI->getDefaultBB()->args_empty() &&
           "switch_value default destination cannot take arguments");
    if (!FalseBB) {
      FalseBB = SVI->getDefaultBB();
    } else if (!TrueBB) {
      TrueBB = SVI->getDefaultBB();
    }
  }

  // cond_br needs both destinations.
  if (!FalseBB || !TrueBB)
    return nullptr;

  Builder.setCurrentDebugScope(SVI->getDebugScope());
  return Builder.createCondBranch(SVI->getLoc(), Cond, TrueBB, FalseBB);
}
/// For each argument in the range of the callee arguments being applied at the /// given apply site, use the summary analysis to determine whether the /// arguments will be accessed in a way that conflicts with any currently in /// progress accesses. If so, diagnose. static void checkCaptureAccess(ApplySite Apply, AccessState &State) { SILFunction *Callee = Apply.getCalleeFunction(); if (!Callee || Callee->empty()) return; const AccessSummaryAnalysis::FunctionSummary &FS = State.ASA->getOrCreateSummary(Callee); for (unsigned ArgumentIndex : range(Apply.getNumArguments())) { unsigned CalleeIndex = Apply.getCalleeArgIndexOfFirstAppliedArg() + ArgumentIndex; const AccessSummaryAnalysis::ArgumentSummary &AS = FS.getAccessForArgument(CalleeIndex); const auto &SubAccesses = AS.getSubAccesses(); // Is the capture accessed in the callee? if (SubAccesses.empty()) continue; SILValue Argument = Apply.getArgument(ArgumentIndex); assert(Argument->getType().isAddress()); // A valid AccessedStorage should alway sbe found because Unsafe accesses // are not tracked by AccessSummaryAnalysis. const AccessedStorage &Storage = findValidAccessedStorage(Argument); auto AccessIt = State.Accesses->find(Storage); // Are there any accesses in progress at the time of the call? if (AccessIt == State.Accesses->end()) continue; const AccessInfo &Info = AccessIt->getSecond(); if (auto Conflict = findConflictingArgumentAccess(AS, Storage, Info)) State.ConflictingAccesses.push_back(*Conflict); } }
/// Expand a retain_value into its fully-lowered copy sequence.
/// Returns true to indicate the instruction was expanded.
static bool expandRetainValue(RetainValueInst *CV) {
  SILModule &Mod = CV->getModule();
  SILBuilderWithScope B(CV);

  // The operand's type drives how the copy is lowered.
  SILValue Op = CV->getOperand();
  SILType OpTy = Op->getType();
  assert(OpTy.isLoadable(Mod) && "Copy Value can only be called on loadable "
                                 "types.");

  // Let the type lowering emit the expanded copy, skipping enum payloads.
  Mod.getTypeLowering(OpTy).emitLoweredCopyValue(
      B, CV->getLoc(), Op, TypeLowering::LoweringStyle::DeepNoEnum);

  DEBUG(llvm::dbgs() << " Expanding Copy Value: " << *CV);

  ++NumExpand;
  return true;
}
/// Extend the lifetime of the captured argument \p Arg so that it covers the
/// callee: retain it at the closure and release it at every instruction on
/// the post-dominating lifetime frontier.
void CallSiteDescriptor::extendArgumentLifetime(
    SILValue Arg, SILArgumentConvention ArgConvention) const {
  assert(!CInfo->LifetimeFrontier.empty() &&
         "Need a post-dominating release(s)");

  // Indirect non-inout arguments are not supported yet.
  assert(!isNonInoutIndirectSILArgument(Arg, ArgConvention));

  // Only object values are retained/released; addresses are left alone.
  if (!Arg->getType().isObject())
    return;

  SILBuilderWithScope Builder(getClosure());
  Builder.createRetainValue(getClosure()->getLoc(), Arg,
                            Builder.getDefaultAtomicity());
  for (auto *FrontierInst : CInfo->LifetimeFrontier) {
    Builder.setInsertionPoint(FrontierInst);
    Builder.createReleaseValue(getClosure()->getLoc(), Arg,
                               Builder.getDefaultAtomicity());
  }
}
/// Helper function for simplifying conversions between /// thick and objc metatypes. static SILInstruction * visitMetatypeConversionInst(SILBuilder &Builder, ConversionInst *MCI, MetatypeRepresentation Representation) { SILValue Op = MCI->getOperand(0); // Instruction has a proper target type already. SILType Ty = MCI->getType(); auto MetatypeTy = Op.getType().getAs<AnyMetatypeType>(); if (MetatypeTy->getRepresentation() != Representation) return nullptr; if (isa<MetatypeInst>(Op)) return Builder.createMetatype(MCI->getLoc(), Ty); if (auto *VMI = dyn_cast<ValueMetatypeInst>(Op)) return Builder.createValueMetatype(MCI->getLoc(), Ty, VMI->getOperand()); if (auto *EMI = dyn_cast<ExistentialMetatypeInst>(Op)) return Builder.createExistentialMetatype(MCI->getLoc(), Ty, EMI->getOperand()); return nullptr; }
/// Replace a destructure_struct/destructure_tuple with individual projection
/// instructions, one per result, then erase the original instruction.
static void splitDestructure(SILBuilder &B, SILInstruction *I, SILValue Op) {
  assert((isa<DestructureStructInst>(I) || isa<DestructureTupleInst>(I)) &&
         "Only destructure operations can be passed to splitDestructure");

  SILModule &M = I->getModule();
  SILLocation Loc = I->getLoc();

  // One first-level projection per destructure result.
  llvm::SmallVector<Projection, 8> Projections;
  Projection::getFirstLevelProjections(Op->getType(), M, Projections);
  assert(Projections.size() == I->getNumResults());

  llvm::SmallVector<SILValue, 8> NewValues;
  for (unsigned ResultIdx : indices(Projections)) {
    SILValue Projected =
        Projections[ResultIdx].createObjectProjection(B, Loc, Op).get();
    assert(Projected->getType() == I->getResults()[ResultIdx]->getType() &&
           "Expected created projections and results to be the same types");
    NewValues.push_back(Projected);
  }

  I->replaceAllUsesPairwiseWith(NewValues);
  I->eraseFromParent();
}
/// Create a value (object) projection of \p Base for this projection, or
/// return null if this projection kind has no value-projection form
/// (Index and Class projections are address-only).
NullablePtr<SILInstruction> Projection::
createValueProjection(SILBuilder &B, SILLocation Loc, SILValue Base) const {
  // Grab Base's type.
  SILType BaseTy = Base.getType();

  // If BaseTy is not an object type, bail.
  if (!BaseTy.isObject())
    return nullptr;

  // If this projection is associated with an address type, convert its type
  // to an object type.
  //
  // We explicitly do not convert Type to be an object if it is a local
  // storage type since we want it to fail.
  SILType Ty = Type.isAddress() ? Type.getObjectType() : Type;

  if (!Ty.isObject())
    return nullptr;

  // Ok, we now know that the type of Base and the type represented by the
  // base of this projection match and that this projection can be represented
  // as value. Create the instruction if we can. Otherwise, return nullptr.
  switch (getKind()) {
  case ProjectionKind::Struct:
    return B.createStructExtract(Loc, Base, cast<VarDecl>(getDecl()));
  case ProjectionKind::Tuple:
    return B.createTupleExtract(Loc, Base, getIndex());
  case ProjectionKind::Index:
    return nullptr;
  case ProjectionKind::Enum:
    return B.createUncheckedEnumData(Loc, Base,
                                     cast<EnumElementDecl>(getDecl()));
  case ProjectionKind::Class:
    return nullptr;
  }

  // Fully covered switch: reaching here means a corrupted ProjectionKind.
  llvm_unreachable("Unhandled ProjectionKind in switch.");
}
/// Perform a foreign error check by testing whether the call result is nil. static ManagedValue emitResultIsNilErrorCheck(SILGenFunction &gen, SILLocation loc, ManagedValue origResult, ManagedValue errorSlot, bool suppressErrorCheck) { // Take local ownership of the optional result value. SILValue optionalResult = origResult.forward(gen); OptionalTypeKind optKind; SILType resultObjectType = optionalResult->getType().getAnyOptionalObjectType(gen.SGM.M, optKind); ASTContext &ctx = gen.getASTContext(); // If we're suppressing the check, just do an unchecked take. if (suppressErrorCheck) { SILValue objectResult = gen.B.createUncheckedEnumData(loc, optionalResult, ctx.getOptionalSomeDecl(optKind)); return gen.emitManagedRValueWithCleanup(objectResult); } // Switch on the optional result. SILBasicBlock *errorBB = gen.createBasicBlock(FunctionSection::Postmatter); SILBasicBlock *contBB = gen.createBasicBlock(); gen.B.createSwitchEnum(loc, optionalResult, /*default*/ nullptr, { { ctx.getOptionalSomeDecl(optKind), contBB }, { ctx.getOptionalNoneDecl(optKind), errorBB } }); // Emit the error block. gen.emitForeignErrorBlock(loc, errorBB, errorSlot); // In the continuation block, take ownership of the now non-optional // result value. gen.B.emitBlock(contBB); SILValue objectResult = contBB->createBBArg(resultObjectType); return gen.emitManagedRValueWithCleanup(objectResult); }
/// Emit a reference to a sibling method of the current function, specialized
/// with the given substitutions. Returns the (unmanaged) method value and its
/// substituted SIL type.
std::tuple<ManagedValue, SILType>
SILGenFunction::emitSiblingMethodRef(SILLocation loc, SILValue selfValue,
                                     SILDeclRef methodConstant,
                                     const SubstitutionMap &subMap) {
  // Dynamic methods go through runtime-hookable virtual dispatch (viz.
  // objc_msgSend for now); everything else is a direct function reference.
  bool isDynamicMethod =
      methodConstant.hasDecl() && methodConstant.getDecl()->isDynamic();

  SILValue methodValue;
  if (isDynamicMethod)
    methodValue = emitDynamicMethodRef(
        loc, methodConstant, SGM.Types.getConstantInfo(methodConstant));
  else
    methodValue = emitGlobalFunctionRef(loc, methodConstant);

  // Specialize the generic method.
  SILType methodTy = methodValue->getType().substGenericArgs(SGM.M, subMap);

  return std::make_tuple(ManagedValue::forUnmanaged(methodValue), methodTy);
}
/// Emit the allocating entry point for a class constructor: allocate the
/// 'self' object (dynamically, for convenience/Clang-imported inits),
/// forward the constructor arguments to the initializing entry point, and
/// return the initialized instance.
void SILGenFunction::emitClassConstructorAllocator(ConstructorDecl *ctor) {
  assert(!ctor->isFactoryInit() && "factories should not be emitted here");

  // Emit the prolog. Since we're just going to forward our args directly
  // to the initializer, don't allocate local variables for them.
  RegularLocation Loc(ctor);
  Loc.markAutoGenerated();

  // Forward the constructor arguments.
  // FIXME: Handle 'self' along with the other body patterns.
  SmallVector<SILValue, 8> args;
  bindParametersForForwarding(ctor->getParameterList(1), args);

  SILValue selfMetaValue = emitConstructorMetatypeArg(*this, ctor);

  // Allocate the "self" value.
  VarDecl *selfDecl = ctor->getImplicitSelfDecl();
  SILType selfTy = getLoweredType(selfDecl->getType());
  assert(selfTy.hasReferenceSemantics() &&
         "can't emit a value type ctor here");

  // Use alloc_ref to allocate the object.
  // TODO: allow custom allocation?
  // FIXME: should have a cleanup in case of exception
  auto selfClassDecl =
      ctor->getDeclContext()->getAsClassOrClassExtensionContext();

  SILValue selfValue;

  // Allocate the 'self' value.
  bool useObjCAllocation = usesObjCAllocator(selfClassDecl);

  if (ctor->isConvenienceInit() || ctor->hasClangNode()) {
    // For a convenience initializer or an initializer synthesized
    // for an Objective-C class, allocate using the metatype.
    SILValue allocArg = selfMetaValue;

    // When using Objective-C allocation, convert the metatype
    // argument to an Objective-C metatype.
    if (useObjCAllocation) {
      auto metaTy = allocArg->getType().castTo<MetatypeType>();
      metaTy = CanMetatypeType::get(metaTy.getInstanceType(),
                                    MetatypeRepresentation::ObjC);
      allocArg = B.createThickToObjCMetatype(Loc, allocArg,
                                             getLoweredType(metaTy));
    }

    selfValue = B.createAllocRefDynamic(Loc, allocArg, selfTy,
                                        useObjCAllocation, {}, {});
  } else {
    // For a designated initializer, we know that the static type being
    // allocated is the type of the class that defines the designated
    // initializer.
    selfValue = B.createAllocRef(Loc, selfTy, useObjCAllocation, false,
                                 {}, {});
  }
  args.push_back(selfValue);

  // Call the initializer. Always use the Swift entry point, which will be a
  // bridging thunk if we're calling ObjC.
  SILDeclRef initConstant =
      SILDeclRef(ctor, SILDeclRef::Kind::Initializer,
                 SILDeclRef::ConstructAtBestResilienceExpansion,
                 SILDeclRef::ConstructAtNaturalUncurryLevel,
                 /*isObjC=*/false);

  ManagedValue initVal;
  SILType initTy;
  ArrayRef<Substitution> subs;

  // Call the initializer.
  ArrayRef<Substitution> forwardingSubs;
  if (auto *genericEnv = ctor->getGenericEnvironmentOfContext()) {
    auto *genericSig = ctor->getGenericSignatureOfContext();
    forwardingSubs = genericEnv->getForwardingSubstitutions(
        SGM.SwiftModule, genericSig);
  }
  std::tie(initVal, initTy, subs) =
      emitSiblingMethodRef(Loc, selfValue, initConstant, forwardingSubs);

  SILValue initedSelfValue = emitApplyWithRethrow(Loc, initVal.forward(*this),
                                                  initTy, subs, args);

  // Return the initialized 'self'.
  B.createReturn(ImplicitReturnLocation::getImplicitReturnLoc(Loc),
                 initedSelfValue);
}
/// Erase a concrete value (produced on demand by \p F) into an existential of
/// the type described by \p existentialTL, using \p conformances.
///
/// When erasing to 'Error' with ObjC interop enabled, several fast paths try
/// to obtain an NSError representation directly (exact NSError subclass,
/// _BridgedStoredNSError conformance, or a runtime probe for an embedded
/// NSError in an archetype). Otherwise the value is erased according to the
/// existential's preferred representation.
ManagedValue SILGenFunction::emitExistentialErasure(
    SILLocation loc, CanType concreteFormalType,
    const TypeLowering &concreteTL, const TypeLowering &existentialTL,
    ArrayRef<ProtocolConformanceRef> conformances, SGFContext C,
    llvm::function_ref<ManagedValue(SGFContext)> F,
    bool allowEmbeddedNSError) {
  // Mark the needed conformances as used.
  for (auto conformance : conformances)
    SGM.useConformance(conformance);

  // If we're erasing to the 'Error' type, we might be able to get an NSError
  // representation more efficiently.
  auto &ctx = getASTContext();
  if (ctx.LangOpts.EnableObjCInterop && conformances.size() == 1 &&
      conformances[0].getRequirement() == ctx.getErrorDecl() &&
      ctx.getNSErrorDecl()) {
    auto nsErrorDecl = ctx.getNSErrorDecl();

    // If the concrete type is NSError or a subclass thereof, just erase it
    // directly.
    auto nsErrorType = nsErrorDecl->getDeclaredType()->getCanonicalType();
    if (nsErrorType->isExactSuperclassOf(concreteFormalType, nullptr)) {
      ManagedValue nsError = F(SGFContext());
      if (nsErrorType != concreteFormalType) {
        nsError = ManagedValue(B.createUpcast(loc, nsError.getValue(),
                                              getLoweredType(nsErrorType)),
                               nsError.getCleanup());
      }
      return emitBridgedToNativeError(loc, nsError);
    }

    // If the concrete type is known to conform to _BridgedStoredNSError,
    // call the _nsError witness getter to extract the NSError directly,
    // then just erase the NSError.
    if (auto storedNSErrorConformance =
            SGM.getConformanceToBridgedStoredNSError(loc, concreteFormalType)) {
      auto nsErrorVar = SGM.getNSErrorRequirement(loc);
      if (!nsErrorVar) return emitUndef(loc, existentialTL.getLoweredType());

      SubstitutionList nsErrorVarSubstitutions;

      // Devirtualize. Maybe this should be done implicitly by
      // emitPropertyLValue?
      if (storedNSErrorConformance->isConcrete()) {
        if (auto witnessVar = storedNSErrorConformance->getConcrete()
                                  ->getWitness(nsErrorVar, nullptr)) {
          nsErrorVar = cast<VarDecl>(witnessVar.getDecl());
          nsErrorVarSubstitutions = witnessVar.getSubstitutions();
        }
      }

      auto nativeError = F(SGFContext());

      // Load the _nsError property of the native error value.
      WritebackScope writebackScope(*this);
      auto nsError =
          emitRValueForPropertyLoad(loc, nativeError, concreteFormalType,
                                    /*super*/ false, nsErrorVar,
                                    nsErrorVarSubstitutions,
                                    AccessSemantics::Ordinary, nsErrorType,
                                    SGFContext())
              .getAsSingleValue(*this, loc);

      return emitBridgedToNativeError(loc, nsError);
    }

    // Otherwise, if it's an archetype, try calling the _getEmbeddedNSError()
    // witness to try to dig out the embedded NSError. But don't do this
    // when we're being called recursively.
    if (isa<ArchetypeType>(concreteFormalType) && allowEmbeddedNSError) {
      auto contBB = createBasicBlock();
      auto isNotPresentBB = createBasicBlock();
      auto isPresentBB = createBasicBlock();

      // Call swift_stdlib_getErrorEmbeddedNSError to attempt to extract an
      // NSError from the value.
      auto getEmbeddedNSErrorFn = SGM.getGetErrorEmbeddedNSError(loc);
      if (!getEmbeddedNSErrorFn)
        return emitUndef(loc, existentialTL.getLoweredType());

      Substitution getEmbeddedNSErrorSubstitutions[1] = {
        Substitution(concreteFormalType, conformances)
      };

      ManagedValue concreteValue = F(SGFContext());
      ManagedValue potentialNSError =
          emitApplyOfLibraryIntrinsic(loc, getEmbeddedNSErrorFn,
                                      getEmbeddedNSErrorSubstitutions,
                                      { concreteValue.copy(*this, loc) },
                                      SGFContext())
              .getAsSingleValue(*this, loc);

      // We're going to consume 'concreteValue' in exactly one branch,
      // so kill its cleanup now and recreate it on both branches.
      (void) concreteValue.forward(*this);

      // Check whether we got an NSError back.
      std::pair<EnumElementDecl*, SILBasicBlock*> cases[] = {
        { ctx.getOptionalSomeDecl(), isPresentBB },
        { ctx.getOptionalNoneDecl(), isNotPresentBB }
      };
      B.createSwitchEnum(loc, potentialNSError.forward(*this),
                         /*default*/ nullptr, cases);

      // If we did get an NSError, emit the existential erasure from that
      // NSError.
      B.emitBlock(isPresentBB);
      SILValue branchArg;
      {
        // Don't allow cleanups to escape the conditional block.
        FullExpr presentScope(Cleanups, CleanupLocation::get(loc));

        enterDestroyCleanup(concreteValue.getValue());

        // Receive the error value. It's typed as an 'AnyObject' for
        // layering reasons, so perform an unchecked cast down to NSError.
        SILType anyObjectTy =
            potentialNSError.getType().getAnyOptionalObjectType();
        SILValue nsError = isPresentBB->createPHIArgument(
            anyObjectTy, ValueOwnershipKind::Owned);
        nsError = B.createUncheckedRefCast(loc, nsError,
                                           getLoweredType(nsErrorType));

        branchArg = emitBridgedToNativeError(
                        loc, emitManagedRValueWithCleanup(nsError))
                        .forward(*this);
      }
      B.createBranch(loc, contBB, branchArg);

      // If we did not get an NSError, just directly emit the existential.
      // Since this is a recursive call, make sure we don't end up in this
      // path again.
      B.emitBlock(isNotPresentBB);
      {
        FullExpr presentScope(Cleanups, CleanupLocation::get(loc));
        concreteValue = emitManagedRValueWithCleanup(concreteValue.getValue());
        branchArg = emitExistentialErasure(loc, concreteFormalType, concreteTL,
                                           existentialTL, conformances,
                                           SGFContext(),
                                           [&](SGFContext C) {
                                             return concreteValue;
                                           },
                                           /*allowEmbeddedNSError=*/false)
                        .forward(*this);
      }
      B.createBranch(loc, contBB, branchArg);

      // Continue.
      B.emitBlock(contBB);
      SILValue existentialResult = contBB->createPHIArgument(
          existentialTL.getLoweredType(), ValueOwnershipKind::Owned);
      return emitManagedRValueWithCleanup(existentialResult, existentialTL);
    }
  }

  // General case: erase according to the existential's preferred
  // representation.
  switch (existentialTL.getLoweredType().getObjectType()
              .getPreferredExistentialRepresentation(SGM.M,
                                                     concreteFormalType)) {
  case ExistentialRepresentation::None:
    llvm_unreachable("not an existential type");
  case ExistentialRepresentation::Metatype: {
    assert(existentialTL.isLoadable());

    SILValue metatype = F(SGFContext()).getUnmanagedValue();
    assert(metatype->getType().castTo<AnyMetatypeType>()->getRepresentation()
               == MetatypeRepresentation::Thick);

    auto upcast =
        B.createInitExistentialMetatype(loc, metatype,
                                        existentialTL.getLoweredType(),
                                        conformances);
    return ManagedValue::forUnmanaged(upcast);
  }
  case ExistentialRepresentation::Class: {
    assert(existentialTL.isLoadable());

    ManagedValue sub = F(SGFContext());
    SILValue v = B.createInitExistentialRef(loc,
                                            existentialTL.getLoweredType(),
                                            concreteFormalType,
                                            sub.getValue(),
                                            conformances);
    return ManagedValue(v, sub.getCleanup());
  }
  case ExistentialRepresentation::Boxed: {
    // Allocate the existential.
    auto *existential = B.createAllocExistentialBox(loc,
                                            existentialTL.getLoweredType(),
                                            concreteFormalType,
                                            conformances);
    auto *valueAddr = B.createProjectExistentialBox(loc,
                                            concreteTL.getLoweredType(),
                                            existential);
    // Initialize the concrete value in-place.
    ExistentialInitialization init(existential, valueAddr, concreteFormalType,
                                   ExistentialRepresentation::Boxed, *this);
    ManagedValue mv = F(SGFContext(&init));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init.getAddress());
      init.finishInitialization(*this);
    }
    return emitManagedRValueWithCleanup(existential);
  }
  case ExistentialRepresentation::Opaque: {
    // If the concrete value is a pseudogeneric archetype, first erase it to
    // its upper bound.
    auto anyObjectProto =
        getASTContext().getProtocol(KnownProtocolKind::AnyObject);
    auto anyObjectTy = anyObjectProto
        ? anyObjectProto->getDeclaredType()->getCanonicalType()
        : CanType();
    auto eraseToAnyObject =
        [&, concreteFormalType, F](SGFContext C) -> ManagedValue {
      auto concreteValue = F(SGFContext());
      auto anyObjectConformance = SGM.SwiftModule
          ->lookupConformance(concreteFormalType, anyObjectProto, nullptr);
      ProtocolConformanceRef buf[] = {
        *anyObjectConformance,
      };
      auto asAnyObject = B.createInitExistentialRef(loc,
                                  SILType::getPrimitiveObjectType(anyObjectTy),
                                  concreteFormalType,
                                  concreteValue.getValue(),
                                  getASTContext().AllocateCopy(buf));
      return ManagedValue(asAnyObject, concreteValue.getCleanup());
    };

    auto concreteTLPtr = &concreteTL;
    if (this->F.getLoweredFunctionType()->isPseudogeneric()) {
      if (anyObjectTy && concreteFormalType->is<ArchetypeType>()) {
        concreteFormalType = anyObjectTy;
        concreteTLPtr = &getTypeLowering(anyObjectTy);
        // NOTE(review): 'F' is a function_ref; rebinding it to the local
        // lambda is safe only because 'F' is consumed below before
        // 'eraseToAnyObject' goes out of scope.
        F = eraseToAnyObject;
      }
    }

    // Allocate the existential.
    SILValue existential =
        getBufferForExprResult(loc, existentialTL.getLoweredType(), C);

    // Allocate the concrete value inside the container.
    SILValue valueAddr = B.createInitExistentialAddr(
        loc, existential,
        concreteFormalType,
        concreteTLPtr->getLoweredType(),
        conformances);
    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr,
                                      concreteFormalType,
                                      ExistentialRepresentation::Opaque,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }

    return manageBufferForExprResult(existential, existentialTL, C);
  }
  }

  llvm_unreachable("Unhandled ExistentialRepresentation in switch.");
}
/// AggregateAvailableValues - Given a bunch of primitive subelement values,
/// build out the right aggregate type (LoadTy) by emitting tuple and struct
/// instructions as necessary.
///
/// \param Inst            The instruction being replaced; provides the
///                        insertion point, location, and module.
/// \param LoadTy          The (object) type of the aggregate to materialize.
/// \param Address         Address of the aggregate in memory.  May be an
///                        invalid SILValue when every subelement is already
///                        available; it is only dereferenced for subelements
///                        that are missing from \p AvailableValues.
/// \param AvailableValues Flat array of (value, subelement-index) pairs, one
///                        slot per scalar subelement of the enclosing access.
/// \param FirstElt        Index into \p AvailableValues of this aggregate's
///                        first subelement.
static SILValue
AggregateAvailableValues(SILInstruction *Inst, SILType LoadTy,
                         SILValue Address,
                         ArrayRef<std::pair<SILValue, unsigned>>
                         AvailableValues,
                         unsigned FirstElt) {
  assert(LoadTy.isObject());
  SILModule &M = Inst->getModule();

  // Check to see if the requested value is fully available, as an aggregate.
  // This is a super-common case for single-element structs, but is also a
  // general answer for arbitrary structs and tuples as well.
  if (FirstElt < AvailableValues.size()) {  // #Elements may be zero.
    SILValue FirstVal = AvailableValues[FirstElt].first;
    if (FirstVal.isValid() && AvailableValues[FirstElt].second == 0 &&
        FirstVal.getType() == LoadTy) {
      // If the first element of this value is available, check any extra ones
      // before declaring success.  Every slot must come from the same source
      // value, in subelement order, for us to reuse it whole.
      bool AllMatch = true;
      for (unsigned i = 0, e = getNumSubElements(LoadTy, M); i != e; ++i)
        if (AvailableValues[FirstElt+i].first != FirstVal ||
            AvailableValues[FirstElt+i].second != i) {
          AllMatch = false;
          break;
        }

      if (AllMatch)
        return FirstVal;
    }
  }

  SILBuilderWithScope B(Inst);

  // Rebuild tuples element-by-element, recursing into each element.
  if (TupleType *TT = LoadTy.getAs<TupleType>()) {
    SmallVector<SILValue, 4> ResultElts;

    for (unsigned EltNo : indices(TT->getElements())) {
      SILType EltTy = LoadTy.getTupleElementType(EltNo);
      unsigned NumSubElt = getNumSubElements(EltTy, M);

      // If we are missing any of the available values in this struct element,
      // compute an address to load from.
      SILValue EltAddr;
      if (anyMissing(FirstElt, NumSubElt, AvailableValues))
        EltAddr = B.createTupleElementAddr(Inst->getLoc(), Address, EltNo,
                                           EltTy.getAddressType());

      ResultElts.push_back(AggregateAvailableValues(Inst, EltTy, EltAddr,
                                                    AvailableValues,
                                                    FirstElt));
      // Advance past this element's subelement slots.
      FirstElt += NumSubElt;
    }

    return B.createTuple(Inst->getLoc(), LoadTy, ResultElts);
  }

  // Extract struct elements from fully referenceable structs.
  if (auto *SD = getFullyReferenceableStruct(LoadTy)) {
    SmallVector<SILValue, 4> ResultElts;

    for (auto *FD : SD->getStoredProperties()) {
      SILType EltTy = LoadTy.getFieldType(FD, M);
      unsigned NumSubElt = getNumSubElements(EltTy, M);

      // If we are missing any of the available values in this struct element,
      // compute an address to load from.
      SILValue EltAddr;
      if (anyMissing(FirstElt, NumSubElt, AvailableValues))
        EltAddr = B.createStructElementAddr(Inst->getLoc(), Address, FD,
                                            EltTy.getAddressType());

      ResultElts.push_back(AggregateAvailableValues(Inst, EltTy, EltAddr,
                                                    AvailableValues,
                                                    FirstElt));
      FirstElt += NumSubElt;
    }
    return B.createStruct(Inst->getLoc(), LoadTy, ResultElts);
  }

  // Otherwise, we have a simple primitive.  If the value is available, use it,
  // otherwise emit a load of the value.
  auto Val = AvailableValues[FirstElt];
  if (!Val.first.isValid())
    return B.createLoad(Inst->getLoc(), Address);

  SILValue EltVal = ExtractSubElement(Val.first, Val.second, B,
                                      Inst->getLoc());
  // It must be the same type as LoadTy if available.
  assert(EltVal.getType() == LoadTy && "Subelement types mismatch");
  return EltVal;
}
/// Emit the erasure of a concrete value into an existential of the given
/// lowered type, choosing the representation preferred by the existential.
///
/// \param concreteFormalType  Formal type of the concrete value produced
///                            by \p F.
/// \param conformances        Conformances of the concrete type to the
///                            existential's protocols; each is marked used.
/// \param F                   Callback that produces the concrete value,
///                            possibly emitting it directly into a provided
///                            initialization context.
/// \param allowEmbeddedNSError  When erasing to 'Error', allows extracting an
///                            embedded NSError representation instead of
///                            boxing the value.
ManagedValue SILGenFunction::emitExistentialErasure(
                            SILLocation loc,
                            CanType concreteFormalType,
                            const TypeLowering &concreteTL,
                            const TypeLowering &existentialTL,
                            ArrayRef<ProtocolConformanceRef> conformances,
                            SGFContext C,
                            llvm::function_ref<ManagedValue (SGFContext)> F,
                            bool allowEmbeddedNSError) {
  // Mark the needed conformances as used.
  for (auto conformance : conformances)
    SGM.useConformance(conformance);

  // If we're erasing to the 'Error' type, we might be able to get an NSError
  // representation more efficiently.
  auto &ctx = getASTContext();
  auto nsError = ctx.getNSErrorDecl();
  if (allowEmbeddedNSError && nsError &&
      existentialTL.getSemanticType().getSwiftRValueType()->getAnyNominal() ==
        ctx.getErrorDecl()) {
    // Check whether the concrete type conforms to the _BridgedStoredNSError
    // protocol. In that case, call the _nsError witness getter to extract the
    // NSError directly.
    auto conformance =
      SGM.getConformanceToBridgedStoredNSError(loc, concreteFormalType);

    CanType nsErrorType =
      nsError->getDeclaredInterfaceType()->getCanonicalType();

    ProtocolConformanceRef nsErrorConformances[1] = {
      ProtocolConformanceRef(SGM.getNSErrorConformanceToError())
    };

    if (conformance && nsError && SGM.getNSErrorConformanceToError()) {
      if (auto witness =
            conformance->getWitness(SGM.getNSErrorRequirement(loc),
                                    nullptr)) {
        // Create a reference to the getter witness.
        SILDeclRef getter =
          getGetterDeclRef(cast<VarDecl>(witness.getDecl()),
                           /*isDirectAccessorUse=*/true);

        // Compute the substitutions.
        ArrayRef<Substitution> substitutions =
          concreteFormalType->gatherAllSubstitutions(
            SGM.SwiftModule, nullptr);

        // Emit the erasure, through the getter to _nsError.  The recursive
        // call erases the NSError (not the original value) into 'Error'.
        return emitExistentialErasure(
            loc, nsErrorType,
            getTypeLowering(nsErrorType),
            existentialTL,
            ctx.AllocateCopy(nsErrorConformances),
            C,
            [&](SGFContext innerC) -> ManagedValue {
              // Call the getter.
              return emitGetAccessor(loc, getter, substitutions,
                                     ArgumentSource(loc,
                                                    RValue(*this, loc,
                                                           concreteFormalType,
                                                           F(SGFContext()))),
                                     /*isSuper=*/false,
                                     /*isDirectAccessorUse=*/true,
                                     RValue(), innerC)
                .getAsSingleValue(*this, loc);
            });
      }
    }

    // Check whether the concrete type is an archetype. If so, call the
    // _getEmbeddedNSError() witness to try to dig out the embedded NSError.
    if (auto archetypeType = concreteFormalType->getAs<ArchetypeType>()) {
      if (std::find(archetypeType->getConformsTo().begin(),
                    archetypeType->getConformsTo().end(),
                    ctx.getErrorDecl())
            != archetypeType->getConformsTo().end()) {
        // Build a diamond CFG: isPresentBB / isNotPresentBB merge in contBB,
        // which receives the finished existential as a block argument.
        auto contBB = createBasicBlock();
        auto isNotPresentBB = createBasicBlock();
        auto isPresentBB = createBasicBlock();

        SILValue existentialResult =
          contBB->createBBArg(existentialTL.getLoweredType());

        ProtocolConformanceRef trivialErrorConformances[1] = {
          ProtocolConformanceRef(ctx.getErrorDecl())
        };

        Substitution substitutions[1] = {
          Substitution(concreteFormalType,
                       ctx.AllocateCopy(trivialErrorConformances))
        };

        // Call swift_stdlib_getErrorEmbeddedNSError to attempt to extract an
        // NSError from the value.
        ManagedValue concreteValue = F(SGFContext());
        ManagedValue potentialNSError =
          emitApplyOfLibraryIntrinsic(loc,
                                      SGM.getGetErrorEmbeddedNSError(loc),
                                      ctx.AllocateCopy(substitutions),
                                      { concreteValue },
                                      SGFContext())
            .getAsSingleValue(*this, loc);

        // Check whether we got an NSError back.
        SILValue hasNSError =
          emitDoesOptionalHaveValue(loc, potentialNSError.getValue());

        B.createCondBranch(loc, hasNSError, isPresentBB, isNotPresentBB);

        // If we did get an NSError, emit the existential erasure from that
        // NSError.
        B.emitBlock(isPresentBB);
        SILValue branchArg;
        {
          // Don't allow cleanups to escape the conditional block.
          FullExpr presentScope(Cleanups, CleanupLocation::get(loc));

          // Emit the existential erasure from the NSError.
          branchArg = emitExistentialErasure(
              loc, nsErrorType,
              getTypeLowering(nsErrorType),
              existentialTL,
              ctx.AllocateCopy(nsErrorConformances),
              C,
              [&](SGFContext innerC) -> ManagedValue {
                // Pull the NSError object out of the optional result.
                auto &inputTL = getTypeLowering(potentialNSError.getType());
                auto nsErrorValue =
                  emitUncheckedGetOptionalValueFrom(loc, potentialNSError,
                                                    inputTL);

                // Perform an unchecked cast down to NSError, because it was
                // typed as 'AnyObject' for layering reasons.
                return ManagedValue(B.createUncheckedRefCast(
                                      loc,
                                      nsErrorValue.getValue(),
                                      getLoweredType(nsErrorType)),
                                    nsErrorValue.getCleanup());

              }).forward(*this);
        }
        B.createBranch(loc, contBB, branchArg);

        // If we did not get an NSError, just directly emit the existential
        // (recursively).  allowEmbeddedNSError=false prevents infinite
        // recursion through this same path.
        B.emitBlock(isNotPresentBB);
        branchArg = emitExistentialErasure(loc, concreteFormalType,
                                           concreteTL,
                                           existentialTL, conformances,
                                           SGFContext(), F,
                                           /*allowEmbeddedNSError=*/false)
                      .forward(*this);
        B.createBranch(loc, contBB, branchArg);

        // Continue.
        B.emitBlock(contBB);
        return emitManagedRValueWithCleanup(existentialResult,
                                            existentialTL);
      }
    }
  }

  // Normal path: erase using the representation the existential prefers.
  switch (existentialTL.getLoweredType().getObjectType()
            .getPreferredExistentialRepresentation(SGM.M,
                                                   concreteFormalType)) {
  case ExistentialRepresentation::None:
    llvm_unreachable("not an existential type");
  case ExistentialRepresentation::Metatype: {
    assert(existentialTL.isLoadable());

    SILValue metatype = F(SGFContext()).getUnmanagedValue();
    assert(metatype->getType().castTo<AnyMetatypeType>()->getRepresentation()
             == MetatypeRepresentation::Thick);

    auto upcast =
      B.createInitExistentialMetatype(loc, metatype,
                                      existentialTL.getLoweredType(),
                                      conformances);
    return ManagedValue::forUnmanaged(upcast);
  }
  case ExistentialRepresentation::Class: {
    assert(existentialTL.isLoadable());

    // Class existentials just wrap the reference; the cleanup carries over.
    ManagedValue sub = F(SGFContext());
    SILValue v = B.createInitExistentialRef(loc,
                                            existentialTL.getLoweredType(),
                                            concreteFormalType,
                                            sub.getValue(),
                                            conformances);
    return ManagedValue(v, sub.getCleanup());
  }
  case ExistentialRepresentation::Boxed: {
    // Allocate the existential.
    auto *existential = B.createAllocExistentialBox(loc,
                                     existentialTL.getLoweredType(),
                                     concreteFormalType,
                                     conformances);
    auto *valueAddr = B.createProjectExistentialBox(loc,
                                     concreteTL.getLoweredType(),
                                     existential);
    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr,
                                      concreteFormalType,
                                      ExistentialRepresentation::Boxed,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      // F didn't emit into our buffer; forward the value in ourselves.
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }

    return emitManagedRValueWithCleanup(existential);
  }
  case ExistentialRepresentation::Opaque: {
    // Allocate the existential.
    SILValue existential =
      getBufferForExprResult(loc, existentialTL.getLoweredType(), C);

    // Allocate the concrete value inside the container.
    SILValue valueAddr = B.createInitExistentialAddr(
                            loc, existential,
                            concreteFormalType,
                            concreteTL.getLoweredType(),
                            conformances);
    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr,
                                      concreteFormalType,
                                      ExistentialRepresentation::Opaque,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }

    return manageBufferForExprResult(existential, existentialTL, C);
  }
  }
}
/// Generate a new apply of a function_ref to replace an apply of a
/// witness_method when we've determined the actual function we'll end
/// up calling.
///
/// \param AI    The original apply site (apply, try_apply, or partial_apply).
/// \param F     The witness thunk to call directly.
/// \param Subs  Substitutions required to invoke the witness thunk.
/// \returns the replacement apply site.
static ApplySite devirtualizeWitnessMethod(ApplySite AI, SILFunction *F,
                                           ArrayRef<Substitution> Subs) {
  // We know the witness thunk and the corresponding set of substitutions
  // required to invoke the protocol method at this point.
  auto &Module = AI.getModule();

  // Collect all the required substitutions.
  //
  // The complete set of substitutions may be different, e.g. because the
  // found witness thunk F may have been created by a specialization pass and
  // have additional generic parameters.
  SmallVector<Substitution, 16> NewSubstList(Subs.begin(), Subs.end());

  // Add the non-self-derived substitutions from the original application.
  ArrayRef<Substitution> SubstList;
  SubstList = AI.getSubstitutionsWithoutSelfSubstitution();

  for (auto &origSub : SubstList)
    if (!origSub.getArchetype()->isSelfDerived())
      NewSubstList.push_back(origSub);

  // Figure out the exact bound type of the function to be called by
  // applying all substitutions.
  auto CalleeCanType = F->getLoweredFunctionType();
  auto SubstCalleeCanType = CalleeCanType->substGenericArgs(
    Module, Module.getSwiftModule(), NewSubstList);

  // Collect arguments from the apply instruction.
  auto Arguments = SmallVector<SILValue, 4>();

  auto ParamTypes = SubstCalleeCanType->getParameterSILTypes();

  // Iterate over the non self arguments and add them to the
  // new argument list, upcasting when required.
  SILBuilderWithScope B(AI.getInstruction());
  for (unsigned ArgN = 0, ArgE = AI.getNumArguments(); ArgN != ArgE; ++ArgN) {
    SILValue A = AI.getArgument(ArgN);
    // The callee may take more parameters than AI supplies arguments (e.g.
    // for a partial_apply), so align from the end of the parameter list.
    auto ParamType = ParamTypes[ParamTypes.size() -
                                AI.getNumArguments() + ArgN];
    if (A.getType() != ParamType)
      A = B.createUpcast(AI.getLoc(), A, ParamType);

    Arguments.push_back(A);
  }

  // Replace old apply instruction by a new apply instruction that invokes
  // the witness thunk.
  SILBuilderWithScope Builder(AI.getInstruction());
  SILLocation Loc = AI.getLoc();
  FunctionRefInst *FRI = Builder.createFunctionRef(Loc, F);

  auto SubstCalleeSILType =
    SILType::getPrimitiveObjectType(SubstCalleeCanType);
  auto ResultSILType = SubstCalleeCanType->getSILResult();
  ApplySite SAI;

  // Exactly one of the three dyn_casts below matches the apply site's kind.
  if (auto *A = dyn_cast<ApplyInst>(AI))
    SAI = Builder.createApply(Loc, FRI, SubstCalleeSILType,
                              ResultSILType, NewSubstList, Arguments,
                              A->isNonThrowing());
  if (auto *TAI = dyn_cast<TryApplyInst>(AI))
    SAI = Builder.createTryApply(Loc, FRI, SubstCalleeSILType,
                                 NewSubstList, Arguments,
                                 TAI->getNormalBB(), TAI->getErrorBB());
  if (auto *PAI = dyn_cast<PartialApplyInst>(AI))
    SAI = Builder.createPartialApply(Loc, FRI, SubstCalleeSILType,
                                     NewSubstList, Arguments,
                                     PAI->getType());

  NumWitnessDevirt++;
  return SAI;
}
/// Attempt to devirtualize an apply of a class method for the given class
/// instance.  On success, returns the devirtualized instruction and apply
/// site; otherwise returns an empty result.
DevirtualizationResult swift::tryDevirtualizeClassMethod(FullApplySite AI,
                                                         SILValue ClassInstance) {
  // Only rewrite the call when the target can be statically resolved for
  // this instance's type.
  if (canDevirtualizeClassMethod(AI, ClassInstance.getType()))
    return devirtualizeClassMethod(AI, ClassInstance);

  // Devirtualization is not possible; report failure to the caller.
  return std::make_pair(nullptr, FullApplySite());
}
/// \brief Devirtualize an apply of a class method.
///
/// \p AI is the apply to devirtualize.
/// \p ClassOrMetatype is a class value or metatype value that is the
///    self argument of the apply we will devirtualize.
/// return the result value of the new ApplyInst if created one or null.
DevirtualizationResult swift::devirtualizeClassMethod(FullApplySite AI,
                                                      SILValue ClassOrMetatype) {
  DEBUG(llvm::dbgs() << "    Trying to devirtualize : "
                     << *AI.getInstruction());

  SILModule &Mod = AI.getModule();
  auto *CMI = cast<ClassMethodInst>(AI.getCallee());
  auto ClassOrMetatypeType = ClassOrMetatype.getType();
  // Resolve the concrete implementation that a call on this exact type
  // dispatches to.
  auto *F = getTargetClassMethod(Mod, ClassOrMetatypeType, CMI->getMember());

  CanSILFunctionType GenCalleeType = F->getLoweredFunctionType();

  auto Subs = getSubstitutionsForCallee(Mod, GenCalleeType,
                                        ClassOrMetatypeType, AI);
  CanSILFunctionType SubstCalleeType = GenCalleeType;
  if (GenCalleeType->isPolymorphic())
    SubstCalleeType = GenCalleeType->substGenericArgs(Mod,
                                                      Mod.getSwiftModule(),
                                                      Subs);

  SILBuilderWithScope B(AI.getInstruction());
  FunctionRefInst *FRI = B.createFunctionRef(AI.getLoc(), F);

  // Create the argument list for the new apply, casting when needed
  // in order to handle covariant indirect return types and
  // contravariant argument types.
  llvm::SmallVector<SILValue, 8> NewArgs;

  auto Args = AI.getArguments();
  auto ParamTypes = SubstCalleeType->getParameterSILTypes();

  // All arguments except the last (self) are ordinary parameters.
  for (unsigned i = 0, e = Args.size() - 1; i != e; ++i)
    NewArgs.push_back(castValueToABICompatibleType(&B, AI.getLoc(), Args[i],
                                                   Args[i].getType(),
                                                   ParamTypes[i]).getValue());

  // Add the self argument, upcasting if required because we're
  // calling a base class's method.
  auto SelfParamTy = SubstCalleeType->getSelfParameter().getSILType();
  NewArgs.push_back(castValueToABICompatibleType(&B, AI.getLoc(),
                                                 ClassOrMetatype,
                                                 ClassOrMetatypeType,
                                                 SelfParamTy).getValue());

  // If we have a direct return type, make sure we use the subst callee return
  // type. If we have an indirect return type, AI's return type of the empty
  // tuple should be ok.
  SILType ResultTy = AI.getType();
  if (!SubstCalleeType->hasIndirectResult()) {
    ResultTy = SubstCalleeType->getSILResult();
  }

  SILType SubstCalleeSILType =
    SILType::getPrimitiveObjectType(SubstCalleeType);
  FullApplySite NewAI;

  SILBasicBlock *ResultBB = nullptr;
  SILBasicBlock *NormalBB = nullptr;
  SILValue ResultValue;
  bool ResultCastRequired = false;
  SmallVector<Operand *, 4> OriginalResultUses;

  if (!isa<TryApplyInst>(AI)) {
    // Plain apply: the new instruction itself carries the result.
    NewAI = B.createApply(AI.getLoc(), FRI, SubstCalleeSILType, ResultTy,
                          Subs, NewArgs,
                          cast<ApplyInst>(AI)->isNonThrowing());
    ResultValue = SILValue(NewAI.getInstruction(), 0);
  } else {
    auto *TAI = cast<TryApplyInst>(AI);
    // Create new normal and error BBs only if:
    // - re-using a BB would create a critical edge
    // - or, the result of the new apply would be of different
    //   type than the argument of the original normal BB.
    if (TAI->getNormalBB()->getSinglePredecessor())
      ResultBB = TAI->getNormalBB();
    else {
      ResultBB = B.getFunction().createBasicBlock();
      ResultBB->createBBArg(ResultTy);
    }

    NormalBB = TAI->getNormalBB();

    SILBasicBlock *ErrorBB = nullptr;
    if (TAI->getErrorBB()->getSinglePredecessor())
      ErrorBB = TAI->getErrorBB();
    else {
      ErrorBB = B.getFunction().createBasicBlock();
      ErrorBB->createBBArg(TAI->getErrorBB()->getBBArg(0)->getType());
    }

    NewAI = B.createTryApply(AI.getLoc(), FRI, SubstCalleeSILType,
                             Subs, NewArgs,
                             ResultBB, ErrorBB);
    if (ErrorBB != TAI->getErrorBB()) {
      // Thread the new error block through to the original one.
      B.setInsertionPoint(ErrorBB);
      B.createBranch(TAI->getLoc(), TAI->getErrorBB(),
                     {ErrorBB->getBBArg(0)});
    }

    // Does the result value need to be casted?
    ResultCastRequired = ResultTy != NormalBB->getBBArg(0)->getType();

    if (ResultBB != NormalBB)
      B.setInsertionPoint(ResultBB);
    else if (ResultCastRequired) {
      B.setInsertionPoint(NormalBB->begin());
      // Collect all uses, before casting.
      for (auto *Use : NormalBB->getBBArg(0)->getUses()) {
        OriginalResultUses.push_back(Use);
      }
      // Retype the normal block's argument; old uses temporarily point at
      // undef and are patched to the casted value below.
      NormalBB->getBBArg(0)->replaceAllUsesWith(SILUndef::get(AI.getType(),
                                                              Mod));
      NormalBB->replaceBBArg(0, ResultTy, nullptr);
    }

    // The result value is passed as a parameter to the normal block.
    ResultValue = ResultBB->getBBArg(0);
  }

  // Check if any casting is required for the return value.
  ResultValue = castValueToABICompatibleType(&B, NewAI.getLoc(), ResultValue,
                                             ResultTy,
                                             AI.getType()).getValue();

  DEBUG(llvm::dbgs() << "        SUCCESS: " << F->getName() << "\n");
  NumClassDevirt++;

  if (NormalBB) {
    if (NormalBB != ResultBB) {
      // If artificial normal BB was introduced, branch
      // to the original normal BB.
      B.createBranch(NewAI.getLoc(), NormalBB, { ResultValue });
    } else if (ResultCastRequired) {
      // Update all original uses by the new value.
      for (auto *Use : OriginalResultUses) {
        Use->set(ResultValue);
      }
    }
    return std::make_pair(NewAI.getInstruction(), NewAI);
  }

  // We need to return a pair of values here:
  // - the first one is the actual result of the devirtualized call, possibly
  //   casted into an appropriate type. This SILValue may be a BB arg, if it
  //   was a cast between optional types.
  // - the second one is the new apply site.
  return std::make_pair(ResultValue.getDef(), NewAI);
}
/// Emit the erasure of a concrete value into an existential, choosing the
/// representation preferred by the existential's lowered type.
///
/// \param concreteFormalType  Formal type of the concrete value produced
///                            by \p F.
/// \param conformances        Conformances of the concrete type to the
///                            existential's protocols; each is marked used.
/// \param F                   Callback that produces the concrete value,
///                            possibly emitting it directly into a provided
///                            initialization context.
ManagedValue SILGenFunction::emitExistentialErasure(
                            SILLocation loc,
                            CanType concreteFormalType,
                            const TypeLowering &concreteTL,
                            const TypeLowering &existentialTL,
                            const ArrayRef<ProtocolConformance *> &conformances,
                            SGFContext C,
                            llvm::function_ref<ManagedValue (SGFContext)> F) {
  // Mark the needed conformances as used.
  for (auto *conformance : conformances)
    SGM.useConformance(conformance);

  switch (existentialTL.getLoweredType().getObjectType()
            .getPreferredExistentialRepresentation(SGM.M,
                                                   concreteFormalType)) {
  case ExistentialRepresentation::None:
    llvm_unreachable("not an existential type");
  case ExistentialRepresentation::Metatype: {
    assert(existentialTL.isLoadable());

    SILValue metatype = F(SGFContext()).getUnmanagedValue();
    assert(metatype.getType().castTo<AnyMetatypeType>()->getRepresentation()
             == MetatypeRepresentation::Thick);

    auto upcast =
      B.createInitExistentialMetatype(loc, metatype,
                                      existentialTL.getLoweredType(),
                                      conformances);
    return ManagedValue::forUnmanaged(upcast);
  }
  case ExistentialRepresentation::Class: {
    assert(existentialTL.isLoadable());

    // Class existentials just wrap the reference; the cleanup carries over.
    ManagedValue sub = F(SGFContext());
    SILValue v = B.createInitExistentialRef(loc,
                                            existentialTL.getLoweredType(),
                                            concreteFormalType,
                                            sub.getValue(),
                                            conformances);
    return ManagedValue(v, sub.getCleanup());
  }
  case ExistentialRepresentation::Boxed: {
    // Allocate the existential.
    auto box = B.createAllocExistentialBox(loc,
                                           existentialTL.getLoweredType(),
                                           concreteFormalType,
                                           concreteTL.getLoweredType(),
                                           conformances);
    auto existential = box->getExistentialResult();
    auto valueAddr = box->getValueAddressResult();

    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr,
                                      concreteFormalType,
                                      ExistentialRepresentation::Boxed,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      // F didn't emit into our buffer; forward the value in ourselves.
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }

    return emitManagedRValueWithCleanup(existential);
  }
  case ExistentialRepresentation::Opaque: {
    // Allocate the existential.
    SILValue existential =
      getBufferForExprResult(loc, existentialTL.getLoweredType(), C);

    // Allocate the concrete value inside the container.
    SILValue valueAddr = B.createInitExistentialAddr(
                            loc, existential,
                            concreteFormalType,
                            concreteTL.getLoweredType(),
                            conformances);
    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr,
                                      concreteFormalType,
                                      ExistentialRepresentation::Opaque,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }

    return manageBufferForExprResult(existential, existentialTL, C);
  }
  }
}
/// Emit the synthesized top-level entry point for a program whose main class
/// is annotated with @UIApplicationMain or @NSApplicationMain: forward argc
/// and argv to the appropriate framework entry point and return its result.
void SILGenFunction::emitArtificialTopLevel(ClassDecl *mainClass) {
  // Load argc and argv from the entry point arguments.
  SILValue argc = F.begin()->getBBArg(0);
  SILValue argv = F.begin()->getBBArg(1);

  switch (mainClass->getArtificialMainKind()) {
  case ArtificialMainKind::UIApplicationMain: {
    // Emit a UIKit main.
    // return UIApplicationMain(C_ARGC, C_ARGV, nil, ClassName);

    CanType NSStringTy = SGM.Types.getNSStringType();
    CanType OptNSStringTy
      = OptionalType::get(NSStringTy)->getCanonicalType();
    CanType IUOptNSStringTy
      = ImplicitlyUnwrappedOptionalType::get(NSStringTy)->getCanonicalType();

    // Look up UIApplicationMain.
    // FIXME: Doing an AST lookup here is gross and not entirely sound;
    // we're getting away with it because the types are guaranteed to already
    // be imported.
    ASTContext &ctx = getASTContext();
    Module *UIKit = ctx.getLoadedModule(ctx.getIdentifier("UIKit"));
    SmallVector<ValueDecl *, 1> results;
    UIKit->lookupQualified(UIKit->getDeclaredType(),
                           ctx.getIdentifier("UIApplicationMain"),
                           NL_QualifiedDefault,
                           /*resolver*/nullptr,
                           results);
    assert(!results.empty() && "couldn't find UIApplicationMain in UIKit");
    assert(results.size() == 1 && "more than one UIApplicationMain?");

    SILDeclRef mainRef{results.front(), ResilienceExpansion::Minimal,
                       SILDeclRef::ConstructAtNaturalUncurryLevel,
                       /*isForeign*/true};
    auto UIApplicationMainFn = SGM.M.getOrCreateFunction(mainClass, mainRef,
                                                         NotForDefinition);
    auto fnTy = UIApplicationMainFn->getLoweredFunctionType();

    // Get the class name as a string using NSStringFromClass.
    CanType mainClassTy =
      mainClass->getDeclaredTypeInContext()->getCanonicalType();
    CanType mainClassMetaty = CanMetatypeType::get(mainClassTy,
                                               MetatypeRepresentation::ObjC);
    ProtocolDecl *anyObjectProtocol =
      ctx.getProtocol(KnownProtocolKind::AnyObject);
    auto mainClassAnyObjectConformance = ProtocolConformanceRef(
      *SGM.M.getSwiftModule()->lookupConformance(mainClassTy,
                                                 anyObjectProtocol,
                                                 nullptr));
    CanType anyObjectTy = anyObjectProtocol
      ->getDeclaredTypeInContext()
      ->getCanonicalType();
    CanType anyObjectMetaTy = CanExistentialMetatypeType::get(anyObjectTy,
                                               MetatypeRepresentation::ObjC);

    // NSStringFromClass takes an AnyObject metatype and autoreleases an
    // optional NSString.
    auto NSStringFromClassType = SILFunctionType::get(nullptr,
                  SILFunctionType::ExtInfo()
                    .withRepresentation(SILFunctionType::Representation::
                                        CFunctionPointer),
                  ParameterConvention::Direct_Unowned,
                  SILParameterInfo(anyObjectMetaTy,
                                   ParameterConvention::Direct_Unowned),
                  SILResultInfo(OptNSStringTy,
                                ResultConvention::Autoreleased),
                  /*error result*/ None,
                  ctx);
    auto NSStringFromClassFn
      = SGM.M.getOrCreateFunction(mainClass, "NSStringFromClass",
                                  SILLinkage::PublicExternal,
                                  NSStringFromClassType,
                                  IsBare, IsTransparent, IsNotFragile);
    auto NSStringFromClass = B.createFunctionRef(mainClass,
                                                 NSStringFromClassFn);
    SILValue metaTy = B.createMetatype(mainClass,
                             SILType::getPrimitiveObjectType(mainClassMetaty));
    metaTy = B.createInitExistentialMetatype(mainClass, metaTy,
                          SILType::getPrimitiveObjectType(anyObjectMetaTy),
                          ctx.AllocateCopy(
                            llvm::makeArrayRef(mainClassAnyObjectConformance)));
    SILValue optName = B.createApply(mainClass,
                               NSStringFromClass,
                               NSStringFromClass->getType(),
                               SILType::getPrimitiveObjectType(OptNSStringTy),
                               {}, metaTy);

    // Fix up the string parameters to have the right type.
    SILType nameArgTy = fnTy->getSILArgumentType(3);
    assert(nameArgTy == fnTy->getSILArgumentType(2));
    auto managedName = ManagedValue::forUnmanaged(optName);
    SILValue nilValue;
    if (optName->getType() == nameArgTy) {
      nilValue = getOptionalNoneValue(mainClass,
                                      getTypeLowering(OptNSStringTy));
    } else {
      // The imported signature uses NSString! — convert Optional to IUO.
      assert(nameArgTy.getSwiftRValueType() == IUOptNSStringTy);
      nilValue = getOptionalNoneValue(mainClass,
                                      getTypeLowering(IUOptNSStringTy));
      managedName = emitOptionalToOptional(
          mainClass, managedName,
          SILType::getPrimitiveObjectType(IUOptNSStringTy),
          [](SILGenFunction &, SILLocation, ManagedValue input, SILType) {
        return input;
      });
    }

    // Fix up argv to have the right type.
    auto argvTy = fnTy->getSILArgumentType(1);

    SILType unwrappedTy = argvTy;
    if (Type innerTy =
          argvTy.getSwiftRValueType()->getAnyOptionalObjectType()) {
      auto canInnerTy = innerTy->getCanonicalType();
      unwrappedTy = SILType::getPrimitiveObjectType(canInnerTy);
    }

    auto managedArgv = ManagedValue::forUnmanaged(argv);

    if (unwrappedTy != argv->getType()) {
      // Convert between pointer flavors (e.g. raw vs. typed pointer).
      auto converted =
        emitPointerToPointer(mainClass, managedArgv,
                             argv->getType().getSwiftRValueType(),
                             unwrappedTy.getSwiftRValueType());
      managedArgv = std::move(converted).getAsSingleValue(*this, mainClass);
    }

    if (unwrappedTy != argvTy) {
      // Re-wrap in the optional the imported signature expects.
      managedArgv = getOptionalSomeValue(mainClass, managedArgv,
                                         getTypeLowering(argvTy));
    }

    auto UIApplicationMain = B.createFunctionRef(mainClass,
                                                 UIApplicationMainFn);

    SILValue args[] = {argc, managedArgv.getValue(), nilValue,
                       managedName.getValue()};

    B.createApply(mainClass, UIApplicationMain,
                  UIApplicationMain->getType(),
                  argc->getType(), {}, args);
    SILValue r = B.createIntegerLiteral(mainClass,
                        SILType::getBuiltinIntegerType(32, ctx), 0);
    auto rType =
      F.getLoweredFunctionType()->getSingleResult().getSILType();
    if (r->getType() != rType)
      // Wrap the builtin integer in the struct return type (e.g. Int32).
      r = B.createStruct(mainClass, rType, r);

    Cleanups.emitCleanupsForReturn(mainClass);
    B.createReturn(mainClass, r);
    return;
  }

  case ArtificialMainKind::NSApplicationMain: {
    // Emit an AppKit main.
    // return NSApplicationMain(C_ARGC, C_ARGV);

    SILParameterInfo argTypes[] = {
      SILParameterInfo(argc->getType().getSwiftRValueType(),
                       ParameterConvention::Direct_Unowned),
      SILParameterInfo(argv->getType().getSwiftRValueType(),
                       ParameterConvention::Direct_Unowned),
    };
    auto NSApplicationMainType = SILFunctionType::get(nullptr,
                  SILFunctionType::ExtInfo()
                    // Should be C calling convention, but NSApplicationMain
                    // has an overlay to fix the type of argv.
                    .withRepresentation(SILFunctionType::Representation::Thin),
                  ParameterConvention::Direct_Unowned,
                  argTypes,
                  SILResultInfo(argc->getType().getSwiftRValueType(),
                                ResultConvention::Unowned),
                  /*error result*/ None,
                  getASTContext());

    auto NSApplicationMainFn
      = SGM.M.getOrCreateFunction(mainClass, "NSApplicationMain",
                                  SILLinkage::PublicExternal,
                                  NSApplicationMainType,
                                  IsBare, IsTransparent, IsNotFragile);

    auto NSApplicationMain = B.createFunctionRef(mainClass,
                                                 NSApplicationMainFn);
    SILValue args[] = { argc, argv };

    B.createApply(mainClass, NSApplicationMain,
                  NSApplicationMain->getType(),
                  argc->getType(), {}, args);
    SILValue r = B.createIntegerLiteral(mainClass,
                        SILType::getBuiltinIntegerType(32, getASTContext()),
                        0);
    auto rType =
      F.getLoweredFunctionType()->getSingleResult().getSILType();
    if (r->getType() != rType)
      r = B.createStruct(mainClass, rType, r);
    B.createReturn(mainClass, r);
    return;
  }
  }
}
/// Emit SIL for a value-type (struct/enum) constructor: allocate and
/// mark-uninitialized 'self', run member initializers and the body, and
/// return 'self' — wrapped in an optional and routed through a failure
/// epilog if the initializer is failable.
void SILGenFunction::emitValueConstructor(ConstructorDecl *ctor) {
  MagicFunctionName = SILGenModule::getMagicFunctionName(ctor);

  // Memberwise initializers have a fully synthesized body; nothing to emit
  // here.
  if (ctor->isMemberwiseInitializer())
    return emitImplicitValueConstructor(*this, ctor);

  // True if this constructor delegates to a peer constructor with self.init().
  bool isDelegating = ctor->getDelegatingOrChainedInitKind(nullptr) ==
    ConstructorDecl::BodyInitKind::Delegating;

  // Get the 'self' decl and type.
  VarDecl *selfDecl = ctor->getImplicitSelfDecl();
  auto &lowering = getTypeLowering(selfDecl->getType()->getInOutObjectType());
  SILType selfTy = lowering.getLoweredType();
  (void)selfTy;
  assert(!selfTy.getClassOrBoundGenericClass()
         && "can't emit a class ctor here");

  // Allocate the local variable for 'self'.
  emitLocalVariableWithCleanup(selfDecl, false)->finishInitialization(*this);

  // Mark self as being uninitialized so that DI knows where it is and how to
  // check for it.
  SILValue selfLV;
  {
    auto &SelfVarLoc = VarLocs[selfDecl];
    auto MUIKind = isDelegating ? MarkUninitializedInst::DelegatingSelf
                                : MarkUninitializedInst::RootSelf;
    selfLV = B.createMarkUninitialized(selfDecl, SelfVarLoc.value, MUIKind);
    SelfVarLoc.value = selfLV;
  }

  // Emit the prolog.
  emitProlog(ctor->getParameterList(1), ctor->getResultType(), ctor,
             ctor->hasThrows());
  emitConstructorMetatypeArg(*this, ctor);

  // Create a basic block to jump to for the implicit 'self' return.
  // We won't emit this until after we've emitted the body.
  // The epilog takes a void return because the return of 'self' is implicit.
  prepareEpilog(Type(), ctor->hasThrows(), CleanupLocation(ctor));

  // If the constructor can fail, set up an alternative epilog for constructor
  // failure.
  SILBasicBlock *failureExitBB = nullptr;
  SILArgument *failureExitArg = nullptr;
  auto &resultLowering = getTypeLowering(ctor->getResultType());

  if (ctor->getFailability() != OTK_None) {
    SILBasicBlock *failureBB = createBasicBlock(FunctionSection::Postmatter);

    // On failure, we'll clean up everything (except self, which should have
    // been cleaned up before jumping here) and return nil instead.
    SavedInsertionPoint savedIP(*this, failureBB,
                                FunctionSection::Postmatter);
    failureExitBB = createBasicBlock();
    Cleanups.emitCleanupsForReturn(ctor);
    // Return nil.
    if (lowering.isAddressOnly()) {
      // Inject 'nil' into the indirect return.
      assert(F.getIndirectResults().size() == 1);
      B.createInjectEnumAddr(ctor, F.getIndirectResults()[0],
                             getASTContext().getOptionalNoneDecl());
      B.createBranch(ctor, failureExitBB);

      B.setInsertionPoint(failureExitBB);
      B.createReturn(ctor, emitEmptyTuple(ctor));
    } else {
      // Pass 'nil' as the return value to the exit BB.
      failureExitArg = new (F.getModule())
        SILArgument(failureExitBB, resultLowering.getLoweredType());
      SILValue nilResult = B.createEnum(ctor, {},
                                     getASTContext().getOptionalNoneDecl(),
                                     resultLowering.getLoweredType());
      B.createBranch(ctor, failureExitBB, nilResult);

      B.setInsertionPoint(failureExitBB);
      B.createReturn(ctor, failureExitArg);
    }

    // 'return nil' in the body jumps here.
    FailDest = JumpDest(failureBB, Cleanups.getCleanupsDepth(), ctor);
  }

  // If this is not a delegating constructor, emit member initializers.
  if (!isDelegating) {
    auto *dc = ctor->getDeclContext();
    auto *nominal = dc->getAsNominalTypeOrNominalTypeExtensionContext();
    emitMemberInitializers(dc, selfDecl, nominal);
  }

  emitProfilerIncrement(ctor->getBody());
  // Emit the constructor body.
  emitStmt(ctor->getBody());

  // Build a custom epilog block, since the AST representation of the
  // constructor decl (which has no self in the return type) doesn't match
  // the SIL representation.
  SILValue selfValue;
  {
    SavedInsertionPoint savedIP(*this, ReturnDest.getBlock());
    assert(B.getInsertionBB()->empty() && "Epilog already set up?");
    auto cleanupLoc = CleanupLocation::get(ctor);

    if (!lowering.isAddressOnly()) {
      // Otherwise, load and return the final 'self' value.
      selfValue = B.createLoad(cleanupLoc, selfLV,
                               LoadOwnershipQualifier::Unqualified);

      // Emit a retain of the loaded value, since we return it +1.
      lowering.emitCopyValue(B, cleanupLoc, selfValue);

      // Inject the self value into an optional if the constructor is
      // failable.
      if (ctor->getFailability() != OTK_None) {
        selfValue = B.createEnum(ctor, selfValue,
                                 getASTContext().getOptionalSomeDecl(),
                               getLoweredLoadableType(ctor->getResultType()));
      }
    } else {
      // If 'self' is address-only, copy 'self' into the indirect return
      // slot.
      assert(F.getIndirectResults().size() == 1
             && "no indirect return for address-only ctor?!");

      // Get the address to which to store the result.
      SILValue completeReturnAddress = F.getIndirectResults()[0];
      SILValue returnAddress;
      switch (ctor->getFailability()) {
      // For non-failable initializers, store to the return address directly.
      case OTK_None:
        returnAddress = completeReturnAddress;
        break;
      // If this is a failable initializer, project out the payload.
      case OTK_Optional:
      case OTK_ImplicitlyUnwrappedOptional:
        returnAddress = B.createInitEnumDataAddr(ctor, completeReturnAddress,
                                 getASTContext().getOptionalSomeDecl(),
                                                 selfLV->getType());
        break;
      }

      // We have to do a non-take copy because someone else may be using the
      // box (e.g. someone could have closed over it).
      B.createCopyAddr(cleanupLoc, selfLV, returnAddress,
                       IsNotTake, IsInitialization);

      // Inject the enum tag if the result is optional because of
      // failability.
      if (ctor->getFailability() != OTK_None) {
        // Inject the 'Some' tag.
        B.createInjectEnumAddr(ctor, completeReturnAddress,
                               getASTContext().getOptionalSomeDecl());
      }
    }
  }

  // Finally, emit the epilog and post-matter.
  auto returnLoc = emitEpilog(ctor, /*UsesCustomEpilog*/true);

  // Finish off the epilog by returning.  If this is a failable ctor, then we
  // actually jump to the failure epilog to keep the invariant that there is
  // only one SIL return instruction per SIL function.
  if (B.hasValidInsertionPoint()) {
    if (!failureExitBB) {
      // If we're not returning self, then return () since we're returning
      // Void.
      if (!selfValue) {
        SILLocation loc(ctor);
        loc.markAutoGenerated();
        selfValue = emitEmptyTuple(loc);
      }

      B.createReturn(returnLoc, selfValue);
    } else {
      if (selfValue)
        B.createBranch(returnLoc, failureExitBB, selfValue);
      else
        B.createBranch(returnLoc, failureExitBB);
    }
  }
}
/// Emit the body of a curry thunk: collect the arguments available at the
/// `from` uncurry level (plus any forwarded captures/substitutions), build a
/// reference to the next uncurry level of \p vd, partially apply it, and
/// return the resulting closure.
///
/// \param vd    The declaration being curried (constructor, enum element, or
///              other function-like decl).
/// \param from  The SILDeclRef for the outer (thunk) uncurry level.
/// \param to    The SILDeclRef for the next uncurry level being applied.
void SILGenFunction::emitCurryThunk(ValueDecl *vd, SILDeclRef from,
                                    SILDeclRef to) {
  // Arguments to forward into the partial_apply, in the order they are
  // bound as parameters of this thunk.
  SmallVector<SILValue, 8> curriedArgs;

  unsigned paramCount = from.uncurryLevel + 1;

  if (isa<ConstructorDecl>(vd) || isa<EnumElementDecl>(vd)) {
    // The first body parameter pattern for a constructor specifies the
    // "self" instance, but the constructor is invoked from outside on a
    // metatype.
    assert(from.uncurryLevel == 0 && to.uncurryLevel == 1
           && "currying constructor at level other than one?!");
    F.setBare(IsBare);
    // The thunk's single argument is the metatype the constructor/enum
    // element is invoked on; materialize it as the entry block's argument.
    auto selfMetaTy = vd->getType()->getAs<AnyFunctionType>()->getInput();
    auto metatypeVal =
        new (F.getModule()) SILArgument(F.begin(),
                                        getLoweredLoadableType(selfMetaTy));
    curriedArgs.push_back(metatypeVal);
  } else if (auto fd = dyn_cast<AbstractFunctionDecl>(vd)) {
    // Forward implicit closure context arguments.
    // If the function captures local state, the last "parameter list" slot is
    // the capture context rather than a formal parameter list, so it is not
    // counted among the forwarded parameter lists below.
    bool hasCaptures = fd->getCaptureInfo().hasLocalCaptures();
    if (hasCaptures)
      --paramCount;

    // Forward the curried formal arguments.
    // NOTE(review): the lists are walked in reverse here — presumably so the
    // entry-block arguments come out in the SIL calling-convention order;
    // confirm against bindParametersForForwarding before touching this.
    auto forwardedPatterns = fd->getParameterLists().slice(0, paramCount);
    for (auto *paramPattern : reversed(forwardedPatterns))
      bindParametersForForwarding(paramPattern, curriedArgs);

    // Forward captures.
    if (hasCaptures) {
      auto captureInfo = SGM.Types.getLoweredLocalCaptures(fd);
      for (auto capture : captureInfo.getCaptures())
        forwardCaptureArgs(*this, curriedArgs, capture);
    }
  } else {
    llvm_unreachable("don't know how to curry this decl");
  }

  // Forward substitutions.
  // If the target level is generic, reuse the enclosing context's generic
  // parameters as forwarding substitutions for the partial application.
  ArrayRef<Substitution> subs;
  if (auto gp = getConstantInfo(to).ContextGenericParams) {
    subs = gp->getForwardingSubstitutions(getASTContext());
  }

  // Reference the function at the next uncurry level that the collected
  // arguments will be applied to.
  SILValue toFn = getNextUncurryLevelRef(*this, vd, to, from.isDirectReference,
                                         curriedArgs, subs);

  // The thunk's declared result type (the closure type the caller expects),
  // mapped into this function's generic context.
  SILType resultTy
    = SGM.getConstantType(from).castTo<SILFunctionType>()
         ->getSingleResult().getSILType();
  resultTy = F.mapTypeIntoContext(resultTy);
  auto toTy = toFn->getType();

  // Forward archetypes and specialize if the function is generic.
  if (!subs.empty()) {
    auto toFnTy = toFn->getType().castTo<SILFunctionType>();
    toTy = getLoweredLoadableType(
        toFnTy->substGenericArgs(SGM.M, SGM.SwiftModule, subs));
  }

  // Partially apply the next uncurry level and return the result closure.
  auto closureTy = SILGenBuilder::getPartialApplyResultType(
      toFn->getType(), curriedArgs.size(), SGM.M, subs);
  SILInstruction *toClosure =
      B.createPartialApply(vd, toFn, toTy, subs, curriedArgs, closureTy);
  // The partial_apply's type may differ from the declared result type
  // (e.g. in representation); bridge the gap with a convert_function.
  if (resultTy != closureTy)
    toClosure = B.createConvertFunction(vd, toClosure, resultTy);
  B.createReturn(ImplicitReturnLocation::getImplicitReturnLoc(vd), toClosure);
}