/// Emit a 'throw' of the given exception value: optionally notify the
/// debugger via the 'willThrow' builtin, then branch through active
/// cleanups to the enclosing throw destination.
///
/// \param exnMV the exception value; its cleanup is claimed here.
/// \param emitWillThrow whether to emit the 'willThrow' builtin so the
///        debugger can observe the throw event.
void SILGenFunction::emitThrow(SILLocation loc, ManagedValue exnMV,
                               bool emitWillThrow) {
  assert(ThrowDest.isValid() &&
         "calling emitThrow with invalid throw destination!");

  // Claim the exception value. If we need to handle throwing
  // cleanups, the correct thing to do here is to recreate the
  // exception's cleanup when emitting each cleanup we branch through.
  // But for now we aren't bothering.
  SILValue exn = exnMV.forward(*this);

  if (emitWillThrow) {
    // Generate a call to the 'swift_willThrow' runtime function to allow the
    // debugger to catch the throw event.
    B.createBuiltin(loc, SGM.getASTContext().getIdentifier("willThrow"),
                    SGM.Types.getEmptyTupleType(), {}, {exn});
  }

  // Branch to the cleanup destination.
  Cleanups.emitBranchAndCleanups(ThrowDest, loc, exn);
}
/// Convert a pointer value of one pointer type to another by calling the
/// _convertPointerToPointerArgument library intrinsic.
///
/// \param inputType/outputType the formal pointer types used to build the
///        intrinsic's substitutions.
RValue SILGenFunction::emitPointerToPointer(SILLocation loc,
                                            ManagedValue input,
                                            CanType inputType,
                                            CanType outputType,
                                            SGFContext C) {
  auto converter = getASTContext().getConvertPointerToPointerArgument(nullptr);

  // The generic function currently always requires indirection, but pointers
  // are always loadable.
  auto origBuf = emitTemporaryAllocation(loc, input.getType());
  B.createStore(loc, input.forward(*this), origBuf);
  auto origValue = emitManagedBufferWithCleanup(origBuf);

  // Invoke the conversion intrinsic to convert to the destination type.
  Substitution subs[2] = {
    getPointerSubstitution(inputType),
    getPointerSubstitution(outputType),
  };

  return emitApplyOfLibraryIntrinsic(loc, converter, subs, origValue, C);
}
/// Emit a materializeForSet operation that calls a mutable addressor. /// /// If it's not an unsafe addressor, this uses a callback function to /// write the l-value back. SILValue MaterializeForSetEmitter::emitUsingAddressor(SILGenFunction &gen, SILLocation loc, ManagedValue self, RValue &&indices, SILValue callbackBuffer, SILFunction *&callback) { bool isDirect = (TheAccessSemantics != AccessSemantics::Ordinary); // Call the mutable addressor. auto addressor = gen.getAddressorDeclRef(WitnessStorage, AccessKind::ReadWrite, isDirect); ArgumentSource baseRV = gen.prepareAccessorBaseArg(loc, self, SubstSelfType, addressor); SILType addressType = WitnessStorageType.getAddressType(); auto result = gen.emitAddressorAccessor(loc, addressor, WitnessSubs, std::move(baseRV), IsSuper, isDirect, std::move(indices), addressType); SILValue address = result.first.getUnmanagedValue(); AddressorKind addressorKind = WitnessStorage->getMutableAddressor()->getAddressorKind(); ManagedValue owner = result.second; if (!owner) { assert(addressorKind == AddressorKind::Unsafe); } else { SILValue allocatedCallbackBuffer = gen.B.createAllocValueBuffer(loc, owner.getType(), callbackBuffer); gen.B.emitStoreValueOperation(loc, owner.forward(gen), allocatedCallbackBuffer, StoreOwnershipQualifier::Init); callback = createAddressorCallback(gen.F, owner.getType(), addressorKind); } return address; }
/// Recursively walk into the given formal index type, expanding tuples, /// in order to form the arguments to a subscript accessor. static void translateIndices(SILGenFunction &gen, SILLocation loc, AbstractionPattern pattern, CanType formalType, ArrayRef<ManagedValue> &sourceIndices, RValue &result) { // Expand if the pattern was a tuple. if (pattern.isTuple()) { auto formalTupleType = cast<TupleType>(formalType); for (auto i : indices(formalTupleType.getElementTypes())) { translateIndices(gen, loc, pattern.getTupleElementType(i), formalTupleType.getElementType(i), sourceIndices, result); } return; } assert(!sourceIndices.empty() && "ran out of elements in index!"); ManagedValue value = sourceIndices.front(); sourceIndices = sourceIndices.slice(1); // We're going to build an RValue here, so make sure we translate // indirect arguments to be scalar if we have a loadable type. if (value.getType().isAddress()) { auto &valueTL = gen.getTypeLowering(value.getType()); if (!valueTL.isAddressOnly()) { value = gen.emitLoad(loc, value.forward(gen), valueTL, SGFContext(), IsTake); } } // Reabstract the subscripts from the requirement pattern to the // formal type. value = gen.emitOrigToSubstValue(loc, value, pattern, formalType); // Invoking the accessor will expect a value of the formal type, so // don't reabstract to that here. // Add that to the result, further expanding if necessary. result.addElement(gen, value, formalType, loc); }
// FIXME: With some changes to their callers, all of the below functions // could be re-worked to use emitInjectEnum(). ManagedValue SILGenFunction::emitInjectOptional(SILLocation loc, const TypeLowering &optTL, SGFContext ctxt, llvm::function_ref<ManagedValue(SGFContext)> generator) { SILType optTy = optTL.getLoweredType(); SILType objectTy = optTy.getAnyOptionalObjectType(); assert(objectTy && "expected type was not optional"); auto someDecl = getASTContext().getOptionalSomeDecl(); // If the value is loadable, just emit and wrap. // TODO: honor +0 contexts? if (optTL.isLoadable() || !silConv.useLoweredAddresses()) { ManagedValue objectResult = generator(SGFContext()); auto some = B.createEnum(loc, objectResult.forward(*this), someDecl, optTy); return emitManagedRValueWithCleanup(some, optTL); } // Otherwise it's address-only; try to avoid spurious copies by // evaluating into the context. // Prepare a buffer for the object value. return B.bufferForExpr( loc, optTy.getObjectType(), optTL, ctxt, [&](SILValue optBuf) { auto objectBuf = B.createInitEnumDataAddr(loc, optBuf, someDecl, objectTy); // Evaluate the value in-place into that buffer. TemporaryInitialization init(objectBuf, CleanupHandle::invalid()); ManagedValue objectResult = generator(SGFContext(&init)); if (!objectResult.isInContext()) { objectResult.forwardInto(*this, loc, objectBuf); } // Finalize the outer optional buffer. B.createInjectEnumAddr(loc, optBuf, someDecl); }); }
// FIXME: With some changes to their callers, all of the below functions // could be re-worked to use emitInjectEnum(). ManagedValue SILGenFunction::emitInjectOptional(SILLocation loc, ManagedValue v, CanType inputFormalType, CanType substFormalType, const TypeLowering &expectedTL, SGFContext ctxt) { // Optional's payload is currently maximally abstracted. FIXME: Eventually // it shouldn't be. auto opaque = AbstractionPattern::getOpaque(); OptionalTypeKind substOTK; auto substObjectType = substFormalType.getAnyOptionalObjectType(substOTK); auto loweredTy = getLoweredType(opaque, substObjectType); if (v.getType() != loweredTy) v = emitTransformedValue(loc, v, AbstractionPattern(inputFormalType), inputFormalType, opaque, substObjectType); auto someDecl = getASTContext().getOptionalSomeDecl(substOTK); SILType optTy = getLoweredType(substFormalType); if (v.getType().isAddress()) { auto buf = getBufferForExprResult(loc, optTy.getObjectType(), ctxt); auto payload = B.createInitEnumDataAddr(loc, buf, someDecl, v.getType()); // FIXME: Is it correct to use IsTake here even if v doesn't have a cleanup? B.createCopyAddr(loc, v.forward(*this), payload, IsTake, IsInitialization); B.createInjectEnumAddr(loc, buf, someDecl); v = manageBufferForExprResult(buf, expectedTL, ctxt); } else { auto some = B.createEnum(loc, v.getValue(), someDecl, optTy); v = ManagedValue(some, v.getCleanup()); } return v; }
/// Return a value for an optional ".Some(x)" of the specified type. This only /// works for loadable enum types. ManagedValue SILGenFunction:: getOptionalSomeValue(SILLocation loc, ManagedValue value, const TypeLowering &optTL) { assert(optTL.isLoadable() && "Address-only optionals cannot use this"); SILType optType = optTL.getLoweredType(); CanType formalOptType = optType.getSwiftRValueType(); OptionalTypeKind OTK; auto formalObjectType = formalOptType->getAnyOptionalObjectType(OTK) ->getCanonicalType(); assert(OTK != OTK_None); auto someDecl = getASTContext().getOptionalSomeDecl(OTK); AbstractionPattern origType = AbstractionPattern::getOpaque(); // Reabstract input value to the type expected by the enum. value = emitSubstToOrigValue(loc, value, origType, formalObjectType); SILValue result = B.createEnum(loc, value.forward(*this), someDecl, optTL.getLoweredType()); return emitManagedRValueWithCleanup(result, optTL); }
/// Perform a foreign error check by testing whether the call result is nil. static ManagedValue emitResultIsNilErrorCheck(SILGenFunction &gen, SILLocation loc, ManagedValue origResult, ManagedValue errorSlot, bool suppressErrorCheck) { // Take local ownership of the optional result value. SILValue optionalResult = origResult.forward(gen); OptionalTypeKind optKind; SILType resultObjectType = optionalResult->getType().getAnyOptionalObjectType(gen.SGM.M, optKind); ASTContext &ctx = gen.getASTContext(); // If we're suppressing the check, just do an unchecked take. if (suppressErrorCheck) { SILValue objectResult = gen.B.createUncheckedEnumData(loc, optionalResult, ctx.getOptionalSomeDecl(optKind)); return gen.emitManagedRValueWithCleanup(objectResult); } // Switch on the optional result. SILBasicBlock *errorBB = gen.createBasicBlock(FunctionSection::Postmatter); SILBasicBlock *contBB = gen.createBasicBlock(); gen.B.createSwitchEnum(loc, optionalResult, /*default*/ nullptr, { { ctx.getOptionalSomeDecl(optKind), contBB }, { ctx.getOptionalNoneDecl(optKind), errorBB } }); // Emit the error block. gen.emitForeignErrorBlock(loc, errorBB, errorSlot); // In the continuation block, take ownership of the now non-optional // result value. gen.B.emitBlock(contBB); SILValue objectResult = contBB->createBBArg(resultObjectType); return gen.emitManagedRValueWithCleanup(objectResult); }
/// Emit the allocating entry point for a class constructor: allocate the
/// 'self' instance (dynamically, for convenience/ObjC-imported inits),
/// forward the constructor arguments to the initializing entry point, and
/// return the initialized instance.
void SILGenFunction::emitClassConstructorAllocator(ConstructorDecl *ctor) {
  assert(!ctor->isFactoryInit() && "factories should not be emitted here");

  // Emit the prolog. Since we're just going to forward our args directly
  // to the initializer, don't allocate local variables for them.
  RegularLocation Loc(ctor);
  Loc.markAutoGenerated();

  // Forward the constructor arguments.
  // FIXME: Handle 'self' along with the other body patterns.
  SmallVector<SILValue, 8> args;
  bindParametersForForwarding(ctor->getParameterList(1), args);

  SILValue selfMetaValue = emitConstructorMetatypeArg(*this, ctor);

  // Allocate the "self" value.
  VarDecl *selfDecl = ctor->getImplicitSelfDecl();
  SILType selfTy = getLoweredType(selfDecl->getType());
  assert(selfTy.hasReferenceSemantics() &&
         "can't emit a value type ctor here");

  // Use alloc_ref to allocate the object.
  // TODO: allow custom allocation?
  // FIXME: should have a cleanup in case of exception
  auto selfClassDecl =
    ctor->getDeclContext()->getAsClassOrClassExtensionContext();

  SILValue selfValue;

  // Allocate the 'self' value.
  bool useObjCAllocation = usesObjCAllocator(selfClassDecl);

  if (ctor->isConvenienceInit() || ctor->hasClangNode()) {
    // For a convenience initializer or an initializer synthesized
    // for an Objective-C class, allocate using the metatype.
    SILValue allocArg = selfMetaValue;

    // When using Objective-C allocation, convert the metatype
    // argument to an Objective-C metatype.
    if (useObjCAllocation) {
      auto metaTy = allocArg->getType().castTo<MetatypeType>();
      metaTy = CanMetatypeType::get(metaTy.getInstanceType(),
                                    MetatypeRepresentation::ObjC);
      allocArg = B.createThickToObjCMetatype(Loc, allocArg,
                                             getLoweredType(metaTy));
    }

    selfValue = B.createAllocRefDynamic(Loc, allocArg, selfTy,
                                        useObjCAllocation, {}, {});
  } else {
    // For a designated initializer, we know that the static type being
    // allocated is the type of the class that defines the designated
    // initializer.
    selfValue = B.createAllocRef(Loc, selfTy, useObjCAllocation, false,
                                 {}, {});
  }
  args.push_back(selfValue);

  // Call the initializer. Always use the Swift entry point, which will be a
  // bridging thunk if we're calling ObjC.
  SILDeclRef initConstant =
    SILDeclRef(ctor,
               SILDeclRef::Kind::Initializer,
               SILDeclRef::ConstructAtBestResilienceExpansion,
               SILDeclRef::ConstructAtNaturalUncurryLevel,
               /*isObjC=*/false);

  ManagedValue initVal;
  SILType initTy;
  ArrayRef<Substitution> subs;

  // Call the initializer.
  ArrayRef<Substitution> forwardingSubs;
  if (auto *genericEnv = ctor->getGenericEnvironmentOfContext()) {
    auto *genericSig = ctor->getGenericSignatureOfContext();
    forwardingSubs = genericEnv->getForwardingSubstitutions(
        SGM.SwiftModule, genericSig);
  }
  std::tie(initVal, initTy, subs) =
    emitSiblingMethodRef(Loc, selfValue, initConstant, forwardingSubs);

  SILValue initedSelfValue =
    emitApplyWithRethrow(Loc, initVal.forward(*this), initTy, subs, args);

  // Return the initialized 'self'.
  B.createReturn(ImplicitReturnLocation::getImplicitReturnLoc(Loc),
                 initedSelfValue);
}
/// Emit the implicit constructor function for an enum case: inject the
/// (possibly absent) payload argument into the enum and return the result,
/// using an indirect return slot when the enum type is address-only.
void SILGenFunction::emitEnumConstructor(EnumElementDecl *element) {
  CanType enumTy = element->getParentEnum()
                     ->getDeclaredTypeInContext()
                     ->getCanonicalType();
  auto &enumTI = getTypeLowering(enumTy);

  RegularLocation Loc(element);
  CleanupLocation CleanupLoc(element);
  Loc.markAutoGenerated();

  // Emit the indirect return slot.
  std::unique_ptr<Initialization> dest;
  if (enumTI.isAddressOnly()) {
    auto &AC = getASTContext();
    auto VD = new (AC) ParamDecl(/*IsLet*/ false, SourceLoc(), SourceLoc(),
                                 AC.getIdentifier("$return_value"),
                                 SourceLoc(),
                                 AC.getIdentifier("$return_value"),
                                 CanInOutType::get(enumTy),
                                 element->getDeclContext());
    auto resultSlot = new (SGM.M) SILArgument(F.begin(),
                                              enumTI.getLoweredType(), VD);
    dest = std::unique_ptr<Initialization>(
        new KnownAddressInitialization(resultSlot));
  }

  Scope scope(Cleanups, CleanupLoc);

  // Emit the exploded constructor argument.
  ArgumentSource payload;
  if (element->hasArgumentType()) {
    RValue arg = emitImplicitValueConstructorArg
      (*this, Loc, element->getArgumentType()->getCanonicalType(),
       element->getDeclContext());
    payload = ArgumentSource(Loc, std::move(arg));
  }

  // Emit the metatype argument.
  emitConstructorMetatypeArg(*this, element);

  // If possible, emit the enum directly into the indirect return.
  SGFContext C = (dest ? SGFContext(dest.get()) : SGFContext());
  ManagedValue mv = emitInjectEnum(Loc, std::move(payload),
                                   enumTI.getLoweredType(),
                                   element, C);

  // Return the enum.
  auto ReturnLoc = ImplicitReturnLocation::getImplicitReturnLoc(Loc);

  if (mv.isInContext()) {
    // The enum was emitted into the indirect return slot; return ().
    assert(enumTI.isAddressOnly());
    scope.pop();
    B.createReturn(ReturnLoc, emitEmptyTuple(Loc));
  } else {
    // Loadable enum: forward the value out of the scope and return it.
    assert(enumTI.isLoadable());
    SILValue result = mv.forward(*this);
    scope.pop();
    B.createReturn(ReturnLoc, result);
  }
}
/// Emit an unchecked_addr_cast of \p op to \p resultTy, transferring any
/// cleanup from the operand onto the cast result.
ManagedValue SILGenBuilder::createUncheckedAddrCast(SILLocation loc,
                                                    ManagedValue op,
                                                    SILType resultTy) {
  // Record how the operand's cleanup (if any) should be recreated.
  CleanupCloner opCleanup(*this, op);
  // Forward the operand to disable its cleanup, then emit the raw cast.
  SILValue castValue = createUncheckedAddrCast(loc, op.forward(SGF), resultTy);
  // Re-attach an equivalent cleanup to the cast result.
  return opCleanup.clone(castValue);
}
/// Emit an upcast of \p original to \p type, transferring any cleanup from
/// the operand onto the upcast result.
ManagedValue SILGenBuilder::createUpcast(SILLocation loc,
                                         ManagedValue original,
                                         SILType type) {
  // Record how the operand's cleanup (if any) should be recreated.
  CleanupCloner originalCleanup(*this, original);
  // Forward the operand to disable its cleanup, then emit the raw upcast.
  SILValue upcastValue = createUpcast(loc, original.forward(SGF), type);
  // Re-attach an equivalent cleanup to the upcast result.
  return originalCleanup.clone(upcastValue);
}
/// Open an existential value into an opened-archetype value, selecting the
/// open_existential_* instruction appropriate to the existential's
/// preferred representation.
///
/// \returns an OpaqueValueState wrapping the opened value, recording
///          whether the consumer of the opened archetype may consume it.
SILGenFunction::OpaqueValueState
SILGenFunction::emitOpenExistential(
    SILLocation loc, ManagedValue existentialValue,
    ArchetypeType *openedArchetype, SILType loweredOpenedType,
    AccessKind accessKind) {
  // Open the existential value into the opened archetype value.
  bool isUnique = true;
  bool canConsume;
  ManagedValue archetypeMV;

  SILType existentialType = existentialValue.getType();
  switch (existentialType.getPreferredExistentialRepresentation(SGM.M)) {
  case ExistentialRepresentation::Opaque: {
    SILValue archetypeValue;
    if (existentialType.isAddress()) {
      OpenedExistentialAccess allowedAccess =
          getOpenedExistentialAccessFor(accessKind);
      if (!loweredOpenedType.isAddress()) {
        assert(!silConv.useLoweredAddresses() &&
               "Non-address loweredOpenedType is only allowed under opaque "
               "value mode");
        loweredOpenedType = loweredOpenedType.getAddressType();
      }
      archetypeValue =
          B.createOpenExistentialAddr(loc, existentialValue.forward(*this),
                                      loweredOpenedType, allowedAccess);
    } else {
      archetypeValue = B.createOpenExistentialOpaque(
          loc, existentialValue.forward(*this), loweredOpenedType);
    }

    if (existentialValue.hasCleanup()) {
      // With CoW existentials we can't consume the boxed value inside of
      // the existential. (We could only do so after a uniqueness check on
      // the box holding the value).
      canConsume = false;
      enterDestroyCleanup(existentialValue.getValue());
      archetypeMV = ManagedValue::forUnmanaged(archetypeValue);
    } else {
      canConsume = false;
      archetypeMV = ManagedValue::forUnmanaged(archetypeValue);
    }
    break;
  }
  case ExistentialRepresentation::Metatype:
    assert(existentialType.isObject());
    archetypeMV =
        ManagedValue::forUnmanaged(
            B.createOpenExistentialMetatype(
                loc, existentialValue.forward(*this), loweredOpenedType));
    // Metatypes are always trivial. Consuming would be a no-op.
    canConsume = false;
    break;
  case ExistentialRepresentation::Class: {
    assert(existentialType.isObject());
    SILValue archetypeValue = B.createOpenExistentialRef(
        loc, existentialValue.forward(*this), loweredOpenedType);
    // The opened reference is consumable iff the incoming existential
    // carried a cleanup of its own.
    canConsume = existentialValue.hasCleanup();
    archetypeMV = (canConsume
                   ? emitManagedRValueWithCleanup(archetypeValue)
                   : ManagedValue::forUnmanaged(archetypeValue));
    break;
  }
  case ExistentialRepresentation::Boxed:
    if (existentialType.isAddress()) {
      existentialValue = emitLoad(loc, existentialValue.getValue(),
                                  getTypeLowering(existentialType),
                                  SGFContext::AllowGuaranteedPlusZero,
                                  IsNotTake);
    }

    existentialType = existentialValue.getType();
    assert(existentialType.isObject());
    // NB: Don't forward the cleanup, because consuming a boxed value won't
    // consume the box reference.
    archetypeMV = ManagedValue::forUnmanaged(
        B.createOpenExistentialBox(loc, existentialValue.getValue(),
                                   loweredOpenedType.getAddressType()));
    // The boxed value can't be assumed to be uniquely referenced. We can never
    // consume it.
    // TODO: We could use isUniquelyReferenced to shorten the duration of
    // the box to the point that the opaque value is copied out.
    isUnique = false;
    canConsume = false;
    break;
  case ExistentialRepresentation::None:
    llvm_unreachable("not existential");
  }

  assert(!canConsume || isUnique);
  (void) isUnique;

  return SILGenFunction::OpaqueValueState{
    archetypeMV,
    /*isConsumable*/ canConsume,
    /*hasBeenConsumed*/ false
  };
}
/// Emit an existential container erasing the concrete value produced by
/// \p F to the existential type described by \p existentialTL, choosing the
/// initialization strategy from the existential's preferred representation.
///
/// When erasing to the 'Error' type with ObjC interop enabled, this first
/// tries cheaper paths: direct NSError erasure, the _BridgedStoredNSError
/// '_nsError' witness, or (for archetypes) extracting an embedded NSError.
///
/// \param conformances the conformances of the concrete type to the
///        existential's protocols.
/// \param allowEmbeddedNSError whether to attempt the embedded-NSError
///        extraction; cleared on the recursive call to avoid looping.
ManagedValue SILGenFunction::emitExistentialErasure(
                            SILLocation loc,
                            CanType concreteFormalType,
                            const TypeLowering &concreteTL,
                            const TypeLowering &existentialTL,
                            ArrayRef<ProtocolConformanceRef> conformances,
                            SGFContext C,
                            llvm::function_ref<ManagedValue (SGFContext)> F,
                            bool allowEmbeddedNSError) {
  // Mark the needed conformances as used.
  for (auto conformance : conformances)
    SGM.useConformance(conformance);

  // If we're erasing to the 'Error' type, we might be able to get an NSError
  // representation more efficiently.
  auto &ctx = getASTContext();
  if (ctx.LangOpts.EnableObjCInterop && conformances.size() == 1 &&
      conformances[0].getRequirement() == ctx.getErrorDecl() &&
      ctx.getNSErrorDecl()) {
    auto nsErrorDecl = ctx.getNSErrorDecl();

    // If the concrete type is NSError or a subclass thereof, just erase it
    // directly.
    auto nsErrorType = nsErrorDecl->getDeclaredType()->getCanonicalType();
    if (nsErrorType->isExactSuperclassOf(concreteFormalType, nullptr)) {
      ManagedValue nsError = F(SGFContext());
      if (nsErrorType != concreteFormalType) {
        nsError = ManagedValue(B.createUpcast(loc, nsError.getValue(),
                                              getLoweredType(nsErrorType)),
                               nsError.getCleanup());
      }
      return emitBridgedToNativeError(loc, nsError);
    }

    // If the concrete type is known to conform to _BridgedStoredNSError,
    // call the _nsError witness getter to extract the NSError directly,
    // then just erase the NSError.
    if (auto storedNSErrorConformance =
          SGM.getConformanceToBridgedStoredNSError(loc, concreteFormalType)) {
      auto nsErrorVar = SGM.getNSErrorRequirement(loc);
      if (!nsErrorVar) return emitUndef(loc, existentialTL.getLoweredType());

      SubstitutionList nsErrorVarSubstitutions;

      // Devirtualize. Maybe this should be done implicitly by
      // emitPropertyLValue?
      if (storedNSErrorConformance->isConcrete()) {
        if (auto witnessVar = storedNSErrorConformance->getConcrete()
                                ->getWitness(nsErrorVar, nullptr)) {
          nsErrorVar = cast<VarDecl>(witnessVar.getDecl());
          nsErrorVarSubstitutions = witnessVar.getSubstitutions();
        }
      }

      auto nativeError = F(SGFContext());

      WritebackScope writebackScope(*this);
      auto nsError =
        emitRValueForPropertyLoad(loc, nativeError, concreteFormalType,
                                  /*super*/ false, nsErrorVar,
                                  nsErrorVarSubstitutions,
                                  AccessSemantics::Ordinary, nsErrorType,
                                  SGFContext())
          .getAsSingleValue(*this, loc);

      return emitBridgedToNativeError(loc, nsError);
    }

    // Otherwise, if it's an archetype, try calling the _getEmbeddedNSError()
    // witness to try to dig out the embedded NSError. But don't do this
    // when we're being called recursively.
    if (isa<ArchetypeType>(concreteFormalType) && allowEmbeddedNSError) {
      auto contBB = createBasicBlock();
      auto isNotPresentBB = createBasicBlock();
      auto isPresentBB = createBasicBlock();

      // Call swift_stdlib_getErrorEmbeddedNSError to attempt to extract an
      // NSError from the value.
      auto getEmbeddedNSErrorFn = SGM.getGetErrorEmbeddedNSError(loc);
      if (!getEmbeddedNSErrorFn)
        return emitUndef(loc, existentialTL.getLoweredType());

      Substitution getEmbeddedNSErrorSubstitutions[1] = {
        Substitution(concreteFormalType, conformances)
      };

      ManagedValue concreteValue = F(SGFContext());
      ManagedValue potentialNSError =
        emitApplyOfLibraryIntrinsic(loc,
                                    getEmbeddedNSErrorFn,
                                    getEmbeddedNSErrorSubstitutions,
                                    { concreteValue.copy(*this, loc) },
                                    SGFContext())
          .getAsSingleValue(*this, loc);

      // We're going to consume 'concreteValue' in exactly one branch,
      // so kill its cleanup now and recreate it on both branches.
      (void) concreteValue.forward(*this);

      // Check whether we got an NSError back.
      std::pair<EnumElementDecl*, SILBasicBlock*> cases[] = {
        { ctx.getOptionalSomeDecl(), isPresentBB },
        { ctx.getOptionalNoneDecl(), isNotPresentBB }
      };
      B.createSwitchEnum(loc, potentialNSError.forward(*this),
                         /*default*/ nullptr, cases);

      // If we did get an NSError, emit the existential erasure from that
      // NSError.
      B.emitBlock(isPresentBB);
      SILValue branchArg;
      {
        // Don't allow cleanups to escape the conditional block.
        FullExpr presentScope(Cleanups, CleanupLocation::get(loc));

        enterDestroyCleanup(concreteValue.getValue());

        // Receive the error value. It's typed as an 'AnyObject' for
        // layering reasons, so perform an unchecked cast down to NSError.
        SILType anyObjectTy =
          potentialNSError.getType().getAnyOptionalObjectType();
        SILValue nsError = isPresentBB->createPHIArgument(
            anyObjectTy, ValueOwnershipKind::Owned);
        nsError = B.createUncheckedRefCast(loc, nsError,
                                           getLoweredType(nsErrorType));

        branchArg =
          emitBridgedToNativeError(loc, emitManagedRValueWithCleanup(nsError))
            .forward(*this);
      }
      B.createBranch(loc, contBB, branchArg);

      // If we did not get an NSError, just directly emit the existential.
      // Since this is a recursive call, make sure we don't end up in this
      // path again.
      B.emitBlock(isNotPresentBB);
      {
        FullExpr presentScope(Cleanups, CleanupLocation::get(loc));
        concreteValue = emitManagedRValueWithCleanup(concreteValue.getValue());
        branchArg = emitExistentialErasure(loc, concreteFormalType, concreteTL,
                                           existentialTL, conformances,
                                           SGFContext(),
                                           [&](SGFContext C) {
                                             return concreteValue;
                                           },
                                           /*allowEmbeddedNSError=*/false)
                      .forward(*this);
      }
      B.createBranch(loc, contBB, branchArg);

      // Continue.
      B.emitBlock(contBB);
      SILValue existentialResult = contBB->createPHIArgument(
          existentialTL.getLoweredType(), ValueOwnershipKind::Owned);
      return emitManagedRValueWithCleanup(existentialResult, existentialTL);
    }
  }

  switch (existentialTL.getLoweredType().getObjectType()
            .getPreferredExistentialRepresentation(SGM.M, concreteFormalType)) {
  case ExistentialRepresentation::None:
    llvm_unreachable("not an existential type");
  case ExistentialRepresentation::Metatype: {
    assert(existentialTL.isLoadable());

    SILValue metatype = F(SGFContext()).getUnmanagedValue();
    assert(metatype->getType().castTo<AnyMetatypeType>()->getRepresentation()
             == MetatypeRepresentation::Thick);

    auto upcast =
      B.createInitExistentialMetatype(loc, metatype,
                                      existentialTL.getLoweredType(),
                                      conformances);
    return ManagedValue::forUnmanaged(upcast);
  }
  case ExistentialRepresentation::Class: {
    assert(existentialTL.isLoadable());

    ManagedValue sub = F(SGFContext());
    SILValue v = B.createInitExistentialRef(loc,
                                            existentialTL.getLoweredType(),
                                            concreteFormalType,
                                            sub.getValue(),
                                            conformances);
    return ManagedValue(v, sub.getCleanup());
  }
  case ExistentialRepresentation::Boxed: {
    // Allocate the existential.
    auto *existential =
      B.createAllocExistentialBox(loc,
                                  existentialTL.getLoweredType(),
                                  concreteFormalType,
                                  conformances);
    auto *valueAddr =
      B.createProjectExistentialBox(loc,
                                    concreteTL.getLoweredType(),
                                    existential);
    // Initialize the concrete value in-place.
    ExistentialInitialization init(existential, valueAddr, concreteFormalType,
                                   ExistentialRepresentation::Boxed, *this);
    ManagedValue mv = F(SGFContext(&init));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init.getAddress());
      init.finishInitialization(*this);
    }

    return emitManagedRValueWithCleanup(existential);
  }
  case ExistentialRepresentation::Opaque: {
    // If the concrete value is a pseudogeneric archetype, first erase it to
    // its upper bound.
    auto anyObjectProto = getASTContext()
      .getProtocol(KnownProtocolKind::AnyObject);
    auto anyObjectTy = anyObjectProto
      ? anyObjectProto->getDeclaredType()->getCanonicalType()
      : CanType();
    auto eraseToAnyObject =
      [&, concreteFormalType, F](SGFContext C) -> ManagedValue {
        auto concreteValue = F(SGFContext());
        auto anyObjectConformance = SGM.SwiftModule
          ->lookupConformance(concreteFormalType, anyObjectProto, nullptr);
        ProtocolConformanceRef buf[] = {
          *anyObjectConformance,
        };
        auto asAnyObject = B.createInitExistentialRef(loc,
                                  SILType::getPrimitiveObjectType(anyObjectTy),
                                  concreteFormalType,
                                  concreteValue.getValue(),
                                  getASTContext().AllocateCopy(buf));
        return ManagedValue(asAnyObject, concreteValue.getCleanup());
      };

    auto concreteTLPtr = &concreteTL;
    if (this->F.getLoweredFunctionType()->isPseudogeneric()) {
      if (anyObjectTy && concreteFormalType->is<ArchetypeType>()) {
        concreteFormalType = anyObjectTy;
        concreteTLPtr = &getTypeLowering(anyObjectTy);
        F = eraseToAnyObject;
      }
    }

    // Allocate the existential.
    SILValue existential =
      getBufferForExprResult(loc, existentialTL.getLoweredType(), C);

    // Allocate the concrete value inside the container.
    SILValue valueAddr = B.createInitExistentialAddr(
                            loc, existential,
                            concreteFormalType,
                            concreteTLPtr->getLoweredType(),
                            conformances);
    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr,
                                      concreteFormalType,
                                      ExistentialRepresentation::Opaque,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }

    return manageBufferForExprResult(existential, existentialTL, C);
  }
  }

  llvm_unreachable("Unhandled ExistentialRepresentation in switch.");
}
/// Build the body of the block-invoke thunk that calls a captured native
/// Swift function from an Objective-C block: load the captured function out
/// of the block storage, bridge the block's arguments into native
/// conventions, apply the function, and bridge the result back to ObjC.
///
/// Fixed defect: the per-parameter loop declared `auto ¶m = ...` — an
/// HTML-entity corruption of `auto &param` — which does not compile; the
/// loop body reads `param.getSILType()` and `param.getConvention()`.
static void buildFuncToBlockInvokeBody(SILGenFunction &gen,
                                       SILLocation loc,
                                       CanSILFunctionType blockTy,
                                       CanSILBlockStorageType blockStorageTy,
                                       CanSILFunctionType funcTy) {
  Scope scope(gen.Cleanups, CleanupLocation::get(loc));
  SILBasicBlock *entry = &*gen.F.begin();

  // Get the captured native function value out of the block.
  auto storageAddrTy = SILType::getPrimitiveAddressType(blockStorageTy);
  auto storage = new (gen.SGM.M) SILArgument(entry, storageAddrTy);
  auto capture = gen.B.createProjectBlockStorage(loc, storage);
  auto &funcTL = gen.getTypeLowering(funcTy);
  auto fn = gen.emitLoad(loc, capture, funcTL, SGFContext(), IsNotTake);

  // Collect the block arguments, which may have nonstandard conventions.
  assert(blockTy->getParameters().size() == funcTy->getParameters().size()
         && "block and function types don't match");

  SmallVector<ManagedValue, 4> args;
  for (unsigned i : indices(funcTy->getParameters())) {
    auto &funcParam = funcTy->getParameters()[i];
    auto &param = blockTy->getParameters()[i];
    SILValue v = new (gen.SGM.M) SILArgument(entry, param.getSILType());

    ManagedValue mv;

    // If the parameter is a block, we need to copy it to ensure it lives on
    // the heap. The adapted closure value might outlive the block's original
    // scope.
    if (param.getSILType().isBlockPointerCompatible()) {
      // We still need to consume the original block if it was owned.
      switch (param.getConvention()) {
      case ParameterConvention::Direct_Owned:
        gen.emitManagedRValueWithCleanup(v);
        break;

      case ParameterConvention::Direct_Deallocating:
      case ParameterConvention::Direct_Guaranteed:
      case ParameterConvention::Direct_Unowned:
        break;

      case ParameterConvention::Indirect_In:
      case ParameterConvention::Indirect_In_Guaranteed:
      case ParameterConvention::Indirect_Inout:
      case ParameterConvention::Indirect_InoutAliasable:
        llvm_unreachable("indirect params to blocks not supported");
      }

      SILValue blockCopy = gen.B.createCopyBlock(loc, v);
      mv = gen.emitManagedRValueWithCleanup(blockCopy);
    } else {
      switch (param.getConvention()) {
      case ParameterConvention::Direct_Owned:
        // Consume owned parameters at +1.
        mv = gen.emitManagedRValueWithCleanup(v);
        break;

      case ParameterConvention::Direct_Guaranteed:
      case ParameterConvention::Direct_Unowned:
        // We need to independently retain the value.
        mv = gen.emitManagedRetain(loc, v);
        break;

      case ParameterConvention::Direct_Deallocating:
        // We do not need to retain the value since the value is already being
        // deallocated.
        mv = ManagedValue::forUnmanaged(v);
        break;

      case ParameterConvention::Indirect_In_Guaranteed:
      case ParameterConvention::Indirect_In:
      case ParameterConvention::Indirect_Inout:
      case ParameterConvention::Indirect_InoutAliasable:
        llvm_unreachable("indirect arguments to blocks not supported");
      }
    }

    args.push_back(gen.emitBridgedToNativeValue(loc, mv,
                                SILFunctionTypeRepresentation::CFunctionPointer,
                                funcParam.getType()));
  }

  // Call the native function.
  assert(!funcTy->hasIndirectResults()
         && "block thunking func with indirect result not supported");
  assert(funcTy->getNumDirectResults() <= 1
         && "block thunking func with multiple results not supported");
  ManagedValue result = gen.emitMonomorphicApply(loc, fn, args,
                         funcTy->getSILResult().getSwiftRValueType(),
                         ApplyOptions::None, None, None)
    .getAsSingleValue(gen, loc);

  // Bridge the result back to ObjC.
  result = gen.emitNativeToBridgedValue(loc, result,
                        SILFunctionTypeRepresentation::CFunctionPointer,
                        blockTy->getSILResult().getSwiftRValueType());

  auto resultVal = result.forward(gen);
  scope.pop();

  gen.B.createReturn(loc, resultVal);
}
/// Emit the SIL body of a class's destroying destructor ('deinit').
///
/// The emitted function runs the user-written deinit body, chains to the
/// superclass destructor (if any), destroys the stored members, and returns
/// the object at +1 as Builtin.NativeObject so the deallocating entry point
/// can free it.
void SILGenFunction::emitDestroyingDestructor(DestructorDecl *dd) {
  MagicFunctionName = DeclName(SGM.M.getASTContext().getIdentifier("deinit"));

  RegularLocation Loc(dd);
  if (dd->isImplicit())
    Loc.markAutoGenerated();

  auto cd = cast<ClassDecl>(dd->getDeclContext());
  SILValue selfValue = emitSelfDecl(dd->getImplicitSelfDecl());

  // Create a basic block to jump to for the implicit destruction behavior
  // of releasing the elements and calling the superclass destructor.
  // We won't actually emit the block until we finish with the destructor body.
  prepareEpilog(Type(), false, CleanupLocation::get(Loc));

  emitProfilerIncrement(dd->getBody());
  // Emit the destructor body.
  emitStmt(dd->getBody());

  // If the epilog block is unreachable (e.g. the body never falls through),
  // there is nothing more to emit.
  Optional<SILValue> maybeReturnValue;
  SILLocation returnLoc(Loc);
  std::tie(maybeReturnValue, returnLoc) = emitEpilogBB(Loc);
  if (!maybeReturnValue)
    return;

  auto cleanupLoc = CleanupLocation::get(Loc);

  // If we have a superclass, invoke its destructor.
  SILValue resultSelfValue;
  SILType objectPtrTy = SILType::getNativeObjectType(F.getASTContext());
  SILType classTy = selfValue->getType();
  if (cd->hasSuperclass()) {
    Type superclassTy = dd->mapTypeIntoContext(cd->getSuperclass());
    ClassDecl *superclass = superclassTy->getClassOrBoundGenericClass();
    auto superclassDtorDecl = superclass->getDestructor();
    SILDeclRef dtorConstant =
        SILDeclRef(superclassDtorDecl, SILDeclRef::Kind::Destroyer);
    // Upcast 'self' to the superclass type before applying its destroyer.
    SILType baseSILTy = getLoweredLoadableType(superclassTy);
    SILValue baseSelf = B.createUpcast(cleanupLoc, selfValue, baseSILTy);
    ManagedValue dtorValue;
    SILType dtorTy;
    auto subMap = superclassTy->getContextSubstitutionMap(
        SGM.M.getSwiftModule(), superclass);
    std::tie(dtorValue, dtorTy) =
        emitSiblingMethodRef(cleanupLoc, baseSelf, dtorConstant, subMap);
    // The superclass destroyer returns self at +1 as Builtin.NativeObject.
    resultSelfValue = B.createApply(cleanupLoc, dtorValue.forward(*this),
                                    dtorTy, objectPtrTy, subMap, baseSelf);
  } else {
    resultSelfValue = selfValue;
  }

  // Borrow 'self' for the duration of member destruction; the scope pops
  // the borrow before we hand the value back to the caller.
  ArgumentScope S(*this, Loc);
  ManagedValue borrowedValue =
      ManagedValue::forUnmanaged(resultSelfValue).borrow(*this, cleanupLoc);
  // After a super-destructor apply the value is a NativeObject; cast back to
  // the class type so member destruction sees the right type.
  if (classTy != borrowedValue.getType()) {
    borrowedValue = B.createUncheckedRefCast(cleanupLoc, borrowedValue, classTy);
  }

  // Release our members.
  emitClassMemberDestruction(borrowedValue, cd, cleanupLoc);

  S.pop();

  // Convert the return value to Builtin.NativeObject if it isn't already.
  if (resultSelfValue->getType() != objectPtrTy) {
    resultSelfValue =
        B.createUncheckedRefCast(cleanupLoc, resultSelfValue, objectPtrTy);
  }
  // The function returns at +1; if ownership came back guaranteed (the
  // no-superclass path uses the incoming self argument), convert it to owned.
  if (resultSelfValue.getOwnershipKind() != ValueOwnershipKind::Owned) {
    assert(resultSelfValue.getOwnershipKind() ==
           ValueOwnershipKind::Guaranteed);
    resultSelfValue = B.createUncheckedOwnershipConversion(
        cleanupLoc, resultSelfValue, ValueOwnershipKind::Owned);
  }
  B.createReturn(returnLoc, resultSelfValue);
}
/// Emit the SIL body of a class initializer ('init'), i.e. the initializing
/// entry point that runs on an already-allocated 'self'.
///
/// Handles delegating and chained initializers, optional boxing of 'self'
/// (needed for self-reassignment in super.init chains and delegation),
/// member initialization, failable-initializer epilogs, and the custom
/// epilog that returns 'self' even though the AST signature returns Void.
void SILGenFunction::emitClassConstructorInitializer(ConstructorDecl *ctor) {
  MagicFunctionName = SILGenModule::getMagicFunctionName(ctor);

  assert(ctor->getBody() && "Class constructor without a body?");

  // True if this constructor delegates to a peer constructor with self.init().
  bool isDelegating = false;
  if (!ctor->hasStubImplementation()) {
    isDelegating = ctor->getDelegatingOrChainedInitKind(nullptr) ==
        ConstructorDecl::BodyInitKind::Delegating;
  }

  // Set up the 'self' argument. If this class has a superclass, we set up
  // self as a box. This allows "self reassignment" to happen in super init
  // method chains, which is important for interoperating with Objective-C
  // classes. We also use a box for delegating constructors, since the
  // delegated-to initializer may also replace self.
  //
  // TODO: If we could require Objective-C classes to have an attribute to get
  // this behavior, we could avoid runtime overhead here.
  VarDecl *selfDecl = ctor->getImplicitSelfDecl();
  auto *dc = ctor->getDeclContext();
  auto selfClassDecl = dc->getAsClassOrClassExtensionContext();
  bool NeedsBoxForSelf =
      isDelegating ||
      (selfClassDecl->hasSuperclass() && !ctor->hasStubImplementation());
  bool usesObjCAllocator = Lowering::usesObjCAllocator(selfClassDecl);

  // If needed, mark 'self' as uninitialized so that DI knows to
  // enforce its DI properties on stored properties.
  MarkUninitializedInst::Kind MUKind;
  if (isDelegating)
    MUKind = MarkUninitializedInst::DelegatingSelf;
  else if (selfClassDecl->requiresStoredPropertyInits() && usesObjCAllocator) {
    // Stored properties will be initialized in a separate
    // .cxx_construct method called by the Objective-C runtime.
    assert(selfClassDecl->hasSuperclass() &&
           "Cannot use ObjC allocation without a superclass");
    MUKind = MarkUninitializedInst::DerivedSelfOnly;
  } else if (selfClassDecl->hasSuperclass())
    MUKind = MarkUninitializedInst::DerivedSelf;
  else
    MUKind = MarkUninitializedInst::RootSelf;

  if (NeedsBoxForSelf) {
    // Allocate the local variable for 'self'.
    emitLocalVariableWithCleanup(selfDecl, MUKind)->finishInitialization(*this);
  }

  // Emit the prolog for the non-self arguments.
  // FIXME: Handle self along with the other body patterns.
  uint16_t ArgNo = emitProlog(ctor->getParameterList(1),
                              TupleType::getEmpty(F.getASTContext()), ctor,
                              ctor->hasThrows());

  SILType selfTy = getLoweredLoadableType(selfDecl->getType());
  ManagedValue selfArg = B.createInputFunctionArgument(selfTy, selfDecl);

  if (!NeedsBoxForSelf) {
    // Emit a debug-value for 'self' since it has no box (and thus no
    // debug-carrying alloc) in this configuration.
    SILLocation PrologueLoc(selfDecl);
    PrologueLoc.markAsPrologue();
    SILDebugVariable DbgVar(selfDecl->isLet(), ++ArgNo);
    B.createDebugValue(PrologueLoc, selfArg.getValue(), DbgVar);
  }

  if (!ctor->hasStubImplementation()) {
    assert(selfTy.hasReferenceSemantics() &&
           "can't emit a value type ctor here");
    if (NeedsBoxForSelf) {
      SILLocation prologueLoc = RegularLocation(ctor);
      prologueLoc.markAsPrologue();
      // SEMANTIC ARC TODO: When the verifier is complete, review this.
      B.emitStoreValueOperation(prologueLoc, selfArg.forward(*this),
                                VarLocs[selfDecl].value,
                                StoreOwnershipQualifier::Init);
    } else {
      selfArg = B.createMarkUninitialized(selfDecl, selfArg, MUKind);
      VarLocs[selfDecl] = VarLoc::get(selfArg.getValue());
    }
  }

  // Prepare the end of initializer location.
  SILLocation endOfInitLoc = RegularLocation(ctor);
  endOfInitLoc.pointToEnd();

  // Create a basic block to jump to for the implicit 'self' return.
  // We won't emit the block until after we've emitted the body.
  prepareEpilog(Type(), ctor->hasThrows(), CleanupLocation::get(endOfInitLoc));

  auto resultType = ctor->mapTypeIntoContext(ctor->getResultInterfaceType());

  // If the constructor can fail, set up an alternative epilog for constructor
  // failure.
  SILBasicBlock *failureExitBB = nullptr;
  SILArgument *failureExitArg = nullptr;
  auto &resultLowering = getTypeLowering(resultType);

  if (ctor->getFailability() != OTK_None) {
    SILBasicBlock *failureBB = createBasicBlock(FunctionSection::Postmatter);

    RegularLocation loc(ctor);
    loc.markAutoGenerated();

    // On failure, we'll clean up everything and return nil instead.
    SILGenSavedInsertionPoint savedIP(*this, failureBB,
                                      FunctionSection::Postmatter);

    // The failure exit block takes the final (optional) result as a phi,
    // so both success and failure paths funnel into a single 'return'.
    failureExitBB = createBasicBlock();
    failureExitArg = failureExitBB->createPHIArgument(
        resultLowering.getLoweredType(), ValueOwnershipKind::Owned);

    Cleanups.emitCleanupsForReturn(ctor);
    SILValue nilResult =
        B.createEnum(loc, SILValue(), getASTContext().getOptionalNoneDecl(),
                     resultLowering.getLoweredType());
    B.createBranch(loc, failureExitBB, nilResult);

    B.setInsertionPoint(failureExitBB);
    B.createReturn(loc, failureExitArg);

    FailDest = JumpDest(failureBB, Cleanups.getCleanupsDepth(), ctor);
  }

  // Handle member initializers.
  if (isDelegating) {
    // A delegating initializer does not initialize instance
    // variables.
  } else if (ctor->hasStubImplementation()) {
    // Nor does a stub implementation.
  } else if (selfClassDecl->requiresStoredPropertyInits() &&
             usesObjCAllocator) {
    // When the class requires all stored properties to have initial
    // values and we're using Objective-C's allocation, stored
    // properties are initialized via the .cxx_construct method, which
    // will be called by the runtime.

    // Note that 'self' has been fully initialized at this point.
  } else {
    // Emit the member initializers.
    emitMemberInitializers(ctor, selfDecl, selfClassDecl);
  }

  emitProfilerIncrement(ctor->getBody());

  // Emit the constructor body.
  emitStmt(ctor->getBody());

  // Emit the call to super.init() right before exiting from the initializer.
  if (NeedsBoxForSelf) {
    if (auto *SI = ctor->getSuperInitCall()) {
      B.setInsertionPoint(ReturnDest.getBlock());

      emitRValue(SI);

      // Split off a fresh block so the epilog logic below starts after the
      // super.init call.
      B.emitBlock(B.splitBlockForFallthrough(), ctor);

      ReturnDest = JumpDest(B.getInsertionBB(), ReturnDest.getDepth(),
                            ReturnDest.getCleanupLocation());
      B.clearInsertionPoint();
    }
  }

  CleanupStateRestorationScope SelfCleanupSave(Cleanups);

  // Build a custom epilog block, since the AST representation of the
  // constructor decl (which has no self in the return type) doesn't match the
  // SIL representation.
  {
    // Ensure that before we add additional cleanups, that we have emitted all
    // cleanups at this point.
    assert(!Cleanups.hasAnyActiveCleanups(getCleanupsDepth(),
                                          ReturnDest.getDepth()) &&
           "emitting epilog in wrong scope");

    SILGenSavedInsertionPoint savedIP(*this, ReturnDest.getBlock());
    auto cleanupLoc = CleanupLocation(ctor);

    // If we're using a box for self, reload the value at the end of the init
    // method.
    if (NeedsBoxForSelf) {
      ManagedValue storedSelf =
          ManagedValue::forUnmanaged(VarLocs[selfDecl].value);
      selfArg = B.createLoadCopy(cleanupLoc, storedSelf);
    } else {
      // We have to do a retain because we are returning the pointer +1.
      //
      // SEMANTIC ARC TODO: When the verifier is complete, we will need to
      // change this to selfArg = B.emitCopyValueOperation(...). Currently due
      // to the way that SILGen performs folding of copy_value, destroy_value,
      // the returned selfArg may be deleted causing us to have a
      // dead-pointer. Instead just use the old self value since we have a
      // class.
      selfArg = B.createCopyValue(cleanupLoc, selfArg);
    }

    // Inject the self value into an optional if the constructor is failable.
    if (ctor->getFailability() != OTK_None) {
      RegularLocation loc(ctor);
      loc.markAutoGenerated();
      selfArg = B.createEnum(loc, selfArg,
                             getASTContext().getOptionalSomeDecl(),
                             getLoweredLoadableType(resultType));
    }

    // Save our cleanup state. We want all other potential cleanups to fire,
    // but not this one.
    if (selfArg.hasCleanup())
      SelfCleanupSave.pushCleanupState(selfArg.getCleanup(),
                                       CleanupState::Dormant);

    // Translate our cleanup to the new top cleanup.
    //
    // This is needed to preserve the invariant in getEpilogBB that when
    // cleanups are emitted, everything above ReturnDest.getDepth() has been
    // emitted. This is not true if we use ManagedValue and friends in the
    // epilogBB, thus the translation. We perform the same check above that
    // getEpilogBB performs to ensure that we still do not have the same
    // problem.
    ReturnDest = std::move(ReturnDest).translate(getTopCleanup());
  }

  // Emit the epilog and post-matter.
  auto returnLoc = emitEpilog(ctor, /*UsesCustomEpilog*/true);

  // Unpop our selfArg cleanup, so we can forward.
  std::move(SelfCleanupSave).pop();

  // Finish off the epilog by returning. If this is a failable ctor, then we
  // actually jump to the failure epilog to keep the invariant that there is
  // only one SIL return instruction per SIL function.
  if (B.hasValidInsertionPoint()) {
    if (failureExitBB)
      B.createBranch(returnLoc, failureExitBB, selfArg.forward(*this));
    else
      B.createReturn(returnLoc, selfArg.forward(*this));
  }
}
/// Emit the SIL body of a class's allocating constructor entry point.
///
/// Allocates the instance (via alloc_ref, or alloc_ref_dynamic for @objc
/// convenience/imported initializers), forwards the incoming arguments to
/// the initializing entry point, and returns the initialized 'self'.
void SILGenFunction::emitClassConstructorAllocator(ConstructorDecl *ctor) {
  assert(!ctor->isFactoryInit() && "factories should not be emitted here");

  // Emit the prolog. Since we're just going to forward our args directly
  // to the initializer, don't allocate local variables for them.
  RegularLocation Loc(ctor);
  Loc.markAutoGenerated();

  // Forward the constructor arguments.
  // FIXME: Handle 'self' along with the other body patterns.
  SmallVector<SILValue, 8> args;
  bindParametersForForwarding(ctor->getParameters(), args);

  SILValue selfMetaValue = emitConstructorMetatypeArg(*this, ctor);

  // Allocate the "self" value.
  VarDecl *selfDecl = ctor->getImplicitSelfDecl();
  SILType selfTy = getLoweredType(selfDecl->getType());
  assert(selfTy.hasReferenceSemantics() &&
         "can't emit a value type ctor here");

  // Use alloc_ref to allocate the object.
  // TODO: allow custom allocation?
  // FIXME: should have a cleanup in case of exception
  auto selfClassDecl = ctor->getDeclContext()->getSelfClassDecl();

  SILValue selfValue;

  // Allocate the 'self' value.
  bool useObjCAllocation = usesObjCAllocator(selfClassDecl);

  if (ctor->hasClangNode() || ctor->isConvenienceInit()) {
    assert(ctor->hasClangNode() || ctor->isObjC());
    // For an allocator thunk synthesized for an @objc convenience initializer
    // or imported Objective-C init method, allocate using the metatype.
    SILValue allocArg = selfMetaValue;

    // When using Objective-C allocation, convert the metatype
    // argument to an Objective-C metatype.
    if (useObjCAllocation) {
      auto metaTy = allocArg->getType().castTo<MetatypeType>();
      metaTy = CanMetatypeType::get(metaTy.getInstanceType(),
                                    MetatypeRepresentation::ObjC);
      allocArg = B.createThickToObjCMetatype(Loc, allocArg,
                                             getLoweredType(metaTy));
    }

    selfValue = B.createAllocRefDynamic(Loc, allocArg, selfTy,
                                        useObjCAllocation, {}, {});
  } else {
    assert(ctor->isDesignatedInit());
    // For a designated initializer, we know that the static type being
    // allocated is the type of the class that defines the designated
    // initializer.
    selfValue = B.createAllocRef(Loc, selfTy, useObjCAllocation, false,
                                 ArrayRef<SILType>(), ArrayRef<SILValue>());
  }
  // 'self' is the trailing argument of the initializing entry point.
  args.push_back(selfValue);

  // Call the initializer. Always use the Swift entry point, which will be a
  // bridging thunk if we're calling ObjC.
  auto initConstant = SILDeclRef(ctor, SILDeclRef::Kind::Initializer);

  ManagedValue initVal;
  SILType initTy;

  // Call the initializer.
  // Build the forwarding substitution map from the constructor's generic
  // environment, mapping each generic parameter to its context archetype.
  SubstitutionMap subMap;
  if (auto *genericEnv = ctor->getGenericEnvironmentOfContext()) {
    auto *genericSig = genericEnv->getGenericSignature();
    subMap = SubstitutionMap::get(
        genericSig,
        [&](SubstitutableType *t) -> Type {
          return genericEnv->mapTypeIntoContext(
              t->castTo<GenericTypeParamType>());
        },
        MakeAbstractConformanceForGenericType());
  }

  std::tie(initVal, initTy) =
      emitSiblingMethodRef(Loc, selfValue, initConstant, subMap);

  SILValue initedSelfValue =
      emitApplyWithRethrow(Loc, initVal.forward(*this), initTy, subMap, args);

  emitProfilerIncrement(ctor->getBody());

  // Return the initialized 'self'.
  B.createReturn(ImplicitReturnLocation::getImplicitReturnLoc(Loc),
                 initedSelfValue);
}
/// Bridge argument types and adjust retain count conventions for an ObjC
/// thunk.
///
/// Creates the thunk's SIL arguments (indirect results, the foreign error
/// slot, and the formal parameters), normalizes each parameter to +1
/// ownership per the ObjC convention, bridges every value to its native
/// Swift type, and appends the resulting values to \p args in the order the
/// native function expects.
///
/// \param gen              the function being emitted.
/// \param loc              location used for all emitted instructions.
/// \param thunk            the foreign (ObjC) entry point being thunked.
/// \param args             out: the bridged native argument values.
/// \param foreignErrorSlot out: the NSError** argument, if one exists.
/// \param foreignError     out: the foreign error convention, if one exists.
/// \returns the substituted ObjC function type of the thunk.
static SILFunctionType *emitObjCThunkArguments(SILGenFunction &gen,
                                               SILLocation loc,
                                               SILDeclRef thunk,
                                               SmallVectorImpl<SILValue> &args,
                                               SILValue &foreignErrorSlot,
                              Optional<ForeignErrorConvention> &foreignError) {
  SILDeclRef native = thunk.asForeign(false);

  auto mod = gen.SGM.M.getSwiftModule();
  auto subs = gen.F.getForwardingSubstitutions();

  auto objcInfo = gen.SGM.Types.getConstantInfo(thunk);
  auto objcFnTy = objcInfo.SILFnType->substGenericArgs(gen.SGM.M, mod, subs);

  auto swiftInfo = gen.SGM.Types.getConstantInfo(native);
  auto swiftFnTy = swiftInfo.SILFnType->substGenericArgs(gen.SGM.M, mod, subs);

  // We must have the same context archetypes as the unthunked function.
  assert(objcInfo.ContextGenericParams == swiftInfo.ContextGenericParams);

  SmallVector<ManagedValue, 8> bridgedArgs;
  bridgedArgs.reserve(objcFnTy->getParameters().size());

  SILFunction *orig = gen.SGM.getFunction(native, NotForDefinition);

  // Find the foreign error convention if we have one.
  if (orig->getLoweredFunctionType()->hasErrorResult()) {
    auto func = cast<AbstractFunctionDecl>(thunk.getDecl());
    foreignError = func->getForeignErrorConvention();
    assert(foreignError && "couldn't find foreign error convention!");
  }

  // Emit the indirect result arguments, if any.
  // FIXME: we're just assuming that these match up exactly?
  for (auto indirectResult : objcFnTy->getIndirectResults()) {
    SILType argTy = gen.F.mapTypeIntoContext(indirectResult.getSILType());
    auto arg = new (gen.F.getModule()) SILArgument(gen.F.begin(), argTy);
    args.push_back(arg);
  }

  // Emit the other arguments, taking ownership of arguments if necessary.
  auto inputs = objcFnTy->getParameters();
  auto nativeInputs = swiftFnTy->getParameters();
  // The ObjC parameter list has one extra slot when there is an error
  // convention (the NSError** parameter).
  assert(inputs.size() ==
         nativeInputs.size() + unsigned(foreignError.hasValue()));
  for (unsigned i = 0, e = inputs.size(); i < e; ++i) {
    SILType argTy = gen.F.mapTypeIntoContext(inputs[i].getSILType());
    SILValue arg = new(gen.F.getModule()) SILArgument(gen.F.begin(), argTy);

    // If this parameter is the foreign error slot, pull it out.
    // It does not correspond to a native argument.
    if (foreignError && i == foreignError->getErrorParameterIndex()) {
      foreignErrorSlot = arg;
      continue;
    }

    // If this parameter is deallocating, emit an unmanaged rvalue and
    // continue. The object has the deallocating bit set so retain, release is
    // irrelevant.
    if (inputs[i].isDeallocating()) {
      bridgedArgs.push_back(ManagedValue::forUnmanaged(arg));
      continue;
    }

    // If the argument is a block, copy it.
    if (argTy.isBlockPointerCompatible()) {
      auto copy = gen.B.createCopyBlock(loc, arg);
      // If the argument is consumed, we're still responsible for releasing the
      // original.
      if (inputs[i].isConsumed())
        gen.emitManagedRValueWithCleanup(arg);
      arg = copy;
    }
    // Convert the argument to +1 if necessary.
    else if (!inputs[i].isConsumed()) {
      arg = emitObjCUnconsumedArgument(gen, loc, arg);
    }

    // At this point 'arg' is at +1; manage it with a cleanup.
    auto managedArg = gen.emitManagedRValueWithCleanup(arg);

    bridgedArgs.push_back(managedArg);
  }

  assert(bridgedArgs.size() + unsigned(foreignError.hasValue())
           == objcFnTy->getParameters().size() &&
         "objc inputs don't match number of arguments?!");
  assert(bridgedArgs.size() == swiftFnTy->getNumSILArguments() &&
         "swift inputs don't match number of arguments?!");
  assert((foreignErrorSlot || !foreignError) &&
         "didn't find foreign error slot");

  // Bridge the input types.
  Scope scope(gen.Cleanups, CleanupLocation::get(loc));
  assert(bridgedArgs.size() == nativeInputs.size());
  for (unsigned i = 0, size = bridgedArgs.size(); i < size; ++i) {
    SILType argTy = gen.F.mapTypeIntoContext(
        swiftFnTy->getParameters()[i].getSILType());
    ManagedValue native =
        gen.emitBridgedToNativeValue(loc,
                                     bridgedArgs[i],
                                     SILFunctionTypeRepresentation::ObjCMethod,
                                     argTy.getSwiftType());
    SILValue argValue;

    // Forward only when the native convention consumes the argument;
    // otherwise pass it at +0 and let the cleanup release it.
    if (nativeInputs[i].isConsumed())
      argValue = native.forward(gen);
    else
      argValue = native.getValue();

    args.push_back(argValue);
  }

  return objcFnTy;
}
/// Emit a check that \p optional is non-nil, trapping (via the stdlib's
/// _diagnoseUnexpectedNilOptional when available) if it is nil, and return
/// the unwrapped payload.
///
/// The returned ManagedValue mirrors the ownership of the input: it carries
/// a cleanup if the input did, is an l-value if the input was, and is
/// unmanaged otherwise.
ManagedValue SILGenFunction::emitPreconditionOptionalHasValue(
    SILLocation loc, ManagedValue optional) {
  // Generate code to check that the optional is present, and if not, abort
  // with a message (provided by the stdlib).
  SILBasicBlock *contBB = createBasicBlock();
  SILBasicBlock *failBB = createBasicBlock();

  // Remember the input's ownership so we can rebuild an equivalent
  // ManagedValue for the payload at the end.
  bool hadCleanup = optional.hasCleanup();
  bool hadLValue = optional.isLValue();

  auto noneDecl = getASTContext().getOptionalNoneDecl();
  auto someDecl = getASTContext().getOptionalSomeDecl();
  if (optional.getType().isAddress()) {
    // We forward in the creation routine for
    // unchecked_take_enum_data_addr. switch_enum_addr is a +0 operation.
    B.createSwitchEnumAddr(loc, optional.getValue(),
                           /*defaultDest*/ nullptr,
                           {{someDecl, contBB}, {noneDecl, failBB}});
  } else {
    B.createSwitchEnum(loc, optional.forward(*this),
                       /*defaultDest*/ nullptr,
                       {{someDecl, contBB}, {noneDecl, failBB}});
  }
  B.emitBlock(failBB);

  // Call the standard library implementation of _diagnoseUnexpectedNilOptional.
  if (auto diagnoseFailure =
          getASTContext().getDiagnoseUnexpectedNilOptional(nullptr)) {
    auto args = emitSourceLocationArgs(loc.getSourceLoc(), loc);

    emitApplyOfLibraryIntrinsic(loc, diagnoseFailure, SubstitutionMap(),
                                {
                                  args.filenameStartPointer,
                                  args.filenameLength,
                                  args.filenameIsAscii,
                                  args.line
                                },
                                SGFContext());
  }

  // The failure block never falls through.
  B.createUnreachable(loc);
  B.clearInsertionPoint();
  B.emitBlock(contBB);

  ManagedValue result;
  SILType payloadType = optional.getType().getAnyOptionalObjectType();

  if (payloadType.isObject()) {
    // Loadable case: the payload arrives as the 'some' block argument.
    result = B.createOwnedPHIArgument(payloadType);
  } else {
    // Address case: project the payload out of the optional in place.
    result = B.createUncheckedTakeEnumDataAddr(loc, optional, someDecl,
                                               payloadType);
  }

  if (hadCleanup) {
    return result;
  }

  if (hadLValue) {
    return ManagedValue::forLValue(result.forward(*this));
  }

  return ManagedValue::forUnmanaged(result.forward(*this));
}
/// Open an existential value into its opened-archetype value.
///
/// Handles each existential representation (opaque, metatype, class, boxed)
/// and records whether the resulting opaque value may be consumed: it is
/// consumable only when this function took over the existential's cleanup
/// (the opened storage is uniquely owned here). Boxed existentials are never
/// consumable because the box may be shared.
SILGenFunction::OpaqueValueState
SILGenFunction::emitOpenExistential(
       SILLocation loc,
       ManagedValue existentialValue,
       CanArchetypeType openedArchetype,
       SILType loweredOpenedType) {
  // Open the existential value into the opened archetype value.
  bool isUnique = true;
  bool canConsume;
  ManagedValue archetypeMV;

  SILType existentialType = existentialValue.getType();
  switch (existentialType.getPreferredExistentialRepresentation(SGM.M)) {
  case ExistentialRepresentation::Opaque: {
    if (existentialType.isAddress()) {
      SILValue archetypeValue = B.createOpenExistentialAddr(
          loc, existentialValue.forward(*this), loweredOpenedType);
      if (existentialValue.hasCleanup()) {
        canConsume = true;
        // Leave a cleanup to deinit the existential container.
        enterDeinitExistentialCleanup(existentialValue.getValue(), CanType(),
                                      ExistentialRepresentation::Opaque);
        archetypeMV = emitManagedBufferWithCleanup(archetypeValue);
      } else {
        canConsume = false;
        archetypeMV = ManagedValue::forUnmanaged(archetypeValue);
      }
    } else {
      // Loadable opaque existential value.
      SILValue archetypeValue = B.createOpenExistentialOpaque(
          loc, existentialValue.forward(*this), loweredOpenedType);
      assert(!existentialValue.hasCleanup());
      canConsume = false;
      archetypeMV = ManagedValue::forUnmanaged(archetypeValue);
    }
    break;
  }
  case ExistentialRepresentation::Metatype:
    assert(existentialType.isObject());
    archetypeMV =
        ManagedValue::forUnmanaged(
            B.createOpenExistentialMetatype(
                loc, existentialValue.forward(*this), loweredOpenedType));
    // Metatypes are always trivial. Consuming would be a no-op.
    canConsume = false;
    break;
  case ExistentialRepresentation::Class: {
    assert(existentialType.isObject());
    SILValue archetypeValue = B.createOpenExistentialRef(
        loc, existentialValue.forward(*this), loweredOpenedType);
    // Consumable exactly when we took over the existential's cleanup.
    canConsume = existentialValue.hasCleanup();
    archetypeMV = (canConsume ? emitManagedRValueWithCleanup(archetypeValue)
                              : ManagedValue::forUnmanaged(archetypeValue));
    break;
  }
  case ExistentialRepresentation::Boxed:
    if (existentialType.isAddress()) {
      // Load the box reference at +0 before opening it.
      existentialValue = emitLoad(loc, existentialValue.getValue(),
                                  getTypeLowering(existentialType),
                                  SGFContext::AllowGuaranteedPlusZero,
                                  IsNotTake);
    }

    existentialType = existentialValue.getType();
    assert(existentialType.isObject());
    // NB: Don't forward the cleanup, because consuming a boxed value won't
    // consume the box reference.
    archetypeMV = ManagedValue::forUnmanaged(
        B.createOpenExistentialBox(loc, existentialValue.getValue(),
                                   loweredOpenedType));
    // The boxed value can't be assumed to be uniquely referenced. We can never
    // consume it.
    // TODO: We could use isUniquelyReferenced to shorten the duration of
    // the box to the point that the opaque value is copied out.
    isUnique = false;
    canConsume = false;
    break;
  case ExistentialRepresentation::None:
    llvm_unreachable("not existential");
  }
  // Record where this archetype was opened so later references resolve.
  setArchetypeOpeningSite(openedArchetype, archetypeMV.getValue());

  // Consumability implies unique ownership of the opened storage.
  assert(!canConsume || isUnique); (void) isUnique;

  return SILGenFunction::OpaqueValueState{
    archetypeMV,
    /*isConsumable*/ canConsume,
    /*hasBeenConsumed*/ false
  };
}
/// ManagedValue overload: perform an unconditional checked cast of a value,
/// consuming \p operand and returning the cast result under a fresh cleanup.
ManagedValue SILGenBuilder::createUnconditionalCheckedCastValue(
    SILLocation loc, ManagedValue operand, SILType type) {
  // Hand the operand off at +1 to the raw instruction, then take ownership
  // of the produced value by attaching a new cleanup to it.
  auto castValue =
      createUnconditionalCheckedCastValue(loc, operand.forward(SGF), type);
  return SGF.emitManagedRValueWithCleanup(castValue);
}