/// Forward this argument source into \p dest, re-abstracting the value
/// between the original (abstracted) formal type and the substituted
/// formal type when their lowerings differ.
void ArgumentSource::forwardInto(SILGenFunction &SGF,
                                 AbstractionPattern origFormalType,
                                 Initialization *dest,
                                 const TypeLowering &destTL) && {
  auto substFormalType = getSubstRValueType();
  // The destination's lowering must agree with the abstracted lowering of
  // the substituted type; the caller is responsible for this invariant.
  assert(destTL.getLoweredType() ==
         SGF.getLoweredType(origFormalType, substFormalType));

  // If there are no abstraction changes, we can just forward
  // normally.
  if (origFormalType.isExactType(substFormalType) ||
      destTL.getLoweredType() == SGF.getLoweredType(substFormalType)) {
    std::move(*this).forwardInto(SGF, dest);
    return;
  }

  // Otherwise, emit as a single independent value.
  SILLocation loc = getLocation();
  ManagedValue outputValue =
      std::move(*this).getAsSingleValue(SGF, origFormalType, SGFContext(dest));

  // If the value was emitted directly into the destination's context,
  // there is nothing left to forward.
  if (outputValue.isInContext()) return;

  // Use RValue's forward-into-initialization code.  We have to lie to
  // RValue about the formal type (by using the lowered type) because
  // we're emitting into an abstracted value, which RValue doesn't
  // really handle.
  auto substLoweredType = destTL.getLoweredType().getASTType();
  RValue(SGF, loc, substLoweredType, outputValue).forwardInto(SGF, loc, dest);
}
/// Construct an r-value from the already-emitted value of \p expr.
/// If the value was emitted into context, the r-value is marked Used
/// and holds no elements.
RValue::RValue(SILGenFunction &gen, Expr *expr, ManagedValue v)
  : type(expr->getType()->getCanonicalType()), elementsToBeAdded(0) {
  // An in-context ManagedValue carries no SILValue, so detect that state
  // before touching the value itself.
  if (v.isInContext()) {
    type = CanType();
    elementsToBeAdded = Used;
    return;
  }

  assert(v && "creating r-value with consumed value");
  // Explode tuples into individual element values.
  ExplodeTupleValue(values, gen, expr).visit(type, v);
  assert(values.size() == getRValueSize(type));
}
/// Add a single element of formal type \p formalType to this
/// partially-formed r-value, exploding tuples as needed.
/// The r-value must still be expecting elements (not complete and not
/// in a special state such as Used).
void RValue::addElement(SILGenFunction &SGF, ManagedValue element,
                        CanType formalType, SILLocation l) & {
  assert(element && "adding consumed value to r-value");
  assert(!element.isInContext() && "adding in-context value to r-value");
  assert(!isComplete() && "rvalue already complete");
  assert(!isInSpecialState() && "cannot add elements to an in-context r-value");
  // One fewer outstanding element expected.
  --elementsToBeAdded;

  ExplodeTupleValue(values, SGF, l).visit(formalType, element);

  // Once the last element arrives, the exploded count must match the
  // formal type's element count.
  assert(!isComplete() || values.size() == getRValueSize(type));
  verify(SGF);
}
/// Emit a materializeForSet operation that simply loads the l-value /// into the result buffer. This operation creates a callback to write /// the l-value back. SILValue MaterializeForSetEmitter::emitUsingGetterSetter(SILGenFunction &gen, SILLocation loc, ManagedValue self, RValue &&indices, SILValue resultBuffer, SILValue callbackBuffer, SILFunction *&callback) { // Copy the indices into the callback storage. const TypeLowering *indicesTL = nullptr; CleanupHandle indicesCleanup = CleanupHandle::invalid(); CanType indicesFormalType; if (isa<SubscriptDecl>(WitnessStorage)) { indicesFormalType = indices.getType(); indicesTL = &gen.getTypeLowering(indicesFormalType); SILValue allocatedCallbackBuffer = gen.B.createAllocValueBuffer(loc, indicesTL->getLoweredType(), callbackBuffer); // Emit into the buffer. auto init = gen.useBufferAsTemporary(loc, allocatedCallbackBuffer, *indicesTL); indicesCleanup = init->getInitializedCleanup(); indices.copyInto(gen, init.get(), loc); } // Set up the result buffer. resultBuffer = gen.B.createPointerToAddress(loc, resultBuffer, Conformance ? RequirementStorageType.getAddressType() : WitnessStorageType.getAddressType()); TemporaryInitialization init(resultBuffer, CleanupHandle::invalid()); // Evaluate the getter into the result buffer. LValue lv = buildLValue(gen, loc, self, std::move(indices), AccessKind::Read); ManagedValue result = gen.emitLoadOfLValue(loc, std::move(lv), SGFContext(&init)); if (!result.isInContext()) { result.forwardInto(gen, loc, resultBuffer); } // Forward the cleanup on the saved indices. if (indicesCleanup.isValid()) { gen.Cleanups.setCleanupState(indicesCleanup, CleanupState::Dead); } callback = createSetterCallback(gen.F, indicesTL, indicesFormalType); return resultBuffer; }
/// Construct an r-value of formal type \p formalType from an
/// already-emitted value.  If the value was emitted into context, the
/// r-value is marked Used and holds no elements.
RValue::RValue(SILGenFunction &gen, SILLocation l, CanType formalType,
               ManagedValue v)
  : type(formalType), elementsToBeAdded(0) {
  // Check for the in-context state before asserting on the value: an
  // in-context ManagedValue carries no SILValue, so the consumed-value
  // assert below would fire spuriously for it.  This matches the order
  // used by the Expr-based constructor.
  if (v.isInContext()) {
    type = CanType();
    elementsToBeAdded = Used;
    return;
  }

  assert(v && "creating r-value with consumed value");
  // Explode tuples into individual element values.
  ExplodeTupleValue(values, gen, l).visit(formalType, v);
  assert(values.size() == getRValueSize(type));
}
// FIXME: With some changes to their callers, all of the below functions // could be re-worked to use emitInjectEnum(). ManagedValue SILGenFunction::emitInjectOptional(SILLocation loc, const TypeLowering &optTL, SGFContext ctxt, llvm::function_ref<ManagedValue(SGFContext)> generator) { SILType optTy = optTL.getLoweredType(); SILType objectTy = optTy.getAnyOptionalObjectType(); assert(objectTy && "expected type was not optional"); auto someDecl = getASTContext().getOptionalSomeDecl(); // If the value is loadable, just emit and wrap. // TODO: honor +0 contexts? if (optTL.isLoadable() || !silConv.useLoweredAddresses()) { ManagedValue objectResult = generator(SGFContext()); auto some = B.createEnum(loc, objectResult.forward(*this), someDecl, optTy); return emitManagedRValueWithCleanup(some, optTL); } // Otherwise it's address-only; try to avoid spurious copies by // evaluating into the context. // Prepare a buffer for the object value. return B.bufferForExpr( loc, optTy.getObjectType(), optTL, ctxt, [&](SILValue optBuf) { auto objectBuf = B.createInitEnumDataAddr(loc, optBuf, someDecl, objectTy); // Evaluate the value in-place into that buffer. TemporaryInitialization init(objectBuf, CleanupHandle::invalid()); ManagedValue objectResult = generator(SGFContext(&init)); if (!objectResult.isInContext()) { objectResult.forwardInto(*this, loc, objectBuf); } // Finalize the outer optional buffer. B.createInjectEnumAddr(loc, optBuf, someDecl); }); }
/// Erase a concrete value to an existential, choosing the most efficient
/// representation.  When erasing to 'Error' with ObjC interop enabled,
/// several NSError-based shortcuts are attempted first.
ManagedValue SILGenFunction::emitExistentialErasure(
                            SILLocation loc,
                            CanType concreteFormalType,
                            const TypeLowering &concreteTL,
                            const TypeLowering &existentialTL,
                            ArrayRef<ProtocolConformanceRef> conformances,
                            SGFContext C,
                            llvm::function_ref<ManagedValue (SGFContext)> F,
                            bool allowEmbeddedNSError) {
  // Mark the needed conformances as used.
  for (auto conformance : conformances)
    SGM.useConformance(conformance);

  // If we're erasing to the 'Error' type, we might be able to get an NSError
  // representation more efficiently.
  auto &ctx = getASTContext();
  if (ctx.LangOpts.EnableObjCInterop && conformances.size() == 1 &&
      conformances[0].getRequirement() == ctx.getErrorDecl() &&
      ctx.getNSErrorDecl()) {
    auto nsErrorDecl = ctx.getNSErrorDecl();

    // If the concrete type is NSError or a subclass thereof, just erase it
    // directly.
    auto nsErrorType = nsErrorDecl->getDeclaredType()->getCanonicalType();
    if (nsErrorType->isExactSuperclassOf(concreteFormalType, nullptr)) {
      ManagedValue nsError = F(SGFContext());
      if (nsErrorType != concreteFormalType) {
        nsError = ManagedValue(B.createUpcast(loc, nsError.getValue(),
                                              getLoweredType(nsErrorType)),
                               nsError.getCleanup());
      }
      return emitBridgedToNativeError(loc, nsError);
    }

    // If the concrete type is known to conform to _BridgedStoredNSError,
    // call the _nsError witness getter to extract the NSError directly,
    // then just erase the NSError.
    if (auto storedNSErrorConformance =
          SGM.getConformanceToBridgedStoredNSError(loc, concreteFormalType)) {
      auto nsErrorVar = SGM.getNSErrorRequirement(loc);
      if (!nsErrorVar) return emitUndef(loc, existentialTL.getLoweredType());

      SubstitutionList nsErrorVarSubstitutions;

      // Devirtualize.  Maybe this should be done implicitly by
      // emitPropertyLValue?
      if (storedNSErrorConformance->isConcrete()) {
        if (auto witnessVar = storedNSErrorConformance->getConcrete()
                                ->getWitness(nsErrorVar, nullptr)) {
          nsErrorVar = cast<VarDecl>(witnessVar.getDecl());
          nsErrorVarSubstitutions = witnessVar.getSubstitutions();
        }
      }

      auto nativeError = F(SGFContext());

      WritebackScope writebackScope(*this);
      auto nsError =
        emitRValueForPropertyLoad(loc, nativeError, concreteFormalType,
                                  /*super*/ false, nsErrorVar,
                                  nsErrorVarSubstitutions,
                                  AccessSemantics::Ordinary, nsErrorType,
                                  SGFContext())
          .getAsSingleValue(*this, loc);

      return emitBridgedToNativeError(loc, nsError);
    }

    // Otherwise, if it's an archetype, try calling the _getEmbeddedNSError()
    // witness to try to dig out the embedded NSError.  But don't do this
    // when we're being called recursively.
    if (isa<ArchetypeType>(concreteFormalType) && allowEmbeddedNSError) {
      auto contBB = createBasicBlock();
      auto isNotPresentBB = createBasicBlock();
      auto isPresentBB = createBasicBlock();

      // Call swift_stdlib_getErrorEmbeddedNSError to attempt to extract an
      // NSError from the value.
      auto getEmbeddedNSErrorFn = SGM.getGetErrorEmbeddedNSError(loc);
      if (!getEmbeddedNSErrorFn)
        return emitUndef(loc, existentialTL.getLoweredType());

      Substitution getEmbeddedNSErrorSubstitutions[1] = {
        Substitution(concreteFormalType, conformances)
      };

      ManagedValue concreteValue = F(SGFContext());
      ManagedValue potentialNSError =
        emitApplyOfLibraryIntrinsic(loc,
                                    getEmbeddedNSErrorFn,
                                    getEmbeddedNSErrorSubstitutions,
                                    { concreteValue.copy(*this, loc) },
                                    SGFContext())
          .getAsSingleValue(*this, loc);

      // We're going to consume 'concreteValue' in exactly one branch,
      // so kill its cleanup now and recreate it on both branches.
      (void) concreteValue.forward(*this);

      // Check whether we got an NSError back.
      std::pair<EnumElementDecl*, SILBasicBlock*> cases[] = {
        { ctx.getOptionalSomeDecl(), isPresentBB },
        { ctx.getOptionalNoneDecl(), isNotPresentBB }
      };
      B.createSwitchEnum(loc, potentialNSError.forward(*this),
                         /*default*/ nullptr, cases);

      // If we did get an NSError, emit the existential erasure from that
      // NSError.
      B.emitBlock(isPresentBB);
      SILValue branchArg;
      {
        // Don't allow cleanups to escape the conditional block.
        FullExpr presentScope(Cleanups, CleanupLocation::get(loc));
        // Re-establish ownership of the concrete value on this branch.
        enterDestroyCleanup(concreteValue.getValue());

        // Receive the error value.  It's typed as an 'AnyObject' for
        // layering reasons, so perform an unchecked cast down to NSError.
        SILType anyObjectTy =
          potentialNSError.getType().getAnyOptionalObjectType();
        SILValue nsError = isPresentBB->createPHIArgument(
            anyObjectTy, ValueOwnershipKind::Owned);
        nsError = B.createUncheckedRefCast(loc, nsError,
                                           getLoweredType(nsErrorType));

        branchArg = emitBridgedToNativeError(loc,
                                        emitManagedRValueWithCleanup(nsError))
                      .forward(*this);
      }
      B.createBranch(loc, contBB, branchArg);

      // If we did not get an NSError, just directly emit the existential.
      // Since this is a recursive call, make sure we don't end up in this
      // path again.
      B.emitBlock(isNotPresentBB);
      {
        FullExpr presentScope(Cleanups, CleanupLocation::get(loc));
        // Recreate the cleanup that was forwarded above.
        concreteValue = emitManagedRValueWithCleanup(concreteValue.getValue());
        branchArg = emitExistentialErasure(loc, concreteFormalType, concreteTL,
                                           existentialTL, conformances,
                                           SGFContext(),
                                           [&](SGFContext C) {
                                             return concreteValue;
                                           },
                                           /*allowEmbeddedNSError=*/false)
                      .forward(*this);
      }
      B.createBranch(loc, contBB, branchArg);

      // Continue.
      B.emitBlock(contBB);
      SILValue existentialResult = contBB->createPHIArgument(
          existentialTL.getLoweredType(), ValueOwnershipKind::Owned);
      return emitManagedRValueWithCleanup(existentialResult, existentialTL);
    }
  }

  switch (existentialTL.getLoweredType().getObjectType()
            .getPreferredExistentialRepresentation(SGM.M, concreteFormalType)) {
  case ExistentialRepresentation::None:
    llvm_unreachable("not an existential type");
  case ExistentialRepresentation::Metatype: {
    assert(existentialTL.isLoadable());

    SILValue metatype = F(SGFContext()).getUnmanagedValue();
    assert(metatype->getType().castTo<AnyMetatypeType>()->getRepresentation()
             == MetatypeRepresentation::Thick);

    auto upcast =
      B.createInitExistentialMetatype(loc, metatype,
                                      existentialTL.getLoweredType(),
                                      conformances);
    return ManagedValue::forUnmanaged(upcast);
  }
  case ExistentialRepresentation::Class: {
    assert(existentialTL.isLoadable());
    ManagedValue sub = F(SGFContext());
    SILValue v = B.createInitExistentialRef(loc,
                                            existentialTL.getLoweredType(),
                                            concreteFormalType,
                                            sub.getValue(),
                                            conformances);
    return ManagedValue(v, sub.getCleanup());
  }
  case ExistentialRepresentation::Boxed: {
    // Allocate the existential.
    auto *existential = B.createAllocExistentialBox(loc,
                                           existentialTL.getLoweredType(),
                                           concreteFormalType,
                                           conformances);
    auto *valueAddr = B.createProjectExistentialBox(loc,
                                           concreteTL.getLoweredType(),
                                           existential);
    // Initialize the concrete value in-place.
    ExistentialInitialization init(existential, valueAddr, concreteFormalType,
                                   ExistentialRepresentation::Boxed, *this);
    ManagedValue mv = F(SGFContext(&init));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init.getAddress());
      init.finishInitialization(*this);
    }

    return emitManagedRValueWithCleanup(existential);
  }
  case ExistentialRepresentation::Opaque: {
    // If the concrete value is a pseudogeneric archetype, first erase it to
    // its upper bound.
    auto anyObjectProto = getASTContext()
      .getProtocol(KnownProtocolKind::AnyObject);
    auto anyObjectTy = anyObjectProto
      ? anyObjectProto->getDeclaredType()->getCanonicalType()
      : CanType();
    auto eraseToAnyObject =
    [&, concreteFormalType, F](SGFContext C) -> ManagedValue {
      auto concreteValue = F(SGFContext());
      auto anyObjectConformance = SGM.SwiftModule
        ->lookupConformance(concreteFormalType, anyObjectProto, nullptr);
      ProtocolConformanceRef buf[] = {
        *anyObjectConformance,
      };
      auto asAnyObject = B.createInitExistentialRef(loc,
                                  SILType::getPrimitiveObjectType(anyObjectTy),
                                  concreteFormalType,
                                  concreteValue.getValue(),
                                  getASTContext().AllocateCopy(buf));
      return ManagedValue(asAnyObject, concreteValue.getCleanup());
    };

    auto concreteTLPtr = &concreteTL;
    if (this->F.getLoweredFunctionType()->isPseudogeneric()) {
      if (anyObjectTy && concreteFormalType->is<ArchetypeType>()) {
        // Redirect emission through the AnyObject erasure above.
        concreteFormalType = anyObjectTy;
        concreteTLPtr = &getTypeLowering(anyObjectTy);
        F = eraseToAnyObject;
      }
    }

    // Allocate the existential.
    SILValue existential =
      getBufferForExprResult(loc, existentialTL.getLoweredType(), C);

    // Allocate the concrete value inside the container.
    SILValue valueAddr = B.createInitExistentialAddr(
                      loc, existential,
                      concreteFormalType,
                      concreteTLPtr->getLoweredType(),
                      conformances);
    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr, concreteFormalType,
                                      ExistentialRepresentation::Opaque,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }

    return manageBufferForExprResult(existential, existentialTL, C);
  }
  }

  llvm_unreachable("Unhandled ExistentialRepresentation in switch.");
}
/// Erase a concrete value to an existential, choosing the most efficient
/// representation.  When erasing to 'Error', NSError-based shortcuts
/// (the _BridgedStoredNSError getter, or an embedded NSError dug out of
/// an archetype) are attempted first.
ManagedValue SILGenFunction::emitExistentialErasure(
                            SILLocation loc,
                            CanType concreteFormalType,
                            const TypeLowering &concreteTL,
                            const TypeLowering &existentialTL,
                            ArrayRef<ProtocolConformanceRef> conformances,
                            SGFContext C,
                            llvm::function_ref<ManagedValue (SGFContext)> F,
                            bool allowEmbeddedNSError) {
  // Mark the needed conformances as used.
  for (auto conformance : conformances)
    SGM.useConformance(conformance);

  // If we're erasing to the 'Error' type, we might be able to get an NSError
  // representation more efficiently.
  auto &ctx = getASTContext();
  auto nsError = ctx.getNSErrorDecl();
  if (allowEmbeddedNSError && nsError &&
      existentialTL.getSemanticType().getSwiftRValueType()->getAnyNominal() ==
        ctx.getErrorDecl()) {
    // Check whether the concrete type conforms to the _BridgedStoredNSError
    // protocol. In that case, call the _nsError witness getter to extract the
    // NSError directly.
    auto conformance =
      SGM.getConformanceToBridgedStoredNSError(loc, concreteFormalType);

    CanType nsErrorType =
      nsError->getDeclaredInterfaceType()->getCanonicalType();
    ProtocolConformanceRef nsErrorConformances[1] = {
      ProtocolConformanceRef(SGM.getNSErrorConformanceToError())
    };

    if (conformance && nsError && SGM.getNSErrorConformanceToError()) {
      if (auto witness =
            conformance->getWitness(SGM.getNSErrorRequirement(loc), nullptr)) {
        // Create a reference to the getter witness.
        SILDeclRef getter =
          getGetterDeclRef(cast<VarDecl>(witness.getDecl()),
                           /*isDirectAccessorUse=*/true);

        // Compute the substitutions.
        ArrayRef<Substitution> substitutions =
          concreteFormalType->gatherAllSubstitutions(
            SGM.SwiftModule, nullptr);

        // Emit the erasure, through the getter to _nsError.
        return emitExistentialErasure(
            loc, nsErrorType,
            getTypeLowering(nsErrorType),
            existentialTL,
            ctx.AllocateCopy(nsErrorConformances),
            C,
            [&](SGFContext innerC) -> ManagedValue {
              // Call the getter.
              return emitGetAccessor(loc, getter, substitutions,
                                     ArgumentSource(loc,
                                       RValue(*this, loc, concreteFormalType,
                                              F(SGFContext()))),
                                     /*isSuper=*/false,
                                     /*isDirectAccessorUse=*/true,
                                     RValue(), innerC)
                .getAsSingleValue(*this, loc);
            });
      }
    }

    // Check whether the concrete type is an archetype. If so, call the
    // _getEmbeddedNSError() witness to try to dig out the embedded NSError.
    if (auto archetypeType = concreteFormalType->getAs<ArchetypeType>()) {
      if (std::find(archetypeType->getConformsTo().begin(),
                    archetypeType->getConformsTo().end(),
                    ctx.getErrorDecl())
            != archetypeType->getConformsTo().end()) {
        auto contBB = createBasicBlock();
        auto isNotPresentBB = createBasicBlock();
        auto isPresentBB = createBasicBlock();
        SILValue existentialResult =
          contBB->createBBArg(existentialTL.getLoweredType());

        ProtocolConformanceRef trivialErrorConformances[1] = {
          ProtocolConformanceRef(ctx.getErrorDecl())
        };

        Substitution substitutions[1] = {
          Substitution(concreteFormalType,
                       ctx.AllocateCopy(trivialErrorConformances))
        };

        // Call swift_stdlib_getErrorEmbeddedNSError to attempt to extract an
        // NSError from the value.
        ManagedValue concreteValue = F(SGFContext());
        ManagedValue potentialNSError =
          emitApplyOfLibraryIntrinsic(loc,
                                      SGM.getGetErrorEmbeddedNSError(loc),
                                      ctx.AllocateCopy(substitutions),
                                      { concreteValue },
                                      SGFContext())
            .getAsSingleValue(*this, loc);

        // Check whether we got an NSError back.
        SILValue hasNSError =
          emitDoesOptionalHaveValue(loc, potentialNSError.getValue());

        B.createCondBranch(loc, hasNSError, isPresentBB, isNotPresentBB);

        // If we did get an NSError, emit the existential erasure from that
        // NSError.
        B.emitBlock(isPresentBB);
        SILValue branchArg;
        {
          // Don't allow cleanups to escape the conditional block.
          FullExpr presentScope(Cleanups, CleanupLocation::get(loc));

          // Emit the existential erasure from the NSError.
          branchArg = emitExistentialErasure(
              loc, nsErrorType,
              getTypeLowering(nsErrorType),
              existentialTL,
              ctx.AllocateCopy(nsErrorConformances),
              C,
              [&](SGFContext innerC) -> ManagedValue {
                // Pull the NSError object out of the optional result.
                auto &inputTL = getTypeLowering(potentialNSError.getType());
                auto nsErrorValue =
                  emitUncheckedGetOptionalValueFrom(loc, potentialNSError,
                                                    inputTL);

                // Perform an unchecked cast down to NSError, because it was
                // typed as 'AnyObject' for layering reasons.
                return ManagedValue(B.createUncheckedRefCast(
                                      loc,
                                      nsErrorValue.getValue(),
                                      getLoweredType(nsErrorType)),
                                    nsErrorValue.getCleanup());
              }).forward(*this);
        }
        B.createBranch(loc, contBB, branchArg);

        // If we did not get an NSError, just directly emit the existential
        // (recursively).
        B.emitBlock(isNotPresentBB);
        branchArg = emitExistentialErasure(loc, concreteFormalType, concreteTL,
                                           existentialTL, conformances,
                                           SGFContext(), F,
                                           /*allowEmbeddedNSError=*/false)
                      .forward(*this);
        B.createBranch(loc, contBB, branchArg);

        // Continue.
        B.emitBlock(contBB);
        return emitManagedRValueWithCleanup(existentialResult, existentialTL);
      }
    }
  }

  switch (existentialTL.getLoweredType().getObjectType()
            .getPreferredExistentialRepresentation(SGM.M, concreteFormalType)) {
  case ExistentialRepresentation::None:
    llvm_unreachable("not an existential type");
  case ExistentialRepresentation::Metatype: {
    assert(existentialTL.isLoadable());

    SILValue metatype = F(SGFContext()).getUnmanagedValue();
    assert(metatype->getType().castTo<AnyMetatypeType>()->getRepresentation()
             == MetatypeRepresentation::Thick);

    auto upcast =
      B.createInitExistentialMetatype(loc, metatype,
                                      existentialTL.getLoweredType(),
                                      conformances);
    return ManagedValue::forUnmanaged(upcast);
  }
  case ExistentialRepresentation::Class: {
    assert(existentialTL.isLoadable());
    ManagedValue sub = F(SGFContext());
    SILValue v = B.createInitExistentialRef(loc,
                                            existentialTL.getLoweredType(),
                                            concreteFormalType,
                                            sub.getValue(),
                                            conformances);
    return ManagedValue(v, sub.getCleanup());
  }
  case ExistentialRepresentation::Boxed: {
    // Allocate the existential.
    auto *existential = B.createAllocExistentialBox(loc,
                                           existentialTL.getLoweredType(),
                                           concreteFormalType,
                                           conformances);
    auto *valueAddr = B.createProjectExistentialBox(loc,
                                           concreteTL.getLoweredType(),
                                           existential);
    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr, concreteFormalType,
                                      ExistentialRepresentation::Boxed,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }

    return emitManagedRValueWithCleanup(existential);
  }
  case ExistentialRepresentation::Opaque: {
    // Allocate the existential.
    SILValue existential =
      getBufferForExprResult(loc, existentialTL.getLoweredType(), C);

    // Allocate the concrete value inside the container.
    SILValue valueAddr = B.createInitExistentialAddr(
                      loc, existential,
                      concreteFormalType,
                      concreteTL.getLoweredType(),
                      conformances);
    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr, concreteFormalType,
                                      ExistentialRepresentation::Opaque,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }

    return manageBufferForExprResult(existential, existentialTL, C);
  }
  }

  // Every case above returns; terminate explicitly so control cannot fall
  // off the end of this non-void function (matches the sibling overload).
  llvm_unreachable("Unhandled ExistentialRepresentation in switch.");
}
/// Erase a concrete value to an existential container, choosing the
/// representation preferred by the existential's lowered type.
ManagedValue SILGenFunction::emitExistentialErasure(
                            SILLocation loc,
                            CanType concreteFormalType,
                            const TypeLowering &concreteTL,
                            const TypeLowering &existentialTL,
                            const ArrayRef<ProtocolConformance *> &conformances,
                            SGFContext C,
                            llvm::function_ref<ManagedValue (SGFContext)> F) {
  // Mark the needed conformances as used.
  for (auto *conformance : conformances)
    SGM.useConformance(conformance);

  switch (existentialTL.getLoweredType().getObjectType()
            .getPreferredExistentialRepresentation(SGM.M, concreteFormalType)) {
  case ExistentialRepresentation::None:
    llvm_unreachable("not an existential type");
  case ExistentialRepresentation::Metatype: {
    assert(existentialTL.isLoadable());

    SILValue metatype = F(SGFContext()).getUnmanagedValue();
    assert(metatype.getType().castTo<AnyMetatypeType>()->getRepresentation()
             == MetatypeRepresentation::Thick);

    auto upcast =
      B.createInitExistentialMetatype(loc, metatype,
                                      existentialTL.getLoweredType(),
                                      conformances);
    return ManagedValue::forUnmanaged(upcast);
  }
  case ExistentialRepresentation::Class: {
    assert(existentialTL.isLoadable());
    ManagedValue sub = F(SGFContext());
    SILValue v = B.createInitExistentialRef(loc,
                                            existentialTL.getLoweredType(),
                                            concreteFormalType,
                                            sub.getValue(),
                                            conformances);
    return ManagedValue(v, sub.getCleanup());
  }
  case ExistentialRepresentation::Boxed: {
    // Allocate the existential.
    auto box = B.createAllocExistentialBox(loc,
                                           existentialTL.getLoweredType(),
                                           concreteFormalType,
                                           concreteTL.getLoweredType(),
                                           conformances);
    auto existential = box->getExistentialResult();
    auto valueAddr = box->getValueAddressResult();

    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr, concreteFormalType,
                                      ExistentialRepresentation::Boxed,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }

    return emitManagedRValueWithCleanup(existential);
  }
  case ExistentialRepresentation::Opaque: {
    // Allocate the existential.
    SILValue existential =
      getBufferForExprResult(loc, existentialTL.getLoweredType(), C);

    // Allocate the concrete value inside the container.
    SILValue valueAddr = B.createInitExistentialAddr(
                      loc, existential,
                      concreteFormalType,
                      concreteTL.getLoweredType(),
                      conformances);
    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr, concreteFormalType,
                                      ExistentialRepresentation::Opaque,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }

    return manageBufferForExprResult(existential, existentialTL, C);
  }
  }

  // Every case above returns; terminate explicitly so control cannot fall
  // off the end of this non-void function.
  llvm_unreachable("Unhandled ExistentialRepresentation in switch.");
}
/// Emit the body of an implicit enum-case constructor: take the
/// (exploded) payload and metatype arguments and inject the payload
/// into the enum, returning it directly or via the indirect result slot.
void SILGenFunction::emitEnumConstructor(EnumElementDecl *element) {
  CanType enumTy = element->getParentEnum()
                     ->getDeclaredTypeInContext()
                     ->getCanonicalType();
  auto &enumTI = getTypeLowering(enumTy);

  RegularLocation Loc(element);
  CleanupLocation CleanupLoc(element);
  Loc.markAutoGenerated();

  // Emit the indirect return slot.
  // Address-only enums are returned through an indirect result argument.
  std::unique_ptr<Initialization> dest;
  if (enumTI.isAddressOnly()) {
    auto &AC = getASTContext();
    auto VD = new (AC) ParamDecl(/*IsLet*/ false, SourceLoc(), SourceLoc(),
                                 AC.getIdentifier("$return_value"),
                                 SourceLoc(),
                                 AC.getIdentifier("$return_value"),
                                 CanInOutType::get(enumTy),
                                 element->getDeclContext());
    auto resultSlot = new (SGM.M) SILArgument(F.begin(),
                                              enumTI.getLoweredType(),
                                              VD);
    dest = std::unique_ptr<Initialization>(
        new KnownAddressInitialization(resultSlot));
  }

  Scope scope(Cleanups, CleanupLoc);

  // Emit the exploded constructor argument.
  ArgumentSource payload;
  if (element->hasArgumentType()) {
    RValue arg = emitImplicitValueConstructorArg
      (*this, Loc, element->getArgumentType()->getCanonicalType(),
       element->getDeclContext());
    payload = ArgumentSource(Loc, std::move(arg));
  }

  // Emit the metatype argument.
  emitConstructorMetatypeArg(*this, element);

  // If possible, emit the enum directly into the indirect return.
  SGFContext C = (dest ? SGFContext(dest.get()) : SGFContext());
  ManagedValue mv = emitInjectEnum(Loc, std::move(payload),
                                   enumTI.getLoweredType(),
                                   element, C);

  // Return the enum.
  auto ReturnLoc = ImplicitReturnLocation::getImplicitReturnLoc(Loc);

  if (mv.isInContext()) {
    // The enum was emitted into the indirect slot; return an empty tuple.
    assert(enumTI.isAddressOnly());
    scope.pop();
    B.createReturn(ReturnLoc, emitEmptyTuple(Loc));
  } else {
    // Loadable enum: forward it out of its cleanup and return by value.
    assert(enumTI.isLoadable());
    SILValue result = mv.forward(*this);
    scope.pop();
    B.createReturn(ReturnLoc, result);
  }
}
/// Emit a for-each loop: repeatedly advance the iterator, test the
/// optional result, bind the pattern inside the loop body scope, and
/// branch back to the header.
void StmtEmitter::visitForEachStmt(ForEachStmt *S) {
  // Emit the 'iterator' variable that we'll be using for iteration.
  LexicalScope OuterForScope(SGF.Cleanups, SGF, CleanupLocation(S));
  SGF.visitPatternBindingDecl(S->getIterator());

  // If we ever reach an unreachable point, stop emitting statements.
  // This will need revision if we ever add goto.
  if (!SGF.B.hasValidInsertionPoint()) return;

  // If generator's optional result is address-only, create a stack allocation
  // to hold the results.  This will be initialized on every entry into the
  // loop header and consumed by the loop body.  On loop exit, the terminating
  // value will be in the buffer.
  auto optTy = S->getIteratorNext()->getType()->getCanonicalType();
  auto &optTL = SGF.getTypeLowering(optTy);
  SILValue nextBufOrValue;

  if (optTL.isAddressOnly())
    nextBufOrValue = SGF.emitTemporaryAllocation(S, optTL.getLoweredType());

  // Create a new basic block and jump into it.
  JumpDest loopDest = createJumpDest(S->getBody());
  SGF.B.emitBlock(loopDest.getBlock(), S);

  // Set the destinations for 'break' and 'continue'.
  JumpDest endDest = createJumpDest(S->getBody());
  SGF.BreakContinueDestStack.push_back({ S, endDest, loopDest });

  // Advance the generator.  Use a scope to ensure that any temporary stack
  // allocations in the subexpression are immediately released.
  if (optTL.isAddressOnly()) {
    Scope InnerForScope(SGF.Cleanups, CleanupLocation(S->getIteratorNext()));
    auto nextInit = SGF.useBufferAsTemporary(nextBufOrValue, optTL);
    SGF.emitExprInto(S->getIteratorNext(), nextInit.get());
    // Disable the temporary's cleanup; ownership transfers to the loop body.
    nextInit->getManagedAddress().forward(SGF);
  } else {
    Scope InnerForScope(SGF.Cleanups, CleanupLocation(S->getIteratorNext()));
    nextBufOrValue =
      SGF.emitRValueAsSingleValue(S->getIteratorNext()).forward(SGF);
  }

  // Continue if the value is present.
  Condition Cond = SGF.emitCondition(
         SGF.emitDoesOptionalHaveValue(S, nextBufOrValue), S,
         /*hasFalseCode=*/false, /*invertValue=*/false);

  if (Cond.hasTrue()) {
    Cond.enterTrue(SGF);
    SGF.emitProfilerIncrement(S->getBody());

    // Emit the loop body.
    // The declared variable(s) for the current element are destroyed
    // at the end of each loop iteration.
    {
      Scope InnerForScope(SGF.Cleanups, CleanupLocation(S->getBody()));
      // Emit the initialization for the pattern.  If any of the bound patterns
      // fail (because this is a 'for case' pattern with a refutable pattern,
      // the code should jump to the continue block.
      InitializationPtr initLoopVars =
        SGF.emitPatternBindingInitialization(S->getPattern(), loopDest);
      ManagedValue val;

      // If we had a loadable "next" generator value, we know it is present.
      // Get the value out of the optional, and wrap it up with a cleanup so
      // that any exits out of this scope properly clean it up.
      if (optTL.isLoadable()) {
        val = SGF.emitManagedRValueWithCleanup(nextBufOrValue);
      } else {
        val = SGF.emitManagedBufferWithCleanup(nextBufOrValue);
      }
      val = SGF.emitUncheckedGetOptionalValueFrom(S, val, optTL,
                                            SGFContext(initLoopVars.get()));
      // If the unwrap didn't emit directly into the pattern's
      // initialization, forward the value into it now.
      if (!val.isInContext())
        RValue(SGF, S, optTy.getAnyOptionalObjectType(), val)
          .forwardInto(SGF, S, initLoopVars.get());

      // Now that the pattern has been initialized, check any where condition.
      // If it fails, loop around as if 'continue' happened.
      if (auto *Where = S->getWhere()) {
        auto cond = SGF.emitCondition(Where, /*hasFalse*/false, /*invert*/true);
        // If self is null, branch to the epilog.
        cond.enterTrue(SGF);
        SGF.Cleanups.emitBranchAndCleanups(loopDest, Where, { });
        cond.exitTrue(SGF);
        cond.complete(SGF);
      }

      visit(S->getBody());
    }

    // Loop back to the header.
    if (SGF.B.hasValidInsertionPoint()) {
      // Associate the loop body's closing brace with this branch.
      RegularLocation L(S->getBody());
      L.pointToEnd();
      SGF.B.createBranch(L, loopDest.getBlock());
    }

    Cond.exitTrue(SGF);
  }

  // Complete the conditional execution.
  Cond.complete(SGF);

  emitOrDeleteBlock(SGF, endDest, S);
  SGF.BreakContinueDestStack.pop_back();

  // We do not need to destroy the value in the 'nextBuf' slot here, because
  // either the 'for' loop finished naturally and the buffer contains '.None',
  // or we exited by 'break' and the value in the buffer was consumed.
}