SILValue SILGenFunction::emitGlobalFunctionRef(SILLocation loc,
                                               SILDeclRef constant,
                                               SILConstantInfo constantInfo) {
  assert(constantInfo == getConstantInfo(constant));

  // Builtins must be fully applied at the point of reference.
  if (constant.hasDecl() &&
      isa<BuiltinUnit>(constant.getDecl()->getDeclContext())) {
    SGM.diagnose(loc.getSourceLoc(), diag::not_implemented,
                 "delayed application of builtin");
    return SILUndef::get(constantInfo.getSILType(), SGM.M);
  }

  // If the constant is a thunk we haven't emitted yet, emit it.
  if (!SGM.hasFunction(constant)) {
    if (constant.isCurried) {
      SGM.emitCurryThunk(constant);
    } else if (constant.isForeignToNativeThunk()) {
      SGM.emitForeignToNativeThunk(constant);
    } else if (constant.isNativeToForeignThunk()) {
      SGM.emitNativeToForeignThunk(constant);
    } else if (constant.kind == SILDeclRef::Kind::EnumElement) {
      SGM.emitEnumConstructor(cast<EnumElementDecl>(constant.getDecl()));
    }
  }

  auto f = SGM.getFunction(constant, NotForDefinition);
  assert(f->getLoweredFunctionType() == constantInfo.SILFnType);
  return B.createFunctionRef(loc, f);
}
DeclName SILGenModule::getMagicFunctionName(SILDeclRef ref) {
  switch (ref.kind) {
  case SILDeclRef::Kind::Func:
    if (auto closure = ref.getAbstractClosureExpr())
      return getMagicFunctionName(closure);
    return getMagicFunctionName(cast<FuncDecl>(ref.getDecl()));
  case SILDeclRef::Kind::Initializer:
  case SILDeclRef::Kind::Allocator:
    return getMagicFunctionName(cast<ConstructorDecl>(ref.getDecl()));
  case SILDeclRef::Kind::Deallocator:
  case SILDeclRef::Kind::Destroyer:
    return getMagicFunctionName(cast<DestructorDecl>(ref.getDecl()));
  case SILDeclRef::Kind::GlobalAccessor:
  case SILDeclRef::Kind::GlobalGetter:
    return getMagicFunctionName(cast<VarDecl>(ref.getDecl())
                                  ->getDeclContext());
  case SILDeclRef::Kind::DefaultArgGenerator:
    return getMagicFunctionName(cast<AbstractFunctionDecl>(ref.getDecl()));
  case SILDeclRef::Kind::StoredPropertyInitializer:
    return getMagicFunctionName(cast<VarDecl>(ref.getDecl())
                                  ->getDeclContext());
  case SILDeclRef::Kind::IVarInitializer:
    return getMagicFunctionName(cast<ClassDecl>(ref.getDecl()));
  case SILDeclRef::Kind::IVarDestroyer:
    return getMagicFunctionName(cast<ClassDecl>(ref.getDecl()));
  case SILDeclRef::Kind::EnumElement:
    return getMagicFunctionName(cast<EnumElementDecl>(ref.getDecl())
                                  ->getDeclContext());
  }

  llvm_unreachable("Unhandled SILDeclRefKind in switch.");
}
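// Editor's sketch in Swift (assumed example, not part of the compiler sources
// above): getMagicFunctionName() decides which declaration's name magic
// literals such as #function report for each kind of generated SIL function.
// The everyday effect is the logging idiom below, where a #function default
// argument reports the caller's name.
func greet(_ name: String, from caller: String = #function) -> String {
  return "\(name), greeted from \(caller)"
}

func main() {
  print(greet("world"))   // "world, greeted from main()"
}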
static SILValue getNextUncurryLevelRef(SILGenFunction &gen, SILLocation loc,
                                       SILDeclRef next, bool direct,
                                       ArrayRef<SILValue> curriedArgs,
                                       ArrayRef<Substitution> curriedSubs) {
  // For a foreign function, reference the native thunk.
  if (next.isForeign)
    return gen.emitGlobalFunctionRef(loc, next.asForeign(false));

  // If the fully-uncurried reference is to a native dynamic class method, emit
  // the dynamic dispatch.
  auto fullyAppliedMethod = !next.isCurried && !next.isForeign && !direct &&
    next.hasDecl();

  auto constantInfo = gen.SGM.Types.getConstantInfo(next);
  SILValue thisArg;
  if (!curriedArgs.empty())
    thisArg = curriedArgs.back();

  if (fullyAppliedMethod &&
      isa<AbstractFunctionDecl>(next.getDecl()) &&
      gen.getMethodDispatch(cast<AbstractFunctionDecl>(next.getDecl()))
        == MethodDispatch::Class) {
    SILValue thisArg = curriedArgs.back();

    // Use the dynamic thunk if dynamic.
    if (next.getDecl()->isDynamic()) {
      auto dynamicThunk = gen.SGM.getDynamicThunk(next, constantInfo);
      return gen.B.createFunctionRef(loc, dynamicThunk);
    }

    return gen.B.createClassMethod(loc, thisArg, next);
  }

  // If the fully-uncurried reference is to a generic method, look up the
  // witness.
  if (fullyAppliedMethod &&
      constantInfo.SILFnType->getRepresentation()
        == SILFunctionTypeRepresentation::WitnessMethod) {
    auto thisType = curriedSubs[0].getReplacement()->getCanonicalType();
    assert(isa<ArchetypeType>(thisType) && "no archetype for witness?!");
    SILValue OpenedExistential;
    if (!cast<ArchetypeType>(thisType)->getOpenedExistentialType().isNull())
      OpenedExistential = thisArg;
    return gen.B.createWitnessMethod(loc, thisType, nullptr, next,
                                     constantInfo.getSILType(),
                                     OpenedExistential);
  }

  // Otherwise, emit a direct call.
  return gen.emitGlobalFunctionRef(loc, next);
}
void SILGenModule::emitForeignToNativeThunk(SILDeclRef thunk) {
  // Thunks are always emitted by need, so don't need delayed emission.
  assert(!thunk.isForeign && "foreign-to-native thunks only");
  SILFunction *f = getFunction(thunk, ForDefinition);
  f->setThunk(IsThunk);
  if (thunk.asForeign().isClangGenerated())
    f->setSerialized(IsSerialized);
  preEmitFunction(thunk, thunk.getDecl(), f, thunk.getDecl());
  PrettyStackTraceSILFunction X("silgen emitForeignToNativeThunk", f);
  SILGenFunction(*this, *f).emitForeignToNativeThunk(thunk);
  postEmitFunction(thunk, f);
}
SILValue SILGenFunction::emitGlobalFunctionRef(SILLocation loc,
                                               SILDeclRef constant,
                                               SILConstantInfo constantInfo) {
  assert(constantInfo == getConstantInfo(constant));

  // Builtins must be fully applied at the point of reference.
  if (constant.hasDecl() &&
      isa<BuiltinUnit>(constant.getDecl()->getDeclContext())) {
    SGM.diagnose(loc.getSourceLoc(), diag::not_implemented,
                 "delayed application of builtin");
    return SILUndef::get(constantInfo.getSILType(), SGM.M);
  }

  // If the constant is a thunk we haven't emitted yet, emit it.
  if (!SGM.hasFunction(constant)) {
    if (constant.isCurried) {
      auto vd = constant.getDecl();
      // Reference the next uncurrying level of the function.
      SILDeclRef next = SILDeclRef(vd, constant.kind,
                                   SILDeclRef::ConstructAtBestResilienceExpansion,
                                   constant.uncurryLevel + 1);
      // If the function is fully uncurried and natively foreign, reference its
      // foreign entry point.
      if (!next.isCurried) {
        if (requiresForeignToNativeThunk(vd))
          next = next.asForeign();
      }

      // Preserve whether the curry thunks lead to a direct reference to the
      // method implementation.
      next = next.asDirectReference(constant.isDirectReference);

      SGM.emitCurryThunk(vd, constant, next);
    }
    // Otherwise, if this is a calling convention thunk we haven't emitted yet,
    // emit it.
    else if (constant.isForeignToNativeThunk()) {
      SGM.emitForeignToNativeThunk(constant);
    } else if (constant.isNativeToForeignThunk()) {
      SGM.emitNativeToForeignThunk(constant);
    } else if (constant.kind == SILDeclRef::Kind::EnumElement) {
      SGM.emitEnumConstructor(cast<EnumElementDecl>(constant.getDecl()));
    }
  }

  auto f = SGM.getFunction(constant, NotForDefinition);
  assert(f->getLoweredFunctionType() == constantInfo.SILFnType);
  return B.createFunctionRef(loc, f);
}
void addMethod(SILDeclRef fn) {
  if (fn.getDecl()->getDeclContext() == Target) {
    Layout.NumImmediateMembers++;
    Layout.MethodInfos.try_emplace(fn, getNextOffset());
  }
  super::addMethod(fn);
}
std::tuple<ManagedValue, SILType>
SILGenFunction::emitSiblingMethodRef(SILLocation loc,
                                     SILValue selfValue,
                                     SILDeclRef methodConstant,
                                     const SubstitutionMap &subMap) {
  SILValue methodValue;

  // If the method is dynamic, access it through runtime-hookable virtual
  // dispatch (viz. objc_msgSend for now).
  if (methodConstant.hasDecl()
      && methodConstant.getDecl()->isDynamic()) {
    methodValue =
        emitDynamicMethodRef(
            loc, methodConstant,
            SGM.Types.getConstantInfo(methodConstant).SILFnType);
  } else {
    methodValue = emitGlobalFunctionRef(loc, methodConstant);
  }

  SILType methodTy = methodValue->getType();

  // Specialize the generic method.
  methodTy = methodTy.substGenericArgs(SGM.M, subMap);

  return std::make_tuple(ManagedValue::forUnmanaged(methodValue),
                         methodTy);
}
void SILGenModule::emitNativeToForeignThunk(SILDeclRef thunk) {
  // Thunks are always emitted by need, so don't need delayed emission.
  assert(thunk.isForeign && "native-to-foreign thunks only");

  SILFunction *f = getFunction(thunk, ForDefinition);
  if (thunk.hasDecl())
    preEmitFunction(thunk, thunk.getDecl(), f, thunk.getDecl());
  else
    preEmitFunction(thunk, thunk.getAbstractClosureExpr(), f,
                    thunk.getAbstractClosureExpr());
  PrettyStackTraceSILFunction X("silgen emitNativeToForeignThunk", f);
  f->setBare(IsBare);
  f->setThunk(IsThunk);
  SILGenFunction(*this, *f).emitNativeToForeignThunk(thunk);
  postEmitFunction(thunk, f);
}
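// Editor's sketch in Swift (assumed example, not part of the compiler sources
// above): marking a Swift method @objc gives it an Objective-C entry point in
// addition to its native one; emitNativeToForeignThunk() is what emits that
// foreign entry point, adapting the ObjC calling convention to the native SIL
// function.
import Foundation

class Widget: NSObject {
  @objc func refresh(count: Int) -> Bool {
    // Callable from Objective-C (roughly as -[Widget refreshWithCount:])
    // through the generated native-to-foreign thunk.
    return count > 0
  }
}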
void SILGenFunction::emitIVarInitializer(SILDeclRef ivarInitializer) {
  auto cd = cast<ClassDecl>(ivarInitializer.getDecl());
  RegularLocation loc(cd);
  loc.markAutoGenerated();

  // Emit 'self', then mark it uninitialized.
  auto selfDecl = cd->getDestructor()->getImplicitSelfDecl();
  SILType selfTy = getLoweredLoadableType(selfDecl->getType());
  SILValue selfArg = new (SGM.M) SILArgument(F.begin(), selfTy, selfDecl);
  SILLocation PrologueLoc(selfDecl);
  PrologueLoc.markAsPrologue();
  B.createDebugValue(PrologueLoc, selfArg);
  selfArg = B.createMarkUninitialized(selfDecl, selfArg,
                                      MarkUninitializedInst::RootSelf);
  assert(selfTy.hasReferenceSemantics() && "can't emit a value type ctor here");
  VarLocs[selfDecl] = VarLoc::get(selfArg);

  auto cleanupLoc = CleanupLocation::get(loc);
  prepareEpilog(TupleType::getEmpty(getASTContext()), false, cleanupLoc);

  // Emit the initializers.
  emitMemberInitializers(cd, selfDecl, cd);

  // Return 'self'.
  B.createReturn(loc, selfArg);

  emitEpilog(loc);
}
void SILGenFunction::emitGeneratorFunction(SILDeclRef function, Expr *value) {
  MagicFunctionName = SILGenModule::getMagicFunctionName(function);

  RegularLocation Loc(value);
  Loc.markAutoGenerated();

  // Default argument generators of function typed values return noescape
  // functions. Strip the escape to noescape function conversion.
  if (function.kind == SILDeclRef::Kind::DefaultArgGenerator) {
    if (auto funType = value->getType()->getAs<AnyFunctionType>()) {
      if (funType->getExtInfo().isNoEscape()) {
        auto conv = cast<FunctionConversionExpr>(value);
        value = conv->getSubExpr();
        assert(funType->withExtInfo(funType->getExtInfo().withNoEscape(false))
                   ->isEqual(value->getType()));
      }
    }
  }

  auto *dc = function.getDecl()->getInnermostDeclContext();
  auto interfaceType = value->getType()->mapTypeOutOfContext();
  emitProlog({}, interfaceType, dc, false);
  prepareEpilog(value->getType(), false, CleanupLocation::get(Loc));

  emitReturnExpr(Loc, value);

  emitEpilog(Loc);
}
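// Editor's sketch in Swift (assumed example, not part of the compiler sources
// above): each defaulted parameter below gets its own default-argument
// generator, the kind of function emitGeneratorFunction() emits. The
// function-typed default for `log` is the case the noescape-stripping code
// handles.
func connect(host: String, port: Int = 443, log: (String) -> Void = { _ in }) {
  log("connecting to \(host):\(port)")
}

connect(host: "example.org")   // omitted arguments are produced by the generators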
std::tuple<ManagedValue, SILType, ArrayRef<Substitution>>
SILGenFunction::emitSiblingMethodRef(SILLocation loc,
                                     SILValue selfValue,
                                     SILDeclRef methodConstant,
                                     ArrayRef<Substitution> subs) {
  SILValue methodValue;

  // If the method is dynamic, access it through runtime-hookable virtual
  // dispatch (viz. objc_msgSend for now).
  if (methodConstant.hasDecl()
      && methodConstant.getDecl()->getAttrs().hasAttribute<DynamicAttr>())
    methodValue =
        emitDynamicMethodRef(loc, methodConstant,
                             SGM.Types.getConstantInfo(methodConstant));
  else
    methodValue = emitGlobalFunctionRef(loc, methodConstant);

  SILType methodTy = methodValue->getType();

  if (!subs.empty()) {
    // Specialize the generic method.
    methodTy = getLoweredLoadableType(
        methodTy.castTo<SILFunctionType>()
            ->substGenericArgs(SGM.M, SGM.SwiftModule, subs));
  }

  return std::make_tuple(ManagedValue::forUnmanaged(methodValue),
                         methodTy, subs);
}
Optional<SpecializedEmitter>
SpecializedEmitter::forDecl(SILGenModule &SGM, SILDeclRef function) {
  // Only consider standalone declarations in the Builtin module.
  if (function.kind != SILDeclRef::Kind::Func)
    return None;
  if (!function.hasDecl())
    return None;
  ValueDecl *decl = function.getDecl();
  if (!isa<BuiltinUnit>(decl->getDeclContext()))
    return None;

  auto name = decl->getBaseName().getIdentifier();
  const BuiltinInfo &builtin = SGM.M.getBuiltinInfo(name);
  switch (builtin.ID) {
  // All the non-SIL, non-type-trait builtins should use the
  // named-builtin logic, which just emits the builtin as a call to a
  // builtin function. This includes builtins that aren't even declared
  // in Builtins.def, i.e. all of the LLVM intrinsics.
  //
  // We do this in a separate pass over Builtins.def to avoid creating
  // a bunch of identical cases.
#define BUILTIN(Id, Name, Attrs)                                           \
  case BuiltinValueKind::Id:
#define BUILTIN_SIL_OPERATION(Id, Name, Overload)
#define BUILTIN_SANITIZER_OPERATION(Id, Name, Attrs)
#define BUILTIN_TYPE_TRAIT_OPERATION(Id, Name)
#include "swift/AST/Builtins.def"
  case BuiltinValueKind::None:
    return SpecializedEmitter(name);

  // Do a second pass over Builtins.def, ignoring all the cases for
  // which we emitted something above.
#define BUILTIN(Id, Name, Attrs)

  // Use specialized emitters for SIL builtins.
#define BUILTIN_SIL_OPERATION(Id, Name, Overload)                          \
  case BuiltinValueKind::Id:                                               \
    return SpecializedEmitter(&emitBuiltin##Id);

  // Sanitizer builtins should never directly be called; they should only
  // be inserted as instrumentation by SILGen.
#define BUILTIN_SANITIZER_OPERATION(Id, Name, Attrs)                       \
  case BuiltinValueKind::Id:                                               \
    llvm_unreachable("Sanitizer builtin called directly?");

  // Lower away type trait builtins when they're trivially solvable.
#define BUILTIN_TYPE_TRAIT_OPERATION(Id, Name)                             \
  case BuiltinValueKind::Id:                                               \
    return SpecializedEmitter(&emitBuiltinTypeTrait<&TypeBase::Name,       \
                                                    BuiltinValueKind::Id>);

#include "swift/AST/Builtins.def"
  }
  llvm_unreachable("bad builtin kind");
}
static std::pair<ManagedValue, SILDeclRef>
getNextUncurryLevelRef(SILGenFunction &SGF, SILLocation loc, SILDeclRef thunk,
                       ManagedValue selfArg, SubstitutionMap curriedSubs) {
  auto *vd = thunk.getDecl();

  // Reference the next uncurrying level of the function.
  SILDeclRef next = SILDeclRef(vd, thunk.kind);
  assert(!next.isCurried);

  auto constantInfo = SGF.SGM.Types.getConstantInfo(next);

  // If the function is natively foreign, reference its foreign entry point.
  if (requiresForeignToNativeThunk(vd))
    return {ManagedValue::forUnmanaged(SGF.emitGlobalFunctionRef(loc, next)),
            next};

  // If the thunk is a curry thunk for a direct method reference, we are
  // doing a direct dispatch (eg, a fragile 'super.foo()' call).
  if (thunk.isDirectReference)
    return {ManagedValue::forUnmanaged(SGF.emitGlobalFunctionRef(loc, next)),
            next};

  if (auto *func = dyn_cast<AbstractFunctionDecl>(vd)) {
    if (getMethodDispatch(func) == MethodDispatch::Class) {
      // Use the dynamic thunk if dynamic.
      if (vd->isObjCDynamic()) {
        return {SGF.emitDynamicMethodRef(loc, next, constantInfo.SILFnType),
                next};
      }

      auto methodTy = SGF.SGM.Types.getConstantOverrideType(next);
      SILValue result =
          SGF.emitClassMethodRef(loc, selfArg.getValue(), next, methodTy);
      return {ManagedValue::forUnmanaged(result),
              next.getOverriddenVTableEntry()};
    }

    // If the fully-uncurried reference is to a generic method, look up the
    // witness.
    if (constantInfo.SILFnType->getRepresentation()
          == SILFunctionTypeRepresentation::WitnessMethod) {
      auto protocol = func->getDeclContext()->getSelfProtocolDecl();
      auto origSelfType = protocol->getSelfInterfaceType()->getCanonicalType();
      auto substSelfType = origSelfType.subst(curriedSubs)->getCanonicalType();
      auto conformance = curriedSubs.lookupConformance(origSelfType, protocol);
      auto result = SGF.B.createWitnessMethod(loc, substSelfType, *conformance,
                                              next, constantInfo.getSILType());
      return {ManagedValue::forUnmanaged(result), next};
    }
  }

  // Otherwise, emit a direct call.
  return {ManagedValue::forUnmanaged(SGF.emitGlobalFunctionRef(loc, next)),
          next};
}
void SILGenFunction::emitIVarDestroyer(SILDeclRef ivarDestroyer) {
  auto cd = cast<ClassDecl>(ivarDestroyer.getDecl());
  RegularLocation loc(cd);
  loc.markAutoGenerated();

  SILValue selfValue =
      emitSelfDecl(cd->getDestructor()->getImplicitSelfDecl());

  auto cleanupLoc = CleanupLocation::get(loc);
  prepareEpilog(TupleType::getEmpty(getASTContext()), false, cleanupLoc);

  emitClassMemberDestruction(selfValue, cd, cleanupLoc);

  B.createReturn(loc, emitEmptyTuple(loc));

  emitEpilog(loc);
}
void SILGenFunction::emitCurryThunk(SILDeclRef thunk) {
  assert(thunk.isCurried);

  auto *vd = thunk.getDecl();

  if (auto *fd = dyn_cast<AbstractFunctionDecl>(vd)) {
    assert(!SGM.M.Types.hasLoweredLocalCaptures(fd) &&
           "methods cannot have captures");
    (void) fd;
  }

  auto selfTy = vd->getInterfaceType()->castTo<AnyFunctionType>()
                    ->getInput();
  selfTy = vd->getInnermostDeclContext()->mapTypeIntoContext(selfTy);
  ManagedValue selfArg =
      B.createFunctionArgument(getLoweredType(selfTy), nullptr);

  // Forward substitutions.
  auto subs = F.getForwardingSubstitutions();

  ManagedValue toFn = getNextUncurryLevelRef(*this, vd, thunk, selfArg, subs);

  // FIXME: Using the type from the ConstantInfo instead of looking at
  // getConstantOverrideInfo() for methods looks suspect in the presence
  // of covariant overrides and multiple vtable entries.
  SILFunctionConventions fromConv(
      SGM.Types.getConstantInfo(thunk).SILFnType, SGM.M);
  SILType resultTy = fromConv.getSingleSILResultType();
  resultTy = F.mapTypeIntoContext(resultTy);
  auto substTy = toFn.getType().substGenericArgs(SGM.M, subs);

  // Partially apply the next uncurry level and return the result closure.
  selfArg = selfArg.ensurePlusOne(*this, vd);
  auto calleeConvention = ParameterConvention::Direct_Guaranteed;
  auto closureTy = SILGenBuilder::getPartialApplyResultType(
      toFn.getType(), /*appliedParams=*/1, SGM.M, subs, calleeConvention);
  ManagedValue toClosure =
      B.createPartialApply(vd, toFn, substTy, subs, {selfArg}, closureTy);
  if (resultTy != closureTy) {
    CanSILFunctionType resultFnTy = resultTy.castTo<SILFunctionType>();
    CanSILFunctionType closureFnTy = closureTy.castTo<SILFunctionType>();
    if (resultFnTy->isABICompatibleWith(closureFnTy).isCompatible()) {
      toClosure = B.createConvertFunction(vd, toClosure, resultTy);
    } else {
      toClosure =
          emitCanonicalFunctionThunk(vd, toClosure, closureFnTy, resultFnTy);
    }
  }
  B.createReturn(ImplicitReturnLocation::getImplicitReturnLoc(vd), toClosure);
}
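// Editor's sketch in Swift (assumed example, not part of the compiler sources
// above): an unapplied instance-method reference like the one below is what
// curry thunks support. The thunk takes `self`, partially applies the
// underlying method, and returns the inner closure, mirroring
// emitCurryThunk() above.
class Counter {
  var value = 0
  func add(_ amount: Int) -> Int {
    value += amount
    return value
  }
}

let add = Counter.add            // (Counter) -> (Int) -> Int
let counter = Counter()
let addToCounter = add(counter)  // (Int) -> Int, closes over `counter`
print(addToCounter(41))          // 41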
void SILGenModule::emitCurryThunk(SILDeclRef constant) {
  assert(constant.isCurried);

  // Thunks are always emitted by need, so don't need delayed emission.
  SILFunction *f = getFunction(constant, ForDefinition);
  f->setThunk(IsThunk);
  f->setBare(IsBare);

  auto *fd = constant.getDecl();
  preEmitFunction(constant, fd, f, fd);
  PrettyStackTraceSILFunction X("silgen emitCurryThunk", f);

  SILGenFunction(*this, *f).emitCurryThunk(constant);
  postEmitFunction(constant, f);
}
void SILGenFunction::emitGeneratorFunction(SILDeclRef function, Expr *value) {
  MagicFunctionName = SILGenModule::getMagicFunctionName(function);

  RegularLocation Loc(value);
  Loc.markAutoGenerated();

  // Override location for #file, #line etc. to an invalid one so that we
  // don't put extra strings into the default argument generator function that
  // is not going to be ever used anyway.
  overrideLocationForMagicIdentifiers = SourceLoc();

  emitProlog({ }, value->getType(), function.getDecl()->getDeclContext());
  prepareEpilog(value->getType(), false, CleanupLocation::get(Loc));

  emitReturnExpr(Loc, value);

  emitEpilog(Loc);
}
void addMethod(SILDeclRef method) {
  assert(method.getDecl()->getDeclContext() == CD);

  if (CD->hasResilientMetadata()) {
    if (FirstTime) {
      FirstTime = false;

      // If the class is itself resilient and has at least one vtable entry,
      // it has a method lookup function.
      TBD.addSymbol(LinkEntity::forMethodLookupFunction(CD));
    }

    TBD.addDispatchThunk(method);
  }

  TBD.addMethodDescriptor(method);
}
static SILFunction::ClassVisibility_t getClassVisibility(SILDeclRef constant) {
  if (!constant.hasDecl())
    return SILFunction::NotRelevant;

  // If this declaration is a function which goes into a vtable, then its
  // symbol must be as visible as its class. Derived classes even have to put
  // all less visible methods of the base class into their vtables.

  auto *FD = dyn_cast<AbstractFunctionDecl>(constant.getDecl());
  if (!FD)
    return SILFunction::NotRelevant;

  DeclContext *context = FD->getDeclContext();

  // Methods from extensions don't go into vtables (yet).
  if (context->isExtensionContext())
    return SILFunction::NotRelevant;

  auto *classType = context->getAsClassOrClassExtensionContext();
  if (!classType || classType->isFinal())
    return SILFunction::NotRelevant;

  if (FD->isFinal() && !FD->getOverriddenDecl())
    return SILFunction::NotRelevant;

  assert(FD->getEffectiveAccess() <= classType->getEffectiveAccess() &&
         "class must be as visible as its members");

  switch (classType->getEffectiveAccess()) {
  case Accessibility::Private:
  case Accessibility::FilePrivate:
    return SILFunction::NotRelevant;
  case Accessibility::Internal:
    return SILFunction::InternalClass;
  case Accessibility::Public:
  case Accessibility::Open:
    return SILFunction::PublicClass;
  }

  llvm_unreachable("Unhandled Accessibility in switch.");
}
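// Editor's sketch in Swift (assumed examples, not part of the compiler sources
// above): the visibility classification implemented in getClassVisibility()
// distinguishes roughly these cases.
public class Base {
  public func overridable() {}   // vtable entry in a public/open class
  public final func sealed() {}  // final and not an override: not relevant
}

internal class Helper {
  func helper() {}               // vtable entry in an internal class
}

private final class Local {
  func noTable() {}              // final class: no vtable visibility concern
}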
static FunctionPointer lookupMethod(IRGenFunction &IGF, SILDeclRef declRef) {
  auto *decl = cast<AbstractFunctionDecl>(declRef.getDecl());

  // Protocol case.
  if (isa<ProtocolDecl>(decl->getDeclContext())) {
    // Find the witness table.
    llvm::Value *wtable = (IGF.CurFn->arg_end() - 1);

    // Find the witness we're interested in.
    return emitWitnessMethodValue(IGF, wtable, declRef);
  }

  // Class case.
  auto funcTy = IGF.IGM.getSILModule().Types.getConstantFunctionType(declRef);

  // Load the metadata, or use the 'self' value if we have a static method.
  llvm::Value *self;

  // Non-throwing class methods always have the 'self' parameter at the end.
  // Throwing class methods have 'self' right before the error parameter.
  //
  // FIXME: Should find a better way of expressing this.
  if (funcTy->hasErrorResult())
    self = (IGF.CurFn->arg_end() - 2);
  else
    self = (IGF.CurFn->arg_end() - 1);

  auto selfTy = funcTy->getSelfParameter().getSILStorageType();

  llvm::Value *metadata;
  if (selfTy.is<MetatypeType>()) {
    metadata = self;
  } else {
    metadata = emitHeapMetadataRefForHeapObject(IGF, self, selfTy,
                                                /*suppress cast*/ true);
  }

  return emitVirtualMethodValue(IGF, metadata, declRef, funcTy);
}
static SILValue getThunkedForeignFunctionRef(SILGenFunction &gen,
                                             SILLocation loc,
                                             SILDeclRef foreign,
                                             ArrayRef<ManagedValue> args,
                                             ArrayRef<Substitution> subs,
                                             const SILConstantInfo &foreignCI) {
  assert(!foreign.isCurried
         && "should not thunk calling convention when curried");

  // Produce a witness_method when thunking ObjC protocol methods.
  auto dc = foreign.getDecl()->getDeclContext();
  if (isa<ProtocolDecl>(dc) && cast<ProtocolDecl>(dc)->isObjC()) {
    assert(subs.size() == 1);
    auto thisType = subs[0].getReplacement()->getCanonicalType();
    assert(isa<ArchetypeType>(thisType) && "no archetype for witness?!");
    SILValue thisArg = args.back().getValue();

    SILValue OpenedExistential;
    if (!cast<ArchetypeType>(thisType)->getOpenedExistentialType().isNull())
      OpenedExistential = thisArg;
    auto conformance = ProtocolConformanceRef(cast<ProtocolDecl>(dc));
    return gen.B.createWitnessMethod(loc, thisType, conformance, foreign,
                                     foreignCI.getSILType(),
                                     OpenedExistential);

  // Produce a class_method when thunking imported ObjC methods.
  } else if (foreignCI.SILFnType->getRepresentation()
               == SILFunctionTypeRepresentation::ObjCMethod) {
    assert(subs.empty());
    SILValue thisArg = args.back().getValue();

    return gen.B.createClassMethod(loc, thisArg, foreign,
                         SILType::getPrimitiveObjectType(foreignCI.SILFnType),
                         /*volatile*/ true);
  }

  // Otherwise, emit a function_ref.
  return gen.emitGlobalFunctionRef(loc, foreign);
}
/// Generate code to emit a thunk with native conventions that calls a
/// function with foreign conventions.
void SILGenFunction::emitForeignToNativeThunk(SILDeclRef thunk) {
  assert(!thunk.isForeign && "foreign-to-native thunks only");

  // Wrap the function in its original form.

  auto fd = cast<AbstractFunctionDecl>(thunk.getDecl());
  auto nativeCI = getConstantInfo(thunk);
  auto nativeFormalResultTy = nativeCI.LoweredInterfaceType.getResult();
  auto nativeFnTy = F.getLoweredFunctionType();
  assert(nativeFnTy == nativeCI.SILFnType);

  // Find the foreign error convention and 'self' parameter index.
  Optional<ForeignErrorConvention> foreignError;
  if (nativeFnTy->hasErrorResult()) {
    foreignError = fd->getForeignErrorConvention();
    assert(foreignError && "couldn't find foreign error convention!");
  }
  ImportAsMemberStatus memberStatus = fd->getImportAsMemberStatus();

  // Forward the arguments.
  auto forwardedParameters = fd->getParameterLists();

  // For allocating constructors, 'self' is a metatype, not the 'self' value
  // formally present in the constructor body.
  Type allocatorSelfType;
  if (thunk.kind == SILDeclRef::Kind::Allocator) {
    allocatorSelfType = forwardedParameters[0]->getType(getASTContext())
      ->getLValueOrInOutObjectType();
    forwardedParameters = forwardedParameters.slice(1);
  }

  SmallVector<SILValue, 8> params;
  for (auto *paramList : reversed(forwardedParameters))
    bindParametersForForwarding(paramList, params);

  if (allocatorSelfType) {
    auto selfMetatype =
      CanMetatypeType::get(allocatorSelfType->getCanonicalType());
    auto selfArg = new (F.getModule()) SILArgument(
        F.begin(), getLoweredLoadableType(selfMetatype),
        fd->getImplicitSelfDecl());
    params.push_back(selfArg);
  }

  // Set up the throw destination if necessary.
  CleanupLocation cleanupLoc(fd);
  if (foreignError) {
    prepareRethrowEpilog(cleanupLoc);
  }

  SILValue result;
  {
    Scope scope(Cleanups, fd);

    SILDeclRef foreignDeclRef = thunk.asForeign(true);
    SILConstantInfo foreignCI = getConstantInfo(foreignDeclRef);
    auto foreignFnTy = foreignCI.SILFnType;

    // Bridge all the arguments.
    SmallVector<ManagedValue, 8> args;
    unsigned foreignArgIndex = 0;

    // A helper function to add a foreign error argument in the
    // appropriate position.
    auto maybeAddForeignErrorArg = [&] {
      if (foreignError &&
          foreignArgIndex == foreignError->getErrorParameterIndex()) {
        args.push_back(ManagedValue());
        foreignArgIndex++;
      }
    };

    for (unsigned nativeParamIndex : indices(params)) {
      // Bring the parameter to +1.
      auto paramValue = params[nativeParamIndex];
      auto thunkParam = nativeFnTy->getParameters()[nativeParamIndex];
      // TODO: Could avoid a retain if the bridged parameter is also +0 and
      // doesn't require a bridging conversion.
      ManagedValue param;
      switch (thunkParam.getConvention()) {
      case ParameterConvention::Direct_Owned:
        param = emitManagedRValueWithCleanup(paramValue);
        break;
      case ParameterConvention::Direct_Guaranteed:
      case ParameterConvention::Direct_Unowned:
        param = emitManagedRetain(fd, paramValue);
        break;
      case ParameterConvention::Direct_Deallocating:
        param = ManagedValue::forUnmanaged(paramValue);
        break;
      case ParameterConvention::Indirect_Inout:
      case ParameterConvention::Indirect_InoutAliasable:
        param = ManagedValue::forUnmanaged(paramValue);
        break;
      case ParameterConvention::Indirect_In:
      case ParameterConvention::Indirect_In_Guaranteed:
        llvm_unreachable("indirect args in foreign thunked method not implemented");
      }

      maybeAddForeignErrorArg();

      bool isSelf = nativeParamIndex == params.size() - 1;

      if (memberStatus.isInstance()) {
        // Leave space for `self` to be filled in later.
        if (foreignArgIndex == memberStatus.getSelfIndex()) {
          args.push_back({});
          foreignArgIndex++;
        }

        // Use the `self` space we skipped earlier if it's time.
        if (isSelf) {
          foreignArgIndex = memberStatus.getSelfIndex();
        }
      } else if (memberStatus.isStatic() && isSelf) {
        // Lose a static `self` parameter.
        break;
      }

      auto foreignParam = foreignFnTy->getParameters()[foreignArgIndex++];
      SILType foreignArgTy = foreignParam.getSILType();
      auto bridged = emitNativeToBridgedValue(fd, param,
                                SILFunctionTypeRepresentation::CFunctionPointer,
                                foreignArgTy.getSwiftRValueType());
      // Handle C pointer arguments imported as indirect `self` arguments.
      if (foreignParam.getConvention() == ParameterConvention::Indirect_In) {
        auto temp = emitTemporaryAllocation(fd, bridged.getType());
        bridged.forwardInto(*this, fd, temp);
        bridged = emitManagedBufferWithCleanup(temp);
      }

      if (memberStatus.isInstance() && isSelf) {
        // Fill in the `self` space.
        args[memberStatus.getSelfIndex()] = bridged;
      } else {
        args.push_back(bridged);
      }
    }

    maybeAddForeignErrorArg();

    // Call the original.
    auto subs = getForwardingSubstitutions();
    auto fn = getThunkedForeignFunctionRef(*this, fd, foreignDeclRef, args,
                                           subs, foreignCI);
    auto fnType = fn->getType().castTo<SILFunctionType>();
    fnType = fnType->substGenericArgs(SGM.M, SGM.SwiftModule, subs);

    result = emitApply(fd, ManagedValue::forUnmanaged(fn), subs, args, fnType,
                       AbstractionPattern(nativeFormalResultTy),
                       nativeFormalResultTy,
                       ApplyOptions::None, None, foreignError,
                       SGFContext())
      .forwardAsSingleValue(*this, fd);
  }
  B.createReturn(ImplicitReturnLocation::getImplicitReturnLoc(fd), result);

  // Emit the throw destination.
  emitRethrowEpilog(fd);
}
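// Editor's sketch in Swift (assumed example, not part of the compiler sources
// above): taking an unapplied reference to an imported Objective-C method
// needs a foreign-to-native thunk like the one emitted above, because the
// foreign entry point must be wrapped in a function value with native
// conventions.
import Foundation

let substringFrom = NSString.substring(from:)  // (NSString) -> (Int) -> String
let tail = substringFrom("SILDeclRef" as NSString)(3)
print(tail)                                    // "DeclRef"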
void SILGenFunction::emitObjCDestructor(SILDeclRef dtor) {
  auto dd = cast<DestructorDecl>(dtor.getDecl());
  auto cd = cast<ClassDecl>(dd->getDeclContext());
  MagicFunctionName = DeclName(SGM.M.getASTContext().getIdentifier("deinit"));

  RegularLocation loc(dd);
  if (dd->isImplicit())
    loc.markAutoGenerated();

  SILValue selfValue = emitSelfDecl(dd->getImplicitSelfDecl());

  // Create a basic block to jump to for the implicit destruction behavior
  // of releasing the elements and calling the superclass destructor.
  // We won't actually emit the block until we finish with the destructor body.
  prepareEpilog(Type(), false, CleanupLocation::get(loc));

  // Emit the destructor body.
  emitStmt(dd->getBody());

  Optional<SILValue> maybeReturnValue;
  SILLocation returnLoc(loc);
  std::tie(maybeReturnValue, returnLoc) = emitEpilogBB(loc);

  if (!maybeReturnValue)
    return;

  auto cleanupLoc = CleanupLocation::get(loc);

  // Note: the ivar destroyer is responsible for destroying the
  // instance variables before the object is actually deallocated.

  // Form a reference to the superclass -dealloc.
  Type superclassTy = dd->mapTypeIntoContext(cd->getSuperclass());
  assert(superclassTy && "Emitting Objective-C -dealloc without superclass?");
  ClassDecl *superclass = superclassTy->getClassOrBoundGenericClass();
  auto superclassDtorDecl = superclass->getDestructor();
  SILDeclRef superclassDtor(superclassDtorDecl,
                            SILDeclRef::Kind::Deallocator,
                            SILDeclRef::ConstructAtBestResilienceExpansion,
                            SILDeclRef::ConstructAtNaturalUncurryLevel,
                            /*isForeign=*/true);
  auto superclassDtorType = SGM.getConstantType(superclassDtor);
  SILValue superclassDtorValue = B.createSuperMethod(
                                   cleanupLoc, selfValue, superclassDtor,
                                   superclassDtorType);

  // Call the superclass's -dealloc.
  SILType superclassSILTy = getLoweredLoadableType(superclassTy);
  SILValue superSelf = B.createUpcast(cleanupLoc, selfValue, superclassSILTy);
  ArrayRef<Substitution> subs
    = superclassTy->gatherAllSubstitutions(SGM.M.getSwiftModule(), nullptr);
  auto substDtorType = superclassDtorType.castTo<SILFunctionType>()
    ->substGenericArgs(SGM.M, subs);
  SILFunctionConventions dtorConv(substDtorType, SGM.M);
  B.createApply(cleanupLoc, superclassDtorValue,
                SILType::getPrimitiveObjectType(substDtorType),
                dtorConv.getSILResultType(), subs, superSelf);

  // Return.
  B.createReturn(returnLoc, emitEmptyTuple(cleanupLoc));
}
void addMethod(SILDeclRef method) {
  if (method.getDecl()->getDeclContext() == CD)
    TBD.addDispatchThunk(method);
}
void SILGenFunction::emitObjCDestructor(SILDeclRef dtor) {
  auto dd = cast<DestructorDecl>(dtor.getDecl());
  auto cd = cast<ClassDecl>(dd->getDeclContext());
  MagicFunctionName = DeclName(SGM.M.getASTContext().getIdentifier("deinit"));

  RegularLocation loc(dd);
  if (dd->isImplicit())
    loc.markAutoGenerated();

  SILValue selfValue = emitSelfDecl(dd->getImplicitSelfDecl());

  // Create a basic block to jump to for the implicit destruction behavior
  // of releasing the elements and calling the superclass destructor.
  // We won't actually emit the block until we finish with the destructor body.
  prepareEpilog(Type(), false, CleanupLocation::get(loc));

  emitProfilerIncrement(dd->getBody());

  // Emit the destructor body.
  emitStmt(dd->getBody());

  Optional<SILValue> maybeReturnValue;
  SILLocation returnLoc(loc);
  std::tie(maybeReturnValue, returnLoc) = emitEpilogBB(loc);

  if (!maybeReturnValue)
    return;

  auto cleanupLoc = CleanupLocation::get(loc);

  // Note: the ivar destroyer is responsible for destroying the
  // instance variables before the object is actually deallocated.

  // Form a reference to the superclass -dealloc.
  Type superclassTy = dd->mapTypeIntoContext(cd->getSuperclass());
  assert(superclassTy && "Emitting Objective-C -dealloc without superclass?");
  ClassDecl *superclass = superclassTy->getClassOrBoundGenericClass();
  auto superclassDtorDecl = superclass->getDestructor();
  auto superclassDtor = SILDeclRef(superclassDtorDecl,
                                   SILDeclRef::Kind::Deallocator)
    .asForeign();
  auto superclassDtorType = SGM.Types.getConstantType(superclassDtor);
  SILValue superclassDtorValue = B.createObjCSuperMethod(
                                   cleanupLoc, selfValue, superclassDtor,
                                   superclassDtorType);

  // Call the superclass's -dealloc.
  SILType superclassSILTy = getLoweredLoadableType(superclassTy);
  SILValue superSelf = B.createUpcast(cleanupLoc, selfValue, superclassSILTy);
  assert(superSelf.getOwnershipKind() == ValueOwnershipKind::Owned);

  auto subMap
    = superclassTy->getContextSubstitutionMap(SGM.M.getSwiftModule(),
                                              superclass);
  auto substDtorType = superclassDtorType.substGenericArgs(SGM.M, subMap);
  CanSILFunctionType substFnType = substDtorType.castTo<SILFunctionType>();
  SILFunctionConventions dtorConv(substFnType, SGM.M);
  assert(substFnType->getSelfParameter().getConvention() ==
         ParameterConvention::Direct_Unowned &&
         "Objective C deinitializing destructor takes self as unowned");

  B.createApply(cleanupLoc, superclassDtorValue, substDtorType,
                dtorConv.getSILResultType(), subMap, superSelf);

  // We know that the given value came in at +1, but we pass the relevant value
  // as unowned to the destructor. Create a fake balance for the verifier to be
  // happy.
  B.createEndLifetime(cleanupLoc, superSelf);

  // Return.
  B.createReturn(returnLoc, emitEmptyTuple(cleanupLoc));
}
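// Editor's sketch in Swift (assumed example, not part of the compiler sources
// above): for an NSObject-derived class, the deinit below is exposed to
// Objective-C as -dealloc. emitObjCDestructor() runs the deinit body and then
// calls the superclass -dealloc on the upcast self value (stored properties
// are torn down by the separate ivar destroyer, as the comment above notes).
import Foundation

class Connection: NSObject {
  let name: String
  init(name: String) { self.name = name }
  deinit { print("closing \(name)") }
}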
static std::string mangleConstant(SILDeclRef c, SILDeclRef::ManglingKind Kind) {
  using namespace NewMangling;
  ASTMangler mangler;

  // As a special case, Clang functions and globals don't get mangled at all.
  if (c.hasDecl()) {
    if (auto clangDecl = c.getDecl()->getClangDecl()) {
      if (!c.isForeignToNativeThunk() && !c.isNativeToForeignThunk()
          && !c.isCurried) {
        if (auto namedClangDecl = dyn_cast<clang::DeclaratorDecl>(clangDecl)) {
          if (auto asmLabel = namedClangDecl->getAttr<clang::AsmLabelAttr>()) {
            std::string s(1, '\01');
            s += asmLabel->getLabel();
            return s;
          } else if (namedClangDecl->hasAttr<clang::OverloadableAttr>()) {
            std::string storage;
            llvm::raw_string_ostream SS(storage);
            // FIXME: When we can import C++, use Clang's mangler all the time.
            mangleClangDecl(SS, namedClangDecl, c.getDecl()->getASTContext());
            return SS.str();
          }
          return namedClangDecl->getName();
        }
      }
    }
  }

  ASTMangler::SymbolKind SKind = ASTMangler::SymbolKind::Default;
  switch (Kind) {
    case SILDeclRef::ManglingKind::Default:
      if (c.isForeign) {
        SKind = ASTMangler::SymbolKind::SwiftAsObjCThunk;
      } else if (c.isDirectReference) {
        SKind = ASTMangler::SymbolKind::DirectMethodReferenceThunk;
      } else if (c.isForeignToNativeThunk()) {
        SKind = ASTMangler::SymbolKind::ObjCAsSwiftThunk;
      }
      break;
    case SILDeclRef::ManglingKind::VTableMethod:
      SKind = ASTMangler::SymbolKind::VTableMethod;
      break;
    case SILDeclRef::ManglingKind::DynamicThunk:
      SKind = ASTMangler::SymbolKind::DynamicThunk;
      break;
  }

  switch (c.kind) {
  case SILDeclRef::Kind::Func:
    if (!c.hasDecl())
      return mangler.mangleClosureEntity(c.getAbstractClosureExpr(), SKind);

    // As a special case, functions can have manually mangled names.
    // Use the SILGen name only for the original non-thunked, non-curried entry
    // point.
    if (auto NameA = c.getDecl()->getAttrs().getAttribute<SILGenNameAttr>())
      if (!c.isForeignToNativeThunk() && !c.isNativeToForeignThunk()
          && !c.isCurried) {
        return NameA->Name;
      }

    // Use a given cdecl name for native-to-foreign thunks.
    if (auto CDeclA = c.getDecl()->getAttrs().getAttribute<CDeclAttr>())
      if (c.isNativeToForeignThunk()) {
        return CDeclA->Name;
      }

    // Otherwise, fall through into the 'other decl' case.
    SWIFT_FALLTHROUGH;

  case SILDeclRef::Kind::EnumElement:
    return mangler.mangleEntity(c.getDecl(), c.isCurried, SKind);

  case SILDeclRef::Kind::Deallocator:
    assert(!c.isCurried);
    return mangler.mangleDestructorEntity(cast<DestructorDecl>(c.getDecl()),
                                          /*isDeallocating*/ true,
                                          SKind);

  case SILDeclRef::Kind::Destroyer:
    assert(!c.isCurried);
    return mangler.mangleDestructorEntity(cast<DestructorDecl>(c.getDecl()),
                                          /*isDeallocating*/ false,
                                          SKind);

  case SILDeclRef::Kind::Allocator:
    return mangler.mangleConstructorEntity(cast<ConstructorDecl>(c.getDecl()),
                                           /*allocating*/ true,
                                           c.isCurried,
                                           SKind);

  case SILDeclRef::Kind::Initializer:
    return mangler.mangleConstructorEntity(cast<ConstructorDecl>(c.getDecl()),
                                           /*allocating*/ false,
                                           c.isCurried,
                                           SKind);

  case SILDeclRef::Kind::IVarInitializer:
  case SILDeclRef::Kind::IVarDestroyer:
    assert(!c.isCurried);
    return mangler.mangleIVarInitDestroyEntity(
        cast<ClassDecl>(c.getDecl()),
        c.kind == SILDeclRef::Kind::IVarDestroyer, SKind);

  case SILDeclRef::Kind::GlobalAccessor:
    assert(!c.isCurried);
    return mangler.mangleAccessorEntity(AccessorKind::IsMutableAddressor,
                                        AddressorKind::Unsafe,
                                        c.getDecl(),
                                        /*isStatic*/ false,
                                        SKind);

  case SILDeclRef::Kind::GlobalGetter:
    assert(!c.isCurried);
    return mangler.mangleGlobalGetterEntity(c.getDecl(), SKind);

  case SILDeclRef::Kind::DefaultArgGenerator:
    assert(!c.isCurried);
    return mangler.mangleDefaultArgumentEntity(
        cast<AbstractFunctionDecl>(c.getDecl()), c.defaultArgIndex, SKind);

  case SILDeclRef::Kind::StoredPropertyInitializer:
    assert(!c.isCurried);
    return mangler.mangleInitializerEntity(cast<VarDecl>(c.getDecl()), SKind);
  }

  llvm_unreachable("bad entity kind!");
}
Optional<SILVTable::Entry>
SILGenModule::emitVTableMethod(ClassDecl *theClass,
                               SILDeclRef derived, SILDeclRef base) {
  assert(base.kind == derived.kind);

  auto *baseDecl = base.getDecl();
  auto *derivedDecl = derived.getDecl();

  // Note: We intentionally don't support extension members here.
  //
  // Once extensions can override or introduce new vtable entries, this will
  // all likely change anyway.
  auto *baseClass = cast<ClassDecl>(baseDecl->getDeclContext());
  auto *derivedClass = cast<ClassDecl>(derivedDecl->getDeclContext());

  // Figure out if the vtable entry comes from the superclass, in which
  // case we won't emit it if building a resilient module.
  SILVTable::Entry::Kind implKind;
  if (baseClass == theClass) {
    // This is a vtable entry for a method of the immediate class.
    implKind = SILVTable::Entry::Kind::Normal;
  } else if (derivedClass == theClass) {
    // This is a vtable entry for a method of a base class, but it is being
    // overridden in the immediate class.
    implKind = SILVTable::Entry::Kind::Override;
  } else {
    // This vtable entry is copied from the superclass.
    implKind = SILVTable::Entry::Kind::Inherited;

    // If the override is defined in a class from a different resilience
    // domain, don't emit the vtable entry.
    if (derivedClass->isResilient(M.getSwiftModule(),
                                  ResilienceExpansion::Maximal)) {
      return None;
    }
  }

  SILFunction *implFn;
  SILLinkage implLinkage;

  // If the member is dynamic, reference its dynamic dispatch thunk so that
  // it will be redispatched, funneling the method call through the runtime
  // hook point.
  if (derivedDecl->isDynamic()
      && derived.kind != SILDeclRef::Kind::Allocator) {
    implFn = getDynamicThunk(derived, Types.getConstantInfo(derived).SILFnType);
    implLinkage = SILLinkage::Public;
  } else {
    implFn = getFunction(derived, NotForDefinition);
    implLinkage = stripExternalFromLinkage(implFn->getLinkage());
  }

  // As a fast path, if there is no override, definitely no thunk is necessary.
  if (derived == base)
    return SILVTable::Entry(base, implFn, implKind, implLinkage);

  // Determine the derived thunk type by lowering the derived type against the
  // abstraction pattern of the base.
  auto baseInfo = Types.getConstantInfo(base);
  auto derivedInfo = Types.getConstantInfo(derived);
  auto basePattern = AbstractionPattern(baseInfo.LoweredType);

  auto overrideInfo = M.Types.getConstantOverrideInfo(derived, base);

  // The override member type is semantically a subtype of the base
  // member type. If the override is ABI compatible, we do not need
  // a thunk.
  if (M.Types.checkFunctionForABIDifferences(derivedInfo.SILFnType,
                                             overrideInfo.SILFnType)
      == TypeConverter::ABIDifference::Trivial)
    return SILVTable::Entry(base, implFn, implKind, implLinkage);

  // Generate the thunk name.
  std::string name;
  {
    Mangle::ASTMangler mangler;
    if (isa<FuncDecl>(baseDecl)) {
      name = mangler.mangleVTableThunk(
        cast<FuncDecl>(baseDecl),
        cast<FuncDecl>(derivedDecl));
    } else {
      name = mangler.mangleConstructorVTableThunk(
        cast<ConstructorDecl>(baseDecl),
        cast<ConstructorDecl>(derivedDecl),
        base.kind == SILDeclRef::Kind::Allocator);
    }
  }

  // If we already emitted this thunk, reuse it.
  if (auto existingThunk = M.lookUpFunction(name))
    return SILVTable::Entry(base, existingThunk, implKind, implLinkage);

  // Emit the thunk.
  SILLocation loc(derivedDecl);
  SILGenFunctionBuilder builder(*this);
  auto thunk = builder.createFunction(
      SILLinkage::Private, name, overrideInfo.SILFnType,
      cast<AbstractFunctionDecl>(derivedDecl)->getGenericEnvironment(), loc,
      IsBare, IsNotTransparent, IsNotSerialized);
  thunk->setDebugScope(new (M) SILDebugScope(loc, thunk));

  SILGenFunction(*this, *thunk, theClass)
    .emitVTableThunk(derived, implFn, basePattern,
                     overrideInfo.LoweredType,
                     derivedInfo.LoweredType);

  return SILVTable::Entry(base, thunk, implKind, implLinkage);
}
static std::string mangleConstant(SILDeclRef c, SILDeclRef::ManglingKind Kind) {
  using namespace Mangle;
  Mangler mangler;

  // Almost everything below gets one of the common prefixes:
  //   mangled-name ::= '_T' global     // Native symbol
  //   mangled-name ::= '_TTo' global   // ObjC interop thunk
  //   mangled-name ::= '_TTO' global   // Foreign function thunk
  //   mangled-name ::= '_TTd' global   // Direct
  StringRef introducer = "_T";
  switch (Kind) {
    case SILDeclRef::ManglingKind::Default:
      if (c.isForeign) {
        introducer = "_TTo";
      } else if (c.isDirectReference) {
        introducer = "_TTd";
      } else if (c.isForeignToNativeThunk()) {
        introducer = "_TTO";
      }
      break;
    case SILDeclRef::ManglingKind::VTableMethod:
      introducer = "_TTV";
      break;
    case SILDeclRef::ManglingKind::DynamicThunk:
      introducer = "_TTD";
      break;
  }

  // As a special case, Clang functions and globals don't get mangled at all.
  if (c.hasDecl()) {
    if (auto clangDecl = c.getDecl()->getClangDecl()) {
      if (!c.isForeignToNativeThunk() && !c.isNativeToForeignThunk()
          && !c.isCurried) {
        if (auto namedClangDecl = dyn_cast<clang::DeclaratorDecl>(clangDecl)) {
          if (auto asmLabel = namedClangDecl->getAttr<clang::AsmLabelAttr>()) {
            mangler.append('\01');
            mangler.append(asmLabel->getLabel());
          } else if (namedClangDecl->hasAttr<clang::OverloadableAttr>()) {
            std::string storage;
            llvm::raw_string_ostream SS(storage);
            // FIXME: When we can import C++, use Clang's mangler all the time.
            mangleClangDecl(SS, namedClangDecl, c.getDecl()->getASTContext());
            mangler.append(SS.str());
          } else {
            mangler.append(namedClangDecl->getName());
          }
          return mangler.finalize();
        }
      }
    }
  }

  switch (c.kind) {
  //   entity ::= declaration                     // other declaration
  case SILDeclRef::Kind::Func:
    if (!c.hasDecl()) {
      mangler.append(introducer);
      mangler.mangleClosureEntity(c.getAbstractClosureExpr(),
                                  c.uncurryLevel);
      return mangler.finalize();
    }

    // As a special case, functions can have manually mangled names.
    // Use the SILGen name only for the original non-thunked, non-curried entry
    // point.
    if (auto NameA = c.getDecl()->getAttrs().getAttribute<SILGenNameAttr>())
      if (!c.isForeignToNativeThunk() && !c.isNativeToForeignThunk()
          && !c.isCurried) {
        mangler.append(NameA->Name);
        return mangler.finalize();
      }

    // Use a given cdecl name for native-to-foreign thunks.
    if (auto CDeclA = c.getDecl()->getAttrs().getAttribute<CDeclAttr>())
      if (c.isNativeToForeignThunk()) {
        mangler.append(CDeclA->Name);
        return mangler.finalize();
      }

    // Otherwise, fall through into the 'other decl' case.
    SWIFT_FALLTHROUGH;

  case SILDeclRef::Kind::EnumElement:
    mangler.append(introducer);
    mangler.mangleEntity(c.getDecl(), c.uncurryLevel);
    return mangler.finalize();

  //   entity ::= context 'D'                     // deallocating destructor
  case SILDeclRef::Kind::Deallocator:
    mangler.append(introducer);
    mangler.mangleDestructorEntity(cast<DestructorDecl>(c.getDecl()),
                                   /*isDeallocating*/ true);
    return mangler.finalize();

  //   entity ::= context 'd'                     // destroying destructor
  case SILDeclRef::Kind::Destroyer:
    mangler.append(introducer);
    mangler.mangleDestructorEntity(cast<DestructorDecl>(c.getDecl()),
                                   /*isDeallocating*/ false);
    return mangler.finalize();

  //   entity ::= context 'C' type                // allocating constructor
  case SILDeclRef::Kind::Allocator:
    mangler.append(introducer);
    mangler.mangleConstructorEntity(cast<ConstructorDecl>(c.getDecl()),
                                    /*allocating*/ true,
                                    c.uncurryLevel);
    return mangler.finalize();

  //   entity ::= context 'c' type                // initializing constructor
  case SILDeclRef::Kind::Initializer:
    mangler.append(introducer);
    mangler.mangleConstructorEntity(cast<ConstructorDecl>(c.getDecl()),
                                    /*allocating*/ false,
                                    c.uncurryLevel);
    return mangler.finalize();

  //   entity ::= declaration 'e'                 // ivar initializer
  //   entity ::= declaration 'E'                 // ivar destroyer
  case SILDeclRef::Kind::IVarInitializer:
  case SILDeclRef::Kind::IVarDestroyer:
    mangler.append(introducer);
    mangler.mangleIVarInitDestroyEntity(
      cast<ClassDecl>(c.getDecl()),
      c.kind == SILDeclRef::Kind::IVarDestroyer);
    return mangler.finalize();

  //   entity ::= declaration 'a'                 // addressor
  case SILDeclRef::Kind::GlobalAccessor:
    mangler.append(introducer);
    mangler.mangleAddressorEntity(c.getDecl());
    return mangler.finalize();

  //   entity ::= declaration 'G'                 // getter
  case SILDeclRef::Kind::GlobalGetter:
    mangler.append(introducer);
    mangler.mangleGlobalGetterEntity(c.getDecl());
    return mangler.finalize();

  //   entity ::= context 'e' index               // default arg generator
  case SILDeclRef::Kind::DefaultArgGenerator:
    mangler.append(introducer);
    mangler.mangleDefaultArgumentEntity(cast<AbstractFunctionDecl>(c.getDecl()),
                                        c.defaultArgIndex);
    return mangler.finalize();

  //   entity ::= 'I' declaration 'i'             // stored property initializer
  case SILDeclRef::Kind::StoredPropertyInitializer:
    mangler.append(introducer);
    mangler.mangleInitializerEntity(cast<VarDecl>(c.getDecl()));
    return mangler.finalize();
  }

  llvm_unreachable("bad entity kind!");
}
void SILGenFunction::emitCurryThunk(SILDeclRef thunk) {
  assert(thunk.isCurried);

  auto *vd = thunk.getDecl();

  if (auto *fd = dyn_cast<AbstractFunctionDecl>(vd)) {
    assert(!SGM.M.Types.hasLoweredLocalCaptures(fd) &&
           "methods cannot have captures");
    (void) fd;
  }

  SILLocation loc(vd);
  Scope S(*this, vd);

  auto thunkInfo = SGM.Types.getConstantInfo(thunk);
  auto thunkFnTy = thunkInfo.SILFnType;
  SILFunctionConventions fromConv(thunkFnTy, SGM.M);

  auto selfTy = fromConv.getSILType(thunkFnTy->getSelfParameter());
  selfTy = F.mapTypeIntoContext(selfTy);
  ManagedValue selfArg = B.createInputFunctionArgument(selfTy, loc);

  // Forward substitutions.
  auto subs = F.getForwardingSubstitutionMap();

  auto toFnAndRef = getNextUncurryLevelRef(*this, loc, thunk, selfArg, subs);
  ManagedValue toFn = toFnAndRef.first;
  SILDeclRef calleeRef = toFnAndRef.second;

  SILType resultTy = fromConv.getSingleSILResultType();
  resultTy = F.mapTypeIntoContext(resultTy);

  // Partially apply the next uncurry level and return the result closure.
  selfArg = selfArg.ensurePlusOne(*this, loc);
  auto calleeConvention = ParameterConvention::Direct_Guaranteed;
  ManagedValue toClosure =
      B.createPartialApply(loc, toFn, subs, {selfArg}, calleeConvention);
  if (resultTy != toClosure.getType()) {
    CanSILFunctionType resultFnTy = resultTy.castTo<SILFunctionType>();
    CanSILFunctionType closureFnTy =
        toClosure.getType().castTo<SILFunctionType>();
    if (resultFnTy->isABICompatibleWith(closureFnTy).isCompatible()) {
      toClosure = B.createConvertFunction(loc, toClosure, resultTy);
    } else {
      // Compute the partially-applied abstraction pattern for the callee:
      // just grab the pattern for the curried fn ref and "call" it.
      assert(!calleeRef.isCurried);
      calleeRef.isCurried = true;
      auto appliedFnPattern =
          SGM.Types.getConstantInfo(calleeRef).FormalPattern
              .getFunctionResultType();

      auto appliedThunkPattern =
          thunkInfo.FormalPattern.getFunctionResultType();

      // The formal type should be the same for the callee and the thunk.
      auto formalType = thunkInfo.FormalType;
      if (auto genericSubstType = dyn_cast<GenericFunctionType>(formalType)) {
        formalType = genericSubstType.substGenericArgs(subs);
      }
      formalType = cast<AnyFunctionType>(formalType.getResult());

      toClosure =
          emitTransformedValue(loc, toClosure, appliedFnPattern, formalType,
                               appliedThunkPattern, formalType);
    }
  }
  toClosure = S.popPreservingValue(toClosure);
  B.createReturn(ImplicitReturnLocation::getImplicitReturnLoc(loc), toClosure);
}