/// Returns true if the object file generated by \p IGM will be the "first" /// object file in the module. This lets us determine where to put a symbol /// that must be unique. static bool isFirstObjectFileInModule(IRGenModule &IGM) { if (IGM.getSILModule().isWholeModule()) return IGM.IRGen.getPrimaryIGM() == &IGM; const DeclContext *DC = IGM.getSILModule().getAssociatedContext(); if (!DC) return false; assert(!isa<ModuleDecl>(DC) && "that would be a whole module build"); assert(isa<FileUnit>(DC) && "compiling something smaller than a file?"); ModuleDecl *containingModule = cast<FileUnit>(DC)->getParentModule(); return containingModule->getFiles().front() == DC; }
/// Build reflection metadata for a builtin type.
///
/// Builtin types live in the compiler-synthesized Builtin module and are
/// always fixed-size, hence the unconditional cast to FixedTypeInfo.
FixedTypeMetadataBuilder(IRGenModule &IGM, CanType builtinType)
    : ReflectionMetadataBuilder(IGM) {
  module = builtinType->getASTContext().TheBuiltinModule;
  type = builtinType;
  ti = &cast<FixedTypeInfo>(IGM.getTypeInfoForUnlowered(builtinType));
}
/// Return the offset, within \p theClass's metadata, of the slot holding
/// the offset of \p field.
Size irgen::getClassFieldOffsetOffset(IRGenModule &IGM, ClassDecl *theClass,
                                      VarDecl *field) {
  // CF-imported classes have no Swift metadata, so there is no slot.
  bool isCFClass =
      theClass->getForeignClassKind() == ClassDecl::ForeignKind::CFType;
  if (isCFClass)
    return Size();

  return IGM.getClassMetadataLayout(theClass).getStaticFieldOffset(field);
}
bool FulfillmentMap::searchWitnessTable( IRGenModule &IGM, CanType type, ProtocolDecl *protocol, unsigned source, MetadataPath &&path, const InterestingKeysCallback &keys, llvm::SmallPtrSetImpl<ProtocolDecl *> *interestingConformances) { bool hadFulfillment = false; auto &pi = IGM.getProtocolInfo(protocol, ProtocolInfoKind::RequirementSignature); for (auto &entry : pi.getWitnessEntries()) { if (!entry.isBase()) continue; ProtocolDecl *inherited = entry.getBase(); MetadataPath inheritedPath = path; inheritedPath.addInheritedProtocolComponent(pi.getBaseWitnessIndex(&entry)); hadFulfillment |= searchWitnessTable(IGM, type, inherited, source, std::move(inheritedPath), keys, interestingConformances); } // If we're not limiting the set of interesting conformances, or if // this is an interesting conformance, record it. if (!interestingConformances || interestingConformances->count(protocol)) { hadFulfillment |= addFulfillment({type, protocol}, source, std::move(path), MetadataState::Complete); } return hadFulfillment; }
/// Build a constant enum payload from \p bitPattern, distributing the bits
/// across the schema's element types from least-significant bits upward.
EnumPayload EnumPayload::fromBitPattern(IRGenModule &IGM, APInt bitPattern,
                                        EnumPayloadSchema schema) {
  EnumPayload payload;

  schema.forEachType(IGM, [&](llvm::Type *eltTy) {
    unsigned eltBits = IGM.DataLayout.getTypeSizeInBits(eltTy);
    llvm::IntegerType *eltIntTy =
        llvm::IntegerType::get(IGM.getLLVMContext(), eltBits);

    // Take some bits off of the bottom of the pattern.
    llvm::Constant *eltVal =
        llvm::ConstantInt::get(eltIntTy, bitPattern.zextOrTrunc(eltBits));

    // Convert to the schema's element type if it isn't the raw integer.
    if (eltVal->getType() != eltTy) {
      eltVal = eltTy->isPointerTy()
                   ? llvm::ConstantExpr::getIntToPtr(eltVal, eltTy)
                   : llvm::ConstantExpr::getBitCast(eltVal, eltTy);
    }
    payload.PayloadValues.push_back(eltVal);

    // Shift the remaining bits down for the next element.
    bitPattern = bitPattern.lshr(eltBits);
  });

  return payload;
}
/// Is a declaration from \p dc emitted by some other translation unit,
/// i.e. only available externally to the module we are generating?
static bool isAvailableExternally(IRGenModule &IGM, const DeclContext *dc) {
  const DeclContext *moduleScope = dc->getModuleScopeContext();
  // Clang-imported declarations and declarations belonging to the context
  // we are currently emitting are not external.
  bool isLocalToThisEmission =
      isa<ClangModuleUnit>(moduleScope) ||
      moduleScope == IGM.getSILModule().getAssociatedContext();
  return !isLocalToThisEmission;
}
/// Construct an IRGenFunction for emitting code into \p Fn.
///
/// Preserves the enclosing debug location (when debug info is enabled),
/// applies sanitizer function attributes per the IRGen options, and emits
/// the function prologue.
IRGenFunction::IRGenFunction(IRGenModule &IGM, llvm::Function *Fn,
                             const SILDebugScope *DbgScope,
                             Optional<SILLocation> DbgLoc)
    : IGM(IGM), Builder(IGM.getLLVMContext()), CurFn(Fn), DbgScope(DbgScope) {
  // Make sure the instructions in this function are attached its debug scope.
  if (IGM.DebugInfo) {
    // Functions, especially artificial thunks and closures, are often
    // generated on-the-fly while we are in the middle of another
    // function. Be nice and preserve the current debug location until
    // after we're done with this function.
    IGM.DebugInfo->pushLoc();
  }

  // Apply sanitizer attributes to the function.
  // TODO: Check if the function is ASan black listed either in the external
  // file or via annotations.
  if (IGM.IRGen.Opts.Sanitize == SanitizerKind::Address)
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
  if (IGM.IRGen.Opts.Sanitize == SanitizerKind::Thread)
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);

  emitPrologue();
}
/// Add the fields for the standard heap header to the given layout. void irgen::addHeapHeaderToLayout(IRGenModule &IGM, Size &size, Alignment &align, SmallVectorImpl<llvm::Type*> &fields) { assert(size.isZero() && align.isOne() && fields.empty()); size = getHeapHeaderSize(IGM); align = IGM.getPointerAlignment(); fields.push_back(IGM.RefCountedStructTy); }
/// Build reflection metadata for a fixed-layout nominal type.
///
/// Uses the declaration's parent module and its declared (canonical) type;
/// the cast enforces that this builder is only used for types with a
/// statically-known layout.
FixedTypeMetadataBuilder(IRGenModule &IGM, const NominalTypeDecl *nominalDecl)
    : ReflectionMetadataBuilder(IGM) {
  module = nominalDecl->getParentModule();
  type = nominalDecl->getDeclaredType()->getCanonicalType();
  ti = &cast<FixedTypeInfo>(IGM.getTypeInfoForUnlowered(
      nominalDecl->getDeclaredTypeInContext()->getCanonicalType()));
}
/// Attach ABI-identifying module flags to the LLVM module.
void setModuleFlags(IRGenModule &IGM) {
  llvm::Module *M = IGM.getModule();

  // These module flags don't affect code generation; they just let us
  // error during LTO if the user tries to combine files across ABIs.
  M->addModuleFlag(llvm::Module::Error, "Swift Version",
                   IRGenModule::swiftVersion);
}
static void initLLVMModule(const IRGenModule &IGM) { auto *Module = IGM.getModule(); assert(Module && "Expected llvm:Module for IR generation!"); Module->setTargetTriple(IGM.Triple.str()); // Set the module's string representation. Module->setDataLayout(IGM.DataLayout.getStringRepresentation()); }
/// Map a Swift type to the canonical Clang type used to lower it.
///
/// Fast paths handle Clang-imported nominal types and Bool without touching
/// the cache; every other type goes through the cache and, on a miss, a full
/// GenClangType conversion whose result is then cached.
clang::CanQualType ClangTypeConverter::convert(IRGenModule &IGM, CanType type) {
  // Try to do this without making cache entries for obvious cases.
  if (auto nominal = dyn_cast<NominalType>(type)) {
    auto decl = nominal->getDecl();
    if (auto clangDecl = decl->getClangDecl()) {
      if (auto clangTypeDecl = dyn_cast<clang::TypeDecl>(clangDecl)) {
        // Imported C type: use its canonical Clang type directly.
        auto &ctx = IGM.getClangASTContext();
        return ctx.getCanonicalType(ctx.getTypeDeclType(clangTypeDecl));
      } else if (auto ifaceDecl = dyn_cast<clang::ObjCInterfaceDecl>(clangDecl)) {
        // Imported ObjC class: lower as a pointer to the interface type.
        auto &ctx = IGM.getClangASTContext();
        auto clangType = ctx.getObjCInterfaceType(ifaceDecl);
        auto ptrTy = ctx.getObjCObjectPointerType(clangType);
        return ctx.getCanonicalType(ptrTy);
      } else if (auto protoDecl = dyn_cast<clang::ObjCProtocolDecl>(clangDecl)) {
        // Imported ObjC protocol: lower as id<Proto>.
        auto &ctx = IGM.getClangASTContext();
        auto clangType = ctx.getObjCObjectType(
            ctx.ObjCBuiltinIdTy,
            const_cast<clang::ObjCProtocolDecl **>(&protoDecl), 1);
        auto ptrTy = ctx.getObjCObjectPointerType(clangType);
        return ctx.getCanonicalType(ptrTy);
      }
    } else if (decl == IGM.Context.getBoolDecl()) {
      // FIXME: Handle _Bool and DarwinBoolean.
      auto &ctx = IGM.getClangASTContext();
      auto &TI = ctx.getTargetInfo();
      if (TI.useSignedCharForObjCBool()) {
        return ctx.SignedCharTy;
      }
      // Otherwise fall through to the generic conversion below.
    }
  }

  // Look in the cache.
  auto it = Cache.find(type);
  if (it != Cache.end()) {
    return it->second;
  }

  // If that failed, convert the type, cache, and return.
  clang::CanQualType result = GenClangType(IGM, *this).visit(type);
  Cache.insert({type, result});
  return result;
}
bool FulfillmentMap::searchNominalTypeMetadata(IRGenModule &IGM, CanType type, MetadataState metadataState, unsigned source, MetadataPath &&path, const InterestingKeysCallback &keys) { // Objective-C generics don't preserve their generic parameters at runtime, // so they aren't able to fulfill type metadata requirements. if (type.getAnyNominal()->hasClangNode()) { return false; } auto *nominal = type.getAnyNominal(); if (!nominal->isGenericContext() || isa<ProtocolDecl>(nominal)) { return false; } bool hadFulfillment = false; GenericTypeRequirements requirements(IGM, nominal); requirements.enumerateFulfillments( IGM, type->getContextSubstitutionMap(IGM.getSwiftModule(), nominal), [&](unsigned reqtIndex, CanType arg, Optional<ProtocolConformanceRef> conf) { // Skip uninteresting type arguments. if (!keys.hasInterestingType(arg)) return; // If the fulfilled value is type metadata, refine the path. if (!conf) { auto argState = getPresumedMetadataStateForTypeArgument(metadataState); MetadataPath argPath = path; argPath.addNominalTypeArgumentComponent(reqtIndex); hadFulfillment |= searchTypeMetadata(IGM, arg, IsExact, argState, source, std::move(argPath), keys); return; } // Otherwise, it's a conformance. // Ignore it unless the type itself is interesting. if (!keys.isInterestingType(arg)) return; // Refine the path. MetadataPath argPath = path; argPath.addNominalTypeArgumentConformanceComponent(reqtIndex); hadFulfillment |= searchWitnessTable(IGM, arg, conf->getRequirement(), source, std::move(argPath), keys); }); return hadFulfillment; }
/// Record where the target class's immediate members begin in the
/// metadata layout.
///
/// When the superclass or the class itself is resilient from this module's
/// perspective, member offsets must be computed relative to a dynamic base,
/// which is captured in DynamicOffsetBase here.
void noteStartOfImmediateMembers(ClassDecl *forClass) {
  // If our superclass is resilient to us, or the class itself is resilient
  // to us, we will access metadata members relative to a base offset.
  if (forClass == Target) {
    Layout.StartOfImmediateMembers = getNextOffset();

    if (Layout.HasResilientSuperclass ||
        IGM.isResilient(forClass, ResilienceExpansion::Maximal)) {
      // The dynamic base must only be set once.
      assert(!DynamicOffsetBase);
      DynamicOffsetBase = NextOffset;
    }
  }
}
/// Return the alignment required for the global symbol backing this
/// link entity.
Alignment LinkEntity::getAlignment(IRGenModule &IGM) const {
  switch (getKind()) {
  // Context and reflection descriptors only need 4-byte alignment.
  case Kind::ModuleDescriptor:
  case Kind::ExtensionDescriptor:
  case Kind::AnonymousDescriptor:
  case Kind::NominalTypeDescriptor:
  case Kind::ProtocolDescriptor:
  case Kind::AssociatedTypeDescriptor:
  case Kind::AssociatedConformanceDescriptor:
  case Kind::BaseConformanceDescriptor:
  case Kind::ProtocolConformanceDescriptor:
  case Kind::ProtocolRequirementsBaseDescriptor:
  case Kind::ReflectionBuiltinDescriptor:
  case Kind::ReflectionFieldDescriptor:
  case Kind::ReflectionAssociatedTypeDescriptor:
  case Kind::PropertyDescriptor:
  case Kind::EnumCase:
  case Kind::MethodDescriptor:
  case Kind::MethodDescriptorInitializer:
  case Kind::MethodDescriptorAllocator:
  case Kind::OpaqueTypeDescriptor:
    return Alignment(4);

  // Metadata, witness tables, caches, and class-related globals are
  // pointer-aligned.
  case Kind::ObjCClassRef:
  case Kind::ObjCClass:
  case Kind::TypeMetadataLazyCacheVariable:
  case Kind::TypeMetadataSingletonInitializationCache:
  case Kind::TypeMetadata:
  case Kind::TypeMetadataPattern:
  case Kind::ClassMetadataBaseOffset:
  case Kind::TypeMetadataInstantiationCache:
  case Kind::ValueWitnessTable:
  case Kind::FieldOffset:
  case Kind::ProtocolWitnessTableLazyCacheVariable:
  case Kind::ProtocolWitnessTable:
  case Kind::ProtocolWitnessTablePattern:
  case Kind::ObjCMetaclass:
  case Kind::SwiftMetaclassStub:
  case Kind::DynamicallyReplaceableFunctionVariable:
  case Kind::DynamicallyReplaceableFunctionKey:
  case Kind::OpaqueTypeDescriptorAccessorKey:
  case Kind::OpaqueTypeDescriptorAccessorVar:
  case Kind::ObjCResilientClassStub:
    return IGM.getPointerAlignment();

  // Function bodies impose no extra alignment requirement here.
  case Kind::SILFunction:
    return Alignment(1);

  default:
    llvm_unreachable("alignment not specified");
  }
}
/// Construct an IRGenFunction for emitting code into \p Fn.
///
/// The IRBuilder tracks debug locations only when debug info is enabled
/// and we aren't building for debugger support. Preserves the enclosing
/// debug location and emits the function prologue.
IRGenFunction::IRGenFunction(IRGenModule &IGM, llvm::Function *Fn,
                             const SILDebugScope *DbgScope,
                             Optional<SILLocation> DbgLoc)
    : IGM(IGM),
      Builder(IGM.getLLVMContext(),
              IGM.DebugInfo && !IGM.Context.LangOpts.DebuggerSupport),
      CurFn(Fn), DbgScope(DbgScope) {
  // Make sure the instructions in this function are attached its debug scope.
  if (IGM.DebugInfo) {
    // Functions, especially artificial thunks and closures, are often
    // generated on-the-fly while we are in the middle of another
    // function. Be nice and preserve the current debug location until
    // after we're done with this function.
    IGM.DebugInfo->pushLoc();
  }

  emitPrologue();
}
/// Translate a stdlib struct type (e.g. Int, CInt's underlying type) back
/// to the Clang builtin type the importer originally mapped it from.
///
/// Populates the cache with every MAP_BUILTIN_TYPE entry on each call and
/// then requires that \p type is one of them; asserts otherwise.
clang::CanQualType
ClangTypeConverter::reverseBuiltinTypeMapping(IRGenModule &IGM,
                                              CanStructType type) {
  // Handle builtin types by adding entries to the cache that reverse
  // the mapping done by the importer. We could try to look at the
  // members of the struct instead, but even if that's ABI-equivalent
  // (which it had better be!), it might erase interesting semantic
  // differences like integers vs. characters. This is important
  // because CC lowering isn't the only purpose of this conversion.
  //
  // The importer maps builtin types like 'int' to named types like
  // 'CInt', which are generally typealiases. So what we do here is
  // map the underlying types of those typealiases back to the builtin
  // type. These typealiases frequently create a many-to-one mapping,
  // so just use the first type that mapped to a particular underlying
  // type.
  //
  // This is the last thing that happens before asserting that the
  // struct type doesn't have a mapping. Furthermore, all of the
  // builtin types are pre-built in the clang ASTContext. So it's not
  // really a significant performance problem to just cache all them
  // right here; it makes making a few more entries in the cache than
  // we really need, but it also means we won't end up repeating these
  // stdlib lookups multiple times, and we have to perform multiple
  // lookups anyway because the MAP_BUILTIN_TYPE database uses
  // typealias names (like 'CInt') that aren't obviously associated
  // with the underlying C library type.

  auto stdlib = IGM.Context.getStdlibModule();
  assert(stdlib && "translating stdlib type to C without stdlib module?");
  auto &ctx = IGM.getClangASTContext();

  // Insert a cache entry mapping the named stdlib type to the given
  // Clang builtin kind (or an NS(U)Integer typedef for Int/UInt).
  auto cacheStdlibType = [&](StringRef swiftName,
                             clang::BuiltinType::Kind builtinKind) {
    CanType swiftType = getNamedSwiftType(stdlib, swiftName);
    if (!swiftType) return;

    auto &sema = IGM.Context.getClangModuleLoader()->getClangSema();
    // Handle Int and UInt specially. On Apple platforms, these correspond to
    // the NSInteger and NSUInteger typedefs, so map them back to those typedefs
    // if they're available, to ensure we get consistent ObjC @encode strings.
    if (swiftType->getAnyNominal() == IGM.Context.getIntDecl()) {
      if (auto NSIntegerTy = getClangBuiltinTypeFromTypedef(sema, "NSInteger")) {
        Cache.insert({swiftType, NSIntegerTy});
        return;
      }
    } else if (swiftType->getAnyNominal() == IGM.Context.getUIntDecl()) {
      if (auto NSUIntegerTy = getClangBuiltinTypeFromTypedef(sema, "NSUInteger")) {
        Cache.insert({swiftType, NSUIntegerTy});
        return;
      }
    }

    Cache.insert({swiftType, getClangBuiltinTypeFromKind(ctx, builtinKind)});
  };

#define MAP_BUILTIN_TYPE(CLANG_BUILTIN_KIND, SWIFT_TYPE_NAME) \
  cacheStdlibType(#SWIFT_TYPE_NAME, clang::BuiltinType::CLANG_BUILTIN_KIND);
#include "swift/ClangImporter/BuiltinMappedTypes.def"

  // The above code sets up a bunch of mappings in the cache; just
  // assume that we hit one of them.
  auto it = Cache.find(type);
  assert(it != Cache.end() &&
         "cannot translate Swift type to C! type is not specially known");
  return it->second;
}
/// Fixed-size value buffers are pointer-aligned.
Alignment irgen::getFixedBufferAlignment(IRGenModule &IGM) {
  Alignment bufferAlign = IGM.getPointerAlignment();
  return bufferAlign;
}
/// A fixed-size buffer is always 16 bytes and pointer-aligned. /// If we align them more, we'll need to introduce padding to /// make protocol types work. Size irgen::getFixedBufferSize(IRGenModule &IGM) { return 3 * IGM.getPointerSize(); }
/// Return the size of the standard heap header. Size irgen::getHeapHeaderSize(IRGenModule &IGM) { return IGM.getPointerSize() + Size(8); }
/// Search bound generic type metadata for fulfillable type arguments and
/// conformances, then also match against the parent type's metadata.
///
/// \return true if at least one fulfillment was recorded.
bool FulfillmentMap::searchBoundGenericTypeMetadata(
    IRGenModule &IGM, CanBoundGenericType type, unsigned source,
    MetadataPath &&path, const InterestingKeysCallback &keys) {
  // Clang-imported generics don't preserve their generic arguments in
  // their metadata, so nothing can be fulfilled from them.
  if (type->getDecl()->hasClangNode())
    return false;

  bool hadFulfillment = false;

  GenericTypeRequirements requirements(IGM, type->getDecl());
  requirements.enumerateFulfillments(
      IGM, type->getSubstitutions(IGM.getSwiftModule(), nullptr),
      [&](unsigned reqtIndex, CanType arg,
          Optional<ProtocolConformanceRef> conf) {
        // Skip uninteresting type arguments.
        if (!keys.hasInterestingType(arg))
          return;

        // If the fulfilled value is type metadata, refine the path.
        if (!conf) {
          MetadataPath argPath = path;
          argPath.addNominalTypeArgumentComponent(reqtIndex);
          hadFulfillment |= searchTypeMetadata(
              IGM, arg, IsExact, source, std::move(argPath), keys);
          return;
        }

        // Otherwise, it's a conformance.
        // Ignore it unless the type itself is interesting.
        if (!keys.isInterestingType(arg))
          return;

        // Refine the path.
        MetadataPath argPath = path;
        argPath.addNominalTypeArgumentConformanceComponent(reqtIndex);

        llvm::SmallPtrSet<ProtocolDecl *, 4> interestingConformancesBuffer;
        llvm::SmallPtrSetImpl<ProtocolDecl *> *interestingConformances =
            nullptr;

        // If the interesting-keys set is limiting the set of interesting
        // conformances, collect that filter.
        if (keys.hasLimitedInterestingConformances(arg)) {
          // Bail out immediately if the set is empty.
          auto requiredConformances = keys.getInterestingConformances(arg);
          if (requiredConformances.empty())
            return;
          interestingConformancesBuffer.insert(requiredConformances.begin(),
                                               requiredConformances.end());
          interestingConformances = &interestingConformancesBuffer;
        }

        hadFulfillment |= searchWitnessTable(IGM, arg, conf->getRequirement(),
                                             source, std::move(argPath), keys,
                                             interestingConformances);
      });

  // Also match against the parent. The polymorphic type
  // will start with any arguments from the parent.
  // Note: `path` is only moved here, after all the copies above.
  hadFulfillment |= searchParentTypeMetadata(IGM, type->getDecl(),
                                             type.getParent(), source,
                                             std::move(path), keys);

  return hadFulfillment;
}
/// Perform structure layout on the given types. StructLayout::StructLayout(IRGenModule &IGM, CanType astTy, LayoutKind layoutKind, LayoutStrategy strategy, ArrayRef<const TypeInfo *> types, llvm::StructType *typeToFill) { ASTTy = astTy; Elements.reserve(types.size()); // Fill in the Elements array. for (auto type : types) Elements.push_back(ElementLayout::getIncomplete(*type, *type)); assert(typeToFill == nullptr || typeToFill->isOpaque()); StructLayoutBuilder builder(IGM); // Add the heap header if necessary. if (requiresHeapHeader(layoutKind)) { builder.addHeapHeader(); } bool nonEmpty = builder.addFields(Elements, strategy); // Special-case: there's nothing to store. // In this case, produce an opaque type; this tends to cause lovely // assertions. if (!nonEmpty) { assert(!builder.empty() == requiresHeapHeader(layoutKind)); MinimumAlign = Alignment(1); MinimumSize = Size(0); SpareBits.clear(); IsFixedLayout = true; IsKnownPOD = IsPOD; IsKnownBitwiseTakable = IsBitwiseTakable; IsKnownAlwaysFixedSize = IsFixedSize; Ty = (typeToFill ? typeToFill : IGM.OpaquePtrTy->getElementType()); } else { MinimumAlign = builder.getAlignment(); MinimumSize = builder.getSize(); SpareBits = std::move(builder.getSpareBits()); IsFixedLayout = builder.isFixedLayout(); IsKnownPOD = builder.isPOD(); IsKnownBitwiseTakable = builder.isBitwiseTakable(); IsKnownAlwaysFixedSize = builder.isAlwaysFixedSize(); if (typeToFill) { builder.setAsBodyOfStruct(typeToFill); Ty = typeToFill; } else { Ty = builder.getAsAnonStruct(); } } // If the struct is not @_fixed_layout, it will have a dynamic // layout outside of its resilience domain. if (astTy && astTy->getAnyNominal()) if (IGM.isResilient(astTy->getAnyNominal(), ResilienceExpansion::Minimal)) IsKnownAlwaysFixedSize = IsNotFixedSize; assert(typeToFill == nullptr || Ty == typeToFill); if (ASTTy) applyLayoutAttributes(IGM, ASTTy, IsFixedLayout, MinimumAlign); }
/// Derive linkage parameters from an IRGenModule: the target triple,
/// whether IR generation uses multiple IGMs, and whether this is a
/// whole-module build.
UniversalLinkageInfo::UniversalLinkageInfo(IRGenModule &IGM)
    : UniversalLinkageInfo(IGM.Triple, IGM.IRGen.hasMultipleIGMs(),
                           IGM.getSILModule().isWholeModule()) {}
/// Lower \p unloweredType and fetch its TypeInfo in a single step.
static std::pair<SILType, const TypeInfo &>
getLoweredTypeAndTypeInfo(IRGenModule &IGM, Type unloweredType) {
  SILType loweredTy = IGM.getLoweredType(unloweredType);
  const TypeInfo &info = IGM.getTypeInfo(loweredTy);
  return {loweredTy, info};
}
/// Sanity-check that the layout cursor is exactly at the point where
/// struct-type-specific metadata members begin.
void noteStartOfTypeSpecificMembers() {
  assert(getNextOffset().getStaticOffset() ==
         IGM.getOffsetOfStructTypeSpecificMetadataMembers());
}