void ModuleFile::getImportDecls(SmallVectorImpl<Decl *> &Results) { if (!Bits.ComputedImportDecls) { ASTContext &Ctx = getContext(); for (auto &Dep : Dependencies) { // FIXME: We need a better way to show headers, since they usually /are/ // re-exported. This isn't likely to come up much, though. if (Dep.isHeader()) continue; StringRef ModulePathStr = Dep.RawPath; StringRef ScopePath; if (Dep.isScoped()) std::tie(ModulePathStr, ScopePath) = ModulePathStr.rsplit('\0'); SmallVector<std::pair<swift::Identifier, swift::SourceLoc>, 1> AccessPath; while (!ModulePathStr.empty()) { StringRef NextComponent; std::tie(NextComponent, ModulePathStr) = ModulePathStr.split('\0'); AccessPath.push_back({Ctx.getIdentifier(NextComponent), SourceLoc()}); } if (AccessPath.size() == 1 && AccessPath[0].first == Ctx.StdlibModuleName) continue; Module *M = Ctx.getModule(AccessPath); auto Kind = ImportKind::Module; if (!ScopePath.empty()) { auto ScopeID = Ctx.getIdentifier(ScopePath); assert(!ScopeID.empty() && "invalid decl name (non-top-level decls not supported)"); if (!M) { // The dependency module could not be loaded. Just make a guess // about the import kind, we cannot do better. Kind = ImportKind::Func; } else { // Lookup the decl in the top-level module. Module *TopLevelModule = M; if (AccessPath.size() > 1) TopLevelModule = Ctx.getLoadedModule(AccessPath.front().first); SmallVector<ValueDecl *, 8> Decls; TopLevelModule->lookupQualified( ModuleType::get(TopLevelModule), ScopeID, NL_QualifiedDefault | NL_KnownNoDependency, nullptr, Decls); Optional<ImportKind> FoundKind = ImportDecl::findBestImportKind(Decls); assert(FoundKind.hasValue() && "deserialized imports should not be ambiguous"); Kind = *FoundKind; } AccessPath.push_back({ ScopeID, SourceLoc() }); } auto *ID = ImportDecl::create(Ctx, FileContext, SourceLoc(), Kind, SourceLoc(), AccessPath); ID->setModule(M); if (Dep.isExported()) ID->getAttrs().add( new (Ctx) ExportedAttr(/*IsImplicit=*/false)); ImportDecls.push_back(ID); } Bits.ComputedImportDecls = true; } Results.append(ImportDecls.begin(), ImportDecls.end()); }
/// InsertUnwindResumeCalls - Convert the ResumeInsts that are still present
/// into calls to the appropriate _Unwind_Resume function.
bool DwarfEHPrepare::InsertUnwindResumeCalls(Function &Fn) {
  SmallVector<ResumeInst*, 16> Resumes;
  for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
    TerminatorInst *TI = I->getTerminator();
    if (ResumeInst *RI = dyn_cast<ResumeInst>(TI))
      Resumes.push_back(RI);
  }

  if (Resumes.empty())
    return false;

  // Find the rewind function if we didn't already.
  const TargetLowering *TLI = TM->getTargetLowering();
  if (!RewindFunction) {
    LLVMContext &Ctx = Resumes[0]->getContext();
    FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx),
                                          Type::getInt8PtrTy(Ctx), false);
    const char *RewindName = TLI->getLibcallName(RTLIB::UNWIND_RESUME);
    RewindFunction = Fn.getParent()->getOrInsertFunction(RewindName, FTy);
  }

  // Create the basic block where the _Unwind_Resume call will live.
  LLVMContext &Ctx = Fn.getContext();
  unsigned ResumesSize = Resumes.size();

  if (ResumesSize == 1) {
    // Instead of creating a new BB and PHI node, just append the call to
    // _Unwind_Resume to the end of the single resume block.
    ResumeInst *RI = Resumes.front();
    BasicBlock *UnwindBB = RI->getParent();
    Value *ExnObj = GetExceptionObject(RI);

    // Call the _Unwind_Resume function.
    CallInst *CI = CallInst::Create(RewindFunction, ExnObj, "", UnwindBB);
    CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME));

    // We never expect _Unwind_Resume to return.
    new UnreachableInst(Ctx, UnwindBB);
    return true;
  }

  BasicBlock *UnwindBB = BasicBlock::Create(Ctx, "unwind_resume", &Fn);
  PHINode *PN = PHINode::Create(Type::getInt8PtrTy(Ctx), ResumesSize,
                                "exn.obj", UnwindBB);

  // Extract the exception object from the ResumeInst and add it to the PHI node
  // that feeds the _Unwind_Resume call.
  for (SmallVectorImpl<ResumeInst*>::iterator
         I = Resumes.begin(), E = Resumes.end(); I != E; ++I) {
    ResumeInst *RI = *I;
    BasicBlock *Parent = RI->getParent();
    BranchInst::Create(UnwindBB, Parent);

    Value *ExnObj = GetExceptionObject(RI);
    PN->addIncoming(ExnObj, Parent);

    ++NumResumesLowered;
  }

  // Call the function.
  CallInst *CI = CallInst::Create(RewindFunction, PN, "", UnwindBB);
  CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME));

  // We never expect _Unwind_Resume to return.
  new UnreachableInst(Ctx, UnwindBB);
  return true;
}
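For reference, a minimal sketch of the control flow this pass produces in the multi-resume case (block and value names are illustrative, not taken from the pass): every cleanup block branches into one shared block whose PHI gathers the exception pointers, followed by the rewind libcall and an unreachable terminator.

; sketch only: shape of the shared resume block after lowering, assuming two cleanup predecessors
unwind_resume:                                    ; preds = %cleanup1, %cleanup2
  %exn.obj = phi i8* [ %exn1, %cleanup1 ], [ %exn2, %cleanup2 ]
  call void @_Unwind_Resume(i8* %exn.obj)
  unreachable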
Status ModuleFile::associateWithFileContext(FileUnit *file, SourceLoc diagLoc) { PrettyModuleFileDeserialization stackEntry(*this); assert(getStatus() == Status::Valid && "invalid module file"); assert(!FileContext && "already associated with an AST module"); FileContext = file; if (file->getParentModule()->getName().str() != Name) return error(Status::NameMismatch); ASTContext &ctx = getContext(); llvm::Triple moduleTarget(llvm::Triple::normalize(TargetTriple)); if (!areCompatibleArchitectures(moduleTarget, ctx.LangOpts.Target) || !areCompatibleOSs(moduleTarget, ctx.LangOpts.Target)) { return error(Status::TargetIncompatible); } if (ctx.LangOpts.EnableTargetOSChecking && isTargetTooNew(moduleTarget, ctx.LangOpts.Target)) { return error(Status::TargetTooNew); } for (const auto &searchPathPair : SearchPaths) ctx.addSearchPath(searchPathPair.first, searchPathPair.second); auto clangImporter = static_cast<ClangImporter *>(ctx.getClangModuleLoader()); bool missingDependency = false; for (auto &dependency : Dependencies) { assert(!dependency.isLoaded() && "already loaded?"); if (dependency.isHeader()) { // The path may be empty if the file being loaded is a partial AST, // and the current compiler invocation is a merge-modules step. if (!dependency.RawPath.empty()) { bool hadError = clangImporter->importHeader(dependency.RawPath, file->getParentModule(), importedHeaderInfo.fileSize, importedHeaderInfo.fileModTime, importedHeaderInfo.contents, diagLoc); if (hadError) return error(Status::FailedToLoadBridgingHeader); } Module *importedHeaderModule = clangImporter->getImportedHeaderModule(); dependency.Import = { {}, importedHeaderModule }; continue; } StringRef modulePathStr = dependency.RawPath; StringRef scopePath; if (dependency.isScoped()) { auto splitPoint = modulePathStr.find_last_of('\0'); assert(splitPoint != StringRef::npos); scopePath = modulePathStr.substr(splitPoint+1); modulePathStr = modulePathStr.slice(0, splitPoint); } SmallVector<Identifier, 4> modulePath; while (!modulePathStr.empty()) { StringRef nextComponent; std::tie(nextComponent, modulePathStr) = modulePathStr.split('\0'); modulePath.push_back(ctx.getIdentifier(nextComponent)); assert(!modulePath.back().empty() && "invalid module name (submodules not yet supported)"); } auto module = getModule(modulePath); if (!module) { // If we're missing the module we're shadowing, treat that specially. if (modulePath.size() == 1 && modulePath.front() == file->getParentModule()->getName()) { return error(Status::MissingShadowedModule); } // Otherwise, continue trying to load dependencies, so that we can list // everything that's missing. missingDependency = true; continue; } // This is for backwards-compatibility with modules that still rely on the // "HasUnderlyingModule" flag. if (Bits.HasUnderlyingModule && module == ShadowedModule) dependency.forceExported(); if (scopePath.empty()) { dependency.Import = { {}, module }; } else { auto scopeID = ctx.getIdentifier(scopePath); assert(!scopeID.empty() && "invalid decl name (non-top-level decls not supported)"); auto path = Module::AccessPathTy({scopeID, SourceLoc()}); dependency.Import = { ctx.AllocateCopy(path), module }; } } if (missingDependency) { return error(Status::MissingDependency); } if (Bits.HasEntryPoint) { FileContext->getParentModule()->registerEntryPointFile(FileContext, SourceLoc(), None); } return getStatus(); }
void NameBinder::addImport( SmallVectorImpl<std::pair<ImportedModule, ImportOptions>> &imports, ImportDecl *ID) { if (ID->getModulePath().front().first == SF.getParentModule()->getName() && ID->getModulePath().size() == 1 && !shouldImportSelfImportClang(ID, SF)) { // If the imported module name is the same as the current module, // produce a diagnostic. StringRef filename = llvm::sys::path::filename(SF.getFilename()); if (filename.empty()) Context.Diags.diagnose(ID, diag::sema_import_current_module, ID->getModulePath().front().first); else Context.Diags.diagnose(ID, diag::sema_import_current_module_with_file, filename, ID->getModulePath().front().first); ID->setModule(SF.getParentModule()); return; } Module *M = getModule(ID->getModulePath()); if (!M) { SmallString<64> modulePathStr; interleave(ID->getModulePath(), [&](ImportDecl::AccessPathElement elem) { modulePathStr += elem.first.str(); }, [&] { modulePathStr += "."; }); auto diagKind = diag::sema_no_import; if (SF.Kind == SourceFileKind::REPL || Context.LangOpts.DebuggerSupport) diagKind = diag::sema_no_import_repl; diagnose(ID->getLoc(), diagKind, modulePathStr); if (Context.SearchPathOpts.SDKPath.empty() && llvm::Triple(llvm::sys::getProcessTriple()).isMacOSX()) { diagnose(SourceLoc(), diag::sema_no_import_no_sdk); diagnose(SourceLoc(), diag::sema_no_import_no_sdk_xcrun); } return; } ID->setModule(M); Module *topLevelModule; if (ID->getModulePath().size() == 1) { topLevelModule = M; } else { // If we imported a submodule, import the top-level module as well. Identifier topLevelName = ID->getModulePath().front().first; topLevelModule = Context.getLoadedModule(topLevelName); assert(topLevelModule && "top-level module missing"); } auto *testableAttr = ID->getAttrs().getAttribute<TestableAttr>(); if (testableAttr && !topLevelModule->isTestingEnabled() && Context.LangOpts.EnableTestableAttrRequiresTestableModule) { diagnose(ID->getModulePath().front().second, diag::module_not_testable, topLevelModule->getName()); testableAttr->setInvalid(); } ImportOptions options; if (ID->isExported()) options |= SourceFile::ImportFlags::Exported; if (testableAttr) options |= SourceFile::ImportFlags::Testable; imports.push_back({ { ID->getDeclPath(), M }, options }); if (topLevelModule != M) imports.push_back({ { ID->getDeclPath(), topLevelModule }, options }); if (ID->getImportKind() != ImportKind::Module) { // If we're importing a specific decl, validate the import kind. using namespace namelookup; auto declPath = ID->getDeclPath(); // FIXME: Doesn't handle scoped testable imports correctly. assert(declPath.size() == 1 && "can't handle sub-decl imports"); SmallVector<ValueDecl *, 8> decls; lookupInModule(topLevelModule, declPath, declPath.front().first, decls, NLKind::QualifiedLookup, ResolutionKind::Overloadable, /*resolver*/nullptr, &SF); if (decls.empty()) { diagnose(ID, diag::no_decl_in_module) .highlight(SourceRange(declPath.front().second, declPath.back().second)); return; } ID->setDecls(Context.AllocateCopy(decls)); Optional<ImportKind> actualKind = ImportDecl::findBestImportKind(decls); if (!actualKind.hasValue()) { // FIXME: print entire module name? 
      diagnose(ID, diag::ambiguous_decl_in_module,
               declPath.front().first, M->getName());
      for (auto next : decls)
        diagnose(next, diag::found_candidate);

    } else if (!isCompatibleImportKind(ID->getImportKind(), *actualKind)) {
      diagnose(ID, diag::imported_decl_is_wrong_kind,
               declPath.front().first,
               getImportKindString(ID->getImportKind()),
               static_cast<unsigned>(*actualKind))
        .fixItReplace(SourceRange(ID->getKindLoc()),
                      getImportKindString(*actualKind));

      if (decls.size() == 1)
        diagnose(decls.front(), diag::decl_declared_here,
                 decls.front()->getName());
    }
  }
}
void SILGenFunction::emitArtificialTopLevel(ClassDecl *mainClass) { // Load argc and argv from the entry point arguments. SILValue argc = F.begin()->getBBArg(0); SILValue argv = F.begin()->getBBArg(1); switch (mainClass->getArtificialMainKind()) { case ArtificialMainKind::UIApplicationMain: { // Emit a UIKit main. // return UIApplicationMain(C_ARGC, C_ARGV, nil, ClassName); CanType NSStringTy = SGM.Types.getNSStringType(); CanType OptNSStringTy = OptionalType::get(NSStringTy)->getCanonicalType(); CanType IUOptNSStringTy = ImplicitlyUnwrappedOptionalType::get(NSStringTy)->getCanonicalType(); // Look up UIApplicationMain. // FIXME: Doing an AST lookup here is gross and not entirely sound; // we're getting away with it because the types are guaranteed to already // be imported. ASTContext &ctx = getASTContext(); Module *UIKit = ctx.getLoadedModule(ctx.getIdentifier("UIKit")); SmallVector<ValueDecl *, 1> results; UIKit->lookupQualified(UIKit->getDeclaredType(), ctx.getIdentifier("UIApplicationMain"), NL_QualifiedDefault, /*resolver*/nullptr, results); assert(!results.empty() && "couldn't find UIApplicationMain in UIKit"); assert(results.size() == 1 && "more than one UIApplicationMain?"); SILDeclRef mainRef{results.front(), ResilienceExpansion::Minimal, SILDeclRef::ConstructAtNaturalUncurryLevel, /*isForeign*/true}; auto UIApplicationMainFn = SGM.M.getOrCreateFunction(mainClass, mainRef, NotForDefinition); auto fnTy = UIApplicationMainFn->getLoweredFunctionType(); // Get the class name as a string using NSStringFromClass. CanType mainClassTy = mainClass->getDeclaredTypeInContext()->getCanonicalType(); CanType mainClassMetaty = CanMetatypeType::get(mainClassTy, MetatypeRepresentation::ObjC); ProtocolDecl *anyObjectProtocol = ctx.getProtocol(KnownProtocolKind::AnyObject); auto mainClassAnyObjectConformance = ProtocolConformanceRef( *SGM.M.getSwiftModule()->lookupConformance(mainClassTy, anyObjectProtocol, nullptr)); CanType anyObjectTy = anyObjectProtocol ->getDeclaredTypeInContext() ->getCanonicalType(); CanType anyObjectMetaTy = CanExistentialMetatypeType::get(anyObjectTy, MetatypeRepresentation::ObjC); auto NSStringFromClassType = SILFunctionType::get(nullptr, SILFunctionType::ExtInfo() .withRepresentation(SILFunctionType::Representation:: CFunctionPointer), ParameterConvention::Direct_Unowned, SILParameterInfo(anyObjectMetaTy, ParameterConvention::Direct_Unowned), SILResultInfo(OptNSStringTy, ResultConvention::Autoreleased), /*error result*/ None, ctx); auto NSStringFromClassFn = SGM.M.getOrCreateFunction(mainClass, "NSStringFromClass", SILLinkage::PublicExternal, NSStringFromClassType, IsBare, IsTransparent, IsNotFragile); auto NSStringFromClass = B.createFunctionRef(mainClass, NSStringFromClassFn); SILValue metaTy = B.createMetatype(mainClass, SILType::getPrimitiveObjectType(mainClassMetaty)); metaTy = B.createInitExistentialMetatype(mainClass, metaTy, SILType::getPrimitiveObjectType(anyObjectMetaTy), ctx.AllocateCopy( llvm::makeArrayRef(mainClassAnyObjectConformance))); SILValue optName = B.createApply(mainClass, NSStringFromClass, NSStringFromClass->getType(), SILType::getPrimitiveObjectType(OptNSStringTy), {}, metaTy); // Fix up the string parameters to have the right type. 
SILType nameArgTy = fnTy->getSILArgumentType(3); assert(nameArgTy == fnTy->getSILArgumentType(2)); auto managedName = ManagedValue::forUnmanaged(optName); SILValue nilValue; if (optName->getType() == nameArgTy) { nilValue = getOptionalNoneValue(mainClass, getTypeLowering(OptNSStringTy)); } else { assert(nameArgTy.getSwiftRValueType() == IUOptNSStringTy); nilValue = getOptionalNoneValue(mainClass, getTypeLowering(IUOptNSStringTy)); managedName = emitOptionalToOptional( mainClass, managedName, SILType::getPrimitiveObjectType(IUOptNSStringTy), [](SILGenFunction &, SILLocation, ManagedValue input, SILType) { return input; }); } // Fix up argv to have the right type. auto argvTy = fnTy->getSILArgumentType(1); SILType unwrappedTy = argvTy; if (Type innerTy = argvTy.getSwiftRValueType()->getAnyOptionalObjectType()){ auto canInnerTy = innerTy->getCanonicalType(); unwrappedTy = SILType::getPrimitiveObjectType(canInnerTy); } auto managedArgv = ManagedValue::forUnmanaged(argv); if (unwrappedTy != argv->getType()) { auto converted = emitPointerToPointer(mainClass, managedArgv, argv->getType().getSwiftRValueType(), unwrappedTy.getSwiftRValueType()); managedArgv = std::move(converted).getAsSingleValue(*this, mainClass); } if (unwrappedTy != argvTy) { managedArgv = getOptionalSomeValue(mainClass, managedArgv, getTypeLowering(argvTy)); } auto UIApplicationMain = B.createFunctionRef(mainClass, UIApplicationMainFn); SILValue args[] = {argc, managedArgv.getValue(), nilValue, managedName.getValue()}; B.createApply(mainClass, UIApplicationMain, UIApplicationMain->getType(), argc->getType(), {}, args); SILValue r = B.createIntegerLiteral(mainClass, SILType::getBuiltinIntegerType(32, ctx), 0); auto rType = F.getLoweredFunctionType()->getSingleResult().getSILType(); if (r->getType() != rType) r = B.createStruct(mainClass, rType, r); Cleanups.emitCleanupsForReturn(mainClass); B.createReturn(mainClass, r); return; } case ArtificialMainKind::NSApplicationMain: { // Emit an AppKit main. // return NSApplicationMain(C_ARGC, C_ARGV); SILParameterInfo argTypes[] = { SILParameterInfo(argc->getType().getSwiftRValueType(), ParameterConvention::Direct_Unowned), SILParameterInfo(argv->getType().getSwiftRValueType(), ParameterConvention::Direct_Unowned), }; auto NSApplicationMainType = SILFunctionType::get(nullptr, SILFunctionType::ExtInfo() // Should be C calling convention, but NSApplicationMain // has an overlay to fix the type of argv. .withRepresentation(SILFunctionType::Representation::Thin), ParameterConvention::Direct_Unowned, argTypes, SILResultInfo(argc->getType().getSwiftRValueType(), ResultConvention::Unowned), /*error result*/ None, getASTContext()); auto NSApplicationMainFn = SGM.M.getOrCreateFunction(mainClass, "NSApplicationMain", SILLinkage::PublicExternal, NSApplicationMainType, IsBare, IsTransparent, IsNotFragile); auto NSApplicationMain = B.createFunctionRef(mainClass, NSApplicationMainFn); SILValue args[] = { argc, argv }; B.createApply(mainClass, NSApplicationMain, NSApplicationMain->getType(), argc->getType(), {}, args); SILValue r = B.createIntegerLiteral(mainClass, SILType::getBuiltinIntegerType(32, getASTContext()), 0); auto rType = F.getLoweredFunctionType()->getSingleResult().getSILType(); if (r->getType() != rType) r = B.createStruct(mainClass, rType, r); B.createReturn(mainClass, r); return; } } }
void swift::ide::printSubmoduleInterface( Module *M, ArrayRef<StringRef> FullModuleName, ArrayRef<StringRef> GroupNames, ModuleTraversalOptions TraversalOptions, ASTPrinter &Printer, const PrintOptions &Options, const bool PrintSynthesizedExtensions) { auto AdjustedOptions = Options; adjustPrintOptions(AdjustedOptions); SmallVector<Decl *, 1> Decls; M->getDisplayDecls(Decls); auto &SwiftContext = M->getASTContext(); auto &Importer = static_cast<ClangImporter &>(*SwiftContext.getClangModuleLoader()); const clang::Module *InterestingClangModule = nullptr; SmallVector<ImportDecl *, 1> ImportDecls; llvm::DenseSet<const clang::Module *> ClangModulesForImports; SmallVector<Decl *, 1> SwiftDecls; llvm::DenseMap<const clang::Module *, SmallVector<std::pair<Decl *, clang::SourceLocation>, 1>> ClangDecls; // Drop top-level module name. FullModuleName = FullModuleName.slice(1); InterestingClangModule = M->findUnderlyingClangModule(); if (InterestingClangModule) { for (StringRef Name : FullModuleName) { InterestingClangModule = InterestingClangModule->findSubmodule(Name); if (!InterestingClangModule) return; } } else { assert(FullModuleName.empty()); } // If we're printing recursively, find all of the submodules to print. if (InterestingClangModule) { if (TraversalOptions) { SmallVector<const clang::Module *, 8> Worklist; SmallPtrSet<const clang::Module *, 8> Visited; Worklist.push_back(InterestingClangModule); Visited.insert(InterestingClangModule); while (!Worklist.empty()) { const clang::Module *CM = Worklist.pop_back_val(); if (!(TraversalOptions & ModuleTraversal::VisitHidden) && CM->IsExplicit) continue; ClangDecls.insert({ CM, {} }); // If we're supposed to visit submodules, add them now. if (TraversalOptions & ModuleTraversal::VisitSubmodules) { for (auto Sub = CM->submodule_begin(), SubEnd = CM->submodule_end(); Sub != SubEnd; ++Sub) { if (Visited.insert(*Sub).second) Worklist.push_back(*Sub); } } } } else { ClangDecls.insert({ InterestingClangModule, {} }); } } // Collect those submodules that are actually imported but have no import decls // in the module. llvm::SmallPtrSet<const clang::Module *, 16> NoImportSubModules; if (InterestingClangModule) { // Assume all submodules are missing. for (auto It =InterestingClangModule->submodule_begin(); It != InterestingClangModule->submodule_end(); It ++) { NoImportSubModules.insert(*It); } } llvm::StringMap<std::vector<Decl*>> FileRangedDecls; // Separate the declarations that we are going to print into different // buckets. for (Decl *D : Decls) { // Skip declarations that are not accessible. if (auto *VD = dyn_cast<ValueDecl>(D)) { if (Options.AccessibilityFilter > Accessibility::Private && VD->hasAccessibility() && VD->getFormalAccess() < Options.AccessibilityFilter) continue; } auto ShouldPrintImport = [&](ImportDecl *ImportD) -> bool { if (!InterestingClangModule) return true; auto ClangMod = ImportD->getClangModule(); if (!ClangMod) return true; if (!ClangMod->isSubModule()) return true; if (ClangMod == InterestingClangModule) return false; // FIXME: const-ness on the clang API. return ClangMod->isSubModuleOf( const_cast<clang::Module*>(InterestingClangModule)); }; if (auto ID = dyn_cast<ImportDecl>(D)) { if (ShouldPrintImport(ID)) { if (ID->getClangModule()) // Erase those submodules that are not missing. NoImportSubModules.erase(ID->getClangModule()); if (ID->getImportKind() == ImportKind::Module) { // Make sure we don't print duplicate imports, due to getting imports // for both a clang module and its overlay. 
if (auto *ClangMod = getUnderlyingClangModuleForImport(ID)) { auto P = ClangModulesForImports.insert(ClangMod); bool IsNew = P.second; if (!IsNew) continue; } } ImportDecls.push_back(ID); } continue; } auto addToClangDecls = [&](Decl *D) { assert(D->hasClangNode()); auto CN = D->getClangNode(); clang::SourceLocation Loc = CN.getLocation(); auto *OwningModule = Importer.getClangOwningModule(CN); auto I = ClangDecls.find(OwningModule); if (I != ClangDecls.end()) { I->second.push_back({ D, Loc }); } }; if (D->hasClangNode()) { addToClangDecls(D); continue; } if (FullModuleName.empty()) { // If group name is given and the decl does not belong to the group, skip it. if (!GroupNames.empty()){ if (auto Target = D->getGroupName()) { if (std::find(GroupNames.begin(), GroupNames.end(), Target.getValue()) != GroupNames.end()) { FileRangedDecls.insert(std::make_pair(D->getSourceFileName().getValue(), std::vector<Decl*>())).first->getValue().push_back(D); } } continue; } // Add Swift decls if we are printing the top-level module. SwiftDecls.push_back(D); } } if (!GroupNames.empty()) { assert(SwiftDecls.empty()); for (auto &Entry : FileRangedDecls) { auto &DeclsInFile = Entry.getValue(); std::sort(DeclsInFile.begin(), DeclsInFile.end(), [](Decl* LHS, Decl *RHS) { assert(LHS->getSourceOrder().hasValue()); assert(RHS->getSourceOrder().hasValue()); return LHS->getSourceOrder().getValue() < RHS->getSourceOrder().getValue(); }); for (auto D : DeclsInFile) { SwiftDecls.push_back(D); } } } // Create the missing import decls and add to the collector. for (auto *SM : NoImportSubModules) { ImportDecls.push_back(createImportDecl(M->getASTContext(), M, SM, {})); } auto &ClangSourceManager = Importer.getClangASTContext().getSourceManager(); // Sort imported declarations in source order *within a submodule*. for (auto &P : ClangDecls) { std::sort(P.second.begin(), P.second.end(), [&](std::pair<Decl *, clang::SourceLocation> LHS, std::pair<Decl *, clang::SourceLocation> RHS) -> bool { return ClangSourceManager.isBeforeInTranslationUnit(LHS.second, RHS.second); }); } // Sort Swift declarations so that we print them in a consistent order. std::sort(ImportDecls.begin(), ImportDecls.end(), [](ImportDecl *LHS, ImportDecl *RHS) -> bool { auto LHSPath = LHS->getFullAccessPath(); auto RHSPath = RHS->getFullAccessPath(); for (unsigned i = 0, e = std::min(LHSPath.size(), RHSPath.size()); i != e; i++) { if (int Ret = LHSPath[i].first.str().compare(RHSPath[i].first.str())) return Ret < 0; } return false; }); // If the group name is specified, we sort them according to their source order, // which is the order preserved by getTopLeveDecls. if (GroupNames.empty()) { std::sort(SwiftDecls.begin(), SwiftDecls.end(), [&](Decl *LHS, Decl *RHS) -> bool { auto *LHSValue = dyn_cast<ValueDecl>(LHS); auto *RHSValue = dyn_cast<ValueDecl>(RHS); if (LHSValue && RHSValue) { StringRef LHSName = LHSValue->getName().str(); StringRef RHSName = RHSValue->getName().str(); if (int Ret = LHSName.compare(RHSName)) return Ret < 0; // FIXME: this is not sufficient to establish a total order for overloaded // decls. 
return LHS->getKind() < RHS->getKind(); } return LHS->getKind() < RHS->getKind(); }); } ASTPrinter *PrinterToUse = &Printer; ClangCommentPrinter RegularCommentPrinter(Printer, Importer); if (Options.PrintRegularClangComments) PrinterToUse = &RegularCommentPrinter; auto PrintDecl = [&](Decl *D) -> bool { ASTPrinter &Printer = *PrinterToUse; if (!shouldPrint(D, AdjustedOptions)) { Printer.callAvoidPrintDeclPost(D); return false; } if (auto Ext = dyn_cast<ExtensionDecl>(D)) { // Clang extensions (categories) are always printed in source order. // Swift extensions are printed with their associated type unless it's // a cross-module extension. if (!Ext->hasClangNode()) { auto ExtendedNominal = Ext->getExtendedType()->getAnyNominal(); if (Ext->getModuleContext() == ExtendedNominal->getModuleContext()) return false; } } std::unique_ptr<SynthesizedExtensionAnalyzer> pAnalyzer; if (auto NTD = dyn_cast<NominalTypeDecl>(D)) { if (PrintSynthesizedExtensions) { pAnalyzer.reset(new SynthesizedExtensionAnalyzer(NTD, AdjustedOptions)); AdjustedOptions.shouldCloseNominal = !pAnalyzer->hasMergeGroup( SynthesizedExtensionAnalyzer::MergeGroupKind::MergableWithTypeDef); } } if (D->print(Printer, AdjustedOptions)) { if (AdjustedOptions.shouldCloseNominal) Printer << "\n"; AdjustedOptions.shouldCloseNominal = true; if (auto NTD = dyn_cast<NominalTypeDecl>(D)) { std::queue<NominalTypeDecl *> SubDecls{{NTD}}; while (!SubDecls.empty()) { auto NTD = SubDecls.front(); SubDecls.pop(); // Add sub-types of NTD. for (auto Sub : NTD->getMembers()) if (auto N = dyn_cast<NominalTypeDecl>(Sub)) SubDecls.push(N); if (!PrintSynthesizedExtensions) { // Print Ext and add sub-types of Ext. for (auto Ext : NTD->getExtensions()) { if (!shouldPrint(Ext, AdjustedOptions)) { Printer.callAvoidPrintDeclPost(Ext); continue; } if (Ext->hasClangNode()) continue; // will be printed in its source location, see above. Printer << "\n"; Ext->print(Printer, AdjustedOptions); Printer << "\n"; for (auto Sub : Ext->getMembers()) if (auto N = dyn_cast<NominalTypeDecl>(Sub)) SubDecls.push(N); } continue; } bool IsTopLevelDecl = D == NTD; // If printed Decl is the top-level, merge the constraint-free extensions // into the main body. if (IsTopLevelDecl) { // Print the part that should be merged with the type decl. pAnalyzer->forEachExtensionMergeGroup( SynthesizedExtensionAnalyzer::MergeGroupKind::MergableWithTypeDef, [&](ArrayRef<ExtensionAndIsSynthesized> Decls){ for (auto ET : Decls) { AdjustedOptions.shouldOpenExtension = false; AdjustedOptions.shouldCloseExtension = Decls.back().first == ET.first; if (ET.second) AdjustedOptions. initArchetypeTransformerForSynthesizedExtensions(NTD, pAnalyzer.get()); ET.first->print(Printer, AdjustedOptions); if (ET.second) AdjustedOptions. clearArchetypeTransformerForSynthesizedExtensions(); if (AdjustedOptions.shouldCloseExtension) Printer << "\n"; } }); } // If the printed Decl is not the top-level one, reset analyzer. if (!IsTopLevelDecl) pAnalyzer.reset(new SynthesizedExtensionAnalyzer(NTD, AdjustedOptions)); // Print the rest as synthesized extensions. pAnalyzer->forEachExtensionMergeGroup( // For top-level decls, only contraint extensions are to print; // Since the rest are merged into the main body. IsTopLevelDecl ? SynthesizedExtensionAnalyzer::MergeGroupKind::UnmergableWithTypeDef : // For sub-decls, all extensions should be printed. 
SynthesizedExtensionAnalyzer::MergeGroupKind::All, [&](ArrayRef<ExtensionAndIsSynthesized> Decls){ for (auto ET : Decls) { AdjustedOptions.shouldOpenExtension = Decls.front().first == ET.first; AdjustedOptions.shouldCloseExtension = Decls.back().first == ET.first; if (AdjustedOptions.shouldOpenExtension) Printer << "\n"; if (ET.second) AdjustedOptions. initArchetypeTransformerForSynthesizedExtensions(NTD, pAnalyzer.get()); ET.first->print(Printer, AdjustedOptions); if (ET.second) AdjustedOptions. clearArchetypeTransformerForSynthesizedExtensions(); if (AdjustedOptions.shouldCloseExtension) Printer << "\n"; } }); } } return true; } return false; }; // Imports from the stdlib are internal details that don't need to be exposed. if (!M->isStdlibModule()) { for (auto *D : ImportDecls) PrintDecl(D); Printer << "\n"; } { using ModuleAndName = std::pair<const clang::Module *, std::string>; SmallVector<ModuleAndName, 8> ClangModules; for (auto P : ClangDecls) { ClangModules.push_back({ P.first, P.first->getFullModuleName() }); } // Sort modules by name. std::sort(ClangModules.begin(), ClangModules.end(), [](const ModuleAndName &LHS, const ModuleAndName &RHS) -> bool { return LHS.second < RHS.second; }); for (auto CM : ClangModules) { for (auto DeclAndLoc : ClangDecls[CM.first]) PrintDecl(DeclAndLoc.first); } } if (!(TraversalOptions & ModuleTraversal::SkipOverlay) || !InterestingClangModule) { for (auto *D : SwiftDecls) { if (PrintDecl(D)) Printer << "\n"; } } }
Substitution Substitution::subst(Module *module, ArrayRef<Substitution> subs, TypeSubstitutionMap &subMap, ArchetypeConformanceMap &conformanceMap) const { // Substitute the replacement. Type substReplacement = Replacement.subst(module, subMap, None); assert(substReplacement && "substitution replacement failed"); if (substReplacement->isEqual(Replacement)) return *this; if (Conformance.empty()) { return {substReplacement, Conformance}; } bool conformancesChanged = false; SmallVector<ProtocolConformanceRef, 4> substConformances; substConformances.reserve(Conformance.size()); for (auto c : Conformance) { // If we have a concrete conformance, we need to substitute the // conformance to apply to the new type. if (c.isConcrete()) { auto substC = c.getConcrete()->subst(module, substReplacement, subs, subMap, conformanceMap); substConformances.push_back(ProtocolConformanceRef(substC)); if (c != substConformances.back()) conformancesChanged = true; continue; } // Otherwise, we may need to fill in the conformance. ProtocolDecl *proto = c.getAbstract(); Optional<ProtocolConformanceRef> conformance; // If the original type was an archetype, check the conformance map. if (auto replacementArch = Replacement->getAs<ArchetypeType>()) { // Check for conformances for the type that apply to the original // substituted archetype. auto it = conformanceMap.find(replacementArch); assert(it != conformanceMap.end()); for (ProtocolConformanceRef found : it->second) { auto foundProto = found.getRequirement(); if (foundProto == proto) { conformance = found; break; } else if (foundProto->inheritsFrom(proto)) { if (found.isConcrete()) { conformance = ProtocolConformanceRef( found.getConcrete()->getInheritedConformance(proto)); } else { conformance = found; } break; } } } // If that didn't find anything, we can still synthesize AnyObject // conformances from thin air. FIXME: gross. if (!conformance && proto->isSpecificProtocol(KnownProtocolKind::AnyObject)) { auto classDecl = substReplacement->getClassOrBoundGenericClass(); SmallVector<ProtocolConformance *, 1> lookupResults; classDecl->lookupConformance(classDecl->getParentModule(), proto, lookupResults); conformance = ProtocolConformanceRef(lookupResults.front()); } if (conformance) { if (conformance->isConcrete()) conformancesChanged = true; substConformances.push_back(*conformance); } else { assert(substReplacement->hasDependentProtocolConformances() && "couldn't find concrete conformance for concrete type?"); substConformances.push_back(ProtocolConformanceRef(proto)); } } assert(substConformances.size() == Conformance.size()); ArrayRef<ProtocolConformanceRef> substConfs; if (conformancesChanged) substConfs = module->getASTContext().AllocateCopy(substConformances); else substConfs = Conformance; return Substitution{substReplacement, substConfs}; }
/// InsertUnwindResumeCalls - Convert the ResumeInsts that are still present /// into calls to the appropriate _Unwind_Resume function. bool DwarfEHPrepare::InsertUnwindResumeCalls(Function &Fn) { SmallVector<ResumeInst*, 16> Resumes; SmallVector<LandingPadInst*, 16> CleanupLPads; for (BasicBlock &BB : Fn) { if (auto *RI = dyn_cast<ResumeInst>(BB.getTerminator())) Resumes.push_back(RI); if (auto *LP = BB.getLandingPadInst()) if (LP->isCleanup()) CleanupLPads.push_back(LP); } if (Resumes.empty()) return false; // Check the personality, don't do anything if it's funclet-based. EHPersonality Pers = classifyEHPersonality(Fn.getPersonalityFn()); if (isFuncletEHPersonality(Pers)) return false; LLVMContext &Ctx = Fn.getContext(); size_t ResumesLeft = pruneUnreachableResumes(Fn, Resumes, CleanupLPads); if (ResumesLeft == 0) return true; // We pruned them all. // Find the rewind function if we didn't already. if (!RewindFunction) { FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), Type::getInt8PtrTy(Ctx), false); const char *RewindName = TLI->getLibcallName(RTLIB::UNWIND_RESUME); RewindFunction = Fn.getParent()->getOrInsertFunction(RewindName, FTy); } // Create the basic block where the _Unwind_Resume call will live. if (ResumesLeft == 1) { // Instead of creating a new BB and PHI node, just append the call to // _Unwind_Resume to the end of the single resume block. ResumeInst *RI = Resumes.front(); BasicBlock *UnwindBB = RI->getParent(); Value *ExnObj = GetExceptionObject(RI); // Call the _Unwind_Resume function. CallInst *CI = CallInst::Create(RewindFunction, ExnObj, "", UnwindBB); CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME)); // We never expect _Unwind_Resume to return. new UnreachableInst(Ctx, UnwindBB); return true; } BasicBlock *UnwindBB = BasicBlock::Create(Ctx, "unwind_resume", &Fn); PHINode *PN = PHINode::Create(Type::getInt8PtrTy(Ctx), ResumesLeft, "exn.obj", UnwindBB); // Extract the exception object from the ResumeInst and add it to the PHI node // that feeds the _Unwind_Resume call. for (ResumeInst *RI : Resumes) { BasicBlock *Parent = RI->getParent(); BranchInst::Create(UnwindBB, Parent); Value *ExnObj = GetExceptionObject(RI); PN->addIncoming(ExnObj, Parent); ++NumResumesLowered; } // Call the function. CallInst *CI = CallInst::Create(RewindFunction, PN, "", UnwindBB); CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME)); // We never expect _Unwind_Resume to return. new UnreachableInst(Ctx, UnwindBB); return true; }
TypeIndex llvm::codeview::getModifiedType(const CVType &CVT) {
  assert(CVT.kind() == LF_MODIFIER);
  SmallVector<TypeIndex, 1> Refs;
  discoverTypeIndices(CVT, Refs);
  return Refs.front();
}
void NamedParameterCheck::check(const MatchFinder::MatchResult &Result) { const SourceManager &SM = *Result.SourceManager; const auto *Function = Result.Nodes.getNodeAs<FunctionDecl>("decl"); SmallVector<std::pair<const FunctionDecl *, unsigned>, 4> UnnamedParams; // Ignore implicitly generated members. if (Function->isImplicit()) return; // Ignore declarations without a definition if we're not dealing with an // overriden method. const FunctionDecl *Definition = nullptr; if ((!Function->isDefined(Definition) || Function->isDefaulted() || Function->isDeleted()) && (!isa<CXXMethodDecl>(Function) || cast<CXXMethodDecl>(Function)->size_overridden_methods() == 0)) return; // TODO: Handle overloads. // TODO: We could check that all redeclarations use the same name for // arguments in the same position. for (unsigned I = 0, E = Function->getNumParams(); I != E; ++I) { const ParmVarDecl *Parm = Function->getParamDecl(I); // Look for unnamed parameters. if (!Parm->getName().empty()) continue; // Don't warn on the dummy argument on post-inc and post-dec operators. if ((Function->getOverloadedOperator() == OO_PlusPlus || Function->getOverloadedOperator() == OO_MinusMinus) && Parm->getType()->isSpecificBuiltinType(BuiltinType::Int)) continue; // Sanity check the source locations. if (!Parm->getLocation().isValid() || Parm->getLocation().isMacroID() || !SM.isWrittenInSameFile(Parm->getLocStart(), Parm->getLocation())) continue; // Skip gmock testing::Unused parameters. if (auto Typedef = Parm->getType()->getAs<clang::TypedefType>()) if (Typedef->getDecl()->getQualifiedNameAsString() == "testing::Unused") continue; // Skip std::nullptr_t. if (Parm->getType().getCanonicalType()->isNullPtrType()) continue; // Look for comments. We explicitly want to allow idioms like // void foo(int /*unused*/) const char *Begin = SM.getCharacterData(Parm->getLocStart()); const char *End = SM.getCharacterData(Parm->getLocation()); StringRef Data(Begin, End - Begin); if (Data.find("/*") != StringRef::npos) continue; UnnamedParams.push_back(std::make_pair(Function, I)); } // Emit only one warning per function but fixits for all unnamed parameters. if (!UnnamedParams.empty()) { const ParmVarDecl *FirstParm = UnnamedParams.front().first->getParamDecl(UnnamedParams.front().second); auto D = diag(FirstParm->getLocation(), "all parameters should be named in a function"); for (auto P : UnnamedParams) { // Fallback to an unused marker. StringRef NewName = "unused"; // If the method is overridden, try to copy the name from the base method // into the overrider. const auto *M = dyn_cast<CXXMethodDecl>(P.first); if (M && M->size_overridden_methods() > 0) { const ParmVarDecl *OtherParm = (*M->begin_overridden_methods())->getParamDecl(P.second); StringRef Name = OtherParm->getName(); if (!Name.empty()) NewName = Name; } // If the definition has a named parameter use that name. if (Definition) { const ParmVarDecl *DefParm = Definition->getParamDecl(P.second); StringRef Name = DefParm->getName(); if (!Name.empty()) NewName = Name; } // Now insert the comment. Note that getLocation() points to the place // where the name would be, this allows us to also get complex cases like // function pointers right. const ParmVarDecl *Parm = P.first->getParamDecl(P.second); D << FixItHint::CreateInsertion(Parm->getLocation(), " /*" + NewName.str() + "*/"); } } }
Substitution Substitution::subst(Module *module, ArrayRef<Substitution> subs, TypeSubstitutionMap &subMap, ArchetypeConformanceMap &conformanceMap) const { // Substitute the replacement. Type substReplacement = Replacement.subst(module, subMap, None); assert(substReplacement && "substitution replacement failed"); if (substReplacement->isEqual(Replacement)) return *this; bool conformancesChanged = false; SmallVector<ProtocolConformance *, 4> substConformance; substConformance.reserve(Conformance.size()); // When substituting a concrete type for an archetype, we need to fill in the // conformances. if (auto replacementArch = Replacement->getAs<ArchetypeType>()) { if (!substReplacement->hasDependentProtocolConformances()) { // Find the conformances mapped to the archetype. auto found = conformanceMap.find(replacementArch); assert(found != conformanceMap.end() && "no conformances for replaced archetype?!"); auto &foundConformances = found->second; // If the substituted replacement archetype has no conformances, // then there are no conformances to substitute. if (foundConformances.empty()) return Substitution{Archetype, substReplacement, Conformance}; conformancesChanged = true; // Get the conformances for the type that apply to the original // substituted archetype. for (auto proto : Archetype->getConformsTo()) { for (auto c : foundConformances) { if (c->getProtocol() == proto) { substConformance.push_back(c); goto found_conformance; } if (c->getProtocol()->inheritsFrom(proto)) { substConformance.push_back(c->getInheritedConformance(proto)); goto found_conformance; } } // FIXME: AnyObject conformances can be synthesized from // thin air. Gross. if (proto->isSpecificProtocol(KnownProtocolKind::AnyObject)) { auto classDecl = substReplacement->getClassOrBoundGenericClass(); SmallVector<ProtocolConformance *, 1> conformances; classDecl->lookupConformance(classDecl->getParentModule(), proto, conformances); substConformance.push_back(conformances.front()); goto found_conformance; } assert(false && "did not find conformance for archetype requirement?!"); found_conformance:; } } } else { // If we substituted a concrete type for another, we need to substitute the // conformance to apply to the new type. for (auto c : Conformance) { auto substC = c->subst(module, substReplacement, subs, subMap, conformanceMap); if (c != substC) conformancesChanged = true; substConformance.push_back(substC); } } ArrayRef<ProtocolConformance *> substConformanceRef; if (conformancesChanged) substConformanceRef = module->getASTContext().AllocateCopy(substConformance); else substConformanceRef = Conformance; assert(substReplacement->hasDependentProtocolConformances() || substConformanceRef.size() == Archetype->getConformsTo().size()); return Substitution{Archetype, substReplacement, substConformanceRef}; }
static std::string fixupWithCase(StringRef Name,
                                 IdentifierNamingCheck::CaseType Case) {
  static llvm::Regex Splitter(
      "([a-z0-9A-Z]*)(_+)|([A-Z]?[a-z0-9]+)([A-Z]|$)|([A-Z]+)([A-Z]|$)");

  SmallVector<StringRef, 8> Substrs;
  Name.split(Substrs, "_", -1, false);

  SmallVector<StringRef, 8> Words;
  for (auto Substr : Substrs) {
    while (!Substr.empty()) {
      SmallVector<StringRef, 8> Groups;
      if (!Splitter.match(Substr, &Groups))
        break;

      if (Groups[2].size() > 0) {
        Words.push_back(Groups[1]);
        Substr = Substr.substr(Groups[0].size());
      } else if (Groups[3].size() > 0) {
        Words.push_back(Groups[3]);
        Substr = Substr.substr(Groups[0].size() - Groups[4].size());
      } else if (Groups[5].size() > 0) {
        Words.push_back(Groups[5]);
        Substr = Substr.substr(Groups[0].size() - Groups[6].size());
      }
    }
  }

  if (Words.empty())
    return Name;

  std::string Fixup;
  switch (Case) {
  case IdentifierNamingCheck::CT_AnyCase:
    Fixup += Name;
    break;

  case IdentifierNamingCheck::CT_LowerCase:
    for (auto const &Word : Words) {
      if (&Word != &Words.front())
        Fixup += "_";
      Fixup += Word.lower();
    }
    break;

  case IdentifierNamingCheck::CT_UpperCase:
    for (auto const &Word : Words) {
      if (&Word != &Words.front())
        Fixup += "_";
      Fixup += Word.upper();
    }
    break;

  case IdentifierNamingCheck::CT_CamelCase:
    for (auto const &Word : Words) {
      Fixup += Word.substr(0, 1).upper();
      Fixup += Word.substr(1).lower();
    }
    break;

  case IdentifierNamingCheck::CT_CamelBack:
    for (auto const &Word : Words) {
      if (&Word == &Words.front()) {
        Fixup += Word.lower();
      } else {
        Fixup += Word.substr(0, 1).upper();
        Fixup += Word.substr(1).lower();
      }
    }
    break;

  case IdentifierNamingCheck::CT_CamelSnakeCase:
    for (auto const &Word : Words) {
      if (&Word != &Words.front())
        Fixup += "_";
      Fixup += Word.substr(0, 1).upper();
      Fixup += Word.substr(1).lower();
    }
    break;

  case IdentifierNamingCheck::CT_CamelSnakeBack:
    for (auto const &Word : Words) {
      if (&Word != &Words.front()) {
        Fixup += "_";
        Fixup += Word.substr(0, 1).upper();
      } else {
        Fixup += Word.substr(0, 1).lower();
      }
      Fixup += Word.substr(1).lower();
    }
    break;
  }

  return Fixup;
}
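A worked example may help here (illustrative only, not part of the check): for the identifier "my_badVariable" the splitter above yields the words {"my", "bad", "Variable"}, and fixupWithCase then produces the following spellings.

// Assumed input: "my_badVariable"  ->  Words = {"my", "bad", "Variable"}
//   CT_LowerCase      -> "my_bad_variable"
//   CT_UpperCase      -> "MY_BAD_VARIABLE"
//   CT_CamelCase      -> "MyBadVariable"
//   CT_CamelBack      -> "myBadVariable"
//   CT_CamelSnakeCase -> "My_Bad_Variable"
//   CT_CamelSnakeBack -> "my_Bad_Variable"
std::string Fixed = fixupWithCase("my_badVariable",
                                  IdentifierNamingCheck::CT_CamelBack); // "myBadVariable"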
void SILGenFunction::emitArtificialTopLevel(ClassDecl *mainClass) { // Load argc and argv from the entry point arguments. SILValue argc = F.begin()->getArgument(0); SILValue argv = F.begin()->getArgument(1); switch (mainClass->getArtificialMainKind()) { case ArtificialMainKind::UIApplicationMain: { // Emit a UIKit main. // return UIApplicationMain(C_ARGC, C_ARGV, nil, ClassName); CanType NSStringTy = SGM.Types.getNSStringType(); CanType OptNSStringTy = OptionalType::get(NSStringTy)->getCanonicalType(); // Look up UIApplicationMain. // FIXME: Doing an AST lookup here is gross and not entirely sound; // we're getting away with it because the types are guaranteed to already // be imported. ASTContext &ctx = getASTContext(); std::pair<Identifier, SourceLoc> UIKitName = {ctx.getIdentifier("UIKit"), SourceLoc()}; ModuleDecl *UIKit = ctx .getClangModuleLoader() ->loadModule(SourceLoc(), UIKitName); assert(UIKit && "couldn't find UIKit objc module?!"); SmallVector<ValueDecl *, 1> results; UIKit->lookupQualified(UIKit, ctx.getIdentifier("UIApplicationMain"), NL_QualifiedDefault, results); assert(results.size() == 1 && "couldn't find a unique UIApplicationMain in the UIKit ObjC " "module?!"); ValueDecl *UIApplicationMainDecl = results.front(); auto mainRef = SILDeclRef(UIApplicationMainDecl).asForeign(); SILGenFunctionBuilder builder(SGM); auto UIApplicationMainFn = builder.getOrCreateFunction(mainClass, mainRef, NotForDefinition); auto fnTy = UIApplicationMainFn->getLoweredFunctionType(); SILFunctionConventions fnConv(fnTy, SGM.M); // Get the class name as a string using NSStringFromClass. CanType mainClassTy = mainClass->getDeclaredInterfaceType() ->getCanonicalType(); CanType mainClassMetaty = CanMetatypeType::get(mainClassTy, MetatypeRepresentation::ObjC); CanType anyObjectTy = ctx.getAnyObjectType(); CanType anyObjectMetaTy = CanExistentialMetatypeType::get(anyObjectTy, MetatypeRepresentation::ObjC); auto NSStringFromClassType = SILFunctionType::get(nullptr, SILFunctionType::ExtInfo() .withRepresentation(SILFunctionType::Representation:: CFunctionPointer), SILCoroutineKind::None, ParameterConvention::Direct_Unowned, SILParameterInfo(anyObjectMetaTy, ParameterConvention::Direct_Unowned), /*yields*/ {}, SILResultInfo(OptNSStringTy, ResultConvention::Autoreleased), /*error result*/ None, ctx); auto NSStringFromClassFn = builder.getOrCreateFunction( mainClass, "NSStringFromClass", SILLinkage::PublicExternal, NSStringFromClassType, IsBare, IsTransparent, IsNotSerialized); auto NSStringFromClass = B.createFunctionRef(mainClass, NSStringFromClassFn); SILValue metaTy = B.createMetatype(mainClass, SILType::getPrimitiveObjectType(mainClassMetaty)); metaTy = B.createInitExistentialMetatype(mainClass, metaTy, SILType::getPrimitiveObjectType(anyObjectMetaTy), {}); SILValue optName = B.createApply(mainClass, NSStringFromClass, NSStringFromClass->getType(), SILType::getPrimitiveObjectType(OptNSStringTy), {}, metaTy); // Fix up the string parameters to have the right type. SILType nameArgTy = fnConv.getSILArgumentType(3); assert(nameArgTy == fnConv.getSILArgumentType(2)); (void)nameArgTy; auto managedName = ManagedValue::forUnmanaged(optName); SILValue nilValue; assert(optName->getType() == nameArgTy); nilValue = getOptionalNoneValue(mainClass, getTypeLowering(OptNSStringTy)); // Fix up argv to have the right type. 
auto argvTy = fnConv.getSILArgumentType(1); SILType unwrappedTy = argvTy; if (Type innerTy = argvTy.getASTType()->getOptionalObjectType()) { auto canInnerTy = innerTy->getCanonicalType(); unwrappedTy = SILType::getPrimitiveObjectType(canInnerTy); } auto managedArgv = ManagedValue::forUnmanaged(argv); if (unwrappedTy != argv->getType()) { auto converted = emitPointerToPointer(mainClass, managedArgv, argv->getType().getASTType(), unwrappedTy.getASTType()); managedArgv = std::move(converted).getAsSingleValue(*this, mainClass); } if (unwrappedTy != argvTy) { managedArgv = getOptionalSomeValue(mainClass, managedArgv, getTypeLowering(argvTy)); } auto UIApplicationMain = B.createFunctionRef(mainClass, UIApplicationMainFn); SILValue args[] = {argc, managedArgv.getValue(), nilValue, managedName.getValue()}; B.createApply(mainClass, UIApplicationMain, UIApplicationMain->getType(), argc->getType(), {}, args); SILValue r = B.createIntegerLiteral(mainClass, SILType::getBuiltinIntegerType(32, ctx), 0); auto rType = F.getConventions().getSingleSILResultType(); if (r->getType() != rType) r = B.createStruct(mainClass, rType, r); Cleanups.emitCleanupsForReturn(mainClass, NotForUnwind); B.createReturn(mainClass, r); return; } case ArtificialMainKind::NSApplicationMain: { // Emit an AppKit main. // return NSApplicationMain(C_ARGC, C_ARGV); SILParameterInfo argTypes[] = { SILParameterInfo(argc->getType().getASTType(), ParameterConvention::Direct_Unowned), SILParameterInfo(argv->getType().getASTType(), ParameterConvention::Direct_Unowned), }; auto NSApplicationMainType = SILFunctionType::get(nullptr, SILFunctionType::ExtInfo() // Should be C calling convention, but NSApplicationMain // has an overlay to fix the type of argv. .withRepresentation(SILFunctionType::Representation::Thin), SILCoroutineKind::None, ParameterConvention::Direct_Unowned, argTypes, /*yields*/ {}, SILResultInfo(argc->getType().getASTType(), ResultConvention::Unowned), /*error result*/ None, getASTContext()); SILGenFunctionBuilder builder(SGM); auto NSApplicationMainFn = builder.getOrCreateFunction( mainClass, "NSApplicationMain", SILLinkage::PublicExternal, NSApplicationMainType, IsBare, IsTransparent, IsNotSerialized); auto NSApplicationMain = B.createFunctionRef(mainClass, NSApplicationMainFn); SILValue args[] = { argc, argv }; B.createApply(mainClass, NSApplicationMain, NSApplicationMain->getType(), argc->getType(), {}, args); SILValue r = B.createIntegerLiteral(mainClass, SILType::getBuiltinIntegerType(32, getASTContext()), 0); auto rType = F.getConventions().getSingleSILResultType(); if (r->getType() != rType) r = B.createStruct(mainClass, rType, r); B.createReturn(mainClass, r); return; } } }
void NameBinder::addImport( SmallVectorImpl<SourceFile::ImportedModuleDesc> &imports, ImportDecl *ID) { if (ID->getModulePath().front().first == SF.getParentModule()->getName() && ID->getModulePath().size() == 1 && !shouldImportSelfImportClang(ID, SF)) { // If the imported module name is the same as the current module, // produce a diagnostic. StringRef filename = llvm::sys::path::filename(SF.getFilename()); if (filename.empty()) Context.Diags.diagnose(ID, diag::sema_import_current_module, ID->getModulePath().front().first); else Context.Diags.diagnose(ID, diag::sema_import_current_module_with_file, filename, ID->getModulePath().front().first); ID->setModule(SF.getParentModule()); return; } ModuleDecl *M = getModule(ID->getModulePath()); if (!M) { SmallString<64> modulePathStr; interleave(ID->getModulePath(), [&](ImportDecl::AccessPathElement elem) { modulePathStr += elem.first.str(); }, [&] { modulePathStr += "."; }); auto diagKind = diag::sema_no_import; if (SF.Kind == SourceFileKind::REPL || Context.LangOpts.DebuggerSupport) diagKind = diag::sema_no_import_repl; diagnose(ID->getLoc(), diagKind, modulePathStr); if (Context.SearchPathOpts.SDKPath.empty() && llvm::Triple(llvm::sys::getProcessTriple()).isMacOSX()) { diagnose(SourceLoc(), diag::sema_no_import_no_sdk); diagnose(SourceLoc(), diag::sema_no_import_no_sdk_xcrun); } return; } ID->setModule(M); ModuleDecl *topLevelModule; if (ID->getModulePath().size() == 1) { topLevelModule = M; } else { // If we imported a submodule, import the top-level module as well. Identifier topLevelName = ID->getModulePath().front().first; topLevelModule = Context.getLoadedModule(topLevelName); if (!topLevelModule) { // Clang can sometimes import top-level modules as if they were // submodules. assert(!M->getFiles().empty() && isa<ClangModuleUnit>(M->getFiles().front())); topLevelModule = M; } } auto *testableAttr = ID->getAttrs().getAttribute<TestableAttr>(); if (testableAttr && !topLevelModule->isTestingEnabled() && Context.LangOpts.EnableTestableAttrRequiresTestableModule) { diagnose(ID->getModulePath().front().second, diag::module_not_testable, topLevelModule->getName()); testableAttr->setInvalid(); } auto *privateImportAttr = ID->getAttrs().getAttribute<PrivateImportAttr>(); StringRef privateImportFileName; if (privateImportAttr) { if (!topLevelModule->arePrivateImportsEnabled()) { diagnose(ID->getModulePath().front().second, diag::module_not_compiled_for_private_import, topLevelModule->getName()); privateImportAttr->setInvalid(); } else { privateImportFileName = privateImportAttr->getSourceFile(); } } ImportOptions options; if (ID->isExported()) options |= SourceFile::ImportFlags::Exported; if (testableAttr) options |= SourceFile::ImportFlags::Testable; if (privateImportAttr) options |= SourceFile::ImportFlags::PrivateImport; auto *implementationOnlyAttr = ID->getAttrs().getAttribute<ImplementationOnlyAttr>(); if (implementationOnlyAttr) { if (options.contains(SourceFile::ImportFlags::Exported)) { diagnose(ID, diag::import_implementation_cannot_be_exported, topLevelModule->getName()) .fixItRemove(implementationOnlyAttr->getRangeWithAt()); } else { options |= SourceFile::ImportFlags::ImplementationOnly; } } imports.push_back(SourceFile::ImportedModuleDesc( {ID->getDeclPath(), M}, options, privateImportFileName)); if (topLevelModule != M) imports.push_back(SourceFile::ImportedModuleDesc( {ID->getDeclPath(), topLevelModule}, options, privateImportFileName)); if (ID->getImportKind() != ImportKind::Module) { // If we're importing a specific decl, validate 
the import kind. using namespace namelookup; auto declPath = ID->getDeclPath(); // FIXME: Doesn't handle scoped testable imports correctly. assert(declPath.size() == 1 && "can't handle sub-decl imports"); SmallVector<ValueDecl *, 8> decls; lookupInModule(topLevelModule, declPath, declPath.front().first, decls, NLKind::QualifiedLookup, ResolutionKind::Overloadable, /*resolver*/nullptr, &SF); if (decls.empty()) { diagnose(ID, diag::decl_does_not_exist_in_module, static_cast<unsigned>(ID->getImportKind()), declPath.front().first, ID->getModulePath().front().first) .highlight(SourceRange(declPath.front().second, declPath.back().second)); return; } ID->setDecls(Context.AllocateCopy(decls)); Optional<ImportKind> actualKind = ImportDecl::findBestImportKind(decls); if (!actualKind.hasValue()) { // FIXME: print entire module name? diagnose(ID, diag::ambiguous_decl_in_module, declPath.front().first, M->getName()); for (auto next : decls) diagnose(next, diag::found_candidate); } else if (!isCompatibleImportKind(ID->getImportKind(), *actualKind)) { Optional<InFlightDiagnostic> emittedDiag; if (*actualKind == ImportKind::Type && isNominalImportKind(ID->getImportKind())) { assert(decls.size() == 1 && "if we start suggesting ImportKind::Type for, e.g., a mix of " "structs and classes, we'll need a different message here"); assert(isa<TypeAliasDecl>(decls.front()) && "ImportKind::Type is only the best choice for a typealias"); auto *typealias = cast<TypeAliasDecl>(decls.front()); emittedDiag.emplace(diagnose(ID, diag::imported_decl_is_wrong_kind_typealias, typealias->getDescriptiveKind(), TypeAliasType::get(typealias, Type(), SubstitutionMap(), typealias->getUnderlyingTypeLoc().getType()), getImportKindString(ID->getImportKind()))); } else { emittedDiag.emplace(diagnose(ID, diag::imported_decl_is_wrong_kind, declPath.front().first, getImportKindString(ID->getImportKind()), static_cast<unsigned>(*actualKind))); } emittedDiag->fixItReplace(SourceRange(ID->getKindLoc()), getImportKindString(*actualKind)); emittedDiag->flush(); if (decls.size() == 1) diagnose(decls.front(), diag::decl_declared_here, decls.front()->getFullName()); } } }
bool ModuleFile::readIndexBlock(llvm::BitstreamCursor &cursor) { cursor.EnterSubBlock(INDEX_BLOCK_ID); SmallVector<uint64_t, 4> scratch; StringRef blobData; while (true) { auto next = cursor.advance(); switch (next.Kind) { case llvm::BitstreamEntry::EndBlock: return true; case llvm::BitstreamEntry::Error: return false; case llvm::BitstreamEntry::SubBlock: // Unknown sub-block, which this version of the compiler won't use. if (cursor.SkipBlock()) return false; break; case llvm::BitstreamEntry::Record: scratch.clear(); blobData = {}; unsigned kind = cursor.readRecord(next.ID, scratch, &blobData); switch (kind) { case index_block::DECL_OFFSETS: assert(blobData.empty()); Decls.assign(scratch.begin(), scratch.end()); break; case index_block::DECL_CONTEXT_OFFSETS: assert(blobData.empty()); DeclContexts.assign(scratch.begin(), scratch.end()); break; case index_block::TYPE_OFFSETS: assert(blobData.empty()); Types.assign(scratch.begin(), scratch.end()); break; case index_block::IDENTIFIER_OFFSETS: assert(blobData.empty()); Identifiers.assign(scratch.begin(), scratch.end()); break; case index_block::TOP_LEVEL_DECLS: TopLevelDecls = readDeclTable(scratch, blobData); break; case index_block::OPERATORS: OperatorDecls = readDeclTable(scratch, blobData); break; case index_block::EXTENSIONS: ExtensionDecls = readDeclTable(scratch, blobData); break; case index_block::CLASS_MEMBERS: ClassMembersByName = readDeclTable(scratch, blobData); break; case index_block::OPERATOR_METHODS: OperatorMethodDecls = readDeclTable(scratch, blobData); break; case index_block::OBJC_METHODS: ObjCMethods = readObjCMethodTable(scratch, blobData); break; case index_block::ENTRY_POINT: assert(blobData.empty()); setEntryPointClassID(scratch.front()); break; case index_block::LOCAL_TYPE_DECLS: LocalTypeDecls = readLocalDeclTable(scratch, blobData); break; case index_block::LOCAL_DECL_CONTEXT_OFFSETS: assert(blobData.empty()); LocalDeclContexts.assign(scratch.begin(), scratch.end()); break; case index_block::NORMAL_CONFORMANCE_OFFSETS: assert(blobData.empty()); NormalConformances.assign(scratch.begin(), scratch.end()); break; default: // Unknown index kind, which this version of the compiler won't use. break; } break; } } }
Value* LoopTripCount::insertTripCount(Loop* L, Instruction* InsertPos)
{
  // inspired from Loop::getCanonicalInductionVariable
  BasicBlock *H = L->getHeader();
  BasicBlock* LoopPred = L->getLoopPredecessor();
  BasicBlock* startBB = NULL;//which basicblock stores start value
  int OneStep = 0;// the extra add or subtract step in the trip-count calculation
  Assert(LoopPred, "Require Loop has a Pred");
  DEBUG(errs()<<"loop depth:"<<L->getLoopDepth()<<"\n");
  /** what's the difference between using the predecessor and the preheader? */
  //RET_ON_FAIL(self->getLoopLatch()&&self->getLoopPreheader());
  //assert(self->getLoopLatch() && self->getLoopPreheader() && "need loop simplify form" );
  ret_null_fail(L->getLoopLatch(), "need loop simplify form");

  BasicBlock* TE = NULL;//True Exit
  SmallVector<BasicBlock*,4> Exits;
  L->getExitingBlocks(Exits);
  if(Exits.size()==1) TE = Exits.front();
  else{
    if(std::find(Exits.begin(),Exits.end(),L->getLoopLatch())!=Exits.end()) TE = L->getLoopLatch();
    else{
      SmallVector<llvm::Loop::Edge,4> ExitEdges;
      L->getExitEdges(ExitEdges);
      // STL erase-remove idiom: first move every element that matches the predicate
      // (exit edges whose destination ends in unreachable) to the end of the vector,
      // then erase them all in one go.
      ExitEdges.erase(std::remove_if(ExitEdges.begin(), ExitEdges.end(),
            [](llvm::Loop::Edge& I){
              return isa<UnreachableInst>(I.second->getTerminator());
            }), ExitEdges.end());
      if(ExitEdges.size()==1) TE = const_cast<BasicBlock*>(ExitEdges.front().first);
    }
  }

  //process true exit
  ret_null_fail(TE, "need have a true exit");

  Instruction* IndOrNext = NULL;
  Value* END = NULL;
  // The terminator of the exiting block: handle BranchInst and SwitchInst separately.
  // For a branch `br <cond>, a1, a2` the condition is the compare analysed below.
  if(isa<BranchInst>(TE->getTerminator())){
    const BranchInst* EBR = cast<BranchInst>(TE->getTerminator());
    Assert(EBR->isConditional(), "end branch is not conditional");
    ICmpInst* EC = dyn_cast<ICmpInst>(EBR->getCondition());
    if(EC->getPredicate() == EC->ICMP_SGT){
      // the exiting terminator jumps to code outside the loop
      Assert(!L->contains(EBR->getSuccessor(0)), *EBR<<":abnormal exit with greater than");
      OneStep += 1;
    } else if(EC->getPredicate() == EC->ICMP_EQ)
      Assert(!L->contains(EBR->getSuccessor(0)), *EBR<<":abnormal exit with equal");
    else if(EC->getPredicate() == EC->ICMP_SLT) {
      ret_null_fail(!L->contains(EBR->getSuccessor(1)), *EBR<<":abnormal exit with less than");
    } else {
      ret_null_fail(0, *EC<<" unknown combination of end condition");
    }
    IndOrNext = dyn_cast<Instruction>(castoff(EC->getOperand(0)));//strip off casts
    END = EC->getOperand(1);
    DEBUG(errs()<<"end value:"<<*EC<<"\n");
  }else if(isa<SwitchInst>(TE->getTerminator())){
    SwitchInst* ESW = const_cast<SwitchInst*>(cast<SwitchInst>(TE->getTerminator()));
    IndOrNext = dyn_cast<Instruction>(castoff(ESW->getCondition()));
    for(auto I = ESW->case_begin(),E = ESW->case_end();I!=E;++I){
      if(!L->contains(I.getCaseSuccessor())){
        ret_null_fail(!END,"");
        assert(!END && "shouldn't have two ends");
        END = I.getCaseValue();
      }
    }
    DEBUG(errs()<<"end value:"<<*ESW<<"\n");
  }else{
    assert(0 && "unknown terminator type");
  }
  ret_null_fail(L->isLoopInvariant(END), "end value should be loop invariant");// at this point we have the END value

  Value* start = NULL;
  Value* ind = NULL;
  Instruction* next = NULL;
  bool addfirst = false;// the add happens before the icmp
  DISABLE(errs()<<*IndOrNext<<"\n");
  if(isa<LoadInst>(IndOrNext)){
    //memory dependence analysis
    Value* PSi = IndOrNext->getOperand(0);//pointer operand, e.g. the slot Step.i
    int SICount[2] = {0};//[0]: stores in the predecessor, [1]: stores in the loop body
    for( auto I = PSi->use_begin(),E = PSi->use_end();I!=E;++I){
      DISABLE(errs()<<**I<<"\n");
      StoreInst* SI = dyn_cast<StoreInst>(*I);
      if(!SI || SI->getOperand(1) != PSi) continue;
      if(!start&&L->isLoopInvariant(SI->getOperand(0))) {
        if(SI->getParent() != LoopPred)
          if(std::find(pred_begin(LoopPred),pred_end(LoopPred),SI->getParent()) == pred_end(LoopPred)) continue;
        start = SI->getOperand(0);
        startBB = SI->getParent();
        ++SICount[0];
      }
      Instruction* SI0 = dyn_cast<Instruction>(SI->getOperand(0));
      if(L->contains(SI) && SI0 && SI0->getOpcode() == Instruction::Add){
        next = SI0;
        ++SICount[1];
      }
    }
    Assert(SICount[0]==1 && SICount[1]==1, "");
    ind = IndOrNext;
  }else{
    if(isa<PHINode>(IndOrNext)){
      PHINode* PHI = cast<PHINode>(IndOrNext);
      ind = IndOrNext;
      if(castoff(PHI->getIncomingValue(0)) == castoff(PHI->getIncomingValue(1)) && PHI->getParent() != H)
        ind = castoff(PHI->getIncomingValue(0));
      addfirst = false;
    }else if(IndOrNext->getOpcode() == Instruction::Add){
      next = IndOrNext;
      addfirst = true;
    }else{
      Assert(0 ,"don't know how to analyze this");
    }

    for(auto I = H->begin();isa<PHINode>(I);++I){
      PHINode* P = cast<PHINode>(I);
      if(ind && P == ind){
        //start = P->getIncomingValueForBlock(L->getLoopPredecessor());
        start = tryFindStart(P, L, startBB);
        next = dyn_cast<Instruction>(P->getIncomingValueForBlock(L->getLoopLatch()));
      }else if(next && P->getIncomingValueForBlock(L->getLoopLatch()) == next){
        //start = P->getIncomingValueForBlock(L->getLoopPredecessor());
        start = tryFindStart(P, L, startBB);
        ind = P;
      }
    }
  }

  Assert(start ,"couldn't find a start value");
  //process complex loops later
  //DEBUG(if(L->getLoopDepth()>1 || !L->getSubLoops().empty()) return NULL);
  DEBUG(errs()<<"start value:"<<*start<<"\n");
  DEBUG(errs()<<"ind value:"<<*ind<<"\n");
  DEBUG(errs()<<"next value:"<<*next<<"\n");

  //process non-add increments later
  unsigned next_phi_idx = 0;
  ConstantInt* Step = NULL,*PrevStep = NULL;/*only used if next is phi node*/
  ret_null_fail(next, "");
  PHINode* next_phi = dyn_cast<PHINode>(next);
  do{
    if(next_phi) {
      next = dyn_cast<Instruction>(next_phi->getIncomingValue(next_phi_idx));
      ret_null_fail(next, "");
      DEBUG(errs()<<"next phi "<<next_phi_idx<<":"<<*next<<"\n");
      if(Step&&PrevStep){
        Assert(Step->getSExtValue() == PrevStep->getSExtValue(),"");
      }
      PrevStep = Step;
    }
    Assert(next->getOpcode() == Instruction::Add , "why induction increment is not Add");
    Assert(next->getOperand(0) == ind ,"why induction increment does not add the induction variable itself");
    Step = dyn_cast<ConstantInt>(next->getOperand(1));
    Assert(Step,"");
  }while(next_phi && ++next_phi_idx<next_phi->getNumIncomingValues());
  //RET_ON_FAIL(Step->equalsInt(1));
  //assert(VERBOSE(Step->equalsInt(1),Step) && "why induction increment number is not 1");

  Value* RES = NULL;
  //if there is no predecessor, we can insert the code into the start value's basic block
  IRBuilder<> Builder(InsertPos);
  Assert(start->getType()->isIntegerTy() && END->getType()->isIntegerTy() , "why increment is not integer type");
  if(start->getType() != END->getType()){
    start = Builder.CreateCast(CastInst::getCastOpcode(start, false, END->getType(), false),start,END->getType());
  }
  if(Step->getType() != END->getType()){
    //Because Step is a Constant, the cast result is also a constant
    Step = dyn_cast<ConstantInt>(Builder.CreateCast(CastInst::getCastOpcode(Step, false, END->getType(), false),Step,END->getType()));
    AssertRuntime(Step);
  }
  if(Step->isMinusOne())
    RES = Builder.CreateSub(start,END);
  else//Step can't be zero
    RES = Builder.CreateSub(END, start);
  if(addfirst) OneStep -= 1;
  if(Step->isMinusOne()) OneStep*=-1;
  assert(OneStep<=1 && OneStep>=-1);
  RES = (OneStep==1)?Builder.CreateAdd(RES,Step):(OneStep==-1)?Builder.CreateSub(RES, Step):RES;
  if(!Step->isMinusOne()&&!Step->isOne())
    RES = Builder.CreateSDiv(RES, Step);
  RES->setName(H->getName()+".tc");

  return RES;
}
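// For reference, a minimal standalone sketch (not part of the pass) of the arithmetic
// that the IRBuilder code above emits for the final RES value, written as plain integer
// math. The names mirror the variables above; it assumes a simple loop of the form
// `for (i = start; i < END; i += Step)` and only illustrates the formula.
#include <cassert>
#include <cstdint>

int64_t tripCount(int64_t start, int64_t END, int64_t Step,
                  bool addfirst, int OneStep) {
  assert(Step != 0 && "Step can't be zero");
  int64_t RES = (Step == -1) ? (start - END) : (END - start);
  if (addfirst)
    OneStep -= 1;
  if (Step == -1)
    OneStep *= -1;
  assert(OneStep <= 1 && OneStep >= -1);
  RES = (OneStep == 1) ? RES + Step : (OneStep == -1) ? RES - Step : RES;
  if (Step != -1 && Step != 1)
    RES = RES / Step;            // the CreateSDiv in the IR-building version
  return RES;
}
// Example: `for (i = 0; i < 10; i += 2)` exits on ICMP_SLT, so OneStep == 0 and
// addfirst == false, and tripCount(0, 10, 2, false, 0) == 5 iterations.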
bool GuardWideningImpl::combineRangeChecks(
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &Checks,
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &RangeChecksOut) {
  unsigned OldCount = Checks.size();
  while (!Checks.empty()) {
    // Pick all of the range checks with a specific base and length, and try to
    // merge them.
    Value *CurrentBase = Checks.front().getBase();
    Value *CurrentLength = Checks.front().getLength();

    SmallVector<GuardWideningImpl::RangeCheck, 3> CurrentChecks;

    auto IsCurrentCheck = [&](GuardWideningImpl::RangeCheck &RC) {
      return RC.getBase() == CurrentBase && RC.getLength() == CurrentLength;
    };

    copy_if(Checks, std::back_inserter(CurrentChecks), IsCurrentCheck);
    Checks.erase(remove_if(Checks, IsCurrentCheck), Checks.end());

    assert(CurrentChecks.size() != 0 && "We know we have at least one!");

    if (CurrentChecks.size() < 3) {
      RangeChecksOut.insert(RangeChecksOut.end(), CurrentChecks.begin(),
                            CurrentChecks.end());
      continue;
    }

    // CurrentChecks.size() will typically be 3 here, but so far there has been
    // no need to hard-code that fact.

    std::sort(CurrentChecks.begin(), CurrentChecks.end(),
              [&](const GuardWideningImpl::RangeCheck &LHS,
                  const GuardWideningImpl::RangeCheck &RHS) {
      return LHS.getOffsetValue().slt(RHS.getOffsetValue());
    });

    // Note: std::sort does not invalidate iterators into CurrentChecks.

    ConstantInt *MinOffset = CurrentChecks.front().getOffset(),
                *MaxOffset = CurrentChecks.back().getOffset();

    unsigned BitWidth = MaxOffset->getValue().getBitWidth();
    if ((MaxOffset->getValue() - MinOffset->getValue())
            .ugt(APInt::getSignedMinValue(BitWidth)))
      return false;

    APInt MaxDiff = MaxOffset->getValue() - MinOffset->getValue();
    const APInt &HighOffset = MaxOffset->getValue();
    auto OffsetOK = [&](const GuardWideningImpl::RangeCheck &RC) {
      return (HighOffset - RC.getOffsetValue()).ult(MaxDiff);
    };

    if (MaxDiff.isMinValue() ||
        !std::all_of(std::next(CurrentChecks.begin()), CurrentChecks.end(),
                     OffsetOK))
      return false;

    // We have a series of f+1 checks as:
    //
    //   I+k_0 u< L   ... Chk_0
    //   I+k_1 u< L   ... Chk_1
    //   ...
    //   I+k_f u< L   ... Chk_f
    //
    //     with forall i in [1,f]: k_f-k_i u< k_f-k_0  ... Precond_0
    //          k_f-k_0 u< INT_MIN+k_f                 ... Precond_1
    //          k_f != k_0                             ... Precond_2
    //
    // Claim:
    //   Chk_0 AND Chk_f implies all the other checks
    //
    // Informal proof sketch:
    //
    //   We will show that the integer range [I+k_0,I+k_f] does not unsigned-wrap
    //   (i.e. going from I+k_0 to I+k_f does not cross the -1,0 boundary) and
    //   thus I+k_f is the greatest unsigned value in that range.
    //
    //   This combined with Chk_f shows that everything in that range is u< L.
    //   Via Precond_0 we know that all of the indices in Chk_0 through Chk_f
    //   lie in [I+k_0,I+k_f], thus proving our claim.
    //
    //   To see that [I+k_0,I+k_f] is not a wrapping range, note that there are
    //   two possibilities: I+k_0 u< I+k_f or I+k_0 >u I+k_f (they can't be equal
    //   since k_0 != k_f). In the former case, [I+k_0,I+k_f] is not a wrapping
    //   range by definition, and the latter case is impossible:
    //
    //     0-----I+k_f---I+k_0----L---INT_MAX,INT_MIN------------------(-1)
    //     xxxxxx                                  xxxxxxxxxxxxxxxxxxxxxxxxx
    //
    //   For Chk_0 to succeed, we'd have to have k_f-k_0 (the range highlighted
    //   with 'x' above) to be at least >u INT_MIN.

    RangeChecksOut.emplace_back(CurrentChecks.front());
    RangeChecksOut.emplace_back(CurrentChecks.back());
  }

  assert(RangeChecksOut.size() <= OldCount && "We pessimized!");
  return RangeChecksOut.size() != OldCount;
}
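// The three preconditions in the proof sketch map directly onto the APInt tests above.
// Below is a standalone sketch of just that offset arithmetic; it is not part of
// GuardWideningImpl, and offsetsAreMergeable is a made-up name for illustration.
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"

using llvm::APInt;

// Returns true when offsets k_0 (MinOff), k_f (MaxOff) and the intermediate offsets
// satisfy the preconditions above, so Chk_0 and Chk_f imply the remaining checks.
bool offsetsAreMergeable(const APInt &MinOff, const APInt &MaxOff,
                         llvm::ArrayRef<APInt> MiddleOffsets) {
  unsigned BW = MaxOff.getBitWidth();
  APInt MaxDiff = MaxOff - MinOff;
  // Precond_1: the spread k_f - k_0 must not exceed the unsigned encoding of INT_MIN,
  // otherwise the range [I+k_0, I+k_f] could wrap around.
  if (MaxDiff.ugt(APInt::getSignedMinValue(BW)))
    return false;
  // Precond_2: k_0 != k_f.
  if (MaxDiff.isMinValue())
    return false;
  // Precond_0: every other offset k_i satisfies (k_f - k_i) u< (k_f - k_0).
  for (const APInt &K : MiddleOffsets)
    if (!(MaxOff - K).ult(MaxDiff))
      return false;
  return true;
}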
Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) { Value *LHS = SVI.getOperand(0); Value *RHS = SVI.getOperand(1); SmallVector<int, 16> Mask = SVI.getShuffleMask(); Type *Int32Ty = Type::getInt32Ty(SVI.getContext()); bool MadeChange = false; // Undefined shuffle mask -> undefined value. if (isa<UndefValue>(SVI.getOperand(2))) return replaceInstUsesWith(SVI, UndefValue::get(SVI.getType())); unsigned VWidth = cast<VectorType>(SVI.getType())->getNumElements(); APInt UndefElts(VWidth, 0); APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth)); if (Value *V = SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, UndefElts)) { if (V != &SVI) return replaceInstUsesWith(SVI, V); LHS = SVI.getOperand(0); RHS = SVI.getOperand(1); MadeChange = true; } unsigned LHSWidth = cast<VectorType>(LHS->getType())->getNumElements(); // Canonicalize shuffle(x ,x,mask) -> shuffle(x, undef,mask') // Canonicalize shuffle(undef,x,mask) -> shuffle(x, undef,mask'). if (LHS == RHS || isa<UndefValue>(LHS)) { if (isa<UndefValue>(LHS) && LHS == RHS) { // shuffle(undef,undef,mask) -> undef. Value *Result = (VWidth == LHSWidth) ? LHS : UndefValue::get(SVI.getType()); return replaceInstUsesWith(SVI, Result); } // Remap any references to RHS to use LHS. SmallVector<Constant*, 16> Elts; for (unsigned i = 0, e = LHSWidth; i != VWidth; ++i) { if (Mask[i] < 0) { Elts.push_back(UndefValue::get(Int32Ty)); continue; } if ((Mask[i] >= (int)e && isa<UndefValue>(RHS)) || (Mask[i] < (int)e && isa<UndefValue>(LHS))) { Mask[i] = -1; // Turn into undef. Elts.push_back(UndefValue::get(Int32Ty)); } else { Mask[i] = Mask[i] % e; // Force to LHS. Elts.push_back(ConstantInt::get(Int32Ty, Mask[i])); } } SVI.setOperand(0, SVI.getOperand(1)); SVI.setOperand(1, UndefValue::get(RHS->getType())); SVI.setOperand(2, ConstantVector::get(Elts)); LHS = SVI.getOperand(0); RHS = SVI.getOperand(1); MadeChange = true; } if (VWidth == LHSWidth) { // Analyze the shuffle, are the LHS or RHS and identity shuffles? bool isLHSID, isRHSID; recognizeIdentityMask(Mask, isLHSID, isRHSID); // Eliminate identity shuffles. if (isLHSID) return replaceInstUsesWith(SVI, LHS); if (isRHSID) return replaceInstUsesWith(SVI, RHS); } if (isa<UndefValue>(RHS) && CanEvaluateShuffled(LHS, Mask)) { Value *V = EvaluateInDifferentElementOrder(LHS, Mask); return replaceInstUsesWith(SVI, V); } // SROA generates shuffle+bitcast when the extracted sub-vector is bitcast to // a non-vector type. We can instead bitcast the original vector followed by // an extract of the desired element: // // %sroa = shufflevector <16 x i8> %in, <16 x i8> undef, // <4 x i32> <i32 0, i32 1, i32 2, i32 3> // %1 = bitcast <4 x i8> %sroa to i32 // Becomes: // %bc = bitcast <16 x i8> %in to <4 x i32> // %ext = extractelement <4 x i32> %bc, i32 0 // // If the shuffle is extracting a contiguous range of values from the input // vector then each use which is a bitcast of the extracted size can be // replaced. This will work if the vector types are compatible, and the begin // index is aligned to a value in the casted vector type. If the begin index // isn't aligned then we can shuffle the original vector (keeping the same // vector type) before extracting. // // This code will bail out if the target type is fundamentally incompatible // with vectors of the source type. // // Example of <16 x i8>, target type i32: // Index range [4,8): v-----------v Will work. 
// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ // <16 x i8>: | | | | | | | | | | | | | | | | | // <4 x i32>: | | | | | // +-----------+-----------+-----------+-----------+ // Index range [6,10): ^-----------^ Needs an extra shuffle. // Target type i40: ^--------------^ Won't work, bail. if (isShuffleExtractingFromLHS(SVI, Mask)) { Value *V = LHS; unsigned MaskElems = Mask.size(); unsigned BegIdx = Mask.front(); VectorType *SrcTy = cast<VectorType>(V->getType()); unsigned VecBitWidth = SrcTy->getBitWidth(); unsigned SrcElemBitWidth = DL.getTypeSizeInBits(SrcTy->getElementType()); assert(SrcElemBitWidth && "vector elements must have a bitwidth"); unsigned SrcNumElems = SrcTy->getNumElements(); SmallVector<BitCastInst *, 8> BCs; DenseMap<Type *, Value *> NewBCs; for (User *U : SVI.users()) if (BitCastInst *BC = dyn_cast<BitCastInst>(U)) if (!BC->use_empty()) // Only visit bitcasts that weren't previously handled. BCs.push_back(BC); for (BitCastInst *BC : BCs) { Type *TgtTy = BC->getDestTy(); unsigned TgtElemBitWidth = DL.getTypeSizeInBits(TgtTy); if (!TgtElemBitWidth) continue; unsigned TgtNumElems = VecBitWidth / TgtElemBitWidth; bool VecBitWidthsEqual = VecBitWidth == TgtNumElems * TgtElemBitWidth; bool BegIsAligned = 0 == ((SrcElemBitWidth * BegIdx) % TgtElemBitWidth); if (!VecBitWidthsEqual) continue; if (!VectorType::isValidElementType(TgtTy)) continue; VectorType *CastSrcTy = VectorType::get(TgtTy, TgtNumElems); if (!BegIsAligned) { // Shuffle the input so [0,NumElements) contains the output, and // [NumElems,SrcNumElems) is undef. SmallVector<Constant *, 16> ShuffleMask(SrcNumElems, UndefValue::get(Int32Ty)); for (unsigned I = 0, E = MaskElems, Idx = BegIdx; I != E; ++Idx, ++I) ShuffleMask[I] = ConstantInt::get(Int32Ty, Idx); V = Builder->CreateShuffleVector(V, UndefValue::get(V->getType()), ConstantVector::get(ShuffleMask), SVI.getName() + ".extract"); BegIdx = 0; } unsigned SrcElemsPerTgtElem = TgtElemBitWidth / SrcElemBitWidth; assert(SrcElemsPerTgtElem); BegIdx /= SrcElemsPerTgtElem; bool BCAlreadyExists = NewBCs.find(CastSrcTy) != NewBCs.end(); auto *NewBC = BCAlreadyExists ? NewBCs[CastSrcTy] : Builder->CreateBitCast(V, CastSrcTy, SVI.getName() + ".bc"); if (!BCAlreadyExists) NewBCs[CastSrcTy] = NewBC; auto *Ext = Builder->CreateExtractElement( NewBC, ConstantInt::get(Int32Ty, BegIdx), SVI.getName() + ".extract"); // The shufflevector isn't being replaced: the bitcast that used it // is. InstCombine will visit the newly-created instructions. replaceInstUsesWith(*BC, Ext); MadeChange = true; } } // If the LHS is a shufflevector itself, see if we can combine it with this // one without producing an unusual shuffle. // Cases that might be simplified: // 1. // x1=shuffle(v1,v2,mask1) // x=shuffle(x1,undef,mask) // ==> // x=shuffle(v1,undef,newMask) // newMask[i] = (mask[i] < x1.size()) ? mask1[mask[i]] : -1 // 2. // x1=shuffle(v1,undef,mask1) // x=shuffle(x1,x2,mask) // where v1.size() == mask1.size() // ==> // x=shuffle(v1,x2,newMask) // newMask[i] = (mask[i] < x1.size()) ? mask1[mask[i]] : mask[i] // 3. // x2=shuffle(v2,undef,mask2) // x=shuffle(x1,x2,mask) // where v2.size() == mask2.size() // ==> // x=shuffle(x1,v2,newMask) // newMask[i] = (mask[i] < x1.size()) // ? mask[i] : mask2[mask[i]-x1.size()]+x1.size() // 4. // x1=shuffle(v1,undef,mask1) // x2=shuffle(v2,undef,mask2) // x=shuffle(x1,x2,mask) // where v1.size() == v2.size() // ==> // x=shuffle(v1,v2,newMask) // newMask[i] = (mask[i] < x1.size()) // ? 
mask1[mask[i]] : mask2[mask[i]-x1.size()]+v1.size() // // Here we are really conservative: // we are absolutely afraid of producing a shuffle mask not in the input // program, because the code gen may not be smart enough to turn a merged // shuffle into two specific shuffles: it may produce worse code. As such, // we only merge two shuffles if the result is either a splat or one of the // input shuffle masks. In this case, merging the shuffles just removes // one instruction, which we know is safe. This is good for things like // turning: (splat(splat)) -> splat, or // merge(V[0..n], V[n+1..2n]) -> V[0..2n] ShuffleVectorInst* LHSShuffle = dyn_cast<ShuffleVectorInst>(LHS); ShuffleVectorInst* RHSShuffle = dyn_cast<ShuffleVectorInst>(RHS); if (LHSShuffle) if (!isa<UndefValue>(LHSShuffle->getOperand(1)) && !isa<UndefValue>(RHS)) LHSShuffle = nullptr; if (RHSShuffle) if (!isa<UndefValue>(RHSShuffle->getOperand(1))) RHSShuffle = nullptr; if (!LHSShuffle && !RHSShuffle) return MadeChange ? &SVI : nullptr; Value* LHSOp0 = nullptr; Value* LHSOp1 = nullptr; Value* RHSOp0 = nullptr; unsigned LHSOp0Width = 0; unsigned RHSOp0Width = 0; if (LHSShuffle) { LHSOp0 = LHSShuffle->getOperand(0); LHSOp1 = LHSShuffle->getOperand(1); LHSOp0Width = cast<VectorType>(LHSOp0->getType())->getNumElements(); } if (RHSShuffle) { RHSOp0 = RHSShuffle->getOperand(0); RHSOp0Width = cast<VectorType>(RHSOp0->getType())->getNumElements(); } Value* newLHS = LHS; Value* newRHS = RHS; if (LHSShuffle) { // case 1 if (isa<UndefValue>(RHS)) { newLHS = LHSOp0; newRHS = LHSOp1; } // case 2 or 4 else if (LHSOp0Width == LHSWidth) { newLHS = LHSOp0; } } // case 3 or 4 if (RHSShuffle && RHSOp0Width == LHSWidth) { newRHS = RHSOp0; } // case 4 if (LHSOp0 == RHSOp0) { newLHS = LHSOp0; newRHS = nullptr; } if (newLHS == LHS && newRHS == RHS) return MadeChange ? &SVI : nullptr; SmallVector<int, 16> LHSMask; SmallVector<int, 16> RHSMask; if (newLHS != LHS) LHSMask = LHSShuffle->getShuffleMask(); if (RHSShuffle && newRHS != RHS) RHSMask = RHSShuffle->getShuffleMask(); unsigned newLHSWidth = (newLHS != LHS) ? LHSOp0Width : LHSWidth; SmallVector<int, 16> newMask; bool isSplat = true; int SplatElt = -1; // Create a new mask for the new ShuffleVectorInst so that the new // ShuffleVectorInst is equivalent to the original one. for (unsigned i = 0; i < VWidth; ++i) { int eltMask; if (Mask[i] < 0) { // This element is an undef value. eltMask = -1; } else if (Mask[i] < (int)LHSWidth) { // This element is from left hand side vector operand. // // If LHS is going to be replaced (case 1, 2, or 4), calculate the // new mask value for the element. if (newLHS != LHS) { eltMask = LHSMask[Mask[i]]; // If the value selected is an undef value, explicitly specify it // with a -1 mask value. if (eltMask >= (int)LHSOp0Width && isa<UndefValue>(LHSOp1)) eltMask = -1; } else eltMask = Mask[i]; } else { // This element is from right hand side vector operand // // If the value selected is an undef value, explicitly specify it // with a -1 mask value. (case 1) if (isa<UndefValue>(RHS)) eltMask = -1; // If RHS is going to be replaced (case 3 or 4), calculate the // new mask value for the element. else if (newRHS != RHS) { eltMask = RHSMask[Mask[i]-LHSWidth]; // If the value selected is an undef value, explicitly specify it // with a -1 mask value. 
if (eltMask >= (int)RHSOp0Width) { assert(isa<UndefValue>(RHSShuffle->getOperand(1)) && "should have been check above"); eltMask = -1; } } else eltMask = Mask[i]-LHSWidth; // If LHS's width is changed, shift the mask value accordingly. // If newRHS == NULL, i.e. LHSOp0 == RHSOp0, we want to remap any // references from RHSOp0 to LHSOp0, so we don't need to shift the mask. // If newRHS == newLHS, we want to remap any references from newRHS to // newLHS so that we can properly identify splats that may occur due to // obfuscation across the two vectors. if (eltMask >= 0 && newRHS != nullptr && newLHS != newRHS) eltMask += newLHSWidth; } // Check if this could still be a splat. if (eltMask >= 0) { if (SplatElt >= 0 && SplatElt != eltMask) isSplat = false; SplatElt = eltMask; } newMask.push_back(eltMask); } // If the result mask is equal to one of the original shuffle masks, // or is a splat, do the replacement. if (isSplat || newMask == LHSMask || newMask == RHSMask || newMask == Mask) { SmallVector<Constant*, 16> Elts; for (unsigned i = 0, e = newMask.size(); i != e; ++i) { if (newMask[i] < 0) { Elts.push_back(UndefValue::get(Int32Ty)); } else { Elts.push_back(ConstantInt::get(Int32Ty, newMask[i])); } } if (!newRHS) newRHS = UndefValue::get(newLHS->getType()); return new ShuffleVectorInst(newLHS, newRHS, ConstantVector::get(Elts)); } // If the result mask is an identity, replace uses of this instruction with // corresponding argument. bool isLHSID, isRHSID; recognizeIdentityMask(newMask, isLHSID, isRHSID); if (isLHSID && VWidth == LHSOp0Width) return replaceInstUsesWith(SVI, newLHS); if (isRHSID && VWidth == RHSOp0Width) return replaceInstUsesWith(SVI, newRHS); return MadeChange ? &SVI : nullptr; }
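// A standalone illustration of the "case 1" mask composition described in the comments
// above: shuffle(shuffle(v1, v2, mask1), undef, mask) folds to shuffle(v1, v2, newMask).
// This sketch shows only the index arithmetic on plain integer masks, with -1 standing
// for an undef lane; the real transform additionally enforces the splat / reused-mask
// restrictions noted above, and composeShuffleMasks is a made-up helper name.
#include <vector>

std::vector<int> composeShuffleMasks(const std::vector<int> &Mask,
                                     const std::vector<int> &Mask1) {
  std::vector<int> NewMask;
  NewMask.reserve(Mask.size());
  const int InnerWidth = static_cast<int>(Mask1.size());
  for (int Elt : Mask) {
    if (Elt < 0 || Elt >= InnerWidth)
      NewMask.push_back(-1);          // undef lane, or a lane selecting the undef RHS
    else
      NewMask.push_back(Mask1[Elt]);  // look through the inner shuffle
  }
  return NewMask;
}
// e.g. Mask1 = {0, 4, 1, 5} and Mask = {2, 3, -1, 0} compose to NewMask = {1, 5, -1, 0}.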