extern "C" void PyString_InternInPlace(PyObject** p) noexcept { BoxedString* s = (BoxedString*)*p; if (s == NULL || !PyString_Check(s)) Py_FatalError("PyString_InternInPlace: strings only please!"); /* If it's a string subclass, we don't really know what putting it in the interned dict might do. */ if (!PyString_CheckExact(s)) return; if (PyString_CHECK_INTERNED(s)) return; auto it = interned_strings.find(s); if (it != interned_strings.end()) { auto entry = *it; Py_INCREF(entry); Py_DECREF(*p); *p = entry; } else { // TODO: do CPython's refcounting here num_interned_strings.log(); interned_strings.insert(s); Py_INCREF(s); // CPython returns mortal but in our current implementation they are inmortal s->interned_state = SSTATE_INTERNED_IMMORTAL; } }
// Find up to max_matches types named `name` in this PDB, appending them to
// `types` (clearing it first unless `append`).  Returns the resulting size
// of `types`.
uint32_t SymbolFilePDB::FindTypes(
    const lldb_private::SymbolContext &sc,
    const lldb_private::ConstString &name,
    const lldb_private::CompilerDeclContext *parent_decl_ctx, bool append,
    uint32_t max_matches,
    llvm::DenseSet<lldb_private::SymbolFile *> &searched_symbol_files,
    lldb_private::TypeMap &types) {
  if (!append)
    types.Clear();
  if (!name)
    return 0;

  searched_symbol_files.clear();
  searched_symbol_files.insert(this);

  std::string search_name = name.AsCString();

  // A regex query forces us to fetch EVERY symbol and filter them one by
  // one, which destroys performance on large PDB files.  Only take the
  // regex path when the name actually contains regex metacharacters.
  bool looks_like_regex =
      search_name.find_first_of("[]?*.-+\\") != std::string::npos;
  if (looks_like_regex)
    FindTypesByRegex(search_name, max_matches, types);
  else
    FindTypesByName(search_name, max_matches, types);
  return types.GetSize();
}
// Collect the VarDecls of a for-statement that are visible before the point
// identified by PriorToDecl/PriorToStmt: declarations in the init statement,
// then the C++ condition variable, stopping as soon as the prior-to point is
// reached.
void addVarDeclsVisible(clang::ForStmt const *Parent,
                        clang::Decl const *PriorToDecl,
                        clang::Stmt const *PriorToStmt,
                        seec::seec_clang::MappedAST const &Map,
                        llvm::DenseSet<clang::VarDecl const *> &Set)
{
  // Declarations in the init statement, e.g. for (int i = 0; ...).
  if (auto const InitStmt = Parent->getInit()) {
    if (PriorToStmt && InitStmt == PriorToStmt)
      return;

    if (auto const Decls = llvm::dyn_cast<clang::DeclStmt>(InitStmt))
      addVarDeclsVisible(Decls, nullptr, nullptr, Map, Set);
  }

  // Stop before the condition if that is the point of interest.
  if (PriorToStmt && Parent->getCond() == PriorToStmt)
    return;

  // A condition variable, e.g. for (...; T v = f(); ...).
  if (auto const CondVar = Parent->getConditionVariable())
    Set.insert(CondVar);

  // Stop before the increment if that is the point of interest.
  if (PriorToStmt && Parent->getInc() == PriorToStmt)
    return;

  // Any VarDecls in the Body should have already been added.
}
// Append Symbol to Records, deduplicating S_UDT records: only the first
// occurrence of each distinct UDT record is kept.
void addSymbol(const CVSymbol &Symbol) {
  if (Symbol.kind() == S_UDT) {
    auto InsertResult = UdtHashes.insert(Symbol);
    // Already present: drop the duplicate.
    if (!InsertResult.second)
      return;
  }

  Records.push_back(Symbol);
}
// Recursively insert every transitive child statement of S into Set.
// S itself is not added.  Null children (e.g. missing else branches) are
// skipped.
static void addAllChildren(llvm::DenseSet<clang::Stmt const *> &Set,
                           clang::Stmt const *S)
{
  for (auto const &C : S->children()) {
    if (!C)
      continue;

    Set.insert(C);
    addAllChildren(Set, C);
  }
}
// Rewrite the named module file into the output stream as a
// `#pragma clang module build ... endbuild` block, running the include
// rewriter on it in a separate CompilerInstance.  PCH files and preambles
// are skipped; each module file is rewritten at most once.
void visitModuleFile(StringRef Filename,
                     serialization::ModuleKind Kind) override {
  auto *File = CI.getFileManager().getFile(Filename);
  assert(File && "missing file for loaded module?");

  // Only rewrite each module file once.
  if (!Rewritten.insert(File).second)
    return;

  serialization::ModuleFile *MF =
      CI.getModuleManager()->getModuleManager().lookup(File);
  // Bug fix: the original asserted `File` a second time here; the value that
  // can actually be null after the lookup (and is dereferenced just below)
  // is MF.
  assert(MF && "missing module file for loaded module?");
  // Not interested in PCH / preambles.
  if (!MF->isModule())
    return;

  auto OS = Out.lock();
  assert(OS && "loaded module file after finishing rewrite action?");

  // Emit the module-build pragma header, quoting the module name if it is
  // not a valid identifier.
  (*OS) << "#pragma clang module build ";
  if (isValidIdentifier(MF->ModuleName))
    (*OS) << MF->ModuleName;
  else {
    (*OS) << '"';
    OS->write_escaped(MF->ModuleName);
    (*OS) << '"';
  }
  (*OS) << '\n';

  // Rewrite the contents of the module in a separate compiler instance.
  CompilerInstance Instance(CI.getPCHContainerOperations(),
                            &CI.getPreprocessor().getPCMCache());
  Instance.setInvocation(
      std::make_shared<CompilerInvocation>(CI.getInvocation()));
  Instance.createDiagnostics(
      new ForwardingDiagnosticConsumer(CI.getDiagnosticClient()),
      /*ShouldOwnClient=*/true);
  Instance.getFrontendOpts().DisableFree = false;
  Instance.getFrontendOpts().Inputs.clear();
  Instance.getFrontendOpts().Inputs.emplace_back(
      Filename, InputKind(InputKind::Unknown, InputKind::Precompiled));
  Instance.getFrontendOpts().ModuleFiles.clear();
  Instance.getFrontendOpts().ModuleMapFiles.clear();
  // Don't recursively rewrite imports. We handle them all at the top level.
  Instance.getPreprocessorOutputOpts().RewriteImports = false;

  llvm::CrashRecoveryContext().RunSafelyOnThread([&]() {
    RewriteIncludesAction Action;
    Action.OutputStream = OS;
    Instance.ExecuteAction(Action);
  });

  (*OS) << "#pragma clang module endbuild /*" << MF->ModuleName << "*/\n";
}
// Report D to the chained consumer as a dynamic-lookup result, unless it is
// overridden, is an initializer, or a declaration with the same signature
// has already been reported.
void foundDecl(ValueDecl *D, DeclVisibilityKind Reason) override {
  // Name lookup will also have found the overridden method when a
  // declaration has an override; prefer the overridden method and skip
  // this one.
  if (D->getOverriddenDecl())
    return;

  // Initializers cannot be found by dynamic lookup.
  if (isa<ConstructorDecl>(D))
    return;

  // Skip declarations whose signature has already been reported.
  if (auto *FD = dyn_cast<FuncDecl>(D)) {
    assert(FD->getImplicitSelfDecl() && "should not find free functions");
    (void)FD;

    // Strip the first uncurry level (the 'self' parameter) off the type.
    CanType Stripped = D->getType()
                           ->castTo<AnyFunctionType>()
                           ->getResult()
                           ->getCanonicalType();
    auto Key = std::make_pair(D->getName(), Stripped);
    if (!FunctionsReported.insert(Key).second)
      return;
  } else if (isa<SubscriptDecl>(D)) {
    auto Key = D->getType()->getCanonicalType();
    if (!SubscriptsReported.insert(Key).second)
      return;
  } else if (isa<VarDecl>(D)) {
    auto Key = std::make_pair(D->getName(),
                              D->getType()->getCanonicalType());
    if (!PropertiesReported.insert(Key).second)
      return;
  } else {
    llvm_unreachable("unhandled decl kind");
  }

  if (isDeclVisibleInLookupMode(D, LS, CurrDC, TypeResolver))
    ChainedConsumer.foundDecl(D, DeclVisibilityKind::DynamicLookup);
}
// Collect the VarDecls declared by Parent into Set.  In the multi-decl case
// collection stops after PriorToDecl (inclusive) when it is given.
void addVarDeclsVisible(clang::DeclStmt const *Parent,
                        clang::Decl const *PriorToDecl,
                        clang::Stmt const *PriorToStmt,
                        seec::seec_clang::MappedAST const &Map,
                        llvm::DenseSet<clang::VarDecl const *> &Set)
{
  // Single declaration: add it if it is a variable, and we are done.
  if (Parent->isSingleDecl()) {
    if (auto const VD =
            llvm::dyn_cast<clang::VarDecl>(Parent->getSingleDecl()))
      Set.insert(VD);
    return;
  }

  // Declaration group: add each variable in order, stopping once the
  // prior-to declaration has been processed.
  for (auto const D : Parent->getDeclGroup()) {
    if (auto const VD = llvm::dyn_cast<clang::VarDecl>(D))
      Set.insert(VD);

    if (PriorToDecl && D == PriorToDecl)
      return;
  }
}
// Return the canonical interned BoxedString for `s`, creating and
// registering one if necessary.  The returned reference is owned by the
// caller; the interned table holds its own reference.
BoxedString* internStringImmortal(llvm::StringRef s) noexcept {
    auto found = interned_strings.find_as(s);
    if (found != interned_strings.end())
        return incref(*found);

    num_interned_strings.log();
    BoxedString* interned = boxString(s);

    // CPython hands out mortal interned strings, but in our current
    // implementation they are immortal.
    interned->interned_state = SSTATE_INTERNED_IMMORTAL;

    interned_strings.insert(interned);
    Py_INCREF(interned);
    return interned;
}
// Collect the VarDecls of a while-statement visible before the point
// identified by PriorToStmt: only the C++ condition variable, and nothing
// at all if the point of interest is the condition itself.
void addVarDeclsVisible(clang::WhileStmt const *Parent,
                        clang::Decl const *PriorToDecl,
                        clang::Stmt const *PriorToStmt,
                        seec::seec_clang::MappedAST const &Map,
                        llvm::DenseSet<clang::VarDecl const *> &Set)
{
  // Nothing is visible before the condition.
  if (PriorToStmt && Parent->getCond() == PriorToStmt)
    return;

  // A condition variable, e.g. while (T v = f()).
  if (auto const CondVar = Parent->getConditionVariable())
    Set.insert(CondVar);

  // Any VarDecls in the Body should have already been added.
}
// Walk the statements of CS and, wherever TraverseStmt flags a reference to
// a declaration (recorded in m_FoundDRE) that has not yet been handled,
// synthesize a DeclStmt for that declaration immediately before the
// referencing statement.  Each declaration is fixed up at most once
// (tracked in m_HandledDecls); the rewritten statement list is written back
// into CS at the end.
void Fix(CompoundStmt* CS) {
  if (!CS->size())
    return;
  typedef llvm::SmallVector<Stmt*, 32> Statements;
  Statements Stmts;
  // Work on a copy of the body so statements can be inserted while
  // iterating.
  Stmts.append(CS->body_begin(), CS->body_end());
  for (Statements::iterator I = Stmts.begin(); I != Stmts.end(); ++I) {
    // TraverseStmt returning false signals that a DeclRefExpr of interest
    // was found and stored in m_FoundDRE.
    if (!TraverseStmt(*I) && !m_HandledDecls.count(m_FoundDRE->getDecl())) {
      Sema::DeclGroupPtrTy VDPtrTy
        = m_Sema->ConvertDeclToDeclGroup(m_FoundDRE->getDecl());
      StmtResult DS = m_Sema->ActOnDeclStmt(VDPtrTy,
                                            m_FoundDRE->getLocStart(),
                                            m_FoundDRE->getLocEnd());
      assert(!DS.isInvalid() && "Invalid DeclStmt.");
      // Insert before the current statement.  insert() may invalidate
      // iterators, so I is reassigned; it now points at the new DeclStmt
      // and ++I revisits the original statement, which is safe because the
      // decl is now in m_HandledDecls.
      I = Stmts.insert(I, DS.take());
      m_HandledDecls.insert(m_FoundDRE->getDecl());
    }
  }
  // Write the (possibly extended) statement list back into CS.
  CS->setStmts(m_Sema->getASTContext(), Stmts.data(), Stmts.size());
}
/// Returns true if AFR captures any local, non-function state, either
/// directly or transitively through the captures of local functions and
/// property accessors it captures.  `visited` guards against infinite
/// recursion between mutually-capturing local functions.
///
/// TODO: We should consult the cached LoweredLocalCaptures the SIL
/// TypeConverter calculates, but that would require plumbing SILModule&
/// through every SILDeclRef constructor. Since this is only used to determine
/// "natural uncurry level", and "uncurry level" is a concept we'd like to
/// phase out, it's not worth it.
static bool hasLoweredLocalCaptures(AnyFunctionRef AFR,
                                    llvm::DenseSet<AnyFunctionRef> &visited) {
  if (!AFR.getCaptureInfo().hasLocalCaptures())
    return false;

  // Phase 1: scan for local, non-function captures; any such capture means
  // an immediate `true`.  Function captures are only noted for phase 2.
  bool functionCapturesToRecursivelyCheck = false;
  auto addFunctionCapture = [&](AnyFunctionRef capture) {
    if (visited.find(capture) == visited.end())
      functionCapturesToRecursivelyCheck = true;
  };
  for (auto &capture : AFR.getCaptureInfo().getCaptures()) {
    if (!capture.getDecl()->getDeclContext()->isLocalContext())
      continue;

    // We transitively capture a local function's captures.
    if (auto func = dyn_cast<AbstractFunctionDecl>(capture.getDecl())) {
      addFunctionCapture(func);
      continue;
    }

    // We may either directly capture properties, or capture through their
    // accessors.
    if (auto var = dyn_cast<VarDecl>(capture.getDecl())) {
      switch (var->getStorageKind()) {
      case VarDecl::StoredWithTrivialAccessors:
        llvm_unreachable("stored local variable with trivial accessors?");

      case VarDecl::InheritedWithObservers:
        llvm_unreachable("inherited local variable?");

      case VarDecl::StoredWithObservers:
      case VarDecl::Addressed:
      case VarDecl::AddressedWithTrivialAccessors:
      case VarDecl::AddressedWithObservers:
      case VarDecl::ComputedWithMutableAddress:
        // Directly capture storage if we're supposed to.
        if (capture.isDirect())
          return true;

        // Otherwise, transitively capture the accessors.
        SWIFT_FALLTHROUGH;

      case VarDecl::Computed:
        addFunctionCapture(var->getGetter());
        if (auto setter = var->getSetter())
          addFunctionCapture(setter);
        continue;

      case VarDecl::Stored:
        return true;
      }
    }

    // Anything else is directly captured.
    return true;
  }

  // Phase 2: recursively consider function captures, since we didn't have
  // any direct captures.
  auto captureHasLocalCaptures = [&](AnyFunctionRef capture) -> bool {
    // Only recurse into functions not seen before on this path.
    if (visited.insert(capture).second)
      return hasLoweredLocalCaptures(capture, visited);
    return false;
  };
  if (functionCapturesToRecursivelyCheck) {
    for (auto &capture : AFR.getCaptureInfo().getCaptures()) {
      if (!capture.getDecl()->getDeclContext()->isLocalContext())
        continue;
      if (auto func = dyn_cast<AbstractFunctionDecl>(capture.getDecl())) {
        if (captureHasLocalCaptures(func))
          return true;
        continue;
      }
      if (auto var = dyn_cast<VarDecl>(capture.getDecl())) {
        switch (var->getStorageKind()) {
        case VarDecl::StoredWithTrivialAccessors:
          llvm_unreachable("stored local variable with trivial accessors?");

        case VarDecl::InheritedWithObservers:
          llvm_unreachable("inherited local variable?");

        case VarDecl::StoredWithObservers:
        case VarDecl::Addressed:
        case VarDecl::AddressedWithTrivialAccessors:
        case VarDecl::AddressedWithObservers:
        case VarDecl::ComputedWithMutableAddress:
          // Phase 1 would have returned already for a direct capture.
          assert(!capture.isDirect() && "should have short circuited out");

          // Otherwise, transitively capture the accessors.
          SWIFT_FALLTHROUGH;

        case VarDecl::Computed:
          if (captureHasLocalCaptures(var->getGetter()))
            return true;
          if (auto setter = var->getSetter())
            if (captureHasLocalCaptures(setter))
              return true;
          continue;

        case VarDecl::Stored:
          llvm_unreachable("should have short circuited out");
        }
      }
      llvm_unreachable("should have short circuited out");
    }
  }

  return false;
}
// Register an inline-cache info object so the moving GC can track it.
// Compiles to a no-op unless MOVING_GC is enabled.
void registerGCTrackedICInfo(ICInfo* ic) {
#if MOVING_GC
    // Each ICInfo may only be registered once.
    assert(ics_list.count(ic) == 0);
    ics_list.insert(ic);
#endif
}
// Report D to the chained consumer as a dynamic-lookup result.  Filters out
// overridden declarations, non-@objc declarations, types, initializers, and
// any declaration whose signature has already been reported.
void foundDecl(ValueDecl *D, DeclVisibilityKind Reason) override {
  // If the declaration has an override, name lookup will also have found
  // the overridden method. Skip this declaration, because we prefer the
  // overridden method.
  if (D->getOverriddenDecl())
    return;

  // If the declaration is not @objc, it cannot be called dynamically.
  if (!D->isObjC())
    return;

  // Ensure that the declaration has a type.
  if (!D->hasInterfaceType()) {
    if (!TypeResolver) return;
    TypeResolver->resolveDeclSignature(D);
    if (!D->hasInterfaceType()) return;
  }

  switch (D->getKind()) {
  // Expand to a case label for every non-value decl kind; reaching any of
  // them here is a bug since this consumer only receives ValueDecls.
#define DECL(ID, SUPER) \
  case DeclKind::ID:
#define VALUE_DECL(ID, SUPER)
#include "swift/AST/DeclNodes.def"
    llvm_unreachable("not a ValueDecl!");

  // Types cannot be found by dynamic lookup.
  case DeclKind::GenericTypeParam:
  case DeclKind::AssociatedType:
  case DeclKind::TypeAlias:
  case DeclKind::Enum:
  case DeclKind::Class:
  case DeclKind::Struct:
  case DeclKind::Protocol:
    return;

  // Initializers cannot be found by dynamic lookup.
  case DeclKind::Constructor:
  case DeclKind::Destructor:
    return;

  // These cases are probably impossible here but can also just
  // be safely ignored.
  case DeclKind::EnumElement:
  case DeclKind::Param:
  case DeclKind::Module:
    return;

  // For other kinds of values, check if we already reported a decl
  // with the same signature.
  case DeclKind::Accessor:
  case DeclKind::Func: {
    auto FD = cast<FuncDecl>(D);
    assert(FD->getImplicitSelfDecl() && "should not find free functions");
    (void)FD;

    if (FD->isInvalid())
      break;

    // Get the type without the first uncurry level with 'self'.
    CanType T = D->getInterfaceType()
                    ->castTo<AnyFunctionType>()
                    ->getResult()
                    ->getCanonicalType();

    auto Signature = std::make_pair(D->getBaseName(), T);
    if (!FunctionsReported.insert(Signature).second)
      return;
    break;
  }
  case DeclKind::Subscript: {
    auto Signature = D->getInterfaceType()->getCanonicalType();
    if (!SubscriptsReported.insert(Signature).second)
      return;
    break;
  }
  case DeclKind::Var: {
    auto *VD = cast<VarDecl>(D);
    auto Signature = std::make_pair(VD->getName(),
                                    VD->getInterfaceType()->getCanonicalType());
    if (!PropertiesReported.insert(Signature).second)
      return;
    break;
  }
  }

  if (isDeclVisibleInLookupMode(D, LS, CurrDC, TypeResolver))
    ChainedConsumer.foundDecl(D, DeclVisibilityKind::DynamicLookup);
}
// Scan Caller for supported closures and collect, per closure, the apply
// sites at which it can be specialized (ClosureCandidates).  Apply sites
// that receive more than one closure argument are recorded in
// MultipleClosureAI instead and are not specialized.
void ClosureSpecializer::gatherCallSites(
    SILFunction *Caller,
    llvm::SmallVectorImpl<ClosureInfo*> &ClosureCandidates,
    llvm::DenseSet<FullApplySite> &MultipleClosureAI) {

  // A set of apply inst that we have associated with a closure. We use this to
  // make sure that we do not handle call sites with multiple closure arguments.
  llvm::DenseSet<FullApplySite> VisitedAI;

  // For each basic block BB in Caller...
  for (auto &BB : *Caller) {

    // For each instruction II in BB...
    for (auto &II : BB) {
      // If II is not a closure that we support specializing, skip it...
      if (!isSupportedClosure(&II))
        continue;

      // Lazily created once we find a specializable call site for II.
      ClosureInfo *CInfo = nullptr;

      // Go through all uses of our closure.
      for (auto *Use : II.getUses()) {
        // If this use is not an apply inst or an apply inst with
        // substitutions, there is nothing interesting for us to do, so
        // continue...
        auto AI = FullApplySite::isa(Use->getUser());
        if (!AI || AI.hasSubstitutions())
          continue;

        // Check if we have already associated this apply inst with a closure to
        // be specialized. We do not handle applies that take in multiple
        // closures at this time.
        if (!VisitedAI.insert(AI).second) {
          MultipleClosureAI.insert(AI);
          continue;
        }

        // If AI does not have a function_ref definition as its callee, we can
        // not do anything here... so continue...
        SILFunction *ApplyCallee = AI.getReferencedFunction();
        if (!ApplyCallee || ApplyCallee->isExternalDeclaration())
          continue;

        // Ok, we know that we can perform the optimization but not whether or
        // not the optimization is profitable. Find the index of the argument
        // corresponding to our partial apply.
        Optional<unsigned> ClosureIndex;
        for (unsigned i = 0, e = AI.getNumArguments(); i != e; ++i) {
          if (AI.getArgument(i) != SILValue(&II))
            continue;
          ClosureIndex = i;
          DEBUG(llvm::dbgs() << " Found callsite with closure argument at "
                             << i << ": " << *AI.getInstruction());
          break;
        }

        // If we did not find an index, there is nothing further to do,
        // continue.
        if (!ClosureIndex.hasValue())
          continue;

        // Make sure that the Closure is invoked in the Apply's callee. We only
        // want to perform closure specialization if we know that we will be
        // able to change a partial_apply into an apply.
        //
        // TODO: Maybe just call the function directly instead of moving the
        // partial apply?
        SILValue Arg = ApplyCallee->getArgument(ClosureIndex.getValue());
        if (std::none_of(Arg->use_begin(), Arg->use_end(),
                         [&Arg](Operand *Op) -> bool {
                           auto UserAI = FullApplySite::isa(Op->getUser());
                           return UserAI && UserAI.getCallee() == Arg;
                         })) {
          continue;
        }

        // Translate the apply-argument index into a parameter index by
        // skipping over the indirect results.
        auto NumIndirectResults =
            AI.getSubstCalleeType()->getNumIndirectResults();
        assert(ClosureIndex.getValue() >= NumIndirectResults);
        auto ClosureParamIndex = ClosureIndex.getValue() - NumIndirectResults;

        auto ParamInfo = AI.getSubstCalleeType()->getParameters();
        SILParameterInfo ClosureParamInfo = ParamInfo[ClosureParamIndex];

        // Get all non-failure exit BBs in the Apply Callee if our partial apply
        // is guaranteed. If we do not understand one of the exit BBs, bail.
        //
        // We need this to make sure that we insert a release in the appropriate
        // locations to balance the +1 from the creation of the partial apply.
        llvm::TinyPtrVector<SILBasicBlock *> NonFailureExitBBs;
        if (ClosureParamInfo.isGuaranteed() &&
            !findAllNonFailureExitBBs(ApplyCallee, NonFailureExitBBs)) {
          continue;
        }

        // Compute the final release points of the closure. We will insert
        // release of the captured arguments here.
        if (!CInfo) {
          CInfo = new ClosureInfo(&II);
          ValueLifetimeAnalysis VLA(CInfo->Closure);
          VLA.computeFrontier(CInfo->LifetimeFrontier,
                              ValueLifetimeAnalysis::AllowToModifyCFG);
        }

        // Now we know that CSDesc is profitable to specialize. Add it to our
        // call site list.
        CInfo->CallSites.push_back(
            CallSiteDescriptor(CInfo, AI, ClosureIndex.getValue(),
                               ClosureParamInfo,
                               std::move(NonFailureExitBBs)));
      }
      if (CInfo)
        ClosureCandidates.push_back(CInfo);
    }
  }
}