void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{ switch (getKind()) { case APValue::Uninitialized: Out << "<uninitialized>"; return; case APValue::Int: if (Ty->isBooleanType()) Out << (getInt().getBoolValue() ? "true" : "false"); else Out << getInt(); return; case APValue::Float: Out << GetApproxValue(getFloat()); return; case APValue::Vector: { Out << '{'; QualType ElemTy = Ty->getAs<VectorType>()->getElementType(); getVectorElt(0).printPretty(Out, Ctx, ElemTy); for (unsigned i = 1; i != getVectorLength(); ++i) { Out << ", "; getVectorElt(i).printPretty(Out, Ctx, ElemTy); } Out << '}'; return; } case APValue::ComplexInt: Out << getComplexIntReal() << "+" << getComplexIntImag() << "i"; return; case APValue::ComplexFloat: Out << GetApproxValue(getComplexFloatReal()) << "+" << GetApproxValue(getComplexFloatImag()) << "i"; return; case APValue::LValue: { LValueBase Base = getLValueBase(); if (!Base) { Out << "0"; return; } bool IsReference = Ty->isReferenceType(); QualType InnerTy = IsReference ? Ty.getNonReferenceType() : Ty->getPointeeType(); if (InnerTy.isNull()) InnerTy = Ty; if (!hasLValuePath()) { // No lvalue path: just print the offset. CharUnits O = getLValueOffset(); CharUnits S = Ctx.getTypeSizeInChars(InnerTy); if (!O.isZero()) { if (IsReference) Out << "*("; if (O % S) { Out << "(char*)"; S = CharUnits::One(); } Out << '&'; } else if (!IsReference) Out << '&'; if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) Out << *VD; else { assert(Base.get<const Expr *>() != nullptr && "Expecting non-null Expr"); Base.get<const Expr*>()->printPretty(Out, nullptr, Ctx.getPrintingPolicy()); } if (!O.isZero()) { Out << " + " << (O / S); if (IsReference) Out << ')'; } return; } // We have an lvalue path. Print it out nicely. 
if (!IsReference) Out << '&'; else if (isLValueOnePastTheEnd()) Out << "*(&"; QualType ElemTy; if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) { Out << *VD; ElemTy = VD->getType(); } else { const Expr *E = Base.get<const Expr*>(); assert(E != nullptr && "Expecting non-null Expr"); E->printPretty(Out, nullptr, Ctx.getPrintingPolicy()); ElemTy = E->getType(); } ArrayRef<LValuePathEntry> Path = getLValuePath(); const CXXRecordDecl *CastToBase = nullptr; for (unsigned I = 0, N = Path.size(); I != N; ++I) { if (ElemTy->getAs<RecordType>()) { // The lvalue refers to a class type, so the next path entry is a base // or member. const Decl *BaseOrMember = BaseOrMemberType::getFromOpaqueValue(Path[I].BaseOrMember).getPointer(); if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(BaseOrMember)) { CastToBase = RD; ElemTy = Ctx.getRecordType(RD); } else { const ValueDecl *VD = cast<ValueDecl>(BaseOrMember); Out << "."; if (CastToBase) Out << *CastToBase << "::"; Out << *VD; ElemTy = VD->getType(); } } else { // The lvalue must refer to an array. Out << '[' << Path[I].ArrayIndex << ']'; ElemTy = Ctx.getAsArrayType(ElemTy)->getElementType(); } } // Handle formatting of one-past-the-end lvalues. if (isLValueOnePastTheEnd()) { // FIXME: If CastToBase is non-0, we should prefix the output with // "(CastToBase*)". Out << " + 1"; if (IsReference) Out << ')'; } return; } case APValue::Array: { const ArrayType *AT = Ctx.getAsArrayType(Ty); QualType ElemTy = AT->getElementType(); Out << '{'; if (unsigned N = getArrayInitializedElts()) { getArrayInitializedElt(0).printPretty(Out, Ctx, ElemTy); for (unsigned I = 1; I != N; ++I) { Out << ", "; if (I == 10) { // Avoid printing out the entire contents of large arrays. 
Out << "..."; break; } getArrayInitializedElt(I).printPretty(Out, Ctx, ElemTy); } } Out << '}'; return; } case APValue::Struct: { Out << '{'; const RecordDecl *RD = Ty->getAs<RecordType>()->getDecl(); bool First = true; if (unsigned N = getStructNumBases()) { const CXXRecordDecl *CD = cast<CXXRecordDecl>(RD); CXXRecordDecl::base_class_const_iterator BI = CD->bases_begin(); for (unsigned I = 0; I != N; ++I, ++BI) { assert(BI != CD->bases_end()); if (!First) Out << ", "; getStructBase(I).printPretty(Out, Ctx, BI->getType()); First = false; } } for (const auto *FI : RD->fields()) { if (!First) Out << ", "; if (FI->isUnnamedBitfield()) continue; getStructField(FI->getFieldIndex()). printPretty(Out, Ctx, FI->getType()); First = false; } Out << '}'; return; } case APValue::Union: Out << '{'; if (const FieldDecl *FD = getUnionField()) { Out << "." << *FD << " = "; getUnionValue().printPretty(Out, Ctx, FD->getType()); } Out << '}'; return; case APValue::MemberPointer: // FIXME: This is not enough to unambiguously identify the member in a // multiple-inheritance scenario. if (const ValueDecl *VD = getMemberPointerDecl()) { Out << '&' << *cast<CXXRecordDecl>(VD->getDeclContext()) << "::" << *VD; return; } Out << "0"; return; case APValue::AddrLabelDiff: Out << "&&" << getAddrLabelDiffLHS()->getLabel()->getName(); Out << " - "; Out << "&&" << getAddrLabelDiffRHS()->getLabel()->getName(); return; } llvm_unreachable("Unknown APValue kind!"); }
/// This callback is used to infer the types for Class variables. This info is
/// used later to validate messages that are sent to classes. Class variables
/// are initialized by invoking the 'class' method on a class.
/// This method is also used to infer the type information for the return
/// types.
// TODO: right now it only tracks generic types. Extend this to track every
// type in the DynamicTypeMap and diagnose type errors!
void DynamicTypePropagation::checkPostObjCMessage(const ObjCMethodCall &M,
                                                  CheckerContext &C) const {
  const ObjCMessageExpr *MessageExpr = M.getOriginExpr();

  // Everything below keys off the return symbol; nothing to do when the
  // return value is not symbolic.
  SymbolRef RetSym = M.getReturnValue().getAsSymbol();
  if (!RetSym)
    return;

  Selector Sel = MessageExpr->getSelector();
  ProgramStateRef State = C.getState();
  // Inference for class variables.
  // We are only interested in cases where the class method is invoked on a
  // class. This method is provided by the runtime and available on all
  // classes.
  if (MessageExpr->getReceiverKind() == ObjCMessageExpr::Class &&
      Sel.getAsString() == "class") {
    QualType ReceiverType = MessageExpr->getClassReceiver();
    // NOTE(review): getAs<ObjCObjectType>() may return null in principle;
    // this code assumes a Class-kind receiver always has an ObjC object
    // type -- confirm before the dereferences below.
    const auto *ReceiverClassType = ReceiverType->getAs<ObjCObjectType>();
    QualType ReceiverClassPointerType =
        C.getASTContext().getObjCObjectPointerType(
            QualType(ReceiverClassType, 0));

    // Only specialized (generic) receiver types carry type-argument
    // information worth tracking.
    if (!ReceiverClassType->isSpecialized())
      return;
    const auto *InferredType =
        ReceiverClassPointerType->getAs<ObjCObjectPointerType>();
    assert(InferredType);

    State = State->set<MostSpecializedTypeArgsMap>(RetSym, InferredType);
    C.addTransition(State);
    return;
  }

  // Tracking for return types.
  SymbolRef RecSym = M.getReceiverSVal().getAsSymbol();
  if (!RecSym)
    return;

  // Nothing to infer unless a specialized type is already being tracked for
  // the receiver symbol.
  const ObjCObjectPointerType *const *TrackedType =
      State->get<MostSpecializedTypeArgsMap>(RecSym);
  if (!TrackedType)
    return;

  ASTContext &ASTCtxt = C.getASTContext();
  const ObjCMethodDecl *Method =
      findMethodDecl(MessageExpr, *TrackedType, ASTCtxt);
  if (!Method)
    return;

  // Substitute the tracked type arguments into the method's return type.
  Optional<ArrayRef<QualType>> TypeArgs =
      (*TrackedType)->getObjCSubstitutions(Method->getDeclContext());
  if (!TypeArgs)
    return;

  QualType ResultType =
      getReturnTypeForMethod(Method, *TypeArgs, *TrackedType, ASTCtxt);
  // The static type is the same as the deduced type.
  if (ResultType.isNull())
    return;

  const MemRegion *RetRegion = M.getReturnValue().getAsRegion();
  ExplodedNode *Pred = C.getPredecessor();
  // When there is an entry available for the return symbol in DynamicTypeMap,
  // the call was inlined, and the information in the DynamicTypeMap should
  // be precise.
  if (RetRegion && !State->get<DynamicTypeMap>(RetRegion)) {
    // TODO: we have duplicated information in DynamicTypeMap and
    // MostSpecializedTypeArgsMap. We should only store anything in the
    // latter if the stored data differs from the one stored in the former.
    State = setDynamicTypeInfo(State, RetRegion, ResultType,
                               /*CanBeSubclass=*/true);
    Pred = C.addTransition(State);
  }

  const auto *ResultPtrType = ResultType->getAs<ObjCObjectPointerType>();

  if (!ResultPtrType || ResultPtrType->isUnspecialized())
    return;

  // When the result is a specialized type and it is not tracked yet, track it
  // for the result symbol.
  if (!State->get<MostSpecializedTypeArgsMap>(RetSym)) {
    State = State->set<MostSpecializedTypeArgsMap>(RetSym, ResultPtrType);
    C.addTransition(State, Pred);
  }
}
/// \brief Build a new nested-name-specifier for "identifier::", as described
/// by ActOnCXXNestedNameSpecifier.
///
/// \param S Scope in which the nested-name-specifier occurs.
/// \param Identifier Identifier in the sequence "identifier" "::".
/// \param IdentifierLoc Location of the \p Identifier.
/// \param CCLoc Location of "::" following Identifier.
/// \param ObjectType Type of postfix expression if the nested-name-specifier
///        occurs in a construct like: <tt>ptr->nns::f</tt>.
/// \param EnteringContext If true, enter the context specified by the
///        nested-name-specifier.
/// \param SS Optional nested name specifier preceding the identifier.
/// \param ScopeLookupResult Provides the result of name lookup within the
///        scope of the nested-name-specifier that was computed at template
///        definition time.
/// \param ErrorRecoveryLookup Specifies whether the method is called to
///        improve error recovery and what kind of recovery is performed.
/// \param IsCorrectedToColon If not null, suggestions to replace '::' with
///        ':' are allowed. The bool pointed to by this parameter is set to
///        'true' if the identifier is treated as if it were followed by ':',
///        not '::'.
///
/// This routine differs only slightly from ActOnCXXNestedNameSpecifier, in
/// that it contains an extra parameter \p ScopeLookupResult, which provides
/// the result of name lookup within the scope of the nested-name-specifier
/// that was computed at template definition time.
///
/// If ErrorRecoveryLookup is true, then this call is used to improve error
/// recovery. This means that it should not emit diagnostics, and it should
/// just return true on failure. It also means it should only return a valid
/// scope if it *knows* that the result is correct. It should not return in a
/// dependent context, for example. Nor will it extend \p SS with the scope
/// specifier.
bool Sema::BuildCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier,
                                       SourceLocation IdentifierLoc,
                                       SourceLocation CCLoc,
                                       QualType ObjectType,
                                       bool EnteringContext, CXXScopeSpec &SS,
                                       NamedDecl *ScopeLookupResult,
                                       bool ErrorRecoveryLookup,
                                       bool *IsCorrectedToColon) {
  LookupResult Found(*this, &Identifier, IdentifierLoc,
                     LookupNestedNameSpecifierName);

  // Determine where to perform name lookup.
  DeclContext *LookupCtx = nullptr;
  bool isDependent = false;
  if (IsCorrectedToColon)
    *IsCorrectedToColon = false;
  if (!ObjectType.isNull()) {
    // This nested-name-specifier occurs in a member access expression, e.g.,
    // x->B::f, and we are looking into the type of the object.
    assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist");
    LookupCtx = computeDeclContext(ObjectType);
    isDependent = ObjectType->isDependentType();
  } else if (SS.isSet()) {
    // This nested-name-specifier occurs after another nested-name-specifier,
    // so look into the context associated with the prior
    // nested-name-specifier.
    LookupCtx = computeDeclContext(SS, EnteringContext);
    isDependent = isDependentScopeSpecifier(SS);
    Found.setContextRange(SS.getRange());
  }

  bool ObjectTypeSearchedInScope = false;
  if (LookupCtx) {
    // Perform "qualified" name lookup into the declaration context we
    // computed, which is either the type of the base of a member access
    // expression or the declaration context associated with a prior
    // nested-name-specifier.

    // The declaration context must be complete.
    if (!LookupCtx->isDependentContext() &&
        RequireCompleteDeclContext(SS, LookupCtx))
      return true;

    LookupQualifiedName(Found, LookupCtx);

    if (!ObjectType.isNull() && Found.empty()) {
      // C++ [basic.lookup.classref]p4:
      //   If the id-expression in a class member access is a qualified-id of
      //   the form
      //
      //     class-name-or-namespace-name::...
      //
      //   the class-name-or-namespace-name following the . or -> operator is
      //   looked up both in the context of the entire postfix-expression and
      //   in the scope of the class of the object expression. If the name is
      //   found only in the scope of the class of the object expression, the
      //   name shall refer to a class-name. If the name is found only in the
      //   context of the entire postfix-expression, the name shall refer to a
      //   class-name or namespace-name. [...]
      //
      // Qualified name lookup into a class will not find a namespace-name,
      // so we do not need to diagnose that case specifically. However,
      // this qualified name lookup may find nothing. In that case, perform
      // unqualified name lookup in the given scope (if available) or
      // reconstruct the result from when name lookup was performed at
      // template definition time.
      if (S)
        LookupName(Found, S);
      else if (ScopeLookupResult)
        Found.addDecl(ScopeLookupResult);

      ObjectTypeSearchedInScope = true;
    }
  } else if (!isDependent) {
    // Perform unqualified name lookup in the current scope.
    LookupName(Found, S);
  }

  // If we performed lookup into a dependent context and did not find
  // anything, that's fine: just build a dependent nested-name-specifier.
  if (Found.empty() && isDependent &&
      !(LookupCtx && LookupCtx->isRecord() &&
        (!cast<CXXRecordDecl>(LookupCtx)->hasDefinition() ||
         !cast<CXXRecordDecl>(LookupCtx)->hasAnyDependentBases()))) {
    // Don't speculate if we're just trying to improve error recovery.
    if (ErrorRecoveryLookup)
      return true;

    // We were not able to compute the declaration context for a dependent
    // base object type or prior nested-name-specifier, so this
    // nested-name-specifier refers to an unknown specialization. Just build
    // a dependent nested-name-specifier.
    SS.Extend(Context, &Identifier, IdentifierLoc, CCLoc);
    return false;
  }

  // FIXME: Deal with ambiguities cleanly.

  if (Found.empty() && !ErrorRecoveryLookup) {
    // If identifier is not found as class-name-or-namespace-name, but is
    // found as other entity, don't look for typos.
    LookupResult R(*this, Found.getLookupNameInfo(), LookupOrdinaryName);
    if (LookupCtx)
      LookupQualifiedName(R, LookupCtx);
    else if (S && !isDependent)
      LookupName(R, S);
    if (!R.empty()) {
      // The identifier is found in ordinary lookup. If correction to colon
      // is allowed, suggest replacement to ':'.
      if (IsCorrectedToColon) {
        *IsCorrectedToColon = true;
        Diag(CCLoc, diag::err_nested_name_spec_is_not_class)
            << &Identifier << getLangOpts().CPlusPlus
            << FixItHint::CreateReplacement(CCLoc, ":");
        if (NamedDecl *ND = R.getAsSingle<NamedDecl>())
          Diag(ND->getLocation(), diag::note_declared_at);
        return true;
      }
      // Replacement '::' -> ':' is not allowed, just issue respective error.
      Diag(R.getNameLoc(), diag::err_expected_class_or_namespace)
          << &Identifier << getLangOpts().CPlusPlus;
      if (NamedDecl *ND = R.getAsSingle<NamedDecl>())
        Diag(ND->getLocation(), diag::note_entity_declared_at) << &Identifier;
      return true;
    }
  }

  if (Found.empty() && !ErrorRecoveryLookup && !getLangOpts().MSVCCompat) {
    // We haven't found anything, and we're not recovering from a
    // different kind of error, so look for typos.
    DeclarationName Name = Found.getLookupName();
    Found.clear();
    if (TypoCorrection Corrected = CorrectTypo(
            Found.getLookupNameInfo(), Found.getLookupKind(), S, &SS,
            llvm::make_unique<NestedNameSpecifierValidatorCCC>(*this),
            CTK_ErrorRecovery, LookupCtx, EnteringContext)) {
      if (LookupCtx) {
        bool DroppedSpecifier =
            Corrected.WillReplaceSpecifier() &&
            Name.getAsString() == Corrected.getAsString(getLangOpts());
        if (DroppedSpecifier)
          SS.clear();
        diagnoseTypo(Corrected, PDiag(diag::err_no_member_suggest)
                                    << Name << LookupCtx << DroppedSpecifier
                                    << SS.getRange());
      } else
        diagnoseTypo(Corrected,
                     PDiag(diag::err_undeclared_var_use_suggest) << Name);
      if (NamedDecl *ND = Corrected.getCorrectionDecl())
        Found.addDecl(ND);
      Found.setLookupName(Corrected.getCorrection());
    } else {
      Found.setLookupName(&Identifier);
    }
  }

  NamedDecl *SD = Found.getAsSingle<NamedDecl>();
  bool IsExtension = false;
  bool AcceptSpec = isAcceptableNestedNameSpecifier(SD, &IsExtension);
  if (!AcceptSpec && IsExtension) {
    // Accept enum names as an extension, with a warning.
    AcceptSpec = true;
    Diag(IdentifierLoc, diag::ext_nested_name_spec_is_enum);
  }
  if (AcceptSpec) {
    if (!ObjectType.isNull() && !ObjectTypeSearchedInScope &&
        !getLangOpts().CPlusPlus11) {
      // C++03 [basic.lookup.classref]p4:
      //   [...] If the name is found in both contexts, the
      //   class-name-or-namespace-name shall refer to the same entity.
      //
      // We already found the name in the scope of the object. Now, look
      // into the current scope (the scope of the postfix-expression) to
      // see if we can find the same name there. As above, if there is no
      // scope, reconstruct the result from the template instantiation
      // itself.
      //
      // Note that C++11 does *not* perform this redundant lookup.
      NamedDecl *OuterDecl;
      if (S) {
        LookupResult FoundOuter(*this, &Identifier, IdentifierLoc,
                                LookupNestedNameSpecifierName);
        LookupName(FoundOuter, S);
        OuterDecl = FoundOuter.getAsSingle<NamedDecl>();
      } else
        OuterDecl = ScopeLookupResult;

      if (isAcceptableNestedNameSpecifier(OuterDecl) &&
          OuterDecl->getCanonicalDecl() != SD->getCanonicalDecl() &&
          (!isa<TypeDecl>(OuterDecl) || !isa<TypeDecl>(SD) ||
           !Context.hasSameType(
               Context.getTypeDeclType(cast<TypeDecl>(OuterDecl)),
               Context.getTypeDeclType(cast<TypeDecl>(SD))))) {
        if (ErrorRecoveryLookup)
          return true;

        Diag(IdentifierLoc, diag::err_nested_name_member_ref_lookup_ambiguous)
            << &Identifier;
        Diag(SD->getLocation(), diag::note_ambig_member_ref_object_type)
            << ObjectType;
        Diag(OuterDecl->getLocation(), diag::note_ambig_member_ref_scope);

        // Fall through so that we'll pick the name we found in the object
        // type, since that's probably what the user wanted anyway.
      }
    }

    if (auto *TD = dyn_cast_or_null<TypedefNameDecl>(SD))
      MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false);

    // If we're just performing this lookup for error-recovery purposes,
    // don't extend the nested-name-specifier. Just return now.
    if (ErrorRecoveryLookup)
      return false;

    // The use of a nested name specifier may trigger deprecation warnings.
    DiagnoseUseOfDecl(SD, CCLoc);

    if (NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(SD)) {
      SS.Extend(Context, Namespace, IdentifierLoc, CCLoc);
      return false;
    }

    if (NamespaceAliasDecl *Alias = dyn_cast<NamespaceAliasDecl>(SD)) {
      SS.Extend(Context, Alias, IdentifierLoc, CCLoc);
      return false;
    }

    // Otherwise SD is a type; build a TypeLoc of the appropriate node kind
    // so the nested-name-specifier records source locations.
    QualType T = Context.getTypeDeclType(cast<TypeDecl>(SD));
    TypeLocBuilder TLB;
    if (isa<InjectedClassNameType>(T)) {
      InjectedClassNameTypeLoc InjectedTL
        = TLB.push<InjectedClassNameTypeLoc>(T);
      InjectedTL.setNameLoc(IdentifierLoc);
    } else if (isa<RecordType>(T)) {
      RecordTypeLoc RecordTL = TLB.push<RecordTypeLoc>(T);
      RecordTL.setNameLoc(IdentifierLoc);
    } else if (isa<TypedefType>(T)) {
      TypedefTypeLoc TypedefTL = TLB.push<TypedefTypeLoc>(T);
      TypedefTL.setNameLoc(IdentifierLoc);
    } else if (isa<EnumType>(T)) {
      EnumTypeLoc EnumTL = TLB.push<EnumTypeLoc>(T);
      EnumTL.setNameLoc(IdentifierLoc);
    } else if (isa<TemplateTypeParmType>(T)) {
      TemplateTypeParmTypeLoc TemplateTypeTL
        = TLB.push<TemplateTypeParmTypeLoc>(T);
      TemplateTypeTL.setNameLoc(IdentifierLoc);
    } else if (isa<UnresolvedUsingType>(T)) {
      UnresolvedUsingTypeLoc UnresolvedTL
        = TLB.push<UnresolvedUsingTypeLoc>(T);
      UnresolvedTL.setNameLoc(IdentifierLoc);
    } else if (isa<SubstTemplateTypeParmType>(T)) {
      SubstTemplateTypeParmTypeLoc TL
        = TLB.push<SubstTemplateTypeParmTypeLoc>(T);
      TL.setNameLoc(IdentifierLoc);
    } else if (isa<SubstTemplateTypeParmPackType>(T)) {
      SubstTemplateTypeParmPackTypeLoc TL
        = TLB.push<SubstTemplateTypeParmPackTypeLoc>(T);
      TL.setNameLoc(IdentifierLoc);
    } else {
      llvm_unreachable("Unhandled TypeDecl node in nested-name-specifier");
    }

    if (T->isEnumeralType())
      Diag(IdentifierLoc, diag::warn_cxx98_compat_enum_nested_name_spec);

    SS.Extend(Context, SourceLocation(), TLB.getTypeLocInContext(Context, T),
              CCLoc);
    return false;
  }

  // Otherwise, we have an error case. If we don't want diagnostics, just
  // return an error now.
  if (ErrorRecoveryLookup)
    return true;

  // If we didn't find anything during our lookup, try again with
  // ordinary name lookup, which can help us produce better error
  // messages.
  if (Found.empty()) {
    Found.clear(LookupOrdinaryName);
    LookupName(Found, S);
  }

  // In Microsoft mode, if we are within a templated function and we can't
  // resolve Identifier, then extend the SS with Identifier. This will have
  // the effect of resolving Identifier during template instantiation.
  // The goal is to be able to resolve a function call whose
  // nested-name-specifier is located inside a dependent base class.
  // Example:
  //
  // class C {
  // public:
  //    static void foo2() {  }
  // };
  // template <class T> class A { public: typedef C D; };
  //
  // template <class T> class B : public A<T> {
  // public:
  //   void foo() { D::foo2(); }
  // };
  if (getLangOpts().MSVCCompat) {
    DeclContext *DC = LookupCtx ? LookupCtx : CurContext;
    if (DC->isDependentContext() && DC->isFunctionOrMethod()) {
      CXXRecordDecl *ContainingClass = dyn_cast<CXXRecordDecl>(DC->getParent());
      if (ContainingClass && ContainingClass->hasAnyDependentBases()) {
        Diag(IdentifierLoc, diag::ext_undeclared_unqual_id_with_dependent_base)
            << &Identifier << ContainingClass;
        SS.Extend(Context, &Identifier, IdentifierLoc, CCLoc);
        return false;
      }
    }
  }

  if (!Found.empty()) {
    if (TypeDecl *TD = Found.getAsSingle<TypeDecl>())
      Diag(IdentifierLoc, diag::err_expected_class_or_namespace)
          << QualType(TD->getTypeForDecl(), 0) << getLangOpts().CPlusPlus;
    else {
      Diag(IdentifierLoc, diag::err_expected_class_or_namespace)
          << &Identifier << getLangOpts().CPlusPlus;
      if (NamedDecl *ND = Found.getAsSingle<NamedDecl>())
        Diag(ND->getLocation(), diag::note_entity_declared_at) << &Identifier;
    }
  } else if (SS.isSet())
    Diag(IdentifierLoc, diag::err_no_member) << &Identifier << LookupCtx
                                             << SS.getRange();
  else
    Diag(IdentifierLoc, diag::err_undeclared_var_use) << &Identifier;

  return true;
}
bool DeclarationName::isDependentName() const { QualType T = getCXXNameType(); return !T.isNull() && T->isDependentType(); }
// We need to artificially create:
// cling::valuePrinterInternal::Select((void*) raw_ostream,
//                                     (ASTContext)Ctx, (Expr*)E, &i);
Expr* ValuePrinterSynthesizer::SynthesizeCppVP(Expr* E) {
  QualType QT = E->getType();
  // For now we skip void and function pointer types.
  if (QT.isNull() || QT->isVoidType())
    return 0;

  // 1. Call gCling->getValuePrinterStream()
  // 1.1. Find gCling
  SourceLocation NoSLoc = SourceLocation();

  // Resolve the cling::valuePrinterInternal namespace, then look up the
  // overload set of Select inside it.
  NamespaceDecl* NSD = utils::Lookup::Namespace(m_Sema, "cling");
  NSD = utils::Lookup::Namespace(m_Sema, "valuePrinterInternal", NSD);

  DeclarationName PVName = &m_Context->Idents.get("Select");
  LookupResult R(*m_Sema, PVName, NoSLoc, Sema::LookupOrdinaryName,
                 Sema::ForRedeclaration);
  assert(NSD && "There must be a valid namespace.");
  m_Sema->LookupQualifiedName(R, NSD);
  assert(!R.empty() && "Cannot find valuePrinterInternal::Select(...)");

  // Build an unresolved lookup expression for Select; overload resolution
  // happens later in ActOnCallExpr.
  CXXScopeSpec CSS;
  Expr* UnresolvedLookup
    = m_Sema->BuildDeclarationNameExpr(CSS, R, /*ADL*/ false).take();

  // 2.4. Prepare the params

  // 2.4.1 Lookup the llvm::raw_ostream
  CXXRecordDecl* RawOStreamRD
    = dyn_cast<CXXRecordDecl>(utils::Lookup::Named(m_Sema, "raw_ostream",
                                         utils::Lookup::Namespace(m_Sema,
                                                                  "llvm")));
  assert(RawOStreamRD && "Declaration of the expr not found!");
  QualType RawOStreamRDTy = m_Context->getTypeDeclType(RawOStreamRD);
  // 2.4.2 Lookup the expr type
  CXXRecordDecl* ExprRD
    = dyn_cast<CXXRecordDecl>(utils::Lookup::Named(m_Sema, "Expr",
                                         utils::Lookup::Namespace(m_Sema,
                                                                  "clang")));
  assert(ExprRD && "Declaration of the expr not found!");
  QualType ExprRDTy = m_Context->getTypeDeclType(ExprRD);
  // 2.4.3 Lookup ASTContext type
  CXXRecordDecl* ASTContextRD
    = dyn_cast<CXXRecordDecl>(utils::Lookup::Named(m_Sema, "ASTContext",
                                         utils::Lookup::Namespace(m_Sema,
                                                                  "clang")));
  assert(ASTContextRD && "Declaration of the expr not found!");
  QualType ASTContextRDTy = m_Context->getTypeDeclType(ASTContextRD);

  // Synthesize the arguments as C-style casts of raw addresses to pointers
  // of the looked-up record types.
  Expr* RawOStreamTy
    = utils::Synthesize::CStyleCastPtrExpr(m_Sema, RawOStreamRDTy,
                                           (uint64_t)m_ValuePrinterStream.get()
                                           );

  Expr* ExprTy = utils::Synthesize::CStyleCastPtrExpr(m_Sema, ExprRDTy,
                                                      (uint64_t)E);
  Expr* ASTContextTy
    = utils::Synthesize::CStyleCastPtrExpr(m_Sema, ASTContextRDTy,
                                           (uint64_t)m_Context);

  // E might contain temporaries. This means that the topmost expr is
  // ExprWithCleanups. This contains the information about the temporaries and
  // signals when they should be destroyed.
  // Here we replace E with call to value printer and we must extend the life
  // time of those temporaries to the end of the new CallExpr.
  bool NeedsCleanup = false;
  if (ExprWithCleanups* EWC = dyn_cast<ExprWithCleanups>(E)) {
    E = EWC->getSubExpr();
    NeedsCleanup = true;
  }

  if (QT->isFunctionPointerType()) {
    // convert func ptr to void*:
    E = utils::Synthesize::CStyleCastPtrExpr(m_Sema, m_Context->VoidPtrTy, E);
  }

  llvm::SmallVector<Expr*, 4> CallArgs;
  CallArgs.push_back(RawOStreamTy);
  CallArgs.push_back(ExprTy);
  CallArgs.push_back(ASTContextTy);
  CallArgs.push_back(E);

  Scope* S = m_Sema->getScopeForContext(m_Sema->CurContext);
  // Here we expect a template instantiation. We need to open the transaction
  // that we are currently working with.
  Transaction::State oldState = getTransaction()->getState();
  getTransaction()->setState(Transaction::kCollecting);
  Expr* Result = m_Sema->ActOnCallExpr(S, UnresolvedLookup, NoSLoc,
                                       CallArgs, NoSLoc).take();
  getTransaction()->setState(oldState);
  Result = m_Sema->ActOnFinishFullExpr(Result).take();

  // Re-wrap the call in ExprWithCleanups if the original expression had
  // temporaries that must outlive the new CallExpr.
  if (NeedsCleanup && !isa<ExprWithCleanups>(Result)) {
    llvm::ArrayRef<ExprWithCleanups::CleanupObject> Cleanups;
    ExprWithCleanups* EWC
      = ExprWithCleanups::Create(*m_Context, Result, Cleanups);
    Result = EWC;
  }

  assert(Result && "Cannot create value printer!");

  return Result;
}
bool Declarator::isDeclarationOfFunction() const { for (unsigned i = 0, i_end = DeclTypeInfo.size(); i < i_end; ++i) { switch (DeclTypeInfo[i].Kind) { case DeclaratorChunk::Function: return true; case DeclaratorChunk::Paren: continue; case DeclaratorChunk::Pointer: case DeclaratorChunk::Reference: case DeclaratorChunk::Array: case DeclaratorChunk::BlockPointer: case DeclaratorChunk::MemberPointer: return false; } llvm_unreachable("Invalid type chunk"); } switch (DS.getTypeSpecType()) { case TST_atomic: case TST_auto: case TST_bool: case TST_char: case TST_char16: case TST_char32: case TST_class: case TST_decimal128: case TST_decimal32: case TST_decimal64: case TST_double: case TST_enum: case TST_error: case TST_float: case TST_half: case TST_int: case TST_int128: case TST_struct: case TST_interface: case TST_union: case TST_unknown_anytype: case TST_unspecified: case TST_void: case TST_wchar: case TST_image1d_t: case TST_image1d_array_t: case TST_image1d_buffer_t: case TST_image2d_t: case TST_image2d_array_t: case TST_image3d_t: case TST_sampler_t: case TST_event_t: return false; case TST_decltype_auto: // This must have an initializer, so can't be a function declaration, // even if the initializer has function type. return false; case TST_decltype: case TST_typeofExpr: if (Expr *E = DS.getRepAsExpr()) return E->getType()->isFunctionType(); return false; case TST_underlyingType: case TST_typename: case TST_typeofType: { QualType QT = DS.getRepAsType().get(); if (QT.isNull()) return false; if (const LocInfoType *LIT = dyn_cast<LocInfoType>(QT)) QT = LIT->getType(); if (QT.isNull()) return false; return QT->isFunctionType(); } } llvm_unreachable("Invalid TypeSpecType!"); }
/// \brief Figure out if an expression could be turned into a call. /// /// Use this when trying to recover from an error where the programmer may have /// written just the name of a function instead of actually calling it. /// /// \param E - The expression to examine. /// \param ZeroArgCallReturnTy - If the expression can be turned into a call /// with no arguments, this parameter is set to the type returned by such a /// call; otherwise, it is set to an empty QualType. /// \param OverloadSet - If the expression is an overloaded function /// name, this parameter is populated with the decls of the various overloads. bool Sema::isExprCallable(const Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &OverloadSet) { ZeroArgCallReturnTy = QualType(); OverloadSet.clear(); if (E.getType() == Context.OverloadTy) { OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E)); const OverloadExpr *Overloads = FR.Expression; for (OverloadExpr::decls_iterator it = Overloads->decls_begin(), DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) { OverloadSet.addDecl(*it); // Check whether the function is a non-template which takes no // arguments. if (const FunctionDecl *OverloadDecl = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) { if (OverloadDecl->getMinRequiredArguments() == 0) ZeroArgCallReturnTy = OverloadDecl->getResultType(); } } // Ignore overloads that are pointer-to-member constants. if (FR.HasFormOfMemberPointer) return false; return true; } if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) { if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) { if (Fun->getMinRequiredArguments() == 0) ZeroArgCallReturnTy = Fun->getResultType(); return true; } } // We don't have an expression that's convenient to get a FunctionDecl from, // but we can at least check if the type is "function of 0 arguments". 
QualType ExprTy = E.getType(); const FunctionType *FunTy = NULL; QualType PointeeTy = ExprTy->getPointeeType(); if (!PointeeTy.isNull()) FunTy = PointeeTy->getAs<FunctionType>(); if (!FunTy) FunTy = ExprTy->getAs<FunctionType>(); if (!FunTy && ExprTy == Context.BoundMemberTy) { // Look for the bound-member type. If it's still overloaded, give up, // although we probably should have fallen into the OverloadExpr case above // if we actually have an overloaded bound member. QualType BoundMemberTy = Expr::findBoundMemberType(&E); if (!BoundMemberTy.isNull()) FunTy = BoundMemberTy->castAs<FunctionType>(); } if (const FunctionProtoType *FPT = dyn_cast_or_null<FunctionProtoType>(FunTy)) { if (FPT->getNumArgs() == 0) ZeroArgCallReturnTy = FunTy->getResultType(); return true; } return false; }
// Determine whether this declarator declares a function (as opposed to a
// variable, pointer-to-function, etc.). This variant carries the
// Scaffold-specific type-spec cases (cbit/qbit/qstruct/qunion).
bool Declarator::isDeclarationOfFunction() const {
  // Walk outward through the declarator chunks; the innermost non-paren
  // chunk decides the answer.
  for (unsigned i = 0, i_end = DeclTypeInfo.size(); i < i_end; ++i) {
    switch (DeclTypeInfo[i].Kind) {
    case DeclaratorChunk::Function:
      return true;
    case DeclaratorChunk::Paren:
      // Parentheses are transparent; keep looking outward.
      continue;
    case DeclaratorChunk::Pointer:
    case DeclaratorChunk::Reference:
    case DeclaratorChunk::Array:
    case DeclaratorChunk::BlockPointer:
    case DeclaratorChunk::MemberPointer:
      return false;
    }
    llvm_unreachable("Invalid type chunk");
  }

  // No chunk decided; fall back to the declaration-specifier type.
  switch (DS.getTypeSpecType()) {
    case TST_atomic:
    case TST_auto:
    case TST_bool:
    case TST_char:
    case TST_char16:
    case TST_char32:
    case TST_class:
    case TST_decimal128:
    case TST_decimal32:
    case TST_decimal64:
    case TST_double:
    case TST_enum:
    case TST_error:
    case TST_float:
    case TST_half:
    case TST_int:
    case TST_int128:
    case TST_struct:
    case TST_union:
    case TST_unknown_anytype:
    case TST_unspecified:
    case TST_void:
    case TST_wchar:
    case TST_cbit: //Scaffold addition
    case TST_qbit: //Scaffold addition
    case TST_qstruct:
    case TST_qunion:
      return false;

    case TST_decltype:
    case TST_typeofExpr:
      if (Expr *E = DS.getRepAsExpr())
        return E->getType()->isFunctionType();
      return false;

    case TST_underlyingType:
    case TST_typename:
    case TST_typeofType: {
      QualType QT = DS.getRepAsType().get();
      if (QT.isNull())
        return false;

      // Strip off the parser's LocInfoType wrapper, if present.
      if (const LocInfoType *LIT = dyn_cast<LocInfoType>(QT))
        QT = LIT->getType();

      if (QT.isNull())
        return false;

      return QT->isFunctionType();
    }
  }
  llvm_unreachable("Invalid TypeSpecType!");
}
/// Match callback: classify where the matched AST node allocates its value
/// (global, automatic/stack, heap, or temporary) and report any
/// annotation-based scope violations for the allocated type.
void ScopeChecker::check(
    const MatchFinder::MatchResult &Result) {
  // There are a variety of different reasons why something could be allocated
  AllocationVariety Variety = AV_None;
  SourceLocation Loc;
  QualType T;

  if (const ParmVarDecl *D =
          Result.Nodes.getNodeAs<ParmVarDecl>("parm_vardecl")) {
    // Default arguments that have not been parsed or instantiated yet cannot
    // be inspected; skip them.
    if (D->hasUnparsedDefaultArg() || D->hasUninstantiatedDefaultArg()) {
      return;
    }
    if (const Expr *Default = D->getDefaultArg()) {
      if (const MaterializeTemporaryExpr *E =
              dyn_cast<MaterializeTemporaryExpr>(Default)) {
        // We have just found a ParmVarDecl which has, as its default argument,
        // a MaterializeTemporaryExpr. We mark that MaterializeTemporaryExpr as
        // automatic, by adding it to the AutomaticTemporaryMap.
        // Reporting on this type will occur when the MaterializeTemporaryExpr
        // is matched against.
        AutomaticTemporaries[E] = D;
      }
    }
    return;
  }

  // Determine the type of allocation which we detected
  if (const VarDecl *D = Result.Nodes.getNodeAs<VarDecl>("node")) {
    if (D->hasGlobalStorage()) {
      Variety = AV_Global;
    } else {
      Variety = AV_Automatic;
    }
    T = D->getType();
    Loc = D->getLocStart();
  } else if (const CXXNewExpr *E = Result.Nodes.getNodeAs<CXXNewExpr>("node")) {
    // New allocates things on the heap.
    // We don't consider placement new to do anything, as it doesn't actually
    // allocate the storage, and thus gives us no useful information.
    if (!isPlacementNew(E)) {
      Variety = AV_Heap;
      T = E->getAllocatedType();
      Loc = E->getLocStart();
    }
  } else if (const MaterializeTemporaryExpr *E =
                 Result.Nodes.getNodeAs<MaterializeTemporaryExpr>("node")) {
    // Temporaries can actually have varying storage durations, due to temporary
    // lifetime extension. We consider the allocation variety of this temporary
    // to be the same as the allocation variety of its lifetime.

    // XXX We maybe should mark these lifetimes as being due to a temporary
    // which has had its lifetime extended, to improve the error messages.
    switch (E->getStorageDuration()) {
    case SD_FullExpression: {
      // Check if this temporary is allocated as a default argument!
      // if it is, we want to pretend that it is automatic.
      AutomaticTemporaryMap::iterator AutomaticTemporary =
          AutomaticTemporaries.find(E);
      if (AutomaticTemporary != AutomaticTemporaries.end()) {
        Variety = AV_Automatic;
      } else {
        Variety = AV_Temporary;
      }
    } break;

    case SD_Automatic:
      Variety = AV_Automatic;
      break;

    case SD_Thread:
    case SD_Static:
      Variety = AV_Global;
      break;

    case SD_Dynamic:
      assert(false && "I don't think that this ever should occur...");
      Variety = AV_Heap;
      break;
    }
    T = E->getType().getUnqualifiedType();
    Loc = E->getLocStart();
  } else if (const CallExpr *E = Result.Nodes.getNodeAs<CallExpr>("node")) {
    T = E->getType()->getPointeeType();
    if (!T.isNull()) {
      // This will always allocate on the heap, as the heapAllocator() check
      // was made in the matcher
      Variety = AV_Heap;
      Loc = E->getLocStart();
    }
  }

  // Error messages for incorrect allocations.
  const char* Stack = "variable of type %0 only valid on the stack";
  const char* Global = "variable of type %0 only valid as global";
  const char* Heap = "variable of type %0 only valid on the heap";
  const char* NonHeap = "variable of type %0 is not valid on the heap";
  const char* NonTemporary = "variable of type %0 is not valid in a temporary";

  const char* StackNote = "value incorrectly allocated in an automatic variable";
  const char* GlobalNote = "value incorrectly allocated in a global variable";
  const char* HeapNote = "value incorrectly allocated on the heap";
  const char* TemporaryNote = "value incorrectly allocated in a temporary";

  // Report errors depending on the annotations on the input types.
  switch (Variety) {
  case AV_None:
    return;

  case AV_Global:
    // Globals may not hold stack-only or heap-only annotated types.
    StackClass.reportErrorIfPresent(*this, T, Loc, Stack, GlobalNote);
    HeapClass.reportErrorIfPresent(*this, T, Loc, Heap, GlobalNote);
    break;

  case AV_Automatic:
    GlobalClass.reportErrorIfPresent(*this, T, Loc, Global, StackNote);
    HeapClass.reportErrorIfPresent(*this, T, Loc, Heap, StackNote);
    break;

  case AV_Temporary:
    GlobalClass.reportErrorIfPresent(*this, T, Loc, Global, TemporaryNote);
    HeapClass.reportErrorIfPresent(*this, T, Loc, Heap, TemporaryNote);
    NonTemporaryClass.reportErrorIfPresent(*this, T, Loc, NonTemporary,
                                           TemporaryNote);
    break;

  case AV_Heap:
    GlobalClass.reportErrorIfPresent(*this, T, Loc, Global, HeapNote);
    StackClass.reportErrorIfPresent(*this, T, Loc, Stack, HeapNote);
    NonHeapClass.reportErrorIfPresent(*this, T, Loc, NonHeap, HeapNote);
    break;
  }
}
/// Compute the VarRegion for variable 'D' as seen from location context 'LC',
/// selecting the memory space (global spaces, stack arguments, stack locals,
/// or unknown) the region should live in.
const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
                                                const LocationContext *LC) {
  const MemRegion *sReg = nullptr;

  if (D->hasGlobalStorage() && !D->isStaticLocal()) {

    // First handle the globals defined in system headers.
    if (C.getSourceManager().isInSystemHeader(D->getLocation())) {
      // Whitelist the system globals which often DO GET modified, assume the
      // rest are immutable.
      if (D->getName().find("errno") != StringRef::npos)
        sReg = getGlobalsRegion(MemRegion::GlobalSystemSpaceRegionKind);
      else
        sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);

    // Treat other globals as GlobalInternal unless they are constants.
    } else {
      QualType GQT = D->getType();
      const Type *GT = GQT.getTypePtrOrNull();
      // TODO: We could walk the complex types here and see if everything is
      // constified.
      if (GT && GQT.isConstQualified() && GT->isArithmeticType())
        sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
      else
        sReg = getGlobalsRegion();
    }

  // Finally handle static locals.
  } else {
    // FIXME: Once we implement scope handling, we will need to properly lookup
    // 'D' to the proper LocationContext.
    const DeclContext *DC = D->getDeclContext();
    llvm::PointerUnion<const StackFrameContext *, const VarRegion *> V =
      getStackOrCaptureRegionForDeclContext(LC, DC, D);

    // If the declaration was captured (e.g. by a block), a region already
    // exists for it; return that directly.
    if (V.is<const VarRegion*>())
      return V.get<const VarRegion*>();

    const StackFrameContext *STC = V.get<const StackFrameContext*>();

    if (!STC) {
      // FIXME: Assign a more sensible memory space to static locals
      // we see from within blocks that we analyze as top-level declarations.
      sReg = getUnknownRegion();
    } else {
      if (D->hasLocalStorage()) {
        // Parameters live in the frame's argument space; other locals in the
        // frame's stack-locals space.
        sReg = isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)
               ? static_cast<const MemRegion*>(getStackArgumentsRegion(STC))
               : static_cast<const MemRegion*>(getStackLocalsRegion(STC));
      }
      else {
        assert(D->isStaticLocal());
        const Decl *STCD = STC->getDecl();
        if (isa<FunctionDecl>(STCD) || isa<ObjCMethodDecl>(STCD))
          // Static locals of functions/methods are keyed to the code region
          // of their enclosing function so they unique per function.
          sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind,
                                  getFunctionCodeRegion(cast<NamedDecl>(STCD)));
        else if (const BlockDecl *BD = dyn_cast<BlockDecl>(STCD)) {
          // FIXME: The fallback type here is totally bogus -- though it should
          // never be queried, it will prevent uniquing with the real
          // BlockCodeRegion. Ideally we'd fix the AST so that we always had a
          // signature.
          QualType T;
          if (const TypeSourceInfo *TSI = BD->getSignatureAsWritten())
            T = TSI->getType();
          if (T.isNull())
            T = getContext().VoidTy;
          if (!T->getAs<FunctionType>())
            T = getContext().getFunctionNoProtoType(T);
          T = getContext().getBlockPointerType(T);

          const BlockCodeRegion *BTR =
            getBlockCodeRegion(BD, C.getCanonicalType(T),
                               STC->getAnalysisDeclContext());
          sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind,
                                  BTR);
        }
        else {
          sReg = getGlobalsRegion();
        }
      }
    }
  }

  return getSubRegion<VarRegion>(D, sReg);
}
// The following is common part for 'cilk vector functions' and
// 'omp declare simd' functions metadata generation.
//
/// Emit the "cilk.functions" named-metadata entries describing the vector
/// variants of 'FD' / 'Fn', one kernel-metadata node per (characteristic
/// type, vector length) combination in each group of 'Groups'.
void CodeGenModule::EmitVectorVariantsMetadata(const CGFunctionInfo &FnInfo,
                                               const FunctionDecl *FD,
                                               llvm::Function *Fn,
                                               GroupMap &Groups) {
  // Do not emit any vector variant if there is an unsupported feature.
  bool HasImplicitThis = false;
  if (!CheckElementalArguments(*this, FD, Fn, HasImplicitThis))
    return;

  llvm::LLVMContext &Context = getLLVMContext();
  ASTContext &C = getContext();

  // Common metadata nodes.
  llvm::NamedMDNode *CilkElementalMetadata =
      getModule().getOrInsertNamedMetadata("cilk.functions");

  llvm::Metadata *ElementalMDArgs[] = {
      llvm::MDString::get(Context, "elemental")
  };
  llvm::MDNode *ElementalNode = llvm::MDNode::get(Context, ElementalMDArgs);

  // Build both the masked and unmasked variants of the "mask" node by
  // reusing the same argument array with the i1 flag flipped.
  llvm::Metadata *MaskMDArgs[] = {
      llvm::MDString::get(Context, "mask"),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
          llvm::IntegerType::getInt1Ty(Context), 1))
  };
  llvm::MDNode *MaskNode = llvm::MDNode::get(Context, MaskMDArgs);
  MaskMDArgs[1] = llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
      llvm::IntegerType::getInt1Ty(Context), 0));
  llvm::MDNode *NoMaskNode = llvm::MDNode::get(Context, MaskMDArgs);

  // Parameter names are collected once (first group) and shared; see the
  // !ParameterNameNode guards below.
  SmallVector<llvm::Metadata*, 8> ParameterNameArgs;
  ParameterNameArgs.push_back(llvm::MDString::get(Context, "arg_name"));
  llvm::MDNode *ParameterNameNode = 0;

  //  // Vector variant metadata.
  //  llvm::Value *VariantMDArgs[] = {
  //    llvm::MDString::get(Context, "variant"),
  //    llvm::UndefValue::get(llvm::Type::getVoidTy(Context))
  //  };
  //  llvm::MDNode *VariantNode = llvm::MDNode::get(Context, VariantMDArgs);

  for (GroupMap::iterator GI = Groups.begin(), GE = Groups.end();
       GI != GE; ++GI) {
    CilkElementalGroup &G = GI->second;

    // Parameter information.
    QualType FirstNonStepParmType;
    SmallVector<llvm::Metadata *, 8> AligArgs;
    SmallVector<llvm::Metadata *, 8> StepArgs;
    AligArgs.push_back(llvm::MDString::get(Context, "arg_alig"));
    StepArgs.push_back(llvm::MDString::get(Context, "arg_step"));

    // Handle implicit 'this' parameter if necessary.
    if (HasImplicitThis) {
      ParameterNameArgs.push_back(llvm::MDString::get(Context, "this"));
      bool IsNonStepParm = handleParameter(*this, G, "this",
                                           StepArgs, AligArgs);
      if (IsNonStepParm)
        FirstNonStepParmType = cast<CXXMethodDecl>(FD)->getThisType(C);
    }

    // Handle explicit paramenters.
    for (unsigned I = 0; I != FD->getNumParams(); ++I) {
      const ParmVarDecl *Parm = FD->getParamDecl(I);
      StringRef ParmName = Parm->getName();
      if (!ParameterNameNode)
        ParameterNameArgs.push_back(llvm::MDString::get(Context, ParmName));
      bool IsNonStepParm = handleParameter(*this, G, ParmName,
                                           StepArgs, AligArgs);
      if (IsNonStepParm && FirstNonStepParmType.isNull())
        FirstNonStepParmType = Parm->getType();
    }

    llvm::MDNode *StepNode = llvm::MDNode::get(Context, StepArgs);
    llvm::MDNode *AligNode = llvm::MDNode::get(Context, AligArgs);
    if (!ParameterNameNode)
      ParameterNameNode = llvm::MDNode::get(Context, ParameterNameArgs);

    // If there is no vectorlengthfor() in this group, determine the
    // characteristic type. This can depend on the linear/uniform attributes,
    // so it can differ between groups.
    //
    // The rules for computing the characteristic type are:
    //
    // a) For a non-void function, the characteristic data type is the
    //    return type.
    //
    // b) If the function has any non-uniform, non-linear parameters, the
    //    the characteristic data type is the type of the first such parameter.
    //
    // c) If the characteristic data type determined by a) or b) above is
    //    struct, union, or class type which is pass-by-value (except fo
    //    the type that maps to the built-in complex data type)
    //    the characteristic data type is int.
    //
    // d) If none of the above three cases is applicable,
    //    the characteristic data type is int.
    //
    // e) For Intel Xeon Phi native and offload compilation, if the resulting
    //    characteristic data type is 8-bit or 16-bit integer data type
    //    the characteristic data type is int.
    //
    // These rules missed the reference types and we use their pointer types.
    //
    if (G.VecLengthFor.empty()) {
      QualType FnRetTy = FD->getReturnType();
      QualType CharacteristicType;
      if (!FnRetTy->isVoidType())
        CharacteristicType = FnRetTy;
      else if (!FirstNonStepParmType.isNull())
        CharacteristicType = FirstNonStepParmType.getCanonicalType();
      else
        CharacteristicType = C.IntTy;

      if (CharacteristicType->isReferenceType()) {
        // Rule above: references contribute their pointee's pointer type.
        QualType BaseTy = CharacteristicType.getNonReferenceType();
        CharacteristicType = C.getPointerType(BaseTy);
      } else if (CharacteristicType->isAggregateType())
        CharacteristicType = C.IntTy;
      // FIXME: handle Xeon Phi targets.
      G.VecLengthFor.push_back(CharacteristicType);
    }

    //  // If no mask variants are specified, generate both.
    //  if (G.Mask.empty()) {
    //    G.Mask.push_back(1);
    //    G.Mask.push_back(0);
    //  }

    // If no vector length is specified, push a dummy value to iterate over.
    if (G.VecLength.empty())
      G.VecLength.push_back(0);

    for (CilkElementalGroup::VecLengthForVector::iterator
           TI = G.VecLengthFor.begin(),
           TE = G.VecLengthFor.end();
         TI != TE; ++TI) {

      uint64_t VectorRegisterBytes = 0;
      // Inspect the current target features to determine the
      // appropriate vector size.
      // This is currently X86 specific.
      if (Target.hasFeature("avx2"))
        VectorRegisterBytes = 64;
      else if (Target.hasFeature("avx"))
        VectorRegisterBytes = 32;
      else if (Target.hasFeature("sse2"))
        VectorRegisterBytes = 16;
      else if (Target.hasFeature("sse") &&
               (*TI)->isFloatingType() &&
               C.getTypeSizeInChars(*TI).getQuantity() == 4)
        VectorRegisterBytes = 16;
      else if (Target.hasFeature("mmx") && (*TI)->isIntegerType())
        VectorRegisterBytes = 8;

      for (CilkElementalGroup::VecLengthVector::iterator
             LI = G.VecLength.begin(),
             LE = G.VecLength.end();
           LI != LE; ++LI) {

        // A zero (dummy) entry means "derive the length from the register
        // width and the characteristic type's size".
        uint64_t VL = *LI ?
          *LI :
          (CharUnits::fromQuantity(VectorRegisterBytes) /
           C.getTypeSizeInChars(*TI));

        llvm::MDNode *VecTypeNode =
          MakeVecLengthMetadata(*this, "vec_length", *TI, VL);

        {
          SmallVector <llvm::Metadata*, 7> kernelMDArgs;
          kernelMDArgs.push_back(llvm::ValueAsMetadata::get(Fn));
          kernelMDArgs.push_back(ElementalNode);
          kernelMDArgs.push_back(ParameterNameNode);
          kernelMDArgs.push_back(StepNode);
          kernelMDArgs.push_back(AligNode);
          kernelMDArgs.push_back(VecTypeNode);
          if (!G.Mask.empty())
            kernelMDArgs.push_back((G.Mask.back()==0)?(NoMaskNode):(MaskNode));
          llvm::MDNode *KernelMD = llvm::MDNode::get(Context, kernelMDArgs);
          CilkElementalMetadata->addOperand(KernelMD);
        }

        //      for (CilkElementalGroup::MaskVector::iterator
        //            MI = G.Mask.begin(),
        //            ME = G.Mask.end();
        //            MI != ME;
        //            ++MI) {
        //
        //        SmallVector <llvm::Value*, 7> kernelMDArgs;
        //        kernelMDArgs.push_back(Fn);
        //        kernelMDArgs.push_back(ElementalNode);
        //        kernelMDArgs.push_back(ParameterNameNode);
        //        kernelMDArgs.push_back(StepNode);
        //        kernelMDArgs.push_back(AligNode);
        //        kernelMDArgs.push_back(VecTypeNode);
        //        kernelMDArgs.push_back((*MI==0)?(NoMaskNode):(MaskNode));
        //        if (ProcessorNode)
        //          kernelMDArgs.push_back(ProcessorNode);
        //        kernelMDArgs.push_back(VariantNode);
        //        llvm::MDNode *KernelMD = llvm::MDNode::get(Context, kernelMDArgs);
        //        CilkElementalMetadata->addOperand(KernelMD);
        //        ElementalVariantToEmit.push_back(
        //            ElementalVariantInfo(&FnInfo, FD, Fn, KernelMD));
        //      }
      }
    }
  }
}
/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of call exit sequence)
/// 2. Bind the return value
/// 3. Run Remove dead bindings to clean up the dead symbols from the callee.
/// 4. CallExitEnd (switch to the caller context)
/// 5. PostStmt<CallExpr>
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
  // Step 1 CEBNode was generated before the call.
  const StackFrameContext *calleeCtx =
      CEBNode->getLocationContext()->getCurrentStackFrame();

  // The parent context might not be a stack frame, so make sure we
  // look up the first enclosing stack frame.
  const StackFrameContext *callerCtx =
    calleeCtx->getParent()->getCurrentStackFrame();

  const Stmt *CE = calleeCtx->getCallSite();
  ProgramStateRef state = CEBNode->getState();

  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = 0;
  const CFGBlock *Blk = 0;
  llvm::tie(LastSt, Blk) = getLastStmt(CEBNode);

  // Generate a CallEvent /before/ cleaning the state, so that we can get the
  // correct value for 'this' (if necessary).
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);

  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.

  // If the callee returns an expression, bind its value to CallExpr.
  if (CE) {
    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
      const LocationContext *LCtx = CEBNode->getLocationContext();
      SVal V = state->getSVal(RS, LCtx);

      // Ensure that the return type matches the type of the returned Expr.
      if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
        QualType ReturnedTy =
          CallEvent::getDeclaredResultType(calleeCtx->getDecl());
        if (!ReturnedTy.isNull()) {
          if (const Expr *Ex = dyn_cast<Expr>(CE)) {
            V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
                                  getStoreManager());
          }
        }
      }

      state = state->BindExpr(CE, callerCtx, V);
    }

    // Bind the constructed object value to CXXConstructExpr.
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
      loc::MemRegionVal This =
        svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
      SVal ThisV = state->getSVal(This);

      // If the constructed object is a prvalue, get its bindings.
      // Note that we have to be careful here because constructors embedded
      // in DeclStmts are not marked as lvalues.
      if (!CCE->isGLValue())
        if (const MemRegion *MR = ThisV.getAsRegion())
          if (isa<CXXTempObjectRegion>(MR))
            ThisV = state->getSVal(cast<Loc>(ThisV));

      state = state->BindExpr(CCE, callerCtx, ThisV);
    }
  }

  // Step 3: BindedRetNode -> CleanedNodes
  // If we can find a statement and a block in the inlined function, run remove
  // dead bindings before returning from the call. This is important to ensure
  // that we report the issues such as leaks in the stack contexts in which
  // they occurred.
  ExplodedNodeSet CleanedNodes;
  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
    static SimpleProgramPointTag retValBind("ExprEngine : Bind Return Value");
    PostStmt Loc(LastSt, calleeCtx, &retValBind);
    bool isNew;
    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
    BindedRetNode->addPredecessor(CEBNode, G);
    // If this node already existed, this path has been explored; stop here.
    if (!isNew)
      return;

    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
    currBldrCtx = &Ctx;
    // Here, we call the Symbol Reaper with 0 statement and callee location
    // context, telling it to clean up everything in the callee's context
    // (and its children). We use the callee's function body as a diagnostic
    // statement, with which the program point will be associated.
    removeDead(BindedRetNode, CleanedNodes, 0, calleeCtx,
               calleeCtx->getAnalysisDeclContext()->getBody(),
               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
    currBldrCtx = 0;
  } else {
    CleanedNodes.Add(CEBNode);
  }

  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
                                 E = CleanedNodes.end(); I != E; ++I) {

    // Step 4: Generate the CallExit and leave the callee's context.
    // CleanedNodes -> CEENode
    CallExitEnd Loc(calleeCtx, callerCtx);
    bool isNew;
    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();
    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
    CEENode->addPredecessor(*I, G);
    if (!isNew)
      return;

    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
    // result onto the work list.
    // CEENode -> Dst -> WorkList
    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
    SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx, &Ctx);
    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());

    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);

    ExplodedNodeSet DstPostCall;
    getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
                                               *UpdatedCall, *this,
                                               /*WasInlined=*/true);

    ExplodedNodeSet Dst;
    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
                                                        *this,
                                                        /*WasInlined=*/true);
    } else if (CE) {
      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
                                                 *this, /*WasInlined=*/true);
    } else {
      Dst.insert(DstPostCall);
    }

    // Enqueue the next element in the block.
    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
         PSI != PSE; ++PSI) {
      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
                                    calleeCtx->getIndex()+1);
    }
  }
}
/// Select the naming-style bucket that applies to declaration 'D', given
/// which styles the user has configured.  The order of the checks below is
/// significant: more specific kinds are tried before their generic
/// fallbacks.  Returns SK_Invalid when no configured style applies or the
/// declaration should not be checked at all.
static StyleKind findStyleKind(
    const NamedDecl *D,
    const std::vector<llvm::Optional<IdentifierNamingCheck::NamingStyle>>
        &NamingStyles) {
  if (isa<TypedefDecl>(D) && NamingStyles[SK_Typedef])
    return SK_Typedef;

  if (isa<TypeAliasDecl>(D) && NamingStyles[SK_TypeAlias])
    return SK_TypeAlias;

  if (const auto *Decl = dyn_cast<NamespaceDecl>(D)) {
    // Anonymous namespaces have no name to check.
    if (Decl->isAnonymousNamespace())
      return SK_Invalid;

    if (Decl->isInline() && NamingStyles[SK_InlineNamespace])
      return SK_InlineNamespace;

    if (NamingStyles[SK_Namespace])
      return SK_Namespace;
  }

  if (isa<EnumDecl>(D) && NamingStyles[SK_Enum])
    return SK_Enum;

  if (isa<EnumConstantDecl>(D)) {
    if (NamingStyles[SK_EnumConstant])
      return SK_EnumConstant;

    if (NamingStyles[SK_Constant])
      return SK_Constant;

    return SK_Invalid;
  }

  if (const auto *Decl = dyn_cast<CXXRecordDecl>(D)) {
    if (Decl->isAnonymousStructOrUnion())
      return SK_Invalid;

    // Only check the defining declaration of a record.
    if (!Decl->getCanonicalDecl()->isThisDeclarationADefinition())
      return SK_Invalid;

    if (Decl->hasDefinition() && Decl->isAbstract() &&
        NamingStyles[SK_AbstractClass])
      return SK_AbstractClass;

    // Struct and class styles fall back on one another when only one of
    // the two is configured.
    if (Decl->isStruct() && NamingStyles[SK_Struct])
      return SK_Struct;

    if (Decl->isStruct() && NamingStyles[SK_Class])
      return SK_Class;

    if (Decl->isClass() && NamingStyles[SK_Class])
      return SK_Class;

    if (Decl->isClass() && NamingStyles[SK_Struct])
      return SK_Struct;

    if (Decl->isUnion() && NamingStyles[SK_Union])
      return SK_Union;

    if (Decl->isEnum() && NamingStyles[SK_Enum])
      return SK_Enum;

    return SK_Invalid;
  }

  if (const auto *Decl = dyn_cast<FieldDecl>(D)) {
    QualType Type = Decl->getType();

    if (!Type.isNull() && Type.isConstQualified()) {
      if (NamingStyles[SK_ConstantMember])
        return SK_ConstantMember;

      if (NamingStyles[SK_Constant])
        return SK_Constant;
    }

    if (Decl->getAccess() == AS_private && NamingStyles[SK_PrivateMember])
      return SK_PrivateMember;

    if (Decl->getAccess() == AS_protected && NamingStyles[SK_ProtectedMember])
      return SK_ProtectedMember;

    if (Decl->getAccess() == AS_public && NamingStyles[SK_PublicMember])
      return SK_PublicMember;

    if (NamingStyles[SK_Member])
      return SK_Member;

    return SK_Invalid;
  }

  if (const auto *Decl = dyn_cast<ParmVarDecl>(D)) {
    QualType Type = Decl->getType();

    if (Decl->isConstexpr() && NamingStyles[SK_ConstexprVariable])
      return SK_ConstexprVariable;

    if (!Type.isNull() && Type.isConstQualified()) {
      if (NamingStyles[SK_ConstantParameter])
        return SK_ConstantParameter;

      if (NamingStyles[SK_Constant])
        return SK_Constant;
    }

    if (Decl->isParameterPack() && NamingStyles[SK_ParameterPack])
      return SK_ParameterPack;

    if (NamingStyles[SK_Parameter])
      return SK_Parameter;

    return SK_Invalid;
  }

  if (const auto *Decl = dyn_cast<VarDecl>(D)) {
    QualType Type = Decl->getType();

    if (Decl->isConstexpr() && NamingStyles[SK_ConstexprVariable])
      return SK_ConstexprVariable;

    if (!Type.isNull() && Type.isConstQualified()) {
      if (Decl->isStaticDataMember() && NamingStyles[SK_ClassConstant])
        return SK_ClassConstant;

      if (Decl->isFileVarDecl() && NamingStyles[SK_GlobalConstant])
        return SK_GlobalConstant;

      if (Decl->isStaticLocal() && NamingStyles[SK_StaticConstant])
        return SK_StaticConstant;

      if (Decl->isLocalVarDecl() && NamingStyles[SK_LocalConstant])
        return SK_LocalConstant;

      if (Decl->isFunctionOrMethodVarDecl() && NamingStyles[SK_LocalConstant])
        return SK_LocalConstant;

      if (NamingStyles[SK_Constant])
        return SK_Constant;
    }

    if (Decl->isStaticDataMember() && NamingStyles[SK_ClassMember])
      return SK_ClassMember;

    if (Decl->isFileVarDecl() && NamingStyles[SK_GlobalVariable])
      return SK_GlobalVariable;

    if (Decl->isStaticLocal() && NamingStyles[SK_StaticVariable])
      return SK_StaticVariable;

    if (Decl->isLocalVarDecl() && NamingStyles[SK_LocalVariable])
      return SK_LocalVariable;

    if (Decl->isFunctionOrMethodVarDecl() && NamingStyles[SK_LocalVariable])
      return SK_LocalVariable;

    if (NamingStyles[SK_Variable])
      return SK_Variable;

    return SK_Invalid;
  }

  if (const auto *Decl = dyn_cast<CXXMethodDecl>(D)) {
    // Skip methods whose names the user does not control (main, compiler
    // generated, operator delete, assignment operators) or that merely
    // override a base-class method (the base declares the name).
    if (Decl->isMain() || !Decl->isUserProvided() ||
        Decl->isUsualDeallocationFunction() ||
        Decl->isCopyAssignmentOperator() || Decl->isMoveAssignmentOperator() ||
        Decl->size_overridden_methods() > 0)
      return SK_Invalid;

    if (Decl->isConstexpr() && NamingStyles[SK_ConstexprMethod])
      return SK_ConstexprMethod;

    if (Decl->isConstexpr() && NamingStyles[SK_ConstexprFunction])
      return SK_ConstexprFunction;

    if (Decl->isStatic() && NamingStyles[SK_ClassMethod])
      return SK_ClassMethod;

    if (Decl->isVirtual() && NamingStyles[SK_VirtualMethod])
      return SK_VirtualMethod;

    if (Decl->getAccess() == AS_private && NamingStyles[SK_PrivateMethod])
      return SK_PrivateMethod;

    if (Decl->getAccess() == AS_protected && NamingStyles[SK_ProtectedMethod])
      return SK_ProtectedMethod;

    if (Decl->getAccess() == AS_public && NamingStyles[SK_PublicMethod])
      return SK_PublicMethod;

    if (NamingStyles[SK_Method])
      return SK_Method;

    if (NamingStyles[SK_Function])
      return SK_Function;

    return SK_Invalid;
  }

  if (const auto *Decl = dyn_cast<FunctionDecl>(D)) {
    if (Decl->isMain())
      return SK_Invalid;

    if (Decl->isConstexpr() && NamingStyles[SK_ConstexprFunction])
      return SK_ConstexprFunction;

    if (Decl->isGlobal() && NamingStyles[SK_GlobalFunction])
      return SK_GlobalFunction;

    if (NamingStyles[SK_Function])
      return SK_Function;
  }

  if (isa<TemplateTypeParmDecl>(D)) {
    if (NamingStyles[SK_TypeTemplateParameter])
      return SK_TypeTemplateParameter;

    if (NamingStyles[SK_TemplateParameter])
      return SK_TemplateParameter;

    return SK_Invalid;
  }

  if (isa<NonTypeTemplateParmDecl>(D)) {
    if (NamingStyles[SK_ValueTemplateParameter])
      return SK_ValueTemplateParameter;

    if (NamingStyles[SK_TemplateParameter])
      return SK_TemplateParameter;

    return SK_Invalid;
  }

  if (isa<TemplateTemplateParmDecl>(D)) {
    if (NamingStyles[SK_TemplateTemplateParameter])
      return SK_TemplateTemplateParameter;

    if (NamingStyles[SK_TemplateParameter])
      return SK_TemplateParameter;

    return SK_Invalid;
  }

  return SK_Invalid;
}
/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the /// pointer over the consumed characters. This returns the resultant type. static QualType DecodeTypeFromStr(const char *&Str, ASTContext &Context, Builtin::Context::GetBuiltinTypeError &Error, bool AllowTypeModifiers = true) { // Modifiers. int HowLong = 0; bool Signed = false, Unsigned = false; // Read the modifiers first. bool Done = false; while (!Done) { switch (*Str++) { default: Done = true; --Str; break; case 'S': assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); assert(!Signed && "Can't use 'S' modifier multiple times!"); Signed = true; break; case 'U': assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); assert(!Unsigned && "Can't use 'S' modifier multiple times!"); Unsigned = true; break; case 'L': assert(HowLong <= 2 && "Can't have LLLL modifier"); ++HowLong; break; } } QualType Type; // Read the base type. switch (*Str++) { default: assert(0 && "Unknown builtin type letter!"); case 'v': assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers used with 'v'!"); Type = Context.VoidTy; break; case 'f': assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers used with 'f'!"); Type = Context.FloatTy; break; case 'd': assert(HowLong < 2 && !Signed && !Unsigned && "Bad modifiers used with 'd'!"); if (HowLong) Type = Context.LongDoubleTy; else Type = Context.DoubleTy; break; case 's': assert(HowLong == 0 && "Bad modifiers used with 's'!"); if (Unsigned) Type = Context.UnsignedShortTy; else Type = Context.ShortTy; break; case 'i': if (HowLong == 3) Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; else if (HowLong == 2) Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; else if (HowLong == 1) Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; else Type = Unsigned ? 
Context.UnsignedIntTy : Context.IntTy; break; case 'c': assert(HowLong == 0 && "Bad modifiers used with 'c'!"); if (Signed) Type = Context.SignedCharTy; else if (Unsigned) Type = Context.UnsignedCharTy; else Type = Context.CharTy; break; case 'b': // boolean assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); Type = Context.BoolTy; break; case 'z': // size_t. assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); Type = Context.getSizeType(); break; case 'F': Type = Context.getCFConstantStringType(); break; case 'a': Type = Context.getBuiltinVaListType(); assert(!Type.isNull() && "builtin va list type not initialized!"); break; case 'A': // This is a "reference" to a va_list; however, what exactly // this means depends on how va_list is defined. There are two // different kinds of va_list: ones passed by value, and ones // passed by reference. An example of a by-value va_list is // x86, where va_list is a char*. An example of by-ref va_list // is x86-64, where va_list is a __va_list_tag[1]. For x86, // we want this argument to be a char*&; for x86-64, we want // it to be a __va_list_tag*. 
Type = Context.getBuiltinVaListType(); assert(!Type.isNull() && "builtin va list type not initialized!"); if (Type->isArrayType()) { Type = Context.getArrayDecayedType(Type); } else { Type = Context.getLValueReferenceType(Type); } break; case 'V': { char *End; unsigned NumElements = strtoul(Str, &End, 10); assert(End != Str && "Missing vector size"); Str = End; QualType ElementType = DecodeTypeFromStr(Str, Context, Error, false); Type = Context.getVectorType(ElementType, NumElements); break; } case 'P': { IdentifierInfo *II = &Context.Idents.get("FILE"); DeclContext::lookup_result Lookup = Context.getTranslationUnitDecl()->lookup(Context, II); if (Lookup.first != Lookup.second && isa<TypeDecl>(*Lookup.first)) { Type = Context.getTypeDeclType(cast<TypeDecl>(*Lookup.first)); break; } else { Error = Builtin::Context::GE_Missing_FILE; return QualType(); } } } if (!AllowTypeModifiers) return Type; Done = false; while (!Done) { switch (*Str++) { default: Done = true; --Str; break; case '*': Type = Context.getPointerType(Type); break; case '&': Type = Context.getLValueReferenceType(Type); break; // FIXME: There's no way to have a built-in with an rvalue ref arg. case 'C': Type = Type.getQualifiedType(QualType::Const); break; } } return Type; }
/// Print every declaration in 'DC', grouping unnamed tag declarations with
/// the declarations that use them ("struct {int x;} a,b") and choosing the
/// right terminator (';', ',' or none) for each declaration kind.
void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
  if (Policy.TerseOutput)
    return;

  if (Indent)
    Indentation += Policy.Indentation;

  SmallVector<Decl*, 2> Decls;
  for (DeclContext::decl_iterator D = DC->decls_begin(), DEnd = DC->decls_end();
       D != DEnd; ++D) {

    // Don't print ObjCIvarDecls, as they are printed when visiting the
    // containing ObjCInterfaceDecl.
    if (isa<ObjCIvarDecl>(*D))
      continue;

    // Skip over implicit declarations in pretty-printing mode.
    if (D->isImplicit())
      continue;

    // The next bits of code handles stuff like "struct {int x;} a,b"; we're
    // forced to merge the declarations because there's no other way to
    // refer to the struct in question. This limited merging is safe without
    // a bunch of other checks because it only merges declarations directly
    // referring to the tag, not typedefs.
    //
    // Check whether the current declaration should be grouped with a previous
    // unnamed struct.
    QualType CurDeclType = getDeclType(*D);
    if (!Decls.empty() && !CurDeclType.isNull()) {
      QualType BaseType = GetBaseType(CurDeclType);
      if (!BaseType.isNull() && isa<ElaboratedType>(BaseType))
        BaseType = cast<ElaboratedType>(BaseType)->getNamedType();
      if (!BaseType.isNull() && isa<TagType>(BaseType) &&
          cast<TagType>(BaseType)->getDecl() == Decls[0]) {
        Decls.push_back(*D);
        continue;
      }
    }

    // If we have a merged group waiting to be handled, handle it now.
    if (!Decls.empty())
      ProcessDeclGroup(Decls);

    // If the current declaration is an unnamed tag type, save it
    // so we can merge it with the subsequent declaration(s) using it.
    if (isa<TagDecl>(*D) && !cast<TagDecl>(*D)->getIdentifier()) {
      Decls.push_back(*D);
      continue;
    }

    // Access specifiers are printed dedented one level, followed by ':'.
    if (isa<AccessSpecDecl>(*D)) {
      Indentation -= Policy.Indentation;
      this->Indent();
      Print(D->getAccess());
      Out << ":\n";
      Indentation += Policy.Indentation;
      continue;
    }

    this->Indent();
    Visit(*D);

    // FIXME: Need to be able to tell the DeclPrinter when it should print
    // the terminator for a declaration; for now infer it from the decl kind.
    const char *Terminator = nullptr;
    if (isa<OMPThreadPrivateDecl>(*D) || isa<OMPDeclareReductionDecl>(*D))
      Terminator = nullptr;
    else if (isa<FunctionDecl>(*D) &&
             cast<FunctionDecl>(*D)->isThisDeclarationADefinition())
      Terminator = nullptr;
    else if (isa<ObjCMethodDecl>(*D) && cast<ObjCMethodDecl>(*D)->getBody())
      Terminator = nullptr;
    else if (isa<NamespaceDecl>(*D) || isa<LinkageSpecDecl>(*D) ||
             isa<ObjCImplementationDecl>(*D) ||
             isa<ObjCInterfaceDecl>(*D) ||
             isa<ObjCProtocolDecl>(*D) ||
             isa<ObjCCategoryImplDecl>(*D) ||
             isa<ObjCCategoryDecl>(*D))
      Terminator = nullptr;
    else if (isa<EnumConstantDecl>(*D)) {
      // Enumerators are comma-separated; no comma after the last one.
      DeclContext::decl_iterator Next = D;
      ++Next;
      if (Next != DEnd)
        Terminator = ",";
    } else
      Terminator = ";";

    if (Terminator)
      Out << Terminator;
    Out << "\n";

    // Declare target attribute is special one, natural spelling for the pragma
    // assumes "ending" construct so print it here.
    if (D->hasAttr<OMPDeclareTargetDeclAttr>())
      Out << "#pragma omp end declare target\n";
  }

  if (!Decls.empty())
    ProcessDeclGroup(Decls);

  if (Indent)
    Indentation -= Policy.Indentation;
}
/// For a group of @property declarations sharing one @property keyword at
/// \p AtLoc, rewrite GC ownership attributes (__weak/__strong spelled via
/// attributed types) into the corresponding ARC property attributes, then
/// remove the now-redundant type attributes.
///
/// Bails out (making no change) unless every property in the group is of
/// ObjC-retainable type and all attributed properties agree on a single
/// (weak or strong) lifetime.
static void checkAllAtProps(MigrationContext &MigrateCtx,
                            SourceLocation AtLoc,
                            IndivPropsTy &IndProps) {
  if (IndProps.empty())
    return;

  // Every property in the group must be retainable, else leave untouched.
  for (IndivPropsTy::iterator
         PI = IndProps.begin(), PE = IndProps.end(); PI != PE; ++PI) {
    QualType T = (*PI)->getType();
    if (T.isNull() || !T->isObjCRetainableType())
      return;
  }

  // Collect the attributed type locations (the written __weak/__strong)
  // together with their owning property declarations.
  SmallVector<std::pair<AttributedTypeLoc, ObjCPropertyDecl *>, 4> ATLs;
  bool hasWeak = false, hasStrong = false;
  // NOTE(review): Attrs ends up holding the attributes of the *last*
  // property visited; the properties in a group share one @property
  // clause, so presumably they have identical written attributes.
  ObjCPropertyDecl::PropertyAttributeKind
    Attrs = ObjCPropertyDecl::OBJC_PR_noattr;
  for (IndivPropsTy::iterator
         PI = IndProps.begin(), PE = IndProps.end(); PI != PE; ++PI) {
    ObjCPropertyDecl *PD = *PI;
    Attrs = PD->getPropertyAttributesAsWritten();
    TypeSourceInfo *TInfo = PD->getTypeSourceInfo();
    if (!TInfo)
      return;
    TypeLoc TL = TInfo->getTypeLoc();
    if (AttributedTypeLoc ATL = TL.getAs<AttributedTypeLoc>()) {
      ATLs.push_back(std::make_pair(ATL, PD));
      if (TInfo->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
        hasWeak = true;
      } else if (TInfo->getType().getObjCLifetime() == Qualifiers::OCL_Strong)
        hasStrong = true;
      else
        return;  // Neither weak nor strong: nothing we know how to migrate.
    }
  }
  if (ATLs.empty())
    return;
  if (hasWeak && hasStrong)
    return;  // Mixed lifetimes in one group are ambiguous; don't touch.

  TransformActions &TA = MigrateCtx.Pass.TA;
  Transaction Trans(TA);

  if (GCAttrsCollector::hasObjCImpl(
        cast<Decl>(IndProps.front()->getDeclContext()))) {
    // Class has an @implementation: just record weak @property locations
    // for later passes instead of rewriting here.
    if (hasWeak)
      MigrateCtx.AtPropsWeak.insert(AtLoc.getRawEncoding());

  } else {
    // No @implementation: rewrite the property attribute directly.
    StringRef toAttr = "strong";
    if (hasWeak) {
      // 'weak' requires runtime support; fall back to unsafe_unretained.
      if (canApplyWeak(MigrateCtx.Pass.Ctx, IndProps.front()->getType(),
                       /*AllowOnUnkwownClass=*/true))
        toAttr = "weak";
      else
        toAttr = "unsafe_unretained";
    }
    if (Attrs & ObjCPropertyDecl::OBJC_PR_assign)
      MigrateCtx.rewritePropertyAttribute("assign", toAttr, AtLoc);
    else
      MigrateCtx.addPropertyAttribute(toAttr, AtLoc);
  }

  // Remove every written ownership attribute and clear the diagnostics
  // those attributes provoked, so the transaction applies cleanly.
  for (unsigned i = 0, e = ATLs.size(); i != e; ++i) {
    SourceLocation Loc = ATLs[i].first.getAttrNameLoc();
    // If the attribute came from a macro, remove it at the expansion site.
    if (Loc.isMacroID())
      Loc = MigrateCtx.Pass.Ctx.getSourceManager()
                .getImmediateExpansionRange(Loc).first;
    TA.remove(Loc);
    TA.clearDiagnostic(diag::err_objc_property_attr_mutually_exclusive, AtLoc);
    TA.clearDiagnostic(diag::err_arc_inconsistent_property_ownership,
                       ATLs[i].second->getLocation());
    MigrateCtx.RemovedAttrSet.insert(Loc.getRawEncoding());
  }
}
/// Symbolically evaluate a dynamic_cast of \p Base to pointer/reference
/// type \p TargetType by walking the CXXBaseObjectRegion chain of the
/// base region.
///
/// \param[out] Failed set to true when the region bottoms out with
///             precise type info and the target class was never found
///             (i.e. the cast is known to fail at runtime).
/// \returns the region of the target class on success, UnknownVal
///          otherwise.
SVal StoreManager::evalDynamicCast(SVal Base, QualType TargetType,
                                   bool &Failed) {
  Failed = false;

  const MemRegion *MR = Base.getAsRegion();
  if (!MR)
    return UnknownVal();

  // Assume the derived class is a pointer or a reference to a CXX record.
  TargetType = TargetType->getPointeeType();
  assert(!TargetType.isNull());
  const CXXRecordDecl *TargetClass = TargetType->getAsCXXRecordDecl();
  if (!TargetClass && !TargetType->isVoidType())
    return UnknownVal();

  // Drill down the CXXBaseObject chains, which represent upcasts (casts from
  // derived to base).
  while (const CXXRecordDecl *MRClass = getCXXRecordType(MR)) {
    // If found the derived class, the cast succeeds.
    if (MRClass == TargetClass)
      return loc::MemRegionVal(MR);

    if (!TargetType->isVoidType()) {
      // Static upcasts are marked as DerivedToBase casts by Sema, so this will
      // only happen when multiple or virtual inheritance is involved.
      CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/true,
                         /*DetectVirtual=*/false);
      if (MRClass->isDerivedFrom(TargetClass, Paths))
        return evalDerivedToBase(loc::MemRegionVal(MR), Paths.front());
    }

    if (const CXXBaseObjectRegion *BaseR = dyn_cast<CXXBaseObjectRegion>(MR)) {
      // Drill down the chain to get the derived classes.
      MR = BaseR->getSuperRegion();
      continue;
    }

    // If this is a cast to void*, return the region.
    if (TargetType->isVoidType())
      return loc::MemRegionVal(MR);

    // Strange use of reinterpret_cast can give us paths we don't reason
    // about well, by putting in ElementRegions where we'd expect
    // CXXBaseObjectRegions. If it's a valid reinterpret_cast (i.e. if the
    // derived class has a zero offset from the base class), then it's safe
    // to strip the cast; if it's invalid, -Wreinterpret-base-class should
    // catch it. In the interest of performance, the analyzer will silently
    // do the wrong thing in the invalid case (because offsets for subregions
    // will be wrong).
    const MemRegion *Uncasted = MR->StripCasts(/*IncludeBaseCasts=*/false);
    if (Uncasted == MR) {
      // We reached the bottom of the hierarchy and did not find the derived
      // class. We must be casting the base to derived, so the cast should
      // fail.
      break;
    }

    MR = Uncasted;
  }

  // We failed if the region we ended up with has perfect type info.
  Failed = isa<TypedValueRegion>(MR);
  return UnknownVal();
}
/// \brief Convert the given type to a string suitable for printing as part of
/// a diagnostic.
///
/// There are four main criteria when determining whether we should have an
/// a.k.a. clause when pretty-printing a type:
///
/// 1) Some types provide very minimal sugar that doesn't impede the
///    user's understanding --- for example, elaborated type
///    specifiers.  If this is all the sugar we see, we don't want an
///    a.k.a. clause.
/// 2) Some types are technically sugared but are much more familiar
///    when seen in their sugared form --- for example, va_list,
///    vector types, and the magic Objective C types.  We don't
///    want to desugar these, even if we do produce an a.k.a. clause.
/// 3) Some types may have already been desugared previously in this diagnostic.
///    if this is the case, doing another "aka" would just be clutter.
/// 4) Two different types within the same diagnostic have the same output
///    string.  In this case, force an a.k.a with the desugared type when
///    doing so will provide additional information.
///
/// \param Context the context in which the type was allocated
/// \param Ty the type to print
/// \param QualTypeVals pointer values to QualTypes which are used in the
/// diagnostic message
static std::string
ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
                              const DiagnosticsEngine::ArgumentValue *PrevArgs,
                              unsigned NumPrevArgs,
                              ArrayRef<intptr_t> QualTypeVals) {
  // FIXME: Playing with std::string is really slow.
  bool ForceAKA = false;
  QualType CanTy = Ty.getCanonicalType();
  std::string S = Ty.getAsString(Context.getPrintingPolicy());
  std::string CanS = CanTy.getAsString(Context.getPrintingPolicy());

  // Criterion 4: scan the other types in this diagnostic; if any of them
  // prints the same as Ty (directly or after one desugaring step) while
  // having a different canonical spelling, force an a.k.a. clause so the
  // two types can be distinguished.
  for (unsigned I = 0, E = QualTypeVals.size(); I != E; ++I) {
    QualType CompareTy =
        QualType::getFromOpaquePtr(reinterpret_cast<void*>(QualTypeVals[I]));
    if (CompareTy.isNull())
      continue;
    if (CompareTy == Ty)
      continue;  // Same types
    QualType CompareCanTy = CompareTy.getCanonicalType();
    if (CompareCanTy == CanTy)
      continue;  // Same canonical types
    std::string CompareS = CompareTy.getAsString(Context.getPrintingPolicy());
    bool aka;
    QualType CompareDesugar = Desugar(Context, CompareTy, aka);
    std::string CompareDesugarStr =
        CompareDesugar.getAsString(Context.getPrintingPolicy());
    if (CompareS != S && CompareDesugarStr != S)
      continue;  // The type string is different than the comparison string
                 // and the desugared comparison string.
    std::string CompareCanS =
        CompareCanTy.getAsString(Context.getPrintingPolicy());

    if (CompareCanS == CanS)
      continue;  // No new info from canonical type

    ForceAKA = true;
    break;
  }

  // Criterion 3: check to see if we already desugared this type in this
  // diagnostic.  If so, don't do it again.
  bool Repeated = false;
  for (unsigned i = 0; i != NumPrevArgs; ++i) {
    // TODO: Handle ak_declcontext case.
    if (PrevArgs[i].first == DiagnosticsEngine::ak_qualtype) {
      void *Ptr = (void*)PrevArgs[i].second;
      QualType PrevTy(QualType::getFromOpaquePtr(Ptr));
      if (PrevTy == Ty) {
        Repeated = true;
        break;
      }
    }
  }

  // Consider producing an a.k.a. clause if removing all the direct
  // sugar gives us something "significantly different".
  if (!Repeated) {
    bool ShouldAKA = false;
    QualType DesugaredTy = Desugar(Context, Ty, ShouldAKA);
    if (ShouldAKA || ForceAKA) {
      // When forced but the sugar-free type is identical, fall all the way
      // back to the canonical type so the a.k.a. still adds information.
      if (DesugaredTy == Ty) {
        DesugaredTy = Ty.getCanonicalType();
      }
      std::string akaStr = DesugaredTy.getAsString(Context.getPrintingPolicy());
      if (akaStr != S) {
        S = "'" + S + "' (aka '" + akaStr + "')";
        return S;
      }
    }
  }

  S = "'" + S + "'";
  return S;
}
/// Print every declaration directly contained in \p DC.
///
/// Besides the classic merging of unnamed tag types with the declarators
/// that use them ("struct {int x;} a, b;"), this variant also merges a
/// *named* tag with the single following declaration that uses it
/// (tracked via MergeOneDecl), and skips implicit template
/// specializations, which are printed with their templates.
///
/// \param DC the declaration context whose members are printed.
/// \param Indent when true, members print one indentation level deeper.
void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
  if (Policy.TerseOutput)
    return;

  if (Indent)
    Indentation += Policy.Indentation;

  // Pending group of declarations to be printed as one declarator list.
  SmallVector<Decl*, 2> Decls;
  // True when Decls[0] is a *named* tag: merge at most one declaration.
  bool MergeOneDecl = false;
  for (DeclContext::decl_iterator D = DC->decls_begin(), DEnd = DC->decls_end();
       D != DEnd; ++D) {
    // Don't print ObjCIvarDecls, as they are printed when visiting the
    // containing ObjCInterfaceDecl.
    if (isa<ObjCIvarDecl>(*D))
      continue;

    // Skip over implicit declarations in pretty-printing mode.
    if (D->isImplicit())
      continue;

    // Don't print implicit specializations, as they are printed when visiting
    // corresponding templates.
    if (auto FD = dyn_cast<FunctionDecl>(*D))
      if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation &&
          !isa<ClassTemplateSpecializationDecl>(DC))
        continue;

    // The next bits of code handles stuff like "struct {int x;} a,b"; we're
    // forced to merge the declarations because there's no other way to
    // refer to the struct in question.  This limited merging is safe without
    // a bunch of other checks because it only merges declarations directly
    // referring to the tag, not typedefs.
    //
    // Check whether the current declaration should be grouped with a
    // previous unnamed struct.
    QualType CurDeclType = getDeclType(*D);
    if (!Decls.empty() && !CurDeclType.isNull() &&
        (!MergeOneDecl || Decls.size() == 1)) {
      QualType BaseType = GetBaseType(CurDeclType);
      // Look through elaborated-type sugar to the underlying tag.
      if (!BaseType.isNull() && isa<ElaboratedType>(BaseType))
        BaseType = cast<ElaboratedType>(BaseType)->getNamedType();
      if (!BaseType.isNull() && isa<TagType>(BaseType) &&
          cast<TagType>(BaseType)->getDecl() == Decls[0]) {
        Decls.push_back(*D);
        continue;
      }
    }

    // If we have a merged group waiting to be handled, handle it now.
    if (!Decls.empty())
      ProcessDeclGroup(Decls);

    // If the current declaration is an unnamed tag type, save it
    // so we can merge it with the subsequent declaration(s) using it.
    if (isa<TagDecl>(*D) && !cast<TagDecl>(*D)->getIdentifier()) {
      Decls.push_back(*D);
      MergeOneDecl = false;
      continue;
    }

    // Attempt to merge named tags too, but
    // only with a single decl. (This cleans
    // up warnings about unused declarations
    // when a struct is defined inline inside
    // another struct.) Only merge one variable
    // declaration, so we don't have to worry
    // about whether the storage class and/or
    // qualifiers match.
    if (isa<TagDecl>(*D)) {
      Decls.push_back(*D);
      MergeOneDecl = true;
      continue;
    }

    if (isa<AccessSpecDecl>(*D)) {
      // Access specifiers print one level shallower than members.
      Indentation -= Policy.Indentation;
      this->Indent();
      Print(D->getAccess());
      Out << ":\n";
      Indentation += Policy.Indentation;
      continue;
    }

    this->Indent();
    Visit(*D);

    // FIXME: Need to be able to tell the DeclPrinter when
    const char *Terminator = nullptr;
    // Declarations whose printed form already ends the statement get no ';'.
    if (isa<OMPThreadPrivateDecl>(*D) || isa<OMPDeclareReductionDecl>(*D) ||
        isa<PragmaPupcDecl>(*D))
      Terminator = nullptr;
    else if (isa<ObjCMethodDecl>(*D) && cast<ObjCMethodDecl>(*D)->hasBody())
      Terminator = nullptr;
    else if (auto FD = dyn_cast<FunctionDecl>(*D)) {
      if (FD->isThisDeclarationADefinition())
        Terminator = nullptr;
      else
        Terminator = ";";
    } else if (auto TD = dyn_cast<FunctionTemplateDecl>(*D)) {
      if (TD->getTemplatedDecl()->isThisDeclarationADefinition())
        Terminator = nullptr;
      else
        Terminator = ";";
    } else if (isa<NamespaceDecl>(*D) || isa<LinkageSpecDecl>(*D) ||
               isa<ObjCImplementationDecl>(*D) ||
               isa<ObjCInterfaceDecl>(*D) ||
               isa<ObjCProtocolDecl>(*D) ||
               isa<ObjCCategoryImplDecl>(*D) ||
               isa<ObjCCategoryDecl>(*D))
      Terminator = nullptr;
    else if (isa<EnumConstantDecl>(*D)) {
      // Enum constants are comma-separated; the last one gets nothing.
      DeclContext::decl_iterator Next = D;
      ++Next;
      if (Next != DEnd)
        Terminator = ",";
    } else
      Terminator = ";";
    if (Terminator)
      Out << Terminator;
    if (!Policy.TerseOutput &&
        ((isa<FunctionDecl>(*D) &&
          cast<FunctionDecl>(*D)->doesThisDeclarationHaveABody()) ||
         (isa<FunctionTemplateDecl>(*D) &&
          cast<FunctionTemplateDecl>(*D)->getTemplatedDecl()
              ->doesThisDeclarationHaveABody())))
      ; // StmtPrinter already added '\n' after CompoundStmt.
    else
      Out << "\n";

    // Declare target attribute is special one, natural spelling for the pragma
    // assumes "ending" construct so print it here.
    if (D->hasAttr<OMPDeclareTargetDeclAttr>())
      Out << "#pragma omp end declare target\n";
  }

  // Flush any still-pending merged group.
  if (!Decls.empty())
    ProcessDeclGroup(Decls);

  if (Indent)
    Indentation -= Policy.Indentation;
}
/// Determine whether the given declarator contains any unexpanded
/// parameter packs, checking both the decl-specifier (typename/typeof/
/// decltype/atomic representations) and every declarator chunk (array
/// bounds, function parameters, exception specifications, trailing
/// return types, and member-pointer scopes).
///
/// \returns true if an unexpanded parameter pack is found anywhere in
/// the declarator.
bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
  const DeclSpec &DS = D.getDeclSpec();
  switch (DS.getTypeSpecType()) {
  // Type specifiers whose payload is a type representation.
  case TST_typename:
  case TST_typeofType:
  case TST_underlyingType:
  case TST_atomic: {
    QualType T = DS.getRepAsType().get();
    if (!T.isNull() && T->containsUnexpandedParameterPack())
      return true;
    break;
  }

  // Type specifiers whose payload is an expression.
  case TST_typeofExpr:
  case TST_decltype:
    if (DS.getRepAsExpr() &&
        DS.getRepAsExpr()->containsUnexpandedParameterPack())
      return true;
    break;

  // Builtin and tag type specifiers carry no nested type or expression,
  // so they can never contain a parameter pack.
  case TST_unspecified:
  case TST_void:
  case TST_char:
  case TST_wchar:
  case TST_char16:
  case TST_char32:
  case TST_int:
  case TST_int128:
  case TST_half:
  case TST_float:
  case TST_double:
  case TST_float128:
  case TST_bool:
  case TST_decimal32:
  case TST_decimal64:
  case TST_decimal128:
  case TST_enum:
  case TST_union:
  case TST_struct:
  case TST_interface:
  case TST_class:
  case TST_auto:
  case TST_auto_type:
  case TST_decltype_auto:
#define GENERIC_IMAGE_TYPE(ImgType, Id) case TST_##ImgType##_t:
#include "clang/Basic/OpenCLImageTypes.def"
  case TST_unknown_anytype:
  case TST_error:
    break;
  }

  // Walk the declarator chunks (pointers, arrays, function types, ...).
  for (unsigned I = 0, N = D.getNumTypeObjects(); I != N; ++I) {
    const DeclaratorChunk &Chunk = D.getTypeObject(I);
    switch (Chunk.Kind) {
    case DeclaratorChunk::Pointer:
    case DeclaratorChunk::Reference:
    case DeclaratorChunk::Paren:
    case DeclaratorChunk::Pipe:
    case DeclaratorChunk::BlockPointer:
      // These declarator chunks cannot contain any parameter packs.
      break;

    case DeclaratorChunk::Array:
      // The array bound expression may mention a pack.
      if (Chunk.Arr.NumElts &&
          Chunk.Arr.NumElts->containsUnexpandedParameterPack())
        return true;
      break;
    case DeclaratorChunk::Function:
      // Check every parameter type.
      for (unsigned i = 0, e = Chunk.Fun.NumParams; i != e; ++i) {
        ParmVarDecl *Param = cast<ParmVarDecl>(Chunk.Fun.Params[i].Param);
        QualType ParamTy = Param->getType();
        assert(!ParamTy.isNull() && "Couldn't parse type?");
        if (ParamTy->containsUnexpandedParameterPack()) return true;
      }

      // Check the exception specification: dynamic throw() lists name
      // types; a computed noexcept names an expression.
      if (Chunk.Fun.getExceptionSpecType() == EST_Dynamic) {
        for (unsigned i = 0; i != Chunk.Fun.getNumExceptions(); ++i) {
          if (Chunk.Fun.Exceptions[i]
                  .Ty.get()
                  ->containsUnexpandedParameterPack())
            return true;
        }
      } else if (Chunk.Fun.getExceptionSpecType() == EST_ComputedNoexcept &&
                 Chunk.Fun.NoexceptExpr->containsUnexpandedParameterPack())
        return true;

      // Check the trailing return type, if any.
      if (Chunk.Fun.hasTrailingReturnType()) {
        QualType T = Chunk.Fun.getTrailingReturnType().get();
        if (!T.isNull() && T->containsUnexpandedParameterPack())
          return true;
      }
      break;

    case DeclaratorChunk::MemberPointer:
      // The nested-name-specifier of a member pointer may contain a pack.
      if (Chunk.Mem.Scope().getScopeRep() &&
          Chunk.Mem.Scope().getScopeRep()->containsUnexpandedParameterPack())
        return true;
      break;
    }
  }

  return false;
}
/// \brief Returns the size of the type source info data block. unsigned TypeLoc::getFullDataSizeForType(QualType Ty) { if (Ty.isNull()) return 0; return TypeSizer().Visit(TypeLoc(Ty, 0)); }
ProgramStateRef GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE, CheckerContext &C) const { ProgramStateRef State = C.getState(); // Check for taint in arguments. bool IsTainted = false; for (ArgVector::const_iterator I = SrcArgs.begin(), E = SrcArgs.end(); I != E; ++I) { unsigned ArgNum = *I; if (ArgNum == InvalidArgIndex) { // Check if any of the arguments is tainted, but skip the // destination arguments. for (unsigned int i = 0; i < CE->getNumArgs(); ++i) { if (isDestinationArgument(i)) continue; if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(i), State, C))) break; } break; } if (CE->getNumArgs() < (ArgNum + 1)) return State; if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(ArgNum), State, C))) break; } if (!IsTainted) return State; // Mark the arguments which should be tainted after the function returns. for (ArgVector::const_iterator I = DstArgs.begin(), E = DstArgs.end(); I != E; ++I) { unsigned ArgNum = *I; // Should we mark all arguments as tainted? if (ArgNum == InvalidArgIndex) { // For all pointer and references that were passed in: // If they are not pointing to const data, mark data as tainted. // TODO: So far we are just going one level down; ideally we'd need to // recurse here. for (unsigned int i = 0; i < CE->getNumArgs(); ++i) { const Expr *Arg = CE->getArg(i); // Process pointer argument. const Type *ArgTy = Arg->getType().getTypePtr(); QualType PType = ArgTy->getPointeeType(); if ((!PType.isNull() && !PType.isConstQualified()) || (ArgTy->isReferenceType() && !Arg->getType().isConstQualified())) State = State->add<TaintArgsOnPostVisit>(i); } continue; } // Should mark the return value? if (ArgNum == ReturnValueIndex) { State = State->add<TaintArgsOnPostVisit>(ReturnValueIndex); continue; } // Mark the given argument. assert(ArgNum < CE->getNumArgs()); State = State->add<TaintArgsOnPostVisit>(ArgNum); } return State; }
bool Sema::containsUnexpandedParameterPacks(Declarator &D) { const DeclSpec &DS = D.getDeclSpec(); switch (DS.getTypeSpecType()) { case TST_typename: case TST_typeofType: case TST_underlyingType: case TST_atomic: { QualType T = DS.getRepAsType().get(); if (!T.isNull() && T->containsUnexpandedParameterPack()) return true; break; } case TST_typeofExpr: case TST_decltype: if (DS.getRepAsExpr() && DS.getRepAsExpr()->containsUnexpandedParameterPack()) return true; break; case TST_unspecified: case TST_void: case TST_char: case TST_wchar: case TST_char16: case TST_char32: case TST_int: case TST_int128: case TST_half: case TST_float: case TST_double: case TST_bool: case TST_decimal32: case TST_decimal64: case TST_decimal128: case TST_enum: case TST_union: case TST_struct: case TST_interface: case TST_class: case TST_auto: case TST_decltype_auto: case TST_unknown_anytype: case TST_error: break; } for (unsigned I = 0, N = D.getNumTypeObjects(); I != N; ++I) { const DeclaratorChunk &Chunk = D.getTypeObject(I); switch (Chunk.Kind) { case DeclaratorChunk::Pointer: case DeclaratorChunk::Reference: case DeclaratorChunk::Paren: // These declarator chunks cannot contain any parameter packs. break; case DeclaratorChunk::Array: case DeclaratorChunk::Function: case DeclaratorChunk::BlockPointer: // Syntactically, these kinds of declarator chunks all come after the // declarator-id (conceptually), so the parser should not invoke this // routine at this time. llvm_unreachable("Could not have seen this kind of declarator chunk"); case DeclaratorChunk::MemberPointer: if (Chunk.Mem.Scope().getScopeRep() && Chunk.Mem.Scope().getScopeRep()->containsUnexpandedParameterPack()) return true; break; } } return false; }
/// Begin semantic analysis of a lambda expression: build the closure
/// class and its call operator from the introducer and declarator,
/// process the explicit captures, and push the lambda scope and a new
/// expression evaluation context.
///
/// \param Intro the parsed lambda-introducer (capture list and default).
/// \param ParamInfo the lambda-declarator, which may be empty.
/// \param CurScope the scope in which the lambda appears.
void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
                                        Declarator &ParamInfo,
                                        Scope *CurScope) {
  // Determine if we're within a context where we know that the lambda will
  // be dependent, because there are template parameters in scope.
  bool KnownDependent = false;
  if (Scope *TmplScope = CurScope->getTemplateParamParent())
    if (!TmplScope->decl_empty())
      KnownDependent = true;

  // Determine the signature of the call operator.
  TypeSourceInfo *MethodTyInfo;
  bool ExplicitParams = true;
  bool ExplicitResultType = true;
  bool ContainsUnexpandedParameterPack = false;
  SourceLocation EndLoc;
  SmallVector<ParmVarDecl *, 8> Params;
  if (ParamInfo.getNumTypeObjects() == 0) {
    // C++11 [expr.prim.lambda]p4:
    //   If a lambda-expression does not include a lambda-declarator, it is as
    //   if the lambda-declarator were ().
    FunctionProtoType::ExtProtoInfo EPI;
    EPI.HasTrailingReturn = true;
    EPI.TypeQuals |= DeclSpec::TQ_const;
    // The result type is deduced later, so use a dependent placeholder.
    QualType MethodTy = Context.getFunctionType(Context.DependentTy, None,
                                                EPI);
    MethodTyInfo = Context.getTrivialTypeSourceInfo(MethodTy);
    ExplicitParams = false;
    ExplicitResultType = false;
    EndLoc = Intro.Range.getEnd();
  } else {
    assert(ParamInfo.isFunctionDeclarator() &&
           "lambda-declarator is a function");
    DeclaratorChunk::FunctionTypeInfo &FTI = ParamInfo.getFunctionTypeInfo();

    // C++11 [expr.prim.lambda]p5:
    //   This function call operator is declared const (9.3.1) if and only if
    //   the lambda-expression's parameter-declaration-clause is not followed
    //   by mutable. It is neither virtual nor declared volatile. [...]
    if (!FTI.hasMutableQualifier())
      FTI.TypeQuals |= DeclSpec::TQ_const;

    MethodTyInfo = GetTypeForDeclarator(ParamInfo, CurScope);
    assert(MethodTyInfo && "no type from lambda-declarator");
    EndLoc = ParamInfo.getSourceRange().getEnd();

    // The result type was written explicitly iff it is not the dependent
    // placeholder installed for deduction.
    ExplicitResultType
      = MethodTyInfo->getType()->getAs<FunctionType>()->getResultType()
                                                        != Context.DependentTy;

    if (FTI.NumArgs == 1 && !FTI.isVariadic && FTI.ArgInfo[0].Ident == 0 &&
        cast<ParmVarDecl>(FTI.ArgInfo[0].Param)->getType()->isVoidType()) {
      // Empty arg list, don't push any params.
      checkVoidParamDecl(cast<ParmVarDecl>(FTI.ArgInfo[0].Param));
    } else {
      Params.reserve(FTI.NumArgs);
      for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i)
        Params.push_back(cast<ParmVarDecl>(FTI.ArgInfo[i].Param));
    }

    // Check for unexpanded parameter packs in the method type.
    if (MethodTyInfo->getType()->containsUnexpandedParameterPack())
      ContainsUnexpandedParameterPack = true;
  }

  // Build the closure class and start defining its call operator.
  CXXRecordDecl *Class = createLambdaClosureType(Intro.Range, MethodTyInfo,
                                                 KnownDependent);

  CXXMethodDecl *Method = startLambdaDefinition(Class, Intro.Range,
                                                MethodTyInfo, EndLoc, Params);

  if (ExplicitParams)
    CheckCXXDefaultArguments(Method);

  // Attributes on the lambda apply to the method.
  ProcessDeclAttributes(CurScope, Method, ParamInfo);

  // Introduce the function call operator as the current declaration context.
  PushDeclContext(CurScope, Method);

  // Introduce the lambda scope.
  LambdaScopeInfo *LSI
    = enterLambdaScope(Method, Intro.Range, Intro.Default, ExplicitParams,
                       ExplicitResultType,
                       !Method->isConst());

  // Handle explicit captures.  PrevCaptureLoc tracks the end of the
  // previously handled capture so fix-it removals can cover "…, capture".
  SourceLocation PrevCaptureLoc
    = Intro.Default == LCD_None? Intro.Range.getBegin() : Intro.DefaultLoc;
  for (SmallVector<LambdaCapture, 4>::const_iterator
         C = Intro.Captures.begin(),
         E = Intro.Captures.end();
       C != E;
       PrevCaptureLoc = C->Loc, ++C) {
    if (C->Kind == LCK_This) {
      // C++11 [expr.prim.lambda]p8:
      //   An identifier or this shall not appear more than once in a
      //   lambda-capture.
      if (LSI->isCXXThisCaptured()) {
        Diag(C->Loc, diag::err_capture_more_than_once)
          << "'this'"
          << SourceRange(LSI->getCXXThisCapture().getLocation())
          << FixItHint::CreateRemoval(
               SourceRange(PP.getLocForEndOfToken(PrevCaptureLoc), C->Loc));
        continue;
      }

      // C++11 [expr.prim.lambda]p8:
      //   If a lambda-capture includes a capture-default that is =, the
      //   lambda-capture shall not contain this [...].
      if (Intro.Default == LCD_ByCopy) {
        Diag(C->Loc, diag::err_this_capture_with_copy_default)
          << FixItHint::CreateRemoval(
               SourceRange(PP.getLocForEndOfToken(PrevCaptureLoc), C->Loc));
        continue;
      }

      // C++11 [expr.prim.lambda]p12:
      //   If this is captured by a local lambda expression, its nearest
      //   enclosing function shall be a non-static member function.
      QualType ThisCaptureType = getCurrentThisType();
      if (ThisCaptureType.isNull()) {
        Diag(C->Loc, diag::err_this_capture) << true;
        continue;
      }

      CheckCXXThisCapture(C->Loc, /*Explicit=*/true);
      continue;
    }

    // FIXME: C++1y [expr.prim.lambda]p11
    if (C->Init.isInvalid())
      continue;
    if (C->Init.isUsable()) {
      // Init-captures are not supported here yet.
      Diag(C->Loc, diag::err_lambda_init_capture_unsupported);
      continue;
    }

    assert(C->Id && "missing identifier for capture");

    // C++11 [expr.prim.lambda]p8:
    //   If a lambda-capture includes a capture-default that is &, the
    //   identifiers in the lambda-capture shall not be preceded by &.
    //   If a lambda-capture includes a capture-default that is =, [...]
    //   each identifier it contains shall be preceded by &.
    if (C->Kind == LCK_ByRef && Intro.Default == LCD_ByRef) {
      Diag(C->Loc, diag::err_reference_capture_with_reference_default)
        << FixItHint::CreateRemoval(
             SourceRange(PP.getLocForEndOfToken(PrevCaptureLoc), C->Loc));
      continue;
    } else if (C->Kind == LCK_ByCopy && Intro.Default == LCD_ByCopy) {
      Diag(C->Loc, diag::err_copy_capture_with_copy_default)
        << FixItHint::CreateRemoval(
             SourceRange(PP.getLocForEndOfToken(PrevCaptureLoc), C->Loc));
      continue;
    }

    // Look up the captured name in the enclosing scope.
    DeclarationNameInfo Name(C->Id, C->Loc);
    LookupResult R(*this, Name, LookupOrdinaryName);
    LookupName(R, CurScope);
    if (R.isAmbiguous())
      continue;
    if (R.empty()) {
      // FIXME: Disable corrections that would add qualification?
      CXXScopeSpec ScopeSpec;
      DeclFilterCCC<VarDecl> Validator;
      if (DiagnoseEmptyLookup(CurScope, ScopeSpec, R, Validator))
        continue;
    }

    // C++11 [expr.prim.lambda]p10:
    //   The identifiers in a capture-list are looked up using the usual rules
    //   for unqualified name lookup (3.4.1); each such lookup shall find a
    //   variable with automatic storage duration declared in the reaching
    //   scope of the local lambda expression.
    //
    // Note that the 'reaching scope' check happens in tryCaptureVariable().
    VarDecl *Var = R.getAsSingle<VarDecl>();
    if (!Var) {
      Diag(C->Loc, diag::err_capture_does_not_name_variable) << C->Id;
      continue;
    }

    // Ignore invalid decls; they'll just confuse the code later.
    if (Var->isInvalidDecl())
      continue;

    if (!Var->hasLocalStorage()) {
      Diag(C->Loc, diag::err_capture_non_automatic_variable) << C->Id;
      Diag(Var->getLocation(), diag::note_previous_decl) << C->Id;
      continue;
    }

    // C++11 [expr.prim.lambda]p8:
    //   An identifier or this shall not appear more than once in a
    //   lambda-capture.
    if (LSI->isCaptured(Var)) {
      Diag(C->Loc, diag::err_capture_more_than_once)
        << C->Id
        << SourceRange(LSI->getCapture(Var).getLocation())
        << FixItHint::CreateRemoval(
             SourceRange(PP.getLocForEndOfToken(PrevCaptureLoc), C->Loc));
      continue;
    }

    // C++11 [expr.prim.lambda]p23:
    //   A capture followed by an ellipsis is a pack expansion (14.5.3).
    SourceLocation EllipsisLoc;
    if (C->EllipsisLoc.isValid()) {
      if (Var->isParameterPack()) {
        EllipsisLoc = C->EllipsisLoc;
      } else {
        Diag(C->EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
          << SourceRange(C->Loc);

        // Just ignore the ellipsis.
      }
    } else if (Var->isParameterPack()) {
      // A pack captured without '...' leaves the lambda containing an
      // unexpanded pack.
      ContainsUnexpandedParameterPack = true;
    }

    TryCaptureKind Kind = C->Kind == LCK_ByRef ? TryCapture_ExplicitByRef :
                                                 TryCapture_ExplicitByVal;
    tryCaptureVariable(Var, C->Loc, Kind, EllipsisLoc);
  }
  finishLambdaExplicitCaptures(LSI);

  LSI->ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;

  // Add lambda parameters into scope.
  addLambdaParameters(Method, CurScope);

  // Enter a new evaluation context to insulate the lambda from any
  // cleanups from the enclosing full-expression.
  PushExpressionEvaluationContext(PotentiallyEvaluated);
}
/// Determine which method implementation this Objective-C message send
/// will actually invoke at runtime, using the receiver's inferred
/// dynamic type for instance messages and the receiver interface for
/// class messages.
///
/// \returns the resolved definition, possibly marked "may be overridden
/// in a subclass" via a non-null dispatch region; or an empty
/// RuntimeDefinition when the target cannot be resolved.
RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
  const ObjCMessageExpr *E = getOriginExpr();
  assert(E);
  Selector Sel = E->getSelector();

  if (E->isInstanceMessage()) {
    // Find the receiver type.
    const ObjCObjectPointerType *ReceiverT = nullptr;
    bool CanBeSubClassed = false;
    QualType SupersType = E->getSuperType();
    const MemRegion *Receiver = nullptr;

    if (!SupersType.isNull()) {
      // Super always means the type of immediate predecessor to the method
      // where the call occurs.
      ReceiverT = cast<ObjCObjectPointerType>(SupersType);
    } else {
      Receiver = getReceiverSVal().getAsRegion();
      if (!Receiver)
        return RuntimeDefinition();

      // Use the analyzer's dynamic type information for the receiver.
      DynamicTypeInfo DTI = getDynamicTypeInfo(getState(), Receiver);
      QualType DynType = DTI.getType();
      CanBeSubClassed = DTI.canBeASubClass();
      ReceiverT = dyn_cast<ObjCObjectPointerType>(DynType);

      // If no subclass can override this selector, dispatch is exact.
      if (ReceiverT && CanBeSubClassed)
        if (ObjCInterfaceDecl *IDecl = ReceiverT->getInterfaceDecl())
          if (!canBeOverridenInSubclass(IDecl, Sel))
            CanBeSubClassed = false;
    }

    // Lookup the method implementation.
    if (ReceiverT)
      if (ObjCInterfaceDecl *IDecl = ReceiverT->getInterfaceDecl()) {
        // Repeatedly calling lookupPrivateMethod() is expensive, especially
        // when in many cases it returns null.  We cache the results so
        // that repeated queries on the same ObjCInterfaceDecl and Selector
        // don't incur the same cost.  On some test cases, we can see the
        // same query being issued thousands of times.
        //
        // NOTE: This cache is essentially a "global" variable, but it
        // only gets lazily created when we get here.  The value of the
        // cache probably comes from it being global across ExprEngines,
        // where the same queries may get issued.  If we are worried about
        // concurrency, or possibly loading/unloading ASTs, etc., we may
        // need to revisit this someday.  In terms of memory, this table
        // stays around until clang quits, which also may be bad if we
        // need to release memory.
        typedef std::pair<const ObjCInterfaceDecl*, Selector> PrivateMethodKey;
        typedef llvm::DenseMap<PrivateMethodKey,
                               Optional<const ObjCMethodDecl *> >
          PrivateMethodCache;

        static PrivateMethodCache PMC;
        Optional<const ObjCMethodDecl *> &Val = PMC[std::make_pair(IDecl, Sel)];

        // Query lookupPrivateMethod() if the cache does not hit.
        if (!Val.hasValue()) {
          Val = IDecl->lookupPrivateMethod(Sel);

          // If the method is a property accessor, we should try to "inline" it
          // even if we don't actually have an implementation.
          if (!*Val)
            if (const ObjCMethodDecl *CompileTimeMD = E->getMethodDecl())
              if (CompileTimeMD->isPropertyAccessor())
                Val = IDecl->lookupInstanceMethod(Sel);
        }

        const ObjCMethodDecl *MD = Val.getValue();
        // A non-null dispatch region signals "may be overridden".
        if (CanBeSubClassed)
          return RuntimeDefinition(MD, Receiver);
        else
          return RuntimeDefinition(MD, nullptr);
      }

  } else {
    // This is a class method.
    // If we have type info for the receiver class, we are calling via
    // class name.
    if (ObjCInterfaceDecl *IDecl = E->getReceiverInterface()) {
      // Find/Return the method implementation.
      return RuntimeDefinition(IDecl->lookupPrivateClassMethod(Sel));
    }
  }

  return RuntimeDefinition();
}
/// \brief Returns the alignment of the type source info data block. unsigned TypeLoc::getLocalAlignmentForType(QualType Ty) { if (Ty.isNull()) return 1; return TypeAligner().Visit(TypeLoc(Ty, nullptr)); }
/// Look up the std::coroutine_traits<...>::promise_type for the given
/// function type.
///
/// Instantiates std::experimental::coroutine_traits<R, P1, P2, ...> from the
/// coroutine's return and parameter types, then resolves its promise_type
/// member. Emits a diagnostic and returns a null QualType on any failure
/// (missing namespace/template, malformed traits, missing or non-class or
/// incomplete promise type).
///
/// \param KwLoc   Location of the coroutine keyword, used for most
///                diagnostics about the traits template itself.
/// \param FuncLoc Location of the enclosing function, used for diagnostics
///                about the promise type.
static QualType lookupPromiseType(Sema &S, const FunctionProtoType *FnType,
                                  SourceLocation KwLoc,
                                  SourceLocation FuncLoc) {
  // FIXME: Cache std::coroutine_traits once we've found it.
  NamespaceDecl *StdExp = S.lookupStdExperimentalNamespace();
  if (!StdExp) {
    S.Diag(KwLoc, diag::err_implied_coroutine_type_not_found)
        << "std::experimental::coroutine_traits";
    return QualType();
  }

  LookupResult Result(S, &S.PP.getIdentifierTable().get("coroutine_traits"),
                      FuncLoc, Sema::LookupOrdinaryName);
  if (!S.LookupQualifiedName(Result, StdExp)) {
    S.Diag(KwLoc, diag::err_implied_coroutine_type_not_found)
        << "std::experimental::coroutine_traits";
    return QualType();
  }

  ClassTemplateDecl *CoroTraits = Result.getAsSingle<ClassTemplateDecl>();
  if (!CoroTraits) {
    Result.suppressDiagnostics();
    // We found something weird. Complain about the first thing we found.
    NamedDecl *Found = *Result.begin();
    S.Diag(Found->getLocation(), diag::err_malformed_std_coroutine_traits);
    return QualType();
  }

  // Form template argument list for coroutine_traits<R, P1, P2, ...>.
  TemplateArgumentListInfo Args(KwLoc, KwLoc);
  Args.addArgument(TemplateArgumentLoc(
      TemplateArgument(FnType->getReturnType()),
      S.Context.getTrivialTypeSourceInfo(FnType->getReturnType(), KwLoc)));
  // FIXME: If the function is a non-static member function, add the type
  // of the implicit object parameter before the formal parameters.
  for (QualType T : FnType->getParamTypes())
    Args.addArgument(TemplateArgumentLoc(
        TemplateArgument(T), S.Context.getTrivialTypeSourceInfo(T, KwLoc)));

  // Build the template-id.
  QualType CoroTrait =
      S.CheckTemplateIdType(TemplateName(CoroTraits), KwLoc, Args);
  if (CoroTrait.isNull())
    return QualType();
  // Instantiating the specialization may fail; require completeness so the
  // member lookup below is meaningful.
  if (S.RequireCompleteType(KwLoc, CoroTrait,
                            diag::err_coroutine_type_missing_specialization))
    return QualType();

  auto *RD = CoroTrait->getAsCXXRecordDecl();
  assert(RD && "specialization of class template is not a class?");

  // Look up the ::promise_type member.
  LookupResult R(S, &S.PP.getIdentifierTable().get("promise_type"), KwLoc,
                 Sema::LookupOrdinaryName);
  S.LookupQualifiedName(R, RD);

  auto *Promise = R.getAsSingle<TypeDecl>();
  if (!Promise) {
    S.Diag(FuncLoc,
           diag::err_implied_std_coroutine_traits_promise_type_not_found)
        << RD;
    return QualType();
  }

  // The promise type is required to be a class type.
  QualType PromiseType = S.Context.getTypeDeclType(Promise);

  // Helper that wraps the promise type in an elaborated type spelled
  // relative to std::experimental::coroutine_traits<...>, purely so the
  // diagnostics below print a readable qualified name.
  auto buildElaboratedType = [&]() {
    auto *NNS = NestedNameSpecifier::Create(S.Context, nullptr, StdExp);
    NNS = NestedNameSpecifier::Create(S.Context, NNS, false,
                                      CoroTrait.getTypePtr());
    return S.Context.getElaboratedType(ETK_None, NNS, PromiseType);
  };

  if (!PromiseType->getAsCXXRecordDecl()) {
    S.Diag(FuncLoc,
           diag::err_implied_std_coroutine_traits_promise_type_not_class)
        << buildElaboratedType();
    return QualType();
  }
  if (S.RequireCompleteType(FuncLoc, buildElaboratedType(),
                            diag::err_coroutine_promise_type_incomplete))
    return QualType();

  return PromiseType;
}
/// Build an Objective-C string literal expression (@"...").
///
/// Adjacent pieces such as @"foo" "bar" @"baz" are first merged into a
/// single StringLiteral, then the result is typed against the constant
/// string interface (NSString by default, or the class named by
/// -fconstant-string-class when -fno-constant-cfstrings is in effect).
///
/// \param AtLocs     Locations of the '@' tokens; AtLocs[0] anchors the
///                   resulting expression.
/// \param strings    The NumStrings StringLiteral pieces to concatenate.
/// \param NumStrings Number of entries in \p strings (at least 1).
/// \returns the new ObjCStringLiteral, or an error result on failure.
ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
                                        Expr **strings,
                                        unsigned NumStrings) {
  StringLiteral **Strings = reinterpret_cast<StringLiteral**>(strings);

  // Most ObjC strings are formed out of a single piece. However, we *can*
  // have strings formed out of multiple @ strings with multiple pptokens in
  // each one, e.g. @"foo" "bar" @"baz" "qux" which need to be turned into one
  // StringLiteral for ObjCStringLiteral to hold onto.
  StringLiteral *S = Strings[0];

  // If we have a multi-part string, merge it all together.
  if (NumStrings != 1) {
    // Concatenate objc strings.
    llvm::SmallString<128> StrBuf;
    llvm::SmallVector<SourceLocation, 8> StrLocs;

    for (unsigned i = 0; i != NumStrings; ++i) {
      S = Strings[i];

      // ObjC strings can't be wide.
      if (S->isWide()) {
        Diag(S->getLocStart(), diag::err_cfstring_literal_not_string_constant)
          << S->getSourceRange();
        return true;
      }

      // Append the string.
      StrBuf += S->getString();

      // Get the locations of the string tokens.
      StrLocs.append(S->tokloc_begin(), S->tokloc_end());
    }

    // Create the aggregate string with the appropriate content and location
    // information.
    S = StringLiteral::Create(Context, &StrBuf[0], StrBuf.size(),
                              /*Wide=*/false,
                              Context.getPointerType(Context.CharTy),
                              &StrLocs[0], StrLocs.size());
  }

  // Verify that this composite string is acceptable for ObjC strings.
  if (CheckObjCString(S))
    return true;

  // Initialize the constant string interface lazily. This assumes
  // the NSString interface is seen in this translation unit. Note: We
  // don't use NSConstantString, since the runtime team considers this
  // interface private (even though it appears in the header files).
  QualType Ty = Context.getObjCConstantStringInterface();
  if (!Ty.isNull()) {
    Ty = Context.getObjCObjectPointerType(Ty);
  } else if (getLangOptions().NoConstantCFStrings) {
    // -fno-constant-cfstrings: use the user-specified constant string class,
    // or NSConstantString when none was given.
    IdentifierInfo *NSIdent = nullptr;
    std::string StringClass(getLangOptions().ObjCConstantStringClass);
    if (StringClass.empty())
      NSIdent = &Context.Idents.get("NSConstantString");
    else
      NSIdent = &Context.Idents.get(StringClass);
    NamedDecl *IF = LookupSingleName(TUScope, NSIdent, AtLocs[0],
                                     LookupOrdinaryName);
    if (ObjCInterfaceDecl *StrIF = dyn_cast_or_null<ObjCInterfaceDecl>(IF)) {
      Context.setObjCConstantStringInterface(StrIF);
      Ty = Context.getObjCConstantStringInterface();
      Ty = Context.getObjCObjectPointerType(Ty);
    } else {
      // If there is no NSConstantString interface defined then treat this
      // as error and recover from it.
      Diag(S->getLocStart(), diag::err_no_nsconstant_string_class)
        << NSIdent << S->getSourceRange();
      Ty = Context.getObjCIdType();
    }
  } else {
    IdentifierInfo *NSIdent = &Context.Idents.get("NSString");
    NamedDecl *IF = LookupSingleName(TUScope, NSIdent, AtLocs[0],
                                     LookupOrdinaryName);
    if (ObjCInterfaceDecl *StrIF = dyn_cast_or_null<ObjCInterfaceDecl>(IF)) {
      Context.setObjCConstantStringInterface(StrIF);
      Ty = Context.getObjCConstantStringInterface();
      Ty = Context.getObjCObjectPointerType(Ty);
    } else {
      // If there is no NSString interface defined then treat constant
      // strings as untyped objects and let the runtime figure it out later.
      Ty = Context.getObjCIdType();
    }
  }

  return new (Context) ObjCStringLiteral(S, Ty, AtLocs[0]);
}
/// Handle a template-id appearing inside a nested-name-specifier,
/// e.g. the 'vector<int>' in 'std::vector<int>::size_type'.
///
/// On success, extends \p SS with the resolved specialization (including its
/// source-location information) and returns false; on error, emits a
/// diagnostic and returns true.
/// NOTE(review): \p EnteringContext is accepted but not used in this body —
/// presumably kept for signature parity with related ActOn* entry points.
bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
                                       CXXScopeSpec &SS,
                                       SourceLocation TemplateKWLoc,
                                       TemplateTy Template,
                                       SourceLocation TemplateNameLoc,
                                       SourceLocation LAngleLoc,
                                       ASTTemplateArgsPtr TemplateArgsIn,
                                       SourceLocation RAngleLoc,
                                       SourceLocation CCLoc,
                                       bool EnteringContext) {
  if (SS.isInvalid())
    return true;

  // Translate the parser's template argument list in our AST format.
  TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
  translateTemplateArguments(TemplateArgsIn, TemplateArgs);

  DependentTemplateName *DTN = Template.get().getAsDependentTemplateName();
  if (DTN && DTN->isIdentifier()) {
    // Handle a dependent template specialization for which we cannot resolve
    // the template name.
    assert(DTN->getQualifier() == SS.getScopeRep());
    QualType T = Context.getDependentTemplateSpecializationType(
        ETK_None, DTN->getQualifier(), DTN->getIdentifier(), TemplateArgs);

    // Create source-location information for this type.
    TypeLocBuilder Builder;
    DependentTemplateSpecializationTypeLoc SpecTL
      = Builder.push<DependentTemplateSpecializationTypeLoc>(T);
    SpecTL.setElaboratedKeywordLoc(SourceLocation());
    SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
    SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
    SpecTL.setTemplateNameLoc(TemplateNameLoc);
    SpecTL.setLAngleLoc(LAngleLoc);
    SpecTL.setRAngleLoc(RAngleLoc);
    for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
      SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());

    SS.Extend(Context, TemplateKWLoc,
              Builder.getTypeLocInContext(Context, T), CCLoc);
    return false;
  }

  // Only class/alias templates can form a type; reject overload sets,
  // function templates, and variable templates here.
  TemplateDecl *TD = Template.get().getAsTemplateDecl();
  if (Template.get().getAsOverloadedTemplate() || DTN ||
      isa<FunctionTemplateDecl>(TD) || isa<VarTemplateDecl>(TD)) {
    SourceRange R(TemplateNameLoc, RAngleLoc);
    if (SS.getRange().isValid())
      R.setBegin(SS.getRange().getBegin());

    Diag(CCLoc, diag::err_non_type_template_in_nested_name_specifier)
      << (TD && isa<VarTemplateDecl>(TD)) << Template.get() << R;
    NoteAllFoundTemplates(Template.get());
    return true;
  }

  // We were able to resolve the template name to an actual template.
  // Build an appropriate nested-name-specifier.
  QualType T = CheckTemplateIdType(Template.get(), TemplateNameLoc,
                                   TemplateArgs);
  if (T.isNull())
    return true;

  // Alias template specializations can produce types which are not valid
  // nested name specifiers.
  if (!T->isDependentType() && !T->getAs<TagType>()) {
    Diag(TemplateNameLoc, diag::err_nested_name_spec_non_tag) << T;
    NoteAllFoundTemplates(Template.get());
    return true;
  }

  // Provide source-location information for the template specialization
  // type.
  TypeLocBuilder Builder;
  TemplateSpecializationTypeLoc SpecTL
    = Builder.push<TemplateSpecializationTypeLoc>(T);
  SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
  SpecTL.setTemplateNameLoc(TemplateNameLoc);
  SpecTL.setLAngleLoc(LAngleLoc);
  SpecTL.setRAngleLoc(RAngleLoc);
  for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
    SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());

  SS.Extend(Context, TemplateKWLoc,
            Builder.getTypeLocInContext(Context, T), CCLoc);
  return false;
}
/// getOrCreateType - Get the type from the cache or create a new /// one if necessary. llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty, llvm::DICompileUnit Unit) { if (Ty.isNull()) return llvm::DIType(); // Check to see if the compile unit already has created this type. llvm::DIType &Slot = TypeCache[Ty.getAsOpaquePtr()]; if (!Slot.isNull()) return Slot; // Handle CVR qualifiers, which recursively handles what they refer to. if (Ty.getCVRQualifiers()) return Slot = CreateCVRType(Ty, Unit); // Work out details of type. switch (Ty->getTypeClass()) { #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) #define NON_CANONICAL_TYPE(Class, Base) #define DEPENDENT_TYPE(Class, Base) case Type::Class: #include "clang/AST/TypeNodes.def" assert(false && "Dependent types cannot show up in debug information"); case Type::Complex: case Type::LValueReference: case Type::RValueReference: case Type::Vector: case Type::ExtVector: case Type::ExtQual: case Type::ObjCQualifiedInterface: case Type::ObjCQualifiedId: case Type::FixedWidthInt: case Type::BlockPointer: case Type::MemberPointer: case Type::TemplateSpecialization: case Type::QualifiedName: case Type::ObjCQualifiedClass: // Unsupported types return llvm::DIType(); case Type::ObjCInterface: Slot = CreateType(cast<ObjCInterfaceType>(Ty), Unit); break; case Type::Builtin: Slot = CreateType(cast<BuiltinType>(Ty), Unit); break; case Type::Pointer: Slot = CreateType(cast<PointerType>(Ty), Unit); break; case Type::Typedef: Slot = CreateType(cast<TypedefType>(Ty), Unit); break; case Type::Record: case Type::Enum: Slot = CreateType(cast<TagType>(Ty), Unit); break; case Type::FunctionProto: case Type::FunctionNoProto: return Slot = CreateType(cast<FunctionType>(Ty), Unit); case Type::ConstantArray: case Type::VariableArray: case Type::IncompleteArray: return Slot = CreateType(cast<ArrayType>(Ty), Unit); case Type::TypeOfExpr: return Slot = getOrCreateType(cast<TypeOfExprType>(Ty)->getUnderlyingExpr() ->getType(), Unit); 
case Type::TypeOf: return Slot = getOrCreateType(cast<TypeOfType>(Ty)->getUnderlyingType(), Unit); } return Slot; }