/// Adjusts a return value when the called function's return type does not
/// match the caller's expression type. This can happen when a dynamic call
/// is devirtualized, and the overriding method has a covariant (more specific)
/// return type than the parent's method. For C++ objects, this means we need
/// to add base casts.
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
                              StoreManager &StoreMgr) {
  // For now, the only adjustments we handle apply only to locations.
  if (!V.getAs<Loc>())
    return V;

  // If the types already match, don't do any unnecessary work.
  ExpectedTy = ExpectedTy.getCanonicalType();
  ActualTy = ActualTy.getCanonicalType();
  if (ExpectedTy == ActualTy)
    return V;

  // No adjustment is needed between Objective-C pointer types.
  if (ExpectedTy->isObjCObjectPointerType() &&
      ActualTy->isObjCObjectPointerType())
    return V;

  // C++ object pointers may need "derived-to-base" casts.
  const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
  const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
  if (ExpectedClass && ActualClass) {
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);
    if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
        !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
      return StoreMgr.evalDerivedToBase(V, Paths.front());
    }
  }

  // Unfortunately, Objective-C does not enforce that overridden methods have
  // covariant return types, so we can't assert that this never happens.
  // Be safe and return UnknownVal().
  return UnknownVal();
}
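// Illustrative sketch (not from the source): C++ code whose devirtualized
// call exercises adjustReturnValue. Derived::clone has a covariant return
// type, so when the analyzer devirtualizes b->clone(), the Derived* result
// must be cast back to the caller's expected Base*.
struct Base {
  virtual Base *clone() const;
};
struct Derived : Base {
  Derived *clone() const override; // covariant (more specific) return type
};
Base *copy(const Base *b) {
  return b->clone(); // static type Base*; devirtualized callee yields Derived*
}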
bool CodeGenTypes::isHighInt(QualType Ty) {
  if (const EnumType* et = dyn_cast<EnumType>(Ty.getCanonicalType())) {
    Ty = et->getDecl()->getIntegerType();
  }
  if (const BuiltinType* bt = dyn_cast<BuiltinType>(Ty.getCanonicalType()))
    return bt->isHighInt();
  return false;
}
QualType TypeResolver::resolveCanonical(QualType Q) const {
  if (Q->hasCanonicalType()) return Q.getCanonicalType();

  const Type* T = Q.getTypePtr();
  switch (Q->getTypeClass()) {
  case TC_BUILTIN:
    return Q;
  case TC_POINTER:
    {
      const PointerType* P = cast<PointerType>(T);
      QualType t1 = P->getPointeeType();
      // Pointee will always be in the same TypeContext (file), since it's
      // either built-in or an UnresolvedType.
      QualType t2 = resolveCanonical(t1);
      assert(t2.isValid());
      if (t1 == t2) {
        Q->setCanonicalType(Q);
        return Q;
      } else {
        // TODO qualifiers
        QualType Canon = typeContext.getPointerType(t2);
        if (!Canon->hasCanonicalType()) Canon->setCanonicalType(Canon);
        Q->setCanonicalType(Canon);
        return Canon;
      }
    }
  case TC_ARRAY:
    {
      const ArrayType* A = cast<ArrayType>(T);
      QualType t1 = A->getElementType();
      // NOTE: qualifiers are lost here!
      QualType t2 = resolveCanonical(t1);
      if (t1 == t2) {
        Q->setCanonicalType(Q);
        return Q;
      } else {
        // NOTE: need size Expr, but set ownership to none
        QualType Canon = typeContext.getArrayType(t2, A->getSizeExpr(), false,
                                                  A->isIncremental());
        if (!Canon->hasCanonicalType()) Canon->setCanonicalType(Canon);
        Q->setCanonicalType(Canon);
        return Canon;
      }
    }
  case TC_UNRESOLVED:
    assert(0 && "should not get here");
    return QualType();
  case TC_ALIAS:
  case TC_STRUCT:
  case TC_ENUM:
  case TC_FUNCTION:
    return Q.getCanonicalType();
  case TC_MODULE:
    assert(0 && "TBD");
    return Q;
  }
  assert(0);
  return QualType(); // unreachable; silences missing-return warnings
}
bool Sema::RequireCompleteTypeRoger(QualType T,
                                    RogerRequireCompleteReason RogerOnlyInheritance) {
  if (!T->isIncompleteType()) {
    return false;
  }
  if (T.getCanonicalType()->getTypeClass() != Type::Record) {
    return false;
  }
  RecordDecl *Rec = cast<RecordType>(T.getCanonicalType())->getDecl();
  return RequireCompleteRecordRoger(Rec, RogerOnlyInheritance);
}
bool ExprTypeAnalyser::checkExplicitCast(const ExplicitCastExpr* expr,
                                         QualType DestType, QualType SrcType) {
  // C99 6.5.4p2: the cast type needs to be void or scalar and the expression
  if (!DestType.isScalarType()) {
    // Don't allow any cast to a non-scalar type.
    StringBuilder buf1(MAX_LEN_TYPENAME);
    StringBuilder buf2(MAX_LEN_TYPENAME);
    DestType.DiagName(buf1);
    SrcType.DiagName(buf2);
    Diags.Report(expr->getLocation(), diag::err_typecheck_cond_expect_scalar)
        << buf1 << buf2 << expr->getSourceRange();
    return false;
  }
  // If either type is a pointer, the other type has to be either an
  // integer or a pointer.
  // TODO decide if enums are arithmetic types or not (they are in C99, not in C++0x)
  if (DestType.isPointerType()) {
    if (SrcType.isPointerType()) {
      // allow all pointer casts
      return true;
    } else {
      // only allow cast to pointer from uint32/64 (pointer size)
      const BuiltinType* BT = dyncast<BuiltinType>(SrcType.getCanonicalType());
      // TODO use TargetInfo to check if 32-bit
      if (BT && BT->getKind() == BuiltinType::UInt64) return true;

      QualType expected = Type::UInt64();
      StringBuilder buf1(MAX_LEN_TYPENAME);
      expected.DiagName(buf1);
      Diags.Report(expr->getLocation(), diag::err_cast_nonword_to_pointer) << buf1;
    }
  } else {
    if (SrcType.isPointerType()) {
      // only allow cast to uint32/64 (pointer size)
      const BuiltinType* BT = dyncast<BuiltinType>(DestType.getCanonicalType());
      // TODO use TargetInfo to check if 32-bit
      if (BT && BT->getKind() == BuiltinType::UInt64) return true;

      QualType expected = Type::UInt64();
      StringBuilder buf1(MAX_LEN_TYPENAME);
      expected.DiagName(buf1);
      Diags.Report(expr->getLocation(), diag::err_cast_pointer_to_nonword) << buf1;
    } else {
      // check non-pointer to non-pointer cast
      // TODO make this a top-level function? (switch on src-type)
      return checkNonPointerCast(expr, DestType, SrcType);
    }
  }
  return false;
}
QualType TypeFinder::LargestType(const Expr* Left, const Expr* Right) {
  QualType TL = findType(Left);
  QualType TR = findType(Right);
  // TODO cleanup
  QualType Lcanon = TL.getCanonicalType();
  QualType Rcanon = TR.getCanonicalType();
  assert(Lcanon.isBuiltinType());
  assert(Rcanon.isBuiltinType());
  const BuiltinType* Lbi = cast<BuiltinType>(Lcanon);
  const BuiltinType* Rbi = cast<BuiltinType>(Rcanon);
  if (Lbi->getWidth() > Rbi->getWidth()) {
    return TL;
  }
  return TR;
}
/// CastRetrievedVal - Used by subclasses of StoreManager to implement
/// implicit casts that arise from loads from regions that are reinterpreted
/// as another region.
SVal StoreManager::CastRetrievedVal(SVal V, const TypedValueRegion *R,
                                    QualType castTy) {
  if (castTy.isNull() || V.isUnknownOrUndef())
    return V;

  // The dispatchCast() call below would convert the int into a float.
  // What we want, however, is a bit-by-bit reinterpretation of the int
  // as a float, which usually yields nothing but garbage. For now, skip
  // casts from ints to floats.
  // TODO: What other combinations of types are affected?
  if (castTy->isFloatingType()) {
    SymbolRef Sym = V.getAsSymbol();
    if (Sym && !Sym->getType()->isFloatingType())
      return UnknownVal();
  }

  // When retrieving a symbolic pointer where a non-void pointer is expected,
  // wrap it into an element region of the expected type if necessary.
  // SValBuilder::dispatchCast() doesn't do that, but it is necessary to
  // make sure that the retrieved value makes sense, because there's no other
  // cast in the AST that would tell us to cast it to the correct pointer type.
  // We might need to do that for non-void pointers as well.
  // FIXME: We really need a single good function to perform casts for us
  // correctly every time we need it.
  if (castTy->isPointerType() && !castTy->isVoidPointerType())
    if (const auto *SR = dyn_cast_or_null<SymbolicRegion>(V.getAsRegion()))
      if (SR->getSymbol()->getType().getCanonicalType() !=
          castTy.getCanonicalType())
        return loc::MemRegionVal(castRegion(SR, castTy));

  return svalBuilder.dispatchCast(V, castTy);
}
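// Illustrative sketch (not from the source): the int-to-float pattern the
// guard above bails out on. Loading an int's storage through a float lvalue
// reinterprets the bits rather than converting the value, so the analyzer
// conservatively returns UnknownVal instead of modeling it as a cast.
float reinterpret_bits(int i) {
  return *(float *)&i; // bit reinterpretation, not the value conversion (float)i
}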
unsigned CodeGenModule::getAlignment(QualType Q) const {
  const Type* T = Q.getCanonicalType();
  switch (T->getTypeClass()) {
  case TC_BUILTIN:
    return cast<BuiltinType>(T)->getAlignment();
  case TC_POINTER:
    return 4; // TEMP
  case TC_ARRAY:
    return getAlignment(cast<ArrayType>(T)->getElementType());
  case TC_UNRESOLVED:
  case TC_ALIAS:
    assert(0);
    break;
  case TC_STRUCT:
    // TEMP, for now always align on 4
    return 4;
  case TC_ENUM:
    assert(0);
    break;
  case TC_FUNCTION:
    assert(0 && "TODO");
    break;
  case TC_MODULE:
    assert(0);
    break;
  }
  return 0;
}
bool ExprTypeAnalyser::checkCompatible(QualType left, const Expr* expr) const {
  QualType right = expr->getType();
  //right = TypeFinder::findType(expr);
  assert(left.isValid());
  const Type* canon = left.getCanonicalType();
  assert(canon);
  switch (canon->getTypeClass()) {
  case TC_BUILTIN:
    return checkBuiltin(left, right, expr, true);
  case TC_POINTER:
    return checkPointer(left, right, expr);
  case TC_ARRAY:
    break;
  case TC_UNRESOLVED:
    break;
  case TC_ALIAS:
    break;
  case TC_STRUCT:
    break;
  case TC_ENUM:
    break;
  case TC_FUNCTION:
    return checkFunction(left, expr);
  case TC_MODULE:
    assert(0 && "TODO");
    break;
  }
  return false;
}
bool clang::index::generateUSRForType(QualType T, ASTContext &Ctx,
                                      SmallVectorImpl<char> &Buf) {
  if (T.isNull())
    return true;
  T = T.getCanonicalType();

  USRGenerator UG(&Ctx, Buf);
  UG.VisitType(T);
  return UG.ignoreResults();
}
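// Hypothetical usage sketch (the surrounding function and variable names are
// assumptions, not from the source): generateUSRForType fills Buf with the
// type's USR and returns true when the result should be ignored (failure).
void printTypeUSR(clang::QualType T, clang::ASTContext &Ctx) {
  llvm::SmallString<128> Buf;
  if (!clang::index::generateUSRForType(T, Ctx, Buf))
    llvm::outs() << "USR: " << Buf << "\n";
}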
void FindGPUMacro::analyze_value_decl(ValueDecl *val) {
  QualType type = val->getType();
  std::pair<uint64_t, unsigned> fieldInfo =
      val->getASTContext().getTypeInfo(val->getType());
  uint64_t typeSize = fieldInfo.first;
  unsigned fieldAlign = fieldInfo.second;
  _os << "base type: " << type.getCanonicalType().getAsString()
      << ", size (bits): " << typeSize
      << ", align (bits): " << fieldAlign << "\n";
}
static bool isStdInitializerList(QualType Type) {
  Type = Type.getCanonicalType();
  if (const auto *TS = Type->getAs<TemplateSpecializationType>()) {
    if (const TemplateDecl *TD = TS->getTemplateName().getAsTemplateDecl())
      return declIsStdInitializerList(TD);
  }
  if (const auto *RT = Type->getAs<RecordType>()) {
    if (const auto *Specialization =
            dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl()))
      return declIsStdInitializerList(Specialization->getSpecializedTemplate());
  }
  return false;
}
bool RetainSummaryManager::isKnownSmartPointer(QualType QT) {
  QT = QT.getCanonicalType();
  const auto *RD = QT->getAsCXXRecordDecl();
  if (!RD)
    return false;
  const IdentifierInfo *II = RD->getIdentifier();
  if (II && II->getName() == "smart_ptr")
    if (const auto *ND = dyn_cast<NamespaceDecl>(RD->getDeclContext()))
      if (ND->getNameAsString() == "os")
        return true;
  return false;
}
bool SymbolManager::canSymbolicate(QualType T) {
  T = T.getCanonicalType();

  if (Loc::isLocType(T))
    return true;

  if (T->isIntegralOrEnumerationType())
    return true;

  if (T->isRecordType() && !T->isUnionType())
    return true;

  return false;
}
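// Illustration (not from the source): types canSymbolicate accepts or rejects
// under the rules above.
//   int *p; int i; enum E e;      -> true  (location / integral / enumeration)
//   struct S { int x; };          -> true  (non-union record)
//   union U { int a; float b; };  -> false (union)
//   float f;                      -> false (not integral, not a location)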
bool ExprTypeAnalyser::checkFunctionCast(const ExplicitCastExpr* expr,
                                         QualType DestType, QualType SrcType) {
  switch (DestType->getTypeClass()) {
  case TC_BUILTIN:
    {
      // TODO duplicate code
      // only allow cast to uint32/64 (pointer size)
      const BuiltinType* BT = dyncast<BuiltinType>(DestType.getCanonicalType());
      // TODO use TargetInfo to check if 32-bit
      if (BT && BT->getKind() == BuiltinType::UInt64) return true;

      QualType expected = Type::UInt64();
      StringBuilder buf1(MAX_LEN_TYPENAME);
      expected.DiagName(buf1);
      // TODO use warn_int_to_void_pointer_cast, remove err_cast_pointer_to_nonword
      Diags.Report(expr->getLocation(), diag::err_cast_pointer_to_nonword) << buf1;
      return false;
    }
  case TC_POINTER:
  case TC_ARRAY:
  case TC_UNRESOLVED:
  case TC_ALIAS:
  case TC_STRUCT:
    assert(0 && "should not come here");
    return false;
  case TC_ENUM:
    break; // deny
  case TC_FUNCTION:
    {
      // Always allow TEMP
      return true;
      /*
      // check other function proto, allow if same
      const FunctionType* src = cast<FunctionType>(SrcType);
      const FunctionType* dest = cast<FunctionType>(DestType);
      if (FunctionType::sameProto(src, dest)) return true;
      break; // deny
      */
    }
  case TC_MODULE:
    assert(0 && "should not come here");
    return false;
  }
  StringBuilder buf1(MAX_LEN_TYPENAME);
  StringBuilder buf2(MAX_LEN_TYPENAME);
  SrcType.DiagName(buf1);
  DestType.DiagName(buf2);
  Diags.Report(expr->getLocation(), diag::err_illegal_cast)
      << buf1 << buf2 << expr->getSourceRange();
  return false;
}
bool LiteralAnalyser::checkRange(QualType TLeft, const Expr* Right,
                                 clang::SourceLocation Loc, llvm::APSInt Result) {
  // TODO refactor with check()
  const QualType QT = TLeft.getCanonicalType();
  int availableWidth = 0;
  if (QT.isBuiltinType()) {
    const BuiltinType* TL = cast<BuiltinType>(QT);
    if (!TL->isInteger()) {
      // TODO floats
      return false;
    }
    availableWidth = TL->getIntegerWidth();
  } else {
    QT.dump();
    assert(0 && "todo");
  }

  const Limit* L = getLimit(availableWidth);
  assert(Result.isSigned() && "TEMP FOR NOW");
  int64_t value = Result.getSExtValue();
  bool overflow = false;
  if (Result.isNegative()) {
    const int64_t limit = L->minVal;
    if (value < limit) overflow = true;
  } else {
    const int64_t limit = (int64_t)L->maxVal;
    if (value > limit) overflow = true;
  }
  //fprintf(stderr, "VAL=%lld width=%d signed=%d\n", value, availableWidth, Result.isSigned());
  if (overflow) {
    SmallString<20> ss;
    Result.toString(ss, 10, true);

    StringBuilder buf1;
    TLeft->DiagName(buf1);
    if (Right) {
      Diags.Report(Right->getLocStart(), diag::err_literal_outofbounds)
          << buf1 << L->minStr << L->maxStr << ss << Right->getSourceRange();
    } else {
      Diags.Report(Loc, diag::err_literal_outofbounds)
          << buf1 << L->minStr << L->maxStr << ss;
    }
    return false;
  }
  return true;
}
// Based on QualType::isTrivial.
bool isTriviallyDefaultConstructible(QualType Type, const ASTContext &Context) {
  if (Type.isNull())
    return false;

  if (Type->isArrayType())
    return isTriviallyDefaultConstructible(Context.getBaseElementType(Type),
                                           Context);

  // Return false for incomplete types after skipping any incomplete array
  // types which are expressly allowed by the standard and thus our API.
  if (Type->isIncompleteType())
    return false;

  if (Context.getLangOpts().ObjCAutoRefCount) {
    switch (Type.getObjCLifetime()) {
    case Qualifiers::OCL_ExplicitNone:
      return true;
    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
    case Qualifiers::OCL_Autoreleasing:
      return false;
    case Qualifiers::OCL_None:
      if (Type->isObjCLifetimeType())
        return false;
      break;
    }
  }

  QualType CanonicalType = Type.getCanonicalType();
  if (CanonicalType->isDependentType())
    return false;

  // As an extension, Clang treats vector types as Scalar types.
  if (CanonicalType->isScalarType() || CanonicalType->isVectorType())
    return true;

  if (const auto *RT = CanonicalType->getAs<RecordType>()) {
    return recordIsTriviallyDefaultConstructible(*RT->getDecl(), Context);
  }

  // No other types can match.
  return false;
}
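// Illustration (not from the source): what the predicate above should decide
// for a few representative C++ types.
struct Scalars { int a; float b; };  // all-scalar record: trivially default constructible
struct UserCtor { UserCtor() {} };   // user-provided constructor: not trivial
struct Virt { virtual void f(); };   // vtable pointer must be initialized: not trivial
typedef int Grid[4][2];              // array: reduced to its base element type (int)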
/// isSafeToConvert - Return true if it is safe to convert this field type,
/// which requires the structure elements contained by-value to all be
/// recursively safe to convert.
static bool
isSafeToConvert(QualType T, CodeGenTypes &CGT,
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
  T = T.getCanonicalType();

  // If this is a record, check it.
  if (const RecordType *RT = dyn_cast<RecordType>(T))
    return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked);

  // If this is an array, check the elements, which are embedded inline.
  if (const ArrayType *AT = dyn_cast<ArrayType>(T))
    return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked);

  // Otherwise, there is no concern about transforming this. We only care about
  // things that are contained by-value in a structure that can have another
  // structure as a member.
  return true;
}
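// Illustration (not from the source): the by-value containment this guard
// walks. Converting Outer requires its record member and the inline array
// elements to be recursively safe to convert first.
struct Inner { int x; };
struct Outer {
  struct Inner in;     // record member: checked recursively
  struct Inner arr[4]; // array elements embedded inline: element type checked
};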
bool CodeGenFunction::hasAggregateLLVMType(QualType type) {
  switch (type.getCanonicalType()->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("non-canonical or dependent type in IR-generation");

  case Type::Builtin:
  case Type::Pointer:
  case Type::BlockPointer:
  case Type::LValueReference:
  case Type::RValueReference:
  case Type::MemberPointer:
  case Type::Vector:
  case Type::ExtVector:
  case Type::FunctionProto:
  case Type::FunctionNoProto:
  case Type::Enum:
  case Type::ObjCObjectPointer:
    return false;

  // Complexes, arrays, records, and Objective-C objects.
  case Type::Complex:
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::Record:
  case Type::ObjCObject:
  case Type::ObjCInterface:
    return true;

  // In IRGen, atomic types are just the underlying type
  case Type::Atomic:
    return hasAggregateLLVMType(type->getAs<AtomicType>()->getValueType());
  }
  llvm_unreachable("unknown type kind!");
}
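// Illustration (not from the source): how the classification above maps onto
// common types.
//   int, int *, int &, enum E, void (*)(int)  -> false (scalar in LLVM IR)
//   _Complex double, int[4], struct S,
//   Objective-C object types                  -> true  (aggregate in LLVM IR)
//   _Atomic(struct S)                         -> follows the underlying type: true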
bool ExprTypeAnalyser::checkNonPointerCast(const ExplicitCastExpr* expr,
                                           QualType DestType, QualType SrcType) {
  // by now: DestType isScalar(): Bool, Arithmetic, Function or Enum
  QualType C = SrcType.getCanonicalType();
  switch (C->getTypeClass()) {
  case TC_BUILTIN:
    return checkBuiltinCast(expr, DestType, SrcType);
  case TC_POINTER:
    assert(0 && "should not come here");
    return false;
  case TC_ARRAY:
    // TODO
    break;
  case TC_UNRESOLVED:
    // TODO
    break;
  case TC_ALIAS:
    // TODO
    break;
  case TC_STRUCT:
    // no casts allowed
    break;
  case TC_ENUM:
    return checkEnumCast(expr, DestType, SrcType);
  case TC_FUNCTION:
    return checkFunctionCast(expr, DestType, SrcType);
  case TC_MODULE:
    // no casts allowed
    break;
  }
  // TODO refactor duplicate code (after completion)
  StringBuilder buf1(MAX_LEN_TYPENAME);
  StringBuilder buf2(MAX_LEN_TYPENAME);
  SrcType.DiagName(buf1);
  DestType.DiagName(buf2);
  Diags.Report(expr->getLocation(), diag::err_illegal_cast)
      << buf1 << buf2 << expr->getSourceRange();
  return false;
}
bool LiteralAnalyser::calcWidth(QualType TLeft, const Expr* Right,
                                int* availableWidth) {
  const QualType QT = TLeft.getCanonicalType();
  // TODO check if type is already ok, then skip the check?
  //if (QT == Right->getType().getCanonicalType()) return;
  if (QT.isBuiltinType()) {
    const BuiltinType* TL = cast<BuiltinType>(QT);
    if (!TL->isInteger()) {
      // TODO floats
      return false;
    }
    // TODO remove const cast
    Expr* EE = const_cast<Expr*>(Right);
    QualType Canon = EE->getType().getCanonicalType();
    assert(Canon->isBuiltinType());
    const BuiltinType* BI = cast<BuiltinType>(Canon);
    if (TL->getKind() != BI->getKind()) EE->setImpCast(TL->getKind());
    if (QT == Type::Bool()) {
      // NOTE: any integer to bool is ok
      return false;
    }
    *availableWidth = TL->getIntegerWidth();
  } else if (QT.isPointerType()) {
    *availableWidth = 32; // only 32-bit for now
    // don't ask for pointer, replace with uint32 here.
  } else {
    StringBuilder t1name(128);
    Right->getType().DiagName(t1name);
    // Q: allow FuncPtr to return 0? (or nil?)
    StringBuilder t2name(128);
    TLeft->DiagName(t2name);
    Diags.Report(Right->getLocation(), diag::err_typecheck_convert_incompatible)
        << t1name << t2name << 2 << 0 << 0;
    return false;
    //QT.dump();
    //assert(0 && "todo");
  }
  return true;
}
// FIXME: should rewrite according to the cast kind.
SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
  castTy = Context.getCanonicalType(castTy);
  originalTy = Context.getCanonicalType(originalTy);
  if (val.isUnknownOrUndef() || castTy == originalTy)
    return val;

  if (castTy->isBooleanType()) {
    if (val.isUnknownOrUndef())
      return val;
    if (val.isConstant())
      return makeTruthVal(!val.isZeroConstant(), castTy);
    if (!Loc::isLocType(originalTy) &&
        !originalTy->isIntegralOrEnumerationType() &&
        !originalTy->isMemberPointerType())
      return UnknownVal();
    if (SymbolRef Sym = val.getAsSymbol(true)) {
      BasicValueFactory &BVF = getBasicValueFactory();
      // FIXME: If we had a state here, we could see if the symbol is known to
      // be zero, but we don't.
      return makeNonLoc(Sym, BO_NE, BVF.getValue(0, Sym->getType()), castTy);
    }
    // Loc values are not always true, they could be weakly linked functions.
    if (Optional<Loc> L = val.getAs<Loc>())
      return evalCastFromLoc(*L, castTy);

    Loc L = val.castAs<nonloc::LocAsInteger>().getLoc();
    return evalCastFromLoc(L, castTy);
  }

  // For const casts, casts to void, just propagate the value.
  if (!castTy->isVariableArrayType() && !originalTy->isVariableArrayType())
    if (shouldBeModeledWithNoOp(Context, Context.getPointerType(castTy),
                                Context.getPointerType(originalTy)))
      return val;

  // Check for casts from pointers to integers.
  if (castTy->isIntegralOrEnumerationType() && Loc::isLocType(originalTy))
    return evalCastFromLoc(val.castAs<Loc>(), castTy);

  // Check for casts from integers to pointers.
  if (Loc::isLocType(castTy) && originalTy->isIntegralOrEnumerationType()) {
    if (Optional<nonloc::LocAsInteger> LV = val.getAs<nonloc::LocAsInteger>()) {
      if (const MemRegion *R = LV->getLoc().getAsRegion()) {
        StoreManager &storeMgr = StateMgr.getStoreManager();
        R = storeMgr.castRegion(R, castTy);
        return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
      }
      return LV->getLoc();
    }
    return dispatchCast(val, castTy);
  }

  // Just pass through function and block pointers.
  if (originalTy->isBlockPointerType() || originalTy->isFunctionPointerType()) {
    assert(Loc::isLocType(castTy));
    return val;
  }

  // Check for casts from array type to another type.
  if (const ArrayType *arrayT =
          dyn_cast<ArrayType>(originalTy.getCanonicalType())) {
    // We will always decay to a pointer.
    QualType elemTy = arrayT->getElementType();
    val = StateMgr.ArrayToPointer(val.castAs<Loc>(), elemTy);

    // Are we casting from an array to a pointer?  If so just pass on
    // the decayed value.
    if (castTy->isPointerType() || castTy->isReferenceType())
      return val;

    // Are we casting from an array to an integer?  If so, cast the decayed
    // pointer value to an integer.
    assert(castTy->isIntegralOrEnumerationType());

    // FIXME: Keep these here for now in case we decide soon that we
    // need the original decayed type.
    //    QualType elemTy = cast<ArrayType>(originalTy)->getElementType();
    //    QualType pointerTy = C.getPointerType(elemTy);
    return evalCastFromLoc(val.castAs<Loc>(), castTy);
  }

  // Check for casts from a region to a specific type.
  if (const MemRegion *R = val.getAsRegion()) {
    // Handle other casts of locations to integers.
    if (castTy->isIntegralOrEnumerationType())
      return evalCastFromLoc(loc::MemRegionVal(R), castTy);

    // FIXME: We should handle the case where we strip off view layers to get
    // to a desugared type.
    if (!Loc::isLocType(castTy)) {
      // FIXME: There can be gross cases where one casts the result of a
      // function (that returns a pointer) to some other value that happens to
      // fit within that pointer value. We currently have no good way to model
      // such operations. When this happens, the underlying operation is that
      // the caller is reasoning about bits. Conceptually we are layering a
      // "view" of a location on top of those bits. Perhaps we need to be more
      // lazy about mutual possible views, even on an SVal? This may be
      // necessary for bit-level reasoning as well.
      return UnknownVal();
    }

    // We get a symbolic function pointer for a dereference of a function
    // pointer, but it is of function type. Example:
    //  struct FPRec {
    //    void (*my_func)(int * x);
    //  };
    //
    //  int bar(int x);
    //
    //  int f1_a(struct FPRec* foo) {
    //    int x;
    //    (*foo->my_func)(&x);
    //    return bar(x)+1; // no-warning
    //  }
    assert(Loc::isLocType(originalTy) || originalTy->isFunctionType() ||
           originalTy->isBlockPointerType() || castTy->isReferenceType());

    StoreManager &storeMgr = StateMgr.getStoreManager();

    // Delegate to the store manager to get the result of casting a region to
    // a different type. If the MemRegion* returned is NULL, this expression
    // evaluates to UnknownVal.
    R = storeMgr.castRegion(R, castTy);
    return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
  }

  return dispatchCast(val, castTy);
}
/// \brief Compute the DeclContext that is associated with the given
/// scope specifier.
///
/// \param SS the C++ scope specifier as it appears in the source
///
/// \param EnteringContext when true, we will be entering the context of
/// this scope specifier, so we can retrieve the declaration context of a
/// class template or class template partial specialization even if it is
/// not the current instantiation.
///
/// \returns the declaration context represented by the scope specifier @p SS,
/// or NULL if the declaration context cannot be computed (e.g., because it is
/// dependent and not the current instantiation).
DeclContext *Sema::computeDeclContext(const CXXScopeSpec &SS,
                                      bool EnteringContext) {
  if (!SS.isSet() || SS.isInvalid())
    return nullptr;

  NestedNameSpecifier *NNS = SS.getScopeRep();
  if (NNS->isDependent()) {
    // If this nested-name-specifier refers to the current
    // instantiation, return its DeclContext.
    if (CXXRecordDecl *Record = getCurrentInstantiationOf(NNS))
      return Record;

    if (EnteringContext) {
      const Type *NNSType = NNS->getAsType();
      if (!NNSType) {
        return nullptr;
      }

      // Look through type alias templates, per C++0x [temp.dep.type]p1.
      NNSType = Context.getCanonicalType(NNSType);
      if (const TemplateSpecializationType *SpecType
            = NNSType->getAs<TemplateSpecializationType>()) {
        // We are entering the context of the nested name specifier, so try to
        // match the nested name specifier to either a primary class template
        // or a class template partial specialization.
        if (ClassTemplateDecl *ClassTemplate
              = dyn_cast_or_null<ClassTemplateDecl>(
                  SpecType->getTemplateName().getAsTemplateDecl())) {
          QualType ContextType
            = Context.getCanonicalType(QualType(SpecType, 0));

          // If the type of the nested name specifier is the same as the
          // injected class name of the named class template, we're entering
          // into that class template definition.
          QualType Injected
            = ClassTemplate->getInjectedClassNameSpecialization();
          // Injected might not be canonical.
          Injected = Injected.getCanonicalType();
          if (Context.hasSameType(Injected, ContextType))
            return ClassTemplate->getTemplatedDecl();

          // If the type of the nested name specifier is the same as the
          // type of one of the class template's class template partial
          // specializations, we're entering into the definition of that
          // class template partial specialization.
          if (ClassTemplatePartialSpecializationDecl *PartialSpec
                = ClassTemplate->findPartialSpecialization(ContextType)) {
            // A declaration of the partial specialization must be visible.
            // We can always recover here, because this only happens when we're
            // entering the context, and that can't happen in a SFINAE context.
            assert(!isSFINAEContext() &&
                   "partial specialization scope specifier in SFINAE context?");
            if (!hasVisibleDeclaration(PartialSpec))
              diagnoseMissingImport(SS.getLastQualifierNameLoc(), PartialSpec,
                                    MissingImportKind::PartialSpecialization,
                                    /*Recover*/true);
            return PartialSpec;
          }
        }
      } else if (const RecordType *RecordT = NNSType->getAs<RecordType>()) {
        // The nested name specifier refers to a member of a class template.
        return RecordT->getDecl();
      }
    }

    return nullptr;
  }

  switch (NNS->getKind()) {
  case NestedNameSpecifier::Identifier:
    llvm_unreachable("Dependent nested-name-specifier has no DeclContext");

  case NestedNameSpecifier::Namespace:
    return NNS->getAsNamespace();

  case NestedNameSpecifier::NamespaceAlias:
    return NNS->getAsNamespaceAlias()->getNamespace();

  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate: {
    const TagType *Tag = NNS->getAsType()->getAs<TagType>();
    if (!Tag && sema::HackForDefaultTemplateArg::AllowNonCanonicalSubst()) {
      // In case we are in the middle of a template name creation
      // that tries to keep some of the typedefs.
      Tag = NNS->getAsType()->getCanonicalTypeInternal()->getAs<TagType>();
    }
    assert(Tag && "Non-tag type in nested-name-specifier");
    return Tag->getDecl();
  }

  case NestedNameSpecifier::Global:
    return Context.getTranslationUnitDecl();

  case NestedNameSpecifier::Super:
    return NNS->getAsRecordDecl();
  }

  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}
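// Illustrative sketch (not from the source): a dependent scope specifier this
// function resolves. With EnteringContext set, Outer<T>:: is matched to the
// primary class template even though it is dependent and not the current
// instantiation.
template <typename T> struct Outer {
  struct Inner;
};
template <typename T>
struct Outer<T>::Inner { // computeDeclContext resolves Outer<T>:: here
  void f();
};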
// The following is the common part for 'cilk vector functions' and
// 'omp declare simd' functions metadata generation.
//
void CodeGenModule::EmitVectorVariantsMetadata(const CGFunctionInfo &FnInfo,
                                               const FunctionDecl *FD,
                                               llvm::Function *Fn,
                                               GroupMap &Groups) {
  // Do not emit any vector variant if there is an unsupported feature.
  bool HasImplicitThis = false;
  if (!CheckElementalArguments(*this, FD, Fn, HasImplicitThis))
    return;

  llvm::LLVMContext &Context = getLLVMContext();
  ASTContext &C = getContext();

  // Common metadata nodes.
  llvm::NamedMDNode *CilkElementalMetadata =
      getModule().getOrInsertNamedMetadata("cilk.functions");

  llvm::Metadata *ElementalMDArgs[] = {
    llvm::MDString::get(Context, "elemental")
  };
  llvm::MDNode *ElementalNode = llvm::MDNode::get(Context, ElementalMDArgs);

  llvm::Metadata *MaskMDArgs[] = {
    llvm::MDString::get(Context, "mask"),
    llvm::ConstantAsMetadata::get(
        llvm::ConstantInt::get(llvm::IntegerType::getInt1Ty(Context), 1))
  };
  llvm::MDNode *MaskNode = llvm::MDNode::get(Context, MaskMDArgs);
  MaskMDArgs[1] = llvm::ConstantAsMetadata::get(
      llvm::ConstantInt::get(llvm::IntegerType::getInt1Ty(Context), 0));
  llvm::MDNode *NoMaskNode = llvm::MDNode::get(Context, MaskMDArgs);

  SmallVector<llvm::Metadata*, 8> ParameterNameArgs;
  ParameterNameArgs.push_back(llvm::MDString::get(Context, "arg_name"));
  llvm::MDNode *ParameterNameNode = 0;

  // // Vector variant metadata.
  // llvm::Value *VariantMDArgs[] = {
  //   llvm::MDString::get(Context, "variant"),
  //   llvm::UndefValue::get(llvm::Type::getVoidTy(Context))
  // };
  // llvm::MDNode *VariantNode = llvm::MDNode::get(Context, VariantMDArgs);

  for (GroupMap::iterator GI = Groups.begin(), GE = Groups.end();
       GI != GE; ++GI) {
    CilkElementalGroup &G = GI->second;

    // Parameter information.
    QualType FirstNonStepParmType;
    SmallVector<llvm::Metadata *, 8> AligArgs;
    SmallVector<llvm::Metadata *, 8> StepArgs;
    AligArgs.push_back(llvm::MDString::get(Context, "arg_alig"));
    StepArgs.push_back(llvm::MDString::get(Context, "arg_step"));

    // Handle implicit 'this' parameter if necessary.
    if (HasImplicitThis) {
      ParameterNameArgs.push_back(llvm::MDString::get(Context, "this"));
      bool IsNonStepParm = handleParameter(*this, G, "this",
                                           StepArgs, AligArgs);
      if (IsNonStepParm)
        FirstNonStepParmType = cast<CXXMethodDecl>(FD)->getThisType(C);
    }

    // Handle explicit parameters.
    for (unsigned I = 0; I != FD->getNumParams(); ++I) {
      const ParmVarDecl *Parm = FD->getParamDecl(I);
      StringRef ParmName = Parm->getName();
      if (!ParameterNameNode)
        ParameterNameArgs.push_back(llvm::MDString::get(Context, ParmName));
      bool IsNonStepParm = handleParameter(*this, G, ParmName,
                                           StepArgs, AligArgs);
      if (IsNonStepParm && FirstNonStepParmType.isNull())
        FirstNonStepParmType = Parm->getType();
    }

    llvm::MDNode *StepNode = llvm::MDNode::get(Context, StepArgs);
    llvm::MDNode *AligNode = llvm::MDNode::get(Context, AligArgs);
    if (!ParameterNameNode)
      ParameterNameNode = llvm::MDNode::get(Context, ParameterNameArgs);

    // If there is no vectorlengthfor() in this group, determine the
    // characteristic type. This can depend on the linear/uniform attributes,
    // so it can differ between groups.
    //
    // The rules for computing the characteristic type are:
    //
    // a) For a non-void function, the characteristic data type is the
    //    return type.
    //
    // b) If the function has any non-uniform, non-linear parameters, then
    //    the characteristic data type is the type of the first such
    //    parameter.
    //
    // c) If the characteristic data type determined by a) or b) above is a
    //    struct, union, or class type which is pass-by-value (except for
    //    the type that maps to the built-in complex data type), the
    //    characteristic data type is int.
    //
    // d) If none of the above three cases is applicable,
    //    the characteristic data type is int.
    //
    // e) For Intel Xeon Phi native and offload compilation, if the resulting
    //    characteristic data type is an 8-bit or 16-bit integer data type,
    //    the characteristic data type is int.
    //
    // These rules miss reference types, for which we use their pointer types.
    //
    if (G.VecLengthFor.empty()) {
      QualType FnRetTy = FD->getReturnType();
      QualType CharacteristicType;
      if (!FnRetTy->isVoidType())
        CharacteristicType = FnRetTy;
      else if (!FirstNonStepParmType.isNull())
        CharacteristicType = FirstNonStepParmType.getCanonicalType();
      else
        CharacteristicType = C.IntTy;

      if (CharacteristicType->isReferenceType()) {
        QualType BaseTy = CharacteristicType.getNonReferenceType();
        CharacteristicType = C.getPointerType(BaseTy);
      } else if (CharacteristicType->isAggregateType())
        CharacteristicType = C.IntTy;
      // FIXME: handle Xeon Phi targets.
      G.VecLengthFor.push_back(CharacteristicType);
    }

    // // If no mask variants are specified, generate both.
    // if (G.Mask.empty()) {
    //   G.Mask.push_back(1);
    //   G.Mask.push_back(0);
    // }

    // If no vector length is specified, push a dummy value to iterate over.
    if (G.VecLength.empty())
      G.VecLength.push_back(0);

    for (CilkElementalGroup::VecLengthForVector::iterator
           TI = G.VecLengthFor.begin(),
           TE = G.VecLengthFor.end();
         TI != TE; ++TI) {

      uint64_t VectorRegisterBytes = 0;
      // Inspect the current target features to determine the
      // appropriate vector size.
      // This is currently X86 specific.
      if (Target.hasFeature("avx2"))
        VectorRegisterBytes = 64;
      else if (Target.hasFeature("avx"))
        VectorRegisterBytes = 32;
      else if (Target.hasFeature("sse2"))
        VectorRegisterBytes = 16;
      else if (Target.hasFeature("sse") &&
               (*TI)->isFloatingType() &&
               C.getTypeSizeInChars(*TI).getQuantity() == 4)
        VectorRegisterBytes = 16;
      else if (Target.hasFeature("mmx") && (*TI)->isIntegerType())
        VectorRegisterBytes = 8;

      for (CilkElementalGroup::VecLengthVector::iterator
             LI = G.VecLength.begin(),
             LE = G.VecLength.end();
           LI != LE; ++LI) {

        uint64_t VL = *LI ? *LI :
          (CharUnits::fromQuantity(VectorRegisterBytes) /
           C.getTypeSizeInChars(*TI));

        llvm::MDNode *VecTypeNode =
            MakeVecLengthMetadata(*this, "vec_length", *TI, VL);

        {
          SmallVector<llvm::Metadata*, 7> kernelMDArgs;
          kernelMDArgs.push_back(llvm::ValueAsMetadata::get(Fn));
          kernelMDArgs.push_back(ElementalNode);
          kernelMDArgs.push_back(ParameterNameNode);
          kernelMDArgs.push_back(StepNode);
          kernelMDArgs.push_back(AligNode);
          kernelMDArgs.push_back(VecTypeNode);
          if (!G.Mask.empty())
            kernelMDArgs.push_back((G.Mask.back()==0) ? NoMaskNode : MaskNode);
          llvm::MDNode *KernelMD = llvm::MDNode::get(Context, kernelMDArgs);
          CilkElementalMetadata->addOperand(KernelMD);
        }

        // for (CilkElementalGroup::MaskVector::iterator
        //        MI = G.Mask.begin(),
        //        ME = G.Mask.end();
        //      MI != ME;
        //      ++MI) {
        //   SmallVector <llvm::Value*, 7> kernelMDArgs;
        //   kernelMDArgs.push_back(Fn);
        //   kernelMDArgs.push_back(ElementalNode);
        //   kernelMDArgs.push_back(ParameterNameNode);
        //   kernelMDArgs.push_back(StepNode);
        //   kernelMDArgs.push_back(AligNode);
        //   kernelMDArgs.push_back(VecTypeNode);
        //   kernelMDArgs.push_back((*MI==0)?(NoMaskNode):(MaskNode));
        //   if (ProcessorNode)
        //     kernelMDArgs.push_back(ProcessorNode);
        //   kernelMDArgs.push_back(VariantNode);
        //   llvm::MDNode *KernelMD = llvm::MDNode::get(Context, kernelMDArgs);
        //   CilkElementalMetadata->addOperand(KernelMD);
        //   ElementalVariantToEmit.push_back(
        //       ElementalVariantInfo(&FnInfo, FD, Fn, KernelMD));
        // }
      }
    }
  }
}
/// \brief The LoopFixer callback, which determines if loops discovered by the
/// matchers are convertible, printing information about the loops if so.
void LoopFixer::run(const MatchFinder::MatchResult &Result) {
  const BoundNodes &Nodes = Result.Nodes;
  Confidence ConfidenceLevel(RL_Safe);
  ASTContext *Context = Result.Context;

  const ForStmt *TheLoop = Nodes.getStmtAs<ForStmt>(LoopName);

  if (!Owner.isFileModifiable(Context->getSourceManager(),
                              TheLoop->getForLoc()))
    return;

  // Check that we have exactly one index variable and at most one end
  // variable.
  const VarDecl *LoopVar = Nodes.getDeclAs<VarDecl>(IncrementVarName);
  const VarDecl *CondVar = Nodes.getDeclAs<VarDecl>(ConditionVarName);
  const VarDecl *InitVar = Nodes.getDeclAs<VarDecl>(InitVarName);
  if (!areSameVariable(LoopVar, CondVar) || !areSameVariable(LoopVar, InitVar))
    return;
  const VarDecl *EndVar = Nodes.getDeclAs<VarDecl>(EndVarName);
  const VarDecl *ConditionEndVar =
      Nodes.getDeclAs<VarDecl>(ConditionEndVarName);
  if (EndVar && !areSameVariable(EndVar, ConditionEndVar))
    return;

  // If the end comparison isn't a variable, we can try to work with the
  // expression the loop variable is being tested against instead.
  const CXXMemberCallExpr *EndCall =
      Nodes.getStmtAs<CXXMemberCallExpr>(EndCallName);
  const Expr *BoundExpr = Nodes.getStmtAs<Expr>(ConditionBoundName);

  // If the loop calls end()/size() after each iteration, lower our confidence
  // level.
  if (FixerKind != LFK_Array && !EndVar)
    ConfidenceLevel.lowerTo(RL_Reasonable);

  const Expr *ContainerExpr = nullptr;
  bool DerefByValue = false;
  bool DerefByConstRef = false;
  bool ContainerNeedsDereference = false;
  // FIXME: Try to put most of this logic inside a matcher. Currently, matchers
  // don't allow the right-recursive checks in digThroughConstructors.
  if (FixerKind == LFK_Iterator) {
    ContainerExpr = findContainer(Context, LoopVar->getInit(),
                                  EndVar ? EndVar->getInit() : EndCall,
                                  &ContainerNeedsDereference);

    QualType InitVarType = InitVar->getType();
    QualType CanonicalInitVarType = InitVarType.getCanonicalType();

    const CXXMemberCallExpr *BeginCall =
        Nodes.getNodeAs<CXXMemberCallExpr>(BeginCallName);
    assert(BeginCall && "Bad Callback. No begin call expression.");
    QualType CanonicalBeginType =
        BeginCall->getMethodDecl()->getReturnType().getCanonicalType();

    if (CanonicalBeginType->isPointerType() &&
        CanonicalInitVarType->isPointerType()) {
      QualType BeginPointeeType = CanonicalBeginType->getPointeeType();
      QualType InitPointeeType = CanonicalInitVarType->getPointeeType();
      // If the initializer and the variable are both pointers, check if the
      // unqualified pointee types match; otherwise we don't use auto.
      if (!Context->hasSameUnqualifiedType(InitPointeeType, BeginPointeeType))
        return;
    } else {
      // Check for qualified types to avoid conversions from non-const to const
      // iterator types.
      if (!Context->hasSameType(CanonicalInitVarType, CanonicalBeginType))
        return;
    }

    DerefByValue = Nodes.getNodeAs<QualType>(DerefByValueResultName) != nullptr;
    if (!DerefByValue) {
      if (const QualType *DerefType =
              Nodes.getNodeAs<QualType>(DerefByRefResultName)) {
        // A node will only be bound with DerefByRefResultName if we're dealing
        // with a user-defined iterator type. Test the const qualification of
        // the reference type.
        DerefByConstRef = (*DerefType)->getAs<ReferenceType>()->getPointeeType()
                              .isConstQualified();
      } else {
        // By nature of the matcher this case is triggered only for built-in
        // iterator types (i.e. pointers).
        assert(isa<PointerType>(CanonicalInitVarType) &&
               "Non-class iterator type is not a pointer type");
        QualType InitPointeeType = CanonicalInitVarType->getPointeeType();
        QualType BeginPointeeType = CanonicalBeginType->getPointeeType();
        // If the initializer and variable have both the same type just use
        // auto; otherwise we test for const qualification of the pointed-at
        // type.
        if (!Context->hasSameType(InitPointeeType, BeginPointeeType))
          DerefByConstRef = InitPointeeType.isConstQualified();
      }
    } else {
      // If the dereference operator returns by value then test for the
      // canonical const qualification of the init variable type.
      DerefByConstRef = CanonicalInitVarType.isConstQualified();
    }
  } else if (FixerKind == LFK_PseudoArray) {
    if (!EndCall)
      return;
    ContainerExpr = EndCall->getImplicitObjectArgument();
    const MemberExpr *Member = dyn_cast<MemberExpr>(EndCall->getCallee());
    if (!Member)
      return;
    ContainerNeedsDereference = Member->isArrow();
  }

  // We must know the container or an array length bound.
  if (!ContainerExpr && !BoundExpr)
    return;

  findAndVerifyUsages(Context, LoopVar, EndVar, ContainerExpr, BoundExpr,
                      ContainerNeedsDereference, DerefByValue, DerefByConstRef,
                      TheLoop, ConfidenceLevel);
}
void Walker::VisitCXXMemberCallExpr(CXXMemberCallExpr *CE) {
  LangOptions LangOpts;
  LangOpts.CPlusPlus = true;
  PrintingPolicy Policy(LangOpts);
  const Decl *D = AC->getDecl();
  std::string dname = "";
  if (const NamedDecl *ND = llvm::dyn_cast_or_null<NamedDecl>(D))
    dname = ND->getQualifiedNameAsString();
  CXXMethodDecl *MD = CE->getMethodDecl();
  if (!MD)
    return;
  std::string mname = MD->getQualifiedNameAsString();
  // llvm::errs()<<"Parent Decl: '"<<dname<<"'\n";
  // llvm::errs()<<"Method Decl: '"<<mname<<"'\n";
  // llvm::errs()<<"call expression '";
  // CE->printPretty(llvm::errs(),0,Policy);
  // llvm::errs()<<"'\n";
  // if (!MD) return;
  llvm::SmallString<100> buf;
  llvm::raw_svector_ostream os(buf);
  if (mname == "edm::Event::getByLabel" ||
      mname == "edm::Event::getManyByType") {
    // if (const CXXRecordDecl *RD =
    //         llvm::dyn_cast_or_null<CXXMethodDecl>(D)->getParent()) {
    //   llvm::errs()<<"class "<<RD->getQualifiedNameAsString()<<"\n";
    //   llvm::errs()<<"\n";
    // }
    os << "function '";
    llvm::dyn_cast<CXXMethodDecl>(D)->getNameForDiagnostic(os, Policy, true);
    os << "' ";
    // os<<"call expression '";
    // CE->printPretty(os,0,Policy);
    // os<<"' ";
    if (mname == "edm::Event::getByLabel") {
      os << "calls edm::Event::getByLabel with arguments '";
      QualType QT;
      for (auto I = CE->arg_begin(), E = CE->arg_end(); I != E; ++I) {
        QT = (*I)->getType();
        std::string qtname = QT.getCanonicalType().getAsString();
        if (qtname.substr(0, 17) == "class edm::Handle") {
          // os<<"argument name '";
          // (*I)->printPretty(os,0,Policy);
          // os<<"' ";
          const CXXRecordDecl *RD = QT->getAsCXXRecordDecl();
          std::string rname = RD->getQualifiedNameAsString();
          os << rname << " ";
          const ClassTemplateSpecializationDecl *SD =
              dyn_cast<ClassTemplateSpecializationDecl>(RD);
          for (unsigned J = 0, F = SD->getTemplateArgs().size(); J != F; ++J) {
            SD->getTemplateArgs().data()[J].print(Policy, os);
            os << ", ";
          }
        } else {
          os << " " << qtname << " ";
          (*I)->printPretty(os, nullptr, Policy);
          os << ", ";
        }
      }
      os << "'\n";
    } else {
      os << "calls edm::Event::getManyByType with argument '";
      QualType QT = (*CE->arg_begin())->getType();
      const CXXRecordDecl *RD = QT->getAsCXXRecordDecl();
      os << "getManyByType , ";
      const ClassTemplateSpecializationDecl *SD =
          dyn_cast<ClassTemplateSpecializationDecl>(RD);
      const TemplateArgument TA = SD->getTemplateArgs().data()[0];
      const QualType AQT = TA.getAsType();
      const CXXRecordDecl *SRD = AQT->getAsCXXRecordDecl();
      os << SRD->getQualifiedNameAsString() << " ";
      const ClassTemplateSpecializationDecl *SVD =
          dyn_cast<ClassTemplateSpecializationDecl>(SRD);
      for (unsigned J = 0, F = SVD->getTemplateArgs().size(); J != F; ++J) {
        SVD->getTemplateArgs().data()[J].print(Policy, os);
        os << ", ";
      }
    }
    // llvm::errs()<<os.str()<<"\n";
    PathDiagnosticLocation CELoc =
        PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
    BugType *BT = new BugType(Checker,
                              "edm::getByLabel or edm::getManyByType called",
                              "optional");
    std::unique_ptr<BugReport> R =
        llvm::make_unique<BugReport>(*BT, os.str(), CELoc);
    R->addRange(CE->getSourceRange());
    BR.emitReport(std::move(R));
  } else {
    for (auto I = CE->arg_begin(), E = CE->arg_end(); I != E; ++I) {
      QualType QT = (*I)->getType();
      std::string qtname = QT.getAsString();
      // if (qtname.find(" edm::Event") != std::string::npos)
      //   llvm::errs()<<"arg type '" << qtname <<"'\n";
      if (qtname == "edm::Event" || qtname == "const edm::Event" ||
          qtname == "edm::Event *" || qtname == "const edm::Event *") {
        std::string tname;
        os << "function '" << dname << "' ";
        os << "calls '";
        MD->getNameForDiagnostic(os, Policy, true);
        os << "' with argument of type '" << qtname << "'\n";
        // llvm::errs()<<"\n";
        // llvm::errs()<<"call expression passed edm::Event ";
        // CE->printPretty(llvm::errs(),0,Policy);
        // llvm::errs()<<" argument name ";
        // (*I)->printPretty(llvm::errs(),0,Policy);
        // llvm::errs()<<" "<<qtname<<"\n";
        PathDiagnosticLocation CELoc =
            PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
        BugType *BT = new BugType(
            Checker, "function call with argument of type edm::Event",
            "optional");
        std::unique_ptr<BugReport> R =
            llvm::make_unique<BugReport>(*BT, os.str(), CELoc);
        R->addRange(CE->getSourceRange());
        BR.emitReport(std::move(R));
      }
    }
  }
}
QualType TypeResolver::checkCanonicals(Decls& decls, QualType Q, bool set) const {
  if (Q->hasCanonicalType()) return Q.getCanonicalType();

  const Type* T = Q.getTypePtr();
  switch (Q->getTypeClass()) {
  case TC_BUILTIN:
    return Q;
  case TC_POINTER:
    {
      const PointerType* P = cast<PointerType>(T);
      QualType t1 = P->getPointeeType();
      // Pointee will always be in the same TypeContext (file), since it's
      // either built-in or an UnresolvedType.
      QualType t2 = checkCanonicals(decls, t1, set);
      if (!t2.isValid()) return t2;

      QualType canon;
      if (t1 == t2) canon = Q;
      else {
        canon = typeContext.getPointerType(t2);
        if (!canon->hasCanonicalType()) canon->setCanonicalType(canon);
      }
      assert(Q.isValid());
      if (set) P->setCanonicalType(canon);
      return canon;
    }
  case TC_ARRAY:
    {
      const ArrayType* A = cast<ArrayType>(T);
      QualType t1 = A->getElementType();
      // NOTE: qualifiers are lost here!
      QualType t2 = checkCanonicals(decls, t1, set);
      if (!t2.isValid()) return t2;

      QualType canon;
      if (t1 == t2) canon = Q;
      // NOTE: need size Expr, but set ownership to none
      else {
        canon = typeContext.getArrayType(t2, A->getSizeExpr(), false,
                                         A->isIncremental());
        if (!canon->hasCanonicalType()) canon->setCanonicalType(canon);
      }
      if (set) A->setCanonicalType(canon);
      return canon;
    }
  case TC_UNRESOLVED:
    {
      const UnresolvedType* U = cast<UnresolvedType>(T);
      TypeDecl* TD = U->getDecl();
      assert(TD);
      // check if it exists
      if (!checkDecls(decls, TD)) {
        return QualType();
      }
      QualType canonical = checkCanonicals(decls, TD->getType(), false);
      if (set) U->setCanonicalType(canonical);
      return canonical;
    }
  case TC_ALIAS:
    {
      const AliasType* A = cast<AliasType>(T);
      if (!checkDecls(decls, A->getDecl())) {
        return QualType();
      }
      QualType canonical = checkCanonicals(decls, A->getRefType(), set);
      assert(Q.isValid());
      if (set) A->setCanonicalType(canonical);
      return canonical;
    }
  case TC_STRUCT:
    return Q.getCanonicalType();
  case TC_ENUM:
    assert(0 && "TODO");
    return 0;
  case TC_FUNCTION:
    return Q.getCanonicalType();
  case TC_MODULE:
    assert(0 && "TBD");
    return 0;
  }
  assert(0);
  return QualType(); // unreachable; silences missing-return warnings
}
/// \brief Convert the given type to a string suitable for printing as part of
/// a diagnostic.
///
/// There are four main criteria when determining whether we should have an
/// a.k.a. clause when pretty-printing a type:
///
/// 1) Some types provide very minimal sugar that doesn't impede the
///    user's understanding --- for example, elaborated type
///    specifiers. If this is all the sugar we see, we don't want an
///    a.k.a. clause.
/// 2) Some types are technically sugared but are much more familiar
///    when seen in their sugared form --- for example, va_list,
///    vector types, and the magic Objective-C types. We don't
///    want to desugar these, even if we do produce an a.k.a. clause.
/// 3) Some types may have already been desugared previously in this
///    diagnostic. If this is the case, doing another "aka" would just be
///    clutter.
/// 4) Two different types within the same diagnostic have the same output
///    string. In this case, force an a.k.a. with the desugared type when
///    doing so will provide additional information.
///
/// \param Context the context in which the type was allocated
/// \param Ty the type to print
/// \param QualTypeVals pointer values to QualTypes which are used in the
/// diagnostic message
static std::string
ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
                              const DiagnosticsEngine::ArgumentValue *PrevArgs,
                              unsigned NumPrevArgs,
                              ArrayRef<intptr_t> QualTypeVals) {
  // FIXME: Playing with std::string is really slow.
  bool ForceAKA = false;
  QualType CanTy = Ty.getCanonicalType();
  std::string S = Ty.getAsString(Context.getPrintingPolicy());
  std::string CanS = CanTy.getAsString(Context.getPrintingPolicy());

  for (unsigned I = 0, E = QualTypeVals.size(); I != E; ++I) {
    QualType CompareTy =
        QualType::getFromOpaquePtr(reinterpret_cast<void*>(QualTypeVals[I]));
    if (CompareTy.isNull())
      continue;
    if (CompareTy == Ty)
      continue;  // Same types
    QualType CompareCanTy = CompareTy.getCanonicalType();
    if (CompareCanTy == CanTy)
      continue;  // Same canonical types
    std::string CompareS = CompareTy.getAsString(Context.getPrintingPolicy());
    bool aka;
    QualType CompareDesugar = Desugar(Context, CompareTy, aka);
    std::string CompareDesugarStr =
        CompareDesugar.getAsString(Context.getPrintingPolicy());
    if (CompareS != S && CompareDesugarStr != S)
      continue;  // The type string is different than the comparison string
                 // and the desugared comparison string.
    std::string CompareCanS =
        CompareCanTy.getAsString(Context.getPrintingPolicy());
    if (CompareCanS == CanS)
      continue;  // No new info from canonical type

    ForceAKA = true;
    break;
  }

  // Check to see if we already desugared this type in this
  // diagnostic. If so, don't do it again.
  bool Repeated = false;
  for (unsigned i = 0; i != NumPrevArgs; ++i) {
    // TODO: Handle ak_declcontext case.
    if (PrevArgs[i].first == DiagnosticsEngine::ak_qualtype) {
      void *Ptr = (void*)PrevArgs[i].second;
      QualType PrevTy(QualType::getFromOpaquePtr(Ptr));
      if (PrevTy == Ty) {
        Repeated = true;
        break;
      }
    }
  }

  // Consider producing an a.k.a. clause if removing all the direct
  // sugar gives us something "significantly different".
  if (!Repeated) {
    bool ShouldAKA = false;
    QualType DesugaredTy = Desugar(Context, Ty, ShouldAKA);
    if (ShouldAKA || ForceAKA) {
      if (DesugaredTy == Ty) {
        DesugaredTy = Ty.getCanonicalType();
      }
      std::string akaStr = DesugaredTy.getAsString(Context.getPrintingPolicy());
      if (akaStr != S) {
        S = "'" + S + "' (aka '" + akaStr + "')";
        return S;
      }
    }
  }

  S = "'" + S + "'";
  return S;
}
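// Illustration (not from the source): the kind of diagnostic string this
// produces. Given
//   typedef unsigned long size_type;
// a diagnostic mentioning size_type may render it as
//   'size_type' (aka 'unsigned long')
// while a familiar sugared type such as va_list is left undesugared.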
void AvoidCStyleCastsCheck::check(const MatchFinder::MatchResult &Result) {
  const auto *CastExpr = Result.Nodes.getNodeAs<CStyleCastExpr>("cast");

  auto ParenRange = CharSourceRange::getTokenRange(CastExpr->getLParenLoc(),
                                                   CastExpr->getRParenLoc());
  // Ignore casts in macros.
  if (ParenRange.getBegin().isMacroID() || ParenRange.getEnd().isMacroID())
    return;

  // Casting to void is an idiomatic way to mute "unused variable" and similar
  // warnings.
  if (CastExpr->getTypeAsWritten()->isVoidType())
    return;

  QualType SourceType = CastExpr->getSubExprAsWritten()->getType();
  QualType DestType = CastExpr->getTypeAsWritten();

  if (SourceType == DestType) {
    diag(CastExpr->getLocStart(), "redundant cast to the same type")
        << FixItHint::CreateRemoval(ParenRange);
    return;
  }
  SourceType = SourceType.getCanonicalType();
  DestType = DestType.getCanonicalType();
  if (SourceType == DestType) {
    diag(CastExpr->getLocStart(),
         "possibly redundant cast between typedefs of the same type");
    return;
  }

  // The rest of this check is only relevant to C++.
  if (!Result.Context->getLangOpts().CPlusPlus)
    return;
  // Ignore code inside extern "C" {} blocks.
  if (!match(expr(hasAncestor(linkageSpecDecl())), *CastExpr, *Result.Context)
           .empty())
    return;

  // Leave type spelling exactly as it was (unlike
  // getTypeAsWritten().getAsString() which would spell enum types 'enum X').
  StringRef DestTypeString = Lexer::getSourceText(
      CharSourceRange::getTokenRange(
          CastExpr->getLParenLoc().getLocWithOffset(1),
          CastExpr->getRParenLoc().getLocWithOffset(-1)),
      *Result.SourceManager, Result.Context->getLangOpts());

  auto diag_builder =
      diag(CastExpr->getLocStart(), "C-style casts are discouraged. %0");

  auto ReplaceWithCast = [&](StringRef CastType) {
    diag_builder << ("Use " + CastType).str();

    const Expr *SubExpr = CastExpr->getSubExprAsWritten()->IgnoreImpCasts();
    std::string CastText = (CastType + "<" + DestTypeString + ">").str();
    if (!isa<ParenExpr>(SubExpr)) {
      CastText.push_back('(');
      diag_builder << FixItHint::CreateInsertion(
          Lexer::getLocForEndOfToken(SubExpr->getLocEnd(), 0,
                                     *Result.SourceManager,
                                     Result.Context->getLangOpts()),
          ")");
    }
    diag_builder << FixItHint::CreateReplacement(ParenRange, CastText);
  };

  // Suggest appropriate C++ cast. See [expr.cast] for cast notation semantics.
  switch (CastExpr->getCastKind()) {
  case CK_NoOp:
    if (needsConstCast(SourceType, DestType) &&
        pointedTypesAreEqual(SourceType, DestType)) {
      ReplaceWithCast("const_cast");
      return;
    }
    if (DestType->isReferenceType() &&
        (SourceType.getNonReferenceType() ==
             DestType.getNonReferenceType().withConst() ||
         SourceType.getNonReferenceType() == DestType.getNonReferenceType())) {
      ReplaceWithCast("const_cast");
      return;
    }
    // FALLTHROUGH
  case clang::CK_IntegralCast:
    // Convert integral and no-op casts between builtin types and enums to
    // static_cast. A cast from enum to integer may be unnecessary, but it's
    // still retained.
    if ((SourceType->isBuiltinType() || SourceType->isEnumeralType()) &&
        (DestType->isBuiltinType() || DestType->isEnumeralType())) {
      ReplaceWithCast("static_cast");
      return;
    }
    break;
  case CK_BitCast:
    // FIXME: Suggest const_cast<...>(reinterpret_cast<...>(...)) replacement.
    if (!needsConstCast(SourceType, DestType)) {
      ReplaceWithCast("reinterpret_cast");
      return;
    }
    break;
  default:
    break;
  }

  diag_builder << "Use static_cast/const_cast/reinterpret_cast";
}
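// Illustration (not from the source): casts this check reacts to and,
// roughly, the rewrites it suggests.
void demo(double d, const int *cp, char *p) {
  long l = (long)42;     // integral cast: suggests static_cast<long>(42)
  int *m = (int *)cp;    // only constness changes: suggests const_cast<int *>(cp)
  float *f = (float *)p; // bit cast: suggests reinterpret_cast<float *>(p)
  (void)d;               // cast to void mutes warnings: deliberately ignored
  (void)l; (void)m; (void)f;
}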
void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  type = type.getCanonicalType();

  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent or non-canonical type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(EmitScalarExpr(size), SizeTy,
                                        /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getResultType();
      break;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;
    }
  } while (type->isVariablyModifiedType());
}
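// Illustration (not from the source): a variably modified type whose nested
// size expressions the walk above evaluates and caches in VLASizeMap. This is
// C code, since VLAs come from C99.
void f(int n) {
  int (*p)[n][n * 2]; // pointer to VLA: each bound is emitted exactly once
  (void)p;
}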