// access 2D memory array at given index Expr *ASTTranslate::accessMem2DAt(DeclRefExpr *LHS, Expr *idx_x, Expr *idx_y) { QualType QT = LHS->getType(); QualType QT2 = QT->getPointeeType()->getAsArrayTypeUnsafe()->getElementType(); // mark image as being used within the kernel Kernel->setUsed(LHS->getNameInfo().getAsString()); Expr *result = new (Ctx) ArraySubscriptExpr(createImplicitCastExpr(Ctx, QT, CK_LValueToRValue, LHS, nullptr, VK_RValue), idx_y, QT->getPointeeType(), VK_LValue, OK_Ordinary, SourceLocation()); result = new (Ctx) ArraySubscriptExpr(createImplicitCastExpr(Ctx, Ctx.getPointerType(QT2), CK_ArrayToPointerDecay, result, nullptr, VK_RValue), idx_x, QT2, VK_LValue, OK_Ordinary, SourceLocation()); return result; }
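// Illustrative sketch (not from the source; 'kernel_sketch' and 'img' are
// hypothetical names): the source-level access the helper above builds.
// The two nested ArraySubscriptExprs correspond to 'img[idx_y][idx_x]':
void kernel_sketch(int img[][640], int x, int y) {
  int v = img[y][x]; // outer subscript: y (row); inner subscript: x (column)
  (void)v;
}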
SVal StoreManager::evalDynamicCast(SVal Base, QualType DerivedType,
                                   bool &Failed) {
  Failed = false;

  loc::MemRegionVal *BaseRegVal = dyn_cast<loc::MemRegionVal>(&Base);
  if (!BaseRegVal)
    return UnknownVal();
  const MemRegion *BaseRegion = BaseRegVal->stripCasts(/*StripBases=*/false);

  // Assume the derived class is a pointer or a reference to a CXX record.
  DerivedType = DerivedType->getPointeeType();
  assert(!DerivedType.isNull());
  const CXXRecordDecl *DerivedDecl = DerivedType->getAsCXXRecordDecl();
  if (!DerivedDecl && !DerivedType->isVoidType())
    return UnknownVal();

  // Drill down the CXXBaseObject chains, which represent upcasts (casts from
  // derived to base).
  const MemRegion *SR = BaseRegion;
  while (const TypedRegion *TSR = dyn_cast_or_null<TypedRegion>(SR)) {
    QualType BaseType = TSR->getLocationType()->getPointeeType();
    assert(!BaseType.isNull());
    const CXXRecordDecl *SRDecl = BaseType->getAsCXXRecordDecl();
    if (!SRDecl)
      return UnknownVal();

    // If found the derived class, the cast succeeds.
    if (SRDecl == DerivedDecl)
      return loc::MemRegionVal(TSR);

    if (!DerivedType->isVoidType()) {
      // Static upcasts are marked as DerivedToBase casts by Sema, so this will
      // only happen when multiple or virtual inheritance is involved.
      CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/true,
                         /*DetectVirtual=*/false);
      if (SRDecl->isDerivedFrom(DerivedDecl, Paths))
        return evalDerivedToBase(loc::MemRegionVal(TSR), Paths.front());
    }

    if (const CXXBaseObjectRegion *R = dyn_cast<CXXBaseObjectRegion>(TSR))
      // Drill down the chain to get the derived classes.
      SR = R->getSuperRegion();
    else {
      // We reached the bottom of the hierarchy.
      // If this is a cast to void*, return the region.
      if (DerivedType->isVoidType())
        return loc::MemRegionVal(TSR);

      // We did not find the derived class. We must be casting the base to
      // derived, so the cast should fail.
      Failed = true;
      return UnknownVal();
    }
  }

  return UnknownVal();
}
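// Illustrative sketch (plain C++, not analyzer code; class names are
// hypothetical): the three cast outcomes the routine above models.
#include <cassert>

struct CastBase { virtual ~CastBase() = default; };
struct CastDerived : CastBase {};
struct CastUnrelated { virtual ~CastUnrelated() = default; };

void dyncast_outcomes() {
  CastDerived d;
  CastBase *b = &d;                                     // upcast (DerivedToBase)
  assert(dynamic_cast<CastDerived *>(b) == &d);         // downcast succeeds
  assert(dynamic_cast<CastUnrelated *>(b) == nullptr);  // fails -> Failed = true
  assert(dynamic_cast<void *>(b) == static_cast<void *>(&d)); // cast to void*
}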
/// \brief Returns true if a type is a pointer-to-const or reference-to-const /// with no further indirection. static bool isPointerToConst(QualType Ty) { QualType PointeeTy = Ty->getPointeeType(); if (PointeeTy == QualType()) return false; if (!PointeeTy.isConstQualified()) return false; if (PointeeTy->isAnyPointerType()) return false; return true; }
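// Illustrative sketch using <type_traits> (not from the source): the same
// "pointer-to-const with no further indirection" predicate at the C++ type
// level, making the three checks above concrete.
#include <type_traits>

template <typename T>
constexpr bool pointer_to_const =
    std::is_pointer<T>::value &&
    std::is_const<typename std::remove_pointer<T>::type>::value &&
    !std::is_pointer<typename std::remove_pointer<T>::type>::value;

static_assert(pointer_to_const<const int *>, "const pointee, one level");
static_assert(!pointer_to_const<int *>, "pointee is not const");
static_assert(!pointer_to_const<int *const *>, "pointee is itself a pointer");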
/// \brief Figure out if an expression could be turned into a call. /// /// Use this when trying to recover from an error where the programmer may have /// written just the name of a function instead of actually calling it. /// /// \param E - The expression to examine. /// \param ZeroArgCallReturnTy - If the expression can be turned into a call /// with no arguments, this parameter is set to the type returned by such a /// call; otherwise, it is set to an empty QualType. /// \param NonTemplateOverloads - If the expression is an overloaded function /// name, this parameter is populated with the decls of the various overloads. bool Sema::isExprCallable(const Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads) { ZeroArgCallReturnTy = QualType(); NonTemplateOverloads.clear(); if (const OverloadExpr *Overloads = dyn_cast<OverloadExpr>(&E)) { for (OverloadExpr::decls_iterator it = Overloads->decls_begin(), DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) { // Our overload set may include TemplateDecls, which we'll ignore for our // present purpose. if (const FunctionDecl *OverloadDecl = dyn_cast<FunctionDecl>(*it)) { NonTemplateOverloads.addDecl(*it); if (OverloadDecl->getMinRequiredArguments() == 0) ZeroArgCallReturnTy = OverloadDecl->getResultType(); } } return true; } if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(&E)) { if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) { if (Fun->getMinRequiredArguments() == 0) ZeroArgCallReturnTy = Fun->getResultType(); return true; } } // We don't have an expression that's convenient to get a FunctionDecl from, // but we can at least check if the type is "function of 0 arguments". QualType ExprTy = E.getType(); const FunctionType *FunTy = NULL; QualType PointeeTy = ExprTy->getPointeeType(); if (!PointeeTy.isNull()) FunTy = PointeeTy->getAs<FunctionType>(); if (!FunTy) FunTy = ExprTy->getAs<FunctionType>(); if (!FunTy && ExprTy == Context.BoundMemberTy) { // Look for the bound-member type. If it's still overloaded, give up, // although we probably should have fallen into the OverloadExpr case above // if we actually have an overloaded bound member. QualType BoundMemberTy = Expr::findBoundMemberType(&E); if (!BoundMemberTy.isNull()) FunTy = BoundMemberTy->castAs<FunctionType>(); } if (const FunctionProtoType *FPT = dyn_cast_or_null<FunctionProtoType>(FunTy)) { if (FPT->getNumArgs() == 0) ZeroArgCallReturnTy = FunTy->getResultType(); return true; } return false; }
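// Illustrative sketch (hypothetical function names): the recovery scenario
// the routine above serves, where a function name was written without a call.
int answer() { return 42; }

int use_answer() {
  // int x = answer;  // error: Sema consults isExprCallable() and can suggest
  int x = answer();   // the zero-argument call, whose return type (int) fits
  return x;
}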
PathDiagnosticPiece * UndefOrNullArgVisitor::VisitNode(const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC, BugReport &BR) { ProgramStateRef State = N->getState(); ProgramPoint ProgLoc = N->getLocation(); // We are only interested in visiting CallEnter nodes. Optional<CallEnter> CEnter = ProgLoc.getAs<CallEnter>(); if (!CEnter) return 0; // Check if one of the arguments is the region the visitor is tracking. CallEventManager &CEMgr = BRC.getStateManager().getCallEventManager(); CallEventRef<> Call = CEMgr.getCaller(CEnter->getCalleeContext(), State); unsigned Idx = 0; for (CallEvent::param_iterator I = Call->param_begin(), E = Call->param_end(); I != E; ++I, ++Idx) { const MemRegion *ArgReg = Call->getArgSVal(Idx).getAsRegion(); // Are we tracking the argument or its subregion? if ( !ArgReg || (ArgReg != R && !R->isSubRegionOf(ArgReg->StripCasts()))) continue; // Check the function parameter type. const ParmVarDecl *ParamDecl = *I; assert(ParamDecl && "Formal parameter has no decl?"); QualType T = ParamDecl->getType(); if (!(T->isAnyPointerType() || T->isReferenceType())) { // Function can only change the value passed in by address. continue; } // If it is a const pointer value, the function does not intend to // change the value. if (T->getPointeeType().isConstQualified()) continue; // Mark the call site (LocationContext) as interesting if the value of the // argument is undefined or '0'/'NULL'. SVal BoundVal = State->getSVal(R); if (BoundVal.isUndef() || BoundVal.isZeroConstant()) { BR.markInteresting(CEnter->getCalleeContext()); return 0; } } return 0; }
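// Illustrative sketch (hypothetical names): a call site the visitor above
// marks as interesting, because the tracked region is passed by address
// while holding a null value.
void callee(int **out) { if (*out) **out = 1; } // may write through 'out'

void caller() {
  int *p = nullptr; // tracked value is '0' at the CallEnter node
  callee(&p);       // argument region matches -> BR.markInteresting(...)
}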
// Check if type is a "Field() *" pointer type, or alternatively a pointer to // any type in "alt" if provided. bool CheckAllocationsInFunctionVisitor::IsFieldPointer( const QualType& qtype, const char* alt) { if (qtype->isPointerType()) { auto name = qtype->getPointeeType() .getDesugaredType(_mainVisitor->getContext()).getAsString(); return StartsWith(name, "class Memory::WriteBarrierPtr<") || StartsWith(name, "typename WriteBarrierFieldTypeTraits<") || (alt && strstr(alt, name.c_str())); } return false; }
void edmChecker::checkASTDecl(const clang::CXXRecordDecl *RD,
                              clang::ento::AnalysisManager &mgr,
                              clang::ento::BugReporter &BR) const {
  const clang::SourceManager &SM = BR.getSourceManager();
  clang::ento::PathDiagnosticLocation DLoc =
      clang::ento::PathDiagnosticLocation::createBegin(RD, SM);
  if (!m_exception.reportClass(DLoc, BR))
    return;

  // Check the class methods (member methods).
  for (clang::CXXRecordDecl::method_iterator I = RD->method_begin(),
                                             E = RD->method_end();
       I != E; ++I) {
    if (!llvm::isa<clang::CXXMethodDecl>(*I))
      continue;
    clang::CXXMethodDecl *MD = llvm::cast<clang::CXXMethodDecl>(*I);
    if (MD->getNameAsString() == "beginRun" ||
        MD->getNameAsString() == "endRun" ||
        MD->getNameAsString() == "beginLuminosityBlock" ||
        MD->getNameAsString() == "endLuminosityBlock") {
      for (auto J = RD->bases_begin(), F = RD->bases_end(); J != F; ++J) {
        std::string name = J->getType()->castAs<RecordType>()->getDecl()
                               ->getQualifiedNameAsString();
        if (name == "edm::EDProducer" || name == "edm::EDFilter") {
          llvm::SmallString<100> buf;
          llvm::raw_svector_ostream os(buf);
          os << RD->getQualifiedNameAsString()
             << " inherits from edm::EDProducer or edm::EDFilter";
          os << "\n";
          llvm::errs() << os.str();
          // Skip methods whose first parameter is a reference to const.
          CXXMethodDecl::param_iterator PI = MD->param_begin();
          ParmVarDecl *PVD = *PI;
          QualType PQT = PVD->getType();
          if (PQT->isReferenceType()) {
            QualType RQT = PQT->getPointeeType();
            if (RQT.isConstQualified())
              continue;
          }
          clang::ento::PathDiagnosticLocation ELoc =
              clang::ento::PathDiagnosticLocation::createBegin(MD, SM);
          clang::SourceLocation SL = MD->getLocStart();
          BR.EmitBasicReport(
              MD,
              "Class Checker : inherits from edm::EDProducer or edm::EDFilter",
              "optional", os.str(), ELoc, SL);
        }
      }
    }
  }
} // end of checkASTDecl
static RValue PerformReturnAdjustment(CodeGenFunction &CGF, QualType ResultType, RValue RV, const ThunkInfo &Thunk) { // Emit the return adjustment. bool NullCheckValue = !ResultType->isReferenceType(); llvm::BasicBlock *AdjustNull = nullptr; llvm::BasicBlock *AdjustNotNull = nullptr; llvm::BasicBlock *AdjustEnd = nullptr; llvm::Value *ReturnValue = RV.getScalarVal(); if (NullCheckValue) { AdjustNull = CGF.createBasicBlock("adjust.null"); AdjustNotNull = CGF.createBasicBlock("adjust.notnull"); AdjustEnd = CGF.createBasicBlock("adjust.end"); llvm::Value *IsNull = CGF.Builder.CreateIsNull(ReturnValue); CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull); CGF.EmitBlock(AdjustNotNull); } auto ClassDecl = ResultType->getPointeeType()->getAsCXXRecordDecl(); auto ClassAlign = CGF.CGM.getClassPointerAlignment(ClassDecl); ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(CGF, Address(ReturnValue, ClassAlign), Thunk.Return); if (NullCheckValue) { CGF.Builder.CreateBr(AdjustEnd); CGF.EmitBlock(AdjustNull); CGF.Builder.CreateBr(AdjustEnd); CGF.EmitBlock(AdjustEnd); llvm::PHINode *PHI = CGF.Builder.CreatePHI(ReturnValue->getType(), 2); PHI->addIncoming(ReturnValue, AdjustNotNull); PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()), AdjustNull); ReturnValue = PHI; } return RValue::get(ReturnValue); }
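// Illustrative sketch (hypothetical classes): a covariant override that needs
// the return adjustment emitted above. 'RetA' sits at a non-zero offset
// inside 'RetC', so the thunk must shift the returned RetC*, guarding for
// null because the return type is a pointer (NullCheckValue above).
struct RetA { virtual RetA *clone(); };
struct RetB { virtual ~RetB(); long pad; };
struct RetC : RetB, RetA { RetC *clone() override; };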
static bool isCallback(QualType T) {
  // If a parameter is a block or a callback, assume it can modify the
  // pointer.
  if (T->isBlockPointerType() ||
      T->isFunctionPointerType() ||
      T->isObjCSelType())
    return true;

  // Check if a callback is passed inside a struct (both for structs passed by
  // reference and by value). Dig just one level into the struct for now.
  if (T->isAnyPointerType() || T->isReferenceType())
    T = T->getPointeeType();

  if (const RecordType *RT = T->getAsStructureType()) {
    const RecordDecl *RD = RT->getDecl();
    for (const auto *I : RD->fields()) {
      QualType FieldT = I->getType();
      if (FieldT->isBlockPointerType() || FieldT->isFunctionPointerType())
        return true;
    }
  }
  return false;
}
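// Illustrative sketch (hypothetical declarations): parameter shapes the
// predicate above treats as callbacks, including one dug out of a struct.
typedef void (*fn_t)(void);
struct Handlers { fn_t on_done; };  // function pointer one level into a struct

void api_direct(fn_t cb);           // function pointer parameter: callback
void api_struct(const Handlers *h); // struct by reference: fields are scanned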
Optional<SVal> GenericTaintChecker::getPointedToSVal(CheckerContext &C, const Expr *Arg) { ProgramStateRef State = C.getState(); SVal AddrVal = C.getSVal(Arg->IgnoreParens()); if (AddrVal.isUnknownOrUndef()) return None; Optional<Loc> AddrLoc = AddrVal.getAs<Loc>(); if (!AddrLoc) return None; QualType ArgTy = Arg->getType().getCanonicalType(); if (!ArgTy->isPointerType()) return None; QualType ValTy = ArgTy->getPointeeType(); // Do not dereference void pointers. Treat them as byte pointers instead. // FIXME: we might want to consider more than just the first byte. if (ValTy->isVoidType()) ValTy = C.getASTContext().CharTy; return State->getSVal(*AddrLoc, ValTy); }
SVal StoreManager::attemptDownCast(SVal Base, QualType TargetType, bool &Failed) { Failed = false; const MemRegion *MR = Base.getAsRegion(); if (!MR) return UnknownVal(); // Assume the derived class is a pointer or a reference to a CXX record. TargetType = TargetType->getPointeeType(); assert(!TargetType.isNull()); const CXXRecordDecl *TargetClass = TargetType->getAsCXXRecordDecl(); if (!TargetClass && !TargetType->isVoidType()) return UnknownVal(); // Drill down the CXXBaseObject chains, which represent upcasts (casts from // derived to base). while (const CXXRecordDecl *MRClass = getCXXRecordType(MR)) { // If found the derived class, the cast succeeds. if (MRClass == TargetClass) return loc::MemRegionVal(MR); // We skip over incomplete types. They must be the result of an earlier // reinterpret_cast, as one can only dynamic_cast between types in the same // class hierarchy. if (!TargetType->isVoidType() && MRClass->hasDefinition()) { // Static upcasts are marked as DerivedToBase casts by Sema, so this will // only happen when multiple or virtual inheritance is involved. CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/true, /*DetectVirtual=*/false); if (MRClass->isDerivedFrom(TargetClass, Paths)) return evalDerivedToBase(loc::MemRegionVal(MR), Paths.front()); } if (const auto *BaseR = dyn_cast<CXXBaseObjectRegion>(MR)) { // Drill down the chain to get the derived classes. MR = BaseR->getSuperRegion(); continue; } // If this is a cast to void*, return the region. if (TargetType->isVoidType()) return loc::MemRegionVal(MR); // Strange use of reinterpret_cast can give us paths we don't reason // about well, by putting in ElementRegions where we'd expect // CXXBaseObjectRegions. If it's a valid reinterpret_cast (i.e. if the // derived class has a zero offset from the base class), then it's safe // to strip the cast; if it's invalid, -Wreinterpret-base-class should // catch it. In the interest of performance, the analyzer will silently // do the wrong thing in the invalid case (because offsets for subregions // will be wrong). const MemRegion *Uncasted = MR->StripCasts(/*IncludeBaseCasts=*/false); if (Uncasted == MR) { // We reached the bottom of the hierarchy and did not find the derived // class. We must be casting the base to derived, so the cast should // fail. break; } MR = Uncasted; } // If we're casting a symbolic base pointer to a derived class, use // CXXDerivedObjectRegion to represent the cast. If it's a pointer to an // unrelated type, it must be a weird reinterpret_cast and we have to // be fine with ElementRegion. TODO: Should we instead make // Derived{TargetClass, Element{SourceClass, SR}}? if (const auto *SR = dyn_cast<SymbolicRegion>(MR)) { QualType T = SR->getSymbol()->getType(); const CXXRecordDecl *SourceClass = T->getPointeeCXXRecordDecl(); if (TargetClass && SourceClass && TargetClass->isDerivedFrom(SourceClass)) return loc::MemRegionVal( MRMgr.getCXXDerivedObjectRegion(TargetClass, SR)); return loc::MemRegionVal(GetElementZeroRegion(SR, TargetType)); } // We failed if the region we ended up with has perfect type info. Failed = isa<TypedValueRegion>(MR); return UnknownVal(); }
/// Checks whether the return types are covariant, according to
/// C++ [class.virtual]p7.
///
/// Similar to clang::Sema::CheckOverridingFunctionReturnType.
/// \returns true if the return types of BaseMD and DerivedMD are covariant.
static bool checkOverridingFunctionReturnType(const ASTContext *Context,
                                              const CXXMethodDecl *BaseMD,
                                              const CXXMethodDecl *DerivedMD) {
  QualType BaseReturnTy = BaseMD->getType()
                              ->getAs<FunctionType>()
                              ->getReturnType()
                              .getCanonicalType();
  QualType DerivedReturnTy = DerivedMD->getType()
                                 ->getAs<FunctionType>()
                                 ->getReturnType()
                                 .getCanonicalType();

  if (DerivedReturnTy->isDependentType() || BaseReturnTy->isDependentType())
    return false;

  // Check if return types are identical.
  if (Context->hasSameType(DerivedReturnTy, BaseReturnTy))
    return true;

  /// Check if the return types are covariant.

  // Both types must be pointers or references to classes.
  if (!(BaseReturnTy->isPointerType() && DerivedReturnTy->isPointerType()) &&
      !(BaseReturnTy->isReferenceType() && DerivedReturnTy->isReferenceType()))
    return false;

  /// BTy is the class type in the return type of BaseMD, e.g. B for
  ///    B* Base::md();
  /// and BRD is the declaration of B. DTy and DRD are the analogous
  /// type and declaration for DerivedMD.
  QualType DTy = DerivedReturnTy->getPointeeType().getCanonicalType();
  QualType BTy = BaseReturnTy->getPointeeType().getCanonicalType();

  const CXXRecordDecl *DRD = DTy->getAsCXXRecordDecl();
  const CXXRecordDecl *BRD = BTy->getAsCXXRecordDecl();
  if (DRD == nullptr || BRD == nullptr)
    return false;

  if (!DRD->hasDefinition() || !BRD->hasDefinition())
    return false;

  if (DRD == BRD)
    return true;

  if (!Context->hasSameUnqualifiedType(DTy, BTy)) {
    // Begin checking whether the conversion from D to B is valid.
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);

    // Check whether D is derived from B, and fill in a CXXBasePaths object.
    if (!DRD->isDerivedFrom(BRD, Paths))
      return false;

    // Check ambiguity.
    if (Paths.isAmbiguous(Context->getCanonicalType(BTy).getUnqualifiedType()))
      return false;

    // Check accessibility.
    // FIXME: We currently only support checking if B is an accessible base
    // class of D, or D is the same class in which DerivedMD is declared.
    bool IsItself =
        DRD->getCanonicalDecl() == DerivedMD->getParent()->getCanonicalDecl();
    bool HasPublicAccess = false;
    for (const auto &Path : Paths) {
      if (Path.Access == AS_public)
        HasPublicAccess = true;
    }
    if (!HasPublicAccess && !IsItself)
      return false;
    // End checking conversion from D to B.
  }

  // Both pointers or references should have the same cv-qualification.
  if (DerivedReturnTy.getLocalCVRQualifiers() !=
      BaseReturnTy.getLocalCVRQualifiers())
    return false;

  // The class type D should have the same cv-qualification as, or less
  // cv-qualification than, the class type B.
  if (DTy.isMoreQualifiedThan(BTy))
    return false;

  return true;
}
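// Illustrative sketch: return-type covariance per C++ [class.virtual]p7,
// which the check above re-implements for the tidy check.
struct CovB { virtual CovB *make(); };
struct CovD : CovB { CovD *make() override; };   // OK: CovD* -> CovB* is covariant
// struct CovE : CovB { int *make() override; }; // ill-formed: not covariant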
bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt, ASTContext &Ctx, bool IsObjCLiteral) { // %n is different from other conversion specifiers; don't try to fix it. if (CS.getKind() == ConversionSpecifier::nArg) return false; // Handle Objective-C objects first. Note that while the '%@' specifier will // not warn for structure pointer or void pointer arguments (because that's // how CoreFoundation objects are implemented), we only show a fixit for '%@' // if we know it's an object (block, id, class, or __attribute__((NSObject))). if (QT->isObjCRetainableType()) { if (!IsObjCLiteral) return false; CS.setKind(ConversionSpecifier::ObjCObjArg); // Disable irrelevant flags HasThousandsGrouping = false; HasPlusPrefix = false; HasSpacePrefix = false; HasAlternativeForm = false; HasLeadingZeroes = false; Precision.setHowSpecified(OptionalAmount::NotSpecified); LM.setKind(LengthModifier::None); return true; } // Handle strings next (char *, wchar_t *) if (QT->isPointerType() && (QT->getPointeeType()->isAnyCharacterType())) { CS.setKind(ConversionSpecifier::sArg); // Disable irrelevant flags HasAlternativeForm = 0; HasLeadingZeroes = 0; // Set the long length modifier for wide characters if (QT->getPointeeType()->isWideCharType()) LM.setKind(LengthModifier::AsWideChar); else LM.setKind(LengthModifier::None); return true; } // If it's an enum, get its underlying type. if (const EnumType *ETy = QT->getAs<EnumType>()) QT = ETy->getDecl()->getIntegerType(); // We can only work with builtin types. const BuiltinType *BT = QT->getAs<BuiltinType>(); if (!BT) return false; // Set length modifier switch (BT->getKind()) { case BuiltinType::Bool: case BuiltinType::WChar_U: case BuiltinType::WChar_S: case BuiltinType::Char16: case BuiltinType::Char32: case BuiltinType::UInt128: case BuiltinType::Int128: case BuiltinType::Half: case BuiltinType::Float128: // Various types which are non-trivial to correct. return false; #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ case BuiltinType::Id: #include "clang/Basic/OpenCLImageTypes.def" #define SIGNED_TYPE(Id, SingletonId) #define UNSIGNED_TYPE(Id, SingletonId) #define FLOATING_TYPE(Id, SingletonId) #define BUILTIN_TYPE(Id, SingletonId) \ case BuiltinType::Id: #include "clang/AST/BuiltinTypes.def" // Misc other stuff which doesn't make sense here. return false; case BuiltinType::UInt: case BuiltinType::Int: case BuiltinType::Float: case BuiltinType::Double: LM.setKind(LengthModifier::None); break; case BuiltinType::Char_U: case BuiltinType::UChar: case BuiltinType::Char_S: case BuiltinType::SChar: LM.setKind(LengthModifier::AsChar); break; case BuiltinType::Short: case BuiltinType::UShort: LM.setKind(LengthModifier::AsShort); break; case BuiltinType::Long: case BuiltinType::ULong: LM.setKind(LengthModifier::AsLong); break; case BuiltinType::LongLong: case BuiltinType::ULongLong: LM.setKind(LengthModifier::AsLongLong); break; case BuiltinType::LongDouble: LM.setKind(LengthModifier::AsLongDouble); break; } // Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99. if (isa<TypedefType>(QT) && (LangOpt.C99 || LangOpt.CPlusPlus11)) namedTypeToLengthModifier(QT, LM); // If fixing the length modifier was enough, we might be done. if (hasValidLengthModifier(Ctx.getTargetInfo())) { // If we're going to offer a fix anyway, make sure the sign matches. 
switch (CS.getKind()) { case ConversionSpecifier::uArg: case ConversionSpecifier::UArg: if (QT->isSignedIntegerType()) CS.setKind(clang::analyze_format_string::ConversionSpecifier::dArg); break; case ConversionSpecifier::dArg: case ConversionSpecifier::DArg: case ConversionSpecifier::iArg: if (QT->isUnsignedIntegerType() && !HasPlusPrefix) CS.setKind(clang::analyze_format_string::ConversionSpecifier::uArg); break; default: // Other specifiers do not have signed/unsigned variants. break; } const analyze_printf::ArgType &ATR = getArgType(Ctx, IsObjCLiteral); if (ATR.isValid() && ATR.matchesType(Ctx, QT)) return true; } // Set conversion specifier and disable any flags which do not apply to it. // Let typedefs to char fall through to int, as %c is silly for uint8_t. if (!isa<TypedefType>(QT) && QT->isCharType()) { CS.setKind(ConversionSpecifier::cArg); LM.setKind(LengthModifier::None); Precision.setHowSpecified(OptionalAmount::NotSpecified); HasAlternativeForm = 0; HasLeadingZeroes = 0; HasPlusPrefix = 0; } // Test for Floating type first as LongDouble can pass isUnsignedIntegerType else if (QT->isRealFloatingType()) { CS.setKind(ConversionSpecifier::fArg); } else if (QT->isSignedIntegerType()) { CS.setKind(ConversionSpecifier::dArg); HasAlternativeForm = 0; } else if (QT->isUnsignedIntegerType()) { CS.setKind(ConversionSpecifier::uArg); HasAlternativeForm = 0; HasPlusPrefix = 0; } else { llvm_unreachable("Unexpected type"); } return true; }
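// Illustrative sketch: the -Wformat mismatches the fixType() logic above
// rewrites; the fixits in the comments follow from the switch on the builtin
// kind and the signedness check.
#include <cstdio>

void printf_demo(long n, unsigned u, const wchar_t *w) {
  std::printf("%d\n", n); // fixit "%ld": LengthModifier::AsLong
  std::printf("%d\n", u); // fixit "%u":  signed 'd' -> unsigned 'u'
  std::printf("%s\n", w); // fixit "%ls": LengthModifier::AsWideChar
}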
bool ScanfSpecifier::fixType(QualType QT, const LangOptions &LangOpt) { if (!QT->isPointerType()) return false; QualType PT = QT->getPointeeType(); const BuiltinType *BT = PT->getAs<BuiltinType>(); if (!BT) return false; // Pointer to a character. if (PT->isAnyCharacterType()) { CS.setKind(ConversionSpecifier::sArg); if (PT->isWideCharType()) LM.setKind(LengthModifier::AsWideChar); else LM.setKind(LengthModifier::None); return true; } // Figure out the length modifier. switch (BT->getKind()) { // no modifier case BuiltinType::UInt: case BuiltinType::Int: case BuiltinType::Float: LM.setKind(LengthModifier::None); break; // hh case BuiltinType::Char_U: case BuiltinType::UChar: case BuiltinType::Char_S: case BuiltinType::SChar: LM.setKind(LengthModifier::AsChar); break; // h case BuiltinType::Short: case BuiltinType::UShort: LM.setKind(LengthModifier::AsShort); break; // l case BuiltinType::Long: case BuiltinType::ULong: case BuiltinType::Double: LM.setKind(LengthModifier::AsLong); break; // ll case BuiltinType::LongLong: case BuiltinType::ULongLong: LM.setKind(LengthModifier::AsLongLong); break; // L case BuiltinType::LongDouble: LM.setKind(LengthModifier::AsLongDouble); break; // Don't know. default: return false; } // Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99. if (isa<TypedefType>(PT) && (LangOpt.C99 || LangOpt.CPlusPlus0x)) { const IdentifierInfo *Identifier = QT.getBaseTypeIdentifier(); if (Identifier->getName() == "size_t") { LM.setKind(LengthModifier::AsSizeT); } else if (Identifier->getName() == "ssize_t") { // Not C99, but common in Unix. LM.setKind(LengthModifier::AsSizeT); } else if (Identifier->getName() == "intmax_t") { LM.setKind(LengthModifier::AsIntMax); } else if (Identifier->getName() == "uintmax_t") { LM.setKind(LengthModifier::AsIntMax); } else if (Identifier->getName() == "ptrdiff_t") { LM.setKind(LengthModifier::AsPtrDiff); } } // Figure out the conversion specifier. if (PT->isRealFloatingType()) CS.setKind(ConversionSpecifier::fArg); else if (PT->isSignedIntegerType()) CS.setKind(ConversionSpecifier::dArg); else if (PT->isUnsignedIntegerType()) { // Preserve the original formatting, e.g. 'X', 'o'. if (!CS.isUIntArg()) { CS.setKind(ConversionSpecifier::uArg); } } else llvm_unreachable("Unexpected type"); return true; }
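// Illustrative sketch: scanf fixits of the kind the routine above computes
// from the pointee's builtin kind and the typedef handling.
#include <cstdio>
#include <cstddef>

void scanf_demo(short *s, double *d, std::size_t *z) {
  std::scanf("%d", s); // fixit "%hd": AsShort
  std::scanf("%f", d); // fixit "%lf": AsLong for double
  std::scanf("%d", z); // fixit "%zu": AsSizeT via the typedef handling
}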
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();

  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
              *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getNumSubExprs() == 6)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

  auto GetDest = [&] {
    if (!RValTy->isVoidType() && !Dest) {
      Dest = CreateMemTemp(RValTy, ".atomicdst");
    }
    return Dest;
  };

  // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // The atomic address is the first or second parameter.
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
        MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimization benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    default:
      return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // Value is returned through a parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // The order is always the last parameter.
    Args.add(RValue::get(Order), getContext().IntTy);

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (HaveRetTy && !RetTy->isVoidType())
      return Res;
    // The value is returned via an explicit out param.
    if (RetTy->isVoidType())
      return RValue::get(nullptr);
    // The value is returned directly for optimized libcalls but the caller
    // expects an out-param.
    if (UseOptimizedLibcall) {
      llvm::Value *ResVal = Res.getScalarVal();
      llvm::StoreInst *StoreDest = Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
      StoreDest->setAlignment(Align);
    }
    return convertTempToRValue(Dest, RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(), Size * 8);

  llvm::Value *OrigDest = GetDest();
  Ptr = Builder.CreateBitCast(
      Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
  if (Val1)
    Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
  if (Val2)
    Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
  if (Dest && !E->isCmpXChg())
    Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                   Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                   Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                   Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                   Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                   Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split.
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics.
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return.
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}
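// Illustrative sketch: source forms lowered by EmitAtomicExpr above. The
// comments note why the C11 pointer fetch_add needs the extra multiply:
// the GNU __atomic_* builtins expect the caller to scale by sizeof(T).
int acounter = 0;

void atomic_demo() {
  __atomic_fetch_add(&acounter, 1, __ATOMIC_SEQ_CST); // integer RMW form
  (void)__atomic_load_n(&acounter, __ATOMIC_ACQUIRE); // load form
  // For _Atomic(int *) p, __c11_atomic_fetch_add(&p, 1, __ATOMIC_SEQ_CST)
  // advances by sizeof(int) bytes, hence the PointeeIncAmt multiply above.
}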
void TypePrinter::printAttributed(const AttributedType *T, std::string &S) {
  // Prefer the macro forms of the GC and ownership qualifiers.
  if (T->getAttrKind() == AttributedType::attr_objc_gc ||
      T->getAttrKind() == AttributedType::attr_objc_ownership)
    return print(T->getEquivalentType(), S);

  print(T->getModifiedType(), S);

  // TODO: not all attributes are GCC-style attributes.
  S += " __attribute__((";
  switch (T->getAttrKind()) {
  case AttributedType::attr_address_space:
    S += "address_space(";
    // Print the digits; appending the raw unsigned would hit the char
    // overload of operator+=.
    S += llvm::utostr_32(T->getEquivalentType().getAddressSpace());
    S += ")";
    break;

  case AttributedType::attr_vector_size: {
    S += "__vector_size__(";
    if (const VectorType *vector =
            T->getEquivalentType()->getAs<VectorType>()) {
      S += llvm::utostr_32(vector->getNumElements());
      S += " * sizeof(";

      std::string tmp;
      print(vector->getElementType(), tmp);
      S += tmp;
      S += ")";
    }
    S += ")";
    break;
  }

  case AttributedType::attr_neon_vector_type:
  case AttributedType::attr_neon_polyvector_type: {
    if (T->getAttrKind() == AttributedType::attr_neon_vector_type)
      S += "neon_vector_type(";
    else
      S += "neon_polyvector_type(";
    const VectorType *vector = T->getEquivalentType()->getAs<VectorType>();
    S += llvm::utostr_32(vector->getNumElements());
    S += ")";
    break;
  }

  case AttributedType::attr_regparm: {
    S += "regparm(";
    QualType t = T->getEquivalentType();
    while (!t->isFunctionType())
      t = t->getPointeeType();
    S += llvm::utostr_32(t->getAs<FunctionType>()->getRegParmType());
    S += ")";
    break;
  }

  case AttributedType::attr_objc_gc: {
    S += "objc_gc(";

    QualType tmp = T->getEquivalentType();
    while (tmp.getObjCGCAttr() == Qualifiers::GCNone) {
      QualType next = tmp->getPointeeType();
      if (next == tmp)
        break;
      tmp = next;
    }

    if (tmp.isObjCGCWeak())
      S += "weak";
    else
      S += "strong";
    S += ")";
    break;
  }

  case AttributedType::attr_objc_ownership:
    S += "objc_ownership(";
    switch (T->getEquivalentType().getObjCLifetime()) {
    case Qualifiers::OCL_None:
      llvm_unreachable("no ownership!");
      break;
    case Qualifiers::OCL_ExplicitNone:
      S += "none";
      break;
    case Qualifiers::OCL_Strong:
      S += "strong";
      break;
    case Qualifiers::OCL_Weak:
      S += "weak";
      break;
    case Qualifiers::OCL_Autoreleasing:
      S += "autoreleasing";
      break;
    }
    S += ")";
    break;

  case AttributedType::attr_noreturn:
    S += "noreturn";
    break;
  case AttributedType::attr_cdecl:
    S += "cdecl";
    break;
  case AttributedType::attr_fastcall:
    S += "fastcall";
    break;
  case AttributedType::attr_stdcall:
    S += "stdcall";
    break;
  case AttributedType::attr_thiscall:
    S += "thiscall";
    break;
  case AttributedType::attr_pascal:
    S += "pascal";
    break;

  case AttributedType::attr_pcs: {
    S += "pcs(";
    QualType t = T->getEquivalentType();
    while (!t->isFunctionType())
      t = t->getPointeeType();
    S += (t->getAs<FunctionType>()->getCallConv() == CC_AAPCS
              ? "\"aapcs\""
              : "\"aapcs-vfp\"");
    S += ")";
    break;
  }
  }
  S += "))";
}
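// Illustrative sketch: attributed types whose printed forms the routine
// above reconstructs (the vector_size and noreturn branches).
typedef int v4si __attribute__((__vector_size__(4 * sizeof(int))));
typedef void (*noret_fn)(int) __attribute__((noreturn));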
bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt) { // Handle strings first (char *, wchar_t *) if (QT->isPointerType() && (QT->getPointeeType()->isAnyCharacterType())) { CS.setKind(ConversionSpecifier::sArg); // Disable irrelevant flags HasAlternativeForm = 0; HasLeadingZeroes = 0; // Set the long length modifier for wide characters if (QT->getPointeeType()->isWideCharType()) LM.setKind(LengthModifier::AsWideChar); else LM.setKind(LengthModifier::None); return true; } // We can only work with builtin types. const BuiltinType *BT = QT->getAs<BuiltinType>(); if (!BT) return false; // Set length modifier switch (BT->getKind()) { case BuiltinType::Bool: case BuiltinType::WChar_U: case BuiltinType::WChar_S: case BuiltinType::Char16: case BuiltinType::Char32: case BuiltinType::UInt128: case BuiltinType::Int128: case BuiltinType::Half: // Various types which are non-trivial to correct. return false; #define SIGNED_TYPE(Id, SingletonId) #define UNSIGNED_TYPE(Id, SingletonId) #define FLOATING_TYPE(Id, SingletonId) #define BUILTIN_TYPE(Id, SingletonId) \ case BuiltinType::Id: #include "clang/AST/BuiltinTypes.def" // Misc other stuff which doesn't make sense here. return false; case BuiltinType::UInt: case BuiltinType::Int: case BuiltinType::Float: case BuiltinType::Double: LM.setKind(LengthModifier::None); break; case BuiltinType::Char_U: case BuiltinType::UChar: case BuiltinType::Char_S: case BuiltinType::SChar: LM.setKind(LengthModifier::AsChar); break; case BuiltinType::Short: case BuiltinType::UShort: LM.setKind(LengthModifier::AsShort); break; case BuiltinType::Long: case BuiltinType::ULong: LM.setKind(LengthModifier::AsLong); break; case BuiltinType::LongLong: case BuiltinType::ULongLong: LM.setKind(LengthModifier::AsLongLong); break; case BuiltinType::LongDouble: LM.setKind(LengthModifier::AsLongDouble); break; } // Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99. if (isa<TypedefType>(QT) && (LangOpt.C99 || LangOpt.CPlusPlus0x)) { const IdentifierInfo *Identifier = QT.getBaseTypeIdentifier(); if (Identifier->getName() == "size_t") { LM.setKind(LengthModifier::AsSizeT); } else if (Identifier->getName() == "ssize_t") { // Not C99, but common in Unix. LM.setKind(LengthModifier::AsSizeT); } else if (Identifier->getName() == "intmax_t") { LM.setKind(LengthModifier::AsIntMax); } else if (Identifier->getName() == "uintmax_t") { LM.setKind(LengthModifier::AsIntMax); } else if (Identifier->getName() == "ptrdiff_t") { LM.setKind(LengthModifier::AsPtrDiff); } } // Set conversion specifier and disable any flags which do not apply to it. // Let typedefs to char fall through to int, as %c is silly for uint8_t. if (isa<TypedefType>(QT) && QT->isAnyCharacterType()) { CS.setKind(ConversionSpecifier::cArg); LM.setKind(LengthModifier::None); Precision.setHowSpecified(OptionalAmount::NotSpecified); HasAlternativeForm = 0; HasLeadingZeroes = 0; HasPlusPrefix = 0; } // Test for Floating type first as LongDouble can pass isUnsignedIntegerType else if (QT->isRealFloatingType()) { CS.setKind(ConversionSpecifier::fArg); } else if (QT->isSignedIntegerType()) { CS.setKind(ConversionSpecifier::dArg); HasAlternativeForm = 0; } else if (QT->isUnsignedIntegerType()) { // Preserve the original formatting, e.g. 'X', 'o'. if (!cast<PrintfConversionSpecifier>(CS).isUIntArg()) CS.setKind(ConversionSpecifier::uArg); HasAlternativeForm = 0; HasPlusPrefix = 0; } else { llvm_unreachable("Unexpected type"); } return true; }
static bool isPointerToConst(const QualType &QT) { return QT->isAnyPointerType() && QT->getPointeeType().isConstQualified(); }
/// \brief Return the fully qualified type, including fully-qualified
/// versions of any template parameters.
QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx) {
  // In case of myType* we need to strip the pointer first, fully
  // qualify, and attach the pointer once again.
  if (isa<PointerType>(QT.getTypePtr())) {
    // Get the qualifiers.
    Qualifiers Quals = QT.getQualifiers();
    QT = getFullyQualifiedType(QT->getPointeeType(), Ctx);
    QT = Ctx.getPointerType(QT);
    // Add back the qualifiers.
    QT = Ctx.getQualifiedType(QT, Quals);
    return QT;
  }

  // In case of myType& we need to strip the reference first, fully
  // qualify, and attach the reference once again.
  if (isa<ReferenceType>(QT.getTypePtr())) {
    // Get the qualifiers.
    bool IsLValueRefTy = isa<LValueReferenceType>(QT.getTypePtr());
    Qualifiers Quals = QT.getQualifiers();
    QT = getFullyQualifiedType(QT->getPointeeType(), Ctx);
    // Add the r- or l-value reference type back to the fully
    // qualified one.
    if (IsLValueRefTy)
      QT = Ctx.getLValueReferenceType(QT);
    else
      QT = Ctx.getRValueReferenceType(QT);
    // Add back the qualifiers.
    QT = Ctx.getQualifiedType(QT, Quals);
    return QT;
  }

  // Remove the part of the type related to the type being a template
  // parameter (we won't report it as part of the 'type name'), since
  // handling it would make the code below more complex.
  while (isa<SubstTemplateTypeParmType>(QT.getTypePtr())) {
    // Get the qualifiers.
    Qualifiers Quals = QT.getQualifiers();
    QT = dyn_cast<SubstTemplateTypeParmType>(QT.getTypePtr())->desugar();
    // Add back the qualifiers.
    QT = Ctx.getQualifiedType(QT, Quals);
  }

  NestedNameSpecifier *Prefix = nullptr;
  Qualifiers PrefixQualifiers;
  ElaboratedTypeKeyword Keyword = ETK_None;
  if (const auto *ETypeInput = dyn_cast<ElaboratedType>(QT.getTypePtr())) {
    QT = ETypeInput->getNamedType();
    Keyword = ETypeInput->getKeyword();
  }

  // Create a nested name specifier if needed (i.e. if the decl context
  // is not the global scope).
  Prefix = createNestedNameSpecifierForScopeOf(Ctx, QT.getTypePtr(),
                                               true /*FullyQualified*/);

  // Move the qualifiers on the outer type (avoid 'std::const string'!).
  if (Prefix) {
    PrefixQualifiers = QT.getLocalQualifiers();
    QT = QualType(QT.getTypePtr(), 0);
  }

  // In case of template specializations, iterate over the arguments and
  // fully qualify them as well.
  if (isa<const TemplateSpecializationType>(QT.getTypePtr()) ||
      isa<const RecordType>(QT.getTypePtr())) {
    // We are asked to fully qualify and we have a Record Type (which
    // may point to a template specialization) or a Template
    // Specialization Type. We need to fully qualify their arguments.
    Qualifiers Quals = QT.getLocalQualifiers();
    const Type *TypePtr = getFullyQualifiedTemplateType(Ctx, QT.getTypePtr());
    QT = Ctx.getQualifiedType(TypePtr, Quals);
  }

  if (Prefix || Keyword != ETK_None) {
    QT = Ctx.getElaboratedType(Keyword, Prefix, QT);
    QT = Ctx.getQualifiedType(QT, PrefixQualifiers);
  }

  return QT;
}
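// Illustrative sketch (hypothetical namespaces): what "fully qualified"
// means for the routine above, shown at the source level.
namespace outer { namespace inner { struct S {}; } }
using outer::inner::S;

S *fq_example = nullptr; // type as written:      "S *"
                         // fully qualified form: "outer::inner::S *"
                         // (pointers are stripped, qualified, and re-attached;
                         //  template arguments such as std::vector<S>'s are
                         //  visited too, giving std::vector<outer::inner::S>)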
SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
                                    BinaryOperator::Opcode op,
                                    Loc lhs, NonLoc rhs, QualType resultTy) {
  // Special case: rhs is a zero constant.
  if (rhs.isZeroConstant())
    return lhs;

  // Special case: 'rhs' is an integer that has the same width as a pointer and
  // we are using the integer location in a comparison. Normally this cannot be
  // triggered, but transfer functions like those for OSCompareAndSwapBarrier32
  // can generate comparisons that trigger this code.
  // FIXME: Are all locations guaranteed to have pointer width?
  if (BinaryOperator::isComparisonOp(op)) {
    if (nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs)) {
      const llvm::APSInt *x = &rhsInt->getValue();
      ASTContext &ctx = Context;
      if (ctx.getTypeSize(ctx.VoidPtrTy) == x->getBitWidth()) {
        // Convert the signedness of the integer (if necessary).
        if (x->isSigned())
          x = &getBasicValueFactory().getValue(*x, true);

        return evalBinOpLL(state, op, lhs, loc::ConcreteInt(*x), resultTy);
      }
    }
    return UnknownVal();
  }

  // We are dealing with pointer arithmetic.

  // Handle pointer arithmetic on constant values.
  if (nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs)) {
    if (loc::ConcreteInt *lhsInt = dyn_cast<loc::ConcreteInt>(&lhs)) {
      const llvm::APSInt &leftI = lhsInt->getValue();
      assert(leftI.isUnsigned());
      llvm::APSInt rightI(rhsInt->getValue(), /* isUnsigned */ true);

      // Convert the bitwidth of rightI. This should deal with overflow
      // since we are dealing with concrete values.
      rightI = rightI.extOrTrunc(leftI.getBitWidth());

      // Offset the increment by the pointer size.
      llvm::APSInt Multiplicand(rightI.getBitWidth(), /* isUnsigned */ true);
      QualType pointeeType = resultTy->getPointeeType();
      Multiplicand = getContext().getTypeSizeInChars(pointeeType).getQuantity();
      rightI *= Multiplicand;

      // Compute the adjusted pointer.
      switch (op) {
      case BO_Add:
        rightI = leftI + rightI;
        break;
      case BO_Sub:
        rightI = leftI - rightI;
        break;
      default:
        llvm_unreachable("Invalid pointer arithmetic operation");
      }
      return loc::ConcreteInt(getBasicValueFactory().getValue(rightI));
    }
  }

  // Handle cases where 'lhs' is a region.
  if (const MemRegion *region = lhs.getAsRegion()) {
    rhs = cast<NonLoc>(convertToArrayIndex(rhs));
    SVal index = UnknownVal();
    const MemRegion *superR = 0;
    QualType elementType;

    if (const ElementRegion *elemReg = dyn_cast<ElementRegion>(region)) {
      assert(op == BO_Add || op == BO_Sub);
      index = evalBinOpNN(state, op, elemReg->getIndex(), rhs,
                          getArrayIndexType());
      superR = elemReg->getSuperRegion();
      elementType = elemReg->getElementType();
    } else if (isa<SubRegion>(region)) {
      superR = region;
      index = rhs;
      if (resultTy->isAnyPointerType())
        elementType = resultTy->getPointeeType();
    }

    if (NonLoc *indexV = dyn_cast<NonLoc>(&index)) {
      return loc::MemRegionVal(MemMgr.getElementRegion(elementType, *indexV,
                                                       superR, getContext()));
    }
  }
  return UnknownVal();
}
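// Illustrative sketch: the pointer arithmetic the evaluation above models.
// The Multiplicand is sizeof(*p), so an index of 1 moves one element.
void pointer_arith_demo() {
  int a[4] = {0, 1, 2, 3};
  int *p = a;
  int *q = p + 1; // ElementRegion{a, index 1}; byte offset 1 * sizeof(int)
  (void)q;
}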
bool PrintfSpecifier::fixType(QualType QT) { // Handle strings first (char *, wchar_t *) if (QT->isPointerType() && (QT->getPointeeType()->isAnyCharacterType())) { CS.setKind(ConversionSpecifier::sArg); // Disable irrelevant flags HasAlternativeForm = 0; HasLeadingZeroes = 0; // Set the long length modifier for wide characters if (QT->getPointeeType()->isWideCharType()) LM.setKind(LengthModifier::AsWideChar); return true; } // We can only work with builtin types. if (!QT->isBuiltinType()) return false; // Everything else should be a base type const BuiltinType *BT = QT->getAs<BuiltinType>(); // Set length modifier switch (BT->getKind()) { default: // The rest of the conversions are either optional or for non-builtin types LM.setKind(LengthModifier::None); break; case BuiltinType::WChar: case BuiltinType::Long: case BuiltinType::ULong: LM.setKind(LengthModifier::AsLong); break; case BuiltinType::LongLong: case BuiltinType::ULongLong: LM.setKind(LengthModifier::AsLongLong); break; case BuiltinType::LongDouble: LM.setKind(LengthModifier::AsLongDouble); break; } // Set conversion specifier and disable any flags which do not apply to it. if (QT->isAnyCharacterType()) { CS.setKind(ConversionSpecifier::cArg); Precision.setHowSpecified(OptionalAmount::NotSpecified); HasAlternativeForm = 0; HasLeadingZeroes = 0; HasPlusPrefix = 0; } // Test for Floating type first as LongDouble can pass isUnsignedIntegerType else if (QT->isRealFloatingType()) { CS.setKind(ConversionSpecifier::fArg); } else if (QT->isPointerType()) { CS.setKind(ConversionSpecifier::pArg); Precision.setHowSpecified(OptionalAmount::NotSpecified); HasAlternativeForm = 0; HasLeadingZeroes = 0; HasPlusPrefix = 0; } else if (QT->isSignedIntegerType()) { CS.setKind(ConversionSpecifier::dArg); HasAlternativeForm = 0; } else if (QT->isUnsignedIntegerType()) { CS.setKind(ConversionSpecifier::uArg); HasAlternativeForm = 0; HasPlusPrefix = 0; } else { return false; } return true; }
SVal StoreManager::evalDynamicCast(SVal Base, QualType TargetType,
                                   bool &Failed) {
  Failed = false;

  const MemRegion *MR = Base.getAsRegion();
  if (!MR)
    return UnknownVal();

  // Assume the derived class is a pointer or a reference to a CXX record.
  TargetType = TargetType->getPointeeType();
  assert(!TargetType.isNull());
  const CXXRecordDecl *TargetClass = TargetType->getAsCXXRecordDecl();
  if (!TargetClass && !TargetType->isVoidType())
    return UnknownVal();

  // Drill down the CXXBaseObject chains, which represent upcasts (casts from
  // derived to base).
  while (const CXXRecordDecl *MRClass = getCXXRecordType(MR)) {
    // If found the derived class, the cast succeeds.
    if (MRClass == TargetClass)
      return loc::MemRegionVal(MR);

    if (!TargetType->isVoidType()) {
      // Static upcasts are marked as DerivedToBase casts by Sema, so this will
      // only happen when multiple or virtual inheritance is involved.
      CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/true,
                         /*DetectVirtual=*/false);
      if (MRClass->isDerivedFrom(TargetClass, Paths))
        return evalDerivedToBase(loc::MemRegionVal(MR), Paths.front());
    }

    if (const CXXBaseObjectRegion *BaseR = dyn_cast<CXXBaseObjectRegion>(MR)) {
      // Drill down the chain to get the derived classes.
      MR = BaseR->getSuperRegion();
      continue;
    }

    // If this is a cast to void*, return the region.
    if (TargetType->isVoidType())
      return loc::MemRegionVal(MR);

    // Strange use of reinterpret_cast can give us paths we don't reason
    // about well, by putting in ElementRegions where we'd expect
    // CXXBaseObjectRegions. If it's a valid reinterpret_cast (i.e. if the
    // derived class has a zero offset from the base class), then it's safe
    // to strip the cast; if it's invalid, -Wreinterpret-base-class should
    // catch it. In the interest of performance, the analyzer will silently
    // do the wrong thing in the invalid case (because offsets for subregions
    // will be wrong).
    const MemRegion *Uncasted = MR->StripCasts(/*IncludeBaseCasts=*/false);
    if (Uncasted == MR) {
      // We reached the bottom of the hierarchy and did not find the derived
      // class. We must be casting the base to derived, so the cast should
      // fail.
      break;
    }

    MR = Uncasted;
  }

  // We failed if the region we ended up with has perfect type info.
  Failed = isa<TypedValueRegion>(MR);
  return UnknownVal();
}
llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE) { QualType CastTy = DCE->getTypeAsWritten(); QualType InnerType = CastTy->getPointeeType(); QualType ArgTy = DCE->getSubExpr()->getType(); const llvm::Type *LArgTy = ConvertType(ArgTy); const llvm::Type *LTy = ConvertType(DCE->getType()); bool CanBeZero = false; bool ToVoid = false; bool ThrowOnBad = false; if (CastTy->isPointerType()) { // FIXME: if PointerType->hasAttr<NonNullAttr>(), we don't set this CanBeZero = true; if (InnerType->isVoidType()) ToVoid = true; } else { LTy = LTy->getPointerTo(); ThrowOnBad = true; } CXXRecordDecl *SrcTy; QualType Ty = ArgTy; if (ArgTy.getTypePtr()->isPointerType() || ArgTy.getTypePtr()->isReferenceType()) Ty = Ty.getTypePtr()->getPointeeType(); CanQualType CanTy = CGM.getContext().getCanonicalType(Ty); Ty = CanTy.getUnqualifiedType(); SrcTy = cast<CXXRecordDecl>(Ty->getAs<RecordType>()->getDecl()); llvm::BasicBlock *ContBlock = createBasicBlock(); llvm::BasicBlock *NullBlock = 0; llvm::BasicBlock *NonZeroBlock = 0; if (CanBeZero) { NonZeroBlock = createBasicBlock(); NullBlock = createBasicBlock(); llvm::Value *Zero = llvm::Constant::getNullValue(LArgTy); Builder.CreateCondBr(Builder.CreateICmpNE(V, Zero), NonZeroBlock, NullBlock); EmitBlock(NonZeroBlock); } llvm::BasicBlock *BadCastBlock = 0; const llvm::Type *PtrDiffTy = ConvertType(getContext().getSizeType()); // See if this is a dynamic_cast(void*) if (ToVoid) { llvm::Value *This = V; V = Builder.CreateBitCast(This, PtrDiffTy->getPointerTo()->getPointerTo()); V = Builder.CreateLoad(V, "vtable"); V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL); V = Builder.CreateLoad(V, "offset to top"); This = Builder.CreateBitCast(This, llvm::Type::getInt8PtrTy(VMContext)); V = Builder.CreateInBoundsGEP(This, V); V = Builder.CreateBitCast(V, LTy); } else { /// Call __dynamic_cast const llvm::Type *ResultType = llvm::Type::getInt8PtrTy(VMContext); const llvm::FunctionType *FTy; std::vector<const llvm::Type*> ArgTys; const llvm::Type *PtrToInt8Ty = llvm::Type::getInt8Ty(VMContext)->getPointerTo(); ArgTys.push_back(PtrToInt8Ty); ArgTys.push_back(PtrToInt8Ty); ArgTys.push_back(PtrToInt8Ty); ArgTys.push_back(PtrDiffTy); FTy = llvm::FunctionType::get(ResultType, ArgTys, false); CXXRecordDecl *DstTy; Ty = CastTy.getTypePtr()->getPointeeType(); CanTy = CGM.getContext().getCanonicalType(Ty); Ty = CanTy.getUnqualifiedType(); DstTy = cast<CXXRecordDecl>(Ty->getAs<RecordType>()->getDecl()); // FIXME: Calculate better hint. 
llvm::Value *hint = llvm::ConstantInt::get(PtrDiffTy, -1ULL); llvm::Value *SrcArg = CGM.GenerateRttiRef(SrcTy); llvm::Value *DstArg = CGM.GenerateRttiRef(DstTy); V = Builder.CreateBitCast(V, PtrToInt8Ty); V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"), V, SrcArg, DstArg, hint); V = Builder.CreateBitCast(V, LTy); if (ThrowOnBad) { BadCastBlock = createBasicBlock(); llvm::Value *Zero = llvm::Constant::getNullValue(LTy); Builder.CreateCondBr(Builder.CreateICmpNE(V, Zero), ContBlock, BadCastBlock); EmitBlock(BadCastBlock); /// Call __cxa_bad_cast ResultType = llvm::Type::getVoidTy(VMContext); const llvm::FunctionType *FBadTy; FBadTy = llvm::FunctionType::get(ResultType, false); llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast"); Builder.CreateCall(F)->setDoesNotReturn(); Builder.CreateUnreachable(); } } if (CanBeZero) { Builder.CreateBr(ContBlock); EmitBlock(NullBlock); Builder.CreateBr(ContBlock); } EmitBlock(ContBlock); if (CanBeZero) { llvm::PHINode *PHI = Builder.CreatePHI(LTy); PHI->reserveOperandSpace(2); PHI->addIncoming(V, NonZeroBlock); PHI->addIncoming(llvm::Constant::getNullValue(LTy), NullBlock); V = PHI; } return V; }
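// For reference, the three shapes of dynamic_cast the codegen above handles.
// The types are hypothetical; the comments describe the control flow the code
// above constructs.

struct EmitBase { virtual ~EmitBase() {} };
struct EmitDerived : EmitBase {};

EmitDerived *castPtr(EmitBase *B) {
  return dynamic_cast<EmitDerived *>(B); // CanBeZero: a null check guards the
                                         // __dynamic_cast call, and null flows
                                         // into the PHI node in ContBlock.
}
EmitDerived &castRef(EmitBase &B) {
  return dynamic_cast<EmitDerived &>(B); // ThrowOnBad: a failed cast branches
                                         // to BadCastBlock, which calls
                                         // __cxa_bad_cast and is unreachable.
}
void *castVoid(EmitBase *B) {
  return dynamic_cast<void *>(B); // ToVoid: no runtime call; reads the
                                  // offset-to-top slot from the vtable and
                                  // adjusts the pointer.
}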
/// CheckFallThroughForBody - Check that we don't fall off the end of a
/// function that should return a value. Check that we don't fall off the end
/// of a noreturn function. We assume that functions and blocks not marked
/// noreturn will return.
static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
                                    QualType BlockTy,
                                    const CheckFallThroughDiagnostics &CD,
                                    AnalysisContext &AC) {

  bool ReturnsVoid = false;
  bool HasNoReturn = false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    ReturnsVoid = FD->getResultType()->isVoidType();
    HasNoReturn = FD->hasAttr<NoReturnAttr>() ||
                  FD->getType()->getAs<FunctionType>()->getNoReturnAttr();
  } else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
    ReturnsVoid = MD->getResultType()->isVoidType();
    HasNoReturn = MD->hasAttr<NoReturnAttr>();
  } else if (isa<BlockDecl>(D)) {
    if (const FunctionType *FT =
          BlockTy->getPointeeType()->getAs<FunctionType>()) {
      if (FT->getResultType()->isVoidType())
        ReturnsVoid = true;
      if (FT->getNoReturnAttr())
        HasNoReturn = true;
    }
  }

  Diagnostic &Diags = S.getDiagnostics();

  // Short circuit for compilation speed.
  if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn))
    return;

  // FIXME: Function try block
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body)) {
    switch (CheckFallThrough(AC)) {
    case UnknownFallThrough:
      break;
    case MaybeFallThrough:
      if (HasNoReturn)
        S.Diag(Compound->getRBracLoc(), CD.diag_MaybeFallThrough_HasNoReturn);
      else if (!ReturnsVoid)
        S.Diag(Compound->getRBracLoc(),
               CD.diag_MaybeFallThrough_ReturnsNonVoid);
      break;
    case AlwaysFallThrough:
      if (HasNoReturn)
        S.Diag(Compound->getRBracLoc(), CD.diag_AlwaysFallThrough_HasNoReturn);
      else if (!ReturnsVoid)
        S.Diag(Compound->getRBracLoc(),
               CD.diag_AlwaysFallThrough_ReturnsNonVoid);
      break;
    case NeverFallThroughOrReturn:
      if (ReturnsVoid && !HasNoReturn && CD.diag_NeverFallThroughOrReturn)
        S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn);
      break;
    case NeverFallThrough:
      break;
    }
  }
}
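// Illustrative bodies this check diagnoses (assumed examples, not part of the
// implementation above):

#include <cstdlib>

int maybeFallsOff(bool B) {
  if (B)
    return 1;
}  // MaybeFallThrough on a value-returning function: diagnosed at the '}'.

__attribute__((noreturn)) void neverReturns(bool B) {
  if (B)
    std::abort();
}  // Control can fall off the end of a noreturn function: also diagnosed.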
/// Create a fake body for std::call_once. /// Emulates the following function body: /// /// \code /// typedef struct once_flag_s { /// unsigned long __state = 0; /// } once_flag; /// template<class Callable> /// void call_once(once_flag& o, Callable func) { /// if (!o.__state) { /// func(); /// } /// o.__state = 1; /// } /// \endcode static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) { DEBUG(llvm::dbgs() << "Generating body for call_once\n"); // We need at least two parameters. if (D->param_size() < 2) return nullptr; ASTMaker M(C); const ParmVarDecl *Flag = D->getParamDecl(0); const ParmVarDecl *Callback = D->getParamDecl(1); if (!Callback->getType()->isReferenceType()) { llvm::dbgs() << "libcxx03 std::call_once implementation, skipping.\n"; return nullptr; } if (!Flag->getType()->isReferenceType()) { llvm::dbgs() << "unknown std::call_once implementation, skipping.\n"; return nullptr; } QualType CallbackType = Callback->getType().getNonReferenceType(); // Nullable pointer, non-null iff function is a CXXRecordDecl. CXXRecordDecl *CallbackRecordDecl = CallbackType->getAsCXXRecordDecl(); QualType FlagType = Flag->getType().getNonReferenceType(); auto *FlagRecordDecl = dyn_cast_or_null<RecordDecl>(FlagType->getAsTagDecl()); if (!FlagRecordDecl) { DEBUG(llvm::dbgs() << "Flag field is not a record: " << "unknown std::call_once implementation, " << "ignoring the call.\n"); return nullptr; } // We initially assume libc++ implementation of call_once, // where the once_flag struct has a field `__state_`. ValueDecl *FlagFieldDecl = M.findMemberField(FlagRecordDecl, "__state_"); // Otherwise, try libstdc++ implementation, with a field // `_M_once` if (!FlagFieldDecl) { FlagFieldDecl = M.findMemberField(FlagRecordDecl, "_M_once"); } if (!FlagFieldDecl) { DEBUG(llvm::dbgs() << "No field _M_once or __state_ found on " << "std::once_flag struct: unknown std::call_once " << "implementation, ignoring the call."); return nullptr; } bool isLambdaCall = CallbackRecordDecl && CallbackRecordDecl->isLambda(); if (CallbackRecordDecl && !isLambdaCall) { DEBUG(llvm::dbgs() << "Not supported: synthesizing body for functors when " << "body farming std::call_once, ignoring the call."); return nullptr; } SmallVector<Expr *, 5> CallArgs; const FunctionProtoType *CallbackFunctionType; if (isLambdaCall) { // Lambda requires callback itself inserted as a first parameter. CallArgs.push_back( M.makeDeclRefExpr(Callback, /* RefersToEnclosingVariableOrCapture=*/ true)); CallbackFunctionType = CallbackRecordDecl->getLambdaCallOperator() ->getType() ->getAs<FunctionProtoType>(); } else if (!CallbackType->getPointeeType().isNull()) { CallbackFunctionType = CallbackType->getPointeeType()->getAs<FunctionProtoType>(); } else { CallbackFunctionType = CallbackType->getAs<FunctionProtoType>(); } if (!CallbackFunctionType) return nullptr; // First two arguments are used for the flag and for the callback. if (D->getNumParams() != CallbackFunctionType->getNumParams() + 2) { DEBUG(llvm::dbgs() << "Types of params of the callback do not match " << "params passed to std::call_once, " << "ignoring the call\n"); return nullptr; } // All arguments past first two ones are passed to the callback, // and we turn lvalues into rvalues if the argument is not passed by // reference. 
for (unsigned int ParamIdx = 2; ParamIdx < D->getNumParams(); ParamIdx++) { const ParmVarDecl *PDecl = D->getParamDecl(ParamIdx); Expr *ParamExpr = M.makeDeclRefExpr(PDecl); if (!CallbackFunctionType->getParamType(ParamIdx - 2)->isReferenceType()) { QualType PTy = PDecl->getType().getNonReferenceType(); ParamExpr = M.makeLvalueToRvalue(ParamExpr, PTy); } CallArgs.push_back(ParamExpr); } CallExpr *CallbackCall; if (isLambdaCall) { CallbackCall = create_call_once_lambda_call(C, M, Callback, CallbackRecordDecl, CallArgs); } else { // Function pointer case. CallbackCall = create_call_once_funcptr_call(C, M, Callback, CallArgs); } DeclRefExpr *FlagDecl = M.makeDeclRefExpr(Flag, /* RefersToEnclosingVariableOrCapture=*/true); MemberExpr *Deref = M.makeMemberExpression(FlagDecl, FlagFieldDecl); assert(Deref->isLValue()); QualType DerefType = Deref->getType(); // Negation predicate. UnaryOperator *FlagCheck = new (C) UnaryOperator( /* input=*/ M.makeImplicitCast(M.makeLvalueToRvalue(Deref, DerefType), DerefType, CK_IntegralToBoolean), /* opc=*/ UO_LNot, /* QualType=*/ C.IntTy, /* ExprValueKind=*/ VK_RValue, /* ExprObjectKind=*/ OK_Ordinary, SourceLocation()); // Create assignment. BinaryOperator *FlagAssignment = M.makeAssignment( Deref, M.makeIntegralCast(M.makeIntegerLiteral(1, C.IntTy), DerefType), DerefType); IfStmt *Out = new (C) IfStmt(C, SourceLocation(), /* IsConstexpr=*/ false, /* init=*/ nullptr, /* var=*/ nullptr, /* cond=*/ FlagCheck, /* then=*/ M.makeCompound({CallbackCall, FlagAssignment})); return Out; }
bool ScanfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
                             ASTContext &Ctx) {
  if (!QT->isPointerType())
    return false;

  // %n is different from other conversion specifiers; don't try to fix it.
  if (CS.getKind() == ConversionSpecifier::nArg)
    return false;

  QualType PT = QT->getPointeeType();

  // If it's an enum, get its underlying type.
  if (const EnumType *ETy = PT->getAs<EnumType>())
    PT = ETy->getDecl()->getIntegerType();

  const BuiltinType *BT = PT->getAs<BuiltinType>();
  if (!BT)
    return false;

  // Pointer to a character.
  if (PT->isAnyCharacterType()) {
    CS.setKind(ConversionSpecifier::sArg);
    if (PT->isWideCharType())
      LM.setKind(LengthModifier::AsWideChar);
    else
      LM.setKind(LengthModifier::None);
    return true;
  }

  // Figure out the length modifier.
  switch (BT->getKind()) {
  // no modifier
  case BuiltinType::UInt:
  case BuiltinType::Int:
  case BuiltinType::Float:
    LM.setKind(LengthModifier::None);
    break;

  // hh
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    LM.setKind(LengthModifier::AsChar);
    break;

  // h
  case BuiltinType::Short:
  case BuiltinType::UShort:
    LM.setKind(LengthModifier::AsShort);
    break;

  // l
  case BuiltinType::Long:
  case BuiltinType::ULong:
  case BuiltinType::Double:
    LM.setKind(LengthModifier::AsLong);
    break;

  // ll
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
    LM.setKind(LengthModifier::AsLongLong);
    break;

  // L
  case BuiltinType::LongDouble:
    LM.setKind(LengthModifier::AsLongDouble);
    break;

  // Don't know.
  default:
    return false;
  }

  // Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in
  // C99.
  if (isa<TypedefType>(PT) && (LangOpt.C99 || LangOpt.CPlusPlus11))
    namedTypeToLengthModifier(PT, LM);

  // If fixing the length modifier was enough, we are done.
  if (hasValidLengthModifier(Ctx.getTargetInfo())) {
    const analyze_scanf::ArgType &AT = getArgType(Ctx);
    if (AT.isValid() && AT.matchesType(Ctx, QT))
      return true;
  }

  // Figure out the conversion specifier.
  if (PT->isRealFloatingType())
    CS.setKind(ConversionSpecifier::fArg);
  else if (PT->isSignedIntegerType())
    CS.setKind(ConversionSpecifier::dArg);
  else if (PT->isUnsignedIntegerType())
    CS.setKind(ConversionSpecifier::uArg);
  else
    llvm_unreachable("Unexpected type");

  return true;
}
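// Illustrative scanf calls and the fix-its this routine makes possible:

#include <cstddef>
#include <cstdio>

void scanfFixits() {
  long L; double D; std::size_t Z;
  std::scanf("%d", &L); // fixed to "%ld": the length modifier alone suffices
  std::scanf("%d", &D); // fixed to "%lf": the conversion specifier changes too
  std::scanf("%d", &Z); // fixed to "%zu" in C99/C++11 modes, via
                        // namedTypeToLengthModifier on the typedef'd type
}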
void TypePrinter::printAttributedAfter(const AttributedType *T, raw_ostream &OS) { // Prefer the macro forms of the GC and ownership qualifiers. if (T->getAttrKind() == AttributedType::attr_objc_gc || T->getAttrKind() == AttributedType::attr_objc_ownership) return printAfter(T->getEquivalentType(), OS); // TODO: not all attributes are GCC-style attributes. OS << " __attribute__(("; switch (T->getAttrKind()) { case AttributedType::attr_address_space: OS << "address_space("; OS << T->getEquivalentType().getAddressSpace(); OS << ')'; break; case AttributedType::attr_vector_size: { OS << "__vector_size__("; if (const VectorType *vector =T->getEquivalentType()->getAs<VectorType>()) { OS << vector->getNumElements(); OS << " * sizeof("; print(vector->getElementType(), OS, StringRef()); OS << ')'; } OS << ')'; break; } case AttributedType::attr_neon_vector_type: case AttributedType::attr_neon_polyvector_type: { if (T->getAttrKind() == AttributedType::attr_neon_vector_type) OS << "neon_vector_type("; else OS << "neon_polyvector_type("; const VectorType *vector = T->getEquivalentType()->getAs<VectorType>(); OS << vector->getNumElements(); OS << ')'; break; } case AttributedType::attr_regparm: { OS << "regparm("; QualType t = T->getEquivalentType(); while (!t->isFunctionType()) t = t->getPointeeType(); OS << t->getAs<FunctionType>()->getRegParmType(); OS << ')'; break; } case AttributedType::attr_objc_gc: { OS << "objc_gc("; QualType tmp = T->getEquivalentType(); while (tmp.getObjCGCAttr() == Qualifiers::GCNone) { QualType next = tmp->getPointeeType(); if (next == tmp) break; tmp = next; } if (tmp.isObjCGCWeak()) OS << "weak"; else OS << "strong"; OS << ')'; break; } case AttributedType::attr_objc_ownership: OS << "objc_ownership("; switch (T->getEquivalentType().getObjCLifetime()) { case Qualifiers::OCL_None: llvm_unreachable("no ownership!"); case Qualifiers::OCL_ExplicitNone: OS << "none"; break; case Qualifiers::OCL_Strong: OS << "strong"; break; case Qualifiers::OCL_Weak: OS << "weak"; break; case Qualifiers::OCL_Autoreleasing: OS << "autoreleasing"; break; } OS << ')'; break; case AttributedType::attr_noreturn: OS << "noreturn"; break; case AttributedType::attr_cdecl: OS << "cdecl"; break; case AttributedType::attr_fastcall: OS << "fastcall"; break; case AttributedType::attr_stdcall: OS << "stdcall"; break; case AttributedType::attr_thiscall: OS << "thiscall"; break; case AttributedType::attr_pascal: OS << "pascal"; break; case AttributedType::attr_pcs: { OS << "pcs("; QualType t = T->getEquivalentType(); while (!t->isFunctionType()) t = t->getPointeeType(); OS << (t->getAs<FunctionType>()->getCallConv() == CC_AAPCS ? "\"aapcs\"" : "\"aapcs-vfp\""); OS << ')'; break; } case AttributedType::attr_pnaclcall: OS << "pnaclcall"; break; case AttributedType::attr_inteloclbicc: OS << "inteloclbicc"; break; } OS << "))"; }
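// Sample attributed types and, in comments, the suffixes the switch cases
// above would construct for them (illustrative; whether a given declaration
// produces an AttributedType depends on how Sema processes the attribute):

typedef int FourInts __attribute__((vector_size(16)));
// suffix: " __attribute__((__vector_size__(4 * sizeof(int))))"

typedef void RegparmFn(int) __attribute__((regparm(3)));
// suffix: " __attribute__((regparm(3)))"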
SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
                                    BinaryOperator::Opcode op, Loc lhs,
                                    NonLoc rhs, QualType resultTy) {
  if (op >= BO_PtrMemD && op <= BO_PtrMemI) {
    if (auto PTMSV = rhs.getAs<nonloc::PointerToMember>()) {
      if (PTMSV->isNullMemberPointer())
        return UndefinedVal();
      if (const FieldDecl *FD = PTMSV->getDeclAs<FieldDecl>()) {
        SVal Result = lhs;

        for (const auto &I : *PTMSV)
          Result = StateMgr.getStoreManager().evalDerivedToBase(
              Result, I->getType(), I->isVirtual());
        return state->getLValue(FD, Result);
      }
    }
    return rhs;
  }

  assert(!BinaryOperator::isComparisonOp(op) &&
         "arguments to comparison ops must be of the same type");

  // Special case: rhs is a zero constant.
  if (rhs.isZeroConstant())
    return lhs;

  // We are dealing with pointer arithmetic.

  // Handle pointer arithmetic on constant values.
  if (Optional<nonloc::ConcreteInt> rhsInt = rhs.getAs<nonloc::ConcreteInt>()) {
    if (Optional<loc::ConcreteInt> lhsInt = lhs.getAs<loc::ConcreteInt>()) {
      const llvm::APSInt &leftI = lhsInt->getValue();
      assert(leftI.isUnsigned());
      llvm::APSInt rightI(rhsInt->getValue(), /* isUnsigned */ true);

      // Convert the bitwidth of rightI. This should deal with overflow
      // since we are dealing with concrete values.
      rightI = rightI.extOrTrunc(leftI.getBitWidth());

      // Offset the increment by the size of the pointee type.
      llvm::APSInt Multiplicand(rightI.getBitWidth(), /* isUnsigned */ true);
      Multiplicand =
          getContext().getTypeSizeInChars(resultTy->getPointeeType())
              .getQuantity();
      rightI *= Multiplicand;

      // Compute the adjusted pointer.
      switch (op) {
      case BO_Add:
        rightI = leftI + rightI;
        break;
      case BO_Sub:
        rightI = leftI - rightI;
        break;
      default:
        llvm_unreachable("Invalid pointer arithmetic operation");
      }
      return loc::ConcreteInt(getBasicValueFactory().getValue(rightI));
    }
  }

  // Handle cases where 'lhs' is a region.
  if (const MemRegion *region = lhs.getAsRegion()) {
    rhs = convertToArrayIndex(rhs).castAs<NonLoc>();
    SVal index = UnknownVal();
    const MemRegion *superR = nullptr;

    // We need to know the type of the pointer in order to add an integer to
    // it. Depending on the type, a different number of bytes is added.
    QualType elementType;

    if (const ElementRegion *elemReg = dyn_cast<ElementRegion>(region)) {
      assert(op == BO_Add || op == BO_Sub);
      index = evalBinOpNN(state, op, elemReg->getIndex(), rhs,
                          getArrayIndexType());
      superR = elemReg->getSuperRegion();
      elementType = elemReg->getElementType();
    } else if (isa<SubRegion>(region)) {
      assert(op == BO_Add || op == BO_Sub);
      index = (op == BO_Add) ? rhs : evalMinus(rhs);
      superR = region;
      // TODO: Is this actually reliable? Maybe improving our MemRegion
      // hierarchy to provide typed regions for all non-void pointers would be
      // better. For instance, we cannot extend this towards LocAsInteger
      // operations, where the result type of the expression is integer.
      if (resultTy->isAnyPointerType())
        elementType = resultTy->getPointeeType();
    }

    if (Optional<NonLoc> indexV = index.getAs<NonLoc>()) {
      return loc::MemRegionVal(MemMgr.getElementRegion(elementType, *indexV,
                                                       superR, getContext()));
    }
  }
  return UnknownVal();
}
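// The expression shapes the routine above evaluates (illustrative; the struct
// and function names are hypothetical):

struct HasField { int F; };

int memberPointerPath(HasField &H, int HasField::*M) {
  return H.*M;  // BO_PtrMemD: walk the pointer-to-member's base path with
                // evalDerivedToBase, then take the field's lvalue.
}

int *pointerArithPath(int *P, int I) {
  int *Q = P + 3;  // ElementRegion: index 3 over the region behind 'P'.
  int *R = Q - I;  // Existing ElementRegion: evalBinOpNN folds the old index
                   // with 'I' symbolically.
  int *S = R + 0;  // Zero-constant rhs: the lhs is returned unchanged.
  return S;
}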
/// \brief Figure out if an expression could be turned into a call. /// /// Use this when trying to recover from an error where the programmer may have /// written just the name of a function instead of actually calling it. /// /// \param E - The expression to examine. /// \param ZeroArgCallReturnTy - If the expression can be turned into a call /// with no arguments, this parameter is set to the type returned by such a /// call; otherwise, it is set to an empty QualType. /// \param OverloadSet - If the expression is an overloaded function /// name, this parameter is populated with the decls of the various overloads. bool Sema::isExprCallable(const Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &OverloadSet) { ZeroArgCallReturnTy = QualType(); OverloadSet.clear(); if (E.getType() == Context.OverloadTy) { OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E)); const OverloadExpr *Overloads = FR.Expression; for (OverloadExpr::decls_iterator it = Overloads->decls_begin(), DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) { OverloadSet.addDecl(*it); // Check whether the function is a non-template which takes no // arguments. if (const FunctionDecl *OverloadDecl = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) { if (OverloadDecl->getMinRequiredArguments() == 0) ZeroArgCallReturnTy = OverloadDecl->getResultType(); } } // Ignore overloads that are pointer-to-member constants. if (FR.HasFormOfMemberPointer) return false; return true; } if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) { if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) { if (Fun->getMinRequiredArguments() == 0) ZeroArgCallReturnTy = Fun->getResultType(); return true; } } // We don't have an expression that's convenient to get a FunctionDecl from, // but we can at least check if the type is "function of 0 arguments". QualType ExprTy = E.getType(); const FunctionType *FunTy = NULL; QualType PointeeTy = ExprTy->getPointeeType(); if (!PointeeTy.isNull()) FunTy = PointeeTy->getAs<FunctionType>(); if (!FunTy) FunTy = ExprTy->getAs<FunctionType>(); if (!FunTy && ExprTy == Context.BoundMemberTy) { // Look for the bound-member type. If it's still overloaded, give up, // although we probably should have fallen into the OverloadExpr case above // if we actually have an overloaded bound member. QualType BoundMemberTy = Expr::findBoundMemberType(&E); if (!BoundMemberTy.isNull()) FunTy = BoundMemberTy->castAs<FunctionType>(); } if (const FunctionProtoType *FPT = dyn_cast_or_null<FunctionProtoType>(FunTy)) { if (FPT->getNumArgs() == 0) ZeroArgCallReturnTy = FunTy->getResultType(); return true; } return false; }
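// The recovery scenario this helper serves (names are hypothetical): a
// function name written where its result was meant.

bool isOpen();

void checkDoor() {
  // Had the caller written "if (isOpen)" instead of "if (isOpen())",
  // isExprCallable would report that the expression is callable with zero
  // arguments and returns bool, letting Sema suggest appending "()".
  if (isOpen())
    ;
}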
/// \brief The LoopFixer callback, which determines if loops discovered by the /// matchers are convertible, printing information about the loops if so. void LoopFixer::run(const MatchFinder::MatchResult &Result) { const BoundNodes &Nodes = Result.Nodes; Confidence ConfidenceLevel(RL_Safe); ASTContext *Context = Result.Context; const ForStmt *TheLoop = Nodes.getStmtAs<ForStmt>(LoopName); if (!Owner.isFileModifiable(Context->getSourceManager(),TheLoop->getForLoc())) return; // Check that we have exactly one index variable and at most one end variable. const VarDecl *LoopVar = Nodes.getDeclAs<VarDecl>(IncrementVarName); const VarDecl *CondVar = Nodes.getDeclAs<VarDecl>(ConditionVarName); const VarDecl *InitVar = Nodes.getDeclAs<VarDecl>(InitVarName); if (!areSameVariable(LoopVar, CondVar) || !areSameVariable(LoopVar, InitVar)) return; const VarDecl *EndVar = Nodes.getDeclAs<VarDecl>(EndVarName); const VarDecl *ConditionEndVar = Nodes.getDeclAs<VarDecl>(ConditionEndVarName); if (EndVar && !areSameVariable(EndVar, ConditionEndVar)) return; // If the end comparison isn't a variable, we can try to work with the // expression the loop variable is being tested against instead. const CXXMemberCallExpr *EndCall = Nodes.getStmtAs<CXXMemberCallExpr>(EndCallName); const Expr *BoundExpr = Nodes.getStmtAs<Expr>(ConditionBoundName); // If the loop calls end()/size() after each iteration, lower our confidence // level. if (FixerKind != LFK_Array && !EndVar) ConfidenceLevel.lowerTo(RL_Reasonable); const Expr *ContainerExpr = nullptr; bool DerefByValue = false; bool DerefByConstRef = false; bool ContainerNeedsDereference = false; // FIXME: Try to put most of this logic inside a matcher. Currently, matchers // don't allow the right-recursive checks in digThroughConstructors. if (FixerKind == LFK_Iterator) { ContainerExpr = findContainer(Context, LoopVar->getInit(), EndVar ? EndVar->getInit() : EndCall, &ContainerNeedsDereference); QualType InitVarType = InitVar->getType(); QualType CanonicalInitVarType = InitVarType.getCanonicalType(); const CXXMemberCallExpr *BeginCall = Nodes.getNodeAs<CXXMemberCallExpr>(BeginCallName); assert(BeginCall && "Bad Callback. No begin call expression."); QualType CanonicalBeginType = BeginCall->getMethodDecl()->getReturnType().getCanonicalType(); if (CanonicalBeginType->isPointerType() && CanonicalInitVarType->isPointerType()) { QualType BeginPointeeType = CanonicalBeginType->getPointeeType(); QualType InitPointeeType = CanonicalInitVarType->getPointeeType(); // If the initializer and the variable are both pointers check if the // un-qualified pointee types match otherwise we don't use auto. if (!Context->hasSameUnqualifiedType(InitPointeeType, BeginPointeeType)) return; } else { // Check for qualified types to avoid conversions from non-const to const // iterator types. if (!Context->hasSameType(CanonicalInitVarType, CanonicalBeginType)) return; } DerefByValue = Nodes.getNodeAs<QualType>(DerefByValueResultName) != nullptr; if (!DerefByValue) { if (const QualType *DerefType = Nodes.getNodeAs<QualType>(DerefByRefResultName)) { // A node will only be bound with DerefByRefResultName if we're dealing // with a user-defined iterator type. Test the const qualification of // the reference type. DerefByConstRef = (*DerefType)->getAs<ReferenceType>()->getPointeeType() .isConstQualified(); } else { // By nature of the matcher this case is triggered only for built-in // iterator types (i.e. pointers). 
assert(isa<PointerType>(CanonicalInitVarType) && "Non-class iterator type is not a pointer type"); QualType InitPointeeType = CanonicalInitVarType->getPointeeType(); QualType BeginPointeeType = CanonicalBeginType->getPointeeType(); // If the initializer and variable have both the same type just use auto // otherwise we test for const qualification of the pointed-at type. if (!Context->hasSameType(InitPointeeType, BeginPointeeType)) DerefByConstRef = InitPointeeType.isConstQualified(); } } else { // If the dereference operator returns by value then test for the // canonical const qualification of the init variable type. DerefByConstRef = CanonicalInitVarType.isConstQualified(); } } else if (FixerKind == LFK_PseudoArray) { if (!EndCall) return; ContainerExpr = EndCall->getImplicitObjectArgument(); const MemberExpr *Member = dyn_cast<MemberExpr>(EndCall->getCallee()); if (!Member) return; ContainerNeedsDereference = Member->isArrow(); } // We must know the container or an array length bound. if (!ContainerExpr && !BoundExpr) return; findAndVerifyUsages(Context, LoopVar, EndVar, ContainerExpr, BoundExpr, ContainerNeedsDereference, DerefByValue, DerefByConstRef, TheLoop, ConfidenceLevel); }