static bool isCallbackArg(SVal V, QualType T) {
  // If the parameter is 0, it's harmless.
  if (V.isZeroConstant())
    return false;

  // If a parameter is a block or a callback, assume it can modify pointer.
  if (T->isBlockPointerType() ||
      T->isFunctionPointerType() ||
      T->isObjCSelType())
    return true;

  // Check if a callback is passed inside a struct (for both a struct passed by
  // reference and by value). Dig just one level into the struct for now.
  if (isa<PointerType>(T) || isa<ReferenceType>(T))
    T = T->getPointeeType();

  if (const RecordType *RT = T->getAsStructureType()) {
    const RecordDecl *RD = RT->getDecl();
    for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
         I != E; ++I) {
      QualType FieldT = I->getType();
      if (FieldT->isBlockPointerType() || FieldT->isFunctionPointerType())
        return true;
    }
  }

  return false;
}
void MicrosoftCXXNameMangler::mangleType(QualType T) {
  // Only operate on the canonical type!
  T = getASTContext().getCanonicalType(T);

  Qualifiers Quals = T.getLocalQualifiers();
  if (Quals) {
    // We have to mangle these now, while we still have enough information.
    // <pointer-cvr-qualifiers> ::= P  # pointer
    //                          ::= Q  # const pointer
    //                          ::= R  # volatile pointer
    //                          ::= S  # const volatile pointer
    if (T->isAnyPointerType() || T->isMemberPointerType() ||
        T->isBlockPointerType()) {
      if (!Quals.hasVolatile())
        Out << 'Q';
      else {
        if (!Quals.hasConst())
          Out << 'R';
        else
          Out << 'S';
      }
    } else
      // Just emit qualifiers like normal.
      // NB: When we mangle a pointer/reference type, and the pointee
      // type has no qualifiers, the lack of qualifier gets mangled
      // in there.
      mangleQualifiers(Quals, false);
  } else if (T->isAnyPointerType() || T->isMemberPointerType() ||
             T->isBlockPointerType()) {
    Out << 'P';
  }
  switch (T->getTypeClass()) {
#define ABSTRACT_TYPE(CLASS, PARENT)
#define NON_CANONICAL_TYPE(CLASS, PARENT) \
  case Type::CLASS: \
    llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
    return;
#define TYPE(CLASS, PARENT) \
  case Type::CLASS: \
    mangleType(static_cast<const CLASS##Type*>(T.getTypePtr())); \
    break;
#include "clang/AST/TypeNodes.def"
  }
}
static bool isCallback(QualType T) {
  // If a parameter is a block or a callback, assume it can modify pointer.
  if (T->isBlockPointerType() ||
      T->isFunctionPointerType() ||
      T->isObjCSelType())
    return true;

  // Check if a callback is passed inside a struct (for both a struct passed by
  // reference and by value). Dig just one level into the struct for now.
  if (T->isAnyPointerType() || T->isReferenceType())
    T = T->getPointeeType();

  if (const RecordType *RT = T->getAsStructureType()) {
    const RecordDecl *RD = RT->getDecl();
    for (const auto *I : RD->fields()) {
      QualType FieldT = I->getType();
      if (FieldT->isBlockPointerType() || FieldT->isFunctionPointerType())
        return true;
    }
  }
  return false;
}
SVal ValueManager::getConjuredSymbolVal(const Expr* E, unsigned Count) {
  QualType T = E->getType();

  if (!SymbolManager::canSymbolicate(T))
    return UnknownVal();

  SymbolRef sym = SymMgr.getConjuredSymbol(E, Count);

  // If T is of function pointer type or a block pointer type, create a
  // CodeTextRegion wrapping a symbol.
  if (T->isFunctionPointerType() || T->isBlockPointerType())
    return loc::MemRegionVal(MemMgr.getCodeTextRegion(sym, T));

  if (Loc::IsLocType(T))
    return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));

  return nonloc::SymbolVal(sym);
}
SVal ValueManager::getRegionValueSymbolVal(const MemRegion* R, QualType T) {
  if (T.isNull()) {
    const TypedRegion* TR = cast<TypedRegion>(R);
    T = TR->getValueType(SymMgr.getContext());
  }

  if (!SymbolManager::canSymbolicate(T))
    return UnknownVal();

  SymbolRef sym = SymMgr.getRegionValueSymbol(R, T);

  // If T is of function pointer type or a block pointer type, create a
  // CodeTextRegion wrapping that symbol.
  if (T->isFunctionPointerType() || T->isBlockPointerType())
    return loc::MemRegionVal(MemMgr.getCodeTextRegion(sym, T));

  if (Loc::IsLocType(T))
    return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));

  return nonloc::SymbolVal(sym);
}
static void SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
  // Don't issue a fixit if there is already an initializer.
  if (VD->getInit())
    return;

  // Suggest possible initialization (if any).
  const char *initialization = 0;
  QualType VariableTy = VD->getType().getCanonicalType();

  if (VariableTy->isObjCObjectPointerType() ||
      VariableTy->isBlockPointerType()) {
    // Check if 'nil' is defined.
    if (S.PP.getMacroInfo(&S.getASTContext().Idents.get("nil")))
      initialization = " = nil";
    else
      initialization = " = 0";
  }
  else if (VariableTy->isRealFloatingType())
    initialization = " = 0.0";
  else if (VariableTy->isBooleanType() && S.Context.getLangOptions().CPlusPlus)
    initialization = " = false";
  else if (VariableTy->isEnumeralType())
    return;
  else if (VariableTy->isPointerType() || VariableTy->isMemberPointerType()) {
    // Check if 'NULL' is defined.
    if (S.PP.getMacroInfo(&S.getASTContext().Idents.get("NULL")))
      initialization = " = NULL";
    else
      initialization = " = 0";
  }
  else if (VariableTy->isScalarType())
    initialization = " = 0";

  if (initialization) {
    SourceLocation loc = S.PP.getLocForEndOfToken(VD->getLocEnd());
    S.Diag(loc, diag::note_var_fixit_add_initialization)
      << FixItHint::CreateInsertion(loc, initialization);
  }
}
void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
                                                    CheckerContext &C) const {
  if (!BT) {
    BT.reset(new APIMisuse(this,
                           "Arguments passed to variadic method aren't all "
                           "Objective-C pointer types"));

    ASTContext &Ctx = C.getASTContext();
    arrayWithObjectsS = GetUnarySelector("arrayWithObjects", Ctx);
    dictionaryWithObjectsAndKeysS =
      GetUnarySelector("dictionaryWithObjectsAndKeys", Ctx);
    setWithObjectsS = GetUnarySelector("setWithObjects", Ctx);
    orderedSetWithObjectsS = GetUnarySelector("orderedSetWithObjects", Ctx);

    initWithObjectsS = GetUnarySelector("initWithObjects", Ctx);
    initWithObjectsAndKeysS = GetUnarySelector("initWithObjectsAndKeys", Ctx);
  }

  if (!isVariadicMessage(msg))
    return;

  // We are not interested in the selector arguments since they have
  // well-defined types, so the compiler will issue a warning for them.
  unsigned variadicArgsBegin = msg.getSelector().getNumArgs();

  // We're not interested in the last argument since it has to be nil or the
  // compiler would have issued a warning for it elsewhere.
  unsigned variadicArgsEnd = msg.getNumArgs() - 1;

  if (variadicArgsEnd <= variadicArgsBegin)
    return;

  // Verify that all arguments have Objective-C types.
  Optional<ExplodedNode*> errorNode;

  for (unsigned I = variadicArgsBegin; I != variadicArgsEnd; ++I) {
    QualType ArgTy = msg.getArgExpr(I)->getType();
    if (ArgTy->isObjCObjectPointerType())
      continue;

    // Block pointers are treated as Objective-C pointers.
    if (ArgTy->isBlockPointerType())
      continue;

    // Ignore pointer constants.
    if (msg.getArgSVal(I).getAs<loc::ConcreteInt>())
      continue;

    // Ignore pointer types annotated with 'NSObject' attribute.
    if (C.getASTContext().isObjCNSObjectType(ArgTy))
      continue;

    // Ignore CF references, which can be toll-free bridged.
    if (coreFoundation::isCFObjectRef(ArgTy))
      continue;

    // Generate only one error node to use for all bug reports.
    if (!errorNode.hasValue())
      errorNode = C.generateNonFatalErrorNode();

    if (!errorNode.getValue())
      continue;

    SmallString<128> sbuf;
    llvm::raw_svector_ostream os(sbuf);

    StringRef TypeName = GetReceiverInterfaceName(msg);
    if (!TypeName.empty())
      os << "Argument to '" << TypeName << "' method '";
    else
      os << "Argument to method '";

    msg.getSelector().print(os);
    os << "' should be an Objective-C pointer type, not '";
    ArgTy.print(os, C.getLangOpts());
    os << "'";

    auto R = llvm::make_unique<BugReport>(*BT, os.str(), errorNode.getValue());
    R->addRange(msg.getArgSourceRange(I));
    C.emitReport(std::move(R));
  }
}
// FIXME: should rewrite according to the cast kind.
SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
  castTy = Context.getCanonicalType(castTy);
  originalTy = Context.getCanonicalType(originalTy);
  if (val.isUnknownOrUndef() || castTy == originalTy)
    return val;

  if (castTy->isBooleanType()) {
    if (val.isUnknownOrUndef())
      return val;
    if (val.isConstant())
      return makeTruthVal(!val.isZeroConstant(), castTy);
    if (!Loc::isLocType(originalTy) &&
        !originalTy->isIntegralOrEnumerationType() &&
        !originalTy->isMemberPointerType())
      return UnknownVal();
    if (SymbolRef Sym = val.getAsSymbol(true)) {
      BasicValueFactory &BVF = getBasicValueFactory();
      // FIXME: If we had a state here, we could see if the symbol is known to
      // be zero, but we don't.
      return makeNonLoc(Sym, BO_NE, BVF.getValue(0, Sym->getType()), castTy);
    }
    // Loc values are not always true, they could be weakly linked functions.
    if (Optional<Loc> L = val.getAs<Loc>())
      return evalCastFromLoc(*L, castTy);

    Loc L = val.castAs<nonloc::LocAsInteger>().getLoc();
    return evalCastFromLoc(L, castTy);
  }

  // For const casts, casts to void, just propagate the value.
  if (!castTy->isVariableArrayType() && !originalTy->isVariableArrayType())
    if (shouldBeModeledWithNoOp(Context, Context.getPointerType(castTy),
                                         Context.getPointerType(originalTy)))
      return val;

  // Check for casts from pointers to integers.
  if (castTy->isIntegralOrEnumerationType() && Loc::isLocType(originalTy))
    return evalCastFromLoc(val.castAs<Loc>(), castTy);

  // Check for casts from integers to pointers.
  if (Loc::isLocType(castTy) && originalTy->isIntegralOrEnumerationType()) {
    if (Optional<nonloc::LocAsInteger> LV = val.getAs<nonloc::LocAsInteger>()) {
      if (const MemRegion *R = LV->getLoc().getAsRegion()) {
        StoreManager &storeMgr = StateMgr.getStoreManager();
        R = storeMgr.castRegion(R, castTy);
        return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
      }
      return LV->getLoc();
    }
    return dispatchCast(val, castTy);
  }

  // Just pass through function and block pointers.
  if (originalTy->isBlockPointerType() || originalTy->isFunctionPointerType()) {
    assert(Loc::isLocType(castTy));
    return val;
  }

  // Check for casts from array type to another type.
  if (const ArrayType *arrayT =
        dyn_cast<ArrayType>(originalTy.getCanonicalType())) {
    // We will always decay to a pointer.
    QualType elemTy = arrayT->getElementType();
    val = StateMgr.ArrayToPointer(val.castAs<Loc>(), elemTy);

    // Are we casting from an array to a pointer?  If so just pass on
    // the decayed value.
    if (castTy->isPointerType() || castTy->isReferenceType())
      return val;

    // Are we casting from an array to an integer?  If so, cast the decayed
    // pointer value to an integer.
    assert(castTy->isIntegralOrEnumerationType());

    // FIXME: Keep these here for now in case we decide soon that we
    // need the original decayed type.
    //    QualType elemTy = cast<ArrayType>(originalTy)->getElementType();
    //    QualType pointerTy = C.getPointerType(elemTy);
    return evalCastFromLoc(val.castAs<Loc>(), castTy);
  }

  // Check for casts from a region to a specific type.
  if (const MemRegion *R = val.getAsRegion()) {
    // Handle other casts of locations to integers.
    if (castTy->isIntegralOrEnumerationType())
      return evalCastFromLoc(loc::MemRegionVal(R), castTy);

    // FIXME: We should handle the case where we strip off view layers to get
    // to a desugared type.
    if (!Loc::isLocType(castTy)) {
      // FIXME: There can be gross cases where one casts the result of a
      // function (that returns a pointer) to some other value that happens to
      // fit within that pointer value. We currently have no good way to
      // model such operations. When this happens, the underlying operation
      // is that the caller is reasoning about bits. Conceptually we are
      // layering a "view" of a location on top of those bits. Perhaps
      // we need to be more lazy about mutual possible views, even on an
      // SVal? This may be necessary for bit-level reasoning as well.
      return UnknownVal();
    }

    // We get a symbolic function pointer for a dereference of a function
    // pointer, but it is of function type. Example:
    //  struct FPRec {
    //    void (*my_func)(int * x);
    //  };
    //
    //  int bar(int x);
    //
    //  int f1_a(struct FPRec* foo) {
    //    int x;
    //    (*foo->my_func)(&x);
    //    return bar(x)+1; // no-warning
    //  }
    assert(Loc::isLocType(originalTy) || originalTy->isFunctionType() ||
           originalTy->isBlockPointerType() || castTy->isReferenceType());

    StoreManager &storeMgr = StateMgr.getStoreManager();

    // Delegate to store manager to get the result of casting a region to a
    // different type. If the MemRegion* returned is NULL, this expression
    // evaluates to UnknownVal.
    R = storeMgr.castRegion(R, castTy);
    return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
  }

  return dispatchCast(val, castTy);
}
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
  QualType Ty = D.getType();
  bool isByRef = D.hasAttr<BlocksAttr>();
  bool needsDispose = false;
  unsigned Align = 0;
  bool IsSimpleConstantInitializer = false;

  llvm::Value *DeclPtr;
  if (Ty->isConstantSizeType()) {
    if (!Target.useGlobalsForAutomaticVariables()) {

      // If this value is an array or struct, is POD, and if the initializer is
      // a statically determinable constant, try to optimize it.
      if (D.getInit() && !isByRef &&
          (Ty->isArrayType() || Ty->isRecordType()) &&
          Ty->isPODType() &&
          D.getInit()->isConstantInitializer(getContext())) {
        // If this variable is marked 'const', emit the value as a global.
        if (CGM.getCodeGenOpts().MergeAllConstants &&
            Ty.isConstant(getContext())) {
          EmitStaticBlockVarDecl(D);
          return;
        }

        IsSimpleConstantInitializer = true;
      }

      // A normal fixed sized variable becomes an alloca in the entry block.
      const llvm::Type *LTy = ConvertTypeForMem(Ty);
      if (isByRef)
        LTy = BuildByRefType(&D);
      llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
      Alloc->setName(D.getNameAsString());

      Align = getContext().getDeclAlignInBytes(&D);
      if (isByRef)
        Align = std::max(Align, unsigned(Target.getPointerAlign(0) / 8));
      Alloc->setAlignment(Align);
      DeclPtr = Alloc;
    } else {
      // Targets that don't support recursion emit locals as globals.
      const char *Class =
        D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
      DeclPtr = CreateStaticBlockVarDecl(D, Class,
                                         llvm::GlobalValue::InternalLinkage);
    }

    // FIXME: Can this happen?
    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext);
      llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");

      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);

      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      {
        // Push a cleanup block and restore the stack there.
        DelayedCleanupBlock scope(*this);

        V = Builder.CreateLoad(Stack, "tmp");
        llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
        Builder.CreateCall(F, V);
      }
    }

    // Get the element type.
    const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
    const llvm::Type *LElemPtrTy =
      llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());

    llvm::Value *VLASize = EmitVLASize(Ty);

    // Downcast the VLA size expression
    VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
                                    false, "tmp");

    // Allocate memory for the array.
    llvm::AllocaInst *VLA =
      Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
    VLA->setAlignment(getContext().getDeclAlignInBytes(&D));

    DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
  }

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for local var declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    assert(HaveInsertPoint() && "Unexpected unreachable point!");

    DI->setLocation(D.getLocation());
    if (Target.useGlobalsForAutomaticVariables()) {
      DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
    } else
      DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
  }

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!ContainsLabel(Init))
      Init = 0;
    else
      EnsureInsertPoint();
  }

  if (Init) {
    llvm::Value *Loc = DeclPtr;
    if (isByRef)
      Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
                                    D.getNameAsString());

    bool isVolatile =
      getContext().getCanonicalType(D.getType()).isVolatileQualified();

    // If the initializer was a simple constant initializer, we can optimize it
    // in various ways.
    if (IsSimpleConstantInitializer) {
      llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(),D.getType(),this);
      assert(Init != 0 && "Wasn't a simple constant init?");

      llvm::Value *AlignVal =
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Align);
      const llvm::Type *IntPtr =
        llvm::IntegerType::get(VMContext, LLVMPointerWidth);
      llvm::Value *SizeVal =
        llvm::ConstantInt::get(IntPtr, getContext().getTypeSizeInBytes(Ty));

      const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
      if (Loc->getType() != BP)
        Loc = Builder.CreateBitCast(Loc, BP, "tmp");

      // If the initializer is all zeros, codegen with memset.
      if (isa<llvm::ConstantAggregateZero>(Init)) {
        llvm::Value *Zero =
          llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0);
        Builder.CreateCall4(CGM.getMemSetFn(), Loc, Zero, SizeVal, AlignVal);
      } else {
        // Otherwise, create a temporary global with the initializer then
        // memcpy from the global to the alloca.
        std::string Name = GetStaticDeclName(*this, D, ".");
        llvm::GlobalVariable *GV =
          new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true,
                                   llvm::GlobalValue::InternalLinkage,
                                   Init, Name, 0, false, 0);
        GV->setAlignment(Align);

        llvm::Value *SrcPtr = GV;
        if (SrcPtr->getType() != BP)
          SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");

        Builder.CreateCall4(CGM.getMemCpyFn(), Loc, SrcPtr, SizeVal, AlignVal);
      }
    } else if (Ty->isReferenceType()) {
      RValue RV = EmitReferenceBindingToExpr(Init, Ty, /*IsInitializer=*/true);
      EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
    } else if (!hasAggregateLLVMType(Init->getType())) {
      llvm::Value *V = EmitScalarExpr(Init);
      EmitStoreOfScalar(V, Loc, isVolatile, D.getType());
    } else if (Init->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(Init, Loc, isVolatile);
    } else {
      EmitAggExpr(Init, Loc, isVolatile);
    }
  }

  if (isByRef) {
    const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext);

    EnsureInsertPoint();
    llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
    llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
    llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
    llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
    llvm::Value *V;
    int flag = 0;
    int flags = 0;

    needsDispose = true;

    if (Ty->isBlockPointerType()) {
      flag |= BLOCK_FIELD_IS_BLOCK;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    } else if (BlockRequiresCopying(Ty)) {
      flag |= BLOCK_FIELD_IS_OBJECT;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    }

    // FIXME: Someone double check this.
    if (Ty.isObjCGCWeak())
      flag |= BLOCK_FIELD_IS_WEAK;

    int isa = 0;
    if (flag & BLOCK_FIELD_IS_WEAK)
      isa = 1;

    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa);
    V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
    Builder.CreateStore(V, isa_field);

    Builder.CreateStore(DeclPtr, forwarding_field);

    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags);
    Builder.CreateStore(V, flags_field);

    const llvm::Type *V1;
    V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                               (CGM.getTargetData().getTypeStoreSizeInBits(V1)
                                / 8));
    Builder.CreateStore(V, size_field);

    if (flags & BLOCK_HAS_COPY_DISPOSE) {
      BlockHasCopyDispose = true;
      llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
      Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag, Align),
                          copy_helper);

      llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
      Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
                                                  Align),
                          destroy_helper);
    }
  }

  // Handle CXX destruction of variables.
  QualType DtorTy(Ty);
  while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
    DtorTy = getContext().getBaseElementType(Array);
  if (const RecordType *RT = DtorTy->getAs<RecordType>())
    if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      if (!ClassDecl->hasTrivialDestructor()) {
        const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
        assert(D && "EmitLocalBlockVarDecl - destructor is null");

        if (const ConstantArrayType *Array =
              getContext().getAsConstantArrayType(Ty)) {
          {
            DelayedCleanupBlock Scope(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr = Builder.CreateBitCast(DeclPtr, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);

            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }
          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr = Builder.CreateBitCast(DeclPtr, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
          }
        } else {
          {
            DelayedCleanupBlock Scope(*this);
            EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);

            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }
          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
          }
        }
      }
    }

  // Handle the cleanup attribute
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant* F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);

    // In some cases, the type of the function argument will be different from
    // the type of the pointer. An example of this is
    //   void f(void* arg);
    //   __attribute__((cleanup(f))) void *g;
    //
    // To fix this we insert a bitcast here.
    QualType ArgTy = Info.arg_begin()->type;
    {
      DelayedCleanupBlock scope(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                          ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, Args);
    }

    if (Exceptions) {
      EHCleanupBlock Cleanup(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                          ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, Args);
    }
  }

  if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
    {
      DelayedCleanupBlock scope(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }
    // FIXME: Turn this on and audit the codegen
    if (0 && Exceptions) {
      EHCleanupBlock Cleanup(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }
  }
}
clang::analyze_format_string::ArgType::MatchKind
ArgType::matchesType(ASTContext &C, QualType argTy) const {
  if (Ptr) {
    // It has to be a pointer.
    const PointerType *PT = argTy->getAs<PointerType>();
    if (!PT)
      return NoMatch;

    // We cannot write through a const qualified pointer.
    if (PT->getPointeeType().isConstQualified())
      return NoMatch;

    argTy = PT->getPointeeType();
  }

  switch (K) {
    case InvalidTy:
      llvm_unreachable("ArgType must be valid");

    case UnknownTy:
      return Match;

    case AnyCharTy: {
      if (const EnumType *ETy = argTy->getAs<EnumType>())
        argTy = ETy->getDecl()->getIntegerType();

      if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
        switch (BT->getKind()) {
          default:
            break;
          case BuiltinType::Char_S:
          case BuiltinType::SChar:
          case BuiltinType::UChar:
          case BuiltinType::Char_U:
            return Match;
        }
      return NoMatch;
    }

    case SpecificTy: {
      if (const EnumType *ETy = argTy->getAs<EnumType>())
        argTy = ETy->getDecl()->getIntegerType();
      argTy = C.getCanonicalType(argTy).getUnqualifiedType();

      if (T == argTy)
        return Match;

      // Check for "compatible types".
      if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
        switch (BT->getKind()) {
          default:
            break;
          case BuiltinType::Char_S:
          case BuiltinType::SChar:
          case BuiltinType::Char_U:
          case BuiltinType::UChar:
            return T == C.UnsignedCharTy || T == C.SignedCharTy ? Match
                                                                : NoMatch;
          case BuiltinType::Short:
            return T == C.UnsignedShortTy ? Match : NoMatch;
          case BuiltinType::UShort:
            return T == C.ShortTy ? Match : NoMatch;
          case BuiltinType::Int:
            return T == C.UnsignedIntTy ? Match : NoMatch;
          case BuiltinType::UInt:
            return T == C.IntTy ? Match : NoMatch;
          case BuiltinType::Long:
            return T == C.UnsignedLongTy ? Match : NoMatch;
          case BuiltinType::ULong:
            return T == C.LongTy ? Match : NoMatch;
          case BuiltinType::LongLong:
            return T == C.UnsignedLongLongTy ? Match : NoMatch;
          case BuiltinType::ULongLong:
            return T == C.LongLongTy ? Match : NoMatch;
        }
      return NoMatch;
    }

    case CStrTy: {
      const PointerType *PT = argTy->getAs<PointerType>();
      if (!PT)
        return NoMatch;
      QualType pointeeTy = PT->getPointeeType();
      if (const BuiltinType *BT = pointeeTy->getAs<BuiltinType>())
        switch (BT->getKind()) {
          case BuiltinType::Void:
          case BuiltinType::Char_U:
          case BuiltinType::UChar:
          case BuiltinType::Char_S:
          case BuiltinType::SChar:
            return Match;
          default:
            break;
        }
      return NoMatch;
    }

    case WCStrTy: {
      const PointerType *PT = argTy->getAs<PointerType>();
      if (!PT)
        return NoMatch;
      QualType pointeeTy =
        C.getCanonicalType(PT->getPointeeType()).getUnqualifiedType();
      return pointeeTy == C.getWideCharType() ? Match : NoMatch;
    }

    case WIntTy: {
      QualType PromoArg = argTy->isPromotableIntegerType()
                              ? C.getPromotedIntegerType(argTy)
                              : argTy;

      QualType WInt = C.getCanonicalType(C.getWIntType()).getUnqualifiedType();
      PromoArg = C.getCanonicalType(PromoArg).getUnqualifiedType();

      // If the promoted argument is the corresponding signed type of the
      // wint_t type, then it should match.
      if (PromoArg->hasSignedIntegerRepresentation() &&
          C.getCorrespondingUnsignedType(PromoArg) == WInt)
        return Match;

      return WInt == PromoArg ? Match : NoMatch;
    }

    case CPointerTy:
      if (argTy->isVoidPointerType()) {
        return Match;
      }
      if (argTy->isPointerType() || argTy->isObjCObjectPointerType() ||
          argTy->isBlockPointerType() || argTy->isNullPtrType()) {
        return NoMatchPedantic;
      } else {
        return NoMatch;
      }

    case ObjCPointerTy: {
      if (argTy->getAs<ObjCObjectPointerType>() ||
          argTy->getAs<BlockPointerType>())
        return Match;

      // Handle implicit toll-free bridging.
      if (const PointerType *PT = argTy->getAs<PointerType>()) {
        // Things such as CFTypeRef are really just opaque pointers
        // to C structs representing CF types that can often be bridged
        // to Objective-C objects. Since the compiler doesn't know which
        // structs can be toll-free bridged, we just accept them all.
        QualType pointee = PT->getPointeeType();
        if (pointee->getAsStructureType() || pointee->isVoidType())
          return Match;
      }
      return NoMatch;
    }
  }

  llvm_unreachable("Invalid ArgType Kind!");
}
// FIXME: should rewrite according to the cast kind.
SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
  castTy = Context.getCanonicalType(castTy);
  originalTy = Context.getCanonicalType(originalTy);
  if (val.isUnknownOrUndef() || castTy == originalTy)
    return val;

  // For const casts, just propagate the value.
  if (!castTy->isVariableArrayType() && !originalTy->isVariableArrayType())
    if (haveSimilarTypes(Context, Context.getPointerType(castTy),
                                  Context.getPointerType(originalTy)))
      return val;

  // Check for casts from pointers to integers.
  if (castTy->isIntegerType() && Loc::isLocType(originalTy))
    return evalCastFromLoc(cast<Loc>(val), castTy);

  // Check for casts from integers to pointers.
  if (Loc::isLocType(castTy) && originalTy->isIntegerType()) {
    if (nonloc::LocAsInteger *LV = dyn_cast<nonloc::LocAsInteger>(&val)) {
      if (const MemRegion *R = LV->getLoc().getAsRegion()) {
        StoreManager &storeMgr = StateMgr.getStoreManager();
        R = storeMgr.castRegion(R, castTy);
        return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
      }
      return LV->getLoc();
    }
    return dispatchCast(val, castTy);
  }

  // Just pass through function and block pointers.
  if (originalTy->isBlockPointerType() || originalTy->isFunctionPointerType()) {
    assert(Loc::isLocType(castTy));
    return val;
  }

  // Check for casts from array type to another type.
  if (originalTy->isArrayType()) {
    // We will always decay to a pointer.
    val = StateMgr.ArrayToPointer(cast<Loc>(val));

    // Are we casting from an array to a pointer?  If so just pass on
    // the decayed value.
    if (castTy->isPointerType())
      return val;

    // Are we casting from an array to an integer?  If so, cast the decayed
    // pointer value to an integer.
    assert(castTy->isIntegerType());

    // FIXME: Keep these here for now in case we decide soon that we
    // need the original decayed type.
    //    QualType elemTy = cast<ArrayType>(originalTy)->getElementType();
    //    QualType pointerTy = C.getPointerType(elemTy);
    return evalCastFromLoc(cast<Loc>(val), castTy);
  }

  // Check for casts from a region to a specific type.
  if (const MemRegion *R = val.getAsRegion()) {
    // Handle other casts of locations to integers.
    if (castTy->isIntegerType())
      return evalCastFromLoc(loc::MemRegionVal(R), castTy);

    // FIXME: We should handle the case where we strip off view layers to get
    // to a desugared type.
    if (!Loc::isLocType(castTy)) {
      // FIXME: There can be gross cases where one casts the result of a
      // function (that returns a pointer) to some other value that happens to
      // fit within that pointer value. We currently have no good way to
      // model such operations. When this happens, the underlying operation
      // is that the caller is reasoning about bits. Conceptually we are
      // layering a "view" of a location on top of those bits. Perhaps
      // we need to be more lazy about mutual possible views, even on an
      // SVal? This may be necessary for bit-level reasoning as well.
      return UnknownVal();
    }

    // We get a symbolic function pointer for a dereference of a function
    // pointer, but it is of function type. Example:
    //  struct FPRec {
    //    void (*my_func)(int * x);
    //  };
    //
    //  int bar(int x);
    //
    //  int f1_a(struct FPRec* foo) {
    //    int x;
    //    (*foo->my_func)(&x);
    //    return bar(x)+1; // no-warning
    //  }
    assert(Loc::isLocType(originalTy) || originalTy->isFunctionType() ||
           originalTy->isBlockPointerType() || castTy->isReferenceType());

    StoreManager &storeMgr = StateMgr.getStoreManager();

    // Delegate to store manager to get the result of casting a region to a
    // different type. If the MemRegion* returned is NULL, this expression
    // evaluates to UnknownVal.
    R = storeMgr.castRegion(R, castTy);
    return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
  }

  return dispatchCast(val, castTy);
}
/// \brief Build an Objective-C instance message expression.
///
/// This routine takes care of both normal instance messages and
/// instance messages to the superclass instance.
///
/// \param Receiver The expression that computes the object that will
/// receive this message. This may be empty, in which case we are
/// sending to the superclass instance and \p SuperLoc must be a valid
/// source location.
///
/// \param ReceiverType The (static) type of the object receiving the
/// message. When a \p Receiver expression is provided, this is the
/// same type as that expression. For a superclass instance send, this
/// is a pointer to the type of the superclass.
///
/// \param SuperLoc The location of the "super" keyword in a
/// superclass instance message.
///
/// \param Sel The selector to which the message is being sent.
///
/// \param Method The method that this instance message is invoking, if
/// already known.
///
/// \param LBracLoc The location of the opening square bracket '['.
///
/// \param RBracLoc The location of the closing square bracket ']'.
///
/// \param Args The message arguments.
ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
                                      QualType ReceiverType,
                                      SourceLocation SuperLoc,
                                      Selector Sel,
                                      ObjCMethodDecl *Method,
                                      SourceLocation LBracLoc,
                                      SourceLocation SelectorLoc,
                                      SourceLocation RBracLoc,
                                      MultiExprArg ArgsIn) {
  // The location of the receiver.
  SourceLocation Loc = SuperLoc.isValid()? SuperLoc : Receiver->getLocStart();

  if (LBracLoc.isInvalid()) {
    Diag(Loc, diag::err_missing_open_square_message_send)
      << FixItHint::CreateInsertion(Loc, "[");
    LBracLoc = Loc;
  }

  // If we have a receiver expression, perform appropriate promotions
  // and determine receiver type.
  if (Receiver) {
    if (Receiver->isTypeDependent()) {
      // If the receiver is type-dependent, we can't type-check anything
      // at this point. Build a dependent expression.
      unsigned NumArgs = ArgsIn.size();
      Expr **Args = reinterpret_cast<Expr **>(ArgsIn.release());
      assert(SuperLoc.isInvalid() && "Message to super with dependent type");
      return Owned(ObjCMessageExpr::Create(Context, Context.DependentTy,
                                           VK_RValue, LBracLoc, Receiver, Sel,
                                           SelectorLoc, /*Method=*/0,
                                           Args, NumArgs, RBracLoc));
    }

    // If necessary, apply function/array conversion to the receiver.
    // C99 6.7.5.3p[7,8].
    DefaultFunctionArrayLvalueConversion(Receiver);

    ReceiverType = Receiver->getType();
  }

  if (!Method) {
    // Handle messages to id.
    bool receiverIsId = ReceiverType->isObjCIdType();
    if (receiverIsId || ReceiverType->isBlockPointerType() ||
        (Receiver && Context.isObjCNSObjectType(Receiver->getType()))) {
      Method = LookupInstanceMethodInGlobalPool(Sel,
                                              SourceRange(LBracLoc, RBracLoc),
                                                receiverIsId);
      if (!Method)
        Method = LookupFactoryMethodInGlobalPool(Sel,
                                              SourceRange(LBracLoc, RBracLoc),
                                                 receiverIsId);
    } else if (ReceiverType->isObjCClassType() ||
               ReceiverType->isObjCQualifiedClassType()) {
      // Handle messages to Class.
      if (ObjCMethodDecl *CurMeth = getCurMethodDecl()) {
        if (ObjCInterfaceDecl *ClassDecl = CurMeth->getClassInterface()) {
          // First check the public methods in the class interface.
          Method = ClassDecl->lookupClassMethod(Sel);

          if (!Method)
            Method = LookupPrivateClassMethod(Sel, ClassDecl);

          // FIXME: if we still haven't found a method, we need to look in
          // protocols (if we have qualifiers).
        }
        if (Method && DiagnoseUseOfDecl(Method, Loc))
          return ExprError();
      }
      if (!Method) {
        // If not messaging 'self', look for any factory method named 'Sel'.
        if (!Receiver || !isSelfExpr(Receiver)) {
          Method = LookupFactoryMethodInGlobalPool(Sel,
                                              SourceRange(LBracLoc, RBracLoc),
                                                   true);
          if (!Method) {
            // If no class (factory) method was found, check if an _instance_
            // method of the same name exists in the root class only.
            Method = LookupInstanceMethodInGlobalPool(Sel,
                                              SourceRange(LBracLoc, RBracLoc),
                                                      true);
            if (Method)
              if (const ObjCInterfaceDecl *ID =
                    dyn_cast<ObjCInterfaceDecl>(Method->getDeclContext())) {
                if (ID->getSuperClass())
                  Diag(Loc, diag::warn_root_inst_method_not_found)
                    << Sel << SourceRange(LBracLoc, RBracLoc);
              }
          }
        }
      }
    } else {
      ObjCInterfaceDecl* ClassDecl = 0;

      // We allow sending a message to a qualified ID ("id<foo>"), which is ok
      // as long as one of the protocols implements the selector (if not, warn).
      if (const ObjCObjectPointerType *QIdTy
            = ReceiverType->getAsObjCQualifiedIdType()) {
        // Search protocols for instance methods.
        Method = LookupMethodInQualifiedType(Sel, QIdTy, true);
        if (!Method)
          Method = LookupMethodInQualifiedType(Sel, QIdTy, false);
      } else if (const ObjCObjectPointerType *OCIType
                   = ReceiverType->getAsObjCInterfacePointerType()) {
        // We allow sending a message to a pointer to an interface (an object).
        ClassDecl = OCIType->getInterfaceDecl();

        // FIXME: consider using LookupInstanceMethodInGlobalPool, since it will
        // be faster than the following method (which can do *many* linear
        // searches). The idea is to add class info to MethodPool.
        Method = ClassDecl->lookupInstanceMethod(Sel);

        if (!Method)
          // Search protocol qualifiers.
          Method = LookupMethodInQualifiedType(Sel, OCIType, true);

        bool forwardClass = false;
        if (!Method) {
          // If we have implementations in scope, check "private" methods.
          Method = LookupPrivateInstanceMethod(Sel, ClassDecl);

          if (!Method && (!Receiver || !isSelfExpr(Receiver))) {
            // If we still haven't found a method, look in the global pool. This
            // behavior isn't very desirable, however we need it for GCC
            // compatibility. FIXME: should we deviate??
            if (OCIType->qual_empty()) {
              Method = LookupInstanceMethodInGlobalPool(Sel,
                                              SourceRange(LBracLoc, RBracLoc));
              forwardClass = OCIType->getInterfaceDecl()->isForwardDecl();
              if (Method && !forwardClass)
                Diag(Loc, diag::warn_maynot_respond)
                  << OCIType->getInterfaceDecl()->getIdentifier() << Sel;
            }
          }
        }
        if (Method && DiagnoseUseOfDecl(Method, Loc, forwardClass))
          return ExprError();
      } else if (!Context.getObjCIdType().isNull() &&
                 (ReceiverType->isPointerType() ||
                  ReceiverType->isIntegerType())) {
        // Implicitly convert integers and pointers to 'id' but emit a warning.
        Diag(Loc, diag::warn_bad_receiver_type)
          << ReceiverType
          << Receiver->getSourceRange();
        if (ReceiverType->isPointerType())
          ImpCastExprToType(Receiver, Context.getObjCIdType(), CK_BitCast);
        else {
          // TODO: specialized warning on null receivers?
          bool IsNull = Receiver->isNullPointerConstant(Context,
                                              Expr::NPC_ValueDependentIsNull);
          ImpCastExprToType(Receiver, Context.getObjCIdType(),
                            IsNull ? CK_NullToPointer : CK_IntegralToPointer);
        }
        ReceiverType = Receiver->getType();
      } else if (getLangOptions().CPlusPlus &&
                 !PerformContextuallyConvertToObjCId(Receiver)) {
        if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Receiver)) {
          Receiver = ICE->getSubExpr();
          ReceiverType = Receiver->getType();
        }
        return BuildInstanceMessage(Receiver,
                                    ReceiverType,
                                    SuperLoc,
                                    Sel,
                                    Method,
                                    LBracLoc,
                                    SelectorLoc,
                                    RBracLoc,
                                    move(ArgsIn));
      } else {
        // Reject other random receiver types (e.g. structs).
        Diag(Loc, diag::err_bad_receiver_type)
          << ReceiverType << Receiver->getSourceRange();
        return ExprError();
      }
    }
  }

  // Check the message arguments.
  unsigned NumArgs = ArgsIn.size();
  Expr **Args = reinterpret_cast<Expr **>(ArgsIn.release());
  QualType ReturnType;
  ExprValueKind VK = VK_RValue;
  bool ClassMessage = (ReceiverType->isObjCClassType() ||
                       ReceiverType->isObjCQualifiedClassType());
  if (CheckMessageArgumentTypes(Args, NumArgs, Sel, Method, ClassMessage,
                                LBracLoc, RBracLoc, ReturnType, VK))
    return ExprError();

  if (Method && !Method->getResultType()->isVoidType() &&
      RequireCompleteType(LBracLoc, Method->getResultType(),
                          diag::err_illegal_message_expr_incomplete_type))
    return ExprError();

  // Construct the appropriate ObjCMessageExpr instance.
  Expr *Result;
  if (SuperLoc.isValid())
    Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
                                     SuperLoc, /*IsInstanceSuper=*/true,
                                     ReceiverType, Sel, SelectorLoc, Method,
                                     Args, NumArgs, RBracLoc);
  else
    Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
                                     Receiver, Sel, SelectorLoc, Method,
                                     Args, NumArgs, RBracLoc);
  return MaybeBindToTemporary(Result);
}
bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
  assert((T->isAnyPointerType() || T->isBlockPointerType()) && "Invalid type");
  return isZeroInitializable(T);
}
void VariadicMethodTypeChecker::checkPreObjCMessage(ObjCMessage msg,
                                                    CheckerContext &C) const {
  if (!BT) {
    BT.reset(new APIMisuse("Arguments passed to variadic method aren't all "
                           "Objective-C pointer types"));

    ASTContext &Ctx = C.getASTContext();
    arrayWithObjectsS = GetUnarySelector("arrayWithObjects", Ctx);
    dictionaryWithObjectsAndKeysS =
      GetUnarySelector("dictionaryWithObjectsAndKeys", Ctx);
    setWithObjectsS = GetUnarySelector("setWithObjects", Ctx);
    orderedSetWithObjectsS = GetUnarySelector("orderedSetWithObjects", Ctx);

    initWithObjectsS = GetUnarySelector("initWithObjects", Ctx);
    initWithObjectsAndKeysS = GetUnarySelector("initWithObjectsAndKeys", Ctx);
  }

  if (!isVariadicMessage(msg))
    return;

  // We are not interested in the selector arguments since they have
  // well-defined types, so the compiler will issue a warning for them.
  unsigned variadicArgsBegin = msg.getSelector().getNumArgs();

  // We're not interested in the last argument since it has to be nil or the
  // compiler would have issued a warning for it elsewhere.
  unsigned variadicArgsEnd = msg.getNumArgs() - 1;

  if (variadicArgsEnd <= variadicArgsBegin)
    return;

  // Verify that all arguments have Objective-C types.
  llvm::Optional<ExplodedNode*> errorNode;
  ProgramStateRef state = C.getState();

  for (unsigned I = variadicArgsBegin; I != variadicArgsEnd; ++I) {
    QualType ArgTy = msg.getArgType(I);
    if (ArgTy->isObjCObjectPointerType())
      continue;

    // Block pointers are treated as Objective-C pointers.
    if (ArgTy->isBlockPointerType())
      continue;

    // Ignore pointer constants.
    if (isa<loc::ConcreteInt>(msg.getArgSVal(I, C.getLocationContext(), state)))
      continue;

    // Ignore pointer types annotated with 'NSObject' attribute.
    if (C.getASTContext().isObjCNSObjectType(ArgTy))
      continue;

    // Ignore CF references, which can be toll-free bridged.
    if (coreFoundation::isCFObjectRef(ArgTy))
      continue;

    // Generate only one error node to use for all bug reports.
    if (!errorNode.hasValue()) {
      errorNode = C.addTransition();
    }

    if (!errorNode.getValue())
      continue;

    SmallString<128> sbuf;
    llvm::raw_svector_ostream os(sbuf);

    if (const char *TypeName = GetReceiverNameType(msg))
      os << "Argument to '" << TypeName << "' method '";
    else
      os << "Argument to method '";

    os << msg.getSelector().getAsString()
       << "' should be an Objective-C pointer type, not '"
       << ArgTy.getAsString() << "'";

    BugReport *R = new BugReport(*BT, os.str(), errorNode.getValue());
    R->addRange(msg.getArgSourceRange(I));
    C.EmitReport(R);
  }
}
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
  QualType Ty = D.getType();
  bool isByRef = D.hasAttr<BlocksAttr>();
  bool needsDispose = false;
  unsigned Align = 0;

  llvm::Value *DeclPtr;
  if (Ty->isConstantSizeType()) {
    if (!Target.useGlobalsForAutomaticVariables()) {
      // A normal fixed sized variable becomes an alloca in the entry block.
      const llvm::Type *LTy = ConvertTypeForMem(Ty);
      Align = getContext().getDeclAlignInBytes(&D);
      if (isByRef)
        LTy = BuildByRefType(Ty, Align);
      llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
      Alloc->setName(D.getNameAsString().c_str());

      if (isByRef)
        Align = std::max(Align, unsigned(Target.getPointerAlign(0) / 8));
      Alloc->setAlignment(Align);
      DeclPtr = Alloc;
    } else {
      // Targets that don't support recursion emit locals as globals.
      const char *Class =
        D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
      DeclPtr = CreateStaticBlockVarDecl(D, Class,
                                         llvm::GlobalValue::InternalLinkage);
    }

    // FIXME: Can this happen?
    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      const llvm::Type *LTy =
        llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
      llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");

      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);

      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      {
        // Push a cleanup block and restore the stack there.
        CleanupScope scope(*this);

        V = Builder.CreateLoad(Stack, "tmp");
        llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
        Builder.CreateCall(F, V);
      }
    }

    // Get the element type.
    const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
    const llvm::Type *LElemPtrTy =
      llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());

    llvm::Value *VLASize = EmitVLASize(Ty);

    // Downcast the VLA size expression
    VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
                                    false, "tmp");

    // Allocate memory for the array.
    llvm::Value *VLA =
      Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
    DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
  }

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for local var declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    assert(HaveInsertPoint() && "Unexpected unreachable point!");

    DI->setLocation(D.getLocation());
    if (Target.useGlobalsForAutomaticVariables()) {
      DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
    } else if (isByRef) {
      llvm::Value *Loc;
      bool needsCopyDispose = BlockRequiresCopying(Ty);
      Loc = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      Loc = Builder.CreateLoad(Loc, false);
      Loc = Builder.CreateBitCast(Loc, DeclPtr->getType());
      Loc = Builder.CreateStructGEP(Loc, needsCopyDispose*2+4, "x");
      DI->EmitDeclareOfAutoVariable(&D, Loc, Builder);
    } else
      DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
  }

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!ContainsLabel(Init))
      Init = 0;
    else
      EnsureInsertPoint();
  }

  if (Init) {
    llvm::Value *Loc = DeclPtr;
    if (isByRef) {
      bool needsCopyDispose = BlockRequiresCopying(Ty);
      Loc = Builder.CreateStructGEP(DeclPtr, needsCopyDispose*2+4, "x");
    }
    if (Ty->isReferenceType()) {
      RValue RV = EmitReferenceBindingToExpr(Init, Ty, /*IsInitializer=*/true);
      EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
    } else if (!hasAggregateLLVMType(Init->getType())) {
      llvm::Value *V = EmitScalarExpr(Init);
      EmitStoreOfScalar(V, Loc, D.getType().isVolatileQualified(),
                        D.getType());
    } else if (Init->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(Init, Loc, D.getType().isVolatileQualified());
    } else {
      EmitAggExpr(Init, Loc, D.getType().isVolatileQualified());
    }
  }

  if (isByRef) {
    const llvm::PointerType *PtrToInt8Ty =
      llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));

    EnsureInsertPoint();
    llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
    llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
    llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
    llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
    llvm::Value *V;
    int flag = 0;
    int flags = 0;

    needsDispose = true;

    if (Ty->isBlockPointerType()) {
      flag |= BLOCK_FIELD_IS_BLOCK;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    } else if (BlockRequiresCopying(Ty)) {
      flag |= BLOCK_FIELD_IS_OBJECT;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    }

    // FIXME: Someone double check this.
    if (Ty.isObjCGCWeak())
      flag |= BLOCK_FIELD_IS_WEAK;

    int isa = 0;
    if (flag & BLOCK_FIELD_IS_WEAK)
      isa = 1;

    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa);
    V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
    Builder.CreateStore(V, isa_field);

    V = Builder.CreateBitCast(DeclPtr, PtrToInt8Ty, "forwarding");
    Builder.CreateStore(V, forwarding_field);

    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags);
    Builder.CreateStore(V, flags_field);

    const llvm::Type *V1;
    V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                               (CGM.getTargetData().getTypeStoreSizeInBits(V1)
                                / 8));
    Builder.CreateStore(V, size_field);

    if (flags & BLOCK_HAS_COPY_DISPOSE) {
      BlockHasCopyDispose = true;
      llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
      Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag, Align),
                          copy_helper);

      llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
      Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
                                                  Align),
                          destroy_helper);
    }
  }

  // Handle CXX destruction of variables.
  QualType DtorTy(Ty);
  if (const ArrayType *Array = DtorTy->getAs<ArrayType>())
    DtorTy = Array->getElementType();
  if (const RecordType *RT = DtorTy->getAs<RecordType>())
    if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      if (!ClassDecl->hasTrivialDestructor()) {
        const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
        assert(D && "EmitLocalBlockVarDecl - destructor is null");
        assert(!Ty->getAs<ArrayType>() && "FIXME - destruction of arrays NYI");

        CleanupScope scope(*this);
        EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
      }
    }

  // Handle the cleanup attribute
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant* F = CGM.GetAddrOfFunction(GlobalDecl(FD));
    assert(F && "Could not find function!");

    CleanupScope scope(*this);

    const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);

    // In some cases, the type of the function argument will be different from
    // the type of the pointer. An example of this is
    //   void f(void* arg);
    //   __attribute__((cleanup(f))) void *g;
    //
    // To fix this we insert a bitcast here.
    QualType ArgTy = Info.arg_begin()->type;
    DeclPtr = Builder.CreateBitCast(DeclPtr, ConvertType(ArgTy));

    CallArgList Args;
    Args.push_back(std::make_pair(RValue::get(DeclPtr),
                                  getContext().getPointerType(D.getType())));
    EmitCall(Info, F, Args);
  }

  if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
    CleanupScope scope(*this);
    llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
    V = Builder.CreateLoad(V, false);
    BuildBlockRelease(V);
  }
}
bool FindUninitializedFields::isNonUnionUninit(const TypedValueRegion *R,
                                               FieldChainInfo LocalChain) {
  assert(R->getValueType()->isRecordType() &&
         !R->getValueType()->isUnionType() &&
         "This method only checks non-union record objects!");

  const RecordDecl *RD =
      R->getValueType()->getAs<RecordType>()->getDecl()->getDefinition();
  assert(RD && "Referred record has no definition");

  bool ContainsUninitField = false;

  // Are all of this non-union's fields initialized?
  for (const FieldDecl *I : RD->fields()) {

    const auto FieldVal =
        State->getLValue(I, loc::MemRegionVal(R)).castAs<loc::MemRegionVal>();
    const auto *FR = FieldVal.getRegionAs<FieldRegion>();
    QualType T = I->getType();

    // If LocalChain already contains FR, then we encountered a cyclic
    // reference. In this case, region FR is already under checking at an
    // earlier node in the directed tree.
    if (LocalChain.contains(FR))
      return false;

    if (T->isStructureOrClassType()) {
      if (isNonUnionUninit(FR, LocalChain.add(RegularField(FR))))
        ContainsUninitField = true;
      continue;
    }

    if (T->isUnionType()) {
      if (isUnionUninit(FR)) {
        if (addFieldToUninits(LocalChain.add(RegularField(FR))))
          ContainsUninitField = true;
      } else
        IsAnyFieldInitialized = true;
      continue;
    }

    if (T->isArrayType()) {
      IsAnyFieldInitialized = true;
      continue;
    }

    if (T->isAnyPointerType() || T->isReferenceType() ||
        T->isBlockPointerType()) {
      if (isPointerOrReferenceUninit(FR, LocalChain))
        ContainsUninitField = true;
      continue;
    }

    if (isPrimitiveType(T)) {
      SVal V = State->getSVal(FieldVal);

      if (isPrimitiveUninit(V)) {
        if (addFieldToUninits(LocalChain.add(RegularField(FR))))
          ContainsUninitField = true;
      }
      continue;
    }

    llvm_unreachable("All cases are handled!");
  }

  // Checking bases.
  // FIXME: As of now, because of `willObjectBeAnalyzedLater`, objects whose
  // type is a descendant of another type will emit warnings for uninitialized
  // inherited members.
  // This is not the only way to analyze bases of an object -- if we didn't
  // filter them out, and didn't analyze the bases, this checker would run for
  // each base of the object in order of base initialization and in theory
  // would find every uninitialized field. This approach could also make
  // handling diamond inheritances easier.
  //
  // This rule (that a descendant type's constructor is responsible for
  // initializing inherited data members) is not obvious, and it should be.
  const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
  if (!CXXRD)
    return ContainsUninitField;

  for (const CXXBaseSpecifier &BaseSpec : CXXRD->bases()) {
    const auto *BaseRegion = State->getLValue(BaseSpec, R)
                                 .castAs<loc::MemRegionVal>()
                                 .getRegionAs<TypedValueRegion>();

    if (isNonUnionUninit(BaseRegion, LocalChain))
      ContainsUninitField = true;
  }

  return ContainsUninitField;
}