UninitializedFieldRule::StringRefSet UninitializedFieldRule::GetCandidateFieldsList(
    const RecordDecl* recordDeclaration, ASTContext* context) {
    StringRefSet candidates;

    for (const FieldDecl* fieldDeclaration : recordDeclaration->fields()) {
        // Fields with an in-class initializer are always initialized.
        if (fieldDeclaration->hasInClassInitializer())
            continue;

        // Non-POD types initialize themselves through their constructors.
        QualType type = fieldDeclaration->getType();
        if (! type.isPODType(*context))
            continue;

        // Records with no data members have nothing to leave uninitialized.
        if (IsRecordTypeWithoutDataMembers(type))
            continue;

        if (fieldDeclaration->isAnonymousStructOrUnion())
            continue;

        candidates.insert(fieldDeclaration->getName());
    }

    return candidates;
}
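For illustration, here is a minimal sketch (a hypothetical class, not taken from the rule's sources) of how the filters above classify fields: only POD fields without an in-class initializer survive into the candidate set.

#include <string>

// Hypothetical input illustrating GetCandidateFieldsList's filtering.
class Widget {
    int count;              // kept: POD type, no in-class initializer
    float scale = 1.0f;     // skipped: has an in-class initializer
    std::string name;       // skipped: std::string is not a POD type
    struct Empty {} tag;    // skipped: record type without data members
};
// Expected candidate set: { "count" }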
unsigned clang_isPODType(CXType X) {
  QualType T = GetQualType(X);
  if (T.isNull())
    return 0;

  CXTranslationUnit TU = GetTU(X);

  return T.isPODType(cxtu::getASTUnit(TU)->getASTContext()) ? 1 : 0;
}
unsigned clang_isPODType(CXType X) {
  QualType T = GetQualType(X);
  if (!T.getTypePtrOrNull())
    return 0;

  CXTranslationUnit TU = GetTU(X);
  ASTUnit *AU = static_cast<ASTUnit*>(TU->TUData);

  return T.isPODType(AU->getASTContext()) ? 1 : 0;
}
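Both variants above implement the same libclang entry point (they differ only in how the null type is tested and how the ASTContext is reached), so a client drives them identically through the stable C API. A minimal caller sketch, assuming a source file "input.cc" as a placeholder, that walks the top-level declarations of a translation unit and reports which ones have POD type:

#include <cstdio>
#include <clang-c/Index.h>

static CXChildVisitResult visit(CXCursor cursor, CXCursor /*parent*/,
                                CXClientData /*data*/) {
  if (clang_isPODType(clang_getCursorType(cursor))) {
    CXString name = clang_getCursorSpelling(cursor);
    std::printf("POD: %s\n", clang_getCString(name));
    clang_disposeString(name);
  }
  return CXChildVisit_Continue;  // top-level declarations only, no recursion
}

int main() {
  CXIndex index = clang_createIndex(/*excludeDeclarationsFromPCH=*/0,
                                    /*displayDiagnostics=*/0);
  CXTranslationUnit tu = clang_parseTranslationUnit(
      index, "input.cc", nullptr, 0, nullptr, 0, CXTranslationUnit_None);
  if (tu) {
    clang_visitChildren(clang_getTranslationUnitCursor(tu), visit, nullptr);
    clang_disposeTranslationUnit(tu);
  }
  clang_disposeIndex(index);
  return 0;
}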
bool IsUninitializedPodVariable(const VarDecl* variableDeclaration, ASTContext* context) {
    QualType type = variableDeclaration->getType();
    if (! type.isPODType(*context))
        return false;

    if (IsRecordTypeWithoutDataMembers(type))
        return false;

    // Uninitialized means: no initializer at all, or a record whose
    // "initialization" is only the implicit no-op.
    return (!variableDeclaration->hasInit()) ||
           (type->isRecordType() && HasImplicitInitialization(variableDeclaration));
}
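Under a straightforward reading of the two conditions in that return expression (HasImplicitInitialization is a helper from the same tool, so its exact semantics are assumed here), the check classifies local declarations like this:

// Hypothetical snippet; annotations assume HasImplicitInitialization is true
// exactly when a POD record's "initialization" is the implicit no-op.
struct Point { int x, y; };   // POD record with data members

void example() {
    int n;          // flagged: POD, no initializer at all
    int m = 0;      // not flagged: has an initializer
    Point p;        // flagged: record type, only implicit (no-op) initialization
    Point q = {};   // not flagged: explicitly value-initialized
}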
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
  QualType Ty = D.getType();
  bool isByRef = D.hasAttr<BlocksAttr>();
  bool needsDispose = false;
  unsigned Align = 0;
  bool IsSimpleConstantInitializer = false;

  llvm::Value *DeclPtr;
  if (Ty->isConstantSizeType()) {
    if (!Target.useGlobalsForAutomaticVariables()) {

      // If this value is an array or struct, is POD, and if the initializer is
      // a statically determinable constant, try to optimize it.
      if (D.getInit() && !isByRef &&
          (Ty->isArrayType() || Ty->isRecordType()) &&
          Ty->isPODType() &&
          D.getInit()->isConstantInitializer(getContext())) {
        // If this variable is marked 'const', emit the value as a global.
        if (CGM.getCodeGenOpts().MergeAllConstants &&
            Ty.isConstant(getContext())) {
          EmitStaticBlockVarDecl(D);
          return;
        }

        IsSimpleConstantInitializer = true;
      }

      // A normal fixed sized variable becomes an alloca in the entry block.
      const llvm::Type *LTy = ConvertTypeForMem(Ty);
      if (isByRef)
        LTy = BuildByRefType(&D);
      llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
      Alloc->setName(D.getNameAsString());

      Align = getContext().getDeclAlignInBytes(&D);
      if (isByRef)
        Align = std::max(Align, unsigned(Target.getPointerAlign(0) / 8));
      Alloc->setAlignment(Align);
      DeclPtr = Alloc;
    } else {
      // Targets that don't support recursion emit locals as globals.
      const char *Class =
        D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
      DeclPtr = CreateStaticBlockVarDecl(D, Class,
                                         llvm::GlobalValue::InternalLinkage);
    }

    // FIXME: Can this happen?
    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext);
      llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");

      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);

      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      {
        // Push a cleanup block and restore the stack there.
        DelayedCleanupBlock scope(*this);

        V = Builder.CreateLoad(Stack, "tmp");
        llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
        Builder.CreateCall(F, V);
      }
    }

    // Get the element type.
    const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
    const llvm::Type *LElemPtrTy =
      llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());

    llvm::Value *VLASize = EmitVLASize(Ty);

    // Downcast the VLA size expression
    VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
                                    false, "tmp");

    // Allocate memory for the array.
    llvm::AllocaInst *VLA =
      Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
    VLA->setAlignment(getContext().getDeclAlignInBytes(&D));

    DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
  }

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for local var declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    assert(HaveInsertPoint() && "Unexpected unreachable point!");

    DI->setLocation(D.getLocation());
    if (Target.useGlobalsForAutomaticVariables()) {
      DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
    } else
      DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
  }

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!ContainsLabel(Init))
      Init = 0;
    else
      EnsureInsertPoint();
  }

  if (Init) {
    llvm::Value *Loc = DeclPtr;
    if (isByRef)
      Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
                                    D.getNameAsString());

    bool isVolatile =
      getContext().getCanonicalType(D.getType()).isVolatileQualified();

    // If the initializer was a simple constant initializer, we can optimize it
    // in various ways.
    if (IsSimpleConstantInitializer) {
      llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);
      assert(Init != 0 && "Wasn't a simple constant init?");

      llvm::Value *AlignVal =
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Align);
      const llvm::Type *IntPtr =
        llvm::IntegerType::get(VMContext, LLVMPointerWidth);
      llvm::Value *SizeVal =
        llvm::ConstantInt::get(IntPtr, getContext().getTypeSizeInBytes(Ty));

      const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
      if (Loc->getType() != BP)
        Loc = Builder.CreateBitCast(Loc, BP, "tmp");

      // If the initializer is all zeros, codegen with memset.
      if (isa<llvm::ConstantAggregateZero>(Init)) {
        llvm::Value *Zero =
          llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0);
        Builder.CreateCall4(CGM.getMemSetFn(), Loc, Zero, SizeVal, AlignVal);
      } else {
        // Otherwise, create a temporary global with the initializer then
        // memcpy from the global to the alloca.
        std::string Name = GetStaticDeclName(*this, D, ".");
        llvm::GlobalVariable *GV =
          new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true,
                                   llvm::GlobalValue::InternalLinkage,
                                   Init, Name, 0, false, 0);
        GV->setAlignment(Align);

        llvm::Value *SrcPtr = GV;
        if (SrcPtr->getType() != BP)
          SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");

        Builder.CreateCall4(CGM.getMemCpyFn(), Loc, SrcPtr, SizeVal, AlignVal);
      }
    } else if (Ty->isReferenceType()) {
      RValue RV = EmitReferenceBindingToExpr(Init, Ty, /*IsInitializer=*/true);
      EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
    } else if (!hasAggregateLLVMType(Init->getType())) {
      llvm::Value *V = EmitScalarExpr(Init);
      EmitStoreOfScalar(V, Loc, isVolatile, D.getType());
    } else if (Init->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(Init, Loc, isVolatile);
    } else {
      EmitAggExpr(Init, Loc, isVolatile);
    }
  }

  if (isByRef) {
    const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext);

    EnsureInsertPoint();
    llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
    llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
    llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
    llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
    llvm::Value *V;
    int flag = 0;
    int flags = 0;

    needsDispose = true;

    if (Ty->isBlockPointerType()) {
      flag |= BLOCK_FIELD_IS_BLOCK;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    } else if (BlockRequiresCopying(Ty)) {
      flag |= BLOCK_FIELD_IS_OBJECT;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    }

    // FIXME: Someone double check this.
    if (Ty.isObjCGCWeak())
      flag |= BLOCK_FIELD_IS_WEAK;

    int isa = 0;
    if (flag & BLOCK_FIELD_IS_WEAK)
      isa = 1;
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa);
    V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
    Builder.CreateStore(V, isa_field);

    Builder.CreateStore(DeclPtr, forwarding_field);

    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags);
    Builder.CreateStore(V, flags_field);

    const llvm::Type *V1;
    V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                               (CGM.getTargetData().getTypeStoreSizeInBits(V1)
                                / 8));
    Builder.CreateStore(V, size_field);

    if (flags & BLOCK_HAS_COPY_DISPOSE) {
      BlockHasCopyDispose = true;
      llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
      Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag, Align),
                          copy_helper);

      llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
      Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
                                                  Align),
                          destroy_helper);
    }
  }

  // Handle CXX destruction of variables.
  QualType DtorTy(Ty);
  while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
    DtorTy = getContext().getBaseElementType(Array);
  if (const RecordType *RT = DtorTy->getAs<RecordType>())
    if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      if (!ClassDecl->hasTrivialDestructor()) {
        const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
        assert(D && "EmitLocalBlockVarDecl - destructor is null");

        if (const ConstantArrayType *Array =
              getContext().getAsConstantArrayType(Ty)) {
          {
            DelayedCleanupBlock Scope(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr = Builder.CreateBitCast(DeclPtr, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);

            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }
          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            QualType BaseElementTy = getContext().getBaseElementType(Array);
            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
            BasePtr = llvm::PointerType::getUnqual(BasePtr);
            llvm::Value *BaseAddrPtr = Builder.CreateBitCast(DeclPtr, BasePtr);
            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
          }
        } else {
          {
            DelayedCleanupBlock Scope(*this);
            EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);

            // Make sure to jump to the exit block.
            EmitBranch(Scope.getCleanupExitBlock());
          }
          if (Exceptions) {
            EHCleanupBlock Cleanup(*this);
            EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
          }
        }
      }
    }

  // Handle the cleanup attribute
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant* F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);

    // In some cases, the type of the function argument will be different from
    // the type of the pointer. An example of this is
    //   void f(void* arg);
    //   __attribute__((cleanup(f))) void *g;
    //
    // To fix this we insert a bitcast here.
    QualType ArgTy = Info.arg_begin()->type;
    {
      DelayedCleanupBlock scope(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                          ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, Args);
    }

    if (Exceptions) {
      EHCleanupBlock Cleanup(*this);

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
                                                          ConvertType(ArgTy))),
                                    getContext().getPointerType(D.getType())));
      EmitCall(Info, F, Args);
    }
  }

  if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
    {
      DelayedCleanupBlock scope(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }

    // FIXME: Turn this on and audit the codegen
    if (0 && Exceptions) {
      EHCleanupBlock Cleanup(*this);
      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
      V = Builder.CreateLoad(V);
      BuildBlockRelease(V);
    }
  }
}
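To make the major branches above concrete, here is a hypothetical source snippet that exercises three of them when compiled with Clang (the __block path needs -fblocks; release_buf is an assumed cleanup callee, not a real runtime function):

void release_buf(void **p);  // assumed callee for the cleanup attribute

void demo(int n) {
    __attribute__((cleanup(release_buf))) void *g = 0;  // CleanupAttr path
    __block int counter = 0;  // BlocksAttr: byref layout, copy/dispose helpers
    int vla[n];               // non-constant size: stacksave/stackrestore + "vla" alloca
    vla[0] = counter;
}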
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  QualType AllocType = E->getAllocatedType();
  FunctionDecl *NewFD = E->getOperatorNew();
  const FunctionProtoType *NewFTy =
    NewFD->getType()->getAs<FunctionProtoType>();

  CallArgList NewArgs;

  // The allocation size is the first argument.
  QualType SizeTy = getContext().getSizeType();

  llvm::Value *NumElements = 0;

  llvm::Value *AllocSize = EmitCXXNewAllocSize(*this, E, NumElements);

  NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
    QualType ArgType = NewFTy->getArgType(i);

    assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
           getTypePtr() ==
           getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
           "type mismatch in call argument!");

    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));
  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
         "Extra arguments in non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
       NewArg != NewArgEnd; ++NewArg) {
    QualType ArgType = NewArg->getType();
    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));
  }

  // Emit the call to new.
  RValue RV =
    EmitCall(CGM.getTypes().getFunctionInfo(NewFTy->getResultType(), NewArgs),
             CGM.GetAddrOfFunction(NewFD), NewArgs, NewFD);

  // If an allocation function is declared with an empty exception
  // specification it returns null to indicate failure to allocate storage.
  // [expr.new]p13.
  // (We don't need to check for null when there's no new initializer and
  // we're allocating a POD type).
  bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
    !(AllocType->isPODType() && !E->hasInitializer());

  llvm::BasicBlock *NewNull = 0;
  llvm::BasicBlock *NewNotNull = 0;
  llvm::BasicBlock *NewEnd = 0;

  llvm::Value *NewPtr = RV.getScalarVal();

  if (NullCheckResult) {
    NewNull = createBasicBlock("new.null");
    NewNotNull = createBasicBlock("new.notnull");
    NewEnd = createBasicBlock("new.end");

    llvm::Value *IsNull =
      Builder.CreateICmpEQ(NewPtr,
                           llvm::Constant::getNullValue(NewPtr->getType()),
                           "isnull");

    Builder.CreateCondBr(IsNull, NewNull, NewNotNull);
    EmitBlock(NewNotNull);
  }

  if (uint64_t CookiePadding = CalculateCookiePadding(getContext(), E)) {
    uint64_t CookieOffset =
      CookiePadding - getContext().getTypeSize(SizeTy) / 8;

    llvm::Value *NumElementsPtr =
      Builder.CreateConstInBoundsGEP1_64(NewPtr, CookieOffset);

    NumElementsPtr =
      Builder.CreateBitCast(NumElementsPtr,
                            ConvertType(SizeTy)->getPointerTo());
    Builder.CreateStore(NumElements, NumElementsPtr);

    // Now add the padding to the new ptr.
    NewPtr = Builder.CreateConstInBoundsGEP1_64(NewPtr, CookiePadding);
  }

  NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));

  EmitNewInitializer(*this, E, NewPtr, NumElements);

  if (NullCheckResult) {
    Builder.CreateBr(NewEnd);
    NewNotNull = Builder.GetInsertBlock();
    EmitBlock(NewNull);
    Builder.CreateBr(NewEnd);
    EmitBlock(NewEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
    PHI->reserveOperandSpace(2);
    PHI->addIncoming(NewPtr, NewNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()), NewNull);

    NewPtr = PHI;
  }

  return NewPtr;
}
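As a worked illustration of the NullCheckResult condition, here is a hypothetical snippet (the class-level operator new is just a convenient way to get an empty exception specification on the allocation function):

#include <cstddef>
#include <cstdlib>

struct Pod {
    int x;
    // Empty exception specification: this allocator reports failure by
    // returning null instead of throwing.
    static void* operator new(std::size_t n) throw() { return std::malloc(n); }
};

struct Obj {
    Obj() : x(0) {}  // user-declared constructor makes Obj non-POD
    int x;
    static void* operator new(std::size_t n) throw() { return std::malloc(n); }
};

void demo() {
    Pod *p = new Pod;    // no null check: POD type and no new-initializer
    Pod *q = new Pod();  // null check: the new-initializer must not run on null
    Obj *r = new Obj;    // null check: the constructor must not run on null
    (void)p; (void)q; (void)r;
}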