/// Write an Itanium-ABI array cookie into a new[] allocation and return a
/// pointer to the payload that follows it.  The cookie occupies
/// max(sizeof(size_t), alignof(element)) bytes, with the element count stored
/// right-justified in the last sizeof(size_t) bytes.
llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                                  llvm::Value *NewPtr,
                                                  llvm::Value *NumElements,
                                                  const CXXNewExpr *expr,
                                                  QualType ElementType) {
  assert(NeedsArrayCookie(expr));

  // Keep all derived pointer types in the allocation's address space.
  unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();

  ASTContext &Ctx = getContext();
  QualType SizeTy = Ctx.getSizeType();
  CharUnits SizeSize = Ctx.getTypeSizeInChars(SizeTy);

  // The size of the cookie.
  CharUnits CookieSize =
    std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));

  // Compute an offset to the cookie: the count goes in the *last*
  // sizeof(size_t) bytes of the cookie, so skip any leading padding.
  llvm::Value *CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_64(CookiePtr,
                                                 CookieOffset.getQuantity());

  // Write the number of elements into the appropriate slot.
  llvm::Value *NumElementsPtr
    = CGF.Builder.CreateBitCast(CookiePtr,
                                CGF.ConvertType(SizeTy)->getPointerTo(AS));
  CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
                                                CookieSize.getQuantity());
}
// Scale a base value by a scaling factor, and return the scaled // value as an SVal. Used by 'computeOffset'. static inline SVal scaleValue(ProgramStateRef state, NonLoc baseVal, CharUnits scaling, SValBuilder &sb) { return sb.evalBinOpNN(state, BO_Mul, baseVal, sb.makeArrayIndex(scaling.getQuantity()), sb.getArrayIndexType()); }
/// Emit the body of a legacy CUDA kernel launch stub: one
/// cudaSetupArgument call per kernel argument (walking a running byte
/// offset, aligned per argument), followed by a cudaLaunch call.  Any
/// non-zero return from cudaSetupArgument branches straight to the end
/// block, skipping the remaining setup calls and the launch.
void CGNVCUDARuntime::emitDeviceStubBodyLegacy(CodeGenFunction &CGF,
                                               FunctionArgList &Args) {
  // Emit a call to cudaSetupArgument for each arg in Args.
  llvm::FunctionCallee cudaSetupArgFn = getSetupArgumentFn();
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
  CharUnits Offset = CharUnits::Zero();
  for (const VarDecl *A : Args) {
    CharUnits TyWidth, TyAlign;
    std::tie(TyWidth, TyAlign) =
        CGM.getContext().getTypeInfoInChars(A->getType());
    // Arguments are laid out at their natural alignment within the
    // argument buffer.
    Offset = Offset.alignTo(TyAlign);
    // Fix: renamed from 'Args' so it no longer shadows the FunctionArgList
    // parameter being iterated above.
    llvm::Value *SetupArgs[] = {
        CGF.Builder.CreatePointerCast(CGF.GetAddrOfLocalVar(A).getPointer(),
                                      VoidPtrTy),
        llvm::ConstantInt::get(SizeTy, TyWidth.getQuantity()),
        llvm::ConstantInt::get(SizeTy, Offset.getQuantity()),
    };
    llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(cudaSetupArgFn, SetupArgs);
    llvm::Constant *Zero = llvm::ConstantInt::get(IntTy, 0);
    // A non-zero result means setup failed; bail out of the stub.
    llvm::Value *CBZero = CGF.Builder.CreateICmpEQ(CB, Zero);
    llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
    CGF.Builder.CreateCondBr(CBZero, NextBlock, EndBlock);
    CGF.EmitBlock(NextBlock);
    Offset += TyWidth;
  }

  // Emit the call to cudaLaunch, keyed by the stub function itself.
  llvm::FunctionCallee cudaLaunchFn = getLaunchFn();
  llvm::Value *Arg = CGF.Builder.CreatePointerCast(CGF.CurFn, CharPtrTy);
  CGF.EmitRuntimeCallOrInvoke(cudaLaunchFn, Arg);
  CGF.EmitBranch(EndBlock);

  CGF.EmitBlock(EndBlock);
}
/// Compute the LLVM storage type for a single union field.  Bit-fields get a
/// byte-array of the rounded-up storage size (plus recorded CGBitFieldInfo);
/// regular fields use their normal memory type.  Returns null for zero-width
/// bit-fields, which occupy no storage.
llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  // Every union field starts at offset 0.
  Fields[Field] = 0;
  if (Field->isBitField()) {
    uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());

    // Ignore zero sized bit fields.
    if (FieldSize == 0)
      return 0;

    // Round the width up to a whole number of chars for the backing storage.
    unsigned StorageBits = llvm::RoundUpToAlignment(
        FieldSize, Types.getTarget().getCharAlign());
    CharUnits NumBytesToAppend =
        Types.getContext().toCharUnitsFromBits(StorageBits);

    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    if (NumBytesToAppend > CharUnits::One())
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());

    // Add the bit field info.
    BitFields[Field] = CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize,
                                                StorageBits,
                                                Alignment.getQuantity());
    return FieldTy;
  }

  // This is a regular union field.
  return Types.ConvertTypeForMem(Field->getType());
}
/// Model sizeof/alignof-style expressions by binding their (compile-time
/// constant) value to the expression in the program state.  Bails out without
/// generating a node for VLA operands and ObjC object types, whose sizes the
/// analyzer does not model.
void ExprEngine::
VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
                              ExplodedNode *Pred,
                              ExplodedNodeSet &Dst) {
  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);

  QualType T = Ex->getTypeOfArgument();

  if (Ex->getKind() == UETT_SizeOf) {
    if (!T->isIncompleteType() && !T->isConstantSizeType()) {
      assert(T->isVariableArrayType() && "Unknown non-constant-sized type.");

      // FIXME: Add support for VLA type arguments and VLA expressions.
      // When that happens, we should probably refactor VLASizeChecker's code.
      return;
    }
    else if (T->getAs<ObjCObjectType>()) {
      // Some code tries to take the sizeof an ObjCObjectType, relying that
      // the compiler has laid out its representation.  Just report Unknown
      // for these.
      return;
    }
  }

  // The value is a known integer constant; bind it to the expression.
  APSInt Value = Ex->EvaluateKnownConstInt(getContext());
  CharUnits amt = CharUnits::fromQuantity(Value.getZExtValue());

  ProgramStateRef state = Pred->getState();
  state = state->BindExpr(Ex, Pred->getLocationContext(),
                          svalBuilder.makeIntVal(amt.getQuantity(),
                                                 Ex->getType()));
  Bldr.generateNode(Ex, Pred, state);
}
/// Write an ARM-ABI array cookie into a new[] allocation and return a pointer
/// to the payload.  Unlike the generic Itanium cookie, the ARM cookie is two
/// size_t slots at the very start of the buffer: the element size followed by
/// the element count.
llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                              llvm::Value *NewPtr,
                                              llvm::Value *NumElements,
                                              const CXXNewExpr *expr,
                                              QualType ElementType) {
  assert(NeedsArrayCookie(expr));

  // NewPtr is a char*.
  unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeSize = Ctx.getTypeSizeInChars(Ctx.getSizeType());
  llvm::IntegerType *SizeTy =
    cast<llvm::IntegerType>(CGF.ConvertType(Ctx.getSizeType()));

  // The cookie is always at the start of the buffer.
  llvm::Value *CookiePtr = NewPtr;

  // The first element is the element size.
  CookiePtr = CGF.Builder.CreateBitCast(CookiePtr, SizeTy->getPointerTo(AS));
  llvm::Value *ElementSize = llvm::ConstantInt::get(SizeTy,
                          Ctx.getTypeSizeInChars(ElementType).getQuantity());
  CGF.Builder.CreateStore(ElementSize, CookiePtr);

  // The second element is the element count.
  CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_32(CookiePtr, 1);
  CGF.Builder.CreateStore(NumElements, CookiePtr);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  CharUnits CookieSize = 2 * SizeSize;
  return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
                                                CookieSize.getQuantity());
}
/// Walk up the super-region chain accumulating a constant bit offset for this
/// region.  Stops (with the accumulated offset) at base regions such as
/// variables and symbols; returns RegionOffset(0) whenever a precise offset
/// cannot be computed (unknown region kind, incomplete type, or a symbolic
/// element index).
RegionOffset MemRegion::getAsOffset() const {
  const MemRegion *R = this;
  int64_t Offset = 0;  // accumulated offset, in bits

  while (1) {
    switch (R->getKind()) {
    default:
      return RegionOffset(0);
    case SymbolicRegionKind:
    case AllocaRegionKind:
    case CompoundLiteralRegionKind:
    case CXXThisRegionKind:
    case StringRegionKind:
    case VarRegionKind:
    case CXXTempObjectRegionKind:
      // Reached a base region: we're done.
      goto Finish;
    case ElementRegionKind: {
      const ElementRegion *ER = cast<ElementRegion>(R);
      QualType EleTy = ER->getValueType();

      if (!IsCompleteType(getContext(), EleTy))
        return RegionOffset(0);

      SVal Index = ER->getIndex();
      if (const nonloc::ConcreteInt *CI=dyn_cast<nonloc::ConcreteInt>(&Index)) {
        int64_t i = CI->getValue().getSExtValue();
        CharUnits Size = getContext().getTypeSizeInChars(EleTy);
        // Convert the element byte size to bits.
        // NOTE(review): hard-codes 8 bits per char; presumably fine for the
        // supported targets, but getCharWidth() would be the exact value.
        Offset += i * Size.getQuantity() * 8;
      } else {
        // We cannot compute offset for non-concrete index.
        return RegionOffset(0);
      }
      R = ER->getSuperRegion();
      break;
    }
    case FieldRegionKind: {
      const FieldRegion *FR = cast<FieldRegion>(R);
      const RecordDecl *RD = FR->getDecl()->getParent();
      if (!RD->isCompleteDefinition())
        // We cannot compute offset for incomplete type.
        return RegionOffset(0);
      // Get the field number by scanning the record's fields in order.
      unsigned idx = 0;
      for (RecordDecl::field_iterator FI = RD->field_begin(),
             FE = RD->field_end(); FI != FE; ++FI, ++idx)
        if (FR->getDecl() == *FI)
          break;

      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
      // This is offset in bits.
      Offset += Layout.getFieldOffset(idx);
      R = FR->getSuperRegion();
      break;
    }
    }
  }

 Finish:
  return RegionOffset(R, Offset);
}
/// Form an lvalue for an ivar located at a (possibly dynamic) byte offset
/// from an object base pointer: compute (type*)((char*)BaseValue + Offset).
/// Non-bit-field ivars produce a simple lvalue; bit-field ivars get a
/// freshly-built CGBitFieldInfo describing a conservative access strategy.
LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
                                               const ObjCInterfaceDecl *OID,
                                               llvm::Value *BaseValue,
                                               const ObjCIvarDecl *Ivar,
                                               unsigned CVRQualifiers,
                                               llvm::Value *Offset) {
  // Compute (type*) ( (char *) BaseValue + Offset)
  QualType IvarTy = Ivar->getType();
  llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
  llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
  V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");

  if (!Ivar->isBitField()) {
    V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
    LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
    LV.getQuals().addCVRQualifiers(CVRQualifiers);
    return LV;
  }

  // We need to compute an access strategy for this bit-field. We are given the
  // offset to the first byte in the bit-field, the sub-byte offset is taken
  // from the original layout. We reuse the normal bit-field access strategy by
  // treating this as an access to a struct where the bit-field is in byte 0,
  // and adjust the containing type size as appropriate.
  //
  // FIXME: Note that currently we make a very conservative estimate of the
  // alignment of the bit-field, because (a) it is not clear what guarantees the
  // runtime makes us, and (b) we don't have a way to specify that the struct is
  // at an alignment plus offset.
  //
  // Note, there is a subtle invariant here: we can only call this routine on
  // non-synthesized ivars but we may be called for synthesized ivars. However,
  // a synthesized ivar can never be a bit-field, so this is safe.
  uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar);
  // Only the sub-byte part matters: the byte part is already in Offset.
  uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
  uint64_t AlignmentBits = CGF.CGM.getContext().getTargetInfo().getCharAlign();
  uint64_t BitFieldSize = Ivar->getBitWidthValue(CGF.getContext());
  CharUnits StorageSize = CGF.CGM.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(BitOffset + BitFieldSize, AlignmentBits));
  CharUnits Alignment = CGF.CGM.getContext().toCharUnitsFromBits(AlignmentBits);

  // Allocate a new CGBitFieldInfo object to describe this access.
  //
  // FIXME: This is incredibly wasteful, these should be uniqued or part of some
  // layout object. However, this is blocked on other cleanups to the
  // Objective-C code, so for now we just live with allocating a bunch of these
  // objects.
  CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
      CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset,
                               BitFieldSize,
                               CGF.CGM.getContext().toBits(StorageSize),
                               Alignment.getQuantity()));

  V = CGF.Builder.CreateBitCast(V,
                   llvm::Type::getIntNPtrTy(CGF.getLLVMContext(),
                                            Info->StorageSize));
  return LValue::MakeBitfield(V, *Info,
                              IvarTy.withCVRQualifiers(CVRQualifiers),
                              Alignment);
}
/// Handle one matched slicing site: diagnose sliced virtual overrides, then
/// warn when the derived object carries more member-variable state (by data
/// size) than the base it is being sliced to.
void SlicingCheck::check(const MatchFinder::MatchResult &Result) {
  const auto *BaseDecl = Result.Nodes.getNodeAs<CXXRecordDecl>("BaseDecl");
  const auto *DerivedDecl =
      Result.Nodes.getNodeAs<CXXRecordDecl>("DerivedDecl");
  const auto *Call = Result.Nodes.getNodeAs<Expr>("Call");
  assert(BaseDecl != nullptr);
  assert(DerivedDecl != nullptr);
  assert(Call != nullptr);

  // Warn when slicing the vtable.
  // We're looking through all the methods in the derived class and see if they
  // override some methods in the base class.
  // It's not enough to just test whether the class is polymorphic because we
  // would be fine slicing B to A if no method in B (or its bases) overrides
  // anything in A:
  //   class A { virtual void f(); };
  //   class B : public A {};
  // because in that case calling A::f is the same as calling B::f.
  DiagnoseSlicedOverriddenMethods(*Call, *DerivedDecl, *BaseDecl);

  // Warn when slicing member variables.
  const auto &BaseLayout =
      BaseDecl->getASTContext().getASTRecordLayout(BaseDecl);
  const auto &DerivedLayout =
      DerivedDecl->getASTContext().getASTRecordLayout(DerivedDecl);
  // Bytes of derived state that a by-value conversion to the base discards.
  const CharUnits StateSize =
      DerivedLayout.getDataSize() - BaseLayout.getDataSize();
  if (StateSize.isPositive()) {
    diag(Call->getExprLoc(), "slicing object from type %0 to %1 discards "
                             "%2 bytes of state")
        << DerivedDecl << BaseDecl << static_cast<int>(StateSize.getQuantity());
  }
}
/// Build the constant {ptr, adj} pair representing a pointer to member
/// function under the Itanium ABI (with the ARM variant's alternative
/// encoding).  Virtual functions encode a vtable offset; non-virtual
/// functions encode the function address, with the virtual/non-virtual
/// discriminator carried in ptr (Itanium) or the low bit of adj (ARM).
llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");
  MD = MD->getCanonicalDecl();

  CodeGenTypes &Types = CGM.getTypes();
  llvm::Type *ptrdiff_t = getPtrDiffTy();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getVTableContext().getMethodVTableIndex(MD);

    const ASTContext &Context = getContext();
    // Byte offset into the vtable = slot index * pointer width.
    CharUnits PointerWidth =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
    uint64_t VTableOffset = (Index * PointerWidth.getQuantity());

    if (IsARM) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = ptrdiff_t;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, ptrdiff_t);
    // ARM stores twice the adjustment (low bit = 0 marks non-virtual).
    MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t,
                                       (IsARM ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}
// Returns an adjusted base cast to i8*, since we do more address arithmetic on
// it.
/// Adjust an object pointer to the virtual base it is being accessed through,
/// using the MS ABI vbtable.  When VBPtrOffset is non-null (unspecified
/// inheritance model), the adjustment is guarded at runtime: a zero virtual
/// base adjustment offset means "no vbtable lookup", and the two paths are
/// merged with a PHI.
llvm::Value *
MicrosoftCXXABI::AdjustVirtualBase(CodeGenFunction &CGF,
                                   const CXXRecordDecl *RD,
                                   llvm::Value *Base,
                                   llvm::Value *VirtualBaseAdjustmentOffset,
                                   llvm::Value *VBPtrOffset) {
  CGBuilderTy &Builder = CGF.Builder;
  Base = Builder.CreateBitCast(Base, CGM.Int8PtrTy);
  llvm::BasicBlock *OriginalBB = 0;
  llvm::BasicBlock *SkipAdjustBB = 0;
  llvm::BasicBlock *VBaseAdjustBB = 0;

  // In the unspecified inheritance model, there might not be a vbtable at all,
  // in which case we need to skip the virtual base lookup.  If there is a
  // vbtable, the first entry is a no-op entry that gives back the original
  // base, so look for a virtual base adjustment offset of zero.
  if (VBPtrOffset) {
    OriginalBB = Builder.GetInsertBlock();
    VBaseAdjustBB = CGF.createBasicBlock("memptr.vadjust");
    SkipAdjustBB = CGF.createBasicBlock("memptr.skip_vadjust");
    llvm::Value *IsVirtual =
      Builder.CreateICmpNE(VirtualBaseAdjustmentOffset, getZeroInt(),
                           "memptr.is_vbase");
    Builder.CreateCondBr(IsVirtual, VBaseAdjustBB, SkipAdjustBB);
    CGF.EmitBlock(VBaseAdjustBB);
  }

  // If we weren't given a dynamic vbptr offset, RD should be complete and we'll
  // know the vbptr offset.
  if (!VBPtrOffset) {
    CharUnits offs = getContext().getASTRecordLayout(RD).getVBPtrOffset();
    VBPtrOffset = llvm::ConstantInt::get(CGM.IntTy, offs.getQuantity());
  }

  // Load the vbtable pointer from the vbtable offset in the instance.
  llvm::Value *VBPtr =
    Builder.CreateInBoundsGEP(Base, VBPtrOffset, "memptr.vbptr");
  llvm::Value *VBTable =
    Builder.CreateBitCast(VBPtr, CGM.Int8PtrTy->getPointerTo(0));
  VBTable = Builder.CreateLoad(VBTable, "memptr.vbtable");

  // Load an i32 offset from the vb-table.
  llvm::Value *VBaseOffs =
    Builder.CreateInBoundsGEP(VBTable, VirtualBaseAdjustmentOffset);
  VBaseOffs = Builder.CreateBitCast(VBaseOffs, CGM.Int32Ty->getPointerTo(0));
  VBaseOffs = Builder.CreateLoad(VBaseOffs, "memptr.vbase_offs");

  // Add it to VBPtr.  GEP will sign extend the i32 value for us.
  llvm::Value *AdjustedBase = Builder.CreateInBoundsGEP(VBPtr, VBaseOffs);

  // Merge control flow with the case where we didn't have to adjust.
  if (VBaseAdjustBB) {
    Builder.CreateBr(SkipAdjustBB);
    CGF.EmitBlock(SkipAdjustBB);
    llvm::PHINode *Phi = Builder.CreatePHI(CGM.Int8PtrTy, 2, "memptr.base");
    Phi->addIncoming(Base, OriginalBB);
    Phi->addIncoming(AdjustedBase, VBaseAdjustBB);
    return Phi;
  }
  return AdjustedBase;
}
/// Fill in the initializer, linkage, and visibility of this vbtable's global.
/// The table holds i32 offsets: first the negated offset from the subobject's
/// vbptr back to the subobject, then one entry per virtual base of
/// ReusingBase, each relative to the vbptr.
void VBTableInfo::EmitVBTableDefinition(
    CodeGenModule &CGM, const CXXRecordDecl *RD,
    llvm::GlobalVariable::LinkageTypes Linkage) const {
  assert(RD->getNumVBases() && ReusingBase->getNumVBases() &&
         "should only emit vbtables for classes with vbtables");

  const ASTRecordLayout &BaseLayout =
    CGM.getContext().getASTRecordLayout(VBPtrSubobject.getBase());
  const ASTRecordLayout &DerivedLayout =
    CGM.getContext().getASTRecordLayout(RD);

  SmallVector<llvm::Constant *, 4> Offsets;

  // The offset from ReusingBase's vbptr to itself always leads.
  CharUnits VBPtrOffset = BaseLayout.getVBPtrOffset();
  Offsets.push_back(
      llvm::ConstantInt::get(CGM.IntTy, -VBPtrOffset.getQuantity()));

  // These are laid out in the same order as in Itanium, which is the same as
  // the order of the vbase iterator.
  for (CXXRecordDecl::base_class_const_iterator I =
         ReusingBase->vbases_begin(),
       E = ReusingBase->vbases_end(); I != E; ++I) {
    const CXXRecordDecl *VBase = I->getType()->getAsCXXRecordDecl();
    CharUnits Offset = DerivedLayout.getVBaseClassOffset(VBase);
    assert(!Offset.isNegative());
    // Make it relative to the subobject vbptr.
    Offset -= VBPtrSubobject.getBaseOffset() + VBPtrOffset;
    Offsets.push_back(llvm::ConstantInt::get(CGM.IntTy, Offset.getQuantity()));
  }

  // The declared array type must already agree with the number of entries.
  assert(Offsets.size() ==
         cast<llvm::ArrayType>(cast<llvm::PointerType>(GV->getType())
                               ->getElementType())->getNumElements());
  llvm::ArrayType *VBTableType =
    llvm::ArrayType::get(CGM.IntTy, Offsets.size());
  llvm::Constant *Init = llvm::ConstantArray::get(VBTableType, Offsets);
  GV->setInitializer(Init);

  // Set the correct linkage.
  GV->setLinkage(Linkage);

  // Set the right visibility.
  CGM.setTypeVisibility(GV, RD, CodeGenModule::TVK_ForVTable);
}
/// Resolve a dotted member reference ("Base.a.b") from MS inline assembly and
/// accumulate the total byte offset of the final field into \p Offset.
/// Returns true on any lookup failure (caller reports the error).
bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
                                unsigned &Offset, SourceLocation AsmLoc) {
  Offset = 0;
  SmallVector<StringRef, 2> Members;
  // Member may itself be a dotted path; walk each component in turn.
  Member.split(Members, ".");

  LookupResult BaseResult(*this, &Context.Idents.get(Base), SourceLocation(),
                          LookupOrdinaryName);

  if (!LookupName(BaseResult, getCurScope()))
    return true;

  if(!BaseResult.isSingleResult())
    return true;
  NamedDecl *FoundDecl = BaseResult.getFoundDecl();
  for (StringRef NextMember : Members) {
    const RecordType *RT = nullptr;
    // The current decl may be a variable, typedef, type, or field; pull out
    // the record type it designates, if any.
    if (VarDecl *VD = dyn_cast<VarDecl>(FoundDecl))
      RT = VD->getType()->getAs<RecordType>();
    else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(FoundDecl)) {
      MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false);
      RT = TD->getUnderlyingType()->getAs<RecordType>();
    } else if (TypeDecl *TD = dyn_cast<TypeDecl>(FoundDecl))
      RT = TD->getTypeForDecl()->getAs<RecordType>();
    else if (FieldDecl *TD = dyn_cast<FieldDecl>(FoundDecl))
      RT = TD->getType()->getAs<RecordType>();
    if (!RT)
      return true;

    if (RequireCompleteType(AsmLoc, QualType(RT, 0),
                            diag::err_asm_incomplete_type))
      return true;

    LookupResult FieldResult(*this, &Context.Idents.get(NextMember),
                             SourceLocation(), LookupMemberName);

    if (!LookupQualifiedName(FieldResult, RT->getDecl()))
      return true;
    if (!FieldResult.isSingleResult())
      return true;
    FoundDecl = FieldResult.getFoundDecl();

    // FIXME: Handle IndirectFieldDecl?
    FieldDecl *FD = dyn_cast<FieldDecl>(FoundDecl);
    if (!FD)
      return true;

    // Accumulate this field's byte offset within its record.
    const ASTRecordLayout &RL = Context.getASTRecordLayout(RT->getDecl());
    unsigned i = FD->getFieldIndex();
    CharUnits Result = Context.toCharUnitsFromBits(RL.getFieldOffset(i));
    Offset += (unsigned)Result.getQuantity();
  }

  return false;
}
/// \brief Perform the final move to DestPtr if RequiresGCollection is set. /// /// The idea is that you do something like this: /// RValue Result = EmitSomething(..., getReturnValueSlot()); /// EmitGCMove(E, Result); /// If GC doesn't interfere, this will cause the result to be emitted /// directly into the return value slot. If GC does interfere, a final /// move will be performed. void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) { if (Dest.requiresGCollection()) { CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType()); const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType()); llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity()); CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, Dest.getAddr(), Src.getAggregateAddr(), SizeVal); } }
/// Recover the array cookie written by InitializeArrayCookie: given the
/// payload pointer passed to delete[], compute the original allocation
/// pointer, the stored element count, and the cookie size (all out-params).
void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
                                    llvm::Value *Ptr,
                                    const CXXDeleteExpr *expr,
                                    QualType ElementType,
                                    llvm::Value *&NumElements,
                                    llvm::Value *&AllocPtr,
                                    CharUnits &CookieSize) {
  // Derive a char* in the same address space as the pointer.
  unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
  llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);

  // If we don't need an array cookie, bail out early.
  if (!NeedsArrayCookie(expr, ElementType)) {
    AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
    NumElements = 0;
    CookieSize = CharUnits::Zero();
    return;
  }

  QualType SizeTy = getContext().getSizeType();
  CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
  llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);

  // Mirrors the layout chosen by InitializeArrayCookie: the cookie size is
  // max(sizeof(size_t), alignof(element)), count right-justified.
  CookieSize
    = std::max(SizeSize, getContext().getTypeAlignInChars(ElementType));

  CharUnits NumElementsOffset = CookieSize - SizeSize;

  // Compute the allocated pointer by backing up over the cookie.
  AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
  AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
                                                    -CookieSize.getQuantity());

  llvm::Value *NumElementsPtr = AllocPtr;
  if (!NumElementsOffset.isZero())
    NumElementsPtr =
      CGF.Builder.CreateConstInBoundsGEP1_64(NumElementsPtr,
                                             NumElementsOffset.getQuantity());
  NumElementsPtr =
    CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo(AS));
  NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
}
/// Print this element region.  Under FSS_FORMAT the region renders as
/// "(super + (index * elemsize))", collapsing to just the super-region when
/// the index is a zero constant or the element size is zero; otherwise the
/// default "element{super,index,type}" form is used.
void ElementRegion::dumpToStream(raw_ostream &os, int level) const {
#ifdef FSS_FORMAT
  ASTContext &C = getContext();
  QualType elemType = getElementType();
  CharUnits size = C.getTypeSizeInChars(elemType);

  // A zero index or zero-sized element contributes no offset.
  const bool trivialOffset =
      Index.isZeroConstant() || size.getQuantity() == 0;

  if (trivialOffset) {
    superRegion->dumpToStream(os, level + 1);
    return;
  }

  os << '(';
  superRegion->dumpToStream(os, level + 1);
  os << " + " << '(' << Index << " * " << size.getQuantity() << "))";
#else
  os << "element{" << superRegion << ','
     << Index << ',' << getElementType().getAsString() << '}';
#endif
}
/// Return the extent (size in bytes) of this region as an SVal: a symbolic
/// extent for VLAs, Unknown for incomplete arrays, and otherwise the static
/// type size as an array-index-typed integer.
DefinedOrUnknownSVal
TypedValueRegion::getExtent(SValBuilder &svalBuilder) const {
  ASTContext &astCtx = svalBuilder.getContext();
  QualType valueTy = getDesugaredValueType(astCtx);

  // VLAs have runtime-determined extents; model them symbolically.
  if (isa<VariableArrayType>(valueTy))
    return nonloc::SymbolVal(
        svalBuilder.getSymbolManager().getExtentSymbol(this));

  // Incomplete arrays have no knowable extent at all.
  if (isa<IncompleteArrayType>(valueTy))
    return UnknownVal();

  CharUnits byteSize = astCtx.getTypeSizeInChars(valueTy);
  return svalBuilder.makeIntVal(byteSize.getQuantity(),
                                svalBuilder.getArrayIndexType());
}
/// Recover the ARM-ABI array cookie: the allocation starts 2*sizeof(size_t)
/// before the payload pointer, with the element count stored in the second
/// size_t slot.  Fills the three out-params.
void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
                                llvm::Value *Ptr,
                                const CXXDeleteExpr *expr,
                                QualType ElementType,
                                llvm::Value *&NumElements,
                                llvm::Value *&AllocPtr,
                                CharUnits &CookieSize) {
  // Derive a char* in the same address space as the pointer.
  unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
  llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);

  // If we don't need an array cookie, bail out early.
  if (!NeedsArrayCookie(expr, ElementType)) {
    AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
    NumElements = 0;
    CookieSize = CharUnits::Zero();
    return;
  }

  QualType SizeTy = getContext().getSizeType();
  CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
  llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);

  // The cookie size is always 2 * sizeof(size_t).
  CookieSize = 2 * SizeSize;

  // The allocated pointer is the input ptr, minus that amount.
  AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
  AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
                                                    -CookieSize.getQuantity());

  // The number of elements is at offset sizeof(size_t) relative to that.
  llvm::Value *NumElementsPtr
    = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
                                             SizeSize.getQuantity());
  NumElementsPtr =
    CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo(AS));
  NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
}
/// Emit code to cause the variable at the given address to be considered as /// constant from this point onwards. static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D, llvm::Constant *Addr) { // Don't emit the intrinsic if we're not optimizing. if (!CGF.CGM.getCodeGenOpts().OptimizationLevel) return; // Grab the llvm.invariant.start intrinsic. llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start; llvm::Constant *InvariantStart = CGF.CGM.getIntrinsic(InvStartID); // Emit a call with the size in bytes of the object. CharUnits WidthChars = CGF.getContext().getTypeSizeInChars(D.getType()); uint64_t Width = WidthChars.getQuantity(); llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(CGF.Int64Ty, Width), llvm::ConstantExpr::getBitCast(Addr, CGF.Int8PtrTy)}; CGF.Builder.CreateCall(InvariantStart, Args); }
/// Emit an llvm.invariant.start call marking \p Size bytes at \p Addr as
/// immutable from this point on.  Skipped entirely at -O0, where the marker
/// would be dead weight.
void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr,
                                         CharUnits Size) {
  // Do not emit the intrinsic if we're not optimizing.
  if (!CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // The intrinsic is overloaded on the pointer's address-space type.
  llvm::Type *OverloadTys[1] = {Int8PtrTy};
  llvm::Function *InvariantStart =
      CGM.getIntrinsic(llvm::Intrinsic::invariant_start, OverloadTys);

  // Arguments: object size in bytes, then the object address as i8*.
  llvm::Value *CallArgs[2] = {
      llvm::ConstantInt::getSigned(Int64Ty, Size.getQuantity()),
      llvm::ConstantExpr::getBitCast(Addr, Int8PtrTy)};
  Builder.CreateCall(InvariantStart, CallArgs);
}
/// Load the element count from an Itanium array cookie.  The count is
/// right-justified within the cookie, so skip any leading padding before
/// loading a size_t from the cookie's tail.
llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                llvm::Value *allocPtr,
                                                CharUnits cookieSize) {
  // The element count sits in the last SizeSizeInBytes bytes of the cookie.
  CharUnits countOffset =
      cookieSize - CharUnits::fromQuantity(CGF.SizeSizeInBytes);

  llvm::Value *countPtr = allocPtr;
  if (!countOffset.isZero())
    countPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        countPtr, countOffset.getQuantity());

  // Reinterpret as size_t* in the allocation's address space and load.
  unsigned addrSpace = allocPtr->getType()->getPointerAddressSpace();
  countPtr = CGF.Builder.CreateBitCast(countPtr,
                                       CGF.SizeTy->getPointerTo(addrSpace));
  return CGF.Builder.CreateLoad(countPtr);
}
/// Lazily create (and thereafter return) the __upc_fence_var global: an
/// int-typed, zero-initialized, linkonce_odr variable placed in the
/// platform's "upc_shared" section.
ConstantAddress CodeGenModule::getUPCFenceVar() {
  CharUnits Align = getContext().getTypeAlignInChars(getContext().IntTy);

  if (UPCFenceVar)
    return ConstantAddress(UPCFenceVar, Align);

  // First use: materialize the global.
  llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(getModule(), IntTy, false,
                               llvm::GlobalValue::LinkOnceODRLinkage,
                               llvm::ConstantInt::get(IntTy, 0),
                               "__upc_fence_var");
  // Darwin needs the segment name in the section specifier.
  if (isTargetDarwin())
    GV->setSection("__DATA,upc_shared");
  else
    GV->setSection("upc_shared");
  GV->setAlignment(Align.getQuantity());
  UPCFenceVar = GV;

  return ConstantAddress(UPCFenceVar, Align);
}
/// Resolve a single "Base.Member" reference from MS inline assembly and
/// store the member's byte offset within its record into \p Offset.
/// Returns true on any lookup failure (caller reports the error).
bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
                                unsigned &Offset, SourceLocation AsmLoc) {
  Offset = 0;
  LookupResult BaseResult(*this, &Context.Idents.get(Base), SourceLocation(),
                          LookupOrdinaryName);

  if (!LookupName(BaseResult, getCurScope()))
    return true;

  if (!BaseResult.isSingleResult())
    return true;

  // Base may name a variable, a typedef, or a type; extract the record type
  // it designates, if any.
  const RecordType *RT = nullptr;
  NamedDecl *FoundDecl = BaseResult.getFoundDecl();
  if (VarDecl *VD = dyn_cast<VarDecl>(FoundDecl))
    RT = VD->getType()->getAs<RecordType>();
  else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(FoundDecl))
    RT = TD->getUnderlyingType()->getAs<RecordType>();
  else if (TypeDecl *TD = dyn_cast<TypeDecl>(FoundDecl))
    RT = TD->getTypeForDecl()->getAs<RecordType>();
  if (!RT)
    return true;

  if (RequireCompleteType(AsmLoc, QualType(RT, 0), 0))
    return true;

  LookupResult FieldResult(*this, &Context.Idents.get(Member),
                           SourceLocation(), LookupMemberName);

  if (!LookupQualifiedName(FieldResult, RT->getDecl()))
    return true;

  // FIXME: Handle IndirectFieldDecl?
  FieldDecl *FD = dyn_cast<FieldDecl>(FieldResult.getFoundDecl());
  if (!FD)
    return true;

  // Convert the field's bit offset within the record layout to bytes.
  const ASTRecordLayout &RL = Context.getASTRecordLayout(RT->getDecl());
  unsigned i = FD->getFieldIndex();
  CharUnits Result = Context.toCharUnitsFromBits(RL.getFieldOffset(i));
  Offset = (unsigned)Result.getQuantity();

  return false;
}
/// Model sizeof/alignof-style expressions: run pre-statement checkers, bind
/// the expression's compile-time constant value in each resulting node's
/// state (skipping VLAs and ObjC object types, which are not modeled), then
/// run post-statement checkers.
void ExprEngine::
VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
                              ExplodedNode *Pred,
                              ExplodedNodeSet &Dst) {
  // FIXME: Prechecks eventually go in ::Visit().
  ExplodedNodeSet CheckedSet;
  getCheckerManager().runCheckersForPreStmt(CheckedSet, Pred, Ex, *this);

  ExplodedNodeSet EvalSet;
  StmtNodeBuilder Bldr(CheckedSet, EvalSet, *currBldrCtx);

  QualType T = Ex->getTypeOfArgument();

  for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
       I != E; ++I) {
    if (Ex->getKind() == UETT_SizeOf) {
      if (!T->isIncompleteType() && !T->isConstantSizeType()) {
        assert(T->isVariableArrayType() && "Unknown non-constant-sized type.");

        // FIXME: Add support for VLA type arguments and VLA expressions.
        // When that happens, we should probably refactor VLASizeChecker's code.
        continue;
      } else if (T->getAs<ObjCObjectType>()) {
        // Some code tries to take the sizeof an ObjCObjectType, relying that
        // the compiler has laid out its representation.  Just report Unknown
        // for these.
        continue;
      }
    }

    // The value is a known integer constant; bind it to the expression.
    APSInt Value = Ex->EvaluateKnownConstInt(getContext());
    CharUnits amt = CharUnits::fromQuantity(Value.getZExtValue());

    ProgramStateRef state = (*I)->getState();
    state = state->BindExpr(Ex, (*I)->getLocationContext(),
                            svalBuilder.makeIntVal(amt.getQuantity(),
                                                   Ex->getType()));
    Bldr.generateNode(Ex, *I, state);
  }

  getCheckerManager().runCheckersForPostStmt(Dst, EvalSet, Ex, *this);
}
/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of /// zeros in it, emit a memset and avoid storing the individual zeros. /// static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, CodeGenFunction &CGF) { // If the slot is already known to be zeroed, nothing to do. Don't mess with // volatile stores. if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return; // C++ objects with a user-declared constructor don't need zero'ing. if (CGF.getContext().getLangOptions().CPlusPlus) if (const RecordType *RT = CGF.getContext() .getBaseElementType(E->getType())->getAs<RecordType>()) { const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); if (RD->hasUserDeclaredConstructor()) return; } // If the type is 16-bytes or smaller, prefer individual stores over memset. std::pair<CharUnits, CharUnits> TypeInfo = CGF.getContext().getTypeInfoInChars(E->getType()); if (TypeInfo.first <= CharUnits::fromQuantity(16)) return; // Check to see if over 3/4 of the initializer are known to be zero. If so, // we prefer to emit memset + individual stores for the rest. CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF); if (NumNonZeroBytes*4 > TypeInfo.first) return; // Okay, it seems like a good idea to use an initial memset, emit the call. llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity()); CharUnits Align = TypeInfo.second; llvm::Value *Loc = Slot.getAddr(); llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); Loc = CGF.Builder.CreateBitCast(Loc, BP); CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, Align.getQuantity(), false); // Tell the AggExprEmitter that the slot is known zero. Slot.setZeroed(); }
void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *ptr, const CXXDeleteExpr *expr, QualType eltTy, llvm::Value *&numElements, llvm::Value *&allocPtr, CharUnits &cookieSize) { // Derive a char* in the same address space as the pointer. unsigned AS = ptr->getType()->getPointerAddressSpace(); llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS); ptr = CGF.Builder.CreateBitCast(ptr, charPtrTy); // If we don't need an array cookie, bail out early. if (!requiresArrayCookie(expr, eltTy)) { allocPtr = ptr; numElements = nullptr; cookieSize = CharUnits::Zero(); return; } cookieSize = getArrayCookieSizeImpl(eltTy); allocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ptr, -cookieSize.getQuantity()); numElements = readArrayCookieImpl(CGF, allocPtr, cookieSize); }
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore, unsigned Alignment) { assert(Src.isAggregate() && "value must be aggregate value!"); // If Dest is ignored, then we're evaluating an aggregate expression // in a context (like an expression statement) that doesn't care // about the result. C says that an lvalue-to-rvalue conversion is // performed in these cases; C++ says that it is not. In either // case, we don't actually need to do anything unless the value is // volatile. if (Dest.isIgnored()) { if (!Src.isVolatileQualified() || CGF.CGM.getLangOptions().CPlusPlus || (IgnoreResult && Ignore)) return; // If the source is volatile, we must read from it; to do that, we need // some place to put it. Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp"); } if (Dest.requiresGCollection()) { CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType()); llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType()); llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity()); CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, Dest.getAddr(), Src.getAggregateAddr(), SizeVal); return; } // If the result of the assignment is used, copy the LHS there also. // FIXME: Pass VolatileDest as well. I think we also need to merge volatile // from the source as well, as we can't eliminate it if either operand // is volatile, unless copy has volatile for both source and destination.. CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(), Dest.isVolatile()|Src.isVolatileQualified(), Alignment); }
/// Turn the raw atomic integer IntVal back into an RValue of the atomic's
/// value type, using ResultSlot as the destination for aggregates.
RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal,
                                     AggValueSlot ResultSlot,
                                     SourceLocation Loc) const {
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");

  // Fast path: a padding-free scalar can usually be produced directly from
  // the atomic integer with at most a single cast, no temporary required.
  if (getEvaluationKind() == TEK_Scalar && !hasPadding()) {
    llvm::Type *ValTy = CGF.ConvertTypeForMem(ValueTy);
    if (ValTy->isIntegerTy()) {
      assert(IntVal->getType() == ValTy && "Different integer types.");
      return RValue::get(IntVal);
    }
    if (ValTy->isPointerTy())
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
    if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
  }

  // Otherwise, stage the integer through memory big enough to hold the
  // atomic representation, then reload it as a value of the expected type.
  llvm::Value *Mem;
  CharUnits MemAlign;
  bool MemIsVolatile = false;
  if (getEvaluationKind() == TEK_Aggregate) {
    // Aggregates land directly in the caller-provided result slot.
    assert(!ResultSlot.isIgnored());
    Mem = ResultSlot.getAddr();
    MemAlign = getValueAlignment();
    MemIsVolatile = ResultSlot.isVolatile();
  } else {
    Mem = CGF.CreateMemTemp(getAtomicType(), "atomic-temp");
    MemAlign = getAtomicAlignment();
  }

  // Store the raw integer through an atomic-int-typed view of the buffer.
  llvm::Value *CastMem = emitCastToAtomicIntPointer(Mem);
  CGF.Builder.CreateAlignedStore(IntVal, CastMem, MemAlign.getQuantity())
      ->setVolatile(MemIsVolatile);

  return convertTempToRValue(Mem, ResultSlot, Loc);
}
/// Write the array-new cookie for the Microsoft ABI and return a pointer to
/// the start of the element storage.
llvm::Value* MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                                    llvm::Value *newPtr,
                                                    llvm::Value *numElements,
                                                    const CXXNewExpr *expr,
                                                    QualType elementType) {
  assert(requiresArrayCookie(expr));

  // The cookie is simply the element count, stored at the very start of the
  // allocation (no extra offset).
  CharUnits cookieSize = getArrayCookieSizeImpl(elementType);

  unsigned addrSpace = newPtr->getType()->getPointerAddressSpace();
  llvm::Value *countSlot =
      CGF.Builder.CreateBitCast(newPtr, CGF.SizeTy->getPointerTo(addrSpace));
  CGF.Builder.CreateStore(numElements, countSlot);

  // The element buffer begins immediately after the cookie.
  return CGF.Builder.CreateConstInBoundsGEP1_64(newPtr,
                                                cookieSize.getQuantity());
}
/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for /// classes with bases that do not satisfy the abi::__si_class_type_info /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c. void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) { llvm::Type *UnsignedIntLTy = CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy); // Itanium C++ ABI 2.9.5p6c: // __flags is a word with flags describing details about the class // structure, which may be referenced by using the __flags_masks // enumeration. These flags refer to both direct and indirect bases. unsigned Flags = ComputeVMIClassTypeInfoFlags(RD); Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags)); // Itanium C++ ABI 2.9.5p6c: // __base_count is a word with the number of direct proper base class // descriptions that follow. Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases())); if (!RD->getNumBases()) return; llvm::Type *LongLTy = CGM.getTypes().ConvertType(CGM.getContext().LongTy); // Now add the base class descriptions. // Itanium C++ ABI 2.9.5p6c: // __base_info[] is an array of base class descriptions -- one for every // direct proper base. Each description is of the type: // // struct abi::__base_class_type_info { // public: // const __class_type_info *__base_type; // long __offset_flags; // // enum __offset_flags_masks { // __virtual_mask = 0x1, // __public_mask = 0x2, // __offset_shift = 8 // }; // }; for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), E = RD->bases_end(); I != E; ++I) { const CXXBaseSpecifier *Base = I; // The __base_type member points to the RTTI for the base type. Fields.push_back(RTTIBuilder(CGM).BuildTypeInfo(Base->getType())); const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); int64_t OffsetFlags = 0; // All but the lower 8 bits of __offset_flags are a signed offset. 
// For a non-virtual base, this is the offset in the object of the base // subobject. For a virtual base, this is the offset in the virtual table of // the virtual base offset for the virtual base referenced (negative). CharUnits Offset; if (Base->isVirtual()) Offset = CGM.getVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl); else { const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); Offset = Layout.getBaseClassOffset(BaseDecl); }; OffsetFlags = uint64_t(Offset.getQuantity()) << 8; // The low-order byte of __offset_flags contains flags, as given by the // masks from the enumeration __offset_flags_masks. if (Base->isVirtual()) OffsetFlags |= BCTI_Virtual; if (Base->getAccessSpecifier() == AS_public) OffsetFlags |= BCTI_Public; Fields.push_back(llvm::ConstantInt::get(LongLTy, OffsetFlags)); } }