CodeGenVTables::CodeGenVTables(CodeGenModule &CGM)
    : CGM(CGM), ItaniumVTContext(CGM.getContext()) {
  if (CGM.getTarget().getCXXABI().isMicrosoft()) {
    // FIXME: Eventually, we should only have one of V*TContexts available.
    // Today we use both in the Microsoft ABI as MicrosoftVFTableContext
    // is not completely supported in CodeGen yet.
    MicrosoftVTContext.reset(new MicrosoftVTableContext(CGM.getContext()));
  }
}
CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
    : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
      TheDataLayout(cgm.getDataLayout()), Target(cgm.getTarget()),
      TheCXXABI(cgm.getCXXABI()),
      TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) {
  SkippedLayout = false;
}
static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
                                     llvm::SmallVectorImpl<llvm::Type*> &elementTypes) {
  ASTContext &C = CGM.getContext();

  // The header is basically a 'struct { void *; int; int; void *; void *; }'.
  CharUnits ptrSize, ptrAlign, intSize, intAlign;
  llvm::tie(ptrSize, ptrAlign) = C.getTypeInfoInChars(C.UInt32Ty);
  llvm::tie(intSize, intAlign) = C.getTypeInfoInChars(C.Int32Ty);

  // Are there crazy embedded platforms where this isn't true?
  assert(intSize <= ptrSize && "layout assumptions horribly violated");

  CharUnits headerSize = ptrSize;
  if (2 * intSize < ptrAlign)
    headerSize += ptrSize;
  else
    headerSize += 2 * intSize;
  headerSize += 2 * ptrSize;

  info.BlockAlign = ptrAlign;
  info.BlockSize = headerSize;

  assert(elementTypes.empty());
  llvm::Type *i8p = CGM.getTypes().ConvertType(C.UInt32Ty);
  llvm::Type *intTy = CGM.getTypes().ConvertType(C.Int32Ty);
  elementTypes.push_back(i8p);
  elementTypes.push_back(intTy);
  elementTypes.push_back(intTy);
  elementTypes.push_back(i8p);
  elementTypes.push_back(CGM.getBlockDescriptorType());

  assert(elementTypes.size() == BlockHeaderSize);
}
CodeGenTypes::CodeGenTypes(CodeGenModule &CGM)
    : Context(CGM.getContext()), Target(Context.getTargetInfo()),
      TheModule(CGM.getModule()), TheTargetData(CGM.getTargetData()),
      TheABIInfo(CGM.getTargetCodeGenInfo().getABIInfo()),
      TheCXXABI(CGM.getCXXABI()), CodeGenOpts(CGM.getCodeGenOpts()),
      CGM(CGM) {
  SkippedLayout = false;
}
CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
    : CGCUDARuntime(CGM), Context(CGM.getLLVMContext()),
      TheModule(CGM.getModule()),
      RelocatableDeviceCode(CGM.getLangOpts().GPURelocatableDeviceCode),
      DeviceMC(CGM.getContext().createMangleContext(
          CGM.getContext().getAuxTargetInfo())) {
  CodeGen::CodeGenTypes &Types = CGM.getTypes();
  ASTContext &Ctx = CGM.getContext();

  IntTy = CGM.IntTy;
  SizeTy = CGM.SizeTy;
  VoidTy = CGM.VoidTy;

  CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
  VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
  VoidPtrPtrTy = VoidPtrTy->getPointerTo();
}
void VBTableInfo::EmitVBTableDefinition(
    CodeGenModule &CGM, const CXXRecordDecl *RD,
    llvm::GlobalVariable::LinkageTypes Linkage) const {
  assert(RD->getNumVBases() && ReusingBase->getNumVBases() &&
         "should only emit vbtables for classes with vbtables");

  const ASTRecordLayout &BaseLayout =
      CGM.getContext().getASTRecordLayout(VBPtrSubobject.getBase());
  const ASTRecordLayout &DerivedLayout =
      CGM.getContext().getASTRecordLayout(RD);

  SmallVector<llvm::Constant *, 4> Offsets;

  // The offset from ReusingBase's vbptr to itself always leads.
  CharUnits VBPtrOffset = BaseLayout.getVBPtrOffset();
  Offsets.push_back(
      llvm::ConstantInt::get(CGM.IntTy, -VBPtrOffset.getQuantity()));

  // These are laid out in the same order as in Itanium, which is the same as
  // the order of the vbase iterator.
  for (CXXRecordDecl::base_class_const_iterator
           I = ReusingBase->vbases_begin(),
           E = ReusingBase->vbases_end();
       I != E; ++I) {
    const CXXRecordDecl *VBase = I->getType()->getAsCXXRecordDecl();
    CharUnits Offset = DerivedLayout.getVBaseClassOffset(VBase);
    assert(!Offset.isNegative());
    // Make it relative to the subobject vbptr.
    Offset -= VBPtrSubobject.getBaseOffset() + VBPtrOffset;
    Offsets.push_back(llvm::ConstantInt::get(CGM.IntTy, Offset.getQuantity()));
  }

  assert(Offsets.size() ==
         cast<llvm::ArrayType>(cast<llvm::PointerType>(GV->getType())
                                   ->getElementType())->getNumElements());
  llvm::ArrayType *VBTableType =
      llvm::ArrayType::get(CGM.IntTy, Offsets.size());
  llvm::Constant *Init = llvm::ConstantArray::get(VBTableType, Offsets);
  GV->setInitializer(Init);

  // Set the correct linkage.
  GV->setLinkage(Linkage);

  // Set the right visibility.
  CGM.setTypeVisibility(GV, RD, CodeGenModule::TVK_ForVTable);
}
CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM) : CGCUDARuntime(CGM) {
  CodeGen::CodeGenTypes &Types = CGM.getTypes();
  ASTContext &Ctx = CGM.getContext();

  IntTy = Types.ConvertType(Ctx.IntTy);
  SizeTy = Types.ConvertType(Ctx.getSizeType());

  CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
  VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
}
CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
    : CGCUDARuntime(CGM), Context(CGM.getLLVMContext()),
      TheModule(CGM.getModule()) {
  CodeGen::CodeGenTypes &Types = CGM.getTypes();
  ASTContext &Ctx = CGM.getContext();

  IntTy = CGM.IntTy;
  SizeTy = CGM.SizeTy;
  VoidTy = CGM.VoidTy;

  CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
  VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
  VoidPtrPtrTy = VoidPtrTy->getPointerTo();
}
bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
                                   llvm::IntegerType *intTy) {
  auto size = intTy->getBitWidth();
  switch (size) {
  case 1:
  case 8:
  case 16:
  case 32:
  case 64:
    // Just assume that the above are always legal.
    return true;

  case 128:
    return CGM.getContext().getTargetInfo().hasInt128Type();

  default:
    return false;
  }
}
/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
/// the given type exists somewhere else, and therefore we should not emit the
/// type information in this translation unit.  Assumes that it is not a
/// standard-library type.
static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM, QualType Ty) {
  ASTContext &Context = CGM.getContext();

  // If RTTI is disabled, don't consider key functions.
  if (!Context.getLangOpts().RTTI) return false;

  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!RD->hasDefinition())
      return false;

    if (!RD->isDynamicClass())
      return false;

    return !CGM.getVTables().ShouldEmitVTableInThisTU(RD);
  }

  return false;
}
/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
/// the given type exists somewhere else, and therefore we should not emit the
/// type information in this translation unit.  Assumes that it is not a
/// standard-library type.
static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM, QualType Ty) {
  ASTContext &Context = CGM.getContext();

  // If RTTI is disabled, assume it might be disabled in the
  // translation unit that defines any potential key function, too.
  if (!Context.getLangOpts().RTTI) return false;

  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!RD->hasDefinition())
      return false;

    if (!RD->isDynamicClass())
      return false;

    // FIXME: this may need to be reconsidered if the key function
    // changes.
    return CGM.getVTables().isVTableExternal(RD);
  }

  return false;
}
CodeGenVTables::CodeGenVTables(CodeGenModule &CGM)
    : CGM(CGM), VTContext(CGM.getContext().getVTableContext()) {}
/// Compute the layout of the given block.  Attempts to lay the block
/// out with minimal space requirements.
static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
  ASTContext &C = CGM.getContext();
  const ScriptDefn *block = info.getBlockDecl();

  llvm::SmallVector<llvm::Type*, 8> elementTypes;
  initializeForBlockHeader(CGM, info, elementTypes);

  if (!block->hasCaptures()) {
    info.StructureType =
        llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
    info.CanBeGlobal = true;
    return;
  }

  // Collect the layout chunks.
  llvm::SmallVector<BlockLayoutChunk, 16> layout;
  layout.reserve(block->capture_end() - block->capture_begin());

  CharUnits maxFieldAlign;

  // Next, all the block captures.
  for (ScriptDefn::capture_const_iterator ci = block->capture_begin(),
       ce = block->capture_end(); ci != ce; ++ci) {
    const VarDefn *variable = ci->getVariable();

    if (ci->isByRef()) {
      // We have to copy/dispose of the __block reference.
      info.NeedsCopyDispose = true;

      // Just use void* instead of a pointer to the byref type.
      Type byRefPtrTy = C.UInt32Ty;

      llvm::Type *llvmType = CGM.getTypes().ConvertType(byRefPtrTy);
      std::pair<CharUnits,CharUnits> tinfo =
          CGM.getContext().getTypeInfoInChars(byRefPtrTy);
      maxFieldAlign = std::max(maxFieldAlign, tinfo.second);

      layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first,
                                        &*ci, llvmType));
      continue;
    }

    // Otherwise, build a layout chunk with the size and alignment of
    // the declaration.
    if (llvm::Constant *constant = tryCaptureAsConstant(CGM, variable)) {
      info.Captures[variable] = CGBlockInfo::Capture::makeConstant(constant);
      continue;
    }

    // Captures with a non-trivial copy expression require copy/dispose,
    // and so do types with destructors.
    if (ci->hasCopyExpr()) {
      info.NeedsCopyDispose = true;
      info.HasCXXObject = true;
    }

    CharUnits size = C.getTypeSizeInChars(variable->getType());
    CharUnits align = C.getDefnAlign(variable);
    maxFieldAlign = std::max(maxFieldAlign, align);

    llvm::Type *llvmType =
        CGM.getTypes().ConvertTypeForMem(variable->getType());

    layout.push_back(BlockLayoutChunk(align, size, &*ci, llvmType));
  }

  // If that was everything, we're done here.
  if (layout.empty()) {
    info.StructureType =
        llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
    info.CanBeGlobal = true;
    return;
  }

  // Sort the layout by alignment.  We have to use a stable sort here
  // to get reproducible results.  There should probably be an
  // llvm::array_pod_stable_sort.
  std::stable_sort(layout.begin(), layout.end());

  CharUnits &blockSize = info.BlockSize;
  info.BlockAlign = std::max(maxFieldAlign, info.BlockAlign);

  // Assuming that the first byte in the header is maximally aligned,
  // get the alignment of the first byte following the header.
  CharUnits endAlign = getLowBit(blockSize);

  // If the end of the header isn't satisfactorily aligned for the
  // maximum thing, look for things that are okay with the header-end
  // alignment, and keep appending them until we get something that's
  // aligned right.  This algorithm is only guaranteed optimal if
  // that condition is satisfied at some point; otherwise we can get
  // things like:
  //   header                 // next byte has alignment 4
  //   something_with_size_5; // next byte has alignment 1
  //   something_with_alignment_8;
  // which has 7 bytes of padding, as opposed to the naive solution
  // which might have less padding.
  if (endAlign < maxFieldAlign) {
    llvm::SmallVectorImpl<BlockLayoutChunk>::iterator
        li = layout.begin() + 1, le = layout.end();

    // Look for something that the header end is already
    // satisfactorily aligned for.
    for (; li != le && endAlign < li->Alignment; ++li)
      ;

    // If we found something that's naturally aligned for the end of
    // the header, keep adding things...
    if (li != le) {
      llvm::SmallVectorImpl<BlockLayoutChunk>::iterator first = li;
      for (; li != le; ++li) {
        assert(endAlign >= li->Alignment);

        li->setIndex(info, elementTypes.size());
        elementTypes.push_back(li->Type);
        blockSize += li->Size;
        endAlign = getLowBit(blockSize);

        // ...until we get to the alignment of the maximum field.
        if (endAlign >= maxFieldAlign)
          break;
      }

      // Don't re-append everything we just appended.
      layout.erase(first, li);
    }
  }

  // At this point, we just have to add padding if the end align still
  // isn't aligned right.
  if (endAlign < maxFieldAlign) {
    CharUnits padding = maxFieldAlign - endAlign;

    elementTypes.push_back(llvm::ArrayType::get(CGM.Int8Ty,
                                                padding.getQuantity()));
    blockSize += padding;

    endAlign = getLowBit(blockSize);
    assert(endAlign >= maxFieldAlign);
  }

  // Slam everything else on now.  This works because they have
  // strictly decreasing alignment and we expect that size is always a
  // multiple of alignment.
  for (llvm::SmallVectorImpl<BlockLayoutChunk>::iterator
           li = layout.begin(), le = layout.end(); li != le; ++li) {
    assert(endAlign >= li->Alignment);
    li->setIndex(info, elementTypes.size());
    elementTypes.push_back(li->Type);
    blockSize += li->Size;
    endAlign = getLowBit(blockSize);
  }

  info.StructureType =
      llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
}
CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) {
  // Currently always the size of an ordinary pointer.
  return CGM.getContext().toCharUnitsFromBits(
      CGM.getContext().getTargetInfo().getPointerWidth(0));
}
VBTableBuilder::VBTableBuilder(CodeGenModule &CGM,
                               const CXXRecordDecl *MostDerived)
    : CGM(CGM), MostDerived(MostDerived),
      DerivedLayout(CGM.getContext().getASTRecordLayout(MostDerived)) {}
/// Return the address of the '__upc_forall_depth' global, creating it in the
/// module if it does not already exist.
ConstantAddress getUPCForAllDepth(CodeGenModule &CGM) {
  CharUnits Align =
      CGM.getContext().getTypeAlignInChars(CGM.getContext().IntTy);
  return ConstantAddress(
      CGM.getModule().getOrInsertGlobal("__upc_forall_depth", CGM.IntTy),
      Align);
}