// Build a CGCallee for a "virtual" call in an Apple kext.  Instead of loading
// the vptr from the object, the vtable global for \p RD is referenced
// directly and indexed to find the function pointer for \p GD.
static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF, GlobalDecl GD,
                                          llvm::Type *Ty,
                                          const CXXRecordDecl *RD) {
  assert(!CGF.CGM.getTarget().getCXXABI().isMicrosoft() &&
         "No kext in Microsoft ABI");
  GD = GD.getCanonicalDecl();
  CodeGenModule &CGM = CGF.CGM;
  llvm::Value *VTable = CGM.getCXXABI().getAddrOfVTable(RD, CharUnits());
  // View the vtable as an array of pointers to functions of type Ty so it
  // can be indexed directly below.
  Ty = Ty->getPointerTo()->getPointerTo();
  VTable = CGF.Builder.CreateBitCast(VTable, Ty);
  assert(VTable && "BuildVirtualCall = kext vtbl pointer is null");
  // Index of the method within its vtable, per the Itanium layout.
  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  VTableLayout::AddressPointLocation AddressPoint =
      VTLayout.getAddressPoint(BaseSubobject(RD, CharUnits::Zero()));
  // Rebase the index so it is relative to the start of the vtable global
  // rather than to the class's address point.
  VTableIndex += VTLayout.getVTableOffset(AddressPoint.VTableIndex) +
                 AddressPoint.AddressPointIndex;
  llvm::Value *VFuncPtr =
      CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
  llvm::Value *VFunc =
      CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.PointerAlignInBytes);
  CGCallee Callee(GD.getDecl(), VFunc);
  return Callee;
}
/// Returns true if \p GD names the deleting variant of a destructor.
static bool IsDeletingDtor(GlobalDecl GD) {
  const CXXMethodDecl *Method = cast<CXXMethodDecl>(GD.getDecl());
  // Only destructors have dtor variants at all.
  if (!isa<CXXDestructorDecl>(Method))
    return false;
  return GD.getDtorType() == Dtor_Deleting;
}
/// Map a mangled symbol name back to the declaration it was emitted from,
/// preferring a definition (function body / tag definition) when one exists.
const Decl *GetDeclForMangledName(StringRef MangledName) override {
  GlobalDecl LookupResult;
  if (!Builder->lookupRepresentativeDecl(MangledName, LookupResult))
    return nullptr;

  const Decl *Canon = LookupResult.getCanonicalDecl().getDecl();
  if (const auto *Fn = dyn_cast<FunctionDecl>(Canon)) {
    if (Fn->hasBody(Fn))
      return Fn;
  }
  if (const auto *Tag = dyn_cast<TagDecl>(Canon)) {
    if (const auto *Definition = Tag->getDefinition())
      return Definition;
  }
  return Canon;
}
// Begin emission of a thunk body for \p GD: set up CurGD, build the implicit
// 'this' and explicit parameters via the C++ ABI, and run StartFunction plus
// the instance-function prolog.  Mirrors normal function start-up but passes
// an empty GlobalDecl to StartFunction, so the ABI prolog is run explicitly
// at the end.
void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD,
                                 const CGFunctionInfo &FnInfo) {
  assert(!CurGD.getDecl() && "CurGD was already set!");
  CurGD = GD;

  // Build FunctionArgs.
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  QualType ThisType = MD->getThisType(getContext());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  // ABIs where the method "returns this" use the 'this' type as the thunk's
  // result type instead of the declared return type.
  QualType ResultType =
      CGM.getCXXABI().HasThisReturn(GD) ? ThisType : FPT->getReturnType();
  FunctionArgList FunctionArgs;

  // Create the implicit 'this' parameter declaration.
  CGM.getCXXABI().buildThisParam(*this, FunctionArgs);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
                                          E = MD->param_end();
       I != E; ++I)
    FunctionArgs.push_back(*I);

  // Destructors may carry extra ABI-specific implicit parameters.
  if (isa<CXXDestructorDecl>(MD))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResultType, FunctionArgs);

  // Start defining the function.
  StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
                MD->getLocation(), SourceLocation());

  // Since we didn't pass a GlobalDecl to StartFunction, do this ourselves.
  CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
  CXXThisValue = CXXABIThisValue;
}
/// Emit every thunk required for the method named by \p GD.
void CodeGenVTables::EmitThunks(GlobalDecl GD) {
  const CXXMethodDecl *Method =
      cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl();

  // Base-object destructors never get thunks.
  if (isa<CXXDestructorDecl>(Method) && GD.getDtorType() == Dtor_Base)
    return;

  const VTableContextBase::ThunkInfoVectorTy *Thunks =
      VTContext->getThunkInfo(GD);
  if (!Thunks)
    return;

  for (const ThunkInfo &TI : *Thunks)
    maybeEmitThunk(GD, TI, /*ForVTable=*/false);
}
/// Whether the constructor/destructor variant in \p GD must be passed a VTT
/// (virtual table table) parameter.
bool CodeGenVTables::needsVTTParameter(GlobalDecl GD) {
  const CXXMethodDecl *Method = cast<CXXMethodDecl>(GD.getDecl());

  // Only classes with virtual bases ever use a VTT.
  if (Method->getParent()->getNumVBases() == 0)
    return false;

  // Base-object constructors take a VTT.
  if (isa<CXXConstructorDecl>(Method) && GD.getCtorType() == Ctor_Base)
    return true;

  // As do base-object destructors.
  return isa<CXXDestructorDecl>(Method) && GD.getDtorType() == Dtor_Base;
}
/// Emit all thunks needed for the method named by \p GD.
void CodeGenVTables::EmitThunks(GlobalDecl GD) {
  const CXXMethodDecl *Method =
      cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl();

  // The base destructor variant never needs thunks.
  if (isa<CXXDestructorDecl>(Method) && GD.getDtorType() == Dtor_Base)
    return;

  const VTableContext::ThunkInfoVectorTy *Thunks =
      VTContext.getThunkInfo(Method);
  if (!Thunks)
    return;

  for (unsigned Idx = 0, Count = Thunks->size(); Idx != Count; ++Idx)
    EmitThunk(GD, (*Thunks)[Idx], /*UseAvailableExternallyLinkage=*/false);
}
// Emit the IR body for the function declaration in GD into Fn.  Collects the
// implicit and explicit parameters, runs the standard prologue, dispatches to
// the specialized body emitters (ctor/dtor, CUDA kernel stub, lambda
// conversion/static invoker), then runs the epilogue.
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (CGM.getModuleDebugInfo() && !FD->hasAttr<NoDebugAttr>())
    DebugInfo = CGM.getModuleDebugInfo();

  FunctionArgList Args;
  QualType ResTy = FD->getResultType();

  CurGD = GD;
  // Instance methods get their implicit parameters ('this', etc.) from the
  // C++ ABI.
  if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
    CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
    Args.push_back(FD->getParamDecl(i));

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody())
    BodyRange = Body->getSourceRange();

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, BodyRange.getBegin());

  // Generate the body of the function.
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getContext().getLangOpts().CUDA &&
           !CGM.getCodeGenOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
  else if (isa<CXXConversionDecl>(FD) &&
           cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
    // The lambda conversion to block pointer is special; the semantics can't be
    // expressed in the AST, so IRGen needs to special-case it.
    EmitLambdaToBlockPointerBody(Args);
  } else if (isa<CXXMethodDecl>(FD) &&
             cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda "__invoke" function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
  } else
    EmitFunctionBody(Args);

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}
/// Emit all thunks for the method named by \p GD, consulting whichever
/// ABI-specific vtable context is active.
void CodeGenVTables::EmitThunks(GlobalDecl GD) {
  const CXXMethodDecl *Method =
      cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl();

  // The base destructor variant never needs thunks.
  if (isa<CXXDestructorDecl>(Method) && GD.getDtorType() == Dtor_Base)
    return;

  // Ask the Microsoft context when it is in use, the Itanium one otherwise.
  const VTableContextBase::ThunkInfoVectorTy *Thunks =
      MicrosoftVTContext.isValid() ? MicrosoftVTContext->getThunkInfo(GD)
                                   : ItaniumVTContext.getThunkInfo(GD);
  if (!Thunks)
    return;

  for (const ThunkInfo &TI : *Thunks)
    emitThunk(GD, TI, /*ForVTable=*/false);
}
/// Set up PGO region counters for the declaration in \p GD: map the regions,
/// then emit counter variables (when instrumenting) and/or load existing
/// profile counts (when profile data is available).
void CodeGenPGO::assignRegionCounters(GlobalDecl &GD) {
  bool DoInstrument = CGM.getCodeGenOpts().ProfileInstrGenerate;
  PGOProfileData *ProfData = CGM.getPGOData();

  // Nothing to do unless instrumenting or consuming profile data.
  if (!DoInstrument && !ProfData)
    return;

  const Decl *D = GD.getDecl();
  if (!D)
    return;

  mapRegionCounters(D);
  if (DoInstrument)
    emitCounterVariables();
  if (ProfData)
    loadRegionCounts(GD, ProfData);
}
/// Emit \p Thunk with available_externally linkage so the optimizer can
/// inline it, when optimizations are on and the method's type is usable.
void CodeGenVTables::MaybeEmitThunkAvailableExternally(GlobalDecl GD,
                                                       const ThunkInfo &Thunk) {
  // We only want to do this when building with optimizations.
  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
    return;

  // We can't emit thunks for member functions with incomplete types.
  const CXXMethodDecl *Method = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionType *FnTy =
      cast<FunctionType>(Method->getType().getTypePtr());
  if (!CGM.getTypes().isFuncTypeConvertible(FnTy))
    return;

  EmitThunk(GD, Thunk, /*UseAvailableExternallyLinkage=*/true);
}
/// Apply linkage, visibility, and comdat settings to a freshly created or
/// reused thunk function.
static void setThunkProperties(CodeGenModule &CGM, const ThunkInfo &Thunk,
                               llvm::Function *ThunkFn, bool ForVTable,
                               GlobalDecl GD) {
  // Linkage follows the method the thunk forwards to.
  CGM.setFunctionLinkage(GD, ThunkFn);
  CGM.getCXXABI().setThunkLinkage(ThunkFn, ForVTable, GD,
                                  !Thunk.Return.isEmpty());

  // Visibility follows the target method as well.
  setThunkVisibility(CGM, cast<CXXMethodDecl>(GD.getDecl()), Thunk, ThunkFn);

  // Weak-for-linker thunks get a comdat so duplicates fold at link time.
  if (CGM.supportsCOMDAT() && ThunkFn->isWeakForLinker())
    ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName()));
}
// Begin emission of a thunk body for \p GD.  Computes the thunk's result
// type (void for unprototyped musttail-style thunks, 'this' for this-return
// ABIs, void* for most-derived-return ABIs, otherwise the declared return
// type), builds the parameter list, and runs StartFunction plus the
// instance-function prolog.
void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD,
                                 const CGFunctionInfo &FnInfo,
                                 bool IsUnprototyped) {
  assert(!CurGD.getDecl() && "CurGD was already set!");
  CurGD = GD;
  CurFuncIsThunk = true;

  // Build FunctionArgs.
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  QualType ThisType = MD->getThisType();
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ResultType;
  if (IsUnprototyped)
    ResultType = CGM.getContext().VoidTy;
  else if (CGM.getCXXABI().HasThisReturn(GD))
    ResultType = ThisType;
  else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
    ResultType = CGM.getContext().VoidPtrTy;
  else
    ResultType = FPT->getReturnType();

  FunctionArgList FunctionArgs;

  // Create the implicit 'this' parameter declaration.
  CGM.getCXXABI().buildThisParam(*this, FunctionArgs);

  // Add the rest of the parameters, if we have a prototype to work with.
  if (!IsUnprototyped) {
    FunctionArgs.append(MD->param_begin(), MD->param_end());

    // Destructors may carry extra ABI-specific implicit parameters.
    if (isa<CXXDestructorDecl>(MD))
      CGM.getCXXABI().addImplicitStructorParams(*this, ResultType,
                                                FunctionArgs);
  }

  // Start defining the function.
  auto NL = ApplyDebugLocation::CreateEmpty(*this);
  StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
                MD->getLocation());
  // Create a scope with an artificial location for the body of this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  // Since we didn't pass a GlobalDecl to StartFunction, do this ourselves.
  CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
  CXXThisValue = CXXABIThisValue;
  CurCodeDecl = MD;
  CurFuncDecl = MD;
}
// Emit the IR body for the function declaration in GD into Fn.  Collects the
// implicit and explicit parameters, runs the standard prologue, dispatches to
// the specialized body emitters (ctor/dtor, CUDA kernel stub), then runs the
// epilogue and tries to mark the function nothrow.
//
// Fix: dropped the redundant `if (FD->getNumParams())` guard around the
// parameter loop — the loop body already executes zero times for a
// parameterless function.
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (CGM.getModuleDebugInfo() && !FD->hasAttr<NoDebugAttr>())
    DebugInfo = CGM.getModuleDebugInfo();

  FunctionArgList Args;
  QualType ResTy = FD->getResultType();

  CurGD = GD;
  // Instance methods get their implicit parameters ('this', etc.) from the
  // C++ ABI.
  if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
    CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);

  // The explicit parameters follow the implicit ones.
  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
    Args.push_back(FD->getParamDecl(i));

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody())
    BodyRange = Body->getSourceRange();

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, BodyRange.getBegin());

  // Generate the body of the function.
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getContext().getLangOptions().CUDA &&
           !CGM.getCodeGenOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
  else
    EmitFunctionBody(Args);

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}
// Emit the IR body for the function declaration in GD into Fn (early variant
// without a CGFunctionInfo parameter).  Parameters are collected as
// (decl, type) pairs taken from the function's prototype.
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (CGM.getDebugInfo() && !FD->hasAttr<NoDebugAttr>())
    DebugInfo = CGM.getDebugInfo();

  FunctionArgList Args;
  QualType ResTy = FD->getResultType();

  CurGD = GD;
  // Instance methods get their implicit parameters from the C++ ABI.
  if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
    CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);

  // NOTE: the NumParams guard also protects the prototype assert below — a
  // parameterless definition need not have a prototype.
  if (FD->getNumParams()) {
    const FunctionProtoType* FProto = FD->getType()->getAs<FunctionProtoType>();
    assert(FProto && "Function def must have prototype!");

    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
      Args.push_back(std::make_pair(FD->getParamDecl(i),
                                    FProto->getArgType(i)));
  }

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody())
    BodyRange = Body->getSourceRange();

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, Args, BodyRange.getBegin());

  // Generate the body of the function.
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else
    EmitFunctionBody(Args);

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}
/// Possibly emit \p Thunk because a vtable referencing it is being emitted.
void CodeGenVTables::maybeEmitThunkForVTable(GlobalDecl GD,
                                             const ThunkInfo &Thunk) {
  // With key functions, only the key-function TU normally emits thunks; we
  // still emit them (available_externally) alongside vtables when optimizing
  // so they can be inlined.
  bool HasKeyFunctions = CGM.getTarget().getCXXABI().hasKeyFunctions();
  if (HasKeyFunctions && !CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // Thunks cannot be emitted for methods with incomplete types.
  const CXXMethodDecl *Method = cast<CXXMethodDecl>(GD.getDecl());
  if (!CGM.getTypes().isFuncTypeConvertible(
          Method->getType()->castAs<FunctionType>()))
    return;

  emitThunk(GD, Thunk, /*ForVTable=*/true);
}
/// Emit the body of a thunk: adjust and forward the call to the target
/// method, then propagate its linkage and visibility onto the thunk.
void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo, GlobalDecl GD,
                                    const ThunkInfo &Thunk) {
  StartThunk(Fn, GD, FnInfo);

  // Resolve the function this thunk forwards to.
  llvm::Type *TargetTy = CGM.getTypes().GetFunctionType(
      CGM.getTypes().arrangeGlobalDeclaration(GD));
  llvm::Value *TargetFn = CGM.GetAddrOfFunction(GD, TargetTy,
                                                /*ForVTable=*/true);

  // Forward the call and emit the (possibly adjusted) return.
  EmitCallAndReturnForThunk(GD, TargetFn, &Thunk);

  // The thunk takes the linkage of its target.
  CGM.setFunctionLinkage(GD, Fn);

  // And its visibility as well.
  setThunkVisibility(CGM, cast<CXXMethodDecl>(GD.getDecl()), Thunk, Fn);
}
// Get-or-create the thunk for (GD, Thunk) and emit its body if needed.
// Handles replacing a previous declaration whose type does not match, reusing
// an already-emitted definition, and the special varargs-thunk path.
void CodeGenVTables::emitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
                               bool ForVTable) {
  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeGlobalDeclaration(GD);

  // FIXME: re-use FnInfo in this computation.
  llvm::Constant *C = CGM.GetAddrOfThunk(GD, Thunk);
  llvm::GlobalValue *Entry;

  // Strip off a bitcast if we got one back.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(C)) {
    assert(CE->getOpcode() == llvm::Instruction::BitCast);
    Entry = cast<llvm::GlobalValue>(CE->getOperand(0));
  } else {
    Entry = cast<llvm::GlobalValue>(C);
  }

  // There's already a declaration with the same name, check if it has the same
  // type or if we need to replace it.
  if (Entry->getType()->getElementType() !=
      CGM.getTypes().GetFunctionTypeForVTable(GD)) {
    llvm::GlobalValue *OldThunkFn = Entry;

    // If the types mismatch then we have to rewrite the definition.
    assert(OldThunkFn->isDeclaration() && "Shouldn't replace non-declaration");

    // Remove the name from the old thunk function and get a new thunk.
    OldThunkFn->setName(StringRef());
    Entry = cast<llvm::GlobalValue>(CGM.GetAddrOfThunk(GD, Thunk));

    // If needed, replace the old thunk with a bitcast.
    if (!OldThunkFn->use_empty()) {
      llvm::Constant *NewPtrForOldDecl =
          llvm::ConstantExpr::getBitCast(Entry, OldThunkFn->getType());
      OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl);
    }

    // Remove the old thunk.
    OldThunkFn->eraseFromParent();
  }

  llvm::Function *ThunkFn = cast<llvm::Function>(Entry);
  bool ABIHasKeyFunctions = CGM.getTarget().getCXXABI().hasKeyFunctions();
  bool UseAvailableExternallyLinkage = ForVTable && ABIHasKeyFunctions;

  if (!ThunkFn->isDeclaration()) {
    if (!ABIHasKeyFunctions || UseAvailableExternallyLinkage) {
      // There is already a thunk emitted for this function, do nothing.
      return;
    }

    // Re-apply properties: linkage may change when a thunk first emitted for
    // a vtable is later emitted for the method itself.
    setThunkProperties(CGM, Thunk, ThunkFn, ForVTable, GD);
    return;
  }

  CGM.SetLLVMFunctionAttributesForDefinition(GD.getDecl(), ThunkFn);

  if (ThunkFn->isVarArg()) {
    // Varargs thunks are special; we can't just generate a call because
    // we can't copy the varargs. Our implementation is rather
    // expensive/sucky at the moment, so don't generate the thunk unless
    // we have to.
    // FIXME: Do something better here; GenerateVarArgsThunk is extremely ugly.
    if (UseAvailableExternallyLinkage)
      return;
    ThunkFn =
        CodeGenFunction(CGM).GenerateVarArgsThunk(ThunkFn, FnInfo, GD, Thunk);
  } else {
    // Normal thunk body generation.
    CodeGenFunction(CGM).generateThunk(ThunkFn, FnInfo, GD, Thunk);
  }

  setThunkProperties(CGM, Thunk, ThunkFn, ForVTable, GD);
}
// Build the constant initializer (an array of i8*) for a vtable of \p RD from
// the precomputed layout components, materializing pure/deleted-virtual
// handlers and thunks as needed.
llvm::Constant *
CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
                                        const VTableComponent *Components,
                                        unsigned NumComponents,
                                const VTableLayout::VTableThunkTy *VTableThunks,
                                        unsigned NumVTableThunks) {
  SmallVector<llvm::Constant *, 64> Inits;

  llvm::Type *Int8PtrTy = CGM.Int8PtrTy;

  llvm::Type *PtrDiffTy =
    CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());

  QualType ClassType = CGM.getContext().getTagDeclType(RD);
  llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(ClassType);

  unsigned NextVTableThunkIndex = 0;

  // Lazily-created stubs for pure and deleted virtual functions.
  llvm::Constant *PureVirtualFn = 0, *DeletedVirtualFn = 0;

  for (unsigned I = 0; I != NumComponents; ++I) {
    VTableComponent Component = Components[I];

    llvm::Constant *Init = 0;

    switch (Component.getKind()) {
    case VTableComponent::CK_VCallOffset:
      // Offsets are stored as pointer-sized integers cast to i8*.
      Init = llvm::ConstantInt::get(PtrDiffTy,
                                    Component.getVCallOffset().getQuantity());
      Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
      break;
    case VTableComponent::CK_VBaseOffset:
      Init = llvm::ConstantInt::get(PtrDiffTy,
                                    Component.getVBaseOffset().getQuantity());
      Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
      break;
    case VTableComponent::CK_OffsetToTop:
      Init = llvm::ConstantInt::get(PtrDiffTy,
                                    Component.getOffsetToTop().getQuantity());
      Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
      break;
    case VTableComponent::CK_RTTI:
      Init = llvm::ConstantExpr::getBitCast(RTTI, Int8PtrTy);
      break;
    case VTableComponent::CK_FunctionPointer:
    case VTableComponent::CK_CompleteDtorPointer:
    case VTableComponent::CK_DeletingDtorPointer: {
      GlobalDecl GD;

      // Get the right global decl.
      switch (Component.getKind()) {
      default:
        llvm_unreachable("Unexpected vtable component kind");
      case VTableComponent::CK_FunctionPointer:
        GD = Component.getFunctionDecl();
        break;
      case VTableComponent::CK_CompleteDtorPointer:
        GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Complete);
        break;
      case VTableComponent::CK_DeletingDtorPointer:
        GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Deleting);
        break;
      }

      if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
        // We have a pure virtual member function.
        if (!PureVirtualFn) {
          llvm::FunctionType *Ty =
            llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
          StringRef PureCallName = CGM.getCXXABI().GetPureVirtualCallName();
          PureVirtualFn = CGM.CreateRuntimeFunction(Ty, PureCallName);
          PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
                                                         CGM.Int8PtrTy);
        }
        Init = PureVirtualFn;
      } else if (cast<CXXMethodDecl>(GD.getDecl())->isDeleted()) {
        // Deleted virtual member functions get a runtime trap entry too.
        if (!DeletedVirtualFn) {
          llvm::FunctionType *Ty =
            llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
          StringRef DeletedCallName =
            CGM.getCXXABI().GetDeletedVirtualCallName();
          DeletedVirtualFn = CGM.CreateRuntimeFunction(Ty, DeletedCallName);
          DeletedVirtualFn = llvm::ConstantExpr::getBitCast(DeletedVirtualFn,
                                                            CGM.Int8PtrTy);
        }
        Init = DeletedVirtualFn;
      } else {
        // Check if we should use a thunk.
        // VTableThunks is sorted by component index; NextVTableThunkIndex
        // tracks our position in it.
        if (NextVTableThunkIndex < NumVTableThunks &&
            VTableThunks[NextVTableThunkIndex].first == I) {
          const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;

          maybeEmitThunkForVTable(GD, Thunk);
          Init = CGM.GetAddrOfThunk(GD, Thunk);

          NextVTableThunkIndex++;
        } else {
          llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);

          Init = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
        }

        Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
      }
      break;
    }
    case VTableComponent::CK_UnusedFunctionPointer:
      Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
      break;
    };

    Inits.push_back(Init);
  }

  llvm::ArrayType *ArrayType = llvm::ArrayType::get(Int8PtrTy, NumComponents);
  return llvm::ConstantArray::get(ArrayType, Inits);
}
// Emit the forwarding call inside a thunk body: adjust 'this', pass the
// original arguments through, call the target, optionally apply the return
// adjustment, and finish the function.  \p Thunk may be null for a
// no-adjustment forwarder.
void CodeGenFunction::EmitCallAndReturnForThunk(GlobalDecl GD,
                                                llvm::Value *Callee,
                                                const ThunkInfo *Thunk) {
  assert(isa<CXXMethodDecl>(CurGD.getDecl()) &&
         "Please use a new CGF for this thunk");
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());

  // Adjust the 'this' pointer if necessary
  llvm::Value *AdjustedThisPtr =
      Thunk ? CGM.getCXXABI().performThisAdjustment(*this, LoadCXXThis(),
                                                    Thunk->This)
            : LoadCXXThis();

  // Start building CallArgs.
  CallArgList CallArgs;
  QualType ThisType = MD->getThisType(getContext());
  CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);

  // Destructor thunks may need extra implicit arguments adjusted by the ABI.
  if (isa<CXXDestructorDecl>(MD))
    CGM.getCXXABI().adjustCallArgsForDestructorThunk(*this, GD, CallArgs);

  // Add the rest of the arguments.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
                                          E = MD->param_end();
       I != E; ++I)
    EmitDelegateCallArg(CallArgs, *I, (*I)->getLocStart());

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

#ifndef NDEBUG
  // Sanity-check that the call we are about to emit is ABI-compatible with
  // the thunk's own function info.
  const CGFunctionInfo &CallFnInfo =
    CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT,
                                       RequiredArgs::forPrototypePlus(FPT, 1));
  assert(CallFnInfo.getRegParm() == CurFnInfo->getRegParm() &&
         CallFnInfo.isNoReturn() == CurFnInfo->isNoReturn() &&
         CallFnInfo.getCallingConvention() ==
             CurFnInfo->getCallingConvention());
  assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
         similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
                 CurFnInfo->getReturnInfo(), CurFnInfo->getReturnType()));
  assert(CallFnInfo.arg_size() == CurFnInfo->arg_size());
  for (unsigned i = 0, e = CurFnInfo->arg_size(); i != e; ++i)
    assert(similar(CallFnInfo.arg_begin()[i].info,
                   CallFnInfo.arg_begin()[i].type,
                   CurFnInfo->arg_begin()[i].info,
                   CurFnInfo->arg_begin()[i].type));
#endif

  // Determine whether we have a return value slot to use.
  QualType ResultType =
      CGM.getCXXABI().HasThisReturn(GD) ? ThisType : FPT->getReturnType();
  ReturnValueSlot Slot;
  if (!ResultType->isVoidType() &&
      CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(CurFnInfo->getReturnType()))
    Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());

  // Now emit our call.
  RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, MD);

  // Consider return adjustment if we have ThunkInfo.
  if (Thunk && !Thunk->Return.isEmpty())
    RV = PerformReturnAdjustment(*this, ResultType, RV, *Thunk);

  // Emit return.
  if (!ResultType->isVoidType() && Slot.isNull())
    CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);

  // Disable the final ARC autorelease.
  AutoreleaseResult = false;

  FinishFunction();
}
// Set up per-function emission state and emit the standard prologue: entry
// block, alloca insertion point, return slot, debug info, instrumentation,
// EH spec, parameter stores, and (for instance methods) the 'this'/lambda
// capture setup.  Must be paired with FinishFunction.
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration.
  if (!CGM.getCodeGenOpts().NoInline)
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
             RE = FD->redecls_end(); RI != RE; ++RI)
        if (RI->isInlineSpecified()) {
          Fn->addFnAttr(llvm::Attribute::InlineHint);
          break;
        }

  if (getContext().getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entryblock
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Collect the parameter types to build the debug function type.
    unsigned NumArgs = 0;
    QualType *ArgsArray = new QualType[Args.size()];
    for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
         i != e; ++i) {
      ArgsArray[NumArgs++] = (*i)->getType();
    }

    QualType FnType =
      getContext().getFunctionType(RetTy, ArgsArray, NumArgs,
                                   FunctionProtoType::ExtProtoInfo());

    delete[] ArgsArray;

    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_enter");

  if (CGM.getCodeGenOpts().InstrumentForProfiling)
    EmitMCountInstrumentation();

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result.  We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If this lambda captures this, load it.
        QualType LambdaTagType =
          getContext().getTagDeclType(LambdaThisCaptureField->getParent());
        LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
                                                     LambdaTagType);
        LValue ThisLValue = EmitLValueForField(LambdaLV,
                                               LambdaThisCaptureField);
        CXXThisValue = EmitLoadOfLValue(ThisLValue).getScalarVal();
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'?  The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = (*i)->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
}
// Lower a C++ try/catch statement using the old eh.exception/eh.selector
// intrinsic scheme: emit the try body with its own landing pad, then a
// selector-driven chain of catch handlers, a rethrow path, and a shared
// finally/cleanup tail.
// NOTE(review): 'Personality' is bitcast through PtrToInt8Ty before the local
// PtrToInt8Ty below is declared — presumably that first use resolves to an
// outer-scope member of the same name; confirm against the enclosing class.
void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
  // Pointer to the personality function
  llvm::Constant *Personality =
    CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty
                                                      (VMContext),
                                                      true),
                              "__gxx_personality_v0");
  Personality = llvm::ConstantExpr::getBitCast(Personality, PtrToInt8Ty);
  llvm::Value *llvm_eh_exception =
    CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
  llvm::Value *llvm_eh_selector =
    CGM.getIntrinsic(llvm::Intrinsic::eh_selector);

  llvm::BasicBlock *PrevLandingPad = getInvokeDest();
  llvm::BasicBlock *TryHandler = createBasicBlock("try.handler");
  llvm::BasicBlock *FinallyBlock = createBasicBlock("finally");
  llvm::BasicBlock *FinallyRethrow = createBasicBlock("finally.throw");
  llvm::BasicBlock *FinallyEnd = createBasicBlock("finally.end");

  // Push an EH context entry, used for handling rethrows.
  PushCleanupBlock(FinallyBlock);

  // Emit the statements in the try {} block
  setInvokeDest(TryHandler);

  // FIXME: We should not have to do this here.  The AST should have the member
  // initializers under the CXXTryStmt's TryBlock.
  if (OuterTryBlock == &S) {
    GlobalDecl GD = CurGD;
    const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

    if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
      // Function-try-block on a constructor: the member-init prologue runs
      // inside the try so its exceptions are caught here.
      size_t OldCleanupStackSize = CleanupEntries.size();
      EmitCtorPrologue(CD, CurGD.getCtorType());
      EmitStmt(S.getTryBlock());

      // If any of the member initializers are temporaries bound to references
      // make sure to emit their destructors.
      EmitCleanupBlocks(OldCleanupStackSize);
    } else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD)) {
      // Function-try-block on a destructor: run the dtor epilogue as a
      // cleanup of the try body.
      llvm::BasicBlock *DtorEpilogue = createBasicBlock("dtor.epilogue");
      PushCleanupBlock(DtorEpilogue);

      EmitStmt(S.getTryBlock());

      CleanupBlockInfo Info = PopCleanupBlock();

      assert(Info.CleanupBlock == DtorEpilogue && "Block mismatch!");
      EmitBlock(DtorEpilogue);
      EmitDtorEpilogue(DD, GD.getDtorType());

      if (Info.SwitchBlock)
        EmitBlock(Info.SwitchBlock);
      if (Info.EndBlock)
        EmitBlock(Info.EndBlock);
    } else
      EmitStmt(S.getTryBlock());
  } else
    EmitStmt(S.getTryBlock());

  // Jump to end if there is no exception
  EmitBranchThroughCleanup(FinallyEnd);

  llvm::BasicBlock *TerminateHandler = getTerminateHandler();

  // Emit the handlers
  EmitBlock(TryHandler);

  const llvm::IntegerType *Int8Ty;
  const llvm::PointerType *PtrToInt8Ty;
  Int8Ty = llvm::Type::getInt8Ty(VMContext);
  // C string type.  Used in lots of places.
  PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
  llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
  llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
  llvm::Value *llvm_eh_typeid_for =
    CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
  // Exception object
  llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
  llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow");

  llvm::SmallVector<llvm::Value*, 8> Args;
  Args.clear();
  SelectorArgs.push_back(Exc);
  SelectorArgs.push_back(Personality);

  // Build the eh.selector argument list: one RTTI pointer per typed handler,
  // null for a catch-all.
  bool HasCatchAll = false;
  for (unsigned i = 0; i<S.getNumHandlers(); ++i) {
    const CXXCatchStmt *C = S.getHandler(i);
    VarDecl *CatchParam = C->getExceptionDecl();
    if (CatchParam) {
      llvm::Value *EHType =
        CGM.GenerateRTTI(C->getCaughtType().getNonReferenceType());
      SelectorArgs.push_back(EHType);
    } else {
      // null indicates catch all
      SelectorArgs.push_back(Null);
      HasCatchAll = true;
    }
  }

  // We use a cleanup unless there was already a catch all.
  if (!HasCatchAll) {
    SelectorArgs.push_back(Null);
  }

  // Find which handler was matched.
  llvm::Value *Selector =
    Builder.CreateCall(llvm_eh_selector, SelectorArgs.begin(),
                       SelectorArgs.end(), "selector");
  for (unsigned i = 0; i<S.getNumHandlers(); ++i) {
    const CXXCatchStmt *C = S.getHandler(i);
    VarDecl *CatchParam = C->getExceptionDecl();
    Stmt *CatchBody = C->getHandlerBlock();

    llvm::BasicBlock *Next = 0;

    // Typed handlers compare the selector against eh.typeid.for; a
    // catch-all (null entry) falls straight into its body.
    if (SelectorArgs[i+2] != Null) {
      llvm::BasicBlock *Match = createBasicBlock("match");
      Next = createBasicBlock("catch.next");
      const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
      llvm::Value *Id =
        Builder.CreateCall(llvm_eh_typeid_for,
                           Builder.CreateBitCast(SelectorArgs[i+2],
                                                 Int8PtrTy));
      Builder.CreateCondBr(Builder.CreateICmpEQ(Selector, Id),
                           Match, Next);
      EmitBlock(Match);
    }

    llvm::BasicBlock *MatchEnd = createBasicBlock("match.end");
    llvm::BasicBlock *MatchHandler = createBasicBlock("match.handler");

    PushCleanupBlock(MatchEnd);
    setInvokeDest(MatchHandler);

    llvm::Value *ExcObject = Builder.CreateCall(getBeginCatchFn(*this), Exc);

    {
      CleanupScope CatchScope(*this);

      // Bind the catch parameter if it exists.
      if (CatchParam) {
        QualType CatchType = CatchParam->getType().getNonReferenceType();
        setInvokeDest(TerminateHandler);
        bool WasPointer = true;
        if (!CatchType.getTypePtr()->isPointerType()) {
          if (!isa<ReferenceType>(CatchParam->getType()))
            WasPointer = false;
          CatchType = getContext().getPointerType(CatchType);
        }
        ExcObject = Builder.CreateBitCast(ExcObject, ConvertType(CatchType));
        EmitLocalBlockVarDecl(*CatchParam);
        // FIXME: we need to do this sooner so that the EH region for the
        // cleanup doesn't start until after the ctor completes, use a decl
        // init?
        CopyObject(*this, CatchParam->getType().getNonReferenceType(),
                   WasPointer, ExcObject, GetAddrOfLocalVar(CatchParam));
        setInvokeDest(MatchHandler);
      }

      EmitStmt(CatchBody);
    }

    EmitBranchThroughCleanup(FinallyEnd);

    EmitBlock(MatchHandler);

    llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
    // We are required to emit this call to satisfy LLVM, even
    // though we don't use the result.
    Args.clear();
    Args.push_back(Exc);
    Args.push_back(Personality);
    Args.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                          0));
    Builder.CreateCall(llvm_eh_selector, Args.begin(), Args.end());
    Builder.CreateStore(Exc, RethrowPtr);
    EmitBranchThroughCleanup(FinallyRethrow);

    CodeGenFunction::CleanupBlockInfo Info = PopCleanupBlock();

    EmitBlock(MatchEnd);

    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    Builder.CreateInvoke(getEndCatchFn(*this),
                         Cont, TerminateHandler,
                         Args.begin(), Args.begin());
    EmitBlock(Cont);
    if (Info.SwitchBlock)
      EmitBlock(Info.SwitchBlock);
    if (Info.EndBlock)
      EmitBlock(Info.EndBlock);

    Exc = Builder.CreateCall(llvm_eh_exception, "exc");
    Builder.CreateStore(Exc, RethrowPtr);
    EmitBranchThroughCleanup(FinallyRethrow);

    if (Next)
      EmitBlock(Next);
  }
  // No catch-all: unmatched exceptions are rethrown.
  if (!HasCatchAll) {
    Builder.CreateStore(Exc, RethrowPtr);
    EmitBranchThroughCleanup(FinallyRethrow);
  }

  CodeGenFunction::CleanupBlockInfo Info = PopCleanupBlock();

  setInvokeDest(PrevLandingPad);

  EmitBlock(FinallyBlock);

  if (Info.SwitchBlock)
    EmitBlock(Info.SwitchBlock);
  if (Info.EndBlock)
    EmitBlock(Info.EndBlock);

  // Branch around the rethrow code.
  EmitBranch(FinallyEnd);

  EmitBlock(FinallyRethrow);
  // FIXME: Eventually we can chain the handlers together and just do a call
  // here.
  if (getInvokeDest()) {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    Builder.CreateInvoke(getUnwindResumeOrRethrowFn(*this), Cont,
                         getInvokeDest(),
                         Builder.CreateLoad(RethrowPtr));
    EmitBlock(Cont);
  } else
    Builder.CreateCall(getUnwindResumeOrRethrowFn(*this),
                       Builder.CreateLoad(RethrowPtr));

  Builder.CreateUnreachable();

  EmitBlock(FinallyEnd);
}
/// Try to emit a definition as a global alias for another definition.
/// If \p InEveryTU is true, we know that an equivalent alias can be produced
/// in every translation unit.
///
/// Returns true when the caller must emit a real definition instead (the
/// alias could not be used); returns false when an alias or a use-replacement
/// has been recorded and no separate body is needed.
bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl, GlobalDecl TargetDecl, bool InEveryTU) {
  if (!getCodeGenOpts().CXXCtorDtorAliases)
    return true;

  // The alias will use the linkage of the referent.  If we can't
  // support aliases with that linkage, fail.
  llvm::GlobalValue::LinkageTypes Linkage = getFunctionLinkage(AliasDecl);

  // We can't use an alias if the linkage is not valid for one.
  if (!llvm::GlobalAlias::isValidLinkage(Linkage))
    return true;

  llvm::GlobalValue::LinkageTypes TargetLinkage = getFunctionLinkage(TargetDecl);

  // Check if we have it already: a non-declaration entry means somebody
  // already emitted a body under this mangled name, and a pending
  // replacement means the symbol is already scheduled to be RAUW'd.
  StringRef MangledName = getMangledName(AliasDecl);
  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
  if (Entry && !Entry->isDeclaration())
    return false;
  if (Replacements.count(MangledName))
    return false;

  // Derive the type for the alias.
  llvm::PointerType *AliasType = getTypes().GetFunctionType(AliasDecl)->getPointerTo();

  // Find the referent.  Some aliases might require a bitcast, in
  // which case the caller is responsible for ensuring the soundness
  // of these semantics.
  auto *Ref = cast<llvm::GlobalValue>(GetAddrOfGlobal(TargetDecl));
  // An alias must point at a GlobalObject; if the target is itself an
  // alias, chase it to its aliasee.
  auto *Aliasee = dyn_cast<llvm::GlobalObject>(Ref);
  if (!Aliasee)
    Aliasee = cast<llvm::GlobalAlias>(Ref)->getAliasee();

  // Instead of creating as alias to a linkonce_odr, replace all of the uses
  // of the aliasee.
  if (llvm::GlobalValue::isDiscardableIfUnused(Linkage) &&
      (TargetLinkage != llvm::GlobalValue::AvailableExternallyLinkage ||
       !TargetDecl.getDecl()->hasAttr<AlwaysInlineAttr>())) {
    // FIXME: An extern template instantiation will create functions with
    // linkage "AvailableExternally". In libc++, some classes also define
    // members with attribute "AlwaysInline" and expect no reference to
    // be generated. It is desirable to reenable this optimisation after
    // corresponding LLVM changes.
    llvm::Constant *Replacement = Aliasee;
    if (Aliasee->getType() != AliasType)
      Replacement = llvm::ConstantExpr::getBitCast(Aliasee, AliasType);
    Replacements[MangledName] = Replacement;
    return false;
  }

  if (!InEveryTU) {
    // If we don't have a definition for the destructor yet, don't
    // emit.  We can't emit aliases to declarations; that's just not
    // how aliases work.
    if (Aliasee->isDeclaration())
      return true;
  }

  // Don't create an alias to a linker weak symbol. This avoids producing
  // different COMDATs in different TUs. Another option would be to
  // output the alias both for weak_odr and linkonce_odr, but that
  // requires explicit comdat support in the IL.
  if (llvm::GlobalValue::isWeakForLinker(TargetLinkage))
    return true;

  // Create the alias with no name.
  auto *Alias = llvm::GlobalAlias::create(AliasType->getElementType(), 0, Linkage, "", Aliasee);

  // Switch any previous uses to the alias.
  if (Entry) {
    assert(Entry->getType() == AliasType &&
           "declaration exists with different type");
    Alias->takeName(Entry);
    Entry->replaceAllUsesWith(Alias);
    Entry->eraseFromParent();
  } else {
    Alias->setName(MangledName);
  }

  // Finally, set up the alias with its proper name and attributes.
  SetCommonAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);

  return false;
}
/// Set up per-function CodeGenFunction state and emit the standard function
/// prologue: entry block, alloca insertion marker, return slot, debug info,
/// instrumentation hooks, EH specification, and the ABI argument prologue.
/// Callers emit the body afterwards and close with FinishFunction().
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
           RE = FD->redecls_end(); RI != RE; ++RI)
      if (RI->isInlineSpecified()) {
        Fn->addFnAttr(llvm::Attribute::InlineHint);
        break;
      }

  if (getContext().getLangOptions().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (FD->hasAttr<OpenCLKernelAttr>()) {
        llvm::LLVMContext &Context = getLLVMContext();
        llvm::NamedMDNode *OpenCLMetadata =
          CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
        llvm::Value *Op = Fn;
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Op));
      }
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entryblock
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // FIXME: what is going on here and why does it ignore all these
    // interesting type properties?
    QualType FnType =
      getContext().getFunctionType(RetTy, 0, 0,
                                   FunctionProtoType::ExtProtoInfo());

    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_enter");

  if (CGM.getCodeGenOpts().InstrumentForProfiling)
    EmitMCountInstrumentation();

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result.  We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOptions().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  // Push EH-spec state before the argument prologue so argument cleanups
  // land inside it; remember the depth so FinishFunction can pop back to it.
  EmitStartEHSpec(CurCodeDecl);
  PrologueCleanupDepth = EHStack.stable_begin();
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance())
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = (*i)->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }

  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
}
/// Emit the body of a this-adjusting thunk for \p GD into \p Fn: adjust the
/// incoming 'this' pointer, forward every parameter to the real method, and
/// apply the covariant return adjustment when the ThunkInfo requires one.
void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    GlobalDecl GD, const ThunkInfo &Thunk) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ResultType = FPT->getResultType();
  QualType ThisType = MD->getThisType(getContext());

  FunctionArgList FunctionArgs;

  // FIXME: It would be nice if more of this code could be shared with
  // CodeGenFunction::GenerateCode.

  // Create the implicit 'this' parameter declaration.
  CurGD = GD;
  CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResultType, FunctionArgs);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I) {
    ParmVarDecl *Param = *I;

    FunctionArgs.push_back(Param);
  }

  StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
                SourceLocation());

  // Since we passed an empty GlobalDecl to StartFunction, set up the
  // instance-method state ourselves.
  CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
  CXXThisValue = CXXABIThisValue;

  // Adjust the 'this' pointer if necessary.
  llvm::Value *AdjustedThisPtr =
    PerformTypeAdjustment(*this, LoadCXXThis(),
                          Thunk.This.NonVirtual,
                          Thunk.This.VCallOffsetOffset,
                          /*IsReturnAdjustment*/false);

  CallArgList CallArgs;

  // Add our adjusted 'this' pointer.
  CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param);
  }

  // Get our callee.
  llvm::Type *Ty =
    CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD));
  llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);

#ifndef NDEBUG
  // Sanity-check that the thunk's ABI function info matches the callee's
  // argument-by-argument.
  const CGFunctionInfo &CallFnInfo =
    CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT,
                                       RequiredArgs::forPrototypePlus(FPT, 1));
  assert(CallFnInfo.getRegParm() == FnInfo.getRegParm() &&
         CallFnInfo.isNoReturn() == FnInfo.isNoReturn() &&
         CallFnInfo.getCallingConvention() == FnInfo.getCallingConvention());
  assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
         similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
                 FnInfo.getReturnInfo(), FnInfo.getReturnType()));
  assert(CallFnInfo.arg_size() == FnInfo.arg_size());
  for (unsigned i = 0, e = FnInfo.arg_size(); i != e; ++i)
    assert(similar(CallFnInfo.arg_begin()[i].info,
                   CallFnInfo.arg_begin()[i].type,
                   FnInfo.arg_begin()[i].info,
                   FnInfo.arg_begin()[i].type));
#endif

  // Determine whether we have a return value slot to use.
  ReturnValueSlot Slot;
  if (!ResultType->isVoidType() &&
      FnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      hasAggregateLLVMType(CurFnInfo->getReturnType()))
    Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());

  // Now emit our call.
  RValue RV = EmitCall(FnInfo, Callee, Slot, CallArgs, MD);

  if (!Thunk.Return.isEmpty())
    RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);

  if (!ResultType->isVoidType() && Slot.isNull())
    CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);

  // Disable the final ARC autorelease.
  AutoreleaseResult = false;

  FinishFunction();

  // Set the right linkage.
  CGM.setFunctionLinkage(MD, Fn);

  // Set the right visibility.
  setThunkVisibility(CGM, MD, Thunk, Fn);
}
/// Report whether the method referred to by \p GD must return 'this';
/// in this ABI implementation that is exactly the constructors.
bool MicrosoftCXXABI::needThisReturn(GlobalDecl GD) {
  return isa<CXXConstructorDecl>(cast<CXXMethodDecl>(GD.getDecl()));
}
/// Try to emit a definition as a global alias for another definition.
///
/// Returns true when the caller must emit a real definition instead (the
/// alias could not be used); returns false when the alias was created and
/// registered under the mangled name of \p AliasDecl.
bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
                                             GlobalDecl TargetDecl) {
  if (!getCodeGenOpts().CXXCtorDtorAliases)
    return true;

  // The alias will use the linkage of the referrent.  If we can't
  // support aliases with that linkage, fail.
  llvm::GlobalValue::LinkageTypes Linkage
    = getFunctionLinkage(cast<FunctionDecl>(AliasDecl.getDecl()));

  switch (Linkage) {
  // We can definitely emit aliases to definitions with external linkage.
  case llvm::GlobalValue::ExternalLinkage:
  case llvm::GlobalValue::ExternalWeakLinkage:
    break;

  // Same with local linkage.
  case llvm::GlobalValue::InternalLinkage:
  case llvm::GlobalValue::PrivateLinkage:
  case llvm::GlobalValue::LinkerPrivateLinkage:
    break;

  // We should try to support linkonce linkages.
  case llvm::GlobalValue::LinkOnceAnyLinkage:
  case llvm::GlobalValue::LinkOnceODRLinkage:
    return true;

  // Other linkages will probably never be supported.
  default:
    return true;
  }

  // Don't alias a target that is weak for the linker; the target could be
  // replaced at link time, which would break the alias.
  llvm::GlobalValue::LinkageTypes TargetLinkage
    = getFunctionLinkage(cast<FunctionDecl>(TargetDecl.getDecl()));

  if (llvm::GlobalValue::isWeakForLinker(TargetLinkage))
    return true;

  // Derive the type for the alias.
  const llvm::PointerType *AliasType
    = getTypes().GetFunctionType(AliasDecl)->getPointerTo();

  // Find the referrent.  Some aliases might require a bitcast, in
  // which case the caller is responsible for ensuring the soundness
  // of these semantics.
  llvm::GlobalValue *Ref = cast<llvm::GlobalValue>(GetAddrOfGlobal(TargetDecl));
  llvm::Constant *Aliasee = Ref;
  if (Ref->getType() != AliasType)
    Aliasee = llvm::ConstantExpr::getBitCast(Ref, AliasType);

  // Create the alias with no name.
  llvm::GlobalAlias *Alias =
    new llvm::GlobalAlias(AliasType, Linkage, "", Aliasee, &getModule());

  // Switch any previous uses to the alias.
  const char *MangledName = getMangledName(AliasDecl);
  llvm::GlobalValue *&Entry = GlobalDeclMap[MangledName];
  if (Entry) {
    assert(Entry->isDeclaration() && "definition already exists for alias");
    assert(Entry->getType() == AliasType &&
           "declaration exists with different type");
    Entry->replaceAllUsesWith(Alias);
    Entry->eraseFromParent();
  }
  Entry = Alias;

  // Finally, set up the alias with its proper name and attributes.
  Alias->setName(MangledName);
  SetCommonAttributes(AliasDecl.getDecl(), Alias);

  return false;
}
/// Emit the constant for a single vtable slot (a vcall/vbase/offset-to-top
/// offset, an RTTI pointer, or a member-function pointer) into \p builder.
/// \p nextVTableThunkIndex tracks the next entry of layout.vtable_thunks()
/// to consume and is advanced when a thunk is emitted for this slot.
void CodeGenVTables::addVTableComponent(
    ConstantArrayBuilder &builder, const VTableLayout &layout, unsigned idx,
    llvm::Constant *rtti, unsigned &nextVTableThunkIndex) {
  auto &component = layout.vtable_components()[idx];

  // Offsets are stored in the vtable as ptrdiff-sized integers cast to i8*.
  auto addOffsetConstant = [&](CharUnits offset) {
    builder.add(llvm::ConstantExpr::getIntToPtr(
        llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()),
        CGM.Int8PtrTy));
  };

  switch (component.getKind()) {
  case VTableComponent::CK_VCallOffset:
    return addOffsetConstant(component.getVCallOffset());

  case VTableComponent::CK_VBaseOffset:
    return addOffsetConstant(component.getVBaseOffset());

  case VTableComponent::CK_OffsetToTop:
    return addOffsetConstant(component.getOffsetToTop());

  case VTableComponent::CK_RTTI:
    return builder.add(llvm::ConstantExpr::getBitCast(rtti, CGM.Int8PtrTy));

  case VTableComponent::CK_FunctionPointer:
  case VTableComponent::CK_CompleteDtorPointer:
  case VTableComponent::CK_DeletingDtorPointer: {
    GlobalDecl GD;

    // Get the right global decl.
    switch (component.getKind()) {
    default:
      llvm_unreachable("Unexpected vtable component kind");
    case VTableComponent::CK_FunctionPointer:
      GD = component.getFunctionDecl();
      break;
    case VTableComponent::CK_CompleteDtorPointer:
      GD = GlobalDecl(component.getDestructorDecl(), Dtor_Complete);
      break;
    case VTableComponent::CK_DeletingDtorPointer:
      GD = GlobalDecl(component.getDestructorDecl(), Dtor_Deleting);
      break;
    }

    if (CGM.getLangOpts().CUDA) {
      // Emit NULL for methods we can't codegen on this
      // side. Otherwise we'd end up with vtable with unresolved
      // references.
      const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
      // OK on device side: functions w/ __device__ attribute
      // OK on host side: anything except __device__-only functions.
      bool CanEmitMethod =
          CGM.getLangOpts().CUDAIsDevice
              ? MD->hasAttr<CUDADeviceAttr>()
              : (MD->hasAttr<CUDAHostAttr>() || !MD->hasAttr<CUDADeviceAttr>());
      if (!CanEmitMethod)
        return builder.addNullPointer(CGM.Int8PtrTy);
      // Method is acceptable, continue processing as usual.
    }

    // Lazily create the runtime function used for pure/deleted virtual
    // slots, and mark it unnamed_addr so identical copies can be merged.
    auto getSpecialVirtualFn = [&](StringRef name) {
      llvm::FunctionType *fnTy =
          llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
      llvm::Constant *fn = CGM.CreateRuntimeFunction(fnTy, name);
      if (auto f = dyn_cast<llvm::Function>(fn))
        f->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
      return llvm::ConstantExpr::getBitCast(fn, CGM.Int8PtrTy);
    };

    llvm::Constant *fnPtr;

    // Pure virtual member functions.
    if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
      if (!PureVirtualFn)
        PureVirtualFn =
            getSpecialVirtualFn(CGM.getCXXABI().GetPureVirtualCallName());
      fnPtr = PureVirtualFn;

    // Deleted virtual member functions.
    } else if (cast<CXXMethodDecl>(GD.getDecl())->isDeleted()) {
      if (!DeletedVirtualFn)
        DeletedVirtualFn =
            getSpecialVirtualFn(CGM.getCXXABI().GetDeletedVirtualCallName());
      fnPtr = DeletedVirtualFn;

    // Thunks.
    } else if (nextVTableThunkIndex < layout.vtable_thunks().size() &&
               layout.vtable_thunks()[nextVTableThunkIndex].first == idx) {
      auto &thunkInfo = layout.vtable_thunks()[nextVTableThunkIndex].second;

      maybeEmitThunkForVTable(GD, thunkInfo);
      nextVTableThunkIndex++;
      fnPtr = CGM.GetAddrOfThunk(GD, thunkInfo);

    // Otherwise we can use the method definition directly.
    } else {
      llvm::Type *fnTy = CGM.getTypes().GetFunctionTypeForVTable(GD);
      fnPtr = CGM.GetAddrOfFunction(GD, fnTy, /*ForVTable=*/true);
    }

    fnPtr = llvm::ConstantExpr::getBitCast(fnPtr, CGM.Int8PtrTy);
    builder.add(fnPtr);
    return;
  }

  case VTableComponent::CK_UnusedFunctionPointer:
    return builder.addNullPointer(CGM.Int8PtrTy);
  }

  llvm_unreachable("Unexpected vtable component kind");
}
// This function does roughly the same thing as GenerateThunk, but in a
// very different way, so that va_start and va_end work correctly.
// Instead of emitting a forwarding call (which cannot forward a variable
// argument list), it clones the target function's IR and patches the
// 'this' store and the return value in place.
// FIXME: This function assumes "this" is the first non-sret LLVM argument of
//        a function, and that there is an alloca built in the entry block
//        for all accesses to "this".
// FIXME: This function assumes there is only one "ret" statement per function.
// FIXME: Cloning isn't correct in the presence of indirect goto!
// FIXME: This implementation of thunks bloats codesize by duplicating the
//        function definition.  There are alternatives:
//        1. Add some sort of stub support to LLVM for cases where we can
//           do a this adjustment, then a sibcall.
//        2. We could transform the definition to take a va_list instead of an
//           actual variable argument list, then have the thunks (including a
//           no-op thunk for the regular definition) call va_start/va_end.
//           There's a bit of per-call overhead for this solution, but it's
//           better for codesize if the definition is long.
void CodeGenFunction::GenerateVarArgsThunk(
    llvm::Function *Fn, const CGFunctionInfo &FnInfo, GlobalDecl GD,
    const ThunkInfo &Thunk) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ResultType = FPT->getReturnType();

  // Get the original function
  assert(FnInfo.isVariadic());
  llvm::Type *Ty = CGM.getTypes().GetFunctionType(FnInfo);
  llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
  llvm::Function *BaseFn = cast<llvm::Function>(Callee);

  // Clone to thunk.  The clone replaces Fn entirely (takes its name and
  // uses), and the original empty Fn is discarded.
  llvm::ValueToValueMapTy VMap;
  llvm::Function *NewFn = llvm::CloneFunction(BaseFn, VMap,
                                              /*ModuleLevelChanges=*/false);
  CGM.getModule().getFunctionList().push_back(NewFn);
  Fn->replaceAllUsesWith(NewFn);
  NewFn->takeName(Fn);
  Fn->eraseFromParent();
  Fn = NewFn;

  // "Initialize" CGF (minimally).
  CurFn = Fn;

  // Get the "this" value; skip the sret argument if present.
  llvm::Function::arg_iterator AI = Fn->arg_begin();
  if (CGM.ReturnTypeUsesSRet(FnInfo))
    ++AI;

  // Find the first store of "this", which will be to the alloca associated
  // with "this".
  llvm::Value *ThisPtr = &*AI;
  llvm::BasicBlock *EntryBB = Fn->begin();
  llvm::Instruction *ThisStore = 0;
  for (llvm::BasicBlock::iterator I = EntryBB->begin(), E = EntryBB->end();
       I != E; I++) {
    if (isa<llvm::StoreInst>(I) && I->getOperand(0) == ThisPtr) {
      ThisStore = cast<llvm::StoreInst>(I);
      break;
    }
  }
  assert(ThisStore && "Store of this should be in entry block?");
  // Adjust "this", if necessary.
  Builder.SetInsertPoint(ThisStore);
  llvm::Value *AdjustedThisPtr =
      CGM.getCXXABI().performThisAdjustment(*this, ThisPtr, Thunk.This);
  ThisStore->setOperand(0, AdjustedThisPtr);

  if (!Thunk.Return.isEmpty()) {
    // Fix up the returned value, if necessary: replace the cloned 'ret'
    // with one returning the adjusted value.
    for (llvm::Function::iterator I = Fn->begin(), E = Fn->end(); I != E; I++) {
      llvm::Instruction *T = I->getTerminator();
      if (isa<llvm::ReturnInst>(T)) {
        RValue RV = RValue::get(T->getOperand(0));
        T->eraseFromParent();
        Builder.SetInsertPoint(&*I);
        RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);
        Builder.CreateRet(RV.getScalarVal());
        break;
      }
    }
  }
}
/// Emit any this-adjusting and covariant-return thunks required for the
/// virtual method \p GD, as recorded by the vtable adjustment info.
void CodeGenModule::BuildThunksForVirtual(GlobalDecl GD) {
  CGVtableInfo::AdjustmentVectorTy *AdjPtr = getVtableInfo().getAdjustments(GD);
  if (!AdjPtr)
    return;
  CGVtableInfo::AdjustmentVectorTy &Adj = *AdjPtr;
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  for (unsigned i = 0; i < Adj.size(); i++) {
    GlobalDecl OGD = Adj[i].first;
    const CXXMethodDecl *OMD = cast<CXXMethodDecl>(OGD.getDecl());
    // Compare the canonical return types of the overridden method and this
    // one; a mismatch means a covariant return needing a return adjustment.
    QualType nc_oret = OMD->getType()->getAs<FunctionType>()->getResultType();
    CanQualType oret = getContext().getCanonicalType(nc_oret);
    QualType nc_ret = MD->getType()->getAs<FunctionType>()->getResultType();
    CanQualType ret = getContext().getCanonicalType(nc_ret);
    ThunkAdjustment ReturnAdjustment;
    if (oret != ret) {
      QualType qD = nc_ret->getPointeeType();
      QualType qB = nc_oret->getPointeeType();
      CXXRecordDecl *D = cast<CXXRecordDecl>(qD->getAs<RecordType>()->getDecl());
      CXXRecordDecl *B = cast<CXXRecordDecl>(qB->getAs<RecordType>()->getDecl());
      ReturnAdjustment = ComputeThunkAdjustment(D, B);
    }
    ThunkAdjustment ThisAdjustment = Adj[i].second;
    // Methods of anonymous-namespace classes get internal linkage.
    bool Extern =
        !cast<CXXRecordDecl>(OMD->getDeclContext())->isInAnonymousNamespace();
    if (!ReturnAdjustment.isEmpty() || !ThisAdjustment.isEmpty()) {
      CovariantThunkAdjustment CoAdj(ThisAdjustment, ReturnAdjustment);
      llvm::Constant *FnConst;
      if (!ReturnAdjustment.isEmpty())
        FnConst = GetAddrOfCovariantThunk(GD, CoAdj);
      else
        FnConst = GetAddrOfThunk(GD, ThisAdjustment);
      if (!isa<llvm::Function>(FnConst)) {
        // The mangled name resolved to a bitcast of some other function,
        // i.e. a declaration with a different type.  Drop it from the decl
        // map, recreate the thunk under the right type, and move all uses
        // of the old function over to the new one.
        llvm::Constant *SubExpr =
            cast<llvm::ConstantExpr>(FnConst)->getOperand(0);
        llvm::Function *OldFn = cast<llvm::Function>(SubExpr);
        std::string Name = OldFn->getNameStr();
        // NOTE(review): erase range includes Name.size() + 1, presumably to
        // cover the terminating NUL the map key was built with — confirm
        // against UniqueMangledName's construction.
        GlobalDeclMap.erase(UniqueMangledName(Name.data(),
                                              Name.data() + Name.size() + 1));
        llvm::Constant *NewFnConst;
        if (!ReturnAdjustment.isEmpty())
          NewFnConst = GetAddrOfCovariantThunk(GD, CoAdj);
        else
          NewFnConst = GetAddrOfThunk(GD, ThisAdjustment);
        llvm::Function *NewFn = cast<llvm::Function>(NewFnConst);
        NewFn->takeName(OldFn);
        llvm::Constant *NewPtrForOldDecl =
            llvm::ConstantExpr::getBitCast(NewFn, OldFn->getType());
        OldFn->replaceAllUsesWith(NewPtrForOldDecl);
        OldFn->eraseFromParent();
        FnConst = NewFn;
      }
      llvm::Function *Fn = cast<llvm::Function>(FnConst);
      if (Fn->isDeclaration()) {
        // Only emit the thunk body once: a declaration means nobody has
        // generated it yet.
        llvm::GlobalVariable::LinkageTypes linktype;
        linktype = llvm::GlobalValue::WeakAnyLinkage;
        if (!Extern)
          linktype = llvm::GlobalValue::InternalLinkage;
        Fn->setLinkage(linktype);
        if (!Features.Exceptions && !Features.ObjCNonFragileABI)
          Fn->addFnAttr(llvm::Attribute::NoUnwind);
        Fn->setAlignment(2);
        CodeGenFunction(*this).GenerateCovariantThunk(Fn, GD, Extern, CoAdj);
      }
    }
  }
}