RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result.
  if (CGM.ReturnTypeUsesSret(CallInfo))
    Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast. This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT =
        cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size()) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it. This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  llvm::BasicBlock *InvokeDest = getInvokeDest();
  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
      Builder.CreateStore(CI, V);
      return RValue::getAggregate(V);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    // FIXME: Avoid the conversion through memory if possible.
    llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
    CreateCoercedStore(CI, V, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(V, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(V);
    return RValue::get(EmitLoadOfScalar(V, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}
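
// Illustrative sketch (not part of the build): when a call's struct result
// is returned indirectly, ReturnTypeUsesSret() is true above and the result
// slot becomes a hidden first argument. Given roughly
//
//   struct Big { int x[8]; };
//   struct Big make(void);
//   struct Big b = make();
//
// the emitted IR on such a target looks approximately like
//
//   %tmp = alloca %struct.Big
//   call void @make(%struct.Big* sret %tmp)
//
// and the ABIArgInfo::Indirect return path hands the temporary back as the
// aggregate RValue. The exact lowering depends on the target ABI.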
void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    GlobalDecl GD, const ThunkInfo &Thunk) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ResultType = FPT->getResultType();
  QualType ThisType = MD->getThisType(getContext());

  FunctionArgList FunctionArgs;

  // FIXME: It would be nice if more of this code could be shared with
  // CodeGenFunction::GenerateCode.

  // Create the implicit 'this' parameter declaration.
  CurGD = GD;
  CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResultType, FunctionArgs);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I) {
    ParmVarDecl *Param = *I;

    FunctionArgs.push_back(Param);
  }

  StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
                SourceLocation());

  CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
  CXXThisValue = CXXABIThisValue;

  // Adjust the 'this' pointer if necessary.
  llvm::Value *AdjustedThisPtr =
    PerformTypeAdjustment(*this, LoadCXXThis(),
                          Thunk.This.NonVirtual,
                          Thunk.This.VCallOffsetOffset,
                          /*IsReturnAdjustment*/false);

  CallArgList CallArgs;

  // Add our adjusted 'this' pointer.
  CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param);
  }

  // Get our callee.
  llvm::Type *Ty =
    CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD));
  llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);

#ifndef NDEBUG
  const CGFunctionInfo &CallFnInfo =
    CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT,
                                        RequiredArgs::forPrototypePlus(FPT, 1));
  assert(CallFnInfo.getRegParm() == FnInfo.getRegParm() &&
         CallFnInfo.isNoReturn() == FnInfo.isNoReturn() &&
         CallFnInfo.getCallingConvention() == FnInfo.getCallingConvention());
  assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
         similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
                 FnInfo.getReturnInfo(), FnInfo.getReturnType()));
  assert(CallFnInfo.arg_size() == FnInfo.arg_size());
  for (unsigned i = 0, e = FnInfo.arg_size(); i != e; ++i)
    assert(similar(CallFnInfo.arg_begin()[i].info,
                   CallFnInfo.arg_begin()[i].type,
                   FnInfo.arg_begin()[i].info, FnInfo.arg_begin()[i].type));
#endif

  // Determine whether we have a return value slot to use.
  ReturnValueSlot Slot;
  if (!ResultType->isVoidType() &&
      FnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      hasAggregateLLVMType(CurFnInfo->getReturnType()))
    Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());

  // Now emit our call.
  RValue RV = EmitCall(FnInfo, Callee, Slot, CallArgs, MD);

  if (!Thunk.Return.isEmpty())
    RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);

  if (!ResultType->isVoidType() && Slot.isNull())
    CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);

  // Disable the final ARC autorelease.
  AutoreleaseResult = false;

  FinishFunction();

  // Set the right linkage.
  CGM.setFunctionLinkage(MD, Fn);

  // Set the right visibility.
  setThunkVisibility(CGM, MD, Thunk, Fn);
}
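
// Illustrative sketch (not part of the build): a 'this'-adjusting thunk
// arises under multiple inheritance. For
//
//   struct A { virtual void f(); };
//   struct B { virtual void g(); };
//   struct C : A, B { virtual void g(); };
//
// a virtual call through a B* that actually points at the B subobject of a
// C must enter C::g() with a C 'this'. The entry in the B-in-C vtable
// therefore points at a thunk that subtracts the offset of B within C
// (the Thunk.This.NonVirtual adjustment above; for virtual bases a vcall
// offset is loaded via Thunk.This.VCallOffsetOffset instead) before
// forwarding to the real method body.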
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value. TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      const llvm::Type *LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant *Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls. Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing, aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value *V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Create a temporary alloca to hold the argument; the rest of
        // codegen expects to access aggregates & complex values by
        // reference.
        V = CreateTempAlloca(ConvertTypeForMem(Ty));
        Builder.CreateStore(AI, V);
      } else {
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
                                           Arg->getName() + ".addr");
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + llvm::Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }

      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Coerce: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
      // result in a new alloca anyway, so we could just store into that
      // directly if we broke the abstraction down more.
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce");
      CreateCoercedStore(AI, V, *this);

      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}
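
// Illustrative sketch (not part of the build): the typesAreCompatible()
// checks above exist for K&R-style parameters, where the default argument
// promotions apply. For
//
//   void a(x) short x; { ... }
//
// the caller passes a promoted int, so the semantic parameter type Ty (int)
// differs from the declared type of 'x' (short), and the prolog calls
// EmitScalarConversion() to truncate the incoming value before binding it
// with EmitParmDecl().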