/// CreateCVRType - Get the CVR-qualified type from the cache or create
/// a new one if necessary.
llvm::DIType CGDebugInfo::CreateCVRType(QualType Ty, llvm::DICompileUnit Unit) {
  // We will create one derived type for one qualifier and recurse to handle
  // any additional ones.
  llvm::DIType FromTy;
  unsigned Tag;
  if (Ty.isConstQualified()) {
    Tag = llvm::dwarf::DW_TAG_const_type;
    Ty.removeConst();
    FromTy = getOrCreateType(Ty, Unit);
  } else if (Ty.isVolatileQualified()) {
    Tag = llvm::dwarf::DW_TAG_volatile_type;
    Ty.removeVolatile();
    FromTy = getOrCreateType(Ty, Unit);
  } else {
    assert(Ty.isRestrictQualified() && "Unknown type qualifier for debug info");
    Tag = llvm::dwarf::DW_TAG_restrict_type;
    Ty.removeRestrict();
    FromTy = getOrCreateType(Ty, Unit);
  }

  // No need to fill in the Name, Line, Size, Alignment, or Offset for
  // CVR derived types.
  return DebugFactory.CreateDerivedType(Tag, Unit, "", llvm::DICompileUnit(),
                                        0, 0, 0, 0, 0, FromTy);
}
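// Example (a hedged sketch, not part of the function above): for a
// declaration like the one below, the qualifiers are peeled one at a time,
// so the debug info forms a chain of derived types:
//   DW_TAG_const_type -> DW_TAG_volatile_type -> the base type 'int'.
const volatile int Counter = 0; // const is peeled first, then volatile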
void CodeGenFunction::EmitVarDecl(const DeclExpr* D) {
  // TODO: array types?
  QualType qt = D->getType();
  llvm::AllocaInst* inst =
      Builder.CreateAlloca(CGM.ConvertType(qt.getTypePtr()), 0, D->getName());
  // TODO: smarter alignment
  inst->setAlignment(D->getType()->getWidth());
  const Expr* I = D->getInitValue();
  if (I) {
    llvm::Value* val = EmitExpr(I);
    Builder.CreateStore(val, inst, qt.isVolatileQualified());
  }
}
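// Example (hedged): a local declaration the function above would lower to
// an alloca named after the variable plus a store of the evaluated
// initializer; the volatile qualifier propagates to the store.
void demo_local() {
  volatile int x = 3; // alloca "x"; then a volatile store of 3
  (void)x;
}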
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               llvm::Value *NewPtr, llvm::Value *NumElements) {
  QualType AllocType = E->getAllocatedType();

  if (!E->isArray()) {
    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
      CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, NewPtr,
                                 E->constructor_arg_begin(),
                                 E->constructor_arg_end());
      return;
    }

    // We have a POD type.
    if (E->getNumConstructorArgs() == 0)
      return;

    assert(E->getNumConstructorArgs() == 1 &&
           "Can only have one argument to initializer of POD type.");

    const Expr *Init = E->getConstructorArg(0);

    if (!CGF.hasAggregateLLVMType(AllocType))
      CGF.Builder.CreateStore(CGF.EmitScalarExpr(Init), NewPtr);
    else if (AllocType->isAnyComplexType())
      CGF.EmitComplexExprIntoAddr(Init, NewPtr,
                                  AllocType.isVolatileQualified());
    else
      CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
    return;
  }

  if (CXXConstructorDecl *Ctor = E->getConstructor())
    CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr);
}
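// Example (hedged; type names are illustrative): source forms exercising
// each path in EmitNewInitializer above.
struct S { S(); S(int); };
void demo_new() {
  S   *a = new S(1);     // non-array with constructor: EmitCXXConstructorCall
  int *b = new int;      // POD, no initializer: nothing to emit
  int *c = new int(42);  // POD with one argument: a plain scalar store
  S   *d = new S[4];     // array: EmitCXXAggrConstructorCall per element
  delete a; delete b; delete c; delete[] d;
}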
QualType TypeResolver::resolveUnresolved(QualType Q) const {
  const Type* T = Q.getTypePtr();
  switch (Q->getTypeClass()) {
  case TC_BUILTIN:
    return Q;
  case TC_POINTER:
    {
      // Don't create a new type if it's not needed.
      const PointerType* P = cast<PointerType>(T);
      QualType t1 = P->getPointeeType();
      QualType Result = resolveUnresolved(t1);
      if (t1 == Result) return Q;
      // TODO qualifiers
      return typeContext.getPointerType(Result);
    }
  case TC_ARRAY:
    {
      const ArrayType* A = cast<ArrayType>(T);
      QualType t1 = A->getElementType();
      QualType Result = resolveUnresolved(t1);
      if (t1 == Result) return Q;
      // TODO qualifiers
      return typeContext.getArrayType(Result, A->getSizeExpr(), false,
                                      A->isIncremental());
    }
  case TC_UNRESOLVED:
    {
      const UnresolvedType* U = cast<UnresolvedType>(T);
      TypeDecl* TD = U->getDecl();
      assert(TD);
      QualType result = TD->getType();
      if (Q.isConstQualified()) result.addConst();
      if (Q.isVolatileQualified()) result.addVolatile();
      return result;
    }
  case TC_ALIAS:
  case TC_STRUCT:
  case TC_ENUM:
  case TC_FUNCTION:
    return Q;
  case TC_MODULE:
    assert(0 && "TBD");
    return Q;
  }
  return Q;
}
/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
  // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
  assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
         "Invalid argument to EmitParmDecl");
  QualType Ty = D.getType();

  llvm::Value *DeclPtr;
  if (!Ty->isConstantSizeType()) {
    // Variable sized values are always passed by reference.
    DeclPtr = Arg;
  } else {
    // A fixed sized single-value variable becomes an alloca in the entry
    // block.
    const llvm::Type *LTy = ConvertTypeForMem(Ty);
    if (LTy->isSingleValueType()) {
      // TODO: Alignment
      std::string Name = D.getNameAsString();
      Name += ".addr";
      DeclPtr = CreateTempAlloca(LTy);
      DeclPtr->setName(Name.c_str());

      // Store the initial value into the alloca.
      EmitStoreOfScalar(Arg, DeclPtr, Ty.isVolatileQualified(), Ty);
    } else {
      // Otherwise, if this is an aggregate, just use the input pointer.
      DeclPtr = Arg;
    }
    Arg->setName(D.getNameAsString());
  }

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for param declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(D.getLocation());
    DI->EmitDeclareOfArgVariable(&D, DeclPtr, Builder);
  }
}
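// Example (hedged sketch; names and attributes simplified): for a scalar
// parameter, EmitParmDecl above creates an entry-block alloca named
// "<param>.addr" and stores the incoming SSA value into it. Roughly:
void take(int n) { (void)n; }
// entry block (approximate IR):
//   %n.addr = alloca i32
//   store i32 %n, i32* %n.addr
// An aggregate parameter would instead reuse the incoming pointer as-is.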
void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr,
                                                const ThunkInfo *Thunk) {
  assert(isa<CXXMethodDecl>(CurGD.getDecl()) &&
         "Please use a new CGF for this thunk");
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CurGD.getDecl());

  // Adjust the 'this' pointer if necessary.
  llvm::Value *AdjustedThisPtr =
      Thunk ? CGM.getCXXABI().performThisAdjustment(
                  *this, LoadCXXThisAddress(), Thunk->This)
            : LoadCXXThis();

  if (CurFnInfo->usesInAlloca()) {
    // We don't handle return adjusting thunks, because they require us to call
    // the copy constructor.  For now, fall through and pretend the return
    // adjustment was empty so we don't crash.
    if (Thunk && !Thunk->Return.isEmpty()) {
      CGM.ErrorUnsupported(
          MD, "non-trivial argument copy for return-adjusting thunk");
    }
    EmitMustTailThunk(MD, AdjustedThisPtr, CalleePtr);
    return;
  }

  // Start building CallArgs.
  CallArgList CallArgs;
  QualType ThisType = MD->getThisType(getContext());
  CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);

  if (isa<CXXDestructorDecl>(MD))
    CGM.getCXXABI().adjustCallArgsForDestructorThunk(*this, CurGD, CallArgs);

#ifndef NDEBUG
  unsigned PrefixArgs = CallArgs.size() - 1;
#endif

  // Add the rest of the arguments.
  for (const ParmVarDecl *PD : MD->parameters())
    EmitDelegateCallArg(CallArgs, PD, SourceLocation());

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

#ifndef NDEBUG
  const CGFunctionInfo &CallFnInfo = CGM.getTypes().arrangeCXXMethodCall(
      CallArgs, FPT, RequiredArgs::forPrototypePlus(FPT, 1, MD), PrefixArgs);
  assert(CallFnInfo.getRegParm() == CurFnInfo->getRegParm() &&
         CallFnInfo.isNoReturn() == CurFnInfo->isNoReturn() &&
         CallFnInfo.getCallingConvention() ==
             CurFnInfo->getCallingConvention());
  assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
         similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
                 CurFnInfo->getReturnInfo(), CurFnInfo->getReturnType()));
  assert(CallFnInfo.arg_size() == CurFnInfo->arg_size());
  for (unsigned i = 0, e = CurFnInfo->arg_size(); i != e; ++i)
    assert(similar(CallFnInfo.arg_begin()[i].info,
                   CallFnInfo.arg_begin()[i].type,
                   CurFnInfo->arg_begin()[i].info,
                   CurFnInfo->arg_begin()[i].type));
#endif

  // Determine whether we have a return value slot to use.
  QualType ResultType = CGM.getCXXABI().HasThisReturn(CurGD)
                            ? ThisType
                            : CGM.getCXXABI().hasMostDerivedReturn(CurGD)
                                  ? CGM.getContext().VoidPtrTy
                                  : FPT->getReturnType();
  ReturnValueSlot Slot;
  if (!ResultType->isVoidType() &&
      CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(CurFnInfo->getReturnType()))
    Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());

  // Now emit our call.
  llvm::Instruction *CallOrInvoke;
  CGCallee Callee = CGCallee::forDirect(CalleePtr, MD);
  RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, &CallOrInvoke);

  // Consider return adjustment if we have ThunkInfo.
  if (Thunk && !Thunk->Return.isEmpty())
    RV = PerformReturnAdjustment(*this, ResultType, RV, *Thunk);
  else if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CallOrInvoke))
    Call->setTailCallKind(llvm::CallInst::TCK_Tail);

  // Emit return.
  if (!ResultType->isVoidType() && Slot.isNull())
    CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);

  // Disable the final ARC autorelease.
  AutoreleaseResult = false;

  FinishThunk();
}
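// Example (hedged; class names are illustrative): a covariant override that
// needs the return adjustment handled near the end of the function above.
// The thunk sitting in Base2's vtable slot adjusts 'this' before the call
// and converts the returned Derived* back to Base2* afterwards.
struct Base1 { virtual Base1 *clone(); };
struct Base2 { virtual Base2 *clone(); };
struct Derived : Base1, Base2 { virtual Derived *clone(); };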
void CodeGenFunction::EmitCallAndReturnForThunk(GlobalDecl GD,
                                                llvm::Value *Callee,
                                                const ThunkInfo *Thunk) {
  assert(isa<CXXMethodDecl>(CurGD.getDecl()) &&
         "Please use a new CGF for this thunk");
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());

  // Adjust the 'this' pointer if necessary.
  llvm::Value *AdjustedThisPtr =
      Thunk ? CGM.getCXXABI().performThisAdjustment(
                  *this, LoadCXXThis(), Thunk->This)
            : LoadCXXThis();

  // Start building CallArgs.
  CallArgList CallArgs;
  QualType ThisType = MD->getThisType(getContext());
  CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);

  if (isa<CXXDestructorDecl>(MD))
    CGM.getCXXABI().adjustCallArgsForDestructorThunk(*this, GD, CallArgs);

  // Add the rest of the arguments.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I)
    EmitDelegateCallArg(CallArgs, *I, (*I)->getLocStart());

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

#ifndef NDEBUG
  const CGFunctionInfo &CallFnInfo =
      CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT,
                                      RequiredArgs::forPrototypePlus(FPT, 1));
  assert(CallFnInfo.getRegParm() == CurFnInfo->getRegParm() &&
         CallFnInfo.isNoReturn() == CurFnInfo->isNoReturn() &&
         CallFnInfo.getCallingConvention() ==
             CurFnInfo->getCallingConvention());
  assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
         similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
                 CurFnInfo->getReturnInfo(), CurFnInfo->getReturnType()));
  assert(CallFnInfo.arg_size() == CurFnInfo->arg_size());
  for (unsigned i = 0, e = CurFnInfo->arg_size(); i != e; ++i)
    assert(similar(CallFnInfo.arg_begin()[i].info,
                   CallFnInfo.arg_begin()[i].type,
                   CurFnInfo->arg_begin()[i].info,
                   CurFnInfo->arg_begin()[i].type));
#endif

  // Determine whether we have a return value slot to use.
  QualType ResultType =
      CGM.getCXXABI().HasThisReturn(GD) ? ThisType : FPT->getReturnType();
  ReturnValueSlot Slot;
  if (!ResultType->isVoidType() &&
      CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(CurFnInfo->getReturnType()))
    Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());

  // Now emit our call.
  RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, MD);

  // Consider return adjustment if we have ThunkInfo.
  if (Thunk && !Thunk->Return.isEmpty())
    RV = PerformReturnAdjustment(*this, ResultType, RV, *Thunk);

  // Emit return.
  if (!ResultType->isVoidType() && Slot.isNull())
    CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);

  // Disable the final ARC autorelease.
  AutoreleaseResult = false;

  FinishFunction();
}
QualType Sema::CheckPointerToMemberOperands(Expr *&lex, Expr *&rex,
                                            SourceLocation Loc,
                                            bool isIndirect) {
  const char *OpSpelling = isIndirect ? "->*" : ".*";
  // C++ 5.5p2
  //   The binary operator .* [p3: ->*] binds its second operand, which shall
  //   be of type "pointer to member of T" (where T is a completely-defined
  //   class type) [...]
  QualType RType = rex->getType();
  const MemberPointerType *MemPtr = RType->getAsMemberPointerType();
  if (!MemPtr) {
    Diag(Loc, diag::err_bad_memptr_rhs)
      << OpSpelling << RType << rex->getSourceRange();
    return QualType();
  } else if (RequireCompleteType(Loc, QualType(MemPtr->getClass(), 0),
                                 diag::err_memptr_rhs_incomplete,
                                 rex->getSourceRange()))
    return QualType();

  QualType Class(MemPtr->getClass(), 0);

  // C++ 5.5p2
  //   [...] to its first operand, which shall be of class T or of a class of
  //   which T is an unambiguous and accessible base class. [p3: a pointer to
  //   such a class]
  QualType LType = lex->getType();
  if (isIndirect) {
    if (const PointerType *Ptr = LType->getAsPointerType())
      LType = Ptr->getPointeeType().getNonReferenceType();
    else {
      Diag(Loc, diag::err_bad_memptr_lhs)
        << OpSpelling << 1 << LType << lex->getSourceRange();
      return QualType();
    }
  }

  if (Context.getCanonicalType(Class).getUnqualifiedType() !=
      Context.getCanonicalType(LType).getUnqualifiedType()) {
    BasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/false,
                    /*DetectVirtual=*/false);
    // FIXME: Would it be useful to print full ambiguity paths, or is that
    // overkill?
    if (!IsDerivedFrom(LType, Class, Paths) ||
        Paths.isAmbiguous(Context.getCanonicalType(Class))) {
      Diag(Loc, diag::err_bad_memptr_lhs)
        << OpSpelling << (int)isIndirect << lex->getType()
        << lex->getSourceRange();
      return QualType();
    }
  }

  // C++ 5.5p2
  //   The result is an object or a function of the type specified by the
  //   second operand.
  // The cv qualifiers are the union of those in the pointer and the left
  // side, in accordance with 5.5p5 and 5.2.5.
  // FIXME: This returns a dereferenced member function pointer as a normal
  // function type. However, the only operation valid on such functions is
  // calling them. There's also a GCC extension to get a function pointer to
  // the thing, which is another complication, because this type - unlike the
  // type that is the result of this expression - takes the class as the
  // first argument.
  // We probably need a "MemberFunctionClosureType" or something like that.
  QualType Result = MemPtr->getPointeeType();
  if (LType.isConstQualified())
    Result.addConst();
  if (LType.isVolatileQualified())
    Result.addVolatile();
  return Result;
}
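// Example (hedged; names are illustrative): the cv-qualifier union computed
// at the end of the function above. Accessing a member through a const
// object makes the result const-qualified, so writing through it is
// ill-formed even though the member pointer itself is to non-const int.
struct T { int m; };
void demo_memptr(const T &obj, T *p, int T::*pm) {
  int v = obj.*pm;   // OK: result type is 'const int', reading is fine
  // obj.*pm = 1;    // error: result is const via the left operand
  p->*pm = 1;        // OK: 'p' points to non-const T
  (void)v;
}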
void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    GlobalDecl GD, const ThunkInfo &Thunk) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ResultType = FPT->getResultType();
  QualType ThisType = MD->getThisType(getContext());

  FunctionArgList FunctionArgs;

  // FIXME: It would be nice if more of this code could be shared with
  // CodeGenFunction::GenerateCode.

  // Create the implicit 'this' parameter declaration.
  CurGD = GD;
  CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResultType, FunctionArgs);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I) {
    ParmVarDecl *Param = *I;
    FunctionArgs.push_back(Param);
  }

  StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
                SourceLocation());

  CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
  CXXThisValue = CXXABIThisValue;

  // Adjust the 'this' pointer if necessary.
  llvm::Value *AdjustedThisPtr =
      PerformTypeAdjustment(*this, LoadCXXThis(),
                            Thunk.This.NonVirtual,
                            Thunk.This.VCallOffsetOffset,
                            /*IsReturnAdjustment=*/false);

  CallArgList CallArgs;

  // Add our adjusted 'this' pointer.
  CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param);
  }

  // Get our callee.
  llvm::Type *Ty =
      CGM.getTypes().GetFunctionType(
          CGM.getTypes().arrangeGlobalDeclaration(GD));
  llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);

#ifndef NDEBUG
  const CGFunctionInfo &CallFnInfo =
      CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT,
                                      RequiredArgs::forPrototypePlus(FPT, 1));
  assert(CallFnInfo.getRegParm() == FnInfo.getRegParm() &&
         CallFnInfo.isNoReturn() == FnInfo.isNoReturn() &&
         CallFnInfo.getCallingConvention() == FnInfo.getCallingConvention());
  assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
         similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
                 FnInfo.getReturnInfo(), FnInfo.getReturnType()));
  assert(CallFnInfo.arg_size() == FnInfo.arg_size());
  for (unsigned i = 0, e = FnInfo.arg_size(); i != e; ++i)
    assert(similar(CallFnInfo.arg_begin()[i].info,
                   CallFnInfo.arg_begin()[i].type,
                   FnInfo.arg_begin()[i].info, FnInfo.arg_begin()[i].type));
#endif

  // Determine whether we have a return value slot to use.
  ReturnValueSlot Slot;
  if (!ResultType->isVoidType() &&
      FnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      hasAggregateLLVMType(CurFnInfo->getReturnType()))
    Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());

  // Now emit our call.
  RValue RV = EmitCall(FnInfo, Callee, Slot, CallArgs, MD);

  if (!Thunk.Return.isEmpty())
    RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);

  if (!ResultType->isVoidType() && Slot.isNull())
    CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);

  // Disable the final ARC autorelease.
  AutoreleaseResult = false;

  FinishFunction();

  // Set the right linkage.
  CGM.setFunctionLinkage(MD, Fn);

  // Set the right visibility.
  setThunkVisibility(CGM, MD, Thunk, Fn);
}
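// Example (hedged; names are illustrative): a layout that forces a thunk
// like the ones GenerateThunk emits. D::f occupies B's vtable slot for
// B::f, so the thunk must shift 'this' from the B subobject back to the
// start of D before delegating to the real D::f.
struct A { virtual void f(); };
struct B { virtual void f(); };
struct D : A, B { virtual void f(); }; // D::f in B's slot needs a thunk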
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (!DestPtr) {
    Visit(E->getSubExpr());
    return;
  }

  switch (E->getCastKind()) {
  default: assert(0 && "Unhandled cast kind!");

  case CastExpr::CK_ToUnion: {
    // GCC union extension
    QualType PtrTy =
        CGF.getContext().getPointerType(E->getSubExpr()->getType());
    llvm::Value *CastPtr = Builder.CreateBitCast(DestPtr,
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(),
                               LValue::MakeAddr(CastPtr, Qualifiers()),
                               E->getSubExpr()->getType());
    break;
  }

  // FIXME: Remove the CK_Unknown check here.
  case CastExpr::CK_Unknown:
  case CastExpr::CK_NoOp:
  case CastExpr::CK_UserDefinedConversion:
  case CastExpr::CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CastExpr::CK_NullToMemberPointer: {
    // If the subexpression's type is the C++0x nullptr_t, emit the
    // subexpression, which may have side effects.
    if (E->getSubExpr()->getType()->isNullPtrType())
      Visit(E->getSubExpr());

    const llvm::Type *PtrDiffTy =
        CGF.ConvertType(CGF.getContext().getPointerDiffType());

    llvm::Value *NullValue = llvm::Constant::getNullValue(PtrDiffTy);
    llvm::Value *Ptr = Builder.CreateStructGEP(DestPtr, 0, "ptr");
    Builder.CreateStore(NullValue, Ptr, VolatileDest);

    llvm::Value *Adj = Builder.CreateStructGEP(DestPtr, 1, "adj");
    Builder.CreateStore(NullValue, Adj, VolatileDest);
    break;
  }

  case CastExpr::CK_BitCast: {
    // This must be a member function pointer cast.
    Visit(E->getSubExpr());
    break;
  }

  case CastExpr::CK_DerivedToBaseMemberPointer:
  case CastExpr::CK_BaseToDerivedMemberPointer: {
    QualType SrcType = E->getSubExpr()->getType();

    llvm::Value *Src = CGF.CreateMemTemp(SrcType, "tmp");
    CGF.EmitAggExpr(E->getSubExpr(), Src, SrcType.isVolatileQualified());

    llvm::Value *SrcPtr = Builder.CreateStructGEP(Src, 0, "src.ptr");
    SrcPtr = Builder.CreateLoad(SrcPtr);

    llvm::Value *SrcAdj = Builder.CreateStructGEP(Src, 1, "src.adj");
    SrcAdj = Builder.CreateLoad(SrcAdj);

    llvm::Value *DstPtr = Builder.CreateStructGEP(DestPtr, 0, "dst.ptr");
    Builder.CreateStore(SrcPtr, DstPtr, VolatileDest);

    llvm::Value *DstAdj = Builder.CreateStructGEP(DestPtr, 1, "dst.adj");

    // Now see if we need to update the adjustment.
    const CXXRecordDecl *BaseDecl =
        cast<CXXRecordDecl>(SrcType->getAs<MemberPointerType>()->
                            getClass()->getAs<RecordType>()->getDecl());
    const CXXRecordDecl *DerivedDecl =
        cast<CXXRecordDecl>(E->getType()->getAs<MemberPointerType>()->
                            getClass()->getAs<RecordType>()->getDecl());
    if (E->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
      std::swap(DerivedDecl, BaseDecl);

    if (llvm::Constant *Adj =
            CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl, BaseDecl)) {
      if (E->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
        SrcAdj = Builder.CreateSub(SrcAdj, Adj, "adj");
      else
        SrcAdj = Builder.CreateAdd(SrcAdj, Adj, "adj");
    }

    Builder.CreateStore(SrcAdj, DstAdj, VolatileDest);
    break;
  }
  }
}
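// Example (hedged; names are illustrative): a member function pointer
// conversion taking the BaseToDerivedMemberPointer path above. Under the
// Itanium ABI such a pointer is a {ptr, adj} pair, and converting from
// Base2's type to Derived's must grow 'adj' by Base2's offset in Derived.
struct MB1 { virtual void f(); };
struct MB2 { virtual void g(); };
struct MDerived : MB1, MB2 {};
void (MB2::*mpb)() = &MB2::g;
void (MDerived::*mpd)() = mpb; // adjustment += offset of MB2 in MDerived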
ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
                                           QualType DestTy) {
  switch (CK) {
  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  case CK_GetObjCProperty: {
    LValue LV = CGF.EmitLValue(Op);
    assert(LV.isPropertyRef() && "Unknown LValue type!");
    return CGF.EmitLoadOfPropertyRefLValue(LV).getComplexVal();
  }

  case CK_NoOp:
  case CK_LValueToRValue:
  case CK_UserDefinedConversion:
    return Visit(Op);

  case CK_LValueBitCast: {
    llvm::Value *V = CGF.EmitLValue(Op).getAddress();
    V = Builder.CreateBitCast(V,
                   CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
    // FIXME: Are the qualifiers correct here?
    return EmitLoadOfComplex(V, DestTy.isVolatileQualified());
  }

  case CK_BitCast:
  case CK_BaseToDerived:
  case CK_DerivedToBase:
  case CK_UncheckedDerivedToBase:
  case CK_Dynamic:
  case CK_ToUnion:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ConstructorConversion:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_AnyPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_ObjCProduceObject:
  case CK_ObjCConsumeObject:
  case CK_ObjCReclaimReturnedObject:
    llvm_unreachable("invalid cast kind for complex value");

  case CK_FloatingRealToComplex:
  case CK_IntegralRealToComplex: {
    llvm::Value *Elt = CGF.EmitScalarExpr(Op);

    // Convert the input element to the element type of the complex.
    DestTy = DestTy->getAs<ComplexType>()->getElementType();
    Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy);

    // Return (realval, 0).
    return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType()));
  }

  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
    return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
  }

  llvm_unreachable("unknown cast resulting in complex value");
}
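// Example (hedged): a cast taking the CK_FloatingRealToComplex path above.
// The scalar is converted to the complex element type and paired with a
// zero imaginary part, i.e. (2.0, 0.0). Note _Complex in C++ is a GNU
// extension that Clang accepts.
void demo_complex() {
  double r = 2.0;
  _Complex double c = r; // emitted as ComplexPairTy(2.0, 0.0)
  (void)c;
}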