llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
                                         const TargetInfo::ConstraintInfo &Info,
                                           const Expr *InputExpr,
                                           std::string &ConstraintStr) {
  llvm::Value *Arg;
  if (Info.allowsRegister() || !Info.allowsMemory()) {
    const llvm::Type *Ty = ConvertType(InputExpr->getType());

    if (Ty->isSingleValueType()) {
      Arg = EmitScalarExpr(InputExpr);
    } else {
      InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
      LValue Dest = EmitLValue(InputExpr);

      uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
      if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
        Ty = llvm::IntegerType::get(VMContext, Size);
        Ty = llvm::PointerType::getUnqual(Ty);

        Arg = Builder.CreateLoad(Builder.CreateBitCast(Dest.getAddress(), Ty));
      } else {
        Arg = Dest.getAddress();
        ConstraintStr += '*';
      }
    }
  } else {
    InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
    LValue Dest = EmitLValue(InputExpr);
    Arg = Dest.getAddress();
    ConstraintStr += '*';
  }

  return Arg;
}
/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType() ==
             dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}
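// Illustrative sketch, not Clang source: the kind of C++ that reaches
// EmitAtomicStore. `Wide` is a hypothetical trivially copyable type assumed
// too large to be lock-free on the target, so storing it is expected to take
// the __atomic_store libcall path above, while the plain int store should
// take the native `store atomic ... seq_cst` path.
#include <atomic>

struct Wide { long a, b, c; };  // 24 bytes: typically exceeds the inline width

std::atomic<int>  gi;
std::atomic<Wide> gw;

void atomicStoreDemo(Wide w) {
  gi.store(1, std::memory_order_seq_cst);  // expected: native atomic store
  gw.store(w, std::memory_order_seq_cst);  // expected: __atomic_store libcall
}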
/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // Okay, turn that back into the original value type.
  return atomics.convertIntToValue(load, resultSlot, loc);
}
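// Illustrative sketch, not Clang source: a seq_cst load that reaches
// EmitAtomicLoad. `Big` is a hypothetical type assumed too large to be
// lock-free, so the libcall path above allocates an "atomic-load-temp" and
// the load is expected to lower roughly to
//   __atomic_load(sizeof(Big), &gBig, &temp, __ATOMIC_SEQ_CST);
// with the temporary then converted back to an r-value of the value type.
#include <atomic>

struct Big { long x[4]; };

std::atomic<Big> gBig;

Big atomicLoadDemo() {
  return gBig.load(std::memory_order_seq_cst);  // expected: libcall path
}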
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
  assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");

  EmitFinalDestCopy(E, RValue::getAggregate(Src.getAddress(),
                                            Src.isVolatileQualified()),
                    Ignore);
}
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");
  LValue LHS = CGF.EmitLValue(E->getLHS());

  // We have to special case property setters, otherwise we must have
  // a simple lvalue (no aggregates inside vectors, bitfields).
  if (LHS.isPropertyRef()) {
    llvm::Value *AggLoc = DestPtr;
    if (!AggLoc)
      AggLoc = CGF.CreateMemTemp(E->getRHS()->getType());
    CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
    CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
                            RValue::getAggregate(AggLoc, VolatileDest));
  } else if (LHS.isKVCRef()) {
    llvm::Value *AggLoc = DestPtr;
    if (!AggLoc)
      AggLoc = CGF.CreateMemTemp(E->getRHS()->getType());
    CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
    CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(),
                            RValue::getAggregate(AggLoc, VolatileDest));
  } else {
    bool RequiresGCollection = false;
    if (CGF.getContext().getLangOptions().getGCMode())
      RequiresGCollection = TypeRequiresGCollection(E->getLHS()->getType());

    // Codegen the RHS so that it stores directly into the LHS.
    CGF.EmitAggExpr(E->getRHS(), LHS.getAddress(), LHS.isVolatileQualified(),
                    false, false, RequiresGCollection);
    EmitFinalDestCopy(E, LHS, true);
  }
}
void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}
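// Illustrative sketch, not Clang source: what the Expand argument treatment
// does at the source/IR boundary. Assuming the ABI chooses to expand `Point`
// (a target-dependent decision), a call like usePoint(p) is flattened
// field-by-field, much as ExpandTypeToArgs walks the RecordDecl above, so the
// LLVM-level callee sees the scalars (i32, float) rather than a struct.
struct Point { int x; float y; };

void usePoint(Point p) { (void)p; }  // with Expand: receives (i32, float)

void expandDemo() {
  usePoint(Point{1, 2.0f});          // each field passed as its own scalar
}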
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}
void AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(type)) {
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
  } else if (LV.isSimple()) {
    CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
  }
}
/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}
/// EmitLoadOfLValue - Given an RValue reference for a complex, emit code to
/// load the real and imaginary pieces, returning them as Real/Imag.
ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue,
                                                   SourceLocation loc) {
  assert(lvalue.isSimple() && "non-simple complex l-value?");
  if (lvalue.getType()->isAtomicType())
    return CGF.EmitAtomicLoad(lvalue, loc).getComplexVal();

  llvm::Value *SrcPtr = lvalue.getAddress();
  bool isVolatile = lvalue.isVolatileQualified();
  unsigned AlignR = lvalue.getAlignment().getQuantity();
  ASTContext &C = CGF.getContext();
  QualType ComplexTy = lvalue.getType();
  unsigned ComplexAlign = C.getTypeAlignInChars(ComplexTy).getQuantity();
  unsigned AlignI = std::min(AlignR, ComplexAlign);

  llvm::Value *Real = nullptr, *Imag = nullptr;

  if (!IgnoreReal || isVolatile) {
    llvm::Value *RealP = Builder.CreateStructGEP(SrcPtr, 0,
                                                 SrcPtr->getName() + ".realp");
    Real = Builder.CreateAlignedLoad(RealP, AlignR, isVolatile,
                                     SrcPtr->getName() + ".real");
  }

  if (!IgnoreImag || isVolatile) {
    llvm::Value *ImagP = Builder.CreateStructGEP(SrcPtr, 1,
                                                 SrcPtr->getName() + ".imagp");
    Imag = Builder.CreateAlignedLoad(ImagP, AlignI, isVolatile,
                                     SrcPtr->getName() + ".imag");
  }
  return ComplexPairTy(Real, Imag);
}
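// Illustrative sketch, not Clang source: why the loads above test
// `!IgnoreReal || isVolatile`. When only one component of a complex value is
// used, the other component's load is normally skipped as dead, but a
// volatile-qualified l-value forces both component loads to be emitted.
// (_Complex and __imag__ are C/GNU features that Clang also accepts in C++.)
volatile _Complex double vc;

double imagOnly() {
  return __imag__ vc;  // the .real load is still emitted: vc is volatile
}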
llvm::Value *
CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  QualType Ty = E->getType();
  const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();

  if (E->isTypeOperand()) {
    Ty = E->getTypeOperand();
    CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
    Ty = CanTy.getUnqualifiedType().getNonReferenceType();
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->isPolymorphic())
        return Builder.CreateBitCast(CGM.GenerateRttiRef(RD), LTy);
      return Builder.CreateBitCast(CGM.GenerateRtti(RD), LTy);
    }
    return Builder.CreateBitCast(CGM.GenerateRttiNonClass(Ty), LTy);
  }

  Expr *subE = E->getExprOperand();
  Ty = subE->getType();
  CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
  Ty = CanTy.getUnqualifiedType().getNonReferenceType();
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->isPolymorphic()) {
      // FIXME: if subE is an lvalue do
      LValue Obj = EmitLValue(subE);
      llvm::Value *This = Obj.getAddress();
      LTy = LTy->getPointerTo()->getPointerTo();
      llvm::Value *V = Builder.CreateBitCast(This, LTy);
      // We need to do a zero check for *p, unless it has NonNullAttr.
      // FIXME: PointerType->hasAttr<NonNullAttr>()
      bool CanBeZero = false;
      if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
        if (UO->getOpcode() == UnaryOperator::Deref)
          CanBeZero = true;
      if (CanBeZero) {
        llvm::BasicBlock *NonZeroBlock = createBasicBlock();
        llvm::BasicBlock *ZeroBlock = createBasicBlock();

        llvm::Value *Zero = llvm::Constant::getNullValue(LTy);
        Builder.CreateCondBr(Builder.CreateICmpNE(V, Zero),
                             NonZeroBlock, ZeroBlock);
        EmitBlock(ZeroBlock);
        /// Call __cxa_bad_typeid
        const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
        const llvm::FunctionType *FTy;
        FTy = llvm::FunctionType::get(ResultType, false);
        llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
        Builder.CreateCall(F)->setDoesNotReturn();
        Builder.CreateUnreachable();
        EmitBlock(NonZeroBlock);
      }
      V = Builder.CreateLoad(V, "vtable");
      V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
      V = Builder.CreateLoad(V);
      return V;
    }
    return Builder.CreateBitCast(CGM.GenerateRtti(RD), LTy);
  }
  return Builder.CreateBitCast(CGM.GenerateRttiNonClass(Ty), LTy);
}
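// Illustrative sketch, not Clang source: the null check emitted above guards
// `typeid(*p)` on a polymorphic class. When p is null, the C++ standard
// requires throwing std::bad_typeid, which the code above implements by
// branching to a block that calls __cxa_bad_typeid.
#include <typeinfo>

struct Poly { virtual ~Poly() {} };

const std::type_info &probe(Poly *p) {
  return typeid(*p);  // dereference of a possibly-null pointer: checked path
}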
void AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV,
                                                QualType T) {
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV, T);
  } else if (T->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV, T);
  } else if (T->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(T)) {
    CGF.EmitAnyExpr(E, LV.getAddress(), false);
  } else {
    CGF.EmitStoreThroughLValue(CGF.EmitAnyExpr(E), LV, T);
  }
}
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (!DestPtr && E->getCastKind() != CK_Dynamic) {
    Visit(E->getSubExpr());
    return;
  }

  switch (E->getCastKind()) {
  default: assert(0 && "Unhandled cast kind!");

  case CK_Dynamic: {
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (DestPtr)
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    QualType PtrTy = CGF.getContext().getPointerType(Ty);
    llvm::Value *CastPtr = Builder.CreateBitCast(DestPtr,
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty), Ty);
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    assert(0 && "cannot perform hierarchy conversion in EmitAggExpr: "
                "should have been unpacked before we got here");
    break;
  }

  // FIXME: Remove the CK_Unknown check here.
  case CK_Unknown:
  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("there are no lvalue bit-casts on aggregates");
    break;
  }
}
void AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
}
void AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV,
                                                QualType T) {
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV, T);
  } else if (T->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV, T);
  } else if (T->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(T)) {
    CGF.EmitAggExpr(E, AggValueSlot::forAddr(LV.getAddress(), false, true,
                                             false, Dest.isZeroed()));
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV, T);
  }
}
void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) {
  if (!CGF.hasAggregateLLVMType(T)) {
    // For non-aggregates, we can store zero.
    llvm::Value *Null = llvm::Constant::getNullValue(CGF.ConvertType(T));
    CGF.EmitStoreThroughLValue(RValue::get(Null), LV, T);
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(LV.getAddress(), T);
  }
}
/// EmitStoreOfComplex - Store the specified real/imag parts into the
/// specified value pointer.
void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue,
                                            bool isInit) {
  if (lvalue.getType()->isAtomicType() ||
      (!isInit && CGF.LValueIsSuitableForInlineAtomic(lvalue)))
    return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit);

  Address Ptr = lvalue.getAddress();
  Address RealPtr = CGF.emitAddrOfRealComponent(Ptr, lvalue.getType());
  Address ImagPtr = CGF.emitAddrOfImagComponent(Ptr, lvalue.getType());

  Builder.CreateStore(Val.first, RealPtr, lvalue.isVolatileQualified());
  Builder.CreateStore(Val.second, ImagPtr, lvalue.isVolatileQualified());
}
/// EmitStoreOfComplex - Store the specified real/imag parts into the
/// specified value pointer.
void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue,
                                            bool isInit) {
  if (lvalue.getType()->isAtomicType())
    return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit);

  llvm::Value *Ptr = lvalue.getAddress();
  llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, "real");
  llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, "imag");

  // TODO: alignment
  Builder.CreateStore(Val.first, RealPtr, lvalue.isVolatileQualified());
  Builder.CreateStore(Val.second, ImagPtr, lvalue.isVolatileQualified());
}
ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  ComplexPairTy Val;
  LValue LV = EmitBinAssignLValue(E, Val);

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getContext().getLangOptions().CPlusPlus)
    return Val;

  // If the lvalue is non-volatile, return the computed value of the
  // assignment.
  if (!LV.isVolatileQualified())
    return Val;

  return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
}
void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) {
  if (!CGF.hasAggregateLLVMType(T)) {
    // For non-aggregates, we can store zero.
    llvm::Value *Null = llvm::Constant::getNullValue(CGF.ConvertType(T));
    CGF.EmitStoreThroughLValue(RValue::get(Null), LV, T);
  } else {
    // Otherwise, just memset the whole thing to zero.  This is legal
    // because in LLVM, all default initializers are guaranteed to have a
    // bit pattern of all zeros.
    // FIXME: That isn't true for member pointers!
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitMemSetToZero(LV.getAddress(), T);
  }
}
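// Illustrative sketch, not Clang source: the member-pointer caveat flagged in
// the FIXME above. Under the Itanium C++ ABI a null *data* member pointer is
// represented as -1 rather than 0, so a plain memset-to-zero is not a correct
// default-initialization for a struct containing one; the later versions of
// this function use EmitNullInitialization instead, which can handle types
// that are not zero-initializable.
struct Holder { int Holder::*mp; };

Holder h = {};  // h.mp must hold the bit pattern -1, not all-zero bits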
void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) {
  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(T))
    return;

  if (!CGF.hasAggregateLLVMType(T)) {
    // For non-aggregates, we can store zero.
    llvm::Value *Null = llvm::Constant::getNullValue(CGF.ConvertType(T));
    CGF.EmitStoreThroughLValue(RValue::get(Null), LV, T);
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(LV.getAddress(), T);
  }
}
/// EmitStoreOfComplex - Store the specified real/imag parts into the
/// specified value pointer.
void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue,
                                            bool isInit) {
  if (lvalue.getType()->isAtomicType())
    return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit);

  llvm::Value *Ptr = lvalue.getAddress();
  llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, "real");
  llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, "imag");
  unsigned AlignR = lvalue.getAlignment().getQuantity();
  ASTContext &C = CGF.getContext();
  QualType ComplexTy = lvalue.getType();
  unsigned ComplexAlign = C.getTypeAlignInChars(ComplexTy).getQuantity();
  unsigned AlignI = std::min(AlignR, ComplexAlign);

  Builder.CreateAlignedStore(Val.first, RealPtr, AlignR,
                             lvalue.isVolatileQualified());
  Builder.CreateAlignedStore(Val.second, ImagPtr, AlignI,
                             lvalue.isVolatileQualified());
}
// Compound assignments.
ComplexPairTy ComplexExprEmitter::
EmitCompoundAssign(const CompoundAssignOperator *E,
                   ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
  ComplexPairTy Val;
  LValue LV = EmitCompoundAssignLValue(E, Func, Val);

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getContext().getLangOptions().CPlusPlus)
    return Val;

  // Objective-C property assignment never reloads the value following a store.
  if (LV.isPropertyRef())
    return Val;

  // If the lvalue is non-volatile, return the computed value of the
  // assignment.
  if (!LV.isVolatileQualified())
    return Val;

  return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
}
/// EmitLoadOfLValue - Given an RValue reference for a complex, emit code to
/// load the real and imaginary pieces, returning them as Real/Imag.
ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue,
                                                   SourceLocation loc) {
  assert(lvalue.isSimple() && "non-simple complex l-value?");
  if (lvalue.getType()->isAtomicType())
    return CGF.EmitAtomicLoad(lvalue, loc).getComplexVal();

  Address SrcPtr = lvalue.getAddress();
  bool isVolatile = lvalue.isVolatileQualified();

  llvm::Value *Real = nullptr, *Imag = nullptr;

  if (!IgnoreReal || isVolatile) {
    Address RealP = CGF.emitAddrOfRealComponent(SrcPtr, lvalue.getType());
    Real = Builder.CreateLoad(RealP, isVolatile, SrcPtr.getName() + ".real");
  }

  if (!IgnoreImag || isVolatile) {
    Address ImagP = CGF.emitAddrOfImagComponent(SrcPtr, lvalue.getType());
    Imag = Builder.CreateLoad(ImagP, isVolatile, SrcPtr.getName() + ".imag");
  }

  return ComplexPairTy(Real, Imag);
}
/// EmitLoadOfLValue - Given an RValue reference for a complex, emit code to
/// load the real and imaginary pieces, returning them as Real/Imag.
ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue) {
  assert(lvalue.isSimple() && "non-simple complex l-value?");
  if (lvalue.getType()->isAtomicType())
    return CGF.EmitAtomicLoad(lvalue).getComplexVal();

  llvm::Value *SrcPtr = lvalue.getAddress();
  bool isVolatile = lvalue.isVolatileQualified();

  llvm::Value *Real = 0, *Imag = 0;

  if (!IgnoreReal || isVolatile) {
    llvm::Value *RealP = Builder.CreateStructGEP(SrcPtr, 0,
                                                 SrcPtr->getName() + ".realp");
    Real = Builder.CreateLoad(RealP, isVolatile,
                              SrcPtr->getName() + ".real");
  }

  if (!IgnoreImag || isVolatile) {
    llvm::Value *ImagP = Builder.CreateStructGEP(SrcPtr, 1,
                                                 SrcPtr->getName() + ".imagp");
    Imag = Builder.CreateLoad(ImagP, isVolatile,
                              SrcPtr->getName() + ".imag");
  }
  return ComplexPairTy(Real, Imag);
}
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE) {
  const MemberExpr *ME = cast<MemberExpr>(CE->getCallee());
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType();
  const llvm::Type *Ty =
    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
                                   FPT->isVariadic());
  llvm::Constant *Callee = CGM.GetAddrOfFunction(MD, Ty);

  llvm::Value *BaseValue = 0;

  // There's a deref operator node added in Sema::BuildCallToMemberFunction
  // that's giving the wrong type for -> call exprs so we just ignore them
  // for now.
  if (ME->isArrow())
    return EmitUnsupportedRValue(CE, "C++ member call expr");
  else {
    LValue BaseLV = EmitLValue(ME->getBase());
    BaseValue = BaseLV.getAddress();
  }

  CallArgList Args;

  // Push the 'this' pointer.
  Args.push_back(std::make_pair(RValue::get(BaseValue),
                                MD->getThisType(getContext())));

  EmitCallArgs(Args, FPT, CE->arg_begin(), CE->arg_end());

  QualType ResultType = MD->getType()->getAsFunctionType()->getResultType();
  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args),
                  Callee, Args, MD);
}
ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
                                           QualType DestTy) {
  switch (CK) {
  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");

  // Atomic to non-atomic casts may be more than a no-op for some platforms
  // and for some types.
  case CK_AtomicToNonAtomic:
  case CK_NonAtomicToAtomic:
  case CK_NoOp:
  case CK_LValueToRValue:
  case CK_UserDefinedConversion:
    return Visit(Op);

  case CK_LValueBitCast: {
    LValue origLV = CGF.EmitLValue(Op);
    llvm::Value *V = origLV.getAddress();
    V = Builder.CreateBitCast(V,
                    CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
    return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy,
                                               origLV.getAlignment()));
  }

  case CK_BitCast:
  case CK_BaseToDerived:
  case CK_DerivedToBase:
  case CK_UncheckedDerivedToBase:
  case CK_Dynamic:
  case CK_ToUnion:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_ConstructorConversion:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLEvent:
    llvm_unreachable("invalid cast kind for complex value");

  case CK_FloatingRealToComplex:
  case CK_IntegralRealToComplex: {
    llvm::Value *Elt = CGF.EmitScalarExpr(Op);

    // Convert the input element to the element type of the complex.
    DestTy = DestTy->castAs<ComplexType>()->getElementType();
    Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy);

    // Return (realval, 0).
    return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType()));
  }

  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
    return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
  }

  llvm_unreachable("unknown cast resulting in complex value");
}
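// Illustrative sketch, not Clang source: conversions expected to land in the
// CK_FloatingComplexCast and CK_FloatingRealToComplex arms above. (_Complex
// is a C feature that Clang also accepts in C++ as an extension.)
_Complex float narrow;

_Complex double widenComplex() {
  return narrow;      // CK_FloatingComplexCast: convert each component
}

_Complex double fromReal(double r) {
  return r;           // CK_FloatingRealToComplex: result is (r, 0)
}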
/// Emit a compare-and-exchange op for an atomic type.
///
std::pair<RValue, RValue> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  AtomicInfo Atomics(*this, Obj);

  if (Failure >= Success)
    // Don't assert on undefined behavior.
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);

  auto Alignment = Atomics.getValueAlignment();

  // Check whether we should use a library call.
  if (Atomics.shouldUseLibcall()) {
    auto *ExpectedAddr = Atomics.materializeRValue(Expected);
    // Produce a source address.
    auto *DesiredAddr = Atomics.materializeRValue(Desired);

    // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
    //                                void *desired, int success, int failure);
    CallArgList Args;
    Args.add(RValue::get(Atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    Args.add(RValue::get(EmitCastToVoidPtr(Obj.getAddress())),
             getContext().VoidPtrTy);
    Args.add(RValue::get(EmitCastToVoidPtr(ExpectedAddr)),
             getContext().VoidPtrTy);
    Args.add(RValue::get(EmitCastToVoidPtr(DesiredAddr)),
             getContext().VoidPtrTy);
    Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Success)),
             getContext().IntTy);
    Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Failure)),
             getContext().IntTy);
    auto SuccessFailureRVal = emitAtomicLibcall(
        *this, "__atomic_compare_exchange", getContext().BoolTy, Args);
    auto *PreviousVal =
        Builder.CreateAlignedLoad(ExpectedAddr, Alignment.getQuantity());
    return std::make_pair(RValue::get(PreviousVal), SuccessFailureRVal);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedIntVal = Atomics.convertRValueToInt(Expected);
  auto *DesiredIntVal = Atomics.convertRValueToInt(Desired);

  // Do the atomic store.
  auto *Addr = Atomics.emitCastToAtomicIntPointer(Obj.getAddress());
  auto *Inst = Builder.CreateAtomicCmpXchg(Addr, ExpectedIntVal, DesiredIntVal,
                                           Success, Failure);
  // Other decoration.
  Inst->setVolatile(Obj.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(Atomics.convertIntToValue(PreviousVal, Slot, Loc),
                        RValue::get(SuccessFailureVal));
}
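// Illustrative sketch, not Clang source: a strong compare-exchange that
// reaches EmitAtomicCompareExchange. For a lock-free type this is expected to
// become a native cmpxchg whose result pair is extracted as (previous value,
// success bit); note how the code above also clamps an over-strong failure
// ordering via getStrongestFailureOrdering rather than asserting.
#include <atomic>

std::atomic<int> counter;

bool bumpFromZero() {
  int expected = 0;
  // success: seq_cst, failure: relaxed.
  return counter.compare_exchange_strong(expected, 1,
                                         std::memory_order_seq_cst,
                                         std::memory_order_relaxed);
}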
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::Value *DestPtr = Dest.getAddr();

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    llvm::PointerType *APType =
      cast<llvm::PointerType>(DestPtr->getType());
    llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    uint64_t NumInitElements = E->getNumInits();

    if (E->getNumInits() > 0) {
      QualType T1 = E->getType();
      QualType T2 = E->getInit(0)->getType();
      if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
        EmitAggLoadOfLValue(E->getInit(0));
        return;
      }
    }

    uint64_t NumArrayElements = AType->getNumElements();
    assert(NumInitElements <= NumArrayElements);

    QualType elementType = E->getType().getCanonicalType();
    elementType = CGF.getContext().getQualifiedType(
                    cast<ArrayType>(elementType)->getElementType(),
                    elementType.getQualifiers() + Dest.getQualifiers());

    // DestPtr is an array*.  Construct an elementType* by drilling
    // down a level.
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
    llvm::Value *indices[] = { zero, zero };
    llvm::Value *begin =
      Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");

    // Exception safety requires us to destroy all the
    // already-constructed members if an initializer throws.
    // For that, we'll need an EH cleanup.
    QualType::DestructionKind dtorKind = elementType.isDestructedType();
    llvm::AllocaInst *endOfInit = 0;
    EHScopeStack::stable_iterator cleanup;
    llvm::Instruction *cleanupDominator = 0;
    if (CGF.needsEHCleanup(dtorKind)) {
      // In principle we could tell the cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      endOfInit = CGF.CreateTempAlloca(begin->getType(),
                                       "arrayinit.endOfInit");
      cleanupDominator = Builder.CreateStore(begin, endOfInit);
      CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                           CGF.getDestroyer(dtorKind));
      cleanup = CGF.EHStack.stable_begin();

    // Otherwise, remember that we didn't need a cleanup.
    } else {
      dtorKind = QualType::DK_none;
    }

    llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

    // The 'current element to initialize'.  The invariants on this
    // variable are complicated.  Essentially, after each iteration of
    // the loop, it points to the last initialized element, except
    // that it points to the beginning of the array before any
    // elements have been initialized.
    llvm::Value *element = begin;

    // Emit the explicit initializers.
    for (uint64_t i = 0; i != NumInitElements; ++i) {
      // Advance to the next element.
      if (i > 0) {
        element = Builder.CreateInBoundsGEP(element, one,
                                            "arrayinit.element");

        // Tell the cleanup that it needs to destroy up to this
        // element.  TODO: some of these stores can be trivially
        // observed to be unnecessary.
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      LValue elementLV = CGF.MakeAddrLValue(element, elementType);
      EmitInitializationToLValue(E->getInit(i), elementLV);
    }

    // Check whether there's a non-trivial array-fill expression.
    // Note that this will be a CXXConstructExpr even if the element
    // type is an array (or array of array, etc.) of class type.
    Expr *filler = E->getArrayFiller();
    bool hasTrivialFiller = true;
    if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
      assert(cons->getConstructor()->isDefaultConstructor());
      hasTrivialFiller = cons->getConstructor()->isTrivial();
    }

    // Any remaining elements need to be zero-initialized, possibly
    // using the filler expression.  We can skip this if we're
    // emitting to zeroed memory.
    if (NumInitElements != NumArrayElements &&
        !(Dest.isZeroed() && hasTrivialFiller &&
          CGF.getTypes().isZeroInitializable(elementType))) {

      // Use an actual loop.  This is basically
      //   do { *array++ = filler; } while (array != end);

      // Advance to the start of the rest of the array.
      if (NumInitElements) {
        element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      // Compute the end of the array.
      llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                        llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                   "arrayinit.end");

      llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
      llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

      // Jump into the body.
      CGF.EmitBlock(bodyBB);
      llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
      currentElement->addIncoming(element, entryBB);

      // Emit the actual filler expression.
      LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
      if (filler)
        EmitInitializationToLValue(filler, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);

      // Move on to the next element.
      llvm::Value *nextElement =
        Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

      // Tell the EH cleanup that we finished with the last element.
      if (endOfInit) Builder.CreateStore(nextElement, endOfInit);

      // Leave the loop if we're done.
      llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                               "arrayinit.done");
      llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
      Builder.CreateCondBr(done, endBB, bodyBB);
      currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

      CGF.EmitBlock(endBB);
    }

    // Leave the partial-array cleanup if we entered one.
    if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);

    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = record->field_begin(),
                                   FieldEnd = record->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
    if (NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // We'll need to enter cleanup scopes in case any of the member
  // initializers throw an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = 0;

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned curInitIndex = 0;
  for (RecordDecl::field_iterator field = record->field_begin(),
                                fieldEnd = record->field_end();
       field != fieldEnd; ++field) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;

    // FIXME: volatility
    LValue LV = CGF.EmitLValueForFieldInitialization(DestPtr, *field, 0);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        if (!cleanupDominator)
          cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}
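// Illustrative sketch, not Clang source: an init list that exercises the
// array-filler loop above. Only the first two elements have explicit
// initializers; the remaining six are produced by the array filler (a
// CXXConstructExpr here), emitted as the
//   do { *array++ = filler; } while (array != end);
// style loop -- or skipped entirely when the destination is known-zeroed and
// the filler is trivial.
struct Elem {
  int v;
  Elem() : v(-1) {}
  Elem(int x) : v(x) {}
};

Elem elems[8] = { Elem(1), Elem(2) };  // elements 2..7 come from the filler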
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    if (Dest.isIgnored()) break;

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    QualType PtrTy = CGF.getContext().getPointerType(Ty);
    llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_LValueToRValue: // hope for downstream optimization
  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
    break;

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}