// Emit code for an 'omp atomic read' construct: v = x;
// The read of X is performed atomically (unless X lives in a global
// register, which cannot be loaded atomically); the store into V is an
// ordinary store dispatched on V's evaluation kind.
static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                  const Expr *X, const Expr *V,
                                  SourceLocation Loc) {
  // v = x;
  assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
  assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  LValue VLValue = CGF.EmitLValue(V);
  // A global-register lvalue cannot go through the atomic-load path.
  RValue Res = XLValue.isGlobalReg() ? CGF.EmitLoadOfLValue(XLValue, Loc)
                                     : CGF.EmitAtomicLoad(XLValue, Loc);
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().EmitOMPFlush(CGF, llvm::None, Loc);
  // Store the loaded value into V, converting between X's and V's types.
  switch (CGF.getEvaluationKind(V->getType())) {
  case TEK_Scalar:
    CGF.EmitStoreOfScalar(
        convertToScalarValue(CGF, Res, X->getType(), V->getType()), VLValue);
    break;
  case TEK_Complex:
    CGF.EmitStoreOfComplex(
        convertToComplexValue(CGF, Res, X->getType(), V->getType()), VLValue,
        /*isInit=*/false);
    break;
  case TEK_Aggregate:
    llvm_unreachable("Must be a scalar or complex.");
  }
}
// Emit an aggregate assignment, either through an ObjC property setter or by
// evaluating the RHS directly into the LHS storage and then copying to the
// emitter's destination slot if one was requested.
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType()) &&
         "Invalid assignment");

  // FIXME: __block variables need the RHS evaluated first!
  LValue LHS = CGF.EmitLValue(E->getLHS());

  // We have to special case property setters, otherwise we must have
  // a simple lvalue (no aggregates inside vectors, bitfields).
  if (LHS.isPropertyRef()) {
    // Evaluate the RHS into a temporary slot and hand it to the setter.
    AggValueSlot Slot = EnsureSlot(E->getRHS()->getType());
    CGF.EmitAggExpr(E->getRHS(), Slot);
    CGF.EmitStoreThroughPropertyRefLValue(Slot.asRValue(), LHS);
  } else {
    bool GCollection = false;
    if (CGF.getContext().getLangOptions().getGCMode())
      GCollection = TypeRequiresGCollection(E->getLHS()->getType());

    // Codegen the RHS so that it stores directly into the LHS.
    AggValueSlot LHSSlot = AggValueSlot::forLValue(LHS, true, GCollection);
    CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
    EmitFinalDestCopy(E, LHS, true);
  }
}
// Lower one input operand of an inline-asm statement to an llvm::Value,
// appending '*' to the constraint string when the operand is passed
// indirectly (by address).
llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
                                           const TargetInfo::ConstraintInfo &Info,
                                           const Expr *InputExpr,
                                           std::string &ConstraintStr) {
  llvm::Value *Arg;
  if (Info.allowsRegister() || !Info.allowsMemory()) {
    const llvm::Type *Ty = ConvertType(InputExpr->getType());

    if (Ty->isSingleValueType()) {
      // Scalar operand: evaluate it directly to a value.
      Arg = EmitScalarExpr(InputExpr);
    } else {
      InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
      LValue Dest = EmitLValue(InputExpr);

      uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
      if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
        // Small power-of-two-sized aggregate: load it through an integer of
        // the same width so it can be passed in a register.
        Ty = llvm::IntegerType::get(VMContext, Size);
        Ty = llvm::PointerType::getUnqual(Ty);

        Arg = Builder.CreateLoad(Builder.CreateBitCast(Dest.getAddress(),
                                                       Ty));
      } else {
        // Otherwise pass the operand's address as an indirect operand.
        Arg = Dest.getAddress();
        ConstraintStr += '*';
      }
    }
  } else {
    // Memory-only constraint: always pass the operand's address.
    InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
    LValue Dest = EmitLValue(InputExpr);
    Arg = Dest.getAddress();
    ConstraintStr += '*';
  }

  return Arg;
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) { assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc"); EmitFinalDestCopy(E, RValue::getAggregate(Src.getAddress(), Src.isVolatileQualified()), Ignore); }
// Emit the initializer for a global variable with static storage, choosing
// the store strategy from the variable's type (scalar / complex / aggregate)
// and from any ObjC GC ownership qualifiers.
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         llvm::Constant *DeclPtr) {
  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  ASTContext &Context = CGF.getContext();

  CharUnits alignment = Context.getDeclAlign(&D);
  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type, alignment);

  const Expr *Init = D.getInit();
  if (!CGF.hasAggregateLLVMType(type)) {
    CodeGenModule &CGM = CGF.CGM;
    // GC-qualified ObjC scalars must go through the runtime write barriers.
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr,
                                                D.isThreadSpecified());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(Init, DeclPtr, lv.isVolatile());
  } else {
    CGF.EmitAggExpr(Init,
                    AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased));
  }
}
// Flatten an aggregate r-value of structure type \p Ty into a sequence of
// scalar call arguments appended to \p Args, recursing into nested
// aggregate fields.
void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                       llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      // Nested aggregate: expand its fields in place.
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      // Leaf scalar: load it and pass it as its own argument.
      // NOTE: this inner RV intentionally shadows the parameter.
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}
// If an l-value is found, return it. Otherwise, return an r-value. bool SemanticAnalysis::svalue(Expression *expr, SValue *outp) { // Demand only RValues. SaveAndSet<ValueContext> context(&value_context_, kLValue); LValue lval; // Between setting the "outparams" and returning, nothing should call us. assert(!outp_ && !hir_); outp_ = &lval; expr->accept(this); outp_ = nullptr; // We should not have received both an r-value and an l-value. assert(!hir_ || lval.kind() == LValue::Error); if (!hir_ && lval.kind() == LValue::Error) return false; if (hir_) *outp = SValue(ReturnAndVoid(hir_)); else *outp = SValue(lval); return true; }
// Emit the initializer for a global variable with static storage,
// dispatching on the type's evaluation kind (scalar / complex / aggregate)
// and honoring ObjC GC ownership qualifiers for scalars.
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    // GC-qualified ObjC scalars must go through the runtime write barriers.
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(Init,
                    AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}
/// ExpandTypeFromArgs - Reconstruct a structure value of type \p Ty from a
/// run of expanded function arguments starting at \p AI, storing each leaf
/// scalar through the corresponding field lvalue. Returns the iterator just
/// past the last argument consumed.
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue FieldLV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      // Aggregate field: recurse, consuming as many arguments as it needs.
      AI = ExpandTypeFromArgs(FT, FieldLV, AI);
    } else {
      // Scalar field: consume exactly one argument.
      EmitStoreThroughLValue(RValue::get(AI), FieldLV, FT);
      ++AI;
    }
  }

  return AI;
}
/// Copy an r-value into memory as part of storing to an atomic type. /// This needs to create a bit-pattern suitable for atomic operations. void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const { // If we have an r-value, the rvalue should be of the atomic type, // which means that the caller is responsible for having zeroed // any padding. Just do an aggregate copy of that type. if (rvalue.isAggregate()) { CGF.EmitAggregateCopy(dest.getAddress(), rvalue.getAggregateAddr(), getAtomicType(), (rvalue.isVolatileQualified() || dest.isVolatileQualified()), dest.getAlignment()); return; } // Okay, otherwise we're copying stuff. // Zero out the buffer if necessary. emitMemSetZeroIfNecessary(dest); // Drill past the padding if present. dest = projectValue(dest); // Okay, store the rvalue in. if (rvalue.isScalar()) { CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true); } else { CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true); } }
// Initialize the memory designated by \p LV from expression \p E, choosing
// the emission path from the destination type (reference / complex /
// aggregate / scalar) and skipping redundant zero stores.
void AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    // References: bind the initializer, then store the bound reference.
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(type)) {
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                               AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
  } else if (LV.isSimple()) {
    CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
  } else {
    // Non-simple scalar lvalue (e.g. bitfield): store the computed value.
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
  }
}
/// Evaluate the aggregate expression \p E into a fresh memory temporary and
/// return an lvalue designating that temporary.
LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  QualType Ty = E->getType();
  assert(hasAggregateLLVMType(Ty) && "Invalid argument!");

  llvm::Value *Slot = CreateMemTemp(Ty);
  LValue Result = MakeAddrLValue(Slot, Ty);
  EmitAggExpr(E, Slot, Result.isVolatileQualified());
  return Result;
}
// Build an lvalue for the ivar located \p Offset bytes past \p BaseValue.
// Non-bit-field ivars get a plain address lvalue; bit-field ivars get a
// synthesized CGBitFieldInfo describing a byte-0 access.
LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
                                               const ObjCInterfaceDecl *OID,
                                               llvm::Value *BaseValue,
                                               const ObjCIvarDecl *Ivar,
                                               unsigned CVRQualifiers,
                                               llvm::Value *Offset) {
  // Compute (type*) ( (char *) BaseValue + Offset)
  QualType IvarTy = Ivar->getType();
  llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
  llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
  V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");

  if (!Ivar->isBitField()) {
    V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
    LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
    LV.getQuals().addCVRQualifiers(CVRQualifiers);
    return LV;
  }

  // We need to compute an access strategy for this bit-field. We are given the
  // offset to the first byte in the bit-field, the sub-byte offset is taken
  // from the original layout. We reuse the normal bit-field access strategy by
  // treating this as an access to a struct where the bit-field is in byte 0,
  // and adjust the containing type size as appropriate.
  //
  // FIXME: Note that currently we make a very conservative estimate of the
  // alignment of the bit-field, because (a) it is not clear what guarantees the
  // runtime makes us, and (b) we don't have a way to specify that the struct is
  // at an alignment plus offset.
  //
  // Note, there is a subtle invariant here: we can only call this routine on
  // non-synthesized ivars but we may be called for synthesized ivars. However,
  // a synthesized ivar can never be a bit-field, so this is safe.
  uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, nullptr, Ivar);
  uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
  uint64_t AlignmentBits = CGF.CGM.getTarget().getCharAlign();
  uint64_t BitFieldSize = Ivar->getBitWidthValue(CGF.getContext());
  CharUnits StorageSize = CGF.CGM.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(BitOffset + BitFieldSize, AlignmentBits));
  CharUnits Alignment = CGF.CGM.getContext().toCharUnitsFromBits(AlignmentBits);

  // Allocate a new CGBitFieldInfo object to describe this access.
  //
  // FIXME: This is incredibly wasteful, these should be uniqued or part of some
  // layout object. However, this is blocked on other cleanups to the
  // Objective-C code, so for now we just live with allocating a bunch of these
  // objects.
  CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
      CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset,
                               BitFieldSize,
                               CGF.CGM.getContext().toBits(StorageSize),
                               CharUnits::fromQuantity(0)));

  V = CGF.Builder.CreateBitCast(V,
                                llvm::Type::getIntNPtrTy(CGF.getLLVMContext(),
                                                         Info->StorageSize));
  return LValue::MakeBitfield(V, *Info,
                              IvarTy.withCVRQualifiers(CVRQualifiers),
                              Alignment);
}
// Emit a C++ typeid expression. For type operands and non-polymorphic
// expression operands the RTTI descriptor is resolved statically; for a
// polymorphic glvalue operand the type_info pointer is read out of the
// object's vtable at runtime, with a null check where the operand could be
// a dereferenced null pointer.
llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  QualType Ty = E->getType();
  const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();

  if (E->isTypeOperand()) {
    // typeid(T): resolve statically from the canonical, unqualified,
    // non-reference type.
    Ty = E->getTypeOperand();
    CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
    Ty = CanTy.getUnqualifiedType().getNonReferenceType();
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->isPolymorphic())
        return Builder.CreateBitCast(CGM.GenerateRttiRef(RD), LTy);
      return Builder.CreateBitCast(CGM.GenerateRtti(RD), LTy);
    }
    return Builder.CreateBitCast(CGM.GenerateRttiNonClass(Ty), LTy);
  }

  Expr *subE = E->getExprOperand();
  Ty = subE->getType();
  CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
  Ty = CanTy.getUnqualifiedType().getNonReferenceType();
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->isPolymorphic()) {
      // FIXME: if subE is an lvalue do
      LValue Obj = EmitLValue(subE);
      llvm::Value *This = Obj.getAddress();
      LTy = LTy->getPointerTo()->getPointerTo();
      llvm::Value *V = Builder.CreateBitCast(This, LTy);

      // We need to do a zero check for *p, unless it has NonNullAttr.
      // FIXME: PointerType->hasAttr<NonNullAttr>()
      bool CanBeZero = false;
      if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
        if (UO->getOpcode() == UnaryOperator::Deref)
          CanBeZero = true;
      if (CanBeZero) {
        llvm::BasicBlock *NonZeroBlock = createBasicBlock();
        llvm::BasicBlock *ZeroBlock = createBasicBlock();

        llvm::Value *Zero = llvm::Constant::getNullValue(LTy);
        Builder.CreateCondBr(Builder.CreateICmpNE(V, Zero),
                             NonZeroBlock, ZeroBlock);
        EmitBlock(ZeroBlock);
        /// Call __cxa_bad_typeid
        const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
        const llvm::FunctionType *FTy;
        FTy = llvm::FunctionType::get(ResultType, false);
        llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
        Builder.CreateCall(F)->setDoesNotReturn();
        Builder.CreateUnreachable();
        EmitBlock(NonZeroBlock);
      }

      // Load the vtable pointer, then the type_info slot that sits just
      // before the address point (index -1).
      V = Builder.CreateLoad(V, "vtable");
      V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
      V = Builder.CreateLoad(V);
      return V;
    }
    return Builder.CreateBitCast(CGM.GenerateRtti(RD), LTy);
  }
  return Builder.CreateBitCast(CGM.GenerateRttiNonClass(Ty), LTy);
}
// Emit an aggregate-valued cast. Most casts simply forward to the operand;
// dynamic_cast and the GCC to-union extension need special handling.
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  // With no destination slot, every cast except dynamic_cast can just
  // evaluate its operand.
  if (!DestPtr && E->getCastKind() != CK_Dynamic) {
    Visit(E->getSubExpr());
    return;
  }

  switch (E->getCastKind()) {
  default: assert(0 && "Unhandled cast kind!");

  case CK_Dynamic: {
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (DestPtr)
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    QualType PtrTy = CGF.getContext().getPointerType(Ty);
    llvm::Value *CastPtr = Builder.CreateBitCast(DestPtr,
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty), Ty);
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    assert(0 && "cannot perform hierarchy conversion in EmitAggExpr: "
                "should have been unpacked before we got here");
    break;
  }

  // FIXME: Remove the CK_Unknown check here.
  case CK_Unknown:
  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("there are no lvalue bit-casts on aggregates");
    break;
  }
}
/// Zero-fill the atomic storage behind \p dest when its LLVM type requires
/// it, so subsequent atomic operations observe a deterministic bit-pattern.
void AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *DestAddr = dest.getAddress();
  llvm::Type *ElemTy = DestAddr->getType()->getPointerElementType();
  if (requiresMemSetZero(ElemTy)) {
    llvm::Value *ZeroByte = llvm::ConstantInt::get(CGF.Int8Ty, 0);
    CGF.Builder.CreateMemSet(DestAddr, ZeroByte, AtomicSizeInBits / 8,
                             dest.getAlignment().getQuantity());
  }
}
/// Emit a materializeForSet callback that stores the value from the
/// result buffer back into the l-value.
SILFunction *
MaterializeForSetEmitter::createSetterCallback(SILFunction &F,
                                               const TypeLowering *indicesTL,
                                               CanType indicesFormalType) {
  return createCallback(F, [&](SILGenFunction &SGF, SILLocation loc,
                               SILValue value, SILValue callbackBuffer,
                               SILValue self) {
    // If this is a subscript, we need to handle the indices in the
    // callback storage.
    RValue indices;
    if (indicesTL) {
      assert(isa<SubscriptDecl>(WitnessStorage));
      SILType indicesTy = indicesTL->getLoweredType();

      // Enter a cleanup to deallocate the callback storage.
      SGF.Cleanups.pushCleanup<DeallocateValueBuffer>(indicesTy,
                                                      callbackBuffer);

      // Project the value out, loading if necessary, and take
      // ownership of it.
      SILValue indicesV =
          SGF.B.createProjectValueBuffer(loc, indicesTy, callbackBuffer);
      if (indicesTL->isLoadable() || !SGF.silConv.useLoweredAddresses())
        indicesV = indicesTL->emitLoad(SGF.B, loc, indicesV,
                                       LoadOwnershipQualifier::Take);
      ManagedValue mIndices =
          SGF.emitManagedRValueWithCleanup(indicesV, *indicesTL);

      // Explode as an r-value.
      indices = RValue(SGF, loc, indicesFormalType, mIndices);
    }

    // The callback gets the address of 'self' at +0.
    ManagedValue mSelf = ManagedValue::forLValue(self);

    // That's enough to build the l-value.
    LValue lvalue = buildLValue(SGF, loc, mSelf, std::move(indices),
                                AccessKind::Write);

    // The callback gets the value at +1.
    auto &valueTL = SGF.getTypeLowering(lvalue.getTypeOfRValue());
    value = SGF.B.createPointerToAddress(
        loc, value, valueTL.getLoweredType().getAddressType(),
        /*isStrict*/ true, /*isInvariant*/ false);
    if (valueTL.isLoadable() || !SGF.silConv.useLoweredAddresses())
      value = valueTL.emitLoad(SGF.B, loc, value,
                               LoadOwnershipQualifier::Take);
    ManagedValue mValue = SGF.emitManagedRValueWithCleanup(value, valueTL);
    RValue rvalue(SGF, loc, lvalue.getSubstFormalType(), mValue);

    // Finally, call the setter.
    SGF.emitAssignToLValue(loc, std::move(rvalue), std::move(lvalue));
  });
}
// Emit the initializer for a global variable with static storage. This
// UPC-aware variant additionally deduces strict/relaxed qualifiers for
// shared variables and routes shared declarations through a dedicated
// lvalue builder.
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  QualType type = D.getType();

  // Deduce UPC strict or relaxed from context, if needed
  if (CGF.getContext().getLangOpts().UPC) {
    Qualifiers Quals = type.getQualifiers();
    if (Quals.hasShared() && !Quals.hasStrict() && !Quals.hasRelaxed()) {
      if (D.isUPCInitStrict())
        Quals.addStrict();
      else
        Quals.addRelaxed();
      type = CGF.getContext().getQualifiedType(type.getUnqualifiedType(),
                                               Quals);
    }
  }

  // Shared variables need a UPC-specific lvalue; everything else uses a
  // plain address lvalue.
  LValue lv;
  if (type.getQualifiers().hasShared())
    lv = CGF.EmitSharedVarDeclLValue(DeclPtr, type);
  else
    lv = CGF.MakeAddrLValue(DeclPtr, type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    // GC-qualified ObjC scalars must go through the runtime write barriers.
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(Init,
                    AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}
// Type-check an assignment: resolve the destination l-value, evaluate the
// right-hand side, coerce it to the destination type, and emit an HStore.
// On any failure an error has already been reported and hir_ is left unset.
void SemanticAnalysis::visitAssignment(Assignment *node) {
  LValue target;
  if (!lvalue(node->lvalue(), &target))
    return;

  HIR *value = rvalue(node->expression());
  value = coerce(value, target.type(), Coerce_Assign);
  if (value == nullptr)
    return;

  hir_ = new (pool_) HStore(node, target.type(), node->token(), target, value);
}
/// EmitStoreOfComplex - Store the specified real/imag parts into the /// specified value pointer. void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue, bool isInit) { if (lvalue.getType()->isAtomicType() || (!isInit && CGF.LValueIsSuitableForInlineAtomic(lvalue))) return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit); Address Ptr = lvalue.getAddress(); Address RealPtr = CGF.emitAddrOfRealComponent(Ptr, lvalue.getType()); Address ImagPtr = CGF.emitAddrOfImagComponent(Ptr, lvalue.getType()); Builder.CreateStore(Val.first, RealPtr, lvalue.isVolatileQualified()); Builder.CreateStore(Val.second, ImagPtr, lvalue.isVolatileQualified()); }
// Emit an aggregate assignment, with special handling for __block variables
// (whose RHS must be evaluated first) and ObjC property setters.
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType()) &&
         "Invalid assignment");

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getLHS()))
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
      if (VD->hasAttr<BlocksAttr>() &&
          E->getRHS()->HasSideEffects(CGF.getContext())) {
        // When __block variable on LHS, the RHS must be evaluated first
        // as it may change the 'forwarding' field via call to Block_copy.
        LValue RHS = CGF.EmitLValue(E->getRHS());
        LValue LHS = CGF.EmitLValue(E->getLHS());
        bool GCollection = false;
        if (CGF.getContext().getLangOptions().getGCMode())
          GCollection = TypeRequiresGCollection(E->getLHS()->getType());
        Dest = AggValueSlot::forLValue(LHS, true, GCollection);
        EmitFinalDestCopy(E, RHS, true);
        return;
      }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // We have to special case property setters, otherwise we must have
  // a simple lvalue (no aggregates inside vectors, bitfields).
  if (LHS.isPropertyRef()) {
    const ObjCPropertyRefExpr *RE = LHS.getPropertyRefExpr();
    QualType ArgType = RE->getSetterArgType();
    RValue Src;
    // A reference-typed setter argument binds the RHS rather than copying it.
    if (ArgType->isReferenceType())
      Src = CGF.EmitReferenceBindingToExpr(E->getRHS(), 0);
    else {
      AggValueSlot Slot = EnsureSlot(E->getRHS()->getType());
      CGF.EmitAggExpr(E->getRHS(), Slot);
      Src = Slot.asRValue();
    }
    CGF.EmitStoreThroughPropertyRefLValue(Src, LHS);
  } else {
    bool GCollection = false;
    if (CGF.getContext().getLangOptions().getGCMode())
      GCollection = TypeRequiresGCollection(E->getLHS()->getType());

    // Codegen the RHS so that it stores directly into the LHS.
    AggValueSlot LHSSlot = AggValueSlot::forLValue(LHS, true, GCollection);
    CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
    EmitFinalDestCopy(E, LHS, true);
  }
}
// Emit a complex assignment and produce its result value.
// In C the result is simply the stored r-value; in C++ a volatile-qualified
// destination must be re-read to yield the result.
ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  ComplexPairTy RHSVal;
  LValue DestLV = EmitBinAssignLValue(E, RHSVal);

  bool MustReload =
      CGF.getLangOpts().CPlusPlus && DestLV.isVolatileQualified();
  if (!MustReload)
    return RHSVal;

  return EmitLoadOfLValue(DestLV);
}
/// EmitStoreOfComplex - Store the specified real/imag parts into the /// specified value pointer. void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue, bool isInit) { if (lvalue.getType()->isAtomicType()) return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit); llvm::Value *Ptr = lvalue.getAddress(); llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, "real"); llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, "imag"); // TODO: alignment Builder.CreateStore(Val.first, RealPtr, lvalue.isVolatileQualified()); Builder.CreateStore(Val.second, ImagPtr, lvalue.isVolatileQualified()); }
// Compound assignments. ComplexPairTy ComplexExprEmitter:: EmitCompoundAssign(const CompoundAssignOperator *E, ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){ RValue Val; LValue LV = EmitCompoundAssignLValue(E, Func, Val); // The result of an assignment in C is the assigned r-value. if (!CGF.getLangOpts().CPlusPlus) return Val.getComplexVal(); // If the lvalue is non-volatile, return the computed value of the assignment. if (!LV.isVolatileQualified()) return Val.getComplexVal(); return EmitLoadOfLValue(LV, E->getExprLoc()); }
// Initialize the memory designated by \p LV (of type \p T) from expression
// \p E, dispatching on the destination type.
void AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV,
                                                QualType T) {
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV, T);
  } else if (T->isReferenceType()) {
    // References: bind the initializer, then store the bound reference.
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV, T);
  } else if (T->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(T)) {
    CGF.EmitAnyExpr(E, LV.getAddress(), false);
  } else {
    CGF.EmitStoreThroughLValue(CGF.EmitAnyExpr(E), LV, T);
  }
}
// Emit an aggregate assignment, with special cases for ObjC property
// setters and key-value-coding references; otherwise the RHS is emitted
// directly into the LHS storage.
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType()) &&
         "Invalid assignment");
  LValue LHS = CGF.EmitLValue(E->getLHS());

  // We have to special case property setters, otherwise we must have
  // a simple lvalue (no aggregates inside vectors, bitfields).
  if (LHS.isPropertyRef()) {
    // Evaluate the RHS into a temporary, then invoke the property setter.
    llvm::Value *AggLoc = DestPtr;
    if (!AggLoc)
      AggLoc = CGF.CreateMemTemp(E->getRHS()->getType());
    CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
    CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
                            RValue::getAggregate(AggLoc, VolatileDest));
  } else if (LHS.isKVCRef()) {
    // Same pattern, but through the key-value-coding setter.
    llvm::Value *AggLoc = DestPtr;
    if (!AggLoc)
      AggLoc = CGF.CreateMemTemp(E->getRHS()->getType());
    CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
    CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(),
                            RValue::getAggregate(AggLoc, VolatileDest));
  } else {
    bool RequiresGCollection = false;
    if (CGF.getContext().getLangOptions().getGCMode())
      RequiresGCollection = TypeRequiresGCollection(E->getLHS()->getType());

    // Codegen the RHS so that it stores directly into the LHS.
    CGF.EmitAggExpr(E->getRHS(), LHS.getAddress(), LHS.isVolatileQualified(),
                    false, false, RequiresGCollection);
    EmitFinalDestCopy(E, LHS, true);
  }
}
// Emit a complex assignment and produce its result value.
// C: assignment yields the stored r-value. Objective-C property assignment
// never reloads the value following a store. C++ reloads the result only
// through a volatile-qualified lvalue.
ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  ComplexPairTy StoredVal;
  LValue DestLV = EmitBinAssignLValue(E, StoredVal);

  if (!CGF.getContext().getLangOptions().CPlusPlus ||
      DestLV.isPropertyRef() || !DestLV.isVolatileQualified())
    return StoredVal;

  return EmitLoadOfComplex(DestLV.getAddress(), DestLV.isVolatileQualified());
}
// Zero-initialize the memory designated by \p lv, skipping the work when
// the destination slot is already known to be zeroed.
void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType Ty = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(Ty))
    return;

  if (CGF.hasAggregateLLVMType(Ty)) {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), Ty);
  } else {
    // For non-aggregates a single store of the type's null value suffices.
    llvm::Value *NullVal = llvm::Constant::getNullValue(CGF.ConvertType(Ty));
    CGF.EmitStoreThroughLValue(RValue::get(NullVal), lv);
  }
}
// Initialize the memory designated by \p LV (of type \p T) from expression
// \p E, dispatching on the destination type and skipping stores that are
// redundant because the destination is already zeroed.
void AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV,
                                                QualType T) {
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV, T);
  } else if (T->isReferenceType()) {
    // References: bind the initializer, then store the bound reference.
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV, T);
  } else if (T->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(T)) {
    CGF.EmitAggExpr(E, AggValueSlot::forAddr(LV.getAddress(), false, true,
                                             false, Dest.isZeroed()));
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV, T);
  }
}
// Zero-initialize the memory designated by \p LV, whose type is \p T.
void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) {
  if (CGF.hasAggregateLLVMType(T)) {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(LV.getAddress(), T);
  } else {
    // For non-aggregates a single store of the type's null value suffices.
    llvm::Value *Zero = llvm::Constant::getNullValue(CGF.ConvertType(T));
    CGF.EmitStoreThroughLValue(RValue::get(Zero), LV, T);
  }
}