/// Determine whether an r-value must be spilled to a stack slot so that
/// it can be reloaded at a point the defining block does not dominate.
bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
  // Complex r-values are held as a pair of llvm::Values and are always
  // spilled.
  if (!rv.isScalar() && !rv.isAggregate())
    return true;

  // Scalars and aggregates carry a single llvm::Value; defer to the
  // generic dominance check on that value.
  llvm::Value *value = rv.isScalar() ? rv.getScalarVal()
                                     : rv.getAggregateAddr();
  return DominatingLLVMValue::needsSaving(value);
}
/// Spill an r-value so it can be reloaded at a point its defining block
/// does not dominate.  Values that already dominate everywhere are
/// recorded as literals; everything else is stored into a fresh alloca
/// and recorded by address.
DominatingValue<RValue>::saved_type
DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
  if (rv.isScalar()) {
    llvm::Value *V = rv.getScalarVal();

    // These automatically dominate and don't need to be saved.
    if (!DominatingLLVMValue::needsSaving(V))
      return saved_type(V, ScalarLiteral);

    // Everything else needs an alloca.
    llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
    CGF.Builder.CreateStore(V, addr);
    return saved_type(addr, ScalarAddress);
  }

  if (rv.isComplex()) {
    CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
    // Build an anonymous { real, imag } struct to hold both halves in one
    // temporary.  The trailing (void*) nullptr terminates the variadic
    // StructType::get overload.
    llvm::Type *ComplexTy =
      llvm::StructType::get(V.first->getType(), V.second->getType(),
                            (void*) nullptr);
    llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
    // Store the two components into fields 0 and 1 of the temporary.
    CGF.Builder.CreateStore(V.first, CGF.Builder.CreateStructGEP(addr, 0));
    CGF.Builder.CreateStore(V.second, CGF.Builder.CreateStructGEP(addr, 1));
    return saved_type(addr, ComplexAddress);
  }

  assert(rv.isAggregate());
  // For aggregates we save the *address* of the aggregate, not its contents.
  llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile?
  if (!DominatingLLVMValue::needsSaving(V))
    return saved_type(V, AggregateLiteral);

  // The address itself doesn't dominate; spill the pointer to an alloca.
  llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
  CGF.Builder.CreateStore(V, addr);
  return saved_type(addr, AggregateAddress);
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) { assert(Src.isAggregate() && "value must be aggregate value!"); // If the result is ignored, don't copy from the value. if (DestPtr == 0) { if (!Src.isVolatileQualified() || (IgnoreResult && Ignore)) return; // If the source is volatile, we must read from it; to do that, we need // some place to put it. DestPtr = CGF.CreateMemTemp(E->getType(), "agg.tmp"); } if (RequiresGCollection) { CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, DestPtr, Src.getAggregateAddr(), E->getType()); return; } // If the result of the assignment is used, copy the LHS there also. // FIXME: Pass VolatileDest as well. I think we also need to merge volatile // from the source as well, as we can't eliminate it if either operand // is volatile, unless copy has volatile for both source and destination.. CGF.EmitAggregateCopy(DestPtr, Src.getAggregateAddr(), E->getType(), VolatileDest|Src.isVolatileQualified()); }
/// Copy an r-value into memory as part of storing to an atomic type. /// This needs to create a bit-pattern suitable for atomic operations. void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const { // If we have an r-value, the rvalue should be of the atomic type, // which means that the caller is responsible for having zeroed // any padding. Just do an aggregate copy of that type. if (rvalue.isAggregate()) { CGF.EmitAggregateCopy(dest.getAddress(), rvalue.getAggregateAddr(), getAtomicType(), (rvalue.isVolatileQualified() || dest.isVolatileQualified()), dest.getAlignment()); return; } // Okay, otherwise we're copying stuff. // Zero out the buffer if necessary. emitMemSetZeroIfNecessary(dest); // Drill past the padding if present. dest = projectValue(dest); // Okay, store the rvalue in. if (rvalue.isScalar()) { CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true); } else { CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true); } }
/// ExpandTypeToArgs - Recursively expand a structure-typed r-value into its
/// individual fields, appending each leaf scalar value to Args.  Used when
/// the ABI passes a struct as separate arguments.
///
/// \param Ty   the structure type being expanded; must be a RecordType.
/// \param RV   the aggregate r-value holding the struct's address.
/// \param Args receives one llvm::Value* per leaf scalar field, in order.
void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                 llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      // Aggregate fields are expanded recursively.
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      // Leaf scalar field: load it and pass it directly.  (Named FieldRV
      // rather than RV to avoid shadowing the parameter.)
      RValue FieldRV = EmitLoadOfLValue(LV, FT);
      assert(FieldRV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(FieldRV.getScalarVal());
    }
  }
}
/// Store an r-value into the function's return slot and branch to the
/// return block through any active cleanups.
void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  // Dispatch on the r-value kind; exactly one of these holds.
  if (RV.isScalar())
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  else if (RV.isComplex())
    StoreComplexToAddr(RV.getComplexVal(), ReturnValue, false);
  else
    EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);

  EmitBranchThroughCleanup(ReturnBlock);
}
/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
///
/// \param rvalue the value to store (scalar, complex, or aggregate of the
///               atomic type).
/// \param dest   the atomic l-value being stored to.
/// \param isInit true for an initializing store, which does not need to be
///               atomic and is emitted as a plain copy.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    // NOTE(review): this path always uses seq_cst ordering; presumably
    // callers needing weaker orderings go elsewhere — confirm at call sites.
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively: convert the r-value to an integer of
  // the atomic width so it can be stored with a single instruction.
  llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  // NOTE(review): isInit is always false here (the isInit case returned
  // early above), so this guard is redundant but harmless.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified()) store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}
/// Spill an r-value so it can be reloaded at a point its defining block
/// does not dominate.  Values that already dominate everywhere are
/// recorded as literals; everything else is stored into a fresh alloca
/// and recorded by address (Address-based variant, which also records the
/// aggregate's alignment so it can be reconstructed on restore).
DominatingValue<RValue>::saved_type
DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
  if (rv.isScalar()) {
    llvm::Value *V = rv.getScalarVal();

    // These automatically dominate and don't need to be saved.
    if (!DominatingLLVMValue::needsSaving(V))
      return saved_type(V, ScalarLiteral);

    // Everything else needs an alloca.
    Address addr =
      CGF.CreateDefaultAlignTempAlloca(V->getType(), "saved-rvalue");
    CGF.Builder.CreateStore(V, addr);
    return saved_type(addr.getPointer(), ScalarAddress);
  }

  if (rv.isComplex()) {
    CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
    // Build an anonymous { real, imag } struct to hold both halves in one
    // temporary.  The trailing (void*) nullptr terminates the variadic
    // StructType::get overload.
    llvm::Type *ComplexTy =
      llvm::StructType::get(V.first->getType(), V.second->getType(),
                            (void*) nullptr);
    Address addr = CGF.CreateDefaultAlignTempAlloca(ComplexTy, "saved-complex");
    // Field 0 sits at offset zero; field 1 at the allocation size of the
    // first component's type.
    CGF.Builder.CreateStore(V.first,
                            CGF.Builder.CreateStructGEP(addr, 0, CharUnits()));
    CharUnits offset = CharUnits::fromQuantity(
                CGF.CGM.getDataLayout().getTypeAllocSize(V.first->getType()));
    CGF.Builder.CreateStore(V.second,
                            CGF.Builder.CreateStructGEP(addr, 1, offset));
    return saved_type(addr.getPointer(), ComplexAddress);
  }

  assert(rv.isAggregate());
  // For aggregates we save the *address* of the aggregate, not its contents,
  // along with its alignment.
  Address V = rv.getAggregateAddress(); // TODO: volatile?
  if (!DominatingLLVMValue::needsSaving(V.getPointer()))
    return saved_type(V.getPointer(), AggregateLiteral,
                      V.getAlignment().getQuantity());

  // The address itself doesn't dominate; spill the pointer to an alloca.
  Address addr =
    CGF.CreateTempAlloca(V.getType(), CGF.getPointerAlign(), "saved-rvalue");
  CGF.Builder.CreateStore(V.getPointer(), addr);
  return saved_type(addr.getPointer(), AggregateAddress,
                    V.getAlignment().getQuantity());
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore, unsigned Alignment) { assert(Src.isAggregate() && "value must be aggregate value!"); // If Dest is ignored, then we're evaluating an aggregate expression // in a context (like an expression statement) that doesn't care // about the result. C says that an lvalue-to-rvalue conversion is // performed in these cases; C++ says that it is not. In either // case, we don't actually need to do anything unless the value is // volatile. if (Dest.isIgnored()) { if (!Src.isVolatileQualified() || CGF.CGM.getLangOptions().CPlusPlus || (IgnoreResult && Ignore)) return; // If the source is volatile, we must read from it; to do that, we need // some place to put it. Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp"); } if (Dest.requiresGCollection()) { CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType()); llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType()); llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity()); CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, Dest.getAddr(), Src.getAggregateAddr(), SizeVal); return; } // If the result of the assignment is used, copy the LHS there also. // FIXME: Pass VolatileDest as well. I think we also need to merge volatile // from the source as well, as we can't eliminate it if either operand // is volatile, unless copy has volatile for both source and destination.. CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(), Dest.isVolatile()|Src.isVolatileQualified(), Alignment); }
/// Emit a compare-and-exchange op for atomic type.
///
/// \param Obj      the atomic l-value being operated on.
/// \param Expected the value the object is expected to hold.
/// \param Desired  the value to store if the comparison succeeds.
/// \param Loc      source location, used when converting the result back.
/// \param Success  memory ordering on success.
/// \param Failure  memory ordering on failure (clamped if too strong).
/// \param IsWeak   whether to emit a weak cmpxchg (may fail spuriously).
/// \param Slot     an aggregate slot for materializing the previous value.
/// \returns a pair of (previous value, success flag) r-values.
std::pair<RValue, RValue> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  AtomicInfo Atomics(*this, Obj);

  // LLVM requires the failure ordering to be no stronger than the success
  // ordering; clamp it rather than asserting on undefined behavior.
  if (Failure >= Success)
    // Don't assert on undefined behavior.
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);

  auto Alignment = Atomics.getValueAlignment();
  // Check whether we should use a library call.
  if (Atomics.shouldUseLibcall()) {
    // Materialize both operands to memory so their addresses can be passed.
    auto *ExpectedAddr = Atomics.materializeRValue(Expected);
    // Produce a source address.
    auto *DesiredAddr = Atomics.materializeRValue(Desired);
    // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
    // void *desired, int success, int failure);
    CallArgList Args;
    Args.add(RValue::get(Atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    Args.add(RValue::get(EmitCastToVoidPtr(Obj.getAddress())),
             getContext().VoidPtrTy);
    Args.add(RValue::get(EmitCastToVoidPtr(ExpectedAddr)),
             getContext().VoidPtrTy);
    Args.add(RValue::get(EmitCastToVoidPtr(DesiredAddr)),
             getContext().VoidPtrTy);
    Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Success)),
             getContext().IntTy);
    Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Failure)),
             getContext().IntTy);
    auto SuccessFailureRVal = emitAtomicLibcall(
        *this, "__atomic_compare_exchange", getContext().BoolTy, Args);
    // The libcall writes the previous value back through *expected; reload
    // it to return to the caller.
    auto *PreviousVal =
        Builder.CreateAlignedLoad(ExpectedAddr, Alignment.getQuantity());
    return std::make_pair(RValue::get(PreviousVal), SuccessFailureRVal);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedIntVal = Atomics.convertRValueToInt(Expected);
  auto *DesiredIntVal = Atomics.convertRValueToInt(Desired);

  // Do the atomic store.
  auto *Addr = Atomics.emitCastToAtomicIntPointer(Obj.getAddress());
  auto *Inst = Builder.CreateAtomicCmpXchg(Addr, ExpectedIntVal, DesiredIntVal,
                                           Success, Failure);
  // Other decoration.
  Inst->setVolatile(Obj.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.  cmpxchg returns a
  // { value, success } pair.
  auto *PreviousVal = Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(Atomics.convertIntToValue(PreviousVal, Slot, Loc),
                        RValue::get(SuccessFailureVal));
}
/// Emit a store to an l-value of atomic type. /// /// Note that the r-value is expected to be an r-value *of the atomic /// type*; this means that for aggregate r-values, it should include /// storage for any padding that was necessary. void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) { // If this is an aggregate r-value, it should agree in type except // maybe for address-space qualification. assert(!rvalue.isAggregate() || rvalue.getAggregateAddr()->getType()->getPointerElementType() == dest.getAddress()->getType()->getPointerElementType()); AtomicInfo atomics(*this, dest); // If this is an initialization, just put the value there normally. if (isInit) { atomics.emitCopyIntoMemory(rvalue, dest); return; } // Check whether we should use a library call. if (atomics.shouldUseLibcall()) { // Produce a source address. llvm::Value *srcAddr = atomics.materializeRValue(rvalue); // void __atomic_store(size_t size, void *mem, void *val, int order) CallArgList args; args.add(RValue::get(atomics.getAtomicSizeValue()), getContext().getSizeType()); args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())), getContext().VoidPtrTy); args.add(RValue::get(EmitCastToVoidPtr(srcAddr)), getContext().VoidPtrTy); args.add(RValue::get(llvm::ConstantInt::get( IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)), getContext().IntTy); emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args); return; } // Okay, we're doing this natively. llvm::Value *intValue; // If we've got a scalar value of the right size, try to avoid going // through memory. 
if (rvalue.isScalar() && !atomics.hasPadding()) { llvm::Value *value = rvalue.getScalarVal(); if (isa<llvm::IntegerType>(value->getType())) { intValue = value; } else { llvm::IntegerType *inputIntTy = llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits()); if (isa<llvm::PointerType>(value->getType())) { intValue = Builder.CreatePtrToInt(value, inputIntTy); } else { intValue = Builder.CreateBitCast(value, inputIntTy); } } // Otherwise, we need to go through memory. } else { // Put the r-value in memory. llvm::Value *addr = atomics.materializeRValue(rvalue); // Cast the temporary to the atomic int type and pull a value out. addr = atomics.emitCastToAtomicIntPointer(addr); intValue = Builder.CreateAlignedLoad(addr, atomics.getAtomicAlignment().getQuantity()); } // Do the atomic store. llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress()); llvm::StoreInst *store = Builder.CreateStore(intValue, addr); // Initializations don't need to be atomic. if (!isInit) store->setAtomic(llvm::SequentiallyConsistent); // Other decoration. store->setAlignment(dest.getAlignment().getQuantity()); if (dest.isVolatileQualified()) store->setVolatile(true); if (dest.getTBAAInfo()) CGM.DecorateInstruction(store, dest.getTBAAInfo()); }