/// \brief Emits code for OpenMP 'if' clause using specified \a CodeGen
/// function. Here is the logic:
/// if (Cond) {
///   CodeGen(true);
/// } else {
///   CodeGen(false);
/// }
static void EmitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
                            const std::function<void(bool)> &CodeGen) {
  CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
    CodeGen(CondConstant);
    return;
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just
  // emit the conditional branch.
  auto ThenBlock = CGF.createBasicBlock(/*name*/ "omp_if.then");
  auto ElseBlock = CGF.createBasicBlock(/*name*/ "omp_if.else");
  auto ContBlock = CGF.createBasicBlock(/*name*/ "omp_if.end");
  CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount*/ 0);

  // Emit the 'then' code.
  CGF.EmitBlock(ThenBlock);
  CodeGen(/*ThenBlock*/ true);
  CGF.EmitBranch(ContBlock);
  // Emit the 'else' code if present.
  {
    // There is no need to emit line number for unconditional branch.
    ApplyDebugLocation DL(CGF);
    CGF.EmitBlock(ElseBlock);
  }
  CodeGen(/*ThenBlock*/ false);
  {
    // There is no need to emit line number for unconditional branch.
    ApplyDebugLocation DL(CGF);
    CGF.EmitBranch(ContBlock);
  }
  // Emit the continuation block for code after the if.
  CGF.EmitBlock(ContBlock, /*IsFinished*/ true);
}
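// Illustrative only: a minimal sketch of the kind of user code whose 'if'
// clause the helper above lowers (the names below are hypothetical, not part
// of Clang). With a non-constant condition, both arms are emitted behind a
// runtime branch; a constant-foldable condition emits only the live arm.
void saxpy(int n, float a, float *x, float *y) {
  // 'n > 1024' is evaluated at run time unless it folds to a constant.
#pragma omp parallel for if (n > 1024)
  for (int i = 0; i < n; ++i)
    y[i] = a * x[i] + y[i];
}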
// CopyObject - Utility to copy an object. Calls copy constructor as necessary.
// N is cast to the right type.
static void CopyObject(CodeGenFunction &CGF, QualType ObjectType,
                       bool WasPointer, bool WasPointerReference,
                       llvm::Value *E, llvm::Value *N) {
  // Store the throw exception in the exception object.
  if (WasPointer || !CGF.hasAggregateLLVMType(ObjectType)) {
    llvm::Value *Value = E;
    if (!WasPointer)
      Value = CGF.Builder.CreateLoad(Value);
    const llvm::Type *ValuePtrTy = Value->getType()->getPointerTo(0);
    if (WasPointerReference) {
      llvm::Value *Tmp = CGF.CreateTempAlloca(Value->getType(), "catch.param");
      CGF.Builder.CreateStore(Value, Tmp);
      Value = Tmp;
      ValuePtrTy = Value->getType()->getPointerTo(0);
    }
    N = CGF.Builder.CreateBitCast(N, ValuePtrTy);
    CGF.Builder.CreateStore(Value, N);
  } else {
    const llvm::Type *Ty = CGF.ConvertType(ObjectType)->getPointerTo(0);
    const CXXRecordDecl *RD;
    RD = cast<CXXRecordDecl>(ObjectType->getAs<RecordType>()->getDecl());
    llvm::Value *This = CGF.Builder.CreateBitCast(N, Ty);
    if (RD->hasTrivialCopyConstructor()) {
      CGF.EmitAggregateCopy(This, E, ObjectType);
    } else if (CXXConstructorDecl *CopyCtor
               = RD->getCopyConstructor(CGF.getContext(), 0)) {
      llvm::Value *Src = E;

      // Stolen from EmitClassAggrMemberwiseCopy
      llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor,
                                                            Ctor_Complete);
      CallArgList CallArgs;
      CallArgs.push_back(std::make_pair(RValue::get(This),
                                      CopyCtor->getThisType(CGF.getContext())));

      // Push the Src ptr.
      CallArgs.push_back(std::make_pair(RValue::get(Src),
                                        CopyCtor->getParamDecl(0)->getType()));
      const FunctionProtoType *FPT
        = CopyCtor->getType()->getAs<FunctionProtoType>();
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT),
                   Callee, ReturnValueSlot(), CallArgs, CopyCtor);
    } else
      llvm_unreachable("uncopyable object");
  }
}
/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
                              llvm::Constant *Addr) {
  // Do not emit the intrinsic if we're not optimizing.
  if (!CGF.CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // Grab the llvm.invariant.start intrinsic.
  llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
  // Overloaded address space type.
  llvm::Type *ObjectPtr[1] = {CGF.Int8PtrTy};
  llvm::Constant *InvariantStart = CGF.CGM.getIntrinsic(InvStartID, ObjectPtr);

  // Emit a call with the size in bytes of the object.
  CharUnits WidthChars = CGF.getContext().getTypeSizeInChars(D.getType());
  uint64_t Width = WidthChars.getQuantity();
  llvm::Value *Args[2] = {llvm::ConstantInt::getSigned(CGF.Int64Ty, Width),
                          llvm::ConstantExpr::getBitCast(Addr, CGF.Int8PtrTy)};
  CGF.Builder.CreateCall(InvariantStart, Args);
}
void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
                                    const CXXDeleteExpr *expr,
                                    QualType ElementType,
                                    llvm::Value *&NumElements,
                                    llvm::Value *&AllocPtr,
                                    CharUnits &CookieSize) {
  // Derive a char* in the same address space as the pointer.
  unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
  llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);

  // If we don't need an array cookie, bail out early.
  if (!NeedsArrayCookie(expr, ElementType)) {
    AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
    NumElements = 0;
    CookieSize = CharUnits::Zero();
    return;
  }

  QualType SizeTy = getContext().getSizeType();
  CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
  llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);

  CookieSize
    = std::max(SizeSize, getContext().getTypeAlignInChars(ElementType));

  CharUnits NumElementsOffset = CookieSize - SizeSize;

  // Compute the allocated pointer.
  AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
  AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
                                                    -CookieSize.getQuantity());

  llvm::Value *NumElementsPtr = AllocPtr;
  if (!NumElementsOffset.isZero())
    NumElementsPtr =
      CGF.Builder.CreateConstInBoundsGEP1_64(NumElementsPtr,
                                             NumElementsOffset.getQuantity());
  NumElementsPtr = CGF.Builder.CreateBitCast(NumElementsPtr,
                                             SizeLTy->getPointerTo(AS));
  NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
}
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(
      (D.hasGlobalStorage() ||
       (D.hasLocalStorage() &&
        CGF.getContext().getLangOpts().OpenCLCPlusPlus)) &&
      "VarDecl must have global or local (in the case of OpenCL) storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(Init,
                    AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased,
                                            AggValueSlot::DoesNotOverlap));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}
static void AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                              bool UseOptimizedLibcall, llvm::Value *Val,
                              QualType ValTy, SourceLocation Loc,
                              CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
                               Align, CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         llvm::Constant *DeclPtr) {
  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  ASTContext &Context = CGF.getContext();

  CharUnits alignment = Context.getDeclAlign(&D);
  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type, alignment);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.isThreadSpecified());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(Init,
                    AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}
void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
                                const CXXDeleteExpr *expr,
                                QualType ElementType,
                                llvm::Value *&NumElements,
                                llvm::Value *&AllocPtr,
                                CharUnits &CookieSize) {
  // Derive a char* in the same address space as the pointer.
  unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
  llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);

  // If we don't need an array cookie, bail out early.
  if (!NeedsArrayCookie(expr, ElementType)) {
    AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
    NumElements = 0;
    CookieSize = CharUnits::Zero();
    return;
  }

  QualType SizeTy = getContext().getSizeType();
  CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
  llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);

  // The cookie size is always 2 * sizeof(size_t).
  CookieSize = 2 * SizeSize;

  // The allocated pointer is the input ptr, minus that amount.
  AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
  AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
                                                    -CookieSize.getQuantity());

  // The number of elements is at offset sizeof(size_t) relative to that.
  llvm::Value *NumElementsPtr
    = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
                                             SizeSize.getQuantity());
  NumElementsPtr = CGF.Builder.CreateBitCast(NumElementsPtr,
                                             SizeLTy->getPointerTo(AS));
  NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
}
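// Illustrative only: a freestanding sketch (assumed names, not part of Clang)
// of the two cookie layouts read above. The generic Itanium ABI uses a cookie
// of max(sizeof(size_t), alignof(T)) bytes with the element count in the last
// sizeof(size_t) bytes; the ARM ABI always uses 2 * sizeof(size_t), with the
// count at offset sizeof(size_t).
#include <cstddef>

template <typename T> constexpr size_t itaniumCookieSize() {
  return sizeof(size_t) > alignof(T) ? sizeof(size_t) : alignof(T);
}
template <typename T> constexpr size_t itaniumCountOffset() {
  return itaniumCookieSize<T>() - sizeof(size_t); // count sits at the end
}
constexpr size_t armCookieSize() {
  return 2 * sizeof(size_t); // fixed: element size, then element count
}
static_assert(itaniumCookieSize<long double>() >= sizeof(size_t),
              "cookie must at least hold the element count");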
void CGOpenMPRuntime::EmitOMPForInit(CodeGenFunction &CGF, SourceLocation Loc,
                                     OpenMPScheduleClauseKind ScheduleKind,
                                     unsigned IVSize, bool IVSigned,
                                     llvm::Value *IL, llvm::Value *LB,
                                     llvm::Value *UB, llvm::Value *ST,
                                     llvm::Value *Chunk) {
  OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunk != nullptr);
  // Call __kmpc_for_static_init(
  //          ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
  //          kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
  //          kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
  //          kmp_int[32|64] incr, kmp_int[32|64] chunk);
  // TODO: Implement dynamic schedule.

  // If the Chunk was not specified in the clause - use default value 1.
  if (Chunk == nullptr)
    Chunk = CGF.Builder.getIntN(IVSize, /*C*/ 1);

  llvm::Value *Args[] = {
      EmitOpenMPUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
      GetOpenMPThreadID(CGF, Loc),
      CGF.Builder.getInt32(Schedule), // Schedule type
      IL,                             // &isLastIter
      LB,                             // &LB
      UB,                             // &UB
      ST,                             // &Stride
      CGF.Builder.getIntN(IVSize, 1), // Incr
      Chunk                           // Chunk
  };
  assert((IVSize == 32 || IVSize == 64) &&
         "Index size is not compatible with the omp runtime");
  auto F = IVSize == 32 ? (IVSigned ? OMPRTL__kmpc_for_static_init_4
                                    : OMPRTL__kmpc_for_static_init_4u)
                        : (IVSigned ? OMPRTL__kmpc_for_static_init_8
                                    : OMPRTL__kmpc_for_static_init_8u);
  auto RTLFn = CreateRuntimeFunction(F);
  CGF.EmitRuntimeCall(RTLFn, Args);
}
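// Illustrative only: a minimal statically scheduled loop (plain user code,
// names hypothetical) whose lowering produces the __kmpc_for_static_init_*
// call above. Each thread receives its lower bound, upper bound and stride
// back through the LB/UB/ST out-parameters.
void scale(int n, float *x) {
#pragma omp parallel for schedule(static, 4)
  for (int i = 0; i < n; ++i)
    x[i] *= 2.0f;
}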
void CGCXXABI::buildThisParam(CodeGenFunction &CGF, FunctionArgList &params) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());

  // FIXME: I'm not entirely sure I like using a fake decl just for code
  // generation. Maybe we can come up with a better way?
  ImplicitParamDecl *ThisDecl
    = ImplicitParamDecl::Create(CGM.getContext(), nullptr, MD->getLocation(),
                                &CGM.getContext().Idents.get("this"),
                                MD->getThisType(CGM.getContext()));
  params.push_back(ThisDecl);
  CGF.CXXABIThisDecl = ThisDecl;

  // Compute the presumed alignment of 'this', which basically comes
  // down to whether we know it's a complete object or not.
  auto &Layout = CGF.getContext().getASTRecordLayout(MD->getParent());
  if (MD->getParent()->getNumVBases() == 0 || // avoid vcall in common case
      MD->getParent()->hasAttr<FinalAttr>() ||
      !isThisCompleteObject(CGF.CurGD)) {
    CGF.CXXABIThisAlignment = Layout.getAlignment();
  } else {
    CGF.CXXABIThisAlignment = Layout.getNonVirtualAlignment();
  }
}
/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true. This can return false if uncertain, so it
/// just handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer;
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}
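// Illustrative only: hypothetical initializers covered by the cases above.
// When the enclosing aggregate has already been zeroed (e.g. by a memset
// emitted for the whole object), the stores for these elements can be skipped.
struct Point { int x; float y; char tag; int *next; };
Point origin = {0, +0.0f, '\0', (int *)0}; // every element is a "simple zero"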
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}
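// Illustrative only: a freestanding sketch (assumed names, not Clang's
// implementation) of "coercion through memory". To reinterpret a value as a
// smaller type, spill it to a temporary and copy only the bytes that fit,
// mirroring the alloca/store/load sequence in the slow path above.
#include <cstring>

template <typename Dst, typename Src> Dst coerceThroughMemory(Src s) {
  static_assert(sizeof(Dst) <= sizeof(Src), "upper bytes of Src are dropped");
  Dst d;
  // Keeps the first sizeof(Dst) bytes, i.e. the low bits on a little-endian
  // target; both types are assumed trivially copyable.
  std::memcpy(&d, &s, sizeof(Dst));
  return d;
}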
/// We don't need a normal entry block for the given cleanup.
/// Optimistic fixup branches can cause these blocks to come into
/// existence anyway; if so, destroy it.
///
/// The validity of this transformation is very much specific to the
/// exact ways in which we form branches to cleanup entries.
static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
                                         EHCleanupScope &scope) {
  llvm::BasicBlock *entry = scope.getNormalBlock();
  if (!entry) return;

  // Replace all the uses with unreachable.
  llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock();
  for (llvm::BasicBlock::use_iterator
         i = entry->use_begin(), e = entry->use_end(); i != e; ) {
    llvm::Use &use = *i;
    ++i;

    use.set(unreachableBB);

    // The only uses should be fixup switches.
    llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
    if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) {
      // Replace the switch with a branch.
      llvm::BranchInst::Create(si->case_begin().getCaseSuccessor(), si);

      // The switch operand is a load from the cleanup-dest alloca.
      llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());

      // Destroy the switch.
      si->eraseFromParent();

      // Destroy the load.
      assert(condition->getOperand(0) == CGF.NormalCleanupDest);
      assert(condition->use_empty());
      condition->eraseFromParent();
    }
  }

  assert(entry->use_empty());
  delete entry;
}
static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
                                      QualType ResultType, RValue RV,
                                      const ThunkInfo &Thunk) {
  // Emit the return adjustment.
  bool NullCheckValue = !ResultType->isReferenceType();

  llvm::BasicBlock *AdjustNull = nullptr;
  llvm::BasicBlock *AdjustNotNull = nullptr;
  llvm::BasicBlock *AdjustEnd = nullptr;

  llvm::Value *ReturnValue = RV.getScalarVal();

  if (NullCheckValue) {
    AdjustNull = CGF.createBasicBlock("adjust.null");
    AdjustNotNull = CGF.createBasicBlock("adjust.notnull");
    AdjustEnd = CGF.createBasicBlock("adjust.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ReturnValue);
    CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
    CGF.EmitBlock(AdjustNotNull);
  }

  auto ClassDecl = ResultType->getPointeeType()->getAsCXXRecordDecl();
  auto ClassAlign = CGF.CGM.getClassPointerAlignment(ClassDecl);
  ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(
      CGF, Address(ReturnValue, ClassAlign), Thunk.Return);

  if (NullCheckValue) {
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustNull);
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustEnd);

    llvm::PHINode *PHI = CGF.Builder.CreatePHI(ReturnValue->getType(), 2);
    PHI->addIncoming(ReturnValue, AdjustNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
                     AdjustNull);
    ReturnValue = PHI;
  }

  return RValue::get(ReturnValue);
}
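// Illustrative only: hypothetical user code that forces a return-adjusting
// thunk like the one emitted above. Derived::clone covariantly returns
// Derived* from a vtable slot typed Base2*, so the thunk must offset the
// returned pointer, and must skip the offset when the callee returns null
// (hence the null check around the adjustment).
struct Base1 { virtual ~Base1() {} };
struct Base2 { virtual Base2 *clone() const; };
struct Derived : Base1, Base2 {
  Derived *clone() const override; // covariant return needs adjustment
};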
static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
                                      QualType ResultType, RValue RV,
                                      const ThunkInfo &Thunk) {
  // Emit the return adjustment.
  bool NullCheckValue = !ResultType->isReferenceType();

  llvm::BasicBlock *AdjustNull = 0;
  llvm::BasicBlock *AdjustNotNull = 0;
  llvm::BasicBlock *AdjustEnd = 0;

  llvm::Value *ReturnValue = RV.getScalarVal();

  if (NullCheckValue) {
    AdjustNull = CGF.createBasicBlock("adjust.null");
    AdjustNotNull = CGF.createBasicBlock("adjust.notnull");
    AdjustEnd = CGF.createBasicBlock("adjust.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ReturnValue);
    CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
    CGF.EmitBlock(AdjustNotNull);
  }

  ReturnValue = PerformTypeAdjustment(CGF, ReturnValue,
                                      Thunk.Return.NonVirtual,
                                      Thunk.Return.VBaseOffsetOffset,
                                      /*IsReturnAdjustment*/true);

  if (NullCheckValue) {
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustNull);
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustEnd);

    llvm::PHINode *PHI = CGF.Builder.CreatePHI(ReturnValue->getType(), 2);
    PHI->addIncoming(ReturnValue, AdjustNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
                     AdjustNull);
    ReturnValue = PHI;
  }

  return RValue::get(ReturnValue);
}
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         llvm::Constant *DeclPtr) {
  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  ASTContext &Context = CGF.getContext();

  const Expr *Init = D.getInit();
  QualType T = D.getType();
  bool isVolatile = Context.getCanonicalType(T).isVolatileQualified();

  if (!CGF.hasAggregateLLVMType(T)) {
    llvm::Value *V = CGF.EmitScalarExpr(Init);
    CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, T);
  } else if (T->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(Init, DeclPtr, isVolatile);
  } else {
    CGF.EmitAggExpr(Init, DeclPtr, isVolatile);
  }
}
static void EmitCleanup(CodeGenFunction &CGF,
                        EHScopeStack::Cleanup *Fn,
                        EHScopeStack::Cleanup::Flags flags,
                        llvm::Value *ActiveFlag) {
  // Itanium EH cleanups occur within a terminate scope. Microsoft SEH doesn't
  // have this behavior, and the Microsoft C++ runtime will call terminate for
  // us if the cleanup throws.
  bool PushedTerminate = false;
  if (flags.isForEHCleanup() && !CGF.getTarget().getCXXABI().isMicrosoft()) {
    CGF.EHStack.pushTerminate();
    PushedTerminate = true;
  }

  // If there's an active flag, load it and skip the cleanup if it's
  // false.
  llvm::BasicBlock *ContBB = nullptr;
  if (ActiveFlag) {
    ContBB = CGF.createBasicBlock("cleanup.done");
    llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
    llvm::Value *IsActive
      = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active");
    CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB);
    CGF.EmitBlock(CleanupBB);
  }

  // Ask the cleanup to emit itself.
  Fn->Emit(CGF, flags);
  assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");

  // Emit the continuation block if there was an active flag.
  if (ActiveFlag)
    CGF.EmitBlock(ContBB);

  // Leave the terminate scope.
  if (PushedTerminate)
    CGF.EHStack.popTerminate();
}
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements) {
  QualType AllocType = E->getAllocatedType();

  if (!E->isArray()) {
    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
      CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, NewPtr,
                                 E->constructor_arg_begin(),
                                 E->constructor_arg_end());
      return;
    }

    // We have a POD type.
    if (E->getNumConstructorArgs() == 0)
      return;

    assert(E->getNumConstructorArgs() == 1 &&
           "Can only have one argument to initializer of POD type.");

    const Expr *Init = E->getConstructorArg(0);

    if (!CGF.hasAggregateLLVMType(AllocType))
      CGF.Builder.CreateStore(CGF.EmitScalarExpr(Init), NewPtr);
    else if (AllocType->isAnyComplexType())
      CGF.EmitComplexExprIntoAddr(Init, NewPtr,
                                  AllocType.isVolatileQualified());
    else
      CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
    return;
  }

  if (CXXConstructorDecl *Ctor = E->getConstructor())
    CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr);
}
void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
                                   RValue RV, QualType ResultType) {
  CGF.EmitReturnOfRValue(RV, ResultType);
}
LValueExprEmitter::LValueExprEmitter(CodeGenFunction &cgf)
  : CGF(cgf), Builder(cgf.getBuilder()),
    VMContext(cgf.getLLVMContext()) {
}
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, unsigned Align,
                         llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Align, Order);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}
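// Illustrative only: ordinary user code (not Clang internals) showing the
// builtin pairs the PostOp logic above distinguishes. __atomic_fetch_add
// returns the value before the operation; __atomic_add_fetch returns the
// value after it, so codegen re-applies the operation to the atomicrmw
// result, and nand additionally needs a final 'not'.
int demo(int *p) {
  int old = __atomic_fetch_add(p, 2, __ATOMIC_SEQ_CST);    // value before
  int now = __atomic_add_fetch(p, 2, __ATOMIC_SEQ_CST);    // value after
  int nnd = __atomic_nand_fetch(p, 0xF, __ATOMIC_SEQ_CST); // ~(*p & 0xF)
  return old + now + nnd;
}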
CharacterExprEmitter::CharacterExprEmitter(CodeGenFunction &cgf)
  : CGF(cgf), Builder(cgf.getBuilder()),
    VMContext(cgf.getLLVMContext()), Dest(nullptr, nullptr) {
}
/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *GV) {
  CGBuilderTy &Builder = CGF.Builder;

  // We only need to use thread-safe statics for local variables;
  // global initialization is always single-threaded.
  bool threadsafe =
    (getContext().getLangOptions().ThreadsafeStatics && D.isLocalVarDecl());

  llvm::IntegerType *GuardTy;

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && GV->hasInternalLinkage();
  if (useInt8GuardVariable) {
    GuardTy = CGF.Int8Ty;
  } else {
    // Guard variables are 64 bits in the generic ABI and 32 bits on ARM.
    GuardTy = (IsARM ? CGF.Int32Ty : CGF.Int64Ty);
  }
  llvm::PointerType *GuardPtrTy = GuardTy->getPointerTo();

  // Create the guard variable.
  llvm::SmallString<256> GuardVName;
  llvm::raw_svector_ostream Out(GuardVName);
  getMangleContext().mangleItaniumGuardVariable(&D, Out);
  Out.flush();

  // Just absorb linkage and visibility from the variable.
  llvm::GlobalVariable *GuardVariable =
    new llvm::GlobalVariable(CGM.getModule(), GuardTy,
                             false, GV->getLinkage(),
                             llvm::ConstantInt::get(GuardTy, 0),
                             GuardVName.str());
  GuardVariable->setVisibility(GV->getVisibility());

  // Test whether the variable has completed initialization.
  llvm::Value *IsInitialized;

  // ARM C++ ABI 3.2.3.1:
  //   To support the potential use of initialization guard variables
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
  //   synchronizing instructions we define a static initialization
  //   guard variable to be a 4-byte aligned, 4-byte word with the
  //   following inline access protocol.
  //     #define INITIALIZED 1
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
  //       if (__cxa_guard_acquire(&obj_guard))
  //         ...
  //     }
  if (IsARM && !useInt8GuardVariable) {
    llvm::Value *V = Builder.CreateLoad(GuardVariable);
    V = Builder.CreateAnd(V, Builder.getInt32(1));
    IsInitialized = Builder.CreateIsNull(V, "guard.uninitialized");

  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //           __cxa_guard_abort (&obj_guard);
  //           throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }
  } else {
    // Load the first byte of the guard variable.
    llvm::Type *PtrTy = Builder.getInt8PtrTy();
    llvm::LoadInst *LI =
      Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy));
    LI->setAlignment(1);

    // Itanium ABI:
    //   An implementation supporting thread-safety on multiprocessor
    //   systems must also guarantee that references to the initialized
    //   object do not occur before the load of the initialization flag.
    //
    // In LLVM, we do this by marking the load Acquire.
    if (threadsafe)
      LI->setAtomic(llvm::Acquire);

    IsInitialized = Builder.CreateIsNull(LI, "guard.uninitialized");
  }

  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  Builder.CreateCondBr(IsInitialized, InitCheckBlock, EndBlock);

  CGF.EmitBlock(InitCheckBlock);

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.
    llvm::Value *V
      = Builder.CreateCall(getGuardAcquireFn(CGM, GuardPtrTy), GuardVariable);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, GuardVariable);

    CGF.EmitBlock(InitBlock);
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, GV);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release. This cannot throw.
    Builder.CreateCall(getGuardReleaseFn(CGM, GuardPtrTy), GuardVariable);
  } else {
    Builder.CreateStore(llvm::ConstantInt::get(GuardTy, 1), GuardVariable);
  }

  CGF.EmitBlock(EndBlock);
}
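// Illustrative only: hypothetical user code whose lowering goes through
// EmitGuardedInit. The local static below gets a mangled guard variable
// (_ZGVZ...) and, with thread-safe statics enabled, the
// __cxa_guard_acquire/release protocol sketched in the comments above.
struct Logger { Logger(); ~Logger(); };
Logger &getLogger() {
  static Logger instance; // guarded, initialized on first call
  return instance;
}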
/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            ConstantAddress Addr) {
  // Honor __attribute__((no_destroy)) and bail instead of attempting
  // to emit a reference to a possibly nonexistent destructor, which
  // in turn can cause a crash. This will result in a global constructor
  // that isn't balanced out by a destructor call as intended by the
  // attribute. This also checks for -fno-c++-static-destructors and
  // bails even if the attribute is not present.
  if (D.isNoDestroy(CGF.getContext()))
    return;

  CodeGenModule &CGM = CGF.CGM;

  // FIXME:  __attribute__((cleanup)) ?

  QualType Type = D.getType();
  QualType::DestructionKind DtorKind = Type.isDestructedType();

  switch (DtorKind) {
  case QualType::DK_none:
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
  case QualType::DK_nontrivial_c_struct:
    // We don't care about releasing objects during process teardown.
    assert(!D.getTLSKind() && "should have rejected this");
    return;
  }

  llvm::FunctionCallee Func;
  llvm::Constant *Argument;

  // Special-case non-array C++ destructors, if they have the right signature.
  // Under some ABIs, destructors return this instead of void, and cannot be
  // passed directly to __cxa_atexit if the target does not allow this
  // mismatch.
  const CXXRecordDecl *Record = Type->getAsCXXRecordDecl();
  bool CanRegisterDestructor =
      Record && (!CGM.getCXXABI().HasThisReturn(
                     GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
                 CGM.getCXXABI().canCallMismatchedFunctionType());
  // If __cxa_atexit is disabled via a flag, a different helper function is
  // generated elsewhere which uses atexit instead, and it takes the destructor
  // directly.
  bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
  if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
    assert(!Record->hasTrivialDestructor());
    CXXDestructorDecl *Dtor = Record->getDestructor();

    Func = CGM.getAddrAndTypeOfCXXStructor(Dtor, StructorType::Complete);
    Argument = llvm::ConstantExpr::getBitCast(
        Addr.getPointer(), CGF.getTypes().ConvertType(Type)->getPointerTo());

  // Otherwise, the standard logic requires a helper function.
  } else {
    Func = CodeGenFunction(CGM)
           .generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
                                  CGF.needsEHCleanup(DtorKind), &D);
    Argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
  }

  CGM.getCXXABI().registerGlobalDtor(CGF, D, Func, Argument);
}
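// Illustrative only: hypothetical user code exercising the paths above.
// 'g' has its destructor registered at the end of initialization (via
// __cxa_atexit or an atexit-based helper); 'k' opts out through the
// attribute honored by the early return in EmitDeclDestroy.
struct Resource { Resource(); ~Resource(); };
Resource g;                       // destructor registered for teardown
[[clang::no_destroy]] Resource k; // no destructor registration emitted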
LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
  return CGF.MakeNaturalAlignAddrLValue(
      CGF.GetAddrOfLocalVar(ThreadIDVar),
      CGF.getContext().getPointerType(ThreadIDVar->getType()));
}
/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
                              llvm::Constant *Addr) {
  return CGF.EmitInvariantStart(
      Addr, CGF.getContext().getTypeSizeInChars(D.getType()));
}
void CGOpenCLRuntime::EmitWorkGroupLocalVarDecl(CodeGenFunction &CGF,
                                                const VarDecl &D) {
  return CGF.EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
}
// CopyObject - Utility to copy an object. Calls copy constructor as necessary.
// DestPtr is cast to the right type.
static void CopyObject(CodeGenFunction &CGF, const Expr *E,
                       llvm::Value *DestPtr,
                       llvm::Value *ExceptionPtrPtr) {
  QualType ObjectType = E->getType();

  // Store the throw exception in the exception object.
  if (!CGF.hasAggregateLLVMType(ObjectType)) {
    llvm::Value *Value = CGF.EmitScalarExpr(E);
    const llvm::Type *ValuePtrTy = Value->getType()->getPointerTo();

    CGF.Builder.CreateStore(Value,
                            CGF.Builder.CreateBitCast(DestPtr, ValuePtrTy));
  } else {
    const llvm::Type *Ty = CGF.ConvertType(ObjectType)->getPointerTo();
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(ObjectType->getAs<RecordType>()->getDecl());

    llvm::Value *This = CGF.Builder.CreateBitCast(DestPtr, Ty);
    if (RD->hasTrivialCopyConstructor()) {
      CGF.EmitAggExpr(E, This, false);
    } else if (CXXConstructorDecl *CopyCtor
               = RD->getCopyConstructor(CGF.getContext(), 0)) {
      llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
      if (CGF.Exceptions) {
        CodeGenFunction::EHCleanupBlock Cleanup(CGF);
        llvm::Constant *FreeExceptionFn = getFreeExceptionFn(CGF);

        // Load the exception pointer.
        llvm::Value *ExceptionPtr = CGF.Builder.CreateLoad(ExceptionPtrPtr);
        CGF.Builder.CreateCall(FreeExceptionFn, ExceptionPtr);
      }

      llvm::Value *Src = CGF.EmitLValue(E).getAddress();
      CGF.setInvokeDest(PrevLandingPad);

      llvm::BasicBlock *TerminateHandler = CGF.getTerminateHandler();
      PrevLandingPad = CGF.getInvokeDest();
      CGF.setInvokeDest(TerminateHandler);

      // Stolen from EmitClassAggrMemberwiseCopy
      llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor,
                                                            Ctor_Complete);
      CallArgList CallArgs;
      CallArgs.push_back(std::make_pair(RValue::get(This),
                                      CopyCtor->getThisType(CGF.getContext())));

      // Push the Src ptr.
      CallArgs.push_back(std::make_pair(RValue::get(Src),
                                        CopyCtor->getParamDecl(0)->getType()));
      QualType ResultType =
        CopyCtor->getType()->getAs<FunctionType>()->getResultType();
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(ResultType, CallArgs),
                   Callee, CallArgs, CopyCtor);
      CGF.setInvokeDest(PrevLandingPad);
    } else
      llvm_unreachable("uncopyable object");
  }
}
void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
                                     const ObjCAtTryStmt &S,
                                     llvm::Constant *beginCatchFn,
                                     llvm::Constant *endCatchFn,
                                     llvm::Constant *exceptionRethrowFn) {
  // Jump destination for falling out of catch bodies.
  CodeGenFunction::JumpDest Cont;
  if (S.getNumCatchStmts())
    Cont = CGF.getJumpDestInCurrentScope("eh.cont");

  CodeGenFunction::FinallyInfo FinallyInfo;
  if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
    FinallyInfo.enter(CGF, Finally->getFinallyBody(),
                      beginCatchFn, endCatchFn, exceptionRethrowFn);

  SmallVector<CatchHandler, 8> Handlers;

  // Enter the catch, if there is one.
  if (S.getNumCatchStmts()) {
    for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
      const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
      const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();

      Handlers.push_back(CatchHandler());
      CatchHandler &Handler = Handlers.back();
      Handler.Variable = CatchDecl;
      Handler.Body = CatchStmt->getCatchBody();
      Handler.Block = CGF.createBasicBlock("catch");

      // @catch(...) always matches.
      if (!CatchDecl) {
        Handler.TypeInfo = 0; // catch-all
        // Don't consider any other catches.
        break;
      }

      Handler.TypeInfo = GetEHType(CatchDecl->getType());
    }

    EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
    for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
      Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
  }

  // Emit the try body.
  CGF.EmitStmt(S.getTryBody());

  // Leave the try.
  if (S.getNumCatchStmts())
    CGF.popCatchScope();

  // Remember where we were.
  CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();

  // Emit the handlers.
  for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
    CatchHandler &Handler = Handlers[I];

    CGF.EmitBlock(Handler.Block);
    llvm::Value *RawExn = CGF.getExceptionFromSlot();

    // Enter the catch.
    llvm::Value *Exn = RawExn;
    if (beginCatchFn) {
      Exn = CGF.Builder.CreateCall(beginCatchFn, RawExn, "exn.adjusted");
      cast<llvm::CallInst>(Exn)->setDoesNotThrow();
    }

    CodeGenFunction::RunCleanupsScope cleanups(CGF);

    if (endCatchFn) {
      // Add a cleanup to leave the catch.
      bool EndCatchMightThrow = (Handler.Variable == 0);

      CGF.EHStack.pushCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
                                                EndCatchMightThrow,
                                                endCatchFn);
    }

    // Bind the catch parameter if it exists.
    if (const VarDecl *CatchParam = Handler.Variable) {
      llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
      llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);

      CGF.EmitAutoVarDecl(*CatchParam);
      CGF.Builder.CreateStore(CastExn, CGF.GetAddrOfLocalVar(CatchParam));
    }

    CGF.ObjCEHValueStack.push_back(Exn);
    CGF.EmitStmt(Handler.Body);
    CGF.ObjCEHValueStack.pop_back();

    // Leave any cleanups associated with the catch.
    cleanups.ForceCleanup();

    CGF.EmitBranchThroughCleanup(Cont);
  }

  // Go back to the try-statement fallthrough.
  CGF.Builder.restoreIP(SavedIP);

  // Pop out of the finally.
  if (S.getFinallyStmt())
    FinallyInfo.exit(CGF);

  if (Cont.isValid())
    CGF.EmitBlock(Cont.getBlock());
}
/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically
/// known) FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, llvm::Value *Dest,
                                        llvm::Value *Ptr, llvm::Value *Val1,
                                        llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
        llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
                      SuccessOrder, FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}
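// Illustrative only: plain user code showing the dynamic failure ordering the
// switch above copes with. When 'failOrder' is not a compile-time constant,
// codegen must emit one cmpxchg per possible failure ordering and dispatch at
// run time; a constant argument folds to a single instruction instead.
bool tryUpdate(int *p, int expected, int desired, int failOrder) {
  return __atomic_compare_exchange_n(p, &expected, desired, /*weak=*/false,
                                     __ATOMIC_SEQ_CST, failOrder);
}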