/// \brief Build scope information for captured block literal variables.
void JumpScopeChecker::BuildScopeInformation(VarDecl *D,
                                             const BlockDecl *BDecl,
                                             unsigned &ParentScope) {
  // Exclude captured __block variables; there's no destructor
  // associated with the block literal for them.
  if (D->hasAttr<BlocksAttr>())
    return;
  QualType T = D->getType();
  QualType::DestructionKind destructKind = T.isDestructedType();
  if (destructKind != QualType::DK_none) {
    std::pair<unsigned, unsigned> Diags;
    switch (destructKind) {
    case QualType::DK_cxx_destructor:
      Diags = ScopePair(diag::note_enters_block_captures_cxx_obj,
                        diag::note_exits_block_captures_cxx_obj);
      break;
    case QualType::DK_objc_strong_lifetime:
      Diags = ScopePair(diag::note_enters_block_captures_strong,
                        diag::note_exits_block_captures_strong);
      break;
    case QualType::DK_objc_weak_lifetime:
      Diags = ScopePair(diag::note_enters_block_captures_weak,
                        diag::note_exits_block_captures_weak);
      break;
    case QualType::DK_none:
      llvm_unreachable("non-lifetime captured variable");
    }
    SourceLocation Loc = D->getLocation();
    if (Loc.isInvalid())
      Loc = BDecl->getLocation();
    Scopes.push_back(GotoScope(ParentScope, Diags.first, Diags.second, Loc));
    ParentScope = Scopes.size() - 1;
  }
}
bool CGCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
                                   QualType elementType) {
  // If the class's usual deallocation function takes two arguments,
  // it needs a cookie.
  if (expr->doesUsualArrayDeleteWantSize())
    return true;

  return elementType.isDestructedType();
}
/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            ConstantAddress addr) {
  CodeGenModule &CGM = CGF.CGM;

  // FIXME: __attribute__((cleanup)) ?

  QualType type = D.getType();
  QualType::DestructionKind dtorKind = type.isDestructedType();

  switch (dtorKind) {
  case QualType::DK_none:
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
    // We don't care about releasing objects during process teardown.
    assert(!D.getTLSKind() && "should have rejected this");
    return;
  }

  llvm::Constant *function;
  llvm::Constant *argument;

  // Special-case non-array C++ destructors, if they have the right signature.
  // Under some ABIs, destructors return this instead of void, and cannot be
  // passed directly to __cxa_atexit if the target does not allow this
  // mismatch.
  const CXXRecordDecl *Record = type->getAsCXXRecordDecl();
  bool CanRegisterDestructor =
      Record && (!CGM.getCXXABI().HasThisReturn(
                     GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
                 CGM.getCXXABI().canCallMismatchedFunctionType());
  // If __cxa_atexit is disabled via a flag, a different helper function is
  // generated elsewhere which uses atexit instead, and it takes the destructor
  // directly.
  bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
  if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
    assert(!Record->hasTrivialDestructor());
    CXXDestructorDecl *dtor = Record->getDestructor();

    function = CGM.getAddrOfCXXStructor(dtor, StructorType::Complete);
    argument = llvm::ConstantExpr::getBitCast(
        addr.getPointer(), CGF.getTypes().ConvertType(type)->getPointerTo());

  // Otherwise, the standard logic requires a helper function.
  } else {
    function = CodeGenFunction(CGM)
                   .generateDestroyHelper(addr, type,
                                          CGF.getDestroyer(dtorKind),
                                          CGF.needsEHCleanup(dtorKind), &D);
    argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
  }

  CGM.getCXXABI().registerGlobalDtor(CGF, D, function, argument);
}
/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            llvm::Constant *addr) {
  CodeGenModule &CGM = CGF.CGM;

  // FIXME: __attribute__((cleanup)) ?

  QualType type = D.getType();
  QualType::DestructionKind dtorKind = type.isDestructedType();

  switch (dtorKind) {
  case QualType::DK_none:
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
    // We don't care about releasing objects during process teardown.
    assert(!D.getTLSKind() && "should have rejected this");
    return;
  }

  llvm::Constant *function;
  llvm::Constant *argument;

  // Special-case non-array C++ destructors, where there's a function
  // with the right signature that we can just call.
  const CXXRecordDecl *record = nullptr;
  if (dtorKind == QualType::DK_cxx_destructor &&
      (record = type->getAsCXXRecordDecl())) {
    assert(!record->hasTrivialDestructor());
    CXXDestructorDecl *dtor = record->getDestructor();

    function = CGM.getAddrOfCXXStructor(dtor, StructorType::Complete);
    argument = llvm::ConstantExpr::getBitCast(
        addr, CGF.getTypes().ConvertType(type)->getPointerTo());

  // Otherwise, the standard logic requires a helper function.
  } else {
    function = CodeGenFunction(CGM)
                   .generateDestroyHelper(addr, type,
                                          CGF.getDestroyer(dtorKind),
                                          CGF.needsEHCleanup(dtorKind), &D);
    argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
  }

  if (CGM.getTarget().isByteAddressable())
    CGM.getCXXABI().registerGlobalDtor(CGF, D, function, argument);
}
/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            llvm::Constant *addr) {
  CodeGenModule &CGM = CGF.CGM;

  // FIXME: __attribute__((cleanup)) ?

  QualType type = D.getType();
  QualType::DestructionKind dtorKind = type.isDestructedType();

  switch (dtorKind) {
  case QualType::DK_none:
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
    // We don't care about releasing objects during process teardown.
    return;
  }

  llvm::Constant *function;
  llvm::Constant *argument;

  // Special-case non-array C++ destructors, where there's a function
  // with the right signature that we can just call.
  const CXXRecordDecl *record = 0;
  if (dtorKind == QualType::DK_cxx_destructor &&
      (record = type->getAsCXXRecordDecl())) {
    assert(!record->hasTrivialDestructor());
    CXXDestructorDecl *dtor = record->getDestructor();

    function = CGM.GetAddrOfCXXDestructor(dtor, Dtor_Complete);
    argument = addr;

  // Otherwise, the standard logic requires a helper function.
  } else {
    function = CodeGenFunction(CGM)
                   .generateDestroyHelper(addr, type,
                                          CGF.getDestroyer(dtorKind),
                                          CGF.needsEHCleanup(dtorKind));
    argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
  }

  CGF.EmitCXXGlobalDtorRegistration(function, argument);
}
/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            ConstantAddress Addr) {
  // Honor __attribute__((no_destroy)) and bail instead of attempting
  // to emit a reference to a possibly nonexistent destructor, which
  // in turn can cause a crash. This will result in a global constructor
  // that isn't balanced out by a destructor call as intended by the
  // attribute. This also checks for -fno-c++-static-destructors and
  // bails even if the attribute is not present.
  if (D.isNoDestroy(CGF.getContext()))
    return;

  CodeGenModule &CGM = CGF.CGM;

  // FIXME: __attribute__((cleanup)) ?

  QualType Type = D.getType();
  QualType::DestructionKind DtorKind = Type.isDestructedType();

  switch (DtorKind) {
  case QualType::DK_none:
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
  case QualType::DK_nontrivial_c_struct:
    // We don't care about releasing objects during process teardown.
    assert(!D.getTLSKind() && "should have rejected this");
    return;
  }

  llvm::FunctionCallee Func;
  llvm::Constant *Argument;

  // Special-case non-array C++ destructors, if they have the right signature.
  // Under some ABIs, destructors return this instead of void, and cannot be
  // passed directly to __cxa_atexit if the target does not allow this
  // mismatch.
  const CXXRecordDecl *Record = Type->getAsCXXRecordDecl();
  bool CanRegisterDestructor =
      Record && (!CGM.getCXXABI().HasThisReturn(
                     GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
                 CGM.getCXXABI().canCallMismatchedFunctionType());
  // If __cxa_atexit is disabled via a flag, a different helper function is
  // generated elsewhere which uses atexit instead, and it takes the destructor
  // directly.
  bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
  if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
    assert(!Record->hasTrivialDestructor());
    CXXDestructorDecl *Dtor = Record->getDestructor();

    Func = CGM.getAddrAndTypeOfCXXStructor(Dtor, StructorType::Complete);
    Argument = llvm::ConstantExpr::getBitCast(
        Addr.getPointer(), CGF.getTypes().ConvertType(Type)->getPointerTo());

  // Otherwise, the standard logic requires a helper function.
  } else {
    Func = CodeGenFunction(CGM)
               .generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
                                      CGF.needsEHCleanup(DtorKind), &D);
    Argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
  }

  CGM.getCXXABI().registerGlobalDtor(CGF, D, Func, Argument);
}
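// The EmitDeclDestroy variants above all share one dispatch on
// QualType::isDestructedType(): only DK_cxx_destructor leads to a registered
// global destructor, while the ObjC lifetime kinds (and DK_nontrivial_c_struct
// in the newest variant) are deliberately skipped at process teardown. The
// following is a minimal, hypothetical sketch of that predicate, not code from
// any of the snippets; it only assumes the DestructionKind enumerators that
// already appear above.
#include "clang/AST/Type.h"
#include "llvm/Support/ErrorHandling.h"

static bool wantsRegisteredGlobalDtor(clang::QualType Ty) {
  switch (Ty.isDestructedType()) {
  case clang::QualType::DK_none:
    return false;                        // trivially destructible
  case clang::QualType::DK_cxx_destructor:
    return true;                         // needs a __cxa_atexit/atexit entry
  case clang::QualType::DK_objc_strong_lifetime:
  case clang::QualType::DK_objc_weak_lifetime:
  case clang::QualType::DK_nontrivial_c_struct:
    return false;                        // not released at process teardown
  }
  llvm_unreachable("unknown destruction kind");
}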
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here? Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant *C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable *GV =
        new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                                 llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::Value *DestPtr = Dest.getAddr();

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    llvm::PointerType *APType = cast<llvm::PointerType>(DestPtr->getType());
    llvm::ArrayType *AType = cast<llvm::ArrayType>(APType->getElementType());

    uint64_t NumInitElements = E->getNumInits();

    if (E->getNumInits() > 0) {
      QualType T1 = E->getType();
      QualType T2 = E->getInit(0)->getType();
      if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
        EmitAggLoadOfLValue(E->getInit(0));
        return;
      }
    }

    uint64_t NumArrayElements = AType->getNumElements();
    assert(NumInitElements <= NumArrayElements);

    QualType elementType = E->getType().getCanonicalType();
    elementType = CGF.getContext().getQualifiedType(
        cast<ArrayType>(elementType)->getElementType(),
        elementType.getQualifiers() + Dest.getQualifiers());

    // DestPtr is an array*.  Construct an elementType* by drilling
    // down a level.
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
    llvm::Value *indices[] = { zero, zero };
    llvm::Value *begin =
        Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");

    // Exception safety requires us to destroy all the
    // already-constructed members if an initializer throws.
    // For that, we'll need an EH cleanup.
    QualType::DestructionKind dtorKind = elementType.isDestructedType();
    llvm::AllocaInst *endOfInit = 0;
    EHScopeStack::stable_iterator cleanup;
    llvm::Instruction *cleanupDominator = 0;
    if (CGF.needsEHCleanup(dtorKind)) {
      // In principle we could tell the cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      endOfInit = CGF.CreateTempAlloca(begin->getType(),
                                       "arrayinit.endOfInit");
      cleanupDominator = Builder.CreateStore(begin, endOfInit);
      CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                           CGF.getDestroyer(dtorKind));
      cleanup = CGF.EHStack.stable_begin();

    // Otherwise, remember that we didn't need a cleanup.
    } else {
      dtorKind = QualType::DK_none;
    }

    llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

    // The 'current element to initialize'.  The invariants on this
    // variable are complicated.  Essentially, after each iteration of
    // the loop, it points to the last initialized element, except
    // that it points to the beginning of the array before any
    // elements have been initialized.
    llvm::Value *element = begin;

    // Emit the explicit initializers.
    for (uint64_t i = 0; i != NumInitElements; ++i) {
      // Advance to the next element.
      if (i > 0) {
        element = Builder.CreateInBoundsGEP(element, one,
                                            "arrayinit.element");

        // Tell the cleanup that it needs to destroy up to this
        // element.  TODO: some of these stores can be trivially
        // observed to be unnecessary.
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      LValue elementLV = CGF.MakeAddrLValue(element, elementType);
      EmitInitializationToLValue(E->getInit(i), elementLV);
    }

    // Check whether there's a non-trivial array-fill expression.
    // Note that this will be a CXXConstructExpr even if the element
    // type is an array (or array of array, etc.) of class type.
    Expr *filler = E->getArrayFiller();
    bool hasTrivialFiller = true;
    if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
      assert(cons->getConstructor()->isDefaultConstructor());
      hasTrivialFiller = cons->getConstructor()->isTrivial();
    }

    // Any remaining elements need to be zero-initialized, possibly
    // using the filler expression.  We can skip this if we're
    // emitting to zeroed memory.
    if (NumInitElements != NumArrayElements &&
        !(Dest.isZeroed() && hasTrivialFiller &&
          CGF.getTypes().isZeroInitializable(elementType))) {

      // Use an actual loop.  This is basically
      //   do { *array++ = filler; } while (array != end);

      // Advance to the start of the rest of the array.
      if (NumInitElements) {
        element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      // Compute the end of the array.
      llvm::Value *end = Builder.CreateInBoundsGEP(
          begin, llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
          "arrayinit.end");

      llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
      llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

      // Jump into the body.
      CGF.EmitBlock(bodyBB);
      llvm::PHINode *currentElement =
          Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
      currentElement->addIncoming(element, entryBB);

      // Emit the actual filler expression.
      LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
      if (filler)
        EmitInitializationToLValue(filler, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);

      // Move on to the next element.
      llvm::Value *nextElement =
          Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

      // Tell the EH cleanup that we finished with the last element.
      if (endOfInit) Builder.CreateStore(nextElement, endOfInit);

      // Leave the loop if we're done.
      llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                               "arrayinit.done");
      llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
      Builder.CreateCondBr(done, endBB, bodyBB);
      currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

      CGF.EmitBlock(endBB);
    }

    // Leave the partial-array cleanup if we entered one.
    if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);

    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = record->field_begin(),
                                      FieldEnd = record->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
    if (NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // We'll need to enter cleanup scopes in case any of the member
  // initializers throw an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = 0;

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned curInitIndex = 0;
  for (RecordDecl::field_iterator field = record->field_begin(),
                                  fieldEnd = record->field_end();
       field != fieldEnd; ++field) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;

    // FIXME: volatility
    LValue LV = CGF.EmitLValueForFieldInitialization(DestPtr, *field, 0);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind =
            field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        if (!cleanupDominator)
          cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
              dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i - 1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}
llvm::Function *CGOpenMPRuntime::EmitOMPThreadPrivateVarDefinition(
    const VarDecl *VD, llvm::Value *VDAddr, SourceLocation Loc,
    bool PerformInit, CodeGenFunction *CGF) {
  VD = VD->getDefinition(CGM.getContext());
  if (VD && ThreadPrivateWithDefinition.count(VD) == 0) {
    ThreadPrivateWithDefinition.insert(VD);
    QualType ASTTy = VD->getType();

    llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
    auto Init = VD->getAnyInitializer();
    if (CGM.getLangOpts().CPlusPlus && PerformInit) {
      // Generate a function that re-emits the declaration's initializer into
      // the threadprivate copy of the variable VD.
      CodeGenFunction CtorCGF(CGM);
      FunctionArgList Args;
      ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, SourceLocation(),
                            /*Id=*/nullptr, CGM.getContext().VoidPtrTy);
      Args.push_back(&Dst);

      auto &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
          CGM.getContext().VoidPtrTy, Args, FunctionType::ExtInfo(),
          /*isVariadic=*/false);
      auto FTy = CGM.getTypes().GetFunctionType(FI);
      auto Fn = CGM.CreateGlobalInitOrDestructFunction(
          FTy, ".__kmpc_global_ctor_.", Loc);
      CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
                            Args, SourceLocation());
      auto ArgVal = CtorCGF.EmitLoadOfScalar(
          CtorCGF.GetAddrOfLocalVar(&Dst),
          /*Volatile=*/false, CGM.PointerAlignInBytes,
          CGM.getContext().VoidPtrTy, Dst.getLocation());
      auto Arg = CtorCGF.Builder.CreatePointerCast(
          ArgVal,
          CtorCGF.ConvertTypeForMem(CGM.getContext().getPointerType(ASTTy)));
      CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
                               /*IsInitializer=*/true);
      ArgVal = CtorCGF.EmitLoadOfScalar(
          CtorCGF.GetAddrOfLocalVar(&Dst),
          /*Volatile=*/false, CGM.PointerAlignInBytes,
          CGM.getContext().VoidPtrTy, Dst.getLocation());
      CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
      CtorCGF.FinishFunction();
      Ctor = Fn;
    }
    if (VD->getType().isDestructedType() != QualType::DK_none) {
      // Generate a function that emits a destructor call for the
      // threadprivate copy of the variable VD.
      CodeGenFunction DtorCGF(CGM);
      FunctionArgList Args;
      ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, SourceLocation(),
                            /*Id=*/nullptr, CGM.getContext().VoidPtrTy);
      Args.push_back(&Dst);

      auto &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
          CGM.getContext().VoidTy, Args, FunctionType::ExtInfo(),
          /*isVariadic=*/false);
      auto FTy = CGM.getTypes().GetFunctionType(FI);
      auto Fn = CGM.CreateGlobalInitOrDestructFunction(
          FTy, ".__kmpc_global_dtor_.", Loc);
      DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
                            SourceLocation());
      auto ArgVal = DtorCGF.EmitLoadOfScalar(
          DtorCGF.GetAddrOfLocalVar(&Dst),
          /*Volatile=*/false, CGM.PointerAlignInBytes,
          CGM.getContext().VoidPtrTy, Dst.getLocation());
      DtorCGF.emitDestroy(ArgVal, ASTTy,
                          DtorCGF.getDestroyer(ASTTy.isDestructedType()),
                          DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
      DtorCGF.FinishFunction();
      Dtor = Fn;
    }
    // Do not emit the init function if it is not required.
    if (!Ctor && !Dtor)
      return nullptr;

    llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
    auto CopyCtorTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
                                /*isVarArg=*/false)->getPointerTo();
    // Copying constructor for the threadprivate variable.
    // Must be NULL - reserved by the runtime, but currently it requires that
    // this parameter is always NULL. Otherwise it fires an assertion.
    CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
    if (Ctor == nullptr) {
      auto CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
                                            /*isVarArg=*/false)->getPointerTo();
      Ctor = llvm::Constant::getNullValue(CtorTy);
    }
    if (Dtor == nullptr) {
      auto DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
                                            /*isVarArg=*/false)->getPointerTo();
      Dtor = llvm::Constant::getNullValue(DtorTy);
    }
    if (!CGF) {
      auto InitFunctionTy =
          llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
      auto InitFunction = CGM.CreateGlobalInitOrDestructFunction(
          InitFunctionTy, ".__omp_threadprivate_init_.");
      CodeGenFunction InitCGF(CGM);
      FunctionArgList ArgList;
      InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
                            CGM.getTypes().arrangeNullaryFunction(), ArgList,
                            Loc);
      EmitOMPThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
      InitCGF.FinishFunction();
      return InitFunction;
    }
    EmitOMPThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
  }
  return nullptr;
}
bool MicrosoftCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
                                          QualType elementType) {
  // Microsoft seems to completely ignore the possibility of a
  // two-argument usual deallocation function.
  return elementType.isDestructedType();
}
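// Hypothetical illustration (not from either snippet) of how the two
// requiresArrayCookie overloads differ: the generic CGCXXABI version first
// asks the delete-expression whether the usual deallocation function wants the
// element count, while the Microsoft variant keys purely off whether the
// element type has a non-trivial destruction kind. The helper name below is
// made up for this sketch.
#include "clang/AST/Type.h"

static bool elementNeedsArrayCookieMicrosoftStyle(clang::QualType ElementType) {
  // A non-DK_none destruction kind means array delete must know how many
  // elements to destroy, so a cookie is stored in front of the array.
  return ElementType.isDestructedType() != clang::QualType::DK_none;
}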