/// Emit code to cause the destruction of the given variable with /// static storage duration. static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D, llvm::Constant *addr) { CodeGenModule &CGM = CGF.CGM; // FIXME: __attribute__((cleanup)) ? QualType type = D.getType(); QualType::DestructionKind dtorKind = type.isDestructedType(); switch (dtorKind) { case QualType::DK_none: return; case QualType::DK_cxx_destructor: break; case QualType::DK_objc_strong_lifetime: case QualType::DK_objc_weak_lifetime: // We don't care about releasing objects during process teardown. assert(!D.getTLSKind() && "should have rejected this"); return; } llvm::Constant *function; llvm::Constant *argument; // Special-case non-array C++ destructors, where there's a function // with the right signature that we can just call. const CXXRecordDecl *record = nullptr; if (dtorKind == QualType::DK_cxx_destructor && (record = type->getAsCXXRecordDecl())) { assert(!record->hasTrivialDestructor()); CXXDestructorDecl *dtor = record->getDestructor(); function = CGM.getAddrOfCXXStructor(dtor, StructorType::Complete); argument = llvm::ConstantExpr::getBitCast( addr, CGF.getTypes().ConvertType(type)->getPointerTo()); // Otherwise, the standard logic requires a helper function. } else { function = CodeGenFunction(CGM) .generateDestroyHelper(addr, type, CGF.getDestroyer(dtorKind), CGF.needsEHCleanup(dtorKind), &D); argument = llvm::Constant::getNullValue(CGF.Int8PtrTy); } CGM.getCXXABI().registerGlobalDtor(CGF, D, function, argument); }
/// When we see an expression in a TopLevelCodeDecl in the REPL, process it, /// adding the proper decls back to the top level of the file. void REPLChecker::processREPLTopLevelExpr(Expr *E) { CanType T = E->getType()->getCanonicalType(); // Don't try to print invalid expressions, module exprs, or void expressions. if (isa<ErrorType>(T) || isa<ModuleType>(T) || T->isVoid()) return; // Okay, we need to print this expression. We generally do this by creating a // REPL metavariable (e.g. r4) to hold the result, so it can be referred to // in the future. However, if this is a direct reference to a decl (e.g. "x") // then don't create a repl metavariable. if (VarDecl *d = getObviousDeclFromExpr(E)) { generatePrintOfExpression(d->getName().str(), E); return; } // Remove the expression from being in the list of decls to execute, we're // going to reparent it. auto TLCD = cast<TopLevelCodeDecl>(SF.Decls.back()); E = TC.coerceToMaterializable(E); // Create the meta-variable, let the typechecker name it. Identifier name = TC.getNextResponseVariableName(SF.getParentModule()); VarDecl *vd = new (Context) VarDecl(/*static*/ false, /*IsLet*/true, E->getStartLoc(), name, E->getType(), &SF); SF.Decls.push_back(vd); // Create a PatternBindingDecl to bind the expression into the decl. Pattern *metavarPat = new (Context) NamedPattern(vd); metavarPat->setType(E->getType()); PatternBindingDecl *metavarBinding = PatternBindingDecl::create(Context, SourceLoc(), StaticSpellingKind::None, E->getStartLoc(), metavarPat, E, TLCD); // Overwrite the body of the existing TopLevelCodeDecl. TLCD->setBody(BraceStmt::create(Context, metavarBinding->getStartLoc(), ASTNode(metavarBinding), metavarBinding->getEndLoc(), /*implicit*/true)); // Finally, print the variable's value. E = TC.buildCheckedRefExpr(vd, &SF, E->getStartLoc(), /*Implicit=*/true); generatePrintOfExpression(vd->getName().str(), E); }
/// Report a global variable to the (kernel) AddressSanitizer metadata,
/// computing its qualified name and blacklist status from the AST decl.
void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
                                           const VarDecl &D, bool IsDynInit) {
  // Nothing to do unless an address sanitizer is enabled.
  if (!CGM.getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
                                           SanitizerKind::KernelAddress))
    return;

  std::string QualName;
  llvm::raw_string_ostream OS(QualName);
  D.printQualifiedName(OS);

  // A no_sanitize attribute covering address sanitization blacklists the
  // global. One matching attribute is enough, so stop at the first hit
  // (the original scanned the full attribute list needlessly).
  bool IsBlacklisted = false;
  for (auto Attr : D.specific_attrs<NoSanitizeAttr>()) {
    if (Attr->getMask() & SanitizerKind::Address) {
      IsBlacklisted = true;
      break;
    }
  }

  reportGlobalToASan(GV, D.getLocation(), OS.str(), D.getType(), IsDynInit,
                     IsBlacklisted);
}
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr) { const Expr *Init = D.getInit(); QualType T = D.getType(); if (!T->isReferenceType()) { EmitDeclInit(*this, D, DeclPtr); EmitDeclDestroy(*this, D, DeclPtr); return; } unsigned Alignment = getContext().getDeclAlign(&D).getQuantity(); RValue RV = EmitReferenceBindingToExpr(Init, &D); EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T); }
// Like applyConstQualifier, this must not return null (its callers do not // null check). Instead, return the input type on error. Type * TypeResolver::applyByRef(TypeSpecifier *spec, Type *type, TypeSpecHelper *helper) { if (!type->canBeUsedAsRefType()) { cc_.report(spec->byRefLoc(), rmsg::type_cannot_be_ref) << type; return type; } VarDecl* decl = helper ? helper->decl() : nullptr; if (decl) { assert(decl->sym()->isByRef()); assert(decl->sym()->isArgument()); } return cc_.types()->newReference(type); }
// Visit each top-level DeclGroup, pairing variable declarations with the
// DeclGroup that defined their underlying record type so the transformation
// can later combine them.
bool SimplifyStructUnionDecl::HandleTopLevelDecl(DeclGroupRef DGR) {
  DeclGroupRef::iterator DI = DGR.begin();
  const RecordDecl *RD = dyn_cast<RecordDecl>(*DI);
  if (RD) {
    // Group starts with a record definition: remember it so later variable
    // groups of this type can be matched against it.
    addOneRecordDecl(RD, DGR);
    return true;
  }

  VarDecl *VD = dyn_cast<VarDecl>(*DI);
  if (!VD)
    return true;

  // Find the record type underlying this variable's type, if any.
  const Type *T = VD->getType().getTypePtr();
  RD = getBaseRecordDecl(T);
  if (!RD)
    return true;

  // Only interesting if we previously recorded the group defining this record.
  const Decl *CanonicalD = RD->getCanonicalDecl();
  void *DGRPointer = RecordDeclToDeclGroup[CanonicalD];
  if (!DGRPointer)
    return true;

  // Instance counting: only act on the instance the user selected.
  ValidInstanceNum++;
  if (ValidInstanceNum != TransformationCounter)
    return true;

  TheRecordDecl = dyn_cast<RecordDecl>(CanonicalD);
  // Record both groups (as opaque pointers) for the rewriting phase.
  TheDeclGroupRefs.push_back(DGRPointer);
  TheDeclGroupRefs.push_back(DGR.getAsOpaquePtr());

  // Collect every variable from the current group...
  for (DeclGroupRef::iterator I = DGR.begin(), E = DGR.end(); I != E; ++I) {
    VarDecl *VD = dyn_cast<VarDecl>(*I);
    TransAssert(VD && "Bad VarDecl!");
    CombinedVars.insert(VD);
  }

  // ...and any variables declared alongside the record definition.
  DeclGroupRef DefDGR = DeclGroupRef::getFromOpaquePtr(DGRPointer);
  for (DeclGroupRef::iterator I = DefDGR.begin(), E = DefDGR.end(); I != E;
       ++I) {
    VarDecl *VD = dyn_cast<VarDecl>(*I);
    if (VD)
      CombinedVars.insert(VD);
  }
  return true;
}
// Lookup callback invoked by clang for names it cannot resolve. Either
// resolves the name to the demo Tester decl (while parsing an EvaluateT()
// expression) or synthesizes a dependent-typed VarDecl so the expression
// can be escaped into an EvaluateT() call later.
bool SymbolResolverCallback::LookupObject(LookupResult &R, Scope *S) {
  if (!ShouldResolveAtRuntime(R, S))
    return false;

  if (m_IsRuntime) {
    // We are currently parsing an EvaluateT() expression
    if (!m_Resolve)
      return false;

    // Only for demo resolve all unknown objects to cling::test::Tester
    if (!m_TesterDecl) {
      // Lazily look up cling::test::Tester once and cache it.
      clang::Sema &SemaR = m_Interpreter->getSema();
      clang::NamespaceDecl *NSD = utils::Lookup::Namespace(&SemaR, "cling");
      NSD = utils::Lookup::Namespace(&SemaR, "test", NSD);
      m_TesterDecl = utils::Lookup::Named(&SemaR, "Tester", NSD);
    }
    assert(m_TesterDecl && "Tester not found!");
    R.addDecl(m_TesterDecl);
    return true; // Tell clang to continue.
  }

  // We are currently NOT parsing an EvaluateT() expression.
  // Escape the expression into an EvaluateT() expression.
  ASTContext &C = R.getSema().getASTContext();
  DeclContext *DC = 0;
  // For DeclContext-less scopes like if (dyn_expr) {}
  // walk up the scope chain until one carries an entity.
  while (!DC) {
    DC = static_cast<DeclContext *>(S->getEntity());
    S = S->getParent();
  }

  DeclarationName Name = R.getLookupName();
  IdentifierInfo *II = Name.getAsIdentifierInfo();
  SourceLocation Loc = R.getNameLoc();
  // Synthesize a dependent-typed variable so parsing can proceed; its real
  // type is meant to be resolved at runtime.
  VarDecl *Res = VarDecl::Create(C, DC, Loc, Loc, II, C.DependentTy,
                                 /*TypeSourceInfo*/ 0, SC_None);

  // Annotate the decl to give a hint in cling. FIXME: Current implementation
  // is a gross hack, because TClingCallbacks shouldn't know about
  // EvaluateTSynthesizer at all!
  SourceRange invalidRange;
  Res->addAttr(new (C) AnnotateAttr(invalidRange, C, "__ResolveAtRuntime", 0));

  // Publish the synthesized decl to both the lookup result and the context.
  R.addDecl(Res);
  DC->addDecl(Res);
  // Say that we can handle the situation. Clang should try to recover
  return true;
}
/// Record the vectorization status of every variable declared in this
/// DeclStmt, forcing VECTORIZE when an initializer reads an image.
void TransferFunctions::VisitDeclStmt(DeclStmt *S) {
  // Iterate over all declarations of this DeclStmt.
  for (auto decl : S->decls()) {
    // One dyn_cast in the condition replaces the redundant isa<> check
    // followed by dyn_cast<> (standard LLVM idiom).
    if (VarDecl *VD = dyn_cast<VarDecl>(decl)) {
      if (VD->hasInit()) {
        // An initializer that reads an image forces vectorization of the
        // whole statement.
        if (checkImageAccess(VD->getInit(), READ_ONLY)) {
          KS.curStmtVectorize = (VectorInfo)(KS.curStmtVectorize | VECTORIZE);
        }
      }
      // Remember the status computed for this declared variable.
      KS.declsToVector[VD] = KS.curStmtVectorize;
    }
  }

  // Reset vectorization status for next statement.
  KS.curStmtVectorize = SCALAR;
}
/// Type-check a call's arguments against the function signature. Extra
/// arguments are only allowed when a variadic parameter exists (note:
/// |vararg| is currently never set, so extra arguments always error).
void SemanticAnalysis::checkCall(FunctionSignature *sig, ExpressionList *args) {
  VarDecl *vararg = nullptr;
  for (size_t i = 0; i < args->length(); i++) {
    Expression *expr = args->at(i);

    VarDecl *arg = nullptr;
    if (i >= sig->parameters()->length()) {
      if (!vararg) {
        // BUG FIX: the expected-count operand was previously attached with
        // the comma operator ("<< a, b;"), so it was evaluated and discarded
        // instead of being streamed into the diagnostic. Stream both values.
        cc_.report(expr->loc(), rmsg::wrong_argcount)
          << args->length()
          << sig->parameters()->length();
        return;
      }
      arg = vararg;
    } else {
      arg = sig->parameters()->at(i);
    }
    (void)arg;

    visitForValue(expr);

#if 0
    Coercion cr(cc_, Coercion::Reason::arg, expr, arg->te().resolved());
    if (cr.coerce() != Coercion::Result::ok) {
      auto builder = cc_.report(expr->loc(), rmsg::cannot_coerce_for_arg)
        << expr->type()
        << arg->te().resolved();

      if (i < args->length() && arg->name())
        builder << arg->name();
      else
        builder << i;

      builder << cr.diag(expr->loc());
      break;
    }

    // Rewrite the tree for the coerced result.
    args->at(i) = cr.output();
#endif
  }
}
/// Validates the variable list of an OpenMP threadprivate directive,
/// diagnosing and dropping unsuitable variables, and builds an
/// OMPThreadPrivateDecl from the survivors (or returns null if none remain).
OMPThreadPrivateDecl *Sema::CheckOMPThreadPrivateDecl(
    SourceLocation Loc, ArrayRef<Expr *> VarList) {
  SmallVector<Expr *, 8> Vars;
  for (ArrayRef<Expr *>::iterator I = VarList.begin(), E = VarList.end();
       I != E; ++I) {
    DeclRefExpr *DE = cast<DeclRefExpr>(*I);
    VarDecl *VD = cast<VarDecl>(DE->getDecl());
    SourceLocation ILoc = DE->getExprLoc();

    // OpenMP [2.9.2, Restrictions, C/C++, p.10]
    //   A threadprivate variable must not have an incomplete type.
    if (RequireCompleteType(ILoc, VD->getType(),
                            diag::err_omp_threadprivate_incomplete_type)) {
      continue;
    }

    // OpenMP [2.9.2, Restrictions, C/C++, p.10]
    //   A threadprivate variable must not have a reference type.
    if (VD->getType()->isReferenceType()) {
      Diag(ILoc, diag::err_omp_ref_type_arg)
          << getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
      // Note the prior declaration or the definition, whichever applies.
      bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
                    VarDecl::DeclarationOnly;
      Diag(VD->getLocation(), IsDecl ? diag::note_previous_decl
                                     : diag::note_defined_here)
          << VD;
      continue;
    }

    // Check if this is a TLS variable; such variables are rejected.
    if (VD->getTLSKind()) {
      Diag(ILoc, diag::err_omp_var_thread_local) << VD;
      bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
                    VarDecl::DeclarationOnly;
      Diag(VD->getLocation(), IsDecl ? diag::note_previous_decl
                                     : diag::note_defined_here)
          << VD;
      continue;
    }

    Vars.push_back(*I);
  }

  // Only build the decl if at least one variable passed all checks.
  OMPThreadPrivateDecl *D = 0;
  if (!Vars.empty()) {
    D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
                                     Vars);
    D->setAccess(AS_public);
  }
  return D;
}
// Record every variable declared in this DeclStmt as private to the current
// directives, and classify initialized pointer/array-of-pointer variables.
bool VarCollector::VisitDeclStmt(DeclStmt *S) {
  DeclGroupRef::iterator it;
  for (it = S->decl_begin(); it != S->decl_end(); it++) {
    VarDecl *VD = dyn_cast<VarDecl>(*it);
    assert(VD);

    FullDirectives->InsertPrivateDecl(VD);

    // Only initialized declarations need the pointer/array classification.
    if (!VD->hasInit()) {
      continue;
    }

    // Peel array layers off the type until we hit a pointer (or something
    // else); IsArray records whether we passed through an array on the way.
    bool IsArray = false;
    const Type *T = VD->getType().getTypePtr();

    do {
      if (T->isPointerType()) {
        string TS = GetType(VD->getType()->getUnqualifiedDesugaredType());
        if (IsArray) {
          // Pointer reached through an array type: array-style init.
          HandleArrayInit(VD, TS, S->getLocStart());
        } else {
          HandlePointerInit(VD, TS, S->getLocStart());
        }
        T = NULL; // Done with this declaration.
      } else if (T->isArrayType()) {
        IsArray = true;
        T = T->getAsArrayTypeUnsafe()->getElementType().getTypePtr();
      } else {
        T = NULL; // Neither pointer nor array: nothing to record.
      }
    } while (T);
  }
  return true;
}
void print_globals(){ vector<VarDecl *> globals = get_reg_decls(); for(vector<VarDecl *>::const_iterator it = globals.begin(); it != globals.end(); it++){ VarDecl *s = *it; cout << s->tostring() << endl; } cout << "var mem:reg8_t[reg32_t];" << endl; vector<Stmt *> helpers = gen_eflags_helpers(); for(vector<Stmt *>::const_iterator it = helpers.begin(); it != helpers.end(); it++){ Stmt *s = *it; cout << s->tostring() << endl; } }
// Return the declared type of the (first) declaration in a DeclStmt, or
// NULL if that declaration is not a VarDecl.
const Type *CombLocalVarCollectionVisitor::getTypeFromDeclStmt(DeclStmt *DS) {
  // For a group declaration, only the first declarator matters here.
  Decl *FirstD = DS->isSingleDecl() ? DS->getSingleDecl()
                                    : *(DS->getDeclGroup().begin());

  VarDecl *VD = dyn_cast<VarDecl>(FirstD);
  return VD ? VD->getType().getTypePtr() : NULL;
}
// Emit the initializer for a global C++ variable. References may only bind
// to lvalue initializers; anything else is reported as unsupported.
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::Constant *DeclPtr) {
  const Expr *Init = D.getInit();
  QualType T = D.getType();

  // Non-reference globals take the ordinary initialization path.
  if (!T->isReferenceType()) {
    EmitDeclInit(*this, D, DeclPtr);
    return;
  }

  // Reference globals: bind to the lvalue initializer and store the
  // resulting address into the global slot.
  if (Init->isLvalue(getContext()) == Expr::LV_Valid) {
    RValue RV = EmitReferenceBindingToExpr(Init, /*IsInitializer=*/true);
    EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, T);
    return;
  }
  ErrorUnsupported(Init,
                   "global variable that binds reference to a non-lvalue");
}
// Dump a function signature: the return type followed by a (possibly
// empty) parenthesized parameter list, one parameter per line.
void dump(FunctionSignature *sig) {
  dump(sig->returnType(), nullptr);

  const size_t nparams = sig->parameters()->length();
  if (nparams == 0) {
    fprintf(fp_, " ()\n");
    return;
  }

  fprintf(fp_, " (\n");
  indent();
  for (size_t i = 0; i < nparams; i++) {
    prefix();
    VarDecl *param = sig->parameters()->at(i);
    dump(param->te(), param->name());
    fprintf(fp_, "\n");
  }
  unindent();
  prefix();
  fprintf(fp_, ")");
}
/// Default ABI hook for registering a global variable's destructor call.
void CGCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                                  llvm::Constant *dtor, llvm::Constant *addr) {
  // Thread-local variables would need per-thread teardown, which this
  // default implementation does not provide.
  if (D.getTLSKind())
    CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");

  // Fall back to the portable atexit-based registration.
  CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
}
// Bucket top-level variable DeclGroups by the canonical type of their first
// variable; when the selected instance is reached, record the first group of
// that type and the current one so they can be combined later.
bool CombineGlobalVarDecl::HandleTopLevelDecl(DeclGroupRef DGR) {
  DeclGroupRef::iterator DI = DGR.begin();
  VarDecl *VD = dyn_cast<VarDecl>(*DI);
  // Only consider variable groups from the main file.
  if (!VD || isInIncludedFile(VD))
    return true;

  SourceRange Range = VD->getSourceRange();
  if (Range.getBegin().isInvalid() || Range.getEnd().isInvalid())
    return true;

  const Type *T = VD->getType().getTypePtr();
  const Type *CanonicalT = Context->getCanonicalType(T);

  DeclGroupVector *DV;
  TypeToDeclMap::iterator TI = AllDeclGroups.find(CanonicalT);
  if (TI == AllDeclGroups.end()) {
    // First group seen with this type: start a new bucket.
    DV = new DeclGroupVector();
    AllDeclGroups[CanonicalT] = DV;
  } else {
    // A later group with a matching type is a combination candidate.
    ValidInstanceNum++;
    DV = (*TI).second;

    if (ValidInstanceNum == TransformationCounter) {
      if (DV->size() >= 1) {
        // Remember the first group of this type plus the current group for
        // the rewriting phase.
        void *DP1 = *(DV->begin());
        TheDeclGroupRefs.push_back(DP1);
        TheDeclGroupRefs.push_back(DGR.getAsOpaquePtr());
      }
    }
  }
  // Note that it's unnecessary to keep all encountered
  // DeclGroupRefs. We could choose a light way similar
  // to what we implemented in CombineLocalVarDecl.
  // I kept the code here because I feel we probably
  // need more combinations, i.e., not only combine the
  // first DeclGroup with others, but we could combine
  // the second one and the third one.
  DV->push_back(DGR.getAsOpaquePtr());
  return true;
}
// Handle a local variable declaration: reject reserved names, then build a
// VarDecl wrapped in a DeclStmt. Takes ownership of |type| and |InitValue|.
C2::StmtResult C2Sema::ActOnDeclaration(const char *name, SourceLocation loc,
                                        Expr *type, Expr *InitValue) {
  assert(type);
#ifdef SEMA_DEBUG
  std::cerr << COL_SEMA "SEMA: decl at ";
  loc.dump(SourceMgr);
  std::cerr << ANSI_NORMAL "\n";
#endif
  // Names starting with "__" are reserved and rejected.
  if (name[0] == '_' && name[1] == '_') {
    Diag(loc, diag::err_invalid_symbol_name) << name;
    // Error path owns the expressions; free them before bailing out.
    delete type;
    delete InitValue;
    return StmtResult(true);
  }
  // TEMP extract here to Type and delete rtype Expr
  TypeExpr *typeExpr = cast<TypeExpr>(type);
  bool hasLocal = typeExpr->hasLocalQualifier();
  VarDecl *V =
      createVarDecl(VARDECL_LOCAL, name, loc, typeExpr, InitValue, false);
  if (hasLocal)
    V->setLocalQualifier();
  return StmtResult(new DeclStmt(V));
}
/// Returns a string representation of the SubPath /// suitable for use in diagnostic text. Only supports the Projections /// that stored-property relaxation supports: struct stored properties /// and tuple elements. std::string AccessSummaryAnalysis::getSubPathDescription( SILType baseType, const IndexTrieNode *subPath, SILModule &M) { // Walk the trie to the root to collect the sequence (in reverse order). llvm::SmallVector<unsigned, 4> reversedIndices; const IndexTrieNode *I = subPath; while (!I->isRoot()) { reversedIndices.push_back(I->getIndex()); I = I->getParent(); } std::string sbuf; llvm::raw_string_ostream os(sbuf); SILType containingType = baseType; for (unsigned index : reversed(reversedIndices)) { os << "."; if (StructDecl *D = containingType.getStructOrBoundGenericStruct()) { auto iter = D->getStoredProperties().begin(); std::advance(iter, index); VarDecl *var = *iter; os << var->getBaseName(); containingType = containingType.getFieldType(var, M); continue; } if (auto tupleTy = containingType.getAs<TupleType>()) { Identifier elementName = tupleTy->getElement(index).getName(); if (elementName.empty()) os << index; else os << elementName; containingType = containingType.getTupleElementType(index); continue; } llvm_unreachable("Unexpected type in projection SubPath!"); } return os.str(); }
void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr) { // If we've been asked to forbid guard variables, emit an error now. // This diagnostic is hard-coded for Darwin's use case; we can find // better phrasing if someone else needs it. if (CGM.getCodeGenOpts().ForbidGuardVariables) CGM.Error(D.getLocation(), "this initialization requires a guard variable, which " "the kernel does not support"); CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr); }
// Rewrite __block qualifiers on Objective-C variables in this body to
// __weak (when the type supports it) or __unsafe_unretained.
void BlockObjCVariableTraverser::traverseBody(BodyContext &BodyCtx) {
  MigrationPass &Pass = BodyCtx.getMigrationContext().Pass;

  // Collect the candidate variables first.
  llvm::DenseSet<VarDecl *> VarsToChange;
  BlockObjCVarRewriter Rewriter(VarsToChange);
  Rewriter.TraverseStmt(BodyCtx.getTopStmt());

  for (llvm::DenseSet<VarDecl *>::iterator It = VarsToChange.begin(),
                                           End = VarsToChange.end();
       It != End; ++It) {
    VarDecl *VD = *It;
    BlocksAttr *BlockAttr = VD->getAttr<BlocksAttr>();
    if (!BlockAttr)
      continue;

    // Pick the replacement qualifier based on whether __weak applies.
    bool UseWeak = canApplyWeak(Pass.Ctx, VD->getType());
    SourceManager &SM = Pass.Ctx.getSourceManager();
    Transaction Txn(Pass.TA);
    Pass.TA.replaceText(SM.getExpansionLoc(BlockAttr->getLocation()),
                        "__block",
                        UseWeak ? "__weak" : "__unsafe_unretained");
  }
}
// Emit the initializing store for a non-reference global variable,
// dispatching on whether the type is scalar, complex, or aggregate.
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         llvm::Constant *DeclPtr) {
  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  ASTContext &Context = CGF.getContext();
  const Expr *Init = D.getInit();
  QualType T = D.getType();
  bool isVolatile = Context.getCanonicalType(T).isVolatileQualified();

  if (CGF.hasAggregateLLVMType(T)) {
    // Complex values get their dedicated emission path; everything else
    // aggregate goes through the generic aggregate emitter.
    if (T->isAnyComplexType())
      CGF.EmitComplexExprIntoAddr(Init, DeclPtr, isVolatile);
    else
      CGF.EmitAggExpr(Init, DeclPtr, isVolatile);
  } else {
    // Scalar: evaluate and store directly.
    llvm::Value *V = CGF.EmitScalarExpr(Init);
    CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, T);
  }
}
// Convert a parameter list into a JSON array of {type, name, decl} objects.
JsonList *toJson(const ParameterList *params) {
  JsonList *out = new (pool_) JsonList();
  for (size_t i = 0; i < params->length(); i++) {
    VarDecl *param = params->at(i);

    JsonObject *entry = new (pool_) JsonObject();
    entry->add(atom_type_, toJson(param, false));
    if (!param->name()) {
      // Unnamed rest parameter: synthesize "..." for the name and append
      // "..." to the rendered type for the decl text.
      entry->add(atom_name_, toJson("..."));
      AutoString rendered =
          BuildTypeName(param->te(), nullptr, TypeDiagFlags::Names);
      rendered = rendered + " ...";
      entry->add(atom_decl_, toJson(rendered.ptr()));
    } else {
      entry->add(atom_name_, toJson(param->name()));
      entry->add(atom_decl_, toJson(param, true));
    }
    out->add(entry);
  }
  return out;
}
void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
                                      llvm::GlobalVariable *DeclPtr,
                                      bool PerformInit) {
  // FIXME: this code was only tested for global initialization.
  // Not sure whether we want thread-safe static local variables as VS
  // doesn't make them thread-safe.

  // Dynamic initialization of thread-local variables is not implemented
  // for this ABI.
  if (D.getTLSKind())
    CGM.ErrorUnsupported(&D, "dynamic TLS initialization");

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, DeclPtr, PerformInit);
}
/// Emit an alloca (or GlobalValue depending on target) /// for the specified parameter and set up LocalDeclMap. void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) { // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl? assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) && "Invalid argument to EmitParmDecl"); QualType Ty = D.getType(); llvm::Value *DeclPtr; if (!Ty->isConstantSizeType()) { // Variable sized values always are passed by-reference. DeclPtr = Arg; } else { // A fixed sized single-value variable becomes an alloca in the entry block. const llvm::Type *LTy = ConvertTypeForMem(Ty); if (LTy->isSingleValueType()) { // TODO: Alignment std::string Name = D.getNameAsString(); Name += ".addr"; DeclPtr = CreateTempAlloca(LTy); DeclPtr->setName(Name.c_str()); // Store the initial value into the alloca. EmitStoreOfScalar(Arg, DeclPtr, Ty.isVolatileQualified(), Ty); } else { // Otherwise, if this is an aggregate, just use the input pointer. DeclPtr = Arg; } Arg->setName(D.getNameAsString()); } llvm::Value *&DMEntry = LocalDeclMap[&D]; assert(DMEntry == 0 && "Decl already exists in localdeclmap!"); DMEntry = DeclPtr; // Emit debug info for param declaration. if (CGDebugInfo *DI = getDebugInfo()) { DI->setLocation(D.getLocation()); DI->EmitDeclareOfArgVariable(&D, DeclPtr, Builder); } }
// Generate the LLVM IR body for the current function declaration into
// |func|: entry block, parameter stores, statement emission, and an
// implicit "ret void" when the body falls off the end.
void CodeGenFunction::generateBody(llvm::Function *func) {
  LOG_FUNC
  CurFn = func;

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", func);

  // Create a marker to make it easy to insert allocas into the entryblock
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::IntegerType *Int32Ty = llvm::Type::getInt32Ty(context);
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  Builder.SetInsertPoint(EntryBB);

  // arguments: give each parameter a slot via EmitVarDecl and store the
  // incoming IR argument value into it.
  Function::arg_iterator argsValues = func->arg_begin();
  for (unsigned i = 0; i < FuncDecl->numArgs(); i++) {
    VarDecl *arg = FuncDecl->getArg(i);
    EmitVarDecl(arg);
    Value *argumentValue = argsValues++;
    argumentValue->setName(arg->getName());
    new StoreInst(argumentValue, arg->getIRValue(), false, EntryBB);
  }

  // body
  CompoundStmt *Body = FuncDecl->getBody();
  EmitCompoundStmt(Body);

  // If the final block has no terminator, close it with "ret void".
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
  if (!CurBB->getTerminator())
    Builder.CreateRetVoid();

  // Remove the AllocaInsertPt instruction, which is just a convenience for
  // us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = 0;
  Ptr->eraseFromParent();
}
void UnnecessaryCopyInitialization::handleCopyFromMethodReturn( const VarDecl &Var, const Stmt &BlockStmt, bool IssueFix, const VarDecl *ObjectArg, ASTContext &Context) { bool IsConstQualified = Var.getType().isConstQualified(); if (!IsConstQualified && !isOnlyUsedAsConst(Var, BlockStmt, Context)) return; if (ObjectArg != nullptr && !isOnlyUsedAsConst(*ObjectArg, BlockStmt, Context)) return; auto Diagnostic = diag(Var.getLocation(), IsConstQualified ? "the const qualified variable %0 is " "copy-constructed from a const reference; " "consider making it a const reference" : "the variable %0 is copy-constructed from a " "const reference but is only used as const " "reference; consider making it a const reference") << &Var; if (IssueFix) recordFixes(Var, Context, Diagnostic); }
/// Handle an EXTERNAL entity declaration: either turn a previously declared
/// unused symbol / dummy argument into a FunctionDecl, or create a fresh
/// external function for a new name.
Decl *Sema::ActOnExternalEntityDecl(ASTContext &C, QualType T,
                                    SourceLocation IDLoc,
                                    const IdentifierInfo *IDInfo) {
  SourceLocation TypeLoc;
  VarDecl *ArgumentExternal = nullptr;
  if (auto Prev = LookupIdentifier(IDInfo)) {
    auto Quals = getDeclQualifiers(Prev);
    // Applying EXTERNAL twice to the same name is an error.
    if (Quals.hasAttributeSpec(Qualifiers::AS_external)) {
      Diags.Report(IDLoc, diag::err_duplicate_attr_spec)
          << DeclSpec::getSpecifierName(Qualifiers::AS_external);
      return Prev;
    }

    // apply EXTERNAL to an unused symbol or an argument.
    auto VD = dyn_cast<VarDecl>(Prev);
    if (VD && (VD->isUnusedSymbol() || VD->isArgument())) {
      // Take over the variable's type and location, then retire the old
      // decl; arguments are remembered so their type can be updated below.
      T = VD->getType();
      TypeLoc = VD->getLocation();
      CurContext->removeDecl(VD);
      if (VD->isArgument())
        ArgumentExternal = VD;
    } else {
      DiagnoseRedefinition(IDLoc, IDInfo, Prev);
      return nullptr;
    }
  }

  // With no known result type, default to void.
  if (T.isNull())
    T = C.VoidTy;

  DeclarationNameInfo DeclName(IDInfo, IDLoc);
  auto Decl = FunctionDecl::Create(
      C,
      ArgumentExternal ? FunctionDecl::ExternalArgument
                       : FunctionDecl::External,
      CurContext, DeclName, T);
  SetFunctionType(Decl, T, TypeLoc, SourceRange()); // FIXME: proper loc, and range
  CurContext->addDecl(Decl);
  // Keep the retired argument's type in sync with the new function type.
  if (ArgumentExternal)
    ArgumentExternal->setType(C.getFunctionType(Decl));
  return Decl;
}
// Emit a block-scope variable with static storage as an internal-linkage
// global, handling its initializer, attributes, and debug info.
void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) {
  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");

  llvm::GlobalVariable *GV =
      CreateStaticBlockVarDecl(D, ".", llvm::GlobalValue::InternalLinkage);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  DMEntry = GV;

  // Make sure to evaluate VLA bounds now so that we have them for later.
  //
  // FIXME: Can this happen?
  if (D.getType()->isVariablyModifiedType())
    EmitVLASize(D.getType());

  // If this value has an initializer, emit it.
  if (D.getInit())
    GV = AddInitializerToGlobalBlockVarDecl(D, GV);

  // FIXME: Merge attribute handling.
  if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) {
    SourceManager &SM = CGM.getContext().getSourceManager();
    llvm::Constant *Ann =
        CGM.EmitAnnotateAttr(GV, AA,
                             SM.getInstantiationLineNumber(D.getLocation()));
    CGM.AddAnnotation(Ann);
  }

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    GV->setSection(SA->getName());

  if (D.hasAttr<UsedAttr>())
    CGM.AddUsedGlobal(GV);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV uses of this constant will be invalid.
  const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
  const llvm::Type *LPtrTy =
      llvm::PointerType::get(LTy, D.getType().getAddressSpace());
  DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D);
  }
}
// Emit the initializer (when PerformInit) and either an invariance marker
// or a destructor registration for a global C++ variable.
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::Constant *DeclPtr,
                                               bool PerformInit) {
  const Expr *Init = D.getInit();
  QualType T = D.getType();

  if (!T->isReferenceType()) {
    if (PerformInit)
      EmitDeclInit(*this, D, DeclPtr);
    // Constant objects are marked invariant instead of getting a destructor
    // registered; all other objects are destroyed at exit.
    if (CGM.isTypeConstant(D.getType(), true))
      EmitDeclInvariant(*this, D, DeclPtr);
    else
      EmitDeclDestroy(*this, D, DeclPtr);
    return;
  }

  // Reference globals must be dynamically initialized here: bind the
  // reference and store the resulting address.
  assert(PerformInit && "cannot have constant initializer which needs "
         "destruction for reference");
  unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
  RValue RV = EmitReferenceBindingToExpr(Init, &D);
  EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
}