// We make a reasonable effort to resolve types early, since TypeSpecifier is // quite a large structure (48 bytes on x86, as of this writing, and it will // only get bigger). We want to eliminate it from the AST, as well as reduce // dependence on TypeResolver which is a rather expensive pass. TypeExpr NameResolver::resolve(TypeSpecifier &spec, TypeSpecHelper *helper) { Type *type = resolveBase(spec); if (!type) return delay(spec); // Note: we are only updating the base type! We can't overwrite the whole // spec because it gets reused for parsing some decls. spec.setResolvedBaseType(type); // If the base type is an unresolved typedef, we have to wait. if (type->isUnresolvedTypedef()) return delay(spec); // If we have an array, but it's not a valid construction, then we just error // and delay (we'll never reach type resolution after we error). if (spec.rank()) { if (!tr_.checkArrayInnerType(&spec, type)) return delay(spec); } if (spec.dims()) { // If we have explicit dimension sizes, we have to bail out and wait for // type resolution (which also does constant resolution). We do special // case the very common integer literal, single-dimension case. if (spec.rank() != 1) return delay(spec); Expression *expr = spec.sizeOfRank(0); if (!expr || !expr->isIntegerLiteral()) return delay(spec); IntegerLiteral *lit = expr->toIntegerLiteral(); if (lit->value() < 1 || lit->value() > ArrayType::kMaxSize) return delay(spec); type = cc_.types()->newArray(type, (int32_t)lit->value()); } else if (spec.rank()) { for (size_t i = 0; i < spec.rank(); i++) type = cc_.types()->newArray(type, ArrayType::kUnsized); } if (spec.isConst()) type = tr_.applyConstQualifier(&spec, type, helper); return TypeExpr(type); }
// Classifies which of the two index expressions contribute a stride.
//
// An expression counts as "no stride" only when, after stripping parentheses
// and implicit casts, it is the integer literal 0. Anything else — including
// a constant expression that merely *evaluates* to 0 — is treated as a
// stride.
//
// Fix over the original: it tested with isa<> and then repeated the exact
// same type check via dyn_cast<> (the LLVM coding standards call this out —
// dyn_cast<> alone does both jobs), and it duplicated the logic for each
// operand. The decision is now factored into one helper.
MemoryAccessDetail TransferFunctions::checkStride(Expr *EX, Expr *EY) {
  // Returns true unless E is literally 0.
  auto hasStride = [](Expr *E) -> bool {
    if (IntegerLiteral *IL =
            dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts()))
      return IL->getValue().getSExtValue() != 0;
    return true;
  };

  const bool stride_x = hasStride(EX);
  const bool stride_y = hasStride(EY);

  if (stride_x && stride_y)
    return STRIDE_XY;
  if (stride_x)
    return STRIDE_X;
  if (stride_y)
    return STRIDE_Y;
  return NO_STRIDE;
}
// Wraps the given statements into a synthesized function ("__fdN") and
// creates a synthesized variable ("__vdN") whose initializer calls it, so
// that codegen runs the statements in declaration order. On success the
// statements are consumed (Stmts is cleared); the new decls are appended to
// the current transaction and hidden in the translation-unit decl context.
//
// Fixes over the original:
//  * Stmts.back() was called without checking for an empty vector — UB when
//    no statements were collected. We now also append the "return 0;" when
//    Stmts is empty, so the synthesized body is always well-formed.
//  * The inner Sema::ContextRAII shadowed the outer one of the same name
//    (pushedDC); renamed for clarity.
void DeclExtractor::EnforceInitOrder(llvm::SmallVectorImpl<Stmt*>& Stmts) {
  Scope* TUScope = m_Sema->TUScope;
  DeclContext* TUDC = static_cast<DeclContext*>(TUScope->getEntity());
  // We can't PushDeclContext, because we don't have scope.
  Sema::ContextRAII pushedDC(*m_Sema, TUDC);

  std::string FunctionName = "__fd";
  createUniqueName(FunctionName);
  IdentifierInfo& IIFD = m_Context->Idents.get(FunctionName);
  SourceLocation Loc;
  NamedDecl* ND = m_Sema->ImplicitlyDefineFunction(Loc, IIFD, TUScope);
  if (FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(ND)) {
    FD->setImplicit(false); // Better for debugging

    // Add a return statement if it doesn't exist (or if there are no
    // statements at all — the body must still return something).
    if (Stmts.empty() || !isa<ReturnStmt>(Stmts.back())) {
      Sema::ContextRAII pushedFnDC(*m_Sema, FD);
      // Generate the return statement:
      // First a literal 0, then the return taking that literal.
      // One bit is enough:
      llvm::APInt ZeroInt(m_Context->getIntWidth(m_Context->IntTy), 0,
                          /*isSigned=*/true);
      IntegerLiteral* ZeroLit
        = IntegerLiteral::Create(*m_Context, ZeroInt, m_Context->IntTy,
                                 SourceLocation());
      Stmts.push_back(m_Sema->ActOnReturnStmt(ZeroLit->getExprLoc(),
                                              ZeroLit).take());
    }

    // Wrap Stmts into a function body.
    llvm::ArrayRef<Stmt*> StmtsRef(Stmts.data(), Stmts.size());
    CompoundStmt* CS = new (*m_Context)CompoundStmt(*m_Context, StmtsRef,
                                                    Loc, Loc);
    FD->setBody(CS);
    // We know the transaction is closed, but it is safe.
    getTransaction()->forceAppend(FD);

    // Create the VarDecl with the init.
    std::string VarName = "__vd";
    createUniqueName(VarName);
    IdentifierInfo& IIVD = m_Context->Idents.get(VarName);
    VarDecl* VD = VarDecl::Create(*m_Context, TUDC, Loc, Loc, &IIVD,
                                  FD->getReturnType(), (TypeSourceInfo*)0,
                                  SC_None);
    // NOTE(review): LookupMemberName looks odd for a free function —
    // presumably LookupOrdinaryName was intended; left unchanged since the
    // result set is populated manually via addDecl below.
    LookupResult R(*m_Sema, FD->getDeclName(), Loc, Sema::LookupMemberName);
    R.addDecl(FD);
    CXXScopeSpec CSS;
    Expr* UnresolvedLookup
      = m_Sema->BuildDeclarationNameExpr(CSS, R, /*ADL*/ false).take();
    Expr* TheCall = m_Sema->ActOnCallExpr(TUScope, UnresolvedLookup, Loc,
                                          MultiExprArg(), Loc).take();
    assert(VD && TheCall && "Missing VD or its init!");
    VD->setInit(TheCall);

    // We know the transaction is closed, but it is safe.
    getTransaction()->forceAppend(VD);
    // Add it to the transaction for codegenning.
    TUDC->addHiddenDecl(VD);
    Stmts.clear();
    return;
  }
  llvm_unreachable("Must be able to enforce init order.");
}
// Returns true if 'expr' is an IntegerLiteral whose value equals 'value'.
//
// Fix: the original compared the literal's APInt against 'value' through
// APInt::operator==(uint64_t), which is wrong for negative values — e.g. a
// 32-bit APInt holding -1 zero-extends to 0xFFFFFFFF, which never equals the
// implicitly-converted uint64_t 0xFFFFFFFFFFFFFFFF. Compare as a signed
// 64-bit value instead, guarding the getSExtValue() call (it asserts when
// the value does not fit in 64 bits).
bool isIntegerLiteral(Expr *expr, int value) {
  IntegerLiteral *integerLiteral = dyn_cast<IntegerLiteral>(expr);
  if (!integerLiteral)
    return false;
  const llvm::APInt &literal = integerLiteral->getValue();
  if (literal.getMinSignedBits() > 64)
    return false; // Cannot possibly equal an int.
  return literal.getSExtValue() == value;
}
bool FindGPUMacro::VisitForStmt(ForStmt *fstmt) { Stmt *body = fstmt->getBody(); analyze_data_struct(body); int tx = 1, ty = 1, tz = 1 , bx = 1, by = 1, bz = 1, gpu_time = 0, cpu_time = 0, instanceNum = 0; for (Stmt::child_iterator it = body->child_begin(), eit = body->child_end(); it != eit; it++) { Stmt *s = *it; if (DeclStmt *ds = dyn_cast<DeclStmt>(s)){ if (VarDecl *vd = dyn_cast<VarDecl>(ds->getSingleDecl())){ string className = vd->getTypeSourceInfo()->getType().getBaseTypeIdentifier()->getName(); if (className == "profile_time") { if (CXXConstructExpr *ce = dyn_cast<CXXConstructExpr>(vd->getInit()->IgnoreImpCasts())) { if (MaterializeTemporaryExpr *me = dyn_cast<MaterializeTemporaryExpr>(ce->getArg(0)->IgnoreImpCasts())) { if (CXXTemporaryObjectExpr *co = dyn_cast<CXXTemporaryObjectExpr>(me->GetTemporaryExpr()->IgnoreImpCasts())) { IntegerLiteral *x = dyn_cast<IntegerLiteral>(co->getArg(0)); IntegerLiteral *y = dyn_cast<IntegerLiteral>(co->getArg(1)); IntegerLiteral *z = dyn_cast<IntegerLiteral>(co->getArg(2)); instanceNum = x->getValue().getSExtValue(); gpu_time = y->getValue().getSExtValue(); cpu_time = z->getValue().getSExtValue(); } } } } if (className == "sc_gpu_thread_hierarchy") { if (CXXConstructExpr *ce = dyn_cast<CXXConstructExpr>(vd->getInit()->IgnoreImpCasts())) { if (MaterializeTemporaryExpr *me = dyn_cast<MaterializeTemporaryExpr>(ce->getArg(0)->IgnoreImpCasts())) { if (CXXTemporaryObjectExpr *co = dyn_cast<CXXTemporaryObjectExpr>(me->GetTemporaryExpr()->IgnoreImpCasts())) { IntegerLiteral *x = dyn_cast<IntegerLiteral>(co->getArg(1)); IntegerLiteral *y = dyn_cast<IntegerLiteral>(co->getArg(2)); IntegerLiteral *z = dyn_cast<IntegerLiteral>(co->getArg(3)); IntegerLiteral *w = dyn_cast<IntegerLiteral>(co->getArg(4)); instanceNum = x->getValue().getSExtValue(); tx = x->getValue().getSExtValue(); ty = y->getValue().getSExtValue(); tz = z->getValue().getSExtValue(); } } } } if (className == "sc_gpu_block_hierarchy") { if (CXXConstructExpr *ce = 
dyn_cast<CXXConstructExpr>(vd->getInit()->IgnoreImpCasts())) { if (MaterializeTemporaryExpr *me = dyn_cast<MaterializeTemporaryExpr>(ce->getArg(0)->IgnoreImpCasts())) { if (CXXTemporaryObjectExpr *co = dyn_cast<CXXTemporaryObjectExpr>(me->GetTemporaryExpr()->IgnoreImpCasts())) { IntegerLiteral *x = dyn_cast<IntegerLiteral>(co->getArg(1)); IntegerLiteral *y = dyn_cast<IntegerLiteral>(co->getArg(2)); IntegerLiteral *z = dyn_cast<IntegerLiteral>(co->getArg(3)); IntegerLiteral *w = dyn_cast<IntegerLiteral>(co->getArg(4)); instanceNum = x->getValue().getSExtValue(); bx = y->getValue().getSExtValue(); by = z->getValue().getSExtValue(); bz = w->getValue().getSExtValue(); } } } } } } //_os <<"\n gpu_time : " <<gpu_time<<" cpu_time : " <<cpu_time<<" instanceNum : " <<_instanceNum<<" " <<instanceNum; if (tx && ty && tz && bx && by && bz && gpu_time && cpu_time && (_instanceNum == instanceNum)) { //_os <<"\n instance num : " <<_instanceNum<<" " <<instanceNum; GPUMacro *gm = new GPUMacro(bx, by, bz, tx, ty, tz, gpu_time, cpu_time); //_os <<"\n for stmt : " <<fstmt; forStmtInstanceIdPairType forStmtInstanceId = make_pair(_instanceNum, fstmt); _forStmtGPUMacroMap.insert(forStmtGPUMacroPairType(forStmtInstanceId, gm)); break; } } return true; }