//Register exact MD for each global variable. //Note you should call this function as early as possible, e.g, before process //all regions. Because that will assign smaller MD id to global variable. void RegionMgr::registerGlobalMD() { //Only top region can do initialize MD for global variable. ASSERT0(m_var_mgr); VarVec * varvec = m_var_mgr->get_var_vec(); for (INT i = 0; i <= varvec->get_last_idx(); i++) { VAR * v = varvec->get(i); if (v == NULL || VAR_is_local(v)) { continue; } ASSERT0(VAR_is_global(v)); //User sometime intentionally declare non-allocable //global variable to custmized usage. //ASSERT0(VAR_allocable(v)); if (v->is_string() && genDedicateStrMD() != NULL) { continue; } //We allocate MDTab for VAR which is func-decl or fake as well. //Since some Passes such as AA may need fake VAR to do analysis. MD md; MD_base(&md) = v; MD_ofst(&md) = 0; MD_size(&md) = v->getByteSize(get_type_mgr()); if (VAR_is_fake(v) || VAR_is_func_decl(v)) { MD_ty(&md) = MD_UNBOUND; } else { MD_ty(&md) = MD_EXACT; } m_md_sys->registerMD(md); } }
// Record which variables this literal newly binds and build its index.
// Any variable of var_ not already in `bound` is added to `bound`;
// matching is then delegated to a fresh IncIndex.
Index *IncLit::index(Grounder *, Formula *, VarSet &bound)
{
	VarSet occurring;
	var_->vars(occurring);
	// Newly bound variables = vars(var_) \ bound.
	VarVec fresh;
	std::set_difference(occurring.begin(), occurring.end(),
	                    bound.begin(), bound.end(),
	                    std::back_insert_iterator<VarVec>(fresh));
	bound.insert(fresh.begin(), fresh.end());
	return new IncIndex(this);
}
// Construct an index over domain `dom` for the given terms.
// `index` lists the variables already bound when matching (they form the
// lookup key), `bind` the variables this index binds on a successful match.
PredIndex::PredIndex(Domain *dom, const TermPtrVec &terms, const VarVec &index, const VarVec &bind)
	// map_ is keyed through ValCmp over indexVec_ — presumably hash and
	// equality functors over the first index.size() values; confirm in ValCmp.
	: map_(0, ValCmp(&indexVec_, index.size()), ValCmp(&indexVec_, index.size()))
	, dom_(dom)
	, terms_(terms)
	, index_(index)
	, bind_(bind)
	, indexVec_(index.size()) // scratch buffer holding the current key values
	, finished_(0)            // NOTE(review): looks like a progress watermark — verify usage
	, lastExtend_(0)
{
}
// searches a new source for the atom node head. // If a new source is found the function returns true. // Otherwise the function returns false and unfounded_ contains head // as well as atoms with no source that circularly depend on head. bool DefaultUnfoundedCheck::findSource(NodeId head) { assert(unfounded_.empty()); enqueueUnfounded(head); // unfounded, unless we find a new source VarVec noSourceYet; bool changed = false; const NodeId* bodyIt, *bodyEnd; while (!unfounded_.empty()) { head = unfounded_.front(); if (!atoms_[head].hasSource()) { // no source unfounded_.pop_front(); // note: current atom is still marked AtomNodeP headNode(graph_->getAtom(head)); for (bodyIt = headNode.node->bodies(), bodyEnd = headNode.node->bodies_end(); bodyIt != bodyEnd; ++bodyIt) { BodyNodeP bodyNode(graph_->getBody(*bodyIt)); if (!solver_->isFalse(bodyNode.node->lit)) { if (bodyNode.node->scc != headNode.node->scc || isValidSource(bodyNode)) { atoms_[head].ufs = 0; // found a new source, setSource(headNode, bodyNode); // set the new source propagateSource(); // and propagate it forward changed = true; // may source atoms in noSourceYet! break; } else { addUnsourced(bodyNode); } } } if (!atoms_[head].hasSource()) { noSourceYet.push_back(head);// no source found } } else { // head has a source dequeueUnfounded(); } } // while unfounded_.emtpy() == false unfounded_.clear(); if (changed) { // remove all atoms that have a source as they are not unfounded VarVec::iterator it; for (it = noSourceYet.begin(); it != noSourceYet.end(); ++it) { if ( atoms_[*it].hasSource() ) { atoms_[*it].ufs = 0; } else { unfounded_.push_back(*it); } } } else { // all atoms in noSourceYet are unfounded! noSourceYet.swap(unfounded_.vec_); } return unfounded_.empty(); }
void BlockDataRegion::LazyInitializeReferencedVars() { if (ReferencedVars) return; AnalysisDeclContext *AC = getCodeRegion()->getAnalysisDeclContext(); AnalysisDeclContext::referenced_decls_iterator I, E; llvm::tie(I, E) = AC->getReferencedBlockVars(BC->getDecl()); if (I == E) { ReferencedVars = (void*) 0x1; return; } MemRegionManager &MemMgr = *getMemRegionManager(); llvm::BumpPtrAllocator &A = MemMgr.getAllocator(); BumpVectorContext BC(A); typedef BumpVector<const MemRegion*> VarVec; VarVec *BV = (VarVec*) A.Allocate<VarVec>(); new (BV) VarVec(BC, E - I); VarVec *BVOriginal = (VarVec*) A.Allocate<VarVec>(); new (BVOriginal) VarVec(BC, E - I); for ( ; I != E; ++I) { const VarDecl *VD = *I; const VarRegion *VR = 0; const VarRegion *OriginalVR = 0; if (!VD->getAttr<BlocksAttr>() && VD->hasLocalStorage()) { VR = MemMgr.getVarRegion(VD, this); OriginalVR = MemMgr.getVarRegion(VD, LC); } else { if (LC) { VR = MemMgr.getVarRegion(VD, LC); OriginalVR = VR; } else { VR = MemMgr.getVarRegion(VD, MemMgr.getUnknownRegion()); OriginalVR = MemMgr.getVarRegion(VD, LC); } } assert(VR); assert(OriginalVR); BV->push_back(VR, BC); BVOriginal->push_back(OriginalVR, BC); } ReferencedVars = BV; OriginalVars = BVOriginal; }
void SymmetricSolver::circulantMul(const BlockCMat& M, mvec& v, unsigned int nPhi) { assert(!(v.size()%nPhi)); assert(M.nCols()*nPhi == v.size()); // stuff vector into vector-of-vectors for circulant blocks VarVec<mvec> vv; for(unsigned int i=0; i<v.size()/nPhi; i++) vv.push_back(mvec(&v[i*nPhi], &v[i*nPhi]+nPhi)); vv = M.lMultiply<mvec,mvec>(vv); // pull data back out v.getData().resize(M.nRows()*nPhi); for(unsigned int i=0; i<M.nRows(); i++) for(unsigned int j=0; j<nPhi; j++) v[i*nPhi+j] = vv[i][j]; }
// Choose how this aggregate literal is instantiated: in assignment mode,
// if lower_ still has unbound variables, the literal binds them itself and
// no match index is needed; otherwise append a MatchIndex.
void ParityAggrLit::index(Grounder *g, Groundable *gr, VarSet &bound)
{
	(void)g;
	if(assign_)
	{
		VarSet occurring;
		lower_->vars(occurring);
		// Variables of lower_ not yet bound by earlier literals.
		VarVec fresh;
		std::set_difference(occurring.begin(), occurring.end(),
		                    bound.begin(), bound.end(),
		                    std::back_insert_iterator<VarVec>(fresh));
		if(!fresh.empty())
		{
			bound.insert(fresh.begin(), fresh.end());
			return;
		}
	}
	gr->instantiator()->append(new MatchIndex(this));
}
void BlockDataRegion::LazyInitializeReferencedVars() { if (ReferencedVars) return; AnalysisDeclContext *AC = getCodeRegion()->getAnalysisDeclContext(); const auto &ReferencedBlockVars = AC->getReferencedBlockVars(BC->getDecl()); auto NumBlockVars = std::distance(ReferencedBlockVars.begin(), ReferencedBlockVars.end()); if (NumBlockVars == 0) { ReferencedVars = (void*) 0x1; return; } MemRegionManager &MemMgr = *getMemRegionManager(); llvm::BumpPtrAllocator &A = MemMgr.getAllocator(); BumpVectorContext BC(A); typedef BumpVector<const MemRegion*> VarVec; VarVec *BV = A.Allocate<VarVec>(); new (BV) VarVec(BC, NumBlockVars); VarVec *BVOriginal = A.Allocate<VarVec>(); new (BVOriginal) VarVec(BC, NumBlockVars); for (const VarDecl *VD : ReferencedBlockVars) { const VarRegion *VR = nullptr; const VarRegion *OriginalVR = nullptr; std::tie(VR, OriginalVR) = getCaptureRegions(VD); assert(VR); assert(OriginalVR); BV->push_back(VR, BC); BVOriginal->push_back(OriginalVR, BC); } ReferencedVars = BV; OriginalVars = BVOriginal; }
void BlockDataRegion::LazyInitializeReferencedVars() { if (ReferencedVars) return; AnalysisDeclContext *AC = getCodeRegion()->getAnalysisDeclContext(); AnalysisDeclContext::referenced_decls_iterator I, E; llvm::tie(I, E) = AC->getReferencedBlockVars(BC->getDecl()); if (I == E) { ReferencedVars = (void*) 0x1; return; } MemRegionManager &MemMgr = *getMemRegionManager(); llvm::BumpPtrAllocator &A = MemMgr.getAllocator(); BumpVectorContext BC(A); typedef BumpVector<const MemRegion*> VarVec; VarVec *BV = (VarVec*) A.Allocate<VarVec>(); new (BV) VarVec(BC, E - I); VarVec *BVOriginal = (VarVec*) A.Allocate<VarVec>(); new (BVOriginal) VarVec(BC, E - I); for ( ; I != E; ++I) { const VarRegion *VR = 0; const VarRegion *OriginalVR = 0; llvm::tie(VR, OriginalVR) = getCaptureRegions(*I); assert(VR); assert(OriginalVR); BV->push_back(VR, BC); BVOriginal->push_back(OriginalVR, BC); } ReferencedVars = BV; OriginalVars = BVOriginal; }