ExecStatus
Incremental<View>::propagate(Space& home, const ModEventDelta&) {
  // Either some queued work remains, or every view is assigned
  // (in which case the propagator is subsumed below).
  assert(!w_support.empty() || !w_remove.empty() || unassigned==0);
  // Region-allocated bit-sets give amortized O(1) domain membership tests.
  Region r(home);
  BitSet* dom = r.alloc<BitSet>(x.size());
  init_dom(home, dom);
  // Drain both work queues until a fixpoint is reached. Removal work is
  // always processed before support work, as in the original algorithm.
  while (!w_remove.empty() || !w_support.empty()) {
    // Process all pending value removals first
    while (!w_remove.empty()) {
      int var, val;
      w_remove.pop(home,var,val);
      unsigned int off = static_cast<unsigned int>(val-ts()->min);
      // Skip stale work: the value may already be gone from the domain
      if (dom[var].get(off)) {
        GECODE_ME_CHECK(x[var].nq(home,val));
        dom[var].clear(off);
      }
    }
    // Then re-establish supports for values that lost one
    while (!w_support.empty()) {
      int var, val;
      w_support.pop(home,var,val);
      // Skip stale work here as well
      if (dom[var].get(static_cast<unsigned int>(val-ts()->min)))
        find_support(home, dom, var, val);
    }
  }
  // All views assigned means the constraint is entailed
  return (unassigned != 0) ? ES_FIX : home.ES_SUBSUMED(*this);
}
// Constructor: posts the incremental propagator, subscribes advisors to
// all unassigned views, records initial supports, and schedules the
// propagator if there is pending work (or subsumption to report).
// NOTE(review): the order init_support -> advisor subscription ->
// find_support appears intentional; advisors must be in place before
// supports are recorded.
forceinline
Incremental<View>::Incremental(Home home, ViewArray<View>& x, const TupleSet& t)
  : Base<View,false>(home,x,t), support_data(NULL),
    unassigned(x.size()), ac(home) {
  init_support(home);
  // Post advisors: assigned views need none, they only decrement the
  // count of unassigned views
  for (int i = x.size(); i--; )
    if (x[i].assigned()) {
      --unassigned;
    } else {
      x[i].subscribe(home,*new (home) SupportAdvisor(home,*this,ac,i));
    }
  Region r(home);
  // Bit-sets for amortized O(1) access to current domains
  BitSet* dom = r.alloc<BitSet>(x.size());
  init_dom(home, dom);
  // Add initial supports for every remaining view-value pair; this may
  // enqueue work on w_support / w_remove
  for (int i = x.size(); i--; )
    for (ViewValues<View> vv(x[i]); vv(); ++vv)
      find_support(home, dom, i, vv.val());
  // Schedule only if there is work to be done, or if all views are
  // already assigned (so propagate() can report subsumption)
  if (!w_support.empty() || !w_remove.empty() || (unassigned == 0))
    View::schedule(home,*this,
                   (unassigned != x.size()) ? ME_INT_VAL : ME_INT_DOM);
}
ExecStatus
Basic<View,shared>::propagate(Space& home, const ModEventDelta&) {
  Region r(home);
  // Bit-sets caching the current domains for O(1) membership tests
  BitSet* dom = r.alloc<BitSet>(x.size());
  init_dom(home, dom);
  // Bit-sets marking view-value pairs already known to be supported
  BitSet* supported = r.alloc<BitSet>(x.size());
  for (int i = x.size(); i--; )
    supported[i].init(home, ts()->domsize);
  // Stack of values to prune from the view currently being processed
  Support::StaticStack<int,Region> prune(r,static_cast<int>(ts()->domsize));
  // Check consistency of every view-value pair. The descending order of
  // i is required: supports found for view i mark only positions j < i,
  // since all positions >= i have already been checked.
  for (int i = x.size()-1; i >= 0; i--) {
    for (ViewValues<View> vals(x[i]); vals(); ++vals) {
      // Offset of the value relative to the tuple-set minimum
      int off = vals.val() - ts()->min;
      if (supported[i].get(static_cast<unsigned int>(off)))
        continue; // already proven supported
      Tuple t = find_support(dom, i, off);
      if (t == NULL) {
        // No support left: schedule the value for pruning
        prune.push(vals.val());
      } else {
        // The supporting tuple also supports its own values at all
        // not-yet-visited positions (forward direction only)
        for (int j = i; j--; ) {
          supported[j].set(static_cast<unsigned int>(t[j]- ts()->min));
          assert(supported[j].get(t[j] - ts()->min));
        }
      }
    }
    // Prune all values of x[i] that have no support
    while (!prune.empty())
      GECODE_ME_CHECK(x[i].nq(home,prune.pop()));
  }
  // Subsumed as soon as every view is assigned
  for (int i = 0; i < x.size(); i++)
    if (!x[i].assigned())
      return shared ? ES_NOFIX : ES_FIX;
  return home.ES_SUBSUMED(*this);
}
// Determine if a sequent of this form: // // A1, A2, ..., An |- C // // denotes a valid proof. // // The proof is valid if any Ai proves C. The proof is invalid when // no Ai proves C. The proof is incomplete when it is invalid by // some Ai is non-atomic. // // TODO: I wonder if there's an opportunity to quickly reject a // proof with non-reduced antecedents. That would avoid multiple // (potentially) exponential invocations of derive(), but it would // also likely lead to more aggressive creation of goals. Validation check_term(Proof& p, Prop_list& ants, Cons const& c) { // If antecedent set (syntacically) contains C, then the // proof is valid. if (ants.contains(c)) return valid_proof; // FIXME: Memoization? // Actually derive a proof of C from AS. If the result // is invalid, by the thre are incomplete terms, then // the result is incomplete. Validation v = find_support(p, ants, c); if (v == invalid_proof) { if (!is_reduced(ants)) return incomplete_proof; } return v; }