// Tactic body for "elim-term-ite": runs the member rewriter m_rw over every formula
// of goal g, updating each formula in place (chaining the goal's existing proof with
// the rewrite proof via modus ponens when proofs are enabled), then publishes the
// rewriter's model converter through mc and reports how many fresh constants were
// introduced. The goal itself (mutated in place) is pushed into result.
// NOTE(review): this span appears truncated — the function's closing braces (after the
// TRACE) are not visible here; review the tail in the full file.
void operator()(goal_ref const & g, goal_ref_buffer & result, model_converter_ref & mc, proof_converter_ref & pc, expr_dependency_ref & core) { SASSERT(g->is_well_sorted()); mc = 0; pc = 0; core = 0; tactic_report report("elim-term-ite", *g); bool produce_proofs = g->proofs_enabled(); m_rw.cfg().m_produce_models = g->models_enabled(); m_rw.m_cfg.m_num_fresh = 0; m_rw.m_cfg.m_goal = g.get(); expr_ref new_curr(m); proof_ref new_pr(m); unsigned size = g->size(); for (unsigned idx = 0; idx < size; idx++) { expr * curr = g->form(idx); m_rw(curr, new_curr, new_pr); if (produce_proofs) { proof * pr = g->pr(idx); new_pr = m.mk_modus_ponens(pr, new_pr); } g->update(idx, new_curr, new_pr, g->dep(idx)); } mc = m_rw.m_cfg.m_mc.get(); report_tactic_progress(":elim-term-ite-consts", m_rw.m_cfg.m_num_fresh); g->inc_depth(); result.push_back(g.get()); TRACE("elim_term_ite", g->display(tout););
// Tactic body for "distribute-forall": builds a stack-local rewriter r and applies it
// to each formula of g (skipping the rest once the goal is inconsistent), chaining
// proofs with modus ponens when enabled, then pushes the updated goal into result.
// The OpenMP critical section (named tactic_cancel) publishes &r into m_rw, presumably
// so a concurrent cancel request can reach the active rewriter.
// NOTE(review): m_rw is set to point at the stack-local r but no code visible in this
// span resets it before return — confirm the (not shown) tail of the function clears
// m_rw under the same critical section, otherwise the pointer dangles after return.
// NOTE(review): span appears truncated — closing braces after the TRACE are not visible.
virtual void operator()(goal_ref const & g, goal_ref_buffer & result, model_converter_ref & mc, proof_converter_ref & pc, expr_dependency_ref & core) { SASSERT(g->is_well_sorted()); ast_manager & m = g->m(); bool produce_proofs = g->proofs_enabled(); rw r(m, produce_proofs); #pragma omp critical (tactic_cancel) { m_rw = &r; } mc = 0; pc = 0; core = 0; result.reset(); tactic_report report("distribute-forall", *g); expr_ref new_curr(m); proof_ref new_pr(m); unsigned size = g->size(); for (unsigned idx = 0; idx < size; idx++) { if (g->inconsistent()) break; expr * curr = g->form(idx); r(curr, new_curr, new_pr); if (g->proofs_enabled()) { proof * pr = g->pr(idx); new_pr = m.mk_modus_ponens(pr, new_pr); } g->update(idx, new_curr, new_pr, g->dep(idx)); } g->inc_depth(); result.push_back(g.get()); TRACE("distribute-forall", g->display(tout););
// Tactic body for "dt2bv": first scans every formula of g (quick_for_each_expr with a
// shared visited mark) collecting candidate finite-domain sorts, then removes from
// m_fd_sorts any sort that also occurred in a non-finite-domain position. If any
// usable finite-domain sorts remain, an enum2bv_rewriter translates each formula
// (proofs chained via modus ponens), its accumulated side constraints are asserted
// onto the goal, and a combined model converter is built: a filter converter hiding
// the introduced bit-vector symbols plus an extension converter mapping each original
// enum constant to its definition. Finally the goal is pushed into result.
// NOTE(review): span appears truncated — closing braces after the TRACE are not visible.
void operator()(goal_ref const & g, goal_ref_buffer & result, model_converter_ref & mc, proof_converter_ref & pc, expr_dependency_ref & core) override { mc = nullptr; pc = nullptr; core = nullptr; bool produce_proofs = g->proofs_enabled(); tactic_report report("dt2bv", *g); unsigned size = g->size(); expr_fast_mark1 visited; check_fd proc(*this); for (unsigned i = 0; i < size; ++i) { quick_for_each_expr(proc, visited, g->form(i)); } obj_hashtable<sort>::iterator it = m_non_fd_sorts.begin(), end = m_non_fd_sorts.end(); for (; it != end; ++it) { m_fd_sorts.remove(*it); } if (!m_fd_sorts.empty()) { ref<extension_model_converter> ext = alloc(extension_model_converter, m); ref<filter_model_converter> filter = alloc(filter_model_converter, m); enum2bv_rewriter rw(m, m_params); rw.set_is_fd(&m_is_fd); expr_ref new_curr(m); proof_ref new_pr(m); for (unsigned idx = 0; idx < size; idx++) { rw(g->form(idx), new_curr, new_pr); if (produce_proofs) { proof * pr = g->pr(idx); new_pr = m.mk_modus_ponens(pr, new_pr); } g->update(idx, new_curr, new_pr, g->dep(idx)); } expr_ref_vector bounds(m); rw.flush_side_constraints(bounds); for (unsigned i = 0; i < bounds.size(); ++i) { g->assert_expr(bounds[i].get()); } { obj_map<func_decl, func_decl*>::iterator it = rw.enum2bv().begin(), end = rw.enum2bv().end(); for (; it != end; ++it) { filter->insert(it->m_value); } } { obj_map<func_decl, expr*>::iterator it = rw.enum2def().begin(), end = rw.enum2def().end(); for (; it != end; ++it) { ext->insert(it->m_key, it->m_value); } } mc = concat(filter.get(), ext.get()); report_tactic_progress(":fd-num-translated", rw.num_translated()); } g->inc_depth(); result.push_back(g.get()); TRACE("dt2bv", g->display(tout););
// Simplifies the formula at the current cursor position (m_idx) of m_goal: if the
// substitution m_subst is non-empty the rewriter m_r produces a new formula (and
// proof); otherwise the formula is kept as-is, with a reflexivity proof when proofs
// are enabled. The TRACE logs the before/after pair.
// NOTE(review): span appears truncated — the code that consumes new_curr/new_pr
// (presumably a goal update) is not visible here.
void process_current() { expr * curr = m_goal->form(m_idx); expr_ref new_curr(m); proof_ref new_pr(m); if (!m_subst->empty()) { m_r(curr, new_curr, new_pr); } else { new_curr = curr; if (m.proofs_enabled()) new_pr = m.mk_reflexivity(curr); } TRACE("shallow_context_simplifier_bug", tout << mk_ismt2_pp(curr, m) << "\n---->\n" << mk_ismt2_pp(new_curr, m) << "\n";);
// Eliminates one 'distinct' constraint over an uninterpreted sort by mapping its
// arguments to distinct integer indices.
//
// d: optional hint — the distinct-term to eliminate. A non-distinct hint is ignored.
//    When no (usable) hint is given, the largest distinct-term in the set is chosen.
// Returns: the model converter for the transformation — currently always nullptr
//    (see TODO below); returns nullptr as well when no distinct-term is found.
//
// Side effects: for the chosen term r, asserts conv(arg_j) == j for each argument,
// replaces r itself by true, and rewrites every other formula through conv.
model_converter * operator()(assertion_set & s, app * d) {
    // A hint that is not a distinct-term is treated as absent.
    if (d && !is_distinct(d))
        d = nullptr;
    app * r = nullptr;
    unsigned sz = s.size();
    // Heuristic: pick the distinct-term with the most arguments, scanning only
    // up to the hint d (the scan is irrelevant when d is present, see below).
    for (unsigned i = 0; i < sz; i++) {
        expr * curr = s.form(i);
        if (curr == d)
            break;
        if (is_distinct(curr)) {
            if (!r || to_app(curr)->get_num_args() > r->get_num_args())
                r = to_app(curr);
        }
    }
    // An explicit hint overrides the heuristic choice.
    if (d != nullptr)
        r = d;
    if (r == nullptr)
        return nullptr; // nothing to eliminate
    // The uninterpreted sort being mapped to integers: sort of r's first argument.
    sort * u = m().get_sort(to_app(r)->get_arg(0));
    u2i conv(m(), u);
    {
        // Publish the converter (scoped, under the critical flet) so that
        // concurrent observers can reach it while the rewrite runs.
        critical_flet<u2i*> l1(m_u2i, &conv);
        expr_ref new_curr(m());
        for (unsigned i = 0; i < sz; i++) {
            expr * curr = s.form(i);
            if (curr == r) {
                // Replace the distinct-term: pin each converted argument to its
                // own index, which makes the arguments pairwise distinct.
                unsigned num = r->get_num_args();
                for (unsigned j = 0; j < num; j++) {
                    expr * arg = r->get_arg(j);
                    conv(arg, new_curr);
                    expr * eq = m().mk_eq(new_curr, conv.autil().mk_numeral(rational(j), true));
                    s.assert_expr(eq);
                }
                new_curr = m().mk_true();
            }
            else {
                conv(curr, new_curr);
            }
            s.update(i, new_curr);
        }
    }
    // TODO: create model converter
    return nullptr;
}
// Applies the elim-term-ite rewriter m_rw to every formula of the assertion set s,
// updating each formula in place and chaining proofs via modus ponens when proofs
// are enabled. On exit, mc holds the rewriter's model converter (or nullptr when the
// set is already inconsistent and nothing was done). The report objects are scoped
// so that timing/stats are flushed before the final progress report.
void operator()(assertion_set & s, model_converter_ref & mc) {
    mc = nullptr;
    if (s.inconsistent())
        return; // nothing to rewrite; leave mc empty
    {
        as_st_report report("elim-term-ite", s);
        // Reset per-run rewriter state: fresh-constant counter and target set.
        m_rw.m_cfg.m_num_fresh = 0;
        m_rw.m_cfg.m_set = &s;
        expr_ref new_curr(m);
        proof_ref new_pr(m);
        unsigned size = s.size();
        for (unsigned idx = 0; idx < size; idx++) {
            expr * curr = s.form(idx);
            m_rw(curr, new_curr, new_pr);
            if (m.proofs_enabled()) {
                // Combine the assertion's existing proof with the rewrite proof.
                proof * pr = s.pr(idx);
                new_pr = m.mk_modus_ponens(pr, new_pr);
            }
            s.update(idx, new_curr, new_pr);
        }
        mc = m_rw.m_cfg.m_mc.get();
    }
    report_st_progress(":elim-term-ite-consts", m_rw.m_cfg.m_num_fresh);
}
void operator()(goal_ref const & g, goal_ref_buffer & result, model_converter_ref & mc, proof_converter_ref & pc, expr_dependency_ref & core) { mc = 0; pc = 0; core = 0; if (!is_target(*g)) throw tactic_exception("bv1 blaster cannot be applied to goal"); tactic_report report("bv1-blaster", *g); m_num_steps = 0; bool proofs_enabled = g->proofs_enabled(); expr_ref new_curr(m()); proof_ref new_pr(m()); unsigned size = g->size(); for (unsigned idx = 0; idx < size; idx++) { if (g->inconsistent()) break; expr * curr = g->form(idx); m_rw(curr, new_curr, new_pr); m_num_steps += m_rw.get_num_steps(); if (proofs_enabled) { proof * pr = g->pr(idx); new_pr = m().mk_modus_ponens(pr, new_pr); } g->update(idx, new_curr, new_pr, g->dep(idx)); } if (g->models_enabled()) mc = mk_bv1_blaster_model_converter(m(), m_rw.cfg().m_const2bits); g->inc_depth(); result.push_back(g.get()); m_rw.cfg().cleanup(); }
// Tactic body for "eq2bv": collects finite-domain candidates from every formula of g
// (after running the bounds analysis m_bounds over the goal) and, if any remain after
// cleanup, rewrites each formula with m_rw: formulas recognized as bounds are replaced
// by true, others are rewritten (fabricating a rewrite+modus-ponens proof when proofs
// are enabled and the rewriter produced none). For each tracked term, known non-strict
// lower/upper bounds are re-asserted as bit-vector inequalities over its translation,
// tagged with the corresponding bound dependencies. mc receives the bvmc converter
// built during cleanup_fd, and the goal is pushed into result.
// NOTE(review): g->update(i, m.mk_true(), 0, 0) installs no proof for the dropped
// bound formula — confirm this is sound when proofs are enabled.
// NOTE(review): span appears truncated — closing braces after the TRACE are not visible.
virtual void operator()( goal_ref const & g, goal_ref_buffer & result, model_converter_ref & mc, proof_converter_ref & pc, expr_dependency_ref & core) { SASSERT(g->is_well_sorted()); mc = 0; pc = 0; core = 0; m_trail.reset(); m_fd.reset(); m_max.reset(); m_nonfd.reset(); m_bounds.reset(); ref<bvmc> mc1 = alloc(bvmc); tactic_report report("eq2bv", *g); m_bounds(*g); for (unsigned i = 0; i < g->size(); i++) { collect_fd(g->form(i)); } cleanup_fd(mc1); if (m_max.empty()) { result.push_back(g.get()); return; } for (unsigned i = 0; i < g->size(); i++) { expr_ref new_curr(m); proof_ref new_pr(m); if (is_bound(g->form(i))) { g->update(i, m.mk_true(), 0, 0); continue; } m_rw(g->form(i), new_curr, new_pr); if (m.proofs_enabled() && !new_pr) { new_pr = m.mk_rewrite(g->form(i), new_curr); new_pr = m.mk_modus_ponens(g->pr(i), new_pr); } g->update(i, new_curr, new_pr, g->dep(i)); } obj_map<expr, unsigned>::iterator it = m_max.begin(), end = m_max.end(); for (; it != end; ++it) { expr* c = it->m_key; bool strict; rational r; if (m_bounds.has_lower(c, r, strict)) { SASSERT(!strict); expr* d = m_fd.find(c); g->assert_expr(bv.mk_ule(bv.mk_numeral(r, m.get_sort(d)), d), m_bounds.lower_dep(c)); } if (m_bounds.has_upper(c, r, strict)) { SASSERT(!strict); expr* d = m_fd.find(c); g->assert_expr(bv.mk_ule(d, bv.mk_numeral(r, m.get_sort(d))), m_bounds.upper_dep(c)); } } g->inc_depth(); mc = mc1.get(); result.push_back(g.get()); TRACE("pb", g->display(tout););