/// decide whether to drop a state
bool path_searcht::drop_state(const statet &state)
{
  goto_programt::const_targett pc=state.get_instruction();

  // depth limit
  if(depth_limit_set && state.get_depth()>depth_limit)
    return true;

  // context bound
  if(context_bound_set &&
     state.get_no_thread_interleavings()>context_bound)
    return true;

  // branch bound
  if(branch_bound_set && state.get_no_branches()>branch_bound)
    return true;

  // unwinding limit -- loops
  if(unwind_limit_set && pc->is_backwards_goto())
  {
    for(const auto &loop_info : state.unwinding_map)
      if(loop_info.second>unwind_limit)
        return true;
  }

  // unwinding limit -- recursion
  if(unwind_limit_set && pc->is_function_call())
  {
    for(const auto &rec_info : state.recursion_map)
      if(rec_info.second>unwind_limit)
        return true;
  }

  // infeasible path -- assume(false)
  if(pc->is_assume() && simplify_expr(pc->guard, ns).is_false())
  {
    debug() << "aborting path on assume(false) at "
            << pc->source_location
            << " thread " << state.get_current_thread();

    const irep_idt &c=pc->source_location.get_comment();
    if(!c.empty())
      debug() << ": " << c;
    debug() << eom;

    return true;
  }

  return false;
}
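// Illustrative sketch only, not part of the original file: a generic
// worklist loop showing where a drop_state-style predicate prunes
// execution paths. All names below (bounded_search, dropt, stept) are
// hypothetical and chosen for illustration; only the pruning pattern
// mirrors the drop_state checks above.
#include <deque>

template <typename statet, typename dropt, typename stept>
void bounded_search(std::deque<statet> &worklist, dropt drop_state, stept step)
{
  while(!worklist.empty())
  {
    statet state=worklist.front();
    worklist.pop_front();

    // prune paths that exceed the configured depth, context, branch
    // or unwinding bounds, or that sit on assume(false)
    if(drop_state(state))
      continue;

    // symbolically execute one step; branches and thread interleavings
    // may push successor states back onto the worklist
    step(state, worklist);
  }
}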
/// compute, per thread, the sets of shared objects read and written by
/// the thread's next instruction, and flag threads that touch shared state
void shared_accesst::rw_sets(
  statet &state,
  std::vector<bool> &is_shared,
  std::vector<sharedt::sett> &reads,
  std::vector<sharedt::sett> &writes)
{
  unsigned nr_threads=state.threads.size();

  reads.clear();
  reads.resize(nr_threads);
  writes.clear();
  writes.resize(nr_threads);
  is_shared.clear();
  is_shared.resize(nr_threads, false);

  // no interleaving points inside an atomic section;
  // leave all sets empty
  if(state.atomic_section_count)
    return;

  sharedt shared(state);

  // remember the scheduled thread; the loop below temporarily
  // switches threads to inspect each one's next instruction
  unsigned current_thread=state.get_current_thread();

  for(unsigned thr=0; thr<nr_threads; ++thr)
  {
    if(state.threads[thr].active)
    {
      state.set_current_thread(thr);
      shared(reads[thr], writes[thr]);
      is_shared[thr]=!(reads[thr].empty() && writes[thr].empty());
    }
  }

  state.set_current_thread(current_thread);
}
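// Illustrative sketch only, not from the original sources: one way the
// per-thread sets produced by rw_sets could be consumed -- checking two
// threads for conflicting accesses (the usual dependence test in
// partial-order reduction). This assumes sharedt::sett is a standard
// associative set of object identifiers with find/end; the helper names
// sets_intersect and threads_conflict are hypothetical.
static bool sets_intersect(const sharedt::sett &a, const sharedt::sett &b)
{
  for(const auto &obj : a)
    if(b.find(obj)!=b.end())
      return true;
  return false;
}

static bool threads_conflict(
  unsigned t1,
  unsigned t2,
  const std::vector<sharedt::sett> &reads,
  const std::vector<sharedt::sett> &writes)
{
  return
    sets_intersect(writes[t1], writes[t2]) ||  // write-write conflict
    sets_intersect(writes[t1], reads[t2]) ||   // write-read conflict
    sets_intersect(reads[t1], writes[t2]);     // read-write conflict
}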