void goto_symext::claim(const expr2tc &claim_expr, const std::string &msg) { if (unwinding_recursion_assumption) return ; // Can happen when evaluating certain special intrinsics. Gulp. if (cur_state->guard.is_false()) return; total_claims++; expr2tc new_expr = claim_expr; cur_state->rename(new_expr); // first try simplifier on it do_simplify(new_expr); if (is_true(new_expr)) return; cur_state->guard.guard_expr(new_expr); cur_state->global_guard.guard_expr(new_expr); remaining_claims++; target->assertion(cur_state->guard.as_expr(), new_expr, msg, cur_state->gen_stack_trace(), cur_state->source); }
// Record an assumption with the symex target, or fold it into the path
// guard in the multi-threaded case. Trivially true assumptions are dropped.
void goto_symext::symex_assume(statet &state, const exprt &cond)
{
  exprt c = cond;
  do_simplify(c);

  if(c.is_true())
    return;

  if(state.threads.size()==1)
  {
    // Single thread: emit the assumption, conditioned on the path guard.
    exprt guarded = c;
    state.guard.guard_expr(guarded);
    target.assumption(state.guard.as_expr(), guarded, state.source);
  }
  else
  {
    // Multiple threads: add the assumption to the guard instead.
    // symex_target_equationt::convert_assertions would fail to
    // consider assumptions of threads that have a thread-id above that
    // of the thread containing the assertion:
    //   T0               T1
    //   x=0;             assume(x==1);
    //   assert(x!=42);   x=42;
    state.guard.add(c);
  }

  // If the assumption killed this path inside an atomic section, close
  // the section now.
  if(state.atomic_section_id!=0 &&
     state.guard.is_false())
    symex_atomic_end(state);
}
void goto_symext::vcc( const exprt &vcc_expr, const std::string &msg, statet &state) { total_vccs++; exprt expr=vcc_expr; // we are willing to re-write some quantified expressions rewrite_quantifiers(expr, state); // now rename, enables propagation state.rename(expr, ns); // now try simplifier on it do_simplify(expr); if(expr.is_true()) return; state.guard.guard_expr(expr); remaining_vccs++; target.assertion(state.guard.as_expr(), expr, msg, state.source); }
// Close the currently open atomic section: flush every shared read and
// write recorded inside the section to the symex target (each guarded by
// the disjunction of the guards under which it occurred), then emit the
// ATOMIC_END event itself. Throws if no atomic section is open.
void goto_symext::symex_atomic_end(statet &state)
{
  // Dead path: nothing to emit.
  if(state.guard.is_false())
    return;

  if(state.atomic_section_id==0)
    throw "ATOMIC_END unmatched"; // NOLINT(readability/throw)

  // Capture and reset the section id before emitting events.
  const unsigned atomic_section_id=state.atomic_section_id;
  state.atomic_section_id=0;

  for(goto_symex_statet::read_in_atomic_sectiont::const_iterator
      r_it=state.read_in_atomic_section.begin();
      r_it!=state.read_in_atomic_section.end();
      ++r_it)
  {
    ssa_exprt r=r_it->first;
    // Restore the L2 index the variable had when first read in the section.
    r.set_level_2(r_it->second.first);

    // guard is the disjunction over reads
    assert(!r_it->second.second.empty());
    guardt read_guard(r_it->second.second.front());
    for(std::list<guardt>::const_iterator
        it=++(r_it->second.second.begin());
        it!=r_it->second.second.end();
        ++it)
      read_guard|=*it;
    exprt read_guard_expr=read_guard.as_expr();
    do_simplify(read_guard_expr);

    target.shared_read(
      read_guard_expr,
      r,
      atomic_section_id,
      state.source);
  }

  for(goto_symex_statet::written_in_atomic_sectiont::const_iterator
      w_it=state.written_in_atomic_section.begin();
      w_it!=state.written_in_atomic_section.end();
      ++w_it)
  {
    ssa_exprt w=w_it->first;
    // Writes use the current (latest) L2 count for the identifier.
    w.set_level_2(state.level2.current_count(w.get_identifier()));

    // guard is the disjunction over writes
    assert(!w_it->second.empty());
    guardt write_guard(w_it->second.front());
    for(std::list<guardt>::const_iterator
        it=++(w_it->second.begin());
        it!=w_it->second.end();
        ++it)
      write_guard|=*it;
    exprt write_guard_expr=write_guard.as_expr();
    do_simplify(write_guard_expr);

    target.shared_write(
      write_guard_expr,
      w,
      atomic_section_id,
      state.source);
  }

  target.atomic_end(
    state.guard.as_expr(),
    atomic_section_id,
    state.source);
}
// Tail of a printf-intrinsic handler (its signature lies outside this
// chunk): rhs holds a code_printf2t whose operand 0 is the format string
// and whose remaining operands are the printf arguments; the lot is
// forwarded to the symex target as an output event.
{
  assert(is_code_printf2t(rhs));

  code_printf2tc new_rhs(to_code_printf2t(rhs));
  cur_state->rename(new_rhs);

  // The expr2tc in position 0 is the string format
  const irep_idt fmt = get_string_argument(new_rhs->operands[0]);

  // Now we pop the format
  new_rhs->operands.erase(new_rhs->operands.begin());

  // Simplify each remaining argument before handing it to the target.
  std::list<expr2tc> args;
  new_rhs->foreach_operand([this, &args](const expr2tc &e) {
    expr2tc tmp = e;
    do_simplify(tmp);
    args.push_back(tmp);
  });

  target->output(
    cur_state->guard.as_expr(), cur_state->source, fmt.as_string(), args);
}

// Handle a C++ `new` side effect.
// NOTE(review): this definition is truncated at the end of this chunk;
// the remainder of the body is not visible here.
void goto_symext::symex_cpp_new(const expr2tc &lhs, const sideeffect2t &code)
{
  bool do_array;

  // presumably cpp_new_arr distinguishes array `new T[n]` from scalar
  // `new T` — confirm against sideeffect2t's declaration.
  do_array = (code.kind == sideeffect2t::cpp_new_arr);

  // Fresh serial number for this dynamic allocation.
  unsigned int &dynamic_counter = get_dynamic_counter();
  dynamic_counter++;
// Execute one GOTO instruction symbolically: dispatch on the instruction
// type at the current program counter, update cur_state, and emit the
// corresponding events (assumptions, claims, assignments, ...) to the
// symex target. The program counter is advanced here for most cases;
// GOTO/CATCH and the intrinsic paths manage it themselves.
void goto_symext::symex_step(reachability_treet & art)
{
  assert(!cur_state->call_stack.empty());

  const goto_programt::instructiont &instruction = *cur_state->source.pc;

  // depth exceeded?
  {
    // A depth_limit of 0 means "no limit"; beyond the limit the path is
    // killed by adding `false` to its guard.
    if (depth_limit != 0 && cur_state->depth > depth_limit)
      cur_state->guard.add(false_expr);
    cur_state->depth++;
  }

  // actually do instruction
  switch (instruction.type)
  {
  case SKIP:
  case LOCATION:
    // really ignore
    cur_state->source.pc++;
    break;

  case END_FUNCTION:
    symex_end_of_function();

    // Potentially skip to run another function ptr target; if not,
    // continue
    if (!run_next_function_ptr_target(false))
      cur_state->source.pc++;
    break;

  case GOTO:
  {
    // Prepare the branch guard, then let symex_goto handle control flow
    // (including advancing the pc).
    expr2tc tmp(instruction.guard);
    replace_nondet(tmp);

    dereference(tmp, false);
    replace_dynamic_allocation(tmp);

    symex_goto(tmp);
  }
  break;

  case ASSUME:
    if (!cur_state->guard.is_false())
    {
      expr2tc tmp = instruction.guard;
      replace_nondet(tmp);
      dereference(tmp, false);
      replace_dynamic_allocation(tmp);

      cur_state->rename(tmp);
      do_simplify(tmp);

      // Trivially-true assumptions need not be recorded.
      if (!is_true(tmp))
      {
        expr2tc tmp2 = tmp;
        expr2tc tmp3 = tmp2;
        cur_state->guard.guard_expr(tmp2);

        assume(tmp2);

        // we also add it to the state guard
        cur_state->guard.add(tmp3);
      }
    }
    cur_state->source.pc++;
    break;

  case ASSERT:
    if (!cur_state->guard.is_false())
    {
      // User-provided assertions can be disabled via no_assertions,
      // except when deadlock checking is on.
      if (!no_assertions || !cur_state->source.pc->location.user_provided()
          || deadlock_check)
      {
        std::string msg = cur_state->source.pc->location.comment().as_string();
        if (msg == "")
          msg = "assertion";

        expr2tc tmp = instruction.guard;
        replace_nondet(tmp);

        dereference(tmp, false);
        replace_dynamic_allocation(tmp);

        claim(tmp, msg);
      }
    }
    cur_state->source.pc++;
    break;

  case RETURN:
    if (!cur_state->guard.is_false())
    {
      // Assign the return value (if any) before unwinding the frame.
      expr2tc thecode = instruction.code, assign;
      if (make_return_assignment(assign, thecode))
      {
        goto_symext::symex_assign(assign);
      }

      symex_return();
    }
    cur_state->source.pc++;
    break;

  case ASSIGN:
    if (!cur_state->guard.is_false())
    {
      code_assign2tc deref_code = instruction.code;

      // XXX jmorse -- this is not fully symbolic.
      // If a thrown object was recorded for this pc, substitute it as the
      // source of the assignment (taking its address when the target is a
      // pointer but the object is not).
      if (thrown_obj_map.find(cur_state->source.pc) != thrown_obj_map.end())
      {
        symbol2tc thrown_obj = thrown_obj_map[cur_state->source.pc];

        if (is_pointer_type(deref_code.get()->target.get()->type)
            && !is_pointer_type(thrown_obj.get()->type))
        {
          expr2tc new_thrown_obj(
            new address_of2t(thrown_obj.get()->type, thrown_obj));
          deref_code.get()->source = new_thrown_obj;
        }
        else
          deref_code.get()->source = thrown_obj;

        thrown_obj_map.erase(cur_state->source.pc);
      }

      replace_nondet(deref_code);

      code_assign2t &assign = to_code_assign2t(deref_code);

      // Target is dereferenced in write mode, source in read mode.
      dereference(assign.target, true);
      dereference(assign.source, false);
      replace_dynamic_allocation(deref_code);

      symex_assign(deref_code);
    }

    cur_state->source.pc++;
    break;

  case FUNCTION_CALL:
  {
    expr2tc deref_code = instruction.code;
    replace_nondet(deref_code);

    code_function_call2t &call = to_code_function_call2t(deref_code);

    if (!is_nil_expr(call.ret))
    {
      dereference(call.ret, true);
    }

    replace_dynamic_allocation(deref_code);

    for (std::vector<expr2tc>::iterator it = call.operands.begin();
         it != call.operands.end(); it++)
      if (!is_nil_expr(*it))
        dereference(*it, false);

    // Always run intrinsics, whether guard is false or not. This is due to the
    // unfortunate circumstance where a thread starts with false guard due to
    // decision taken in another thread in this trace. In that case the
    // terminate intrinsic _has_ to run, or we explode.
    if (is_symbol2t(call.function))
    {
      const irep_idt &id = to_symbol2t(call.function).thename;
      if (has_prefix(id.as_string(), "c::__ESBMC"))
      {
        cur_state->source.pc++;
        // Strip the "c::" prefix to obtain the intrinsic name.
        std::string name = id.as_string().substr(3);
        run_intrinsic(call, art, name);
        return;
      }
      else if (has_prefix(id.as_string(), "cpp::__ESBMC"))
      {
        cur_state->source.pc++;
        // Strip "cpp::" and any C++ argument-list suffix.
        std::string name = id.as_string().substr(5);
        name = name.substr(0, name.find("("));
        run_intrinsic(call, art, name);
        return;
      }
    }

    // Don't run a function call if the guard is false.
    if (!cur_state->guard.is_false())
    {
      symex_function_call(deref_code);
    }
    else
    {
      cur_state->source.pc++;
    }
  }
  break;

  case OTHER:
    if (!cur_state->guard.is_false())
    {
      symex_other();
    }
    cur_state->source.pc++;
    break;

  case CATCH:
    // symex_catch manages the pc itself.
    symex_catch();
    break;

  case THROW:
    if (!cur_state->guard.is_false())
    {
      if (symex_throw())
        cur_state->source.pc++;
    }
    else
    {
      cur_state->source.pc++;
    }
    break;

  case THROW_DECL:
    symex_throw_decl();
    cur_state->source.pc++;
    break;

  case THROW_DECL_END:
    // When we reach THROW_DECL_END, we must clear any throw_decl
    if (stack_catch.size())
    {
      // Get to the correct try (always the last one)
      goto_symex_statet::exceptiont *except = &stack_catch.top();

      except->has_throw_decl = false;
      except->throw_list_set.clear();
    }

    cur_state->source.pc++;
    break;

  default:
    std::cerr << "GOTO instruction type " << instruction.type;
    std::cerr << " not handled in goto_symext::symex_step" << std::endl;
    abort();
  }
}
// Build an expression denoting the address of `expr`, rewriting
// byte_extract, index/member, dereference and if-expressions into explicit
// pointer arithmetic. Dereference guards encountered along the way are
// accumulated in `guard`. When `keep_array` is false, arrays decay to a
// pointer to their first element (&array becomes &array[0]).
exprt goto_symext::address_arithmetic(
  const exprt &expr,
  statet &state,
  guardt &guard,
  bool keep_array)
{
  exprt result;

  if(expr.id()==ID_byte_extract_little_endian ||
     expr.id()==ID_byte_extract_big_endian)
  {
    // address_of(byte_extract(op, offset, t)) is
    // address_of(op) + offset with adjustments for arrays
    const byte_extract_exprt &be=to_byte_extract_expr(expr);

    // recursive call
    result=address_arithmetic(be.op(), state, guard, keep_array);

    if(ns.follow(be.op().type()).id()==ID_array &&
       result.id()==ID_address_of)
    {
      address_of_exprt &a=to_address_of_expr(result);

      // turn &a of type T[i][j] into &(a[0][0])
      for(const typet *t=&(ns.follow(a.type().subtype()));
          t->id()==ID_array && !base_type_eq(expr.type(), *t, ns);
          t=&(ns.follow(*t).subtype()))
        a.object()=index_exprt(a.object(), from_integer(0, index_type()));
    }

    // do (expr.type() *)(((char *)op)+offset)
    result=typecast_exprt(result, pointer_typet(char_type()));

    // there could be further dereferencing in the offset
    exprt offset=be.offset();
    dereference_rec(offset, state, guard, false);

    result=plus_exprt(result, offset);

    // treat &array as &array[0]
    const typet &expr_type=ns.follow(expr.type());
    pointer_typet dest_type;

    if(expr_type.id()==ID_array && !keep_array)
      dest_type.subtype()=expr_type.subtype();
    else
      dest_type.subtype()=expr_type;

    result=typecast_exprt(result, dest_type);
  }
  else if(expr.id()==ID_index ||
          expr.id()==ID_member)
  {
    // Reduce index/member to a byte_extract over the root object, then
    // recurse on that form.
    object_descriptor_exprt ode;
    ode.build(expr, ns);

    byte_extract_exprt be(byte_extract_id());
    be.type()=expr.type();
    be.op()=ode.root_object();
    be.offset()=ode.offset();

    // recursive call
    result=address_arithmetic(be, state, guard, keep_array);

    do_simplify(result);
  }
  else if(expr.id()==ID_dereference)
  {
    // ANSI-C guarantees &*p == p no matter what p is,
    // even if it's complete garbage
    // just grab the pointer, but be wary of further dereferencing
    // in the pointer itself
    result=to_dereference_expr(expr).pointer();
    dereference_rec(result, state, guard, false);
  }
  else if(expr.id()==ID_if)
  {
    // &(c ? a : b) becomes c ? &a : &b.
    if_exprt if_expr=to_if_expr(expr);

    // the condition is not an address
    dereference_rec(if_expr.cond(), state, guard, false);

    // recursive call
    if_expr.true_case()=
      address_arithmetic(if_expr.true_case(), state, guard, keep_array);
    if_expr.false_case()=
      address_arithmetic(if_expr.false_case(), state, guard, keep_array);

    result=if_expr;
  }
  else if(expr.id()==ID_symbol ||
          expr.id()==ID_string_constant ||
          expr.id()==ID_label ||
          expr.id()==ID_array)
  {
    // give up, just dereference
    result=expr;
    dereference_rec(result, state, guard, false);

    // turn &array into &array[0]
    if(ns.follow(result.type()).id()==ID_array && !keep_array)
      result=index_exprt(result, from_integer(0, index_type()));

    // handle field-sensitive SSA symbol
    mp_integer offset=0;
    if(expr.id()==ID_symbol &&
       expr.get_bool(ID_C_SSA_symbol))
    {
      offset=compute_pointer_offset(expr, ns);
      assert(offset>=0);
    }

    if(offset>0)
    {
      // Field-sensitive symbol at a non-zero offset: express its address
      // as a byte_extract over the underlying L1 object and recurse.
      byte_extract_exprt be(byte_extract_id());
      be.type()=expr.type();
      be.op()=to_ssa_expr(expr).get_l1_object();
      be.offset()=from_integer(offset, index_type());

      result=address_arithmetic(be, state, guard, keep_array);

      do_simplify(result);
    }
    else
      result=address_of_exprt(result);
  }
  else
    throw "goto_symext::address_arithmetic does not handle "+expr.id_string();

  // Sanity check: unless an array was decayed, the result must have type
  // pointer-to-expr_type.
  const typet &expr_type=ns.follow(expr.type());
  assert((expr_type.id()==ID_array && !keep_array) ||
         base_type_eq(pointer_typet(expr_type), result.type(), ns));

  return result;
}