static bool
check_cndfs_proviso (wctx_t *ctx)
{
    alg_local_t        *loc = ctx->local;
    if (PINS_POR == 0 || proviso != Proviso_CNDFS) return false;

    // Only check the proviso if the state is still volatile. It may be that:
    // - the reduced successor set is already involatile, or
    // - the proviso was already checked for this state, i.e. it is being
    //   backtracked for the second time.
    // Either way, this thread has locally visited all involatile successors.
    if (bitvector_is_set (&loc->stackbits, cur(ctx,INVOL))) return false;

    bool                no_cycle = bitvector_is_set (&loc->stackbits, cur(ctx,NOCYCLE));
    cndfs_proviso_t     prov = no_cycle ? VOLATILE : INVOLATILE;
    int                 success = state_store_try_set_counters (ctx->state->ref,
                                                                2, UNKNOWN, prov);
    if (( success && prov == INVOLATILE) ||
        (!success && state_store_get_wip(ctx->state->ref) == INVOLATILE)) {
        bitvector_set (&loc->stackbits, cur(ctx,INVOL));
        loc->counters.ignoring += success != 0;
        return true;
    }
    return false;
}
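/* A minimal, self-contained sketch of the claim pattern that
 * state_store_try_set_counters presumably implements: atomically move a
 * per-state counter from UNKNOWN to the desired proviso value, so at most
 * one worker wins the claim and the losers can read the winner's verdict.
 * All names here (toy_counter_t, toy_try_set, TOY_*) are hypothetical; only
 * the compare-and-swap idiom is the point. */
#include <stdatomic.h>
#include <stdbool.h>

typedef _Atomic int toy_counter_t;

enum { TOY_UNKNOWN = 0, TOY_VOLATILE = 1, TOY_INVOLATILE = 2 };

static bool
toy_try_set (toy_counter_t *c, int expected, int desired)
{
    // strong CAS: succeeds iff *c still holds `expected`; a losing worker
    // can observe the winner's value afterwards (cf. state_store_get_wip)
    return atomic_compare_exchange_strong (c, &expected, desired);
}

/* usage: toy_try_set (&counter, TOY_UNKNOWN, TOY_INVOLATILE) */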
void
user_bitvector_print (bitvector_t *bv)
{
    int                 i;
    int                 s = bitvector_size (bv);
    printf ("bitvector: %d\n", s);
    for (i = 0; i < s; i++)
        printf ("%c", bitvector_is_set (bv, i) ? '1' : '0');
    printf ("\n");
}
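/* Example use of user_bitvector_print, assuming the usual create/set/free
 * companions from the bitvector module (verify the exact signatures before
 * reusing this). Expected output:
 *   bitvector: 8
 *   01000100
 */
void
user_bitvector_print_example (void)
{
    bitvector_t         bv;
    bitvector_create (&bv, 8);          // all bits start out clear
    bitvector_set (&bv, 1);
    bitvector_set (&bv, 5);
    user_bitvector_print (&bv);
    bitvector_free (&bv);
}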
int
dm_is_set (const matrix_t *m, int row, int col)
{
    // get permutation
    int rowp = m->row_perm.data[row].becomes;
    int colp = m->col_perm.data[col].becomes;
    // calculate bit number
    int bitnr = rowp * (m->bits_per_row) + colp;
    return bitvector_is_set (&(m->bits), bitnr);
}
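/* The bit layout is row-major over the *permuted* coordinates: with
 * bits_per_row == 4, entry (rowp = 1, colp = 2) lives at bit 1*4 + 2 = 6.
 * A standalone illustration of just the index arithmetic (toy_bitnr is a
 * hypothetical helper, no matrix_t involved): */
#include <assert.h>

static int
toy_bitnr (int rowp, int colp, int bits_per_row)
{
    return rowp * bits_per_row + colp;
}

static void
toy_bitnr_example (void)
{
    assert (toy_bitnr (1, 2, 4) == 6);  // second row, third column
}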
void
reach_sat_fix (reach_proc_t reach_proc, vset_t visited,
               bitvector_t *reach_groups, long *eg_count, long *next_count,
               long *guard_count)
{
    (void) reach_proc;
    (void) guard_count;
    if (PINS_USE_GUARDS)
        Abort("guard-splitting not supported with saturation=sat-fix");

    int level = 0;
    vset_t old_vis = vset_create(domain, -1, NULL);
    vset_t deadlocks = dlk_detect ? vset_create(domain, -1, NULL) : NULL;
    vset_t dlk_temp = dlk_detect ? vset_create(domain, -1, NULL) : NULL;

    LACE_ME;

    while (!vset_equal(visited, old_vis)) {
        if (trc_output != NULL) save_level(visited);
        vset_copy(old_vis, visited);
        stats_and_progress_report(NULL, visited, level);
        level++;
        for (int i = 0; i < nGrps; i++) {
            if (!bitvector_is_set(reach_groups, i)) continue;
            expand_group_next(i, visited);
            reach_chain_stop();
            (*eg_count)++;
        }
        if (dlk_detect) vset_copy(deadlocks, visited);
        if (USE_PARALLELISM) vset_least_fixpoint_par(visited, visited, group_next, nGrps);
        else vset_least_fixpoint(visited, visited, group_next, nGrps);
        (*next_count)++;

        check_invariants(visited, level);

        if (dlk_detect) {
            for (int i = 0; i < nGrps; i++) {
                vset_prev(dlk_temp, visited, group_next[i], deadlocks);
                reduce(i, dlk_temp);
                vset_minus(deadlocks, dlk_temp);
                vset_clear(dlk_temp);
            }
            deadlock_check(deadlocks, reach_groups);
        }
        vset_reorder(domain);
    }

    vset_destroy(old_vis);
    if (dlk_detect) {
        vset_destroy(deadlocks);
        vset_destroy(dlk_temp);
    }
}
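/* The shape of reach_sat_fix's outer loop, reduced to its essence: keep a
 * copy of the previous visited set and stop once a full saturation round
 * adds nothing. The toy below runs over a uint32_t "set" of 32 states;
 * toy_step is a hypothetical stand-in for one vset_least_fixpoint round. */
#include <stdint.h>

static uint32_t
toy_fixpoint (uint32_t visited, uint32_t (*toy_step) (uint32_t))
{
    uint32_t            old_vis;
    do {
        old_vis = visited;              // vset_copy(old_vis, visited)
        visited = toy_step (visited);   // one saturation round
    } while (visited != old_vis);       // !vset_equal(visited, old_vis)
    return visited;
}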
/* NDFS dfs_blue */
void
ndfs_blue (run_t *run, wctx_t *ctx)
{
    alg_local_t        *loc = ctx->local;
    transition_info_t   ti = GB_NO_TRANSITION;
    ndfs_blue_handle (ctx, ctx->initial, &ti, 0);
    ctx->counters->trans = 0; // reset trans count

    while ( !run_is_stopped(run) ) {
        raw_data_t          state_data = dfs_stack_top (loc->stack);
        if (NULL != state_data) {
            state_info_deserialize (ctx->state, state_data);
            nndfs_color_t       color = nn_get_color (&loc->color_map, ctx->state->ref);
            if ( nn_color_eq(color, NNWHITE) ) {
                if (all_red)
                    bitvector_set (&loc->stackbits, ctx->counters->level_cur);
                nn_set_color (&loc->color_map, ctx->state->ref, NNCYAN);
                ndfs_explore_state_blue (ctx);
            } else {
                if ( all_red && ctx->counters->level_cur != 0 && !nn_color_eq(color, NNPINK) )
                    bitvector_unset (&loc->stackbits, ctx->counters->level_cur - 1);
                dfs_stack_pop (loc->stack);
            }
        } else { // backtrack
            if (0 == dfs_stack_nframes (loc->stack))
                return;
            dfs_stack_leave (loc->stack);
            ctx->counters->level_cur--;

            state_data = dfs_stack_top (loc->stack);
            state_info_deserialize (loc->seed, state_data);
            if ( all_red && bitvector_is_set(&loc->stackbits, ctx->counters->level_cur) ) {
                /* all successors are red: mark the state red without a red DFS */
                nn_set_color (&loc->color_map, loc->seed->ref, NNPINK);
                loc->counters.allred++;
                if ( GBbuchiIsAccepting(ctx->model, state_info_state(loc->seed)) )
                    loc->counters.accepting++;
            } else if ( GBbuchiIsAccepting(ctx->model, state_info_state(loc->seed)) ) {
                /* call red DFS for accepting states */
                ndfs_red (ctx, loc->seed->ref);
                nn_set_color (&loc->color_map, loc->seed->ref, NNPINK);
            } else {
                if (all_red && ctx->counters->level_cur > 0)
                    bitvector_unset (&loc->stackbits, ctx->counters->level_cur - 1);
                nn_set_color (&loc->color_map, loc->seed->ref, NNBLUE);
            }
            dfs_stack_pop (loc->stack);
        }
    }
}
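/* The all-red heuristic keeps one bit per DFS level in loc->stackbits:
 * bit[level] == 1 means "every successor popped so far at this level was
 * red (NNPINK)". The bit is set optimistically on push and cleared in the
 * parent frame whenever a non-red successor is popped, which is the
 * bitvector_unset(..., level_cur - 1) seen above. A toy rendition with a
 * plain bool array standing in for the bitvector (hypothetical helper): */
#include <stdbool.h>

static void
toy_on_pop_nonred (bool *all_red_bits, int level_cur)
{
    // a non-red successor surfaced: the parent frame can no longer
    // conclude all-red, so clear its bit (guarding the root frame)
    if (level_cur > 0)
        all_red_bits[level_cur - 1] = false;
}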
void
reach_sat (reach_proc_t reach_proc, vset_t visited,
           bitvector_t *reach_groups, long *eg_count, long *next_count,
           long *guard_count)
{
    (void) reach_proc;
    (void) next_count;
    (void) guard_count;

    if (PINS_USE_GUARDS)
        Abort("guard-splitting not supported with saturation=sat");
    if (act_detect != NULL && trc_output != NULL)
        Abort("Action detection with trace generation not supported");

    for (int i = 0; i < nGrps; i++) {
        if (bitvector_is_set(reach_groups, i)) {
            struct expand_info *ctx = RTmalloc(sizeof(struct expand_info));
            ctx->group = i;
            ctx->group_explored = group_explored[i];
            ctx->eg_count = eg_count;
            vrel_set_expand(group_next[i], expand_group_next_projected, ctx);
        }
    }

    if (trc_output != NULL) save_level(visited);
    stats_and_progress_report(NULL, visited, 0);
    if (USE_PARALLELISM) vset_least_fixpoint_par(visited, visited, group_next, nGrps);
    else vset_least_fixpoint(visited, visited, group_next, nGrps);
    stats_and_progress_report(NULL, visited, 1);

    check_invariants(visited, -1);

    if (dlk_detect) {
        vset_t deadlocks = vset_create(domain, -1, NULL);
        vset_t dlk_temp = vset_create(domain, -1, NULL);
        vset_copy(deadlocks, visited);
        for (int i = 0; i < nGrps; i++) {
            vset_prev(dlk_temp, visited, group_next[i], deadlocks);
            reduce(i, dlk_temp);
            vset_minus(deadlocks, dlk_temp);
            vset_clear(dlk_temp);
        }
        deadlock_check(deadlocks, reach_groups);
        vset_destroy(deadlocks);
        vset_destroy(dlk_temp);
    }
}
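/* reach_sat wires expand_group_next_projected into each transition relation
 * with a heap-allocated struct expand_info as callback context. The general
 * idiom, with hypothetical toy names: one context object per registration,
 * owned by the consumer for as long as the callback may fire (reach_sat
 * never frees it; it lives for the whole run). */
#include <stdio.h>
#include <stdlib.h>

struct toy_expand_ctx {
    int                 group;
    long               *eg_count;
};

static void
toy_expand_cb (void *context)
{
    struct toy_expand_ctx *ctx = context;
    (*ctx->eg_count)++;                 // count expansions, as eg_count does
    printf ("expanding group %d\n", ctx->group);
}

static void *
toy_register (int group, long *eg_count)
{
    struct toy_expand_ctx *ctx = malloc (sizeof *ctx);
    ctx->group = group;
    ctx->eg_count = eg_count;
    return ctx;                         // handed to the callback machinery
}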
void
dm_unset (matrix_t *m, int row, int col)
{
    // get permutation
    int rowp = m->row_perm.data[row].becomes;
    int colp = m->col_perm.data[col].becomes;
    // calculate bit number
    int bitnr = rowp * (m->bits_per_row) + colp;
    // keep the per-row/per-column set-bit counts in sync
    if (bitvector_is_set (&(m->bits), bitnr)) {
        m->row_perm.count[rowp]--;
        m->col_perm.count[colp]--;
    }
    bitvector_unset (&(m->bits), bitnr);
}
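/* dm_unset keeps the per-row/per-column set-bit counters in sync so that
 * queries like "how many entries are set in this row" stay O(1). The dual
 * operation presumably mirrors it by counting only 0 -> 1 flips; a sketch
 * (the real dm_set lives in the same module and may differ in details): */
void
dm_set_sketch (matrix_t *m, int row, int col)
{
    int rowp = m->row_perm.data[row].becomes;
    int colp = m->col_perm.data[col].becomes;
    int bitnr = rowp * (m->bits_per_row) + colp;
    if (!bitvector_is_set (&(m->bits), bitnr)) {    // only count 0 -> 1 flips
        m->row_perm.count[rowp]++;
        m->col_perm.count[colp]++;
    }
    bitvector_set (&(m->bits), bitnr);
}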
/* ENDFS dfs_blue */
void
endfs_blue (run_t *run, wctx_t *ctx)
{
    HREassert (ecd, "CNDFS's correctness depends crucially on ECD");
    alg_local_t        *loc = ctx->local;
    cndfs_alg_local_t  *cloc = (cndfs_alg_local_t *) ctx->local;
    transition_info_t   ti = GB_NO_TRANSITION;
    uint32_t            global_color;
    int                 accepting;
    cloc->successors = NONEC;
    endfs_handle_blue (ctx, ctx->initial, &ti, 0);
    ctx->counters->trans = 0; // reset trans count
    cloc->accepting_depth = 0;
    alg_global_t       *sm = ctx->global;

    while ( !run_is_stopped(ctx->run) ) {
        raw_data_t          state_data = dfs_stack_top (loc->stack);
        if (NULL != state_data) {
            state_info_deserialize (ctx->state, state_data);
            global_color = state_store_get_colors (ctx->state->ref);
            if (global_color < CBLUE && !on_stack_accepting_up(ctx, &accepting)) {
                if (global_color == CWHITE)
                    update_color (ctx, ctx->state->ref, CCYAN, 0);
                if (all_red)
                    bitvector_set (&loc->stackbits, cur(ctx,REDALL));
                bitvector_unset (&loc->stackbits, cur(ctx,INVOL));
                endfs_explore_state_blue (ctx);
            } else {
                if ( all_red && ctx->counters->level_cur != 0 && global_color != CRED )
                    bitvector_unset (&loc->stackbits, pred(ctx,REDALL));
                dfs_stack_pop (loc->stack);
            }
        } else { // backtrack
            if (0 == dfs_stack_nframes(loc->stack))
                break;
            dfs_stack_leave (loc->stack);
            ctx->counters->level_cur--;

            /* call red DFS for accepting states */
            state_data = dfs_stack_top (loc->stack);
            state_info_deserialize (loc->seed, state_data);

            if (check_cndfs_proviso(ctx)) {
                reach_explore_all (ctx, loc->seed);
                continue;
            }

            accepting = pins_state_is_accepting(ctx->model, state_info_state(loc->seed)) != 0;

            /* Mark state CBLUE on backtrack */
            update_color (ctx, loc->seed->ref, CBLUE, 1);

            if ( all_red && bitvector_is_set(&loc->stackbits, cur(ctx,REDALL)) ) {
                /* all successors are red */
                //permute_trans (loc->permute, ctx->state, check, ctx);
                set_all_red2 (ctx, loc->seed);
            } else if ( accepting ) {
                sm->work = loc->seed->ref;
                endfs_red (ctx);
                if (Strat_ENDFS == loc->strat)
                    endfs_handle_dangerous (ctx);
                else
                    cndfs_handle_nonseed_accepting (ctx);
                sm->work = SIZE_MAX;
            } else if (all_red && ctx->counters->level_cur > 0 &&
                       state_store_get_colors (loc->seed->ref) != CRED) {
                /* unset the all-red flag (only for non-initial nodes) */
                bitvector_unset (&loc->stackbits, pred(ctx,REDALL));
            }

            accepting_down (ctx, loc->seed, accepting);
            dfs_stack_pop (loc->stack);
        }
    }
    HREassert (run_is_stopped(ctx->run) || fset_count(cloc->pink) == 0);

    // if the recursive strategy uses global bits (global pruning),
    // then do simple load balancing (only for the top-level strategy)
    if ( Strat_ENDFS == loc->strat &&
         run == run->shared->top_level &&
         (Strat_LTLG & sm->rec->local->strat) ) {
        endfs_lb (ctx);
    }

    if (global->exit_status == LTSMIN_EXIT_SUCCESS && ctx->id == 0) {
        Warning(info, " ");
        Warning(info, "Empty product with LTL!");
        Warning(info, " ");
    }
}
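/* endfs_blue's test `global_color < CBLUE` relies on the global colors
 * being ordered white < cyan < blue < red: anything below CBLUE has not
 * been fully explored by any worker yet. Toy constants illustrating only
 * that comparison (the real values live in the state store): */
enum toy_color { TOY_CWHITE = 0, TOY_CCYAN, TOY_CBLUE, TOY_CRED };

static inline int
toy_needs_blue_visit (enum toy_color c)
{
    return c < TOY_CBLUE;   // white or cyan: still to be explored
}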
static void
init_domain (vset_implementation_t impl)
{
    domain = vdom_create_domain (N, impl);

    for (int i = 0; i < dm_ncols(GBgetDMInfo(model)); i++) {
        vdom_set_name (domain, i, lts_type_get_state_name(ltstype, i));
    }

    group_next     = (vrel_t*)RTmalloc(nGrps * sizeof(vrel_t));
    group_explored = (vset_t*)RTmalloc(nGrps * sizeof(vset_t));
    group_tmp      = (vset_t*)RTmalloc(nGrps * sizeof(vset_t));

    label_false = (vset_t*)RTmalloc(sLbls * sizeof(vset_t));
    label_true  = (vset_t*)RTmalloc(sLbls * sizeof(vset_t));
    label_tmp   = (vset_t*)RTmalloc(sLbls * sizeof(vset_t));

    if (!vdom_separates_rw(domain) && !PINS_USE_GUARDS) {
        read_matrix = GBgetDMInfo(model);
        write_matrix = GBgetDMInfo(model);
        Warning(info, "Using GBgetTransitionsShort as next-state function");
        transitions_short = GBgetTransitionsShort;
    } else if (!vdom_separates_rw(domain) && PINS_USE_GUARDS) {
        read_matrix = GBgetMatrix(model, GBgetMatrixID(model, LTSMIN_MATRIX_ACTIONS_READS));
        write_matrix = GBgetDMInfo(model);
        Warning(info, "Using GBgetActionsShort as next-state function");
        transitions_short = GBgetActionsShort;
    } else if (vdom_separates_rw(domain) && !PINS_USE_GUARDS) {
        read_matrix = GBgetDMInfoRead(model);
        write_matrix = GBgetDMInfoMayWrite(model);
        Warning(info, "Using GBgetTransitionsShortR2W as next-state function");
        transitions_short = GBgetTransitionsShortR2W;
    } else { // vdom_separates_rw(domain) && PINS_USE_GUARDS
        read_matrix = GBgetMatrix(model, GBgetMatrixID(model, LTSMIN_MATRIX_ACTIONS_READS));
        write_matrix = GBgetDMInfoMayWrite(model);
        Warning(info, "Using GBgetActionsShortR2W as next-state function");
        transitions_short = GBgetActionsShortR2W;
    }

    if (PINS_USE_GUARDS) {
        if (no_soundness_check) {
            Warning(info, "Guard-splitting: not checking soundness of the specification, "
                          "this may result in an incorrect state space!");
        } else {
            Warning(info, "Guard-splitting: checking soundness of the specification, "
                          "this may be slow!");
        }
    }

    r_projs = (ci_list **) dm_rows_to_idx_table (read_matrix);
    w_projs = (ci_list **) dm_rows_to_idx_table (write_matrix);
    for (int i = 0; i < nGrps; i++) {
        if (HREme(HREglobal()) == 0) {
            if (vdom_separates_rw(domain)) {
                group_next[i] = vrel_create_rw (domain,
                                                r_projs[i]->count, r_projs[i]->data,
                                                w_projs[i]->count, w_projs[i]->data);
            } else {
                group_next[i] = vrel_create (domain, r_projs[i]->count, r_projs[i]->data);
            }
            group_explored[i] = vset_create (domain, r_projs[i]->count, r_projs[i]->data);
            group_tmp[i] = vset_create (domain, r_projs[i]->count, r_projs[i]->data);
        }
    }

    if (inhibit_matrix != NULL) {
        inhibit_class_count = dm_nrows(inhibit_matrix);
        class_enabled = (vset_t*)RTmalloc(inhibit_class_count * sizeof(vset_t));
        for (int i = 0; i < inhibit_class_count; i++) {
            class_enabled[i] = vset_create(domain, -1, NULL);
        }
    }

    l_projs = (ci_list **) dm_rows_to_idx_table (GBgetStateLabelInfo(model));
    for (int i = 0; i < sLbls; i++) {
        /* We skip unused state labels, but still allocate the pointer slots
         * (to vset_t's). Is this bad? Maybe a hashmap would be worse. */
        if (bitvector_is_set(&state_label_used, i)) {
            if (HREme(HREglobal()) == 0) {
                label_false[i] = vset_create(domain, l_projs[i]->count, l_projs[i]->data);
                label_true[i]  = vset_create(domain, l_projs[i]->count, l_projs[i]->data);
                label_tmp[i]   = vset_create(domain, l_projs[i]->count, l_projs[i]->data);
            }
        } else {
            label_false[i] = NULL;
            label_true[i]  = NULL;
            label_tmp[i]   = NULL;
        }
    }

    inv_set = (vset_t *) RTmalloc(sizeof(vset_t[num_inv]));
    for (int i = 0; i < num_inv; i++) {
        inv_set[i] = vset_create (domain, inv_proj[i]->count, inv_proj[i]->data);
        inv_info_prepare (inv_expr[i], inv_parse_env[i], i);
    }
}
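/* The ci_list projections computed by dm_rows_to_idx_table drive the
 * "short vector" interface: each group touches only the state slots whose
 * matrix column is set. A minimal projection of a full state vector onto
 * such an index list (toy helper; assumes a { count, data[] }-style list
 * like ci_list): */
static void
toy_project (const int *full, const int *idx, int count, int *shrt)
{
    for (int i = 0; i < count; i++)
        shrt[i] = full[idx[i]];         // keep only the projected slots
}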