/**
 * Wrap \p model in a PINS layer that checks the correctness of its
 * dependency matrices (read / must-write / may-write) at run time.
 *
 * Returns \p model unchanged when PINS_CORRECTNESS_CHECK is disabled;
 * otherwise returns a new greybox model whose next-state calls are routed
 * through the checking callbacks (check_all / check_long / check_short).
 */
model_t
GBaddCheck (model_t model)
{
    HREassert (model != NULL, "No model");

    if (!PINS_CORRECTNESS_CHECK)
        return model;

    Print1 (info, "Matrix checking layer activated.");

    model_t             checked = GBcreateBase ();
    check_ctx_t        *ctx = RTmalloc (sizeof(check_ctx_t));

    // Cache the wrapped model's dimensions.
    ctx->N = pins_get_state_variable_count (model);
    ctx->K = pins_get_group_count (model);
    ctx->L = pins_get_edge_label_count (model);
    ctx->S = pins_get_state_label_count (model);

    ctx->src2       = RTmalloc (sizeof(int[ctx->N]));
    ctx->check_must = ci_create (ctx->N);
    ctx->read = (ci_list **) dm_rows_to_idx_table (GBgetDMInfoRead (model));
    ctx->must = (ci_list **) dm_rows_to_idx_table (GBgetDMInfoMustWrite (model));
    ctx->may  = GBgetDMInfoMayWrite (model);
    ctx->stack  = isba_create (ctx->N);
    ctx->parent = model;

    // Pick two distinguishable "magic" values per slot from the slot's own
    // type range (min and min+1); the range must be non-empty for that.
    ctx->magic[0] = RTmalloc (sizeof(int[ctx->N]));
    ctx->magic[1] = RTmalloc (sizeof(int[ctx->N]));
    for (int slot = 0; slot < ctx->N; slot++) {
        int hi   = type_max (ctx, slot);
        int lo   = type_min (ctx, slot);
        int span = hi - lo;
        HREassert (span > 0, "Empty type range for slot: %d -- %s", slot,
                   str_slot(ctx, NULL, slot));
        ctx->magic[0][slot] = lo;
        ctx->magic[1][slot] = lo + 1;
    }
    ctx->reentrent = 0;

    GBsetContext (checked, ctx);
    GBsetNextStateAll   (checked, check_all);
    GBsetNextStateLong  (checked, check_long);
    GBsetNextStateShort (checked, check_short);
    //GBsetActionsLong (checked, check_long);
    //GBsetActionsShort (checked, check_short);
    GBinitModelDefaults (&checked, model);
    return checked;
}
/**
 * Initialize the symbolic state-space domain and all per-group and
 * per-state-label vector sets/relations.
 *
 * Selects the read/write dependency matrices and the short next-state
 * function depending on whether the vset domain separates reads from
 * writes and whether guard-splitting (PINS_USE_GUARDS) is enabled.
 *
 * Fixes over the previous version:
 *  - r_projs / w_projs / l_projs were RTmalloc'ed and then unconditionally
 *    overwritten by the tables returned from dm_rows_to_idx_table(),
 *    leaking the first allocations.  The redundant allocations are removed.
 *  - The inhibit_matrix initialization lived INSIDE the group loop,
 *    shadowing the loop variable and re-allocating class_enabled once per
 *    transition group (leaking all but the last allocation).  It is
 *    loop-invariant and now runs exactly once.
 */
static void
init_domain (vset_implementation_t impl)
{
    domain = vdom_create_domain (N, impl);

    for (int i = 0; i < dm_ncols (GBgetDMInfo (model)); i++) {
        vdom_set_name (domain, i, lts_type_get_state_name (ltstype, i));
    }

    group_next     = (vrel_t*) RTmalloc (nGrps * sizeof(vrel_t));
    group_explored = (vset_t*) RTmalloc (nGrps * sizeof(vset_t));
    group_tmp      = (vset_t*) RTmalloc (nGrps * sizeof(vset_t));
    label_false    = (vset_t*) RTmalloc (sLbls * sizeof(vset_t));
    label_true     = (vset_t*) RTmalloc (sLbls * sizeof(vset_t));
    label_tmp      = (vset_t*) RTmalloc (sLbls * sizeof(vset_t));

    /* Choose dependency matrices and the short next-state function. */
    if (!vdom_separates_rw(domain) && !PINS_USE_GUARDS) {
        read_matrix  = GBgetDMInfo(model);
        write_matrix = GBgetDMInfo(model);
        Warning(info, "Using GBgetTransitionsShort as next-state function");
        transitions_short = GBgetTransitionsShort;
    } else if (!vdom_separates_rw(domain) && PINS_USE_GUARDS) {
        read_matrix  = GBgetMatrix(model, GBgetMatrixID(model, LTSMIN_MATRIX_ACTIONS_READS));
        write_matrix = GBgetDMInfo(model);
        Warning(info, "Using GBgetActionsShort as next-state function");
        transitions_short = GBgetActionsShort;
    } else if (vdom_separates_rw(domain) && !PINS_USE_GUARDS) {
        read_matrix  = GBgetDMInfoRead(model);
        write_matrix = GBgetDMInfoMayWrite(model);
        Warning(info, "Using GBgetTransitionsShortR2W as next-state function");
        transitions_short = GBgetTransitionsShortR2W;
    } else { // vdom_separates_rw(domain) && PINS_USE_GUARDS
        read_matrix  = GBgetMatrix(model, GBgetMatrixID(model, LTSMIN_MATRIX_ACTIONS_READS));
        write_matrix = GBgetDMInfoMayWrite(model);
        Warning(info, "Using GBgetActionsShortR2W as next-state function");
        transitions_short = GBgetActionsShortR2W;
    }

    if (PINS_USE_GUARDS) {
        if (no_soundness_check) {
            Warning(info, "Guard-splitting: not checking soundness of the specification, this may result in an incorrect state space!");
        } else {
            Warning(info, "Guard-splitting: checking soundness of specification, this may be slow!");
        }
    }

    /* Row-projection index tables; ownership of the returned tables is ours. */
    r_projs = (ci_list **) dm_rows_to_idx_table (read_matrix);
    w_projs = (ci_list **) dm_rows_to_idx_table (write_matrix);

    for (int i = 0; i < nGrps; i++) {
        if (HREme(HREglobal()) == 0) {
            if (vdom_separates_rw(domain)) {
                group_next[i] = vrel_create_rw (domain,
                                                r_projs[i]->count, r_projs[i]->data,
                                                w_projs[i]->count, w_projs[i]->data);
            } else {
                group_next[i] = vrel_create (domain,
                                             r_projs[i]->count, r_projs[i]->data);
            }
            group_explored[i] = vset_create (domain, r_projs[i]->count, r_projs[i]->data);
            group_tmp[i]      = vset_create (domain, r_projs[i]->count, r_projs[i]->data);
        }
    }

    /* Loop-invariant: set up inhibit classes exactly once (was inside the
     * group loop above, re-allocating class_enabled per group). */
    if (inhibit_matrix != NULL && HREme(HREglobal()) == 0) {
        inhibit_class_count = dm_nrows(inhibit_matrix);
        class_enabled = (vset_t*) RTmalloc (inhibit_class_count * sizeof(vset_t));
        for (int i = 0; i < inhibit_class_count; i++) {
            class_enabled[i] = vset_create(domain, -1, NULL);
        }
    }

    l_projs = (ci_list **) dm_rows_to_idx_table (GBgetStateLabelInfo(model));
    for (int i = 0; i < sLbls; i++) {
        /* Indeed, we skip unused state labels, but allocate memory for pointers
         * (to vset_t's). Is this bad? Maybe a hashmap is worse. */
        if (bitvector_is_set(&state_label_used, i)) {
            if (HREme(HREglobal()) == 0) {
                label_false[i] = vset_create(domain, l_projs[i]->count, l_projs[i]->data);
                label_true[i]  = vset_create(domain, l_projs[i]->count, l_projs[i]->data);
                label_tmp[i]   = vset_create(domain, l_projs[i]->count, l_projs[i]->data);
            }
        } else {
            label_false[i] = NULL;
            label_true[i]  = NULL;
            label_tmp[i]   = NULL;
        }
    }

    /* One projected set per invariant expression. */
    inv_set = (vset_t *) RTmalloc (sizeof(vset_t[num_inv]));
    for (int i = 0; i < num_inv; i++) {
        inv_set[i] = vset_create (domain, inv_proj[i]->count, inv_proj[i]->data);
        inv_info_prepare (inv_expr[i], inv_parse_env[i], i);
    }
}