/**
 * Determine an (exclusive-ish) upper bound for the values of state slot idx,
 * used to pick in-range "magic" probe values for the matrix-check layer.
 * Returns a positive value; aborts on unknown formats.
 */
static int
type_max (check_ctx_t *ctx, int idx)
{
    model_t             model = ctx->parent;
    lts_type_t          ltstype = GBgetLTStype (model);
    int                 typeno = lts_type_get_state_typeno (ltstype, idx);
    int                 c;
    switch (lts_type_get_format (ltstype, typeno)) {
    case LTStypeDirect:
        // No type bounds available: use the initial-state value as a proxy
        // (never return 0, so that max - min stays positive).
        GBgetInitialState (model, ctx->src2);
        c = ctx->src2[idx];
        return c == 0 ? 1 : c;
    case LTStypeRange:
        // BUG FIX: the maximum of a range type is its upper bound; the
        // original returned lts_type_get_min, which made max == min and
        // tripped the "Empty type range" assertion in GBaddCheck.
        return lts_type_get_max (ltstype, typeno);
    case LTStypeEnum:
        c = pins_chunk_count (model, typeno);
        HREassert (c > 0, "Empty enum table for slot: %d -- %s",
                   idx, str_slot(ctx, NULL, idx));
        return c;
    case LTStypeChunk:
        // Chunk tables may still be empty at init time; pretend one entry.
        c = pins_chunk_count (model, typeno);
        return c == 0 ? 1 : c;
    case LTStypeBool:
        return 1;
    case LTStypeTrilean:
        return 2;
    case LTStypeSInt32:
        return (1ULL<<31) - 1;
    default: {
        HREassert(false);
        return -1;
    }
    }
}
/**
 * Remove `key` from the hash set. Returns false when the key is absent.
 * On success *data is pointed at the key's associated data area; when the
 * deletion triggers a SHRINK or REHASH resize, the data is first copied to
 * the side buffer dbs->delled_data (resize invalidates in-table pointers)
 * and *data points there instead.
 */
static inline bool
internal_delete (fset_t *dbs, mem_hash_t *m, void *key, void **data)
{
    size_t              ref;
    size_t              k = dbs->key_length;
    size_t              tomb = NONE;
    mem_hash_t          mem = m == NULL ? EMPTY : *m;   // caller may memoize the hash
    int found = fset_locate (dbs, &mem, key, &ref, &tomb);
    HREassert (found != FSET_FULL);
    if (found == 0)
        return false;                                   // key not present
    wipe_chain (dbs, ref);                              // tombstone the bucket chain
    *data = bucket(dbs, dbs->data, ref) + k;            // data sits right after the key
    if (dbs->size != dbs->init_size && dbs->load < dbs->size >> 3) {
        // Preserve the deleted data across the resize (see header comment).
        memcpy (dbs->delled_data, *data, dbs->data_length);
        *data = dbs->delled_data;
        bool res = resize (dbs, SHRINK); // <12.5% keys ==> shrink
        HREassert (res, "Cannot shrink table?");
    } else if (dbs->tombs << 1 > dbs->size) {
        memcpy (dbs->delled_data, *data, dbs->data_length);
        *data = dbs->delled_data;
        bool res = resize (dbs, REHASH); // >50% tombs ==> rehash
        HREassert (res, "Cannot rehash table?");
    }
    return true;
}
/**
 * Register the CNDFS/ENDFS callbacks on `run` and, on the first call,
 * create the shared structure plus one chained run per recursive strategy.
 */
void
cndfs_shared_init (run_t *run)
{
    HREassert (GRED.g == 0);
    HREassert (GGREEN.g == 1);
    HREassert (GDANGEROUS.g == 2);

    set_alg_local_init    (run->alg, cndfs_local_init);
    set_alg_global_init   (run->alg, cndfs_global_init);
    set_alg_global_deinit (run->alg, cndfs_global_deinit);
    set_alg_local_deinit  (run->alg, cndfs_local_deinit);
    set_alg_print_stats   (run->alg, cndfs_print_stats);
    set_alg_run           (run->alg, endfs_blue);
    set_alg_state_seen    (run->alg, cndfs_state_seen);
    set_alg_reduce        (run->alg, cndfs_reduce);

    if (run->shared != NULL)
        return;                     // shared part already initialized

    run->shared = RTmallocZero (sizeof(alg_shared_t));
    run->shared->color_bit_shift = 0;
    run->shared->top_level = run;
    run->shared->run_is_stopped = run_get_is_stopped (run);
    run->shared->run_stop = run_get_stop (run);
    run_set_is_stopped (run, cndfs_is_stopped);
    run_set_stop (run, cndfs_stop);

    // Chain one extra run per configured recursive repair strategy.
    run_t              *prev = run;
    for (int i = 1; strategy[i] != Strat_None; i++) {
        run_t          *rec_run = run_create (false);
        rec_run->shared = RTmallocZero (sizeof(alg_shared_t));
        rec_run->shared->previous = prev;
        rec_run->shared->top_level = run;
        rec_run->shared->rec = NULL;
        run_set_is_stopped (rec_run, cndfs_is_stopped);
        run_set_stop (rec_run, cndfs_stop);
        rec_run->shared->color_bit_shift =
            prev->shared->color_bit_shift + num_global_bits (strategy[i]);
        alg_shared_init_strategy (rec_run, strategy[i]);
        prev->shared->rec = rec_run;
        prev = rec_run;
    }
}
/**
 * Resolve the concrete table value of a leaf expression (VAR/CHUNK/INT)
 * against the type `typeno` of the model. Integer-formatted types require
 * an INT literal; chunk/enum types are printed and bound via the chunk
 * table (strictly for enums). Non-leaf nodes are ignored.
 */
static void
ltsmin_expr_lookup_value (ltsmin_expr_t top, ltsmin_expr_t e, int typeno,
                          ltsmin_parse_env_t env, model_t model)
{
    // Only value-denoting leaves need resolution.
    if (e->node_type != VAR && e->node_type != CHUNK && e->node_type != INT)
        return;

    data_format_t       format = lts_type_get_format (GBgetLTStype(model), typeno);
    chunk               c;
    switch (format) {
    case LTStypeDirect:
    case LTStypeRange:
        if (INT != e->node_type)
            Abort ("Expected an integer value for comparison: %s",
                   LTSminPrintExpr(top, env));
        break;
    case LTStypeEnum:
    case LTStypeChunk:
        // Print the expression into the shared buffer and look it up.
        c.data = env->buffer;
        c.len = LTSminSPrintExpr (c.data, e, env);
        HREassert (c.len < ENV_BUFFER_SIZE, "Buffer overflow in print expression");
        lookup_type_value (e, typeno, c, model, format==LTStypeEnum);
        Debug ("Bound '%s' to %d in table for type '%s'", c.data, e->num,
               lts_type_get_state_type(GBgetLTStype(model),typeno));
        break;
    }
}
/**
 * returns:
 * - CLAIM_FIRST   : if we initialized the state
 * - CLAIM_FOUND   : if the state is LIVE and we have visited its SCC before
 * - CLAIM_SUCCESS : if the state is LIVE and we have not yet visited its SCC
 * - CLAIM_DEAD    : if the state is part of a completed SCC
 */
char
uf_make_claim (const uf_t *uf, ref_t state, size_t worker)
{
    HREassert (worker < WORKER_BITS);
    sz_w w_id = 1ULL << worker;         // one p_set bit per worker
    ref_t f = uf_find (uf, state);      // current representative of state
    sz_w orig_pset;
    // is the state dead?
    if (atomic_read (&uf->array[f].uf_status) == UF_DEAD)
        return CLAIM_DEAD;
    // did we previously explore a state in this SCC?
    if ( (atomic_read (&uf->array[f].p_set) & w_id ) != 0) {
        return CLAIM_FOUND;
        // NB: cycle is possibly missed (in case f got updated)
        // - however, next iteration should detect this
    }
    // Add our worker ID to the set, and ensure it is the UF representative
    orig_pset = fetch_or (&uf->array[f].p_set, w_id);
    // f may have been merged concurrently; keep re-finding the root and
    // tagging it until f is a representative again (parent == 0), so the
    // root's p_set stays a superset of its members' worker sets.
    while ( atomic_read (&uf->array[f].parent) != 0 ) {
        f = uf_find (uf, f);
        fetch_or (&uf->array[f].p_set, w_id);
    }
    if (orig_pset == 0ULL)
        return CLAIM_FIRST;     // we were the first worker to touch this SCC
    else
        return CLAIM_SUCCESS;
}
/**
 * Per-worker setup for the CNDFS family: base NDFS setup plus the in/out
 * stacks and pink set; for non-CNDFS variants (ENDFS/NMCNDFS) additionally
 * initialize the recursive repair run.
 */
void
cndfs_local_setup (run_t *run, wctx_t *ctx)
{
    cndfs_alg_local_t *cloc = (cndfs_alg_local_t *) ctx->local;
    cloc->timer = RTcreateTimer ();
    ndfs_local_setup (run, ctx);    // base setup: color map, seed, stack
    size_t len = state_info_serialize_int_size (ctx->state);
    cloc->in_stack = dfs_stack_create (len);
    cloc->out_stack = dfs_stack_create (len);
    if ((get_strategy(run->alg) & Strat_TA) == 0) {
        // pink set: maps state ref -> stack depth (skipped for timed automata)
        cloc->pink = fset_create (sizeof(ref_t), sizeof(size_t), FSET_MIN_SIZE, 24);
    }
    // Plain CNDFS has no recursive repair strategy; done.
    if (get_strategy(run->alg) & Strat_CNDFS) return;
    if (run->shared->rec == NULL) {
        Abort ("Missing recursive strategy for %s!",
               key_search(strategies, get_strategy(run->alg)));
        return;     // not reached (Abort terminates)
    }
    HREassert (ctx->global != NULL, "Run global before local init");
    // We also need to finalize the worker initialization:
    ctx->global->rec = run_init (run->shared->rec, ctx->model);
    // Recursive strategy maybe unaware of its caller, so here we update its
    // recursive bits (top-level strategy always has rec_bits == 0, which
    // is ensured by ndfs_local_setup):
    ctx->global->rec->local->rec_bits = run->shared->color_bit_shift;
    cloc->rec = ctx->global->rec->local;
}
/* Resize callback for the per-entry arrays (len/next/data): extend the free
 * list over the new range and, when the fill ratio exceeds
 * FILL_MAX/FILL_OUTOF, double the hash table and redistribute all chains. */
static void len_resize(void*arg,void*old_array,int old_size,void*new_array,int new_size){
    DEBUG("extend during len resize from %d to %d",old_size,new_size);
    (void)old_array;
    (void)new_array;
    string_index_t si=(string_index_t)arg;
    expand_free_list(si,old_size,new_size);
    if ((si->mask*FILL_OUTOF)<(si->count*FILL_MAX)){
        int i,current,next,N;
        uint32_t hash;
        uint32_t len;
        int bucket;
        N=si->mask+1;   // old table size (a power of two)
        DEBUG("resizing table from %d to %d",N,N+N);
        si->mask=(si->mask<<1)+1;   // double: mask gains one bit
        si->table=(int*)RTrealloc(si->table,(si->mask+1)*sizeof(int));
        for(i=0;i<N;i++){
            current=si->table[i];
            // reset both the old bucket and its new sibling before re-chaining
            si->table[i]=END_OF_LIST;
            si->table[N+i]=END_OF_LIST;
            while(current!=END_OF_LIST){
                next=si->next[current];
                len=si->len[current];
                hash=SuperFastHash(si->data[current],len,0);
                bucket=hash&si->mask;
                // the extra mask bit can only send an entry to i or N+i
                HREassert(bucket==i||bucket==N+i,"error");
                si->next[current]=si->table[bucket];
                si->table[bucket]=current;
                DEBUG("moving %s from %d to %d",si->data[current],i,bucket);
                current=next;
            }
        }
    }
}
/**
 * Mark in `dep` every state slot that predicate expression `e` depends on:
 * direct state-variable references, and for state labels all slots set in
 * the label's row of the state-label dependency matrix.
 */
void
mark_predicate (model_t m, ltsmin_expr_t e, int *dep, ltsmin_parse_env_t env)
{
    if (!e) return;
    if (e->node_type == BINARY_OP) {
        mark_predicate (m, e->arg1, dep, env);
        mark_predicate (m, e->arg2, dep, env);
        return;
    }
    if (e->node_type == UNARY_OP) {
        mark_predicate (m, e->arg1, dep, env);
        return;
    }
    switch (e->token) {
    case PRED_TRUE:
    case PRED_FALSE:
    case PRED_NUM:
    case PRED_VAR:
    case PRED_CHUNK:
        break;                          // constants: no dependencies
    case PRED_EQ:
        mark_predicate (m, e->arg1, dep, env);
        mark_predicate (m, e->arg2, dep, env);
        break;
    case PRED_SVAR: {
        lts_type_t ltstype = GBgetLTStype (m);
        int nslots = lts_type_get_state_length (ltstype);
        if (e->idx < nslots) {          // state variable
            dep[e->idx] = 1;
        } else {                        // state label
            HREassert (e->idx < nslots + lts_type_get_state_label_count(ltstype));
            matrix_t *sl = GBgetStateLabelInfo (m);
            HREassert (nslots == dm_ncols(sl));
            for (int col = 0; col < nslots; col++) {
                if (dm_is_set (sl, e->idx - nslots, col))
                    dep[col] = 1;
            }
        }
        break;
    }
    default:
        LTSminLogExpr (error, "Unhandled predicate expression: ", e, env);
        HREabort (LTSMIN_EXIT_FAILURE);
    }
}
/**
 * On backtracking, remove `state` from the pink set and maintain the
 * worker's accepting-depth counter (decremented when `state` is accepting).
 */
static inline void
accepting_down (wctx_t* ctx, state_info_t *state, int accepting)
{
    alg_local_t        *loc = ctx->local;
    cndfs_alg_local_t  *cloc = (cndfs_alg_local_t *) ctx->local;
    size_t             *depth = NULL;
    int success = fset_delete_get_data (cloc->pink, NULL, &state->ref,
                                        (void**)&depth);
    // BUG FIX: assert success BEFORE dereferencing depth. On failure depth
    // stays NULL and the Debug statement below would dereference it.
    HREassert (success, "Not cyan: %zu??", loc->seed->ref);
    Debug ("Delled state %zu %s with depth %zu.\t\tCurrent accepting depth: %zu",
           state->ref, (accepting ? "(accepting)" : ""), *depth,
           cloc->accepting_depth);
    // Invariant: an accepting state sits exactly one level below the
    // current accepting depth; non-accepting states sit at it.
    HREassert (accepting == (*depth != cloc->accepting_depth));
    HREassert (!accepting || *depth == cloc->accepting_depth - 1,
               "Wrong level: %zu, depth=%zu, accepting depth=%zu",
               loc->seed->ref, *depth, cloc->accepting_depth);
    cloc->accepting_depth -= accepting;
    (void) loc;
}
/**
 * Bind string `str` (of length `len`) to index `pos`. A no-op when the
 * string is already at `pos`; aborts when it is bound to a different index.
 */
void SIputCAt(string_index_t si,const char*str,int len,int pos){
    int cur = SIlookupC (si, str, len);
    if (cur == pos)
        return;     // already bound to the requested index
    HREassert (cur == SI_INDEX_FAILED,
               "Cannot put %s at %d: already at %d", str, pos, cur);
    PutEntry (si, str, len, pos);
}
/**
 * Wrap `model` in a PINS layer that validates the dependency matrices
 * (read / must-write / may-write) during next-state calls. Returns the
 * wrapped model, or `model` unchanged when PINS_CORRECTNESS_CHECK is off.
 */
model_t
GBaddCheck (model_t model)
{
    HREassert (model != NULL, "No model");
    if (!PINS_CORRECTNESS_CHECK) return model;
    Print1 (info, "Matrix checking layer activated.");
    model_t check = GBcreateBase ();
    check_ctx_t *ctx = RTmalloc (sizeof(check_ctx_t));
    ctx->N = pins_get_state_variable_count (model);     // state slots
    ctx->K = pins_get_group_count (model);              // transition groups
    ctx->L = pins_get_edge_label_count (model);
    ctx->S = pins_get_state_label_count (model);
    ctx->src2 = RTmalloc(sizeof(int[ctx->N]));          // scratch state vector
    ctx->check_must = ci_create (ctx->N);
    ctx->read = (ci_list **) dm_rows_to_idx_table (GBgetDMInfoRead(model));
    ctx->must = (ci_list **) dm_rows_to_idx_table (GBgetDMInfoMustWrite(model));
    ctx->may = GBgetDMInfoMayWrite(model);
    ctx->stack = isba_create (ctx->N);
    ctx->parent = model;
    // Two distinct in-range "magic" probe values per slot; type_max/type_min
    // guarantee the range holds at least two values.
    ctx->magic[0] = RTmalloc(sizeof(int[ctx->N]));
    ctx->magic[1] = RTmalloc(sizeof(int[ctx->N]));
    for (int i = 0; i < ctx->N; i++) {
        int max = type_max (ctx, i);
        int min = type_min (ctx, i);
        int c = max - min;
        HREassert (c > 0, "Empty type range for slot: %d -- %s", i, str_slot(ctx, NULL, i));
        ctx->magic[0][i] = min;
        ctx->magic[1][i] = min + 1;
    }
    ctx->reentrent = 0;     // (sic: field name) guards re-entrant next-state calls
    GBsetContext (check, ctx);
    GBsetNextStateAll (check, check_all);
    GBsetNextStateLong (check, check_long);
    GBsetNextStateShort (check, check_short);
    //GBsetActionsLong (check, check_long);
    //GBsetActionsShort (check, check_short);
    GBinitModelDefaults (&check, model);
    return check;
}
/**
 * set the UF status for the representative of state to DEAD
 * Returns true iff THIS worker performed the LIVE -> DEAD transition
 * (at most one CAS can win); false when another worker got there first.
 */
bool
uf_mark_dead (const uf_t *uf, ref_t state)
{
    bool result = false;
    ref_t f = uf_find (uf, state);
    uf_status status = atomic_read (&uf->array[f].uf_status);
    // Spin until the representative is observed DEAD; only attempt the CAS
    // from the LIVE state (other statuses are transient and re-read).
    while ( status != UF_DEAD ) {
        if (status == UF_LIVE)
            result = cas (&uf->array[f].uf_status, UF_LIVE, UF_DEAD);
        status = atomic_read (&uf->array[f].uf_status);
    }
    // DEAD representatives are final: they may no longer be merged away.
    HREassert (atomic_read (&uf->array[f].parent) == 0,
               "the parent of a DEAD representative should not change");
    HREassert (uf_is_dead (uf, state), "state should be dead");
    return result;
}
/**
 * Record the POR proviso outcome for the current state on the stack bits:
 * NONEC => no cycle seen, CYCLE => clear the no-cycle bit,
 * SRCINV => source fully expanded (involved).
 */
static inline void
set_proviso_stack (wctx_t* ctx, alg_local_t* loc, cndfs_alg_local_t* cloc)
{
    if (cloc->successors == NONEC) {
        bitvector_set (&loc->stackbits, pred(ctx, NOCYCLE));
    } else if (cloc->successors == CYCLE) {
        bitvector_unset (&loc->stackbits, pred(ctx, NOCYCLE));
    } else if (cloc->successors == SRCINV) {
        bitvector_set (&loc->stackbits, pred(ctx, INVOL));
    } else {
        HREassert (false);
    }
}
/**
 * Mark every state variable and state label referenced by predicate
 * expression `e` as visible for partial-order reduction.
 */
void
mark_visible (model_t model, ltsmin_expr_t e, ltsmin_parse_env_t env)
{
    int *visibility = GBgetPorGroupVisibility (model);
    HREassert (visibility != NULL, "POR layer present, but no visibility info found.");
    if (!e) return;
    if (e->node_type == BINARY_OP) {
        mark_visible (model, e->arg1, env);
        mark_visible (model, e->arg2, env);
        return;
    }
    if (e->node_type == UNARY_OP) {
        mark_visible (model, e->arg1, env);
        return;
    }
    switch (e->token) {
    case PRED_TRUE:
    case PRED_FALSE:
    case PRED_NUM:
    case PRED_VAR:
    case PRED_CHUNK:
        break;                          // constants: nothing to mark
    case PRED_EQ:
        mark_visible (model, e->arg1, env);
        mark_visible (model, e->arg2, env);
        break;
    case PRED_SVAR: {
        int nvars = pins_get_state_variable_count (model);
        if (e->idx < nvars) {
            pins_add_state_variable_visible (model, e->idx);
        } else {                        // state label
            HREassert (e->idx < nvars + (int)pins_get_state_label_count(model));
            pins_add_state_label_visible (model, e->idx - nvars);
        }
        break;
    }
    default:
        LTSminLogExpr (error, "Unhandled predicate expression: ", e, env);
        HREabort (LTSMIN_EXIT_FAILURE);
    }
}
/**
 * Successor handler for the red DFS: detects accepting cycles, marks
 * dangerous states (ENDFS), pushes unexplored non-red successors, and
 * updates the CNDFS POR proviso bookkeeping.
 */
static void
endfs_handle_red (void *arg, state_info_t *successor, transition_info_t *ti, int seen)
{
    wctx_t             *ctx = (wctx_t *) arg;
    alg_local_t        *loc = ctx->local;
    cndfs_alg_local_t  *cloc = (cndfs_alg_local_t *) ctx->local;
    int                 onstack;
    /* Find cycle back to the seed */
    HREassert (cloc->accepting_depth > 0);
    size_t             *level = NULL;
    int at_seed = ctx->state->ref == loc->seed->ref;
    onstack = at_seed;
    if (!onstack) {
        onstack = fset_find (cloc->pink, NULL, &successor->ref, (void**)&level, false);
        HREassert (onstack != FSET_FULL);
    }
    // BUG FIX: `level` is only written by fset_find. When `onstack` stems
    // from the seed test, the original dereferenced an uninitialized
    // pointer (undefined behavior). The seed is accepting and lies below
    // the current accepting depth, so the seed case counts as a cycle.
    // NOTE(review): confirm the seed-case semantics against upstream.
    if ( onstack && (at_seed || *level < cloc->accepting_depth) )
        ndfs_report_cycle (ctx, ctx->model, loc->stack, successor);
    /* Mark states dangerous if necessary */
    if ( Strat_ENDFS == loc->strat &&
         pins_state_is_accepting(ctx->model, state_info_state(successor)) &&
         state_store_get_colors (successor->ref) != CRED )
        state_store_try_color(successor->ref, GDANGEROUS, loc->rec_bits);
    if ( !onstack && state_store_get_colors (successor->ref) != CRED ) {
        raw_data_t stack_loc = dfs_stack_push (loc->stack, NULL);
        state_info_serialize (successor, stack_loc);
    }
    // check proviso
    if (PINS_POR && proviso == Proviso_CNDFS && cloc->successors == NONEC) {
        if (ti->por_proviso != 0) {
            // state already fully expanded
            cloc->successors = SRCINV;
        } else if (onstack) {
            // cycle check (redundant inner re-test removed)
            cloc->successors = CYCLE;
        }
        // avoid full exploration (proviso is enforced later in backtrack)
        ti->por_proviso = 1;
    }
    (void) seen;
}
/**
 * Per-worker setup for NDFS: allocate the two-bit color map (indexed by
 * state ref), the optional all-red stack bits, the seed slot, and the
 * serialized-state DFS stack.
 */
void
ndfs_local_setup (run_t *run, wctx_t *ctx)
{
    alg_local_t        *loc = ctx->local;
    size_t              local_bits = 2;     // cyan/blue bits per state
    int res = bitvector_create (&loc->color_map, local_bits << dbs_size);
    HREassert (res != -1, "Failure to allocate a color_map bitvector.");
    if (all_red) {
        // FIX: scope the assert to the allocation it checks; previously it
        // re-tested the stale color_map result when all_red was disabled.
        res = bitvector_create (&loc->stackbits, MAX_STACK);
        HREassert (res != -1, "Failure to allocate a all_red bitvector.");
    }
    loc->rec_bits = 0;  // top-level strategy: no recursive color bits
    loc->strat = get_strategy (run->alg);
    loc->seed = state_info_create ();
    size_t len = state_info_serialize_int_size (ctx->state);
    loc->stack = dfs_stack_create (len);
}
/**
 * move a search_stack state to the tarjan_stack
 * Re-serializes the state on the tarjan_stack and repoints the
 * visited_states entry from the old stack slot to the new one.
 */
static void
move_tarjan (wctx_t *ctx, state_info_t *state, raw_data_t state_data)
{
    alg_local_t        *loc = ctx->local;
    raw_data_t         *addr;
    hash32_t            hash;
    int                 found;
    // add state to tarjan_stack
    raw_data_t tarjan_loc = dfs_stack_push (loc->tarjan_stack, NULL);
    state_info_serialize (state, tarjan_loc);
    // Update reference to the new stack
    hash = ref_hash (state->ref);
    found = fset_find (loc->visited_states, &hash, &state->ref,
                       (void**) &addr, false);
    // BUG FIX: assert `found` before dereferencing `addr` — when the lookup
    // fails, addr is uninitialized and *addr was undefined behavior.
    HREassert (found, "Could not find key in set");
    HREassert (*addr == state_data, "Wrong addr?");
    *addr = tarjan_loc;
}
/**
 * initializer for the UF array
 * Allocates 2^dbs_size cache-line-aligned, zeroed union-find entries plus
 * one extra, since slot [0] is reserved (not a valid state reference).
 */
uf_t *
uf_create ()
{
    HREassert (sizeof (uf_state_t) == sizeof (int[8]),
               "Improper structure for uf_state_t. Expected: size = %zu",
               sizeof (int[8]));
    uf_t *uf = RTmalloc (sizeof (uf_t));
    // FIX: allocate one entry extra since [0] is not used — the other
    // uf_create definition in this file documents and applies this; without
    // it, the highest reference indexes one past the allocation.
    uf->array = RTalignZero (sizeof(int[8]),
                             sizeof (uf_state_t) * ( (1ULL << dbs_size) + 1 ));
    return uf;
}
/**
 * Enums need their values to be present in the tables, hence the strict
 * lookup. Binds the chunk table index of `c` to e->num; for strict (enum)
 * lookups, a value that was not already in the table is a fatal error.
 */
static void
lookup_type_value (ltsmin_expr_t e, int type, const chunk c, model_t m, bool strict)
{
    HREassert (NULL != c.data, "Empty chunk");
    int count = GBchunkCount(m,type);
    e->num = GBchunkPut(m,type,c);
    if (strict && count != GBchunkCount(m,type)) // value was added
        // BUG FIX: strict lookup failure must be fatal — a Warning would
        // let the search continue comparing against a value that can never
        // occur in the model (the message itself states it is an error).
        Abort ("Value for identifier '%s' cannot be found in table for enum type '%s'.",
               c.data, lts_type_get_type(GBgetLTStype(m),type));
    e->lts_type = type;
}
/**
 * Register the sequential Tarjan SCC algorithm's callbacks on `run`.
 */
void
tarjan_shared_init (run_t *run)
{
    // SCC_STATE must occupy global-color bit 0.
    HREassert (SCC_STATE.g == 0);
    set_alg_local_init    (run->alg, tarjan_local_init);
    set_alg_global_init   (run->alg, tarjan_global_init);
    set_alg_global_deinit (run->alg, tarjan_global_deinit);
    set_alg_local_deinit  (run->alg, tarjan_local_deinit);
    set_alg_print_stats   (run->alg, tarjan_print_stats);
    set_alg_run           (run->alg, tarjan_run);
    set_alg_reduce        (run->alg, tarjan_reduce);
}
/**
 * initializer for the UF array
 * Allocates 2^dbs_size cache-line-aligned, zeroed union-find entries plus
 * one extra, since slot [0] is reserved (not a valid state reference).
 */
uf_t *
uf_create ()
{
    // Layout check: uf_state_t must be exactly one 32-byte-aligned unit
    // (8 ints) so entries do not straddle alignment boundaries.
    HREassert (sizeof (uf_state_t) == sizeof (int[8]),
               "Improper structure for uf_state_t. Expected: size = %zu",
               sizeof (int[8]));
    uf_t *uf = RTmalloc (sizeof (uf_t));
    // allocate one entry extra since [0] is not used
    uf->array = RTalignZero (sizeof(int[8]),
                             sizeof (uf_state_t) * ( (1ULL << dbs_size) + 1 ) );
    return uf;
}
/**
 * Look up `key` in the table; returns the fset_locate result (nonzero when
 * found, 0 when absent — note an absent key still returns 0 even after it
 * is inserted here). When `insert_absent` is set, an absent key is inserted,
 * reusing a tombstone when one was seen on the probe chain. If `data` is
 * non-NULL, *data is pointed at the key's in-table data area.
 */
int
fset_find (fset_t *dbs, mem_hash_t *m, void *key, void **data, bool insert_absent)
{
    HREassert (dbs->data_length == 0 || data);
    size_t              ref;
    size_t              tomb = NONE;
    size_t              k = dbs->key_length;
    mem_hash_t          mem = m == NULL ? EMPTY : *m;   // caller may memoize the hash
    int found = fset_locate (dbs, &mem, key, &ref, &tomb);
    if (insert_absent && !found) {
        // insert:
        if (tomb != NONE) {
            ref = tomb;     // reuse first tombstone on the probe chain
            dbs->tombs--;
        }
        if (dbs->key_length)
            memcpy (bucket(dbs, dbs->data, ref), key, k);
        *memoized(dbs, ref) = mem;
        dbs->load++;
        dbs->max_load = max (dbs->max_load, dbs->load);
        if (dbs->tombs << 1 > dbs->size) {
            bool res = resize (dbs, REHASH); // >50% tombs ==> rehash
            HREassert (res, "Cannot rehash table?");
            fset_locate (dbs, &mem, key, &ref, &tomb); // update ref
        } else if (((dbs->load + dbs->tombs) << 2) > dbs->size3) {
            if (!resize(dbs, GROW)) { // > 75% full ==> grow
                Debug ("Hash table almost full (size = %zu, load = %zu, tombs = %zu)",
                       dbs->size, dbs->load, dbs->tombs);
            }
            fset_locate (dbs, &mem, key, &ref, &tomb); // update ref
        }
    }
    if (data)
        *data = bucket(dbs, dbs->data, ref) + k;
    return found;
}
/**
 * Total cost of a (read, may-write) matrix pair: the sum of row_costs_
 * over all rows. Both matrices must have identical dimensions.
 */
static int
cost_ (matrix_t *r, matrix_t *mayw)
{
    HREassert( dm_ncols(r) == dm_ncols(mayw) && dm_nrows(r) == dm_nrows(mayw),
               "matrix sizes do not match");
    int total = 0;
    for (int row = 0; row < dm_nrows (r); row++)
        total += row_costs_ (r, mayw, row);
    return total;
}
/**
 * Render chunk `src` into `dst` (capacity dst_size, incl. terminating NUL).
 * Fully printable chunks become a double-quoted string with '"' and '\'
 * backslash-escaped; any non-printable byte switches to '#'-delimited hex
 * pairs. Oversized output is truncated and closed with "..." before the
 * final delimiter.
 */
void chunk2string (chunk src, size_t dst_size, char *dst) {
    Warning (debug, "encoding chunk of length %d", src.len);
    // Smallest legal output is the truncated hex form "##..." plus NUL.
    HREassert (dst_size >= sizeof "##...", "Chunk too small (sizeof '##...')");
    size_t k = 0;
    // One non-printable byte forces hex encoding of the whole chunk.
    for (size_t i = 0; i < src.len; ++i) {
        if (!isprint((unsigned char)src.data[i])) goto hex;
    }
    /* quotable: */
    size_t dang_esc = 0;
    dst[k++] = '"';
    for (size_t i = 0; i < src.len && k+1 < dst_size; ++i) {
        switch (src.data[i]) {
        case '"':
        case '\\':
            dst[k++] = '\\';
            // Note if the escape backslash lands on the truncation point,
            // so truncation does not split an escape pair.
            if (k == dst_size - sizeof "#...") dang_esc = 1;
            break;
        }
        dst[k++] = src.data[i];
    }
    if (k+2 < dst_size) {
        dst[k++] = '"';
    } else {
        // Out of room: truncate and close with "...".
        Warning(info, "chunk overflow: truncating to %zu characters", dst_size-1);
        k = dst_size - sizeof "\"...";
        if (dang_esc) --k; /* dangling escape: "\\"... */
        dst[k++] = '"';
        dst[k++] = '.';
        dst[k++] = '.';
        dst[k++] = '.';
    }
    dst[k++] = '\0';
    return;
hex:
    dst[k++] = '#';
    for (size_t i = 0; i < src.len && k+1 < dst_size; ++i) {
        // Two hex digits per byte, high nibble first.
        dst[k++] = HEX[(src.data[i]>>4)&(char)0x0F];
        dst[k++] = HEX[src.data[i]&(char)0x0F];
    }
    if (k+2 < dst_size) {
        dst[k++] = '#';
    } else {
        Warning(info, "chunk overflow: truncating to %zu characters", dst_size-1);
        k = dst_size - sizeof "#...";
        if (k % 2 == 0) --k; /* dangling hex digit: #1#... */
        dst[k++] = '#';
        dst[k++] = '.';
        dst[k++] = '.';
        dst[k++] = '.';
    }
    dst[k++] = '\0';
}
/**
 * Test-and-insert the current state into the pink set. Returns nonzero when
 * the state was already on the stack. On first insertion, records the
 * current accepting depth for the state and bumps the depth when the state
 * is accepting (reported through *accepting).
 */
static inline int
on_stack_accepting_up (wctx_t *ctx, int *accepting)
{
    cndfs_alg_local_t  *cloc = (cndfs_alg_local_t *) ctx->local;
    size_t             *depth;
    int found = fset_find (cloc->pink, NULL, &ctx->state->ref,
                           (void**)&depth, true);
    HREassert (found != FSET_FULL);
    if (!found) {
        *accepting =
            pins_state_is_accepting(ctx->model, state_info_state(ctx->state)) != 0;
        Debug ("Added state %zu %s with depth %zu accepting depth",
               ctx->state->ref, (*accepting ? "(accepting)" : ""),
               cloc->accepting_depth);
        *depth = cloc->accepting_depth;     // record current accepting depth
        cloc->accepting_depth += *accepting;
    }
    return found;
}
/**
 * Queue the permutor's current successor (perm->next) in the todo array for
 * later (re)ordered emission, recording its transition info and the
 * state_seen callback's verdict.
 */
static inline void
perm_todo (permute_t *perm, transition_info_t *ti, int seen)
{
    HREassert (perm->nstored < K+TODO_MAX);
    permute_todo_t     *todo = perm->todos + perm->nstored;
    perm->tosort[perm->nstored] = perm->nstored;
    todo->ref = perm->next->ref;
    // FIX: removed the dead store `todo->seen = seen;` — it was immediately
    // overwritten by the callback result below (which receives `seen`).
    todo->seen = perm->state_seen (perm->call_ctx, ti, todo->ref, seen);
    todo->lattice = perm->next->lattice;
    todo->ti.group = ti->group;
    todo->ti.por_proviso = ti->por_proviso;
    // Labels only needed for action detection, trace output, or TGBA.
    if (EXPECT_FALSE(act_detect || files[1] ||
                     (PINS_BUCHI_TYPE == PINS_BUCHI_TYPE_TGBA)))
        memcpy (todo->ti.labels, ti->labels, sizeof(int*[perm->labels]));
    perm->nstored++;
    ti->por_proviso = perm->por_proviso;
}
/**
 * move a stack state to the completed SCC set
 */
static void
move_scc (wctx_t *ctx, ref_t state)
{
    alg_local_t *loc = ctx->local;
    Debug ("Marking %zu as SCC", state);
    // Drop the stack bookkeeping entry for the completed state.
    hash32_t h = ref_hash (state);
    int removed = fset_delete (loc->visited_states, &h, &state);
    HREassert (removed, "Could not remove SCC state from set");
    // Publish SCC completion in the global state store.
    state_store_try_color (state, SCC_STATE, 0);
}
/**
 * Store string `str` (length s_len) at position `index`: the slot must be
 * on the free list. Makes a NUL-terminated private copy and links the entry
 * into its hash bucket chain.
 */
static void PutEntry(string_index_t si,const char*str,int s_len,int index){
    ensure_access (si->man, index);
    // Occupied slots have next >= 0 (chained); free slots are negative.
    HREassert (si->next[index] < 0,
               "Cannot put %s at %d: position occupied by %s",
               str, index, si->data[index]);
    cut_from_free_list (si, index);
    // Private NUL-terminated copy of the string.
    si->len[index] = s_len;
    si->data[index] = RTmalloc (s_len+1);
    memcpy (si->data[index], str, s_len);
    (si->data[index])[s_len] = 0;
    // Prepend the entry to its bucket chain.
    uint32_t h = SuperFastHash (str, s_len, 0);
    int b = h & si->mask;
    si->next[index] = si->table[b];
    si->table[b] = index;
    si->count++;
}
/**
 * PINS state_seen callback for CNDFS. Returns 1 when the state counts as
 * seen (globally seen, or in this worker's pink set), -1 when its low color
 * bits equal CCYAN (presumably: on a blue stack — confirm against the color
 * encoding), and 0 otherwise.
 */
int
cndfs_state_seen (void *ptr, transition_info_t *ti, ref_t ref, int seen)
{
    wctx_t             *ctx = (wctx_t *) ptr;
    cndfs_alg_local_t  *cloc = (cndfs_alg_local_t *) ctx->local;
    void               *level;
    if (!seen) {
        // Not globally seen: pink (locally red-visited) also counts as seen.
        seen = fset_find (cloc->pink, NULL, &ref, &level, false);
        HREassert (seen != FSET_FULL);
    }
    if (seen)
        return 1;
    // Low two bits of the color word hold the local NDFS color.
    uint32_t old = state_store_get_colors (ref) & 3;
    return -(old == CCYAN);
    (void) ti;
}
/**
 * Format value `val` of type `typeno` into res (capacity max): integer
 * formats print the number itself, chunk/enum formats print the chunk text.
 * Returns the snprintf result; aborts on unknown formats.
 */
int
print_chunk (model_t model, char *res, int max, int typeno, int val)
{
    data_format_t fmt = lts_type_get_format (GBgetLTStype(model), typeno);
    if (fmt == LTStypeDirect || fmt == LTStypeRange)
        return snprintf (res, max, "%d", val);
    if (fmt == LTStypeEnum || fmt == LTStypeChunk) {
        chunk c = pins_chunk_get (model, typeno, val);
        return snprintf (res, max, "%s", c.data);
    }
    HREassert(false);
    return -1;
}