/**
 * returns:
 * - CLAIM_FIRST   : if we initialized the state
 * - CLAIM_FOUND   : if the state is LIVE and we have visited its SCC before
 * - CLAIM_SUCCESS : if the state is LIVE and we have not yet visited its SCC
 * - CLAIM_DEAD    : if the state is part of a completed SCC
 */
char
uf_make_claim (const uf_t *uf, ref_t state, size_t worker)
{
    HREassert (worker < WORKER_BITS);
    sz_w            w_id = 1ULL << worker;
    ref_t           f    = uf_find (uf, state);
    sz_w            orig_pset;

    // is the state dead?
    if (atomic_read (&uf->array[f].uf_status) == UF_DEAD)
        return CLAIM_DEAD;

    // did we previously explore a state in this SCC?
    if ( (atomic_read (&uf->array[f].p_set) & w_id ) != 0) {
        return CLAIM_FOUND;
        // NB: cycle is possibly missed (in case f got updated)
        // - however, next iteration should detect this
    }

    // Add our worker ID to the set, and ensure it is the UF representative
    orig_pset = fetch_or (&uf->array[f].p_set, w_id);
    while ( atomic_read (&uf->array[f].parent) != 0 ) {
        f = uf_find (uf, f);
        fetch_or (&uf->array[f].p_set, w_id);
    }

    if (orig_pset == 0ULL)
        return CLAIM_FIRST;
    else
        return CLAIM_SUCCESS;
}
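A minimal sketch of how a search worker might dispatch on these return values in a UFSCC-style loop. Only `uf_make_claim` and the `CLAIM_*` constants come from the code above; `explore_state` and `report_cycle` are hypothetical helpers.

/* Hypothetical driver: dispatch on the claim result for a successor state.
 * explore_state and report_cycle are illustrative placeholders. */
static void
handle_successor (const uf_t *uf, ref_t succ, size_t worker)
{
    switch (uf_make_claim (uf, succ, worker)) {
        case CLAIM_DEAD:                        // completed SCC: prune
            break;
        case CLAIM_FIRST:                       // we initialized the state
        case CLAIM_SUCCESS:                     // live, SCC not yet visited by us
            explore_state (uf, succ, worker);   // hypothetical: push on search stack
            break;
        case CLAIM_FOUND:                       // live, SCC visited before: cycle
            report_cycle (uf, succ, worker);    // hypothetical: unite SCC candidates
            break;
    }
}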
    /* unlock; all updates so far must be released */
    store_release(&control->state, INACTIVE(ASYNC));

    if (load_relaxed(&stop_the_world_flag)) {
        mutex_lock(&control->inactive_wait_lock);
        cond_signal(&control->inactive_wait_cond);
        mutex_unlock(&control->inactive_wait_lock);
    }
}

#else /* !WITHOUT_MULTITHREAD && !WITHOUT_CONCURRENCY */

static void
control_leave(struct sml_control *control)
{
    unsigned int old;

    assert(IS_ACTIVE(load_relaxed(&control->state)));

    /* progress even phase to odd phase */
    /* unlock; all updates so far must be released */
    old = fetch_or(release, &control->state, INACTIVE_FLAG | 1);

    if (old == ACTIVE(PRESYNC1))
        sync1_action();
    else if (old == ACTIVE(PRESYNC2))
        sync2_action(control);
}
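The single `fetch_or` above releases the control state and advances the phase in one atomic step: pre-sync phases are even, and for any even `p`, `p | 1 == p + 1`. A standalone sketch of that bit trick, with made-up flag and mask values (the real SML# encoding may differ):

#include <assert.h>

/* Assumed encoding, for illustration only: low bits hold the phase,
 * a separate bit marks the control as inactive. */
#define INACTIVE_FLAG  0x80u
#define PHASE_MASK     0x7fu

int main(void)
{
    unsigned int state = 4;                    /* some even (pre-sync) phase, active */
    unsigned int old   = state;                /* what fetch_or would return */

    state |= INACTIVE_FLAG | 1;                /* one atomic op in the real code */

    assert ((state & PHASE_MASK) == old + 1);  /* even | 1 == even + 1 */
    assert (state & INACTIVE_FLAG);            /* ...and the control is now inactive */
    return 0;
}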
/**
 * unites two sets and ensures that their cyclic lists are combined to one list
 */
bool
uf_union (const uf_t *uf, ref_t a, ref_t b)
{
    ref_t           a_r, b_r, a_l, b_l, a_n, b_n, r, q;
    sz_w            q_w, r_w;

    while ( 1 ) {
        a_r = uf_find (uf, a);
        b_r = uf_find (uf, b);

        // find the representatives
        if (a_r == b_r) {
            return 0;
        }

        // decide on the new root (deterministically)
        // take the highest index as root
        r = a_r;
        q = b_r;
        if (a_r < b_r) {
            r = b_r;
            q = a_r;
        }

        // lock the non-root
        if ( !uf_lock_uf (uf, q) )
            continue;

        break;
    }

    // lock the list entries
    if ( !uf_lock_list (uf, a, &a_l) ) {
        // HREassert ( uf_is_dead(uf, a) && uf_sameset(uf, a, b) );
        return 0;
    }
    if ( !uf_lock_list (uf, b, &b_l) ) {
        // HREassert ( uf_is_dead(uf, b) && uf_sameset(uf, a, b) );
        uf_unlock_list (uf, a_l);
        return 0;
    }

    // swap the list entries
    a_n = atomic_read (&uf->array[a_l].list_next);
    b_n = atomic_read (&uf->array[b_l].list_next);

    if (a_n == 0) // singleton
        a_n = a_l;
    if (b_n == 0) // singleton
        b_n = b_l;

    atomic_write (&uf->array[a_l].list_next, b_n);
    atomic_write (&uf->array[b_l].list_next, a_n);

    // update parent
    atomic_write (&uf->array[q].parent, r);

    // only update worker set for r if q adds workers
    q_w = atomic_read (&uf->array[q].p_set);
    r_w = atomic_read (&uf->array[r].p_set);
    if ( (q_w | r_w) != r_w) {
        // update!
        fetch_or (&uf->array[r].p_set, q_w);
        while (atomic_read (&uf->array[r].parent) != 0) {
            r = uf_find (uf, r);
            fetch_or (&uf->array[r].p_set, q_w);
        }
    }

    // unlock
    uf_unlock_list (uf, a_l);
    uf_unlock_list (uf, b_l);
    uf_unlock_uf (uf, q);

    return 1;
}
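Where a caller might use this: in a UFSCC-style search, a CLAIM_FOUND result means the states on the search stack down to the found state lie on one cycle, so their sets can be united. A minimal sketch under that assumption; `stack_t`, `stack_peek`, and `stack_pop` are hypothetical, while `uf_union` and `uf_sameset` come from the code above.

/* Hypothetical cycle handler: unite every stack entry with `to` until the
 * top of the stack is already in the same set as `to`. */
static void
unite_cycle (const uf_t *uf, stack_t *stack, ref_t to)
{
    while ( !uf_sameset (uf, stack_peek (stack), to) ) {
        uf_union (uf, stack_pop (stack), to);
    }
}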
__host__ __device__
int_type operator|=(int_type val) volatile
{
    // fetch_or returns the pre-OR value; combine it with | (not +) to get
    // the post-OR value, since old + val != old | val when bits overlap
    return fetch_or(val) | val;
}
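The body above originally returned `fetch_or(val) + val`; because `fetch_or` yields the pre-OR value, `+` only matches the conventional `|=` result when `val` and the old value share no set bits. A two-assert check makes the difference concrete:

#include <assert.h>

int main(void)
{
    unsigned old = 0x3, val = 0x1;    /* bit 0 overlaps */
    assert ((old | val) == 0x3);      /* correct |= result */
    assert ((old + val) == 0x4);      /* what fetch_or(val) + val would return */
    return 0;
}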
T operator|=(T value)
{
    return fetch_or(value) | value;
}
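For comparison, the same idiom in portable C11 atomics; this is a sketch of the pattern, not code from either snippet above.

#include <stdatomic.h>

/* C11 version of the same read-modify-write shape: atomic_fetch_or
 * returns the OLD value, so the |= result is rebuilt with a final OR. */
static unsigned
atomic_or_assign (atomic_uint *obj, unsigned val)
{
    return atomic_fetch_or (obj, val) | val;
}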