/*
 * Centralized sense-reversing barrier: each arriving thread increments a
 * shared counter; the last arrival resets the counter and flips the global
 * sense word, releasing every spinner at once.
 *
 * barrier   - shared barrier object (value counter + global sense).
 * state     - per-thread barrier state holding this thread's local sense.
 * n_threads - number of participating threads.
 */
void ck_barrier_centralized(struct ck_barrier_centralized *barrier,
    struct ck_barrier_centralized_state *state,
    unsigned int n_threads)
{
	unsigned int sense, value;

	/*
	 * Every execution context has a sense associated with it.
	 * This sense is reversed when the barrier is entered. Every
	 * thread will spin on the global sense until the last thread
	 * reverses it.
	 */
	sense = state->sense = ~state->sense;
	value = ck_pr_faa_uint(&barrier->value, 1);
	/* faa returns the pre-increment value: the last arrival observes n_threads - 1. */
	if (value == n_threads - 1) {
		/* Reset the arrival counter for the next barrier round... */
		ck_pr_store_uint(&barrier->value, 0);
		/* ...and fence so the reset is visible before the release below. */
		ck_pr_fence_memory();
		/* Publishing the new sense releases every spinning thread. */
		ck_pr_store_uint(&barrier->sense, sense);
		return;
	}

	/* Order the sense loads in the spin loop after the fetch-and-add above. */
	ck_pr_fence_load();
	while (sense != ck_pr_load_uint(&barrier->sense))
		ck_pr_stall();
	/* Full fence: keep post-barrier accesses from being hoisted above the spin. */
	ck_pr_fence_memory();
	return;
}
/*
 * Fence smoke test: invoke every ck_pr fence primitive so each one either
 * compiles and executes, or crashes, on the target. The ck_pr_sub_int
 * "marker" operations bracket the fence sequences so they can be located
 * when inspecting the generated assembly.
 */
int main(void)
{
	int r = 0;

	/* Below serves as a marker. */
	ck_pr_sub_int(&r, 31337);

	/*
	 * This is a simple test to help ensure all fences compile or crash
	 * on target. Below are generated according to the underlying memory
	 * model's ordering.
	 */
	ck_pr_fence_atomic();
	ck_pr_fence_atomic_store();
	ck_pr_fence_atomic_load();
	ck_pr_fence_store_atomic();
	ck_pr_fence_load_atomic();
	ck_pr_fence_load();
	ck_pr_fence_load_store();
	ck_pr_fence_store();
	ck_pr_fence_store_load();
	ck_pr_fence_memory();
	ck_pr_fence_release();
	ck_pr_fence_acquire();
	ck_pr_fence_acqrel();
	ck_pr_fence_lock();
	ck_pr_fence_unlock();

	/* Below serves as a marker. */
	ck_pr_sub_int(&r, 31337);

	/* The following are generated assuming RMO (strict variants always emit a fence). */
	ck_pr_fence_strict_atomic();
	ck_pr_fence_strict_atomic_store();
	ck_pr_fence_strict_atomic_load();
	ck_pr_fence_strict_store_atomic();
	ck_pr_fence_strict_load_atomic();
	ck_pr_fence_strict_load();
	ck_pr_fence_strict_load_store();
	ck_pr_fence_strict_store();
	ck_pr_fence_strict_store_load();
	ck_pr_fence_strict_memory();
	ck_pr_fence_strict_release();
	ck_pr_fence_strict_acquire();
	ck_pr_fence_strict_acqrel();
	ck_pr_fence_strict_lock();
	ck_pr_fence_strict_unlock();

	return 0;
}
/*
 * Spawn a new thread running func(arg) and hand back its ph_thread_t.
 *
 * The boot data lives on this stack frame; ph_thread_boot publishes the
 * new thread's ph_thread_t pointer through data.thr, and we spin (with
 * exponential backoff) until that publication is observed. Returns NULL
 * if pthread_create fails.
 */
ph_thread_t *ph_thread_spawn(ph_thread_func func, void *arg)
{
	ph_thread_t *created = NULL;
	struct ph_thread_boot_data boot;
	pthread_t handle;
	ck_backoff_t eb = CK_BACKOFF_INITIALIZER;

	boot.thr = &created;
	boot.func = func;
	boot.arg = arg;

	if (pthread_create(&handle, NULL, ph_thread_boot, &boot)) {
		return NULL;
	}

	// semi busy wait for the TLS to be set up
	for (;;) {
		ck_pr_fence_load();
		if (ck_pr_load_ptr(&created) != 0) {
			break;
		}
		ck_backoff_eb(&eb);
	}

	return ck_pr_load_ptr(&created);
}