void
_ojc_val_cleanup() {
    ojcVal  v;
    ojcVal  head;
    Bstr    bhead;
    Bstr    b;

    // Detach both free lists while holding their busy flags, then free the
    // detached nodes outside the critical sections.
    while (atomic_flag_test_and_set(&free_vals.busy)) {
    }
    head = free_vals.head;
    free_vals.head = NULL;
    free_vals.tail = NULL;
    atomic_flag_clear(&free_vals.busy);

    while (atomic_flag_test_and_set(&free_bstrs.busy)) {
    }
    bhead = free_bstrs.head;
    free_bstrs.head = NULL;
    free_bstrs.tail = NULL;
    atomic_flag_clear(&free_bstrs.busy);

    while (0 != head) {
        v = head;
        head = v->next;
        free(v);
    }
    while (0 != bhead) {
        b = bhead;
        bhead = b->next;
        free(b);
    }
}
int avs_condvar_wait(avs_condvar_t *condvar,
                     avs_mutex_t *mutex,
                     avs_time_monotonic_t deadline) {
    // Precondition: mutex is locked by the current thread,
    // although we can't check whether it was this thread that locked it :(
    AVS_ASSERT(atomic_flag_test_and_set(&mutex->locked),
               "attempted to use a condition variable with an unlocked mutex");

    bool use_deadline = avs_time_monotonic_valid(deadline);
    bool flag_value;
    condvar_waiter_node_t waiter;

    insert_new_waiter(condvar, &waiter);
    avs_mutex_unlock(mutex);
    do {
        flag_value = atomic_flag_test_and_set(&waiter.waiting);
    } while (flag_value
             && (!use_deadline
                 || avs_time_monotonic_before(avs_time_monotonic_now(),
                                              deadline)));
    avs_mutex_lock(mutex);
    remove_waiter(condvar, &waiter);

    // flag_value == 0 -> the flag was cleared, so we've been woken up
    // flag_value == 1 -> it wasn't, so a timeout occurred
    return flag_value ? AVS_CONDVAR_TIMEOUT : 0;
}
#include <stdatomic.h>
#include <stdlib.h>

atomic_flag a = ATOMIC_FLAG_INIT;

int
main ()
{
  int b;

  if (!atomic_is_lock_free (&a))
    abort ();
  if (atomic_flag_test_and_set (&a))
    abort ();
  atomic_flag_clear_explicit (&a, memory_order_relaxed);
  if (atomic_flag_test_and_set (&a))
    abort ();
  atomic_flag_clear (&a);
  b = atomic_flag_test_and_set_explicit (&a, memory_order_seq_cst);
  if (!atomic_flag_test_and_set (&a) || b != 0)
    abort ();
  b = atomic_flag_test_and_set_explicit (&a, memory_order_acq_rel);
  if (!atomic_flag_test_and_set (&a) || b != 1)
    abort ();
  atomic_flag_clear_explicit (&a, memory_order_seq_cst);
  if (atomic_flag_test_and_set (&a))
    abort ();
  return 0;
}
static void
test_atomic_flag(void)
{
    atomic_flag flag = ATOMIC_FLAG_INIT;

    ovs_assert(atomic_flag_test_and_set(&flag) == false);
    ovs_assert(atomic_flag_test_and_set(&flag) == true);
    atomic_flag_clear(&flag);
    ovs_assert(atomic_flag_test_and_set(&flag) == false);
}
TEST(stdatomic, atomic_flag) {
  atomic_flag f = ATOMIC_FLAG_INIT;
  ASSERT_FALSE(atomic_flag_test_and_set(&f));
  ASSERT_TRUE(atomic_flag_test_and_set(&f));

  atomic_flag_clear(&f);

  ASSERT_FALSE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));
  ASSERT_TRUE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));

  atomic_flag_clear_explicit(&f, memory_order_relaxed);

  ASSERT_FALSE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));
}
void
_ojc_bstr_create_batch(size_t cnt, MList list) {
    Bstr    v;

    // Try to satisfy the batch from the free list first.
    if (free_bstrs.head != free_bstrs.tail) {
        while (atomic_flag_test_and_set(&free_bstrs.busy)) {
        }
        // Re-check after acquiring the busy flag; another thread may have
        // drained the list in the meantime.
        if (free_bstrs.head != free_bstrs.tail) {
            Bstr    prev = free_bstrs.head;

            list->head = prev;
            for (v = prev; 0 < cnt && v != free_bstrs.tail; cnt--, v = v->next) {
                prev = v;
            }
            free_bstrs.head = v;
            list->tail = prev;
            list->tail->next = 0;
        }
        atomic_flag_clear(&free_bstrs.busy);
    }
    // Allocate whatever the free list could not provide.
    for (; 0 < cnt; cnt--) {
        v = _ojc_bstr_create();
        v->next = 0;
        if (0 == list->head) {
            list->head = v;
        } else {
            list->tail->next = v;
        }
        list->tail = v;
    }
}
ojcVal
_ojc_val_create(ojcValType type) {
    ojcVal  val = NULL;

    // Carelessly check to see if a new val is needed. It doesn't matter if we
    // get it wrong here.
    if (NULL == free_vals.head || free_vals.head == free_vals.tail) {
        val = (ojcVal)malloc(sizeof(struct _ojcVal));
    } else {
        // Looks like we need to lock it down for a moment using the atomic
        // busy flag.
        while (atomic_flag_test_and_set(&free_vals.busy)) {
        }
        if (NULL == free_vals.head || free_vals.head == free_vals.tail) {
            val = (ojcVal)malloc(sizeof(struct _ojcVal));
        } else {
            val = free_vals.head;
            free_vals.head = free_vals.head->next;
        }
        atomic_flag_clear(&free_vals.busy);
    }
    val->next = NULL;
    val->key_type = STR_NONE;
    val->str_type = STR_NONE;
    val->members.head = NULL;
    val->members.tail = NULL;
    val->type = type;
    val->expect = NEXT_NONE;

    return val;
}
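The optimistic check plus re-check above treats the atomic_flag as a tiny spinlock guarding the free list. A minimal self-contained sketch of that idiom (the helper names are illustrative, not part of ojc):

#include <stdatomic.h>

/* Illustrative busy-flag spinlock; busy_lock/busy_unlock are
 * hypothetical names, not ojc APIs. */
static atomic_flag busy = ATOMIC_FLAG_INIT;

static void busy_lock(void) {
    // test_and_set returns the previous value; looping until it returns
    // false means this thread is the one that flipped it from clear to set.
    while (atomic_flag_test_and_set(&busy)) {
    }
}

static void busy_unlock(void) {
    atomic_flag_clear(&busy);
}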
void
_ojc_val_create_batch(size_t cnt, List vals) {
    ojcVal  v;

    // Take as many vals as possible from the free list.
    if (free_vals.head != free_vals.tail) {
        while (atomic_flag_test_and_set(&free_vals.busy)) {
        }
        if (free_vals.head != free_vals.tail) {
            ojcVal  prev = free_vals.head;

            vals->head = prev;
            for (v = prev; 0 < cnt && v != free_vals.tail; cnt--, v = v->next) {
                prev = v;
            }
            free_vals.head = v;
            vals->tail = prev;
            vals->tail->next = 0;
        }
        atomic_flag_clear(&free_vals.busy);
    }
    // Allocate the remainder.
    for (; 0 < cnt; cnt--) {
        v = _ojc_val_create(OJC_NULL);
        if (0 == vals->head) {
            vals->head = v;
        } else {
            vals->tail->next = v;
        }
        vals->tail = v;
    }
}
#include <stdatomic.h>

int main() {
    atomic_flag af = ATOMIC_FLAG_INIT;

    if (!atomic_flag_test_and_set(&af))
        atomic_flag_clear(&af);
    return 0;
}
TEST(stdatomic, init) {
  atomic_int v = ATOMIC_VAR_INIT(123);
  ASSERT_EQ(123, atomic_load(&v));

  atomic_init(&v, 456);
  ASSERT_EQ(456, atomic_load(&v));

  atomic_flag f = ATOMIC_FLAG_INIT;
  ASSERT_FALSE(atomic_flag_test_and_set(&f));
}
static void spinhaltIfAlreadyExiting(void) {
#ifndef LAUNCHER
  volatile int temp;

  if (atomic_flag_test_and_set(&thisLocaleAlreadyExiting)) {
    // spin forever if somebody else already set it to 1
    temp = 1;
    while (temp);
  }
#endif
}
#include <stdatomic.h>

int main() {
    atomic_flag f = ATOMIC_FLAG_INIT;
    atomic_flag* p = &f;
    memory_order m = memory_order_relaxed;

    // For position only.
    atomic_flag_test_and_set(p);
    atomic_flag_test_and_set_explicit(p, m);
    atomic_flag_clear(p);
    atomic_flag_clear_explicit(p, m);
    return 0;
}
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

int main(int argc, char** argv) {
    atomic_flag flag;
    atomic_flag_clear(&flag);

    assert( false == atomic_flag_test_and_set(&flag) );
    assert( true == atomic_flag_test_and_set(&flag) );

    atomic_thread_fence(memory_order_seq_cst);
    atomic_signal_fence(memory_order_seq_cst);

    {
        atomic_uint_least8_t tmp = ATOMIC_VAR_INIT(0);
        atomic_load(&tmp);
    }

    /*
    test(uint_least8_t);
    test(uint_least16_t);
    test(uint_least32_t);
    test(uint_least64_t);
    test(uintptr_t);
    */

    return 0;
}
void
_ojc_bstr_return(MList freed) {
    if (0 == freed->head) {
        return;
    }
    // Append the freed list to the global free list under the busy flag.
    while (atomic_flag_test_and_set(&free_bstrs.busy)) {
    }
    if (0 == free_bstrs.head) {
        free_bstrs.head = freed->head;
    } else {
        free_bstrs.tail->next = freed->head;
    }
    free_bstrs.tail = freed->tail;
    atomic_flag_clear(&free_bstrs.busy);
}
static void insert_new_waiter(avs_condvar_t *condvar,
                              condvar_waiter_node_t *waiter) {
    avs_mutex_lock(&condvar->waiters_mutex);

    // Initialize the waiting flag to true
    atomic_flag_clear(&waiter->waiting);
    bool value = atomic_flag_test_and_set(&waiter->waiting);
    assert(!value);
    (void) value;

    // Insert waiter as the first element on the list
    waiter->next = condvar->first_waiter;
    condvar->first_waiter = waiter;

    avs_mutex_unlock(&condvar->waiters_mutex);
}
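The wake-up side is not shown in these snippets. A hedged sketch of what a notify-all could look like with this waiter list (an illustration, not the actual avs_commons implementation): clearing a waiter's flag makes the test_and_set loop in avs_condvar_wait() observe false and return.

static void notify_all_sketch(avs_condvar_t *condvar) {
    avs_mutex_lock(&condvar->waiters_mutex);
    // Clearing the flag wakes the waiter: its next
    // atomic_flag_test_and_set() returns false.
    for (condvar_waiter_node_t *w = condvar->first_waiter; w; w = w->next) {
        atomic_flag_clear(&w->waiting);
    }
    avs_mutex_unlock(&condvar->waiters_mutex);
}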
inline void tal_lock(lock_t* lock) {
#ifdef WITH_POSIX_THREAD_PROCESS_SHARED
    if (pthread_mutex_lock(&(lock->mtx)) != 0) {
        fprintf(stderr, "Error: Could not lock\n");
    }
#else
#if LOCK_ROUND_ROBIN == 1
    // Wait for this process's turn.
    while (lock->current_idx != fork_idx) {
        sched_yield();
    }
#else
    // Spin on the atomic flag, yielding the CPU between attempts.
    while (atomic_flag_test_and_set(&lock->cat)) {
        sched_yield();
    }
#endif
#endif
}
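For completeness, the release side of the atomic_flag branch would be a single atomic_flag_clear. This is an assumed counterpart covering only that branch (the POSIX and round-robin variants are omitted):

inline void tal_unlock(lock_t* lock) {
    // Release the spinlock taken in tal_lock(); assumed sketch,
    // covering only the atomic_flag branch.
    atomic_flag_clear(&lock->cat);
}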
int platform_poll(void *priv)
{
    struct remoteproc *rproc = priv;
    struct remoteproc_priv *prproc;
    unsigned int flags;

    prproc = rproc->priv;
    while (1) {
        flags = metal_irq_save_disable();
        if (!(atomic_flag_test_and_set(&prproc->ipi_nokick))) {
            metal_irq_restore_enable(flags);
            remoteproc_get_notification(rproc, RSC_NOTIFY_ID_ANY);
            break;
        }
        _rproc_wait();
        metal_irq_restore_enable(flags);
    }
    return 0;
}
Bstr
_ojc_bstr_create() {
    Bstr    bstr;

    // Optimistic check; re-checked below while holding the busy flag.
    if (free_bstrs.head == free_bstrs.tail) {
        bstr = (Bstr)malloc(sizeof(union _Bstr));
    } else {
        while (atomic_flag_test_and_set(&free_bstrs.busy)) {
        }
        if (free_bstrs.head == free_bstrs.tail) {
            bstr = (Bstr)malloc(sizeof(union _Bstr));
        } else {
            bstr = free_bstrs.head;
            free_bstrs.head = free_bstrs.head->next;
        }
        atomic_flag_clear(&free_bstrs.busy);
    }
    *bstr->ca = '\0';

    return bstr;
}
void
_ojc_set_key(ojcVal val, const char *key, int klen) {
    struct _MList   freed_bstrs = { 0, 0 };

    free_key(val, &freed_bstrs);
    // Return any bstrs released by free_key() to the global free list.
    while (atomic_flag_test_and_set(&free_bstrs.busy)) {
    }
    if (0 != freed_bstrs.head) {
        if (0 == free_bstrs.head) {
            free_bstrs.head = freed_bstrs.head;
        } else {
            free_bstrs.tail->next = freed_bstrs.head;
        }
        free_bstrs.tail = freed_bstrs.tail;
    }
    atomic_flag_clear(&free_bstrs.busy);

    if (0 != key) {
        if (0 >= klen) {
            klen = strlen(key);
        }
        // Pick the smallest storage that fits the key: heap string,
        // bstr block, or the inline character array.
        if ((int)sizeof(union _Bstr) <= klen) {
            val->key_type = STR_PTR;
            val->key.str = strndup(key, klen);
            val->key.str[klen] = '\0';
        } else if ((int)sizeof(val->key.ca) <= klen) {
            val->key_type = STR_BLOCK;
            val->key.bstr = _ojc_bstr_create();
            memcpy(val->key.bstr->ca, key, klen);
            val->key.bstr->ca[klen] = '\0';
        } else {
            val->key_type = STR_ARRAY;
            memcpy(val->key.ca, key, klen);
            val->key.ca[klen] = '\0';
        }
    }
}
void
_ojc_val_return(List freed, MList freed_bstrs) {
    if (0 == freed->head) {
        return;
    }
    // Splice both freed lists back onto the global free lists while
    // holding the vals busy flag.
    while (atomic_flag_test_and_set(&free_vals.busy)) {
    }
    if (0 == free_vals.head) {
        free_vals.head = freed->head;
    } else {
        free_vals.tail->next = freed->head;
    }
    free_vals.tail = freed->tail;
    if (0 != freed_bstrs->head) {
        if (0 == free_bstrs.head) {
            free_bstrs.head = freed_bstrs->head;
        } else {
            free_bstrs.tail->next = freed_bstrs->head;
        }
        free_bstrs.tail = freed_bstrs->tail;
    }
    atomic_flag_clear(&free_vals.busy);
}
///////////////////////////////////////////////////////////////////////////
// Fast spinlock implementation. No backoff when busy.
///////////////////////////////////////////////////////////////////////////
CAtomicSpinLock::CAtomicSpinLock(std::atomic_flag& lock)
    : m_Lock(lock)
{
    while (atomic_flag_test_and_set(&m_Lock)) {} // Lock
}
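Only the acquiring constructor appears in the snippet; assuming the usual RAII pairing, the destructor would release the flag. A sketch of that assumed counterpart, not the original source:

CAtomicSpinLock::~CAtomicSpinLock()
{
    atomic_flag_clear(&m_Lock); // Unlock: next test_and_set sees clear
}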