Example #1
void
test_add ()
{
  v = 0;
  count = 1;

  __atomic_add_fetch (&v, count, __ATOMIC_RELAXED);
  if (v != 1)
    abort ();

  __atomic_fetch_add (&v, count, __ATOMIC_CONSUME);
  if (v != 2)
    abort ();

  __atomic_add_fetch (&v, 1, __ATOMIC_ACQUIRE);
  if (v != 3)
    abort ();

  __atomic_fetch_add (&v, 1, __ATOMIC_RELEASE);
  if (v != 4)
    abort ();

  __atomic_add_fetch (&v, count, __ATOMIC_ACQ_REL);
  if (v != 5)
    abort ();

  __atomic_fetch_add (&v, count, __ATOMIC_SEQ_CST);
  if (v != 6)
    abort ();
}
Example #2
void
__gcov_pow2_profiler_atomic (gcov_type *counters, gcov_type value)
{
  if (value == 0 || (value & (value - 1)))
    __atomic_fetch_add (&counters[0], 1, __ATOMIC_RELAXED);
  else
    __atomic_fetch_add (&counters[1], 1, __ATOMIC_RELAXED);
}
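The branch relies on the classic bit trick: for a nonzero value, value & (value - 1) clears the lowest set bit, so the expression is zero exactly when value is a power of two (e.g. 8 & 7 == 0, while 6 & 5 == 4). An illustrative helper capturing the same test, not part of libgcov:

static inline int
is_pow2 (long long v)
{
  return v != 0 && (v & (v - 1)) == 0;
}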
Example #3
File: common.c Project: tvieira/netdata
static inline void mmap_accounting(size_t size) {
#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS)
    __atomic_fetch_add(&memory_statistics.malloc_calls_made, 1, __ATOMIC_SEQ_CST);
    __atomic_fetch_add(&memory_statistics.mmapped_memory, size, __ATOMIC_SEQ_CST);
#else
    // this is for debugging - we don't care about locking here
    memory_statistics.memory_calls_made++;
    memory_statistics.mmapped_memory += size;
#endif
}
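The netdata snippets on this page all bump fields of a shared memory_statistics block. A sketch of the assumed shape, with the field list inferred from the calls shown here (the project's real definition may differ):

struct memory_statistics {
    volatile size_t memory_calls_made;
    volatile size_t malloc_calls_made;
    volatile size_t calloc_calls_made;
    volatile size_t free_calls_made;
    volatile size_t allocated_memory;
    volatile size_t mmapped_memory;
};
static struct memory_statistics memory_statistics;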
Example #4
void
__gcov_interval_profiler_atomic (gcov_type *counters, gcov_type value,
				 int start, unsigned steps)
{
  gcov_type delta = value - start;
  if (delta < 0)
    __atomic_fetch_add (&counters[steps + 1], 1, __ATOMIC_RELAXED);
  else if (delta >= steps)
    __atomic_fetch_add (&counters[steps], 1, __ATOMIC_RELAXED);
  else
    __atomic_fetch_add (&counters[delta], 1, __ATOMIC_RELAXED);
}
Example #5
File: common.c Project: tvieira/netdata
static inline void calloc_accounting(const char *file, const char *function, const unsigned long line, size_t size) {
#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS)
    __atomic_fetch_add(&memory_statistics.memory_calls_made, 1, __ATOMIC_SEQ_CST);
    __atomic_fetch_add(&memory_statistics.calloc_calls_made, 1, __ATOMIC_SEQ_CST);
    __atomic_fetch_add(&memory_statistics.allocated_memory, size, __ATOMIC_SEQ_CST);
#else
    // this is for debugging - we don't care about locking here
    memory_statistics.memory_calls_made++;
    memory_statistics.calloc_calls_made++;
    memory_statistics.allocated_memory += size;
#endif
    print_allocations(file, function, line);
}
Example #6
void test_atomic_bool (_Atomic _Bool *a)
{
  enum { SEQ_CST = __ATOMIC_SEQ_CST };
  
  __atomic_fetch_add (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_add." } */
  __atomic_fetch_sub (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_sub." } */
  __atomic_fetch_and (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_and." } */
  __atomic_fetch_xor (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_xor." } */
  __atomic_fetch_or (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_or." } */
  __atomic_fetch_nand (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_nand." } */

  __atomic_add_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_add_fetch." } */
  __atomic_sub_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_sub_fetch." } */
  __atomic_and_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_and_fetch." } */
  __atomic_xor_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_xor_fetch." } */
  __atomic_or_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_or_fetch." } */
  __atomic_nand_fetch (a, 1, SEQ_CST);   /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_nand_fetch." } */

  /* The following are valid and must be accepted.  */
  _Bool val = 0, ret = 0;
  __atomic_exchange (a, &val, &ret, SEQ_CST);
  __atomic_exchange_n (a, val, SEQ_CST);
  __atomic_compare_exchange (a, &val, &ret, !1, SEQ_CST, SEQ_CST);
  __atomic_compare_exchange_n (a, &val, ret, !1, SEQ_CST, SEQ_CST);
  __atomic_test_and_set (a, SEQ_CST);
  __atomic_clear (a, SEQ_CST);
}
Example #7
void atomic_add(struct list *l, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		__atomic_fetch_add(&l->val, 1, __ATOMIC_RELAXED);
		l = l->next;
	}
}
Example #8
File: common.c Project: tvieira/netdata
static inline void free_accounting(const char *file, const char *function, const unsigned long line, void *ptr) {
    (void)file;
    (void)function;
    (void)line;

    if(likely(ptr)) {
#if defined(HAVE_C___ATOMIC) && !defined(NETDATA_NO_ATOMIC_INSTRUCTIONS)
        __atomic_fetch_add(&memory_statistics.memory_calls_made, 1, __ATOMIC_SEQ_CST);
        __atomic_fetch_add(&memory_statistics.free_calls_made, 1, __ATOMIC_SEQ_CST);
#else
        // this is for debugging - we don't care about locking here
        memory_statistics.memory_calls_made++;
        memory_statistics.free_calls_made++;
#endif
    }
}
Example #9
/**
 * @see mpscifo.h
 */
void add(MpscFifo_t *pQ, Msg_t *pMsg) {
#if 0
  if (pMsg != NULL) {
    // Be sure pMsg->pNext == NULL
    pMsg->pNext = NULL;

    // Using builtins; Clang doesn't seem to support stdatomic.h
    Msg_t** ptr_pHead = &pQ->pHead;
    Msg_t *pPrevHead = __atomic_exchange_n(ptr_pHead, pMsg, __ATOMIC_SEQ_CST); //ACQ_REL);
    Msg_t** ptr_pNext = &pPrevHead->pNext;
    __atomic_store_n(ptr_pNext, pMsg, __ATOMIC_SEQ_CST); //RELEASE);
    int32_t* ptr_count = &pQ->count;
    __atomic_fetch_add(ptr_count, 1, __ATOMIC_SEQ_CST);
    // TODO: Support "blocking" which means use condition variable
  }
#else
  if (pMsg != NULL) {
    pMsg->pNext = NULL;
    void** ptr_pHead = (void**)&pQ->pHead;
    Msg_t* pPrevHead = __atomic_exchange_n(ptr_pHead, pMsg, __ATOMIC_SEQ_CST); //ACQ_REL);
    // NOTE: between the exchange above and the plain stores below, a consumer
    // can observe the old head with pNext still NULL; unlike the #if 0
    // variant, these stores are also not atomic.
    pPrevHead->pNext = pMsg;
    pQ->count += 1;
  }
#endif
}
Example #10
template <typename T>
inline T Atomic<T>::fetchAndAdd ( const T& val )
{
#ifdef HAVE_NEW_GCC_ATOMIC_OPS
   return __atomic_fetch_add(&_value, val, __ATOMIC_ACQ_REL);
#else
   return __sync_fetch_and_add( &_value, val );
#endif
}
Example #11
void lock(simple_futex_t *futex) {
    unsigned number = __atomic_fetch_add(&futex->number, 1, __ATOMIC_SEQ_CST);
    unsigned turn = futex->turn;

    while (number != turn) {
        syscall(__NR_futex, &futex->turn, FUTEX_WAIT, turn, NULL, 0, 0);
        turn = futex->turn;
    }
}
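The matching unlock is not part of the snippet; under the same assumed simple_futex_t layout it would advance turn and wake the sleepers. A minimal sketch (INT_MAX from <limits.h>; each woken thread re-checks its ticket):

void unlock(simple_futex_t *futex) {
    /* Publish the next turn, then wake every thread parked in FUTEX_WAIT. */
    __atomic_fetch_add(&futex->turn, 1, __ATOMIC_SEQ_CST);
    syscall(__NR_futex, &futex->turn, FUTEX_WAKE, INT_MAX, NULL, 0, 0);
}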
Example #12
File: device.c Project: Defragster/ty
hs_device *hs_device_ref(hs_device *dev)
{
    assert(dev);

#ifdef _MSC_VER
    InterlockedIncrement(&dev->refcount);
#else
    __atomic_fetch_add(&dev->refcount, 1, __ATOMIC_RELAXED);
#endif
    return dev;
}
Example #13
File: dummy_alloc.c Project: natsys/blog
void *
dummy_alloc(size_t size)
{
	int off = __atomic_fetch_add(&ptr, size, __ATOMIC_SEQ_CST);

	if (__builtin_expect(off + size > ALLOC_SZ, 0)) {
		fprintf(stderr, "out of memory, off=%d\n", off);
		return NULL;
	}

	return mem + off;
}
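For context, the allocator bumps a global offset into a fixed arena; a sketch of the assumed surroundings (names taken from the snippet, the size is hypothetical). Note the failure path never undoes the add, so ptr keeps growing once the arena is exhausted:

#define ALLOC_SZ (4 * 1024 * 1024)  /* hypothetical arena size */
static char mem[ALLOC_SZ];          /* the arena handed out */
static int ptr;                     /* next free offset, bumped atomically */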
Example #14
File: atomics.cpp Project: NVlabs/nvbio
int64 host_atomic_add(int64* value, const int64 op)
{
#if defined(__GNUC__)
    return __atomic_fetch_add( value, op, __ATOMIC_RELAXED );
#else
    // A function-local mutex would protect nothing; it must be shared.
    static Mutex mutex;
    ScopedLock lock( &mutex );

    const int64 old = *value;
    *value += op;
    return old;
#endif
}
Example #15
int simple(void) {
  ai += 1;
  bi = 3;

  _Atomic(int) tmp;
  tmp = ai;
  ai = bi;
  bi = tmp;
  printf("ai=%d bi=%d tmp=%d\n", ai, bi, tmp);

  __atomic_store_n(&i, -1, __ATOMIC_RELEASE);
  __atomic_store_n(&j, -2, __ATOMIC_SEQ_CST); //RELEASE); //__ATOMIC_SEQ_CST);

  __atomic_fetch_add(&i, 2, __ATOMIC_SEQ_CST);
  __atomic_fetch_add(&j, 1, __ATOMIC_SEQ_CST);

  //printf("before exchange j=%d i=%d\n", j, i);
  __atomic_exchange(&i, &j, &j, __ATOMIC_SEQ_CST);
  printf("after  exchange j=%d i=%d\n", j, i);

  return 0;
}
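The snippet relies on file-scope variables; a sketch of plausible declarations, with types inferred from use (ai and bi are assigned directly, so they must be _Atomic; i and j are touched only through the __atomic builtins):

#include <stdio.h>
static _Atomic int ai, bi;
static int i, j;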
Example #16
static inline void
__gcov_one_value_profiler_body (gcov_type *counters, gcov_type value,
				int use_atomic)
{
  /* counters[0] holds the tracked value, counters[1] its vote count
     (stream-majority scheme), counters[2] the total number of events.  */
  if (value == counters[0])
    counters[1]++;
  else if (counters[1] == 0)
    {
      counters[1] = 1;
      counters[0] = value;
    }
  else
    counters[1]--;

  if (use_atomic)
    __atomic_fetch_add (&counters[2], 1, __ATOMIC_RELAXED);
  else
    counters[2]++;
}
Example #17
__host__ __device__
typename enable_if<
  sizeof(Integer64) == 8,
  Integer64
>::type
atomic_fetch_add(Integer64 *x, Integer64 y)
{
#if defined(__CUDA_ARCH__)
  return atomicAdd(x, y);
#elif defined(__GNUC__)
  return __atomic_fetch_add(x, y, __ATOMIC_SEQ_CST);
#elif defined(_MSC_VER)
  return InterlockedExchangeAdd64(x, y);
#elif defined(__clang__)
  // Effectively unreachable: clang also defines __GNUC__ and takes the
  // branch above.
  return __c11_atomic_fetch_add(x, y, __ATOMIC_SEQ_CST);
#else
#error "No atomic_fetch_add implementation."
#endif
}
Example #18
int
main ()
{

  ac = __atomic_exchange_n (&bc, cc, __ATOMIC_RELAXED);
  if (bc != 1)
    abort ();

  as = __atomic_load_n (&bs, __ATOMIC_SEQ_CST);
  if (bs != 1)
    abort ();

  __atomic_store_n (&ac, bc, __ATOMIC_RELAXED);
  if (ac != 1)
    abort ();

  __atomic_compare_exchange_n (&as, &bs, cs, 0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
  if (as != 1)
    abort ();

  ac = __atomic_fetch_add (&cc, 15, __ATOMIC_SEQ_CST);
  if (cc != 1)
    abort ();

  /* This should be translated to __atomic_fetch_add for the library */
  as = __atomic_add_fetch (&cs, 10, __ATOMIC_RELAXED);

  if (cs != 1)
    abort ();

  /* The fake external function should return 10.  */
  if (__atomic_is_lock_free (4, 0) != 10)
    abort ();
   
  /* PR 51040 was caused by arithmetic code not patching up nand_fetch properly
     when used as an external function.  Look for proper return value here.  */
  ac = 0x3C;
  bc = __atomic_nand_fetch (&ac, 0x0f, __ATOMIC_RELAXED);
  if (bc != ac)
    abort ();

  return 0;
}
Example #19
File: zf_log.c Project: pjc42/zf_log
static INLINE int tcache_get(const struct timeval *const tv, struct tm *const tm)
{
	unsigned mode;
	mode = __atomic_load_n(&g_tcache_mode, __ATOMIC_RELAXED);
	if (0 == (mode & TCACHE_FLUID))
	{
		mode = __atomic_fetch_add(&g_tcache_mode, 1, __ATOMIC_ACQUIRE);
		if (0 == (mode & TCACHE_FLUID))
		{
			if (g_tcache_tv.tv_sec == tv->tv_sec)
			{
				*tm = g_tcache_tm;
				__atomic_sub_fetch(&g_tcache_mode, 1, __ATOMIC_RELEASE);
				return !0;
			}
			__atomic_or_fetch(&g_tcache_mode, TCACHE_STALE, __ATOMIC_RELAXED);
		}
		__atomic_sub_fetch(&g_tcache_mode, 1, __ATOMIC_RELEASE);
	}
	return 0;
}
Example #20
int
fair_futex_lock(fair_futex_t *lock) {

	uint32_t ticket;
	uint32_t old_futex;

	/*
	 * Possibly wrap: if we have more than 2^32 lockers waiting, the
	 * ticket value will wrap and two lockers will simultaneously be
	 * granted the lock.
	 */
	ticket = __atomic_fetch_add(&lock->fairlock.fair_lock_waiter, 1,
				      __ATOMIC_SEQ_CST);
retry:
	__sync_synchronize();
	old_futex = lock->futex;
	if(old_futex == (uint32_t)ticket / SPIN_CONTROL) {
//		printf("ticket %d spins (lo: %d)\n", ticket,
//		       lock->fairlock.fair_lock_owner);
		while (ticket != lock->fairlock.fair_lock_owner) ;
	}
	else {
//		printf("ticket %d sleeps (lo: %d)\n", ticket,
//		       lock->fairlock.fair_lock_owner);
		sys_futex((void*)&lock->futex, FUTEX_WAIT, old_futex, 0, 0, 0);
		goto retry;
	}

	/*
	 * Applications depend on a barrier here so that operations holding the
	 * lock see consistent data.
	 */
	__sync_synchronize();

//	printf("ticket %d got lock\n", ticket);

	return ticket;
}
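The release path is not shown. Inferring the scheme above, where the futex word holds the generation ticket / SPIN_CONTROL, a plausible (hypothetical) counterpart advances the owner and, once per generation, bumps the futex word and wakes the sleepers (INT_MAX from <limits.h>):

int
fair_futex_unlock(fair_futex_t *lock) {

	__sync_synchronize();
	if (++lock->fairlock.fair_lock_owner % SPIN_CONTROL == 0) {
		/* New generation: move the futex word forward and wake the
		 * sleepers so they re-check their turn. */
		__atomic_fetch_add(&lock->futex, 1, __ATOMIC_SEQ_CST);
		sys_futex((void*)&lock->futex, FUTEX_WAKE, INT_MAX, 0, 0, 0);
	}
	return 0;
}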
Example #21
File: fairlock.c Project: fedorova/misc
/*
 * fair_lock --
 *	Get a lock.
 */
int
fair_lock(fair_lock_t *lock)
{
	uint16_t ticket;
	int pause_cnt;

	/*
	 * Possibly wrap: if we have more than 64K lockers waiting, the ticket
	 * value will wrap and two lockers will simultaneously be granted the
	 * lock.
	 */
	ticket = __atomic_fetch_add(&lock->fair_lock_waiter, 1,
				      __ATOMIC_SEQ_CST);

	for (pause_cnt = 0; ticket != lock->fair_lock_owner;) {
		/*
		 * We failed to get the lock; pause before retrying and if we've
		 * paused enough, sleep so we don't burn CPU to no purpose. This
		 * situation happens if there are more threads than cores in the
		 * system and we're thrashing on shared resources.
		 */
#if 0
		if (++pause_cnt < SPINCOUNT)
			;
		else
			__sleep(0, 10);
#endif
	}

	/*
	 * Applications depend on a barrier here so that operations holding the
	 * lock see consistent data.
	 */
	__sync_synchronize();

	return (0);
}
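Again the unlock is omitted; for this spin-only variant it reduces to handing the ticket forward. A sketch, mirroring the barrier comment above:

void
fair_unlock(fair_lock_t *lock)
{
	/* Make the critical section's writes visible before the handoff. */
	__sync_synchronize();
	++lock->fair_lock_owner;
}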
Example #22
/**
 * Add an initialized tcb following pcur
 *
 * @param pnew is a tcb which will follow pcur.
 * @param pcur is a tcb in the list which will precede pnew
 *
 * @return 0 if successful
 */
STATIC ac_uint add_tcb_after(tcb_x86* pnew, tcb_x86* pcur) {
  ac_uint rslt;
  ac_uint flags = disable_intr();

  tcb_x86* ptmp = pcur->pnext_tcb;
  if (pnew->pnext_tcb == AC_NULL) {
#ifdef SUPPORT_READY_LENGTH
    __atomic_fetch_add(&ready_length, 1, __ATOMIC_RELAXED);
#endif

    pnew->pnext_tcb = ptmp;
    pnew->pprev_tcb = pcur;
    ptmp->pprev_tcb = pnew;
    pcur->pnext_tcb = pnew;
    //ac_printf("add_tcb_after: ret rslt=0, pcur=0x%x pnew=0x%x\n", pcur, pnew);
    rslt = 0;
  } else {
    //ac_printf("add_tcb_after: ret rslt=1, pcur=0x%x pnew=0x%x\n", pcur, pnew);
    rslt = 1;
  }

  restore_intr(flags);
  return rslt;
}
Example #23
void
test_fetch_add ()
{
  v = 0;
  count = 1;

  if (__atomic_fetch_add (&v, count, __ATOMIC_RELAXED) != 0)
    abort ();

  if (__atomic_fetch_add (&v, 1, __ATOMIC_CONSUME) != 1) 
    abort ();

  if (__atomic_fetch_add (&v, count, __ATOMIC_ACQUIRE) != 2)
    abort ();

  if (__atomic_fetch_add (&v, 1, __ATOMIC_RELEASE) != 3) 
    abort ();

  if (__atomic_fetch_add (&v, count, __ATOMIC_ACQ_REL) != 4) 
    abort ();

  if (__atomic_fetch_add (&v, 1, __ATOMIC_SEQ_CST) != 5) 
    abort ();
}
Example #24
File: atomic-p8.c Project: Alexpux/GCC
/* Test for the byte atomic operations on power8 using lbarx/stbcx.  */
char
char_fetch_add_relaxed (char *ptr, int value)
{
  return __atomic_fetch_add (ptr, value, __ATOMIC_RELAXED);
}
Example #25
void rt1_launcher(void *arg)
{
    int idx = (int)(intptr_t)arg;
    ABT_thread cur_thread;
    ABT_pool cur_pool;
    ABT_sched_config config;
    ABT_sched sched;
    size_t size;
    double t_start, t_end;

    ABT_sched_config_var cv_event_freq = {
        .idx = 0,
        .type = ABT_SCHED_CONFIG_INT
    };

    ABT_sched_config_var cv_idx = {
        .idx = 1,
        .type = ABT_SCHED_CONFIG_INT
    };

    ABT_sched_def sched_def = {
        .type = ABT_SCHED_TYPE_ULT,
        .init = sched_init,
        .run = sched_run,
        .free = sched_free,
        .get_migr_pool = NULL
    };

    /* Create a scheduler */
    ABT_sched_config_create(&config,
                            cv_event_freq, 10,
                            cv_idx, idx,
                            ABT_sched_config_var_end);
    ABT_sched_create(&sched_def, 1, &rt1_data->pool, config, &sched);

    /* Push the scheduler to the current pool */
    ABT_thread_self(&cur_thread);
    ABT_thread_get_last_pool(cur_thread, &cur_pool);
    ABT_pool_add_sched(cur_pool, sched);

    /* Free */
    ABT_sched_config_free(&config);

    t_start = ABT_get_wtime();
    while (1) {
        rt1_app(idx);

        ABT_pool_get_total_size(cur_pool, &size);
        if (size == 0) {
            ABT_sched_free(&sched);
            int rank;
            ABT_xstream_self_rank(&rank);
            printf("ES%d: finished\n", rank);
            ABT_mutex_lock(rt1_data->mutex);
            rt1_data->xstreams[rank] = ABT_XSTREAM_NULL;
            rt1_data->num_xstreams--;
            ABT_mutex_unlock(rt1_data->mutex);
            break;
        }

        t_end = ABT_get_wtime();
        if ((t_end - t_start) > g_timeout) {
            ABT_sched_finish(sched);
        }
    }
}

static void rt1_app(int eid)
{
    int i, num_comps;
    size_t size;
    ABT_thread cur_thread;
    ABT_pool cur_pool;

    ABT_thread_self(&cur_thread);
    ABT_thread_get_last_pool(cur_thread, &cur_pool);

    if (eid == 0) ABT_event_prof_start();

    num_comps = rt1_data->num_comps;
    for (i = 0; i < num_comps * 2; i += 2) {
        ABT_thread_create(rt1_data->pool, rt1_app_compute,
                          (void *)(intptr_t)(eid * num_comps * 2 + i),
                          ABT_THREAD_ATTR_NULL, NULL);
        ABT_task_create(rt1_data->pool, rt1_app_compute,
                        (void *)(intptr_t)(eid * num_comps * 2 + i + 1),
                        NULL);
    }

    do {
        ABT_thread_yield();

        /* If the size of cur_pool is zero, it means the stacked scheduler has
         * been terminated because of the shrinking event. */
        ABT_pool_get_total_size(cur_pool, &size);
        if (size == 0) break;

        ABT_pool_get_total_size(rt1_data->pool, &size);
    } while (size > 0);

    if (eid == 0) {
        ABT_event_prof_stop();

        int cnt = __atomic_exchange_n(&rt1_data->cnt, 0, __ATOMIC_SEQ_CST);
        double local_work = (double)(cnt * rt1_data->num_iters);
        ABT_event_prof_publish("ops", local_work, local_work);
    }
}

static void rt1_app_compute(void *arg)
{
    int pos = (int)(intptr_t)arg;
    int i;

    rt1_data->app_data[pos] = 0;
    for (i = 0; i < rt1_data->num_iters; i++) {
        rt1_data->app_data[pos] += sin((double)pos);
    }

    __atomic_fetch_add(&rt1_data->cnt, 1, __ATOMIC_SEQ_CST);
}
Example #26
void __VERIFIER_atomic_inc_r()
{
  unsigned int value = __atomic_fetch_add(&r, 1, __ATOMIC_RELAXED);
  assume(value != -1); // to avoid overflows
}
Example #27
int
atomic_fetch_add_ACQUIRE (int a)
{
  return __atomic_fetch_add (&v, a, __ATOMIC_ACQUIRE);
}
Example #28
File: quad-atomic.c Project: 0day-ci/gcc
static __int128_t
quad_fetch_add (__int128_t *ptr, __int128_t value)
{
  return __atomic_fetch_add (ptr, value, __ATOMIC_ACQUIRE);
}
Example #29
File: atomic-p8.c Project: Alexpux/GCC
/* Test for the half word atomic operations on power8 using lharx/sthcx.  */
short
short_fetch_add_relaxed (short *ptr, int value)
{
  return __atomic_fetch_add (ptr, value, __ATOMIC_RELAXED);
}
Example #30
void test_presence(void)
{
  // CHECK-LABEL: @test_presence
  // CHECK: atomicrmw add i32* {{.*}} seq_cst
  __atomic_fetch_add(&i, 1, memory_order_seq_cst);
  // CHECK: atomicrmw sub i32* {{.*}} seq_cst
  __atomic_fetch_sub(&i, 1, memory_order_seq_cst);
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  int r;
  __atomic_load(&i, &r, memory_order_seq_cst);
  // CHECK: store atomic i32 {{.*}} seq_cst
  r = 0;
  __atomic_store(&i, &r, memory_order_seq_cst);

  // CHECK: __atomic_fetch_add_8
  __atomic_fetch_add(&l, 1, memory_order_seq_cst);
  // CHECK: __atomic_fetch_sub_8
  __atomic_fetch_sub(&l, 1, memory_order_seq_cst);
  // CHECK: __atomic_load_8
  long long rl;
  __atomic_load(&l, &rl, memory_order_seq_cst);
  // CHECK: __atomic_store_8
  rl = 0;
  __atomic_store(&l, &rl, memory_order_seq_cst);
}