Example #1
// Expand #1:
//   Wait if another thread already holds the expansion token; return if this thread is the first.
int tree_simple_begin_expand(TreeBlock* parent, BlockOffset parent_offset, TreeBlock **child) {
  BlockBits expansion_before = __atomic_fetch_or(
      &parent->expansion,
      BIT(parent_offset),
      __ATOMIC_ACQ_REL);
  if (!TEST_BIT(expansion_before, parent_offset)) {
    *child = NULL;
    return EXPAND_STATUS_FIRST;  // we've got to do it
  }

  ChildInfo* cinfo = &parent->children[parent_offset];
  TreeBlock* c;

  // Wait if no one has done it yet.
  if ((c = __atomic_load_n(&cinfo->child, __ATOMIC_ACQUIRE)) == NULL) {
    // We've got to wait.
    for (;;) {
      EventCountKey key = event_count_prepare(&cinfo->event_count);
      if ((c = __atomic_load_n(&cinfo->child, __ATOMIC_ACQUIRE)) != NULL) {
        event_count_cancel(&cinfo->event_count);
        break;
      }
      event_count_wait(&cinfo->event_count, key);
    }
  }

  *child = c;
  return EXPAND_STATUS_DONE;
}
Example #2
int
main ()
{
  v = 0;
  count = 0;

  if (__atomic_load_n (&v, __ATOMIC_RELAXED) != count++) 
    abort(); 
  else 
    v++;

  if (__atomic_load_n (&v, __ATOMIC_ACQUIRE) != count++) 
    abort(); 
  else 
    v++;

  if (__atomic_load_n (&v, __ATOMIC_CONSUME) != count++) 
    abort(); 
  else 
    v++;

  if (__atomic_load_n (&v, __ATOMIC_SEQ_CST) != count++) 
    abort(); 
  else 
    v++;

  /* Now test the generic variants.  */

  __atomic_load (&v, &count, __ATOMIC_RELAXED);
  if (count != v)
    abort(); 
  else 
    v++;

  __atomic_load (&v, &count, __ATOMIC_ACQUIRE);
  if (count != v)
    abort(); 
  else 
    v++;

  __atomic_load (&v, &count, __ATOMIC_CONSUME);
  if (count != v)
    abort(); 
  else 
    v++;

  __atomic_load (&v, &count, __ATOMIC_SEQ_CST);
  if (count != v)
    abort(); 
  else 
    v++;


  return 0;
}
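The test above relies on globals that are outside this excerpt. A minimal sketch of the missing declarations, assuming nothing beyond what the snippet itself uses (the names v and count come from the code; the exact original declarations may differ):

extern void abort (void);

/* Shared variables exercised by the __atomic_load / __atomic_load_n calls above.  */
int v;
int count;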
Example #3
UTYPE
SIZE(libat_load) (UTYPE *mptr, int smodel)
{
  if (maybe_specialcase_relaxed(smodel))
    return __atomic_load_n (mptr, __ATOMIC_RELAXED);
  else if (maybe_specialcase_acqrel(smodel))
    /* Note that REL and ACQ_REL are not valid for loads.  */
    return __atomic_load_n (mptr, __ATOMIC_ACQUIRE);
  else
    return __atomic_load_n (mptr, __ATOMIC_SEQ_CST);
}
Example #4
bool
gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
				   gomp_barrier_state_t state)
{
  unsigned int generation, gen;

  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      /* BAR_CANCELLED should never be set in state here, because
	 cancellation means that at least one of the threads has been
	 cancelled, thus on a cancellable barrier we should never see
	 all threads arrive.  */
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      bar->awaited = bar->total;
      team->work_share_cancelled = 0;
      if (__builtin_expect (team->task_count, 0))
	{
	  gomp_barrier_handle_tasks (state);
	  state &= ~BAR_WAS_LAST;
	}
      else
	{
	  state += BAR_INCR - BAR_WAS_LAST;
	  __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
	  futex_wake ((int *) &bar->generation, INT_MAX);
	  return false;
	}
    }

  if (__builtin_expect (state & BAR_CANCELLED, 0))
    return true;

  generation = state;
  do
    {
      do_wait ((int *) &bar->generation, generation);
      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
      if (__builtin_expect (gen & BAR_CANCELLED, 0))
	return true;
      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
	{
	  gomp_barrier_handle_tasks (state);
	  gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
	}
      generation |= gen & BAR_WAITING_FOR_TASK;
    }
  while (gen != state + BAR_INCR);

  return false;
}
Example #5
bool
SIZE(libat_test_and_set) (UTYPE *mptr, int smodel)
{
  UWORD wval, woldval, shift, *wptr, t;

  pre_barrier (smodel);

  if (N < WORDSIZE)
    {
      wptr = (UWORD *)((uintptr_t)mptr & -WORDSIZE);
      shift = SIZE(INVERT_MASK);
    }
  else
    {
      wptr = (UWORD *)mptr;
      shift = 0;
    }

  wval = (UWORD)__GCC_ATOMIC_TEST_AND_SET_TRUEVAL << shift;
  woldval = __atomic_load_n (wptr, __ATOMIC_RELAXED);
  do
    {
      t = woldval | wval;
    }
  while (!atomic_compare_exchange_w (wptr, &woldval, t, true,
				     __ATOMIC_RELAXED, __ATOMIC_RELAXED));

  post_barrier (smodel);
  return woldval != 0;
}
Example #6
bool
sol_worker_thread_impl_cancel_check(const void *handle)
{
    const struct sol_worker_thread_posix *thread = handle;

    return __atomic_load_n(&thread->cancel, __ATOMIC_SEQ_CST);
}
Example #7
static void *
outer_thread (void *closure)
{
  pthread_t *threads = xcalloc (sizeof (*threads), inner_thread_count);
  while (!__atomic_load_n (&termination_requested, __ATOMIC_RELAXED))
    {
      pthread_barrier_t barrier;
      xpthread_barrier_init (&barrier, NULL, inner_thread_count + 1);
      for (int i = 0; i < inner_thread_count; ++i)
        {
          void *(*func) (void *);
          if ((i  % 2) == 0)
            func = malloc_first_thread;
          else
            func = wait_first_thread;
          threads[i] = xpthread_create (NULL, func, &barrier);
        }
      xpthread_barrier_wait (&barrier);
      for (int i = 0; i < inner_thread_count; ++i)
        xpthread_join (threads[i]);
      xpthread_barrier_destroy (&barrier);
    }

  free (threads);

  return NULL;
}
Example #8
/**
 * @return: the approximate number of threads currently
 * on the ready queue. For diagnostic purposes ONLY.
 */
ac_u32 get_ready_length(void) {
#ifdef SUPPORT_READY_LENGTH
  return __atomic_load_n(&ready_length, __ATOMIC_RELAXED);
#else
  return 0;
#endif
}
Example #9
/**
 * Allocate size bytes
 *
 * @param: size is the number of bytes in each item
 *
 * @return: pointer to the memory
 */
void* ac_malloc(ac_size_t size) {
  ac_size_t cur_idx;
  ac_size_t next_idx;
  ac_bool ok;

  // We must return AC_NULL if size is 0
  if (size == 0) {
    return AC_NULL;
  }

  // Round up to the next multiple of MEM_ALIGN
  size = (size + MEM_ALIGN - 1) & ~(MEM_ALIGN - 1);

  // Loop until we can allocate or we have no more memory
  do {
    cur_idx = __atomic_load_n(&idx, __ATOMIC_ACQUIRE);

    next_idx = cur_idx + size;
    if ((next_idx >= MAX_IDX) || (next_idx <= cur_idx)) {
      return AC_NULL;
    }
    // Expect idx to still equal cur_idx; if another thread advanced it, retry.
    ok = __atomic_compare_exchange_n(&idx, &cur_idx, next_idx,
        AC_TRUE, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
  } while (!ok);

  return &mem_array[cur_idx];
}
Example #10
/**
 * Let the timer modify the tcb to be scheduled.
 * Interrupts disabled!
 */
STATIC tcb_x86* timer_scheduler_intr_disabled(tcb_x86* next_tcb, ac_u64 now) {
  //ac_printf("timer_scheduler_intr_disabled:+ next_tcb=%p now=%ld\n");
  tcb_x86* pwaiting_tcb = waiting_tcb_peek_intr_disabled();
  if (pwaiting_tcb != AC_NULL) {

    // There is a tcb with a timer waiting to expire
    ac_u64 waiting_deadline = __atomic_load_n(&pwaiting_tcb->waiting_deadline, __ATOMIC_ACQUIRE);
    if (now >= waiting_deadline) {
      // Timed out so schedule it.
      add_tcb_before(pwaiting_tcb, next_tcb);
      next_tcb = pwaiting_tcb;
      next_tcb->slice_deadline = now + next_tcb->slice;
      waiting_tcb_remove_intr_disabled();
    } else if ((next_tcb->slice_deadline) > waiting_deadline) {
      // Shorten the next_tcb's slice_deadline so we'll more accurately
      // start the waiting tcb.
      next_tcb->slice_deadline = waiting_deadline;
    } else {
      // The waiting tcb's deadline is in the future; do nothing.
    }
  } else {
    // There are no tcbs waiting for a timer to expire.
  }

  //ac_printf("timer_scheduler_intr_disabled:+ next_tcb=%p\n", next_tcb);
  return next_tcb;
}
Example #11
static void* thread_proc(void* param) {
    int robot_n = (int) (intptr_t) param;  // thread index passed through the pointer argument
    for (;;) {
        long long start = microseconds();
        long long count = 0;
        while (microseconds() - start < 1000000) {
            long long iterations = 10000;
            for (int i = 0; i < iterations; ++i) {
                while (__atomic_load_n(&current, __ATOMIC_SEQ_CST) != robot_n * 2) {
#ifdef USE_PAUSE
                    _mm_pause();
#endif
                }
                __atomic_store_n(&current, robot_n * 2 + 1, __ATOMIC_SEQ_CST);
                //printf("%d\n", robot_n);
                __atomic_store_n(&current, (robot_n + 1) % thread_count * 2, __ATOMIC_SEQ_CST);
            }
            count += iterations;
        }
        if (robot_n == 0) {
            long long dns = 1000ll * (microseconds() - start);
            long long ns_per_call = dns / count;
            printf("%lld ns per step\n", ns_per_call);
        }
    }
}
Example #12
File: exch_n.c  Project: 0day-ci/gcc
UTYPE
SIZE(libat_exchange) (UTYPE *mptr, UTYPE newval, int smodel)
{
  UWORD mask, shift, woldval, wnewval, t, *wptr;

  pre_barrier (smodel);

  if (N < WORDSIZE)
    {
      wptr = (UWORD *)((uintptr_t)mptr & -WORDSIZE);
      shift = (((uintptr_t)mptr % WORDSIZE) * CHAR_BIT) ^ SIZE(INVERT_MASK);
      mask = SIZE(MASK) << shift;
    }
  else
    {
      wptr = (UWORD *)mptr;
      shift = 0;
      mask = -1;
    }

  wnewval = (UWORD)newval << shift;
  woldval = __atomic_load_n (wptr, __ATOMIC_RELAXED);
  do
    {
      t = (woldval & ~mask) | wnewval;
    }
  while (!atomic_compare_exchange_w (wptr, &woldval, t, true,
				     __ATOMIC_RELAXED, __ATOMIC_RELAXED));

  post_barrier (smodel);
  return woldval >> shift;
}
Example #13
__attribute__((__always_inline__)) static inline bool ReallyWaitForConditionVariable(volatile uintptr_t *puControl, _MCFCRT_ConditionVariableUnlockCallback pfnUnlockCallback, _MCFCRT_ConditionVariableRelockCallback pfnRelockCallback, intptr_t nContext, size_t uMaxSpinCountInitial, bool bMayTimeOut, uint64_t u64UntilFastMonoClock, bool bRelockIfTimeOut){
	size_t uMaxSpinCount, uSpinMultiplier;
	bool bSignaled, bSpinnable;
	{
		uintptr_t uOld, uNew;
		uOld = __atomic_load_n(puControl, __ATOMIC_RELAXED);
		do {
			const size_t uSpinFailureCount = (uOld & MASK_SPIN_FAILURE_COUNT) / SPIN_FAILURE_COUNT_ONE;
			if(uMaxSpinCountInitial > MIN_SPIN_COUNT){
				uMaxSpinCount = (uMaxSpinCountInitial >> uSpinFailureCount) | MIN_SPIN_COUNT;
				uSpinMultiplier = MAX_SPIN_MULTIPLIER >> uSpinFailureCount;
			} else {
				uMaxSpinCount = uMaxSpinCountInitial;
				uSpinMultiplier = 0;
			}
			bSignaled = (uOld & MASK_THREADS_RELEASED) != 0;
			bSpinnable = false;
			if(!bSignaled){
				if(uMaxSpinCount != 0){
					const size_t uThreadsSpinning = (uOld & MASK_THREADS_SPINNING) / THREADS_SPINNING_ONE;
					bSpinnable = uThreadsSpinning < THREADS_SPINNING_MAX;
				}
				if(!bSpinnable){
					break;
				}
				uNew = uOld + THREADS_SPINNING_ONE;
			} else {
				const bool bSpinFailureCountDecremented = uSpinFailureCount != 0;
				uNew = uOld - THREADS_RELEASED_ONE - bSpinFailureCountDecremented * SPIN_FAILURE_COUNT_ONE;
			}
		} while(_MCFCRT_EXPECT_NOT(!__atomic_compare_exchange_n(puControl, &uOld, uNew, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)));
	}
Example #14
bool
ring_array_empty(const struct ring_array *arr)
{
    /* take snapshot of wpos, since another thread might be pushing */
    uint32_t wpos = __atomic_load_n(&(arr->wpos), __ATOMIC_RELAXED);
    return ring_array_nelem(arr->rpos, wpos, arr->cap) == 0;
}
Example #15
void
GOMP_doacross_post (long *counts)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_work_share *ws = thr->ts.work_share;
  struct gomp_doacross_work_share *doacross = ws->doacross;
  unsigned long ent;
  unsigned int i;

  if (__builtin_expect (doacross == NULL, 0))
    {
      __sync_synchronize ();
      return;
    }

  if (__builtin_expect (ws->sched == GFS_STATIC, 1))
    ent = thr->ts.team_id;
  else if (ws->sched == GFS_GUIDED)
    ent = counts[0];
  else
    ent = counts[0] / doacross->chunk_size;
  unsigned long *array = (unsigned long *) (doacross->array
					    + ent * doacross->elt_sz);

  if (__builtin_expect (doacross->flattened, 1))
    {
      unsigned long flattened
	= (unsigned long) counts[0] << doacross->shift_counts[0];

      for (i = 1; i < doacross->ncounts; i++)
	flattened |= (unsigned long) counts[i]
		     << doacross->shift_counts[i];
      flattened++;
      if (flattened == __atomic_load_n (array, MEMMODEL_ACQUIRE))
	__atomic_thread_fence (MEMMODEL_RELEASE);
      else
	__atomic_store_n (array, flattened, MEMMODEL_RELEASE);
      return;
    }

  __atomic_thread_fence (MEMMODEL_ACQUIRE);
  for (i = doacross->ncounts; i-- > 0; )
    {
      if (counts[i] + 1UL != __atomic_load_n (&array[i], MEMMODEL_RELAXED))
	__atomic_store_n (&array[i], counts[i] + 1UL, MEMMODEL_RELEASE);
    }
}
Example #16
static void barrier_wait(uint32_t *barrier)
{
	uint32_t val = __atomic_sub_fetch(barrier, 1, __ATOMIC_RELAXED);
	while (val != 0)
		val = __atomic_load_n(barrier, __ATOMIC_RELAXED);

	__atomic_thread_fence(__ATOMIC_SEQ_CST);
}
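A hypothetical usage sketch for the spin barrier above (not from the original source; the worker and start_barrier names are assumptions): the counter must be initialized to the number of participating threads, each participant calls barrier_wait() once, and the counter is single-use as written.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t start_barrier = 2;	/* two participating threads */

static void *worker(void *arg)
{
	(void) arg;
	/* ... per-thread setup ... */
	barrier_wait(&start_barrier);	/* neither thread proceeds until both arrive */
	return NULL;
}

int main(void)
{
	pthread_t t;
	if (pthread_create(&t, NULL, worker, NULL) != 0)
		return 1;
	worker(NULL);			/* main thread is the second participant */
	pthread_join(t, NULL);
	puts("both threads passed the barrier");
	return 0;
}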
Example #17
inline T Atomic<T>::value() const
{
#ifdef HAVE_NEW_GCC_ATOMIC_OPS
   return __atomic_load_n(&_value, __ATOMIC_ACQUIRE);
#else
   return _value;
#endif
}
Example #18
unsigned char cnn_data_wait_until_evaluated_bit(CNNData* data,
                                                unsigned char bit) {
  unsigned char v = __atomic_load_n(&data->evaluated, __ATOMIC_ACQUIRE);
  if (!TEST_BIT(v, bit)) {
    EventCount* ev = &data->event_counts[bit];
    for (;;) {
      EventCountKey ek = event_count_prepare(ev);
      v = __atomic_load_n(&data->evaluated, __ATOMIC_ACQUIRE);
      if (TEST_BIT(v, bit)) {
        event_count_cancel(ev);
        break;
      }
      event_count_wait(ev, ek);
    }
  }
  return v;
}
Example #19
inline Lock::state_t Lock::getState () const
{
#ifdef HAVE_NEW_GCC_ATOMIC_OPS
   return __atomic_load_n(&state_, __ATOMIC_ACQUIRE);
#else
   return state_;
#endif
}
Example #20
void
GOMP_doacross_wait (long first, ...)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_work_share *ws = thr->ts.work_share;
  struct gomp_doacross_work_share *doacross = ws->doacross;
  va_list ap;
  unsigned long ent;
  unsigned int i;

  if (__builtin_expect (doacross == NULL, 0))
    {
      __sync_synchronize ();
      return;
    }

  if (__builtin_expect (ws->sched == GFS_STATIC, 1))
    {
      if (ws->chunk_size == 0)
	{
	  if (first < doacross->boundary)
	    ent = first / (doacross->q + 1);
	  else
	    ent = (first - doacross->boundary) / doacross->q
		  + doacross->t;
	}
      else
	ent = first / ws->chunk_size % thr->ts.team->nthreads;
    }
  else if (ws->sched == GFS_GUIDED)
    ent = first;
  else
    ent = first / doacross->chunk_size;
  unsigned long *array = (unsigned long *) (doacross->array
					    + ent * doacross->elt_sz);

  if (__builtin_expect (doacross->flattened, 1))
    {
      unsigned long flattened
	= (unsigned long) first << doacross->shift_counts[0];
      unsigned long cur;

      va_start (ap, first);
      for (i = 1; i < doacross->ncounts; i++)
	flattened |= (unsigned long) va_arg (ap, long)
		     << doacross->shift_counts[i];
      cur = __atomic_load_n (array, MEMMODEL_ACQUIRE);
      if (flattened < cur)
	{
	  __atomic_thread_fence (MEMMODEL_RELEASE);
	  va_end (ap);
	  return;
	}
      doacross_spin (array, flattened, cur);
      __atomic_thread_fence (MEMMODEL_RELEASE);
      va_end (ap);
      return;
    }
Example #21
/**
 * Next tcb
 */
STATIC __inline__ tcb_x86* next_tcb(tcb_x86* pcur_tcb) {
  // Next thread
  tcb_x86* pnext = pcur_tcb->pnext_tcb;

  // Skip any ZOMBIE threads. It is ASSUMED the list
  // has at least one non-ZOMBIE thread, the idle thread,
  // so this is guaranteed not to be an endless loop.
  ac_s32* pthread_id = &pnext->thread_id;
  ac_s32 thread_id = __atomic_load_n(pthread_id, __ATOMIC_ACQUIRE);
  while(thread_id == AC_THREAD_ID_ZOMBIE) {
    // Skip the ZOMBIE
    pnext = pnext->pnext_tcb;
    pthread_id = &pnext->thread_id;
    thread_id = __atomic_load_n(pthread_id, __ATOMIC_ACQUIRE);
  }

  return pnext;
}
Example #22
void dump_kcov_buffer(void)
{
	unsigned long n, i;

	/* Read number of PCs collected. */
	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);
}
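For context, the cover array that dump_kcov_buffer() reads is normally a shared mapping of the kernel's KCOV device. A rough setup sketch based on the kernel's kcov documentation (error handling omitted; COVER_SIZE, the cover global, and kcov_setup are assumptions of this sketch, not part of the original):

#include <fcntl.h>
#include <linux/kcov.h>		/* KCOV_INIT_TRACE, KCOV_ENABLE, KCOV_TRACE_PC */
#include <sys/ioctl.h>
#include <sys/mman.h>

#define COVER_SIZE (64 << 10)		/* capacity in recorded PCs (assumed) */

static unsigned long *cover;		/* buffer read by dump_kcov_buffer() above */

static int kcov_setup(void)
{
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);	/* reset the PC count */
	return fd;
}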
Example #23
void simulate_thread_main()
{
  int x;

  /* Execute loads with value changing at various cyclic values.  */
  for (table_cycle_size = 16; table_cycle_size > 4 ; table_cycle_size--)
    {
      ret = __atomic_load_n (&value, __ATOMIC_SEQ_CST);
      /* In order to verify the returned value (which is not atomic), it needs
	 to be atomically stored into another variable and check that.  */
      __atomic_store_n (&result, ret, __ATOMIC_SEQ_CST);

      /* Execute the fetch/store a couple of times just to ensure the cycles
         have a chance to be interesting.  */
      ret = __atomic_load_n (&value, __ATOMIC_SEQ_CST);
      __atomic_store_n (&result, ret, __ATOMIC_SEQ_CST);
    }
}
Example #24
void
gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  unsigned int generation, gen;

  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      bar->awaited = bar->total;
      team->work_share_cancelled = 0;
      if (__builtin_expect (team->task_count, 0))
	{
	  gomp_barrier_handle_tasks (state);
	  state &= ~BAR_WAS_LAST;
	}
      else
	{
	  state &= ~BAR_CANCELLED;
	  state += BAR_INCR - BAR_WAS_LAST;
	  __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
	  futex_wake ((int *) &bar->generation, INT_MAX);
	  return;
	}
    }

  generation = state;
  state &= ~BAR_CANCELLED;
  do
    {
      do_wait ((int *) &bar->generation, generation);
      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
	{
	  gomp_barrier_handle_tasks (state);
	  gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
	}
      generation |= gen & BAR_WAITING_FOR_TASK;
    }
  while (gen != state + BAR_INCR);
}
Example #25
inline bool DependableObject::isSubmitted()
{
   return
#ifdef HAVE_NEW_GCC_ATOMIC_OPS
      __atomic_load_n(&_submitted, __ATOMIC_ACQUIRE)
#else
      _submitted
#endif
      ;
}
Example #26
struct timeout_event *
core_admin_register(uint64_t intvl_ms, timeout_cb_fn cb, void *arg)
{
    struct timeout delay;

    ASSERT(!__atomic_load_n(&admin_running, __ATOMIC_RELAXED));
    ASSERT(admin_init);

    timeout_set_ms(&delay, intvl_ms);
    return timing_wheel_insert(tw, &delay, true, cb, arg);
}
Example #27
void atomic_rw(struct list *l, size_t n, bool modify)
{
	for (size_t i=0; i < n; i++) {
		if (modify) {
			l->val += 1;
		} else {
			__atomic_load_n(&l->val, __ATOMIC_RELAXED);
		}
		l = l->next;
	}
}
Example #28
intptr_t __MCFCRT_gthread_unlock_callback_recursive_mutex(intptr_t context){
	__gthread_recursive_mutex_t *const recur_mutex = (__gthread_recursive_mutex_t *)context;
	_MCFCRT_ASSERT(_MCFCRT_GetCurrentThreadId() == __atomic_load_n(&(recur_mutex->__owner), __ATOMIC_RELAXED));

	const size_t old_count = recur_mutex->__count;
	recur_mutex->__count = 0;
	__atomic_store_n(&(recur_mutex->__owner), 0, __ATOMIC_RELAXED);

	__gthread_mutex_unlock(&(recur_mutex->__mutex));
	return (intptr_t)old_count;
}
Example #29
/**
 * @see mpscifo.h
 */
Msg_t *rmv_raw(MpscFifo_t *pQ) {
  Msg_t *pResult = pQ->pTail;
  Msg_t *pNext = __atomic_load_n(&pResult->pNext, __ATOMIC_SEQ_CST); // could be __ATOMIC_ACQUIRE
  if (pNext != NULL) {
    __atomic_fetch_sub(&pQ->count, 1, __ATOMIC_SEQ_CST);
    __atomic_store_n(&pQ->pTail, pNext, __ATOMIC_SEQ_CST); // could be __ATOMIC_RELEASE
  } else {
    pResult = NULL;
  }
  return pResult;
}
Example #30
void
_cgo_wait_runtime_init_done (void)
{
  int err;

  if (__atomic_load_n (&runtime_init_done, __ATOMIC_ACQUIRE))
    return;

  err = pthread_mutex_lock (&runtime_init_mu);
  if (err != 0)
    abort ();
  while (!__atomic_load_n (&runtime_init_done, __ATOMIC_ACQUIRE))
    {
      err = pthread_cond_wait (&runtime_init_cond, &runtime_init_mu);
      if (err != 0)
	abort ();
    }
  err = pthread_mutex_unlock (&runtime_init_mu);
  if (err != 0)
    abort ();
}