void   AO_initialize_lock_g( AO_lock_ptr_t lock,
                             AO_lock_val_t *val) {
    if( val != 0)
        AO_store_release((ao_t *)lock,(ao_t)*val);
    else
        AO_store_release((ao_t *)lock,0);
}
Example No. 2
AO_t *
AO_stack_pop_explicit_aux_acquire(volatile AO_t *list, AO_stack_aux * a)
{
  unsigned i;
  int j = 0;
  AO_t first;
  AO_t * first_ptr;
  AO_t next;

 retry:
  first = AO_load(list);
  if (0 == first) return 0;
  /* Insert first into aux black list.                                  */
  /* This may spin if more than AO_BL_SIZE removals using auxiliary     */
  /* structure a are currently in progress.                             */
  for (i = 0; ; )
    {
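      /* PRECHECK(x) is a macro defined earlier in this file; it expands   */
      /* either to "(x) == 0 &&" or to nothing, so the test below reads    */
      /* as a single combined condition.                                   */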
      if (PRECHECK(a -> AO_stack_bl[i])
          AO_compare_and_swap_acquire(a->AO_stack_bl+i, 0, first))
        break;
      ++i;
      if ( i >= AO_BL_SIZE )
        {
          i = 0;
          AO_pause(++j);
        }
    }
  assert(i < AO_BL_SIZE);
  assert(a -> AO_stack_bl[i] == first);
  /* first is on the auxiliary black list.  It may be removed by        */
  /* another thread before we get to it, but a new insertion of first   */
  /* cannot be started here.                                            */
  /* Only we can remove it from the black list.                         */
  /* We need to make sure that first is still the first entry on the    */
  /* list.  Otherwise it's possible that a reinsertion of it was        */
  /* already started before we added the black list entry.              */
  if (AO_EXPECT_FALSE(first != AO_load(list))) {
    AO_store_release(a->AO_stack_bl+i, 0);
    goto retry;
  }
  first_ptr = AO_REAL_NEXT_PTR(first);
  next = AO_load(first_ptr);
  if (AO_EXPECT_FALSE(!AO_compare_and_swap_release(list, first, next))) {
    AO_store_release(a->AO_stack_bl+i, 0);
    goto retry;
  }
  assert(*list != first);
  /* Since we never insert an entry on the black list, this cannot have */
  /* succeeded unless first remained on the list while we were running. */
  /* Thus its next link cannot have changed out from under us, and we   */
  /* removed exactly one entry and preserved the rest of the list.      */
  /* Note that it is quite possible that an additional entry was        */
  /* inserted and removed while we were running; this is OK since the   */
  /* part of the list following first must have remained unchanged, and */
  /* first must again have been at the head of the list when the        */
  /* compare_and_swap succeeded.                                        */
  AO_store_release(a->AO_stack_bl+i, 0);
  return first_ptr;
}
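The routine above is the internal slow-path pop behind libatomic_ops' almost-lock-free stack; application code normally uses the public push/pop calls declared in atomic_ops_stack.h. The following is only a usage sketch of that public interface (the node type and helper names are made up; if AO_STACK_INITIALIZER is unavailable, AO_stack_init can be called instead):

#include "atomic_ops_stack.h"

/* Element type: the first word is reserved for the stack's link pointer. */
struct node {
  AO_t link;                    /* used internally by AO_stack_push/pop   */
  int value;
};

static AO_stack_t free_list = AO_STACK_INITIALIZER;

void node_free(struct node *n)
{
  AO_stack_push_release(&free_list, (AO_t *)n);         /* lock-free push */
}

struct node *node_alloc(void)
{
  /* Returns 0 when the stack is empty. */
  return (struct node *)AO_stack_pop_acquire(&free_list);
}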
int  AO_unlock( AO_lock_ptr_t lock ) {
    pthread_t self = pthread_self();
    if( AO_load((ao_t*) lock) != (ao_t) self ) return 0;
    AO_store_release(lock,0);
    futex_wake(lock,1); // wake up exactly one thread
    return 1;
}
int AO_compare_and_swap_emulation(volatile AO_T *addr, AO_T old,
                                  AO_T new_val)
{
    AO_TS_T *my_lock = AO_locks + AO_HASH(addr);
    sigset_t old_sigs;
    int result;

    if (!AO_load_acquire(&initialized))
    {
        lock(&init_lock);
        if (!initialized) sigfillset(&all_sigs);
        unlock(&init_lock);
        AO_store_release(&initialized, 1);
    }
    sigprocmask(SIG_BLOCK, &all_sigs, &old_sigs);
    /* Neither sigprocmask nor pthread_sigmask is 100%          */
    /* guaranteed to work here.  Sigprocmask is not             */
    /* guaranteed to be thread safe, and pthread_sigmask        */
    /* is not async-signal-safe.  Under linuxthreads,           */
    /* sigprocmask may block some pthreads-internal             */
    /* signals.  So long as we do that for short periods,       */
    /* we should be OK.                                         */
    lock(my_lock);
    if (*addr == old)
    {
        *addr = new_val;
        result = 1;
    }
    else
        result = 0;
    unlock(my_lock);
    sigprocmask(SIG_SETMASK, &old_sigs, NULL);
    return result;
}
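The emulation above gives platforms without a native compare-and-swap the ordinary AO_compare_and_swap behaviour, so callers can keep using the usual retry-loop idiom. A minimal caller sketch (counter_add is a made-up helper; where available, AO_fetch_and_add would be the more direct primitive):

#include "atomic_ops.h"

/* Add delta to *counter with a compare-and-swap retry loop. */
void counter_add(volatile AO_t *counter, AO_t delta)
{
  AO_t old;
  do {
    old = AO_load(counter);
  } while (!AO_compare_and_swap(counter, old, old + delta));
}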
Example No. 5
GC_API GC_descr GC_CALL GC_make_descriptor(const GC_word * bm, size_t len)
{
    signed_word last_set_bit = len - 1;
    GC_descr result;
    DCL_LOCK_STATE;

#   if defined(AO_HAVE_load_acquire) && defined(AO_HAVE_store_release)
      if (!EXPECT(AO_load_acquire(&GC_explicit_typing_initialized), TRUE)) {
        LOCK();
        if (!GC_explicit_typing_initialized) {
          GC_init_explicit_typing();
          AO_store_release(&GC_explicit_typing_initialized, TRUE);
        }
        UNLOCK();
      }
#   else
      LOCK();
      if (!EXPECT(GC_explicit_typing_initialized, TRUE)) {
        GC_init_explicit_typing();
        GC_explicit_typing_initialized = TRUE;
      }
      UNLOCK();
#   endif

    while (last_set_bit >= 0 && !GC_get_bit(bm, last_set_bit))
      last_set_bit--;
    if (last_set_bit < 0) return(0 /* no pointers */);

#   if ALIGNMENT == CPP_WORDSZ/8
    {
      signed_word i;

      for (i = 0; i < last_set_bit; i++) {
        if (!GC_get_bit(bm, i)) {
          break;
        }
      }
      if (i == last_set_bit) {
        /* An initial section contains all pointers.  Use length descriptor. */
        return (WORDS_TO_BYTES(last_set_bit+1) | GC_DS_LENGTH);
      }
    }
#   endif
    if ((word)last_set_bit < BITMAP_BITS) {
        signed_word i;

        /* Hopefully the common case.                   */
        /* Build bitmap descriptor (with bits reversed) */
        result = SIGNB;
        for (i = last_set_bit - 1; i >= 0; i--) {
            result >>= 1;
            if (GC_get_bit(bm, i)) result |= SIGNB;
        }
        result |= GC_DS_BITMAP;
    } else {
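Callers of GC_make_descriptor typically build the bitmap with the helper macros from gc_typed.h and pass the resulting descriptor to GC_malloc_explicitly_typed. A hedged sketch of that usage (struct widget and its fields are invented for illustration):

#include "gc_typed.h"

struct widget {
  struct widget *next;          /* pointer: must be traced                */
  size_t count;                 /* not a pointer                          */
  char *name;                   /* pointer: must be traced                */
};

static GC_descr widget_descr;

void widget_type_init(void)
{
  GC_word bm[GC_BITMAP_SIZE(struct widget)] = {0};

  GC_set_bit(bm, GC_WORD_OFFSET(struct widget, next));
  GC_set_bit(bm, GC_WORD_OFFSET(struct widget, name));
  widget_descr = GC_make_descriptor(bm, GC_WORD_LEN(struct widget));
}

struct widget *widget_alloc(void)
{
  return (struct widget *)GC_malloc_explicitly_typed(sizeof(struct widget),
                                                     widget_descr);
}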
Example No. 6
 AO_INLINE void block_all_signals(sigset_t *old_sigs_ptr)
 {
   if (!AO_load_acquire(&initialized))
   {
     lock(&init_lock);
     if (!initialized)
       sigfillset(&all_sigs);
     unlock(&init_lock);
     AO_store_release(&initialized, 1);
   }
   sigprocmask(SIG_BLOCK, &all_sigs, old_sigs_ptr);
       /* Neither sigprocmask nor pthread_sigmask is 100%      */
       /* guaranteed to work here.  Sigprocmask is not         */
       /* guaranteed to be thread safe, and pthread_sigmask    */
       /* is not async-signal-safe.  Under linuxthreads,       */
       /* sigprocmask may block some pthreads-internal         */
       /* signals.  So long as we do that for short periods,   */
       /* we should be OK.                                     */
 }
Example No. 7
int PREFIXED(setspecific) (tsd * key, void * value) {
    pthread_t self = pthread_self();
    int hash_val = HASH(self);
    volatile tse * entry = (volatile tse *)MALLOC_CLEAR(sizeof (tse));
    
    GC_ASSERT(self != INVALID_THREADID);
    if (0 == entry) return ENOMEM;
    pthread_mutex_lock(&(key -> lock));
    /* Could easily check for an existing entry here.	*/
    entry -> next = key -> hash[hash_val];
    entry -> thread = self;
    entry -> value = value;
    GC_ASSERT(entry -> qtid == INVALID_QTID);
    /* There can only be one writer at a time, but this needs to be	*/
    /* atomic with respect to concurrent readers.			*/ 
    AO_store_release((volatile AO_t *)(key -> hash + hash_val), (AO_t)entry);
    pthread_mutex_unlock(&(key -> lock));
    return 0;
}
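The release store above pairs with an acquire load on the reader side: a lock-free lookup that observes the new head entry is then guaranteed to see its fully initialized fields. The lookup below is only a sketch of that reader side (it is not the library's actual getspecific; the type and field names are taken from the snippet above):

void * lookup_specific_sketch(tsd * key) {
    pthread_t self = pthread_self();
    int hash_val = HASH(self);
    tse * entry = (tse *)AO_load_acquire((volatile AO_t *)(key -> hash + hash_val));

    /* Walk the bucket chain; each entry was published with a release store. */
    while (entry != 0 && entry -> thread != self)
        entry = entry -> next;
    return 0 == entry ? 0 : entry -> value;
}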
Example No. 8
int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr,
                                                AO_t old_val1, AO_t old_val2,
                                                AO_t new_val1, AO_t new_val2)
{
  AO_TS_t *my_lock = AO_locks + AO_HASH(addr);
  int result;

# ifndef AO_USE_NO_SIGNALS
    sigset_t old_sigs;
    if (!AO_load_acquire(&initialized))
    {
      lock(&init_lock);
      if (!initialized) sigfillset(&all_sigs);
      unlock(&init_lock);
      AO_store_release(&initialized, 1);
    }
    sigprocmask(SIG_BLOCK, &all_sigs, &old_sigs);
        /* Neither sigprocmask nor pthread_sigmask is 100%      */
        /* guaranteed to work here.  Sigprocmask is not         */
        /* guaranteed to be thread safe, and pthread_sigmask    */
        /* is not async-signal-safe.  Under linuxthreads,       */
        /* sigprocmask may block some pthreads-internal         */
        /* signals.  So long as we do that for short periods,   */
        /* we should be OK.                                     */
# endif
  lock(my_lock);
  if (addr -> AO_val1 == old_val1 && addr -> AO_val2 == old_val2)
    {
      addr -> AO_val1 = new_val1;
      addr -> AO_val2 = new_val2;
      result = 1;
    }
  else
    result = 0;
  unlock(my_lock);
# ifndef AO_USE_NO_SIGNALS
    sigprocmask(SIG_SETMASK, &old_sigs, NULL);
# endif
  return result;
}
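A typical caller of the double-width primitive updates a two-word pair (for example a pointer plus a version counter, to sidestep ABA problems) in a retry loop. A sketch, assuming the primitive is available on the platform (guarded by AO_HAVE_compare_double_and_swap_double); the names below are invented:

#include "atomic_ops.h"

static volatile AO_double_t versioned_slot;

/* Replace the pointer half and bump the version half atomically. */
void versioned_store(AO_t new_ptr)
{
  AO_t old_ptr, old_ver;
  do {
    old_ptr = versioned_slot.AO_val1;
    old_ver = versioned_slot.AO_val2;
    /* A torn read here is harmless: the double CAS below simply fails. */
  } while (!AO_compare_double_and_swap_double(&versioned_slot,
                                              old_ptr, old_ver,
                                              new_ptr, old_ver + 1));
}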
Example No. 9
/* thread being deleted.					*/
void GC_delete_gc_thread(GC_vthread gc_id)
{
  CloseHandle(gc_id->handle);
  if (GC_win32_dll_threads) {
    /* This is intended to be lock-free.                                 */
    /* It is either called synchronously from the thread being deleted,  */
    /* or by the joining thread.                                          */
    /* In this branch asynchronous changes to *gc_id are possible.        */
    gc_id -> stack_base = 0;
    gc_id -> id = 0;
#   ifdef CYGWIN32
      gc_id -> pthread_id = 0;
#   endif /* CYGWIN32 */
#   ifdef GC_WIN32_PTHREADS
      gc_id -> pthread_id.p = NULL;
#   endif /* GC_WIN32_PTHREADS */
    AO_store_release(&(gc_id->in_use), FALSE);
  } else {
    /* Cast away volatile qualifier, since we have lock. */
    GC_thread gc_nvid = (GC_thread)gc_id;
    DWORD id = gc_nvid -> id;
    word hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    GC_ASSERT(I_HOLD_LOCK());
    while (p != gc_nvid) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    GC_INTERNAL_FREE(p);
  }
}
void AO_steal_lock_g( AO_lock_ptr_t lock,AO_lock_val_t self ) {
    AO_store_release((ao_t *)lock,(ao_t)self);
}
int  AO_unlock_g( AO_lock_ptr_t lock, AO_lock_val_t self ) {
    if( AO_load((ao_t*) lock) != (ao_t) self ) return 0;
    AO_store_release(lock,0);
    futex_wake(lock,1); // wake up exactly one thread
    return 1;
}
void AO_steal_lock( AO_lock_ptr_t lock ) {
    AO_store_release(lock,pthread_self());
}
void   AO_initialize_lock( AO_lock_ptr_t lock, int locked ) {
    if( locked )
        AO_store_release((ao_t *)lock,pthread_self());
    else
        AO_store_release((ao_t *)lock,0);
}
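Taken together, these wrappers implement a small futex-backed lock whose word holds the owning thread's pthread_self() (or 0 when free). The blocking acquire path is not among the snippets above, so the caller below is only a hypothetical sketch of the helpers that are shown:

void example_owner_recovery( AO_lock_ptr_t lock ) {
    AO_steal_lock(lock);    // forcibly become the owner (stores our thread id)
    /* ... repair state left behind by the previous owner ... */
    AO_unlock(lock);        // returns 0 only if we were not the owner
}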