Example #1
/* The wide CAS below checks only for an unchanged version number,   */
/* not an unchanged pointer.                                          */
void AO_stack_push_release(AO_stack_t *list, AO_t *element)
{
    AO_t version;
    AO_t next_ptr;

    do {
      /* Again, the version must be loaded first, for a different reason.  */
      version = AO_load_acquire(&(list -> version));
      next_ptr = AO_load(&(list -> ptr));
      *element = next_ptr;
    } while (!AO_compare_and_swap_double_release(
                           list, version,
                           version+1, (AO_t) element));
}
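
Unlike the fully double-wide compare used in Examples #2 and #4 below, AO_compare_and_swap_double is documented to compare only the first word of the pair (the version) and, on success, replace both words. A rough, non-atomic model of that semantics, written against the same AO_val1/AO_val2 fields that Example #4 accesses (illustrative only, not the library's implementation):

#include "atomic_ops.h"

/* Non-atomic model of the single-compare, double-swap primitive used above.  */
/* The real primitive performs this check-and-update as one atomic step.      */
int cas_double_model(volatile AO_double_t *addr,
                     AO_t old_val1, AO_t new_val1, AO_t new_val2)
{
  if (addr -> AO_val1 != old_val1)
    return 0;                   /* version changed: another push/pop intervened */
  addr -> AO_val1 = new_val1;   /* version+1 in the caller above                */
  addr -> AO_val2 = new_val2;   /* new top-of-stack pointer                     */
  return 1;
}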
Example #2
AO_t *AO_stack_pop_acquire(AO_stack_t *list)
{
    AO_t *cptr;
    AO_t next;
    AO_t cversion;

    do {
      cversion = AO_load_acquire(&(list -> version));
      cptr = (AO_t *)AO_load(&(list -> ptr));
      if (cptr == 0) return 0;
      next = *cptr;
    } while (!AO_compare_double_and_swap_double_release
                    (list, cversion, (AO_t) cptr, cversion+1, next));
    return cptr;
}
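
With both push and pop now shown, a minimal usage sketch could look like the following. The header name, the AO_STACK_INITIALIZER macro, and the node layout are assumptions; the only requirement the API above actually imposes is that the caller hands push the address of an AO_t link word inside the element, and that same address is what pop returns.

#include "atomic_ops_stack.h"   /* assumed header providing AO_stack_t */

struct node {
  AO_t link;     /* must be the first member: push stores the old top here, */
                 /* and the AO_t * returned by pop points at this word      */
  int payload;   /* hypothetical user data */
};

static AO_stack_t my_stack = AO_STACK_INITIALIZER;  /* assumed initializer macro */

static void my_push(struct node *n)
{
  AO_stack_push_release(&my_stack, &n -> link);
}

static struct node *my_pop(void)
{
  AO_t *p = AO_stack_pop_acquire(&my_stack);
  return p == NULL ? NULL : (struct node *)p;   /* valid because link is first */
}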
Example #3
 AO_INLINE void block_all_signals(sigset_t *old_sigs_ptr)
 {
   if (!AO_load_acquire(&initialized))
   {
     lock(&init_lock);
     if (!initialized)
       sigfillset(&all_sigs);
     unlock(&init_lock);
     AO_store_release(&initialized, 1);
   }
   sigprocmask(SIG_BLOCK, &all_sigs, old_sigs_ptr);
       /* Neither sigprocmask nor pthread_sigmask is 100%      */
       /* guaranteed to work here.  Sigprocmask is not         */
       /* guaranteed to be thread-safe, and pthread_sigmask   */
       /* is not async-signal-safe.  Under linuxthreads,       */
       /* sigprocmask may block some pthreads-internal         */
       /* signals.  So long as we do that for short periods,   */
       /* we should be OK.                                     */
 }
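
The mask saved through old_sigs_ptr is presumably restored once the critical section is over, exactly as Example #4 below does after unlocking. A matching helper would be a one-liner (a sketch; the library's own restore routine may differ in name):

 AO_INLINE void restore_signals(sigset_t *old_sigs_ptr)
 {
   sigprocmask(SIG_SETMASK, old_sigs_ptr, NULL);   /* undo the SIG_BLOCK above */
 }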
Example #4
int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr,
                                                AO_t old_val1, AO_t old_val2,
                                                AO_t new_val1, AO_t new_val2)
{
  AO_TS_t *my_lock = AO_locks + AO_HASH(addr);
  int result;

# ifndef AO_USE_NO_SIGNALS
    sigset_t old_sigs;
    if (!AO_load_acquire(&initialized))
    {
      lock(&init_lock);
      if (!initialized) sigfillset(&all_sigs);
      unlock(&init_lock);
      AO_store_release(&initialized, 1);
    }
    sigprocmask(SIG_BLOCK, &all_sigs, &old_sigs);
        /* Neither sigprocmask nor pthread_sigmask is 100%      */
        /* guaranteed to work here.  Sigprocmask is not         */
        /* guaranteed to be thread-safe, and pthread_sigmask   */
        /* is not async-signal-safe.  Under linuxthreads,       */
        /* sigprocmask may block some pthreads-internal         */
        /* signals.  So long as we do that for short periods,   */
        /* we should be OK.                                     */
# endif
  lock(my_lock);
  if (addr -> AO_val1 == old_val1 && addr -> AO_val2 == old_val2)
    {
      addr -> AO_val1 = new_val1;
      addr -> AO_val2 = new_val2;
      result = 1;
    }
  else
    result = 0;
  unlock(my_lock);
# ifndef AO_USE_NO_SIGNALS
    sigprocmask(SIG_SETMASK, &old_sigs, NULL);
# endif
  return result;
}
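
The lock()/unlock() helpers and the AO_locks/AO_HASH pair used above fall outside this excerpt. A plausible sketch, assuming a small table of test-and-set locks indexed by a hash of the target address (the real definitions may differ in size and hash function):

#include "atomic_ops.h"

#define AO_HASH_SIZE 16                 /* assumed table size */
#define AO_HASH(p) ((unsigned)((AO_t)(p) >> 12) % AO_HASH_SIZE)

static AO_TS_t AO_locks[AO_HASH_SIZE] = {
  AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER,
  AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER,
  AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER,
  AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER, AO_TS_INITIALIZER
};

static void lock(volatile AO_TS_t *p)
{
  while (AO_test_and_set_acquire(p) == AO_TS_SET)
    /* spin until the flag is clear */;
}

static void unlock(volatile AO_TS_t *p)
{
  AO_CLEAR(p);                          /* release the test-and-set flag */
}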
Example #5
AO_t *AO_stack_pop_acquire(AO_stack_t *list)
{
#   ifdef __clang__
      AO_t *volatile cptr;
                        /* Use volatile to workaround a bug in          */
                        /* clang-1.1/x86 causing test_stack failure.    */
#   else
      AO_t *cptr;
#   endif
    AO_t next;
    AO_t cversion;

    do {
      /* Version must be loaded first.  */
      cversion = AO_load_acquire(&(list -> version));
      cptr = (AO_t *)AO_load(&(list -> ptr));
      if (cptr == 0) return 0;
      next = *cptr;
    } while (!AO_compare_double_and_swap_double_release
                    (list, cversion, (AO_t) cptr, cversion+1, (AO_t) next));
    return cptr;
}
Example #6
/* Assumes we do NOT hold the allocation lock.			*/
static GC_thread GC_lookup_pthread(pthread_t id)
{
  if (GC_win32_dll_threads) {
    int i;
    LONG my_max = GC_get_max_thread_index();

    for (i = 0;
         i <= my_max &&
         (!AO_load_acquire(&(dll_thread_table[i].in_use))
          || !THREAD_EQUAL(dll_thread_table[i].pthread_id, id));
         /* Must still be in_use, since nobody else can store our thread_id. */
         i++);
    if (i > my_max) return 0;
    return (GC_thread)(dll_thread_table + i);
  } else {
    /* We first try the cache.  If that fails, we use a very slow	*/
    /* approach.							*/
    int hv_guess = GET_PTHREAD_MAP_CACHE(id) % THREAD_TABLE_SZ;
    int hv;
    GC_thread p;

    LOCK();
    for (p = GC_threads[hv_guess]; 0 != p; p = p -> next) {
      if (THREAD_EQUAL(p -> pthread_id, id))
	goto foundit; 
    }
    for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
      for (p = GC_threads[hv]; 0 != p; p = p -> next) {
        if (THREAD_EQUAL(p -> pthread_id, id))
	  goto foundit; 
      }
    }
    p = 0;
   foundit:
    UNLOCK();
    return p;
  }
}
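
Because the scan above folds the match test into the for-condition through a double negation, an equivalent but more explicit form of the same loop (illustrative only, using the identifiers already in scope above) may be easier to read:

    for (i = 0; i <= my_max; i++) {
      if (AO_load_acquire(&(dll_thread_table[i].in_use))
          && THREAD_EQUAL(dll_thread_table[i].pthread_id, id))
        break;    /* slot is still in use and its pthread id matches */
    }
    /* i > my_max here means no matching slot was found. */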
Example #7
/* Also used (for assertion checking only) from thread_local_alloc.c.	*/
GC_thread GC_lookup_thread_inner(DWORD thread_id) {
  if (GC_win32_dll_threads) {
    int i;
    LONG my_max = GC_get_max_thread_index();
    for (i = 0;
       i <= my_max &&
       (!AO_load_acquire(&(dll_thread_table[i].in_use))
	|| dll_thread_table[i].id != thread_id);
       /* Must still be in_use, since nobody else can store our thread_id. */
       i++) {}
    if (i > my_max) {
      return 0;
    } else {
      return (GC_thread)(dll_thread_table + i);
    }
  } else {
    word hv = ((word)thread_id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    
    GC_ASSERT(I_HOLD_LOCK());
    while (p != 0 && p -> id != thread_id) p = p -> next;
    return(p);
  }
}
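
A typical call site would pass the current Win32 thread id. The locking shown here is a hypothetical illustration that satisfies the GC_ASSERT(I_HOLD_LOCK()) requirement of the hash-table branch:

    GC_thread me;

    LOCK();       /* the non-DLL branch above asserts that the lock is held */
    me = GC_lookup_thread_inner(GetCurrentThreadId());
    UNLOCK();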