Example #1
/* Force a lazy pair.
   NB: When an error occurs during forcing, we release the lock of the
   pair, so that the pair can be forced again.  However, the generator
   has already caused some side-effect before the error, so the next
   forcing may not yield a correct next value.  Another plausible option
   is to mark the pair permanently 'unforceable' by setting lp->owner
   to (AO_t)2, and let any subsequent attempt to force the pair fail.
 */
ScmObj Scm_ForceLazyPair(volatile ScmLazyPair *lp)
{
    static const struct timespec req = {0, 1000000};
    struct timespec rem;
    ScmVM *vm = Scm_VM();

    do {
        if (AO_compare_and_swap_full(&lp->owner, 0, SCM_WORD(vm))) {
            /* Here we own the lazy pair. */
            ScmObj item = lp->item;
            /* Calling generator might change VM state, so we protect
               incomplete stack frame if there's any. */
            int extra_frame_pushed = Scm__VMProtectStack(vm);
            SCM_UNWIND_PROTECT {
                ScmObj val = Scm_ApplyRec0(lp->generator);
                ScmObj newgen = (vm->numVals == 1)? lp->generator : vm->vals[0];
                vm->numVals = 1; /* make sure the extra val won't leak out */

                if (SCM_EOFP(val)) {
                    lp->item = SCM_NIL;
                    lp->generator = SCM_NIL;
                } else {
                    ScmObj newlp = Scm_MakeLazyPair(val, newgen);
                    lp->item = newlp;
                    lp->generator = SCM_NIL;
                }
                AO_nop_full();
                SCM_SET_CAR(lp, item);
                /* We don't need a barrier here. */
                lp->owner = (AO_t)1;
            } SCM_WHEN_ERROR {
                lp->owner = (AO_t)0; /*NB: See above about error handling*/
                SCM_NEXT_HANDLER;
            } SCM_END_PROTECT;
            if (extra_frame_pushed) {
                Scm__VMUnprotectStack(vm);
            }
            return SCM_OBJ(lp); /* lp is now an (extended) pair */
        }
        /* Check if we're already working on forcing this pair.  Unlike
           force/delay, we don't allow recursive forcing of a lazy pair:
           generators are supposed to yield a new value on every call,
           so it is ambiguous what value should be returned if a
           generator calls itself recursively. */
        if (lp->owner == SCM_WORD(vm)) {
            /* NB: lp->owner will be reset by the original caller of
               the generator. */
            Scm_Error("Attempt to recursively force a lazy pair.");
        }
        /* Somebody's already working on forcing.  Let's wait for it
           to finish, or to abort. */
        while (SCM_HTAG(lp) == 7 && lp->owner != 0) {
            nanosleep(&req, &rem);
        }
    } while (lp->owner == 0); /* we retry if the previous owner abandoned. */
    return SCM_OBJ(lp);       /* somebody else has forced the pair */
}
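Stripped of the VM details, the protocol above is: claim ownership with a full-barrier CAS, fill in the data, then issue a full barrier before releasing ownership. Below is a minimal sketch of that pattern; slot_t and slot_publish are hypothetical names used only for illustration, while the AO_ calls are the real libatomic_ops primitives used above.

#include <atomic_ops.h>

/* Hypothetical one-shot slot (illustration only): owner == 0 means
   free, owner == 1 means published, anything else is the claimant. */
typedef struct {
    volatile AO_t owner;
    void *payload;
} slot_t;

int slot_publish(slot_t *s, AO_t self, void *value)
{
    if (!AO_compare_and_swap_full(&s->owner, 0, self))
        return 0;               /* someone else owns the slot */
    s->payload = value;         /* plain store: we own the slot */
    AO_nop_full();              /* payload must be visible before... */
    s->owner = (AO_t)1;         /* ...the slot is marked published */
    return 1;
}

As in Scm_ForceLazyPair, the store that releases ownership needs no barrier of its own: the AO_nop_full() already orders the payload store ahead of it.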
Example #2
/* Return TRUE if a thread was attached since we last asked, or        */
/* since GC_attached_thread was explicitly reset.                      */
GC_bool GC_started_thread_while_stopped(void)
{
  AO_t result;

  if (GC_win32_dll_threads) {
    AO_nop_full();	/* Prior heap reads need to complete earlier. */
    result = AO_load(&GC_attached_thread);
    if (result) {
      AO_store(&GC_attached_thread, FALSE);
    }
    return ((GC_bool)result);
  } else {
    return FALSE;
  }
}
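The function above is a test-and-clear flag: the full barrier forces earlier heap reads to complete before the flag is sampled, and a set flag is consumed by storing FALSE back. Here is a minimal standalone sketch of the same idiom; work_flag and consume_flag are illustrative names, not part of the collector.

#include <atomic_ops.h>

static volatile AO_t work_flag;     /* illustrative flag */

static int consume_flag(void)
{
    AO_t seen;

    AO_nop_full();                  /* prior reads must complete first */
    seen = AO_load(&work_flag);
    if (seen)
        AO_store(&work_flag, 0);    /* consume; a producer may re-raise it */
    return (int)seen;
}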
Example #3
/*
 * harris_insert inserts a new node with the given value val in the list
 * (if the value was absent) or does nothing (if the value is already present).
 */
int harris_insert(intset_t *set, val_t val) {
    node_t *newnode, *right_node, *left_node;
    left_node = set->head;

    do {
        right_node = harris_search(set, val, &left_node);
        if (right_node->val == val)
            return 0;
        newnode = new_node(val, right_node, 0);
        /* mem-bar between node creation and insertion */
        AO_nop_full();
        if (ATOMIC_CAS_MB(&left_node->next, right_node, newnode))
            return 1;
    } while(1);
}
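The crucial step above is the memory barrier between initializing the new node and the CAS that makes it reachable; without it, another thread could traverse to the node and read uninitialized fields. Below is a minimal sketch of the same construct-barrier-publish sequence applied to a lock-free LIFO push; node_t and list_push here are illustrative names unrelated to the Harris list types.

#include <stdlib.h>
#include <atomic_ops.h>

typedef struct node { AO_t next; int val; } node_t;  /* illustrative */

static void list_push(volatile AO_t *head, int val)
{
    node_t *n = malloc(sizeof *n);
    if (n == NULL) abort();
    n->val = val;
    do {
        n->next = AO_load(head);
        AO_nop_full();   /* node fields written before the node can */
                         /* become reachable through *head          */
    } while (!AO_compare_and_swap(head, n->next, (AO_t)n));
}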
Example #4
AO_t *
AO_stack_pop_explicit_aux_acquire(volatile AO_t *list, AO_stack_aux * a)
{
  unsigned i;
  int j = 0;
  AO_t first;
  AO_t * first_ptr;
  AO_t next;

 retry:
  first = AO_load(list);
  if (0 == first) return 0;
  /* Insert first into aux black list.                                  */
  /* This may spin if more than AO_BL_SIZE removals using auxiliary     */
  /* structure a are currently in progress.                             */
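  /* NB: PRECHECK(x) typically expands to "(x) == 0 &&" (or to nothing  */
  /* on targets where that workaround is needed), so the slot is only   */
  /* CASed when it looks free -- an assumption based on the usual       */
  /* libatomic_ops definition of the macro.                             */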
  for (i = 0; ; )
    {
      if (PRECHECK(a -> AO_stack_bl[i])
          AO_compare_and_swap(a->AO_stack_bl+i, 0, first))
        break;
      ++i;
      if ( i >= AO_BL_SIZE )
        {
          i = 0;
          AO_pause(++j);
        }
    }
  assert(i < AO_BL_SIZE);
  assert(a -> AO_stack_bl[i] == first);
  /* First is on the auxiliary black list.  It may be removed by        */
  /* another thread before we get to it, but a new insertion of x       */
  /* cannot be started here.                                            */
  /* Only we can remove it from the black list.                         */
  /* We need to make sure that first is still the first entry on the    */
  /* list.  Otherwise it's possible that a reinsertion of it was        */
  /* already started before we added the black list entry.              */
  AO_nop_full(); /* TODO: Suboptimal on x86 */
# if defined(__alpha__) && (__GNUC__ == 4)
    if (first != AO_load(list))
                        /* Workaround __builtin_expect bug found in     */
                        /* gcc-4.6.3/alpha causing test_stack failure.  */
# else
    if (AO_EXPECT_FALSE(first != AO_load(list)))
# endif
  {
    AO_store_release(a->AO_stack_bl+i, 0);
    goto retry;
  }
  first_ptr = AO_REAL_NEXT_PTR(first);
  next = AO_load(first_ptr);
# if defined(__alpha__) && (__GNUC__ == 4)
    if (!AO_compare_and_swap_release(list, first, next))
# else
    if (AO_EXPECT_FALSE(!AO_compare_and_swap_release(list, first, next)))
# endif
  {
    AO_store_release(a->AO_stack_bl+i, 0);
    goto retry;
  }
  assert(*list != first);
  /* Since we never insert an entry on the black list, this cannot have */
  /* succeeded unless first remained on the list while we were running. */
  /* Thus its next link cannot have changed out from under us, and we   */
  /* removed exactly one entry and preserved the rest of the list.      */
  /* Note that it is quite possible that an additional entry was        */
  /* inserted and removed while we were running; this is OK since the   */
  /* part of the list following first must have remained unchanged, and */
  /* first must again have been at the head of the list when the        */
  /* compare_and_swap succeeded.                                        */
  AO_store_release(a->AO_stack_bl+i, 0);
  return first_ptr;
}
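The black list here plays the role of a hazard pointer: announce the value you are about to dereference, then re-check the source to make sure it is still current before trusting it. A minimal sketch of that announce-and-revalidate step follows; announce_slot and protected_load are illustrative names, and a real caller would also clear the slot once done with the returned value, exactly as the pop above does.

#include <atomic_ops.h>

static volatile AO_t announce_slot;   /* illustrative one-entry black list */

static AO_t protected_load(volatile AO_t *src)
{
    for (;;) {
        AO_t v = AO_load(src);
        if (v == 0) return 0;
        AO_store(&announce_slot, v);          /* announce intent */
        AO_nop_full();                        /* announce before re-check */
        if (AO_load(src) == v)
            return v;                         /* still current; protected */
        AO_store_release(&announce_slot, 0);  /* changed: retract, retry */
    }
}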
Example #5
/*
 * This may be called from DllMain, and hence operates under unusual
 * constraints.  In particular, it must be lock-free if GC_win32_dll_threads
 * is set.  Always called from the thread being added.
 * If GC_win32_dll_threads is not set, we already hold the allocation lock,
 * except possibly during single-threaded start-up code.
 */
static GC_thread GC_register_my_thread_inner(struct GC_stack_base *sb,
					     DWORD thread_id)
{
  GC_vthread me;

  /* The following should be a noop according to the win32	*/
  /* documentation.  There is empirical evidence that it	*/
  /* isn't.		- HB					*/
# if defined(MPROTECT_VDB)
#   if defined(GWW_VDB)
      if (GC_incremental && !GC_gww_dirty_init())
	SetUnhandledExceptionFilter(GC_write_fault_handler);
#   else
      if (GC_incremental) SetUnhandledExceptionFilter(GC_write_fault_handler);
#   endif
# endif

  if (GC_win32_dll_threads) {
    int i;
    /* It appears to be unsafe to acquire a lock here, since this	*/
    /* code is apparently not preemptible on some systems.		*/
    /* (This is based on complaints, not on Microsoft's official	*/
    /* documentation, which says this should perform "only simple	*/
    /* initialization tasks".)						*/
    /* Hence we make do with nonblocking synchronization.		*/
    /* It has been claimed that DllMain is really only executed with	*/
    /* a particular system lock held, and thus careful use of locking	*/
    /* around code that doesn't call back into the system libraries	*/
    /* might be OK.  But this hasn't been tested across all win32	*/
    /* variants.							*/
                /* cast away volatile qualifier */
    for (i = 0; InterlockedExchange((IE_t)&dll_thread_table[i].in_use,1) != 0;
	 i++) {
      /* Compare-and-swap would make this cleaner, but that's not 	*/
      /* supported before Windows 98 and NT 4.0.  In Windows 2000,	*/
      /* InterlockedExchange is supposed to be replaced by		*/
      /* InterlockedExchangePointer, but that's not really what I	*/
      /* want here.							*/
      /* FIXME: We should eventually declare Win95 dead and use AO_	*/
      /* primitives here.						*/
      if (i == MAX_THREADS - 1)
        ABORT("too many threads");
    }
    /* Update GC_max_thread_index if necessary.  The following is safe,	*/
    /* and unlike CompareExchange-based solutions seems to work on all	*/
    /* Windows95 and later platforms.					*/
    /* Unfortunately, GC_max_thread_index may be temporarily out of 	*/
    /* bounds, so readers have to compensate.				*/
    while (i > GC_max_thread_index) {
      InterlockedIncrement((IE_t)&GC_max_thread_index);
    }
    if (GC_max_thread_index >= MAX_THREADS) {
      /* We overshot due to simultaneous increments.	*/
      /* Setting it to MAX_THREADS-1 is always safe.	*/
      GC_max_thread_index = MAX_THREADS - 1;
    }
    me = dll_thread_table + i;
  } else /* Not using DllMain */ {
    GC_ASSERT(I_HOLD_LOCK());
    GC_in_thread_creation = TRUE; /* OK to collect from unknown thread. */
    me = GC_new_thread(thread_id);
    GC_in_thread_creation = FALSE;
  }
# ifdef GC_PTHREADS
    /* me can be NULL -> segfault */
    me -> pthread_id = pthread_self();
# endif

  if (!DuplicateHandle(GetCurrentProcess(),
                 	GetCurrentThread(),
		        GetCurrentProcess(),
		        (HANDLE*)&(me -> handle),
		        0,
		        0,
		        DUPLICATE_SAME_ACCESS)) {
	DWORD last_error = GetLastError();
	GC_err_printf("Last error code: %d\n", last_error);
	ABORT("DuplicateHandle failed");
  }
  me -> stack_base = sb -> mem_base;
  /* Up until this point, GC_push_all_stacks considers this thread	*/
  /* invalid.								*/
  /* Up until this point, this entry is viewed as reserved but invalid	*/
  /* by GC_delete_thread.						*/
  me -> id = thread_id;
# if defined(THREAD_LOCAL_ALLOC)
      GC_init_thread_local((GC_tlfs)(&(me->tlfs)));
# endif
  if (me -> stack_base == NULL) 
      ABORT("Bad stack base in GC_register_my_thread_inner");
  if (GC_win32_dll_threads) {
    if (GC_please_stop) {
      AO_store(&GC_attached_thread, TRUE);
      AO_nop_full(); /* Later updates must become visible after this. */
    }
    /* We'd like to wait here, but can't, since waiting in DllMain 	*/
    /* provokes deadlocks.						*/
    /* Thus we force marking to be restarted instead.			*/
  } else {
    GC_ASSERT(!GC_please_stop);
  	/* Otherwise both we and the thread stopping code would be	*/
  	/* holding the allocation lock.					*/
  }
  return (GC_thread)(me);
}
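The slot-claiming loop above relies on InterlockedExchange returning the previous value: a return of 0 means the flag was clear and this thread just set it, so the slot is now owned. Here is a standalone sketch of that idiom; TABLE_SIZE, in_use, and claim_slot are illustrative names, not part of the collector.

#include <windows.h>

#define TABLE_SIZE 256                   /* illustrative capacity */
static volatile LONG in_use[TABLE_SIZE];

static int claim_slot(void)
{
    int i;

    for (i = 0; i < TABLE_SIZE; i++) {
        /* Previous value 0 => the slot was free and is now claimed. */
        if (InterlockedExchange(&in_use[i], 1) == 0)
            return i;
    }
    return -1;                           /* table full */
}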