Exemple #1
0
/* Refill the cache's lock-free free list.
 *
 * Builds a fresh chain of slabs via _prepopulate_list() and publishes it
 * atomically.  Fast path: if the free list is currently empty (NULL), a
 * single CAS installs the new chain.  Slow path: another thread pushed
 * entries concurrently, so splice the current head behind our new tail
 * and retry the CAS until it succeeds.
 *
 * NOTE(review): newtail is dereferenced unconditionally, so this assumes
 * _prepopulate_list() always produces a non-empty chain -- confirm.
 *
 * Fixes vs. previous revision: the do/while loop was missing its
 * terminating semicolon and the function was missing its closing brace.
 */
static inline void _populate_free_list( lockless_cache_t *cache ) {
	assert( cache != NULL );
	slab_list_t *sl = &( cache->slab_list );

	slab_t *newhead = NULL, *newtail = NULL;
	_prepopulate_list( cache, &newhead, &newtail );

	/* Fast path: empty free list -> install the new chain directly. */
	if( ! AO_compare_and_swap_full( &( sl->free_list ), NULL, newhead ) ) {
		slab_t *oldfl;
		do {
			/* Link the existing list behind our new tail, then try to
			 * swing the head pointer.  Retry if a racing thread won. */
			oldfl = AO_load_full( &( sl->free_list ) );
			AO_store_full( &( newtail->next ), oldfl );
		} while(
			! AO_compare_and_swap_full( &( sl->free_list ), oldfl, newhead )
		);
	}
}
Exemple #2
0
/* Atomically prepend C onto the list stored in *SLOT.
 * Allocates one fresh cons, points its cdr at the current head, and
 * publishes it with a full-barrier CAS; retries on contention. */
void
ecl_atomic_push(cl_object *slot, cl_object c)
{
        cl_object fresh_cons = ecl_list1(c);
        for (;;) {
                cl_object head = (cl_object)AO_load((AO_t*)slot);
                ECL_RPLACD(fresh_cons, head);
                if (AO_compare_and_swap_full((AO_t*)slot, (AO_t)head, (AO_t)fresh_cons))
                        break;
        }
}
Exemple #3
0
/* Atomically take the entire contents of *SLOT, leaving ECL_NIL behind.
 * Returns the value the slot held just before being emptied. */
cl_object
ecl_atomic_get(cl_object *slot)
{
	for (;;) {
		cl_object snapshot = (cl_object)AO_load((AO_t*)slot);
		if (AO_compare_and_swap_full((AO_t*)slot, (AO_t)snapshot,
					     (AO_t)ECL_NIL))
			return snapshot;
	}
}
Exemple #4
0
/* Atomically detach the first cons of the list in *SLOT and return it.
 * The slot is left pointing at the tail (the CDR of the removed cons).
 * Retries the full-barrier CAS until no other thread races us. */
cl_object
ecl_atomic_pop(cl_object *slot)
{
        cl_object head, tail;
        for (;;) {
                head = (cl_object)AO_load((AO_t*)slot);
                tail = CDR(head);
                if (AO_compare_and_swap_full((AO_t*)slot, (AO_t)head, (AO_t)tail))
                        return head;
        }
}
Exemple #5
0
/* Force a lazy pair.
   NB: When an error occurs during forcing, we release the lock of the
   pair, so that the pair can be forced again.  However, the generator
   has already caused some side-effect before the error, so the next
   forcing may not yield a correct next value.  Another plausible option
   is to mark the pair 'unforcible' permanently, by lp->owner == (AO_t)2,
   and let subsequent attempt of forcing the pair fail.
 */
/* Force the lazy pair LP in place, turning it into an ordinary pair.
 * Ownership is arbitrated through lp->owner:
 *   0            - unowned, forcible
 *   SCM_WORD(vm) - currently being forced by that VM
 *   (AO_t)1      - forcing completed
 * Returns LP itself, which is an (extended) pair on success.
 * NOTE(review): this excerpt appears truncated -- the function's closing
 * brace (and any code after the do/while) is not visible here. */
ScmObj Scm_ForceLazyPair(volatile ScmLazyPair *lp)
{
    /* 1ms poll interval used while waiting for another forcer. */
    static const struct timespec req = {0, 1000000};
    struct timespec rem;
    ScmVM *vm = Scm_VM();

    do {
        /* Try to claim ownership: 0 -> this VM's word. */
        if (AO_compare_and_swap_full(&lp->owner, 0, SCM_WORD(vm))) {
            /* Here we own the lazy pair. */
            ScmObj item = lp->item;
            /* Calling generator might change VM state, so we protect
               incomplete stack frame if there's any. */
            int extra_frame_pushed = Scm__VMProtectStack(vm);
            SCM_UNWIND_PROTECT {
                ScmObj val = Scm_ApplyRec0(lp->generator);
                /* If the generator returned extra values, vals[0] names the
                   generator to use for the rest of the stream. */
                ScmObj newgen = (vm->numVals == 1)? lp->generator : vm->vals[0];
                vm->numVals = 1; /* make sure the extra val won't leak out */

                if (SCM_EOFP(val)) {
                    /* Generator exhausted: terminate the stream. */
                    lp->item = SCM_NIL;
                    lp->generator = SCM_NIL;
                } else {
                    /* Chain a fresh lazy pair holding the next value. */
                    ScmObj newlp = Scm_MakeLazyPair(val, newgen);
                    lp->item = newlp;
                    lp->generator = SCM_NIL;
                }
                /* Full barrier: item/generator updates must be visible
                   before the car is published below. */
                AO_nop_full();
                SCM_SET_CAR(lp, item);
                /* We don't need barrier here. */
                lp->owner = (AO_t)1; /* mark forcing complete */
            } SCM_WHEN_ERROR {
                lp->owner = (AO_t)0; /*NB: See above about error handling*/
                SCM_NEXT_HANDLER;
            } SCM_END_PROTECT;
            if (extra_frame_pushed) {
                Scm__VMUnprotectStack(vm);
            }
            return SCM_OBJ(lp); /* lp is now an (extended) pair */
        }
        /* Check if we're already working on forcing this pair.  Unlike
           force/delay, We don't allow recursive forcing of lazy pair.
           Since generators are supposed to be called every time to yield
           a new value, so it is ambiguous what value should be returned
           if a generator calls itself recursively. */
        if (lp->owner == SCM_WORD(vm)) {
            /* NB: lp->owner will be reset by the original caller of
               the generator. */
            Scm_Error("Attempt to recursively force a lazy pair.");
        }
        /* Somebody's already working on forcing.  Let's wait for it
           to finish, or to abort. */
        /* HTAG == 7 presumably identifies a still-lazy pair -- confirm
           against Gauche's tag definitions. */
        while (SCM_HTAG(lp) == 7 && lp->owner != 0) {
            nanosleep(&req, &rem);
        }
    } while (lp->owner == 0); /* we retry if the previous owner abandoned. */
Exemple #6
0
/* Atomically increment the index stored in *SLOT by one.
 * Returns the incremented (new) value. */
cl_index
ecl_atomic_index_incf(cl_index *slot)
{
	for (;;) {
		AO_t seen = AO_load((AO_t*)slot);
		AO_t bumped = seen + 1;
		if (AO_compare_and_swap_full((AO_t*)slot, seen, bumped))
			return (cl_index)bumped;
	}
}
//**********************************************
/*
 * Acquire the writer side of a reader-writer spin lock.
 * Spins until reader_count_and_flag can be CAS'd from 0 (no readers, no
 * writer) to RWL_ACTIVE_WRITER_FLAG.  Every call bumps
 * Thread_Stats[STAT_WRITE]; each failed acquisition attempt bumps
 * Thread_Stats[STAT_WSPIN].
 */
void write_lock(void *vlock)
{
    rwl_lock_t *rwl = (rwl_lock_t *)vlock;

    Thread_Stats[STAT_WRITE]++;
    for (;;)
    {
        if (AO_compare_and_swap_full(&rwl->reader_count_and_flag,
                    0, RWL_ACTIVE_WRITER_FLAG))
            break;
        /* busy-wait; tally the contention */
        Thread_Stats[STAT_WSPIN]++;
    }
    //assert((AO_load(&rwl->reader_count_and_flag) & RWL_ACTIVE_WRITER_FLAG) != 0);
}