Example #1
void
MR_lc_spawn_off_func(MR_LoopControl *lc, MR_Unsigned lcs_idx, MR_Code
                     *code_ptr)
{
    MR_LoopControlSlot *lcs = &(lc->MR_lc_slots[lcs_idx]);

#ifdef MR_DEBUG_LOOP_CONTROL
    fprintf(stderr, "lc_spawn_off(%p, %d, %p) sp: %p\n",
            lc, lcs_idx, code_ptr, MR_sp);
#endif

    lcs->MR_lcs_context->MR_ctxt_resume = code_ptr;
    lcs->MR_lcs_context->MR_ctxt_parent_sp = MR_sp;
    MR_schedule_context(lcs->MR_lcs_context);
}
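
For readers without the runtime in front of them, here is a minimal,
self-contained sketch of the spawn-off pattern above. The Context, Slot,
schedule_context, and spawn_off names are hypothetical stand-ins, not
Mercury APIs; the sketch only mirrors the idea of publishing a resume
address and a parent stack pointer on a per-slot context before handing
that context to the scheduler.

#include <stdio.h>

/* Hypothetical stand-ins for MR_Context and MR_LoopControlSlot. */
typedef struct {
    void    *resume;        /* address the context resumes at */
    void    *parent_sp;     /* stack pointer inherited from the spawner */
} Context;

typedef struct {
    Context *context;
} Slot;

/*
** In the real runtime, MR_schedule_context appends the context to a
** run queue; here we only report what would be made runnable.
*/
static void
schedule_context(Context *ctxt)
{
    printf("scheduled context: resume=%p parent_sp=%p\n",
        ctxt->resume, ctxt->parent_sp);
}

/*
** Mirrors MR_lc_spawn_off_func: record where the worker should resume
** and which stack it belongs to, then make the context runnable.
*/
static void
spawn_off(Slot *slots, unsigned idx, void *code_ptr, void *sp)
{
    Slot *slot = &slots[idx];

    slot->context->resume = code_ptr;
    slot->context->parent_sp = sp;
    schedule_context(slot->context);
}

int
main(void)
{
    static char code_stub;          /* stands in for a code label */
    int         frame;              /* stands in for the parent frame */
    Context     ctxt = { NULL, NULL };
    Slot        slots[1] = { { &ctxt } };

    spawn_off(slots, 0, &code_stub, &frame);
    return 0;
}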
Example #2
/*
** In the low level C grades, the "condition variable" created when an STM
** transaction blocks is actually a pointer to the transaction log.
** "Signalling" it consists of going through the STM variables listed in the
** log and removing the waiters attached to them for the context listed
** in the log. After this, the context can be safely rescheduled.
*/
void
MR_STM_condvar_signal(MR_STM_ConditionVar *cvar)
{
    /*
    ** Calling MR_STM_unwait here should be safe, as this signalling
    ** happens in response to a commit, while the committing thread
    ** holds the global STM lock. Note that an MR_STM_ConditionVar IS
    ** an MR_STM_TransLog if MR_HIGHLEVEL_CODE is not defined, which is
    ** why cvar is passed twice.
    */
    MR_STM_unwait(cvar, cvar);

#if defined(MR_STM_DEBUG)
        fprintf(stderr, "STM RESCHEDULING: log <0x%.8lx>\n", (MR_Word)cvar);
#endif

    MR_schedule_context(MR_STM_context_from_condvar(cvar));
}
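
To make the comment above concrete, here is a hedged sketch of the
unwait step using hypothetical stand-in types (Waiter, StmVar, TransLog
and the function unwait are illustrative; the real MR_STM_unwait and the
runtime's waiter lists are structured differently). The point is only
the mechanism: walk the variables recorded in the log and unhook this
context's waiter from each one, after which the context can safely be
rescheduled.

#include <stddef.h>
#include <stdio.h>

typedef struct Waiter {
    struct Waiter   *next;
    void            *context;   /* the context blocked on this variable */
} Waiter;

typedef struct {
    Waiter  *waiters;           /* contexts blocked on this STM variable */
} StmVar;

typedef struct {
    StmVar  **vars;             /* variables this transaction touched */
    size_t  num_vars;
    void    *context;           /* context that owns this log */
} TransLog;

/*
** Plays the role MR_STM_unwait plays above: remove every waiter
** belonging to log->context from the variables listed in the log.
** (Real code would also free the unhooked waiter nodes.)
*/
static void
unwait(TransLog *log)
{
    size_t i;

    for (i = 0; i < log->num_vars; i++) {
        Waiter **link = &log->vars[i]->waiters;

        while (*link != NULL) {
            if ((*link)->context == log->context) {
                *link = (*link)->next;
            } else {
                link = &(*link)->next;
            }
        }
    }
}

int
main(void)
{
    int         ctxt;           /* dummy blocked context */
    Waiter      w = { NULL, &ctxt };
    StmVar      v = { &w };
    StmVar      *vars[1] = { &v };
    TransLog    log = { vars, 1, &ctxt };

    unwait(&log);
    printf("waiters left: %s\n", v.waiters == NULL ? "none" : "some");
    return 0;
}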
Example #3
void
MR_lc_join(MR_LoopControl *lc, MR_Unsigned lcs_idx)
{
    MR_LoopControlSlot  *lcs;
    MR_bool             last_worker;
    MR_Context          *wakeup_context;

    lcs = &(lc->MR_lc_slots[lcs_idx]);

#ifdef MR_DEBUG_LOOP_CONTROL
    fprintf(stderr, "lc_join(%p, %d)\n", lc, lcs_idx);
#endif

    lcs->MR_lcs_is_free = MR_TRUE;
    lc->MR_lc_free_slot_hint = lcs_idx;
    /* Ensure the slot is free before we perform the decrement. */
    MR_CPU_SFENCE;

    /*
    ** We need to know whether we are the last worker to finish. The
    ** atomic decrement-and-test below answers that in one step, so no
    ** separate read-and-compare (and hence no CAS loop) is needed.
    */
    last_worker =
        MR_atomic_dec_and_is_zero_int(&(lc->MR_lc_outstanding_workers));

    /*
    ** If this is the last worker to finish, then take the lock before checking
    ** the master context field, otherwise we might race and end up never
    ** resuming the master.
    */
    if (last_worker) {
        MR_US_SPIN_LOCK(&(lc->MR_lc_master_context_lock));
        /*
        ** Don't read the master field until after we have the lock
        */
        MR_CPU_MFENCE;
    }

    /*
    ** If the master thread is suspended, wake it up, provided that either:
    ** - the loop has finished and this is the last worker to exit, or
    ** - the loop has not finished (so the master can create more work).
    */
    if ((lc->MR_lc_master_context != NULL) &&
            ((lc->MR_lc_finished && last_worker) || (!lc->MR_lc_finished)))
    {
        /*
        ** If we are not the last worker we do not yet hold the lock;
        ** take it now before re-reading the master context field.
        */
        if (!last_worker) {
            MR_US_SPIN_LOCK(&(lc->MR_lc_master_context_lock));
            /*
            ** Don't read the master field until after we have the lock
            */
            MR_CPU_MFENCE;
        }
        wakeup_context = lc->MR_lc_master_context;
        lc->MR_lc_master_context = NULL;
        MR_CPU_SFENCE; /* Make the NULL write visible before we unlock. */
        MR_US_UNLOCK(&(lc->MR_lc_master_context_lock));
        if (wakeup_context != NULL) {
#ifdef MR_DEBUG_LOOP_CONTROL
            fprintf(stderr, "Waking up master\n");
#endif
            /*
            ** XXX: it would be faster to switch to this context
            ** ourselves, since we are about to unload our own context;
            ** alternatively, we could switch to another worker context
            ** if there is one.
            */
            MR_schedule_context(wakeup_context);
        }
    } else if (last_worker) {
        MR_US_UNLOCK(&(lc->MR_lc_master_context_lock));
    }
}
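
The join logic above reduces to a pattern that stands on its own:
atomically decrement an outstanding-worker count, and let only the
thread that takes it to zero wake the suspended master. The sketch below
models that pattern with C11 atomics and a pthread condition variable;
MR_atomic_dec_and_is_zero_int, the spin lock, and MR_schedule_context
are imitated rather than called, and every name here is illustrative.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_WORKERS 4

static atomic_int       outstanding = NUM_WORKERS;
static pthread_mutex_t  master_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t   master_cond = PTHREAD_COND_INITIALIZER;
static bool             master_suspended = true;

static void *
worker(void *arg)
{
    bool last_worker;

    (void) arg;
    /* ... the loop body's work would happen here ... */

    /*
    ** Decrement and test in a single atomic step, standing in for
    ** MR_atomic_dec_and_is_zero_int in MR_lc_join.
    */
    last_worker = (atomic_fetch_sub(&outstanding, 1) == 1);

    if (last_worker) {
        /* Only the last worker to finish wakes the master. */
        pthread_mutex_lock(&master_lock);
        master_suspended = false;
        pthread_cond_signal(&master_cond);
        pthread_mutex_unlock(&master_lock);
    }
    return NULL;
}

int
main(void)
{
    pthread_t   tids[NUM_WORKERS];
    int         i;

    for (i = 0; i < NUM_WORKERS; i++) {
        pthread_create(&tids[i], NULL, worker, NULL);
    }

    /* The "master" suspends until the last worker signals it. */
    pthread_mutex_lock(&master_lock);
    while (master_suspended) {
        pthread_cond_wait(&master_cond, &master_lock);
    }
    pthread_mutex_unlock(&master_lock);
    printf("all %d workers joined\n", NUM_WORKERS);

    for (i = 0; i < NUM_WORKERS; i++) {
        pthread_join(tids[i], NULL);
    }
    return 0;
}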