Example no. 1
0
/* Decide whether the scheduler p_sched running on p_xstream must leave its
 * scheduling loop.  Returns ABT_TRUE when the scheduler has been moved to
 * ABT_SCHED_STATE_TERMINATED, ABT_FALSE otherwise.
 *
 * Locking note: on the EXIT path and on the successful FINISH path the ES's
 * top_sched_mutex is acquired here and deliberately NOT released before
 * returning — presumably the caller unlocks it after tearing down the
 * scheduler (TODO: confirm against callers).  It is released inside this
 * function only when the under-lock re-check finds the pools non-empty. */
ABT_bool ABTI_sched_has_to_stop(ABTI_sched *p_sched, ABTI_xstream *p_xstream)
{
    ABT_bool stop = ABT_FALSE;
    size_t size;

    /* Check exit request: an exit terminates the scheduler unconditionally,
     * even if its pools still contain work units. */
    if (p_sched->request & ABTI_SCHED_REQ_EXIT) {
        /* Lock is kept across the return (see locking note above). */
        ABTI_mutex_spinlock(&p_xstream->top_sched_mutex);
        p_sched->state = ABT_SCHED_STATE_TERMINATED;
        stop = ABT_TRUE;
        goto fn_exit;
    }

    size = ABTI_sched_get_effective_size(p_sched);
    if (size == 0) {
        if (p_sched->request & ABTI_SCHED_REQ_FINISH) {
            /* Check join request */
            /* We need to lock in case someone wants to migrate to this
             * scheduler */
            ABTI_mutex_spinlock(&p_xstream->top_sched_mutex);
            /* Re-check the size under the lock: a unit may have been pushed
             * (e.g. by a migration) between the first check and the lock. */
            size_t size = ABTI_sched_get_effective_size(p_sched);
            if (size == 0) {
                p_sched->state = ABT_SCHED_STATE_TERMINATED;
                stop = ABT_TRUE;
            } else {
                /* Work arrived after all: keep running and drop the lock. */
                ABTI_mutex_unlock(&p_xstream->top_sched_mutex);
            }
        } else if (p_sched->used == ABTI_SCHED_IN_POOL) {
            /* If the scheduler is a stacked one, we have to escape from the
             * scheduling function. The scheduler will be stopped if it is a
             * tasklet type. However, if the scheduler is a ULT type, we
             * context switch to the parent scheduler. */
            if (p_sched->type == ABT_SCHED_TYPE_TASK) {
                p_sched->state = ABT_SCHED_STATE_TERMINATED;
                stop = ABT_TRUE;
            } else {
                ABTI_ASSERT(p_sched->type == ABT_SCHED_TYPE_ULT);
                ABTI_sched *p_par_sched;
                p_par_sched = ABTI_xstream_get_parent_sched(p_xstream);
                /* Yield to the parent scheduler; when this ULT is resumed,
                 * control falls through to fn_exit with stop == ABT_FALSE. */
                ABTD_thread_context_switch(p_sched->p_ctx, p_par_sched->p_ctx);
            }
        }
    }

  fn_exit:
    return stop;
}
Example no. 2
0
/* Finalize a ULT whose work function has returned, then transfer control to
 * the next context to run: a pending join_many sibling, the joiner ULT, or
 * the (parent) scheduler.  On the context-switch paths this function never
 * returns to its caller.  Only the fcontext implementation exists
 * (ABT_CONFIG_USE_FCONTEXT); other backends hit the #error below. */
static inline void ABTD_thread_terminate(ABTI_thread *p_thread)
{
#if defined(ABT_CONFIG_USE_FCONTEXT)
    ABTD_thread_context *p_fctx = &p_thread->ctx;

    /* Now, the ULT has finished its job. Terminate the ULT. */
    if (p_thread->request & ABTI_THREAD_REQ_JOIN_MANY) {
        /* ABT_thread_join_many case */
        p_thread->state = ABT_THREAD_STATE_TERMINATED;

        ABTI_thread_req_arg *p_req_arg;
        ABTI_thread_join_arg *p_jarg;
        p_req_arg = ABTI_thread_get_req_arg(p_thread, ABTI_THREAD_REQ_JOIN_MANY);
        p_jarg = (ABTI_thread_join_arg *)p_req_arg->p_arg;

        /* Count this ULT as joined; then look for the next not-yet-terminated
         * ULT in the batch and run it directly on this ES, bypassing the
         * scheduler. */
        p_jarg->counter++;
        if (p_jarg->counter < p_jarg->num_threads) {
            int i;
            ABTI_thread *p_next;
            for (i = p_jarg->counter; i < p_jarg->num_threads; i++) {
                p_next = ABTI_thread_get_ptr(p_jarg->p_threads[i]);
                if (p_next->state != ABT_THREAD_STATE_TERMINATED) {
                    /* Steal p_next out of its pool and hand this ES's
                     * execution context straight to it.  The JOIN_MANY
                     * request is propagated so p_next repeats this hand-off
                     * when it terminates. */
                    ABTI_xstream *p_xstream = p_thread->p_last_xstream;
                    ABTI_POOL_REMOVE(p_next->p_pool, p_next->unit, p_xstream);
                    ABTI_thread_put_req_arg(p_next, p_req_arg);
                    /* FIXME: we may need ABTI_thread_set_request */
                    p_next->request |= ABTI_THREAD_REQ_JOIN_MANY;
                    p_next->p_last_xstream = p_xstream;
                    p_next->state = ABT_THREAD_STATE_RUNNING;
                    ABTI_local_set_thread(p_next);
                    /* Finish this context and jump into p_next; no return. */
                    ABTD_thread_finish_context(p_fctx, &p_next->ctx);
                    return;
                } else {
                    /* Already terminated: account for it and keep scanning. */
                    p_jarg->counter++;
                }
            }
        } else {
            /* Switch back to the caller ULT of join_many */
            ABTI_thread *p_caller = p_jarg->p_caller;
            ABTI_local_set_thread(p_caller);
            ABTD_thread_finish_context(p_fctx, &p_caller->ctx);
            return;
        }
    }

    if (p_fctx->p_link) {
        /* If p_link is set, it means that other ULT has called the join. */
        ABTI_thread *p_joiner = (ABTI_thread *)p_fctx->p_link;
        if (p_thread->p_last_xstream == p_joiner->p_last_xstream) {
            /* Only when the current ULT is on the same ES as p_joiner's,
             * we can jump to the joiner ULT. */
            p_thread->state = ABT_THREAD_STATE_TERMINATED;
            LOG_EVENT("[U%" PRIu64 ":E%" PRIu64 "] terminated\n",
                      ABTI_thread_get_id(p_thread),
                      p_thread->p_last_xstream->rank);

            ABTD_thread_finish_context(p_fctx, p_fctx->p_link);
            return;
        } else {
            /* If the current ULT's associated ES is different from p_joiner's,
             * we can't directly jump to p_joiner.  Instead, we wake up
             * p_joiner here so that p_joiner's scheduler can resume it. */
            ABTI_thread_set_ready(p_joiner);

            /* We don't need to use the atomic operation here because the ULT
             * will be terminated regardless of other requests. */
            p_thread->request |= ABTI_THREAD_REQ_TERMINATE;
        }
    } else {
        /* No joiner is known yet.  Atomically publish JOIN|TERMINATE; the
         * returned previous value tells us whether a joiner raced with us. */
        uint32_t req = ABTD_atomic_fetch_or_uint32(&p_thread->request,
                ABTI_THREAD_REQ_JOIN | ABTI_THREAD_REQ_TERMINATE);
        if (req & ABTI_THREAD_REQ_JOIN) {
            /* This case means there has been a join request and the joiner has
             * blocked.  We have to wake up the joiner ULT. */
            /* Spin until the joiner has published its context in p_link
             * (the joiner sets p_link after raising REQ_JOIN). */
            while ((volatile abt_ucontext_t *)p_fctx->p_link == NULL) {
                ABTD_atomic_mem_barrier();
            }
            ABTI_thread_set_ready((ABTI_thread *)p_fctx->p_link);
        }
    }

    /* No other ULT is waiting or blocked for this ULT. Since fcontext does
     * not switch to other fcontext when it finishes, we need to explicitly
     * switch to the scheduler. */
    ABTD_thread_context *p_sched_ctx;
#ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
    if (p_thread->is_sched) {
        /* If p_thread is a scheduler ULT, we have to context switch to
         * the parent scheduler. */
        ABTI_sched *p_par_sched;
        p_par_sched = ABTI_xstream_get_parent_sched(p_thread->p_last_xstream);
        p_sched_ctx = p_par_sched->p_ctx;
        ABTI_LOG_SET_SCHED(p_par_sched);
    } else {
#endif
        p_sched_ctx = ABTI_xstream_get_sched_ctx(p_thread->p_last_xstream);
        /* NOTE(review): the p_link comparison appears to detect whether the
         * scheduler context is the one that linked to us, choosing whether to
         * log the top scheduler — confirm against the logging design. */
        ABTI_LOG_SET_SCHED((p_sched_ctx == p_fctx->p_link)
                           ? ABTI_xstream_get_top_sched(p_thread->p_last_xstream)
                           : NULL);
#ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
    }
#endif
    /* Final hand-off to the scheduler context; does not return. */
    ABTD_thread_finish_context(p_fctx, p_sched_ctx);
#else
#error "Not implemented yet"
#endif
}