Exemplo n.º 1
0
/* Cancel a ULT on behalf of the scheduler.
 *
 * When we cancel a ULT, if another ULT is blocked to join the canceled ULT,
 * we have to wake up the joiner ULT.  However, unlike the case when the
 * ULT has finished its execution and calls ABTD_thread_terminate/exit,
 * this function is called by the scheduler.  Therefore, we should not
 * context switch to the joiner ULT and need to always wake it up.
 *
 * @param[in] p_thread  ULT being canceled (must not be NULL)
 */
void ABTD_thread_cancel(ABTI_thread *p_thread)
{
#if defined(ABT_CONFIG_USE_FCONTEXT)
    ABTD_thread_context *p_fctx = &p_thread->ctx;

    if (p_fctx->p_link) {
        /* If p_link is set, another ULT has already called join and
         * published its context; just make it runnable again. */
        ABTI_thread *p_joiner = (ABTI_thread *)p_fctx->p_link;
        ABTI_thread_set_ready(p_joiner);
    } else {
        /* Atomically set both JOIN and TERMINATE so a concurrent joiner
         * observes the termination; the returned old value tells us
         * whether a join request was already pending. */
        uint32_t req = ABTD_atomic_fetch_or_uint32(&p_thread->request,
                ABTI_THREAD_REQ_JOIN | ABTI_THREAD_REQ_TERMINATE);
        if (req & ABTI_THREAD_REQ_JOIN) {
            /* A join request exists but the joiner has not yet published
             * p_link.  Spin until it appears.
             *
             * BUGFIX: read through a volatile-qualified lvalue so the
             * compiler reloads p_link on every iteration.  The previous
             * code cast only the *loaded value* to volatile
             * ((volatile abt_ucontext_t *)p_fctx->p_link), which does not
             * qualify the access and lets the optimizer hoist the load
             * out of the loop, turning this into an infinite spin. */
            while (*(abt_ucontext_t *volatile *)&p_fctx->p_link == NULL) {
                ABTD_atomic_mem_barrier();
            }
            ABTI_thread *p_joiner = (ABTI_thread *)p_fctx->p_link;
            ABTI_thread_set_ready(p_joiner);
        }
    }
#else
#error "Not implemented yet"
#endif
}
Exemplo n.º 2
0
/**
 * @ingroup COND
 * @brief   Broadcast a condition.
 *
 * \c ABT_cond_broadcast() signals all ULTs that are waiting on the
 * condition variable.
 * This routine shall have no effect if no ULTs are currently blocked on the
 * condition variable.
 *
 * @param[in] cond   handle to the condition variable
 * @return Error code
 * @retval ABT_SUCCESS on success
 */
int ABT_cond_broadcast(ABT_cond cond)
{
    int abt_errno = ABT_SUCCESS;
    ABTI_cond *p_cond = ABTI_cond_get_ptr(cond);
    ABTI_CHECK_NULL_COND_PTR(p_cond);

    ABTI_mutex_spinlock(&p_cond->mutex);

    /* Nothing is blocked on this condition variable: release the internal
     * lock and report success without touching the waiter list. */
    if (p_cond->num_waiters == 0) {
        ABTI_mutex_unlock(&p_cond->mutex);
        goto fn_exit;
    }

    /* Walk the circular waiter list exactly once, waking every entry.
     * The successor pointer is saved before the node's links are cleared. */
    ABTI_unit *p_head = p_cond->p_head;
    ABTI_unit *p_cur = p_head;
    do {
        ABTI_unit *p_succ = p_cur->p_next;

        /* Detach the unit from the list. */
        p_cur->p_prev = NULL;
        p_cur->p_next = NULL;

        if (p_cur->type == ABT_UNIT_TYPE_THREAD) {
            /* A ULT waiter: hand it back to its scheduler. */
            ABTI_thread_set_ready(ABTI_thread_get_ptr(p_cur->thread));
        } else {
            /* An external thread waiter: flip its signal word, on which
             * the external thread is spinning. */
            volatile int *p_ext_signal = (volatile int *)p_cur->pool;
            *p_ext_signal = 1;
        }

        p_cur = p_succ;
    } while (p_cur != p_head);

    /* Reset the condition variable to its empty state. */
    p_cond->p_waiter_mutex = NULL;
    p_cond->num_waiters = 0;
    p_cond->p_head = NULL;
    p_cond->p_tail = NULL;

    ABTI_mutex_unlock(&p_cond->mutex);

  fn_exit:
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
Exemplo n.º 3
0
/* Finish a ULT whose user function has returned, and context-switch away.
 *
 * fcontext does not automatically switch to another context when a ULT's
 * function completes, so this routine decides where control goes next:
 * the next ULT of a join_many batch, a joiner blocked on this ULT (only
 * when it lives on the same ES), or the (possibly parent) scheduler.
 *
 * @param[in] p_thread  the terminating ULT (the caller's own descriptor)
 */
static inline void ABTD_thread_terminate(ABTI_thread *p_thread)
{
#if defined(ABT_CONFIG_USE_FCONTEXT)
    ABTD_thread_context *p_fctx = &p_thread->ctx;

    /* Now, the ULT has finished its job. Terminate the ULT. */
    if (p_thread->request & ABTI_THREAD_REQ_JOIN_MANY) {
        /* ABT_thread_join_many case: this ULT is one of a batch being
         * joined; chain directly into the next unfinished ULT of the
         * batch instead of returning to the scheduler. */
        p_thread->state = ABT_THREAD_STATE_TERMINATED;

        ABTI_thread_req_arg *p_req_arg;
        ABTI_thread_join_arg *p_jarg;
        p_req_arg = ABTI_thread_get_req_arg(p_thread, ABTI_THREAD_REQ_JOIN_MANY);
        p_jarg = (ABTI_thread_join_arg *)p_req_arg->p_arg;

        /* One more ULT of the batch (this one) is done. */
        p_jarg->counter++;
        if (p_jarg->counter < p_jarg->num_threads) {
            int i;
            ABTI_thread *p_next;
            /* Scan forward for the next not-yet-terminated ULT; already
             * terminated ones just advance the counter. */
            for (i = p_jarg->counter; i < p_jarg->num_threads; i++) {
                p_next = ABTI_thread_get_ptr(p_jarg->p_threads[i]);
                if (p_next->state != ABT_THREAD_STATE_TERMINATED) {
                    /* Pull p_next out of its pool and run it here, on this
                     * ULT's ES, propagating the join_many request so it
                     * repeats this hand-off when it finishes. */
                    ABTI_xstream *p_xstream = p_thread->p_last_xstream;
                    ABTI_POOL_REMOVE(p_next->p_pool, p_next->unit, p_xstream);
                    ABTI_thread_put_req_arg(p_next, p_req_arg);
                    /* FIXME: we may need ABTI_thread_set_request */
                    p_next->request |= ABTI_THREAD_REQ_JOIN_MANY;
                    p_next->p_last_xstream = p_xstream;
                    p_next->state = ABT_THREAD_STATE_RUNNING;
                    ABTI_local_set_thread(p_next);
                    /* Direct context switch; this function never resumes. */
                    ABTD_thread_finish_context(p_fctx, &p_next->ctx);
                    return;
                } else {
                    p_jarg->counter++;
                }
            }
        } else {
            /* Switch back to the caller ULT of join_many */
            ABTI_thread *p_caller = p_jarg->p_caller;
            ABTI_local_set_thread(p_caller);
            ABTD_thread_finish_context(p_fctx, &p_caller->ctx);
            return;
        }
    }

    if (p_fctx->p_link) {
        /* If p_link is set, it means that other ULT has called the join. */
        ABTI_thread *p_joiner = (ABTI_thread *)p_fctx->p_link;
        if (p_thread->p_last_xstream == p_joiner->p_last_xstream) {
            /* Only when the current ULT is on the same ES as p_joiner's,
             * we can jump to the joiner ULT. */
            p_thread->state = ABT_THREAD_STATE_TERMINATED;
            LOG_EVENT("[U%" PRIu64 ":E%" PRIu64 "] terminated\n",
                      ABTI_thread_get_id(p_thread),
                      p_thread->p_last_xstream->rank);

            ABTD_thread_finish_context(p_fctx, p_fctx->p_link);
            return;
        } else {
            /* If the current ULT's associated ES is different from p_joiner's,
             * we can't directly jump to p_joiner.  Instead, we wake up
             * p_joiner here so that p_joiner's scheduler can resume it. */
            ABTI_thread_set_ready(p_joiner);

            /* We don't need to use the atomic operation here because the ULT
             * will be terminated regardless of other requests. */
            p_thread->request |= ABTI_THREAD_REQ_TERMINATE;
        }
    } else {
        /* No joiner has published a context yet; set JOIN|TERMINATE
         * atomically and inspect the old value to see whether a join
         * request raced with us. */
        uint32_t req = ABTD_atomic_fetch_or_uint32(&p_thread->request,
                ABTI_THREAD_REQ_JOIN | ABTI_THREAD_REQ_TERMINATE);
        if (req & ABTI_THREAD_REQ_JOIN) {
            /* This case means there has been a join request and the joiner has
             * blocked.  We have to wake up the joiner ULT.
             *
             * NOTE(review): the cast qualifies only the loaded value, not
             * the p_link lvalue, so the compiler is not obliged to reload
             * p_link each iteration — confirm the optimizer cannot hoist
             * this load (see the same pattern in ABTD_thread_cancel). */
            while ((volatile abt_ucontext_t *)p_fctx->p_link == NULL) {
                ABTD_atomic_mem_barrier();
            }
            ABTI_thread_set_ready((ABTI_thread *)p_fctx->p_link);
        }
    }

    /* No other ULT is waiting or blocked for this ULT. Since fcontext does
     * not switch to other fcontext when it finishes, we need to explicitly
     * switch to the scheduler. */
    ABTD_thread_context *p_sched_ctx;
#ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
    if (p_thread->is_sched) {
        /* If p_thread is a scheduler ULT, we have to context switch to
         * the parent scheduler. */
        ABTI_sched *p_par_sched;
        p_par_sched = ABTI_xstream_get_parent_sched(p_thread->p_last_xstream);
        p_sched_ctx = p_par_sched->p_ctx;
        ABTI_LOG_SET_SCHED(p_par_sched);
    } else {
#endif
        p_sched_ctx = ABTI_xstream_get_sched_ctx(p_thread->p_last_xstream);
        /* Log the top scheduler only when we are actually returning to it. */
        ABTI_LOG_SET_SCHED((p_sched_ctx == p_fctx->p_link)
                           ? ABTI_xstream_get_top_sched(p_thread->p_last_xstream)
                           : NULL);
#ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
    }
#endif
    ABTD_thread_finish_context(p_fctx, p_sched_ctx);
#else
#error "Not implemented yet"
#endif
}