Example 1
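The first function logs the removal of a work unit from a pool. It distinguishes ULTs from tasklets and prints the unit ID, the rank of the execution stream (ES) the unit last ran on when that is known, the pool ID, and the rank of the consuming ES.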
void ABTI_log_pool_remove(ABTI_pool *p_pool, ABT_unit unit,
                          ABTI_xstream *p_consumer)
{
    if (gp_ABTI_global->use_logging == ABT_FALSE) return;

    ABTI_thread *p_thread = NULL;
    ABTI_task *p_task = NULL;
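    /* The removed unit is either a ULT or a tasklet; log it with the
     * corresponding prefix (U for ULTs, T for tasklets). */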
    switch (p_pool->u_get_type(unit)) {
        case ABT_UNIT_TYPE_THREAD:
            p_thread = ABTI_thread_get_ptr(p_pool->u_get_thread(unit));
            if (p_thread->p_last_xstream) {
                LOG_EVENT("[U%" PRIu64 ":E%" PRIu64 "] removed from "
                          "P%" PRIu64 " (consumer: E%" PRIu64 ")\n",
                          ABTI_thread_get_id(p_thread),
                          p_thread->p_last_xstream->rank,
                          p_pool->id,
                          p_consumer->rank);
            } else {
                LOG_EVENT("[U%" PRIu64 "] removed from P%" PRIu64 " "
                          "(consumer: E%" PRIu64 ")\n",
                          ABTI_thread_get_id(p_thread),
                          p_pool->id,
                          p_consumer->rank);
            }
            break;

        case ABT_UNIT_TYPE_TASK:
            p_task = ABTI_task_get_ptr(p_pool->u_get_task(unit));
            if (p_task->p_xstream) {
                LOG_EVENT("[T%" PRIu64 ":E%" PRIu64 "] removed from "
                          "P%" PRIu64 " (consumer: E%" PRIu64 ")\n",
                          ABTI_task_get_id(p_task),
                          p_task->p_xstream->rank,
                          p_pool->id,
                          p_consumer->rank);
            } else {
                LOG_EVENT("[T%" PRIu64 "] removed from P%" PRIu64 " "
                          "(consumer: E%" PRIu64 ")\n",
                          ABTI_task_get_id(p_task),
                          p_pool->id,
                          p_consumer->rank);
            }
            break;

        default:
            ABTI_ASSERT(0);
            break;
    }
}
Example 2
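This example is the logging entry point itself: ABTI_log_event determines what kind of work unit is calling (ULT, tasklet, scheduler, or an external thread), builds a matching prefix that identifies the unit and its ES, prepends it to the caller's format string, and writes the formatted message to the given stream.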
void ABTI_log_event(FILE *fh, const char *format, ...)
{
    if (gp_ABTI_global->use_logging == ABT_FALSE) return;

    ABT_unit_type type;
    ABTI_xstream *p_xstream = NULL;
    ABTI_thread *p_thread = NULL;
    ABTI_task *p_task = NULL;
    char *prefix_fmt = NULL, *prefix = NULL;
    char *newfmt;
    size_t tid = 0, rank = 0;
    int tid_len = 0, rank_len = 0;
    size_t newfmt_len;

    ABT_self_get_type(&type);
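    /* Choose the prefix format according to the type of the calling work
     * unit (ULT, scheduler ULT, tasklet, or an external/unknown thread). */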
    switch (type) {
        case ABT_UNIT_TYPE_THREAD:
            p_xstream = ABTI_local_get_xstream();
            p_thread = ABTI_local_get_thread();
            if (p_thread == NULL) {
                if (p_xstream && p_xstream->type != ABTI_XSTREAM_TYPE_PRIMARY) {
                    prefix_fmt = "<U%" PRIu64 ":E%" PRIu64 "> %s";
                    rank = p_xstream->rank;
                    tid = 0;
                } else {
                    prefix = "<U0:E0> ";
                    prefix_fmt = "%s%s";
                }
            } else {
                rank = p_xstream->rank;
                if (lp_ABTI_log->p_sched) {
                    prefix_fmt = "<S%" PRIu64 ":E%" PRIu64 "> %s";
                    tid = lp_ABTI_log->p_sched->id;
                } else {
                    prefix_fmt = "<U%" PRIu64 ":E%" PRIu64 "> %s";
                    tid = ABTI_thread_get_id(p_thread);
                }
            }
            break;

        case ABT_UNIT_TYPE_TASK:
            p_xstream = ABTI_local_get_xstream();
            rank = p_xstream->rank;
            p_task = ABTI_local_get_task();
            if (lp_ABTI_log->p_sched) {
                prefix_fmt = "<S%" PRIu64 ":E%" PRIu64 "> %s";
                tid = lp_ABTI_log->p_sched->id;
            } else {
                prefix_fmt = "<T%" PRIu64 ":E%" PRIu64 "> %s";
                tid = ABTI_task_get_id(p_task);
            }
            break;

        case ABT_UNIT_TYPE_EXT:
            prefix = "<EXT> ";
            prefix_fmt = "%s%s";
            break;

        default:
            prefix = "<UNKNOWN> ";
            prefix_fmt = "%s%s";
            break;
    }

    /* Compose the final format string: prepend either the computed
     * <unit:ES> prefix or the fixed textual prefix to the caller's format. */
    if (prefix == NULL) {
        tid_len = ABTU_get_int_len(tid);
        rank_len = ABTU_get_int_len(rank);
        newfmt_len = 6 + tid_len + rank_len + strlen(format);
        newfmt = (char *)ABTU_malloc(newfmt_len + 1);
        sprintf(newfmt, prefix_fmt, (uint64_t)tid, (uint64_t)rank, format);
    } else {
        newfmt_len = strlen(prefix) + strlen(format);
        newfmt = (char *)ABTU_malloc(newfmt_len + 1);
        sprintf(newfmt, prefix_fmt, prefix, format);
    }

    va_list list;
    va_start(list, format);
    vfprintf(fh, newfmt, list);
    va_end(list);
    fflush(fh);

    ABTU_free(newfmt);
}
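The LOG_EVENT calls in the other two examples presumably forward to ABTI_log_event with a fixed output stream. A minimal sketch of such a wrapper, assuming stderr as the target stream, is shown below; the macro name matches the usage above, but the exact expansion used by the library is not shown in these examples and may differ.

/* Hypothetical wrapper: forwards the caller's format string and arguments
 * to ABTI_log_event, writing to stderr. */
#define LOG_EVENT(fmt, ...) ABTI_log_event(stderr, fmt, __VA_ARGS__)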
Example 3
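The last example comes from the fcontext-based ULT termination path: when a ULT finishes, ABTD_thread_terminate services a pending join_many request if one exists, hands control to a blocked joiner when possible, and otherwise switches back to the scheduler context explicitly, since a finished fcontext does not switch anywhere on its own.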
static inline void ABTD_thread_terminate(ABTI_thread *p_thread)
{
#if defined(ABT_CONFIG_USE_FCONTEXT)
    ABTD_thread_context *p_fctx = &p_thread->ctx;

    /* Now, the ULT has finished its job. Terminate the ULT. */
    if (p_thread->request & ABTI_THREAD_REQ_JOIN_MANY) {
        /* ABT_thread_join_many case */
        p_thread->state = ABT_THREAD_STATE_TERMINATED;

        ABTI_thread_req_arg *p_req_arg;
        ABTI_thread_join_arg *p_jarg;
        p_req_arg = ABTI_thread_get_req_arg(p_thread, ABTI_THREAD_REQ_JOIN_MANY);
        p_jarg = (ABTI_thread_join_arg *)p_req_arg->p_arg;

        p_jarg->counter++;
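        /* If more ULTs remain to be joined, switch directly to the next
         * unterminated one instead of going back to the scheduler. */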
        if (p_jarg->counter < p_jarg->num_threads) {
            int i;
            ABTI_thread *p_next;
            for (i = p_jarg->counter; i < p_jarg->num_threads; i++) {
                p_next = ABTI_thread_get_ptr(p_jarg->p_threads[i]);
                if (p_next->state != ABT_THREAD_STATE_TERMINATED) {
                    ABTI_xstream *p_xstream = p_thread->p_last_xstream;
                    ABTI_POOL_REMOVE(p_next->p_pool, p_next->unit, p_xstream);
                    ABTI_thread_put_req_arg(p_next, p_req_arg);
                    /* FIXME: we may need ABTI_thread_set_request */
                    p_next->request |= ABTI_THREAD_REQ_JOIN_MANY;
                    p_next->p_last_xstream = p_xstream;
                    p_next->state = ABT_THREAD_STATE_RUNNING;
                    ABTI_local_set_thread(p_next);
                    ABTD_thread_finish_context(p_fctx, &p_next->ctx);
                    return;
                } else {
                    p_jarg->counter++;
                }
            }
        } else {
            /* Switch back to the caller ULT of join_many */
            ABTI_thread *p_caller = p_jarg->p_caller;
            ABTI_local_set_thread(p_caller);
            ABTD_thread_finish_context(p_fctx, &p_caller->ctx);
            return;
        }
    }

    if (p_fctx->p_link) {
        /* If p_link is set, another ULT has already called join on this ULT. */
        ABTI_thread *p_joiner = (ABTI_thread *)p_fctx->p_link;
        if (p_thread->p_last_xstream == p_joiner->p_last_xstream) {
            /* We can jump directly to the joiner ULT only when the current
             * ULT is on the same ES as p_joiner. */
            p_thread->state = ABT_THREAD_STATE_TERMINATED;
            LOG_EVENT("[U%" PRIu64 ":E%" PRIu64 "] terminated\n",
                      ABTI_thread_get_id(p_thread),
                      p_thread->p_last_xstream->rank);

            ABTD_thread_finish_context(p_fctx, p_fctx->p_link);
            return;
        } else {
            /* If the current ULT's associated ES is different from p_joiner's,
             * we can't directly jump to p_joiner.  Instead, we wake up
             * p_joiner here so that p_joiner's scheduler can resume it. */
            ABTI_thread_set_ready(p_joiner);

            /* We don't need to use the atomic operation here because the ULT
             * will be terminated regardless of other requests. */
            p_thread->request |= ABTI_THREAD_REQ_TERMINATE;
        }
    } else {
        uint32_t req = ABTD_atomic_fetch_or_uint32(&p_thread->request,
                ABTI_THREAD_REQ_JOIN | ABTI_THREAD_REQ_TERMINATE);
        if (req & ABTI_THREAD_REQ_JOIN) {
            /* This case means there has been a join request and the joiner has
             * blocked.  We have to wake up the joiner ULT. */
            while ((volatile abt_ucontext_t *)p_fctx->p_link == NULL) {
                ABTD_atomic_mem_barrier();
            }
            ABTI_thread_set_ready((ABTI_thread *)p_fctx->p_link);
        }
    }

    /* No other ULT is waiting for or blocked on this ULT. Since an fcontext
     * does not switch to another fcontext when it finishes, we need to
     * explicitly switch to the scheduler. */
    ABTD_thread_context *p_sched_ctx;
#ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
    if (p_thread->is_sched) {
        /* If p_thread is a scheduler ULT, we have to context switch to
         * the parent scheduler. */
        ABTI_sched *p_par_sched;
        p_par_sched = ABTI_xstream_get_parent_sched(p_thread->p_last_xstream);
        p_sched_ctx = p_par_sched->p_ctx;
        ABTI_LOG_SET_SCHED(p_par_sched);
    } else {
#endif
        p_sched_ctx = ABTI_xstream_get_sched_ctx(p_thread->p_last_xstream);
        ABTI_LOG_SET_SCHED((p_sched_ctx == p_fctx->p_link)
                           ? ABTI_xstream_get_top_sched(p_thread->p_last_xstream)
                           : NULL);
#ifndef ABT_CONFIG_DISABLE_STACKABLE_SCHED
    }
#endif
    ABTD_thread_finish_context(p_fctx, p_sched_ctx);
#else
#error "Not implemented yet"
#endif
}