Example No. 1
int main(int   argc,
         char *argv[])
{
    ptl_handle_ni_t ni_logical;
    ptl_pt_index_t  logical_pt_index;
    ptl_process_t   myself;
    struct timeval  start, stop;
    int             potato = 0;
    ENTRY_T         potato_catcher;
    HANDLE_T        potato_catcher_handle;
    ptl_md_t        potato_launcher;
    ptl_handle_md_t potato_launcher_handle;
    int             num_procs;

    CHECK_RETURNVAL(PtlInit());

    CHECK_RETURNVAL(libtest_init());

    num_procs = libtest_get_size();

    if (NULL != getenv("MAKELEVEL") && num_procs > 2) {
        return 77;
    }

    CHECK_RETURNVAL(PtlNIInit
                    (PTL_IFACE_DEFAULT, NI_TYPE | PTL_NI_LOGICAL, PTL_PID_ANY,
                     NULL, NULL, &ni_logical));

    CHECK_RETURNVAL(PtlSetMap(ni_logical, num_procs,
                              libtest_get_mapping(ni_logical)));

    CHECK_RETURNVAL(PtlGetId(ni_logical, &myself));
    CHECK_RETURNVAL(PtlPTAlloc
                    (ni_logical, 0, PTL_EQ_NONE, PTL_PT_ANY,
                     &logical_pt_index));
    assert(logical_pt_index == 0);
    /* Now do the initial setup on ni_logical */
    potato_catcher.start   = &potato;
    potato_catcher.length  = sizeof(potato);
    potato_catcher.uid     = PTL_UID_ANY;
    potato_catcher.options = OPTIONS;
#if INTERFACE == 1
    potato_catcher.match_id.rank = PTL_RANK_ANY;
    potato_catcher.match_bits    = 1;
    potato_catcher.ignore_bits   = ~potato_catcher.match_bits;
#endif
    CHECK_RETURNVAL(PtlCTAlloc(ni_logical, &potato_catcher.ct_handle));
    CHECK_RETURNVAL(APPEND
                    (ni_logical, logical_pt_index, &potato_catcher,
                     PTL_PRIORITY_LIST, NULL, &potato_catcher_handle));
    /* Now do a barrier to make sure that everyone has their logical
     * interface set up */
    libtest_barrier();

    /* now I can communicate between ranks with ni_logical */

    /* set up the potato launcher */
    potato_launcher.start   = &potato;
    potato_launcher.length  = sizeof(potato);
    potato_launcher.options =
        PTL_MD_EVENT_CT_ACK | PTL_MD_EVENT_CT_SEND;
    potato_launcher.eq_handle = PTL_EQ_NONE;    // i.e. don't queue send events
    CHECK_RETURNVAL(PtlCTAlloc(ni_logical, &potato_launcher.ct_handle));
    CHECK_RETURNVAL(PtlMDBind
                    (ni_logical, &potato_launcher, &potato_launcher_handle));

    /* rank 0 starts the potato going */
    if (myself.rank == 0) {
        ptl_process_t nextrank;
        nextrank.rank  = myself.rank + 1;
        nextrank.rank *= (nextrank.rank <= num_procs - 1);
        gettimeofday(&start, NULL);
        CHECK_RETURNVAL(PtlPut(potato_launcher_handle, 0, potato_launcher.length,
                               (LOOPS == 1) ? PTL_OC_ACK_REQ : PTL_NO_ACK_REQ,
                               nextrank, logical_pt_index, 1, 0,
                               NULL, 1));
    }

    {   /* the potato-passing loop */
        size_t         waitfor;
        ptl_ct_event_t ctc;
        ptl_process_t  nextrank;
        nextrank.rank  = myself.rank + 1;
        nextrank.rank *= (nextrank.rank <= num_procs - 1);
        for (waitfor = 1; waitfor <= LOOPS; ++waitfor) {
            CHECK_RETURNVAL(PtlCTWait(potato_catcher.ct_handle, waitfor, &ctc));        // wait for potato
            assert(ctc.failure == 0);
            assert(ctc.success == waitfor);
            /* I have the potato! */
            ++potato;
            if (potato < LOOPS * (num_procs)) { // otherwise, the recipient may have exited
                /* Bomb's away! */
                if (myself.rank == 0) {
                    CHECK_RETURNVAL(PtlPut(potato_launcher_handle, 0,
                                           potato_launcher.length,
                                           (waitfor == (LOOPS - 1)) ? PTL_OC_ACK_REQ : PTL_NO_ACK_REQ,
                                           nextrank, logical_pt_index, 3, 0, NULL, 2));
                } else {
                    CHECK_RETURNVAL(PtlPut(potato_launcher_handle, 0,
                                           potato_launcher.length,
                                           (waitfor == LOOPS) ? PTL_OC_ACK_REQ : PTL_NO_ACK_REQ,
                                           nextrank, logical_pt_index, 3, 0, NULL, 2));
                }
            }
        }
        // make sure that last send completed before exiting
        CHECK_RETURNVAL(PtlCTWait(potato_launcher.ct_handle, LOOPS+1, &ctc));
        assert(ctc.failure == 0);
    }
    if (myself.rank == 0) {
        double accumulate = 0.0;
        gettimeofday(&stop, NULL);
        accumulate =
            (stop.tv_sec + stop.tv_usec * 1e-6) - (start.tv_sec +
                    start.tv_usec * 1e-6);
        /* calculate the average time waiting */
        printf("Total time: %g secs\n", accumulate);
        accumulate /= LOOPS;
        printf("Average time around the loop: %g microseconds\n",
               accumulate * 1e6);
        accumulate /= num_procs;
        printf("Average catch-to-toss latency: %g microseconds\n",
               accumulate * 1e6);
    }

    /* cleanup */
    CHECK_RETURNVAL(PtlMDRelease(potato_launcher_handle));
    CHECK_RETURNVAL(PtlCTFree(potato_launcher.ct_handle));
    CHECK_RETURNVAL(UNLINK(potato_catcher_handle));
    CHECK_RETURNVAL(PtlCTFree(potato_catcher.ct_handle));

    /* major cleanup */
    CHECK_RETURNVAL(PtlPTFree(ni_logical, logical_pt_index));
    CHECK_RETURNVAL(PtlNIFini(ni_logical));
    CHECK_RETURNVAL(libtest_fini());
    PtlFini();

    return 0;
}
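The test above leans on macros from the Portals4 test harness (CHECK_RETURNVAL, ENTRY_T, HANDLE_T, OPTIONS, APPEND, NI_TYPE, LOOPS) whose definitions are not shown. As a rough sketch of the error-checking wrapper only -- the real harness header may differ:

#include <stdio.h>
#include <stdlib.h>
#include <portals4.h>

/* Hypothetical approximation of the harness macro: abort loudly if a
 * Portals call does not return PTL_OK. */
#define CHECK_RETURNVAL(x)                                        \
    do {                                                          \
        int check_ret_ = (x);                                     \
        if (check_ret_ != PTL_OK) {                               \
            fprintf(stderr, "%s:%d: %s returned %d\n",            \
                    __FILE__, __LINE__, #x, check_ret_);          \
            abort();                                              \
        }                                                         \
    } while (0)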
Example No. 2
static int
component_init(bool enable_progress_threads, bool enable_mpi_threads)
{
    int ret;
    ptl_ni_limits_t actual;

    ret = PtlInit();
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_osc_base_framework.framework_output,
                            "%s:%d: PtlInit failed: %d\n",
                            __FILE__, __LINE__, ret);
        return OMPI_ERROR;
    }

    ret = PtlNIInit(PTL_IFACE_DEFAULT,
                    PTL_NI_PHYSICAL | PTL_NI_MATCHING,
                    PTL_PID_ANY,
                    NULL,
                    &actual,
                    &mca_osc_portals4_component.matching_ni_h);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_osc_base_framework.framework_output,
                            "%s:%d: PtlNIInit failed: %d\n",
                            __FILE__, __LINE__, ret);
        return ret;
    }

    /* BWB: FIX ME: Need to make sure our ID matches with the MTL... */

    mca_osc_portals4_component.matching_atomic_max = actual.max_atomic_size;
    mca_osc_portals4_component.matching_fetch_atomic_max = actual.max_fetch_atomic_size;
    mca_osc_portals4_component.matching_atomic_ordered_size =
        MAX(actual.max_waw_ordered_size, actual.max_war_ordered_size);

    ret = PtlEQAlloc(mca_osc_portals4_component.matching_ni_h,
                     4096,
                     &mca_osc_portals4_component.matching_eq_h);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_osc_base_framework.framework_output,
                            "%s:%d: PtlEQAlloc failed: %d\n",
                            __FILE__, __LINE__, ret);
        return ret;
    }

    ret = PtlPTAlloc(mca_osc_portals4_component.matching_ni_h,
                     0,
                     mca_osc_portals4_component.matching_eq_h,
                     4,
                     &mca_osc_portals4_component.matching_pt_idx);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_osc_base_framework.framework_output,
                            "%s:%d: PtlPTAlloc failed: %d\n",
                            __FILE__, __LINE__, ret);
        return ret;
    }

    OBJ_CONSTRUCT(&mca_osc_portals4_component.requests, opal_free_list_t);
    ret = opal_free_list_init (&mca_osc_portals4_component.requests,
                               sizeof(ompi_osc_portals4_request_t),
                               opal_cache_line_size,
                               OBJ_CLASS(ompi_osc_portals4_request_t),
                               0, 0, 8, 0, 8, NULL, 0, NULL, NULL, NULL);
    if (OMPI_SUCCESS != ret) {
        opal_output_verbose(1, ompi_osc_base_framework.framework_output,
                            "%s:%d: opal_free_list_init failed: %d\n",
                            __FILE__, __LINE__, ret);
        return ret;
    }

    ret = opal_progress_register(progress_callback);
    if (OMPI_SUCCESS != ret) {
        opal_output_verbose(1, ompi_osc_base_framework.framework_output,
                            "%s:%d: opal_progress_register failed: %d\n",
                            __FILE__, __LINE__, ret);
        return ret;
    }

    return OMPI_SUCCESS;
}
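PtlNIInit() fills `actual` with the limits the implementation actually granted, and the component caches only the atomic and ordering limits. A minimal sketch, not part of the component, of how the remaining ptl_ni_limits_t fields could be sanity-checked before they are cached (check_ni_limits is a hypothetical helper; field names are the ones used elsewhere in these examples):

#include <stdio.h>
#include <portals4.h>

static void check_ni_limits(const ptl_ni_limits_t *actual)
{
    /* warn if the granted message size looks too small */
    if (actual->max_msg_size < (1 << 20)) {
        fprintf(stderr, "warning: max_msg_size is only %lu bytes\n",
                (unsigned long) actual->max_msg_size);
    }
    fprintf(stderr, "limits: atomic=%lu fetch_atomic=%lu waw_ordered=%lu eqs=%d\n",
            (unsigned long) actual->max_atomic_size,
            (unsigned long) actual->max_fetch_atomic_size,
            (unsigned long) actual->max_waw_ordered_size,
            actual->max_eqs);
}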
Example No. 3
int main(int   argc,
         char *argv[])
{
    ptl_handle_ni_t ni_handle;
    ptl_process_t   *procs;
    int             rank;
    ptl_pt_index_t  pt_index, signal_pt_index;
    HANDLE_T        signal_e_handle;
    HANDLE_T        signal_e2_handle;
    int             num_procs;
    ptl_handle_eq_t eq_handle;
    ptl_handle_ct_t ct_handle;
    ptl_handle_md_t md_handle;
    ptl_ni_limits_t limits_reqd, limits_actual;
    ENTRY_T         value_e;

    limits_reqd.max_entries = 1024;
    limits_reqd.max_unexpected_headers = ITERS*2;
    limits_reqd.max_mds = 1024;
    limits_reqd.max_eqs = 1024;
    limits_reqd.max_cts = 1024;
    limits_reqd.max_pt_index = 64;
    limits_reqd.max_iovecs = 1024;
    limits_reqd.max_list_size = 1024;
    limits_reqd.max_triggered_ops = 1024;
    limits_reqd.max_msg_size = 1048576;
    limits_reqd.max_atomic_size = 1048576;
    limits_reqd.max_fetch_atomic_size = 1048576;
    limits_reqd.max_waw_ordered_size = 1048576;
    limits_reqd.max_war_ordered_size = 1048576;
    limits_reqd.max_volatile_size = 1048576;
    limits_reqd.features = 0;

    CHECK_RETURNVAL(PtlInit());

    CHECK_RETURNVAL(libtest_init());

    rank = libtest_get_rank();
    num_procs = libtest_get_size();
    if (num_procs < 2) {
        fprintf(stderr, "test_flowctl_noeq requires at least two processes\n");
        return 77;
    }

    int iters;

    if (num_procs < ITERS)
        iters = ITERS*2+1;
    else
        iters = ITERS;

    CHECK_RETURNVAL(PtlNIInit(PTL_IFACE_DEFAULT, NI_TYPE | PTL_NI_LOGICAL,
                              PTL_PID_ANY, &limits_reqd, &limits_actual, &ni_handle));
    procs = libtest_get_mapping(ni_handle);
    CHECK_RETURNVAL(PtlSetMap(ni_handle, num_procs, procs));


    if (0 == rank) {

        /* create data PT space */
        CHECK_RETURNVAL(PtlEQAlloc(ni_handle, (num_procs - 1) * iters + 64, &eq_handle));
        CHECK_RETURNVAL(PtlPTAlloc(ni_handle, PTL_PT_FLOWCTRL, eq_handle, 5,
                                   &pt_index));

        /* create signal ME */
        CHECK_RETURNVAL(PtlCTAlloc(ni_handle, &ct_handle));
        CHECK_RETURNVAL(PtlPTAlloc(ni_handle, 1, eq_handle, 6,
                                   &signal_pt_index));
        value_e.start = NULL;
        value_e.length = 0;
        value_e.ct_handle = ct_handle;
        value_e.uid = PTL_UID_ANY;
        value_e.options = OPTIONS | PTL_LE_EVENT_CT_COMM;
#if INTERFACE == 1
        value_e.match_id.rank = PTL_RANK_ANY;
        value_e.match_bits = 0;
        value_e.ignore_bits = 0;
#endif
        CHECK_RETURNVAL(APPEND(ni_handle, 5, &value_e, PTL_OVERFLOW_LIST, NULL, &signal_e_handle));
    } else {
        ptl_md_t        md;

        /* 16 extra just in case... */
        CHECK_RETURNVAL(PtlEQAlloc(ni_handle, iters*2 + 16, &eq_handle));

        md.start = NULL;
        md.length = 0;
        md.options = 0;
        md.eq_handle = eq_handle;
        md.ct_handle = PTL_CT_NONE;

        CHECK_RETURNVAL(PtlMDBind(ni_handle, &md, &md_handle));
    }

    fprintf(stderr,"at barrier \n");
    libtest_barrier();

    if (0 == rank) {
        ptl_ct_event_t  ct;
        ptl_event_t ev;
        int ret, count = 0, saw_flowctl = 0;

        fprintf(stderr,"begin ctwait \n");
        /* wait for signal counts */
        CHECK_RETURNVAL(PtlCTWait(ct_handle, iters / 2, &ct));
        if (ct.success != iters / 2 || ct.failure != 0) {
            return 1;
        }
        fprintf(stderr,"done CT wait \n");
        /* wait for event entries */
        while (1) {
            ret = PtlEQGet(eq_handle, &ev);
            if (PTL_OK == ret) {
                count++;
                fprintf(stderr, "found EQ value \n");
            } else if (ret == PTL_EQ_EMPTY) {
                continue;
            } else {
                fprintf(stderr, "0: Unexpected return code from EQGet: %d\n", ret);
                return 1;
            }

            if (ev.type == PTL_EVENT_PT_DISABLED) {
                saw_flowctl++;
                break;
            }
        }

        fprintf(stderr, "0: Saw %d flowctl\n", saw_flowctl);
        if (saw_flowctl == 0) {
            return 1;
        }
        /* Now clear out all of the unexpected messages so we can clean up everything */
        CHECK_RETURNVAL(APPEND(ni_handle, 5, &value_e, PTL_PRIORITY_LIST, NULL, &signal_e2_handle));
        ret = PTL_OK;
        while (ret != PTL_EQ_EMPTY)
            ret = PtlEQGet(eq_handle, &ev);
    } else {
        ptl_process_t target;
        ptl_event_t ev;
        int ret, count = 0, fails = 0;
        int i;

        target.rank = 0;
        printf("beginning puts \n");
        for (i = 0 ; i < iters ; ++i) {
            CHECK_RETURNVAL(PtlPut(md_handle,
                                   0,
                                   0,
                                   PTL_ACK_REQ,
                                   target,
                                   5,
                                   0,
                                   0,
                                   NULL,
                                   0));
            usleep(100);
        }

        while (count < iters) {
            ret = PtlEQGet(eq_handle, &ev);
            if (PTL_EQ_EMPTY == ret) {
                continue;
            } else if (PTL_OK != ret) {
                fprintf(stderr, "%d: PtlEQGet returned %d\n", rank, ret);
                return 1;
            }

            if (ev.ni_fail_type == PTL_NI_OK) {
                if (ev.type == PTL_EVENT_SEND) {
                    continue;
                } else if (ev.type == PTL_EVENT_ACK) {
                    count++;
                } else {
                    fprintf(stderr, "%d: Unexpected event type %d\n", rank, ev.type);
                }
            } else if (ev.ni_fail_type == PTL_NI_PT_DISABLED) {
                count++;
                fails++;
            } else if (ev.ni_fail_type == PTL_EQ_EMPTY) {
                continue;
            } else if (ev.ni_fail_type == PTL_EQ_DROPPED) {
                continue;
            } else {
                fprintf(stderr, "%d: Unexpected fail type: %d\n", rank, ev.ni_fail_type);
                return 1;
            }
        }

        fprintf(stderr, "%d: Saw %d of %d ACKs as fails\n", rank, fails, count);
    }

    fprintf(stderr,"at final barrier \n");

    libtest_barrier();

    if (0 == rank) {

        CHECK_RETURNVAL(UNLINK(signal_e_handle));
        CHECK_RETURNVAL(UNLINK(signal_e2_handle));
        CHECK_RETURNVAL(PtlPTFree(ni_handle, signal_pt_index));
        CHECK_RETURNVAL(PtlCTFree(ct_handle));
        CHECK_RETURNVAL(PtlPTFree(ni_handle, pt_index));
        CHECK_RETURNVAL(PtlEQFree(eq_handle));
    } else {
        CHECK_RETURNVAL(PtlMDRelease(md_handle));
        CHECK_RETURNVAL(PtlEQFree(eq_handle));
    }

    fprintf(stderr,"final cleanup \n");
    CHECK_RETURNVAL(PtlNIFini(ni_handle));
    CHECK_RETURNVAL(libtest_fini());
    PtlFini();

    return 0;
}
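The rank-0 loops above busy-poll PtlEQGet(), spinning whenever the queue is empty. A sketch of the same wait written with PtlEQPoll(), which blocks with a timeout instead of spinning; wait_for_pt_disabled is a hypothetical helper, not part of the test, and it assumes the standard Portals4 PtlEQPoll() signature:

#include <portals4.h>

/* Wait until a PTL_EVENT_PT_DISABLED event shows up on eq_handle,
 * sleeping in 5-second slices instead of spinning on PtlEQGet(). */
static int wait_for_pt_disabled(ptl_handle_eq_t eq_handle)
{
    ptl_event_t  ev;
    unsigned int which;
    int          ret;

    for (;;) {
        ret = PtlEQPoll(&eq_handle, 1, 5000 /* ms */, &ev, &which);
        if (PTL_EQ_EMPTY == ret) {
            continue;                      /* timed out; keep waiting */
        } else if (PTL_OK != ret && PTL_EQ_DROPPED != ret) {
            return ret;                    /* real error */
        }
        if (ev.type == PTL_EVENT_PT_DISABLED) {
            return PTL_OK;
        }
    }
}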
Example No. 4
/*
    /!\ Called for each process /!\
 */
static int
portals4_init_query(bool enable_progress_threads,
        bool enable_mpi_threads)
{
    int ret;
    ptl_md_t md;
    ptl_me_t me;

    /* Initialize Portals and create a physical, matching interface */
    ret = PtlInit();
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_coll_base_framework.framework_output,
                "%s:%d: PtlInit failed: %d\n",
                __FILE__, __LINE__, ret);
        return OMPI_ERROR;
    }

    ret = PtlNIInit(PTL_IFACE_DEFAULT,
            PTL_NI_PHYSICAL | PTL_NI_MATCHING,
            PTL_PID_ANY,
            NULL,
            &mca_coll_portals4_component.ni_limits,
            &mca_coll_portals4_component.ni_h);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_coll_base_framework.framework_output,
                "%s:%d: PtlNIInit failed: %d\n",
                __FILE__, __LINE__, ret);
        return OMPI_ERROR;
    }


    ret = PtlGetId(mca_coll_portals4_component.ni_h, &mca_coll_portals4_component.id);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_coll_base_framework.framework_output,
                "%s:%d: PtlGetid failed: %d\n",
                __FILE__, __LINE__, ret);
        return OMPI_ERROR;
    }
    /* FIX ME: Need to make sure our ID matches with the MTL... */
    ret = PtlGetUid(mca_coll_portals4_component.ni_h, &mca_coll_portals4_component.uid);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_coll_base_framework.framework_output,
                "%s:%d: PtlGetUid failed: %d\n",
                __FILE__, __LINE__, ret);
        return OMPI_ERROR;
    }

    ret = PtlEQAlloc(mca_coll_portals4_component.ni_h,
            MCA_COLL_PORTALS4_EQ_SIZE,
            &mca_coll_portals4_component.eq_h);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_coll_base_framework.framework_output,
                "%s:%d: PtlEQAlloc failed: %d\n",
                __FILE__, __LINE__, ret);
        return OMPI_ERROR;
    }

    ret = PtlPTAlloc(mca_coll_portals4_component.ni_h,
            0,
            mca_coll_portals4_component.eq_h,
            REQ_COLL_TABLE_ID,
            &mca_coll_portals4_component.pt_idx);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_coll_base_framework.framework_output,
                "%s:%d: PtlPTAlloc failed: %d\n",
                __FILE__, __LINE__, ret);
        return OMPI_ERROR;
    }

    if (mca_coll_portals4_component.pt_idx != REQ_COLL_TABLE_ID) {
        opal_output_verbose(1, ompi_coll_base_framework.framework_output,
                "%s:%d: PtlPTAlloc return wrong pt_idx: %d\n",
                __FILE__, __LINE__,
                mca_coll_portals4_component.finish_pt_idx);
        return OMPI_ERROR;
    }

    ret = PtlPTAlloc(mca_coll_portals4_component.ni_h,
            0,
            mca_coll_portals4_component.eq_h,
            REQ_COLL_FINISH_TABLE_ID,
            &mca_coll_portals4_component.finish_pt_idx);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_coll_base_framework.framework_output,
                "%s:%d: PtlPTAlloc failed: %d\n",
                __FILE__, __LINE__, ret);
        return OMPI_ERROR;
    }

    if (mca_coll_portals4_component.finish_pt_idx != REQ_COLL_FINISH_TABLE_ID) {
        opal_output_verbose(1, ompi_coll_base_framework.framework_output,
                "%s:%d: PtlPTAlloc return wrong pt_idx: %d\n",
                __FILE__, __LINE__,
                mca_coll_portals4_component.finish_pt_idx);
        return OMPI_ERROR;
    }

    /* Bind MD/MDs across all memory.  We prefer (for obvious reasons)
       to have a single MD across all of memory */
    memset(&md, 0, sizeof(ptl_md_t));
    md.start = 0;
    md.length = 0;
    md.options = 0;
    md.eq_handle = PTL_EQ_NONE;
    md.ct_handle = PTL_CT_NONE;

    ret = PtlMDBind(mca_coll_portals4_component.ni_h,
            &md,
            &mca_coll_portals4_component.zero_md_h);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_coll_base_framework.framework_output,
                "%s:%d: PtlMDBind failed: %d\n",
                __FILE__, __LINE__, ret);
        return OMPI_ERROR;
    }

    md.start = 0;
    md.length = PTL_SIZE_MAX;
    md.options = 0;
    md.eq_handle = PTL_EQ_NONE;
    md.ct_handle = PTL_CT_NONE;

    ret = PtlMDBind(mca_coll_portals4_component.ni_h,
            &md,
            &mca_coll_portals4_component.data_md_h);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_coll_base_framework.framework_output,
                "%s:%d: PtlMDBind failed: %d\n",
                __FILE__, __LINE__, ret);
        return OMPI_ERROR;
    }
    OPAL_OUTPUT_VERBOSE((90, ompi_coll_base_framework.framework_output, "PtlMDBind start=%p length=%x\n", md.start, md.length));

    /* setup finish ack ME */
    me.start = NULL;
    me.length = 0;
    me.ct_handle = PTL_CT_NONE;
    me.min_free = 0;
    me.uid = mca_coll_portals4_component.uid;
    me.options = PTL_ME_OP_PUT |
            PTL_ME_EVENT_LINK_DISABLE | PTL_ME_EVENT_UNLINK_DISABLE;
    me.match_id.phys.nid = PTL_NID_ANY;
    me.match_id.phys.pid = PTL_PID_ANY;
    me.match_bits = 0;
    me.ignore_bits = 0;

    ret = PtlMEAppend(mca_coll_portals4_component.ni_h,
            mca_coll_portals4_component.finish_pt_idx,
            &me,
            PTL_PRIORITY_LIST,
            NULL,
            &mca_coll_portals4_component.finish_me_h);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_coll_base_framework.framework_output,
                "%s:%d: PtlMEAppend of barrier unexpected failed: %d\n",
                __FILE__, __LINE__, ret);
        return OMPI_ERROR;
    }

    /* This ME is used for RTR exchange only */
    me.start = NULL;
    me.length = 0;
    me.ct_handle = PTL_CT_NONE;
    me.min_free = 0;
    me.uid = mca_coll_portals4_component.uid;
    me.options = PTL_ME_OP_PUT |
            PTL_ME_EVENT_SUCCESS_DISABLE | PTL_ME_EVENT_OVER_DISABLE |
            PTL_ME_EVENT_LINK_DISABLE | PTL_ME_EVENT_UNLINK_DISABLE;
    me.match_id.phys.nid = PTL_NID_ANY;
    me.match_id.phys.pid = PTL_PID_ANY;

    /* Note : the RTR bit must be set to match this ME,
     * this allows to discriminate the RTR from data flow
     * (especially for the Barrier operations)
     */
    COLL_PORTALS4_SET_BITS(me.match_bits, 0, 0, 1, 0, 0, 0);
    me.ignore_bits = ~COLL_PORTALS4_RTR_MASK;

    ret = PtlMEAppend(mca_coll_portals4_component.ni_h,
            mca_coll_portals4_component.pt_idx,
            &me,
            PTL_OVERFLOW_LIST,
            NULL,
            &mca_coll_portals4_component.unex_me_h);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_coll_base_framework.framework_output,
                "%s:%d: PtlMEAppend of barrier unexpected failed: %d\n",
                __FILE__, __LINE__, ret);
        return OMPI_ERROR;
    }

    /* activate progress callback */
    ret = opal_progress_register(portals4_progress);
    if (OMPI_SUCCESS != ret) {
        opal_output_verbose(1, ompi_coll_base_framework.framework_output,
                "%s:%d: opal_progress_register failed: %d\n",
                __FILE__, __LINE__, ret);
        return OMPI_ERROR;

    }
    return OMPI_SUCCESS;

}
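The registered portals4_progress callback is not shown in this snippet. A minimal sketch of the usual shape of such a callback -- drain the component EQ and report how many events were handled -- under the assumption that opal progress callbacks return the number of events processed; portals4_progress_sketch is hypothetical, and the real OMPI routine also dispatches each event to the collective state machine:

static int portals4_progress_sketch(void)
{
    ptl_event_t ev;
    int count = 0, ret;

    while (1) {
        ret = PtlEQGet(mca_coll_portals4_component.eq_h, &ev);
        if (PTL_EQ_EMPTY == ret) {
            break;                          /* nothing pending */
        }
        if (PTL_OK != ret && PTL_EQ_DROPPED != ret) {
            break;                          /* unexpected error */
        }
        /* ... dispatch ev to the request identified by ev.user_ptr ... */
        count++;
    }
    return count;
}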
Example No. 5
static int ptl_init(MPIDI_PG_t *pg_p, int pg_rank, char **bc_val_p, int *val_max_sz_p)
{
    int mpi_errno = MPI_SUCCESS;
    int ret;
    ptl_md_t md;
    ptl_ni_limits_t desired;
    MPIR_FUNC_VERBOSE_STATE_DECL(MPID_STATE_PTL_INIT);

    MPIR_FUNC_VERBOSE_ENTER(MPID_STATE_PTL_INIT);

    /* first make sure that our private fields in the vc and req fit into the area provided  */
    MPIR_Assert(sizeof(MPID_nem_ptl_vc_area) <= MPIDI_NEM_VC_NETMOD_AREA_LEN);
    MPIR_Assert(sizeof(MPID_nem_ptl_req_area) <= MPIDI_NEM_REQ_NETMOD_AREA_LEN);

    /* Make sure our IOV is the same as portals4's IOV */
    MPIR_Assert(sizeof(ptl_iovec_t) == sizeof(MPL_IOV));
    MPIR_Assert(((void*)&(((ptl_iovec_t*)0)->iov_base)) == ((void*)&(((MPL_IOV*)0)->MPL_IOV_BUF)));
    MPIR_Assert(((void*)&(((ptl_iovec_t*)0)->iov_len))  == ((void*)&(((MPL_IOV*)0)->MPL_IOV_LEN)));
    MPIR_Assert(sizeof(((ptl_iovec_t*)0)->iov_len) == sizeof(((MPL_IOV*)0)->MPL_IOV_LEN));
            

    mpi_errno = MPIDI_CH3I_Register_anysource_notification(MPID_nem_ptl_anysource_posted, MPID_nem_ptl_anysource_matched);
    if (mpi_errno) MPIR_ERR_POP(mpi_errno);

    MPIDI_Anysource_improbe_fn = MPID_nem_ptl_anysource_improbe;

    /* init portals */
    ret = PtlInit();
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlinit", "**ptlinit %s", MPID_nem_ptl_strerror(ret));
    
    /* do an interface pre-init to get the default limits struct */
    ret = PtlNIInit(PTL_IFACE_DEFAULT, PTL_NI_MATCHING | PTL_NI_PHYSICAL,
                    PTL_PID_ANY, NULL, &desired, &MPIDI_nem_ptl_ni);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlniinit", "**ptlniinit %s", MPID_nem_ptl_strerror(ret));

    /* finalize the interface so we can re-init with our desired maximums */
    ret = PtlNIFini(MPIDI_nem_ptl_ni);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlnifini", "**ptlnifini %s", MPID_nem_ptl_strerror(ret));

    /* set higher limits if they are determined to be too low */
    if (desired.max_unexpected_headers < UNEXPECTED_HDR_COUNT && getenv("PTL_LIM_MAX_UNEXPECTED_HEADERS") == NULL)
        desired.max_unexpected_headers = UNEXPECTED_HDR_COUNT;
    if (desired.max_list_size < LIST_SIZE && getenv("PTL_LIM_MAX_LIST_SIZE") == NULL)
        desired.max_list_size = LIST_SIZE;
    if (desired.max_entries < ENTRY_COUNT && getenv("PTL_LIM_MAX_ENTRIES") == NULL)
        desired.max_entries = ENTRY_COUNT;

    /* do the real init */
    ret = PtlNIInit(PTL_IFACE_DEFAULT, PTL_NI_MATCHING | PTL_NI_PHYSICAL,
                    PTL_PID_ANY, &desired, &MPIDI_nem_ptl_ni_limits, &MPIDI_nem_ptl_ni);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlniinit", "**ptlniinit %s", MPID_nem_ptl_strerror(ret));

    /* allocate EQs for each portal */
    ret = PtlEQAlloc(MPIDI_nem_ptl_ni, EVENT_COUNT, &MPIDI_nem_ptl_eq);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptleqalloc", "**ptleqalloc %s", MPID_nem_ptl_strerror(ret));

    ret = PtlEQAlloc(MPIDI_nem_ptl_ni, EVENT_COUNT, &MPIDI_nem_ptl_get_eq);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptleqalloc", "**ptleqalloc %s", MPID_nem_ptl_strerror(ret));

    ret = PtlEQAlloc(MPIDI_nem_ptl_ni, EVENT_COUNT, &MPIDI_nem_ptl_control_eq);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptleqalloc", "**ptleqalloc %s", MPID_nem_ptl_strerror(ret));

    ret = PtlEQAlloc(MPIDI_nem_ptl_ni, EVENT_COUNT, &MPIDI_nem_ptl_rpt_eq);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptleqalloc", "**ptleqalloc %s", MPID_nem_ptl_strerror(ret));

    /* allocate a separate EQ for origin events; with this, we can rate-limit
       operations to prevent a locally triggered flow-control event */
    ret = PtlEQAlloc(MPIDI_nem_ptl_ni, EVENT_COUNT, &MPIDI_nem_ptl_origin_eq);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptleqalloc", "**ptleqalloc %s", MPID_nem_ptl_strerror(ret));

    /* allocate portal for matching messages */
    ret = PtlPTAlloc(MPIDI_nem_ptl_ni, PTL_PT_ONLY_USE_ONCE | PTL_PT_ONLY_TRUNCATE | PTL_PT_FLOWCTRL, MPIDI_nem_ptl_eq,
                     PTL_PT_ANY, &MPIDI_nem_ptl_pt);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlptalloc", "**ptlptalloc %s", MPID_nem_ptl_strerror(ret));

    /* allocate portal for large messages where receiver does a get */
    ret = PtlPTAlloc(MPIDI_nem_ptl_ni, PTL_PT_ONLY_USE_ONCE | PTL_PT_ONLY_TRUNCATE | PTL_PT_FLOWCTRL, MPIDI_nem_ptl_get_eq,
                     PTL_PT_ANY, &MPIDI_nem_ptl_get_pt);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlptalloc", "**ptlptalloc %s", MPID_nem_ptl_strerror(ret));

    /* allocate portal for MPICH control messages */
    ret = PtlPTAlloc(MPIDI_nem_ptl_ni, PTL_PT_ONLY_USE_ONCE | PTL_PT_ONLY_TRUNCATE | PTL_PT_FLOWCTRL, MPIDI_nem_ptl_control_eq,
                     PTL_PT_ANY, &MPIDI_nem_ptl_control_pt);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlptalloc", "**ptlptalloc %s", MPID_nem_ptl_strerror(ret));

    /* allocate retransmission portal that rportals pairs with the primary portal */
    ret = PtlPTAlloc(MPIDI_nem_ptl_ni, PTL_PT_ONLY_USE_ONCE | PTL_PT_ONLY_TRUNCATE | PTL_PT_FLOWCTRL, MPIDI_nem_ptl_rpt_eq,
                     PTL_PT_ANY, &MPIDI_nem_ptl_rpt_pt);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlptalloc", "**ptlptalloc %s", MPID_nem_ptl_strerror(ret));

    /* allocate retransmission portal that rportals pairs with the get portal */
    ret = PtlPTAlloc(MPIDI_nem_ptl_ni, PTL_PT_ONLY_USE_ONCE | PTL_PT_ONLY_TRUNCATE | PTL_PT_FLOWCTRL, MPIDI_nem_ptl_rpt_eq,
                     PTL_PT_ANY, &MPIDI_nem_ptl_get_rpt_pt);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlptalloc", "**ptlptalloc %s", MPID_nem_ptl_strerror(ret));

    /* allocate retransmission portal that rportals pairs with the control portal */
    ret = PtlPTAlloc(MPIDI_nem_ptl_ni, PTL_PT_ONLY_USE_ONCE | PTL_PT_ONLY_TRUNCATE | PTL_PT_FLOWCTRL, MPIDI_nem_ptl_rpt_eq,
                     PTL_PT_ANY, &MPIDI_nem_ptl_control_rpt_pt);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlptalloc", "**ptlptalloc %s", MPID_nem_ptl_strerror(ret));

    /* create an MD that covers all of memory */
    md.start = 0;
    md.length = (ptl_size_t)-1;
    md.options = 0x0;
    md.eq_handle = MPIDI_nem_ptl_origin_eq;
    md.ct_handle = PTL_CT_NONE;
    ret = PtlMDBind(MPIDI_nem_ptl_ni, &md, &MPIDI_nem_ptl_global_md);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlmdbind", "**ptlmdbind %s", MPID_nem_ptl_strerror(ret));

    /* currently, rportals only works with a single NI and EQ */
    ret = MPID_nem_ptl_rptl_init(MPIDI_Process.my_pg->size, ORIGIN_EVENTS, get_target_info);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlniinit", "**ptlniinit %s", MPID_nem_ptl_strerror(ret));

    /* allow rportal to manage the primary portal and retransmit if needed */
    ret = MPID_nem_ptl_rptl_ptinit(MPIDI_nem_ptl_ni, MPIDI_nem_ptl_origin_eq, MPIDI_nem_ptl_pt, MPIDI_nem_ptl_rpt_pt);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlptalloc", "**ptlptalloc %s", MPID_nem_ptl_strerror(ret));

    /* allow rportal to manage the get and control portals, but we
     * don't expect retransmission to be needed on the get portal, so
     * we pass PTL_PT_ANY as the dummy portal.  unfortunately, portals
     * does not have an "invalid" PT constant, which would have been
     * more appropriate to pass over here. */
    ret = MPID_nem_ptl_rptl_ptinit(MPIDI_nem_ptl_ni, MPIDI_nem_ptl_origin_eq, MPIDI_nem_ptl_get_pt, MPIDI_nem_ptl_get_rpt_pt);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlptalloc", "**ptlptalloc %s", MPID_nem_ptl_strerror(ret));

    ret = MPID_nem_ptl_rptl_ptinit(MPIDI_nem_ptl_ni, MPIDI_nem_ptl_origin_eq, MPIDI_nem_ptl_control_pt, MPIDI_nem_ptl_control_rpt_pt);
    MPIR_ERR_CHKANDJUMP1(ret, mpi_errno, MPI_ERR_OTHER, "**ptlptalloc", "**ptlptalloc %s", MPID_nem_ptl_strerror(ret));

    /* create business card */
    mpi_errno = get_business_card(pg_rank, bc_val_p, val_max_sz_p);
    if (mpi_errno) MPIR_ERR_POP(mpi_errno);

    /* init other modules */
    mpi_errno = MPID_nem_ptl_poll_init();
    if (mpi_errno) MPIR_ERR_POP(mpi_errno);

    mpi_errno = MPID_nem_ptl_nm_init();
    if (mpi_errno) MPIR_ERR_POP(mpi_errno);
    
 fn_exit:
    MPIR_FUNC_VERBOSE_EXIT(MPID_STATE_PTL_INIT);
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
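MPID_nem_ptl_strerror() is used in every error macro above but is not defined in this snippet. A hypothetical stand-in, ptl_strerror_sketch, that maps the Portals4 return codes appearing in these examples to strings; the real MPICH helper covers the full set of codes:

static const char *ptl_strerror_sketch(int ret)
{
    switch (ret) {
        case PTL_OK:          return "PTL_OK";
        case PTL_FAIL:        return "PTL_FAIL";
        case PTL_ARG_INVALID: return "PTL_ARG_INVALID";
        case PTL_NO_INIT:     return "PTL_NO_INIT";
        case PTL_EQ_EMPTY:    return "PTL_EQ_EMPTY";
        case PTL_EQ_DROPPED:  return "PTL_EQ_DROPPED";
        default:              return "unknown Portals4 return code";
    }
}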
Example No. 6
static int
btl_portals4_init_interface(void)
{
    mca_btl_portals4_module_t *portals4_btl;
    unsigned int ret, interface;
    ptl_md_t md;
    ptl_me_t me;

// The initialisation of the EQs, PTs and MEs must be done after PtlSetMap()
    for (interface=0; interface<mca_btl_portals4_component.num_btls; interface++) {
        portals4_btl = mca_btl_portals4_component.btls[interface];

        /* create event queue */
        ret = PtlEQAlloc(portals4_btl->portals_ni_h,
                     mca_btl_portals4_component.recv_queue_size,
                     &portals4_btl->recv_eq_h);
        if (PTL_OK != ret) {
            opal_output_verbose(1, opal_btl_base_framework.framework_output,
                            "%s:%d: PtlEQAlloc failed for NI %d: %d",
                            __FILE__, __LINE__, interface, ret);
            goto error;
        }
        mca_btl_portals4_component.eqs_h[interface] = portals4_btl->recv_eq_h;
        OPAL_OUTPUT_VERBOSE((90, opal_btl_base_framework.framework_output,
            "PtlEQAlloc (recv_eq=%d) OK for NI %d\n", portals4_btl->recv_eq_h, interface));

        /* Create recv_idx portal table entry */
        ret = PtlPTAlloc(portals4_btl->portals_ni_h,
                     PTL_PT_ONLY_TRUNCATE,
                     portals4_btl->recv_eq_h,
                     REQ_BTL_TABLE_ID,
                     &portals4_btl->recv_idx);
        if (PTL_OK != ret) {
            opal_output_verbose(1, opal_btl_base_framework.framework_output,
                            "%s:%d: PtlPTAlloc failed for NI %d: %d",
                            __FILE__, __LINE__, interface, ret);
            goto error;
        }
        OPAL_OUTPUT_VERBOSE((90, opal_btl_base_framework.framework_output,
            "PtlPTAlloc (recv_idx) OK for NI %d recv_idx=%d", interface, portals4_btl->recv_idx));

        if (portals4_btl->recv_idx != REQ_BTL_TABLE_ID) {
            opal_output_verbose(1, opal_btl_base_framework.framework_output,
                            "%s:%d: PtlPTAlloc did not allocate the requested PT: %d",
                           __FILE__, __LINE__, portals4_btl->recv_idx);
            goto error;
        }

        /* bind zero-length md for sending acks */
        md.start     = NULL;
        md.length    = 0;
        md.options   = 0;
        md.eq_handle = PTL_EQ_NONE;
        md.ct_handle = PTL_CT_NONE;

        ret = PtlMDBind(portals4_btl->portals_ni_h,
                    &md,
                    &portals4_btl->zero_md_h);
        if (PTL_OK != ret) {
            opal_output_verbose(1, opal_btl_base_framework.framework_output,
                            "%s:%d: PtlMDBind failed for NI %d: %d",
                            __FILE__, __LINE__, interface, ret);
            goto error;
        }
        OPAL_OUTPUT_VERBOSE((90, opal_btl_base_framework.framework_output,
            "PtlMDBind (zero-length md=%d) OK for NI %d", portals4_btl->zero_md_h, interface));

        /* Bind MD across all memory */
        md.start = 0;
        md.length = PTL_SIZE_MAX;
        md.options = 0;
        md.eq_handle = portals4_btl->recv_eq_h;
        md.ct_handle = PTL_CT_NONE;

        ret = PtlMDBind(portals4_btl->portals_ni_h,
                    &md,
                    &portals4_btl->send_md_h);
        if (PTL_OK != ret) {
            opal_output_verbose(1, opal_btl_base_framework.framework_output,
                            "%s:%d: PtlMDBind failed for NI %d: %d\n",
                            __FILE__, __LINE__, interface, ret);
            goto error;
        }

        /* Handle long overflows */
        me.start = NULL;
        me.length = 0;
        me.ct_handle = PTL_CT_NONE;
        me.min_free = 0;
        me.uid = PTL_UID_ANY;
        me.options = PTL_ME_OP_PUT |
            PTL_ME_EVENT_LINK_DISABLE |
            PTL_ME_EVENT_COMM_DISABLE |
            PTL_ME_EVENT_UNLINK_DISABLE;
        if (mca_btl_portals4_component.use_logical) {
            me.match_id.rank = PTL_RANK_ANY;
        } else {
            me.match_id.phys.nid = PTL_NID_ANY;
            me.match_id.phys.pid = PTL_PID_ANY;
        }
        me.match_bits = BTL_PORTALS4_LONG_MSG;
        me.ignore_bits = BTL_PORTALS4_CONTEXT_MASK |
            BTL_PORTALS4_SOURCE_MASK |
            BTL_PORTALS4_TAG_MASK;
        ret = PtlMEAppend(portals4_btl->portals_ni_h,
                      portals4_btl->recv_idx,
                      &me,
                      PTL_OVERFLOW_LIST,
                      NULL,
                      &portals4_btl->long_overflow_me_h);
        if (PTL_OK != ret) {
            opal_output_verbose(1, opal_btl_base_framework.framework_output,
                            "%s:%d: PtlMEAppend failed for NI %d: %d",
                            __FILE__, __LINE__, interface, ret);
            goto error;
        }
        OPAL_OUTPUT_VERBOSE((90, opal_btl_base_framework.framework_output, "PtlMEAppend (overflow list) OK for NI %d", interface));
    }

    ret = mca_btl_portals4_recv_enable(portals4_btl);
    if (PTL_OK != ret) {
        opal_output_verbose(1, opal_btl_base_framework.framework_output,
                            "%s:%d: Initialization of recv buffer failed: %d",
                            __FILE__, __LINE__, ret);
        goto error;
    }

    return OPAL_SUCCESS;

 error:
    opal_output_verbose(1, opal_btl_base_framework.framework_output, "Error in btl_portals4_init_interface");

    for (interface=0; interface<mca_btl_portals4_component.num_btls; interface++) {
        portals4_btl = mca_btl_portals4_component.btls[interface];
        if (NULL != portals4_btl) mca_btl_portals4_free_module(portals4_btl);
    }
    mca_btl_portals4_component.num_btls = 0;
    if (NULL != mca_btl_portals4_component.btls)  free(mca_btl_portals4_component.btls);
    if (NULL != mca_btl_portals4_component.eqs_h) free(mca_btl_portals4_component.eqs_h);
    mca_btl_portals4_component.btls = NULL;
    mca_btl_portals4_component.eqs_h = NULL;

    return OPAL_ERROR;
}
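mca_btl_portals4_recv_enable() is called at the end but its body is not shown. Purely as a sketch, here is one way a receive block could be appended behind the long-message overflow ME installed above, using PTL_ME_MANAGE_LOCAL so one ME serves many short messages; post_recv_block_sketch is hypothetical, and buffer recycling plus the real BTL's match-bit layout are omitted:

static int post_recv_block_sketch(mca_btl_portals4_module_t *portals4_btl,
                                  void *buf, ptl_size_t len,
                                  ptl_handle_me_t *me_h)
{
    ptl_me_t me;

    me.start       = buf;
    me.length      = len;
    me.ct_handle   = PTL_CT_NONE;
    me.min_free    = 0;
    me.uid         = PTL_UID_ANY;
    me.options     = PTL_ME_OP_PUT | PTL_ME_MANAGE_LOCAL |
                     PTL_ME_EVENT_LINK_DISABLE;
    me.match_id.phys.nid = PTL_NID_ANY;
    me.match_id.phys.pid = PTL_PID_ANY;
    me.match_bits  = 0;
    me.ignore_bits = ~(ptl_match_bits_t) 0;   /* match every short message */

    return PtlMEAppend(portals4_btl->portals_ni_h,
                       portals4_btl->recv_idx,
                       &me, PTL_PRIORITY_LIST, NULL, me_h);
}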
Example No. 7
int
shmem_transport_startup(void)
{
    int ret, i;
    ptl_process_t *desired = NULL;
    ptl_md_t md;
    ptl_le_t le;
    ptl_uid_t uid = PTL_UID_ANY;
    ptl_process_t my_id;
#ifdef USE_ON_NODE_COMMS
    int num_on_node = 0;
#endif

#ifdef ENABLE_REMOTE_VIRTUAL_ADDRESSING
    /* Make sure the heap and data bases are actually symmetric */
    {
        int peer;
        uint64_t bases[2];

        peer = (shmem_internal_my_pe + 1) % shmem_internal_num_pes;

        ret = shmem_runtime_get(peer, "portals4-bases", bases, sizeof(uint64_t) * 2);
        if (0 != ret) {
            fprintf(stderr, "[%03d] ERROR: runtime_put failed: %d\n",
                    shmem_internal_my_pe, ret);
            return ret;
        }

        if ((uintptr_t) shmem_internal_heap_base != bases[0]) {
            fprintf(stderr, "[%03d] ERROR: heap base address does not match with rank %03d and virtual addressing is enabled\n",
                    shmem_internal_my_pe, peer);
            return -1;
        }
        if ((uintptr_t) shmem_internal_data_base != bases[1]) {
            fprintf(stderr, "[%03d] ERROR: data base address does not match with rank %03d and virtual addressing is enabled\n",
                    shmem_internal_my_pe, peer);
            return -1;
        }
    }
#endif

    desired = malloc(sizeof(ptl_process_t) * shmem_internal_num_pes);
    if (NULL == desired) {
        ret = 1;
        goto cleanup;
    }

    ret = PtlGetPhysId(shmem_transport_portals4_ni_h, &my_id);
    if (PTL_OK != ret) {
        fprintf(stderr, "[%03d] ERROR: PtlGetPhysId failed: %d\n",
                shmem_internal_my_pe, ret);
        goto cleanup;
    }

    for (i = 0 ; i < shmem_internal_num_pes; ++i) {
        ret = shmem_runtime_get(i, "portals4-procid",
                                &desired[i], sizeof(ptl_process_t));
        if (0 != ret) {
            fprintf(stderr, "[%03d] ERROR: runtime_get failed: %d\n",
                    shmem_internal_my_pe, ret);
            goto cleanup;
        }

#ifdef USE_ON_NODE_COMMS
        /* update the connectivity map... */
        if (desired[i].phys.nid == my_id.phys.nid) {
            SHMEM_SET_RANK_SAME_NODE(i, num_on_node++);
            if (num_on_node > 255) {
                fprintf(stderr, "[%03d] ERROR: Too many local ranks.\n",
                        shmem_internal_my_pe);
                goto cleanup;
            }
        }
#endif
    }

    ret = PtlSetMap(shmem_transport_portals4_ni_h,
                    shmem_internal_num_pes,                    
                    desired);
    if (PTL_OK != ret && PTL_IGNORED != ret) {
        fprintf(stderr, "[%03d] ERROR: PtlSetMap failed: %d\n",
                shmem_internal_my_pe, ret);
        goto cleanup;
    }

    ret = PtlGetUid(shmem_transport_portals4_ni_h, &uid);
    if (PTL_OK != ret) {
        fprintf(stderr, "[%03d] ERROR: PtlGetUid failed: %d\n",
                shmem_internal_my_pe, ret);
        goto cleanup;
    }

    shmem_transport_portals4_max_volatile_size = ni_limits.max_volatile_size;
    shmem_transport_portals4_max_atomic_size = ni_limits.max_atomic_size;
    shmem_transport_portals4_max_fetch_atomic_size = ni_limits.max_fetch_atomic_size;
    shmem_transport_portals4_max_msg_size = ni_limits.max_msg_size;

    if (shmem_transport_portals4_max_volatile_size < sizeof(long double complex)) {
        fprintf(stderr, "[%03d] ERROR: Max volatile size found to be %lu, too small to continue\n",
                shmem_internal_my_pe, (unsigned long) shmem_transport_portals4_max_volatile_size);
        goto cleanup;
    }
    if (shmem_transport_portals4_max_atomic_size < sizeof(long double complex)) {
        fprintf(stderr, "[%03d] ERROR: Max atomic size found to be %lu, too small to continue\n",
                shmem_internal_my_pe, (unsigned long) shmem_transport_portals4_max_atomic_size);
        goto cleanup;
    }
    if (shmem_transport_portals4_max_fetch_atomic_size < sizeof(long double complex)) {
        fprintf(stderr, "[%03d] ERROR: Max fetch atomic size found to be %lu, too small to continue\n",
                shmem_internal_my_pe, (unsigned long) shmem_transport_portals4_max_fetch_atomic_size);
        goto cleanup;
    }

    /* create portal table entries */
    ret = PtlEQAlloc(shmem_transport_portals4_ni_h, 
                     shmem_transport_portals4_event_slots,
                     &shmem_transport_portals4_eq_h);
    if (PTL_OK != ret) {
        fprintf(stderr, "[%03d] ERROR: PtlEQAlloc failed: %d\n",
                shmem_internal_my_pe, ret);
        goto cleanup;
    }

#ifdef ENABLE_REMOTE_VIRTUAL_ADDRESSING
    ret = PtlPTAlloc(shmem_transport_portals4_ni_h,
                     0,
                     shmem_transport_portals4_eq_h,
                     shmem_transport_portals4_pt,
                     &all_pt);
    if (PTL_OK != ret) {
        fprintf(stderr, "[%03d] ERROR: PtlPTAlloc of table entry failed: %d\n",
                shmem_internal_my_pe, ret);
        goto cleanup;
    }
#else
    ret = PtlPTAlloc(shmem_transport_portals4_ni_h,
                     0,
                     shmem_transport_portals4_eq_h,
                     shmem_transport_portals4_data_pt,
                     &data_pt);
    if (PTL_OK != ret) {
        fprintf(stderr, "[%03d] ERROR: PtlPTAlloc of data table failed: %d\n",
                shmem_internal_my_pe, ret);
        goto cleanup;
    }
    ret = PtlPTAlloc(shmem_transport_portals4_ni_h,
                     0,
                     shmem_transport_portals4_eq_h,
                     shmem_transport_portals4_heap_pt,
                     &heap_pt);
    if (PTL_OK != ret) {
        fprintf(stderr, "[%03d] ERROR: PtlPTAlloc of heap table failed: %d\n",
                shmem_internal_my_pe, ret);
        goto cleanup;
    }
#endif

#ifndef ENABLE_HARD_POLLING
    /* target ct */
    ret = PtlCTAlloc(shmem_transport_portals4_ni_h, &shmem_transport_portals4_target_ct_h);
    if (PTL_OK != ret) {
        fprintf(stderr, "[%03d] ERROR: PtlCTAlloc of target ct failed: %d\n",
                shmem_internal_my_pe, ret);
        goto cleanup;
    }

    le.ct_handle = shmem_transport_portals4_target_ct_h;
#endif
    le.uid = uid;
    le.options = PTL_LE_OP_PUT | PTL_LE_OP_GET | 
        PTL_LE_EVENT_LINK_DISABLE |
        PTL_LE_EVENT_SUCCESS_DISABLE;
#if !defined(ENABLE_HARD_POLLING)
    le.options |= PTL_LE_EVENT_CT_COMM;
#endif
#ifdef ENABLE_REMOTE_VIRTUAL_ADDRESSING
    le.start = NULL;
    le.length = PTL_SIZE_MAX;
    ret = PtlLEAppend(shmem_transport_portals4_ni_h,
                      shmem_transport_portals4_pt,
                      &le,
                      PTL_PRIORITY_LIST,
                      NULL,
                      &shmem_transport_portals4_le_h);
    if (PTL_OK != ret) {
        fprintf(stderr, "[%03d] ERROR: PtlLEAppend of all memory failed: %d\n",
                shmem_internal_my_pe, ret);
        goto cleanup;
    }
#else
    /* Open LE to heap section */
    le.start = shmem_internal_heap_base;
    le.length = shmem_internal_heap_length;
    ret = PtlLEAppend(shmem_transport_portals4_ni_h,
                      shmem_transport_portals4_heap_pt,
                      &le,
                      PTL_PRIORITY_LIST,
                      NULL,
                      &shmem_transport_portals4_heap_le_h);
    if (PTL_OK != ret) {
        fprintf(stderr, "[%03d] ERROR: PtlLEAppend of heap section failed: %d\n",
                shmem_internal_my_pe, ret);
        goto cleanup;
    }

    /* Open LE to data section */
    le.start = shmem_internal_data_base;
    le.length = shmem_internal_data_length;
    ret = PtlLEAppend(shmem_transport_portals4_ni_h,
                      shmem_transport_portals4_data_pt,
                      &le,
                      PTL_PRIORITY_LIST,
                      NULL,
                      &shmem_transport_portals4_data_le_h);
    if (PTL_OK != ret) {
        fprintf(stderr, "[%03d] ERROR: PtlLEAppend of data section failed: %d\n",
                shmem_internal_my_pe, ret);
        goto cleanup;
    }
#endif

    /* Open MD to all memory */
    ret = PtlCTAlloc(shmem_transport_portals4_ni_h, &shmem_transport_portals4_put_ct_h);
    if (PTL_OK != ret) {
        fprintf(stderr, "[%03d] ERROR: PtlCTAlloc of put ct failed: %d\n",
                shmem_internal_my_pe, ret);
        goto cleanup;
    }
    ret = PtlCTAlloc(shmem_transport_portals4_ni_h, &shmem_transport_portals4_get_ct_h);
    if (PTL_OK != ret) {
        fprintf(stderr, "[%03d] ERROR: PtlCTAlloc of get ct failed: %d\n",
                shmem_internal_my_pe, ret);
        goto cleanup;
    }

    md.start = 0;
    md.length = PTL_SIZE_MAX;
    md.options = PTL_MD_EVENT_CT_ACK;
    if (1 == PORTALS4_TOTAL_DATA_ORDERING) {
        md.options |= PTL_MD_UNORDERED;
    }
    md.eq_handle = shmem_transport_portals4_eq_h;
    md.ct_handle = shmem_transport_portals4_put_ct_h;
    ret = PtlMDBind(shmem_transport_portals4_ni_h,
                    &md,
                    &shmem_transport_portals4_put_event_md_h);
    if (PTL_OK != ret) {
        fprintf(stderr, "[%03d] ERROR: PtlMDBind of put MD failed: %d\n",
                shmem_internal_my_pe, ret);
        goto cleanup;
    }

    md.start = 0;
    md.length = PTL_SIZE_MAX;
    md.options = PTL_MD_EVENT_CT_ACK |
        PTL_MD_EVENT_SUCCESS_DISABLE |
        PTL_MD_VOLATILE;
    if (1 == PORTALS4_TOTAL_DATA_ORDERING) {
        md.options |= PTL_MD_UNORDERED;
    }
    md.eq_handle = shmem_transport_portals4_eq_h;
    md.ct_handle = shmem_transport_portals4_put_ct_h;
    ret = PtlMDBind(shmem_transport_portals4_ni_h,
                    &md,
                    &shmem_transport_portals4_put_volatile_md_h);
    if (PTL_OK != ret) {
        fprintf(stderr, "[%03d] ERROR: PtlMDBind of put MD failed: %d\n",
                shmem_internal_my_pe, ret);
        goto cleanup;
    }

    md.start = 0;
    md.length = PTL_SIZE_MAX;
    md.options = PTL_MD_EVENT_CT_ACK |
        PTL_MD_EVENT_SUCCESS_DISABLE;
    if (1 == PORTALS4_TOTAL_DATA_ORDERING) {
        md.options |= PTL_MD_UNORDERED;
    }
    md.eq_handle = shmem_transport_portals4_eq_h;
    md.ct_handle = shmem_transport_portals4_put_ct_h;
    ret = PtlMDBind(shmem_transport_portals4_ni_h,
                    &md,
                    &shmem_transport_portals4_put_cntr_md_h);
    if (PTL_OK != ret) {
        fprintf(stderr, "[%03d] ERROR: PtlMDBind of put cntr MD failed: %d\n",
                shmem_internal_my_pe, ret);
        goto cleanup;
    }

    md.start = 0;
    md.length = PTL_SIZE_MAX;
    md.options = PTL_MD_EVENT_CT_REPLY | 
        PTL_MD_EVENT_SUCCESS_DISABLE;
    if (1 == PORTALS4_TOTAL_DATA_ORDERING) {
        md.options |= PTL_MD_UNORDERED;
    }
    md.eq_handle = shmem_transport_portals4_eq_h;
    md.ct_handle = shmem_transport_portals4_get_ct_h;
    ret = PtlMDBind(shmem_transport_portals4_ni_h,
                    &md,
                    &shmem_transport_portals4_get_md_h);
    if (PTL_OK != ret) {
        fprintf(stderr, "[%03d] ERROR: PtlMDBind of get MD failed: %d\n",
                shmem_internal_my_pe, ret);
        goto cleanup;
    }

    ret = 0;

 cleanup:
    if (NULL != desired) free(desired);
    return ret;
}
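None of the communication calls that use these handles appear in the snippet. A sketch of a blocking put built on them, assuming ENABLE_REMOTE_VIRTUAL_ADDRESSING (so the remote offset is the target's virtual address) and the PtlPut() argument order used in the other examples; blocking_put_sketch is hypothetical, and the real SHMEM transport tracks pending counts per operation type and is considerably more careful:

static int blocking_put_sketch(void *target_addr, const void *source,
                               size_t len, int pe)
{
    static ptl_size_t puts_issued = 0;   /* puts issued on this MD so far */
    ptl_process_t     peer;
    ptl_ct_event_t    ct;
    int               ret;

    peer.rank = pe;
    ret = PtlPut(shmem_transport_portals4_put_event_md_h,
                 (ptl_size_t) source,            /* offset into the all-memory MD */
                 len,
                 PTL_CT_ACK_REQ,                 /* ack counted on the put CT */
                 peer,
                 shmem_transport_portals4_pt,
                 0,                              /* match bits ignored on an LE */
                 (ptl_size_t) target_addr,       /* remote virtual address */
                 NULL, 0);
    if (PTL_OK != ret) return ret;

    puts_issued++;
    ret = PtlCTWait(shmem_transport_portals4_put_ct_h, puts_issued, &ct);
    if (PTL_OK != ret || ct.failure != 0) return -1;
    return 0;
}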
Example No. 8
int main(int   argc,
         char *argv[])
{
    ptl_handle_ni_t ni_handle;
    ptl_process_t   *procs;
    int             rank;
    ptl_pt_index_t  pt_index, signal_pt_index;
    HANDLE_T        value_e_handle, signal_e_handle;
    int             num_procs;
    ptl_handle_eq_t eq_handle;
    ptl_handle_ct_t ct_handle;
    ptl_handle_md_t md_handle;

    CHECK_RETURNVAL(PtlInit());

    CHECK_RETURNVAL(libtest_init());

    rank = libtest_get_rank();
    num_procs = libtest_get_size();
    if (num_procs < 2) {
        fprintf(stderr, "test_flowctl_noeq requires at least two processes\n");
        return 77;
    }

    CHECK_RETURNVAL(PtlNIInit(PTL_IFACE_DEFAULT, NI_TYPE | PTL_NI_LOGICAL,
                              PTL_PID_ANY, NULL, NULL, &ni_handle));
    procs = libtest_get_mapping(ni_handle);
    CHECK_RETURNVAL(PtlSetMap(ni_handle, num_procs, procs));


    if (0 == rank) {
        ENTRY_T         value_e;

        /* create data ME */
        CHECK_RETURNVAL(PtlEQAlloc(ni_handle, (num_procs - 1) * ITERS / 2, &eq_handle));
        CHECK_RETURNVAL(PtlPTAlloc(ni_handle, PTL_PT_FLOWCTRL, eq_handle, 5,
                                   &pt_index));
        value_e.start = NULL;
        value_e.length = 0;
        value_e.ct_handle = PTL_CT_NONE;
        value_e.uid = PTL_UID_ANY;
        value_e.options = OPTIONS;
#if INTERFACE == 1
        value_e.match_id.rank = PTL_RANK_ANY;
        value_e.match_bits = 0;
        value_e.ignore_bits = 0;
#endif
        CHECK_RETURNVAL(APPEND(ni_handle, 5, &value_e, PTL_PRIORITY_LIST, NULL, &value_e_handle));

        /* create signal ME */
        CHECK_RETURNVAL(PtlCTAlloc(ni_handle, &ct_handle));
        CHECK_RETURNVAL(PtlPTAlloc(ni_handle, 0, PTL_EQ_NONE, 6,
                                   &signal_pt_index));
        value_e.start = NULL;
        value_e.length = 0;
        value_e.ct_handle = ct_handle;
        value_e.uid = PTL_UID_ANY;
        value_e.options = OPTIONS | PTL_LE_EVENT_SUCCESS_DISABLE | PTL_LE_EVENT_CT_COMM;
#if INTERFACE == 1
        value_e.match_id.rank = PTL_RANK_ANY;
        value_e.match_bits = 0;
        value_e.ignore_bits = 0;
#endif
        CHECK_RETURNVAL(APPEND(ni_handle, 6, &value_e, PTL_PRIORITY_LIST, NULL, &signal_e_handle));
    } else {
        ptl_md_t        md;

        /* 16 extra just in case... */
        CHECK_RETURNVAL(PtlEQAlloc(ni_handle, ITERS * 2 + 16, &eq_handle));

        md.start = NULL;
        md.length = 0;
        md.options = 0;
        md.eq_handle = eq_handle;
        md.ct_handle = PTL_CT_NONE;

        CHECK_RETURNVAL(PtlMDBind(ni_handle, &md, &md_handle));
    }

    libtest_barrier();

    if (0 == rank) {
        ptl_ct_event_t  ct;
        ptl_event_t ev;
        int ret, count = 0, saw_dropped = 0, saw_flowctl = 0;

        /* wait for signal counts */
        CHECK_RETURNVAL(PtlCTWait(ct_handle, num_procs - 1, &ct));
        if (ct.success != num_procs - 1 || ct.failure != 0) {
            return 1;
        }

        /* wait for event entries */
        while (count < ITERS * (num_procs - 1)) {
            ret = PtlEQWait(eq_handle, &ev);
            if (PTL_OK == ret) {
                ;
            } else if (PTL_EQ_DROPPED == ret) {
                saw_dropped++;
                if (ev.type == PTL_EVENT_PT_DISABLED){
                    saw_flowctl++;
                    CHECK_RETURNVAL(PtlPTEnable(ni_handle, pt_index));
                }
                break;
            } else {
                fprintf(stderr, "0: Unexpected return code from EQWait: %d\n", ret);
                return 1;
            }

            if (ev.type == PTL_EVENT_PT_DISABLED) {
                CHECK_RETURNVAL(PtlPTEnable(ni_handle, pt_index));
                saw_flowctl++;
            } else {
                count++;
            }
        }

        fprintf(stderr, "0: Saw %d dropped, %d flowctl\n", saw_dropped, saw_flowctl);
        if (saw_flowctl == 0) {
            return 1;
        }
    } else {
        ptl_process_t target;
        ptl_event_t ev;
        int ret, count = 0, fails = 0;
        int i;
        int *fail_seen;

        fail_seen = malloc(sizeof(int) * ITERS);
        if (NULL == fail_seen) {
             fprintf(stderr, "%d: malloc failed\n", rank);
             return 1;
        }
        memset(fail_seen, 0, sizeof(int) * ITERS);

        target.rank = 0;
        for (i = 0 ; i < ITERS ; ++i) {
            CHECK_RETURNVAL(PtlPut(md_handle,
                                   0,
                                   0,
                                   PTL_ACK_REQ,
                                   target,
                                   5,
                                   0,
                                   0,
                                   (void*)(size_t)i,
                                   0));
            usleep(100);
        }

        while (count < ITERS) {
            ret = PtlEQGet(eq_handle, &ev);
            if (PTL_EQ_EMPTY == ret) {
                continue;
            } else if (PTL_OK != ret) {
                fprintf(stderr, "%d: PtlEQGet returned %d\n", rank, ret);
                return 1;
            }

            if (ev.ni_fail_type == PTL_NI_OK) {
                if (ev.type == PTL_EVENT_SEND) {
                    continue;
                } else if (ev.type == PTL_EVENT_ACK) {
                    count++;
                } else {
                    fprintf(stderr, "%d: Unexpected event type %d\n", rank, ev.type);
                }
            } else if (ev.ni_fail_type == PTL_NI_PT_DISABLED) {
                int iter = (size_t) ev.user_ptr;
                if (fail_seen[iter]++ > 0) {
                    fprintf(stderr, "%d: Double report of PT_DISABLED for "
                            "iteration %d\n", rank, iter);
                    return 1;
                }
                count++;
                fails++;
            } else {
                fprintf(stderr, "%d: Unexpected fail type: %d\n", rank, ev.ni_fail_type);
                return 1;
            }
        }

        fprintf(stderr, "%d: Saw %d of %d events as fails\n", rank, fails, count);

        CHECK_RETURNVAL(PtlPut(md_handle,
                               0,
                               0,
                               PTL_NO_ACK_REQ,
                               target,
                               6,
                               0,
                               0,
                               NULL,
                               0));
        /* wait for the send event on the last put */
        CHECK_RETURNVAL(PtlEQWait(eq_handle, &ev));

        while (fails > 0) {
            CHECK_RETURNVAL(PtlPut(md_handle,
                                   0,
                                   0,
                                   PTL_ACK_REQ,
                                   target,
                                   5,
                                   0,
                                   0,
                                   NULL,
                                   0));
            while (1) {
                ret = PtlEQWait(eq_handle, &ev);
                if (PTL_OK != ret) {
                    fprintf(stderr, "%d: PtlEQWait returned %d\n", rank, ret);
                    return 1;
                }

                if (ev.ni_fail_type == PTL_NI_OK) {
                    if (ev.type == PTL_EVENT_SEND) {
                        continue;
                    } else if (ev.type == PTL_EVENT_ACK) {
                        fails--;
                        break;
                    } else {
                        fprintf(stderr, "%d: Unexpected event type %d\n", rank, ev.type);
                    }
                } else if (ev.ni_fail_type == PTL_NI_PT_DISABLED) {
                    break;
                } else {
                    fprintf(stderr, "%d: Unexpected fail type: %d\n", rank, ev.ni_fail_type);
                    return 1;
                }
            }
        }
    }

    libtest_barrier();

    if (0 == rank) {
        CHECK_RETURNVAL(UNLINK(signal_e_handle));
        CHECK_RETURNVAL(PtlPTFree(ni_handle, signal_pt_index));
        CHECK_RETURNVAL(PtlCTFree(ct_handle));
        CHECK_RETURNVAL(UNLINK(value_e_handle));
        CHECK_RETURNVAL(PtlPTFree(ni_handle, pt_index));
        CHECK_RETURNVAL(PtlEQFree(eq_handle));
    } else {
        CHECK_RETURNVAL(PtlMDRelease(md_handle));
        CHECK_RETURNVAL(PtlEQFree(eq_handle));
    }

    CHECK_RETURNVAL(PtlNIFini(ni_handle));
    CHECK_RETURNVAL(libtest_fini());
    PtlFini();

    return 0;
}
Example #9
void test_prepostME(int             cache_size,
                    int            *cache_buf,
                    ptl_handle_ni_t ni,
                    int             npeers,
                    int             nmsgs,
                    int             nbytes,
                    int             niters)
{
    int    i, j, k;
    double tmp, total = 0;

    ptl_handle_md_t send_md_handle;
    ptl_md_t        send_md;
    ptl_process_t   dest;
    ptl_size_t      offset;
    ptl_pt_index_t  index;
    ptl_handle_eq_t recv_eq_handle;
    ptl_handle_me_t me_handles[npeers * nmsgs];
    ptl_event_t     event;

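    /* Pre-post benchmark: all matching list entries for a full round of
     * messages are appended before any sends are issued, so receive-side
     * resources are always in place when data arrives. */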
    ptl_assert(PtlEQAlloc(ni, nmsgs * npeers + 1,
                          &send_md.eq_handle), PTL_OK);

    send_md.start     = send_buf;
    send_md.length    = SEND_BUF_SIZE;
    send_md.options   = PTL_MD_UNORDERED;
    send_md.ct_handle = PTL_CT_NONE;

    ptl_assert(PtlMDBind(ni, &send_md, &send_md_handle), PTL_OK);

    ptl_assert(PtlEQAlloc(ni, nmsgs * npeers + 1, &recv_eq_handle), PTL_OK);

    ptl_assert(PtlPTAlloc(ni, 0, recv_eq_handle, TestSameDirectionIndex,
                          &index), PTL_OK);

    ptl_assert(index, TestSameDirectionIndex);

    tmp = timer();
    for (j = 0; j < npeers; ++j) {
        for (k = 0; k < nmsgs; ++k) {
            ptl_process_t src;
            src.rank = recv_peers[j];
            postME(ni, index, recv_buf + (nbytes * (k + j * nmsgs)),
                   nbytes, src, magic_tag, &me_handles[k + j * nmsgs]);
        }
    }
    total += (timer() - tmp);

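    /* Timed loop: send nmsgs messages to each peer, wait for all send and
     * receive completions, then re-post the MEs for the next iteration. */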
    for (i = 0; i < niters - 1; ++i) {
        cache_invalidate(cache_size, cache_buf);

        libtest_Barrier();

        tmp = timer();
        for (j = 0; j < npeers; ++j) {
            for (k = 0; k < nmsgs; ++k) {
                offset    = (nbytes * (k + j * nmsgs));
                dest.rank = send_peers[npeers - j - 1];
                ptl_assert(libtest_Put_offset(send_md_handle, offset, nbytes,
                                           dest, index, magic_tag, offset), PTL_OK);
            }
        }

        /* wait for sends */
        for (j = 0; j < npeers * nmsgs; ++j) {
            ptl_assert(PtlEQWait(send_md.eq_handle, &event), PTL_OK);
            ptl_assert(event.type, PTL_EVENT_SEND);
        }

        /* wait for receives */
        for (j = 0; j < npeers * nmsgs; j++) {
            ptl_assert(PtlEQWait(recv_eq_handle, &event), PTL_OK);
        }

        for (j = 0; j < npeers; ++j) {
            for (k = 0; k < nmsgs; ++k) {
                ptl_process_t src;
                src.rank = recv_peers[j];
                postME(ni, index, recv_buf + (nbytes * (k + j * nmsgs)),
                       nbytes, src, magic_tag, &me_handles[k + j * nmsgs]);
            }
        }
        total += (timer() - tmp);
    }

    libtest_Barrier();

    tmp = timer();
    for (j = 0; j < npeers; ++j) {
        for (k = 0; k < nmsgs; ++k) {
            offset    = (nbytes * (k + j * nmsgs));
            dest.rank = send_peers[npeers - j - 1];
            ptl_assert(libtest_Put_offset(send_md_handle, offset, nbytes, dest,
                                       index, magic_tag, offset), PTL_OK);
        }
    }
    /* wait for sends */
    for (j = 0; j < npeers * nmsgs; ++j) {
        ptl_assert(PtlEQWait(send_md.eq_handle, &event), PTL_OK);
        ptl_assert(event.type, PTL_EVENT_SEND);
    }

    /* wait for receives */
    for (j = 0; j < npeers * nmsgs; j++) {
        ptl_assert(PtlEQWait(recv_eq_handle, &event), PTL_OK);
    }

    total += (timer() - tmp);

    ptl_assert(PtlEQFree(send_md.eq_handle), PTL_OK);

    ptl_assert(PtlMDRelease(send_md_handle), PTL_OK);

    ptl_assert(PtlEQFree(recv_eq_handle), PTL_OK);

    ptl_assert(PtlPTFree(ni, index), PTL_OK);

    tmp = libtest_AllreduceDouble(total, PTL_SUM);
    display_result("pre-post", (niters * npeers * nmsgs * 2) / (tmp / world_size));
}
Example #10
int
ompi_mtl_portals4_flowctl_init(void)
{
    ptl_me_t me;
    int ret;

    ompi_mtl_portals4.flowctl.flowctl_active = false;

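    /* Sends that arrive while flow control is active are queued on
     * pending_sends; pending_fl is a free list of the request structures
     * used to track them. */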
    OBJ_CONSTRUCT(&ompi_mtl_portals4.flowctl.pending_sends, opal_list_t);

    OBJ_CONSTRUCT(&ompi_mtl_portals4.flowctl.pending_fl, opal_free_list_t);
    opal_free_list_init(&ompi_mtl_portals4.flowctl.pending_fl,
                        sizeof(ompi_mtl_portals4_pending_request_t),
                        opal_cache_line_size,
                        OBJ_CLASS(ompi_mtl_portals4_pending_request_t),
                        0, 0, 1, -1, 1, NULL, 0, NULL, NULL, NULL);

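    /* Cap the number of concurrently outstanding sends well below the send
     * event queue size, since each send can consume several EQ entries once
     * flow control kicks in. */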
    ompi_mtl_portals4.flowctl.max_send_slots = (ompi_mtl_portals4.send_queue_size - 3) / 3;
    ompi_mtl_portals4.flowctl.send_slots = ompi_mtl_portals4.flowctl.max_send_slots;

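    /* The alert and fan-out requests are handed to PtlMEAppend as user_ptr
     * values below, so the event progress loop can dispatch the matching
     * flow-control callback. */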
    ompi_mtl_portals4.flowctl.alert_req.type = portals4_req_flowctl;
    ompi_mtl_portals4.flowctl.alert_req.event_callback = flowctl_alert_callback;

    ompi_mtl_portals4.flowctl.fanout_req.type = portals4_req_flowctl;
    ompi_mtl_portals4.flowctl.fanout_req.event_callback = flowctl_fanout_callback;

    ompi_mtl_portals4.flowctl.epoch_counter = -1;

    ret = PtlPTAlloc(ompi_mtl_portals4.ni_h,
                     PTL_PT_ONLY_TRUNCATE,
                     ompi_mtl_portals4.send_eq_h,
                     REQ_FLOWCTL_TABLE_ID,
                     &ompi_mtl_portals4.flowctl_idx);
    if (OPAL_UNLIKELY(PTL_OK != ret)) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlPTAlloc failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }

    ret = PtlCTAlloc(ompi_mtl_portals4.ni_h,
                     &ompi_mtl_portals4.flowctl.trigger_ct_h);
    if (OPAL_UNLIKELY(PTL_OK != ret)) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlCTAlloc failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }

    /* everyone creates the trigger ME, even if the root may be the
       only to use it */
    me.start = NULL;
    me.length = 0;
    me.min_free = 0;
    me.uid = ompi_mtl_portals4.uid;
    if (ompi_mtl_portals4.use_logical) {
        me.match_id.rank = PTL_RANK_ANY;
    } else {
        me.match_id.phys.nid = PTL_NID_ANY;
        me.match_id.phys.pid = PTL_PID_ANY;
    }
    me.ignore_bits = 0;

    me.options = PTL_ME_OP_PUT | 
        PTL_ME_ACK_DISABLE | 
        PTL_ME_EVENT_LINK_DISABLE |
        PTL_ME_EVENT_UNLINK_DISABLE |
        PTL_ME_EVENT_COMM_DISABLE |
        PTL_ME_EVENT_CT_COMM;
    me.ct_handle = ompi_mtl_portals4.flowctl.trigger_ct_h;
    me.match_bits = MTL_PORTALS4_FLOWCTL_TRIGGER;
    ret = PtlMEAppend(ompi_mtl_portals4.ni_h,
                      ompi_mtl_portals4.flowctl_idx,
                      &me,
                      PTL_PRIORITY_LIST,
                      NULL,
                      &ompi_mtl_portals4.flowctl.trigger_me_h);
    if (OPAL_UNLIKELY(PTL_OK != ret)) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlMEAppend failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }


    /* Alert CT/ME for broadcasting out alert when root receives a
       trigger */
    ret = PtlCTAlloc(ompi_mtl_portals4.ni_h,
                     &ompi_mtl_portals4.flowctl.alert_ct_h);
    if (OPAL_UNLIKELY(PTL_OK != ret)) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlCTAlloc failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }
    me.options = PTL_ME_OP_PUT | 
        PTL_ME_ACK_DISABLE | 
        PTL_ME_EVENT_LINK_DISABLE |
        PTL_ME_EVENT_UNLINK_DISABLE |
        PTL_ME_EVENT_CT_COMM;
    me.ct_handle = ompi_mtl_portals4.flowctl.alert_ct_h;
    me.match_bits = MTL_PORTALS4_FLOWCTL_ALERT;
    ret = PtlMEAppend(ompi_mtl_portals4.ni_h,
                      ompi_mtl_portals4.flowctl_idx,
                      &me,
                      PTL_PRIORITY_LIST,
                      &ompi_mtl_portals4.flowctl.alert_req,
                      &ompi_mtl_portals4.flowctl.alert_me_h);
    if (OPAL_UNLIKELY(PTL_OK != ret)) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlMEAppend failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }

    /* Fanin CT/ME for receiving fan-in for restart */
    ret = PtlCTAlloc(ompi_mtl_portals4.ni_h,
                     &ompi_mtl_portals4.flowctl.fanin_ct_h);
    if (OPAL_UNLIKELY(PTL_OK != ret)) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlCTAlloc failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }
    me.options = PTL_ME_OP_PUT | 
        PTL_ME_ACK_DISABLE | 
        PTL_ME_EVENT_COMM_DISABLE |
        PTL_ME_EVENT_LINK_DISABLE |
        PTL_ME_EVENT_UNLINK_DISABLE |
        PTL_ME_EVENT_CT_COMM;
    me.ct_handle = ompi_mtl_portals4.flowctl.fanin_ct_h;
    me.match_bits = MTL_PORTALS4_FLOWCTL_FANIN;
    ret = PtlMEAppend(ompi_mtl_portals4.ni_h,
                      ompi_mtl_portals4.flowctl_idx,
                      &me,
                      PTL_PRIORITY_LIST,
                      NULL,
                      &ompi_mtl_portals4.flowctl.fanin_me_h);
    if (OPAL_UNLIKELY(PTL_OK != ret)) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlMEAppend failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }

    /* Fan-out CT/ME for sending restart messages after fan-in */
    ret = PtlCTAlloc(ompi_mtl_portals4.ni_h,
                     &ompi_mtl_portals4.flowctl.fanout_ct_h);
    if (OPAL_UNLIKELY(PTL_OK != ret)) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlCTAlloc failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }
    me.options = PTL_ME_OP_PUT | 
        PTL_ME_ACK_DISABLE | 
        PTL_ME_EVENT_LINK_DISABLE |
        PTL_ME_EVENT_UNLINK_DISABLE |
        PTL_ME_EVENT_CT_COMM;
    me.ct_handle = ompi_mtl_portals4.flowctl.fanout_ct_h;
    me.match_bits = MTL_PORTALS4_FLOWCTL_FANOUT;
    ret = PtlMEAppend(ompi_mtl_portals4.ni_h,
                      ompi_mtl_portals4.flowctl_idx,
                      &me,
                      PTL_PRIORITY_LIST,
                      &ompi_mtl_portals4.flowctl.fanout_req,
                      &ompi_mtl_portals4.flowctl.fanout_me_h);
    if (OPAL_UNLIKELY(PTL_OK != ret)) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlMEAppend failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }

    ompi_mtl_portals4.flowctl.num_children = 0;

    gettimeofday(&ompi_mtl_portals4.flowctl.tv, NULL);
    ompi_mtl_portals4.flowctl.backoff_count = 0;

    ret = OMPI_SUCCESS;

 error:
    return ret;
}
Example #11
int main(int   argc,
         char *argv[])
{
    ptl_handle_ni_t ni_h;
    ptl_pt_index_t  pt_index;
    uint64_t       *buf;
    ENTRY_T         entry;
    HANDLE_T        entry_h;
    ptl_md_t        md;
    ptl_handle_md_t md_h;
    int             rank;
    int             num_procs;
    int             ret;
    ptl_process_t  *procs;
    ptl_handle_eq_t eq_h;
    ptl_event_t     ev;
    ptl_hdr_data_t rcvd = 0;
    ptl_hdr_data_t goal = 0;
    ptl_hdr_data_t hdr_data = 1;
    ptl_size_t offset = sizeof(uint64_t);
    uint32_t distance;
    int sends = 0;

    CHECK_RETURNVAL(PtlInit());
    CHECK_RETURNVAL(libtest_init());

    rank = libtest_get_rank();
    num_procs = libtest_get_size();

    /* This test needs at least two ranks; exit 77 so an automake-style
     * test harness reports it as skipped rather than failed */
    if (num_procs < 2) return 77;

    CHECK_RETURNVAL(PtlNIInit(PTL_IFACE_DEFAULT, NI_TYPE | PTL_NI_LOGICAL,
                              PTL_PID_ANY, NULL, NULL, &ni_h));

    procs = libtest_get_mapping(ni_h);
    CHECK_RETURNVAL(PtlSetMap(ni_h, num_procs, procs));

    CHECK_RETURNVAL(PtlEQAlloc(ni_h, 1024, &eq_h));
    CHECK_RETURNVAL(PtlPTAlloc(ni_h, 0, eq_h, 0, &pt_index));
    assert(pt_index == 0);
    
    buf = malloc(sizeof(uint64_t) * num_procs);
    assert(NULL != buf);

    md.start = buf;
    md.length = sizeof(uint64_t) * num_procs;
    md.options = PTL_MD_UNORDERED;
    md.eq_handle = eq_h;
    md.ct_handle = PTL_CT_NONE;
    CHECK_RETURNVAL(PtlMDBind(ni_h, &md, &md_h));

    entry.start = buf;
    entry.length = sizeof(uint64_t) * num_procs;
    entry.ct_handle = PTL_CT_NONE;
    entry.uid = PTL_UID_ANY;
    entry.options = OPTIONS;
#if MATCHING == 1
    entry.match_id.rank = PTL_RANK_ANY;
    entry.match_bits = 0;
    entry.ignore_bits = 0;
    entry.min_free = 0;
#endif
    CHECK_RETURNVAL(APPEND(ni_h, pt_index, &entry,
                           PTL_PRIORITY_LIST, NULL, &entry_h));

    /* ensure ME is linked before the barrier */
    CHECK_RETURNVAL(PtlEQWait(eq_h, &ev));
    assert( ev.type == PTL_EVENT_LINK );

    libtest_barrier();

    /* Bruck's Concatenation Algorithm */
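    /* Each round doubles the amount of gathered data: this rank sends the
     * min(distance, num_procs - distance) entries it currently holds to
     * (rank - distance) mod num_procs and receives the same amount from
     * (rank + distance) mod num_procs at a growing offset, so the allgather
     * finishes in ceil(log2(num_procs)) rounds. */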
    buf[0] = (uint64_t) rank;   /* this rank's own contribution occupies slot 0 */
    for (distance = 1; distance < num_procs; distance *= 2) {
        ptl_size_t to_xfer;
        int peer;
        ptl_process_t proc;

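        /* peer == (rank - distance) mod num_procs, written so the
         * intermediate value never goes negative */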
        if (rank >= distance) {
            peer = rank - distance;
        } else {
            peer = rank + (num_procs - distance);
        }

        to_xfer = sizeof(uint64_t) * MIN(distance, num_procs - distance);
        proc.rank = peer;
        CHECK_RETURNVAL(PtlPut(md_h, 
                               0, 
                               to_xfer, 
                               PTL_NO_ACK_REQ, 
                               proc,
                               0,
                               0,
                               offset,
                               NULL,
                               hdr_data));
        sends += 1;

        /* wait for completion of the proper receive, and keep count
           of uncompleted sends.  "rcvd" is an accumulator to deal
           with out-of-order receives, which are IDed by the
           hdr_data */
        goal |= hdr_data;
        while ((rcvd & goal) != goal) {
            ret = PtlEQWait(eq_h, &ev);
            switch (ret) {
            case PTL_OK:
                if (ev.type == PTL_EVENT_SEND) {
                    sends -= 1;
                } else {
                    rcvd |= ev.hdr_data;
                    assert(ev.type == PTL_EVENT_PUT);
                    assert(ev.rlength == ev.mlength);
                    assert((ev.rlength == to_xfer) || (ev.hdr_data != hdr_data));
                }
                break;
            default:
                fprintf(stderr, "PtlEQWait failure: %d\n", ret);
                abort();
            }
        }
        
        hdr_data <<= 1;
        offset += to_xfer;
    }

    /* wait for any SEND_END events not yet seen */
    while (sends) {
        ret = PtlEQWait(eq_h, &ev);
        switch (ret) {
        case PTL_OK:
            assert( ev.type == PTL_EVENT_SEND );
            sends -= 1;
            break;
        default:
            fprintf(stderr, "PtlEQWait failure: %d\n", ret);
            abort();
        }
    }

    CHECK_RETURNVAL(UNLINK(entry_h));
    CHECK_RETURNVAL(PtlMDRelease(md_h));
    free(buf);

    libtest_barrier();

    /* cleanup */
    CHECK_RETURNVAL(PtlPTFree(ni_h, pt_index));
    CHECK_RETURNVAL(PtlEQFree(eq_h));
    CHECK_RETURNVAL(PtlNIFini(ni_h));
    CHECK_RETURNVAL(libtest_fini());
    PtlFini();

    return 0;
}
Example #12
void
Init(ArgStruct *p, int* pargc, char*** pargv)
{

    int            rc;
    ptl_pt_index_t pt_handle;

    /* Initialize Portals and get some runtime info */
    rc= PtlInit();
    LIBTEST_CHECK(rc, "PtlInit");

    libtest_init();
    _my_rank= libtest_get_rank();
    _nprocs= libtest_get_size();

    if (_nprocs < 2) {
        if (_my_rank == 0) {
            fprintf(stderr, "Need at least two processes!\n");
        }
        exit(-2);
    }

    /*
    ** We need an ni to do barriers and allreduces on.
    ** It needs to be a non-matching ni.
    */
    rc= PtlNIInit(PTL_IFACE_DEFAULT, PTL_NI_NO_MATCHING | PTL_NI_LOGICAL, PTL_PID_ANY, NULL, NULL, &ni_logical);
    LIBTEST_CHECK(rc, "PtlNIInit");

    rc= PtlSetMap(ni_logical, _nprocs, libtest_get_mapping(ni_logical));
    LIBTEST_CHECK(rc, "PtlSetMap");

    /* Initialize the barrier in the P4support library.  */
    libtest_BarrierInit(ni_logical, _my_rank, _nprocs);

    /* Allocate a Portal Table Index entry for data transmission */
    rc= PtlPTAlloc(ni_logical, 0, PTL_EQ_NONE, PTL_XMIT_INDEX, &pt_handle);
    LIBTEST_CHECK(rc, "PtlPTAlloc(PTL_XMIT_INDEX)");

    /* Allocate a Portal Table Index entry to receive an int */
    rc= PtlPTAlloc(ni_logical, 0, PTL_EQ_NONE, PTL_SEND_INT_INDEX, &pt_handle);
    LIBTEST_CHECK(rc, "PtlPTAlloc(PTL_SEND_INT_INDEX)");

    /* Allocate a Portal Table Index entry to receive a double */
    rc= PtlPTAlloc(ni_logical, 0, PTL_EQ_NONE, PTL_SEND_DOUBLE_INDEX, &pt_handle);
    LIBTEST_CHECK(rc, "PtlPTAlloc(PTL_SEND_DOUBLE_INDEX)");

    /* Set up the MD to send a single int */
    send_int_ct_handle= PTL_INVALID_HANDLE;
    libtest_CreateMDCT(ni_logical, &send_int, sizeof(int), &send_int_md_handle, &send_int_ct_handle);

    /* Set up the MD to send a single double */
    send_double_ct_handle= PTL_INVALID_HANDLE;
    libtest_CreateMDCT(ni_logical, &send_double, sizeof(double), &send_double_md_handle, &send_double_ct_handle);

    /* Create a persistent LE to receive a single int */
    recv_int_ct_handle= PTL_INVALID_HANDLE;
    libtest_CreateLECT(ni_logical, PTL_SEND_INT_INDEX, &recv_int, sizeof(int), &recv_int_le_handle, &recv_int_ct_handle);

    /* Create a persistent LE to receive a single double */
    recv_double_ct_handle= PTL_INVALID_HANDLE;
    libtest_CreateLECT(ni_logical, PTL_SEND_DOUBLE_INDEX, &recv_double, sizeof(double), &recv_double_le_handle, &recv_double_ct_handle);

    /*
    ** Initialize the benchmark data ct handles. Once allocated we'll
    ** reuse them, instead of reallocating them each time in
    ** AfterAlignmentInit()
    */
    send_ct_handle= PTL_INVALID_HANDLE;
    recv_ct_handle= PTL_INVALID_HANDLE;
    md_handle= PTL_INVALID_HANDLE;
    md_size= -1;
    md_buf= NULL;

    le_handle= PTL_INVALID_HANDLE;
    le_size= -1;
    le_buf= NULL;

    libtest_barrier();

}  /* end of Init() */
Example #13
static int
portals4_init_interface(void)
{
    int ret;
    ptl_md_t md;
    ptl_me_t me;

    /* create event queues */
    ret = PtlEQAlloc(ompi_mtl_portals4.ni_h,
                     ompi_mtl_portals4.send_queue_size,
                     &ompi_mtl_portals4.send_eq_h);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlEQAlloc failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }
    ret = PtlEQAlloc(ompi_mtl_portals4.ni_h,
                     ompi_mtl_portals4.recv_queue_size,
                     &ompi_mtl_portals4.recv_eq_h);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlEQAlloc failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }

    /* Create send and long message (read) portal table entries */
    ret = PtlPTAlloc(ompi_mtl_portals4.ni_h,
                     PTL_PT_ONLY_USE_ONCE |
                     PTL_PT_ONLY_TRUNCATE |
                     PTL_PT_FLOWCTRL,
                     ompi_mtl_portals4.recv_eq_h,
                     REQ_RECV_TABLE_ID,
                     &ompi_mtl_portals4.recv_idx);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlPTAlloc failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }
    if (ompi_mtl_portals4.recv_idx != REQ_RECV_TABLE_ID) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlPTAlloc did not allocate the requested PT: %d\n",
                            __FILE__, __LINE__, ompi_mtl_portals4.recv_idx);
        goto error;
    }

    ret = PtlPTAlloc(ompi_mtl_portals4.ni_h,
                     PTL_PT_ONLY_USE_ONCE |
                     PTL_PT_ONLY_TRUNCATE,
                     ompi_mtl_portals4.send_eq_h,
                     REQ_READ_TABLE_ID,
                     &ompi_mtl_portals4.read_idx);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlPTAlloc failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }
    if (ompi_mtl_portals4.read_idx != REQ_READ_TABLE_ID) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlPTAlloc did not allocate the requested PT: %d\n",
                            __FILE__, __LINE__, ompi_mtl_portals4.read_idx);
        goto error;
    }

    /* bind zero-length md for sending acks */
    md.start     = NULL;
    md.length    = 0;
    md.options   = 0;
    md.eq_handle = PTL_EQ_NONE;
    md.ct_handle = PTL_CT_NONE;

    ret = PtlMDBind(ompi_mtl_portals4.ni_h,
                    &md,
                    &ompi_mtl_portals4.zero_md_h);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlMDBind failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }

    /* Bind MD across all memory */
    md.start = 0;
    md.length = PTL_SIZE_MAX;
    md.options = 0;
    md.eq_handle = ompi_mtl_portals4.send_eq_h;
    md.ct_handle = PTL_CT_NONE;

    ret = PtlMDBind(ompi_mtl_portals4.ni_h,
                    &md,
                    &ompi_mtl_portals4.send_md_h);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlMDBind failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }

    /* Handle long overflows */
    me.start = NULL;
    me.length = 0;
    me.ct_handle = PTL_CT_NONE;
    me.min_free = 0;
    me.uid = ompi_mtl_portals4.uid;
    me.options = PTL_ME_OP_PUT |
        PTL_ME_EVENT_LINK_DISABLE |
        PTL_ME_EVENT_COMM_DISABLE |
        PTL_ME_EVENT_UNLINK_DISABLE;
    if (ompi_mtl_portals4.use_logical) {
        me.match_id.rank = PTL_RANK_ANY;
    } else {
        me.match_id.phys.nid = PTL_NID_ANY;
        me.match_id.phys.pid = PTL_PID_ANY;
    }
    me.match_bits = MTL_PORTALS4_LONG_MSG;
    me.ignore_bits = MTL_PORTALS4_CONTEXT_MASK |
        MTL_PORTALS4_SOURCE_MASK |
        MTL_PORTALS4_TAG_MASK;
    ret = PtlMEAppend(ompi_mtl_portals4.ni_h,
                      ompi_mtl_portals4.recv_idx,
                      &me,
                      PTL_OVERFLOW_LIST,
                      NULL,
                      &ompi_mtl_portals4.long_overflow_me_h);
    if (PTL_OK != ret) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: PtlMEAppend failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }

    /* attach short unex recv blocks */
    ret = ompi_mtl_portals4_recv_short_init();
    if (OMPI_SUCCESS != ret) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: short receive block initialization failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }

    ompi_mtl_portals4.opcount = 0;
#if OPAL_ENABLE_DEBUG
    ompi_mtl_portals4.recv_opcount = 0;
#endif

#if OMPI_MTL_PORTALS4_FLOW_CONTROL
    ret = ompi_mtl_portals4_flowctl_init();
    if (OMPI_SUCCESS != ret) {
        opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                            "%s:%d: ompi_mtl_portals4_flowctl_init failed: %d\n",
                            __FILE__, __LINE__, ret);
        goto error;
    }
#endif

    return OMPI_SUCCESS;

 error:
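    /* Tear down whatever was successfully created; handles still equal to
     * PTL_INVALID_HANDLE (or PT indexes still at ~0) are skipped. */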
    if (!PtlHandleIsEqual(ompi_mtl_portals4.long_overflow_me_h, PTL_INVALID_HANDLE)) {
        PtlMEUnlink(ompi_mtl_portals4.long_overflow_me_h);
    }
    if (!PtlHandleIsEqual(ompi_mtl_portals4.zero_md_h, PTL_INVALID_HANDLE)) {
        PtlMDRelease(ompi_mtl_portals4.zero_md_h);
    }
    if (!PtlHandleIsEqual(ompi_mtl_portals4.send_md_h, PTL_INVALID_HANDLE)) {
        PtlMDRelease(ompi_mtl_portals4.send_md_h);
    }
    if (ompi_mtl_portals4.read_idx != (ptl_pt_index_t) ~0UL) {
        PtlPTFree(ompi_mtl_portals4.ni_h, ompi_mtl_portals4.read_idx);
    }
    if (ompi_mtl_portals4.recv_idx != (ptl_pt_index_t) ~0UL) {
        PtlPTFree(ompi_mtl_portals4.ni_h, ompi_mtl_portals4.recv_idx);
    }
    if (!PtlHandleIsEqual(ompi_mtl_portals4.send_eq_h, PTL_INVALID_HANDLE)) {
        PtlEQFree(ompi_mtl_portals4.send_eq_h);
    }
    if (!PtlHandleIsEqual(ompi_mtl_portals4.recv_eq_h, PTL_INVALID_HANDLE)) {
        PtlEQFree(ompi_mtl_portals4.recv_eq_h);
    }
    return OMPI_ERROR;
}