Esempio n. 1
0
/*
 * Release the resources attached to one monitoring_result: free its MPI_T
 * pvar handle (aborting the job on failure, since a stuck handle means the
 * monitoring pml is not active) and then release the buffers it owns.
 * Note: 'res' itself is not freed here; the caller retains ownership.
 */
void destroy_monitoring_result(monitoring_result * res)
{
    int rc = MPI_T_pvar_handle_free(session, &(res->pvar_handle));

    if (MPI_SUCCESS != rc) {
        printf("ERROR : failed to free handle on \"%s\" pvar, check that you have enabled the monitoring pml\n", res->pvar_name);
        MPI_Abort(MPI_COMM_WORLD, rc);
    }

    free(res->pvar_name);
    free(res->vector);
}
Esempio n. 2
0
/*
 * Exercise the MPI_T receive-queue pvars: locate the posted/unexpected queue
 * length and match-attempt variables, bind handles to them in a session, run
 * a short self-send/recv sequence on MPI_COMM_SELF, and print the counters
 * (via the file-scope print_vars helper) after each step so the queue-length
 * transitions can be checked against the expected values.
 */
int main(int argc, char **argv)
{
    int i;
    int num;
    int rank, size;
/*#define STR_SZ (15)*/
#define STR_SZ (50)
    int name_len = STR_SZ;
    char name[STR_SZ] = "";
    int desc_len = STR_SZ;
    char desc[STR_SZ] = "";
    int verb;
    MPI_Datatype dtype;
    int count;
    int bind;
    int varclass;
    int readonly, continuous, atomic;
    int provided;
    MPI_T_enum enumtype;
    int pq_idx = -1, uq_idx = -1, pqm_idx = -1, uqm_idx = -1;
    int pqm_writable = -1, uqm_writable = -1;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Poison the output arguments so we can tell the MPI_T calls wrote them. */
    provided = 0xdeadbeef;
    MPI_T_init_thread(MPI_THREAD_SINGLE, &provided);
    assert(provided != 0xdeadbeef);

    num = 0xdeadbeef;
    MPI_T_pvar_get_num(&num);
    printf("get_num=%d\n", num);
    assert(num != 0xdeadbeef);
    /* Scan every pvar, remembering the indices of the four queue variables. */
    for (i = 0; i < num; ++i) {
        name_len = desc_len = STR_SZ;
        MPI_T_pvar_get_info(i, name, &name_len, &verb, &varclass, &dtype, &enumtype, desc, &desc_len, &bind, &readonly, &continuous, &atomic);
        printf("index=%d\n", i);
        printf("--> name='%s' name_len=%d desc='%s' desc_len=%d\n", name, name_len, desc, desc_len);
        printf("--> verb=%d varclass=%d dtype=%#x bind=%d readonly=%d continuous=%d atomic=%d\n",
               verb, varclass, dtype, bind, readonly, continuous, atomic);

        if (0 == strcmp(name, "posted_recvq_length")) {
            pq_idx = i;
        }
        else if (0 == strcmp(name, "unexpected_recvq_length")) {
            uq_idx = i;
        }
        else if (0 == strcmp(name, "posted_recvq_match_attempts")) {
            pqm_idx = i;
            pqm_writable = !readonly;
        }
        else if (0 == strcmp(name, "unexpected_recvq_match_attempts")) {
            uqm_idx = i;
            uqm_writable = !readonly;
        }
    }

    printf("pq_idx=%d uq_idx=%d pqm_idx=%d uqm_idx=%d\n", pq_idx, uq_idx, pqm_idx, uqm_idx);

    /* All four variables must exist before we bind handles to them;
     * passing a -1 index to MPI_T_pvar_handle_alloc would be erroneous. */
    assert(pq_idx != -1 && uq_idx != -1 && pqm_idx != -1 && uqm_idx != -1);

    /* setup a session and handles for the PQ and UQ length variables */
    session = MPI_T_PVAR_SESSION_NULL;
    MPI_T_pvar_session_create(&session);
    assert(session != MPI_T_PVAR_SESSION_NULL);

    pq_handle = MPI_T_PVAR_HANDLE_NULL;
    MPI_T_pvar_handle_alloc(session, pq_idx, NULL, &pq_handle, &count);
    /* BUGFIX: was 'assert(count = 1)' — an assignment that always passes
     * and clobbers count; same fix applied to the three allocs below. */
    assert(count == 1);
    assert(pq_handle != MPI_T_PVAR_HANDLE_NULL);

    uq_handle = MPI_T_PVAR_HANDLE_NULL;
    MPI_T_pvar_handle_alloc(session, uq_idx, NULL, &uq_handle, &count);
    assert(count == 1);
    assert(uq_handle != MPI_T_PVAR_HANDLE_NULL);

    pqm_handle = MPI_T_PVAR_HANDLE_NULL;
    MPI_T_pvar_handle_alloc(session, pqm_idx, NULL, &pqm_handle, &count);
    assert(count == 1);
    assert(pqm_handle != MPI_T_PVAR_HANDLE_NULL);

    uqm_handle = MPI_T_PVAR_HANDLE_NULL;
    MPI_T_pvar_handle_alloc(session, uqm_idx, NULL, &uqm_handle, &count);
    assert(count == 1);
    assert(uqm_handle != MPI_T_PVAR_HANDLE_NULL);

    /* now send/recv some messages and track the lengths of the queues */
    {
        int buf1, buf2, buf3, buf4;
        MPI_Request r1, r2, r3, r4;

        buf1 = buf2 = buf3 = buf4 = 0xfeedface;
        r1 = r2 = r3 = r4 = MPI_REQUEST_NULL;

        /* Poison the cached counter globals so print_vars shows real reads. */
        posted_qlen = 0x0123abcd;
        unexpected_qlen = 0x0123abcd;
        posted_queue_match_attempts = 0x0123abcd;
        unexpected_queue_match_attempts = 0x0123abcd;
        print_vars(1);

        /* Self-sends with no matching recv land on the unexpected queue. */
        MPI_Isend(&buf1, 1, MPI_INT, 0, /*tag=*/11, MPI_COMM_SELF, &r1);
        print_vars(2);
        printf("expected (posted_qlen,unexpected_qlen) = (0,1)\n");

        MPI_Isend(&buf1, 1, MPI_INT, 0, /*tag=*/22, MPI_COMM_SELF, &r2);
        print_vars(3);
        printf("expected (posted_qlen,unexpected_qlen) = (0,2)\n");

        /* A recv with no matching send sits on the posted queue. */
        MPI_Irecv(&buf2, 1, MPI_INT, 0, /*tag=*/33, MPI_COMM_SELF, &r3);
        print_vars(4);
        printf("expected (posted_qlen,unexpected_qlen) = (1,2)\n");

        MPI_Recv(&buf3, 1, MPI_INT, 0, /*tag=*/22, MPI_COMM_SELF, MPI_STATUS_IGNORE);
        MPI_Wait(&r2, MPI_STATUS_IGNORE);
        print_vars(5);
        printf("expected (posted_qlen,unexpected_qlen) = (1,1)\n");

        MPI_Recv(&buf3, 1, MPI_INT, 0, /*tag=*/11, MPI_COMM_SELF, MPI_STATUS_IGNORE);
        MPI_Wait(&r1, MPI_STATUS_IGNORE);
        print_vars(6);
        printf("expected (posted_qlen,unexpected_qlen) = (1,0)\n");

        MPI_Send(&buf3, 1, MPI_INT, 0, /*tag=*/33, MPI_COMM_SELF);
        MPI_Wait(&r3, MPI_STATUS_IGNORE);
        print_vars(7);
        printf("expected (posted_qlen,unexpected_qlen) = (0,0)\n");
    }

    /* Reset the match-attempt counters where the implementation allows it. */
    if (pqm_writable) {
        posted_queue_match_attempts = 0;
        MPI_T_pvar_write(session, pqm_handle, &posted_queue_match_attempts);
    }
    if (uqm_writable) {
        unexpected_queue_match_attempts = 0;
        MPI_T_pvar_write(session, uqm_handle, &unexpected_queue_match_attempts);
    }
    print_vars(8);

    /* cleanup */
    MPI_T_pvar_handle_free(session, &uqm_handle);
    MPI_T_pvar_handle_free(session, &pqm_handle);
    MPI_T_pvar_handle_free(session, &uq_handle);
    MPI_T_pvar_handle_free(session, &pq_handle);
    MPI_T_pvar_session_free(&session);

    MPI_T_finalize();
    MPI_Finalize();

    return 0;
}
Esempio n. 3
0
/*
 * Workload driving the Open MPI monitoring PML through the MPI_T "flush"
 * pvar: phase 1 circulates a token around MPI_COMM_WORLD, phase 2 splits the
 * world in two (odd ranks run a token ring, even ranks an alltoall plus a
 * barrier on a further split), and the monitoring data is flushed to one
 * profile file per process and per phase via MPI_T_pvar_write.
 */
int main(int argc, char* argv[])
{
    int rank, size, n, to, from, tagno, MPIT_result, provided, count;
    MPI_T_pvar_session session;
    MPI_Status status;
    MPI_Comm newcomm;
    MPI_Request request;
    char filename[1024];


    /* first phase : make a token circulated in MPI_COMM_WORLD */
    n = -1;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    to = (rank + 1) % size;
    /* BUGFIX: (rank - 1) % size evaluates to -1 for rank 0 in C (truncating
     * division); bias by size before taking the remainder. */
    from = (rank - 1 + size) % size;
    tagno = 201;

    MPIT_result = MPI_T_init_thread(MPI_THREAD_SINGLE, &provided);
    if (MPIT_result != MPI_SUCCESS)
        MPI_Abort(MPI_COMM_WORLD, MPIT_result);

    MPIT_result = MPI_T_pvar_get_index(flush_pvar_name, MPI_T_PVAR_CLASS_GENERIC, &flush_pvar_idx);
    if (MPIT_result != MPI_SUCCESS) {
        printf("cannot find monitoring MPI_T \"%s\" pvar, check that you have monitoring pml\n",
               flush_pvar_name);
        MPI_Abort(MPI_COMM_WORLD, MPIT_result);
    }

    MPIT_result = MPI_T_pvar_session_create(&session);
    if (MPIT_result != MPI_SUCCESS) {
        printf("cannot create a session for \"%s\" pvar\n", flush_pvar_name);
        MPI_Abort(MPI_COMM_WORLD, MPIT_result);
    }

    /* Allocating a new PVAR in a session will reset the counters */
    MPIT_result = MPI_T_pvar_handle_alloc(session, flush_pvar_idx,
                                          MPI_COMM_WORLD, &flush_handle, &count);
    if (MPIT_result != MPI_SUCCESS) {
        printf("failed to allocate handle on \"%s\" pvar, check that you have monitoring pml\n",
               flush_pvar_name);
        MPI_Abort(MPI_COMM_WORLD, MPIT_result);
    }

    MPIT_result = MPI_T_pvar_start(session, flush_handle);
    if (MPIT_result != MPI_SUCCESS) {
        printf("failed to start handle on \"%s\" pvar, check that you have monitoring pml\n",
               flush_pvar_name);
        MPI_Abort(MPI_COMM_WORLD, MPIT_result);
    }

    if (rank == 0) {
        n = 25;
        MPI_Isend(&n,1,MPI_INT,to,tagno,MPI_COMM_WORLD,&request);
    }
    /* Pass the token around until it has been decremented below zero. */
    while (1) {
        MPI_Irecv(&n,1,MPI_INT,from,tagno,MPI_COMM_WORLD, &request);
        MPI_Wait(&request,&status);
        if (rank == 0) {n--;tagno++;}
        MPI_Isend(&n,1,MPI_INT,to,tagno,MPI_COMM_WORLD, &request);
        if (rank != 0) {n--;tagno++;}
        if (n<0){
            break;
        }
    }

    /* Build one file per processes
       Every thing that has been monitored by each
       process since the last flush will be output in filename */

    /*
      Requires directory prof to be created.
      Filename format should display the phase number
      and the process rank for ease of parsing with
      aggregate_profile.pl script
    */
    sprintf(filename,"prof/phase_1_%d.prof",rank);
    if( MPI_SUCCESS != MPI_T_pvar_write(session, flush_handle, filename) ) {
        fprintf(stderr, "Process %d cannot save monitoring in %s\n", rank, filename);
    }
    /* Force the writing of the monitoring data */
    MPIT_result = MPI_T_pvar_stop(session, flush_handle);
    if (MPIT_result != MPI_SUCCESS) {
        printf("failed to stop handle on \"%s\" pvar, check that you have monitoring pml\n",
               flush_pvar_name);
        MPI_Abort(MPI_COMM_WORLD, MPIT_result);
    }

    MPIT_result = MPI_T_pvar_start(session, flush_handle);
    if (MPIT_result != MPI_SUCCESS) {
        printf("failed to start handle on \"%s\" pvar, check that you have monitoring pml\n",
               flush_pvar_name);
        MPI_Abort(MPI_COMM_WORLD, MPIT_result);
    }
    /* Don't set a filename. If we stop the session before setting it, then no output file
     * will be generated.
     */
    if( MPI_SUCCESS != MPI_T_pvar_write(session, flush_handle, NULL) ) {
        fprintf(stderr, "Process %d cannot save monitoring in %s\n", rank, filename);
    }

    /*
      Second phase. Work with different communicators.
      Odd ranks will circulate a token
      while even ranks will perform an all_to_all.
    */
    MPI_Comm_split(MPI_COMM_WORLD, rank%2, rank, &newcomm);

    /* the filename for flushing monitoring now uses 2 as phase number! */
    sprintf(filename, "prof/phase_2_%d.prof", rank);

    if(rank%2){ /* odd ranks (in COMM_WORLD) circulate a token */
        MPI_Comm_rank(newcomm, &rank);
        MPI_Comm_size(newcomm, &size);
        if( size > 1 ) {
            to = (rank + 1) % size;
            /* Same negative-modulo fix as in phase 1. */
            from = (rank - 1 + size) % size;
            tagno = 201;
            if (rank == 0){
                n = 50;
                MPI_Send(&n, 1, MPI_INT, to, tagno, newcomm);
            }
            while (1){
                MPI_Recv(&n, 1, MPI_INT, from, tagno, newcomm, &status);
                if (rank == 0) {n--; tagno++;}
                MPI_Send(&n, 1, MPI_INT, to, tagno, newcomm);
                if (rank != 0) {n--; tagno++;}
                if (n<0){
                    if( MPI_SUCCESS != MPI_T_pvar_write(session, flush_handle, filename) ) {
                        fprintf(stderr, "Process %d cannot save monitoring in %s\n", rank, filename);
                    }
                    break;
                }
            }
        }
    } else { /* even ranks (in COMM_WORLD) perform an all_to_all and a barrier */
        /* Zero-initialize so the alltoall does not send indeterminate data. */
        int send_buff[10240] = {0};
        int recv_buff[10240];
        MPI_Comm subcomm;
        MPI_Comm_rank(newcomm, &rank);
        MPI_Comm_size(newcomm, &size);
        MPI_Alltoall(send_buff, 10240/size, MPI_INT, recv_buff, 10240/size, MPI_INT, newcomm);
        /* BUGFIX: the second split used to overwrite (and so leak) newcomm;
         * use a distinct handle and free it once the barrier is done. */
        MPI_Comm_split(newcomm, rank%2, rank, &subcomm);
        MPI_Barrier(subcomm);
        if( MPI_SUCCESS != MPI_T_pvar_write(session, flush_handle, filename) ) {
            fprintf(stderr, "Process %d cannot save monitoring in %s\n", rank, filename);
        }
        MPI_Comm_free(&subcomm);
    }

    /* BUGFIX: the phase-2 communicator was never freed. */
    MPI_Comm_free(&newcomm);

    MPIT_result = MPI_T_pvar_stop(session, flush_handle);
    if (MPIT_result != MPI_SUCCESS) {
        printf("failed to stop handle on \"%s\" pvar, check that you have monitoring pml\n",
               flush_pvar_name);
        MPI_Abort(MPI_COMM_WORLD, MPIT_result);
    }

    MPIT_result = MPI_T_pvar_handle_free(session, &flush_handle);
    if (MPIT_result != MPI_SUCCESS) {
        printf("failed to free handle on \"%s\" pvar, check that you have monitoring pml\n",
               flush_pvar_name);
        MPI_Abort(MPI_COMM_WORLD, MPIT_result);
    }

    MPIT_result = MPI_T_pvar_session_free(&session);
    if (MPIT_result != MPI_SUCCESS) {
        printf("cannot close a session for \"%s\" pvar\n", flush_pvar_name);
        MPI_Abort(MPI_COMM_WORLD, MPIT_result);
    }

    (void)PMPI_T_finalize();

    /* Now, in MPI_Finalize(), the pml_monitoring library outputs, in
       STDERR, the aggregated recorded monitoring of all the phases*/
    MPI_Finalize();
    return 0;
}
/*
 * Driver for the unexpected_recvq_buffer_size pvar test: scans the MPIT
 * performance variables for that name and, when it is present, binds a
 * handle in a session and runs the two queue-behavior tests.
 */
int main(int argc, char *argv[])
{
    int idx, nprocs, nvars, nlen, dlen, verbosity, threading;
    int vclass, binding, ro, cont, atom, uqsize_idx, nvals;
    char vname[STR_LEN], vdesc[STR_LEN];
    MPI_Datatype dt;
    MPI_T_enum et;

    MPI_Init(NULL, NULL);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (0 == rank) {
        printf("MPIT pvar test: unexpected_recvq_buffer_size\n");
        fflush(stdout);
    }

    /* Ensure we're using exactly two ranks. */
    /* Future tests (using collectives) might need this because of the MPI_Barrier */
    assert(nprocs == 2);

    /* Standard MPIT initialization. */
    TRY(MPI_T_init_thread(MPI_THREAD_SINGLE, &threading));
    TRY(MPI_T_pvar_get_num(&nvars));

    int located = 0;

    /* Locate desired MPIT variable. */
    for (idx = 0; idx < nvars; idx++) {
        nlen = dlen = STR_LEN;
        TRY(MPI_T_pvar_get_info(idx, vname, &nlen, &verbosity, &vclass, &dt,
                                &et, vdesc, &dlen, &binding, &ro,
                                &cont, &atom));

        if (0 == strcmp(vname, "unexpected_recvq_buffer_size")) {
            uqsize_idx = idx;
            located = 1;
        }
    }

    if (located) {
        /* Initialize MPIT session & variable handle. */
        MPI_T_pvar_session_create(&session);
        MPI_T_pvar_handle_alloc(session, uqsize_idx, NULL, &uqsize_handle, &nvals);

        /* Ensure the variable is of the correct size. */
        assert(nvals == 1);

        /* Run a batch of tests. */
        reversed_tags_test();
        rndv_test();

        /* Cleanup. */
        MPI_T_pvar_handle_free(session, &uqsize_handle);
        MPI_T_pvar_session_free(&session);
    }

    if (0 == rank) {
        printf("finished\n");
        fflush(stdout);
    }

    TRY(MPI_T_finalize());
    MPI_Finalize();

    return 0;
}