Code example #1
File: igatherf.c  Project: mpifl/mpich3newforfile
FORT_DLL_SPEC void FORT_CALL mpi_igather_ ( void*v1, MPI_Fint *v2, MPI_Fint *v3, void*v4, MPI_Fint *v5, MPI_Fint *v6, MPI_Fint *v7, MPI_Fint *v8, MPI_Fint *v9, MPI_Fint *ierr ){

#ifndef HAVE_MPI_F_INIT_WORKS_WITH_C
    if (MPIR_F_NeedInit){ mpirinitf_(); MPIR_F_NeedInit = 0; }
#endif
    if (v1 == MPIR_F_MPI_IN_PLACE) v1 = MPI_IN_PLACE;
    *ierr = MPI_Igather( v1, (int)*v2, (MPI_Datatype)(*v3), v4, (int)*v5, (MPI_Datatype)(*v6), (int)*v7, (MPI_Comm)(*v8), (MPI_Request *)(v9) );
}
Code example #2
File: mixing.cpp  Project: msurkovsky/aislinn
#include <mpi.h>

int main(int argc, char **argv)
{
	MPI_Init(&argc, &argv);

	int rank;

	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	
	int in[3], out[3];

	if (rank == 0) {
		/* Deliberate error for the verifier: rank 0 starts a nonblocking
		 * gather that is never completed, while rank 1 (below) calls the
		 * blocking form; blocking and nonblocking collectives never match. */
		MPI_Request r;
		MPI_Igather(in, 3, MPI_INT, out, 3, MPI_INT, 0, MPI_COMM_WORLD, &r);
	}
	if (rank == 1) {
		MPI_Gather(in, 3, MPI_INT, out, 3, MPI_INT, 0, MPI_COMM_WORLD);
	}

	MPI_Finalize();
	return 0;
}
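Note that this test is intentionally erroneous so that a verifier such as Aislinn can detect the mismatch. A corrected sketch (hypothetical, not part of the project) has every rank issue the same nonblocking call and complete it before finalizing:

#include <stdlib.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int in[3] = {rank, rank, rank};
    int *out = (int *) malloc(3 * size * sizeof(int)); /* root collects 3 ints per rank */
    MPI_Request r;

    /* every rank makes the matching nonblocking call... */
    MPI_Igather(in, 3, MPI_INT, out, 3, MPI_INT, 0, MPI_COMM_WORLD, &r);
    /* ...and completes the request before finalizing */
    MPI_Wait(&r, MPI_STATUS_IGNORE);

    free(out);
    MPI_Finalize();
    return 0;
}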
Code example #3
File: nonblocking.c  Project: Niharikareddy/mpich
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define NUM_INTS (2) /* two ints contributed per rank, matching the init loop below */

/* abort if a runtime check fails */
#define my_assert(cond_)                                                  \
    do {                                                                  \
        if (!(cond_)) {                                                   \
            fprintf(stderr, "assertion (%s) failed, aborting\n", #cond_); \
            MPI_Abort(MPI_COMM_WORLD, 1);                                 \
        }                                                                 \
    } while (0)

int main(int argc, char **argv)
{
    int errs = 0;
    int i;
    int rank, size;
    int *sbuf = NULL;
    int *rbuf = NULL;
    int *scounts = NULL;
    int *rcounts = NULL;
    int *sdispls = NULL;
    int *rdispls = NULL;
    MPI_Datatype *types = NULL;
    MPI_Comm comm;
    MPI_Request req;

    /* intentionally not using MTest_Init/MTest_Finalize in order to make it
     * easy to take this test and use it as an NBC sanity test outside of the
     * MPICH test suite */
    MPI_Init(&argc, &argv);

    comm = MPI_COMM_WORLD;

    MPI_Comm_size(comm, &size);
    MPI_Comm_rank(comm, &rank);

    /* enough space for every process to contribute at least NUM_INTS ints to any
     * collective operation */
    sbuf = malloc(NUM_INTS * size * sizeof(int));
    my_assert(sbuf);
    rbuf = malloc(NUM_INTS * size * sizeof(int));
    my_assert(rbuf);
    scounts = malloc(size * sizeof(int));
    my_assert(scounts);
    rcounts = malloc(size * sizeof(int));
    my_assert(rcounts);
    sdispls = malloc(size * sizeof(int));
    my_assert(sdispls);
    rdispls = malloc(size * sizeof(int));
    my_assert(rdispls);
    types = malloc(size * sizeof(MPI_Datatype));
    my_assert(types);

    for (i = 0; i < size; ++i) {
        sbuf[2 * i] = i;
        sbuf[2 * i + 1] = i;
        rbuf[2 * i] = i;
        rbuf[2 * i + 1] = i;
        scounts[i] = NUM_INTS;
        rcounts[i] = NUM_INTS;
        sdispls[i] = i * NUM_INTS;
        rdispls[i] = i * NUM_INTS;
        types[i] = MPI_INT;
    }

    MPI_Ibarrier(comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Ibcast(sbuf, NUM_INTS, MPI_INT, 0, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Igather(sbuf, NUM_INTS, MPI_INT, rbuf, NUM_INTS, MPI_INT, 0, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    if (0 == rank)
        MPI_Igather(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, rbuf, NUM_INTS, MPI_INT, 0, comm, &req);
    else
        MPI_Igather(sbuf, NUM_INTS, MPI_INT, rbuf, NUM_INTS, MPI_INT, 0, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Igatherv(sbuf, NUM_INTS, MPI_INT, rbuf, rcounts, rdispls, MPI_INT, 0, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    if (0 == rank)
        MPI_Igatherv(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, rbuf, rcounts, rdispls, MPI_INT, 0, comm,
                     &req);
    else
        MPI_Igatherv(sbuf, NUM_INTS, MPI_INT, rbuf, rcounts, rdispls, MPI_INT, 0, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Iscatter(sbuf, NUM_INTS, MPI_INT, rbuf, NUM_INTS, MPI_INT, 0, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    if (0 == rank)
        MPI_Iscatter(sbuf, NUM_INTS, MPI_INT, MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, 0, comm, &req);
    else
        MPI_Iscatter(sbuf, NUM_INTS, MPI_INT, rbuf, NUM_INTS, MPI_INT, 0, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Iscatterv(sbuf, scounts, sdispls, MPI_INT, rbuf, NUM_INTS, MPI_INT, 0, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    if (0 == rank)
        MPI_Iscatterv(sbuf, scounts, sdispls, MPI_INT, MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, 0, comm,
                      &req);
    else
        MPI_Iscatterv(sbuf, scounts, sdispls, MPI_INT, rbuf, NUM_INTS, MPI_INT, 0, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Iallgather(sbuf, NUM_INTS, MPI_INT, rbuf, NUM_INTS, MPI_INT, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Iallgather(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, rbuf, NUM_INTS, MPI_INT, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Iallgatherv(sbuf, NUM_INTS, MPI_INT, rbuf, rcounts, rdispls, MPI_INT, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Iallgatherv(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, rbuf, rcounts, rdispls, MPI_INT, comm,
                    &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Ialltoall(sbuf, NUM_INTS, MPI_INT, rbuf, NUM_INTS, MPI_INT, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Ialltoall(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, rbuf, NUM_INTS, MPI_INT, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Ialltoallv(sbuf, scounts, sdispls, MPI_INT, rbuf, rcounts, rdispls, MPI_INT, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Ialltoallv(MPI_IN_PLACE, NULL, NULL, MPI_DATATYPE_NULL, rbuf, rcounts, rdispls, MPI_INT,
                   comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Ialltoallw(sbuf, scounts, sdispls, types, rbuf, rcounts, rdispls, types, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Ialltoallw(MPI_IN_PLACE, NULL, NULL, NULL, rbuf, rcounts, rdispls, types, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Ireduce(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, 0, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    if (0 == rank)
        MPI_Ireduce(MPI_IN_PLACE, rbuf, NUM_INTS, MPI_INT, MPI_SUM, 0, comm, &req);
    else
        MPI_Ireduce(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, 0, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Iallreduce(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Iallreduce(MPI_IN_PLACE, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Ireduce_scatter(sbuf, rbuf, rcounts, MPI_INT, MPI_SUM, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Ireduce_scatter(MPI_IN_PLACE, rbuf, rcounts, MPI_INT, MPI_SUM, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Ireduce_scatter_block(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Ireduce_scatter_block(MPI_IN_PLACE, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Iscan(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Iscan(MPI_IN_PLACE, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Iexscan(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Iexscan(MPI_IN_PLACE, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    if (sbuf)
        free(sbuf);
    if (rbuf)
        free(rbuf);
    if (scounts)
        free(scounts);
    if (rcounts)
        free(rcounts);
    if (sdispls)
        free(sdispls);
    if (rdispls)
        free(rdispls);
    if (types)
        free(types);

    if (rank == 0) {
        if (errs)
            fprintf(stderr, "Found %d errors\n", errs);
        else
            printf(" No errors\n");
    }
    MPI_Finalize();
    return 0;
}
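The in-place branches above pass MPI_IN_PLACE with a count of -1 and MPI_DATATYPE_NULL to confirm that an implementation ignores the send arguments at the root. A minimal sketch of the same idiom (in_place_igather is a hypothetical helper, and it passes 0 where the test deliberately passes -1):

#include <mpi.h>

/* Gather n ints per rank into rbuf at the root; the root's own
 * contribution is assumed to already sit in rbuf[0..n-1] (rank 0's slot). */
static void in_place_igather(int *rbuf, int n, MPI_Comm comm)
{
    int rank;
    MPI_Request req;

    MPI_Comm_rank(comm, &rank);
    if (rank == 0)
        MPI_Igather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL,
                    rbuf, n, MPI_INT, 0, comm, &req);
    else /* recv arguments are ignored on non-root ranks */
        MPI_Igather(rbuf, n, MPI_INT, NULL, 0, MPI_INT, 0, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
}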
Code example #4
File: nonblocking3.c  Project: R7R8/simgrid
/* Starts a "random" operation on "comm" corresponding to "rndnum" and returns
 * in (*req) a request handle corresponding to that operation.  This call should
 * be considered collective over comm (with a consistent value for "rndnum"),
 * even though the operation may only be a point-to-point request. */
static void start_random_nonblocking(MPI_Comm comm, unsigned int rndnum, MPI_Request *req, struct laundry *l)
{
    int i, j;
    int rank, size;
    int *buf = NULL;
    int *recvbuf = NULL;
    int *sendcounts = NULL;
    int *recvcounts = NULL;
    int *sdispls = NULL;
    int *rdispls = NULL;
    MPI_Datatype *sendtypes = NULL;
    MPI_Datatype *recvtypes = NULL;
    signed char *buf_alias = NULL;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    *req = MPI_REQUEST_NULL;

    l->case_num = -1;
    l->comm = comm;

    l->buf        = buf        = malloc(COUNT*size*sizeof(int));
    l->recvbuf    = recvbuf    = malloc(COUNT*size*sizeof(int));
    l->sendcounts = sendcounts = malloc(size*sizeof(int));
    l->recvcounts = recvcounts = malloc(size*sizeof(int));
    l->sdispls    = sdispls    = malloc(size*sizeof(int));
    l->rdispls    = rdispls    = malloc(size*sizeof(int));
    l->sendtypes  = sendtypes  = malloc(size*sizeof(MPI_Datatype));
    l->recvtypes  = recvtypes  = malloc(size*sizeof(MPI_Datatype));

#define NUM_CASES (21)
    l->case_num = rand_range(rndnum, 0, NUM_CASES);
    switch (l->case_num) {
        case 0: /* MPI_Ibcast */
            for (i = 0; i < COUNT; ++i) {
                if (rank == 0) {
                    buf[i] = i;
                }
                else {
                    buf[i] = 0xdeadbeef;
                }
            }
            MPI_Ibcast(buf, COUNT, MPI_INT, 0, comm, req);
            break;

        case 1: /* MPI_Ibcast (again, but designed to stress scatter/allgather impls) */
            /* FIXME fiddle with PRIME and buffer allocation s.t. PRIME is much larger (1021?) */
            buf_alias = (signed char *)buf;
            my_assert(COUNT*size*sizeof(int) > PRIME); /* sanity */
            for (i = 0; i < PRIME; ++i) {
                if (rank == 0)
                    buf_alias[i] = i;
                else
                    buf_alias[i] = 0xdb;
            }
            for (i = PRIME; i < COUNT * size * sizeof(int); ++i) {
                buf_alias[i] = 0xbf;
            }
            MPI_Ibcast(buf_alias, PRIME, MPI_SIGNED_CHAR, 0, comm, req);
            break;

        case 2: /* MPI_Ibarrier */
            MPI_Ibarrier(comm, req);
            break;

        case 3: /* MPI_Ireduce */
            for (i = 0; i < COUNT; ++i) {
                buf[i] = rank + i;
                recvbuf[i] = 0xdeadbeef;
            }
            MPI_Ireduce(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, 0, comm, req);
            break;

        case 4: /* same again, use a user op and free it before the wait */
            {
                MPI_Op op = MPI_OP_NULL;
                MPI_Op_create(sum_fn, /*commute=*/1, &op);
                for (i = 0; i < COUNT; ++i) {
                    buf[i] = rank + i;
                    recvbuf[i] = 0xdeadbeef;
                }
                MPI_Ireduce(buf, recvbuf, COUNT, MPI_INT, op, 0, comm, req);
                MPI_Op_free(&op);
            }
            break;

        case 5: /* MPI_Iallreduce */
            for (i = 0; i < COUNT; ++i) {
                buf[i] = rank + i;
                recvbuf[i] = 0xdeadbeef;
            }
            MPI_Iallreduce(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
            break;

        case 6: /* MPI_Ialltoallv (a weak test, neither irregular nor sparse) */
            for (i = 0; i < size; ++i) {
                sendcounts[i] = COUNT;
                recvcounts[i] = COUNT;
                sdispls[i] = COUNT * i;
                rdispls[i] = COUNT * i;
                for (j = 0; j < COUNT; ++j) {
                    buf[i*COUNT+j] = rank + (i * j);
                    recvbuf[i*COUNT+j] = 0xdeadbeef;
                }
            }
            MPI_Ialltoallv(buf, sendcounts, sdispls, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, comm, req);
            break;

        case 7: /* MPI_Igather */
            for (i = 0; i < size*COUNT; ++i) {
                buf[i] = rank + i;
                recvbuf[i] = 0xdeadbeef;
            }
            MPI_Igather(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
            break;

        case 8: /* same test again, just use a dup'ed datatype and free it before the wait */
            {
                MPI_Datatype type = MPI_DATATYPE_NULL;
                MPI_Type_dup(MPI_INT, &type);
                for (i = 0; i < size*COUNT; ++i) {
                    buf[i] = rank + i;
                    recvbuf[i] = 0xdeadbeef;
                }
                MPI_Igather(buf, COUNT, MPI_INT, recvbuf, COUNT, type, 0, comm, req);
                MPI_Type_free(&type); /* should cause implementations that don't refcount
                                         correctly to blow up or hang in the wait */
            }
            break;

        case 9: /* MPI_Iscatter */
            for (i = 0; i < size; ++i) {
                for (j = 0; j < COUNT; ++j) {
                    if (rank == 0)
                        buf[i*COUNT+j] = i + j;
                    else
                        buf[i*COUNT+j] = 0xdeadbeef;
                    recvbuf[i*COUNT+j] = 0xdeadbeef;
                }
            }
            MPI_Iscatter(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
            break;

        case 10: /* MPI_Iscatterv */
            for (i = 0; i < size; ++i) {
                /* weak test, just test the regular case where all counts are equal */
                sendcounts[i] = COUNT;
                sdispls[i] = i * COUNT;
                for (j = 0; j < COUNT; ++j) {
                    if (rank == 0)
                        buf[i*COUNT+j] = i + j;
                    else
                        buf[i*COUNT+j] = 0xdeadbeef;
                    recvbuf[i*COUNT+j] = 0xdeadbeef;
                }
            }
            MPI_Iscatterv(buf, sendcounts, sdispls, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
            break;

        case 11: /* MPI_Ireduce_scatter */
            for (i = 0; i < size; ++i) {
                recvcounts[i] = COUNT;
                for (j = 0; j < COUNT; ++j) {
                    buf[i*COUNT+j] = rank + i;
                    recvbuf[i*COUNT+j] = 0xdeadbeef;
                }
            }
            MPI_Ireduce_scatter(buf, recvbuf, recvcounts, MPI_INT, MPI_SUM, comm, req);
            break;

        case 12: /* MPI_Ireduce_scatter_block */
            for (i = 0; i < size; ++i) {
                for (j = 0; j < COUNT; ++j) {
                    buf[i*COUNT+j] = rank + i;
                    recvbuf[i*COUNT+j] = 0xdeadbeef;
                }
            }
            MPI_Ireduce_scatter_block(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
            break;

        case 13: /* MPI_Igatherv */
            for (i = 0; i < size*COUNT; ++i) {
                buf[i] = 0xdeadbeef;
                recvbuf[i] = 0xdeadbeef;
            }
            for (i = 0; i < COUNT; ++i) {
                buf[i] = rank + i;
            }
            for (i = 0; i < size; ++i) {
                recvcounts[i] = COUNT;
                rdispls[i] = i * COUNT;
            }
            MPI_Igatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, 0, comm, req);
            break;

        case 14: /* MPI_Ialltoall */
            for (i = 0; i < size; ++i) {
                for (j = 0; j < COUNT; ++j) {
                    buf[i*COUNT+j] = rank + (i * j);
                    recvbuf[i*COUNT+j] = 0xdeadbeef;
                }
            }
            MPI_Ialltoall(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, comm, req);
            break;

        case 15: /* MPI_Iallgather */
            for (i = 0; i < size*COUNT; ++i) {
                buf[i] = rank + i;
                recvbuf[i] = 0xdeadbeef;
            }
            MPI_Iallgather(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, comm, req);
            break;

        case 16: /* MPI_Iallgatherv */
            for (i = 0; i < size; ++i) {
                for (j = 0; j < COUNT; ++j) {
                    recvbuf[i*COUNT+j] = 0xdeadbeef;
                }
                recvcounts[i] = COUNT;
                rdispls[i] = i * COUNT;
            }
            for (i = 0; i < COUNT; ++i)
                buf[i] = rank + i;
            MPI_Iallgatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, comm, req);
            break;

        case 17: /* MPI_Iscan */
            for (i = 0; i < COUNT; ++i) {
                buf[i] = rank + i;
                recvbuf[i] = 0xdeadbeef;
            }
            MPI_Iscan(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
            break;

        case 18: /* MPI_Iexscan */
            for (i = 0; i < COUNT; ++i) {
                buf[i] = rank + i;
                recvbuf[i] = 0xdeadbeef;
            }
            MPI_Iexscan(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
            break;

        case 19: /* MPI_Ialltoallw (a weak test, neither irregular nor sparse) */
            for (i = 0; i < size; ++i) {
                sendcounts[i] = COUNT;
                recvcounts[i] = COUNT;
                sdispls[i] = COUNT * i * sizeof(int);
                rdispls[i] = COUNT * i * sizeof(int);
                sendtypes[i] = MPI_INT;
                recvtypes[i] = MPI_INT;
                for (j = 0; j < COUNT; ++j) {
                    buf[i*COUNT+j] = rank + (i * j);
                    recvbuf[i*COUNT+j] = 0xdeadbeef;
                }
            }
            MPI_Ialltoallw(buf, sendcounts, sdispls, sendtypes, recvbuf, recvcounts, rdispls, recvtypes, comm, req);
            break;

        case 20: /* basic pt2pt MPI_Isend/MPI_Irecv pairing */
            /* even ranks send to odd ranks, but only if we have a full pair */
            if ((rank % 2 != 0) || (rank != size-1)) {
                for (j = 0; j < COUNT; ++j) {
                    buf[j] = j;
                    recvbuf[j] = 0xdeadbeef;
                }
                if (rank % 2 == 0)
                    MPI_Isend(buf, COUNT, MPI_INT, rank+1, 5, comm, req);
                else
                    MPI_Irecv(recvbuf, COUNT, MPI_INT, rank-1, 5, comm, req);
            }
            break;

        default:
            fprintf(stderr, "unexpected value for l->case_num=%d)\n", (l->case_num));
            MPI_Abort(comm, 1);
            exit(1);
            break;
    }
}
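The laundry struct exists so the buffers outlive this call and can be freed after the request completes. A hedged sketch of a caller (the real driver in nonblocking3.c is not shown in this excerpt; run_one and cleanup_laundry are illustrative names), remembering that rndnum must be identical on every rank since the call is collective:

/* Illustrative driver: start one random operation, overlap work, finish it. */
static void cleanup_laundry(struct laundry *l)
{
    free(l->buf);        free(l->recvbuf);
    free(l->sendcounts); free(l->recvcounts);
    free(l->sdispls);    free(l->rdispls);
    free(l->sendtypes);  free(l->recvtypes);
}

static void run_one(MPI_Comm comm, unsigned int rndnum)
{
    MPI_Request req;
    struct laundry l;

    start_random_nonblocking(comm, rndnum, &req, &l);
    /* ...unrelated computation can be overlapped here... */
    /* case 20 may leave req == MPI_REQUEST_NULL; waiting on it is a no-op */
    MPI_Wait(&req, MPI_STATUS_IGNORE);
    cleanup_laundry(&l);
}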
Code example #5
File: Fleet_Sim3.c  Project: haoboji/FIT5139
int main(int argc, char **argv) {

    // Track initialization time
    clock_t begin, end;
    begin = clock();
    setlocale(LC_NUMERIC, "");
    double time_init;

    // Track elapsed time
    double time_start;
    double time_elapse;

    // Loop variable
    unsigned int i;
    unsigned int round = 1;
    unsigned int launches = 0;

    // Initialize the MPI environment
    MPI_Init(&argc, &argv);


    // Get the number of processes, which is also the fleet size
    int world_size;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    // Get the rank of the process
    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    // Seed
    srand(time(NULL) + world_rank);

    // Track initialization time
    MPI_Barrier(MPI_COMM_WORLD);
    if (world_rank == 0) {
        end = clock();
        // initialization time
        time_init = (double) (end - begin) / CLOCKS_PER_SEC;
        // Track elapsed time
        time_start = 0.0 - MPI_Wtime() + time_init; // use MPI_Wtime for high accuracy
    }

    // Store generated locations
    unsigned short locations[2][world_size];
    // Store generated location
    unsigned short location;

    // For Igather
    MPI_Request request;

    // Run for the first time
    // Each ship generates its location
    location = rand() % NUM_LOCATIONS;
    // Gather locations to master process
    MPI_Igather(&location, 1, MPI_UNSIGNED_SHORT, locations[0], 1, MPI_UNSIGNED_SHORT, 0, MPI_COMM_WORLD, &request);



    // Loop other rounds
    while (1) {
        // Each ship generates its location for the next round
        location = rand() % NUM_LOCATIONS;
        // Wait for the current round to finish
        MPI_Wait(&request, MPI_STATUS_IGNORE);
        // Kick off next round
        MPI_Igather(&location, 1, MPI_UNSIGNED_SHORT, locations[round % 2], 1, MPI_UNSIGNED_SHORT, 0, MPI_COMM_WORLD,
                    &request);
        // Process the round that just completed
        if (world_rank == 0) {
            // Tally ships per location
            unsigned short ships[NUM_LOCATIONS] = {0};
            for (i = 0; i < world_size; i++) {
                unsigned short locationTemp = locations[(round + 1) % 2][i];
                ships[locationTemp]++;
            }
            // Loop through ships to calculate launches
            for (i = 0; i < NUM_LOCATIONS; i++) {
                if (ships[i] > 4) {
                    launches++;
                }
            }
            // Track elapsed time
            time_elapse = time_start + MPI_Wtime();
            // Make sure the root process ends the loop
            if (time_elapse > 60) {
                break;
            }
            // Print one round information
            printf("Round %u: Launches = %i, Time elapsed = %lf\n", round, launches, time_elapse);
        }
        // Enter next round
        round++;
    }

    // Master
    if (world_rank == 0) {
        printf("=============================================\n");
        printf("Initialization Time = %lf\n", time_init);
        printf("Total Sampling Rounds = %'u\n", round);
        printf("Total Launches = %u\n", launches);
        // End all processes after printing all wanted results
        MPI_Abort(MPI_COMM_WORLD, 0);
    }

    return (0);

    // Each slave process generate a location and send to master
//    if (world_rank != 0) {
//        MPI_Request request[2];
//        if (1) {
//            // Each ship generate location
//            unsigned short location = rand() % NUM_LOCATIONS;
//            // Send non-blocking message to master, use tag to represent round number
//            MPI_Isend(&location, 1, MPI_UNSIGNED_SHORT, 0, 1, MPI_COMM_WORLD, &request[1]);
//            // Sampling interval
//            // wait(0.01);
//        }
//        for (round = 2; 1; round++) {
//            // Each ship generate location
//            unsigned short location = rand() % NUM_LOCATIONS;
//            // Send non-blocking message to master, use tag to represent round number
//            MPI_Request request;
//            MPI_Isend(&location, 1, MPI_UNSIGNED_SHORT, 0, round, MPI_COMM_WORLD, &request);
//            // Sampling interval
//            // wait(0.01);
//        }
//    }
//
//    unsigned short fleet_size = world_size - 1;
//
//    // Master process(Flag ship) receive locations
//    unsigned short locations[fleet_size]; // Store generated locations
//    if (world_rank == 0) {
//        for (round = 1; 1; round++) {
//            // Blocking Receive
//            for (i = 0; i < fleet_size; i++) {
//                MPI_Recv(&locations[i], 1, MPI_UNSIGNED_SHORT, MPI_ANY_SOURCE, round, MPI_COMM_WORLD, NULL);
//            }
//            // Convert locations to ships array with element being location
//            unsigned short ships[NUM_LOCATIONS] = {0};
//            for (i = 0; i < fleet_size; i++) {
//                ships[locations[i]]++;
//            }
//            // Loop through ships to calculate launches
//            for (i = 0; i < NUM_LOCATIONS; i++) {
//                if (ships[i] > 4) {
//                    launches++;
//                }
//            }
//
//        }
//    }



}
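The heart of this simulation is a two-slot pipeline: while round N's gather is still in flight, every rank already produces round N+1's sample, so communication overlaps generation. A reduced sketch of just that pattern (NUM_LOCATIONS and ROUNDS are illustrative values; RNG seeding is omitted):

#include <stdlib.h>
#include <mpi.h>

#define NUM_LOCATIONS 1000 /* illustrative */
#define ROUNDS        100  /* illustrative */

static void pipeline(MPI_Comm comm, int size)
{
    unsigned short loc;
    unsigned short slots[2][size]; /* two rounds in flight (C99 VLA) */
    MPI_Request req;
    unsigned int round;

    loc = (unsigned short)(rand() % NUM_LOCATIONS);
    MPI_Igather(&loc, 1, MPI_UNSIGNED_SHORT, slots[0], 1,
                MPI_UNSIGNED_SHORT, 0, comm, &req);
    for (round = 1; round < ROUNDS; ++round) {
        loc = (unsigned short)(rand() % NUM_LOCATIONS); /* produce the next round */
        MPI_Wait(&req, MPI_STATUS_IGNORE);              /* finish the previous round */
        /* slots[(round + 1) % 2] holds the just-completed round's data */
        MPI_Igather(&loc, 1, MPI_UNSIGNED_SHORT, slots[round % 2], 1,
                    MPI_UNSIGNED_SHORT, 0, comm, &req);
    }
    MPI_Wait(&req, MPI_STATUS_IGNORE);
}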
Code example #6
File: IMB_shr_spmd_gather.c  Project: jedwards4b/IMB
void IMB_igather_pure(struct comm_info* c_info,
                      int size,
                      struct iter_schedule* ITERATIONS,
                      MODES RUN_MODE,
                      double* time)
/*


                      MPI-NBC benchmark kernel
                      Benchmarks IMB_Igather_pure



Input variables:

-c_info               (type struct comm_info*)
                      Collection of all base data for MPI;
                      see [1] for more information


-size                 (type int)
                      Basic message size in bytes

-ITERATIONS           (type struct iter_schedule *)
                      Repetition scheduling

-RUN_MODE             (type MODES)


Output variables:

-time                 (type double*)
                      Timing result per sample


*/
{
    int         i = 0;
    Type_Size   s_size,
                r_size;
    int         s_num = 0,
                r_num = 0;
    MPI_Request request;
    MPI_Status  status;
    double      t_pure = 0.;

#ifdef CHECK
    defect=0.;
#endif
    ierr = 0;

    /* GET SIZE OF DATA TYPE */
    MPI_Type_size(c_info->s_data_type, &s_size);
    MPI_Type_size(c_info->s_data_type, &r_size);
    if ((s_size != 0) && (r_size != 0)) {
        s_num = size / s_size;
        r_num = size / r_size;
    }

    if(c_info->rank != -1) {
        for (i = 0; i < N_BARR; i++) {
            MPI_Barrier(c_info->communicator);
        }

        t_pure = MPI_Wtime();
        for(i = 0; i < ITERATIONS->n_sample; i++)
        {
            ierr = MPI_Igather((char*)c_info->s_buffer + i % ITERATIONS->s_cache_iter * ITERATIONS->s_offs,
                               s_num,
                               c_info->s_data_type,
                               (char*)c_info->r_buffer + i % ITERATIONS->r_cache_iter * ITERATIONS->r_offs,
                               r_num,
                               c_info->r_data_type,
                               i % c_info->num_procs, // root = round robin
                               c_info->communicator,
                               &request);
            MPI_ERRHAND(ierr);
            MPI_Wait(&request, &status);
#ifdef CHECK
             if (c_info->rank == i % c_info->num_procs) {
                  CHK_DIFF("Igather_pure", c_info,
                           (char*)c_info->r_buffer + i % ITERATIONS->r_cache_iter * ITERATIONS->r_offs,
                           0, 0, ((size_t)c_info->num_procs * (size_t)size), 1,
                           put, 0, ITERATIONS->n_sample, i, -2, &defect);
             }
#endif // CHECK
        }
        t_pure = (MPI_Wtime() - t_pure) / ITERATIONS->n_sample;
    }

    time[0] = t_pure;
}
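Rotating the root (i % num_procs) spreads the gather's incast across all ranks instead of always loading rank 0. Stripped of the IMB cache-offset bookkeeping, the timed loop reduces to this sketch (igather_avg is an illustrative name):

#include <mpi.h>

/* Average time of a rotating-root nonblocking gather over n_sample
 * iterations; rbuf must hold n ints per rank on every potential root. */
static double igather_avg(int *sbuf, int *rbuf, int n, int n_sample,
                          MPI_Comm comm)
{
    int i, nprocs;
    double t;

    MPI_Comm_size(comm, &nprocs);
    t = MPI_Wtime();
    for (i = 0; i < n_sample; i++) {
        MPI_Request req;
        MPI_Igather(sbuf, n, MPI_INT, rbuf, n, MPI_INT,
                    i % nprocs /* round-robin root */, comm, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);
    }
    return (MPI_Wtime() - t) / n_sample;
}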
Code example #7
File: osu_igather.c  Project: allevin/sst-macro
int main(int argc, char *argv[])
{
    setbuf(stdout, NULL);
    int i = 0, rank, size;
    int numprocs;
    double latency = 0.0, t_start = 0.0, t_stop = 0.0;
    double tcomp = 0.0, tcomp_total = 0.0, latency_in_secs = 0.0;
    double timer = 0.0;
    double test_time = 0.0, test_total = 0.0;
    double wait_time = 0.0, init_time = 0.0;
    double init_total = 0.0, wait_total = 0.0;
    char *sendbuf = NULL;
    char *recvbuf = NULL;
    int po_ret;
    size_t bufsize;

    set_header(HEADER);
    set_benchmark_name("osu_igather");
    enable_accel_support();
    po_ret = process_options(argc, argv);

    if (po_okay == po_ret && none != options.accel) {
        if (init_accel()) {
            fprintf(stderr, "Error initializing device\n");
            exit(EXIT_FAILURE);
        }
    }

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Request request;
    MPI_Status status;

    switch (po_ret) {
        case po_bad_usage:
            print_bad_usage_message(rank);
            MPI_Finalize();
            exit(EXIT_FAILURE);
        case po_help_message:
            print_help_message(rank);
            MPI_Finalize();
            exit(EXIT_SUCCESS);
        case po_version_message:
            print_version_message(rank);
            MPI_Finalize();
            exit(EXIT_SUCCESS);
        case po_okay:
            break;
    }

    if(numprocs < 2) {
        if (rank == 0) {
            fprintf(stderr, "This test requires at least two processes\n");
        }

        MPI_Finalize();
        exit(EXIT_FAILURE);
    }

    if (options.max_message_size > options.max_mem_limit) {
        options.max_message_size = options.max_mem_limit;
    }

    if (0 == rank) {
        bufsize = options.max_message_size * numprocs;
        if (allocate_buffer((void**)&recvbuf, bufsize, options.accel)) {
            fprintf(stderr, "Could Not Allocate Memory [rank %d]\n", rank);
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }
        set_buffer(recvbuf, options.accel, 1, bufsize);
    }

    if (allocate_buffer((void**)&sendbuf, options.max_message_size,
                options.accel)) {
        fprintf(stderr, "Could Not Allocate Memory [rank %d]\n", rank);
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }
    set_buffer(sendbuf, options.accel, 0, options.max_message_size);

    print_preamble_nbc(rank);

    for(size=options.min_message_size; size <= options.max_message_size; size *= 2) {
        if(size > LARGE_MESSAGE_SIZE) {
            options.skip = options.skip_large;
            options.iterations = options.iterations_large;
        }

        timer = 0.0;

        for(i=0; i < options.iterations + options.skip ; i++) {
            t_start = MPI_Wtime();
            MPI_Igather(sendbuf, size, MPI_CHAR,
                        recvbuf, size, MPI_CHAR,
                        0, MPI_COMM_WORLD, &request);
            MPI_Wait(&request,&status);

            t_stop = MPI_Wtime();

            if(i>=options.skip){
                timer += t_stop-t_start;
            }
            MPI_Barrier(MPI_COMM_WORLD);
        }

        MPI_Barrier(MPI_COMM_WORLD);

        latency = (timer * 1e6) / options.iterations;

        latency_in_secs = timer/options.iterations;

        init_arrays(latency_in_secs);

        MPI_Barrier(MPI_COMM_WORLD);

        timer = 0.0; tcomp_total = 0; tcomp = 0;
        init_total = 0.0; wait_total = 0.0;
        test_time = 0.0; test_total = 0.0;

        /* for loop with dummy_compute */
        for(i=0; i < options.iterations + options.skip ; i++) {
            t_start = MPI_Wtime();

            init_time = MPI_Wtime();
            MPI_Igather(sendbuf, size, MPI_CHAR,
                        recvbuf, size, MPI_CHAR,
                        0, MPI_COMM_WORLD, &request);
            init_time = MPI_Wtime() - init_time;

            tcomp = MPI_Wtime();
            test_time = dummy_compute(latency_in_secs, &request);
            tcomp = MPI_Wtime() - tcomp;

            wait_time = MPI_Wtime();
            MPI_Wait(&request,&status);
            wait_time = MPI_Wtime() - wait_time;

            t_stop = MPI_Wtime();

            if(i>=options.skip){
                timer += t_stop-t_start;
                tcomp_total += tcomp;
                test_total += test_time;
		        init_total += init_time;
                wait_total += wait_time;
            }
            MPI_Barrier(MPI_COMM_WORLD);
        }

        MPI_Barrier(MPI_COMM_WORLD);

        calculate_and_print_stats(rank, size, numprocs,
                                  timer, latency,
                                  test_total, tcomp_total,
                                  wait_total, init_total);
    }

    if (0 == rank) {
        free_buffer(recvbuf, options.accel);
    }
    free_buffer(sendbuf, options.accel);
    MPI_Finalize();

    if (none != options.accel) {
        if (cleanup_accel()) {
            fprintf(stderr, "Error cleaning up device\n");
            exit(EXIT_FAILURE);
        }
    }

    return EXIT_SUCCESS;
}
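The second loop above measures overlap: start the MPI_Igather, compute for roughly one pure-collective latency via dummy_compute, then wait, so only the un-hidden remainder of the collective is paid. A skeleton of that pattern, independent of the OSU harness (do_work is a simplified stand-in; OSU's dummy_compute is additionally handed &request so it can nudge MPI progress during the compute phase, which this sketch omits):

#include <mpi.h>

/* Spin for roughly the requested time; stands in for real computation. */
static void do_work(double seconds)
{
    double t0 = MPI_Wtime();
    while (MPI_Wtime() - t0 < seconds)
        ; /* busy-wait */
}

static void overlapped_gather(char *sendbuf, char *recvbuf, int size,
                              double work_secs, MPI_Comm comm)
{
    MPI_Request req;

    MPI_Igather(sendbuf, size, MPI_CHAR, recvbuf, size, MPI_CHAR,
                0, comm, &req);
    do_work(work_secs);                /* computation overlapped with the gather */
    MPI_Wait(&req, MPI_STATUS_IGNORE); /* pay only what was not hidden */
}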
Code example #8
File: noalias3.c  Project: NexMirror/MPICH
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define NUM_INTS (2) /* two ints contributed per rank, matching the init loop below */

/* abort if a runtime check fails */
#define my_assert(cond_)                                                  \
    do {                                                                  \
        if (!(cond_)) {                                                   \
            fprintf(stderr, "assertion (%s) failed, aborting\n", #cond_); \
            MPI_Abort(MPI_COMM_WORLD, 1);                                 \
        }                                                                 \
    } while (0)

int main(int argc, char **argv)
{
    int errs = 0;
    int i;
    int rank, size;
    int *sbuf = NULL;
    int *rbuf = NULL;
    int *scounts = NULL;
    int *rcounts = NULL;
    int *sdispls = NULL;
    int *rdispls = NULL;
    MPI_Datatype *types = NULL;
    MPI_Comm comm;
    MPI_Request req;

    /* intentionally not using MTest_Init/MTest_Finalize in order to make it
     * easy to take this test and use it as an NBC sanity test outside of the
     * MPICH test suite */
    MPI_Init(&argc, &argv);

    comm = MPI_COMM_WORLD;

    MPI_Comm_size(comm, &size);
    MPI_Comm_rank(comm, &rank);

    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    /* enough space for every process to contribute at least NUM_INTS ints to any
     * collective operation */
    sbuf = malloc(NUM_INTS * size * sizeof(int));
    my_assert(sbuf);
    rbuf = malloc(NUM_INTS * size * sizeof(int));
    my_assert(rbuf);
    scounts = malloc(size * sizeof(int));
    my_assert(scounts);
    rcounts = malloc(size * sizeof(int));
    my_assert(rcounts);
    sdispls = malloc(size * sizeof(int));
    my_assert(sdispls);
    rdispls = malloc(size * sizeof(int));
    my_assert(rdispls);
    types = malloc(size * sizeof(MPI_Datatype));
    my_assert(types);

    for (i = 0; i < size; ++i) {
        sbuf[2 * i] = i;
        sbuf[2 * i + 1] = i;
        rbuf[2 * i] = i;
        rbuf[2 * i + 1] = i;
        scounts[i] = NUM_INTS;
        rcounts[i] = NUM_INTS;
        sdispls[i] = i * NUM_INTS;
        rdispls[i] = i * NUM_INTS;
        types[i] = MPI_INT;
    }

    if (rank == 0 && MPI_SUCCESS ==
        MPI_Igather(sbuf, NUM_INTS, MPI_INT, sbuf, NUM_INTS, MPI_INT, 0, comm, &req))
        errs++;

    if (rank == 0 && MPI_SUCCESS ==
        MPI_Igatherv(sbuf, NUM_INTS, MPI_INT, sbuf, rcounts, rdispls, MPI_INT, 0, comm, &req))
        errs++;

    if (rank == 0 && MPI_SUCCESS ==
        MPI_Iscatter(sbuf, NUM_INTS, MPI_INT, sbuf, NUM_INTS, MPI_INT, 0, comm, &req))
        errs++;

    if (rank == 0 && MPI_SUCCESS ==
        MPI_Iscatterv(sbuf, scounts, sdispls, MPI_INT, sbuf, NUM_INTS, MPI_INT, 0, comm, &req))
        errs++;

    if (MPI_SUCCESS == MPI_Iallgather(&sbuf[rank], 1, MPI_INT, sbuf, 1, MPI_INT, comm, &req))
        errs++;

    if (MPI_SUCCESS ==
        MPI_Iallgatherv(&sbuf[rank * rcounts[rank]], rcounts[rank], MPI_INT, sbuf, rcounts, rdispls,
                        MPI_INT, comm, &req))
        errs++;

    if (MPI_SUCCESS == MPI_Ialltoall(sbuf, NUM_INTS, MPI_INT, sbuf, NUM_INTS, MPI_INT, comm, &req))
        errs++;

    if (MPI_SUCCESS ==
        MPI_Ialltoallv(sbuf, scounts, sdispls, MPI_INT, sbuf, scounts, sdispls, MPI_INT, comm,
                       &req))
        errs++;

    if (MPI_SUCCESS ==
        MPI_Ialltoallw(sbuf, scounts, sdispls, types, sbuf, scounts, sdispls, types, comm, &req))
        errs++;

    if (rank == 0 && MPI_SUCCESS ==
        MPI_Ireduce(sbuf, sbuf, NUM_INTS, MPI_INT, MPI_SUM, 0, comm, &req))
        errs++;

    if (MPI_SUCCESS == MPI_Iallreduce(sbuf, sbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req))
        errs++;

    if (MPI_SUCCESS == MPI_Ireduce_scatter(sbuf, sbuf, rcounts, MPI_INT, MPI_SUM, comm, &req))
        errs++;

    if (MPI_SUCCESS ==
        MPI_Ireduce_scatter_block(sbuf, sbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req))
        errs++;

    if (MPI_SUCCESS == MPI_Iscan(sbuf, sbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req))
        errs++;

    if (MPI_SUCCESS == MPI_Iexscan(sbuf, sbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req))
        errs++;

    if (sbuf)
        free(sbuf);
    if (rbuf)
        free(rbuf);
    if (scounts)
        free(scounts);
    if (rcounts)
        free(rcounts);
    if (sdispls)
        free(sdispls);
    if (rdispls)
        free(rdispls);
    if (types)
        free(types);

    if (rank == 0) {
        if (errs)
            fprintf(stderr, "Found %d errors\n", errs);
        else
            printf(" No errors\n");
    }
    MPI_Finalize();
    return 0;
}
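Every call above aliases the send and receive buffers, which the MPI standard forbids, so a correct implementation must fail the call rather than return MPI_SUCCESS (hence errs++ on success). When reusing a single buffer really is the intent, the conforming spelling is MPI_IN_PLACE, as in this sketch for the reduce case (reduce_in_place is an illustrative name):

#include <mpi.h>

/* Reduce "into" the buffer legally: MPI_IN_PLACE at the root instead
 * of passing sbuf as both send and receive buffer. */
static void reduce_in_place(int *sbuf, int n, int rank, MPI_Comm comm)
{
    MPI_Request req;

    if (rank == 0)
        MPI_Ireduce(MPI_IN_PLACE, sbuf, n, MPI_INT, MPI_SUM, 0, comm, &req);
    else /* recvbuf is significant only at the root */
        MPI_Ireduce(sbuf, NULL, n, MPI_INT, MPI_SUM, 0, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
}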