/*
 * Barrier benchmark driver: each MPI process times ROUNDS iterations of
 * gtmpi_barrier(), computes its average time per barrier in microseconds,
 * and the per-process averages are gathered at rank 0, which prints
 * "<nprocs>\t<mean-average>" to stdout.
 *
 * Returns 0 on success; aborts the MPI job if allocation fails.
 */
int main(int argc, char **argv) {
    int rank, size, i;
    double *recv_buf;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* One slot per process for the gathered averages (only read at rank 0,
     * but MPI_Gather requires a valid recv buffer argument everywhere). */
    recv_buf = malloc(size * sizeof *recv_buf);
    if (recv_buf == NULL) {
        fprintf(stderr, "rank %d: failed to allocate gather buffer\n", rank);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    gtmpi_init(size);

    struct timeval start, end;
    gettimeofday(&start, NULL);
    for (i = 0; i < ROUNDS; i++) {
        gtmpi_barrier();
    }
    gettimeofday(&end, NULL);

    /* Average wall-clock microseconds per barrier on this process. */
    double avg_time = ((end.tv_sec * 1000000L + end.tv_usec) -
                       (start.tv_sec * 1000000L + start.tv_usec)) / (double)ROUNDS;

    MPI_Gather(&avg_time, 1, MPI_DOUBLE, recv_buf, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        double sum_avg_time = 0;
        for (i = 0; i < size; i++) sum_avg_time += recv_buf[i];
        printf("%d\t%f\n", size, sum_avg_time / size);
    }

    free(recv_buf);
    gtmpi_finalize();
    MPI_Finalize();

    return 0;
}
/* Example #2 */
/*
 * Barrier sanity test: every MPI process runs num_iterations loop
 * iterations, printing a "before"/"after" line around each of two
 * gtmpi_barrier() calls so interleaving can be inspected by eye.
 *
 * Options:
 *   -n #  number of iterations (default 2)
 *   -t #  expected number of threads/processes (default 5)
 *   -h    usage message (exits with status 10)
 */
int main(int argc, char **argv)
{
    int           cnt;
    int           i;
    int           my_id;
    int           num_iterations = 2;
    int           num_processes;
    int           num_threads = 5;
    int           opt;
    extern int    optind;
    extern char * optarg;

    /* Parse command-line options; invalid values fall back to defaults. */
    while( (opt=getopt(argc,argv, "hn:t:")) != -1 )
    {
        switch(opt)
        {
            case 'n':                   /* number of iterations               */
                cnt = atoi(optarg);
                if( cnt < 1 )
                {
                    fprintf(stderr, "number of iterations of %s too low, using %d\n",
                            optarg, num_iterations);
                }
                else
                {
                    num_iterations = cnt;
                }
                break;

            case 't':                   /* number of threads               */
                cnt = atoi(optarg);
                if( cnt < 1 )
                {
                    fprintf(stderr, "number of threads of %s too low, using %d\n",
                            optarg, num_threads);
                }
                else
                {
                    num_threads = cnt;
                }
                break;

            default:
                fprintf(stderr, "Unknown options: %s\n", optarg);
                /* fall through */

            case 'h':
                fprintf(stderr, "Usage:  barrier_test [-n #]\n");
                fprintf(stderr, "        -h   - this help message\n");
                fprintf(stderr, "        -n # - the # of iterations to run (default: 2)\n");
                fprintf(stderr, "        -t # - the # of threads to use (default: 5)\n");
                exit(10);
                break;
        }
    }    

    /* NOTE(review): gtmpi_init() is called BEFORE MPI_Init() here, the
     * opposite order from the benchmark driver above. If gtmpi_init()
     * makes any MPI calls this is invalid — confirm against gtmpi.c. */
    gtmpi_init(num_threads);

    MPI_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &num_processes);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_id);

    /*
     * make sure we're configured with the right number of threads & processes
     */
    assert( num_threads == num_processes );

    for(i=0; i < num_iterations; i++)
    {

        /* Flush after each print so output ordering around the barrier
         * is observable even when stdout is block-buffered. */
        fprintf(stdout, "thread[%d]: before barrier %d...\n", my_id, i);
        fflush(stdout);

        /*
         * The barrier
         */
        gtmpi_barrier();

        fprintf(stdout, "thread[%d]: after barrier %d...\n", my_id, i);
        fflush(stdout);

        /*
         * The barrier
         */
        gtmpi_barrier();

    }

    /* NOTE(review): MPI_Finalize() runs before gtmpi_finalize() — again
     * the reverse of example #1. If gtmpi_finalize() touches MPI state
     * this is use-after-finalize; verify the intended teardown order. */
    MPI_Finalize();
    gtmpi_finalize();

    return 0;