Example #1
int main(int argc, char* argv[])
{
    double t1[MAX_TIMER_TEST], tick[MAX_TIMER_TEST], tickval;
    double minDiff, maxDiff, diff;
    int i, nZeros = 0;
    int errs = 0;

    MTest_Init(&argc,&argv);

    for (i=0; i<MAX_TIMER_TEST; i++) {
	t1[i] = MPI_Wtime();
    }

    for (i=0; i<MAX_TIMER_TEST; i++) {
	tick[i] = MPI_Wtick();
    }

    /* Look at the values */
    /* Look at the tick */
    tickval = MPI_Wtick();
    for (i=0; i<MAX_TIMER_TEST; i++) {
	if (tickval != tick[i]) {
	    fprintf( stderr, "Nonconstant value for MPI_Wtick: %e != %e\n",
		     tickval, tick[i] );
	    errs ++;
	}
    }

    /* Look at the timer */
    minDiff = 1.e20;
    maxDiff = -1.0;
    nZeros  = 0;
    for (i=1; i<MAX_TIMER_TEST; i++) {
	diff = t1[i] - t1[i-1];
	if (diff == 0.0) nZeros++;
	else if (diff < minDiff) minDiff = diff;
	if (diff > maxDiff) maxDiff = diff;
    }

    /* Are the time diff values and tick values consistent */
    if (verbose) {
	printf( "Tick = %e, timer range = [%e,%e]\n", tickval, minDiff, 
		maxDiff );
	if (nZeros) printf( "Wtime difference was 0 %d times\n", nZeros );
    }    

    MTest_Finalize(errs);
    MPI_Finalize();

    return 0;
}
Example #2
int main(int argc, char* argv[]) {
   double start, finish;
   long i, n;
   double elapsed;
   double x = 0;
   int pm = 1;

   MPI_Init(&argc, &argv);

   if (argc != 2) {
      fprintf(stderr, "usage:  %s <n>\n", argv[0]);
      exit(0);
   }
   n = strtol(argv[1], NULL, 10);

   start = MPI_Wtime();
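   /* Workload being timed: n evaluations of exp(sin(i)), with pm alternating the sign of each term */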
   for (i = 0; i < n; i++, pm = -pm)
      x += pm*exp(sin((double) i));
   finish = MPI_Wtime();

   elapsed = finish - start;
   printf("Elapsed time = %.14e seconds\n", elapsed);
   printf("Resolution of MPI_Wtime = %.14e seconds\n", MPI_Wtick());

   MPI_Finalize();

   return 0;
}
Example #3
File: gtime.c Project: dhascome/simgrid
/*
 * CheckTime tests that the timers on all processes are synchronized
 */
int CheckTime( void )
{
    int        rank, size, i;
    double     wtick, t1, t2, t3, delta_t;
    int        ntest=20;
    MPI_Status status;
    int        err = 0;
    double     max_diff = 0.0;

    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    if (rank == 0) {
	wtick = MPI_Wtick();
#ifdef DEBUG
	printf( "Wtick is %lf\n", wtick );
#endif
	while (ntest--) {
	    for (i=1; i<size; i++) {
		MPI_Send( MPI_BOTTOM, 0, MPI_INT, i, 0, MPI_COMM_WORLD );
		MPI_Recv( MPI_BOTTOM, 0, MPI_INT, i, 1, MPI_COMM_WORLD, 
			  &status );
		t1 = MPI_Wtime();
		MPI_Send( &t1, 1, MPI_DOUBLE, i, 2, MPI_COMM_WORLD );
		MPI_Recv( &t2, 1, MPI_DOUBLE, i, 3, MPI_COMM_WORLD, &status );
		t3 = MPI_Wtime();
#ifdef DEBUG
		printf( "Process %d(%f) to 0(%f): diff= %f\n", 
			i, 0.5 * (t1 + t3), t2, 0.5*(t1+t3)-t2 );
#endif
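		/* t2 (the remote clock) should fall near the midpoint of t1 and t3; a deviation larger
		   than the round-trip time plus one tick is reported as unsynchronized clocks */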
		delta_t = fabs( 0.5 * (t1 + t3) - t2 );
		if( delta_t > (t3 - t1 + wtick)) {
		    err++;
		    printf( "Process %d has %f; Process 0 has %f\n",
			    i, t2, 0.5 * (t1 + t3) );
		}
		if (delta_t > max_diff) max_diff = delta_t;
	    }
#ifdef DEBUG	    
	    printf( "delta_t = %lf\n", delta_t );
#endif
	    /* Release all processes for the next pass */
	    for (i=1; i<size; i++) {
		MPI_Send( MPI_BOTTOM, 0, MPI_INT, i, 3, MPI_COMM_WORLD );
	    }
	}
    }
    else {
	while (ntest--) {
	    MPI_Recv( MPI_BOTTOM, 0, MPI_INT, 0, 0, MPI_COMM_WORLD, &status );
	    MPI_Send( MPI_BOTTOM, 0, MPI_INT, 0, 1, MPI_COMM_WORLD );
	    /* Ensure a symmetric transfer */
	    MPI_Recv( &t1, 1, MPI_DOUBLE, 0, 2, MPI_COMM_WORLD, &status );
	    t2 = MPI_Wtime();
	    MPI_Send( &t2, 1, MPI_DOUBLE, 0, 3, MPI_COMM_WORLD );
	    MPI_Recv( MPI_BOTTOM, 0, MPI_INT, 0, 3, MPI_COMM_WORLD, &status );
	}
    }
    return err;
}
Example #4
/**
 * \brief Time the force calculation.
 * This times the force calculation without
 * propagating the system. It therefore does
 * not include e.g. Verlet list updates.
 *
 * @return Time per integration in ms.
 */
double time_force_calc(int default_samples)
{
  int rds = timing_samples > 0 ? timing_samples : default_samples;
  int i;
  Utils::Statistics::RunningAverage<double> running_average;
  
  if (mpi_integrate(0, 0))
    return -1;

  /* perform force calculation test */
  for (i = 0; i < rds; i++) {
    const double tick = MPI_Wtime();
    
    if (mpi_integrate(0, -1))
      return -1;

    const double tock = MPI_Wtime();
    running_average.add_sample((tock - tick));
  }

  if(running_average.avg() <= 5*MPI_Wtick()) {
    runtimeWarning("Clock resolution is too low to reliably time integration.");
  }

  if(running_average.sig() >= 0.1*running_average.avg()) {
    runtimeWarning("Statistics of tuning samples are very bad.");
  }

  /* MPI returns s, return value should be in ms. */
  return 1000.*running_average.avg();
}
Example #5
int main(int argc,char **argv)
{
  double x,y;
  int    ierr;

  ierr = PetscInitialize(&argc,&argv,0,0);if (ierr) return ierr;
  /* To take care of paging effects */
  y = MPI_Wtime();

  x = MPI_Wtime();
  y = MPI_Wtime();
  y = MPI_Wtime();
  y = MPI_Wtime();
  y = MPI_Wtime();
  y = MPI_Wtime();
  y = MPI_Wtime();
  y = MPI_Wtime();
  y = MPI_Wtime();
  y = MPI_Wtime();
  y = MPI_Wtime();
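  /* y - x covers ten back-to-back MPI_Wtime calls, so (y - x)/10 is the average cost of one call */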

  fprintf(stdout,"%-15s : %e sec\n","MPI_Wtime",(y-x)/10.0);
  y = MPI_Wtick();
  fprintf(stdout,"%-15s : %e sec\n","MPI_Wtick",y);

  x    = MPI_Wtime();
  ierr = PetscSleep(10);CHKERRQ(ierr);
  y    = MPI_Wtime();
  fprintf(stdout,"%-15s : %e sec - Slept for 10 sec \n","MPI_Wtime",(y-x));

  ierr = PetscFinalize();
  return ierr;
}
Example #6
int main(int argc, char** argv)
{
    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_SINGLE, &provided);

    int size, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    rank_printf(0, "Running with %d MPI processes\n", size);

    if (rank == 0) {
        double tickSize = MPI_Wtick();
        printf("MPI wall clock resolution: %0.4le seconds\n", tickSize);
    }

    int mvals[18];
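    /* Sizes used by run_tests below: 0, then powers of two from 2^2 up to 2^18 */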
    mvals[0] = 0;
    for (int k = 2; k <= 18; k++) {
        mvals[k-1] = (1 << k);
    }

    rank_printf(0, "\n");

    const int numIters = 5;
    const int numPerIter = 1000;

    print_header();

    run_tests(0, 1, rank, mvals, 18, numIters, numPerIter);
    run_tests(0, size-1, rank, mvals, 18, numIters, numPerIter);
    run_tests(2, 3, rank, mvals, 18, numIters, numPerIter);

    MPI_Finalize();
    return 0;
}
Example #7
File: mpicomm.c Project: xrf/phy905-hw
int main(int argc, char **argv)
{
    const int rank = init_mpi(&argc, &argv);

    /* parse args (unsafe) */
    xensure(argc > 3);
    int i = atoi(argv[1]);
    int j = atoi(argv[2]);
    size_t m = atoi(argv[3]);

    unsigned char *buf = calloc((m ? m : 1) * 2, 1);
    xensure(buf);
    unsigned char *sendbuf = buf;
    unsigned char *recvbuf = buf + (m ? m : 1);
    init_data(sendbuf, m, rank);

    if (rank == i) {
        double tick = MPI_Wtick();
        printf("tick /s = %.17g\n", tick);
        double min_time = tick * TICK_FACTOR;
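        /* TICK_FACTOR (defined elsewhere) scales the clock resolution; min_time presumably sets a lower bound on each timed run in bench_all */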
        printf("min_time /s = %.17g\n", min_time);
        bench_all(1, sendbuf, recvbuf, m, j, min_time);
    } else if (rank == j) {
        bench_all(0, sendbuf, recvbuf, m, i, NAN);
    }

    xtry(MPI_Finalize());
    free(buf);
}
Example #8
void SetupDelay( double usec )
{
    double t, tick;
    double sec = 1.0e-6 * usec;
    int nLoop, i, direction;


    /* Compute the number of times to run the tests to get an accurate
       number given the timer resolution. */
    nLoop = 1;
    tick = 100 * MPI_Wtick();
    do {
        nLoop = 2 * nLoop;
        t = MPI_Wtime();
        for (i=0; i<nLoop; i++) {
            MPI_Wtime();
        }
        t = MPI_Wtime() - t;
    }
    while ( t < tick && nLoop < 100000 );

    if (verbose) printf( "nLoop = %d\n", nLoop );

    /* Start with an estimated count */
    lCount = 128;
    direction = 0;
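    /* Grow or shrink lCount by factors of two until one Delay(lCount) call takes roughly 'sec'
       seconds; a reversal of the search direction (or an in-range estimate) ends the loop */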
    while (1) {
        t = MPI_Wtime();
        for (i=0; i<nLoop; i++) {
            Delay( lCount );
        }
        t = MPI_Wtime() - t;
        t = t / nLoop;
        if (verbose) printf( "lCount = %d, time = %e\n", lCount, t );
        if (t > 10 * tick) nLoop = nLoop / 2;

        /* Compare measured delay */
        if (t > 2*sec) {
            lCount = lCount / 2;
            if (direction == 1) break;
            direction = -1;
        }
        else if (t < sec / 2) {
            lCount = lCount * 2;
            if (direction == -1) break;
            direction = 1;
        }
        else if (t < sec) {
            /* sec/2 <= t < sec , so estimate the lCount to hit sec */
            lCount = (sec/t) * lCount;
        }
        else
            break;
    }

    if (verbose) printf( "lCount = %d, t = %e\n", lCount, t );

    /* Should coordinate with the other processes - take the max? */
}
Example #9
static void print_result( int length, int cycles, double time )
{
    double bandwidth, clock_prec;

    clock_prec = MPI_Wtick();
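    /* clock_prec appears in both the numerator and the denominator below, so it cancels;
       the expression reduces to (length * cycles) / (1024.0 * 1024.0) / time */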
    bandwidth = (length * clock_prec * cycles) / (1024.0 * 1024.0) / (time * clock_prec);
    printf( "%8d\t%.6f\t%.4f MB/s\n", length, time / cycles, bandwidth );
}
Example #10
/*ARGSUSED*/
static PyObject* wtick(PyObject* pySelf, PyObject* args) {
   double theTick = 0.0;
   PyObject* result = 0;

   NOARGUMENTS();

   theTick = MPI_Wtick();
   PYCHECK( result = PyFloat_FromDouble(theTick) );
   return result;

 pythonError:
   return 0;
}
Example #11
double MPI_Wtick_Wrapper(void)
{
#ifdef COMMPI
  char *me = ft_mpi_routine_names[MPI_Wtick_cntr];
  double derr;
  ft_mpi_cntrs[MPI_Total_cntr]++;
  ft_mpi_cntrs[MPI_Wtick_cntr]++;
  derr = MPI_Wtick();
  return(derr);
#else
  return(0);
#endif
}
Example #12
double Zoltan_Time_Resolution(int timer)
{
  double t = -1.;

  if (timer==ZOLTAN_TIME_WALL)
    t = MPI_Wtick();
  else if (timer==ZOLTAN_TIME_CPU)
    t = (double) 1. / ((double) CLOCKS_PER_SEC);
#ifndef NO_TIMES
  else if (timer==ZOLTAN_TIME_USER)
    t = (double) 1. / ((double) sysconf(_SC_CLK_TCK));
#endif 

  return t;
}
Example #13
double
hypre_thread_MPI_Wtick( )
{
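  /* Only the initial thread or hypre worker thread 0 calls MPI_Wtick; any other thread returns 0.0 */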
  double returnval;
  int unthreaded = pthread_equal(initial_thread,pthread_self());
  int I_call_mpi = unthreaded || pthread_equal(hypre_thread[0],pthread_self());
  if (I_call_mpi)
  {
    returnval=MPI_Wtick();
  }
  else
  {
    returnval=0.0;
  }
  return returnval;
}
Example #14
/*===================================================================*/
engine::engine(){
  net_comm = new MPIComm;
  mailbox = new MailBox(net_comm);
  SetStartTime();
  initComm();
  last_task_handle = 0;
  term_ok = TERMINATE_INIT;
  memory_policy = ENGINE_ALLOCATION;
  runMultiThread=true;
  dlb.initDLB();
  thread_model = 0;
  task_submission_finished=false;
  data_memory = NULL;
  thread_manager = NULL;
  LOG_INFO(LOG_MULTI_THREAD,"mpi tick :%f\n",MPI_Wtick());
  char fn[20];
  sprintf(fn,"comm_log_%02d.txt",me);
  comm_log=fopen(fn,"w");
  terminate_barrier = MPI_REQUEST_NULL;
  user_ctx = NULL;
  LOG_INFO(LOG_DATA,"Comm Log file : %s opened %p",fn,comm_log);
}
Example #15
JNIEXPORT jdouble JNICALL Java_mpi_MPI_wtick_1jni(JNIEnv *env, jclass jthis)
{
    return MPI_Wtick();
}
Example #16
    }
/* Try several times to get a 1 second sleep */
    for (i = 0; i<10; i++) {
	t1 = MPI_Wtime();
	sleep(1);
	t2 = MPI_Wtime();
	if (t2 - t1 >= (1.0 - 0.01) && t2 - t1 <= 5.0) break;
	if (t2 - t1 > 5.0) i = 9;
    }
    if (i == 10) {
	fprintf( stderr, 
		 "Timer around sleep(1) did not give 1 second; gave %f\n",
             t2 - t1 );
	fprintf( stderr, "If the sigchk check shows that SIGALRM is in use, \n\
this indicates only that user programs must NOT use any system call or\n\
library that uses SIGALRM.  SIGALRM is not used by MPICH but may be used\n\
by the software the MPICH uses to implement communication to other \n\
processes\n" );
	err++;
    } 
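    /* Sanity check: the reported timer resolution must be non-negative and no larger than one second */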
    tick = MPI_Wtick();
    if (tick > 1.0 || tick < 0.0) {
	err++;
	fprintf( stderr, "MPI_Wtick gave a strange result: (%f)\n", tick );
    }
    Test_Waitforall( );
    MPI_Finalize( );
    
    return err;
}
Example #17
double xmp_wtick_(void) {
  return MPI_Wtick();
}
Example #18
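/* Likely a Fortran-callable wrapper: the trailing underscore matches the default name mangling of most Fortran compilers */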
double mpi_wtick_(void) {
  return MPI_Wtick();
}
Example #19
double
hypre_MPI_Wtick( )
{
   return MPI_Wtick();
}
Example #20
int main(int argc, char *argv[]) 
{ 
    MPI_Datatype column, xpose;
    double t[5], ttmp, tmin, tmax, ttick;
    static int sizes[5] = { 10, 100, 1000, 10000, 20000 };
    int i, isMonotone, errs=0, nrows, ncols, isvalid;
 
    MPI_Init(&argc,&argv); 

    ttick = MPI_Wtick();

    for (i=0; i<5; i++) {
         nrows = ncols = sizes[i];
         ttmp = MPI_Wtime();
         /* create datatype for one column */
         MPI_Type_vector(nrows, 1, ncols, MPI_INT, &column);
         /* create datatype for matrix in column-major order */
         MPI_Type_hvector(ncols, 1, sizeof(int), column, &xpose);
         MPI_Type_commit(&xpose);
         t[i] = MPI_Wtime() - ttmp;
         MPI_Type_free( &xpose );
         MPI_Type_free( &column );
     }

     /* Now, analyze the times to see that they are (a) small and (b)
        nearly independent of size */
     tmin = 10000;
     tmax = 0;
     isvalid = 1;
     for (i=0; i<5; i++) {
	 if (t[i] < 10*ttick) {
	     /* Timing is invalid - resolution is too low */
	     isvalid = 0;
	 }
	 else {
	     if (t[i] < tmin) tmin = t[i];
	     if (t[i] > tmax) tmax = t[i];
	 }
     }
     if (isvalid) {
	 /* Monotone times are a warning */
	 isMonotone = 1;
	 for (i=1; i<5; i++) {
	     if (t[i] < t[i-1]) isMonotone = 0;
	 }
	 if (tmax > 100 * tmin) {
	     errs++;
	     fprintf( stderr, "Range of times appears too large\n" );
	     if (isMonotone) {
		 fprintf( stderr, "Vector types may use processing proportional to count\n" );
	     }
	     for (i=0; i<5; i++) {
		 fprintf( stderr, "n = %d, time = %f\n", sizes[i], t[i] );
	     }
	     fflush(stderr);
	 }
     }
     else {
	 fprintf( stderr, "Timing failed - recorded times are too small relative to MPI_Wtick\n" );
	 /* Note that this is not an error in the MPI implementation - it is a 
	    failure in the test */
     }

    if (errs) {
        printf( " Found %d errors\n", errs );
    }
    else {
        printf( " No Errors\n" );
    } 
    MPI_Finalize(); 
    return 0; 
} 
Example #21
File: timer.hpp Project: AsherBond/PDAL
inline double timer::elapsed_min() const
{
  return MPI_Wtick();
}
Example #22
File: bcast.c Project: acaldero/MiMPI
int main(int argc, char *argv[])
{
	int             ret;
	char           *buf;
	char            processor_name[MPI_MAX_PROCESSOR_NAME];
	int             namelen;
	double          start_time;
        double          used_time;
        double          avg_time;
        double          barrier_time;
        double          us_rate;
        int             max_len, lenbuf;
        int             j;
	int             me, nproc;
        FILE           *fparam ;


        /*
         *  beginning ...
         */

	setbuf(stdout, NULL) ;

        /*
         *  max_len  ...
         */
/*
	if (argc != 2)
        {
                printf("Use: bcast <max_len> \n") ;
		exit(1) ;
        }
	max_len =atoi(argv[1]) ;
*/
/*
#if defined(__LINUX__)
        fparam = fopen("bcast.in","rt") ;
#endif
#if defined(__SUNOS__)
        fparam = fopen("bcast.in","rt") ;
#endif
#if defined(__SP2__)
        fparam = fopen("/u/fperez/XMP/MiMPI/test/mp/mpi/performance/bcast/bcast.in","rt") ;
#endif
        if (fparam == NULL)
        {
                printf("ERROR: can not open bcast.in, sorry.\n") ;
		exit(1) ;
        }
        ret = fscanf(fparam,"max_len=%i",&max_len) ;
        fclose(fparam) ;
        if (ret != 1)
        {
                printf("ERROR: can not read a valid 'max_len' value from bcast.in, sorry.\n") ;
		exit(1) ;
        }
*/
	max_len = 1024 * 1024;

        if ( (max_len <= 0) || (max_len >= 8*1024*1024) )
        {
                printf("ERROR: (max_len <= 0) || (max_len >= 8*1024*1024)\n") ;
                exit(1) ;
        }

        /*
         *  MPI init  ...
         */
	ret = MPI_Init(&argc, &argv);	
	if (ret < 0)
	{
		printf("Can't init\n") ;
		exit(1) ;
	}

	MPI_Comm_rank(MPI_COMM_WORLD,&me) ;
	MPI_Get_processor_name(processor_name,&namelen) ;
	MPI_Comm_size(MPI_COMM_WORLD, &nproc) ;

#if (0)
	printf("Process %d; total %d is alive on %s\n",me,nproc,processor_name) ;
#endif


        buf = (char *) malloc((unsigned) max_len) ;
        if (buf == NULL)
        {
                perror("Error en malloc") ;
                exit(1) ;
        }
	memset(buf,'x',max_len) ;

	printf("barrier\n") ;
	 MPI_Barrier(MPI_COMM_WORLD) ;

        /* ... Barrier ... */
	start_time = MPI_Wtime() ;
	for(j = 0; j < 10; j++)
        {
	  MPI_Barrier(MPI_COMM_WORLD) ;
        }
	barrier_time = (MPI_Wtime() - start_time) ;
	barrier_time = barrier_time / 2000.0;

	if (me == 0)
        	printf(">>>>>>>>> BARRERA1  =%e\n",  barrier_time);

	/*barrier_time = 0;*/

        /* ... test ... */
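	/* For each message size, rank 0 times PRUEBAS broadcasts and reports the rate;
	   the other ranks only take part in the broadcasts */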
	lenbuf = 1;
        while (lenbuf <= max_len)
        {
	        MPI_Barrier(MPI_COMM_WORLD) ;

		avg_time = 0.0;
		if (me != 0)
		{
			for(j = 0; j < PRUEBAS; j++)
                        {

				ret = MPI_Bcast(buf,lenbuf,MPI_CHAR,0,
				          MPI_COMM_WORLD) ;
				if (ret != MPI_SUCCESS)
						printf("ERROR EN BCAST \n");

	                    
			}
		}
		else
		{
			start_time = MPI_Wtime() ;
			for(j = 0; j < PRUEBAS; j++)
                        {

				ret = MPI_Bcast(buf,lenbuf,MPI_CHAR,0,
                                      MPI_COMM_WORLD) ;
	                  if (ret != MPI_SUCCESS)
						printf("ERROR EN BCAST \n");


			}
			used_time = (MPI_Wtime() - start_time) ;

                	avg_time =  used_time  / (float)  PRUEBAS;
				
			if (avg_time > 0)    /* rate is megabytes per second */
                        	us_rate = (double)((nproc * lenbuf)/
					(avg_time*(double)1000000)) ;
                	else
                        	us_rate = 0.0;

                	printf("len_bytes=%e avg_time_sec=%e rate_Mbytes_sec=%e\n", 
			        (double)lenbuf, (double)avg_time, (double)us_rate) ;
                }

                lenbuf *= 2;
        }

		if (me == 0)
		{
				char c;
				read(0, &c, 1);
		}

#if (0)
	if (me == 0)
	    printf("\nclock resolution in seconds: %10.8f\n", MPI_Wtick()) ;
#endif

	MPI_Finalize() ;
        free(buf) ;
	exit(0) ;

}
Example #23
File: twovec.c Project: R7R8/simgrid
int main(int argc, char *argv[])
{
    MPI_Datatype column[LOOPS], xpose[LOOPS];
    double t[NUM_SIZES], ttmp, tmean;
    double tMeanLower, tMeanHigher;
    int size;
    int i, j, errs = 0, nrows, ncols;

    MPI_Init(&argc, &argv);

    tmean = 0;
    size  = 1;
    for (i = -SKIP; i < NUM_SIZES; i++) {
        nrows = ncols = size;

        ttmp = MPI_Wtime();

        for (j = 0; j < LOOPS; j++) {
            MPI_Type_vector(nrows, 1, ncols, MPI_INT, &column[j]);
            MPI_Type_hvector(ncols, 1, sizeof(int), column[j], &xpose[j]);
            MPI_Type_commit(&xpose[j]);
        }

        if (i >= 0) {
            t[i] = MPI_Wtime() - ttmp;
            if (t[i] < 100 * MPI_Wtick()) {
                /* Time is too inaccurate to use.  Set to zero.
                   Consider increasing the LOOPS value to make this
                   time large enough */
                t[i] = 0;
            }
            tmean += t[i];
        }

        for (j = 0; j < LOOPS; j++) {
            MPI_Type_free(&xpose[j]);
            MPI_Type_free(&column[j]);
        }

        if (i >= 0)
            size *= 2;
    }
    tmean /= NUM_SIZES;

    /* Now, analyze the times to see that they do not grow too fast
       as a function of size.  As that is a vague criterion, we do the
       following as a simple test:
          Compute the mean of the first half and the second half of the
          data
          Compare the two means
          If the mean of the second half is more than FRACTION times the
          mean of the first half, then the time may be growing too fast.
     */
    tMeanLower = tMeanHigher = 0;
    for (i=0; i<NUM_SIZES/2; i++)
        tMeanLower += t[i];
    tMeanLower /= (NUM_SIZES/2);
    for (i=NUM_SIZES/2; i<NUM_SIZES; i++)
        tMeanHigher += t[i];
    tMeanHigher /= (NUM_SIZES - NUM_SIZES/2);
    /* A large value (even 1 or greater) is a good choice for
       FRACTION here - the goal is to detect significant growth in
       execution time as the size increases, and there is no MPI
       standard requirement here to meet.

       If the times were too small, then the test also passes - the
       goal is to find implementation problems that lead to excessive
       time in these routines.
    */
    if (tMeanLower > 0 && tMeanHigher > (1 + FRACTION) * tMeanLower) errs++;

    if (errs) {
        fprintf(stderr, "too much difference in performance: ");
        for (i = 0; i < NUM_SIZES; i++)
            fprintf(stderr, "%.3f ", t[i] * 1e6);
        fprintf(stderr, "\n");
    }
    else
        printf(" No Errors\n");

    MPI_Finalize();
    return 0;
}
Example #24
File: ping.c Project: acaldero/MiMPI
int main(int argc, char *argv[])
{
	int             ret;
	char           *buf;
	char            processor_name[MPI_MAX_PROCESSOR_NAME];
	int             namelen;
	double          start_time;
        double          used_time;
        double          avg_time;
        double          us_rate;
        int             max_len, lenbuf;
        int             j;
	int             me, nproc;
	MPI_Status      status;
        FILE           *fparam ;


	setbuf(stdout, NULL);
/*
        if (argc != 2)
        {
                printf("Use: ping <max_len> \n");
                exit(1);
        }
        max_len =atoi(argv[1]);
*/
/*
        fparam = fopen
                 (
                   "/home/proyectos/mpi/src/MiMPI/test_II/mpich_linux/ping/ping.in",
                   "rt"
                 ) ;
        if (fparam == NULL)
        {
                printf("ERROR: can not open ping.in, sorry.\n");
		exit(1);
        }
        ret = fscanf(fparam,"max_len=%i",&max_len) ;
        fclose(fparam) ;
        if (ret != 1)
        {
                printf("ERROR: can not read a valid 'max_len' value from ping.in, sorry.\n");
		exit(1);
        }
*/
	max_len = 1024 * 1024;

        if ( (max_len <= 0) || (max_len >= 8*1024*1024) )
        {
                printf("ERROR: max_len = %i\n",max_len);
                printf("ERROR: (max_len <= 0) || (max_len >= 8*1024*1024)\n");
                exit(1) ;
        }


	ret = MPI_Init(&argc, &argv);	
	if (ret < 0)
	{
		printf("Can't init\n");
		exit(1);
	}

	MPI_Comm_rank(MPI_COMM_WORLD,&me);
	MPI_Get_processor_name(processor_name,&namelen);
	MPI_Comm_size(MPI_COMM_WORLD, &nproc);

#if (0)
	printf("Process %d; total %d is alive on %s\n",me,nproc,processor_name);
#endif


	MPI_Barrier(MPI_COMM_WORLD) ;

        buf = (char *) malloc((unsigned) max_len);
        if (buf == NULL)
        {
                perror("Error en malloc");
                exit(1);
        }

#if (0)
	printf("PING LISTO \n");
	if (me == 0)
	{
	  ret=MPI_Send(buf,12,MPI_CHAR,1,1, MPI_COMM_WORLD);
	}
	else
	{
	  ret=MPI_Recv(buf,12,MPI_CHAR,0,1, MPI_COMM_WORLD, &status);
	  printf("count = %d\n", status.count);
	}
#endif
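	/* Ping-pong: the non-zero rank times PRUEBAS round trips (send, then matching receive);
	   rank 0 echoes each message back */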

	lenbuf = 1 ;
        while (lenbuf <= max_len)
        {
		avg_time = 0.0;
		if (me == 0)
		{
			for(j = 0; j < PRUEBAS; j++)
                	{
				ret=MPI_Recv(buf,lenbuf,MPI_CHAR,1,1,
						MPI_COMM_WORLD, &status);
                                /*
                		if (ret != MPI_SUCCESS)
                        		perror("Error en MPI_Recv");
                                */

				ret=MPI_Send(buf,lenbuf,MPI_CHAR,1,1,
					MPI_COMM_WORLD);
                                /*
                		if (ret != MPI_SUCCESS)
                        		perror("Error en MPI_Send\n");
                                */
			}
		}
		else
		{
			for(j = 0; j < PRUEBAS; j++)
                        {
				start_time = MPI_Wtime();

				ret=MPI_Send(buf,lenbuf,MPI_CHAR,0,1,
                                        MPI_COMM_WORLD);

                                /*
                        	if (ret != MPI_SUCCESS)
                                	perror("Error en MPI_Send\n");
                                */
				ret=MPI_Recv(buf,lenbuf,MPI_CHAR,0,1,
                                                MPI_COMM_WORLD, &status);
                                /*
                        	if (ret != MPI_SUCCESS)
                                	perror("Error en MPI_Recv");
                                */

				used_time = (MPI_Wtime() - start_time);
				avg_time = avg_time + used_time;
			}

                	avg_time =  avg_time / (float)  PRUEBAS;
			if (avg_time > 0)    /* rate is megabytes per second */
                        	us_rate = (double)((nproc * lenbuf)/
					(avg_time*(double)1000000));
                	else
                        	us_rate = 0.0;

                	printf("len_bytes=%e avg_time_sec=%e rate_Mbytes_sec=%e\n", 
			        (double)lenbuf, (double)avg_time, (double)us_rate);
                }

                lenbuf *= 2;
        }

#if (0)
	if (me != 0)
	    printf("\nclock resolution in seconds: %10.8f\n", MPI_Wtick());
#endif

	MPI_Finalize();
        free(buf);
	exit(0);

}
Example #25
	double wtick() const {
	  return MPI_Wtick();
	}
Example #26
File: mpiio_util.c Project: YHUCD/NEKCEM
void printio_(int *fparam, int* piostep)
#endif
{
	//printf("format param is %d, iostep is %d\n", (int)*fparam, *piostep);

	int formatparam = *fparam;
	int iostep = *piostep;

	double overall_max, overall_min, overall_avg, overall_sum;
  double io_time_max = 0.0;
  double file_io_max = 0.0;
	if( formatparam == 2 || formatparam == 3 || formatparam == 4 || formatparam == 5)
	{
		MPI_Comm_size(MPI_COMM_WORLD, &mysize);
		MPI_Allreduce(  &overall_time, &overall_min, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
		MPI_Allreduce(  &overall_time, &overall_max, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
		MPI_Allreduce(  &overall_time, &overall_sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
		overall_avg = overall_sum / mysize;
	}
	else if(formatparam == 6 || formatparam == -6 || formatparam == 8 || formatparam == 18)
	{
		if(mySpecies == 1)
		{
		MPI_Allreduce(  &overall_time, &overall_min, 1, MPI_DOUBLE, MPI_MIN, localcomm);
		MPI_Allreduce(  &overall_time, &overall_max, 1, MPI_DOUBLE, MPI_MAX, localcomm);
		MPI_Allreduce(  &overall_time, &overall_sum, 1, MPI_DOUBLE, MPI_SUM, localcomm);
		overall_avg = overall_sum / localsize;

		MPI_Allreduce(  &file_io_time, &file_io_max, 1, MPI_DOUBLE, MPI_MAX, localcomm);
		MPI_Allreduce(  &io_time, &io_time_max, 1, MPI_DOUBLE, MPI_MAX, localcomm);
		}
		else if(mySpecies == 2)
		{
			overall_time = 0;
			overall_min = 0;
			overall_max = 0;
			overall_avg = 0;
		}
	}

	int temp_rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &temp_rank);

	if(temp_rank == 0) {

    printf("**************************************\n");
		printf("I/O time (io_step=%d) stats: overall avg = %lf sec, min = %lf sec, max = %lf sec "
           "(io_max = %lf sec, file_io_max = %lf sec, wtick=%lf sec),"
					 "checkpoint file path is %s, machine is %s, io_option = %d, num_groups = %d "
           "(DEBUG_FLAG=%d, COMPUTE_TRACE_FLAG=%d).\n",
					 io_step, overall_avg, overall_min, overall_max, io_time_max, file_io_max, MPI_Wtick(),
           path, mach_name, formatparam, numGroups,
           DEBUG_FLAG, COMPUTE_TRACE_FLAG);
    printf("**************************************\n");
	}

	MPI_Barrier(MPI_COMM_WORLD);

  // return if IO trace flag not set, otherwise write timing trace of each i/o op
	if(IOTRACE_FLAG != 1)
		return;

  char tracefname[128];
  memset((void*)tracefname, 0, 128);
  sprintf(tracefname, "iotrace-t%.5d.dat", iostep);

  // write the actual file
  if (1) {
		MPI_File timefile;
		int rc;
		rc = MPI_File_open(MPI_COMM_WORLD, tracefname,
									 		MPI_MODE_CREATE | MPI_MODE_WRONLY , MPI_INFO_NULL, &timefile);

		char mytime[128];
		sprintf(mytime, "\n%10d %10.3lf %10.3lf %10.3lf %10.3lf ",
						temp_rank, overall_time, overall_avg, overall_min, overall_max);

		long long offsets = temp_rank * 56 ;
		MPI_Status write_data_status;

		MPI_File_write_at_all_begin(timefile,
													 			offsets,
																mytime,
																56,
																MPI_CHAR);
		MPI_File_write_at_all_end(timefile,
															mytime,
															&write_data_status);
		MPI_File_close( & timefile );
	}
}
示例#27
0
 static duration tick(::yampi::environment const&)
 { return static_cast<duration>(MPI_Wtick()); }
Example #28
File: ping.c Project: acaldero/MiMPI
/*
 * MAIN
 */
int main(int argc, char *argv[])
{
	int             ret;
	char            processor_name[MPI_MAX_PROCESSOR_NAME];
	int             namelen;
	double          start_time;
        double          used_time;
        double          avg_time;
        double          us_rate;
        int             lenbuf;
	MPI_Status      status;
/*        FILE           *fparam ;*/
	int 		i;
	int 		k;
	int 		j;
	struct thr	t[N_THREADS+1];
	double		crear_threads[N_THREADS+1];
#if defined(HAVE_WINDOWS_H)
    HANDLE 	     thid[N_THREADS+1];
	DWORD        thinfo ;
#else
	pthread_attr_t   attr ;
    pthread_t 	     thid[N_THREADS+1];
#endif



	setbuf(stdout, NULL);
#if (0)
	if (argc != 2)
        {
                printf("Use: client <max_len> \n");
		exit(1);
        }
	max_len =atoi(argv[1]);
#endif

	/*
#if (1)
#if defined(__LINUX__)
        fparam = fopen("ping.in","rt") ;
#endif
#if defined(__SUNOS__)
        fparam = fopen("ping.in","rt") ;
#endif
#if defined(__SP2__)
        fparam = fopen("/home/ssoo/gpmimd/FELIX/XMP/xmp/test/mpi/ping_r/ping.in","rt") ;
#endif
        if (fparam == NULL)
        {
                printf("ERROR: can not open ping.in, sorry.\n");
		exit(1);
        }
        ret = fscanf(fparam,"max_len=%i",&max_len) ;
        fclose(fparam) ;
        if (ret != 1)
        {
                printf("ERROR: can not read a valid 'max_len' value from ping.in, sorry.\n");
		exit(1);
        }
#endif
*/
	    max_len = 1024 * 1024 ;

        if ( (max_len <= 0) || (max_len >= 8*1024*1024) )
        {
                printf("ERROR: max_len = %i\n",max_len);
                printf("ERROR: (max_len <= 0) || (max_len >= 8*1024*1024)\n");
                exit(1) ;
        }


	ret = MPI_Init(&argc, &argv);	
	if (ret < 0)
	{
		printf("Can't init\n");
		exit(1);
	}

	MPI_Comm_rank(MPI_COMM_WORLD,&me);
	MPI_Get_processor_name(processor_name,&namelen);
	MPI_Comm_size(MPI_COMM_WORLD, &nproc);

#if (0)
	printf("Process %d; total %d is alive on %s\n",me,nproc,processor_name);
#endif


	MPI_Barrier(MPI_COMM_WORLD) ;

       

#if (0)
	printf("PING LISTO \n");
	if (me == 0)
	{
	  ret=MPI_Send(buf,12,MPI_CHAR,1,1, MPI_COMM_WORLD);
	}
	else
	{
	  ret=MPI_Recv(buf,12,MPI_CHAR,0,1, MPI_COMM_WORLD, &status);
	  printf("count = %d\n", status.count);
	}
#endif


/* 
 *	time to create and wait for N threads 
 */
	/*
if (me == 0)
{
	i = 1;
    while(i <= N_THREADS)
	{
		start_time = MPI_Wtime();

#if defined(HAVE_WINDOWS_H)
		for(j = 0; j < PRUEBAS_THREADS; j ++)
		{
		   for (k = 0; k < i; k++)
				thid[k] = CreateThread (NULL, 0, (LPTHREAD_START_ROUTINE) nulo,NULL,0,&thinfo);

		   WaitForMultipleObjects(k, thid, TRUE, INFINITE);
		}
#else
		for(j = 0; j < PRUEBAS_THREADS; j ++)
		{
			for (k = 0; k < i; k++)
              	 pthread_create(&thid[k], &attr, (void *(*)(void *))nulo, NULL);

			for (k = 0; k < i; k++)
                 pthread_join(thid[k], NULL);
		}
#endif

		crear_threads[i]=((MPI_Wtime() - start_time)) / PRUEBAS_THREADS;
		printf("Tiempo para %d threads = %f\n", i, crear_threads[i]);

		  i *= 2;

	}
}
*/

/* 
 *	sends and receives ... 
 */
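	/* For each thread count i, every thread runs transfiere() (defined elsewhere)
	   on its own lenbuf-sized slice of the shared buffer */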
	i = 1 ;
	while(i <= N_THREADS)
	{

		if (me == 0)
			printf("N_THREADS = %d \n\n", i);
		MPI_Barrier(MPI_COMM_WORLD) ;

		lenbuf = 1;
		while (lenbuf <= max_len)
       	{

			 buf = (char *) malloc((unsigned) lenbuf * N_THREADS );
		     if (buf == NULL)
			 {
                perror("Error en malloc");
                exit(1);
			 }

		 	start_time = MPI_Wtime();
	
#if (0)
			transfiere(&t[i]);
#endif


#if defined(HAVE_WINDOWS_H)
			for (k = 0; k < i; k++)
			{
				t[k].lenbuf = lenbuf;
				t[k].tag = k;
				t[k].buf = buf + (k * lenbuf);

				thid[k] = CreateThread (NULL, 0, (LPTHREAD_START_ROUTINE) transfiere,&(t[k]),0,&thinfo);
			}

			WaitForMultipleObjects(k, thid, TRUE, INFINITE);
#else
			for (k = 0; k < i; k++)
			{
				t[k].lenbuf = lenbuf;
				t[k].tag = k;
				t[k].buf = buf + (k * lenbuf);

				pthread_create(&thid[k], &attr, (void *(*)(void *))transfiere, &t[k]);
			}

			for (k = 0; k < i; k++)
				pthread_join(thid[k], NULL);
#endif

			used_time = (MPI_Wtime() - start_time);
	
       		avg_time =  used_time / (float)  PRUEBAS;
	

			if (avg_time > 0)    /* rate is megabytes per second */
                   	us_rate = (double)((nproc * lenbuf * (i))/
					(avg_time*(double)1000000));
      			else
       				us_rate = 0.0;


			if (me == 0)
       			printf("len_bytes=%d avg_time_sec=%f rate_Mbytes_sec=%f\n", lenbuf, (double)avg_time/(2.0*i), (double)us_rate);
	
			lenbuf *= 2;
			free(buf);
		}

		i *= 2;

	}

		if (me == 0)
		{
			char c;
			read(0, &c, 1);
		}


#if (0)
	if (me != 0)
	    printf("\nclock resolution in seconds: %10.8f\n", MPI_Wtick());
#endif

	MPI_Finalize();
    free(buf);
	exit(0);

}
Example #29
/************************************************************************* 
 * method: initpympi
 * This is called right after python has been initialized.  MPI has already
 * been initialized here 
 * ************************************************************************/
void initpympi() 
{
    PyObject* mpiName = 0;
    char versionString[32];
    PyObject* lastWish = 0;
    int version;
    int subversion;
    PyObject* pickleModule = 0;
    PyObject* pickleDict = 0;
    PyObject* docString = 0;
    PyObject* pyWorld = 0;
    PyObject* member = 0;
    PyMethodDef* methodPtr = 0;
    char* docExtra = 0;
    int myRank = 0;
    int result = MPI_Comm_rank(MPI_COMM_WORLD, &myRank );

    /* ----------------------------------------------- */
    /* The IBM poe environment is brain dead           */
    /* ----------------------------------------------- */
#ifdef _AIX
    Py_InteractiveFlag++;
#endif

    /* ----------------------------------------------- */
    /* Cover our butts on assumptions                  */
    /* ----------------------------------------------- */
    Assert( sizeof(MPI_Comm) <= sizeof(long) );

    /* ----------------------------------------------- */
    /* We subvert the input stream to handle broadcast */
    /* ----------------------------------------------- */
    Original_ReadlineFunctionPointer = PyOS_ReadlineFunctionPointer;
    if ( !Original_ReadlineFunctionPointer ) {
        Original_ReadlineFunctionPointer = PyOS_StdioReadline;
    }

    PyOS_ReadlineFunctionPointer = MPI_ReadlineFunctionPointer;

    /* ----------------------------------------------- */
    /* Setup the initial mpi module                    */
    /* ----------------------------------------------- */
    module = Py_InitModule("mpi",MPI_methods);
    Assert(module);
    PyMPI_dictionary = PyModule_GetDict(module);
    Assert(PyMPI_dictionary);

    /* ----------------------------------------------- */
    /* Set up a docstring for the mpi module itself    */
    /* ----------------------------------------------- */
    docExtra = DocStringFromMethods(MPI_methods,"mpi\n\nBasic mpi calls\n\n");
    Assert(docExtra);
    docString = PyString_FromString(docExtra);
    free(docExtra);

    /* ----------------------------------------------- */
    /* We start off with errors handled with flag      */
    /* ----------------------------------------------- */
    if ( MPI_Errhandler_set(MPI_COMM_WORLD,MPI_ERRORS_RETURN)
            != MPI_SUCCESS ) {
        PYCHECK( PyErr_SetString(PyExc_SystemError,"MPI Failure -- MPI_Errhandler_set()") );
    }

    /* ----------------------------------------------- */
    /* See if we conform!                              */
    /* ----------------------------------------------- */
    MPICHECKCOMMLESS( MPI_Get_version(&version,&subversion) );
    Assert(version == MPI_VERSION && subversion == MPI_SUBVERSION);

    /* ----------------------------------------------- */
    /* We have some cleanup work to do on exit         */
    /* ----------------------------------------------- */
    PYCHECK( lastWish = PyCFunction_New(&lastwishMethods,module) );
    Assert(lastWish);
    sysExitfunc = PySys_GetObject("exitfunc");
    PyErr_Clear();
    PYCHECK( PySys_SetObject("exitfunc",lastWish) );

    /* ----------------------------------------------- */
    /* Set common attributes                           */
    /* ----------------------------------------------- */
    PYCHECK( PyDict_SetItemString(PyMPI_dictionary,"stdout",Py_None) );
    Py_INCREF(Py_None);

    PYCHECK( PyString_ConcatFromString(&docString,"name: Name of MPI model (MPICH, LAM, mpi)") );
#ifdef MPICH_NAME
    PYCHECK( mpiName = PyString_FromString("MPICH") );
#else
# ifdef LAM_MPI
    PYCHECK( mpiName = PyString_FromString("LAM") );
# else
    PYCHECK( mpiName = PyString_FromString("mpi") );
# endif
#endif
    PYCHECK( PyDict_SetItemString(PyMPI_dictionary,"name",mpiName) );

    PYCHECK( PyString_ConcatFromString(&docString,"rank: Rank of MPI_COMM_WORLD communicator\n") );
    MPICHECK( MPI_COMM_WORLD,
            MPI_Comm_rank(MPI_COMM_WORLD,&worldRank));
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"rank",
                PyInt_FromLong((long)worldRank)));

    PYCHECK(PyString_ConcatFromString(&docString,"procs: Size of MPI_COMM_WORLD communicator\n"));
    MPICHECK( MPI_COMM_WORLD,
            MPI_Comm_size(MPI_COMM_WORLD,&worldProcs));
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"procs",
                PyInt_FromLong((long)worldProcs)));

    PYCHECK(PyString_ConcatFromString(&docString,"tick: Tick size of high-resolution timer\n"));
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"tick",
                PyFloat_FromDouble(MPI_Wtick())));

    PYCHECK(PyString_ConcatFromString(&docString,"version: String showing mpi version\n"));
    sprintf(versionString,"%d.%d",MPI_VERSION,MPI_SUBVERSION);
#if defined(MPICH_NAME) && MPI_VERSION == 1 && MPI_SUBVERSION == 1 && MPI_STATUS_SIZE == 5
    strcat(versionString,".2"); /* MPICH 1.1.2 is evil */
#endif
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"version",
                PyString_FromString(versionString)));

    PYCHECK(PyString_ConcatFromString(&docString,"COMM_WORLD: MPI_COMM_WORLD communicator\n"));
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"COMM_WORLD",
                PyMPI_Comm(MPI_COMM_WORLD)));

    PYCHECK(PyString_ConcatFromString(&docString,"COMM_NULL: MPI_COMM_NULL communicator (non-functional)\n"));
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"COMM_NULL",
                PyMPI_Comm(MPI_COMM_NULL)));

    PYCHECK(PyString_ConcatFromString(&docString,"MAX: MPI_MAX\n"));
    /*PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"MAX",
      PyInt_FromLong((long)MPI_MAX)));*/
    reduceOpLookup[eMaxOp] = MPI_MAX;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"MAX",
                PyInt_FromLong((long)eMaxOp)));

    PYCHECK(PyString_ConcatFromString(&docString,"MIN: MPI_MIN\n"));

    /*PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"MIN",
      PyInt_FromLong((long)MPI_MIN)));*/
    reduceOpLookup[eMinOp] = MPI_MIN;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"MIN",
                PyInt_FromLong((long)eMinOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"SUM: MPI_SUM\n"));
    /*PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"SUM",
      PyInt_FromLong((long)MPI_SUM)));*/
    reduceOpLookup[eSumOp] = MPI_SUM;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"SUM",
                PyInt_FromLong((long)eSumOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"PROD: MPI_PROD\n"));
    /*PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"PROD",
      PyInt_FromLong((long)MPI_PROD)));*/
    reduceOpLookup[eProdOp] = MPI_PROD;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"PROD",
                PyInt_FromLong((long)eProdOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"LAND: MPI_LAND\n"));
    /*PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"LAND",
      PyInt_FromLong((long)MPI_LAND)));*/
    reduceOpLookup[eLandOp] = MPI_LAND;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"LAND",
                PyInt_FromLong((long)eLandOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"BAND: MPI_BAND\n"));
    /*PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"BAND",
      PyInt_FromLong((long)MPI_BAND)));*/
    reduceOpLookup[eBandOp] = MPI_BAND;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"BAND",
                PyInt_FromLong((long)eBandOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"LOR: MPI_LOR\n"));
    /*PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"LOR",
      PyInt_FromLong((long)MPI_LOR)));*/
    reduceOpLookup[eLorOp] = MPI_LOR;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"LOR",
                PyInt_FromLong((long)eLorOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"BOR: MPI_BOR\n"));
    /*   PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"BOR",
         PyInt_FromLong((long)MPI_BOR)));*/
    reduceOpLookup[eBorOp] = MPI_BOR;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"BOR",
                PyInt_FromLong((long)eBorOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"LXOR: MPI_LXOR\n"));
    /*   PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"LXOR",
         PyInt_FromLong((long)MPI_LXOR)));*/
    reduceOpLookup[eLxorOp] = MPI_LXOR;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"LXOR",
                PyInt_FromLong((long)eLxorOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"BXOR: MPI_BXOR\n"));
    /*   PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"BXOR",
         PyInt_FromLong((long)MPI_BXOR)));*/
    reduceOpLookup[eBxorOp] = MPI_BXOR;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"BXOR",
                PyInt_FromLong((long)eBxorOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"MINLOC: MPI_MINLOC\n"));
    /*   PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"MINLOC",
         PyInt_FromLong((long)MPI_MINLOC)));*/
    reduceOpLookup[eMinlocOp] = MPI_MINLOC;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"MINLOC",
                PyInt_FromLong((long)eMinlocOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"MAXLOC: MPI_MAXLOC\n"));
    /*   PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"MAXLOC",
         PyInt_FromLong((long)MPI_MAXLOC)));*/
    reduceOpLookup[eMaxlocOp] = MPI_MAXLOC;
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"MAXLOC",
                PyInt_FromLong((long)eMaxlocOp)));

    PYCHECK(   PyString_ConcatFromString(&docString,"ANY_SOURCE: MPI_ANY_SOURCE (used for untargeted recv)\n"));
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"ANY_SOURCE",
                PyInt_FromLong((long)MPI_ANY_SOURCE)));

    PYCHECK(   PyString_ConcatFromString(&docString,"ANY_TAG: MPI_ANY_TAG (used for untargeted recv)\n"));
    PYCHECK(PyDict_SetItemString(PyMPI_dictionary,"ANY_TAG",
                PyInt_FromLong((long)MPI_ANY_TAG)));

    /* ----------------------------------------------- */
    /* We set up an internal communicator for PYTHON   */
    /* messaging and another for Python I/O            */
    /* ----------------------------------------------- */
    MPICHECK( MPI_COMM_WORLD,
            MPI_Comm_dup(MPI_COMM_WORLD,&PyMPI_COMM_WORLD));
    MPICHECK( MPI_COMM_WORLD,
            MPI_Comm_dup(MPI_COMM_WORLD,&PyMPI_COMM_INPUT));
    PYCHECK( PyString_ConcatFromString(&docString,"WORLD: Internal Python communicator\n") );
    PYCHECK( pyWorld = PyMPI_Comm(PyMPI_COMM_WORLD) );
    PYCHECK( PyDict_SetItemString(PyMPI_dictionary,"WORLD",pyWorld) );

    /* ----------------------------------------------- */
    /* Fetch member functions to appear as mpi.xxxx    */
    /* Make pyWorld immortal to avoid dealloc issues   */
    /* Magic Rating: 10/10                             */
    /* ----------------------------------------------- */
    Py_INCREF(pyWorld);
    for(methodPtr = PyMPIMethods_Comm; methodPtr->ml_name; ++methodPtr) {
        PYCHECK( member = PyObject_GetAttrString(pyWorld,methodPtr->ml_name) );
        Py_INCREF(member);
        PYCHECK( PyDict_SetItemString(PyMPI_dictionary,methodPtr->ml_name,member) );
    }

    /* ----------------------------------------------- */
    /* Set up the overloaded input                     */
    /* ----------------------------------------------- */
    PYCHECK( overloadedInput = PyMPI_File(worldRank != 0,PySys_GetObject("stdin"),PyMPI_COMM_INPUT) );
    Py_INCREF(overloadedInput);
    PyDict_SetItemString(PyMPI_dictionary,"stdin",overloadedInput);
    PySys_SetObject("stdin",overloadedInput);

    /* ----------------------------------------------- */
    /* Initial model is no output throttle             */
    /* ----------------------------------------------- */
    PYCHECK( PyMPI_UnrestrictedOutput(3) );

    /* ----------------------------------------------- */
    /* Have to set up some stuff for communicating     */
    /* arbitrary python objects                        */
    /* ----------------------------------------------- */

    /* -- set up PyMPI_pythonPickleType  -- */
    MPICHECKCOMMLESS( MPI_Type_contiguous(1,MPI_CHAR,&PyMPI_pythonPickleType) );
    MPICHECKCOMMLESS( MPI_Type_commit(&PyMPI_pythonPickleType) );
    Assert( PyMPI_pythonPickleType != MPI_CHAR );
    /* -- set up PyMPI_pythonFuncPickleType  -- */
    MPICHECKCOMMLESS( MPI_Type_contiguous(1,MPI_CHAR,&PyMPI_pythonFuncPickleType) );
    MPICHECKCOMMLESS( MPI_Type_commit(&PyMPI_pythonFuncPickleType) );
    Assert( PyMPI_pythonFuncPickleType != MPI_CHAR );

    /* -- set up PyMPI_pythonFuncPickleType  -- */
    pickleModule = PyImport_ImportModule("cPickle");
    if ( !pickleModule || PyErr_Occurred() ) {
        PyErr_Clear();
    } else {
        PYCHECK( pickleDict = PyModule_GetDict(pickleModule) );
        PYCHECK( PyMPI_pickleDumperFunction = PyDict_GetItemString(pickleDict,"dumps") );
        PYCHECK( PyMPI_pickleLoaderFunction = PyDict_GetItemString(pickleDict,"loads") );
    }

    /* ----------------------------------------------- */
    /* Set up the __doc__ string of the communicator   */
    /* type with more info (based on list)             */
    /* ----------------------------------------------- */
    PyMPIObject_Communicator_Type.tp_doc =
        DocStringFromMethods(PyMPIMethods_Comm,
                PyMPIObject_Communicator_Type.tp_doc);

    /* ----------------------------------------------- */
    /* Set up same info in module doc                  */
    /* ----------------------------------------------- */
    docExtra = 
        DocStringFromMethods(PyMPIMethods_Comm,
                "\nAnd these communicator methods map to the Python world communicator (not MPI_COMM_WORLD)\n\n");
    Assert(docExtra);
    PYCHECK( PyString_ConcatFromString(&docString,docExtra) );
    free(docExtra);

    /* ----------------------------------------------- */
    /* Stick in the doc string and we're ready to go   */
    /* ----------------------------------------------- */
    PYCHECK( PyDict_SetItemString(PyMPI_dictionary, "__doc__", docString) );

    return;

pythonError:
    Assert( PyErr_Occurred() );
    return; /* We have set a Python exception, let Python handle it */
}
Example #30
File: gather.c Project: acaldero/MiMPI
int main(int argc, char *argv[])
{

	int             ret;
	char           *send_buf;
	char           *recv_buf;
	char            processor_name[MPI_MAX_PROCESSOR_NAME];
	int             namelen;
	double          start_time;
	double          stop_time;
        double          used_time;
        double          avg_time;
        double          barrier_time;
        double          us_rate;
        int             max_len, lenbuf;
        int             j;
	int             me, nproc;
        FILE           *fparam ;


        /*
         *  welcome
         */
	setbuf(stdout, NULL) ;

        /*
         *  get values
         */
        fparam = fopen("gather.in","rt") ;
        if (fparam == NULL)
        {
          printf("ERROR: can not open gather.in, sorry.\n") ;
          exit(1) ;
        }
        ret = fscanf(fparam,"max_len=%i",&max_len) ;
        fclose(fparam) ;
        if (ret != 1)
        {
          printf("ERROR: can not read 'max_len' from gather.in, sorry.\n") ;
	  exit(1) ;
        }
        if ( (max_len <= 0) || (max_len >= 8*1024*1024) )
        {
                printf("ERROR: (max_len <= 0) || (max_len >= 8*1024*1024)\n") ;
                exit(1) ;
        }


        /*
         *  init MPI
         */
	ret = MPI_Init(&argc, &argv);	
	if (ret < 0)
	{
		printf("Can't init\n") ;
		exit(1) ;
	}

	MPI_Comm_rank(MPI_COMM_WORLD,&me) ;
	MPI_Get_processor_name(processor_name,&namelen) ;
	MPI_Comm_size(MPI_COMM_WORLD, &nproc) ;

	MPI_Barrier(MPI_COMM_WORLD) ;

#if (0)
	printf("Process %d; total %d is alive on %s\n",me,nproc,processor_name) ;
#endif


        /*
         *  buffers
         */
        send_buf = (char *) malloc((unsigned) max_len) ;
        if (send_buf == NULL)
        {
                perror("Error en malloc") ;
                exit(1) ;
        }
	memset(send_buf,'x',max_len) ;

        recv_buf = (char *) malloc((unsigned) max_len * nproc) ;
        if (recv_buf == NULL)
        {
                perror("Error en malloc") ;
                exit(1) ;
        }
	memset(recv_buf,'x',max_len * nproc) ;


        /*
         *  barrier time
         */
	start_time = MPI_Wtime() ;
	for(j = 0; j < PRUEBAS; j++)
        {
	  MPI_Barrier(MPI_COMM_WORLD) ;
        }
	barrier_time = (MPI_Wtime() - start_time) ;
	if (me == 0)
        	printf(">>>>>>>>> BARRERA1  =%e\n",  barrier_time);


        /*
         *  test
         */
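        /* For each message size, rank 0 times PRUEBAS MPI_Gather calls (minus the measured
           barrier cost); the other ranks only participate in the gathers */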
        for (lenbuf = 1; (lenbuf <= max_len); lenbuf *= 2)
        {
	        MPI_Barrier(MPI_COMM_WORLD) ;

		if (me != 0)
		{
			for(j = 0; j < PRUEBAS; j++)
                	{

				MPI_Gather(send_buf,lenbuf,MPI_CHAR,
				           recv_buf,lenbuf,MPI_CHAR,
                                           0,MPI_COMM_WORLD) ;
	                        MPI_Barrier(MPI_COMM_WORLD) ;
			}
		}
		else
		{
		        avg_time = 0.0;
			start_time = MPI_Wtime() ;
			for(j = 0; j < PRUEBAS; j++)
                        {

				MPI_Gather(send_buf,lenbuf,MPI_CHAR,
				           recv_buf,lenbuf,MPI_CHAR,
                                           0,MPI_COMM_WORLD) ;
	                        MPI_Barrier(MPI_COMM_WORLD) ;

			}
			stop_time = MPI_Wtime() ;
			used_time = (stop_time - start_time) ;

                	avg_time =  (used_time - barrier_time)  / (float)  PRUEBAS;
			if (avg_time > 0)    /* rate is megabytes per second */
                        	us_rate = (double)((nproc * lenbuf)/
					(avg_time*(double)1000000)) ;
                	else
                        	us_rate = 0.0;

                	printf("len_bytes=%e avg_time_sec=%e rate_Mbytes_sec=%e\n", 
			        (double)lenbuf, (double)avg_time, (double)us_rate) ;
                }

        }

#if (0)
	if (me == 0)
	    printf("\nclock resolution in seconds: %10.8f\n", MPI_Wtick()) ;
#endif

	MPI_Finalize() ;
        free(send_buf) ;
        free(recv_buf) ;
	exit(0) ;

}