Example #1
File: f_wrappers.c  Project: apcraig/cime
int gptlpr_summary_file (int *fcomm, char *file, int nc1)
{
  char *locfile;
  int c;
  int ret;

#ifdef HAVE_MPI
  MPI_Comm ccomm;
#ifdef HAVE_COMM_F2C
  ccomm = MPI_Comm_f2c (*fcomm);
#else
  /* Punt and try just casting the Fortran communicator */
  ccomm = (MPI_Comm) *fcomm;
#endif
#else
  int ccomm = 0;
#endif

  if ( ! (locfile = (char *) malloc (nc1+1)))
    return GPTLerror ("gptlpr_summary_file: malloc error\n");

  //pw  snprintf (locfile, nc1+1, "%s", file);
  for (c = 0; c < nc1; c++) {
    locfile[c] = file[c];
  }
  locfile[c] = '\0';

  ret = GPTLpr_summary_file (ccomm, locfile);
  free (locfile);
  return ret;
}
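
The wrapper above has to cope with the fact that the Fortran caller passes the file name as a character buffer of length nc1 with no null terminator, so it copies the bytes into a freshly allocated buffer and terminates it by hand. A minimal sketch of that idiom, using only standard C library calls and a hypothetical helper name (fstr_to_cstr is not part of GPTL):

#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: copy a Fortran character argument of length len,
 * which carries no null terminator, into a freshly malloc'd,
 * null-terminated C string. The caller frees the result; NULL means
 * malloc failed. */
static char *fstr_to_cstr (const char *fstr, int len)
{
  char *cstr = (char *) malloc (len + 1);
  if ( ! cstr)
    return NULL;
  memcpy (cstr, fstr, len);   /* copy exactly the len bytes Fortran passed */
  cstr[len] = '\0';           /* append the terminator Fortran omits */
  return cstr;
}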
Example #2
int gptlpr_summary_file (char *outfile, int nc1)
{
  char *locfile;
  int ret;

  if ( ! (locfile = (char *) malloc (nc1+1)))
    return GPTLerror ("gptlpr_summary_file: malloc error\n");

  snprintf (locfile, nc1+1, "%s", outfile);
  ret = GPTLpr_summary_file (locfile);
  free (locfile);
  return ret;
}
Example #3
int gptlpr_summary_file (int *fcomm, char *outfile, int nc1)
{
  MPI_Comm ccomm;
  char *locfile;
  int ret;

  if ( ! (locfile = (char *) malloc (nc1+1)))
    return GPTLerror ("gptlpr_summary_file: malloc error\n");

  snprintf (locfile, nc1+1, "%s", outfile);

#ifdef HAVE_COMM_F2C
  ccomm = MPI_Comm_f2c (*fcomm);
#else
  /* Punt and try just casting the Fortran communicator */
  ccomm = (MPI_Comm) *fcomm;
#endif
  ret = GPTLpr_summary_file (ccomm, locfile);
  free (locfile);
  return ret;
}
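
Examples #1 and #3 both convert the Fortran integer communicator handle into a C MPI_Comm before calling into the library. MPI_Comm_f2c is the portable route; the bare cast is the same punt the wrappers fall back on when HAVE_COMM_F2C is not defined, and it only works on MPI implementations whose C handles happen to be plain integers. A standalone sketch of that conversion (comm_from_fortran is a made-up name; HAVE_COMM_F2C is the configure macro the wrappers already test):

#include <mpi.h>

/* Sketch only: convert a Fortran communicator handle to a C MPI_Comm. */
static MPI_Comm comm_from_fortran (MPI_Fint fcomm)
{
#ifdef HAVE_COMM_F2C
  return MPI_Comm_f2c (fcomm);   /* standard conversion of a Fortran handle */
#else
  return (MPI_Comm) fcomm;       /* punt: cast the integer handle directly */
#endif
}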
Example #4
File: summary.c  Project: jmrosinski/GPTL
int main (int argc, char **argv)
{
  char pname[MPI_MAX_PROCESSOR_NAME];

  int iter;
  int counter;
  int c;
  int tnum = 0;
  int resultlen;
  int ret;
  double value;
  extern char *optarg;

  while ((c = getopt (argc, argv, "p:")) != -1) {
    switch (c) {
    case 'p':
      if ((ret = GPTLevent_name_to_code (optarg, &counter)) != 0) {
	printf ("Failure from GPTLevent_name_to_code\n");
	return 1;
      }
      if (GPTLsetoption (counter, 1) < 0) {
	printf ("Failure from GPTLsetoption (%s,1)\n", optarg);
	return 1;
      }
      break;
    default:
      printf ("unknown option %c\n", c);
      printf ("Usage: %s [-p option_name]\n", argv[0]);
      return 2;
    }
  }
  
  ret = GPTLsetoption (GPTLabort_on_error, 1);
  ret = GPTLsetoption (GPTLoverhead, 1);
  ret = GPTLsetoption (GPTLnarrowprint, 1);

  if (MPI_Init (&argc, &argv) != MPI_SUCCESS) {
    printf ("Failure from MPI_Init\n");
    return 1;
  }

  ret = GPTLinitialize ();
  ret = GPTLstart ("total");
	 
  ret = MPI_Comm_rank (MPI_COMM_WORLD, &iam);
  ret = MPI_Comm_size (MPI_COMM_WORLD, &nproc);

  ret = MPI_Get_processor_name (pname, &resultlen);
  printf ("Rank %d is running on processor %s\n", iam, pname);

#ifdef THREADED_OMP
  nthreads = omp_get_max_threads ();
#pragma omp parallel for private (iter, ret, tnum)
#endif

  for (iter = 1; iter <= nthreads; iter++) {
#ifdef THREADED_OMP
    tnum = omp_get_thread_num ();
#endif
    printf ("Thread %d of rank %d on processor %s\n", tnum, iam, pname);
    value = sub (iter);
  }

  ret = GPTLstop ("total");
  ret = GPTLpr (iam);

  if (iam == 0) {
    printf ("summary: testing GPTLpr_summary...\n");
    printf ("Number of threads was %d\n", nthreads);
    printf ("Number of tasks was %d\n", nproc);
  }

  // NOTE: if ENABLE_PMPI is set, 2nd pr call below will show some extra send/recv calls
  // due to MPI calls from within GPTLpr_summary_file
  if (GPTLpr_summary (MPI_COMM_WORLD) != 0)
    return 1;

  if (GPTLpr_summary_file (MPI_COMM_WORLD, "timing.summary.duplicate") != 0)
    return 1;

  ret = MPI_Finalize ();

  if (GPTLfinalize () != 0)
    return 1;

  return 0;
}
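
For context, the essential GPTL call sequence exercised by Example #4 boils down to: initialize, time a region, write a per-rank report, write a cross-rank summary, finalize. A stripped-down sketch under the assumption of an MPI-enabled GPTL build (the output file name is illustrative; the sub()/OpenMP work from the example is elided):

#include <mpi.h>
#include <gptl.h>

int main (int argc, char **argv)
{
  int iam;

  if (MPI_Init (&argc, &argv) != MPI_SUCCESS)
    return 1;
  MPI_Comm_rank (MPI_COMM_WORLD, &iam);

  (void) GPTLinitialize ();

  (void) GPTLstart ("total");
  /* ... the work being timed goes here ... */
  (void) GPTLstop ("total");

  (void) GPTLpr (iam);                                             /* per-rank report */
  (void) GPTLpr_summary_file (MPI_COMM_WORLD, "timing.summary");   /* cross-rank summary */

  MPI_Finalize ();          /* mirror Example #4: MPI is shut down first */
  (void) GPTLfinalize ();
  return 0;
}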