Example #1
static int PyMPELog_Start(void)
{
  int ierr = 0;
#if HAVE_MPE
  ierr = MPE_Start_log();
#endif /* HAVE_MPE */
  return ierr;
}
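Example #1 only calls MPE_Start_log() when the code was built with MPE support, so the wrapper is a no-op otherwise. A matching stop wrapper could follow the same pattern; this is a minimal sketch, and the PyMPELog_Stop name is hypothetical rather than taken from the original source:

static int PyMPELog_Stop(void)
{
  int ierr = 0;
#if HAVE_MPE
  ierr = MPE_Stop_log();  /* suspend logging; MPE_Start_log() can resume it */
#endif /* HAVE_MPE */
  return ierr;
}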
Example #2
int MPI_Init(int *argc, char*** argv)
{
    int wynik=PMPI_Init(argc,argv);
    MPE_Init_log();
    int proc;
    MPI_Comm_rank(MPI_COMM_WORLD,&proc);
    if(proc==0)
    {
        MPE_Describe_state(START_BCAST,END_BCAST,"broadcast","red");
        MPE_Describe_state(START_SEND,END_SEND,"send","blue");
        MPE_Describe_state(START_RECV,END_RECV,"recv","green");
    }
    MPE_Start_log();

    return wynik;
}
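Example #2 is a PMPI profiling wrapper: it intercepts MPI_Init, forwards the call to PMPI_Init, and registers MPE states on rank 0. For the trace to actually be written to disk, a matching MPI_Finalize wrapper is normally needed as well; a minimal sketch, where the log file name "mpe_trace" is an assumption:

int MPI_Finalize(void)
{
    /* Flush the MPE log before the real finalize tears down MPI. */
    MPE_Finish_log("mpe_trace");
    return PMPI_Finalize();
}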
Example #3
int
main(int argc, char **argv)
{
    /* MPI stuff. */
    int mpi_namelen;		
    char mpi_name[MPI_MAX_PROCESSOR_NAME];
    int mpi_size, mpi_rank;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;
    double start_time = 0, total_time;

    /* Netcdf-4 stuff. */
    int ncid, varid, dimids[NDIMS];
    size_t start[NDIMS] = {0, 0, 0};
    size_t count[NDIMS] = {1, DIMSIZE, DIMSIZE};
    int data[DIMSIZE * DIMSIZE], data_in[DIMSIZE * DIMSIZE];
    int j, i;
    
    char file_name[NC_MAX_NAME + 1];
    int ndims_in, nvars_in, natts_in, unlimdimid_in;

#ifdef USE_MPE
    int s_init, e_init, s_define, e_define, s_write, e_write, s_close, e_close;
    int s_open, e_open, s_read, e_read;
#endif /* USE_MPE */

    /* Initialize MPI. */
    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Get_processor_name(mpi_name, &mpi_namelen);
    /*printf("mpi_name: %s size: %d rank: %d\n", mpi_name, mpi_size, mpi_rank);*/

    /* Must be able to evenly divide my slabs between processors. */
    if (NUM_SLABS % mpi_size != 0)
    {
       if (!mpi_rank) printf("NUM_SLABS (%d) is not evenly divisible by mpi_size(%d)\n", 
                             NUM_SLABS, mpi_size);
       ERR;
    }

#ifdef USE_MPE
    MPE_Init_log();
    s_init = MPE_Log_get_event_number(); 
    e_init = MPE_Log_get_event_number(); 
    s_define = MPE_Log_get_event_number(); 
    e_define = MPE_Log_get_event_number(); 
    s_write = MPE_Log_get_event_number(); 
    e_write = MPE_Log_get_event_number(); 
    s_close = MPE_Log_get_event_number(); 
    e_close = MPE_Log_get_event_number(); 
    s_open = MPE_Log_get_event_number();
    e_open = MPE_Log_get_event_number();
    s_read = MPE_Log_get_event_number();
    e_read = MPE_Log_get_event_number();
    MPE_Describe_state(s_init, e_init, "Init", "red");
    MPE_Describe_state(s_define, e_define, "Define", "yellow");
    MPE_Describe_state(s_write, e_write, "Write", "green");
    MPE_Describe_state(s_close, e_close, "Close", "purple");
    MPE_Describe_state(s_open, e_open, "Open", "blue");
    MPE_Describe_state(s_read, e_read, "Read", "orange");
    MPE_Start_log();
    MPE_Log_event(s_init, 0, "start init");
#endif /* USE_MPE */

/*     if (!mpi_rank) */
/*     { */
/*        printf("\n*** Testing parallel I/O some more.\n"); */
/*        printf("*** writing a %d x %d x %d file from %d processors...\n",  */
/*               NUM_SLABS, DIMSIZE, DIMSIZE, mpi_size); */
/*     } */

    /* We will write the same slab over and over. */
    for (i = 0; i < DIMSIZE * DIMSIZE; i++)
       data[i] = mpi_rank;

#ifdef USE_MPE
    MPE_Log_event(e_init, 0, "end init");
    MPE_Log_event(s_define, 0, "start define file");
#endif /* USE_MPE */

    /* Create a parallel netcdf-4 file. */
    sprintf(file_name, "%s/%s", TEMP_LARGE, FILE_NAME);
    if (nc_create_par(file_name, NC_PNETCDF, comm, info, &ncid)) ERR;

    /* A global attribute holds the number of processors that created
     * the file. */
    if (nc_put_att_int(ncid, NC_GLOBAL, "num_processors", NC_INT, 1, &mpi_size)) ERR;

    /* Create three dimensions. */
    if (nc_def_dim(ncid, DIM1_NAME, NUM_SLABS, dimids)) ERR;
    if (nc_def_dim(ncid, DIM2_NAME, DIMSIZE, &dimids[1])) ERR;
    if (nc_def_dim(ncid, DIM3_NAME, DIMSIZE, &dimids[2])) ERR;

    /* Create one var. */
    if (nc_def_var(ncid, VAR_NAME, NC_INT, NDIMS, dimids, &varid)) ERR;

    /* Write metadata to file. */
    if (nc_enddef(ncid)) ERR;

#ifdef USE_MPE
    MPE_Log_event(e_define, 0, "end define file");
    if (mpi_rank)
       sleep(mpi_rank);
#endif /* USE_MPE */

/*    if (nc_var_par_access(ncid, varid, NC_COLLECTIVE)) ERR;*/
    if (nc_var_par_access(ncid, varid, NC_INDEPENDENT)) ERR;

    if (!mpi_rank)
       start_time = MPI_Wtime();

    /* Write all the slabs this process is responsible for. */
    for (i = 0; i < NUM_SLABS / mpi_size; i++)
    {
       start[0] = NUM_SLABS / mpi_size * mpi_rank + i;

#ifdef USE_MPE
       MPE_Log_event(s_write, 0, "start write slab");
#endif /* USE_MPE */

       /* Write one slab of data. */
       if (nc_put_vara_int(ncid, varid, start, count, data)) ERR;

#ifdef USE_MPE
       MPE_Log_event(e_write, 0, "end write file");
#endif /* USE_MPE */
    }

    if (!mpi_rank)
    {
       total_time = MPI_Wtime() - start_time;
/*       printf("num_proc\ttime(s)\n");*/
       printf("%d\t%g\t%g\n", mpi_size, total_time, DIMSIZE * DIMSIZE * NUM_SLABS * sizeof(int) / total_time);
    }

#ifdef USE_MPE
    MPE_Log_event(s_close, 0, "start close file");
#endif /* USE_MPE */

    /* Close the netcdf file. */
    if (nc_close(ncid))	ERR;
    
#ifdef USE_MPE
    MPE_Log_event(e_close, 0, "end close file");
#endif /* USE_MPE */

    /* Reopen the file and check it. */
    if (nc_open_par(file_name, NC_NOWRITE, comm, info, &ncid)) ERR;
    if (nc_inq(ncid, &ndims_in, &nvars_in, &natts_in, &unlimdimid_in)) ERR;
    if (ndims_in != NDIMS || nvars_in != 1 || natts_in != 1 || 
        unlimdimid_in != -1) ERR;

    /* Read all the slabs this process is responsible for. */
    for (i = 0; i < NUM_SLABS / mpi_size; i++)
    {
       start[0] = NUM_SLABS / mpi_size * mpi_rank + i;

#ifdef USE_MPE
       MPE_Log_event(s_read, 0, "start read slab");
#endif /* USE_MPE */

       /* Read one slab of data. */
       if (nc_get_vara_int(ncid, varid, start, count, data_in)) ERR;
       
       /* Check data. */
       for (j = 0; j < DIMSIZE * DIMSIZE; j++)
	  if (data_in[j] != mpi_rank) 
	  {
	     ERR;
	     break;
	  }

#ifdef USE_MPE
       MPE_Log_event(e_read, 0, "end read file");
#endif /* USE_MPE */
    }

#ifdef USE_MPE
    MPE_Log_event(s_close, 0, "start close file");
#endif /* USE_MPE */

    /* Close the netcdf file. */
    if (nc_close(ncid))	ERR;
    
#ifdef USE_MPE
    MPE_Log_event(e_close, 0, "end close file");
#endif /* USE_MPE */

    /* Delete this large file. */
    remove(file_name); 

    /* Shut down MPI. */
    MPI_Finalize();

/*     if (!mpi_rank) */
/*     { */
/*        SUMMARIZE_ERR; */
/*        FINAL_RESULTS; */
/*     } */
    return total_err;
}
Example #4
int
main(int argc, char **argv)
{
   int p, my_rank;

#ifdef USE_MPE
   int s_init, e_init, s_define, e_define, s_write, e_write, s_close, e_close;
#endif /* USE_MPE */

   MPI_Init(&argc, &argv);
   MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
   MPI_Comm_size(MPI_COMM_WORLD, &p);

#ifdef USE_MPE
   MPE_Init_log();
   s_init = MPE_Log_get_event_number(); 
   e_init = MPE_Log_get_event_number(); 
   s_define = MPE_Log_get_event_number(); 
   e_define = MPE_Log_get_event_number(); 
   s_write = MPE_Log_get_event_number(); 
   e_write = MPE_Log_get_event_number(); 
   s_close = MPE_Log_get_event_number(); 
   e_close = MPE_Log_get_event_number(); 
   MPE_Describe_state(s_init, e_init, "Init", "red");
   MPE_Describe_state(s_define, e_define, "Define", "yellow");
   MPE_Describe_state(s_write, e_write, "Write", "green");
   MPE_Describe_state(s_close, e_close, "Close", "purple");
   MPE_Start_log();
   MPE_Log_event(s_init, 0, "start init");
#endif /* USE_MPE */

   if (!my_rank)
      printf("*** Creating file for parallel I/O read, and rereading it...");
   {
      hid_t fapl_id, fileid, whole_spaceid, dsid, slice_spaceid, whole_spaceid1, xferid;
      hsize_t start[NDIMS], count[NDIMS];
      hsize_t dims[1];
      int data[SC1], data_in[SC1];
      int num_steps;
      double ftime;
      int write_us, read_us;
      int max_write_us, max_read_us;
      float write_rate, read_rate;
      int i, s;

      /* We will write the same slice of random data over and over to
       * fill the file. */
      for (i = 0; i < SC1; i++)
	 data[i] = rand();
      
#ifdef USE_MPE
      MPE_Log_event(e_init, 0, "end init");
      MPE_Log_event(s_define, 0, "start define file");
#endif /* USE_MPE */

      /* Create file. */
      if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) ERR;
      if (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL) < 0) ERR;
      if ((fileid = H5Fcreate(FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, 
			      fapl_id)) < 0) ERR;

      /* Create a space to deal with one slice in memory. */
      dims[0] = SC1;
      if ((slice_spaceid = H5Screate_simple(NDIMS, dims, NULL)) < 0) ERR;

      /* Create a space to write all slices. */
      dims[0] = DIM2_LEN;
      if ((whole_spaceid = H5Screate_simple(NDIMS, dims, NULL)) < 0) ERR;

      /* Create dataset. */
      if ((dsid = H5Dcreate1(fileid, VAR_NAME, H5T_NATIVE_INT, 
                             whole_spaceid, H5P_DEFAULT)) < 0) ERR;

      /* Use collective write operations. */
      if ((xferid = H5Pcreate(H5P_DATASET_XFER)) < 0) ERR;
      if (H5Pset_dxpl_mpio(xferid, H5FD_MPIO_COLLECTIVE) < 0) ERR;

#ifdef USE_MPE
      MPE_Log_event(e_define, 0, "end define file");
      if (my_rank)
	 sleep(my_rank);
#endif /* USE_MPE */

      /* Write the data in num_step steps. */
      ftime = MPI_Wtime();      
      num_steps = (DIM2_LEN/SC1) / p;
      for (s = 0; s < num_steps; s++)
      {
#ifdef USE_MPE
	 MPE_Log_event(s_write, 0, "start write slab");
#endif /* USE_MPE */

	 /* Select hyperslab for write of one slice. */
	 start[0] = s * SC1 * p + my_rank * SC1;
	 count[0] = SC1;
         if (H5Sselect_hyperslab(whole_spaceid, H5S_SELECT_SET, 
                                 start, NULL, count, NULL) < 0) ERR;

         if (H5Dwrite(dsid, H5T_NATIVE_INT, slice_spaceid, whole_spaceid, 
                      xferid, data) < 0) ERR;

#ifdef USE_MPE
	 MPE_Log_event(e_write, 0, "end write file");
#endif /* USE_MPE */
      }
      write_us = (MPI_Wtime() - ftime) * MILLION;
      MPI_Reduce(&write_us, &max_write_us, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
      if (!my_rank)
      {
	 write_rate = (float)(DIM2_LEN * sizeof(int))/(float)max_write_us;      
	 printf("\np=%d, write_rate=%g", p, write_rate);
      }
      
#ifdef USE_MPE
      MPE_Log_event(s_close, 0, "start close file");
#endif /* USE_MPE */

      /* Close. These collective operations will allow every process
       * to catch up. */
      if (H5Dclose(dsid) < 0 ||
          H5Sclose(whole_spaceid) < 0 ||
          H5Sclose(slice_spaceid) < 0 ||
          H5Pclose(fapl_id) < 0 ||
          H5Fclose(fileid) < 0)
         ERR;

#ifdef USE_MPE
      MPE_Log_event(e_close, 0, "end close file");
#endif /* USE_MPE */

      /* Open the file. */
      if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) ERR;
      if (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL) < 0) ERR;


      if (H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) ERR;
      if ((fileid = H5Fopen(FILE_NAME, H5F_ACC_RDONLY, fapl_id)) < 0) ERR;

      /* Create a space to deal with one slice in memory. */
      dims[0] = SC1;
      if ((slice_spaceid = H5Screate_simple(NDIMS, dims, NULL)) < 0) ERR;

      /* Open the dataset. */
      if ((dsid = H5Dopen(fileid, VAR_NAME)) < 0) ERR;
      if ((whole_spaceid1 = H5Dget_space(dsid)) < 0) ERR;

      ftime = MPI_Wtime();      
      
      /* Read the data, a slice at a time. */
      for (s = 0; s < num_steps; s++)
      {
	 /* Select hyperslab for read of one slice. */
	 start[0] = s * SC1 * p + my_rank * SC1;
	 count[0] = SC1;
         if (H5Sselect_hyperslab(whole_spaceid1, H5S_SELECT_SET, 
                                 start, NULL, count, NULL) < 0) 
	 {
	    ERR;
	    return 2;
	 }

         if (H5Dread(dsid, H5T_NATIVE_INT, slice_spaceid, whole_spaceid1, 
                     H5P_DEFAULT, data_in) < 0) 
	 {
	    ERR;
	    return 2;
	 }

/* 	 /\* Check the slice of data. *\/ */
/* 	 for (i = 0; i < SC1; i++) */
/* 	    if (data[i] != data_in[i])  */
/* 	    { */
/* 	       ERR; */
/* 	       return 2; */
/* 	    } */
      }
      read_us = (MPI_Wtime() - ftime) * MILLION;
      MPI_Reduce(&read_us, &max_read_us, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
      if (!my_rank)
      {
	 read_rate = (float)(DIM2_LEN * sizeof(int))/(float)max_read_us;      
	 printf(", read_rate=%g\n", read_rate);
      }
      
      /* Close down. */
      if (H5Dclose(dsid) < 0 ||
          H5Sclose(slice_spaceid) < 0 ||
          H5Sclose(whole_spaceid1) < 0 ||
          H5Pclose(fapl_id) < 0 ||
          H5Fclose(fileid) < 0)
         ERR;
   }
   if (!my_rank)
      SUMMARIZE_ERR;

   MPI_Finalize();

   if (!my_rank)
      FINAL_RESULTS;
   return 0;
}
Example #5
int main(int argc, char** argv)
{
  int         my_rank;          /* My process rank           */
  int         p;                /* The number of processes   */
  float       a = 0.0;          /* Left endpoint             */
  float       b = 30.0;         /* Right endpoint            */
  long int    n = 10000000;     /* Number of trapezoids      */
  double      h;                /* Trapezoid base length     */
  float       local_a;          /* Left endpoint my process  */
  float       local_b;          /* Right endpoint my process */
  long int    local_n;          /* Number of trapezoids for  */
                                /* my calculation            */
  long double      integral;         /* Integral over my interval */
  long double      total_integral;   /* Total integral            */
  int         source;           /* Process sending integral  */
  int         dest = 0;         /* All messages go to 0      */
  int         tag = 0;
  MPI_Status  status;
  double startTime, endTime, timeDifference;

  /* Let the system do what it needs to start up MPI */
  MPI_Init(&argc, &argv);
  /* Get my process rank */
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
  /* Find out how many processes are being used */
  MPI_Comm_size(MPI_COMM_WORLD, &p);  

    // Initializing the log, after communication
  MPE_Init_log();
  int event1a = MPE_Log_get_event_number(); 
  int event1b = MPE_Log_get_event_number();
  int event2a = MPE_Log_get_event_number(); 
  int event2b = MPE_Log_get_event_number();
  int event3a = MPE_Log_get_event_number(); 
  int event3b = MPE_Log_get_event_number();
  MPE_Describe_state(event1a, event1b, "Receive", "blue");
  MPE_Describe_state(event2a, event2b, "Send", "yellow");
  MPE_Describe_state(event3a, event3b, "Compute", "red");
  // Starting to log
  MPE_Start_log();

  if (my_rank==0)
	  startTime=MPI_Wtime();

  local_n = n / p;    /* Number of throws handled by this process */

  /* Each process computes its share of the Monte Carlo estimate. */
  MPE_Log_event(event3a, 0, "start compute");
  integral = throwNeedles(local_n);
  MPE_Log_event(event3b, 0, "end compute");

  /* Add up the integrals calculated by each process */
  if (my_rank == 0)
  {
    total_integral = integral;
    for (source=1; source<p; source++)
    {
      MPE_Log_event(event1a, 0, "Start to receive");
      MPI_Recv(&integral, 1, MPI_LONG_DOUBLE, source, tag, MPI_COMM_WORLD, &status);
      MPE_Log_event(event1b, 0, "Received");

      total_integral = total_integral + integral;
    }
  }
  else {
    MPE_Log_event(event2a, 0, "Start to Send");
    MPI_Send(&integral, 1, MPI_LONG_DOUBLE, dest, tag, MPI_COMM_WORLD);
    MPE_Log_event(event2b, 0, "Sent");
  }

  if (my_rank==0)
  {
    endTime = MPI_Wtime();
    timeDifference = endTime - startTime;	  
  }

  /* Print the result */
  if(my_rank == 0)
  {
    printf("The real value of PI is 3.141592653589793238462643\n");
    printf("our estimate of the value of PI is %.25Lf\n", total_integral/p);
    printf("Time taken for whole computation = %f seconds\n", timeDifference);
  }  
  // Write the MPE log before finalizing; the log file name is taken from argv[1]
  MPE_Finish_log(argv[1]);
  /* Shut down MPI */
  MPI_Finalize();
  
  return 0;
} /* main */
Example #6
static Int
p_start()                /* mpe_start */
{
  return (MPE_Start_log() == 0);
}
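Example #6 exposes MPE_Start_log() as a foreign predicate that succeeds when the underlying call returns 0. A stop predicate could be written the same way; this sketch is hypothetical (the p_stop name does not appear in the original source):

static Int
p_stop()                 /* mpe_stop */
{
  return (MPE_Stop_log() == 0);
}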
Example #7
int
main(int argc, char **argv)
{
    /* MPI stuff. */
    int mpi_namelen;		
    char mpi_name[MPI_MAX_PROCESSOR_NAME];
    int mpi_size, mpi_rank;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;

    /* Netcdf-4 stuff. */
    int ncid, v1id, dimids[NDIMS];
    size_t start[NDIMS], count[NDIMS];

    int data[DIMSIZE * DIMSIZE], i, res;
    int slab_data[DIMSIZE * DIMSIZE / 4]; /* one slab */
    char file_name[NC_MAX_NAME + 1];

#ifdef USE_MPE
    int s_init, e_init, s_define, e_define, s_write, e_write, s_close, e_close;
#endif /* USE_MPE */

    /* Initialize MPI. */
    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Get_processor_name(mpi_name, &mpi_namelen);
    /*printf("mpi_name: %s size: %d rank: %d\n", mpi_name, 
      mpi_size, mpi_rank);*/

#ifdef USE_MPE
    MPE_Init_log();
    s_init = MPE_Log_get_event_number(); 
    e_init = MPE_Log_get_event_number(); 
    s_define = MPE_Log_get_event_number(); 
    e_define = MPE_Log_get_event_number(); 
    s_write = MPE_Log_get_event_number(); 
    e_write = MPE_Log_get_event_number(); 
    s_close = MPE_Log_get_event_number(); 
    e_close = MPE_Log_get_event_number(); 
    MPE_Describe_state(s_init, e_init, "Init", "red");
    MPE_Describe_state(s_define, e_define, "Define", "yellow");
    MPE_Describe_state(s_write, e_write, "Write", "green");
    MPE_Describe_state(s_close, e_close, "Close", "purple");
    MPE_Start_log();
    MPE_Log_event(s_init, 0, "start init");
#endif /* USE_MPE */

    if (mpi_rank == 1)
    {
       printf("\n*** tst_parallel testing very basic parallel access.\n");
       printf("*** tst_parallel testing whether we can create file for parallel access and write to it...");
    }

    /* Create phony data. We're going to write a 24x24 array of ints,
       in 4 sets of 144. */
    /*printf("mpi_rank*QTR_DATA=%d (mpi_rank+1)*QTR_DATA-1=%d\n",
      mpi_rank*QTR_DATA, (mpi_rank+1)*QTR_DATA);*/
    for (i = mpi_rank * QTR_DATA; i < (mpi_rank + 1) * QTR_DATA; i++)
       data[i] = mpi_rank;
    for (i = 0; i < DIMSIZE * DIMSIZE / 4; i++)
       slab_data[i] = mpi_rank;

#ifdef USE_MPE
    MPE_Log_event(e_init, 0, "end init");
    MPE_Log_event(s_define, 0, "start define file");
#endif /* USE_MPE */

    /* Create a parallel netcdf-4 file. */
    /*nc_set_log_level(3);*/
    sprintf(file_name, "%s/%s", TEMP_LARGE, FILE);
    if ((res = nc_create_par(file_name, NC_NETCDF4|NC_MPIIO, comm, 
			     info, &ncid))) ERR;

    /* Create three dimensions. */
    if (nc_def_dim(ncid, "d1", DIMSIZE, dimids)) ERR;
    if (nc_def_dim(ncid, "d2", DIMSIZE, &dimids[1])) ERR;
    if (nc_def_dim(ncid, "d3", NUM_SLABS, &dimids[2])) ERR;

    /* Create one var. */
    if ((res = nc_def_var(ncid, "v1", NC_INT, NDIMS, dimids, &v1id))) ERR;

    /* Write metadata to file. */
    if ((res = nc_enddef(ncid))) ERR;

#ifdef USE_MPE
    MPE_Log_event(e_define, 0, "end define file");
    if (mpi_rank)
       sleep(mpi_rank);
#endif /* USE_MPE */

    /* Set up slab for this process. */
    start[0] = mpi_rank * DIMSIZE/mpi_size;
    start[1] = 0;
    count[0] = DIMSIZE/mpi_size;
    count[1] = DIMSIZE;
    count[2] = 1;
    /*printf("mpi_rank=%d start[0]=%d start[1]=%d count[0]=%d count[1]=%d\n",
      mpi_rank, start[0], start[1], count[0], count[1]);*/

    if (nc_var_par_access(ncid, v1id, NC_COLLECTIVE)) ERR;
/*    if (nc_var_par_access(ncid, v1id, NC_INDEPENDENT)) ERR;*/

    for (start[2] = 0; start[2] < NUM_SLABS; start[2]++)
    {
#ifdef USE_MPE
       MPE_Log_event(s_write, 0, "start write slab");
#endif /* USE_MPE */

       /* Write slabs of phoney data. */
       if (nc_put_vara_int(ncid, v1id, start, count, slab_data)) ERR;
#ifdef USE_MPE
       MPE_Log_event(e_write, 0, "end write file");
#endif /* USE_MPE */
    }

#ifdef USE_MPE
    MPE_Log_event(s_close, 0, "start close file");
#endif /* USE_MPE */

    /* Close the netcdf file. */
    if ((res = nc_close(ncid)))	ERR;
    
#ifdef USE_MPE
    MPE_Log_event(e_close, 0, "end close file");
#endif /* USE_MPE */

    /* Delete this large file. */
    remove(file_name); 

    /* Shut down MPI. */
    MPI_Finalize();

    if (mpi_rank == 1)
    {
       SUMMARIZE_ERR;
       FINAL_RESULTS;
    }
    return 0;
}