Example #1
static
void check_attached_buffer_usage(int ncid,
                                 MPI_Offset expected_size,
                                 MPI_Offset expected_usage,
                                 int lineno)
/* check attached buf usage */
{
    int err, rank;
    MPI_Offset usage, buf_size;

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank >= 4) return;

    err = ncmpi_inq_buffer_size(ncid, &buf_size);
    if (err != NC_NOERR) printf("Error at line=%d: %s\n", __LINE__, ncmpi_strerror(err));
    if (expected_size != buf_size)
        printf("Error at line %d: expect buffer size %lld but got %lld\n",
               lineno, expected_size, buf_size);

    err = ncmpi_inq_buffer_usage(ncid, &usage);
    if (err != NC_NOERR) printf("Error at line=%d: %s\n", __LINE__, ncmpi_strerror(err));
    if (expected_usage != usage)
        printf("Error at line %d: expect buffer usage %lld but got %lld\n",
               lineno, expected_usage, usage);
}
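For context, this checker is typically used around PnetCDF's buffered non-blocking (bput) interface, which copies data into a buffer attached with ncmpi_buffer_attach(). The sketch below shows one such pairing; the helper name demo_bput_usage, the buffer size, and the start/count/buf arguments are illustrative assumptions, not part of the original test.

static void demo_bput_usage(int ncid, int varid,
                            MPI_Offset *start, MPI_Offset *count, int *buf)
{
    int err, req, status;
    MPI_Offset bufsize = 1024; /* illustrative attached-buffer size in bytes */

    err = ncmpi_buffer_attach(ncid, bufsize);   /* reserve the bput buffer */
    if (err != NC_NOERR) printf("Error at line=%d: %s\n", __LINE__, ncmpi_strerror(err));
    check_attached_buffer_usage(ncid, bufsize, 0, __LINE__); /* nothing used yet */

    /* buffered non-blocking write: data is copied into the attached buffer,
     * so the reported usage grows until the request is flushed */
    err = ncmpi_bput_vara_int(ncid, varid, start, count, buf, &req);
    if (err != NC_NOERR) printf("Error at line=%d: %s\n", __LINE__, ncmpi_strerror(err));

    err = ncmpi_wait_all(ncid, 1, &req, &status); /* flush; usage drops back to 0 */
    check_attached_buffer_usage(ncid, bufsize, 0, __LINE__);

    err = ncmpi_buffer_detach(ncid);             /* buffer must be unused to detach */
    if (err != NC_NOERR) printf("Error at line=%d: %s\n", __LINE__, ncmpi_strerror(err));
}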
Example #2
void
check_err(const int stat, const int line, const char *file) {
    if (stat != NC_NOERR) {
        (void) fprintf(stderr, "line %d of %s: %s\n", line, file, ncmpi_strerror(stat));
        exit(1);
    }
}
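Helpers like this are normally invoked right after each PnetCDF call, passing the current source location so the message points at the failing line. A minimal sketch (the wrapper name and filename are illustrative assumptions):

static void create_checked(int *ncidp)
{
    /* propagate the call site into the error message via __LINE__/__FILE__ */
    int stat = ncmpi_create(MPI_COMM_WORLD, "testfile.nc", NC_CLOBBER,
                            MPI_INFO_NULL, ncidp);
    check_err(stat, __LINE__, __FILE__);
}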
Example #3
static int
check_num_pending_reqs(int ncid, int expected, int lineno)
/* check if PnetCDF reports the expected number of pending requests */
{
    int err, n_pendings;
    err = ncmpi_inq_nreqs(ncid, &n_pendings);
    if (err != NC_NOERR) printf("Error at line=%d: %s\n", __LINE__, ncmpi_strerror(err));
    if (n_pendings != expected) {
        printf("Error at line %d: expect %d pending requests but got %d\n",
               lineno, expected, n_pendings);
        return 1;
    }
    return 0;
}
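Pending requests are created by the non-blocking ncmpi_iput_*/ncmpi_iget_* calls and consumed by ncmpi_wait_all(); the sketch below shows how the counter checked above evolves. The helper name and its arguments are illustrative assumptions, not part of the original test.

static int demo_pending_reqs(int ncid, int varid,
                             MPI_Offset *start, MPI_Offset *count, int *buf)
{
    int err, req, status, nerrs = 0;

    /* posting a non-blocking write adds one pending request */
    err = ncmpi_iput_vara_int(ncid, varid, start, count, buf, &req);
    if (err != NC_NOERR) printf("Error at line=%d: %s\n", __LINE__, ncmpi_strerror(err));
    nerrs += check_num_pending_reqs(ncid, 1, __LINE__);

    /* waiting flushes the request, so the count drops back to zero */
    err = ncmpi_wait_all(ncid, 1, &req, &status);
    if (err != NC_NOERR) printf("Error at line=%d: %s\n", __LINE__, ncmpi_strerror(err));
    nerrs += check_num_pending_reqs(ncid, 0, __LINE__);

    return nerrs;
}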
Example #4
static
void clear_file_contents(int ncid, int *varid)
{
    int i, err, rank;
    unsigned int *w_buffer = (unsigned int*) malloc(NY*NX * sizeof(unsigned int));
    for (i=0; i<NY*NX; i++) w_buffer[i] = 99999;

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    for (i=0; i<4; i++) {
        err = ncmpi_put_var_uint_all(ncid, varid[i], w_buffer);
        if (err != NC_NOERR) printf("Error at line=%d: %s\n", __LINE__, ncmpi_strerror(err));
    }
    free(w_buffer);
}
Example #5
static
int check_contents_for_fail(int ncid, int *varid)
{
    /* all processes read entire variables back and check contents */
    int i, j, err, nprocs;
    unsigned int expected[4][NY*NX] = {{3, 3, 3, 1, 1, 0, 0, 2, 1, 1,
                                        0, 2, 2, 2, 3, 1, 1, 2, 2, 2,
                                        1, 1, 2, 3, 3, 3, 0, 0, 1, 1,
                                        0, 0, 0, 2, 1, 1, 1, 3, 3, 3},
                                       {2, 2, 2, 0, 0, 3, 3, 1, 0, 0,
                                        3, 1, 1, 1, 2, 0, 0, 1, 1, 1,
                                        0, 0, 1, 2, 2, 2, 3, 3, 0, 0,
                                        3, 3, 3, 1, 0, 0, 0, 2, 2, 2},
                                       {1, 1, 1, 3, 3, 2, 2, 0, 3, 3,
                                        2, 0, 0, 0, 1, 3, 3, 0, 0, 0,
                                        3, 3, 0, 1, 1, 1, 2, 2, 3, 3,
                                        2, 2, 2, 0, 3, 3, 3, 1, 1, 1},
                                       {0, 0, 0, 2, 2, 1, 1, 3, 2, 2,
                                        1, 3, 3, 3, 0, 2, 2, 3, 3, 3,
                                        2, 2, 3, 0, 0, 0, 1, 1, 2, 2,
                                        1, 1, 1, 3, 2, 2, 2, 0, 0, 0}};

    unsigned int *r_buffer = (unsigned int*) malloc(NY*NX * sizeof(unsigned int));

    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    if (nprocs > 4) MPI_Barrier(MPI_COMM_WORLD);

    for (i=0; i<4; i++) {
        for (j=0; j<NY*NX; j++) r_buffer[j] = 99999;
        err = ncmpi_get_var_uint_all(ncid, varid[i], r_buffer);
        if (err != NC_NOERR) printf("Error at line=%d: %s\n", __LINE__, ncmpi_strerror(err));

        /* check if the contents of buf are expected */
        for (j=0; j<NY*NX; j++) {
            if (expected[i][j] >= nprocs) continue;
            if (r_buffer[j] != expected[i][j]) {
                printf("Expected read buf[%d][%d]=%u, but got %u\n",
                       i,j,expected[i][j],r_buffer[j]);
                free(r_buffer);
                return 1;
            }
        }
    }
    free(r_buffer);
    return 0;
}
Example #6
static void handle_error(int status) {
  fprintf(stderr, "%s\n", ncmpi_strerror(status));
}
Example #7
/**
 * Write a parallel-netcdf file.
 *
 * We assume here that localData is a scalar field (one value per cell).
 *
 * Pnetcdf uses row-major format (same as FFTW).
 *
 * \param[in]  filename  : PnetCDF filename
 * \param[in]  starts    : offsets at which to start writing data
 * \param[in]  counts    : number of elements written (3D sub-domain inside global)
 * \param[in]  gsizes    : global sizes
 * \param[in]  localData : actual data buffer (size : nx*ny*nz*sizeof(float))
 *
 */
void write_pnetcdf(const std::string &filename,
		   MPI_Offset         starts[3],
		   MPI_Offset         counts[3],
		   int                gsizes[3],
		   float            *localData)
{
  int myRank;
  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

  // netcdf file id
  int ncFileId;
  int err;

  // file creation mode
  int ncCreationMode = NC_CLOBBER;

  // CDF-5 is almost mandatory for very large files (>= 2x10^9 cells)
  // not useful here
  bool useCDF5 = false;
  if (useCDF5)
    ncCreationMode = NC_CLOBBER|NC_64BIT_DATA;
  else // use CDF-2 file format
    ncCreationMode = NC_CLOBBER|NC_64BIT_OFFSET;

  // verbose log ?
  //bool pnetcdf_verbose = false;

  int nbVar=1;
  int dimIds[3], varIds[nbVar];
  //MPI_Offset write_size, sum_write_size;
  MPI_Info mpi_info_used;
  //char str[512];

  // time measurement variables
  //float write_timing, max_write_timing, write_bw;

  /*
   * Create NetCDF file
   */
  err = ncmpi_create(MPI_COMM_WORLD, filename.c_str(),
		     ncCreationMode,
		     MPI_INFO_NULL, &ncFileId);
  if (err != NC_NOERR) {
    printf("Error: ncmpi_create() file %s (%s)\n",filename.c_str(),ncmpi_strerror(err));
    MPI_Abort(MPI_COMM_WORLD, -1);
    exit(1);
  }

  /*
   * Define global dimensions
   */
  err = ncmpi_def_dim(ncFileId, "x", gsizes[0], &dimIds[0]);
  PNETCDF_HANDLE_ERROR;

  err = ncmpi_def_dim(ncFileId, "y", gsizes[1], &dimIds[1]);
  PNETCDF_HANDLE_ERROR;

  err = ncmpi_def_dim(ncFileId, "z", gsizes[2], &dimIds[2]);
  PNETCDF_HANDLE_ERROR;

  /*
   * Define variables to write (give a name)
   */
  nc_type       ncDataType =  NC_FLOAT;
  MPI_Datatype mpiDataType = MPI_FLOAT;

  err = ncmpi_def_var(ncFileId, "data", ncDataType, 3, dimIds, &varIds[0]);
  PNETCDF_HANDLE_ERROR;

  /*
   * global attributes
   */
  // did we use CDF-2 or CDF-5
  {
    int useCDF5_int = useCDF5 ? 1 : 0;
    err = ncmpi_put_att_int(ncFileId, NC_GLOBAL, "CDF-5 mode", NC_INT, 1, &useCDF5_int);
    PNETCDF_HANDLE_ERROR;
  }

  /*
   * exit the define mode
   */
  err = ncmpi_enddef(ncFileId);
  PNETCDF_HANDLE_ERROR;

  /*
   * Get all the MPI_IO hints used
   */
  err = ncmpi_get_file_info(ncFileId, &mpi_info_used);
  PNETCDF_HANDLE_ERROR;

  // copy data to write in intermediate buffer
  int nItems = counts[IX]*counts[IY]*counts[IZ];

  {

    // debug
    // printf("Pnetcdf [rank=%d] starts=%lld %lld %lld, counts =%lld %lld %lld, gsizes=%d %d %d\n",
    //	   myRank,
    //	   starts[0],starts[1],starts[2],
    //	   counts[0],counts[1],counts[2],
    //	   gsizes[0],gsizes[1],gsizes[2]);

    /*
     * make sure PnetCDF doesn't complain when starts lies outside the global
     * domain bounds. When nItems is zero we of course write nothing, but the
     * starts offsets still have to be inside the global domain, so there is
     * no harm in setting starts to the origin.
     */
    if (nItems == 0) {
      starts[0]=0;
      starts[1]=0;
      starts[2]=0;
    }

    err = ncmpi_put_vara_all(ncFileId,
			     varIds[0],
			     starts,
			     counts,
			     localData,
			     nItems,
			     mpiDataType);
    PNETCDF_HANDLE_ERROR;
  }


  /*
   * close the file
   */
  err = ncmpi_close(ncFileId);
  PNETCDF_HANDLE_ERROR;

} // write_pnetcdf
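The starts/counts/gsizes arguments describe the per-rank sub-domain of the global array. Below is a minimal sketch of a caller setting them up for a slab decomposition, one slab of thickness nz per rank stacked along the first dimension; the helper name, the local sizes, and the assumption that dimension 0 is the slowest-varying one are illustrative (the exact IX/IY/IZ mapping depends on how those macros are defined in the original code).

void write_my_slab(const std::string &filename, float *localData,
                   int nx, int ny, int nz)
{
  int myRank, nProcs;
  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
  MPI_Comm_size(MPI_COMM_WORLD, &nProcs);

  int        gsizes[3] = { nz * nProcs, ny, nx };           /* global shape     */
  MPI_Offset counts[3] = { nz, ny, nx };                    /* local sub-domain */
  MPI_Offset starts[3] = { (MPI_Offset)myRank * nz, 0, 0 }; /* slab offset      */

  write_pnetcdf(filename, starts, counts, gsizes, localData);
}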
Example #8
/**
 * Read a single-precision parallel-netcdf file.
 *
 * We assume here that localData is a scalar field (one value per cell).
 *
 * Pnetcdf uses row-major format (same as FFTW).
 *
 * \param[in]  filename  : PnetCDF filename
 * \param[in]  starts    : offset to where to start reading data
 * \param[in]  counts    : number of elements read (3D sub-domain inside global)
 * \param[in]  gsizes    : global sizes
 * \param[out] localData : actual data buffer (size : nx*ny*nz*sizeof(float))
 *
 * localData must have been allocated prior to calling this routine.
 */
void read_pnetcdf(const std::string &filename,
		  MPI_Offset         starts[3],
		  MPI_Offset         counts[3],
		  int                gsizes[3],
		  float            *localData)
{

  int myRank;
  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

  // netcdf file id
  int ncFileId;
  int err;

  // file opening mode
  int ncOpenMode = NC_NOWRITE;

  int nbVar=1;
  int varIds[nbVar];
  MPI_Info mpi_info_used;

  /*
   * Open NetCDF file
   */
  err = ncmpi_open(MPI_COMM_WORLD, filename.c_str(), 
		   ncOpenMode,
		   MPI_INFO_NULL, &ncFileId);
  if (err != NC_NOERR) {
    printf("Error: ncmpi_open() file %s (%s)\n",filename.c_str(),ncmpi_strerror(err));
    MPI_Abort(MPI_COMM_WORLD, -1);
    exit(1);
  }

  /*
   * Query NetCDF mode
   */
  int NC_mode;
  err = ncmpi_inq_version(ncFileId, &NC_mode);
  if (myRank==0) {
    if (NC_mode == NC_64BIT_DATA)
      std::cout << "Pnetcdf Input mode : NC_64BIT_DATA (CDF-5)\n";
    else if (NC_mode == NC_64BIT_OFFSET)
      std::cout << "Pnetcdf Input mode : NC_64BIT_OFFSET (CDF-2)\n";
    else
      std::cout << "Pnetcdf Input mode : unknown\n";
  }

  /*
   * Query information about variable named "data"
   */
  {
    int ndims, nvars, ngatts, unlimited;
    err = ncmpi_inq(ncFileId, &ndims, &nvars, &ngatts, &unlimited);
    PNETCDF_HANDLE_ERROR;

    err = ncmpi_inq_varid(ncFileId, "data", &varIds[0]);
    PNETCDF_HANDLE_ERROR;
  }

  /*
   * Define expected data types (no conversion done here)
   */
  MPI_Datatype mpiDataType = MPI_FLOAT;

  /*
   * Get all the MPI_IO hints used (just in case we want to print them after
   * reading the data).
   */
  err = ncmpi_get_file_info(ncFileId, &mpi_info_used);
  PNETCDF_HANDLE_ERROR;

  /*
   * Read heavy data (take care of row-major / column major format !)
   */
  int nItems = counts[IX]*counts[IY]*counts[IZ];
  {

    err = ncmpi_get_vara_all(ncFileId,
			     varIds[0],
			     starts,
			     counts,
			     localData,
			     nItems,
			     mpiDataType);
    PNETCDF_HANDLE_ERROR;
  } // end reading heavy data

  /*
   * close the file
   */
  err = ncmpi_close(ncFileId);
  PNETCDF_HANDLE_ERROR;

} // read_pnetcdf
Example #9
static void handle_error_nc(int ncerr, char *str)
{
    fprintf(stderr, "%s: %s\n", str, ncmpi_strerror(ncerr));
    MPI_Abort(MPI_COMM_WORLD, 1);
}
Example #10
/*----< main() >------------------------------------------------------------*/
int main(int argc, char **argv)
{
    extern int optind;
    char *filename="testfile.nc";
    int i, j, rank, nprocs, len, ncid, bufsize, verbose=1, err;
    int psizes[2], local_rank[2], dimids[2], varid, nghosts;
    int *buf, *buf_ptr;
    MPI_Offset gsizes[2], starts[2], counts[2], imap[2];

    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* get command-line arguments */
    while ((i = getopt(argc, argv, "hq")) != EOF)
        switch(i) {
            case 'q': verbose = 0;
                      break;
            case 'h':
            default:  if (rank==0) usage(argv[0]);
                      MPI_Finalize();
                      return 0;
        }
    argc -= optind;
    argv += optind;
    if (argc >= 1) filename = argv[0];  /* optional argument */
    len = 4;
    if (argc >= 2) len = atoi(argv[1]); /* optional argument */

    /* calculate number of processes along each dimension */
    psizes[0] = psizes[1] = 0;
    MPI_Dims_create(nprocs, 2, psizes);
    if (verbose && rank == 0)
        printf("psizes=%d %d\n", psizes[0], psizes[1]);

    gsizes[0] = len     * psizes[0]; /* global array size */
    gsizes[1] = (len+1) * psizes[1];
    if (verbose && rank == 0)
        printf("global variable shape: %lld %lld\n", gsizes[0],gsizes[1]);

    /* find its local rank IDs along each dimension */
    local_rank[0] = rank / psizes[1];
    local_rank[1] = rank % psizes[1];
    if (verbose)
        printf("rank %d: dim rank=%d %d\n", rank,local_rank[0],local_rank[1]);

    counts[0] = len;
    counts[1] = len+1;
    starts[0] = local_rank[0] * counts[0];
    starts[1] = local_rank[1] * counts[1];
    if (verbose)
        printf("starts=%lld %lld counts=%lld %lld\n",starts[0],starts[1],counts[0],counts[1]);

    /* allocate and initialize buffer with ghost cells on both ends of each dim */
    nghosts = 2;
    bufsize = (counts[0] + 2 * nghosts) * (counts[1] + 2 * nghosts);
    buf = (int *) malloc(bufsize * sizeof(int));
    for (i=0; i<counts[0]+2*nghosts; i++)
    for (j=0; j<counts[1]+2*nghosts; j++) {
        if (nghosts <= i && i < counts[0]+nghosts &&
            nghosts <= j && j < counts[1]+nghosts)
            buf[i*(counts[1]+2*nghosts) + j] = rank;
        else
            buf[i*(counts[1]+2*nghosts) + j] = -8; /* all ghost cells have value -8 */
    }

    /* create the file */
    err = ncmpi_create(MPI_COMM_WORLD, filename, NC_CLOBBER|NC_64BIT_DATA,
                       MPI_INFO_NULL, &ncid);
    if (err != NC_NOERR) {
        printf("Error: ncmpi_create() file %s (%s)\n",filename,ncmpi_strerror(err));
        MPI_Abort(MPI_COMM_WORLD, -1);
        exit(1);
    }

    /* define dimensions */
    err = ncmpi_def_dim(ncid, "Y", gsizes[0], &dimids[0]);
    HANDLE_ERROR
    err = ncmpi_def_dim(ncid, "X", gsizes[1], &dimids[1]);
    HANDLE_ERROR

    /* define variable */
    err = ncmpi_def_var(ncid, "var", NC_INT, 2, dimids, &varid);
    HANDLE_ERROR

    /* exit the define mode */
    err = ncmpi_enddef(ncid);
    HANDLE_ERROR

    /* set up imap[] for excluding ghost cells */
    imap[1] = 1;
    imap[0] = counts[1] + 2 * nghosts;

    /* find the first non-ghost cell of the user buf */
    buf_ptr = buf + nghosts * (counts[1]+2*nghosts + 1);

    /* write the whole variable in file */
    err = ncmpi_put_varm_int_all(ncid, varid, starts, counts, NULL, imap, buf_ptr);
    HANDLE_ERROR

    /* close the file */
    err = ncmpi_close(ncid);
    HANDLE_ERROR

    free(buf);

    MPI_Finalize();
    return 0;
}
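For reference, the imap vector gives the memory stride (in array elements) for each dimension of the file-space subarray: element (i, j) of the written region is taken from buf_ptr[i*imap[0] + j*imap[1]], which is how the ghost cells get skipped. The small sketch below checks that mapping against the ghosted layout built above; the helper name is an illustrative assumption.

static void check_imap_layout(const int *buf, const int *buf_ptr,
                              const MPI_Offset *counts, const MPI_Offset *imap,
                              int nghosts)
{
    MPI_Offset i, j;
    for (i = 0; i < counts[0]; i++)
        for (j = 0; j < counts[1]; j++) {
            /* element (i, j) as addressed by ncmpi_put_varm via imap */
            const int *p = buf_ptr + i * imap[0] + j * imap[1];
            /* the same non-ghost element addressed directly in the ghosted buffer */
            const int *q = buf + (i + nghosts) * (counts[1] + 2 * nghosts)
                               + (j + nghosts);
            if (p != q)
                printf("imap mismatch at (%lld, %lld)\n", i, j);
        }
}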
Example #11
void read_pnetcdf(const std::string &filename,
		  int                iVar,
		  ConfigMap         &configMap, 
		  HostArray<double> &localData)
{
  int myRank;
  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
  int nbMpiProc;
  MPI_Comm_size(MPI_COMM_WORLD, &nbMpiProc);

  // netcdf file id
  int ncFileId;
  int err;
  
  // ghostWidth
  int ghostWidth = configMap.getInteger("mesh","ghostWidth",3);

  // file creation mode
  int ncOpenMode = NC_NOWRITE;
  
  int varIds[8];
  MPI_Offset starts[3], counts[3]; // read_size, sum_read_size;
  MPI_Info mpi_info_used;
  
  // domain local size
  int nx,ny,nz;

  // sizes to read
  //int nx_r,  ny_r,  nz_r;  // logical sizes / per sub-domain
  //int nx_g, ny_g, nz_g; // sizes with ghost zones included / per sub-domain

  /* read domain sizes */
  nx=configMap.getInteger("mesh","nx",32);
  ny=configMap.getInteger("mesh","ny",32);
  nz=configMap.getInteger("mesh","nz",32);

  // nx_g = nx+2*ghostWidth;
  // ny_g = ny+2*ghostWidth;
  // nz_g = nz+2*ghostWidth;

  // get input filename from configMap
  //std::string filename = configMap.getString("input", "filename", "");

  /* 
   * Open NetCDF file
   */
  err = ncmpi_open(MPI_COMM_WORLD, filename.c_str(), 
		   ncOpenMode,
		   MPI_INFO_NULL, &ncFileId);
  if (err != NC_NOERR) {
    printf("Error: ncmpi_open() file %s (%s)\n",filename.c_str(),ncmpi_strerror(err));
    MPI_Abort(MPI_COMM_WORLD, -1);
    exit(1);
  }

  /*
   * Query NetCDF mode
   */
  int NC_mode;
  err = ncmpi_inq_version(ncFileId, &NC_mode);
  if (myRank==0) {
    if (NC_mode == NC_64BIT_DATA)
      std::cout << "Pnetcdf Input mode : NC_64BIT_DATA (CDF-5)\n";
    else if (NC_mode == NC_64BIT_OFFSET)
      std::cout << "Pnetcdf Input mode : NC_64BIT_OFFSET (CDF-2)\n";
    else
      std::cout << "Pnetcdf Input mode : unknown\n";
  }

  /*
   * Query information about variables
   */
  {
    int ndims, nvars, ngatts, unlimited;
    err = ncmpi_inq(ncFileId, &ndims, &nvars, &ngatts, &unlimited);
    PNETCDF_HANDLE_ERROR;

    err = ncmpi_inq_varid(ncFileId, "rho", &varIds[ID]);
    PNETCDF_HANDLE_ERROR;
    err = ncmpi_inq_varid(ncFileId, "E", &varIds[IP]);
    PNETCDF_HANDLE_ERROR;
    err = ncmpi_inq_varid(ncFileId, "rho_vx", &varIds[IU]);
    PNETCDF_HANDLE_ERROR;
    err = ncmpi_inq_varid(ncFileId, "rho_vy", &varIds[IV]);
    PNETCDF_HANDLE_ERROR;
    err = ncmpi_inq_varid(ncFileId, "rho_vz", &varIds[IW]);
    PNETCDF_HANDLE_ERROR;    
    err = ncmpi_inq_varid(ncFileId, "Bx", &varIds[IA]);
    PNETCDF_HANDLE_ERROR;
    err = ncmpi_inq_varid(ncFileId, "By", &varIds[IB]);
    PNETCDF_HANDLE_ERROR;
    err = ncmpi_inq_varid(ncFileId, "Bz", &varIds[IC]);
    PNETCDF_HANDLE_ERROR;	
  } // end query information

  /* 
   * Define expected data types (no conversion done here)
   */
  //nc_type ncDataType;
  MPI_Datatype mpiDataType;
  
  //ncDataType  = NC_DOUBLE;
  mpiDataType = MPI_DOUBLE;

  /* 
   * Get all the MPI_IO hints used (just in case we want to print them after
   * reading the data).
   */
  err = ncmpi_get_file_info(ncFileId, &mpi_info_used);
  PNETCDF_HANDLE_ERROR;

  /*
   * Read heavy data (take care of row-major / column major format !)
   */
  // use overlapping domains
  // counts[IZ] = nx_rg;
  // counts[IY] = ny_rg;
  // counts[IX] = nz_rg;
  
  // starts[IZ] = 0;
  // starts[IY] = 0;
  // starts[IX] = myRank*nz_r;

  counts[IZ] = nx;
  counts[IY] = ny;
  counts[IX] = nz;
  
  starts[IZ] = ghostWidth;
  starts[IY] = ghostWidth;
  starts[IX] = ghostWidth+myRank*nz;

  int nItems = counts[IX]*counts[IY]*counts[IZ];

  /*
   * Actual reading
   */
  {
    double* data;
    //data = &(localData(0,0,0,0));
    data = localData.data();
    
    err = ncmpi_get_vara_all(ncFileId, varIds[iVar], 
			     starts, counts, data, nItems, mpiDataType);
    PNETCDF_HANDLE_ERROR;

  } // end reading heavy data

  /* 
   * close the file 
   */
  err = ncmpi_close(ncFileId);
  PNETCDF_HANDLE_ERROR;

} // read_pnetcdf
Example #12
/*
 * adapted from HydroRunBaseMpi::outputPnetcdf
 *
 * assumes here that localData has size nx,ny,nz (no ghostWidth)
 *
 * see : test_pnetcdf_write.cpp
 *
 * Note that if ghostIncluded is false, localData must be sized nx,ny,nz;
 * otherwise its size must be nx+2*ghostWidth,ny+2*ghostWidth,nz+2*ghostWidth
 *
 */
void write_pnetcdf(const std::string &filename,
		   HostArray<double> &localData,
		   ConfigMap         &configMap)
{
  int myRank;
  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

  // read local domain sizes
  int nx=configMap.getInteger("mesh","nx",32);
  int ny=configMap.getInteger("mesh","ny",32);
  int nz=configMap.getInteger("mesh","nz",32);

  // read mpi geometry
  int mx=configMap.getInteger("mpi","mx",1);
  int my=configMap.getInteger("mpi","my",1);
  int mz=configMap.getInteger("mpi","mz",1);


  // MPI cartesian coordinates
  // myRank = mpiCoord[0] + mx*mpiCoord[1] + mx*my*mpiCoord[2]
  int mpiCoord[3];
  {
    mpiCoord[2] =  myRank/(mx*my);
    mpiCoord[1] = (myRank - mx*my*mpiCoord[2])/mx;
    mpiCoord[0] =  myRank - mx*my*mpiCoord[2] -mx*mpiCoord[1];
  }

  bool ghostIncluded = configMap.getBool("output", "ghostIncluded",false);
  int  ghostWidth    = configMap.getInteger("mesh","ghostWidth",3);

  // global size
  int NX=nx*mx, NY=ny*my, NZ=nz*mz;
  int gsizes[3];
  gsizes[IZ] = NX;
  gsizes[IY] = NY;
  gsizes[IX] = NZ;
  
  if ( ghostIncluded ) {
    gsizes[IZ] += 2*ghostWidth;
    gsizes[IY] += 2*ghostWidth;
    gsizes[IX] += 2*ghostWidth;
  }

  // netcdf file id
  int ncFileId;
  int err;

  // file creation mode
  int ncCreationMode = NC_CLOBBER;
  bool useCDF5 = configMap.getBool("output","pnetcdf_cdf5",false);
  if (useCDF5)
    ncCreationMode = NC_CLOBBER|NC_64BIT_DATA;
  else // use CDF-2 file format
    ncCreationMode = NC_CLOBBER|NC_64BIT_OFFSET;

  // verbose log ?
  bool pnetcdf_verbose = configMap.getBool("output","pnetcdf_verbose",false);
  
  int nbVar=8;
  int dimIds[3], varIds[nbVar];
  MPI_Offset write_size, sum_write_size;
  MPI_Info mpi_info_used;
  char str[512];
  
  // time measurement variables
  double write_timing, max_write_timing, write_bw;

  /*
   * writing parameter (offset and size)
   */
  MPI_Offset         starts[3] = {0};
  MPI_Offset         counts[3] = {nz, ny, nx};
  
  // take care of row-major / column-major ordering
  starts[IZ] = mpiCoord[IX]*nx;
  starts[IY] = mpiCoord[IY]*ny;
  starts[IX] = mpiCoord[IZ]*nz;

  if ( ghostIncluded ) {

    if ( mpiCoord[IX] == 0 )
      counts[IZ] += ghostWidth;
    if ( mpiCoord[IY] == 0 )
      counts[IY] += ghostWidth;
    if ( mpiCoord[IZ] == 0 )
      counts[IX] += ghostWidth;

    if ( mpiCoord[IX] == mx-1 )
      counts[IZ] += ghostWidth;
    if ( mpiCoord[IY] == my-1 )
      counts[IY] += ghostWidth;
    if ( mpiCoord[IZ] == mz-1 )
      counts[IX] += ghostWidth;

    starts[IZ] += ghostWidth;
    starts[IY] += ghostWidth;
    starts[IX] += ghostWidth;

    if ( mpiCoord[IX] == 0 )
      starts[IZ] -= ghostWidth;
    if ( mpiCoord[IY] == 0 )
      starts[IY] -= ghostWidth;
    if ( mpiCoord[IZ] == 0 )
      starts[IX] -= ghostWidth;
  
  }

  /* 
   * Create NetCDF file
   */
  err = ncmpi_create(MPI_COMM_WORLD, filename.c_str(), 
		     ncCreationMode,
		     MPI_INFO_NULL, &ncFileId);
  if (err != NC_NOERR) {
    printf("Error: ncmpi_create() file %s (%s)\n",filename.c_str(),ncmpi_strerror(err));
    MPI_Abort(MPI_COMM_WORLD, -1);
    exit(1);
  }

  /*
   * Define dimensions
   */
  err = ncmpi_def_dim(ncFileId, "x", gsizes[0], &dimIds[0]);
  PNETCDF_HANDLE_ERROR;
  
  err = ncmpi_def_dim(ncFileId, "y", gsizes[1], &dimIds[1]);
  PNETCDF_HANDLE_ERROR;
  
  err = ncmpi_def_dim(ncFileId, "z", gsizes[2], &dimIds[2]);
  PNETCDF_HANDLE_ERROR;

  /* 
   * Define variables
   */
  nc_type       ncDataType =  NC_DOUBLE;
  MPI_Datatype mpiDataType = MPI_DOUBLE;

  err = ncmpi_def_var(ncFileId, "rho", ncDataType, 3, dimIds, &varIds[ID]);
  PNETCDF_HANDLE_ERROR;
  err = ncmpi_def_var(ncFileId, "E", ncDataType, 3, dimIds, &varIds[IP]);
  PNETCDF_HANDLE_ERROR;
  err = ncmpi_def_var(ncFileId, "rho_vx", ncDataType, 3, dimIds, &varIds[IU]);
  PNETCDF_HANDLE_ERROR;
  err = ncmpi_def_var(ncFileId, "rho_vy", ncDataType, 3, dimIds, &varIds[IV]);
  PNETCDF_HANDLE_ERROR;
  err = ncmpi_def_var(ncFileId, "rho_vz", ncDataType, 3, dimIds, &varIds[IW]);
  PNETCDF_HANDLE_ERROR;
  
  err = ncmpi_def_var(ncFileId, "Bx", ncDataType, 3, dimIds, &varIds[IA]);
  PNETCDF_HANDLE_ERROR;
  err = ncmpi_def_var(ncFileId, "By", ncDataType, 3, dimIds, &varIds[IB]);
  PNETCDF_HANDLE_ERROR;
  err = ncmpi_def_var(ncFileId, "Bz", ncDataType, 3, dimIds, &varIds[IC]);
  PNETCDF_HANDLE_ERROR;

  /*
   * global attributes
   */
  // did we use CDF-2 or CDF-5
  {
    int useCDF5_int = useCDF5 ? 1 : 0;
    err = ncmpi_put_att_int(ncFileId, NC_GLOBAL, "CDF-5 mode", NC_INT, 1, &useCDF5_int);
    PNETCDF_HANDLE_ERROR;
  }
  
  /* 
   * exit the define mode 
   */
  err = ncmpi_enddef(ncFileId);
  PNETCDF_HANDLE_ERROR;
  
  /* 
   * Get all the MPI_IO hints used
   */
  err = ncmpi_get_file_info(ncFileId, &mpi_info_used);
  PNETCDF_HANDLE_ERROR;
  
  int nItems = counts[IX]*counts[IY]*counts[IZ];
  
  for (int iVar=0; iVar<nbVar; iVar++) {
    double *data = &(localData(0,0,0,iVar));
    err = ncmpi_put_vara_all(ncFileId, varIds[iVar], starts, counts, data, nItems, mpiDataType);
    PNETCDF_HANDLE_ERROR;
  }

  /* 
   * close the file 
   */
  err = ncmpi_close(ncFileId);
  PNETCDF_HANDLE_ERROR;
  
} // write_pnetcdf
Example #13
static void handle_error(int err, int lineno)
{
    fprintf(stderr, "Error at line %d of %s: %s\n", lineno, __FILE__, ncmpi_strerror(err));
    MPI_Abort(MPI_COMM_WORLD, 1);
}
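The bare HANDLE_ERROR statements that follow each err assignment in Example #10 rely on a macro that is not shown in these snippets; a plausible definition wrapping a handler like the one above would be (an assumption, not the original macro):

/* plausible sketch only: report and abort if the preceding call failed */
#define HANDLE_ERROR { if (err != NC_NOERR) handle_error(err, __LINE__); }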
Example #14
int main(int argc, char** argv)
{
    extern int optind;
    char filename[256];
    int i, j, verbose=1, rank, nprocs, err, nerrs=0;
    int myNX, G_NX, myOff, num_reqs;
    int ncid, cmode, varid, dimid[2], *reqs, *sts, **buf;
    MPI_Offset start[2], count[2];
    MPI_Info info;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* get command-line arguments */
    while ((i = getopt(argc, argv, "hq")) != EOF)
        switch(i) {
            case 'q': verbose = 0;
                      break;
            case 'h':
            default:  if (rank==0) usage(argv[0]);
                      MPI_Finalize();
                      return 1;
        }
    if (argv[optind] == NULL) strcpy(filename, "testfile.nc");
    else                      snprintf(filename, 256, "%s", argv[optind]);

    /* set an MPI-IO hint to disable file offset alignment for fixed-size
     * variables */
    MPI_Info_create(&info);
    MPI_Info_set(info, "nc_var_align_size", "1");

    cmode = NC_CLOBBER | NC_64BIT_DATA;
    err = ncmpi_create(MPI_COMM_WORLD, filename, cmode, info, &ncid);
    ERR

    MPI_Info_free(&info);

    /* the global array is NY * (NX * nprocs) */
    G_NX  = NX * nprocs;
    myOff = NX * rank;
    myNX  = NX;
    if (verbose) printf("%2d: myOff=%3d myNX=%3d\n",rank,myOff,myNX);

    err = ncmpi_def_dim(ncid, "Y", NY, &dimid[0]);
    ERR
    err = ncmpi_def_dim(ncid, "X", G_NX, &dimid[1]);
    ERR
    err = ncmpi_def_var(ncid, "var", NC_INT, 2, dimid, &varid);
    ERR
    err = ncmpi_enddef(ncid);
    ERR

    /* First, fill the entire array with zeros, using a blocking I/O.
       Every process writes a subarray of size NY * myNX */
    buf    = (int**) malloc(myNX * sizeof(int*));
    buf[0] = (int*)  calloc(NY * myNX, sizeof(int));
    start[0] = 0;   start[1] = myOff;
    count[0] = NY;  count[1] = myNX;
    err = ncmpi_put_vara_int_all(ncid, varid, start, count, buf[0]);
    free(buf[0]);

    /* initialize the buffer with rank ID. Also make the case interesting,
       by allocating buffers separately */
    for (i=0; i<myNX; i++) {
        buf[i] = (int*) malloc(NY * sizeof(int));
        for (j=0; j<NY; j++) buf[i][j] = rank;
    }

    reqs = (int*) malloc(myNX * sizeof(int));
    sts  = (int*) malloc(myNX * sizeof(int));

    /* each proc writes myNX single columns of the 2D array */
    start[0]  = 0;   start[1] = rank;
    count[0]  = NY;  count[1] = 1;
    if (verbose)
        printf("%2d: start=%3lld %3lld count=%3lld %3lld\n",
               rank, start[0],start[1], count[0],count[1]);

    num_reqs = 0;
    for (i=0; i<myNX; i++) {
        err = ncmpi_iput_vara_int(ncid, varid, start, count, buf[i],
                                  &reqs[num_reqs++]);
        ERR
        start[1] += nprocs;
    }
    err = ncmpi_wait_all(ncid, num_reqs, reqs, sts);
    ERR

    /* check status of all requests */
    for (i=0; i<num_reqs; i++)
        if (sts[i] != NC_NOERR)
            printf("Error at line %d in %s: nonblocking write fails on request %d (%s)\n",
                   __LINE__,__FILE__,i, ncmpi_strerror(sts[i]));

    err = ncmpi_close(ncid); ERR

    /* read back using the same access pattern */
    err = ncmpi_open(MPI_COMM_WORLD, filename, NC_NOWRITE, info, &ncid); ERR

    err = ncmpi_inq_varid(ncid, "var", &varid); ERR

    for (i=0; i<myNX; i++)
        for (j=0; j<NY; j++) buf[i][j] = -1;

    /* each proc reads myNX single columns of the 2D array */
    start[0]  = 0;   start[1] = rank;
    count[0]  = NY;  count[1] = 1;

    num_reqs = 0;
    for (i=0; i<myNX; i++) {
        err = ncmpi_iget_vara_int(ncid, varid, start, count, buf[i],
                                  &reqs[num_reqs++]);
        ERR
        start[1] += nprocs;
    }
    err = ncmpi_wait_all(ncid, num_reqs, reqs, sts);
    ERR

    /* check status of all requests */
    for (i=0; i<num_reqs; i++)
        if (sts[i] != NC_NOERR)
            printf("Error at line %d in %s: nonblocking write fails on request %d (%s)\n",
                   __LINE__,__FILE__,i, ncmpi_strerror(sts[i]));

    for (i=0; i<myNX; i++) {
        for (j=0; j<NY; j++)
            if (buf[i][j] != rank)
                printf("Error at line %d in %s: expect buf[%d][%d]=%d but got %d\n",
                __LINE__,__FILE__,i,j,rank,buf[i][j]);
    }

    err = ncmpi_close(ncid);
    ERR

    free(sts);
    free(reqs);
    for (i=0; i<myNX; i++) free(buf[i]);
    free(buf);

    /* check if there is any PnetCDF internal malloc residue */
    MPI_Offset malloc_size, sum_size;
    err = ncmpi_inq_malloc_size(&malloc_size);
    if (err == NC_NOERR) {
        MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD);
        if (rank == 0 && sum_size > 0)
            printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n",
                   sum_size);
    }

    MPI_Finalize();
    return (nerrs > 0);
}
Example #15
static int test_ivarn(int ncid)
{
    int i, j, rank, nprocs, err, nerrs=0, num_reqs[4], dimids[2], varid[4], *buffer[4];
    int req[5], st[5];
    MPI_Offset **starts[4], **counts[4];

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* pick arbitrary numbers of requests for 4 processes */
    num_reqs[0] = 4;
    num_reqs[1] = 6;
    num_reqs[2] = 5;
    num_reqs[3] = 4;

    for (i=0; i<4; i++) {
        starts[i]    = (MPI_Offset**) malloc(num_reqs[i] *    sizeof(MPI_Offset*));
        counts[i]    = (MPI_Offset**) malloc(num_reqs[i] *    sizeof(MPI_Offset*));
        starts[i][0] = (MPI_Offset*)  calloc(num_reqs[i] * 2, sizeof(MPI_Offset));
        counts[i][0] = (MPI_Offset*)  calloc(num_reqs[i] * 2, sizeof(MPI_Offset));
        for (j=1; j<num_reqs[i]; j++) {
            starts[i][j] = starts[i][j-1] + 2;
            counts[i][j] = counts[i][j-1] + 2;
        }
    }

    /* assign arbitrary starts and counts */
    const int y=0, x=1;
    starts[0][0][y] = 0; starts[0][0][x] = 5; counts[0][0][y] = 1; counts[0][0][x] = 2;
    starts[0][1][y] = 1; starts[0][1][x] = 0; counts[0][1][y] = 1; counts[0][1][x] = 1;
    starts[0][2][y] = 2; starts[0][2][x] = 6; counts[0][2][y] = 1; counts[0][2][x] = 2;
    starts[0][3][y] = 3; starts[0][3][x] = 0; counts[0][3][y] = 1; counts[0][3][x] = 3;

    starts[1][0][y] = 0; starts[1][0][x] = 3; counts[1][0][y] = 1; counts[1][0][x] = 2;
    starts[1][1][y] = 0; starts[1][1][x] = 8; counts[1][1][y] = 1; counts[1][1][x] = 2;
    starts[1][2][y] = 1; starts[1][2][x] = 5; counts[1][2][y] = 1; counts[1][2][x] = 2;
    starts[1][3][y] = 2; starts[1][3][x] = 0; counts[1][3][y] = 1; counts[1][3][x] = 2;
    starts[1][4][y] = 2; starts[1][4][x] = 8; counts[1][4][y] = 1; counts[1][4][x] = 2;
    starts[1][5][y] = 3; starts[1][5][x] = 4; counts[1][5][y] = 1; counts[1][5][x] = 3;

    starts[2][0][y] = 0; starts[2][0][x] = 7; counts[2][0][y] = 1; counts[2][0][x] = 1;
    starts[2][1][y] = 1; starts[2][1][x] = 1; counts[2][1][y] = 1; counts[2][1][x] = 3;
    starts[2][2][y] = 1; starts[2][2][x] = 7; counts[2][2][y] = 1; counts[2][2][x] = 3;
    starts[2][3][y] = 2; starts[2][3][x] = 2; counts[2][3][y] = 1; counts[2][3][x] = 1;
    starts[2][4][y] = 3; starts[2][4][x] = 3; counts[2][4][y] = 1; counts[2][4][x] = 1;

    starts[3][0][y] = 0; starts[3][0][x] = 0; counts[3][0][y] = 1; counts[3][0][x] = 3;
    starts[3][1][y] = 1; starts[3][1][x] = 4; counts[3][1][y] = 1; counts[3][1][x] = 1;
    starts[3][2][y] = 2; starts[3][2][x] = 3; counts[3][2][y] = 1; counts[3][2][x] = 3;
    starts[3][3][y] = 3; starts[3][3][x] = 7; counts[3][3][y] = 1; counts[3][3][x] = 3;

    for (i=0; i<4; i++) {
        buffer[i] = (int*) malloc(4*10 * sizeof(int));
        for (j=0; j<4*10; j++) buffer[i][j] = rank+100;
    }

    TRC(ncmpi_redef)(ncid); CHECK_ERR
    err = ncmpi_def_dim(ncid, "M",  4, &dimids[0]); CHECK_ERR
    err = ncmpi_def_dim(ncid, "N", 10, &dimids[1]); CHECK_ERR
    err = ncmpi_def_var(ncid, "var0", NC_INT, 2, dimids, &varid[0]); CHECK_ERR
    err = ncmpi_def_var(ncid, "var1", NC_INT, 2, dimids, &varid[1]); CHECK_ERR
    err = ncmpi_def_var(ncid, "var2", NC_INT, 2, dimids, &varid[2]); CHECK_ERR
    err = ncmpi_def_var(ncid, "var3", NC_INT, 2, dimids, &varid[3]); CHECK_ERR
    TRC(ncmpi_enddef)(ncid); CHECK_ERR

    for (i=0; i<3; i++) {
        j = (nprocs > 1) ? (i + rank) % nprocs : i;
        TRC(ncmpi_iput_varn_int)(ncid, varid[j], num_reqs[j], starts[j], counts[j], buffer[j], &req[i]); CHECK_ERR
    }
    TRC(ncmpi_wait_all)(ncid, 3, req, st); CHECK_ERR

    j = (nprocs > 1) ? (3 + rank) % nprocs : 3;
    TRC(ncmpi_iput_varn_int)(ncid, varid[j], num_reqs[j], starts[j], counts[j], buffer[j], &req[3]); CHECK_ERR
    for (i=0; i<3; i++) {
        j = (nprocs > 1) ? (i + rank) % nprocs : i;
        TRC(ncmpi_iget_varn_int)(ncid, varid[j], num_reqs[j], starts[j], counts[j], buffer[j], &req[i]); CHECK_ERR
    }
    TRC(ncmpi_wait_all)(ncid, 4, req, st); CHECK_ERR
    if (err != NC_NOERR) {
        for (i=0; i<4; i++) {
            if (st[i] != NC_NOERR) {
                printf("Error at line %d in %s: st[%d] %s\n",
                __FILE__,__LINE__,i,ncmpi_strerror(st[i]));
            }
        }
    }

    for (i=0; i<4; i++) {
        free(buffer[i]);
        free(starts[i][0]);
        free(counts[i][0]);
        free(starts[i]);
        free(counts[i]);
    }
    return nerrs;
}
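The varn family aggregates several (start, count) pairs targeting the same variable into a single call; the non-blocking ncmpi_iput_varn_int/ncmpi_iget_varn_int used above have a blocking collective counterpart. A minimal sketch reusing the starts/counts layout built in test_ivarn() (the wrapper name is an illustrative assumption):

static int put_varn_blocking(int ncid, int varid, int num_reqs,
                             MPI_Offset **starts, MPI_Offset **counts,
                             const int *buffer)
{
    /* write all num_reqs (start, count) pairs to one variable in a single
     * collective call */
    int err = ncmpi_put_varn_int_all(ncid, varid, num_reqs, starts, counts, buffer);
    if (err != NC_NOERR)
        printf("Error at line %d: %s\n", __LINE__, ncmpi_strerror(err));
    return (err != NC_NOERR);
}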
Example #16
static void handle_error(int status, int lineno)
{
    fprintf(stderr, "Error at line %d: %s\n", lineno, ncmpi_strerror(status));
    MPI_Abort(MPI_COMM_WORLD, 1);
}
Example #17
static void handle_error(int status) {
    printf("%s\n", ncmpi_strerror(status));
}
Example #18
/*
 *
 * This file is automatically generated by buildiface -infile=../lib/pnetcdf.h -deffile=defs
 * DO NOT EDIT
 */
#include "mpinetcdf_impl.h"


#ifdef F77_NAME_UPPER
#define nfmpi_xstrerror_ NFMPI_XSTRERROR
#elif defined(F77_NAME_LOWER_2USCORE)
#define nfmpi_xstrerror_ nfmpi_xstrerror__
#elif !defined(F77_NAME_LOWER_USCORE)
#define nfmpi_xstrerror_ nfmpi_xstrerror
/* Else leave name alone */
#endif


/* Prototypes for the Fortran interfaces */
#include "mpifnetcdf.h"
FORTRAN_API int FORT_CALL nfmpi_xstrerror_ ( MPI_Fint *v1, char *v2 FORT_MIXED_LEN(d2) FORT_END_LEN(d2) ){
    const char *p = ncmpi_strerror( *v1 );
    int i;
    /* d2 is the length of the string passed into the routine */
    for (i=0; i<d2 && *p; i++) {
	v2[i] = *p++;
    }
    /* Blank pad */
    for (; i<d2; i++) v2[i] = ' ';
    return 0;
}