Example #1
int main(int argc, char **argv) {
  MPI_Offset i, j, k;
  int status;
  int ncid;
  int dimid1, dimid2, dimid3, udimid;
  int square_dim[2], cube_dim[3], xytime_dim[3], time_dim[1];
  MPI_Offset square_start[2], cube_start[3] = {0, 0, 0};
  MPI_Offset square_count[2] = {50, 50}, cube_count[3] = {100, 50, 50};
  MPI_Offset xytime_start[3] = {0, 0, 0};
  MPI_Offset xytime_count[3] = {100, 50, 50};
  MPI_Offset time_start[1], time_count[1] = {25};
  int square_id, cube_id, xytime_id, time_id;
  static char title[128] = "example netCDF dataset"; /* leave room to append rank info below */
  static char description[] = "2-D integer array";
  int data[100][50][50], buffer[100];
  int rank;
  int nprocs;
  MPI_Comm comm = MPI_COMM_WORLD;
  double TotalWriteTime;
  params opts;

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if (rank == 0) 
	  fprintf(stderr, "Testing write ... \n");
  parse_write_args(argc, argv, rank, &opts);

  MPI_Barrier(MPI_COMM_WORLD);
  TotalWriteTime = MPI_Wtime();

  /**********  START OF NETCDF ACCESS **************/

  /**
   * Create the dataset
   *   File name: "testwrite.nc"
   *   Dataset API: Collective
   */

  status = ncmpi_create(comm, opts.outfname, NC_CLOBBER|NC_64BIT_OFFSET, MPI_INFO_NULL, &ncid);
  if (status != NC_NOERR) handle_error(status);


  /**
   * Create a global attribute:
   *    :title = "example netCDF dataset";
   */
  snprintf(title + strlen(title), sizeof(title) - strlen(title), ":%d of %d", rank, nprocs);
  printf("title:%s\n", title);
  status = ncmpi_put_att_text (ncid, NC_GLOBAL, "title",
                          strlen(title), title);
  if (status != NC_NOERR) handle_error(status);
  
   
  /**
   * Add 4 pre-defined dimensions:
   *   x = 100, y = 100, z = 100, time = NC_UNLIMITED
   */
  status = ncmpi_def_dim(ncid, "x", 100L, &dimid1);
  if (status != NC_NOERR) handle_error(status);
  status = ncmpi_def_dim(ncid, "y", 100L, &dimid2);
  if (status != NC_NOERR) handle_error(status);
  status = ncmpi_def_dim(ncid, "z", 100L, &dimid3);
  if (status != NC_NOERR) handle_error(status);
  status = ncmpi_def_dim(ncid, "time", NC_UNLIMITED, &udimid);
  if (status != NC_NOERR) handle_error(status);

  /**
   * Define the dimensionality and then add 4 variables:
   *    square(x, y), cube(x,y,z), time(time), xytime(time, x, y)  
   */

  square_dim[0] = cube_dim[0] = xytime_dim[1] = dimid1;
  square_dim[1] = cube_dim[1] = xytime_dim[2] = dimid2;
  cube_dim[2] = dimid3;
  xytime_dim[0] = udimid;
  time_dim[0] = udimid;
  status = ncmpi_def_var (ncid, "square", NC_INT, 2, square_dim, &square_id);
  if (status != NC_NOERR) handle_error(status);
  status = ncmpi_def_var (ncid, "cube", NC_INT, 3, cube_dim, &cube_id);
  if (status != NC_NOERR) handle_error(status);
  status = ncmpi_def_var (ncid, "time", NC_INT, 1, time_dim, &time_id);
  if (status != NC_NOERR) handle_error(status);
  status = ncmpi_def_var (ncid, "xytime", NC_INT, 3, xytime_dim, &xytime_id);
  if (status != NC_NOERR) handle_error(status);

  /**
   * Add an attribute for variable: 
   *    square: description = "2-D integer array"
   */

  status = ncmpi_put_att_text (ncid, square_id, "description",
                          strlen(description), description);
  if (status != NC_NOERR) handle_error(status);

  /**
   * End Define Mode (switch to data mode)
   *   Dataset API: Collective
   */
  
  status = ncmpi_enddef(ncid);
  if (status != NC_NOERR) {
    handle_error(status);
    status = ncmpi_close(ncid);
    if (status != NC_NOERR) handle_error(status);
    if (rank == 0) {
      fprintf(stderr, "Fatal Error: file header is inconsistent!\n");
    }
  }
  /**
   * Data Partition (Assume 4 processors):
   *   square: 2-D, (Block, Block), 50*50 from 100*100 
   *   cube:   3-D, (*, Block, Block), 100*50*50 from 100*100*100
   *   xytime: 3-D, (*, Block, Block), 100*50*50 from 100*100*100
   *   time:   1-D, Block-wise, 25 from 100
   */
  else {
	  square_start[0] = cube_start[1] = xytime_start[1] = (rank/2) * 50;
	  square_start[1] = cube_start[2] = xytime_start[2] = (rank%2) * 50;
	  time_start[0] = (rank%4) * 25;


  /**
   * Packing data in the buffer 
   */

  /* Data for variable: time */
	  for ( i = time_start[0]; i < time_start[0] + time_count[0]; i++ )
	    buffer[i - time_start[0]] = i;   

  /* Data for variable: square, cube and xytime */
	  for ( i = 0; i < 100; i++ )
	    for ( j = square_start[0]; j < square_start[0]+square_count[0]; j++ )
	      for ( k = square_start[1]; k < square_start[1]+square_count[1]; k++ )
	        data[i][j-square_start[0]][k-square_start[1]] = i*100*100 + j*100 + k;

  /**
   * Write data into variables: square, cube, time and xytime  
   *   Access Method: subarray
   *   Data Mode API: collective
   */ 
  
	  status = ncmpi_put_vara_int_all(ncid, square_id,
                    square_start, square_count,
                    &data[0][0][0]);
	  if (status != NC_NOERR) handle_error(status);
	  status = ncmpi_put_vara_int_all(ncid, cube_id,
                    cube_start, cube_count,
                    &data[0][0][0]);
	  if (status != NC_NOERR) handle_error(status);
	  status = ncmpi_put_vara_int_all(ncid, time_id,
                    time_start, time_count,
                    (void *)buffer);
	  if (status != NC_NOERR) handle_error(status);
	  status = ncmpi_put_vara_int_all(ncid, xytime_id,
                    xytime_start, xytime_count,
                    &data[0][0][0]);
	  if (status != NC_NOERR) handle_error(status);

/*
status = ncmpi_sync(ncid);
if (status != NC_NOERR) handle_error(status); 
status = ncmpi_redef(ncid);
if (status != NC_NOERR) handle_error(status);
status = ncmpi_del_att(ncid, square_id, "description");
if (status != NC_NOERR) handle_error(status); 
status = ncmpi_enddef(ncid);
if (status != NC_NOERR) handle_error(status);
*/

  /**
   * Close the dataset
   *   Dataset API:  collective
   */

	  status = ncmpi_close(ncid);
	  if (status != NC_NOERR) handle_error(status);
  /*******************  END OF NETCDF ACCESS  ****************/

	MPI_Barrier(MPI_COMM_WORLD); 
	TotalWriteTime = MPI_Wtime() - TotalWriteTime;

	if (rank == 0) {
	  fprintf(stderr, "OK\nFile written to: %s!\n", opts.outfname);
	  fprintf(stderr, "Total Write Time = %10.8f\n", TotalWriteTime);
	}
  }
  MPI_Finalize();
  return 0;
}
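Both this example and Example #4 below call a handle_error helper that the listing does not show. A minimal sketch, assuming it only prints the PnetCDF status string (ncmpi_strerror is the library call) and aborts:

/* Minimal sketch of the assumed handle_error helper (not shown in the
 * original listing): report the PnetCDF status string and abort. */
#include <stdio.h>
#include <mpi.h>
#include <pnetcdf.h>

void handle_error(int status)
{
    fprintf(stderr, "PnetCDF error: %s\n", ncmpi_strerror(status));
    MPI_Abort(MPI_COMM_WORLD, status);
}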
Example #2
int test(char* fname, int enable_log) {
    int buffer[MAXPROCESSES];
    MPI_Offset start[MAXPROCESSES][2], count[MAXPROCESSES][2];
    MPI_Offset *sp[MAXPROCESSES], *cp[MAXPROCESSES];
    MPI_Offset stride[2];

    int i, j, ret;
    int NProc, MyRank, NP;      // effective process count; rank; actual communicator size
    int fid;        // dataset ID
    int did[2];     // dimension IDs
    int vid[4];     // variable IDs
    int dims[2];
    char tmp[1024];
    MPI_Info Info;

    MPI_Comm_size(MPI_COMM_WORLD, &NP);
    MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);

    if (NP == 1) {    // Pretend there are SINGLEPROCNP processes when run on a single process, for easier debugging; most debuggers support only one process.
        NProc = SINGLEPROCNP;
        MyRank = SINGLEPROCRANK;
    }
    else{
        NProc = NP;
    }

    if (MyRank < MAXPROCESSES) {
        // Ensure each process has an independent buffer directory

        MPI_Info_create(&Info);
        if (enable_log) {
            MPI_Info_set(Info, "pnetcdf_log", "enable");
        }

        // Create a new netCDF file
        ret = ncmpi_create(MPI_COMM_WORLD, fname, NC_CLOBBER, Info, &fid);
        if (ret != NC_NOERR) {
            printf("Error create file\n");
            goto ERROR;
        }
        ret = ncmpi_set_fill(fid, NC_FILL, NULL);
        if (ret != NC_NOERR) {
            printf("Error set fill\n");
            goto ERROR;
        }
        ret = ncmpi_def_dim(fid, "X", NProc, did);  // X
        if (ret != NC_NOERR) {
            printf("Error def dim X\n");
            goto ERROR;
        }
        ret = ncmpi_def_dim(fid, "Y", NProc * 4, did + 1);    // Y
        if (ret != NC_NOERR) {
            printf("Error def dim Y\n");
            goto ERROR;
        }
        ret = ncmpi_def_var(fid, "M0", NC_INT, 2, did, vid + 0);
        if (ret != NC_NOERR) {
            printf("Error def var M0\n");
            goto ERROR;
        }
        ret = ncmpi_def_var(fid, "M1", NC_INT, 2, did, vid + 1);
        if (ret != NC_NOERR) {
            printf("Error def var M1\n");
            goto ERROR;
        }
        ret = ncmpi_def_var(fid, "M2", NC_INT, 2, did, vid + 2);
        if (ret != NC_NOERR) {
            printf("Error def var M2\n");
            goto ERROR;
        }
        ret = ncmpi_def_var(fid, "M3", NC_INT, 2, did, vid + 3);
        if (ret != NC_NOERR) {
            printf("Error def var M3\n");
            goto ERROR;
        }
        ret = ncmpi_enddef(fid);
        if (ret != NC_NOERR) {
            printf("Error enddef\n");
            goto ERROR;
        }

        // From here on, every process writes its own rank
        for (i = 0; i < NProc; i++) {
            buffer[i] = MyRank;
        }

        // put_var1
        for (i = 0; i < 4; i++) {
            for (j = 0; j < NProc; j++) {
                start[0][0] = MyRank;
                start[0][1] = i * NProc + j;
                ret = ncmpi_put_var1_int_all(fid, vid[i], start[0], buffer);
                if (ret != NC_NOERR) {
                    printf("Error put_var1\n");
                    goto ERROR;
                }
            }
        }

        // put_vara
        for (i = 0; i < 4; i++) {
            start[0][0] = 0;
            start[0][1] = ((i + 1) % 4) * NProc + MyRank;
            count[0][0] = NProc;
            count[0][1] = 1;
            ret = ncmpi_put_vara_int_all(fid, vid[i], start[0], count[0], buffer);
            if (ret != NC_NOERR) {
                printf("Error put_vara\n");
                goto ERROR;
            }
        }

        // put_vars
        for (i = 0; i < 4; i++) {
            start[0][0] = MyRank;
            start[0][1] = ((i + 2) % 4) * NProc + (MyRank % 2);
            count[0][0] = 1;
            count[0][1] = NProc / 2;
            stride[0] = 1;
            stride[1] = 2;
            ret = ncmpi_put_vars_int_all(fid, vid[i], start[0], count[0], stride, buffer);
            if (ret != NC_NOERR) {
                printf("Error put_vars\n");
                goto ERROR;
            }
        }

        // put_varn
        for (j = 0; j < 4; j++) {
            for (i = 0; i < NProc; i++) {
                count[i][0] = 1;
                count[i][1] = 1;
                start[i][0] = (MyRank + i) % NProc;
                start[i][1] = i + ((j + 3) % 4) * NProc;
                sp[i] = (MPI_Offset*)start[i];
                cp[i] = (MPI_Offset*)count[i];
            }
            ret = ncmpi_put_varn_int_all(fid, vid[j], NProc, sp, cp, buffer);
            if (ret != NC_NOERR) {
                printf("Error put_varn\n");
                goto ERROR;
            }
        }

        // Closing the file also commits any logged data into the netCDF file

        ret = ncmpi_close(fid);
        if (ret != NC_NOERR) {
            printf("Error close\n");
            goto ERROR;
        }
    }

ERROR:;
    return 0;
}
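test() expects MPI to be initialized by its caller and relies on the macros MAXPROCESSES, SINGLEPROCNP and SINGLEPROCRANK, none of which appear in the listing. A hypothetical driver with plausible macro values:

/* Hypothetical driver and macro values for test() above; the constants are
 * assumptions, not taken from the original source. */
#include <mpi.h>

#define MAXPROCESSES   1024  /* upper bound on supported processes */
#define SINGLEPROCNP   4     /* pretended process count when NP == 1 */
#define SINGLEPROCRANK 0     /* pretended rank when NP == 1 */

int test(char *fname, int enable_log);

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    test("test.nc", 0);   /* plain write */
    test("test.nc", 1);   /* same write with the log layer enabled */
    MPI_Finalize();
    return 0;
}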
Example #3
// Traj_NcEnsemble::writeArray() // TODO RemdValues
int Traj_NcEnsemble::writeArray(int set, FramePtrArray const& Farray) {
# ifdef HAS_PNETCDF
  MPI_Offset pstart_[4];
  MPI_Offset pcount_[4];
# define start_ pstart_
# define count_ pcount_
# endif
  start_[0] = ncframe_; // Frame
  start_[2] = 0;        // Atoms
  start_[3] = 0;        // XYZ
  count_[0] = 1; // Frame
  count_[1] = 1; // Ensemble
  count_[3] = 3; // XYZ
  for (int member = ensembleStart_; member != ensembleEnd_; member++) {
    //rprintf("DEBUG: Writing set %i, member %i\n", set+1, member); 
#   ifdef MPI
    Frame* frm = Farray[0];
#   else
    Frame* frm = Farray[member];
#   endif
    start_[1] = member;   // Ensemble
    count_[2] = Ncatom(); // Atoms
    // Write Coords
    //DebugIndices(); // DEBUG
    DoubleToFloat(Coord_, frm->xAddress());
#   ifdef HAS_PNETCDF
    if (ncmpi_put_vara_float_all(ncid_, coordVID_, start_, count_, Coord_))
#   else
    if (NC::CheckErr(nc_put_vara_float(ncid_, coordVID_, start_, count_, Coord_)))
#   endif
    {
      mprinterr("Error: Netcdf Writing coords frame %i\n", set+1);
      return 1;
    }
    // Write velocity.
    if (velocityVID_ != -1) {
      DoubleToFloat(Coord_, frm->vAddress());
#     ifdef HAS_PNETCDF
      if (ncmpi_put_vara_float_all(ncid_, velocityVID_, start_, count_, Coord_))
#     else
      if (NC::CheckErr(nc_put_vara_float(ncid_, velocityVID_, start_, count_, Coord_)) )
#     endif
      {
        mprinterr("Error: Netcdf writing velocity frame %i\n", set+1);
        return 1;
      }
    }
    // Write box
    if (cellLengthVID_ != -1) {
      count_[2] = 3;
#     ifdef HAS_PNETCDF
      if (ncmpi_put_vara_double_all(ncid_,cellLengthVID_,start_,count_,frm->bAddress()))
#     else
      if (NC::CheckErr(nc_put_vara_double(ncid_,cellLengthVID_,start_,count_,frm->bAddress())) )
#     endif
      {
        mprinterr("Error: Writing cell lengths frame %i.\n", set+1);
        return 1;
      }
#     ifdef HAS_PNETCDF
      if (ncmpi_put_vara_double_all(ncid_,cellAngleVID_,start_,count_,frm->bAddress()+3))
#     else
      if (NC::CheckErr(nc_put_vara_double(ncid_,cellAngleVID_,start_,count_,frm->bAddress()+3)))
#     endif
      {
        mprinterr("Error: Writing cell angles frame %i.\n", set+1);
        return 1;
      }
    }
    // Write temperature
    if (TempVID_!=-1) {
#     ifdef HAS_PNETCDF
      if (ncmpi_put_vara_double_all(ncid_,TempVID_,start_,count_,frm->tAddress()))
#     else
      if (NC::CheckErr(nc_put_vara_double(ncid_,TempVID_,start_,count_,frm->tAddress())))
#     endif
      {
        mprinterr("Error: Writing temperature frame %i.\n", set+1);
        return 1;
      }
    }
    // Write indices
    if (indicesVID_ != -1) {
      count_[2] = remd_dimension_;
#     ifdef HAS_PNETCDF
      if (ncmpi_put_vara_int_all(ncid_,indicesVID_,start_,count_,frm->iAddress()))
#     else
      if (NC::CheckErr(nc_put_vara_int(ncid_,indicesVID_,start_,count_,frm->iAddress())))
#     endif
      {
        mprinterr("Error: Writing indices frame %i.\n", set+1);
        return 1;
      }
    }
  }
# ifdef HAS_PNETCDF
  //ncmpi_sync(ncid_);
# else
  nc_sync(ncid_); // Necessary after every write??
# endif
  ++ncframe_;
# ifdef HAS_PNETCDF
  // DEBUG
# undef start_
# undef count_
# endif
  return 0;
}
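The #define start_/count_ aliasing at the top of writeArray() exists because serial NetCDF indexes with size_t arrays while PnetCDF indexes with MPI_Offset arrays, so the class's size_t members cannot be passed to PnetCDF directly. A minimal sketch of the PnetCDF flavor (ncid, varid, frame, natom and xyz are hypothetical names, not taken from the class):

/* Sketch of the MPI_Offset-based call that pstart_/pcount_ serve; the
 * serial nc_put_vara_float takes const size_t* instead. */
#include <pnetcdf.h>

int write_one_frame(int ncid, int varid, MPI_Offset frame,
                    MPI_Offset natom, const float *xyz)
{
    MPI_Offset start[3] = {frame, 0, 0};  /* frame, atom, xyz */
    MPI_Offset count[3] = {1, natom, 3};
    return ncmpi_put_vara_float_all(ncid, varid, start, count, xyz);
}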
Example #4
int main(int argc, char **argv) {

  int i, j;
  int status;
  int ncid1, ncid2;
  int ndims, nvars, ngatts, unlimdimid;
  char name[NC_MAX_NAME];
  nc_type type, vartypes[NC_MAX_VARS];
  MPI_Offset attlen;
  MPI_Offset dimlen, shape[NC_MAX_VAR_DIMS], varsize, start[NC_MAX_VAR_DIMS];
  void *valuep;
  int dimids[NC_MAX_DIMS], varids[NC_MAX_VARS];
  int vardims[NC_MAX_VARS][NC_MAX_VAR_DIMS/16]; /* divided by 16 due to my memory limitation */
  int varndims[NC_MAX_VARS], varnatts[NC_MAX_VARS];
  int isRecvar;
  params opts;

  int rank;
  int nprocs;
  MPI_Comm comm = MPI_COMM_WORLD;
  

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if (rank == 0)
	  fprintf(stderr, "Testing read ... ");
  parse_read_args(argc, argv, rank, &opts);

  /**********  START OF NETCDF ACCESS **************/


  /* Read a netCDF file and write it out to another file */

  /**
   * Open the input dataset - ncid1:
   *   File name: "../data/test_float.nc"
   *   Dataset API: Collective
   * And create the output dataset - ncid2:
   *   File name: "testread.nc"
   *   Dataset API: Collective
   */

  status = ncmpi_open(comm, opts.infname, 0, MPI_INFO_NULL, &ncid1);
  if (status != NC_NOERR) handle_error(status);

  status = ncmpi_create(comm, opts.outfname, NC_CLOBBER, MPI_INFO_NULL, &ncid2);
  if (status != NC_NOERR) handle_error(status);


  /**
   * Inquire the dataset definitions of input dataset AND
   * Add dataset definitions for output dataset.
   */

  status = ncmpi_inq(ncid1, &ndims, &nvars, &ngatts, &unlimdimid);
  if (status != NC_NOERR) handle_error(status);


  /* Copy the global attributes; CHAR and the common numeric types are handled below. */

  for (i = 0; i < ngatts; i++) {
    status = ncmpi_inq_attname(ncid1, NC_GLOBAL, i, name);
    if (status != NC_NOERR) handle_error(status);
    status = ncmpi_inq_att (ncid1, NC_GLOBAL, name, &type, &attlen);
    if (status != NC_NOERR) handle_error(status);
    switch (type) {
      case NC_CHAR: 
	valuep = (void *)malloc(attlen * sizeof(char));
	status = ncmpi_get_att_text(ncid1, NC_GLOBAL, name, valuep);
	if (status != NC_NOERR) handle_error(status);
	status = ncmpi_put_att_text (ncid2, NC_GLOBAL, name, attlen, (char *)valuep);
	if (status != NC_NOERR) handle_error(status);
	free(valuep);
        break;
      case NC_SHORT:
        valuep = (void *)malloc(attlen * sizeof(short));
        status = ncmpi_get_att_short(ncid1, NC_GLOBAL, name, valuep);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_put_att_short (ncid2, NC_GLOBAL, name, type, attlen, (short *)valuep);
        if (status != NC_NOERR) handle_error(status);
        free(valuep);
        break;
      case NC_INT:
        valuep = (void *)malloc(attlen * sizeof(int));
        status = ncmpi_get_att_int(ncid1, NC_GLOBAL, name, valuep);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_put_att_int (ncid2, NC_GLOBAL, name, type, attlen, (int *)valuep);
        if (status != NC_NOERR) handle_error(status);
        free(valuep);
        break;
      case NC_FLOAT:
        valuep = (void *)malloc(attlen * sizeof(float));
        status = ncmpi_get_att_float(ncid1, NC_GLOBAL, name, valuep);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_put_att_float (ncid2, NC_GLOBAL, name, type, attlen, (float *)valuep);
        if (status != NC_NOERR) handle_error(status);
        free(valuep);
        break;
      case NC_DOUBLE:
        valuep = (void *)malloc(attlen * sizeof(double));
        status = ncmpi_get_att_double(ncid1, NC_GLOBAL, name, valuep);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_put_att_double (ncid2, NC_GLOBAL, name, type, attlen, (double *)valuep);
        if (status != NC_NOERR) handle_error(status);
        free(valuep);
        break;
      default:
	;
	/* TODO: handle unexpected types */
    }
  }

  /* Inquire dimension */

  for (i = 0; i < ndims; i++) {
    status = ncmpi_inq_dim(ncid1, i, name, &dimlen);
    if (status != NC_NOERR) handle_error(status);
    if (i == unlimdimid)
      dimlen = NC_UNLIMITED;
    status = ncmpi_def_dim(ncid2, name, dimlen, dimids+i);
    if (status != NC_NOERR) handle_error(status);
  }

  /* Inquire variables */

  for (i = 0; i < nvars; i++) {
    status = ncmpi_inq_var (ncid1, i, name, vartypes+i, varndims+i, vardims[i], varnatts+i);
    if (status != NC_NOERR) handle_error(status);

    status = ncmpi_def_var(ncid2, name, vartypes[i], varndims[i], vardims[i], varids+i);
    if (status != NC_NOERR) handle_error(status);

    /* copy this variable's attributes; same types as the global ones */

    for (j = 0; j < varnatts[i]; j++) {
      status = ncmpi_inq_attname(ncid1, i, j, name);
      if (status != NC_NOERR) handle_error(status);
      status = ncmpi_inq_att (ncid1, i, name, &type, &attlen);
      if (status != NC_NOERR) handle_error(status);
      switch (type) {
        case NC_CHAR: 
	  valuep = (void *)malloc(attlen * sizeof(char));
	  status = ncmpi_get_att_text(ncid1, i, name, valuep);
	  if (status != NC_NOERR) handle_error(status);
	  status = ncmpi_put_att_text (ncid2, varids[i], name, attlen, (char *)valuep);
	  if (status != NC_NOERR) handle_error(status);
	  free(valuep);
          break;
        case NC_SHORT:
          valuep = (void *)malloc(attlen * sizeof(short));
          status = ncmpi_get_att_short(ncid1, i, name, valuep);
          if (status != NC_NOERR) handle_error(status);
          status = ncmpi_put_att_short (ncid2, varids[i], name, type, attlen, (short *)valuep);
          if (status != NC_NOERR) handle_error(status);
          free(valuep);
          break;
        case NC_INT:
          valuep = (void *)malloc(attlen * sizeof(int));
          status = ncmpi_get_att_int(ncid1, i, name, valuep);
          if (status != NC_NOERR) handle_error(status);
          status = ncmpi_put_att_int (ncid2, varids[i], name, type, attlen, (int *)valuep);
          if (status != NC_NOERR) handle_error(status);
          free(valuep);
          break;
        case NC_FLOAT:
          valuep = (void *)malloc(attlen * sizeof(float));
          status = ncmpi_get_att_float(ncid1, i, name, valuep);
          if (status != NC_NOERR) handle_error(status);
          status = ncmpi_put_att_float (ncid2, varids[i], name, type, attlen, (float *)valuep);
          if (status != NC_NOERR) handle_error(status);
          free(valuep);
          break;
        case NC_DOUBLE:
          valuep = (void *)malloc(attlen * sizeof(double));
          status = ncmpi_get_att_double(ncid1, i, name, valuep);
          if (status != NC_NOERR) handle_error(status);
          status = ncmpi_put_att_double (ncid2, varids[i], name, type, attlen, (double *)valuep);
          if (status != NC_NOERR) handle_error(status);
          free(valuep);
          break;
	default:
	  ; /* TODO: handle unexpected types */
      }
    }
  }

  /**
   * End Define Mode (switch to data mode) for output dataset
   *   Dataset API: Collective
   */

  status = ncmpi_enddef(ncid2);
  if (status != NC_NOERR) handle_error(status);

  /**
   * Read data of variables from input dataset 
   * (ONLY DEAL WITH: NC_INT, NC_FLOAT, NC_DOUBLE for now)
   * Write the data out to the corresponding variables in the output dataset
   *
   *  Data Partition (Assume 4 processors):
   *   square: 2-D, (Block, *), 25*100 from 100*100
   *   cube:   3-D, (Block, *, *), 25*100*100 from 100*100*100
   *   xytime: 3-D, (Block, *, *), 25*100*100 from 100*100*100
   *   time:   1-D, Block-wise, 25 from 100
   *
   *  Data Mode API: collective
   */

  for (i = 0; i < NC_MAX_VAR_DIMS; i++)
    start[i] = 0;
  for (i = 0; i < nvars; i++) {
    isRecvar = 0;
    varsize = 1;
    for (j = 0; j < varndims[i]; j++) {
      status = ncmpi_inq_dim(ncid1, vardims[i][j], name, shape + j);
      if (status != NC_NOERR) handle_error(status);
      if (j == 0) {
        shape[j] /= nprocs;
	start[j] = shape[j] * rank;
      }
      varsize *= shape[j];
      if (vardims[i][j] == unlimdimid)
	isRecvar = 1;
    }
    switch (vartypes[i]) {
      case NC_CHAR: 
        break;
      case NC_SHORT:
        valuep = (void *)malloc(varsize * sizeof(short));
        status = ncmpi_get_vara_short_all(ncid1, i, start, shape, (short *)valuep);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_put_vara_short_all(ncid2, varids[i],
                                     start, shape, (short *)valuep);
        if (status != NC_NOERR) handle_error(status);
        free(valuep);
        break;
      case NC_INT:
	valuep = (void *)malloc(varsize * sizeof(int));
        status = ncmpi_get_vara_int_all(ncid1, i, start, shape, (int *)valuep);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_put_vara_int_all(ncid2, varids[i],
                                     start, shape, (int *)valuep);
        if (status != NC_NOERR) handle_error(status);
        free(valuep);
	break;
      case NC_FLOAT:
        valuep = (void *)malloc(varsize * sizeof(float));
        status = ncmpi_get_vara_float_all(ncid1, i, start, shape, (float *)valuep);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_put_vara_float_all(ncid2, varids[i],
                                     start, shape, (float *)valuep);
        if (status != NC_NOERR) handle_error(status);
        free(valuep);
        break;
      case NC_DOUBLE:
        valuep = (void *)malloc(varsize * sizeof(double));
        status = ncmpi_get_vara_double_all(ncid1, i, start, shape, (double *)valuep);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_put_vara_double_all(ncid2, varids[i],
                                     start, shape, (double *)valuep);
        if (status != NC_NOERR) handle_error(status);
        free(valuep);
        break;
      default:
	; /* TODO: handle unexpected types */
    }
  }

  /**
   * Close the datasets
   *   Dataset API:  collective
   */

  status = ncmpi_close(ncid1);
  if (status != NC_NOERR) handle_error(status);
  status = ncmpi_close(ncid2);
  if (status != NC_NOERR) handle_error(status);

  /*******************  END OF NETCDF ACCESS  ****************/

  if (rank == 0)
    fprintf(stderr, "OK\nInput file %s copied to: %s!\n", opts.infname, opts.outfname);

  MPI_Finalize();
  return 0;
}
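Note that the variable-copy switch above silently skips NC_CHAR variables (case NC_CHAR: break;). One possible way to fill that case, reusing the same partitioning, is the following drop-in fragment (a sketch, not part of the original):

      case NC_CHAR:
        valuep = (void *)malloc(varsize * sizeof(char));
        status = ncmpi_get_vara_text_all(ncid1, i, start, shape, (char *)valuep);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_put_vara_text_all(ncid2, varids[i],
                                     start, shape, (char *)valuep);
        if (status != NC_NOERR) handle_error(status);
        free(valuep);
        break;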
Example #5
/* write out variable's data from in-memory structure */
void
load_netcdf(void *rec_start)
{
    int i, idim;
    int stat = NC_NOERR;
    MPI_Offset *start, *count;
    char *charvalp = NULL;
    short *shortvalp = NULL;
    int *intvalp = NULL;
    float *floatvalp = NULL;
    double *doublevalp = NULL;
    unsigned char *ubytevalp = NULL;
    unsigned short *ushortvalp = NULL;
    unsigned int *uintvalp = NULL;
    long long *int64valp = NULL;
    unsigned long long *uint64valp = NULL;
    MPI_Offset total_size;

    /* load values into variable */

    switch (vars[varnum].type) {
      case NC_CHAR:
      case NC_BYTE:
	charvalp = (char *) rec_start;
	break;
      case NC_SHORT:
	shortvalp = (short *) rec_start;
	break;
      case NC_INT:
	intvalp = (int *) rec_start;
	break;
      case NC_FLOAT:
	floatvalp = (float *) rec_start;
	break;
      case NC_DOUBLE:
	doublevalp = (double *) rec_start;
	break;
      case NC_UBYTE:
	ubytevalp = (unsigned char *) rec_start;
	break;
      case NC_USHORT:
	ushortvalp = (unsigned short *) rec_start;
	break;
      case NC_UINT:
	uintvalp = (unsigned int *) rec_start;
	break;
      case NC_INT64:
	int64valp = (long long *) rec_start;
	break;
      case NC_UINT64:
	uint64valp = (unsigned long long *) rec_start;
	break;
      default:
	derror("Unhandled type %d\n", vars[varnum].type);
	break;
    }

    start = (MPI_Offset*) malloc(vars[varnum].ndims * 2 * sizeof(MPI_Offset));
    count = start + vars[varnum].ndims;

    if (vars[varnum].ndims > 0) {
	/* initialize start to upper left corner (0,0,0,...) */
	start[0] = 0;
	if (vars[varnum].dims[0] == rec_dim) {
	    count[0] = vars[varnum].nrecs;
	}
	else {
	    count[0] = dims[vars[varnum].dims[0]].size;
	}
    }

    for (idim = 1; idim < vars[varnum].ndims; idim++) {
	start[idim] = 0;
	count[idim] = dims[vars[varnum].dims[idim]].size;
    }

    total_size = nctypesize(vars[varnum].type);
    for (idim=0; idim<vars[varnum].ndims; idim++)
        total_size *= count[idim];

    /* If the total put size is more than 2GB, put one subarray at a time,
     * where each subarray spans dimensions 1, 2, ..., ndims-1 (all but the
     * outermost dimension 0). This is not a perfect solution; to be improved.
     */
    if (total_size > INT_MAX) {
        MPI_Offset nchunks=count[0];
        MPI_Offset subarray_nelems=1;
        for (idim=1; idim<vars[varnum].ndims; idim++)
            subarray_nelems *= count[idim];

        count[0] = 1;
        switch (vars[varnum].type) {
            case NC_BYTE:
                 for (i=0; i<nchunks; i++) {
                     start[0] = i;
                     stat = ncmpi_put_vara_schar_all(ncid, varnum, start, count, (signed char *)charvalp);
                     check_err(stat, "ncmpi_put_vara_schar_all", __func__, __LINE__, __FILE__);
                     charvalp += subarray_nelems;
                 }
                 break;
            case NC_CHAR:
                 for (i=0; i<nchunks; i++) {
                     start[0] = i;
                     stat = ncmpi_put_vara_text_all(ncid, varnum, start, count, charvalp);
                     check_err(stat, "ncmpi_put_vara_text_all", __func__, __LINE__, __FILE__);
                     charvalp += subarray_nelems;
                 }
                 break;
            case NC_SHORT:
                 for (i=0; i<nchunks; i++) {
                     start[0] = i;
                     stat = ncmpi_put_vara_short_all(ncid, varnum, start, count, shortvalp);
                     check_err(stat, "ncmpi_put_vara_short_all", __func__, __LINE__, __FILE__);
                     shortvalp += subarray_nelems;
                 }
                 break;
            case NC_INT:
                 for (i=0; i<nchunks; i++) {
                     start[0] = i;
                     stat = ncmpi_put_vara_int_all(ncid, varnum, start, count, intvalp);
                     check_err(stat, "ncmpi_put_vara_int_all", __func__, __LINE__, __FILE__);
                     intvalp += subarray_nelems;
                 }
                 break;
            case NC_FLOAT:
                 for (i=0; i<nchunks; i++) {
                     start[0] = i;
                     stat = ncmpi_put_vara_float_all(ncid, varnum, start, count, floatvalp);
                     check_err(stat, "ncmpi_put_vara_float_all", __func__, __LINE__, __FILE__);
                     floatvalp += subarray_nelems;
                 }
                 break;
            case NC_DOUBLE:
                 for (i=0; i<nchunks; i++) {
                     start[0] = i;
                     stat = ncmpi_put_vara_double_all(ncid, varnum, start, count, doublevalp);
                     check_err(stat, "ncmpi_put_vara_double_all", __func__, __LINE__, __FILE__);
                     doublevalp += subarray_nelems;
                 }
                 break;
            case NC_UBYTE:
                 for (i=0; i<nchunks; i++) {
                     start[0] = i;
                     stat = ncmpi_put_vara_uchar_all(ncid, varnum, start, count, ubytevalp);
                     check_err(stat, "ncmpi_put_vara_uchar_all", __func__, __LINE__, __FILE__);
                     ubytevalp += subarray_nelems;
                 }
                 break;
            case NC_USHORT:
                 for (i=0; i<nchunks; i++) {
                     start[0] = i;
                     stat = ncmpi_put_vara_ushort_all(ncid, varnum, start, count, ushortvalp);
                     check_err(stat, "ncmpi_put_vara_ushort_all", __func__, __LINE__, __FILE__);
                     ushortvalp += subarray_nelems;
                 }
                 break;
            case NC_UINT:
                 for (i=0; i<nchunks; i++) {
                     start[0] = i;
                     stat = ncmpi_put_vara_uint_all(ncid, varnum, start, count, uintvalp);
                     check_err(stat, "ncmpi_put_vara_uint_all", __func__, __LINE__, __FILE__);
                     uintvalp += subarray_nelems;
                 }
                 break;
            case NC_INT64:
                 for (i=0; i<nchunks; i++) {
                     start[0] = i;
                     stat = ncmpi_put_vara_longlong_all(ncid, varnum, start, count, int64valp);
                     check_err(stat, "ncmpi_put_vara_longlong_all", __func__, __LINE__, __FILE__);
                     int64valp += subarray_nelems;
                 }
                 break;
            case NC_UINT64:
                 for (i=0; i<nchunks; i++) {
                     start[0] = i;
                     stat = ncmpi_put_vara_ulonglong_all(ncid, varnum, start, count, uint64valp);
                     check_err(stat, "ncmpi_put_vara_ulonglong_all", __func__, __LINE__, __FILE__);
                     uint64valp += subarray_nelems;
                 }
                 break;
            default:
                     derror("Unhandled type %d\n", vars[varnum].type);
                     break;
        }
    }
    else {
        switch (vars[varnum].type) {
            case NC_BYTE:
                stat = ncmpi_put_vara_schar_all(ncid, varnum, start, count, (signed char *)charvalp);
                check_err(stat, "ncmpi_put_vara_schar_all", __func__, __LINE__, __FILE__);
                break;
            case NC_CHAR:
                stat = ncmpi_put_vara_text_all(ncid, varnum, start, count, charvalp);
                check_err(stat, "ncmpi_put_vara_text_all", __func__, __LINE__, __FILE__);
                break;
            case NC_SHORT:
                stat = ncmpi_put_vara_short_all(ncid, varnum, start, count, shortvalp);
                check_err(stat, "ncmpi_put_vara_short_all", __func__, __LINE__, __FILE__);
                break;
            case NC_INT:
                stat = ncmpi_put_vara_int_all(ncid, varnum, start, count, intvalp);
                check_err(stat, "ncmpi_put_vara_int_all", __func__, __LINE__, __FILE__);
                break;
            case NC_FLOAT:
                stat = ncmpi_put_vara_float_all(ncid, varnum, start, count, floatvalp);
                check_err(stat, "ncmpi_put_vara_float_all", __func__, __LINE__, __FILE__);
                break;
            case NC_DOUBLE:
                stat = ncmpi_put_vara_double_all(ncid, varnum, start, count, doublevalp);
                check_err(stat, "ncmpi_put_vara_double_all", __func__, __LINE__, __FILE__);
                break;
            case NC_UBYTE:
                stat = ncmpi_put_vara_uchar_all(ncid, varnum, start, count, ubytevalp);
                check_err(stat, "ncmpi_put_vara_uchar_all", __func__, __LINE__, __FILE__);
                break;
            case NC_USHORT:
                stat = ncmpi_put_vara_ushort_all(ncid, varnum, start, count, ushortvalp);
                check_err(stat, "ncmpi_put_vara_ushort_all", __func__, __LINE__, __FILE__);
                break;
            case NC_UINT:
                stat = ncmpi_put_vara_uint_all(ncid, varnum, start, count, uintvalp);
                check_err(stat, "ncmpi_put_vara_uint_all", __func__, __LINE__, __FILE__);
                break;
            case NC_INT64:
                stat = ncmpi_put_vara_longlong_all(ncid, varnum, start, count, int64valp);
                check_err(stat, "ncmpi_put_vara_longlong_all", __func__, __LINE__, __FILE__);
                break;
            case NC_UINT64:
                stat = ncmpi_put_vara_ulonglong_all(ncid, varnum, start, count, uint64valp);
                check_err(stat, "ncmpi_put_vara_ulonglong_all", __func__, __LINE__, __FILE__);
                break;
            default:
                derror("Unhandled type %d\n", vars[varnum].type);
                break;
        }
    }
    free(start);
}
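load_netcdf() calls an nctypesize() helper that the listing does not show. A minimal sketch, assuming it only maps a netCDF external type to its size in bytes:

/* Sketch of the assumed nctypesize helper (not in the original listing). */
#include <pnetcdf.h>

MPI_Offset nctypesize(nc_type type)
{
    switch (type) {
        case NC_BYTE:
        case NC_CHAR:
        case NC_UBYTE:  return 1;
        case NC_SHORT:
        case NC_USHORT: return 2;
        case NC_INT:
        case NC_UINT:
        case NC_FLOAT:  return 4;
        case NC_DOUBLE:
        case NC_INT64:
        case NC_UINT64: return 8;
        default:        return 0;  /* unknown/unsupported type */
    }
}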
Example #6
int main(int argc, char **argv) {
    int ret, ncfile, nprocs, rank, dimid, varid1, varid2, ndims=1;
    MPI_Offset start, count=1;
    char buf[13] = "Hello World\n";
    int *data = NULL;   /* gather target; allocated on rank 0 only */

    MPI_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    if (argc != 2) {
        if (rank == 0) printf("Usage: %s filename\n", argv[0]);
        MPI_Finalize();
        exit(-1);
    }

    if (rank == 0) {
        ret = ncmpi_create(MPI_COMM_SELF, argv[1],
                           NC_CLOBBER|NC_64BIT_OFFSET, MPI_INFO_NULL, &ncfile);
        if (ret != NC_NOERR) handle_error(ret, __LINE__);

        ret = ncmpi_def_dim(ncfile, "d1", nprocs, &dimid);
        if (ret != NC_NOERR) handle_error(ret, __LINE__);

        ret = ncmpi_def_var(ncfile, "v1", NC_INT, ndims, &dimid, &varid1);
        if (ret != NC_NOERR) handle_error(ret, __LINE__);

        ret = ncmpi_def_var(ncfile, "v2", NC_INT, ndims, &dimid, &varid2);
        if (ret != NC_NOERR) handle_error(ret, __LINE__);

        ret = ncmpi_put_att_text(ncfile, NC_GLOBAL, "string", 13, buf);
        if (ret != NC_NOERR) handle_error(ret, __LINE__);
        
        ret = ncmpi_enddef(ncfile);
        if (ret != NC_NOERR) handle_error(ret, __LINE__);

        /* first reason this approach is not scalable:  need to allocate
        * enough memory to hold data from all processors */
        data = calloc(nprocs, sizeof(int));
    }

    /* second reason this approach is not scalable: sending to rank 0
     * introduces a serialization point, even if using an optimized
     * collective routine */
    MPI_Gather(&rank, 1, MPI_INT, data, 1, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        /* and lastly, the third reason this approach is not scalable: I/O
         * happens from a single processor.  This approach can be ok if the
         * amount of data is quite small, but almost always the underlying
         * MPI-IO library can do a better job */
        start=0, count=nprocs;
        ret = ncmpi_put_vara_int_all(ncfile, varid1, &start, &count, data);
        if (ret != NC_NOERR) handle_error(ret, __LINE__);

        ret = ncmpi_put_vara_int_all(ncfile, varid2, &start, &count, data);
        if (ret != NC_NOERR) handle_error(ret, __LINE__);

        ret = ncmpi_close(ncfile);
        if (ret != NC_NOERR) handle_error(ret, __LINE__);
    }

    MPI_Finalize();
    return 0;
}
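The three comments above spell out why funneling everything through rank 0 does not scale. A minimal sketch of the scalable alternative, where every rank opens the file collectively and writes its own element (assumes the same "d1" dimension of length nprocs and variable "v1" as above):

/* Sketch: collective alternative to the gather-to-rank-0 pattern. */
#include <mpi.h>
#include <pnetcdf.h>

int write_rank_collectively(const char *fname, int rank)
{
    int ncid, varid, err;
    MPI_Offset start = rank, count = 1;

    err = ncmpi_open(MPI_COMM_WORLD, fname, NC_WRITE, MPI_INFO_NULL, &ncid);
    if (err != NC_NOERR) return err;
    err = ncmpi_inq_varid(ncid, "v1", &varid);
    if (err != NC_NOERR) return err;
    /* each rank writes one int at offset "rank": no gather, no O(nprocs)
     * buffer, and the MPI-IO layer can aggregate the requests */
    err = ncmpi_put_vara_int_all(ncid, varid, &start, &count, &rank);
    if (err != NC_NOERR) return err;
    return ncmpi_close(ncid);
}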
Example #7
int main(int argc, char **argv)
{
    char filename[256];
    int err, nerrs=0, ncid, dimid[NDIMS], varid[5], ndims=NDIMS;
    int i, j, k, nprocs, rank, req, *buf;
    MPI_Offset start[NDIMS] = {0};
    MPI_Offset count[NDIMS] = {0};
    MPI_Offset stride[NDIMS] = {0};

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    if (argc > 2) {
        if (!rank) printf("Usage: %s [filename]\n",argv[0]);
        MPI_Finalize();
        return 1;
    }
    if (argc == 2) snprintf(filename, 256, "%s", argv[1]);
    else           strcpy(filename, "testfile.nc");

    if (rank == 0) {
        char *cmd_str = (char*)malloc(strlen(argv[0]) + 256);
        sprintf(cmd_str, "*** TESTING C   %s for NULL stride ", basename(argv[0]));
        printf("%-66s ------ ", cmd_str); fflush(stdout);
        free(cmd_str);
    }

    err = ncmpi_create(MPI_COMM_WORLD, filename, 0, MPI_INFO_NULL, &ncid);
    CHECK_ERR

    err = ncmpi_def_dim(ncid, "Y", NY, &dimid[0]);
    CHECK_ERR

    err = ncmpi_def_dim(ncid, "X", nprocs*NX, &dimid[1]);
    CHECK_ERR

    err = ncmpi_def_var(ncid, "v0", NC_INT, ndims, dimid, &varid[0]);
    CHECK_ERR

    err = ncmpi_def_var(ncid, "v1", NC_INT, ndims, dimid, &varid[1]);
    CHECK_ERR

    err = ncmpi_def_var(ncid, "v2", NC_INT, ndims, dimid, &varid[2]);
    CHECK_ERR

    err = ncmpi_def_var(ncid, "v3", NC_INT, ndims, dimid, &varid[3]);
    CHECK_ERR

    err = ncmpi_def_var(ncid, "v4", NC_INT, ndims, dimid, &varid[4]);
    CHECK_ERR

    err = ncmpi_enddef(ncid);
    CHECK_ERR

    start[0] = 0;
    start[1] = rank*NX;
    count[0] = NY;
    count[1] = NX;
    buf = (int*) malloc((size_t)NY * NX * sizeof(int));
    for (i=0; i<NY*NX; i++) buf[i] = rank+10;

    err = ncmpi_put_vara_int_all(ncid, varid[0], start, count, buf);
    CHECK_ERR
    CHECK_PUT_BUF

    err = ncmpi_put_vars_int_all(ncid, varid[1], start, count, NULL, buf);
    CHECK_ERR
    CHECK_PUT_BUF

    start[0] = 0;
    start[1] = rank;
    count[0] = NY;
    count[1] = NX;
    stride[0] = 1;
    stride[1] = nprocs;
    err = ncmpi_put_vars_int_all(ncid, varid[2], start, count, stride, buf);
    CHECK_ERR
    CHECK_PUT_BUF

    /* test bput_vars */
    err = ncmpi_buffer_attach(ncid, NY*NX*sizeof(int));
    CHECK_ERR

    start[0] = 0;
    start[1] = rank*NX;
    count[0] = NY;
    count[1] = NX;
    err = ncmpi_bput_vars_int(ncid, varid[3], start, count, NULL, buf, &req);
    CHECK_ERR

    err = ncmpi_wait_all(ncid, 1, &req, NULL);
    CHECK_ERR
    CHECK_PUT_BUF

    start[0] = 0;
    start[1] = rank;
    count[0] = NY;
    count[1] = NX;
    stride[0] = 1;
    stride[1] = nprocs;
    err = ncmpi_bput_vars_int(ncid, varid[4], start, count, stride, buf, &req);
    CHECK_ERR

    err = ncmpi_wait_all(ncid, 1, &req, NULL);
    CHECK_ERR
    CHECK_PUT_BUF
    free(buf);

    err = ncmpi_buffer_detach(ncid);
    CHECK_ERR

    buf = (int*) malloc((size_t)NY * NX * nprocs * sizeof(int));
    memset(buf, 0, (size_t)NY * NX * nprocs * sizeof(int));
    err = ncmpi_get_var_int_all(ncid, varid[0], buf);
    CHECK_ERR

    /* check read buffer contents */
    /*  v0 =
     *    10, 10, 11, 11, 12, 12, 13, 13,
     *    10, 10, 11, 11, 12, 12, 13, 13,
     *    10, 10, 11, 11, 12, 12, 13, 13,
     *    10, 10, 11, 11, 12, 12, 13, 13 ;
     */
    for (i=0; i<NY; i++) {
        for (j=0; j<nprocs; j++) {
            for (k=0; k<NX; k++) {
                if (buf[i*nprocs*NX+j*NX+k] != j+10) {
                    printf("Error at line %d in %s: expected buffer[%d]=%d but got %d\n",
                           __LINE__,__FILE__,i*nprocs*NX+j*NX+k, j+10, buf[i*nprocs*NX+j*NX+k]);
                    nerrs++;
                }
            }
        }
    }

    memset(buf, 0, (size_t)NY * NX * nprocs * sizeof(int));
    err = ncmpi_get_var_int_all(ncid, varid[1], buf);
    CHECK_ERR

    /* check read buffer contents */
    /*  v1 =
     *    10, 10, 11, 11, 12, 12, 13, 13,
     *    10, 10, 11, 11, 12, 12, 13, 13,
     *    10, 10, 11, 11, 12, 12, 13, 13,
     *    10, 10, 11, 11, 12, 12, 13, 13 ;
     */
    for (i=0; i<NY; i++) {
        for (j=0; j<nprocs; j++) {
            for (k=0; k<NX; k++) {
                if (buf[i*nprocs*NX+j*NX+k] != j+10) {
                    printf("Error at line %d in %s: expected buffer[%d]=%d but got %d\n",
                           __LINE__,__FILE__,i*nprocs*NX+j*NX+k, j+10, buf[i*nprocs*NX+j*NX+k]);
                    nerrs++;
                }
            }
        }
    }

    memset(buf, 0, (size_t)NY * NX * nprocs * sizeof(int));
    err = ncmpi_get_var_int_all(ncid, varid[2], buf);
    CHECK_ERR

    /* check read buffer contents */
    /*  v2 =
     *    10, 11, 12, 13, 10, 11, 12, 13,
     *    10, 11, 12, 13, 10, 11, 12, 13,
     *    10, 11, 12, 13, 10, 11, 12, 13,
     *    10, 11, 12, 13, 10, 11, 12, 13 ;
     */
    for (i=0; i<NY; i++) {
        for (k=0; k<NX; k++) {
            for (j=0; j<nprocs; j++) {
                if (buf[i*nprocs*NX+k*nprocs+j] != j+10) {
                    printf("Error at line %d in %s: expected buffer[%d]=%d but got %d\n",
                           __LINE__,__FILE__,i*nprocs*NX+k*nprocs+j, j+10, buf[i*nprocs*NX+k*nprocs+j]);
                    nerrs++;
                }
            }
        }
    }

    memset(buf, 0, (size_t)NY * NX * nprocs * sizeof(int));
    err = ncmpi_get_var_int_all(ncid, varid[3], buf);
    CHECK_ERR

    /* check read buffer contents */
    /*  v3 =
     *    10, 10, 11, 11, 12, 12, 13, 13,
     *    10, 10, 11, 11, 12, 12, 13, 13,
     *    10, 10, 11, 11, 12, 12, 13, 13,
     *    10, 10, 11, 11, 12, 12, 13, 13 ;
     */
    for (i=0; i<NY; i++) {
        for (j=0; j<nprocs; j++) {
            for (k=0; k<NX; k++) {
                if (buf[i*nprocs*NX+j*NX+k] != j+10) {
                    printf("Error at line %d in %s: expected buffer[%d]=%d but got %d\n",
                           __LINE__,__FILE__,i*nprocs*NX+j*NX+k, j+10, buf[i*nprocs*NX+j*NX+k]);
                    nerrs++;
                }
            }
        }
    }

    memset(buf, 0, (size_t)NY * NX * nprocs * sizeof(int));
    err = ncmpi_get_var_int_all(ncid, varid[4], buf);
    CHECK_ERR

    /* check read buffer contents */
    /*  v4 =
     *    10, 11, 12, 13, 10, 11, 12, 13,
     *    10, 11, 12, 13, 10, 11, 12, 13,
     *    10, 11, 12, 13, 10, 11, 12, 13,
     *    10, 11, 12, 13, 10, 11, 12, 13 ;
     */
    for (i=0; i<NY; i++) {
        for (k=0; k<NX; k++) {
            for (j=0; j<nprocs; j++) {
                if (buf[i*nprocs*NX+k*nprocs+j] != j+10) {
                    printf("Error at line %d in %s: expected buffer[%d]=%d but got %d\n",
                           __LINE__,__FILE__,i*nprocs*NX+k*nprocs+j, j+10, buf[i*nprocs*NX+k*nprocs+j]);
                    nerrs++;
                }
            }
        }
    }

    err = ncmpi_close(ncid);
    CHECK_ERR

    free(buf);

    /* check if PnetCDF freed all internal malloc */
    MPI_Offset malloc_size, sum_size;
    err = ncmpi_inq_malloc_size(&malloc_size);
    if (err == NC_NOERR) {
        MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD);
        if (rank == 0 && sum_size > 0)
            printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n",
                   sum_size);
        if (malloc_size > 0) ncmpi_inq_malloc_list();
    }

    MPI_Allreduce(MPI_IN_PLACE, &nerrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    if (rank == 0) {
        if (nerrs) printf(FAIL_STR,nerrs);
        else       printf(PASS_STR);
    }

    MPI_Finalize();
    return (nerrs > 0);
}
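This test depends on macros the listing omits (NDIMS, NY, NX, CHECK_ERR, CHECK_PUT_BUF, FAIL_STR, PASS_STR). Plausible definitions, with NY=4 and NX=2 inferred from the expected-output comments (which assume 4 processes); all values are assumptions, not the original header:

/* Assumed macro definitions for the test above. */
#define NDIMS 2
#define NY    4
#define NX    2
#define PASS_STR "pass\n"
#define FAIL_STR "fail with %d mismatches\n"

#define CHECK_ERR { \
    if (err != NC_NOERR) { \
        printf("Error at line %d in %s: %s\n", \
               __LINE__, __FILE__, ncmpi_strerror(err)); \
        nerrs++; \
    } \
}

/* verify the put buffer was not altered by the preceding call */
#define CHECK_PUT_BUF { \
    for (i=0; i<NY*NX; i++) { \
        if (buf[i] != rank+10) { \
            printf("Error at line %d in %s: put buffer[%d] altered\n", \
                   __LINE__, __FILE__, i); \
            nerrs++; \
            break; \
        } \
    } \
}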
Example #8
int main(int argc, char** argv)
{
    extern int optind;
    char filename[256];
    int i, j, rank, nprocs, verbose=1, err, nerrs=0;
    int ncid, cmode, varid, dimid[2], buf[NY][NX];
    char str_att[128];
    float float_att[100];
    MPI_Offset  global_ny, global_nx;
    MPI_Offset start[2], count[2];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* get command-line arguments */
    while ((i = getopt(argc, argv, "hq")) != EOF)
        switch(i) {
            case 'q': verbose = 0;
                      break;
            case 'h':
            default:  if (rank==0) usage(argv[0]);
                      MPI_Finalize();
                      return 1;
        }
    if (argv[optind] == NULL) strcpy(filename, "testfile.nc");
    else                      snprintf(filename, 256, "%s", argv[optind]);

    MPI_Bcast(filename, 256, MPI_CHAR, 0, MPI_COMM_WORLD);

    if (verbose && rank == 0) printf("%s: example of using put_vara APIs\n",__FILE__);

    /* create a new file for writing ----------------------------------------*/
    cmode = NC_CLOBBER | NC_64BIT_DATA;
    err = ncmpi_create(MPI_COMM_WORLD, filename, cmode, MPI_INFO_NULL, &ncid);
    ERR

    /* the global array is NY * (NX * nprocs) */
    global_ny = NY;
    global_nx = NX * nprocs;

    for (i=0; i<NY; i++)
        for (j=0; j<NX; j++)
             buf[i][j] = rank;

    /* add a global attribute: a time stamp at rank 0 */
    time_t ltime = time(NULL); /* get the current calendar time */
    asctime_r(localtime(&ltime), str_att);

    /* make sure the time string is consistent among all processes */
    MPI_Bcast(str_att, strlen(str_att), MPI_CHAR, 0, MPI_COMM_WORLD);

    err = ncmpi_put_att_text(ncid, NC_GLOBAL, "history", strlen(str_att),
                             &str_att[0]);
    ERR

    /* define dimensions x and y */
    err = ncmpi_def_dim(ncid, "Y", global_ny, &dimid[0]);
    ERR
    err = ncmpi_def_dim(ncid, "X", global_nx, &dimid[1]);
    ERR

    /* define a 2D variable of integer type */
    err = ncmpi_def_var(ncid, "var", NC_INT, 2, dimid, &varid);
    ERR

    /* add attributes to the variable */
    strcpy(str_att, "example attribute of type text.");
    err = ncmpi_put_att_text(ncid, varid, "str_att_name", strlen(str_att),
                             &str_att[0]);
    ERR

    for (i=0; i<8; i++) float_att[i] = i;
    err = ncmpi_put_att_float(ncid, varid, "float_att_name", NC_FLOAT, 8,
                              &float_att[0]);
    ERR
    long long int64_att=10000000000LL;
    err = ncmpi_put_att_longlong(ncid, varid, "int64_att_name", NC_INT64, 1,
                              &int64_att);
    ERR

    /* do not forget to exit define mode */
    err = ncmpi_enddef(ncid);
    ERR

    /* now we are in data mode */
    start[0] = 0;
    start[1] = NX * rank;
    count[0] = NY;
    count[1] = NX;

    err = ncmpi_put_vara_int_all(ncid, varid, start, count, &buf[0][0]);
    ERR

    err = ncmpi_close(ncid);
    ERR

    /* check if there is any PnetCDF internal malloc residue */
    MPI_Offset malloc_size, sum_size;
    err = ncmpi_inq_malloc_size(&malloc_size);
    if (err == NC_NOERR) {
        MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD);
        if (rank == 0 && sum_size > 0)
            printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n",
                   sum_size);
    }

    MPI_Finalize();
    return (nerrs > 0);
}
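The ERR macro and usage() used above are not shown. Plausible definitions in the same style (assumptions, not the original source):

/* Assumed helpers for the example above. */
#define ERR { \
    if (err != NC_NOERR) { \
        printf("Error at line %d in %s: %s\n", \
               __LINE__, __FILE__, ncmpi_strerror(err)); \
        nerrs++; \
    } \
}

static void usage(char *argv0)
{
    char *help = "Usage: %s [-h] | [-q] [filename]\n"
                 "       [-h] Print this help\n"
                 "       [-q] Quiet mode\n"
                 "       [filename] output netCDF file name\n";
    fprintf(stderr, help, argv0);
}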
Example #9
/*----< main() >------------------------------------------------------------*/
int main(int argc, char **argv)
{
    int i, j, err, nerrs=0, rank, nprocs;
    int ncid, dimid[2], varid, req, status;

    MPI_Offset start[2], count[2], stride[2], imap[2];
    int   var[6][4];
    float k, rh[4][6];
    signed char  varT[4][6];
    char filename[256];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    if (argc > 2) {
        if (!rank) printf("Usage: %s [filename]\n",argv[0]);
        MPI_Finalize();
        return 1;
    }
    if (argc == 2) snprintf(filename, 256, "%s", argv[1]);
    else           strcpy(filename, "testfile.nc");

    if (rank == 0) {
        char *cmd_str = (char*)malloc(strlen(argv[0]) + 256);
        sprintf(cmd_str, "*** TESTING C   %s for get/put varm ", basename(argv[0]));
        printf("%-66s ------ ", cmd_str); fflush(stdout);
        free(cmd_str);
    }

#ifdef DEBUG
    if (nprocs > 1 && rank == 0)
        printf("Warning: %s is designed to run on 1 process\n", argv[0]);
#endif

    err = ncmpi_create(MPI_COMM_WORLD, filename, NC_CLOBBER | NC_64BIT_DATA,
                       MPI_INFO_NULL, &ncid); CHECK_ERR

    /* define a variable of a 6 x 4 integer array in the nc file */
    err = ncmpi_def_dim(ncid, "Y", 6, &dimid[0]); CHECK_ERR
    err = ncmpi_def_dim(ncid, "X", 4, &dimid[1]); CHECK_ERR
    err = ncmpi_def_var(ncid, "var", NC_INT, 2, dimid, &varid); CHECK_ERR
    err = ncmpi_enddef(ncid); CHECK_ERR

    /* create a 6 x 4 integer variable in the file with contents:
           0,  1,  2,  3,
           4,  5,  6,  7,
           8,  9, 10, 11,
          12, 13, 14, 15,
          16, 17, 18, 19,
          20, 21, 22, 23
     */
    for (j=0; j<6; j++) for (i=0; i<4; i++) var[j][i] = j*4+i;

    start[0] = 0; start[1] = 0;
    count[0] = 6; count[1] = 4;
    if (rank > 0) count[0] = count[1] = 0;
    err = ncmpi_put_vara_int_all(ncid, varid, start, count, &var[0][0]); CHECK_ERR

    if (nprocs > 1) MPI_Barrier(MPI_COMM_WORLD);

    err = ncmpi_close(ncid); CHECK_ERR

    err = ncmpi_open(MPI_COMM_WORLD, filename, NC_NOWRITE, MPI_INFO_NULL, &ncid); CHECK_ERR

    err = ncmpi_inq_varid(ncid, "var", &varid); CHECK_ERR

    /* read the variable back in the matrix transposed way, rh is 4 x 6 */
     count[0] = 6;  count[1] = 4;
    stride[0] = 1; stride[1] = 1;
      imap[0] = 1;   imap[1] = 6;   /* would be {4, 1} if not transposing */

    for (i=0; i<6; i++) for (j=0; j<4; j++) rh[j][i] = -1.0;

    err = ncmpi_iget_varm_float(ncid, varid, start, count, stride, imap, &rh[0][0], &req); CHECK_ERR

    err = ncmpi_wait_all(ncid, 1, &req, &status); CHECK_ERR
    err = status; CHECK_ERR

    /* check the contents of read */
    k = 0.0;
    for (i=0; i<6; i++) {
        for (j=0; j<4; j++) {
            if (rh[j][i] != k) {
#ifdef PRINT_ERR_ON_SCREEN
                printf("Error at line %d in %s: expecting rh[%d][%d]=%f but got %f\n",
                __LINE__,__FILE__,j,i,k,rh[j][i]);
#endif
                nerrs++;
                break;
            }
            k += 1.0;
        }
    }
#ifdef PRINT_ON_SCREEN
    /* print the contents of read */
    for (j=0; j<4; j++) {
        printf("[%2d]: ",j);
        for (i=0; i<6; i++) {
            printf("%5.1f",rh[j][i]);
        }
        printf("\n");
    }
#endif
    /* the stdout should be:
           [ 0]:   0.0  4.0  8.0 12.0 16.0 20.0
           [ 1]:   1.0  5.0  9.0 13.0 17.0 21.0
           [ 2]:   2.0  6.0 10.0 14.0 18.0 22.0
           [ 3]:   3.0  7.0 11.0 15.0 19.0 23.0
     */

    for (i=0; i<6; i++) for (j=0; j<4; j++) rh[j][i] = -1.0;

    err = ncmpi_get_varm_float_all(ncid, varid, start, count, stride, imap, &rh[0][0]); CHECK_ERR

    /* check the contents of read */
    k = 0.0;
    for (i=0; i<6; i++) {
        for (j=0; j<4; j++) {
            if (rh[j][i] != k) {
#ifdef PRINT_ERR_ON_SCREEN
                printf("Error at line %d in %s: expecting rh[%d][%d]=%f but got %f\n",
                __LINE__,__FILE__,j,i,k,rh[j][i]);
#endif
                nerrs++;
                break;
            }
            k += 1.0;
        }
    }
#ifdef PRINT_ON_SCREEN
    /* print the contents of read */
    for (j=0; j<4; j++) {
        printf("[%2d]: ",j);
        for (i=0; i<6; i++) {
            printf("%5.1f",rh[j][i]);
        }
        printf("\n");
    }
#endif
    /* the stdout should be:
           [ 0]:   0.0  4.0  8.0 12.0 16.0 20.0
           [ 1]:   1.0  5.0  9.0 13.0 17.0 21.0
           [ 2]:   2.0  6.0 10.0 14.0 18.0 22.0
           [ 3]:   3.0  7.0 11.0 15.0 19.0 23.0
     */


    err = ncmpi_close(ncid); CHECK_ERR

    err = ncmpi_open(MPI_COMM_WORLD, filename, NC_WRITE, MPI_INFO_NULL, &ncid); CHECK_ERR

    err = ncmpi_inq_varid(ncid, "var", &varid); CHECK_ERR

    /* testing get_varm(), first zero-out the variable in the file */
    memset(&var[0][0], 0, 6*4*sizeof(int));
    start[0] = 0; start[1] = 0;
    count[0] = 6; count[1] = 4;
    if (rank > 0) count[0] = count[1] = 0;
    err = ncmpi_put_vara_int_all(ncid, varid, start, count, &var[0][0]); CHECK_ERR

    /* set the contents of the write buffer varT, a 4 x 6 char array
          50, 51, 52, 53, 54, 55,
          56, 57, 58, 59, 60, 61,
          62, 63, 64, 65, 66, 67,
          68, 69, 70, 71, 72, 73
     */
    for (j=0; j<4; j++) for (i=0; i<6; i++) varT[j][i] = j*6+i + 50;

    /* write varT to the NC variable in the matrix transposed way */
    start[0]  = 0; start[1]  = 0;
    count[0]  = 6; count[1]  = 4;
    stride[0] = 1; stride[1] = 1;
    imap[0]   = 1; imap[1]   = 6;   /* would be {4, 1} if not transposing */
    if (rank > 0) count[0] = count[1] = 0;

    err = ncmpi_iput_varm_schar(ncid, varid, start, count, stride, imap, &varT[0][0], &req); CHECK_ERR

    err = ncmpi_wait_all(ncid, 1, &req, &status); CHECK_ERR
    err = status; CHECK_ERR

    /* the output from command "ncmpidump -v var test.nc" should be:
           var =
            50, 56, 62, 68,
            51, 57, 63, 69,
            52, 58, 64, 70,
            53, 59, 65, 71,
            54, 60, 66, 72,
            55, 61, 67, 73 ;
     */

    /* check if the contents of write buffer have been altered */
    for (j=0; j<4; j++) {
        for (i=0; i<6; i++) {
            if (varT[j][i] != j*6+i + 50) {
#ifdef PRINT_ERR_ON_SCREEN
                /* this error is a pnetcdf internal error, if occurs */
                printf("Error at line %d in %s: expecting varT[%d][%d]=%d but got %d\n",
                __LINE__,__FILE__,j,i,j*6+i + 50,varT[j][i]);
#endif
                nerrs++;
                break;
            }
        }
    }
    err = ncmpi_put_varm_schar_all(ncid, varid, start, count, stride, imap, &varT[0][0]); CHECK_ERR

    /* check if the contents of write buffer have been altered */
    for (j=0; j<4; j++) {
        for (i=0; i<6; i++) {
            if (varT[j][i] != j*6+i + 50) {
#ifdef PRINT_ERR_ON_SCREEN
                /* this error is a pnetcdf internal error, if occurs */
                printf("Error at line %d in %s: expecting varT[%d][%d]=%d but got %d\n",
                __LINE__,__FILE__,j,i,j*6+i + 50,varT[j][i]);
#endif
                nerrs++;
                break;
            }
        }
    }

    err = ncmpi_close(ncid); CHECK_ERR

    /* check if PnetCDF freed all internal malloc */
    MPI_Offset malloc_size, sum_size;
    err = ncmpi_inq_malloc_size(&malloc_size);
    if (err == NC_NOERR) {
        MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD);
        if (rank == 0 && sum_size > 0)
            printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n",
                   sum_size);
        if (malloc_size > 0) ncmpi_inq_malloc_list();
    }

    MPI_Allreduce(MPI_IN_PLACE, &nerrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    if (rank == 0) {
        if (nerrs) printf(FAIL_STR,nerrs);
        else       printf(PASS_STR);
    }

    MPI_Finalize();
    return (nerrs > 0);
}
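The imap vector in the varm calls above maps file-space indices to buffer offsets: the file element at index (i0, i1, ...) of the selected subarray lands at offset i0*imap[0] + i1*imap[1] + ... (in elements) in the user buffer, which is why imap = {1, 6} turns a 6 x 4 read into a transposed 4 x 6 buffer. A small self-contained demo of that arithmetic:

/* Sketch: how imap = {1, 6} transposes a 6 x 4 file variable into a
 * 4 x 6 buffer; {4, 1} would preserve row-major order. */
#include <stdio.h>

int main(void)
{
    int imap[2] = {1, 6};
    for (int y = 0; y < 6; y++)
        for (int x = 0; x < 4; x++)
            printf("file (%d,%d) -> buf[%d]\n", y, x, y*imap[0] + x*imap[1]);
    return 0;
}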
Example #10
/*
  writes output in pnetcdf format

  nblocks: local number of blocks
  vblocks: pointer to array of vblocks
  out_file: output file name
  comm: MPI communicator
*/
void pnetcdf_write(int nblocks, struct vblock_t *vblocks,
       char *out_file, MPI_Comm comm) {

#ifdef USEPNETCDF
  int err;
  int ncid, cmode, varids[23], dimids[8], dimids_2D[2];
  MPI_Offset start[2], count[2];

  MPI_Offset quants[NUM_QUANTS]; /* quantities per block */
  MPI_Offset proc_quants[NUM_QUANTS]; /* quantities per process */
  MPI_Offset tot_quants[NUM_QUANTS]; /* total quantities all global blocks */
  MPI_Offset block_ofsts[NUM_QUANTS]; /* starting offsets for each block */

  /* init */
  int i;
  for (i = 0; i < NUM_QUANTS; i++) {
    quants[i] = 0;
    proc_quants[i] = 0;
    tot_quants[i] = 0;
    block_ofsts[i] = 0;
  }

  /* sum quantities over local blocks */
  int b;
  for (b = 0; b < nblocks; b++) {
    proc_quants[NUM_VERTS] += vblocks[b].num_verts;
    proc_quants[NUM_COMP_CELLS] += vblocks[b].num_complete_cells;
    proc_quants[NUM_CELL_FACES] += vblocks[b].tot_num_cell_faces;
    proc_quants[NUM_FACE_VERTS] += vblocks[b].tot_num_face_verts;
    proc_quants[NUM_ORIG_PARTS] += vblocks[b].num_orig_particles;
    proc_quants[NUM_NEIGHBORS] += DIY_Num_neighbors(0, b);
  }
  proc_quants[NUM_BLOCKS] = nblocks;

  /* sum the per-process values into global totals */
  MPI_Allreduce(proc_quants, tot_quants, NUM_QUANTS, MPI_OFFSET, MPI_SUM, comm);

  /* exclusive prefix sum of the per-process counts gives each process its
     starting offsets */
  MPI_Exscan(proc_quants, block_ofsts, NUM_QUANTS, MPI_OFFSET, MPI_SUM, comm);
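  /* after the exclusive scan, block_ofsts[q] on each process is the sum of
   * proc_quants[q] over all lower-ranked processes, i.e. this process's
   * starting global offset for quantity q (e.g. per-process block counts
   * {2, 3, 1} yield offsets {0, 2, 5}).  Note that MPI_Exscan leaves the
   * receive buffer on rank 0 undefined; this code relies on block_ofsts
   * having been zero-initialized above. */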

  /* create a new file for writing */
  cmode = NC_CLOBBER | NC_64BIT_DATA;
  err = ncmpi_create(comm, out_file, cmode, MPI_INFO_NULL, &ncid); ERR;

  /* define dimensions */
  err = ncmpi_def_dim(ncid, "num_g_blocks", tot_quants[NUM_BLOCKS],
          &dimids[0]); ERR;
  err = ncmpi_def_dim(ncid, "XYZ", 3, &dimids[1]); ERR;
  err = ncmpi_def_dim(ncid, "num_g_verts", tot_quants[NUM_VERTS],
          &dimids[2]); ERR;
  err = ncmpi_def_dim(ncid, "num_g_complete_cells", tot_quants[NUM_COMP_CELLS],
          &dimids[3]); ERR;
  err = ncmpi_def_dim(ncid, "tot_num_g_cell_faces", tot_quants[NUM_CELL_FACES],
          &dimids[4]); ERR;
  err = ncmpi_def_dim(ncid, "tot_num_g_face_verts", tot_quants[NUM_FACE_VERTS],
          &dimids[5]); ERR;
  err = ncmpi_def_dim(ncid, "num_g_orig_particles", tot_quants[NUM_ORIG_PARTS],
          &dimids[6]); ERR;
  err = ncmpi_def_dim(ncid, "num_g_neighbors", tot_quants[NUM_NEIGHBORS],
          &dimids[7]); ERR;

  /* define variables */
  err = ncmpi_def_var(ncid, "num_verts", NC_INT, 1, &dimids[0],
          &varids[0]); ERR;
  err = ncmpi_def_var(ncid, "num_complete_cells", NC_INT, 1, &dimids[0],
          &varids[1]); ERR;
  err = ncmpi_def_var(ncid, "tot_num_cell_faces", NC_INT, 1, &dimids[0],
          &varids[2]); ERR;
  err = ncmpi_def_var(ncid, "tot_num_face_verts", NC_INT, 1, &dimids[0],
          &varids[3]); ERR;
  err = ncmpi_def_var(ncid, "num_orig_particles", NC_INT, 1, &dimids[0],
          &varids[4]); ERR;

  /* block offsets */
  err = ncmpi_def_var(ncid, "block_off_num_verts", NC_INT64, 1, &dimids[0],
          &varids[5]); ERR;
  err = ncmpi_def_var(ncid, "block_off_num_complete_cells", NC_INT64, 1,
          &dimids[0], &varids[6]); ERR;
  err = ncmpi_def_var(ncid, "block_off_tot_num_cell_faces", NC_INT64, 1,
          &dimids[0], &varids[7]); ERR;
  err = ncmpi_def_var(ncid, "block_off_tot_num_face_verts", NC_INT64, 1,
          &dimids[0], &varids[8]); ERR;
  err = ncmpi_def_var(ncid, "block_off_num_orig_particles", NC_INT64, 1,
          &dimids[0], &varids[9]); ERR;
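  /* (varids[10] is left unused in this example) */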

  dimids_2D[0] = dimids[0];
  dimids_2D[1] = dimids[1];
  err = ncmpi_def_var(ncid, "mins", NC_FLOAT, 2, dimids_2D, &varids[11]); ERR;
  err = ncmpi_def_var(ncid, "maxs", NC_FLOAT, 2, dimids_2D, &varids[12]); ERR;

  dimids_2D[0] = dimids[2];
  dimids_2D[1] = dimids[1];
  err = ncmpi_def_var(ncid, "save_verts", NC_FLOAT, 2, dimids_2D,
          &varids[13]); ERR;
  dimids_2D[0] = dimids[6];
  dimids_2D[1] = dimids[1];
  err = ncmpi_def_var(ncid, "sites", NC_FLOAT, 2, dimids_2D,
          &varids[14]); ERR;
  err = ncmpi_def_var(ncid, "complete_cells", NC_INT, 1, &dimids[3],
          &varids[15]); ERR;
  err = ncmpi_def_var(ncid, "areas", NC_FLOAT, 1, &dimids[3],
          &varids[16]); ERR;
  err = ncmpi_def_var(ncid, "vols", NC_FLOAT, 1, &dimids[3], &varids[17]); ERR;
  err = ncmpi_def_var(ncid, "num_cell_faces", NC_INT, 1, &dimids[3],
          &varids[18]); ERR;
  err = ncmpi_def_var(ncid, "num_face_verts", NC_INT, 1, &dimids[4],
          &varids[19]); ERR;
  err = ncmpi_def_var(ncid, "face_verts", NC_INT, 1, &dimids[5],
          &varids[20]); ERR;
  err = ncmpi_def_var(ncid, "neighbors", NC_INT, 1, &dimids[7],
          &varids[21]); ERR;
  err = ncmpi_def_var(ncid, "g_block_ids", NC_INT, 1, &dimids[0],
          &varids[22]); ERR;

  /* exit define mode */
  err = ncmpi_enddef(ncid); ERR;

  /* write all variables.
     possible improvement: use nonblocking I/O to aggregate these many small
     requests */
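  /* note: each ncmpi_put_vara_*_all call below is collective, so this loop
   * implicitly requires nblocks to be identical on all processes; otherwise
   * some processes stop making the matching collective calls and the
   * program can hang */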

  for (b = 0; b < nblocks; b++) {

    struct vblock_t *v = &vblocks[b];

    /* quantities */
    start[0] = block_ofsts[NUM_BLOCKS];
    count[0] = 1;
    err = ncmpi_put_vara_int_all(ncid, varids[0], start, count,
         &v->num_verts); ERR;
    err = ncmpi_put_vara_int_all(ncid, varids[1], start, count,
         &v->num_complete_cells); ERR;
    err = ncmpi_put_vara_int_all(ncid, varids[2], start, count,
         &v->tot_num_cell_faces); ERR;
    err = ncmpi_put_vara_int_all(ncid, varids[3], start, count,
         &v->tot_num_face_verts); ERR;
    err = ncmpi_put_vara_int_all(ncid, varids[4], start, count,
         &v->num_orig_particles); ERR;

    /* block offsets */
    err = ncmpi_put_vara_longlong_all(ncid, varids[5], start, count,
              &block_ofsts[NUM_VERTS]); ERR;
    err = ncmpi_put_vara_longlong_all(ncid, varids[6], start, count,
              &block_ofsts[NUM_COMP_CELLS]); ERR;
    err = ncmpi_put_vara_longlong_all(ncid, varids[7], start, count,
              &block_ofsts[NUM_CELL_FACES]); ERR;
    err = ncmpi_put_vara_longlong_all(ncid, varids[8], start, count,
              &block_ofsts[NUM_FACE_VERTS]); ERR;
    err = ncmpi_put_vara_longlong_all(ncid, varids[9], start, count,
              &block_ofsts[NUM_ORIG_PARTS]); ERR;

    /* block bounds */
    start[0] = block_ofsts[NUM_BLOCKS];
    count[0] = 1;
    start[1] = 0;
    count[1] = 3;
    err = ncmpi_put_vara_float_all(ncid, varids[11], start, count,
           v->mins); ERR;
    err = ncmpi_put_vara_float_all(ncid, varids[12], start, count,
           v->maxs); ERR;

    /* save_verts */
    start[0] = block_ofsts[NUM_VERTS];
    start[1] = 0;
    count[0] = v->num_verts;
    count[1] = 3;
    err = ncmpi_put_vara_float_all(ncid, varids[13], start, count,
           v->save_verts); ERR;

    /* sites */
    start[0] = block_ofsts[NUM_ORIG_PARTS];
    start[1] = 0;
    count[0] = v->num_orig_particles;
    count[1] = 3;
    err = ncmpi_put_vara_float_all(ncid, varids[14], start, count,
           v->sites); ERR;

    /* complete cells */
    start[0] = block_ofsts[NUM_COMP_CELLS];
    count[0] = v->num_complete_cells;
    err = ncmpi_put_vara_int_all(ncid, varids[15], start, count,
         v->complete_cells); ERR;

    /* areas */
    start[0] = block_ofsts[NUM_COMP_CELLS];
    count[0] = v->num_complete_cells;
    err = ncmpi_put_vara_float_all(ncid, varids[16], start, count,
           v->areas); ERR;

    /* volumes */
    start[0] = block_ofsts[NUM_COMP_CELLS];
    count[0] = v->num_complete_cells;
    err = ncmpi_put_vara_float_all(ncid, varids[17], start, count,
           v->vols); ERR;

    /* num_cell_faces */
    start[0] = block_ofsts[NUM_COMP_CELLS];
    count[0] = v->num_complete_cells;
    err = ncmpi_put_vara_int_all(ncid, varids[18], start, count,
         v->num_cell_faces); ERR;

    /* num_face_verts */
    start[0] = block_ofsts[NUM_CELL_FACES];
    count[0] = v->tot_num_cell_faces;
    err = ncmpi_put_vara_int_all(ncid, varids[19], start, count,
         v->num_face_verts); ERR;

    /* face verts */
    start[0] = block_ofsts[NUM_FACE_VERTS];
    count[0] = v->tot_num_face_verts;
    err = ncmpi_put_vara_int_all(ncid, varids[20], start, count,
         v->face_verts); ERR;

    /* neighbors */
    int *neighbors = (int*)malloc(DIY_Num_neighbors(0, b) * sizeof(int));
    int num_neighbors = DIY_Get_neighbors(0, b, neighbors);
    start[0] = block_ofsts[NUM_NEIGHBORS];
    count[0] = num_neighbors;
    err = ncmpi_put_vara_int_all(ncid, varids[21], start, count, neighbors);
    ERR;
    free(neighbors); /* allocated per block above; avoid leaking in the loop */

    /* gids */
    int gid = DIY_Gid(0, b);
    start[0] = block_ofsts[NUM_BLOCKS];
    count[0] = 1;
    err = ncmpi_put_vara_int_all(ncid, varids[22], start, count,
         &gid); ERR;

    /* update block offsets */
    block_ofsts[NUM_VERTS] += v->num_verts;
    block_ofsts[NUM_COMP_CELLS] += v->num_complete_cells;
    block_ofsts[NUM_CELL_FACES] += v->tot_num_cell_faces;
    block_ofsts[NUM_FACE_VERTS] += v->tot_num_face_verts;
    block_ofsts[NUM_ORIG_PARTS] += v->num_orig_particles;
    block_ofsts[NUM_NEIGHBORS] += num_neighbors;
    block_ofsts[NUM_BLOCKS]++;

    /* debug */
/*     fprintf(stderr, "gid = %d num_verts = %d num_complete_cells = %d " */
/* 	    "tot_num_cell_faces = %d tot_num_face_verts = %d " */
/* 	    "num_orig_particles = %d\n", */
/* 	    gid, v->num_verts, v->num_complete_cells, v->tot_num_cell_faces, */
/* 	    v->tot_num_face_verts, v->num_orig_particles); */

  }

  err = ncmpi_close(ncid); ERR;
#endif

}
Example #11
int main(int argc, char** argv)
{
    extern int optind;
    char filename[256];
    int i, j, verbose=1, rank, nprocs, err, nerrs=0;
    int myNX, G_NX, myOff, num_reqs;
    int ncid, cmode, varid, dimid[2], *reqs, *sts, **buf;
    MPI_Offset start[2], count[2];
    MPI_Info info;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* get command-line arguments */
    while ((i = getopt(argc, argv, "hq")) != EOF)
        switch(i) {
            case 'q': verbose = 0;
                      break;
            case 'h':
            default:  if (rank==0) usage(argv[0]);
                      MPI_Finalize();
                      return 1;
        }
    if (argv[optind] == NULL) strcpy(filename, "testfile.nc");
    else                      snprintf(filename, 256, "%s", argv[optind]);

    /* set an MPI-IO hint to disable file offset alignment for fixed-size
     * variables */
    MPI_Info_create(&info);
    MPI_Info_set(info, "nc_var_align_size", "1");

    cmode = NC_CLOBBER | NC_64BIT_DATA;
    err = ncmpi_create(MPI_COMM_WORLD, filename, cmode, info, &ncid);
    ERR

    MPI_Info_free(&info);

    /* the global array is NY * (NX * nprocs) */
    G_NX  = NX * nprocs;
    myOff = NX * rank;
    myNX  = NX;
    if (verbose) printf("%2d: myOff=%3d myNX=%3d\n",rank,myOff,myNX);

    err = ncmpi_def_dim(ncid, "Y", NY, &dimid[0]);
    ERR
    err = ncmpi_def_dim(ncid, "X", G_NX, &dimid[1]);
    ERR
    err = ncmpi_def_var(ncid, "var", NC_INT, 2, dimid, &varid);
    ERR
    err = ncmpi_enddef(ncid);
    ERR

    /* First, fill the entire array with zeros, using a blocking I/O.
       Every process writes a subarray of size NY * myNX */
    buf    = (int**) malloc(myNX * sizeof(int*));
    buf[0] = (int*)  calloc(NY * myNX, sizeof(int));
    start[0] = 0;   start[1] = myOff;
    count[0] = NY;  count[1] = myNX;
    err = ncmpi_put_vara_int_all(ncid, varid, start, count, buf[0]);
    ERR
    free(buf[0]);

    /* initialize the buffer with the rank ID; to make the case more
       interesting, allocate each column buffer separately */
    for (i=0; i<myNX; i++) {
        buf[i] = (int*) malloc(NY * sizeof(int));
        for (j=0; j<NY; j++) buf[i][j] = rank;
    }

    reqs = (int*) malloc(myNX * sizeof(int));
    sts  = (int*) malloc(myNX * sizeof(int));

    /* each proc writes myNX single columns of the 2D array */
    start[0]  = 0;   start[1] = rank;
    count[0]  = NY;  count[1] = 1;
    if (verbose)
        printf("%2d: start=%3lld %3lld count=%3lld %3lld\n",
               rank, start[0],start[1], count[0],count[1]);

    num_reqs = 0;
    for (i=0; i<myNX; i++) {
        err = ncmpi_iput_vara_int(ncid, varid, start, count, buf[i],
                                  &reqs[num_reqs++]);
        ERR
        start[1] += nprocs;
    }
    err = ncmpi_wait_all(ncid, num_reqs, reqs, sts);
    ERR

    /* check status of all requests */
    for (i=0; i<num_reqs; i++)
        if (sts[i] != NC_NOERR) {
            printf("Error at line %d in %s: nonblocking write fails on request %d (%s)\n",
                   __LINE__,__FILE__,i, ncmpi_strerror(sts[i]));
            nerrs++;
        }

    err = ncmpi_close(ncid); ERR

    /* read back using the same access pattern; info was already set to
       MPI_INFO_NULL by MPI_Info_free above */
    err = ncmpi_open(MPI_COMM_WORLD, filename, NC_NOWRITE, MPI_INFO_NULL, &ncid); ERR

    err = ncmpi_inq_varid(ncid, "var", &varid); ERR

    for (i=0; i<myNX; i++)
        for (j=0; j<NY; j++) buf[i][j] = -1;

    /* each proc reads myNX single columns of the 2D array */
    start[0]  = 0;   start[1] = rank;
    count[0]  = NY;  count[1] = 1;

    num_reqs = 0;
    for (i=0; i<myNX; i++) {
        err = ncmpi_iget_vara_int(ncid, varid, start, count, buf[i],
                                  &reqs[num_reqs++]);
        ERR
        start[1] += nprocs;
    }
    err = ncmpi_wait_all(ncid, num_reqs, reqs, sts);
    ERR

    /* check status of all requests */
    for (i=0; i<num_reqs; i++)
        if (sts[i] != NC_NOERR) {
            printf("Error at line %d in %s: nonblocking read fails on request %d (%s)\n",
                   __LINE__,__FILE__,i, ncmpi_strerror(sts[i]));
            nerrs++;
        }

    for (i=0; i<myNX; i++) {
        for (j=0; j<NY; j++)
            if (buf[i][j] != rank) {
                printf("Error at line %d in %s: expect buf[%d][%d]=%d but got %d\n",
                       __LINE__,__FILE__,i,j,rank,buf[i][j]);
                nerrs++;
            }
    }

    err = ncmpi_close(ncid);
    ERR

    free(sts);
    free(reqs);
    for (i=0; i<myNX; i++) free(buf[i]);
    free(buf);

    /* check if there is any PnetCDF internal malloc residue */
    MPI_Offset malloc_size, sum_size;
    err = ncmpi_inq_malloc_size(&malloc_size);
    if (err == NC_NOERR) {
        MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD);
        if (rank == 0 && sum_size > 0)
            printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n",
                   sum_size);
    }

    MPI_Finalize();
    return (nerrs > 0);
}
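The same column-write pattern can also use PnetCDF's buffered nonblocking
API, which copies the user buffer internally so it can be reused or freed
right after each call. A minimal sketch (assuming ncid, varid, buf, reqs,
sts, start, count, NY, myNX, nprocs, and rank are set up as in the example
above):

    /* attach an internal buffer large enough for all pending bput data */
    MPI_Offset bufsize = (MPI_Offset)myNX * NY * sizeof(int);
    err = ncmpi_buffer_attach(ncid, bufsize);
    ERR

    start[0] = 0;   start[1] = rank;
    count[0] = NY;  count[1] = 1;
    for (i=0; i<myNX; i++) {
        /* bput copies buf[i] immediately, so buf[i] may be reused afterwards */
        err = ncmpi_bput_vara_int(ncid, varid, start, count, buf[i], &reqs[i]);
        ERR
        start[1] += nprocs;
    }
    err = ncmpi_wait_all(ncid, myNX, reqs, sts);
    ERR

    /* the attached buffer may only be detached after the requests complete */
    err = ncmpi_buffer_detach(ncid);
    ERR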
Example #12
/*---< pnetcdf_write() >-----------------------------------------------------*/
int pnetcdf_write(char      *filename,     /* input file name */
                  int        save_in_new_file,  /* 0 or 1 */
                  int        numClusters,  /* no. clusters */
                  int        numObjs,      /* no. data objects */
                  int        numCoords,    /* no. coordinates (local) */
                  float    **clusters,     /* [numClusters][numCoords] centers*/
                  int       *membership,   /* [numObjs] */
                  int        totalNumObjs, /* total no. data objects */
                  MPI_Comm   comm,
                  int        verbose)
{
    int   rank, nproc, divd, rem;
    char  outFileName[1024];
    int        ncid, dimids[2], dim_num_obj, clusters_varid, membership_varid, retval;
    MPI_Offset start, count;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &nproc);

    /* output: the coordinates of the cluster centres ----------------------*/
    /* clusters[] is replicated on every process, so all ranks contribute the
       same data to the collective write below */

    /* to save the results in a new netCDF file */
    if (save_in_new_file) {
        if (strcasecmp(filename+strlen(filename)-3, ".nc") == 0) {
            strcpy(outFileName, filename);
            outFileName[strlen(filename)-3] = '\0';
            strcat(outFileName, ".cluster_centres.nc");
        }
        else
            sprintf(outFileName, "%s.cluster_centres.nc", filename);

        if (rank == 0 && verbose) {
            printf("Writing coordinates of K=%d cluster centers to file \"%s\"\n",
                   numClusters, outFileName);

            printf("Writing membership of N=%d data objects to file \"%s\"\n",
                   totalNumObjs, outFileName);
        }

        /* Create the file. The NC_CLOBBER parameter tells netCDF to
         * overwrite this file, if it already exists.*/
        if ((retval = ncmpi_create(comm, outFileName, NC_CLOBBER | NC_64BIT_OFFSET, MPI_INFO_NULL, &ncid)))
            ERR2(retval);

        /* Define the dimensions. NetCDF will hand back an ID for each. */
        if ((retval = ncmpi_def_dim(ncid, "num_clusters", numClusters, &dimids[0])))
            ERR2(retval);

        if ((retval = ncmpi_def_dim(ncid, "num_coordinates", numCoords, &dimids[1])))
            ERR2(retval);

        if ((retval = ncmpi_def_dim(ncid, "num_elements", totalNumObjs, &dim_num_obj)))
            ERR2(retval);

        /* Define the clusters variable. The type of the variable in this case is
         * NC_FLOAT (4-byte float). */
        if ((retval = ncmpi_def_var(ncid, "clusters", NC_FLOAT, 2, dimids, &clusters_varid)))
            ERR2(retval);

        /* Define the membership variable. The type of the variable in this case is
         * NC_INT (4-byte integer). */
        if ((retval = ncmpi_def_var(ncid, "membership", NC_INT, 1, &dim_num_obj, &membership_varid)))
            ERR2(retval);

        /* End define mode. This tells netCDF we are done defining
         * metadata. */
        if ((retval = ncmpi_enddef(ncid)))
            ERR2(retval);
    }
    else { /* add new variables into an existing netCDF file: not implemented
            * here.  If this branch is taken, ncid and the variable IDs used
            * below remain uninitialized; see the sketch after this function
            * for one possible way to fill it in. */
    }

    /* write cluster centers (assumes clusters[0] points to a contiguous
       numClusters x numCoords array) */
    if ((retval = ncmpi_put_var_float_all(ncid, clusters_varid, *clusters)))
        ERR2(retval);

    /* write membership variable */
    divd  = totalNumObjs / nproc;
    rem   = totalNumObjs % nproc;
    start = (rank < rem) ? rank*(divd+1) : rank*divd + rem;
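    /* e.g. totalNumObjs=10, nproc=4: divd=2, rem=2, giving counts 3,3,2,2
       and starts 0,3,6,8 */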
    count = numObjs;
    if (_debug) printf("%2d: start=%lld count=%lld\n",rank,start,count);

    if ((retval = ncmpi_put_vara_int_all(ncid, membership_varid, &start, &count, membership)))
        ERR2(retval);

    /* Close the file. This frees up any internal netCDF resources
     * associated with the file, and flushes any buffers. */
    if ((retval = ncmpi_close(ncid)))
        ERR2(retval);

    return 1;
}
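One possible way to fill in the unimplemented branch above (a hypothetical
sketch, not part of the original code) is to reopen the existing file and
re-enter define mode before adding the same dimensions and variables:

    if ((retval = ncmpi_open(comm, filename, NC_WRITE, MPI_INFO_NULL, &ncid)))
        ERR2(retval);
    if ((retval = ncmpi_redef(ncid)))  /* re-enter define mode */
        ERR2(retval);
    if ((retval = ncmpi_def_dim(ncid, "num_clusters", numClusters, &dimids[0])))
        ERR2(retval);
    if ((retval = ncmpi_def_dim(ncid, "num_coordinates", numCoords, &dimids[1])))
        ERR2(retval);
    if ((retval = ncmpi_def_dim(ncid, "num_elements", totalNumObjs, &dim_num_obj)))
        ERR2(retval);
    if ((retval = ncmpi_def_var(ncid, "clusters", NC_FLOAT, 2, dimids, &clusters_varid)))
        ERR2(retval);
    if ((retval = ncmpi_def_var(ncid, "membership", NC_INT, 1, &dim_num_obj, &membership_varid)))
        ERR2(retval);
    if ((retval = ncmpi_enddef(ncid)))
        ERR2(retval);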