Example #1
// Traj_NcEnsemble::readArray() //TODO RemdValues
int Traj_NcEnsemble::readArray(int set, FrameArray& f_ensemble) {
# ifdef HAS_PNETCDF
  MPI_Offset pstart_[4];
  MPI_Offset pcount_[4];
# define start_ pstart_
# define count_ pcount_
# endif
  start_[0] = set; // Frame
  start_[2] = 0;   // Atoms
  start_[3] = 0;   // XYZ
  count_[0] = 1;        // Frame
  count_[1] = 1;        // Ensemble
  count_[3] = 3;        // XYZ
  //rprintf("DEBUG: Reading frame %i\n", set+1);
  for (int member = ensembleStart_; member != ensembleEnd_; member++) {
#   ifdef MPI
    Frame& frm = f_ensemble[0];
#   else
    Frame& frm = f_ensemble[member];
#   endif
    start_[1] = member;   // Ensemble
    count_[2] = Ncatom(); // Atoms
    // Read Coords
#   ifdef HAS_PNETCDF
    if (checkPNCerr(ncmpi_get_vara_float_all(ncid_, coordVID_, start_, count_, Coord_)))
#   else
    if (NC::CheckErr(nc_get_vara_float(ncid_, coordVID_, start_, count_, Coord_)))
#   endif
    {
      rprinterr("Error: Getting coordinates for frame %i\n", set+1);
      return 1;
    }
    FloatToDouble(frm.xAddress(), Coord_);
    //mprintf("Frm=%8i Rep=%8i ", set+1, member+1); // DEBUG
    //frm.printAtomCoord(0); // DEBUG
    // Read Velocities
    if (velocityVID_ != -1) {
#     ifdef HAS_PNETCDF
      if (checkPNCerr(ncmpi_get_vara_float_all(ncid_, velocityVID_, start_, count_, Coord_)))
#     else
      if (NC::CheckErr(nc_get_vara_float(ncid_, velocityVID_, start_, count_, Coord_)))
#     endif
      {
        rprinterr("Error: Getting velocities for frame %i\n", set+1);
        return 1;
      }
      FloatToDouble(frm.vAddress(), Coord_);
    }
    // Read Box
    if (cellLengthVID_ != -1) {
      count_[2] = 3;
#     ifdef HAS_PNETCDF
      if (checkPNCerr(ncmpi_get_vara_double_all(ncid_, cellLengthVID_, start_, count_, frm.bAddress())))
#     else
      if (NC::CheckErr(nc_get_vara_double(ncid_, cellLengthVID_, start_, count_, frm.bAddress())))
#     endif
      {
        rprinterr("Error: Getting cell lengths for frame %i.\n", set+1);
        return 1;
      }
#     ifdef HAS_PNETCDF
      if (checkPNCerr(ncmpi_get_vara_double_all(ncid_, cellAngleVID_, start_, count_, frm.bAddress()+3)))
#     else
      if (NC::CheckErr(nc_get_vara_double(ncid_, cellAngleVID_, start_, count_, frm.bAddress()+3)))
#     endif
      {
        rprinterr("Error: Getting cell angles for frame %i.\n", set+1);
        return 1;
      }
    }
    // Read Temperature
    if (TempVID_!=-1) {
#     ifdef HAS_PNETCDF
      if (checkPNCerr(ncmpi_get_vara_double_all(ncid_, TempVID_, start_, count_, frm.tAddress())))
#     else
      if (NC::CheckErr(nc_get_vara_double(ncid_, TempVID_, start_, count_, frm.tAddress())))
#     endif
      {
        rprinterr("Error: Getting replica temperature for frame %i.\n", set+1);
        return 1;
      }
      //fprintf(stderr,"DEBUG: Replica Temperature %lf\n",F->T);
    }
    // Read indices
    if (indicesVID_!=-1) {
      count_[2] = remd_dimension_;
#     ifdef HAS_PNETCDF
      if (checkPNCerr(ncmpi_get_vara_int_all(ncid_, indicesVID_, start_, count_, frm.iAddress())))
#     else
      if (NC::CheckErr(nc_get_vara_int(ncid_, indicesVID_, start_, count_, frm.iAddress())))
#     endif
      {
        rprinterr("Error: Getting replica indices for frame %i.\n", set+1);
        return 1;
      }
      // DEBUG
      //char buffer[128];
      //char* ptr = buffer;
      //ptr += sprintf(buffer,"DEBUG:\tReplica indices:");
      //for (int dim=0; dim < remd_dimension_; dim++) ptr += sprintf(ptr, " %i", frm.RemdIndices()[dim]);
      //sprintf(ptr,"\n");
      //rprintf("%s", buffer);
    }
  }
# ifdef HAS_PNETCDF
# undef start_
# undef count_
# endif
  return 0;
}
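For reference, the collective read above can be reduced to a minimal standalone sketch. The file name "ensemble.nc", the variable name "coordinates", the dimension name "atom", and the frame/ensemble/atom/spatial layout are illustrative assumptions; only the PnetCDF calls themselves mirror the example.

/* Sketch: collectively read frame 0 of ensemble member 0 from a 4-D
 * float variable "coordinates" [frame][ensemble][atom][spatial].
 * All names are hypothetical. Build with: mpicc sketch.c -lpnetcdf */
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <pnetcdf.h>

#define CHECK(err) do { if ((err) != NC_NOERR) { \
    fprintf(stderr, "PnetCDF error: %s\n", ncmpi_strerror(err)); \
    MPI_Abort(MPI_COMM_WORLD, 1); } } while (0)

int main(int argc, char **argv) {
  int ncid, varid, dimid, err;
  MPI_Offset natom, start[4], count[4];
  float *coords;

  MPI_Init(&argc, &argv);
  err = ncmpi_open(MPI_COMM_WORLD, "ensemble.nc", NC_NOWRITE,
                   MPI_INFO_NULL, &ncid);              CHECK(err);
  err = ncmpi_inq_dimid(ncid, "atom", &dimid);         CHECK(err);
  err = ncmpi_inq_dimlen(ncid, dimid, &natom);         CHECK(err);
  err = ncmpi_inq_varid(ncid, "coordinates", &varid);  CHECK(err);

  start[0] = 0; start[1] = 0; start[2] = 0; start[3] = 0;
  count[0] = 1; count[1] = 1; count[2] = natom; count[3] = 3;
  coords = (float *)malloc(natom * 3 * sizeof(float));
  /* collective: every rank in the communicator must make this call */
  err = ncmpi_get_vara_float_all(ncid, varid, start, count, coords);
  CHECK(err);

  free(coords);
  err = ncmpi_close(ncid);                             CHECK(err);
  MPI_Finalize();
  return 0;
}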
Example #2
int main(int argc, char **argv) {

  int i, j;
  int status;
  int ncid1, ncid2;
  int ndims, nvars, ngatts, unlimdimid;
  char name[NC_MAX_NAME];
  nc_type type, vartypes[NC_MAX_VARS];
  MPI_Offset attlen;
  MPI_Offset dimlen, shape[NC_MAX_VAR_DIMS], varsize, start[NC_MAX_VAR_DIMS];
  void *valuep;
  int dimids[NC_MAX_DIMS], varids[NC_MAX_VARS];
  int vardims[NC_MAX_VARS][NC_MAX_VAR_DIMS/16]; /* NC_MAX_VAR_DIMS/16 keeps this stack array small enough to allocate */
  int varndims[NC_MAX_VARS], varnatts[NC_MAX_VARS];
  int isRecvar;
  params opts;

  int rank;
  int nprocs;
  MPI_Comm comm = MPI_COMM_WORLD;
  

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if (rank == 0)
    fprintf(stderr, "Testing read ... ");
  parse_read_args(argc, argv, rank, &opts);

  /**********  START OF NETCDF ACCESS **************/


  /* Read a netCDF file and write it out to another file */

  /**
   * Open the input dataset - ncid1:
   *   File name: "../data/test_float.nc"
   *   Dataset API: Collective
   * And create the output dataset - ncid2:
   *   File name: "testread.nc"
   *   Dataset API: Collective
   */

  status = ncmpi_open(comm, opts.infname, NC_NOWRITE, MPI_INFO_NULL, &ncid1);
  if (status != NC_NOERR) handle_error(status);

  status = ncmpi_create(comm, opts.outfname, NC_CLOBBER, MPI_INFO_NULL, &ncid2);
  if (status != NC_NOERR) handle_error(status);


  /**
   * Inquire the dataset definitions of input dataset AND
   * Add dataset definitions for output dataset.
   */

  status = ncmpi_inq(ncid1, &ndims, &nvars, &ngatts, &unlimdimid);
  if (status != NC_NOERR) handle_error(status);


  /* Inquire and copy the global attributes (CHAR through DOUBLE handled). */

  for (i = 0; i < ngatts; i++) {
    status = ncmpi_inq_attname(ncid1, NC_GLOBAL, i, name);
    if (status != NC_NOERR) handle_error(status);
    status = ncmpi_inq_att (ncid1, NC_GLOBAL, name, &type, &attlen);
    if (status != NC_NOERR) handle_error(status);
    switch (type) {
      case NC_CHAR: 
	valuep = (void *)malloc(attlen * sizeof(char));
	status = ncmpi_get_att_text(ncid1, NC_GLOBAL, name, valuep);
	if (status != NC_NOERR) handle_error(status);
	status = ncmpi_put_att_text (ncid2, NC_GLOBAL, name, attlen, (char *)valuep);
	if (status != NC_NOERR) handle_error(status);
	free(valuep);
        break;
      case NC_SHORT:
        valuep = (void *)malloc(attlen * sizeof(short));
        status = ncmpi_get_att_short(ncid1, NC_GLOBAL, name, valuep);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_put_att_short (ncid2, NC_GLOBAL, name, type, attlen, (short *)valuep);
        if (status != NC_NOERR) handle_error(status);
        free(valuep);
        break;
      case NC_INT:
        valuep = (void *)malloc(attlen * sizeof(int));
        status = ncmpi_get_att_int(ncid1, NC_GLOBAL, name, valuep);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_put_att_int (ncid2, NC_GLOBAL, name, type, attlen, (int *)valuep);
        if (status != NC_NOERR) handle_error(status);
        free(valuep);
        break;
      case NC_FLOAT:
        valuep = (void *)malloc(attlen * sizeof(float));
        status = ncmpi_get_att_float(ncid1, NC_GLOBAL, name, valuep);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_put_att_float (ncid2, NC_GLOBAL, name, type, attlen, (float *)valuep);
        if (status != NC_NOERR) handle_error(status);
        free(valuep);
        break;
      case NC_DOUBLE:
        valuep = (void *)malloc(attlen * sizeof(double));
        status = ncmpi_get_att_double(ncid1, NC_GLOBAL, name, valuep);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_put_att_double (ncid2, NC_GLOBAL, name, type, attlen, (double *)valuep);
        if (status != NC_NOERR) handle_error(status);
        free(valuep);
        break;
      default:
	;
	/* TODO: handle unexpected types */
    }
  }

  /* Inquire dimension */

  for (i = 0; i < ndims; i++) {
    status = ncmpi_inq_dim(ncid1, i, name, &dimlen);
    if (status != NC_NOERR) handle_error(status);
    if (i == unlimdimid)
      dimlen = NC_UNLIMITED;
    status = ncmpi_def_dim(ncid2, name, dimlen, dimids+i);
    if (status != NC_NOERR) handle_error(status);
  }

  /* Inquire variables */

  for (i = 0; i < nvars; i++) {
    status = ncmpi_inq_var (ncid1, i, name, vartypes+i, varndims+i, vardims[i], varnatts+i);
    if (status != NC_NOERR) handle_error(status);

    status = ncmpi_def_var(ncid2, name, vartypes[i], varndims[i], vardims[i], varids+i);
    if (status != NC_NOERR) handle_error(status);

    /* Inquire and copy this variable's attributes (CHAR through DOUBLE handled) */

    for (j = 0; j < varnatts[i]; j++) {
      status = ncmpi_inq_attname(ncid1, i, j, name);
      if (status != NC_NOERR) handle_error(status);
      status = ncmpi_inq_att (ncid1, i, name, &type, &attlen);
      if (status != NC_NOERR) handle_error(status);
      switch (type) {
        case NC_CHAR: 
	  valuep = (void *)malloc(attlen * sizeof(char));
	  status = ncmpi_get_att_text(ncid1, i, name, valuep);
	  if (status != NC_NOERR) handle_error(status);
	  status = ncmpi_put_att_text (ncid2, varids[i], name, attlen, (char *)valuep);
	  if (status != NC_NOERR) handle_error(status);
	  free(valuep);
          break;
        case NC_SHORT:
          valuep = (void *)malloc(attlen * sizeof(short));
          status = ncmpi_get_att_short(ncid1, i, name, valuep);
          if (status != NC_NOERR) handle_error(status);
          status = ncmpi_put_att_short (ncid2, varids[i], name, type, attlen, (short *)valuep);
          if (status != NC_NOERR) handle_error(status);
          free(valuep);
          break;
        case NC_INT:
          valuep = (void *)malloc(attlen * sizeof(int));
          status = ncmpi_get_att_int(ncid1, i, name, valuep);
          if (status != NC_NOERR) handle_error(status);
          status = ncmpi_put_att_int (ncid2, varids[i], name, type, attlen, (int *)valuep);
          if (status != NC_NOERR) handle_error(status);
          free(valuep);
          break;
        case NC_FLOAT:
          valuep = (void *)malloc(attlen * sizeof(float));
          status = ncmpi_get_att_float(ncid1, i, name, valuep);
          if (status != NC_NOERR) handle_error(status);
          status = ncmpi_put_att_float (ncid2, varids[i], name, type, attlen, (float *)valuep);
          if (status != NC_NOERR) handle_error(status);
          free(valuep);
          break;
        case NC_DOUBLE:
          valuep = (void *)malloc(attlen * sizeof(double));
          status = ncmpi_get_att_double(ncid1, i, name, valuep);
          if (status != NC_NOERR) handle_error(status);
          status = ncmpi_put_att_double (ncid2, varids[i], name, type, attlen, (double *)valuep);
          if (status != NC_NOERR) handle_error(status);
          free(valuep);
          break;
	default:
	  ; /* TODO: handle unexpected types */
      }
    }
  }

  /**
   * End Define Mode (switch to data mode) for output dataset
   *   Dataset API: Collective
   */

  status = ncmpi_enddef(ncid2);
  if (status != NC_NOERR) handle_error(status);

  /**
   * Read data of variables from input dataset 
   * (ONLY DEAL WITH: NC_INT, NC_FLOAT, NC_DOUBLE for now)
   * Write the data out to the corresponding variables in the output dataset
   *
   *  Data Partition (Assume 4 processors):
   *   square: 2-D, (Block, *), 25*100 from 100*100
   *   cube:   3-D, (Block, *, *), 25*100*100 from 100*100*100
   *   xytime: 3-D, (Block, *, *), 25*100*100 from 100*100*100
   *   time:   1-D, Block-wise, 25 from 100
   *
   *  Data Mode API: collective
   */

  for (i = 0; i < NC_MAX_VAR_DIMS; i++)
    start[i] = 0;
  for (i = 0; i < nvars; i++) {
    isRecvar = 0;
    varsize = 1;
    for (j = 0; j < varndims[i]; j++) {
      status = ncmpi_inq_dim(ncid1, vardims[i][j], name, shape + j);
      if (status != NC_NOERR) handle_error(status);
      if (j == 0) {
        /* block-partition the first dimension across processes;
           assumes it divides evenly by nprocs */
        shape[j] /= nprocs;
        start[j] = shape[j] * rank;
      }
      varsize *= shape[j];
      if (vardims[i][j] == unlimdimid)
	isRecvar = 1;
    }
    switch (vartypes[i]) {
      case NC_CHAR: 
        break;
      case NC_SHORT:
        valuep = (void *)malloc(varsize * sizeof(short));
        status = ncmpi_get_vara_short_all(ncid1, i, start, shape, (short *)valuep);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_put_vara_short_all(ncid2, varids[i],
                                     start, shape, (short *)valuep);
        if (status != NC_NOERR) handle_error(status);
        free(valuep);
        break;
      case NC_INT:
	valuep = (void *)malloc(varsize * sizeof(int));
        status = ncmpi_get_vara_int_all(ncid1, i, start, shape, (int *)valuep);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_put_vara_int_all(ncid2, varids[i],
                                     start, shape, (int *)valuep);
        if (status != NC_NOERR) handle_error(status);
        free(valuep);
	break;
      case NC_FLOAT:
        valuep = (void *)malloc(varsize * sizeof(float));
        status = ncmpi_get_vara_float_all(ncid1, i, start, shape, (float *)valuep);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_put_vara_float_all(ncid2, varids[i],
                                     start, shape, (float *)valuep);
        if (status != NC_NOERR) handle_error(status);
        free(valuep);
        break;
      case NC_DOUBLE:
        valuep = (void *)malloc(varsize * sizeof(double));
        status = ncmpi_get_vara_double_all(ncid1, i, start, shape, (double *)valuep);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_put_vara_double_all(ncid2, varids[i],
                                     start, shape, (double *)valuep);
        if (status != NC_NOERR) handle_error(status);
        free(valuep);
        break;
      default:
	; /* TODO: handle unexpected types */
    }
  }

  /**
   * Close the datasets
   *   Dataset API:  collective
   */

  status = ncmpi_close(ncid1);
  if (status != NC_NOERR) handle_error(status);
  status = ncmpi_close(ncid2);
  if (status != NC_NOERR) handle_error(status);

  /*******************  END OF NETCDF ACCESS  ****************/

  if (rank == 0)
    fprintf(stderr, "OK\nInput file %s copied to: %s!\n", opts.infname, opts.outfname);

  MPI_Finalize();
  return 0;
}
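Example #2 calls a handle_error() helper that is not shown on this page. A minimal sketch of such a helper (the test suite's actual definition may differ):

#include <stdio.h>
#include <mpi.h>
#include <pnetcdf.h>

/* Hypothetical sketch of the undefined handle_error() helper:
 * print the PnetCDF error string, then abort all ranks. */
static void handle_error(int status) {
    fprintf(stderr, "PnetCDF error: %s\n", ncmpi_strerror(status));
    MPI_Abort(MPI_COMM_WORLD, 1);
}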
Example #3
File: io.c  Project: gcasey/cosmotools
/*
  reads input in pnetcdf format

  nblocks: (output) local number of blocks
  tot_blocks: (output) total number of blocks
  vblocks: (output) pointer to array of vblocks
  in_file: input file name
  comm: MPI communicator
  gids: (output) gids of local blocks (allocated by this function)
  num_neighbors: (output) number of neighbors for each local block
   (allocated by this function)
  neighbors: (output) gids of neighbors of each local block
   (allocated by this function)

  side effects: allocates vblocks, gids, num_neighbors, neighbors

*/
void pnetcdf_read(int *nblocks, int *tot_blocks, struct vblock_t ***vblocks,
      char *in_file, MPI_Comm comm, int **gids, int **num_neighbors,
      int ***neighbors) {

#ifdef USEPNETCDF
  int err;
  int ncid, varids[23], dimids[8];
  MPI_Offset start[2], count[2];
  nc_type type;
  int ndims, natts;
  int dims[2];
  int rank, groupsize; /* MPI usual */

  /* open file for reading */
  err = ncmpi_open(comm, in_file, NC_NOWRITE, MPI_INFO_NULL, &ncid); ERR;

  err = ncmpi_inq_varid(ncid, "block_off_num_verts", &varids[5]); ERR;
  err = ncmpi_inq_varid(ncid, "block_off_num_complete_cells", &varids[6]); ERR;
  err = ncmpi_inq_varid(ncid, "block_off_tot_num_cell_faces", &varids[7]); ERR;
  err = ncmpi_inq_varid(ncid, "block_off_tot_num_face_verts", &varids[8]); ERR;
  err = ncmpi_inq_varid(ncid, "block_off_num_orig_particles", &varids[9]); ERR;

  /* get number of blocks */
  MPI_Offset num_g_blocks; /* 64-bit version of tot_blocks */
  /* NOTE: dimids[0] is assumed to identify the global block dimension;
     the inquiry that fills dimids is not shown in this excerpt */
  err = ncmpi_inq_dimlen(ncid, dimids[0], &num_g_blocks); ERR;
  *tot_blocks = num_g_blocks;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &groupsize);
  int start_block_ofst =  rank * (*tot_blocks / groupsize);
  *nblocks = (rank < groupsize - 1 ? (*tot_blocks / groupsize) :
        *tot_blocks - (rank * *tot_blocks / groupsize));

  /* block offsets */
  int64_t *block_ofsts = (int64_t*)malloc(*tot_blocks * sizeof(int64_t));
  *vblocks = (struct vblock_t**)malloc(*nblocks * sizeof(struct vblock_t*));

  /* read all blocks */
  *gids = (int *)malloc(*nblocks * sizeof(int));
  *num_neighbors = (int *)malloc(*nblocks * sizeof(int));
  *neighbors = (int **)malloc(*nblocks * sizeof(int *));
  int b;
  for (b = 0; b < *nblocks; b++) {

    struct vblock_t* v = (struct vblock_t*)malloc(sizeof(struct vblock_t));

    /* quantities */
    start[0] = start_block_ofst + b;
    count[0] = 1;
    err = ncmpi_inq_varid(ncid, "num_verts", &varids[0]); ERR;
    err = ncmpi_get_vara_int_all(ncid, varids[0], start, count,
         &(v->num_verts)); ERR;
    err = ncmpi_inq_varid(ncid, "num_complete_cells", &varids[1]); ERR;
    err = ncmpi_get_vara_int_all(ncid, varids[1], start, count,
         &(v->num_complete_cells)); ERR;
    err = ncmpi_inq_varid(ncid, "tot_num_cell_faces", &varids[2]); ERR;
    err = ncmpi_get_vara_int_all(ncid, varids[2], start, count,
         &(v->tot_num_cell_faces)); ERR;
    err = ncmpi_inq_varid(ncid, "tot_num_face_verts", &varids[3]); ERR;
    err = ncmpi_get_vara_int_all(ncid, varids[3], start, count,
         &(v->tot_num_face_verts)); ERR;
    err = ncmpi_inq_varid(ncid, "num_orig_particles", &varids[4]); ERR;
    err = ncmpi_get_vara_int_all(ncid, varids[4], start, count,
         &(v->num_orig_particles)); ERR;
    err = ncmpi_inq_varid(ncid, "neighbors", &varids[21]); ERR;
    err = ncmpi_inq_var(ncid, varids[21], 0, &type, &ndims,
      dimids, &natts); ERR;

    /* block bounds */
    start[0] = start_block_ofst + b;
    start[1] = 0;
    count[0] = 1;
    count[1] = 3;
    err = ncmpi_inq_varid(ncid, "mins", &varids[11]); ERR;
    err = ncmpi_get_vara_float_all(ncid, varids[11], start, count,
           v->mins); ERR;
    err = ncmpi_inq_varid(ncid, "maxs", &varids[12]); ERR;
    err = ncmpi_get_vara_float_all(ncid, varids[12], start, count,
           v->maxs); ERR;

    /* save_verts */
    start[0] = 0;
    count[0] = *tot_blocks;
    err = ncmpi_get_vara_longlong_all(ncid, varids[5], start, count,
              block_ofsts); ERR;
    v->save_verts = (float *)malloc(v->num_verts * 3 * sizeof(float));
    start[0] = block_ofsts[start_block_ofst + b];
    start[1] = 0;
    count[0] = v->num_verts;
    count[1] = 3;
    err = ncmpi_inq_varid(ncid, "save_verts", &varids[13]); ERR;
    err = ncmpi_get_vara_float_all(ncid, varids[13], start, count,
           v->save_verts); ERR;

    /* sites */
    start[0] = 0;
    count[0] = *tot_blocks;
    err = ncmpi_get_vara_longlong_all(ncid, varids[9], start, count,
              block_ofsts); ERR;
    v->sites = (float *)malloc(v->num_orig_particles * 3 * sizeof(float));
    start[0] = block_ofsts[start_block_ofst + b];
    start[1] = 0;
    count[0] = v->num_orig_particles;
    count[1] = 3;
    err = ncmpi_inq_varid(ncid, "sites", &varids[14]); ERR;
    err = ncmpi_get_vara_float_all(ncid, varids[14], start, count,
           v->sites); ERR;

    /* complete cells */
    start[0] = 0;
    count[0] = *tot_blocks;
    err = ncmpi_get_vara_longlong_all(ncid, varids[6], start, count,
              block_ofsts); ERR;
    v->complete_cells = (int *)malloc(v->num_complete_cells * sizeof(int));
    start[0] = block_ofsts[start_block_ofst + b];
    count[0] = v->num_complete_cells;
    err = ncmpi_inq_varid(ncid, "complete_cells", &varids[15]); ERR;
    err = ncmpi_get_vara_int_all(ncid, varids[15], start, count,
         v->complete_cells); ERR;

    /* areas, uses same block offsets as complete cells */
    v->areas = (float *)malloc(v->num_complete_cells * sizeof(float));
    start[0] = block_ofsts[start_block_ofst + b];
    count[0] = v->num_complete_cells;
    err = ncmpi_inq_varid(ncid, "areas", &varids[16]); ERR;
    err = ncmpi_get_vara_float_all(ncid, varids[16], start, count,
           v->areas); ERR;

    /* volumes, uses same block offsets as complete cells */
    v->vols = (float *)malloc(v->num_complete_cells * sizeof(float));
    start[0] = block_ofsts[start_block_ofst + b];
    count[0] = v->num_complete_cells;
    err = ncmpi_inq_varid(ncid, "vols", &varids[17]); ERR;
    err = ncmpi_get_vara_float_all(ncid, varids[17], start, count,
           v->vols); ERR;

    /* num_cell_faces, uses same block offsets as complete cells */
    v->num_cell_faces = (int *)malloc(v->num_complete_cells * sizeof(int));
    start[0] = block_ofsts[start_block_ofst + b];
    count[0] = v->num_complete_cells;
    err = ncmpi_inq_varid(ncid, "num_cell_faces", &varids[18]); ERR;
    err = ncmpi_get_vara_int_all(ncid, varids[18], start, count,
         v->num_cell_faces); ERR;

    /* num_face_verts */
    start[0] = 0;
    count[0] = *tot_blocks;
    err = ncmpi_get_vara_longlong_all(ncid, varids[7], start, count,
              block_ofsts); ERR;
    v->num_face_verts = (int *)malloc(v->tot_num_cell_faces * sizeof(int));
    start[0] = block_ofsts[start_block_ofst + b];
    count[0] = v->tot_num_cell_faces;
    err = ncmpi_inq_varid(ncid, "num_face_verts", &varids[19]); ERR;
    err = ncmpi_get_vara_int_all(ncid, varids[19], start, count,
         v->num_face_verts); ERR;

    /* face_verts */
    start[0] = 0;
    count[0] = *tot_blocks;
    err = ncmpi_get_vara_longlong_all(ncid, varids[8], start, count,
              block_ofsts); ERR;
    v->face_verts = (int *)malloc(v->tot_num_face_verts * sizeof(int));
    start[0] = block_ofsts[start_block_ofst + b];
    count[0] = v->tot_num_face_verts;
    err = ncmpi_inq_varid(ncid, "face_verts", &varids[20]); ERR;
    err = ncmpi_get_vara_int_all(ncid, varids[20], start, count,
         v->face_verts); ERR;

    /* neighbors */
    MPI_Offset n; /* temporary 64-bit version of number of neighbors */
    err = ncmpi_inq_varid(ncid, "neighbors", &varids[21]); ERR;
    err = ncmpi_inq_var(ncid, varids[21], 0, &type, &ndims,
      dims, &natts); ERR;
    err = ncmpi_inq_dimlen(ncid, dims[0], &n); ERR;
    (*num_neighbors)[b] = n;
    (*neighbors)[b] = (int *)malloc(n * sizeof(int));
    start[0] = start_block_ofst + b;
    count[0] = (*num_neighbors)[b];
    err = ncmpi_get_vara_int_all(ncid, varids[21], start, count,
         (*neighbors)[b]); ERR;

    /* gids */
    start[0] = start_block_ofst + b;
    count[0] = 1;
    err = ncmpi_inq_varid(ncid, "g_block_ids", &varids[22]); ERR;
    err = ncmpi_get_vara_int_all(ncid, varids[22], start, count,
         &((*gids)[b])); ERR;

    (*vblocks)[b] = v;

  }

  /* cleanup */
  err = ncmpi_close(ncid); ERR;
  free(block_ofsts);
#endif
}
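The ERR statement that follows every PnetCDF call in this example is a macro defined elsewhere in the project. A plausible minimal definition (hypothetical; the project's actual macro may differ):

/* Hypothetical sketch of the ERR macro used above: report a failing
 * PnetCDF call with file/line context; call sites supply the ';'.
 * Assumes <stdio.h> and <pnetcdf.h> are included. */
#define ERR \
    if (err != NC_NOERR) \
        fprintf(stderr, "%s:%d: PnetCDF error: %s\n", \
                __FILE__, __LINE__, ncmpi_strerror(err))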
Example #4
static
int get_var_and_verify(int ncid,
                       int varid,
                       MPI_Offset *start,
                       MPI_Offset *count,
                       int **buf,
                       MPI_Datatype buftype,
                       MPI_Datatype ghost_buftype,
                       MPI_Datatype filetype)
{
    int i, j, rank, err, *ncbuf, nerrs=0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    ncbuf = (int *) malloc((count[0]+4)*(count[1]+4)*sizeof(int));

    /* clear the contents of the read buffer */
    for (j=0; j<count[0]; j++) for (i=0; i<count[1]; i++) buf[j][i] = -1;

    /* read back using regular vara API */
    err = ncmpi_get_vara_int_all(ncid, varid, start, count, buf[0]); ERR

    /* check if the contents of buf are expected */
    CHECK_VALUE_PERMUTED

    /* clear the contents of the read buffer */
    for (j=0; j<count[0]; j++) for (i=0; i<count[1]; i++) buf[j][i] = -1;

    /* read back using flexible vara API */
    err = ncmpi_get_vara_all(ncid, varid, start, count, buf[1], 1, buftype); ERR

    /* check if the contents of buf are expected */
    CHECK_VALUE

    /* clear the contents of the read buffer */
    for (j=0; j<count[0]; j++) for (i=0; i<count[1]; i++) buf[j][i] = -1;

    /* read back using vard API and permuted buftype */
    err = ncmpi_get_vard_all(ncid, varid, filetype, buf[1], 1, buftype); ERR

    /* check if the contents of buf are expected */
    CHECK_VALUE

    /* clear the contents of the read buffer */
    for (j=0; j<count[0]; j++) for (i=0; i<count[1]; i++) buf[j][i] = -1;

    /* read back using vard API and no buftype */
    err = ncmpi_get_vard_all(ncid, varid, filetype, buf[0], 0, MPI_DATATYPE_NULL); ERR

    /* check if the contents of buf are expected */
    CHECK_VALUE_PERMUTED

    /* clear the contents of the read buffer */
    for (i=0; i<(count[0]+4)*(count[1]+4); i++) ncbuf[i] = -1;

    /* read back using ghost buftype */
    err = ncmpi_get_vard_all(ncid, varid, filetype, ncbuf, 1, ghost_buftype); ERR

    for (j=0; j<count[0]; j++) {
        for (i=0; i<count[1]; i++)
            if (buf[j][i] != ncbuf[(j+2)*(count[1]+4)+(i+2)]) {
                printf("Error at line %d: expecting ncbuf[%d][%d]=%d but got %d\n",
                       __LINE__,j,i,buf[j][i],ncbuf[(j+2)*(count[1]+4)+(i+2)]);
                nerrs++;
            }
    }
    free(ncbuf);
    return nerrs;
}
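/* Note: get_var_and_verify() reads count[0]*count[1] values starting at
 * buf[0], so buf must be an array of row pointers into ONE contiguous
 * block. A hypothetical helper (not part of the original test; assumes
 * <stdlib.h> and <mpi.h> are included) that allocates such a buffer: */
static int **alloc_2d_int(MPI_Offset nrows, MPI_Offset ncols) {
    int **buf = (int **)malloc(nrows * sizeof(int *));
    buf[0] = (int *)malloc(nrows * ncols * sizeof(int));
    for (MPI_Offset j = 1; j < nrows; j++)
        buf[j] = buf[0] + j * ncols; /* rows alias the contiguous block */
    return buf;
}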
int main(int argc, char **argv) {

    int i, j, rank, nprocs, ret;
    int ncfile, ndims, nvars, ngatts, unlimited;
    int var_ndims, var_natts;
    MPI_Offset *dim_sizes, var_size;
    MPI_Offset *start, *count;
    char varname[NC_MAX_NAME+1];
    int dimids[NC_MAX_VAR_DIMS];
    nc_type type;
    int *data = NULL;

    MPI_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    if (argc != 2) {
        if (rank == 0) printf("Usage: %s filename\n", argv[0]);
        MPI_Finalize();
        exit(-1);
    }

    ret = ncmpi_open(MPI_COMM_WORLD, argv[1], NC_NOWRITE, MPI_INFO_NULL,
                     &ncfile);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);

    /* reader knows nothing about dataset, but we can interrogate with query
     * routines: ncmpi_inq tells us how many of each kind of "thing"
     * (dimension, variable, attribute) we will find in the file  */

    /* no communication needed after ncmpi_open: all processors have a cached
     * view of the metadata once ncmpi_open returns */

    ret = ncmpi_inq(ncfile, &ndims, &nvars, &ngatts, &unlimited);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);

    /* we do not really need the name of the dimension or the variable for
     * reading in this example.  we could, in a different example, take the
     * name of a variable on the command line and read just that one */

    dim_sizes = calloc(ndims, sizeof(MPI_Offset));
    /* netcdf dimension identifiers are allocated sequentially starting
     * at zero; same for variable identifiers */
    for(i=0; i<ndims; i++)  {
        ret = ncmpi_inq_dimlen(ncfile, i, &(dim_sizes[i]) );
        if (ret != NC_NOERR) handle_error(ret, __LINE__);
    }

    for(i=0; i<nvars; i++) { 
        /* much less coordination in this case compared to rank 0 doing all
         * the i/o: everyone already has the necessary information */
        ret = ncmpi_inq_var(ncfile, i, varname, &type, &var_ndims, dimids,
                &var_natts);
        if (ret != NC_NOERR) handle_error(ret, __LINE__);

        start = calloc(var_ndims, sizeof(MPI_Offset));
        count = calloc(var_ndims, sizeof(MPI_Offset));

        /* we will simply decompose along one dimension.  Generally the
         * application has some algorithm for domain decomposition.  Note
         * that data decomposition can have an impact on i/o performance.
         * Often it's best just to do what is natural for the application,
         * but it is something to consider if performance is not what was
         * expected/desired.  (This simple scheme assumes the first dimension
         * divides evenly by nprocs.) */

        start[0] = (dim_sizes[dimids[0]]/nprocs)*rank;
        count[0] = (dim_sizes[dimids[0]]/nprocs);
        var_size = count[0];

        for (j=1; j<var_ndims; j++) {
            start[j] = 0;
            count[j] = dim_sizes[dimids[j]];
            var_size *= count[j];
        }

        switch(type) {
            case NC_INT:
                data = calloc(var_size, sizeof(int));
                ret = ncmpi_get_vara_int_all(ncfile, i, start, count, data);
                if (ret != NC_NOERR) handle_error(ret, __LINE__);
                break;
            default:
                /* we can do this for all the known netcdf types but this
                 * example is already getting too long  */
                fprintf(stderr, "unsupported NetCDF type \n");
        }

        free(start);
        free(count);
        if (data != NULL) { free(data); data = NULL; }
    }

    ret = ncmpi_close(ncfile);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);

    MPI_Finalize();
    return 0;
}
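Like Example #2, this program relies on an externally defined handle_error() helper, here taking the failing source line as a second argument. A minimal sketch (hypothetical; the original may differ):

#include <stdio.h>
#include <mpi.h>
#include <pnetcdf.h>

/* Hypothetical sketch of the two-argument handle_error() used above. */
static void handle_error(int status, int line) {
    fprintf(stderr, "Error at line %d: %s\n", line, ncmpi_strerror(status));
    MPI_Abort(MPI_COMM_WORLD, 1);
}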