Code example #1
File: Traj_AmberNetcdf.cpp  Project: SAMAN-64/cpptraj
int Traj_AmberNetcdf::parallelOpenTrajout(Parallel::Comm const& commIn) {
  if (Ncid() != -1) return 0;
  int err = ncmpi_open(commIn.MPIcomm(), filename_.full(), NC_WRITE, MPI_INFO_NULL, &ncid_);
  if (checkPNCerr(err)) {
    mprinterr("Error: Opening NetCDF file '%s' for writing in parallel.\n", filename_.full());
    return 1;
  }
  err = ncmpi_begin_indep_data( ncid_ ); // Independent data mode
  if (checkPNCerr(err)) {
    mprinterr("Error: Entering independent data mode for '%s'.\n", filename_.full());
    return 1;
  }
  return 0;
}
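
The excerpt above enters independent data mode right after the parallel open, but the matching ncmpi_end_indep_data call sits outside the excerpt. The following is only a minimal self-contained sketch of the usual pairing, assuming nothing beyond the standard PnetCDF C API; the file name "out.nc" and the function name open_indep_sketch are hypothetical.

#include <stdio.h>
#include <mpi.h>
#include <pnetcdf.h>

/* Sketch only: open an existing file for parallel writing, switch to
 * independent data mode, and switch back before closing. */
static int open_indep_sketch(MPI_Comm comm)
{
    int ncid, err;

    err = ncmpi_open(comm, "out.nc", NC_WRITE, MPI_INFO_NULL, &ncid);
    if (err != NC_NOERR) {
        fprintf(stderr, "ncmpi_open: %s\n", ncmpi_strerror(err));
        return 1;
    }

    err = ncmpi_begin_indep_data(ncid);   /* leave collective data mode */
    if (err != NC_NOERR) {
        fprintf(stderr, "ncmpi_begin_indep_data: %s\n", ncmpi_strerror(err));
        ncmpi_close(ncid);
        return 1;
    }

    /* ... independent put/get calls (e.g. ncmpi_put_var_float) would go here ... */

    err = ncmpi_end_indep_data(ncid);     /* back to collective data mode */
    if (err != NC_NOERR) {
        fprintf(stderr, "ncmpi_end_indep_data: %s\n", ncmpi_strerror(err));
        ncmpi_close(ncid);
        return 1;
    }
    return (ncmpi_close(ncid) == NC_NOERR) ? 0 : 1;
}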
Code example #2
File: nc5dispatch.c  Project: syntheticpp/netcdf-c
static int
NC5_open(const char *path, int cmode,
	    int basepe, size_t *chunksizehintp,
	    int use_parallel, void* mpidata,
	    struct NC_Dispatch* table, NC* nc)
{
    int res;
    NC5_INFO* nc5;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;

    /* Check the cmode for only valid flags*/
    if(cmode & ~LEGAL_OPEN_FLAGS)
	return NC_EINVAL;

    /* Cannot have both MPIO flags */
    if((cmode & (NC_MPIIO|NC_MPIPOSIX)) == (NC_MPIIO|NC_MPIPOSIX))
	return NC_EINVAL;

    /* Appears that this comment is wrong; allow 64 bit offset*/
    /* Cannot have 64 bit offset flag */
    /* if(cmode & (NC_64BIT_OFFSET)) return NC_EINVAL; */

    if(mpidata != NULL) {
        comm = ((NC_MPI_INFO *)mpidata)->comm;
        info = ((NC_MPI_INFO *)mpidata)->info;
    } else {
	comm = MPI_COMM_WORLD;
	info = MPI_INFO_NULL;
    }

    /* Fix up the cmode by keeping only essential flags;
       these are the flags that are the same in netcdf.h and pnetcdf.h
    */
    cmode &= (NC_WRITE | NC_NOCLOBBER | NC_LOCK | NC_SHARE | NC_64BIT_OFFSET);

    cmode |= (NC_NETCDF4); /* see comment in NC5_create */

    /* Create our specific NC5_INFO instance */
    nc5 = (NC5_INFO*)calloc(1,sizeof(NC5_INFO));
    if(nc5 == NULL) return NC_ENOMEM;

    /* Link nc5 and nc */
    NC5_DATA_SET(nc,nc5);

    res = ncmpi_open(comm, path, cmode, info, &(nc->int_ncid));

    /* Default to independent access, like netCDF-4/HDF5 files. */
    if(!res) {
	res = ncmpi_begin_indep_data(nc->int_ncid);
	nc5->pnetcdf_access_mode = NC_INDEPENDENT;
    }

    return res;
}
Code example #3
File: tst_small.c  Project: LaHaine/ohpc
/* Test a small file with two record vars, which grow, and has
 * attributes added. */
static int
test_two_growing_with_att(const char *testfile, int cmode)
{
   int err, ncid, dimid, varid[NUM_VARS];
   char data[MAX_RECS], data_in;
   char att_name[NC_MAX_NAME + 1];
   MPI_Offset start[ONE_DIM], count[ONE_DIM], index[ONE_DIM], len_in;
   int v, r;

   /* Create a file with one unlimited dimension and two vars. */
   err=ncmpi_create(MPI_COMM_WORLD, testfile,cmode, MPI_INFO_NULL, &ncid); ERR
   err=ncmpi_def_dim(ncid, DIM1_NAME, NC_UNLIMITED, &dimid); ERR
   err=ncmpi_def_var(ncid, VAR_NAME, NC_CHAR, 1, &dimid, &varid[0]); ERR
   err=ncmpi_def_var(ncid, VAR_NAME2, NC_CHAR, 1, &dimid, &varid[1]); ERR
   err=ncmpi_close(ncid); ERR

   /* Create some phoney data. */
   for (data[0] = 'a', r = 1; r < MAX_RECS; r++)
      data[r] = data[r - 1] + 1;

   /* Normally one would not close and reopen the file for each
    * record, nor add an attribute each time I add a record, but I am
    * giving the library a little work-out here... */
   for (r = 0; r < MAX_RECS; r++)
   {
      /* Write one record of var data, a single character. */
      err=ncmpi_open(MPI_COMM_WORLD, testfile, NC_WRITE, MPI_INFO_NULL, &ncid); ERR
      count[0] = 1;
      start[0] = r;
      sprintf(att_name, "a_%d", data[r]);
      for (v = 0; v < NUM_VARS; v++)
      {
	 err=ncmpi_put_vara_text_all(ncid, varid[v], start, count, &data[r]); ERR
	 err=ncmpi_redef(ncid); ERR
	 err=ncmpi_put_att_text(ncid, varid[v], att_name, 1, &data[r]); ERR
	 err=ncmpi_enddef(ncid); ERR
      }
      err=ncmpi_close(ncid); ERR
      
      /* Reopen the file and check it. */
      err=ncmpi_open(MPI_COMM_WORLD, testfile, NC_NOWRITE, MPI_INFO_NULL, &ncid); ERR
      err=ncmpi_inq_dimlen(ncid, 0, &len_in); ERR
      if (len_in != r + 1) {printf("Error at line %d\n",__LINE__);return 1;}
      index[0] = r;
      err=ncmpi_begin_indep_data(ncid); ERR
      for (v = 0; v < NUM_VARS; v++)
      {
	 err=ncmpi_get_var1_text(ncid, varid[v], index, &data_in); ERR
	 if (data_in != data[r]) {printf("Error at line %d\n",__LINE__);return 1;}
      }
      err=ncmpi_close(ncid); ERR
   } /* Next record. */
   return 0;
}
Code example #4
File: pnetcdf-to-idx.c  Project: spetruzza/PIDX
//----------------------------------------------------------------
// Return a negative value when failed, otherwise return 0
int read_var_from_netcdf(int file_id, const char *var_name, struct Type type)
{
  int TIMES=1;
  int LATS=local_box_size[1];
  int LONS=local_box_size[0];
  assert(var_name != 0);
  assert(var_data != 0);
  int varidp,ndims,nvars,ngatts,unlimited;
  nc_type xtypep;
//int dataset_id = H5Dopen2(file_id, var_name, H5P_DEFAULT);
 
  int dataset_id = ncmpi_inq_varid(file_id, var_name, &varidp);
  MPI_Offset start[]={local_box_offset[2],local_box_offset[1],local_box_offset[0]};
  MPI_Offset count[]={TIMES,LATS,LONS};

  dataset_id = ncmpi_inq_vartype(file_id,varidp, &xtypep);


//  if (dataset_id !=0)
//    terminate_with_error_msg("ERROR: Failed to open NetCDF dataset for variable %s\n", var_name);

  int read_error = 0;

  if (type.atomic_type == DOUBLE)
    read_error = ncmpi_get_vara_double(file_id, varidp, start, count, var_data);
  else if (type.atomic_type == FLOAT) {
    ncmpi_begin_indep_data(file_id);
    read_error = ncmpi_get_vara_float(file_id, varidp, start, count, (float *)var_data);
    ncmpi_end_indep_data(file_id);
    if (read_error != NC_NOERR)
      terminate_with_error_msg("ERROR: Can not read the data for the variable %s \n", var_name);
  }
  else if (type.atomic_type == INT)
    read_error = ncmpi_get_vara_int(file_id, varidp, start, count, var_data);
//  else if (type.atomic_type == UINT)
//    read_error = ncmpi_get_vara_uint(file_id, varidp, start, count, var_data);

//  else if (type.atomic_type == CHAR)
//    read_error = ncmpi_get_vara_char(file_id, varidp, start, count, var_data);

  else if (type.atomic_type == UCHAR)
    read_error = ncmpi_get_vara_uchar(file_id, varidp, start, count, var_data);
  else
    terminate_with_error_msg("ERROR: Unsupported type. Type = %d\n", type.atomic_type);

  if (read_error < 0)
    return -1;
  return 0;
}
Code example #5
File: tst_small.c  Project: LaHaine/ohpc
/* Test a small file with one record var, which grows. */
static int
test_one_growing(const char *testfile, int cmode)
{
   int err, ncid, dimid, varid;
   char data[MAX_RECS], data_in;
   MPI_Offset start[ONE_DIM], count[ONE_DIM], index[ONE_DIM], len_in;
   int r, f;

   /* Create some phoney data. */
   for (data[0] = 'a', r = 1; r < MAX_RECS; r++)
      data[r] = data[r - 1] + 1;

   /* Run this with and without fill mode. */
   for (f = 0; f < 2; f++)
   {
      /* Create a file with one unlimited dimension, and one var. */
      err=ncmpi_create(MPI_COMM_WORLD, testfile,cmode, MPI_INFO_NULL, &ncid); ERR
      err=ncmpi_def_dim(ncid, DIM1_NAME, NC_UNLIMITED, &dimid); ERR
      err=ncmpi_def_var(ncid, VAR_NAME, NC_CHAR, 1, &dimid, &varid); ERR
      err=ncmpi_close(ncid); ERR

      /* Normally one would not close and reopen the file for each
       * record, but I am giving the library a little work-out here... */
      for (r = 0; r < MAX_RECS; r++)
      {
	 /* Write one record of var data, a single character. */
         err=ncmpi_open(MPI_COMM_WORLD, testfile, NC_WRITE, MPI_INFO_NULL, &ncid); ERR
	 /* if (f) { err=ncmpi_set_fill(ncid, NC_NOFILL, NULL); ERR} */
	 count[0] = 1;
	 start[0] = r;
	 err=ncmpi_put_vara_text_all(ncid, varid, start, count, &data[r]); ERR
	 err=ncmpi_close(ncid); ERR
      
	 /* Reopen the file and check it. */
         err=ncmpi_open(MPI_COMM_WORLD, testfile, NC_NOWRITE, MPI_INFO_NULL, &ncid); ERR
	 err=ncmpi_inq_dimlen(ncid, 0, &len_in); ERR
	 if (len_in != r + 1) {printf("Error at line %d\n",__LINE__);return 1;}
	 index[0] = r;
	 err=ncmpi_begin_indep_data(ncid); ERR
	 err=ncmpi_get_var1_text(ncid, 0, index, &data_in); ERR
	 if (data_in != data[r]) {printf("Error at line %d\n",__LINE__);return 1;}
	 err=ncmpi_close(ncid); ERR
      } /* Next record. */
   }
   return 0;
}
Code example #6
File: bil_pio.c  Project: ChengLiOSU/OSUFlow
void BIL_Pio_read_nc_blocks(MPI_Comm all_readers_comm, MPI_Comm io_comm,
                            int num_blocks, BIL_Block* blocks) {
  int i;
  for (i = 0; i < num_blocks; i++) {
    int fp;
    BIL_Timing_fopen_start(all_readers_comm);
    assert(ncmpi_open(io_comm, blocks[i].file_name, NC_NOWRITE,
           BIL->io_hints, &fp) == NC_NOERR);
    BIL_Timing_fopen_stop(all_readers_comm);
  
    ncmpi_begin_indep_data(fp);
  
    // Find the id, type, and size of the variable.
    int var_id;
    assert(ncmpi_inq_varid(fp, blocks[i].var_name, &var_id) == NC_NOERR);
    nc_type var_type;
    assert(ncmpi_inq_vartype(fp, var_id, &var_type) == NC_NOERR);
  
    // Create extra variables specifically for the netCDF API.
    MPI_Offset nc_dim_starts[BIL_MAX_NUM_DIMS];
    MPI_Offset nc_dim_sizes[BIL_MAX_NUM_DIMS];
    int j;
    for (j = 0; j < blocks[i].num_dims; j++) {
      nc_dim_starts[j] = blocks[i].starts[j];
      nc_dim_sizes[j] = blocks[i].sizes[j];
    }
    MPI_Datatype nc_var_type;
    BIL_Pio_nc_to_mpi_type(var_type, &nc_var_type, &(blocks[i].var_size));
    
    // Allocate room for data and read it independently.
    blocks[i].data = BIL_Misc_malloc(blocks[i].total_size * blocks[i].var_size);
    BIL_Timing_io_start(all_readers_comm);
    assert(ncmpi_get_vara(fp, var_id, nc_dim_starts, nc_dim_sizes,
                          blocks[i].data, blocks[i].total_size,
                          nc_var_type) == NC_NOERR);
    BIL_Timing_io_stop(all_readers_comm,
                       blocks[i].total_size * blocks[i].var_size);
    // Clean up.
    ncmpi_end_indep_data(fp);
    ncmpi_close(fp);
  }
}
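
Example #6 performs the read in independent data mode with the flexible ncmpi_get_vara, passing an MPI datatype that describes the in-memory buffer. When all ranks in io_comm read their blocks together, the collective counterpart ncmpi_get_vara_all can be used without the begin/end independent-data calls. The fragment below is only a sketch and assumes the same fp, var_id, nc_dim_starts, nc_dim_sizes, blocks[i] and nc_var_type set up in the loop body above:

    /* Sketch: collective flexible read, no independent data mode needed. */
    int rc = ncmpi_get_vara_all(fp, var_id, nc_dim_starts, nc_dim_sizes,
                                blocks[i].data, blocks[i].total_size,
                                nc_var_type);
    if (rc != NC_NOERR)
      fprintf(stderr, "ncmpi_get_vara_all: %s\n", ncmpi_strerror(rc));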
Code example #7
File: nc5dispatch.c  Project: syntheticpp/netcdf-c
static int
NC5_enddef(int ncid)
{
    int status;
    NC* nc;
    NC5_INFO* nc5;

    status = NC_check_id(ncid, &nc);
    if(status != NC_NOERR)
	return status;

    nc5 = NC5_DATA(nc);
    assert(nc5);

    status = ncmpi_enddef(nc->int_ncid);
    if(!status) {
	if (nc5->pnetcdf_access_mode == NC_INDEPENDENT)
	    status = ncmpi_begin_indep_data(nc->int_ncid);
    }
    return status;
}
Code example #8
File: ITL_random_field.cpp  Project: GRAVITYLab/ITL
void
ITLRandomField::_AddTimeStamp(
		int iTimeStamp
		)
{
	// ADD-BY-LEETEN 09/01/2011-BEGIN
	if( iNcId >= 0 )
	{
		// write the time stamp
		#ifndef	WITH_PNETCDF		// ADD-BY-LEETEN 08/12/2011
	        size_t uStart = viTimeStamps.size();
		size_t uCount = 1;
		ASSERT_NETCDF(nc_put_vara_int(
				iNcId,
				iNcTimeVarId,
				&uStart,
				&uCount,
				&iTimeStamp));

		// ADD-BY-LEETEN 08/12/2011-BEGIN
		#else	// #ifndef	WITH_PNETCDF
		MPI_Offset uStart = viTimeStamps.size();
		MPI_Offset uCount = 1;

		ASSERT_NETCDF(ncmpi_begin_indep_data(iNcId));
		if( 0 == iRank )
			ASSERT_NETCDF(ncmpi_put_vara_int(
					iNcId,
					iNcTimeVarId,
					&uStart,
					&uCount,
					&iTimeStamp));
		ASSERT_NETCDF(ncmpi_end_indep_data(iNcId));
		#endif	// #ifndef	WITH_PNETCDF
		// ADD-BY-LEETEN 08/12/2011-END
	}
	// ADD-BY-LEETEN 09/01/2011-END
	viTimeStamps.push_back(iTimeStamp);
}
Code example #9
File: log.c  Project: live-clones/pnetcdf
/*
The test write a NP * NP matrix M, NP is the number of process:
put_vara:
Process N write N copy of it's rank to row N ([N, 0...WIDTH]) using different APIs on different variable
final result should be:
0 0 0 0 ...
1 1 1 1 ...
2 2 2 2 ...
.
.
.
*/
int simpletest(char* fname, int enable_log) {
    int buffer[MAXPROCESSES];
    MPI_Offset start[2], count[2];

    int i, j, ret, errlen;
    int NProc, MyRank, NP;      // Total process; Rank
    int fid;        // Data set ID
    int did[2];     // IDs of dimension
    int vid;        // IDs for variables
    int dims[2];
    char tmp[1024], tmp2[1024];
    MPI_Info Info;
    MPI_Comm_size(MPI_COMM_WORLD, &NP);
    MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);

    if (NP == 1) {    // Act as if there were WIDTH processes for easy debugging. Most debuggers support only a single process.
        NProc = SINGLEPROCNP;
        MyRank = SINGLEPROCRANK;
    }
    else{
        NProc = NP;
    }
    if (MyRank < MAXPROCESSES) {
        // Ensure each process has an independent buffer directory

        MPI_Info_create(&Info);
        if (enable_log) {
            MPI_Info_set(Info, "pnetcdf_log", "enable");
        }
        // Create new cdf file
        ret = ncmpi_create(MPI_COMM_WORLD, fname, NC_CLOBBER, Info, &fid);
        if (ret != NC_NOERR) {
            printf("Error create file\n");
            goto ERROR;
        }
        ret = ncmpi_set_fill(fid, NC_FILL, NULL);
        if (ret != NC_NOERR) {
            printf("Error set fill\n");
            goto ERROR;
        }
        ret = ncmpi_def_dim(fid, "X", NProc, did);  // X
        if (ret != NC_NOERR) {
            printf("Error def dim X\n");
            goto ERROR;
        }
        ret = ncmpi_def_dim(fid, "Y", NProc, did + 1);    // Y
        if (ret != NC_NOERR) {
            printf("Error def dim Y\n");
            goto ERROR;
        }
        ret = ncmpi_def_var(fid, "M", NC_INT, 2, did, &vid);
        if (ret != NC_NOERR) {
            printf("Error def var M\n");
            goto ERROR;
        }
        ret = ncmpi_enddef(fid);
        if (ret != NC_NOERR) {
            printf("Error enddef\n");
            goto ERROR;
        }
        // Indep mode
        ret = ncmpi_begin_indep_data(fid);
        if (ret != NC_NOERR) {
            printf("Error begin indep\n");
            goto ERROR;
        }
        // We all write rank from now on
        for (i = 0; i < NProc; i++) {
            buffer[i] = MyRank;
        }

        // put_vara
        count[0] = 1;
        count[1] = NProc;
        start[0] = MyRank;
        start[1] = 0;
        ret = ncmpi_put_vara_int(fid, vid, start, count, buffer);
        if (ret != NC_NOERR) {
            MPI_Error_string(ret, tmp, &errlen);
            printf("Error put_varn: %d\n%s\n", errlen, tmp);
            goto ERROR;
        }
        // Collective mode
        ret = ncmpi_end_indep_data(fid);
        if (ret != NC_NOERR) {
            printf("Error end indep\n");
            goto ERROR;
        }
        ret = ncmpi_close(fid);       // Close file
        if (ret != NC_NOERR) {
            printf("Error close\n");
            goto ERROR;
        }
    }

ERROR:
    return 0;
}
Code example #10
File: netcdf_ptraj.c  Project: zhangxiaoyu11/mAMBER
/*
 * NETCDF_open()
 * Open the trajectory specified by the filename and accessMode in trajInfo as 
 * a NETCDF traj.
 * Return 0 on success, 1 on failure
 */
int NETCDF_open(coordinateInfo *trajInfo) {
#ifdef BINTRAJ
  int err,ncid;

// NOTE: Put in a check, only open if coord is unknown or netcdf
  if (prnlev>0) fprintf(stdout,"[%i] NETCDF_open(): Opening %s\n",
                        worldrank,trajInfo->filename);

  switch (trajInfo->accessMode) {
    case 0: // Read 
#     ifdef MPI
      err = ncmpi_open(MPI_COMM_WORLD, trajInfo->filename, NC_NOWRITE, MPI_INFO_NULL, &ncid);
      /* This next line is a test. Apparently it puts the netcdf file in an
       * independent I/O mode. Not sure if it is bad to always put here. 
       * Originally this call was only made from ptrajPreprocess...
       */
      if (err == NC_NOERR)
        err = ncmpi_begin_indep_data(ncid);
#     else
      err = nc_open(trajInfo->filename, NC_NOWRITE, &ncid);
#     endif
    break;
    case 1: // Write
      //omode=NC_WRITE; 
#     ifdef MPI
      err = ncmpi_create(MPI_COMM_WORLD, trajInfo->filename, NC_64BIT_OFFSET, MPI_INFO_NULL, &ncid);
      if (err == NC_NOERR)
        ncmpi_begin_indep_data(ncid);
#     else
      err = nc_create(trajInfo->filename, NC_64BIT_OFFSET, &ncid);
#     endif
    break;
    case 2: // Append
      printfone("Appending of NETCDF files is not supported.\n");
      return 1;
      break;
  }

  /* If opening succeeded and memory hasn't been allocated already
   *  initialize necessary data structure.
   * NOTE: Should this be in NETCDF_setup? If so ncid would have to 
   *       be its own variable in coordinateInfo.
   * NOTE: If this is an output file trajInfo->type has already been set.
   *       Not a huge problem but is a bit circular. TRAJOUT should eventually
   *       only set trajInfo->isNetcdf.
   */
  if (err == NC_NOERR) {
    trajInfo->type = COORD_AMBER_NETCDF;
    if (trajInfo->NCInfo==NULL) {
      trajInfo->NCInfo = (netcdfTrajectoryInfo *) safe_malloc(sizeof(netcdfTrajectoryInfo));
      INITIALIZE_netcdfTrajectoryInfo( trajInfo->NCInfo );
    }
    trajInfo->NCInfo->currentFrame = worldrank;
    // Always set NCID since it can change depending on when file is opened
    trajInfo->NCInfo->ncid = ncid;
    if (prnlev>0) fprintf(stdout,"NETCDF_open(): %s has been assigned ncid of %i\n",
                          trajInfo->filename,ncid);
    return 0;
  }

  // If we are here an error occurred. Print the error message before exiting.
  fprintf(stdout,"Error: NETCDF_open(): Could not open %s with accessMode %i\n",
          trajInfo->filename,trajInfo->accessMode);
  fprintf(stdout,"%s\n",nc_strerror(err));
#endif
  // If no BINTRAJ always fail
  return 1;
}
Code example #11
int main(int argc, char** argv)
{
    char filename[256];
    int i, j, rank, nprocs, err, nerrs=0;
    int ncid, varid, dimid[2], req, st;
    MPI_Offset start[2], count[2], stride[2];
    unsigned char buffer[NY][NX];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    if (argc > 2) {
        if (!rank) printf("Usage: %s [filename]\n",argv[0]);
        MPI_Finalize();
        return 1;
    }
    if (argc == 2) snprintf(filename, 256, "%s", argv[1]);
    else           strcpy(filename, "testfile.nc");
    MPI_Bcast(filename, 256, MPI_CHAR, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        char *cmd_str = (char*)malloc(strlen(argv[0]) + 256);
        sprintf(cmd_str, "*** TESTING C   %s for ncmpi_end_indep_data ", basename(argv[0]));
        printf("%-66s ------ ",cmd_str);
        free(cmd_str);
    }

    err = ncmpi_create(MPI_COMM_WORLD, filename, NC_CLOBBER|NC_64BIT_DATA,
                       MPI_INFO_NULL, &ncid);
    CHECK_ERR

    err = ncmpi_def_dim(ncid, "Y", NC_UNLIMITED, &dimid[0]); CHECK_ERR
    err = ncmpi_def_dim(ncid, "X", NX*nprocs,    &dimid[1]); CHECK_ERR
    err = ncmpi_def_var(ncid, "var", NC_UBYTE, NDIMS, dimid, &varid); CHECK_ERR
    err = ncmpi_enddef(ncid); CHECK_ERR

    for (i=0; i<NY; i++) for (j=0; j<NX; j++) buffer[i][j] = rank+10;

     start[0] = 0;     start[1] = NX*rank;
     count[0] = NY/2;  count[1] = NX/2;
    stride[0] = 2;    stride[1] = 2;
    err = ncmpi_buffer_attach(ncid, NY*NX); CHECK_ERR

    err = ncmpi_begin_indep_data(ncid); CHECK_ERR
    err = ncmpi_bput_vars_uchar(ncid, varid, start, count, stride,
                                &buffer[0][0], &req);
    CHECK_ERR

    /* check if write buffer contents have been altered */
    for (i=0; i<NY; i++)
        for (j=0; j<NX; j++) {
            if (buffer[i][j] != rank+10) {
                printf("Error at line %d in %s: put buffer[%d][%d]=%hhu altered, should be %d\n",
                       __LINE__,__FILE__,i,j,buffer[i][j],rank+10);
                nerrs++;
            }
        }

    err = ncmpi_end_indep_data(ncid); CHECK_ERR

    /* calling wait API after exiting independent data mode on purpose */
    err = ncmpi_wait_all(ncid, 1, &req, &st); CHECK_ERR
    err = st; CHECK_ERR

    /* check if write buffer contents have been altered */
    for (i=0; i<NY; i++)
        for (j=0; j<NX; j++) {
            if (buffer[i][j] != rank+10) {
                printf("Error at line %d in %s: put buffer[%d][%d]=%hhu altered, should be %d\n",
                       __LINE__,__FILE__,i,j,buffer[i][j],rank+10);
                nerrs++;
            }
        }

    err = ncmpi_buffer_detach(ncid); CHECK_ERR
    err = ncmpi_close(ncid); CHECK_ERR

    /* check if PnetCDF freed all internal malloc */
    MPI_Offset malloc_size, sum_size;
    err = ncmpi_inq_malloc_size(&malloc_size);
    if (err == NC_NOERR) {
        MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD);
        if (rank == 0 && sum_size > 0) {
            printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n",
                   sum_size);
            ncmpi_inq_malloc_list();
        }
    }

    MPI_Allreduce(MPI_IN_PLACE, &nerrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    if (rank == 0) {
        if (nerrs) printf(FAIL_STR,nerrs);
        else       printf(PASS_STR);
    }

    MPI_Finalize();
    return (nerrs > 0);
}
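
The test above deliberately calls ncmpi_wait_all only after leaving independent data mode. In ordinary use the buffered non-blocking pattern is attach, bput, wait, detach, all while the file is still in collective data mode. The fragment below is only a sketch that reuses ncid, varid, start, count, buffer, req, st and err from the test; CHECK_ERR is omitted for brevity.

    /* Sketch, staying in collective data mode throughout. */
    err = ncmpi_buffer_attach(ncid, NY*NX);             /* reserve space for bput */
    err = ncmpi_bput_vara_uchar(ncid, varid, start, count,
                                &buffer[0][0], &req);   /* buffered, returns immediately */
    err = ncmpi_wait_all(ncid, 1, &req, &st);           /* flush the pending request */
    err = ncmpi_buffer_detach(ncid);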
Code example #12
File: aiori-NCMPI.c  Project: HPCStack/ior
IOR_offset_t
IOR_Xfer_NCMPI(int            access,
               void         * fd,
               IOR_size_t   * buffer,
               IOR_offset_t   length,
               IOR_param_t  * param)
{
    char         * bufferPtr          = (char *)buffer;
    static int     firstReadCheck     = FALSE,
                   startDataSet;
    int            var_id,
                   dim_id[NUM_DIMS];
    MPI_Offset     bufSize[NUM_DIMS],
                   offset[NUM_DIMS];
    IOR_offset_t   segmentPosition;
    int            segmentNum,
                   transferNum;

    /* Wei-keng Liao: In IOR.c line 1979 says "block size must be a multiple
       of transfer size."  Hence, length should always == param->transferSize
       below.  I leave it here to double check.
    */
    if (length != param->transferSize) {
        char errMsg[256];
        sprintf(errMsg,"length(%lld) != param->transferSize(%lld)\n",
                length, param->transferSize);
        NCMPI_CHECK(-1, errMsg);
    }

    /* determine by offset if need to start data set */
    if (param->filePerProc == TRUE) {
        segmentPosition = (IOR_offset_t)0;
    } else {
        segmentPosition = (IOR_offset_t)((rank + rankOffset) % param->numTasks)
                                        * param->blockSize;
    }
    if ((int)(param->offset - segmentPosition) == 0) {
        startDataSet = TRUE;
        /*
         * this toggle is for the read check operation, which passes through
         * this function twice; note that this function will open a data set
         * only on the first read check and close only on the second
         */
        if (access == READCHECK) {
            if (firstReadCheck == TRUE) {
                firstReadCheck = FALSE;
            } else {
                firstReadCheck = TRUE;
            }
        }
    }

    if (startDataSet == TRUE &&
        (access != READCHECK || firstReadCheck == TRUE)) {
        if (access == WRITE) {
            int numTransfers = param->blockSize / param->transferSize;

            /* Wei-keng Liao: change 1D array to 3D array of dimensions:
               [segmentCount*numTasksWorld][numTransfers][transferSize]
               Requirement: none of these dimensions should be > 4G,
            */
            NCMPI_CHECK(ncmpi_def_dim(*(int *)fd, "segments_times_np",
                        NC_UNLIMITED, &dim_id[0]),
                        "cannot define data set dimensions");
            NCMPI_CHECK(ncmpi_def_dim(*(int *)fd, "number_of_transfers",
                        numTransfers, &dim_id[1]),
                        "cannot define data set dimensions");
            NCMPI_CHECK(ncmpi_def_dim(*(int *)fd, "transfer_size",
                        param->transferSize, &dim_id[2]),
                        "cannot define data set dimensions");
            NCMPI_CHECK(ncmpi_def_var(*(int *)fd, "data_var", NC_BYTE,
                                      NUM_DIMS, dim_id, &var_id),
                        "cannot define data set variables");
            NCMPI_CHECK(ncmpi_enddef(*(int *)fd),
                        "cannot close data set define mode");
        
        } else {
            NCMPI_CHECK(ncmpi_inq_varid(*(int *)fd, "data_var", &var_id),
                        "cannot retrieve data set variable");
        }

        if (param->collective == FALSE) {
            NCMPI_CHECK(ncmpi_begin_indep_data(*(int *)fd),
                        "cannot enable independent data mode");
        }

        param->var_id = var_id;
        startDataSet = FALSE;
    }

    var_id = param->var_id;

    /* Wei-keng Liao: calculate the segment number */
    segmentNum  = param->offset / (param->numTasks * param->blockSize);

    /* Wei-keng Liao: calculate the transfer number in each block */
    transferNum = param->offset % param->blockSize / param->transferSize;

    /* Wei-keng Liao: read/write the 3rd dim of the dataset, each is of
       amount param->transferSize */
    bufSize[0] = 1;
    bufSize[1] = 1;
    bufSize[2] = param->transferSize;

    offset[0] = segmentNum * numTasksWorld + rank;
    offset[1] = transferNum;
    offset[2] = 0;

    /* access the file */
    if (access == WRITE) { /* WRITE */
        if (param->collective) {
            NCMPI_CHECK(ncmpi_put_vara_all(*(int *)fd, var_id, offset, bufSize,
                                           bufferPtr, length, MPI_BYTE),
                        "cannot write to data set");
        } else {
            NCMPI_CHECK(ncmpi_put_vara(*(int *)fd, var_id, offset, bufSize,
                                       bufferPtr, length, MPI_BYTE),
                        "cannot write to data set");
        }
    } else {               /* READ or CHECK */
        if (param->collective == TRUE) {
            NCMPI_CHECK(ncmpi_get_vara_all(*(int *)fd, var_id, offset, bufSize,
                                           bufferPtr, length, MPI_BYTE),
                        "cannot read from data set");
        } else {
            NCMPI_CHECK(ncmpi_get_vara(*(int *)fd, var_id, offset, bufSize,
                                       bufferPtr, length, MPI_BYTE),
                        "cannot read from data set");
        }
    }

    return(length);
} /* IOR_Xfer_NCMPI() */
Code example #13
File: ITL_random_field.cpp  Project: GRAVITYLab/ITL
void
ITLRandomField::_CloseNetCdf
(
)
{
	// MOD-BY-LEETEN 09/01/2011-FROM:
		// if( iNcId > 0 )
	// TO:
	if( iNcId >= 0 )
	// MOD-BY-LEETEN 09/01/2011-END
	{
		// write the time stamp
		TBuffer<int> piTemp;
		piTemp.alloc(this->IGetNrOfTimeStamps());
		for(int t = 0; t < (int)piTemp.USize(); t++)
			piTemp[t] = this->viTimeStamps[t];

		#ifndef	WITH_PNETCDF		// ADD-BY-LEETEN 08/12/2011
		#if 0 // DEL-BY-LEETEN 09/01/2011-BEGIN
			// since the time step will be written earlier, this part can be removed
			size_t uStart = 0;
			size_t uCount = piTemp.USize();
			ASSERT_NETCDF(nc_put_vara_int(
					iNcId,
					iNcTimeVarId,
					&uStart,
					&uCount,
					&piTemp[0]));
		#endif	// DEL-BY-LEETEN 09/01/2011-END
        /* Close the file. */
	    ASSERT_NETCDF(nc_close(iNcId));

		// ADD-BY-LEETEN 08/12/2011-BEGIN
		#else	// #ifndef	WITH_PNETCDF

	    #if 0 // DEL-BY-LEETEN 09/01/2011-BEGIN
			MPI_Offset uStart = 0;
			MPI_Offset uCount = piTemp.USize();

			ASSERT_NETCDF(ncmpi_begin_indep_data(iNcId));
			if( 0 == iRank )
				ASSERT_NETCDF(ncmpi_put_vara_int(
						iNcId,
						iNcTimeVarId,
						&uStart,
						&uCount,
						&piTemp[0]));
			ASSERT_NETCDF(ncmpi_end_indep_data(iNcId));
		#endif	// DEL-BY-LEETEN 09/01/2011-END

        /* Close the file. */
	    ASSERT_NETCDF(ncmpi_close(iNcId));
		#endif	// #ifndef	WITH_PNETCDF
		// ADD-BY-LEETEN 08/12/2011-END

	    // MOD-BY-LEETEN 09/01/2011-FROM:
	    	// iNcId = 0;
		// TO:
	    iNcId = -1;
	    // MOD-BY-LEETEN 09/01/2011-END
	}
};
Code example #14
File: test_vard.c  Project: abhinavvishnu/matex
/*----< main() >------------------------------------------------------------*/
int main(int argc, char **argv) {

    char         filename[256];
    int          i, j, err, ncid, varid0, varid1, varid2, dimids[2], nerrs=0;
    int          rank, nprocs, debug=0, blocklengths[2], **buf, *bufptr;
    int          array_of_sizes[2], array_of_subsizes[2], array_of_starts[2];
    MPI_Offset   start[2], count[2];
    MPI_Aint     a0, a1, disps[2];
    MPI_Datatype buftype, ghost_buftype, rec_filetype, fix_filetype;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (argc > 2) {
        if (!rank) printf("Usage: %s [filename]\n",argv[0]);
        MPI_Finalize();
        return 0;
    }
    strcpy(filename, "testfile.nc");
    if (argc == 2) strcpy(filename, argv[1]);
    MPI_Bcast(filename, 256, MPI_CHAR, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        char cmd_str[256];
        sprintf(cmd_str, "*** TESTING C   %s for flexible put and get ", argv[0]);
        printf("%-66s ------ ", cmd_str); fflush(stdout);
    }

    buf = (int**)malloc(NY * sizeof(int*));
    buf[0] = (int*)malloc(NY * NX * sizeof(int));
    for (i=1; i<NY; i++) buf[i] = buf[i-1] + NX;

    /* construct various MPI derived data types */

    /* construct an MPI derived data type for swapping 1st row with 2nd row */
    blocklengths[0] = blocklengths[1] = NX;
    MPI_Get_address(buf[1], &a0);
    MPI_Get_address(buf[0], &a1);
    disps[0] = 0;
    disps[1] = a1 - a0;
    bufptr = buf[1];
    err = MPI_Type_create_hindexed(2, blocklengths, disps, MPI_INT, &buftype);
    if (err != MPI_SUCCESS) printf("MPI error MPI_Type_create_hindexed\n");
    MPI_Type_commit(&buftype);

    start[0] = 0; start[1] = NX*rank;
    count[0] = 2; count[1] = NX;
    if (debug) printf("put start=%lld %lld count=%lld %lld\n",start[0],start[1],count[0],count[1]);

    /* create a file type for the fixed-size variable */
    array_of_sizes[0] = 2;
    array_of_sizes[1] = NX*nprocs;
    array_of_subsizes[0] = count[0];
    array_of_subsizes[1] = count[1];
    array_of_starts[0] = start[0];
    array_of_starts[1] = start[1];
    MPI_Type_create_subarray(2, array_of_sizes, array_of_subsizes,
                             array_of_starts, MPI_ORDER_C,
                             MPI_INT, &fix_filetype);
    MPI_Type_commit(&fix_filetype);

    /* create a buftype with ghost cells on each side */
    array_of_sizes[0] = count[0]+4;
    array_of_sizes[1] = count[1]+4;
    array_of_subsizes[0] = count[0];
    array_of_subsizes[1] = count[1];
    array_of_starts[0] = 2;
    array_of_starts[1] = 2;
    MPI_Type_create_subarray(2, array_of_sizes, array_of_subsizes,
                             array_of_starts, MPI_ORDER_C,
                             MPI_INT, &ghost_buftype);
    MPI_Type_commit(&ghost_buftype);

    /* create a new file for write */
    err = ncmpi_create(MPI_COMM_WORLD, filename, NC_CLOBBER, MPI_INFO_NULL,
                       &ncid); ERR

    /* define a 2D array */
    err = ncmpi_def_dim(ncid, "REC_DIM", NC_UNLIMITED, &dimids[0]); ERR
    err = ncmpi_def_dim(ncid, "X",       NX*nprocs,    &dimids[1]); ERR
    err = ncmpi_def_var(ncid, "rec_var", NC_INT, 2, dimids, &varid0); ERR
    err = ncmpi_def_var(ncid, "dummy_rec", NC_INT, 2, dimids, &varid2); ERR
    err = ncmpi_def_dim(ncid, "FIX_DIM", 2, &dimids[0]); ERR
    err = ncmpi_def_var(ncid, "fix_var", NC_INT, 2, dimids, &varid1); ERR
    err = ncmpi_enddef(ncid); ERR

    /* create a file type for the record variable */
    int *array_of_blocklengths=(int*) malloc(count[0]*sizeof(int));
    MPI_Aint *array_of_displacements=(MPI_Aint*) malloc(count[0]*sizeof(MPI_Aint));
    MPI_Offset recsize;
    err = ncmpi_inq_recsize(ncid, &recsize);
    for (i=0; i<count[0]; i++) {
        array_of_blocklengths[i] = count[1];
        array_of_displacements[i] = start[1]*sizeof(int) + recsize * i;
    }
    MPI_Type_create_hindexed(2, array_of_blocklengths, array_of_displacements,
                             MPI_INT, &rec_filetype);
    MPI_Type_commit(&rec_filetype);
    free(array_of_blocklengths);
    free(array_of_displacements);

    /* initialize the contents of the array */
    for (j=0; j<NY; j++) for (i=0; i<NX; i++) buf[j][i] = rank*100 + j*10 + i;

    /* write the record variable */
    err = ncmpi_put_vard_all(ncid, varid0, rec_filetype, bufptr, 1, buftype); ERR

    /* check if the contents of buf are altered */
    CHECK_VALUE

    /* check if root process can write to file header in data mode */
    err = ncmpi_rename_var(ncid, varid0, "rec_VAR"); ERR

    /* write the fixed-size variable */
    err = ncmpi_put_vard_all(ncid, varid1, fix_filetype, bufptr, 1, buftype); ERR

    /* check if the contents of buf are altered */
    CHECK_VALUE
 
    /* check if root process can write to file header in data mode */
    err = ncmpi_rename_var(ncid, varid0, "rec_var"); ERR

    /* test the same routines in independent data mode */
    err = ncmpi_begin_indep_data(ncid); ERR
    err = ncmpi_put_vard(ncid, varid0, rec_filetype, bufptr, 1, buftype); ERR
    CHECK_VALUE
    err = ncmpi_rename_var(ncid, varid0, "rec_VAR"); ERR
    err = ncmpi_put_vard(ncid, varid1, fix_filetype, bufptr, 1, buftype); ERR
    CHECK_VALUE
    err = ncmpi_rename_var(ncid, varid0, "rec_var"); ERR
    err = ncmpi_end_indep_data(ncid); ERR

    err = ncmpi_close(ncid); ERR

    /* open the same file and read back for validate */
    err = ncmpi_open(MPI_COMM_WORLD, filename, NC_NOWRITE, MPI_INFO_NULL,
                     &ncid); ERR

    err = ncmpi_inq_varid(ncid, "rec_var", &varid0); ERR
    err = ncmpi_inq_varid(ncid, "fix_var", &varid1); ERR

    nerrs += get_var_and_verify(ncid, varid0, start, count, buf, buftype, ghost_buftype, rec_filetype);
    nerrs += get_var_and_verify(ncid, varid1, start, count, buf, buftype, ghost_buftype, fix_filetype);

    err = ncmpi_close(ncid); ERR

    MPI_Type_free(&rec_filetype);
    MPI_Type_free(&fix_filetype);
    MPI_Type_free(&buftype);
    MPI_Type_free(&ghost_buftype);
    free(buf[0]); free(buf);

    /* check if PnetCDF freed all internal malloc */
    MPI_Offset malloc_size, sum_size;
    err = ncmpi_inq_malloc_size(&malloc_size);
    if (err == NC_NOERR) {
        MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD);
        if (rank == 0 && sum_size > 0)
            printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n",
                   sum_size);
    }

    MPI_Allreduce(MPI_IN_PLACE, &nerrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    if (rank == 0) {
        if (nerrs) printf(FAIL_STR,nerrs);
        else       printf(PASS_STR);
    }

    MPI_Finalize();
    return 0;
}
Code example #15
int
main(int argc, char ** argv)
{
   /* IDs for the netCDF file, dimensions, and variables. */
   int nprocs, rank;
   int ncid;
   int lon_dimid, lat_dimid, lvl_dimid, rec_dimid;
   int lat_varid, lon_varid, pres_varid, temp_varid;
   int dimids[NDIMS];

   /* The start and count arrays will tell the netCDF library where to
      write our data. */
   MPI_Offset start[NDIMS], count[NDIMS];

   /* Program variables to hold the data we will write out. We will only
      need enough space to hold one timestep of data; one record. */
   float pres_out[NLVL][NLAT][NLON];
   float temp_out[NLVL][NLAT][NLON];

   /* These program variables hold the latitudes and longitudes. */
   float lats[NLAT], lons[NLON];

   /* Loop indexes. */
   int lvl, lat, lon, rec, i = 0;
   
   /* Error handling. */
   int retval;

   MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);


   /* Create some pretend data. If this wasn't an example program, we
    * would have some real data to write, for example, model
    * output. */
   for (lat = 0; lat < NLAT; lat++)
      lats[lat] = START_LAT + 5.*lat;
   for (lon = 0; lon < NLON; lon++)
      lons[lon] = START_LON + 5.*lon;
   
   for (lvl = 0; lvl < NLVL; lvl++)
      for (lat = 0; lat < NLAT; lat++)
	 for (lon = 0; lon < NLON; lon++)
	 {
	    pres_out[lvl][lat][lon] = SAMPLE_PRESSURE + i;
	    temp_out[lvl][lat][lon] = SAMPLE_TEMP + i++;
	 }

   /* Create the file. */
   if ((retval = ncmpi_create(MPI_COMM_WORLD, FILE_NAME, NC_CLOBBER, MPI_INFO_NULL, &ncid)))

	check_err(retval,__LINE__,__FILE__);

   /* Define the dimensions. The record dimension is defined to have
    * unlimited length - it can grow as needed. In this example it is
    * the time dimension.*/
   if ((retval = ncmpi_def_dim(ncid, LVL_NAME, NLVL, &lvl_dimid)))
      check_err(retval,__LINE__,__FILE__);
   if ((retval = ncmpi_def_dim(ncid, LAT_NAME, NLAT, &lat_dimid)))
      check_err(retval,__LINE__,__FILE__);
   if ((retval = ncmpi_def_dim(ncid, LON_NAME, NLON, &lon_dimid)))
      check_err(retval,__LINE__,__FILE__);
   if ((retval = ncmpi_def_dim(ncid, REC_NAME, NC_UNLIMITED, &rec_dimid)))
      check_err(retval,__LINE__,__FILE__);

   /* Define the coordinate variables. We will only define coordinate
      variables for lat and lon.  Ordinarily we would need to provide
      an array of dimension IDs for each variable's dimensions, but
      since coordinate variables only have one dimension, we can
      simply provide the address of that dimension ID (&lat_dimid) and
      similarly for (&lon_dimid). */
   if ((retval = ncmpi_def_var(ncid, LAT_NAME, NC_FLOAT, 1, &lat_dimid, 
			    &lat_varid)))
      check_err(retval,__LINE__,__FILE__);
   if ((retval = ncmpi_def_var(ncid, LON_NAME, NC_FLOAT, 1, &lon_dimid, 
			    &lon_varid)))
      check_err(retval,__LINE__,__FILE__);

   /* Assign units attributes to coordinate variables. */
   if ((retval = ncmpi_put_att_text(ncid, lat_varid, UNITS, 
				 strlen(DEGREES_NORTH), DEGREES_NORTH)))
      check_err(retval,__LINE__,__FILE__);
   if ((retval = ncmpi_put_att_text(ncid, lon_varid, UNITS, 
				 strlen(DEGREES_EAST), DEGREES_EAST)))
      check_err(retval,__LINE__,__FILE__);

   /* The dimids array is used to pass the dimids of the dimensions of
      the netCDF variables. Both of the netCDF variables we are
      creating share the same four dimensions. In C, the
      unlimited dimension must come first on the list of dimids. */
   dimids[0] = rec_dimid;
   dimids[1] = lvl_dimid;
   dimids[2] = lat_dimid;
   dimids[3] = lon_dimid;

   /* Define the netCDF variables for the pressure and temperature
    * data. */
   if ((retval = ncmpi_def_var(ncid, PRES_NAME, NC_FLOAT, NDIMS, 
			    dimids, &pres_varid)))
      check_err(retval,__LINE__,__FILE__);
   if ((retval = ncmpi_def_var(ncid, TEMP_NAME, NC_FLOAT, NDIMS, 
			    dimids, &temp_varid)))
      check_err(retval,__LINE__,__FILE__);

   /* Assign units attributes to the netCDF variables. */
   if ((retval = ncmpi_put_att_text(ncid, pres_varid, UNITS, 
				 strlen(PRES_UNITS), PRES_UNITS)))
      check_err(retval,__LINE__,__FILE__);
   if ((retval = ncmpi_put_att_text(ncid, temp_varid, UNITS, 
				 strlen(TEMP_UNITS), TEMP_UNITS)))
      check_err(retval,__LINE__,__FILE__);

   /* End define mode. */
   if ((retval = ncmpi_enddef(ncid)))
      check_err(retval,__LINE__,__FILE__);

  retval = ncmpi_begin_indep_data(ncid);
   /* Write the coordinate variable data. This will put the latitudes
      and longitudes of our data grid into the netCDF file. */
   if ((retval = ncmpi_put_var_float(ncid, lat_varid, &lats[0]))){
      check_err(retval,__LINE__,__FILE__);
      printf("------------------------\n");
      }
   if ((retval = ncmpi_put_var_float(ncid, lon_varid, &lons[0])))
      check_err(retval,__LINE__,__FILE__);
  retval = ncmpi_end_indep_data(ncid);

   /* These settings tell netcdf to write one timestep of data. (The
      setting of start[0] inside the loop below tells netCDF which
      timestep to write.) */
   count[0] = 1;
   count[1] = NLVL;
   count[2] = NLAT;
   count[3] = NLON;
   start[1] = 0;
   start[2] = 0;
   start[3] = 0;

   /* Write the pretend data. This will write our surface pressure and
      surface temperature data. The arrays only hold one timestep worth
      of data. We will just rewrite the same data for each timestep. In
      a real application, the data would change between timesteps. */

   for (rec = 0; rec < NREC; rec++)
   {
      start[0] = rec;
      if ((retval = ncmpi_put_vara_float_all(ncid, pres_varid, start, count, &pres_out[0][0][0])))
      check_err(retval,__LINE__,__FILE__);
      if ((retval = ncmpi_put_vara_float_all(ncid, temp_varid, start, count, &temp_out[0][0][0])))
      check_err(retval,__LINE__,__FILE__);
   }

   /* Close the file. */
   if ((retval = ncmpi_close(ncid)))
      check_err(retval,__LINE__,__FILE__);
   
   printf("*** SUCCESS writing example file %s!\n", FILE_NAME);
   MPI_Finalize();


   return 0;
}
Code example #16
File: vectors.c  Project: timmorey/parallel-netcdf
int main(int argc, char ** argv)
{
	int ncid, dimid, varid;
	MPI_Init(&argc, &argv);
	MPI_Datatype vtype, rtype, usertype;
	MPI_Aint lb, extent;
	int userbufsz, *userbuf, *cmpbuf, i, errs=0;
	int count = 25;
	double pi = 3.14159;
	MPI_Offset start, acount;

	ncmpi_create(MPI_COMM_WORLD, "vectors.nc", NC_CLOBBER, MPI_INFO_NULL,
			&ncid);
	ncmpi_def_dim(ncid, "50k", 1024*50, &dimid);
	ncmpi_def_var(ncid, "vector", NC_DOUBLE, 1, &dimid, &varid);

	ncmpi_enddef(ncid);


	MPI_Type_vector(VECCOUNT, BLOCKLEN, STRIDE, MPI_INT, &vtype);
	MPI_Type_create_resized(vtype, 0, STRIDE*VECCOUNT*sizeof(int), &rtype);
	MPI_Type_contiguous(count, rtype, &usertype);
	MPI_Type_commit(&usertype);

	MPI_Type_free(&vtype);
	MPI_Type_free(&rtype);

	MPI_Type_get_extent(usertype, &lb, &extent);
	userbufsz = extent;
	userbuf = malloc(userbufsz);
	cmpbuf = calloc(userbufsz, 1);
	for (i=0; i< userbufsz/sizeof(int); i++) {
		userbuf[i] = pi*i;
	}


	start = 10; acount = count*12;
	ncmpi_begin_indep_data(ncid);
	ncmpi_put_vara(ncid, varid, &start, &acount, 
			userbuf, 1, usertype);

	ncmpi_close(ncid);

	NC_CHECK(ncmpi_open(MPI_COMM_WORLD, "vectors.nc", NC_NOWRITE,
				MPI_INFO_NULL, &ncid));
	ncmpi_begin_indep_data(ncid);
	NC_CHECK(ncmpi_inq_varid(ncid, "vector", &varid));
	NC_CHECK(ncmpi_get_vara(ncid, varid, &start, &acount,
			cmpbuf, 1, usertype));
	ncmpi_close(ncid);

	for (i=0; errs < 10 &&  i < acount; i++) {
		/* vector of 4,3,5, so skip 4th and 5th items of every block */
		if (i%STRIDE >= BLOCKLEN) continue;
		if (userbuf[i] != cmpbuf[i]) {
			errs++;
			fprintf(stderr, "%d: expected 0x%x got 0x%x\n", 
					i, userbuf[i], cmpbuf[i]);
		}
	}
	free(userbuf);
	free(cmpbuf);
	MPI_Type_free(&usertype);
	MPI_Finalize();
	return 0;
}
Code example #17
int main(int argc, char **argv) {

  int i, j, k;
  int status;
  int ncid;
  int dimid1, dimid2, dimid3, udimid;
  int square_dim[2], cube_dim[3], xytime_dim[3], time_dim[1];
  MPI_Offset square_start[2], cube_start[3] = {0, 0, 0};
  MPI_Offset square_count[2] = {50, 50}, cube_count[3] = {100, 50, 50};
  MPI_Offset xytime_start[3] = {0, 0, 0};
  MPI_Offset xytime_count[3] = {100, 50, 50};
  MPI_Offset time_start[1], time_count[1] = {25};
  int square_id, cube_id, xytime_id, time_id;
  static char title[] = "example netCDF dataset";
  static char description[] = "2-D integer array";
  int data[100][50][50], buffer[100];
  int rank;
  int nprocs;
  MPI_Comm comm = MPI_COMM_WORLD;
  params opts;

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if (rank == 0) 
	  fprintf(stderr, "Testing independent write ... ");
  parse_write_args(argc, argv, rank, &opts);

  /**********  START OF NETCDF ACCESS **************/

  /**
   * Create the dataset
   *   File name: "testwrite.nc"
   *   Dataset API: Collective
   */

  status = ncmpi_create(comm, opts.outfname, NC_CLOBBER, MPI_INFO_NULL, &ncid);
  if (status != NC_NOERR) handle_error(status);


  /**
   * Create a global attribute:
   *    :title = "example netCDF dataset";
   */

  status = ncmpi_put_att_text (ncid, NC_GLOBAL, "title",
                          strlen(title), title);
  if (status != NC_NOERR) handle_error(status);

  /**
   * Add 4 pre-defined dimensions:
   *   x = 100, y = 100, z = 100, time = NC_UNLIMITED
   */

  status = ncmpi_def_dim(ncid, "x", 100L, &dimid1);
  if (status != NC_NOERR) handle_error(status);
  status = ncmpi_def_dim(ncid, "y", 100L, &dimid2);
  if (status != NC_NOERR) handle_error(status);
  status = ncmpi_def_dim(ncid, "z", 100L, &dimid3);
  if (status != NC_NOERR) handle_error(status);
  status = ncmpi_def_dim(ncid, "time", NC_UNLIMITED, &udimid);
  if (status != NC_NOERR) handle_error(status);

  /**
   * Define the dimensionality and then add 4 variables:
   *    square(x, y), cube(x,y,z), time(time), xytime(time, x, y)  
   */

  square_dim[0] = cube_dim[0] = xytime_dim[1] = dimid1;
  square_dim[1] = cube_dim[1] = xytime_dim[2] = dimid2;
  cube_dim[2] = dimid3;
  xytime_dim[0] = udimid;
  time_dim[0] = udimid;
  status = ncmpi_def_var (ncid, "square", NC_INT, 2, square_dim, &square_id);
  if (status != NC_NOERR) handle_error(status);
  status = ncmpi_def_var (ncid, "cube", NC_INT, 3, cube_dim, &cube_id);
  if (status != NC_NOERR) handle_error(status);
  status = ncmpi_def_var (ncid, "time", NC_INT, 1, time_dim, &time_id);
  if (status != NC_NOERR) handle_error(status);
  status = ncmpi_def_var (ncid, "xytime", NC_INT, 3, xytime_dim, &xytime_id);
  if (status != NC_NOERR) handle_error(status);

  /**
   * Add an attribute for variable: 
   *    square: decsription = "2-D integer array"
   */

/*
  status = ncmpi_put_att_text (ncid, square_id, "description",
                          strlen(description), description);
  if (status != NC_NOERR) handle_error(status);
*/

  /**
   * End Define Mode (switch to data mode)
   *   Dataset API: Collective
   */

  status = ncmpi_enddef(ncid);
  if (status != NC_NOERR) handle_error(status);

  /**
   * Data Partition (Assume 4 processors):
   *   square: 2-D, (Block, Block), 50*50 from 100*100 
   *   cube:   3-D, (*, Block, Block), 100*50*50 from 100*100*100
   *   xytime: 3-D, (*, Block, Block), 100*50*50 from 100*100*100
   *   time:   1-D, Block-wise, 25 from 100
   */

  square_start[0] = cube_start[1] = xytime_start[1] = (rank/2) * 50;
  square_start[1] = cube_start[2] = xytime_start[2] = (rank%2) * 50;
  time_start[0] = (rank%4) * 25;


  /**
   * Packing data in the buffer 
   */

  /* Data for variable: time */
  for ( i = time_start[0]; i < time_start[0] + time_count[0]; i++ )
    buffer[i - time_start[0]] = i;   

  /* Data for variable: square, cube and xytime */
  for ( i = 0; i < 100; i++ )
    for ( j = square_start[0]; j < square_start[0]+square_count[0]; j++ )
      for ( k = square_start[1]; k < square_start[1]+square_count[1]; k++ )
        data[i][j-square_start[0]][k-square_start[1]] = i*100*100 + j*100 + k;

  /**
   * Write data into variables: square, cube, time and xytime  
   *   Access Method: subarray
   *   Data Mode API: non-collective
   */ 
 
  status = ncmpi_begin_indep_data(ncid);
  if (status != NC_NOERR) handle_error(status); 
  
  status = ncmpi_put_vara_int(ncid, square_id,
                              square_start, square_count,
                              &data[0][0][0]);
  if (status != NC_NOERR) handle_error(status);
  status = ncmpi_put_vara_int(ncid, cube_id,
                              cube_start, cube_count,
                              &data[0][0][0]);
  if (status != NC_NOERR) handle_error(status);
  status = ncmpi_put_vara_int(ncid, time_id,
                              time_start, time_count,
                              (void *)buffer);
  if (status != NC_NOERR) handle_error(status);
  status = ncmpi_put_vara_int(ncid, xytime_id,
                              xytime_start, xytime_count,
                              &data[0][0][0]);
  if (status != NC_NOERR) handle_error(status);

  status = ncmpi_end_indep_data(ncid);
  if (status != NC_NOERR) handle_error(status);

status = ncmpi_redef(ncid);
if (status != NC_NOERR) handle_error(status);
status = ncmpi_put_att_text (ncid, square_id, "description",
                          strlen(description), description);
if (status != NC_NOERR) handle_error(status);
status = ncmpi_enddef(ncid);
if (status != NC_NOERR) handle_error(status);

  /**
   * Close the dataset
   *   Dataset API:  collective
   */

  status = ncmpi_close(ncid);
  if (status != NC_NOERR) handle_error(status);

  /*******************  END OF NETCDF ACCESS  ****************/

if (rank == 0)
  fprintf(stderr, "OK\nFile written to: %s!\n", opts.outfname);

  MPI_Finalize();
  return 0;
}
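
Example #17 issues its puts between ncmpi_begin_indep_data and ncmpi_end_indep_data. The same writes can instead stay in collective data mode by using the _all variants, in which case the begin/end calls are not needed. A small sketch for the "square" variable, reusing ncid, square_id, square_start, square_count, data and handle_error from above:

  /* Sketch: collective equivalent of the independent write of "square". */
  status = ncmpi_put_vara_int_all(ncid, square_id,
                                  square_start, square_count,
                                  &data[0][0][0]);
  if (status != NC_NOERR) handle_error(status);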
Code example #18
File: ncmpi_test.c  Project: 00liujj/trilinos
int
main(int argc, char **argv) {			/* create foo.nc */

   int  stat;			/* return status */
   int  ncid;			/* netCDF id */

   /* dimension ids */
   int lat_dim;
   int lon_dim;
   int time_dim;

   /* dimension lengths */
   size_t lat_len = 10;
   size_t lon_len = 5;
   size_t time_len = NC_UNLIMITED;

   /* variable ids */
   int lat_id;
   int lon_id;
   int time_id;
   int z_id;
   int t_id;
   int p_id;
   int rh_id;

   /* rank (number of dimensions) for each variable */
#  define RANK_lat 1
#  define RANK_lon 1
#  define RANK_time 1
#  define RANK_z 3
#  define RANK_t 3
#  define RANK_p 3
#  define RANK_rh 3

   /* variable shapes */
   int lat_dims[RANK_lat];
   int lon_dims[RANK_lon];
   int time_dims[RANK_time];
   int z_dims[RANK_z];
   int t_dims[RANK_t];
   int p_dims[RANK_p];
   int rh_dims[RANK_rh];

   /* attribute vectors */
   double z_valid_range[2];
   double p__FillValue[1];
   int rh__FillValue[1];

   MPI_Init(&argc, &argv);
   /* enter define mode */
   stat = ncmpi_create(MPI_COMM_WORLD, "foo.nc", NC_CLOBBER, MPI_INFO_NULL, &ncid);
   check_err(stat,__LINE__,__FILE__);

   /* define dimensions */
   stat = ncmpi_def_dim(ncid, "lat", lat_len, &lat_dim);
   check_err(stat,__LINE__,__FILE__);
   stat = ncmpi_def_dim(ncid, "lon", lon_len, &lon_dim);
   check_err(stat,__LINE__,__FILE__);
   stat = ncmpi_def_dim(ncid, "time", time_len, &time_dim);
   check_err(stat,__LINE__,__FILE__);

   /* define variables */

   lat_dims[0] = lat_dim;
   stat = ncmpi_def_var(ncid, "lat", NC_INT, RANK_lat, lat_dims, &lat_id);
   check_err(stat,__LINE__,__FILE__);

   lon_dims[0] = lon_dim;
   stat = ncmpi_def_var(ncid, "lon", NC_INT, RANK_lon, lon_dims, &lon_id);
   check_err(stat,__LINE__,__FILE__);

   time_dims[0] = time_dim;
   stat = ncmpi_def_var(ncid, "time", NC_INT, RANK_time, time_dims, &time_id);
   check_err(stat,__LINE__,__FILE__);

   z_dims[0] = time_dim;
   z_dims[1] = lat_dim;
   z_dims[2] = lon_dim;
   stat = ncmpi_def_var(ncid, "z", NC_FLOAT, RANK_z, z_dims, &z_id);
   check_err(stat,__LINE__,__FILE__);

   t_dims[0] = time_dim;
   t_dims[1] = lat_dim;
   t_dims[2] = lon_dim;
   stat = ncmpi_def_var(ncid, "t", NC_FLOAT, RANK_t, t_dims, &t_id);
   check_err(stat,__LINE__,__FILE__);

   p_dims[0] = time_dim;
   p_dims[1] = lat_dim;
   p_dims[2] = lon_dim;
   stat = ncmpi_def_var(ncid, "p", NC_DOUBLE, RANK_p, p_dims, &p_id);
   check_err(stat,__LINE__,__FILE__);

   rh_dims[0] = time_dim;
   rh_dims[1] = lat_dim;
   rh_dims[2] = lon_dim;
   stat = ncmpi_def_var(ncid, "rh", NC_INT, RANK_rh, rh_dims, &rh_id);
   check_err(stat,__LINE__,__FILE__);

   /* assign attributes */
   stat = ncmpi_put_att_text(ncid, lat_id, "units", 13, "degrees_north");
   check_err(stat,__LINE__,__FILE__);
   stat = ncmpi_put_att_text(ncid, lon_id, "units", 12, "degrees_east");
   check_err(stat,__LINE__,__FILE__);
   stat = ncmpi_put_att_text(ncid, time_id, "units", 7, "seconds");
   check_err(stat,__LINE__,__FILE__);
   stat = ncmpi_put_att_text(ncid, z_id, "units", 6, "meters");
   check_err(stat,__LINE__,__FILE__);
   z_valid_range[0] = 0;
   z_valid_range[1] = 5000;
   stat = ncmpi_put_att_double(ncid, z_id, "valid_range", NC_DOUBLE, 2, z_valid_range);
   check_err(stat,__LINE__,__FILE__);
   p__FillValue[0] = -9999;
   stat = ncmpi_put_att_double(ncid, p_id, "_FillValue", NC_DOUBLE, 1, p__FillValue);
   check_err(stat,__LINE__,__FILE__);
   rh__FillValue[0] = -1;
   stat = ncmpi_put_att_int(ncid, rh_id, "_FillValue", NC_INT, 1, rh__FillValue);
   check_err(stat,__LINE__,__FILE__);

   /* leave define mode */
   stat = ncmpi_enddef (ncid);
   check_err(stat,__LINE__,__FILE__);

   {			/* store lat */
    static int lat[] = {0, 10, 20, 30, 40, 50, 60, 70, 80, 90};
    ncmpi_begin_indep_data(ncid);
    stat = ncmpi_put_var_int(ncid, lat_id, lat);
    ncmpi_end_indep_data(ncid);
    check_err(stat,__LINE__,__FILE__);
   }

   {			/* store lon */
    static int lon[] = {-140, -118, -96, -84, -52};
    ncmpi_begin_indep_data(ncid);
    stat = ncmpi_put_var_int(ncid, lon_id, lon);
    ncmpi_end_indep_data(ncid);
    check_err(stat,__LINE__,__FILE__);
   }
   stat = ncmpi_close(ncid);
   check_err(stat,__LINE__,__FILE__);
   MPI_Finalize();
   return 0;
}
Code example #19
File: test_read_indep.c  Project: 00liujj/trilinos
int main(int argc, char **argv) {

    int i, j;
    int status;
    int ncid1, ncid2;
    int ndims, nvars, ngatts, unlimdimid;
    char name[NC_MAX_NAME];
    nc_type type, vartypes[NC_MAX_VARS];
    MPI_Offset attlen;
    MPI_Offset dimlen, shape[NC_MAX_VAR_DIMS], varsize, start[NC_MAX_VAR_DIMS];
    void *valuep;
    int dimids[NC_MAX_DIMS], varids[NC_MAX_VARS];
    int vardims[NC_MAX_VARS][NC_MAX_VAR_DIMS/16]; /* divided by 16 due to my memory limitation */
    int varndims[NC_MAX_VARS], varnatts[NC_MAX_VARS];
    params opts;

    int rank;
    int nprocs;
    MPI_Comm comm = MPI_COMM_WORLD;


    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0)
        fprintf(stderr, "Testing independent read ... ");
    parse_read_args(argc, argv, rank, &opts);

    /**********  START OF NETCDF ACCESS **************/


    /* Read a netCDF file and write it out to another file */

    /**
     * Open the input dataset - ncid1:
     *   File name: "../data/test_int.nc"
     *   Dataset API: Collective
     * And create the output dataset - ncid2:
     *   File name: "testread.nc"
     *   Dataset API: Collective
     */

    status = ncmpi_open(comm, opts.infname, 0, MPI_INFO_NULL, &ncid1);
    if (status != NC_NOERR) handle_error(status);

    status = ncmpi_create(comm, opts.outfname, NC_CLOBBER, MPI_INFO_NULL, &ncid2);
    if (status != NC_NOERR) handle_error(status);


    /**
     * Inquire the dataset definitions of input dataset AND
     * Add dataset definitions for output dataset.
     */

    status = ncmpi_inq(ncid1, &ndims, &nvars, &ngatts, &unlimdimid);
    if (status != NC_NOERR) handle_error(status);


    /* Inquire global attributes, assume CHAR attributes. */

    for (i = 0; i < ngatts; i++) {
        status = ncmpi_inq_attname(ncid1, NC_GLOBAL, i, name);
        if (status != NC_NOERR) handle_error(status);
        status = ncmpi_inq_att (ncid1, NC_GLOBAL, name, &type, &attlen);
        if (status != NC_NOERR) handle_error(status);
        switch (type) {
        case NC_CHAR:
            valuep = (void *)malloc(attlen * sizeof(char));
            status = ncmpi_get_att_text(ncid1, NC_GLOBAL, name, valuep);
            if (status != NC_NOERR) handle_error(status);
            status = ncmpi_put_att_text (ncid2, NC_GLOBAL, name, attlen, (char *)valuep);
            if (status != NC_NOERR) handle_error(status);
            free(valuep);
            break;
        case NC_SHORT:
            valuep = (void *)malloc(attlen * sizeof(short));
            status = ncmpi_get_att_short(ncid1, NC_GLOBAL, name, valuep);
            if (status != NC_NOERR) handle_error(status);
            status = ncmpi_put_att_short (ncid2, NC_GLOBAL, name, type, attlen, (short *)valuep);
            if (status != NC_NOERR) handle_error(status);
            free(valuep);
            break;
        case NC_INT:
            valuep = (void *)malloc(attlen * sizeof(int));
            status = ncmpi_get_att_int(ncid1, NC_GLOBAL, name, valuep);
            if (status != NC_NOERR) handle_error(status);
            status = ncmpi_put_att_int (ncid2, NC_GLOBAL, name, type, attlen, (int *)valuep);
            if (status != NC_NOERR) handle_error(status);
            free(valuep);
            break;
        case NC_FLOAT:
            valuep = (void *)malloc(attlen * sizeof(float));
            status = ncmpi_get_att_float(ncid1, NC_GLOBAL, name, valuep);
            if (status != NC_NOERR) handle_error(status);
            status = ncmpi_put_att_float (ncid2, NC_GLOBAL, name, type, attlen, (float *)valuep);
            if (status != NC_NOERR) handle_error(status);
            free(valuep);
            break;
        case NC_DOUBLE:
            valuep = (void *)malloc(attlen * sizeof(double));
            status = ncmpi_get_att_double(ncid1, NC_GLOBAL, name, valuep);
            if (status != NC_NOERR) handle_error(status);
            status = ncmpi_put_att_double (ncid2, NC_GLOBAL, name, type, attlen, (double *)valuep);
            if (status != NC_NOERR) handle_error(status);
            free(valuep);
            break;
        default:
            ;
            /* handle unexpected types */
        }
    }

    /* Inquire dimension */

    for (i = 0; i < ndims; i++) {
        status = ncmpi_inq_dim(ncid1, i, name, &dimlen);
        if (status != NC_NOERR) handle_error(status);
        if (i == unlimdimid)
            dimlen = NC_UNLIMITED;
        status = ncmpi_def_dim(ncid2, name, dimlen, dimids+i);
        if (status != NC_NOERR) handle_error(status);
    }

    /* Inquire variables */

    for (i = 0; i < nvars; i++) {
        status = ncmpi_inq_var (ncid1, i, name, vartypes+i, varndims+i, vardims[i], varnatts+i);
        if (status != NC_NOERR) handle_error(status);

        status = ncmpi_def_var(ncid2, name, vartypes[i], varndims[i], vardims[i], varids+i);
        if (status != NC_NOERR) handle_error(status);

        /* Copy this variable's attributes; the switch below handles the common atomic types */

        for (j = 0; j < varnatts[i]; j++) {
            status = ncmpi_inq_attname(ncid1, varids[i], j, name);
            if (status != NC_NOERR) handle_error(status);
            status = ncmpi_inq_att (ncid1, varids[i], name, &type, &attlen);
            if (status != NC_NOERR) handle_error(status);
            switch (type) {
            case NC_CHAR:
                valuep = (void *)malloc(attlen * sizeof(char));
                status = ncmpi_get_att_text(ncid1, varids[i], name, valuep);
                if (status != NC_NOERR) handle_error(status);
                status = ncmpi_put_att_text (ncid2, varids[i], name, attlen, (char *)valuep);
                if (status != NC_NOERR) handle_error(status);
                free(valuep);
                break;
            case NC_SHORT:
                valuep = (void *)malloc(attlen * sizeof(short));
                status = ncmpi_get_att_short(ncid1, varids[i], name, valuep);
                if (status != NC_NOERR) handle_error(status);
                status = ncmpi_put_att_short (ncid2, varids[i], name, type, attlen, (short *)valuep);
                if (status != NC_NOERR) handle_error(status);
                free(valuep);
                break;
            case NC_INT:
                valuep = (void *)malloc(attlen * sizeof(int));
                status = ncmpi_get_att_int(ncid1, varids[i], name, valuep);
                if (status != NC_NOERR) handle_error(status);
                status = ncmpi_put_att_int (ncid2, varids[i], name, type, attlen, (int *)valuep);
                if (status != NC_NOERR) handle_error(status);
                free(valuep);
                break;
            case NC_FLOAT:
                valuep = (void *)malloc(attlen * sizeof(float));
                status = ncmpi_get_att_float(ncid1, varids[i], name, valuep);
                if (status != NC_NOERR) handle_error(status);
                status = ncmpi_put_att_float (ncid2, varids[i], name, type, attlen, (float *)valuep);
                if (status != NC_NOERR) handle_error(status);
                free(valuep);
                break;
            case NC_DOUBLE:
                valuep = (void *)malloc(attlen * sizeof(double));
                status = ncmpi_get_att_double(ncid1, varids[i], name, valuep);
                if (status != NC_NOERR) handle_error(status);
                status = ncmpi_put_att_double (ncid2, varids[i], name, type, attlen, (double *)valuep);
                if (status != NC_NOERR) handle_error(status);
                free(valuep);
                break;
            default:
                /* handle unexpected types */
                break;
            }
        }
    }

    /**
     * End Define Mode (switch to data mode) for output dataset
     *   Dataset API: Collective
     */

    status = ncmpi_enddef(ncid2);
    if (status != NC_NOERR) handle_error(status);

    /**
     * Read the variables' data from the input dataset (CHAR variables are skipped)
     * Write the data out to the corresponding variables in the output dataset
     *
     *  Data Partition (Assume 4 processors):
     *   square: 2-D, (Block, *), 25*100 from 100*100
     *   cube:   3-D, (Block, *, *), 25*100*100 from 100*100*100
     *   xytime: 3-D, (Block, *, *), 25*100*100 from 100*100*100
     *   time:   1-D, Block-wise, 25 from 100
     *
     *  Data Mode API: non-collective
     */

    status = ncmpi_begin_indep_data(ncid1);
    if (status != NC_NOERR) handle_error(status);
    status = ncmpi_begin_indep_data(ncid2);
    if (status != NC_NOERR) handle_error(status);

    for (i = 0; i < NC_MAX_VAR_DIMS; i++)
        start[i] = 0;
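    /*
     * Note: the loop below block-partitions only the first (slowest-varying)
     * dimension: each rank handles shape[0]/nprocs records starting at
     * rank * (shape[0]/nprocs).  Any remainder left when that dimension is
     * not evenly divisible by nprocs is simply not copied in this example.
     */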
    for (i = 0; i < nvars; i++) {
        varsize = 1;
        for (j = 0; j < varndims[i]; j++) {
            status = ncmpi_inq_dim(ncid1, vardims[i][j], name, shape + j);
            if (status != NC_NOERR) handle_error(status);
            if (j == 0) {
                shape[j] /= nprocs;
                start[j] = shape[j] * rank;
            }
            varsize *= shape[j];
        }
        switch (vartypes[i]) {
        case NC_CHAR:
            break;
        case NC_SHORT:
            valuep = (void *)malloc(varsize * sizeof(short));
            status = ncmpi_get_vara_short(ncid1, i, start, shape, (short *)valuep);
            if (status != NC_NOERR) handle_error(status);
            status = ncmpi_put_vara_short(ncid2, varids[i],
                                          start, shape, (short *)valuep);
            if (status != NC_NOERR) handle_error(status);
            free(valuep);
            break;
        case NC_INT:
            valuep = (void *)malloc(varsize * sizeof(int));
            status = ncmpi_get_vara_int(ncid1, i, start, shape, (int *)valuep);
            if (status != NC_NOERR) handle_error(status);
            status = ncmpi_put_vara_int(ncid2, varids[i],
                                        start, shape, (int *)valuep);
            if (status != NC_NOERR) handle_error(status);
            free(valuep);
            break;
        case NC_FLOAT:
            valuep = (void *)malloc(varsize * sizeof(float));
            status = ncmpi_get_vara_float(ncid1, i, start, shape, (float *)valuep);
            if (status != NC_NOERR) handle_error(status);
            status = ncmpi_put_vara_float(ncid2, varids[i],
                                          start, shape, (float *)valuep);
            if (status != NC_NOERR) handle_error(status);
            free(valuep);
            break;
        case NC_DOUBLE:
            valuep = (void *)malloc(varsize * sizeof(double));
            status = ncmpi_get_vara_double(ncid1, i, start, shape, (double *)valuep);
            if (status != NC_NOERR) handle_error(status);
            status = ncmpi_put_vara_double(ncid2, varids[i],
                                           start, shape, (double *)valuep);
            if (status != NC_NOERR) handle_error(status);
            free(valuep);
            break;
        default:
            /* handle unexpected types */
            break;
        }
    }

    status = ncmpi_end_indep_data(ncid1);
    if (status != NC_NOERR) handle_error(status);
    status = ncmpi_end_indep_data(ncid2);
    if (status != NC_NOERR) handle_error(status);

    status = ncmpi_sync(ncid1);
    if (status != NC_NOERR) handle_error(status);
    status = ncmpi_sync(ncid2);
    if (status != NC_NOERR) handle_error(status);

    /**
     * Close the datasets
     *   Dataset API:  collective
     */

    status = ncmpi_close(ncid1);
    if (status != NC_NOERR) handle_error(status);
    status = ncmpi_close(ncid2);
    if (status != NC_NOERR) handle_error(status);

    /*******************  END OF NETCDF ACCESS  ****************/

    if (rank == 0)
        fprintf(stderr, "OK\nInput file %s copied to: %s!\n", opts.infname, opts.outfname);

    MPI_Finalize();
    return 0;
}
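The example above copies every variable and attribute generically. As a minimal sketch of the independent-mode access pattern it relies on (the file path, the variable name "time", and the even divisibility of the dimension are assumptions for illustration; handle_error is the same helper used above), reading one rank's block of a 1-D int variable looks like this:

#include <stdlib.h>
#include <mpi.h>
#include <pnetcdf.h>

/* Sketch only: each rank reads its block of a 1-D int variable "time"
 * in independent data mode. */
static void read_block_indep(MPI_Comm comm, const char *path)
{
    int ncid, varid, dimid, rank, nprocs, status, *buf;
    MPI_Offset dimlen, start[1], count[1];

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &nprocs);

    /* open and inquiry are collective */
    status = ncmpi_open(comm, path, NC_NOWRITE, MPI_INFO_NULL, &ncid);
    if (status != NC_NOERR) handle_error(status);
    status = ncmpi_inq_varid(ncid, "time", &varid);
    if (status != NC_NOERR) handle_error(status);
    status = ncmpi_inq_vardimid(ncid, varid, &dimid);
    if (status != NC_NOERR) handle_error(status);
    status = ncmpi_inq_dimlen(ncid, dimid, &dimlen);
    if (status != NC_NOERR) handle_error(status);

    /* block partition of the single dimension (remainder ignored) */
    count[0] = dimlen / nprocs;
    start[0] = count[0] * rank;
    buf = (int *)malloc(count[0] * sizeof(int));

    /* switch to independent data mode for the non-collective read */
    status = ncmpi_begin_indep_data(ncid);
    if (status != NC_NOERR) handle_error(status);
    status = ncmpi_get_vara_int(ncid, varid, start, count, buf);
    if (status != NC_NOERR) handle_error(status);
    status = ncmpi_end_indep_data(ncid);
    if (status != NC_NOERR) handle_error(status);

    free(buf);
    status = ncmpi_close(ncid);
    if (status != NC_NOERR) handle_error(status);
}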
コード例 #20
0
int main(int argc, char** argv) {

  int i;
  double power_M;
  int *array_of_sizes, *array_of_subsizes, *array_of_starts;
  int ncid, *dimids, varid_1, varid_2;
  MPI_Offset *local_starts, *local_edges;
  char dimname[20];
  nc_type nc_etype;
  MPI_Datatype mpi_etype, mpi_subarray;
  TEST_NATIVE_ETYPE *buf1, *buf2, *tmpbuf;
  void *packbuf;
  int packsz;
  int packpos;
  int total_sz, local_sz;
  int nprocs, rank;
  int status;
  int success, successall;
  int malloc_failure, malloc_failure_any;
  int request;


  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

 /* test initializations: nc_file, nc_variables, dimensions, arrays */

  parse_args(argc, argv, rank);
  TEST_SET_NCMPI_ETYPE(nc_etype, mpi_etype);
#ifdef TEST_NCTYPE
  nc_etype = TEST_NCTYPE;
#endif
  if (rank == 0) {
    printf("testing memory subarray layout ...\n");
  }

  status = ncmpi_create(MPI_COMM_WORLD, filename, NC_CLOBBER,
			MPI_INFO_NULL, &ncid);
  TEST_HANDLE_ERR(status);

  array_of_sizes = (int *)
		   malloc(sizeof(int)*ndims*4 + sizeof(MPI_Offset)*ndims*4);
  array_of_subsizes = array_of_sizes + ndims;
  array_of_starts = array_of_subsizes + ndims;
  dimids = array_of_starts + ndims;
  local_starts = (MPI_Offset *)(dimids + ndims);
  local_edges = local_starts + ndims;

  total_sz = 1;
  power_M = 1;
  for (i=0; i<ndims; i++, power_M*=test_m) {
    array_of_sizes[i] = (int)(test_n*power_M);
    if (array_of_sizes[i] < 1) {
      /* lower bound check */
      array_of_sizes[i] = 1;
    } else if ( (double)total_sz*array_of_sizes[i] > (double)TEST_MAX_INT ){
      /* upper bound check */
      if (rank == 0) {
        fprintf(stderr, "Total size of array is too big to be represented\n");
        fprintf(stderr, "Current size = %f, Max size = %d\n",
                (double)total_sz*array_of_sizes[i], TEST_MAX_INT);
      }
      TEST_EXIT(-1);
    }
    total_sz *= array_of_sizes[i];
    sprintf(dimname, "dim_%d", i);
    status = ncmpi_def_dim(ncid, dimname,
			   (MPI_Offset)array_of_sizes[i], dimids+i);
    TEST_HANDLE_ERR(status);
  }

  if (order == MPI_ORDER_FORTRAN) {
    /* reverse the file-array dimensions, since netCDF always uses C order */
    TEST_REVERSE(dimids, ndims, int);
  }

  status = ncmpi_def_var(ncid, "var_1", nc_etype, ndims, dimids, &varid_1);
  TEST_HANDLE_ERR(status);

  TEST_REVERSE(dimids, ndims, int);
  status = ncmpi_def_var(ncid, "var_2", nc_etype, ndims, dimids, &varid_2);
  TEST_HANDLE_ERR(status);

  status = ncmpi_enddef(ncid);
  TEST_HANDLE_ERR(status);

  if (rank == 0) {
    printf("\t Filesize = %2.3fMB, MAX_Memory_needed = %2.3fMB\n\n",
	   2*total_sz*TEST_NCTYPE_LEN(nc_etype)/1024.0/1024.0,
	   ( (2*total_sz + 4*total_sz/nprocs)*sizeof(TEST_NATIVE_ETYPE)
	   + total_sz*TEST_NCTYPE_LEN(nc_etype) )/1024.0/1024.0);
  }

  buf1 = (TEST_NATIVE_ETYPE *)malloc(total_sz*sizeof(TEST_NATIVE_ETYPE)*2);
  malloc_failure = (buf1 == NULL ||
		    (float)total_sz*sizeof(TEST_NATIVE_ETYPE)*2 > TEST_MAX_INT);
  MPI_Allreduce(&malloc_failure, &malloc_failure_any, 1, MPI_INT,
                MPI_LOR, MPI_COMM_WORLD);
  if (malloc_failure_any) {
    if (rank == 0) {
      fprintf(stderr, "malloc(%2.3fMB) failed!\n",
              (float)total_sz*sizeof(TEST_NATIVE_ETYPE)*2/1024/1024);
      fprintf(stderr, "The whole array may be too big for malloc to handle!\n");
      fprintf(stderr, "Please choose smaller array size.\n");
    }
    TEST_EXIT(-1);
  }

  buf2 = buf1 + total_sz;
  for (i=0; i<total_sz; i++)
    /* make sure every type can represent the value, and keep the cycle
     * length irregular (not a power of 2) for the random test */
    buf1[i] = buf2[i] = (TEST_NATIVE_ETYPE)(i%127);

 /* PARTITION and calculate the local target region */

  partition_array(ndims,
		  array_of_sizes, array_of_subsizes, array_of_starts,
                  nprocs, rank);

  local_sz = 1;
  for (i=0; i<ndims; i++) {
    local_sz *= array_of_subsizes[i];
    local_edges[i] = (MPI_Offset)array_of_subsizes[i];
    local_starts[i] = (MPI_Offset)array_of_starts[i];
  }

  if (order == MPI_ORDER_FORTRAN) {
    /* reverse the file-array dimensions, since netCDF always uses C order */
    TEST_REVERSE(local_edges, ndims, MPI_Offset);
    TEST_REVERSE(local_starts, ndims, MPI_Offset);
  }

 /* CREATE local subarray memory view */
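 /*
  * mpi_subarray describes where this rank's block lives inside the full
  * in-memory array: every process allocates the whole total_sz array
  * (buf1/buf2), and the subarray type selects just the local
  * array_of_subsizes region starting at array_of_starts, so the flexible
  * ncmpi_iput_vara/ncmpi_iget_vara calls below access that region in place.
  */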

  if (local_sz == 0)
    MPI_Type_contiguous(0, mpi_etype, &mpi_subarray);
  else
    MPI_Type_create_subarray(ndims,
                             array_of_sizes,
                             array_of_subsizes,
                             array_of_starts,
                             order,
                             mpi_etype,
                             &mpi_subarray);
  MPI_Type_commit(&mpi_subarray);

 /* PRINT stats */
  if (rank == 0) {
    printf("Initialization:  NDIMS = %d, NATIVE_ETYPE = %s, NC_TYPE = %s\n\n",
	   ndims, TEST_NATIVE_ETYPE_STR, TEST_GET_NCTYPE_STR(nc_etype));

    printf("\t NC Var_1 Shape:\t [");
    if (order == MPI_ORDER_C) {
      TEST_PRINT_LIST(array_of_sizes, 0, ndims-1, 1);
    } else {
      TEST_PRINT_LIST(array_of_sizes, ndims-1, 0, -1);
    }
    printf("] Always ORDER_C\n");

    printf("\t NC Var_2 Shape:\t [");
    if (order == MPI_ORDER_C) {
      TEST_PRINT_LIST(array_of_sizes, ndims-1, 0, -1);
    } else {
      TEST_PRINT_LIST(array_of_sizes, 0, ndims-1, 1);
    }
    printf("] Always ORDER_C\n");

    printf("\t Memory Array Shape:\t [");
    TEST_PRINT_LIST(array_of_sizes, 0, ndims-1, 1);
    printf("] %s\n", ((order==MPI_ORDER_C)?"MPI_ORDER_C":"MPI_ORDER_FORTRAN"));
    printf("\t Memory Array Copys: buf1 for write, buf2 for read back (and compare)\n");

    printf("\n");

    printf("Logical Array Partition:\t BLOCK partition along all dimensions\n\n");

    printf("Access Pattern (subarray):  NPROCS = %d\n\n", nprocs);

  }

  fflush(stdout);
  MPI_Barrier(MPI_COMM_WORLD);

  for (i=0; i<nprocs; i++) {
    if (rank == i) {
      printf("\t Proc %2d of %2d:  starts = [", rank, nprocs);
      TEST_PRINT_LIST(local_starts, 0, ndims-1, 1);
      printf("], ");
      printf("counts = [");
      TEST_PRINT_LIST(local_edges, 0, ndims-1, 1);
      printf("] \n");

    }
    fflush(stdout);
    /* Synchronizer: processes print their output in turn :) */
    MPI_Barrier(MPI_COMM_WORLD);
  }
  if (rank == 0) {
    printf("\n");
    fflush(stdout);
  }
  MPI_Barrier(MPI_COMM_WORLD);

 /* RESET the target region of buf2 */
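 /*
  * The pack/unpack pair below overwrites only the elements of buf2 that
  * belong to this rank's subarray with -1: local_sz values are packed
  * contiguously and then unpacked through mpi_subarray, which scatters them
  * into the strided target region and leaves the rest of buf2 untouched.
  * The later memcmp(buf1, buf2) therefore checks both the region that is
  * read back and the region that must remain unmodified.
  */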

  MPI_Pack_size(local_sz, mpi_etype, MPI_COMM_SELF, &packsz);
  tmpbuf = (TEST_NATIVE_ETYPE *)
	   malloc(local_sz*sizeof(TEST_NATIVE_ETYPE) + packsz);
  packbuf = (void *)(tmpbuf + local_sz);
  for (i=0; i<local_sz; i++)
    tmpbuf[i] = (TEST_NATIVE_ETYPE)(-1);
  packpos = 0;
  MPI_Pack((void *)tmpbuf, local_sz, mpi_etype,
	   packbuf, packsz, &packpos, MPI_COMM_SELF);
  packpos = 0;
  MPI_Unpack(packbuf, packsz, &packpos, buf2, 1, mpi_subarray, MPI_COMM_SELF);

/* Begin of TEST1: test local write-n-readback */

  fflush(stdout);

  if (rank == 0) {
    printf("TEST1: \n");
  }

 /* WRITE target region from buf1 */

  MPI_Barrier(MPI_COMM_WORLD);
  if (rank == 0) {
    printf("\t [nonblocking] all procs writing their subarrays into Var_1 ...\n");
  }

  status = ncmpi_begin_indep_data(ncid);
  TEST_HANDLE_ERR(status);

  status = ncmpi_iput_vara(ncid, varid_1, local_starts, local_edges,
			   (const void *)buf1, 1, mpi_subarray, &request);
  TEST_HANDLE_ERR(status);

  MPI_Barrier(MPI_COMM_WORLD);
  if (rank == 0) {
    printf("\t nonblocking I/O returns ...\n");
    fflush(stdout);
  }

  ncmpi_wait(ncid, 1, &request, &status);

  MPI_Barrier(MPI_COMM_WORLD);
  if (rank == 0) {
    printf("\t nonblocking I/O finishes ...\n");
  }

  status = ncmpi_end_indep_data(ncid);
  TEST_HANDLE_ERR(status);

 /* READ target region back into buf2 */

  MPI_Barrier(MPI_COMM_WORLD);
  if (rank == 0) {
    printf("\t [nonblocking] all procs reading their subarrays from Var_1 ...\n");
  }

  status = ncmpi_begin_indep_data(ncid);
  TEST_HANDLE_ERR(status);

  status = ncmpi_iget_vara(ncid, varid_1, local_starts, local_edges,
			   (void *)buf2, 1, mpi_subarray, &request);
  TEST_HANDLE_ERR(status);

  MPI_Barrier(MPI_COMM_WORLD);
  if (rank == 0) {
    printf("\t nonblocking I/O returns ...\n");
    fflush(stdout);
  }

  ncmpi_wait(ncid, 1, &request, &status);

  MPI_Barrier(MPI_COMM_WORLD);
  if (rank == 0) {
    printf("\t nonblocking I/O finishes ...\n");
  }

  status = ncmpi_end_indep_data(ncid);
  TEST_HANDLE_ERR(status);

 /* COMPARE buf1 and buf2 for equality */

  if (memcmp((void *)buf1, (void *)buf2, total_sz*sizeof(TEST_NATIVE_ETYPE)))
    success = 0;
  else
    success = 1;

  MPI_Allreduce(&success, &successall, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);

  if (rank == 0) {
    if (successall)
      printf("\t PASS: memory subarray layout passes test1! \n\n");
    else
      printf("\t ERROR: memory subarray layout fails test1! \n\n");
  }

/* End of TEST1 */

 /* test finalization */

  ncmpi_close(ncid);

  MPI_Type_free(&mpi_subarray);
  free(tmpbuf);
  free(buf1);
  free(array_of_sizes);

  MPI_Finalize();

  return !successall;
}
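For comparison, the same nonblocking flexible-API write can also be completed collectively. A minimal sketch under the setup of the example above (ncid, varid_1, buf1, mpi_subarray, local_starts, local_edges, status and TEST_HANDLE_ERR are the identifiers defined there) stays in the default collective data mode and uses ncmpi_wait_all instead of ncmpi_wait:

  /* Collective counterpart of the TEST1 write: post the nonblocking
   * flexible-API put in (default) collective data mode and complete it
   * with ncmpi_wait_all; no begin/end_indep_data calls are needed. */
  {
    int req, st;

    status = ncmpi_iput_vara(ncid, varid_1, local_starts, local_edges,
                             (const void *)buf1, 1, mpi_subarray, &req);
    TEST_HANDLE_ERR(status);

    /* ncmpi_wait_all is collective: every process must reach it */
    status = ncmpi_wait_all(ncid, 1, &req, &st);
    TEST_HANDLE_ERR(status);
    TEST_HANDLE_ERR(st);
  }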