/* Test a small file with one record var, which grows, and has
 * attributes. */
static int
test_one_growing_with_att(const char *testfile, int cmode)
{
    int err, ncid, dimid, varid;
    char data[MAX_RECS], data_in;
    char att_name[NC_MAX_NAME + 1];
    MPI_Offset start[ONE_DIM], count[ONE_DIM], index[ONE_DIM], len_in;
    int r;

    /* Create a file with one unlimited dimension and one var. */
    err = ncmpi_create(MPI_COMM_WORLD, testfile, cmode, MPI_INFO_NULL, &ncid); ERR
    err = ncmpi_def_dim(ncid, DIM1_NAME, NC_UNLIMITED, &dimid); ERR
    err = ncmpi_def_var(ncid, VAR_NAME, NC_CHAR, 1, &dimid, &varid); ERR
    err = ncmpi_close(ncid); ERR

    /* Create some phony data. */
    for (data[0] = 'a', r = 1; r < MAX_RECS; r++)
        data[r] = data[r - 1] + 1;

    /* Normally one would not close and reopen the file for each record,
     * nor add an attribute each time a record is added, but this gives
     * the library a little work-out here... */
    for (r = 0; r < MAX_RECS; r++) {
        /* Write one record of var data, a single character. */
        err = ncmpi_open(MPI_COMM_WORLD, testfile, NC_WRITE, MPI_INFO_NULL, &ncid); ERR
        count[0] = 1;
        start[0] = r;
        err = ncmpi_put_vara_text_all(ncid, varid, start, count, &data[r]); ERR
        sprintf(att_name, "a_%d", data[r]);
        err = ncmpi_redef(ncid); ERR
        err = ncmpi_put_att_text(ncid, varid, att_name, 1, &data[r]); ERR
        err = ncmpi_close(ncid); ERR

        /* Reopen the file and check it. */
        err = ncmpi_open(MPI_COMM_WORLD, testfile, NC_NOWRITE, MPI_INFO_NULL, &ncid); ERR
        err = ncmpi_inq_dimlen(ncid, 0, &len_in); ERR
        if (len_in != r + 1) {printf("Error at line %d\n",__LINE__); return 1;}
        index[0] = r;
        err = ncmpi_begin_indep_data(ncid); ERR
        err = ncmpi_get_var1_text(ncid, 0, index, &data_in); ERR
        if (data_in != data[r]) {printf("Error at line %d\n",__LINE__); return 1;}
        err = ncmpi_get_att_text(ncid, varid, att_name, &data_in); ERR
        if (data_in != data[r]) {printf("Error at line %d\n",__LINE__); return 1;}
        err = ncmpi_close(ncid); ERR
    } /* Next record. */

    return 0;
}
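The tests in this section rely on an ERR macro and a handful of constants (MAX_RECS, ONE_DIM, DIM1_NAME, VAR_NAME, and so on) defined elsewhere in the test harness. A minimal sketch of what such definitions could look like is given below; the names and values here are assumptions, not the harness's actual ones.

/* Hypothetical sketch of the harness definitions assumed by these tests;
 * the real test suite provides its own versions. */
#include <stdio.h>
#include <pnetcdf.h>

#define MAX_RECS  3          /* illustrative record count */
#define ONE_DIM   1
#define DIM1_NAME "dim1"     /* illustrative dimension/variable names */
#define VAR_NAME  "var1"

/* On failure, print the PnetCDF error string and bail out of the calling
 * test function with a nonzero return. */
#define ERR { if (err != NC_NOERR) { \
                  printf("Error at line %d: %s\n", __LINE__, ncmpi_strerror(err)); \
                  return 1; \
              } }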
/* Test a small file with an unlimited dimension.
 * NOTE: Normally I write a NULL terminator for my attributes and text
 * strings, but this reproduces a bug that a Fortran user sent us. So
 * string data are written to the file without null terminators. - Ed */
static int
test_small_unlim(const char *testfile, int cmode)
{
    int i, err, ncid, dimids[NDIMS], varid;
    char data[NUM_VALS][STR_LEN + 1], data_in[NUM_VALS][STR_LEN];
    int ndims, nvars, natts, unlimdimid;
    MPI_Offset start[NDIMS], count[NDIMS];

    /* Create null-terminated text strings of correct length. */
    /*for (i = 0; i < NUM_VALS; i++) strcpy(data[i], source);*/
    strcpy(data[0], "2005-04-11_12:00:00");
    strcpy(data[1], "2005-04-11_13:00:00");

    /* Create a file with two dimensions (one unlimited), one var, and a
     * global att. */
    err = ncmpi_create(MPI_COMM_WORLD, testfile, cmode, MPI_INFO_NULL, &ncid); ERR
    err = ncmpi_def_dim(ncid, DIM1_NAME, NC_UNLIMITED, dimids); ERR
    err = ncmpi_def_dim(ncid, DIM2_NAME, STR_LEN, &dimids[1]); ERR
    err = ncmpi_def_var(ncid, VAR_NAME, NC_CHAR, 2, dimids, &varid); ERR
    err = ncmpi_put_att_text(ncid, NC_GLOBAL, ATT_NAME2, strlen(TITLE), TITLE); ERR
    err = ncmpi_enddef(ncid); ERR

    /* Write some records of var data. */
    count[0] = 1;
    count[1] = STR_LEN;
    start[1] = 0;
    for (start[0] = 0; start[0] < NUM_VALS; start[0]++) {
        err = ncmpi_put_vara_text_all(ncid, varid, start, count, data[start[0]]); ERR
    }

    /* We're done! */
    err = ncmpi_close(ncid); ERR

    /* Reopen the file and check it. */
    err = ncmpi_open(MPI_COMM_WORLD, testfile, NC_NOWRITE, MPI_INFO_NULL, &ncid); ERR
    err = ncmpi_inq(ncid, &ndims, &nvars, &natts, &unlimdimid); ERR
    if (ndims != 2 || nvars != 1 || natts != 1 || unlimdimid != 0)
        {printf("Error at line %d\n",__LINE__); return 1;}
    err = ncmpi_get_var_text_all(ncid, varid, (char *)data_in); ERR
    for (i = 0; i < NUM_VALS; i++)
        /* if (strncmp(data[i], data_in[i], STR_LEN)) {printf("Error at line %d\n",__LINE__); return 1;} */
        if (strncmp(data[i], data_in[i], STR_LEN)) {
            printf("i=%d data=%s data_in=%s\n", i, data[i], data_in[i]);
        }
    err = ncmpi_close(ncid); ERR

    return 0;
}
void * IOR_Create_NCMPI(char * testFileName, IOR_param_t * param) { int * fd; int fd_mode; MPI_Info mpiHints = MPI_INFO_NULL; /* Wei-keng Liao: read and set MPI file hints from hintsFile */ SetHints(&mpiHints, param->hintsFileName); if (rank == 0 && param->showHints) { fprintf(stdout, "\nhints passed to MPI_File_open() {\n"); ShowHints(&mpiHints); fprintf(stdout, "}\n"); } fd = (int *)malloc(sizeof(int)); if (fd == NULL) ERR("Unable to malloc file descriptor"); fd_mode = GetFileMode(param); NCMPI_CHECK(ncmpi_create(testComm, testFileName, fd_mode, mpiHints, fd), "cannot create file"); /* Wei-keng Liao: print the MPI file hints currently used */ /* WEL - add when ncmpi_get_file_info() is in current parallel-netcdf release if (rank == 0 && param->showHints) { MPI_CHECK(ncmpi_get_file_info(*fd, &mpiHints), "cannot get file info"); fprintf(stdout, "\nhints returned from opened file {\n"); ShowHints(&mpiHints); fprintf(stdout, "}\n"); } */ /* Wei-keng Liao: free up the mpiHints object */ /* WEL - this needs future fix from next release of PnetCDF if (mpiHints != MPI_INFO_NULL) MPI_CHECK(MPI_Info_free(&mpiHints), "cannot free file info"); */ return(fd); } /* IOR_Create_NCMPI() */
static int
test_small_atts(const char *testfile, int cmode)
{
    int ncid, err;
    char att[MAX_LEN + 1], att_in[MAX_LEN + 1], source[MAX_LEN + 1] = "0123456";
    int ndims, nvars, natts, unlimdimid;
    MPI_Offset len_in;
    int t, f;

    /* Run this with and without fill mode. */
    for (f = 0; f < 2; f++) {
        /* Create small files with an attribute that grows by one each time. */
        for (t = 1; t < MAX_LEN; t++) {
            /* Create null-terminated text string of correct length. */
            strncpy(att, source, t);
            att[t] = '\0';

            /* Create a file with one attribute. */
            err = ncmpi_create(MPI_COMM_WORLD, testfile, cmode, MPI_INFO_NULL, &ncid); ERR
            err = ncmpi_put_att_text(ncid, NC_GLOBAL, ATT_NAME, t + 1, att); ERR
            if (f) { err = ncmpi_set_fill(ncid, NC_NOFILL, NULL); ERR }
            err = ncmpi_close(ncid); ERR

            /* Reopen the file and check it. */
            err = ncmpi_open(MPI_COMM_WORLD, testfile, NC_NOWRITE, MPI_INFO_NULL, &ncid); ERR
            err = ncmpi_inq(ncid, &ndims, &nvars, &natts, &unlimdimid); ERR
            if (ndims != 0 || nvars != 0 || natts != 1 || unlimdimid != -1)
                {printf("Error at line %d\n",__LINE__); return 1;}
            err = ncmpi_inq_attlen(ncid, NC_GLOBAL, ATT_NAME, &len_in); ERR
            if (len_in != t + 1) {printf("Error at line %d\n",__LINE__); return 1;}
            err = ncmpi_get_att_text(ncid, NC_GLOBAL, ATT_NAME, att_in); ERR
            if (strncmp(att_in, att, t)) {printf("Error at line %d\n",__LINE__); return 1;}
            err = ncmpi_close(ncid); ERR
        }
    }
    return 0;
}
static int
check_add_var(char *filename)
{
    int err, nerrs=0, ncid, cmode, varid, dimid[4];

    /* create a new file ---------------------------------------------------*/
    cmode = NC_CLOBBER;
    err = ncmpi_create(MPI_COMM_WORLD, filename, cmode, MPI_INFO_NULL, &ncid); ERR
    err = ncmpi_def_dim(ncid, "Y", NC_UNLIMITED, &dimid[0]); ERR
    err = ncmpi_def_dim(ncid, "X", 5, &dimid[1]); ERR
    err = ncmpi_def_dim(ncid, "YY", 66661, &dimid[2]); ERR
    err = ncmpi_def_dim(ncid, "XX", 66661, &dimid[3]); ERR
    err = ncmpi_def_var(ncid, "var", NC_INT, 1, dimid+1, &varid); ERR
    err = ncmpi_def_var(ncid, "var_last", NC_FLOAT, 2, dimid+2, &varid); ERR
    err = ncmpi_enddef(ncid); ERR

    /* add a new record variable: it would be placed after the large
     * fixed-size variable "var_last", which a CDF-1 file cannot represent,
     * so ncmpi_enddef is expected to fail with NC_EVARSIZE */
    err = ncmpi_redef(ncid); ERR
    err = ncmpi_def_var(ncid, "var_new", NC_INT, 2, dimid, &varid); ERR
    err = ncmpi_enddef(ncid);
    if (err != NC_EVARSIZE) {
        printf("\nError at line=%d: expecting error code NC_EVARSIZE but got %s\n",
               __LINE__, nc_err_code_name(err));
        nerrs++;
    }
    err = ncmpi_close(ncid);
    if (err != NC_EVARSIZE) {
        printf("\nError at line=%d: expecting error code NC_EVARSIZE but got %s\n",
               __LINE__, nc_err_code_name(err));
        nerrs++;
    }
    return nerrs;
}
static int
check_last_var(char *filename)
{
    int err, nerrs=0, ncid, cmode, varid, dimid[4];

    /* create a new file ---------------------------------------------------*/
    cmode = NC_CLOBBER;
    err = ncmpi_create(MPI_COMM_WORLD, filename, cmode, MPI_INFO_NULL, &ncid); ERR
    err = ncmpi_def_dim(ncid, "Y", NC_UNLIMITED, &dimid[0]); ERR
    err = ncmpi_def_dim(ncid, "X", 5, &dimid[1]); ERR
    err = ncmpi_def_dim(ncid, "YY", 66661, &dimid[2]); ERR
    err = ncmpi_def_dim(ncid, "XX", 66661, &dimid[3]); ERR

    /* define only fixed-size variables */
    err = ncmpi_def_var(ncid, "var", NC_INT, 1, dimid+1, &varid); ERR
    err = ncmpi_def_var(ncid, "var_last", NC_FLOAT, 2, dimid+2, &varid); ERR

    err = ncmpi_enddef(ncid); ERR
    err = ncmpi_close(ncid); ERR
    return nerrs;
}
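A hypothetical driver for the two checks above might look like the sketch below (the real test program supplies its own main() and reporting): check_last_var should pass because the oversized "var_last" is the final variable in the file, while check_add_var expects NC_EVARSIZE once another variable is defined after it.

/* Hypothetical driver sketch; names and the default file name are
 * illustrative, not taken from the original test program. */
int main(int argc, char **argv)
{
    int rank, nerrs = 0;
    char filename[] = "testfile.nc";

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    nerrs += check_last_var(filename); /* large last fixed-size var: legal  */
    nerrs += check_add_var(filename);  /* var defined after it: NC_EVARSIZE */

    if (rank == 0)
        printf("%s\n", nerrs ? "FAILED" : "PASSED");

    MPI_Finalize();
    return (nerrs > 0);
}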
// ADD-BY-LEETEN 08/06/2011-BEGIN ///////////////////////////////////////////////////////////////// void ITLRandomField::_CreateNetCdf ( const char *szPath, const char *szFilenamePrefix ) { char szNetCdfPathFilename[1024]; #ifndef WITH_PNETCDF // ADD-BY-LEETEN 08/12/2011 sprintf(szNetCdfPathFilename, "%s/%s.rank_%d.nc", szPath, szFilenamePrefix, iRank); // Create the file. ASSERT_NETCDF(nc_create( szNetCdfPathFilename, NC_CLOBBER, &iNcId)); // ADD-BY-LEETEN 08/12/2011-BEGIN #else // #ifndef WITH_PNETCDF sprintf(szNetCdfPathFilename, "%s/%s.nc", szPath, szFilenamePrefix); ASSERT_NETCDF(ncmpi_create( MPI_COMM_WORLD, szNetCdfPathFilename, NC_CLOBBER, MPI_INFO_NULL, &iNcId)); #endif // #ifndef WITH_PNETCDF // ADD-BY-LEETEN 08/12/2011-END // find the maximal block dim int piBlockDimMaxLengths[CBlock::MAX_DIM]; for(int d = 0; d < CBlock::MAX_DIM; d++) { piBlockDimMaxLengths[d] = 0; } for(int b = 0; b < (int)pcBlockInfo.USize(); b++) for(int d = 0; d < CBlock::MAX_DIM; d++) piBlockDimMaxLengths[d] = max(piBlockDimMaxLengths[d], pcBlockInfo[b].piDimLengths[d]); // ADD-BY-LEETEN 08/12/2011-BEGIN // collect the max. length of all dim. #if 0 // MOD-BY-LEETEN 08/29/2011-FROM: for(int d = 0; d < CBlock::MAX_DIM; d++) { int iTemp = piBlockDimMaxLengths[d]; ASSERT_OR_LOG(MPI_SUCCESS == MPI_Reduce(&iTemp, &piBlockDimMaxLengths[d], 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD), ""); if( 0 == iRank ) ASSERT_OR_LOG(MPI_SUCCESS == MPI_Bcast(&piBlockDimMaxLengths[d], 1, MPI_INT, 0, MPI_COMM_WORLD), ""); } #else // MOD-BY-LEETEN 08/29/2011-TO: ASSERT_OR_LOG(MPI_SUCCESS == MPI_Allreduce(MPI_IN_PLACE, &piBlockDimMaxLengths[0], CBlock::MAX_DIM, MPI_INT, MPI_MAX, MPI_COMM_WORLD), ""); #endif // MOD-BY-LEETEN 08/29/2011-END #ifndef WITH_PNETCDF // ADD-BY-LEETEN 08/12/2011 for(int d = 0; d < CBlock::MAX_DIM; d++) ASSERT_NETCDF(nc_def_dim( iNcId, pszNcDimNames[d], piBlockDimMaxLengths[d], &piNcDimIds[d])); // Define the block dimension ASSERT_NETCDF(nc_def_dim( iNcId, pszNcDimNames[NC_DIM_BLOCK], IGetNrOfBlocks(), &piNcDimIds[NC_DIM_BLOCK])); // Define the time dimension with unlimited length ASSERT_NETCDF(nc_def_dim( iNcId, pszNcDimNames[NC_DIM_GLOBAL_TIME], NC_UNLIMITED, &piNcDimIds[NC_DIM_GLOBAL_TIME])); // Define the variables for the coordinates for(int d = 0; d < CBlock::MAX_DIM; d++) { int piBlockDims[3]; piBlockDims[0] = piNcDimIds[NC_DIM_GLOBAL_TIME]; piBlockDims[1] = piNcDimIds[NC_DIM_BLOCK]; piBlockDims[2] = piNcDimIds[d]; ASSERT_NETCDF(nc_def_var( iNcId, pszNcDimNames[d], NC_DOUBLE, sizeof(piBlockDims) / sizeof(piBlockDims[0]), piBlockDims, &piNcDimVarIds[d])); } // define the variable for time stamp ASSERT_NETCDF(nc_def_var( iNcId, pszNcDimNames[NC_DIM_GLOBAL_TIME], NC_INT, 1, &piNcDimIds[NC_DIM_GLOBAL_TIME], &iNcTimeVarId)); // define the variable for all data components for(int c = 0; c < this->IGetNrOfDataComponents(); c++) { int piBlockDims[6]; piBlockDims[0] = piNcDimIds[NC_DIM_GLOBAL_TIME]; piBlockDims[1] = piNcDimIds[NC_DIM_BLOCK]; piBlockDims[2] = piNcDimIds[NC_DIM_T]; piBlockDims[3] = piNcDimIds[NC_DIM_Z]; piBlockDims[4] = piNcDimIds[NC_DIM_Y]; piBlockDims[5] = piNcDimIds[NC_DIM_X]; ASSERT_NETCDF(nc_def_var( iNcId, this->CGetDataComponent(c).szName, NC_DOUBLE, sizeof(piBlockDims) / sizeof(piBlockDims[0]), piBlockDims, &this->CGetDataComponent(c).iVarId)); } // define the varaibles for all random variables for(int r = 0; r < this->IGetNrOfRandomVariables(); r++) { CRandomVariable& cRv = this->CGetRandomVariable(r); int piBlockDims[6]; piBlockDims[0] = piNcDimIds[NC_DIM_GLOBAL_TIME]; piBlockDims[1] = 
piNcDimIds[NC_DIM_BLOCK]; piBlockDims[2] = piNcDimIds[NC_DIM_T]; piBlockDims[3] = piNcDimIds[NC_DIM_Z]; piBlockDims[4] = piNcDimIds[NC_DIM_Y]; piBlockDims[5] = piNcDimIds[NC_DIM_X]; ASSERT_NETCDF(nc_def_var( iNcId, cRv.szName, NC_FLOAT, sizeof(piBlockDims) / sizeof(piBlockDims[0]), piBlockDims, &cRv.iVarId)); } // finish the definition mode ASSERT_NETCDF(nc_enddef( iNcId)); // ADD-BY-LEETEN 08/12/2011-BEGIN #else // #ifndef WITH_PNETCDF for(int d = 0; d < CBlock::MAX_DIM; d++) { ASSERT_NETCDF(ncmpi_def_dim( iNcId, pszNcDimNames[d], piBlockDimMaxLengths[d], &piNcDimIds[d])); } // Define the block dimension ASSERT_NETCDF(ncmpi_def_dim( iNcId, pszNcDimNames[NC_DIM_BLOCK], // MOD-BY-LEETEN 08/30/2011-FROM: // IGetNrOfBlocks(), // TO: iNrOfGlobalBlocks, // MOD-BY-LEETEN 08/30/2011-END &piNcDimIds[NC_DIM_BLOCK])); // Define the time dimension with unlimited length ASSERT_NETCDF(ncmpi_def_dim( iNcId, pszNcDimNames[NC_DIM_GLOBAL_TIME], NC_UNLIMITED, &piNcDimIds[NC_DIM_GLOBAL_TIME])); // Define the variables for the coordinates for(int d = 0; d < CBlock::MAX_DIM; d++) { int piBlockDims[3]; piBlockDims[0] = piNcDimIds[NC_DIM_GLOBAL_TIME]; piBlockDims[1] = piNcDimIds[NC_DIM_BLOCK]; piBlockDims[2] = piNcDimIds[d]; ASSERT_NETCDF(ncmpi_def_var( iNcId, pszNcDimNames[d], NC_DOUBLE, sizeof(piBlockDims) / sizeof(piBlockDims[0]), piBlockDims, &piNcDimVarIds[d])); } // define the variable for time stamp ASSERT_NETCDF(ncmpi_def_var( iNcId, pszNcDimNames[NC_DIM_GLOBAL_TIME], NC_INT, 1, &piNcDimIds[NC_DIM_GLOBAL_TIME], &iNcTimeVarId)); // define the variable for all data components for(int c = 0; c < this->IGetNrOfDataComponents(); c++) { int piBlockDims[6]; piBlockDims[0] = piNcDimIds[NC_DIM_GLOBAL_TIME]; piBlockDims[1] = piNcDimIds[NC_DIM_BLOCK]; piBlockDims[2] = piNcDimIds[NC_DIM_T]; piBlockDims[3] = piNcDimIds[NC_DIM_Z]; piBlockDims[4] = piNcDimIds[NC_DIM_Y]; piBlockDims[5] = piNcDimIds[NC_DIM_X]; ASSERT_NETCDF(ncmpi_def_var( iNcId, this->CGetDataComponent(c).szName, NC_DOUBLE, sizeof(piBlockDims) / sizeof(piBlockDims[0]), piBlockDims, &this->CGetDataComponent(c).iVarId)); } // define the varaibles for all random variables for(int r = 0; r < this->IGetNrOfRandomVariables(); r++) { CRandomVariable& cRv = this->CGetRandomVariable(r); int piBlockDims[6]; piBlockDims[0] = piNcDimIds[NC_DIM_GLOBAL_TIME]; piBlockDims[1] = piNcDimIds[NC_DIM_BLOCK]; piBlockDims[2] = piNcDimIds[NC_DIM_T]; piBlockDims[3] = piNcDimIds[NC_DIM_Z]; piBlockDims[4] = piNcDimIds[NC_DIM_Y]; piBlockDims[5] = piNcDimIds[NC_DIM_X]; ASSERT_NETCDF(ncmpi_def_var( iNcId, cRv.szName, NC_FLOAT, sizeof(piBlockDims) / sizeof(piBlockDims[0]), piBlockDims, &cRv.iVarId)); } // finish the definition mode ASSERT_NETCDF(ncmpi_enddef( iNcId)); #endif // #ifndef WITH_PNETCDF // ADD-BY-LEETEN 08/12/2011-END // enter the data mode... }
int main(int argc, char** argv) { extern int optind; char *filename="testfile.nc"; int i, rank, nprocs, verbose=1, err; int ncid, cmode, varid, dimid[2], num_reqs, *buffer, **bufs, *nvarids; MPI_Offset w_len, **starts, **counts, *bufcounts; MPI_Datatype *datatypes; MPI_Info info; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); /* get command-line arguments */ while ((i = getopt(argc, argv, "hq")) != EOF) switch(i) { case 'q': verbose = 0; break; case 'h': default: if (rank==0) usage(argv[0]); MPI_Finalize(); return 0; } argc -= optind; argv += optind; if (argc == 1) filename = argv[0]; /* optional argument */ if (nprocs != 4 && rank == 0 && verbose) printf("Warning: this program is intended to run on 4 processes\n"); /* set an MPI-IO hint to disable file offset alignment for fix-sized * variables */ MPI_Info_create(&info); MPI_Info_set(info, "nc_var_align_size", "1"); /* create a new file for writing ----------------------------------------*/ cmode = NC_CLOBBER | NC_64BIT_DATA; err = ncmpi_create(MPI_COMM_WORLD, filename, cmode, info, &ncid); ERR MPI_Info_free(&info); /* create a global array of size NY * NX */ err = ncmpi_def_dim(ncid, "Y", NY, &dimid[0]); ERR err = ncmpi_def_dim(ncid, "X", NX, &dimid[1]); ERR err = ncmpi_def_var(ncid, "var", NC_INT, NDIMS, dimid, &varid); ERR err = ncmpi_enddef(ncid); ERR /* pick arbitrary numbers of requests for 4 processes */ num_reqs = 0; if (rank == 0) num_reqs = 4; else if (rank == 1) num_reqs = 6; else if (rank == 2) num_reqs = 5; else if (rank == 3) num_reqs = 4; starts = (MPI_Offset**) malloc(num_reqs * sizeof(MPI_Offset*)); counts = (MPI_Offset**) malloc(num_reqs * sizeof(MPI_Offset*)); starts[0] = (MPI_Offset*) calloc(num_reqs * NDIMS, sizeof(MPI_Offset)); counts[0] = (MPI_Offset*) calloc(num_reqs * NDIMS, sizeof(MPI_Offset)); for (i=1; i<num_reqs; i++) { starts[i] = starts[i-1] + NDIMS; counts[i] = counts[i-1] + NDIMS; } /* assign arbitrary starts and counts */ const int y=0, x=1; if (rank == 0) { starts[0][y] = 0; starts[0][x] = 5; counts[0][y] = 1; counts[0][x] = 2; starts[1][y] = 1; starts[1][x] = 0; counts[1][y] = 1; counts[1][x] = 1; starts[2][y] = 2; starts[2][x] = 6; counts[2][y] = 1; counts[2][x] = 2; starts[3][y] = 3; starts[3][x] = 0; counts[3][y] = 1; counts[3][x] = 3; /* rank 0 is writing the followings: ("-" means skip) - - - - - 0 0 - - - 0 - - - - - - - - - - - - - - - 0 0 - - 0 0 0 - - - - - - - */ } else if (rank ==1) { starts[0][y] = 0; starts[0][x] = 3; counts[0][y] = 1; counts[0][x] = 2; starts[1][y] = 0; starts[1][x] = 8; counts[1][y] = 1; counts[1][x] = 2; starts[2][y] = 1; starts[2][x] = 5; counts[2][y] = 1; counts[2][x] = 2; starts[3][y] = 2; starts[3][x] = 0; counts[3][y] = 1; counts[3][x] = 2; starts[4][y] = 2; starts[4][x] = 8; counts[4][y] = 1; counts[4][x] = 2; starts[5][y] = 3; starts[5][x] = 4; counts[5][y] = 1; counts[5][x] = 3; /* rank 1 is writing the followings: ("-" means skip) - - - 1 1 - - - 1 1 - - - - - 1 1 - - - 1 1 - - - - - - 1 1 - - - - 1 1 1 - - - */ } else if (rank ==2) { starts[0][y] = 0; starts[0][x] = 7; counts[0][y] = 1; counts[0][x] = 1; starts[1][y] = 1; starts[1][x] = 1; counts[1][y] = 1; counts[1][x] = 3; starts[2][y] = 1; starts[2][x] = 7; counts[2][y] = 1; counts[2][x] = 3; starts[3][y] = 2; starts[3][x] = 2; counts[3][y] = 1; counts[3][x] = 1; starts[4][y] = 3; starts[4][x] = 3; counts[4][y] = 1; counts[4][x] = 1; /* rank 2 is writing the followings: ("-" means skip) - - - - - - - 2 - - - 2 2 2 - - - 2 2 2 - - 2 - - - - - - - - - - 2 
- - - - - - */ } else if (rank ==3) { starts[0][y] = 0; starts[0][x] = 0; counts[0][y] = 1; counts[0][x] = 3; starts[1][y] = 1; starts[1][x] = 4; counts[1][y] = 1; counts[1][x] = 1; starts[2][y] = 2; starts[2][x] = 3; counts[2][y] = 1; counts[2][x] = 3; starts[3][y] = 3; starts[3][x] = 7; counts[3][y] = 1; counts[3][x] = 3; /* rank 3 is writing the followings: ("-" means skip) 3 3 3 - - - - - - - - - - - 3 - - - - - - - - 3 3 3 - - - - - - - - - - - 3 3 3 */ } nvarids = (int*) malloc(num_reqs * sizeof(int)); bufcounts = (MPI_Offset*) malloc(num_reqs * sizeof(MPI_Offset)); datatypes = (MPI_Datatype*) malloc(num_reqs * sizeof(MPI_Datatype)); w_len = 0; for (i=0; i<num_reqs; i++) { nvarids[i] = varid; bufcounts[i] = counts[i][x]; datatypes[i] = MPI_INT; w_len += bufcounts[i]; } /* allocate I/O buffer and initialize its contents */ buffer = (int*) malloc(w_len * sizeof(int)); for (i=0; i<w_len; i++) buffer[i] = rank; /* set the buffer pointers to different offsets to the I/O buffer */ bufs = (int**) malloc(num_reqs * sizeof(int*)); bufs[0] = buffer; for (i=1; i<num_reqs; i++) bufs[i] = bufs[i-1] + bufcounts[i-1]; err = ncmpi_mput_vara_all(ncid, num_reqs, nvarids, starts, counts, (void**)bufs, bufcounts, datatypes); ERR err = ncmpi_close(ncid); ERR free(buffer); free(bufs); free(nvarids); free(bufcounts); free(datatypes); free(starts[0]); free(counts[0]); free(starts); free(counts); /* check if there is any PnetCDF internal malloc residue */ MPI_Offset malloc_size, sum_size; err = ncmpi_inq_malloc_size(&malloc_size); if (err == NC_NOERR) { MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD); if (rank == 0 && sum_size > 0) printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n", sum_size); } MPI_Finalize(); return 0; }
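The mput program above only writes. As a hedged illustration (not part of the original test), one could reopen the file and dump the global array for visual inspection; NY and NX below are the same macros the test assumes, and the variable name "var" matches the definition above.

/* Illustrative read-back sketch: reopen the file written by
 * ncmpi_mput_vara_all() and print the global array on rank 0. */
static int dump_var(const char *filename, int rank)
{
    int i, j, err, ncid, varid, buf[NY][NX];

    err = ncmpi_open(MPI_COMM_WORLD, filename, NC_NOWRITE, MPI_INFO_NULL, &ncid);
    if (err != NC_NOERR) return 1;
    err = ncmpi_inq_varid(ncid, "var", &varid);
    if (err != NC_NOERR) return 1;
    err = ncmpi_get_var_int_all(ncid, varid, &buf[0][0]);  /* collective read */
    if (err != NC_NOERR) return 1;
    err = ncmpi_close(ncid);

    if (rank == 0)
        for (i=0; i<NY; i++) {
            /* cells no rank wrote hold arbitrary values (NC_NOFILL default) */
            for (j=0; j<NX; j++) printf(" %2d", buf[i][j]);
            printf("\n");
        }
    return (err != NC_NOERR);
}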
/**
 * Write a parallel-netCDF file.
 *
 * We assume here that localData is a scalar field.
 *
 * Pnetcdf uses row-major format (same as FFTW).
 *
 * \param[in] filename  : PnetCDF filename
 * \param[in] starts    : offset to where to start writing data
 * \param[in] counts    : number of elements written (3D sub-domain inside global)
 * \param[in] gsizes    : global sizes
 * \param[in] localData : actual data buffer (size : nx*ny*nz*sizeof(float))
 */
void write_pnetcdf(const std::string &filename,
                   MPI_Offset         starts[3],
                   MPI_Offset         counts[3],
                   int                gsizes[3],
                   float             *localData)
{
  int myRank;
  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

  // netcdf file id
  int ncFileId;
  int err;

  // file creation mode
  int ncCreationMode = NC_CLOBBER;

  // CDF-5 is almost mandatory for very large files (>= 2x10^9 cells)
  // not useful here
  bool useCDF5 = false;
  if (useCDF5)
    ncCreationMode = NC_CLOBBER|NC_64BIT_DATA;
  else // use CDF-2 file format
    ncCreationMode = NC_CLOBBER|NC_64BIT_OFFSET;

  // verbose log ?
  //bool pnetcdf_verbose = false;

  int nbVar=1;
  int dimIds[3], varIds[nbVar];
  //MPI_Offset write_size, sum_write_size;
  MPI_Info mpi_info_used;
  //char str[512];

  // time measurement variables
  //float write_timing, max_write_timing, write_bw;

  /*
   * Create NetCDF file
   */
  err = ncmpi_create(MPI_COMM_WORLD, filename.c_str(),
                     ncCreationMode, MPI_INFO_NULL, &ncFileId);
  if (err != NC_NOERR) {
    printf("Error: ncmpi_create() file %s (%s)\n", filename.c_str(), ncmpi_strerror(err));
    MPI_Abort(MPI_COMM_WORLD, -1);
    exit(1);
  }

  /*
   * Define global dimensions
   */
  err = ncmpi_def_dim(ncFileId, "x", gsizes[0], &dimIds[0]);
  PNETCDF_HANDLE_ERROR;
  err = ncmpi_def_dim(ncFileId, "y", gsizes[1], &dimIds[1]);
  PNETCDF_HANDLE_ERROR;
  err = ncmpi_def_dim(ncFileId, "z", gsizes[2], &dimIds[2]);
  PNETCDF_HANDLE_ERROR;

  /*
   * Define variables to write (give a name)
   */
  nc_type ncDataType = NC_FLOAT;
  MPI_Datatype mpiDataType = MPI_FLOAT;
  err = ncmpi_def_var(ncFileId, "data", ncDataType, 3, dimIds, &varIds[0]);
  PNETCDF_HANDLE_ERROR;

  /*
   * global attributes
   */
  // did we use CDF-2 or CDF-5
  {
    int useCDF5_int = useCDF5 ? 1 : 0;
    err = ncmpi_put_att_int(ncFileId, NC_GLOBAL, "CDF-5 mode", NC_INT, 1, &useCDF5_int);
    PNETCDF_HANDLE_ERROR;
  }

  /*
   * exit the define mode
   */
  err = ncmpi_enddef(ncFileId);
  PNETCDF_HANDLE_ERROR;

  /*
   * Get all the MPI_IO hints used
   */
  err = ncmpi_get_file_info(ncFileId, &mpi_info_used);
  PNETCDF_HANDLE_ERROR;

  // copy data to write in intermediate buffer
  int nItems = counts[IX]*counts[IY]*counts[IZ];

  {
    // debug
    // printf("Pnetcdf [rank=%d] starts=%lld %lld %lld, counts =%lld %lld %lld, gsizes=%d %d %d\n",
    //        myRank,
    //        starts[0],starts[1],starts[2],
    //        counts[0],counts[1],counts[2],
    //        gsizes[0],gsizes[1],gsizes[2]);

    /*
     * Make sure PnetCDF doesn't complain when starts is outside the global
     * domain bounds. When nItems is zero, of course nothing is written, but
     * the starts offsets still have to be inside the global domain, so there
     * is no harm in resetting starts to the origin.
     */
    if (nItems == 0) {
      starts[0]=0;
      starts[1]=0;
      starts[2]=0;
    }

    err = ncmpi_put_vara_all(ncFileId, varIds[0], starts, counts,
                             localData, nItems, mpiDataType);
    PNETCDF_HANDLE_ERROR;
  }

  /*
   * close the file
   */
  err = ncmpi_close(ncFileId);
  PNETCDF_HANDLE_ERROR;

} // write_pnetcdf
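As a usage illustration (an assumption, not from the original code): with MPI already initialized, write_pnetcdf() can be called with a simple slab decomposition along the first (slowest-varying) dimension. The sizes, file name, and helper name below are made up; it assumes <vector> is included and that nprocs divides the first dimension.

// Illustrative caller sketch for write_pnetcdf() above.
void example_write_pnetcdf(int rank, int nprocs)
{
  int gsizes[3] = {64, 64, 64};                         // hypothetical global grid
  MPI_Offset counts[3] = {gsizes[0]/nprocs, gsizes[1], gsizes[2]};
  MPI_Offset starts[3] = {rank*counts[0], 0, 0};        // this rank's slab offset

  // local buffer holding this rank's sub-domain, filled with the rank id
  std::vector<float> localData(counts[0]*counts[1]*counts[2], (float)rank);

  write_pnetcdf("example.nc", starts, counts, gsizes, localData.data());
}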
/* * adapted from HydroRunBaseMpi::outputPnetcdf * * assumes here that localData have size nx,ny,nz (no ghostWidth) * * see : test_pnetcdf_write.cpp * * Note that if ghostIncluded is false local_data must be sized upon nx,ny,nz * if not size must be nx+2*ghostWidth,ny+2*ghostWidth,nz+2*ghostWidth * */ void write_pnetcdf(const std::string &filename, HostArray<double> &localData, ConfigMap &configMap) { int myRank; MPI_Comm_rank(MPI_COMM_WORLD, &myRank); // read local domain sizes int nx=configMap.getInteger("mesh","nx",32); int ny=configMap.getInteger("mesh","ny",32); int nz=configMap.getInteger("mesh","nz",32); // read mpi geometry int mx=configMap.getInteger("mpi","mx",1); int my=configMap.getInteger("mpi","my",1); int mz=configMap.getInteger("mpi","mz",1); // MPI cartesian coordinates // myRank = mpiCoord[0] + mx*mpiCoord[1] + mx*my*mpiCoord[2] int mpiCoord[3]; { mpiCoord[2] = myRank/(mx*my); mpiCoord[1] = (myRank - mx*my*mpiCoord[2])/mx; mpiCoord[0] = myRank - mx*my*mpiCoord[2] -mx*mpiCoord[1]; } bool ghostIncluded = configMap.getBool("output", "ghostIncluded",false); int ghostWidth = configMap.getInteger("mesh","ghostWidth",3); // global size int NX=nx*mx, NY=ny*my, NZ=nz*mz; int gsizes[3]; gsizes[IZ] = NX; gsizes[IY] = NY; gsizes[IX] = NZ; if ( ghostIncluded ) { gsizes[IZ] += 2*ghostWidth; gsizes[IY] += 2*ghostWidth; gsizes[IX] += 2*ghostWidth; } // netcdf file id int ncFileId; int err; // file creation mode int ncCreationMode = NC_CLOBBER; bool useCDF5 = configMap.getBool("output","pnetcdf_cdf5",false); if (useCDF5) ncCreationMode = NC_CLOBBER|NC_64BIT_DATA; else // use CDF-2 file format ncCreationMode = NC_CLOBBER|NC_64BIT_OFFSET; // verbose log ? bool pnetcdf_verbose = configMap.getBool("output","pnetcdf_verbose",false); int nbVar=8; int dimIds[3], varIds[nbVar]; MPI_Offset write_size, sum_write_size; MPI_Info mpi_info_used; char str[512]; // time measurement variables double write_timing, max_write_timing, write_bw; /* * writing parameter (offset and size) */ MPI_Offset starts[3] = {0}; MPI_Offset counts[3] = {nz, ny, nx}; // take care that row-major / column major format starts[IZ] = mpiCoord[IX]*nx; starts[IY] = mpiCoord[IY]*ny; starts[IX] = mpiCoord[IZ]*nz; if ( ghostIncluded ) { if ( mpiCoord[IX] == 0 ) counts[IZ] += ghostWidth; if ( mpiCoord[IY] == 0 ) counts[IY] += ghostWidth; if ( mpiCoord[IZ] == 0 ) counts[IX] += ghostWidth; if ( mpiCoord[IX] == mx-1 ) counts[IZ] += ghostWidth; if ( mpiCoord[IY] == my-1 ) counts[IY] += ghostWidth; if ( mpiCoord[IZ] == mz-1 ) counts[IX] += ghostWidth; starts[IZ] += ghostWidth; starts[IY] += ghostWidth; starts[IX] += ghostWidth; if ( mpiCoord[IX] == 0 ) starts[IZ] -= ghostWidth; if ( mpiCoord[IY] == 0 ) starts[IY] -= ghostWidth; if ( mpiCoord[IZ] == 0 ) starts[IX] -= ghostWidth; } /* * Create NetCDF file */ err = ncmpi_create(MPI_COMM_WORLD, filename.c_str(), ncCreationMode, MPI_INFO_NULL, &ncFileId); if (err != NC_NOERR) { printf("Error: ncmpi_create() file %s (%s)\n",filename.c_str(),ncmpi_strerror(err)); MPI_Abort(MPI_COMM_WORLD, -1); exit(1); } /* * Define dimensions */ err = ncmpi_def_dim(ncFileId, "x", gsizes[0], &dimIds[0]); PNETCDF_HANDLE_ERROR; err = ncmpi_def_dim(ncFileId, "y", gsizes[1], &dimIds[1]); PNETCDF_HANDLE_ERROR; err = ncmpi_def_dim(ncFileId, "z", gsizes[2], &dimIds[2]); PNETCDF_HANDLE_ERROR; /* * Define variables */ nc_type ncDataType = NC_DOUBLE; MPI_Datatype mpiDataType = MPI_DOUBLE; err = ncmpi_def_var(ncFileId, "rho", ncDataType, 3, dimIds, &varIds[ID]); PNETCDF_HANDLE_ERROR; err = ncmpi_def_var(ncFileId, "E", 
ncDataType, 3, dimIds, &varIds[IP]); PNETCDF_HANDLE_ERROR; err = ncmpi_def_var(ncFileId, "rho_vx", ncDataType, 3, dimIds, &varIds[IU]); PNETCDF_HANDLE_ERROR; err = ncmpi_def_var(ncFileId, "rho_vy", ncDataType, 3, dimIds, &varIds[IV]); PNETCDF_HANDLE_ERROR; err = ncmpi_def_var(ncFileId, "rho_vz", ncDataType, 3, dimIds, &varIds[IW]); PNETCDF_HANDLE_ERROR; err = ncmpi_def_var(ncFileId, "Bx", ncDataType, 3, dimIds, &varIds[IA]); PNETCDF_HANDLE_ERROR; err = ncmpi_def_var(ncFileId, "By", ncDataType, 3, dimIds, &varIds[IB]); PNETCDF_HANDLE_ERROR; err = ncmpi_def_var(ncFileId, "Bz", ncDataType, 3, dimIds, &varIds[IC]); PNETCDF_HANDLE_ERROR; /* * global attributes */ // did we use CDF-2 or CDF-5 { int useCDF5_int = useCDF5 ? 1 : 0; err = ncmpi_put_att_int(ncFileId, NC_GLOBAL, "CDF-5 mode", NC_INT, 1, &useCDF5_int); PNETCDF_HANDLE_ERROR; } /* * exit the define mode */ err = ncmpi_enddef(ncFileId); PNETCDF_HANDLE_ERROR; /* * Get all the MPI_IO hints used */ err = ncmpi_get_file_info(ncFileId, &mpi_info_used); PNETCDF_HANDLE_ERROR; int nItems = counts[IX]*counts[IY]*counts[IZ]; for (int iVar=0; iVar<nbVar; iVar++) { double *data = &(localData(0,0,0,iVar)); err = ncmpi_put_vara_all(ncFileId, varIds[iVar], starts, counts, data, nItems, mpiDataType); PNETCDF_HANDLE_ERROR; } /* * close the file */ err = ncmpi_close(ncFileId); PNETCDF_HANDLE_ERROR; } // write_pnetcdf
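The matching reader is not shown in this excerpt; the sketch below is a hedged illustration of how one field ("rho") could be read back collectively with the same starts/counts decomposition, reusing the PNETCDF_HANDLE_ERROR macro from the surrounding code.

// Illustrative reader sketch (not from the original source).
void read_rho_pnetcdf(const std::string &filename,
                      MPI_Offset starts[3], MPI_Offset counts[3],
                      double *localData)
{
  int ncFileId, varId;
  int err = ncmpi_open(MPI_COMM_WORLD, filename.c_str(), NC_NOWRITE,
                       MPI_INFO_NULL, &ncFileId);
  PNETCDF_HANDLE_ERROR;

  err = ncmpi_inq_varid(ncFileId, "rho", &varId);
  PNETCDF_HANDLE_ERROR;

  // flexible API: buffer described by an element count and an MPI datatype
  MPI_Offset nItems = counts[0]*counts[1]*counts[2];
  err = ncmpi_get_vara_all(ncFileId, varId, starts, counts,
                           localData, nItems, MPI_DOUBLE);
  PNETCDF_HANDLE_ERROR;

  err = ncmpi_close(ncFileId);
  PNETCDF_HANDLE_ERROR;
}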
static int tst_norm(char *filename, int cmode) { int ncid, dimid, varid; int dimids[NDIMS]; /* unnormalized UTF-8 encoding for Unicode 8-character "Hello" in Greek: */ unsigned char uname_utf8[] = { 0x41, /* LATIN CAPITAL LETTER A */ 0xCC, 0x80, /* COMBINING GRAVE ACCENT */ 0x41, /* LATIN CAPITAL LETTER A */ 0xCC, 0x81, /* COMBINING ACUTE ACCENT */ 0x41, /* LATIN CAPITAL LETTER A */ 0xCC, 0x82, /* COMBINING CIRCUMFLEX ACCENT */ 0x41, /* LATIN CAPITAL LETTER A */ 0xCC, 0x83, /* COMBINING TILDE */ 0x41, /* LATIN CAPITAL LETTER A */ 0xCC, 0x88, /* COMBINING DIAERESIS */ 0x41, /* LATIN CAPITAL LETTER A */ 0xCC, 0x8A, /* COMBINING RING ABOVE */ 0x43, /* LATIN CAPITAL LETTER C */ 0xCC, 0xA7, /* COMBINING CEDILLA */ 0x45, /* LATIN CAPITAL LETTER E */ 0xCC, 0x80, /* COMBINING GRAVE ACCENT */ 0x45, /* LATIN CAPITAL LETTER E */ 0xCC, 0x81, /* COMBINING ACUTE ACCENT */ 0x45, /* LATIN CAPITAL LETTER E */ 0xCC, 0x82, /* COMBINING CIRCUMFLEX ACCENT */ 0x45, /* LATIN CAPITAL LETTER E */ 0xCC, 0x88, /* COMBINING DIAERESIS */ 0x49, /* LATIN CAPITAL LETTER I */ 0xCC, 0x80, /* COMBINING GRAVE ACCENT */ 0x49, /* LATIN CAPITAL LETTER I */ 0xCC, 0x81, /* COMBINING ACUTE ACCENT */ 0x49, /* LATIN CAPITAL LETTER I */ 0xCC, 0x82, /* COMBINING CIRCUMFLEX ACCENT */ 0x49, /* LATIN CAPITAL LETTER I */ 0xCC, 0x88, /* COMBINING DIAERESIS */ 0x4E, /* LATIN CAPITAL LETTER N */ 0xCC, 0x83, /* COMBINING TILDE */ 0x00 }; /* NFC normalized UTF-8 encoding for same Unicode string: */ unsigned char nname_utf8[] = { 0xC3, 0x80, /* LATIN CAPITAL LETTER A WITH GRAVE */ 0xC3, 0x81, /* LATIN CAPITAL LETTER A WITH ACUTE */ 0xC3, 0x82, /* LATIN CAPITAL LETTER A WITH CIRCUMFLEX */ 0xC3, 0x83, /* LATIN CAPITAL LETTER A WITH TILDE */ 0xC3, 0x84, /* LATIN CAPITAL LETTER A WITH DIAERESIS */ 0xC3, 0x85, /* LATIN CAPITAL LETTER A WITH RING ABOVE */ 0xC3, 0x87, /* LATIN CAPITAL LETTER C WITH CEDILLA */ 0xC3, 0x88, /* LATIN CAPITAL LETTER E WITH GRAVE */ 0xC3, 0x89, /* LATIN CAPITAL LETTER E WITH ACUTE */ 0xC3, 0x8A, /* LATIN CAPITAL LETTER E WITH CIRCUMFLEX */ 0xC3, 0x8B, /* LATIN CAPITAL LETTER E WITH DIAERESIS */ 0xC3, 0x8C, /* LATIN CAPITAL LETTER I WITH GRAVE */ 0xC3, 0x8D, /* LATIN CAPITAL LETTER I WITH ACUTE */ 0xC3, 0x8E, /* LATIN CAPITAL LETTER I WITH CIRCUMFLEX */ 0xC3, 0x8F, /* LATIN CAPITAL LETTER I WITH DIAERESIS */ 0xC3, 0x91, /* LATIN CAPITAL LETTER N WITH TILDE */ 0x00 }; /* Unnormalized name used for dimension, variable, and attribute value */ #define UNAME ((char *) uname_utf8) #define UNAMELEN (sizeof uname_utf8) /* Normalized name */ #define NNAME ((char *) nname_utf8) #define NNAMELEN (sizeof nname_utf8) char name_in[UNAMELEN + 1], strings_in[UNAMELEN + 1]; nc_type att_type; MPI_Offset att_len; int err, dimid_in, varid_in, attnum_in; int attvals[] = {42}; #define ATTNUM ((sizeof attvals)/(sizeof attvals[0])) err = ncmpi_create(MPI_COMM_WORLD, filename, cmode, MPI_INFO_NULL,&ncid); ERR /* Define dimension with unnormalized Unicode UTF-8 encoded name */ err = ncmpi_def_dim(ncid, UNAME, NX, &dimid); ERR dimids[0] = dimid; /* Define variable with same name */ err = ncmpi_def_var(ncid, UNAME, NC_CHAR, NDIMS, dimids, &varid); ERR /* Create string attribute with same value */ err = ncmpi_put_att_text(ncid, varid, UNITS, UNAMELEN, UNAME); ERR /* Create int attribute with same name */ err = ncmpi_put_att_int(ncid, varid, UNAME, NC_INT, ATTNUM, attvals); ERR /* Try to create dimension and variable with NFC-normalized * version of same name. 
These should fail, as unnormalized name * should have been normalized in library, so these are attempts to * create duplicate netCDF objects. */ if ((err = ncmpi_def_dim(ncid, NNAME, NX, &dimid)) != NC_ENAMEINUSE) { printf("Error at line %d: expecting error code %d but got %d\n",__LINE__,NC_ENAMEINUSE,err); return 1; } if ((err=ncmpi_def_var(ncid, NNAME, NC_CHAR, NDIMS, dimids, &varid)) != NC_ENAMEINUSE) { printf("Error at line %d: expecting error code %d but got %d\n",__LINE__,NC_ENAMEINUSE,err); return 1; } err = ncmpi_enddef(ncid); ERR /* Write string data, UTF-8 encoded, to the file */ err = ncmpi_put_var_text_all(ncid, varid, UNAME); ERR err = ncmpi_close(ncid); ERR /* Check it out. */ err = ncmpi_open(MPI_COMM_WORLD, filename, NC_NOWRITE, MPI_INFO_NULL, &ncid); ERR err = ncmpi_inq_varid(ncid, UNAME, &varid); ERR err = ncmpi_inq_varname(ncid, varid, name_in); ERR err = strncmp(NNAME, name_in, NNAMELEN); ERR err = ncmpi_inq_varid(ncid, NNAME, &varid_in); ERR if ((err = ncmpi_inq_dimid(ncid, UNAME, &dimid_in)) || dimid != dimid_in) {printf("Error at line %d\n",__LINE__);return 1;} if ((err = ncmpi_inq_dimid(ncid, NNAME, &dimid_in)) || dimid != dimid_in) {printf("Error at line %d\n",__LINE__);return 1;} err = ncmpi_inq_att(ncid, varid, UNITS, &att_type, &att_len); ERR if ( att_type != NC_CHAR || att_len != UNAMELEN) {printf("Error at line %d\n",__LINE__);return 1;} err = ncmpi_get_att_text(ncid, varid, UNITS, strings_in); ERR strings_in[UNAMELEN] = '\0'; err = strncmp(UNAME, strings_in, UNAMELEN); ERR if ((err = ncmpi_inq_attid(ncid, varid, UNAME, &attnum_in)) || ATTNUM != attnum_in) {printf("Error at line %d\n",__LINE__);return 1;} if ((err = ncmpi_inq_attid(ncid, varid, NNAME, &attnum_in)) || ATTNUM != attnum_in) {printf("Error at line %d\n",__LINE__);return 1;} err = ncmpi_close(ncid); ERR return 0; }
/* * NETCDF_open() * Open the trajectory specified by the filename and accessMode in trajInfo as * a NETCDF traj. * Return 0 on success, 1 on failure */ int NETCDF_open(coordinateInfo *trajInfo) { #ifdef BINTRAJ int err,ncid; // NOTE: Put in a check, only open if coord is unknown or netcdf if (prnlev>0) fprintf(stdout,"[%i] NETCDF_open(): Opening %s\n", worldrank,trajInfo->filename); switch (trajInfo->accessMode) { case 0: // Read # ifdef MPI err = ncmpi_open(MPI_COMM_WORLD, trajInfo->filename, NC_NOWRITE, MPI_INFO_NULL, &ncid); /* This next line is a test. Apparently it puts the netcdf file in an * independent I/O mode. Not sure if it is bad to always put here. * Originally this call was only made from ptrajPreprocess... */ if (err == NC_NOERR) err = ncmpi_begin_indep_data(ncid); # else err = nc_open(trajInfo->filename, NC_NOWRITE, &ncid); # endif break; case 1: // Write //omode=NC_WRITE; # ifdef MPI err = ncmpi_create(MPI_COMM_WORLD, trajInfo->filename, NC_64BIT_OFFSET, MPI_INFO_NULL, &ncid); if (err == NC_NOERR) ncmpi_begin_indep_data(ncid); # else err = nc_create(trajInfo->filename, NC_64BIT_OFFSET, &ncid); # endif break; case 2: // Append printfone("Appending of NETCDF files is not supported.\n"); return 1; break; } /* If opening succeeded and memory hasnt been allocated already * initialize necessary data structure. * NOTE: Should this be in NETCDF_setup? If so ncid would have to * be its own variable in coordinateInfo. * NOTE: If this is an output file trajInfo->type has already been set. * Not a huge problem but is a bit circular. TRAJOUT should eventually * only set trajInfo->isNetcdf. */ if (err == NC_NOERR) { trajInfo->type = COORD_AMBER_NETCDF; if (trajInfo->NCInfo==NULL) { trajInfo->NCInfo = (netcdfTrajectoryInfo *) safe_malloc(sizeof(netcdfTrajectoryInfo)); INITIALIZE_netcdfTrajectoryInfo( trajInfo->NCInfo ); } trajInfo->NCInfo->currentFrame = worldrank; // Always set NCID since it can change depending on when file is opened trajInfo->NCInfo->ncid = ncid; if (prnlev>0) fprintf(stdout,"NETCDF_open(): %s has been assigned ncid of %i\n", trajInfo->filename,ncid); return 0; } // If we are here an error occured. Print the error message before exiting. fprintf(stdout,"Error: NETCDF_open(): Could not open %s with accessMode %i\n", trajInfo->filename,trajInfo->accessMode); fprintf(stdout,"%s\n",nc_strerror(err)); #endif // If no BINTRAJ always fail return 1; }
int main(int argc, char** argv) { char filename[256]; int i, j, rank, nprocs, err, nerrs=0, expected; int ncid, cmode, varid[2], dimid[2], req[4], st[4], *buf; int *buf0, *buf1, *buf2; size_t len; MPI_Offset start[2], count[2]; MPI_Info info; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); /* this program is intended to run on one process */ if (rank) goto fn_exit; /* get command-line arguments */ if (argc > 2) { if (!rank) printf("Usage: %s [filename]\n",argv[0]); MPI_Finalize(); return 1; } if (argc == 2) snprintf(filename, 256, "%s", argv[1]); else strcpy(filename, "testfile.nc"); if (rank == 0) { char *cmd_str = (char*)malloc(strlen(argv[0]) + 256); sprintf(cmd_str, "*** TESTING C %s for writing interleaved fileviews ", basename(argv[0])); printf("%-66s ------ ", cmd_str); free(cmd_str); } MPI_Info_create(&info); MPI_Info_set(info, "romio_cb_write", "disable"); MPI_Info_set(info, "ind_wr_buffer_size", "8"); /* these 2 hints are required to cause a core dump if r1758 fix is not * presented */ /* create a new file for writing ----------------------------------------*/ cmode = NC_CLOBBER | NC_64BIT_DATA; err = ncmpi_create(MPI_COMM_SELF, filename, cmode, info, &ncid); CHECK_ERR MPI_Info_free(&info); /* define dimensions Y and X */ err = ncmpi_def_dim(ncid, "Y", NY, &dimid[0]); CHECK_ERR err = ncmpi_def_dim(ncid, "X", NX, &dimid[1]); CHECK_ERR /* define 2D variables of integer type */ err = ncmpi_def_var(ncid, "var0", NC_INT, 2, dimid, &varid[0]); CHECK_ERR err = ncmpi_def_var(ncid, "var1", NC_INT, 2, dimid, &varid[1]); CHECK_ERR /* enable fill mode */ err = ncmpi_set_fill(ncid, NC_FILL, NULL); CHECK_ERR /* do not forget to exit define mode */ err = ncmpi_enddef(ncid); CHECK_ERR /* now we are in data mode */ buf = (int*) malloc(NY*NX * sizeof(int)); /* fill the entire variable var0 with -1s */ for (i=0; i<NY*NX; i++) buf[i] = -1; err = ncmpi_put_var_int_all(ncid, varid[0], buf); CHECK_ERR /* write 8 x 2 elements so this only interleaves the next two * iput requests */ start[0] = 0; start[1] = 3; count[0] = 8; count[1] = 2; len = (size_t)(count[0] * count[1]); buf0 = (int*) malloc(len * sizeof(int)); for (i=0; i<len; i++) buf0[i] = 50+i; err = ncmpi_iput_vara_int(ncid, varid[0], start, count, buf0, &req[0]); CHECK_ERR /* write 1 x 3 elements */ start[0] = 1; start[1] = 8; count[0] = 1; count[1] = 5; len = (size_t)(count[0] * count[1]); buf1 = (int*) malloc(len * sizeof(int)); for (i=0; i<len; i++) buf1[i] = 60+i; err = ncmpi_iput_vara_int(ncid, varid[0], start, count, buf1, &req[1]); CHECK_ERR /* write 1 x 3 elements */ start[0] = 3; start[1] = 7; count[0] = 1; count[1] = 5; len = (size_t)(count[0] * count[1]); buf2 = (int*) malloc(len * sizeof(int)); for (i=0; i<len; i++) buf2[i] = 70+i; err = ncmpi_iput_vara_int(ncid, varid[0], start, count, buf2, &req[2]); CHECK_ERR err = ncmpi_wait_all(ncid, 3, req, st); CHECK_ERR free(buf0); free(buf1); free(buf2); /* fill the entire variable var1 with -1s */ for (i=0; i<NY*NX; i++) buf[i] = -1; err = ncmpi_put_var_int_all(ncid, varid[1], buf); CHECK_ERR /* write 8 x 2 elements so this only interleaves the next two iput * requests */ start[0] = 0; start[1] = 3; count[0] = 8; count[1] = 2; len = (size_t)(count[0] * count[1]); buf0 = (int*) malloc(len * sizeof(int)); for (i=0; i<count[0]*count[1]; i++) buf0[i] = 50+i; err = ncmpi_iput_vara_int(ncid, varid[1], start, count, buf0, &req[0]); CHECK_ERR /* rearrange buffer contents, as buf is 2D */ for (i=0; i<5; i++) buf[i] = 10 + i; for (i=5; i<10; i++) 
buf[i] = 10 + i + 5; for (i=10; i<15; i++) buf[i] = 10 + i + 10; start[0] = 6; start[1] = 7; count[0] = 3; count[1] = 5; err = ncmpi_iput_vara_int(ncid, varid[1], start, count, buf, &req[1]); CHECK_ERR for (i=15; i<20; i++) buf[i] = 10 + i - 10; for (i=20; i<25; i++) buf[i] = 10 + i - 5; start[0] = 6; start[1] = 12; count[0] = 2; count[1] = 5; err = ncmpi_iput_vara_int(ncid, varid[1], start, count, buf+15, &req[2]); CHECK_ERR for (i=25; i<30; i++) buf[i] = 10 + i; start[0] = 8; start[1] = 12; count[0] = 1; count[1] = 5; err = ncmpi_iput_vara_int(ncid, varid[1], start, count, buf+25, &req[3]); CHECK_ERR err = ncmpi_wait_all(ncid, 4, req, st); CHECK_ERR /* check if write buffer contents have been altered */ for (i=0; i<16; i++) CHECK_CONTENTS(buf0, 50 + i) for (i=0; i<5; i++) CHECK_CONTENTS(buf, 10 + i) for (i=5; i<10; i++) CHECK_CONTENTS(buf, 10 + i + 5) for (i=10; i<15; i++) CHECK_CONTENTS(buf, 10 + i + 10) for (i=15; i<20; i++) CHECK_CONTENTS(buf, 10 + i - 10) for (i=20; i<25; i++) CHECK_CONTENTS(buf, 10 + i - 5) for (i=25; i<30; i++) CHECK_CONTENTS(buf, 10 + i) err = ncmpi_close(ncid); CHECK_ERR free(buf0); /* open the same file and read back for validate */ err = ncmpi_open(MPI_COMM_SELF, filename, NC_NOWRITE, MPI_INFO_NULL, &ncid); CHECK_ERR err = ncmpi_inq_varid(ncid, "var0", &varid[0]); CHECK_ERR err = ncmpi_inq_varid(ncid, "var1", &varid[1]); CHECK_ERR /* read the entire array */ for (i=0; i<NY*NX; i++) buf[i] = -1; err = ncmpi_get_var_int_all(ncid, varid[0], buf); CHECK_ERR /* check if the contents of buf are expected */ expected = 50; for (j=0; j<8; j++) { for (i=3; i<5; i++) { if (buf[j*NX+i] != expected) { printf("%d: Unexpected read buf[%d][%d]=%d, should be %d\n", rank, j, i, buf[j*NX+i], expected); nerrs++; } expected++; } } expected = 60; j = 1; for (i=8; i<13; i++) { if (buf[j*NX+i] != expected) { printf("%d: Unexpected read buf[%d][%d]=%d, should be %d\n", rank, j, i, buf[j*NX+i], expected); nerrs++; } expected++; } expected = 70; j = 3; for (i=7; i<12; i++) { if (buf[j*NX+i] != expected) { printf("%d: Unexpected read buf[%d][%d]=%d, should be %d\n", rank, j, i, buf[j*NX+i], expected); nerrs++; } expected++; } /* initialize the contents of the array to a different value */ for (i=0; i<NY*NX; i++) buf[i] = -1; /* read the entire array */ err = ncmpi_get_var_int_all(ncid, varid[1], buf); CHECK_ERR /* check if the contents of buf are expected */ expected = 10; for (j=6; j<9; j++) { for (i=7; i<17; i++) { if (buf[j*NX+i] != expected) { printf("%d: Unexpected read buf[%d]=%d, should be %d\n", rank, i, buf[j*NX+i], expected); nerrs++; } expected++; } } expected = 50; for (j=0; j<8; j++) { for (i=3; i<5; i++) { if (buf[j*NX+i] != expected) { printf("%d: Unexpected read buf[%d][%d]=%d, should be %d\n", rank, j, i, buf[j*NX+i], expected); nerrs++; } expected++; } } err = ncmpi_close(ncid); CHECK_ERR free(buf); /* check if PnetCDF freed all internal malloc */ MPI_Offset malloc_size; err = ncmpi_inq_malloc_size(&malloc_size); if (err == NC_NOERR && malloc_size > 0) { printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n", malloc_size); ncmpi_inq_malloc_list(); } fn_exit: MPI_Allreduce(MPI_IN_PLACE, &nerrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); if (rank == 0) { if (nerrs) printf(FAIL_STR,nerrs); else printf(PASS_STR); } MPI_Finalize(); return (nerrs > 0); }
int main(int argc, char** argv) { char filename[256]; int rank, nprocs, err, flag, nerrs=0; int log_enabled; int ncid; MPI_Info info, infoused; char hint[MPI_MAX_INFO_VAL]; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); if (argc > 2) { if (!rank) printf("Usage: %s [filename]\n",argv[0]); MPI_Finalize(); return 1; } if (argc == 2) snprintf(filename, 256, "%s", argv[1]); else strcpy(filename, "testfile.nc"); MPI_Bcast(filename, 256, MPI_CHAR, 0, MPI_COMM_WORLD); if (rank == 0) { char *cmd_str = (char*)malloc(strlen(argv[0]) + 256); sprintf(cmd_str, "*** TESTING C %s for checking offsets of new variables ", basename(argv[0])); printf("%-66s ------ ", cmd_str); fflush(stdout); free(cmd_str); } MPI_Info_create(&info); MPI_Info_set(info, "nc_dw_overwrite", "enable"); MPI_Info_set(info, "nc_dw_del_on_close", "disable"); MPI_Info_set(info, "nc_dw_flush_buffer_size", "256"); /* MPI_Info_set(info, "nc_dw_dirname", "()@^$@!(_&$)@(#%%&)(*#$"); */ err = ncmpi_create(MPI_COMM_WORLD, filename, NC_CLOBBER, info, &ncid); CHECK_ERR err = ncmpi_inq_file_info(ncid, &infoused); CHECK_ERR MPI_Info_get(infoused, "nc_dw", MPI_MAX_INFO_VAL - 1, hint, &flag); if (flag && strcasecmp(hint, "enable") == 0) log_enabled = 1; else log_enabled = 0; if (log_enabled) { MPI_Info_get(infoused, "nc_dw_overwrite", MPI_MAX_INFO_VAL - 1, hint, &flag); if (flag) { if (strcmp(hint, "enable") != 0) { printf("Error at line %d: unexpected nc_dw_overwrite = %s, but got %s\n", __LINE__, "enable", hint); nerrs++; } } else{ printf("Error at line %d: nc_dw_overwrite is not set\n", __LINE__); nerrs++; } MPI_Info_get(infoused, "nc_dw_del_on_close", MPI_MAX_INFO_VAL - 1, hint, &flag); if (flag) { if (strcmp(hint, "disable") != 0) { printf("Error at line %d: unexpected nc_dw_del_on_close = %s, but got %s\n", __LINE__, "disable", hint); nerrs++; } } else{ printf("Error at line %d: nc_dw_del_on_close is not set\n", __LINE__); nerrs++; } MPI_Info_get(infoused, "nc_dw_flush_buffer_size", MPI_MAX_INFO_VAL - 1, hint, &flag); if (flag) { if (strcmp(hint, "256") != 0) { printf("Error at line %d: unexpected nc_dw_flush_buffer_size = %s, but got %s\n", __LINE__, "256", hint); nerrs++; } } else{ printf("Error at line %d: nc_dw_flush_buffer_size is not set\n", __LINE__); nerrs++; } } err = ncmpi_enddef(ncid); CHECK_ERR err = ncmpi_close(ncid); CHECK_ERR MPI_Info_free(&info); MPI_Info_free(&infoused); /* check if PnetCDF freed all internal malloc */ MPI_Offset malloc_size, sum_size; err = ncmpi_inq_malloc_size(&malloc_size); if (err == NC_NOERR) { MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD); if (rank == 0 && sum_size > 0) printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n", sum_size); } MPI_Allreduce(MPI_IN_PLACE, &nerrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); if (rank == 0) { if (nerrs) printf(FAIL_STR,nerrs); else printf(PASS_STR); } MPI_Finalize(); return (nerrs > 0); }
int main(int argc, char **argv) { int stat; /* return status */ int ncid; /* netCDF id */ int rec, i, j, k; signed char x[] = {42, 21}; /* dimension ids */ int rec_dim; int i_dim; int j_dim; int k_dim; int n_dim; #define NUMRECS 1 #define I_LEN 4104 #define J_LEN 1023 #define K_LEN 1023 #define N_LEN 2 /* dimension lengths */ MPI_Offset rec_len = NC_UNLIMITED; MPI_Offset i_len = I_LEN; MPI_Offset j_len = J_LEN; MPI_Offset k_len = K_LEN; MPI_Offset n_len = N_LEN; /* variable ids */ int var1_id; int x_id; /* rank (number of dimensions) for each variable */ # define RANK_var1 4 # define RANK_x 2 /* variable shapes */ int var1_dims[RANK_var1]; int x_dims[RANK_x]; printf("\n*** Testing large files, slowly.\n"); printf("*** Creating large file %s...", FILE_NAME); MPI_Init(&argc, &argv); /* enter define mode */ stat = ncmpi_create(MPI_COMM_WORLD, FILE_NAME, NC_CLOBBER|NC_64BIT_OFFSET, MPI_INFO_NULL, &ncid); check_err(stat,__LINE__,__FILE__); /* define dimensions */ stat = ncmpi_def_dim(ncid, "rec", rec_len, &rec_dim); check_err(stat,__LINE__,__FILE__); stat = ncmpi_def_dim(ncid, "i", i_len, &i_dim); check_err(stat,__LINE__,__FILE__); stat = ncmpi_def_dim(ncid, "j", j_len, &j_dim); check_err(stat,__LINE__,__FILE__); stat = ncmpi_def_dim(ncid, "k", k_len, &k_dim); check_err(stat,__LINE__,__FILE__); stat = ncmpi_def_dim(ncid, "n", n_len, &n_dim); check_err(stat,__LINE__,__FILE__); /* define variables */ var1_dims[0] = rec_dim; var1_dims[1] = i_dim; var1_dims[2] = j_dim; var1_dims[3] = k_dim; stat = ncmpi_def_var(ncid, "var1", NC_BYTE, RANK_var1, var1_dims, &var1_id); check_err(stat,__LINE__,__FILE__); x_dims[0] = rec_dim; x_dims[1] = n_dim; stat = ncmpi_def_var(ncid, "x", NC_BYTE, RANK_x, x_dims, &x_id); check_err(stat,__LINE__,__FILE__); /* don't initialize variables with fill values */ stat = ncmpi_set_fill(ncid, NC_NOFILL, 0); check_err(stat,__LINE__,__FILE__); /* leave define mode */ stat = ncmpi_enddef (ncid); check_err(stat,__LINE__,__FILE__); { /* store var1 */ int n = 0; static signed char var1[J_LEN][K_LEN]; static MPI_Offset var1_start[RANK_var1] = {0, 0, 0, 0}; static MPI_Offset var1_count[RANK_var1] = {1, 1, J_LEN, K_LEN}; static MPI_Offset x_start[RANK_x] = {0, 0}; static MPI_Offset x_count[RANK_x] = {1, N_LEN}; for(rec=0; rec<NUMRECS; rec++) { var1_start[0] = rec; x_start[0] = rec; for(i=0; i<I_LEN; i++) { for(j=0; j<J_LEN; j++) { for (k=0; k<K_LEN; k++) { var1[j][k] = n++; } } var1_start[1] = i; stat = ncmpi_put_vara_schar_all(ncid, var1_id, var1_start, var1_count, &var1[0][0]); check_err(stat,__LINE__,__FILE__); } } stat = ncmpi_put_vara_schar_all(ncid, x_id, x_start, x_count, x); check_err(stat,__LINE__,__FILE__); } stat = ncmpi_close(ncid); check_err(stat,__LINE__,__FILE__); printf("ok\n"); printf("*** Reading large file %s...", FILE_NAME); stat = ncmpi_open(MPI_COMM_WORLD, FILE_NAME, NC_NOWRITE, MPI_INFO_NULL, &ncid); check_err(stat,__LINE__,__FILE__); { /* read var1 */ int n = 0; static signed char var1[J_LEN][K_LEN]; static MPI_Offset var1_start[RANK_var1] = {0, 0, 0, 0}; static MPI_Offset var1_count[RANK_var1] = {1, 1, J_LEN, K_LEN}; static MPI_Offset x_start[RANK_x] = {0, 0}; static MPI_Offset x_count[RANK_x] = {1, N_LEN}; for(rec=0; rec<NUMRECS; rec++) { var1_start[0] = rec; x_start[0] = rec; for(i=0; i<I_LEN; i++) { var1_start[1] = i; stat = ncmpi_get_vara_schar_all(ncid, var1_id, var1_start, var1_count, &var1[0][0]); check_err(stat,__LINE__,__FILE__); for(j=0; j<J_LEN; j++) { for (k=0; k<K_LEN; k++) { if (var1[j][k] != (signed char) n) { printf("Error on read, var1[%d, %d, 
%d, %d] = %d wrong, " "should be %d !\n", rec, i, j, k, var1[j][k], (signed char) n); return 1; } n++; } } } ncmpi_get_vara_schar_all(ncid, x_id, x_start, x_count, x); if(x[0] != 42 || x[1] != 21) { printf("Error on read, x[] = %d, %d\n", x[0], x[1]); return 1; } } } stat = ncmpi_close(ncid); check_err(stat,__LINE__,__FILE__); printf("ok\n"); printf("*** Tests successful!\n"); /* Delete the file. */ (void) remove(FILE_NAME); MPI_Finalize(); return 0; }
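For context (editorial note, not part of the test): the reason the file above is created with NC_64BIT_OFFSET is the size of a single record of var1. A quick check of the arithmetic:

#include <stdio.h>
/* Back-of-the-envelope check: why the large-file test needs CDF-2. */
static void print_var1_record_size(void)
{
    long long bytes = 4104LL * 1023LL * 1023LL;   /* I_LEN * J_LEN * K_LEN, NC_BYTE */
    printf("var1 record size = %lld bytes (~%.2f GiB)\n",
           bytes, (double)bytes / (1LL << 30));
    /* 4294955016 bytes, just under 4 GiB but far above the ~2 GiB
     * per-variable limit of classic CDF-1, hence NC_64BIT_OFFSET (CDF-2). */
}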
int main(int argc, char **argv) { int i, j, k; int status; int ncid; int dimid1, dimid2, dimid3, udimid; int square_dim[2], cube_dim[3], xytime_dim[3], time_dim[1]; MPI_Offset square_start[2], cube_start[3] = {0, 0, 0}; MPI_Offset square_count[2] = {50, 50}, cube_count[3] = {100, 50, 50}; MPI_Offset square_stride[2] = {2, 2}; MPI_Offset xytime_start[3] = {0, 0, 0}; MPI_Offset xytime_count[3] = {100, 50, 50}; MPI_Offset time_start[1], time_count[1] = {25}; int square_id, cube_id, xytime_id, time_id; static char title[] = "example netCDF dataset"; static char description[] = "2-D integer array"; double data[100][50][50], buffer[100]; double stride_2d_data[50][50]; int rank; int nprocs; MPI_Comm comm = MPI_COMM_WORLD; params opts; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (rank == 0) fprintf(stderr, "Testing write ... "); parse_write_args(argc, argv, rank, &opts); /********** START OF NETCDF ACCESS **************/ /** * Create the dataset * File name: "testwrite.nc" * Dataset API: Collective */ status = ncmpi_create(comm, opts.outfname, NC_CLOBBER, MPI_INFO_NULL, &ncid); if (status != NC_NOERR) handle_error(status); /** * Create a global attribute: * :title = "example netCDF dataset"; */ status = ncmpi_put_att_text (ncid, NC_GLOBAL, "title", strlen(title), title); if (status != NC_NOERR) handle_error(status); /** * Add 4 pre-defined dimensions: * x = 100, y = 100, z = 100, time = NC_UNLIMITED */ status = ncmpi_def_dim(ncid, "x", 100L, &dimid1); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_dim(ncid, "y", 100L, &dimid2); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_dim(ncid, "z", 100L, &dimid3); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_dim(ncid, "time", NC_UNLIMITED, &udimid); if (status != NC_NOERR) handle_error(status); /** * Define the dimensionality and then add 4 variables: * square(x, y), cube(x,y,z), time(time), xytime(time, x, y) */ square_dim[0] = cube_dim[0] = xytime_dim[1] = dimid1; square_dim[1] = cube_dim[1] = xytime_dim[2] = dimid2; cube_dim[2] = dimid3; xytime_dim[0] = udimid; time_dim[0] = udimid; status = ncmpi_def_var (ncid, "square", NC_DOUBLE, 2, square_dim, &square_id); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_var (ncid, "cube", NC_DOUBLE, 3, cube_dim, &cube_id); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_var (ncid, "time", NC_DOUBLE, 1, time_dim, &time_id); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_var (ncid, "xytime", NC_DOUBLE, 3, xytime_dim, &xytime_id); if (status != NC_NOERR) handle_error(status); /** * Add an attribute for variable: * square: decsription = "2-D integer array" */ status = ncmpi_put_att_text (ncid, square_id, "description", strlen(description), description); if (status != NC_NOERR) handle_error(status); /** * End Define Mode (switch to data mode) * Dataset API: Collective */ status = ncmpi_enddef(ncid); if (status != NC_NOERR) handle_error(status); /** * Data Partition (Assume 4 processors): * square: 2-D, (Cyclic, Cyclic), 50*50 from 100*100, strided access * cube: 3-D, (*, Block, Block), 100*50*50 from 100*100*100 * xytime: 3-D, (*, Block, Block), 100*50*50 from 100*100*100 * time: 1-D, Block-wise, 25 from 100 */ /* square_start[0] = */ cube_start[1] = xytime_start[1] = (rank/2) * 50; /* square_start[1] = */ cube_start[2] = xytime_start[2] = (rank%2) * 50; time_start[0] = (rank%4) * 25; square_start[0] = rank/2; square_start[1] = rank%2; /** * Packing data 
in the buffer */ /* Data for variable: time */ for ( i = time_start[0]; i < time_start[0] + time_count[0]; i++ ) buffer[i - time_start[0]] = i; /* Data for variable: cube and xytime */ for ( i = 0; i < 100; i++ ) for ( j = cube_start[1]; j < cube_start[1]+cube_count[1]; j++ ) for ( k = cube_start[2]; k < cube_start[2]+cube_count[2]; k++ ) data[i][j-cube_start[1]][k-cube_start[2]] = i*100*100 + j*100 + k; /* Data for variable: square */ for ( i = 0; i < 50; i ++ ) for ( j = 0; j < 50; j++ ) stride_2d_data[i][j] = (2*i + rank/2)*100 + (2*j + rank%2); /** * Write data into variables: square, cube, time and xytime * Access Method: subarray * Data Mode API: collective */ status = ncmpi_put_vars_double_all(ncid, square_id, square_start, square_count, square_stride, &stride_2d_data[0][0]); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_vara_double_all(ncid, cube_id, cube_start, cube_count, &data[0][0][0]); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_vara_double_all(ncid, time_id, time_start, time_count, (void *)buffer); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_vara_double_all(ncid, xytime_id, xytime_start, xytime_count, &data[0][0][0]); if (status != NC_NOERR) handle_error(status); /** * Close the dataset * Dataset API: collective */ status = ncmpi_close(ncid); if (status != NC_NOERR) handle_error(status); /******************* END OF NETCDF ACCESS ****************/ if (rank == 0) fprintf(stderr, "OK\nFile written to: %s!\n", opts.outfname); MPI_Finalize(); return 0; }
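The companion read test is not shown in this excerpt. As a hedged sketch, the strided "square" variable written above could be read back with the same cyclic decomposition, reusing the handle_error() helper from the surrounding code.

/* Illustrative read-back sketch for the strided "square" variable. */
static int read_square(const char *fname, int rank)
{
    int ncid, varid, status;
    double buf[50][50];
    MPI_Offset start[2]  = {rank / 2, rank % 2};   /* same cyclic layout as the writer */
    MPI_Offset count[2]  = {50, 50};
    MPI_Offset stride[2] = {2, 2};

    status = ncmpi_open(MPI_COMM_WORLD, fname, NC_NOWRITE, MPI_INFO_NULL, &ncid);
    if (status != NC_NOERR) handle_error(status);
    status = ncmpi_inq_varid(ncid, "square", &varid);
    if (status != NC_NOERR) handle_error(status);
    status = ncmpi_get_vars_double_all(ncid, varid, start, count, stride, &buf[0][0]);
    if (status != NC_NOERR) handle_error(status);
    return ncmpi_close(ncid);
}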
int main(int argc, char** argv) { char filename[256]; int i, j, k, rank, nprocs, verbose=0, err, nerrs=0; int ncid, cmode, varid[4], dimid[2], nreqs, reqs[4], sts[4]; unsigned int *buffer[4]; int num_segs[4] = {4, 6, 5, 4}; int req_lens[4], my_nsegs[4]; MPI_Offset **starts[4], **counts[4]; MPI_Offset n_starts[4][6][2] = {{{0,5}, {1,0}, {2,6}, {3,0}, {0,0}, {0,0}}, {{0,3}, {0,8}, {1,5}, {2,0}, {2,8}, {3,4}}, {{0,7}, {1,1}, {1,7}, {2,2}, {3,3}, {0,0}}, {{0,0}, {1,4}, {2,3}, {3,7}, {0,0}, {0,0}}}; MPI_Offset n_counts[4][6][2] = {{{1,2}, {1,1}, {1,2}, {1,3}, {0,0}, {0,0}}, {{1,2}, {1,2}, {1,2}, {1,2}, {1,2}, {1,3}}, {{1,1}, {1,3}, {1,3}, {1,1}, {1,1}, {0,0}}, {{1,3}, {1,1}, {1,3}, {1,3}, {0,0}, {0,0}}}; /* n_starts[0][][] n_counts[0][][] indicate the following: ("-" means skip) - - - - - X X - - - X - - - - - - - - - - - - - - - X X - - X X X - - - - - - - n_starts[1][][] n_counts[1][][] indicate the following pattern. - - - X X - - - X X - - - - - X X - - - X X - - - - - - X X - - - - X X X - - - n_starts[2][][] n_counts[2][][] indicate the following pattern. - - - - - - - X - - - X X X - - - X X X - - X - - - - - - - - - - X - - - - - - n_starts[3][][] n_counts[3][][] indicate the following pattern. X X X - - - - - - - - - - - X - - - - - - - - X X X - - - - - - - - - - - X X X */ MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); if (argc > 2) { if (!rank) printf("Usage: %s [filename]\n",argv[0]); MPI_Finalize(); return 0; } strcpy(filename, "testfile.nc"); if (argc == 2) strcpy(filename, argv[1]); MPI_Bcast(filename, 256, MPI_CHAR, 0, MPI_COMM_WORLD); if (rank == 0) { char cmd_str[256]; sprintf(cmd_str, "*** TESTING C %s for bput_varn_uint ", argv[0]); printf("%-66s ------ ", cmd_str); } if (verbose && nprocs != 4 && rank == 0) printf("Warning: %s is intended to run on 4 processes\n",argv[0]); /* create a new file for writing ----------------------------------------*/ cmode = NC_CLOBBER | NC_64BIT_DATA; err = ncmpi_create(MPI_COMM_WORLD, filename, cmode, MPI_INFO_NULL, &ncid); ERR /* create a global array of size NY * NX */ err = ncmpi_def_dim(ncid, "Y", NY, &dimid[0]); ERR err = ncmpi_def_dim(ncid, "X", NX, &dimid[1]); ERR err = ncmpi_def_var(ncid, "var0", NC_UINT, NDIMS, dimid, &varid[0]); ERR err = ncmpi_def_var(ncid, "var1", NC_UINT, NDIMS, dimid, &varid[1]); ERR err = ncmpi_def_var(ncid, "var2", NC_UINT, NDIMS, dimid, &varid[2]); ERR err = ncmpi_def_var(ncid, "var3", NC_UINT, NDIMS, dimid, &varid[3]); ERR err = ncmpi_enddef(ncid); ERR /* allocate space for starts and counts */ starts[0] = (MPI_Offset**) malloc(4 * 6 * sizeof(MPI_Offset*)); counts[0] = (MPI_Offset**) malloc(4 * 6 * sizeof(MPI_Offset*)); starts[0][0] = (MPI_Offset*) calloc(4 * 6 * NDIMS, sizeof(MPI_Offset)); counts[0][0] = (MPI_Offset*) calloc(4 * 6 * NDIMS, sizeof(MPI_Offset)); for (i=1; i<4; i++) { starts[i] = starts[i-1] + 6; counts[i] = counts[i-1] + 6; starts[i][0] = starts[i-1][0] + 6 * NDIMS; counts[i][0] = counts[i-1][0] + 6 * NDIMS; } for (i=0; i<4; i++) { for (j=1; j<6; j++) { starts[i][j] = starts[i][j-1] + NDIMS; counts[i][j] = counts[i][j-1] + NDIMS; } } /* set values for starts and counts */ for (i=0; i<4; i++) { int n = (i + rank) % 4; my_nsegs[i] = num_segs[n]; /* number of segments for this request */ for (j=0; j<6; j++) { for (k=0; k<NDIMS; k++) { starts[i][j][k] = n_starts[n][j][k]; counts[i][j][k] = n_counts[n][j][k]; } } } /* test error code: NC_ENULLABUF */ err = ncmpi_bput_varn_uint(ncid, varid[0], 1, NULL, NULL, NULL, &reqs[0]); if (err != 
NC_ENULLABUF) { printf("Error at line %d: expecting error code NC_ENULLABUF but got %s\n", __LINE__, nc_err_code_name(err)); nerrs++; } /* only rank 0, 1, 2, and 3 do I/O: * each of ranks 0 to 3 write 4 nonblocking requests */ nreqs = 4; if (rank >= 4) nreqs = 0; /* bufsize must be max of data type converted before and after */ MPI_Offset bufsize = 0; /* calculate length of each varn request and allocate write buffer */ for (i=0; i<nreqs; i++) { req_lens[i] = 0; /* total length this request */ for (j=0; j<my_nsegs[i]; j++) { MPI_Offset req_len=1; for (k=0; k<NDIMS; k++) req_len *= counts[i][j][k]; req_lens[i] += req_len; } if (verbose) printf("req_lens[%d]=%d\n",i,req_lens[i]); /* allocate I/O buffer and initialize its contents */ buffer[i] = (unsigned int*) malloc(req_lens[i] * sizeof(unsigned int)); for (j=0; j<req_lens[i]; j++) buffer[i][j] = rank; bufsize += req_lens[i]; } bufsize *= sizeof(unsigned int); /* give PnetCDF a space to buffer the nonblocking requests */ if (bufsize > 0) { err = ncmpi_buffer_attach(ncid, bufsize); ERR }
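/* This excerpt of the bput_varn test stops right after ncmpi_buffer_attach. Shown below is a minimal sketch (not the original test's continuation) of the rest of the buffered nonblocking sequence: post the bput requests, flush them with ncmpi_wait_all, then detach the internal buffer. It reuses the variable names from the test above and assumes <pnetcdf.h>, <mpi.h>, and <stdlib.h> are included as in these tests; it would be called as flush_bput_requests_sketch(ncid, varid, nreqs, starts, counts, my_nsegs, buffer, reqs, sts). */
static int flush_bput_requests_sketch(int ncid, int *varid, int nreqs,
                                      MPI_Offset ***starts, MPI_Offset ***counts,
                                      int *my_nsegs, unsigned int **buffer,
                                      int *reqs, int *sts)
{
    int i, err, nerrs=0;

    /* post one buffered nonblocking varn request per variable */
    for (i=0; i<nreqs; i++) {
        err = ncmpi_bput_varn_uint(ncid, varid[i], my_nsegs[i], starts[i],
                                   counts[i], buffer[i], &reqs[i]);
        if (err != NC_NOERR) nerrs++;
        /* the user buffer may be reused right after bput returns, because the
         * data has already been copied into the attached internal buffer */
    }

    /* commit all pending requests to the file */
    err = ncmpi_wait_all(ncid, nreqs, reqs, sts);
    if (err != NC_NOERR) nerrs++;

    /* release the internal buffer once no bput requests are pending */
    err = ncmpi_buffer_detach(ncid);
    if (err != NC_NOERR) nerrs++;

    for (i=0; i<nreqs; i++) free(buffer[i]);
    return nerrs;
}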
int main(int argc, char** argv) { char filename[256]; int rank, nprocs, nerrs=0; int err, ncid; #if NC_MAX_VAR_DIMS < INT_MAX int i, varid, *dimid; #endif MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); if (argc > 2) { if (!rank) printf("Usage: %s [filename]\n",argv[0]); MPI_Finalize(); return 1; } if (argc == 2) snprintf(filename, 256, "%s", argv[1]); else strcpy(filename, "testfile.nc"); MPI_Bcast(filename, 256, MPI_CHAR, 0, MPI_COMM_WORLD); if (rank == 0) { char *cmd_str = (char*)malloc(strlen(argv[0]) + 256); sprintf(cmd_str, "*** TESTING C %s for checking NC_MAX_VAR_DIMS ", basename(argv[0])); printf("%-66s ------ ", cmd_str); fflush(stdout); free(cmd_str); } #if NC_MAX_VAR_DIMS < INT_MAX err = ncmpi_create(MPI_COMM_WORLD, filename, NC_CLOBBER, MPI_INFO_NULL, &ncid); CHECK_ERR /* define dimensions */ dimid = (int*) malloc((NC_MAX_VAR_DIMS+2) * sizeof(int)); err = ncmpi_def_dim(ncid, "dim0", NC_UNLIMITED, &dimid[0]); CHECK_ERR err = ncmpi_def_dim(ncid, "dim1", 1, &dimid[1]); CHECK_ERR for (i=2; i<NC_MAX_VAR_DIMS+2; i++) dimid[i] = dimid[1]; /* define variables */ err = ncmpi_def_var(ncid, "v0", NC_INT, NC_MAX_VAR_DIMS+1, &dimid[0], &varid); EXP_ERR(NC_EMAXDIMS) err = ncmpi_def_var(ncid, "v1", NC_INT, NC_MAX_VAR_DIMS+1, &dimid[1], &varid); EXP_ERR(NC_EMAXDIMS) err = ncmpi_set_fill(ncid, NC_NOFILL, NULL); CHECK_ERR err = ncmpi_close(ncid); CHECK_ERR free(dimid); /* check if PnetCDF freed all internal malloc */ MPI_Offset malloc_size, sum_size; err = ncmpi_inq_malloc_size(&malloc_size); if (err == NC_NOERR) { MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD); if (rank == 0 && sum_size > 0) printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n", sum_size); if (malloc_size > 0) ncmpi_inq_malloc_list(); } MPI_Allreduce(MPI_IN_PLACE, &nerrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); if (rank == 0) { if (nerrs) printf(FAIL_STR,nerrs); else printf(PASS_STR); } #else err = ncmpi_create(MPI_COMM_WORLD, filename, NC_CLOBBER, MPI_INFO_NULL, &ncid); CHECK_ERR err = ncmpi_close(ncid); CHECK_ERR if (rank == 0) printf(SKIP_STR); #endif MPI_Finalize(); return (nerrs > 0); }
int main(int argc, char **argv) { char filename[256]; int err, nerrs=0, ncid, dimid[NDIMS], varid[5], ndims=NDIMS; int i, j, k, nprocs, rank, req, *buf; MPI_Offset start[NDIMS] = {0}; MPI_Offset count[NDIMS] = {0}; MPI_Offset stride[NDIMS] = {0}; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); if (argc > 2) { if (!rank) printf("Usage: %s [filename]\n",argv[0]); MPI_Finalize(); return 1; } if (argc == 2) snprintf(filename, 256, "%s", argv[1]); else strcpy(filename, "testfile.nc"); if (rank == 0) { char *cmd_str = (char*)malloc(strlen(argv[0]) + 256); sprintf(cmd_str, "*** TESTING C %s for NULL stride ", basename(argv[0])); printf("%-66s ------ ", cmd_str); fflush(stdout); free(cmd_str); } err = ncmpi_create(MPI_COMM_WORLD, filename, 0, MPI_INFO_NULL, &ncid); CHECK_ERR err = ncmpi_def_dim(ncid, "Y", NY, &dimid[0]); CHECK_ERR err = ncmpi_def_dim(ncid, "X", nprocs*NX, &dimid[1]); CHECK_ERR err = ncmpi_def_var(ncid, "v0", NC_INT, ndims, dimid, &varid[0]); CHECK_ERR err = ncmpi_def_var(ncid, "v1", NC_INT, ndims, dimid, &varid[1]); CHECK_ERR err = ncmpi_def_var(ncid, "v2", NC_INT, ndims, dimid, &varid[2]); CHECK_ERR err = ncmpi_def_var(ncid, "v3", NC_INT, ndims, dimid, &varid[3]); CHECK_ERR err = ncmpi_def_var(ncid, "v4", NC_INT, ndims, dimid, &varid[4]); CHECK_ERR err = ncmpi_enddef(ncid); CHECK_ERR start[0] = 0; start[1] = rank*NX; count[0] = NY; count[1] = NX; buf = (int*) malloc((size_t)NY * NX * sizeof(int)); for (i=0; i<NY*NX; i++) buf[i] = rank+10; err = ncmpi_put_vara_int_all(ncid, varid[0], start, count, buf); CHECK_ERR CHECK_PUT_BUF err = ncmpi_put_vars_int_all(ncid, varid[1], start, count, NULL, buf); CHECK_ERR CHECK_PUT_BUF start[0] = 0; start[1] = rank; count[0] = NY; count[1] = NX; stride[0] = 1; stride[1] = nprocs; err = ncmpi_put_vars_int_all(ncid, varid[2], start, count, stride, buf); CHECK_ERR CHECK_PUT_BUF /* test bput_vars */ err = ncmpi_buffer_attach(ncid, NY*NX*sizeof(int)); CHECK_ERR start[0] = 0; start[1] = rank*NX; count[0] = NY; count[1] = NX; err = ncmpi_bput_vars_int(ncid, varid[3], start, count, NULL, buf, &req); CHECK_ERR err = ncmpi_wait_all(ncid, 1, &req, NULL); CHECK_ERR CHECK_PUT_BUF start[0] = 0; start[1] = rank; count[0] = NY; count[1] = NX; stride[0] = 1; stride[1] = nprocs; err = ncmpi_bput_vars_int(ncid, varid[4], start, count, stride, buf, &req); CHECK_ERR err = ncmpi_wait_all(ncid, 1, &req, NULL); CHECK_ERR CHECK_PUT_BUF free(buf); err = ncmpi_buffer_detach(ncid); CHECK_ERR buf = (int*) malloc((size_t)NY * NX * nprocs * sizeof(int)); memset(buf, 0, (size_t)NY * NX * nprocs * sizeof(int)); err = ncmpi_get_var_int_all(ncid, varid[0], buf); CHECK_ERR /* check read buffer contents */ /* v0 = * 10, 10, 11, 11, 12, 12, 13, 13, * 10, 10, 11, 11, 12, 12, 13, 13, * 10, 10, 11, 11, 12, 12, 13, 13, * 10, 10, 11, 11, 12, 12, 13, 13 ; */ for (i=0; i<NY; i++) { for (j=0; j<nprocs; j++) { for (k=0; k<NX; k++) { if (buf[i*nprocs*NX+j*NX+k] != j+10) { printf("Error at line %d in %s: expected buffer[%d]=%d but got %d\n", __LINE__,__FILE__,i*nprocs*NX+j*NX+k, j+10, buf[i*nprocs*NX+j*NX+k]); nerrs++; } } } } memset(buf, 0, (size_t)NY * NX * nprocs * sizeof(int)); err = ncmpi_get_var_int_all(ncid, varid[1], buf); CHECK_ERR /* check read buffer contents */ /* v1 = * 10, 10, 11, 11, 12, 12, 13, 13, * 10, 10, 11, 11, 12, 12, 13, 13, * 10, 10, 11, 11, 12, 12, 13, 13, * 10, 10, 11, 11, 12, 12, 13, 13 ; */ for (i=0; i<NY; i++) { for (j=0; j<nprocs; j++) { for (k=0; k<NX; k++) { if (buf[i*nprocs*NX+j*NX+k] != j+10) { printf("Error 
at line %d in %s: expected buffer[%d]=%d but got %d\n", __LINE__,__FILE__,i*nprocs*NX+j*NX+k, j+10, buf[i*nprocs*NX+j*NX+k]); nerrs++; } } } } memset(buf, 0, (size_t)NY * NX * nprocs * sizeof(int)); err = ncmpi_get_var_int_all(ncid, varid[2], buf); CHECK_ERR /* check read buffer contents */ /* v2 = * 10, 11, 12, 13, 10, 11, 12, 13, * 10, 11, 12, 13, 10, 11, 12, 13, * 10, 11, 12, 13, 10, 11, 12, 13, * 10, 11, 12, 13, 10, 11, 12, 13 ; */ for (i=0; i<NY; i++) { for (k=0; k<NX; k++) { for (j=0; j<nprocs; j++) { if (buf[i*nprocs*NX+k*nprocs+j] != j+10) { printf("Error at line %d in %s: expected buffer[%d]=%d but got %d\n", __LINE__,__FILE__,i*nprocs*NX+k*nprocs+j, j+10, buf[i*nprocs*NX+k*nprocs+j]); nerrs++; } } } } memset(buf, 0, (size_t)NY * NX * nprocs * sizeof(int)); err = ncmpi_get_var_int_all(ncid, varid[3], buf); CHECK_ERR /* check read buffer contents */ /* v3 = * 10, 10, 11, 11, 12, 12, 13, 13, * 10, 10, 11, 11, 12, 12, 13, 13, * 10, 10, 11, 11, 12, 12, 13, 13, * 10, 10, 11, 11, 12, 12, 13, 13 ; */ for (i=0; i<NY; i++) { for (j=0; j<nprocs; j++) { for (k=0; k<NX; k++) { if (buf[i*nprocs*NX+j*NX+k] != j+10) { printf("Error at line %d in %s: expected buffer[%d]=%d but got %d\n", __LINE__,__FILE__,i*nprocs*NX+j*NX+k, j+10, buf[i*nprocs*NX+j*NX+k]); nerrs++; } } } } memset(buf, 0, (size_t)NY * NX * nprocs * sizeof(int)); err = ncmpi_get_var_int_all(ncid, varid[4], buf); CHECK_ERR /* check read buffer contents */ /* v4 = * 10, 11, 12, 13, 10, 11, 12, 13, * 10, 11, 12, 13, 10, 11, 12, 13, * 10, 11, 12, 13, 10, 11, 12, 13, * 10, 11, 12, 13, 10, 11, 12, 13 ; */ for (i=0; i<NY; i++) { for (k=0; k<NX; k++) { for (j=0; j<nprocs; j++) { if (buf[i*nprocs*NX+k*nprocs+j] != j+10) { printf("Error at line %d in %s: expected buffer[%d]=%d but got %d\n", __LINE__,__FILE__,i*nprocs*NX+k*nprocs+j, j+10, buf[i*nprocs*NX+k*nprocs+j]); nerrs++; } } } } err = ncmpi_close(ncid); CHECK_ERR free(buf); /* check if PnetCDF freed all internal malloc */ MPI_Offset malloc_size, sum_size; err = ncmpi_inq_malloc_size(&malloc_size); if (err == NC_NOERR) { MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD); if (rank == 0 && sum_size > 0) printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n", sum_size); if (malloc_size > 0) ncmpi_inq_malloc_list(); } MPI_Allreduce(MPI_IN_PLACE, &nerrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); if (rank == 0) { if (nerrs) printf(FAIL_STR,nerrs); else printf(PASS_STR); } MPI_Finalize(); return (nerrs > 0); }
/*----< main() >------------------------------------------------------------*/ int main(int argc, char **argv) { char filename[256]; int i, j, err, ncid, varid0, varid1, varid2, dimids[2], nerrs=0; int rank, nprocs, debug=0, blocklengths[2], **buf, *bufptr; int array_of_sizes[2], array_of_subsizes[2], array_of_starts[2]; MPI_Offset start[2], count[2]; MPI_Aint a0, a1, disps[2]; MPI_Datatype buftype, ghost_buftype, rec_filetype, fix_filetype; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (argc > 2) { if (!rank) printf("Usage: %s [filename]\n",argv[0]); MPI_Finalize(); return 0; } strcpy(filename, "testfile.nc"); if (argc == 2) strcpy(filename, argv[1]); MPI_Bcast(filename, 256, MPI_CHAR, 0, MPI_COMM_WORLD); if (rank == 0) { char cmd_str[256]; sprintf(cmd_str, "*** TESTING C %s for flexible put and get ", argv[0]); printf("%-66s ------ ", cmd_str); fflush(stdout); } buf = (int**)malloc(NY * sizeof(int*)); buf[0] = (int*)malloc(NY * NX * sizeof(int)); for (i=1; i<NY; i++) buf[i] = buf[i-1] + NX; /* construct various MPI derived data types */ /* construct an MPI derived data type for swapping 1st row with 2nd row */ blocklengths[0] = blocklengths[1] = NX; MPI_Get_address(buf[1], &a0); MPI_Get_address(buf[0], &a1); disps[0] = 0; disps[1] = a1 - a0; bufptr = buf[1]; err = MPI_Type_create_hindexed(2, blocklengths, disps, MPI_INT, &buftype); if (err != MPI_SUCCESS) printf("MPI error MPI_Type_create_hindexed\n"); MPI_Type_commit(&buftype); start[0] = 0; start[1] = NX*rank; count[0] = 2; count[1] = NX; if (debug) printf("put start=%lld %lld count=%lld %lld\n",start[0],start[1],count[0],count[1]); /* create a file type for the fixed-size variable */ array_of_sizes[0] = 2; array_of_sizes[1] = NX*nprocs; array_of_subsizes[0] = count[0]; array_of_subsizes[1] = count[1]; array_of_starts[0] = start[0]; array_of_starts[1] = start[1]; MPI_Type_create_subarray(2, array_of_sizes, array_of_subsizes, array_of_starts, MPI_ORDER_C, MPI_INT, &fix_filetype); MPI_Type_commit(&fix_filetype); /* create a buftype with ghost cells on each side */ array_of_sizes[0] = count[0]+4; array_of_sizes[1] = count[1]+4; array_of_subsizes[0] = count[0]; array_of_subsizes[1] = count[1]; array_of_starts[0] = 2; array_of_starts[1] = 2; MPI_Type_create_subarray(2, array_of_sizes, array_of_subsizes, array_of_starts, MPI_ORDER_C, MPI_INT, &ghost_buftype); MPI_Type_commit(&ghost_buftype); /* create a new file for write */ err = ncmpi_create(MPI_COMM_WORLD, filename, NC_CLOBBER, MPI_INFO_NULL, &ncid); ERR /* define a 2D array */ err = ncmpi_def_dim(ncid, "REC_DIM", NC_UNLIMITED, &dimids[0]); ERR err = ncmpi_def_dim(ncid, "X", NX*nprocs, &dimids[1]); ERR err = ncmpi_def_var(ncid, "rec_var", NC_INT, 2, dimids, &varid0); ERR err = ncmpi_def_var(ncid, "dummy_rec", NC_INT, 2, dimids, &varid2); ERR err = ncmpi_def_dim(ncid, "FIX_DIM", 2, &dimids[0]); ERR err = ncmpi_def_var(ncid, "fix_var", NC_INT, 2, dimids, &varid1); ERR err = ncmpi_enddef(ncid); ERR /* create a file type for the record variable */ int *array_of_blocklengths=(int*) malloc(count[0]*sizeof(int)); MPI_Aint *array_of_displacements=(MPI_Aint*) malloc(count[0]*sizeof(MPI_Aint)); MPI_Offset recsize; err = ncmpi_inq_recsize(ncid, &recsize); for (i=0; i<count[0]; i++) { array_of_blocklengths[i] = count[1]; array_of_displacements[i] = start[1]*sizeof(int) + recsize * i; } MPI_Type_create_hindexed(2, array_of_blocklengths, array_of_displacements, MPI_INT, &rec_filetype); MPI_Type_commit(&rec_filetype); 
free(array_of_blocklengths); free(array_of_displacements); /* initialize the contents of the array */ for (j=0; j<NY; j++) for (i=0; i<NX; i++) buf[j][i] = rank*100 + j*10 + i; /* write the record variable */ err = ncmpi_put_vard_all(ncid, varid0, rec_filetype, bufptr, 1, buftype); ERR /* check if the contents of buf are altered */ CHECK_VALUE /* check if root process can write to file header in data mode */ err = ncmpi_rename_var(ncid, varid0, "rec_VAR"); ERR /* write the fixed-size variable */ err = ncmpi_put_vard_all(ncid, varid1, fix_filetype, bufptr, 1, buftype); ERR /* check if the contents of buf are altered */ CHECK_VALUE /* check if root process can write to file header in data mode */ err = ncmpi_rename_var(ncid, varid0, "rec_var"); ERR /* test the same routines in independent data mode */ err = ncmpi_begin_indep_data(ncid); ERR err = ncmpi_put_vard(ncid, varid0, rec_filetype, bufptr, 1, buftype); ERR CHECK_VALUE err = ncmpi_rename_var(ncid, varid0, "rec_VAR"); ERR err = ncmpi_put_vard(ncid, varid1, fix_filetype, bufptr, 1, buftype); ERR CHECK_VALUE err = ncmpi_rename_var(ncid, varid0, "rec_var"); ERR err = ncmpi_end_indep_data(ncid); ERR err = ncmpi_close(ncid); ERR /* open the same file and read back for validate */ err = ncmpi_open(MPI_COMM_WORLD, filename, NC_NOWRITE, MPI_INFO_NULL, &ncid); ERR err = ncmpi_inq_varid(ncid, "rec_var", &varid0); ERR err = ncmpi_inq_varid(ncid, "fix_var", &varid1); ERR nerrs += get_var_and_verify(ncid, varid0, start, count, buf, buftype, ghost_buftype, rec_filetype); nerrs += get_var_and_verify(ncid, varid1, start, count, buf, buftype, ghost_buftype, fix_filetype); err = ncmpi_close(ncid); ERR MPI_Type_free(&rec_filetype); MPI_Type_free(&fix_filetype); MPI_Type_free(&buftype); MPI_Type_free(&ghost_buftype); free(buf[0]); free(buf); /* check if PnetCDF freed all internal malloc */ MPI_Offset malloc_size, sum_size; err = ncmpi_inq_malloc_size(&malloc_size); if (err == NC_NOERR) { MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD); if (rank == 0 && sum_size > 0) printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n", sum_size); } MPI_Allreduce(MPI_IN_PLACE, &nerrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); if (rank == 0) { if (nerrs) printf(FAIL_STR,nerrs); else printf(PASS_STR); } MPI_Finalize(); return 0; }
int main(int argc, char** argv) { extern int optind; char *filename="testfile.nc", exec[128]; int i, j, k, n, rank, nprocs, verbose=1, err; int ncid, cmode, varid[4], dimid[2], nreqs, reqs[4], sts[4]; unsigned int *buffer[4]; int num_segs[4], req_lens[4]; MPI_Offset ***starts, ***counts; MPI_Info info; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); strcpy(exec, argv[0]); /* get command-line arguments */ while ((i = getopt(argc, argv, "hq")) != EOF) switch(i) { case 'q': verbose = 0; break; case 'h': default: if (rank==0) usage(argv[0]); MPI_Finalize(); return 0; } argc -= optind; argv += optind; if (argc == 1) filename = argv[0]; /* optional argument */ if (nprocs != 4 && rank == 0 && verbose) printf("Warning: %s is intended to run on 4 processes\n",exec); /* set an MPI-IO hint to disable file offset alignment for fix-sized * variables */ MPI_Info_create(&info); MPI_Info_set(info, "nc_var_align_size", "1"); /* create a new file for writing ----------------------------------------*/ cmode = NC_CLOBBER | NC_64BIT_DATA; err = ncmpi_create(MPI_COMM_WORLD, filename, cmode, info, &ncid); ERR MPI_Info_free(&info); /* create a global array of size NY * NX */ err = ncmpi_def_dim(ncid, "Y", NY, &dimid[0]); ERR err = ncmpi_def_dim(ncid, "X", NX, &dimid[1]); ERR err = ncmpi_def_var(ncid, "var0", NC_UINT, NDIMS, dimid, &varid[0]); ERR err = ncmpi_def_var(ncid, "var1", NC_UINT, NDIMS, dimid, &varid[1]); ERR err = ncmpi_def_var(ncid, "var2", NC_UINT, NDIMS, dimid, &varid[2]); ERR err = ncmpi_def_var(ncid, "var3", NC_UINT, NDIMS, dimid, &varid[3]); ERR err = ncmpi_enddef(ncid); ERR /* allocate space for starts and counts */ starts = calloc_3D(4, 6, NDIMS); counts = calloc_3D(4, 6, NDIMS); n = rank % 4; num_segs[n] = 4; /* number of segments for this request */ starts[n][0][0]=0; starts[n][0][1]=5; counts[n][0][0]=1; counts[n][0][1]=2; starts[n][1][0]=1; starts[n][1][1]=0; counts[n][1][0]=1; counts[n][1][1]=1; starts[n][2][0]=2; starts[n][2][1]=6; counts[n][2][0]=1; counts[n][2][1]=2; starts[n][3][0]=3; starts[n][3][1]=0; counts[n][3][0]=1; counts[n][3][1]=3; /* starts[n][][] n_counts[n][][] indicate the following: ("-" means skip) - - - - - X X - - - X - - - - - - - - - - - - - - - X X - - X X X - - - - - - - */ n = (rank+1) % 4; num_segs[n] = 6; /* number of segments for this request */ starts[n][0][0]=0; starts[n][0][1]=3; counts[n][0][0]=1; counts[n][0][1]=2; starts[n][1][0]=0; starts[n][1][1]=8; counts[n][1][0]=1; counts[n][1][1]=2; starts[n][2][0]=1; starts[n][2][1]=5; counts[n][2][0]=1; counts[n][2][1]=2; starts[n][3][0]=2; starts[n][3][1]=0; counts[n][3][0]=1; counts[n][3][1]=2; starts[n][4][0]=2; starts[n][4][1]=8; counts[n][4][0]=1; counts[n][4][1]=2; starts[n][5][0]=3; starts[n][5][1]=4; counts[n][5][0]=1; counts[n][5][1]=3; /* starts[n][][] counts[n][][] indicate the following pattern. - - - X X - - - X X - - - - - X X - - - X X - - - - - - X X - - - - X X X - - - */ n = (rank+2) % 4; num_segs[n] = 5; /* number of segments for this request */ starts[n][0][0]=0; starts[n][0][1]=7; counts[n][0][0]=1; counts[n][0][1]=1; starts[n][1][0]=1; starts[n][1][1]=1; counts[n][1][0]=1; counts[n][1][1]=3; starts[n][2][0]=1; starts[n][2][1]=7; counts[n][2][0]=1; counts[n][2][1]=3; starts[n][3][0]=2; starts[n][3][1]=2; counts[n][3][0]=1; counts[n][3][1]=1; starts[n][4][0]=3; starts[n][4][1]=3; counts[n][4][0]=1; counts[n][4][1]=1; /* starts[n][][] counts[n][][] indicate the following pattern. 
- - - - - - - X - - - X X X - - - X X X - - X - - - - - - - - - - X - - - - - - */ n = (rank+3) % 4; num_segs[n] = 4; /* number of segments for this request */ starts[n][0][0]=0; starts[n][0][1]=0; counts[n][0][0]=1; counts[n][0][1]=3; starts[n][1][0]=1; starts[n][1][1]=4; counts[n][1][0]=1; counts[n][1][1]=1; starts[n][2][0]=2; starts[n][2][1]=3; counts[n][2][0]=1; counts[n][2][1]=3; starts[n][3][0]=3; starts[n][3][1]=7; counts[n][3][0]=1; counts[n][3][1]=3; /*starts[n][][] counts[n][][] indicate the following pattern. X X X - - - - - - - - - - - X - - - - - - - - X X X - - - - - - - - - - - X X X */ /* only rank 0, 1, 2, and 3 do I/O: * each of ranks 0 to 3 write 4 nonblocking requests */ nreqs = 4; if (rank >= 4) nreqs = 0; /* bufsize must be max of data type converted before and after */ MPI_Offset bufsize = 0; /* calculate length of each varn request, number of segments in each * varn request, and allocate write buffer */ for (i=0; i<nreqs; i++) { req_lens[i] = 0; /* total length this request */ for (j=0; j<num_segs[i]; j++) { MPI_Offset req_len=1; for (k=0; k<NDIMS; k++) req_len *= counts[i][j][k]; req_lens[i] += req_len; } if (verbose) printf("req_lens[%d]=%d\n",i,req_lens[i]); /* allocate I/O buffer and initialize its contents */ buffer[i] = (unsigned int*) malloc(req_lens[i] * sizeof(unsigned int)); for (j=0; j<req_lens[i]; j++) buffer[i][j] = rank; bufsize += req_lens[i]; } bufsize *= sizeof(unsigned int); if (verbose) printf("%d: Attach buffer size %lld\n", rank, bufsize); /* give PnetCDF a space to buffer the nonblocking requests */ if (bufsize > 0) { err = ncmpi_buffer_attach(ncid, bufsize); ERR }
int main(int argc, char ** argv) { int ncid, dimid, varid; MPI_Init(&argc, &argv); MPI_Datatype vtype, rtype, usertype; MPI_Aint lb, extent; int userbufsz, *userbuf, *cmpbuf, i, errs=0; int count = 25; double pi = 3.14159; MPI_Offset start, acount; ncmpi_create(MPI_COMM_WORLD, "vectors.nc", NC_CLOBBER, MPI_INFO_NULL, &ncid); ncmpi_def_dim(ncid, "50k", 1024*50, &dimid); ncmpi_def_var(ncid, "vector", NC_DOUBLE, 1, &dimid, &varid); ncmpi_enddef(ncid); MPI_Type_vector(VECCOUNT, BLOCKLEN, STRIDE, MPI_INT, &vtype); MPI_Type_create_resized(vtype, 0, STRIDE*VECCOUNT*sizeof(int), &rtype); MPI_Type_contiguous(count, rtype, &usertype); MPI_Type_commit(&usertype); MPI_Type_free(&vtype); MPI_Type_free(&rtype); MPI_Type_get_extent(usertype, &lb, &extent); userbufsz = extent; userbuf = malloc(userbufsz); cmpbuf = calloc(userbufsz, 1); for (i=0; i< userbufsz/sizeof(int); i++) { userbuf[i] = pi*i; } start = 10; acount = count*12; ncmpi_begin_indep_data(ncid); ncmpi_put_vara(ncid, varid, &start, &acount, userbuf, 1, usertype); ncmpi_close(ncid); NC_CHECK(ncmpi_open(MPI_COMM_WORLD, "vectors.nc", NC_NOWRITE, MPI_INFO_NULL, &ncid)); ncmpi_begin_indep_data(ncid); NC_CHECK(ncmpi_inq_varid(ncid, "vector", &varid)); NC_CHECK(ncmpi_get_vara(ncid, varid, &start, &acount, cmpbuf, 1, usertype)); ncmpi_close(ncid); for (i=0; errs < 10 && i < acount; i++) { /* vector of 4,3,5, so skip 4th and 5th items of every block */ if (i%STRIDE >= BLOCKLEN) continue; if (userbuf[i] != cmpbuf[i]) { errs++; fprintf(stderr, "%d: expected 0x%x got 0x%x\n", i, userbuf[i], cmpbuf[i]); } } free(userbuf); free(cmpbuf); MPI_Type_free(&usertype); MPI_Finalize(); return 0; }
int main(int argc, char** argv) { char filename[256]; int i, j, rank, nprocs, err, nerrs=0; int ncid, varid, dimid[2], req, st; MPI_Offset start[2], count[2], stride[2]; unsigned char buffer[NY][NX]; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); if (argc > 2) { if (!rank) printf("Usage: %s [filename]\n",argv[0]); MPI_Finalize(); return 1; } if (argc == 2) snprintf(filename, 256, "%s", argv[1]); else strcpy(filename, "testfile.nc"); MPI_Bcast(filename, 256, MPI_CHAR, 0, MPI_COMM_WORLD); if (rank == 0) { char *cmd_str = (char*)malloc(strlen(argv[0]) + 256); sprintf(cmd_str, "*** TESTING C %s for ncmpi_end_indep_data ", basename(argv[0])); printf("%-66s ------ ",cmd_str); free(cmd_str); } err = ncmpi_create(MPI_COMM_WORLD, filename, NC_CLOBBER|NC_64BIT_DATA, MPI_INFO_NULL, &ncid); CHECK_ERR err = ncmpi_def_dim(ncid, "Y", NC_UNLIMITED, &dimid[0]); CHECK_ERR err = ncmpi_def_dim(ncid, "X", NX*nprocs, &dimid[1]); CHECK_ERR err = ncmpi_def_var(ncid, "var", NC_UBYTE, NDIMS, dimid, &varid); CHECK_ERR err = ncmpi_enddef(ncid); CHECK_ERR for (i=0; i<NY; i++) for (j=0; j<NX; j++) buffer[i][j] = rank+10; start[0] = 0; start[1] = NX*rank; count[0] = NY/2; count[1] = NX/2; stride[0] = 2; stride[1] = 2; err = ncmpi_buffer_attach(ncid, NY*NX); CHECK_ERR err = ncmpi_begin_indep_data(ncid); CHECK_ERR err = ncmpi_bput_vars_uchar(ncid, varid, start, count, stride, &buffer[0][0], &req); CHECK_ERR /* check if write buffer contents have been altered */ for (i=0; i<NY; i++) for (j=0; j<NX; j++) { if (buffer[i][j] != rank+10) { printf("Error at line %d in %s: put buffer[%d][%d]=%hhu altered, should be %d\n", __LINE__,__FILE__,i,j,buffer[i][j],rank+10); nerrs++; } } err = ncmpi_end_indep_data(ncid); CHECK_ERR /* calling wait API after exiting independent data mode on purpose */ err = ncmpi_wait_all(ncid, 1, &req, &st); CHECK_ERR err = st; CHECK_ERR /* check if write buffer contents have been altered */ for (i=0; i<NY; i++) for (j=0; j<NX; j++) { if (buffer[i][j] != rank+10) { printf("Error at line %d in %s: put buffer[%d][%d]=%hhu altered, should be %d\n", __LINE__,__FILE__,i,j,buffer[i][j],rank+10); nerrs++; } } err = ncmpi_buffer_detach(ncid); CHECK_ERR err = ncmpi_close(ncid); CHECK_ERR /* check if PnetCDF freed all internal malloc */ MPI_Offset malloc_size, sum_size; err = ncmpi_inq_malloc_size(&malloc_size); if (err == NC_NOERR) { MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD); if (rank == 0 && sum_size > 0) { printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n", sum_size); ncmpi_inq_malloc_list(); } } MPI_Allreduce(MPI_IN_PLACE, &nerrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); if (rank == 0) { if (nerrs) printf(FAIL_STR,nerrs); else printf(PASS_STR); } MPI_Finalize(); return (nerrs > 0); }
int main(int argc, char** argv) { extern int optind; char filename[256]; int i, j, rank, nprocs, verbose=1, err, nerrs=0; int ncid, cmode, varid, dimid[2], buf[NY][NX]; char str_att[128]; float float_att[100]; MPI_Offset global_ny, global_nx; MPI_Offset start[2], count[2]; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); /* get command-line arguments */ while ((i = getopt(argc, argv, "hq")) != EOF) switch(i) { case 'q': verbose = 0; break; case 'h': default: if (rank==0) usage(argv[0]); MPI_Finalize(); return 1; } if (argv[optind] == NULL) strcpy(filename, "testfile.nc"); else snprintf(filename, 256, "%s", argv[optind]); MPI_Bcast(filename, 256, MPI_CHAR, 0, MPI_COMM_WORLD); if (verbose && rank == 0) printf("%s: example of using put_vara APIs\n",__FILE__); /* create a new file for writing ----------------------------------------*/ cmode = NC_CLOBBER | NC_64BIT_DATA; err = ncmpi_create(MPI_COMM_WORLD, filename, cmode, MPI_INFO_NULL, &ncid); ERR /* the global array is NY * (NX * nprocs) */ global_ny = NY; global_nx = NX * nprocs; for (i=0; i<NY; i++) for (j=0; j<NX; j++) buf[i][j] = rank; /* add a global attribute: a time stamp at rank 0 */ time_t ltime = time(NULL); /* get the current calendar time */ asctime_r(localtime(&ltime), str_att); /* make sure the time string is consistent among all processes */ MPI_Bcast(str_att, strlen(str_att), MPI_CHAR, 0, MPI_COMM_WORLD); err = ncmpi_put_att_text(ncid, NC_GLOBAL, "history", strlen(str_att), &str_att[0]); ERR /* define dimensions x and y */ err = ncmpi_def_dim(ncid, "Y", global_ny, &dimid[0]); ERR err = ncmpi_def_dim(ncid, "X", global_nx, &dimid[1]); ERR /* define a 2D variable of integer type */ err = ncmpi_def_var(ncid, "var", NC_INT, 2, dimid, &varid); ERR /* add attributes to the variable */ strcpy(str_att, "example attribute of type text."); err = ncmpi_put_att_text(ncid, varid, "str_att_name", strlen(str_att), &str_att[0]); ERR for (i=0; i<8; i++) float_att[i] = i; err = ncmpi_put_att_float(ncid, varid, "float_att_name", NC_FLOAT, 8, &float_att[0]); ERR long long int64_att=10000000000LL; err = ncmpi_put_att_longlong(ncid, varid, "int64_att_name", NC_INT64, 1, &int64_att); ERR /* do not forget to exit define mode */ err = ncmpi_enddef(ncid); ERR /* now we are in data mode */ start[0] = 0; start[1] = NX * rank; count[0] = NY; count[1] = NX; err = ncmpi_put_vara_int_all(ncid, varid, start, count, &buf[0][0]); ERR err = ncmpi_close(ncid); ERR /* check if there is any PnetCDF internal malloc residue */ MPI_Offset malloc_size, sum_size; err = ncmpi_inq_malloc_size(&malloc_size); if (err == NC_NOERR) { MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD); if (rank == 0 && sum_size > 0) printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n", sum_size); } MPI_Finalize(); return (nerrs > 0); }
int main(int argc, char **argv) { MPI_Offset i, j, k; int status; int ncid; int dimid1, dimid2, dimid3, udimid; int square_dim[2], cube_dim[3], xytime_dim[3], time_dim[1]; MPI_Offset square_start[2], cube_start[3] = {0, 0, 0}; MPI_Offset square_count[2] = {50, 50}, cube_count[3] = {100, 50, 50}; MPI_Offset xytime_start[3] = {0, 0, 0}; MPI_Offset xytime_count[3] = {100, 50, 50}; MPI_Offset time_start[1], time_count[1] = {25}; int square_id, cube_id, xytime_id, time_id; static char title[] = "example netCDF dataset"; static char description[] = "2-D integer array"; int data[100][50][50], buffer[100]; int rank; int nprocs; MPI_Comm comm = MPI_COMM_WORLD; double TotalWriteTime; params opts; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (rank == 0) fprintf(stderr, "Testing write ... \n"); parse_write_args(argc, argv, rank, &opts); MPI_Barrier(MPI_COMM_WORLD); TotalWriteTime = MPI_Wtime(); /********** START OF NETCDF ACCESS **************/ /** * Create the dataset * File name: "testwrite.nc" * Dataset API: Collective */ status = ncmpi_create(comm, opts.outfname, NC_CLOBBER|NC_64BIT_OFFSET, MPI_INFO_NULL, &ncid); if (status != NC_NOERR) handle_error(status); /** * Create a global attribute: * :title = "example netCDF dataset"; */ char full_title[64]; /* build the rank-specific title in a separate buffer to avoid overlapping and overflowing title[] */ snprintf(full_title, sizeof(full_title), "%s:%d of %d", title, rank, nprocs); printf("title:%s\n", full_title); status = ncmpi_put_att_text (ncid, NC_GLOBAL, "title", strlen(full_title), full_title); if (status != NC_NOERR) handle_error(status); /** * Add 4 pre-defined dimensions: * x = 100, y = 100, z = 100, time = NC_UNLIMITED */ status = ncmpi_def_dim(ncid, "x", 100L, &dimid1); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_dim(ncid, "y", 100L, &dimid2); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_dim(ncid, "z", 100L, &dimid3); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_dim(ncid, "time", NC_UNLIMITED, &udimid); if (status != NC_NOERR) handle_error(status); /** * Define the dimensionality and then add 4 variables: * square(x, y), cube(x,y,z), time(time), xytime(time, x, y) */ square_dim[0] = cube_dim[0] = xytime_dim[1] = dimid1; square_dim[1] = cube_dim[1] = xytime_dim[2] = dimid2; cube_dim[2] = dimid3; xytime_dim[0] = udimid; time_dim[0] = udimid; status = ncmpi_def_var (ncid, "square", NC_INT, 2, square_dim, &square_id); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_var (ncid, "cube", NC_INT, 3, cube_dim, &cube_id); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_var (ncid, "time", NC_INT, 1, time_dim, &time_id); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_var (ncid, "xytime", NC_INT, 3, xytime_dim, &xytime_id); if (status != NC_NOERR) handle_error(status); /** * Add an attribute for variable: * square: description = "2-D integer array" */ status = ncmpi_put_att_text (ncid, square_id, "description", strlen(description), description); if (status != NC_NOERR) handle_error(status); /** * End Define Mode (switch to data mode) * Dataset API: Collective */ status = ncmpi_enddef(ncid); if (status != NC_NOERR){ handle_error(status); status = ncmpi_close(ncid); if (status != NC_NOERR) handle_error(status); if (rank == 0) { fprintf(stderr, "Fatal Error: file header is inconsistent!\n"); } } /** * Data Partition (Assume 4 processors): * square: 2-D, (Block, Block), 50*50 from 100*100 * cube: 3-D, (*, Block, Block), 100*50*50 from 100*100*100 * xytime: 3-D, (*, Block,
Block), 100*50*50 from 100*100*100 * time: 1-D, Block-wise, 25 from 100 */ else { square_start[0] = cube_start[1] = xytime_start[1] = (rank/2) * 50; square_start[1] = cube_start[2] = xytime_start[2] = (rank%2) * 50; time_start[0] = (rank%4) * 25; /** * Packing data in the buffer */ /* Data for variable: time */ for ( i = time_start[0]; i < time_start[0] + time_count[0]; i++ ) buffer[i - time_start[0]] = i; /* Data for variable: square, cube and xytime */ for ( i = 0; i < 100; i++ ) for ( j = square_start[0]; j < square_start[0]+square_count[0]; j++ ) for ( k = square_start[1]; k < square_start[1]+square_count[1]; k++ ) data[i][j-square_start[0]][k-square_start[1]] = i*100*100 + j*100 + k; /** * Write data into variables: square, cube, time and xytime * Access Method: subarray * Data Mode API: collective */ status = ncmpi_put_vara_int_all(ncid, square_id, square_start, square_count, &data[0][0][0]); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_vara_int_all(ncid, cube_id, cube_start, cube_count, &data[0][0][0]); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_vara_int_all(ncid, time_id, time_start, time_count, (void *)buffer); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_vara_int_all(ncid, xytime_id, xytime_start, xytime_count, &data[0][0][0]); if (status != NC_NOERR) handle_error(status); /* status = ncmpi_sync(ncid); if (status != NC_NOERR) handle_error(status); status = ncmpi_redef(ncid); if (status != NC_NOERR) handle_error(status); status = ncmpi_del_att(ncid, square_id, "description"); if (status != NC_NOERR) handle_error(status); status = ncmpi_enddef(ncid); if (status != NC_NOERR) handle_error(status); */ /** * Close the dataset * Dataset API: collective */ status = ncmpi_close(ncid); if (status != NC_NOERR) handle_error(status); /******************* END OF NETCDF ACCESS ****************/ MPI_Barrier(MPI_COMM_WORLD); TotalWriteTime = MPI_Wtime() - TotalWriteTime; if (rank == 0) { fprintf(stderr, "OK\nFile written to: %s!\n", opts.outfname); fprintf(stderr, "Total Write Time = %10.8f\n", TotalWriteTime); } } MPI_Finalize(); return 0; }
/* This test writes an NP * NP matrix M, where NP is the number of processes: put_vara: process N writes a row of copies of its rank to row N ([N, 0...WIDTH]) using different APIs on different variables. The final result should be: 0 0 0 0 ... 1 1 1 1 ... 2 2 2 2 ... . . . */ int simpletest(char* fname, int enable_log) { int buffer[MAXPROCESSES]; MPI_Offset start[2], count[2]; int i, j, ret, errlen; int NProc, MyRank, NP; // Total process; Rank int fid; // Data set ID int did[2]; // IDs of dimension int vid; // IDs for variables int dims[2]; char tmp[1024], tmp2[1024]; MPI_Info Info; MPI_Comm_size(MPI_COMM_WORLD, &NP); MPI_Comm_rank(MPI_COMM_WORLD, &MyRank); if (NP == 1) { // Act as if there were WIDTH processes, for easy debugging. Most debuggers support only a single process. NProc = SINGLEPROCNP; MyRank = SINGLEPROCRANK; } else{ NProc = NP; } if (MyRank < MAXPROCESSES) { // Ensure each process has an independent buffer directory MPI_Info_create(&Info); if (enable_log) { MPI_Info_set(Info, "pnetcdf_log", "enable"); } // Create new cdf file ret = ncmpi_create(MPI_COMM_WORLD, fname, NC_CLOBBER, Info, &fid); if (ret != NC_NOERR) { printf("Error create file\n"); goto ERROR; } ret = ncmpi_set_fill(fid, NC_FILL, NULL); if (ret != NC_NOERR) { printf("Error set fill\n"); goto ERROR; } ret = ncmpi_def_dim(fid, "X", NProc, did); // X if (ret != NC_NOERR) { printf("Error def dim X\n"); goto ERROR; } ret = ncmpi_def_dim(fid, "Y", NProc, did + 1); // Y if (ret != NC_NOERR) { printf("Error def dim Y\n"); goto ERROR; } ret = ncmpi_def_var(fid, "M", NC_INT, 2, did, &vid); if (ret != NC_NOERR) { printf("Error def var M\n"); goto ERROR; } ret = ncmpi_enddef(fid); if (ret != NC_NOERR) { printf("Error enddef\n"); goto ERROR; } // Indep mode ret = ncmpi_begin_indep_data(fid); if (ret != NC_NOERR) { printf("Error begin indep\n"); goto ERROR; } // We all write rank from now on for (i = 0; i < NProc; i++) { buffer[i] = MyRank; } // put_vara count[0] = 1; count[1] = NProc; start[0] = MyRank; start[1] = 0; ret = ncmpi_put_vara_int(fid, vid, start, count, buffer); if (ret != NC_NOERR) { printf("Error put_vara: %s\n", ncmpi_strerror(ret)); goto ERROR; } // Collective mode ret = ncmpi_end_indep_data(fid); if (ret != NC_NOERR) { printf("Error end indep\n"); goto ERROR; } ret = ncmpi_close(fid); // Close file if (ret != NC_NOERR) { printf("Error close\n"); goto ERROR; } } ERROR: return 0; }
int main(int argc, char **argv) { /* create foo.nc */ int stat; /* return status */ int ncid; /* netCDF id */ /* dimension ids */ int lat_dim; int lon_dim; int time_dim; /* dimension lengths */ size_t lat_len = 10; size_t lon_len = 5; size_t time_len = NC_UNLIMITED; /* variable ids */ int lat_id; int lon_id; int time_id; int z_id; int t_id; int p_id; int rh_id; /* rank (number of dimensions) for each variable */ # define RANK_lat 1 # define RANK_lon 1 # define RANK_time 1 # define RANK_z 3 # define RANK_t 3 # define RANK_p 3 # define RANK_rh 3 /* variable shapes */ int lat_dims[RANK_lat]; int lon_dims[RANK_lon]; int time_dims[RANK_time]; int z_dims[RANK_z]; int t_dims[RANK_t]; int p_dims[RANK_p]; int rh_dims[RANK_rh]; /* attribute vectors */ double z_valid_range[2]; double p__FillValue[1]; int rh__FillValue[1]; MPI_Init(&argc, &argv); /* enter define mode */ stat = ncmpi_create(MPI_COMM_WORLD, "foo.nc", NC_CLOBBER, MPI_INFO_NULL, &ncid); check_err(stat,__LINE__,__FILE__); /* define dimensions */ stat = ncmpi_def_dim(ncid, "lat", lat_len, &lat_dim); check_err(stat,__LINE__,__FILE__); stat = ncmpi_def_dim(ncid, "lon", lon_len, &lon_dim); check_err(stat,__LINE__,__FILE__); stat = ncmpi_def_dim(ncid, "time", time_len, &time_dim); check_err(stat,__LINE__,__FILE__); /* define variables */ lat_dims[0] = lat_dim; stat = ncmpi_def_var(ncid, "lat", NC_INT, RANK_lat, lat_dims, &lat_id); check_err(stat,__LINE__,__FILE__); lon_dims[0] = lon_dim; stat = ncmpi_def_var(ncid, "lon", NC_INT, RANK_lon, lon_dims, &lon_id); check_err(stat,__LINE__,__FILE__); time_dims[0] = time_dim; stat = ncmpi_def_var(ncid, "time", NC_INT, RANK_time, time_dims, &time_id); check_err(stat,__LINE__,__FILE__); z_dims[0] = time_dim; z_dims[1] = lat_dim; z_dims[2] = lon_dim; stat = ncmpi_def_var(ncid, "z", NC_FLOAT, RANK_z, z_dims, &z_id); check_err(stat,__LINE__,__FILE__); t_dims[0] = time_dim; t_dims[1] = lat_dim; t_dims[2] = lon_dim; stat = ncmpi_def_var(ncid, "t", NC_FLOAT, RANK_t, t_dims, &t_id); check_err(stat,__LINE__,__FILE__); p_dims[0] = time_dim; p_dims[1] = lat_dim; p_dims[2] = lon_dim; stat = ncmpi_def_var(ncid, "p", NC_DOUBLE, RANK_p, p_dims, &p_id); check_err(stat,__LINE__,__FILE__); rh_dims[0] = time_dim; rh_dims[1] = lat_dim; rh_dims[2] = lon_dim; stat = ncmpi_def_var(ncid, "rh", NC_INT, RANK_rh, rh_dims, &rh_id); check_err(stat,__LINE__,__FILE__); /* assign attributes */ stat = ncmpi_put_att_text(ncid, lat_id, "units", 13, "degrees_north"); check_err(stat,__LINE__,__FILE__); stat = ncmpi_put_att_text(ncid, lon_id, "units", 12, "degrees_east"); check_err(stat,__LINE__,__FILE__); stat = ncmpi_put_att_text(ncid, time_id, "units", 7, "seconds"); check_err(stat,__LINE__,__FILE__); stat = ncmpi_put_att_text(ncid, z_id, "units", 6, "meters"); check_err(stat,__LINE__,__FILE__); z_valid_range[0] = 0; z_valid_range[1] = 5000; stat = ncmpi_put_att_double(ncid, z_id, "valid_range", NC_DOUBLE, 2, z_valid_range); check_err(stat,__LINE__,__FILE__); p__FillValue[0] = -9999; stat = ncmpi_put_att_double(ncid, p_id, "_FillValue", NC_DOUBLE, 1, p__FillValue); check_err(stat,__LINE__,__FILE__); rh__FillValue[0] = -1; stat = ncmpi_put_att_int(ncid, rh_id, "_FillValue", NC_INT, 1, rh__FillValue); check_err(stat,__LINE__,__FILE__); /* leave define mode */ stat = ncmpi_enddef (ncid); check_err(stat,__LINE__,__FILE__); { /* store lat */ static int lat[] = {0, 10, 20, 30, 40, 50, 60, 70, 80, 90}; ncmpi_begin_indep_data(ncid); stat = ncmpi_put_var_int(ncid, lat_id, lat); ncmpi_end_indep_data(ncid);
check_err(stat,__LINE__,__FILE__); } { /* store lon */ static int lon[] = {-140, -118, -96, -84, -52}; ncmpi_begin_indep_data(ncid); stat = ncmpi_put_var_int(ncid, lon_id, lon); ncmpi_end_indep_data(ncid); check_err(stat,__LINE__,__FILE__); } stat = ncmpi_close(ncid); check_err(stat,__LINE__,__FILE__); MPI_Finalize(); return 0; }
int main(int argc, char **argv) { int ret, ncfile, nprocs, rank, dimid, varid1, varid2, ndims=1; MPI_Offset start, count=1; char buf[13] = "Hello World\n"; int *data=NULL; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); if (argc != 2) { if (rank == 0) printf("Usage: %s filename\n", argv[0]); MPI_Finalize(); exit(-1); } if (rank == 0) { ret = ncmpi_create(MPI_COMM_SELF, argv[1], NC_CLOBBER|NC_64BIT_OFFSET, MPI_INFO_NULL, &ncfile); if (ret != NC_NOERR) handle_error(ret, __LINE__); ret = ncmpi_def_dim(ncfile, "d1", nprocs, &dimid); if (ret != NC_NOERR) handle_error(ret, __LINE__); ret = ncmpi_def_var(ncfile, "v1", NC_INT, ndims, &dimid, &varid1); if (ret != NC_NOERR) handle_error(ret, __LINE__); ret = ncmpi_def_var(ncfile, "v2", NC_INT, ndims, &dimid, &varid2); if (ret != NC_NOERR) handle_error(ret, __LINE__); ret = ncmpi_put_att_text(ncfile, NC_GLOBAL, "string", 13, buf); if (ret != NC_NOERR) handle_error(ret, __LINE__); ret = ncmpi_enddef(ncfile); if (ret != NC_NOERR) handle_error(ret, __LINE__); /* first reason this approach is not scalable: need to allocate * enough memory to hold data from all processors */ data = calloc(nprocs, sizeof(int)); } /* second reason this approach is not scalable: sending to rank 0 * introduces a serialization point, even if using an optimized * collective routine */ MPI_Gather(&rank, 1, MPI_INT, data, 1, MPI_INT, 0, MPI_COMM_WORLD); if (rank == 0) { /* and lastly, the third reason this approach is not scalable: I/O * happens from a single processor. This approach can be ok if the * amount of data is quite small, but almost always the underlying * MPI-IO library can do a better job */ start=0, count=nprocs; ret = ncmpi_put_vara_int_all(ncfile, varid1, &start, &count, data); if (ret != NC_NOERR) handle_error(ret, __LINE__); ret = ncmpi_put_vara_int_all(ncfile, varid2, &start, &count, data); if (ret != NC_NOERR) handle_error(ret, __LINE__); ret = ncmpi_close(ncfile); if (ret != NC_NOERR) handle_error(ret, __LINE__); free(data); } MPI_Finalize(); return 0; }
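/* The comments in the program above list three reasons the gather-to-rank-0 approach does not scale. For contrast, here is a minimal sketch (not part of the original example) in which every process writes its own element of a one-dimensional variable directly with a collective call, so there is no gather buffer, no serialization point, and no single-writer bottleneck. It reuses the same "d1"/"v1" layout and assumes <pnetcdf.h> and <mpi.h> are included and handle_error() is defined as in the example above. */
static int write_in_parallel_sketch(const char *path, int rank, int nprocs)
{
    int ret, ncfile, dimid, varid;
    MPI_Offset start, count;

    /* every process creates the file collectively on MPI_COMM_WORLD */
    ret = ncmpi_create(MPI_COMM_WORLD, path, NC_CLOBBER|NC_64BIT_OFFSET,
                       MPI_INFO_NULL, &ncfile);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);

    ret = ncmpi_def_dim(ncfile, "d1", nprocs, &dimid);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);
    ret = ncmpi_def_var(ncfile, "v1", NC_INT, 1, &dimid, &varid);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);
    ret = ncmpi_enddef(ncfile);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);

    /* each rank writes the single element at offset equal to its rank */
    start = rank;
    count = 1;
    ret = ncmpi_put_vara_int_all(ncfile, varid, &start, &count, &rank);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);

    ret = ncmpi_close(ncfile);
    if (ret != NC_NOERR) handle_error(ret, __LINE__);
    return 0;
}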
int main(int argc, char **argv) { int i, j; int status; int ncid1, ncid2; int ndims, nvars, ngatts, unlimdimid; char name[NC_MAX_NAME]; nc_type type, vartypes[NC_MAX_VARS]; MPI_Offset attlen; MPI_Offset dimlen, shape[NC_MAX_VAR_DIMS], varsize, start[NC_MAX_VAR_DIMS]; void *valuep; int dimids[NC_MAX_DIMS], varids[NC_MAX_VARS]; int vardims[NC_MAX_VARS][NC_MAX_VAR_DIMS/16]; /* divided by 16 due to my memory limitation */ int varndims[NC_MAX_VARS], varnatts[NC_MAX_VARS]; params opts; int rank; int nprocs; MPI_Comm comm = MPI_COMM_WORLD; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (rank == 0) fprintf(stderr, "Testing independent read ... "); parse_read_args(argc, argv, rank, &opts); /********** START OF NETCDF ACCESS **************/ /* Read a netCDF file and write it out to another file */ /** * Open the input dataset - ncid1: * File name: "../data/test_int.nc" * Dataset API: Collective * And create the output dataset - ncid2: * File name: "testread.nc" * Dataset API: Collective */ status = ncmpi_open(comm, opts.infname, 0, MPI_INFO_NULL, &ncid1); if (status != NC_NOERR) handle_error(status); status = ncmpi_create(comm, opts.outfname, NC_CLOBBER, MPI_INFO_NULL, &ncid2); if (status != NC_NOERR) handle_error(status); /** * Inquire the dataset definitions of input dataset AND * Add dataset definitions for output dataset. */ status = ncmpi_inq(ncid1, &ndims, &nvars, &ngatts, &unlimdimid); if (status != NC_NOERR) handle_error(status); /* Inquire global attributes, assume CHAR attributes. */ for (i = 0; i < ngatts; i++) { status = ncmpi_inq_attname(ncid1, NC_GLOBAL, i, name); if (status != NC_NOERR) handle_error(status); status = ncmpi_inq_att (ncid1, NC_GLOBAL, name, &type, &attlen); if (status != NC_NOERR) handle_error(status); switch (type) { case NC_CHAR: valuep = (void *)malloc(attlen * sizeof(char)); status = ncmpi_get_att_text(ncid1, NC_GLOBAL, name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_text (ncid2, NC_GLOBAL, name, attlen, (char *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_SHORT: valuep = (void *)malloc(attlen * sizeof(short)); status = ncmpi_get_att_short(ncid1, NC_GLOBAL, name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_short (ncid2, NC_GLOBAL, name, type, attlen, (short *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_INT: valuep = (void *)malloc(attlen * sizeof(int)); status = ncmpi_get_att_int(ncid1, NC_GLOBAL, name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_int (ncid2, NC_GLOBAL, name, type, attlen, (int *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_FLOAT: valuep = (void *)malloc(attlen * sizeof(float)); status = ncmpi_get_att_float(ncid1, NC_GLOBAL, name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_float (ncid2, NC_GLOBAL, name, type, attlen, (float *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_DOUBLE: valuep = (void *)malloc(attlen * sizeof(double)); status = ncmpi_get_att_double(ncid1, NC_GLOBAL, name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_double (ncid2, NC_GLOBAL, name, type, attlen, (double *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; default: ; /* handle unexpected types */ } } /* Inquire dimension */ for (i = 0; i < ndims; i++) { status = 
ncmpi_inq_dim(ncid1, i, name, &dimlen); if (status != NC_NOERR) handle_error(status); if (i == unlimdimid) dimlen = NC_UNLIMITED; status = ncmpi_def_dim(ncid2, name, dimlen, dimids+i); if (status != NC_NOERR) handle_error(status); } /* Inquire variables */ for (i = 0; i < nvars; i++) { status = ncmpi_inq_var (ncid1, i, name, vartypes+i, varndims+i, vardims[i], varnatts+i); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_var(ncid2, name, vartypes[i], varndims[i], vardims[i], varids+i); if (status != NC_NOERR) handle_error(status); /* var attributes, assume CHAR attributes */ for (j = 0; j < varnatts[i]; j++) { status = ncmpi_inq_attname(ncid1, varids[i], j, name); if (status != NC_NOERR) handle_error(status); status = ncmpi_inq_att (ncid1, varids[i], name, &type, &attlen); if (status != NC_NOERR) handle_error(status); switch (type) { case NC_CHAR: valuep = (void *)malloc(attlen * sizeof(char)); status = ncmpi_get_att_text(ncid1, varids[i], name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_text (ncid2, varids[i], name, attlen, (char *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_SHORT: valuep = (void *)malloc(attlen * sizeof(short)); status = ncmpi_get_att_short(ncid1, varids[i], name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_short (ncid2, varids[i], name, type, attlen, (short *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_INT: valuep = (void *)malloc(attlen * sizeof(int)); status = ncmpi_get_att_int(ncid1, varids[i], name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_int (ncid2, varids[i], name, type, attlen, (int *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_FLOAT: valuep = (void *)malloc(attlen * sizeof(float)); status = ncmpi_get_att_float(ncid1, varids[i], name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_float (ncid2, varids[i], name, type, attlen, (float *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_DOUBLE: valuep = (void *)malloc(attlen * sizeof(double)); status = ncmpi_get_att_double(ncid1, varids[i], name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_double (ncid2, varids[i], name, type, attlen, (double *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; default: ; /* handle unexpected types */ } } } /** * End Define Mode (switch to data mode) for output dataset * Dataset API: Collective */ status = ncmpi_enddef(ncid2); if (status != NC_NOERR) handle_error(status); /** * Read data of variables from input dataset (assume INT variables) * Write the data out to the corresponding variables in the output dataset * * Data Partition (Assume 4 processors): * square: 2-D, (Block, *), 25*100 from 100*100 * cube: 3-D, (Block, *, *), 25*100*100 from 100*100*100 * xytime: 3-D, (Block, *, *), 25*100*100 from 100*100*100 * time: 1-D, Block-wise, 25 from 100 * * Data Mode API: non-collective */ status = ncmpi_begin_indep_data(ncid1); if (status != NC_NOERR) handle_error(status); status =ncmpi_begin_indep_data(ncid2); if (status != NC_NOERR) handle_error(status); for (i = 0; i < NC_MAX_VAR_DIMS; i++) start[i] = 0; for (i = 0; i < nvars; i++) { varsize = 1; for (j = 0; j < varndims[i]; j++) { status = ncmpi_inq_dim(ncid1, vardims[i][j], name, shape + j); if (status != NC_NOERR) handle_error(status); if (j == 0) { shape[j] /= nprocs; 
start[j] = shape[j] * rank; } varsize *= shape[j]; } switch (vartypes[i]) { case NC_CHAR: break; case NC_SHORT: valuep = (void *)malloc(varsize * sizeof(short)); status = ncmpi_get_vara_short(ncid1, i, start, shape, (short *)valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_vara_short(ncid2, varids[i], start, shape, (short *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_INT: valuep = (void *)malloc(varsize * sizeof(int)); status = ncmpi_get_vara_int(ncid1, i, start, shape, (int *)valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_vara_int(ncid2, varids[i], start, shape, (int *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_FLOAT: valuep = (void *)malloc(varsize * sizeof(float)); status = ncmpi_get_vara_float(ncid1, i, start, shape, (float *)valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_vara_float(ncid2, varids[i], start, shape, (float *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_DOUBLE: valuep = (void *)malloc(varsize * sizeof(double)); status = ncmpi_get_vara_double(ncid1, i, start, shape, (double *)valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_vara_double(ncid2, varids[i], start, shape, (double *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; default: ; /* handle unexpected types */ } } status = ncmpi_end_indep_data(ncid1); if (status != NC_NOERR) handle_error(status); status = ncmpi_end_indep_data(ncid2); if (status != NC_NOERR) handle_error(status); status = ncmpi_sync(ncid1); if (status != NC_NOERR) handle_error(status); status = ncmpi_sync(ncid2); if (status != NC_NOERR) handle_error(status); /** * Close the datasets * Dataset API: collective */ status = ncmpi_close(ncid1); if (status != NC_NOERR) handle_error(status); status = ncmpi_close(ncid2); if (status != NC_NOERR) handle_error(status); /******************* END OF NETCDF ACCESS ****************/ if (rank == 0) fprintf(stderr, "OK\nInput file %s copied to: %s!\n", opts.infname, opts.outfname); MPI_Finalize(); return 0; }
int test(char* fname, int enable_log) { int buffer[MAXPROCESSES]; MPI_Offset start[MAXPROCESSES][2], count[MAXPROCESSES][2]; MPI_Offset *sp[MAXPROCESSES], *cp[MAXPROCESSES]; MPI_Offset stride[2]; int i, j, ret; int NProc, MyRank, NP; // Total process; Rank int fid; // Data set ID int did[2]; // IDs of dimension int vid[4]; // IDs for variables int dims[2]; char tmp[1024]; MPI_Info Info; MPI_Comm_size(MPI_COMM_WORLD, &NP); MPI_Comm_rank(MPI_COMM_WORLD, &MyRank); if (NP == 1) { // Act if there is WIDTH processes for easy debugging. Most debugger supports only single proccesses. NProc = SINGLEPROCNP; MyRank = SINGLEPROCRANK; } else{ NProc = NP; } if (MyRank < MAXPROCESSES) { // Ensure each process have a independent buffer directory MPI_Info_create(&Info); if (enable_log) { MPI_Info_set(Info, "pnetcdf_log", "enable"); } // Create new cdf file ret = ncmpi_create(MPI_COMM_WORLD, fname, NC_CLOBBER, Info, &fid); if (ret != NC_NOERR) { printf("Error create file\n"); goto ERROR; } ret = ncmpi_set_fill(fid, NC_FILL, NULL); if (ret != NC_NOERR) { printf("Error set fill\n"); goto ERROR; } ret = ncmpi_def_dim(fid, "X", NProc, did); // X if (ret != NC_NOERR) { printf("Error def dim X\n"); goto ERROR; } ret = ncmpi_def_dim(fid, "Y", NProc * 4, did + 1); // Y if (ret != NC_NOERR) { printf("Error def dim Y\n"); goto ERROR; } ret = ncmpi_def_var(fid, "M0", NC_INT, 2, did, vid + 0); if (ret != NC_NOERR) { printf("Error def var M0\n"); goto ERROR; } ret = ncmpi_def_var(fid, "M1", NC_INT, 2, did, vid + 1); if (ret != NC_NOERR) { printf("Error def var M1\n"); goto ERROR; } ret = ncmpi_def_var(fid, "M2", NC_INT, 2, did, vid + 2); if (ret != NC_NOERR) { printf("Error def var M2\n"); goto ERROR; } ret = ncmpi_def_var(fid, "M3", NC_INT, 2, did, vid + 3); if (ret != NC_NOERR) { printf("Error def var M3\n"); goto ERROR; } ret = ncmpi_enddef(fid); if (ret != NC_NOERR) { printf("Error enddef\n"); goto ERROR; } // We all write rank from now on for (i = 0; i < NProc; i++) { buffer[i] = MyRank; } // put_var1 for (i = 0; i < 4; i++) { for (j = 0; j < NProc; j++) { start[0][0] = MyRank; start[0][1] = i * NProc + j; ret = ncmpi_put_var1_int_all(fid, vid[i], start[0], buffer); if (ret != NC_NOERR) { printf("Error put_var1\n"); goto ERROR; } } } // put_vara for (i = 0; i < 4; i++) { start[0][0] = 0; start[0][1] = ((i + 1) % 4) * NProc + MyRank; count[0][0] = NProc; count[0][1] = 1; ret = ncmpi_put_vara_int_all(fid, vid[i], start[0], count[0], buffer); if (ret != NC_NOERR) { printf("Error put_vara\n"); goto ERROR; } } // put_vars for (i = 0; i < 4; i++) { start[0][0] = MyRank; start[0][1] = ((i + 2) % 4) * NProc + (MyRank % 2); count[0][0] = 1; count[0][1] = NProc / 2; stride[0] = 1; stride[1] = 2; ret = ncmpi_put_vars_int_all(fid, vid[i], start[0], count[0], stride, buffer); if (ret != NC_NOERR) { printf("Error put_vars\n"); goto ERROR; } } // put_varn for (j = 0; j < 4; j++) { for (i = 0; i < NProc; i++) { count[i][0] = 1; count[i][1] = 1; start[i][0] = (MyRank + i) % NProc; start[i][1] = i + ((j + 3) % 4) * NProc; sp[i] = (MPI_Offset*)start[i]; cp[i] = (MPI_Offset*)count[i]; } ret = ncmpi_put_varn_int_all(fid, vid[j], NProc, sp, cp, buffer); if (ret != NC_NOERR) { printf("Error put_varn\n"); goto ERROR; } } // Commit log into cdf file ret = ncmpi_close(fid); // Close file if (ret != NC_NOERR) { printf("Error close"); goto ERROR; } } ERROR:; return 0; }
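/* The put_varn phase in the test above writes its requests but never reads them back. Below is a minimal verification sketch (not part of the original test): reopen the file and fetch the same cells with the get_varn counterpart, using the same starts/counts lists, then compare against the rank value that was written. The helper name and its arguments are illustrative only; since put_varn is the last write phase above, a call such as verify_varn_sketch(fname, vid[j], NProc, sp, cp, MyRank) would be expected to find each requested cell still holding MyRank. */
static int verify_varn_sketch(const char *fname, int vid, int nreq,
                              MPI_Offset* const sp[], MPI_Offset* const cp[],
                              int expected)
{
    int i, ret, fid, nerrs=0, buffer[MAXPROCESSES];

    ret = ncmpi_open(MPI_COMM_WORLD, fname, NC_NOWRITE, MPI_INFO_NULL, &fid);
    if (ret != NC_NOERR) { printf("Error open file\n"); return 1; }

    /* read back exactly the cells written by ncmpi_put_varn_int_all */
    ret = ncmpi_get_varn_int_all(fid, vid, nreq, sp, cp, buffer);
    if (ret != NC_NOERR) { printf("Error get_varn\n"); nerrs++; }

    /* each 1x1 request should return the value this process wrote there */
    for (i=0; i<nreq && ret==NC_NOERR; i++)
        if (buffer[i] != expected) {
            printf("Expected %d at request %d but got %d\n", expected, i, buffer[i]);
            nerrs++;
        }

    ret = ncmpi_close(fid);
    if (ret != NC_NOERR) { printf("Error close\n"); nerrs++; }
    return nerrs;
}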