/* Test a small file with one var and one att. */ static int test_one_with_att(const char *testfile, int cmode) { int err, ncid, dimid, varid; char data = 'h', data_in; int ndims, nvars, natts, unlimdimid; MPI_Offset start[NDIMS], count[NDIMS]; /* Create a file with one ulimited dimensions, and one var. */ err=ncmpi_create(MPI_COMM_WORLD, testfile,cmode, MPI_INFO_NULL, &ncid); ERR err=ncmpi_def_dim(ncid, DIM1_NAME, NC_UNLIMITED, &dimid); ERR err=ncmpi_def_var(ncid, VAR_NAME, NC_CHAR, 1, &dimid, &varid); ERR err=ncmpi_put_att_text(ncid, NC_GLOBAL, ATT_NAME, 1, &data); ERR err=ncmpi_enddef(ncid); ERR /* Write one record of var data, a single character. */ count[0] = 1; start[0] = 0; err=ncmpi_put_vara_text_all(ncid, varid, start, count, &data); ERR /* We're done! */ err=ncmpi_close(ncid); ERR /* Reopen the file and check it. */ err=ncmpi_open(MPI_COMM_WORLD, testfile, NC_NOWRITE, MPI_INFO_NULL, &ncid); ERR err=ncmpi_inq(ncid, &ndims, &nvars, &natts, &unlimdimid); ERR if (ndims != 1 && nvars != 1 && natts != 0 && unlimdimid != 0) {printf("Error at line %d\n",__LINE__);return 1;} err=ncmpi_get_var_text_all(ncid, varid, &data_in); ERR if (data_in != data) {printf("Error at line %d\n",__LINE__);return 1;} err=ncmpi_get_att_text(ncid, NC_GLOBAL, ATT_NAME, &data_in); ERR if (data_in != data) {printf("Error at line %d\n",__LINE__);return 1;} err=ncmpi_close(ncid); ERR return 0; }
int main(int argc, char** argv) { extern int optind; char *filename="testfile.nc"; int i, rank, verbose=1, err; int ncid, cmode, omode; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); /* get command-line arguments */ while ((i = getopt(argc, argv, "hq")) != EOF) switch(i) { case 'q': verbose = 0; break; case 'h': default: if (rank==0) usage(argv[0]); MPI_Finalize(); return 0; } argc -= optind; argv += optind; if (argc == 1) filename = argv[0]; /* optional argument */ if (verbose && rank == 0) printf("%s: example of file create and open\n",__FILE__); /* create a new file using clobber mode ----------------------------------*/ cmode = NC_CLOBBER; err = ncmpi_create(MPI_COMM_WORLD, filename, cmode, MPI_INFO_NULL, &ncid); ERR /* close file */ err = ncmpi_close(ncid); ERR /* open the newly created file for read only -----------------------------*/ omode = NC_NOWRITE; err = ncmpi_open(MPI_COMM_WORLD, filename, omode, MPI_INFO_NULL, &ncid); ERR /* close file */ err = ncmpi_close(ncid); ERR /* check if there is any PnetCDF internal malloc residue */ MPI_Offset malloc_size, sum_size; err = ncmpi_inq_malloc_size(&malloc_size); if (err == NC_NOERR) { MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD); if (rank == 0 && sum_size > 0) printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n", sum_size); } MPI_Finalize(); return 0; }
/* Test a small file with two record vars, which grow, and has
 * attributes added.  Each iteration appends one record to both vars and
 * attaches one new attribute per var, closing and reopening the file
 * around every step to exercise the library. */
static int test_two_growing_with_att(const char *testfile, int cmode)
{
    int err, ncid, dimid, varid[NUM_VARS];
    char data[MAX_RECS], data_in;
    char att_name[NC_MAX_NAME + 1];
    MPI_Offset start[ONE_DIM], count[ONE_DIM], index[ONE_DIM], len_in;
    int v, r;

    /* Create a file with one unlimited dimension, and two record vars. */
    err=ncmpi_create(MPI_COMM_WORLD, testfile,cmode, MPI_INFO_NULL, &ncid); ERR
    err=ncmpi_def_dim(ncid, DIM1_NAME, NC_UNLIMITED, &dimid); ERR
    err=ncmpi_def_var(ncid, VAR_NAME, NC_CHAR, 1, &dimid, &varid[0]); ERR
    err=ncmpi_def_var(ncid, VAR_NAME2, NC_CHAR, 1, &dimid, &varid[1]); ERR
    err=ncmpi_close(ncid); ERR

    /* Create some phoney data: 'a', 'b', 'c', ... */
    for (data[0] = 'a', r = 1; r < MAX_RECS; r++)
        data[r] = data[r - 1] + 1;

    /* Normally one would not close and reopen the file for each
     * record, nor add an attribute each time I add a record, but I am
     * giving the library a little work-out here... */
    for (r = 0; r < MAX_RECS; r++) {
        /* Write one record of var data, a single character. */
        err=ncmpi_open(MPI_COMM_WORLD, testfile, NC_WRITE, MPI_INFO_NULL, &ncid); ERR
        count[0] = 1;
        start[0] = r;
        /* attribute name derived from the record's data byte, e.g. "a_97" */
        sprintf(att_name, "a_%d", data[r]);
        for (v = 0; v < NUM_VARS; v++) {
            err=ncmpi_put_vara_text_all(ncid, varid[v], start, count, &data[r]); ERR
            /* re-enter define mode to attach a fresh attribute to each var */
            err=ncmpi_redef(ncid); ERR
            err=ncmpi_put_att_text(ncid, varid[v], att_name, 1, &data[r]); ERR
            err=ncmpi_enddef(ncid); ERR
        }
        err=ncmpi_close(ncid); ERR

        /* Reopen the file and check it. */
        err=ncmpi_open(MPI_COMM_WORLD, testfile, NC_NOWRITE, MPI_INFO_NULL, &ncid); ERR
        /* the record dimension must have grown to r+1 records */
        err=ncmpi_inq_dimlen(ncid, 0, &len_in); ERR
        if (len_in != r + 1) {printf("Error at line %d\n",__LINE__);return 1;}
        index[0] = r;
        /* single-element read-backs below are independent (non-collective) */
        err=ncmpi_begin_indep_data(ncid); ERR
        for (v = 0; v < NUM_VARS; v++) {
            err=ncmpi_get_var1_text(ncid, varid[v], index, &data_in); ERR
            if (data_in != data[r]) {printf("Error at line %d\n",__LINE__);return 1;}
        }
        err=ncmpi_close(ncid); ERR
    } /* Next record. */
    return 0;
}
static int check_rec_var(char *filename) { int err, nerrs=0, ncid, cmode, varid, dimid[4]; /* create a new file ---------------------------------------------------*/ cmode = NC_CLOBBER; err = ncmpi_create(MPI_COMM_WORLD, filename, cmode, MPI_INFO_NULL, &ncid); ERR err = ncmpi_def_dim(ncid, "Y", NC_UNLIMITED, &dimid[0]); ERR err = ncmpi_def_dim(ncid, "X", 5, &dimid[1]); ERR err = ncmpi_def_dim(ncid, "YY", 66661, &dimid[2]); ERR err = ncmpi_def_dim(ncid, "XX", 66661, &dimid[3]); ERR /* define a record variable */ err = ncmpi_def_var(ncid, "var", NC_INT, 1, dimid, &varid); ERR err = ncmpi_def_var(ncid, "var_last", NC_FLOAT, 2, dimid+2, &varid); ERR err = ncmpi_enddef(ncid); if (err != NC_EVARSIZE) { printf("\nError at line=%d: expecting error code NC_EVARSIZE but got %s\n",__LINE__,nc_err_code_name(err)); nerrs++; } err = ncmpi_close(ncid); if (err != NC_EVARSIZE) { printf("\nError at line=%d: expecting error code NC_EVARSIZE but got %s\n",__LINE__,nc_err_code_name(err)); nerrs++; } return nerrs; }
/*----< test_var_types() >----------------------------------------------------*/ static int test_var_types(char *filename, int format) { int i, err, rank, ncid, cmode, nerrs=0; int dimid, varid[5]; MPI_Info info=MPI_INFO_NULL; MPI_Comm comm=MPI_COMM_WORLD; nc_type xtype[5]={NC_UBYTE, NC_USHORT, NC_UINT, NC_INT64, NC_UINT64}; MPI_Comm_rank(comm, &rank); cmode = NC_CLOBBER|format; /* create a file in CDF-1 or CDF-2 format */ err = ncmpi_create(comm, filename, cmode, info, &ncid); ERR err = ncmpi_def_dim(ncid, "dim", NC_UNLIMITED, &dimid); ERR for (i=0; i<5; i++) { char name[32]; sprintf(name, "var_%d", i); err = ncmpi_def_var(ncid, name, xtype[i], 1, &dimid, &varid[i]); if (err != NC_ESTRICTCDF2) { printf("Error (line=%d): expecting NC_ESTRICTCDF2 but got %s\n", __LINE__,nc_err_code_name(err)); nerrs++; } } err = ncmpi_close(ncid); ERR return nerrs; }
/* Test a small file with an unlimited dimension. NOTE: Normally I * write a NULL terminator for my attributes and text strings, but * this reproduces a bug that a fortran user sent us. So string data * are written to the file without null terminators. - Ed */ static int test_small_unlim(const char *testfile, int cmode) { int i, err, ncid, dimids[NDIMS], varid; char data[NUM_VALS][STR_LEN + 1], data_in[NUM_VALS][STR_LEN]; int ndims, nvars, natts, unlimdimid; MPI_Offset start[NDIMS], count[NDIMS]; /* Create null-terminated text strings of correct length. */ /*for (i = 0; i < NUM_VALS; i++) strcpy(data[i], source);*/ strcpy(data[0], "2005-04-11_12:00:00"); strcpy(data[1], "2005-04-11_13:00:00"); /* Create a file with two dimensions, one unlimited, and one * var, and a global att. */ err=ncmpi_create(MPI_COMM_WORLD, testfile,cmode, MPI_INFO_NULL, &ncid); ERR err=ncmpi_def_dim(ncid, DIM1_NAME, NC_UNLIMITED, dimids); ERR err=ncmpi_def_dim(ncid, DIM2_NAME, STR_LEN, &dimids[1]); ERR err=ncmpi_def_var(ncid, VAR_NAME, NC_CHAR, 2, dimids, &varid); ERR err=ncmpi_put_att_text(ncid, NC_GLOBAL, ATT_NAME2, strlen(TITLE), TITLE); ERR err=ncmpi_enddef(ncid); ERR /* Write some records of var data. */ count[0] = 1; count[1] = STR_LEN; start[1] = 0; for (start[0] = 0; start[0] < NUM_VALS; start[0]++) { err=ncmpi_put_vara_text_all(ncid, varid, start, count, data[start[0]]); ERR } /* We're done! */ err=ncmpi_close(ncid); ERR /* Reopen the file and check it. 
*/ err=ncmpi_open(MPI_COMM_WORLD, testfile, NC_NOWRITE, MPI_INFO_NULL, &ncid); ERR err=ncmpi_inq(ncid, &ndims, &nvars, &natts, &unlimdimid); ERR if (ndims != 2 && nvars != 1 && natts != 0 && unlimdimid != 0) {printf("Error at line %d\n",__LINE__);return 1;} err=ncmpi_get_var_text_all(ncid, varid, (char *)data_in); ERR for (i = 0; i < NUM_VALS; i++) /* if (strncmp(data[i], data_in[i], STR_LEN)) {printf("Error at line %d\n",__LINE__);return 1;} */ if (strncmp(data[i], data_in[i], STR_LEN)) { printf("i=%d data=%s data_in=%s\n",i,data[i],data_in[i]); } err=ncmpi_close(ncid); ERR return 0; }
/* Test a small file with one record var, which grows. */ static int test_one_growing(const char *testfile, int cmode) { int err, ncid, dimid, varid; char data[MAX_RECS], data_in; MPI_Offset start[ONE_DIM], count[ONE_DIM], index[ONE_DIM], len_in; int r, f; /* Create some phoney data. */ for (data[0] = 'a', r = 1; r < MAX_RECS; r++) data[r] = data[r - 1] + 1; /* Run this with and without fill mode. */ for (f = 0; f < 2; f++) { /* Create a file with one ulimited dimensions, and one var. */ err=ncmpi_create(MPI_COMM_WORLD, testfile,cmode, MPI_INFO_NULL, &ncid); ERR err=ncmpi_def_dim(ncid, DIM1_NAME, NC_UNLIMITED, &dimid); ERR err=ncmpi_def_var(ncid, VAR_NAME, NC_CHAR, 1, &dimid, &varid); ERR err=ncmpi_close(ncid); ERR /* Normally one would not close and reopen the file for each * record, but I am giving the library a little work-out here... */ for (r = 0; r < MAX_RECS; r++) { /* Write one record of var data, a single character. */ err=ncmpi_open(MPI_COMM_WORLD, testfile, NC_WRITE, MPI_INFO_NULL, &ncid); ERR /* if (f) { err=ncmpi_set_fill(ncid, NC_NOFILL, NULL); ERR} */ count[0] = 1; start[0] = r; err=ncmpi_put_vara_text_all(ncid, varid, start, count, &data[r]); ERR err=ncmpi_close(ncid); ERR /* Reopen the file and check it. */ err=ncmpi_open(MPI_COMM_WORLD, testfile, NC_NOWRITE, MPI_INFO_NULL, &ncid); ERR err=ncmpi_inq_dimlen(ncid, 0, &len_in); ERR if (len_in != r + 1) {printf("Error at line %d\n",__LINE__);return 1;} index[0] = r; err=ncmpi_begin_indep_data(ncid); ERR err=ncmpi_get_var1_text(ncid, 0, index, &data_in); ERR if (data_in != data[r]) {printf("Error at line %d\n",__LINE__);return 1;} err=ncmpi_close(ncid); ERR } /* Next record. */ } return 0; }
/*
 * Close a file through the NCMPI interface.
 *
 * fd is a heap-allocated int holding the PnetCDF file ID; ownership is
 * taken here and the storage is freed after the close.  When the run is
 * not collective, the file was left in independent data mode and must be
 * switched back before closing.
 */
static void NCMPI_Close(void *fd, IOR_param_t * param)
{
    if (param->collective == FALSE) {
        NCMPI_CHECK(ncmpi_end_indep_data(*(int *)fd),
                    "cannot disable independent data mode");
    }
    NCMPI_CHECK(ncmpi_close(*(int *)fd), "cannot close file");
    free(fd);
}
// Traj_NcEnsemble::closeTraj() void Traj_NcEnsemble::closeTraj() { # ifdef HAS_PNETCDF if (ncid_ == -1) return; //ncmpi_end_indep_data( ncid_ ); // Disable independent data mode. ncmpi_close( ncid_ ); ncid_ = -1; # else NC_close(); # endif }
int main(int argc, char **argv) { char filename[256]; int err, nerrs=0, ncid, cmode, rank, nprocs; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (argc > 2) { if (!rank) printf("Usage: %s [filename]\n",argv[0]); MPI_Finalize(); return 0; } strcpy(filename, "testfile.nc"); if (argc == 2) strcpy(filename, argv[1]); MPI_Bcast(filename, 256, MPI_CHAR, 0, MPI_COMM_WORLD); if (rank == 0) { char cmd_str[256]; sprintf(cmd_str, "*** TESTING C %s for NC_NOCLOBBER and NC_EEXIST ", argv[0]); printf("%-66s ------ ", cmd_str); fflush(stdout); } /* create a file if it does not exist */ cmode = NC_CLOBBER; err = ncmpi_create(MPI_COMM_WORLD, filename, cmode, MPI_INFO_NULL, &ncid); ERR err = ncmpi_close(ncid); ERR /* now the file exists, test if PnetCDF can return correct error code */ cmode = NC_NOCLOBBER; err = ncmpi_create(MPI_COMM_WORLD, filename, cmode, MPI_INFO_NULL, &ncid); if (err != NC_EEXIST) /* err == NC_EOFILE */ nerrs++; /* check if PnetCDF freed all internal malloc */ MPI_Offset malloc_size, sum_size; err = ncmpi_inq_malloc_size(&malloc_size); if (err == NC_NOERR) { MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD); if (rank == 0 && sum_size > 0) printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n", sum_size); } MPI_Allreduce(MPI_IN_PLACE, &nerrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); if (rank == 0) { if (nerrs) printf(FAIL_STR,nerrs); else printf(PASS_STR); } MPI_Finalize(); return 0; }
static int test_small_atts(const char *testfile, int cmode) { int ncid, err; char att[MAX_LEN + 1], att_in[MAX_LEN + 1], source[MAX_LEN + 1] = "0123456"; int ndims, nvars, natts, unlimdimid; MPI_Offset len_in; int t, f; /* Run this with and without fill mode. */ for (f = 0; f < 2; f++) { /* Create small files with an attribute that grows by one each * time. */ for (t = 1; t < MAX_LEN; t++) { /* Create null-terminated text string of correct length. */ strncpy(att, source, t); att[t] = '\0'; /* Create a file with one attribute. */ err = ncmpi_create(MPI_COMM_WORLD, testfile,cmode, MPI_INFO_NULL, &ncid); ERR err = ncmpi_put_att_text(ncid, NC_GLOBAL, ATT_NAME, t + 1, att); ERR if (f) { err=ncmpi_set_fill(ncid, NC_NOFILL, NULL); ERR} err=ncmpi_close(ncid); ERR; /* Reopen the file and check it. */ err=ncmpi_open(MPI_COMM_WORLD, testfile, NC_NOWRITE, MPI_INFO_NULL, &ncid); ERR err=ncmpi_inq(ncid, &ndims, &nvars, &natts, &unlimdimid); ERR if (ndims != 0 && nvars != 0 && natts != 1 && unlimdimid != -1) {printf("Error at line %d\n",__LINE__);return 1;} err=ncmpi_inq_attlen(ncid, NC_GLOBAL, ATT_NAME, &len_in); ERR if (len_in != t + 1) {printf("Error at line %d\n",__LINE__);return 1;} err=ncmpi_get_att_text(ncid, NC_GLOBAL, ATT_NAME, att_in); ERR if (strncmp(att_in, att, t)) {printf("Error at line %d\n",__LINE__);return 1;} err=ncmpi_close(ncid); ERR } } return 0; }
//---------------------------------------------------------------- // Open the first NETCDF file and query all the types for all the variables static void determine_var_types() { assert(netcdf_file_names != 0); assert(netcdf_var_names != 0); struct Type type; int plist_id,i = 0,varidp,ndimsp; int file_id = ncmpi_open(MPI_COMM_WORLD,netcdf_file_names[0], NC_NOWRITE, MPI_INFO_NULL, &plist_id); if (file_id != NC_NOERR) terminate_with_error_msg("ERROR: Cannot open file %s\n", netcdf_file_names[0]); var_types = (struct Type *)calloc(var_count, sizeof(*var_types)); nc_type xtypep; for (i = 0; i < var_count; ++i) { int dataset_id = ncmpi_inq_varid (plist_id, netcdf_var_names[i], &varidp); dataset_id = ncmpi_inq_vartype(plist_id,varidp, &xtypep); if (dataset_id != NC_NOERR) terminate_with_error_msg("ERROR: Cannot read the datatype of the variable %s\n", netcdf_var_names[i]); int num_dims = ncmpi_inq_varndims(plist_id,varidp,&ndimsp); if (ndimsp > 3 ) { //TODO:probably we can make it more clever to handle more dimensions as they are not related to the variable itself on netcdf type.atomic_type = INVALID; // we don't support arrays of more than 3 dimension return; } else if (xtypep == NC_FLOAT || xtypep == NC_INT) { type.num_values = 1; } else // we don't support HD5_COMPOUND datatype for example { type.atomic_type = INVALID; } type.atomic_type = from_netcdf_atomic_type(xtypep); var_types[i] = type; if (var_types[i].atomic_type == INVALID) terminate_with_error_msg("ERROR: The datatype of the %s variable is not supported\n", netcdf_var_names[i]); } ncmpi_close(plist_id); }
/*
 * Close a netCDF file opened through the PnetCDF (NC5) dispatch layer and
 * release its per-file state.  Returns the PnetCDF close status, or the
 * NC_check_id error when the ncid is invalid.
 */
static int NC5_close(int ncid)
{
    NC* nc;
    NC5_INFO* nc5;
    int status = NC_check_id(ncid, &nc);

    /* Bug fix: on a bad ncid the original jumped to cleanup and evaluated
     * NC5_DATA(nc) on an uninitialized pointer -- undefined behavior.
     * Return immediately instead; there is nothing to reclaim. */
    if (status != NC_NOERR)
        return status;

    status = ncmpi_close(nc->int_ncid);

    /* reclaim allocated per-file space regardless of the close status */
    nc5 = NC5_DATA(nc);
    free(nc5); /* free(NULL) is a no-op */

    return status;
}
/* Read each block's variable from its netCDF file using independent
 * (non-collective) PnetCDF I/O, timing the open and read phases and
 * allocating blocks[i].data for the caller. */
void BIL_Pio_read_nc_blocks(MPI_Comm all_readers_comm, MPI_Comm io_comm,
                            int num_blocks, BIL_Block* blocks)
{
  int i;
  for (i = 0; i < num_blocks; i++) {
    int fp;
    int ret;

    /* Bug fix: the ncmpi_* calls used to sit INSIDE assert(), so compiling
     * with -DNDEBUG would silently remove all the I/O.  Perform each call
     * first, then assert on its status. */
    BIL_Timing_fopen_start(all_readers_comm);
    ret = ncmpi_open(io_comm, blocks[i].file_name, NC_NOWRITE,
                     BIL->io_hints, &fp);
    assert(ret == NC_NOERR);
    BIL_Timing_fopen_stop(all_readers_comm);

    ncmpi_begin_indep_data(fp);

    // Find the id, type, and size of the variable.
    int var_id;
    ret = ncmpi_inq_varid(fp, blocks[i].var_name, &var_id);
    assert(ret == NC_NOERR);
    nc_type var_type;
    ret = ncmpi_inq_vartype(fp, var_id, &var_type);
    assert(ret == NC_NOERR);

    // Create extra variables specifically for the netCDF API.
    MPI_Offset nc_dim_starts[BIL_MAX_NUM_DIMS];
    MPI_Offset nc_dim_sizes[BIL_MAX_NUM_DIMS];
    int j;
    for (j = 0; j < blocks[i].num_dims; j++) {
      nc_dim_starts[j] = blocks[i].starts[j];
      nc_dim_sizes[j] = blocks[i].sizes[j];
    }
    MPI_Datatype nc_var_type;
    BIL_Pio_nc_to_mpi_type(var_type, &nc_var_type, &(blocks[i].var_size));

    // Allocate room for data and read it independently.
    blocks[i].data =
      BIL_Misc_malloc(blocks[i].total_size * blocks[i].var_size);
    BIL_Timing_io_start(all_readers_comm);
    ret = ncmpi_get_vara(fp, var_id, nc_dim_starts, nc_dim_sizes,
                         blocks[i].data, blocks[i].total_size, nc_var_type);
    assert(ret == NC_NOERR);
    BIL_Timing_io_stop(all_readers_comm,
                       blocks[i].total_size * blocks[i].var_size);

    // Clean up.
    ncmpi_end_indep_data(fp);
    ncmpi_close(fp);
  }
}
/* Define only fixed-size variables (the last one a large 2-D float array)
 * and verify the file defines and closes without error. */
static int check_last_var(char *filename)
{
    int err, nerrs=0, ncid, cmode, varid, dimid[4];

    /* create a new file ---------------------------------------------------*/
    cmode = NC_CLOBBER;
    err = ncmpi_create(MPI_COMM_WORLD, filename, cmode, MPI_INFO_NULL, &ncid); ERR

    err = ncmpi_def_dim(ncid, "Y",  NC_UNLIMITED, &dimid[0]); ERR
    err = ncmpi_def_dim(ncid, "X",  5,            &dimid[1]); ERR
    err = ncmpi_def_dim(ncid, "YY", 66661,        &dimid[2]); ERR
    err = ncmpi_def_dim(ncid, "XX", 66661,        &dimid[3]); ERR

    /* define only fixed-size variables */
    err = ncmpi_def_var(ncid, "var",      NC_INT,   1, dimid+1, &varid); ERR
    err = ncmpi_def_var(ncid, "var_last", NC_FLOAT, 2, dimid+2, &varid); ERR

    err = ncmpi_enddef(ncid); ERR
    err = ncmpi_close(ncid); ERR
    return nerrs;
}
int main(int argc, char **argv) { int i, j, rank, nprocs, ret; int ncfile, ndims, nvars, ngatts, unlimited; int var_ndims, var_natts;; MPI_Offset *dim_sizes, var_size; MPI_Offset *start, *count; int *requests, *statuses; char varname[NC_MAX_NAME+1]; int dimids[NC_MAX_VAR_DIMS]; nc_type type; int **data; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); if (argc != 2) { if (rank == 0) printf("Usage: %s filename\n", argv[0]); MPI_Finalize(); exit(-1); } ret = ncmpi_open(MPI_COMM_WORLD, argv[1], NC_NOWRITE, MPI_INFO_NULL, &ncfile); if (ret != NC_NOERR) handle_error(ret, __LINE__); /* reader knows nothing about dataset, but we can interrogate with query * routines: ncmpi_inq tells us how many of each kind of "thing" * (dimension, variable, attribute) we will find in the file */ /* no commnunication needed after ncmpi_open: all processors have a cached * veiw of the metadata once ncmpi_open returns */ ret = ncmpi_inq(ncfile, &ndims, &nvars, &ngatts, &unlimited); if (ret != NC_NOERR) handle_error(ret, __LINE__); /* we do not really need the name of the dimension or the variable for * reading in this example. 
we could, in a different example, take the * name of a variable on the command line and read just that one */ dim_sizes = calloc(ndims, sizeof(MPI_Offset)); /* netcdf dimension identifiers are allocated sequentially starting * at zero; same for variable identifiers */ for(i=0; i<ndims; i++) { ret = ncmpi_inq_dimlen(ncfile, i, &(dim_sizes[i]) ); if (ret != NC_NOERR) handle_error(ret, __LINE__); } requests = calloc(nvars, sizeof(int)); statuses = calloc(nvars, sizeof(int)); data = malloc(nvars*sizeof(int*)); for(i=0; i<nvars; i++) { /* much less coordination in this case compared to rank 0 doing all * the i/o: everyone already has the necessary information */ ret = ncmpi_inq_var(ncfile, i, varname, &type, &var_ndims, dimids, &var_natts); if (ret != NC_NOERR) handle_error(ret, __LINE__); start = calloc(var_ndims, sizeof(MPI_Offset)); count = calloc(var_ndims, sizeof(MPI_Offset)); /* we will simply decompose along one dimension. Generally the * application has some algorithim for domain decomposistion. Note * that data decomposistion can have an impact on i/o performance. 
* Often it's best just to do what is natural for the application, * but something to consider if performance is not what was * expected/desired */ start[0] = (dim_sizes[dimids[0]]/nprocs)*rank; count[0] = (dim_sizes[dimids[0]]/nprocs); var_size = count[0]; for (j=1; j<var_ndims; j++) { start[j] = 0; count[j] = dim_sizes[dimids[j]]; var_size *= count[j]; } switch(type) { case NC_INT: data[i] = calloc(var_size, sizeof(int)); /* as with the writes, this call is independent: we * will do any coordination (if desired) in a * subsequent ncmpi_wait_all() call */ ret = ncmpi_iget_vara(ncfile, i, start, count, data[i], var_size, MPI_INT, &(requests[i])); if (ret != NC_NOERR) handle_error(ret, __LINE__); break; default: /* we can do this for all the known netcdf types but this * example is already getting too long */ fprintf(stderr, "unsupported NetCDF type \n"); } free(start); free(count); } ret = ncmpi_wait_all(ncfile, nvars, requests, statuses); if (ret != NC_NOERR) handle_error(ret, __LINE__); /* now that the ncmpi_wait_all has returned, the caller can do stuff with * the buffers passed in to the non-blocking operations. The buffer resue * rules are similar to MPI non-blocking messages */ for (i=0; i<nvars; i++) { if (data[i] != NULL) free(data[i]); } free(data); free(dim_sizes); free(requests); free(statuses); ret = ncmpi_close(ncfile); if (ret != NC_NOERR) handle_error(ret, __LINE__); MPI_Finalize(); return 0; }
int main(int argc, char **argv) { int i, j; int status; int ncid1, ncid2; int ndims, nvars, ngatts, unlimdimid; char name[NC_MAX_NAME]; nc_type type, vartypes[NC_MAX_VARS]; MPI_Offset attlen; MPI_Offset dimlen, shape[NC_MAX_VAR_DIMS], varsize, start[NC_MAX_VAR_DIMS]; void *valuep; int dimids[NC_MAX_DIMS], varids[NC_MAX_VARS]; int vardims[NC_MAX_VARS][NC_MAX_VAR_DIMS/16]; /* divided by 16 due to my memory limitation */ int varndims[NC_MAX_VARS], varnatts[NC_MAX_VARS]; params opts; int rank; int nprocs; MPI_Comm comm = MPI_COMM_WORLD; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (rank == 0) fprintf(stderr, "Testing independent read ... "); parse_read_args(argc, argv, rank, &opts); /********** START OF NETCDF ACCESS **************/ /* Read a netCDF file and write it out to another file */ /** * Open the input dataset - ncid1: * File name: "../data/test_int.nc" * Dataset API: Collective * And create the output dataset - ncid2: * File name: "testread.nc" * Dataset API: Collective */ status = ncmpi_open(comm, opts.infname, 0, MPI_INFO_NULL, &ncid1); if (status != NC_NOERR) handle_error(status); status = ncmpi_create(comm, opts.outfname, NC_CLOBBER, MPI_INFO_NULL, &ncid2); if (status != NC_NOERR) handle_error(status); /** * Inquire the dataset definitions of input dataset AND * Add dataset definitions for output dataset. */ status = ncmpi_inq(ncid1, &ndims, &nvars, &ngatts, &unlimdimid); if (status != NC_NOERR) handle_error(status); /* Inquire global attributes, assume CHAR attributes. 
*/ for (i = 0; i < ngatts; i++) { status = ncmpi_inq_attname(ncid1, NC_GLOBAL, i, name); if (status != NC_NOERR) handle_error(status); status = ncmpi_inq_att (ncid1, NC_GLOBAL, name, &type, &attlen); if (status != NC_NOERR) handle_error(status); switch (type) { case NC_CHAR: valuep = (void *)malloc(attlen * sizeof(char)); status = ncmpi_get_att_text(ncid1, NC_GLOBAL, name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_text (ncid2, NC_GLOBAL, name, attlen, (char *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_SHORT: valuep = (void *)malloc(attlen * sizeof(short)); status = ncmpi_get_att_short(ncid1, NC_GLOBAL, name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_short (ncid2, NC_GLOBAL, name, type, attlen, (short *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_INT: valuep = (void *)malloc(attlen * sizeof(int)); status = ncmpi_get_att_int(ncid1, NC_GLOBAL, name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_int (ncid2, NC_GLOBAL, name, type, attlen, (int *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_FLOAT: valuep = (void *)malloc(attlen * sizeof(float)); status = ncmpi_get_att_float(ncid1, NC_GLOBAL, name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_float (ncid2, NC_GLOBAL, name, type, attlen, (float *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_DOUBLE: valuep = (void *)malloc(attlen * sizeof(double)); status = ncmpi_get_att_double(ncid1, NC_GLOBAL, name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_double (ncid2, NC_GLOBAL, name, type, attlen, (double *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; default: ; /* handle unexpected types */ } } /* Inquire dimension */ for (i = 0; i < ndims; i++) { status = ncmpi_inq_dim(ncid1, 
i, name, &dimlen); if (status != NC_NOERR) handle_error(status); if (i == unlimdimid) dimlen = NC_UNLIMITED; status = ncmpi_def_dim(ncid2, name, dimlen, dimids+i); if (status != NC_NOERR) handle_error(status); } /* Inquire variables */ for (i = 0; i < nvars; i++) { status = ncmpi_inq_var (ncid1, i, name, vartypes+i, varndims+i, vardims[i], varnatts+i); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_var(ncid2, name, vartypes[i], varndims[i], vardims[i], varids+i); if (status != NC_NOERR) handle_error(status); /* var attributes, assume CHAR attributes */ for (j = 0; j < varnatts[i]; j++) { status = ncmpi_inq_attname(ncid1, varids[i], j, name); if (status != NC_NOERR) handle_error(status); status = ncmpi_inq_att (ncid1, varids[i], name, &type, &attlen); if (status != NC_NOERR) handle_error(status); switch (type) { case NC_CHAR: valuep = (void *)malloc(attlen * sizeof(char)); status = ncmpi_get_att_text(ncid1, varids[i], name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_text (ncid2, varids[i], name, attlen, (char *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_SHORT: valuep = (void *)malloc(attlen * sizeof(short)); status = ncmpi_get_att_short(ncid1, varids[i], name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_short (ncid2, varids[i], name, type, attlen, (short *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_INT: valuep = (void *)malloc(attlen * sizeof(int)); status = ncmpi_get_att_int(ncid1, varids[i], name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_int (ncid2, varids[i], name, type, attlen, (int *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_FLOAT: valuep = (void *)malloc(attlen * sizeof(float)); status = ncmpi_get_att_float(ncid1, varids[i], name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_float 
(ncid2, varids[i], name, type, attlen, (float *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_DOUBLE: valuep = (void *)malloc(attlen * sizeof(double)); status = ncmpi_get_att_double(ncid1, varids[i], name, valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_att_double (ncid2, varids[i], name, type, attlen, (double *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; default: ; /* handle unexpected types */ } } } /** * End Define Mode (switch to data mode) for output dataset * Dataset API: Collective */ status = ncmpi_enddef(ncid2); if (status != NC_NOERR) handle_error(status); /** * Read data of variables from input dataset (assume INT variables) * Write the data out to the corresponding variables in the output dataset * * Data Partition (Assume 4 processors): * square: 2-D, (Block, *), 25*100 from 100*100 * cube: 3-D, (Block, *, *), 25*100*100 from 100*100*100 * xytime: 3-D, (Block, *, *), 25*100*100 from 100*100*100 * time: 1-D, Block-wise, 25 from 100 * * Data Mode API: non-collective */ status = ncmpi_begin_indep_data(ncid1); if (status != NC_NOERR) handle_error(status); status =ncmpi_begin_indep_data(ncid2); if (status != NC_NOERR) handle_error(status); for (i = 0; i < NC_MAX_VAR_DIMS; i++) start[i] = 0; for (i = 0; i < nvars; i++) { varsize = 1; for (j = 0; j < varndims[i]; j++) { status = ncmpi_inq_dim(ncid1, vardims[i][j], name, shape + j); if (status != NC_NOERR) handle_error(status); if (j == 0) { shape[j] /= nprocs; start[j] = shape[j] * rank; } varsize *= shape[j]; } switch (vartypes[i]) { case NC_CHAR: break; case NC_SHORT: valuep = (void *)malloc(varsize * sizeof(short)); status = ncmpi_get_vara_short(ncid1, i, start, shape, (short *)valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_vara_short(ncid2, varids[i], start, shape, (short *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_INT: valuep = (void 
*)malloc(varsize * sizeof(int)); status = ncmpi_get_vara_int(ncid1, i, start, shape, (int *)valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_vara_int(ncid2, varids[i], start, shape, (int *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_FLOAT: valuep = (void *)malloc(varsize * sizeof(float)); status = ncmpi_get_vara_float(ncid1, i, start, shape, (float *)valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_vara_float(ncid2, varids[i], start, shape, (float *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; case NC_DOUBLE: valuep = (void *)malloc(varsize * sizeof(double)); status = ncmpi_get_vara_double(ncid1, i, start, shape, (double *)valuep); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_vara_double(ncid2, varids[i], start, shape, (double *)valuep); if (status != NC_NOERR) handle_error(status); free(valuep); break; default: ; /* handle unexpected types */ } } status = ncmpi_end_indep_data(ncid1); if (status != NC_NOERR) handle_error(status); status = ncmpi_end_indep_data(ncid2); if (status != NC_NOERR) handle_error(status); status = ncmpi_sync(ncid1); if (status != NC_NOERR) handle_error(status); status = ncmpi_sync(ncid2); if (status != NC_NOERR) handle_error(status); /** * Close the datasets * Dataset API: collective */ status = ncmpi_close(ncid1); if (status != NC_NOERR) handle_error(status); status = ncmpi_close(ncid2); if (status != NC_NOERR) handle_error(status); /******************* END OF NETCDF ACCESS ****************/ if (rank == 0) fprintf(stderr, "OK\nInput file %s copied to: %s!\n", opts.infname, opts.outfname); MPI_Finalize(); return 0; }
int main(int argc, char ** argv) { int ncid, dimid, varid; MPI_Init(&argc, &argv); MPI_Datatype vtype, rtype, usertype; MPI_Aint lb, extent; int userbufsz, *userbuf, *cmpbuf, i, errs=0; int count = 25; double pi = 3.14159; MPI_Offset start, acount; ncmpi_create(MPI_COMM_WORLD, "vectors.nc", NC_CLOBBER, MPI_INFO_NULL, &ncid); ncmpi_def_dim(ncid, "50k", 1024*50, &dimid); ncmpi_def_var(ncid, "vector", NC_DOUBLE, 1, &dimid, &varid); ncmpi_enddef(ncid); MPI_Type_vector(VECCOUNT, BLOCKLEN, STRIDE, MPI_INT, &vtype); MPI_Type_create_resized(vtype, 0, STRIDE*VECCOUNT*sizeof(int), &rtype); MPI_Type_contiguous(count, rtype, &usertype); MPI_Type_commit(&usertype); MPI_Type_free(&vtype); MPI_Type_free(&rtype); MPI_Type_get_extent(usertype, &lb, &extent); userbufsz = extent; userbuf = malloc(userbufsz); cmpbuf = calloc(userbufsz, 1); for (i=0; i< userbufsz/sizeof(int); i++) { userbuf[i] = pi*i; } start = 10; acount = count*12; ncmpi_begin_indep_data(ncid); ncmpi_put_vara(ncid, varid, &start, &acount, userbuf, 1, usertype); ncmpi_close(ncid); NC_CHECK(ncmpi_open(MPI_COMM_WORLD, "vectors.nc", NC_NOWRITE, MPI_INFO_NULL, &ncid)); ncmpi_begin_indep_data(ncid); NC_CHECK(ncmpi_inq_varid(ncid, "vector", &varid)); NC_CHECK(ncmpi_get_vara(ncid, varid, &start, &acount, cmpbuf, 1, usertype)); ncmpi_close(ncid); for (i=0; errs < 10 && i < acount; i++) { /* vector of 4,3,5, so skip 4th and 5th items of every block */ if (i%STRIDE >= BLOCKLEN) continue; if (userbuf[i] != cmpbuf[i]) { errs++; fprintf(stderr, "%d: expected 0x%x got 0x%x\n", i, userbuf[i], cmpbuf[i]); } } free(userbuf); free(cmpbuf); MPI_Type_free(&usertype); MPI_Finalize(); return 0; }
/* Verify that burst-buffer ("nc_dw") hints passed at file-create time are
 * reported back unchanged by ncmpi_inq_file_info().  The checks only run
 * when the nc_dw driver is actually enabled in the opened file. */
int main(int argc, char** argv) {
    char filename[256];
    int rank, nprocs, err, flag, nerrs=0;
    int log_enabled;    /* 1 iff hint "nc_dw" comes back as "enable" */
    int ncid;
    MPI_Info info, infoused;
    char hint[MPI_MAX_INFO_VAL];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* optional single argument: output file name */
    if (argc > 2) {
        if (!rank) printf("Usage: %s [filename]\n",argv[0]);
        MPI_Finalize();
        return 1;
    }
    if (argc == 2) snprintf(filename, 256, "%s", argv[1]);
    else strcpy(filename, "testfile.nc");
    MPI_Bcast(filename, 256, MPI_CHAR, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        char *cmd_str = (char*)malloc(strlen(argv[0]) + 256);
        sprintf(cmd_str, "*** TESTING C %s for checking offsets of new variables ", basename(argv[0]));
        printf("%-66s ------ ", cmd_str);
        fflush(stdout);
        free(cmd_str);
    }

    /* set the three burst-buffer hints that will be checked below */
    MPI_Info_create(&info);
    MPI_Info_set(info, "nc_dw_overwrite", "enable");
    MPI_Info_set(info, "nc_dw_del_on_close", "disable");
    MPI_Info_set(info, "nc_dw_flush_buffer_size", "256");
    /* MPI_Info_set(info, "nc_dw_dirname", "()@^$@!(_&$)@(#%%&)(*#$"); */

    err = ncmpi_create(MPI_COMM_WORLD, filename, NC_CLOBBER, info, &ncid); CHECK_ERR

    /* fetch the info object the library actually used */
    err = ncmpi_inq_file_info(ncid, &infoused); CHECK_ERR

    MPI_Info_get(infoused, "nc_dw", MPI_MAX_INFO_VAL - 1, hint, &flag);
    if (flag && strcasecmp(hint, "enable") == 0)
        log_enabled = 1;
    else
        log_enabled = 0;

    if (log_enabled) {
        /* each hint must be present and have round-tripped unchanged */
        MPI_Info_get(infoused, "nc_dw_overwrite", MPI_MAX_INFO_VAL - 1, hint, &flag);
        if (flag) {
            if (strcmp(hint, "enable") != 0) {
                printf("Error at line %d: unexpected nc_dw_overwrite = %s, but got %s\n", __LINE__, "enable", hint);
                nerrs++;
            }
        }
        else{
            printf("Error at line %d: nc_dw_overwrite is not set\n", __LINE__);
            nerrs++;
        }
        MPI_Info_get(infoused, "nc_dw_del_on_close", MPI_MAX_INFO_VAL - 1, hint, &flag);
        if (flag) {
            if (strcmp(hint, "disable") != 0) {
                printf("Error at line %d: unexpected nc_dw_del_on_close = %s, but got %s\n", __LINE__, "disable", hint);
                nerrs++;
            }
        }
        else{
            printf("Error at line %d: nc_dw_del_on_close is not set\n", __LINE__);
            nerrs++;
        }
        MPI_Info_get(infoused, "nc_dw_flush_buffer_size", MPI_MAX_INFO_VAL - 1, hint, &flag);
        if (flag) {
            if (strcmp(hint, "256") != 0) {
                printf("Error at line %d: unexpected nc_dw_flush_buffer_size = %s, but got %s\n", __LINE__, "256", hint);
                nerrs++;
            }
        }
        else{
            printf("Error at line %d: nc_dw_flush_buffer_size is not set\n", __LINE__);
            nerrs++;
        }
    }

    err = ncmpi_enddef(ncid); CHECK_ERR
    err = ncmpi_close(ncid); CHECK_ERR

    MPI_Info_free(&info);
    MPI_Info_free(&infoused);

    /* check if PnetCDF freed all internal malloc */
    MPI_Offset malloc_size, sum_size;
    err = ncmpi_inq_malloc_size(&malloc_size);
    if (err == NC_NOERR) {
        MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD);
        if (rank == 0 && sum_size > 0)
            printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n", sum_size);
    }

    /* aggregate error counts across ranks; rank 0 prints the verdict */
    MPI_Allreduce(MPI_IN_PLACE, &nerrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    if (rank == 0) {
        if (nerrs) printf(FAIL_STR,nerrs);
        else printf(PASS_STR);
    }

    MPI_Finalize();
    return (nerrs > 0);
}
int test(char* fname, int enable_log) { int buffer[MAXPROCESSES]; MPI_Offset start[MAXPROCESSES][2], count[MAXPROCESSES][2]; MPI_Offset *sp[MAXPROCESSES], *cp[MAXPROCESSES]; MPI_Offset stride[2]; int i, j, ret; int NProc, MyRank, NP; // Total process; Rank int fid; // Data set ID int did[2]; // IDs of dimension int vid[4]; // IDs for variables int dims[2]; char tmp[1024]; MPI_Info Info; MPI_Comm_size(MPI_COMM_WORLD, &NP); MPI_Comm_rank(MPI_COMM_WORLD, &MyRank); if (NP == 1) { // Act if there is WIDTH processes for easy debugging. Most debugger supports only single proccesses. NProc = SINGLEPROCNP; MyRank = SINGLEPROCRANK; } else{ NProc = NP; } if (MyRank < MAXPROCESSES) { // Ensure each process have a independent buffer directory MPI_Info_create(&Info); if (enable_log) { MPI_Info_set(Info, "pnetcdf_log", "enable"); } // Create new cdf file ret = ncmpi_create(MPI_COMM_WORLD, fname, NC_CLOBBER, Info, &fid); if (ret != NC_NOERR) { printf("Error create file\n"); goto ERROR; } ret = ncmpi_set_fill(fid, NC_FILL, NULL); if (ret != NC_NOERR) { printf("Error set fill\n"); goto ERROR; } ret = ncmpi_def_dim(fid, "X", NProc, did); // X if (ret != NC_NOERR) { printf("Error def dim X\n"); goto ERROR; } ret = ncmpi_def_dim(fid, "Y", NProc * 4, did + 1); // Y if (ret != NC_NOERR) { printf("Error def dim Y\n"); goto ERROR; } ret = ncmpi_def_var(fid, "M0", NC_INT, 2, did, vid + 0); if (ret != NC_NOERR) { printf("Error def var M0\n"); goto ERROR; } ret = ncmpi_def_var(fid, "M1", NC_INT, 2, did, vid + 1); if (ret != NC_NOERR) { printf("Error def var M1\n"); goto ERROR; } ret = ncmpi_def_var(fid, "M2", NC_INT, 2, did, vid + 2); if (ret != NC_NOERR) { printf("Error def var M2\n"); goto ERROR; } ret = ncmpi_def_var(fid, "M3", NC_INT, 2, did, vid + 3); if (ret != NC_NOERR) { printf("Error def var M3\n"); goto ERROR; } ret = ncmpi_enddef(fid); if (ret != NC_NOERR) { printf("Error enddef\n"); goto ERROR; } // We all write rank from now on for (i = 0; i < NProc; i++) { buffer[i] = MyRank; } 
// put_var1 for (i = 0; i < 4; i++) { for (j = 0; j < NProc; j++) { start[0][0] = MyRank; start[0][1] = i * NProc + j; ret = ncmpi_put_var1_int_all(fid, vid[i], start[0], buffer); if (ret != NC_NOERR) { printf("Error put_var1\n"); goto ERROR; } } } // put_vara for (i = 0; i < 4; i++) { start[0][0] = 0; start[0][1] = ((i + 1) % 4) * NProc + MyRank; count[0][0] = NProc; count[0][1] = 1; ret = ncmpi_put_vara_int_all(fid, vid[i], start[0], count[0], buffer); if (ret != NC_NOERR) { printf("Error put_vara\n"); goto ERROR; } } // put_vars for (i = 0; i < 4; i++) { start[0][0] = MyRank; start[0][1] = ((i + 2) % 4) * NProc + (MyRank % 2); count[0][0] = 1; count[0][1] = NProc / 2; stride[0] = 1; stride[1] = 2; ret = ncmpi_put_vars_int_all(fid, vid[i], start[0], count[0], stride, buffer); if (ret != NC_NOERR) { printf("Error put_vars\n"); goto ERROR; } } // put_varn for (j = 0; j < 4; j++) { for (i = 0; i < NProc; i++) { count[i][0] = 1; count[i][1] = 1; start[i][0] = (MyRank + i) % NProc; start[i][1] = i + ((j + 3) % 4) * NProc; sp[i] = (MPI_Offset*)start[i]; cp[i] = (MPI_Offset*)count[i]; } ret = ncmpi_put_varn_int_all(fid, vid[j], NProc, sp, cp, buffer); if (ret != NC_NOERR) { printf("Error put_varn\n"); goto ERROR; } } // Commit log into cdf file ret = ncmpi_close(fid); // Close file if (ret != NC_NOERR) { printf("Error close"); goto ERROR; } } ERROR:; return 0; }
int main(int argc, char **argv) { /* create foo.nc */ int stat; /* return status */ int ncid; /* netCDF id */ /* dimension ids */ int lat_dim; int lon_dim; int time_dim; /* dimension lengths */ size_t lat_len = 10; size_t lon_len = 5; size_t time_len = NC_UNLIMITED; /* variable ids */ int lat_id; int lon_id; int time_id; int z_id; int t_id; int p_id; int rh_id; /* rank (number of dimensions) for each variable */ # define RANK_lat 1 # define RANK_lon 1 # define RANK_time 1 # define RANK_z 3 # define RANK_t 3 # define RANK_p 3 # define RANK_rh 3 /* variable shapes */ int lat_dims[RANK_lat]; int lon_dims[RANK_lon]; int time_dims[RANK_time]; int z_dims[RANK_z]; int t_dims[RANK_t]; int p_dims[RANK_p]; int rh_dims[RANK_rh]; /* attribute vectors */ double z_valid_range[2]; double p__FillValue[1]; int rh__FillValue[1]; int stat=0; MPI_Init(&argc, &argv); /* enter define mode */ stat = ncmpi_create(MPI_COMM_WORLD, "foo.nc", NC_CLOBBER, MPI_INFO_NULL, &ncid); check_err(stat,__LINE__,__FILE__); /* define dimensions */ stat = ncmpi_def_dim(ncid, "lat", lat_len, &lat_dim); check_err(stat,__LINE__,__FILE__); stat = ncmpi_def_dim(ncid, "lon", lon_len, &lon_dim); check_err(stat,__LINE__,__FILE__); stat = ncmpi_def_dim(ncid, "time", time_len, &time_dim); check_err(stat,__LINE__,__FILE__); /* define variables */ lat_dims[0] = lat_dim; stat = ncmpi_def_var(ncid, "lat", NC_INT, RANK_lat, lat_dims, &lat_id); check_err(stat,__LINE__,__FILE__); lon_dims[0] = lon_dim; stat = ncmpi_def_var(ncid, "lon", NC_INT, RANK_lon, lon_dims, &lon_id); check_err(stat,__LINE__,__FILE__); time_dims[0] = time_dim; stat = ncmpi_def_var(ncid, "time", NC_INT, RANK_time, time_dims, &time_id); check_err(stat,__LINE__,__FILE__); z_dims[0] = time_dim; z_dims[1] = lat_dim; z_dims[2] = lon_dim; stat = ncmpi_def_var(ncid, "z", NC_FLOAT, RANK_z, z_dims, &z_id); check_err(stat,__LINE__,__FILE__); t_dims[0] = time_dim; t_dims[1] = lat_dim; t_dims[2] = lon_dim; stat = ncmpi_def_var(ncid, "t", NC_FLOAT, RANK_t, t_dims, 
&t_id); check_err(stat,__LINE__,__FILE__); p_dims[0] = time_dim; p_dims[1] = lat_dim; p_dims[2] = lon_dim; stat = ncmpi_def_var(ncid, "p", NC_DOUBLE, RANK_p, p_dims, &p_id); check_err(stat,__LINE__,__FILE__); rh_dims[0] = time_dim; rh_dims[1] = lat_dim; rh_dims[2] = lon_dim; stat = ncmpi_def_var(ncid, "rh", NC_INT, RANK_rh, rh_dims, &rh_id); check_err(stat,__LINE__,__FILE__); /* assign attributes */ stat = ncmpi_put_att_text(ncid, lat_id, "units", 13, "degrees_north"); check_err(stat,__LINE__,__FILE__); stat = ncmpi_put_att_text(ncid, lon_id, "units", 12, "degrees_east"); check_err(stat,__LINE__,__FILE__); stat = ncmpi_put_att_text(ncid, time_id, "units", 7, "seconds"); check_err(stat,__LINE__,__FILE__); stat = ncmpi_put_att_text(ncid, z_id, "units", 6, "meters"); check_err(stat,__LINE__,__FILE__); z_valid_range[0] = 0; z_valid_range[1] = 5000; stat = ncmpi_put_att_double(ncid, z_id, "valid_range", NC_DOUBLE, 2, z_valid_range); check_err(stat,__LINE__,__FILE__); p__FillValue[0] = -9999; stat = ncmpi_put_att_double(ncid, p_id, "_FillValue", NC_DOUBLE, 1, p__FillValue); check_err(stat,__LINE__,__FILE__); rh__FillValue[0] = -1; stat = ncmpi_put_att_int(ncid, rh_id, "_FillValue", NC_INT, 1, rh__FillValue); check_err(stat,__LINE__,__FILE__); /* leave define mode */ stat = ncmpi_enddef (ncid); check_err(stat,__LINE__,__FILE__); { /* store lat */ static int lat[] = {0, 10, 20, 30, 40, 50, 60, 70, 80, 90}; ncmpi_begin_indep_data(ncid); stat = ncmpi_put_var_int(ncid, lat_id, lat); ncmpi_end_indep_data(ncid); check_err(stat,__LINE__,__FILE__); } { /* store lon */ static int lon[] = {-140, -118, -96, -84, -52}; ncmpi_begin_indep_data(ncid); stat = ncmpi_put_var_int(ncid, lon_id, lon); ncmpi_end_indep_data(ncid); check_err(stat,__LINE__,__FILE__); } stat = ncmpi_close(ncid); check_err(stat,__LINE__,__FILE__); MPI_Finalize(); return 0; }
int main(int argc, char **argv) { MPI_Offset i, j, k; int status; int ncid; int dimid1, dimid2, dimid3, udimid; int square_dim[2], cube_dim[3], xytime_dim[3], time_dim[1]; MPI_Offset square_start[2], cube_start[3] = {0, 0, 0}; MPI_Offset square_count[2] = {50, 50}, cube_count[3] = {100, 50, 50}; MPI_Offset xytime_start[3] = {0, 0, 0}; MPI_Offset xytime_count[3] = {100, 50, 50}; MPI_Offset time_start[1], time_count[1] = {25}; int square_id, cube_id, xytime_id, time_id; static char title[] = "example netCDF dataset"; static char description[] = "2-D integer array"; int data[100][50][50], buffer[100]; int rank; int nprocs; MPI_Comm comm = MPI_COMM_WORLD; double TotalWriteTime; params opts; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (rank == 0) fprintf(stderr, "Testing write ... \n"); parse_write_args(argc, argv, rank, &opts); MPI_Barrier(MPI_COMM_WORLD); TotalWriteTime = MPI_Wtime(); /********** START OF NETCDF ACCESS **************/ /** * Create the dataset * File name: "testwrite.nc" * Dataset API: Collective */ status = ncmpi_create(comm, opts.outfname, NC_CLOBBER|NC_64BIT_OFFSET, MPI_INFO_NULL, &ncid); if (status != NC_NOERR) handle_error(status); /** * Create a global attribute: * :title = "example netCDF dataset"; */ sprintf(title, "%s:%d of %d", title, rank, nprocs); printf("title:%s\n", title); status = ncmpi_put_att_text (ncid, NC_GLOBAL, "title", strlen(title), title); if (status != NC_NOERR) handle_error(status); /** * Add 4 pre-defined dimensions: * x = 100, y = 100, z = 100, time = NC_UNLIMITED */ status = ncmpi_def_dim(ncid, "x", 100L, &dimid1); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_dim(ncid, "y", 100L, &dimid2); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_dim(ncid, "z", 100L, &dimid3); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_dim(ncid, "time", NC_UNLIMITED, &udimid); if (status != NC_NOERR) handle_error(status); /** * 
Define the dimensionality and then add 4 variables: * square(x, y), cube(x,y,z), time(time), xytime(time, x, y) */ square_dim[0] = cube_dim[0] = xytime_dim[1] = dimid1; square_dim[1] = cube_dim[1] = xytime_dim[2] = dimid2; cube_dim[2] = dimid3; xytime_dim[0] = udimid; time_dim[0] = udimid; status = ncmpi_def_var (ncid, "square", NC_INT, 2, square_dim, &square_id); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_var (ncid, "cube", NC_INT, 3, cube_dim, &cube_id); if (status != NC_NOERR) handle_error(status); // status = ncmpi_def_var (ncid, "time", NC_INT, 1, time_dim, &time_id); status = ncmpi_def_var (ncid, "time", NC_INT, 1, time_dim, &time_id); if (status != NC_NOERR) handle_error(status); status = ncmpi_def_var (ncid, "xytime", NC_INT, 3, xytime_dim, &xytime_id); if (status != NC_NOERR) handle_error(status); /** * Add an attribute for variable: * square: decsription = "2-D integer array" */ status = ncmpi_put_att_text (ncid, square_id, "description", strlen(description), description); if (status != NC_NOERR) handle_error(status); /** * End Define Mode (switch to data mode) * Dataset API: Collective */ status = ncmpi_enddef(ncid); if (status != NC_NOERR){ handle_error(status); status = ncmpi_close(ncid); if (status != NC_NOERR) handle_error(status); if (rank == 0) { fprintf(stderr, "Fatal Error: file header is inconsistent!\n"); } } /** * Data Partition (Assume 4 processors): * square: 2-D, (Block, Block), 50*50 from 100*100 * cube: 3-D, (*, Block, Block), 100*50*50 from 100*100*100 * xytime: 3-D, (*, Block, Block), 100*50*50 from 100*100*100 * time: 1-D, Block-wise, 25 from 100 */ else { square_start[0] = cube_start[1] = xytime_start[1] = (rank/2) * 50; square_start[1] = cube_start[2] = xytime_start[2] = (rank%2) * 50; time_start[0] = (rank%4) * 25; /** * Packing data in the buffer */ /* Data for variable: time */ for ( i = time_start[0]; i < time_start[0] + time_count[0]; i++ ) buffer[i - time_start[0]] = i; /* Data for variable: square, cube 
and xytime */ for ( i = 0; i < 100; i++ ) for ( j = square_start[0]; j < square_start[0]+square_count[0]; j++ ) for ( k = square_start[1]; k < square_start[1]+square_count[1]; k++ ) data[i][j-square_start[0]][k-square_start[1]] = i*100*100 + j*100 + k; /** * Write data into variables: square, cube, time and xytime * Access Method: subarray * Data Mode API: collective */ status = ncmpi_put_vara_int_all(ncid, square_id, square_start, square_count, &data[0][0][0]); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_vara_int_all(ncid, cube_id, cube_start, cube_count, &data[0][0][0]); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_vara_int_all(ncid, time_id, time_start, time_count, (void *)buffer); if (status != NC_NOERR) handle_error(status); status = ncmpi_put_vara_int_all(ncid, xytime_id, xytime_start, xytime_count, &data[0][0][0]); if (status != NC_NOERR) handle_error(status); /* status = ncmpi_sync(ncid); if (status != NC_NOERR) handle_error(status); status = ncmpi_redef(ncid); if (status != NC_NOERR) handle_error(status); status = ncmpi_del_att(ncid, square_id, "description"); if (status != NC_NOERR) handle_error(status); status = ncmpi_enddef(ncid); if (status != NC_NOERR) handle_error(status); */ /** * Close the dataset * Dataset API: collective */ status = ncmpi_close(ncid); if (status != NC_NOERR) handle_error(status); /******************* END OF NETCDF ACCESS ****************/ MPI_Barrier(MPI_COMM_WORLD); TotalWriteTime = MPI_Wtime() - TotalWriteTime; if (rank == 0) { fprintf(stderr, "OK\nFile written to: %s!\n", opts.outfname); fprintf(stderr, "Total Write Time = %10.8f\n", TotalWriteTime); } } MPI_Finalize(); return 0; }
/* The test write a NP * NP matrix M, NP is the number of process: put_vara: Process N write N copy of it's rank to row N ([N, 0...WIDTH]) using different APIs on different variable final result should be: 0 0 0 0 ... 1 1 1 1 ... 2 2 2 2 ... . . . */ int simpletest(char* fname, int enable_log) { int buffer[MAXPROCESSES]; MPI_Offset start[2], count[2]; int i, j, ret, errlen; int NProc, MyRank, NP; // Total process; Rank int fid; // Data set ID int did[2]; // IDs of dimension int vid; // IDs for variables int dims[2]; char tmp[1024], tmp2[1024]; MPI_Info Info; MPI_Comm_size(MPI_COMM_WORLD, &NP); MPI_Comm_rank(MPI_COMM_WORLD, &MyRank); if (NP == 1) { // Act if there is WIDTH processes for easy debugging. Most debugger supports only single processes. NProc = SINGLEPROCNP; MyRank = SINGLEPROCRANK; } else{ NProc = NP; } if (MyRank < MAXPROCESSES) { // Ensure each process have a independent buffer directory MPI_Info_create(&Info); if (enable_log) { MPI_Info_set(Info, "pnetcdf_log", "enable"); } // Create new cdf file ret = ncmpi_create(MPI_COMM_WORLD, fname, NC_CLOBBER, Info, &fid); if (ret != NC_NOERR) { printf("Error create file\n"); goto ERROR; } ret = ncmpi_set_fill(fid, NC_FILL, NULL); if (ret != NC_NOERR) { printf("Error set fill\n"); goto ERROR; } ret = ncmpi_def_dim(fid, "X", NProc, did); // X if (ret != NC_NOERR) { printf("Error def dim X\n"); goto ERROR; } ret = ncmpi_def_dim(fid, "Y", NProc, did + 1); // Y if (ret != NC_NOERR) { printf("Error def dim Y\n"); goto ERROR; } ret = ncmpi_def_var(fid, "M", NC_INT, 2, did, vid); if (ret != NC_NOERR) { printf("Error def var M\n"); goto ERROR; } ret = ncmpi_enddef(fid); if (ret != NC_NOERR) { printf("Error enddef\n"); goto ERROR; } // Indep mode ret = ncmpi_begin_indep_data(fid); if (ret != NC_NOERR) { printf("Error begin indep\n"); goto ERROR; } // We all write rank from now on for (i = 0; i < NProc; i++) { buffer[i] = MyRank; } // put_vara count[0] = 1; count[1] = NProc; start[0] = MyRank; start[1] = 0; ret = 
ncmpi_put_vara_int(fid, vid, start, count, buffer); if (ret != NC_NOERR) { MPI_Error_string(ret, tmp, &errlen); printf("Error put_varn: %d\n%s\n", errlen, tmp); goto ERROR; } // Collective mode ncmpi_end_indep_data(fid); if (ret != NC_NOERR) { printf("Error end indep"); goto ERROR; } ncmpi_close(fid); // Close file if (ret != NC_NOERR) { printf("Error close"); goto ERROR; } } ERROR: return 0; }
/* Test that a bput request posted in independent data mode can still be
 * completed with ncmpi_wait_all after ncmpi_end_indep_data, and that the
 * user's write buffer is never altered by PnetCDF (before and after the
 * wait). */
int main(int argc, char** argv) {
    char filename[256];
    int i, j, rank, nprocs, err, nerrs=0;
    int ncid, varid, dimid[2], req, st;
    MPI_Offset start[2], count[2], stride[2];
    unsigned char buffer[NY][NX];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* optional single argument: output file name */
    if (argc > 2) {
        if (!rank) printf("Usage: %s [filename]\n",argv[0]);
        MPI_Finalize();
        return 1;
    }
    if (argc == 2) snprintf(filename, 256, "%s", argv[1]);
    else strcpy(filename, "testfile.nc");
    MPI_Bcast(filename, 256, MPI_CHAR, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        char *cmd_str = (char*)malloc(strlen(argv[0]) + 256);
        sprintf(cmd_str, "*** TESTING C %s for ncmpi_end_indep_data ", basename(argv[0]));
        printf("%-66s ------ ",cmd_str);
        free(cmd_str);
    }

    /* create a CDF-5 file with one record variable var(Y, X) */
    err = ncmpi_create(MPI_COMM_WORLD, filename, NC_CLOBBER|NC_64BIT_DATA, MPI_INFO_NULL, &ncid); CHECK_ERR
    err = ncmpi_def_dim(ncid, "Y", NC_UNLIMITED, &dimid[0]); CHECK_ERR
    err = ncmpi_def_dim(ncid, "X", NX*nprocs, &dimid[1]); CHECK_ERR
    err = ncmpi_def_var(ncid, "var", NC_UBYTE, NDIMS, dimid, &varid); CHECK_ERR
    err = ncmpi_enddef(ncid); CHECK_ERR

    /* every element carries this rank's signature value rank+10 */
    for (i=0; i<NY; i++) for (j=0; j<NX; j++) buffer[i][j] = rank+10;

    /* each rank writes a stride-2 (NY/2 x NX/2) subarray of its own block */
    start[0] = 0;    start[1] = NX*rank;
    count[0] = NY/2; count[1] = NX/2;
    stride[0] = 2;   stride[1] = 2;

    /* bput requires an attached buffer of sufficient size */
    err = ncmpi_buffer_attach(ncid, NY*NX); CHECK_ERR
    err = ncmpi_begin_indep_data(ncid); CHECK_ERR
    err = ncmpi_bput_vars_uchar(ncid, varid, start, count, stride, &buffer[0][0], &req); CHECK_ERR

    /* check if write buffer contents have been altered */
    for (i=0; i<NY; i++) for (j=0; j<NX; j++) {
        if (buffer[i][j] != rank+10) {
            printf("Error at line %d in %s: put buffer[%d][%d]=%hhu altered, should be %d\n",
                   __LINE__,__FILE__,i,j,buffer[i][j],rank+10);
            nerrs++;
        }
    }

    err = ncmpi_end_indep_data(ncid); CHECK_ERR

    /* calling wait API after exiting independent data mode on purpose */
    err = ncmpi_wait_all(ncid, 1, &req, &st); CHECK_ERR
    err = st; CHECK_ERR   /* the per-request status must also be NC_NOERR */

    /* check if write buffer contents have been altered */
    for (i=0; i<NY; i++) for (j=0; j<NX; j++) {
        if (buffer[i][j] != rank+10) {
            printf("Error at line %d in %s: put buffer[%d][%d]=%hhu altered, should be %d\n",
                   __LINE__,__FILE__,i,j,buffer[i][j],rank+10);
            nerrs++;
        }
    }

    err = ncmpi_buffer_detach(ncid); CHECK_ERR
    err = ncmpi_close(ncid); CHECK_ERR

    /* check if PnetCDF freed all internal malloc */
    MPI_Offset malloc_size, sum_size;
    err = ncmpi_inq_malloc_size(&malloc_size);
    if (err == NC_NOERR) {
        MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD);
        if (rank == 0 && sum_size > 0) {
            printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n", sum_size);
            ncmpi_inq_malloc_list();
        }
    }

    /* aggregate error counts; rank 0 prints the verdict */
    MPI_Allreduce(MPI_IN_PLACE, &nerrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    if (rank == 0) {
        if (nerrs) printf(FAIL_STR,nerrs);
        else printf(PASS_STR);
    }

    MPI_Finalize();
    return (nerrs > 0);
}
/** * Read a single-precision parallel-nedcdf file. * * We assume here that localData is a scalar. * * Pnetcdf uses row-major format (same as FFTW). * * \param[in] filename : PnetCDF filename * \param[in] starts : offset to where to start reading data * \param[in] counts : number of elements read (3D sub-domain inside global) * \param[in] gsizes : global sizes * \param[out] localData : actual data buffer (size : nx*ny*nz*sizeof(float)) * * localData must have been allocated prior to calling this routine. */ void read_pnetcdf(const std::string &filename, MPI_Offset starts[3], MPI_Offset counts[3], int gsizes[3], float *localData) { int myRank; MPI_Comm_rank(MPI_COMM_WORLD, &myRank); // netcdf file id int ncFileId; int err; // file opening mode int ncOpenMode = NC_NOWRITE; int nbVar=1; int varIds[nbVar]; MPI_Info mpi_info_used; /* * Open NetCDF file */ err = ncmpi_open(MPI_COMM_WORLD, filename.c_str(), ncOpenMode, MPI_INFO_NULL, &ncFileId); if (err != NC_NOERR) { printf("Error: ncmpi_open() file %s (%s)\n",filename.c_str(),ncmpi_strerror(err)); MPI_Abort(MPI_COMM_WORLD, -1); exit(1); } /* * Query NetCDF mode */ int NC_mode; err = ncmpi_inq_version(ncFileId, &NC_mode); if (myRank==0) { if (NC_mode == NC_64BIT_DATA) std::cout << "Pnetcdf Input mode : NC_64BIT_DATA (CDF-5)\n"; else if (NC_mode == NC_64BIT_OFFSET) std::cout << "Pnetcdf Input mode : NC_64BIT_OFFSET (CDF-2)\n"; else std::cout << "Pnetcdf Input mode : unknown\n"; } /* * Query information about variable named "data" */ { int ndims, nvars, ngatts, unlimited; err = ncmpi_inq(ncFileId, &ndims, &nvars, &ngatts, &unlimited); PNETCDF_HANDLE_ERROR; err = ncmpi_inq_varid(ncFileId, "data", &varIds[0]); PNETCDF_HANDLE_ERROR; } /* * Define expected data types (no conversion done here) */ MPI_Datatype mpiDataType = MPI_FLOAT; /* * Get all the MPI_IO hints used (just in case, we want to print it after * reading data... 
*/ err = ncmpi_get_file_info(ncFileId, &mpi_info_used); PNETCDF_HANDLE_ERROR; /* * Read heavy data (take care of row-major / column major format !) */ int nItems = counts[IX]*counts[IY]*counts[IZ]; { err = ncmpi_get_vara_all(ncFileId, varIds[0], starts, counts, localData, nItems, mpiDataType); PNETCDF_HANDLE_ERROR; } // end reading heavy data /* * close the file */ err = ncmpi_close(ncFileId); PNETCDF_HANDLE_ERROR; } // read_pnetcdf
/* Single-process test: post interleaved nonblocking iput requests on two 2-D
 * variables, then verify (1) the user buffers are not altered by the iput /
 * wait_all path and (2) the file contents read back match what was written. */
int main(int argc, char** argv) {
    char filename[256];
    int i, j, rank, nprocs, err, nerrs=0, expected;
    int ncid, cmode, varid[2], dimid[2], req[4], st[4], *buf;
    int *buf0, *buf1, *buf2;
    size_t len;
    MPI_Offset start[2], count[2];
    MPI_Info info;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* this program is intended to run on one process */
    if (rank) goto fn_exit;

    /* get command-line arguments */
    if (argc > 2) {
        if (!rank) printf("Usage: %s [filename]\n",argv[0]);
        MPI_Finalize();
        return 1;
    }
    if (argc == 2) snprintf(filename, 256, "%s", argv[1]);
    else strcpy(filename, "testfile.nc");

    if (rank == 0) {
        char *cmd_str = (char*)malloc(strlen(argv[0]) + 256);
        sprintf(cmd_str, "*** TESTING C %s for writing interleaved fileviews ", basename(argv[0]));
        printf("%-66s ------ ", cmd_str);
        free(cmd_str);
    }

    MPI_Info_create(&info);
    MPI_Info_set(info, "romio_cb_write", "disable");
    MPI_Info_set(info, "ind_wr_buffer_size", "8");
    /* these 2 hints are required to cause a core dump if r1758 fix is not
     * presented */

    /* create a new file for writing ----------------------------------------*/
    cmode = NC_CLOBBER | NC_64BIT_DATA;
    err = ncmpi_create(MPI_COMM_SELF, filename, cmode, info, &ncid); CHECK_ERR
    MPI_Info_free(&info);

    /* define dimensions Y and X */
    err = ncmpi_def_dim(ncid, "Y", NY, &dimid[0]); CHECK_ERR
    err = ncmpi_def_dim(ncid, "X", NX, &dimid[1]); CHECK_ERR

    /* define 2D variables of integer type */
    err = ncmpi_def_var(ncid, "var0", NC_INT, 2, dimid, &varid[0]); CHECK_ERR
    err = ncmpi_def_var(ncid, "var1", NC_INT, 2, dimid, &varid[1]); CHECK_ERR

    /* enable fill mode */
    err = ncmpi_set_fill(ncid, NC_FILL, NULL); CHECK_ERR

    /* do not forget to exit define mode */
    err = ncmpi_enddef(ncid); CHECK_ERR

    /* now we are in data mode */
    buf = (int*) malloc(NY*NX * sizeof(int));

    /* fill the entire variable var0 with -1s */
    for (i=0; i<NY*NX; i++) buf[i] = -1;
    err = ncmpi_put_var_int_all(ncid, varid[0], buf); CHECK_ERR

    /* write 8 x 2 elements so this only interleaves the next two
     * iput requests */
    start[0] = 0; start[1] = 3;
    count[0] = 8; count[1] = 2;
    len = (size_t)(count[0] * count[1]);
    buf0 = (int*) malloc(len * sizeof(int));
    for (i=0; i<len; i++) buf0[i] = 50+i;
    err = ncmpi_iput_vara_int(ncid, varid[0], start, count, buf0, &req[0]); CHECK_ERR

    /* write 1 x 5 elements (comment fixed: original said "1 x 3") */
    start[0] = 1; start[1] = 8;
    count[0] = 1; count[1] = 5;
    len = (size_t)(count[0] * count[1]);
    buf1 = (int*) malloc(len * sizeof(int));
    for (i=0; i<len; i++) buf1[i] = 60+i;
    err = ncmpi_iput_vara_int(ncid, varid[0], start, count, buf1, &req[1]); CHECK_ERR

    /* write 1 x 5 elements (comment fixed: original said "1 x 3") */
    start[0] = 3; start[1] = 7;
    count[0] = 1; count[1] = 5;
    len = (size_t)(count[0] * count[1]);
    buf2 = (int*) malloc(len * sizeof(int));
    for (i=0; i<len; i++) buf2[i] = 70+i;
    err = ncmpi_iput_vara_int(ncid, varid[0], start, count, buf2, &req[2]); CHECK_ERR

    err = ncmpi_wait_all(ncid, 3, req, st); CHECK_ERR

    free(buf0); free(buf1); free(buf2);

    /* fill the entire variable var1 with -1s */
    for (i=0; i<NY*NX; i++) buf[i] = -1;
    err = ncmpi_put_var_int_all(ncid, varid[1], buf); CHECK_ERR

    /* write 8 x 2 elements so this only interleaves the next two iput
     * requests */
    start[0] = 0; start[1] = 3;
    count[0] = 8; count[1] = 2;
    len = (size_t)(count[0] * count[1]);
    buf0 = (int*) malloc(len * sizeof(int));
    for (i=0; i<count[0]*count[1]; i++) buf0[i] = 50+i;
    err = ncmpi_iput_vara_int(ncid, varid[1], start, count, buf0, &req[0]); CHECK_ERR

    /* rearrange buffer contents, as buf is 2D */
    for (i=0;  i<5;  i++) buf[i] = 10 + i;
    for (i=5;  i<10; i++) buf[i] = 10 + i + 5;
    for (i=10; i<15; i++) buf[i] = 10 + i + 10;
    start[0] = 6; start[1] = 7;
    count[0] = 3; count[1] = 5;
    err = ncmpi_iput_vara_int(ncid, varid[1], start, count, buf, &req[1]); CHECK_ERR

    for (i=15; i<20; i++) buf[i] = 10 + i - 10;
    for (i=20; i<25; i++) buf[i] = 10 + i - 5;
    start[0] = 6; start[1] = 12;
    count[0] = 2; count[1] = 5;
    err = ncmpi_iput_vara_int(ncid, varid[1], start, count, buf+15, &req[2]); CHECK_ERR

    for (i=25; i<30; i++) buf[i] = 10 + i;
    start[0] = 8; start[1] = 12;
    count[0] = 1; count[1] = 5;
    err = ncmpi_iput_vara_int(ncid, varid[1], start, count, buf+25, &req[3]); CHECK_ERR

    err = ncmpi_wait_all(ncid, 4, req, st); CHECK_ERR

    /* check if write buffer contents have been altered */
    for (i=0;  i<16; i++) CHECK_CONTENTS(buf0, 50 + i)
    for (i=0;  i<5;  i++) CHECK_CONTENTS(buf, 10 + i)
    for (i=5;  i<10; i++) CHECK_CONTENTS(buf, 10 + i + 5)
    for (i=10; i<15; i++) CHECK_CONTENTS(buf, 10 + i + 10)
    for (i=15; i<20; i++) CHECK_CONTENTS(buf, 10 + i - 10)
    for (i=20; i<25; i++) CHECK_CONTENTS(buf, 10 + i - 5)
    for (i=25; i<30; i++) CHECK_CONTENTS(buf, 10 + i)

    err = ncmpi_close(ncid); CHECK_ERR
    free(buf0);

    /* open the same file and read back for validate */
    err = ncmpi_open(MPI_COMM_SELF, filename, NC_NOWRITE, MPI_INFO_NULL, &ncid); CHECK_ERR

    err = ncmpi_inq_varid(ncid, "var0", &varid[0]); CHECK_ERR
    err = ncmpi_inq_varid(ncid, "var1", &varid[1]); CHECK_ERR

    /* read the entire array */
    for (i=0; i<NY*NX; i++) buf[i] = -1;
    err = ncmpi_get_var_int_all(ncid, varid[0], buf); CHECK_ERR

    /* check if the contents of buf are expected: the 8x2 block at [0..7][3..4] */
    expected = 50;
    for (j=0; j<8; j++) {
        for (i=3; i<5; i++) {
            if (buf[j*NX+i] != expected) {
                printf("%d: Unexpected read buf[%d][%d]=%d, should be %d\n",
                       rank, j, i, buf[j*NX+i], expected);
                nerrs++;
            }
            expected++;
        }
    }
    /* the 1x5 row at [1][8..12] */
    expected = 60;
    j = 1;
    for (i=8; i<13; i++) {
        if (buf[j*NX+i] != expected) {
            printf("%d: Unexpected read buf[%d][%d]=%d, should be %d\n",
                   rank, j, i, buf[j*NX+i], expected);
            nerrs++;
        }
        expected++;
    }
    /* the 1x5 row at [3][7..11] */
    expected = 70;
    j = 3;
    for (i=7; i<12; i++) {
        if (buf[j*NX+i] != expected) {
            printf("%d: Unexpected read buf[%d][%d]=%d, should be %d\n",
                   rank, j, i, buf[j*NX+i], expected);
            nerrs++;
        }
        expected++;
    }

    /* initialize the contents of the array to a different value */
    for (i=0; i<NY*NX; i++) buf[i] = -1;

    /* read the entire array */
    err = ncmpi_get_var_int_all(ncid, varid[1], buf); CHECK_ERR

    /* check if the contents of buf are expected: the three var1 subarray
     * writes together cover rows 6..8, columns 7..16 with values 10..39 */
    expected = 10;
    for (j=6; j<9; j++) {
        for (i=7; i<17; i++) {
            if (buf[j*NX+i] != expected) {
                printf("%d: Unexpected read buf[%d]=%d, should be %d\n",
                       rank, i, buf[j*NX+i], expected);
                nerrs++;
            }
            expected++;
        }
    }
    /* the 8x2 block at [0..7][3..4] */
    expected = 50;
    for (j=0; j<8; j++) {
        for (i=3; i<5; i++) {
            if (buf[j*NX+i] != expected) {
                printf("%d: Unexpected read buf[%d][%d]=%d, should be %d\n",
                       rank, j, i, buf[j*NX+i], expected);
                nerrs++;
            }
            expected++;
        }
    }

    err = ncmpi_close(ncid); CHECK_ERR
    free(buf);

    /* check if PnetCDF freed all internal malloc */
    MPI_Offset malloc_size;
    err = ncmpi_inq_malloc_size(&malloc_size);
    if (err == NC_NOERR && malloc_size > 0) {
        printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n", malloc_size);
        ncmpi_inq_malloc_list();
    }

fn_exit:
    MPI_Allreduce(MPI_IN_PLACE, &nerrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    if (rank == 0) {
        if (nerrs) printf(FAIL_STR,nerrs);
        else printf(PASS_STR);
    }

    MPI_Finalize();
    return (nerrs > 0);
}
// Close the NetCDF (serial build) or PnetCDF (WITH_PNETCDF build) file
// backing this random field, if one is open.  A handle value of -1 means
// "no open file" (hence the >= 0 test); the handle is reset to -1 on return.
void ITLRandomField::_CloseNetCdf
(
)
{
  // MOD-BY-LEETEN 09/01/2011-FROM:
  // if( iNcId > 0 )
  // TO:
  if( iNcId >= 0 )
  // MOD-BY-LEETEN 09/01/2011-END
  {
    // write the time stamp
    TBuffer<int> piTemp;
    piTemp.alloc(this->IGetNrOfTimeStamps());
    for(int t = 0; t < (int)piTemp.USize(); t++)
      piTemp[t] = this->viTimeStamps[t];

#ifndef WITH_PNETCDF // ADD-BY-LEETEN 08/12/2011
#if 0 // DEL-BY-LEETEN 09/01/2011-BEGIN
    // since the time step will be written earlier, this part can be removed
    size_t uStart = 0;
    size_t uCount = piTemp.USize();
    ASSERT_NETCDF(nc_put_vara_int(
        iNcId,
        iNcTimeVarId,
        &uStart,
        &uCount,
        &piTemp[0]));
#endif // DEL-BY-LEETEN 09/01/2011-END

    /* Close the file. */
    ASSERT_NETCDF(nc_close(iNcId));
  // ADD-BY-LEETEN 08/12/2011-BEGIN
#else // #ifndef WITH_PNETCDF
#if 0 // DEL-BY-LEETEN 09/01/2011-BEGIN
    // disabled: rank 0 wrote the time stamps in independent data mode here
    MPI_Offset uStart = 0;
    MPI_Offset uCount = piTemp.USize();
    ASSERT_NETCDF(ncmpi_begin_indep_data(iNcId));
    if( 0 == iRank )
      ASSERT_NETCDF(ncmpi_put_vara_int(
          iNcId,
          iNcTimeVarId,
          &uStart,
          &uCount,
          &piTemp[0]));
    ASSERT_NETCDF(ncmpi_end_indep_data(iNcId));
#endif // DEL-BY-LEETEN 09/01/2011-END

    /* Close the file. */
    ASSERT_NETCDF(ncmpi_close(iNcId));
#endif // #ifndef WITH_PNETCDF
  // ADD-BY-LEETEN 08/12/2011-END

  // MOD-BY-LEETEN 09/01/2011-FROM:
  // iNcId = 0;
  // TO:
  // mark the handle as closed
  iNcId = -1;
  // MOD-BY-LEETEN 09/01/2011-END
  }
};
int main(int argc, char** argv) { extern int optind; char *filename="testfile.nc"; int i, rank, nprocs, verbose=1, err; int ncid, cmode, varid, dimid[2], num_reqs, *buffer, **bufs, *nvarids; MPI_Offset w_len, **starts, **counts, *bufcounts; MPI_Datatype *datatypes; MPI_Info info; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); /* get command-line arguments */ while ((i = getopt(argc, argv, "hq")) != EOF) switch(i) { case 'q': verbose = 0; break; case 'h': default: if (rank==0) usage(argv[0]); MPI_Finalize(); return 0; } argc -= optind; argv += optind; if (argc == 1) filename = argv[0]; /* optional argument */ if (nprocs != 4 && rank == 0 && verbose) printf("Warning: this program is intended to run on 4 processes\n"); /* set an MPI-IO hint to disable file offset alignment for fix-sized * variables */ MPI_Info_create(&info); MPI_Info_set(info, "nc_var_align_size", "1"); /* create a new file for writing ----------------------------------------*/ cmode = NC_CLOBBER | NC_64BIT_DATA; err = ncmpi_create(MPI_COMM_WORLD, filename, cmode, info, &ncid); ERR MPI_Info_free(&info); /* create a global array of size NY * NX */ err = ncmpi_def_dim(ncid, "Y", NY, &dimid[0]); ERR err = ncmpi_def_dim(ncid, "X", NX, &dimid[1]); ERR err = ncmpi_def_var(ncid, "var", NC_INT, NDIMS, dimid, &varid); ERR err = ncmpi_enddef(ncid); ERR /* pick arbitrary numbers of requests for 4 processes */ num_reqs = 0; if (rank == 0) num_reqs = 4; else if (rank == 1) num_reqs = 6; else if (rank == 2) num_reqs = 5; else if (rank == 3) num_reqs = 4; starts = (MPI_Offset**) malloc(num_reqs * sizeof(MPI_Offset*)); counts = (MPI_Offset**) malloc(num_reqs * sizeof(MPI_Offset*)); starts[0] = (MPI_Offset*) calloc(num_reqs * NDIMS, sizeof(MPI_Offset)); counts[0] = (MPI_Offset*) calloc(num_reqs * NDIMS, sizeof(MPI_Offset)); for (i=1; i<num_reqs; i++) { starts[i] = starts[i-1] + NDIMS; counts[i] = counts[i-1] + NDIMS; } /* assign arbitrary starts and counts */ 
const int y=0, x=1; if (rank == 0) { starts[0][y] = 0; starts[0][x] = 5; counts[0][y] = 1; counts[0][x] = 2; starts[1][y] = 1; starts[1][x] = 0; counts[1][y] = 1; counts[1][x] = 1; starts[2][y] = 2; starts[2][x] = 6; counts[2][y] = 1; counts[2][x] = 2; starts[3][y] = 3; starts[3][x] = 0; counts[3][y] = 1; counts[3][x] = 3; /* rank 0 is writing the followings: ("-" means skip) - - - - - 0 0 - - - 0 - - - - - - - - - - - - - - - 0 0 - - 0 0 0 - - - - - - - */ } else if (rank ==1) { starts[0][y] = 0; starts[0][x] = 3; counts[0][y] = 1; counts[0][x] = 2; starts[1][y] = 0; starts[1][x] = 8; counts[1][y] = 1; counts[1][x] = 2; starts[2][y] = 1; starts[2][x] = 5; counts[2][y] = 1; counts[2][x] = 2; starts[3][y] = 2; starts[3][x] = 0; counts[3][y] = 1; counts[3][x] = 2; starts[4][y] = 2; starts[4][x] = 8; counts[4][y] = 1; counts[4][x] = 2; starts[5][y] = 3; starts[5][x] = 4; counts[5][y] = 1; counts[5][x] = 3; /* rank 1 is writing the followings: ("-" means skip) - - - 1 1 - - - 1 1 - - - - - 1 1 - - - 1 1 - - - - - - 1 1 - - - - 1 1 1 - - - */ } else if (rank ==2) { starts[0][y] = 0; starts[0][x] = 7; counts[0][y] = 1; counts[0][x] = 1; starts[1][y] = 1; starts[1][x] = 1; counts[1][y] = 1; counts[1][x] = 3; starts[2][y] = 1; starts[2][x] = 7; counts[2][y] = 1; counts[2][x] = 3; starts[3][y] = 2; starts[3][x] = 2; counts[3][y] = 1; counts[3][x] = 1; starts[4][y] = 3; starts[4][x] = 3; counts[4][y] = 1; counts[4][x] = 1; /* rank 2 is writing the followings: ("-" means skip) - - - - - - - 2 - - - 2 2 2 - - - 2 2 2 - - 2 - - - - - - - - - - 2 - - - - - - */ } else if (rank ==3) { starts[0][y] = 0; starts[0][x] = 0; counts[0][y] = 1; counts[0][x] = 3; starts[1][y] = 1; starts[1][x] = 4; counts[1][y] = 1; counts[1][x] = 1; starts[2][y] = 2; starts[2][x] = 3; counts[2][y] = 1; counts[2][x] = 3; starts[3][y] = 3; starts[3][x] = 7; counts[3][y] = 1; counts[3][x] = 3; /* rank 3 is writing the followings: ("-" means skip) 3 3 3 - - - - - - - - - - - 3 - - - - - - - - 3 3 3 - - - - 
- - - - - - - 3 3 3 */ } nvarids = (int*) malloc(num_reqs * sizeof(int)); bufcounts = (MPI_Offset*) malloc(num_reqs * sizeof(MPI_Offset)); datatypes = (MPI_Datatype*) malloc(num_reqs * sizeof(MPI_Datatype)); w_len = 0; for (i=0; i<num_reqs; i++) { nvarids[i] = varid; bufcounts[i] = counts[i][x]; datatypes[i] = MPI_INT; w_len += bufcounts[i]; } /* allocate I/O buffer and initialize its contents */ buffer = (int*) malloc(w_len * sizeof(int)); for (i=0; i<w_len; i++) buffer[i] = rank; /* set the buffer pointers to different offsets to the I/O buffer */ bufs = (int**) malloc(num_reqs * sizeof(int*)); bufs[0] = buffer; for (i=1; i<num_reqs; i++) bufs[i] = bufs[i-1] + bufcounts[i-1]; err = ncmpi_mput_vara_all(ncid, num_reqs, nvarids, starts, counts, (void**)bufs, bufcounts, datatypes); ERR err = ncmpi_close(ncid); ERR free(buffer); free(bufs); free(nvarids); free(bufcounts); free(datatypes); free(starts[0]); free(counts[0]); free(starts); free(counts); /* check if there is any PnetCDF internal malloc residue */ MPI_Offset malloc_size, sum_size; err = ncmpi_inq_malloc_size(&malloc_size); if (err == NC_NOERR) { MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD); if (rank == 0 && sum_size > 0) printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n", sum_size); } MPI_Finalize(); return 0; }
/** * Write a parallel-nedcdf file. * * We assume here that localData is a scalar. * * Pnetcdf uses row-major format (same as FFTW). * * \param[in] filename : PnetCDF filename * \param[in] starts : offset to where to start reading data * \param[in] counts : number of elements read (3D sub-domain inside global) * \param[in] gsizes : global sizes * \param[in] localData : actual data buffer (size : nx*ny*nz*sizeof(float)) * */ void write_pnetcdf(const std::string &filename, MPI_Offset starts[3], MPI_Offset counts[3], int gsizes[3], float *localData) { int myRank; MPI_Comm_rank(MPI_COMM_WORLD, &myRank); // netcdf file id int ncFileId; int err; // file creation mode int ncCreationMode = NC_CLOBBER; // CDF-5 is almost mandatory for very large files (>= 2x10^9 cells) // not useful here bool useCDF5 = false; if (useCDF5) ncCreationMode = NC_CLOBBER|NC_64BIT_DATA; else // use CDF-2 file format ncCreationMode = NC_CLOBBER|NC_64BIT_OFFSET; // verbose log ? //bool pnetcdf_verbose = false; int nbVar=1; int dimIds[3], varIds[nbVar]; //MPI_Offset write_size, sum_write_size; MPI_Info mpi_info_used; //char str[512]; // time measurement variables //float write_timing, max_write_timing, write_bw; /* * Create NetCDF file */ err = ncmpi_create(MPI_COMM_WORLD, filename.c_str(), ncCreationMode, MPI_INFO_NULL, &ncFileId); if (err != NC_NOERR) { printf("Error: ncmpi_create() file %s (%s)\n",filename.c_str(),ncmpi_strerror(err)); MPI_Abort(MPI_COMM_WORLD, -1); exit(1); } /* * Define global dimensions */ err = ncmpi_def_dim(ncFileId, "x", gsizes[0], &dimIds[0]); PNETCDF_HANDLE_ERROR; err = ncmpi_def_dim(ncFileId, "y", gsizes[1], &dimIds[1]); PNETCDF_HANDLE_ERROR; err = ncmpi_def_dim(ncFileId, "z", gsizes[2], &dimIds[2]); PNETCDF_HANDLE_ERROR; /* * Define variables to write (give a name) */ nc_type ncDataType = NC_FLOAT; MPI_Datatype mpiDataType = MPI_FLOAT; err = ncmpi_def_var(ncFileId, "data", ncDataType, 3, dimIds, &varIds[0]); PNETCDF_HANDLE_ERROR; /* * global attributes */ // did we use 
CDF-2 or CDF-5 { int useCDF5_int = useCDF5 ? 1 : 0; err = ncmpi_put_att_int(ncFileId, NC_GLOBAL, "CDF-5 mode", NC_INT, 1, &useCDF5_int); PNETCDF_HANDLE_ERROR; } /* * exit the define mode */ err = ncmpi_enddef(ncFileId); PNETCDF_HANDLE_ERROR; /* * Get all the MPI_IO hints used */ err = ncmpi_get_file_info(ncFileId, &mpi_info_used); PNETCDF_HANDLE_ERROR; // copy data to write in intermediate buffer int nItems = counts[IX]*counts[IY]*counts[IZ]; { // debug // printf("Pnetcdf [rank=%d] starts=%lld %lld %lld, counts =%lld %lld %lld, gsizes=%d %d %d\n", // myRank, // starts[0],starts[1],starts[2], // counts[0],counts[1],counts[2], // gsizes[0],gsizes[1],gsizes[2]); /* * make sure PNetCDF doesn't complain when starts is outside of global domain * bound. When nItems is null, off course we don't write anything, but starts * offset have to be inside global domain. * So there is no harm, setting starts to origin. */ if (nItems == 0) { starts[0]=0; starts[1]=0; starts[2]=0; } err = ncmpi_put_vara_all(ncFileId, varIds[0], starts, counts, localData, nItems, mpiDataType); PNETCDF_HANDLE_ERROR; } /* * close the file */ err = ncmpi_close(ncFileId); PNETCDF_HANDLE_ERROR; } // write_pnetcdf
int main(int argc, char **argv) { char dir_name[256], filename[256]; int err, rank, nerrs=0, format, ncid; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (argc != 2) { if (!rank) printf("Usage: %s dir_name\n",argv[0]); MPI_Finalize(); return 0; } strcpy(dir_name, argv[1]); MPI_Bcast(dir_name, 256, MPI_CHAR, 0, MPI_COMM_WORLD); if (rank == 0) { char cmd_str[256]; sprintf(cmd_str, "*** TESTING C %s for inquiring CDF file formats ", argv[0]); printf("%-66s ------ ", cmd_str); } /* test CDF-1 -----------------------------------------------------------*/ sprintf(filename,"%s/test_cdf1.nc",dir_name); err = ncmpi_open(MPI_COMM_WORLD, filename, 0, MPI_INFO_NULL, &ncid); ERR err = ncmpi_inq_format(ncid, &format); ERR if (format != NC_FORMAT_CLASSIC) { printf("Error (line=%d): expecting CDF-1 format for file %s but got %d\n", __LINE__,filename,format); nerrs++; } err = ncmpi_close(ncid); ERR err = ncmpi_inq_file_format(filename, &format); ERR if (format != NC_FORMAT_CLASSIC) { printf("Error (line=%d): expecting CDF-1 format for file %s but got %d\n", __LINE__,filename,format); nerrs++; } /* test CDF-2 -----------------------------------------------------------*/ sprintf(filename,"%s/test_cdf2.nc",dir_name); err = ncmpi_open(MPI_COMM_WORLD, filename, 0, MPI_INFO_NULL, &ncid); ERR err = ncmpi_inq_format(ncid, &format); ERR if (format != NC_FORMAT_CDF2) { printf("Error (line=%d): expecting CDF-2 format for file %s but got %d\n", __LINE__,filename,format); nerrs++; } err = ncmpi_close(ncid); ERR err = ncmpi_inq_file_format(filename, &format); ERR if (format != NC_FORMAT_CDF2) { printf("Error (line=%d): expecting CDF-2 format for file %s but got %d\n", __LINE__,filename,format); nerrs++; } /* test CDF-5 -----------------------------------------------------------*/ sprintf(filename,"%s/test_cdf5.nc",dir_name); err = ncmpi_open(MPI_COMM_WORLD, filename, 0, MPI_INFO_NULL, &ncid); ERR err = ncmpi_inq_format(ncid, &format); ERR if (format != NC_FORMAT_CDF5) { 
printf("Error (line=%d): expecting CDF-5 format for file %s but got %d\n", __LINE__,filename,format); nerrs++; } err = ncmpi_close(ncid); ERR err = ncmpi_inq_file_format(filename, &format); ERR if (format != NC_FORMAT_CDF5) { printf("Error (line=%d): expecting CDF-5 format for file %s but got %d\n", __LINE__,filename,format); nerrs++; } MPI_Offset malloc_size, sum_size; err = ncmpi_inq_malloc_size(&malloc_size); if (err == NC_NOERR) { MPI_Reduce(&malloc_size, &sum_size, 1, MPI_OFFSET, MPI_SUM, 0, MPI_COMM_WORLD); if (rank == 0 && sum_size > 0) printf("heap memory allocated by PnetCDF internally has %lld bytes yet to be freed\n", sum_size); } MPI_Allreduce(MPI_IN_PLACE, &nerrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); if (rank == 0) { if (nerrs) printf(FAIL_STR,nerrs); else printf(PASS_STR); } MPI_Finalize(); return 0; }