/* Let two sets of processes open and read different groups and chunked
 * datasets independently.
 */
void independent_group_read(void)
{
    int mpi_rank, m;
    hid_t plist, fid;
    hbool_t use_gpfs = FALSE;
    const H5Ptest_param_t *pt;
    char *filename;
    int ngroups;

    pt = GetTestParameters();
    filename = pt->name;
    ngroups = pt->count;

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs);
    fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
    H5Pclose(plist);

    /* Open groups and read datasets. Even-ranked processes read every other
     * group starting from the last one; odd-ranked processes read every other
     * group starting from the first one.
     */
    if(mpi_rank % 2 == 0) {
        for(m = ngroups - 1; m >= 0; m -= 2)
            group_dataset_read(fid, mpi_rank, m);
    } else {
        for(m = 0; m < ngroups; m += 2)
            group_dataset_read(fid, mpi_rank, m);
    }

    H5Fclose(fid);
}
/*
 * Test the following scenarios:
 * Case 1:
 *   Sequentially create a file and a dataset with H5D_ALLOC_TIME_EARLY and a
 *   large size, write nothing, close, reopen in parallel, and read to verify
 *   that every element returns the fill value.
 * Case 2:
 *   Sequentially create a file and a dataset with H5D_ALLOC_TIME_EARLY but a
 *   small size, write nothing, close, reopen in parallel, extend to a large
 *   size, close, then reopen in parallel and read to verify that every
 *   element returns the fill value.
 * Case 3:
 *   Sequentially create a file and a dataset with H5D_ALLOC_TIME_EARLY and a
 *   large size, write only a small part of the dataset (the second-to-last
 *   chunk), close, then reopen in parallel and read to verify that everything
 *   returns the fill value except the small portion that was written. Without
 *   closing, write all parts of the dataset in an interleaved pattern, close,
 *   reopen, and read to verify that all data are as written.
 */
void test_chunk_alloc(void)
{
    const char *filename;
    hid_t file_id, dataset;

    file_id = dataset = -1;

    filename = GetTestParameters();
    if (VERBOSE_MED)
        printf("Extend Chunked allocation test on file %s\n", filename);

    /* Case 1 */
    /* Create chunked dataset without writing anything. */
    create_chunked_dataset(filename, DSETCHUNKS, none);
    /* Reopen dataset in parallel and check the file size */
    parallel_access_dataset(filename, DSETCHUNKS, open_only, &file_id, &dataset);
    /* Reopen dataset in parallel, read and verify the data */
    verify_data(filename, DSETCHUNKS, none, CLOSE, &file_id, &dataset);

    /* Case 2 sometimes fails. See bugs 281 and 636. Skip it for now; it needs
     * to be fixed later. */
    if (VERBOSE_LO) {
        printf("Started Case 2\n");
        /* Case 2 */
        /* Create chunked dataset without writing anything */
        create_chunked_dataset(filename, 20, none);
        /* Reopen dataset in parallel and only extend it */
        parallel_access_dataset(filename, DSETCHUNKS, extend_only, &file_id, &dataset);
        /* Reopen dataset in parallel, read and verify the data */
        verify_data(filename, DSETCHUNKS, none, CLOSE, &file_id, &dataset);
        printf("Finished Case 2\n");
    } else {
        if (MAINPROCESS)
            printf("Skipped Case 2. Use '-v l' to test it.\n");
    }

    /* Case 3 */
    /* Create chunked dataset and write to the second-to-last chunk */
    create_chunked_dataset(filename, DSETCHUNKS, sec_last);
    /* Reopen dataset in parallel, read and verify the data. The file and
     * dataset are not closed. */
    verify_data(filename, DSETCHUNKS, sec_last, NO_CLOSE, &file_id, &dataset);
    /* All processes write to all the chunks in an interleaved way */
    parallel_access_dataset(filename, DSETCHUNKS, write_all, &file_id, &dataset);
    /* Reopen dataset in parallel, read and verify the data */
    verify_data(filename, DSETCHUNKS, all, CLOSE, &file_id, &dataset);
}
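/* The three cases above hinge on H5D_ALLOC_TIME_EARLY: space for every chunk
 * is allocated at dataset creation time, so a dataset that has never been
 * written still reads back as fill values. create_chunked_dataset() is not
 * shown in this excerpt; the sketch below illustrates the kind of dataset
 * creation property list it presumably sets up. The helper name, CHUNK_SIZE,
 * and the 1-D shape are illustrative assumptions, not the test's actual code.
 */
static hid_t
make_early_alloc_dataset(hid_t file_id, const char *dset_name, hsize_t nchunks)
{
    hsize_t dims[1]       = {nchunks * CHUNK_SIZE};   /* CHUNK_SIZE: assumed constant */
    hsize_t chunk_dims[1] = {CHUNK_SIZE};
    hid_t space_id, dcpl_id, dset_id;

    space_id = H5Screate_simple(1, dims, NULL);
    dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl_id, 1, chunk_dims);
    /* Allocate every chunk at creation time instead of on first write */
    H5Pset_alloc_time(dcpl_id, H5D_ALLOC_TIME_EARLY);

    dset_id = H5Dcreate2(file_id, dset_name, H5T_NATIVE_UCHAR, space_id,
                         H5P_DEFAULT, dcpl_id, H5P_DEFAULT);

    H5Pclose(dcpl_id);
    H5Sclose(space_id);
    return dset_id;
}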
/* * A test for issue HDFFV-10501. A parallel hang was reported which occurred * in linked-chunk I/O when collective metadata reads are enabled and some ranks * do not have any selection in a dataset's dataspace, while others do. The ranks * which have no selection during the read/write operation called H5D__chunk_addrmap() * to retrieve the lowest chunk address, since we require that the read/write be done * in strictly non-decreasing order of chunk address. For version 1 and 2 B-trees, * this caused the non-participating ranks to issue a collective MPI_Bcast() call * which the other ranks did not issue, thus causing a hang. * * However, since these ranks are not actually reading/writing anything, this call * can simply be removed and the address used for the read/write can be set to an * arbitrary number (0 was chosen). */ void test_partial_no_selection_coll_md_read(void) { const char *filename; hsize_t *dataset_dims = NULL; hsize_t max_dataset_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS]; hsize_t sel_dims[1]; hsize_t chunk_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS] = { PARTIAL_NO_SELECTION_Y_DIM_SCALE, PARTIAL_NO_SELECTION_X_DIM_SCALE }; hsize_t start[PARTIAL_NO_SELECTION_DATASET_NDIMS]; hsize_t stride[PARTIAL_NO_SELECTION_DATASET_NDIMS]; hsize_t count[PARTIAL_NO_SELECTION_DATASET_NDIMS]; hsize_t block[PARTIAL_NO_SELECTION_DATASET_NDIMS]; hid_t file_id = -1; hid_t fapl_id = -1; hid_t dset_id = -1; hid_t dcpl_id = -1; hid_t dxpl_id = -1; hid_t fspace_id = -1; hid_t mspace_id = -1; int mpi_rank, mpi_size; void *data = NULL; void *read_buf = NULL; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); filename = GetTestParameters(); fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); /* * Even though the testphdf5 framework currently sets collective metadata reads * on the FAPL, we call it here just to be sure this is futureproof, since * demonstrating this issue relies upon it. */ VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded"); file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); VRFY((file_id >= 0), "H5Fcreate succeeded"); dataset_dims = malloc(PARTIAL_NO_SELECTION_DATASET_NDIMS * sizeof(*dataset_dims)); VRFY((dataset_dims != NULL), "malloc succeeded"); dataset_dims[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE * mpi_size; dataset_dims[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE * mpi_size; max_dataset_dims[0] = H5S_UNLIMITED; max_dataset_dims[1] = H5S_UNLIMITED; fspace_id = H5Screate_simple(PARTIAL_NO_SELECTION_DATASET_NDIMS, dataset_dims, max_dataset_dims); VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); /* * Set up chunking on the dataset in order to reproduce the problem. */ dcpl_id = H5Pcreate(H5P_DATASET_CREATE); VRFY((dcpl_id >= 0), "H5Pcreate succeeded"); VRFY((H5Pset_chunk(dcpl_id, PARTIAL_NO_SELECTION_DATASET_NDIMS, chunk_dims) >= 0), "H5Pset_chunk succeeded"); dset_id = H5Dcreate2(file_id, PARTIAL_NO_SELECTION_DATASET_NAME, H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); VRFY((dset_id >= 0), "H5Dcreate2 succeeded"); /* * Setup hyperslab selection to split the dataset among the ranks. * * The ranks will write rows across the dataset. 
*/ start[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE * mpi_rank; start[1] = 0; stride[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE; stride[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE; count[0] = 1; count[1] = mpi_size; block[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE; block[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE; VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), "H5Sselect_hyperslab succeeded"); sel_dims[0] = count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE); mspace_id = H5Screate_simple(1, sel_dims, NULL); VRFY((mspace_id >= 0), "H5Screate_simple succeeded"); data = calloc(1, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int)); VRFY((data != NULL), "calloc succeeded"); dxpl_id = H5Pcreate(H5P_DATASET_XFER); VRFY((dxpl_id >= 0), "H5Pcreate succeeded"); /* * Enable collective access for the data transfer. */ VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded"); VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded"); VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded"); /* * Ensure that linked-chunk I/O is performed since this is * the particular code path where the issue lies and we don't * want the library doing multi-chunk I/O behind our backs. */ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded"); read_buf = malloc(count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int)); VRFY((read_buf != NULL), "malloc succeeded"); /* * Make sure to call H5Sselect_none() on the non-participating process. */ if (PARTIAL_NO_SELECTION_NO_SEL_PROCESS) { VRFY((H5Sselect_none(fspace_id) >= 0), "H5Sselect_none succeeded"); VRFY((H5Sselect_none(mspace_id) >= 0), "H5Sselect_none succeeded"); } /* * Finally have each rank read their section of data back from the dataset. */ VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0), "H5Dread succeeded"); /* * Check data integrity just to be sure. */ if (!PARTIAL_NO_SELECTION_NO_SEL_PROCESS) { VRFY((!memcmp(data, read_buf, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int))), "memcmp succeeded"); } if (dataset_dims) { free(dataset_dims); dataset_dims = NULL; } if (data) { free(data); data = NULL; } if (read_buf) { free(read_buf); read_buf = NULL; } VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded"); VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded"); VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded"); VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded"); VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded"); VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); }
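/* The PARTIAL_NO_SELECTION_* macros used above are defined elsewhere in the
 * test source. The definitions below are plausible values consistent with how
 * the macros are used (a 2-D chunked dataset scaled by the number of ranks,
 * with one designated rank making no selection); treat them as assumptions
 * rather than the original values.
 */
#define PARTIAL_NO_SELECTION_DATASET_NAME   "partial_no_selection_dset"
#define PARTIAL_NO_SELECTION_DATASET_NDIMS  2
#define PARTIAL_NO_SELECTION_Y_DIM_SCALE    100
#define PARTIAL_NO_SELECTION_X_DIM_SCALE    200
/* One rank (here the last) joins the collective read with an empty selection,
 * which is the situation that used to trigger the HDFFV-10501 hang. */
#define PARTIAL_NO_SELECTION_NO_SEL_PROCESS (mpi_rank == mpi_size - 1)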
/* * This function is to verify the data from multiple group testing. It opens * every dataset in every group and check their correctness. * * Changes: Updated function to use a dynamically calculated size, * instead of the old SIZE #define. This should allow it * to function with an arbitrary number of processors. * * JRM - 8/11/04 */ void multiple_group_read(void) { int mpi_rank, mpi_size, error_num, size; int m; hbool_t use_gpfs = FALSE; char gname[64]; hid_t plist, fid, gid, memspace, filespace; hsize_t chunk_origin[DIM]; hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; const H5Ptest_param_t *pt; char *filename; int ngroups; pt = GetTestParameters(); filename = pt->name; ngroups = pt->count; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); size = get_size(); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs); fid = H5Fopen(filename, H5F_ACC_RDONLY, plist); H5Pclose(plist); /* decide hyperslab for each process */ get_slab(chunk_origin, chunk_dims, count, file_dims, size); /* select hyperslab for memory and file space */ memspace = H5Screate_simple(DIM, file_dims, NULL); H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); filespace = H5Screate_simple(DIM, file_dims, NULL); H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); /* open every group under root group. */ for(m=0; m<ngroups; m++) { sprintf(gname, "group%d", m); gid = H5Gopen(fid, gname); VRFY((gid > 0), gname); /* check the data. */ if(m != 0) if( (error_num = read_dataset(memspace, filespace, gid))>0) nerrors += error_num; /* check attribute.*/ error_num = 0; if( (error_num = read_attribute(gid, is_group, m))>0 ) nerrors += error_num; H5Gclose(gid); #ifdef BARRIER_CHECKS if(!((m+1)%10)) MPI_Barrier(MPI_COMM_WORLD); #endif /* BARRIER_CHECKS */ } /* open all the groups in vertical direction. */ gid = H5Gopen(fid, "group0"); VRFY((gid>0), "group0"); recursive_read_group(memspace, filespace, gid, 0); H5Gclose(gid); H5Sclose(filespace); H5Sclose(memspace); H5Fclose(fid); }
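/* multiple_group_read(), multiple_dset_write(), multiple_group_write(), and
 * collective_group_write() all partition a size x size array by rows through
 * the get_slab() helper, which is not included in this excerpt. The sketch
 * below shows the decomposition it is assumed to compute (a contiguous block
 * of size/mpi_size rows per process); the real helper may differ in details.
 */
static void
get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[],
         hsize_t file_dims[], int size)
{
    int mpi_rank, mpi_size;

    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    if(chunk_origin != NULL) {
        chunk_origin[0] = (hsize_t)(mpi_rank * (size / mpi_size)); /* first row owned */
        chunk_origin[1] = 0;
    }
    if(chunk_dims != NULL) {
        chunk_dims[0] = (hsize_t)(size / mpi_size);                /* rows per process */
        chunk_dims[1] = (hsize_t)size;                             /* full row width */
    }
    if(file_dims != NULL)
        file_dims[0] = file_dims[1] = (hsize_t)size;
    if(count != NULL)
        count[0] = count[1] = 1;                                   /* one block each */
}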
/* * Example of using PHDF5 to create ndatasets datasets. Each process write * a slab of array to the file. * * Changes: Updated function to use a dynamically calculated size, * instead of the old SIZE #define. This should allow it * to function with an arbitrary number of processors. * * JRM - 8/11/04 */ void multiple_dset_write(void) { int i, j, n, mpi_size, mpi_rank, size; hid_t iof, plist, dataset, memspace, filespace; hid_t dcpl; /* Dataset creation property list */ hbool_t use_gpfs = FALSE; /* Use GPFS hints */ hsize_t chunk_origin [DIM]; hsize_t chunk_dims [DIM], file_dims [DIM]; hsize_t count[DIM]={1,1}; double * outme = NULL; double fill=1.0; /* Fill value */ char dname [100]; herr_t ret; const H5Ptest_param_t *pt; char *filename; int ndatasets; pt = GetTestParameters(); filename = pt->name; ndatasets = pt->count; size = get_size(); MPI_Comm_rank (MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size (MPI_COMM_WORLD, &mpi_size); outme = HDmalloc((size_t)(size * size * sizeof(double))); VRFY((outme != NULL), "HDmalloc succeeded for outme"); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs); VRFY((plist>=0), "create_faccess_plist succeeded"); iof = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); VRFY((iof>=0), "H5Fcreate succeeded"); ret = H5Pclose (plist); VRFY((ret>=0), "H5Pclose succeeded"); /* decide the hyperslab according to process number. */ get_slab(chunk_origin, chunk_dims, count, file_dims, size); memspace = H5Screate_simple (DIM, chunk_dims, NULL); filespace = H5Screate_simple (DIM, file_dims, NULL); ret = H5Sselect_hyperslab (filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); VRFY((ret>=0), "mdata hyperslab selection"); /* Create a dataset creation property list */ dcpl = H5Pcreate(H5P_DATASET_CREATE); VRFY((dcpl>=0), "dataset creation property list succeeded"); ret=H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill); VRFY((ret>=0), "set fill-value succeeded"); for (n = 0; n < ndatasets; n++) { sprintf (dname, "dataset %d", n); dataset = H5Dcreate (iof, dname, H5T_NATIVE_DOUBLE, filespace, dcpl); VRFY((dataset > 0), dname); /* calculate data to write */ for (i = 0; i < size; i++) for (j = 0; j < size; j++) outme [(i * size) + j] = n*1000 + mpi_rank; H5Dwrite (dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme); H5Dclose (dataset); #ifdef BARRIER_CHECKS if (! ((n+1) % 10)) { printf("created %d datasets\n", n+1); MPI_Barrier(MPI_COMM_WORLD); } #endif /* BARRIER_CHECKS */ } H5Sclose (filespace); H5Sclose (memspace); H5Pclose (dcpl); H5Fclose (iof); HDfree(outme); }
/* * Example of using PHDF5 to create multiple groups. Under the root group, * it creates ngroups groups. Under the first group just created, it creates * recursive subgroups of depth GROUP_DEPTH. In each created group, it * generates NDATASETS datasets. Each process write a hyperslab of an array * into the file. The structure is like * * root group * | * ---------------------------- ... ... ------------------------ * | | | ... ... | | * group0*+' group1*+' group2*+' ... ... group ngroups*+' * | * 1st_child_group*' * | * 2nd_child_group*' * | * : * : * | * GROUP_DEPTHth_child_group*' * * * means the group has dataset(s). * + means the group has attribute(s). * ' means the datasets in the groups have attribute(s). * * Changes: Updated function to use a dynamically calculated size, * instead of the old SIZE #define. This should allow it * to function with an arbitrary number of processors. * * JRM - 8/16/04 */ void multiple_group_write(void) { int mpi_rank, mpi_size, size; int m; hbool_t use_gpfs = FALSE; char gname[64]; hid_t fid, gid, plist, memspace, filespace; hsize_t chunk_origin[DIM]; hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; herr_t ret; const H5Ptest_param_t *pt; char *filename; int ngroups; pt = GetTestParameters(); filename = pt->name; ngroups = pt->count; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); size = get_size(); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs); fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); H5Pclose(plist); /* decide the hyperslab according to process number. */ get_slab(chunk_origin, chunk_dims, count, file_dims, size); /* select hyperslab in memory and file spaces. These two operations are * identical since the datasets are the same. */ memspace = H5Screate_simple(DIM, file_dims, NULL); VRFY((memspace>=0), "memspace"); ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); VRFY((ret>=0), "mgroup memspace selection"); filespace = H5Screate_simple(DIM, file_dims, NULL); VRFY((filespace>=0), "filespace"); ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); VRFY((ret>=0), "mgroup filespace selection"); /* creates ngroups groups under the root group, writes datasets in * parallel. */ for(m = 0; m < ngroups; m++) { sprintf(gname, "group%d", m); gid = H5Gcreate(fid, gname, 0); VRFY((gid > 0), gname); /* create attribute for these groups. */ write_attribute(gid, is_group, m); if(m != 0) write_dataset(memspace, filespace, gid); H5Gclose(gid); #ifdef BARRIER_CHECKS if(! ((m+1) % 10)) { printf("created %d groups\n", m+1); MPI_Barrier(MPI_COMM_WORLD); } #endif /* BARRIER_CHECKS */ } /* recursively creates subgroups under the first group. */ gid = H5Gopen(fid, "group0"); create_group_recursive(memspace, filespace, gid, 0); ret = H5Gclose(gid); VRFY((ret>=0), "H5Gclose"); ret = H5Sclose(filespace); VRFY((ret>=0), "H5Sclose"); ret = H5Sclose(memspace); VRFY((ret>=0), "H5Sclose"); ret = H5Fclose(fid); VRFY((ret>=0), "H5Fclose"); }
/* Write multiple groups with a chunked dataset in each group collectively. * These groups and datasets are for testing independent read later. * * Changes: Updated function to use a dynamically calculated size, * instead of the old SIZE #define. This should allow it * to function with an arbitrary number of processors. * * JRM - 8/16/04 */ void collective_group_write(void) { int mpi_rank, mpi_size, size; int i, j, m; hbool_t use_gpfs = FALSE; char gname[64], dname[32]; hid_t fid, gid, did, plist, dcpl, memspace, filespace; DATATYPE * outme = NULL; hsize_t chunk_origin[DIM]; hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; hsize_t chunk_size[2]; /* Chunk dimensions - computed shortly */ herr_t ret1, ret2; const H5Ptest_param_t *pt; char *filename; int ngroups; pt = GetTestParameters(); filename = pt->name; ngroups = pt->count; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); size = get_size(); chunk_size[0] = (hsize_t)(size / 2); chunk_size[1] = (hsize_t)(size / 2); outme = HDmalloc((size_t)(size * size * sizeof(DATATYPE))); VRFY((outme != NULL), "HDmalloc succeeded for outme"); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs); fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); H5Pclose(plist); /* decide the hyperslab according to process number. */ get_slab(chunk_origin, chunk_dims, count, file_dims, size); /* select hyperslab in memory and file spaces. These two operations are * identical since the datasets are the same. */ memspace = H5Screate_simple(DIM, file_dims, NULL); ret1 = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); filespace = H5Screate_simple(DIM, file_dims, NULL); ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); VRFY((memspace>=0), "memspace"); VRFY((filespace>=0), "filespace"); VRFY((ret1>=0), "mgroup memspace selection"); VRFY((ret2>=0), "mgroup filespace selection"); dcpl = H5Pcreate(H5P_DATASET_CREATE); ret1 = H5Pset_chunk (dcpl, 2, chunk_size); VRFY((dcpl>=0), "dataset creation property"); VRFY((ret1>=0), "set chunk for dataset creation property"); /* creates ngroups groups under the root group, writes chunked * datasets in parallel. */ for(m = 0; m < ngroups; m++) { sprintf(gname, "group%d", m); gid = H5Gcreate(fid, gname, 0); VRFY((gid > 0), gname); sprintf(dname, "dataset%d", m); did = H5Dcreate(gid, dname, H5T_NATIVE_INT, filespace, dcpl); VRFY((did > 0), dname); for(i=0; i < size; i++) for(j=0; j < size; j++) outme[(i * size) + j] = (i+j)*1000 + mpi_rank; H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme); H5Dclose(did); H5Gclose(gid); #ifdef BARRIER_CHECKS if(! ((m+1) % 10)) { printf("created %d groups\n", m+1); MPI_Barrier(MPI_COMM_WORLD); } #endif /* BARRIER_CHECKS */ } H5Pclose(dcpl); H5Sclose(filespace); H5Sclose(memspace); H5Fclose(fid); HDfree(outme); }
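/* collective_group_write() and independent_group_read() (shown earlier) are
 * meant to run as a pair against the same file and group count. The snippet
 * below is a hedged sketch of how such a parameterized pair is typically
 * registered with the testphdf5 driver; the AddTest() signature, test names,
 * and parameter wiring are assumptions inferred from the H5Ptest_param_t /
 * GetTestParameters() usage above, not copied from the original driver.
 */
static H5Ptest_param_t ngroups_params;

static void
register_group_tests(char *test_file, int ngroups)
{
    ngroups_params.name  = test_file;  /* file shared by the write and read phases */
    ngroups_params.count = ngroups;    /* same group count for both phases */

    AddTest("cngrpw", collective_group_write, NULL,
            "collective group and dataset write", &ngroups_params);
    AddTest("ingrpr", independent_group_read, NULL,
            "independent group and dataset read", &ngroups_params);
}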
/* Example of using PHDF5 to read a partially written dataset. The dataset does
 * not have actual data written to the entire raw data area and relies on the
 * default fill value of zeros to work correctly.
 *
 * Changes:     Removed the assert that mpi_size <= the SIZE #define.
 *              As best I can tell, this assert isn't needed here,
 *              and in any case, the SIZE #define is being removed
 *              in an update of the functions in this file to run
 *              with an arbitrary number of processes.
 *
 *              Also added code to free dynamically allocated buffers.
 *
 *                                              JRM - 8/11/04
 */
void dataset_fillvalue(void)
{
    int mpi_size, mpi_rank;     /* MPI info */
    hbool_t use_gpfs = FALSE;   /* Don't use GPFS stuff for this test */
    int err_num;                /* Number of errors */
    hid_t iof,                  /* File ID */
        fapl,                   /* File access property list ID */
        dxpl,                   /* Data transfer property list ID */
        dataset,                /* Dataset ID */
        memspace,               /* Memory dataspace ID */
        filespace;              /* Dataset's dataspace ID */
    char dname[]="dataset";     /* Name of dataset */
    hsize_t dset_dims[4] = {0, 6, 7, 8};
    hsize_t req_start[4] = {0, 0, 0, 0};
    hsize_t req_count[4] = {1, 6, 7, 8};
    hsize_t dset_size;          /* Dataset size */
    int *rdata, *wdata;         /* Buffers for data to read and write */
    int *twdata, *trdata;       /* Temporary pointer into buffer */
    int acc, i, j, k, l;        /* Local index variables */
    herr_t ret;                 /* Generic return value */
    const char *filename;

    MPI_Comm_rank (MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size (MPI_COMM_WORLD, &mpi_size);

    filename = GetTestParameters();

    /* Set the dataset dimension to be one row more than number of processes */
    /* and calculate the actual dataset size. */
    dset_dims[0]=mpi_size+1;
    dset_size=dset_dims[0]*dset_dims[1]*dset_dims[2]*dset_dims[3];

    /* Allocate space for the buffers */
    rdata=HDmalloc((size_t)(dset_size*sizeof(int)));
    VRFY((rdata != NULL), "HDmalloc succeeded for read buffer");
    wdata=HDmalloc((size_t)(dset_size*sizeof(int)));
    VRFY((wdata != NULL), "HDmalloc succeeded for write buffer");

    fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs);
    VRFY((fapl >= 0), "create_faccess_plist succeeded");

    /*
     * Create HDF5 file
     */
    iof = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    VRFY((iof >= 0), "H5Fcreate succeeded");

    filespace = H5Screate_simple(4, dset_dims, NULL);
    VRFY((filespace >= 0), "File H5Screate_simple succeeded");

    dataset = H5Dcreate(iof, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT);
    VRFY((dataset >= 0), "H5Dcreate succeeded");

    memspace = H5Screate_simple(4, dset_dims, NULL);
    VRFY((memspace >= 0), "Memory H5Screate_simple succeeded");

    /*
     * Read dataset before any data is written.
     */
    /* set entire read buffer with the constant 2 */
    HDmemset(rdata,2,(size_t)(dset_size*sizeof(int)));
    /* Independently read the entire dataset back */
    ret=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
    VRFY((ret >= 0), "H5Dread succeeded");

    /* Verify all data read are the fill value 0 */
    trdata=rdata;
    err_num=0;
    for (i=0; i<(int)dset_dims[0]; i++)
        for (j=0; j<(int)dset_dims[1]; j++)
            for (k=0; k<(int)dset_dims[2]; k++)
                for (l=0; l<(int)dset_dims[3]; l++, trdata++)
                    if( *trdata != 0)
                        if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
                            printf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,j,k,l, *trdata);
    if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
        printf("[more errors ...]\n");
    if(err_num){
        printf("%d errors found in check_value\n", err_num);
        nerrors++;
    }

    /* Barrier to ensure all processes have completed the above test. */
    MPI_Barrier(MPI_COMM_WORLD);

    /*
     * Each process writes 1 row of data.
Thus last row is not written. */ /* Create hyperslabs in memory and file dataspaces */ req_start[0]=mpi_rank; ret=H5Sselect_hyperslab(filespace, H5S_SELECT_SET, req_start, NULL, req_count, NULL); VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace"); ret=H5Sselect_hyperslab(memspace, H5S_SELECT_SET, req_start, NULL, req_count, NULL); VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace"); /* Create DXPL for collective I/O */ dxpl = H5Pcreate (H5P_DATASET_XFER); VRFY((dxpl >= 0), "H5Pcreate succeeded"); ret=H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); /* Fill write buffer with some values */ twdata=wdata; for (i=0, acc=0; i<(int)dset_dims[0]; i++) for (j=0; j<(int)dset_dims[1]; j++) for (k=0; k<(int)dset_dims[2]; k++) for (l=0; l<(int)dset_dims[3]; l++) *twdata++ = acc++; /* Collectively write a hyperslab of data to the dataset */ ret=H5Dwrite(dataset, H5T_NATIVE_INT, memspace, filespace, dxpl, wdata); VRFY((ret >= 0), "H5Dwrite succeeded"); /* Barrier here, to allow MPI-posix I/O to sync */ MPI_Barrier(MPI_COMM_WORLD); /* * Read dataset after partial write. */ /* set entire read buffer with the constant 2 */ HDmemset(rdata,2,(size_t)(dset_size*sizeof(int))); /* Independently read the entire dataset back */ ret=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); VRFY((ret >= 0), "H5Dread succeeded"); /* Verify correct data read */ twdata=wdata; trdata=rdata; err_num=0; for (i=0; i<(int)dset_dims[0]; i++) for (j=0; j<(int)dset_dims[1]; j++) for (k=0; k<(int)dset_dims[2]; k++) for (l=0; l<(int)dset_dims[3]; l++, twdata++, trdata++) if(i<mpi_size) { if( *twdata != *trdata ) if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED) printf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n", i,j,k,l, *twdata, *trdata); } /* end if */ else { if( *trdata != 0) if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED) printf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,j,k,l, *trdata); } /* end else */ if(err_num > MAX_ERR_REPORT && !VERBOSE_MED) printf("[more errors ...]\n"); if(err_num){ printf("%d errors found in check_value\n", err_num); nerrors++; } /* Close all file objects */ ret=H5Dclose (dataset); VRFY((ret >= 0), "H5Dclose succeeded"); ret=H5Sclose (filespace); VRFY((ret >= 0), "H5Sclose succeeded"); ret=H5Fclose (iof); VRFY((ret >= 0), "H5Fclose succeeded"); /* Close memory dataspace */ ret=H5Sclose (memspace); VRFY((ret >= 0), "H5Sclose succeeded"); /* Close dxpl */ ret=H5Pclose (dxpl); VRFY((ret >= 0), "H5Pclose succeeded"); /* Close fapl */ ret=H5Pclose (fapl); VRFY((ret >= 0), "H5Pclose succeeded"); /* free the buffers */ HDfree(rdata); HDfree(wdata); }
/* Example of using PHDF5 to create "large" datasets. (>2GB, >4GB, >8GB) * Actual data is _not_ written to these datasets. Dataspaces are exact * sizes (2GB, 4GB, etc.), but the metadata for the file pushes the file over * the boundary of interest. * * Changes: Removed the assert that mpi_size <= the SIZE #define. * As best I can tell, this assert isn't needed here, * and in any case, the SIZE #define is being removed * in an update of the functions in this file to run * with an arbitrary number of processes. * * JRM - 8/11/04 */ void big_dataset(void) { int mpi_size, mpi_rank; /* MPI info */ hbool_t use_gpfs = FALSE; /* Don't use GPFS stuff for this test */ hid_t iof, /* File ID */ fapl, /* File access property list ID */ dataset, /* Dataset ID */ filespace; /* Dataset's dataspace ID */ hsize_t file_dims [4]; /* Dimensions of dataspace */ char dname[]="dataset"; /* Name of dataset */ MPI_Offset file_size; /* Size of file on disk */ herr_t ret; /* Generic return value */ const char *filename; MPI_Comm_rank (MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size (MPI_COMM_WORLD, &mpi_size); /* Verify MPI_Offset can handle larger than 2GB sizes */ VRFY((sizeof(MPI_Offset)>4), "sizeof(MPI_Offset)>4"); filename = GetTestParameters(); fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs); VRFY((fapl >= 0), "create_faccess_plist succeeded"); /* * Create >2GB HDF5 file */ iof = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); VRFY((iof >= 0), "H5Fcreate succeeded"); /* Define dataspace for 2GB dataspace */ file_dims[0]= 2; file_dims[1]= 1024; file_dims[2]= 1024; file_dims[3]= 1024; filespace = H5Screate_simple (4, file_dims, NULL); VRFY((filespace >= 0), "H5Screate_simple succeeded"); dataset = H5Dcreate (iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT); VRFY((dataset >= 0), "H5Dcreate succeeded"); /* Close all file objects */ ret=H5Dclose (dataset); VRFY((ret >= 0), "H5Dclose succeeded"); ret=H5Sclose (filespace); VRFY((ret >= 0), "H5Sclose succeeded"); ret=H5Fclose (iof); VRFY((ret >= 0), "H5Fclose succeeded"); /* Check that file of the correct size was created */ file_size=h5_get_file_size(filename); VRFY((file_size == 2147485696ULL), "File is correct size"); /* * Create >4GB HDF5 file */ iof = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); VRFY((iof >= 0), "H5Fcreate succeeded"); /* Define dataspace for 4GB dataspace */ file_dims[0]= 4; file_dims[1]= 1024; file_dims[2]= 1024; file_dims[3]= 1024; filespace = H5Screate_simple (4, file_dims, NULL); VRFY((filespace >= 0), "H5Screate_simple succeeded"); dataset = H5Dcreate (iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT); VRFY((dataset >= 0), "H5Dcreate succeeded"); /* Close all file objects */ ret=H5Dclose (dataset); VRFY((ret >= 0), "H5Dclose succeeded"); ret=H5Sclose (filespace); VRFY((ret >= 0), "H5Sclose succeeded"); ret=H5Fclose (iof); VRFY((ret >= 0), "H5Fclose succeeded"); /* Check that file of the correct size was created */ file_size=h5_get_file_size(filename); VRFY((file_size == 4294969344ULL), "File is correct size"); /* * Create >8GB HDF5 file */ iof = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); VRFY((iof >= 0), "H5Fcreate succeeded"); /* Define dataspace for 8GB dataspace */ file_dims[0]= 8; file_dims[1]= 1024; file_dims[2]= 1024; file_dims[3]= 1024; filespace = H5Screate_simple (4, file_dims, NULL); VRFY((filespace >= 0), "H5Screate_simple succeeded"); dataset = H5Dcreate (iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT); VRFY((dataset >= 0), "H5Dcreate succeeded"); /* Close 
all file objects */ ret=H5Dclose (dataset); VRFY((ret >= 0), "H5Dclose succeeded"); ret=H5Sclose (filespace); VRFY((ret >= 0), "H5Sclose succeeded"); ret=H5Fclose (iof); VRFY((ret >= 0), "H5Fclose succeeded"); /* Check that file of the correct size was created */ file_size=h5_get_file_size(filename); VRFY((file_size == 8589936640ULL), "File is correct size"); /* Close fapl */ ret=H5Pclose (fapl); VRFY((ret >= 0), "H5Pclose succeeded"); }
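/* The file sizes verified above are not round powers of two: each is the
 * dataset's raw data area plus a small, constant amount of file metadata
 * (2048 bytes here, presumably the superblock and object headers). A quick
 * check of the arithmetic:
 *
 *     2 GiB dataset:  2 * 1024^3 + 2048 = 2147485696
 *     4 GiB dataset:  4 * 1024^3 + 2048 = 4294969344
 *     8 GiB dataset:  8 * 1024^3 + 2048 = 8589936640
 */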
/* Example of using PHDF5 to create, write, and read a compact dataset.
 *
 * Changes:     Updated function to use a dynamically calculated size,
 *              instead of the old SIZE #define. This should allow it
 *              to function with an arbitrary number of processors.
 *
 *                                              JRM - 8/11/04
 */
void compact_dataset(void)
{
    int i, j, mpi_size, mpi_rank, size, err_num=0;
    hbool_t use_gpfs = FALSE;
    hid_t iof, plist, dcpl, dxpl, dataset, filespace;
    hsize_t file_dims [DIM];
    double * outme;
    double * inme;
    char dname[]="dataset";
    herr_t ret;
    const char *filename;

    size = get_size();

    for ( i = 0; i < DIM; i++ ) {
        file_dims[i] = size;
    }

    MPI_Comm_rank (MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size (MPI_COMM_WORLD, &mpi_size);

    outme = HDmalloc((size_t)(size * size * sizeof(double)));
    VRFY((outme != NULL), "HDmalloc succeeded for outme");

    inme = HDmalloc((size_t)(size * size * sizeof(double)));
    VRFY((inme != NULL), "HDmalloc succeeded for inme");

    filename = GetTestParameters();
    VRFY((mpi_size <= size), "mpi_size <= size");

    plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs);
    iof = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);

    /* Define data space */
    filespace = H5Screate_simple (DIM, file_dims, NULL);

    /* Create a compact dataset */
    dcpl = H5Pcreate(H5P_DATASET_CREATE);
    VRFY((dcpl>=0), "dataset creation property list succeeded");
    ret=H5Pset_layout(dcpl, H5D_COMPACT);
    VRFY((ret >= 0), "set property list for compact dataset");
    ret=H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
    VRFY((ret >= 0), "set space allocation time for compact dataset");

    dataset = H5Dcreate (iof, dname, H5T_NATIVE_DOUBLE, filespace, dcpl);
    VRFY((dataset >= 0), "H5Dcreate succeeded");

    /* set up the collective transfer properties list */
    dxpl = H5Pcreate (H5P_DATASET_XFER);
    VRFY((dxpl >= 0), "");
    ret=H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    VRFY((ret >= 0), "H5Pcreate xfer succeeded");

    /* Recalculate data to write. Each process writes the same data. */
    for (i = 0; i < size; i++)
        for (j = 0; j < size; j++)
            outme[(i * size) + j] = (i+j)*1000;

    ret=H5Dwrite (dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, outme);
    VRFY((ret >= 0), "H5Dwrite succeeded");

    H5Pclose (dcpl);
    H5Pclose (dxpl);
    H5Pclose (plist);
    H5Dclose (dataset);
    H5Sclose (filespace);
    H5Fclose (iof);

    /* Open the file and dataset, read and compare the data. */
    plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs);
    iof = H5Fopen(filename, H5F_ACC_RDONLY, plist);
    VRFY((iof >= 0), "H5Fopen succeeded");

    /* set up the collective transfer properties list */
    dxpl = H5Pcreate (H5P_DATASET_XFER);
    VRFY((dxpl >= 0), "");
    ret=H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    VRFY((ret >= 0), "H5Pcreate xfer succeeded");

    dataset = H5Dopen(iof, dname);
    VRFY((dataset >= 0), "H5Dopen succeeded");

    ret = H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, inme);
    VRFY((ret >= 0), "H5Dread succeeded");

    /* Verify data value */
    for (i = 0; i < size; i++)
        for (j = 0; j < size; j++)
            if ( inme[(i * size) + j] != outme[(i * size) + j])
                if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
                    printf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j, outme[(i * size) + j], inme[(i * size) + j]);

    H5Pclose(plist);
    H5Pclose(dxpl);
    H5Dclose(dataset);
    H5Fclose(iof);
    HDfree(inme);
    HDfree(outme);
}
void io_mode_confusion(void) { /* * HDF5 APIs definitions */ const int rank = 1; const char *dataset_name = "IntArray"; hid_t file_id, dset_id; /* file and dataset identifiers */ hid_t filespace, memspace; /* file and memory dataspace */ /* identifiers */ hsize_t dimsf[1]; /* dataset dimensions */ int data[N] = {1}; /* pointer to data buffer to write */ hsize_t coord[N] = {0L,1L,2L,3L}; hsize_t start[1]; hsize_t stride[1]; hsize_t count[1]; hsize_t block[1]; hid_t plist_id; /* property list identifier */ herr_t status; /* * MPI variables */ int mpi_size, mpi_rank; /* * test bed related variables */ const char * fcn_name = "io_mode_confusion"; const hbool_t verbose = FALSE; const H5Ptest_param_t * pt; char * filename; pt = GetTestParameters(); filename = pt->name; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); /* * Set up file access property list with parallel I/O access */ if ( verbose ) HDfprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name); plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id != -1), "H5Pcreate() failed"); status = H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL); VRFY(( status >= 0 ), "H5Pset_fapl_mpio() failed"); /* * Create a new file collectively and release property list identifier. */ if ( verbose ) HDfprintf(stdout, "%0d:%s: Creating new file.\n", mpi_rank, fcn_name); file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id); VRFY(( file_id >= 0 ), "H5Fcreate() failed"); status = H5Pclose(plist_id); VRFY(( status >= 0 ), "H5Pclose() failed"); /* * Create the dataspace for the dataset. */ if ( verbose ) HDfprintf(stdout, "%0d:%s: Creating the dataspace for the dataset.\n", mpi_rank, fcn_name); dimsf[0] = N; filespace = H5Screate_simple(rank, dimsf, NULL); VRFY(( filespace >= 0 ), "H5Screate_simple() failed."); /* * Create the dataset with default properties and close filespace. 
*/ if ( verbose ) HDfprintf(stdout, "%0d:%s: Creating the dataset, and closing filespace.\n", mpi_rank, fcn_name); dset_id = H5Dcreate(file_id, dataset_name, H5T_NATIVE_INT, filespace, H5P_DEFAULT); VRFY(( dset_id >= 0 ), "H5Dcreate() failed"); status = H5Sclose(filespace); VRFY(( status >= 0 ), "H5Sclose() failed"); if ( verbose ) HDfprintf(stdout, "%0d:%s: Calling H5Screate_simple().\n", mpi_rank, fcn_name); memspace = H5Screate_simple(rank, dimsf, NULL); VRFY(( memspace >= 0 ), "H5Screate_simple() failed."); if( mpi_rank == 0 ) { if ( verbose ) HDfprintf(stdout, "%0d:%s: Calling H5Sselect_all(memspace).\n", mpi_rank, fcn_name); status = H5Sselect_all(memspace); VRFY(( status >= 0 ), "H5Sselect_all() failed"); } else { if ( verbose ) HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none(memspace).\n", mpi_rank, fcn_name); status = H5Sselect_none(memspace); VRFY(( status >= 0 ), "H5Sselect_none() failed"); } if ( verbose ) HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", mpi_rank, fcn_name); MPI_Barrier(MPI_COMM_WORLD); if ( verbose ) HDfprintf(stdout, "%0d:%s: Calling H5Dget_space().\n", mpi_rank, fcn_name); filespace = H5Dget_space(dset_id); VRFY(( filespace >= 0 ), "H5Dget_space() failed"); start[0] = 0L; stride[0] = 1; count[0] = 1; block[0] = N; if ( mpi_rank == 0 ) { /* select all */ if ( verbose ) HDfprintf(stdout, "%0d:%s: Calling H5Sselect_elements() -- set up hang?\n", mpi_rank, fcn_name); status = H5Sselect_elements(filespace, H5S_SELECT_SET, N, (const hsize_t **)&coord); VRFY(( status >= 0 ), "H5Sselect_elements() failed"); } else { /* select nothing */ if ( verbose ) HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none().\n", mpi_rank, fcn_name); status = H5Sselect_none(filespace); VRFY(( status >= 0 ), "H5Sselect_none() failed"); } if ( verbose ) HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", mpi_rank, fcn_name); MPI_Barrier(MPI_COMM_WORLD); if ( verbose ) HDfprintf(stdout, "%0d:%s: Calling H5Pcreate().\n", mpi_rank, fcn_name); plist_id = H5Pcreate(H5P_DATASET_XFER); VRFY(( plist_id != -1 ), "H5Pcreate() failed"); if ( verbose ) HDfprintf(stdout, "%0d:%s: Calling H5Pset_dxpl_mpio().\n", mpi_rank, fcn_name); status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE); VRFY(( status >= 0 ), "H5Pset_dxpl_mpio() failed"); if ( verbose ) HDfprintf(stdout, "%0d:%s: Calling H5Dwrite() -- hang here?.\n", mpi_rank, fcn_name); status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data); if ( verbose ) HDfprintf(stdout, "%0d:%s: Returned from H5Dwrite(), status=%d.\n", mpi_rank, fcn_name, status); VRFY(( status >= 0 ), "H5Dwrite() failed"); /* * Close/release resources. */ if ( verbose ) HDfprintf(stdout, "%0d:%s: Cleaning up from test.\n", mpi_rank, fcn_name); status = H5Dclose(dset_id); VRFY(( status >= 0 ), "H5Dclose() failed"); status = H5Sclose(filespace); VRFY(( status >= 0 ), "H5Dclose() failed"); status = H5Sclose(memspace); VRFY(( status >= 0 ), "H5Sclose() failed"); status = H5Pclose(plist_id); VRFY(( status >= 0 ), "H5Pclose() failed"); status = H5Fclose(file_id); VRFY(( status >= 0 ), "H5Fclose() failed"); if ( verbose ) HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name); return; } /* io_mode_confusion() */