Example No. 1
int get_size(void)
{
    int mpi_rank;
    int mpi_size;
    int size = SIZE;

    MPI_Comm_rank (MPI_COMM_WORLD, &mpi_rank); /* needed for VRFY */
    MPI_Comm_size (MPI_COMM_WORLD, &mpi_size);

    if ( mpi_size > size ) {

        if ( (mpi_size % 2) == 0 ) {

            size = mpi_size;

        } else {

            size = mpi_size + 1;
        }
    }

    VRFY((mpi_size <= size), "mpi_size <= size");
    VRFY(((size % 2) == 0), "size isn't even");

    return(size);

} /* get_size() */
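
A minimal usage sketch (not part of the original tests): callers treat the returned value as the edge length of a square size x size data array, so a typical allocation looks like the following, assuming the test framework's DATATYPE, HDmalloc/HDfree and VRFY are available as in the examples below.

    int       size = get_size();
    DATATYPE *buf;

    /* hypothetical caller -- mirrors how read_dataset()/write_dataset() below use get_size() */
    buf = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
    VRFY((buf != NULL), "HDmalloc succeeded for buf");
    /* ... fill or verify the size x size array ... */
    HDfree(buf);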
Example No. 2
static MPI_Offset
get_filesize(const char *filename)
{
    int		mpierr;
    MPI_File	fd;
    MPI_Offset	filesize;
#ifndef H5_HAVE_MPI_GET_SIZE
    struct stat stat_buf;
#endif

#ifdef H5_HAVE_MPI_GET_SIZE
    mpierr = MPI_File_open(MPI_COMM_SELF, (char*)filename, MPI_MODE_RDONLY,
	MPI_INFO_NULL, &fd);
    VRFY((mpierr == MPI_SUCCESS), "");

    mpierr = MPI_File_get_size(fd, &filesize);
    VRFY((mpierr == MPI_SUCCESS), "");

    mpierr = MPI_File_close(&fd);
    VRFY((mpierr == MPI_SUCCESS), "");
#else
    /* Some systems (only SGI Altix Propack 4 so far) don't return the correct
     * file size from MPI_File_get_size.  Use stat instead.
     */
    if((mpierr = stat(filename, &stat_buf)) < 0)
        VRFY((mpierr == MPI_SUCCESS), "");

    /* Hopefully this casting is safe */
    filesize = (MPI_Offset)(stat_buf.st_size);
#endif

    return(filesize);
}
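
A hedged usage sketch of get_filesize(), mirroring the file-size check in parallel_access_dataset() further down; nchunks and CHUNKSIZE are assumptions borrowed from that example.

    MPI_Offset filesize     = get_filesize(filename);
    MPI_Offset est_filesize = (MPI_Offset)nchunks * CHUNKSIZE * (MPI_Offset)sizeof(unsigned char);

    VRFY((filesize >= est_filesize), "file size check");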
Example No. 3
static int
test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc)
{
    MPI_Request req[2];
    MPI_Status status;
    hid_t pl;                   /* Decoded property list */
    size_t buf_size = 0;
    void *sbuf = NULL;
    herr_t ret;         	/* Generic return value */

    if(mpi_rank == 0) {
        int send_size = 0;

        /* first call to encode returns only the size of the buffer needed */
        ret = H5Pencode(orig_pl, NULL, &buf_size);
        VRFY((ret >= 0), "H5Pencode succeeded");

        sbuf = (uint8_t *)HDmalloc(buf_size);

        ret = H5Pencode(orig_pl, sbuf, &buf_size);
        VRFY((ret >= 0), "H5Pencode succeeded");

        /* this is a temp fix to send this size_t */
        send_size = (int)buf_size;

        MPI_Isend(&send_size, 1, MPI_INT, recv_proc, 123, MPI_COMM_WORLD, &req[0]);
        MPI_Isend(sbuf, send_size, MPI_BYTE, recv_proc, 124, MPI_COMM_WORLD, &req[1]);
    } /* end if */

    if(mpi_rank == recv_proc) {
        int recv_size;
        void *rbuf;

        MPI_Recv(&recv_size, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status);
        buf_size = recv_size;
        rbuf = (uint8_t *)HDmalloc(buf_size);
        MPI_Recv(rbuf, recv_size, MPI_BYTE, 0, 124, MPI_COMM_WORLD, &status);

        pl = H5Pdecode(rbuf);
        VRFY((pl >= 0), "H5Pdecode succeeded");

        VRFY(H5Pequal(orig_pl, pl), "Property List Equal Succeeded");

        ret = H5Pclose(pl);
        VRFY((ret >= 0), "H5Pclose succeeded");

        if(NULL != rbuf)
            HDfree(rbuf);
    } /* end if */

    if(0 == mpi_rank)
        MPI_Waitall(2, req, MPI_STATUSES_IGNORE);

    if(NULL != sbuf)
        HDfree(sbuf);

    MPI_Barrier(MPI_COMM_WORLD);
    return(0);
}
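
For reference, the H5Pencode()/H5Pdecode() round trip exercised above can also be run in a single process without MPI; this is a minimal sketch (not part of the test) using only the public HDF5 property-list API.

    size_t  nalloc = 0;
    void   *buf    = NULL;
    hid_t   dcpl   = H5Pcreate(H5P_DATASET_CREATE);
    hid_t   copy   = -1;

    H5Pencode(dcpl, NULL, &nalloc);       /* first call only queries the required buffer size */
    buf = HDmalloc(nalloc);
    H5Pencode(dcpl, buf, &nalloc);        /* second call fills the buffer */

    copy = H5Pdecode(buf);                /* reconstruct the property list from the buffer */
    VRFY((H5Pequal(dcpl, copy) > 0), "decoded list equals original");

    H5Pclose(copy);
    H5Pclose(dcpl);
    HDfree(buf);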
Example No. 4
static herr_t
dset_read(int local_dim, file_descr *fd, parameters *parms, void *buffer,
    const char *buffer2)
{
    int cur_dim = order[local_dim]-1;
    hsize_t i;
    int j;
    herr_t hrc;
    int         ret_code = SUCCESS;

    /* iterate on the current dimension */
    for (i=0; i < parms->dset_size[cur_dim]; i += parms->buf_size[cur_dim]){

        h5offset[cur_dim] = (hssize_t)i;
        offset[cur_dim] = (HDoff_t)i;

        /* if traversal of the order array is incomplete, recurse */
        if (local_dim > 0){

            ret_code = dset_read(local_dim-1, fd, parms, buffer, buffer2);

        /* otherwise, read the buffer from the dataset */
        }else{

            switch (parms->io_type) {

            case POSIXIO:
                for (j=0; j<parms->rank; j++) {
                   buf_offset[j] = 0;
                }
                buf_p = (unsigned char*)buffer;
                posix_buffer_read(0, fd, parms, buffer);
                break;

            case HDF5:
                hrc = H5Soffset_simple(h5dset_space_id, h5offset);
                VRFY((hrc >= 0), "H5Soffset_simple");
                /* Read the buffer out */
                hrc = H5Dread(h5ds_id, ELMT_H5_TYPE, h5mem_space_id,
                    h5dset_space_id, h5dxpl, buffer);
                VRFY((hrc >= 0), "H5Dread");
                break;
				
            default:
                /* unknown request */
                HDfprintf(stderr, "Unknown IO type request (%d)\n", (int)parms->io_type);
                HDassert(0 && "Unknown IO type");
                break;
            } /* switch (parms->io_type) */
        }
    }
done:
    return ret_code;
}
Example No. 5
static herr_t
posix_buffer_write(int local_dim, file_descr *fd, parameters *parms, void *buffer)
{
    int         ret_code = SUCCESS;

    /* if dimension is not contiguous, call recursively */
    if (local_dim < parms->rank-1 && local_dim != cont_dim) {
        size_t u;

        for(u = 0; u < parms->buf_size[local_dim]; u ++) {
            buf_offset[local_dim] = u;
            posix_buffer_write(local_dim+1, fd, parms, buffer);

            /* if the next dimension is cont_dim, it will fill out the buffer
               while traversing the entire dimension local_dim, so no further
               iteration is needed here */
            if (local_dim+1==cont_dim)
                break;
        }
    /* otherwise, perform contiguous POSIX access */
    } else {
        HDoff_t d_offset;
        HDoff_t linear_dset_offset = 0;
        int i, j, rc;

        buf_offset[local_dim] = 0;

        /* determine offset in the buffer */
        for(i = 0; i < parms->rank; i++) {
            d_offset = 1;

            for(j = i + 1; j < parms->rank; j++)
                d_offset *= (HDoff_t)parms->dset_size[j];

            linear_dset_offset += (offset[i] + (HDoff_t)buf_offset[i]) * d_offset;
        }

        /* only care if seek returns error */
        rc = POSIXSEEK(fd->posixfd, linear_dset_offset) < 0 ? -1 : 0;
        VRFY((rc==0), "POSIXSEEK");
        /* check if all bytes are written */
        rc = ((ssize_t)cont_size ==
             POSIXWRITE(fd->posixfd, buf_p, cont_size));
        VRFY((rc != 0), "POSIXWRITE");

        /* Advance location in buffer */
        buf_p += cont_size;

    }

done:
    return ret_code;
}
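
The offset computation above is plain row-major linearization: each coordinate is weighted by the product of all faster-varying extents. A self-contained helper showing the same arithmetic in Horner form (illustrative only, not part of the benchmark):

    /* linear index of position pos[] (here offset[i] + buf_offset[i]) in a
     * row-major array with extents dims[] (here parms->dset_size[]) */
    static HDoff_t
    linear_index(int rank, const hsize_t *dims, const HDoff_t *pos)
    {
        HDoff_t idx = 0;
        int     i;

        for (i = 0; i < rank; i++)
            idx = idx * (HDoff_t)dims[i] + pos[i];

        return idx;
    }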
Example No. 6
/*
 * This function opens all the datasets in a given group and checks the
 * data using the dataset_vrfy function.
 *
 * Changes:     Updated function to use a dynamically calculated size,
 *              instead of the old SIZE #define.  This should allow it
 *              to function with an arbitrary number of processors.
 *
 *                                              JRM - 8/11/04
 */
int read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
{
    int i, j, n, mpi_rank, mpi_size, size, attr_errors=0, vrfy_errors=0;
    char dname[32];
    DATATYPE *outdata = NULL, *indata = NULL;
    hid_t did;

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    size = get_size();

    indata = (DATATYPE*)HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
    VRFY((indata != NULL), "HDmalloc succeeded for indata");

    outdata = (DATATYPE*)HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
    VRFY((outdata != NULL), "HDmalloc succeeded for outdata");

    for(n=0; n<NDATASET; n++) {
        sprintf(dname, "dataset%d", n);
        did = H5Dopen(gid, dname);
        VRFY((did>0), dname);

        H5Dread(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT,
                indata);

        /* this is the original value */
        for(i=0; i<size; i++)
            for(j=0; j<size; j++) {
                *outdata = n*1000 + mpi_rank;
                outdata++;
            }
        outdata -= size * size;

        /* compare the original value(outdata) to the value in file(indata).*/
        vrfy_errors = check_value(indata, outdata, size);

        /* check attribute.*/
        if( (attr_errors = read_attribute(did, is_dset, n))>0 )
            vrfy_errors += attr_errors;

        H5Dclose(did);
    }

    HDfree(indata);
    HDfree(outdata);

    return vrfy_errors;
}
Example No. 7
/*
 * Creates subgroups of depth GROUP_DEPTH recursively.  Also writes datasets
 * in parallel in each group.
 */
void create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid,
                            int counter)
{
   hid_t child_gid;
   int   mpi_rank;
   char  gname[64];

   MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

#ifdef BARRIER_CHECKS
   if(! ((counter+1) % 10)) {
        printf("created %dth child groups\n", counter+1);
        MPI_Barrier(MPI_COMM_WORLD);
   }
#endif /* BARRIER_CHECKS */

   sprintf(gname, "%dth_child_group", counter+1);
   child_gid = H5Gcreate(gid, gname, 0);
   VRFY((child_gid > 0), gname);

   /* write datasets in parallel. */
   write_dataset(memspace, filespace, gid);

   if( counter < GROUP_DEPTH )
       create_group_recursive(memspace, filespace, child_gid, counter+1);

   H5Gclose(child_gid);
}
Example No. 8
static herr_t
posix_buffer_read(int local_dim, file_descr *fd, parameters *parms, void *buffer)
{
    int         ret_code = SUCCESS;

    /* if local dimension is not contiguous, recurse */
    if (local_dim < parms->rank-1 && local_dim != cont_dim) {
        size_t u;

        for(u = 0; u < parms->buf_size[local_dim]; u++) {
            buf_offset[local_dim] = u;
            ret_code = posix_buffer_read(local_dim+1, fd, parms, buffer);
            if (local_dim+1==cont_dim)
                break;
        }
    /* otherwise, perform contiguous POSIX access */
    } else {
        HDoff_t d_offset;
        HDoff_t linear_dset_offset = 0;
        int i, j, rc;

        buf_offset[local_dim] = 0;
        /* determine offset in buffer */
        for (i=0; i<parms->rank; i++){
            d_offset=1;

            for (j=i+1; j<parms->rank; j++)
                d_offset *= (HDoff_t)parms->dset_size[j];

            linear_dset_offset += (offset[i] + (HDoff_t)buf_offset[i]) * d_offset;
        }

        /* only care if seek returns error */
        rc = POSIXSEEK(fd->posixfd, linear_dset_offset) < 0 ? -1 : 0;
        VRFY((rc==0), "POSIXSEEK");
        /* check if all bytes are read */
        rc = ((ssize_t)cont_size ==
             POSIXREAD(fd->posixfd, buf_p, cont_size));
        VRFY((rc != 0), "POSIXREAD");

        /* Advance location in buffer */
        buf_p += cont_size;

    }
done:
    return ret_code;
}
Example No. 9
/* Open and read datasets and compare data
 *
 * Changes:     Updated function to use a dynamically calculated size,
 *              instead of the old SIZE #define.  This should allow it
 *              to function with an arbitrary number of processors.
 *
 *		Also added code to verify the results of dynamic memory
 *		allocations, and to free dynamically allocated memory
 *		when we are done with it.
 *
 *                                              JRM - 8/16/04
 */
void group_dataset_read(hid_t fid, int mpi_rank, int m)
{
    int      ret, i, j, size;
    char     gname[64], dname[32];
    hid_t    gid, did;
    DATATYPE *outdata = NULL;
    DATATYPE *indata = NULL;

    size = get_size();

    indata = (DATATYPE*)HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
    VRFY((indata != NULL), "HDmalloc succeeded for indata");

    outdata = (DATATYPE*)HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
    VRFY((outdata != NULL), "HDmalloc succeeded for outdata");

    /* open the m-th group under the root group. */
    sprintf(gname, "group%d", m);
    gid = H5Gopen(fid, gname);
    VRFY((gid > 0), gname);

    /* check the data. */
    sprintf(dname, "dataset%d", m);
    did = H5Dopen(gid, dname);
    VRFY((did>0), dname);

    H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, indata);

    /* this is the original value */
    for(i=0; i<size; i++)
       for(j=0; j<size; j++) {
           outdata[(i * size) + j] = (i+j)*1000 + mpi_rank;
       }

    /* compare the original value(outdata) to the value in file(indata).*/
    ret = check_value(indata, outdata, size);
    VRFY((ret==0), "check the data");

    H5Dclose(did);
    H5Gclose(gid);

    HDfree(indata);
    HDfree(outdata);
}
Example No. 10
/*
 * Create the appropriate File access property list
 */
hid_t
create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type,
                     hbool_t use_gpfs)
{
    hid_t ret_pl = -1;
    herr_t ret;                 /* generic return value */
    int mpi_rank;		/* mpi variables */

    /* need the rank for error checking macros */
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    ret_pl = H5Pcreate (H5P_FILE_ACCESS);
    VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");

    if (l_facc_type == FACC_DEFAULT)
        return (ret_pl);

    if (l_facc_type == FACC_MPIO) {
        /* set Parallel access with communicator */
        ret = H5Pset_fapl_mpio(ret_pl, comm, info);
        VRFY((ret >= 0), "");
        return(ret_pl);
    }

    if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) {
        hid_t mpio_pl;

        mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
        VRFY((mpio_pl >= 0), "");
        /* set Parallel access with communicator */
        ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
        VRFY((ret >= 0), "");

        /* setup file access template */
        ret_pl = H5Pcreate (H5P_FILE_ACCESS);
        VRFY((ret_pl >= 0), "");
        /* set Parallel access with communicator */
        ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
        VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
        H5Pclose(mpio_pl);
        return(ret_pl);
    }

    if (l_facc_type == FACC_MPIPOSIX) {
        /* set Parallel access with communicator */
        ret = H5Pset_fapl_mpiposix(ret_pl, comm, use_gpfs);
        VRFY((ret >= 0), "H5Pset_fapl_mpiposix succeeded");
        return(ret_pl);
    }

    /* unknown file access types */
    return (ret_pl);
}
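
A typical call sequence for the returned property list (mirroring the multiple-group tests below); filename and facc_type are taken from the test parameters, and the list can be released as soon as the file is open.

    hbool_t use_gpfs = FALSE;
    hid_t   plist, fid;

    plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs);
    fid   = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
    H5Pclose(plist);            /* safe to close once the file is open */

    /* ... parallel I/O against fid ... */

    H5Fclose(fid);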
Example No. 11
/*
 * In a group, creates NDATASETS datasets.  Each process writes a hyperslab
 * of a data array to the file.
 *
 * Changes:     Updated function to use a dynamically calculated size,
 *              instead of the old SIZE #define.  This should allow it
 *              to function with an arbitrary number of processors.
 *
 *                                              JRM - 8/16/04
 */
void write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
{
    int i, j, n, size;
    int mpi_rank, mpi_size;
    char dname[32];
    DATATYPE * outme = NULL;
    hid_t did;

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    size = get_size();

    outme = HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
    VRFY((outme != NULL), "HDmalloc succeeded for outme");

    for(n=0; n < NDATASET; n++) {
         sprintf(dname, "dataset%d", n);
         did = H5Dcreate(gid, dname, H5T_NATIVE_INT, filespace,
                         H5P_DEFAULT);
         VRFY((did > 0), dname);

         for(i=0; i < size; i++)
             for(j=0; j < size; j++)
                 outme[(i * size) + j] = n*1000 + mpi_rank;

         H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT,
                  outme);

         /* create attribute for these datasets.*/
         write_attribute(did, is_dset, n);

         H5Dclose(did);
    }
    HDfree(outme);
}
Example No. 12
/*
 * This recursive function opens all the groups in the vertical direction and
 * checks the data.
 */
void recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid,
                          int counter)
{
    hid_t child_gid;
    int   mpi_rank, err_num=0;
    char  gname[64];

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
#ifdef BARRIER_CHECKS
    if((counter+1) % 10)
        MPI_Barrier(MPI_COMM_WORLD);
#endif /* BARRIER_CHECKS */

    if( (err_num = read_dataset(memspace, filespace, gid)) )
        nerrors += err_num;

    if( counter < GROUP_DEPTH ) {
        sprintf(gname, "%dth_child_group", counter+1);
        child_gid = H5Gopen(gid, gname);
        VRFY((child_gid>0), gname);
        recursive_read_group(memspace, filespace, child_gid, counter+1);
        H5Gclose(child_gid);
    }
}
Example No. 13
/* Write multiple groups with a chunked dataset in each group collectively.
 * These groups and datasets are for testing independent read later.
 *
 * Changes:     Updated function to use a dynamically calculated size,
 *              instead of the old SIZE #define.  This should allow it
 *              to function with an arbitrary number of processors.
 *
 *                                              JRM - 8/16/04
 */
void collective_group_write(void)
{
    int mpi_rank, mpi_size, size;
    int i, j, m;
    hbool_t use_gpfs = FALSE;
    char gname[64], dname[32];
    hid_t fid, gid, did, plist, dcpl, memspace, filespace;
    DATATYPE * outme = NULL;
    hsize_t chunk_origin[DIM];
    hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
    hsize_t chunk_size[2];  /* Chunk dimensions - computed shortly */
    herr_t ret1, ret2;
    const H5Ptest_param_t *pt;
    char	*filename;
    int		ngroups;

    pt = GetTestParameters();
    filename = pt->name;
    ngroups = pt->count;

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    size = get_size();

    chunk_size[0] = (hsize_t)(size / 2);
    chunk_size[1] = (hsize_t)(size / 2);

    outme = HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
    VRFY((outme != NULL), "HDmalloc succeeded for outme");

    plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs);
    fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
    H5Pclose(plist);

    /* decide the hyperslab according to process number. */
    get_slab(chunk_origin, chunk_dims, count, file_dims, size);

    /* select hyperslab in memory and file spaces.  These two operations are
     * identical since the datasets are the same. */
    memspace  = H5Screate_simple(DIM, file_dims, NULL);
    ret1 = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin,
                               chunk_dims, count, chunk_dims);
    filespace = H5Screate_simple(DIM, file_dims,  NULL);
    ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin,
                               chunk_dims, count, chunk_dims);
    VRFY((memspace>=0), "memspace");
    VRFY((filespace>=0), "filespace");
    VRFY((ret1>=0), "mgroup memspace selection");
    VRFY((ret2>=0), "mgroup filespace selection");

    dcpl = H5Pcreate(H5P_DATASET_CREATE);
    ret1 = H5Pset_chunk (dcpl, 2, chunk_size);
    VRFY((dcpl>=0), "dataset creation property");
    VRFY((ret1>=0), "set chunk for dataset creation property");

    /* creates ngroups groups under the root group, writes chunked
     * datasets in parallel. */
    for(m = 0; m < ngroups; m++) {
        sprintf(gname, "group%d", m);
        gid = H5Gcreate(fid, gname, 0);
        VRFY((gid > 0), gname);

        sprintf(dname, "dataset%d", m);
        did = H5Dcreate(gid, dname, H5T_NATIVE_INT, filespace, dcpl);
        VRFY((did > 0), dname);

        for(i=0; i < size; i++)
            for(j=0; j < size; j++)
                outme[(i * size) + j] = (i+j)*1000 + mpi_rank;

        H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT,
                 outme);

        H5Dclose(did);
        H5Gclose(gid);

#ifdef BARRIER_CHECKS
        if(! ((m+1) % 10)) {
            printf("created %d groups\n", m+1);
            MPI_Barrier(MPI_COMM_WORLD);
        }
#endif /* BARRIER_CHECKS */
    }

    H5Pclose(dcpl);
    H5Sclose(filespace);
    H5Sclose(memspace);
    H5Fclose(fid);

    HDfree(outme);
}
Example No. 14
int
main (int argc, char **argv)
{
    hid_t       file_id, dset_id, grp_id;
    hid_t       fapl, sid, mem_dataspace;
    hsize_t     dims[RANK], i;
    herr_t      ret;
    char	filename[1024];
    int         mpi_size, mpi_rank;
    MPI_Comm    comm  = MPI_COMM_WORLD;
    MPI_Info    info  = MPI_INFO_NULL;
    hsize_t     start[RANK];
    hsize_t     count[RANK];
    hsize_t     stride[RANK];
    hsize_t     block[RANK];
    DATATYPE   *data_array = NULL;	/* data buffer */

    MPI_Init(&argc, &argv);
    MPI_Comm_size(comm, &mpi_size);
    MPI_Comm_rank(comm, &mpi_rank);  

    if(MAINPROCESS)
	TESTING("proper shutdown of HDF5 library");
 
    /* Set up file access property list with parallel I/O access */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    VRFY((fapl >= 0), "H5Pcreate succeeded");
    ret = H5Pset_fapl_mpio(fapl, comm, info);
    VRFY((ret >= 0), "");

    h5_fixname(FILENAME[0], fapl, filename, sizeof filename);
    file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    VRFY((file_id >= 0), "H5Fcreate succeeded");
    grp_id = H5Gcreate2(file_id, "Group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    VRFY((grp_id >= 0), "H5Gcreate succeeded");

    dims[0] = ROW_FACTOR*mpi_size;
    dims[1] = COL_FACTOR*mpi_size;
    sid = H5Screate_simple (RANK, dims, NULL);
    VRFY((sid >= 0), "H5Screate_simple succeeded");

    dset_id = H5Dcreate2(grp_id, "Dataset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    VRFY((dset_id >= 0), "H5Dcreate succeeded");

    /* allocate memory for data buffer */
    data_array = (DATATYPE *)HDmalloc(dims[0]*dims[1]*sizeof(DATATYPE));
    VRFY((data_array != NULL), "data_array HDmalloc succeeded");

    /* Each process takes a slab of rows. */
    block[0] = dims[0]/mpi_size;
    block[1] = dims[1];
    stride[0] = block[0];
    stride[1] = block[1];
    count[0] = 1;
    count[1] = 1;
    start[0] = mpi_rank*block[0];
    start[1] = 0;

    /* put some trivial data in the data_array */
    for(i=0 ; i<dims[0]*dims[1]; i++)
        data_array[i] = mpi_rank + 1;

    ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
    VRFY((ret >= 0), "H5Sset_hyperslab succeeded");

    /* create a memory dataspace independently */
    mem_dataspace = H5Screate_simple (RANK, block, NULL);
    VRFY((mem_dataspace >= 0), "");

    /* write data independently */
    ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid,
                   H5P_DEFAULT, data_array);
    VRFY((ret >= 0), "H5Dwrite succeeded");

    /* release data buffers */
    if(data_array) 
        HDfree(data_array);

    MPI_Finalize();

    nerrors += GetTestNumErrs();

    if(MAINPROCESS) {
        if(0 == nerrors)
            PASSED()
        else
	    H5_FAILED()
    }

    return (nerrors!=0);
}
Example No. 15
/*
 * This function is to verify the data from multiple group testing.  It opens
 * every dataset in every group and checks their correctness.
 *
 * Changes:     Updated function to use a dynamically calculated size,
 *              instead of the old SIZE #define.  This should allow it
 *              to function with an arbitrary number of processors.
 *
 *                                              JRM - 8/11/04
 */
void multiple_group_read(void)
{
    int      mpi_rank, mpi_size, error_num, size;
    int      m;
    hbool_t  use_gpfs = FALSE;
    char     gname[64];
    hid_t    plist, fid, gid, memspace, filespace;
    hsize_t  chunk_origin[DIM];
    hsize_t  chunk_dims[DIM], file_dims[DIM], count[DIM];
    const H5Ptest_param_t *pt;
    char	*filename;
    int		ngroups;

    pt = GetTestParameters();
    filename = pt->name;
    ngroups = pt->count;

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    size = get_size();

    plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs);
    fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
    H5Pclose(plist);

    /* decide hyperslab for each process */
    get_slab(chunk_origin, chunk_dims, count, file_dims, size);

    /* select hyperslab for memory and file space */
    memspace  = H5Screate_simple(DIM, file_dims, NULL);
    H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims,
                        count, chunk_dims);
    filespace = H5Screate_simple(DIM, file_dims, NULL);
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims,
                        count, chunk_dims);

    /* open every group under root group. */
    for(m=0; m<ngroups; m++) {
        sprintf(gname, "group%d", m);
        gid = H5Gopen(fid, gname);
        VRFY((gid > 0), gname);

        /* check the data. */
        if(m != 0)
            if( (error_num = read_dataset(memspace, filespace, gid))>0)
                nerrors += error_num;

        /* check attribute.*/
        error_num = 0;
        if( (error_num = read_attribute(gid, is_group, m))>0 )
            nerrors += error_num;

        H5Gclose(gid);

#ifdef BARRIER_CHECKS
        if(!((m+1)%10))
            MPI_Barrier(MPI_COMM_WORLD);
#endif /* BARRIER_CHECKS */
    }

    /* open all the groups in vertical direction. */
    gid = H5Gopen(fid, "group0");
    VRFY((gid>0), "group0");
    recursive_read_group(memspace, filespace, gid, 0);
    H5Gclose(gid);

    H5Sclose(filespace);
    H5Sclose(memspace);
    H5Fclose(fid);

}
Example No. 16
/*
 * Function:        do_read
 * Purpose:         Read the required amount of data from the file.
 * Return:          SUCCESS or FAIL
 * Programmer:      Christian Chilan, April, 2008
 * Modifications:
 */
static herr_t
do_read(results *res, file_descr *fd, parameters *parms, void *buffer)
{
    char        *buffer2 = NULL;       /* Buffer for data verification */
    int         ret_code = SUCCESS;
    char        dname[64];
    int         i;
    size_t      u;
    /* HDF5 variables */
    herr_t      hrc;                    /*HDF5 return code              */
    hsize_t     h5dims[MAX_DIMS];       /*dataset dim sizes             */
    hsize_t     h5block[MAX_DIMS];      /*dataspace selection           */
    hsize_t     h5stride[MAX_DIMS];     /*selection stride              */
    hsize_t     h5start[MAX_DIMS];      /*selection start               */
    int         rank;

    /* Allocate data verification buffer */
    if(NULL == (buffer2 = (char *)malloc(linear_buf_size))) {
        HDfprintf(stderr, "malloc for data verification buffer size (%Zu) failed\n", linear_buf_size);
        GOTOERROR(FAIL);
    } /* end if */

    /* Prepare buffer for verifying data */
    for(u = 0; u < linear_buf_size; u++)
        buffer2[u] = (char)(u % 128);

    rank = parms->rank;
    for(i = 0; i < rank; i++)
        h5offset[i] = offset[i] = 0;

    /* I/O Access specific setup */
    switch (parms->io_type) {
    case POSIXIO:
            cont_dim = rank;

            for (i=rank-1; i>=0; i--) {
                if (parms->buf_size[i]==parms->dset_size[i])
                    cont_dim = i;
                else
                    break;
            }
            cont_size = (!cont_dim)? 1 : parms->buf_size[cont_dim-1];
            for (i=cont_dim; i<rank; i++)
                cont_size *= parms->buf_size[i];

        break;

    case HDF5: /* HDF5 setup */
        for (i=0; i < rank; i++){
            h5dims[i] = parms->dset_size[i];
            h5start[i] = 0;
            h5stride[i] = 1;
            h5block[i] = 1;
            h5count[i] = parms->buf_size[i];
        }

        h5dset_space_id = H5Screate_simple(rank, h5dims, NULL);
        VRFY((h5dset_space_id >= 0), "H5Screate_simple");

        hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
                    h5start, h5stride, h5count, h5block);
        VRFY((hrc >= 0), "H5Sselect_hyperslab");

        /* Create the memory dataspace that corresponds to the xfer buffer */
        h5mem_space_id = H5Screate_simple(rank, h5count, NULL);
        VRFY((h5mem_space_id >= 0), "H5Screate_simple");

        /* Create the dataset transfer property list */
        h5dxpl = H5Pcreate(H5P_DATASET_XFER);
        if (h5dxpl < 0) {
            fprintf(stderr, "HDF5 Property List Create failed\n");
            GOTOERROR(FAIL);
        }
        break;

    default:
        /* unknown request */
        HDfprintf(stderr, "Unknown IO type request (%d)\n", (int)parms->io_type);
        GOTOERROR(FAIL);
        break;
    } /* end switch */


    /* create dataset */
    switch (parms->io_type) {
        case POSIXIO:
        break;

        case HDF5:
        sprintf(dname, "Dataset_%ld", (long)parms->num_bytes);
        h5ds_id = H5Dopen2(fd->h5fd, dname, H5P_DEFAULT);
        if (h5ds_id < 0) {
            HDfprintf(stderr, "HDF5 Dataset open failed\n");
            GOTOERROR(FAIL);
        }
        break;
		
        default:
        /* unknown request */
        HDfprintf(stderr, "Unknown IO type request (%d)\n", (int)parms->io_type);
        GOTOERROR(FAIL);
        break;
    } /* end switch */

    /* Start "raw data" read timer */
    set_time(res->timers, HDF5_RAW_READ_FIXED_DIMS, TSTART);
    hrc = dset_read(rank-1, fd, parms, buffer, buffer2);

    if (hrc < 0) {
        fprintf(stderr, "Error in dataset read\n");
        GOTOERROR(FAIL);
    }

    /* Stop "raw data" read timer */
    set_time(res->timers, HDF5_RAW_READ_FIXED_DIMS, TSTOP);

    /* Calculate read time */

    /* Close dataset. Only HDF5 needs to do an explicit close. */
    if (parms->io_type == HDF5) {
        hrc = H5Dclose(h5ds_id);

        if (hrc < 0) {
            fprintf(stderr, "HDF5 Dataset Close failed\n");
            GOTOERROR(FAIL);
        }

        h5ds_id = -1;
    } /* end if */

done:

    /* release HDF5 objects */
    if (h5dset_space_id != -1) {
        hrc = H5Sclose(h5dset_space_id);
        if (hrc < 0){
            fprintf(stderr, "HDF5 Dataset Space Close failed\n");
            ret_code = FAIL;
        } else {
            h5dset_space_id = -1;
        }
    }

    if (h5mem_space_id != -1) {
        hrc = H5Sclose(h5mem_space_id);
        if (hrc < 0) {
            fprintf(stderr, "HDF5 Memory Space Close failed\n");
            ret_code = FAIL;
        } else {
            h5mem_space_id = -1;
        }
    }

    if (h5dxpl != -1) {
        hrc = H5Pclose(h5dxpl);
        if (hrc < 0) {
            fprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
            ret_code = FAIL;
        } else {
            h5dxpl = -1;
        }
    }

    /* release generic resources */
    if(buffer2)
        free(buffer2);

    return ret_code;
}
Example No. 17
/*
 * Function:        do_write
 * Purpose:         Write the required amount of data to the file.
 * Return:          SUCCESS or FAIL
 * Programmer:      Christian Chilan, April, 2008
 * Modifications:
 */
static herr_t
do_write(results *res, file_descr *fd, parameters *parms, void *buffer)
{
    int         ret_code = SUCCESS;
    char        dname[64];
    int        i;
    size_t     u;
    /* HDF5 variables */
    herr_t      hrc;                    /*HDF5 return code              */
    hsize_t     h5dims[MAX_DIMS];       /*dataset dim sizes             */
    hsize_t     h5chunk[MAX_DIMS];      /*dataset chunk sizes           */
    hsize_t     h5block[MAX_DIMS];      /*dataspace selection           */
    hsize_t     h5stride[MAX_DIMS];     /*selection stride              */
    hsize_t     h5start[MAX_DIMS];      /*selection start               */
    hsize_t     h5maxdims[MAX_DIMS];
    int         rank;                   /*rank of dataset */

    /* Prepare buffer for verifying data */
/*    if (parms->verify)
            memset(buffer,1,linear_buf_size); */

    buf_p=(unsigned char *)buffer;

    for(u = 0; u < linear_buf_size; u++)
        buf_p[u] = u % 128;

    rank = parms->rank;

    for(i = 0; i < rank; i++)
        h5offset[i] = offset[i] = 0;

    /* I/O Access specific setup */
    switch (parms->io_type) {
    case POSIXIO:

            /* determine lowest dimension for contiguous POSIX access */
            cont_dim = rank;

            for (i=rank-1; i>=0; i--) {
                if (parms->buf_size[i]==parms->dset_size[i])
                    cont_dim = i;
                else
                    break;
            }

            /* determine size of the contiguous POSIX access */
            cont_size = (!cont_dim)? 1 : parms->buf_size[cont_dim-1];
            for (i=cont_dim; i<rank; i++)
                cont_size *= parms->buf_size[i];

        break;

    case HDF5: /* HDF5 setup */

        for (i=0; i < rank; i++){
            h5dims[i] = parms->dset_size[i];
            h5start[i] = 0;
            h5stride[i] = 1;
            h5block[i] = 1;
            h5count[i] = parms->buf_size[i];
            h5chunk[i] = parms->chk_size[i];
            h5maxdims[i] = H5S_UNLIMITED;

        }

        if (parms->h5_use_chunks && parms->h5_extendable) {
            h5dset_space_id = H5Screate_simple(rank, h5count, h5maxdims);
            VRFY((h5dset_space_id >= 0), "H5Screate_simple");
        }
        else {
            h5dset_space_id = H5Screate_simple(rank, h5dims, NULL);
            VRFY((h5dset_space_id >= 0), "H5Screate_simple");
        }

        hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
                    h5start, h5stride, h5count, h5block);
        VRFY((hrc >= 0), "H5Sselect_hyperslab");

        /* Create the memory dataspace that corresponds to the xfer buffer */
        h5mem_space_id = H5Screate_simple(rank, h5count, NULL);
        VRFY((h5mem_space_id >= 0), "H5Screate_simple");

        /* Create the dataset transfer property list */
        h5dxpl = H5Pcreate(H5P_DATASET_XFER);
        if (h5dxpl < 0) {
            fprintf(stderr, "HDF5 Property List Create failed\n");
            GOTOERROR(FAIL);
        }

        break;

    default:
        HDfprintf(stderr, "Unknown IO type request (%d)\n", (int)parms->io_type);
        GOTOERROR(FAIL);
        break;
    } /* end switch */


    /* create dataset */
    switch (parms->io_type) {
        case POSIXIO:
        break;

        case HDF5:
            h5dcpl = H5Pcreate(H5P_DATASET_CREATE);

        if (h5dcpl < 0) {
            fprintf(stderr, "HDF5 Property List Create failed\n");
            GOTOERROR(FAIL);
        }

        if(parms->h5_use_chunks) {
            /* Set the chunk dimensions for the dataset */
            hrc = H5Pset_chunk(h5dcpl, rank, h5chunk);
            if (hrc < 0) {
                fprintf(stderr, "HDF5 Property List Set failed\n");
                GOTOERROR(FAIL);
            } /* end if */
        } /* end if */

        sprintf(dname, "Dataset_%ld", (unsigned long)parms->num_bytes);
        h5ds_id = H5Dcreate2(fd->h5fd, dname, ELMT_H5_TYPE,
            h5dset_space_id, H5P_DEFAULT, h5dcpl, H5P_DEFAULT);

        if (h5ds_id < 0) {
            HDfprintf(stderr, "HDF5 Dataset Create failed\n");
            GOTOERROR(FAIL);
        }

        hrc = H5Pclose(h5dcpl);
        /* verifying the close of the dcpl */
        if (hrc < 0) {
            HDfprintf(stderr, "HDF5 Property List Close failed\n");
            GOTOERROR(FAIL);
        }
        break;

        default:
        /* unknown request */
        HDfprintf(stderr, "Unknown IO type request (%d)\n", (int)parms->io_type);
        GOTOERROR(FAIL);
        break;
    }

    /* Start "raw data" write timer */
    set_time(res->timers, HDF5_RAW_WRITE_FIXED_DIMS, TSTART);

    /* Perform write */
    hrc = dset_write(rank-1, fd, parms, buffer);

    if (hrc < 0) {
        fprintf(stderr, "Error in dataset write\n");
        GOTOERROR(FAIL);
    }


    /* Stop "raw data" write timer */
    set_time(res->timers, HDF5_RAW_WRITE_FIXED_DIMS, TSTOP);

    /* Calculate write time */

    /* Close dataset. Only HDF5 needs to do an explicit close. */
    if (parms->io_type == HDF5) {
        hrc = H5Dclose(h5ds_id);

        if (hrc < 0) {
            fprintf(stderr, "HDF5 Dataset Close failed\n");
            GOTOERROR(FAIL);
        }

        h5ds_id = -1;
    } /* end if */

done:

    /* release HDF5 objects */
    if (h5dset_space_id != -1) {
        hrc = H5Sclose(h5dset_space_id);
        if (hrc < 0){
            fprintf(stderr, "HDF5 Dataset Space Close failed\n");
            ret_code = FAIL;
        } else {
            h5dset_space_id = -1;
        }
    }

    if (h5mem_space_id != -1) {
        hrc = H5Sclose(h5mem_space_id);
        if (hrc < 0) {
            fprintf(stderr, "HDF5 Memory Space Close failed\n");
            ret_code = FAIL;
        } else {
            h5mem_space_id = -1;
        }
    }

    if (h5dxpl != -1) {
        hrc = H5Pclose(h5dxpl);
        if (hrc < 0) {
            fprintf(stderr, "HDF5 Dataset Transfer Property List Close failed\n");
            ret_code = FAIL;
        } else {
            h5dxpl = -1;
        }
    }

    return ret_code;
}
Example No. 18
/*
 * Function:        dset_write
 * Purpose:         Write buffer into the dataset.
 * Return:          SUCCESS or FAIL
 * Programmer:      Christian Chilan, April, 2008
 * Modifications:
 */
static herr_t
dset_write(int local_dim, file_descr *fd, parameters *parms, void *buffer)
{
    int cur_dim = order[local_dim]-1;
    int         ret_code = SUCCESS;
    int         k;
    hsize_t  dims[MAX_DIMS], maxdims[MAX_DIMS];
    hsize_t i;
    int j;
    herr_t hrc;

    /* iterate according to the dimensions in the order array */
    for (i=0; i < parms->dset_size[cur_dim]; i += parms->buf_size[cur_dim]){

        h5offset[cur_dim] = (hssize_t)i;
        offset[cur_dim] = (HDoff_t)i;

        if (local_dim > 0){

            dset_write(local_dim-1, fd, parms, buffer);

        }else{

            switch (parms->io_type) {

            case POSIXIO:
                /* initialize POSIX offset in the buffer */
                for(j = 0; j < parms->rank; j++)
                    buf_offset[j] = 0;
                buf_p = (unsigned char *)buffer;
                /* write POSIX buffer */
                posix_buffer_write(0, fd, parms, buffer);
                break;

            case HDF5:
                /* if dimensions are extendable, extend them as needed during access */
                if (parms->h5_use_chunks && parms->h5_extendable) {

                    hrc=H5Sget_simple_extent_dims(h5dset_space_id,dims,maxdims);
                    VRFY((hrc >= 0), "H5Sget_simple_extent_dims");

                    for (k=0; k < parms->rank; k++){

                        HDassert(h5offset[k] >= 0);
                        if (dims[k] <= (hsize_t)h5offset[k]) {
                            dims[k] = dims[k]+h5count[k];
                            hrc=H5Sset_extent_simple(h5dset_space_id,parms->rank,dims,maxdims);
                            VRFY((hrc >= 0), "H5Sset_extent_simple");
                            hrc=H5Dset_extent(h5ds_id,dims);
                            VRFY((hrc >= 0), "H5Dextend");
                        }
                    }
                }
                /* applies offset */
                hrc = H5Soffset_simple(h5dset_space_id, h5offset);
                VRFY((hrc >= 0), "H5Soffset_simple");

                /* Write the buffer out */
                hrc=H5Sget_simple_extent_dims(h5dset_space_id,dims,maxdims);
                hrc = H5Dwrite(h5ds_id, ELMT_H5_TYPE, h5mem_space_id,
                    h5dset_space_id, h5dxpl, buffer);
                VRFY((hrc >= 0), "H5Dwrite");

                break;
				
            default:
                /* unknown request */
                HDfprintf(stderr, "Unknown IO type request (%d)\n", (int)parms->io_type);
                HDassert(0 && "Unknown IO type");
                break;
            } /* switch (parms->io_type) */
        }
    }
done:
    return ret_code;
}
Example No. 19
/*
 * A test for issue HDFFV-10501. A parallel hang was reported which occurred
 * in linked-chunk I/O when collective metadata reads are enabled and some ranks
 * do not have any selection in a dataset's dataspace, while others do. The ranks
 * which have no selection during the read/write operation called H5D__chunk_addrmap()
 * to retrieve the lowest chunk address, since we require that the read/write be done
 * in strictly non-decreasing order of chunk address. For version 1 and 2 B-trees,
 * this caused the non-participating ranks to issue a collective MPI_Bcast() call
 * which the other ranks did not issue, thus causing a hang.
 * 
 * However, since these ranks are not actually reading/writing anything, this call
 * can simply be removed and the address used for the read/write can be set to an
 * arbitrary number (0 was chosen).
 */
void test_partial_no_selection_coll_md_read(void)
{
    const char *filename;
    hsize_t    *dataset_dims = NULL;
    hsize_t     max_dataset_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS];
    hsize_t     sel_dims[1];
    hsize_t     chunk_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS] = { PARTIAL_NO_SELECTION_Y_DIM_SCALE, PARTIAL_NO_SELECTION_X_DIM_SCALE };
    hsize_t     start[PARTIAL_NO_SELECTION_DATASET_NDIMS];
    hsize_t     stride[PARTIAL_NO_SELECTION_DATASET_NDIMS];
    hsize_t     count[PARTIAL_NO_SELECTION_DATASET_NDIMS];
    hsize_t     block[PARTIAL_NO_SELECTION_DATASET_NDIMS];
    hid_t       file_id = -1;
    hid_t       fapl_id = -1;
    hid_t       dset_id = -1;
    hid_t       dcpl_id = -1;
    hid_t       dxpl_id = -1;
    hid_t       fspace_id = -1;
    hid_t       mspace_id = -1;
    int         mpi_rank, mpi_size;
    void       *data = NULL;
    void       *read_buf = NULL;

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    filename = GetTestParameters();

    fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
    VRFY((fapl_id >= 0), "create_faccess_plist succeeded");

    /*
     * Even though the testphdf5 framework currently sets collective metadata reads
     * on the FAPL, we call it here just to be sure this is futureproof, since
     * demonstrating this issue relies upon it.
     */
    VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded");

    file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
    VRFY((file_id >= 0), "H5Fcreate succeeded");

    dataset_dims = malloc(PARTIAL_NO_SELECTION_DATASET_NDIMS * sizeof(*dataset_dims));
    VRFY((dataset_dims != NULL), "malloc succeeded");

    dataset_dims[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE * mpi_size;
    dataset_dims[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE * mpi_size;
    max_dataset_dims[0] = H5S_UNLIMITED;
    max_dataset_dims[1] = H5S_UNLIMITED;

    fspace_id = H5Screate_simple(PARTIAL_NO_SELECTION_DATASET_NDIMS, dataset_dims, max_dataset_dims);
    VRFY((fspace_id >= 0), "H5Screate_simple succeeded");

    /*
     * Set up chunking on the dataset in order to reproduce the problem.
     */
    dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
    VRFY((dcpl_id >= 0), "H5Pcreate succeeded");

    VRFY((H5Pset_chunk(dcpl_id, PARTIAL_NO_SELECTION_DATASET_NDIMS, chunk_dims) >= 0), "H5Pset_chunk succeeded");

    dset_id = H5Dcreate2(file_id, PARTIAL_NO_SELECTION_DATASET_NAME, H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
    VRFY((dset_id >= 0), "H5Dcreate2 succeeded");

    /*
     * Setup hyperslab selection to split the dataset among the ranks.
     *
     * The ranks will write rows across the dataset.
     */
    start[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE * mpi_rank;
    start[1] = 0;
    stride[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE;
    stride[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE;
    count[0] = 1;
    count[1] = mpi_size;
    block[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE;
    block[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE;

    VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), "H5Sselect_hyperslab succeeded");

    sel_dims[0] = count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE);

    mspace_id = H5Screate_simple(1, sel_dims, NULL);
    VRFY((mspace_id >= 0), "H5Screate_simple succeeded");

    data = calloc(1, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int));
    VRFY((data != NULL), "calloc succeeded");

    dxpl_id = H5Pcreate(H5P_DATASET_XFER);
    VRFY((dxpl_id >= 0), "H5Pcreate succeeded");

    /*
     * Enable collective access for the data transfer.
     */
    VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");

    VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded");

    VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded");

    /*
     * Ensure that linked-chunk I/O is performed since this is
     * the particular code path where the issue lies and we don't
     * want the library doing multi-chunk I/O behind our backs.
     */
    VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded");

    read_buf = malloc(count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int));
    VRFY((read_buf != NULL), "malloc succeeded");

    /*
     * Make sure to call H5Sselect_none() on the non-participating process.
     */
    if (PARTIAL_NO_SELECTION_NO_SEL_PROCESS) {
        VRFY((H5Sselect_none(fspace_id) >= 0), "H5Sselect_none succeeded");
        VRFY((H5Sselect_none(mspace_id) >= 0), "H5Sselect_none succeeded");
    }

    /*
     * Finally have each rank read their section of data back from the dataset.
     */
    VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0), "H5Dread succeeded");

    /*
     * Check data integrity just to be sure.
     */
    if (!PARTIAL_NO_SELECTION_NO_SEL_PROCESS) {
        VRFY((!memcmp(data, read_buf, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int))), "memcmp succeeded");
    }

    if (dataset_dims) {
        free(dataset_dims);
        dataset_dims = NULL;
    }

    if (data) {
        free(data);
        data = NULL;
    }

    if (read_buf) {
        free(read_buf);
        read_buf = NULL;
    }

    VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded");
    VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
    VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
    VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
    VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded");
    VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
    VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
}
Example No. 20
/*
 * Function:        do_sio
 * Purpose:         SIO engine where the I/O is executed.
 * Return:          results
 * Programmer:      Christian Chilan, April, 2008
 * Modifications:
 */
void
do_sio(parameters param, results *res)
{
    char       *buffer = NULL; /*data buffer pointer           */
    size_t      buf_size[MAX_DIMS];     /* general buffer size in bytes     */
    file_descr  fd;                     /* file handles */
    iotype      iot;                    /* API type */
    char base_name[256];                /* test file base name */
    /* return codes */
    herr_t      ret_code = 0;   /*return code                           */

    char        fname[FILENAME_MAX];    /* test file name */
    int     i;
    /* HDF5 variables */
    herr_t          hrc;        /*HDF5 return code              */

    /* Sanity check parameters */

    /* IO type */
    iot = param.io_type;

    switch (iot) {
    case POSIXIO:
        fd.posixfd = -1;
        res->timers = io_time_new(SYS_CLOCK);
        break;
    case HDF5:
        fd.h5fd = -1;
        res->timers = io_time_new(SYS_CLOCK);
        break;
    default:
        /* unknown request */
        HDfprintf(stderr, "Unknown IO type request (%d)\n", (int)iot);
        GOTOERROR(FAIL);
    }

    linear_buf_size = 1;

    for (i=0; i<param.rank; i++){
        buf_size[i] = param.buf_size[i];
        order[i] = param.order[i];
        linear_buf_size *= buf_size[i];
        buf_offset[i] = 0;
        offset[i] = 0;

        /* Validate transfer buffer size */
        if (param.buf_size[i]<=0) {
            HDfprintf(stderr,
            "Transfer buffer size[%d] (%zu) must be > 0\n", i,buf_size[i]);
            GOTOERROR(FAIL);
        }

        if ((param.dset_size[i]%param.buf_size[i])!=0) {
            HDfprintf(stderr,
            "Dataset size[%d] (%" H5_PRINTF_LL_WIDTH "d) must be a multiple of the "
            "trasfer buffer size[%d] (%zu)\n",param.rank,
            (long long)param.dset_size[i], param.rank, param.buf_size[i]);
            GOTOERROR(FAIL);
        }

    }

    /* Allocate transfer buffer */
    if ((buffer = (char *)malloc(linear_buf_size)) == NULL){
        HDfprintf(stderr, "malloc for transfer buffer size (%zu) failed\n", linear_buf_size);
        GOTOERROR(FAIL);
    }

    /* output all of the times for all iterations */
    if (sio_debug_level >= 4)
        fprintf(output, "Timer details:\n");

    /*
     * Write performance measurement
     */
    /* Open file for write */

    HDstrcpy(base_name, "#sio_tmp");
    sio_create_filename(iot, base_name, fname, sizeof(fname), &param);

    if (sio_debug_level > 0)
        HDfprintf(output, "data filename=%s\n",
             fname);

    set_time(res->timers, HDF5_GROSS_WRITE_FIXED_DIMS, TSTART);
    hrc = do_fopen(&param, fname, &fd, SIO_CREATE | SIO_WRITE);
    VRFY((hrc == SUCCESS), "do_fopen failed");

    set_time(res->timers, HDF5_FINE_WRITE_FIXED_DIMS, TSTART);
    hrc = do_write(res, &fd, &param, buffer);
    set_time(res->timers, HDF5_FINE_WRITE_FIXED_DIMS, TSTOP);
    VRFY((hrc == SUCCESS), "do_write failed");

    /* Close file for write */
    hrc = do_fclose(iot, &fd);
    set_time(res->timers, HDF5_GROSS_WRITE_FIXED_DIMS, TSTOP);
    VRFY((hrc == SUCCESS), "do_fclose failed");

    if (!param.h5_write_only) {
        /*
         * Read performance measurement
         */

        /* Open file for read */
        set_time(res->timers, HDF5_GROSS_READ_FIXED_DIMS, TSTART);
        hrc = do_fopen(&param, fname, &fd, SIO_READ);
        VRFY((hrc == SUCCESS), "do_fopen failed");

        set_time(res->timers, HDF5_FINE_READ_FIXED_DIMS, TSTART);
        hrc = do_read(res, &fd, &param, buffer);
        set_time(res->timers, HDF5_FINE_READ_FIXED_DIMS, TSTOP);
        VRFY((hrc == SUCCESS), "do_read failed");

        /* Close file for read */
        hrc = do_fclose(iot, &fd);

        set_time(res->timers, HDF5_GROSS_READ_FIXED_DIMS, TSTOP);
        VRFY((hrc == SUCCESS), "do_fclose failed");
    }

    do_cleanupfile(iot, fname);

done:
    /* clean up */
    /* release HDF5 objects */

    /* close any opened files */
    /* no remove(fname) because that should have happened normally. */
    switch (iot) {
        case POSIXIO:
            if (fd.posixfd != -1)
                hrc = do_fclose(iot, &fd);
            break;
        case HDF5:
            if (fd.h5fd != -1)
                hrc = do_fclose(iot, &fd);
            break;
        default:
            /* unknown request */
            HDassert(0 && "Unknown IO type");
            break;
    }

    /* release generic resources */
    if (buffer)
        free(buffer);

    res->ret_code = ret_code;
}
Example No. 21
/*
 * Example of using PHDF5 to create multiple groups.  Under the root group,
 * it creates ngroups groups.  Under the first group just created, it creates
 * recursive subgroups of depth GROUP_DEPTH.  In each created group, it
 *  generates NDATASETS datasets.  Each process writes a hyperslab of an array
 * into the file.  The structure is like
 *
 *                             root group
 *                                 |
 *            ---------------------------- ... ... ------------------------
 *           |          |         |        ... ...  |                      |
 *       group0*+'   group1*+' group2*+'   ... ...             group ngroups*+'
 *           |
 *      1st_child_group*'
 *           |
 *      2nd_child_group*'
 *           |
 *           :
 *           :
 *           |
 * GROUP_DEPTHth_child_group*'
 *
 *      * means the group has dataset(s).
 *      + means the group has attribute(s).
 *      ' means the datasets in the groups have attribute(s).
 *
 * Changes:     Updated function to use a dynamically calculated size,
 *              instead of the old SIZE #define.  This should allow it
 *              to function with an arbitrary number of processors.
 *
 *                                              JRM - 8/16/04
 */
void multiple_group_write(void)
{
    int mpi_rank, mpi_size, size;
    int m;
    hbool_t use_gpfs = FALSE;
    char gname[64];
    hid_t fid, gid, plist, memspace, filespace;
    hsize_t chunk_origin[DIM];
    hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
    herr_t ret;
    const H5Ptest_param_t *pt;
    char	*filename;
    int		ngroups;

    pt = GetTestParameters();
    filename = pt->name;
    ngroups = pt->count;

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    size = get_size();

    plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs);
    fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
    H5Pclose(plist);

    /* decide the hyperslab according to process number. */
    get_slab(chunk_origin, chunk_dims, count, file_dims, size);

    /* select hyperslab in memory and file spaces.  These two operations are
     * identical since the datasets are the same. */
    memspace  = H5Screate_simple(DIM, file_dims, NULL);
    VRFY((memspace>=0), "memspace");
    ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin,
                               chunk_dims, count, chunk_dims);
    VRFY((ret>=0), "mgroup memspace selection");

    filespace = H5Screate_simple(DIM, file_dims,  NULL);
    VRFY((filespace>=0), "filespace");
    ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin,
                               chunk_dims, count, chunk_dims);
    VRFY((ret>=0), "mgroup filespace selection");

    /* creates ngroups groups under the root group, writes datasets in
     * parallel. */
    for(m = 0; m < ngroups; m++) {
        sprintf(gname, "group%d", m);
        gid = H5Gcreate(fid, gname, 0);
        VRFY((gid > 0), gname);

        /* create attribute for these groups. */
        write_attribute(gid, is_group, m);

        if(m != 0)
            write_dataset(memspace, filespace, gid);

        H5Gclose(gid);

#ifdef BARRIER_CHECKS
        if(! ((m+1) % 10)) {
            printf("created %d groups\n", m+1);
            MPI_Barrier(MPI_COMM_WORLD);
        }
#endif /* BARRIER_CHECKS */
    }

    /* recursively creates subgroups under the first group. */
    gid = H5Gopen(fid, "group0");
    create_group_recursive(memspace, filespace, gid, 0);
    ret = H5Gclose(gid);
    VRFY((ret>=0), "H5Gclose");

    ret = H5Sclose(filespace);
    VRFY((ret>=0), "H5Sclose");
    ret = H5Sclose(memspace);
    VRFY((ret>=0), "H5Sclose");
    ret = H5Fclose(fid);
    VRFY((ret>=0), "H5Fclose");
}
Example No. 22
/*
 * This program performs three different types of parallel access. It writes on
 * the entire dataset, it extends the dataset to nchunks*CHUNKSIZE, and it only
 * opens the dataset. At the end, it verifies the size of the dataset to be
 * consistent with argument 'nchunks'.
 */
void
parallel_access_dataset(const char *filename, int nchunks, access_type action, hid_t *file_id, hid_t *dataset)
{
    /* HDF5 gubbins */
    hid_t    memspace, dataspace;     /* HDF5 dataspace identifiers */
    hid_t    access_plist;         /* HDF5 ID for file access property list */
    herr_t   hrc;                  /* HDF5 return code */
    hsize_t  size[1];
    hsize_t  dim_size;

    hsize_t     chunk_dims[1] ={CHUNKSIZE};
    hsize_t     count[1];
    hsize_t     stride[1];
    hsize_t     block[1];
    hsize_t     offset[1];            /* Selection offset within dataspace */
    /* Variables used in reading data back */
    char         buffer[CHUNKSIZE];
    int         i;

    /* MPI Gubbins */
    MPI_Offset  filesize,	    /* actual file size */
		est_filesize;	    /* estimated file size */
    int         mpierr;

    /* Initialize MPI */
    MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);

    /* Set up MPIO file access property lists */
    access_plist  = H5Pcreate(H5P_FILE_ACCESS);
    VRFY((access_plist >= 0), "");

    hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL);
    VRFY((hrc >= 0), "");

    /* Open the file */
    if (*file_id<0){
        *file_id = H5Fopen(filename, H5F_ACC_RDWR, access_plist);
        VRFY((*file_id >= 0), "");
    }

    /* Open dataset*/
    if (*dataset<0){
        *dataset = H5Dopen(*file_id, DATASETNAME);
        VRFY((*dataset >= 0), "");
    }

    memspace = H5Screate_simple(1, chunk_dims, NULL);
    VRFY((memspace >= 0), "");

    dataspace = H5Dget_space(*dataset);            
    VRFY((dataspace >= 0), "");

    size[0] = nchunks*CHUNKSIZE;

    switch (action) {

        /* all chunks are written by all the processes in an interleaved way*/
        case write_all:

	    memset(buffer, mpi_rank+1, CHUNKSIZE);
	    count[0] = 1;
	    stride[0] = 1;
	    block[0] = chunk_dims[0];
            for (i=0; i<(nchunks+mpi_size-1)/mpi_size; i++){ 
		if (i*mpi_size+mpi_rank < nchunks){
		    offset[0] = (i*mpi_size+mpi_rank)*chunk_dims[0];

		    hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
		    VRFY((hrc >= 0), "");

		    /* Write the buffer out */
		    hrc = H5Dwrite(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
		    VRFY((hrc >= 0), "H5Dwrite");
		}

            }

            break;

        /* only extends the dataset */
        case extend_only:
            /* Extend dataset*/
            hrc = H5Dextend(*dataset, size);
            VRFY((hrc >= 0), "");

            break;

        /* only opens the dataset */
        case open_only:

            break;
    }

    /* Close up */
    hrc = H5Dclose(*dataset);
    VRFY((hrc >= 0), "");
    *dataset = -1;

    hrc = H5Sclose (dataspace);
    VRFY((hrc >= 0), "");

    hrc = H5Sclose (memspace);
    VRFY((hrc >= 0), "");

    hrc = H5Fclose(*file_id);
    VRFY((hrc >= 0), "");
    *file_id = -1;

    /* verify file size */
    filesize = get_filesize(filename);
    est_filesize = nchunks*CHUNKSIZE*sizeof(unsigned char);
    VRFY((filesize >= est_filesize), "file size check");

    /* Can close some plists */
    hrc = H5Pclose(access_plist);
    VRFY((hrc >= 0), "");

    /* Make sure all processes are done before exiting this routine.  Otherwise,
     * other tests may start and modify the test data file while some processes
     * of this test are still accessing it.
     */
    MPI_Barrier(MPI_COMM_WORLD);
}
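/*
 * Illustrative sketch (not part of the original test): in the write_all case
 * above, chunk number i*mpi_size + mpi_rank is assigned to each rank, so the
 * chunks are interleaved round-robin across processes.  The hypothetical
 * helper below only prints that mapping so the assignment arithmetic can be
 * inspected on its own; nchunks, chunksize and the rank/size values are
 * supplied by the caller.
 */
#include <stdio.h>

static void
print_chunk_assignment(int nchunks, int chunksize, int mpi_rank, int mpi_size)
{
    int i, c;

    /* same loop bounds as the write_all case: ceil(nchunks/mpi_size) rounds */
    for (i = 0; i < (nchunks + mpi_size - 1) / mpi_size; i++) {
        c = i * mpi_size + mpi_rank;
        if (c < nchunks)
            printf("rank %d writes chunk %d starting at element %d\n",
                   mpi_rank, c, c * chunksize);
    }
}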
Exemplo n.º 23
0
/*
 * Verify that MPI_Offset values exceeding 2**31 can be computed correctly.
 * Print any failure as information only, not as an error, so that it
 * won't abort the remaining tests.
 *
 * Test if MPIO can write a file from under 2GB to over 2GB and then
 * from under 4GB to over 4GB.
 * Each process writes 1MB in round-robin fashion.
 * The file is then read back in reverse order, i.e. process 0
 * reads the data written by process n-1 and vice versa.
 */
static int
test_mpio_gb_file(char *filename)
{
    int mpi_size, mpi_rank;
    MPI_Info info = MPI_INFO_NULL;
    int mrc;
    MPI_File  fh;
    int i, j, n;
    int vrfyerrs;
    int writerrs;    /* write errors */
    int nerrs;
    int ntimes;      /* how many times */
    char  *buf = NULL;
    char  expected;
    MPI_Offset  size;
    MPI_Offset  mpi_off;
    MPI_Offset  mpi_off_old;
    MPI_Status  mpi_stat;
    h5_stat_t stat_buf;
    int is_signed, sizeof_mpi_offset;

    nerrs = 0;
    /* set up MPI parameters */
    MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);

    if (VERBOSE_MED)
        printf("MPI_Offset range test\n");

    /* figure out the signedness and size of MPI_Offset */
    mpi_off = 0;
    is_signed = ((MPI_Offset)(mpi_off - 1)) < 0;
    sizeof_mpi_offset = (int)(sizeof(MPI_Offset));

    /*
     * Verify the size of MPI_Offset and correctness of handling multiple GB
     * sizes.
     */
    if (MAINPROCESS){      /* only process 0 needs to check it*/
  printf("MPI_Offset is %s %d bytes integeral type\n",
      is_signed ? "signed" : "unsigned", (int)sizeof(MPI_Offset));
  if (sizeof_mpi_offset <= 4 && is_signed){
      printf("Skipped 2GB range test "
        "because MPI_Offset cannot support it\n");
  }else {
      /* verify correctness of assigning 2GB sizes */
      mpi_off = 2 * 1024 * (MPI_Offset)MB;
      INFO((mpi_off>0), "2GB OFFSET assignment no overflow");
      INFO((mpi_off-1)==TWO_GB_LESS1, "2GB OFFSET assignment succeed");

      /* verify correctness of increasing from below 2 GB to above 2GB */
      mpi_off = TWO_GB_LESS1;
      for (i=0; i < 3; i++){
    mpi_off_old = mpi_off;
    mpi_off = mpi_off + 1;
    /* no overflow */
    INFO((mpi_off>0), "2GB OFFSET increment no overflow");
    /* correct inc. */
    INFO((mpi_off-1)==mpi_off_old, "2GB OFFSET increment succeed");
      }
  }

  if (sizeof_mpi_offset <= 4){
      printf("Skipped 4GB range test "
        "because MPI_Offset cannot support it\n");
  }else {
      /* verify correctness of assigning 4GB sizes */
      mpi_off = 4 * 1024 * (MPI_Offset)MB;
      INFO((mpi_off>0), "4GB OFFSET assignment no overflow");
      INFO((mpi_off-1)==FOUR_GB_LESS1, "4GB OFFSET assignment succeed");

      /* verify correctness of increasing from below 4 GB to above 4 GB */
      mpi_off = FOUR_GB_LESS1;
      for (i=0; i < 3; i++){
    mpi_off_old = mpi_off;
    mpi_off = mpi_off + 1;
    /* no overflow */
    INFO((mpi_off>0), "4GB OFFSET increment no overflow");
    /* correct inc. */
    INFO((mpi_off-1)==mpi_off_old, "4GB OFFSET increment succeed");
      }
  }
    }

    /*
     * Verify if we can write to a file of multiple GB sizes.
     */
    if (VERBOSE_MED)
  printf("MPIO GB file test %s\n", filename);

    if (sizeof_mpi_offset <= 4){
  printf("Skipped GB file range test "
    "because MPI_Offset cannot support it\n");
    }else{
  buf = malloc(MB);
  VRFY((buf!=NULL), "malloc succeed");

  /* open a new file. Remove it first in case it exists. */
  /* Must delete because MPI_File_open does not have a truncate mode. */
  /* Ignore any error from the delete. */
  MPI_File_delete(filename, MPI_INFO_NULL);
  MPI_Barrier(MPI_COMM_WORLD);  /* prevent race condition */

  mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE|MPI_MODE_RDWR,
        info, &fh);
  VRFY((mrc==MPI_SUCCESS), "MPI_FILE_OPEN");

  printf("MPIO GB file write test %s\n", filename);

  /* instead of writing every byte of the file, we will just write
   * some data around the 2 and 4 GB boundaries.  That should cover
   * potential integer overflow and filesystem size limits.
   */
  writerrs = 0;
  for (n=2; n <= 4; n+=2){
      ntimes = GB/MB*n/mpi_size + 1;
      for (i=ntimes-2; i <= ntimes; i++){
    mpi_off = (i*mpi_size + mpi_rank)*(MPI_Offset)MB;
    if (VERBOSE_MED)
        HDfprintf(stdout,"proc %d: write to mpi_off=%016llx, %lld\n",
      mpi_rank, mpi_off, mpi_off);
    /* set data to some trivial pattern for easy verification */
    for (j=0; j<MB; j++)
        *(buf+j) = i*mpi_size + mpi_rank;
    if (VERBOSE_MED)
        HDfprintf(stdout,"proc %d: writing %d bytes at offset %lld\n",
      mpi_rank, MB, mpi_off);
    mrc = MPI_File_write_at(fh, mpi_off, buf, MB, MPI_BYTE, &mpi_stat);
    INFO((mrc==MPI_SUCCESS), "GB size file write");
    if (mrc!=MPI_SUCCESS)
        writerrs++;
      }
  }

  /* close file and free the communicator */
  mrc = MPI_File_close(&fh);
  VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE");

  mrc = MPI_Barrier(MPI_COMM_WORLD);
  VRFY((mrc==MPI_SUCCESS), "Sync after writes");

  /*
   * Verify if we can read the multiple GB file just created.
   */
  /* open it again to verify the data written */
  /* but only if there were no write errors */
  printf("MPIO GB file read test %s\n", filename);
  if (errors_sum(writerrs)>0){
      printf("proc %d: Skip read test due to previous write errors\n",
    mpi_rank);
      goto finish;
  }
  mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info, &fh);
  VRFY((mrc==MPI_SUCCESS), "");

  /* Only read back parts of the file that have been written. */
  for (n=2; n <= 4; n+=2){
      ntimes = GB/MB*n/mpi_size + 1;
      for (i=ntimes-2; i <= ntimes; i++){
    mpi_off = (i*mpi_size + (mpi_size - mpi_rank - 1))*(MPI_Offset)MB;
    if (VERBOSE_MED)
        HDfprintf(stdout,"proc %d: read from mpi_off=%016llx, %lld\n",
      mpi_rank, mpi_off, mpi_off);
    mrc = MPI_File_read_at(fh, mpi_off, buf, MB, MPI_BYTE, &mpi_stat);
    INFO((mrc==MPI_SUCCESS), "GB size file read");
    expected = i*mpi_size + (mpi_size - mpi_rank - 1);
    vrfyerrs=0;
    for (j=0; j<MB; j++){
        if ((*(buf+j) != expected) &&
      (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)){
          printf("proc %d: found data error at [%ld+%d], expect %d, got %d\n",
        mpi_rank, (long)mpi_off, j, expected, *(buf+j));
        }
    }
    if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
        printf("proc %d: [more errors ...]\n", mpi_rank);

    nerrs += vrfyerrs;
      }
  }

  /* close file and free the communicator */
  mrc = MPI_File_close(&fh);
  VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE");

  /*
   * one more sync to ensure all processes have done reading
   * before ending this test.
   */
  mrc = MPI_Barrier(MPI_COMM_WORLD);
  VRFY((mrc==MPI_SUCCESS), "Sync before leaving test");

        /*
         * Check if MPI_File_get_size works correctly.  Some systems (only SGI Altix
         * Propack 4 so far) return an incorrect file size.  This can be avoided by
         * reconfiguring with "--disable-mpi-size".
         */
#ifdef H5_HAVE_MPI_GET_SIZE
  printf("Test if MPI_File_get_size works correctly with %s\n", filename);

  mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info, &fh);
        VRFY((mrc==MPI_SUCCESS), "");

        if (MAINPROCESS){      /* only process 0 needs to check it*/
            mrc = MPI_File_get_size(fh, &size);
      VRFY((mrc==MPI_SUCCESS), "");

            mrc=HDstat(filename, &stat_buf);
      VRFY((mrc==0), "");

            /* Hopefully this casting is safe */
            if(size != (MPI_Offset)(stat_buf.st_size)) {
                printf("Warning: MPI_File_get_size doesn't return correct file size.  To avoid using it in the library, reconfigure and rebuild the library with --disable-mpi-size.\n");
            }
        }

  /* close file and free the communicator */
  mrc = MPI_File_close(&fh);
  VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE");

  /*
   * one more sync to ensure all processes have done reading
   * before ending this test.
   */
  mrc = MPI_Barrier(MPI_COMM_WORLD);
  VRFY((mrc==MPI_SUCCESS), "Sync before leaving test");
#else
        printf("Skipped testing MPI_File_get_size because it's disabled\n");
#endif
    }

finish:
    if (buf)
  HDfree(buf);
    return (nerrs);
}
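/*
 * Illustrative sketch (not part of the original test): the GB-file test above
 * computes each rank's byte offset as (i*mpi_size + mpi_rank)*(MPI_Offset)MB.
 * The cast forces the multiplication into 64-bit MPI_Offset arithmetic;
 * without it the intermediate product of plain ints could overflow once the
 * offset passes 2GB.  The standalone helper below reproduces just that
 * arithmetic; MB_ and GB_ are local stand-ins for the test's MB and GB macros.
 */
#include <stdio.h>
#include <mpi.h>

static void
show_gb_offsets(int mpi_rank, int mpi_size)
{
    const MPI_Offset MB_ = 1024 * 1024;
    const MPI_Offset GB_ = 1024 * MB_;
    int n, i, ntimes;
    MPI_Offset mpi_off;

    for (n = 2; n <= 4; n += 2) {                    /* around the 2GB and 4GB marks */
        ntimes = (int)(GB_ / MB_) * n / mpi_size + 1;
        for (i = ntimes - 2; i <= ntimes; i++) {
            mpi_off = (i * mpi_size + mpi_rank) * MB_;   /* 64-bit product */
            printf("rank %d: would write 1MB at offset %lld\n",
                   mpi_rank, (long long)mpi_off);
        }
    }
}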
Exemplo n.º 24
0
/*
 * This routine verifies the data written in the dataset. It handles one of
 * three cases according to the value of the parameter `write_pattern':
 * 1. the dataset has not been written, so reads must return the fill value;
 * 2. only a small part has been written, so unwritten chunks must still
 *    return the fill value;
 * 3. the whole dataset has been written in an interleaved pattern, so reads
 *    must return the written values.
 */
void verify_data(const char *filename, int nchunks, write_type write_pattern, int close, hid_t *file_id, hid_t *dataset)
{
    /* HDF5 gubbins */
    hid_t    dataspace, memspace;     /* HDF5 dataspace identifiers */
    hid_t    access_plist;         /* HDF5 ID for file access property list */
    herr_t   hrc;                  /* HDF5 return code */

    hsize_t     chunk_dims[1] ={CHUNKSIZE};
    hsize_t     count[1];
    hsize_t     stride[1];
    hsize_t     block[1];
    hsize_t     offset[1];            /* Selection offset within dataspace */
    /* Variables used in reading data back */
    char         buffer[CHUNKSIZE];
    int         value, i;
    int         index, current;

    /* MPI Gubbins */
    MPI_Offset  filesize,	    /* actual file size */
		est_filesize;	    /* estimated file size */
    int         mpierr;

    /* Initialize MPI */
    MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);

    /* Set up MPIO file access property lists */
    access_plist  = H5Pcreate(H5P_FILE_ACCESS);
    VRFY((access_plist >= 0), "");

    hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL);
    VRFY((hrc >= 0), "");

    /* Open the file */
    if (*file_id<0){
        *file_id = H5Fopen(filename, H5F_ACC_RDWR, access_plist);
        VRFY((*file_id >= 0), "");
    }

    /* Open dataset*/
    if (*dataset<0){
        *dataset = H5Dopen(*file_id, DATASETNAME);
        VRFY((*dataset >= 0), "");
    }

    memspace = H5Screate_simple(1, chunk_dims, NULL);
    VRFY((memspace >= 0), "");

    dataspace = H5Dget_space(*dataset);            
    VRFY((dataspace >= 0), "");

    /* all processes check all chunks. */
    count[0] = 1;
    stride[0] = 1;
    block[0] = chunk_dims[0];
    for (i=0; i<nchunks; i++){ 
	/* reset buffer values */
	memset(buffer, -1, CHUNKSIZE);

        offset[0] = i*chunk_dims[0];

        hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
        VRFY((hrc >= 0), "");

        /* Read the chunk */
        hrc = H5Dread(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
        VRFY((hrc >= 0), "H5Dread");

        /* set expected value according to the write pattern */
	switch (write_pattern) {
	    case all:
		value = i%mpi_size + 1;
		break;
	    case none:
		value = 0;
		break;
            case sec_last:
		if (i==(nchunks-2))
		    value = 100;
		else
		    value = 0;
	}

        /* verify content of the chunk */
        for (index = 0; index < CHUNKSIZE; index++)
            VRFY((buffer[index] == value), "data verification");
        
    }

    hrc = H5Sclose (dataspace);
    VRFY((hrc >= 0), "");

    hrc = H5Sclose (memspace);
    VRFY((hrc >= 0), "");

    /* Can close some plists */
    hrc = H5Pclose(access_plist);
    VRFY((hrc >= 0), "");

    /* Close up */
    if (close){
        hrc = H5Dclose(*dataset);
        VRFY((hrc >= 0), "");
        *dataset = -1;

        hrc = H5Fclose(*file_id);
        VRFY((hrc >= 0), "");
        *file_id = -1;
    }

    /* Make sure all processes are done before exiting this routine.  Otherwise,
     * other tests may start and modify the test data file while some processes
     * of this test are still accessing it.
     */
    MPI_Barrier(MPI_COMM_WORLD);
}
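/*
 * Hypothetical driver (a sketch, not part of the original suite) showing how
 * the routines in this file might be combined: create a chunked dataset
 * serially, write every chunk in parallel, then verify the contents.  The
 * file name and chunk count are made-up values; the enum members none,
 * write_all and all, and the three routines called, are the ones defined in
 * this test file.
 */
static void
extend_test_driver(void)
{
    hid_t file_id = -1;      /* negative means "not open yet" for the helpers */
    hid_t dataset = -1;
    const char *fname = "ext_test.h5";    /* assumed file name */
    int nchunks = 10;                     /* assumed chunk count */

    create_chunked_dataset(fname, nchunks, none);
    parallel_access_dataset(fname, nchunks, write_all, &file_id, &dataset);
    verify_data(fname, nchunks, all, 1 /* close */, &file_id, &dataset);
}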
Exemplo n.º 25
0
void
test_plist_ed(void)
{
    hid_t dcpl;	       	/* dataset create prop. list */
    hid_t dapl;	       	/* dataset access prop. list */
    hid_t dxpl;	       	/* dataset transfer prop. list */
    hid_t gcpl;	       	/* group create prop. list */
    hid_t lcpl;	       	/* link create prop. list */
    hid_t lapl;	       	/* link access prop. list */
    hid_t ocpypl;	/* object copy prop. list */
    hid_t ocpl;	        /* object create prop. list */
    hid_t fapl;	       	/* file access prop. list */
    hid_t fcpl;	       	/* file create prop. list */
    hid_t strcpl;	/* string create prop. list */
    hid_t acpl;	       	/* attribute create prop. list */

    int mpi_size, mpi_rank, recv_proc;

    hsize_t chunk_size = 16384;	/* chunk size */ 
    double fill = 2.7f;         /* Fill value */
    size_t nslots = 521*2;
    size_t nbytes = 1048576 * 10;
    double w0 = 0.5f;
    unsigned max_compact;
    unsigned min_dense;
    hsize_t max_size[1]; /*data space maximum size */
    const char* c_to_f = "x+32";
    H5AC_cache_config_t my_cache_config = {
        H5AC__CURR_CACHE_CONFIG_VERSION,
        TRUE,
        FALSE,
        FALSE,
        "temp",
        TRUE,
        FALSE,
        ( 2 * 2048 * 1024),
        0.3f,
        (64 * 1024 * 1024),
        (4 * 1024 * 1024),
        60000,
        H5C_incr__threshold,
        0.8f,
        3.0f,
        TRUE,
        (8 * 1024 * 1024),
        H5C_flash_incr__add_space,
        2.0f,
        0.25f,
        H5C_decr__age_out_with_threshold,
        0.997f,
        0.8f,
        TRUE,
        (3 * 1024 * 1024),
        3,
        FALSE,
        0.2f,
        (256 * 2048),
        H5AC__DEFAULT_METADATA_WRITE_STRATEGY};

    herr_t ret;         	/* Generic return value */

    if(VERBOSE_MED)
	printf("Encode/Decode DCPLs\n");

    /* set up MPI parameters */
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    if(mpi_size == 1)
        recv_proc = 0;
    else
        recv_proc = 1;

    dcpl = H5Pcreate(H5P_DATASET_CREATE);
    VRFY((dcpl >= 0), "H5Pcreate succeeded");

    ret = H5Pset_chunk(dcpl, 1, &chunk_size);
    VRFY((ret >= 0), "H5Pset_chunk succeeded");

    ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE);
    VRFY((ret >= 0), "H5Pset_alloc_time succeeded");

    ret = H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill);
    VRFY((ret>=0), "set fill-value succeeded");

    max_size[0] = 100;
    ret = H5Pset_external(dcpl, "ext1.data", (off_t)0, 
                          (hsize_t)(max_size[0] * sizeof(int)/4));
    VRFY((ret>=0), "set external succeeded");
    ret = H5Pset_external(dcpl, "ext2.data", (off_t)0, 
                          (hsize_t)(max_size[0] * sizeof(int)/4));
    VRFY((ret>=0), "set external succeeded");
    ret = H5Pset_external(dcpl, "ext3.data", (off_t)0, 
                          (hsize_t)(max_size[0] * sizeof(int)/4));
    VRFY((ret>=0), "set external succeeded");
    ret = H5Pset_external(dcpl, "ext4.data", (off_t)0, 
                          (hsize_t)(max_size[0] * sizeof(int)/4));
    VRFY((ret>=0), "set external succeeded");

    ret = test_encode_decode(dcpl, mpi_rank, recv_proc);
    VRFY((ret >= 0), "test_encode_decode succeeded");

    ret = H5Pclose(dcpl);
    VRFY((ret >= 0), "H5Pclose succeeded");


    /******* ENCODE/DECODE DAPLS *****/
    dapl = H5Pcreate(H5P_DATASET_ACCESS);
    VRFY((dapl >= 0), "H5Pcreate succeeded");

    ret = H5Pset_chunk_cache(dapl, nslots, nbytes, w0);
    VRFY((ret >= 0), "H5Pset_chunk_cache succeeded");

    ret = test_encode_decode(dapl, mpi_rank, recv_proc);
    VRFY((ret >= 0), "test_encode_decode succeeded");

    ret = H5Pclose(dapl);
    VRFY((ret >= 0), "H5Pclose succeeded");


    /******* ENCODE/DECODE OCPLS *****/
    ocpl = H5Pcreate(H5P_OBJECT_CREATE);
    VRFY((ocpl >= 0), "H5Pcreate succeeded");

    ret = H5Pset_attr_creation_order(ocpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED));
    VRFY((ret >= 0), "H5Pset_attr_creation_order succeeded");

    ret = H5Pset_attr_phase_change(ocpl, 110, 105);
    VRFY((ret >= 0), "H5Pset_attr_phase_change succeeded");

    ret = H5Pset_filter(ocpl, H5Z_FILTER_FLETCHER32, 0, (size_t)0, NULL);
    VRFY((ret >= 0), "H5Pset_filter succeeded");

    ret = test_encode_decode(ocpl, mpi_rank, recv_proc);
    VRFY((ret >= 0), "test_encode_decode succeeded");

    ret = H5Pclose(ocpl);
    VRFY((ret >= 0), "H5Pclose succeeded");


    /******* ENCODE/DECODE DXPLS *****/
    dxpl = H5Pcreate(H5P_DATASET_XFER);
    VRFY((dxpl >= 0), "H5Pcreate succeeded");

    ret = H5Pset_btree_ratios(dxpl, 0.2f, 0.6f, 0.2f);
    VRFY((ret >= 0), "H5Pset_btree_ratios succeeded");

    ret = H5Pset_hyper_vector_size(dxpl, 5);
    VRFY((ret >= 0), "H5Pset_hyper_vector_size succeeded");

    ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");

    ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
    VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt succeeded");

    ret = H5Pset_dxpl_mpio_chunk_opt(dxpl, H5FD_MPIO_CHUNK_MULTI_IO);
    VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded");

    ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl, 30);
    VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded");

    ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl, 40);
    VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded");

    ret = H5Pset_edc_check(dxpl, H5Z_DISABLE_EDC);
    VRFY((ret >= 0), "H5Pset_edc_check succeeded");

    ret = H5Pset_data_transform(dxpl, c_to_f);
    VRFY((ret >= 0), "H5Pset_data_transform succeeded");

    ret = test_encode_decode(dxpl, mpi_rank, recv_proc);
    VRFY((ret >= 0), "test_encode_decode succeeded");

    ret = H5Pclose(dxpl);
    VRFY((ret >= 0), "H5Pclose succeeded");


    /******* ENCODE/DECODE GCPLS *****/
    gcpl = H5Pcreate(H5P_GROUP_CREATE);
    VRFY((gcpl >= 0), "H5Pcreate succeeded");

    ret = H5Pset_local_heap_size_hint(gcpl, 256);
    VRFY((ret >= 0), "H5Pset_local_heap_size_hint succeeded");

    ret = H5Pset_link_phase_change(gcpl, 2, 2);
    VRFY((ret >= 0), "H5Pset_link_phase_change succeeded");

    /* Query the group creation properties */
    ret = H5Pget_link_phase_change(gcpl, &max_compact, &min_dense);
    VRFY((ret >= 0), "H5Pget_est_link_info succeeded");

    ret = H5Pset_est_link_info(gcpl, 3, 9);
    VRFY((ret >= 0), "H5Pset_est_link_info succeeded");

    ret = H5Pset_link_creation_order(gcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED));
    VRFY((ret >= 0), "H5Pset_link_creation_order succeeded");

    ret = test_encode_decode(gcpl, mpi_rank, recv_proc);
    VRFY((ret >= 0), "test_encode_decode succeeded");

    ret = H5Pclose(gcpl);
    VRFY((ret >= 0), "H5Pclose succeeded");


    /******* ENCODE/DECODE LCPLS *****/
    lcpl = H5Pcreate(H5P_LINK_CREATE);
    VRFY((lcpl >= 0), "H5Pcreate succeeded");

    ret= H5Pset_create_intermediate_group(lcpl, TRUE);
    VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded");

    ret = test_encode_decode(lcpl, mpi_rank, recv_proc);
    VRFY((ret >= 0), "test_encode_decode succeeded");

    ret = H5Pclose(lcpl);
    VRFY((ret >= 0), "H5Pclose succeeded");


    /******* ENCODE/DECODE LAPLS *****/
    lapl = H5Pcreate(H5P_LINK_ACCESS);
    VRFY((lapl >= 0), "H5Pcreate succeeded");

    ret = H5Pset_nlinks(lapl, (size_t)134);
    VRFY((ret >= 0), "H5Pset_nlinks succeeded");

    ret = H5Pset_elink_acc_flags(lapl, H5F_ACC_RDONLY);
    VRFY((ret >= 0), "H5Pset_elink_acc_flags succeeded");

    ret = H5Pset_elink_prefix(lapl, "/tmpasodiasod");
    VRFY((ret >= 0), "H5Pset_nlinks succeeded");

    /* Create FAPL for the elink FAPL */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    VRFY((fapl >= 0), "H5Pcreate succeeded");
    ret = H5Pset_alignment(fapl, 2, 1024);
    VRFY((ret >= 0), "H5Pset_alignment succeeded");

    ret = H5Pset_elink_fapl(lapl, fapl);
    VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");

    /* Close the elink's FAPL */
    ret = H5Pclose(fapl);
    VRFY((ret >= 0), "H5Pclose succeeded");

    ret = test_encode_decode(lapl, mpi_rank, recv_proc);
    VRFY((ret >= 0), "test_encode_decode succeeded");

    ret = H5Pclose(lapl);
    VRFY((ret >= 0), "H5Pclose succeeded");


    /******* ENCODE/DECODE OCPYPLS *****/
    ocpypl = H5Pcreate(H5P_OBJECT_COPY);
    VRFY((ocpypl >= 0), "H5Pcreate succeeded");

    ret = H5Pset_copy_object(ocpypl, H5O_COPY_EXPAND_EXT_LINK_FLAG);
    VRFY((ret >= 0), "H5Pset_copy_object succeeded");

    ret = H5Padd_merge_committed_dtype_path(ocpypl, "foo");
    VRFY((ret >= 0), "H5Padd_merge_committed_dtype_path succeeded");

    ret = H5Padd_merge_committed_dtype_path(ocpypl, "bar");
    VRFY((ret >= 0), "H5Padd_merge_committed_dtype_path succeeded");

    ret = test_encode_decode(ocpypl, mpi_rank, recv_proc);
    VRFY((ret >= 0), "test_encode_decode succeeded");

    ret = H5Pclose(ocpypl);
    VRFY((ret >= 0), "H5Pclose succeeded");


    /******* ENCODE/DECODE FAPLS *****/
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    VRFY((fapl >= 0), "H5Pcreate succeeded");

    ret = H5Pset_family_offset(fapl, 1024);
    VRFY((ret >= 0), "H5Pset_family_offset succeeded");

    ret = H5Pset_meta_block_size(fapl, 2098452);
    VRFY((ret >= 0), "H5Pset_meta_block_size succeeded");

    ret = H5Pset_sieve_buf_size(fapl, 1048576);
    VRFY((ret >= 0), "H5Pset_sieve_buf_size succeeded");

    ret = H5Pset_alignment(fapl, 2, 1024);
    VRFY((ret >= 0), "H5Pset_alignment succeeded");

    ret = H5Pset_cache(fapl, 1024, 128, 10485760, 0.3f);
    VRFY((ret >= 0), "H5Pset_cache succeeded");

    ret = H5Pset_elink_file_cache_size(fapl, 10485760);
    VRFY((ret >= 0), "H5Pset_elink_file_cache_size succeeded");

    ret = H5Pset_gc_references(fapl, 1);
    VRFY((ret >= 0), "H5Pset_gc_references succeeded");

    ret = H5Pset_small_data_block_size(fapl, 2048);
    VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded");

    ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
    VRFY((ret >= 0), "H5Pset_libver_bounds succeeded");

    ret = H5Pset_fclose_degree(fapl, H5F_CLOSE_WEAK);
    VRFY((ret >= 0), "H5Pset_fclose_degree succeeded");

    ret = H5Pset_multi_type(fapl, H5FD_MEM_GHEAP);
    VRFY((ret >= 0), "H5Pset_multi_type succeeded");

    ret = H5Pset_mdc_config(fapl, &my_cache_config);
    VRFY((ret >= 0), "H5Pset_mdc_config succeeded");

    ret = test_encode_decode(fapl, mpi_rank, recv_proc);
    VRFY((ret >= 0), "test_encode_decode succeeded");

    ret = H5Pclose(fapl);
    VRFY((ret >= 0), "H5Pclose succeeded");


    /******* ENCODE/DECODE FCPLS *****/
    fcpl = H5Pcreate(H5P_FILE_CREATE);
    VRFY((fcpl >= 0), "H5Pcreate succeeded");

    ret = H5Pset_userblock(fcpl, 1024);
    VRFY((ret >= 0), "H5Pset_userblock succeeded");

    ret = H5Pset_istore_k(fcpl, 3);
    VRFY((ret >= 0), "H5Pset_istore_k succeeded");

    ret = H5Pset_sym_k(fcpl, 4, 5);
    VRFY((ret >= 0), "H5Pset_sym_k succeeded");

    ret = H5Pset_shared_mesg_nindexes(fcpl, 8);
    VRFY((ret >= 0), "H5Pset_shared_mesg_nindexes succeeded");

    ret = H5Pset_shared_mesg_index(fcpl, 1,  H5O_SHMESG_SDSPACE_FLAG, 32);
    VRFY((ret >= 0), "H5Pset_shared_mesg_index succeeded");

    ret = H5Pset_shared_mesg_phase_change(fcpl, 60, 20);
    VRFY((ret >= 0), "H5Pset_shared_mesg_phase_change succeeded");

    ret = H5Pset_sizes(fcpl, 8, 4);
    VRFY((ret >= 0), "H5Pset_sizes succeeded");

    ret = test_encode_decode(fcpl, mpi_rank, recv_proc);
    VRFY((ret >= 0), "test_encode_decode succeeded");

    ret = H5Pclose(fcpl);
    VRFY((ret >= 0), "H5Pclose succeeded");


    /******* ENCODE/DECODE STRCPLS *****/
    strcpl = H5Pcreate(H5P_STRING_CREATE);
    VRFY((strcpl >= 0), "H5Pcreate succeeded");

    ret = H5Pset_char_encoding(strcpl, H5T_CSET_UTF8);
    VRFY((ret >= 0), "H5Pset_char_encoding succeeded");

    ret = test_encode_decode(strcpl, mpi_rank, recv_proc);
    VRFY((ret >= 0), "test_encode_decode succeeded");

    ret = H5Pclose(strcpl);
    VRFY((ret >= 0), "H5Pclose succeeded");


    /******* ENCODE/DECODE ACPLS *****/
    acpl = H5Pcreate(H5P_ATTRIBUTE_CREATE);
    VRFY((acpl >= 0), "H5Pcreate succeeded");

    ret = H5Pset_char_encoding(acpl, H5T_CSET_UTF8);
    VRFY((ret >= 0), "H5Pset_char_encoding succeeded");

    ret = test_encode_decode(acpl, mpi_rank, recv_proc);
    VRFY((ret >= 0), "test_encode_decode succeeded");

    ret = H5Pclose(acpl);
    VRFY((ret >= 0), "H5Pclose succeeded");
}
Exemplo n.º 26
0
int
main (int argc, char **argv)
{
    hid_t       file_id, dset_id, grp_id;
    hid_t       fapl, sid, mem_dataspace;
    herr_t      ret;
    char	filename[1024];
    int         mpi_size, mpi_rank, ndims, i, j;
    MPI_Comm    comm  = MPI_COMM_WORLD;
    MPI_Info    info  = MPI_INFO_NULL;
    hsize_t     dims[RANK];
    hsize_t     start[RANK];
    hsize_t     count[RANK];
    hsize_t     stride[RANK];
    hsize_t     block[RANK];
    DATATYPE   *data_array = NULL, *dataptr;	/* data buffer */

    MPI_Init(&argc, &argv);
    MPI_Comm_size(comm, &mpi_size);
    MPI_Comm_rank(comm, &mpi_rank);  

    if(MAINPROCESS)
	TESTING("proper shutdown of HDF5 library");
 
    /* Set up file access property list with parallel I/O access */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    VRFY((fapl >= 0), "H5Pcreate succeeded");
    ret = H5Pset_fapl_mpio(fapl, comm, info);
    VRFY((ret >= 0), "");

    h5_fixname(FILENAME[0], fapl, filename, sizeof filename);
    file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl);
    VRFY((file_id >= 0), "H5Fopen succeeded");

    grp_id = H5Gopen2(file_id, "Group", H5P_DEFAULT);
    VRFY((grp_id >= 0), "H5Gopen succeeded");

    dset_id = H5Dopen2(grp_id, "Dataset", H5P_DEFAULT);
    VRFY((dset_id >= 0), "H5Dopen succeeded");

    sid = H5Dget_space(dset_id);
    VRFY((dset_id >= 0), "H5Dget_space succeeded");

    ndims = H5Sget_simple_extent_dims(sid, dims, NULL);
    VRFY((ndims == 2), "H5Sget_simple_extent_dims succeeded");
    VRFY(dims[0] == ROW_FACTOR*mpi_size, "Wrong dataset dimensions");
    VRFY(dims[1] == COL_FACTOR*mpi_size, "Wrong dataset dimensions");

    /* allocate memory for data buffer */
    data_array = (DATATYPE *)HDmalloc(dims[0]*dims[1]*sizeof(DATATYPE));
    VRFY((data_array != NULL), "data_array HDmalloc succeeded");

    /* Each process takes a slab of rows. */
    block[0] = dims[0]/mpi_size;
    block[1] = dims[1];
    stride[0] = block[0];
    stride[1] = block[1];
    count[0] = 1;
    count[1] = 1;
    start[0] = mpi_rank*block[0];
    start[1] = 0;

    ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
    VRFY((ret >= 0), "H5Sset_hyperslab succeeded");

    /* create a memory dataspace independently */
    mem_dataspace = H5Screate_simple (RANK, block, NULL);
    VRFY((mem_dataspace >= 0), "");

    /* read data independently */
    ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid,
                  H5P_DEFAULT, data_array);
    VRFY((ret >= 0), "H5Dread succeeded");

    dataptr = data_array;

    for (i=0; i < block[0]; i++){
	for (j=0; j < block[1]; j++){
	    if(*dataptr != mpi_rank+1) {
                printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
                       (unsigned long)i, (unsigned long)j,
                       (unsigned long)(i+start[0]), (unsigned long)(j+start[1]),
                       mpi_rank+1, *(dataptr));
                nerrors ++;
            }
            dataptr++;
	}
    }
    MPI_Finalize();
    HDremove(filename);

    /* release data buffers */
    if(data_array) 
        HDfree(data_array);

    nerrors += GetTestNumErrs();

    if(MAINPROCESS) {
        if(0 == nerrors)
            PASSED()
        else
	    H5_FAILED()
    }

    return (nerrors!=0);
}
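/*
 * Illustrative sketch (not part of the original program): the hyperslab
 * selection in main() above gives every rank a contiguous block of
 * dims[0]/mpi_size rows starting at row mpi_rank*block[0].  The hypothetical
 * helper below computes only that decomposition so the start/block arithmetic
 * can be checked without any HDF5 calls; a rank-2 dataspace is assumed, as in
 * the program above.
 */
#include <hdf5.h>

static void
row_slab(const hsize_t dims[2], int mpi_rank, int mpi_size,
         hsize_t start[2], hsize_t block[2])
{
    block[0] = dims[0] / (hsize_t)mpi_size;   /* rows per process */
    block[1] = dims[1];                       /* every process takes all columns */
    start[0] = (hsize_t)mpi_rank * block[0];
    start[1] = 0;
}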
Exemplo n.º 27
0
/*
 * Example of using PHDF5 to create ndatasets datasets.  Each process writes
 * a slab of the array to the file.
 *
 * Changes:	Updated function to use a dynamically calculated size,
 *		instead of the old SIZE #define.  This should allow it
 *		to function with an arbitrary number of processors.
 *
 *						JRM - 8/11/04
 */
void multiple_dset_write(void)
{
    int i, j, n, mpi_size, mpi_rank, size;
    hid_t iof, plist, dataset, memspace, filespace;
    hid_t dcpl;                         /* Dataset creation property list */
    hbool_t use_gpfs = FALSE;           /* Use GPFS hints */
    hsize_t chunk_origin [DIM];
    hsize_t chunk_dims [DIM], file_dims [DIM];
    hsize_t count[DIM]={1,1};
    double * outme = NULL;
    double fill=1.0;                    /* Fill value */
    char dname [100];
    herr_t ret;
    const H5Ptest_param_t *pt;
    char	*filename;
    int		ndatasets;

    pt = GetTestParameters();
    filename = pt->name;
    ndatasets = pt->count;

    size = get_size();

    MPI_Comm_rank (MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size (MPI_COMM_WORLD, &mpi_size);

    outme = HDmalloc((size_t)(size * size * sizeof(double)));
    VRFY((outme != NULL), "HDmalloc succeeded for outme");

    plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs);
    VRFY((plist>=0), "create_faccess_plist succeeded");
    iof = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
    VRFY((iof>=0), "H5Fcreate succeeded");
    ret = H5Pclose (plist);
    VRFY((ret>=0), "H5Pclose succeeded");

    /* decide the hyperslab according to process number. */
    get_slab(chunk_origin, chunk_dims, count, file_dims, size);

    memspace = H5Screate_simple (DIM, chunk_dims, NULL);
    filespace = H5Screate_simple (DIM, file_dims, NULL);
    ret = H5Sselect_hyperslab (filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
    VRFY((ret>=0), "mdata hyperslab selection");

    /* Create a dataset creation property list */
    dcpl = H5Pcreate(H5P_DATASET_CREATE);
    VRFY((dcpl>=0), "dataset creation property list succeeded");

    ret=H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill);
    VRFY((ret>=0), "set fill-value succeeded");

    for (n = 0; n < ndatasets; n++) {
	sprintf (dname, "dataset %d", n);
	dataset = H5Dcreate (iof, dname, H5T_NATIVE_DOUBLE, filespace, dcpl);
	VRFY((dataset > 0), dname);

	/* calculate data to write */
	for (i = 0; i < size; i++)
	    for (j = 0; j < size; j++)
                outme [(i * size) + j] = n*1000 + mpi_rank;

	H5Dwrite (dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme);

	H5Dclose (dataset);
#ifdef BARRIER_CHECKS
	if (! ((n+1) % 10)) {
	    printf("created %d datasets\n", n+1);
	    MPI_Barrier(MPI_COMM_WORLD);
	}
#endif /* BARRIER_CHECKS */
    }

    H5Sclose (filespace);
    H5Sclose (memspace);
    H5Pclose (dcpl);
    H5Fclose (iof);

    HDfree(outme);
}
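/*
 * Illustrative note (a sketch, not part of the original suite): in
 * multiple_dset_write() above every element written into "dataset n" carries
 * the value n*1000 + mpi_rank, so both the dataset index and the writing rank
 * can be recovered from any cell.  The hypothetical helper below just inverts
 * that encoding; it assumes fewer than 1000 MPI ranks.
 */
static void
decode_cell_value(double cell, int *dset_index, int *writer_rank)
{
    int v = (int)cell;

    *dset_index  = v / 1000;   /* the n in "dataset n" */
    *writer_rank = v % 1000;   /* the mpi_rank that wrote this slab */
}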
/*-------------------------------------------------------------------------
 * Function:    test_fapl_mpio_dup
 *
 * Purpose:     Test if fapl_mpio property list keeps a duplicate of the
 * 		communicator and INFO objects given when set; and returns
 * 		duplicates of its components when H5Pget_fapl_mpio is called.
 *
 * Return:      Success:        None
 *
 *              Failure:        Abort
 *
 * Programmer:  Albert Cheng
 *              January 9, 2003
 *
 * Modifications:
 *-------------------------------------------------------------------------
 */
void
test_fapl_mpio_dup(void)
{
    int mpi_size, mpi_rank;
    MPI_Comm comm, comm_tmp;
    int mpi_size_old, mpi_rank_old;
    int mpi_size_tmp, mpi_rank_tmp;
    MPI_Info info = MPI_INFO_NULL;
    MPI_Info info_tmp = MPI_INFO_NULL;
    int mrc;			/* MPI return value */
    hid_t acc_pl;		/* File access properties */
    herr_t ret;			/* hdf5 return value */
    int nkeys, nkeys_tmp;

    if (VERBOSE_MED)
	printf("Verify fapl_mpio duplicates communicator and INFO objects\n");

    /* set up MPI parameters */
    MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
    if (VERBOSE_MED)
	printf("rank/size of MPI_COMM_WORLD are %d/%d\n", mpi_rank, mpi_size);

    /* Create a new communicator that has the same processes as MPI_COMM_WORLD.
     * Use MPI_Comm_split because it is simpler than MPI_Comm_create
     */
    mrc = MPI_Comm_split(MPI_COMM_WORLD, 0, 0, &comm);
    VRFY((mrc==MPI_SUCCESS), "MPI_Comm_split");
    MPI_Comm_size(comm,&mpi_size_old);
    MPI_Comm_rank(comm,&mpi_rank_old);
    if (VERBOSE_MED)
	printf("rank/size of comm are %d/%d\n", mpi_rank_old, mpi_size_old);

    /* create a new INFO object with some trivial information. */
    mrc = MPI_Info_create(&info);
    VRFY((mrc==MPI_SUCCESS), "MPI_Info_create");
    mrc = MPI_Info_set(info, "hdf_info_name", "XYZ");
    VRFY((mrc==MPI_SUCCESS), "MPI_Info_set");
    if (MPI_INFO_NULL != info){
	mrc=MPI_Info_get_nkeys(info, &nkeys);
	VRFY((mrc==MPI_SUCCESS), "MPI_Info_get_nkeys");
    }
    if (VERBOSE_MED)
	h5_dump_info_object(info);

    acc_pl = H5Pcreate (H5P_FILE_ACCESS);
    VRFY((acc_pl >= 0), "H5P_FILE_ACCESS");

    ret = H5Pset_fapl_mpio(acc_pl, comm, info);
    VRFY((ret >= 0), "");

    /* Case 1:
     * Free the created communicator and INFO object.
     * Check if the access property list is still valid and can return
     * valid communicator and INFO object.
     */
    mrc = MPI_Comm_free(&comm);
    VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");
    if (MPI_INFO_NULL!=info){
	mrc = MPI_Info_free(&info);
	VRFY((mrc==MPI_SUCCESS), "MPI_Info_free");
    }

    ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp);
    VRFY((ret >= 0), "H5Pget_fapl_mpio");
    MPI_Comm_size(comm_tmp,&mpi_size_tmp);
    MPI_Comm_rank(comm_tmp,&mpi_rank_tmp);
    if (VERBOSE_MED)
	printf("After H5Pget_fapl_mpio: rank/size of comm are %d/%d\n",
	mpi_rank_tmp, mpi_size_tmp);
    VRFY((mpi_size_tmp==mpi_size), "MPI_Comm_size");
    VRFY((mpi_rank_tmp==mpi_rank), "MPI_Comm_rank");
    if (MPI_INFO_NULL != info_tmp){
	mrc=MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
	VRFY((mrc==MPI_SUCCESS), "MPI_Info_get_nkeys");
	VRFY((nkeys_tmp==nkeys), "new and old nkeys equal");
    }
    if (VERBOSE_MED)
	h5_dump_info_object(info_tmp);

    /* Case 2:
     * Free the retrieved communicator and INFO object.
     * Check if the access property list is still valid and can return
     * valid communicator and INFO object.
     * Also verify the NULL argument option.
     */
    mrc = MPI_Comm_free(&comm_tmp);
    VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");
    if (MPI_INFO_NULL!=info_tmp){
	mrc = MPI_Info_free(&info_tmp);
	VRFY((mrc==MPI_SUCCESS), "MPI_Info_free");
    }

    /* check NULL argument options. */
    ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, NULL);
    VRFY((ret >= 0), "H5Pget_fapl_mpio Comm only");
    mrc = MPI_Comm_free(&comm_tmp);
    VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");

    ret = H5Pget_fapl_mpio(acc_pl, NULL, &info_tmp);
    VRFY((ret >= 0), "H5Pget_fapl_mpio Info only");
    if (MPI_INFO_NULL!=info_tmp){
	mrc = MPI_Info_free(&info_tmp);
	VRFY((mrc==MPI_SUCCESS), "MPI_Info_free");
    }

    ret = H5Pget_fapl_mpio(acc_pl, NULL, NULL);
    VRFY((ret >= 0), "H5Pget_fapl_mpio neither");

    /* now get both and check validity too. */
    /* Do not free the returned objects; they are used in the next case. */
    ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp);
    VRFY((ret >= 0), "H5Pget_fapl_mpio");
    MPI_Comm_size(comm_tmp,&mpi_size_tmp);
    MPI_Comm_rank(comm_tmp,&mpi_rank_tmp);
    if (VERBOSE_MED)
	printf("After second H5Pget_fapl_mpio: rank/size of comm are %d/%d\n",
	mpi_rank_tmp, mpi_size_tmp);
    VRFY((mpi_size_tmp==mpi_size), "MPI_Comm_size");
    VRFY((mpi_rank_tmp==mpi_rank), "MPI_Comm_rank");
    if (MPI_INFO_NULL != info_tmp){
	mrc=MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
	VRFY((mrc==MPI_SUCCESS), "MPI_Info_get_nkeys");
	VRFY((nkeys_tmp==nkeys), "new and old nkeys equal");
    }
    if (VERBOSE_MED)
	h5_dump_info_object(info_tmp);

    /* Case 3:
     * Close the property list and verify the retrieved communicator and INFO
     * object are still valid.
     */
    H5Pclose(acc_pl);
    MPI_Comm_size(comm_tmp,&mpi_size_tmp);
    MPI_Comm_rank(comm_tmp,&mpi_rank_tmp);
    if (VERBOSE_MED)
	printf("After Property list closed: rank/size of comm are %d/%d\n",
	mpi_rank_tmp, mpi_size_tmp);
    if (MPI_INFO_NULL != info_tmp){
	mrc=MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
	VRFY((mrc==MPI_SUCCESS), "MPI_Info_get_nkeys");
    }
    if (VERBOSE_MED)
	h5_dump_info_object(info_tmp);

    /* clean up */
    mrc = MPI_Comm_free(&comm_tmp);
    VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");
    if (MPI_INFO_NULL!=info_tmp){
	mrc = MPI_Info_free(&info_tmp);
	VRFY((mrc==MPI_SUCCESS), "MPI_Info_free");
    }
}
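/*
 * Usage sketch (not part of the original test): because H5Pset_fapl_mpio
 * stores duplicates and H5Pget_fapl_mpio hands back duplicates, the caller
 * owns whatever it retrieves and must free it, which is exactly what the
 * three cases above exercise.  The minimal pattern is shown below; it assumes
 * MPI has already been initialized and omits error checking.
 */
#include <hdf5.h>
#include <mpi.h>

static void
fapl_mpio_dup_pattern(void)
{
    hid_t    fapl = H5Pcreate(H5P_FILE_ACCESS);
    MPI_Comm comm_out;
    MPI_Info info_out;

    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);  /* stores duplicates */
    H5Pget_fapl_mpio(fapl, &comm_out, &info_out);           /* returns duplicates */

    /* the caller owns and must free the returned duplicates */
    MPI_Comm_free(&comm_out);
    if (MPI_INFO_NULL != info_out)
        MPI_Info_free(&info_out);

    H5Pclose(fapl);
}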
Exemplo n.º 29
0
static int
test_mpio_overlap_writes(char *filename)
{
    int mpi_size, mpi_rank;
    MPI_Comm comm;
    MPI_Info info = MPI_INFO_NULL;
    int color, mrc;
    MPI_File  fh;
    int i;
    int vrfyerrs, nerrs;
    unsigned char  buf[4093];    /* use some prime number for size */
    int bufsize = sizeof(buf);
    MPI_Offset  stride;
    MPI_Offset  mpi_off;
    MPI_Status  mpi_stat;


    if (VERBOSE_MED)
  printf("MPIO independent overlapping writes test on file %s\n",
      filename);

    nerrs = 0;
    /* set up MPI parameters */
    MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);

    /* Need at least 2 processes */
    if (mpi_size < 2) {
  if (MAINPROCESS) {
      printf("Need at least 2 processes to run MPIO test.\n");
      printf(" -SKIP- \n");
  }
  return 0;
    }

    /* split processes 0 to n-2 into one communicator and the last one into another */
    color = ((mpi_rank < (mpi_size - 1)) ? 0 : 1);
    mrc = MPI_Comm_split (MPI_COMM_WORLD, color, mpi_rank, &comm);
    VRFY((mrc==MPI_SUCCESS), "Comm_split succeeded");

    if (color==0){
  /* First n-1 processes (color==0) open a file and write it */
  mrc = MPI_File_open(comm, filename, MPI_MODE_CREATE|MPI_MODE_RDWR,
    info, &fh);
  VRFY((mrc==MPI_SUCCESS), "");

  stride = 1;
  mpi_off = mpi_rank*stride;
  while (mpi_off < MPIO_TEST_WRITE_SIZE){
      /* make sure the write does not exceed the TEST_WRITE_SIZE */
      if (mpi_off+stride > MPIO_TEST_WRITE_SIZE)
    stride = MPIO_TEST_WRITE_SIZE - mpi_off;

      /* set data to some trivial pattern for easy verification */
      for (i=0; i<stride; i++)
    buf[i] = (unsigned char)(mpi_off+i);
      mrc = MPI_File_write_at(fh, mpi_off, buf, (int)stride, MPI_BYTE,
        &mpi_stat);
      VRFY((mrc==MPI_SUCCESS), "");

      /* move the offset pointer to last byte written by all processes */
      mpi_off += (mpi_size - 1 - mpi_rank) * stride;

      /* Increase chunk size without exceeding buffer size. */
      /* Then move the starting offset for next write. */
      stride *= 2;
      if (stride > bufsize)
    stride = bufsize;
      mpi_off += mpi_rank*stride;
  }

  /* close file and free the communicator */
  mrc = MPI_File_close(&fh);
  VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE");
  mrc = MPI_Comm_free(&comm);
  VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");

  /* sync with the other waiting processes */
  mrc = MPI_Barrier(MPI_COMM_WORLD);
  VRFY((mrc==MPI_SUCCESS), "Sync after writes");
    }else{
  /* last process waits till writes are done,
   * then opens file to verify data.
   */
  mrc = MPI_Barrier(MPI_COMM_WORLD);
  VRFY((mrc==MPI_SUCCESS), "Sync after writes");

  mrc = MPI_File_open(comm, filename, MPI_MODE_RDONLY,
    info, &fh);
  VRFY((mrc==MPI_SUCCESS), "");

  stride = bufsize;
  for (mpi_off=0; mpi_off < MPIO_TEST_WRITE_SIZE; mpi_off += bufsize){
      /* make sure it does not read beyond end of data */
      if (mpi_off+stride > MPIO_TEST_WRITE_SIZE)
    stride = MPIO_TEST_WRITE_SIZE - mpi_off;
      mrc = MPI_File_read_at(fh, mpi_off, buf, (int)stride, MPI_BYTE,
        &mpi_stat);
      VRFY((mrc==MPI_SUCCESS), "");
      vrfyerrs=0;
      for (i=0; i<stride; i++){
    unsigned char expected;
    expected = (unsigned char)(mpi_off+i);
    if ((expected != buf[i]) &&
        (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)) {
      printf("proc %d: found data error at [%ld], expect %u, got %u\n",
          mpi_rank, (long)(mpi_off+i), expected, buf[i]);
    }
      }
      if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
    printf("proc %d: [more errors ...]\n", mpi_rank);

      nerrs += vrfyerrs;
  }

  /* close file and free the communicator */
  mrc = MPI_File_close(&fh);
  VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE");
  mrc = MPI_Comm_free(&comm);
  VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");
    }

    /*
     * one more sync to ensure all processes have done reading
     * before ending this test.
     */
    mrc = MPI_Barrier(MPI_COMM_WORLD);
    VRFY((mrc==MPI_SUCCESS), "Sync before leaving test");
    return (nerrs);
}
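/*
 * Illustrative sketch (not part of the original test): in the writer branch
 * above each rank writes `stride' bytes, jumps to the end of the region
 * covered by all writers in that round, doubles the stride (capped at the
 * buffer size) and then moves to the start of its own portion of the next
 * round.  The hypothetical trace below reproduces only that offset
 * arithmetic; `bufsize' and `limit' stand in for sizeof(buf) and
 * MPIO_TEST_WRITE_SIZE.
 */
#include <stdio.h>

static void
trace_overlap_offsets(int mpi_rank, int mpi_size, long bufsize, long limit)
{
    long stride  = 1;
    long mpi_off = mpi_rank * stride;

    while (mpi_off < limit) {
        if (mpi_off + stride > limit)
            stride = limit - mpi_off;
        printf("rank %d: write %ld bytes at offset %ld\n",
               mpi_rank, stride, mpi_off);

        mpi_off += (mpi_size - 1 - mpi_rank) * stride;  /* end of this round's writes */
        stride *= 2;                                    /* grow the write size */
        if (stride > bufsize)
            stride = bufsize;
        mpi_off += mpi_rank * stride;                   /* start of next write */
    }
}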
Exemplo n.º 30
-1
/*
 * This creates a dataset serially with 'nchunks' chunks, each of CHUNKSIZE
 * elements. The allocation time is set to H5D_ALLOC_TIME_EARLY. Another
 * routine will open this in parallel for extension test.
 */
void
create_chunked_dataset(const char *filename, int nchunks, write_type write_pattern)
{
    hid_t       file_id, dataset;                          /* handles */
    hid_t       dataspace,memspace;  
    hid_t       cparms;
    hsize_t      dims[1];
    hsize_t      maxdims[1] = {H5S_UNLIMITED};
    
    hsize_t      chunk_dims[1] ={CHUNKSIZE};
    hsize_t     count[1];
    hsize_t     stride[1];
    hsize_t     block[1];
    hsize_t     offset[1];            /* Selection offset within dataspace */
    /* Variables used in reading data back */
    char         buffer[CHUNKSIZE];
    int           i;

    herr_t       hrc;                             

    MPI_Offset  filesize,	    /* actual file size */
		est_filesize;	    /* estimated file size */

    /* set up MPI parameters */
    MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);

    /* Only MAINPROCESS should create the file.  Others just wait. */
    if (MAINPROCESS){

	dims[0]=nchunks*CHUNKSIZE;
	/* Create the data space with unlimited dimensions. */
	dataspace = H5Screate_simple (1, dims, maxdims); 
	VRFY((dataspace >= 0), "");

	memspace = H5Screate_simple(1, chunk_dims, NULL);
	VRFY((memspace >= 0), "");
 
	/* Create a new file. If the file exists, its contents will be overwritten. */
	file_id = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT,
		    H5P_DEFAULT);
	VRFY((file_id >= 0), "H5Fcreate");

	/* Modify dataset creation properties, i.e. enable chunking  */
	cparms = H5Pcreate (H5P_DATASET_CREATE);
	VRFY((cparms >= 0), "");

	hrc = H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY);
	VRFY((hrc >= 0), "");

	hrc = H5Pset_chunk ( cparms, 1, chunk_dims);
	VRFY((hrc >= 0), "");

	/* Create a new dataset within the file using cparms creation properties. */
	dataset = H5Dcreate (file_id, DATASETNAME, H5T_NATIVE_UCHAR, dataspace, cparms);
	VRFY((dataset >= 0), "");

	switch (write_pattern) {

	    /* writes only the second to last chunk */
	    case sec_last:

		memset(buffer, 100, CHUNKSIZE);

		count[0] = 1;
		stride[0] = 1;
		block[0] = chunk_dims[0];
		offset[0] = (nchunks-2)*chunk_dims[0];

		hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
		VRFY((hrc >= 0), "");

		/* Write sec_last chunk */
		hrc = H5Dwrite(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
		VRFY((hrc >= 0), "H5Dwrite");

		break;


	    /* doesn't write anything */
	    case none:

		break;
	}

	/* Close resources */
	hrc = H5Dclose (dataset);
	VRFY((hrc >= 0), "");
	dataset = -1;

	hrc = H5Sclose (dataspace);
	VRFY((hrc >= 0), "");

	hrc = H5Sclose (memspace);
	VRFY((hrc >= 0), "");

	hrc = H5Pclose (cparms);
	VRFY((hrc >= 0), "");

	hrc = H5Fclose (file_id);
	VRFY((hrc >= 0), "");
	file_id = -1;

	/* verify file size */
	filesize = get_filesize(filename);
	est_filesize = nchunks*CHUNKSIZE*sizeof(unsigned char);
	VRFY((filesize >= est_filesize), "file size check");

    }

    /* Make sure all processes are done before exiting this routine.  Otherwise,
     * other tests may start and modify the test data file while some processes
     * of this test are still accessing it.
     */

    MPI_Barrier(MPI_COMM_WORLD);
}