Example #1
//--------------------------------------------------------------------------
// Function:    H5Library::termH5cpp (static)
///\brief       Sends request for the C layer to terminate.
///\par Description
///             If the C library fails to terminate, exit with a failure.
// Programmer   Binh-Minh Ribler - September, 2015
//--------------------------------------------------------------------------
void H5Library::termH5cpp()
{
    // Close the C library
    herr_t ret_value = H5close();
    if (ret_value == -1)
        exit(-1);
}
Example #2
/*-------------------------------------------------------------------------
 * Function:	main
 *
 * Purpose:
 *
 * Return:	Success:
 *
 *		Failure:
 *
 * Programmer:	Robb Matzke
 *              Tuesday, June 16, 1998
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
int
main (void)
{
    int	nerrors=0;

    /*
     * Open the library explicitly for thread-safe builds, so per-thread
     * things are initialized correctly.
     */
#ifdef H5_HAVE_THREADSAFE
    H5open();
#endif  /* H5_HAVE_THREADSAFE */

    nerrors += test_find ()<0?1:0;
    nerrors += test_set  ()<0?1:0;
    nerrors += test_clear()<0?1:0;
    nerrors += test_copy ()<0?1:0;
    nerrors += test_shift()<0?1:0;
    nerrors += test_increment  ()<0?1:0;
    nerrors += test_decrement  ()<0?1:0;
    nerrors += test_negate  ()<0?1:0;

    if (nerrors) {
        printf("***** %u FAILURE%s! *****\n",
               nerrors, 1==nerrors?"":"S");
        exit(1);
    }
    printf("All bit tests passed.\n");

#ifdef H5_HAVE_THREADSAFE
    H5close();
#endif  /* H5_HAVE_THREADSAFE */
    return 0;
}
Example #3
/*-------------------------------------------------------------------------
 * Function:	main
 *
 * Purpose:
 *
 * Return:	Success:
 *
 *		Failure:
 *
 * Programmer:	Robb Matzke
 *              Tuesday, June 16, 1998
 *
 *-------------------------------------------------------------------------
 */
int
main(void)
{
    int	nerrors = 0;

    /*
     * Open the library explicitly.
     */
    H5open();

    nerrors += test_find() < 0 ? 1 : 0;
    nerrors += test_set() < 0 ? 1 : 0;
    nerrors += test_clear() < 0 ? 1 : 0;
    nerrors += test_copy() < 0 ? 1 : 0;
    nerrors += test_shift() < 0 ? 1 : 0;
    nerrors += test_increment() < 0 ? 1 : 0;
    nerrors += test_decrement() < 0 ? 1 : 0;
    nerrors += test_negate() < 0 ? 1 : 0;

    if(nerrors) {
        printf("***** %u FAILURE%s! *****\n",
               nerrors, 1 == nerrors ? "" : "S");
        exit(1);
    }
    printf("All bit tests passed.\n");

    H5close();

    return 0;
}
Example #4
//--------------------------------------------------------------------------
// Function:    H5Library::close (static)
///\brief       Flushes all data to disk, closes files, and cleans up memory.
///
///\exception   H5::LibraryIException
// Programmer   Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void H5Library::close()
{
    herr_t ret_value = H5close();
    if (ret_value < 0)
    {
        throw LibraryIException("H5Library::close", "H5close failed");
    }
}
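Because H5Library::close() reports failure by throwing LibraryIException, callers usually wrap the final shutdown in a try/catch. The following is only a minimal usage sketch, not part of the examples in this listing; the application work is a hypothetical placeholder.

#include "H5Cpp.h"
#include <iostream>

int main()
{
    try {
        // ... application work with the HDF5 C++ API ...
        H5::H5Library::close();   // flush data, close files, release library resources
    }
    catch (const H5::LibraryIException &e) {
        std::cerr << "HDF5 shutdown failed: " << e.getDetailMsg() << std::endl;
        return 1;
    }
    return 0;
}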
Example #5
/*---------------------------------------------------------------------------
 * Name:              h5close_c
 * Purpose:           Calls H5close to close the HDF5 C library
 * Returns:           0 on success, -1 on failure
 * Programmer:        Elena Pourmal
 *                    Friday, November 17, 2000
 * Modifications:
 *---------------------------------------------------------------------------*/
int_f
nh5close_c()
{

    int ret_value = -1;
    if (H5close() < 0) return ret_value;
    ret_value = 0;
    return ret_value;
}
Example #6
int main ( int argc, char *argv[] )
{
    char *prog_name = "mat2hdf";
    int c;
    scats_mat_t *mat;
    SCATS_MATVAR *matvar;
    hid_t   hdf_id;

    if ( argc > 1 && !strcmp(argv[1],"--version")) {
        printf("mat2hdf v%d.%d.%d (compiled %s, %s for %s)\n",
               SCATS_MAJOR_VERSION, SCATS_MINOR_VERSION, SCATS_RELEASE_LEVEL,
               __DATE__, __TIME__, SCATS_PLATFORM );
        exit(0);
    } else if ( argc > 1 && !strcmp(argv[1],"--help") ) {
        Scats_Help(helpstr);
        exit(0);
    } else if ( argc < 3 )
        Scats_Help(helpstr);

    Scats_LogInit(prog_name);

    while ((c = getopt(argc, argv, "v")) != EOF) {
        switch (c) {
            case 'v':
                Scats_SetVerbose(1,0);
                break;
            default:
                Scats_Warning("%c not a valid option\n", c);
                break;
        }
    }

    mat = Scats_MatOpen( argv[optind],SCATS_ACC_RDONLY );
    if ( !mat )
        Scats_Error("Error opening %s\n", argv[1]);

    H5open();

    hdf_id = Scats_HDFOpen(argv[optind+1], SCATS_ACC_RDWR);
    if ( hdf_id < 0 ) {
        printf("Error opening HDF file %s\n", argv[2]);
        Scats_MatClose(mat);
        return 1;
    }

    while ( (matvar = Scats_MatVarReadNext(mat)) != NULL ) {
        write_mat(hdf_id,matvar);
        Scats_MatVarFree(matvar);
        matvar = NULL;
    }

    Scats_MatClose(mat);
    Scats_HDFClose(hdf_id);
    H5close();

    return 0;
}
Example #7
/****if* H5_f/h5close_c
 * NAME
 *              h5close_c
 * PURPOSE
 *           Calls H5close to close the HDF5 C library
 * RETURNS
 *           0 on success, -1 on failure
 * AUTHOR
 *        Elena Pourmal
 * SOURCE
 */
int_f
nh5close_c(void)
/******/
{
    int ret_value = -1;

    if (H5close() < 0) return ret_value;
    ret_value = 0;
    return ret_value;
}
Example #8
/*
 * Class:     hdf_hdf5lib_H5
 * Method:    H5close
 * Signature: ()I
 */
JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_H5_H5close
    (JNIEnv *env, jclass clss)
{
    herr_t retVal = H5close();
    if (retVal < 0)
        h5libraryError(env);

    return (jint)retVal;
} /* end Java_hdf_hdf5lib_H5_H5close */
Example #9
med_err
MEDlibraryClose(void)
{
  med_err _ret = -1;

  _MEDmodeErreurVerrouiller();

  if ( H5close() < 0) {
    MED_ERR_(_ret,MED_ERR_CLOSE,MED_ERR_LIBRARY,"");
    goto ERROR;
  }

  _ret = 0;
 ERROR:

  return _ret;
}
Example #10
bool dump_hdf5(const char *file_name, Mesh *mesh) {
	herr_t status;

	// init HDF5
	H5open();

	// create a file
	hid_t file_id = H5Fcreate(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

	// create main group
	hid_t mesh_group_id = H5Gcreate(file_id, "/mesh3d", 0);

	// version
	hsize_t dims = 2;
	hid_t dataspace_id = H5Screate_simple(1, &dims, NULL);
	hid_t attr_ver = H5Acreate(mesh_group_id, "version", H5T_STD_I8BE, dataspace_id, H5P_DEFAULT);
	char attr_data[2] = { 1, 0 };
	status = H5Awrite(attr_ver, H5T_NATIVE_CHAR, attr_data);
	H5Aclose(attr_ver);
    H5Sclose(dataspace_id);

	// description
    hid_t type = H5Tcopy(H5T_C_S1);
    status = H5Tset_size(type, H5T_VARIABLE);
    const char *descr = "Test mesh";
	hid_t dataspace_id2 = H5Screate(H5S_SCALAR);
	hid_t attr_descr = H5Acreate(mesh_group_id, "description", type, dataspace_id2, H5P_DEFAULT);
	status = H5Awrite(attr_descr, type, &descr);
	H5Aclose(attr_descr);
    H5Tclose(type);
    H5Sclose(dataspace_id2);


    save_vertices(mesh_group_id, mesh);
    save_elements(mesh_group_id, mesh);
    save_bc(mesh_group_id, mesh);

	status = H5Gclose(mesh_group_id);		// close the group
	status = H5Fclose(file_id);			// close the file

	// deinit HDF5
	H5close();

	return 0;
}
Example #11
/*-------------------------------------------------------------------------
 * Function:  h5_reset
 *
 * Purpose:  Reset the library by closing it.
 *
 * Return:  void
 *
 * Programmer:  Robb Matzke
 *              Friday, November 20, 1998
 *
 *-------------------------------------------------------------------------
 */
void
h5_reset(void)
{
    HDfflush(stdout);
    HDfflush(stderr);
    H5close();

    /* Save current error stack reporting routine and redirect to our local one */
    HDassert(err_func == NULL);
    H5Eget_auto2(H5E_DEFAULT, &err_func, NULL);
    H5Eset_auto2(H5E_DEFAULT, h5_errors, NULL);

    /*
     * I commented this chunk of code out because it's not clear what diagnostics
     *      were being output and under what circumstances, and creating this file
     *      is throwing off debugging some of the tests.  I can't see any _direct_
     *      harm in keeping this section of code, but I can't see any _direct_
     *      benefit right now either.  If we figure out under which circumstances
     *      diagnostics are being output, we should enable this behavior based on
     *      appropriate configure flags/macros.  QAK - 2007/12/20
     */
#ifdef OLD_WAY
    {
        char  filename[1024];

        /*
         * Cause the library to emit some diagnostics early so they don't
         * interfere with other formatted output.
         */
        sprintf(filename, "/tmp/h5emit-%05d.h5", HDgetpid());
        H5E_BEGIN_TRY {
            hid_t file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT,
            H5P_DEFAULT);
            hid_t grp = H5Gcreate2(file, "emit", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
            H5Gclose(grp);
            H5Fclose(file);
            HDunlink(filename);
        } H5E_END_TRY;
    }
#endif /* OLD_WAY */
}
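The error-stack redirection above installs a callback of type H5E_auto2_t through H5Eset_auto2(). The suite's own h5_errors routine is not shown in this listing, so the sketch below is a hypothetical stand-in that only illustrates the callback shape: it prints the offending error stack to stderr.

#include "hdf5.h"
#include <stdio.h>

/* Hypothetical H5E_auto2_t callback (not the suite's actual h5_errors). */
static herr_t
my_errors(hid_t estack, void *client_data)
{
    (void)client_data;                 /* unused in this sketch */
    return H5Eprint2(estack, stderr);  /* dump the error stack that triggered the callback */
}

/* Installed the same way h5_reset() installs h5_errors:
 *     H5Eset_auto2(H5E_DEFAULT, my_errors, NULL);
 */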
Example #12
extern void profile_fini(void)
{
    H5close(); /* make sure all H5 Objects are closed */

    return;
}
Example #13
/* ****************************************************************************************************************************** */
int main(int argc, char *argv[]) {
  hid_t   fileID, datasetID, dataspaceID;
  hsize_t dims[3] = {MAX_X, MAX_Y, MAX_T};
  herr_t  hErrVal;
  float   floatDataPoint[10];
  int     id, i, j, k;
  float   temp[MAX_X][MAX_Y][MAX_T];
  char    *strPerDim[3];
  float   valPerDim[3];

  /* Create phony data. */
  for(i=0,id=0; i<MAX_X; i++)
    for(j=0; j<MAX_Y; j++)
      for(k=0; k<MAX_T; k++)
        temp[i][j][k] = id++;
   
  /* Load the library -- not required on most platforms. */
  hErrVal = H5open();
  mjrHDF5_chkError(hErrVal);

  /* Create a new file using default properties. */
  fileID = H5Fcreate(TST_FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
  mjrHDF5_chkError(fileID);

  hErrVal = mjrHDF5_put_gblAtt_oneFCstr(fileID, "Author", "Mitch Richling");
  mjrHDF5_chkError(hErrVal);
  hErrVal = mjrHDF5_put_gblAtt_oneFCstr(fileID, "title",  "Example File");
  mjrHDF5_chkError(hErrVal);

  /* Create the data space for the dataset. */
  dataspaceID = H5Screate_simple(3, dims, NULL);
  mjrHDF5_chkError(dataspaceID);

  /* Create the dataset. */
  datasetID = H5Dcreate(fileID, "/dset", H5T_IEEE_F32BE, dataspaceID, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  mjrHDF5_chkError(datasetID);

  /*  Try to use standard unit names and formats... */
  hErrVal = mjrHDF5_put_att_oneFCstr(datasetID, "units", "Celsius");
  mjrHDF5_chkError(hErrVal);

  /* Use this value for labeling plots and the like by programs that
     also support netCDF */
  hErrVal = mjrHDF5_put_att_oneFCstr(datasetID, "long_name", "Some Name For Plots");
  mjrHDF5_chkError(hErrVal);

  /* Add an array of strings to name each dim.*/
  strPerDim[0] = "lat";
  strPerDim[1] = "lon";
  strPerDim[2] = "temp";
  hErrVal = mjrHDF5_put_att_arrVCstr(datasetID, "dimNames", strPerDim, 3);
  mjrHDF5_chkError(hErrVal);

  /* Add an array of strings to give units for each dim. */
  strPerDim[0] = "degrees";
  strPerDim[1] = "degrees";
  strPerDim[2] = "hours";
  hErrVal = mjrHDF5_put_att_arrVCstr(datasetID, "dimUnits", strPerDim, 3);
  mjrHDF5_chkError(hErrVal);

  /* Add annotation describing minimum values for each dim. */
  valPerDim[0] = 0.0;
  valPerDim[1] = 0.0;
  valPerDim[2] = 0.0;
  hErrVal = mjrHDF5_put_att_arry(datasetID, H5T_IEEE_F32BE, "dimStart", valPerDim, H5T_NATIVE_FLOAT, 3);
  mjrHDF5_chkError(hErrVal);

  /* Add annotation describing the "step" between values on each dim. */
  valPerDim[0] = 10.0;
  valPerDim[1] = 20.0;
  valPerDim[2] = 24.0;
  hErrVal = mjrHDF5_put_att_arry(datasetID, H5T_IEEE_F32BE, "dimStep", valPerDim, H5T_NATIVE_FLOAT, 3);
  mjrHDF5_chkError(hErrVal);

  /* Just for fun, create a floating point attribute! */
  floatDataPoint[0] = 1.234;
  mjrHDF5_put_att_sclr(datasetID, H5T_IEEE_F32BE, "Pressure", floatDataPoint, H5T_NATIVE_FLOAT);

  /* Write some data into our dataset. */
  hErrVal = H5Dwrite(datasetID, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, temp);
  mjrHDF5_chkError(hErrVal);

  /* End access to the dataset and release resources used by it. */
  hErrVal = H5Dclose(datasetID);
  mjrHDF5_chkError(hErrVal);

  /* Terminate access to the data space. */ 
  hErrVal = H5Sclose(dataspaceID);
  mjrHDF5_chkError(hErrVal);
  
  /* Close the file. */
  hErrVal = H5Fclose(fileID);
  mjrHDF5_chkError(hErrVal);

  /* Unload the library and free any remaining resources. */
  hErrVal = H5close();
  mjrHDF5_chkError(hErrVal);

  return 0;
} /* end func main */
Example #14
int main(int argc, char **argv)
{
    int mpi_size, mpi_rank;				/* mpi variables */
    H5Ptest_param_t ndsets_params, ngroups_params;
    H5Ptest_param_t collngroups_params;
    H5Ptest_param_t io_mode_confusion_params;

    /* Un-buffer the stdout and stderr */
    setbuf(stderr, NULL);
    setbuf(stdout, NULL);

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    dim0 = ROW_FACTOR*mpi_size;
    dim1 = COL_FACTOR*mpi_size;

    if (MAINPROCESS) {
        printf("===================================\n");
        printf("PHDF5 TESTS START\n");
        printf("===================================\n");
    }
    H5open();
    h5_show_hostname();

    /* Initialize testing framework */
    TestInit(argv[0], usage, parse_options);

    /* Tests are generally arranged from least to most complexity... */
    AddTest("mpiodup", test_fapl_mpio_dup, NULL,
            "fapl_mpio duplicate", NULL);
    AddTest("posixdup", test_fapl_mpiposix_dup, NULL,
            "fapl_mpiposix duplicate", NULL);

    AddTest("split", test_split_comm_access, NULL,
            "dataset using split communicators", PARATESTFILE);

    AddTest("idsetw", dataset_writeInd, NULL,
            "dataset independent write", PARATESTFILE);
    AddTest("idsetr", dataset_readInd, NULL,
            "dataset independent read", PARATESTFILE);

    AddTest("cdsetw", dataset_writeAll, NULL,
            "dataset collective write", PARATESTFILE);
    AddTest("cdsetr", dataset_readAll, NULL,
            "dataset collective read", PARATESTFILE);

    AddTest("eidsetw", extend_writeInd, NULL,
            "extendible dataset independent write", PARATESTFILE);
    AddTest("eidsetr", extend_readInd, NULL,
            "extendible dataset independent read", PARATESTFILE);
    AddTest("ecdsetw", extend_writeAll, NULL,
            "extendible dataset collective write", PARATESTFILE);
    AddTest("ecdsetr", extend_readAll, NULL,
            "extendible dataset collective read", PARATESTFILE);
    AddTest("eidsetw2", extend_writeInd2, NULL,
            "extendible dataset independent write #2", PARATESTFILE);
    AddTest("selnone", none_selection_chunk, NULL,
            "chunked dataset with none-selection", PARATESTFILE);
    AddTest("calloc", test_chunk_alloc, NULL,
            "parallel extend Chunked allocation on serial file", PARATESTFILE);
    AddTest("fltread", test_filter_read, NULL,
            "parallel read of dataset written serially with filters", PARATESTFILE);

#ifdef H5_HAVE_FILTER_DEFLATE
    AddTest("cmpdsetr", compress_readAll, NULL,
            "compressed dataset collective read", PARATESTFILE);
#endif /* H5_HAVE_FILTER_DEFLATE */

    ndsets_params.name = PARATESTFILE;
    ndsets_params.count = ndatasets;
    AddTest("ndsetw", multiple_dset_write, NULL,
            "multiple datasets write", &ndsets_params);

    ngroups_params.name = PARATESTFILE;
    ngroups_params.count = ngroups;
    AddTest("ngrpw", multiple_group_write, NULL,
            "multiple groups write", &ngroups_params);
    AddTest("ngrpr", multiple_group_read, NULL,
            "multiple groups read", &ngroups_params);

    AddTest("compact", compact_dataset, NULL,
            "compact dataset test", PARATESTFILE);

    collngroups_params.name = PARATESTFILE;
    collngroups_params.count = ngroups;
    AddTest("cngrpw", collective_group_write, NULL,
            "collective group and dataset write", &collngroups_params);
    AddTest("ingrpr", independent_group_read, NULL,
            "independent group and dataset read", &collngroups_params);
    AddTest("bigdset", big_dataset, NULL,
            "big dataset test", PARATESTFILE);
    AddTest("fill", dataset_fillvalue, NULL,
            "dataset fill value", PARATESTFILE);

    AddTest("cchunk1",
            coll_chunk1,NULL, "simple collective chunk io",PARATESTFILE);
    AddTest("cchunk2",
            coll_chunk2,NULL, "noncontiguous collective chunk io",PARATESTFILE);
    AddTest("cchunk3",
            coll_chunk3,NULL, "multi-chunk collective chunk io",PARATESTFILE);
    AddTest("cchunk4",
            coll_chunk4,NULL, "collective chunk io with partial non-selection ",PARATESTFILE);

    if((mpi_size < 3)&& MAINPROCESS ) {
        printf("Collective chunk IO optimization APIs ");
        printf("needs at least 3 processes to participate\n");
        printf("Collective chunk IO API tests will be skipped \n");
    }
    AddTest((mpi_size <3)? "-cchunk5":"cchunk5" ,
            coll_chunk5,NULL,
            "linked chunk collective IO without optimization",PARATESTFILE);
    AddTest((mpi_size < 3)? "-cchunk6" : "cchunk6",
            coll_chunk6,NULL,
            "multi-chunk collective IO without optimization",PARATESTFILE);
    AddTest((mpi_size < 3)? "-cchunk7" : "cchunk7",
            coll_chunk7,NULL,
            "linked chunk collective IO with optimization",PARATESTFILE);
    AddTest((mpi_size < 3)? "-cchunk8" : "cchunk8",
            coll_chunk8,NULL,
            "linked chunk collective IO transferring to multi-chunk",PARATESTFILE);
    AddTest((mpi_size < 3)? "-cchunk9" : "cchunk9",
            coll_chunk9,NULL,
            "multiple chunk collective IO with optimization",PARATESTFILE);
    AddTest((mpi_size < 3)? "-cchunk10" : "cchunk10",
            coll_chunk10,NULL,
            "multiple chunk collective IO transferring to independent IO",PARATESTFILE);



    /* irregular collective IO tests*/
    AddTest("ccontw",
            coll_irregular_cont_write,NULL,
            "collective irregular contiguous write",PARATESTFILE);
    AddTest("ccontr",
            coll_irregular_cont_read,NULL,
            "collective irregular contiguous read",PARATESTFILE);
    AddTest("cschunkw",
            coll_irregular_simple_chunk_write,NULL,
            "collective irregular simple chunk write",PARATESTFILE);
    AddTest("cschunkr",
            coll_irregular_simple_chunk_read,NULL,
            "collective irregular simple chunk read",PARATESTFILE);
    AddTest("ccchunkw",
            coll_irregular_complex_chunk_write,NULL,
            "collective irregular complex chunk write",PARATESTFILE);
    AddTest("ccchunkr",
            coll_irregular_complex_chunk_read,NULL,
            "collective irregular complex chunk read",PARATESTFILE);


#if 0
    if((mpi_size > 3) && MAINPROCESS) {
        printf("Collective irregular chunk IO tests haven't been tested \n");
        printf("  for the number of process greater than 3.\n");
        printf("Please try with the number of process \n");
        printf("  no greater than 3 for collective irregular chunk IO test.\n");
        printf("Collective irregular chunk tests will be skipped \n");
    }
    AddTest((mpi_size > 3) ? "-ccontw" : "ccontw",
            coll_irregular_cont_write,NULL,
            "collective irregular contiguous write",PARATESTFILE);
    AddTest((mpi_size > 3) ? "-ccontr" : "ccontr",
            coll_irregular_cont_read,NULL,
            "collective irregular contiguous read",PARATESTFILE);
    AddTest((mpi_size > 3) ? "-cschunkw" : "cschunkw",
            coll_irregular_simple_chunk_write,NULL,
            "collective irregular simple chunk write",PARATESTFILE);
    AddTest((mpi_size > 3) ? "-cschunkr" : "cschunkr",
            coll_irregular_simple_chunk_read,NULL,
            "collective irregular simple chunk read",PARATESTFILE);
    AddTest((mpi_size > 3) ? "-ccchunkw" : "ccchunkw",
            coll_irregular_complex_chunk_write,NULL,
            "collective irregular complex chunk write",PARATESTFILE);
    AddTest((mpi_size > 3) ? "-ccchunkr" : "ccchunkr",
            coll_irregular_complex_chunk_read,NULL,
            "collective irregular complex chunk read",PARATESTFILE);
#endif


    AddTest("null", null_dataset, NULL,
            "null dataset test", PARATESTFILE);

    io_mode_confusion_params.name  = PARATESTFILE;
    io_mode_confusion_params.count = 0; /* value not used */

    AddTest("I/Omodeconf", io_mode_confusion, NULL,
            "I/O mode confusion test -- hangs quickly on failure",
            &io_mode_confusion_params);

    /* Display testing information */
    TestInfo(argv[0]);

    /* setup file access property list */
    fapl = H5Pcreate (H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);

    /* Parse command line arguments */
    TestParseCmdLine(argc, argv);

    if (facc_type == FACC_MPIPOSIX && MAINPROCESS) {
        printf("===================================\n"
               "   Using MPIPOSIX driver\n"
               "===================================\n");
    }

    if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS) {
        printf("===================================\n"
               "   Using Independent I/O with file set view to replace collective I/O \n"
               "===================================\n");
    }


    /* Perform requested testing */
    PerformTests();

    /* make sure all processes are finished before final report, cleanup
     * and exit.
     */
    MPI_Barrier(MPI_COMM_WORLD);

    /* Display test summary, if requested */
    if (MAINPROCESS && GetTestSummary())
        TestSummary();

    /* Clean up test files */
    h5_cleanup(FILENAME, fapl);

    nerrors += GetTestNumErrs();

    /* Gather errors from all processes */
    {
        int temp;
        MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
        nerrors=temp;
    }

    if (MAINPROCESS) {		/* only process 0 reports */
        printf("===================================\n");
        if (nerrors)
            printf("***PHDF5 tests detected %d errors***\n", nerrors);
        else
            printf("PHDF5 tests finished with no errors\n");
        printf("===================================\n");
    }
    /* close HDF5 library */
    H5close();

    /* MPI_Finalize must be called AFTER H5close which may use MPI calls */
    MPI_Finalize();

    /* cannot just return (nerrors) because exit code is limited to 1byte */
    return(nerrors!=0);
}
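The closing comments of this test encode an ordering rule that several of these examples rely on: H5close() may itself issue MPI calls, so it must run before MPI_Finalize(). Below is a minimal sketch of that skeleton, with the parallel work reduced to a placeholder comment; nothing here beyond the call order is taken from the test above.

#include "hdf5.h"
#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    H5open();                 /* explicit HDF5 initialization */

    /* ... parallel HDF5 work would go here ... */

    H5close();                /* may issue MPI calls internally ... */
    MPI_Finalize();           /* ... so MPI is finalized last */
    return 0;
}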
Example #15
int
main(int argc, char **argv)
{
    int mpi_size, mpi_rank;        /* mpi variables */
    int ret_code;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* Attempt to turn off atexit post processing so that in case errors
     * happen during the test and the process is aborted, it will not get
     * hang in the atexit post processing in which it may try to make MPI
     * calls.  By then, MPI calls may not work.
     */
    if (H5dont_atexit() < 0) {
        printf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank);
    }
    H5open();
    if (parse_options(argc, argv) != 0){
  if (MAINPROCESS)
      usage();
  goto finish;
    }

    if (MAINPROCESS){
  printf("===================================\n");
  printf("MPI functionality tests\n");
  printf("===================================\n");
    }

    if (VERBOSE_MED)
  h5_show_hostname();

    fapl = H5Pcreate (H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);

    /* set alarm. */
    ALARM_ON;


    /*=======================================
     * MPIO 1 write Many read test
     *=======================================*/
    MPI_BANNER("MPIO 1 write Many read test...");
    ret_code = test_mpio_1wMr(filenames[0], USENONE);
    ret_code = errors_sum(ret_code);
    if (mpi_rank==0 && ret_code > 0){
  printf("***FAILED with %d total errors\n", ret_code);
  nerrors += ret_code;
    }

    /* test atomicity and file sync in high verbose mode only         */
    /* since they often hang when broken and PHDF5 does not use them. */
    if (VERBOSE_HI){
  MPI_BANNER("MPIO 1 write Many read test with atomicity...");
  ret_code = test_mpio_1wMr(filenames[0], USEATOM);
  ret_code = errors_sum(ret_code);
  if (mpi_rank==0 && ret_code > 0){
      printf("***FAILED with %d total errors\n", ret_code);
      nerrors += ret_code;
  }

  MPI_BANNER("MPIO 1 write Many read test with file sync...");
  ret_code = test_mpio_1wMr(filenames[0], USEFSYNC);
  ret_code = errors_sum(ret_code);
  if (mpi_rank==0 && ret_code > 0){
      printf("***FAILED with %d total errors\n", ret_code);
      nerrors += ret_code;
  }
    }


    /*=======================================
     * MPIO MPIO File size range test
     *=======================================*/
    MPI_BANNER("MPIO File size range test...");
#ifndef H5_HAVE_WIN32_API
    ret_code = test_mpio_gb_file(filenames[0]);
    ret_code = errors_sum(ret_code);
    if (mpi_rank==0 && ret_code > 0){
	printf("***FAILED with %d total errors\n", ret_code);
	nerrors += ret_code;
    }
#else
    if (mpi_rank==0)
        printf(" will be skipped on Windows (JIRA HDDFV-8064)\n");
#endif


    /*=======================================
     * MPIO independent overlapping writes
     *=======================================*/
    MPI_BANNER("MPIO independent overlapping writes...");
    ret_code = test_mpio_overlap_writes(filenames[0]);
    ret_code = errors_sum(ret_code);
    if (mpi_rank==0 && ret_code > 0){
  printf("***FAILED with %d total errors\n", ret_code);
  nerrors += ret_code;
    }

    /*=======================================
     * MPIO complicated derived datatype test
     *=======================================*/
    MPI_BANNER("MPIO complicated derived datatype test...");
    ret_code = test_mpio_derived_dtype(filenames[0]);
    ret_code = errors_sum(ret_code);
    if (mpi_rank==0 && ret_code > 0){
  printf("***FAILED with %d total errors\n", ret_code);
  nerrors += ret_code;
    }

    /*=======================================
     * MPIO special collective IO  test
     *=======================================*/
    if (mpi_size < 4) {
        MPI_BANNER("MPIO special collective io test SKIPPED.");
        if (mpi_rank == 0)
            printf("This test needs at least four processes to run.\n");
        ret_code = 0;
        goto sc_finish;
    } /* end if */

    MPI_BANNER("MPIO special collective io test...");
    ret_code = test_mpio_special_collective(filenames[0]);

sc_finish:
    ret_code = errors_sum(ret_code);
    if (mpi_rank==0 && ret_code > 0){
  printf("***FAILED with %d total errors\n", ret_code);
  nerrors += ret_code;
    }


finish:
    /* make sure all processes are finished before final report, cleanup
     * and exit.
     */
    MPI_Barrier(MPI_COMM_WORLD);
    if (MAINPROCESS){    /* only process 0 reports */
  printf("===================================\n");
  if (nerrors){
      printf("***MPI tests detected %d errors***\n", nerrors);
  }
  else{
      printf("MPI tests finished with no errors\n");
  }
  printf("===================================\n");
    }

    /* turn off alarm */
    ALARM_OFF;

    h5_cleanup(FILENAME, fapl);
    H5close();

    /* MPI_Finalize must be called AFTER H5close which may use MPI calls */
    MPI_Finalize();

    /* cannot just return (nerrors) because exit code is limited to 1byte */
    return(nerrors!=0);
}
Example #16
	  /*  Get the size of the type */
	  dataSize = H5Tget_size(dataTypeID);
	  if(dataSize == 0) {
		printf("ERROR: Failure in H5Tget_size().\n");
		exit(1);
	  } /* end if */
	  printf(" Size: %3lu ", (unsigned long)dataSize);
	  hErrVal = H5Tclose(dataTypeID);
	  mjrHDF5_chkError(hErrVal);

      printf(" Name: %s \n", attrName);

      hErrVal = H5Aclose(attrID);
	  mjrHDF5_chkError(hErrVal);
    } /* end for */
  } /* end if */

  /* Close the dataset. */
  hErrVal = H5Dclose(dataSetID);

  /* Close the file. */
  hErrVal = H5Fclose(fileID);

  /* Unload the library and free any remaining resources. */
  hErrVal = H5close();
  mjrHDF5_chkError(hErrVal);

  return 0;

} /* end func main */
Example #17
void stfio::importHDF5File(const std::string& fName, Recording& ReturnData, ProgressInfo& progDlg) {
    /* Open the existing file read-only. */
    hid_t file_id = H5Fopen(fName.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
    
    /* H5TBread_table
       const int NRECORDS = 1;*/
    const int NFIELDS    = 3;

    /* Calculate the size and the offsets of our struct members in memory */
    size_t rt_offset[NFIELDS] = {  HOFFSET( rt, channels ),
                                   HOFFSET( rt, date ),
                                   HOFFSET( rt, time )};
    rt rt_buf[1];
    size_t rt_sizes[NFIELDS] = { sizeof( rt_buf[0].channels),
                                 sizeof( rt_buf[0].date),
                                 sizeof( rt_buf[0].time)};
    herr_t status=H5TBread_table( file_id, "description", sizeof(rt), rt_offset, rt_sizes, rt_buf );
    if (status < 0) {
        std::string errorMsg("Exception while reading description in stfio::importHDF5File");
        throw std::runtime_error(errorMsg);
    }
    int numberChannels =rt_buf[0].channels;
    if ( ReturnData.SetDate(rt_buf[0].date)
      || ReturnData.SetTime(rt_buf[0].time) ) {
        std::cout << "Warning HDF5: could not decode date/time " << rt_buf[0].date << " " << rt_buf[0].time << std::endl;
    }



    /* Variables for querying dataset information. */
    hsize_t dims;
    H5T_class_t class_id;
    size_t type_size;

    std::string description, comment;
    hid_t group_id = H5Gopen2(file_id, "/comment", H5P_DEFAULT);
    status = H5Lexists(group_id, "/comment/description", 0);
    if (status==1) {
        status = H5LTget_dataset_info( file_id, "/comment/description", &dims, &class_id, &type_size );
        if (status >= 0) {
            description.resize( type_size );
            status = H5LTread_dataset_string (file_id, "/comment/description", &description[0]);
            if (status < 0) {
                std::string errorMsg("Exception while reading description in stfio::importHDF5File");
                throw std::runtime_error(errorMsg);
            }
        }
    }
    ReturnData.SetFileDescription(description);
    
    status = H5Lexists(group_id, "/comment/comment", 0);
    if (status==1) {
        status = H5LTget_dataset_info( file_id, "/comment/comment", &dims, &class_id, &type_size );
        if (status >= 0) {
            comment.resize( type_size );
            status = H5LTread_dataset_string (file_id, "/comment/comment", &comment[0]);
            if (status < 0) {
                std::string errorMsg("Exception while reading comment in stfio::importHDF5File");
                throw std::runtime_error(errorMsg);
            }
        }
    }
    ReturnData.SetComment(comment);

    double dt = 1.0;
    std::string yunits = "";
    for (int n_c=0;n_c<numberChannels;++n_c) {
        /* Calculate the size and the offsets of our struct members in memory */
        size_t ct_offset[NFIELDS] = { HOFFSET( ct, n_sections ) };
        ct ct_buf[1];
        size_t ct_sizes[NFIELDS] = { sizeof( ct_buf[0].n_sections) };

        /* Read channel name */
        hsize_t cdims;
        H5T_class_t cclass_id;
        size_t ctype_size;
        std::ostringstream desc_path;
        desc_path << "/channels/ch" << (n_c);
        status = H5LTget_dataset_info( file_id, desc_path.str().c_str(), &cdims, &cclass_id, &ctype_size );
        if (status < 0) {
            std::string errorMsg("Exception while reading channel in stfio::importHDF5File");
            throw std::runtime_error(errorMsg);
        }
        hid_t string_typec= H5Tcopy( H5T_C_S1 );
        H5Tset_size( string_typec,  ctype_size );
        std::vector<char> szchannel_name(ctype_size);
        // szchannel_name.reset( new char[ctype_size] );
        status = H5LTread_dataset(file_id, desc_path.str().c_str(), string_typec, &szchannel_name[0] );
        if (status < 0) {
            std::string errorMsg("Exception while reading channel name in stfio::importHDF5File");
            throw std::runtime_error(errorMsg);
        }
        std::ostringstream channel_name;
        for (std::size_t c=0; c<ctype_size; ++c) {
            channel_name << szchannel_name[c];
        }

        std::ostringstream channel_path;
        channel_path << "/" << channel_name.str();

        hid_t channel_group = H5Gopen2(file_id, channel_path.str().c_str(), H5P_DEFAULT );
        status=H5TBread_table( channel_group, "description", sizeof(ct), ct_offset, ct_sizes, ct_buf );
        if (status < 0) {
            std::string errorMsg("Exception while reading channel description in stfio::importHDF5File");
            throw std::runtime_error(errorMsg);
        }
        Channel TempChannel(ct_buf[0].n_sections);
        TempChannel.SetChannelName( channel_name.str() );
        int max_log10 = 0;
        if (ct_buf[0].n_sections > 1) {
            max_log10 = int(log10((double)ct_buf[0].n_sections-1.0));
        }

        for (int n_s=0; n_s < ct_buf[0].n_sections; ++n_s) {
            int progbar =
                // Channel contribution:
                (int)(((double)n_c/(double)numberChannels)*100.0+
                      // Section contribution:
                      (double)(n_s)/(double)ct_buf[0].n_sections*(100.0/numberChannels));
            std::ostringstream progStr;
            progStr << "Reading channel #" << n_c + 1 << " of " << numberChannels
                    << ", Section #" << n_s+1 << " of " << ct_buf[0].n_sections;
            progDlg.Update(progbar, progStr.str());
            
            // construct a number with leading zeros:
            int n10 = 0;
            if (n_s > 0) {
                n10 = int(log10((double)n_s));
            }
            std::ostringstream strZero; strZero << "";
            for (int n_z=n10; n_z < max_log10; ++n_z) {
                strZero << "0";
            }

            // construct a section name:
            std::ostringstream section_name;
            section_name << "sec" << n_s;

            // create a child group in the channel:
            std::ostringstream section_path;
            section_path << channel_path.str() << "/" << "section_" << strZero.str() << n_s;
            hid_t section_group = H5Gopen2(file_id, section_path.str().c_str(), H5P_DEFAULT );

            std::ostringstream data_path; data_path << section_path.str() << "/data";
            hsize_t sdims;
            H5T_class_t sclass_id;
            size_t stype_size;
            status = H5LTget_dataset_info( file_id, data_path.str().c_str(), &sdims, &sclass_id, &stype_size );
            if (status < 0) {
                std::string errorMsg("Exception while reading data information in stfio::importHDF5File");
                throw std::runtime_error(errorMsg);
            }
            Vector_float TempSection(sdims);
            status = H5LTread_dataset(file_id, data_path.str().c_str(), H5T_IEEE_F32LE, &TempSection[0]);
            if (status < 0) {
                std::string errorMsg("Exception while reading data in stfio::importHDF5File");
                throw std::runtime_error(errorMsg);
            }

            Section TempSectionT(TempSection.size(), section_name.str());
            for (std::size_t cp = 0; cp < TempSectionT.size(); ++cp) {
                TempSectionT[cp] = double(TempSection[cp]);
            }
            // std::copy(TempSection.begin(),TempSection.end(),&TempSectionT[0]);
            try {
                TempChannel.InsertSection(TempSectionT,n_s);
            }
            catch (...) {
                throw;
            }


            /* H5TBread_table
               const int NSRECORDS = 1; */
            const int NSFIELDS    = 3;

            /* Calculate the size and the offsets of our struct members in memory */
            size_t st_offset[NSFIELDS] = {  HOFFSET( st, dt ),
                                            HOFFSET( st, xunits ),
                                            HOFFSET( st, yunits )};
            st st_buf[1];
            size_t st_sizes[NSFIELDS] = { sizeof( st_buf[0].dt),
                                          sizeof( st_buf[0].xunits),
                                          sizeof( st_buf[0].yunits)};
            status=H5TBread_table( section_group, "description", sizeof(st), st_offset, st_sizes, st_buf );
            if (status < 0) {
                std::string errorMsg("Exception while reading data description in stfio::importHDF5File");
                throw std::runtime_error(errorMsg);
            }
            dt = st_buf[0].dt;
            yunits = st_buf[0].yunits;
            H5Gclose( section_group );
        }
        try {
            if ((int)ReturnData.size()<numberChannels) {
                ReturnData.resize(numberChannels);
            }
            ReturnData.InsertChannel(TempChannel,n_c);
            ReturnData[n_c].SetYUnits( yunits );
        }
        catch (...) {
            ReturnData.resize(0);
            throw;
        }
        H5Gclose( channel_group );
    }
    ReturnData.SetXScale(dt);
    /* Terminate access to the file. */
    status = H5Fclose(file_id);
    if (status < 0) {
        std::string errorMsg("Exception while closing file in stfio::importHDF5File");
        throw std::runtime_error(errorMsg);
    }

    /* Release all hdf5 resources */
    status = H5close();
    if (status < 0) {
        std::string errorMsg("Exception while closing file in stfio::exportHDF5File");
        throw std::runtime_error(errorMsg);
    }
    
}
Example #18
bool stfio::exportHDF5File(const std::string& fName, const Recording& WData, ProgressInfo& progDlg) {
    
    hid_t file_id = H5Fcreate(fName.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    
    const int NRECORDS = 1;
    const int NFIELDS = 3;

    /* Calculate the size and the offsets of our struct members in memory */
    size_t rt_offset[NFIELDS] = {  HOFFSET( rt, channels ),
                                   HOFFSET( rt, date ),
                                   HOFFSET( rt, time )};

    /* Define an array of root tables */
    rt p_data;
    p_data.channels = WData.size();
    struct tm t = WData.GetDateTime();
    std::size_t date_length = snprintf(p_data.date, DATELEN, "%04i-%02i-%02i", t.tm_year+1900, t.tm_mon+1, t.tm_mday);
    std::size_t time_length = snprintf(p_data.time, TIMELEN, "%02i:%02i:%02i", t.tm_hour, t.tm_min, t.tm_sec);
    // ensure that an undefined string is set to "\0" and that the terminating \0 is counted in the string length
    p_data.date[date_length++] = 0;
    p_data.time[time_length++] = 0;

    /* Define field information */
    const char *field_names[NFIELDS]  =  { "channels", "date", "time" };
    hid_t      field_type[NFIELDS];

    /* Initialize the field field_type */
    hid_t string_type1 = H5Tcopy( H5T_C_S1 );
    hid_t string_type2 = H5Tcopy( H5T_C_S1 );
    H5Tset_size( string_type1,  date_length);
    H5Tset_size( string_type2,  time_length);
    field_type[0] = H5T_NATIVE_INT;
    field_type[1] = string_type1;
    field_type[2] = string_type2;
    
    std::ostringstream desc;
    desc << "Description of " << fName;
    
    herr_t status = H5TBmake_table( desc.str().c_str(), file_id, "description", (hsize_t)NFIELDS, (hsize_t)NRECORDS, sizeof(rt),
                                    field_names, rt_offset, field_type, 10, NULL, 0, &p_data  );

    if (status < 0) {
        std::string errorMsg("Exception while writing description in stfio::exportHDF5File");
        H5Fclose(file_id);
        H5close();
        throw std::runtime_error(errorMsg);
    }

    hid_t comment_group = H5Gcreate2( file_id,"/comment", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    /* File comment. */
    std::string description(WData.GetFileDescription());
    if (description.length() <= 0) {
        description = "No description";
    }

    status = H5LTmake_dataset_string(file_id, "/comment/description", description.c_str());
    if (status < 0) {
        std::string errorMsg("Exception while writing description in stfio::exportHDF5File");
        H5Fclose(file_id);
        H5close();
        throw std::runtime_error(errorMsg);
    }

    std::string comment(WData.GetComment());
    if (comment.length() <= 0) {
        comment = "No comment";
    }

    status = H5LTmake_dataset_string(file_id, "/comment/comment", comment.c_str());
    if (status < 0) {
        std::string errorMsg("Exception while writing comment in stfio::exportHDF5File");
        H5Fclose(file_id);
        H5close();
        throw std::runtime_error(errorMsg);
    }
    H5Gclose(comment_group);

    std::vector<std::string> channel_name(WData.size());

    hid_t channels_group = H5Gcreate2( file_id,"/channels", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    for ( std::size_t n_c=0; n_c < WData.size(); ++n_c) {
        /* Channel descriptions. */
        std::ostringstream ossname;
        ossname << WData[n_c].GetChannelName();
        if ( ossname.str() == "" ) {
            ossname << "ch" << (n_c);
        }
        channel_name[n_c] = ossname.str();
        hsize_t dimsc[1] = { 1 };
        hid_t string_typec = H5Tcopy( H5T_C_S1 );
        std::size_t cn_length = channel_name[n_c].length();
        if (cn_length <= 0) cn_length = 1;
        H5Tset_size( string_typec, cn_length );

        std::vector<char> datac(channel_name[n_c].length());
        std::copy(channel_name[n_c].begin(),channel_name[n_c].end(), datac.begin());
        std::ostringstream desc_path; desc_path << "/channels/ch" << (n_c);
        status = H5LTmake_dataset(file_id, desc_path.str().c_str(), 1, dimsc, string_typec, &datac[0]);
        if (status < 0) {
            std::string errorMsg("Exception while writing channel name in stfio::exportHDF5File");
            H5Fclose(file_id);
            H5close();
            throw std::runtime_error(errorMsg);
        }

        std::ostringstream channel_path; channel_path << "/" << channel_name[n_c];
        hid_t channel_group = H5Gcreate2( file_id, channel_path.str().c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        if (channel_group < 0) {
            std::ostringstream errorMsg;
            errorMsg << "Exception while creating channel group for "
                     << channel_path.str().c_str();
            H5Fclose(file_id);
            H5close();
            throw std::runtime_error(errorMsg.str());
        }

        /* Calculate the size and the offsets of our struct members in memory */
        size_t ct_size =  sizeof( ct );
        size_t ct_offset[1] = { HOFFSET( rt, channels ) };
        /* Define an array of channel tables */
        ct c_data = { (int)WData[n_c].size() };

        /* Define field information */
        const char *cfield_names[1]  =  { "n_sections" };
        hid_t      cfield_type[1] = {H5T_NATIVE_INT};
        std::ostringstream c_desc;
        c_desc << "Description of channel " << n_c;
        status = H5TBmake_table( c_desc.str().c_str(), channel_group, "description", (hsize_t)1, (hsize_t)1, ct_size,
                                 cfield_names, ct_offset, cfield_type, 10, NULL, 0, &c_data  );
        if (status < 0) {
            std::string errorMsg("Exception while writing channel description in stfio::exportHDF5File");
            H5Fclose(file_id);
            H5close();
            throw std::runtime_error(errorMsg);
        }

        int max_log10 = 0;
        if (WData[n_c].size() > 1) {
            max_log10 = int(log10((double)WData[n_c].size()-1.0));
        }

        for (std::size_t n_s=0; n_s < WData[n_c].size(); ++n_s) {
            int progbar = 
                // Channel contribution:
                (int)(((double)n_c/(double)WData.size())*100.0+
                      // Section contribution:
                      (double)(n_s)/(double)WData[n_c].size()*(100.0/WData.size()));
            std::ostringstream progStr;
            progStr << "Writing channel #" << n_c + 1 << " of " << WData.size()
                    << ", Section #" << n_s << " of " << WData[n_c].size();
            progDlg.Update(progbar, progStr.str());
            
            // construct a number with leading zeros:
            int n10 = 0;
            if (n_s > 0) {
                n10 = int(log10((double)n_s));
            }
            std::ostringstream strZero; strZero << "";
            for (int n_z=n10; n_z < max_log10; ++n_z) {
                strZero << "0";
            }

            // construct a section name:
            std::ostringstream section_name; section_name << WData[n_c][n_s].GetSectionDescription();
            if ( section_name.str() == "" ) {
                section_name << "sec" << n_s;
            }

            // create a child group in the channel:
            std::ostringstream section_path;
            section_path << channel_path.str() << "/" << "section_" << strZero.str() << n_s;
            hid_t section_group = H5Gcreate2( file_id, section_path.str().c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

            // add data and description, store as 32 bit little endian independent of machine:
            hsize_t dims[1] = { WData[n_c][n_s].size() };
            std::ostringstream data_path;
            data_path << section_path.str() << "/data";
            Vector_float data_cp(WData[n_c][n_s].get().size()); /* 32 bit */
            for (std::size_t n_cp = 0; n_cp < WData[n_c][n_s].get().size(); ++n_cp) {
                data_cp[n_cp] = float(WData[n_c][n_s][n_cp]);
            }
            status = H5LTmake_dataset(file_id, data_path.str().c_str(), 1, dims, H5T_IEEE_F32LE, &data_cp[0]);
            if (status < 0) {
                std::string errorMsg("Exception while writing data in stfio::exportHDF5File");
                H5Fclose(file_id);
                H5close();
                throw std::runtime_error(errorMsg);
            }

            const int NSRECORDS = 1;
            const int NSFIELDS = 3;

            /* Calculate the size and the offsets of our struct members in memory */
            size_t st_size =  sizeof( st );
            size_t st_offset[NSFIELDS] = {  HOFFSET( st, dt ),
                                            HOFFSET( st, xunits ),
                                            HOFFSET( st, yunits )};

            /* Define an array of root tables */
            st s_data;
            s_data.dt = WData.GetXScale();
            if (WData.GetXUnits().length() < UNITLEN)
                strcpy( s_data.xunits, WData.GetXUnits().c_str() );
            if (WData[n_c].GetYUnits().length() < UNITLEN)
                strcpy( s_data.yunits, WData[n_c].GetYUnits().c_str() );

            /* Define field information */
            const char *sfield_names[NSFIELDS]  =  { "dt", "xunits", "yunits" };
            hid_t      sfield_type[NSFIELDS];

            /* Initialize the field field_type */
            hid_t string_type4 = H5Tcopy( H5T_C_S1 );
            hid_t string_type5 = H5Tcopy( H5T_C_S1 );
            H5Tset_size( string_type4,  2);
            std::size_t yu_length = WData[n_c].GetYUnits().length();
            if (yu_length <= 0) yu_length = 1;

            H5Tset_size( string_type5, yu_length );
            sfield_type[0] = H5T_NATIVE_DOUBLE;
            sfield_type[1] = string_type4;
            sfield_type[2] = string_type5;

            std::ostringstream sdesc;
            sdesc << "Description of " << section_name.str();
            status = H5TBmake_table( sdesc.str().c_str(), section_group, "description", (hsize_t)NSFIELDS, (hsize_t)NSRECORDS, st_size,
                                     sfield_names, st_offset, sfield_type, 10, NULL, 0, &s_data  );
            if (status < 0) {
                std::string errorMsg("Exception while writing section description in stfio::exportHDF5File");
                H5Fclose(file_id);
                H5close();
                throw std::runtime_error(errorMsg);
            }
            H5Gclose(section_group);
        }
        H5Gclose(channel_group);
    }
    H5Gclose(channels_group);

    /* Terminate access to the file. */
    status = H5Fclose(file_id);
    if (status < 0) {
        std::string errorMsg("Exception while closing file in stfio::exportHDF5File");
        throw std::runtime_error(errorMsg);
    }

    /* Release all hdf5 resources */
    status = H5close();
    if (status < 0) {
        std::string errorMsg("Exception while closing file in stfio::exportHDF5File");
        throw std::runtime_error(errorMsg);
    }
    
    return (status >= 0);
}
Example #19
int main (int argc, char* argv[])
{
   // Library initialization.
   MPI_Init(&argc, &argv);
   H5open();
   std::string filename = argv[1];

   // We determine the size and ranks of our process.
   int rank, size;
   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
   MPI_Comm_size(MPI_COMM_WORLD, &size);

   // We distribute the indices among the processors. 
   // For proper load balancing, we should have size % 6 = 0. 
   std::vector<std::vector<unsigned int> > indices;
   indices.resize(size);

   for (unsigned int i=0; i<6; i++)
   {
    int idx = user_mod(i,size);
    indices[idx].push_back(i);
   }

   // Open existing file.
   hid_t plist_id;
   plist_id = H5Pcreate(H5P_FILE_ACCESS);
   H5Pset_fapl_mpio(plist_id,MPI_COMM_WORLD,MPI_INFO_NULL); 
   hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id);

   // We store the name of the datasets we want to open. 
   std::vector<std::string> dataset_names;
   dataset_names.push_back(std::string("/besselJ"));
   dataset_names.push_back(std::string("/besselY"));
   dataset_names.push_back(std::string("/besselI"));
   dataset_names.push_back(std::string("/besselK"));
   dataset_names.push_back(std::string("/hankelH1"));
   dataset_names.push_back(std::string("/hankelH2"));

   // We store the appropriate functions pointers in an array.
   std::vector<std::complex<double> (*) (double, std::complex<double>, int)> f_ptr;
   f_ptr.push_back(sp_bessel::besselJp);
   f_ptr.push_back(sp_bessel::besselYp);
   f_ptr.push_back(sp_bessel::besselIp);
   f_ptr.push_back(sp_bessel::besselKp);
   f_ptr.push_back(sp_bessel::hankelH1p);
   f_ptr.push_back(sp_bessel::hankelH2p);

   // We loop over the datasets.
   for (auto iter = indices[rank].begin(); iter != indices[rank].end(); iter++)
   {
    // Open dataset.
    hid_t dataset_id = H5Dopen(file_id, dataset_names[*iter].c_str(), H5P_DEFAULT);

    // Obtain the dataspace
    hid_t dspace = H5Dget_space(dataset_id);

    // We obtain the dimensions of the dataset.
    const int ndims = H5Sget_simple_extent_ndims(dspace);
    hsize_t dims[ndims];
    H5Sget_simple_extent_dims(dspace, dims, NULL);

    // We read the dataset.
    std::complex<double> values[dims[0]][dims[1]][dims[2]][dims[3]];
    hid_t complex_id = H5Dget_type(dataset_id);
    H5Dread(dataset_id, complex_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, values);

    // We now open/read the attributes.
    double vmax, zimin, zimax, zrmin, zrmax;
    hid_t vmax_id = H5Aopen(dataset_id, "vmax", H5P_DEFAULT);
    hid_t zimin_id = H5Aopen(dataset_id, "zimin", H5P_DEFAULT);
    hid_t zimax_id = H5Aopen(dataset_id, "zrmax", H5P_DEFAULT);
    hid_t zrmin_id = H5Aopen(dataset_id, "zrmin", H5P_DEFAULT);
    hid_t zrmax_id = H5Aopen(dataset_id, "zrmax", H5P_DEFAULT);

    H5Aread(vmax_id, H5T_NATIVE_DOUBLE, &vmax);
    H5Aread(zimin_id, H5T_NATIVE_DOUBLE, &zimin);
    H5Aread(zimax_id, H5T_NATIVE_DOUBLE, &zimax);
    H5Aread(zrmin_id, H5T_NATIVE_DOUBLE, &zrmin);
    H5Aread(zrmax_id, H5T_NATIVE_DOUBLE, &zrmax);

    // We now evaluate the Bessel functions at the computed values.
    arma::vec orders = arma::linspace(-vmax, vmax, dims[1]);
    arma::vec realZ = arma::linspace(zrmin, zrmax, dims[2]);
    arma::vec imagZ = arma::linspace(zimin, zimax, dims[3]);


    unsigned int count = 0;
    for (int i=0; i<dims[0]; i++)
    {
        for (int j=0; j<dims[1]; j++)
        {
            for (int k=0; k<dims[2]; k++)
            {
                for (int l=0; l<dims[3]; l++)
                {
                    double eps = std::abs(f_ptr[*iter](orders(j), std::complex<double>(realZ(k), imagZ(l)), i)
                                            - values[i][j][k][l]);

                    if (eps > 1.0e-13)
                    {
                        std::cout << "Issue in " << dataset_names[*iter].c_str() 
                                  << " at nu = " << orders(j) 
                                  << " z = " << realZ(k) << " + i" << imagZ(l) << "."
                                  << " and p = " << i << std::endl;
                        std::cout << "Epsilon is " << eps << std::endl;
                        count++;
                    }
                }
            }
        }
    }


   }

   // Library shutdown.
   H5close();
   MPI_Finalize();

   return 0;
 }
Example #20
int main(int argc, char *argv[])
{
    (void)argc;
    (void)argv;

typedef struct rt {
    int channels;
    char date[DATELEN];
    char time[TIMELEN];
} rt;

//    H5Fis_hdf5("/dev/null");

/*
* Create a new file using H5F_ACC_TRUNC access,
* default file creation properties, and default file
* access properties.
* Then close the file.
*/

    const int NRECORDS = 1;
    const int NFIELDS = 3;
    char fName[] = "tmp.h5";

    /* Calculate the size and the offsets of our struct members in memory */
    size_t rt_offset[NFIELDS] = {  HOFFSET( rt, channels ),
                                   HOFFSET( rt, date ),
                                   HOFFSET( rt, time )};

    rt p_data;
    p_data.channels = 1;
    strcpy( p_data.date, "1234-Dec-31");
    strcpy( p_data.time, "12:34:56");


    hid_t file_id = H5Fcreate(fName, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);


    /* Define field information */
    const char *field_names[NFIELDS]  =  { "channels", "date", "time" };
    hid_t      field_type[NFIELDS];

    /* Initialize the field field_type */
    hid_t string_type1 = H5Tcopy( H5T_C_S1 );
    hid_t string_type2 = H5Tcopy( H5T_C_S1 );
    H5Tset_size( string_type1,  strlen(p_data.date));
    H5Tset_size( string_type2,  strlen(p_data.time));
    field_type[0] = H5T_NATIVE_INT;
    field_type[1] = string_type1;
    field_type[2] = string_type2;

    std::ostringstream desc;
    desc << "Description of " << fName;

    herr_t status = H5TBmake_table( desc.str().c_str(), file_id, "description", (hsize_t)NFIELDS, (hsize_t)NRECORDS, sizeof(rt),
                                    field_names, rt_offset, field_type, 10, NULL, 0, &p_data  );

    if (status < 0) {
        perror("Exception while writing description in stfio::exportHDF5File");
        H5Fclose(file_id);
        H5close();
        exit(-1);
    }

    H5Fclose(file_id);

    return(0);
}
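For symmetry with the read path of Example #17, here is a sketch of reading that one-record table back with H5TBread_table(). The struct layout is repeated so the sketch stands alone; DATELEN and TIMELEN are assumed sizes (the example's own defines are not shown), while "tmp.h5" and the "description" table name come from the code above.

#include "hdf5.h"
#include "hdf5_hl.h"
#include <cstdio>

#define DATELEN 64   /* assumed size */
#define TIMELEN 64   /* assumed size */

typedef struct rt {
    int  channels;
    char date[DATELEN];
    char time[TIMELEN];
} rt;

int main()
{
    rt     rec;
    size_t rt_offset[3] = { HOFFSET(rt, channels), HOFFSET(rt, date), HOFFSET(rt, time) };
    size_t rt_sizes[3]  = { sizeof(rec.channels), sizeof(rec.date), sizeof(rec.time) };

    hid_t file_id = H5Fopen("tmp.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
    if (file_id < 0 ||
        H5TBread_table(file_id, "description", sizeof(rt), rt_offset, rt_sizes, &rec) < 0) {
        std::fprintf(stderr, "could not read back the description table\n");
        return 1;
    }
    std::printf("channels=%d date=%s time=%s\n", rec.channels, rec.date, rec.time);
    H5Fclose(file_id);
    H5close();
    return 0;
}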
Example #21
int
main(int argc, char **argv)
{
    int mpi_size, mpi_rank;				/* mpi variables */
    int ret_code;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    H5open();
    if (parse_options(argc, argv) != 0){
	if (MAINPROCESS)
	    usage();
	goto finish;
    }

    if (MAINPROCESS){
	printf("===================================\n");
	printf("MPI functionality tests\n");
	printf("===================================\n");
    }

    if (VERBOSE_MED)
	h5_show_hostname();

    fapl = H5Pcreate (H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);

    MPI_BANNER("MPIO 1 write Many read test...");
    ret_code = test_mpio_1wMr(filenames[0], USENONE);
    ret_code = errors_sum(ret_code);
    if (mpi_rank==0 && ret_code > 0){
	printf("***FAILED with %d total errors\n", ret_code);
	nerrors += ret_code;
    }

    /* test atomicity and file sync in high verbose mode only         */
    /* since they often hang when broken and PHDF5 does not use them. */
    if (VERBOSE_HI){
	MPI_BANNER("MPIO 1 write Many read test with atomicity...");
	ret_code = test_mpio_1wMr(filenames[0], USEATOM);
	ret_code = errors_sum(ret_code);
	if (mpi_rank==0 && ret_code > 0){
	    printf("***FAILED with %d total errors\n", ret_code);
	    nerrors += ret_code;
	}

	MPI_BANNER("MPIO 1 write Many read test with file sync...");
	ret_code = test_mpio_1wMr(filenames[0], USEFSYNC);
	ret_code = errors_sum(ret_code);
	if (mpi_rank==0 && ret_code > 0){
	    printf("***FAILED with %d total errors\n", ret_code);
	    nerrors += ret_code;
	}
    }

    MPI_BANNER("MPIO File size range test...");
    ret_code = test_mpio_gb_file(filenames[0]);
    ret_code = errors_sum(ret_code);
    if (mpi_rank==0 && ret_code > 0){
	printf("***FAILED with %d total errors\n", ret_code);
	nerrors += ret_code;
    }

    MPI_BANNER("MPIO independent overlapping writes...");
    ret_code = test_mpio_overlap_writes(filenames[0]);
    ret_code = errors_sum(ret_code);
    if (mpi_rank==0 && ret_code > 0){
	printf("***FAILED with %d total errors\n", ret_code);
	nerrors += ret_code;
    }

finish:
    /* make sure all processes are finished before final report, cleanup
     * and exit.
     */
    MPI_Barrier(MPI_COMM_WORLD);
    if (MAINPROCESS){		/* only process 0 reports */
	printf("===================================\n");
	if (nerrors){
	    printf("***MPI tests detected %d errors***\n", nerrors);
	}
	else{
	    printf("MPI tests finished with no errors\n");
	}
	printf("===================================\n");
    }

    h5_cleanup(FILENAME, fapl);
    H5close();

    /* MPI_Finalize must be called AFTER H5close which may use MPI calls */
    MPI_Finalize();

    /* cannot just return (nerrors) because exit code is limited to 1byte */
    return(nerrors!=0);
}