/**
 * V8 binding: reports whether an HDF5 filter is available.
 *
 * JS signature: isAvailable(filterId) -> boolean
 *
 * Wraps H5Zfilter_avail(); any nonzero return (including negative error
 * codes, matching the original ternary's behavior) maps to true.
 */
static void isAvailable(const v8::FunctionCallbackInfo<v8::Value>& args) {
    v8::Isolate* isolate = v8::Isolate::GetCurrent();
    v8::HandleScope scope(isolate);

    // The original unwrapped the Filters instance into an unused local;
    // the dead Unwrap call has been removed (it had no side effects).
    args.GetReturnValue().Set(H5Zfilter_avail(args[0]->ToInt32()->Value()) != 0);
}
/*-------------------------------------------------------------------------
 * Function:    test_read_with_filters
 *
 * Purpose:     Tests reading dataset created with dynamically loaded filters
 *
 * Return:      Success:    0
 *              Failure:    -1
 *
 * Programmer:  Raymond Lu
 *              14 March 2013
 *
 *-------------------------------------------------------------------------
 */
static herr_t test_read_with_filters(hid_t file)
{
    hid_t dset;                 /* Dataset ID */

    /*----------------------------------------------------------
     * STEP 1: Test deflation by itself.
     *----------------------------------------------------------
     */
#ifdef H5_HAVE_FILTER_DEFLATE
    TESTING("Testing deflate filter");

    /* The deflate filter must be compiled into this build for the
     * read-back to succeed. */
    if(H5Zfilter_avail(H5Z_FILTER_DEFLATE) != TRUE) TEST_ERROR

    if((dset = H5Dopen2(file,DSET_DEFLATE_NAME,H5P_DEFAULT)) < 0) TEST_ERROR

    /* Compare the data read through the filter against the expected buffer */
    if(test_read_data(dset, (int *)points_deflate) < 0) TEST_ERROR

    if(H5Dclose(dset) < 0) TEST_ERROR

    /* Clean up objects used for this test */
#else /* H5_HAVE_FILTER_DEFLATE */
    TESTING("deflate filter");
    SKIPPED();
    puts(" Deflate filter not enabled");
#endif /* H5_HAVE_FILTER_DEFLATE */

    /*----------------------------------------------------------
     * STEP 2: Test DYNLIB1 by itself.
     *----------------------------------------------------------
     */
    TESTING("Testing DYNLIB1 filter");

    if((dset = H5Dopen2(file,DSET_DYNLIB1_NAME,H5P_DEFAULT)) < 0) TEST_ERROR

    if(test_read_data(dset, (int *)points_dynlib1) < 0) TEST_ERROR

    if(H5Dclose(dset) < 0) TEST_ERROR

    /*----------------------------------------------------------
     * STEP 3: Test Bogus2 by itself.
     *----------------------------------------------------------
     */
    TESTING("Testing DYNLIB2 filter");

    if((dset = H5Dopen2(file,DSET_DYNLIB2_NAME,H5P_DEFAULT)) < 0) TEST_ERROR

    if(test_read_data(dset, (int *)points_dynlib2) < 0) TEST_ERROR

    if(H5Dclose(dset) < 0) TEST_ERROR

    return 0;

    /* TEST_ERROR jumps here on any failure above */
error:
    return -1;
}
/*
 * Class:     hdf_hdf5lib_H5
 * Method:    H5Zfilter_avail
 * Signature: (I)I
 *
 * Queries whether the given HDF5 filter is available.  Raises a Java
 * HDF5LibraryException (via h5libraryError) when the native call fails,
 * then returns the raw H5Zfilter_avail() result to the Java caller.
 */
JNIEXPORT jint JNICALL Java_hdf_hdf5lib_H5_H5Zfilter_1avail
    (JNIEnv *env, jclass clss, jint filter)
{
    herr_t status;

    status = H5Zfilter_avail((H5Z_filter_t)filter);
    if (status < 0)
        h5libraryError(env);

    return (jint)status;
} /* end Java_hdf_hdf5lib_H5_H5Zfilter_1avail */
/*-------------------------------------------------------------------------
 * Function:    test_filter_write_failure
 *
 * Purpose:     Tests the library's behavior when a mandatory filter returns
 *              failure. There're only 5 chunks with each of them having
 *              2 integers. The filter will fail in the last chunk. The
 *              dataset should release all resources even though the last
 *              chunk can't be flushed to file. The file should close
 *              successfully.
 *
 * Return:
 *              Success: 0
 *              Failure: -1
 *
 * Programmer:  Raymond Lu
 *              25 August 2010
 *
 * Modifications:
 *              Raymond Lu
 *              5 Oct 2010
 *              Test when the chunk cache is enabled and disabled to make
 *              sure the library behaves properly.
 *-------------------------------------------------------------------------
 */
static herr_t
test_filter_write(char *file_name, hid_t my_fapl, hbool_t cache_enabled)
{
    hid_t file = -1;
    hid_t dataset=-1;                           /* dataset ID */
    hid_t sid=-1;                               /* dataspace ID */
    hid_t dcpl=-1;                              /* dataset creation property list ID */
    hsize_t dims[1]={DIM};                      /* dataspace dimension - 10*/
    hsize_t chunk_dims[1]={FILTER_CHUNK_DIM};   /* chunk dimension - 2*/
    int points[DIM];                            /* Data */
    herr_t ret;                                 /* generic return value */
    int i;

    if(cache_enabled) {
        TESTING("data writing when a mandatory filter fails and chunk cache is enabled");
    } else {
        TESTING("data writing when a mandatory filter fails and chunk cache is disabled");
    }

    /* Create file */
    if((file = H5Fcreate(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, my_fapl)) < 0) TEST_ERROR

    /* create the data space */
    if((sid = H5Screate_simple(1, dims, NULL)) < 0) TEST_ERROR

    /* Create dcpl and register the filter */
    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR

    if(H5Pset_chunk(dcpl, 1, chunk_dims) < 0) TEST_ERROR

    /* Register the always-failing test filter with the library */
    if(H5Zregister (H5Z_FAIL_TEST) < 0) TEST_ERROR

    /* Check that the filter was registered */
    if(TRUE != H5Zfilter_avail(H5Z_FILTER_FAIL_TEST)) FAIL_STACK_ERROR

    /* Enable the filter as mandatory */
    if(H5Pset_filter(dcpl, H5Z_FILTER_FAIL_TEST, 0, (size_t)0, NULL) < 0) TEST_ERROR

    /* create a dataset */
    if((dataset = H5Dcreate2(file, DSET_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) TEST_ERROR

    /* Initialize the write buffer */
    for(i = 0; i < DIM; i++)
        points[i] = i;

    /* Write data. If the chunk cache is enabled, H5Dwrite should succeed. If it is
     * disabled, H5Dwrite should fail. */
    if(cache_enabled) {
        if(H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, sid, H5P_DEFAULT, points) < 0) TEST_ERROR
    } else {
        /* NOTE(review): this function is truncated in this excerpt -- the
         * disabled-cache branch and the function's cleanup/return continue
         * beyond the visible text. */
// Registers the sqy compressor with HDF5, unless a filter with the sqy
// ID is already known to the library.  Reports failure on stderr and
// success on stdout.  (Signature kept exactly as in the original --
// presumably a constructor of a registration guard type; confirm.)
loaded_hdf5_plugin()
{
    int status = 0;

    // Only attempt registration when the filter ID is not yet available.
    if (!H5Zfilter_avail(H5Z_FILTER_SQY))
        status = H5Zregister(H5Z_SQY);

    if (status < 0)
        std::cerr << __FILE__ << ":" << __LINE__
                  << "\t unable to register sqy as hdf5 filter!!\n";
    else
        std::cout << "Done registering sqy as hdf5 filter.\n";
}
/* Fortran shim for H5Zfilter_avail(): writes the availability result of
 * the filter identified by *filter into *flag, and returns 0 on success
 * or -1 when the underlying call reports an error. */
int_f
nh5zfilter_avail_c ( int_f *filter , int_f *flag )
/******/
{
    htri_t avail;

    avail = H5Zfilter_avail((H5Z_filter_t)*filter);
    *flag = (int_f)avail;

    return (avail < 0) ? -1 : 0;
}
/*-------------------------------------------------------------------------
 * Function:    test_path_api_calls
 *
 * Purpose:     Tests the H5PL API calls that manipulate the plugin search
 *              paths.
 *
 * Return:      SUCCEED/FAIL
 *
 *-------------------------------------------------------------------------
 */
static herr_t
test_path_api_calls(void)
{
    unsigned int n_starting_paths;
    unsigned int u;
    unsigned int n_paths;
    herr_t       ret;
    ssize_t      path_len = -1;
    char         path[256];
    char         temp_name[256];

    HDputs("Testing access to the filter path table");

    if (H5Zfilter_avail(FILTER1_ID) != TRUE)
        TEST_ERROR;

    /* Set the number of paths to create for this test.
     *
     * This should be set high enough to ensure that at least one array
     * expansion will take place. See H5PLpath.c for details.
     */
    n_starting_paths = 42;

    /* Check that initialization is correct */
    TESTING(" initialize");

    if (H5PLsize(&n_paths) < 0)
        TEST_ERROR;
    if (n_paths != 2)
        TEST_ERROR;

    PASSED();

    /****************/
    /* H5PLremove() */
    /****************/

    /* Remove all the current paths */
    TESTING(" remove");

    /* Get the current size */
    if (H5PLsize(&n_paths) < 0)
        TEST_ERROR;

    /* Remove all existing paths */
    for (u = n_paths; u > 0; u--)
        if (H5PLremove(u-1) < 0) {
            HDfprintf(stderr," at %u: %s\n", u, path);
            TEST_ERROR;
        }

    /* Verify the table is empty */
    if (H5PLsize(&n_paths) < 0)
        TEST_ERROR;
    if (n_paths > 0)
        TEST_ERROR;

    PASSED();

    TESTING(" remove (index 0 in empty table)");

    /* Try to remove index zero in an empty list (SHOULD FAIL) */
    H5E_BEGIN_TRY {
        ret = H5PLremove(0);
    } H5E_END_TRY
    if (ret >= 0)
        TEST_ERROR;

    PASSED();

    /****************/
    /* H5PLappend() */
    /****************/

    TESTING(" append");

    /* Add a bunch of paths to the path table */
    for (u = 0; u < n_starting_paths; u++) {
        HDsprintf(path, "a_path_%u", u);
        if (H5PLappend(path) < 0) {
            HDfprintf(stderr," at %u: %s\n", u, path);
            TEST_ERROR;
        }
    }

    PASSED();

    /**********************/
    /* H5PLremove() again */
    /**********************/

    TESTING(" remove (index too high)");

    /* Try to remove a path where the index is beyond the table capacity (SHOULD FAIL) */
    H5E_BEGIN_TRY {
        ret = H5PLremove(n_starting_paths);
    } H5E_END_TRY
    if (ret >= 0)
        TEST_ERROR

    PASSED();

    /*************/
    /* H5PLget() */
    /*************/

    TESTING(" get (path name)");

    /* Get the path length by passing in NULL */
    if ((path_len = H5PLget(0, NULL, 0)) <= 0) {
        HDfprintf(stderr," get path 0 length failed\n");
        TEST_ERROR;
    }
    /* NOTE(review): assumes H5PLget() returns the path length excluding
     * the NUL terminator ("a_path_0" is 8 characters) -- confirm against
     * the H5PL documentation. */
    if (path_len != 8)
        TEST_ERROR;

    /* Get the path */
    if ((path_len = H5PLget(0, path, 256)) <= 0) {
        /* NOTE(review): path_len is ssize_t but is printed with %u -- a
         * format-specifier mismatch in this diagnostic message. */
        HDfprintf(stderr," get 0 len: %u : %s\n", path_len, path);
        TEST_ERROR;
    }
    if (HDstrcmp(path, "a_path_0") != 0) {
        HDfprintf(stderr," get 0: %s\n", path);
        TEST_ERROR;
    }

    PASSED();

    TESTING(" get (high and low indices)");

    /* Get path at index 1 */
    if ((path_len = H5PLget(1, path, 256)) <= 0)
        TEST_ERROR;
    if (HDstrcmp(path, "a_path_1") != 0) {
        HDfprintf(stderr," get 1: %s\n", path);
        TEST_ERROR;
    }

    /* Get path at the last index */
    if ((path_len = H5PLget(n_starting_paths - 1, path, 256)) <= 0)
        TEST_ERROR;
    HDsprintf(temp_name, "a_path_%u", n_starting_paths - 1);
    if (HDstrcmp(path, temp_name) != 0) {
        HDfprintf(stderr," get %u: %s\n", n_starting_paths - 1, path);
        TEST_ERROR;
    }

    PASSED();

    TESTING(" get (index too high)");

    /* Get path at the last + 1 index (SHOULD FAIL) */
    H5E_BEGIN_TRY {
        path_len = H5PLget(n_starting_paths, NULL, 0);
    } H5E_END_TRY
    if (path_len > 0)
        TEST_ERROR;

    PASSED();

    /*****************/
    /* H5PLprepend() */
    /*****************/

    /* We'll remove a path at an arbitrary index and then
     * prepend a new path.
     */

    TESTING(" remove (arbitrary index 1)");

    /* Remove one path */
    if (H5PLremove(8) < 0)
        TEST_ERROR;

    /* Verify that the entries were moved */
    if ((path_len = H5PLget(8, path, 256)) <= 0)
        TEST_ERROR;
    if (HDstrcmp(path, "a_path_9") != 0) {
        HDfprintf(stderr," get 8: %s\n", path);
        TEST_ERROR;
    }

    /* Verify the table shrank */
    if (H5PLsize(&n_paths) < 0)
        TEST_ERROR;
    if (n_paths != n_starting_paths - 1)
        TEST_ERROR;

    PASSED();

    TESTING(" prepend");

    /* Prepend one path */
    /* NOTE(review): %d is used with an unsigned expression here (and for
     * the matching temp_name below) -- strictly a format mismatch, though
     * harmless for these small values. */
    HDsprintf(path, "a_path_%d", n_starting_paths + 1);
    if (H5PLprepend(path) < 0) {
        HDfprintf(stderr," prepend %u: %s\n", n_starting_paths + 1, path);
        TEST_ERROR;
    }

    /* Verify the table increased */
    if (H5PLsize(&n_paths) < 0)
        TEST_ERROR;
    if (n_paths != n_starting_paths)
        TEST_ERROR;

    /* Verify that the entries were moved */
    if (H5PLget(8, path, 256) <= 0)
        TEST_ERROR;
    if (HDstrcmp(path, "a_path_7") != 0) {
        HDfprintf(stderr," get 8: %s\n", path);
        TEST_ERROR;
    }

    /* Verify that the path was inserted at index zero */
    if (H5PLget(0, path, 256) <= 0)
        TEST_ERROR;
    HDsprintf(temp_name, "a_path_%d", n_starting_paths + 1);
    if (HDstrcmp(path, temp_name) != 0) {
        HDfprintf(stderr," get 0: %s\n", path);
        TEST_ERROR;
    }

    PASSED();

    /*****************/
    /* H5PLreplace() */
    /*****************/

    TESTING(" replace");

    /* Replace one path at index 1 */
    HDsprintf(path, "a_path_%u", n_starting_paths + 4);
    if (H5PLreplace(path, 1) < 0) {
        HDfprintf(stderr," replace 1: %s\n", path);
        TEST_ERROR;
    }

    /* Verify the table size remained the same */
    if (H5PLsize(&n_paths) < 0)
        TEST_ERROR;
    if (n_paths != n_starting_paths)
        TEST_ERROR;

    /* Verify that the entries were not moved by
     * inspecting the paths at indices +/- 1.
     */

    /* Check path at index 0 */
    if (H5PLget(0, path, 256) <= 0)
        TEST_ERROR;
    HDsprintf(temp_name, "a_path_%u", n_starting_paths + 1);
    if (HDstrcmp(path, temp_name) != 0) {
        HDfprintf(stderr," get 0: %s\n", path);
        TEST_ERROR;
    }

    /* Check path at index 2 */
    if (H5PLget(2, path, 256) <= 0)
        TEST_ERROR;
    if (HDstrcmp(path, "a_path_1") != 0) {
        HDfprintf(stderr," get 2: %s\n", path);
        TEST_ERROR;
    }

    PASSED();

    /****************/
    /* H5PLinsert() */
    /****************/

    /* We'll remove a path at an arbitrary index and then
     * insert a new path.
     */

    TESTING(" remove (arbitrary index 2)");

    /* Remove one path */
    if (H5PLremove(4) < 0)
        TEST_ERROR;

    /* Verify that the entries were moved */
    if (H5PLget(4, path, 256) <= 0)
        TEST_ERROR;
    if (HDstrcmp(path, "a_path_4") != 0) {
        HDfprintf(stderr," get 4: %s\n", path);
        TEST_ERROR;
    }

    /* Verify the table size */
    if (H5PLsize(&n_paths) < 0)
        TEST_ERROR;
    if (n_paths != n_starting_paths - 1)
        TEST_ERROR;

    PASSED();

    TESTING(" insert");

    /* Insert one path at index 3*/
    HDsprintf(path, "a_path_%d", n_starting_paths + 5);
    if (H5PLinsert(path, 3) < 0) {
        HDfprintf(stderr," insert 3: %s\n", path);
        TEST_ERROR;
    }

    /* Verify that the entries were moved */
    if (H5PLget(4, path, 256) <= 0)
        TEST_ERROR;
    if (HDstrcmp(path, "a_path_2") != 0) {
        HDfprintf(stderr," get 4: %s\n", path);
        TEST_ERROR;
    }

    /* Verify the table size increased */
    if (H5PLsize(&n_paths) < 0)
        TEST_ERROR;
    if (n_paths != n_starting_paths)
        TEST_ERROR;

    PASSED();

    /****************/
    /* H5PLremove() */
    /****************/

    /* Remove all the current paths */
    TESTING(" remove (all)");

    /* Get the current size */
    if (H5PLsize(&n_paths) < 0)
        TEST_ERROR;

    /* Remove all existing paths */
    for (u = n_paths; u > 0; u--)
        if (H5PLremove(u-1) < 0) {
            HDfprintf(stderr," at %u: %s\n", u, path);
            TEST_ERROR;
        }

    /* Verify the table is empty */
    if (H5PLsize(&n_paths) < 0)
        TEST_ERROR;
    if (n_paths > 0)
        TEST_ERROR;

    PASSED();

    return SUCCEED;

error:
    return FAIL;
} /* end test_path_api_calls() */
/*-------------------------------------------------------------------------
 * Function:    test_dataset_read_with_filters
 *
 * Purpose:     Tests reading datasets created with dynamically-loaded
 *              filter plugins.
 *
 * Return:      SUCCEED/FAIL
 *
 *-------------------------------------------------------------------------
 */
static herr_t
test_dataset_read_with_filters(hid_t fid)
{
    hid_t did = -1;                 /* Dataset ID */

    /*----------------------------------------------------------
     * STEP 1: Test deflation by itself.
     *----------------------------------------------------------
     */
    TESTING("dataset read I/O with deflate filter");

#ifdef H5_HAVE_FILTER_DEFLATE
    /* The deflate filter must be compiled into this build */
    if (H5Zfilter_avail(H5Z_FILTER_DEFLATE) != TRUE)
        TEST_ERROR;

    if ((did = H5Dopen2(fid, DSET_DEFLATE_NAME, H5P_DEFAULT)) < 0)
        TEST_ERROR;

    /* Compare read-back data against the expected buffer */
    if (test_read_data(did, orig_deflate_g[0]) < 0)
        TEST_ERROR;

    if (H5Dclose(did) < 0)
        TEST_ERROR;
#else /* H5_HAVE_FILTER_DEFLATE */
    SKIPPED();
    HDputs(" Deflate filter not enabled");
#endif /* H5_HAVE_FILTER_DEFLATE */

    /*----------------------------------------------------------
     * STEP 2: Test filter plugin 1 by itself.
     *----------------------------------------------------------
     */
    TESTING(" dataset reads with filter plugin 1");

    if ((did = H5Dopen2(fid, DSET_FILTER1_NAME, H5P_DEFAULT)) < 0)
        TEST_ERROR;

    if (test_read_data(did, orig_dynlib1_g[0]) < 0)
        TEST_ERROR;

    if (H5Dclose(did) < 0)
        TEST_ERROR;

    /*----------------------------------------------------------
     * STEP 3: Test filter plugin 2 by itself.
     *----------------------------------------------------------
     */
    TESTING(" dataset reads with filter plugin 2");

    if ((did = H5Dopen2(fid, DSET_FILTER2_NAME, H5P_DEFAULT)) < 0)
        TEST_ERROR;

    if (test_read_data(did, orig_dynlib2_g[0]) < 0)
        TEST_ERROR;

    if (H5Dclose(did) < 0)
        TEST_ERROR;

    /*----------------------------------------------------------
     * STEP 4: Test filter plugin 3 by itself.
     *----------------------------------------------------------
     */
    TESTING(" dataset reads with filter plugin 3");

    if ((did = H5Dopen2(fid, DSET_FILTER3_NAME, H5P_DEFAULT)) < 0)
        TEST_ERROR;

    /* NOTE(review): plugin 3 is checked against orig_dynlib4_g -- this
     * looks intentional in the upstream test suite but verify the
     * numbering against where these buffers are written. */
    if (test_read_data(did, orig_dynlib4_g[0]) < 0)
        TEST_ERROR;

    if (H5Dclose(did) < 0)
        TEST_ERROR;

    return SUCCEED;

error:
    /* Clean up objects used for this test */
    H5E_BEGIN_TRY {
        H5Dclose(did);
    } H5E_END_TRY

    return FAIL;
} /* end test_dataset_read_with_filters() */
//========================================================================= void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { if (nrhs < 1 || !mxIsChar(prhs[0])) mexErrMsgTxt("First parameter must be the command (a string)"); char cmd[100]; mxGetString(prhs[0],cmd,100); if (!strcmp("H5Fcreate",cmd)){ //See h5f // //file_id = hdf5_mex('H5Fcreate',file_name); // //TODO: Eventually this will be changed to allow //all inputs, not just the file hid_t file; file = H5Fcreate("Attributes.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); } else if (!strcmp("H5Fopen",cmd)){ } else if (!strcmp("H5Fclose",cmd)){ } else if (!strcmp("H5Zfilter_avail",cmd)){ //http://www.hdfgroup.org/HDF5/doc/RM/RM_H5Z.html#Compression-FilterAvail // // status = hdf5_mex('H5Zfilter_avail',filter_id); htri_t status; //TODO: Remove hard coded filter values // - this requires enumeration support status = H5Zfilter_avail(H5Z_FILTER_SZIP); setInt8Output(plhs,0,(int8)status); } else if (!strcmp("H5Zget_filter_info",cmd)){ //http://www.hdfgroup.org/HDF5/doc/RM/RM_H5Z.html#Compression-GetFilterInfo //H5Z_filter_t filter, unsigned int *filter_config // // [err,config] = hdf5_mex('H5Zget_filter_info',filter_id); unsigned int filter_config = 0; herr_t function_error; //TODO: Replace hardcoded value function_error = H5Zget_filter_info(H5Z_FILTER_SZIP,&filter_config); setInt8Output(plhs,0,(int8)function_error); setInt8Output(plhs,0,(int8)filter_config); } else if (!strcmp("H5MLget_constant_value",cmd)){ char *enum_string; enum_string = mxArrayToString(prhs[1]); //TODO: I'm still working on this. Once this code works I'll need //to create a function that will support translation of a given //input with the following features // - handling string or number // - handling invalid options - i.e. 
bad strings - throw error // - handling all number types //mexPrintf("Test: %d\n",ENUM_MAP.at("H5D_CHUNKED")); //map<string, int>::iterator p; //p = ENUM_MAP.find("H5D_CHUNKED"); //mexPrintf("Test: %d",ENUM_MAP["H5D_CHUNKED"]); setDoubleOutput(plhs,0,(double)(ENUM_MAP[enum_string])); } //herr_t ret; //ret = H5Fclose(file); }
/*-------------------------------------------------------------------------
 * Function:    test_filter_write_failure
 *
 * Purpose:     Tests the library's behavior when a mandatory filter returns
 *              failure. There're only 5 chunks with each of them having
 *              2 integers. The filter will fail in the last chunk. The
 *              dataset should release all resources even though the last
 *              chunk can't be flushed to file. The file should close
 *              successfully.
 *
 * Return:
 *              Success: 0
 *              Failure: -1
 *
 * Programmer:  Raymond Lu
 *              25 August 2010
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
static herr_t
test_filter_write(char *file_name, hid_t my_fapl)
{
    /* NOTE(review): filename, nfilters, flags, and rbuf are unused in the
     * portion of this function visible here -- presumably used in the
     * truncated remainder; verify before removing. */
    char filename[1024];
    hid_t file = -1;
    hid_t dataset=-1;                           /* dataset ID */
    hid_t sid=-1;                               /* dataspace ID */
    hid_t dcpl=-1;                              /* dataset creation property list ID */
    hsize_t dims[1]={DIM};                      /* dataspace dimension - 10*/
    hsize_t chunk_dims[1]={FILTER_CHUNK_DIM};   /* chunk dimension - 2*/
    int nfilters;                               /* number of filters in DCPL */
    unsigned flags;                             /* flags for filter */
    int points[DIM];                            /* Data */
    int rbuf[DIM];                              /* Data */
    herr_t ret;                                 /* generic return value */
    int i;

    TESTING("data writing when a mandatory filter fails");

    /* Create file */
    if((file = H5Fcreate(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, my_fapl)) < 0) TEST_ERROR

    /* create the data space */
    if((sid = H5Screate_simple(1, dims, NULL)) < 0) TEST_ERROR

    /* Create dcpl and register the filter */
    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR

    if(H5Pset_chunk(dcpl, 1, chunk_dims) < 0) TEST_ERROR

    /* Register the always-failing test filter with the library */
    if(H5Zregister (H5Z_FAIL_TEST) < 0) TEST_ERROR

    /* Check that the filter was registered */
    if(TRUE != H5Zfilter_avail(H5Z_FILTER_FAIL_TEST)) FAIL_STACK_ERROR

    /* Enable the filter as mandatory */
    if(H5Pset_filter(dcpl, H5Z_FILTER_FAIL_TEST, 0, (size_t)0, NULL) < 0) TEST_ERROR

    /* create a dataset */
    if((dataset = H5Dcreate2(file, DSET_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) TEST_ERROR

    /* Initialize the write buffer */
    for(i = 0; i < DIM; i++)
        points[i] = i;

    /* Write data */
    if(H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, sid, H5P_DEFAULT, points) < 0) TEST_ERROR

    /* clean up objects used for this test */
    if(H5Pclose (dcpl) < 0) TEST_ERROR
    if(H5Sclose (sid) < 0) TEST_ERROR

    /* Dataset closing should fail */
    H5E_BEGIN_TRY {
        ret = H5Dclose (dataset);
    } H5E_END_TRY;
    if(ret >= 0) {
        H5_FAILED();
        puts(" Dataset is supposed to fail because the chunk can't be flushed to file.");
        TEST_ERROR
    }
    /* NOTE(review): this function is truncated in this excerpt -- its
     * remaining cleanup, return, and error label continue beyond the
     * visible text. */