Code Example #1
File: ph5example.c Project: fortnern/H5Tuner
/*
 * Verify the content of the dataset against the expected data.
 */
int dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], DATATYPE *dataset, DATATYPE *original)
{
#define MAX_ERR_REPORT	10		/* Maximum number of errors reported */

    hsize_t i, j;
    int nerr;

    /* print it if verbose */
    if (verbose)
	dataset_print(start, count, stride, dataset);

    nerr = 0;
    for (i=0; i < count[0]; i++){
	for (j=0; j < count[1]; j++){
	    if (*dataset++ != *original++){
		nerr++;
		if (nerr <= MAX_ERR_REPORT){
		    printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
			(unsigned long)i, (unsigned long)j,
			(unsigned long)(i*stride[0]+start[0]), (unsigned long)(j*stride[1]+start[1]),
			*(dataset-1), *(original-1));
		}
	    }
	}
    }
    if (nerr > MAX_ERR_REPORT)
	printf("[more errors ...]\n");
    if (nerr)
	printf("%d errors found in dataset_vrfy\n", nerr);
    return(nerr);
}
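
The verbose branch above hands the same start/count/stride arguments to dataset_print(), whose definition is not included on this page. A minimal sketch of such a printer, assuming the DATATYPE elements are ints laid out row-major exactly as dataset_vrfy walks them (an assumption, not code from the project), might look like:

/* Hypothetical sketch: print a 2-D slab row by row, labelling each row
 * with its position in the file dataspace. Assumes DATATYPE is int. */
void dataset_print(hsize_t start[], hsize_t count[], hsize_t stride[], DATATYPE *dataset)
{
    hsize_t i, j;

    for (i = 0; i < count[0]; i++){
        printf("Row %lu: ", (unsigned long)(i*stride[0] + start[0]));
        for (j = 0; j < count[1]; j++)
            printf("%03d ", *dataset++);
        printf("\n");
    }
}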
Code Example #2
File: selection_sort.c Project: rmascarenhas/ita
int
main(int argc, char *argv[]) {
  if (dataset_init(argc, argv) == -1) {
    dataset_usage();
    return EXIT_FAILURE;
  }

  int size;
  int *buf = malloc(SSORT_MAX_NUMS * sizeof(int));
  if (buf == NULL) {
    pexit(progname);
  }

  size = dataset_get(buf, SSORT_MAX_NUMS);
  if (size == -1) {
    pexit(progname);
  }

  selection_sort(buf, size);
  dataset_print(buf, size);

  free(buf);

  return EXIT_SUCCESS;
}
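
The example assumes a selection_sort(buf, size) helper that orders the buffer in place before dataset_print() renders it. A minimal sketch matching that call site (the signature is inferred from the call, not taken from the project) could be:

/* Hypothetical sketch: in-place selection sort over an int buffer. */
void selection_sort(int *buf, int size)
{
  int i, j, min, tmp;

  for (i = 0; i < size - 1; i++) {
    min = i;
    for (j = i + 1; j < size; j++)  /* find the smallest remaining element */
      if (buf[j] < buf[min])
        min = j;
    tmp = buf[i];                   /* swap it into position i */
    buf[i] = buf[min];
    buf[min] = tmp;
  }
}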
Code Example #3
File: external.c Project: obaltzer/geocube
void external_dataset_sort(external_dataset_t* data)
{
    struct runtime rt;
    void* results = NULL;
    size_t i;
    
    stream_t* tmp;
    stream_t* stream = data->stream;
    stream_seek(stream, 0);
    tmp = stream_create(stream->config, "tmp.stream");
    stream_open(tmp, O_CREAT | O_TRUNC | O_RDWR | O_SYNC);
    /* sort each individual chunk of memory size */
    for(i = 0; i < data->n_records / MEMORY_RECORDS(stream); i++)
    {
        memory_read(stream, RECORDS(data->mem), MEMORY_RECORDS(stream));
        N_RECORDS(data->mem) = MEMORY_RECORDS(stream);
        results = NULL;
        start_timing(&rt);
        fp_im_sort(data->context, RECORDS(data->mem), N_RECORDS(data->mem), &results);
        stop_timing(&rt);
        printf("Sort time: %f\n", get_runtime(rt));
        memory_write(tmp, RECORDS(data->mem), N_RECORDS(data->mem));
        dataset_print(data->mem, TRUE);

    }
    if(data->n_records % MEMORY_RECORDS(stream))
    {
        memory_read(stream, RECORDS(data->mem), data->n_records % MEMORY_RECORDS(stream));
        N_RECORDS(data->mem) = data->n_records % MEMORY_RECORDS(stream);
        start_timing(&rt);
        results = NULL;
        fp_im_sort(data->context, RECORDS(data->mem), N_RECORDS(data->mem), &results);
        stop_timing(&rt);
        memory_write(tmp, RECORDS(data->mem), N_RECORDS(data->mem));
        dataset_print(data->mem, TRUE);
    }
    /* merge memory chunks block by block */
}
Code Example #4
File: visualize.c Project: obaltzer/group-movement
int main(int argc, char** argv)
{
    /* configuration for the visualization program */
    visualize_config_t config;
    /* reference to the loaded dataset */
    dataset_t* dataset;
    group_list_t* groups = NULL;

    configure(&config, argc, argv);

    /* load dataset from input file */
    dataset = dataset_load(config.input);
    if(dataset)
    {
        /* if a group list is defined, load that one too */
        if(config.have_groups)
            groups = group_list_load(config.grpfile);

        /* if the dataset is supposed to be printed to stdout in text format,
         * then do so */
        if(config.print)
            dataset_print(dataset);

        /* now draw the dataset */
        dataset_draw(&config, dataset, groups);

        /* release group list from memory if there is one */
        if(groups)
            group_list_destroy(groups);

        /* release dataset from memory */
        dataset_destroy(&dataset);

    }

    return 0;
}
Code Example #5
File: ph5example.c Project: fortnern/H5Tuner
void
phdf5readAll(char *filename)
{
    hid_t fid1;			/* HDF5 file IDs */
    hid_t acc_tpl1;		/* File access templates */
    hid_t xfer_plist;		/* Dataset transfer properties list */
    hid_t file_dataspace;	/* File dataspace ID */
    hid_t mem_dataspace;	/* memory dataspace ID */
    hid_t dataset1, dataset2;	/* Dataset ID */
    DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2];	/* data buffer */
    DATATYPE data_origin1[SPACE1_DIM1][SPACE1_DIM2];	/* expected data buffer */

    hsize_t start[SPACE1_RANK];			/* for hyperslab setting */
    hsize_t count[SPACE1_RANK], stride[SPACE1_RANK];	/* for hyperslab setting */

    herr_t ret;         	/* Generic return value */

    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;

    if (verbose)
        printf("Collective read test on file %s\n", filename);

    /* -------------------
     * OPEN AN HDF5 FILE
     * -------------------*/
    /* setup file access template with parallel IO access. */
    acc_tpl1 = H5Pcreate (H5P_FILE_ACCESS);
    assert(acc_tpl1 != FAIL);
    MESG("H5Pcreate access succeed");
    /* set Parallel access with communicator */
    ret = H5Pset_fapl_mpio(acc_tpl1, comm, info);
    assert(ret != FAIL);
    MESG("H5Pset_fapl_mpio succeed");

    /* open the file collectively */
    fid1=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl1);
    assert(fid1 != FAIL);
    MESG("H5Fopen succeed");

    /* Release file-access template */
    ret=H5Pclose(acc_tpl1);
    assert(ret != FAIL);


    /* --------------------------
     * Open the datasets in it
     * ------------------------- */
    /* open the dataset1 collectively */
    dataset1 = H5Dopen2(fid1, DATASETNAME1, H5P_DEFAULT);
    assert(dataset1 != FAIL);
    MESG("H5Dopen2 succeed");

    /* open another dataset collectively */
    dataset2 = H5Dopen2(fid1, DATASETNAME2, H5P_DEFAULT);
    assert(dataset2 != FAIL);
    MESG("H5Dopen2 2 succeed");

    /*
     * Set up dimensions of the slab this process accesses.
     */

    /* Dataset1: each process takes a block of columns. */
    slab_set(start, count, stride, BYCOL);
    if (verbose)
        printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n",
            (unsigned long)start[0], (unsigned long)start[1],
            (unsigned long)count[0], (unsigned long)count[1],
            (unsigned long)(count[0]*count[1]));

    /* create a file dataspace independently */
    file_dataspace = H5Dget_space (dataset1);
    assert(file_dataspace != FAIL);
    MESG("H5Dget_space succeed");
    ret=H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride,
	    count, NULL);
    assert(ret != FAIL);
    MESG("H5Sset_hyperslab succeed");

    /* create a memory dataspace independently */
    mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL);
    assert (mem_dataspace != FAIL);

    /* fill the expected-data buffer with test data */
    dataset_fill(start, count, stride, &data_origin1[0][0]);
    MESG("data_array initialized");
    if (verbose){
	MESG("data_array created");
	/* print the expected data; the read buffer has not been filled yet */
	dataset_print(start, count, stride, &data_origin1[0][0]);
    }

    /* set up the collective transfer properties list */
    xfer_plist = H5Pcreate (H5P_DATASET_XFER);
    assert(xfer_plist != FAIL);
    ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
    assert(ret != FAIL);
    MESG("H5Pcreate xfer succeed");

    /* read data collectively */
    ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
	    xfer_plist, data_array1);
    assert(ret != FAIL);
    MESG("H5Dread succeed");

    /* verify the read data with original expected data */
    ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]);
    assert(ret != FAIL);

    /* release all temporary handles. */
    /* Could have used them for dataset2 but it is cleaner */
    /* to create them again.*/
    H5Sclose(file_dataspace);
    H5Sclose(mem_dataspace);
    H5Pclose(xfer_plist);

    /* Dataset2: each process takes a block of rows. */
    slab_set(start, count, stride, BYROW);
    if (verbose)
        printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n",
            (unsigned long)start[0], (unsigned long)start[1],
            (unsigned long)count[0], (unsigned long)count[1],
            (unsigned long)(count[0]*count[1]));

    /* create a file dataspace independently */
    file_dataspace = H5Dget_space (dataset1);
    assert(file_dataspace != FAIL);
    MESG("H5Dget_space succeed");
    ret=H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride,
	    count, NULL);
    assert(ret != FAIL);
    MESG("H5Sset_hyperslab succeed");

    /* create a memory dataspace independently */
    mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL);
    assert (mem_dataspace != FAIL);

    /* fill the expected-data buffer with test data */
    dataset_fill(start, count, stride, &data_origin1[0][0]);
    MESG("data_array initialized");
    if (verbose){
	MESG("data_array created");
	/* print the expected data; the read buffer has not been filled yet */
	dataset_print(start, count, stride, &data_origin1[0][0]);
    }

    /* set up the collective transfer properties list */
    xfer_plist = H5Pcreate (H5P_DATASET_XFER);
    assert(xfer_plist != FAIL);
    ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
    assert(ret != FAIL);
    MESG("H5Pcreate xfer succeed");

    /* read data collectively */
    ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
	    xfer_plist, data_array1);
    assert(ret != FAIL);
    MESG("H5Dread succeed");

    /* verify the read data with original expected data */
    ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]);
    assert(ret != FAIL);

    /* release all temporary handles. */
    H5Sclose(file_dataspace);
    H5Sclose(mem_dataspace);
    H5Pclose(xfer_plist);


    /*
     * All reads completed.  Close datasets collectively
     */
    ret=H5Dclose(dataset1);
    assert(ret != FAIL);
    MESG("H5Dclose1 succeed");
    ret=H5Dclose(dataset2);
    assert(ret != FAIL);
    MESG("H5Dclose2 succeed");

    /* close the file collectively */
    H5Fclose(fid1);
}
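
The ph5example.c snippets partition the 2-D dataset with slab_set(), whose body is not shown on this page. A sketch consistent with the BYROW/BYCOL comments, assuming the usual mpi_rank/mpi_size globals and a SPACE1_DIM1 x SPACE1_DIM2 extent (assumptions, not the project's code), could be:

/* Hypothetical sketch: compute hyperslab parameters so that each MPI
 * process gets a contiguous block of rows (BYROW) or columns (BYCOL). */
void slab_set(hsize_t start[], hsize_t count[], hsize_t stride[], int mode)
{
    stride[0] = 1;
    stride[1] = 1;
    if (mode == BYROW) {
        count[0] = SPACE1_DIM1 / mpi_size;   /* rows per process */
        count[1] = SPACE1_DIM2;
        start[0] = mpi_rank * count[0];
        start[1] = 0;
    } else {                                 /* BYCOL */
        count[0] = SPACE1_DIM1;
        count[1] = SPACE1_DIM2 / mpi_size;   /* columns per process */
        start[0] = 0;
        start[1] = mpi_rank * count[1];
    }
}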
Code Example #6
File: ph5example.c Project: fortnern/H5Tuner
void
phdf5writeAll(char *filename)
{
    hid_t fid1;			/* HDF5 file IDs */
    hid_t acc_tpl1;		/* File access templates */
    hid_t xfer_plist;		/* Dataset transfer properties list */
    hid_t sid1;   		/* Dataspace ID */
    hid_t file_dataspace;	/* File dataspace ID */
    hid_t mem_dataspace;	/* memory dataspace ID */
    hid_t dataset1, dataset2;	/* Dataset ID */
    hsize_t dims1[SPACE1_RANK] =
	{SPACE1_DIM1,SPACE1_DIM2};	/* dataspace dim sizes */
    DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2];	/* data buffer */

    hsize_t start[SPACE1_RANK];			/* for hyperslab setting */
    hsize_t count[SPACE1_RANK], stride[SPACE1_RANK];	/* for hyperslab setting */

    herr_t ret;         	/* Generic return value */

    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;


		/* in support of H5Tuner Test */
		MPI_Comm comm_test = MPI_COMM_WORLD;
		MPI_Info info_test ;
		int i_test, nkeys_test, flag_test;
		char key[MPI_MAX_INFO_KEY], value[MPI_MAX_INFO_VAL+1];
		char *libtuner_file = getenv("LD_PRELOAD");
		/* in support of H5Tuner Test */

    if (verbose)
        printf("Collective write test on file %s\n", filename);

    /* -------------------
     * START AN HDF5 FILE
     * -------------------*/
    /* setup file access template with parallel IO access. */
    acc_tpl1 = H5Pcreate (H5P_FILE_ACCESS);
    assert(acc_tpl1 != FAIL);
    MESG("H5Pcreate access succeed");
    /* set Parallel access with communicator */
    ret = H5Pset_fapl_mpio(acc_tpl1, comm, info);
    assert(ret != FAIL);
    MESG("H5Pset_fapl_mpio succeed");

    /* create the file collectively */
    fid1=H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl1);
    assert(fid1 != FAIL);
    MESG("H5Fcreate succeed");

// ------------------------------------------------
// H5Tuner tests
// ------------------------------------------------

// Retrieve MPI parameters set via the H5Tuner
printf("\n\n--------------------------------------------------\n");
if ( (libtuner_file != NULL) && (strlen(libtuner_file) > 1) ){
	printf("Version of the H5Tuner loaded: \n%s\n", libtuner_file);
}
else {
	printf("No H5Tuner currently loaded.\n");
}
printf("--------------------------------------------------\n");


// Retrieve HDF5 Threshold and Alignment
hsize_t alignment[2];
size_t sieve_buf_size;
alignment[0]= 0; // threshold value
alignment[1]= 0; // alignment value
int ierr = H5Pget_alignment(acc_tpl1, &alignment[0], &alignment[1]);
printf("\n\n--------------------------------------------------\n");
printf("Testing values for Threshold and Alignment\n");
printf("--------------------------------------------------\n");
printf("Test value set to:88 \nRetrieved Threshold=%lu\n", alignment[0]);
printf("Test value set to:44 \nRetrieved Alignment=%lu\n", alignment[1]);
// Check Threshold
if ( alignment[0] == 88 ) {
	printf("PASSED: Threshold Test\n");
}
else {
	printf("FAILED: Threshold Test\n");
}
// Check Alignment
if ( alignment[1] == 44 ) {
	printf("PASSED: Alignment Test\n");
}
else {
	printf("FAILED: Alignment Test\n");
}
printf("--------------------------------------------------\n\n");

// Retrieve HDF5 sieve buffer size
ierr = H5Pget_sieve_buf_size(acc_tpl1, &sieve_buf_size);
printf("\n\n--------------------------------------------------\n");
printf("Testing values for Sieve Buffer Size\n");
printf("--------------------------------------------------\n");
printf("Test value set to:77 \nRetrieved Sieve Buffer Size=%lu\n", sieve_buf_size);
// Check sieve buffer size
if ( (int) sieve_buf_size == 77 ) {
	printf("PASSED: Sieve Buffer Size Test\n");
}
else {
	printf("FAILED: Sieve Buffer Size Test\n");
}
printf("--------------------------------------------------\n\n");

// Retrieve MPI parameters set via the H5Tuner
MPI_Info_create(&info_test);

ret = H5Pget_fapl_mpio(acc_tpl1, &comm_test, &info_test);
assert(ret != FAIL);
MESG("H5Pget_fapl_mpio succeed");


printf("-------------------------------------------------\n" );
printf("Testing parameters values via MPI_Info\n" );
printf("-------------------------------------------------\n" );
if(info_test == MPI_INFO_NULL) {
				printf("MPI info object is null. No keys are available.\n");
}
else {
	MPI_Info_get_nkeys(info_test, &nkeys_test);
	//printf("MPI info has %d keys\n", nkeys_test);
	if (nkeys_test <= 0) {
		printf("MPI info has no keys\n");
	}
	else {
		printf("MPI info has %d keys\n", nkeys_test);
		for ( i_test=0; i_test < nkeys_test; i_test++) {
			MPI_Info_get_nthkey( info_test, i_test, key );
			MPI_Info_get( info_test, key, MPI_MAX_INFO_VAL, value, &flag_test );
			printf( "Retrieved value for key %s is %s\n", key, value );
			//fflush(stdout);
		}
	}
	printf("-------------------------------------------------\n" );
	MPI_Info_free(&info_test);
}
// end of H5Tuner tests
// ---------------------------------------


    /* Release file-access template */
    ret=H5Pclose(acc_tpl1);
    assert(ret != FAIL);


    /* --------------------------
     * Define the dimensions of the overall datasets
     * and create the dataset
     * ------------------------- */
    /* setup dimensionality object */
    sid1 = H5Screate_simple (SPACE1_RANK, dims1, NULL);
    assert (sid1 != FAIL);
    MESG("H5Screate_simple succeed");


    /* create a dataset collectively */
    dataset1 = H5Dcreate2(fid1, DATASETNAME1, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    assert(dataset1 != FAIL);
    MESG("H5Dcreate2 succeed");

    /* create another dataset collectively */
    dataset2 = H5Dcreate2(fid1, DATASETNAME2, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    assert(dataset2 != FAIL);
    MESG("H5Dcreate2 2 succeed");

    /*
     * Set up dimensions of the slab this process accesses.
     */

    /* Dataset1: each process takes a block of rows. */
    slab_set(start, count, stride, BYROW);
    if (verbose)
        printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n",
            (unsigned long)start[0], (unsigned long)start[1],
            (unsigned long)count[0], (unsigned long)count[1],
            (unsigned long)(count[0]*count[1]));

    /* create a file dataspace independently */
    file_dataspace = H5Dget_space (dataset1);
    assert(file_dataspace != FAIL);
    MESG("H5Dget_space succeed");
    ret=H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride,
	    count, NULL);
    assert(ret != FAIL);
    MESG("H5Sset_hyperslab succeed");

    /* create a memory dataspace independently */
    mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL);
    assert (mem_dataspace != FAIL);

    /* fill the local slab with some trivial data */
    dataset_fill(start, count, stride, &data_array1[0][0]);
    MESG("data_array initialized");
    if (verbose){
			MESG("data_array created");
			dataset_print(start, count, stride, &data_array1[0][0]);
    }

    /* set up the collective transfer properties list */
    xfer_plist = H5Pcreate (H5P_DATASET_XFER);
    assert(xfer_plist != FAIL);
    ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
    assert(ret != FAIL);
    MESG("H5Pcreate xfer succeed");

    /* write data collectively */
    ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
	    xfer_plist, data_array1);
    assert(ret != FAIL);
    MESG("H5Dwrite succeed");

    /* release all temporary handles. */
    /* Could have used them for dataset2 but it is cleaner */
    /* to create them again.*/
    H5Sclose(file_dataspace);
    H5Sclose(mem_dataspace);
    H5Pclose(xfer_plist);

    /* Dataset2: each process takes a block of columns. */
    slab_set(start, count, stride, BYCOL);
    if (verbose)
        printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n",
            (unsigned long)start[0], (unsigned long)start[1],
            (unsigned long)count[0], (unsigned long)count[1],
            (unsigned long)(count[0]*count[1]));

    /* put some trivial data in the data_array */
    dataset_fill(start, count, stride, &data_array1[0][0]);
    MESG("data_array initialized");
    if (verbose){
			MESG("data_array created");
			dataset_print(start, count, stride, &data_array1[0][0]);
    }

    /* create a file dataspace independently */
    file_dataspace = H5Dget_space (dataset1);
    assert(file_dataspace != FAIL);
    MESG("H5Dget_space succeed");
    ret=H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride,
	    count, NULL);
    assert(ret != FAIL);
    MESG("H5Sset_hyperslab succeed");

    /* create a memory dataspace independently */
    mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL);
    assert (mem_dataspace != FAIL);

    /* fill the local slab with some trivial data */
    dataset_fill(start, count, stride, &data_array1[0][0]);
    MESG("data_array initialized");
    if (verbose){
			MESG("data_array created");
			dataset_print(start, count, stride, &data_array1[0][0]);
    }

    /* set up the collective transfer properties list */
    xfer_plist = H5Pcreate (H5P_DATASET_XFER);
    assert(xfer_plist != FAIL);
    ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
    assert(ret != FAIL);
    MESG("H5Pcreate xfer succeed");

    /* write data collectively */
    ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
	    xfer_plist, data_array1);
    assert(ret != FAIL);
    MESG("H5Dwrite succeed");

    /* release all temporary handles. */
    H5Sclose(file_dataspace);
    H5Sclose(mem_dataspace);
    H5Pclose(xfer_plist);


    /*
     * All writes completed.  Close datasets collectively
     */
    ret=H5Dclose(dataset1);
    assert(ret != FAIL);
    MESG("H5Dclose1 succeed");
    ret=H5Dclose(dataset2);
    assert(ret != FAIL);
    MESG("H5Dclose2 succeed");

    /* release all IDs created */
    H5Sclose(sid1);

    /* close the file collectively */
    H5Fclose(fid1);
}
Code Example #7
void
phdf5write2(char *filename, hio_context_t context)
{
    hid_t fid1;			/* HDF5 file IDs */
    hid_t acc_tpl1;		/* File access templates */
    hid_t sid1;   		/* Dataspace ID */
    hid_t file_dataspace;	/* File dataspace ID */
    hid_t mem_dataspace;	/* memory dataspace ID */
    hid_t dataset1, dataset2;	/* Dataset ID */
    hsize_t dims1[SPACE1_RANK] =
	{SPACE1_DIM1,SPACE1_DIM2};	/* dataspace dim sizes */
    DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2];	/* data buffer */

    hsize_t start[SPACE1_RANK];			/* for hyperslab setting */
    hsize_t count[SPACE1_RANK], stride[SPACE1_RANK];	/* for hyperslab setting */

    herr_t ret;         	/* Generic return value */
    ssize_t bytes_written;
    hio_request_t req;

    if (verbose)
	printf("Collective write test on file %s\n", filename);

    /* -------------------
     * START AN HDF5 FILE
     * -------------------*/
    /* setup file access template with hio IO access. */
    acc_tpl1 = H5Pcreate (H5P_FILE_ACCESS);
    assert(acc_tpl1 != FAIL);
    MESG("H5Pcreate access succeed");
    H5FD_hio_set_write_io(&settings, H5FD_HIO_STRIDED);
    H5FD_hio_set_write_blocking(&settings, H5FD_HIO_NONBLOCKING);
    H5FD_hio_set_request(&settings, &req);
    ret = H5Pset_fapl_hio(acc_tpl1, &settings);
    assert(ret != FAIL);
    MESG("H5Pset_fapl_hio succeed");

    /* create the file collectively */
    fid1=H5Fcreate(filename,0,H5P_DEFAULT,acc_tpl1);
    assert(fid1 != FAIL);
    MESG("H5Fcreate succeed");

    /* Release file-access template */
    ret=H5Pclose(acc_tpl1);
    assert(ret != FAIL);


    /* --------------------------
     * Define the dimensions of the overall datasets
     * and create the dataset
     * ------------------------- */
    /* setup dimensionality object */
    sid1 = H5Screate_simple (SPACE1_RANK, dims1, NULL);
    assert (sid1 != FAIL);
    MESG("H5Screate_simple succeed");

    /* create a dataset collectively */
    dataset1 = H5Dcreate2(fid1, DATASETNAME1, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    assert(dataset1 != FAIL);
    MESG("H5Dcreate2 succeed");

    /* create another dataset collectively */
    dataset2 = H5Dcreate2(fid1, DATASETNAME2, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    assert(dataset2 != FAIL);
    MESG("H5Dcreate2 2 succeed");

    /*
     * Set up dimensions of the slab this process accesses.
     */

    /* Dataset1: each process takes a block of rows. */
    slab_set(start, count, stride, BYROW);
    if (verbose)
        printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n",
            (unsigned long)start[0], (unsigned long)start[1],
            (unsigned long)count[0], (unsigned long)count[1],
            (unsigned long)(count[0]*count[1]));

    /* create a file dataspace independently */
    file_dataspace = H5Dget_space (dataset1);
    assert(file_dataspace != FAIL);
    MESG("H5Dget_space succeed");
    ret=H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride,
	    count, NULL);
    assert(ret != FAIL);
    MESG("H5Sset_hyperslab succeed");

    /* create a memory dataspace independently */
    mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL);
    assert (mem_dataspace != FAIL);

    /* fill the local slab with some trivial data */
    dataset_fill(start, count, stride, &data_array1[0][0]);
    MESG("data_array initialized");
    if (verbose){
	MESG("data_array created");
	dataset_print(start, count, stride, &data_array1[0][0]);
    }

    /* write data strided and non-blocking; wait on the hio request below */
    ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
	    H5P_DEFAULT, data_array1);
    assert(ret != FAIL);
    MESG("H5Dwrite succeed");

    //Wait for the request to finish
    hio_request_wait(&req, 1, &bytes_written);

    /* release all temporary handles. */
    /* Could have used them for dataset2 but it is cleaner */
    /* to create them again.*/
    H5Sclose(file_dataspace);
    H5Sclose(mem_dataspace);

    /* Dataset2: each process takes a block of columns. */
    slab_set(start, count, stride, BYCOL);
    if (verbose)
        printf("start[]=(%lu,%lu), count[]=(%lu,%lu), total datapoints=%lu\n",
            (unsigned long)start[0], (unsigned long)start[1],
            (unsigned long)count[0], (unsigned long)count[1],
            (unsigned long)(count[0]*count[1]));

    /* put some trivial data in the data_array */
    dataset_fill(start, count, stride, &data_array1[0][0]);
    MESG("data_array initialized");
    if (verbose){
	MESG("data_array created");
	dataset_print(start, count, stride, &data_array1[0][0]);
    }

    /* create a file dataspace independently */
    file_dataspace = H5Dget_space (dataset1);
    assert(file_dataspace != FAIL);
    MESG("H5Dget_space succeed");
    ret=H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride,
	    count, NULL);
    assert(ret != FAIL);
    MESG("H5Sset_hyperslab succeed");

    /* create a memory dataspace independently */
    mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL);
    assert (mem_dataspace != FAIL);

    /* fill the local slab with some trivial data */
    dataset_fill(start, count, stride, &data_array1[0][0]);
    MESG("data_array initialized");
    if (verbose){
	MESG("data_array created");
	dataset_print(start, count, stride, &data_array1[0][0]);
    }

    /* write data contiguous and blocking */
    ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
	    H5P_DEFAULT, data_array1);
    assert(ret != FAIL);
    MESG("H5Dwrite succeed");

    /* release all temporary handles. */
    H5Sclose(file_dataspace);
    H5Sclose(mem_dataspace);

    /*
     * All writes completed.  Close datasets collectively
     */
    ret=H5Dclose(dataset1);
    assert(ret != FAIL);
    MESG("H5Dclose1 succeed");
    ret=H5Dclose(dataset2);
    assert(ret != FAIL);
    MESG("H5Dclose2 succeed");

    /* release all IDs created */
    H5Sclose(sid1);

    /* close the file collectively */
    H5Fclose(fid1);
}
Code Example #8
File: external.c Project: obaltzer/geocube
int main(int argc, char** argv)
{
    FILE* f;
    metadata_t* meta;
    fp_context_t* context;
    struct sort_config sc;
    dataset_t* ds;
    char target[256];
    stream_t* stream;
    config_t conf;
    external_dataset_t* ext;
    int i; 

    if(argc < 2)
    {
        printf("Barf!\n");
        exit(-1);
    }
    
    if((f = fopen(argv[1], "r")) == NULL)
    {
        printf("Cannot open dataset file %s.\n", argv[1]);
        return -1;
    }
    meta = metadata_read(f);
    sc.verbose = 1;
    sc.normalize = 0;
    sc.denormalize = 0;
    sc.benchmark = 1;
    sc.find_order = ITERATIVE;
    sc.index = KEEP;
    sc.cmp = HILBERT;
    sc.print = stdout;
    context = fp_create_context(&sc, meta->dimz, meta->dimf, meta->start_order);
    ds = dataset_read(f, meta, context);
    fclose(f);
    dataset_print(ds, FALSE);
    
    strcpy(target, argv[1]);
    strcat(target, ".stream");
    printf("Number of records: %d\n", ds->n_records);
    printf("Record size: %d\n", context->record_size);

    conf.memory_size = 1000; 
    conf.block_size = 0x100;
    conf.record_size = context->record_size;
    stream = stream_create(&conf, target);
    stream_open(stream, O_CREAT | O_TRUNC | O_SYNC | O_WRONLY);
    dataset_convert(ds, stream);
    stream_close(stream);
    stream_open(stream, O_SYNC | O_RDONLY);
    ext = external_dataset_create(stream, meta, ds->n_records);

    for(i = 0; i < ds->n_records / MEMORY_RECORDS(stream); i++)
    {
        memory_read(stream, ext->mem->records, MEMORY_RECORDS(stream));
        ext->mem->n_records = MEMORY_RECORDS(stream);
        dataset_print(ext->mem, FALSE);
    }
    if(ds->n_records % MEMORY_RECORDS(stream))
    {
        memory_read(stream, ext->mem->records, ds->n_records % MEMORY_RECORDS(stream));
        ext->mem->n_records = ds->n_records % MEMORY_RECORDS(stream);
        dataset_print(ext->mem, FALSE);
    }
    printf("Stream position: %ld vs. %ld\n", stream->pos, stream_tell(stream));
    external_dataset_sort(ext);
    
    
    external_dataset_destroy(ext);
    stream_destroy(stream);
    stats_print();
    dataset_destroy(ds);
    fp_destroy_context(context);
    metadata_destroy(meta);
    return 0;
}