Code example #1
/*-------------------------------------------------------------------------
 * Function:	randll
 *
 * Purpose:	Create a random long long value.
 * 		Ensures that a write at this value doesn't overlap any
 *		previous write.
 *
 * Return:	Success:	Random value
 *
 *		Failure:	Random value which overlaps another write
 *
 * Programmer:	Robb Matzke
 *              Tuesday, November 24, 1998
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
static hsize_t
randll(hsize_t limit, int current_index)
{
    hsize_t	acc;
    int 	overlap = 1;
    int 	i;
    int 	tries = 0;

    /* Generate up to MAX_TRIES random numbers until one of them */
    /* does not overlap with any previous writes */
    while(overlap != 0 && tries < MAX_TRIES)
    {
        acc = HDrandom();
        acc *= HDrandom();
        acc = acc % limit;
        overlap = 0;

        for(i = 0; i < current_index; i++)
        {
            if((acc >= values_used[i]) && (acc < values_used[i]+WRT_SIZE))
                overlap = 1;
            if((acc+WRT_SIZE >= values_used[i]) && (acc+WRT_SIZE < values_used[i]+WRT_SIZE))
                overlap = 1;
        }
        tries++;
    }

    values_used[current_index]=acc;

    return acc;
}
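The function above leans on three names defined elsewhere in the test program: the values_used[] array that records previously returned offsets, the WRT_SIZE write size, and the MAX_TRIES retry limit. The following is a minimal sketch of how those pieces could fit together; the constants and the driver loop are illustrative assumptions, not the original file's definitions.

/* Hypothetical context for randll() -- all values are illustrative */
#define MAX_TRIES   100                     /* assumed retry limit */
#define WRT_SIZE    1024                    /* assumed size of each write */
#define NUM_WRITES  32                      /* assumed number of writes to place */

static hsize_t values_used[NUM_WRITES];     /* start offsets already handed out */

/* Possible call pattern: pick NUM_WRITES non-overlapping offsets below `limit` */
static void
pick_offsets(hsize_t limit)
{
    int i;

    for(i = 0; i < NUM_WRITES; i++) {
        hsize_t offset = randll(limit, i);  /* also recorded in values_used[i] */

        /* a real test would write WRT_SIZE elements starting at `offset` */
        (void)offset;
    }
}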
Code example #2
File: swmr_common.c  Project: aleph7/HDF5Kit
/*-------------------------------------------------------------------------
 * Function:    choose_dataset
 *
 * Purpose:     Selects a random dataset in the SWMR file
 *
 * Parameters:  N/A
 *
 * Return:      Success:    A pointer to information about a dataset.
 *              Failure:    Can't fail
 *
 *-------------------------------------------------------------------------
 */
symbol_info_t *
choose_dataset(void)
{
    unsigned level;             /* The level of the dataset */
    unsigned offset;            /* The "offset" of the dataset at that level */

    /* Determine level of dataset */
    level = symbol_mapping[HDrandom() % NMAPPING];

    /* Determine the offset of the level */
    offset = HDrandom() % symbol_count[level];

    return &symbol_info[level][offset];
} /* end choose_dataset() */
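choose_dataset() draws on three file-scope tables from swmr_common.c: symbol_mapping[] (a frequency map from a uniform random draw to a level), symbol_count[] (how many datasets exist at each level), and symbol_info[][] (per-dataset state, also used by code examples #3 and #13). The sketch below shows one plausible shape for that data; the sizes and contents are assumptions for illustration only.

/* Hypothetical layout of the SWMR symbol tables (values are illustrative) */
#define NLEVELS  5
#define NMAPPING 9

typedef struct {
    char    *name;          /* dataset name */
    hid_t    dsid;          /* dataset ID, when open */
    hsize_t  nrecords;      /* number of records the dataset currently holds */
} symbol_info_t;

/* More datasets at the deeper, less frequently chosen levels */
static unsigned       symbol_count[NLEVELS]    = {100, 200, 400, 800, 1600};

/* Skewed mapping: level 0 is drawn 4 times out of 9, level 4 only once */
static unsigned       symbol_mapping[NMAPPING] = {0, 0, 0, 0, 1, 1, 2, 3, 4};

static symbol_info_t *symbol_info[NLEVELS];    /* allocated and filled elsewhere */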
Code example #3
File: swmr_sparse_reader.c  Project: aleph7/HDF5Kit
/*-------------------------------------------------------------------------
 * Function:    check_dataset
 *
 * Purpose:     For a given dataset, checks to make sure that the stated
 *              and actual sizes are the same.  If they are not, then
 *              we have an inconsistent dataset due to a SWMR error.
 *
 * Parameters:  hid_t fid
 *              The SWMR test file's ID.
 *
 *              unsigned verbose
 *              Whether verbose console output is desired.
 *
 *              const symbol_info_t *symbol
 *              The dataset from which to read (the ID is in the struct).
 *              Must be pre-allocated.
 *
 *              symbol_t *record
 *              Memory for the record.  Must be pre-allocated.
 *
 *              hid_t rec_sid
 *              The memory dataspace for access.  It's always the same so
 *              there is no need to re-create it every time this function
 *              is called.
 *
 * Return:      Success:    0
 *              Failure:    -1
 *
 *-------------------------------------------------------------------------
 */
static int
check_dataset(hid_t fid, unsigned verbose, const symbol_info_t *symbol, symbol_t *record,
    hid_t rec_sid)
{
    hid_t dsid;                 /* Dataset ID */
    hid_t file_sid;             /* Dataset's space ID */
    hsize_t start[2] = {0, 0};  /* Hyperslab selection values */
    hsize_t count[2] = {1, 1};  /* Hyperslab selection values */

    HDassert(fid >= 0);
    HDassert(symbol);
    HDassert(record);
    HDassert(rec_sid >= 0);

    /* Open dataset for symbol */
    if((dsid = H5Dopen2(fid, symbol->name, H5P_DEFAULT)) < 0)
        return -1;

    /* Get the dataset's dataspace */
    if((file_sid = H5Dget_space(dsid)) < 0)
        return -1;

    /* Choose the random record in the dataset (will be the same as chosen by
     * the writer) */
    start[1] = (hsize_t)HDrandom() % symbol->nrecords;
    if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
        return -1;

    /* Emit informational message */
    if(verbose)
        HDfprintf(stderr, "Symbol = '%s', location = %lld\n", symbol->name, (long long)start);

    /* Read record from dataset */
    record->rec_id = (uint64_t)ULLONG_MAX;
    if(H5Dread(dsid, symbol_tid, rec_sid, file_sid, H5P_DEFAULT, record) < 0)
        return -1;

    /* Verify record value */
    if(record->rec_id != start[1]) {
        HDfprintf(stderr, "*** ERROR ***\n");
        HDfprintf(stderr, "Incorrect record value!\n");
        HDfprintf(stderr, "Symbol = '%s', location = %lld, record->rec_id = %llu\n", symbol->name, (long long)start, (unsigned long long)record->rec_id);
        return -1;
    } /* end if */

    /* Close the dataset's dataspace */
    if(H5Sclose(file_sid) < 0)
        return -1;

    /* Close dataset for symbol */
    if(H5Dclose(dsid) < 0)
        return -1;

    return 0;
} /* end check_dataset() */
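In the sparse reader, a driver loop would typically pick datasets with choose_dataset() and verify each one with check_dataset(). Below is a stripped-down, hypothetical version of such a loop; the real reader also polls the writer's "seq" attribute before checking, which is omitted here.

/* Hypothetical reader loop -- error handling and writer synchronization trimmed */
static int
read_records(hid_t fid, unsigned verbose, unsigned long nrecords)
{
    symbol_t record;        /* buffer for one record */
    hid_t    mem_sid;       /* scalar memory dataspace, reused for every call */
    unsigned long u;

    if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
        return -1;

    for(u = 0; u < nrecords; u++) {
        symbol_info_t *symbol = choose_dataset();   /* same sequence as the writer */

        if(check_dataset(fid, verbose, symbol, &record, mem_sid) < 0)
            return -1;
    }

    return (H5Sclose(mem_sid) < 0) ? -1 : 0;
}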
Code example #4
File: tid.c  Project: asteever/thirdparty_hdf5
/* Free callback */
static herr_t test_rct_free(void *_obj) {
    test_rct_obj_t *obj = (test_rct_obj_t *)_obj;
    long rem_idx, i;
    herr_t  ret;        /* return value */

    /* Mark this object as freed */
    obj->nfrees++;
    obj->obj_list->nobjs_rem--;

    /* Check freeing and nobjs_rem */
    if(!obj->freeing && (obj->obj_list->nobjs_rem > 0)) {
        /* Remove a random object from the list */
        rem_idx = HDrandom() % obj->obj_list->nobjs_rem;

        /* Scan the list, finding the rem_idx'th object that has not been
         * freed */
        for(i = 0; i < obj->obj_list->nobjs; i++)
            if(obj->obj_list->list[i].nfrees == 0) {
                if(rem_idx == 0)
                    break;
                else
                    rem_idx--;
            } /* end if */
        if(i == obj->obj_list->nobjs) {
            ERROR("invalid obj_list");
            goto out;
        } /* end if */
        else {
            /* Remove the object.  Mark as "freeing" so its own callback does
             * not free another object. */
            obj->obj_list->list[i].freeing = TRUE;
            ret = H5Idec_ref(obj->obj_list->list[i].id);
            CHECK(ret, FAIL, "H5Idec_ref");
            if(ret == FAIL)
                goto out;
            obj->obj_list->list[i].freeing = FALSE;
        } /* end else */
    } /* end if */

    /* Verify nobjs_rem is non-negative */
    if(obj->obj_list->nobjs_rem < 0) {
        ERROR("invalid nobjs_rem");
        goto out;
    } /* end if */

    return 0;

out:
    return -1;
} /* end test_rct_free() */
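This free callback and the test function in code example #10 below share a pair of bookkeeping structures that are defined elsewhere in tid.c. The definitions below are a plausible reconstruction based purely on how the fields are used in the two snippets; treat them as an assumption, not the file's actual declarations.

/* Hypothetical definitions matching the fields used by the tid.c snippets */
typedef struct test_rct_list_t test_rct_list_t;

typedef struct test_rct_obj_t {
    hid_t            id;         /* ID returned by H5Iregister() */
    long             nfrees;     /* how many times the free callback has run */
    hbool_t          freeing;    /* TRUE while this object is freeing another */
    test_rct_list_t *obj_list;   /* back-pointer to the shared list */
} test_rct_obj_t;

struct test_rct_list_t {
    test_rct_obj_t  *list;       /* array of tracked objects */
    long             nobjs;      /* total number of registered objects */
    long             nobjs_rem;  /* number of objects not yet freed */
};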
Code example #5
File: tskiplist.c  Project: Len3d/appleseed
/****************************************************************
**
**  test_skiplist_init(): Test H5SL (skiplist) code.
**      Initialize data for skip list testing
**
****************************************************************/
static void
test_skiplist_init(void)
{
    time_t curr_time;   /* Current time, for seeding random number generator */
    int new_val;        /* New value to insert */
    unsigned found;     /* Flag to indicate value was inserted already */
    size_t u,v;         /* Local index variables */

    /* Initialize random number seed */
    curr_time = HDtime(NULL);
    HDsrandom((unsigned)curr_time);

    /* Create randomized set of numbers */
    for(u=0; u<NUM_ELEMS; u++) {
        do {
            /* Reset flag */
            found=0;

            /* Generate random numbers from -5001 to 4999 */
            new_val=(int)(HDrandom()%10001)-5001;

            /* Check if the value is already in the array */
            for(v=0; v<u; v++)
                if(rand_num[v]==new_val)
                    found=1;
        } while(found);

        /* Set unique value in array */
        rand_num[u]=new_val;
    } /* end for */

    /* Copy random values to sorted array */
    HDmemcpy(sort_rand_num,rand_num,sizeof(int)*NUM_ELEMS);

    /* Sort random numbers */
    HDqsort(sort_rand_num, (size_t)NUM_ELEMS, sizeof(int), tst_sort);

    /* Copy random values to reverse sorted array */
    HDmemcpy(rev_sort_rand_num, rand_num, sizeof(int) * NUM_ELEMS);

    /* Sort random numbers */
    HDqsort(rev_sort_rand_num, (size_t)NUM_ELEMS, sizeof(int), tst_rev_sort);
} /* end test_skiplist_init() */
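The two HDqsort() calls above assume integer comparison callbacks, tst_sort and tst_rev_sort, that are not part of the snippet. They could be as simple as the following sketch (an assumption, not the file's actual definitions; the subtraction is safe here because the values stay in a small range).

/* Hypothetical comparators for the sorted and reverse-sorted copies */
static int
tst_sort(const void *i1, const void *i2)
{
    return *(const int *)i1 - *(const int *)i2;
}

static int
tst_rev_sort(const void *i1, const void *i2)
{
    return *(const int *)i2 - *(const int *)i1;
}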
Code example #6
File: zip_perf.c  Project: FilipeMaia/hdf5
static void
fill_with_random_data(Bytef *src, uLongf src_len)
{
    register unsigned u;
    h5_stat_t stat_buf;

    if (HDstat("/dev/urandom", &stat_buf) == 0) {
        uLongf len = src_len;
        Bytef *buf = src;
        int fd = HDopen("/dev/urandom", O_RDONLY, 0);

        HDfprintf(stdout, "Using /dev/urandom for random data\n");

        if (fd < 0)
            error(HDstrerror(errno));

        for (;;) {
            /* Only request the bytes still missing, so a short read cannot
             * cause the next read to run past the end of the buffer.
             */
            ssize_t rc = HDread(fd, buf, len);

            if (rc == -1)
                error(HDstrerror(errno));

            if (rc == (ssize_t)len)
                break;

            buf += rc;
            len -= rc;
        }

        HDclose(fd);
    } else {
        HDfprintf(stdout, "Using random() for random data\n");

        for (u = 0; u < src_len; ++u)
            src[u] = (Bytef)(0xff & HDrandom());
    }

    if (compress_percent) {
        unsigned long s = src_len * compress_percent / 100;

        HDmemset(src, '\0', s);
    }
}
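fill_with_random_data() is driven by a file-scope compress_percent setting (how much of the buffer to zero out so the data stays compressible) and the zlib Bytef/uLongf types. A hypothetical call site, with an illustrative buffer size:

/* Hypothetical call site for fill_with_random_data() (sizes are illustrative) */
#include <zlib.h>

static unsigned compress_percent = 50;      /* assumed: zero out half of the buffer */

static void
prepare_source_buffer(void)
{
    uLongf  src_len = 1024 * 1024;          /* 1 MiB of test data */
    Bytef  *src = (Bytef *)HDmalloc(src_len);

    if(NULL == src)
        return;

    fill_with_random_data(src, src_len);    /* partly random, partly zeroed */

    /* ... a benchmark would now time compress2()/uncompress() over src ... */

    HDfree(src);
}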
Code example #7
File: pool.c  Project: chaako/sceptic3D
/*-------------------------------------------------------------------------
 * Function:	test_allocate_random
 *
 * Purpose:	Tests allocating random sized blocks in pool
 *
 * Return:	Success:	0
 *
 *		Failure:	1
 *
 * Programmer:	Quincey Koziol
 *              Friday, May 6, 2005
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
static int
test_allocate_random(void)
{
    H5MP_pool_t *mp;            /* Memory pool */
    size_t u;                   /* Local index variable */
    time_t curr_time;           /* Current time, for seeding random number generator */
    size_t *blk_size = NULL;    /* Pointer to block sizes */
    void **spc = NULL;          /* Pointer to space allocated */
    size_t swap_idx;            /* Location to swap with when shuffling */
    void *swap_ptr;             /* Pointer to swap when shuffling */

    /*
     * Test memory pool allocation
     */
    TESTING("allocate many random sized blocks");

    /* Initialize random number seed */
    curr_time=HDtime(NULL);
#ifdef QAK
curr_time=1115412944;
HDfprintf(stderr,"curr_time=%lu\n",(unsigned long)curr_time);
#endif /* QAK */
    HDsrandom((unsigned long)curr_time);

    /* Create a memory pool */
    if(NULL == (mp = H5MP_create((size_t)MPOOL_PAGE_SIZE, MPOOL_FLAGS)))
        TEST_ERROR

    /* Allocate space for the block sizes */
    if(NULL == (blk_size = HDmalloc(sizeof(size_t) * MPOOL_NUM_RANDOM)))
        TEST_ERROR

    /* Allocate space for the block pointers */
    if(NULL == (spc = HDmalloc(sizeof(void *) * MPOOL_NUM_RANDOM)))
        TEST_ERROR

    /* Initialize the block sizes with random values */
    for(u = 0; u < MPOOL_NUM_RANDOM; u++)
        blk_size[u] = (size_t)(HDrandom() % MPOOL_RANDOM_MAX_SIZE) + 1;

    /* Allocate space in pool */
    for(u = 0; u < MPOOL_NUM_RANDOM; u++)
        if(NULL == (spc[u] = H5MP_malloc(mp, blk_size[u])))
            TEST_ERROR

    /* Check that free space totals match */
    if(H5MP_pool_is_free_size_correct(mp) <= 0)
        TEST_ERROR;

    /* Shuffle pointers to free */
    for(u = 0; u < MPOOL_NUM_RANDOM; u++) {
        swap_idx = (size_t)(HDrandom() % (MPOOL_NUM_RANDOM - u)) + u;
        swap_ptr = spc[u];
        spc[u] = spc[swap_idx];
        spc[swap_idx] = swap_ptr;
    } /* end for */

    /* Free blocks in pool */
    for(u = 0; u < MPOOL_NUM_RANDOM; u++)
        H5MP_free(mp, spc[u]);

    /* Check that free space totals match */
    if (H5MP_pool_is_free_size_correct(mp) <= 0)
        TEST_ERROR;

    /* Initialize the block sizes with random values */
    for(u = 0; u < MPOOL_NUM_RANDOM; u++)
        blk_size[u] = (size_t)(HDrandom() % MPOOL_RANDOM_MAX_SIZE) + 1;

    /* Allocate space in pool (again) */
    /* (Leave allocated to test closing pool with many blocks still allocated) */
    for(u = 0; u < MPOOL_NUM_RANDOM; u++)
        if(NULL == (spc[u] = H5MP_malloc(mp, blk_size[u])))
            TEST_ERROR

    /* Check that free space totals match */
    if(H5MP_pool_is_free_size_correct(mp) <= 0)
        TEST_ERROR;

    /* Close the memory pool */
    if (H5MP_close(mp) < 0)
        TEST_ERROR

    /* Free memory for block sizes & pointers */
    HDfree(blk_size);
    HDfree(spc);

    PASSED();

    return 0;

error:
    if(blk_size)
        HDfree(blk_size);
    if(spc)
        HDfree(spc);
    H5E_BEGIN_TRY {
    } H5E_END_TRY;
    return 1;
} /* test_allocate_random() */
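The constants steering this test (MPOOL_PAGE_SIZE, MPOOL_FLAGS, MPOOL_NUM_RANDOM, MPOOL_RANDOM_MAX_SIZE) come from the top of pool.c and are not shown here. The values below are assumptions, listed only to make the loop bounds concrete:

/* Hypothetical values for the pool-test constants (illustrative only) */
#define MPOOL_PAGE_SIZE        4096     /* assumed pool page size, in bytes */
#define MPOOL_FLAGS            0        /* assumed: default pool-creation flags */
#define MPOOL_NUM_RANDOM       10000    /* assumed number of random-sized blocks */
#define MPOOL_RANDOM_MAX_SIZE  1024     /* assumed largest random block, in bytes */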
Code example #8
File: istore.c  Project: ElaraFX/hdf5
/*-------------------------------------------------------------------------
 * Function:	test_sparse
 *
 * Purpose:	Creates a sparse matrix consisting of NBLOCKS randomly placed
 *		blocks each of size NX,NY,NZ.
 *
 * Return:	Success:	SUCCEED
 *
 *		Failure:	FAIL
 *
 * Programmer:	Robb Matzke
 *		Wednesday, October 22, 1997
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
static herr_t
test_sparse(hid_t f, const char *prefix, size_t nblocks,
	    size_t nx, size_t ny, size_t nz, int skip_test)
{
    hid_t               dataset;        /* Dataset ID */
    hid_t               fspace;         /* Dataset's file dataspace */
    hid_t               mspace;         /* Dataset's memory dataspace */
    int			ndims;
    hsize_t		ctr;
    char		dims[64], s[256], name[256];
    hsize_t		offset[3];
    hsize_t		size[3], total = 0;
    uint8_t		*buf = NULL;
    hsize_t		whole_size[3];  /* Size of dataset's dataspace */
    size_t              u;              /* Local index variable */

    if (!nz) {
	if (!ny) {
	    ndims = 1;
	    ny = nz = 1;
	    sprintf(dims, "%lu", (unsigned long) nx);
	} else {
	    ndims = 2;
	    nz = 1;
	    sprintf(dims, "%lux%lu", (unsigned long) nx, (unsigned long) ny);
	}
    } else {
	ndims = 3;
	sprintf(dims, "%lux%lux%lu",
		(unsigned long) nx, (unsigned long) ny, (unsigned long) nz);
    }

    sprintf(s, "istore sparse: %s", dims);
    TESTING(s);
    if(skip_test){
        SKIPPED()
        return SUCCEED;
    }
    buf = (uint8_t *)HDmalloc(nx * ny * nz);
    HDmemset(buf, 128, nx * ny * nz);

    /* Set dimensions of dataset */
    for (u=0; u<(size_t)ndims; u++)
        whole_size[u]=TEST_SPARSE_SIZE;

    /* Set dimensions of selection */
    size[0] = nx;
    size[1] = ny;
    size[2] = nz;

    /* Build the new empty object */
    sprintf(name, "%s_%s", prefix, dims);
    if ((dataset=new_object(f, name, ndims, whole_size, chunk_dims)) < 0) {
	printf("    Cannot create %u-d object `%s'\n", ndims, name);
	goto error;
    }

    /* Get dataset's dataspace */
    if((fspace=H5Dget_space(dataset)) < 0) TEST_ERROR;

    /* Create dataspace for memory buffer */
    if((mspace=H5Screate_simple(ndims,size,NULL)) < 0) TEST_ERROR;

    for (ctr=0; ctr<nblocks; ctr++) {
	offset[0] = (hsize_t)(HDrandom() % (TEST_SPARSE_SIZE-nx));
	offset[1] = (hsize_t)(HDrandom() % (TEST_SPARSE_SIZE-ny));
	offset[2] = (hsize_t)(HDrandom() % (TEST_SPARSE_SIZE-nz));

        /* Select region in file dataspace */
        if(H5Sselect_hyperslab(fspace,H5S_SELECT_SET,offset,NULL,size,NULL) < 0) TEST_ERROR;

	/* write to disk */
	if (H5Dwrite(dataset, TEST_DATATYPE, mspace, fspace, H5P_DEFAULT, buf) < 0) {
	    H5_FAILED();
	    printf("    Write failed: ctr=%lu\n", (unsigned long)ctr);
	    printf("    offset=(%lu", (unsigned long) (offset[0]));
	    if (ndims > 1)
		printf(",%lu", (unsigned long) (offset[1]));
	    if (ndims > 2)
		printf(",%lu", (unsigned long) (offset[2]));
	    printf("), size=(%lu", (unsigned long) (size[0]));
	    if (ndims > 1)
		printf(",%lu", (unsigned long) (size[1]));
	    if (ndims > 2)
		printf(",%lu", (unsigned long) (size[2]));
	    printf(")\n");
	    goto error;
	}
	total += nx * ny * nz;
#if 0
	HDfprintf(stderr,"ctr: ctr=%Zu, total=%Zu\n", ctr, total);
#endif

	/* We don't test reading yet.... */
    }

    /* Close memory dataspace */
    if(H5Sclose(mspace) < 0) TEST_ERROR;

    /* Close dataset's dataspace */
    if(H5Sclose(fspace) < 0) TEST_ERROR;

    /* Close dataset */
    if(H5Dclose(dataset) < 0) TEST_ERROR;

    HDfree(buf);
    PASSED();
    return SUCCEED;

error:
    HDfree(buf);
    return FAIL;
}
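test_sparse() relies on istore.c context that is not part of the snippet: the chunk_dims[] global and a new_object() helper that creates a chunked dataset of the requested rank. The sketch below shows roughly what such a helper has to do; it is an assumption, not the file's actual implementation.

/* Hypothetical new_object(): create a chunked dataset of rank `ndims` */
static hid_t
new_object(hid_t f, const char *name, int ndims, const hsize_t *dims,
           const hsize_t *chunk)
{
    hid_t space = -1, dcpl = -1, dset = -1;

    if((space = H5Screate_simple(ndims, dims, NULL)) < 0)
        goto done;
    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        goto done;
    if(H5Pset_chunk(dcpl, ndims, chunk) < 0)
        goto done;

    /* TEST_DATATYPE is the element type used throughout istore.c */
    dset = H5Dcreate2(f, name, TEST_DATATYPE, space, H5P_DEFAULT, dcpl, H5P_DEFAULT);

done:
    if(dcpl >= 0)
        H5Pclose(dcpl);
    if(space >= 0)
        H5Sclose(space);
    return dset;
}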
Code example #9
File: filter_plugin.c  Project: Starlink/hdf5
/*-------------------------------------------------------------------------
 * Function:  ensure_filter_works
 *
 * Purpose:   Tests writing entire data and partial data with filters
 *
 * Return:    SUCCEED/FAIL
 *
 *-------------------------------------------------------------------------
 */
static herr_t
ensure_filter_works(hid_t fid, const char *name, hid_t dcpl_id)
{
    hid_t           did = -1;                       /* Dataset ID                                   */
    hid_t           dxpl_id = -1;                   /* Dataset xfer property list ID                */
    hid_t           write_dxpl_id = -1;             /* Dataset xfer property list ID for writing    */
    hid_t           sid = -1;                       /* Dataspace ID                                 */
    void           *tconv_buf = NULL;               /* Temporary conversion buffer                  */
    int           **orig = NULL;                    /* Data written to the dataset                  */
    int           **read = NULL;                    /* Data read from the dataset                   */
    size_t          r, c;                           /* Data rows and columns                        */
    size_t          hs_r, hs_c, hs_offr, hs_offc;   /* Hypserslab sizes and offsets                 */
    size_t          i, j;                           /* Local index variables                        */
    int             n = 0;                          /* Value written to point array                 */
    hbool_t         are_same;                       /* Output from dataset compare function         */
    int          ***save_array = NULL;              /* (Global) array where the final data go       */

    /* initialize */
    r = (size_t)sizes_g[0];
    c = (size_t)sizes_g[1];

    /* Create the data space */
    if ((sid = H5Screate_simple(2, sizes_g, NULL)) < 0)
        TEST_ERROR;

    /* Allocate memory for the data buffers
     * We're using the hacky way of doing 2D arrays that uses a
     * single data buffer but which allows normal 2D access.
     */
    if (allocate_and_init_2D_array(&orig, sizes_g, NULL) < 0)
        TEST_ERROR;
    if (allocate_and_init_2D_array(&read, sizes_g, NULL) < 0)
        TEST_ERROR;

    /* Create a small conversion buffer to test strip mining. We
     * might as well test all we can!
     */
    if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
        TEST_ERROR;
    if (NULL == (tconv_buf = HDcalloc((size_t)1000, sizeof(char))))
        TEST_ERROR;
    if (H5Pset_buffer(dxpl_id, (size_t)1000, tconv_buf, NULL) < 0)
        TEST_ERROR;
    if ((write_dxpl_id = H5Pcopy(dxpl_id)) < 0)
        TEST_ERROR;

    TESTING("    filters (setup)");

    /* Check if all the filters are available */
    if (H5Pall_filters_avail(dcpl_id) != TRUE)
        TEST_ERROR;

    /* Create the dataset */
    if ((did = H5Dcreate2(fid, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0)
        TEST_ERROR;

    PASSED();

    /*----------------------------------------------------------------------
     * STEP 1: Read uninitialized data.  It should be zero.
     *----------------------------------------------------------------------
     */
    TESTING("    filters (uninitialized read)");

    if (H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl_id, *read) < 0)
        TEST_ERROR;

    /* The input buffer was calloc'd and has not been initialized yet */
    if (compare_2D_arrays(orig, read, sizes_g, &are_same) < 0)
        TEST_ERROR;
    if (FALSE == are_same)
        TEST_ERROR;

    PASSED();

    /*----------------------------------------------------------------------
     * STEP 2: Test filters by setting up a chunked dataset and writing
     * to it.
     *----------------------------------------------------------------------
     */
    TESTING("    filters (write)");

    n = 0;
    for (i = 0; i < r; i++)
        for (j = 0; j < c; j++)
            orig[i][j] = n++;

    if (H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl_id, *orig) < 0)
        TEST_ERROR;

    PASSED();

    /*----------------------------------------------------------------------
     * STEP 3: Try to read the data we just wrote.
     *----------------------------------------------------------------------
     */
    TESTING("    filters (read)");

    /* Read the dataset back */
    if (H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl_id, *read) < 0)
        TEST_ERROR;

    /* Check that the values read are the same as the values written */
    if (compare_2D_arrays(orig, read, sizes_g, &are_same) < 0)
        TEST_ERROR;
    if (FALSE == are_same)
        TEST_ERROR;

    PASSED();

    /*----------------------------------------------------------------------
     * STEP 4: Write new data over the top of the old data.  The new data is
     * random thus not very compressible, and will cause the chunks to move
     * around as they grow.  We only change values for the left half of the
     * dataset although we rewrite the whole thing.
     *----------------------------------------------------------------------
     */
    TESTING("    filters (modify)");

    for (i = 0; i < r; i++)
        for (j = 0; j < c / 2; j++)
            orig[i][j] = (int)HDrandom() % RANDOM_LIMIT;

    if (H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl_id, *orig) < 0)
        TEST_ERROR;

    /* Read the dataset back and check it */
    if (H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl_id, *read) < 0)
        TEST_ERROR;

    /* Check that the values read are the same as the values written */
    if (compare_2D_arrays(orig, read, sizes_g, &are_same) < 0)
        TEST_ERROR;
    if (FALSE == are_same)
        TEST_ERROR;

    PASSED();

    /*----------------------------------------------------------------------
     * STEP 5: Close the dataset and then open it and read it again.  This
     * insures that the filters message is picked up properly from the
     * object header.
     *----------------------------------------------------------------------
     */
    TESTING("    filters (re-open)");

    if (H5Dclose(did) < 0)
        TEST_ERROR;
    if ((did = H5Dopen2(fid, name, H5P_DEFAULT)) < 0)
        TEST_ERROR;

    if (H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl_id, *read) < 0)
        TEST_ERROR;

    /* Check that the values read are the same as the values written */
    if (compare_2D_arrays(orig, read, sizes_g, &are_same) < 0)
        TEST_ERROR;
    if (FALSE == are_same)
        TEST_ERROR;

    PASSED();

    /*----------------------------------------------------------------------
     * STEP 6: Test partial I/O by writing to and then reading from a
     * hyperslab of the dataset.  The hyperslab does not line up on chunk
     * boundaries (we know that case already works from above tests).
     *----------------------------------------------------------------------
     */
    TESTING("    filters (partial I/O)");

    hs_r = (size_t)hs_sizes_g[0];
    hs_c = (size_t)hs_sizes_g[1];
    hs_offr = (size_t)hs_offsets_g[0];
    hs_offc = (size_t)hs_offsets_g[1];
    for (i = 0; i < hs_r; i++)
        for (j = 0; j < hs_c; j++)
            orig[hs_offr + i][hs_offc + j] = (int)HDrandom() % RANDOM_LIMIT;

    if (H5Sselect_hyperslab(sid, H5S_SELECT_SET, hs_offsets_g, NULL, hs_sizes_g, NULL) < 0)
        TEST_ERROR;

    /* Use the "read" DXPL because partial I/O on corrupted data test
     * needs to ignore errors during writing
     */
    if (H5Dwrite(did, H5T_NATIVE_INT, sid, sid, dxpl_id, *orig) < 0)
        TEST_ERROR;

    if (H5Dread(did, H5T_NATIVE_INT, sid, sid, dxpl_id, *read) < 0)
        TEST_ERROR;

    /* Check that the values read are the same as the values written */
    if (compare_2D_arrays(orig, read, sizes_g, &are_same) < 0)
        TEST_ERROR;
    if (FALSE == are_same)
        TEST_ERROR;

    PASSED();

    /* Save the data written to the file for later comparison when the file
     * is reopened for read test.
     */
    if (!HDstrcmp(name, DSET_DEFLATE_NAME))
        save_array = &orig_deflate_g;
    else if (!HDstrcmp(name, DSET_FILTER1_NAME))
        save_array = &orig_dynlib1_g;
    else if (!HDstrcmp(name, DSET_FILTER2_NAME))
        save_array = &orig_dynlib2_g;
    else if (!HDstrcmp(name, DSET_FILTER3_NAME))
        save_array = &orig_dynlib4_g;
    else
        TEST_ERROR;
    if (allocate_and_init_2D_array(save_array, sizes_g, orig) < 0)
        TEST_ERROR;

    /* Clean up and exit */
    if (H5Dclose(did) < 0)
        TEST_ERROR;
    if (H5Sclose(sid) < 0)
        TEST_ERROR;
    if (H5Pclose(dxpl_id) < 0)
        TEST_ERROR;
    if (H5Pclose(write_dxpl_id) < 0)
        TEST_ERROR;

    free_2D_array(&orig);
    free_2D_array(&read);

    HDfree(tconv_buf);

    return SUCCEED;

error:
    /* Clean up objects used for this test */
    H5E_BEGIN_TRY {
        H5Dclose(did);
        H5Sclose(sid);
        H5Pclose(dxpl_id);
        H5Pclose(write_dxpl_id);
    } H5E_END_TRY

    /* NULLs are okay here */
    free_2D_array(&orig);
    free_2D_array(&read);

    if (tconv_buf)
        HDfree(tconv_buf);

    return FAIL;
} /* end ensure_filter_works() */
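The 2D-array helpers used above (allocate_and_init_2D_array, compare_2D_arrays, free_2D_array) implement the single-buffer trick the comment mentions: one contiguous data block plus an array of row pointers, so the data can be addressed as arr[i][j] while H5Dread/H5Dwrite see a flat buffer. A condensed sketch of the allocation helper, as an assumption rather than the file's actual code:

/* Hypothetical allocation helper: one contiguous block plus row pointers */
static herr_t
allocate_and_init_2D_array(int ***arr, const hsize_t *sizes, int **initial_values)
{
    size_t r = (size_t)sizes[0];
    size_t c = (size_t)sizes[1];
    size_t i;

    /* Row-pointer array, then the contiguous data block it points into */
    if(NULL == (*arr = (int **)HDcalloc(r, sizeof(int *))))
        return FAIL;
    if(NULL == ((*arr)[0] = (int *)HDcalloc(r * c, sizeof(int)))) {
        HDfree(*arr);
        *arr = NULL;
        return FAIL;
    }
    for(i = 1; i < r; i++)
        (*arr)[i] = (*arr)[0] + (i * c);

    /* Optionally copy initial contents (e.g. when saving `orig` for later) */
    if(initial_values)
        HDmemcpy((*arr)[0], initial_values[0], r * c * sizeof(int));

    return SUCCEED;
}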
Code example #10
File: tid.c  Project: asteever/thirdparty_hdf5
/* Test function */
static int test_remove_clear_type(void)
{
    H5I_type_t obj_type;
    test_rct_list_t obj_list;
    test_rct_obj_t list[TEST_RCT_MAX_NOBJS];
    long i, j;
    long nobjs_found;
    hsize_t nmembers;
    herr_t  ret;        /* return value */

    /* Register type */
    obj_type = H5Iregister_type((size_t)8, 0, test_rct_free);
    CHECK(obj_type, H5I_BADID, "H5Iregister_type");
    if(obj_type == H5I_BADID)
        goto out;

    /* Init obj_list.list */
    obj_list.list = list;

    for(i = 0; i < TEST_RCT_NITER; i++) {
        /* Build object list */
        obj_list.nobjs = obj_list.nobjs_rem = TEST_RCT_MIN_NOBJS + (HDrandom() % (long)(TEST_RCT_MAX_NOBJS - TEST_RCT_MIN_NOBJS + 1));
        for(j = 0; j < obj_list.nobjs; j++) {
            list[j].nfrees = 0;
            list[j].freeing = FALSE;
            list[j].obj_list = &obj_list;
            list[j].id = H5Iregister(obj_type, &list[j]);
            CHECK(list[j].id, FAIL, "H5Iregister");
            if(list[j].id == FAIL)
                goto out;
            if(HDrandom() % 2) {
                ret = H5Iinc_ref(list[j].id);
                CHECK(ret, FAIL, "H5Iinc_ref");
                if(ret == FAIL)
                    goto out;
            } /* end if */
        } /* end for */

        /* Clear the type */
        ret = H5Iclear_type(obj_type, FALSE);
        CHECK(ret, FAIL, "H5Iclear_type");
        if(ret == FAIL)
            goto out;

        /* Verify list */
        nobjs_found = 0;
        for(j = 0; j < obj_list.nobjs; j++) {
            if(list[j].nfrees == 0)
                nobjs_found++;
            else {
                VERIFY(list[j].nfrees, (long)1, "list[j].nfrees");
                if(list[j].nfrees != (long)1)
                    goto out;
            } /* end else */
            VERIFY(list[j].freeing, FALSE, "list[j].freeing");
            if(list[j].freeing != FALSE)
                goto out;
        } /* end for */

        /* Verify number of objects */
        VERIFY(obj_list.nobjs_rem, nobjs_found, "obj_list.nobjs_rem");
        if(obj_list.nobjs_rem != nobjs_found)
            goto out;
        ret = H5Inmembers(obj_type, &nmembers);
        CHECK(ret, FAIL, "H5Inmembers");
        if(ret == FAIL)
            goto out;
        VERIFY(nmembers, (size_t)nobjs_found, "H5Inmembers");
        if(nmembers != (size_t)nobjs_found)
            goto out;

        /* Clear the type with force set to TRUE */
        ret = H5Iclear_type(obj_type, TRUE);
        CHECK(ret, FAIL, "H5Iclear_type");
        if(ret == FAIL)
            goto out;

        /* Verify list */
        for(j = 0; j < obj_list.nobjs; j++) {
            VERIFY(list[j].nfrees, (long)1, "list[j].nfrees");
            if(list[j].nfrees != (long)1)
                goto out;
            VERIFY(list[j].freeing, FALSE, "list[j].freeing");
            if(list[j].freeing != FALSE)
                goto out;
        } /* end for */

        /* Verify number of objects is 0 */
        VERIFY(obj_list.nobjs_rem, (long)0, "obj_list.nobjs_rem");
        if(obj_list.nobjs_rem != (long)0)
            goto out;
        ret = H5Inmembers(obj_type, &nmembers);
        CHECK(ret, FAIL, "H5Inmembers");
        if(ret == FAIL)
            goto out;
        VERIFY(nmembers, (size_t)0, "H5Inmembers");
        if(nmembers != (size_t)0)
            goto out;
    } /* end for */

    /* Destroy type */
    ret = H5Idestroy_type(obj_type);
    CHECK(ret, FAIL, "H5Idestroy_type");
    if(ret == FAIL)
        goto out;

    return 0;

out:
    /* Cleanup.  For simplicity, just destroy the types and ignore errors. */
    H5E_BEGIN_TRY
    H5Idestroy_type(obj_type);
    H5E_END_TRY
    return -1;
} /* end test_remove_clear_type() */
Code example #11
File: plugin.c  Project: schwehr/hdf5
/*-------------------------------------------------------------------------
 * Function:	test_filter_internal
 *
 * Purpose:	Tests writing entire data and partial data with filters  
 *
 * Return:	Success:	0
 *		Failure:	-1
 *
 * Programmer:	Raymond Lu
 *              27 February 2013
 *
 *-------------------------------------------------------------------------
 */
static herr_t
test_filter_internal(hid_t fid, const char *name, hid_t dcpl)
{
    hid_t		dataset;        /* Dataset ID */
    hid_t		dxpl;           /* Dataset xfer property list ID */
    hid_t		write_dxpl;     /* Dataset xfer property list ID for writing */
    hid_t		sid;            /* Dataspace ID */
    const hsize_t	size[2] = {DSET_DIM1, DSET_DIM2};           /* Dataspace dimensions */
    const hsize_t	hs_offset[2] = {FILTER_HS_OFFSET1, FILTER_HS_OFFSET2}; /* Hyperslab offset */
    const hsize_t	hs_size[2] = {FILTER_HS_SIZE1, FILTER_HS_SIZE2};   /* Hyperslab size */
    void		*tconv_buf = NULL;      /* Temporary conversion buffer */
    int	                points[DSET_DIM1][DSET_DIM2], check[DSET_DIM1][DSET_DIM2];
    size_t		i, j;        /* Local index variables */
    int                 n = 0;

    /* Create the data space */
    if((sid = H5Screate_simple(2, size, NULL)) < 0) goto error;

    /*
     * Create a small conversion buffer to test strip mining. We
     * might as well test all we can!
     */
    if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) goto error;
    tconv_buf = HDmalloc((size_t)1000);
    if(H5Pset_buffer(dxpl, (size_t)1000, tconv_buf, NULL) < 0) goto error;
    if((write_dxpl = H5Pcopy(dxpl)) < 0) TEST_ERROR;

    TESTING("    filters (setup)");

    /* Check if all the filters are available */
    if(H5Pall_filters_avail(dcpl)!=TRUE) {
        H5_FAILED();
        printf("    Line %d: Incorrect filter availability\n",__LINE__);
        goto error;
    } /* end if */

    /* Create the dataset */
    if((dataset = H5Dcreate2(fid, name, H5T_NATIVE_INT, sid, H5P_DEFAULT,
			     dcpl, H5P_DEFAULT)) < 0) goto error;

    PASSED();

    /*----------------------------------------------------------------------
     * STEP 1: Read uninitialized data.  It should be zero.
     *----------------------------------------------------------------------
     */
    TESTING("    filters (uninitialized read)");

    if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check) < 0)
	TEST_ERROR;

    for(i=0; i<(size_t)size[0]; i++) {
	for(j=0; j<(size_t)size[1]; j++) {
	    if(0!=check[i][j]) {
		H5_FAILED();
		printf("    Read a non-zero value.\n");
		printf("    At index %lu,%lu\n",
		       (unsigned long)i, (unsigned long)j);
		goto error;
	    }
	}
    }
    PASSED();

    /*----------------------------------------------------------------------
     * STEP 2: Test filters by setting up a chunked dataset and writing
     * to it.
     *----------------------------------------------------------------------
     */
    TESTING("    filters (write)");

    n = 0;
    for(i=0; i<size[0]; i++) {
	for(j=0; j<size[1]; j++) {
	    points[i][j] = (int)(n++);
	}
    }

    if(H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl, points) < 0)
	TEST_ERROR;

    PASSED();

    /*----------------------------------------------------------------------
     * STEP 3: Try to read the data we just wrote.
     *----------------------------------------------------------------------
     */
    TESTING("    filters (read)");

    /* Read the dataset back */
    if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check) < 0)
        TEST_ERROR;

    /* Check that the values read are the same as the values written */
    for(i=0; i<size[0]; i++) {
        for(j=0; j<size[1]; j++) {
            if(points[i][j] != check[i][j]) {
                H5_FAILED();
                fprintf(stderr,"    Read different values than written.\n");
                fprintf(stderr,"    At index %lu,%lu\n", (unsigned long)i, (unsigned long)j);
                fprintf(stderr,"    At original: %d\n", (int)points[i][j]);
                fprintf(stderr,"    At returned: %d\n", (int)check[i][j]);
                goto error;
            }
        }
    }

    PASSED();

    /*----------------------------------------------------------------------
     * STEP 4: Write new data over the top of the old data.  The new data is
     * random thus not very compressible, and will cause the chunks to move
     * around as they grow.  We only change values for the left half of the
     * dataset although we rewrite the whole thing.
     *----------------------------------------------------------------------
     */
    TESTING("    filters (modify)");

    for(i=0; i<size[0]; i++) {
	for(j=0; j<size[1]/2; j++) {
	    points[i][j] = (int)HDrandom () % RANDOM_LIMIT;
	}
    }
    if(H5Dwrite (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl, points) < 0)
	TEST_ERROR;

    /* Read the dataset back and check it */
    if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check) < 0)
        TEST_ERROR;

    /* Check that the values read are the same as the values written */
    for(i=0; i<size[0]; i++) {
        for(j=0; j<size[1]; j++) {
            if(points[i][j] != check[i][j]) {
                H5_FAILED();
                printf("    Read different values than written.\n");
                printf("    At index %lu,%lu\n",
                       (unsigned long)i, (unsigned long)j);
                goto error;
            }
        }
    }

    PASSED();

    /*----------------------------------------------------------------------
     * STEP 5: Close the dataset and then open it and read it again.  This
     * insures that the filters message is picked up properly from the
     * object header.
     *----------------------------------------------------------------------
     */
    TESTING("    filters (re-open)");

    if(H5Dclose(dataset) < 0) TEST_ERROR;
    if((dataset = H5Dopen2(fid, name, H5P_DEFAULT)) < 0) TEST_ERROR;

    if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check) < 0)
        TEST_ERROR;

    /* Check that the values read are the same as the values written */
    for(i = 0; i < size[0]; i++)
        for(j = 0; j < size[1]; j++)
            if(points[i][j] != check[i][j]) {
                H5_FAILED();
                printf("    Read different values than written.\n");
                printf("    At index %lu,%lu\n",
                       (unsigned long)i, (unsigned long)j);
                goto error;
            } /* end if */

    PASSED();

    /*----------------------------------------------------------------------
     * STEP 6: Test partial I/O by writing to and then reading from a
     * hyperslab of the dataset.  The hyperslab does not line up on chunk
     * boundaries (we know that case already works from above tests).
     *----------------------------------------------------------------------
     */
    TESTING("    filters (partial I/O)");

    for(i=0; i<(size_t)hs_size[0]; i++) {
	for(j=0; j<(size_t)hs_size[1]; j++) {
	    points[(size_t)hs_offset[0]+i][(size_t)hs_offset[1]+j] = (int)HDrandom() % RANDOM_LIMIT;
	}
    }
    if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, hs_offset, NULL, hs_size,
			    NULL) < 0) TEST_ERROR;
    /* (Use the "read" DXPL because partial I/O on corrupted data test needs to ignore errors during writing) */
    if(H5Dwrite (dataset, H5T_NATIVE_INT, sid, sid, dxpl, points) < 0)
	TEST_ERROR;

    if(H5Dread (dataset, H5T_NATIVE_INT, sid, sid, dxpl, check) < 0)
        TEST_ERROR;

    /* Check that the values read are the same as the values written */
    for(i=0; i<(size_t)hs_size[0]; i++) {
        for(j=0; j<(size_t)hs_size[1]; j++) {
            if(points[(size_t)hs_offset[0]+i][(size_t)hs_offset[1]+j] !=
                    check[(size_t)hs_offset[0]+i][(size_t)hs_offset[1]+j]) {
                H5_FAILED();
                fprintf(stderr,"    Read different values than written.\n");
                fprintf(stderr,"    At index %lu,%lu\n",
                        (unsigned long)((size_t)hs_offset[0]+i),
                        (unsigned long)((size_t)hs_offset[1]+j));
                fprintf(stderr,"    At original: %d\n",
                        (int)points[(size_t)hs_offset[0]+i][(size_t)hs_offset[1]+j]);
                fprintf(stderr,"    At returned: %d\n",
                        (int)check[(size_t)hs_offset[0]+i][(size_t)hs_offset[1]+j]);
                goto error;
            }
        }
    }

    PASSED();

    /* Save the data written to the file for later comparison when the file 
     * is reopened for read test */
    for(i=0; i<size[0]; i++) {
        for(j=0; j<size[1]; j++) {
            if(!HDstrcmp(name, DSET_DEFLATE_NAME)) {
	        points_deflate[i][j] = points[i][j];
            } else if(!HDstrcmp(name, DSET_DYNLIB1_NAME)) {
	        points_dynlib1[i][j] = points[i][j];
            } else if(!HDstrcmp(name, DSET_DYNLIB2_NAME)) {
	        points_dynlib2[i][j] = points[i][j];
            }
	}
    }

    /* Clean up objects used for this test */
    if(H5Dclose (dataset) < 0) goto error;
    if(H5Sclose (sid) < 0) goto error;
    if(H5Pclose (dxpl) < 0) goto error;
    if(H5Pclose (write_dxpl) < 0) goto error;
    free (tconv_buf);

    return(0);

error:
    if(tconv_buf)
        free (tconv_buf);
    return -1;
}
Code example #12
File: ttst.c  Project: MattNapsAlot/rHDF5
/****************************************************************
**
**  test_tst_init(): Test basic H5ST (ternary search tree) selection code.
**      Initialize data for TST testing
**
****************************************************************/
static void
test_tst_init(void)
{
    time_t curr_time;   /* Current time, for seeding random number generator */
    char *tmp_word;     /* Temporary pointer to word in word set */
    size_t u,v,w;       /* Local index variables */

    /* Compute the number of words in the test set */
    num_words=sizeof(words)/sizeof(words[0]);

    /* Determine the number of unique words in test set */
    /* (Not particularly efficient, be careful if many words are added to set) */
    num_uniq_words=0;
    for(u=0; u<num_words; u++) {
        /* Assume word is unique */
        num_uniq_words++;
        for(v=0; v<u; v++)
            /* If word is already found in words looked at, decrement unique count */
            if(!HDstrcmp(words[u],words[v])) {
                num_uniq_words--;
                break;
            } /* end if */
    } /* end for */

    /* Allocate space for the array of unique words */
    uniq_words=HDmalloc(sizeof(char *)*num_uniq_words);

    /* Allocate space for the array of randomized order unique words also */
    rand_uniq_words=HDmalloc(sizeof(char *)*num_uniq_words);

    /* Allocate space for the array of sorted order unique words also */
    sort_uniq_words=HDmalloc(sizeof(char *)*num_uniq_words);

    /* Insert unique words from test set into unique word set */
    w=0;
    for(u=0; u<num_words; u++) {
        /* Assume word is unique */
        tmp_word=(char *)words[u];
        for(v=0; v<u; v++)
            /* If word duplicates an earlier word, clear the temporary pointer */
            if(!HDstrcmp(words[u],words[v])) {
                tmp_word=NULL;
                break;
            } /* end if */

        /* Check if word was actually unique */
        if(tmp_word!=NULL)
            uniq_words[w++]=tmp_word;
    } /* end for */

    /* Create randomized set of unique words */
    for(u=0; u<num_uniq_words; u++)
        rand_uniq_words[u]=uniq_words[u];
    curr_time=HDtime(NULL);
    HDsrandom((unsigned long)curr_time);
    for(u=0; u<num_uniq_words; u++) {
        v=u+(HDrandom()%(num_uniq_words-u));
        if(u!=v) {
            tmp_word=rand_uniq_words[u];
            rand_uniq_words[u]=rand_uniq_words[v];
            rand_uniq_words[v]=tmp_word;
        } /* end if */
    } /* end for */

    /* Create sorted set of unique words */
    for(u=0; u<num_uniq_words; u++)
        sort_uniq_words[u]=uniq_words[u];
    HDqsort(sort_uniq_words,num_uniq_words,sizeof(char *),tst_strcmp);
} /* end test_tst_init() */
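The final HDqsort() call expects a comparator, tst_strcmp, that orders an array of char * by string value. A plausible sketch (an assumption, not ttst.c's actual code):

/* Hypothetical string comparator for an array of char * elements */
static int
tst_strcmp(const void *s1, const void *s2)
{
    return HDstrcmp(*(char * const *)s1, *(char * const *)s2);
}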
Code example #13
File: swmr_sparse_writer.c  Project: aleph7/HDF5Kit
/*-------------------------------------------------------------------------
 * Function:    add_records
 *
 * Purpose:     Writes a specified number of records to random datasets in
 *              the SWMR test file.
 *
 * Parameters:  hid_t fid
 *              The file ID of the SWMR HDF5 file
 *
 *              unsigned verbose
 *              Whether or not to emit verbose console messages
 *
 *              unsigned long nrecords
 *              # of records to write to the datasets
 *
 *              unsigned long flush_count
 *              # of records to write before flushing the file to disk
 *
 * Return:      Success:    0
 *              Failure:    -1
 *
 *-------------------------------------------------------------------------
 */
static int
add_records(hid_t fid, unsigned verbose, unsigned long nrecords, unsigned long flush_count)
{
    hid_t tid;                          /* Datatype ID for records */
    hid_t mem_sid;                      /* Memory dataspace ID */
    hsize_t start[2] = {0, 0};          /* Hyperslab selection values */
    hsize_t count[2] = {1, 1};          /* Hyperslab selection values */
    symbol_t record;                    /* The record to add to the dataset */
    H5AC_cache_config_t mdc_config_orig; /* Original metadata cache configuration */
    H5AC_cache_config_t mdc_config_cork; /* Corked metadata cache configuration */
    unsigned long rec_to_flush;         /* # of records left to write before flush */
    volatile int dummy;                 /* Dummy variable for busy-wait loop */
    hsize_t dim[2] = {1,0};             /* Dataspace dimensions */
    unsigned long u, v;                 /* Local index variables */

    HDassert(fid >= 0);

    /* Reset the record */
    /* (record's 'info' field might need to change for each record written, also) */
    HDmemset(&record, 0, sizeof(record));

    /* Create a dataspace for the record to add */
    if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
        return -1;

    /* Create datatype for appending records */
    if((tid = create_symbol_datatype()) < 0)
        return -1;

    /* Get the current metadata cache configuration, and set up the corked
     * configuration */
    mdc_config_orig.version = H5AC__CURR_CACHE_CONFIG_VERSION;
    if(H5Fget_mdc_config(fid, &mdc_config_orig) < 0)
        return -1;
    HDmemcpy(&mdc_config_cork, &mdc_config_orig, sizeof(mdc_config_cork));
    mdc_config_cork.evictions_enabled = FALSE;
    mdc_config_cork.incr_mode = H5C_incr__off;
    mdc_config_cork.flash_incr_mode = H5C_flash_incr__off;
    mdc_config_cork.decr_mode = H5C_decr__off;

    /* Add records to random datasets, according to frequency distribution */
    rec_to_flush = flush_count;
    for(u = 0; u < nrecords; u++) {
        symbol_info_t *symbol;  /* Symbol to write record to */
        hid_t file_sid;         /* Dataset's space ID */
        hid_t aid;              /* Attribute ID */

        /* Get a random dataset, according to the symbol distribution */
        symbol = choose_dataset();

        /* Cork the metadata cache, to prevent the object header from being
         * flushed before the data has been written */
        /*if(H5Fset_mdc_config(fid, &mdc_config_cork) < 0)
            return(-1);*/

        /* If this is the first time the dataset has been opened, extend it and
         * add the sequence attribute */
        if(symbol->nrecords == 0) {
            symbol->nrecords = nrecords / 5;
            dim[1] = symbol->nrecords;

            if(H5Dset_extent(symbol->dsid, dim) < 0)
                return -1;

            if((file_sid = H5Screate(H5S_SCALAR)) < 0)
                return -1;
            if((aid = H5Acreate2(symbol->dsid, "seq", H5T_NATIVE_ULONG, file_sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
                return -1;
            if(H5Sclose(file_sid) < 0)
                return -1;
        } /* end if */
        else if((aid = H5Aopen(symbol->dsid, "seq", H5P_DEFAULT)) < 0)
            return -1;

        /* Get the coordinate to write */
        start[1] = (hsize_t)HDrandom() % symbol->nrecords;

        /* Set the record's ID (equal to its position) */
        record.rec_id = start[1];

        /* Get the dataset's dataspace */
        if((file_sid = H5Dget_space(symbol->dsid)) < 0)
            return -1;

        /* Choose a random record in the dataset */
        if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
            return -1;

        /* Write record to the dataset */
        if(H5Dwrite(symbol->dsid, tid, mem_sid, file_sid, H5P_DEFAULT, &record) < 0)
            return -1;

        /* Write the sequence number attribute.  Since we synchronize the random
         * number seed, the readers will always generate the same sequence of
         * randomly chosen datasets and offsets.  Therefore, and because of the
         * flush dependencies on the object header, the reader will be
         * guaranteed to see the written data if the sequence attribute is >=u.
         */
        if(H5Awrite(aid, H5T_NATIVE_ULONG, &u) < 0)
            return -1;

        /* Close the attribute */
        if(H5Aclose(aid) < 0)
            return -1;

        /* Uncork the metadata cache */
        /*if(H5Fset_mdc_config(fid, &mdc_config_orig) < 0)
            return(-1);*/

        /* Close the dataset's dataspace */
        if(H5Sclose(file_sid) < 0)
            return -1;

        /* Check for flushing file */
        if(flush_count > 0) {
            /* Decrement count of records to write before flushing */
            rec_to_flush--;

            /* Check for counter being reached */
            if(0 == rec_to_flush) {
                /* Flush contents of file */
                if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
                    return -1;

                /* Reset flush counter */
                rec_to_flush = flush_count;
            } /* end if */
        } /* end if */

#ifdef OUT
        /* Busy wait, to let readers catch up */
        /* If this is removed, also remove the BUSY_WAIT symbol
         * at the top of the file.
         */
        dummy = 0;
        for(v=0; v<BUSY_WAIT; v++)
            dummy++;
        if((unsigned long)dummy != v)
            return -1;
#endif /* OUT */

    } /* end for */

    /* Close the memory dataspace */
    if(H5Sclose(mem_sid) < 0)
        return -1;

    /* Close the datatype */
    if(H5Tclose(tid) < 0)
        return -1;

    /* Emit informational message */
    if(verbose)
        fprintf(stderr, "Closing datasets\n");

    /* Close the datasets */
    for(u = 0; u < NLEVELS; u++)
        for(v = 0; v < symbol_count[u]; v++)
            if(H5Dclose(symbol_info[u][v].dsid) < 0)
                return -1;

    return 0;
}
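add_records() expects the caller to have opened the file for SWMR writing and to have opened every dataset ID stored in symbol_info[][]. Below is a stripped-down, hypothetical call sequence; the file name, record count, and flush interval are illustrative only.

/* Hypothetical driver for the sparse writer */
static int
run_writer(const char *filename, unsigned verbose)
{
    hid_t fid;

    /* Open the existing test file for single-writer/multiple-reader access */
    if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, H5P_DEFAULT)) < 0)
        return -1;

    /* ... open the datasets referenced by symbol_info[][] here ... */

    /* Write 50000 records, flushing the file every 1000 records */
    if(add_records(fid, verbose, (unsigned long)50000, (unsigned long)1000) < 0)
        return -1;

    return (H5Fclose(fid) < 0) ? -1 : 0;
}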