Example no. 1
/*
 * Function:    output_results
 * Purpose:     Print the time and bandwidth information for a given
 *                  minmax table and number of iterations.
 * Return:      Nothing
 * Programmer:  Quincey Koziol, 9. May 2002
 * Modifications:
 */
static void
output_results(const struct options *opts, const char *name, minmax *table,
    int table_size, off_t data_size)
{
    minmax          total_mm;

    accumulate_minmax_stuff(table, table_size, &total_mm);

    print_indent(3);
    output_report("%s (%d iteration(s)):\n", name,table_size);

    /* Note: The maximum throughput uses the minimum amount of time & vice versa */

    print_indent(4);
    output_report("Maximum Throughput: %6.2f MB/s", MB_PER_SEC(data_size,total_mm.min));
    if(opts->print_times)
        output_report(" (%7.3f s)\n", total_mm.min);
    else
        output_report("\n");

    print_indent(4);
    output_report("Average Throughput: %6.2f MB/s",
                  MB_PER_SEC(data_size, total_mm.sum / total_mm.num));
    if(opts->print_times)
        output_report(" (%7.3f s)\n", (total_mm.sum / total_mm.num));
    else
        output_report("\n");

    print_indent(4);
    output_report("Minimum Throughput: %6.2f MB/s", MB_PER_SEC(data_size,total_mm.max));
    if(opts->print_times)
        output_report(" (%7.3f s)\n", total_mm.max);
    else
        output_report("\n");
}
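
The minmax record and the MB_PER_SEC() macro that output_results() relies on are not shown in these examples. The following is a minimal sketch of what they might look like, inferred only from how output_results() uses them; the ONE_MB constant, the field names, and the macro body are assumptions, not the tool's actual definitions.

#include <stdio.h>

/* Minimal sketch, NOT the tool's actual definitions: a minmax record holding
 * the minimum, maximum, sum, and count of observed times, plus a throughput
 * macro that converts a byte count and an elapsed time into MB/s.  The names
 * simply mirror how output_results() uses them.                             */
#define ONE_MB               (1024.0 * 1024.0)
#define MB_PER_SEC(bytes, t) (((t) > 0.0) ? ((double)(bytes) / ONE_MB / (t)) : 0.0)

typedef struct minmax {
    double min;  /* shortest observed time (seconds) */
    double max;  /* longest observed time (seconds)  */
    double sum;  /* total of all observed times      */
    int    num;  /* number of observations           */
} minmax;

int main(void)
{
    /* 64 MB written in 0.5 s -> 128 MB/s; the fastest run gives the maximum
     * throughput, which is why output_results() pairs MB_PER_SEC with .min. */
    printf("%6.2f MB/s\n", MB_PER_SEC(64 * ONE_MB, 0.5));
    return 0;
}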
Example no. 2
/*
 * Function:    output_all_info
 * Purpose:     Print the minimum and maximum time recorded for each iteration.
 * Return:      Nothing
 * Programmer:  Bill Wendling, 29. January 2002
 * Modifications:
 */
static void
output_all_info(minmax *mm, int count, int indent_level)
{
    int i;

    for (i = 0; i < count; ++i) {
        print_indent(indent_level);
        output_report("Iteration %d:\n", i + 1);
        print_indent(indent_level + 1);
        output_report("Minimum Time: %.2fs\n", mm[i].min);
        print_indent(indent_level + 1);
        output_report("Maximum Time: %.2fs\n", mm[i].max);
    }
}
Example no. 3
int main(int argc, char *argv[])
{
    const char *dirname;
    int listnum;
    int result;

    /* first argument is the directory */
    if (argc < 3)
    {
    	fprintf(stderr, "Usage:\nsummcomp <outputdir> <summary1> [<summary2> [<summary3> ...]]\n");
    	return 1;
    }
    dirname = argv[1];
    list_count = argc - 2;

    /* loop over arguments and read the files */
    for (listnum = 0; listnum < list_count; listnum++)
    {
        result = read_summary_log(argv[listnum + 2], listnum);
        if (result != 0)
            return result;
    }

    /* output the summary */
    output_report(dirname, sort_file_list());
    return 0;
}
Example no. 4
/*
 * Function:    run_test_loop
 * Purpose:     Run the I/O tests. Write the results to OUTPUT.
 *
 *            - The slowest changing part of the test is the number of
 *              processors to use. For each loop iteration, we divide that
 *              number by 2 and rerun the test.
 *
 *            - The second slowest is which type of I/O API to use. We have
 *              two choices: POSIX I/O and HDF5.
 *
 *            - Then we change the size of the buffer. This information is
 *              inferred from the number of datasets to create and the number
 *              of integers to put into each dataset. The backend code figures
 *              this out.
 *
 * Return:      Nothing
 * Programmer:  Bill Wendling, 30. October 2001
 * Modifications:
 *    Added multidimensional testing (Christian Chilan, April, 2008)
 */
static void
run_test_loop(struct options *opts)
{
    parameters parms;
    int i;
    size_t      buf_bytes;

    /* load options into parameter structure */
    parms.num_files = opts->num_files;
    parms.num_dsets = opts->num_dsets;
    parms.num_iters = opts->num_iters;
    parms.rank = opts->dset_rank;
    parms.h5_align = opts->h5_alignment;
    parms.h5_thresh = opts->h5_threshold;
    parms.h5_use_chunks = opts->h5_use_chunks;
    parms.h5_extendable = opts->h5_extendable;
    parms.h5_write_only = opts->h5_write_only;
    parms.verify = opts->verify;
    parms.vfd = opts->vfd;
    parms.page_buffer_size = opts->page_buffer_size;
    parms.page_size = opts->page_size;

    /* load multidimensional options */
    parms.num_bytes = 1;
    buf_bytes = 1;
    for (i = 0; i < parms.rank; i++) {
        parms.buf_size[i] = opts->buf_size[i];
        parms.dset_size[i] = opts->dset_size[i];
        parms.chk_size[i] = opts->chk_size[i];
        parms.order[i] = opts->order[i];
        parms.num_bytes *= opts->dset_size[i];
        buf_bytes *= opts->buf_size[i];
    }

    /* print size information */
    output_report("Transfer Buffer Size (bytes): %d\n", buf_bytes);
    output_report("File Size(MB): %.2f\n",((double)parms.num_bytes) / ONE_MB);

    print_indent(0);
    if (opts->io_types & SIO_POSIX)
        run_test(POSIXIO, parms, opts);

    print_indent(0);
    if (opts->io_types & SIO_HDF5)
        run_test(HDF5, parms, opts);
}
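
To make the size computation above concrete, here is a small self-contained sketch of the per-dimension products that run_test_loop() performs; the 2-D dimensions are made-up illustration values, not defaults from the tool.

#include <stdio.h>

int main(void)
{
    /* Hypothetical 2-D case: a 1024 x 2048 dataset of bytes, transferred
     * through a 256 x 256 buffer.  num_bytes and buf_bytes are computed
     * exactly as in run_test_loop(): the product over all dimensions.   */
    size_t dset_size[2] = {1024, 2048};
    size_t buf_size[2]  = {256, 256};
    size_t num_bytes = 1, buf_bytes = 1;
    int i;

    for (i = 0; i < 2; i++) {
        num_bytes *= dset_size[i];
        buf_bytes *= buf_size[i];
    }

    printf("Transfer Buffer Size (bytes): %zu\n", buf_bytes);                  /* 65536 */
    printf("File Size (MB): %.2f\n", (double)num_bytes / (1024.0 * 1024.0));   /* 2.00  */
    return 0;
}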
Example no. 5
int main(int argc, char *argv[])
{
	astring *dirname = NULL, *tempfilename = NULL, *tempheader = NULL, *tempfooter = NULL;
	UINT32 bufsize;
	void *buffer;
	int listnum;
    int result;

    /* first two arguments are the template file and the output directory */
    if (argc < 4)
    {
    	fprintf(stderr, "Usage:\nregrep <template> <outputdir> <summary1> [<summary2> [<summary3> ...]]\n");
    	return 1;
    }
    tempfilename = astring_dupc(argv[1]);
    dirname = astring_dupc(argv[2]);
    list_count = argc - 3;

	/* read the template file into an astring */
	if (core_fload(astring_c(tempfilename), &buffer, &bufsize) == FILERR_NONE)
	{
		tempheader = astring_dupch((const char *)buffer, bufsize);
		free(buffer);
	}

	/* verify the template */
	if (tempheader == NULL)
	{
		fprintf(stderr, "Unable to read template file\n");
		return 1;
	}
	result = astring_findc(tempheader, 0, "<!--CONTENT-->");
	if (result == -1)
	{
		fprintf(stderr, "Template is missing a <!--CONTENT--> marker\n");
		return 1;
	}
	tempfooter = astring_substr(astring_dup(tempheader), result + 14, -1);
	tempheader = astring_substr(tempheader, 0, result);

    /* loop over arguments and read the files */
    for (listnum = 0; listnum < list_count; listnum++)
    {
        result = read_summary_log(argv[listnum + 3], listnum);
        if (result != 0)
            return result;
    }

    /* output the summary */
    output_report(dirname, tempheader, tempfooter, sort_file_list());

	astring_free(dirname);
	astring_free(tempfilename);
	astring_free(tempheader);
	astring_free(tempfooter);
    return 0;
}
Example no. 6
int main(int argc, char *argv[])
{
	UINT32 bufsize;
	void *buffer;
	int listnum;
	int result;

	/* first two arguments are the template file and the output directory */
	if (argc < 4)
	{
		fprintf(stderr, "Usage:\nregrep <template> <outputdir> <summary1> [<summary2> [<summary3> ...]]\n");
		return 1;
	}
	std::string tempfilename(argv[1]);
	std::string dirname(argv[2]);
	list_count = argc - 3;

	/* read the template file into a string */
	std::string tempheader;
	if (util::core_file::load(tempfilename.c_str(), &buffer, bufsize) == osd_file::error::NONE)
	{
		tempheader.assign((const char *)buffer, bufsize);
		osd_free(buffer);
	}

	/* verify the template */
	if (tempheader.length() == 0)
	{
		fprintf(stderr, "Unable to read template file\n");
		return 1;
	}
	result = tempheader.find("<!--CONTENT-->");
	if (result == -1)
	{
		fprintf(stderr, "Template is missing a <!--CONTENT--> marker\n");
		return 1;
	}
	std::string tempfooter(tempheader);
	tempfooter = tempfooter.substr(result + 14);
	tempheader = tempheader.substr(0, result);

	/* loop over arguments and read the files */
	for (listnum = 0; listnum < list_count; listnum++)
	{
		result = read_summary_log(argv[listnum + 3], listnum);
		if (result != 0)
			return result;
	}

	/* output the summary */
	output_report(dirname, tempheader, tempfooter, sort_file_list());
	return 0;
}
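
Examples no. 5 and 6 split the template at the <!--CONTENT--> marker the same way: everything before the marker becomes the page header, and everything after the 14-character marker becomes the footer. The following standalone C sketch shows that split in isolation; split_template() is a hypothetical helper written for illustration, not a function from either program.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: split a template at the <!--CONTENT--> marker into a
 * header (text before the marker) and a footer (text after it).  Returns 0
 * on success, nonzero if the marker is missing.  The caller frees both
 * strings.                                                                 */
static int split_template(const char *tmpl, char **header, char **footer)
{
    static const char marker[] = "<!--CONTENT-->";   /* 14 characters */
    const char *pos = strstr(tmpl, marker);
    size_t head_len, foot_len;

    if (pos == NULL)
        return 1;

    head_len = (size_t)(pos - tmpl);
    foot_len = strlen(pos + strlen(marker));

    *header = (char *)malloc(head_len + 1);
    memcpy(*header, tmpl, head_len);
    (*header)[head_len] = '\0';

    *footer = (char *)malloc(foot_len + 1);
    memcpy(*footer, pos + strlen(marker), foot_len + 1);
    return 0;
}

int main(void)
{
    char *header = NULL, *footer = NULL;

    if (split_template("<html><body>\n<!--CONTENT-->\n</body></html>\n",
                       &header, &footer) != 0) {
        fprintf(stderr, "Template is missing a <!--CONTENT--> marker\n");
        return 1;
    }
    printf("header: %s\nfooter: %s\n", header, footer);
    free(header);
    free(footer);
    return 0;
}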
Example no. 7
/*
 * Function:    run_test
 * Purpose:     Inner loop call to actually run the I/O test.
 * Return:      Nothing
 * Programmer:  Bill Wendling, 18. December 2001
 * Modifications:
 */
static int
run_test(iotype iot, parameters parms, struct options *opts)
{
    results         res;
    register int    i, ret_value = SUCCESS;
    off_t           raw_size;
    minmax         *write_sys_mm_table=NULL;
    minmax         *write_mm_table=NULL;
    minmax         *write_gross_mm_table=NULL;
    minmax         *write_raw_mm_table=NULL;
    minmax         *read_sys_mm_table=NULL;
    minmax         *read_mm_table=NULL;
    minmax         *read_gross_mm_table=NULL;
    minmax         *read_raw_mm_table=NULL;
    minmax          write_sys_mm = {0.0F, 0.0F, 0.0F, 0};
    minmax          write_mm = {0.0F, 0.0F, 0.0F, 0};
    minmax          write_gross_mm = {0.0F, 0.0F, 0.0F, 0};
    minmax          write_raw_mm = {0.0F, 0.0F, 0.0F, 0};
    minmax          read_sys_mm = {0.0F, 0.0F, 0.0F, 0};
    minmax          read_mm = {0.0F, 0.0F, 0.0F, 0};
    minmax          read_gross_mm = {0.0F, 0.0F, 0.0F, 0};
    minmax          read_raw_mm = {0.0F, 0.0F, 0.0F, 0};

    raw_size = (off_t)parms.num_bytes;
    parms.io_type = iot;
    print_indent(2);
    output_report("IO API = ");

    switch (iot) {
        case POSIXIO:
            output_report("POSIX\n");
            break;
        case HDF5:
            output_report("HDF5\n");
            break;
        default:
            /* unknown request */
            HDfprintf(stderr, "Unknown IO type request (%d)\n", (int)iot);
            HDassert(0 && "Unknown IO type");
            break;
    }

    /* Allocate space for the minmax tables; calloc() zero-initializes */
    /* all elements, which is all the initialization they need.        */
    write_sys_mm_table = (minmax *)calloc((size_t)parms.num_iters, sizeof(minmax));
    write_mm_table = (minmax *)calloc((size_t)parms.num_iters, sizeof(minmax));
    write_gross_mm_table = (minmax *)calloc((size_t)parms.num_iters, sizeof(minmax));
    write_raw_mm_table = (minmax *)calloc((size_t)parms.num_iters, sizeof(minmax));

    if (!parms.h5_write_only) {
        read_sys_mm_table = (minmax *)calloc((size_t)parms.num_iters, sizeof(minmax));
        read_mm_table = (minmax *)calloc((size_t)parms.num_iters, sizeof(minmax));
        read_gross_mm_table = (minmax *)calloc((size_t)parms.num_iters, sizeof(minmax));
        read_raw_mm_table = (minmax *)calloc((size_t)parms.num_iters, sizeof(minmax));
    }

    /* Do IO iteration times, collecting statistics each time */
    for (i = 0; i < parms.num_iters; ++i) {
        double t;

        do_sio(parms, &res);

        /* gather all of the "sys write" times */
        t = get_time(res.timers, HDF5_MPI_WRITE);
        get_minmax(&write_sys_mm, t);

        write_sys_mm_table[i] = write_sys_mm;

        /* gather all of the "write" times */
        t = get_time(res.timers, HDF5_FINE_WRITE_FIXED_DIMS);
        get_minmax(&write_mm, t);

        write_mm_table[i] = write_mm;

        /* gather all of the "write" times from open to close */
        t = get_time(res.timers, HDF5_GROSS_WRITE_FIXED_DIMS);
        get_minmax(&write_gross_mm, t);

        write_gross_mm_table[i] = write_gross_mm;

        /* gather all of the raw "write" times */
        t = get_time(res.timers, HDF5_RAW_WRITE_FIXED_DIMS);
        get_minmax(&write_raw_mm, t);

        write_raw_mm_table[i] = write_raw_mm;

        if (!parms.h5_write_only) {
            /* gather all of the "mpi read" times */
            t = get_time(res.timers, HDF5_MPI_READ);
            get_minmax(&read_sys_mm, t);

            read_sys_mm_table[i] = read_sys_mm;

            /* gather all of the "read" times */
            t = get_time(res.timers, HDF5_FINE_READ_FIXED_DIMS);
            get_minmax(&read_mm, t);

            read_mm_table[i] = read_mm;

            /* gather all of the "read" times from open to close */
            t = get_time(res.timers, HDF5_GROSS_READ_FIXED_DIMS);
            get_minmax(&read_gross_mm, t);

            read_gross_mm_table[i] = read_gross_mm;

            /* gather all of the raw "read" times */
            t = get_time(res.timers, HDF5_RAW_READ_FIXED_DIMS);
            get_minmax(&read_raw_mm, t);

            read_raw_mm_table[i] = read_raw_mm;
        }
        io_time_destroy(res.timers);
    }

    /*
     * Show various statistics
     */
    /* Write statistics	*/
    /* Print the raw data throughput if desired */
    if (opts->print_raw) {
        /* accumulate and output the max, min, and average "raw write" times */
        if (sio_debug_level >= 3) {
            /* output all of the times for all iterations */
            print_indent(3);
            output_report("Raw Data Write details:\n");
            output_all_info(write_raw_mm_table, parms.num_iters, 4);
        }

        output_results(opts,"Raw Data Write",write_raw_mm_table,parms.num_iters,raw_size);
    } /* end if */

    /* show sys write statistics */
#if 0
    if (sio_debug_level >= 3) {
        /* output all of the times for all iterations */
        print_indent(3);
        output_report("MPI Write details:\n");
        output_all_info(write_sys_mm_table, parms.num_iters, 4);
    }
#endif
    /* We don't currently output the MPI write results */

    /* accumulate and output the max, min, and average "write" times */
    if (sio_debug_level >= 3) {
        /* output all of the times for all iterations */
        print_indent(3);
        output_report("Write details:\n");
        output_all_info(write_mm_table, parms.num_iters, 4);
    }

    output_results(opts,"Write",write_mm_table,parms.num_iters,raw_size);

    /* accumulate and output the max, min, and average "gross write" times */
    if (sio_debug_level >= 3) {
        /* output all of the times for all iterations */
        print_indent(3);
        output_report("Write Open-Close details:\n");
        output_all_info(write_gross_mm_table, parms.num_iters, 4);
    }

    output_results(opts,"Write Open-Close",write_gross_mm_table,parms.num_iters,raw_size);

    if (!parms.h5_write_only) {
        /* Read statistics	*/
        /* Print the raw data throughput if desired */
        if (opts->print_raw) {
            /* accumulate and output the max, min, and average "raw read" times */
            if (sio_debug_level >= 3) {
                /* output all of the times for all iterations */
                print_indent(3);
                output_report("Raw Data Read details:\n");
                output_all_info(read_raw_mm_table, parms.num_iters, 4);
            }

            output_results(opts, "Raw Data Read", read_raw_mm_table,
                           parms.num_iters, raw_size);
        } /* end if */

        /* show mpi read statistics */
#if 0
        if (sio_debug_level >= 3) {
            /* output all of the times for all iterations */
            print_indent(3);
            output_report("MPI Read details:\n");
            output_all_info(read_sys_mm_table, parms.num_iters, 4);
        }
#endif
        /* We don't currently output the MPI read results */

        /* accumulate and output the max, min, and average "read" times */
        if (sio_debug_level >= 3) {
            /* output all of the times for all iterations */
            print_indent(3);
            output_report("Read details:\n");
            output_all_info(read_mm_table, parms.num_iters, 4);
        }

        output_results(opts, "Read", read_mm_table, parms.num_iters, raw_size);

        /* accumulate and output the max, min, and average "gross read" times */
        if (sio_debug_level >= 3) {
            /* output all of the times for all iterations */
            print_indent(3);
            output_report("Read Open-Close details:\n");
            output_all_info(read_gross_mm_table, parms.num_iters, 4);
        }

        output_results(opts, "Read Open-Close", read_gross_mm_table,
                       parms.num_iters, raw_size);
    }

    /* clean up our mess */
    free(write_sys_mm_table);
    free(write_mm_table);
    free(write_gross_mm_table);
    free(write_raw_mm_table);

    if (!parms.h5_write_only) {
        free(read_sys_mm_table);
        free(read_mm_table);
        free(read_gross_mm_table);
        free(read_raw_mm_table);
    }

    return ret_value;
}
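
run_test() and output_results() depend on two helpers whose bodies are not shown in these examples: get_minmax(), which records one time measurement, and accumulate_minmax_stuff(), which reduces a whole table to a single summary record. Here is a plausible sketch inferred only from how they are called; the real tool's implementations may differ. The minmax layout matches the minimal sketch given after Example no. 1.

typedef struct minmax {
    double min;  /* shortest observed time (seconds) */
    double max;  /* longest observed time (seconds)  */
    double sum;  /* total of all observed times      */
    int    num;  /* number of observations           */
} minmax;

/* Record one time measurement.  For a single serial measurement the min and
 * max are the value itself; the running sum and count support averaging.    */
static void
get_minmax(minmax *mm, double t)
{
    mm->min = t;
    mm->max = t;
    mm->sum += t;
    mm->num++;
}

/* Combine a table of per-iteration records into one summary record holding
 * the overall minimum, maximum, sum, and count, which is what
 * output_results() reads back out.                                          */
static void
accumulate_minmax_stuff(const minmax *table, int table_size, minmax *total_mm)
{
    int i;

    total_mm->min = 0.0;
    total_mm->max = 0.0;
    total_mm->sum = 0.0;
    total_mm->num = 0;

    for (i = 0; i < table_size; ++i) {
        double t = table[i].max;  /* per-iteration time (min == max here) */

        if (total_mm->num == 0 || t < total_mm->min)
            total_mm->min = t;
        if (t > total_mm->max)
            total_mm->max = t;
        total_mm->sum += t;
        total_mm->num++;
    }
}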