Code example #1
File: comm_test.c Project: piperod/ExtremScale
#include <mpi.h>
#include <stdio.h>

int main(int argc, char** argv)
{
    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_SINGLE, &provided);

    int size, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    rank_printf(0, "Running with %d MPI processes\n", size);

    if (rank == 0) {
        double tickSize = MPI_Wtick();
        printf("MPI wall clock resolution: %0.4le seconds\n", tickSize);
    }

    /* Message sizes: 0 bytes, then powers of two from 2^2 up to 2^18. */
    int mvals[18];
    mvals[0] = 0;
    for (int k = 2; k <= 18; k++) {
        mvals[k-1] = (1 << k);
    }

    rank_printf(0, "\n");

    const int numIters = 5;
    const int numPerIter = 1000;

    print_header();

    run_tests(0, 1, rank, mvals, 18, numIters, numPerIter);
    run_tests(0, size-1, rank, mvals, 18, numIters, numPerIter);
    run_tests(2, 3, rank, mvals, 18, numIters, numPerIter);  /* assumes at least 4 ranks */

    MPI_Finalize();
    return 0;
}
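The helpers rank_printf, print_header, and run_tests are defined elsewhere in comm_test.c (run_tests and print_header appear as code examples #3 and #4 below), but rank_printf itself is never shown. Judging from the call sites, rank_printf(r, ...) prints only on rank r of MPI_COMM_WORLD. A minimal sketch under that assumption (the project's actual implementation may differ; note also that code example #2 is from a different project and uses a rank_printf with no rank argument):

#include <stdarg.h>
#include <stdio.h>
#include <mpi.h>

/* Hypothetical sketch: print only when the calling rank matches target. */
static void rank_printf(int target, const char *fmt, ...)
{
    int me;
    MPI_Comm_rank(MPI_COMM_WORLD, &me);
    if (me != target)
        return;

    va_list ap;
    va_start(ap, fmt);
    vprintf(fmt, ap);
    va_end(ap);
    fflush(stdout);  /* flush so output is not lost if a rank aborts */
}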
Code example #2
static int one_round_run(int round_no)
{
	int ret = 0, fd = -1, j;
	unsigned long i, chunk_no = 0;
	struct write_unit wu;

	MPI_Request request;
	MPI_Status  status;

	/*
	 * Root rank creates working file in chunks.
	 */
	if (!rank) {
		rank_printf("Prepare file of %lu bytes\n", file_size);

		open_rw_flags |= O_DIRECT;
		open_ro_flags |= O_DIRECT;

		ret = prep_orig_file_in_chunks(workfile, file_size);
		should_exit(ret);
	}

	MPI_Barrier_Sync();

	if (!rank) {
		fd = open_file(workfile, open_rw_flags);
		should_exit(fd);
	} else {

		/*
		 * Verification at the very beginning does nothing more
		 * than read the file into the page cache on non-root
		 * nodes.
		 */
		open_rw_flags &= ~O_DIRECT;
		open_ro_flags &= ~O_DIRECT;

		ret = verify_file(1, NULL, remote_wus, workfile, file_size);
		should_exit(ret);
	}

	MPI_Barrier_Sync();

	/*
	 * The root rank writes chunks at random, serially.
	 */
	for (i = 0; i < num_chunks; i++) {
		
		MPI_Barrier_Sync();
		/*
		 * The root rank generates a random write unit, does an
		 * O_DIRECT write into the file, then sends the in-memory
		 * copy to the rest of the ranks.
		 */
		if (!rank) {

			chunk_no = get_rand_ul(0, num_chunks - 1);
			prep_rand_dest_write_unit(&wu, chunk_no);
			rank_printf("Write #%lu chunk with char(%c)\n",
				    chunk_no, wu.wu_char);
			ret = do_write_chunk(fd, wu);
			should_exit(ret);
			
			memcpy(&remote_wus[wu.wu_chunk_no], &wu, sizeof(wu));

			for (j = 1; j < size; j++) {
				if (verbose)
					rank_printf("Send write unit #%lu chunk "
						    "char(%c) to rank %d\n",
						     wu.wu_chunk_no,
						     wu.wu_char, j);
				ret = MPI_Isend(&wu, sizeof(wu), MPI_BYTE, j,
						1, MPI_COMM_WORLD, &request);
				if (ret != MPI_SUCCESS)
					abort_printf("MPI_Isend failed: %d\n",
						     ret);
				MPI_Wait(&request, &status);

			}
		} else {

			MPI_Irecv(&wu, sizeof(wu), MPI_BYTE, 0, 1,
				  MPI_COMM_WORLD, &request);
			MPI_Wait(&request, &status);

			if (verbose)
				rank_printf("Receive write unit #%lu chunk "
					    "char(%c)\n", wu.wu_chunk_no, wu.wu_char);

			if (wu.wu_timestamp >=
				remote_wus[wu.wu_chunk_no].wu_timestamp)
				memcpy(&remote_wus[wu.wu_chunk_no],
				       &wu, sizeof(wu));
		}

		MPI_Barrier_Sync();

		if (rank) {

			/*
			 * All non-root ranks need to verify that O_DIRECT
			 * writes from the remote root node can be seen
			 * locally.
			 */
			rank_printf("Try to verify whole file in chunks.\n");

			ret = verify_file(1, NULL, remote_wus, workfile, file_size);
			should_exit(ret);
		}
	}

	MPI_Barrier_Sync();

	if (!rank)
		if (fd >= 0)
			close(fd);

	return ret;
}
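MPI_Barrier_Sync is not defined in this excerpt; from its usage it is presumably a checked wrapper around MPI_Barrier on MPI_COMM_WORLD. A minimal sketch under that assumption, reusing the abort_printf error helper already called in one_round_run:

/* Hypothetical sketch: synchronize all ranks, abort the job on failure. */
static void MPI_Barrier_Sync(void)
{
	int ret = MPI_Barrier(MPI_COMM_WORLD);

	if (ret != MPI_SUCCESS)
		abort_printf("MPI_Barrier failed: %d\n", ret);
}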
Code example #3
File: comm_test.c Project: piperod/ExtremScale
void run_tests(int i, int j, int rank, int *mvals, int mcount, int niters, int nPerIter)
{
    double start, end;

    for (int mIter = 0; mIter < mcount; mIter++) {
        int m = mvals[mIter];

        char* buffer1 = malloc(m * sizeof(char));
        char* buffer2 = malloc(m * sizeof(char));

        if (rank == i || rank == j) {
            double minTime = 1e9;
            for (int iter = 0; iter < niters; iter++) {
                start = MPI_Wtime();
                for (int run = 0; run < nPerIter; run++) {
                    pingpong_blocking(i, j, m, rank, buffer1);
                }
                end = MPI_Wtime();
                double timeTaken = (end - start) / nPerIter;  /* avg time per ping-pong call */
                if (timeTaken < minTime) minTime = timeTaken;
            }
            rank_printf(i, "ppnb\t%d\t%d\t%d\t%0.4le\n", i, j, m, minTime);
        }

        MPI_Barrier(MPI_COMM_WORLD);  // ----------------------------------------------------------------------

        if (rank == i || rank == j) {
            double minTime = 1e9;
            for (int iter = 0; iter < niters; iter++) {
                start = MPI_Wtime();
                for (int run = 0; run < nPerIter; run++) {
                    pingpong_nonblocking(i, j, m, rank, buffer1);
                }
                end = MPI_Wtime();
                double timeTaken = (end - start) / nPerIter;
                if (timeTaken < minTime) minTime = timeTaken;
            }
            rank_printf(i, "ppb\t%d\t%d\t%d\t%0.4le\n", i, j, m, minTime);
        }

        MPI_Barrier(MPI_COMM_WORLD);  // ----------------------------------------------------------------------

        if (rank == i || rank == j) {
            double minTime = 1e9;
            for (int iter = 0; iter < niters; iter++) {
                start = MPI_Wtime();
                for (int run = 0; run < nPerIter; run++) {
                    head_to_head(i, j, m, rank, buffer1, buffer2);
                }
                end = MPI_Wtime();
                double timeTaken = (end - start) / nPerIter;
                if (timeTaken < minTime) minTime = timeTaken;
            }
            rank_printf(i, "h2h\t%d\t%d\t%d\t%0.4le\n", i, j, m, minTime);
        }

        MPI_Barrier(MPI_COMM_WORLD);  // ----------------------------------------------------------------------

        free(buffer1);
        free(buffer2);
    }
}
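The three kernels timed above, pingpong_blocking, pingpong_nonblocking, and head_to_head, are not shown in these excerpts. The sketches below are hypothetical reconstructions inferred from the call sites: ranks i and j exchange m bytes per call, so the reported minTime is the average time for one such exchange. The actual implementations in piperod/ExtremScale may differ.

/* Hypothetical sketch: blocking ping-pong, one call = one round trip. */
static void pingpong_blocking(int i, int j, int m, int rank, char *buf)
{
    if (rank == i) {
        MPI_Send(buf, m, MPI_CHAR, j, 0, MPI_COMM_WORLD);
        MPI_Recv(buf, m, MPI_CHAR, j, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    } else if (rank == j) {
        MPI_Recv(buf, m, MPI_CHAR, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        MPI_Send(buf, m, MPI_CHAR, i, 0, MPI_COMM_WORLD);
    }
}

/* Hypothetical sketch: the same round trip using non-blocking calls. */
static void pingpong_nonblocking(int i, int j, int m, int rank, char *buf)
{
    MPI_Request req;
    int peer = (rank == i) ? j : i;

    if (rank == i) {
        MPI_Isend(buf, m, MPI_CHAR, peer, 0, MPI_COMM_WORLD, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);
        MPI_Irecv(buf, m, MPI_CHAR, peer, 0, MPI_COMM_WORLD, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);
    } else if (rank == j) {
        MPI_Irecv(buf, m, MPI_CHAR, peer, 0, MPI_COMM_WORLD, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);
        MPI_Isend(buf, m, MPI_CHAR, peer, 0, MPI_COMM_WORLD, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);
    }
}

/* Hypothetical sketch: both ranks send and receive at the same time. */
static void head_to_head(int i, int j, int m, int rank, char *sbuf, char *rbuf)
{
    int peer = (rank == i) ? j : i;
    MPI_Sendrecv(sbuf, m, MPI_CHAR, peer, 0,
                 rbuf, m, MPI_CHAR, peer, 0,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}

Under these sketches, one ppb measurement is a full round trip, so minTime / 2 approximates the one-way latency at message size m.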
Code example #4
File: comm_test.c Project: piperod/ExtremScale
void print_header(void)
{
    rank_printf(0, "test\ti\tj\tm\ttime\n");
}