Example #1
int main (int argc, char **argv) {
    int i, j, x, y, owner_rank, ierr=0, numtasks, rank, k;
    double *matrix = malloc (NREPS * CSIZE * RSIZE *  sizeof(double));

    MPI_Assert(MPI_Init (&argc, &argv), MPI_SUCCESS, 
	    "Failed to initialize MPI Environment", 101);
    
    MPI_Assert(MPI_Comm_size (MPI_COMM_WORLD, &numtasks), MPI_SUCCESS, 
	    "Error in querying communicator size", 102);

    MPI_Assert(numtasks, NPROC, 
	    "Number of processors specified is not consistent", 501);
    
    MPI_Assert(MPI_Comm_rank (MPI_COMM_WORLD, &rank), MPI_SUCCESS,
	    "Error in querying task ID", 103);

    MPI_Root (printf("Detected %d tasks. Root is %d.\n", numtasks, rank));

    for (i=0; i<CSIZE; ++i)
	for (j=0; j<RSIZE; ++j)
	    matrix[index(i,j)] = i*RSIZE + j;

    for (k=0; k<numtasks; ++k) {
	if (k == rank)
		Assert(disp_formatted(matrix, rank),
			0,"Failed display!");
	MPI_Barrier (MPI_COMM_WORLD);
    }

    /* Point directly at row 1 of the matrix; a separate malloc here would be
       leaked as soon as the pointer is reassigned. */
    double *operating_row = &matrix[index(1,0)];

    for (k=0; k<numtasks; ++k) {
	if (k == rank) {
	    printf ("\n");
	    for (i=0; i<RSIZE; ++i)
		printf ("%f\t",operating_row[i]);
	}
	MPI_Barrier (MPI_COMM_WORLD);
    }

    free (matrix);
    MPI_Finalize ();
    return 0;
}
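
The snippet above relies on helper macros that are defined elsewhere in the project and are not shown here: MPI_Assert, Assert, MPI_Root, and index, plus the NPROC/CSIZE/RSIZE constants. A minimal sketch of what those macros might look like, inferred purely from how they are called in this example (a hypothetical reconstruction, not the project's actual definitions):

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

/* Flatten (row, col) into the row-major matrix buffer, as implied by
 * matrix[index(i,j)] = i*RSIZE + j in the initialization loop above. */
#define index(i, j)  ((i) * RSIZE + (j))

/* Abort the job with `errcode` if `call` does not return `expected`. */
#define MPI_Assert(call, expected, msg, errcode)          \
    do {                                                  \
        if ((call) != (expected)) {                       \
            fprintf(stderr, "%s\n", (msg));               \
            MPI_Abort(MPI_COMM_WORLD, (errcode));         \
        }                                                 \
    } while (0)

/* Same check for non-MPI calls. */
#define Assert(call, expected, msg)                       \
    do {                                                  \
        if ((call) != (expected)) {                       \
            fprintf(stderr, "%s\n", (msg));               \
            exit(EXIT_FAILURE);                           \
        }                                                 \
    } while (0)

/* Run a statement on the root task only; assumes `rank` is in scope. */
#define MPI_Root(stmt)  do { if (rank == 0) { stmt; } } while (0)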
Example #2
static void test_file_mode_reads(const char *bp_filename) {
	int i;
	ADIOS_FILE *fp = adios_read_open_file(bp_filename, ADIOS_READ_METHOD_BP, COMM);
	MPI_Assert(COMM, fp);

	fprintf(stderr, "[rank %d/%d] Starting file-mode writeblock reads on %s\n", rank, size, bp_filename);

	for (i = 0; i < fp->nvars; ++i) {
		const char *varname = fp->var_namelist[i];
		test_file_mode_reads_on_var(fp, bp_filename, varname);

		MPI_Barrier(COMM);
	}

	adios_read_close(fp);

	fprintf(stderr, "[rank %d/%d] Finished file-mode writeblock reads on %s\n", rank, size, bp_filename);
}
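
Examples #2 and #3 use a different, two-argument MPI_Assert(comm, cond) helper that is likewise not shown. A plausible sketch, assuming it simply aborts on the given communicator when the condition is false and that the file-scope rank and size variables used by the tests are available:

#include <stdio.h>
#include <mpi.h>

/* Hypothetical helper: abort the job on `comm` if `cond` does not hold.
 * `rank` and `size` are assumed to be file-scope globals, as in the tests. */
#define MPI_Assert(comm, cond)                                        \
    do {                                                              \
        if (!(cond)) {                                                \
            fprintf(stderr, "[rank %d/%d] assertion failed: %s\n",    \
                    rank, size, #cond);                               \
            MPI_Abort((comm), 1);                                     \
        }                                                             \
    } while (0)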
Example #3
static void test_file_mode_reads_on_var(ADIOS_FILE *fp, const char *bp_filename, const char *varname) {
	int i;

	ADIOS_VARINFO *varinfo = adios_inq_var(fp, varname);
	MPI_Assert(COMM, varinfo);

	if (varinfo->value != NULL) {
		//if (rank == 0) fprintf(stderr, "(skipping scalar variable '%s')\n", varname);
		adios_free_varinfo(varinfo);
		return;
	}

	fprintf(stderr, "[rank %d/%d] Starting file-mode writeblock reads on %s:/%s\n", rank, size, bp_filename, varname);

	adios_inq_var_blockinfo(fp, varinfo);
	MPI_Assert(COMM, varinfo->blockinfo);

	const enum ADIOS_DATATYPES datatype = varinfo->type;
	const int datatypesize = adios_get_type_size(datatype, NULL);

	int timestep, timestep_blockidx, blockidx = 0;
	for (timestep = 0; timestep < varinfo->nsteps; ++timestep) {
		for (timestep_blockidx = 0; timestep_blockidx < varinfo->nblocks[timestep]; ++timestep_blockidx, ++blockidx) {
			if (blockidx % size != rank) continue;

			const ADIOS_VARBLOCK *vb = &varinfo->blockinfo[blockidx];

			ADIOS_SELECTION *block_bb = adios_selection_boundingbox(varinfo->ndim, vb->start, vb->count);
			ADIOS_SELECTION *block_wb = adios_selection_writeblock(timestep_blockidx);
			ADIOS_SELECTION *block_abs_wb = adios_selection_writeblock(blockidx);
			block_abs_wb->u.block.is_absolute_index = 1;

			uint64_t blocksize = datatypesize;
			for (i = 0; i < varinfo->ndim; ++i)
				blocksize *= vb->count[i];

			void *buf_bb = malloc(blocksize);
			void *buf_wb = malloc(blocksize);
			void *buf_abs_wb = malloc(blocksize);
			/* Check the allocations before touching the buffers. */
			MPI_Assert(COMM, buf_bb && buf_wb && buf_abs_wb);
			memset(buf_bb,     0, blocksize);
			memset(buf_wb,     1, blocksize);
			memset(buf_abs_wb, 2, blocksize);

			adios_schedule_read(fp, block_bb,     varname, timestep, 1, buf_bb    );
			adios_schedule_read(fp, block_wb,     varname, timestep, 1, buf_wb    );
			adios_schedule_read(fp, block_abs_wb, varname, timestep, 1, buf_abs_wb);
			adios_perform_reads(fp, 1);

			fprintf(stderr, "[rank %d/%d] Checking file-mode blockidx %d BB vs. WB...\n", rank, size, blockidx);
			MPI_Assert(COMM, memcmp(buf_bb, buf_wb, blocksize) == 0);
			fprintf(stderr, "[rank %d/%d] Checking file-mode blockidx %d BB vs. abs-WB...\n", rank, size, blockidx);
			MPI_Assert(COMM, memcmp(buf_bb, buf_abs_wb, blocksize) == 0);

			free(buf_bb); free(buf_wb); free(buf_abs_wb);
			adios_selection_delete(block_bb);
			adios_selection_delete(block_wb);
			adios_selection_delete(block_abs_wb);
		}
	}

	adios_free_varinfo(varinfo);

	fprintf(stderr, "[rank %d/%d] Finished file-mode writeblock reads on %s:/%s\n", rank, size, bp_filename, varname);
}
Example #4
//---------------------------------------------------------------------------------
int main (int argc, char **argv) {
    //Struct for MPI_Allreduce (MAX_LOC)
    struct {
	double value;
	int rank;
    } local_in, global_out;

    int ierr = 0;
    int i, j, x, y, owner_rank, numtasks, rank, k;
    
    MPI_Init (&argc, &argv); 
    MPI_Comm_size (MPI_COMM_WORLD, &numtasks);
    MPI_Comm_rank (MPI_COMM_WORLD, &rank);
    
    ierr = parse_command_line (argc, argv);
    if (ierr) { 
       MPI_Root (help_message ());
       MPI_Abort (MPI_COMM_WORLD, 000);
    }     
    MPI_Root (echo_parameters ());
    
    double *matrix = malloc (NREPS * CSIZE * RSIZE *  sizeof(double));
    double pp_max; int pp_loc;
    double t1, t2;

    MPI_Assert(numtasks, NPROC, 
	    "Number of processors specified is not consistent", 501);

    //MPI_Root (printf("Detected %d tasks. Root is %d.\n", numtasks, rank));

    Assert(init_rand(matrix, rank+1), 0, "Failed initialization!");

    //MPI_Root (printf ("\n Initial Matrix \n--------------------------------------------\n"));
    //for (k=0; k<NREPS; ++k)
	//for (i=0; i<numtasks; ++i) {
	//    if (i == rank)
	//	Assert(disp_formatted(matrix, rank, k),
	//		0,"Failed display!");
	//    MPI_Barrier (MPI_COMM_WORLD);
	//}

    double *operating_row = malloc (1 * RSIZE * sizeof(double));
    int row_start_index, col_start_index, dest, src, iters;
    int tag1 = 351, tag2 = 451, tag3 = 551;
    int pp_row_no=0, pp_col_no, pp_dummy_rank;
    MPI_Status status;

    //Define Derived MPI Datatype for communication
    MPI_Datatype MPI_ROW;
    MPI_Type_contiguous (RSIZE, MPI_DOUBLE, &MPI_ROW);
    MPI_Type_commit (&MPI_ROW);

    //Fix number of iterations and output system type
    iters = fmin (NROWS, NCOLS);
    /*
    if (NROWS>NCOLS) {	
	MPI_Root (printf ("\nSystem is over-determined. Row Count = %d, Column Count = %d.\n", NROWS, NCOLS)); }
    else if (NCOLS>NROWS) {
	MPI_Root (printf ("\nSystem is under-determined. Row Count = %d, Column Count = %d.\n", NROWS, NCOLS)); }
    else {
	MPI_Root (printf ("\nSystem is square. Row Count = Column Count = %d.\n", NCOLS)); }
	*/

    //Barrier for timing synchronization
    MPI_Barrier (MPI_COMM_WORLD);
    t1 = MPI_Wtime ();

    for (i=0; i<iters; ++i) { 
	global_to_local_map (i,i,rank,&x,&y,&owner_rank);
	row_start_index = x;
	col_start_index = y;
	if (rank == owner_rank) {
	    if (fabs(matrix[index(x,y)])<1E-14) {
		printf ("\nMatrix is singular or very nearly singular. Aborting. "
			"Error encountered while evaluating row %d.\n", i);
		MPI_Abort (MPI_COMM_WORLD, 999);
	    }
	    row_start_index += 1;
	    vec_to_vec (&matrix[index(x,0)], &operating_row[0]);
	}
	MPI_Bcast (&operating_row[0], 1, MPI_ROW, owner_rank, MPI_COMM_WORLD);
	if (x!=-1 && y!=-1) 
	    gauss_step (matrix, operating_row, row_start_index, col_start_index);
    }

    MPI_Barrier (MPI_COMM_WORLD);
    t2 = MPI_Wtime ();

    /*
    MPI_Root (printf ("\n RREF Matrix \n--------------------------------------------\n"));
    for (k=0; k<NREPS; ++k)
	for (i=0; i<numtasks; ++i) {
	    if (i == rank)
		Assert(disp_formatted(matrix, rank, k),
			0,"Failed display!");
	    MPI_Barrier (MPI_COMM_WORLD);
	}
	*/

    //MPI_Root(printf("\nReached End of Program\n"));
    MPI_Root(printf("GE algorithm took %f seconds\n", t2-t1));
    MPI_Type_free (&MPI_ROW);
    free (matrix);
    free (operating_row);
    MPI_Finalize ();
    return 0;
}
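
global_to_local_map, vec_to_vec, gauss_step, init_rand, and the command-line helpers are defined elsewhere in the project, and the exact data distribution is not visible in this example. Purely as a hypothetical illustration of the elimination step itself (assuming operating_row holds the broadcast pivot row, index() is the row-major macro sketched after Example #1, and each rank owns CSIZE local rows of RSIZE entries):

/* Hypothetical sketch only: for every local row from row_start_index down,
 * subtract the multiple of the pivot row that zeroes the pivot column.
 * The project's real gauss_step may differ. */
static void gauss_step(double *matrix, const double *operating_row,
                       int row_start_index, int col_start_index)
{
    int r, c;
    for (r = row_start_index; r < CSIZE; ++r) {
        double factor = matrix[index(r, col_start_index)]
                      / operating_row[col_start_index];
        for (c = col_start_index; c < RSIZE; ++c)
            matrix[index(r, c)] -= factor * operating_row[c];
    }
}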