Example #1
void ParallelCalc()
{
	double max, allmax;
	while (true)
	{
		max = 0;
		for (int i = 1; i < K + 1; i++)
			for (int j = 1; j < N + 1; j++)
			{
				double u0 = u1[i * (N + 2) + j];
				u1[i * (N + 2) + j] = 0.25 * (u1[(i - 1) * (N + 2) + j]
				+ u1[(i + 1) * (N + 2) + j] + u1[i * (N + 2) + j - 1]
				+ u1[i * (N + 2) + j + 1] - h * h * f1[(i - 1) * N + j - 1]);
				double d = fabs(u1[i * (N + 2) + j] - u0);
				if (d > max)
					max = d;
			}
		MPI_Allreduce(&max, &allmax, 1, MPI_DOUBLE, MPI_MAX,
		MPI_COMM_WORLD);
		if (allmax <= eps)
			break;
		int RSrc, RDest;
		MPI_Status s;
		MPI_Cart_shift(BAND_COMM, 0, 1, &RSrc, &RDest);
		MPI_Sendrecv(&u1[K * (N + 2)], N + 2, MPI_DOUBLE, RDest, 0, u1, N + 2, MPI_DOUBLE, RSrc, 0, BAND_COMM, &s);
		MPI_Cart_shift(BAND_COMM, 0, -1, &RSrc, &RDest);
		MPI_Sendrecv(&u1[N + 2], N + 2, MPI_DOUBLE, RDest, 0, &u1[(K + 1) * (N + 2)], N + 2, MPI_DOUBLE, RSrc, 0, BAND_COMM,&s);
	}
}
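The globals used by ParallelCalc (K, N, h, eps, u1, f1 and the band communicator BAND_COMM) are defined elsewhere in the original project. Below is a minimal sketch of one plausible way the 1-D BAND_COMM driving the halo exchange could be built; none of it comes from the excerpt.

/* Hedged sketch: create a 1-D, non-periodic band communicator. The outermost
 * bands then get MPI_PROC_NULL neighbours from MPI_Cart_shift, so their
 * MPI_Sendrecv calls above become no-ops on that side. */
#include <mpi.h>

MPI_Comm BAND_COMM;

void CreateBandComm(void)
{
	int nprocs, dims[1] = { 0 }, periods[1] = { 0 };
	MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
	MPI_Dims_create(nprocs, 1, dims);	/* dims[0] = nprocs */
	MPI_Cart_create(MPI_COMM_WORLD, 1, dims, periods, 0, &BAND_COMM);
}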
Example #2
static void init_mpi(void)
{
    MPI_Comm_size(MPI_COMM_WORLD, &nproc); //get the number of processes
    int dim = 2;          //number of dimensions
    int procs[2] = {0,0}; //[0]: number of subdivisions in x, [1]: number of subdivisions in y
    int period[2] = {0,0};//boundary conditions: 0 means fixed (non-periodic) boundaries
    MPI_Comm grid_comm;
    int reorder = 1;   //re-distribute rank flag

    MPI_Dims_create(nproc, dim, procs); //automatically decide how many subdivisions in each direction
    MPI_Cart_create(MPI_COMM_WORLD, 2, procs, period, reorder, &grid_comm); //partition the domain automatically => procs and grid_comm are updated
    MPI_Cart_shift(grid_comm, 0, 1, &ltRank, &rtRank);
    MPI_Cart_shift(grid_comm, 1, 1, &bmRank, &tpRank);

    //work out where this process sits in the process grid (which row and which column)
    int coordinates[2];
    MPI_Comm_rank(grid_comm, &rank);
    MPI_Cart_coords(grid_comm, rank, 2, coordinates);

    SUB_N_X = N_PX / procs[0];
    SUB_N_Y = N_PY / procs[1];
    SUB_N_PX = SUB_N_X + 2; //2 larger to hold the halo (cells that receive the neighbouring region's values)
    SUB_N_PY = SUB_N_Y + 2;
    SUB_N_CELL = SUB_N_PX*SUB_N_PY;
    offsetX = coordinates[0] * SUB_N_X; //offset in cells rather than in rank index, hence multiples of SUB_N_X
    offsetY = coordinates[1] * SUB_N_Y;

    /* This fetches one element every SUB_N_PY entries (a gap of SUB_N_PY-1 between consecutive picks), repeated for SUB_N_X rows */
    MPI_Type_vector(SUB_N_X, 1, SUB_N_PY, MPI_C_DOUBLE_COMPLEX, &X_DIRECTION_DOUBLE_COMPLEX);
    MPI_Type_commit(&X_DIRECTION_DOUBLE_COMPLEX);
}
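The vector type committed above picks one double-complex value every SUB_N_PY entries, SUB_N_X times, i.e. a line of constant y across the interior x range. As a hedged sketch (none of the code below appears in the excerpt), such a type could be combined with the bmRank/tpRank neighbours from MPI_Cart_shift for a halo exchange, assuming the field is stored as field[x * SUB_N_PY + y] and that grid_comm was saved in a global.

#include <complex.h>
#include <mpi.h>

/* Assumed to exist as globals, mirroring the excerpt: the committed datatype,
 * the neighbour ranks, the local sizes, and a saved copy of grid_comm. */
extern MPI_Datatype X_DIRECTION_DOUBLE_COMPLEX;
extern MPI_Comm grid_comm;
extern int bmRank, tpRank, SUB_N_PY;

/* Send the first interior line (y = 1) to the bottom neighbour and fill the
 * top ghost line (y = SUB_N_PY - 1) with the line received from the top. */
void exchange_y_halo(double complex *field)
{
    MPI_Sendrecv(&field[1 * SUB_N_PY + 1],            1, X_DIRECTION_DOUBLE_COMPLEX, bmRank, 0,
                 &field[1 * SUB_N_PY + SUB_N_PY - 1], 1, X_DIRECTION_DOUBLE_COMPLEX, tpRank, 0,
                 grid_comm, MPI_STATUS_IGNORE);
}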
Example #3
void topo_cartesian(int rank, int *dims, int *coords, int *neigh)
{
  int periods[3];
  
  MPI_Comm commcart;
  
  periods[0]=1;
  periods[1]=1;
  periods[2]=1;

  // creation of cartesian communicator
  MPI_Cart_create(MPI_COMM_WORLD,3,dims,periods,0,&commcart);

  // getting the cartesian position
  MPI_Cart_coords(commcart,rank,3,coords);
  
  // getting the neighbors
  MPI_Cart_shift(commcart,0,1,neigh+0,neigh+1); //X
  MPI_Cart_shift(commcart,1,1,neigh+2,neigh+3); //Y
  MPI_Cart_shift(commcart,2,1,neigh+4,neigh+5); //Z

  printf(" proc #%d has coordinates %d %d %d and neighbors Xm=%d Xp=%d Ym=%d Yp=%d Zm=%d Zp=%d \n",rank,coords[0],coords[1],coords[2],neigh[0],neigh[1],neigh[2],neigh[3],neigh[4],neigh[5]);
  //  printf(" dims = %d %d %d\n",dims[0],dims[1],dims[2]);

}
Example #4
/* Function that creates a list of the neighbours for each process */ 
inline void neighbour_table(int* neighbours, MPI_Comm grid, int proc_rank){
  int move, id;
  int coord[2];
  id = 1;
  // move by one process at a time ==> move = 1
  move = 1;
  // get Left and Right
  MPI_Cart_shift(grid, id, move, &neighbours[L], &neighbours[R]);
  id = 0;
  // get Up and Down
  MPI_Cart_shift(grid, id, move, &neighbours[U], &neighbours[D]);
  // get current proc coordinates
  MPI_Cart_coords(grid, proc_rank, 2, coord);
  coord[0]--;
  coord[1]--;
  // determine Up-Left neighbour
  MPI_Cart_rank(grid, coord, &neighbours[UL]);
  coord[1]+=2;
  // determine Up-Right neighbour
  MPI_Cart_rank(grid, coord, &neighbours[UR]);
  coord[0]+=2;
  // determine Down-Right neighbour
  MPI_Cart_rank(grid, coord, &neighbours[LR]);
  coord[1]-=2;
  // determine Down-Left neighbour
  MPI_Cart_rank(grid, coord, &neighbours[LL]);
  return;
}
Example #5
void initial_send(MPI_Comm &Comm_Cart,int rank, float **A_tmp, float **A,
					 float **B_tmp,float **B, MPI_Status &status, int size, int n){
	int mycoords[2] ={0,0};
	MPI_Cart_coords(Comm_Cart,rank,2,mycoords);

	int a_left_displ, a_right_displ, b_top_displ, b_bot_displ;

	a_left_displ=a_right_displ=b_top_displ=b_bot_displ=rank;

	// Shifts the initial value of A(i,j) by i steps to the left (in j direction)
	MPI_Cart_shift(Comm_Cart, 0, mycoords[1], &b_top_displ, &b_bot_displ);
	MPI_Cart_shift(Comm_Cart, 1, mycoords[0], &a_left_displ, &a_right_displ);


	float *sendptrA, *recvptrA,*sendptrB, *recvptrB;
	sendptrA = &(A[0][0]);
	recvptrA = &(A_tmp[0][0]);

	sendptrB = &(B[0][0]);
	recvptrB = &(B_tmp[0][0]);


	// Sends initial values of A to the left
	MPI_Sendrecv(sendptrA,n*n, MPI_FLOAT, a_left_displ,  lr_tag,
				 recvptrA,n*n, MPI_FLOAT, a_right_displ, lr_tag,
					Comm_Cart, &status);

	// Sends initial values of B to the top
	MPI_Sendrecv(sendptrB,n*n, MPI_FLOAT, b_top_displ, bt_tag,
				 recvptrB,n*n, MPI_FLOAT, b_bot_displ, bt_tag,
					Comm_Cart, &status);

}
Example #6
void boundary(GRID_INFO_TYPE* grid, int spin[][LENGTH], int nbr1[] , int nbr2[])
{
 int i,*U2,*D2,*R2,*L2,m,n,tag=50;
     U2=(int *)malloc(LENGTH*sizeof(int));
     D2=(int *)malloc(LENGTH*sizeof(int));
     R2=(int *)malloc(LENGTH*sizeof(int));
     L2=(int *)malloc(LENGTH*sizeof(int));
/* put  the spins at the left, right, up and down boundaries into separate arrays */
     for (i = 1 ; i < LENGTH-1 ; i++)    
     {  U2[i]=spin[i][1];
       D2[i]=spin[i][LENGTH-2];
       L2[i]=spin[1][i];
       R2[i]=spin[LENGTH-2][i];
      }
/*
send the boundary arrays to the appropriate neighbour processes.
Remember: dimension 0 is up/down and dimension 1 is left/right.
*/
       MPI_Cart_shift(grid->comm,0,-1,&m,&n); /* find the neighbor process down the current one */ 
       MPI_Send(U2,LENGTH, MPI_INT,n,tag,MPI_COMM_WORLD); /* send U2 buffer with dimension LENGTH of type MPI_INT to  process "n" with "tag" */
       MPI_Cart_shift(grid->comm,0,1,&m,&n);
       MPI_Send(D2,LENGTH, MPI_INT,n,tag,MPI_COMM_WORLD);
       MPI_Cart_shift(grid->comm,1,1,&m,&n);
       MPI_Send(R2,LENGTH, MPI_INT,n,tag,MPI_COMM_WORLD);
       MPI_Cart_shift(grid->comm,1,-1,&m,&n);
       MPI_Send(L2,LENGTH, MPI_INT,n,tag,MPI_COMM_WORLD);
	free(U2);
	free(D2);
	free(L2);
	free(R2);
}
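boundary() only posts the four sends; the matching receives live elsewhere in the original program. A hedged sketch of what the receiving side might look like (hypothetical buffer names, same tag, same shift convention):

/* Sketch only: for every send to the dest rank n in boundary(), the identical
 * MPI_Cart_shift call here yields the source rank m the message arrives from. */
void boundary_recv(GRID_INFO_TYPE* grid, int from_up[], int from_down[],
                   int from_left[], int from_right[])
{
 int m,n,tag=50;
 MPI_Status status;
     MPI_Cart_shift(grid->comm,0,-1,&m,&n);
     MPI_Recv(from_up, LENGTH, MPI_INT, m, tag, MPI_COMM_WORLD, &status);
     MPI_Cart_shift(grid->comm,0,1,&m,&n);
     MPI_Recv(from_down, LENGTH, MPI_INT, m, tag, MPI_COMM_WORLD, &status);
     MPI_Cart_shift(grid->comm,1,1,&m,&n);
     MPI_Recv(from_left, LENGTH, MPI_INT, m, tag, MPI_COMM_WORLD, &status);
     MPI_Cart_shift(grid->comm,1,-1,&m,&n);
     MPI_Recv(from_right, LENGTH, MPI_INT, m, tag, MPI_COMM_WORLD, &status);
}

Two caveats: the sends above use ranks obtained from grid->comm but communicate over MPI_COMM_WORLD, which only lines up if the Cartesian communicator was created without rank reordering, and posting four blocking sends before any receive relies on eager buffering; a pair of MPI_Sendrecv calls per dimension would avoid both issues.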
Example #7
void calc_node_neighbors(int node)
{
  int dir;
  
  map_node_array(node,node_pos);
  for(dir=0;dir<3;dir++) {
    int buf;
    MPI_Cart_shift(comm_cart, dir, -1, &buf, node_neighbors + 2*dir);
    MPI_Cart_shift(comm_cart, dir, 1, &buf, node_neighbors + 2*dir + 1);

    /* left boundary ? */
    if (node_pos[dir] == 0) {
      boundary[2*dir] = 1;
    }
    else {
      boundary[2*dir] = 0;
    }
    /* right boundary ? */
   if (node_pos[dir] == node_grid[dir]-1) {
      boundary[2*dir+1] = -1;
    }
    else {
      boundary[2*dir+1] = 0;
    }
  }
  GRID_TRACE(printf("%d: node_grid %d %d %d, pos %d %d %d, node_neighbors ", this_node, node_grid[0], node_grid[1], node_grid[2], node_pos[0], node_pos[1], node_pos[2]));
}
Example #8
void collectAfterPre(Vector u, const Vector v)
{
  int source, dest;
  if (u->comm_rank == 0) {
    int len=u->len-1;
    dcopy(&len, v->data, &v->stride, u->data+1, &u->stride);
  } else if (u->comm_rank == u->comm_size-1) {
    int len=v->len-1;
    dcopy(&len, v->data+1, &v->stride, u->data+1, &u->stride);
  } else
    copyVector(u, v);

  // west
  double recv;
  MPI_Cart_shift(*u->comm, 0,   -1, &source, &dest);
  MPI_Sendrecv(v->data,        1, MPI_DOUBLE, dest,   0,
               u->data, 1, MPI_DOUBLE, source, 0, *u->comm, MPI_STATUS_IGNORE);
  if (source > -1)
    u->data[u->len-2] += u->data[0];

  // east
  MPI_Cart_shift(*u->comm,  0,   1, &source, &dest);
  MPI_Sendrecv(v->data+v->len-1, 1, MPI_DOUBLE, dest,   1,
               u->data,          1, MPI_DOUBLE, source, 1, *u->comm, MPI_STATUS_IGNORE);
  if (source > -1)
    u->data[1] += u->data[0];

  u->data[0] = u->data[u->len-1] = 0.0;
}
Example #9
int main(int argc, char *argv[]) {
    int SIZE = atoi(argv[1]);
    MPI_Status status;
    MPI_Comm comm_3d;
    int rank, p;
    int dims[3], periods[3], coords[3];
    dims[0] = dims[1] = dims[2] = periods[0] = periods[1] = periods[2] = 0;
    int i;
    int *b;
    int *my_b = (int *) malloc(SIZE * sizeof(int));

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &p);

    // Start the vector that is broadcast.
    if (rank == 0){
        b = (int *) malloc(SIZE * sizeof(int));
        for (i = 0; i < SIZE; i++) my_b[i] = b[i] = i;
    }

    // Create the mesh.
    MPI_Dims_create(p, 3, dims);
    MPI_Cart_create(MPI_COMM_WORLD, 3, dims, periods, 0, &comm_3d);

    // Load the coordinates.
    MPI_Cart_coords(comm_3d, rank, 3, coords);

    // The first column will start the broadcast along the rows.
    double start = MPI_Wtime();
    int dim_0_succ, dim_0_pred, dim_1_succ, dim_1_pred, dim_2_succ, dim_2_pred;
    dim_0_succ = dim_0_pred = dim_1_succ = dim_1_pred = dim_2_succ = dim_2_pred = 0;
    MPI_Cart_shift(comm_3d, 0, 1, &dim_0_pred, &dim_0_succ);
    MPI_Cart_shift(comm_3d, 1, 1, &dim_1_pred, &dim_1_succ);
    MPI_Cart_shift(comm_3d, 2, 1, &dim_2_pred, &dim_2_succ);

    if (coords[0] == 0 && coords[1] == 0 && coords[2] == 0) {
        MPI_Send(b, SIZE, MPI_INT, dim_0_succ, 0, MPI_COMM_WORLD);
        MPI_Send(b, SIZE, MPI_INT, dim_1_succ, 0, MPI_COMM_WORLD);
        MPI_Send(b, SIZE, MPI_INT, dim_2_succ, 0, MPI_COMM_WORLD);
    } else if (coords[1] == 0 && coords[2] == 0){
        MPI_Recv(my_b, SIZE, MPI_INT, dim_0_pred, 0, MPI_COMM_WORLD, &status);
        MPI_Send(my_b, SIZE, MPI_INT, dim_0_succ, 0, MPI_COMM_WORLD);
        MPI_Send(my_b, SIZE, MPI_INT, dim_1_succ, 0, MPI_COMM_WORLD);
        MPI_Send(my_b, SIZE, MPI_INT, dim_2_succ, 0, MPI_COMM_WORLD);
    } else if (coords[2] == 0){
        MPI_Recv(my_b, SIZE, MPI_INT, dim_1_pred, 0, MPI_COMM_WORLD, &status);
        MPI_Send(my_b, SIZE, MPI_INT, dim_1_succ, 0, MPI_COMM_WORLD);
        MPI_Send(my_b, SIZE, MPI_INT, dim_2_succ, 0, MPI_COMM_WORLD);
    } else {
        MPI_Recv(my_b, SIZE, MPI_INT, dim_2_pred, 0, MPI_COMM_WORLD, &status);
        MPI_Send(my_b, SIZE, MPI_INT, dim_2_succ, 0, MPI_COMM_WORLD);
    }

    double end = MPI_Wtime();
 
    if (rank == 0) printf("%d %f\n", SIZE, end - start);
     
    MPI_Finalize();
}
Example #10
void MatrixMatrixMultiply(double ***a, double ***b, double ***c, int mra, int
        mca, int mrb, int mcb, int *ra, int *ca, int *rb, int *cb, MPI_Comm
        comm)
{
    /*from the teaching book */
    int i, j;
    int num_procs, dims[2], periods[2];
    int myrank, my2drank, mycoords[2];
    int uprank, downrank, leftrank, rightrank, coords[2];
    int shiftsource, shiftdest;
    MPI_Status status; 
    MPI_Comm comm_2d;

    MPI_Comm_size(comm, &num_procs);
    MPI_Comm_rank(comm, &myrank);
    
    dims[0] = dims[1] = 0;
    MPI_Dims_create(num_procs, 2, dims);
    periods[0]= periods[1] = 1;

    MPI_Cart_create(comm, 2, dims, periods, 1, &comm_2d);
    MPI_Comm_rank(comm_2d, &my2drank);
    MPI_Cart_coords(comm_2d, my2drank, 2, mycoords);

    MPI_Cart_shift(comm_2d, 1, -1, &rightrank, &leftrank);
    MPI_Cart_shift(comm_2d, 0, -1, &downrank, &uprank);

    int ia = my2drank;
    int ib = my2drank;

    MPI_Cart_shift(comm_2d, 1, -mycoords[0], &shiftsource, &shiftdest);
    MPI_Sendrecv_replace((*a)[0], mra*mca, MPI_DOUBLE, shiftdest, 1,
            shiftsource, 1, comm_2d, &status);
    MPI_Sendrecv_replace(&ia, 1, MPI_INT, shiftdest, 1, shiftsource, 1,
            comm_2d, &status);

    MPI_Cart_shift(comm_2d, 0, -mycoords[1], &shiftsource, &shiftdest);
    MPI_Sendrecv_replace((*b)[0], mrb*mcb, MPI_DOUBLE, shiftdest, 1,
            shiftsource, 1, comm_2d, &status);  
    MPI_Sendrecv_replace(&ib, 1, MPI_INT, shiftdest, 1, shiftsource, 1,
            comm_2d, &status);

    for (i=0; i<dims[0]; i++){
        MatrixMultiply(ra[ia], ca[ia], rb[ib], cb[ib], *a, *b, c); /* c=c + a*b */

        MPI_Sendrecv_replace((*a)[0], mra*mca, MPI_DOUBLE, leftrank, 1,
                rightrank, 1, comm_2d, &status);
        MPI_Sendrecv_replace((*b)[0], mrb*mcb, MPI_DOUBLE, uprank, 1, downrank,
                1, comm_2d, &status);

        MPI_Sendrecv_replace(&ia, 1, MPI_INT, leftrank, 1, rightrank, 1,
                comm_2d, &status);
        MPI_Sendrecv_replace(&ib, 1, MPI_INT, uprank, 1, downrank, 1,
                comm_2d, &status);
    }

    MPI_Comm_free(&comm_2d);    
}
Example #11
 int main (int argc, char** argv)
 {
    int num_tasks;

    char hostname[80];

    int dims[DIM];
    dims[0] = DIM_0;
    dims[1] = DIM_1;
    dims[2] = DIM_2;

    int periods[DIM] = {false, false, false};
    int reorder = true;
    int my_rank;

    int coords[DIM];

    MPI_Comm cartcomm, y_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &num_tasks);

    if (num_tasks != SIZE) {
        if (my_rank == 0) {
            printf("We need %d proccesses, %d given. Exiting.\n", SIZE, num_tasks);
        }
        
        MPI_Finalize();

		return 0;
        
    }         
    
    gethostname(hostname, 79);
	MPI_Cart_create(MPI_COMM_WORLD, DIM, dims, periods, reorder, &cartcomm);
	MPI_Cart_coords(cartcomm, my_rank, DIM, coords);
	printf("%-15.12s: MPI_COMM_WORLD rank %2d: (%d, %d, %d)\n", hostname, my_rank, coords[0], coords[1], coords[2]);
	
	//neighbors
	int src, dest;
	for (int i = 0; i < 3; i++) {
		MPI_Cart_shift(cartcomm, i, +1, &src, &dest);
		printf("i am %d and my right neighbor in dim %d is %d\n", my_rank, i, dest);
		MPI_Cart_shift(cartcomm, i, -1, &src, &dest);
		printf("i am %d and my left neighbor in dim %d is %d\n", my_rank, i, dest);
	}
	
	
	int keep_dims[3] = {0, 1, 0}; /* keep only the y dimension */
	MPI_Cart_sub(cartcomm, keep_dims, &y_comm);
	printf("%d: my y rank is %d\n", my_rank, coords[1]);

    MPI_Finalize();

    return 0;
 }
Example #12
/*
 * int direction (0 horizontal, 1 vertical)
 * int distance  
 */
void send_to(MPI_Comm *comm, int direction, float *A, int size, int row, int column, int n) {
  int prev_rank, next_rank;
  int distance = 1;
  MPI_Cart_shift(*comm, direction, distance, &prev_rank, &next_rank);
  while(next_rank != MPI_PROC_NULL) {
    MPI_Send(A + row * n + column, size, MPI_FLOAT, next_rank, 0, *comm);
    MPI_Cart_shift(*comm, direction, ++distance, &prev_rank, &next_rank);
  }
} 
Example #13
int main(int argc, char *argv[])
{
	int *matrix_a;
	int *matrix_b;
	int *matrix_c;

	const char *matrix_a_filename = argv[1];
	const char *matrix_b_filename = argv[2];
	const char *matrix_c_filename = argv[3];

	MPI_Comm matrix_comm;

	MPI_Init(&argc, &argv);

	create_matrix_comm(MPI_COMM_WORLD, &matrix_comm);
	MPI_Comm_size(matrix_comm, &size);
	MPI_Comm_rank(matrix_comm, &rank);

	compute_matrixes_variables(matrix_a_filename, matrix_comm);

	alloc_submatrix_buffer(&matrix_a);
	alloc_submatrix_buffer(&matrix_b);
	alloc_submatrix_buffer(&matrix_c);

	distribute_matrix(matrix_a_filename, matrix_a, matrix_comm);
	distribute_matrix(matrix_b_filename, matrix_b, matrix_comm);

	/* The actual cannon algorithms */
	int row_source, row_dst;
	int col_source, col_dst;
	MPI_Cart_shift(matrix_comm, 0, -1, &row_source, &row_dst);
	MPI_Cart_shift(matrix_comm, 1, -1, &col_source, &col_dst);
	int i;
	for (i = 0; i < pp_dims; i++) {
		compute_matrix_mul(matrix_a, matrix_b, matrix_c, N);
		MPI_Sendrecv_replace(matrix_a, sub_n * sub_n, MPI_INT,
				     row_source, 0, row_dst, 0,
				     matrix_comm, MPI_STATUS_IGNORE);

		MPI_Sendrecv_replace(matrix_b, sub_n * sub_n, MPI_INT,
				     col_source, 0, col_dst, 0,
				     matrix_comm, MPI_STATUS_IGNORE);
	}


	write_result(matrix_c_filename, matrix_c, matrix_comm);

	free(matrix_a);
	free(matrix_b);
	free(matrix_c);

	MPI_Comm_free(&matrix_comm);

	MPI_Finalize();
	return 0;
}
Example #14
/******************************************************
 * Function to setup MPI data.
 *
 * (1) Initializes MPI
 * (2) Creates a cartesian communicator for border exchange
 * (3) Distributes the overall grid to the processes
 * (4) Sets up helpful data-type and MPI buffer
 *
 ******************************************************/
void heatMPISetup (int* pargc, char*** pargv, heatGrid *grid, dataMPI* configMPI)
{
    int size,
        dims[2] = {0,0},
        periods[2] = {1,1},
        coords[2];
    int buf_size;
    char *buf;

    /* ==== (1) ==== */
    /* Base init*/
    MPI_Init (pargc, pargv);
    MPI_Comm_rank (MPI_COMM_WORLD, &configMPI->rank);
    MPI_Comm_size (MPI_COMM_WORLD, &size);

    /* ==== (2) ==== */
    /* Create cartesian communicator*/
    MPI_Dims_create (size, 2, dims);
    MPI_Cart_create (MPI_COMM_WORLD, 2, dims, periods, 0, &configMPI->cart);

    /* Store neighbors in the grid */
    MPI_Cart_shift (configMPI->cart, 0, 1, &configMPI->left, &configMPI->right);
    MPI_Cart_shift (configMPI->cart, 1, 1, &configMPI->up,    &configMPI->down);

    /* ==== (3) ==== */
    /* Distribute overall grid to processes */
    MPI_Cart_coords (configMPI->cart, configMPI->rank, 2, coords); /*My coordinate*/

    configMPI->start_x = 1 + (grid->xsize/dims[0])*coords[0];
    if (coords[0]+1 != dims[0])
        /* coords 0 to N-1 get an equal distribution*/
        configMPI->num_cells_x = grid->xsize / (dims[0]);
    else
        /* last coord gets the rest */
        configMPI->num_cells_x = grid->xsize - configMPI->start_x + 1;

    configMPI->start_y = 1 + (grid->ysize/dims[1])*coords[1];
    if (coords[1]+1 != dims[1])
            /* coords 0 to N-1 get an equal distribution*/
            configMPI->num_cells_y = grid->ysize / (dims[1]);
        else
            /* last coord gets the rest */
            configMPI->num_cells_y = grid->ysize - configMPI->start_y + 1;

    /* ==== (4) ==== */
    /* Create datatype to communicate one column */
    MPI_Type_vector (
            configMPI->num_cells_y, /* #blocks */
            1, /* #elements per block */
            grid->xsize+2, /* #stride */
            MPI_DOUBLE, /* old type */
            &configMPI->columntype /* new type */ );
    MPI_Type_commit (&configMPI->columntype);
}
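The column datatype above covers num_cells_y doubles spaced xsize+2 apart, i.e. one column of this process's block. As a hedged sketch (not part of the excerpt), it could be used for the border exchange like this, assuming the heat field is stored row-major as field[y * (xsize+2) + x] with the whole grid allocated on every rank, which is what the global start_x/start_y indices suggest.

/* Sketch only: swap one column with the left/right neighbours using the
 * committed columntype. `field` is a hypothetical flat view of the heat grid. */
static void exchangeColumns(double *field, heatGrid *grid, dataMPI *configMPI)
{
    int rowlen = grid->xsize + 2;
    int xl = configMPI->start_x;                              /* first owned column */
    int xr = configMPI->start_x + configMPI->num_cells_x - 1; /* last owned column  */
    int y0 = configMPI->start_y;

    /* own left column goes left; the right ghost column is filled from the right */
    MPI_Sendrecv(&field[y0 * rowlen + xl],     1, configMPI->columntype, configMPI->left,  0,
                 &field[y0 * rowlen + xr + 1], 1, configMPI->columntype, configMPI->right, 0,
                 configMPI->cart, MPI_STATUS_IGNORE);

    /* own right column goes right; the left ghost column is filled from the left */
    MPI_Sendrecv(&field[y0 * rowlen + xr],     1, configMPI->columntype, configMPI->right, 0,
                 &field[y0 * rowlen + xl - 1], 1, configMPI->columntype, configMPI->left,  0,
                 configMPI->cart, MPI_STATUS_IGNORE);
}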
Example #15
int main_replaced(int argc, char** argv){

    //Initialization
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    //Reading image
    if(rank == 0){
        image = read_bmp("Lenna_blur.bmp");
    }

    //Creating cartesian communicator
    MPI_Dims_create(size, 2, dims);
    MPI_Cart_create( MPI_COMM_WORLD, 2, dims, periods, 0, &cart_comm );
    MPI_Cart_coords( cart_comm, rank, 2, coords );

    MPI_Cart_shift( cart_comm, 0, 1, &north, &south );
    MPI_Cart_shift( cart_comm, 1, 1, &west, &east );

    local_image_size[0] = image_size[0]/dims[0];
    local_image_size[1] = image_size[1]/dims[1];

    //Allocating buffers
    int lsize = local_image_size[0]*local_image_size[1];
    int lsize_border = (local_image_size[0] + 2*BORDER)*(local_image_size[1] + 2*BORDER);
    local_image_orig = (unsigned char*)malloc(sizeof(unsigned char)*lsize);
    local_image[0] = (unsigned char*)calloc(lsize_border, sizeof(unsigned char));
    local_image[1] = (unsigned char*)calloc(lsize_border, sizeof(unsigned char));

    create_types();

    distribute_image();

    initialilze_guess();

    //Main loop
    for(int i = 0; i < ITERATIONS; i++){
        exchange_borders(i);
        perform_convolution(i);
    }

    gather_image();

    MPI_Finalize();

    //Write image
    if(rank==0){
        write_bmp(image, image_size[0], image_size[1]);
    }

    exit(0);
}
Example #16
int main( int argc, char** argv ) {

    int numtasks = 0; 
 
    MPI_( MPI_Init( &argc, &argv ) );
    MPI_( MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN ) );
    MPI_( MPI_Comm_size( MPI_COMM_WORLD, &numtasks ) );
    
    const int DIM = int( std::sqrt( double( numtasks ) ) );
    std::vector< int > dims( 2, DIM );
    std::vector< int > periods( 2, 0 ); //periodic - false -> non-periodic
    const int reorder = 0; //false - no reorder
    MPI_Comm cartcomm;
    MPI_( MPI_Cart_create( MPI_COMM_WORLD, 2, &dims[ 0 ], &periods[ 0 ], reorder, &cartcomm ) ); 
    int task = -1;
    MPI_( MPI_Comm_rank( cartcomm, &task ) );
    std::vector< int > coords( 2, -1 );
    MPI_( MPI_Cart_coords( cartcomm, task, 2, &coords[ 0 ] ) );
     
    std::vector< int > neighbors( 4, -1 );
    enum { UP = 0, DOWN, LEFT, RIGHT };
    // compute the shifted source and destination ranks, given a shift direction and amount
    //MPI_Cart_shift is used to find two "nearby" neighbors of the calling process
    //along a specified direction of an N-dimensional grid
    //The direction and offset are specified as a signed integer
    //If the sign of the displacement is positive the "source" rank is lower
    //than the destination rank; if it's negative the opposite is true 
    MPI_( MPI_Cart_shift( cartcomm, 0, 1, &neighbors[ UP ],   &neighbors[ DOWN ] ) );
    MPI_( MPI_Cart_shift( cartcomm, 1, 1, &neighbors[ LEFT ], &neighbors[ RIGHT ] ) );
    int sendbuf = task;
    const int tag = 0x01;
    std::vector< int > recvbuf( 4, MPI_PROC_NULL ); 
    std::vector< MPI_Request > reqs( 2 * 4 );
    for( int i = 0; i != 4; ++i ) {
        int dest = neighbors[ i ];
        int src  = neighbors[ i ];
        MPI_( MPI_Isend( &sendbuf, 1, MPI_INT, dest, tag, MPI_COMM_WORLD, &reqs[ i ] ) );
        MPI_( MPI_Irecv( &recvbuf[ i ], 1, MPI_INT, src, tag, MPI_COMM_WORLD, &reqs[ i + 4 ] ) );
    }
    std::vector< MPI_Status  > status( 2 * 4 ); 
    MPI_( MPI_Waitall( 8, &reqs[ 0 ], &status[ 0 ] ) );

    std::ostringstream os;
    os << "rank= " << task << " coords= " << coords[ 0 ] << ',' << coords[ 1 ]
       << " neighbors= " << neighbors[ UP ] << ',' << neighbors[ DOWN ] << ','
       << neighbors[ LEFT ] << ',' << neighbors[ RIGHT ] << '\n';
    std::cout << os.str(); os.flush();
 
    MPI_( MPI_Finalize() );
    
    return 0;
}
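Because periods is all zeros here, ranks on the edge of the grid get MPI_PROC_NULL back from MPI_Cart_shift for the missing neighbour, and communication with MPI_PROC_NULL completes immediately without transferring data, which is why the exchange loop above needs no special-casing at the borders. A minimal standalone sketch of that behaviour (plain C, independent of the example's MPI_ error-checking wrapper):

/* Sketch: on a non-periodic 1-D Cartesian grid the end ranks see MPI_PROC_NULL
 * as their missing neighbour; MPI_Sendrecv with MPI_PROC_NULL is a no-op and
 * leaves the receive buffer untouched. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char** argv)
{
    int nprocs, rank, left, right;
    int dims[1] = {0}, periods[1] = {0};
    MPI_Comm line;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Dims_create(nprocs, 1, dims);
    MPI_Cart_create(MPI_COMM_WORLD, 1, dims, periods, 0, &line);
    MPI_Comm_rank(line, &rank);
    MPI_Cart_shift(line, 0, 1, &left, &right);

    int token = rank, from_left = -1;
    MPI_Sendrecv(&token, 1, MPI_INT, right, 0,
                 &from_left, 1, MPI_INT, left, 0, line, MPI_STATUS_IGNORE);
    printf("rank %d: left=%d right=%d received=%d\n", rank, left, right, from_left);

    MPI_Finalize();
    return 0;
}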
Example #17
int main(int argc,char **argv){
	int wymiar=2,sasiedzi[4],koordynaty[2],rank;
	int dims[2]={2,3};
	int okresowosc[2]={1,0};
	MPI_Init(&argc,&argv);
	MPI_Comm comm_cart;
	MPI_Cart_create(MPI_COMM_WORLD,wymiar,dims,okresowosc,0,&comm_cart);
	MPI_Comm_rank(comm_cart,&rank);
	MPI_Cart_coords(comm_cart,rank,2,koordynaty);
	MPI_Cart_shift(comm_cart,0,1,&sasiedzi[0],&sasiedzi[1]);
	MPI_Cart_shift(comm_cart,1,1,&sasiedzi[2],&sasiedzi[3]);
	printf("rank = %d coordinates are %d %d neighbours = %d %d %d %d \n",rank,koordynaty[0],koordynaty[1],sasiedzi[0],sasiedzi[1],sasiedzi[2],sasiedzi[3]);
	MPI_Finalize();	
	return 0;
}
Example #18
/*-----------------------------------------------------------*/
int findNeighbours(){
	int dims[1];
	int periods[1];
	int reorder;

	/* find a good process distribution */
	dims[0] = 0; /* zero so that dims_create tries to rearrange */
	MPI_Dims_create(numMPIprocs, 1, dims);

	/* set periods equal to TRUE for periodic boundary conditions ... */
	periods[0] = TRUE;
	/* ...and reorder = FALSE */
	reorder = FALSE;

	/* Create the cartesian topology */
	MPI_Cart_create(comm, 1, dims, periods, reorder, &commCart);

	/* Find the ranks of the left and right neighbour */
	MPI_Cart_shift(commCart, 0, 1, &leftNeighbour, &rightNeighbour);

    MPI_Isend(&myMPIRank, 1, MPI_INT, leftNeighbour, TAG, commCart, &requestArray[0]);

    MPI_Isend(&myMPIRank, 1, MPI_INT, rightNeighbour, TAG, commCart, &requestArray[1]);

    MPI_Irecv(&myGASPIrightNeighbour, 1, MPI_INT, leftNeighbour, TAG, commCart, &requestArray[2]);

    MPI_Irecv(&myGASPIleftNeighbour, 1, MPI_INT, rightNeighbour, TAG, commCart, &requestArray[3]);

    MPI_Waitall(4, requestArray, statusArray);

	return 0;
}
Example #19
void SubDomain::init(int mpi_rank, int mpi_size, Discretization& discretization)
{
    // determine the number of subdomains in the x and y dimensions
    int dims[2] = { 0, 0 };
    MPI_Dims_create(mpi_size, 2, dims);
    ndomy = dims[0];
    ndomx = dims[1];

    // create a 2D non-periodic cartesian topology
    int periods[2] = { 0, 0 };
    MPI_Comm comm_cart;
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &comm_cart);
    domain.comm_cart = comm_cart;

    // retrieve coordinates of the rank in the topology
    int coords[2];
    MPI_Cart_coords(comm_cart, mpi_rank, 2, coords);
    domy = coords[0]+1;
    domx = coords[1]+1;

    // set neighbours for all directions
    MPI_Cart_shift(comm_cart, 0, 1, &neighbour_south, &neighbour_north);
    MPI_Cart_shift(comm_cart, 1, 1, &neighbour_west, &neighbour_east);

    // get bounding box
    nx = discretization.nx / ndomx;
    ny = discretization.ny / ndomy;
    startx = (domx-1)*nx+1;
    starty = (domy-1)*ny+1;

    // adjust for grid dimensions that do not divide evenly between the
    // sub-domains
    if( domx == ndomx )
        nx = discretization.nx - startx + 1;
    if( domy == ndomy )
        ny = discretization.ny - starty + 1;

    endx = startx + nx -1;
    endy = starty + ny -1;

    // get total number of grid points in this sub-domain
    N = nx*ny;

    rank = mpi_rank;
    size = mpi_size;
}
Example #20
int main(int argc,char **argv)
{
    int rank,neighbors[4],coords[2];
    int periods[2]= {1, 0};
    int ndims=2;
    int dims[2]={1,0};
    MPI_Comm comm_cart;
    MPI_Init(&argc,&argv);
    MPI_Cart_create(MPI_COMM_WORLD,ndims,dims,periods,0,&comm_cart);
    MPI_Comm_rank(comm_cart,&rank);
    MPI_Cart_coords(comm_cart,rank,2,coords);
    MPI_Cart_shift(comm_cart,0,1,&neighbors[0],&neighbors[1]);
    MPI_Cart_shift(comm_cart,1,1,&neighbors[2],&neighbors[3]);
    printf("rank= %d coordinates = %d %d neighbours= %d %d %d %d\n",rank,coords[0],coords[1],neighbors[0],neighbors[1],neighbors[2],neighbors[3]);
    MPI_Finalize();
    return 0;
}
Example #21
void collectMatrix(Matrix u)
{
#ifdef HAVE_MPI
  int source, dest;
  // south
  MPI_Cart_shift(*u->as_vec->comm, 1, -1, &source, &dest);
  MPI_Sendrecv(u->data[1]+1, u->rows-2, MPI_DOUBLE, dest, 0,
               u->data[u->cols-1]+1, u->rows-2, MPI_DOUBLE, source, 0,
               *u->as_vec->comm, MPI_STATUS_IGNORE);

  // north
  MPI_Cart_shift(*u->as_vec->comm, 1, 1, &source, &dest);
  MPI_Sendrecv(u->data[u->cols-2]+1, u->rows-2, MPI_DOUBLE, dest, 1,
               u->data[0]+1, u->rows-2, MPI_DOUBLE, source, 1,
               *u->as_vec->comm, MPI_STATUS_IGNORE);

  Vector sendBuf = createVector(u->cols-2);
  Vector recvBuf = createVector(u->cols-2);

  // west
  MPI_Cart_shift(*u->as_vec->comm, 0, -1, &source, &dest);
  if (dest != MPI_PROC_NULL)
    copyVectorDispl(sendBuf, u->row[1], u->cols-2, 1);
  MPI_Sendrecv(sendBuf->data, sendBuf->len, MPI_DOUBLE, dest, 2,
               recvBuf->data, recvBuf->len, MPI_DOUBLE, source, 2,
               *u->as_vec->comm, MPI_STATUS_IGNORE);
  if (source != MPI_PROC_NULL)
    dcopy(&recvBuf->len, recvBuf->data, &recvBuf->stride,
          u->row[u->rows-1]->data+u->rows, &u->rows);

  // east
  MPI_Cart_shift(*u->as_vec->comm, 0, 1, &source, &dest);
  if (dest != MPI_PROC_NULL)
    copyVectorDispl(sendBuf, u->row[u->rows-2], u->cols-2, 1);
  MPI_Sendrecv(sendBuf->data, sendBuf->len, MPI_DOUBLE, dest, 2,
               recvBuf->data, recvBuf->len, MPI_DOUBLE, source, 2,
               *u->as_vec->comm, MPI_STATUS_IGNORE);
  if (source != MPI_PROC_NULL)
    dcopy(&recvBuf->len, recvBuf->data, &recvBuf->stride,
          u->row[0]->data+u->rows, &u->rows);

  freeVector(sendBuf);
  freeVector(recvBuf);
#endif
}
Example #22
void receive_from_left(MPI_Comm *comm, int direction, float *A, int size, int row, int column, int n, int k) {
  int prev_rank, next_rank, coords[2], startingRow, startingColumn;
  int distance = 1;
  MPI_Status status;

  MPI_Cart_shift(*comm, direction, distance, &prev_rank, &next_rank);
  while(prev_rank != MPI_PROC_NULL) {
    MPI_Cart_coords(*comm, prev_rank, 2, coords);
    
    startingRow     = coords[1] * n; 
    startingColumn  = coords[0] * n;
    
    if( k >= startingColumn && k < startingColumn + n ) {
      MPI_Recv(A, size, MPI_FLOAT, prev_rank, 0, *comm, &status);
    }
    MPI_Cart_shift(*comm, direction, ++distance, &prev_rank, &next_rank);
  }
}
Example #23
void Setup_Proc_Grid(int argc, char **argv)
{
  int wrap_around[2];
  int reorder;

  Debug("My_MPI_Init", 0);

  /* Retrieve the number of processes P */
  MPI_Comm_size(MPI_COMM_WORLD, &P);

  /* Calculate the number of processes per column and per row for the grid */
  if (argc > 2)
  {
    P_grid[X_DIR] = atoi(argv[1]);
    P_grid[Y_DIR] = atoi(argv[2]);
    if (P_grid[X_DIR] * P_grid[Y_DIR] != P)
      Debug("ERROR : Process grid dimensions do not match with P", 1);
  }
  else
    Debug("ERROR : Wrong parameter input", 1);

  /* Create process topology (2D grid) */
  wrap_around[X_DIR] = 0;
  wrap_around[Y_DIR] = 0;       /* do not connect first and last process */
  reorder = 1;                  /* reorder process ranks */
  MPI_Cart_create(MPI_COMM_WORLD, 2, P_grid, wrap_around, reorder, &grid_comm);

  /* Retrieve new rank and Cartesian coordinates of this process */
  MPI_Comm_rank(grid_comm, &proc_rank); /* Rank of process in new communicator */
  MPI_Cart_coords(grid_comm, proc_rank, 2, proc_coord); /* Coordinates of process in new communicator */

  if (proc_rank == 0)
    printf("pt: Topology: %d %d %d\n", P, P_grid[X_DIR], P_grid[Y_DIR]);

  if (DEBUG)
    printf("(%i) (x,y)=(%i,%i)\n", proc_rank, proc_coord[X_DIR], proc_coord[Y_DIR]);

  /* calculate ranks of neighbouring processes */
  MPI_Cart_shift(grid_comm, Y_DIR, 1, &proc_top, &proc_bottom); /* rank of processes proc_top and proc_bottom */
  MPI_Cart_shift(grid_comm, X_DIR, 1, &proc_left, &proc_right); /* rank of processes proc_left and proc_right */

  if (DEBUG)
    printf("(%i) top %i, right %i, bottom %i, left %i\n", proc_rank, proc_top, proc_right, proc_bottom, proc_left);
}
Example #24
FORTRAN_API void FORT_CALL mpi_cart_shift_( MPI_Fint *comm, MPI_Fint *direction, MPI_Fint *shift, MPI_Fint *source, MPI_Fint *dest, MPI_Fint *ierr )
{
    int lsource;
    int ldest;

    *ierr =     MPI_Cart_shift( MPI_Comm_f2c(*comm), (int)*direction, 
                                (int)*shift, &lsource, &ldest );
    *source = (MPI_Fint)lsource;
    *dest = (MPI_Fint)ldest;
}
Example #25
int main (int argc, char *argv[])
{
    int my_rank, size;
    int snd_buf, rcv_buf;
    int right, left;
    int sum, i;

    MPI_Comm    new_comm;
    int  dims[max_dims], periods[max_dims], reorder;
    /*int         my_coords[max_dims]; */

    MPI_Status  status;
    MPI_Request request;

    MPI_Init(&argc, &argv);

    /* Get process info. */
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Set cartesian topology. */
    dims[0] = size;
    periods[0] = 1;
    reorder = 1;

    MPI_Cart_create(MPI_COMM_WORLD, max_dims, dims, periods, reorder,&new_comm);

    /* Get coords */
    MPI_Comm_rank(new_comm, &my_rank);
    /* MPI_Cart_coords(new_comm, my_rank, max_dims, my_coords); */

    /* Get nearest neighbour rank. */
    MPI_Cart_shift(new_comm, 0, 1, &left, &right);


    /* Compute global sum. */
    sum = 0;
    snd_buf = my_rank;

    for( i = 0; i < size; i++) {
        MPI_Isend(&snd_buf, 1, MPI_INT, right, to_right, new_comm, &request);

        MPI_Recv(&rcv_buf, 1, MPI_INT, left, to_right, new_comm, &status);

        MPI_Wait(&request, &status);

        snd_buf = rcv_buf;
        sum += rcv_buf;
    }

    printf ("Rank %i:\tSum = %i\n", my_rank, sum);
    /* printf ("Rank %i, Coords = %i: Sum = %i\n",
                my_rank, my_coords[0], sum); */

    MPI_Finalize();
}
Example #26
void mpi_manager_1D::determin_OtherRanks() {

	// Find neighbouring ranks:
	MPI_Cart_shift(comm1d, 0, 1, &left , &right);

	// Determine ranks of neighbour processes:
	int shiftcoord;

	Neighbours.resize(Index::set(-ntasks), Index::set(ntasks));
	Neighbours.clear();

	for(int pos=-ntasks; pos<=ntasks; ++pos){
		shiftcoord = (coords + pos);
		if(shiftcoord < 0) shiftcoord += ntasks;

		if(shiftcoord>=0 && shiftcoord<ntasks) {
			// Now determine rank at relative shifted position
			// std::cout << " Cart ";
			// std::cout << shiftcoord[0] << " ";
			// std::cout << shiftcoord[1] << " ";
			// std::cout << rank << " ";
			// std::cout << nproc[0] << " ";
			// std::cout << nproc[1] << " ";
			// std::cout << std::endl;
			MPI_Cart_rank(comm1d, &shiftcoord, &Neighbours(pos));
		} else {
			// If outside domain set to error value
			Neighbours(pos) = MPI_PROC_NULL;
		}

	
	}


	NeighboursCyclic.resize(Index::set(-ntasks), Index::set(ntasks));
	NeighboursCyclic.clear();

	for(int pos=-ntasks; pos<=ntasks; ++pos){
		shiftcoord = (coords + pos)%ntasks;
		if(shiftcoord < 0) shiftcoord += ntasks;

		// Now determine rank at relative shifted position
		MPI_Cart_rank(comm1d, &shiftcoord, &NeighboursCyclic(pos));
			
	}
	
	// Now determine absolute position of ranks
	AllRanks.resize(ntasks);

	for(int coord=0; coord<ntasks; ++coord) {
		MPI_Cart_rank(comm1d, &coord, &AllRanks(coord));
	}

}
Example #27
/** Within CART_COMM, processes find out their new rank numbers, their Cartesian coordinates,
  and their neighbors  */
inline void VCtopology3D::setup_vctopology(MPI_Comm old_comm) {
  // create a matrix with ranks, and neighbours for fields
  MPI_Cart_create(old_comm, 3, divisions, periods, reorder, &CART_COMM);
  // create a matrix with ranks, and neighbours for Particles
  MPI_Cart_create(old_comm, 3, divisions, periods_P, reorder, &CART_COMM_P);
  // field Communicator
  if (CART_COMM != MPI_COMM_NULL) {
    MPI_Comm_rank(CART_COMM, &cartesian_rank);
    MPI_Comm_size(CART_COMM, &nproc);
    MPI_Cart_coords(CART_COMM, cartesian_rank, 3, coordinates);

    MPI_Cart_shift(CART_COMM, XDIR, RIGHT, &xleft_neighbor, &xright_neighbor);
    MPI_Cart_shift(CART_COMM, YDIR, RIGHT, &yleft_neighbor, &yright_neighbor);
    MPI_Cart_shift(CART_COMM, ZDIR, RIGHT, &zleft_neighbor, &zright_neighbor);
  }
  else {
    // EXCEPTION
    cout << "A process is thrown away from the new topology for fields. VCtopology3D.h" << endl;
  }
  // Particles Communicator
  if (CART_COMM_P != MPI_COMM_NULL) {
    MPI_Comm_rank(CART_COMM_P, &cartesian_rank);
    MPI_Comm_size(CART_COMM, &nproc);
    MPI_Cart_coords(CART_COMM_P, cartesian_rank, 3, coordinates);

    MPI_Cart_shift(CART_COMM_P, XDIR, RIGHT, &xleft_neighbor_P, &xright_neighbor_P);
    MPI_Cart_shift(CART_COMM_P, YDIR, RIGHT, &yleft_neighbor_P, &yright_neighbor_P);
    MPI_Cart_shift(CART_COMM_P, ZDIR, RIGHT, &zleft_neighbor_P, &zright_neighbor_P);
  }
  else {
    // EXCEPTION
    cout << "A process is thrown away from the new topology for Particles. VCtopology3D.h" << endl;
  }

}
Example #28
/**
 * @brief Generates the 2D topology and establishes the neighbor relationships between MPI processes
 *
 * @param[in, out]      rank		The rank of the calling MPI process
 * @param[in]  		size		The total number of MPI processes available
 * @param[in]  		topSize		The desired topology size (this must match the number of available MPI processes)
 * @param[out] 		neighbors	The list that will be populated with the direct neighbors of the calling MPI process
 * @param[out] 		topIndex	The 2D index that the calling MPI process will have in the topology
 * @param[out]		cartComm	The Cartesian MPI communicator
 */
int ApplyTopology(int * rank, int size, const int2 * topSize, int * neighbors, int2 * topIndex, MPI_Comm * cartComm)
{
	int topologySize = topSize->x * topSize->y;
	int dimSize[2] = {topSize->x, topSize->y};
	int usePeriods[2] = {0, 0}, newCoords[2];
	int oldRank = * rank;
	
	// The number of MPI processes must fill the topology
	if (size != topologySize)
	{
		OneErrPrintf(* rank == MPI_MASTER_RANK, "Error: The number of MPI processes (%d) doesn't match "
				"the topology size (%d).\n", size, topologySize);
		
		return STATUS_ERR;
	}

	// Create a Cartesian communicator
	MPI_Cart_create(MPI_COMM_WORLD, 2, dimSize, usePeriods, 1, cartComm);

	// Update the rank to be relevant to the new communicator
	MPI_Comm_rank(* cartComm, rank);

	if ((* rank) != oldRank)
	{
		printf("Rank change: from %d to %d\n", oldRank, * rank);
	}

	// Obtain the 2D coordinates in the new communicator
	MPI_Cart_coords(* cartComm, * rank, 2, newCoords);
	* topIndex = make_int2(newCoords[0], newCoords[1]);

	// Obtain the direct neighbor ranks
	MPI_Cart_shift(* cartComm, 0, 1, neighbors + DIR_LEFT, neighbors + DIR_RIGHT);
	MPI_Cart_shift(* cartComm, 1, 1, neighbors + DIR_TOP, neighbors + DIR_BOTTOM);

	// Setting the device here will have effect only for the normal CUDA & MPI version
	SetDeviceAfterInit(* rank);

	return STATUS_OK;
}
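A hedged usage sketch for ApplyTopology (none of it comes from the sample): int2 and make_int2 are assumed to be the CUDA vector types the sample already relies on, STATUS_OK comes from the excerpt, and the 2x2 topology is arbitrary.

/* Sketch only: drive ApplyTopology from a main() on a 2x2 process grid. The
 * sample's own headers are assumed to declare ApplyTopology, STATUS_OK and the
 * CUDA int2/make_int2 helpers. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char* argv[])
{
	int rank, size, neighbors[4];
	int2 topSize, topIndex;
	MPI_Comm cartComm;

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);

	topSize = make_int2(2, 2);	/* must equal the number of MPI processes */
	if (ApplyTopology(&rank, size, &topSize, neighbors, &topIndex, &cartComm) == STATUS_OK)
		printf("rank %d sits at (%d, %d)\n", rank, topIndex.x, topIndex.y);

	MPI_Finalize();
	return 0;
}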
Example #29
void make_neighbours_table(int * neighbours, MPI_Comm comm_cart) {
  int displ = 1;
  int index = 1;
  int coords[2];
  int myrank;

  MPI_Comm_rank(comm_cart, &myrank);

  MPI_Cart_shift(comm_cart, index, displ, &neighbours[LEFT], &neighbours[RIGHT]);
  index = 0;
  MPI_Cart_shift(comm_cart, index, displ, &neighbours[UP], &neighbours[DOWN]);  
  MPI_Cart_coords(comm_cart, myrank, 2, coords);
  coords[0]--;
  coords[1]--;
  MPI_Cart_rank(comm_cart, coords, &neighbours[UPLEFT]);
  coords[0]+=2;
  MPI_Cart_rank(comm_cart, coords, &neighbours[UPRIGHT]);
  coords[1]+=2;
  MPI_Cart_rank(comm_cart, coords, &neighbours[DOWNRIGHT]);
  coords[0]-=2;
  MPI_Cart_rank(comm_cart, coords, &neighbours[DOWNLEFT]);
}
Example #30
int NBC_Comm_neighbors(MPI_Comm comm, int maxindegree, int sources[], int sourceweights[], int maxoutdegree, int destinations[], int destweights[]) {
  int topo, res;
  int index = 0;

  int indeg, outdeg, wgtd;
  res = NBC_Comm_neighbors_count(comm, &indeg, &outdeg, &wgtd);
  if(indeg > maxindegree || outdeg > maxoutdegree) return NBC_INVALID_PARAM; /* we want to return *all* neighbors */

  res = MPI_Topo_test(comm, &topo);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Topo_test() (%i)\n", res); return res; }

  switch(topo) {
    case MPI_CART: /* cartesian */
      {
        int ndims, i, rpeer, speer;
        res = MPI_Cartdim_get(comm, &ndims);
        if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Cartdim_get() (%i)\n", res); return res; }

        for(i = 0; i<ndims; i++) {
          res = MPI_Cart_shift(comm, i, 1, &rpeer, &speer);
          if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Cart_shift() (%i)\n", res); return res; }
          sources[index] = destinations[index] = rpeer; index++;
          sources[index] = destinations[index] = speer; index++;
        }
      }
      break;
    case MPI_GRAPH: /* graph */
      {
        int rank;
        MPI_Comm_rank(comm, &rank);
        res = MPI_Graph_neighbors(comm, rank, maxindegree, sources);
        if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Graph_neighbors() (%i)\n", res); return res; }
        for(int i=0; i<maxindegree; i++) destinations[i] = sources[i];
      }
      break;
    case MPI_DIST_GRAPH: /* dist graph */
      {
        res = MPI_Dist_graph_neighbors(comm, maxindegree, sources, sourceweights, maxoutdegree, destinations, destweights);
        if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Dist_graph_neighbors() (%i)\n", res); return res; }
      }
      break;
    case MPI_UNDEFINED:
      return NBC_INVALID_TOPOLOGY_COMM;
      break;
    default:
      return NBC_INVALID_PARAM;
      break;
  }

  return NBC_OK;
}