Example #1
#include <mpi.h>
#include <stdio.h>

int main(int argc, char* argv[])
{
    int r=-1;
    double a=37.0;
    MPI_Datatype t, u, v, w;
    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &r);
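    /* Zero counts and blocklengths are legal for MPI_Type_vector: t, u and v
       all describe zero doubles, and only w (1 block of 1 element) describes
       a single double.  The broadcasts below should therefore all succeed,
       and only the last one actually moves data. */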

    MPI_Type_vector( 0, 0, 0, MPI_DOUBLE, &t);
    MPI_Type_vector( 1, 0, 0, MPI_DOUBLE, &u);
    MPI_Type_vector( 0, 1, 0, MPI_DOUBLE, &v);
    MPI_Type_vector( 1, 1, 0, MPI_DOUBLE, &w);

    MPI_Type_commit(&t);
    MPI_Type_commit(&u);
    MPI_Type_commit(&v);
    MPI_Type_commit(&w);

    MPI_Bcast( &a, 1, t, 0, MPI_COMM_WORLD );
    if (r==0) printf("MPI_Bcast of 0x0 datatype succeeded.\n");
    MPI_Bcast( &a, 1, u, 0, MPI_COMM_WORLD );
    if (r==0) printf("MPI_Bcast of 1x0 datatype succeeded.\n");
    MPI_Bcast( &a, 1, v, 0, MPI_COMM_WORLD );
    if (r==0) printf("MPI_Bcast of 0x1 datatype succeeded.\n");
    MPI_Bcast( &a, 1, w, 0, MPI_COMM_WORLD );
    if (r==0) printf("MPI_Bcast of 1x1 datatype succeeded.\n");

    MPI_Type_free(&t);
    MPI_Type_free(&u);
    MPI_Type_free(&v);
    MPI_Type_free(&w);

    MPI_Finalize();
    return 0;
}
Example #2
int makeHDF5type1(MPI_Datatype *type)
{
    MPI_Datatype ctg, vect, structype, vec2, structype2,
                 vec3, structype3, vec4, structype4, vec5;

    int b[3];
    MPI_Aint d[3];
    MPI_Datatype t[3];
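    /* Note: MPI_LB and MPI_UB, used below as explicit lower/upper bound
       markers, are deprecated and were removed in MPI-3.0; the modern way to
       adjust a type's extent is MPI_Type_create_resized. */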

    MPI_Type_contiguous(4, MPI_BYTE, &ctg);

    MPI_Type_vector(1, 5, 1, ctg, &vect);

    b[0] =         b[1] =       b[2] = 1;
    d[0] = 0;      d[1] = 20;    d[2] = 40;
    t[0] = MPI_LB; t[1] = vect; t[2] = MPI_UB;
    MPI_Type_create_struct(3, b, d, t, &structype);

    MPI_Type_vector(1, 5, 1, structype, &vec2);

    b[0] =         b[1] =        b[2] = 1;
    d[0] = 0;      d[1] = 0;     d[2] = 400;
    t[0] = MPI_LB; t[1] = vec2; t[2] = MPI_UB;
    MPI_Type_create_struct(3, b, d, t, &structype2);

    MPI_Type_vector(1, 5, 1, structype2, &vec3);

    b[0] =         b[1] =        b[2] = 1;
    d[0] = 0;      d[1] = 0;     d[2] = 4000;
    t[0] = MPI_LB; t[1] = vec3; t[2] = MPI_UB;
    MPI_Type_create_struct(3, b, d, t, &structype3);

    MPI_Type_vector(1, 5, 1, structype3, &vec4);

    b[0] =         b[1] =        b[2] = 1;
    d[0] = 0;      d[1] = 0;     d[2] = 40000;
    t[0] = MPI_LB; t[1] = vec4; t[2] = MPI_UB;
    MPI_Type_create_struct(3, b, d, t, &structype4);

    MPI_Type_vector(1, 1, 1, structype4, &vec5);

    b[0] =         b[1] =         b[2] = 1;
    d[0] = 0;      d[1] = 160000; d[2] = 200000;
    t[0] = MPI_LB; t[1] = vec5; t[2] = MPI_UB;
    MPI_Type_create_struct(3, b, d, t, type);

    MPI_Type_free(&ctg);
    MPI_Type_free(&vect);
    MPI_Type_free(&structype);
    MPI_Type_free(&vec2);
    MPI_Type_free(&structype2);
    MPI_Type_free(&vec3);
    MPI_Type_free(&structype3);
    MPI_Type_free(&vec4);
    MPI_Type_free(&structype4);
    MPI_Type_free(&vec5);
    MPI_Type_commit(type);

    return 0;
}
Example #3
void Communicator::init(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
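    /* With count == 1 the stride argument never comes into play: these
       vectors are a single block of 2 and 4 contiguous doubles, so
       MPI_Type_contiguous would describe the same layout. */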
    MPI_Type_vector(1, 2, 2, MPI_DOUBLE, &MPI_VECTOR2D_);
    MPI_Type_vector(1, 4, 4, MPI_DOUBLE, &MPI_TENSOR2D_);
    MPI_Type_commit(&MPI_VECTOR2D_);
    MPI_Type_commit(&MPI_TENSOR2D_);
}
Example #4
/******************************************************
 * Gathers all data on process 0
 *
 * For output and total energy calculation it is
 * necessary to receive all sub-grids on process 0.
 *
 * It is a simple, but non-optimal implementation.
 ******************************************************/
void heatMPIGather (heatGrid *grid, dataMPI* mympi)
{
    int block_size[4]; /*stores: x_start,y_start, num_cells_x, num_cells_y*/
    MPI_Datatype blocktype;
    MPI_Status status;
    int i, size;

    /*Slaves send data*/
    if (mympi->rank != 0)
    {
        /*Prepare block info to be sent*/
        block_size[0] = mympi->start_x;
        block_size[1] = mympi->start_y;
        block_size[2] = mympi->num_cells_x;
        block_size[3] = mympi->num_cells_y;

        /* Create datatype to communicate one block*/
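        /* Note: this type uses num_cells_y-1 blocks, while the matching
           receive on rank 0 builds its type from block_size[3]
           (== num_cells_y) blocks, so the last row of each block is never
           transferred. */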
        MPI_Type_vector (
                mympi->num_cells_y-1, /* #blocks */
                mympi->num_cells_x, /* #elements per block */
                grid->xsize+2, /* #stride */
                MPI_DOUBLE, /* old type */
                &blocktype /* new type */ );
        MPI_Type_commit (&blocktype);

        MPI_Send (block_size, 4, MPI_INT, 0, 123, MPI_COMM_WORLD);
        MPI_Send (&grid->theta[mympi->start_y][mympi->start_x],1 ,blocktype, 0, 123, MPI_COMM_WORLD);

        MPI_Type_free (&blocktype);
    }
    else
    /*Master Receives data*/
    {
        MPI_Comm_size (MPI_COMM_WORLD, &size);
        for (i = 1; i < size; i++)
        {
            /*Receive Block Info*/
            MPI_Recv (block_size, 4, MPI_INT, i, 123, MPI_COMM_WORLD, &status);

            /* Create datatype to communicate one block*/
            MPI_Type_vector (
                    block_size[3], /* #blocks */
                    block_size[2], /* #elements per block */
                    grid->xsize+2, /* #stride */
                    MPI_DOUBLE, /* old type */
                    &blocktype /* new type */ );
            MPI_Type_commit (&blocktype);

            MPI_Recv (&grid->theta[block_size[1]][block_size[0]],1 ,blocktype, i, 123, MPI_COMM_WORLD, &status);

            MPI_Type_free (&blocktype);
        }
    }
}
Example #5
#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
	int rank;
	MPI_Status status;
 
	MPI_Init(&argc, &argv);

	MPI_Datatype type;
	MPI_Type_contiguous(2, MPI_INT, &type);

 
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	if (rank == 0)
	{
		int buffer[1000];
		for (int i = 0; i < 1000; i++) {
			buffer[i] = i;
		}
		// vtype: 2 blocks of 3 'type' elements (6 ints each), stride of 5 'type' elements (10 ints)
		MPI_Datatype vtype;
		MPI_Type_vector(2, 3, 5, type, &vtype);
		MPI_Type_commit(&vtype);

		MPI_Send(buffer, 4, vtype, 1, 123, MPI_COMM_WORLD);
		MPI_Send(buffer, 4, vtype, 1, 123, MPI_COMM_WORLD);
	}
	else if (rank == 1)
	{
		int buffer1[1000], buffer2[1000];
		for (int i = 0; i < 1000; i++) {
			buffer1[i] = -1;
			buffer2[i] = -1;
		}
		MPI_Recv(buffer1, 48, MPI_INT, 0, 123, MPI_COMM_WORLD, &status);
		for (int i = 0; i < 50; i++) {
			printf("%i ", buffer1[i]);
		}
		printf("\n");

		MPI_Datatype vtype;
		MPI_Type_vector(4, 6, 7, MPI_INT, &vtype);
		MPI_Type_commit(&vtype);

		MPI_Recv(buffer2, 2, vtype, 0, 123, MPI_COMM_WORLD, &status);
		for (int i = 0; i < 50; i++) {
			printf("%i ", buffer2[i]);
		}
		printf("\n");
	}
 
	MPI_Finalize();
	return 0;
}
Example #6
void Setup_MPI_Datatypes()
{
  Debug("Setup_MPI_Datatypes", 0);
 
  /* Datatype for vertical data exchange (Y_DIR) */
  MPI_Type_vector(dim[X_DIR] - 2, 1, dim[Y_DIR], MPI_DOUBLE, &border_type[Y_DIR]);
  MPI_Type_commit(&border_type[Y_DIR]);
  
  /* Datatype for horizontal data exchange (X_DIR) */
  MPI_Type_vector(dim[Y_DIR] - 2, 1, 1, MPI_DOUBLE, &border_type[X_DIR]);
  MPI_Type_commit(&border_type[X_DIR]);
}
Example #7
int get_datatypes( int *grid, int *start, int *end, \
                    MPI_Datatype *faces, int msg_fac )
{
  int count, blocklength;
  int stride;
  MPI_Aint extent, i;
  MPI_Datatype z_row, oneface[3];
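  /* Note: MPI_Type_extent and MPI_Type_hvector (used for the z_faces below)
     are deprecated and were removed in MPI-3.0; MPI_Type_get_extent and
     MPI_Type_create_hvector are the replacements. */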

  /* set up datatype for x_faces */
  count = end[1] - start[1] + 1;
  blocklength = end[2] - start[2] + 1;
  stride = grid[2];
  MPI_Type_vector( count, blocklength, stride, MPI_DOUBLE, \
                       &oneface[0] );

  /* set up datatype for y_faces */
  count = end[0] - start[0] + 1;
  blocklength = end[2] - start[2] + 1;
  stride = grid[1] * grid[2];
  MPI_Type_vector( count, blocklength, stride, MPI_DOUBLE, \
                       &oneface[1] );

  /* set up datatype for z_faces */
  count = end[1] - start[1] + 1;
  blocklength = 1;
  stride = grid[2];
  MPI_Type_vector( count, blocklength, stride, MPI_DOUBLE, \
                       &z_row );
  MPI_Type_commit( &z_row );

  count = end[0] - start[0] + 1;
  blocklength = 1;
  MPI_Type_extent( MPI_DOUBLE, &extent );
  extent = grid[1] * grid[2] * extent;
  MPI_Type_hvector( count, blocklength, extent, z_row, \
                       &oneface[2] );

  for( i=0 ; i<3 ; i++ )
  {
    MPI_Type_commit( &oneface[i] );
    MPI_Type_vector( msg_fac, 1, 0, oneface[i], &faces[i] ); 
    MPI_Type_commit( &faces[i] );
    MPI_Type_free( &oneface[i] );
  }

  /* Free the z_row Type */
  MPI_Type_free( &z_row );

  return 0;
}
Example #8
int main(int argc, char **argv)
{
    MPI_Datatype vec;
    MPI_Comm comm;
    double *vecin, *vecout;
    int minsize = 2, count;
    int root, i, n, stride, errs = 0;
    int rank, size;

    MTest_Init(&argc, &argv);

    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);

        for (root = 0; root < size; root++) {
            for (count = 1; count < 65000; count = count * 2) {
                n = 12;
                stride = 10;
                vecin = (double *) malloc(n * stride * size * sizeof(double));
                vecout = (double *) malloc(size * n * sizeof(double));

                MPI_Type_vector(n, 1, stride, MPI_DOUBLE, &vec);
                MPI_Type_commit(&vec);

                for (i = 0; i < n * stride; i++)
                    vecin[i] = -2;
                for (i = 0; i < n; i++)
                    vecin[i * stride] = rank * n + i;

                MPI_Gather(vecin, 1, vec, vecout, n, MPI_DOUBLE, root, comm);

                if (rank == root) {
                    for (i = 0; i < n * size; i++) {
                        if (vecout[i] != i) {
                            errs++;
                            if (errs < 10) {
                                fprintf(stderr, "vecout[%d]=%d\n", i, (int) vecout[i]);
                            }
                        }
                    }
                }
                MPI_Type_free(&vec);
                free(vecin);
                free(vecout);
            }
        }
        MTestFreeComm(&comm);
    }

    /* do a zero length gather */
    MPI_Gather(NULL, 0, MPI_BYTE, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD);

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
Example #9
File: grid.c Project: Thundzz/TDP
double* partition_matrix(double *a,
	int N, int gd, 
	MPI_Datatype *type_block)
{
	MPI_Datatype type_block_tmp;

	int NB = N/gd;

	double* b = malloc(NB*NB*sizeof(double));
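	/* type_block_tmp describes one NB x NB block of the N x N row-major
	   matrix (NB rows of NB doubles, stride N).  Resizing its extent to
	   sizeof(double) lets MPI_Scatterv address consecutive blocks with
	   element-granularity displacements. */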

	MPI_Type_vector(NB, NB, N, MPI_DOUBLE, &type_block_tmp);
	MPI_Type_create_resized(type_block_tmp, 0, sizeof(double), type_block);
	MPI_Type_commit(type_block);

	int counts[gd*gd];
	int disps[gd*gd];
	for (int i=0; i<gd; i++) {
		for (int j=0; j<gd; j++) {
			disps[i*gd+j] = i*N*NB+j*NB;
			counts [i*gd+j] = 1;
		}
	}
	MPI_Scatterv(a, counts, disps, *type_block, b, NB*NB, MPI_DOUBLE, 0, MPI_COMM_WORLD);	

	return b;
}
Example #10
static int type_create_contiguous_x(MPI_Count count,
	MPI_Datatype oldtype, MPI_Datatype *newtype)
{
    /* to make 'count' fit MPI-3 type processing routines (which take integer
     * counts), we construct a type consisting of N INT_MAX chunks followed by
     * a remainder.  e.g for a count of 4000000000 bytes you would end up with
     * one 2147483647-byte chunk followed immediately by a 1852516353-byte
     * chunk */
    MPI_Datatype chunks, remainder;
    MPI_Aint lb, extent, disps[2];
    int blocklens[2];
    MPI_Datatype types[2];

    MPI_Count c = count/INT_MAX;
    MPI_Count r = count%INT_MAX;
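    /* r < INT_MAX by construction; c is also assumed to fit in an int here,
       since MPI_Type_vector takes an int count. */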

    MPI_Type_vector(c, INT_MAX, INT_MAX, oldtype, &chunks);
    MPI_Type_contiguous(r, oldtype, &remainder);

    MPI_Type_get_extent(oldtype, &lb, &extent);

    blocklens[0] = 1;      blocklens[1] = 1;
    disps[0]     = 0;      disps[1]     = c*extent*INT_MAX;
    types[0]     = chunks; types[1]     = remainder;

    MPI_Type_create_struct(2, blocklens, disps, types, newtype);

    MPI_Type_free(&chunks);
    MPI_Type_free(&remainder);

    return MPI_SUCCESS;
}
Example #11
void distribute_matrix(ATYPE *root_matrix, ATYPE *local_matrix, int local_rank, int proc_size, long partition, uint N){
  int sendcounts[proc_size], displs[proc_size];
  ATYPE *sendbuffer=NULL;

  MPI_Datatype MPI_type, MPI_type2;


  int rest = N - (partition * ( proc_size - 1) );
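  /* MPI_type2 is one column of the N x N row-major matrix (N elements with
     stride N).  Resizing its extent to sizeof(ATYPE) lets MPI_Scatterv place
     consecutive columns at element-granularity displacements. */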


  MPI_Type_vector(N, 1, N, ATYPE_MPI, &MPI_type2);
  MPI_Type_create_resized( MPI_type2, 0, sizeof(ATYPE), &MPI_type);
  MPI_Type_commit(&MPI_type);


  for ( int i=0 ; i<proc_size ; ++i ){
    if ( i == proc_size - 1 ) {
      sendcounts[i] = rest;
    }
    else {
      sendcounts[i] = partition;
    }
    displs[i] = i*partition;
  }

  if ( local_rank == root )
    sendbuffer = &(root_matrix[0]);

  MPI_Scatterv( sendbuffer, sendcounts, displs, MPI_type, &(local_matrix[0]), partition*N, ATYPE_MPI, root, MPI_COMM_WORLD );
  MPI_Type_free(&MPI_type);
}
Example #12
static void init_mpi(void)
{
    MPI_Comm_size(MPI_COMM_WORLD, &nproc); // get the number of processes
    int dim = 2;          // number of dimensions
    int procs[2] = {0,0}; // [0]: number of subdomains in x, [1]: number of subdomains in y
    int period[2] = {0,0};// boundary conditions; 0 means fixed (non-periodic) boundaries
    MPI_Comm grid_comm;
    int reorder = 1;   //re-distribute rank flag

    MPI_Dims_create(nproc, dim, procs); // automatically compute how many subdomains in each direction
    MPI_Cart_create(MPI_COMM_WORLD, 2, procs, period, reorder, &grid_comm); // automatically partition the domain => procs and grid_comm are updated
    MPI_Cart_shift(grid_comm, 0, 1, &ltRank, &rtRank);
    MPI_Cart_shift(grid_comm, 1, 1, &bmRank, &tpRank);

    // find this process's position in the process grid (which row and column it is in)
    int coordinates[2];
    MPI_Comm_rank(grid_comm, &rank);
    MPI_Cart_coords(grid_comm, rank, 2, coordinates);

    SUB_N_X = N_PX / procs[0];
    SUB_N_Y = N_PY / procs[1];
    SUB_N_PX = SUB_N_X + 2; // 2 larger to hold the halo (ghost cells filled with neighbor values)
    SUB_N_PY = SUB_N_Y + 2;
    SUB_N_CELL = SUB_N_PX*SUB_N_PY;
    offsetX = coordinates[0] * SUB_N_X; // offset in cells, not a rank index, hence the factor SUB_N_X
    offsetY = coordinates[1] * SUB_N_Y;

    /* This picks one element every SUB_N_PY elements (a gap of SUB_N_PY-1
       between consecutive elements), repeated for SUB_N_X rows. */
    MPI_Type_vector(SUB_N_X, 1, SUB_N_PY, MPI_C_DOUBLE_COMPLEX, &X_DIRECTION_DOUBLE_COMPLEX);
    MPI_Type_commit(&X_DIRECTION_DOUBLE_COMPLEX);
}
Example #13
/* Extract an m x n submatrix within an m x N matrix and transpose it.
   Assume storage by rows; the defined datatype accesses by columns */
MPI_Datatype transpose_type(int N, int m, int n, MPI_Datatype type)
/* computes a datatype for the transpose of an mxn matrix 
   with entries of type type */
{
  MPI_Datatype subrow, subrow1, submatrix;
  MPI_Aint lb, extent;
  
  MPI_Type_vector(m, 1, N, type, &subrow);
  MPI_Type_get_extent(type, &lb, &extent);
  MPI_Type_create_resized(subrow, 0, extent, &subrow1);
  MPI_Type_contiguous(n, subrow1, &submatrix); 
  MPI_Type_commit(&submatrix);
  MPI_Type_free( &subrow );
  MPI_Type_free( &subrow1 );

  /* Add a consistency test: the size of submatrix should be
     n * m * sizeof(type) and the extent should be ((m-1)*N+n) * sizeof(type) */
  {
      int      tsize;
      MPI_Aint textent, llb;
      MPI_Type_size( type, &tsize );
      MPI_Type_get_true_extent( submatrix, &llb, &textent );
      
      if (textent != tsize * (N * (m-1)+n)) {
	  fprintf( stderr, "Transpose Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
		   (long)textent, (long)(tsize * (N * (m-1)+n)), N, n, m );
      }
  }

  return(submatrix);
}
Example #14
/* Define an n x m submatrix in a n x M local matrix (this is the 
   destination in the transpose matrix */
MPI_Datatype submatrix_type(int M, int m, int n, MPI_Datatype type)
/* computes a datatype for an mxn submatrix within an MxN matrix 
   with entries of type type */
{
  /* MPI_Datatype subrow; */
  MPI_Datatype submatrix;

  /* The book, MPI: The Complete Reference, has the wrong type constructor 
     here.  Since the stride in the vector type is relative to the input 
     type, the stride in the book's code is n times as long as is intended. 
     Since n may not exactly divide N, it is better to simply use the 
     blocklength argument in Type_vector */
  /*
  MPI_Type_contiguous(n, type, &subrow);
  MPI_Type_vector(m, 1, N, subrow, &submatrix);  
  */
  MPI_Type_vector(n, m, M, type, &submatrix );
  MPI_Type_commit(&submatrix);

  /* Add a consistency test: the size of submatrix should be
     n * m * sizeof(type) and the extent should be ((n-1)*M+m) * sizeof(type) */
  {
      int      tsize;
      MPI_Aint textent, lb;
      MPI_Type_size( type, &tsize );
      MPI_Type_get_extent( submatrix, &lb, &textent );
      
      if (textent != tsize * (M * (n-1)+m)) {
	  fprintf( stderr, "Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
		   (long)textent, (long)(tsize * (M * (n-1)+m)), M, n, m );
      }
  }
  return(submatrix);
}
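A minimal sketch of how these two helpers combine (hypothetical: a is an m x N row-major double matrix, b is an n x M row-major double matrix, and rank is the calling process): sending one transpose_type element and receiving one submatrix_type element in a self-exchange transposes the leading m x n block, so that b[j][i] == a[i][j].
MPI_Datatype sendt = transpose_type(N, m, n, MPI_DOUBLE);
MPI_Datatype recvt = submatrix_type(M, m, n, MPI_DOUBLE);
/* reads a's block column by column, writes b's block row by row */
MPI_Sendrecv(a, 1, sendt, rank, 0,
             b, 1, recvt, rank, 0,
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Type_free(&sendt);
MPI_Type_free(&recvt);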
Example #15
void gather_image(){
    // MPI type for image gathering
    MPI_Datatype image_gathering_t;
    MPI_Type_vector(local_image_size[0],
            local_image_size[1], local_image_size[1]+2*BORDER, MPI_UNSIGNED_CHAR, &image_gathering_t);
    MPI_Type_commit(&image_gathering_t);
    
    MPI_Request req[size];

    // gather image data at rank 0
    if(rank == 0){
        // receive data from all ranks
        for(int i = 0; i < size; i++){        	
        	// calc offset of these data
        	int thisCoords[2];
        	MPI_Cart_coords(cart_comm, i, 2, thisCoords ); // coords of this rank
        	int offset = thisCoords[0] * local_image_size[0] * image_size[1] + thisCoords[1] * local_image_size[1];
        	
        	// receive data
        	MPI_Irecv(&image[offset], 1, image_t, i, 99, cart_comm, req+i);
        }
    }
    
    // send image data to rank 0
    MPI_Send(&F(ITERATIONS,0,0), 1, image_gathering_t, 0, 99, cart_comm);

    // wait until all image blocks have been received
    if(rank == 0){
        MPI_Waitall(size, req, MPI_STATUSES_IGNORE);
    }
}
Example #16
#include <mpi.h>
#include <stdio.h>

#define SIZE 4   /* assumed value; SIZE is defined elsewhere in the original program */

int main(int argc, char **argv) {
  int rank;
  double a[SIZE][SIZE] = {{0}};

  MPI_Datatype columntype;

  MPI_Init(&argc,&argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
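  /* columntype describes the first column of the SIZE x SIZE row-major
     matrix: SIZE blocks of 1 double with a stride of SIZE doubles. */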

  MPI_Type_vector(SIZE, 1, SIZE, MPI_DOUBLE, &columntype);
  MPI_Type_commit(&columntype);

  if (rank == 0) {
    for (int i = 0; i < SIZE; i++)
      for (int j = 0; j < SIZE; j++)
        a[i][j] = i*SIZE+j;
  }

  /* only one column is broadcast; this is an example of non-contiguous data */
  MPI_Bcast(a, 1, columntype, 0, MPI_COMM_WORLD);

  for (int i = 0; i < SIZE; i++) {
    for (int j = 0; j < SIZE; j++) {
      printf("rank= %d, a[%d][%d]=%f\n", rank, i, j, a[i][j]);
    }
    printf("\n");
  }

  MPI_Type_free(&columntype);
  MPI_Finalize();
  return 0;
}
Example #17
#include <mpi.h>
#include <stdio.h>

int main(int argc, char* argv[])
{
    int iam, np;
    int m = 2, n = 0, lda = 1;
    double A[2];
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Datatype type = MPI_DOUBLE, vtype;

    MPI_Init(&argc,&argv);
    MPI_Comm_size(comm, &np);
    MPI_Comm_rank(comm, &iam);
    if (np < 2) {
        printf( "Should be at least 2 processes for the test\n");
    } else {
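        /* n == 0 gives a zero-count vector: the type is empty, so the
           broadcast below transfers no data and A is unchanged on all ranks. */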
        MPI_Type_vector(n, m, lda, type, &vtype);
        MPI_Type_commit(&vtype);
        A[0] = -1.0-0.1*iam;
        A[1] = 0.5+0.1*iam;
        printf("In process %i of %i before Bcast: A = %f,%f\n",
               iam, np, A[0], A[1] );
        MPI_Bcast(A, 1, vtype, 0, comm);
        printf("In process %i of %i after Bcast: A = %f,%f\n",
               iam, np, A[0], A[1]);
        MPI_Type_free(&vtype);
    }

    MPI_Finalize();
}
Example #18
void mpi_type_create_vector_(int* count, int* blocklen, int* stride, int* old_type, int* newtype,  int* ierr){
  MPI_Datatype tmp;
  *ierr= MPI_Type_vector(*count, *blocklen, *stride, get_datatype(*old_type), &tmp);
  if(*ierr == MPI_SUCCESS) {
    *newtype = new_datatype(tmp);
  }
}
Example #19
void exchange_boundary(field v, int Lx, int Ly)
{
  MPI_Status status[8];
  MPI_Request req[8];

  MPI_Datatype Row;
  MPI_Type_contiguous(Lx+2, MPI_DOUBLE, &Row);
  MPI_Type_commit(&Row);

  /* Receive requests must be stored in MPI_Request objects (not MPI_Status)
     and waited on along with the sends, so all eight requests go into req[]. */
  MPI_Isend(&v[Ly][0],   1, Row, decomp.north, 0, MPI_COMM_WORLD, &req[0]);
  MPI_Irecv(&v[Ly+1][0], 1, Row, decomp.north, 0, MPI_COMM_WORLD, &req[1]);

  MPI_Isend(&v[1][0], 1, Row, decomp.south, 0, MPI_COMM_WORLD, &req[2]);
  MPI_Irecv(&v[0][0], 1, Row, decomp.south, 0, MPI_COMM_WORLD, &req[3]);

  MPI_Datatype Column;
  MPI_Type_vector(Ly+2, 1, Lx+2, MPI_DOUBLE, &Column);
  MPI_Type_commit(&Column);

  MPI_Isend(&v[0][Lx],   1, Column, decomp.east, 0, MPI_COMM_WORLD, &req[4]);
  MPI_Irecv(&v[0][Lx+1], 1, Column, decomp.east, 0, MPI_COMM_WORLD, &req[5]);

  MPI_Isend(&v[0][1], 1, Column, decomp.west, 0, MPI_COMM_WORLD, &req[6]);
  MPI_Irecv(&v[0][0], 1, Column, decomp.west, 0, MPI_COMM_WORLD, &req[7]);

  MPI_Waitall(8, req, status);

  MPI_Type_free(&Row);
  MPI_Type_free(&Column);
}
Example #20
/*
 * Synopsis
 *
 * int BigMPI_Type_contiguous(MPI_Aint offset,
 *                            MPI_Count count,
 *                            MPI_Datatype   oldtype,
 *                            MPI_Datatype * newtype)
 *
 *  Input Parameters
 *
 *   offset            byte offset of the start of the contiguous chunk
 *   count             replication count (nonnegative integer)
 *   oldtype           old datatype (handle)
 *
 * Output Parameter
 *
 *   newtype           new datatype (handle)
 *
 * Notes
 *
 *   Following the addition of the offset argument, this function no longer
 *   matches the signature of MPI_Type_contiguous, which may break the user
 *   experience for some people.  However, adding it simplifies the primary
 *   purpose of this function, which is to do the heavy lifting _inside_ of
 *   BigMPI.  In particular, it allows us to use MPI_Alltoallw instead of
 *   MPI_Neighborhood_alltoallw.
 *
 */
int BigMPI_Type_contiguous(MPI_Aint offset, MPI_Count count, MPI_Datatype oldtype, MPI_Datatype * newtype)
{
    /* The count has to fit into MPI_Aint for BigMPI to work. */
    if ((uint64_t)count>(uint64_t)bigmpi_count_max) {
        printf("count (%llu) exceeds bigmpi_count_max (%llu)\n",
               (long long unsigned)count, (long long unsigned)bigmpi_count_max);
        fflush(stdout);
    }

#ifdef BIGMPI_AVOID_TYPE_CREATE_STRUCT
    if (offset==0) {
        /* There is no need for this code path in homogeneous execution,
         * but it is useful to exercise anyways. */
        int a, b;
        int prime = BigMPI_Factorize_count(count, &a, &b);
        if (!prime) {
            MPI_Type_vector(a, b, b, oldtype, newtype);
            return MPI_SUCCESS;
        }
    }
#endif
    MPI_Count c = count/bigmpi_int_max;
    MPI_Count r = count%bigmpi_int_max;

    assert(c<bigmpi_int_max);
    assert(r<bigmpi_int_max);

    MPI_Datatype chunks;
    MPI_Type_vector(c, bigmpi_int_max, bigmpi_int_max, oldtype, &chunks);

    MPI_Datatype remainder;
    MPI_Type_contiguous(r, oldtype, &remainder);

    MPI_Aint lb /* unused */, extent;
    MPI_Type_get_extent(oldtype, &lb, &extent);

    MPI_Aint remdisp          = (MPI_Aint)c*bigmpi_int_max*extent;
    int blocklengths[2]       = {1,1};
    MPI_Aint displacements[2] = {offset,offset+remdisp};
    MPI_Datatype types[2]     = {chunks,remainder};
    MPI_Type_create_struct(2, blocklengths, displacements, types, newtype);

    MPI_Type_free(&chunks);
    MPI_Type_free(&remainder);

    return MPI_SUCCESS;
}
Example #21
// Function to create and commit MPI datatypes
void create_types() {
    //Vector to be used in the distribution of the divergence to the different processors: diverg_vector
    MPI_Type_vector(local_height, local_width, local_width*dims[1]+2, MPI_FLOAT, &diverg_vector);
    MPI_Type_commit(&diverg_vector);

    //Vector to be used in border exchange for exchanging rows: border_row_t
    MPI_Type_contiguous(local_width, MPI_FLOAT, &border_row_t);
    MPI_Type_commit(&border_row_t);

    //Vector to be used in border exchange for exchanging columns: border_col_t
    MPI_Type_vector(local_height, 1, local_width, MPI_FLOAT, &border_col_t);
    MPI_Type_commit(&border_col_t);

    //Vector to be used for sending local_pres arrays in gather_pres() function
    MPI_Type_vector(local_height, local_width, local_width + 2, MPI_FLOAT, &local_pres_vector);
    MPI_Type_commit(&local_pres_vector);
}
Example #22
void create_types(){
    MPI_Type_contiguous(local_image_size[0]*local_image_size[1],
            MPI_UNSIGNED_CHAR, &local_image_orig_t);
    MPI_Type_commit(&local_image_orig_t);

    MPI_Type_vector(local_image_size[0],
            local_image_size[1], image_size[1], MPI_UNSIGNED_CHAR, &image_t);
    MPI_Type_commit(&image_t);

    // MPI type for border exchange (row)
    MPI_Type_vector(BORDER,local_image_size[1],local_image_size[1]+2*BORDER,MPI_UNSIGNED_CHAR,&border_row_t);
    MPI_Type_commit(&border_row_t);
    
    // MPI type for border exchange (col)
    MPI_Type_vector(local_image_size[0],BORDER,local_image_size[1]+2*BORDER,MPI_UNSIGNED_CHAR,&border_col_t);
    MPI_Type_commit(&border_col_t);
}
Example #23
int main(int argc, char *argv[])
{
    MPI_Datatype column[LOOPS], xpose[LOOPS];
    double t[NUM_SIZES], ttmp, tmin, tmax, tmean, tdiff;
    int size;
    int i, j, isMonotone, errs = 0, nrows, ncols, isvalid;

    MPI_Init(&argc, &argv);

    tmean = 0;
    size = 1;
    for (i = 0; i < NUM_SIZES + SKIP; i++) {
        nrows = ncols = size;

        ttmp = MPI_Wtime();

        for (j = 0; j < LOOPS; j++) {
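            /* column[j] is one column of an nrows x ncols int matrix; the
               hvector places ncols such columns sizeof(int) apart, i.e. the
               transpose.  MPI_Type_hvector is deprecated; the MPI-3 name is
               MPI_Type_create_hvector. */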
            MPI_Type_vector(nrows, 1, ncols, MPI_INT, &column[j]);
            MPI_Type_hvector(ncols, 1, sizeof(int), column[j], &xpose[j]);
            MPI_Type_commit(&xpose[j]);
        }

        if (i >= SKIP) {
            t[i - SKIP] = MPI_Wtime() - ttmp;
            tmean += t[i - SKIP];
        }

        for (j = 0; j < LOOPS; j++) {
            MPI_Type_free(&xpose[j]);
            MPI_Type_free(&column[j]);
        }

        if (i >= SKIP)
            size *= 2;
    }
    tmean /= NUM_SIZES;

    /* Now, analyze the times to see that they are nearly independent
     * of size */
    for (i = 0; i < NUM_SIZES; i++) {
        /* The difference between the value and the mean is more than
         * a "FRACTION" of mean. */
        if (fabs(t[i] - tmean) > (FRACTION * tmean))
            errs++;
    }

    if (errs) {
        fprintf(stderr, "too much difference in performance: ");
        for (i = 0; i < NUM_SIZES; i++)
            fprintf(stderr, "%.3f ", t[i] * 1e6);
        fprintf(stderr, "\n");
    }
    else
        printf(" No Errors\n");

    MPI_Finalize();
    return 0;
}
Example #24
File: main.c Project: sondrele/NTNU
// Function to create and commit MPI datatypes
// Each datatype is resized to the size of a float; this appears to fix
// segmentation fault issues.
void create_types() {
    MPI_Datatype border_row_t0;
    MPI_Type_contiguous(local_width,            // count
                        MPI_FLOAT,              // old_type
                        &border_row_t0);        // newtype_p
    MPI_Type_create_resized(border_row_t0, 0, sizeof(float), &border_row_t);
    MPI_Type_commit(&border_row_t);

    MPI_Datatype border_col_t0;
    MPI_Type_vector(local_height,               // count
                    1,                          // blocklength
                    local_width + 2,            // stride
                    MPI_FLOAT,                  // old_type
                    &border_col_t0);            // newtype_p
    MPI_Type_create_resized(border_col_t0, 0, sizeof(float), &border_col_t);
    MPI_Type_commit(&border_col_t);

    MPI_Datatype pres_and_diverg_t0;
    MPI_Type_vector(local_height,               // count
                    local_width,                // blocklength
                    imageSize + 2,              // stride
                    MPI_FLOAT,                  // old_type
                    &pres_and_diverg_t0);       // newtype_p
    MPI_Type_create_resized(pres_and_diverg_t0, 0, sizeof(float), &pres_and_diverg_t);
    MPI_Type_commit(&pres_and_diverg_t);

    MPI_Datatype local_diverg_t0;
    MPI_Type_vector(local_height,               // count
                    local_width,                // blocklength
                    local_width,                // stride
                    MPI_FLOAT,                  // old_type
                    &local_diverg_t0);          // newtype_p
    MPI_Type_create_resized(local_diverg_t0, 0, sizeof(float), &local_diverg_t);
    MPI_Type_commit(&local_diverg_t);

    MPI_Datatype local_pres_t0;
    MPI_Type_vector(local_height,               // count
                    local_width,                // blocklength
                    local_width + 2,            // stride
                    MPI_FLOAT,                  // old_type
                    &local_pres_t0);            // newtype_p
    MPI_Type_create_resized(local_pres_t0, 0, sizeof(float), &local_pres_t);
    MPI_Type_commit(&local_pres_t);
}
Example #25
//--------------------------------------------------------------------------
//
// creates a vector datatype
//
// num_elems: number of elements in the vector
// stride: number of elements between start of each element (usually 1)
// base_type: data type of vector elements
// type: new (output) data type
//
// returns: error code
//
int DIY_Create_vector_datatype(int num_elems, int stride, 
			       DIY_Datatype base_type, DIY_Datatype *type) {

  MPI_Type_vector(num_elems, 1, stride, base_type, type);
  MPI_Type_commit(type);
  dtype_absolute_address = false;

  return 0;

}
Example #26
HYPRE_Int
hypre_MPI_Type_vector( HYPRE_Int           count,
                       HYPRE_Int           blocklength,
                       HYPRE_Int           stride,
                       hypre_MPI_Datatype  oldtype,
                       hypre_MPI_Datatype *newtype )
{
   return (HYPRE_Int) MPI_Type_vector((hypre_int)count, (hypre_int)blocklength,
                                      (hypre_int)stride, oldtype, newtype);
}
Example #27
/*---------------------------------------------------------------------
 * Function:         Build_cyclic_mpi_type
 * Purpose:          Build an MPI derived datatype that can be used with
 *                   cyclically distributed data.
 * In arg:
 *    loc_n:         The number of elements assigned to each process
 * Global out:
 *    cyclic_mpi_t:  An MPI datatype that can be used with cyclically
 *                   distributed data
 */
void Build_cyclic_mpi_type(int loc_n) {
   MPI_Datatype temp_mpi_t;
   MPI_Aint lb, extent;

   MPI_Type_vector(loc_n, 1, comm_sz, MPI_INT, &temp_mpi_t);
   MPI_Type_get_extent(MPI_INT, &lb, &extent);
   MPI_Type_create_resized(temp_mpi_t, lb, extent, &cyclic_mpi_t);
   MPI_Type_commit(&cyclic_mpi_t);

}  /* Build_cyclic_mpi_type */
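A minimal usage sketch under the same globals (comm_sz, cyclic_mpi_t), with hypothetical arrays global (length comm_sz*loc_n, significant only on rank 0) and local (length loc_n): because the resized extent is a single int, one cyclic_mpi_t per destination scatters the data cyclically.
Build_cyclic_mpi_type(loc_n);
/* rank q receives global[q], global[q + comm_sz], global[q + 2*comm_sz], ... */
MPI_Scatter(global, 1, cyclic_mpi_t, local, loc_n, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Type_free(&cyclic_mpi_t);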
Example #28
void init_column_t(){
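	/* one column of an (m+2) x (n+2) row-major int grid:
	   m+2 single ints with a stride of n+2 ints */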
	MPI_Type_vector(
		m+2,
		1,
		n+2,
		MPI_INT,
		&column_t
	);
	MPI_Type_commit(&column_t);
}
Example #29
static h5part_int64_t
_halo_exchange_vectors (
	const H5PartFile *f,
	char *data,
	h5part_int64_t count,
	h5part_int64_t blocklen,
	h5part_int64_t stride,
	int proc_spacing,
	const char *edges
	) {

	int ret;
	int icount, iblocklen, istride;
	h5part_int64_t herr;
	MPI_Datatype halo_vector;

	if ( f->myproc == 0 ) _H5Part_print_info (
			"Using halo exchange method _halo_exchange_vectors");

	icount = (int)count;
	if ( (h5part_int64_t)icount != count ) return HANDLE_MPI_INT64_ERR;

	iblocklen = (int)blocklen;
	if ( (h5part_int64_t)iblocklen != blocklen ) return HANDLE_MPI_INT64_ERR;

	istride = (int)stride;
	if ( (h5part_int64_t)istride != stride ) return HANDLE_MPI_INT64_ERR;

	ret = MPI_Type_vector (	icount, iblocklen, istride,
						MPI_BYTE, &halo_vector);
	if (ret != MPI_SUCCESS) return HANDLE_MPI_TYPE_ERR;

	ret = MPI_Type_commit ( &halo_vector );
	if (ret != MPI_SUCCESS) return HANDLE_MPI_TYPE_ERR;

	/* forward */
	herr = _halo_exchange_vector ( f, data, &halo_vector,
					stride - 2*blocklen,
					0,
					edges[0], edges[1], proc_spacing );
	if (herr != H5PART_SUCCESS) return herr;

	/* backward */
	herr = _halo_exchange_vector ( f, data, &halo_vector,
					blocklen,
					stride - blocklen,
					edges[1], edges[0], -proc_spacing );
	if (herr != H5PART_SUCCESS) return herr;

	ret = MPI_Type_free ( &halo_vector );
	if (ret != MPI_SUCCESS) return HANDLE_MPI_TYPE_ERR;

	return H5PART_SUCCESS;
}
Example #30
/******************************************************
 * Function to setup MPI data.
 *
 * (1) Initializes MPI
 * (2) Creates a cartesian communicator for border exchange
 * (3) Distributes the overall grid to the processes
 * (4) Sets up helpful data-type and MPI buffer
 *
 ******************************************************/
void heatMPISetup (int* pargc, char*** pargv, heatGrid *grid, dataMPI* configMPI)
{
    int size,
        dims[2] = {0,0},
        periods[2] = {1,1},
        coords[2];
    int buf_size;
    char *buf;

    /* ==== (1) ==== */
    /* Base init*/
    MPI_Init (pargc, pargv);
    MPI_Comm_rank (MPI_COMM_WORLD, &configMPI->rank);
    MPI_Comm_size (MPI_COMM_WORLD, &size);

    /* ==== (2) ==== */
    /* Create cartesian communicator*/
    MPI_Dims_create (size, 2, dims);
    MPI_Cart_create (MPI_COMM_WORLD, 2, dims, periods, 0, &configMPI->cart);

    /* Store neighbors in the grid */
    MPI_Cart_shift (configMPI->cart, 0, 1, &configMPI->left, &configMPI->right);
    MPI_Cart_shift (configMPI->cart, 1, 1, &configMPI->up,    &configMPI->down);

    /* ==== (3) ==== */
    /* Distribute overall grid to processes */
    MPI_Cart_coords (configMPI->cart, configMPI->rank, 2, coords); /*My coordinate*/

    configMPI->start_x = 1 + (grid->xsize/dims[0])*coords[0];
    if (coords[0]+1 != dims[0])
        /* coords 0 to N-1 get an equal distribution*/
        configMPI->num_cells_x = grid->xsize / (dims[0]);
    else
        /* last coord gets the rest */
        configMPI->num_cells_x = grid->xsize - configMPI->start_x + 1;

    configMPI->start_y = 1 + (grid->ysize/dims[1])*coords[1];
    if (coords[1]+1 != dims[1])
            /* coords 0 to N-1 get an equal distribution*/
            configMPI->num_cells_y = grid->ysize / (dims[1]);
        else
            /* last coord gets the rest */
            configMPI->num_cells_y = grid->ysize - configMPI->start_y + 1;

    /* ==== (4) ==== */
    /* Create datatype to communicate one column */
    MPI_Type_vector (
            configMPI->num_cells_y, /* #blocks */
            1, /* #elements per block */
            grid->xsize+2, /* #stride */
            MPI_DOUBLE, /* old type */
            &configMPI->columntype /* new type */ );
    MPI_Type_commit (&configMPI->columntype);
}