Esempio n. 1
0
/* Pass a token once around the ring of processors in `comm`.
 *
 * Each rank sends *send_message (one int) to its right neighbour and
 * receives into *recv_message from its left neighbour.  Rank 0 starts
 * the ring by sending before receiving; every other rank waits for the
 * token first and then forwards its own, so exactly one message is in
 * flight at a time.  MPI failures are routed through mpi_error().
 * Always returns 0.
 */
int PassTokenComm(const MPI_Comm comm, int *send_message, int *recv_message ) {
  int rank, size;

  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);

  const int tag = 0;

  /* Ring neighbours: receive from the left, send to the right. */
  const int prev = (rank == 0) ? size - 1 : rank - 1;
  const int next = (rank + 1) % size;

  MPI_Status status;
  int rc;

  if (rank == 0) {
    /* Kick off the ring. */
    rc = MPI_Send(send_message, 1, MPI_INT, next, tag, comm);
    if (rc != MPI_SUCCESS)
      mpi_error(rc);

    rc = MPI_Recv(recv_message, 1, MPI_INT, prev, tag, comm, &status);
    if (rc != MPI_SUCCESS)
      mpi_error(rc);
  } else {
    /* Wait for the token, then forward ours. */
    rc = MPI_Recv(recv_message, 1, MPI_INT, prev, tag, comm, &status);
    if (rc != MPI_SUCCESS)
      mpi_error(rc);

    rc = MPI_Send(send_message, 1, MPI_INT, next, tag, comm);
    if (rc != MPI_SUCCESS)
      mpi_error(rc);
  }

  return 0;
}
Esempio n. 2
0
/* Stub MPI_Send: this build provides no real point-to-point send.
   Every call reports the problem via mpi_error() and fails with
   MPI_ERR_OTHER. */
int MPI_Send(void *buf, int count, MPI_Datatype datatype,
             int dest, int tag, MPI_Comm comm)
{
    /* Parameters are intentionally unused in the stub. */
    (void) buf;
    (void) count;
    (void) datatype;
    (void) dest;
    (void) tag;
    (void) comm;

    mpi_error();
    return MPI_ERR_OTHER;
}
Esempio n. 3
0
/* m - number of rows of processors
   n - number of columns of processors
   m*n = total number of processors
*/
/* Split `comm` into per-row and per-column communicators for an m x n
 * processor grid (size of `comm` must equal m*n).
 *
 * NOTE(review): ranks are assumed to be laid out column-major on the
 * grid — row index = rank % m, column index = rank / m — TODO confirm
 * against the callers.
 *
 * Failures of MPI_Comm_split are routed through mpi_error().
 * Always returns 0.
 */
int CreateGemmCommGroups(const int m, const int n, const MPI_Comm comm, MPI_Comm *comm_row, MPI_Comm *comm_col) {
  int rank, size;
  int rc;

  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);

  /* The grid must account for every processor exactly once. */
  assert(size == m*n);

  const int my_row = rank % m;
  const int my_col = rank / m;

  /* Processors sharing a row index form one communicator,
     ordered within it by column index. */
  rc = MPI_Comm_split(comm, my_row, my_col, comm_row);
  if (rc != MPI_SUCCESS)
    mpi_error(rc);

  /* Processors sharing a column index form one communicator,
     ordered within it by row index. */
  rc = MPI_Comm_split(comm, my_col, my_row, comm_col);
  if (rc != MPI_SUCCESS)
    mpi_error(rc);

  return 0;
}
Esempio n. 4
0
/* Stub MPI_Recv: no real receive is implemented in this build.
   Every call reports the problem via mpi_error() and fails with
   MPI_ERR_OTHER. */
int MPI_Recv(void *buf, int count, MPI_Datatype datatype,
             int source, int tag, MPI_Comm comm, MPI_Status *status)
{
    /* Parameters are intentionally unused in the stub. */
    (void) buf;
    (void) count;
    (void) datatype;
    (void) source;
    (void) tag;
    (void) comm;
    (void) status;

    mpi_error();
    return MPI_ERR_OTHER;
}
Esempio n. 5
0
/* Stub MPI_Irecv: non-blocking receive is not implemented in this
   build.  Every call reports the problem via mpi_error() and fails
   with MPI_ERR_OTHER. */
int MPI_Irecv(void *buf, int count, MPI_Datatype datatype,
              int source, int tag, MPI_Comm comm, MPI_Request *request)
{
    /* Parameters are intentionally unused in the stub. */
    (void) buf;
    (void) count;
    (void) datatype;
    (void) source;
    (void) tag;
    (void) comm;
    (void) request;

    mpi_error();
    return MPI_ERR_OTHER;
}
Esempio n. 6
0
/* Stub MPI_Wait: there are no real requests to wait on in this build.
   Every call reports the problem via mpi_error() and fails with
   MPI_ERR_OTHER. */
int MPI_Wait(MPI_Request *request, MPI_Status *status)
{
    /* Parameters are intentionally unused in the stub. */
    (void) request;
    (void) status;

    mpi_error();
    return MPI_ERR_OTHER;
}
Esempio n. 7
0
/* Cyclically shift a block-distributed matrix one processor to the LEFT
 * along the row communicator `comm_row`.
 *
 * Parameters (all sizes in elements):
 *   comm_row - communicator connecting the processors of one grid row
 *   gm, gn   - global matrix dimensions
 *   bm, bn   - dimensions of one distribution block
 *   p, q     - processor-grid dimensions (p rows, q columns)
 *   a        - this processor's local portion, (gm*gn)/(p*q) doubles,
 *              stored panel by panel (see offset computation below)
 *
 * Each processor sends its entire local portion to the left neighbour
 * and receives the right neighbour's portion, then copies the received
 * data back into `a`.  Always returns 0; MPI errors go to mpi_error(),
 * allocation failure aborts the process.
 */
int ShiftMPIMatrixLeft(MPI_Comm comm_row, int gm, int gn, int bm, int bn, int p, int q, double *a) {
  int rank, size;

  MPI_Comm_rank (comm_row, &rank);
  MPI_Comm_size (comm_row, &size);

  int tag = 0;
  
  MPI_Status status;
  int retval;

  /* Shift left: receive from the right neighbour, send to the left one. */
  int recv_neigh = (rank + 1) % size;
  int send_neigh = (rank - 1 < 0) ? size - 1 : rank - 1;

  /* Staging buffer for the incoming portion; same size as the local part. */
  double *a_buffer = malloc(sizeof(double) * (gm*gn) / (p*q));
  if (a_buffer == NULL) {
    fprintf(stderr, "Failed to malloc memory for arrays\n");
    exit(1);
  }

  /* pm x pn: the panel this processor holds inside one bm x bn block. */
  int
    pm = bm / p,
    pn = bn / q;

  /* m x n: how many blocks tile the global matrix in each direction. */
  int
    m = gm / bm,
    n = gn / bn;

  int a_offset;
  int a_buffer_offset;
  int message_size = (pm*pn);
  
  int bi, bj;
    
  /* Rank 0 sends all its panels first and then receives; every other
     rank receives first and then sends, breaking the cycle.
     NOTE(review): rank 0 posts m*n sends before any matching receive
     exists, so this relies on MPI eager buffering — may deadlock for
     large message_size; confirm. */
  if (rank == 0) {
    for ( bi=0; bi<m; bi++ )
      for ( bj=0; bj<n; bj++ ) {
	/* Panels are stored column-major by block index:
	   block (bi,bj) starts (bi + bj*m) panels into the local array. */
	a_offset = (bi + bj * m) * (pm*pn);
	
	if ((retval = MPI_Send(a + a_offset, message_size, MPI_DOUBLE, send_neigh, tag, comm_row)) != MPI_SUCCESS)
	  mpi_error(retval);
      }

    for ( bi=0; bi<m; bi++ )
      for ( bj=0; bj<n; bj++ ) {
	
	a_buffer_offset = (bi + bj * m) * (pm*pn);
	
	if ((retval = MPI_Recv(a_buffer + a_buffer_offset, message_size, MPI_DOUBLE, recv_neigh, tag, comm_row, &status)) != MPI_SUCCESS)
	  mpi_error(retval);
      }

  } else {
    for ( bi=0; bi<m; bi++ )
      for ( bj=0; bj<n; bj++ ) {
	/* Data arriving from rank 0 has wrapped around the ring, and its
	   block columns are stored rotated one position (bj -> bj-1 mod n)
	   — presumably to realize the cyclic wraparound of the shift;
	   verify against the callers. */
	if (recv_neigh == 0) {
	  a_buffer_offset = (bi + (bj == 0 ? n-1 : bj-1) * m) * (pm*pn);
	} else {
	  a_buffer_offset = (bi + bj * m) * (pm*pn);
	}
	
	if ((retval = MPI_Recv(a_buffer + a_buffer_offset, message_size, MPI_DOUBLE, recv_neigh, tag, comm_row, &status)) != MPI_SUCCESS)
	  mpi_error(retval);
      }
    
    for ( bi=0; bi<m; bi++ )
      for ( bj=0; bj<n; bj++ ) {
	a_offset = (bi + bj * m) * (pm*pn);
	
	if ((retval = MPI_Send(a + a_offset, message_size, MPI_DOUBLE, send_neigh, tag, comm_row)) != MPI_SUCCESS)
	  mpi_error(retval);
      }
  }

  // Copy data from buffer to Matrix
  int i;
  for ( i=0; i<(gm*gn) / (p*q); i++ )
    a[i] = a_buffer[i];
    
  free(a_buffer);

  return 0;
}