Example #1
int MPIX_Rsend_x(BIGMPI_CONST void *buf, MPI_Count count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm)
{
    int rc = MPI_SUCCESS;

    if (likely (count <= bigmpi_int_max )) {
        rc = MPI_Rsend(buf, (int)count, datatype, dest, tag, comm);
    } else {
        MPI_Datatype newtype;
        BigMPI_Type_contiguous(0,count, datatype, &newtype);
        MPI_Type_commit(&newtype);
        rc = MPI_Rsend(buf, 1, newtype, dest, tag, comm);
        MPI_Type_free(&newtype);
    }
    return rc;
}
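In the large-count branch above, the whole payload is wrapped in a single derived datatype so that the int count passed to MPI_Rsend becomes 1. The sketch below shows the same wrap-in-a-datatype idea with only standard MPI calls; the function name is made up for illustration, and plain MPI_Type_contiguous still takes an int count, so handling counts above INT_MAX genuinely requires BigMPI_Type_contiguous. As with any ready send, the matching receive must already be posted.

#include <mpi.h>

/* Illustrative sketch only: send nelems doubles as one derived-datatype element. */
static int rsend_as_one_block(const double *buf, int nelems,
                              int dest, int tag, MPI_Comm comm)
{
    MPI_Datatype block;
    int rc;

    MPI_Type_contiguous(nelems, MPI_DOUBLE, &block);  /* nelems doubles -> 1 element */
    MPI_Type_commit(&block);
    rc = MPI_Rsend((void *)buf, 1, block, dest, tag, comm);
    MPI_Type_free(&block);
    return rc;
}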
Example #2
void ParallelInfo::sendZi(const Pattern &toSend)
{
   int P_Tag = 0;

#if defined(TIMING_MODE)
   long long elapsed, start;
   elapsed = 0;
#endif

   // 10-2-2002, Replaced MPI_Send with MPI_Rsend
   //    this assumes that the comp nodes have already
   //    called MPI_Recv, which they should have.
   for (unsigned int i = 1; i < P_NumNodes; i++) {
#if defined(TIMING_MODE)
      start = rdtsc();
#endif
      int * tmpToSend = new int[totalNumNrns];
      for (unsigned int nrn = 0; nrn < totalNumNrns; nrn++)
         tmpToSend[nrn] = toSend.at(nrn);
      MPI_Rsend(&tmpToSend[0], totalNumNrns, MPI_INT, i, P_Tag, MPI_COMM_WORLD);
      delete [] tmpToSend;  // MPI_Rsend has returned, so the staging buffer can be released
#if defined(TIMING_MODE)
      elapsed += rdtsc() - start;
#endif
   }

#if defined(TIMING_MODE)
   Output::Out() << "Elapsed root_snd time = " << elapsed * 1.0 / TICKS_PER_SEC
                 << " seconds" << endl;
#endif   
}
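The comment above relies on every compute node having already posted its receive before the root reaches sendZi(). A hypothetical receiver-side counterpart (not part of the original source; the name and signature are assumptions) could be as simple as:

/* Hypothetical compute-node counterpart: this blocking receive must be
 * outstanding before rank 0 calls MPI_Rsend, which is what makes the
 * ready send above legal. */
void recvZi(int *zi, int totalNumNrns)
{
   MPI_Status status;
   MPI_Recv(zi, totalNumNrns, MPI_INT, 0 /* root */, 0 /* P_Tag */,
            MPI_COMM_WORLD, &status);
}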
Example #3
void MpiComm<Ordinal>::readySend(
    const ArrayView<const char> &sendBuffer,
    const int destRank
) const
{
    TEUCHOS_COMM_TIME_MONITOR(
        "Teuchos::MpiComm<"<<OrdinalTraits<Ordinal>::name()<<">::readySend(...)"
    );
#ifdef TEUCHOS_DEBUG
    TEST_FOR_EXCEPTION(
        ! ( 0 <= destRank && destRank < size_ ), std::logic_error
        ,"Error, destRank = " << destRank << " is < 0 or is not"
        " in the range [0,"<<size_-1<<"]!"
    );
#endif // TEUCHOS_DEBUG
#ifdef TEUCHOS_MPI_COMM_DUMP
    if(show_dump) {
        dumpBuffer<Ordinal,char>(
            "Teuchos::MpiComm<Ordinal>::readySend(...)"
            ,"sendBuffer", sendBuffer.size(), sendBuffer
        );
    }
#endif // TEUCHOS_MPI_COMM_DUMP
    MPI_Rsend(
        const_cast<char*>(sendBuffer.getRawPtr()),sendBuffer.size(),MPI_CHAR,destRank,tag_,*rawMpiComm_
    );
    // ToDo: What about error handling???
}
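The "ToDo" above leaves the return code of MPI_Rsend unchecked. Below is a sketch of one way to surface failures; it is not Teuchos' actual error-handling policy, and it only does anything if the communicator's error handler has been set to MPI_ERRORS_RETURN (the default MPI_ERRORS_ARE_FATAL aborts before the check runs).

#include <stdio.h>
#include <mpi.h>

/* Hypothetical error-checking variant of the ready send above. */
static void ready_send_checked(const char *buf, int nbytes,
                               int dest, int tag, MPI_Comm comm)
{
    int err = MPI_Rsend((void *)buf, nbytes, MPI_CHAR, dest, tag, comm);
    if (err != MPI_SUCCESS) {
        char msg[MPI_MAX_ERROR_STRING];
        int len = 0;
        MPI_Error_string(err, msg, &len);
        fprintf(stderr, "MPI_Rsend to rank %d failed: %s\n", dest, msg);
        MPI_Abort(comm, err);
    }
}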
Example #4
int main(int argc, char **argv)
{
    int i, envia, recibe;
    int procs, miRank;
    int tag = 10;

    MPI_Status status;

    MPI_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &procs);
    MPI_Comm_rank(MPI_COMM_WORLD, &miRank);

    if(miRank < procs-1)
    {
            /* Receive the value from the last rank, which is the sender. */
            MPI_Recv(&recibe, 1, MPI_INT, procs-1, tag, MPI_COMM_WORLD, &status);
            printf("Received the value %d on process %d\n", recibe, miRank);
    }
    else
    {
	printf("Enter a number: ");
	scanf("%d", &envia);

        /* Ready-send the value to every receiving rank. */
        for(i = 0; i < procs-1; i++)
            MPI_Rsend(&envia, 1, MPI_INT, i, tag, MPI_COMM_WORLD);
    }

    MPI_Finalize();

    return 0;

}
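Even with the ranks matched up, MPI_Rsend is only correct if the matching receive has already been posted when the send starts, which the small program above does not guarantee. A minimal sketch of the usual zero-byte "ready" handshake for two ranks (the same idea appears in Example #5 and Example #19 below):

#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, value = 0, tag = 10;
    MPI_Status status;
    MPI_Request req;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {
        MPI_Irecv(&value, 1, MPI_INT, 1, tag, MPI_COMM_WORLD, &req);  /* post the receive first */
        MPI_Send(NULL, 0, MPI_INT, 1, tag, MPI_COMM_WORLD);           /* zero-byte "I'm ready" */
        MPI_Wait(&req, &status);
        printf("Received %d\n", value);
    } else if (rank == 1) {
        int to_send = 42;
        MPI_Recv(NULL, 0, MPI_INT, 0, tag, MPI_COMM_WORLD, &status);  /* wait for the ready signal */
        MPI_Rsend(&to_send, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);      /* receive is guaranteed posted */
    }

    MPI_Finalize();
    return 0;
}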
Example #5
 void send(MPI_Comm comm, double *send_buf,int send_size)
 {
   MPI_Status status;
   int tmp, error;
   error = MPI_Recv(&tmp,0,MPI_INT,0,0,comm,&status);
   // only issue the ready send if the handshake receive succeeded
   if (error == MPI_SUCCESS)
     error = MPI_Rsend(send_buf,send_size,MPI_DOUBLE,0,0,comm);
   if (error != MPI_SUCCESS) throw ATC_Error("error in send "+to_string(error));
 }
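The zero-length MPI_Recv above is the sender's half of a ready handshake: rank 0 signals that its receive for the doubles is already posted. A hypothetical receiving half (not from the ATC source; names are assumptions) would look like:

#include <mpi.h>

/* Hypothetical receiving half of the handshake: post the data receive, then
 * release the sender with a zero-length message, then wait for the data. */
void recv_ready(MPI_Comm comm, double *recv_buf, int recv_size, int sender)
{
    MPI_Status status;
    MPI_Request req;

    MPI_Irecv(recv_buf, recv_size, MPI_DOUBLE, sender, 0, comm, &req);  /* post first */
    MPI_Send(NULL, 0, MPI_INT, sender, 0, comm);                        /* "go ahead" */
    MPI_Wait(&req, &status);
}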
Example #6
/**
 * vsg_packed_msg_rsend:
 * @pm: a #VsgPackedMsg.
 * @dst: the destination task id.
 * @tag: an integer message tag.
 *
 * Sends stored message to the specified destination with the specified tag.
 */
void vsg_packed_msg_rsend (VsgPackedMsg *pm, gint dst, gint tag)
{
  gint ierr;

  _trace_write_msg_send (pm, "rsend", dst, tag);

  ierr = MPI_Rsend (pm->buffer, pm->position, MPI_PACKED, dst, tag,
                    pm->communicator);

  if (ierr != MPI_SUCCESS) vsg_mpi_error_output (ierr);
}
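Because the message goes out with MPI_Rsend, the matching receive must already be posted when vsg_packed_msg_rsend is called; a probe-then-receive loop would violate that. The sketch below (names are hypothetical; the library's own receive routine is not shown in this excerpt) pre-posts a receive into a buffer sized by an agreed upper bound and unpacks a single integer from it.

#include <mpi.h>

/* Hypothetical counterpart to vsg_packed_msg_rsend: receive up to max_bytes of
 * packed data and unpack one int.  The receive must be posted before the
 * sender's MPI_Rsend starts. */
static int recv_packed_int(MPI_Comm comm, int src, int tag,
                           char *buffer, int max_bytes)
{
    MPI_Status status;
    int value = 0, position = 0, nbytes = 0;

    MPI_Recv(buffer, max_bytes, MPI_PACKED, src, tag, comm, &status);
    MPI_Get_count(&status, MPI_PACKED, &nbytes);   /* how much was actually packed */
    MPI_Unpack(buffer, nbytes, &position, &value, 1, MPI_INT, comm);
    return value;
}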
Example #7
void mpi_rsend_f(char *ibuf, MPI_Fint *count, MPI_Fint *datatype,
		 MPI_Fint *dest, MPI_Fint *tag, MPI_Fint *comm, MPI_Fint *ierr)
{
   MPI_Datatype c_type = MPI_Type_f2c(*datatype);
   MPI_Comm c_comm;

   c_comm = MPI_Comm_f2c (*comm);

   *ierr = OMPI_INT_2_FINT(MPI_Rsend(OMPI_F2C_BOTTOM(ibuf), OMPI_FINT_2_INT(*count),
                                     c_type, OMPI_FINT_2_INT(*dest),
                                     OMPI_FINT_2_INT(*tag), c_comm));
}
Example #8
static
void get_off_process_entries(
  const struct distributed_crs_matrix * const matrix ,
  VECTOR_SCALAR * const vec )
{
  const int np   = matrix->p_size ;
  const int my_p = matrix->p_rank ;
  const int * const recv_pc = matrix->p_recv_pc ;
  const int * const send_pc = matrix->p_send_pc ;
  const int * const send_id = matrix->p_send_id ;
  int i , irecv ;

  for ( irecv = 0 , i = 1 ; i < np ; ++i ) {
    if ( recv_pc[i] < recv_pc[i+1] ) ++irecv ;
  }

  {
    VECTOR_SCALAR * const send_buf =
      (VECTOR_SCALAR *) malloc( sizeof(VECTOR_SCALAR) * send_pc[np] );

    MPI_Request * const recv_request =
      (MPI_Request *) malloc( sizeof(MPI_Request) * irecv );

    MPI_Status * const recv_status =
      (MPI_Status *) malloc( sizeof(MPI_Status) * irecv );

    for ( irecv = 0 , i = 1 ; i < np ; ++i ) {
      const int ip = ( i + my_p ) % np ;
      const int recv_beg    = recv_pc[i];
      const int recv_length = recv_pc[i+1] - recv_beg ;
      if ( recv_length ) {
        MPI_Irecv( vec + recv_beg ,
                   recv_length * sizeof(VECTOR_SCALAR), MPI_BYTE ,
                   ip , 0 , MPI_COMM_WORLD , recv_request + irecv );
        ++irecv ;
      }
    }

    /* Gather components into send buffer */

    for ( i = 0 ; i < send_pc[np] ; ++i ) {
      send_buf[i] = vec[ send_id[i] ];
    }

    MPI_Barrier( MPI_COMM_WORLD );

    for ( i = 1 ; i < np ; ++i ) {
      const int ip = ( i + my_p ) % np ;
      const int send_beg    = send_pc[i];
      const int send_length = send_pc[i+1] - send_beg ;
      if ( send_length ) { /* Send to 'i' */
        MPI_Rsend( send_buf + send_beg ,
                   send_length * sizeof(VECTOR_SCALAR), MPI_BYTE ,
                   ip , 0 , MPI_COMM_WORLD );
      }
    }

    MPI_Waitall( irecv , recv_request , recv_status );

    free( recv_status );
    free( recv_request );
    free( send_buf );
  }
}
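Examples #8, #10, #12-#14 and #17 all rely on the same discipline to make MPI_Rsend legal: post every MPI_Irecv, synchronize (here with MPI_Barrier) so all ranks are known to have posted, then issue the ready sends and wait on the receives. A stripped-down skeleton of that pattern, with the peer lists and buffers supplied by the caller:

#include <stdlib.h>
#include <mpi.h>

/* Skeleton of the Irecv -> Barrier -> Rsend -> Waitall exchange used above. */
static void exchange_ready(MPI_Comm comm, int tag,
                           int nrecv, char **recv_buf, const int *recv_len, const int *recv_rank,
                           int nsend, char **send_buf, const int *send_len, const int *send_rank)
{
    MPI_Request *req = (MPI_Request *) malloc(nrecv * sizeof(MPI_Request));
    MPI_Status  *sta = (MPI_Status *)  malloc(nrecv * sizeof(MPI_Status));
    int i;

    for (i = 0; i < nrecv; i++)        /* 1. post every receive first */
        MPI_Irecv(recv_buf[i], recv_len[i], MPI_BYTE, recv_rank[i], tag, comm, &req[i]);

    MPI_Barrier(comm);                 /* 2. now all ranks have posted their receives */

    for (i = 0; i < nsend; i++)        /* 3. ready sends are legal from here on */
        MPI_Rsend(send_buf[i], send_len[i], MPI_BYTE, send_rank[i], tag, comm);

    MPI_Waitall(nrecv, req, sta);      /* 4. complete the receives */
    free(req);
    free(sta);
}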
Example #9
int		buflen;
va_list         ap;

va_start(ap, unknown);
buf = unknown;
if (_numargs() == NUMPARAMS+1) {
        buflen = va_arg(ap, int) / 8;         /* The length is in bits. */
}
count =         va_arg(ap, int *);
datatype =      va_arg(ap, MPI_Datatype*);
dest =          va_arg(ap, int *);
tag =           va_arg(ap, int *);
comm =          va_arg(ap, MPI_Comm *);
__ierr =        va_arg(ap, int *);

*__ierr = MPI_Rsend(MPIR_F_PTR(buf),*count,*datatype,*dest,*tag,*comm);
}

#else

 void mpi_rsend_( buf, count, datatype, dest, tag, comm, __ierr )
void             *buf;
int*count,*dest,*tag;
MPI_Datatype     *datatype;
MPI_Comm         *comm;
int *__ierr;
{
    _fcd temp;
    if (_isfcd(buf)) {
	temp = _fcdtocp(buf);
	buf = (void *) temp;
Example #10
//==============================================================================
//---------------------------------------------------------------------------
//Do_Posts Method
//---------------------------------------------------------------------------
int Epetra_MpiDistributor::DoPosts( char * export_objs,
				    int obj_size,
                                    int & len_import_objs,
				    char *& import_objs )
{
  int i, j, k;

  int my_proc = 0;
  int self_recv_address = 0;

  MPI_Comm_rank( comm_, &my_proc );

  if( len_import_objs < (total_recv_length_*obj_size) )
  {
    if( import_objs!=0 ) {delete [] import_objs; import_objs = 0;}
    len_import_objs = total_recv_length_*obj_size;
    if (len_import_objs>0) import_objs = new char[len_import_objs];
    for( i=0; i<len_import_objs; ++i ) import_objs[i]=0;
  }

  k = 0;

  j = 0;
  for( i = 0; i < (nrecvs_+self_msg_); i++ )
  {
    if( procs_from_[i] != my_proc )
    {
      MPI_Irecv( &(import_objs[j]),
                 lengths_from_[i] * obj_size,
                 MPI_CHAR, procs_from_[i],
                 tag_, comm_,
                 &(request_[k]) );
      k++;
    }
    else
      self_recv_address = j;

    j += lengths_from_[i] * obj_size;
  }

  MPI_Barrier( comm_ );

  //setup scan through procs_to list starting w/ higher numbered procs 
  //Should help balance msg traffic
  int nblocks = nsends_ + self_msg_; 
  int proc_index = 0; 
  while( proc_index < nblocks && procs_to_[proc_index] < my_proc )
    ++proc_index;                    
  if( proc_index == nblocks ) proc_index = 0;
   
  int self_num = 0, self_index = 0;
  int p;

  if( !indices_to_ ) //data already blocked by processor
  {
    for( i = 0; i < nblocks; ++i )
    {
      p = i + proc_index;
      if( p > (nblocks-1) ) p -= nblocks;

      if( procs_to_[p] != my_proc )
        MPI_Rsend( &export_objs[starts_to_[p]*obj_size],
                   lengths_to_[p]*obj_size,
                   MPI_CHAR,
                   procs_to_[p],
                   tag_,
                   comm_ );
      else
       self_num = p;
    }

    if( self_msg_ )
      memcpy( &import_objs[self_recv_address],
              &export_objs[starts_to_[self_num]*obj_size],
              lengths_to_[self_num]*obj_size );
  }
  else //data not blocked by proc, use send buffer
  {
    if( send_array_size_ < (max_send_length_*obj_size) )
    {
      if( send_array_!=0 ) {delete [] send_array_; send_array_ = 0;}
      send_array_size_ = max_send_length_*obj_size;
      if (send_array_size_>0) send_array_ = new char[send_array_size_];
    }

    j = 0;
    for( i = 0; i < nblocks; i++ )
    {
      p = i + proc_index;
      if( p > (nblocks-1) ) p -= nblocks;
      if( procs_to_[p] != my_proc )
      {
        int offset = 0;
        j = starts_to_[p];
        for( k = 0; k < lengths_to_[p]; k++ )
        {
          memcpy( &(send_array_[offset]), 
                  &(export_objs[indices_to_[j]*obj_size]),
                  obj_size );
          ++j;
          offset += obj_size;
        }
        MPI_Rsend( send_array_,
                   lengths_to_[p] * obj_size,
                   MPI_CHAR,
                   procs_to_[p],
                   tag_, comm_ );
      }
      else
      {
        self_num = p;
        self_index = starts_to_[p];
      }
    }

    if( self_msg_ )
      for( k = 0; k < lengths_to_[self_num]; k++ )
      {
        memcpy( &(import_objs[self_recv_address]),
                &(export_objs[indices_to_[self_index]*obj_size]),
                obj_size );
        self_index++;
        self_recv_address += obj_size;
      }
  }

  return(0);
}
Example #11
void mpi_rsend_(void* buf, int* count, int* datatype, int* dst,
                int* tag, int* comm, int* ierr) {
   *ierr = MPI_Rsend(buf, *count, get_datatype(*datatype), *dst, *tag,
                    get_comm(*comm));
}
Example #12
/* communicate integers and doubles using point to point communication */
int COM (MPI_Comm comm, int tag,
         COMDATA *send, int nsend,
	 COMDATA **recv, int *nrecv) /* recv is contiguous => free (*recv) releases all memory */
{
  COMDATA *cd;
  int rank,
      ncpu,
      send_size,
    (*send_sizes) [3],
     *send_position,
     *send_rank,
      send_count,
     *send_rank_all,
     *send_count_all,
     *send_rank_disp,
     *recv_rank,
    (*recv_sizes) [3],
      recv_count,
      i, j, k, l;
  char **send_data,
       **recv_data;
  MPI_Request *req;
  MPI_Status *sta;
  void *p;

  MPI_Comm_rank (comm, &rank);
  MPI_Comm_size (comm, &ncpu);

  ERRMEM (send_sizes = MEM_CALLOC (ncpu * sizeof (int [3])));
  ERRMEM (send_position = MEM_CALLOC (ncpu * sizeof (int)));
  ERRMEM (send_rank = malloc (ncpu * sizeof (int)));
  ERRMEM (send_data = malloc (ncpu * sizeof (char*)));

  /* compute send sizes */
  for (i = 0, cd = send; i < nsend; i ++, cd ++)
  {
    send_sizes [cd->rank][0] += cd->ints;
    send_sizes [cd->rank][1] += cd->doubles;
    MPI_Pack_size (cd->ints, MPI_INT, comm, &j);
    MPI_Pack_size (cd->doubles, MPI_DOUBLE, comm, &k);
    send_sizes [cd->rank][2] += (j + k);
  }

  /* allocate send buffers */
  for (send_size = i = 0; i < ncpu; i ++)
  {
    if (send_sizes [i][2])
    {
      ERRMEM (send_data [i] = malloc (send_sizes [i][2]));
      send_position [i] = 0;
      send_size += send_sizes [i][2];
    }
  }

  /* pack ints */
  for (i = 0, cd = send; i < nsend; i ++, cd ++)
  {
    if (cd->ints)
    {
      MPI_Pack (cd->i, cd->ints, MPI_INT, send_data [cd->rank], send_sizes [cd->rank][2], &send_position [cd->rank], comm);
    }
  }

  /* pack doubles */
  for (i = 0, cd = send; i < nsend; i ++, cd ++)
  {
    if (cd->doubles)
    {
      MPI_Pack (cd->d, cd->doubles, MPI_DOUBLE, send_data [cd->rank], send_sizes [cd->rank][2], &send_position [cd->rank], comm);
    }
  }

#if DEBUG
  for (i = 0; i < ncpu; i ++)
  {
    ASSERT_DEBUG (send_position [i] <= send_sizes [i][2], "Incorrect packing");
  }
#endif

  /* compute send ranks and move data */
  for (send_count = i = 0; i < ncpu; i ++)
  {
    if (send_sizes [i][2])
    {
      send_rank [send_count] = i;
      send_data [send_count] = send_data [i];
      send_sizes [send_count][0] = send_sizes [i][0];
      send_sizes [send_count][1] = send_sizes [i][1];
      send_sizes [send_count][2] = send_sizes [i][2];
      send_count ++;
    }
  }

  ERRMEM (send_count_all = malloc (ncpu * sizeof (int)));
  ERRMEM (recv_rank = malloc (ncpu * sizeof (int)));

  /* gather all send ranks */
  MPI_Allgather (&send_count, 1, MPI_INT, send_count_all, 1, MPI_INT, comm);
  ERRMEM (send_rank_disp = malloc (ncpu * sizeof (int)));
  for (send_rank_disp [0] = l = i = 0; i < ncpu; i ++)
  { l += send_count_all [i]; if (i < ncpu-1) send_rank_disp [i+1] = l; }
  ERRMEM (send_rank_all = malloc (l * sizeof (int)));
  MPI_Allgatherv (send_rank, send_count, MPI_INT, send_rank_all, send_count_all, send_rank_disp, MPI_INT, comm);

  /* compute receive ranks */
  for (recv_count = k = i = 0; i < l; i += send_count_all [k], k ++)
  {
    for (j = 0; j < send_count_all [k]; j ++)
    {
      if (send_rank_all [i+j] == rank) /* 'k'th rank is sending here */
      {
	recv_rank [recv_count] = k;
	recv_count ++;
	break;
      }
    }
  }

  ERRMEM (recv_sizes = malloc (recv_count * sizeof (int [3])));
  ERRMEM (req = malloc (recv_count * sizeof (MPI_Request)));
  ERRMEM (sta = malloc (recv_count * sizeof (MPI_Status)));

  /* communicate receive sizes */
  for (i = 0; i < recv_count; i ++)
  {
    MPI_Irecv (recv_sizes [i], 3, MPI_INT, recv_rank [i], tag, comm, &req [i]);
  }
  MPI_Barrier (comm);
  for (i = 0; i < send_count; i ++)
  {
    MPI_Rsend (send_sizes [i], 3, MPI_INT, send_rank [i], tag, comm);
  }
  MPI_Waitall (recv_count, req, sta);

  /* contiguous receive size */
  j = recv_count * sizeof (COMDATA);
  for (i = 0; i < recv_count; i ++)
  {
    j += recv_sizes [i][0] * sizeof (int) + 
         recv_sizes [i][1] * sizeof (double);
  }

  /* prepare receive buffers */
  ERRMEM (recv_data = malloc (recv_count * sizeof (char*)));
  ERRMEM ((*recv) = malloc (j));
  p = (*recv) + recv_count;
  *nrecv = recv_count;
  for (i = 0, cd = *recv; i < recv_count; i ++, cd ++)
  {
    cd->rank = recv_rank [i];
    cd->ints = recv_sizes [i][0];
    cd->doubles = recv_sizes [i][1];
    cd->i = p; p = (cd->i + cd->ints);
    cd->d = p; p = (cd->d + cd->doubles);
    ERRMEM (recv_data [i] = malloc (recv_sizes [i][2]));
  }

  /* communicate data */
  for (i = 0; i < recv_count; i ++)
  {
    MPI_Irecv (recv_data [i], recv_sizes [i][2], MPI_PACKED, recv_rank [i], tag, comm, &req [i]);
  }
  MPI_Barrier (comm);
  for (i = 0; i < send_count; i ++)
  {
    MPI_Rsend (send_data [i], send_sizes [i][2], MPI_PACKED, send_rank [i], tag, comm);
  }
  MPI_Waitall (recv_count, req, sta);

  /* unpack data */
  for (i = j = 0; i < recv_count; i ++, j = 0)
  {
    MPI_Unpack (recv_data [i], recv_sizes [i][2], &j, (*recv) [i].i, (*recv) [i].ints, MPI_INT, comm);
    MPI_Unpack (recv_data [i], recv_sizes [i][2], &j, (*recv) [i].d, (*recv) [i].doubles, MPI_DOUBLE, comm);
  }

  /* cleanup */
  free (send_rank_disp);
  free (send_sizes);
  free (send_position);
  free (send_rank);
  for (i = 0; i < send_count; i ++)
    free (send_data [i]);
  free (send_data);
  free (send_count_all);
  free (send_rank_all);
  free (recv_rank);
  free (recv_sizes);
  for (i = 0; i < recv_count; i ++)
    free (recv_data [i]);
  free (recv_data);
  free (req);
  free (sta);

  return send_size;
}
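A hedged sketch of how a caller might drive COM(): fill one COMDATA entry per destination, call COM, read the contiguous result, and release it with a single free, as the comment on the recv argument promises. Only the COMDATA fields visible above (rank, ints, i, doubles, d) are used; the declarations are assumed to come from the library's header, and the function name is hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
/* plus the library header that declares COMDATA and COM() */

/* Hypothetical caller: each rank sends two ints and one double to the next
 * rank and prints whatever it received.  Must be called on all ranks. */
static void com_demo(MPI_Comm comm)
{
    int rank, ncpu, nrecv, i;
    int ints[2] = { 7, 11 };
    double dbls[1] = { 3.14 };
    COMDATA send_item, *recv_items;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &ncpu);

    send_item.rank    = (rank + 1) % ncpu;   /* one destination: the next rank */
    send_item.ints    = 2;  send_item.i = ints;
    send_item.doubles = 1;  send_item.d = dbls;

    COM(comm, 7 /* tag */, &send_item, 1, &recv_items, &nrecv);

    for (i = 0; i < nrecv; i++)
        printf("rank %d got %d ints and %d doubles from rank %d\n",
               rank, recv_items[i].ints, recv_items[i].doubles, recv_items[i].rank);

    free(recv_items);   /* recv is contiguous: one free releases everything */
}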
Example #13
/* communicate integers and doubles according
 * to the pattern computed by COM_Pattern */
int COM_Repeat (void *pattern)
{
  COMPATTERN *cp = pattern;
  int *rankmap = cp->rankmap,
      *send_position = cp->send_position,
     (*send_sizes) [3] = cp->send_sizes,
      *send_rank = cp->send_rank,
       send_count = cp->send_count,
      *recv_rank = cp->recv_rank,
     (*recv_sizes) [3] = cp->recv_sizes,
       recv_count = cp->recv_count,
       tag = cp->tag,
       nsend = cp->nsend,
       i, j;
  char **send_data = cp->send_data,
       **recv_data = cp->recv_data;
  MPI_Request *recv_req = cp->recv_req;
  MPI_Status *recv_sta = cp->recv_sta;
  MPI_Comm comm = cp->comm;
  COMDATA *cd;

  for (i = 0; i < send_count; i ++)
    send_position [i] = 0;

  /* pack ints */
  for (i = 0, cd = cp->send; i < nsend; i ++, cd ++)
  {
    if (cd->ints)
    {
      j = rankmap [cd->rank];
      MPI_Pack (cd->i, cd->ints, MPI_INT, send_data [j], send_sizes [j][2], &send_position [j], comm);
    }
  }

  /* pack doubles */
  for (i = 0, cd = cp->send; i < nsend; i ++, cd ++)
  {
    if (cd->doubles)
    {
      j = rankmap [cd->rank];
      MPI_Pack (cd->d, cd->doubles, MPI_DOUBLE, send_data [j], send_sizes [j][2], &send_position [j], comm);
    }
  }

  /* communicate data */
  for (i = 0; i < recv_count; i ++)
  {
    MPI_Irecv (recv_data [i], recv_sizes [i][2], MPI_PACKED, recv_rank [i], tag, comm, &recv_req [i]);
  }
  MPI_Barrier (comm);
  for (i = 0; i < send_count; i ++)
  {
    MPI_Rsend (send_data [i], send_sizes [i][2], MPI_PACKED, send_rank [i], tag, comm);
  }
  MPI_Waitall (recv_count, recv_req, recv_sta);

  /* unpack data */
  for (i = j = 0, cd = cp->recv; i < recv_count; i ++, cd ++, j = 0)
  {
    MPI_Unpack (recv_data [i], recv_sizes [i][2], &j, cd->i, cd->ints, MPI_INT, comm);
    MPI_Unpack (recv_data [i], recv_sizes [i][2], &j, cd->d, cd->doubles, MPI_DOUBLE, comm);
  }

  return cp->send_size;
}
Example #14
/* create a repetitive point to point communication pattern;
 * ranks and sizes must not change during the communication;
 * pointers to send and receive buffers data must not change */
void* COM_Pattern (MPI_Comm comm, int tag,
                   COMDATA *send, int nsend,
	           COMDATA **recv, int *nrecv) /* recv is contiguous => free (*recv) releases all memory */
{
  COMPATTERN *pattern;
  COMDATA *cd;
  int rank,
      ncpu,
     *send_rank_all,
     *send_count_all,
     *send_rank_disp,
      i, j, k, l;
  void *p;

  MPI_Comm_rank (comm, &rank);
  MPI_Comm_size (comm, &ncpu);

  ERRMEM (pattern = malloc (sizeof (COMPATTERN)));
  ERRMEM (pattern->rankmap = MEM_CALLOC (ncpu * sizeof (int)));
  ERRMEM (pattern->send_sizes = MEM_CALLOC (ncpu * sizeof (int [3])));
  ERRMEM (pattern->send_position = MEM_CALLOC (ncpu * sizeof (int)));
  ERRMEM (pattern->send_rank = malloc (ncpu * sizeof (int)));
  ERRMEM (pattern->send_data = malloc (ncpu * sizeof (char*)));
  pattern->nsend = nsend;
  pattern->send = send;
  pattern->comm = comm;
  pattern->tag = tag;

  /* compute send sizes */
  for (i = 0, cd = send; i < nsend; i ++, cd ++)
  {
    pattern->send_sizes [cd->rank][0] += cd->ints;
    pattern->send_sizes [cd->rank][1] += cd->doubles;
    MPI_Pack_size (cd->ints, MPI_INT, comm, &j);
    MPI_Pack_size (cd->doubles, MPI_DOUBLE, comm, &k);
    pattern->send_sizes [cd->rank][2] += (j + k);
  }

  /* allocate send buffers and prepare rank map */
  for (pattern->send_size = i = j = 0; i < ncpu; i ++)
  {
    if (pattern->send_sizes [i][2])
    {
      ERRMEM (pattern->send_data [i] = malloc (pattern->send_sizes [i][2]));
      pattern->rankmap [i] = j;
      pattern->send_size += pattern->send_sizes [i][2];
      j ++;
    }
  }

  /* compute send ranks and move data */
  for (pattern->send_count = i = 0; i < ncpu; i ++)
  {
    if (pattern->send_sizes [i][2])
    {
      pattern->send_rank [pattern->send_count] = i;
      pattern->send_data [pattern->send_count] = pattern->send_data [i];
      pattern->send_sizes [pattern->send_count][0] = pattern->send_sizes [i][0];
      pattern->send_sizes [pattern->send_count][1] = pattern->send_sizes [i][1];
      pattern->send_sizes [pattern->send_count][2] = pattern->send_sizes [i][2];
      pattern->send_count ++;
    }
  }

  ERRMEM (send_count_all = malloc (ncpu * sizeof (int)));
  ERRMEM (pattern->recv_rank = malloc (ncpu * sizeof (int)));

  /* gather all send ranks */
  MPI_Allgather (&pattern->send_count, 1, MPI_INT, send_count_all, 1, MPI_INT, comm);
  ERRMEM (send_rank_disp = malloc (ncpu * sizeof (int)));
  for (send_rank_disp [0] = l = i = 0; i < ncpu; i ++)
  { l += send_count_all [i]; if (i < ncpu-1) send_rank_disp [i+1] = l; }
  ERRMEM (send_rank_all = malloc (l * sizeof (int)));
  MPI_Allgatherv (pattern->send_rank, pattern->send_count, MPI_INT, send_rank_all, send_count_all, send_rank_disp, MPI_INT, comm);

  /* compute receive ranks */
  for (pattern->recv_count = k = i = 0; i < l; i += send_count_all [k], k ++)
  {
    for (j = 0; j < send_count_all [k]; j ++)
    {
      if (send_rank_all [i+j] == rank) /* 'k'th rank is sending here */
      {
	pattern->recv_rank [pattern->recv_count] = k;
	pattern->recv_count ++;
	break;
      }
    }
  }

  ERRMEM (pattern->recv_sizes = malloc (pattern->recv_count * sizeof (int [3])));
  ERRMEM (pattern->recv_req = malloc (pattern->recv_count * sizeof (MPI_Request)));
  ERRMEM (pattern->recv_sta = malloc (pattern->recv_count * sizeof (MPI_Status)));
  ERRMEM (pattern->send_req = malloc (pattern->send_count * sizeof (MPI_Request)));
  ERRMEM (pattern->send_sta = malloc (pattern->send_count * sizeof (MPI_Status)));

  /* communicate receive sizes */
  for (i = 0; i < pattern->recv_count; i ++)
  {
    MPI_Irecv (pattern->recv_sizes [i], 3, MPI_INT, pattern->recv_rank [i], tag, comm, &pattern->recv_req [i]);
  }
  MPI_Barrier (comm);
  for (i = 0; i < pattern->send_count; i ++)
  {
    MPI_Rsend (pattern->send_sizes [i], 3, MPI_INT, pattern->send_rank [i], tag, comm);
  }
  MPI_Waitall (pattern->recv_count, pattern->recv_req, pattern->recv_sta);

  /* contiguous receive size */
  j = pattern->recv_count * sizeof (COMDATA);
  for (i = 0; i < pattern->recv_count; i ++)
  {
    j += pattern->recv_sizes [i][0] * sizeof (int) + 
         pattern->recv_sizes [i][1] * sizeof (double);
  }

  /* prepare receive buffers */
  ERRMEM (pattern->recv_data = malloc (pattern->recv_count * sizeof (char*)));
  ERRMEM (pattern->recv = malloc (j));
  p = pattern->recv + pattern->recv_count;
  pattern->nrecv = pattern->recv_count;
  for (i = 0, cd = pattern->recv; i < pattern->recv_count; i ++, cd ++)
  {
    cd->rank = pattern->recv_rank [i];
    cd->ints = pattern->recv_sizes [i][0];
    cd->doubles = pattern->recv_sizes [i][1];
    cd->i = p; p = (cd->i + cd->ints);
    cd->d = p; p = (cd->d + cd->doubles);
    ERRMEM (pattern->recv_data [i] = malloc (pattern->recv_sizes [i][2]));
  }

  /* truncate */
  if (pattern->send_count)
  {
    ERRMEM (pattern->send_sizes = realloc (pattern->send_sizes, pattern->send_count * sizeof (int [3])));
    ERRMEM (pattern->send_position = realloc (pattern->send_position, pattern->send_count * sizeof (int)));
    ERRMEM (pattern->send_rank = realloc (pattern->send_rank, pattern->send_count * sizeof (int)));
    ERRMEM (pattern->send_data = realloc (pattern->send_data, pattern->send_count * sizeof (char*)));
  }
  if (pattern->recv_count) ERRMEM (pattern->recv_rank = realloc (pattern->recv_rank, pattern->recv_count * sizeof (int)));
 
  /* cleanup */
  free (send_rank_disp);
  free (send_count_all);
  free (send_rank_all);

  /* output */
  *nrecv = pattern->nrecv;
  *recv = pattern->recv;

  return pattern;
}
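The pattern API above splits the setup cost from the per-step exchange. A hedged sketch of the intended flow (assuming the library header for COMDATA, COM_Pattern and COM_Repeat): build the pattern once, then repeat the identical exchange while updating the data behind the same pointers, since the ranks, sizes and buffer pointers must not change. The matching cleanup call is not shown in these excerpts and is omitted here.

/* Hypothetical driver for COM_Pattern / COM_Repeat. */
static void repeat_exchange(MPI_Comm comm, int tag,
                            COMDATA *send, int nsend, int nsteps)
{
    COMDATA *recv;
    int nrecv, step;
    void *pattern = COM_Pattern(comm, tag, send, nsend, &recv, &nrecv);

    for (step = 0; step < nsteps; step++) {
        /* ... update the values behind send[k].i / send[k].d in place ... */
        COM_Repeat(pattern);      /* re-runs the Irecv / Barrier / Rsend exchange */
        /* ... consume recv[0..nrecv-1], overwritten in place each step ... */
    }
}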
Example #15
void BI_Rsend(BLACSCONTEXT *ctxt, int dest, int msgid, BLACBUFF *bp)
{
   int info;

   info=MPI_Rsend(bp->Buff, bp->N, bp->dtype, dest, msgid, ctxt->scp->comm);
}
Example #16
FORT_DLL_SPEC void FORT_CALL mpi_rsend_ ( void*v1, MPI_Fint *v2, MPI_Fint *v3, MPI_Fint *v4, MPI_Fint *v5, MPI_Fint *v6, MPI_Fint *ierr ){
    *ierr = MPI_Rsend( v1, *v2, (MPI_Datatype)(*v3), *v4, *v5, (MPI_Comm)(*v6) );
}
Example #17
int       Zoltan_Comm_Do_Post(
ZOLTAN_COMM_OBJ * plan,		/* communication data structure */
int tag,			/* message tag for communicating */
char *send_data,		/* array of data I currently own */
int nbytes,			/* multiplier for sizes */
char *recv_data)		/* array of data I'll own after comm */
{
    char     *send_buff;	/* space to buffer outgoing data */
    int       my_proc;		/* processor ID */
    unsigned int self_recv_address = 0;/* where in recv_data self info starts */
    int       self_num=0;       /* where in send list my_proc appears */
    int       offset;		/* offset into array I'm copying into */
    int       self_index = 0;	/* send offset for data I'm keeping */
    int       out_of_mem;	/* am I out of memory? */
    int       nblocks;		/* number of procs who need my data */
    int       proc_index;	/* loop counter over procs to send to */
    int       i, j, k, jj;	/* loop counters */

    static char *yo = "Zoltan_Comm_Do_Post";


    /* Check input parameters */
    if (!plan) {
        MPI_Comm_rank(MPI_COMM_WORLD, &my_proc);
	ZOLTAN_COMM_ERROR("Communication plan = NULL", yo, my_proc);
	return ZOLTAN_FATAL;
    }

    /* If not point to point, currently we do synchronous communications */
    if (plan->maxed_recvs){
      int status;
      status = Zoltan_Comm_Do_AlltoAll(plan, send_data, nbytes, recv_data);
      return (status);
    }

    MPI_Comm_rank(plan->comm, &my_proc);

    if ((plan->nsends + plan->self_msg) && !send_data) {
        int sum = 0;
        if (plan->sizes_to)   /* Not an error if all sizes_to == 0 */
            for (i = 0; i < (plan->nsends + plan->self_msg); i++) 
                sum += plan->sizes_to[i];
        if (!plan->sizes_to || (plan->sizes_to && sum)) {
            ZOLTAN_COMM_ERROR("nsends not zero, but send_data = NULL", 
                              yo, my_proc);
            return ZOLTAN_FATAL;
        }
    }
    if ((plan->nrecvs + plan->self_msg) && !recv_data) {
        int sum = 0;
        if (plan->sizes_from)   /* Not an error if all sizes_from == 0 */
            for (i = 0; i < (plan->nrecvs + plan->self_msg); i++) 
                sum += plan->sizes_from[i];
        if (!plan->sizes_from || (plan->sizes_from && sum)) {
            ZOLTAN_COMM_ERROR("nrecvs not zero, but recv_data = NULL", 
                              yo, my_proc);
            return ZOLTAN_FATAL;
        }
    }
    if (nbytes < 0) {
	ZOLTAN_COMM_ERROR("Scale factor nbytes is negative", yo, my_proc);
	return ZOLTAN_FATAL;
    }


    /* Post irecvs */

    out_of_mem = 0;

    if (plan->indices_from == NULL) {
	/* Data can go directly into user space. */
	plan->recv_buff = recv_data;
    }
    else {			/* Need to buffer receive to reorder */
	plan->recv_buff = (char *) ZOLTAN_MALLOC(plan->total_recv_size * nbytes);
	if (plan->recv_buff == NULL && plan->total_recv_size * nbytes != 0)
	    out_of_mem = 1;
    }

    if (!out_of_mem) {
	if (plan->sizes == NULL) {	/* All data the same size */
	    k = 0;
	    for (i = 0; i < plan->nrecvs + plan->self_msg; i++) {
		if (plan->procs_from[i] != my_proc) {
		    MPI_Irecv((void *) & plan->recv_buff[plan->starts_from[i] * nbytes],
			      plan->lengths_from[i] * nbytes,
			      (MPI_Datatype) MPI_BYTE, plan->procs_from[i], tag,
			      plan->comm, &plan->request[k]);
		    k++;
		}
		else {
		    self_recv_address = plan->starts_from[i] * nbytes;
		}
	    }
	}

	else {			/* Data of varying sizes */
	    k = 0;
	    for (i = 0; i < plan->nrecvs + plan->self_msg; i++) {
		if (plan->procs_from[i] != my_proc) {
                    if (plan->sizes_from[i]) 
		        MPI_Irecv((void *)
                &plan->recv_buff[plan->starts_from_ptr[i] * nbytes],
			          plan->sizes_from[i] * nbytes,
			          (MPI_Datatype) MPI_BYTE, plan->procs_from[i], 
			          tag, plan->comm, &plan->request[k]);
                    else
                        plan->request[k] = MPI_REQUEST_NULL;
	            k++;
		}
		else {
		    self_recv_address = plan->starts_from_ptr[i] * nbytes;
		}
	    }
	}
    }


    /* Do remaining allocation to check for any mem problems. */
    if (plan->indices_to != NULL) {	/* can't send straight from input */
	send_buff = (char *) ZOLTAN_MALLOC(plan->max_send_size * nbytes);
	if (send_buff == 0 && plan->max_send_size * nbytes != 0)
	    out_of_mem = 1;
    }
    else
	send_buff = NULL;

    /* Barrier to ensure irecvs are posted before doing any sends. */
    /* Simultaneously see if anyone out of memory */

    MPI_Allreduce(&out_of_mem, &j, 1, MPI_INT, MPI_SUM, plan->comm);

    if (j > 0) {		/* Some proc is out of memory -> Punt */
	ZOLTAN_FREE(&send_buff);
	if (plan->indices_from != NULL)
	    ZOLTAN_FREE(&plan->recv_buff);
	return (ZOLTAN_MEMERR);
    }

    /* Send out data */

    /* Scan through procs_to list to start w/ higher numbered procs */
    /* This should balance message traffic. */

    nblocks = plan->nsends + plan->self_msg;
    proc_index = 0;
    while (proc_index < nblocks && plan->procs_to[proc_index] < my_proc)
	proc_index++;
    if (proc_index == nblocks)
	proc_index = 0;

    if (plan->sizes == NULL) {	/* Data all of same size */
	if (plan->indices_to == NULL) {	/* data already blocked by processor. */
	    for (i = proc_index, j = 0; j < nblocks; j++) {
		if (plan->procs_to[i] != my_proc) {
		    MPI_Rsend((void *) &send_data[plan->starts_to[i] * nbytes],
			      plan->lengths_to[i] * nbytes,
			      (MPI_Datatype) MPI_BYTE, plan->procs_to[i], tag,
			      plan->comm);
		}
		else
		    self_num = i;
		if (++i == nblocks)
		    i = 0;
	    }

	    if (plan->self_msg) {	/* Copy data to self. */
		/* I use array+offset instead of &(array[offset]) because of
		   a bug with PGI v9 */
		/* I use memmove because I'm not sure that the pointers
		   don't overlap. */
		memmove(plan->recv_buff+self_recv_address, send_data+plan->starts_to[self_num] * nbytes, plan->lengths_to[self_num]*nbytes);
	    }
	}

	else {			/* Not blocked by processor.  Need to buffer. */
	    for (i = proc_index, jj = 0; jj < nblocks; jj++) {
		if (plan->procs_to[i] != my_proc) {
		    /* Need to pack message first. */
		    offset = 0;
		    j = plan->starts_to[i];
		    for (k = 0; k < plan->lengths_to[i]; k++) {
			memcpy(&send_buff[offset],
			       &send_data[plan->indices_to[j++] * nbytes], nbytes);
			offset += nbytes;
		    }
		    MPI_Rsend((void *) send_buff, plan->lengths_to[i] * nbytes,
		      (MPI_Datatype) MPI_BYTE, plan->procs_to[i], tag, plan->comm);
		}
		else {
		    self_num = i;
		    self_index = plan->starts_to[i];
		}
		if (++i == nblocks)
		    i = 0;
	    }
	    if (plan->self_msg) {	/* Copy data to self. */
		for (k = 0; k < plan->lengths_to[self_num]; k++) {
		    memcpy(&plan->recv_buff[self_recv_address],
		      &send_data[plan->indices_to[self_index++] * nbytes], nbytes);
		    self_recv_address += nbytes;
		}
	    }

	    ZOLTAN_FREE(&send_buff);
	}
    }
    else {			/* Data of differing sizes */
	if (plan->indices_to == NULL) {	/* data already blocked by processor. */
	    for (i = proc_index, j = 0; j < nblocks; j++) {

		if (plan->procs_to[i] != my_proc) {
                    if (plan->sizes_to[i]) {
		        MPI_Rsend((void *)
                                  &send_data[plan->starts_to_ptr[i] * nbytes],
			          plan->sizes_to[i] * nbytes,
			          (MPI_Datatype) MPI_BYTE, plan->procs_to[i],
			          tag, plan->comm);
                    }
		}
		else
		    self_num = i;
		if (++i == nblocks)
		    i = 0;
	    }

	    if (plan->self_msg) {	/* Copy data to self. */
                if (plan->sizes_to[self_num]) {
                    char* lrecv = &plan->recv_buff[self_recv_address];
                    char* lsend = &send_data[plan->starts_to_ptr[self_num] 
                                             * nbytes];
                    int sindex = plan->sizes_to[self_num], idx;
                    for (idx=0; idx<nbytes; idx++) {
                        memcpy(lrecv, lsend, sindex);
                        lrecv += sindex;
                        lsend += sindex;
                    }
                }
	    }
	}

	else {			/* Not blocked by processor.  Need to buffer. */
	    for (i = proc_index, jj = 0; jj < nblocks; jj++) {
		if (plan->procs_to[i] != my_proc) {
		    /* Need to pack message first. */
		    offset = 0;
		    j = plan->starts_to[i];
		    for (k = 0; k < plan->lengths_to[i]; k++) {
                        if (plan->sizes[plan->indices_to[j]]) {
			    memcpy(&send_buff[offset],
			       &send_data[plan->indices_to_ptr[j] * nbytes],
			       plan->sizes[plan->indices_to[j]] * nbytes);
			    offset += plan->sizes[plan->indices_to[j]] * nbytes;
                        }
			j++;
		    }
                    if (plan->sizes_to[i]) {
		        MPI_Rsend((void *) send_buff, 
                                  plan->sizes_to[i] * nbytes,
		                  (MPI_Datatype) MPI_BYTE, plan->procs_to[i],
                                  tag, plan->comm);
                    }
		}
		else
		    self_num = i;
		if (++i == nblocks)
		    i = 0;
	    }
	    if (plan->self_msg) {	/* Copy data to self. */
                if (plan->sizes_to[self_num]) {
		    j = plan->starts_to[self_num];
		    for (k = 0; k < plan->lengths_to[self_num]; k++) {
		        int kk = plan->indices_to_ptr[j];
                        char* lrecv = &plan->recv_buff[self_recv_address];
                        unsigned int send_idx = kk * nbytes;
                        char* lsend = &send_data[send_idx];
                        int sindex = plan->sizes[plan->indices_to[j]], idx;
                        for (idx=0; idx<nbytes; idx++) {
                            memcpy(lrecv, lsend, sindex);
                            lrecv += sindex;
                            lsend += sindex;
                        }
		        self_recv_address += plan->sizes[plan->indices_to[j]] 
                                           * nbytes;
		        j++;
		    }
		}
	    }

	    ZOLTAN_FREE(&send_buff);
	}
    }
    return (ZOLTAN_OK);
}
Example #18
File: MPI-api.c Project: 8l/rose
void declareBindings (void)
{
  /* === Point-to-point === */
  void* buf;
  int count;
  MPI_Datatype datatype;
  int dest;
  int tag;
  MPI_Comm comm;
  MPI_Send (buf, count, datatype, dest, tag, comm); // L12
  int source;
  MPI_Status status;
  MPI_Recv (buf, count, datatype, source, tag, comm, &status); // L15
  MPI_Get_count (&status, datatype, &count);
  MPI_Bsend (buf, count, datatype, dest, tag, comm);
  MPI_Ssend (buf, count, datatype, dest, tag, comm);
  MPI_Rsend (buf, count, datatype, dest, tag, comm);
  void* buffer;
  int size;
  MPI_Buffer_attach (buffer, size); // L22
  MPI_Buffer_detach (buffer, &size);
  MPI_Request request;
  MPI_Isend (buf, count, datatype, dest, tag, comm, &request); // L25
  MPI_Ibsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Issend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irecv (buf, count, datatype, source, tag, comm, &request);
  MPI_Wait (&request, &status);
  int flag;
  MPI_Test (&request, &flag, &status); // L32
  MPI_Request_free (&request);
  MPI_Request* array_of_requests;
  int index;
  MPI_Waitany (count, array_of_requests, &index, &status); // L36
  MPI_Testany (count, array_of_requests, &index, &flag, &status);
  MPI_Status* array_of_statuses;
  MPI_Waitall (count, array_of_requests, array_of_statuses); // L39
  MPI_Testall (count, array_of_requests, &flag, array_of_statuses);
  int incount;
  int outcount;
  int* array_of_indices;
  MPI_Waitsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L44--45
  MPI_Testsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L46--47
  MPI_Iprobe (source, tag, comm, &flag, &status); // L48
  MPI_Probe (source, tag, comm, &status);
  MPI_Cancel (&request);
  MPI_Test_cancelled (&status, &flag);
  MPI_Send_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Bsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Ssend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Rsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Recv_init (buf, count, datatype, source, tag, comm, &request);
  MPI_Start (&request);
  MPI_Startall (count, array_of_requests);
  void* sendbuf;
  int sendcount;
  MPI_Datatype sendtype;
  int sendtag;
  void* recvbuf;
  int recvcount;
  MPI_Datatype recvtype;
  int recvtag;
  MPI_Sendrecv (sendbuf, sendcount, sendtype, dest, sendtag,
		recvbuf, recvcount, recvtype, source, recvtag,
		comm, &status); // L67--69
  MPI_Sendrecv_replace (buf, count, datatype, dest, sendtag, source, recvtag,
			comm, &status); // L70--71
  MPI_Datatype oldtype;
  MPI_Datatype newtype;
  MPI_Type_contiguous (count, oldtype, &newtype); // L74
  int blocklength;
  {
    int stride;
    MPI_Type_vector (count, blocklength, stride, oldtype, &newtype); // L78
  }
  {
    MPI_Aint stride;
    MPI_Type_hvector (count, blocklength, stride, oldtype, &newtype); // L82
  }
  int* array_of_blocklengths;
  {
    int* array_of_displacements;
    MPI_Type_indexed (count, array_of_blocklengths, array_of_displacements,
		      oldtype, &newtype); // L87--88
  }
  {
    MPI_Aint* array_of_displacements;
    MPI_Type_hindexed (count, array_of_blocklengths, array_of_displacements,
                       oldtype, &newtype); // L92--93
    MPI_Datatype* array_of_types;
    MPI_Type_struct (count, array_of_blocklengths, array_of_displacements,
                     array_of_types, &newtype); // L95--96
  }
  void* location;
  MPI_Aint address;
  MPI_Address (location, &address); // L100
  MPI_Aint extent;
  MPI_Type_extent (datatype, &extent); // L102
  MPI_Type_size (datatype, &size);
  MPI_Aint displacement;
  MPI_Type_lb (datatype, &displacement); // L105
  MPI_Type_ub (datatype, &displacement);
  MPI_Type_commit (&datatype);
  MPI_Type_free (&datatype);
  MPI_Get_elements (&status, datatype, &count);
  void* inbuf;
  void* outbuf;
  int outsize;
  int position;
  MPI_Pack (inbuf, incount, datatype, outbuf, outsize, &position, comm); // L114
  int insize;
  MPI_Unpack (inbuf, insize, &position, outbuf, outcount, datatype,
	      comm); // L116--117
  MPI_Pack_size (incount, datatype, comm, &size);

  /* === Collectives === */
  MPI_Barrier (comm); // L121
  int root;
  MPI_Bcast (buffer, count, datatype, root, comm); // L123
  MPI_Gather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
	      root, comm); // L124--125
  int* recvcounts;
  int* displs;
  MPI_Gatherv (sendbuf, sendcount, sendtype,
               recvbuf, recvcounts, displs, recvtype,
	       root, comm); // L128--130
  MPI_Scatter (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
               root, comm); // L131--132
  int* sendcounts;
  MPI_Scatterv (sendbuf, sendcounts, displs, sendtype,
		recvbuf, recvcount, recvtype, root, comm); // L134--135
  MPI_Allgather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
                 comm); // L136--137
  MPI_Allgatherv (sendbuf, sendcount, sendtype,
		  recvbuf, recvcounts, displs, recvtype,
		  comm); // L138--140
  MPI_Alltoall (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
		comm); // L141--142
  int* sdispls;
  int* rdispls;
  MPI_Alltoallv (sendbuf, sendcounts, sdispls, sendtype,
                 recvbuf, recvcounts, rdispls, recvtype,
		 comm); // L145--147
  MPI_Op op;
  MPI_Reduce (sendbuf, recvbuf, count, datatype, op, root, comm); // L149
#if 0
  MPI_User_function function;
  int commute;
  MPI_Op_create (function, commute, &op); // L153
#endif
  MPI_Op_free (&op); // L155
  MPI_Allreduce (sendbuf, recvbuf, count, datatype, op, comm);
  MPI_Reduce_scatter (sendbuf, recvbuf, recvcounts, datatype, op, comm);
  MPI_Scan (sendbuf, recvbuf, count, datatype, op, comm);

  /* === Groups, contexts, and communicators === */
  MPI_Group group;
  MPI_Group_size (group, &size); // L162
  int rank;
  MPI_Group_rank (group, &rank); // L164
  MPI_Group group1;
  int n;
  int* ranks1;
  MPI_Group group2;
  int* ranks2;
  MPI_Group_translate_ranks (group1, n, ranks1, group2, ranks2); // L170
  int result;
  MPI_Group_compare (group1, group2, &result); // L172
  MPI_Group newgroup;
  MPI_Group_union (group1, group2, &newgroup); // L174
  MPI_Group_intersection (group1, group2, &newgroup);
  MPI_Group_difference (group1, group2, &newgroup);
  int* ranks;
  MPI_Group_incl (group, n, ranks, &newgroup); // L178
  MPI_Group_excl (group, n, ranks, &newgroup);
  extern int ranges[][3];
  MPI_Group_range_incl (group, n, ranges, &newgroup); // L181
  MPI_Group_range_excl (group, n, ranges, &newgroup);
  MPI_Group_free (&group);
  MPI_Comm_size (comm, &size);
  MPI_Comm_rank (comm, &rank);
  MPI_Comm comm1;
  MPI_Comm comm2;
  MPI_Comm_compare (comm1, comm2, &result);
  MPI_Comm newcomm;
  MPI_Comm_dup (comm, &newcomm);
  MPI_Comm_create (comm, group, &newcomm);
  int color;
  int key;
  MPI_Comm_split (comm, color, key, &newcomm); // L194
  MPI_Comm_free (&comm);
  MPI_Comm_test_inter (comm, &flag);
  MPI_Comm_remote_size (comm, &size);
  MPI_Comm_remote_group (comm, &group);
  MPI_Comm local_comm;
  int local_leader;
  MPI_Comm peer_comm;
  int remote_leader;
  MPI_Comm newintercomm;
  MPI_Intercomm_create (local_comm, local_leader, peer_comm, remote_leader, tag,
			&newintercomm); // L204--205
  MPI_Comm intercomm;
  MPI_Comm newintracomm;
  int high;
  MPI_Intercomm_merge (intercomm, high, &newintracomm); // L209
  int keyval;
#if 0
  MPI_Copy_function copy_fn;
  MPI_Delete_function delete_fn;
  void* extra_state;
  MPI_Keyval_create (copy_fn, delete_fn, &keyval, extra_state); // L215
#endif
  MPI_Keyval_free (&keyval); // L217
  void* attribute_val;
  MPI_Attr_put (comm, keyval, attribute_val); // L219
  MPI_Attr_get (comm, keyval, attribute_val, &flag);
  MPI_Attr_delete (comm, keyval);

  /* === Environmental inquiry === */
  char* name;
  int resultlen;
  MPI_Get_processor_name (name, &resultlen); // L226
  MPI_Errhandler errhandler;
#if 0
  MPI_Handler_function function;
  MPI_Errhandler_create (function, &errhandler); // L230
#endif
  MPI_Errhandler_set (comm, errhandler); // L232
  MPI_Errhandler_get (comm, &errhandler);
  MPI_Errhandler_free (&errhandler);
  int errorcode;
  char* string;
  MPI_Error_string (errorcode, string, &resultlen); // L237
  int errorclass;
  MPI_Error_class (errorcode, &errorclass); // L239
  MPI_Wtime ();
  MPI_Wtick ();
  int argc;
  char** argv;
  MPI_Init (&argc, &argv); // L244
  MPI_Finalize ();
  MPI_Initialized (&flag);
  MPI_Abort (comm, errorcode);
}
Example #19
int main( int argc, char *argv[] )
{
    int msglen, i;
    int msglen_min = MIN_MESSAGE_LENGTH;
    int msglen_max = MAX_MESSAGE_LENGTH;
    int rank,poolsize,Master;
    char *sendbuf,*recvbuf;
    char ival;
    MPI_Request request;
    MPI_Status status;
	
    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD,&poolsize);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);

    if(poolsize != 2) {
	printf("Expected exactly 2 MPI processes\n");
	MPI_Abort( MPI_COMM_WORLD, 1 );
    }

/* 
   The following test allows this test to run on small-memory systems
   that support the sysconf call interface.  This test keeps the test from
   becoming swap-bound.  For example, on an old Linux system or a
   Sony Playstation 2 (really!) 
 */
#if defined(HAVE_SYSCONF) && defined(_SC_PHYS_PAGES) && defined(_SC_PAGESIZE)
    { 
	long n_pages, pagesize;
	int  actmsglen_max;
	n_pages  = sysconf( _SC_PHYS_PAGES );
	pagesize = sysconf( _SC_PAGESIZE );
	/* We want to avoid integer overflow in the size calculation.
	   The best way is to avoid computing any products (such
	   as total memory = n_pages * pagesize) and instead
	   compute a msglen_max that fits within 1/4 of the available 
	   pages */
	if (n_pages > 0 && pagesize > 0) {
	    /* Recompute msglen_max */
	    int msgpages = 4 * ((msglen_max + pagesize - 1)/ pagesize);
	    while (n_pages < msgpages) { msglen_max /= 2; msgpages /= 2; }
	}
	/* printf ( "before = %d\n", msglen_max ); */
	MPI_Allreduce( &msglen_max, &actmsglen_max, 1, MPI_INT, 
		       MPI_MIN, MPI_COMM_WORLD );
	msglen_max = actmsglen_max;
	/* printf ( "after = %d\n", msglen_max ); */
    }
#endif

    Master = (rank == 0);	

    if(Master && verbose)
	printf("Size (bytes)\n------------\n");
    for(msglen = msglen_min; msglen <= msglen_max; msglen *= 2) {

	sendbuf = malloc(msglen);
	recvbuf = malloc(msglen);
	if(sendbuf == NULL || recvbuf == NULL) {
	    printf("Can't allocate %d bytes\n",msglen);
	    MPI_Abort( MPI_COMM_WORLD, 1 );
	}

	ival = 0;
	for (i=0; i<msglen; i++) {
	    sendbuf[i] = ival++;
	    recvbuf[i] = 0;
	}


	if(Master && verbose) 
	    printf("%d\n",msglen);
	fflush(stdout);

	MPI_Barrier(MPI_COMM_WORLD);
		
	/* Send/Recv */
	if(Master) 
	    MPI_Send(sendbuf,msglen,MPI_CHAR,1,TAG1,MPI_COMM_WORLD);
	else {
	    Resetbuf( recvbuf, msglen );
	    MPI_Recv(recvbuf,msglen,MPI_CHAR,0,TAG1,MPI_COMM_WORLD,&status);
	    Checkbuf( recvbuf, msglen, &status );
	}

	MPI_Barrier(MPI_COMM_WORLD);

	/* Ssend/Recv */
	if(Master) 
	    MPI_Ssend(sendbuf,msglen,MPI_CHAR,1,TAG2,MPI_COMM_WORLD);
	else {
	    Resetbuf( recvbuf, msglen );
	    MPI_Recv(recvbuf,msglen,MPI_CHAR,0,TAG2,MPI_COMM_WORLD,&status);
	    Checkbuf( recvbuf, msglen, &status );
	}

	MPI_Barrier(MPI_COMM_WORLD);
		
	/* Rsend/Recv */
	if (Master) {
	    MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, 1, TAGSR,
			  MPI_BOTTOM, 0, MPI_INT, 1, TAGSR,
			  MPI_COMM_WORLD, &status );
	    MPI_Rsend( sendbuf,msglen,MPI_CHAR,1,TAG3,MPI_COMM_WORLD );
	}
	else {
	    Resetbuf( recvbuf, msglen );
	    MPI_Irecv( recvbuf,msglen,MPI_CHAR,0,TAG3,MPI_COMM_WORLD,&request);
	    MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, 0, TAGSR,
			  MPI_BOTTOM, 0, MPI_INT, 0, TAGSR,
			  MPI_COMM_WORLD, &status );
	    MPI_Wait( &request, &status );
	    Checkbuf( recvbuf, msglen, &status );
	}
	    
	MPI_Barrier(MPI_COMM_WORLD);

	/* Isend/Recv - receive not ready */
	if(Master) {
	    MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, 1, TAGSR,
			  MPI_BOTTOM, 0, MPI_INT, 1, TAGSR,
			  MPI_COMM_WORLD, &status );
	    MPI_Isend(sendbuf,msglen,MPI_CHAR,1,TAG4,MPI_COMM_WORLD, &request);
	    MPI_Wait( &request, &status );
	}
	else {
	    Resetbuf( recvbuf, msglen );
	    MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, 0, TAGSR,
			  MPI_BOTTOM, 0, MPI_INT, 0, TAGSR,
			  MPI_COMM_WORLD, &status );
	    MPI_Recv(recvbuf,msglen,MPI_CHAR,0,TAG4,MPI_COMM_WORLD,&status);
	    Checkbuf( recvbuf, msglen, &status );
	}

	MPI_Barrier(MPI_COMM_WORLD);

	free(sendbuf);
	free(recvbuf);
    }

    if (rank == 0) {
	/* If we do not abort, we saw no errors */
	printf( " No Errors\n" );
    }

    MPI_Finalize();
    return 0;
}
Example #20
static void test_pair (void)
{
  int prev, next, count, tag, index, i, outcount, indices[2];
  int rank, size, flag, ierr, reqcount;
  double send_buf[TEST_SIZE], recv_buf[TEST_SIZE];
  double buffered_send_buf[TEST_SIZE * 2 + MPI_BSEND_OVERHEAD]; /* factor of two is based on guessing - only dynamic allocation would be safe */
  void *buffer;
  MPI_Status statuses[2];
  MPI_Status status;
  MPI_Request requests[2];
  MPI_Comm dupcom, intercom;
#ifdef V_T

  struct _VT_FuncFrameHandle {
      char *name;
      int func;
      int frame;
  };
  typedef struct _VT_FuncFrameHandle VT_FuncFrameHandle_t;

  VT_FuncFrameHandle_t normal_sends,
      buffered_sends,
      buffered_persistent_sends,
      ready_sends,
      sync_sends,
      nblock_sends,
      nblock_rsends,
      nblock_ssends,
      pers_sends,
      pers_rsends,
      pers_ssends,
      sendrecv,
      sendrecv_repl,
      intercomm;

  int classid;
  VT_classdef( "Application:test_pair", &classid );


#define VT_REGION_DEF( _name, _nameframe, _class ) \
        (_nameframe).name=_name; \
        VT_funcdef( (_nameframe).name, _class, &((_nameframe).func) );
#define VT_BEGIN_REGION( _nameframe ) \
        LOCDEF(); \
        VT_begin( (_nameframe).func )
#define VT_END_REGION( _nameframe ) \
        LOCDEF(); VT_end( (_nameframe).func )
#else
#define VT_REGION_DEF( _name, _nameframe, _class )
#define VT_BEGIN_REGION( _nameframe )
#define VT_END_REGION( _nameframe )

#endif




  ierr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  ierr = MPI_Comm_size(MPI_COMM_WORLD, &size);
  if ( size < 2 ) {
      if ( rank == 0 ) {
	  printf("Program needs to be run on at least 2 processes.\n");
      }
      ierr = MPI_Abort( MPI_COMM_WORLD, 66 );
  }
  ierr = MPI_Comm_dup(MPI_COMM_WORLD, &dupcom);

  if ( rank >= 2 ) {
      /*      printf( "%d Calling finalize.\n", rank ); */
      ierr = MPI_Finalize( );
      exit(0);
  }

  next = rank + 1;
  if (next >= 2)
    next = 0;

  prev = rank - 1;
  if (prev < 0)
    prev = 1;

  VT_REGION_DEF( "Normal_Sends", normal_sends, classid );
  VT_REGION_DEF( "Buffered_Sends", buffered_sends, classid );
  VT_REGION_DEF( "Buffered_Persistent_Sends", buffered_persistent_sends, classid );
  VT_REGION_DEF( "Ready_Sends", ready_sends, classid );
  VT_REGION_DEF( "Sync_Sends", sync_sends, classid );
  VT_REGION_DEF( "nblock_Sends", nblock_sends, classid );
  VT_REGION_DEF( "nblock_RSends", nblock_rsends, classid );
  VT_REGION_DEF( "nblock_SSends", nblock_ssends, classid );
  VT_REGION_DEF( "Pers_Sends", pers_sends, classid );
  VT_REGION_DEF( "Pers_RSends", pers_rsends, classid );
  VT_REGION_DEF( "Pers_SSends", pers_ssends, classid );
  VT_REGION_DEF( "SendRecv", sendrecv, classid );
  VT_REGION_DEF( "SendRecv_Repl", sendrecv_repl, classid );
  VT_REGION_DEF( "InterComm", intercomm, classid );



/*
 * Normal sends
 */

  VT_BEGIN_REGION( normal_sends );

  if (rank == 0)
    printf ("Send\n");

  tag = 0x100;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);

    LOCDEF();

    MPI_Send(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
              MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv");
  }
  else {

    LOCDEF();

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);

  }

  VT_END_REGION( normal_sends );


/*
 * Buffered sends
 */

  VT_BEGIN_REGION( buffered_sends );

  if (rank == 0)
    printf ("Buffered Send\n");

  tag = 138;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);

    LOCDEF();

    MPI_Buffer_attach(buffered_send_buf, sizeof(buffered_send_buf));
    MPI_Bsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
    MPI_Buffer_detach(&buffer, &size);
    if(buffer != buffered_send_buf || size != sizeof(buffered_send_buf)) {
        printf ("[%d] Unexpected buffer returned by MPI_Buffer_detach(): %p/%d != %p/%d\n", rank, buffer, size, buffered_send_buf, (int)sizeof(buffered_send_buf));
        MPI_Abort(MPI_COMM_WORLD, 201);
    }
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
              MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv");
  }
  else {

    LOCDEF();

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);

  }

  VT_END_REGION( buffered_sends );


/*
 * Buffered sends
 */

  VT_BEGIN_REGION( buffered_persistent_sends );

  if (rank == 0)
    printf ("Buffered Persistent Send\n");

  tag = 238;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);

    LOCDEF();

    MPI_Buffer_attach(buffered_send_buf, sizeof(buffered_send_buf));
    MPI_Bsend_init(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, requests);
    MPI_Start(requests);
    MPI_Wait(requests, statuses);
    MPI_Request_free(requests);
    MPI_Buffer_detach(&buffer, &size);
    if(buffer != buffered_send_buf || size != sizeof(buffered_send_buf)) {
        printf ("[%d] Unexpected buffer returned by MPI_Buffer_detach(): %p/%d != %p/%d\n", rank, buffer, size, buffered_send_buf, (int)sizeof(buffered_send_buf));
        MPI_Abort(MPI_COMM_WORLD, 201);
    }
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
              MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv");
  }
  else {

    LOCDEF();

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);

  }

  VT_END_REGION( buffered_persistent_sends );


/*
 * Ready sends.  Note that we must ensure that the receive is posted
 * before the rsend; this requires using Irecv.
 */


  VT_BEGIN_REGION( ready_sends );

  if (rank == 0)
    printf ("Rsend\n");

  tag = 1456;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
    MPI_Recv(MPI_BOTTOM, 0, MPI_INT, next, tag, MPI_COMM_WORLD, &status);
    MPI_Rsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
    MPI_Probe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &status);
    if (status.MPI_SOURCE != prev)
      printf ("Incorrect src, expected %d, got %d\n",prev, status.MPI_SOURCE);

    if (status.MPI_TAG != tag)
      printf ("Incorrect tag, expected %d, got %d\n",tag, status.MPI_TAG);

    MPI_Get_count(&status, MPI_DOUBLE, &i);
    if (i != count)
      printf ("Incorrect count, expected %d, got %d\n",count,i);

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);

    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "rsend and recv");
  }
  else {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests);
    MPI_Send( MPI_BOTTOM, 0, MPI_INT, next, tag, MPI_COMM_WORLD);
    MPI_Wait(requests, &status);

    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "rsend and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
  }

  VT_END_REGION( ready_sends );

/*
 * Synchronous sends
 */

  VT_BEGIN_REGION( sync_sends );

  if (rank == 0)
    printf ("Ssend\n");

  tag = 1789;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
    MPI_Iprobe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &flag, &status);
    if (flag)
      printf ("Iprobe succeeded! source %d, tag %d\n",status.MPI_SOURCE,
                                                      status.MPI_TAG);

    MPI_Ssend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);

    while (!flag)
      MPI_Iprobe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &flag, &status);

    if (status.MPI_SOURCE != prev)
      printf ("Incorrect src, expected %d, got %d\n",prev, status.MPI_SOURCE);

    if (status.MPI_TAG != tag)
      printf ("Incorrect tag, expected %d, got %d\n",tag, status.MPI_TAG);

    MPI_Get_count(&status, MPI_DOUBLE, &i);

    if (i != count)
      printf ("Incorrect count, expected %d, got %d\n",count,i);

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "ssend and recv");
  }
  else {
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "ssend and recv"); init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Ssend(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
  }

  VT_END_REGION( sync_sends );

/*
 * Nonblocking normal sends
 */

  VT_BEGIN_REGION( nblock_sends );

  if (rank == 0)
    printf ("Isend\n");

  tag = 2123;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests);
    init_test_data(send_buf,TEST_SIZE,0);
    MPI_Isend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,
              (requests+1));
    MPI_Waitall(2, requests, statuses);
    rq_check( requests, 2, "isend and irecv" );

    msg_check(recv_buf,prev,tag,count,statuses, TEST_SIZE,"isend and irecv");
  }
  else {
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check(recv_buf,prev,tag,count,&status, TEST_SIZE,"isend and irecv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Isend(recv_buf, count, MPI_DOUBLE, next, tag,MPI_COMM_WORLD,
              (requests));
    MPI_Wait((requests), &status);
    rq_check(requests, 1, "isend (and recv)");
  }



  VT_END_REGION( nblock_sends );

/*
 * Nonblocking ready sends
 */


  VT_BEGIN_REGION( nblock_rsends );

  if (rank == 0)
    printf ("Irsend\n");

  tag = 2456;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests);
    init_test_data(send_buf,TEST_SIZE,0);
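    /* The zero-byte Sendrecv on dupcom below acts as a handshake: each side
       posts its Irecv before entering the Sendrecv, so by the time it returns
       the ready send that follows has a matching receive outstanding. */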
    MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, next, 0,
                  MPI_BOTTOM, 0, MPI_INT, next, 0,
                  dupcom, &status);
    MPI_Irsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,
               (requests+1));
    reqcount = 0;
    while (reqcount != 2) {
      MPI_Waitany( 2, requests, &index, statuses);
      if( index == 0 ) {
	  memcpy( &status, statuses, sizeof(status) );
      }
      reqcount++;
    }

    rq_check( requests, 1, "irsend and irecv");
    msg_check(recv_buf,prev,tag,count,&status, TEST_SIZE,"irsend and irecv");
  }
  else {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests);
    MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, next, 0,
                  MPI_BOTTOM, 0, MPI_INT, next, 0,
                  dupcom, &status);
    flag = 0;
    while (!flag)
      MPI_Test(requests, &flag, &status);

    rq_check( requests, 1, "irsend and irecv (test)");
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "irsend and irecv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Irsend(recv_buf, count, MPI_DOUBLE, next, tag,
               MPI_COMM_WORLD, requests);
    MPI_Waitall(1, requests, statuses);
    rq_check( requests, 1, "irsend and irecv");
  }

  VT_END_REGION( nblock_rsends );

/*
 * Nonblocking synchronous sends
 */

  VT_BEGIN_REGION( nblock_ssends );

  if (rank == 0)
    printf ("Issend\n");

  tag = 2789;
  count = TEST_SIZE / 3;
  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests );
    init_test_data(send_buf,TEST_SIZE,0);
    MPI_Issend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,
               (requests+1));
    flag = 0;
    while (!flag)
      MPI_Testall(2, requests, &flag, statuses);

    rq_check( requests, 2, "issend and irecv (testall)");
    msg_check( recv_buf, prev, tag, count, statuses, TEST_SIZE, 
               "issend and recv");
  }
  else {
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "issend and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Issend(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,requests);

    flag = 0;
    while (!flag)
      MPI_Testany(1, requests, &index, &flag, statuses);

    rq_check( requests, 1, "issend and recv (testany)");
  }


  VT_END_REGION( nblock_ssends );


/*
 * Persistent normal sends
 */

  VT_BEGIN_REGION( pers_sends );

  if (rank == 0)
    printf ("Send_init\n");

  tag = 3123;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  MPI_Send_init(send_buf, count, MPI_DOUBLE, next, tag,
                MPI_COMM_WORLD, requests);
  MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
                MPI_COMM_WORLD, (requests+1));

  if (rank == 0) {
    init_test_data(send_buf,TEST_SIZE,0);
    MPI_Startall(2, requests);
    MPI_Waitall(2, requests, statuses);
    msg_check( recv_buf, prev, tag, count, (statuses+1),
               TEST_SIZE, "persistent send/recv");
  }
  else {
    MPI_Start((requests+1));
    MPI_Wait((requests+1), &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "persistent send/recv");
    init_test_data(send_buf,TEST_SIZE,1);


    MPI_Start(requests);
    MPI_Wait(requests, &status);
  }
  MPI_Request_free(requests);
  MPI_Request_free((requests+1));


  VT_END_REGION( pers_sends );

/*
 * Persistent ready sends
 */

  VT_BEGIN_REGION( pers_rsends );

  if (rank == 0)
    printf ("Rsend_init\n");

  tag = 3456;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  MPI_Rsend_init(send_buf, count, MPI_DOUBLE, next, tag,
                  MPI_COMM_WORLD, requests);
  MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
                 MPI_ANY_TAG, MPI_COMM_WORLD, (requests+1));

  if (rank == 0) {
    init_test_data(send_buf,TEST_SIZE,0);
    MPI_Barrier( MPI_COMM_WORLD );
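    /* The barrier guarantees the partner has already started its persistent
       receive, which the persistent ready send (Rsend_init) requires. */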
    MPI_Startall(2, requests);
    reqcount = 0;
    while (reqcount != 2) {
      MPI_Waitsome(2, requests, &outcount, indices, statuses);
      for (i=0; i<outcount; i++) {
        if (indices[i] == 1) {
          msg_check( recv_buf, prev, tag, count, (statuses+i),
                     TEST_SIZE, "waitsome");
        }
	reqcount++;
      }
    }
  }
  else {
    MPI_Start((requests+1));
    MPI_Barrier( MPI_COMM_WORLD );
    flag = 0;
    while (!flag)
      MPI_Test((requests+1), &flag, &status);

    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "test");

    init_test_data(send_buf,TEST_SIZE,1);

 
    MPI_Start(requests);
    MPI_Wait(requests, &status);
  }
  MPI_Request_free(requests);
  MPI_Request_free((requests+1));


  VT_END_REGION( pers_rsends );


/*
 * Persistent synchronous sends
 */


  VT_BEGIN_REGION( pers_ssends );

  if (rank == 0)
    printf ("Ssend_init\n");

  tag = 3789;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  MPI_Ssend_init(send_buf, count, MPI_DOUBLE, next, tag,
                 MPI_COMM_WORLD, (requests+1));
  MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
                 MPI_ANY_TAG, MPI_COMM_WORLD, requests);

  if (rank == 0) {
    init_test_data(send_buf,TEST_SIZE,0);
    MPI_Startall(2, requests);

    reqcount = 0;
    while (reqcount != 2) {
      MPI_Testsome(2, requests, &outcount, indices, statuses);
      for (i=0; i<outcount; i++) {
        if (indices[i] == 0) {
          msg_check( recv_buf, prev, tag, count, (statuses+i),
                     TEST_SIZE, "testsome");
        }
	reqcount++;
      }
    }
  }
  else {
    MPI_Start(requests);
    flag = 0;
    while (!flag)
      MPI_Testany(1, requests, &index, &flag, statuses);

    msg_check( recv_buf, prev, tag, count, statuses, TEST_SIZE, "testany" );

    init_test_data(send_buf,TEST_SIZE,1);


    MPI_Start((requests+1));
    MPI_Wait((requests+1), &status);
  }
  MPI_Request_free(requests);
  MPI_Request_free((requests+1));


  VT_END_REGION( pers_ssends );


/*
 * Send/receive.
 */


  VT_BEGIN_REGION( sendrecv );

  if (rank == 0)
    printf ("Sendrecv\n");

  tag = 4123;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
    init_test_data(send_buf,TEST_SIZE,0);
    MPI_Sendrecv(send_buf, count, MPI_DOUBLE, next, tag,
                 recv_buf, count, MPI_DOUBLE, prev, tag,
                 MPI_COMM_WORLD, &status );

    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "sendrecv");
  }
  else {
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
             MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "recv/send");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
  }


  VT_END_REGION( sendrecv );

#ifdef V_T
  VT_flush();
#endif


/*
 * Send/receive replace.
 */

  VT_BEGIN_REGION( sendrecv_repl );

  if (rank == 0)
    printf ("Sendrecv_replace\n");

  tag = 4456;
  count = TEST_SIZE / 3;

  if (rank == 0) {
    init_test_data(recv_buf, TEST_SIZE,0);
    for (i=count; i< TEST_SIZE; i++)
      recv_buf[i] = 0.0;

    MPI_Sendrecv_replace(recv_buf, count, MPI_DOUBLE,
                         next, tag, prev, tag, MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "sendrecvreplace");
  }
  else {
    clear_test_data(recv_buf,TEST_SIZE);
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
             MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "recv/send for replace");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
  }

  VT_END_REGION( sendrecv_repl );


/*
 * Send/Receive via inter-communicator
 */

  VT_BEGIN_REGION( intercomm );

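  /* Each rank forms a one-process local group (MPI_COMM_SELF) and names its
     neighbour 'next' in MPI_COMM_WORLD as the remote leader. */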
  MPI_Intercomm_create(MPI_COMM_SELF, 0, MPI_COMM_WORLD, next, 1, &intercom);

  if (rank == 0)
    printf ("Send via inter-communicator\n");

  tag = 4018;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
    init_test_data(send_buf,TEST_SIZE,0);

    LOCDEF();

    MPI_Send(send_buf, count, MPI_DOUBLE, 0, tag, intercom);
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
              MPI_ANY_TAG, intercom, &status);
    msg_check(recv_buf, 0, tag, count, &status, TEST_SIZE, "send and recv via inter-communicator");
  }
  else if (rank == 1) {

    LOCDEF();

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             intercom, &status);
    msg_check( recv_buf, 0, tag, count, &status, TEST_SIZE,"send and recv via inter-communicator");
    init_test_data(recv_buf,TEST_SIZE,0);
    MPI_Send(recv_buf, count, MPI_DOUBLE, 0, tag, intercom);

  }

  VT_END_REGION( intercomm );



  MPI_Comm_free(&intercom);
  MPI_Comm_free(&dupcom);
} 
Ejemplo n.º 21
0
//==============================================================================
//---------------------------------------------------------------------------
//ComputeRecvs Method
//---------------------------------------------------------------------------
int Epetra_MpiDistributor::ComputeRecvs_( int my_proc,
                                          int nprocs )
{
  int * msg_count = new int[ nprocs ];
  int * counts = new int[ nprocs ];

  int i;
  MPI_Status status;

  for( i = 0; i < nprocs; i++ )
  {
    msg_count[i] = 0;
    counts[i] = 1;
  }

  for( i = 0; i < nsends_+self_msg_; i++ )
    msg_count[ procs_to_[i] ] = 1;

#if defined(REDUCE_SCATTER_BUG)
// The bug was observed with MPICH on Linux platforms.
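// Work around it by emulating MPI_Reduce_scatter with a reduce to rank 0 followed by a scatter.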
  MPI_Reduce(msg_count, counts, nprocs, MPI_INT, MPI_SUM, 0, comm_);
  MPI_Scatter(counts, 1, MPI_INT, &nrecvs_, 1, MPI_INT, 0, comm_);
#else
  MPI_Reduce_scatter( msg_count, &nrecvs_, counts, MPI_INT, MPI_SUM, comm_ );
#endif

  delete [] msg_count;
  delete [] counts;

  if (nrecvs_>0) {
    lengths_from_ = new int[nrecvs_];
    procs_from_ = new int[nrecvs_];
    for(i=0; i<nrecvs_; ++i) {
      lengths_from_[i] = 0;
      procs_from_[i] = 0;
    }
  }

#ifndef NEW_COMM_PATTERN
  for( i = 0; i < (nsends_+self_msg_); i++ )
    if( procs_to_[i] != my_proc ) {
      MPI_Send( &(lengths_to_[i]), 1, MPI_INT, procs_to_[i], tag_, comm_ );
    }
    else
    {
      //set self_msg_ to end block of recv arrays
      lengths_from_[nrecvs_-1] = lengths_to_[i];
      procs_from_[nrecvs_-1] = my_proc;
    }

  for( i = 0; i < (nrecvs_-self_msg_); i++ )
  {
    MPI_Recv( &(lengths_from_[i]), 1, MPI_INT, MPI_ANY_SOURCE, tag_, comm_, &status );
    procs_from_[i] = status.MPI_SOURCE;
  }

  MPI_Barrier( comm_ );
#else
  if (nrecvs_>0) {
    if( !request_ ) {
      request_ = new MPI_Request[nrecvs_-self_msg_];
      status_ = new MPI_Status[nrecvs_-self_msg_];
    }
  }

  for( i = 0; i < (nrecvs_-self_msg_); i++ )
    MPI_Irecv( &(lengths_from_[i]), 1, MPI_INT, MPI_ANY_SOURCE, tag_, comm_, &(request_[i]) );

  MPI_Barrier( comm_ );
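  // After the barrier every rank has posted its receives above,
  // so the ready sends below are guaranteed a matching receive.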

  for( i = 0; i < (nsends_+self_msg_); i++ )
    if( procs_to_[i] != my_proc ) {
      MPI_Rsend( &(lengths_to_[i]), 1, MPI_INT, procs_to_[i], tag_, comm_ );
    }
    else
    {
      //set self_msg_ to end block of recv arrays
      lengths_from_[nrecvs_-1] = lengths_to_[i];
      procs_from_[nrecvs_-1] = my_proc;
    }

  if( (nrecvs_-self_msg_) > 0 ) MPI_Waitall( (nrecvs_-self_msg_), request_, status_ );

  for( i = 0; i < (nrecvs_-self_msg_); i++ )
    procs_from_[i] = status_[i].MPI_SOURCE;
#endif

  Sort_ints_( procs_from_, lengths_from_, nrecvs_ );

  // Compute indices_from_
  // Seems to break some reverse communication
/* Not necessary since reverse communication is always blocked
  size_indices_from_ = 0;
  if( nrecvs_ > 0 )
  {
    for( i = 0; i < nrecvs_; i++ )  size_indices_from_ += lengths_from_[i];
    indices_from_ = new int[ size_indices_from_ ];

    for (i=0; i<size_indices_from_; i++) indices_from_[i] = i;
  }
*/

  if (nrecvs_>0) starts_from_ = new int[nrecvs_];
  int j = 0;
  for( i=0; i<nrecvs_; ++i )
  {
    starts_from_[i] = j;
    j += lengths_from_[i];
  }

  total_recv_length_ = 0;
  for( i = 0; i < nrecvs_; i++ )
    total_recv_length_ += lengths_from_[i];

  nrecvs_ -= self_msg_;

  MPI_Barrier( comm_ );
  
  return 0;
}
Ejemplo n.º 22
0
static double
do_one_Test1_trial(int num_ops, int msg_len, int dest)
{

    int j;
    double t, t2;
    MPI_Request latencyflag[SHORT_MSG_OPS];


    /* Touch the memory we are going to use */
    memset(aligned_buf, 0xAA, msg_len);
    memset(aligned_buf, 0x55, msg_len);

    /* Do a warm-up */
    j= 0;
    if (my_rank == 0)   {
	MPI_Irecv(aligned_buf, msg_len, MPI_BYTE, dest, TAG_LATENCY, MPI_COMM_WORLD, &latencyflag[j]);
	MPI_Recv(NULL, 0, MPI_BYTE, dest, TAG_READY, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	MPI_Rsend(aligned_buf, msg_len, MPI_BYTE, dest, TAG_LATENCY, MPI_COMM_WORLD);
	MPI_Wait(&latencyflag[j], MPI_STATUS_IGNORE);

    } else if (my_rank == dest)   {
	MPI_Irecv(aligned_buf, msg_len, MPI_BYTE, 0, TAG_LATENCY, MPI_COMM_WORLD, &latencyflag[j]);
	MPI_Send(NULL, 0, MPI_BYTE, 0, TAG_READY, MPI_COMM_WORLD);
	MPI_Wait(&latencyflag[j], MPI_STATUS_IGNORE);
	MPI_Rsend(aligned_buf, msg_len, MPI_BYTE, 0, TAG_LATENCY, MPI_COMM_WORLD);
    }



    /* Post warm-up: Run the actual experiment */
    t2= 0.0;
    if (my_rank == 0)   {
	/* Pre-post receive(s) */
	for (j= 0; j < num_ops; j++)   {
	    MPI_Irecv(aligned_buf, msg_len, MPI_BYTE, dest, TAG_LATENCY, MPI_COMM_WORLD, &latencyflag[j]);
	}

	/* Wait for ACK from other node */
	MPI_Recv(NULL, 0, MPI_BYTE, dest, TAG_READY, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
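	/* The ACK also tells us the destination has pre-posted all of its
	   receives, which makes the ready sends in the timed loop legal. */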

	/* Start the timer */
	t= MPI_Wtime();
	for (j= 0; j < num_ops; j++)   {
	    MPI_Rsend(aligned_buf, msg_len, MPI_BYTE, dest, TAG_LATENCY, MPI_COMM_WORLD);
	    MPI_Wait(&latencyflag[j], MPI_STATUS_IGNORE);
	}
	t2= MPI_Wtime() - t;

    } else if (my_rank == dest)   {
	/* Pre-post receive(s) */ 
	for (j= 0; j < num_ops; j++)   {
	    MPI_Irecv(aligned_buf, msg_len, MPI_BYTE, 0, TAG_LATENCY, MPI_COMM_WORLD, &latencyflag[j]);
	}

	/* Send ACK */
	MPI_Send(NULL, 0, MPI_BYTE, 0, TAG_READY, MPI_COMM_WORLD);

	for (j= 0; j < num_ops; j++)   {
	    MPI_Wait(&latencyflag[j], MPI_STATUS_IGNORE);
	    MPI_Rsend(aligned_buf, msg_len, MPI_BYTE, 0, TAG_LATENCY, MPI_COMM_WORLD);
	}
    }

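    /* t2 is the total round-trip time for num_ops ping-pong exchanges;
       dividing by num_ops and by 2 estimates the one-way latency. */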
    return t2 / num_ops / 2.0;

}  /* end of do_one_Test1_trial() */
Ejemplo n.º 23
0
//==============================================================================
//---------------------------------------------------------------------------
//Resize Method                      (Heaphy) 
//---------------------------------------------------------------------------
int Epetra_MpiDistributor::Resize_( int * sizes )
{ 
  int  i, j, k;         // loop counters
  int sum; 
 
  //if (sizes == 0) return 0; 
     
  int my_proc; 
  MPI_Comm_rank (comm_, &my_proc);  
  int nprocs;
  MPI_Comm_size( comm_, &nprocs );
     
  if( resized_ )
  {  
    //test and see if we are already setup for these sizes
    bool match = true; 
    for( i = 0; i < nexports_; ++i )
      match = match && (sizes_[i]==sizes[i]);
    int matched = match?1:0;
    int match_count = 0;
    MPI_Allreduce( &matched, &match_count, 1, MPI_INT, MPI_SUM, comm_ );
    if( match_count == nprocs )
      return 0;
    else //reset existing sizing arrays
      max_send_length_ = 0;
  } 
 
  if( !sizes_ && nexports_ ) sizes_ = new int[nexports_]; 
  for (i = 0; i < nexports_; i++)
    sizes_[i] = sizes[i]; 
 
  if( !sizes_to_ && (nsends_+self_msg_) ) sizes_to_ = new int[nsends_+self_msg_];
  for (i = 0; i < (nsends_+self_msg_); ++i) 
    sizes_to_[i] = 0;                       
 
  if( !starts_to_ptr_ && (nsends_+self_msg_) ) starts_to_ptr_ = new int[nsends_+self_msg_];

  if( !indices_to_ ) //blocked sends
  {
    
    int * index = 0;
    int * sort_val = 0;
    if (nsends_+self_msg_>0) {
      index = new int[nsends_+self_msg_];
      sort_val = new int[nsends_+self_msg_];
    }
    for( i = 0; i < (nsends_+self_msg_); ++i )
    {
      j = starts_to_[i];
      for( k = 0; k < lengths_to_[i]; ++k )
        sizes_to_[i] += sizes[j++];
      if( (sizes_to_[i] > max_send_length_) && (procs_to_[i] != my_proc) )
        max_send_length_ = sizes_to_[i];
    }

    for( i = 0; i < (nsends_+self_msg_); ++i )
    {
      sort_val[i] = starts_to_[i];
      index[i] = i;
    }

    if( nsends_+self_msg_ )
      Sort_ints_( sort_val, index, (nsends_+self_msg_) );

    sum = 0;
    for( i = 0; i < (nsends_+self_msg_); ++i )
    {
      starts_to_ptr_[ index[i] ] = sum;
      sum += sizes_to_[ index[i] ];
    }

    if (index!=0) {delete [] index; index = 0;}
    if (sort_val!=0) {delete [] sort_val; sort_val = 0;}
  }
  else //Sends not blocked, so have to do more work
  {
    if( !indices_to_ptr_ && nexports_ ) indices_to_ptr_ = new int[nexports_]; 
    int * offset = 0;
    if( nexports_ ) offset = new int[nexports_];
   
    //Compute address for every item in send array
    sum = 0; 
    for( i = 0; i < nexports_; ++i )
    {  
      offset[i] = sum; 
      sum += sizes_[i];
    }
   
    sum = 0;
    max_send_length_ = 0;
    for( i = 0; i < (nsends_+self_msg_); ++i )
    {
      starts_to_ptr_[i] = sum;
      for( j = starts_to_[i]; j < (starts_to_[i]+lengths_to_[i]); ++j )
      {
        indices_to_ptr_[j] = offset[ indices_to_[j] ];
        sizes_to_[i] += sizes_[ indices_to_[j] ];
      }
      if( sizes_to_[i] > max_send_length_ && procs_to_[i] != my_proc )
        max_send_length_ = sizes_to_[i];
      sum += sizes_to_[i];
    }
 
    if (offset!=0) {delete [] offset; offset = 0;}
  }
 
  //  Exchange sizes routine inserted here:
  int self_index_to = -1;
  total_recv_length_ = 0;
  if( !sizes_from_ && (nrecvs_+self_msg_) ) sizes_from_ = new int [nrecvs_+self_msg_];

#ifndef EPETRA_NEW_COMM_PATTERN
  for (i = 0; i < (nsends_+self_msg_); i++)
  {
    if(procs_to_[i] != my_proc)
      MPI_Send ((void *) &(sizes_to_[i]), 1, MPI_INT, procs_to_[i], tag_, comm_);
    else
      self_index_to = i;
  }

  MPI_Status status;
  for (i = 0; i < (nrecvs_+self_msg_); ++i)
  {
    sizes_from_[i] = 0;
    if (procs_from_[i] != my_proc)
      MPI_Recv((void *) &(sizes_from_[i]), 1, MPI_INT, procs_from_[i], tag_, comm_, &status);
    else
      sizes_from_[i] = sizes_to_[self_index_to];
    total_recv_length_ += sizes_from_[i];
  }
#else
  if (nrecvs_>0 && !request_) {
    request_ = new MPI_Request[ nrecvs_-self_msg_ ];
    status_ = new MPI_Status[ nrecvs_-self_msg_ ];
  }

  for (i = 0; i < (nsends_+self_msg_); i++)
  {
    if(procs_to_[i] == my_proc)
      self_index_to = i;
  }

  for (i = 0; i < (nrecvs_+self_msg_); ++i)
  {
    sizes_from_[i] = 0;
    if (procs_from_[i] != my_proc)
      MPI_Irecv((void *) &(sizes_from_[i]), 1, MPI_INT, procs_from_[i], tag_, comm_, &(request_[i]));
    else
    {
      sizes_from_[i] = sizes_to_[self_index_to];
      total_recv_length_ += sizes_from_[i];
    }
  }

  MPI_Barrier( comm_ );
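  // After the barrier all size receives are posted, so the ready sends below are safe.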

  for (i = 0; i < (nsends_+self_msg_); i++)
  {
    if(procs_to_[i] != my_proc)
      MPI_Rsend ((void *) &(sizes_to_[i]), 1, MPI_INT, procs_to_[i], tag_, comm_);
  }

  if( nrecvs_ > 0 ) MPI_Waitall( nrecvs_, request_, status_ );

  for (i = 0; i < (nrecvs_+self_msg_); ++i)
  {
    if (procs_from_[i] != my_proc)
      total_recv_length_ += sizes_from_[i];
  }
#endif
  // end of exchange-sizes insert
 
  sum = 0;
  if( !starts_from_ptr_ ) starts_from_ptr_  = new int[nrecvs_+self_msg_]; 
  for (i = 0; i < (nrecvs_+self_msg_); ++i)
  {
     starts_from_ptr_[i] = sum;
     sum += sizes_from_[i];
  }

  resized_ = true;

  return 0;
}
Ejemplo n.º 24
0
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int rank, size;
    int count;
    MPI_Comm comm;
    MPI_Request req;
    MTestDatatype sendtype, recvtype;

    MTest_Init(&argc, &argv);

    comm = MPI_COMM_WORLD;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    /* To improve reporting of problems about operations, we
     * change the error handler to errors return */
    MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);

    MTEST_DATATYPE_FOR_EACH_COUNT(count) {
        while (MTestGetDatatypes(&sendtype, &recvtype, count)) {

            sendtype.InitBuf(&sendtype);
            recvtype.InitBuf(&recvtype);

            err = MPI_Irecv(recvtype.buf, recvtype.count, recvtype.datatype, rank, 0, comm, &req);
            if (err) {
                errs++;
                if (errs < 10) {
                    MTestPrintError(err);
                }
            }

            err = MPI_Send(sendtype.buf, sendtype.count, sendtype.datatype, rank, 0, comm);
            if (err) {
                errs++;
                if (errs < 10) {
                    MTestPrintError(err);
                }
            }
            err = MPI_Wait(&req, MPI_STATUS_IGNORE);
            err = MTestCheckRecv(0, &recvtype);
            if (err) {
                if (errs < 10) {
                    printf
                        ("Data in target buffer did not match for destination datatype %s and source datatype %s, count = %d\n",
                         MTestGetDatatypeName(&recvtype), MTestGetDatatypeName(&sendtype), count);
                    recvtype.printErrors = 1;
                    (void) MTestCheckRecv(0, &recvtype);
                }
                errs += err;
            }

            err = MPI_Irecv(recvtype.buf, recvtype.count, recvtype.datatype, rank, 0, comm, &req);
            if (err) {
                errs++;
                if (errs < 10) {
                    MTestPrintError(err);
                }
            }

            err = MPI_Ssend(sendtype.buf, sendtype.count, sendtype.datatype, rank, 0, comm);
            if (err) {
                errs++;
                if (errs < 10) {
                    MTestPrintError(err);
                }
            }
            err = MPI_Wait(&req, MPI_STATUS_IGNORE);
            err = MTestCheckRecv(0, &recvtype);
            if (err) {
                if (errs < 10) {
                    printf
                        ("Data in target buffer did not match for destination datatype %s and source datatype %s, count = %d\n",
                         MTestGetDatatypeName(&recvtype), MTestGetDatatypeName(&sendtype), count);
                    recvtype.printErrors = 1;
                    (void) MTestCheckRecv(0, &recvtype);
                }
                errs += err;
            }

            err = MPI_Irecv(recvtype.buf, recvtype.count, recvtype.datatype, rank, 0, comm, &req);
            if (err) {
                errs++;
                if (errs < 10) {
                    MTestPrintError(err);
                }
            }

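            /* The matching receive was already posted with MPI_Irecv above
               (send and receive are on the same rank), so MPI_Rsend is valid. */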
            err = MPI_Rsend(sendtype.buf, sendtype.count, sendtype.datatype, rank, 0, comm);
            if (err) {
                errs++;
                if (errs < 10) {
                    MTestPrintError(err);
                }
            }
            err = MPI_Wait(&req, MPI_STATUS_IGNORE);
            err = MTestCheckRecv(0, &recvtype);
            if (err) {
                if (errs < 10) {
                    printf
                        ("Data in target buffer did not match for destination datatype %s and source datatype %s, count = %d\n",
                         MTestGetDatatypeName(&recvtype), MTestGetDatatypeName(&sendtype), count);
                    recvtype.printErrors = 1;
                    (void) MTestCheckRecv(0, &recvtype);
                }
                errs += err;
            }

            MTestFreeDatatype(&sendtype);
            MTestFreeDatatype(&recvtype);
        }
    }

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}