Example #1
File: slave.c Project: artpol84/poc
int main(int argc, char *argv[]) 
{ 
    int rank, size;
    int lsize, rsize;
    int grank, gsize;
    MPI_Comm parent, global;
    MPI_Init(&argc, &argv);
    // Local info
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Global info
    MPI_Comm_get_parent(&parent);
    if (parent == MPI_COMM_NULL)
        error("No parent!");
    MPI_Comm_remote_size(parent, &size);

    MPI_Comm_size(parent, &lsize);
    MPI_Comm_remote_size(parent, &rsize);

    MPI_Intercomm_merge(parent, 1, &global);
    MPI_Comm_rank(global, &grank);
    MPI_Comm_size(global, &gsize);
    printf("child %d: lsize=%d, rsize=%d, grank=%d, gsize=%d\n",
           rank, lsize, rsize, grank, gsize);

    MPI_Barrier(global);

    printf("%d: after Barrier\n", grank);

    MPI_Comm_free(&global);

    MPI_Finalize();
    return 0;
} 
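In this child program `parent` is an intercommunicator, so MPI_Comm_size(parent, ...) returns the size of the child's own local group while MPI_Comm_remote_size(parent, ...) returns the number of processes in the parent's group. A minimal, self-contained sketch of that distinction (the helper name report_intercomm_sizes is illustrative and not part of the project above):

#include <mpi.h>
#include <stdio.h>

/* Illustrative helper (not from the project above): print the local and
   remote group sizes of an intercommunicator, e.g. the one returned by
   MPI_Comm_get_parent() in a spawned child. */
static void report_intercomm_sizes(MPI_Comm intercomm)
{
    int lsize, rsize, flag;

    MPI_Comm_test_inter(intercomm, &flag);
    if (!flag) {
        fprintf(stderr, "report_intercomm_sizes: not an intercommunicator\n");
        return;
    }
    MPI_Comm_size(intercomm, &lsize);        /* size of my own group */
    MPI_Comm_remote_size(intercomm, &rsize); /* size of the other group */
    printf("local group: %d, remote group: %d\n", lsize, rsize);
}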
Example #2
static void emulate_armci_init_clusinfo()
{
    int psize;

    MPI_Comm_remote_size(MPI_COMM_SERVER2CLIENT, &psize);

    /* server id (i.e. server's armci_me) is derived from node master's id.
       Similar to armci_create_server_process() to set SERVER_CONTEXT */
    armci_me        = SOFFSET - armci_client_first;
    armci_nproc     = psize;
    armci_usr_tid   = THREAD_ID_SELF(); /*remember the main user thread id */
    armci_master    = armci_client_first;

    /* ***** emulate armci_init_clusinfo() ***** */
    armci_clus_me    = armci_server_me;
    armci_nclus      = armci_nserver;
    armci_clus_first = armci_clus_info[armci_clus_me].master;
    armci_clus_last  = (armci_clus_first +
                        armci_clus_info[armci_clus_me].nslave - 1);

    if(armci_clus_first != armci_client_first ||
            armci_nclients   != armci_clus_info[armci_clus_me].nslave)
    {
        armci_mpi2_server_debug(armci_server_me,
                                "armci_clus_first=%d, armci_clus_last=%d\n",
                                armci_clus_first, armci_clus_last);
        armci_die("mpi2_server: armci_clus_info is incorrect.", 0);
    }
}
Example #3
int ompi_coll_libnbc_ibarrier_inter(struct ompi_communicator_t *comm, ompi_request_t ** request,
                                    struct mca_coll_base_module_2_1_0_t *module)
{
  int rank, res, rsize, peer;
  NBC_Schedule *schedule;
  NBC_Handle *handle;
  ompi_coll_libnbc_request_t **coll_req = (ompi_coll_libnbc_request_t**) request;
  ompi_coll_libnbc_module_t *libnbc_module = (ompi_coll_libnbc_module_t*) module;

  res = NBC_Init_handle(comm, coll_req, libnbc_module);
  if(res != NBC_OK) { printf("Error in NBC_Init_handle(%i)\n", res); return res; }
  handle = (*coll_req);

  res = MPI_Comm_rank(comm, &rank);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; }
  res = MPI_Comm_remote_size(comm, &rsize);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_remote_size() (%i)\n", res); return res; }

  handle->tmpbuf=(void*)malloc(2*sizeof(char));

  schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule));
  if (NULL == schedule) { printf("Error in malloc()\n"); return res; }

  res = NBC_Sched_create(schedule);
  if(res != NBC_OK) { printf("Error in NBC_Sched_create (%i)\n", res); return res; }

  if (0 == rank) {
    for (peer = 1 ; peer < rsize ; ++peer) {
      res = NBC_Sched_recv (0, true, 1, MPI_BYTE, peer, schedule);
      if (NBC_OK != res) { printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }
    }
  }

  /* synchronize with the remote root */
  res = NBC_Sched_recv (0, true, 1, MPI_BYTE, 0, schedule);
  if (NBC_OK != res) { printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }

  res = NBC_Sched_send (0, true, 1, MPI_BYTE, 0, schedule);
  if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }

  if (0 == rank) {
    /* wait for the remote root */
    res = NBC_Sched_barrier(schedule);
    if (NBC_OK != res) { printf("Error in NBC_Sched_barrier() (%i)\n", res); return res; }

    /* inform remote peers that all local peers have entered the barrier */
    for (peer = 0 ; peer < rsize ; ++peer) {
      res = NBC_Sched_send (0, true, 1, MPI_BYTE, peer, schedule);
      if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }
    }
  }

  res = NBC_Sched_commit(schedule);
  if (NBC_OK != res) { printf("Error in NBC_Sched_commit() (%i)\n", res); return res; }

  res = NBC_Start(handle, schedule);
  if (NBC_OK != res) { printf("Error in NBC_Start() (%i)\n", res); return res; }

  return NBC_OK;
}
Example #4
FORTRAN_API void FORT_CALL mpi_comm_remote_size_ ( MPI_Fint *comm, MPI_Fint *size, MPI_Fint *__ierr )
{
    int l_size;

    *__ierr = MPI_Comm_remote_size( MPI_Comm_f2c(*comm), &l_size);
    *size = l_size;
}
Example #5
int MPIPortsCommunication::getRemoteCommunicatorSize()
{
  preciceTrace ( "getRemoteCommunicatorSize()" );
  assertion ( _isConnection );
  int remoteSize = 0;
  MPI_Comm_remote_size ( communicator(), &remoteSize );
  return remoteSize;
}
Example #6
/* simple linear Alltoallv */
int ompi_coll_libnbc_ialltoallv_inter (void* sendbuf, int *sendcounts, int *sdispls,
				       MPI_Datatype sendtype, void* recvbuf, int *recvcounts, int *rdispls,
				       MPI_Datatype recvtype, struct ompi_communicator_t *comm, ompi_request_t ** request,
				       struct mca_coll_base_module_2_0_0_t *module)
{
  int rank, res, i, rsize;
  MPI_Aint sndext, rcvext;
  NBC_Schedule *schedule;
  char *rbuf, *sbuf;
  NBC_Handle *handle;
  ompi_coll_libnbc_request_t **coll_req = (ompi_coll_libnbc_request_t**) request;
  ompi_coll_libnbc_module_t *libnbc_module = (ompi_coll_libnbc_module_t*) module;

  res = NBC_Init_handle(comm, coll_req, libnbc_module);
  if(res != NBC_OK) { printf("Error in NBC_Init_handle(%i)\n", res); return res; }
  handle = (*coll_req);
  res = MPI_Comm_rank(comm, &rank);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; }
  res = MPI_Type_extent(sendtype, &sndext);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }
  res = MPI_Type_extent(recvtype, &rcvext);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }

  MPI_Comm_remote_size (comm, &rsize);

  schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule));
  if (NULL == schedule) { printf("Error in malloc() (%i)\n", res); return res; }

  handle->tmpbuf=NULL;

  res = NBC_Sched_create(schedule);
  if(res != NBC_OK) { printf("Error in NBC_Sched_create (%i)\n", res); return res; }

  for (i = 0; i < rsize; i++) {
    /* post all sends */
    if(sendcounts[i] != 0) {
      sbuf = ((char *) sendbuf) + (sdispls[i] * sndext);
      res = NBC_Sched_send(sbuf, false, sendcounts[i], sendtype, i, schedule);
      if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }
    }
    /* post all receives */
    if(recvcounts[i] != 0) {
      rbuf = ((char *) recvbuf) + (rdispls[i] * rcvext);
      res = NBC_Sched_recv(rbuf, false, recvcounts[i], recvtype, i, schedule);
      if (NBC_OK != res) { printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }
    }
  }

  /*NBC_PRINT_SCHED(*schedule);*/

  res = NBC_Sched_commit(schedule);
  if (NBC_OK != res) { printf("Error in NBC_Sched_commit() (%i)\n", res); return res; }

  res = NBC_Start(handle, schedule);
  if (NBC_OK != res) { printf("Error in NBC_Start() (%i)\n", res); return res; }

  return NBC_OK;
}
Example #7
int CWorkerGroup::nbworkers () const
{
  int size;

  // use macro?
  MPI_Comm_remote_size(m_comm, &size);

  return size;
}
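MPI_Comm_remote_size is defined only for intercommunicators; on an intracommunicator the call is erroneous. When a routine may be handed either kind of communicator, MPI_Comm_test_inter can select the right query. A hedged sketch (the helper name group_peer_count is made up for illustration):

#include <mpi.h>

/* Illustrative helper: number of peers on "the other side" if comm is an
   intercommunicator, otherwise the size of comm itself. */
static int group_peer_count(MPI_Comm comm)
{
    int flag, n;

    MPI_Comm_test_inter(comm, &flag);  /* remote_size is only valid if flag != 0 */
    if (flag)
        MPI_Comm_remote_size(comm, &n);
    else
        MPI_Comm_size(comm, &n);
    return n;
}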
Example #8
int main(int argc, char *argv[])
{
    MPI_Status status;
    MPI_Comm comm, scomm;
    int a[10], b[10];
    int buf[BUFSIZE], *bptr, bl, i, j, rank, size, color, errs = 0;

    MTest_Init(0, 0);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    color = rank % 2;
    MPI_Comm_split(MPI_COMM_WORLD, color, rank, &scomm);
    MPI_Intercomm_create(scomm, 0, MPI_COMM_WORLD, 1 - color, 52, &comm);
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_remote_size(comm, &size);
    MPI_Buffer_attach(buf, BUFSIZE);

    for (j = 0; j < 10; j++) {
        for (i = 0; i < 10; i++) {
            a[i] = (rank + 10 * j) * size + i;
        }
        MPI_Bsend(a, 10, MPI_INT, 0, 27 + j, comm);
    }
    if (rank == 0) {

        for (i = 0; i < size; i++) {
            for (j = 0; j < 10; j++) {
                int k;
                status.MPI_TAG = -10;
                status.MPI_SOURCE = -20;
                MPI_Recv(b, 10, MPI_INT, i, 27 + j, comm, &status);

                if (status.MPI_TAG != 27 + j) {
                    errs++;
                    printf("Wrong tag = %d\n", status.MPI_TAG);
                }
                if (status.MPI_SOURCE != i) {
                    errs++;
                    printf("Wrong source = %d\n", status.MPI_SOURCE);
                }
                for (k = 0; k < 10; k++) {
                    if (b[k] != (i + 10 * j) * size + k) {
                        errs++;
                        printf("received b[%d] = %d from %d tag %d\n", k, b[k], i, 27 + j);
                    }
                }
            }
        }
    }
    MPI_Buffer_detach(&bptr, &bl);

    MPI_Comm_free(&scomm);
    MPI_Comm_free(&comm);

    MTest_Finalize(errs);

    return MTestReturnValue(errs);
}
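The test above attaches a fixed BUFSIZE buffer for its buffered sends. In general the attached buffer must hold every outstanding MPI_Bsend plus MPI_BSEND_OVERHEAD bytes per message, and MPI_Pack_size gives the packed size of the data. A minimal sizing sketch, assuming the same pattern as the loop above (10 messages of 10 ints); the helper name is illustrative and error handling is omitted:

#include <mpi.h>
#include <stdlib.h>

/* Sketch: attach a buffer large enough for n_msgs buffered sends of
   count MPI_INTs each on comm.  MPI_Pack_size reports the packed data
   size; MPI_BSEND_OVERHEAD covers per-message bookkeeping. */
static void attach_bsend_buffer(MPI_Comm comm, int n_msgs, int count)
{
    int pack_size, bufsize;
    void *buf;

    MPI_Pack_size(count, MPI_INT, comm, &pack_size);
    bufsize = n_msgs * (pack_size + MPI_BSEND_OVERHEAD);
    buf = malloc(bufsize);
    MPI_Buffer_attach(buf, bufsize);
}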
Example #9
    /*!
    \param [in] parent Pointer to the context on the client side
    \param [in] intraComm_ communicator of the client group
    \param [in] interComm_ communicator of the server group
    \param [in] cxtSer Pointer to the context on the server side (only used in attached mode)
    */
    CContextClient::CContextClient(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_, CContext* cxtSer)
     : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4)
    {
      context = parent;
      intraComm = intraComm_;
      interComm = interComm_;
      MPI_Comm_rank(intraComm, &clientRank);
      MPI_Comm_size(intraComm, &clientSize);

      int flag;
      MPI_Comm_test_inter(interComm, &flag);
      if (flag) MPI_Comm_remote_size(interComm, &serverSize);
      else  MPI_Comm_size(interComm, &serverSize);

      if (clientSize < serverSize)
      {
        int serverByClient = serverSize / clientSize;
        int remain = serverSize % clientSize;
        int rankStart = serverByClient * clientRank;

        if (clientRank < remain)
        {
          serverByClient++;
          rankStart += clientRank;
        }
        else
          rankStart += remain;

        for (int i = 0; i < serverByClient; i++)
          ranksServerLeader.push_back(rankStart + i);

        ranksServerNotLeader.resize(0);
      }
      else
      {
        int clientByServer = clientSize / serverSize;
        int remain = clientSize % serverSize;

        if (clientRank < (clientByServer + 1) * remain)
        {
          if (clientRank % (clientByServer + 1) == 0)
            ranksServerLeader.push_back(clientRank / (clientByServer + 1));
          else
            ranksServerNotLeader.push_back(clientRank / (clientByServer + 1));
        }
        else
        {
          int rank = clientRank - (clientByServer + 1) * remain;
          if (rank % clientByServer == 0)
            ranksServerLeader.push_back(remain + rank / clientByServer);
          else
            ranksServerNotLeader.push_back(remain + rank / clientByServer);
        }        
      }

      timeLine = 0;
    }
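For the clientSize < serverSize branch above, each client leads a contiguous block of serverSize/clientSize servers, and the first serverSize%clientSize clients take one extra. The standalone sketch below merely re-states that arithmetic outside the class (the function name is illustrative, not part of the constructor above):

#include <stdio.h>

/* Sketch of the leader assignment used above when clientSize < serverSize:
   print the contiguous block of server ranks led by clientRank. */
static void print_led_servers(int clientRank, int clientSize, int serverSize)
{
    int perClient = serverSize / clientSize;
    int remain    = serverSize % clientSize;
    int start     = perClient * clientRank;

    if (clientRank < remain) {
        perClient++;           /* the first 'remain' clients take one extra server */
        start += clientRank;
    } else {
        start += remain;
    }
    for (int i = 0; i < perClient; i++)
        printf("client %d leads server %d\n", clientRank, start + i);
}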
Example #10
int ompi_coll_libnbc_igatherv_inter (void* sendbuf, int sendcount, MPI_Datatype sendtype,
				     void* recvbuf, int *recvcounts, int *displs, MPI_Datatype recvtype,
				     int root, struct ompi_communicator_t *comm, ompi_request_t ** request,
				     struct mca_coll_base_module_2_0_0_t *module) {
  int rank, p, res, i, rsize;
  MPI_Aint rcvext;
  NBC_Schedule *schedule;
  char *rbuf;
  NBC_Handle *handle;
  ompi_coll_libnbc_request_t **coll_req = (ompi_coll_libnbc_request_t**) request;
  ompi_coll_libnbc_module_t *libnbc_module = (ompi_coll_libnbc_module_t*) module;

  res = NBC_Init_handle(comm, coll_req, libnbc_module);
  if(res != NBC_OK) { printf("Error in NBC_Init_handle(%i)\n", res); return res; }
  handle = (*coll_req);
  res = MPI_Comm_rank(comm, &rank);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; }
  res = MPI_Comm_size(comm, &p);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_size() (%i)\n", res); return res; }
  res = MPI_Comm_remote_size (comm, &rsize);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_remote_size() (%i)\n", res); return res; }

  if (MPI_ROOT == root) {
    res = MPI_Type_extent(recvtype, &rcvext);
    if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }
  }
  handle->tmpbuf = NULL;

  schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule));
  if (NULL == schedule) { printf("Error in malloc() (%i)\n", res); return res; }

  res = NBC_Sched_create(schedule);
  if(res != NBC_OK) { printf("Error in NBC_Sched_create (%i)\n", res); return res; }

  /* send to root */
  if (MPI_ROOT != root && MPI_PROC_NULL != root) {
    /* send msg to root */
    res = NBC_Sched_send(sendbuf, false, sendcount, sendtype, root, schedule);
    if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }
  } else if (MPI_ROOT == root) {
    for (i = 0 ; i < rsize ; ++i) {
      rbuf = ((char *)recvbuf) + (displs[i]*rcvext);
      /* root receives message to the right buffer */
      res = NBC_Sched_recv(rbuf, false, recvcounts[i], recvtype, i, schedule);
      if (NBC_OK != res) { printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }
    }
  }

  res = NBC_Sched_commit(schedule);
  if (NBC_OK != res) { printf("Error in NBC_Sched_commit() (%i)\n", res); return res; }

  res = NBC_Start(handle, schedule);
  if (NBC_OK != res) { printf("Error in NBC_Start() (%i)\n", res); return res; }

  return NBC_OK;
}
Example #11
int ompi_coll_libnbc_iallgatherv_inter(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int *recvcounts, int *displs,
				       MPI_Datatype recvtype, struct ompi_communicator_t *comm, ompi_request_t ** request,
				       struct mca_coll_base_module_2_1_0_t *module)
{
  int rank, res, r, rsize;
  MPI_Aint rcvext;
  NBC_Schedule *schedule;
  NBC_Handle *handle;
  ompi_coll_libnbc_request_t **coll_req = (ompi_coll_libnbc_request_t**) request;
  ompi_coll_libnbc_module_t *libnbc_module = (ompi_coll_libnbc_module_t*) module;

  res = NBC_Init_handle(comm, coll_req, libnbc_module);
  if(res != NBC_OK) { printf("Error in NBC_Init_handle(%i)\n", res); return res; }
  handle = (*coll_req);
  res = MPI_Comm_rank(comm, &rank);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; }
  res = MPI_Comm_remote_size(comm, &rsize);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_remote_size() (%i)\n", res); return res; }
  res = MPI_Type_extent(recvtype, &rcvext);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }

  schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule));
  if (NULL == schedule) { printf("Error in malloc() (%i)\n", res); return res; }

  handle->tmpbuf=NULL;

  res = NBC_Sched_create(schedule);
  if(res != NBC_OK) { printf("Error in NBC_Sched_create, (%i)\n", res); return res; }

  /* do rsize  rounds */
  for (r = 0 ; r < rsize ; ++r) {
    char *rbuf = ((char *)recvbuf) + (displs[r]*rcvext);

    if (recvcounts[r]) {
      res = NBC_Sched_recv(rbuf, false, recvcounts[r], recvtype, r, schedule);
      if (NBC_OK != res) { printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }
    }
  }

  if (sendcount) {
    for (r = 0 ; r < rsize ; ++r) {
      res = NBC_Sched_send(sendbuf, false, sendcount, sendtype, r, schedule);
      if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }
    }
  }

  res = NBC_Sched_commit(schedule);
  if (NBC_OK != res) { printf("Error in NBC_Sched_commit() (%i)\n", res); return res; }

  res = NBC_Start(handle, schedule);
  if (NBC_OK != res) { printf("Error in NBC_Start() (%i)\n", res); return res; }

  return NBC_OK;
}
Example #12
int ompi_coll_libnbc_ibcast_inter(void *buffer, int count, MPI_Datatype datatype, int root,
                                  struct ompi_communicator_t *comm, ompi_request_t ** request,
                                  struct mca_coll_base_module_2_0_0_t *module) {
  int rank, p, res, size, peer;
  NBC_Schedule *schedule;
  NBC_Handle *handle;
  ompi_coll_libnbc_request_t **coll_req = (ompi_coll_libnbc_request_t**) request;
  ompi_coll_libnbc_module_t *libnbc_module = (ompi_coll_libnbc_module_t*) module;

  res = NBC_Init_handle(comm, coll_req, libnbc_module);
  if(res != NBC_OK) { printf("Error in NBC_Init_handle(%i)\n", res); return res; }
  handle = (*coll_req);
  res = MPI_Comm_rank(comm, &rank);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; }
  res = MPI_Comm_size(comm, &p);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; }
  res = MPI_Type_size(datatype, &size);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_size() (%i)\n", res); return res; }
  
  handle->tmpbuf=NULL;

  schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule));
  if (NULL == schedule) { printf("Error in malloc()\n"); return NBC_OOR; }

  res = NBC_Sched_create(schedule);
  if(res != NBC_OK) { printf("Error in NBC_Sched_create, res = %i\n", res); return res; }

  if(root != MPI_PROC_NULL) {
    /* send to all others */
    if(root == MPI_ROOT) {
      int remsize;

      res = MPI_Comm_remote_size(comm, &remsize);
      if(MPI_SUCCESS != res) { printf("MPI_Comm_remote_size() failed\n"); return res; }

      for (peer=0;peer<remsize;peer++) {
        /* send msg to peer */
        res = NBC_Sched_send(buffer, false, count, datatype, peer, schedule);
        if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }
      }
    } else {
      /* recv msg from root */
      res = NBC_Sched_recv(buffer, false, count, datatype, root, schedule);
      if (NBC_OK != res) { printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }
    }
  }
  
  res = NBC_Sched_commit(schedule);
  if (NBC_OK != res) { printf("Error in NBC_Sched_commit() (%i)\n", res); return res; }
  
  res = NBC_Start(handle, schedule);
  if (NBC_OK != res) { printf("Error in NBC_Start() (%i)\n", res); return res; }
  
  return NBC_OK;
}
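The schedule above reproduces, in nonblocking form, the rooted semantics of an inter-communicator broadcast: in the group containing the root, the root passes MPI_ROOT and its peers pass MPI_PROC_NULL, while processes in the remote group pass the root's rank. A blocking sketch of the same calling pattern (assumed helper, not taken from the library code above):

#include <mpi.h>

/* Sketch: blocking broadcast across an intercommunicator.  root_side is
   nonzero in the group that owns the data; root_rank is the root's rank
   within that group. */
static void intercomm_bcast(void *buf, int count, MPI_Datatype type,
                            int root_side, int root_rank, MPI_Comm intercomm)
{
    int myrank;

    MPI_Comm_rank(intercomm, &myrank);   /* rank within my local group */
    if (root_side)
        MPI_Bcast(buf, count, type,
                  (myrank == root_rank) ? MPI_ROOT : MPI_PROC_NULL, intercomm);
    else
        MPI_Bcast(buf, count, type, root_rank, intercomm);
}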
Example #13
void Cache::orient(int *argc_ptr, char ***argv_ptr) {
   MPI_ASSERT(argc_ptr != NULL);
   MPI_ASSERT(argv_ptr != NULL);

   int argc = *argc_ptr;
   char **argv = *argv_ptr;

   MPI_ASSERT(argc >= 3);

#ifdef DEBUG
   // Print the argv list for reference
   printf("job_node argc: %d\n", argc);
   for (int i = 0; i < argc; ++i) {
      printf("argv[%d]: %s\n", i, argv[i]);
   }
   //
#endif

   // Get the job_num for this job. 
   char *endptr;
   job_num = strtol(argv[1], &endptr, 10);

   // Grab the job to cache node pairings list. 
   std::string mapping(argv[2]);
   stringlist_to_vector(job_to_cache, mapping);

   // Update argc and argv so things are transparent to the caller.
   // TODO: Ensure this is working properly
   *argc_ptr = argc - 2;
   std::string exec_name(argv[0]);
   memcpy(argv[2], exec_name.c_str(), exec_name.size());
   *argv_ptr = argv + 2;

   // Get details on the world this node lives in.
   MPI_Comm_size(MPI_COMM_WORLD, &local_size);
   MPI_Comm_rank(MPI_COMM_WORLD, &local_rank);

   MPI_Comm_get_parent(&parent_comm);
   MPI_Comm_remote_size(parent_comm, &parent_size);
   MPI_Comm_rank(parent_comm, &parent_rank);

   // Get coordinator cache node's rank for this job node.
   coord_rank = job_to_cache[local_rank];

#ifdef DEBUG
   printf("Job node: local rank - %d/%d parent rank - %d/%d\n", local_rank,
         local_size, parent_rank, parent_size);

   printf("Job Num: %d Job node %d: team cache node: %d\n", job_num, local_rank,
         coord_rank);
#endif
}
Example #14
void ompi_comm_remote_size_f(MPI_Fint *comm, MPI_Fint *size, MPI_Fint *ierr)
{
    int c_ierr;
    MPI_Comm c_comm = MPI_Comm_f2c ( *comm );
    OMPI_SINGLE_NAME_DECL(size);

    c_ierr = MPI_Comm_remote_size ( c_comm, OMPI_SINGLE_NAME_CONVERT(size ));
    if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);

    if (MPI_SUCCESS == c_ierr) {
        OMPI_SINGLE_INT_2_FINT(size);
    }
}
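Both wrappers above convert the incoming Fortran handle with MPI_Comm_f2c before calling the C routine. The reverse direction uses MPI_Comm_c2f when a communicator created in C must be returned to Fortran; a minimal sketch (illustrative helper name):

#include <mpi.h>

/* Sketch: hand a C communicator back to Fortran by converting the handle;
   MPI_Comm_c2f is the inverse of the MPI_Comm_f2c calls used above. */
static MPI_Fint comm_handle_for_fortran(MPI_Comm c_comm)
{
    return MPI_Comm_c2f(c_comm);
}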
Example #15
int main(int argc, char *argv[]) 
{ 
    int rank, size;
    int universe_size, *universe_sizep, flag;
    int lsize, rsize;
    int grank, gsize;
    MPI_Comm everyone, global;           /* intercommunicator */
    char worker_program[100];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE,  
                &universe_sizep, &flag);  
    if (!flag) { 
        universe_size = 8;
    } else 
        universe_size = *universe_sizep; 

    if( rank == 0 ) {    
        printf("univ size = %d\n", universe_size);
    }

    sprintf(worker_program, "./slave");
    MPI_Comm_spawn(worker_program, MPI_ARGV_NULL, 6,
                   MPI_INFO_NULL, 0, MPI_COMM_WORLD, &everyone,
                   MPI_ERRCODES_IGNORE);

    MPI_Comm_size(everyone, &lsize);
    MPI_Comm_remote_size(everyone, &rsize);

    MPI_Intercomm_merge(everyone, 1, &global);
    MPI_Comm_rank(global, &grank);
    MPI_Comm_size(global, &gsize);
    printf("parent %d: lsize=%d, rsize=%d, grank=%d, gsize=%d\n",
           rank, lsize, rsize, grank, gsize);

    MPI_Barrier(global);

    printf("%d: after Barrier\n", grank);

    MPI_Comm_free(&global);


    MPI_Finalize();
    return 0;
}
Example #16
void CWorkerGroup::signal_solve( Common::SignalArgs & args)
{
  std::string str;
  char * buffer;
  int remote_size;
  SignalFrame frame("solve", uri(), "//Root/Worker");

  to_string( *frame.xml_doc, str);

  buffer = new char[ str.length() + 1 ];
  std::strcpy( buffer, str.c_str() );

  MPI_Comm_remote_size(m_comm, &remote_size);

  for(int i = 0 ; i < remote_size ; ++i)
    MPI_Send( buffer, str.length() + 1, MPI_CHAR, i, 0, m_comm );

  delete [] buffer;
}
Example #17
void LeaderNode::spawn_cache_nodes(uint32_t job_num, MPI_Comm *comm,
   uint16_t count) {

   MPI_ASSERT(comm != NULL);

#ifdef DEBUG
   printf("LeaderNode sending SPAWN_CACHE of size %d\n", count);
#endif
   int result;
   int comm_size;
   MPI_Comm_remote_size(*comm, &comm_size);

   SpawnNodesTemplate *format = (SpawnNodesTemplate *)buf;
   format->job_num = job_num;
   format->count = count;

   std::vector<uint32_t> vec = job_to_swing[job_num];
   std::string mapping;
   vector_to_stringlist(vec, mapping);
   format->mapping_size = (uint16_t)mapping.size();
   memcpy(format->mapping, mapping.c_str(), mapping.size());

   MPI_ASSERT(mapping.size() <= MAX_MAPPING_SIZE);
   int msg_size = sizeof(SpawnNodesTemplate);
#ifdef DEBUG
   printf("job_num: %d\n", job_num);
   printf("count: %d\n", count);
   printf("spawn_cache_msg_size: %d\n", msg_size);
   printf("job_num: %d\ncount: %d\nmapping_size: %d\nmapping: %s\n",
         format->job_num, format->count, format->mapping_size, format->mapping);
#endif

   // TODO: Look into MPI_Comm_Idup and perhaps MPI_Bcast for sending out this
   //       spawn request to all nodes efficiently and having them all handle it
   //       efficiently.
   // Have all swing nodes collectively spawn the cache nodes.
   for (uint32_t i = 0; i < comm_size; ++i) {
#ifdef DEBUG
      printf("Leader sending spawn cache msg to swing node %d\n", i);
#endif
      result = send_msg(buf, msg_size, MPI_UINT8_T, i, SPAWN_CACHE, *comm,
            &request);
      MPI_ASSERT(result == MPI_SUCCESS);
   }
}
Example #18
int ompi_coll_libnbc_iallreduce_inter(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op,
                                      struct ompi_communicator_t *comm, ompi_request_t ** request,
                                      struct mca_coll_base_module_2_0_0_t *module)
{
  int rank, res, size, rsize;
  MPI_Aint ext;
  NBC_Schedule *schedule;
  NBC_Handle *handle;
  ompi_coll_libnbc_request_t **coll_req = (ompi_coll_libnbc_request_t**) request;
  ompi_coll_libnbc_module_t *libnbc_module = (ompi_coll_libnbc_module_t*) module;

  res = NBC_Init_handle(comm, coll_req, libnbc_module);
  if(res != NBC_OK) { printf("Error in NBC_Init_handle(%i)\n", res); return res; }
  handle = (*coll_req);
  res = MPI_Comm_rank(comm, &rank);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; }
  res = MPI_Comm_remote_size(comm, &rsize);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_remote_size() (%i)\n", res); return res; }
  res = MPI_Type_extent(datatype, &ext);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }
  res = MPI_Type_size(datatype, &size);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_size() (%i)\n", res); return res; }

  handle->tmpbuf = malloc(ext*count);
  if(handle->tmpbuf == NULL) { printf("Error in malloc() (%i)\n", res); return NBC_OOR; }

  schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule));
  if (NULL == schedule) { printf("Error in malloc()\n"); return res; }

  res = NBC_Sched_create(schedule);
  if(res != NBC_OK) { printf("Error in NBC_Sched_create (%i)\n", res); return res; }

  res = allred_sched_linear(rank, rsize, sendbuf, recvbuf, count, datatype, op, ext, size, schedule, handle);
  if (NBC_OK != res) { printf("Error in Schedule creation() (%i)\n", res); return res; }

  res = NBC_Sched_commit(schedule);
  if(res != NBC_OK) { free(handle->tmpbuf); printf("Error in NBC_Sched_commit() (%i)\n", res); return res; }

  res = NBC_Start(handle, schedule);
  if(res != NBC_OK) { free(handle->tmpbuf); printf("Error in NBC_Start() (%i)\n", res); return res; }

  /* tmpbuf is freed with the handle */
  return NBC_OK;
}
Example #19
// TODO: REMOVE THIS METHOD (It is just for testing functionality).
void LeaderNode::create_test_job() {
   MPI_Comm swing_comm;

   // TODO: This is a simple place holder for swing node spawning,
   //       will want to make this more flexible later.
   spawn_swing_nodes(MPI_COMM_WORLD, &swing_comm, 2);

#ifdef DEBUG
   // TODO: REMOVE
   int size;
   MPI_Comm_remote_size(swing_comm, &size);
   printf("leader after --- swing node count: %d\n", size);
   //
#endif

   // TODO: Make a better way of adding mappings for coordinator nodes.
   //       Use this bandaid to get off the ground for now.
   int job_num = next_job_num++; 
   job_to_comms[job_num].swing = swing_comm; 

   std::vector<uint32_t> temp_vec;
   temp_vec.push_back(0);
   temp_vec.push_back(0);
   temp_vec.push_back(1);
   temp_vec.push_back(1);
   job_to_swing[job_num] = temp_vec;

   // TODO: This is a simple place holder for cache node spawning,
   //       will want to make this more flexible later.
   spawn_cache_nodes(job_num, &swing_comm, 4);

   // TODO: Make a better way of adding mappings for team nodes.
   //       Use this bandaid to get off the ground for now.
   temp_vec.clear();
   temp_vec.push_back(0);
   temp_vec.push_back(1);
   temp_vec.push_back(2);
   temp_vec.push_back(3);
   job_to_cache[job_num] = temp_vec;

   // TODO: This is a simple place holder for job node spawning,
   //       will want to make this more flexible later.
   spawn_job_nodes(job_num, job_exec, &swing_comm, 4);
}
Example #20
/**
 * Main loop of the function.
 */
int main(int argc, char **argv) {
    int rank, size;
    MPI_Comm parent;

    MPI_Init(&argc, &argv);
    MPI_Comm_get_parent(&parent);

    if (parent == MPI_COMM_NULL)
        error("No parent.");

    MPI_Comm_remote_size(parent, &size);
    if (size != 1)
        error("Something wrong with parent.");

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    printf("I am a worker number %d of %d.\n", rank, size);

    MPI_Finalize();

    return 0;
}
Example #21
int ompi_coll_libnbc_ireduce_scatter_inter(void* sendbuf, void* recvbuf, int *recvcounts, MPI_Datatype datatype,
					   MPI_Op op, struct ompi_communicator_t *comm, ompi_request_t ** request,
					   struct mca_coll_base_module_2_0_0_t *module) {
  int peer, rank, r, res, count, rsize, offset;
  MPI_Aint ext;
  NBC_Schedule *schedule;
  NBC_Handle *handle;
  ompi_coll_libnbc_request_t **coll_req = (ompi_coll_libnbc_request_t**) request;
  ompi_coll_libnbc_module_t *libnbc_module = (ompi_coll_libnbc_module_t*) module;

  res = NBC_Init_handle(comm, coll_req, libnbc_module);
  if(res != NBC_OK) { printf("Error in NBC_Init_handle(%i)\n", res); return res; }
  handle = (*coll_req);
  res = MPI_Comm_rank(comm, &rank);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; }
  res = MPI_Comm_remote_size(comm, &rsize);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_remote_size() (%i)\n", res); return res; }
  res = MPI_Type_extent(datatype, &ext);
  if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }

  schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule));
  if (NULL == schedule) { printf("Error in malloc()\n"); return NBC_OOR; }

  res = NBC_Sched_create(schedule);
  if(res != NBC_OK) { printf("Error in NBC_Sched_create (%i)\n", res); return res; }

  count = 0;
  for (r = 0 ; r < rsize ; ++r) count += recvcounts[r];

  handle->tmpbuf = malloc(2 * ext * count);
  if(handle->tmpbuf == NULL) { printf("Error in malloc()\n"); return NBC_OOR; }

  /* send my data to the remote root */
  res = NBC_Sched_send(sendbuf, false, count, datatype, 0, schedule);
  if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }

  if (0 == rank) {
    res = NBC_Sched_recv((void *) 0, true, count, datatype, 0, schedule);
    if (NBC_OK != res) { free(handle->tmpbuf); printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }

    res = NBC_Sched_barrier(schedule);
    if (NBC_OK != res) { free(handle->tmpbuf); printf("Error in NBC_Sched_barrier() (%i)\n", res); return res; }

    for (peer = 1 ; peer < rsize ; ++peer) {
      res = NBC_Sched_recv((void *)(ext * count), true, count, datatype, peer, schedule);
      if (NBC_OK != res) { free(handle->tmpbuf); printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }

      res = NBC_Sched_barrier(schedule);
      if (NBC_OK != res) { printf("Error in NBC_Sched_barrier() (%i)\n", res); return res; }

      res = NBC_Sched_op((void *) 0, true, (void *)(ext * count), true, (void *) 0, true, count, datatype, op, schedule);
      if (NBC_OK != res) { free(handle->tmpbuf); printf("Error in NBC_Sched_op() (%i)\n", res); return res; }

      res = NBC_Sched_barrier(schedule);
      if (NBC_OK != res) { printf("Error in NBC_Sched_barrier() (%i)\n", res); return res; }

    }

    /* exchange data with remote root for scatter phase (we *could* use the local communicator to do the scatter) */
    res = NBC_Sched_recv((void *)(ext * count), true, count, datatype, 0, schedule);
    if (NBC_OK != res) { free(handle->tmpbuf); printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }

    res = NBC_Sched_send((void *) 0, true, count, datatype, 0, schedule);
    if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }

    res = NBC_Sched_barrier(schedule);
    if (NBC_OK != res) { printf("Error in NBC_Sched_barrier() (%i)\n", res); return res; }

    /* scatter */
    for (peer = 0, offset = ext * count ; peer < rsize ; ++peer) {
      res = NBC_Sched_send((void *)(uintptr_t) offset, true, recvcounts[peer], datatype, peer, schedule);
      if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }
      offset += recvcounts[peer] * ext;
    }
  }

  /* receive my block */
  res = NBC_Sched_recv(recvbuf, false, recvcounts[rank], datatype, 0, schedule);
  if (NBC_OK != res) { free(handle->tmpbuf); printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }

  /*NBC_PRINT_SCHED(*schedule);*/

  res = NBC_Sched_commit(schedule);
  if (NBC_OK != res) { free(handle->tmpbuf); printf("Error in NBC_Sched_commit() (%i)\n", res); return res; }

  res = NBC_Start(handle, schedule);
  if (NBC_OK != res) { free(handle->tmpbuf); printf("Error in NBC_Start() (%i)\n", res); return res; }

  /* tmpbuf is freed with the handle */
  return NBC_OK;
}
Example #22
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int *sendbuf = 0, *recvbuf = 0;
    int leftGroup, i, j, idx, count, rrank, rsize;
    MPI_Comm comm;
    MPI_Datatype datatype;

    MTest_Init( &argc, &argv );

    datatype = MPI_INT;
    while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
	if (comm == MPI_COMM_NULL) continue;
	for (count = 1; count < 66000; count = 2 * count) {
	    /* Get an intercommunicator */
	    MPI_Comm_remote_size( comm, &rsize );
	    MPI_Comm_rank( comm, &rrank );
	    sendbuf = (int *)malloc( rsize * count * sizeof(int) );
	    recvbuf = (int *)malloc( rsize * count * sizeof(int) );
	    for (i=0; i<rsize*count; i++) recvbuf[i] = -1;
	    if (leftGroup) {
		idx = 0;
		for (j=0; j<rsize; j++) {
		    for (i=0; i<count; i++) {
			sendbuf[idx++] = i + rrank;
		    }
		}
		err = MPI_Alltoall( sendbuf, count, datatype, 
				    NULL, 0, datatype, comm );
		if (err) {
		    errs++;
		    MTestPrintError( err );
		}
	    }
	    else {
		int rank, size;

		MPI_Comm_rank( comm, &rank );
		MPI_Comm_size( comm, &size );

		/* In the right group */
		err = MPI_Alltoall( NULL, 0, datatype, 
				    recvbuf, count, datatype, comm );
		if (err) {
		    errs++;
		    MTestPrintError( err );
		}
		/* Check that we have received the correct data */
		idx = 0;
		for (j=0; j<rsize; j++) {
		    for (i=0; i<count; i++) {
			if (recvbuf[idx++] != i + j) {
			    errs++;
			    if (errs < 10) 
				fprintf( stderr, "buf[%d] = %d on %d\n", 
					 i, recvbuf[i], rank );
			}
		    }
		}
	    }
	    free( recvbuf );
	    free( sendbuf );
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #23
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size, rsize, i;
    int np = 2;
    int errcodes[2];
    MPI_Comm      parentcomm, intercomm, intracomm, intracomm2, intracomm3;
    int           isChild = 0;
    MPI_Status    status;

    MTest_Init( &argc, &argv );

    MPI_Comm_get_parent( &parentcomm );

    if (parentcomm == MPI_COMM_NULL) {
	/* Create 2 more processes */
	MPI_Comm_spawn( (char*)"./spawnintra", MPI_ARGV_NULL, np,
			MPI_INFO_NULL, 0, MPI_COMM_WORLD,
			&intercomm, errcodes );
    }
    else 
	intercomm = parentcomm;

    /* We now have a valid intercomm */

    MPI_Comm_remote_size( intercomm, &rsize );
    MPI_Comm_size( intercomm, &size );
    MPI_Comm_rank( intercomm, &rank );

    if (parentcomm == MPI_COMM_NULL) {
	/* Master */
	if (rsize != np) {
	    errs++;
	    printf( "Did not create %d processes (got %d)\n", np, rsize );
	}
	if (rank == 0) {
	    for (i=0; i<rsize; i++) {
		MPI_Send( &i, 1, MPI_INT, i, 0, intercomm );
	    }
	}
    }
    else {
	/* Child */
	isChild = 1;
	if (size != np) {
	    errs++;
	    printf( "(Child) Did not create %d processes (got %d)\n", 
		    np, size );
	}
	MPI_Recv( &i, 1, MPI_INT, 0, 0, intercomm, &status );
	if (i != rank) {
	    errs++;
	    printf( "Unexpected rank on child %d (%d)\n", rank, i );
	}
    }

    /* At this point, try to form the intracommunicator */
    MPI_Intercomm_merge( intercomm, isChild, &intracomm );

    /* Check on the intra comm */
    {
	int icsize, icrank, wrank;

	MPI_Comm_size( intracomm, &icsize );
	MPI_Comm_rank( intracomm, &icrank );
	MPI_Comm_rank( MPI_COMM_WORLD, &wrank );

	if (icsize != rsize + size) {
	    errs++;
	    printf( "Intracomm rank %d thinks size is %d, not %d\n",
		    icrank, icsize, rsize + size );
	}
	/* Make sure that the processes are ordered correctly */
	if (isChild) {
	    int psize;
	    MPI_Comm_remote_size( parentcomm, &psize );
	    if (icrank != psize + wrank ) {
		errs++;
		printf( "Intracomm rank %d (from child) should have rank %d\n",
			icrank, psize + wrank );
	    }
	}
	else {
	    if (icrank != wrank) {
		errs++;
		printf( "Intracomm rank %d (from parent) should have rank %d\n",
			icrank, wrank );
	    }
	}
    }

    /* At this point, try to form the intracommunicator, with the other 
     processes first */
    MPI_Intercomm_merge( intercomm, !isChild, &intracomm2 );

    /* Check on the intra comm */
    {
	int icsize, icrank, wrank;

	MPI_Comm_size( intracomm2, &icsize );
	MPI_Comm_rank( intracomm2, &icrank );
	MPI_Comm_rank( MPI_COMM_WORLD, &wrank );

	if (icsize != rsize + size) {
	    errs++;
	    printf( "(2)Intracomm rank %d thinks size is %d, not %d\n",
		    icrank, icsize, rsize + size );
	}
	/* Make sure that the processes are ordered correctly */
	if (isChild) {
	    if (icrank != wrank ) {
		errs++;
		printf( "(2)Intracomm rank %d (from child) should have rank %d\n",
			icrank, wrank );
	    }
	}
	else {
	    int csize;
	    MPI_Comm_remote_size( intercomm, &csize );
	    if (icrank != wrank + csize) {
		errs++;
		printf( "(2)Intracomm rank %d (from parent) should have rank %d\n",
			icrank, wrank + csize );
	    }
	}
    }

    /* At this point, try to form the intracommunicator, with an 
       arbitrary choice for the first group of processes */
    MPI_Intercomm_merge( intercomm, 0, &intracomm3 );
    /* Check on the intra comm */
    {
	int icsize, icrank, wrank;

	MPI_Comm_size( intracomm3, &icsize );
	MPI_Comm_rank( intracomm3, &icrank );
	MPI_Comm_rank( MPI_COMM_WORLD, &wrank );

	if (icsize != rsize + size) {
	    errs++;
	    printf( "(3)Intracomm rank %d thinks size is %d, not %d\n",
		    icrank, icsize, rsize + size );
	}
	/* Eventually, we should test that the processes are ordered 
	   correctly, by groups (must be one of the two cases above) */
    }

    /* Update error count */
    if (isChild) {
	/* Send the errs back to the master process */
	MPI_Ssend( &errs, 1, MPI_INT, 0, 1, intercomm );
    }
    else {
	if (rank == 0) {
	    /* We could use intercomm reduce to get the errors from the 
	       children, but we'll use a simpler loop to make sure that
	       we get valid data */
	    for (i=0; i<rsize; i++) {
		MPI_Recv( &err, 1, MPI_INT, i, 1, intercomm, MPI_STATUS_IGNORE );
		errs += err;
	    }
	}
    }

    /* It isn't necessary to free the intracomms, but it should not hurt */
    MPI_Comm_free( &intracomm );
    MPI_Comm_free( &intracomm2 );
    MPI_Comm_free( &intracomm3 );

    /* It isn't necessary to free the intercomm, but it should not hurt */
    MPI_Comm_free( &intercomm );

    /* Note that the MTest_Finalize get errs only over COMM_WORLD */
    /* Note also that both the parent and child will generate "No Errors"
       if both call MTest_Finalize */
    if (parentcomm == MPI_COMM_NULL) {
	MTest_Finalize( errs );
    }

    MPI_Finalize();
    return 0;
}
Example #24
/* create a loop between all the elements types */
int mpi_lsa_create_intercoms(com_lsa * com){
	int prev, next,flag;
	int prev_size,next_size,size;
	/* create first connection between intracommunicators thanks to an intercommunicator */
	/* one way */
	MPI_Barrier(MPI_COMM_WORLD);
	if(com->rank_world==0)printf("]> Creating intercommunicators\n-One Way :\n");
	MPI_Barrier(MPI_COMM_WORLD);
	printf("\t *> %d -> %d ",com->rank_world,com->master.com[4-((com->color_group)+1)]);
	MPI_Barrier(MPI_COMM_WORLD);



	MPI_Intercomm_create(com->com_group,0,
					MPI_COMM_WORLD,com->master.com[4-((com->color_group)+1)], 
					com->rank_group,
					&(com->inter.com[4-((com->color_group)+1)]));


	
	MPI_Barrier(MPI_COMM_WORLD);
	if(com->rank_world==0)printf("\n]> The Other : \n");
	MPI_Barrier(MPI_COMM_WORLD);
	printf("\t *> %d -> %d ",(com->color_group),(4-((com->color_group)-1)%4)%4);
	MPI_Barrier(MPI_COMM_WORLD);
	if(com->rank_world==0)printf("\n");
	MPI_Barrier(MPI_COMM_WORLD);



 	/* the other */
	MPI_Intercomm_create(com->com_group,0,
					 com->com_world,com->master.com[(4-((com->color_group)-1)%4)%4],
	 				com->rank_group,
	 				&(com->inter.com[(4-((com->color_group)-1)%4)%4]));


	/// WHY THIS ????????
	if((4-((com->color_group)-1)%4)%4>com->color_group){
		next=(4-(com->color_group-1)%4)%4;
		prev=4-((com->color_group)+1);
	} else {
		prev=(4-(com->color_group-1)%4)%4;
		next=4-((com->color_group)+1);
	}

	/* set the in and out communicators */
	com->out_com=com->inter.com[next];
	MPI_Comm_test_inter(com->inter.com[next],&flag);
		if(!flag){
			mpi_lsa_print("\n\n\n\nproblem with inter.[next]\n\n\n\n\n", com);
		}
	com->in_com=com->inter.com[prev];
		MPI_Comm_test_inter(com->inter.com[prev],&flag);
		if(!flag){
			mpi_lsa_print("\n\n\n\n\nproblem with inter.[prev]\n\n\n\n", com);
		}

	MPI_Comm_remote_size(com->out_com,&next_size);
	MPI_Comm_remote_size(com->in_com,&prev_size);
	MPI_Comm_size(com->com_group,&size);
	if(com->color_group==0) 		 printf("GMRES 1: my intercomm with LS %d \n",com->in_com);
	if(com->rank_world==0) printf("]> In and Out communicators : \n");
		MPI_Barrier(MPI_COMM_WORLD);

	if(com->color_group==0) 		 printf("GMRES :   ");
	else if(com->color_group==1) printf("MAIN :    ");
	else if(com->color_group==2) printf("ARNOLDI : ");
	else if(com->color_group==3) printf("LS :      ");

	printf("%d: %d (%d) -> %d (%d) -> %d (%d)   in_com: %d,  out_com: %d\n",com->rank_world,com->master.com[prev],prev_size,com->color_group,size,com->master.com[next],next_size, com->in_com, com->out_com);
	

	return 0;
}
Example #25
int main( int argc, char *argv[] )
{
    int errs = 0;
    int size, isLeft;
    MPI_Comm intercomm, newcomm;

    MTest_Init( &argc, &argv );

    MPI_Comm_size( MPI_COMM_WORLD, &size );
    if (size < 4) {
	printf( "This test requires at least 4 processes\n" );
	MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    while (MTestGetIntercomm( &intercomm, &isLeft, 2 )) {
	int key, color;

        if (intercomm == MPI_COMM_NULL) continue;

	/* Split this intercomm.  The new intercomms contain the 
	   processes that had odd (resp even) rank in their local group
	   in the original intercomm */
	MTestPrintfMsg( 1, "Created intercomm %s\n", MTestGetIntercommName() );
	MPI_Comm_rank( intercomm, &key );
	color = (key % 2);
	MPI_Comm_split( intercomm, color, key, &newcomm );
	/* Make sure that the new communicator has the appropriate pieces */
	if (newcomm != MPI_COMM_NULL) {
	    int orig_rsize, orig_size, new_rsize, new_size;
	    int predicted_size, flag, commok=1;

	    MPI_Comm_test_inter( intercomm, &flag );
	    if (!flag) {
		errs++;
		printf( "Output communicator is not an intercomm\n" );
		commok = 0;
	    }

	    MPI_Comm_remote_size( intercomm, &orig_rsize );
	    MPI_Comm_remote_size( newcomm, &new_rsize );
	    MPI_Comm_size( intercomm, &orig_size );
	    MPI_Comm_size( newcomm, &new_size );
	    /* The local size is 1/2 the original size, +1 if the 
	       size was odd and the color was even.  More precisely,
	       let n be the orig_size.  Then
	                        color 0     color 1
	       orig size even    n/2         n/2
	       orig size odd     (n+1)/2     n/2

	       However, since these are integer valued, if n is even,
	       then (n+1)/2 = n/2, so this table is much simpler:
	                        color 0     color 1
	       orig size even    (n+1)/2     n/2
	       orig size odd     (n+1)/2     n/2
	       
	    */
	    predicted_size = (orig_size + !color) / 2; 
	    if (predicted_size != new_size) {
		errs++;
		printf( "Predicted size = %d but found %d for %s (%d,%d)\n",
			predicted_size, new_size, MTestGetIntercommName(),
			orig_size, orig_rsize );
		commok = 0;
	    }
	    predicted_size = (orig_rsize + !color) / 2;
	    if (predicted_size != new_rsize) {
		errs++;
		printf( "Predicted remote size = %d but found %d for %s (%d,%d)\n",
			predicted_size, new_rsize, MTestGetIntercommName(), 
			orig_size, orig_rsize );
		commok = 0;
	    }
	    /* ... more to do */
	    if (commok) {
		errs += TestIntercomm( newcomm );
	    }
	}
	else {
	    int orig_rsize;
	    /* If the newcomm is null, then this means that remote group
	       for this color is of size zero (since all processes in this 
	       test have been given colors other than MPI_UNDEFINED).
	       Confirm that here */
	    /* FIXME: ToDo */
	    MPI_Comm_remote_size( intercomm, &orig_rsize );
	    if (orig_rsize == 1) {
		if (color == 0) {
		    errs++;
		    printf( "Returned null intercomm when non-null expected\n" );
		}
	    }
	}
	if (newcomm != MPI_COMM_NULL) 
	    MPI_Comm_free( &newcomm );
	MPI_Comm_free( &intercomm );
    }
    MTest_Finalize(errs);

    MPI_Finalize();

    return 0;
}
Example #26
/*
 * Return an intercomm; set isLeftGroup to 1 if the calling process is
 * a member of the "left" group.
 */
int MTestGetIntercomm(MPI_Comm * comm, int *isLeftGroup, int min_size)
{
    int size, rank, remsize, merr;
    int done = 0;
    MPI_Comm mcomm = MPI_COMM_NULL;
    MPI_Comm mcomm2 = MPI_COMM_NULL;
    int rleader;

    /* The while loop allows us to skip communicators that are too small.
     * MPI_COMM_NULL is always considered large enough.  The size is
     * the sum of the sizes of the local and remote groups */
    while (!done) {
        *comm = MPI_COMM_NULL;
        *isLeftGroup = 0;
        interCommName = "MPI_COMM_NULL";

        switch (interCommIdx) {
        case 0:
            /* Split comm world in half */
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            if (size > 1) {
                merr = MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &mcomm);
                if (merr)
                    MTestPrintError(merr);
                if (rank == 0) {
                    rleader = size / 2;
                }
                else if (rank == size / 2) {
                    rleader = 0;
                }
                else {
                    /* Remote leader is significant only for the processes
                     * designated local leaders */
                    rleader = -1;
                }
                *isLeftGroup = rank < size / 2;
                merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm);
                if (merr)
                    MTestPrintError(merr);
                interCommName = "Intercomm by splitting MPI_COMM_WORLD";
            }
            else
                *comm = MPI_COMM_NULL;
            break;
        case 1:
            /* Split comm world in to 1 and the rest */
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            if (size > 1) {
                merr = MPI_Comm_split(MPI_COMM_WORLD, rank == 0, rank, &mcomm);
                if (merr)
                    MTestPrintError(merr);
                if (rank == 0) {
                    rleader = 1;
                }
                else if (rank == 1) {
                    rleader = 0;
                }
                else {
                    /* Remote leader is significant only for the processes
                     * designated local leaders */
                    rleader = -1;
                }
                *isLeftGroup = rank == 0;
                merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12346, comm);
                if (merr)
                    MTestPrintError(merr);
                interCommName = "Intercomm by splitting MPI_COMM_WORLD into 1, rest";
            }
            else
                *comm = MPI_COMM_NULL;
            break;

        case 2:
            /* Split comm world in to 2 and the rest */
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            if (size > 3) {
                merr = MPI_Comm_split(MPI_COMM_WORLD, rank < 2, rank, &mcomm);
                if (merr)
                    MTestPrintError(merr);
                if (rank == 0) {
                    rleader = 2;
                }
                else if (rank == 2) {
                    rleader = 0;
                }
                else {
                    /* Remote leader is significant only for the processes
                     * designated local leaders */
                    rleader = -1;
                }
                *isLeftGroup = rank < 2;
                merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12347, comm);
                if (merr)
                    MTestPrintError(merr);
                interCommName = "Intercomm by splitting MPI_COMM_WORLD into 2, rest";
            }
            else
                *comm = MPI_COMM_NULL;
            break;

        case 3:
            /* Split comm world in half, then dup */
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            if (size > 1) {
                merr = MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &mcomm);
                if (merr)
                    MTestPrintError(merr);
                if (rank == 0) {
                    rleader = size / 2;
                }
                else if (rank == size / 2) {
                    rleader = 0;
                }
                else {
                    /* Remote leader is significant only for the processes
                     * designated local leaders */
                    rleader = -1;
                }
                *isLeftGroup = rank < size / 2;
                merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm);
                if (merr)
                    MTestPrintError(merr);
                /* avoid leaking after assignment below */
                merr = MPI_Comm_free(&mcomm);
                if (merr)
                    MTestPrintError(merr);

                /* now dup, some bugs only occur for dup's of intercomms */
                mcomm = *comm;
                merr = MPI_Comm_dup(mcomm, comm);
                if (merr)
                    MTestPrintError(merr);
                interCommName = "Intercomm by splitting MPI_COMM_WORLD then dup'ing";
            }
            else
                *comm = MPI_COMM_NULL;
            break;

        case 4:
            /* Split comm world in half, form intercomm, then split that intercomm */
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            if (size > 1) {
                merr = MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &mcomm);
                if (merr)
                    MTestPrintError(merr);
                if (rank == 0) {
                    rleader = size / 2;
                }
                else if (rank == size / 2) {
                    rleader = 0;
                }
                else {
                    /* Remote leader is significant only for the processes
                     * designated local leaders */
                    rleader = -1;
                }
                *isLeftGroup = rank < size / 2;
                merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm);
                if (merr)
                    MTestPrintError(merr);
                /* avoid leaking after assignment below */
                merr = MPI_Comm_free(&mcomm);
                if (merr)
                    MTestPrintError(merr);

                /* now split, some bugs only occur for splits of intercomms */
                mcomm = *comm;
                merr = MPI_Comm_rank(mcomm, &rank);
                if (merr)
                    MTestPrintError(merr);
                /* this split is effectively a dup but tests the split code paths */
                merr = MPI_Comm_split(mcomm, 0, rank, comm);
                if (merr)
                    MTestPrintError(merr);
                interCommName = "Intercomm by splitting MPI_COMM_WORLD then then splitting again";
            }
            else
                *comm = MPI_COMM_NULL;
            break;

        case 5:
            /* split comm world in half discarding rank 0 on the "left"
             * communicator, then form them into an intercommunicator */
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            if (size >= 4) {
                int color = (rank < size / 2 ? 0 : 1);
                if (rank == 0)
                    color = MPI_UNDEFINED;

                merr = MPI_Comm_split(MPI_COMM_WORLD, color, rank, &mcomm);
                if (merr)
                    MTestPrintError(merr);

                if (rank == 1) {
                    rleader = size / 2;
                }
                else if (rank == (size / 2)) {
                    rleader = 1;
                }
                else {
                    /* Remote leader is significant only for the processes
                     * designated local leaders */
                    rleader = -1;
                }
                *isLeftGroup = rank < size / 2;
                if (rank != 0) {        /* 0's mcomm is MPI_COMM_NULL */
                    merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm);
                    if (merr)
                        MTestPrintError(merr);
                }
                interCommName =
                    "Intercomm by splitting MPI_COMM_WORLD (discarding rank 0 in the left group) then MPI_Intercomm_create'ing";
            }
            else {
                *comm = MPI_COMM_NULL;
            }
            break;

        case 6:
            /* Split comm world in half then form them into an
             * intercommunicator.  Then discard rank 0 from each group of the
             * intercomm via MPI_Comm_create. */
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            if (size >= 4) {
                MPI_Group oldgroup, newgroup;
                int ranks[1];
                int color = (rank < size / 2 ? 0 : 1);

                merr = MPI_Comm_split(MPI_COMM_WORLD, color, rank, &mcomm);
                if (merr)
                    MTestPrintError(merr);

                if (rank == 0) {
                    rleader = size / 2;
                }
                else if (rank == (size / 2)) {
                    rleader = 0;
                }
                else {
                    /* Remote leader is significant only for the processes
                     * designated local leaders */
                    rleader = -1;
                }
                *isLeftGroup = rank < size / 2;
                merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, &mcomm2);
                if (merr)
                    MTestPrintError(merr);

                /* We have an intercomm between the two halves of comm world. Now create
                 * a new intercomm that removes rank 0 on each side. */
                merr = MPI_Comm_group(mcomm2, &oldgroup);
                if (merr)
                    MTestPrintError(merr);
                ranks[0] = 0;
                merr = MPI_Group_excl(oldgroup, 1, ranks, &newgroup);
                if (merr)
                    MTestPrintError(merr);
                merr = MPI_Comm_create(mcomm2, newgroup, comm);
                if (merr)
                    MTestPrintError(merr);

                merr = MPI_Group_free(&oldgroup);
                if (merr)
                    MTestPrintError(merr);
                merr = MPI_Group_free(&newgroup);
                if (merr)
                    MTestPrintError(merr);

                interCommName =
                    "Intercomm by splitting MPI_COMM_WORLD then discarding 0 ranks with MPI_Comm_create";
            }
            else {
                *comm = MPI_COMM_NULL;
            }
            break;

        default:
            *comm = MPI_COMM_NULL;
            interCommIdx = -1;
            break;
        }

        if (*comm != MPI_COMM_NULL) {
            merr = MPI_Comm_size(*comm, &size);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_remote_size(*comm, &remsize);
            if (merr)
                MTestPrintError(merr);
            if (size + remsize >= min_size)
                done = 1;
        }
        else {
            interCommName = "MPI_COMM_NULL";
            done = 1;
        }

        /* we are only done if all processes are done */
        MPI_Allreduce(MPI_IN_PLACE, &done, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);

        /* Advance the comm index whether we are done or not, otherwise we could
         * spin forever trying to allocate a too-small communicator over and
         * over again. */
        interCommIdx++;

        if (!done && *comm != MPI_COMM_NULL) {
            /* avoid leaking communicators */
            merr = MPI_Comm_free(comm);
            if (merr)
                MTestPrintError(merr);
        }

        /* cleanup for common temp objects */
        if (mcomm != MPI_COMM_NULL) {
            merr = MPI_Comm_free(&mcomm);
            if (merr)
                MTestPrintError(merr);
        }
        if (mcomm2 != MPI_COMM_NULL) {
            merr = MPI_Comm_free(&mcomm2);
            if (merr)
                MTestPrintError(merr);
        }
    }

    return interCommIdx;
}
Example No. 27
0
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int *buf = 0;
    int *recvcounts;
    int *recvdispls;
    int leftGroup, i, count, rank, rsize, size;
    MPI_Comm comm;
    MPI_Datatype datatype;

    MTest_Init( &argc, &argv );

    datatype = MPI_INT;
    while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
	if (comm == MPI_COMM_NULL) continue;
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_remote_size( comm, &rsize );
	MPI_Comm_size( comm, &size );
		
	/* To improve reporting of problems about operations, we
	   change the error handler to errors return */
	MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );

	for (count = 1; count < 65000; count = 2 * count) {
	    /* Get an intercommunicator */
	    recvcounts = (int *)malloc( rsize * sizeof(int) );
	    recvdispls = (int *)malloc( rsize * sizeof(int) );
	    /* This simple test duplicates the Gather test, 
	       using the same lengths for all messages */
	    for (i=0; i<rsize; i++) {
		recvcounts[i] = count;
		recvdispls[i] = count * i;
	    }
	    if (leftGroup) {
		buf = (int *)malloc( count * rsize * sizeof(int) );
		for (i=0; i<count*rsize; i++) buf[i] = -1;
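		/* For an intercommunicator gather, processes in the root group
		   pass MPI_ROOT at the root and MPI_PROC_NULL everywhere else;
		   the data contributed by the remote group arrives only at the
		   root. */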

		err = MPI_Gatherv( NULL, 0, datatype,
				  buf, recvcounts, recvdispls, datatype, 
				 (rank == 0) ? MPI_ROOT : MPI_PROC_NULL,
				 comm );
		if (err) {
		    errs++;
		    MTestPrintError( err );
		}
		/* Test that no other process in this group received any of
		   the gathered data */
		if (rank != 0) {
		    for (i=0; i<count; i++) {
			if (buf[i] != -1) {
			    errs++;
			}
		    }
		}
		else {
		    /* Check for the correct data */
		    for (i=0; i<count*rsize; i++) {
			if (buf[i] != i) {
			    errs++;
			}
		    }
		}
	    }
	    else {
		/* In the right group */
		buf = (int *)malloc( count * sizeof(int) );
		for (i=0; i<count; i++) buf[i] = rank * count + i;
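		/* In the group that does not contain the root, the root
		   argument is the rank of the root process in the remote
		   group. */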
		err = MPI_Gatherv( buf, count, datatype, 
				   NULL, 0, 0, datatype, 0, comm );
		if (err) {
		    errs++;
		    MTestPrintError( err );
		}
	    }
	    free( buf );
	    free( recvcounts );
	    free( recvdispls );
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example No. 28
0
int main( int argc, char **argv )
{
    int      err = 0;
    int      *recvcounts;
    int      size, rsize, rank, i;
    int      recvcount, /* Each process receives this much data */
             sendcount, /* Each process contributes this much data */
	     basecount; /* Unit of elements - basecount * rsize is recvcount,
			   etc. */
    int      isLeftGroup;
    long long *sendbuf, *recvbuf;
    long long sumval;
    MPI_Comm comm;


    MTest_Init( &argc, &argv );
    comm = MPI_COMM_WORLD;

    basecount = 1024;

    while (MTestGetIntercomm( &comm, &isLeftGroup, 2 )) {
	if (comm == MPI_COMM_NULL) continue;

	MPI_Comm_remote_size( comm, &rsize );
	MPI_Comm_size( comm, &size );
	MPI_Comm_rank( comm, &rank );

	if (0) {
	    printf( "[%d] %s (%d,%d) remote %d\n", rank, 
		    isLeftGroup ? "L" : "R", 
		    rank, size, rsize );
	}

	recvcount = basecount * rsize;
	sendcount = basecount * rsize * size;

	recvcounts = (int *)malloc( size * sizeof(int) );
	if (!recvcounts) {
	    fprintf( stderr, "Could not allocate %d ints for recvcounts\n", 
		     size );
	    MPI_Abort( MPI_COMM_WORLD, 1 );
	}
	for (i=0; i<size; i++) 
	    recvcounts[i] = recvcount;
	
	sendbuf = (long long *) malloc( sendcount * sizeof(long long) );
	if (!sendbuf) {
	    fprintf( stderr, "Could not allocate %d long long elements for sendbuf\n", 
		     sendcount );
	    MPI_Abort( MPI_COMM_WORLD, 1 );
	}

	for (i=0; i<sendcount; i++) {
	    sendbuf[i] = (long long)(rank*sendcount + i);
	}
	recvbuf = (long long *)malloc( recvcount * sizeof(long long) );
	if (!recvbuf) {
	    fprintf( stderr, "Could not allocate %d long long elements for recvbuf\n", 
		     recvcount );
	    MPI_Abort( MPI_COMM_WORLD, 1 );
	}
	for (i=0; i<recvcount; i++) {
	    recvbuf[i] = (long long)(-i);
	}
	
	MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_LONG_LONG, MPI_SUM,
			    comm );

	/* Check received data */
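	/* On an intercommunicator, the values scattered within this group are
	   the element-wise sums of the send buffers contributed by the rsize
	   processes of the remote group.  Remote rank q contributed
	   q*sendcount + rank*rsize*basecount + i to recvbuf[i] (sendcount has
	   the same value in both groups), so summing over q = 0..rsize-1
	   gives the sumval checked below. */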
	for (i=0; i<recvcount; i++) {
	    sumval = (long long)(sendcount) * (long long)((rsize * (rsize-1))/2) +
		(long long)(i + rank * rsize * basecount) * (long long)rsize;
	    if (recvbuf[i] != sumval) {
		err++;
		if (err < 4) {
		    fprintf( stdout, "Did not get expected value for reduce scatter\n" );
		    fprintf( stdout, "[%d] %s recvbuf[%d] = %lld, expected %lld\n",
			     rank, 
			     isLeftGroup ? "L" : "R", 
			     i, recvbuf[i], sumval );
		}
	    }
	}
	
	free(sendbuf);
	free(recvbuf);
	free(recvcounts);

	MTestFreeComm( &comm );
    }

    MTest_Finalize( err );

    MPI_Finalize( );

    return 0;
}
Example No. 29
0
int main(int argc, char *argv[])
{
    int errs = 0;
    int rank, size, rsize;
    int np = 3;
    MPI_Comm parentcomm, intercomm;
    int verbose = 0;
    char *env;
    int can_spawn;

    env = getenv("MPITEST_VERBOSE");
    if (env) {
        if (*env != '0')
            verbose = 1;
    }

    MTest_Init(&argc, &argv);

    errs += MTestSpawnPossible(&can_spawn);

    if (can_spawn) {
        MPI_Comm_get_parent(&parentcomm);

        if (parentcomm == MPI_COMM_NULL) {
            IF_VERBOSE(("spawning %d processes\n", np));
            /* Create 3 more processes */
            MPI_Comm_spawn((char *) "./disconnect", MPI_ARGV_NULL, np,
                           MPI_INFO_NULL, 0, MPI_COMM_WORLD, &intercomm, MPI_ERRCODES_IGNORE);
        } else {
            intercomm = parentcomm;
        }

        /* We now have a valid intercomm */

        MPI_Comm_remote_size(intercomm, &rsize);
        MPI_Comm_size(intercomm, &size);
        MPI_Comm_rank(intercomm, &rank);

        if (parentcomm == MPI_COMM_NULL) {
            IF_VERBOSE(("parent rank %d alive.\n", rank));
            /* Parent */
            if (rsize != np) {
                errs++;
                printf("Did not create %d processes (got %d)\n", np, rsize);
                fflush(stdout);
            }
            IF_VERBOSE(("disconnecting child communicator\n"));
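            /* MPI_Comm_disconnect waits for any pending communication on the
             * intercommunicator to complete and then deallocates it; after
             * this call the parent and children are no longer connected. */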
            MPI_Comm_disconnect(&intercomm);

            /* The children's errors cannot be collected here because, after
             * the disconnect, there is no communicator connected to the children
             * for (i=0; i<rsize; i++)
             * {
             * MPI_Recv(&err, 1, MPI_INT, i, 1, intercomm, MPI_STATUS_IGNORE);
             * errs += err;
             * }
             */
        } else {
            IF_VERBOSE(("child rank %d alive.\n", rank));
            /* Child */
            if (size != np) {
                errs++;
                printf("(Child) Did not create %d processes (got %d)\n", np, size);
                fflush(stdout);
            }

            IF_VERBOSE(("disconnecting communicator\n"));
            MPI_Comm_disconnect(&intercomm);

            /* We would send errs back to the parent here, but after the
             * disconnect there is no longer a communicator connected to the
             * parent */
            /*MPI_Ssend(&errs, 1, MPI_INT, 0, 1, intercomm); */
        }

        /* Note that MTest_Finalize gets errs only over COMM_WORLD */
        /* Note also that both the parent and child will generate "No Errors"
         * if both call MTest_Finalize */
        if (parentcomm == MPI_COMM_NULL) {
            MTest_Finalize(errs);
        } else {
            MPI_Finalize();
        }
    } else {
        MTest_Finalize(errs);
    }

    IF_VERBOSE(("calling finalize\n"));
    return MTestReturnValue(errs);
}
Example No. 30
0
/* FIXME: This is copied from iccreate.  It should be in one place */
int TestIntercomm( MPI_Comm comm )
{
    int local_size, remote_size, rank, **bufs, *bufmem, rbuf[2], j;
    int errs = 0, wrank, nsize;
    char commname[MPI_MAX_OBJECT_NAME+1];
    MPI_Request *reqs;

    MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
    MPI_Comm_size( comm, &local_size );
    MPI_Comm_remote_size( comm, &remote_size );
    MPI_Comm_rank( comm, &rank );
    MPI_Comm_get_name( comm, commname, &nsize );

    MTestPrintfMsg( 1, "Testing communication on intercomm %s\n", commname );
    
    reqs = (MPI_Request *)malloc( remote_size * sizeof(MPI_Request) );
    if (!reqs) {
	printf( "[%d] Unable to allocate %d requests for testing intercomm %s\n", 
		wrank, remote_size, commname );
	errs++;
	return errs;
    }
    bufs = (int **) malloc( remote_size * sizeof(int *) );
    if (!bufs) {
	printf( "[%d] Unable to allocate %d int pointers for testing intercomm %s\n", 
		wrank, remote_size, commname );
	errs++;
	return errs;
    }
    bufmem = (int *) malloc( remote_size * 2 * sizeof(int) );
    if (!bufmem) {
	printf( "[%d] Unable to allocate %d ints for testing intercomm %s\n", 
		wrank, 2*remote_size, commname );
	errs++;
	return errs;
    }

    /* Each process sends a message containing its own rank and the
       rank of the destination with a nonblocking send.  Because we're using
       nonblocking sends, we need to use different buffers for each isend */
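    /* The destination ranks in MPI_Isend and the source ranks in MPI_Recv on
       an intercommunicator refer to processes in the remote group, so each
       process exchanges one message with every remote rank. */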
    for (j=0; j<remote_size; j++) {
	bufs[j]    = &bufmem[2*j];
	bufs[j][0] = rank;
	bufs[j][1] = j;
	MPI_Isend( bufs[j], 2, MPI_INT, j, 0, comm, &reqs[j] );
    }

    for (j=0; j<remote_size; j++) {
	MPI_Recv( rbuf, 2, MPI_INT, j, 0, comm, MPI_STATUS_IGNORE );
	if (rbuf[0] != j) {
	    printf( "[%d] Expected rank %d but saw %d in %s\n", 
		    wrank, j, rbuf[0], commname );
	    errs++;
	}
	if (rbuf[1] != rank) {
	    printf( "[%d] Expected target rank %d but saw %d from %d in %s\n", 
		    wrank, rank, rbuf[1], j, commname );
	    errs++;
	}
    }
    if (errs) 
	fflush(stdout);
    MPI_Waitall( remote_size, reqs, MPI_STATUSES_IGNORE );

    free( reqs );
    free( bufs );
    free( bufmem );

    return errs;
}
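
As a rough driver sketch (not part of the original listing), the program below splits MPI_COMM_WORLD into two halves, connects them with MPI_Intercomm_create, and runs TestIntercomm on the result. It assumes TestIntercomm and the MTest support routines it uses (taken here to come from "mpitest.h") are compiled into the same program; the names halfcomm and intercomm, the communicator name string, the tag value 0, and the split point are arbitrary choices.

#include <stdio.h>
#include "mpi.h"
#include "mpitest.h"

int TestIntercomm( MPI_Comm comm );   /* defined above */

int main( int argc, char *argv[] )
{
    int rank, size, color, rleader, errs;
    MPI_Comm halfcomm, intercomm;

    MTest_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    if (size < 2) {
        fprintf( stderr, "This sketch needs at least 2 processes\n" );
        MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    /* Lower half of MPI_COMM_WORLD becomes one group, upper half the other */
    color = (rank < size / 2) ? 0 : 1;
    MPI_Comm_split( MPI_COMM_WORLD, color, rank, &halfcomm );

    /* Local leader is rank 0 of each half; the remote leader argument is
       that process's rank in the peer communicator MPI_COMM_WORLD */
    rleader = (color == 0) ? size / 2 : 0;
    MPI_Intercomm_create( halfcomm, 0, MPI_COMM_WORLD, rleader, 0, &intercomm );
    MPI_Comm_set_name( intercomm, "halves of MPI_COMM_WORLD" );

    errs = TestIntercomm( intercomm );

    MPI_Comm_free( &intercomm );
    MPI_Comm_free( &halfcomm );

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}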