/*-----------------------------------------------------------*/
int setupCommunicators(){
	int procHash;

	/* Get hash from processor name */
	procHash = procNameToHash();

	/* Comm_split using procHash as colour to get
	 * local communicator.
	 */
	MPI_Comm_split(comm, procHash, 0, &localComm);

	/* Find ranks of processes in localComm */
	MPI_Comm_rank(localComm, &localCommRank);

	/* Find the size of localComm (for use in calculating multi datasize) */
	MPI_Comm_size(localComm, &localCommSize);

	/* Use localCommRank as colour to get a communicator across nodes. */
	MPI_Comm_split(comm, localCommRank, 0, &crossComm);

	/* Find ranks of processes in crossComm */
	MPI_Comm_rank(crossComm, &crossCommRank);

    MPI_Barrier(comm);

	return 0;
}
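The colour above comes from procNameToHash(), which is defined elsewhere in this benchmark. The sketch below is only an assumption of how such a hash might be derived from MPI_Get_processor_name(); the real implementation may differ, and hash collisions would merge distinct nodes into one colour. On MPI-3 implementations the same node-local split can also be obtained directly with MPI_Comm_split_type(comm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &localComm).

#include <mpi.h>

/* Hypothetical sketch of procNameToHash(): derive a nonnegative colour from
 * the processor (node) name, so ranks on the same node share a colour. */
static int procNameToHashSketch(void)
{
	char name[MPI_MAX_PROCESSOR_NAME];
	int len, i;
	unsigned int hash = 5381;

	MPI_Get_processor_name(name, &len);

	/* djb2-style string hash of the node name */
	for (i = 0; i < len; i++)
		hash = hash * 33u + (unsigned char)name[i];

	/* MPI_Comm_split requires a nonnegative colour (or MPI_UNDEFINED) */
	return (int)(hash & 0x7fffffff);
}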
Example #2
/*@C
  PetscSubcommSetTypeGeneral - Set type of subcommunicators from user's specifications

   Collective on MPI_Comm

   Input Parameters:
+  psubcomm - PetscSubcomm context
.  color   - control of subset assignment (nonnegative integer). Processes with the same color are in the same subcommunicator.
.  subrank - rank in the subcommunicator
-  duprank - rank in the dupparent (see PetscSubcomm)

   Level: advanced

.keywords: communicator, create

.seealso: PetscSubcommCreate(),PetscSubcommDestroy(),PetscSubcommSetNumber(),PetscSubcommSetType()
@*/
PetscErrorCode  PetscSubcommSetTypeGeneral(PetscSubcomm psubcomm,PetscMPIInt color,PetscMPIInt subrank,PetscMPIInt duprank)
{
  PetscErrorCode ierr;
  MPI_Comm       subcomm=0,dupcomm=0,comm=psubcomm->parent;
  PetscMPIInt    size;

  PetscFunctionBegin;
  if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate()");
  if (psubcomm->n < 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subcommunicators %D is incorrect. Call PetscSubcommSetNumber()",psubcomm->n);

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  /* create dupcomm with same size as comm, but its rank, duprank, maps the subcomms contiguously into dupcomm;
     if duprank is not a valid number, then dupcomm is not created - not all applications require dupcomm! */
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (duprank == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"duprank==PETSC_DECIDE is not supported yet");
  else if (duprank >= 0 && duprank < size){
    ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);
  }
  ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(subcomm,&psubcomm->comm,PETSC_NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);
  psubcomm->color     = color;
  PetscFunctionReturn(0);
}
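The colour/subrank/duprank pattern documented above can be illustrated with plain MPI, independent of PETSc: one split builds the subcommunicators, and a second split with a single colour, keyed by duprank, lays their members out contiguously. The sketch below only demonstrates that pattern with an arbitrarily chosen rule for colour and duprank; it is not PETSc code.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
  int rank, size, nsub = 2, color, subrank, duprank, dup_rank_out;
  MPI_Comm subcomm, dupcomm;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  color   = rank % nsub;                                   /* which subcommunicator        */
  subrank = rank / nsub;                                   /* rank within it               */
  duprank = color * ((size + nsub - 1) / nsub) + subrank;  /* subcomm 0 first, then 1, ... */

  MPI_Comm_split(MPI_COMM_WORLD, color, subrank, &subcomm);
  /* Single colour: every rank joins dupcomm; duprank only fixes the ordering. */
  MPI_Comm_split(MPI_COMM_WORLD, 0, duprank, &dupcomm);

  MPI_Comm_rank(dupcomm, &dup_rank_out);
  printf("world %d -> subcomm %d (subrank %d), dupcomm rank %d\n",
         rank, color, subrank, dup_rank_out);

  MPI_Comm_free(&subcomm);
  MPI_Comm_free(&dupcomm);
  MPI_Finalize();
  return 0;
}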
Example #3
void mesh(MPI_Comm comm, MPI_Comm *row_comm, MPI_Comm *col_comm, 
		int p, int q, int iam, int np, int *riam, int *ciam)
{
	// not enough processes
	if (np < p * q)
		return ;

	int color;
	if (iam < p * q) {
		color = iam / q;
		//int MPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm *newcomm)
		//1. Use MPI_Allgather to get the color and key from each process
		//2. Count the number of processes with the same color; create a
		//communicator with that many processes.  If this process passed
		//MPI_UNDEFINED as the color, it receives MPI_COMM_NULL instead.
		//3. Use key to order the ranks
		//4. Set the VCRs using the ordered key values
		MPI_Comm_split(comm, color, iam, row_comm);

		color = iam % q; 
		MPI_Comm_split(comm, color, iam, col_comm);

		//row_comm groups processes in the same row; the rank within it is the column index
		MPI_Comm_rank(*row_comm, ciam);

		//col_comm groups processes in the same column; the rank within it is the row index
		MPI_Comm_rank(*col_comm, riam);
	} else {
		color = MPI_UNDEFINED;
		MPI_Comm_split(comm, color, iam, row_comm);
		MPI_Comm_split(comm, color, iam, col_comm);
	}
}
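The original driver for mesh() is not shown; the sketch below is a possible main() (an assumption, not taken from the source) that arranges the processes as a p x q grid and reports each process's row and column rank.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	int iam, np, p = 2, q = 2, riam = -1, ciam = -1;
	MPI_Comm row_comm = MPI_COMM_NULL, col_comm = MPI_COMM_NULL;

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &iam);
	MPI_Comm_size(MPI_COMM_WORLD, &np);

	mesh(MPI_COMM_WORLD, &row_comm, &col_comm, p, q, iam, np, &riam, &ciam);

	/* ranks outside the p*q grid (or all ranks, if np < p*q) keep MPI_COMM_NULL */
	if (row_comm != MPI_COMM_NULL)
		printf("global %d -> grid position (row %d, col %d)\n", iam, riam, ciam);

	if (row_comm != MPI_COMM_NULL) MPI_Comm_free(&row_comm);
	if (col_comm != MPI_COMM_NULL) MPI_Comm_free(&col_comm);
	MPI_Finalize();
	return 0;
}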
Example #4
static void
_create_segment_group(struct SegmentGroupDescr * descr, size_t * sizes, size_t * outsizes, size_t avgsegsize, int Ngroup, MPI_Comm comm)
{
    int i;
    int ThisTask, NTask;

    MPI_Comm_size(comm, &NTask);
    MPI_Comm_rank(comm, &ThisTask);

    descr->ThisSegment = _assign_colors(avgsegsize, sizes, outsizes, &descr->Nsegments, comm);

    if(descr->ThisSegment >= 0) {
        /* assign segments to groups.
         * if Nsegments < Ngroup, some groups will have no segments, and thus no ranks belong to them. */
        descr->GroupID = ((size_t) descr->ThisSegment) * Ngroup / descr->Nsegments;
    } else {
        descr->GroupID = Ngroup + 1;
        descr->ThisSegment = NTask + 1;
    }

    descr->Ngroup = Ngroup;

    MPI_Comm_split(comm, descr->GroupID, ThisTask, &descr->Group);

    MPI_Allreduce(&descr->ThisSegment, &descr->segment_start, 1, MPI_INT, MPI_MIN, descr->Group);
    MPI_Allreduce(&descr->ThisSegment, &descr->segment_end, 1, MPI_INT, MPI_MAX, descr->Group);

    descr->segment_end ++;

    int rank;

    MPI_Comm_rank(descr->Group, &rank);

    struct { 
        size_t val;
        int   rank;
    } leader_st;

    leader_st.val = sizes[ThisTask];
    leader_st.rank = rank;

    MPI_Allreduce(MPI_IN_PLACE, &leader_st, 1, MPI_LONG_INT, MPI_MAXLOC, descr->Group);

    descr->is_group_leader = rank == leader_st.rank;
    descr->group_leader_rank = leader_st.rank;

    MPI_Comm_split(comm, rank == leader_st.rank? 0 : 1, ThisTask, &descr->Leader);

    MPI_Comm_split(descr->Group, descr->ThisSegment, ThisTask, &descr->Segment);
    int rank2;

    MPI_Comm_rank(descr->Segment, &rank2);

    leader_st.val = sizes[ThisTask];
    leader_st.rank = rank2;

    MPI_Allreduce(MPI_IN_PLACE, &leader_st, 1, MPI_LONG_INT, MPI_MINLOC, descr->Segment);
    descr->segment_leader_rank = leader_st.rank;
}
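The leader election above relies on MPI_MAXLOC over the predefined MPI_LONG_INT pair type; a minimal standalone sketch of that idiom (detached from the segment-group structures above) follows. Note that MPI_LONG_INT formally describes a {long, int} pair, so the struct above, which uses size_t for the value, is only safe where size_t and long share a representation.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    /* MPI_LONG_INT corresponds to a {long, int} pair */
    struct { long val; int rank; } in, out;
    int rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    in.val  = (long)((rank * 7 + 3) % 11);  /* some per-rank load metric */
    in.rank = rank;

    /* every rank learns the maximum value and which rank owns it */
    MPI_Allreduce(&in, &out, 1, MPI_LONG_INT, MPI_MAXLOC, MPI_COMM_WORLD);

    if (rank == out.rank)
        printf("rank %d is the leader (val = %ld)\n", rank, out.val);

    MPI_Finalize();
    return 0;
}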
Example #5
//Set global mpi_comm and mpi_group.
//Check whether the number of ranks is a power of two (2, 4, 8, 16, ...);
//if not, create a new group containing only the first ranks up to the largest valid (power-of-two) count.
//Return 1 if this rank is part of the active comm world
//Return 0 if not
char prepareMPIComm(void)
{
    float tf;
    int ti;
    int idOld;
    char activeNode;

    //Get Rank and GroupSize
    MPI_Comm_rank (MPI_COMM_WORLD, &idOld);        /* get current process id */
    MPI_Comm_size (MPI_COMM_WORLD, &mpi_size);        /* get number of processes */

    if(mpi_size < 2) {
        printf("Error! At least two nodes are needed!\n");
        MPI_Finalize();
        exit(EXIT_FAILURE);
    }

    //Test whether the size is a power of 2
    tf = log(mpi_size)/log(2);
    ti = tf;
    tf = tf - (float)ti;
    if(tf > 0)
    {
        if(idOld == 0) {
            printf("Can't use all available nodes! Will only use the first [%d] of [%d].\n",1<<ti, mpi_size);
            fflush(stdout);
        }
        mpi_size = 1 << ti;

        if(idOld < mpi_size)
        {
            activeNode = 1;

            // Split comm into two groups: active ranks (colour 0) and the rest
            MPI_Comm_split(MPI_COMM_WORLD, 0, idOld, &mpi_comm);
            MPI_Comm_group( mpi_comm, &mpi_group);

            //printf("[%d] Creating new group with [%d] members.\n",idOld, mpi_size);
            //printf("Created new group of size [%d] with id [%d] -> [%d]\n", mpi_size, idOld, mpi_id); fflush(stdout);

        } else {
            activeNode = 0;
            MPI_Comm_split(MPI_COMM_WORLD, 1, idOld - mpi_size, &mpi_comm);
            MPI_Comm_group( mpi_comm, &mpi_group);

            //printf("Node [%d] wont participate.\n", idOld);
        }
    } else {
        activeNode = 1;
        //If the number of ranks is already a power of two, use the standard world comm
        MPI_Comm_group( MPI_COMM_WORLD, &mpi_group);
        MPI_Comm_create( MPI_COMM_WORLD, mpi_group, &mpi_comm);
    }

    MPI_Comm_size( mpi_comm, &mpi_size );
    MPI_Comm_rank( mpi_comm, &mpi_id );
    return activeNode;
}
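The power-of-two test above goes through floating-point logarithms; an integer-only alternative (a sketch, not part of the original benchmark) is shown below.

#include <stdio.h>

/* n is a power of two iff it is positive and has exactly one bit set */
static int is_power_of_two(int n)
{
    return n > 0 && (n & (n - 1)) == 0;
}

/* largest power of two not exceeding n (n >= 1) */
static int largest_power_of_two(int n)
{
    int p = 1;
    while (2 * p <= n)
        p *= 2;
    return p;
}

int main(void)
{
    int n;
    for (n = 1; n <= 9; n++)
        printf("%d: power of two? %d, usable ranks: %d\n",
               n, is_power_of_two(n), largest_power_of_two(n));
    return 0;
}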
PetscErrorCode PetscSubcommCreate_interlaced(PetscSubcomm psubcomm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,*subsize,duprank,subrank;
  PetscMPIInt    np_subcomm,nleftover,i,j,color,nsubcomm=psubcomm->n;
  MPI_Comm       subcomm=0,dupcomm=0,comm=psubcomm->parent;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* get size of each subcommunicator */
  ierr = PetscMalloc((1+nsubcomm)*sizeof(PetscMPIInt),&subsize);CHKERRQ(ierr);

  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++) {
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }

  /* find color for this proc */
  color   = rank%nsubcomm;
  subrank = rank/nsubcomm;

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  j = 0; duprank = 0;
  for (i=0; i<nsubcomm; i++) {
    if (j == color) {
      duprank += subrank;
      break;
    }
    duprank += subsize[i]; j++;
  }

  /* create dupcomm with same size as comm, but its rank, duprank, maps subcomm's contiguously into dupcomm */
  ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);
  {
    PetscThreadComm tcomm;
    ierr = PetscCommGetThreadComm(comm,&tcomm);CHKERRQ(ierr);
    ierr = MPI_Attr_put(dupcomm,Petsc_ThreadComm_keyval,tcomm);CHKERRQ(ierr);
    tcomm->refct++;
    ierr = MPI_Attr_put(subcomm,Petsc_ThreadComm_keyval,tcomm);CHKERRQ(ierr);
    tcomm->refct++;
  }
  ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,NULL);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(subcomm,&psubcomm->comm,NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);

  psubcomm->color   = color;
  psubcomm->subsize = subsize;
  psubcomm->type    = PETSC_SUBCOMM_INTERLACED;
  PetscFunctionReturn(0);
}
PetscErrorCode PetscSubcommCreate_contiguous(PetscSubcomm psubcomm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,*subsize,duprank=-1,subrank=-1;
  PetscMPIInt    np_subcomm,nleftover,i,color=-1,rankstart,nsubcomm=psubcomm->n;
  MPI_Comm       subcomm=0,dupcomm=0,comm=psubcomm->parent;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* get size of each subcommunicator */
  ierr = PetscMalloc((1+nsubcomm)*sizeof(PetscMPIInt),&subsize);CHKERRQ(ierr);

  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++) {
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }

  /* get color and subrank of this proc */
  rankstart = 0;
  for (i=0; i<nsubcomm; i++) {
    if (rank >= rankstart && rank < rankstart+subsize[i]) {
      color   = i;
      subrank = rank - rankstart;
      duprank = rank;
      break;
    } else rankstart += subsize[i];
  }

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  /* create dupcomm with same size as comm, but its rank, duprank, maps subcomm's contiguously into dupcomm */
  ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);
  {
    PetscThreadComm tcomm;
    ierr = PetscCommGetThreadComm(comm,&tcomm);CHKERRQ(ierr);
    ierr = MPI_Attr_put(dupcomm,Petsc_ThreadComm_keyval,tcomm);CHKERRQ(ierr);
    tcomm->refct++;
    ierr = MPI_Attr_put(subcomm,Petsc_ThreadComm_keyval,tcomm);CHKERRQ(ierr);
    tcomm->refct++;
  }
  ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,NULL);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(subcomm,&psubcomm->comm,NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);

  psubcomm->color   = color;
  psubcomm->subsize = subsize;
  psubcomm->type    = PETSC_SUBCOMM_CONTIGUOUS;
  PetscFunctionReturn(0);
}
Example #8
static void initialize(int nid) {
    MPI_Comm_rank(MPI_COMM_WORLD, &ThisTask);

    /* First split into ranks on the same node */
    MPI_Comm_split(MPI_COMM_WORLD, nid, ThisTask, &NODE_GROUPS);

    MPI_Comm_rank(NODE_GROUPS, &NodeRank);

    /* Next split by Node Rank */
    MPI_Comm_split(MPI_COMM_WORLD, NodeRank, ThisTask, &NODE_LEADERS);

}
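Where MPI-3 is available, the node split above does not need an externally supplied node id: MPI_Comm_split_type with MPI_COMM_TYPE_SHARED groups the ranks that can share memory, which is normally one group per node. A sketch, assuming ThisTask, NodeRank, NODE_GROUPS and NODE_LEADERS are the same globals used by initialize() above:

/* assumes the globals used by initialize() above, e.g.
 *   extern int ThisTask, NodeRank;
 *   extern MPI_Comm NODE_GROUPS, NODE_LEADERS;                        */
static void initialize_mpi3(void) {
    MPI_Comm_rank(MPI_COMM_WORLD, &ThisTask);

    /* Ranks that can share memory: typically, the ranks on the same node */
    MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, ThisTask,
                        MPI_INFO_NULL, &NODE_GROUPS);

    MPI_Comm_rank(NODE_GROUPS, &NodeRank);

    /* One communicator per node-local rank; NodeRank 0 forms the leaders */
    MPI_Comm_split(MPI_COMM_WORLD, NodeRank, ThisTask, &NODE_LEADERS);
}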
Example #9
void Set_Communicator( struct comm_info *c_info )
/**********************************************************************

----------------------------------------------------------------------
             VARIABLE              |       TYPE        |   MEANING
----------------------------------------------------------------------
Input      :                       |                   |
Output     :                       |                   |
                                   |                   |
In/Out     :  c_info               | struct comm_info* | see comm_info.h
                                   |                   | Communicator set up
-----------------------------------------------------------------------

-----------------------------------------------------------------------
Description: Initialization of communicators by group coloring
---------------------------------------------------------------------*/
{
    int color,key,i;

    /* insert choice for communicator here;
       NOTE   :  globally more than one communicator is allowed
       Example: grouping of pairs of processes:
       0 0 1 1 2 2  .. (if even),  UNDEF 0 0 1 1 2 2  .. (if odd)
       */

    if( c_info->communicator != MPI_COMM_NULL &&
            c_info->communicator != MPI_COMM_SELF &&
            c_info->communicator != MPI_COMM_WORLD)
    {
        i=MPI_Comm_free(&c_info->communicator);
        Err_Hand(1,i);
    }

    if(c_info->group_mode >= 0)
    {
        i=c_info->w_rank;
        color = i/c_info->NP;
        c_info->group_no = color;
        key = 0;
        if(color >= c_info->w_num_procs/c_info->NP) color=MPI_UNDEFINED;
        MPI_Comm_split(MPI_COMM_WORLD, color, key, &c_info->communicator);
    }
    /* Default choice and Group definition.  */
    else
    {
        if(c_info->w_rank < c_info->NP) color=0;
        else color=MPI_UNDEFINED;
        c_info->group_no = 0;
        key=0;
        MPI_Comm_split(MPI_COMM_WORLD, color, key, &c_info->communicator);
    }
}
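The pair-grouping example in the comment above (colours 0 0 1 1 2 2 ... when the number of ranks is even, and UNDEF 0 0 1 1 2 2 ... when it is odd) can be written out as a small standalone sketch; it is an illustration only, not part of the benchmark.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, size, color;
    MPI_Comm pair_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (size % 2 == 0)
        color = rank / 2;                                    /* 0 0 1 1 2 2 ...       */
    else
        color = (rank == 0) ? MPI_UNDEFINED : (rank - 1) / 2; /* UNDEF 0 0 1 1 2 2 ... */

    MPI_Comm_split(MPI_COMM_WORLD, color, 0, &pair_comm);

    if (pair_comm != MPI_COMM_NULL) {
        int prank;
        MPI_Comm_rank(pair_comm, &prank);
        printf("world rank %d -> pair %d, local rank %d\n", rank, color, prank);
        MPI_Comm_free(&pair_comm);
    }

    MPI_Finalize();
    return 0;
}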
Example #10
/*@C
  PetscSubcommSetTypeGeneral - Set a PetscSubcomm from user's specifications

   Collective on MPI_Comm

   Input Parameters:
+  psubcomm - PetscSubcomm context
.  color   - control of subset assignment (nonnegative integer). Processes with the same color are in the same subcommunicator.
-  subrank - rank in the subcommunicator

   Level: advanced

.keywords: communicator, create

.seealso: PetscSubcommCreate(),PetscSubcommDestroy(),PetscSubcommSetNumber(),PetscSubcommSetType()
@*/
PetscErrorCode PetscSubcommSetTypeGeneral(PetscSubcomm psubcomm,PetscMPIInt color,PetscMPIInt subrank)
{
  PetscErrorCode ierr;
  MPI_Comm       subcomm=0,dupcomm=0,comm=psubcomm->parent;
  PetscMPIInt    size,icolor,duprank,*recvbuf,sendbuf[3],mysubsize,rank,*subsize;
  PetscMPIInt    i,nsubcomm=psubcomm->n;

  PetscFunctionBegin;
  if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate()");
  if (nsubcomm < 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subcommunicators %d is incorrect. Call PetscSubcommSetNumber()",nsubcomm);

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  /* create dupcomm with same size as comm, but its rank, duprank, maps subcomm's contiguously into dupcomm */
  /* TODO: this can be done in an ostensibly scalable way (i.e., without allocating an array of size 'size') as is done in PetscObjectsCreateGlobalOrdering(). */
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscMalloc1(2*size,&recvbuf);CHKERRQ(ierr);

  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(subcomm,&mysubsize);CHKERRQ(ierr);

  sendbuf[0] = color;
  sendbuf[1] = mysubsize;
  ierr = MPI_Allgather(sendbuf,2,MPI_INT,recvbuf,2,MPI_INT,comm);CHKERRQ(ierr);

  ierr = PetscCalloc1(nsubcomm,&subsize);CHKERRQ(ierr);
  for (i=0; i<2*size; i+=2) {
    subsize[recvbuf[i]] = recvbuf[i+1];
  }
  ierr = PetscFree(recvbuf);CHKERRQ(ierr);

  duprank = 0;
  for (icolor=0; icolor<nsubcomm; icolor++) {
    if (icolor != color) { /* not color of this process */
      duprank += subsize[icolor];
    } else {
      duprank += subrank;
      break;
    }
  }
  ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);

  ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,NULL);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(subcomm,&psubcomm->child,NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);

  psubcomm->color   = color;
  psubcomm->subsize = subsize;
  psubcomm->type    = PETSC_SUBCOMM_GENERAL;
  PetscFunctionReturn(0);
}
Example #11
int main(int argc, char **argv)
{
	MPI_Comm c2;
	int rank;
	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	if (rank == 1) {
		MPI_Comm_split(MPI_COMM_WORLD, -10, 0, &c2);
	} else {
		MPI_Comm_split(MPI_COMM_WORLD, 10, 0, &c2);
	}
	MPI_Finalize();
	return 0;
}
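The test above deliberately passes a negative colour on rank 1; MPI_Comm_split requires the colour to be nonnegative or MPI_UNDEFINED, so this exercises the library's error handling. A sketch of the valid way to keep a rank out of the new communicator:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank;
    MPI_Comm c2;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* rank 1 opts out with MPI_UNDEFINED and receives MPI_COMM_NULL */
    MPI_Comm_split(MPI_COMM_WORLD, (rank == 1) ? MPI_UNDEFINED : 10, 0, &c2);

    if (c2 == MPI_COMM_NULL)
        printf("rank %d is not a member of the new communicator\n", rank);
    else
        MPI_Comm_free(&c2);

    MPI_Finalize();
    return 0;
}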
Example #12
/*@C
  PetscSubcommCreate - Create a PetscSubcomm context.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
-  nsubcomm - the number of subcommunicators to be created

   Output Parameter:
.  psubcomm - location to store the PetscSubcomm context


   Notes:
   To avoid data scattering from subcomm back to the original comm, the subcommunicators
   are built by assigning processes to them in an interlaced (round-robin) fashion.
   Example: size=4, nsubcomm=(*psubcomm)->n=3
     comm=(*psubcomm)->parent:
      rank:     [0]  [1]  [2]  [3]
      color:     0    1    2    0

     subcomm=(*psubcomm)->comm:
      subrank:  [0]  [0]  [0]  [1]    

     dupcomm=(*psubcomm)->dupparent:
      duprank:  [0]  [2]  [3]  [1]

     Here, subcomm[color = 0] has subsize=2, owns process [0] and [3]
           subcomm[color = 1] has subsize=1, owns process [1]
           subcomm[color = 2] has subsize=1, owns process [2]
           dupcomm has same number of processes as comm, and its duprank maps
           processes in subcomm contiguously into a 1d array:
            duprank: [0] [1]      [2]         [3]
            rank:    [0] [3]      [1]         [2]
                    subcomm[0] subcomm[1]  subcomm[2]

   Level: advanced

.keywords: communicator, create

.seealso: PetscSubcommDestroy()
@*/
PetscErrorCode PETSCMAT_DLLEXPORT PetscSubcommCreate(MPI_Comm comm,PetscInt nsubcomm,PetscSubcomm *psubcomm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,*subsize,duprank,subrank;
  PetscInt       np_subcomm,nleftover,i,j,color;
  MPI_Comm       subcomm=0,dupcomm=0;
  PetscSubcomm   psubcomm_tmp;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (nsubcomm < 1 || nsubcomm > size) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE, "Num of subcommunicators %D cannot be < 1 or > input comm size %D",nsubcomm,size);

  /* get size of each subcommunicator */
  ierr = PetscMalloc((1+nsubcomm)*sizeof(PetscMPIInt),&subsize);CHKERRQ(ierr);
  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++){
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }

  /* find color for this proc */
  color   = rank%nsubcomm;
  subrank = rank/nsubcomm;

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  j = 0; duprank = 0;
  for (i=0; i<nsubcomm; i++){
    if (j == color){
      duprank += subrank;
      break;
    }
    duprank += subsize[i]; j++;
  }
  ierr = PetscFree(subsize);CHKERRQ(ierr);
 
  /* create dupcomm with same size as comm, but its rank, duprank, maps subcomm's contiguously into dupcomm */   
  ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);
 
  ierr = PetscNew(struct _n_PetscSubcomm,&psubcomm_tmp);CHKERRQ(ierr);
  psubcomm_tmp->parent    = comm;
  psubcomm_tmp->dupparent = dupcomm;
  psubcomm_tmp->comm      = subcomm;
  psubcomm_tmp->n         = nsubcomm;
  psubcomm_tmp->color     = color;
  *psubcomm = psubcomm_tmp;
  PetscFunctionReturn(0);
}
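The mapping in the Notes above (size = 4, nsubcomm = 3) can be reproduced with a few lines of plain C; the sketch below is a standalone demonstration of the interlaced colour/subrank/duprank rule, not PETSc code.

#include <stdio.h>

/* Reproduces the worked example from the Notes: colours 0 1 2 0 and
 * dupranks 0 2 3 1 for size = 4, nsubcomm = 3. */
int main(void)
{
    int size = 4, nsubcomm = 3, subsize[3];
    int np = size / nsubcomm, nleftover = size - nsubcomm * np;
    int rank, i;

    for (i = 0; i < nsubcomm; i++)
        subsize[i] = np + (i < nleftover ? 1 : 0);

    for (rank = 0; rank < size; rank++) {
        int color   = rank % nsubcomm;   /* interlaced colour          */
        int subrank = rank / nsubcomm;   /* rank inside its subcomm    */
        int duprank = subrank;
        for (i = 0; i < color; i++)      /* subcomms placed in order   */
            duprank += subsize[i];
        printf("rank %d: color %d subrank %d duprank %d\n",
               rank, color, subrank, duprank);
    }
    return 0;
}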
int main(int argc, char** argv) {

    int world_rank = 0, local_rank = 0;
    int world_count = 0, local_count = 0;
    int num_of_groups = 8, group = 0;
    
    //New communicator
    MPI_Comm COMM_LOCAL;
    
    //Basic stuff
    MPI_Init(&argc, &argv);    
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_count);
    
    //Figure out which group each process goes to and its local rank.
    group = world_rank % num_of_groups;
    local_rank = world_rank / num_of_groups;
    
    //Add processors from COMM_WORLD to their respective COMM_LOCAL
    MPI_Comm_split(MPI_COMM_WORLD, group, local_rank, &COMM_LOCAL);
    
    //Get number of processors in each communicator
    MPI_Comm_size(COMM_LOCAL, &local_count);
    
    //use if statement to control which processors print output
    //if(group == 2), if(local_rank == 3), etc
    if(1)
        printf("Group %d of %d, Local rank: %d of %d, Global rank: %d of %d\n", group, num_of_groups, local_rank, local_count, world_rank, world_count);
    
    MPI_Finalize();
    return (EXIT_SUCCESS);
}
Example #14
int main(int argc, char* argv[]) {
  int id=0, numprocs;
  int color;
  MPI_Comm local;
  double t1, t2;

  ELG_USER_START("main");

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &id);
  printf("%03d: ctest-elg start\n", id);

  /* define Cartesian topology */
  elg_cart_create(2, (numprocs+1)/2, 0, 1, 1, 0);
  elg_cart_coords(id%2, id/2, 0);

  color = (id >= numprocs/2);
  MPI_Comm_split(MPI_COMM_WORLD, color, id, &local);

  t1 = MPI_Wtime();
  parallel(MPI_COMM_WORLD);
  parallel(local);
  parallel(MPI_COMM_WORLD);
  t2 = MPI_Wtime();

  MPI_Comm_free(&local);
  MPI_Finalize();
  printf("%03d: ctest-elg end  (%12.9f)\n", id, (t2-t1));

  ELG_USER_END("main");
  return 0;
}
Example #15
void PX(split_cart_procmesh_for_3dto2d_remap_q1)(
    const INT *n, MPI_Comm comm_cart_3d,
    MPI_Comm *comm_1d
    )
{
  int p0, p1, q0=0, q1=0;
  int ndims, coords_3d[3];
  int dim_1d, period_1d, reorder=0;
  int color, key;
  MPI_Comm comm;

  if( !PX(is_cart_procmesh)(comm_cart_3d) )
    return;

  MPI_Cartdim_get(comm_cart_3d, &ndims);
  if(ndims != 3)
    return;

  PX(get_mpi_cart_coords)(comm_cart_3d, ndims, coords_3d);
  PX(get_procmesh_dims_2d)(n, comm_cart_3d, &p0, &p1, &q0, &q1);

  /* split into p0*p1*q0 comms of size q1 */
  color = coords_3d[0]*p1*q0 + coords_3d[1]*q0 + coords_3d[2]/q1;
  key = coords_3d[2]%q1;
//   key = coords_3d[2]/q0; /* TODO: delete this line after several tests */
  MPI_Comm_split(comm_cart_3d, color, key, &comm);

  dim_1d = q1; period_1d = 1;
  MPI_Cart_create(comm, ndims=1, &dim_1d, &period_1d, reorder,
      comm_1d);

  MPI_Comm_free(&comm);
}
Example #16
void PX(split_cart_procmesh_3dto2d_p1q1)(
    const INT *n, MPI_Comm comm_cart_3d,
    MPI_Comm *comm_1d
    )
{
  int p0, p1, q0=0, q1=0;
  int ndims, coords_3d[3];
  int dim_1d, period_1d, reorder=0;
  int color, key;
  MPI_Comm comm;

  if( !PX(is_cart_procmesh)(comm_cart_3d) )
    return;

  MPI_Cartdim_get(comm_cart_3d, &ndims);
  if(ndims != 3)
    return;

  PX(get_mpi_cart_coords)(comm_cart_3d, ndims, coords_3d);
  PX(get_procmesh_dims_2d)(n, comm_cart_3d, &p0, &p1, &q0, &q1);

  /* split into p0*q0 comms of size p1*q1 */
  color = coords_3d[0]*q0 + coords_3d[2]/q1;
  key = coords_3d[1]*q1 + coords_3d[2]%q1;
  MPI_Comm_split(comm_cart_3d, color, key, &comm);

  dim_1d = p1*q1; period_1d = 1;
  MPI_Cart_create(comm, ndims=1, &dim_1d, &period_1d, reorder,
      comm_1d);

  MPI_Comm_free(&comm);
}
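A worked illustration of the two colour/key formulas above; it assumes (based on those formulas) that the 3D process mesh has dimensions p0 x p1 x (q0*q1), and simply enumerates which communicator each grid coordinate would land in for p0 = p1 = q0 = q1 = 2. It is a standalone sketch, not PFFT code.

#include <stdio.h>

int main(void)
{
    int p0 = 2, p1 = 2, q0 = 2, q1 = 2;
    int c0, c1, c2;

    for (c0 = 0; c0 < p0; c0++)
        for (c1 = 0; c1 < p1; c1++)
            for (c2 = 0; c2 < q0 * q1; c2++) {
                int color_q1   = c0*p1*q0 + c1*q0 + c2/q1; /* comms of size q1    */
                int key_q1     = c2 % q1;
                int color_p1q1 = c0*q0 + c2/q1;            /* comms of size p1*q1 */
                int key_p1q1   = c1*q1 + c2 % q1;
                printf("(%d,%d,%d): q1-split -> (color %d, key %d), p1q1-split -> (color %d, key %d)\n",
                       c0, c1, c2, color_q1, key_q1, color_p1q1, key_p1q1);
            }
    return 0;
}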
Example #17
inline MPI_Comm splitComm(int color, MPI_Comm globalComm)
{
    int myGlobalId = getProcId(globalComm);
    MPI_Comm localComm;
    MPI_Comm_split(globalComm, color, myGlobalId, &localComm);
    return localComm;
}
Example #18
int main(int argc, char *argv[])
{
    int wrank, wsize, rank, size, color;
    int tmp;
    MPI_Comm newcomm;

    MTest_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &wsize);
    MPI_Comm_rank(MPI_COMM_WORLD, &wrank);

    /* Color is 0 or 1; 1 will be the processes that "fault" */
    color = (wrank > 0) && (wrank <= wsize / 2);
    MPI_Comm_split(MPI_COMM_WORLD, color, wrank, &newcomm);

    MPI_Barrier(MPI_COMM_WORLD);
    if (color) {
        /* Simulate a fault on some processes */
        exit(1);
    }

    /* Can we still use newcomm? */
    MPI_Comm_size(newcomm, &size);
    MPI_Comm_rank(newcomm, &rank);

    MPI_Allreduce(&rank, &tmp, 1, MPI_INT, MPI_SUM, newcomm);
    if (tmp != (size * (size + 1)) / 2) {
        printf("Allreduce gave %d but expected %d\n", tmp, (size * (size + 1)) / 2);
    }

    MPI_Comm_free(&newcomm);
    MTest_Finalize(0);

    return 0;
}
Example #19
int main(int argc, char** argv)
{
  // Initialize MPI
  MPI_Init(&argc, &argv);

  int size, rank;

  // Figure out the number of processes and our rank in the world group
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if (size % 2) {
    printf("Need an even number of processes\n");
    MPI_Finalize();
    return 1;
  }

  // setup new communicators
  MPI_Comm twocomm;
  MPI_Comm_split(MPI_COMM_WORLD, rank/2, rank%2, &twocomm);

  int senddata[2], recvdata[2];
  senddata[(rank+1)%2] = rank;
  senddata[rank%2] = 0;
  MPI_Alltoall(senddata, 1, MPI_INT, recvdata, 1, MPI_INT, twocomm);

  // print to tty
  printf("process %i: received %i\n", rank, recvdata[(rank+1)%2]);

  // close down MPI
  MPI_Finalize();

  // ay-oh-kay
  return 0;
}
Example #20
/**********************************************************************\
 *    Implementation of global functions                              * 
\**********************************************************************/
extern io_amiga_t
io_amiga_open(io_logging_t log,
              char *fname,
              io_file_swap_t swapped,
              io_file_mode_t mode,
              uint32_t reader)
{
	io_amiga_t f;

	/* Get memory for the structure */
	f = (io_amiga_t)malloc(sizeof(io_amiga_struct_t));
	if (f == NULL) {
		io_logging_memfatal(log,  "io_amiga structure");
		return NULL;
	}

	/* Start filling the structure */

	/* Store the filename */
	f->fname = (char *)malloc(sizeof(char) * (strlen(fname) + 1));
	if (f->fname == NULL) {
		io_logging_memfatal(log, "filename of AMIGAFILE");
		free(f);
		return NULL;
	}
	strncpy(f->fname, fname, strlen(fname)+1);

	/* Okay, we are an AMIGA file */
	f->ftype = IO_FILE_AMIGA;

	/* And we can just copy in the parallel information */
#	ifdef WITH_MPI
	MPI_Comm_rank(MPI_COMM_WORLD, &(f->rank));
	MPI_Comm_size(MPI_COMM_WORLD, &(f->size));
	MPI_Comm_split(MPI_COMM_WORLD, 1,
	               f->rank, &(f->mycomm));
	MPI_Comm_size(f->mycomm, &(f->size_mycomm));
	MPI_Comm_rank(f->mycomm, &(f->rank_mycomm));
#	endif

	/* Try to open the file */
	if (local_openopen(log, f, mode) == NULL)
		return NULL;

	/* Set the mode and then print a message when in READ mode about the
	 * swapping*/
	local_openswapped(log, f, swapped);

	/* Set the file_sizeof_long */
	local_openfsol(log, f);

	/* Nothing for the header for now */
	f->header = NULL;

	/* Initialise the rest to safe parameters */
	f->minweight = 1e40;
	f->maxweight = 0.0;

	return f;
}
int main(int argc, char **argv)
{
    MPI_Group basegroup;
    MPI_Group g1;
    MPI_Comm comm, newcomm;
    int rank, size;
    int worldrank;
    int errs = 0, errclass, mpi_errno;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &worldrank);
    comm = MPI_COMM_WORLD;
    MPI_Comm_group(comm, &basegroup);
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);
    MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    MPI_Comm_split(comm, 0, size - rank, &newcomm);
    MPI_Comm_group(newcomm, &g1);

    /* Checking group_intersection for NULL variable */
    mpi_errno = MPI_Group_intersection(basegroup, g1, NULL);
    MPI_Error_class(mpi_errno, &errclass);
    if (errclass != MPI_ERR_ARG)
        ++errs;

    /* comm aliases MPI_COMM_WORLD here and must not be freed */
    MPI_Comm_free(&newcomm);
    MPI_Group_free(&basegroup);
    MPI_Group_free(&g1);
    MTest_Finalize(errs);
    return 0;
}
Example #22
// Construct from components
MytwoWayMPI::MytwoWayMPI(const dictionary& dict,cfdemCloud& sm)
:
    dataExchangeModel(dict,sm), 
    propsDict_(dict.subDict(typeName + "Props"))
{
           
    MPI_Comm_rank(MPI_COMM_WORLD,&me);
    MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
        
    if (me < nprocs) liggghts = 1;   // always true here, so every rank joins
    else liggghts = MPI_UNDEFINED;

    //THIS HAS TO BE HERE!
    MPI_Comm_split(MPI_COMM_WORLD,liggghts,0,&comm_liggghts);
    
    couplingInterval_=readScalar(dict_.lookup("couplingInterval"));
    DEMts_ = readScalar(propsDict_.lookup("DEMts"));
    //DEMts_ = lmp->time_step;
    //DEMts_ = 0.00001;
    checkTSsize();
        
    if (liggghts == 1) 
    {
        Info<<"*** Starting up MYDEM first TIME on "<<me<<" out of "<<nprocs<< " processors!"<<endl;
        lmp = new MYLAMMPS(0,NULL,comm_liggghts,DEMts_,couplingInterval_);

        //+++
        MPI_Barrier(MPI_COMM_WORLD);
    }
}
Example #23
int main( int argc, char* argv[] )
{
  int i, res;
  int myrank, nprocs;
  MPI_Group g1;
  MPI_Comm com1, com2, com3;
  
  MPI_Init( &argc, &argv );

  MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
  MPI_Comm_size( MPI_COMM_WORLD, &nprocs );

  for( i=0; i<REPEAT; i++ ) {
    MPI_Comm_group(MPI_COMM_WORLD, &g1);
    MPI_Comm_create(MPI_COMM_WORLD, g1, &com1);
    
    MPI_Comm_compare(MPI_COMM_WORLD, com1, &res);
    MPI_Comm_dup(MPI_COMM_WORLD, &com2);
    
    MPI_Comm_split(MPI_COMM_WORLD, myrank, myrank, &com3);

    MPI_Comm_free(&com1);
    MPI_Comm_free(&com2);
    MPI_Comm_free(&com3);
    MPI_Group_free(&g1);

  }


  MPI_Finalize();
  return 0;
}
int main(int argc, char* argv[])
{
    MPI_Comm comm, newcomm, scomm;
    MPI_Group group;
    MPI_Info newinfo = MPI_INFO_NULL;
    int rank, size, color;
    int errs = 0, errclass, mpi_errno;

    MTest_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_dup(MPI_COMM_WORLD, &comm);
    MPI_Comm_group(comm, &group);

    MPI_Comm_create(comm, group, &newcomm);
    color = rank % 2;
    MPI_Comm_split(MPI_COMM_WORLD, color, rank, &scomm);
    MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    /* Test MPI_Comm_split_type with a NULL newcomm pointer */
    mpi_errno = MPI_Comm_split_type(scomm, 2, 4, newinfo, NULL);
    MPI_Error_class(mpi_errno, &errclass);
    if (errclass != MPI_ERR_ARG)
        ++errs;

    MPI_Comm_free(&comm);
    MPI_Comm_free(&newcomm);
    MPI_Comm_free(&scomm);
    MPI_Group_free(&group);
    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
Example #25
int main(int argc, char* argv[]) {
  int id=0, numprocs;
  int color;
  MPI_Comm local;
  double t1, t2;

  VT_initialize(&argc, &argv);
  VT_symdef(1, "step", "USR");
  VT_symdef(2, "sequential", "USR");
  VT_symdef(3, "p2p", "USR");
  VT_symdef(4, "parallel", "USR");
  VT_symdef(5, "main", "USR");
  VT_begin(5);

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &id);
  printf("%03d: ctest-vt start\n", id);
  color = (id >= numprocs/2);
  MPI_Comm_split(MPI_COMM_WORLD, color, id, &local);

  t1 = MPI_Wtime();
  parallel(MPI_COMM_WORLD);
  parallel(local);
  parallel(MPI_COMM_WORLD);
  t2 = MPI_Wtime();

  MPI_Comm_free(&local);
  MPI_Finalize();
  printf("%03d: ctest-vt end (%12.9f)\n", id, (t2-t1));

  VT_end(5);
  VT_finalize();
  return 0;
}
Example #26
int
main(int argc, char *argv[])
{
	int			my_rank;
	int			comm_sz;
	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
	MPI_Comm_size(MPI_COMM_WORLD, &comm_sz);

	int key = 1;
	int group_size = 3;
	int color = my_rank / group_size;	/* contiguous groups of group_size ranks */

	MPI_Comm comm;
	int new_rank;
	int new_comm_sz;

	MPI_Comm_split(MPI_COMM_WORLD, color, key, &comm);
	MPI_Comm_rank(comm, &new_rank);
	MPI_Comm_size(comm, &new_comm_sz);

	fprintf(stdout, "my old rank is %d of %d; my new rank is %d of %d\n", my_rank, comm_sz, new_rank, new_comm_sz);

	MPI_Finalize();
	return 0;
}
// This constructor is for just one subdomain, so only adds the info
// for multiple time steps on the domain. No two-level parallelism.
MultiMpiComm::MultiMpiComm(const Epetra_MpiComm& EpetraMpiComm_, int numTimeSteps_,
                           const Teuchos::EVerbosityLevel verbLevel) :
        Epetra_MpiComm(EpetraMpiComm_),
        Teuchos::VerboseObject<MultiMpiComm>(verbLevel),
        myComm(Teuchos::rcp(new Epetra_MpiComm(EpetraMpiComm_))),
        subComm(0)
{

  numSubDomains = 1;
  subDomainRank = 0;
  numTimeSteps = numTimeSteps_;
  numTimeStepsOnDomain = numTimeSteps_;
  firstTimeStepOnDomain = 0;

  subComm = new Epetra_MpiComm(EpetraMpiComm_);

  // Create split communicators for the time domain; using the rank as the
  // colour gives every process its own single-member communicator.
  MPI_Comm time_split_MPI_Comm;
  int rank = EpetraMpiComm_.MyPID();
  (void) MPI_Comm_split(EpetraMpiComm_.Comm(), rank, rank,
                        &time_split_MPI_Comm);
  timeComm = new Epetra_MpiComm(time_split_MPI_Comm);
  numTimeDomains = EpetraMpiComm_.NumProc();
  timeDomainRank = rank;
}
void gather_topology_nektar(int *BLACS_PARAMS, double *V, double *work){

  /* processes with column = 0 do MPI_Allgather,     */
  /* then scatter results from the proc with MYCOL = 0
     to the rest of the processes with the same MYROW */

   int my_color,info;
   static int INIT_FLAG = 0;
   static MPI_Comm MPI_COMM_ZERO_COLUMN = MPI_COMM_NULL;

   if (INIT_FLAG == 0){
      /* create communicators by splitting */

      if (BLACS_PARAMS[6] == 0)
        my_color = 1;
      else
        my_color = MPI_UNDEFINED;

      info = MPI_Comm_split(get_MPI_COMM(), my_color, BLACS_PARAMS[5], &MPI_COMM_ZERO_COLUMN);
      if (info != MPI_SUCCESS) {
        fprintf (stderr, "gather_topology_nektar: MPI split error\n");
        exit(1);
     }
     INIT_FLAG = 1;
   }

  MPI_Reduce(V, work, BLACS_PARAMS[7], MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_ROW_NEW);

  if (BLACS_PARAMS[6] == 0)
    MPI_Allreduce (work, V ,BLACS_PARAMS[7], MPI_DOUBLE, MPI_SUM, MPI_COMM_ZERO_COLUMN);

}
Example #29
int main(int argc, char **argv) {
  MPI_Init(NULL, NULL);

  // Get the rank and size in the original communicator
  int world_rank, world_size;
  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  int color = world_rank / 4; // Determine color based on row

  // Split the communicator based on the color and use the original rank for ordering
  MPI_Comm row_comm;
  MPI_Comm_split(MPI_COMM_WORLD, color, world_rank, &row_comm);

  int row_rank, row_size;
  MPI_Comm_rank(row_comm, &row_rank);
  MPI_Comm_size(row_comm, &row_size);

  printf("WORLD RANK/SIZE: %d/%d --- ROW RANK/SIZE: %d/%d\n",
    world_rank, world_size, row_rank, row_size);

  MPI_Comm_free(&row_comm);

  MPI_Finalize();
}
int RestrictedCrsMatrixWrapper::Trestrict_comm(Teuchos::RCP<Epetra_CrsMatrix> input_matrix){
  /* Pull the Matrix Info */
  input_matrix_=input_matrix;
  
  const Epetra_MpiComm *InComm = dynamic_cast<const Epetra_MpiComm*>(& input_matrix_->Comm());
  const Epetra_Map *InRowMap= dynamic_cast<const Epetra_Map* >(& input_matrix_->RowMap());
  const Epetra_Map *InColMap= dynamic_cast<const Epetra_Map* >(& input_matrix_->ColMap());

  if(!InComm || !InRowMap || !InColMap) return (-1);
  
  int_type Nrows = (int_type) InRowMap->NumGlobalElements64();
  int_type Ncols = (int_type) InColMap->NumGlobalElements64();
  
  if(!subcomm_is_set){
    /* Build the Split Communicators, If Needed */
    int color;
    if(InRowMap->NumMyElements()) color=1;
    else color=MPI_UNDEFINED;
    MPI_Comm_split(InComm->Comm(),color,InComm->MyPID(),&MPI_SubComm_);
  }
  else{
    /* Sanity check user-provided subcomm - drop an error if the MPISubComm
       does not include a processor with data. */
    if (input_matrix->NumMyRows() && MPI_SubComm_ == MPI_COMM_NULL)
      return(-2);
  }

  /* Mark active processors */
  if(MPI_SubComm_ == MPI_COMM_NULL) proc_is_active=false;
  else proc_is_active=true;
  

  if(proc_is_active){      
    RestrictedComm_=new Epetra_MpiComm(MPI_SubComm_);
    
    int_type* RowMapGlobalElements = 0;
	InRowMap->MyGlobalElementsPtr(RowMapGlobalElements);
    int_type* ColMapGlobalElements = 0;
	InColMap->MyGlobalElementsPtr(ColMapGlobalElements);

    /* Build the Restricted Maps */
    ResRowMap_ = new Epetra_Map(Nrows,InRowMap->NumMyElements(),RowMapGlobalElements,
                                (int_type) InRowMap->IndexBase64(),*RestrictedComm_);
    ResColMap_ = new Epetra_Map(Ncols,InColMap->NumMyElements(),ColMapGlobalElements,
                                (int_type) InColMap->IndexBase64(),*RestrictedComm_);        
    
    int *colind,Nr;
    double *values;
    
    /* Allocate the Restricted Matrix */
    restricted_matrix_= Teuchos::rcp(new Epetra_CrsMatrix(View,*ResRowMap_,*ResColMap_,0));
    for(int i=0;i<input_matrix_->NumMyRows();i++) {
      input_matrix_->ExtractMyRowView(i,Nr,values,colind);
      restricted_matrix_->InsertMyValues(i,Nr,values,colind);
    }
    restricted_matrix_->FillComplete();      
  }

  return 0;
}/*end restrict_comm*/