Example No. 1
void init_comms(void){ 
    extern int numtasks, rank, 
    myfieldrank, myenglandrank, mybrazilrank, 
    field_ranks[12], eng_ranks[11], bra_ranks[11];
    
    extern MPI_Group world, england, brazil, engfield, brafield, field;
    extern MPI_Comm eng_comm, bra_comm, engfield_comm, brafield_comm, field_comm; 
    
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	if (numtasks != NPROCS) 
	{
		printf("Must specify MP_PROCS= %d. Terminating.\n",NPROCS);
		MPI_Finalize();
		exit(EXIT_FAILURE);
	}
    
    MPI_Comm_group(MPI_COMM_WORLD, &world);
    
    MPI_Group_incl(world, 12, field_ranks, &field);
    MPI_Group_incl(world, 11, eng_ranks, &england);
    MPI_Group_incl(world, 11, bra_ranks, &brazil);
    MPI_Group_union(field, england, &engfield);
    MPI_Group_union(field, brazil, &brafield);
    
	MPI_Comm_create(MPI_COMM_WORLD, field, &field_comm);
    MPI_Comm_create(MPI_COMM_WORLD, england, &eng_comm);
    MPI_Comm_create(MPI_COMM_WORLD, brazil, &bra_comm);
    MPI_Comm_create(MPI_COMM_WORLD, engfield, &engfield_comm);
    MPI_Comm_create(MPI_COMM_WORLD, brafield, &brafield_comm);
    
    MPI_Group_rank (field, &myfieldrank);
    MPI_Group_rank (england, &myenglandrank);    
    MPI_Group_rank (brazil, &mybrazilrank); 
}
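
Example No. 1 shows the recurring pattern in this collection — obtain the world group, carve out subgroups with MPI_Group_incl, and turn them into communicators with MPI_Comm_create — but it never releases the groups and communicators it creates. Below is a minimal, self-contained sketch of the same pattern with cleanup added; the even-rank subgroup is chosen purely for illustration and is not part of the original example.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, size;
    MPI_Group world_group, even_group;
    MPI_Comm even_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Rank list containing the even-numbered world ranks. */
    int nranks = (size + 1) / 2;
    int *ranks = (int *) malloc(nranks * sizeof(int));
    for (int i = 0; i < nranks; i++) ranks[i] = 2 * i;

    /* Subgroup of even ranks, then a communicator over that subgroup. */
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Group_incl(world_group, nranks, ranks, &even_group);
    MPI_Comm_create(MPI_COMM_WORLD, even_group, &even_comm);

    /* MPI_Comm_create returns MPI_COMM_NULL on ranks outside the group. */
    if (even_comm != MPI_COMM_NULL) {
        int even_rank;
        MPI_Comm_rank(even_comm, &even_rank);
        printf("world rank %d is rank %d in even_comm\n", rank, even_rank);
        MPI_Comm_free(&even_comm);
    }

    MPI_Group_free(&even_group);
    MPI_Group_free(&world_group);
    free(ranks);
    MPI_Finalize();
    return 0;
}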
Example No. 2
int main( int argc, char **argv )
{
    int rank, size, i;
    MPI_Group group1, group2, group3, groupall, groupunion, newgroup;
    MPI_Comm newcomm;
    int ranks1[100], ranks2[100], ranks3[100];
    int nranks1=0, nranks2=0, nranks3=0;

    MPI_Init( &argc, &argv );
    MPI_Barrier( MPI_COMM_WORLD );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    MPI_Comm_group( MPI_COMM_WORLD, &groupall );

    /* Divide groups */
    for (i=0; i<size; i++) 
      if ( (i%3)==0 )
	ranks1[nranks1++] = i;
      else if ( (i%3)==1 )
	ranks2[nranks2++] = i;
      else
	ranks3[nranks3++] = i;

    MPI_Group_incl ( groupall, nranks1, ranks1, &group1 );
    MPI_Group_incl ( groupall, nranks2, ranks2, &group2 );
    MPI_Group_incl ( groupall, nranks3, ranks3, &group3 );

    MPI_Group_difference ( groupall, group2, &groupunion );

    MPI_Comm_create ( MPI_COMM_WORLD, group3, &newcomm );
    newgroup = MPI_GROUP_NULL;
    if (newcomm != MPI_COMM_NULL)
    {
	/* If we don't belong to group3, this would fail */
	MPI_Comm_group ( newcomm, &newgroup );
    }

    /* Free the groups */
    MPI_Group_free( &groupall );
    MPI_Group_free( &group1 );
    MPI_Group_free( &group2 );
    MPI_Group_free( &group3 );
    MPI_Group_free( &groupunion );
    if (newgroup != MPI_GROUP_NULL)
    {
	MPI_Group_free( &newgroup );
    }

    /* Free the communicator */
    if (newcomm != MPI_COMM_NULL)
	MPI_Comm_free( &newcomm );
    Test_Waitforall( );
    MPI_Finalize();
    return 0;
}
Example No. 3
int main( int argc, char *argv[] )
{
    int errs = 0;
    int rc;
    int ranks[2];
    MPI_Group ng;
    char      str[MPI_MAX_ERROR_STRING+1];
    int       slen;

    MTest_Init( &argc, &argv );
    /* Set errors return */
    MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );

    /* Create some valid input data except for the group handle */
    ranks[0] = 0;
    rc = MPI_Group_incl( MPI_COMM_WORLD, 1, ranks, &ng );
    if (rc == MPI_SUCCESS) {
	errs ++;
	printf( "Did not detect invalid handle (comm) in group_incl\n" );
    }
    else {
	if (verbose) {
	    MPI_Error_string( rc, str, &slen );
	    printf( "Found expected error; message is: %s\n", str );
	}
    }

    MTest_Finalize( errs );
    MPI_Finalize( );
    return 0;
}
Example No. 4
int main(int argc, char *argv[])  {
	int rank, new_rank, sendbuf, recvbuf, numtasks;
	int P[4][4]={{0,1,2,3}, {4,5,6,7}, {8,9,10,11}, {12,13,14,15} };
	MPI_Group orig_group, new_group;
	MPI_Comm new_comm;

	MPI_Init(&argc,&argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &numtasks);

	if (numtasks != NPROCS) {
		printf("Must specify MP_PROCS= %d. Terminating.\n",NPROCS);
		MPI_Finalize();
		exit(0);
	}

	sendbuf = rank;
	MPI_Comm_group(MPI_COMM_WORLD, &orig_group);
	MPI_Group_incl(orig_group, NPROCS/4, P[rank/4], &new_group);

	MPI_Comm_create(MPI_COMM_WORLD, new_group, &new_comm);
	MPI_Allreduce(&sendbuf, &recvbuf, 1, MPI_INT, MPI_SUM, new_comm);

	MPI_Group_rank (new_group, &new_rank);
	printf("rank= %2d newgroup= %2d newrank= %2d recvbuf= %2d\n",rank,rank/4,new_rank,recvbuf);

	MPI_Finalize();
	return 0;
}
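
Example No. 4 can call MPI_Allreduce on new_comm unconditionally only because all NPROCS (16) ranks fall into exactly one row of P, so MPI_Comm_create never returns MPI_COMM_NULL. When only a subset of ranks belongs to the group, the communicator has to be checked first. A short sketch of that guard, reusing the variable names from Example No. 4:

	MPI_Comm_create(MPI_COMM_WORLD, new_group, &new_comm);
	if (new_comm != MPI_COMM_NULL) {
		/* Only members of new_group participate in the reduction. */
		MPI_Allreduce(&sendbuf, &recvbuf, 1, MPI_INT, MPI_SUM, new_comm);
		MPI_Comm_free(&new_comm);
	}
	MPI_Group_free(&new_group);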
Example No. 5
void mpi_group_incl_(int* group, int* n, int* ranks, int* group_out, int* ierr) {
  MPI_Group tmp;

  *ierr = MPI_Group_incl(get_group(*group), *n, ranks, &tmp);
  if(*ierr == MPI_SUCCESS) {
    *group_out = new_group(tmp);
  }
}
Example No. 6
/*@C
   PetscSFGetGroups - gets incoming and outgoing process groups

   Collective

   Input Argument:
.  sf - star forest

   Output Arguments:
+  incoming - group of origin processes for incoming edges (leaves that reference my roots)
-  outgoing - group of destination processes for outgoing edges (roots that I reference)

   Level: developer

.seealso: PetscSFGetWindow(), PetscSFRestoreWindow()
@*/
PetscErrorCode PetscSFGetGroups(PetscSF sf,MPI_Group *incoming,MPI_Group *outgoing)
{
  PetscErrorCode ierr;
  MPI_Group      group;

  PetscFunctionBegin;
  if (sf->ingroup == MPI_GROUP_NULL) {
    PetscInt       i;
    const PetscInt *indegree;
    PetscMPIInt    rank,*outranks,*inranks;
    PetscSFNode    *remote;
    PetscSF        bgcount;

    /* Compute the number of incoming ranks */
    ierr = PetscMalloc1(sf->nranks,&remote);CHKERRQ(ierr);
    for (i=0; i<sf->nranks; i++) {
      remote[i].rank  = sf->ranks[i];
      remote[i].index = 0;
    }
    ierr = PetscSFDuplicate(sf,PETSCSF_DUPLICATE_CONFONLY,&bgcount);CHKERRQ(ierr);
    ierr = PetscSFSetGraph(bgcount,1,sf->nranks,NULL,PETSC_COPY_VALUES,remote,PETSC_OWN_POINTER);CHKERRQ(ierr);
    ierr = PetscSFComputeDegreeBegin(bgcount,&indegree);CHKERRQ(ierr);
    ierr = PetscSFComputeDegreeEnd(bgcount,&indegree);CHKERRQ(ierr);

    /* Enumerate the incoming ranks */
    ierr = PetscMalloc2(indegree[0],&inranks,sf->nranks,&outranks);CHKERRQ(ierr);
    ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)sf),&rank);CHKERRQ(ierr);
    for (i=0; i<sf->nranks; i++) outranks[i] = rank;
    ierr = PetscSFGatherBegin(bgcount,MPI_INT,outranks,inranks);CHKERRQ(ierr);
    ierr = PetscSFGatherEnd(bgcount,MPI_INT,outranks,inranks);CHKERRQ(ierr);
    ierr = MPI_Comm_group(PetscObjectComm((PetscObject)sf),&group);CHKERRQ(ierr);
    ierr = MPI_Group_incl(group,indegree[0],inranks,&sf->ingroup);CHKERRQ(ierr);
    ierr = MPI_Group_free(&group);CHKERRQ(ierr);
    ierr = PetscFree2(inranks,outranks);CHKERRQ(ierr);
    ierr = PetscSFDestroy(&bgcount);CHKERRQ(ierr);
  }
  *incoming = sf->ingroup;

  if (sf->outgroup == MPI_GROUP_NULL) {
    ierr = MPI_Comm_group(PetscObjectComm((PetscObject)sf),&group);CHKERRQ(ierr);
    ierr = MPI_Group_incl(group,sf->nranks,sf->ranks,&sf->outgroup);CHKERRQ(ierr);
    ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  }
  *outgoing = sf->outgroup;
  PetscFunctionReturn(0);
}
Example No. 7
bool SplitMPI_Communicator::CreateCommunicator(MPI_Comm comm_world, int np, int nb_ddc)
{
	int n_DDC;
	bool splitcomm;

	if ((nb_ddc > 0) && (nb_ddc < np))
	{ // if the total number of cores is larger than the number of DDCs, two new MPI groups will be
	  // generated
#ifdef OGS_FEM_IPQC
		splitcomm = true;
		n_DDC = nb_ddc; // number of ddc

		int DDC_ranks[n_DDC];
		for (int k = 0; k < n_DDC; k++)
		{
			DDC_ranks[k] = k;
		}

		MPI_Comm comm_IPQC;
		MPI_Group group_base, group_DDC, group_IPQC;

		// define MPI group and communicator for DDC related processes WH
		MPI_Comm_group(comm_world, &group_base);
		MPI_Group_incl(group_base, n_DDC, DDC_ranks, &group_DDC); // define group flow and mass transport
		MPI_Comm_create(comm_world, group_DDC, &comm_DDC);

		// define MPI group and communicator for IPQC WH
		MPI_Group_difference(group_base, group_DDC, &group_IPQC);
		MPI_Comm_create(comm_world, group_IPQC, &comm_IPQC);

		int myrank_IPQC, mysize_IPQC;
		MPI_Group_size(group_DDC, &mysize); // WH
		MPI_Group_rank(group_DDC, &myrank); // WH
		MPI_Group_rank(group_IPQC, &myrank_IPQC);
		MPI_Group_size(group_IPQC, &mysize_IPQC);
		if (myrank_IPQC != MPI_UNDEFINED) // WH
			std::cout << "After MPI_Init myrank_IPQC = " << myrank_IPQC << '\n';
		if (myrank != MPI_UNDEFINED) // WH
			std::cout << "After MPI_Init myrank_DDC = " << myrank << '\n';

		if (myrank_IPQC != MPI_UNDEFINED) // ranks of group_IPQC will call to IPhreeqc
			Call_IPhreeqc();
#endif
	}
	else
	{ // if no -ddc is specified or the number of DDCs is invalid, set ddc = np; no new MPI groups will be
		// generated
		splitcomm = false;
		n_DDC = np;
		comm_DDC = comm_world;
		MPI_Comm_size(comm_DDC, &mysize);
		MPI_Comm_rank(comm_DDC, &myrank);
		std::cout << "After MPI_Init myrank_DDC = " << myrank << '\n';
	}

	return splitcomm;
}
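
Example No. 7 partitions comm_world into two disjoint communicators: the first nb_ddc ranks via MPI_Group_incl and the remaining ranks via MPI_Group_difference. A minimal stand-alone sketch of that split; the function and variable names here are illustrative, not taken from the original code:

#include <mpi.h>
#include <stdlib.h>

/* Split "comm" into a communicator over ranks [0, n_front) and one over the rest.
 * On each rank exactly one of *front / *back is a valid communicator; the other
 * is MPI_COMM_NULL. */
static void split_front_back(MPI_Comm comm, int n_front, MPI_Comm *front, MPI_Comm *back)
{
    MPI_Group base, g_front, g_back;
    int *ranks = (int *) malloc(n_front * sizeof(int));
    for (int i = 0; i < n_front; i++) ranks[i] = i;

    MPI_Comm_group(comm, &base);
    MPI_Group_incl(base, n_front, ranks, &g_front);
    MPI_Group_difference(base, g_front, &g_back);   /* everyone not in g_front */

    MPI_Comm_create(comm, g_front, front);
    MPI_Comm_create(comm, g_back, back);

    MPI_Group_free(&g_front);
    MPI_Group_free(&g_back);
    MPI_Group_free(&base);
    free(ranks);
}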
Example No. 8
int main(int argc, char **argv)
{
    int rank, nproc, mpi_errno;
    int i, ncomm, *ranks;
    int errs = 1;
    MPI_Comm *comm_hdls;
    MPI_Group world_group;

    MTest_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    comm_hdls = malloc(sizeof(MPI_Comm) * MAX_NCOMM);
    ranks = malloc(sizeof(int) * nproc);

    ncomm = 0;
    for (i = 0; i < MAX_NCOMM; i++) {
        int incl = i % nproc;
        MPI_Group comm_group;

        /* Comms include ranks: 0; 1; 2; ...; 0; 1; ... */
        MPI_Group_incl(world_group, 1, &incl, &comm_group);

        /* Note: the comms we create all contain one rank from MPI_COMM_WORLD */
        mpi_errno = MPI_Comm_create(MPI_COMM_WORLD, comm_group, &comm_hdls[i]);

        if (mpi_errno == MPI_SUCCESS) {
            if (verbose)
                printf("%d: Created comm %d\n", rank, i);
            ncomm++;
        } else {
            if (verbose)
                printf("%d: Error creating comm %d\n", rank, i);
            MPI_Group_free(&comm_group);
            errs = 0;
            break;
        }

        MPI_Group_free(&comm_group);
    }

    for (i = 0; i < ncomm; i++)
        MPI_Comm_free(&comm_hdls[i]);

    free(comm_hdls);
    free(ranks);
    MPI_Group_free(&world_group);

    MTest_Finalize(errs);

    return MTestReturnValue(errs);
}
Example No. 9
void build_inter_win_comm() {
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    // Build Up Inter-Windows Communicators and Groups
    int* ranks;
    ranks = new int[2 * num_procs_per_win];
    mpi_inter_win_comm = new MPI_Comm*[DIM];
    mpi_inter_win_group = new MPI_Group*[DIM];
    for (int i = 0; i < DIM; ++i) {
        mpi_inter_win_comm[i] = new MPI_Comm[num_inter_win_comm[i]];
        mpi_inter_win_group[i] = new MPI_Group[num_inter_win_comm[i]];
    }

    for (int d = 0; d < DIM; ++d) {
        for (int i = 0; i < num_inter_win_comm[d]; ++i) {
            for (int j = 0; j < 2 * num_procs_per_win; ++j) {

                if (d == 0) {
                    int m = i + i / (num_wins_dim[d] - 1);
                    ranks[j] = m * num_procs_per_win + j;
                } else {
                    int m = i;
                    if (j < num_procs_per_win)
                        ranks[j] = m * num_procs_per_win + j;
                    else
                        ranks[j] = (m + num_wins_dim[0] - 1) * num_procs_per_win + j;
                }
            }
            MPI_Group_incl(world_group, 2 * num_procs_per_win, ranks, &mpi_inter_win_group[d][i]);
            MPI_Comm_create(MPI_COMM_WORLD, mpi_inter_win_group[d][i], &mpi_inter_win_comm[d][i]);
        }

    }
    delete [] ranks;

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    // Get Own ID in Inter-Win Communicator
    int line, row;
    line = (int) (pid_world_comm / (num_wins_dim[0] * num_procs_per_win));
    row = (int) ((pid_world_comm / num_procs_per_win) % num_wins_dim[0]);

    inter_win_comm_id[Right]  = ((row == num_wins_dim[0] - 1) ? -1 : (num_wins_dim[0] - 1) * line + row);
    inter_win_comm_id[Left]   = ((row == 0) ? -1 : (num_wins_dim[0] - 1) * line + row - 1);
    inter_win_comm_id[Up]     = ((line == num_wins_dim[1] - 1) ? -1 : num_wins_dim[0] * line + row);
    inter_win_comm_id[Down]   = ((line == 0) ? -1 : num_wins_dim[0] * (line - 1) + row);

    for (int i = 0; i < pow(2.0, DIM); ++i) {
        if (inter_win_comm_id[i] != -1) {
            MPI_Comm_rank(mpi_inter_win_comm[i / DIM][inter_win_comm_id[i]], &pid_inter_win_comm[i]);
        } else {
            pid_inter_win_comm[i] = -1;
        }
    }
    MPI_Barrier(MPI_COMM_WORLD);
    
}
Example No. 10
dart_ret_t dart_group_union(
  const dart_group_t *g1,
  const dart_group_t *g2,
  dart_group_t *gout)
{
  /* g1 and g2 are both ordered groups. */
  int ret = MPI_Group_union(
              g1->mpi_group,
              g2->mpi_group,
              &(gout -> mpi_group));
  if (ret == MPI_SUCCESS) {
    int i, j, k, size_in, size_out;
    dart_unit_t *pre_unitidsout, *post_unitidsout;

    MPI_Group group_all;
    MPI_Comm_group(MPI_COMM_WORLD, &group_all);
    MPI_Group_size(gout->mpi_group, &size_out);
    if (size_out > 1) {
      MPI_Group_size(g1->mpi_group, &size_in);
      pre_unitidsout  = (dart_unit_t *)malloc(
                          size_out * sizeof (dart_unit_t));
      post_unitidsout = (dart_unit_t *)malloc(
                          size_out * sizeof (dart_unit_t));
      dart_group_getmembers (gout, pre_unitidsout);

      /* Sort gout by the method of 'merge sort'. */
      i = k = 0;
      j = size_in;

      while ((i <= size_in - 1) && (j <= size_out - 1)) {
        post_unitidsout[k++] =
          (pre_unitidsout[i] <= pre_unitidsout[j])
          ? pre_unitidsout[i++]
          : pre_unitidsout[j++];
      }
      while (i <= size_in -1) {
        post_unitidsout[k++] = pre_unitidsout[i++];
      }
      while (j <= size_out -1) {
        post_unitidsout[k++] = pre_unitidsout[j++];
      }
      gout -> mpi_group = MPI_GROUP_EMPTY;
      MPI_Group_incl(
        group_all,
        size_out,
        post_unitidsout,
        &(gout->mpi_group));
      free (pre_unitidsout);
      free (post_unitidsout);
    }
    ret = DART_OK;
  }
  return ret;
}
Example No. 11
int main(int argc, char **argv) {
    int       rank, nproc, mpi_errno;
    int       i, ncomm, *ranks;
    int       errors = 1;
    MPI_Comm *comm_hdls;
    MPI_Group world_group;

    MPI_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    comm_hdls = malloc(sizeof(MPI_Comm) * MAX_NCOMM);
    ranks     = malloc(sizeof(int) * nproc);

    for (i = 0; i < nproc; i++)
        ranks[i] = i;

    ncomm = 0;
    for (i = 0; i < MAX_NCOMM; i++) {
        MPI_Group comm_group;

        /* Comms include ranks: 0; 0,1; 0,1,2; ...; 0; 0,1; 0,1,2; ... */
        MPI_Group_incl(world_group, (i+1) % (nproc+1), /* Adding 1 yields counts of 1..nproc */
                       ranks, &comm_group);

        /* Note: the comms we create are all varying subsets of MPI_COMM_WORLD */
        mpi_errno = MPI_Comm_create(MPI_COMM_WORLD, comm_group, &comm_hdls[i]);

        if (mpi_errno == MPI_SUCCESS) {
            ncomm++;
        } else {
            if (verbose) printf("%d: Error creating comm %d\n", rank, i);
            MPI_Group_free(&comm_group);
            errors = 0;
            break;
        }

        MPI_Group_free(&comm_group);
    }

    for (i = 0; i < ncomm; i++)
        MPI_Comm_free(&comm_hdls[i]);

    free(comm_hdls);
    MPI_Group_free(&world_group);

    MTest_Finalize(errors);
    MPI_Finalize();

    return 0;
}
Example No. 12
int main(int argc, char *argv[])
{
	int rank;
	MPI_Status status;

	MPI_Init(&argc, &argv);

	MPI_Group gw, g1, g2, g3;
	MPI_Comm_group(MPI_COMM_WORLD, &gw);

	int new_ranks[] = {0, 2, 3};
	MPI_Group_incl(gw, 3, new_ranks, &g1);

	int new_ranks2[] = {2, 3, 0};
	MPI_Group_incl(gw, 3, new_ranks2, &g2);

	MPI_Group_incl(gw, 3, new_ranks2, &g3);


	int size1, size2, size3;
	MPI_Group_size(g1, &size1);
	MPI_Group_size(g2, &size2);
	MPI_Group_size(g3, &size3);

	if (size1 != size2 || size2 != size3 || size1 != 3) {
		return 1;
	}

	int r1, r2;
	MPI_Group_compare(g1, g2, &r1);
	MPI_Group_compare(g2, g3, &r2);
	if (r1 != MPI_SIMILAR || r2 != MPI_IDENT) {
		return 1;
	}

	MPI_Group_free(&g1);
	MPI_Group_free(&g3);
	MPI_Group_free(&g2);

	MPI_Finalize();
	return 0;
}
}
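
Example No. 12 depends on the three possible results of MPI_Group_compare: MPI_IDENT when both groups hold the same members in the same order, MPI_SIMILAR when the members are the same but the order differs, and MPI_UNEQUAL otherwise. A small sketch that produces all three outcomes (requires at least 4 processes; the rank lists are illustrative):

#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Group world, a, b, c;
    int result;
    int ranks_a[3] = {0, 2, 3};
    int ranks_b[3] = {2, 3, 0};   /* same members as ranks_a, different order */
    int ranks_c[3] = {0, 1, 2};   /* different membership */

    MPI_Init(&argc, &argv);
    MPI_Comm_group(MPI_COMM_WORLD, &world);
    MPI_Group_incl(world, 3, ranks_a, &a);
    MPI_Group_incl(world, 3, ranks_b, &b);
    MPI_Group_incl(world, 3, ranks_c, &c);

    MPI_Group_compare(a, a, &result);   /* result == MPI_IDENT   */
    MPI_Group_compare(a, b, &result);   /* result == MPI_SIMILAR */
    MPI_Group_compare(a, c, &result);   /* result == MPI_UNEQUAL */

    MPI_Group_free(&a);
    MPI_Group_free(&b);
    MPI_Group_free(&c);
    MPI_Group_free(&world);
    MPI_Finalize();
    return 0;
}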
Example No. 13
void MPIDistribution::MPICreateCommLayer()
{
	if(m_population->network()->MPIGetNodeId() == 0 && DEBUG_LEVEL > 2)
	{
		cout<<"Creating MPI Communicator for layer...";cout.flush();
	}

	vector<int> localHypercolumns = ((PopulationColumns*)m_population)->GetLocalHypercolumnIndexes();
	MPI_Group orig_group, new_group; 
	MPI_Comm_group(NETWORK_COMM_WORLD, &orig_group);

	MPI_Comm* new_comm = new MPI_Comm();
	//debug
	//*new_comm = NETWORK_COMM_WORLD;
	//end debug

	vector<int> nodeLayerIndexes =  m_population->GetNodeLayerIndexes();
	vector<int> mpiProcsUsed = m_population->MPIGetProcessesUsed();

	// currently not set here in this way but may be changed
	/*
	MPI_Group_incl(orig_group, nodeLayerIndexes.size(), &(nodeLayerIndexes[0]), &new_group);
	MPI_Comm_create(NETWORK_COMM_WORLD, new_group, new_comm);
	*/
	
	//m_mpiCommLayer = new_comm;
	m_mpiCommLayer = new MPI_Comm();
	
	if(mpiProcsUsed.size()==0) // all used
	{
		*m_mpiCommLayer = NETWORK_COMM_WORLD; // may currently cause a bug if this assumption is incorrect and recording is attempted
		MPI_Comm_size(NETWORK_COMM_WORLD, &m_mpiSizeLocal);
		MPI_Comm_rank(NETWORK_COMM_WORLD, &m_mpiRankLocal);
	}
	else
	{
		m_mpiCommLayer = new_comm;
		MPI_Group_incl(orig_group, mpiProcsUsed.size(), &(mpiProcsUsed[0]), &new_group);
		MPI_Comm_create(NETWORK_COMM_WORLD, new_group, new_comm);
		if(binary_search(mpiProcsUsed.begin(),mpiProcsUsed.end(),m_population->network()->MPIGetNodeId()))
		{
			MPI_Comm_size(*m_mpiCommLayer, &m_mpiSizeLocal);
			MPI_Comm_rank(*m_mpiCommLayer, &m_mpiRankLocal);
		}
	}

	if(m_population->network()->MPIGetNodeId() == 0 && DEBUG_LEVEL > 2)
	{
		cout<<"done.\n";cout.flush();
	}

	m_commsLayersCreated = true;
}
Example No. 14
int main(int argc, char *argv[]) 
{ 
    int rank, destrank, nprocs, *A, *B, i;
    MPI_Comm CommDeuce;
    MPI_Group comm_group, group;
    MPI_Win win;
    int errs = 0;

    MTest_Init(&argc,&argv); 
    MPI_Comm_size(MPI_COMM_WORLD,&nprocs); 
    MPI_Comm_rank(MPI_COMM_WORLD,&rank); 

    if (nprocs < 2) {
        printf("Run this program with 2 or more processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Comm_split(MPI_COMM_WORLD, (rank < 2), rank, &CommDeuce);

    if (rank < 2)
    {

        i = MPI_Alloc_mem(SIZE2 * sizeof(int), MPI_INFO_NULL, &A);
        if (i) {
            printf("Can't allocate memory in test program\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        i = MPI_Alloc_mem(SIZE2 * sizeof(int), MPI_INFO_NULL, &B);
        if (i) {
            printf("Can't allocate memory in test program\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        MPI_Comm_group(CommDeuce, &comm_group);

        if (rank == 0) {
            for (i=0; i<SIZE2; i++) A[i] = B[i] = i;
            MPI_Win_create(NULL, 0, 1, MPI_INFO_NULL, CommDeuce, &win);
            destrank = 1;
            MPI_Group_incl(comm_group, 1, &destrank, &group);
            MPI_Win_start(group, 0, win);
            for (i=0; i<SIZE1; i++)
                MPI_Put(A+i, 1, MPI_INT, 1, i, 1, MPI_INT, win);
            for (i=0; i<SIZE1; i++)
                MPI_Get(B+i, 1, MPI_INT, 1, SIZE1+i, 1, MPI_INT, win);

            MPI_Win_complete(win);

            for (i=0; i<SIZE1; i++)
                if (B[i] != (-4)*(i+SIZE1)) {
                    SQUELCH( printf("Get Error: B[i] is %d, should be %d\n", B[i], (-4)*(i+SIZE1)); );
                    errs++;
                }
Example No. 15
int main( int argc, char **argv )
{
    int i, n, n_goal = 2048, n_all, rc, n_ranks, *ranks, rank, size, len;
    MPI_Group *group_array, world_group;
    char msg[MPI_MAX_ERROR_STRING];

    MPI_Init( &argc, &argv );
    MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    n = n_goal;
    
    group_array = (MPI_Group *)malloc( n * sizeof(MPI_Group) );

    MPI_Comm_group( MPI_COMM_WORLD, &world_group );

    n_ranks = size;
    ranks = (int *)malloc( size * sizeof(int) );
    for (i=0; i<size; i++) ranks[i] = i;

    for (i=0; i<n; i++) {
	rc = MPI_Group_incl( world_group, n_ranks, ranks, group_array + i );
 	if (rc) {
	    fprintf( stderr, "Error when creating group number %d\n", i );
	    MPI_Error_string( rc, msg, &len );
	    fprintf( stderr, "%s\n", msg );
	    n = i + 1;
	    break;
	}
	
    }

    for (i=0; i<n; i++) {
	rc = MPI_Group_free( group_array + i );
	if (rc) {
	    fprintf( stderr, "Error when freeing group number %d\n", i );
	    MPI_Error_string( rc, msg, &len );
	    fprintf( stderr, "%s\n", msg );
	    break;
	}
    }
    
    MPI_Group_free( &world_group );

    MPI_Allreduce( &n, &n_all, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD );
    if (rank == 0) {
	printf( "Completed test of %d type creations\n", n_all );
	if (n_all != n_goal) {
	printf (
"This MPI implementation limits the number of datatypes that can be created\n\
This is allowed by the standard and is not a bug, but is a limit on the\n\
implementation\n" );
	}
Example No. 16
value caml_mpi_group_incl(value group, value vranks)
{
  MPI_Group newgroup;
  int n = Wosize_val(vranks);
  int * ranks = stat_alloc(n * sizeof(int));
  int i;

  for (i = 0; i < n; i++) ranks[i] = Int_val(Field(vranks, i));
  MPI_Group_incl(Group_val(group), n, ranks, &newgroup);
  stat_free(ranks);
  return caml_mpi_alloc_group(newgroup);
}
Example No. 17
void
Albany::IossSTKMeshStruct::readSerialMesh(const Teuchos::RCP<const Epetra_Comm>& comm,
                                          std::vector<std::string>& entity_rank_names){

#ifdef ALBANY_ZOLTAN // rebalance needs Zoltan

  MPI_Group group_world;
  MPI_Group peZero;
  MPI_Comm peZeroComm;

  // Read a single exodus mesh on Proc 0 then rebalance it across the machine

  MPI_Comm theComm = Albany::getMpiCommFromEpetraComm(*comm);

  int process_rank[1]; // the reader process

  process_rank[0] = 0;
  int my_rank = comm->MyPID();

  //get the group under theComm
  MPI_Comm_group(theComm, &group_world);
  // create the new group. This group includes only processor zero - that is the only processor that reads the file
  MPI_Group_incl(group_world, 1, process_rank, &peZero);
  // create the new communicator - it just contains processor zero
  MPI_Comm_create(theComm, peZero, &peZeroComm);

  // Note that peZeroComm == MPI_COMM_NULL on all processors but processor 0

  if(my_rank == 0){

    *out << "Albany_IOSS: Loading serial STKMesh from Exodus file  " 
         << params->get<std::string>("Exodus Input File Name") << std::endl;

  }

  /* 
   * This checks the existence of the file, checks to see if we can open it, builds a handle to the region
   * and puts it in mesh_data (in_region), and reads the metaData into metaData.
   */

  stk::io::create_input_mesh("exodusii",
//  create_input_mesh("exodusii",
                             params->get<std::string>("Exodus Input File Name"), 
                             peZeroComm, 
                             *metaData, *mesh_data,
                             entity_rank_names); 

  // Here, all PEs have read the metaData from the input file, and have a pointer to in_region in mesh_data

#endif

}
Example No. 18
int
gen_libcomm(p1_context_t *c,
            int np1s /* number of participants |p1who| */,
            int *p1who /* the participating ranks (initializing comm) */)
{
    int rc = QUO_SUCCESS;
    if (0 == c->noderank) {
        printf("### [rank %d] %d p1pes slated for work\n", c->rank, np1s);
        printf("### [rank %d] and they are: ", c->rank);
        if (0 == np1s) printf("\n");
        fflush(stdout);
        for (int i = 0; i < np1s; ++i) {
            printf("%d ", p1who[i]); fflush(stdout);
            if (i + 1 == np1s) printf("\n"); fflush(stdout);
        }
    }
    /* ////////////////////////////////////////////////////////////////////// */
    /* now create our own communicator based on the rank ids passed here */
    /* ////////////////////////////////////////////////////////////////////// */
    MPI_Group init_comm_grp;
    MPI_Group p1_group;
    if (MPI_SUCCESS != MPI_Comm_group(c->init_comm_dup, &init_comm_grp)) {
        rc = QUO_ERR_MPI;
        goto out;
    }
    if (MPI_SUCCESS != MPI_Group_incl(init_comm_grp, np1s,
                                      p1who, &p1_group)) {
        rc = QUO_ERR_MPI;
        goto out;
    }
    if (MPI_SUCCESS != MPI_Comm_create(c->init_comm_dup,
                                       p1_group,
                                       &(c->quo_comm))) {
        rc = QUO_ERR_MPI;
        goto out;
    }
    /* am i in the new communicator? */
    c->in_quo_comm = (MPI_COMM_NULL == c->quo_comm) ? false : true;
    if (c->in_quo_comm) {
        if (MPI_SUCCESS != MPI_Comm_size(c->quo_comm, &c->qc_size)) {
            rc = QUO_ERR_MPI;
            goto out;
        }
        if (MPI_SUCCESS != MPI_Comm_rank(c->quo_comm, &c->qc_rank)) {
            rc = QUO_ERR_MPI;
            goto out;
        }
    }
out:
    if (MPI_SUCCESS != MPI_Group_free(&init_comm_grp)) return 1;
    return (QUO_SUCCESS == rc) ? 0 : 1;
}
Example No. 19
int main(int argc, char* argv[]) {
    int        p;
    int        q; /* = sqrt(p) */
    int        my_rank;
    MPI_Group  group_world;
    MPI_Group  first_row_group;
    MPI_Comm   first_row_comm;
    int*       process_ranks;
    int        proc;
    int        test = 0;
    int        sum;
    int        my_rank_in_first_row;


    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &p);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    q = (int) sqrt((double) p);

    /* Make a list of the processes in the new
     * communicator */
    process_ranks = (int*) malloc(q*sizeof(int));
    for (proc = 0; proc < q; proc++)
        process_ranks[proc] = proc;

    /* Get the group underlying MPI_COMM_WORLD */
    MPI_Comm_group(MPI_COMM_WORLD, &group_world);

    /* Create the new group */
    MPI_Group_incl(group_world, q, process_ranks,
        &first_row_group);

    /* Create the new communicator */
    MPI_Comm_create(MPI_COMM_WORLD, first_row_group,
        &first_row_comm);

    /* Now check whether we can do collective ops in first_row_comm */
    if (my_rank < q) {
        MPI_Comm_rank(first_row_comm, &my_rank_in_first_row);
        if (my_rank_in_first_row == 0) test = 1;
        MPI_Bcast(&test, 1, MPI_INT, 0, first_row_comm);
    }
    MPI_Reduce(&test, &sum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (my_rank == 0) {
        printf("q = %d, sum = %d\n", q, sum);
    }

    MPI_Finalize();
}  /* main */
Example No. 20
/** Create an ARMCI group that contains a subset of the nodes in the parent
  * group. Collective across output group.
  *
  * @param[in]  grp_size         Number of entries in pid_list.
  * @param[in]  pid_list         List of process ids that will be in the new group.
  * @param[out] armci_grp_out    The new ARMCI group, only valid on group members.
  * @param[in]  armci_grp_parent The parent of the new ARMCI group.
  */
static inline void ARMCI_Group_create_comm_collective(int grp_size, int *pid_list, ARMCI_Group *armci_grp_out,
    ARMCI_Group *armci_grp_parent) {

  MPI_Group mpi_grp_parent;
  MPI_Group mpi_grp_child;

  MPI_Comm_group(armci_grp_parent->comm, &mpi_grp_parent);
  MPI_Group_incl(mpi_grp_parent, grp_size, pid_list, &mpi_grp_child);

  MPI_Comm_create(armci_grp_parent->comm, mpi_grp_child, &armci_grp_out->comm);
 
  MPI_Group_free(&mpi_grp_parent);
  MPI_Group_free(&mpi_grp_child);
}
Example No. 21
proc::proc()
{
  MPI_Comm_rank(MPI_COMM_WORLD, &myRank_);
  MPI_Comm_size(MPI_COMM_WORLD, &totalNumProcessors_);
  NumProcCalculator();
  proc_i_=myRank_%NumProcX_;
  proc_j_=(myRank_/NumProcX_)%NumProcY_;
  proc_k_=myRank_/(NumProcX_*NumProcY_);

  procRIGHT_=((proc_i_+1)%NumProcX_)+proc_j_*NumProcX_+proc_k_*NumProcX_*NumProcY_;
  procLEFT_=((proc_i_-1+NumProcX_)%NumProcX_)+proc_j_*NumProcX_+proc_k_*NumProcX_*NumProcY_;
  procTOP_=proc_i_+((proc_j_+1)%NumProcY_)*NumProcX_+proc_k_*NumProcX_*NumProcY_;
  procBOT_=proc_i_+((proc_j_-1+NumProcY_)%NumProcY_)*NumProcX_+proc_k_*NumProcX_*NumProcY_;
  procFRONT_=proc_i_+proc_j_*NumProcX_+((proc_k_+1)%NumProcZ_)*NumProcX_*NumProcY_;
  procREAR_=proc_i_+proc_j_*NumProcX_+((proc_k_-1+NumProcZ_)%NumProcZ_)*NumProcX_*NumProcY_;
  procROOT_=0;

  //update list of processes with same i in the logical grid
  XMates_ranks=new int[NumProcY_*NumProcZ_];
  //update list of processes with same j&k in he logical grid
  YZMates_ranks=new int[NumProcX_];
  int count=0;
  for (int i(0);i<totalNumProcessors_;i++)
    if ((i%NumProcX_)==proc_i_) XMates_ranks[count++]=i;
  count=0;
  for (int i(0);i<totalNumProcessors_;i++)
    if (((i/NumProcX_)%NumProcY_==proc_j_)&&(i/(NumProcX_*NumProcY_)==proc_k_)) YZMates_ranks[count++]=i;
  //define a group process with same i in the logical grid
  MPI_Comm_group(MPI_COMM_WORLD,&all_group_);
  MPI_Group_incl(all_group_,NumProcY_*NumProcZ_,XMates_ranks,&XMates_group_);
  MPI_Comm_create(MPI_COMM_WORLD,XMates_group_,&XMates_comm_);
  MPI_Group_incl(all_group_,NumProcX_,YZMates_ranks,&YZMates_group_);
  MPI_Comm_create(MPI_COMM_WORLD,YZMates_group_,&YZMates_comm_);

  MPI_Comm_rank(YZMates_comm_, &myRank_YZ_);
  MPI_Comm_rank(XMates_comm_, &myRank_X_);
}
Example No. 22
int main(int argc, char **argv)
{
	int rank, size, i;
	MPI_Group groupall, groupunion, newgroup, group[GROUPS];
	MPI_Comm newcomm;
	int ranks[GROUPS][100];
	int nranks[GROUPS] = { 0, 0, 0 };

	MPI_Init(&argc, &argv);
	MPI_Barrier(MPI_COMM_WORLD);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	MPI_Comm_group(MPI_COMM_WORLD, &groupall);

	/* Divide groups */
	for (i = 0; i < size; i++)
		ranks[i % GROUPS][nranks[i % GROUPS]++] = i;

	for (i = 0; i < GROUPS; i++)
		MPI_Group_incl(groupall, nranks[i], ranks[i], &group[i]);

	MPI_Group_difference(groupall, group[1], &groupunion);

	MPI_Comm_create(MPI_COMM_WORLD, group[2], &newcomm);
	newgroup = MPI_GROUP_NULL;
	if (newcomm != MPI_COMM_NULL)
	{
		/* If we don't belong to group[2], this would fail */
		MPI_Comm_group(newcomm, &newgroup);
	}

	/* Free the groups */
	MPI_Group_free(&groupall);
	for (i = 0; i < GROUPS; i++)
		MPI_Group_free(&group[i]);
	MPI_Group_free(&groupunion);
	if (newgroup != MPI_GROUP_NULL)
	{
		MPI_Group_free(&newgroup);
	}

	/* Free the communicator */
	if (newcomm != MPI_COMM_NULL)
		MPI_Comm_free(&newcomm);
	Test_Waitforall();
	Test_Global_Summary();
	MPI_Finalize();
	return 0;
}
Example No. 23
/*@C
   PetscOpenMPMerge - Initializes PETSc and MPI to work with OpenMP. This is not usually called
      by the user. One should use -openmp_merge_size <n> to indicate the node size of the merged
      communicator.

   Collective on MPI_COMM_WORLD or PETSC_COMM_WORLD if it has been set

   Input Parameter:
+  nodesize - size of each compute node that will share processors
.  func - optional function to call on the master nodes
-  ctx - context passed to function on master nodes

   Options Database:
.   -openmp_merge_size <n>

   Level: developer

$    Comparison of two approaches for OpenMP usage (MPI started with N processes)
$
$    -openmp_spawn_size <n> requires MPI 2, results in n*N total processes with N directly used by application code
$                                           and n-1 worker processes (used by PETSc) for each application node.
$                           You MUST launch MPI so that only ONE MPI process is created for each hardware node.
$
$    -openmp_merge_size <n> results in N total processes, N/n used by the application code and the rest worker processes
$                            (used by PETSc)
$                           You MUST launch MPI so that n MPI processes are created for each hardware node.
$
$    petscmpiexec -n 2 ./ex1 -openmp_spawn_size 3 gives 2 application nodes (and 4 PETSc worker nodes)
$    petscmpiexec -n 6 ./ex1 -openmp_merge_size 3 gives the SAME 2 application nodes and 4 PETSc worker nodes
$       This is what one would use if each of the computer's hardware nodes had 3 CPUs.
$
$      These are intended to be used in conjunction with USER OpenMP code. The user will have 1 process per
$   computer (hardware) node (where the computer node has p cpus), the user's code will use threads to fully
$   utilize all the CPUs on the node. The PETSc code will have p processes to fully use the compute node for 
$   PETSc calculations. The user THREADS and PETSc PROCESSES will NEVER run at the same time so the p CPUs 
$   are always working on p tasks, never more than p.
$
$    See PCOPENMP for a PETSc preconditioner that can use this functionality
$

   For both PetscOpenMPSpawn() and PetscOpenMPMerge() PETSC_COMM_WORLD consists of one process per "node", PETSC_COMM_LOCAL_WORLD
   consists of all the processes in a "node."

   In both cases the user's code is running ONLY on PETSC_COMM_WORLD (that was newly generated by running this command).

   Concepts: OpenMP
   
.seealso: PetscFinalize(), PetscInitializeFortran(), PetscGetArgs(), PetscOpenMPFinalize(), PetscInitialize(), PetscOpenMPSpawn(), PetscOpenMPRun()

@*/
PetscErrorCode PETSC_DLLEXPORT PetscOpenMPMerge(PetscMPIInt nodesize,PetscErrorCode (*func)(void*),void *ctx)
{
  PetscErrorCode ierr;
  PetscMPIInt    size,rank,*ranks,i;
  MPI_Group      group,newgroup;

  PetscFunctionBegin;
  saved_PETSC_COMM_WORLD = PETSC_COMM_WORLD;

  ierr = MPI_Comm_size(saved_PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  if (size % nodesize) SETERRQ2(PETSC_ERR_ARG_SIZ,"Total number of process nodes %d is not divisible by number of processes per node %d",size,nodesize);
  ierr = MPI_Comm_rank(saved_PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);


  /* create two communicators 
      *) one that contains the first process from each node: 0,nodesize,2*nodesize,...
      *) one that contains all processes in a node:  (0,1,2...,nodesize-1), (nodesize,nodesize+1,...2*nodesize-), ...
  */
  ierr = MPI_Comm_group(saved_PETSC_COMM_WORLD,&group);CHKERRQ(ierr);
  ierr = PetscMalloc((size/nodesize)*sizeof(PetscMPIInt),&ranks);CHKERRQ(ierr);
  for (i=0; i<(size/nodesize); i++) ranks[i] = i*nodesize;
  ierr = MPI_Group_incl(group,size/nodesize,ranks,&newgroup);CHKERRQ(ierr);
  ierr = PetscFree(ranks);CHKERRQ(ierr);
  ierr = MPI_Comm_create(saved_PETSC_COMM_WORLD,newgroup,&PETSC_COMM_WORLD);CHKERRQ(ierr);
  if (rank % nodesize) PETSC_COMM_WORLD = 0; /* mark invalid processes for easy debugging */
  ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  ierr = MPI_Group_free(&newgroup);CHKERRQ(ierr);

  ierr = MPI_Comm_split(saved_PETSC_COMM_WORLD,rank/nodesize,rank % nodesize,&PETSC_COMM_LOCAL_WORLD);CHKERRQ(ierr);

  ierr = PetscInfo2(0,"PETSc OpenMP successfully started: number of nodes = %d node size = %d\n",size/nodesize,nodesize);CHKERRQ(ierr);
  ierr = PetscInfo1(0,"PETSc OpenMP process %sactive\n",(rank % nodesize) ? "in" : "");CHKERRQ(ierr);

  PetscOpenMPCtx = ctx;
  /* 
     All process not involved in user application code wait here
  */
  if (!PETSC_COMM_WORLD) {
    ierr              = PetscOpenMPHandle(PETSC_COMM_LOCAL_WORLD);CHKERRQ(ierr);
    PETSC_COMM_WORLD  = saved_PETSC_COMM_WORLD;
    PetscOpenMPWorker = PETSC_TRUE; /* so that PetscOpenMPFinalize() will not attempt a broadcast from this process */
    ierr = PetscInfo(0,"PETSc OpenMP inactive process becoming active");CHKERRQ(ierr);
  } else {
    if (func) {
      ierr = (*func)(ctx);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}
Example No. 24
/**
 * @param[in] owner {target processor rank within the processor group} 
 * @param[in] arrwin {start access epoch only to owner's arrwin (-windows) }
 */
inline void SpParHelper::StartAccessEpoch(int owner, vector<MPI_Win> & arrwin, MPI_Group & group)
{
	/* Now start using the whole comm as a group */
	int acc_ranks[1]; 
	acc_ranks[0] = owner;
	MPI_Group access;
	MPI_Group_incl(group, 1, acc_ranks, &access);	// take only the owner

	// begin the ACCESS epochs for the arrays of the remote matrices A and B
	// Start() *may* block until all processes in the target group have entered their exposure epoch
	for(unsigned int i=0; i< arrwin.size(); ++i)
	       MPI_Win_start(access, 0, arrwin[i]);

	MPI_Group_free(&access);
}
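
Example No. 24 opens only the origin side of a general-active-target epoch with MPI_Win_start; the target rank must open a matching exposure epoch with MPI_Win_post and later close it with MPI_Win_wait, while the origin closes its access epoch with MPI_Win_complete. A sketch of the target-side counterpart follows; the helper name and signature are illustrative and not part of SpParHelper.

// Target-side counterpart to StartAccessEpoch above (illustrative sketch).
// "accessor" is the single origin rank that will access this process's windows.
inline void StartExposureEpoch(int accessor, std::vector<MPI_Win> & arrwin, MPI_Group & group)
{
	int exp_ranks[1];
	exp_ranks[0] = accessor;
	MPI_Group exposure;
	MPI_Group_incl(group, 1, exp_ranks, &exposure);	// expose only to the accessing rank

	// Post() opens the exposure epoch; a later MPI_Win_wait on each window
	// blocks until the origin's MPI_Win_complete has finished.
	for(unsigned int i=0; i< arrwin.size(); ++i)
		MPI_Win_post(exposure, 0, arrwin[i]);

	MPI_Group_free(&exposure);
}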
Example No. 25
JNIEXPORT jlong JNICALL Java_mpi_Group_incl(
        JNIEnv *env, jobject jthis, jlong group, jintArray ranks)
{
    jsize n = (*env)->GetArrayLength(env, ranks);
    jint *jRanks;
    int  *cRanks;
    ompi_java_getIntArray(env, ranks, &jRanks, &cRanks);

    MPI_Group newGroup;
    int rc = MPI_Group_incl((MPI_Group)group, n, cRanks, &newGroup);
    ompi_java_exceptionCheck(env, rc);

    ompi_java_forgetIntArray(env, ranks, jRanks, cRanks);
    return (jlong)newGroup;
}
Example No. 26
int MPI_Group_incl_Wrapper(MPI_Group group, int n, int *ranks, MPI_Group *newgroup)
{
#ifdef COMMPI
  char *me = ft_mpi_routine_names[MPI_Group_incl_cntr];
  int ierr;
  FT_INITIALIZE(me, ft_global_ht)
  ft_mpi_cntrs[MPI_Total_cntr]++;
  ft_mpi_cntrs[MPI_Group_incl_cntr]++;
  ierr = MPI_Group_incl(group, n, ranks, newgroup);
  FT_FINALIZE(me, ft_global_ht, 1)
  return(ierr);
#else
  return(0);
#endif
}
Example No. 27
    void initCommunicator()
    {
      Logger::info() << "simx::Messenger: Checking if MPI has been initialized..." << std::endl;
      int mpi_running = 0;
      MPI_Initialized( &mpi_running );
      if ( mpi_running )
	{
	  static string where = "simx::Messenger::initCommunicator()";
	  int mpi_res;
	  Logger::info() << "simx::Messenger: MPI is active. Initializing simx communicator" << std::endl; 

	  int numMPIprocs;
      
	  mpi_res = MPI_Comm_size( MPI_COMM_WORLD, &numMPIprocs );
	  handleMPIReturn( where, mpi_res );
	  //int* newRank = (int*)malloc( numMPIprocs * sizeof(int));
	  int newRank[numMPIprocs];
	  // if ( ! newRank )
// 	    {
// 	      Logger::error() << "simx::Messenger: Malloc failure. "
// 			      << "Unable to initialize Messenger " << endl;
// 	      return;
// 	    }
	  for ( int i = 0; i < numMPIprocs; i++ )
	    newRank[i] = i;
	  handleMPIReturn( where, 
			   MPI_Comm_group( MPI_COMM_WORLD, &orig_group ) );
	  handleMPIReturn( where, 
			   MPI_Group_incl( orig_group, numMPIprocs, newRank, &new_group ) );
	  handleMPIReturn( where,  
			   MPI_Comm_create( MPI_COMM_WORLD, new_group, &SIMX_WORLD ) );
	  handleMPIReturn( where, 
			   MPI_Comm_rank ( SIMX_WORLD, &fMRank) );
	  simx::Logger::info() << "simx Messenger: SIMX_WORLD: Process Rank" 
				  << fMRank << std::endl;
	  fMessengerActive = true;
	  
	  fRecvdMessages.clear();

	  initReceive();
	  
	}
      else
	{
	  Logger::info() << "No MPI found, proceeding with simulation " << std::endl;
	}
      
    }
Example No. 28
/*
 *  Because sometimes only one process at a time wants to read or write to
 *  the file and other times many do, just open a single file in each process.
 *  This is kind of against the whole point of MPI files, but any other way to
 *  do it would require a load of hacky stuff.
 */
void ssioInitialise(void)
{
    if (ssio_initd)
    {
        return;
    }

    int ret;
    // start MPI if it isn't started already (for writing ssic.ss)
    MPI_Initialized(&ret);
    if (!ret)
    {
        MPI_Init(NULL, NULL);
    }

    // all processes originally in one group
    MPI_Group original_group;

    // extract original group from comm world
    MPI_Comm_group(MPI_COMM_WORLD, &original_group);

    int num = 1;
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    int* group_ranks = (int *) malloc(sizeof(int));
    group_ranks[0] = rank;

    MPI_Group loc_group;

    // each process joins a group containing only its own rank
    MPI_Group_incl(original_group,
                   num,
                   &group_ranks[0],
                   &loc_group);
    // create the comm which will be used within the group
    MPI_Comm_create(MPI_COMM_WORLD,
                    loc_group,
                    &process_comm);

    MPI_Group_free(&loc_group);
    free(group_ranks);

    // still need to free process_comm eventually:
    //MPI_Comm_free(&process_comm);

    ssio_initd = 1;
}
Example No. 29
File: da2.c Project: PeiLiu90/petsc
PetscErrorCode  DMDASplitComm2d(MPI_Comm comm,PetscInt M,PetscInt N,PetscInt sw,MPI_Comm *outcomm)
{
  PetscErrorCode ierr;
  PetscInt       m,n = 0,x = 0,y = 0;
  PetscMPIInt    size,csize,rank;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  csize = 4*size;
  do {
    if (csize % 4) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Cannot split communicator of size %d tried %d %D %D",size,csize,x,y);
    csize = csize/4;

    m = (PetscInt)(0.5 + PetscSqrtReal(((PetscReal)M)*((PetscReal)csize)/((PetscReal)N)));
    if (!m) m = 1;
    while (m > 0) {
      n = csize/m;
      if (m*n == csize) break;
      m--;
    }
    if (M > N && m < n) {PetscInt _m = m; m = n; n = _m;}

    x = M/m + ((M % m) > ((csize-1) % m));
    y = (N + (csize-1)/m)/n;
  } while ((x < 4 || y < 4) && csize > 1);
  if (size != csize) {
    MPI_Group   entire_group,sub_group;
    PetscMPIInt i,*groupies;

    ierr = MPI_Comm_group(comm,&entire_group);CHKERRQ(ierr);
    ierr = PetscMalloc1(csize,&groupies);CHKERRQ(ierr);
    for (i=0; i<csize; i++) {
      groupies[i] = (rank/csize)*csize + i;
    }
    ierr = MPI_Group_incl(entire_group,csize,groupies,&sub_group);CHKERRQ(ierr);
    ierr = PetscFree(groupies);CHKERRQ(ierr);
    ierr = MPI_Comm_create(comm,sub_group,outcomm);CHKERRQ(ierr);
    ierr = MPI_Group_free(&entire_group);CHKERRQ(ierr);
    ierr = MPI_Group_free(&sub_group);CHKERRQ(ierr);
    ierr = PetscInfo1(0,"DMDASplitComm2d:Creating redundant coarse problems of size %d\n",csize);CHKERRQ(ierr);
  } else {
    *outcomm = comm;
  }
  PetscFunctionReturn(0);
}
Example No. 30
/*
 * Class:     mpi_Group
 * Method:    incl
 * Signature: ([I)J
 */
JNIEXPORT jlong JNICALL Java_mpi_Group_incl(JNIEnv *env, jobject jthis, jintArray ranks)
{
    int n;
    jint *rks;
    jboolean isCopy=JNI_TRUE;
    MPI_Group newgroup;

    ompi_java_clearFreeList(env) ;

    n=(*env)->GetArrayLength(env,ranks);
    rks=(*env)->GetIntArrayElements(env,ranks,&isCopy);
    MPI_Group_incl((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
                   n, (int*)rks,
                   &newgroup);
    (*env)->ReleaseIntArrayElements(env,ranks,rks,0);
    return (jlong)newgroup;
}