Example #1
int print_comm(int testid, int rank, MPI_Comm comm)
{
  int i;

  if (comm != MPI_COMM_NULL) {
    int ranks;
    MPI_Comm_size(comm, &ranks);

    int* members_comm  = (int*) malloc(ranks * sizeof(int));
    int* members_world = (int*) malloc(ranks * sizeof(int));

    for (i = 0; i < ranks; i++) {
      members_comm[i] = i;
    }

    MPI_Group group_world, group_comm;
    MPI_Comm_group(MPI_COMM_WORLD, &group_world);
    MPI_Comm_group(comm, &group_comm);
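    /* Map each comm-local rank 0..ranks-1 to its rank in MPI_COMM_WORLD */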
    MPI_Group_translate_ranks(group_comm, ranks, members_comm, group_world, members_world);
    MPI_Group_free(&group_comm);
    MPI_Group_free(&group_world);

    print_members(testid, rank, ranks, members_world);

    free(members_world);
    free(members_comm);
  }

  return 0;
}
Example #2
/** Translate a group process rank to the corresponding process rank in the
  * ARMCI world group.
  *
  * @param[in] group      Group to translate from.
  * @param[in] group_rank Rank of the process in group.
  */
int ARMCI_Absolute_id(ARMCI_Group *group, int group_rank) {
  int       world_rank;
  MPI_Group world_group, sub_group;

  ARMCII_Assert(group_rank >= 0 && group_rank < group->size);

  /* Check if group is the world group */
  if (group->comm == ARMCI_GROUP_WORLD.comm)
    world_rank = group_rank;

  /* Check for translation cache */
  else if (group->grp_to_abs != NULL)
    world_rank = group->grp_to_abs[group_rank];

  else {
    /* Translate the rank */
    MPI_Comm_group(ARMCI_GROUP_WORLD.comm, &world_group);
    MPI_Comm_group(group->comm, &sub_group);

    MPI_Group_translate_ranks(sub_group, 1, &group_rank, world_group, &world_rank);

    MPI_Group_free(&world_group);
    MPI_Group_free(&sub_group);
  }

  /* Check if translation failed */
  if (world_rank == MPI_UNDEFINED)
    return -1;
  else
    return world_rank;
}
Example #3
int main(int argc, char **argv)
{
    MPI_Group basegroup;
    MPI_Group g1;
    MPI_Comm comm, newcomm;
    int rank, size;
    int worldrank;
    int errs = 0, errclass, mpi_errno;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &worldrank);
    comm = MPI_COMM_WORLD;
    MPI_Comm_group(comm, &basegroup);
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    MPI_Comm_split(comm, 0, size - rank, &newcomm);
    MPI_Comm_group(newcomm, &g1);

    /* Checking group_intersection for NULL variable */
    mpi_errno = MPI_Group_intersection(basegroup, g1, NULL);
    MPI_Error_class(mpi_errno, &errclass);
    if (errclass != MPI_ERR_ARG)
        ++errs;

    /* comm is an alias of MPI_COMM_WORLD and must not be freed */
    MPI_Comm_free(&newcomm);
    MPI_Group_free(&basegroup);
    MPI_Group_free(&g1);
    MTest_Finalize(errs);
    return 0;
}
Example #4
int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  char processor_name[128];
  int namelen = 128;
  MPI_Group newgroup, newgroup2;

  /* init */
  MPI_Init (&argc, &argv);
  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  fflush (stdout);

  MPI_Barrier (MPI_COMM_WORLD);
  MPI_Comm_group (MPI_COMM_WORLD, &newgroup);
  MPI_Group_free (&newgroup);
  MPI_Barrier (MPI_COMM_WORLD);
  /* now with an alias... */
  MPI_Comm_group (MPI_COMM_WORLD, &newgroup);
  newgroup2 = newgroup;
  MPI_Group_free (&newgroup2);
  MPI_Barrier (MPI_COMM_WORLD);
  printf ("(%d) Finished normally\n", rank);
  MPI_Finalize ();
  return 0;
}
Example #5
int main(int argc, char **argv)
{
    int rank, nproc, mpi_errno;
    int i, ncomm, *ranks;
    int errs = 1;
    MPI_Comm *comm_hdls;
    MPI_Group world_group;

    MTest_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    comm_hdls = malloc(sizeof(MPI_Comm) * MAX_NCOMM);
    ranks = malloc(sizeof(int) * nproc);

    ncomm = 0;
    for (i = 0; i < MAX_NCOMM; i++) {
        int incl = i % nproc;
        MPI_Group comm_group;

        /* Comms include ranks: 0; 1; 2; ...; 0; 1; ... */
        MPI_Group_incl(world_group, 1, &incl, &comm_group);

        /* Note: the comms we create all contain one rank from MPI_COMM_WORLD */
        mpi_errno = MPI_Comm_create(MPI_COMM_WORLD, comm_group, &comm_hdls[i]);

        if (mpi_errno == MPI_SUCCESS) {
            if (verbose)
                printf("%d: Created comm %d\n", rank, i);
            ncomm++;
        } else {
            if (verbose)
                printf("%d: Error creating comm %d\n", rank, i);
            MPI_Group_free(&comm_group);
            errs = 0;
            break;
        }

        MPI_Group_free(&comm_group);
    }

    /* Processes that were not members of comm i received MPI_COMM_NULL, which must not be freed */
    for (i = 0; i < ncomm; i++)
        if (comm_hdls[i] != MPI_COMM_NULL)
            MPI_Comm_free(&comm_hdls[i]);

    free(comm_hdls);
    free(ranks);
    MPI_Group_free(&world_group);

    MTest_Finalize(errs);

    return MTestReturnValue(errs);
}
Example #6
int main (int argc, char **argv)
{
  int num, i, rank, localRank;
  MPI_Group all, odd, even;
  MPI_Comm oddComm, evenComm;
  char mess[11];

  MPI_Init (&argc, &argv);
  // copy all the processes in group "all"
  MPI_Comm_group (MPI_COMM_WORLD, &all);
  MPI_Comm_size (MPI_COMM_WORLD, &num);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);

  int grN = 0;
  int ranks[(num + 1) / 2];   // room for all even ranks, even when num is odd

  for (i = 0; i < num; i += 2)
    ranks[grN++] = i;

  // extract from "all" only the odd ones
  MPI_Group_excl (all, grN, ranks, &odd);
  // subtract the odd group from all to get the even ones
  MPI_Group_difference (all, odd, &even);

  MPI_Comm_create (MPI_COMM_WORLD, odd, &oddComm);
  MPI_Comm_create (MPI_COMM_WORLD, even, &evenComm);
  
  // check group membership
  MPI_Group_rank (odd, &localRank);
  if (localRank != MPI_UNDEFINED)
    {
      if (localRank == 0)       // local group root sets up the message
        strcpy (mess, "ODD GROUP");
      MPI_Bcast (mess, 11, MPI_CHAR, 0, oddComm);
      MPI_Comm_free (&oddComm);  // free communicator in processes where it is valid
    }
  else
    {
      MPI_Comm_rank (evenComm, &localRank);
      if (localRank == 0)       // local group root sets up the message
        strcpy (mess, "EVEN GROUP");
      MPI_Bcast (mess, 11, MPI_CHAR, 0, evenComm);
      MPI_Comm_free (&evenComm);
    }

  printf ("Process %i with local rank %i received %s\n", rank, localRank, mess);

  // free up memory
  MPI_Group_free (&all);
  MPI_Group_free (&odd);
  MPI_Group_free (&even);
  MPI_Finalize ();
  return 0;
}
Example #7
int main(int argc, char **argv) {
    int       rank, nproc, mpi_errno;
    int       i, ncomm, *ranks;
    int       errors = 1;
    MPI_Comm *comm_hdls;
    MPI_Group world_group;

    MPI_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    comm_hdls = malloc(sizeof(MPI_Comm) * MAX_NCOMM);
    ranks     = malloc(sizeof(int) * nproc);

    for (i = 0; i < nproc; i++)
        ranks[i] = i;

    ncomm = 0;
    for (i = 0; i < MAX_NCOMM; i++) {
        MPI_Group comm_group;

        /* Comms include ranks: 0; 0,1; 0,1,2; ...; 0; 0,1; 0,1,2; ... */
        MPI_Group_incl(world_group, (i+1) % (nproc+1), /* Adding 1 yields counts of 1..nproc */
                       ranks, &comm_group);

        /* Note: the comms we create are all varying subsets of MPI_COMM_WORLD */
        mpi_errno = MPI_Comm_create(MPI_COMM_WORLD, comm_group, &comm_hdls[i]);

        if (mpi_errno == MPI_SUCCESS) {
            ncomm++;
        } else {
            if (verbose) printf("%d: Error creating comm %d\n", rank, i);
            MPI_Group_free(&comm_group);
            errors = 0;
            break;
        }

        MPI_Group_free(&comm_group);
    }

    /* Processes that were not members of comm i received MPI_COMM_NULL, which must not be freed */
    for (i = 0; i < ncomm; i++)
        if (comm_hdls[i] != MPI_COMM_NULL)
            MPI_Comm_free(&comm_hdls[i]);

    free(comm_hdls);
    MPI_Group_free(&world_group);

    MTest_Finalize(errors);
    MPI_Finalize();

    return 0;
}
Example #8
/** Initialize an ARMCI group's remaining fields using the communicator field.
  */
void ARMCII_Group_init_from_comm(ARMCI_Group *group) {
  if (group->comm != MPI_COMM_NULL) {
    MPI_Comm_size(group->comm, &group->size);
    MPI_Comm_rank(group->comm, &group->rank);

  } else {
    group->rank = -1;
    group->size =  0;
  }

  /* If noncollective groups are in use, create a separate communicator that
    can be used for noncollective group creation with this group as the parent.
    This ensures that calls to MPI_Intercomm_create can't clash with any user
    communication. */

  if (ARMCII_GLOBAL_STATE.noncollective_groups && group->comm != MPI_COMM_NULL)
    MPI_Comm_dup(group->comm, &group->noncoll_pgroup_comm);
  else
    group->noncoll_pgroup_comm = MPI_COMM_NULL;

  /* Check if translation caching is enabled */
  if (ARMCII_GLOBAL_STATE.cache_rank_translation) {
    if (group->comm != MPI_COMM_NULL) {
      int      *ranks, i;
      MPI_Group world_group, sub_group;

      group->abs_to_grp = malloc(sizeof(int)*ARMCI_GROUP_WORLD.size);
      group->grp_to_abs = malloc(sizeof(int)*group->size);
      ranks = malloc(sizeof(int)*ARMCI_GROUP_WORLD.size);

      ARMCII_Assert(group->abs_to_grp != NULL && group->grp_to_abs != NULL && ranks != NULL);

      for (i = 0; i < ARMCI_GROUP_WORLD.size; i++)
        ranks[i] = i;

      MPI_Comm_group(ARMCI_GROUP_WORLD.comm, &world_group);
      MPI_Comm_group(group->comm, &sub_group);

      MPI_Group_translate_ranks(sub_group, group->size, ranks, world_group, group->grp_to_abs);
      MPI_Group_translate_ranks(world_group, ARMCI_GROUP_WORLD.size, ranks, sub_group, group->abs_to_grp);

      MPI_Group_free(&world_group);
      MPI_Group_free(&sub_group);

      free(ranks);
    }
  }
  
  /* Translation caching is disabled */
  else {
    group->abs_to_grp = NULL;
    group->grp_to_abs = NULL;
  }
}
Example #9
int main( int argc, char **argv )
{
    int i, n, n_goal = 2048, n_all, rc, n_ranks, *ranks, rank, size, len;
    MPI_Group *group_array, world_group;
    char msg[MPI_MAX_ERROR_STRING];

    MPI_Init( &argc, &argv );
    MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    n = n_goal;
    
    group_array = (MPI_Group *)malloc( n * sizeof(MPI_Group) );

    MPI_Comm_group( MPI_COMM_WORLD, &world_group );

    n_ranks = size;
    ranks = (int *)malloc( size * sizeof(int) );
    for (i=0; i<size; i++) ranks[i] = i;

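    /* Create identical groups until n_goal is reached or the implementation runs out of group handles */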
    for (i=0; i<n; i++) {
	rc = MPI_Group_incl( world_group, n_ranks, ranks, group_array + i );
 	if (rc) {
	    fprintf( stderr, "Error when creating group number %d\n", i );
	    MPI_Error_string( rc, msg, &len );
	    fprintf( stderr, "%s\n", msg );
	    n = i;   /* group i was not created, so it must not be freed */
	    break;
	}
	
    }

    for (i=0; i<n; i++) {
	rc = MPI_Group_free( group_array + i );
	if (rc) {
	    fprintf( stderr, "Error when freeing group number %d\n", i );
	    MPI_Error_string( rc, msg, &len );
	    fprintf( stderr, "%s\n", msg );
	    break;
	}
    }
    
    MPI_Group_free( &world_group );

    MPI_Allreduce( &n, &n_all, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD );
    if (rank == 0) {
	printf( "Completed test of %d group creations\n", n_all );
	if (n_all != n_goal) {
	printf (
"This MPI implementation limits the number of groups that can be created\n\
This is allowed by the standard and is not a bug, but is a limit on the\n\
implementation\n" );
	}
    }

    free( ranks );
    free( group_array );
    MPI_Finalize();
    return 0;
}
Example #10
int PIOc_finalize(const int iosysid)
{
  iosystem_desc_t *ios, *nios;
  int msg;
  int mpierr;

  ios = pio_get_iosystem_from_id(iosysid);
  if(ios == NULL)
    return PIO_EBADID;
  
  /* If asynch IO is in use, send the PIO_MSG_EXIT message from the
   * comp master to the IO processes. */
  if (ios->async_interface && !ios->comp_rank)
  {
    msg = PIO_MSG_EXIT;
    mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm);
    CheckMPIReturn(mpierr, __FILE__, __LINE__);		      
  }

  /* Free this memory that was allocated in init_intracomm. */
  if (ios->ioranks)
      free(ios->ioranks);

  /* Free the buffer pool. */
  free_cn_buffer_pool(*ios);

  /* Free the MPI groups. */
  if (ios->compgroup != MPI_GROUP_NULL)
    MPI_Group_free(&ios->compgroup);

  if (ios->iogroup != MPI_GROUP_NULL)
    MPI_Group_free(&(ios->iogroup));

  /* Free the MPI communicators. my_comm is just a copy (but not an
   * MPI copy), so does not have to have an MPI_Comm_free() call. */
  if(ios->intercomm != MPI_COMM_NULL){
    MPI_Comm_free(&(ios->intercomm));
  }
  if(ios->io_comm != MPI_COMM_NULL){
    MPI_Comm_free(&(ios->io_comm));
  }
  if(ios->comp_comm != MPI_COMM_NULL){
    MPI_Comm_free(&(ios->comp_comm));
  }
  if(ios->union_comm != MPI_COMM_NULL){
    MPI_Comm_free(&(ios->union_comm));
  }

  /* Delete the iosystem_desc_t data associated with this id. */
  return pio_delete_iosystem_from_list(iosysid);
}
Example #11
/** Create an ARMCI group that contains a subset of the nodes in the parent
  * group. Collective across output group.
  *
  * @param[in]  grp_size         Number of entries in pid_list.
  * @param[in]  pid_list         List of process ids that will be in the new group.
  * @param[out] armci_grp_out    The new ARMCI group, only valid on group members.
  * @param[in]  armci_grp_parent The parent of the new ARMCI group.
  */
static inline void ARMCI_Group_create_comm_collective(int grp_size, int *pid_list, ARMCI_Group *armci_grp_out,
    ARMCI_Group *armci_grp_parent) {

  MPI_Group mpi_grp_parent;
  MPI_Group mpi_grp_child;

  MPI_Comm_group(armci_grp_parent->comm, &mpi_grp_parent);
  MPI_Group_incl(mpi_grp_parent, grp_size, pid_list, &mpi_grp_child);

  MPI_Comm_create(armci_grp_parent->comm, mpi_grp_child, &armci_grp_out->comm);
 
  MPI_Group_free(&mpi_grp_parent);
  MPI_Group_free(&mpi_grp_child);
}
Example #12
int translate_rank(MPI_Comm comm1, int rank1, MPI_Comm comm2)
{
    MPI_Group group1, group2;
    MPI_Comm_group(comm1, &group1);
    MPI_Comm_group(comm2, &group2);

    int rank2;
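    /* rank2 is set to MPI_UNDEFINED if rank1 has no counterpart in comm2 */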
    MPI_Group_translate_ranks(group1, 1, &rank1, group2, &rank2);

    MPI_Group_free(&group2);
    MPI_Group_free(&group1);

    return rank2;
}
Example #13
int main(int argc, char **argv)
{
	int rank, size, i;
	MPI_Group groupall, groupunion, newgroup, group[GROUPS];
	MPI_Comm newcomm;
	int ranks[GROUPS][100];
	int nranks[GROUPS] = { 0, 0, 0 };

	MPI_Init(&argc, &argv);
	MPI_Barrier(MPI_COMM_WORLD);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	MPI_Comm_group(MPI_COMM_WORLD, &groupall);

	/* Divide groups */
	for (i = 0; i < size; i++)
		ranks[i % GROUPS][nranks[i % GROUPS]++] = i;

	for (i = 0; i < GROUPS; i++)
		MPI_Group_incl(groupall, nranks[i], ranks[i], &group[i]);

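	/* Remove group[1]'s members from the full group */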
	MPI_Group_difference(groupall, group[1], &groupunion);

	MPI_Comm_create(MPI_COMM_WORLD, group[2], &newcomm);
	newgroup = MPI_GROUP_NULL;
	if (newcomm != MPI_COMM_NULL)
	{
		/* If we don't belong to group[2], this would fail */
		MPI_Comm_group(newcomm, &newgroup);
	}

	/* Free the groups */
	MPI_Group_free(&groupall);
	for (i = 0; i < GROUPS; i++)
		MPI_Group_free(&group[i]);
	MPI_Group_free(&groupunion);
	if (newgroup != MPI_GROUP_NULL)
	{
		MPI_Group_free(&newgroup);
	}

	/* Free the communicator */
	if (newcomm != MPI_COMM_NULL)
		MPI_Comm_free(&newcomm);
	Test_Waitforall();
	Test_Global_Summary();
	MPI_Finalize();
	return 0;
}
Example #14
/*@C
   PetscOpenMPMerge - Initializes PETSc and MPI to work with OpenMP. This is not usually called
      by the user. One should use -openmp_merge_size <n> to indicate the node size of the merged
      communicator.

   Collective on MPI_COMM_WORLD or PETSC_COMM_WORLD if it has been set

   Input Parameter:
+  nodesize - size of each compute node that will share processors
.  func - optional function to call on the master nodes
-  ctx - context passed to function on master nodes

   Options Database:
.   -openmp_merge_size <n>

   Level: developer

$    Comparison of two approaches for OpenMP usage (MPI started with N processes)
$
$    -openmp_spawn_size <n> requires MPI 2, results in n*N total processes with N directly used by application code
$                                           and n-1 worker processes (used by PETSc) for each application node.
$                           You MUST launch MPI so that only ONE MPI process is created for each hardware node.
$
$    -openmp_merge_size <n> results in N total processes, N/n used by the application code and the rest worker processes
$                            (used by PETSc)
$                           You MUST launch MPI so that n MPI processes are created for each hardware node.
$
$    petscmpiexec -n 2 ./ex1 -openmp_spawn_size 3 gives 2 application nodes (and 4 PETSc worker nodes)
$    petscmpiexec -n 6 ./ex1 -openmp_merge_size 3 gives the SAME 2 application nodes and 4 PETSc worker nodes
$       This is what you would use if each of the computer's hardware nodes had 3 CPUs.
$
$      These are intended to be used in conjunction with USER OpenMP code. The user will have 1 process per
$   computer (hardware) node (where the computer node has p cpus), the user's code will use threads to fully
$   utilize all the CPUs on the node. The PETSc code will have p processes to fully use the compute node for 
$   PETSc calculations. The user THREADS and PETSc PROCESSES will NEVER run at the same time so the p CPUs 
$   are always working on p tasks, never more than p.
$
$    See PCOPENMP for a PETSc preconditioner that can use this functionality
$

   For both PetscOpenMPSpawn() and PetscOpenMPMerge() PETSC_COMM_WORLD consists of one process per "node", PETSC_COMM_LOCAL_WORLD
   consists of all the processes in a "node."

   In both cases the user's code is running ONLY on PETSC_COMM_WORLD (that was newly generated by running this command).

   Concepts: OpenMP
   
.seealso: PetscFinalize(), PetscInitializeFortran(), PetscGetArgs(), PetscOpenMPFinalize(), PetscInitialize(), PetscOpenMPSpawn(), PetscOpenMPRun()

@*/
PetscErrorCode PETSC_DLLEXPORT PetscOpenMPMerge(PetscMPIInt nodesize,PetscErrorCode (*func)(void*),void *ctx)
{
  PetscErrorCode ierr;
  PetscMPIInt    size,rank,*ranks,i;
  MPI_Group      group,newgroup;

  PetscFunctionBegin;
  saved_PETSC_COMM_WORLD = PETSC_COMM_WORLD;

  ierr = MPI_Comm_size(saved_PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
  if (size % nodesize) SETERRQ2(PETSC_ERR_ARG_SIZ,"Total number of process nodes %d is not divisible by number of processes per node %d",size,nodesize);
  ierr = MPI_Comm_rank(saved_PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);


  /* create two communicators 
      *) one that contains the first process from each node: 0,nodesize,2*nodesize,...
      *) one that contains all processes in a node:  (0,1,2,...,nodesize-1), (nodesize,nodesize+1,...,2*nodesize-1), ...
  */
  ierr = MPI_Comm_group(saved_PETSC_COMM_WORLD,&group);CHKERRQ(ierr);
  ierr = PetscMalloc((size/nodesize)*sizeof(PetscMPIInt),&ranks);CHKERRQ(ierr);
  for (i=0; i<(size/nodesize); i++) ranks[i] = i*nodesize;
  ierr = MPI_Group_incl(group,size/nodesize,ranks,&newgroup);CHKERRQ(ierr);
  ierr = PetscFree(ranks);CHKERRQ(ierr);
  ierr = MPI_Comm_create(saved_PETSC_COMM_WORLD,newgroup,&PETSC_COMM_WORLD);CHKERRQ(ierr);
  if (rank % nodesize) PETSC_COMM_WORLD = 0; /* mark invalid processes for easy debugging */
  ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  ierr = MPI_Group_free(&newgroup);CHKERRQ(ierr);

  ierr = MPI_Comm_split(saved_PETSC_COMM_WORLD,rank/nodesize,rank % nodesize,&PETSC_COMM_LOCAL_WORLD);CHKERRQ(ierr);

  ierr = PetscInfo2(0,"PETSc OpenMP successfully started: number of nodes = %d node size = %d\n",size/nodesize,nodesize);CHKERRQ(ierr);
  ierr = PetscInfo1(0,"PETSc OpenMP process %sactive\n",(rank % nodesize) ? "in" : "");CHKERRQ(ierr);

  PetscOpenMPCtx = ctx;
  /* 
     All process not involved in user application code wait here
  */
  if (!PETSC_COMM_WORLD) {
    ierr              = PetscOpenMPHandle(PETSC_COMM_LOCAL_WORLD);CHKERRQ(ierr);
    PETSC_COMM_WORLD  = saved_PETSC_COMM_WORLD;
    PetscOpenMPWorker = PETSC_TRUE; /* so that PetscOpenMPIFinalize() will not attempt a broadcast from this process */
    ierr = PetscInfo(0,"PETSc OpenMP inactive process becoming active");CHKERRQ(ierr);
  } else {
    if (func) {
      ierr = (*func)(ctx);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}
Example #15
   ~MPI_Gang()
   {
#     ifdef USE_MPI
      if( !owner ) return;
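      // MPI handles must not be freed once MPI_Finalize has been called, so skip cleanup in that case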
      int final_flag;
      MPI_Finalized(&final_flag);
      if( final_flag ) return;
      if( pool.group!=MPI_GROUP_NULL ) MPI_Group_free(&pool.group);
      if( gang.group!=MPI_GROUP_NULL ) MPI_Group_free(&gang.group);
      if( lead.group!=MPI_GROUP_NULL ) MPI_Group_free(&lead.group);
      if(  pool.comm!=MPI_COMM_NULL && pool.comm!=MPI_COMM_WORLD ) MPI_Comm_free(&pool.comm);
      if(  gang.comm!=MPI_COMM_NULL && gang.comm!=MPI_COMM_WORLD ) MPI_Comm_free(&gang.comm);
      if(  lead.comm!=MPI_COMM_NULL && lead.comm!=MPI_COMM_WORLD ) MPI_Comm_free(&lead.comm);
#     endif
   }
Example #16
void ARMCI_Group_free(ARMCI_Group *group) {

    int rv;
    
    ARMCI_iGroup *igroup = (ARMCI_iGroup *)group;

#ifdef ARMCI_GROUP
    int i, world_me = armci_msg_me();
    for(i=0; i<igroup->grp_attr.nproc; i++) {
      if(igroup->grp_attr.proc_list[i] == world_me) {
	break;
      }
    }
    if(i==igroup->grp_attr.nproc) {
      return; /*not in group to be freed*/
    }
#endif


    assert(igroup);
    free(igroup->grp_attr.grp_clus_info);
#ifdef ARMCI_GROUP
    free(igroup->grp_attr.proc_list);
    igroup->grp_attr.nproc = 0;
#else

    rv=MPI_Group_free(&(igroup->igroup));
    if(rv != MPI_SUCCESS) armci_die("MPI_Group_free: Failed ",armci_me);
    
    if(igroup->icomm != MPI_COMM_NULL) {
      rv = MPI_Comm_free( (MPI_Comm*)&(igroup->icomm) );
      if(rv != MPI_SUCCESS) armci_die("MPI_Comm_free: Failed ",armci_me);
    }
#endif
}
Example #17
int numProcsFails(MPI_Comm mcw){
	int rank, ret, numFailures = 0, flag;
        MPI_Group fGroup;
        MPI_Errhandler newEh;
        MPI_Comm dupComm;

        // Error handler
        MPI_Comm_create_errhandler(mpiErrorHandler, &newEh);

        MPI_Comm_rank(mcw, &rank);

        // Set error handler for communicator
        MPI_Comm_set_errhandler(mcw, newEh);

        // Target function
        if(MPI_SUCCESS != (ret = MPI_Comm_dup(mcw, &dupComm))) {
        //if(MPI_SUCCESS != (ret = MPI_Barrier(mcw))) { // MPI_Comm_dup or MPI_Barrier
           OMPI_Comm_failure_ack(mcw);
           OMPI_Comm_failure_get_acked(mcw, &fGroup);
           // Get the number of failures
           MPI_Group_size(fGroup, &numFailures);
        }// end of "MPI_Comm_dup failure"

        OMPI_Comm_agree(mcw, &flag);
        // Memory release
	if(numFailures > 0)
           MPI_Group_free(&fGroup);
        MPI_Errhandler_free(&newEh);

        return numFailures;
}//numProcsFails()
Example #18
int main(int argc, char* argv[])
{
    MPI_Comm comm, newcomm, scomm;
    MPI_Group group;
    int rank, size, color;
    int errs = 0, errclass, mpi_errno;

    MTest_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_dup(MPI_COMM_WORLD, &comm);
    MPI_Comm_group(comm, &group);

    MPI_Comm_create(comm, group, &newcomm);
    color = rank % 2;
    MPI_Comm_split(MPI_COMM_WORLD, color, rank, &scomm);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    /*test comm_split_type for NULL variable */
    mpi_errno = MPI_Comm_split_type(scomm, 2, 4, MPI_INFO_NULL, NULL);
    MPI_Error_class(mpi_errno, &errclass);
    if (errclass != MPI_ERR_ARG)
        ++errs;

    MPI_Comm_free(&comm);
    MPI_Comm_free(&newcomm);
    MPI_Comm_free(&scomm);
    MPI_Group_free(&group);
    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
Example #19
FORTRAN_API void FORT_CALL mpi_group_free_ ( MPI_Fint *group, MPI_Fint *__ierr )
{
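    /* Convert the Fortran handle to C, free the group, and return the updated (null) handle on success */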
    MPI_Group l_group = MPI_Group_f2c(*group);
    *__ierr = MPI_Group_free(&l_group);
    if (*__ierr == MPI_SUCCESS) 		     
        *group = MPI_Group_c2f(l_group);
}
Example #20
/**
 * Destroys the given comex igroup.
 */
void comex_igroup_finalize(comex_igroup_t *igroup)
{
    int status;
    win_link_t *curr_win;
    win_link_t *next_win;

    assert(igroup);

    if (igroup->group != MPI_GROUP_NULL) {
        status = MPI_Group_free(&igroup->group);
        if (status != MPI_SUCCESS) {
            comex_error("MPI_Group_free: Failed ", status);
        }
    }
    
    if (igroup->comm != MPI_COMM_NULL) {
        status = MPI_Comm_free(&igroup->comm);
        if (status != MPI_SUCCESS) {
            comex_error("MPI_Comm_free: Failed ", status);
        }
    }

    /* Remove all windows associated with this group */
    curr_win = igroup->win_list;
    while (curr_win != NULL) {
      next_win = curr_win->next;
      MPI_Win_free(&curr_win->win);
      free(curr_win);
      curr_win = next_win;
    }
}
Example #21
int main(int argc, char *argv[])
{
    int errs = 0, errclass, mpi_errno;
    int rank, size;
    MPI_Comm comm;
    MPI_Group group;

    MTest_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_dup(MPI_COMM_WORLD, &comm);
    MPI_Comm_group(comm, &group);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    /*test comm_create for NULL variable */
    mpi_errno = MPI_Comm_create(comm, group, NULL);
    MPI_Error_class(mpi_errno, &errclass);
    if (errclass != MPI_ERR_ARG)
        ++errs;

    MPI_Comm_free(&comm);
    MPI_Group_free(&group);
    MTest_Finalize(errs);
    return 0;
}
Example #22
/*
 * Class:     mpi_Group
 * Method:    free
 * Signature: ()V
 */
JNIEXPORT void JNICALL Java_mpi_Group_free(JNIEnv *env, jobject jthis)
{
    MPI_Group group=(MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID));

    MPI_Group_free(&group);
    (*env)->SetLongField(env,jthis, ompi_java.GrouphandleID,(jlong)MPI_GROUP_NULL);
}
Example #23
int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  int i, j;
  char processor_name[128];
  int namelen = 128;
  MPI_Group newgroup[GROUPS_PER_ITERATION];

  /* init */
  MPI_Init (&argc, &argv);
  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  fflush (stdout);

  MPI_Barrier (MPI_COMM_WORLD);

  for (i = 0; i < ITERATIONS; i++) {
    for (j = 0; j < GROUPS_PER_ITERATION; j++) {
      MPI_Comm_group (MPI_COMM_WORLD, &newgroup[j]);

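      /* Free only some of the handles; GROUPS_LOST_PER_ITERATION group handles are deliberately left unfreed each iteration */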
      if (j < GROUPS_PER_ITERATION - GROUPS_LOST_PER_ITERATION) {
	MPI_Group_free (&newgroup[j]);
      }
    }
  }

  MPI_Barrier (MPI_COMM_WORLD);
  printf ("(%d) Finished normally\n", rank);
  MPI_Finalize ();
  return 0;
}
Example #24
PetscErrorCode  DMDASplitComm2d(MPI_Comm comm,PetscInt M,PetscInt N,PetscInt sw,MPI_Comm *outcomm)
{
  PetscErrorCode ierr;
  PetscInt       m,n = 0,x = 0,y = 0;
  PetscMPIInt    size,csize,rank;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  csize = 4*size;
  do {
    if (csize % 4) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Cannot split communicator of size %d tried %d %D %D",size,csize,x,y);
    csize = csize/4;

    m = (PetscInt)(0.5 + PetscSqrtReal(((PetscReal)M)*((PetscReal)csize)/((PetscReal)N)));
    if (!m) m = 1;
    while (m > 0) {
      n = csize/m;
      if (m*n == csize) break;
      m--;
    }
    if (M > N && m < n) {PetscInt _m = m; m = n; n = _m;}

    x = M/m + ((M % m) > ((csize-1) % m));
    y = (N + (csize-1)/m)/n;
  } while ((x < 4 || y < 4) && csize > 1);
  if (size != csize) {
    MPI_Group   entire_group,sub_group;
    PetscMPIInt i,*groupies;

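    /* Ranks are grouped into consecutive blocks of csize; each process joins the subcommunicator for its own block */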
    ierr = MPI_Comm_group(comm,&entire_group);CHKERRQ(ierr);
    ierr = PetscMalloc1(csize,&groupies);CHKERRQ(ierr);
    for (i=0; i<csize; i++) {
      groupies[i] = (rank/csize)*csize + i;
    }
    ierr = MPI_Group_incl(entire_group,csize,groupies,&sub_group);CHKERRQ(ierr);
    ierr = PetscFree(groupies);CHKERRQ(ierr);
    ierr = MPI_Comm_create(comm,sub_group,outcomm);CHKERRQ(ierr);
    ierr = MPI_Group_free(&entire_group);CHKERRQ(ierr);
    ierr = MPI_Group_free(&sub_group);CHKERRQ(ierr);
    ierr = PetscInfo1(0,"DMDASplitComm2d:Creating redundant coarse problems of size %d\n",csize);CHKERRQ(ierr);
  } else {
    *outcomm = comm;
  }
  PetscFunctionReturn(0);
}
Example #25
int main(int argc, char *argv[])
{
    int size, rank, i, *excl;
    MPI_Group world_group, even_group;
    MPI_Comm  __attribute__((unused)) even_comm;

    MPI_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (size % 2) {
        fprintf(stderr, "this program requires a multiple of 2 number of processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
        exit(1);
    }

    excl = malloc((size / 2) * sizeof(int));
    assert(excl);

    /* exclude the odd ranks */
    for (i = 0; i < size / 2; i++)
        excl[i] = (2 * i) + 1;

    /* Create some groups */
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Group_excl(world_group, size / 2, excl, &even_group);
    MPI_Group_free(&world_group);

#if !defined(USE_STRICT_MPI) && defined(MPICH)
    if (rank % 2 == 0) {
        /* Even processes create a group for themselves */
        MPI_Comm_create_group(MPI_COMM_WORLD, even_group, 0, &even_comm);
        MPI_Barrier(even_comm);
        MPI_Comm_free(&even_comm);
    }
#endif /* USE_STRICT_MPI */

    MPI_Group_free(&even_group);
    MPI_Barrier(MPI_COMM_WORLD);

    if (rank == 0)
        printf(" No errors\n");

    MPI_Finalize();
    return 0;
}
Example #26
JNIEXPORT jlong JNICALL Java_mpi_Group_free(
        JNIEnv *env, jobject jthis, jlong handle)
{
    MPI_Group group = (MPI_Group)handle;
    int rc = MPI_Group_free(&group);
    ompi_java_exceptionCheck(env, rc);
    return (jlong)group;
}
Example #27
static void group_free(MPI_Group *grp)
{
    int rv;

    rv = MPI_Group_free(grp);
    mpi_exception(rv);
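    /* Also release the heap storage for the handle itself (grp is assumed to point to malloc'd memory) */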
    free(grp);
}
Example #28
int main(int argc, char *argv[])
{
	MPI_Init(&argc, &argv);

	MPI_Group gw, gs;
	MPI_Comm_group(MPI_COMM_SELF, &gs);
	MPI_Comm_group(MPI_COMM_WORLD, &gw);

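	/* Build communicators equivalent to MPI_COMM_WORLD and MPI_COMM_SELF from their groups */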
	MPI_Comm cw, cs;
	MPI_Comm_create(MPI_COMM_WORLD, gw, &cw);
	MPI_Comm_create(MPI_COMM_SELF, gs, &cs);

	MPI_Group_free(&gs);
	MPI_Group_free(&gw);

	int world_size;
	MPI_Comm_size(MPI_COMM_WORLD, &world_size);

	int size;
	MPI_Comm_size(cw, &size);
	if (size != world_size) {
		return 1;
	}

	MPI_Comm_size(cs, &size);
	if (size != 1) {
		return 2;
	}

	int world_rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

	int rank;
	MPI_Comm_rank(cw, &rank);
	if (rank != world_rank) {
		return 3;
	}

	MPI_Comm_rank(cs, &rank);
	if (rank != 0) {
		return 4;
	}
	MPI_Finalize();
	return 0;

}
Example #29
static PetscErrorCode MatMPIAdjCreateNonemptySubcommMat_MPIAdj(Mat A,Mat *B)
{
  Mat_MPIAdj     *a = (Mat_MPIAdj*)A->data;
  PetscErrorCode ierr;
  const PetscInt *ranges;
  MPI_Comm       acomm,bcomm;
  MPI_Group      agroup,bgroup;
  PetscMPIInt    i,rank,size,nranks,*ranks;

  PetscFunctionBegin;
  *B    = NULL;
  ierr  = PetscObjectGetComm((PetscObject)A,&acomm);CHKERRQ(ierr);
  ierr  = MPI_Comm_size(acomm,&size);CHKERRQ(ierr);
  ierr  = MPI_Comm_rank(acomm,&rank);CHKERRQ(ierr);
  ierr  = MatGetOwnershipRanges(A,&ranges);CHKERRQ(ierr);
  for (i=0,nranks=0; i<size; i++) {
    if (ranges[i+1] - ranges[i] > 0) nranks++;
  }
  if (nranks == size) {         /* All ranks have a positive number of rows, so we do not need to create a subcomm; */
    ierr = PetscObjectReference((PetscObject)A);CHKERRQ(ierr);
    *B   = A;
    PetscFunctionReturn(0);
  }

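  /* Collect the ranks that own at least one row */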
  ierr = PetscMalloc1(nranks,&ranks);CHKERRQ(ierr);
  for (i=0,nranks=0; i<size; i++) {
    if (ranges[i+1] - ranges[i] > 0) ranks[nranks++] = i;
  }
  ierr = MPI_Comm_group(acomm,&agroup);CHKERRQ(ierr);
  ierr = MPI_Group_incl(agroup,nranks,ranks,&bgroup);CHKERRQ(ierr);
  ierr = PetscFree(ranks);CHKERRQ(ierr);
  ierr = MPI_Comm_create(acomm,bgroup,&bcomm);CHKERRQ(ierr);
  ierr = MPI_Group_free(&agroup);CHKERRQ(ierr);
  ierr = MPI_Group_free(&bgroup);CHKERRQ(ierr);
  if (bcomm != MPI_COMM_NULL) {
    PetscInt   m,N;
    Mat_MPIAdj *b;
    ierr       = MatGetLocalSize(A,&m,NULL);CHKERRQ(ierr);
    ierr       = MatGetSize(A,NULL,&N);CHKERRQ(ierr);
    ierr       = MatCreateMPIAdj(bcomm,m,N,a->i,a->j,a->values,B);CHKERRQ(ierr);
    b          = (Mat_MPIAdj*)(*B)->data;
    b->freeaij = PETSC_FALSE;
    ierr       = MPI_Comm_free(&bcomm);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
Example #30
/*@C
   PetscSFGetGroups - gets incoming and outgoing process groups

   Collective

   Input Argument:
.  sf - star forest

   Output Arguments:
+  incoming - group of origin processes for incoming edges (leaves that reference my roots)
-  outgoing - group of destination processes for outgoing edges (roots that I reference)

   Level: developer

.seealso: PetscSFGetWindow(), PetscSFRestoreWindow()
@*/
PetscErrorCode PetscSFGetGroups(PetscSF sf,MPI_Group *incoming,MPI_Group *outgoing)
{
  PetscErrorCode ierr;
  MPI_Group      group;

  PetscFunctionBegin;
  if (sf->ingroup == MPI_GROUP_NULL) {
    PetscInt       i;
    const PetscInt *indegree;
    PetscMPIInt    rank,*outranks,*inranks;
    PetscSFNode    *remote;
    PetscSF        bgcount;

    /* Compute the number of incoming ranks */
    ierr = PetscMalloc1(sf->nranks,&remote);CHKERRQ(ierr);
    for (i=0; i<sf->nranks; i++) {
      remote[i].rank  = sf->ranks[i];
      remote[i].index = 0;
    }
    ierr = PetscSFDuplicate(sf,PETSCSF_DUPLICATE_CONFONLY,&bgcount);CHKERRQ(ierr);
    ierr = PetscSFSetGraph(bgcount,1,sf->nranks,NULL,PETSC_COPY_VALUES,remote,PETSC_OWN_POINTER);CHKERRQ(ierr);
    ierr = PetscSFComputeDegreeBegin(bgcount,&indegree);CHKERRQ(ierr);
    ierr = PetscSFComputeDegreeEnd(bgcount,&indegree);CHKERRQ(ierr);

    /* Enumerate the incoming ranks */
    ierr = PetscMalloc2(indegree[0],&inranks,sf->nranks,&outranks);CHKERRQ(ierr);
    ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)sf),&rank);CHKERRQ(ierr);
    for (i=0; i<sf->nranks; i++) outranks[i] = rank;
    ierr = PetscSFGatherBegin(bgcount,MPI_INT,outranks,inranks);CHKERRQ(ierr);
    ierr = PetscSFGatherEnd(bgcount,MPI_INT,outranks,inranks);CHKERRQ(ierr);
    ierr = MPI_Comm_group(PetscObjectComm((PetscObject)sf),&group);CHKERRQ(ierr);
    ierr = MPI_Group_incl(group,indegree[0],inranks,&sf->ingroup);CHKERRQ(ierr);
    ierr = MPI_Group_free(&group);CHKERRQ(ierr);
    ierr = PetscFree2(inranks,outranks);CHKERRQ(ierr);
    ierr = PetscSFDestroy(&bgcount);CHKERRQ(ierr);
  }
  *incoming = sf->ingroup;

  if (sf->outgroup == MPI_GROUP_NULL) {
    ierr = MPI_Comm_group(PetscObjectComm((PetscObject)sf),&group);CHKERRQ(ierr);
    ierr = MPI_Group_incl(group,sf->nranks,sf->ranks,&sf->outgroup);CHKERRQ(ierr);
    ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  }
  *outgoing = sf->outgroup;
  PetscFunctionReturn(0);
}