Example #1
bool SplitMPI_Communicator::CreateCommunicator(MPI_Comm comm_world, int np, int nb_ddc)
{
	int n_DDC;
	bool splitcomm;

	if ((nb_ddc > 0) && (nb_ddc < np))
	{ // if the total number of cores is larger than the number of DDCs, two new
	  // MPI groups will be generated
#ifdef OGS_FEM_IPQC
		splitcomm = true;
		n_DDC = nb_ddc; // number of ddc

		int DDC_ranks[n_DDC];
		for (int k = 0; k < n_DDC; k++)
		{
			DDC_ranks[k] = k;
		}

		MPI_Comm comm_IPQC;
		MPI_Group group_base, group_DDC, group_IPQC;

		// define MPI group and communicator for DDC related processes WH
		MPI_Comm_group(comm_world, &group_base);
		MPI_Group_incl(group_base, n_DDC, DDC_ranks, &group_DDC); // define group for flow and mass transport
		MPI_Comm_create(comm_world, group_DDC, &comm_DDC);

		// define MPI group and communicator for IPQC WH
		MPI_Group_difference(group_base, group_DDC, &group_IPQC);
		MPI_Comm_create(comm_world, group_IPQC, &comm_IPQC);

		int myrank_IPQC, mysize_IPQC;
		MPI_Group_size(group_DDC, &mysize); // WH
		MPI_Group_rank(group_DDC, &myrank); // WH
		MPI_Group_rank(group_IPQC, &myrank_IPQC);
		MPI_Group_size(group_IPQC, &mysize_IPQC);
		if (myrank_IPQC != MPI_UNDEFINED) // WH
			std::cout << "After MPI_Init myrank_IPQC = " << myrank_IPQC << '\n';
		if (myrank != MPI_UNDEFINED) // WH
			std::cout << "After MPI_Init myrank_DDC = " << myrank << '\n';

		if (myrank_IPQC != MPI_UNDEFINED) // ranks of group_IPQC will call to IPhreeqc
			Call_IPhreeqc();
#endif
	}
	else
	{ // if no -ddc is specified or the number of DDCs is invalid, set ddc = np;
		// no new MPI groups will be generated
		splitcomm = false;
		n_DDC = np;
		comm_DDC = comm_world;
		MPI_Comm_size(comm_DDC, &mysize);
		MPI_Comm_rank(comm_DDC, &myrank);
		std::cout << "After MPI_Init myrank_DDC = " << myrank << '\n';
	}

	return splitcomm;
}
Example #2
dart_ret_t dart_group_union(
  const dart_group_t *g1,
  const dart_group_t *g2,
  dart_group_t *gout)
{
  /* g1 and g2 are both ordered groups. */
  int ret = MPI_Group_union(
              g1->mpi_group,
              g2->mpi_group,
              &(gout -> mpi_group));
  if (ret == MPI_SUCCESS) {
    int i, j, k, size_in, size_out;
    dart_unit_t *pre_unitidsout, *post_unitidsout;

    MPI_Group group_all;
    MPI_Comm_group(MPI_COMM_WORLD, &group_all);
    MPI_Group_size(gout->mpi_group, &size_out);
    if (size_out > 1) {
      MPI_Group_size(g1->mpi_group, &size_in);
      pre_unitidsout  = (dart_unit_t *)malloc(
                          size_out * sizeof (dart_unit_t));
      post_unitidsout = (dart_unit_t *)malloc(
                          size_out * sizeof (dart_unit_t));
      dart_group_getmembers (gout, pre_unitidsout);

      /* MPI_Group_union lists g1's members first, then g2's members not in g1,
         so pre_unitidsout holds two ascending runs; merge them so that gout
         ends up sorted in ascending order. */
      i = k = 0;
      j = size_in;

      while ((i <= size_in - 1) && (j <= size_out - 1)) {
        post_unitidsout[k++] =
          (pre_unitidsout[i] <= pre_unitidsout[j])
          ? pre_unitidsout[i++]
          : pre_unitidsout[j++];
      }
      while (i <= size_in -1) {
        post_unitidsout[k++] = pre_unitidsout[i++];
      }
      while (j <= size_out -1) {
        post_unitidsout[k++] = pre_unitidsout[j++];
      }
      /* release the unsorted union group before rebuilding it in sorted order */
      MPI_Group_free(&(gout->mpi_group));
      MPI_Group_incl(
        group_all,
        size_out,
        post_unitidsout,
        &(gout->mpi_group));
      free (pre_unitidsout);
      free (post_unitidsout);
    }
    ret = DART_OK;
  }
  return ret;
}
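The merge above relies on MPI_Group_union's ordering guarantee: the result lists all of g1's members first, followed by the members of g2 that are not in g1, so pre_unitidsout contains two ascending runs. A minimal sketch of that ordering, assuming MPI_COMM_WORLD has at least three ranks:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Group world, a, b, u;
    int ra[] = {0, 2}, rb[] = {1, 2}, rank, usize;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_group(MPI_COMM_WORLD, &world);
    MPI_Group_incl(world, 2, ra, &a);  /* members: world ranks 0, 2 */
    MPI_Group_incl(world, 2, rb, &b);  /* members: world ranks 1, 2 */
    MPI_Group_union(a, b, &u);         /* members ordered 0, 2, 1 -- not sorted */
    MPI_Group_size(u, &usize);
    if (rank == 0)
        printf("union size = %d\n", usize); /* prints: union size = 3 */
    MPI_Group_free(&a); MPI_Group_free(&b);
    MPI_Group_free(&u); MPI_Group_free(&world);
    MPI_Finalize();
    return 0;
}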
Example #3
dart_ret_t dart_team_unit_l2g(
  dart_team_t teamid,
  dart_unit_t localid,
  dart_unit_t *globalid)
{
#if 0
  dart_unit_t *unitids;
  int size;
  int i = 0;
  dart_group_t group;
  dart_team_get_group (teamid, &group);
  MPI_Group_size (group.mpi_group, &size);
  if (localid >= size)
  {
    DART_LOG_ERROR ("Invalid localid input");
    return DART_ERR_INVAL;
  }
  unitids = (dart_unit_t*)malloc (sizeof(dart_unit_t) * size);
  dart_group_getmembers (&group, unitids);

  /* The unitids array is arranged in ascending order. */
  *globalid = unitids[localid];
//  printf ("globalid is %d\n", *globalid);
  return DART_OK;
#endif
  int size;
  dart_group_t group;

  dart_team_get_group (teamid, &group);
  MPI_Group_size (group.mpi_group, &size);

  if (localid >= size) {
    DART_LOG_ERROR ("Invalid localid input: %d", localid);
    return DART_ERR_INVAL;
  }
  if (teamid == DART_TEAM_ALL) {
    *globalid = localid;
  }
  else {
    MPI_Group group_all;
    MPI_Comm_group(MPI_COMM_WORLD, &group_all);
    MPI_Group_translate_ranks(
      group.mpi_group,
      1,
      &localid,
      group_all,
      globalid);
  }

  return DART_OK;
}
Example #4
/*
//  This function must be called by all members of communicator com.
*/
void print_comm_contents(MPI_Comm com, const char *name)
{
  MPI_Group world, local;
  int i, n, *ranks_local, *ranks_world;

  MPI_Comm_group(mpi.comm.world,&world);
  MPI_Comm_group(com,&local);

  MPI_Group_size(local,&n);
  MPI_Group_rank(local,&i);

  if(i == 0)
    {
      ranks_local = cart_alloc(int,n);
      ranks_world = cart_alloc(int,n);
  
      for(i=0; i<n; i++) ranks_local[i] = i;

      MPI_Group_translate_ranks(local,n,ranks_local,world,ranks_world);

      cart_debug("Communicator %s (%p), size = %d:",name,com,n);
      for(i=0; i<n; i++) cart_debug("id = %d -> world id = %d",i,ranks_world[i]);

      cart_free(ranks_local);
      cart_free(ranks_world);
    }
}
Example #5
int numProcsFails(MPI_Comm mcw){
    int rank, ret, numFailures = 0, flag;
    MPI_Group fGroup;
    MPI_Errhandler newEh;
    MPI_Comm dupComm;

    // Error handler
    MPI_Comm_create_errhandler(mpiErrorHandler, &newEh);

    MPI_Comm_rank(mcw, &rank);

    // Set error handler for communicator
    MPI_Comm_set_errhandler(mcw, newEh);

    // Target function
    if(MPI_SUCCESS != (ret = MPI_Comm_dup(mcw, &dupComm))) {
    //if(MPI_SUCCESS != (ret = MPI_Barrier(mcw))) { // MPI_Comm_dup or MPI_Barrier
        OMPI_Comm_failure_ack(mcw);
        OMPI_Comm_failure_get_acked(mcw, &fGroup);
        // Get the number of failures
        MPI_Group_size(fGroup, &numFailures);
    }// end of "MPI_Comm_dup failure"

    OMPI_Comm_agree(mcw, &flag);
    // Memory release
    if(numFailures > 0)
        MPI_Group_free(&fGroup);
    MPI_Errhandler_free(&newEh);

    return numFailures;
}//numProcsFails()
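The handler registered above via MPI_Comm_create_errhandler, mpiErrorHandler, is not part of this excerpt. A minimal stand-in, assuming the handler only needs to keep MPI from aborting so numProcsFails() can inspect the failure afterwards, could look like:

/* Hypothetical no-op handler; the project's real mpiErrorHandler is not shown. */
static void mpiErrorHandler(MPI_Comm *comm, int *errorCode, ...)
{
    /* Intentionally empty: simply returning suppresses abort-on-error, and
       the caller then queries the acknowledged failure group itself. */
}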
Example #6
JNIEXPORT jint JNICALL Java_mpi_Group_getSize(
        JNIEnv *env, jobject jthis, jlong group)
{
    int size, rc;
    rc = MPI_Group_size((MPI_Group)group, &size);
    ompi_java_exceptionCheck(env, rc);
    return size;
}
Example #7
void ARMCI_Group_size(ARMCI_Group *group, int *size) {
    ARMCI_iGroup *igroup = (ARMCI_iGroup *)group;
#ifdef ARMCI_GROUP
    *size = igroup->grp_attr.nproc;
#else
    MPI_Group_size((MPI_Group)(igroup->igroup), size);
#endif
}
Example #8
int main (int argc, char **argv)
{
  int num, i, rank;
  MPI_Group all, odd, even;

  MPI_Init (&argc, &argv);
  // copy all the processes in group "all"
  MPI_Comm_group (MPI_COMM_WORLD, &all);
  MPI_Comm_size (MPI_COMM_WORLD, &num);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);

  int grN = 0;
  int ranks[(num + 1) / 2]; // ceil(num/2): number of even ranks, safe when num is odd

  for (i = 0; i < num; i += 2)
    ranks[grN++] = i;

  // extract from "all" only the odd ones
  MPI_Group_excl (all, grN, ranks, &odd);
  // subtract odd group from all to get the even ones
  MPI_Group_difference (all, odd, &even);

  // print group sizes
  if (rank == 0)
    {
      MPI_Group_size (odd, &i);
      printf ("Odd group has %i processes\n", i);
      MPI_Group_size (even, &i);
      printf ("Even group has %i processes\n", i);
    }

  // check group membership
  MPI_Group_rank (odd, &i);
  if (i == MPI_UNDEFINED)
    printf ("Process %i belongs to even group\n", rank);
  else
    printf ("Process %i belongs to odd group\n", rank);

  // free up memory
  MPI_Group_free (&all);
  MPI_Group_free (&odd);
  MPI_Group_free (&even);
  MPI_Finalize ();
  return 0;
}
Example #9
dart_ret_t dart_group_size(
  const dart_group_t *g,
  size_t *size)
{
  int s;
  MPI_Group_size (g -> mpi_group, &s);
  (*size) = s;
  return DART_OK;
}
Example #10
char * get_str_failed_procs(MPI_Comm comm, MPI_Group f_group)
{
    int f_size, i, c_size;
    MPI_Group c_group;
    int *failed_ranks = NULL;
    int *comm_ranks   = NULL;
    int rank_len = 7;
    char * ranks_failed = NULL;

    MPI_Group_size(f_group, &f_size);

    if( f_size <= 0 ) {
        ranks_failed = strdup("None");
    } else {
        MPI_Comm_group(comm, &c_group);
        MPI_Comm_size( comm, &c_size);

        failed_ranks = (int *)malloc(f_size * sizeof(int));
        comm_ranks   = (int *)malloc(f_size * sizeof(int));
        for( i = 0; i < f_size; ++i) {
            failed_ranks[i] = i;
        }

        MPI_Group_translate_ranks(f_group, f_size, failed_ranks,
                                  c_group, comm_ranks);

        ranks_failed = (char *)malloc(sizeof(char) * (rank_len) * f_size + 1);
        for( i = 0; i < f_size; ++i) {
            /*
            printf("%2d of %2d) Error Handler: %2d / %2d Failed Rank %3d\n",
                   mpi_mcw_rank, mpi_mcw_size, i, f_size, comm_ranks[i]);
            */
            if( i+1 == f_size ) {
                sprintf((ranks_failed+(i*rank_len)), "%c%5d.%c",
                        (0 == i ? ' ' : ','), comm_ranks[i], '\0');
            }
            else if( 0 == i ) {
                sprintf(ranks_failed, "  %5d", comm_ranks[i]);
            }
            else {
                sprintf((ranks_failed+(i*rank_len)), ", %5d", comm_ranks[i]);
            }
        }

        MPI_Group_free(&c_group);
    }

    if( NULL != failed_ranks ) {
        free(failed_ranks);
    }
    if( NULL != comm_ranks ) {
        free(comm_ranks);
    }

    return ranks_failed;
}
Example #11
/**
 * Returns the size of a group.
 */
void ARMCI_Group_size(ARMCI_Group *id, int *size)
{
    int status;

    ARMCI_iGroup *igroup = armci_get_igroup_from_group(id);
    status = MPI_Group_size(igroup->group, size);
    if (status != MPI_SUCCESS) {
        armci_die("MPI_Group_size: Failed ", status);
    }
}
Example #12
/*
 * Class:     mpi_Group
 * Method:    Size
 * Signature: ()I
 */
JNIEXPORT jint JNICALL Java_mpi_Group_Size(JNIEnv *env, jobject jthis)
{
    int size;

    ompi_java_clearFreeList(env) ;

    MPI_Group_size((MPI_Group)((*env)->GetLongField(env,jthis,ompi_java.GrouphandleID)),
                   &size);
    return size;
}
Example #13
int main(int argc, char *argv[])
{
	int rank;
	MPI_Status status;

	MPI_Init(&argc, &argv);

	MPI_Group gw, g1, g2, g3;
	MPI_Comm_group(MPI_COMM_WORLD, &gw);

	int new_ranks[] = {0, 2, 3};
	MPI_Group_incl(gw, 3, new_ranks, &g1);

	int new_ranks2[] = {2, 3, 0};
	MPI_Group_incl(gw, 3, new_ranks2, &g2);

	MPI_Group_incl(gw, 3, new_ranks2, &g3);


	int size1, size2, size3;
	MPI_Group_size(g1, &size1);
	MPI_Group_size(g2, &size2);
	MPI_Group_size(g3, &size3);

	if (size1 != size2 || size2 != size3 || size1 != 3) {
		return 1;
	}

	int r1, r2;
	MPI_Group_compare(g1, g2, &r1);
	MPI_Group_compare(g2, g3, &r2);
	if (r1 != MPI_SIMILAR || r2 != MPI_IDENT) {
		return 1;
	}

	MPI_Group_free(&g1);
	MPI_Group_free(&g3);
	MPI_Group_free(&g2);

	MPI_Finalize();
	return 0;
}
Example #14
/**
 * Returns the size of a group.
 */
int comex_group_size(comex_group_t id, int *size)
{
    int status;

    comex_igroup_t *igroup = comex_get_igroup_from_group(id);
    status = MPI_Group_size(igroup->group, size);
    if (status != MPI_SUCCESS) {
        comex_error("MPI_Group_size: Failed ", status);
    }

    return COMEX_SUCCESS;
}
Example #15
static VALUE group_size(VALUE self)
{
    int rv, size;
    MPI_Group *grp;

    Data_Get_Struct(self, MPI_Group, grp);

    rv = MPI_Group_size(*grp, &size);
    mpi_exception(rv);

    return rb_fix_new(size);
}
Example #16
void mpi_group_size_f(MPI_Fint *group, MPI_Fint *size, MPI_Fint *ierr)
{
  ompi_group_t *c_group;
  OMPI_SINGLE_NAME_DECL(size);

  /* Make the fortran to c representation conversion */
  c_group = MPI_Group_f2c(*group);
  
  *ierr = OMPI_INT_2_FINT(MPI_Group_size(c_group, 
					 OMPI_SINGLE_NAME_CONVERT(size))); 
  if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
      OMPI_SINGLE_INT_2_FINT(size);
  }
}
Example #17
void mpi_error_handler(MPI_Comm *comm, int *error_code, ...)
{
    MPI_Group f_group;
    int num_failures;
    int loc_size;
    char * ranks_failed = NULL;

    MPI_Comm_size(*comm, &loc_size);

    /*
     * Will cause normal termination by unblocking the wait_for_signal() function
     * when all process failures have been reported.
     */
    cur_epoch++;
    printf("Handler !!\n");
    fflush(NULL);

    if(*error_code == MPI_ERR_PROC_FAILED ) {
        /* Access the local list of failures */
        OMPI_Comm_failure_ack(*comm);
        OMPI_Comm_failure_get_acked(*comm, &f_group);

        /* Get the number of failures */
        MPI_Group_size(f_group, &num_failures);

        cur_epoch = num_failures;

        ranks_failed = get_str_failed_procs(*comm, f_group);

        printf("%2d of %2d) Error Handler: (Comm = %s) %3d Failed Ranks: %s\n",
               mpi_mcw_rank, mpi_mcw_size,
               (mpi_mcw_size == loc_size ? "MCW" : "Subcomm"),
               num_failures, ranks_failed);

        free(ranks_failed);
    } else {
        printf("%2d of %2d) Error Handler: Some Other error has occurred. (Comm = %s) [Count = %2d / %2d]\n",
               mpi_mcw_rank, mpi_mcw_size,
               (mpi_mcw_size == loc_size ? "MCW" : "Subcomm"), cur_epoch, max_signals );
    }

    /* Introduce a small delay to aid debugging */
    fflush(NULL);
    sleep(1);

    return;
}
Example #18
dart_ret_t dart_team_unit_g2l(
  dart_team_t teamid,
  dart_unit_t globalid,
  dart_unit_t *localid)
{
#if 0
  dart_unit_t *unitids;
  int size;
  int i;
  dart_group_t group;
  dart_team_get_group (teamid, &group);
  MPI_Group_size (group.mpi_group, &size);
  unitids = (dart_unit_t *)malloc (sizeof (dart_unit_t) * size);

  dart_group_getmembers (&group, unitids);


  for (i = 0; (i < size) && (unitids[i] < globalid); i++);

  if ((i == size) || (unitids[i] > globalid))
  {
    *localid = -1;
    return DART_OK;
  }

  *localid = i;
  return DART_OK;
#endif
  if(teamid == DART_TEAM_ALL) {
    *localid = globalid;
  }
  else {
    dart_group_t group;
    MPI_Group group_all;
    dart_team_get_group(teamid, &group);
    MPI_Comm_group(MPI_COMM_WORLD, &group_all);
    MPI_Group_translate_ranks(
      group_all,
      1,
      &globalid,
      group.mpi_group,
      localid);
  }
  return DART_OK;
}
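dart_team_unit_g2l is the inverse of dart_team_unit_l2g from Example #3; translating a local id to a global unit id and back should be the identity. A hypothetical round-trip check (a fragment, assuming an initialized DART runtime, a valid teamid, and <assert.h>):

/* Fragment, not a full program: teamid is assumed to name an existing team. */
dart_unit_t gid, lid;
dart_team_unit_l2g(teamid, 0, &gid);    /* local id 0 -> global unit id */
dart_team_unit_g2l(teamid, gid, &lid);  /* ... and back again */
assert(lid == 0);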
Example #19
MPI_Fint c2fgroup_ (MPI_Fint *group)
{
    MPI_Group cGroup = MPI_Group_f2c(*group);
    int cSize, wSize, cRank, wRank;

    /* We pass in the group of comm world */
    MPI_Comm_size( MPI_COMM_WORLD, &wSize );
    MPI_Comm_rank( MPI_COMM_WORLD, &wRank );
    MPI_Group_size( cGroup, &cSize );
    MPI_Group_rank( cGroup, &cRank );

    if (wSize != cSize || wRank != cRank) {
	fprintf( stderr, "Group: Did not get expected size,rank (got %d,%d)",
		 cSize, cRank );
	return 1;
    }
    return 0;
}
Example #20
dart_ret_t dart_group_split(
  const dart_group_t *g,
  size_t n,
  dart_group_t **gout)
{
  MPI_Group grouptem;
  int size, length, i, ranges[1][3];

  MPI_Group_size (g -> mpi_group, &size);

  /* Ceiling division. */
  length = (size+(int)n-1)/(int)n;

  /* Note: split the group into chunks of subgroups. */
  for (i = 0; i < (int)n; i++)
  {
    if (i * length < size)
    {
      ranges[0][0] = i * length;

      if (i * length + length <= size)
      {
        ranges[0][1] = i * length + length -1;
      }
      else
      {
        ranges[0][1] = size - 1;
      }

      ranges[0][2] = 1;
      MPI_Group_range_incl(
        g -> mpi_group,
        1,
        ranges,
        &grouptem);
      (*(gout + i))->mpi_group = grouptem;
    }
    else
    {
      (*(gout + i))->mpi_group = MPI_GROUP_EMPTY;
    }
  }
  return DART_OK;
}
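Since dart_group_split writes through gout[i], the caller has to pass n pointers to groups that are already allocated. A hypothetical call site (a fragment, assuming dart_group_t is a complete type here, as it is in this file, and <stdlib.h> is included):

/* Fragment: split group g into 4 contiguous chunk subgroups. */
size_t i, n = 4;
dart_group_t *parts[4];
for (i = 0; i < n; i++) {
  parts[i] = (dart_group_t *) malloc(sizeof(dart_group_t));
}
dart_group_split(&g, n, parts);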
Example #21
int main( int argc, char *argv[] )
{
    int errs = 0;
    int rank, size, grank, gsize;
    int minsize = 2, isleft; 
    MPI_Comm      comm;
    MPI_Group     group;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntercomm( &comm, &isleft, minsize )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	MPI_Comm_group( comm, &group );
	MPI_Group_rank( group, &grank );
	MPI_Group_size( group, &gsize );
	if (rank != grank) {
	    errs++;
	    fprintf( stderr, "Ranks of groups do not match %d != %d\n",
		     rank, grank );
	}
	if (size != gsize) {
	    errs++;
	    fprintf( stderr, "Sizes of groups do not match %d != %d\n",
		     size, gsize );
	}
	MPI_Group_free( &group );
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #22
dart_ret_t dart_group_getmembers(
  const dart_group_t *g,
  dart_unit_t *unitids)
{
  int size, i;
  int *array;
  MPI_Group group_all;
  MPI_Group_size(g -> mpi_group, &size);
  MPI_Comm_group(MPI_COMM_WORLD, &group_all);
  array = (int*) malloc (sizeof (int) * size);
  for (i = 0; i < size; i++) {
    array[i] = i;
  }
  MPI_Group_translate_ranks(
    g->mpi_group,
    size,
    array,
    group_all,
    unitids);
  free (array);
  return DART_OK;
}
Example #23
dart_ret_t dart_group_ismember(
  const dart_group_t *g,
  dart_unit_t unitid,
  int32_t *ismember)
{
  dart_unit_t id;
  dart_myid (&id);

  int           i, size;
  dart_unit_t*  ranks;

  MPI_Group_size(g->mpi_group, &size);
  ranks = (dart_unit_t *)malloc(size * sizeof(dart_unit_t));
  dart_group_getmembers (g, ranks);
  for (i = 0; i < size; i++) {
    if (ranks[i] == unitid) {
      break;
    }
  }
  *ismember = (i != size);
  free(ranks);
  DART_LOG_DEBUG("%2d: GROUP_ISMEMBER - %s", unitid, (*ismember) ? "yes" : "no");
  return DART_OK;
}
Example #24
FastQuery::FastQuery(const std::string& dataFileName,
                     const FQ::FileFormat ffmt,
                     const std::string& indexFileName,
                     const int v, const char *rcfile,
                     const char *logfile,
                     bool readOnly,
                     const MPI_Comm comm,
                     void *extra)
{
    ibis::util::setVerboseLevel(v);
    ibis::init(rcfile, logfile);

    dataFile = 0;
    indexFile = 0;
    metadataMgr = 0;
    // true if indexFile needs to be initiated.
    bool indexing = (indexFileName.compare("") != 0 &&
                     indexFileName.compare(dataFileName) != 0);

#ifndef FQ_NOMPI
    mpi_comm = comm;
    MPI_Group mpi_group;
    MPI_Comm_group(mpi_comm, &mpi_group);
    MPI_Group_size(mpi_group, &mpi_size);
    MPI_Group_rank(mpi_group, &mpi_rank);
#endif

    LOGGER(ibis::gVerbose > 0)
        << "FastQuery constructor invoked with datafileName=" << dataFileName
        << ", fileFormat=" << ffmt << ", readOnly=" << readOnly;

    // open the file
    std::string indexPath = "";
    switch (ffmt) {

#ifdef FQ_HAVE_HDF5
    case FQ::FQ_H5Part: indexPath = "/__H5PartIndex__"; // fall through: H5Part files are handled as HDF5
    case FQ::FQ_HDF5: {
#ifdef FQ_NOMPI
        if (readOnly == false && indexing == true) {
            dataFile = new HDF5(dataFileName, true, indexPath);
        }
        else {
            dataFile = new HDF5(dataFileName, readOnly, indexPath);
        }
#else
        if (readOnly == false && indexing == true) {
            dataFile = new HDF5(dataFileName, mpi_comm, true, indexPath);
        }
        else {
            dataFile = new HDF5(dataFileName, mpi_comm, readOnly,
                                indexPath);
        }
#endif
        if (dataFile->isValid() == false) {
            delete (dataFile);
            dataFile = 0;
        }
        if (indexing) {
#ifdef FQ_NOMPI
            indexFile = new HDF5(indexFileName, readOnly, indexPath);
#else
            indexFile = new HDF5(indexFileName, mpi_comm, readOnly,
                                 indexPath);
#endif
            if (indexFile->isValid() == false) {
                delete (indexFile);
                indexFile = 0;
            }
        }
        else {
            indexFile = dataFile;
        }
        break;}
#endif


#ifdef FQ_HAVE_NETCDF
    case FQ::FQ_NetCDF: {
#ifndef FQ_NOMPI
        // netcdf is not supported in MPI mode yet
        LOGGER(ibis::gVerbose > 0)
            << "Warning -- FastQuery::FastQuery:"
            << " MPI is not supported for NetCDF yet";
#else
        if (readOnly == false && indexing == true) {
            dataFile = new NETCDF(dataFileName, true, "");
        }
        else {
            dataFile = new NETCDF(dataFileName, readOnly, "");
        }
        if (dataFile->isValid() == false) {
            delete (dataFile);
            dataFile = 0;
        }
        if (indexing) {
            indexFile = new NETCDF(indexFileName, readOnly, "");
            if (indexFile->isValid() == false) {
                delete (dataFile);
                dataFile = 0;
                delete (indexFile);
                indexFile = 0;
            }
        }
        else {
            indexFile = dataFile;
        }
#endif
        break;}
#endif


#ifdef FQ_HAVE_PNETCDF
    case FQ::FQ_pnetCDF: {
        if (readOnly == false && indexing == true) {
            dataFile = new PNETCDF(dataFileName, true, "", mpi_comm);
        } else {
            dataFile = new PNETCDF(dataFileName, readOnly, "", mpi_comm);
        }
        if (dataFile->isValid() == false) {
            delete (dataFile);
            dataFile = 0;
        }
        if (indexing) {
            indexFile = new PNETCDF(indexFileName, readOnly, "", mpi_comm);
            if (indexFile->isValid() == false) {
                delete (dataFile);
                dataFile = 0;
                delete (indexFile);
                indexFile = 0;
            }
        } else {
            indexFile = dataFile;
        }
        break;}
#endif


#ifdef FQ_HAVE_BP
    case FQ::FQ_BP: {
        MPI_Comm comm = MPI_COMM_WORLD;
#ifndef FQ_NOMPI
        comm = mpi_comm;
#endif
        bool streaming = true;
        float timeout = 0.0;
        enum ADIOS_READ_METHOD read_method = ADIOS_READ_METHOD_BP;
        if (extra != 0) {
            const BPExtras &bpx = *static_cast<BPExtras*>(extra);
            streaming = bpx.streaming;
            read_method = bpx.read_method;
            if (bpx.timeout != 0.0)
                timeout = bpx.timeout;
            else if (read_method == ADIOS_READ_METHOD_DATASPACES ||
                     read_method == ADIOS_READ_METHOD_DIMES ||
                     read_method == ADIOS_READ_METHOD_FLEXIO)
                timeout = FQ_ADIOS_STREAM_TIMEOUT;
        }

        if (readOnly == false && indexing == true) {
            dataFile = new BPArrayIODriver
                (dataFileName, "", comm, read_method, timeout, streaming);
        }
        else {
            dataFile = new BPArrayIODriver
                (dataFileName, "", comm, read_method, timeout, streaming);
        }
        if (dataFile->isValid() == false) {
            delete (dataFile);
            dataFile = 0;
        }
        if (indexing) {
            indexFile = new BPArrayIODriver
                (indexFileName, "", comm, read_method, timeout, streaming);
            if (indexFile->isValid() == false) {
                delete (dataFile);
                dataFile = 0;
                delete (indexFile);
                indexFile = 0;
            }
        }
        else {
            indexFile = dataFile;
        }
        break;}
#endif
    default: {
        LOGGER(ibis::gVerbose > 0)
            << "Warning -- FastQuery::FastQuery: unsupport file model";
        break;}
    }

    if (dataFile == 0 || indexFile == 0) {
        LOGGER(ibis::gVerbose > 0)
            << "Warning -- FastQuery:"
            << " failed to initialize the FastQuery object";
        return;
    }
#if defined(DEBUG) && DEBUG+0 > 0
    report_timing = true;
#else
#ifdef FQ_REPORT_STATISTIC
    if (! report_timing)
        report_timing = ibis::gParameters().isTrue(FQ_REPORT_STATISTIC);
#endif
    if (ibis::gVerbose < 0) {
        report_timing = false;
    }
    else if (! report_timing) {
        if (ibis::gVerbose > 3) {
            report_timing = true;
        }
        else {
            report_timing =
                ibis::gParameters().isTrue("FastQuery.reportTiming");
        }
    }
#endif

#ifndef FQ_NOMPI
    if (report_timing)
        report_timing = (ibis::gVerbose > 5 || mpi_rank == 0);
#endif

    ibis::horometer timer;
    if (report_timing)
        timer.start();
    // initialize information manager
    metadataMgr = new MetadataMgr(*dataFile);
    if (report_timing) {
        timer.stop();
        LOGGER(true) << "Statistic\tFQ::init\t"
                     << timer.CPUTime() << "\t" << timer.realTime();
    }
    LOGGER(ibis::gVerbose > 2)
        << "FastQuery: successfully initialized the FastQuery object";
} // FastQuery::FastQuery
Example #25
value caml_mpi_group_size(value group)
{
  int size;
  MPI_Group_size(Group_val(group), &size);
  return Val_int(size);
}
Example #26
int main( int argc, char **argv )
{
    int errs=0, toterr;
    MPI_Group basegroup;
    MPI_Group g1, g2, g3, g4, g5, g6, g7, g8, g9, g10;
    MPI_Group g3a, g3b;
    MPI_Comm  comm, newcomm, splitcomm, dupcomm;
    int       i, grp_rank, rank, grp_size, size, result;
    int       nranks, *ranks, *ranks_out;
    int       range[1][3];
    int       worldrank;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &worldrank );

    comm = MPI_COMM_WORLD;

    MPI_Comm_group( comm, &basegroup );
    MPI_Comm_rank( comm, &rank );
    MPI_Comm_size( comm, &size );

/* Get the basic information on this group */
    MPI_Group_rank( basegroup, &grp_rank );
    if (grp_rank != rank) {
	errs++;
	fprintf( stdout, "group rank %d != comm rank %d\n", grp_rank, rank );
    }

    MPI_Group_size( basegroup, &grp_size );
    if (grp_size != size) {
	errs++;
	fprintf( stdout, "group size %d != comm size %d\n", grp_size, size );
    }


/* Form a new communicator with inverted ranking */
    MPI_Comm_split( comm, 0, size - rank, &newcomm );
    MPI_Comm_group( newcomm, &g1 );
    ranks	  = (int *)malloc( size * sizeof(int) );
    ranks_out = (int *)malloc( size * sizeof(int) );
    for (i=0; i<size; i++) ranks[i] = i;
    nranks = size;
    MPI_Group_translate_ranks( g1, nranks, ranks, basegroup, ranks_out );
    for (i=0; i<size; i++) {
	if (ranks_out[i] != (size - 1) - i) {
	    errs++;
	    fprintf( stdout, "Translate ranks got %d expected %d\n", 
		     ranks_out[i], (size - 1) - i );
	}
    }

/* Check Compare */
    MPI_Group_compare( basegroup, g1, &result );
    if (result != MPI_SIMILAR) {
	errs++;
	fprintf( stdout, "Group compare should have been similar, was %d\n",
		 result );
    }
    MPI_Comm_dup( comm, &dupcomm );
    MPI_Comm_group( dupcomm, &g2 );
    MPI_Group_compare( basegroup, g2, &result );
    if (result != MPI_IDENT) {
	errs++;
	fprintf( stdout, "Group compare should have been ident, was %d\n",
		 result );
    }
    MPI_Comm_split( comm, rank < size/2, rank, &splitcomm );
    MPI_Comm_group( splitcomm, &g3 );
    MPI_Group_compare( basegroup, g3, &result );
    if (result != MPI_UNEQUAL) {
	errs++;
	fprintf( stdout, "Group compare should have been unequal, was %d\n",
		 result );
    }

    /* Build two groups that have this process and one other, but do not
       have the same processes */
    ranks[0] = rank;
    ranks[1] = (rank + 1) % size;
    MPI_Group_incl( basegroup, 2, ranks, &g3a );
    ranks[1] = (rank + size - 1) % size;
    MPI_Group_incl( basegroup, 2, ranks, &g3b );
    MPI_Group_compare( g3a, g3b, &result );
    if (result != MPI_UNEQUAL) {
        errs++;
	fprintf( stdout, "Group compare of equal sized but different groups should have been unequal, was %d\n", result );
    }
    

/* Build two new groups by excluding members; use Union to put them
   together again */

/* Exclude 0 */
    for (i=0; i<size; i++) ranks[i] = i;
    MPI_Group_excl( basegroup, 1, ranks, &g4 );
/* Exclude 1-(size-1) */
    MPI_Group_excl( basegroup, size-1, ranks+1, &g5 );
    MPI_Group_union( g5, g4, &g6 );
    MPI_Group_compare( basegroup, g6, &result );
    if (result != MPI_IDENT) {
	int usize;
	errs++;
	/* See ordering requirements on union */
	fprintf( stdout, "Group excl and union did not give ident groups\n" );
	fprintf( stdout, "[%d] result of compare was %d\n", rank, result );
	MPI_Group_size( g6, &usize );
	fprintf( stdout, "Size of union is %d, should be %d\n", usize, size );
    }
    MPI_Group_union( basegroup, g4, &g7 );
    MPI_Group_compare( basegroup, g7, &result );
    if (result != MPI_IDENT) {
	int usize;
	errs++;
	fprintf( stdout, "Group union of overlapping groups failed\n" );
	fprintf( stdout, "[%d] result of compare was %d\n", rank, result );
	MPI_Group_size( g7, &usize );
	fprintf( stdout, "Size of union is %d, should be %d\n", usize, size );
    }

/* Use range_excl instead of ranks */
    /* printf ("range excl\n" ); fflush( stdout ); */
    range[0][0] = 1;
    range[0][1] = size-1;
    range[0][2] = 1;
    MPI_Group_range_excl( basegroup, 1, range, &g8 );
    /* printf( "out  of range excl\n" ); fflush( stdout ); */
    MPI_Group_compare( g5, g8, &result );
    /* printf( "out of compare\n" ); fflush( stdout ); */
    if (result != MPI_IDENT) {
	errs++;
	fprintf( stdout, "Group range excl did not give ident groups\n" );
    }

    /* printf( "intersection\n" ); fflush( stdout ); */
    MPI_Group_intersection( basegroup, g4, &g9 );
    MPI_Group_compare( g9, g4, &result );
    if (result != MPI_IDENT) {
	errs++;
	fprintf( stdout, "Group intersection did not give ident groups\n" );
    }

/* Exclude EVERYTHING and check against MPI_GROUP_EMPTY */
    /* printf( "range excl all\n" ); fflush( stdout ); */
    range[0][0] = 0;
    range[0][1] = size-1;
    range[0][2] = 1;
    MPI_Group_range_excl( basegroup, 1, range, &g10 );

    /* printf( "done range excl all\n" ); fflush(stdout); */
    MPI_Group_compare( g10, MPI_GROUP_EMPTY, &result );
    /* printf( "done compare to MPI_GROUP_EMPTY\n" ); fflush(stdout); */

    if (result != MPI_IDENT) {
	errs++;
	fprintf( stdout, 
		 "MPI_GROUP_EMPTY didn't compare against empty group\n");
    }

    /* printf( "freeing groups\n" ); fflush( stdout ); */
    MPI_Group_free( &basegroup );
    MPI_Group_free( &g1 );
    MPI_Group_free( &g2 );
    MPI_Group_free( &g3 );
    MPI_Group_free( &g3a );
    MPI_Group_free( &g3b );
    MPI_Group_free( &g4 );
    MPI_Group_free( &g5 );
    MPI_Group_free( &g6 );
    MPI_Group_free( &g7 );
    MPI_Group_free( &g8 );
    MPI_Group_free( &g9 );
    MPI_Group_free( &g10 );
    MPI_Comm_free( &dupcomm );
    MPI_Comm_free( &splitcomm );
    MPI_Comm_free( &newcomm );

    MPI_Allreduce( &errs, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
    if (worldrank == 0) {
	if (toterr == 0) 
	    printf( " No Errors\n" );
	else
	    printf( "Found %d errors in MPI Group routines\n", toterr );
    }

    MPI_Finalize();
    return toterr;
}
Example #27
int main( int argc, char *argv[] )
{
    MPI_Group g1, g2, g4, g5, g45, selfgroup, g6;
    int ranks[16], size, rank, myrank, range[1][3];
    int errs = 0;
    int i, rin[16], rout[16], result;

    MPI_Init(&argc,&argv);

	MPI_Comm_group( MPI_COMM_WORLD, &g1 );
	MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
	MPI_Comm_size( MPI_COMM_WORLD, &size );
	if (size < 8) {
	    fprintf( stderr, 
		  "Test requires 8 processes (16 prefered) only %d provided\n",
		     size );
	    errs++;
	}

	/* 3 members; this process is rank 0 in the resulting group g2 */
	ranks[0] = myrank; ranks[1] = 2; ranks[2] = 7;
	if (myrank == 2) ranks[1] = 3;
	if (myrank == 7) ranks[2] = 6;
	MPI_Group_incl( g1, 3, ranks, &g2 );
	
	/* Check the resulting group */
	MPI_Group_size( g2, &size );
	MPI_Group_rank( g2, &rank );
	
	if (size != 3) {
	    fprintf( stderr, "Size should be %d, is %d\n", 3, size );
	    errs++;
	}
	if (rank != 0) {
	    fprintf( stderr, "Rank should be %d, is %d\n", 0, rank );
	    errs++;
	}

	rin[0] = 0; rin[1] = 1; rin[2] = 2;
	MPI_Group_translate_ranks( g2, 3, rin, g1, rout );
	for (i=0; i<3; i++) {
	    if (rout[i] != ranks[i]) {
		fprintf( stderr, "translated rank[%d] %d should be %d\n", 
			 i, rout[i], ranks[i] );
		errs++;
	    }
	}
	
	/* Translate the process of the self group against another group */
	MPI_Comm_group( MPI_COMM_SELF, &selfgroup );
	rin[0] = 0;
	MPI_Group_translate_ranks( selfgroup, 1, rin, g1, rout );
	if (rout[0] != myrank) {
	    fprintf( stderr, "translated of self is %d should be %d\n", 
			 rout[0], myrank );
	    errs++;
	}

	for (i=0; i<size; i++) 
	    rin[i] = i;
	MPI_Group_translate_ranks( g1, size, rin, selfgroup, rout );
	for (i=0; i<size; i++) {
	    if (i == myrank && rout[i] != 0) {
		fprintf( stderr, "translated world to self of %d is %d\n",
			 i, rout[i] );
		errs++;
	    }
	    else if (i != myrank && rout[i] != MPI_UNDEFINED) {
		fprintf( stderr, "translated world to self of %d should be undefined, is %d\n",
			 i, rout[i] );
		errs++;
	    }
	}
	MPI_Group_free( &selfgroup );

	/* Exclude everyone in our group */
	{
	    int ii, *lranks, g1size;

	    MPI_Group_size( g1, &g1size );
	    
	    lranks = (int *)malloc( g1size * sizeof(int) );
	    for (ii=0; ii<g1size; ii++) lranks[ii] = ii;
	    MPI_Group_excl( g1, g1size, lranks, &g6 );
	    if (g6 != MPI_GROUP_EMPTY) {
		fprintf( stderr, "Group formed by excluding all ranks not empty\n" );
		errs++;
		MPI_Group_free( &g6 );
	    }
	    free( lranks );
	}
	
	/* Add tests for additional group operations */
	/* 
	   g2 = incl 1,3,7
	   g3 = excl 1,3,7
	   intersect ( w, g2 ) => g2
	   intersect ( w, g3 ) => g3
	   intersect ( g2, g3 ) => empty
	   
	   g4 = rincl 1:n-1:2
	   g5 = rexcl 1:n-1:2
	   union( g4, g5 ) => world
	   g6 = rincl n-1:1:-1 
	   g7 = rexcl n-1:1:-1
	   union( g6, g7 ) => concat of entries, similar to world
	   diff( w, g2 ) => g3
	*/
	MPI_Group_free( &g2 );

	range[0][0] = 1;
	range[0][1] = size-1;
	range[0][2] = 2;
	MPI_Group_range_excl( g1, 1, range, &g5 );

	range[0][0] = 1;
	range[0][1] = size-1;
	range[0][2] = 2;
	MPI_Group_range_incl( g1, 1, range, &g4 );
	MPI_Group_union( g4, g5, &g45 );
	MPI_Group_compare( MPI_GROUP_EMPTY, g4, &result );
	if (result != MPI_UNEQUAL) {
	    errs++;
	    fprintf( stderr, "Comparison with empty group gave %d, not 3\n",
		     result );
	}
	MPI_Group_free( &g4 );
	MPI_Group_free( &g5 );
	MPI_Group_free( &g45 );

	/* Now, duplicate the test, but using negative strides */
	range[0][0] = size-1;
	range[0][1] = 1;
	range[0][2] = -2;
	MPI_Group_range_excl( g1, 1, range, &g5 );

	range[0][0] = size-1;
	range[0][1] = 1;
	range[0][2] = -2;
	MPI_Group_range_incl( g1, 1, range, &g4 );

	MPI_Group_union( g4, g5, &g45 );

	MPI_Group_compare( MPI_GROUP_EMPTY, g4, &result );
	if (result != MPI_UNEQUAL) {
	    errs++;
	    fprintf( stderr, "Comparison with empty group (formed with negative strides) gave %d, not 3\n",
		     result );
	}
	MPI_Group_free( &g4 );
	MPI_Group_free( &g5 );
	MPI_Group_free( &g45 );
        MPI_Group_free( &g1 );

    if (myrank == 0) 
    {
	if (errs == 0) {
	    printf( " No Errors\n" );
	}
	else {
	    printf( "Found %d errors\n", errs );
	}
    }

    MPI_Finalize();
    return 0;
}
Example #28
int main(int argc, char **argv) {

	int rankLeft[4] = {0, 1, 2, 3}, rankRight[4] = {4, 5, 6, 7};
	int i, result;
	char outStr[600];

	int nProcs, myRank;
	MPI_Group grpWorld, grpNew;
	MPI_Comm commNew;

	MPI_Init(&argc, &argv);
	MPI_Comm_size(MPI_COMM_WORLD, &nProcs);
	MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

	MPI_Comm_group(MPI_COMM_WORLD, &grpWorld);
	if (myRank < nProcs / 2) {
		MPI_Group_incl(grpWorld, nProcs / 2, rankLeft, &grpNew);
	} else {
		MPI_Group_incl(grpWorld, nProcs / 2, rankRight, &grpNew);
	}
	MPI_Comm_create(MPI_COMM_WORLD, grpNew, &commNew);

	int myRankCommNew, nProcsCommNew;
	int myRankGrpNew, nProcsGrpNew;

	MPI_Comm_rank(commNew, &myRankCommNew);
	MPI_Comm_size(commNew, &nProcsCommNew);
	MPI_Group_rank(grpNew, &myRankGrpNew);
	MPI_Group_size(grpNew, &nProcsGrpNew);

	fprintf(stdout, "WorldRank: %d in %d, NewCommRank: %d in %d, NewGrpRank: %d in %d\n",
		myRank, nProcs, myRankCommNew, nProcsCommNew, myRankGrpNew, nProcsGrpNew);

	MPI_Barrier(MPI_COMM_WORLD);

	int sendBuf = myRank, recvBuf;

	MPI_Allreduce(&sendBuf, &recvBuf, 1, MPI_INT, MPI_SUM, commNew);

	fprintf(stdout, "WorldRank = %d, sendBuf = %d, recvBuf = %d\n", myRank, sendBuf, recvBuf);
	fflush(stdout);

	MPI_Barrier(MPI_COMM_WORLD);

	int ranks1[8] = {0, 1, 2, 3, 4, 5, 6, 7}, ranks2[8];

	MPI_Group_compare(grpWorld, grpNew, &result);
	MPI_Group_translate_ranks(grpWorld, nProcs, ranks1, grpNew, ranks2);
	
	if (myRank == 0) {
		fprintf(stdout, "result = %d\n", result);
	}
	int outLen = sprintf(outStr, "rank %d: ", myRank);
	for (i = 0; i < nProcs; i++) {
		outLen += sprintf(outStr + outLen, "%d = %d ", ranks1[i], ranks2[i]);
	}
	fprintf(stdout, "%s\n", outStr);


	MPI_Comm_free(&commNew);
	MPI_Group_free(&grpNew);
	MPI_Group_free(&grpWorld);

	MPI_Finalize();
	return 0;
}
Example #29
int foMPI_Win_create(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, foMPI_Win *win) {

  int i;
  int* temp;
  MPI_Group group_comm_world, group;
  foMPI_Win_struct_t win_info;

  assert( size >= 0 );
  assert( disp_unit > 0 );

  /* allocate the window */
  void * memptr;
  _foMPI_ALIGNED_ALLOC(&memptr,  sizeof(foMPI_Win_desc_t) )
  *win = memptr;
  assert(*win != NULL);

  /* transition info: once a proper foMPI communicator is implemented, update this; the UGNI path uses it */
  (*win)->fompi_comm = glob_info.comm_world;
  /* the window communicator specific informations */
  (*win)->comm = comm;
  MPI_Comm_size( comm, &((*win)->commsize) );
  MPI_Comm_rank( comm, &((*win)->commrank) );

  /* get all ranks from the members of the group */
  (*win)->group_ranks = _foMPI_ALLOC((*win)->commsize * sizeof(int32_t));
  assert((*win)->group_ranks != NULL);
  
  temp = _foMPI_ALLOC((*win)->commsize * sizeof(int));
  assert( temp != NULL );
  for( i=0 ; i<(*win)->commsize ; i++) {
    temp[i] = i;
  }
  MPI_Comm_group(comm, &group);
  MPI_Comm_group(MPI_COMM_WORLD, &group_comm_world);
  MPI_Group_translate_ranks(group, (*win)->commsize, &temp[0], group_comm_world, &((*win)->group_ranks[0]));

  _foMPI_FREE(temp);
  MPI_Group_free(&group_comm_world);
#ifdef UGNI
  gni_return_t status_gni;

#ifdef _foMPI_UGNI_WIN_RELATED_SRC_CQ
  /*
   * Create the source completion queue.
   *     nic_handle is the NIC handle that this completion queue will be
   *          associated with.
   *     number_of_cq_entries is the size of the completion queue.
   *     zero is the delay count, the number of allowed events before an
   *          interrupt is generated.
   *     GNI_CQ_NOBLOCK states that the operation mode is non-blocking.
   *     NULL states that no user supplied callback function is defined.
   *     NULL states that no user supplied pointer is passed to the callback
   *          function.
   *     cq_handle is the handle that is returned pointing to this newly
   *          created completion queue.
   */
  (*win)->number_of_source_cq_entries = _foMPI_NUM_SRC_CQ_ENTRIES;
  status_gni = GNI_CqCreate((*win)->fompi_comm->nic_handle, (*win)->number_of_source_cq_entries, 0,
    _foMPI_SRC_CQ_MODE, NULL, NULL, &((*win)->source_cq_handle));
  _check_gni_status(status_gni, GNI_RC_SUCCESS, (char*) __FILE__, __LINE__);
  _foMPI_TRACE_LOG(3, "GNI_CqCreate      source with %i entries\n", (*win)->number_of_source_cq_entries);
#endif
  (*win)->counter_ugni_nbi = 0;

  /*
   * Create the destination completion queue.
   *     nic_handle is the NIC handle that this completion queue will be
   *          associated with.
   *     number_of_dest_cq_entries is the size of the completion queue.
   *     zero is the delay count, the number of allowed events before
   *          an interrupt is generated.
   *     GNI_CQ_NOBLOCK states that the operation mode is non-blocking.
   *     NULL states that no user supplied callback function is defined.
   *     NULL states that no user supplied pointer is passed to the
   *          callback function.
   *     destination_cq_handle is the handle that is returned pointing to
   *          this newly created completion queue.
   */
  (*win)->number_of_dest_cq_entries = _foMPI_NUM_DST_CQ_ENTRIES;
  /* we try to use the handler instead of only the dispatcher, trying to decrease the latency of the notification */
#ifdef NOTIFICATION_SOFTWARE_AGENT
  //status_gni = GNI_CqCreate(glob_info.comm_world->nic_handle, (*win)->number_of_dest_cq_entries, 0,
  //    foMPI_DST_CQ_MODE, &foMPI_NotificationHandler, (*win), &((*win)->destination_cq_handle));
#else
  //TODO: substitute comm world with foMPI_Comm
  status_gni = GNI_CqCreate((*win)->fompi_comm->nic_handle, (*win)->number_of_dest_cq_entries, 0,
    _foMPI_DST_CQ_MODE, NULL, NULL, &((*win)->destination_cq_handle));
#endif
  _check_gni_status(status_gni, GNI_RC_SUCCESS, (char*) __FILE__, __LINE__);
  _foMPI_TRACE_LOG(3, "GNI_CqCreate      destination with %i entries\n", (*win)->number_of_dest_cq_entries);

  /* init backup_queue */
  (*win)->destination_cq_discarded = _fompi_notif_uq_init();
  _foMPI_TRACE_LOG(3, "fompi_oset    Created \n");
#endif
#ifdef XPMEM
  /* get communicator for all onnode processes that are part of the window */
  MPI_Group_intersection( glob_info.onnode_group, group /* window group */, &((*win)->win_onnode_group) );
  MPI_Comm_create( comm, (*win)->win_onnode_group, &((*win)->win_onnode_comm) );

  /* mapping of the global ranks (of the window communicator */
  MPI_Group_size( (*win)->win_onnode_group, &((*win)->onnode_size) );

  temp = _foMPI_ALLOC( (*win)->onnode_size * sizeof(int));
  assert( temp != NULL );

  (*win)->onnode_ranks = _foMPI_ALLOC( (*win)->onnode_size * sizeof(int));
  assert( (*win)->onnode_ranks != NULL );

  for( i=0 ; i<(*win)->onnode_size ; i++) {
    temp[i] = i;
  }
  MPI_Group_translate_ranks((*win)->win_onnode_group, (*win)->onnode_size, &temp[0], group, &((*win)->onnode_ranks[0]) );

  for( i=1 ; i<(*win)->onnode_size ; i++ ) {
    if( (*win)->onnode_ranks[i] != ( (*win)->onnode_ranks[i-1]+1 ) ) {
      break; 
    }
  }
  if (i == (*win)->onnode_size) {
    (*win)->onnode_lower_bound = (*win)->onnode_ranks[0];
    (*win)->onnode_upper_bound = (*win)->onnode_ranks[(*win)->onnode_size-1];
    _foMPI_FREE( (*win)->onnode_ranks );
  } else {
    (*win)->onnode_lower_bound = -1;
    (*win)->onnode_upper_bound = -1;
  }

  // NOTIFICATION QUEUE
  /* init data structure and export */
  int exp_size;
  xpmem_notif_init_queue(*win, (*win)->onnode_size);
  /* export memory and save into the segment descriptor to send to other on-node PEs */
  (*win)->xpmem_segdesc.notif_queue = foMPI_export_memory_xpmem((*win)->xpmem_notif_queue, sizeof(fompi_xpmem_notif_queue_t));
  (*win)->xpmem_segdesc.notif_queue_state = foMPI_export_memory_xpmem((void*)((*win)->xpmem_notif_state_lock), sizeof(fompi_xpmem_notif_state_t) + (*win)->onnode_size * sizeof(lock_flags_t));

  _foMPI_FREE( temp );
#endif
  MPI_Group_free(&group);

  /* allocate the memory for the remote window information */

  _foMPI_ALIGNED_ALLOC(&memptr, (*win)->commsize * sizeof(foMPI_Win_struct_t) )
  (*win)->win_array =  memptr ;
  assert((*win)->win_array != NULL);

  /* set the information for the remote processes */
  if ( (base != NULL) && (size > 0) ) {
    win_info.base = base;
    _foMPI_mem_register( base, (uint64_t) size, &(win_info.seg), *win );

#ifdef XPMEM
    (*win)->xpmem_segdesc.base = foMPI_export_memory_xpmem(base, size);
#endif
  } else {
    win_info.base = NULL;
#ifdef XPMEM
    (*win)->xpmem_segdesc.base.seg = -1;
#endif
  }

  win_info.size = size;
  win_info.disp_unit = disp_unit;

  win_info.win_ptr = *win;
  _foMPI_mem_register( *win, (uint64_t) sizeof(foMPI_Win_desc_t), &(win_info.win_ptr_seg), *win );

#ifdef XPMEM
  (*win)->xpmem_segdesc.win_ptr = foMPI_export_memory_xpmem(*win, sizeof(foMPI_Win_desc_t));
#endif

  /* PSCW (post-start-complete-wait) matching */

  _foMPI_ALIGNED_ALLOC(&memptr, (*win)->commsize * sizeof(uint64_t) )
  win_info.pscw_matching_exposure = memptr;
  assert( win_info.pscw_matching_exposure != NULL );

  _foMPI_ALIGNED_ALLOC(&memptr, (*win)->commsize * sizeof(uint32_t))
  (*win)->pscw_matching_access = memptr ;
  assert( (*win)->pscw_matching_access != NULL );

  for( i=0 ; i<(*win)->commsize ; i++ ){
    win_info.pscw_matching_exposure[i] = 0;
    (*win)->pscw_matching_access[i] = 0;
  }

  _foMPI_mem_register( win_info.pscw_matching_exposure, (uint64_t) (*win)->commsize * sizeof(uint64_t), &(win_info.pscw_matching_exposure_seg), *win );

#ifdef XPMEM
  (*win)->xpmem_segdesc.pscw_matching_exposure = foMPI_export_memory_xpmem( win_info.pscw_matching_exposure, (*win)->commsize * sizeof(uint64_t) );
#endif

  /* lock synchronisation */
  (*win)->mutex = foMPI_MUTEX_NONE;
  (*win)->lock_mutex = 0; /* no current access */
  if ( (*win)->commrank == MASTER ) {
    (*win)->lock_all_mutex = 0; /* no current access */
  }
  (*win)->local_exclusive_count = 0;
  (*win)->excl_locks = NULL;

  /* management of rma operations */
  (*win)->nbi_counter = 0;

  (*win)->name = NULL;
 
  (*win)->create_flavor = foMPI_WIN_FLAVOR_CREATE;

  MPI_Allgather( &win_info, sizeof(foMPI_Win_struct_t), MPI_BYTE, &((*win)->win_array[0]), sizeof(foMPI_Win_struct_t), MPI_BYTE, comm );

#ifdef XPMEM
  /* exchange the exposure infos with the onnode processes */
  (*win)->xpmem_array = _foMPI_ALLOC( (*win)->onnode_size * sizeof(fompi_xpmem_addr_t) );
  assert( (*win)->xpmem_array != NULL );
  fompi_xpmem_info_t* xpmem_temp = _foMPI_ALLOC( (*win)->onnode_size * sizeof(fompi_xpmem_info_t) );
 
  MPI_Allgather( &((*win)->xpmem_segdesc), sizeof(fompi_xpmem_info_t), MPI_BYTE, &(xpmem_temp[0]), sizeof(fompi_xpmem_info_t), MPI_BYTE, (*win)->win_onnode_comm );
  
  /* map the onnode memory */
  for( i=0 ; i<(*win)->onnode_size ; i++ ) {
    if (xpmem_temp[i].base.seg != -1) {
      (*win)->xpmem_array[i].base = foMPI_map_memory_xpmem( xpmem_temp[i].base, (*win)->win_array[foMPI_onnode_rank_local_to_global( i, (*win) )].size,
        &((*win)->xpmem_array[i].base_apid), &((*win)->xpmem_array[i].base_offset) );
    } else {
      (*win)->xpmem_array[i].base_apid = -1;
    }
    (*win)->xpmem_array[i].win_ptr = foMPI_map_memory_xpmem( xpmem_temp[i].win_ptr, sizeof(foMPI_Win_desc_t), &((*win)->xpmem_array[i].win_ptr_apid), &((*win)->xpmem_array[i].win_ptr_offset) );
    (*win)->xpmem_array[i].pscw_matching_exposure = foMPI_map_memory_xpmem( xpmem_temp[i].pscw_matching_exposure, (*win)->commsize * sizeof(uint64_t),
      &((*win)->xpmem_array[i].pscw_matching_exposure_apid), &((*win)->xpmem_array[i].pscw_matching_exposure_offset) );
    //notifications
    (*win)->xpmem_array[i].notif_queue = foMPI_map_memory_xpmem( xpmem_temp[i].notif_queue, sizeof(fompi_xpmem_notif_queue_t), &((*win)->xpmem_array[i].notif_queue_apid), &((*win)->xpmem_array[i].notif_queue_offset) );
    (*win)->xpmem_array[i].notif_queue_state = foMPI_map_memory_xpmem( xpmem_temp[i].notif_queue_state, (*win)->onnode_size * sizeof(lock_flags_t), &((*win)->xpmem_array[i].notif_queue_state_apid), &((*win)->xpmem_array[i].notif_queue_state_offset) );

  }

  _foMPI_FREE( xpmem_temp );
#endif

  return MPI_SUCCESS;
}
Example #30
int main( int argc, char **argv )
{
    int i, n, n_goal = 2048, n_all, rc, n_ranks, *ranks, rank, size, len;
    int group_size;
    MPI_Group *group_array, world_group;
    char msg[MPI_MAX_ERROR_STRING];

    MPI_Init( &argc, &argv );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    n = n_goal;
    
    group_array = (MPI_Group *)malloc( n * sizeof(MPI_Group) );

    MPI_Comm_group( MPI_COMM_WORLD, &world_group );

    n_ranks = size;
    ranks = (int *)malloc( size * sizeof(int) );
    for (i=0; i<size; i++) ranks[i] = i;

    MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
    for (i=0; i<n; i++) {
	rc = MPI_Group_incl( world_group, n_ranks, ranks, group_array + i );
 	if (rc) {
	    fprintf( stderr, "Error when creating group number %d\n", i );
	    MPI_Error_string( rc, msg, &len );
	    fprintf( stderr, "%s\n", msg );
	    n = i + 1;
	    break;
	}
	else {
	    /* Check that the group was created (and that any errors were
	       caught) */
	    rc = MPI_Group_size( group_array[i], &group_size );
	    if (group_size != size) {
		fprintf( stderr, "Group number %d not correct (size = %d)\n", 
			 i, size );
		n = i + 1; 
		break;
	    }
	}
	
    }

    for (i=0; i<n; i++) {
	rc = MPI_Group_free( group_array + i );
	if (rc) {
	    fprintf( stderr, "Error when freeing group number %d\n", i );
	    MPI_Error_string( rc, msg, &len );
	    fprintf( stderr, "%s\n", msg );
	    break;
	}
    }

    MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL );
    MPI_Group_free( &world_group );

    MPI_Reduce( &n, &n_all, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD );
    if (rank == 0) {
	/* printf( "Completed test of %d type creations\n", n_all ); */
	if (n_all != n_goal) {
	    printf (
"This MPI implementation limits the number of groups that can be created\n\
This is allowed by the standard and is not a bug, but is a limit on the\n\
implementation\n" );
	}
	else {