Example #1
static PetscErrorCode DMCreateInterpolation_Redundant(DM dmc,DM dmf,Mat *P,Vec *scale)
{
  PetscErrorCode ierr;
  DM_Redundant   *redc = (DM_Redundant*)dmc->data;
  DM_Redundant   *redf = (DM_Redundant*)dmf->data;
  PetscMPIInt    flag;
  PetscInt       i,rstart,rend;

  PetscFunctionBegin;
  ierr = MPI_Comm_compare(PetscObjectComm((PetscObject)dmc),PetscObjectComm((PetscObject)dmf),&flag);CHKERRQ(ierr);
  if (flag != MPI_CONGRUENT && flag != MPI_IDENT) SETERRQ(PetscObjectComm((PetscObject)dmf),PETSC_ERR_SUP,"cannot change communicators");
  if (redc->rank != redf->rank) SETERRQ(PetscObjectComm((PetscObject)dmf),PETSC_ERR_ARG_INCOMP,"Owning rank does not match");
  if (redc->N != redf->N) SETERRQ(PetscObjectComm((PetscObject)dmf),PETSC_ERR_ARG_INCOMP,"Global size does not match");
  ierr = MatCreate(PetscObjectComm((PetscObject)dmc),P);CHKERRQ(ierr);
  ierr = MatSetSizes(*P,redc->n,redc->n,redc->N,redc->N);CHKERRQ(ierr);
  ierr = MatSetType(*P,MATAIJ);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(*P,1,0);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(*P,1,0,0,0);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(*P,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart; i<rend; i++) {ierr = MatSetValue(*P,i,i,1.0,INSERT_VALUES);CHKERRQ(ierr);}
  ierr = MatAssemblyBegin(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  if (scale) {ierr = DMCreateInterpolationScale(dmc,dmf,*P,scale);CHKERRQ(ierr);}
  PetscFunctionReturn(0);
}
Example #2
int main( int argc, char* argv[] )
{
  int i, res;
  int myrank, nprocs;
  MPI_Group g1;
  MPI_Comm com1, com2, com3;
  
  MPI_Init( &argc, &argv );

  MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
  MPI_Comm_size( MPI_COMM_WORLD, &nprocs );

  for( i=0; i<REPEAT; i++ ) {
    MPI_Comm_group(MPI_COMM_WORLD, &g1);
    MPI_Comm_create(MPI_COMM_WORLD, g1, &com1);
    
    MPI_Comm_compare(MPI_COMM_WORLD, com1, &res);
    MPI_Comm_dup(MPI_COMM_WORLD, &com2);
    
    MPI_Comm_split(MPI_COMM_WORLD, myrank, myrank, &com3);

    /* free everything created in this iteration so handles are not leaked */
    MPI_Comm_free(&com1);
    MPI_Comm_free(&com2);
    MPI_Comm_free(&com3);
    MPI_Group_free(&g1);

  }


  MPI_Finalize();
  return 0;
}
Example #3
/*MC
     PCTFS - A parallel direct solver intended for problems with very few unknowns (like the
         coarse grid in multigrid).

   Implemented by  Henry M. Tufo III and Paul Fischer

   Level: beginner

   Notes: Only implemented for MPIAIJ matrices

          Only works on a solver object that lives on all of PETSC_COMM_WORLD!

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC
M*/
PETSC_EXTERN PetscErrorCode PCCreate_TFS(PC pc)
{
  PetscErrorCode ierr;
  PC_TFS         *tfs;
  PetscMPIInt    cmp;

  PetscFunctionBegin;
  ierr = MPI_Comm_compare(PETSC_COMM_WORLD,PetscObjectComm((PetscObject)pc),&cmp);CHKERRQ(ierr);
  if (cmp != MPI_IDENT && cmp != MPI_CONGRUENT) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP,"TFS only works with PETSC_COMM_WORLD objects");
  ierr = PetscNewLog(pc,&tfs);CHKERRQ(ierr);

  tfs->xxt = 0;
  tfs->xyt = 0;
  tfs->b   = 0;
  tfs->xd  = 0;
  tfs->xo  = 0;
  tfs->nd  = 0;

  pc->ops->apply               = 0;
  pc->ops->applytranspose      = 0;
  pc->ops->setup               = PCSetUp_TFS;
  pc->ops->destroy             = PCDestroy_TFS;
  pc->ops->setfromoptions      = PCSetFromOptions_TFS;
  pc->ops->view                = PCView_TFS;
  pc->ops->applyrichardson     = 0;
  pc->ops->applysymmetricleft  = 0;
  pc->ops->applysymmetricright = 0;
  pc->data                     = (void*)tfs;
  PetscFunctionReturn(0);
}
Example #4
/*@
   ISEqual - Compares whether two index sets have the same set of indices.

   Collective on IS

   Input Parameters:
.  is1, is2 - The index sets being compared

   Output Parameter:
.  flg - output flag, either PETSC_TRUE (if both index sets have the
         same indices) or PETSC_FALSE (if the index sets differ in size
         or in the set of indices)

   Level: intermediate

   Note:
   This routine sorts the contents of the index sets before
   the comparison is made, so the order of the indices on a processor is immaterial.

   Each processor has to have the same indices in the two sets, for example,
$           Processor 
$             0      1
$    is1 = {0, 1} {2, 3}
$    is2 = {2, 3} {0, 1}
   will return false.

    Concepts: index sets^equal
    Concepts: IS^equal

@*/
PetscErrorCode PETSCVEC_DLLEXPORT ISEqual(IS is1,IS is2,PetscTruth *flg)
{
  PetscInt       sz1,sz2,*a1,*a2;
  const PetscInt *ptr1,*ptr2;
  PetscTruth     flag;
  MPI_Comm       comm;
  PetscErrorCode ierr;
  PetscMPIInt    mflg;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(is1,IS_COOKIE,1);
  PetscValidHeaderSpecific(is2,IS_COOKIE,2);
  PetscValidIntPointer(flg,3);

  if (is1 == is2) {
    *flg = PETSC_TRUE;
    PetscFunctionReturn(0);
  }  

  ierr = MPI_Comm_compare(((PetscObject)is1)->comm,((PetscObject)is2)->comm,&mflg);CHKERRQ(ierr);
  if (mflg != MPI_CONGRUENT && mflg != MPI_IDENT) {
    *flg = PETSC_FALSE;
    PetscFunctionReturn(0);
  }

  ierr = ISGetSize(is1,&sz1);CHKERRQ(ierr);
  ierr = ISGetSize(is2,&sz2);CHKERRQ(ierr);
  if (sz1 != sz2) { 
    *flg = PETSC_FALSE;
  } else {
    ierr = ISGetLocalSize(is1,&sz1);CHKERRQ(ierr);
    ierr = ISGetLocalSize(is2,&sz2);CHKERRQ(ierr);

    if (sz1 != sz2) {
      flag = PETSC_FALSE;
    } else {
      ierr = ISGetIndices(is1,&ptr1);CHKERRQ(ierr);
      ierr = ISGetIndices(is2,&ptr2);CHKERRQ(ierr);
  
      ierr = PetscMalloc(sz1*sizeof(PetscInt),&a1);CHKERRQ(ierr);
      ierr = PetscMalloc(sz2*sizeof(PetscInt),&a2);CHKERRQ(ierr);

      ierr = PetscMemcpy(a1,ptr1,sz1*sizeof(PetscInt));CHKERRQ(ierr);
      ierr = PetscMemcpy(a2,ptr2,sz2*sizeof(PetscInt));CHKERRQ(ierr);

      ierr = PetscSortInt(sz1,a1);CHKERRQ(ierr);
      ierr = PetscSortInt(sz2,a2);CHKERRQ(ierr);
      ierr = PetscMemcmp(a1,a2,sz1*sizeof(PetscInt),&flag);CHKERRQ(ierr);

      ierr = ISRestoreIndices(is1,&ptr1);CHKERRQ(ierr);
      ierr = ISRestoreIndices(is2,&ptr2);CHKERRQ(ierr);
  
      ierr = PetscFree(a1);CHKERRQ(ierr);
      ierr = PetscFree(a2);CHKERRQ(ierr);
    }
    ierr = PetscObjectGetComm((PetscObject)is1,&comm);CHKERRQ(ierr);  
    ierr = MPI_Allreduce(&flag,flg,1,MPI_INT,MPI_MIN,comm);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
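
As a complement to the documentation above, here is a small, hypothetical driver for ISEqual(), written against a modern PETSc (PetscBool rather than the older PetscTruth shown): on every rank the two index sets hold the same indices but in a different local order, so ISEqual() is expected to report PETSC_TRUE because the routine sorts before comparing.

#include <petscis.h>

int main(int argc,char **argv)
{
  IS             is1,is2;
  PetscBool      flg;
  PetscMPIInt    rank;
  PetscInt       a[2],b[2];
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  a[0] = 2*rank;   a[1] = 2*rank+1;   /* e.g. {0,1} on rank 0, {2,3} on rank 1 */
  b[0] = 2*rank+1; b[1] = 2*rank;     /* same indices, different local order   */
  ierr = ISCreateGeneral(PETSC_COMM_WORLD,2,a,PETSC_COPY_VALUES,&is1);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_WORLD,2,b,PETSC_COPY_VALUES,&is2);CHKERRQ(ierr);
  ierr = ISEqual(is1,is2,&flg);CHKERRQ(ierr);   /* PETSC_TRUE: index order is immaterial */
  ierr = PetscPrintf(PETSC_COMM_WORLD,"ISEqual: %d\n",(int)flg);CHKERRQ(ierr);
  ierr = ISDestroy(&is1);CHKERRQ(ierr);
  ierr = ISDestroy(&is2);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}
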
Example #5
/*@C
   PetscBagLoad - Loads a bag of values from a binary file

   Collective on PetscViewer

   Input Parameters:
+  viewer - file to load values from
-  bag - the bag of values

   Notes:
   You must have created and registered all the fields in the bag before loading into it.

   Level: beginner

.seealso: PetscBag, PetscBagSetName(), PetscBagDestroy(), PetscBagView(), PetscBagGetData()
           PetscBagRegisterReal(), PetscBagRegisterInt(), PetscBagRegisterBool(), PetscBagRegisterScalar()
           PetscBagSetFromOptions(), PetscBagCreate(), PetscBagGetName(), PetscBagRegisterEnum()

@*/
PetscErrorCode  PetscBagLoad(PetscViewer view,PetscBag bag)
{
  PetscErrorCode ierr;
  PetscBool      isbinary;
  PetscInt       classid,bagcount,i,dtype,msize,offset,deprecatedbagsize;
  char           name[PETSC_BAG_NAME_LENGTH],help[PETSC_BAG_HELP_LENGTH],**list;
  PetscBagItem   nitem;
  MPI_Comm       comm;
  PetscMPIInt    flag;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)view,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_compare(comm,bag->bagcomm,&flag);CHKERRQ(ierr);
  if (flag != MPI_CONGRUENT && flag != MPI_IDENT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NOTSAMECOMM,"Different communicators in the viewer and bag");
  ierr = PetscObjectTypeCompare((PetscObject)view,PETSCVIEWERBINARY,&isbinary);CHKERRQ(ierr);
  if (!isbinary) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"No support for this viewer type");

  ierr = PetscViewerBinaryRead(view,&classid,1,NULL,PETSC_INT);CHKERRQ(ierr);
  if (classid != PETSC_BAG_FILE_CLASSID) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Not PetscBag next in binary file");
  ierr = PetscViewerBinaryRead(view,&deprecatedbagsize,1,NULL,PETSC_INT);CHKERRQ(ierr);
  ierr = PetscViewerBinaryRead(view,&bagcount,1,NULL,PETSC_INT);CHKERRQ(ierr);
  if (bagcount != bag->count) SETERRQ2(comm,PETSC_ERR_ARG_INCOMP,"Bag in file has different number of entries %d than passed in bag %d\n",(int)bagcount,(int)bag->count);CHKERRQ(ierr);
  ierr = PetscViewerBinaryRead(view,bag->bagname,PETSC_BAG_NAME_LENGTH,NULL,PETSC_CHAR);CHKERRQ(ierr);
  ierr = PetscViewerBinaryRead(view,bag->baghelp,PETSC_BAG_HELP_LENGTH,NULL,PETSC_CHAR);CHKERRQ(ierr);

  nitem = bag->bagitems;
  for (i=0; i<bagcount; i++) {
    ierr = PetscViewerBinaryRead(view,&offset,1,NULL,PETSC_INT);CHKERRQ(ierr);
    /* ignore the offset in the file */
    ierr = PetscViewerBinaryRead(view,&dtype,1,NULL,PETSC_INT);CHKERRQ(ierr);
    ierr = PetscViewerBinaryRead(view,name,PETSC_BAG_NAME_LENGTH,NULL,PETSC_CHAR);CHKERRQ(ierr);
    ierr = PetscViewerBinaryRead(view,help,PETSC_BAG_HELP_LENGTH,NULL,PETSC_CHAR);CHKERRQ(ierr);
    ierr = PetscViewerBinaryRead(view,&msize,1,NULL,PETSC_INT);CHKERRQ(ierr);

    if (dtype == (PetscInt) PETSC_CHAR) {
      ierr = PetscViewerBinaryRead(view,((char*)bag)+nitem->offset,msize,NULL,PETSC_CHAR);CHKERRQ(ierr);
    } else if (dtype == (PetscInt) PETSC_REAL) {
      ierr = PetscViewerBinaryRead(view,((char*)bag)+nitem->offset,msize,NULL,PETSC_REAL);CHKERRQ(ierr);
    } else if (dtype == (PetscInt) PETSC_SCALAR) {
      ierr = PetscViewerBinaryRead(view,((char*)bag)+nitem->offset,1,NULL,PETSC_SCALAR);CHKERRQ(ierr);
    } else if (dtype == (PetscInt) PETSC_INT) {
      ierr = PetscViewerBinaryRead(view,((char*)bag)+nitem->offset,msize,NULL,PETSC_INT);CHKERRQ(ierr);
    } else if (dtype == (PetscInt) PETSC_BOOL) {
      ierr = PetscViewerBinaryRead(view,((char*)bag)+nitem->offset,msize,NULL,PETSC_BOOL);CHKERRQ(ierr);
    } else if (dtype == (PetscInt) PETSC_ENUM) {
      ierr = PetscViewerBinaryRead(view,((char*)bag)+nitem->offset,1,NULL,PETSC_ENUM);CHKERRQ(ierr);
      ierr = PetscViewerBinaryReadStringArray(view,&list);CHKERRQ(ierr);
      /* don't need to save list because it is already registered in the bag */
      ierr = PetscFree(list);CHKERRQ(ierr);
    }
    nitem = nitem->next;
  }
  PetscFunctionReturn(0);
}
Example #6
void check_mpi_comm_membership(MPI_Comm commself,MPI_Comm commcheck,const char *name_a,const char *name_b,FILE *logFile){
  int result;
  fprintf(logFile,"checking %s against %s :\n",name_a,name_b);
  /* compare the communicator passed as commself (normally MPI_COMM_SELF) with commcheck */
  MPI_Comm_compare(commself,commcheck,&result);
  switch(result){
  case MPI_CONGRUENT: fprintf(logFile,"CONGRUENT\n"); break;
  case MPI_IDENT:     fprintf(logFile,"IDENTICAL\n"); break;
  case MPI_SIMILAR:   fprintf(logFile,"SIMILAR\n"); break;
  case MPI_UNEQUAL:   fprintf(logFile,"UNEQUAL\n"); break;
  default:            fprintf(logFile,"unknown relation ??\n"); break;
  }
}
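
For reference, a minimal, self-contained sketch (not taken from the example above) that exercises MPI_Comm_compare directly and shows how the four possible results typically arise; the helper name print_relation is invented for illustration.

#include <mpi.h>
#include <stdio.h>

/* Hypothetical helper: translate an MPI_Comm_compare result into text. */
static void print_relation(const char *label, int result)
{
    const char *s = result == MPI_IDENT     ? "MPI_IDENT"     :
                    result == MPI_CONGRUENT ? "MPI_CONGRUENT" :
                    result == MPI_SIMILAR   ? "MPI_SIMILAR"   : "MPI_UNEQUAL";
    printf("%s: %s\n", label, s);
}

int main(int argc, char *argv[])
{
    MPI_Comm dup, rev;
    int rank, size, result;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Same handle: MPI_IDENT. */
    MPI_Comm_compare(MPI_COMM_WORLD, MPI_COMM_WORLD, &result);
    if (!rank) print_relation("world vs world", result);

    /* A duplicate has the same group and rank order but a new context: MPI_CONGRUENT. */
    MPI_Comm_dup(MPI_COMM_WORLD, &dup);
    MPI_Comm_compare(MPI_COMM_WORLD, dup, &result);
    if (!rank) print_relation("world vs dup", result);

    /* Same members in reversed rank order: MPI_SIMILAR (when run on more than one process). */
    MPI_Comm_split(MPI_COMM_WORLD, 0, size - rank, &rev);
    MPI_Comm_compare(MPI_COMM_WORLD, rev, &result);
    if (!rank) print_relation("world vs reversed", result);

    /* Different group: MPI_UNEQUAL (again, on more than one process). */
    MPI_Comm_compare(MPI_COMM_WORLD, MPI_COMM_SELF, &result);
    if (!rank) print_relation("world vs self", result);

    MPI_Comm_free(&rev);
    MPI_Comm_free(&dup);
    MPI_Finalize();
    return 0;
}
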
Example #7
void mpi_comm_compare_f(MPI_Fint *comm1, MPI_Fint *comm2, MPI_Fint *result, MPI_Fint *ierr)
{
    MPI_Comm c_comm1 = MPI_Comm_f2c(*comm1);
    MPI_Comm c_comm2 = MPI_Comm_f2c(*comm2);
    OMPI_SINGLE_NAME_DECL(result);

    *ierr = OMPI_INT_2_FINT(MPI_Comm_compare(c_comm1, c_comm2, 
				     OMPI_SINGLE_NAME_CONVERT(result)));
    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
        OMPI_SINGLE_INT_2_FINT(result);
    }
}
Example #8
/*@C
    DMMGView - prints information on a DA based multi-level preconditioner

    Collective on DMMG and PetscViewer

    Input Parameters:
+   dmmg - the context
-   viewer - the viewer

    Level: advanced

.seealso: DMMGCreate(), DMMGDestroy(), DMMGSetMatType()

@*/
PetscErrorCode PETSCSNES_DLLEXPORT DMMGView(DMMG *dmmg,PetscViewer viewer)
{
  PetscErrorCode ierr;
  PetscInt       i,nlevels = dmmg[0]->nlevels;
  PetscMPIInt    flag;
  MPI_Comm       comm;
  PetscTruth     iascii,isbinary;

  PetscFunctionBegin;
  PetscValidPointer(dmmg,1);
  PetscValidHeaderSpecific(viewer,PETSC_VIEWER_COOKIE,2);
  ierr = PetscObjectGetComm((PetscObject)viewer,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_compare(comm,dmmg[0]->comm,&flag);CHKERRQ(ierr);
  if (flag != MPI_CONGRUENT && flag != MPI_IDENT) {
    SETERRQ(PETSC_ERR_ARG_NOTSAMECOMM,"Different communicators in the DMMG and the PetscViewer");
  }

  ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);CHKERRQ(ierr);
  if (isbinary) {
    for (i=0; i<nlevels; i++) {
      ierr = MatView(dmmg[i]->J,viewer);CHKERRQ(ierr);
    }
    for (i=1; i<nlevels; i++) {
      ierr = MatView(dmmg[i]->R,viewer);CHKERRQ(ierr);
    }
  } else {
    if (iascii) {
      ierr = PetscViewerASCIIPrintf(viewer,"DMMG Object with %D levels\n",nlevels);CHKERRQ(ierr);
      if (dmmg[0]->isctype == IS_COLORING_GLOBAL) {
        ierr = PetscViewerASCIIPrintf(viewer,"Using global (nonghosted) Jacobian coloring computation\n");CHKERRQ(ierr);
      } else {
        ierr = PetscViewerASCIIPrintf(viewer,"Using ghosted Jacobian coloring computation\n");CHKERRQ(ierr);
      }
    }
    for (i=0; i<nlevels; i++) {
      ierr = PetscViewerASCIIPushTab(viewer);CHKERRQ(ierr);
      ierr = DMView(dmmg[i]->dm,viewer);CHKERRQ(ierr);
      ierr = PetscViewerASCIIPopTab(viewer);CHKERRQ(ierr);
    }
    if (iascii) {
      ierr = PetscViewerASCIIPrintf(viewer,"Using matrix type %s\n",dmmg[nlevels-1]->mtype);CHKERRQ(ierr);
    }
    if (DMMGGetKSP(dmmg)) {
      ierr = KSPView(DMMGGetKSP(dmmg),viewer);CHKERRQ(ierr);
    } else if (DMMGGetSNES(dmmg)) {
      ierr = SNESView(DMMGGetSNES(dmmg),viewer);CHKERRQ(ierr);
    } else if (iascii) {
      ierr = PetscViewerASCIIPrintf(viewer,"DMMG does not have a SNES or KSP set\n");CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}
Example #9
/**
 * Utility routine that checks whether the two Boost.MPI communicators passed
 * as arguments are identical or congruent; returns true in that case and
 * false otherwise.
 */
bool compatible(const mpi::communicator& comm_A,
                const mpi::communicator& comm_B) {
    int error_code, result;
    error_code  = MPI_Comm_compare((MPI_Comm)comm_A, (MPI_Comm)comm_B, &result);
    if(error_code != MPI_SUCCESS) {
        throw std::runtime_error("MPI failure during call");
    }

    if (result == MPI_IDENT || result == MPI_CONGRUENT) {
        return true;
    }
    return false;
}
Example #10
void PARMCIX_AllFence_comm(MPI_Comm comm)
{
    int comm_result;

    MPI_Comm_compare( MPI_COMM_WORLD, comm, &comm_result );

    if ( comm_result==MPI_IDENT || comm_result==MPI_CONGRUENT )
        A1D_Flush_all();
    else
        A1D_Flush_comm(comm);

    return;
}
Example #11
static PetscErrorCode DMCoarsen_Redundant(DM dmf,MPI_Comm comm,DM *dmc)
{
  PetscErrorCode ierr;
  PetscMPIInt    flag;
  DM_Redundant   *redf = (DM_Redundant*)dmf->data;

  PetscFunctionBegin;
  if (comm == MPI_COMM_NULL) {
    ierr = PetscObjectGetComm((PetscObject)dmf,&comm);CHKERRQ(ierr);
  }
  ierr = MPI_Comm_compare(PetscObjectComm((PetscObject)dmf),comm,&flag);CHKERRQ(ierr);
  if (flag != MPI_CONGRUENT && flag != MPI_IDENT) SETERRQ(PetscObjectComm((PetscObject)dmf),PETSC_ERR_SUP,"cannot change communicators");
  ierr = DMRedundantCreate(comm,redf->rank,redf->N,dmc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
Example #12
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_TFS"
/*MC
     PCTFS - A parallel direct solver intended for problems with very few unknowns (like the 
         coarse grid in multigrid).

   Implemented by  Henry M. Tufo III and Paul Fischer

   Level: beginner

   Notes: Only implemented for MPIAIJ matrices

          Only works on a solver object that lives on all of PETSC_COMM_WORLD!

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC
M*/
PetscErrorCode  PCCreate_TFS(PC pc)
{
  PetscErrorCode ierr;
  PC_TFS         *tfs;
  PetscMPIInt    cmp;

  PetscFunctionBegin;
  ierr = MPI_Comm_compare(PETSC_COMM_WORLD,((PetscObject)pc)->comm,&cmp);CHKERRQ(ierr);
  if (cmp != MPI_IDENT && cmp != MPI_CONGRUENT) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP,"TFS only works with PETSC_COMM_WORLD objects");
  ierr = PetscNewLog(pc,PC_TFS,&tfs);CHKERRQ(ierr);

  tfs->xxt = 0;
  tfs->xyt = 0;
  tfs->b   = 0;
  tfs->xd  = 0;
  tfs->xo  = 0;
  tfs->nd  = 0;

  pc->ops->apply               = 0;
  pc->ops->applytranspose      = 0;
  pc->ops->setup               = PCSetUp_TFS;
  pc->ops->destroy             = PCDestroy_TFS;
  pc->ops->setfromoptions      = PCSetFromOptions_TFS;
  pc->ops->view                = PCView_TFS;
  pc->ops->applyrichardson     = 0;
  pc->ops->applysymmetricleft  = 0;
  pc->ops->applysymmetricright = 0;
  pc->data                     = (void*)tfs;
  PetscFunctionReturn(0);
}
Example #13
/*@
   ISOnComm - Split a parallel IS on subcomms (usually self) or concatenate index sets on subcomms into a parallel index set

   Collective on IS and comm

   Input Arguments:
+ is - index set
. comm - communicator for new index set
- mode - copy semantics, PETSC_USE_POINTER for no-copy if possible, otherwise PETSC_COPY_VALUES

   Output Arguments:
. newis - new IS on comm

   Level: advanced

   Notes:
   It is usually desirable to create a parallel IS and look at the local part when necessary.

   This function is useful if serial ISs must be created independently, or to view many
   logically independent serial ISs.

   The input IS must have the same type on every process.

.seealso: ISSplit()
@*/
PetscErrorCode  ISOnComm(IS is,MPI_Comm comm,PetscCopyMode mode,IS *newis)
{
  PetscErrorCode ierr;
  PetscMPIInt    match;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(is,IS_CLASSID,1);
  PetscValidPointer(newis,3);
  ierr = MPI_Comm_compare(PetscObjectComm((PetscObject)is),comm,&match);CHKERRQ(ierr);
  if (mode != PETSC_COPY_VALUES && (match == MPI_IDENT || match == MPI_CONGRUENT)) {
    ierr   = PetscObjectReference((PetscObject)is);CHKERRQ(ierr);
    *newis = is;
  } else {
    ierr = (*is->ops->oncomm)(is,comm,mode,newis);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
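
A short, hypothetical usage sketch along the lines of the notes above (assuming a reasonably recent PETSc): each rank builds an independent serial IS, and ISOnComm() then concatenates them into one parallel IS on PETSC_COMM_WORLD so the whole set can be viewed collectively.

#include <petscis.h>
#include <petscviewer.h>

int main(int argc,char **argv)
{
  IS             isloc,isglob;
  PetscMPIInt    rank;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  /* a serial index set of 4 contiguous entries, offset per rank */
  ierr = ISCreateStride(PETSC_COMM_SELF,4,4*rank,1,&isloc);CHKERRQ(ierr);
  /* gather the serial sets into a single parallel IS on PETSC_COMM_WORLD */
  ierr = ISOnComm(isloc,PETSC_COMM_WORLD,PETSC_COPY_VALUES,&isglob);CHKERRQ(ierr);
  ierr = ISView(isglob,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = ISDestroy(&isglob);CHKERRQ(ierr);
  ierr = ISDestroy(&isloc);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}
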
Example #14
int
SAMRAI_MPI::Comm_compare(
   Comm comm1,
   Comm comm2,
   int* result)
{
#ifndef HAVE_MPI
   NULL_USE(comm1);
   NULL_USE(comm2);
   NULL_USE(result);
#endif
   int rval = MPI_SUCCESS;
   if (!s_mpi_is_initialized) {
      TBOX_ERROR("SAMRAI_MPI::Comm_compare is a no-op without run-time MPI!");
   }
#ifdef HAVE_MPI
   else {
      rval = MPI_Comm_compare(comm1, comm2, result);
   }
#endif
   return rval;
}
Example #15
value caml_mpi_comm_compare(value comm1, value comm2)
{
  int res;
  MPI_Comm_compare(Comm_val(comm1), Comm_val(comm2), &res);
  return Val_bool(res);
}
Example #16
int main( int argc, char **argv )
{
  int size, rank, key, his_key, lrank, result;
  MPI_Comm myComm;
  MPI_Comm myFirstComm;
  MPI_Comm mySecondComm;
  int errors = 0, sum_errors;
  MPI_Status status;
  
  /* Initialization */
  MPI_Init ( &argc, &argv );
  MPI_Comm_rank ( MPI_COMM_WORLD, &rank);
  if (verbose) printf("[%d] MPI_Init complete!\n",rank);fflush(stdout);
  MPI_Comm_size ( MPI_COMM_WORLD, &size);

  /* Only works for 2 or more processes */
  if (size >= 2) {
    MPI_Comm merge1, merge2, merge3, merge4;

    /* Generate membership key in the range [0,1] */
    key = rank % 2;
    
    MPI_Comm_split ( MPI_COMM_WORLD, key, rank, &myComm );
    /* This creates an intercomm that is the size of comm world 
       but has processes grouped by even and odd */
    MPI_Intercomm_create (myComm, 0, MPI_COMM_WORLD, (key+1)%2, 1, 
                          &myFirstComm );
    /* Dup an intercomm */
    MPI_Comm_dup ( myFirstComm, &mySecondComm );
    MPI_Comm_rank( mySecondComm, &lrank );
    his_key = -1;

	if (verbose) printf("[%d] Communicators created!\n",rank);fflush(stdout);
    
    /* Leaders communicate with each other */
    if (lrank == 0) {
      MPI_Sendrecv (&key,     1, MPI_INT, 0, 0,
                    &his_key, 1, MPI_INT, 0, 0, mySecondComm, &status);
      if (key != (his_key+1)%2) {
	  printf( "Received %d but expected %d\n", his_key, (his_key+1)%2 );
	  errors++;
      }
    }
    if (verbose) printf("[%d] MPI_Sendrecv completed!\n",rank);fflush(stdout);
    if (errors)
      printf("[%d] Failed!\n",rank);

	
    if (verbose) printf( "About to merge intercommunicators\n" );fflush(stdout);
    MPI_Intercomm_merge ( mySecondComm, key, &merge1 );
	if (verbose) printf( "merge1 done\n" );fflush(stdout);
    MPI_Intercomm_merge ( mySecondComm, (key+1)%2, &merge2 );
	if (verbose) printf( "merge2 done\n" );fflush(stdout);
    MPI_Intercomm_merge ( mySecondComm, 0, &merge3 );
	if (verbose) printf( "merge3 done\n" );fflush(stdout);
    MPI_Intercomm_merge ( mySecondComm, 1, &merge4 );
	if (verbose) printf( "merge4 done\n" );fflush(stdout);

	if (verbose) printf("[%d] MPI_Intercomm_merge completed!\n",rank);fflush(stdout);

    /* We should check that these are correct!  An easy test is that
       the merged comms are all MPI_SIMILAR (unless 2 processes are used,
       in which case MPI_CONGRUENT is ok). */
    MPI_Comm_compare( merge1, MPI_COMM_WORLD, &result );
    if ((size > 2 && result != MPI_SIMILAR) ||
	(size == 2 && result != MPI_CONGRUENT)) {
	errors ++;
	printf( "merge1 is not the same size as comm world\n" );
    }
    /* merge 2 isn't ordered the same way as the others, even for 2 processes */
    MPI_Comm_compare( merge2, MPI_COMM_WORLD, &result );
    if (result != MPI_SIMILAR) {
	errors ++;
	printf( "merge2 is not the same size as comm world\n" );
    }
    MPI_Comm_compare( merge3, MPI_COMM_WORLD, &result );
    if ((size > 2 && result != MPI_SIMILAR) ||
	(size == 2 && result != MPI_CONGRUENT)) {
	errors ++;
	printf( "merge3 is not the same size as comm world\n" );
    }
    MPI_Comm_compare( merge4, MPI_COMM_WORLD, &result );
    if ((size > 2 && result != MPI_SIMILAR) ||
	(size == 2 && result != MPI_CONGRUENT)) {
	errors ++;
	printf( "merge4 is not the same size as comm world\n" );
    }

	if (verbose) printf("[%d] MPI_Comm_compare completed!\n",rank);fflush(stdout);

    /* Free communicators */
    if (verbose) printf( "About to free communicators\n" );
    MPI_Comm_free( &myComm );
    MPI_Comm_free( &myFirstComm );
    MPI_Comm_free( &mySecondComm );
    MPI_Comm_free( &merge1 );
    MPI_Comm_free( &merge2 );
    MPI_Comm_free( &merge3 );
    MPI_Comm_free( &merge4 );

	if (verbose) printf("[%d] MPI_Comm_free completed!\n",rank);fflush(stdout);

  }
  else {
      errors ++;
      printf("[%d] Failed - at least 2 nodes must be used\n",rank);
  }

  MPI_Barrier( MPI_COMM_WORLD );
  MPI_Allreduce( &errors, &sum_errors, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
  if (sum_errors > 0) {
      printf( "%d errors on process %d\n", errors, rank );
      }
  else if (rank == 0) {
      printf( " No Errors\n" );
      }
  /* Finalize and end! */

  MPI_Finalize();
  return 0;
}
Example #17
ADCL_emethod_t *ADCL_emethod_init (ADCL_topology_t *t, ADCL_vector_t *v,
                                   ADCL_fnctset_t *f, int root )

{
    ADCL_emethod_t *e = NULL;
    ADCL_hypothesis_t *hypo = NULL;

    int i, ret=ADCL_SUCCESS;

    if ( ADCL_merge_requests && v != ADCL_VECTOR_NULL ) {
       int j, last, found=-1;
       int result;
       ADCL_topology_t *topo;
       ADCL_vector_t *vec;
       ADCL_vmap_t *vec_map, *v_map;

       /* Check first, whether we have an entry in the ADCL_emethods_array,
          which fulfills already our requirements;
       */
       last = ADCL_array_get_last ( ADCL_emethod_array );
       for ( i=0; i<= last; i++ ) {
          e = ( ADCL_emethod_t * ) ADCL_array_get_ptr_by_pos (
          ADCL_emethod_array, i );
	  if ( NULL == e ) {
	      continue;
	  }
          topo = e->em_topo;
          vec  = e->em_vec;
          if ( ADCL_VECTOR_NULL == vec  ) {
              continue;
          }

          MPI_Comm_compare ( topo->t_comm, t->t_comm, &result );
          if ( ( result != MPI_IDENT) && (result != MPI_CONGRUENT) ) {
              continue;
          }
          vec_map = vec->v_map;
	  v_map   = v->v_map;

          if ( ( e->em_orgfnctset != f )          ||
               ( topo->t_ndims   != t->t_ndims  ) ||
               ( topo->t_nneigh  != t->t_nneigh ) ||
               ( vec->v_ndims    != v->v_ndims  ) ||
               ( vec->v_nc       != v->v_nc     ) ||
               ( vec_map->m_vectype != v_map->m_vectype ) || 
               ( e->em_root != root ) ) {
	       continue;
          }

          for ( j=0 ; j<vec->v_ndims; j++ ){
             if ( vec->v_dims[j] != v->v_dims[j] ) {
                goto nextemethod;
             }
	  }

          switch (vec_map->m_vectype) {
	  case ADCL_VECTOR_HALO:
             if ( vec_map->m_hwidth != v_map->m_hwidth  ) {
	        continue;
	     }

             for ( j=0; j< (2*topo->t_nneigh); j++ ) {
                if ( topo->t_neighbors[j] != t->t_neighbors [j] ) {
                   goto nextemethod;
	        }
             }
	     break;
	  case ADCL_VECTOR_LIST:
             for ( j=0; j<topo->t_size; j++){
                if ((vec_map->m_rcnts[j] != v_map->m_rcnts[j]) || 
	           (vec_map->m_displ[j] != v_map->m_rcnts[j])) {
	          goto nextemethod;
	        }
             }
	     break;
	  }

          /* all tests OK */
          found = i;
          break;
nextemethod:
          continue;
       }

       if ( found > -1 ) {
           e->em_rfcnt++;
           return e;
       }
    }
    /* we did not find this configuration yet, so we have to add it */
    e = ( ADCL_emethod_t *) calloc (1, sizeof(ADCL_emethod_t));
    if ( NULL == e ) {
        return NULL;
    }

    ADCL_array_get_next_free_pos ( ADCL_emethod_array, &e->em_findex );
    ADCL_array_set_element ( ADCL_emethod_array, e->em_findex,
                 e->em_id, e );

    e->em_id          = ADCL_local_id_counter++;
    e->em_rfcnt       = 1;
    e->em_state       = ADCL_STATE_TESTING;
    e->em_explored_hist = -2;
    e->em_topo        = t;
    e->em_vec         = v;
    e->em_root        = root;
    if ( NULL != v && ADCL_VECTOR_NULL != v) {
       ADCL_vector_add_reference(v);
    }
    e->em_orgfnctset  = f;
    /*
    ** Set the algorithm for the selection logic. Set it to the default value
    ** if we have an attribute set. However, if no attributes are assigned to
    ** the functionset, we have to use the brute force algorithm.
    */
    if ( f->fs_attrset != NULL && f->fs_attrset != ADCL_ATTRSET_NULL ) {
	e->em_search_algo = ADCL_emethod_search_algo;
    }
    else {
       	e->em_search_algo = ADCL_BRUTE_FORCE;
    }

    /*
    ** Generate a duplicate of the functions which we will work with.
    ** The reason is that the list of functions etc. might be modified
    ** during the runtime optimization. We do not want to delete the original
    ** set, since it might be used by multiple requests/emethods *and*
    ** we might need the original list again when a re-evaluation has been
    ** initiated.
    */
    ADCL_fnctset_dup ( f, &(e->em_fnctset));
    ADCL_statistics_create ( &(e->em_stats), f->fs_maxnum, 1  ); 
    ADCL_statistics_create ( &(e->em_orgstats), f->fs_maxnum, 0  ); 


    DISPLAY((ADCL_DISPLAY_CHANGE_FUNCTION,e->em_id,e->em_fnctset.fs_fptrs[0]->f_id,e->em_fnctset.fs_fptrs[0]->f_name));
    /* initiate the performance hypothesis structure */
    if ( ADCL_PERF_HYPO == e->em_search_algo ) {
        ADCL_hypothesis_init ( e );
    } 
    else if ( ADCL_TWOK_FACTORIAL == e->em_search_algo ) {
	ADCL_twok_init ( e );
    }
    /* for verification runs */
    if ( 0 == strcmp ( f->fs_name , "Neighborhood communication") ) {
        if ( -1 != ADCL_emethod_selection ) {
            e->em_state = ADCL_STATE_REGULAR;
            e->em_wfunction = ADCL_emethod_get_function ( e, ADCL_emethod_selection );
        }
    }
    if ( 0 == strcmp ( f->fs_name , "AllGatherV") ) {
        if ( -1 != ADCL_emethod_allgatherv_selection ) {
            e->em_state = ADCL_STATE_REGULAR;
            e->em_wfunction = ADCL_emethod_get_function ( e, ADCL_emethod_allgatherv_selection );
        }
    }
    if ( 0 == strcmp ( f->fs_name , "AllReduce") ) {
        if ( -1 != ADCL_emethod_allreduce_selection ) {
            e->em_state = ADCL_STATE_REGULAR;
            e->em_wfunction = ADCL_emethod_get_function ( e, ADCL_emethod_allreduce_selection );
        }
    }
    if ( 0 == strcmp ( f->fs_name , "Reduce") ) {
        if ( -1 != ADCL_emethod_reduce_selection ) {
            e->em_state = ADCL_STATE_REGULAR;
            e->em_wfunction = ADCL_emethod_get_function ( e, ADCL_emethod_reduce_selection );
        }
    }
    if ( 0 == strcmp ( f->fs_name , "Alltoall") ) {
        if ( -1 != ADCL_emethod_alltoall_selection ) {
            e->em_state = ADCL_STATE_REGULAR;
            e->em_wfunction = ADCL_emethod_get_function ( e, ADCL_emethod_alltoall_selection );
        }
    }
    if ( 0 == strcmp ( f->fs_name , "Alltoallv") ) {
        if ( -1 != ADCL_emethod_alltoallv_selection ) {
            e->em_state = ADCL_STATE_REGULAR;
            e->em_wfunction = ADCL_emethod_get_function ( e, ADCL_emethod_alltoallv_selection );
        }
    }

    /* History list initialization */
    e->em_hist_list =  (ADCL_hist_list_t *)calloc(1, sizeof(ADCL_hist_list_t));
    if ( NULL ==  e->em_hist_list ) {
        ret = ADCL_NO_MEMORY;
        goto exit;
    }
    /* Initialize history entries count to 0 */
    e->em_hist_cnt = 0;
 exit:
    if ( ret != ADCL_SUCCESS  ) {
        ADCL_statistics_free ( &(e->em_stats), f->fs_maxnum );

        /* hypo is never assigned in this routine, so guard against a NULL
           dereference in this error path */
        if ( NULL != hypo ) {
            if ( NULL != hypo->h_attr_hypothesis ) {
                free ( hypo->h_attr_hypothesis );
            }
            if ( NULL != hypo->h_attr_confidence ) {
                free ( hypo->h_attr_confidence );
            }
            if ( NULL != hypo->h_curr_attrvals ) {
                free ( hypo->h_curr_attrvals );
            }
        }

        ADCL_array_remove_element ( ADCL_emethod_array, e->em_findex );
        free ( e );
        e = NULL;
    }

    return e;
}
Example #18
void ARMCI_Bcast_(void *buffer, int len, int root, ARMCI_Comm comm) {
    int result;
    MPI_Comm_compare(comm, MPI_COMM_WORLD, &result);
    if(result == MPI_IDENT)  armci_msg_brdcst(buffer, len, root); 
    else MPI_Bcast(buffer, len, MPI_BYTE, root, (MPI_Comm)comm);
}
Example #19
void declareBindings (void)
{
  /* === Point-to-point === */
  void* buf;
  int count;
  MPI_Datatype datatype;
  int dest;
  int tag;
  MPI_Comm comm;
  MPI_Send (buf, count, datatype, dest, tag, comm); // L12
  int source;
  MPI_Status status;
  MPI_Recv (buf, count, datatype, source, tag, comm, &status); // L15
  MPI_Get_count (&status, datatype, &count);
  MPI_Bsend (buf, count, datatype, dest, tag, comm);
  MPI_Ssend (buf, count, datatype, dest, tag, comm);
  MPI_Rsend (buf, count, datatype, dest, tag, comm);
  void* buffer;
  int size;
  MPI_Buffer_attach (buffer, size); // L22
  MPI_Buffer_detach (buffer, &size);
  MPI_Request request;
  MPI_Isend (buf, count, datatype, dest, tag, comm, &request); // L25
  MPI_Ibsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Issend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irecv (buf, count, datatype, source, tag, comm, &request);
  MPI_Wait (&request, &status);
  int flag;
  MPI_Test (&request, &flag, &status); // L32
  MPI_Request_free (&request);
  MPI_Request* array_of_requests;
  int index;
  MPI_Waitany (count, array_of_requests, &index, &status); // L36
  MPI_Testany (count, array_of_requests, &index, &flag, &status);
  MPI_Status* array_of_statuses;
  MPI_Waitall (count, array_of_requests, array_of_statuses); // L39
  MPI_Testall (count, array_of_requests, &flag, array_of_statuses);
  int incount;
  int outcount;
  int* array_of_indices;
  MPI_Waitsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L44--45
  MPI_Testsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L46--47
  MPI_Iprobe (source, tag, comm, &flag, &status); // L48
  MPI_Probe (source, tag, comm, &status);
  MPI_Cancel (&request);
  MPI_Test_cancelled (&status, &flag);
  MPI_Send_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Bsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Ssend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Rsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Recv_init (buf, count, datatype, source, tag, comm, &request);
  MPI_Start (&request);
  MPI_Startall (count, array_of_requests);
  void* sendbuf;
  int sendcount;
  MPI_Datatype sendtype;
  int sendtag;
  void* recvbuf;
  int recvcount;
  MPI_Datatype recvtype;
  int recvtag;              /* a message tag is an int, not an MPI_Datatype */
  MPI_Sendrecv (sendbuf, sendcount, sendtype, dest, sendtag,
		recvbuf, recvcount, recvtype, source, recvtag,
		comm, &status); // L67--69
  MPI_Sendrecv_replace (buf, count, datatype, dest, sendtag, source, recvtag,
			comm, &status); // L70--71
  MPI_Datatype oldtype;
  MPI_Datatype newtype;
  MPI_Type_contiguous (count, oldtype, &newtype); // L74
  int blocklength;
  {
    int stride;
    MPI_Type_vector (count, blocklength, stride, oldtype, &newtype); // L78
  }
  {
    MPI_Aint stride;
    MPI_Type_hvector (count, blocklength, stride, oldtype, &newtype); // L82
  }
  int* array_of_blocklengths;
  {
    int* array_of_displacements;
    MPI_Type_indexed (count, array_of_blocklengths, array_of_displacements,
		      oldtype, &newtype); // L87--88
  }
  {
    MPI_Aint* array_of_displacements;
    MPI_Type_hindexed (count, array_of_blocklengths, array_of_displacements,
                       oldtype, &newtype); // L92--93
    MPI_Datatype* array_of_types;
    MPI_Type_struct (count, array_of_blocklengths, array_of_displacements,
                     array_of_types, &newtype); // L95--96
  }
  void* location;
  MPI_Aint address;
  MPI_Address (location, &address); // L100
  MPI_Aint extent;
  MPI_Type_extent (datatype, &extent); // L102
  MPI_Type_size (datatype, &size);
  MPI_Aint displacement;
  MPI_Type_lb (datatype, &displacement); // L105
  MPI_Type_ub (datatype, &displacement);
  MPI_Type_commit (&datatype);
  MPI_Type_free (&datatype);
  MPI_Get_elements (&status, datatype, &count);
  void* inbuf;
  void* outbuf;
  int outsize;
  int position;
  MPI_Pack (inbuf, incount, datatype, outbuf, outsize, &position, comm); // L114
  int insize;
  MPI_Unpack (inbuf, insize, &position, outbuf, outcount, datatype,
	      comm); // L116--117
  MPI_Pack_size (incount, datatype, comm, &size);

  /* === Collectives === */
  MPI_Barrier (comm); // L121
  int root;
  MPI_Bcast (buffer, count, datatype, root, comm); // L123
  MPI_Gather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
	      root, comm); // L124--125
  int* recvcounts;
  int* displs;
  MPI_Gatherv (sendbuf, sendcount, sendtype,
               recvbuf, recvcounts, displs, recvtype,
	       root, comm); // L128--130
  MPI_Scatter (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
               root, comm); // L131--132
  int* sendcounts;
  MPI_Scatterv (sendbuf, sendcounts, displs, sendtype,
		recvbuf, recvcount, recvtype, root, comm); // L134--135
  MPI_Allgather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
                 comm); // L136--137
  MPI_Allgatherv (sendbuf, sendcount, sendtype,
		  recvbuf, recvcounts, displs, recvtype,
		  comm); // L138--140
  MPI_Alltoall (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
		comm); // L141--142
  int* sdispls;
  int* rdispls;
  MPI_Alltoallv (sendbuf, sendcounts, sdispls, sendtype,
                 recvbuf, recvcounts, rdispls, recvtype,
		 comm); // L145--147
  MPI_Op op;
  MPI_Reduce (sendbuf, recvbuf, count, datatype, op, root, comm); // L149
#if 0
  MPI_User_function function;
  int commute;
  MPI_Op_create (function, commute, &op); // L153
#endif
  MPI_Op_free (&op); // L155
  MPI_Allreduce (sendbuf, recvbuf, count, datatype, op, comm);
  MPI_Reduce_scatter (sendbuf, recvbuf, recvcounts, datatype, op, comm);
  MPI_Scan (sendbuf, recvbuf, count, datatype, op, comm);

  /* === Groups, contexts, and communicators === */
  MPI_Group group;
  MPI_Group_size (group, &size); // L162
  int rank;
  MPI_Group_rank (group, &rank); // L164
  MPI_Group group1;
  int n;
  int* ranks1;
  MPI_Group group2;
  int* ranks2;
  MPI_Group_translate_ranks (group1, n, ranks1, group2, ranks2); // L170
  int result;
  MPI_Group_compare (group1, group2, &result); // L172
  MPI_Group newgroup;
  MPI_Group_union (group1, group2, &newgroup); // L174
  MPI_Group_intersection (group1, group2, &newgroup);
  MPI_Group_difference (group1, group2, &newgroup);
  int* ranks;
  MPI_Group_incl (group, n, ranks, &newgroup); // L178
  MPI_Group_excl (group, n, ranks, &newgroup);
  extern int ranges[][3];
  MPI_Group_range_incl (group, n, ranges, &newgroup); // L181
  MPI_Group_range_excl (group, n, ranges, &newgroup);
  MPI_Group_free (&group);
  MPI_Comm_size (comm, &size);
  MPI_Comm_rank (comm, &rank);
  MPI_Comm comm1;
  MPI_Comm comm2;
  MPI_Comm_compare (comm1, comm2, &result);
  MPI_Comm newcomm;
  MPI_Comm_dup (comm, &newcomm);
  MPI_Comm_create (comm, group, &newcomm);
  int color;
  int key;
  MPI_Comm_split (comm, color, key, &newcomm); // L194
  MPI_Comm_free (&comm);
  MPI_Comm_test_inter (comm, &flag);
  MPI_Comm_remote_size (comm, &size);
  MPI_Comm_remote_group (comm, &group);
  MPI_Comm local_comm;
  int local_leader;
  MPI_Comm peer_comm;
  int remote_leader;
  MPI_Comm newintercomm;
  MPI_Intercomm_create (local_comm, local_leader, peer_comm, remote_leader, tag,
			&newintercomm); // L204--205
  MPI_Comm intercomm;
  MPI_Comm newintracomm;
  int high;
  MPI_Intercomm_merge (intercomm, high, &newintracomm); // L209
  int keyval;
#if 0
  MPI_Copy_function copy_fn;
  MPI_Delete_function delete_fn;
  void* extra_state;
  MPI_Keyval_create (copy_fn, delete_fn, &keyval, extra_state); // L215
#endif
  MPI_Keyval_free (&keyval); // L217
  void* attribute_val;
  MPI_Attr_put (comm, keyval, attribute_val); // L219
  MPI_Attr_get (comm, keyval, attribute_val, &flag);
  MPI_Attr_delete (comm, keyval);

  /* === Environmental inquiry === */
  char* name;
  int resultlen;
  MPI_Get_processor_name (name, &resultlen); // L226
  MPI_Errhandler errhandler;
#if 0
  MPI_Handler_function function;
  MPI_Errhandler_create (function, &errhandler); // L230
#endif
  MPI_Errhandler_set (comm, errhandler); // L232
  MPI_Errhandler_get (comm, &errhandler);
  MPI_Errhandler_free (&errhandler);
  int errorcode;
  char* string;
  MPI_Error_string (errorcode, string, &resultlen); // L237
  int errorclass;
  MPI_Error_class (errorcode, &errorclass); // L239
  MPI_Wtime ();
  MPI_Wtick ();
  int argc;
  char** argv;
  MPI_Init (&argc, &argv); // L244
  MPI_Finalize ();
  MPI_Initialized (&flag);
  MPI_Abort (comm, errorcode);
}
Example #20
/*
 * Increase overlap for the sub-matrix across sub communicator
 * sub-matrix could be a graph or numerical matrix
 * */
PetscErrorCode  MatIncreaseOverlapSplit_Single(Mat mat,IS *is,PetscInt ov)
{
  PetscInt         i,nindx,*indices_sc,*indices_ov,localsize,*localsizes_sc,localsize_tmp;
  PetscInt         *indices_ov_rd,nroots,nleaves,*localoffsets,*indices_recv,*sources_sc,*sources_sc_rd;
  const PetscInt   *indices;
  PetscMPIInt      srank,ssize,issamecomm,k,grank;
  IS               is_sc,allis_sc,partitioning;
  MPI_Comm         gcomm,dcomm,scomm;
  PetscSF          sf;
  PetscSFNode      *remote;
  Mat              *smat;
  MatPartitioning  part;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  /* get a sub communicator before calling the individual MatIncreaseOverlap,
   * since the sub communicator may be changed.
   * */
  ierr = PetscObjectGetComm((PetscObject)(*is),&dcomm);CHKERRQ(ierr);
  /*make a copy before the original one is deleted*/
  ierr = PetscCommDuplicate(dcomm,&scomm,NULL);CHKERRQ(ierr);
  /*get a global communicator, where mat should be a global matrix  */
  ierr = PetscObjectGetComm((PetscObject)mat,&gcomm);CHKERRQ(ierr);
  /*increase overlap on each individual subdomain*/
  ierr = (*mat->ops->increaseoverlap)(mat,1,is,ov);CHKERRQ(ierr);
  /*compare communicators */
  ierr = MPI_Comm_compare(gcomm,scomm,&issamecomm);CHKERRQ(ierr);
  /* if the sub-communicator is the same as the global communicator,
   * user does not want to use a sub-communicator
   * */
  if(issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT) PetscFunctionReturn(0);
  /* if the sub-communicator is PETSC_COMM_SELF,
   * the user also does not care about the sub-communicator
   * */
  ierr = MPI_Comm_compare(scomm,PETSC_COMM_SELF,&issamecomm);CHKERRQ(ierr);
  if(issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT){PetscFunctionReturn(0);}
  /*local rank, size in a sub-communicator  */
  ierr = MPI_Comm_rank(scomm,&srank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(scomm,&ssize);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(gcomm,&grank);CHKERRQ(ierr);
  /*create a new IS based on sub-communicator
   * since the old IS is often based on petsc_comm_self
   * */
  ierr = ISGetLocalSize(*is,&nindx);CHKERRQ(ierr);
  ierr = PetscCalloc1(nindx,&indices_sc);CHKERRQ(ierr);
  ierr = ISGetIndices(*is,&indices);CHKERRQ(ierr);
  ierr = PetscMemcpy(indices_sc,indices,sizeof(PetscInt)*nindx);CHKERRQ(ierr);
  ierr = ISRestoreIndices(*is,&indices);CHKERRQ(ierr);
  /*we do not need any more*/
  ierr = ISDestroy(is);CHKERRQ(ierr);
  /*create a index set based on the sub communicator  */
  ierr = ISCreateGeneral(scomm,nindx,indices_sc,PETSC_OWN_POINTER,&is_sc);CHKERRQ(ierr);
  /*gather all indices within  the sub communicator*/
  ierr = ISAllGather(is_sc,&allis_sc);CHKERRQ(ierr);
  ierr = ISDestroy(&is_sc);CHKERRQ(ierr);
  /* gather local sizes */
  ierr = PetscMalloc1(ssize,&localsizes_sc);CHKERRQ(ierr);
  /*get individual local sizes for all index sets*/
  ierr = MPI_Gather(&nindx,1,MPIU_INT,localsizes_sc,1,MPIU_INT,0,scomm);CHKERRQ(ierr);
  /*only root does these computations */
  if(!srank){
   /*get local size for the big index set*/
   ierr = ISGetLocalSize(allis_sc,&localsize);CHKERRQ(ierr);
   ierr = PetscCalloc2(localsize,&indices_ov,localsize,&sources_sc);CHKERRQ(ierr);
   ierr = PetscCalloc2(localsize,&indices_ov_rd,localsize,&sources_sc_rd);CHKERRQ(ierr);
   ierr = ISGetIndices(allis_sc,&indices);CHKERRQ(ierr);
   ierr = PetscMemcpy(indices_ov,indices,sizeof(PetscInt)*localsize);CHKERRQ(ierr);
   ierr = ISRestoreIndices(allis_sc,&indices);CHKERRQ(ierr);
   /*we do not need it any more */
   ierr = ISDestroy(&allis_sc);CHKERRQ(ierr);
   /*assign corresponding sources */
   localsize_tmp = 0;
   for(k=0; k<ssize; k++){
     for(i=0; i<localsizes_sc[k]; i++){
       sources_sc[localsize_tmp++] = k;
     }
   }
   /*record where indices come from */
   ierr = PetscSortIntWithArray(localsize,indices_ov,sources_sc);CHKERRQ(ierr);
   /*count local sizes for reduced indices */
   ierr = PetscMemzero(localsizes_sc,sizeof(PetscInt)*ssize);CHKERRQ(ierr);
   /*initialize the first entity*/
   if(localsize){
	 indices_ov_rd[0] = indices_ov[0];
	 sources_sc_rd[0] = sources_sc[0];
	 localsizes_sc[sources_sc[0]]++;
   }
   localsize_tmp = 1;
   /*remove duplicate integers */
   for(i=1; i<localsize; i++){
	 if(indices_ov[i] != indices_ov[i-1]){
	   indices_ov_rd[localsize_tmp]   = indices_ov[i];
	   sources_sc_rd[localsize_tmp++] = sources_sc[i];
	   localsizes_sc[sources_sc[i]]++;
	 }
   }
   ierr = PetscFree2(indices_ov,sources_sc);CHKERRQ(ierr);
   ierr = PetscCalloc1(ssize+1,&localoffsets);CHKERRQ(ierr);
   for(k=0; k<ssize; k++){
	 localoffsets[k+1] = localoffsets[k] + localsizes_sc[k];
   }
   /*construct a star forest to send data back */
   nleaves = localoffsets[ssize];
   ierr = PetscMemzero(localoffsets,(ssize+1)*sizeof(PetscInt));CHKERRQ(ierr);
   nroots  = localsizes_sc[srank];
   ierr = PetscCalloc1(nleaves,&remote);CHKERRQ(ierr);
   for(i=0; i<nleaves; i++){
	 remote[i].rank  = sources_sc_rd[i];
	 remote[i].index = localoffsets[sources_sc_rd[i]]++;
   }
   ierr = PetscFree(localoffsets);CHKERRQ(ierr);
  }else{
   ierr = ISDestroy(&allis_sc);CHKERRQ(ierr);
   /*Allocate a 'zero' pointer */
   ierr = PetscCalloc1(0,&remote);CHKERRQ(ierr);
   nleaves = 0;
   indices_ov_rd = 0;
   sources_sc_rd = 0;
  }
  /*scatter sizes to everybody */
  ierr = MPI_Scatter(localsizes_sc,1, MPIU_INT,&nroots,1, MPIU_INT,0,scomm);CHKERRQ(ierr);
  /*free memory */
  ierr = PetscFree(localsizes_sc);CHKERRQ(ierr);
  ierr = PetscCalloc1(nroots,&indices_recv);CHKERRQ(ierr);
  /*ierr = MPI_Comm_dup(scomm,&dcomm);CHKERRQ(ierr);*/
  /*set data back to every body */
  ierr = PetscSFCreate(scomm,&sf);CHKERRQ(ierr);
  ierr = PetscSFSetType(sf,PETSCSFBASIC);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(sf,nroots,nleaves,PETSC_NULL,PETSC_OWN_POINTER,remote,PETSC_OWN_POINTER);CHKERRQ(ierr);
  ierr = PetscSFReduceBegin(sf,MPIU_INT,indices_ov_rd,indices_recv,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(sf,MPIU_INT,indices_ov_rd,indices_recv,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
  /* free memory */
  ierr = PetscFree2(indices_ov_rd,sources_sc_rd);CHKERRQ(ierr);
  /*create a index set*/
  ierr = ISCreateGeneral(scomm,nroots,indices_recv,PETSC_OWN_POINTER,&is_sc);CHKERRQ(ierr);
  /*construct a parallel submatrix */
  ierr = MatGetSubMatricesMPI(mat,1,&is_sc,&is_sc,MAT_INITIAL_MATRIX,&smat);CHKERRQ(ierr);
  /* we do not need them any more */
  ierr = ISDestroy(&allis_sc);CHKERRQ(ierr);
  /*create a partitioner to repartition the sub-matrix*/
  ierr = MatPartitioningCreate(scomm,&part);CHKERRQ(ierr);
  ierr = MatPartitioningSetAdjacency(part,smat[0]);CHKERRQ(ierr);
#if PETSC_HAVE_PARMETIS
  /* if there exists a ParMETIS installation, we try to use ParMETIS
   * because a repartition routine possibly work better
   * */
  ierr = MatPartitioningSetType(part,MATPARTITIONINGPARMETIS);CHKERRQ(ierr);
  /*try to use reparition function, instead of partition function */
  ierr = MatPartitioningParmetisSetRepartition(part);CHKERRQ(ierr);
#else
  /*we at least provide a default partitioner to rebalance the computation  */
  ierr = MatPartitioningSetType(part,MATPARTITIONINGAVERAGE);CHKERRQ(ierr);
#endif
  /*user can pick up any partitioner by using an option*/
  ierr = MatPartitioningSetFromOptions(part);CHKERRQ(ierr);
  /* apply partition */
  ierr = MatPartitioningApply(part,&partitioning);CHKERRQ(ierr);
  ierr = MatPartitioningDestroy(&part);CHKERRQ(ierr);
  ierr = MatDestroy(&(smat[0]));CHKERRQ(ierr);
  ierr = PetscFree(smat);CHKERRQ(ierr);
  /* get local rows including  overlap */
  ierr = ISBuildTwoSided(partitioning,is_sc,is);CHKERRQ(ierr);
  /* destroy */
  ierr = ISDestroy(&is_sc);CHKERRQ(ierr);
  ierr = ISDestroy(&partitioning);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&scomm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
Example #21
static PetscErrorCode MatGetSubMatrices_MPIAdj_Private(Mat mat,PetscInt n,const IS irow[],const IS icol[],PetscBool subcomm,MatReuse scall,Mat *submat[])
{
  PetscInt           i,irow_n,icol_n,*sxadj,*sadjncy,*svalues;
  PetscInt          *indices,nindx,j,k,loc;
  PetscMPIInt        issame;
  const PetscInt    *irow_indices,*icol_indices;
  MPI_Comm           scomm_row,scomm_col,scomm_mat;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  nindx = 0;
  /*
   * Estimate a maximum number for allocating memory
   */
  for(i=0; i<n; i++){
    ierr = ISGetLocalSize(irow[i],&irow_n);CHKERRQ(ierr);
    ierr = ISGetLocalSize(icol[i],&icol_n);CHKERRQ(ierr);
    nindx = nindx>(irow_n+icol_n)? nindx:(irow_n+icol_n);
  }
  ierr = PetscCalloc1(nindx,&indices);CHKERRQ(ierr);
  /* construct a submat */
  for(i=0; i<n; i++){
	/*comms */
    if(subcomm){
	  ierr = PetscObjectGetComm((PetscObject)irow[i],&scomm_row);CHKERRQ(ierr);
	  ierr = PetscObjectGetComm((PetscObject)icol[i],&scomm_col);CHKERRQ(ierr);
	  ierr = MPI_Comm_compare(scomm_row,scomm_col,&issame);CHKERRQ(ierr);
	  if(issame != MPI_IDENT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"row index set must have the same comm as the col index set\n");
	  ierr = MPI_Comm_compare(scomm_row,PETSC_COMM_SELF,&issame);CHKERRQ(ierr);
	  if(issame == MPI_IDENT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP," can not use PETSC_COMM_SELF as comm when extracting a parallel submatrix\n");
	}else{
	  scomm_row = PETSC_COMM_SELF;
	}
	/*get sub-matrix data*/
	sxadj=0; sadjncy=0; svalues=0;
    ierr = MatGetSubMatrix_MPIAdj_data(mat,irow[i],icol[i],&sxadj,&sadjncy,&svalues);CHKERRQ(ierr);
    ierr = ISGetLocalSize(irow[i],&irow_n);CHKERRQ(ierr);
    ierr = ISGetLocalSize(icol[i],&icol_n);CHKERRQ(ierr);
    ierr = ISGetIndices(irow[i],&irow_indices);CHKERRQ(ierr);
    ierr = PetscMemcpy(indices,irow_indices,sizeof(PetscInt)*irow_n);CHKERRQ(ierr);
    ierr = ISRestoreIndices(irow[i],&irow_indices);CHKERRQ(ierr);
    ierr = ISGetIndices(icol[i],&icol_indices);CHKERRQ(ierr);
    ierr = PetscMemcpy(indices+irow_n,icol_indices,sizeof(PetscInt)*icol_n);CHKERRQ(ierr);
    ierr = ISRestoreIndices(icol[i],&icol_indices);CHKERRQ(ierr);
    nindx = irow_n+icol_n;
    ierr = PetscSortRemoveDupsInt(&nindx,indices);CHKERRQ(ierr);
    /* renumber columns */
    for(j=0; j<irow_n; j++){
      for(k=sxadj[j]; k<sxadj[j+1]; k++){
    	ierr = PetscFindInt(sadjncy[k],nindx,indices,&loc);CHKERRQ(ierr);
#if PETSC_USE_DEBUG
    	if(loc<0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"can not find col %d \n",sadjncy[k]);
#endif
        sadjncy[k] = loc;
      }
    }
    if(scall==MAT_INITIAL_MATRIX){
      ierr = MatCreateMPIAdj(scomm_row,irow_n,icol_n,sxadj,sadjncy,svalues,submat[i]);CHKERRQ(ierr);
    }else{
       Mat                sadj = *(submat[i]);
       Mat_MPIAdj         *sa  = (Mat_MPIAdj*)((sadj)->data);
       ierr = PetscObjectGetComm((PetscObject)sadj,&scomm_mat);CHKERRQ(ierr);
       ierr = MPI_Comm_compare(scomm_row,scomm_mat,&issame);CHKERRQ(ierr);
       if(issame != MPI_IDENT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"submatrix  must have the same comm as the col index set\n");
       ierr = PetscMemcpy(sa->i,sxadj,sizeof(PetscInt)*(irow_n+1));CHKERRQ(ierr);
       ierr = PetscMemcpy(sa->j,sadjncy,sizeof(PetscInt)*sxadj[irow_n]);CHKERRQ(ierr);
       if(svalues){ierr = PetscMemcpy(sa->values,svalues,sizeof(PetscInt)*sxadj[irow_n]);CHKERRQ(ierr);}
       ierr = PetscFree(sxadj);CHKERRQ(ierr);
       ierr = PetscFree(sadjncy);CHKERRQ(ierr);
       if(svalues) {ierr = PetscFree(svalues);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(indices);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
Example #22
int test_communicators( void )
{
    MPI_Comm dup_comm_world, lo_comm, rev_comm, dup_comm, 
	split_comm, world_comm;
    MPI_Group world_group, lo_group, rev_group;
    void *vvalue;
    int ranges[1][3];
    int flag, world_rank, world_size, rank, size, n, key_1, key_3;
    int color, key, result;
    int errs = 0;
    MPI_Aint value;

    MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
    MPI_Comm_size( MPI_COMM_WORLD, &world_size );
#ifdef DEBUG
    if (world_rank == 0) {
	printf( "*** Communicators ***\n" ); fflush(stdout);
    }
#endif

    MPI_Comm_dup( MPI_COMM_WORLD, &dup_comm_world );

    /*
      Exercise Comm_create by creating an equivalent to dup_comm_world
      (sans attributes) and a half-world communicator.
    */

#ifdef DEBUG
    if (world_rank == 0) {
	printf( "    Comm_create\n" ); fflush(stdout);
    }
#endif

    MPI_Comm_group( dup_comm_world, &world_group );
    MPI_Comm_create( dup_comm_world, world_group, &world_comm );
    MPI_Comm_rank( world_comm, &rank );
    if (rank != world_rank) {
	errs++;
	printf( "incorrect rank in world comm: %d\n", rank );
	MPI_Abort(MPI_COMM_WORLD, 3001 );
    }

    n = world_size / 2;

    ranges[0][0] = 0;
    ranges[0][1] = (world_size - n) - 1;
    ranges[0][2] = 1;

#ifdef DEBUG
    printf( "world rank = %d before range incl\n", world_rank );FFLUSH;
#endif
    MPI_Group_range_incl(world_group, 1, ranges, &lo_group );
#ifdef DEBUG
    printf( "world rank = %d after range incl\n", world_rank );FFLUSH;
#endif
    MPI_Comm_create(world_comm, lo_group, &lo_comm );
#ifdef DEBUG
    printf( "world rank = %d before group free\n", world_rank );FFLUSH;
#endif
    MPI_Group_free( &lo_group );

#ifdef DEBUG
    printf( "world rank = %d after group free\n", world_rank );FFLUSH;
#endif

    if (world_rank < (world_size - n)) {
	MPI_Comm_rank(lo_comm, &rank );
	if (rank == MPI_UNDEFINED) {
	    errs++;
	    printf( "incorrect lo group rank: %d\n", rank ); fflush(stdout);
	    MPI_Abort(MPI_COMM_WORLD, 3002 );
	}
	else {
	    /* printf( "lo in\n" );FFLUSH; */
	    MPI_Barrier(lo_comm );
	    /* printf( "lo out\n" );FFLUSH; */
	}
    }
    else {
	if (lo_comm != MPI_COMM_NULL) {
	    errs++;
	    printf( "incorrect lo comm:\n" ); fflush(stdout);
	    MPI_Abort(MPI_COMM_WORLD, 3003 );
	}
    }

#ifdef DEBUG
    printf( "worldrank = %d\n", world_rank );FFLUSH;
#endif
    MPI_Barrier(world_comm);

#ifdef DEBUG
    printf( "bar!\n" );FFLUSH;
#endif
    /*
      Check Comm_dup by adding attributes to lo_comm & duplicating
    */
#ifdef DEBUG
    if (world_rank == 0) {
	printf( "    Comm_dup\n" );
	fflush(stdout);
    }
#endif
    
    if (lo_comm != MPI_COMM_NULL) {
	value = 9;
	MPI_Keyval_create(copy_fn,     delete_fn,   &key_1, &value );
	value = 8;
	value = 7;
	MPI_Keyval_create(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
			  &key_3, &value ); 

	/* This may generate a compilation warning; it is, however, an
	   easy way to cache a value instead of a pointer */
	/* printf( "key1 = %x key3 = %x\n", key_1, key_3 ); */
	MPI_Attr_put(lo_comm, key_1, (void *) (MPI_Aint) world_rank );
	MPI_Attr_put(lo_comm, key_3, (void *)0 );
	
	MPI_Comm_dup(lo_comm, &dup_comm );

	/* Note that if sizeof(int) < sizeof(void *), we can't use
	   (void **)&value to get the value we passed into Attr_put.  To avoid 
	   problems (e.g., alignment errors), we recover the value into 
	   a (void *) and cast to int. Note that this may generate warning
	   messages from the compiler.  */
	MPI_Attr_get(dup_comm, key_1, (void **)&vvalue, &flag );
	value = (MPI_Aint)vvalue;
	
	if (! flag) {
	    errs++;
	    printf( "dup_comm key_1 not found on %d\n", world_rank );
	    fflush( stdout );
	    MPI_Abort(MPI_COMM_WORLD, 3004 );
	}
	
	if (value != world_rank) {
	    errs++;
	    printf( "dup_comm key_1 value incorrect: %ld, expected %d\n", 
		    (long)value, world_rank );
	    fflush( stdout );
	    MPI_Abort(MPI_COMM_WORLD, 3005 );
	}

	MPI_Attr_get(dup_comm, key_3, (void **)&vvalue, &flag );
	value = (MPI_Aint)vvalue;
	if (flag) {
	    errs++;
	    printf( "dup_comm key_3 found!\n" );
	    fflush( stdout );
	    MPI_Abort(MPI_COMM_WORLD, 3008 );
	}
	MPI_Keyval_free(&key_1 );
	MPI_Keyval_free(&key_3 );
    }
    /* 
       Split the world into even & odd communicators with reversed ranks.
    */
#ifdef DEBUG
    if (world_rank == 0) {
	printf( "    Comm_split\n" );
	fflush(stdout);
    }
#endif
    
    color = world_rank % 2;
    key   = world_size - world_rank;
    
    MPI_Comm_split(dup_comm_world, color, key, &split_comm );
    MPI_Comm_size(split_comm, &size );
    MPI_Comm_rank(split_comm, &rank );
    if (rank != ((size - world_rank/2) - 1)) {
	errs++;
	printf( "incorrect split rank: %d\n", rank ); fflush(stdout);
	MPI_Abort(MPI_COMM_WORLD, 3009 );
    }
    
    MPI_Barrier(split_comm );
    /*
      Test each possible Comm_compare result
    */
#ifdef DEBUG
    if (world_rank == 0) {
	printf( "    Comm_compare\n" );
	fflush(stdout);
    }
#endif
    
    MPI_Comm_compare(world_comm, world_comm, &result );
    if (result != MPI_IDENT) {
	errs++;
	printf( "incorrect ident result: %d\n", result );
	MPI_Abort(MPI_COMM_WORLD, 3010 );
    }
    
    if (lo_comm != MPI_COMM_NULL) {
	MPI_Comm_compare(lo_comm, dup_comm, &result );
	if (result != MPI_CONGRUENT) {
	    errs++;
            printf( "incorrect congruent result: %d\n", result );
            MPI_Abort(MPI_COMM_WORLD, 3011 );
	}
    }
    
    ranges[0][0] = world_size - 1;
    ranges[0][1] = 0;
    ranges[0][2] = -1;

    MPI_Group_range_incl(world_group, 1, ranges, &rev_group );
    MPI_Comm_create(world_comm, rev_group, &rev_comm );

    MPI_Comm_compare(world_comm, rev_comm, &result );
    if (result != MPI_SIMILAR && world_size != 1) {
	errs++;
	printf( "incorrect similar result: %d\n", result );
	MPI_Abort(MPI_COMM_WORLD, 3012 );
    }
    
    if (lo_comm != MPI_COMM_NULL) {
	MPI_Comm_compare(world_comm, lo_comm, &result );
	if (result != MPI_UNEQUAL && world_size != 1) {
	    errs++;
	    printf( "incorrect unequal result: %d\n", result );
	    MPI_Abort(MPI_COMM_WORLD, 3013 );
	}
    }
    /*
      Free all communicators created
    */
#ifdef DEBUG
    if (world_rank == 0) 
	printf( "    Comm_free\n" );
#endif
    
    MPI_Comm_free( &world_comm );
    MPI_Comm_free( &dup_comm_world );
    
    MPI_Comm_free( &rev_comm );
    MPI_Comm_free( &split_comm );
    
    MPI_Group_free( &world_group );
    MPI_Group_free( &rev_group );
    
    if (lo_comm != MPI_COMM_NULL) {
        MPI_Comm_free( &lo_comm );
        MPI_Comm_free( &dup_comm );
    }
    
    return errs;
}
Example #23
FORT_DLL_SPEC void FORT_CALL mpi_comm_compare_ ( MPI_Fint *v1, MPI_Fint *v2, MPI_Fint *v3, MPI_Fint *ierr ){
    *ierr = MPI_Comm_compare( (MPI_Comm)(*v1), (MPI_Comm)(*v2), v3 );
}