PetscErrorCode PetscSubcommCreate_interlaced(PetscSubcomm psubcomm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,*subsize,duprank,subrank;
  PetscMPIInt    np_subcomm,nleftover,i,j,color,nsubcomm=psubcomm->n;
  MPI_Comm       subcomm=0,dupcomm=0,comm=psubcomm->parent;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* get size of each subcommunicator */
  ierr = PetscMalloc((1+nsubcomm)*sizeof(PetscMPIInt),&subsize);CHKERRQ(ierr);

  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++) {
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }

  /* find color for this proc */
  color   = rank%nsubcomm;
  subrank = rank/nsubcomm;

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  j = 0; duprank = 0;
  for (i=0; i<nsubcomm; i++) {
    if (j == color) {
      duprank += subrank;
      break;
    }
    duprank += subsize[i]; j++;
  }

  /* create dupcomm with same size as comm, but its rank, duprank, maps subcomm's contiguously into dupcomm */
  ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);
  {
    PetscThreadComm tcomm;
    ierr = PetscCommGetThreadComm(comm,&tcomm);CHKERRQ(ierr);
    ierr = MPI_Attr_put(dupcomm,Petsc_ThreadComm_keyval,tcomm);CHKERRQ(ierr);
    tcomm->refct++;
    ierr = MPI_Attr_put(subcomm,Petsc_ThreadComm_keyval,tcomm);CHKERRQ(ierr);
    tcomm->refct++;
  }
  ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,NULL);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(subcomm,&psubcomm->comm,NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);

  psubcomm->color   = color;
  psubcomm->subsize = subsize;
  psubcomm->type    = PETSC_SUBCOMM_INTERLACED;
  PetscFunctionReturn(0);
}
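
A minimal standalone sketch (plain MPI, outside PETSc) of the interlaced coloring used above: with color = rank % nsubcomm, world ranks 0, nsubcomm, 2*nsubcomm, ... end up in subcommunicator 0, ranks 1, nsubcomm+1, ... in subcommunicator 1, and so on.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
  int rank, subrank, nsubcomm = 2;
  MPI_Comm subcomm;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  /* color selects the subcommunicator; the key rank/nsubcomm keeps the
     original relative order within it */
  MPI_Comm_split(MPI_COMM_WORLD, rank % nsubcomm, rank / nsubcomm, &subcomm);
  MPI_Comm_rank(subcomm, &subrank);
  printf("world rank %d -> color %d, subrank %d\n", rank, rank % nsubcomm, subrank);

  MPI_Comm_free(&subcomm);
  MPI_Finalize();
  return 0;
}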
Example #2
int test_communicators( void )
{
    MPI_Comm dup_comm_world, d2;
    ptrdiff_t world_rank;
    int world_size, key_1;
    int err;
    MPI_Aint value;
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    world_rank=rank;
    MPI_Comm_size( MPI_COMM_WORLD, &world_size );
    if (world_rank == 0) {
	printf( "*** Attribute copy/delete return codes ***\n" );
    }

    MPI_Comm_dup( MPI_COMM_WORLD, &dup_comm_world );
    MPI_Barrier( dup_comm_world );

    MPI_Errhandler_set( dup_comm_world, MPI_ERRORS_RETURN );

    value = - 11;
    if ((err=MPI_Keyval_create( copybomb_fn, deletebomb_fn, &key_1, &value )))
	abort_msg( "Keyval_create", err );

    err = MPI_Attr_put( dup_comm_world, key_1, (void *)world_rank );
    if (err) {
	printf( "Error with first put\n" );
    }

    err = MPI_Attr_put( dup_comm_world, key_1, (void *)(2*world_rank) );
    if (err == MPI_SUCCESS) {
	printf( "delete function return code was MPI_SUCCESS in put\n" );
    }

    /* Because the attribute delete function should fail, the attribute
       should *not be removed* */
    err = MPI_Attr_delete( dup_comm_world, key_1 );
    if (err == MPI_SUCCESS) {
	printf( "delete function return code was MPI_SUCCESS in delete\n" );
    }
    
    err = MPI_Comm_dup( dup_comm_world, &d2 );
    if (err == MPI_SUCCESS) {
	printf( "copy function return code was MPI_SUCCESS in dup\n" );
    }
    if (err && d2 != MPI_COMM_NULL) {
	printf( "dup did not return MPI_COMM_NULL on error\n" );
    }

    delete_flag = 1;
    MPI_Comm_free( &dup_comm_world );

    return 0;
}
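
The helpers referenced above (copybomb_fn, deletebomb_fn, delete_flag, abort_msg) are defined elsewhere in the test file; a hypothetical sketch of what they might look like, so the failure paths being exercised are visible:

#include <mpi.h>
#include <stdio.h>

/* hypothetical reconstruction, not the test suite's actual definitions */
static int delete_flag = 0;

/* copy callback that always fails, so MPI_Comm_dup must report an error */
static int copybomb_fn(MPI_Comm oldcomm, int keyval, void *extra_state,
                       void *attribute_val_in, void *attribute_val_out, int *flag)
{
    *flag = 0;
    return MPI_ERR_OTHER;
}

/* delete callback that fails until delete_flag is set, so the attribute
   cannot be removed or overwritten prematurely */
static int deletebomb_fn(MPI_Comm comm, int keyval, void *attribute_val,
                         void *extra_state)
{
    return delete_flag ? MPI_SUCCESS : MPI_ERR_OTHER;
}

static void abort_msg(const char *str, int code)
{
    fprintf(stderr, "%s, err = %d\n", str, code);
    MPI_Abort(MPI_COMM_WORLD, code);
}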
PetscErrorCode PetscSubcommCreate_contiguous(PetscSubcomm psubcomm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,*subsize,duprank=-1,subrank=-1;
  PetscMPIInt    np_subcomm,nleftover,i,color=-1,rankstart,nsubcomm=psubcomm->n;
  MPI_Comm       subcomm=0,dupcomm=0,comm=psubcomm->parent;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* get size of each subcommunicator */
  ierr = PetscMalloc((1+nsubcomm)*sizeof(PetscMPIInt),&subsize);CHKERRQ(ierr);

  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++) {
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }

  /* get color and subrank of this proc */
  rankstart = 0;
  for (i=0; i<nsubcomm; i++) {
    if (rank >= rankstart && rank < rankstart+subsize[i]) {
      color   = i;
      subrank = rank - rankstart;
      duprank = rank;
      break;
    } else rankstart += subsize[i];
  }

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  /* create dupcomm with same size as comm, but its rank, duprank, maps subcomm's contiguously into dupcomm */
  ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);
  {
    PetscThreadComm tcomm;
    ierr = PetscCommGetThreadComm(comm,&tcomm);CHKERRQ(ierr);
    ierr = MPI_Attr_put(dupcomm,Petsc_ThreadComm_keyval,tcomm);CHKERRQ(ierr);
    tcomm->refct++;
    ierr = MPI_Attr_put(subcomm,Petsc_ThreadComm_keyval,tcomm);CHKERRQ(ierr);
    tcomm->refct++;
  }
  ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,NULL);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(subcomm,&psubcomm->comm,NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);

  psubcomm->color   = color;
  psubcomm->subsize = subsize;
  psubcomm->type    = PETSC_SUBCOMM_CONTIGUOUS;
  PetscFunctionReturn(0);
}
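
A standalone sketch of the load-balancing arithmetic above: with size = 10 and nsubcomm = 3, np_subcomm = 3 and nleftover = 1, so subsize = {4, 3, 3} and the contiguous variant maps ranks 0-3, 4-6 and 7-9 to colors 0, 1 and 2.

#include <stdio.h>

int main(void)
{
  int size = 10, nsubcomm = 3, i, rankstart = 0;
  int np_subcomm = size / nsubcomm;
  int nleftover  = size - nsubcomm * np_subcomm;

  for (i = 0; i < nsubcomm; i++) {
    int subsize = np_subcomm + (i < nleftover ? 1 : 0);
    printf("color %d: %d processes, world ranks %d..%d\n",
           i, subsize, rankstart, rankstart + subsize - 1);
    rankstart += subsize;
  }
  return 0;
}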
Example #4
int MPIR_InitFortran( void )
{
    int *attr_ptr, flag, i;
    MPI_Aint attr_val;
    /* Create the attribute values */

    /* Do the Fortran versions - Pass the actual value.  Note that these
       use MPIR_Keyval_create with the "is_fortran" flag set. 
       If you change these; change the removal in finalize.c. */
#define NULL_COPY (MPI_Copy_function *)0
#define NULL_DEL  (MPI_Delete_function*)0
        i = MPIR_TAG_UB;
    MPIR_Keyval_create( NULL_COPY, NULL_DEL, &i, (void *)0, 1 );
        i = MPIR_HOST;
    MPIR_Keyval_create( NULL_COPY, NULL_DEL, &i, (void *)0, 1 );
        i = MPIR_IO;
    MPIR_Keyval_create( NULL_COPY, NULL_DEL, &i, (void *)0, 1 );
        i = MPIR_WTIME_IS_GLOBAL;
    MPIR_Keyval_create( NULL_COPY, NULL_DEL, &i, (void *)0, 1 );

    /* We need to switch this to the MPI-2 version to handle different
       word lengths */
    /* Attr_get needs to be referenced from MPI_Init so that we can
       use it here */
    MPI_Attr_get( MPI_COMM_WORLD, MPI_TAG_UB, &attr_ptr, &flag );
    attr_val = (MPI_Aint) *attr_ptr;
    MPI_Attr_put( MPI_COMM_WORLD, MPIR_TAG_UB, (void*)attr_val );
    MPI_Attr_get( MPI_COMM_WORLD, MPI_HOST, &attr_ptr, &flag );
    attr_val = (MPI_Aint) *attr_ptr;
    MPI_Attr_put( MPI_COMM_WORLD, MPIR_HOST,   (void*)attr_val );
    MPI_Attr_get( MPI_COMM_WORLD, MPI_IO, &attr_ptr, &flag );
    attr_val = (MPI_Aint) *attr_ptr;
    MPI_Attr_put( MPI_COMM_WORLD, MPIR_IO,     (void*)attr_val );
    MPI_Attr_get( MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL, &attr_ptr, &flag );
    attr_val = (MPI_Aint) *attr_ptr;
    MPI_Attr_put( MPI_COMM_WORLD, MPIR_WTIME_IS_GLOBAL, (void*)attr_val );

    MPIR_Attr_make_perm( MPIR_TAG_UB );
    MPIR_Attr_make_perm( MPIR_HOST );
    MPIR_Attr_make_perm( MPIR_IO );
    MPIR_Attr_make_perm( MPIR_WTIME_IS_GLOBAL );

#ifndef F77_TRUE_VALUE_SET
    mpir_init_flog_( &MPIR_F_TRUE, &MPIR_F_FALSE );
#endif
    /* fcm sets MPI_BOTTOM */
    mpir_init_fcm_( );

    return 0;
}
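
The routine above copies the predefined attributes (MPI_TAG_UB, MPI_HOST, MPI_IO, MPI_WTIME_IS_GLOBAL) into MPIR-internal keyvals for the Fortran bindings. For reference, a minimal sketch of reading one of these predefined attributes in a plain MPI program; note that the cached value is a pointer to the integer, not the integer itself:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
  int *tag_ub_ptr, flag;

  MPI_Init(&argc, &argv);
  /* predefined attributes hand back a pointer to the value */
  MPI_Attr_get(MPI_COMM_WORLD, MPI_TAG_UB, &tag_ub_ptr, &flag);
  if (flag) printf("MPI_TAG_UB = %d\n", *tag_ub_ptr);
  MPI_Finalize();
  return 0;
}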
Example #5
/*@C
   PETSC_MATLAB_ENGINE_ - Creates a matlab engine shared by all processors
                    in a communicator.

   Not Collective

   Input Parameter:
.  comm - the MPI communicator to share the engine

   Level: developer

   Notes:
   Unlike almost all other PETSc routines, this does not return
   an error code. Usually used in the form
$      PetscMatlabEngineYYY(XXX object,PETSC_MATLAB_ENGINE_(comm));

.seealso: PetscMatlabEngineDestroy(), PetscMatlabEnginePut(), PetscMatlabEngineGet(),
          PetscMatlabEngineEvaluate(), PetscMatlabEngineGetOutput(), PetscMatlabEnginePrintOutput(),
          PetscMatlabEngineCreate(), PetscMatlabEnginePutArray(), PetscMatlabEngineGetArray(), PetscMatlabEngine,
          PETSC_MATLAB_ENGINE_WORLD, PETSC_MATLAB_ENGINE_SELF

@*/
PetscMatlabEngine  PETSC_MATLAB_ENGINE_(MPI_Comm comm)
{
  PetscErrorCode    ierr;
  PetscBool         flg;
  PetscMatlabEngine mengine;

  PetscFunctionBegin;
  if (Petsc_Matlab_Engine_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Matlab_Engine_keyval,0);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," "); mengine = 0;}
  }
  ierr = MPI_Attr_get(comm,Petsc_Matlab_Engine_keyval,(void**)&mengine,(int*)&flg);
  if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," "); mengine = 0;}
  if (!flg) { /* viewer not yet created */
    char *machinename = 0,machine[64];

    ierr = PetscOptionsGetString(NULL,NULL,"-matlab_engine_machine",machine,64,&flg);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," "); mengine = 0;}
    if (flg) machinename = machine;
    ierr = PetscMatlabEngineCreate(comm,machinename,&mengine);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," "); mengine = 0;}
    ierr = PetscObjectRegisterDestroy((PetscObject)mengine);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," "); mengine = 0;}
    ierr = MPI_Attr_put(comm,Petsc_Matlab_Engine_keyval,mengine);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," "); mengine = 0;}
  }
  PetscFunctionReturn(mengine);
}
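
The routine above is one instance of a recurring pattern in this listing: cache exactly one object per communicator behind a keyval, creating it on first use. A generic sketch of the same pattern in plain MPI, with a hypothetical my_object_t payload standing in for the PetscMatlabEngine:

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int id; } my_object_t;   /* hypothetical payload */

static int my_keyval = MPI_KEYVAL_INVALID;

/* get-or-create: return the object cached on comm, creating and
   attaching it the first time this communicator is seen */
static my_object_t *get_cached_object(MPI_Comm comm)
{
  my_object_t *obj;
  int flag;

  if (my_keyval == MPI_KEYVAL_INVALID)
    MPI_Keyval_create(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN, &my_keyval, NULL);

  MPI_Attr_get(comm, my_keyval, &obj, &flag);
  if (!flag) {                      /* not cached yet */
    obj = (my_object_t *)malloc(sizeof(*obj));
    obj->id = 42;
    MPI_Attr_put(comm, my_keyval, obj);
  }
  return obj;
}

int main(int argc, char **argv)
{
  my_object_t *a, *b;

  MPI_Init(&argc, &argv);
  a = get_cached_object(MPI_COMM_WORLD);
  b = get_cached_object(MPI_COMM_WORLD);
  printf("second lookup returned the %s object\n", (a == b) ? "same" : "wrong");
  MPI_Finalize();
  return 0;
}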
Example #6
PetscErrorCode PetscViewerDestroy_ASCII(PetscViewer viewer)
{
  PetscErrorCode    ierr;
  PetscViewer_ASCII *vascii = (PetscViewer_ASCII *)viewer->data;
  PetscViewerLink   *vlink;
  PetscBool         flg;

  PetscFunctionBegin;
  if (vascii->sviewer) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"ASCII PetscViewer destroyed before restoring singleton or subcomm PetscViewer");
  ierr = PetscViewerFileClose_ASCII(viewer);CHKERRQ(ierr);
  ierr = PetscFree(vascii);CHKERRQ(ierr);

  /* remove the viewer from the list in the MPI Communicator */
  if (Petsc_Viewer_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelViewer,&Petsc_Viewer_keyval,(void*)0);CHKERRQ(ierr);
  }

  ierr = MPI_Attr_get(((PetscObject)viewer)->comm,Petsc_Viewer_keyval,(void**)&vlink,(PetscMPIInt*)&flg);CHKERRQ(ierr);
  if (flg) {
    if (vlink && vlink->viewer == viewer) {
      ierr = MPI_Attr_put(((PetscObject)viewer)->comm,Petsc_Viewer_keyval,vlink->next);CHKERRQ(ierr);
      ierr = PetscFree(vlink);CHKERRQ(ierr);
    } else {
      while (vlink && vlink->next) {
        if (vlink->next->viewer == viewer) {
          PetscViewerLink *nv = vlink->next;
          vlink->next = vlink->next->next;
          ierr = PetscFree(nv);CHKERRQ(ierr);
        }
        vlink = vlink->next;
      }
    }
  }
  PetscFunctionReturn(0);
}
Example #7
/*@C
     PETSC_VIEWER_BINARY_ - Creates a binary PetscViewer shared by all processors
                     in a communicator.

     Collective on MPI_Comm

     Input Parameter:
.    comm - the MPI communicator to share the binary PetscViewer

     Level: intermediate

   Options Database Keys:
+    -viewer_binary_filename <name>
.    -viewer_binary_skip_info
-    -viewer_binary_skip_options

   Environmental variables:
-   PETSC_VIEWER_BINARY_FILENAME

     Notes:
     Unlike almost all other PETSc routines, PETSC_VIEWER_BINARY_ does not return
     an error code.  The binary PetscViewer is usually used in the form
$       XXXView(XXX object,PETSC_VIEWER_BINARY_(comm));

.seealso: PETSC_VIEWER_BINARY_WORLD, PETSC_VIEWER_BINARY_SELF, PetscViewerBinaryOpen(), PetscViewerCreate(),
          PetscViewerDestroy()
@*/
PetscViewer  PETSC_VIEWER_BINARY_(MPI_Comm comm)
{
  PetscErrorCode ierr;
  PetscBool      flg;
  PetscViewer    viewer;
  char           fname[PETSC_MAX_PATH_LEN];
  MPI_Comm       ncomm;

  PetscFunctionBegin;
  ierr = PetscCommDuplicate(comm,&ncomm,NULL);if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  if (Petsc_Viewer_Binary_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Viewer_Binary_keyval,0);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  }
  ierr = MPI_Attr_get(ncomm,Petsc_Viewer_Binary_keyval,(void**)&viewer,(int*)&flg);
  if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  if (!flg) { /* PetscViewer not yet created */
    ierr = PetscOptionsGetenv(ncomm,"PETSC_VIEWER_BINARY_FILENAME",fname,PETSC_MAX_PATH_LEN,&flg);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    if (!flg) {
      ierr = PetscStrcpy(fname,"binaryoutput");
      if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    }
    ierr = PetscViewerBinaryOpen(ncomm,fname,FILE_MODE_WRITE,&viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    ierr = PetscObjectRegisterDestroy((PetscObject)viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    ierr = MPI_Attr_put(ncomm,Petsc_Viewer_Binary_keyval,(void*)viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  }
  ierr = PetscCommDestroy(&ncomm);
  if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  PetscFunctionReturn(viewer);
}
Example #8
/*@C
    PETSC_VIEWER_DRAW_ - Creates a window PetscViewer shared by all processors
                     in a communicator.

     Collective on MPI_Comm

     Input Parameter:
.    comm - the MPI communicator to share the window PetscViewer

     Level: intermediate

     Notes:
     Unlike almost all other PETSc routines, PETSC_VIEWER_DRAW_ does not return
     an error code.  The window is usually used in the form
$       XXXView(XXX object,PETSC_VIEWER_DRAW_(comm));

.seealso: PETSC_VIEWER_DRAW_WORLD, PETSC_VIEWER_DRAW_SELF, PetscViewerDrawOpen(),
@*/
PetscViewer  PETSC_VIEWER_DRAW_(MPI_Comm comm)
{
  PetscErrorCode ierr;
  PetscMPIInt    flag;
  PetscViewer    viewer;
  MPI_Comm       ncomm;

  PetscFunctionBegin;
  ierr = PetscCommDuplicate(comm,&ncomm,NULL);if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  if (Petsc_Viewer_Draw_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Viewer_Draw_keyval,0);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  }
  ierr = MPI_Attr_get(ncomm,Petsc_Viewer_Draw_keyval,(void**)&viewer,&flag);
  if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  if (!flag) { /* PetscViewer not yet created */
    ierr = PetscViewerDrawOpen(ncomm,0,0,PETSC_DECIDE,PETSC_DECIDE,300,300,&viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    ierr = PetscObjectRegisterDestroy((PetscObject)viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    ierr = MPI_Attr_put(ncomm,Petsc_Viewer_Draw_keyval,(void*)viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  }
  ierr = PetscCommDestroy(&ncomm);
  if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  PetscFunctionReturn(viewer);
}
Example #9
void MPIR_MPIOInit(int * error_code) {

    int flag;
    char myname[] = "MPIR_MPIOInit";

    /* first check if ADIO has been initialized. If not, initialize it */
    if (ADIO_Init_keyval == MPI_KEYVAL_INVALID) {
        MPI_Initialized(&flag);

	/* --BEGIN ERROR HANDLING-- */
        if (!flag) {
	    *error_code = MPIO_Err_create_code(MPI_SUCCESS,
		    MPIR_ERR_RECOVERABLE, myname, __LINE__,
		    MPI_ERR_OTHER, "**initialized", 0);
	    *error_code = MPIO_Err_return_file(MPI_FILE_NULL, *error_code);
	    return;
	}
	/* --END ERROR HANDLING-- */

        MPI_Keyval_create(MPI_NULL_COPY_FN, ADIOI_End_call, &ADIO_Init_keyval,
                          (void *) 0);

	/* put a dummy attribute on MPI_COMM_SELF, because we want the delete
	   function to be called when MPI_COMM_SELF is freed. Clarified
	   in MPI-2 section 4.8, the standard mandates that attributes on
	   MPI_COMM_SELF get cleaned up early in MPI_Finalize */

        MPI_Attr_put(MPI_COMM_SELF, ADIO_Init_keyval, (void *) 0);

	/* initialize ADIO */
        ADIO_Init( (int *)0, (char ***)0, error_code);
    }
    *error_code = MPI_SUCCESS;
}
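
A minimal sketch of the MPI_COMM_SELF trick described in the comment above: since MPI-2 guarantees that attributes on MPI_COMM_SELF are deleted at the start of MPI_Finalize, a delete callback attached there works as a shutdown hook.

#include <mpi.h>
#include <stdio.h>

/* runs when the attribute is deleted from MPI_COMM_SELF, i.e. early in
   MPI_Finalize */
static int finalize_hook(MPI_Comm comm, int keyval, void *attr, void *extra)
{
  printf("cleanup hook called before MPI_Finalize completes\n");
  return MPI_SUCCESS;
}

int main(int argc, char **argv)
{
  int keyval;

  MPI_Init(&argc, &argv);
  MPI_Keyval_create(MPI_NULL_COPY_FN, finalize_hook, &keyval, NULL);
  MPI_Attr_put(MPI_COMM_SELF, keyval, NULL);   /* dummy value; only the hook matters */
  MPI_Finalize();                              /* triggers finalize_hook */
  return 0;
}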
Example #10
void smpi_global_init(void)
{
  int i;
  MPI_Group group;
  char name[MAILBOX_NAME_MAXLEN];
  int smpirun=0;

  if (!MC_is_active()) {
    global_timer = xbt_os_timer_new();
    xbt_os_walltimer_start(global_timer);
  }
  if (process_count == 0){
    process_count = SIMIX_process_count();
    smpirun=1;
  }
  smpi_universe_size = process_count;
  process_data = xbt_new0(smpi_process_data_t, process_count);
  for (i = 0; i < process_count; i++) {
    process_data[i]                       = xbt_new(s_smpi_process_data_t, 1);
    //process_data[i]->index              = i;
    process_data[i]->argc                 = NULL;
    process_data[i]->argv                 = NULL;
    process_data[i]->mailbox              = simcall_rdv_create(get_mailbox_name(name, i));
    process_data[i]->mailbox_small        =
        simcall_rdv_create(get_mailbox_name_small(name, i));
    process_data[i]->mailboxes_mutex      = xbt_mutex_init();
    process_data[i]->timer                = xbt_os_timer_new();
    if (MC_is_active())
      MC_ignore_heap(process_data[i]->timer, xbt_os_timer_size());
    process_data[i]->comm_self            = MPI_COMM_NULL;
    process_data[i]->comm_intra           = MPI_COMM_NULL;
    process_data[i]->comm_world           = NULL;
    process_data[i]->state                = SMPI_UNINITIALIZED;
    process_data[i]->sampling             = 0;
    process_data[i]->finalization_barrier = NULL;
  }
  //if the process was launched through smpirun script
  //we generate a global mpi_comm_world
  //if not, we let MPI_COMM_NULL, and the comm world
  //will be private to each mpi instance
  if(smpirun){
    group = smpi_group_new(process_count);
    MPI_COMM_WORLD = smpi_comm_new(group, NULL);
    MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, (void *)(MPI_Aint)process_count);
    xbt_bar_t bar=xbt_barrier_init(process_count);

    for (i = 0; i < process_count; i++) {
      smpi_group_set_mapping(group, i, i);
      process_data[i]->finalization_barrier = bar;
    }
  }
}
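
For comparison, a standard MPI program reads the MPI_UNIVERSE_SIZE attribute that smpi_global_init attaches above like this (the flag check matters because an implementation is not required to set it):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
  int *usize_ptr, flag;

  MPI_Init(&argc, &argv);
  MPI_Attr_get(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, &usize_ptr, &flag);
  if (flag) printf("universe size = %d\n", *usize_ptr);
  else      printf("MPI_UNIVERSE_SIZE not provided\n");
  MPI_Finalize();
  return 0;
}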
Example #11
/*@
    MPI_File_delete - Deletes a file

Input Parameters:
. filename - name of file to delete (string)
. info - info object (handle)

.N fortran
@*/
int MPI_File_delete(char *filename, MPI_Info info)
{
    int flag, error_code;
    char *tmp;
#ifdef MPI_hpux
    int fl_xmpi;
  
    HPMP_IO_START(fl_xmpi, BLKMPIFILEDELETE, TRDTBLOCK,
                MPI_FILE_NULL, MPI_DATATYPE_NULL, -1);
#endif /* MPI_hpux */

    /* first check if ADIO has been initialized. If not, initialize it */
    if (ADIO_Init_keyval == MPI_KEYVAL_INVALID) {

   /* check if MPI itself has been initialized. If not, flag an error.
   Can't initialize it here, because don't know argc, argv */
        MPI_Initialized(&flag);
        if (!flag) {
            FPRINTF(stderr, "Error: MPI_Init() must be called before using MPI-IO\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        MPI_Keyval_create(MPI_NULL_COPY_FN, ADIOI_End_call, &ADIO_Init_keyval,
                          (void *) 0);  

   /* put a dummy attribute on MPI_COMM_WORLD, because we want the delete
   function to be called when MPI_COMM_WORLD is freed. Hopefully the
   MPI library frees MPI_COMM_WORLD when MPI_Finalize is called,
   though the standard does not mandate this. */

        MPI_Attr_put(MPI_COMM_WORLD, ADIO_Init_keyval, (void *) 0);

/* initialize ADIO */

        ADIO_Init( (int *)0, (char ***)0, &error_code);
    }

    tmp = strchr(filename, ':');
#ifdef WIN32
    // Unfortunately Windows uses ':' behind the drive letter.
    // So we check if there is only one letter before the ':'
    // Please do not use a single letter filesystem name!
    if(tmp && ((tmp-filename) == 1)) tmp = 0;
#endif
    if (tmp) filename = tmp + 1;

    ADIO_Delete(filename, &error_code);
#ifdef MPI_hpux
    HPMP_IO_END(fl_xmpi, MPI_FILE_NULL, MPI_DATATYPE_NULL, -1);
#endif /* MPI_hpux */
    return error_code;
}
int main (int argc, char **argv)
{
    MPI_Comm duped;
    int keyval = MPI_KEYVAL_INVALID;
    int keyval_copy = MPI_KEYVAL_INVALID;
    int errs=0;

    MTest_Init( &argc, &argv );
    MPI_Comm_dup(MPI_COMM_SELF, &duped);

    MPI_Keyval_create(MPI_NULL_COPY_FN, delete_fn,  &keyval, NULL);
    keyval_copy = keyval;

    MPI_Attr_put(MPI_COMM_SELF, keyval, NULL);
    MPI_Attr_put(duped, keyval, NULL);

    MPI_Comm_free(&duped);         /* first MPI_Keyval_free */
    MPI_Keyval_free(&keyval);      /* second MPI_Keyval_free */
    MPI_Keyval_free(&keyval_copy); /* third MPI_Keyval_free */
    MTest_Finalize( errs );
    MPI_Finalize();                /* fourth MPI_Keyval_free */
    return 0;
}
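
The delete_fn used above is defined elsewhere in the test; a hypothetical sketch of a matching callback that simply records each deletion (it runs once when duped is freed and once when the attribute on MPI_COMM_SELF is cleaned up during MPI_Finalize):

#include <mpi.h>

/* hypothetical reconstruction of the missing delete_fn */
static int ncalls = 0;

static int delete_fn(MPI_Comm comm, int keyval, void *attribute_val,
                     void *extra_state)
{
    ncalls++;            /* count attribute deletions */
    return MPI_SUCCESS;
}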
Example #13
/*@C
   PetscViewerASCIIGetStdout - Creates an ASCII PetscViewer shared by all processors
                    in a communicator. Error returning version of PETSC_VIEWER_STDOUT_()

   Collective on MPI_Comm

   Input Parameter:
.  comm - the MPI communicator to share the PetscViewer

   Level: beginner

   Notes:
     This should be used in all PETSc source code instead of PETSC_VIEWER_STDOUT_()

.seealso: PETSC_VIEWER_DRAW_(), PetscViewerASCIIOpen(), PETSC_VIEWER_STDERR_, PETSC_VIEWER_STDOUT_WORLD,
          PETSC_VIEWER_STDOUT_SELF

@*/
PetscErrorCode  PetscViewerASCIIGetStdout(MPI_Comm comm,PetscViewer *viewer)
{
  PetscErrorCode ierr;
  PetscBool      flg;
  MPI_Comm       ncomm;

  PetscFunctionBegin;
  ierr = PetscCommDuplicate(comm,&ncomm,NULL);CHKERRQ(ierr);
  if (Petsc_Viewer_Stdout_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Viewer_Stdout_keyval,0);CHKERRQ(ierr);
  }
  ierr = MPI_Attr_get(ncomm,Petsc_Viewer_Stdout_keyval,(void**)viewer,(PetscMPIInt*)&flg);CHKERRQ(ierr);
  if (!flg) { /* PetscViewer not yet created */
    ierr = PetscViewerASCIIOpen(ncomm,"stdout",viewer);CHKERRQ(ierr);
    ierr = PetscObjectRegisterDestroy((PetscObject)*viewer);CHKERRQ(ierr);
    ierr = MPI_Attr_put(ncomm,Petsc_Viewer_Stdout_keyval,(void*)*viewer);CHKERRQ(ierr);
  }
  ierr = PetscCommDestroy(&ncomm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
Example #14
/*@
    MPI_Info_create - Creates a new info object

Output Parameters:
. info - info object (handle)

.N fortran
@*/
int MPI_Info_create(MPI_Info *info)
{
    int flag, error_code;

    /* first check if ADIO has been initialized. If not, initialize it */
    if (ADIO_Init_keyval == MPI_KEYVAL_INVALID) {

   /* check if MPI itself has been initialized. If not, flag an error.
   Can't initialize it here, because don't know argc, argv */
        MPI_Initialized(&flag);
        if (!flag) {
            printf("Error: MPI_Init() must be called before using MPI_Info_create\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        MPI_Keyval_create(MPI_NULL_COPY_FN, ADIOI_End_call, &ADIO_Init_keyval,
                          (void *) 0);  

   /* put a dummy attribute on MPI_COMM_WORLD, because we want the delete
   function to be called when MPI_COMM_WORLD is freed. Hopefully the
   MPI library frees MPI_COMM_WORLD when MPI_Finalize is called,
   though the standard does not mandate this. */

        MPI_Attr_put(MPI_COMM_WORLD, ADIO_Init_keyval, (void *) 0);

/* initialize ADIO */

        ADIO_Init( (int *)0, (char ***)0, &error_code);
    }

    *info = (MPI_Info) ADIOI_Malloc(sizeof(struct MPIR_Info));
    (*info)->cookie = MPIR_INFO_COOKIE;
    (*info)->key = 0;
    (*info)->value = 0;
    (*info)->next = 0;
    /* this is the first structure in this linked list. it is 
       always kept empty. new (key,value) pairs are added after it. */

    return MPI_SUCCESS;
}
Example #15
/*@
  MPE_GetTags - Returns tags that can be used in communication with a 
  communicator

  Input Parameters:
+ comm_in - Input communicator
- ntags   - Number of tags

  Output Parameters:
+ comm_out - Output communicator.  May be 'comm_in'.
- first_tag - First tag available

  Returns:
  MPI_SUCCESS on success, MPI error class on failure.

  Notes:
  This routine returns the requested number of tags, with the tags being
  'first_tag', 'first_tag+1', ..., 'first_tag+ntags-1'.

  These tags are guaranteed to be unique within 'comm_out'.

.seealso: MPE_ReturnTags
  
@*/
int MPE_GetTags( MPI_Comm comm_in, int ntags, MPI_Comm *comm_out,
		 int *first_tag )
{
int mpe_errno = MPI_SUCCESS;
int *tagvalp, *maxval, flag;

if (MPE_Tag_keyval == MPI_KEYVAL_INVALID) {
    MPI_Keyval_create( MPI_NULL_COPY_FN, MPE_DelTag, 
		       &MPE_Tag_keyval, (void *)0 );
    }

if ((mpe_errno = MPI_Attr_get( comm_in, MPE_Tag_keyval, &tagvalp, &flag )))
    return mpe_errno;

if (!flag) {
    /* This communicator is not yet known to this system, so we
       dup it and setup the first value */
    MPI_Comm_dup( comm_in, comm_out );
    comm_in = *comm_out;
    MPI_Attr_get( MPI_COMM_WORLD, MPI_TAG_UB, &maxval, &flag );
    tagvalp = (int *)malloc( 2 * sizeof(int) );
    if (!tagvalp) return MPI_ERR_OTHER;
    *tagvalp   = *maxval;
    *first_tag = *tagvalp - ntags;
    *tagvalp   = *first_tag;
    MPI_Attr_put( comm_in, MPE_Tag_keyval, tagvalp );
    return MPI_SUCCESS;
    }
*comm_out = comm_in;

if (*tagvalp < ntags) {
    /* Error, out of tags.  Another solution would be to 
       do an MPI_Comm_dup. */
    return MPI_ERR_INTERN;
    }
*first_tag = *tagvalp - ntags;
*tagvalp   = *first_tag;

return MPI_SUCCESS;
}
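
A brief usage sketch for the routine above. The MPE_ReturnTags call, its signature, and the header name are assumed from the seealso entry rather than shown in this listing:

#include "mpe.h"   /* assumed header for MPE_GetTags/MPE_ReturnTags */
#include <mpi.h>

void use_private_tags(void)
{
    MPI_Comm tag_comm;
    int first_tag;

    /* reserve two tags guaranteed unique within tag_comm */
    if (MPE_GetTags(MPI_COMM_WORLD, 2, &tag_comm, &first_tag) == MPI_SUCCESS) {
        /* ... use first_tag and first_tag + 1 for internal messages ... */
        MPE_ReturnTags(tag_comm, first_tag, 2);   /* assumed signature */
    }
}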
Example #16
/*@
   PetscSequentialPhaseBegin - Begins a sequential section of code.  

   Collective on MPI_Comm

   Input Parameters:
+  comm - Communicator to sequentialize.  
-  ng   - Number in processor group.  This many processes are allowed to execute
   at the same time (usually 1)

   Level: intermediate

   Notes:
   PetscSequentialPhaseBegin() and PetscSequentialPhaseEnd() provide a
   way to force a section of code to be executed by the processes in
   rank order.  Typically, this is done with
.vb
      PetscSequentialPhaseBegin(comm, 1);
      <code to be executed sequentially>
      PetscSequentialPhaseEnd(comm, 1);
.ve

   Often, the sequential code contains output statements (e.g., printf) to
   be executed.  Note that you may need to flush the I/O buffers before
   calling PetscSequentialPhaseEnd().  Also, note that some systems do
   not propagate I/O in any order to the controlling terminal (in other words,
   even if you flush the output, you may not get the data in the order
   that you want).

.seealso: PetscSequentialPhaseEnd()

   Concepts: sequential stage

@*/
PetscErrorCode  PetscSequentialPhaseBegin(MPI_Comm comm,int ng)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;
  MPI_Comm       local_comm,*addr_local_comm;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size == 1) PetscFunctionReturn(0);

  /* Get the private communicator for the sequential operations */
  if (Petsc_Seq_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Seq_keyval,0);CHKERRQ(ierr);
  }

  ierr = MPI_Comm_dup(comm,&local_comm);CHKERRQ(ierr);
  ierr = PetscMalloc(sizeof(MPI_Comm),&addr_local_comm);CHKERRQ(ierr);
  *addr_local_comm = local_comm;
  ierr = MPI_Attr_put(comm,Petsc_Seq_keyval,(void*)addr_local_comm);CHKERRQ(ierr);
  ierr = PetscSequentialPhaseBegin_Private(local_comm,ng);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
Example #17
/*@C
     PETSC_VIEWER_SOCKET_ - Creates a socket viewer shared by all processors in a communicator.

     Collective on MPI_Comm

     Input Parameter:
.    comm - the MPI communicator to share the socket PetscViewer

     Level: intermediate

   Options Database Keys:
   For use with the default PETSC_VIEWER_SOCKET_WORLD or if 
    PETSC_NULL is passed for machine or PETSC_DEFAULT is passed for port
$    -viewer_socket_machine <machine>
$    -viewer_socket_port <port>

   Environmental variables:
+   PETSC_VIEWER_SOCKET_PORT portnumber
-   PETSC_VIEWER_SOCKET_MACHINE machine name

     Notes:
     Unlike almost all other PETSc routines, PETSC_VIEWER_SOCKET_ does not return
     an error code.  The socket PetscViewer is usually used in the form
$       XXXView(XXX object,PETSC_VIEWER_SOCKET_(comm));

     Currently the only socket client available is Matlab. See 
     src/dm/da/examples/tests/ex12.c and ex12.m for an example of usage.

     Connects to a waiting socket and stays connected until PetscViewerDestroy() is called.

     Use this for communicating with an interactive Matlab session, see PETSC_VIEWER_MATLAB_() for communicating with the Matlab engine. 

.seealso: PETSC_VIEWER_SOCKET_WORLD, PETSC_VIEWER_SOCKET_SELF, PetscViewerSocketOpen(), PetscViewerCreate(),
          PetscViewerSocketSetConnection(), PetscViewerDestroy(), PETSC_VIEWER_SOCKET_(), PetscViewerBinaryWrite(), PetscViewerBinaryRead(),
          PetscViewerBinaryWriteStringArray(), PetscBinaryViewerGetDescriptor(), PETSC_VIEWER_MATLAB_()
@*/
PetscViewer PETSC_DLLEXPORT PETSC_VIEWER_SOCKET_(MPI_Comm comm)
{
  PetscErrorCode ierr;
  PetscTruth     flg;
  PetscViewer    viewer;

  PetscFunctionBegin;
  if (Petsc_Viewer_Socket_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Viewer_Socket_keyval,0);
    if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_SOCKET_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
  }
  ierr = MPI_Attr_get(comm,Petsc_Viewer_Socket_keyval,(void **)&viewer,(int*)&flg);
  if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_SOCKET_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
  if (!flg) { /* PetscViewer not yet created */
    ierr = PetscViewerSocketOpen(comm,0,0,&viewer); 
    if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_SOCKET_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
    ierr = PetscObjectRegisterDestroy((PetscObject)viewer);
    if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_SOCKET_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
    ierr = MPI_Attr_put(comm,Petsc_Viewer_Socket_keyval,(void*)viewer);
    if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_SOCKET_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
  } 
  PetscFunctionReturn(viewer);
}
Example #18
/*@C
    PETSC_VIEWER_DRAW_ - Creates a window PetscViewer shared by all processors 
                     in a communicator.

     Collective on MPI_Comm

     Input Parameter:
.    comm - the MPI communicator to share the window PetscViewer

     Level: intermediate

     Notes:
     Unlike almost all other PETSc routines, PETSC_VIEWER_DRAW_ does not return 
     an error code.  The window is usually used in the form
$       XXXView(XXX object,PETSC_VIEWER_DRAW_(comm));

.seealso: PETSC_VIEWER_DRAW_WORLD, PETSC_VIEWER_DRAW_SELF, PetscViewerDrawOpen(), 
@*/
PetscViewer PETSC_DLLEXPORT PETSC_VIEWER_DRAW_(MPI_Comm comm)
{
  PetscErrorCode ierr;
  PetscMPIInt    flag;
  PetscViewer    viewer;

  PetscFunctionBegin;
  if (Petsc_Viewer_Draw_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Viewer_Draw_keyval,0);
    if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
  }
  ierr = MPI_Attr_get(comm,Petsc_Viewer_Draw_keyval,(void **)&viewer,&flag);
  if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
  if (!flag) { /* PetscViewer not yet created */
    ierr = PetscViewerDrawOpen(comm,0,0,PETSC_DECIDE,PETSC_DECIDE,300,300,&viewer); 
    if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
    ierr = PetscObjectRegisterDestroy((PetscObject)viewer);
    if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
    ierr = MPI_Attr_put(comm,Petsc_Viewer_Draw_keyval,(void*)viewer);
    if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
  } 
  PetscFunctionReturn(viewer);
}
Example #19
static PetscErrorCode PetscSplitReductionGet(MPI_Comm comm,PetscSplitReduction **sr)
{
  PetscErrorCode ierr;
  PetscMPIInt    flag;

  PetscFunctionBegin;
  if (Petsc_Reduction_keyval == MPI_KEYVAL_INVALID) {
    /* 
       The calling sequence of the 2nd argument to this function changed
       between MPI Standard 1.0 and revision 1.1. Here we match the
       new standard; if you are using an MPI implementation that uses
       the older version you will get a warning message about the next line;
       it is only a warning message and should do no harm.
    */
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelReduction,&Petsc_Reduction_keyval,0);CHKERRQ(ierr);
  }
  ierr = MPI_Attr_get(comm,Petsc_Reduction_keyval,(void **)sr,&flag);CHKERRQ(ierr);
  if (!flag) {  /* doesn't exist yet so create it and put it in */
    ierr = PetscSplitReductionCreate(comm,sr);CHKERRQ(ierr);
    ierr = MPI_Attr_put(comm,Petsc_Reduction_keyval,*sr);CHKERRQ(ierr);
    ierr = PetscInfo1(0,"Putting reduction data in an MPI_Comm %ld\n",(long)comm);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
Example #20
/*@
    MPI_File_open - Opens a file

Input Parameters:
. comm - communicator (handle)
. filename - name of file to open (string)
. amode - file access mode (integer)
. info - info object (handle)

Output Parameters:
. fh - file handle (handle)

.N fortran
@*/
int MPI_File_open(MPI_Comm comm, char *filename, int amode, 
                  MPI_Info info, MPI_File *fh)
{
    int error_code, file_system, flag, tmp_amode, rank, orig_amode;
#ifndef PRINT_ERR_MSG
    static char myname[] = "MPI_FILE_OPEN";
#endif
    int err, min_code;
    char *tmp;
    MPI_Comm dupcomm, dupcommself;
#ifdef MPI_hpux
    int fl_xmpi;

    HPMP_IO_OPEN_START(fl_xmpi, comm);
#endif /* MPI_hpux */

	error_code = MPI_SUCCESS;

    if (comm == MPI_COMM_NULL) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: Invalid communicator\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_COMM, MPIR_ERR_COMM_NULL,
				     myname, (char *) 0, (char *) 0);
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }

    MPI_Comm_test_inter(comm, &flag);
    if (flag) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: Intercommunicator cannot be passed to MPI_File_open\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_COMM, MPIR_ERR_COMM_INTER,
				     myname, (char *) 0, (char *) 0);
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }

    if ( ((amode&MPI_MODE_RDONLY)?1:0) + ((amode&MPI_MODE_RDWR)?1:0) +
	 ((amode&MPI_MODE_WRONLY)?1:0) != 1 ) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: Exactly one of MPI_MODE_RDONLY, MPI_MODE_WRONLY, or MPI_MODE_RDWR must be specified\n");
	MPI_Abort(MPI_COMM_WORLD, 1); 
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_AMODE, 3,
				     myname, (char *) 0, (char *) 0);
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }

    if ((amode & MPI_MODE_RDONLY) && 
            ((amode & MPI_MODE_CREATE) || (amode & MPI_MODE_EXCL))) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: It is erroneous to specify MPI_MODE_CREATE or MPI_MODE_EXCL with MPI_MODE_RDONLY\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_AMODE, 5,
				     myname, (char *) 0, (char *) 0);
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }

    if ((amode & MPI_MODE_RDWR) && (amode & MPI_MODE_SEQUENTIAL)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: It is erroneous to specify MPI_MODE_SEQUENTIAL with MPI_MODE_RDWR\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_AMODE, 7,
				     myname, (char *) 0, (char *) 0);
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }

/* check if amode is the same on all processes */
    MPI_Comm_dup(comm, &dupcomm);
    tmp_amode = amode;

/*  
    Removed this check because broadcast is too expensive. 
    MPI_Bcast(&tmp_amode, 1, MPI_INT, 0, dupcomm);
    if (amode != tmp_amode) {
	FPRINTF(stderr, "MPI_File_open: amode must be the same on all processes\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
    }
*/

/* check if ADIO has been initialized. If not, initialize it */
    if (ADIO_Init_keyval == MPI_KEYVAL_INVALID) {

/* check if MPI itself has been initialized. If not, flag an error.
   Can't initialize it here, because don't know argc, argv */
	MPI_Initialized(&flag);
	if (!flag) {
	    FPRINTF(stderr, "Error: MPI_Init() must be called before using MPI-IO\n");
	    MPI_Abort(MPI_COMM_WORLD, 1);
	}

	MPI_Keyval_create(MPI_NULL_COPY_FN, ADIOI_End_call, &ADIO_Init_keyval,
			  (void *) 0);  

/* put a dummy attribute on MPI_COMM_WORLD, because we want the delete
   function to be called when MPI_COMM_WORLD is freed. Hopefully the
   MPI library frees MPI_COMM_WORLD when MPI_Finalize is called,
   though the standard does not mandate this. */

	MPI_Attr_put(MPI_COMM_WORLD, ADIO_Init_keyval, (void *) 0);

/* initialize ADIO */

	ADIO_Init( (int *)0, (char ***)0, &error_code);
    }


    file_system = -1;
    tmp = strchr(filename, ':');
#ifdef WIN32
    // Unfortunately Windows uses ':' behind the drive letter.
    // So we check if there is only one letter before the ':'
    // Please do not use a single letter filesystem name!
    if(tmp && ((tmp-filename) == 1)) tmp = 0;
#endif
    if (!tmp) {
	ADIO_FileSysType(filename, &file_system, &err);
	if (err != MPI_SUCCESS) {
#ifdef PRINT_ERR_MSG
	    FPRINTF(stderr, "MPI_File_open: Can't determine the file-system type. Check the filename/path you provided and try again. Otherwise, prefix the filename with a string to indicate the type of file sytem (piofs:, pfs:, nfs:, ufs:, hfs:, xfs:, sfs:, pvfs:, svm:).\n");
	    MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_FSTYPE,
				     myname, (char *) 0, (char *) 0);
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
	}
	MPI_Allreduce(&file_system, &min_code, 1, MPI_INT, MPI_MIN, dupcomm);
	if (min_code == ADIO_NFS) file_system = ADIO_NFS;
    }


#ifndef PFS
    if (!strncmp(filename, "pfs:", 4) || !strncmp(filename, "PFS:", 4) || (file_system == ADIO_PFS)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the PFS file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_PFS,
				     myname, (char *) 0, (char *) 0,"PFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif
#ifndef PIOFS
    if (!strncmp(filename, "piofs:", 6) || !strncmp(filename, "PIOFS:", 6) || (file_system == ADIO_PIOFS)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the PIOFS file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_PIOFS,
				     myname, (char *) 0, (char *) 0,"PIOFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif
#ifndef UFS
    if (!strncmp(filename, "ufs:", 4) || !strncmp(filename, "UFS:", 4) || (file_system == ADIO_UFS)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the UFS file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_UFS,
				     myname, (char *) 0, (char *) 0,"UFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif
#ifndef NFS
    if (!strncmp(filename, "nfs:", 4) || !strncmp(filename, "NFS:", 4) || (file_system == ADIO_NFS)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the NFS file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_NFS,
				     myname, (char *) 0, (char *) 0,"NFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif
#ifndef HFS
    if (!strncmp(filename, "hfs:", 4) || !strncmp(filename, "HFS:", 4) || (file_system == ADIO_HFS)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the HFS file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_HFS,
				     myname, (char *) 0, (char *) 0,"HFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif
#ifndef XFS
    if (!strncmp(filename, "xfs:", 4) || !strncmp(filename, "XFS:", 4) || (file_system == ADIO_XFS)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the XFS file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_XFS,
				     myname, (char *) 0, (char *) 0,"XFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif
#ifndef SFS
    if (!strncmp(filename, "sfs:", 4) || !strncmp(filename, "SFS:", 4) || (file_system == ADIO_SFS)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the SFS file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_SFS,
				     myname, (char *) 0, (char *) 0,"SFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif
#ifndef PVFS
    if (!strncmp(filename, "pvfs:", 5) || !strncmp(filename, "PVFS:", 5) || (file_system == ADIO_PVFS)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the PVFS file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_PVFS,
				     myname, (char *) 0, (char *) 0,"PVFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif

#ifndef SVM
    if (!strncmp(filename, "svm:", 4) || !strncmp(filename, "SVM:", 4) || (file_system == ADIO_SVM)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the SVM file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_SVM,
				     myname, (char *) 0, (char *) 0,"SVM");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif

#ifndef NTFS
    if (!strncmp(filename, "svm:", 4) || !strncmp(filename, "NTFS:", 4) || (file_system == ADIO_SVM)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the SVM file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_NTFS,
				     myname, (char *) 0, (char *) 0,"NTFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif

    if (!strncmp(filename, "pfs:", 4) || !strncmp(filename, "PFS:", 4)) {
	file_system = ADIO_PFS;
	filename += 4;
    }
    else if (!strncmp(filename, "piofs:", 6) || !strncmp(filename, "PIOFS:", 6)) {
	file_system = ADIO_PIOFS;
	filename += 6;
    }
    else if (!strncmp(filename, "ufs:", 4) || !strncmp(filename, "UFS:", 4)) {
	file_system = ADIO_UFS;
	filename += 4;
    }
    else if (!strncmp(filename, "nfs:", 4) || !strncmp(filename, "NFS:", 4)) {
	file_system = ADIO_NFS;
	filename += 4;
    }
    else if (!strncmp(filename, "hfs:", 4) || !strncmp(filename, "HFS:", 4)) {
	file_system = ADIO_HFS;
	filename += 4;
    }
    else if (!strncmp(filename, "xfs:", 4) || !strncmp(filename, "XFS:", 4)) {
	file_system = ADIO_XFS;
	filename += 4;
    }
    else if (!strncmp(filename, "sfs:", 4) || !strncmp(filename, "SFS:", 4)) {
	file_system = ADIO_SFS;
	filename += 4;
    }
    else if (!strncmp(filename, "pvfs:", 5) || !strncmp(filename, "PVFS:", 5)) {
	file_system = ADIO_PVFS;
	filename += 5;
    }
    else if (!strncmp(filename, "svm:", 4) || !strncmp(filename, "SVM:", 4)) {
	file_system = ADIO_SVM;
	filename += 4;
    }
    else if (!strncmp(filename, "ntfs:", 4) || !strncmp(filename, "NTFS:", 4)) {
	file_system = ADIO_NTFS;
	filename += 5;
    }
    if (((file_system == ADIO_PIOFS) || (file_system == ADIO_PVFS)) && 
        (amode & MPI_MODE_SEQUENTIAL)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: MPI_MODE_SEQUENTIAL not supported on PIOFS and PVFS\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_UNSUPPORTED_OPERATION, 
                    MPIR_ERR_NO_MODE_SEQ, myname, (char *) 0, (char *) 0);
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }

    orig_amode = amode;
    MPI_Comm_rank(dupcomm, &rank);

    if ((amode & MPI_MODE_CREATE) && (amode & MPI_MODE_EXCL)) {
	/* the open should fail if the file exists. Only process 0 should
           check this. Otherwise, if all processes try to check and the file 
           does not exist, one process will create the file and others who 
           reach later will return error. */

	if (!rank) {
	    MPI_Comm_dup(MPI_COMM_SELF, &dupcommself);
	    /* this dup is freed either in ADIO_Open if the open fails,
               or in ADIO_Close */
	    *fh = ADIO_Open(dupcommself, filename, file_system, amode, 0, 
               MPI_BYTE, MPI_BYTE, M_ASYNC, info, ADIO_PERM_NULL, &error_code);
	    /* broadcast the error code to other processes */
	    MPI_Bcast(&error_code, 1, MPI_INT, 0, dupcomm);
	    /* if no error, close the file. It will be reopened normally 
               below. */
	    if (error_code == MPI_SUCCESS) ADIO_Close(*fh, &error_code);
	}
	else MPI_Bcast(&error_code, 1, MPI_INT, 0, dupcomm);

	if (error_code != MPI_SUCCESS) {
	    MPI_Comm_free(&dupcomm);
	    *fh = MPI_FILE_NULL;
#ifdef MPI_hpux
	    HPMP_IO_OPEN_END(fl_xmpi, *fh, comm);
#endif /* MPI_hpux */
	    return error_code;
	}
	else amode = amode ^ MPI_MODE_EXCL;  /* turn off MPI_MODE_EXCL */
    }

/* use default values for disp, etype, filetype */    
/* set iomode=M_ASYNC. It is used to implement the Intel PFS interface
   on top of ADIO. Not relevant for MPI-IO implementation */    

    *fh = ADIO_Open(dupcomm, filename, file_system, amode, 0, MPI_BYTE,
                     MPI_BYTE, M_ASYNC, info, ADIO_PERM_NULL, &error_code);

    /* if MPI_MODE_EXCL was removed, add it back */
    if ((error_code == MPI_SUCCESS) && (amode != orig_amode))
	(*fh)->access_mode = orig_amode;

    /* determine name of file that will hold the shared file pointer */
    /* can't support shared file pointers on a file system that doesn't
       support file locking, e.g., PIOFS, PVFS */
    if ((error_code == MPI_SUCCESS) && ((*fh)->file_system != ADIO_PIOFS)
          && ((*fh)->file_system != ADIO_PVFS)) {
	ADIOI_Shfp_fname(*fh, rank);

        /* if MPI_MODE_APPEND, set the shared file pointer to end of file.
           indiv. file pointer already set to end of file in ADIO_Open. 
           Here file view is just bytes. */
	if ((*fh)->access_mode & MPI_MODE_APPEND) {
	    if (!rank) ADIO_Set_shared_fp(*fh, (*fh)->fp_ind, &error_code);
	    MPI_Barrier(dupcomm);
	}
    }

#ifdef MPI_hpux
    HPMP_IO_OPEN_END(fl_xmpi, *fh, comm);
#endif /* MPI_hpux */
    return error_code;
}
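
A minimal caller-side sketch of the amode rules checked above: exactly one of MPI_MODE_RDONLY, MPI_MODE_WRONLY or MPI_MODE_RDWR, and MPI_MODE_CREATE only together with a writable mode.

#include <mpi.h>

int main(int argc, char **argv)
{
  MPI_File fh;
  int err;

  MPI_Init(&argc, &argv);
  /* exactly one access mode (MPI_MODE_WRONLY); combining it with
     MPI_MODE_CREATE is legal because the mode is writable */
  err = MPI_File_open(MPI_COMM_WORLD, "testfile",
                      MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
  if (err == MPI_SUCCESS) MPI_File_close(&fh);
  MPI_Finalize();
  return 0;
}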
Example #21
int test_communicators( void )
{
    MPI_Comm dup_comm_world, lo_comm, rev_comm, dup_comm, 
	split_comm, world_comm;
    MPI_Group world_group, lo_group, rev_group;
    void *vvalue;
    int ranges[1][3];
    int flag, world_rank, world_size, rank, size, n, key_1, key_3;
    int color, key, result;
    int errs = 0;
    MPI_Aint value;

    MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
    MPI_Comm_size( MPI_COMM_WORLD, &world_size );
#ifdef DEBUG
    if (world_rank == 0) {
	printf( "*** Communicators ***\n" ); fflush(stdout);
    }
#endif

    MPI_Comm_dup( MPI_COMM_WORLD, &dup_comm_world );

    /*
      Exercise Comm_create by creating an equivalent to dup_comm_world
      (sans attributes) and a half-world communicator.
    */

#ifdef DEBUG
    if (world_rank == 0) {
	printf( "    Comm_create\n" ); fflush(stdout);
    }
#endif

    MPI_Comm_group( dup_comm_world, &world_group );
    MPI_Comm_create( dup_comm_world, world_group, &world_comm );
    MPI_Comm_rank( world_comm, &rank );
    if (rank != world_rank) {
	errs++;
	printf( "incorrect rank in world comm: %d\n", rank );
	MPI_Abort(MPI_COMM_WORLD, 3001 );
    }

    n = world_size / 2;

    ranges[0][0] = 0;
    ranges[0][1] = (world_size - n) - 1;
    ranges[0][2] = 1;

#ifdef DEBUG
    printf( "world rank = %d before range incl\n", world_rank );FFLUSH;
#endif
    MPI_Group_range_incl(world_group, 1, ranges, &lo_group );
#ifdef DEBUG
    printf( "world rank = %d after range incl\n", world_rank );FFLUSH;
#endif
    MPI_Comm_create(world_comm, lo_group, &lo_comm );
#ifdef DEBUG
    printf( "world rank = %d before group free\n", world_rank );FFLUSH;
#endif
    MPI_Group_free( &lo_group );

#ifdef DEBUG
    printf( "world rank = %d after group free\n", world_rank );FFLUSH;
#endif

    if (world_rank < (world_size - n)) {
	MPI_Comm_rank(lo_comm, &rank );
	if (rank == MPI_UNDEFINED) {
	    errs++;
	    printf( "incorrect lo group rank: %d\n", rank ); fflush(stdout);
	    MPI_Abort(MPI_COMM_WORLD, 3002 );
	}
	else {
	    /* printf( "lo in\n" );FFLUSH; */
	    MPI_Barrier(lo_comm );
	    /* printf( "lo out\n" );FFLUSH; */
	}
    }
    else {
	if (lo_comm != MPI_COMM_NULL) {
	    errs++;
	    printf( "incorrect lo comm:\n" ); fflush(stdout);
	    MPI_Abort(MPI_COMM_WORLD, 3003 );
	}
    }

#ifdef DEBUG
    printf( "worldrank = %d\n", world_rank );FFLUSH;
#endif
    MPI_Barrier(world_comm);

#ifdef DEBUG
    printf( "bar!\n" );FFLUSH;
#endif
    /*
      Check Comm_dup by adding attributes to lo_comm & duplicating
    */
#ifdef DEBUG
    if (world_rank == 0) {
	printf( "    Comm_dup\n" );
	fflush(stdout);
    }
#endif
    
    if (lo_comm != MPI_COMM_NULL) {
	value = 9;
	MPI_Keyval_create(copy_fn,     delete_fn,   &key_1, &value );
	value = 8;
	value = 7;
	MPI_Keyval_create(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
			  &key_3, &value ); 

	/* This may generate a compilation warning; it is, however, an
	   easy way to cache a value instead of a pointer */
	/* printf( "key1 = %x key3 = %x\n", key_1, key_3 ); */
	MPI_Attr_put(lo_comm, key_1, (void *) (MPI_Aint) world_rank );
	MPI_Attr_put(lo_comm, key_3, (void *)0 );
	
	MPI_Comm_dup(lo_comm, &dup_comm );

	/* Note that if sizeof(int) < sizeof(void *), we can't use
	   (void **)&value to get the value we passed into Attr_put.  To avoid 
	   problems (e.g., alignment errors), we recover the value into 
	   a (void *) and cast to int. Note that this may generate warning
	   messages from the compiler.  */
	MPI_Attr_get(dup_comm, key_1, (void **)&vvalue, &flag );
	value = (MPI_Aint)vvalue;
	
	if (! flag) {
	    errs++;
	    printf( "dup_comm key_1 not found on %d\n", world_rank );
	    fflush( stdout );
	    MPI_Abort(MPI_COMM_WORLD, 3004 );
	}
	
	if (value != world_rank) {
	    errs++;
	    printf( "dup_comm key_1 value incorrect: %ld, expected %d\n", 
		    (long)value, world_rank );
	    fflush( stdout );
	    MPI_Abort(MPI_COMM_WORLD, 3005 );
	}

	MPI_Attr_get(dup_comm, key_3, (void **)&vvalue, &flag );
	value = (MPI_Aint)vvalue;
	if (flag) {
	    errs++;
	    printf( "dup_comm key_3 found!\n" );
	    fflush( stdout );
	    MPI_Abort(MPI_COMM_WORLD, 3008 );
	}
	MPI_Keyval_free(&key_1 );
	MPI_Keyval_free(&key_3 );
    }
    /* 
       Split the world into even & odd communicators with reversed ranks.
    */
#ifdef DEBUG
    if (world_rank == 0) {
	printf( "    Comm_split\n" );
	fflush(stdout);
    }
#endif
    
    color = world_rank % 2;
    key   = world_size - world_rank;
    
    MPI_Comm_split(dup_comm_world, color, key, &split_comm );
    MPI_Comm_size(split_comm, &size );
    MPI_Comm_rank(split_comm, &rank );
    if (rank != ((size - world_rank/2) - 1)) {
	errs++;
	printf( "incorrect split rank: %d\n", rank ); fflush(stdout);
	MPI_Abort(MPI_COMM_WORLD, 3009 );
    }
    
    MPI_Barrier(split_comm );
    /*
      Test each possible Comm_compare result
    */
#ifdef DEBUG
    if (world_rank == 0) {
	printf( "    Comm_compare\n" );
	fflush(stdout);
    }
#endif
    
    MPI_Comm_compare(world_comm, world_comm, &result );
    if (result != MPI_IDENT) {
	errs++;
	printf( "incorrect ident result: %d\n", result );
	MPI_Abort(MPI_COMM_WORLD, 3010 );
    }
    
    if (lo_comm != MPI_COMM_NULL) {
	MPI_Comm_compare(lo_comm, dup_comm, &result );
	if (result != MPI_CONGRUENT) {
	    errs++;
            printf( "incorrect congruent result: %d\n", result );
            MPI_Abort(MPI_COMM_WORLD, 3011 );
	}
    }
    
    ranges[0][0] = world_size - 1;
    ranges[0][1] = 0;
    ranges[0][2] = -1;

    MPI_Group_range_incl(world_group, 1, ranges, &rev_group );
    MPI_Comm_create(world_comm, rev_group, &rev_comm );

    MPI_Comm_compare(world_comm, rev_comm, &result );
    if (result != MPI_SIMILAR && world_size != 1) {
	errs++;
	printf( "incorrect similar result: %d\n", result );
	MPI_Abort(MPI_COMM_WORLD, 3012 );
    }
    
    if (lo_comm != MPI_COMM_NULL) {
	MPI_Comm_compare(world_comm, lo_comm, &result );
	if (result != MPI_UNEQUAL && world_size != 1) {
	    errs++;
	    printf( "incorrect unequal result: %d\n", result );
	    MPI_Abort(MPI_COMM_WORLD, 3013 );
	}
    }
    /*
      Free all communicators created
    */
#ifdef DEBUG
    if (world_rank == 0) 
	printf( "    Comm_free\n" );
#endif
    
    MPI_Comm_free( &world_comm );
    MPI_Comm_free( &dup_comm_world );
    
    MPI_Comm_free( &rev_comm );
    MPI_Comm_free( &split_comm );
    
    MPI_Group_free( &world_group );
    MPI_Group_free( &rev_group );
    
    if (lo_comm != MPI_COMM_NULL) {
        MPI_Comm_free( &lo_comm );
        MPI_Comm_free( &dup_comm );
    }
    
    return errs;
}
Example #22
EXPORT_MPI_API void FORTRAN_API mpi_attr_put_ ( MPI_Fint *comm, MPI_Fint *keyval, MPI_Fint *attr_value, MPI_Fint *__ierr )
{
    *__ierr = MPI_Attr_put( MPI_Comm_f2c(*comm), (int)*keyval,
                            (void *)(MPI_Aint)((int)*attr_value));
}
Example #23
int test_communicators(void)
{
    MPI_Comm dup_comm_world, d2;
    int world_rank, world_size, key_1;
    int err, errs = 0;
    MPI_Aint value;

    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
#ifdef DEBUG
    if (world_rank == 0) {
        printf("*** Attribute copy/delete return codes ***\n");
    }
#endif

    MPI_Comm_dup(MPI_COMM_WORLD, &dup_comm_world);
    MPI_Barrier(dup_comm_world);

    MPI_Errhandler_set(dup_comm_world, MPI_ERRORS_RETURN);

    value = -11;
    if ((err = MPI_Keyval_create(copybomb_fn, deletebomb_fn, &key_1, &value)))
        abort_msg("Keyval_create", err);

    err = MPI_Attr_put(dup_comm_world, key_1, (void *) (MPI_Aint) world_rank);
    if (err) {
        errs++;
        printf("Error with first put\n");
    }

    err = MPI_Attr_put(dup_comm_world, key_1, (void *) (MPI_Aint) (2 * world_rank));
    if (err == MPI_SUCCESS) {
        errs++;
        printf("delete function return code was MPI_SUCCESS in put\n");
    }

    /* Because the attribute delete function should fail, the attribute
     * should *not be removed* */
    err = MPI_Attr_delete(dup_comm_world, key_1);
    if (err == MPI_SUCCESS) {
        errs++;
        printf("delete function return code was MPI_SUCCESS in delete\n");
    }

    err = MPI_Comm_dup(dup_comm_world, &d2);
    if (err == MPI_SUCCESS) {
        errs++;
        printf("copy function return code was MPI_SUCCESS in dup\n");
    }
#ifndef USE_STRICT_MPI
    /* Another interpretation is to leave d2 unchanged on error */
    if (err && d2 != MPI_COMM_NULL) {
        errs++;
        printf("dup did not return MPI_COMM_NULL on error\n");
    }
#endif

    delete_flag = 1;
    MPI_Comm_free(&dup_comm_world);
    MPI_Keyval_free(&key_1);

    return errs;
}
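
The test above depends on copybomb_fn, deletebomb_fn, delete_flag, and abort_msg(), which are defined elsewhere in the same test file. The intent is that both callbacks deliberately fail, driving the put/dup/delete calls down their error paths, except that the delete callback is allowed to succeed once delete_flag is set before the final MPI_Comm_free. A minimal sketch under those assumptions (the signatures are the standard MPI-1 MPI_Copy_function / MPI_Delete_function prototypes; the bodies are guesses):

static int delete_flag = 0;   /* the test sets this to 1 just before freeing the comm */

/* Copy callback: always refuse, so MPI_Comm_dup on a comm carrying this attribute must fail. */
static int copybomb_fn(MPI_Comm oldcomm, int keyval, void *extra_state,
                       void *attribute_val_in, void *attribute_val_out, int *flag)
{
    *flag = 0;
    return MPI_ERR_OTHER;
}

/* Delete callback: refuse to delete until the test explicitly allows it. */
static int deletebomb_fn(MPI_Comm comm, int keyval, void *attribute_val, void *extra_state)
{
    return delete_flag ? MPI_SUCCESS : MPI_ERR_OTHER;
}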
Beispiel #24
File: MPI-api.c Project: 8l/rose
void declareBindings (void)
{
  /* === Point-to-point === */
  void* buf;
  int count;
  MPI_Datatype datatype;
  int dest;
  int tag;
  MPI_Comm comm;
  MPI_Send (buf, count, datatype, dest, tag, comm); // L12
  int source;
  MPI_Status status;
  MPI_Recv (buf, count, datatype, source, tag, comm, &status); // L15
  MPI_Get_count (&status, datatype, &count);
  MPI_Bsend (buf, count, datatype, dest, tag, comm);
  MPI_Ssend (buf, count, datatype, dest, tag, comm);
  MPI_Rsend (buf, count, datatype, dest, tag, comm);
  void* buffer;
  int size;
  MPI_Buffer_attach (buffer, size); // L22
  MPI_Buffer_detach (buffer, &size);
  MPI_Request request;
  MPI_Isend (buf, count, datatype, dest, tag, comm, &request); // L25
  MPI_Ibsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Issend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irecv (buf, count, datatype, source, tag, comm, &request);
  MPI_Wait (&request, &status);
  int flag;
  MPI_Test (&request, &flag, &status); // L32
  MPI_Request_free (&request);
  MPI_Request* array_of_requests;
  int index;
  MPI_Waitany (count, array_of_requests, &index, &status); // L36
  MPI_Testany (count, array_of_requests, &index, &flag, &status);
  MPI_Status* array_of_statuses;
  MPI_Waitall (count, array_of_requests, array_of_statuses); // L39
  MPI_Testall (count, array_of_requests, &flag, array_of_statuses);
  int incount;
  int outcount;
  int* array_of_indices;
  MPI_Waitsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L44--45
  MPI_Testsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L46--47
  MPI_Iprobe (source, tag, comm, &flag, &status); // L48
  MPI_Probe (source, tag, comm, &status);
  MPI_Cancel (&request);
  MPI_Test_cancelled (&status, &flag);
  MPI_Send_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Bsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Ssend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Rsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Recv_init (buf, count, datatype, source, tag, comm, &request);
  MPI_Start (&request);
  MPI_Startall (count, array_of_requests);
  void* sendbuf;
  int sendcount;
  MPI_Datatype sendtype;
  int sendtag;
  void* recvbuf;
  int recvcount;
  MPI_Datatype recvtype;
  int recvtag;
  MPI_Sendrecv (sendbuf, sendcount, sendtype, dest, sendtag,
		recvbuf, recvcount, recvtype, source, recvtag,
		comm, &status); // L67--69
  MPI_Sendrecv_replace (buf, count, datatype, dest, sendtag, source, recvtag,
			comm, &status); // L70--71
  MPI_Datatype oldtype;
  MPI_Datatype newtype;
  MPI_Type_contiguous (count, oldtype, &newtype); // L74
  int blocklength;
  {
    int stride;
    MPI_Type_vector (count, blocklength, stride, oldtype, &newtype); // L78
  }
  {
    MPI_Aint stride;
    MPI_Type_hvector (count, blocklength, stride, oldtype, &newtype); // L82
  }
  int* array_of_blocklengths;
  {
    int* array_of_displacements;
    MPI_Type_indexed (count, array_of_blocklengths, array_of_displacements,
		      oldtype, &newtype); // L87--88
  }
  {
    MPI_Aint* array_of_displacements;
    MPI_Type_hindexed (count, array_of_blocklengths, array_of_displacements,
                       oldtype, &newtype); // L92--93
    MPI_Datatype* array_of_types;
    MPI_Type_struct (count, array_of_blocklengths, array_of_displacements,
                     array_of_types, &newtype); // L95--96
  }
  void* location;
  MPI_Aint address;
  MPI_Address (location, &address); // L100
  MPI_Aint extent;
  MPI_Type_extent (datatype, &extent); // L102
  MPI_Type_size (datatype, &size);
  MPI_Aint displacement;
  MPI_Type_lb (datatype, &displacement); // L105
  MPI_Type_ub (datatype, &displacement);
  MPI_Type_commit (&datatype);
  MPI_Type_free (&datatype);
  MPI_Get_elements (&status, datatype, &count);
  void* inbuf;
  void* outbuf;
  int outsize;
  int position;
  MPI_Pack (inbuf, incount, datatype, outbuf, outsize, &position, comm); // L114
  int insize;
  MPI_Unpack (inbuf, insize, &position, outbuf, outcount, datatype,
	      comm); // L116--117
  MPI_Pack_size (incount, datatype, comm, &size);

  /* === Collectives === */
  MPI_Barrier (comm); // L121
  int root;
  MPI_Bcast (buffer, count, datatype, root, comm); // L123
  MPI_Gather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
	      root, comm); // L124--125
  int* recvcounts;
  int* displs;
  MPI_Gatherv (sendbuf, sendcount, sendtype,
               recvbuf, recvcounts, displs, recvtype,
	       root, comm); // L128--130
  MPI_Scatter (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
               root, comm); // L131--132
  int* sendcounts;
  MPI_Scatterv (sendbuf, sendcounts, displs, sendtype,
		recvbuf, recvcount, recvtype, root, comm); // L134--135
  MPI_Allgather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
                 comm); // L136--137
  MPI_Allgatherv (sendbuf, sendcount, sendtype,
		  recvbuf, recvcounts, displs, recvtype,
		  comm); // L138--140
  MPI_Alltoall (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
		comm); // L141--142
  int* sdispls;
  int* rdispls;
  MPI_Alltoallv (sendbuf, sendcounts, sdispls, sendtype,
                 recvbuf, recvcounts, rdispls, recvtype,
		 comm); // L145--147
  MPI_Op op;
  MPI_Reduce (sendbuf, recvbuf, count, datatype, op, root, comm); // L149
#if 0
  MPI_User_function function;
  int commute;
  MPI_Op_create (function, commute, &op); // L153
#endif
  MPI_Op_free (&op); // L155
  MPI_Allreduce (sendbuf, recvbuf, count, datatype, op, comm);
  MPI_Reduce_scatter (sendbuf, recvbuf, recvcounts, datatype, op, comm);
  MPI_Scan (sendbuf, recvbuf, count, datatype, op, comm);

  /* === Groups, contexts, and communicators === */
  MPI_Group group;
  MPI_Group_size (group, &size); // L162
  int rank;
  MPI_Group_rank (group, &rank); // L164
  MPI_Group group1;
  int n;
  int* ranks1;
  MPI_Group group2;
  int* ranks2;
  MPI_Group_translate_ranks (group1, n, ranks1, group2, ranks2); // L170
  int result;
  MPI_Group_compare (group1, group2, &result); // L172
  MPI_Group newgroup;
  MPI_Group_union (group1, group2, &newgroup); // L174
  MPI_Group_intersection (group1, group2, &newgroup);
  MPI_Group_difference (group1, group2, &newgroup);
  int* ranks;
  MPI_Group_incl (group, n, ranks, &newgroup); // L178
  MPI_Group_excl (group, n, ranks, &newgroup);
  extern int ranges[][3];
  MPI_Group_range_incl (group, n, ranges, &newgroup); // L181
  MPI_Group_range_excl (group, n, ranges, &newgroup);
  MPI_Group_free (&group);
  MPI_Comm_size (comm, &size);
  MPI_Comm_rank (comm, &rank);
  MPI_Comm comm1;
  MPI_Comm comm2;
  MPI_Comm_compare (comm1, comm2, &result);
  MPI_Comm newcomm;
  MPI_Comm_dup (comm, &newcomm);
  MPI_Comm_create (comm, group, &newcomm);
  int color;
  int key;
  MPI_Comm_split (comm, color, key, &newcomm); // L194
  MPI_Comm_free (&comm);
  MPI_Comm_test_inter (comm, &flag);
  MPI_Comm_remote_size (comm, &size);
  MPI_Comm_remote_group (comm, &group);
  MPI_Comm local_comm;
  int local_leader;
  MPI_Comm peer_comm;
  int remote_leader;
  MPI_Comm newintercomm;
  MPI_Intercomm_create (local_comm, local_leader, peer_comm, remote_leader, tag,
			&newintercomm); // L204--205
  MPI_Comm intercomm;
  MPI_Comm newintracomm;
  int high;
  MPI_Intercomm_merge (intercomm, high, &newintracomm); // L209
  int keyval;
#if 0
  MPI_Copy_function copy_fn;
  MPI_Delete_function delete_fn;
  void* extra_state;
  MPI_Keyval_create (copy_fn, delete_fn, &keyval, extra_state); // L215
#endif
  MPI_Keyval_free (&keyval); // L217
  void* attribute_val;
  MPI_Attr_put (comm, keyval, attribute_val); // L219
  MPI_Attr_get (comm, keyval, attribute_val, &flag);
  MPI_Attr_delete (comm, keyval);

  /* === Environmental inquiry === */
  char* name;
  int resultlen;
  MPI_Get_processor_name (name, &resultlen); // L226
  MPI_Errhandler errhandler;
#if 0
  MPI_Handler_function function;
  MPI_Errhandler_create (function, &errhandler); // L230
#endif
  MPI_Errhandler_set (comm, errhandler); // L232
  MPI_Errhandler_get (comm, &errhandler);
  MPI_Errhandler_free (&errhandler);
  int errorcode;
  char* string;
  MPI_Error_string (errorcode, string, &resultlen); // L237
  int errorclass;
  MPI_Error_class (errorcode, &errorclass); // L239
  MPI_Wtime ();
  MPI_Wtick ();
  int argc;
  char** argv;
  MPI_Init (&argc, &argv); // L244
  MPI_Finalize ();
  MPI_Initialized (&flag);
  MPI_Abort (comm, errorcode);
}
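
The listing above only declares bindings and is not meant to execute. For comparison, a minimal runnable sketch of the MPI-1 attribute-caching calls it names (MPI_Keyval_create, MPI_Attr_put, MPI_Attr_get, MPI_Attr_delete) could look like this; it is a standalone illustration, not part of the ROSE project file:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Comm dup;
    int keyval, flag, rank;
    int *valp;
    static int payload = 42;          /* the attribute caches the address of this value */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Comm_dup(MPI_COMM_WORLD, &dup);
    MPI_Keyval_create(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN, &keyval, NULL);

    MPI_Attr_put(dup, keyval, &payload);       /* cache a pointer on the communicator */
    MPI_Attr_get(dup, keyval, &valp, &flag);   /* retrieve it; flag says whether it was set */
    if (flag && rank == 0) printf("cached value = %d\n", *valp);

    MPI_Attr_delete(dup, keyval);              /* remove the attribute */
    MPI_Keyval_free(&keyval);
    MPI_Comm_free(&dup);
    MPI_Finalize();
    return 0;
}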
Beispiel #25
/*@C
  PetscCommDuplicate - Duplicates the communicator only if it is not already a PETSc communicator.

  Collective on MPI_Comm

  Input Parameters:
. comm_in - Input communicator

  Output Parameters:
+ comm_out - Output communicator.  May be comm_in.
- first_tag - Tag available that has not already been used with this communicator (you may
              pass in NULL if you do not need a tag)

  PETSc communicators are just regular MPI communicators that keep track of which
  tags have been used, to prevent tag conflicts. If you pass a non-PETSc communicator into
  a PETSc creation routine, it will attach a private communicator for use in the object's communications.
  The internal MPI_Comm is used to perform all the MPI calls for PETSc; the outer MPI_Comm is a user-level
  MPI_Comm that may be performing communication for the user or another library and so IS NOT used by PETSc.

  Level: developer

  Concepts: communicator^duplicate

.seealso: PetscObjectGetNewTag(), PetscCommGetNewTag(), PetscCommDestroy()
@*/
PetscErrorCode  PetscCommDuplicate(MPI_Comm comm_in,MPI_Comm *comm_out,PetscMPIInt *first_tag)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      *maxval,flg;

  PetscFunctionBegin;
  ierr = PetscSpinlockLock(&PetscCommSpinLock);CHKERRQ(ierr);
  ierr = MPI_Attr_get(comm_in,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);

  if (!flg) {  /* this is NOT a PETSc comm */
    union {MPI_Comm comm; void *ptr;} ucomm;
    /* check if this communicator has a PETSc communicator embedded in it */
    ierr = MPI_Attr_get(comm_in,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (!flg) {
      /* This communicator is not yet known to this system, so we duplicate it and make an internal communicator */
      ierr = MPI_Comm_dup(comm_in,comm_out);CHKERRQ(ierr);
      ierr = MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
      ierr = PetscNew(&counter);CHKERRQ(ierr);

      counter->tag       = *maxval;
      counter->refcount  = 0;
      counter->namecount = 0;

      ierr = MPI_Attr_put(*comm_out,Petsc_Counter_keyval,counter);CHKERRQ(ierr);
      ierr = PetscInfo3(0,"Duplicating a communicator %ld %ld max tags = %d\n",(long)comm_in,(long)*comm_out,*maxval);CHKERRQ(ierr);

      /* save PETSc communicator inside user communicator, so we can get it next time */
      ucomm.comm = *comm_out;   /* ONLY the comm part of the union is significant. */
      ierr = MPI_Attr_put(comm_in,Petsc_InnerComm_keyval,ucomm.ptr);CHKERRQ(ierr);
      ucomm.comm = comm_in;
      ierr = MPI_Attr_put(*comm_out,Petsc_OuterComm_keyval,ucomm.ptr);CHKERRQ(ierr);
    } else {
      *comm_out = ucomm.comm;
      /* pull out the inner MPI_Comm and hand it back to the caller */
      ierr = MPI_Attr_get(*comm_out,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Inner PETSc communicator does not have its tag/name counter attribute set");
      ierr = PetscInfo2(0,"Using internal PETSc communicator %ld %ld\n",(long)comm_in,(long)*comm_out);CHKERRQ(ierr);
    }
  } else *comm_out = comm_in;

#if defined(PETSC_USE_DEBUG)
  /*
     Hanging here means that some processes have called PetscCommDuplicate() and others have not.
     This likely means that a subset of the processes in an MPI_Comm have attempted to create a PetscObject!
     ALL processes that share a communicator MUST share the objects created from that communicator.
  */
  ierr = MPI_Barrier(comm_in);CHKERRQ(ierr);
#endif

  if (counter->tag < 1) {
    ierr = PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
    ierr = MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
    counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
  }

  if (first_tag) *first_tag = counter->tag--;

  counter->refcount++; /* number of references to this comm */
  ierr = PetscSpinlockUnlock(&PetscCommSpinLock);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
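
A minimal usage sketch based on the documentation above: a hypothetical caller obtains the inner PETSc communicator plus an unused tag for a user-supplied MPI_Comm, then drops its reference with PetscCommDestroy(), which only frees the inner communicator once its reference count reaches zero. The helper name is made up for illustration.

PetscErrorCode MyUsePetscComm(MPI_Comm user_comm)
{
  PetscErrorCode ierr;
  MPI_Comm       pcomm;
  PetscMPIInt    tag;

  PetscFunctionBegin;
  ierr = PetscCommDuplicate(user_comm,&pcomm,&tag);CHKERRQ(ierr);
  /* ... perform MPI calls on pcomm using tag, without colliding with other libraries ... */
  ierr = PetscCommDestroy(&pcomm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}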
Beispiel #26
/*@C
   PetscViewerASCIIOpen - Opens an ASCII file as a PetscViewer.

   Collective on MPI_Comm

   Input Parameters:
+  comm - the communicator
-  name - the file name

   Output Parameter:
.  lab - the PetscViewer to use with the specified file

   Level: beginner

   Notes:
   This PetscViewer can be destroyed with PetscViewerDestroy().

   The MPI communicator used here must match the one used by the object being viewed. For example, if the
   Mat was created with PETSC_COMM_WORLD, then the Viewer must also be created with PETSC_COMM_WORLD.

   As shown below, PetscViewerASCIIOpen() is useful in conjunction with
   MatView() and VecView()
.vb
     PetscViewerASCIIOpen(PETSC_COMM_WORLD,"mat.output",&viewer);
     MatView(matrix,viewer);
.ve

  Concepts: PetscViewerASCII^creating
  Concepts: printf
  Concepts: printing
  Concepts: accessing remote file
  Concepts: remote file

.seealso: MatView(), VecView(), PetscViewerDestroy(), PetscViewerBinaryOpen(),
          PetscViewerASCIIGetPointer(), PetscViewerPushFormat(), PETSC_VIEWER_STDOUT_, PETSC_VIEWER_STDERR_,
          PETSC_VIEWER_STDOUT_WORLD, PETSC_VIEWER_STDOUT_SELF,
@*/
PetscErrorCode  PetscViewerASCIIOpen(MPI_Comm comm,const char name[],PetscViewer *lab)
{
  PetscErrorCode  ierr;
  PetscViewerLink *vlink,*nv;
  PetscBool       flg,eq;
  size_t          len;

  PetscFunctionBegin;
  ierr = PetscStrlen(name,&len);CHKERRQ(ierr);
  if (!len) {
    ierr = PetscViewerASCIIGetStdout(comm,lab);CHKERRQ(ierr);
    ierr = PetscObjectReference((PetscObject)*lab);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  ierr = PetscSpinlockLock(&PetscViewerASCIISpinLockOpen);CHKERRQ(ierr);
  if (Petsc_Viewer_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelViewer,&Petsc_Viewer_keyval,(void*)0);CHKERRQ(ierr);
  }
  /*
       It would be better to move this code to PetscFileSetName(), but since it must return a preexisting communicator
     we cannot do that, because PetscFileSetName() takes a communicator that already exists.

      Also, if the original communicator that created the file has since been closed, this will not detect the old
      communicator and hence will overwrite the old data. It may be better to simply remove all this code.
  */
  /* make sure communicator is a PETSc communicator */
  ierr = PetscCommDuplicate(comm,&comm,NULL);CHKERRQ(ierr);
  /* has file already been opened into a viewer */
  ierr = MPI_Attr_get(comm,Petsc_Viewer_keyval,(void**)&vlink,(PetscMPIInt*)&flg);CHKERRQ(ierr);
  if (flg) {
    while (vlink) {
      ierr = PetscStrcmp(name,((PetscViewer_ASCII*)(vlink->viewer->data))->filename,&eq);CHKERRQ(ierr);
      if (eq) {
        ierr = PetscObjectReference((PetscObject)vlink->viewer);CHKERRQ(ierr);
        *lab = vlink->viewer;
        ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);
        ierr = PetscSpinlockUnlock(&PetscViewerASCIISpinLockOpen);CHKERRQ(ierr);
        PetscFunctionReturn(0);
      }
      vlink = vlink->next;
    }
  }
  ierr = PetscViewerCreate(comm,lab);CHKERRQ(ierr);
  ierr = PetscViewerSetType(*lab,PETSCVIEWERASCII);CHKERRQ(ierr);
  if (name) {
    ierr = PetscViewerFileSetName(*lab,name);CHKERRQ(ierr);
  }
  /* save viewer into communicator if needed later */
  ierr       = PetscNew(&nv);CHKERRQ(ierr);
  nv->viewer = *lab;
  if (!flg) {
    ierr = MPI_Attr_put(comm,Petsc_Viewer_keyval,nv);CHKERRQ(ierr);
  } else {
    ierr = MPI_Attr_get(comm,Petsc_Viewer_keyval,(void**)&vlink,(PetscMPIInt*)&flg);CHKERRQ(ierr);
    if (vlink) {
      while (vlink->next) vlink = vlink->next;
      vlink->next = nv;
    } else {
      ierr = MPI_Attr_put(comm,Petsc_Viewer_keyval,nv);CHKERRQ(ierr);
    }
  }
  ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);
  ierr = PetscSpinlockUnlock(&PetscViewerASCIISpinLockOpen);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
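
Expanding the .vb snippet from the notes above into a complete, hypothetical helper that also performs the PetscViewerDestroy() call the notes mention:

PetscErrorCode MyWriteMatrix(Mat A)
{
  PetscErrorCode ierr;
  PetscViewer    viewer;

  PetscFunctionBegin;
  /* per the notes above, A must have been created on PETSC_COMM_WORLD */
  ierr = PetscViewerASCIIOpen(PETSC_COMM_WORLD,"mat.output",&viewer);CHKERRQ(ierr);
  ierr = MatView(A,viewer);CHKERRQ(ierr);
  ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}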
Beispiel #27
void ctoctest_( MPI_Fint *errs )
{
    int errcnt = *errs;
    int baseattrval = (1 << (sizeof(int)*8-2))-3;
    MPI_Datatype cduptype;
    MPI_Comm   cdup;

    /* MPI-1 function */
    ccomm1Attr = baseattrval;
    MPI_Attr_put( MPI_COMM_SELF, ccomm1Key, &ccomm1Attr );
    /* Test that we have the same value */
    errcnt += cmpi1read( MPI_COMM_SELF, ccomm1Key, &ccomm1Attr, "C to C" );

    /* Dup, check that the copy routine does what is expected */
    MPI_Comm_dup( MPI_COMM_SELF, &cdup );
    errcnt += cmpi1read( cdup, ccomm1Key, &ccomm1Attr, "C to C dup" );
    if (ccomm1Attr != baseattrval + 1) {
	printf( " Did not increment int in C to C dup: %d %d\n", 
		ccomm1Attr, baseattrval + 1 );
	errcnt ++;
    }

    MPI_Comm_free( &cdup );
    if (ccomm1Attr != baseattrval) {
	printf( " Did not increment int in C to C delete: %d %d\n", 
		ccomm1Attr, baseattrval );
	errcnt ++;
    }

    /* MPI-2 functions */
    ccomm1Attr = 0;
    ccomm2Attr = baseattrval;
    MPI_Comm_set_attr( MPI_COMM_SELF, ccomm2Key, &ccomm2Attr );
    /* Test that we have the same value */
    errcnt += cmpi2read( MPI_COMM_SELF, ccomm2Key, &ccomm2Attr, "C to C (2)" );

    /* Dup, check that the copy routine does what is expected */
    MPI_Comm_dup( MPI_COMM_SELF, &cdup );
    errcnt += cmpi2read( cdup, ccomm2Key, &ccomm2Attr, "C to C dup (2)" );
    if (ccomm2Attr != baseattrval + 1) {
	printf( " Did not increment int in C to C dup: %d %d\n", 
		ccomm2Attr, baseattrval + 1 );
	errcnt ++;
    }

    MPI_Comm_free( &cdup );
    if (ccomm2Attr != baseattrval) {
	printf( " Did not increment int in C to C delete (2): %d %d\n", 
		ccomm2Attr, baseattrval );
	errcnt ++;
    }

    /* MPI-2 functions */
    ctype2Attr = baseattrval;
    MPI_Type_set_attr( MPI_INTEGER, ctype2Key, &ctype2Attr );
    /* Test that we have the same value */
    errcnt += cmpi2readtype( MPI_INTEGER, ctype2Key, &ctype2Attr, "C to C type (2)" );

    /* Dup, check that the copy routine does what is expected */
    MPI_Type_dup( MPI_INTEGER, &cduptype );
    errcnt += cmpi2readtype( cduptype, ctype2Key, &ctype2Attr, "C to C typedup (2)" );
    if (ctype2Attr != baseattrval + 1) {
	printf( " Did not increment int in C to C typedup: %d %d\n", 
		ctype2Attr, baseattrval + 1 );
	errcnt ++;
    }
    ccomm1Attr = 0;

    MPI_Type_free( &cduptype );
    if (ctype2Attr != baseattrval) {
	printf( " Did not increment int in C to C typedelete (2): %d %d\n", 
		ctype2Attr, baseattrval );
	errcnt ++;
    }

    
    *errs = errcnt;
}
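
The checks above assume copy callbacks that add one to the cached integer and delete callbacks that subtract one, so that a dup bumps the value and the matching free restores it. The real callbacks behind ccomm1Key/ccomm2Key/ctype2Key live elsewhere in this test; a sketch of the MPI-1 communicator variants under that assumption:

/* Hypothetical callbacks matching the increment/decrement behaviour checked above. */
static int incr_copy_fn(MPI_Comm oldcomm, int keyval, void *extra_state,
                        void *attribute_val_in, void *attribute_val_out, int *flag)
{
    *(int *)attribute_val_in += 1;                   /* bump the shared counter on copy */
    *(void **)attribute_val_out = attribute_val_in;  /* the new comm caches the same address */
    *flag = 1;
    return MPI_SUCCESS;
}

static int decr_delete_fn(MPI_Comm comm, int keyval, void *attribute_val, void *extra_state)
{
    *(int *)attribute_val -= 1;                      /* undo the bump when the comm is freed */
    return MPI_SUCCESS;
}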
Beispiel #28
/*@

MPI_Cart_create - Makes a new communicator to which topology information
                  has been attached

Input Parameters:
+ comm_old - input communicator (handle) 
. ndims - number of dimensions of cartesian grid (integer) 
. dims - integer array of size ndims specifying the number of processes in 
  each dimension 
. periods - logical array of size ndims specifying whether the grid is 
  periodic (true) or not (false) in each dimension 
- reorder - ranking may be reordered (true) or not (false) (logical) 

Output Parameter:
. comm_cart - communicator with new cartesian topology (handle) 

Algorithm:
We ignore 'reorder' info currently.

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_DIMS
.N MPI_ERR_ARG
@*/
int MPI_Cart_create ( MPI_Comm comm_old, int ndims, int *dims, int *periods, 
		      int reorder, MPI_Comm *comm_cart )
{
  int range[1][3];
  MPI_Group group_old, group;
  int i, rank, num_ranks = 1;
  int mpi_errno = MPI_SUCCESS;
  int flag, size;
  MPIR_TOPOLOGY *topo;
  struct MPIR_COMMUNICATOR *comm_old_ptr;
  static char myname[] = "MPI_CART_CREATE";

  TR_PUSH(myname);
  comm_old_ptr = MPIR_GET_COMM_PTR(comm_old);

  /* Check validity of arguments */
#ifndef MPIR_NO_ERROR_CHECKING
  MPIR_TEST_MPI_COMM(comm_old,comm_old_ptr,comm_old_ptr,myname);
  MPIR_TEST_ARG(comm_cart);
  MPIR_TEST_ARG(periods);
  if (ndims < 1 || dims == (int *)0) mpi_errno = MPI_ERR_DIMS;
  if (mpi_errno)
	return MPIR_ERROR(comm_old_ptr, mpi_errno, myname );
  
  /* Check for Intra-communicator */
  MPI_Comm_test_inter ( comm_old, &flag );
  if (flag)
    return MPIR_ERROR(comm_old_ptr, 
            MPIR_ERRCLASS_TO_CODE(MPI_ERR_COMM,MPIR_ERR_COMM_INTER), myname );
#endif

  /* Determine number of ranks in topology */
  for ( i=0; i<ndims; i++ )
    num_ranks    *= (dims[i]>0)?dims[i]:-dims[i];
  if ( num_ranks < 1 ) {
    (*comm_cart)  = MPI_COMM_NULL;
    return MPIR_ERROR( comm_old_ptr, MPI_ERR_TOPOLOGY, myname );
  }

  /* Is the old communicator big enough? */
  MPIR_Comm_size (comm_old_ptr, &size);
  if (num_ranks > size) {
      mpi_errno = MPIR_Err_setmsg( MPI_ERR_TOPOLOGY, MPIR_ERR_TOPO_TOO_LARGE, 
				   myname, 
                  "Topology size is larger than size of communicator",
		  "Topology size %d is greater than communicator size %d", 
				   num_ranks, size );
	return MPIR_ERROR(comm_old_ptr, mpi_errno, myname );
  }
	
  /* Make new comm */
  range[0][0] = 0; range[0][1] = num_ranks - 1; range[0][2] = 1;
  MPI_Comm_group ( comm_old, &group_old );
  MPI_Group_range_incl ( group_old, 1, range, &group );
  MPI_Comm_create  ( comm_old, group, comm_cart );
  MPI_Group_free( &group );
  MPI_Group_free( &group_old );

  /* Store topology information in new communicator */
  if ( (*comm_cart) != MPI_COMM_NULL ) {
      MPIR_ALLOC(topo,(MPIR_TOPOLOGY *) MPIR_SBalloc ( MPIR_topo_els ),
		 comm_old_ptr,MPI_ERR_EXHAUSTED,myname);
      MPIR_SET_COOKIE(&topo->cart,MPIR_CART_TOPOL_COOKIE)
      topo->cart.type         = MPI_CART;
      topo->cart.nnodes       = num_ranks;
      topo->cart.ndims        = ndims;
      MPIR_ALLOC(topo->cart.dims,(int *)MALLOC( sizeof(int) * 3 * ndims ),
		 comm_old_ptr,MPI_ERR_EXHAUSTED,myname);
      topo->cart.periods      = topo->cart.dims + ndims;
      topo->cart.position     = topo->cart.periods + ndims;
      for ( i=0; i<ndims; i++ ) {
	  topo->cart.dims[i]    = dims[i];
	  topo->cart.periods[i] = periods[i];
      }

    /* Compute my position */
    MPI_Comm_rank ( (*comm_cart), &rank );
    for ( i=0; i < ndims; i++ ) {
      num_ranks = num_ranks / dims[i];
      topo->cart.position[i]  = rank / num_ranks;
      rank   = rank % num_ranks;
    }

    /* cache topology information */
    MPI_Attr_put ( (*comm_cart), MPIR_TOPOLOGY_KEYVAL, (void *)topo );
  }
  TR_POP;
  return (mpi_errno);
}
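
For reference, a small standalone caller of the routine documented above: it lets MPI_Dims_create pick a balanced 2-D grid, creates the Cartesian communicator without reordering, and prints each rank's coordinates. The program is an illustration only and is not taken from the same source tree.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Comm cart;
    int world_rank, size, cart_rank, coords[2];
    int dims[2]    = {0, 0};   /* let MPI_Dims_create choose the factorization */
    int periods[2] = {1, 0};   /* periodic in the first dimension only */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    MPI_Dims_create(size, 2, dims);
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &cart);

    if (cart != MPI_COMM_NULL) {
        MPI_Comm_rank(cart, &cart_rank);
        MPI_Cart_coords(cart, cart_rank, 2, coords);
        printf("rank %d -> grid position (%d,%d) of %d x %d\n",
               cart_rank, coords[0], coords[1], dims[0], dims[1]);
        MPI_Comm_free(&cart);
    }
    MPI_Finalize();
    return 0;
}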
Beispiel #29
EXPORT_MPI_API int MPI_Comm_set_attr ( MPI_Comm comm, int keyval, void *attr_value )
{
  return MPI_Attr_put ( comm, keyval, attr_value );
}
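
In the same spirit, the other MPI-2 attribute entry points can be thin wrappers over their MPI-1 counterparts. The following sketch is hypothetical and not taken from the same source:

EXPORT_MPI_API int MPI_Comm_get_attr ( MPI_Comm comm, int keyval, void *attr_value, int *flag )
{
  return MPI_Attr_get ( comm, keyval, attr_value, flag );
}

EXPORT_MPI_API int MPI_Comm_delete_attr ( MPI_Comm comm, int keyval )
{
  return MPI_Attr_delete ( comm, keyval );
}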
Beispiel #30
/* ADIOI_cb_gather_name_array() - gather a list of processor names from all processes
 *                          in a communicator and store them on rank 0.
 *
 * This is a collective call on the communicator(s) passed in.
 *
 * Obtains a rank-ordered list of processor names from the processes in
 * "dupcomm".
 *
 * Returns 0 on success, -1 on failure.
 *
 * NOTE: Needs some work to cleanly handle out of memory cases!  
 */
int ADIOI_cb_gather_name_array(MPI_Comm comm,
			       MPI_Comm dupcomm,
			       ADIO_cb_name_array *arrayp)
{
    char my_procname[MPI_MAX_PROCESSOR_NAME], **procname = 0;
    int *procname_len = NULL, my_procname_len, *disp = NULL, i;
    int commsize, commrank, found;
    ADIO_cb_name_array array = NULL;
    int alloc_size;

    if (ADIOI_cb_config_list_keyval == MPI_KEYVAL_INVALID) {
        /* cleaned up by ADIOI_End_call */
	MPI_Keyval_create((MPI_Copy_function *) ADIOI_cb_copy_name_array, 
			  (MPI_Delete_function *) ADIOI_cb_delete_name_array,
			  &ADIOI_cb_config_list_keyval, NULL);
    }
    else {
	MPI_Attr_get(comm, ADIOI_cb_config_list_keyval, (void *) &array, &found);
        if (found) {
            ADIOI_Assert(array != NULL);
	    *arrayp = array;
	    return 0;
	}
    }

    MPI_Comm_size(dupcomm, &commsize);
    MPI_Comm_rank(dupcomm, &commrank);

    MPI_Get_processor_name(my_procname, &my_procname_len);

    /* allocate space for everything */
    array = (ADIO_cb_name_array) ADIOI_Malloc(sizeof(*array));
    if (array == NULL) {
	return -1;
    }
    array->refct = 2; /* we're going to associate this with two comms */

    if (commrank == 0) {
	/* process 0 keeps the real list */
	array->namect = commsize;

	array->names = (char **) ADIOI_Malloc(sizeof(char *) * commsize);
	if (array->names == NULL) {
	    return -1;
	}
	procname = array->names; /* simpler to read */

	procname_len = (int *) ADIOI_Malloc(commsize * sizeof(int));
	if (procname_len == NULL) { 
	    return -1;
	}
    }
    else {
	/* everyone else just keeps an empty list as a placeholder */
	array->namect = 0;
	array->names = NULL;
    }
    /* gather lengths first */
    MPI_Gather(&my_procname_len, 1, MPI_INT, 
	       procname_len, 1, MPI_INT, 0, dupcomm);

    if (commrank == 0) {
#ifdef CB_CONFIG_LIST_DEBUG
	for (i=0; i < commsize; i++) {
	    FPRINTF(stderr, "len[%d] = %d\n", i, procname_len[i]);
	}
#endif

	alloc_size = 0;
	for (i=0; i < commsize; i++) {
	    /* add one to the lengths because we need to count the
	     * terminator, and we are going to use this list of lengths
	     * again in the gatherv.  
	     */
	    alloc_size += ++procname_len[i];
	}
	
	procname[0] = ADIOI_Malloc(alloc_size);
	if (procname[0] == NULL) {
	    return -1;
	}

	for (i=1; i < commsize; i++) {
	    procname[i] = procname[i-1] + procname_len[i-1];
	}
	
	/* create our list of displacements for the gatherv.  we're going
	 * to do everything relative to the start of the region allocated
	 * for procname[0]
	 */
	disp = ADIOI_Malloc(commsize * sizeof(int));
	disp[0] = 0;
	for (i=1; i < commsize; i++) {
	    disp[i] = (int) (procname[i] - procname[0]);
	}

    }

    /* now gather strings */
    if (commrank == 0) {
	MPI_Gatherv(my_procname, my_procname_len + 1, MPI_CHAR, 
		    procname[0], procname_len, disp, MPI_CHAR,
		    0, dupcomm);
    }
    else {
	/* if we didn't do this, we would need to allocate procname[]
	 * on all processes...which seems a little silly.
	 */
	MPI_Gatherv(my_procname, my_procname_len + 1, MPI_CHAR, 
		    NULL, NULL, NULL, MPI_CHAR, 0, dupcomm);
    }

    if (commrank == 0) {
	/* no longer need the displacements or lengths */
	ADIOI_Free(disp);
	ADIOI_Free(procname_len);

#ifdef CB_CONFIG_LIST_DEBUG
	for (i=0; i < commsize; i++) {
	    FPRINTF(stderr, "name[%d] = %s\n", i, procname[i]);
	}
#endif
    }

    /* store the attribute; we want to store SOMETHING on all processes
     * so that they can all tell if we have gone through this procedure 
     * or not for the given communicator.
     *
     * specifically we put it on both the original comm, so we can find
     * it next time an open is performed on this same comm, and on the
     * dupcomm, so we can use it in I/O operations.
     */
    MPI_Attr_put(comm, ADIOI_cb_config_list_keyval, array);
    MPI_Attr_put(dupcomm, ADIOI_cb_config_list_keyval, array);
    *arrayp = array;
    return 0;
}
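
As the closing comment explains, the array is cached on both communicators so that later opens can find it again. A hedged sketch of that lookup path (the helper name is hypothetical; ADIO_cb_name_array and ADIOI_cb_config_list_keyval come from the surrounding ROMIO code):

/* Hypothetical helper: fetch a previously cached name array, if any. */
static int lookup_cached_name_array(MPI_Comm comm, ADIO_cb_name_array *arrayp)
{
    ADIO_cb_name_array array = NULL;
    int found = 0;

    if (ADIOI_cb_config_list_keyval == MPI_KEYVAL_INVALID) return 0;  /* nothing was ever cached */

    MPI_Attr_get(comm, ADIOI_cb_config_list_keyval, (void *) &array, &found);
    if (!found) return 0;

    *arrayp = array;   /* rank 0 sees the full list; other ranks see the empty placeholder */
    return 1;
}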