/*@C
   PETSC_VIEWER_BINARY_ - Creates a binary PetscViewer shared by all processors in a communicator.

   Collective on MPI_Comm

   Input Parameter:
.  comm - the MPI communicator to share the binary PetscViewer

   Level: intermediate

   Options Database Keys:
+    -viewer_binary_filename <name>
.    -viewer_binary_skip_info
-    -viewer_binary_skip_options

   Environmental variables:
-   PETSC_VIEWER_BINARY_FILENAME

   Notes:
     Unlike almost all other PETSc routines, PETSC_VIEWER_BINARY_ does not return
     an error code.  The binary PetscViewer is usually used in the form
$       XXXView(XXX object,PETSC_VIEWER_BINARY_(comm));

     The viewer is cached as an MPI attribute on PETSc's inner duplicate of comm,
     so repeated calls with the same communicator return the same viewer.

.seealso: PETSC_VIEWER_BINARY_WORLD, PETSC_VIEWER_BINARY_SELF, PetscViewerBinaryOpen(), PetscViewerCreate(),
          PetscViewerDestroy()
@*/
PetscViewer PETSC_VIEWER_BINARY_(MPI_Comm comm)
{
  PetscErrorCode ierr;
  PetscBool      flg;
  PetscViewer    viewer;
  char           fname[PETSC_MAX_PATH_LEN];
  MPI_Comm       ncomm;

  PetscFunctionBegin;
  /* Work on PETSc's cached inner communicator so the attribute lives on a PETSc-owned comm */
  ierr = PetscCommDuplicate(comm,&ncomm,NULL);if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  /* One-time creation of the keyval under which the shared viewer is cached */
  if (Petsc_Viewer_Binary_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Viewer_Binary_keyval,0);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  }
  ierr = MPI_Attr_get(ncomm,Petsc_Viewer_Binary_keyval,(void**)&viewer,(int*)&flg);
  if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  if (!flg) { /* PetscViewer not yet created */
    /* File name comes from the environment if set, otherwise "binaryoutput" */
    ierr = PetscOptionsGetenv(ncomm,"PETSC_VIEWER_BINARY_FILENAME",fname,PETSC_MAX_PATH_LEN,&flg);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    if (!flg) {
      ierr = PetscStrcpy(fname,"binaryoutput");
      if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    }
    ierr = PetscViewerBinaryOpen(ncomm,fname,FILE_MODE_WRITE,&viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    /* Register the viewer for destruction during PETSc's finalize-time cleanup */
    ierr = PetscObjectRegisterDestroy((PetscObject)viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    /* Cache the viewer on the inner communicator for subsequent calls */
    ierr = MPI_Attr_put(ncomm,Petsc_Viewer_Binary_keyval,(void*)viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  }
  /* Drop the reference taken by PetscCommDuplicate() */
  ierr = PetscCommDestroy(&ncomm);
  if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  PetscFunctionReturn(viewer);
}
/*@C
   PETSC_MATLAB_ENGINE_ - Creates a matlab engine shared by all processors in a communicator.

   Not Collective

   Input Parameter:
.  comm - the MPI communicator to share the engine

   Level: developer

   Notes:
     Unlike almost all other PETSc routines, this does not return an error code.
     On any internal failure it reports the error and returns 0 (a NULL engine).
     Usually used in the form
$      PetscMatlabEngineYYY(XXX object,PETSC_MATLAB_ENGINE_(comm));

.seealso: PetscMatlabEngineDestroy(), PetscMatlabEnginePut(), PetscMatlabEngineGet(),
          PetscMatlabEngineEvaluate(), PetscMatlabEngineGetOutput(), PetscMatlabEnginePrintOutput(),
          PetscMatlabEngineCreate(), PetscMatlabEnginePutArray(), PetscMatlabEngineGetArray(),
          PetscMatlabEngine, PETSC_MATLAB_ENGINE_WORLD, PETSC_MATLAB_ENGINE_SELF
@*/
PetscMatlabEngine PETSC_MATLAB_ENGINE_(MPI_Comm comm)
{
  PetscErrorCode    ierr;
  PetscBool         flg;
  PetscMatlabEngine mengine;

  PetscFunctionBegin;
  /* One-time creation of the keyval under which the shared engine is cached.
     FIX: on error, return immediately instead of setting mengine = 0 and
     continuing with an invalid keyval (matches the other PETSC_*_() routines). */
  if (Petsc_Matlab_Engine_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Matlab_Engine_keyval,0);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  }
  ierr = MPI_Attr_get(comm,Petsc_Matlab_Engine_keyval,(void**)&mengine,(int*)&flg);
  if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  if (!flg) { /* engine not yet created for this communicator */
    char *machinename = 0,machine[64];
    /* Optional machine name for the engine; NULL means the local default */
    ierr = PetscOptionsGetString(NULL,NULL,"-matlab_engine_machine",machine,64,&flg);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    if (flg) machinename = machine;
    ierr = PetscMatlabEngineCreate(comm,machinename,&mengine);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    /* Register the engine for destruction during PETSc's finalize-time cleanup */
    ierr = PetscObjectRegisterDestroy((PetscObject)mengine);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    /* Cache the engine on the communicator for subsequent calls */
    ierr = MPI_Attr_put(comm,Petsc_Matlab_Engine_keyval,mengine);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  }
  PetscFunctionReturn(mengine);
}
/* MPIR_Topology_init - Initializes topology code.
 *
 * Creates the keyval used to cache topology information on communicators.
 * FIX: use a proper (void) prototype; an empty parameter list in a C function
 * definition declares unspecified parameters (and is removed in C23).
 */
void MPIR_Topology_init(void)
{
    MPI_Keyval_create(MPIR_Topology_copy_fn, MPIR_Topology_delete_fn,
                      &MPIR_TOPOLOGY_KEYVAL, (void *)0);
}
/* PetscViewerDestroy_ASCII - destroys an ASCII viewer's private data and
 * unlinks the viewer from the per-communicator list of ASCII viewers that is
 * cached as an MPI attribute under Petsc_Viewer_keyval. */
PetscErrorCode PetscViewerDestroy_ASCII(PetscViewer viewer)
{
  PetscErrorCode    ierr;
  PetscViewer_ASCII *vascii = (PetscViewer_ASCII *)viewer->data;
  PetscViewerLink   *vlink;
  PetscBool         flg;

  PetscFunctionBegin;
  /* A still-outstanding sub-viewer means the caller broke the restore-before-destroy protocol */
  if (vascii->sviewer) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"ASCII PetscViewer destroyed before restoring singleton or subcomm PetscViewer");
  ierr = PetscViewerFileClose_ASCII(viewer);CHKERRQ(ierr);
  ierr = PetscFree(vascii);CHKERRQ(ierr);

  /* remove the viewer from the list in the MPI Communicator */
  if (Petsc_Viewer_keyval == MPI_KEYVAL_INVALID) {
    /* Keyval may not exist yet if no viewer was ever cached; create it so the get below is legal */
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelViewer,&Petsc_Viewer_keyval,(void*)0);CHKERRQ(ierr);
  }
  ierr = MPI_Attr_get(((PetscObject)viewer)->comm,Petsc_Viewer_keyval,(void**)&vlink,(PetscMPIInt*)&flg);CHKERRQ(ierr);
  if (flg) {
    if (vlink && vlink->viewer == viewer) {
      /* Head of the list: re-point the communicator attribute at the next link */
      ierr = MPI_Attr_put(((PetscObject)viewer)->comm,Petsc_Viewer_keyval,vlink->next);CHKERRQ(ierr);
      ierr = PetscFree(vlink);CHKERRQ(ierr);
    } else {
      /* Interior node: walk the list, unlink and free the matching entry */
      while (vlink && vlink->next) {
        if (vlink->next->viewer == viewer) {
          PetscViewerLink *nv = vlink->next;
          vlink->next = vlink->next->next;
          ierr = PetscFree(nv);CHKERRQ(ierr);
        }
        vlink = vlink->next;
      }
    }
  }
  PetscFunctionReturn(0);
}
/*@C
   PETSC_VIEWER_DRAW_ - Creates a window PetscViewer shared by all processors in a communicator.

   Collective on MPI_Comm

   Input Parameter:
.  comm - the MPI communicator to share the window PetscViewer

   Level: intermediate

   Notes:
     Unlike almost all other PETSc routines, PETSC_VIEWER_DRAW_ does not return
     an error code.  The window is usually used in the form
$       XXXView(XXX object,PETSC_VIEWER_DRAW_(comm));

     The viewer is cached as an MPI attribute on PETSc's inner duplicate of comm,
     so repeated calls with the same communicator return the same viewer.

.seealso: PETSC_VIEWER_DRAW_WORLD, PETSC_VIEWER_DRAW_SELF, PetscViewerDrawOpen(),
@*/
PetscViewer PETSC_VIEWER_DRAW_(MPI_Comm comm)
{
  PetscErrorCode ierr;
  PetscMPIInt    flag;
  PetscViewer    viewer;
  MPI_Comm       ncomm;

  PetscFunctionBegin;
  /* Work on PETSc's cached inner communicator so the attribute lives on a PETSc-owned comm */
  ierr = PetscCommDuplicate(comm,&ncomm,NULL);if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  /* One-time creation of the keyval under which the shared viewer is cached */
  if (Petsc_Viewer_Draw_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Viewer_Draw_keyval,0);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  }
  ierr = MPI_Attr_get(ncomm,Petsc_Viewer_Draw_keyval,(void**)&viewer,&flag);
  if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  if (!flag) { /* PetscViewer not yet created */
    /* Default 300x300 window with system-chosen placement */
    ierr = PetscViewerDrawOpen(ncomm,0,0,PETSC_DECIDE,PETSC_DECIDE,300,300,&viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    /* Register the viewer for destruction during PETSc's finalize-time cleanup */
    ierr = PetscObjectRegisterDestroy((PetscObject)viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    /* Cache the viewer on the inner communicator for subsequent calls */
    ierr = MPI_Attr_put(ncomm,Petsc_Viewer_Draw_keyval,(void*)viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  }
  /* Drop the reference taken by PetscCommDuplicate() */
  ierr = PetscCommDestroy(&ncomm);
  if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  PetscFunctionReturn(viewer);
}
/* MPIR_MPIOInit - one-time initialization of the MPI-IO (ADIO) layer.
 *
 * On first call, verifies MPI is initialized, creates the ADIO init keyval,
 * hooks ADIO shutdown onto MPI_COMM_SELF teardown, and runs ADIO_Init.
 * Sets *error_code to MPI_SUCCESS unless MPI has not been initialized. */
void MPIR_MPIOInit(int * error_code)
{
    int flag;
    char myname[] = "MPIR_MPIOInit";

    /* first check if ADIO has been initialized. If not, initialize it */
    if (ADIO_Init_keyval == MPI_KEYVAL_INVALID) {
        MPI_Initialized(&flag);
        /* --BEGIN ERROR HANDLING-- */
        if (!flag) {
            *error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                               myname, __LINE__, MPI_ERR_OTHER,
                                               "**initialized", 0);
            *error_code = MPIO_Err_return_file(MPI_FILE_NULL, *error_code);
            return;
        }
        /* --END ERROR HANDLING-- */
        MPI_Keyval_create(MPI_NULL_COPY_FN, ADIOI_End_call, &ADIO_Init_keyval,
                          (void *) 0);

        /* put a dummy attribute on MPI_COMM_SELF, because we want the delete
           function to be called when MPI_COMM_SELF is freed.  Clarified in
           MPI-2 section 4.8, the standard mandates that attributes on
           MPI_COMM_SELF get cleaned up early in MPI_Finalize */
        MPI_Attr_put(MPI_COMM_SELF, ADIO_Init_keyval, (void *) 0);

        /* initialize ADIO */
        ADIO_Init( (int *)0, (char ***)0, error_code);
    }
    *error_code = MPI_SUCCESS;
}
int test_communicators( void ) { MPI_Comm dup_comm_world, d2; ptrdiff_t world_rank; int world_size, key_1; int err; MPI_Aint value; int rank; MPI_Comm_rank( MPI_COMM_WORLD, &rank ); world_rank=rank; MPI_Comm_size( MPI_COMM_WORLD, &world_size ); if (world_rank == 0) { printf( "*** Attribute copy/delete return codes ***\n" ); } MPI_Comm_dup( MPI_COMM_WORLD, &dup_comm_world ); MPI_Barrier( dup_comm_world ); MPI_Errhandler_set( dup_comm_world, MPI_ERRORS_RETURN ); value = - 11; if ((err=MPI_Keyval_create( copybomb_fn, deletebomb_fn, &key_1, &value ))) abort_msg( "Keyval_create", err ); err = MPI_Attr_put( dup_comm_world, key_1, (void *)world_rank ); if (err) { printf( "Error with first put\n" ); } err = MPI_Attr_put( dup_comm_world, key_1, (void *)(2*world_rank) ); if (err == MPI_SUCCESS) { printf( "delete function return code was MPI_SUCCESS in put\n" ); } /* Because the attribute delete function should fail, the attribute should *not be removed* */ err = MPI_Attr_delete( dup_comm_world, key_1 ); if (err == MPI_SUCCESS) { printf( "delete function return code was MPI_SUCCESS in delete\n" ); } err = MPI_Comm_dup( dup_comm_world, &d2 ); if (err == MPI_SUCCESS) { printf( "copy function return code was MPI_SUCCESS in dup\n" ); } if (err && d2 != MPI_COMM_NULL) { printf( "dup did not return MPI_COMM_NULL on error\n" ); } delete_flag = 1; MPI_Comm_free( &dup_comm_world ); return 0; }
/*@
    MPI_File_delete - Deletes a file

Input Parameters:
. filename - name of file to delete (string); may carry a "filesystem:" prefix
. info - info object (handle); currently unused by this routine

Returns: the error code produced by ADIO_Delete.

.N fortran
@*/
int MPI_File_delete(char *filename, MPI_Info info)
{
    int flag, error_code;
    char *tmp;
#ifdef MPI_hpux
    int fl_xmpi;

    HPMP_IO_START(fl_xmpi, BLKMPIFILEDELETE, TRDTBLOCK,
                  MPI_FILE_NULL, MPI_DATATYPE_NULL, -1);
#endif /* MPI_hpux */

    /* first check if ADIO has been initialized. If not, initialize it */
    if (ADIO_Init_keyval == MPI_KEYVAL_INVALID) {
        /* check if MPI itself has been initialized. If not, flag an error.
           Can't initialize it here, because don't know argc, argv */
        MPI_Initialized(&flag);
        if (!flag) {
            FPRINTF(stderr, "Error: MPI_Init() must be called before using MPI-IO\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        MPI_Keyval_create(MPI_NULL_COPY_FN, ADIOI_End_call, &ADIO_Init_keyval,
                          (void *) 0);
        /* put a dummy attribute on MPI_COMM_WORLD, because we want the delete
           function to be called when MPI_COMM_WORLD is freed. Hopefully the
           MPI library frees MPI_COMM_WORLD when MPI_Finalize is called,
           though the standard does not mandate this. */
        MPI_Attr_put(MPI_COMM_WORLD, ADIO_Init_keyval, (void *) 0);
        /* initialize ADIO */
        ADIO_Init( (int *)0, (char ***)0, &error_code);
    }

    /* strip an optional "filesystem:" prefix (e.g. "ufs:myfile") */
    tmp = strchr(filename, ':');
#ifdef WIN32
    /* Unfortunately Windows uses ':' behind the drive letter.
       So we check if there is only one letter before the ':'
       Please do not use a single letter filesystem name! */
    if(tmp && ((tmp-filename) == 1)) tmp = 0;
#endif
    if (tmp) filename = tmp + 1;

    ADIO_Delete(filename, &error_code);

#ifdef MPI_hpux
    HPMP_IO_END(fl_xmpi, MPI_FILE_NULL, MPI_DATATYPE_NULL, -1);
#endif /* MPI_hpux */
    return error_code;
}
/* ccreatekeys_ - Fortran-callable helper that creates four keyvals and
 * returns their handles as Fortran integers: one communicator keyval via the
 * deprecated MPI-1 interface, and one each for communicators, datatypes and
 * windows via the MPI-2 interfaces.
 * NOTE(review): ccomm1Key/ccomm2Key/ctype2Key/cwin2Key and the *Extra state
 * appear to be file-scope variables defined elsewhere -- confirm. */
void ccreatekeys_( MPI_Fint *ccomm1_key, MPI_Fint *ccomm2_key,
                   MPI_Fint *ctype2_key, MPI_Fint *cwin2_key )
{
    /* Deprecated MPI-1 routine, used deliberately (presumably to test both APIs) */
    MPI_Keyval_create( CMPI1_COPY_FN, CMPI1_DELETE_FN, &ccomm1Key,
                       &ccomm1Extra );
    *ccomm1_key = (MPI_Fint)ccomm1Key;

    MPI_Comm_create_keyval( CMPI1_COPY_FN, CMPI1_DELETE_FN, &ccomm2Key,
                            &ccomm2Extra );
    *ccomm2_key = (MPI_Fint)ccomm2Key;

    MPI_Type_create_keyval( TYPE_COPY_FN, TYPE_DELETE_FN, &ctype2Key,
                            &ctype2Extra );
    *ctype2_key = (MPI_Fint)ctype2Key;

    MPI_Win_create_keyval( WIN_COPY_FN, WIN_DELETE_FN, &cwin2Key,
                           &cwin2Extra );
    *cwin2_key = (MPI_Fint)cwin2Key;
}
/*@C
   PetscViewerASCIIGetStdout - Creates a ASCII PetscViewer shared by all processors
   in a communicator.  Error returning version of PETSC_VIEWER_STDOUT_()

   Collective on MPI_Comm

   Input Parameter:
.  comm - the MPI communicator to share the PetscViewer

   Output Parameter:
.  viewer - the shared stdout viewer (cached per communicator)

   Level: beginner

   Notes:
     This should be used in all PETSc source code instead of PETSC_VIEWER_STDOUT_()

.seealso: PETSC_VIEWER_DRAW_(), PetscViewerASCIIOpen(), PETSC_VIEWER_STDERR_,
          PETSC_VIEWER_STDOUT_WORLD, PETSC_VIEWER_STDOUT_SELF
@*/
PetscErrorCode PetscViewerASCIIGetStdout(MPI_Comm comm,PetscViewer *viewer)
{
  PetscErrorCode ierr;
  PetscBool      flg;
  MPI_Comm       ncomm;

  PetscFunctionBegin;
  /* Work on PETSc's cached inner communicator so the attribute lives on a PETSc-owned comm */
  ierr = PetscCommDuplicate(comm,&ncomm,NULL);CHKERRQ(ierr);
  /* One-time creation of the keyval under which the shared viewer is cached */
  if (Petsc_Viewer_Stdout_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Viewer_Stdout_keyval,0);CHKERRQ(ierr);
  }
  ierr = MPI_Attr_get(ncomm,Petsc_Viewer_Stdout_keyval,(void**)viewer,(PetscMPIInt*)&flg);CHKERRQ(ierr);
  if (!flg) { /* PetscViewer not yet created */
    ierr = PetscViewerASCIIOpen(ncomm,"stdout",viewer);CHKERRQ(ierr);
    /* Register the viewer for destruction during PETSc's finalize-time cleanup */
    ierr = PetscObjectRegisterDestroy((PetscObject)*viewer);CHKERRQ(ierr);
    ierr = MPI_Attr_put(ncomm,Petsc_Viewer_Stdout_keyval,(void*)*viewer);CHKERRQ(ierr);
  }
  /* Drop the reference taken by PetscCommDuplicate() */
  ierr = PetscCommDestroy(&ncomm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@
    MPI_Info_create - Creates a new info object

Output Parameters:
. info - info object (handle); an empty dummy head node of a linked list of
         (key,value) pairs

.N fortran
@*/
int MPI_Info_create(MPI_Info *info)
{
    int flag, error_code;

    /* first check if ADIO has been initialized. If not, initialize it */
    if (ADIO_Init_keyval == MPI_KEYVAL_INVALID) {
        /* check if MPI itself has been initialized. If not, flag an error.
           Can't initialize it here, because don't know argc, argv */
        MPI_Initialized(&flag);
        if (!flag) {
            printf("Error: MPI_Init() must be called before using MPI_Info_create\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        MPI_Keyval_create(MPI_NULL_COPY_FN, ADIOI_End_call, &ADIO_Init_keyval,
                          (void *) 0);
        /* put a dummy attribute on MPI_COMM_WORLD, because we want the delete
           function to be called when MPI_COMM_WORLD is freed. Hopefully the
           MPI library frees MPI_COMM_WORLD when MPI_Finalize is called,
           though the standard does not mandate this. */
        MPI_Attr_put(MPI_COMM_WORLD, ADIO_Init_keyval, (void *) 0);
        /* initialize ADIO */
        ADIO_Init( (int *)0, (char ***)0, &error_code);
    }

    /* NOTE(review): the ADIOI_Malloc result is not checked before use --
       presumably ADIOI_Malloc aborts on failure; confirm. */
    *info = (MPI_Info) ADIOI_Malloc(sizeof(struct MPIR_Info));
    (*info)->cookie = MPIR_INFO_COOKIE;
    (*info)->key = 0;
    (*info)->value = 0;
    (*info)->next = 0;
    /* this is the first structure in this linked list. it is
       always kept empty. new (key,value) pairs are added after it. */

    return MPI_SUCCESS;
}
/*@
  MPE_GetTags - Returns tags that can be used in communication with a
  communicator

  Input Parameters:
+ comm_in - Input communicator
- ntags   - Number of tags

  Output Parameters:
+ comm_out  - Output communicator.  May be 'comm_in'.
- first_tag - First tag available

  Returns:
  MPI_SUCCESS on success, MPI error class on failure.

  Notes:
  This routine returns the requested number of tags, with the tags being
  'first_tag', 'first_tag+1', ..., 'first_tag+ntags-1'.  Tags are handed out
  from MPI_TAG_UB downward; the next free tag is cached on the communicator
  as an MPI attribute.  These tags are guarenteed to be unique within
  'comm_out'.

.seealso: MPE_ReturnTags
@*/
int MPE_GetTags( MPI_Comm comm_in, int ntags, MPI_Comm *comm_out,
                 int *first_tag )
{
    int mpe_errno = MPI_SUCCESS;
    int *tagvalp, *maxval, flag;

    /* Lazily create the keyval that tracks the next free tag on each comm */
    if (MPE_Tag_keyval == MPI_KEYVAL_INVALID) {
        MPI_Keyval_create( MPI_NULL_COPY_FN, MPE_DelTag,
                           &MPE_Tag_keyval, (void *)0 );
    }

    if ((mpe_errno = MPI_Attr_get( comm_in, MPE_Tag_keyval,
                                   &tagvalp, &flag )))
        return mpe_errno;

    if (!flag) {
        /* This communicator is not yet known to this system, so we
           dup it and setup the first value */
        MPI_Comm_dup( comm_in, comm_out );
        comm_in = *comm_out;
        MPI_Attr_get( MPI_COMM_WORLD, MPI_TAG_UB, &maxval, &flag );
        /* NOTE(review): two ints are allocated but only the first is set
           here -- presumably the second is used elsewhere (MPE_ReturnTags?);
           confirm. */
        tagvalp = (int *)malloc( 2 * sizeof(int) );
        if (!tagvalp) return MPI_ERR_OTHER;
        *tagvalp = *maxval;
        *first_tag = *tagvalp - ntags;
        *tagvalp = *first_tag;
        MPI_Attr_put( comm_in, MPE_Tag_keyval, tagvalp );
        return MPI_SUCCESS;
    }

    *comm_out = comm_in;
    if (*tagvalp < ntags) {
        /* Error, out of tags.  Another solution would be to do an
           MPI_Comm_dup. */
        return MPI_ERR_INTERN;
    }
    *first_tag = *tagvalp - ntags;
    *tagvalp = *first_tag;

    return MPI_SUCCESS;
}
/*@
   PetscSequentialPhaseBegin - Begins a sequential section of code.

   Collective on MPI_Comm

   Input Parameters:
+  comm - Communicator to sequentialize.
-  ng   - Number in processor group.  This many processes are allowed to
          execute at the same time (usually 1)

   Level: intermediate

   Notes:
   PetscSequentialPhaseBegin() and PetscSequentialPhaseEnd() provide a way to
   force a section of code to be executed by the processes in rank order.
   Typically, this is done with
.vb
      PetscSequentialPhaseBegin(comm, 1);
      <code to be executed sequentially>
      PetscSequentialPhaseEnd(comm, 1);
.ve

   Often, the sequential code contains output statements (e.g., printf) to
   be executed.  Note that you may need to flush the I/O buffers before
   calling PetscSequentialPhaseEnd().  Also, note that some systems do not
   propagate I/O in any order to the controling terminal (in other words,
   even if you flush the output, you may not get the data in the order that
   you want).

.seealso: PetscSequentialPhaseEnd()

   Concepts: sequential stage
@*/
PetscErrorCode PetscSequentialPhaseBegin(MPI_Comm comm,int ng)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;
  MPI_Comm       local_comm,*addr_local_comm;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  /* Nothing to sequentialize on a single process */
  if (size == 1) PetscFunctionReturn(0);

  /* Get the private communicator for the sequential operations */
  if (Petsc_Seq_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Seq_keyval,0);CHKERRQ(ierr);
  }

  /* Dup so the ordering traffic cannot interfere with the caller's messages;
     the heap copy of the handle is cached on comm for PetscSequentialPhaseEnd() */
  ierr = MPI_Comm_dup(comm,&local_comm);CHKERRQ(ierr);
  ierr = PetscMalloc(sizeof(MPI_Comm),&addr_local_comm);CHKERRQ(ierr);

  *addr_local_comm = local_comm;

  ierr = MPI_Attr_put(comm,Petsc_Seq_keyval,(void*)addr_local_comm);CHKERRQ(ierr);
  ierr = PetscSequentialPhaseBegin_Private(local_comm,ng);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@C
   PETSC_VIEWER_DRAW_ - Creates a window PetscViewer shared by all processors in a communicator.

   Collective on MPI_Comm

   Input Parameter:
.  comm - the MPI communicator to share the window PetscViewer

   Level: intermediate

   Notes:
     Unlike almost all other PETSc routines, PETSC_VIEWER_DRAW_ does not return
     an error code.  The window is usually used in the form
$       XXXView(XXX object,PETSC_VIEWER_DRAW_(comm));

     The viewer is cached as an MPI attribute directly on comm, so repeated
     calls with the same communicator return the same viewer.

.seealso: PETSC_VIEWER_DRAW_WORLD, PETSC_VIEWER_DRAW_SELF, PetscViewerDrawOpen(),
@*/
PetscViewer PETSC_DLLEXPORT PETSC_VIEWER_DRAW_(MPI_Comm comm)
{
  PetscErrorCode ierr;
  PetscMPIInt    flag;
  PetscViewer    viewer;

  PetscFunctionBegin;
  /* One-time creation of the keyval under which the shared viewer is cached */
  if (Petsc_Viewer_Draw_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Viewer_Draw_keyval,0);
    if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
  }
  ierr = MPI_Attr_get(comm,Petsc_Viewer_Draw_keyval,(void **)&viewer,&flag);
  if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
  if (!flag) { /* PetscViewer not yet created */
    /* Default 300x300 window with system-chosen placement */
    ierr = PetscViewerDrawOpen(comm,0,0,PETSC_DECIDE,PETSC_DECIDE,300,300,&viewer);
    if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
    /* Register the viewer for destruction during PETSc's finalize-time cleanup */
    ierr = PetscObjectRegisterDestroy((PetscObject)viewer);
    if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
    /* Cache the viewer on the communicator for subsequent calls */
    ierr = MPI_Attr_put(comm,Petsc_Viewer_Draw_keyval,(void*)viewer);
    if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
  }
  PetscFunctionReturn(viewer);
}
/*@C
   PETSC_VIEWER_SOCKET_ - Creates a socket viewer shared by all processors in a communicator.

   Collective on MPI_Comm

   Input Parameter:
.  comm - the MPI communicator to share the socket PetscViewer

   Level: intermediate

   Options Database Keys:
   For use with the default PETSC_VIEWER_SOCKET_WORLD or if
   PETSC_NULL is passed for machine or PETSC_DEFAULT is passed for port
$    -viewer_socket_machine <machine>
$    -viewer_socket_port <port>

   Environmental variables:
+   PETSC_VIEWER_SOCKET_PORT portnumber
-   PETSC_VIEWER_SOCKET_MACHINE machine name

   Notes:
     Unlike almost all other PETSc routines, PetscViewer_SOCKET_ does not return
     an error code.  The socket PetscViewer is usually used in the form
$       XXXView(XXX object,PETSC_VIEWER_SOCKET_(comm));

     Currently the only socket client available is Matlab.  See
     src/dm/da/examples/tests/ex12.c and ex12.m for an example of usage.

     Connects to a waiting socket and stays connected until PetscViewerDestroy()
     is called.  Use this for communicating with an interactive Matlab session,
     see PETSC_VIEWER_MATLAB_() for communicating with the Matlab engine.

     The viewer is cached as an MPI attribute directly on comm, so repeated
     calls with the same communicator return the same viewer.

.seealso: PETSC_VIEWER_SOCKET_WORLD, PETSC_VIEWER_SOCKET_SELF, PetscViewerSocketOpen(), PetscViewerCreate(),
          PetscViewerSocketSetConnection(), PetscViewerDestroy(), PETSC_VIEWER_SOCKET_(), PetscViewerBinaryWrite(),
          PetscViewerBinaryRead(), PetscViewerBinaryWriteStringArray(), PetscBinaryViewerGetDescriptor(),
          PETSC_VIEWER_MATLAB_()
@*/
PetscViewer PETSC_DLLEXPORT PETSC_VIEWER_SOCKET_(MPI_Comm comm)
{
  PetscErrorCode ierr;
  PetscTruth     flg;
  PetscViewer    viewer;

  PetscFunctionBegin;
  /* One-time creation of the keyval under which the shared viewer is cached */
  if (Petsc_Viewer_Socket_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Viewer_Socket_keyval,0);
    if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_SOCKET_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
  }
  ierr = MPI_Attr_get(comm,Petsc_Viewer_Socket_keyval,(void **)&viewer,(int*)&flg);
  if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_SOCKET_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
  if (!flg) { /* PetscViewer not yet created */
    /* 0,0 means default machine and port (resolved from options/environment) */
    ierr = PetscViewerSocketOpen(comm,0,0,&viewer);
    if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_SOCKET_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
    /* Register the viewer for destruction during PETSc's finalize-time cleanup */
    ierr = PetscObjectRegisterDestroy((PetscObject)viewer);
    if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_SOCKET_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
    /* Cache the viewer on the communicator for subsequent calls */
    ierr = MPI_Attr_put(comm,Petsc_Viewer_Socket_keyval,(void*)viewer);
    if (ierr) {PetscError(__LINE__,"PETSC_VIEWER_SOCKET_",__FILE__,__SDIR__,1,1," ");PetscFunctionReturn(0);}
  }
  PetscFunctionReturn(viewer);
}
/* Tests that a keyval can be "freed" multiple times through different paths
 * without error: via attribute deletion on comm free, via two explicit
 * MPI_Keyval_free calls on copies of the same handle, and at MPI_Finalize.
 * NOTE(review): the numbered comments suggest delete_fn itself calls
 * MPI_Keyval_free -- confirm against its definition elsewhere in the file. */
int main (int argc, char **argv)
{
    MPI_Comm duped;
    int keyval = MPI_KEYVAL_INVALID;
    int keyval_copy = MPI_KEYVAL_INVALID;
    int errs=0;

    MTest_Init( &argc, &argv );
    MPI_Comm_dup(MPI_COMM_SELF, &duped);

    MPI_Keyval_create(MPI_NULL_COPY_FN, delete_fn, &keyval, NULL);
    keyval_copy = keyval;

    /* Attach the keyval (with a NULL value) to both communicators */
    MPI_Attr_put(MPI_COMM_SELF, keyval, NULL);
    MPI_Attr_put(duped, keyval, NULL);

    MPI_Comm_free(&duped);         /* first MPI_Keyval_free */
    MPI_Keyval_free(&keyval);      /* second MPI_Keyval_free */
    MPI_Keyval_free(&keyval_copy); /* third MPI_Keyval_free */

    MTest_Finalize( errs );
    MPI_Finalize(); /* fourth MPI_Keyval_free */
    return 0;
}
/* PetscSplitReductionGet - returns the PetscSplitReduction context cached on
 * the communicator, creating and caching it on first use. */
static PetscErrorCode PetscSplitReductionGet(MPI_Comm comm,PetscSplitReduction **sr)
{
  PetscErrorCode ierr;
  PetscMPIInt    flag;

  PetscFunctionBegin;
  if (Petsc_Reduction_keyval == MPI_KEYVAL_INVALID) {
    /* The calling sequence of the 2nd argument to this function changed
       between MPI Standard 1.0 and the revisions 1.1.  Here we match the
       new standard; if you are using an MPI implementation that uses the
       older version you will get a warning message about the next line --
       it is only a warning message and should do no harm. */
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelReduction,&Petsc_Reduction_keyval,0);CHKERRQ(ierr);
  }
  ierr = MPI_Attr_get(comm,Petsc_Reduction_keyval,(void **)sr,&flag);CHKERRQ(ierr);
  if (!flag) {  /* doesn't exist yet so create it and put it in */
    ierr = PetscSplitReductionCreate(comm,sr);CHKERRQ(ierr);
    ierr = MPI_Attr_put(comm,Petsc_Reduction_keyval,*sr);CHKERRQ(ierr);
    ierr = PetscInfo1(0,"Putting reduction data in an MPI_Comm %ld\n",(long)comm);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/* ADIOI_cb_gather_name_array() - gather a list of processor names from all
 * processes in a communicator and store them on rank 0.
 *
 * This is a collective call on the communicator(s) passed in.
 *
 * Obtains a rank-ordered list of processor names from the processes in
 * "dupcomm".
 *
 * Returns 0 on success, -1 on failure.
 *
 * NOTE: Needs some work to cleanly handle out of memory cases!  The early
 * "return -1" paths below leak whatever was allocated before the failure.
 */
int ADIOI_cb_gather_name_array(MPI_Comm comm,
                               MPI_Comm dupcomm,
                               ADIO_cb_name_array *arrayp)
{
    char my_procname[MPI_MAX_PROCESSOR_NAME], **procname = 0;
    int *procname_len = NULL, my_procname_len, *disp = NULL, i;
    int commsize, commrank, found;
    ADIO_cb_name_array array = NULL;
    int alloc_size;

    if (ADIOI_cb_config_list_keyval == MPI_KEYVAL_INVALID) {
        /* cleaned up by ADIOI_End_call */
        MPI_Keyval_create((MPI_Copy_function *) ADIOI_cb_copy_name_array,
                          (MPI_Delete_function *) ADIOI_cb_delete_name_array,
                          &ADIOI_cb_config_list_keyval, NULL);
    }
    else {
        /* Keyval already exists: the array may already be cached on comm */
        MPI_Attr_get(comm, ADIOI_cb_config_list_keyval, (void *) &array, &found);
        if (found) {
            ADIOI_Assert(array != NULL);
            *arrayp = array;
            return 0;
        }
    }

    MPI_Comm_size(dupcomm, &commsize);
    MPI_Comm_rank(dupcomm, &commrank);

    MPI_Get_processor_name(my_procname, &my_procname_len);

    /* allocate space for everything */
    array = (ADIO_cb_name_array) ADIOI_Malloc(sizeof(*array));
    if (array == NULL) {
        return -1;
    }
    array->refct = 2; /* we're going to associate this with two comms */

    if (commrank == 0) {
        /* process 0 keeps the real list */
        array->namect = commsize;
        array->names = (char **) ADIOI_Malloc(sizeof(char *) * commsize);
        if (array->names == NULL) {
            return -1;
        }
        procname = array->names; /* simpler to read */
        procname_len = (int *) ADIOI_Malloc(commsize * sizeof(int));
        if (procname_len == NULL) {
            return -1;
        }
    }
    else {
        /* everyone else just keeps an empty list as a placeholder */
        array->namect = 0;
        array->names = NULL;
    }

    /* gather lengths first */
    MPI_Gather(&my_procname_len, 1, MPI_INT,
               procname_len, 1, MPI_INT, 0, dupcomm);

    if (commrank == 0) {
#ifdef CB_CONFIG_LIST_DEBUG
        for (i=0; i < commsize; i++) {
            FPRINTF(stderr, "len[%d] = %d\n", i, procname_len[i]);
        }
#endif
        alloc_size = 0;
        for (i=0; i < commsize; i++) {
            /* add one to the lengths because we need to count the
             * terminator, and we are going to use this list of lengths
             * again in the gatherv. */
            alloc_size += ++procname_len[i];
        }

        /* One contiguous region holds all names; procname[i] point into it */
        procname[0] = ADIOI_Malloc(alloc_size);
        if (procname[0] == NULL) {
            return -1;
        }

        for (i=1; i < commsize; i++) {
            procname[i] = procname[i-1] + procname_len[i-1];
        }

        /* create our list of displacements for the gatherv.  we're going
         * to do everything relative to the start of the region allocated
         * for procname[0] */
        disp = ADIOI_Malloc(commsize * sizeof(int));
        disp[0] = 0;
        for (i=1; i < commsize; i++) {
            disp[i] = (int) (procname[i] - procname[0]);
        }
    }

    /* now gather strings */
    if (commrank == 0) {
        MPI_Gatherv(my_procname, my_procname_len + 1, MPI_CHAR,
                    procname[0], procname_len, disp, MPI_CHAR, 0, dupcomm);
    }
    else {
        /* if we didn't do this, we would need to allocate procname[]
         * on all processes...which seems a little silly. */
        MPI_Gatherv(my_procname, my_procname_len + 1, MPI_CHAR,
                    NULL, NULL, NULL, MPI_CHAR, 0, dupcomm);
    }

    if (commrank == 0) {
        /* no longer need the displacements or lengths */
        ADIOI_Free(disp);
        ADIOI_Free(procname_len);
#ifdef CB_CONFIG_LIST_DEBUG
        for (i=0; i < commsize; i++) {
            FPRINTF(stderr, "name[%d] = %s\n", i, procname[i]);
        }
#endif
    }

    /* store the attribute; we want to store SOMETHING on all processes
     * so that they can all tell if we have gone through this procedure
     * or not for the given communicator.
     *
     * specifically we put it on both the original comm, so we can find
     * it next time an open is performed on this same comm, and on the
     * dupcomm, so we can use it in I/O operations.
     */
    MPI_Attr_put(comm, ADIOI_cb_config_list_keyval, array);
    MPI_Attr_put(dupcomm, ADIOI_cb_config_list_keyval, array);
    *arrayp = array;
    return 0;
}
int main(int argc, char *argv[]) { int errs = 0; int key[3], attrval[3]; int i; MPI_Comm comm; MTest_Init(&argc, &argv); { comm = MPI_COMM_WORLD; /* Create key values */ for (i = 0; i < 3; i++) { MPI_Keyval_create(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN, &key[i], (void *) 0); attrval[i] = 1024 * i; } /* Insert attribute in several orders. Test after put with get, * then delete, then confirm delete with get. */ MPI_Attr_put(comm, key[2], &attrval[2]); MPI_Attr_put(comm, key[1], &attrval[1]); MPI_Attr_put(comm, key[0], &attrval[0]); errs += checkAttrs(comm, 3, key, attrval); MPI_Attr_delete(comm, key[0]); MPI_Attr_delete(comm, key[1]); MPI_Attr_delete(comm, key[2]); errs += checkNoAttrs(comm, 3, key); MPI_Attr_put(comm, key[1], &attrval[1]); MPI_Attr_put(comm, key[2], &attrval[2]); MPI_Attr_put(comm, key[0], &attrval[0]); errs += checkAttrs(comm, 3, key, attrval); MPI_Attr_delete(comm, key[2]); MPI_Attr_delete(comm, key[1]); MPI_Attr_delete(comm, key[0]); errs += checkNoAttrs(comm, 3, key); MPI_Attr_put(comm, key[0], &attrval[0]); MPI_Attr_put(comm, key[1], &attrval[1]); MPI_Attr_put(comm, key[2], &attrval[2]); errs += checkAttrs(comm, 3, key, attrval); MPI_Attr_delete(comm, key[1]); MPI_Attr_delete(comm, key[2]); MPI_Attr_delete(comm, key[0]); errs += checkNoAttrs(comm, 3, key); for (i = 0; i < 3; i++) { MPI_Keyval_free(&key[i]); } } MTest_Finalize(errs); return MTestReturnValue(errs); }
int test_communicators(void) { MPI_Comm dup_comm_world, d2; int world_rank, world_size, key_1; int err, errs = 0; MPI_Aint value; MPI_Comm_rank(MPI_COMM_WORLD, &world_rank); MPI_Comm_size(MPI_COMM_WORLD, &world_size); #ifdef DEBUG if (world_rank == 0) { printf("*** Attribute copy/delete return codes ***\n"); } #endif MPI_Comm_dup(MPI_COMM_WORLD, &dup_comm_world); MPI_Barrier(dup_comm_world); MPI_Errhandler_set(dup_comm_world, MPI_ERRORS_RETURN); value = -11; if ((err = MPI_Keyval_create(copybomb_fn, deletebomb_fn, &key_1, &value))) abort_msg("Keyval_create", err); err = MPI_Attr_put(dup_comm_world, key_1, (void *) (MPI_Aint) world_rank); if (err) { errs++; printf("Error with first put\n"); } err = MPI_Attr_put(dup_comm_world, key_1, (void *) (MPI_Aint) (2 * world_rank)); if (err == MPI_SUCCESS) { errs++; printf("delete function return code was MPI_SUCCESS in put\n"); } /* Because the attribute delete function should fail, the attribute * should *not be removed* */ err = MPI_Attr_delete(dup_comm_world, key_1); if (err == MPI_SUCCESS) { errs++; printf("delete function return code was MPI_SUCCESS in delete\n"); } err = MPI_Comm_dup(dup_comm_world, &d2); if (err == MPI_SUCCESS) { errs++; printf("copy function return code was MPI_SUCCESS in dup\n"); } #ifndef USE_STRICT_MPI /* Another interpretation is to leave d2 unchanged on error */ if (err && d2 != MPI_COMM_NULL) { errs++; printf("dup did not return MPI_COMM_NULL on error\n"); } #endif delete_flag = 1; MPI_Comm_free(&dup_comm_world); MPI_Keyval_free(&key_1); return errs; }
/*@C
   PetscSharedWorkingDirectory - Determines if all processors in a communicator share a
         working directory or have different ones.

   Collective on MPI_Comm

   Input Parameters:
.  comm - MPI_Communicator that may share working directory

   Output Parameters:
.  shared - PETSC_TRUE or PETSC_FALSE

   Options Database Keys:
+    -shared_working_directory
.    -not_shared_working_directory

   Environmental Variables:
+     PETSC_SHARED_WORKING_DIRECTORY
.     PETSC_NOT_SHARED_WORKING_DIRECTORY

   Level: developer

   Notes:
   Stores the status as a MPI attribute so it does not have
    to be redetermined each time.

      Assumes that all processors in a communicator either
       1) have a common working directory or
       2) each has a separate working directory
      eventually we can write a fancier one that determines which processors
      share a common working directory.

   This will be very slow on runs with a large number of processors since
   it requires O(p*p) file opens.

@*/
PetscErrorCode PetscSharedWorkingDirectory(MPI_Comm comm,PetscBool *shared)
{
  PetscErrorCode     ierr;
  PetscMPIInt        size,rank,*tagvalp,sum,cnt,i;
  PetscBool          flg,iflg;
  FILE               *fd;
  static PetscMPIInt Petsc_WD_keyval = MPI_KEYVAL_INVALID;
  int                err;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  /* a single process trivially shares its working directory with itself */
  if (size == 1) {
    *shared = PETSC_TRUE;
    PetscFunctionReturn(0);
  }

  /* the user may force either answer through the environment */
  ierr = PetscOptionsGetenv(comm,"PETSC_SHARED_WORKING_DIRECTORY",NULL,0,&flg);CHKERRQ(ierr);
  if (flg) {
    *shared = PETSC_TRUE;
    PetscFunctionReturn(0);
  }
  ierr = PetscOptionsGetenv(comm,"PETSC_NOT_SHARED_WORKING_DIRECTORY",NULL,0,&flg);CHKERRQ(ierr);
  if (flg) {
    *shared = PETSC_FALSE;
    PetscFunctionReturn(0);
  }

  /* lazily create the keyval used to cache the answer on the communicator */
  if (Petsc_WD_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelTmpShared,&Petsc_WD_keyval,0);CHKERRQ(ierr);
  }

  ierr = MPI_Attr_get(comm,Petsc_WD_keyval,(void**)&tagvalp,(int*)&iflg);CHKERRQ(ierr);
  if (!iflg) {
    char filename[PETSC_MAX_PATH_LEN];

    /* This communicator does not yet have a shared attribute */
    ierr = PetscMalloc1(1,&tagvalp);CHKERRQ(ierr);
    ierr = MPI_Attr_put(comm,Petsc_WD_keyval,tagvalp);CHKERRQ(ierr);

    /* was a hard-coded 240, unrelated to the actual buffer size; reserve
       room for the "/petsctestshared" suffix appended below so the
       PetscStrcat() cannot overflow filename */
    ierr = PetscGetWorkingDirectory(filename,PETSC_MAX_PATH_LEN - sizeof("/petsctestshared"));CHKERRQ(ierr);
    ierr = PetscStrcat(filename,"/petsctestshared");CHKERRQ(ierr);
    ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

    /* each processor creates a file and all the later ones check */
    /* this makes sure no subset of processors is shared */
    *shared = PETSC_FALSE;
    for (i=0; i<size-1; i++) {
      if (rank == i) {
        fd = fopen(filename,"w");
        if (!fd) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_FILE_OPEN,"Unable to open test file %s",filename);
        err = fclose(fd);
        if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fclose() failed on file");
      }
      ierr = MPI_Barrier(comm);CHKERRQ(ierr);
      /* ranks >= i probe for the file rank i just created; earlier ranks
         already know their answer and contribute 0 */
      if (rank >= i) {
        fd = fopen(filename,"r");
        if (fd) cnt = 1;
        else cnt = 0;
        if (fd) {
          err = fclose(fd);
          if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fclose() failed on file");
        }
      } else cnt = 0;
      ierr = MPIU_Allreduce(&cnt,&sum,1,MPI_INT,MPI_SUM,comm);CHKERRQ(ierr);
      if (rank == i) unlink(filename);

      if (sum == size) {
        /* everyone saw the file: the directory is shared */
        *shared = PETSC_TRUE;
        break;
      } else if (sum != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP_SYS,"Subset of processes share working directory");
    }
    /* cache the result on the communicator for subsequent calls */
    *tagvalp = (int)*shared;
  } else *shared = (PetscBool) *tagvalp;
  ierr = PetscInfo1(0,"processors %s working directory\n",(*shared) ? "shared" : "do NOT share");CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@C
   PetscViewerASCIIOpen - Opens an ASCII file as a PetscViewer.

   Collective on MPI_Comm

   Input Parameters:
+  comm - the communicator
-  name - the file name

   Output Parameter:
.  lab - the PetscViewer to use with the specified file

   Level: beginner

   Notes:
   This PetscViewer can be destroyed with PetscViewerDestroy().

   The MPI communicator used here must match that used by the object one is viewing. For example if the
   Mat was created with a PETSC_COMM_WORLD, then the Viewer must be created with PETSC_COMM_WORLD

   As shown below, PetscViewerASCIIOpen() is useful in conjunction with
   MatView() and VecView()
.vb
     PetscViewerASCIIOpen(PETSC_COMM_WORLD,"mat.output",&viewer);
     MatView(matrix,viewer);
.ve

  Concepts: PetscViewerASCII^creating
  Concepts: printf
  Concepts: printing
  Concepts: accessing remote file
  Concepts: remote file

.seealso: MatView(), VecView(), PetscViewerDestroy(), PetscViewerBinaryOpen(),
          PetscViewerASCIIGetPointer(), PetscViewerPushFormat(), PETSC_VIEWER_STDOUT_, PETSC_VIEWER_STDERR_,
          PETSC_VIEWER_STDOUT_WORLD, PETSC_VIEWER_STDOUT_SELF,
@*/
PetscErrorCode PetscViewerASCIIOpen(MPI_Comm comm,const char name[],PetscViewer *lab)
{
  PetscErrorCode  ierr;
  PetscViewerLink *vlink,*nv;
  PetscBool       flg,eq;
  size_t          len;

  PetscFunctionBegin;
  /* an empty name means "use the shared stdout viewer for this communicator";
     a reference is added because the caller will destroy *lab */
  ierr = PetscStrlen(name,&len);CHKERRQ(ierr);
  if (!len) {
    ierr = PetscViewerASCIIGetStdout(comm,lab);CHKERRQ(ierr);
    ierr = PetscObjectReference((PetscObject)*lab);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  /* serialize creation/lookup of viewers; every early return below must
     release this spinlock */
  ierr = PetscSpinlockLock(&PetscViewerASCIISpinLockOpen);CHKERRQ(ierr);
  /* lazily create the keyval used to cache the viewer list on the communicator */
  if (Petsc_Viewer_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelViewer,&Petsc_Viewer_keyval,(void*)0);CHKERRQ(ierr);
  }
  /*
     It would be better to move this code to PetscFileSetName() but since it must return a preexiting communicator
     we cannot do that, since PetscFileSetName() takes a communicator that already exists.

     Plus if the original communicator that created the file has since been close this will not detect the old
     communictor and hence will overwrite the old data. It may be better to simply remove all this code
  */
  /* make sure communicator is a PETSc communicator */
  /* NOTE: comm is rebound to the duplicated inner communicator here; the
     matching PetscCommDestroy() appears on every exit path below */
  ierr = PetscCommDuplicate(comm,&comm,NULL);CHKERRQ(ierr);
  /* has file already been opened into a viewer */
  ierr = MPI_Attr_get(comm,Petsc_Viewer_keyval,(void**)&vlink,(PetscMPIInt*)&flg);CHKERRQ(ierr);
  if (flg) {
    /* walk the linked list of cached viewers looking for one already
       opened with this file name; if found, hand back a new reference */
    while (vlink) {
      ierr = PetscStrcmp(name,((PetscViewer_ASCII*)(vlink->viewer->data))->filename,&eq);CHKERRQ(ierr);
      if (eq) {
        ierr = PetscObjectReference((PetscObject)vlink->viewer);CHKERRQ(ierr);
        *lab = vlink->viewer;
        ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);
        ierr = PetscSpinlockUnlock(&PetscViewerASCIISpinLockOpen);CHKERRQ(ierr);
        PetscFunctionReturn(0);
      }
      vlink = vlink->next;
    }
  }
  /* no cached viewer: create a fresh ASCII viewer for this file */
  ierr = PetscViewerCreate(comm,lab);CHKERRQ(ierr);
  ierr = PetscViewerSetType(*lab,PETSCVIEWERASCII);CHKERRQ(ierr);
  if (name) {
    ierr = PetscViewerFileSetName(*lab,name);CHKERRQ(ierr);
  }
  /* save viewer into communicator if needed later */
  ierr = PetscNew(&nv);CHKERRQ(ierr);
  nv->viewer = *lab;
  if (!flg) {
    /* no list on the communicator yet: this node becomes the head */
    ierr = MPI_Attr_put(comm,Petsc_Viewer_keyval,nv);CHKERRQ(ierr);
  } else {
    /* re-fetch the head (vlink was advanced to NULL by the search above)
       and append at the tail; fall back to a fresh put if the list vanished */
    ierr = MPI_Attr_get(comm,Petsc_Viewer_keyval,(void**)&vlink,(PetscMPIInt*)&flg);CHKERRQ(ierr);
    if (vlink) {
      while (vlink->next) vlink = vlink->next;
      vlink->next = nv;
    } else {
      ierr = MPI_Attr_put(comm,Petsc_Viewer_keyval,nv);CHKERRQ(ierr);
    }
  }
  ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);
  ierr = PetscSpinlockUnlock(&PetscViewerASCIISpinLockOpen);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
int main(int argc, char *argv[]) { int key[3]; int val[3] = { 1, 2, 3 }; int flag; int *out; MPI_Init(&argc, &argv); MPI_Keyval_create(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN, &key[0], (void *)0); MPI_Keyval_create(MPI_NULL_COPY_FN, delete_attr, &key[1], (void *)0); /* TODO: nonempty COPY_FN MPI_Comm_create_keyval(MPI_NULL_COPY_FN, delete_attr, &key[2], (void *)0); */ MPI_Attr_get(MPI_COMM_WORLD, key[0], NULL, &flag); if (flag) { return 1; } MPI_Attr_get(MPI_COMM_WORLD, key[1], NULL, &flag); if (flag) { return 1; } MPI_Attr_put(MPI_COMM_WORLD, key[0], &val[0]); MPI_Attr_put(MPI_COMM_WORLD, key[1], &val[1]); MPI_Attr_get(MPI_COMM_SELF, key[1], NULL, &flag); if (flag) { return 1; } MPI_Attr_put(MPI_COMM_SELF, key[1], &val[2]); MPI_Attr_get(MPI_COMM_SELF, key[1], &out, &flag); if (!flag || *out != 3) { return 1; } MPI_Attr_get(MPI_COMM_WORLD, key[1], &out, &flag); if (!flag || *out != 2) { return 1; } MPI_Attr_get(MPI_COMM_WORLD, key[0], &out, &flag); if (!flag || *out != 1) { return 1; } MPI_Attr_put(MPI_COMM_WORLD, key[1], &val[0]); MPI_Attr_get(MPI_COMM_WORLD, key[1], &out, &flag); if (!flag || *out != 1) { return 1; } MPI_Attr_get(MPI_COMM_SELF, key[1], &out, &flag); if (!flag || *out != 3) { return 1; } MPI_Attr_delete(MPI_COMM_WORLD, key[0]); MPI_Attr_delete(MPI_COMM_WORLD, key[1]); MPI_Attr_delete(MPI_COMM_SELF, key[1]); MPI_Keyval_free(&key[0]); MPI_Keyval_free(&key[1]); if (key[0] != MPI_KEYVAL_INVALID || key[1] != MPI_KEYVAL_INVALID) { return 2; } return 0; }
/*@
    MPI_File_open - Opens a file

Input Parameters:
. comm - communicator (handle)
. filename - name of file to open (string)
. amode - file access mode (integer)
. info - info object (handle)

Output Parameters:
. fh - file handle (handle)

Notes:
  Validates comm and amode, determines the file-system type (from an
  explicit "fstype:" prefix on filename or by probing), and opens the
  file through ADIO_Open on a private duplicate of comm.

.N fortran
@*/
int MPI_File_open(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh)
{
    int error_code, file_system, flag, tmp_amode, rank, orig_amode;
#ifndef PRINT_ERR_MSG
    static char myname[] = "MPI_FILE_OPEN";
#endif
    int err, min_code;
    char *tmp;
    MPI_Comm dupcomm, dupcommself;

#ifdef MPI_hpux
    int fl_xmpi;
    HPMP_IO_OPEN_START(fl_xmpi, comm);
#endif /* MPI_hpux */

    error_code = MPI_SUCCESS;

    /* --- argument validation --- */
    if (comm == MPI_COMM_NULL) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: Invalid communicator\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_COMM, MPIR_ERR_COMM_NULL,
				     myname, (char *) 0, (char *) 0);
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }

    MPI_Comm_test_inter(comm, &flag);
    if (flag) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: Intercommunicator cannot be passed to MPI_File_open\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_COMM, MPIR_ERR_COMM_INTER,
				     myname, (char *) 0, (char *) 0);
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }

    /* exactly one of the three access categories must be selected */
    if ( ((amode&MPI_MODE_RDONLY)?1:0) + ((amode&MPI_MODE_RDWR)?1:0) +
	 ((amode&MPI_MODE_WRONLY)?1:0) != 1 ) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: Exactly one of MPI_MODE_RDONLY, MPI_MODE_WRONLY, or MPI_MODE_RDWR must be specified\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_AMODE, 3,
				     myname, (char *) 0, (char *) 0);
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }

    if ((amode & MPI_MODE_RDONLY) &&
	((amode & MPI_MODE_CREATE) || (amode & MPI_MODE_EXCL))) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: It is erroneous to specify MPI_MODE_CREATE or MPI_MODE_EXCL with MPI_MODE_RDONLY\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_AMODE, 5,
				     myname, (char *) 0, (char *) 0);
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }

    if ((amode & MPI_MODE_RDWR) && (amode & MPI_MODE_SEQUENTIAL)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: It is erroneous to specify MPI_MODE_SEQUENTIAL with MPI_MODE_RDWR\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_AMODE, 7,
				     myname, (char *) 0, (char *) 0);
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }

    /* check if amode is the same on all processes */
    MPI_Comm_dup(comm, &dupcomm);
    tmp_amode = amode;
/*
    Removed this check because broadcast is too expensive.
    MPI_Bcast(&tmp_amode, 1, MPI_INT, 0, dupcomm);
    if (amode != tmp_amode) {
	FPRINTF(stderr, "MPI_File_open: amode must be the same on all processes\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
    }
*/

    /* check if ADIO has been initialized. If not, initialize it */
    if (ADIO_Init_keyval == MPI_KEYVAL_INVALID) {
	/* check if MPI itself has been initialized. If not, flag an error.
	   Can't initialize it here, because don't know argc, argv */
	MPI_Initialized(&flag);
	if (!flag) {
	    FPRINTF(stderr, "Error: MPI_Init() must be called before using MPI-IO\n");
	    MPI_Abort(MPI_COMM_WORLD, 1);
	}

	MPI_Keyval_create(MPI_NULL_COPY_FN, ADIOI_End_call,
			  &ADIO_Init_keyval, (void *) 0);

	/* put a dummy attribute on MPI_COMM_WORLD, because we want the delete
	   function to be called when MPI_COMM_WORLD is freed. Hopefully the
	   MPI library frees MPI_COMM_WORLD when MPI_Finalize is called,
	   though the standard does not mandate this. */
	MPI_Attr_put(MPI_COMM_WORLD, ADIO_Init_keyval, (void *) 0);

	/* initialize ADIO */
	ADIO_Init( (int *)0, (char ***)0, &error_code);
    }

    /* resolve the file-system type: an explicit "fstype:" prefix wins,
       otherwise probe the path */
    file_system = -1;
    tmp = strchr(filename, ':');
#ifdef WIN32
    /* Unfortunately Windows uses ':' behind the drive letter.
       So we check if there is only one letter before the ':'
       Please do not use a single letter filesystem name! */
    if(tmp && ((tmp-filename) == 1)) tmp = 0;
#endif
    if (!tmp) {
	ADIO_FileSysType(filename, &file_system, &err);
	if (err != MPI_SUCCESS) {
#ifdef PRINT_ERR_MSG
	    FPRINTF(stderr, "MPI_File_open: Can't determine the file-system type. Check the filename/path you provided and try again. Otherwise, prefix the filename with a string to indicate the type of file sytem (piofs:, pfs:, nfs:, ufs:, hfs:, xfs:, sfs:, pvfs:, svm:).\n");
	    MPI_Abort(MPI_COMM_WORLD, 1);
#else
	    error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_FSTYPE,
					 myname, (char *) 0, (char *) 0);
	    return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
	}
	/* if any process detected NFS, everyone must treat the file as NFS */
	MPI_Allreduce(&file_system, &min_code, 1, MPI_INT, MPI_MIN, dupcomm);
	if (min_code == ADIO_NFS) file_system = ADIO_NFS;
    }

    /* --- reject file systems ROMIO was not configured for --- */
#ifndef PFS
    if (!strncmp(filename, "pfs:", 4) || !strncmp(filename, "PFS:", 4) ||
	(file_system == ADIO_PFS)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the PFS file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_PFS,
				     myname, (char *) 0, (char *) 0,"PFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif
#ifndef PIOFS
    if (!strncmp(filename, "piofs:", 6) || !strncmp(filename, "PIOFS:", 6) ||
	(file_system == ADIO_PIOFS)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the PIOFS file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_PIOFS,
				     myname, (char *) 0, (char *) 0,"PIOFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif
#ifndef UFS
    if (!strncmp(filename, "ufs:", 4) || !strncmp(filename, "UFS:", 4) ||
	(file_system == ADIO_UFS)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the UFS file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_UFS,
				     myname, (char *) 0, (char *) 0,"UFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif
#ifndef NFS
    if (!strncmp(filename, "nfs:", 4) || !strncmp(filename, "NFS:", 4) ||
	(file_system == ADIO_NFS)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the NFS file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_NFS,
				     myname, (char *) 0, (char *) 0,"NFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif
#ifndef HFS
    if (!strncmp(filename, "hfs:", 4) || !strncmp(filename, "HFS:", 4) ||
	(file_system == ADIO_HFS)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the HFS file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_HFS,
				     myname, (char *) 0, (char *) 0,"HFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif
#ifndef XFS
    if (!strncmp(filename, "xfs:", 4) || !strncmp(filename, "XFS:", 4) ||
	(file_system == ADIO_XFS)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the XFS file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_XFS,
				     myname, (char *) 0, (char *) 0,"XFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif
#ifndef SFS
    if (!strncmp(filename, "sfs:", 4) || !strncmp(filename, "SFS:", 4) ||
	(file_system == ADIO_SFS)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the SFS file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_SFS,
				     myname, (char *) 0, (char *) 0,"SFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif
#ifndef PVFS
    if (!strncmp(filename, "pvfs:", 5) || !strncmp(filename, "PVFS:", 5) ||
	(file_system == ADIO_PVFS)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the PVFS file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_PVFS,
				     myname, (char *) 0, (char *) 0,"PVFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif
#ifndef SVM
    if (!strncmp(filename, "svm:", 4) || !strncmp(filename, "SVM:", 4) ||
	(file_system == ADIO_SVM)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the SVM file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_SVM,
				     myname, (char *) 0, (char *) 0,"SVM");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif
#ifndef NTFS
    /* fix: this guard previously tested "svm:"/ADIO_SVM and printed an SVM
       message (copy-paste error); it must test the NTFS prefix (5 chars,
       including the colon) and ADIO_NTFS */
    if (!strncmp(filename, "ntfs:", 5) || !strncmp(filename, "NTFS:", 5) ||
	(file_system == ADIO_NTFS)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: ROMIO has not been configured to use the NTFS file system\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ERR_NO_NTFS,
				     myname, (char *) 0, (char *) 0,"NTFS");
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }
#endif

    /* strip the "fstype:" prefix, if any, and record the file-system type */
    if (!strncmp(filename, "pfs:", 4) || !strncmp(filename, "PFS:", 4)) {
	file_system = ADIO_PFS;
	filename += 4;
    }
    else if (!strncmp(filename, "piofs:", 6) || !strncmp(filename, "PIOFS:", 6)) {
	file_system = ADIO_PIOFS;
	filename += 6;
    }
    else if (!strncmp(filename, "ufs:", 4) || !strncmp(filename, "UFS:", 4)) {
	file_system = ADIO_UFS;
	filename += 4;
    }
    else if (!strncmp(filename, "nfs:", 4) || !strncmp(filename, "NFS:", 4)) {
	file_system = ADIO_NFS;
	filename += 4;
    }
    else if (!strncmp(filename, "hfs:", 4) || !strncmp(filename, "HFS:", 4)) {
	file_system = ADIO_HFS;
	filename += 4;
    }
    else if (!strncmp(filename, "xfs:", 4) || !strncmp(filename, "XFS:", 4)) {
	file_system = ADIO_XFS;
	filename += 4;
    }
    else if (!strncmp(filename, "sfs:", 4) || !strncmp(filename, "SFS:", 4)) {
	file_system = ADIO_SFS;
	filename += 4;
    }
    else if (!strncmp(filename, "pvfs:", 5) || !strncmp(filename, "PVFS:", 5)) {
	file_system = ADIO_PVFS;
	filename += 5;
    }
    else if (!strncmp(filename, "svm:", 4) || !strncmp(filename, "SVM:", 4)) {
	file_system = ADIO_SVM;
	filename += 4;
    }
    /* fix: compared only 4 chars of the 5-char "ntfs:"/"NTFS:" prefix while
       skipping 5 characters, so e.g. "ntfsXfoo" would match and be mangled */
    else if (!strncmp(filename, "ntfs:", 5) || !strncmp(filename, "NTFS:", 5)) {
	file_system = ADIO_NTFS;
	filename += 5;
    }

    if (((file_system == ADIO_PIOFS) || (file_system == ADIO_PVFS)) &&
        (amode & MPI_MODE_SEQUENTIAL)) {
#ifdef PRINT_ERR_MSG
	FPRINTF(stderr, "MPI_File_open: MPI_MODE_SEQUENTIAL not supported on PIOFS and PVFS\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
#else
	error_code = MPIR_Err_setmsg(MPI_ERR_UNSUPPORTED_OPERATION,
				     MPIR_ERR_NO_MODE_SEQ,
				     myname, (char *) 0, (char *) 0);
	return ADIOI_Error(MPI_FILE_NULL, error_code, myname);
#endif
    }

    orig_amode = amode;
    MPI_Comm_rank(dupcomm, &rank);

    if ((amode & MPI_MODE_CREATE) && (amode & MPI_MODE_EXCL)) {
	/* the open should fail if the file exists. Only process 0 should
           check this. Otherwise, if all processes try to check and the file
           does not exist, one process will create the file and others who
           reach later will return error. */
	if (!rank) {
	    MPI_Comm_dup(MPI_COMM_SELF, &dupcommself);
	    /* this dup is freed either in ADIO_Open if the open fails,
	       or in ADIO_Close */
	    *fh = ADIO_Open(dupcommself, filename, file_system, amode, 0,
			    MPI_BYTE, MPI_BYTE, M_ASYNC, info,
			    ADIO_PERM_NULL, &error_code);
	    /* broadcast the error code to other processes */
	    MPI_Bcast(&error_code, 1, MPI_INT, 0, dupcomm);
	    /* if no error, close the file. It will be reopened normally
               below. */
	    if (error_code == MPI_SUCCESS) ADIO_Close(*fh, &error_code);
	}
	else MPI_Bcast(&error_code, 1, MPI_INT, 0, dupcomm);

	if (error_code != MPI_SUCCESS) {
	    MPI_Comm_free(&dupcomm);
	    *fh = MPI_FILE_NULL;
#ifdef MPI_hpux
	    HPMP_IO_OPEN_END(fl_xmpi, *fh, comm);
#endif /* MPI_hpux */
	    return error_code;
	}
	else amode = amode ^ MPI_MODE_EXCL;  /* turn off MPI_MODE_EXCL */
    }

    /* use default values for disp, etype, filetype */
    /* set iomode=M_ASYNC. It is used to implement the Intel PFS interface
       on top of ADIO. Not relevant for MPI-IO implementation */

    *fh = ADIO_Open(dupcomm, filename, file_system, amode, 0, MPI_BYTE,
		    MPI_BYTE, M_ASYNC, info, ADIO_PERM_NULL, &error_code);

    /* if MPI_MODE_EXCL was removed, add it back */
    if ((error_code == MPI_SUCCESS) && (amode != orig_amode))
	(*fh)->access_mode = orig_amode;

    /* determine name of file that will hold the shared file pointer */
    /* can't support shared file pointers on a file system that doesn't
       support file locking, e.g., PIOFS, PVFS */
    if ((error_code == MPI_SUCCESS) && ((*fh)->file_system != ADIO_PIOFS)
	&& ((*fh)->file_system != ADIO_PVFS)) {
	ADIOI_Shfp_fname(*fh, rank);

	/* if MPI_MODE_APPEND, set the shared file pointer to end of file.
	   indiv. file pointer already set to end of file in ADIO_Open.
	   Here file view is just bytes. */
	if ((*fh)->access_mode & MPI_MODE_APPEND) {
	    if (!rank) ADIO_Set_shared_fp(*fh, (*fh)->fp_ind, &error_code);
	    MPI_Barrier(dupcomm);
	}
    }

#ifdef MPI_hpux
    HPMP_IO_OPEN_END(fl_xmpi, *fh, comm);
#endif /* MPI_hpux */
    return error_code;
}
/* Coverage listing of the MPI-1 C bindings: one call per API function so
 * the compiler/linker checks every prototype.  This function is a
 * compile-time checklist only -- it is never meant to be executed (the
 * arguments are uninitialized), and the "// Lnn" comments appear to be
 * line references into a companion document.  NOTE(review): recvtag is
 * declared MPI_Datatype but used where an int tag is expected -- confirm
 * against the companion listing before "fixing". */
void declareBindings (void)
{
  /* === Point-to-point === */
  void* buf;
  int count;
  MPI_Datatype datatype;
  int dest;
  int tag;
  MPI_Comm comm;
  MPI_Send (buf, count, datatype, dest, tag, comm); // L12
  int source;
  MPI_Status status;
  MPI_Recv (buf, count, datatype, source, tag, comm, &status); // L15
  MPI_Get_count (&status, datatype, &count);
  MPI_Bsend (buf, count, datatype, dest, tag, comm);
  MPI_Ssend (buf, count, datatype, dest, tag, comm);
  MPI_Rsend (buf, count, datatype, dest, tag, comm);
  void* buffer;
  int size;
  MPI_Buffer_attach (buffer, size); // L22
  MPI_Buffer_detach (buffer, &size);
  MPI_Request request;
  MPI_Isend (buf, count, datatype, dest, tag, comm, &request); // L25
  MPI_Ibsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Issend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irecv (buf, count, datatype, source, tag, comm, &request);
  MPI_Wait (&request, &status);
  int flag;
  MPI_Test (&request, &flag, &status); // L32
  MPI_Request_free (&request);
  MPI_Request* array_of_requests;
  int index;
  MPI_Waitany (count, array_of_requests, &index, &status); // L36
  MPI_Testany (count, array_of_requests, &index, &flag, &status);
  MPI_Status* array_of_statuses;
  MPI_Waitall (count, array_of_requests, array_of_statuses); // L39
  MPI_Testall (count, array_of_requests, &flag, array_of_statuses);
  int incount;
  int outcount;
  int* array_of_indices;
  MPI_Waitsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L44--45
  MPI_Testsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L46--47
  MPI_Iprobe (source, tag, comm, &flag, &status); // L48
  MPI_Probe (source, tag, comm, &status);
  MPI_Cancel (&request);
  MPI_Test_cancelled (&status, &flag);
  /* persistent-request constructors */
  MPI_Send_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Bsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Ssend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Rsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Recv_init (buf, count, datatype, source, tag, comm, &request);
  MPI_Start (&request);
  MPI_Startall (count, array_of_requests);
  void* sendbuf;
  int sendcount;
  MPI_Datatype sendtype;
  int sendtag;
  void* recvbuf;
  int recvcount;
  MPI_Datatype recvtype;
  MPI_Datatype recvtag;
  MPI_Sendrecv (sendbuf, sendcount, sendtype, dest, sendtag,
		recvbuf, recvcount, recvtype, source, recvtag,
		comm, &status); // L67--69
  MPI_Sendrecv_replace (buf, count, datatype, dest, sendtag, source, recvtag,
			comm, &status); // L70--71
  /* derived-datatype constructors; inner blocks reuse the name "stride"
     and "array_of_displacements" with the int vs MPI_Aint variants */
  MPI_Datatype oldtype;
  MPI_Datatype newtype;
  MPI_Type_contiguous (count, oldtype, &newtype); // L74
  int blocklength;
  {
    int stride;
    MPI_Type_vector (count, blocklength, stride, oldtype, &newtype); // L78
  }
  {
    MPI_Aint stride;
    MPI_Type_hvector (count, blocklength, stride, oldtype, &newtype); // L82
  }
  int* array_of_blocklengths;
  {
    int* array_of_displacements;
    MPI_Type_indexed (count, array_of_blocklengths, array_of_displacements,
		      oldtype, &newtype); // L87--88
  }
  {
    MPI_Aint* array_of_displacements;
    MPI_Type_hindexed (count, array_of_blocklengths, array_of_displacements,
		       oldtype, &newtype); // L92--93
    MPI_Datatype* array_of_types;
    MPI_Type_struct (count, array_of_blocklengths, array_of_displacements,
		     array_of_types, &newtype); // L95--96
  }
  void* location;
  MPI_Aint address;
  MPI_Address (location, &address); // L100
  MPI_Aint extent;
  MPI_Type_extent (datatype, &extent); // L102
  MPI_Type_size (datatype, &size);
  MPI_Aint displacement;
  MPI_Type_lb (datatype, &displacement); // L105
  MPI_Type_ub (datatype, &displacement);
  MPI_Type_commit (&datatype);
  MPI_Type_free (&datatype);
  MPI_Get_elements (&status, datatype, &count);
  /* pack/unpack */
  void* inbuf;
  void* outbuf;
  int outsize;
  int position;
  MPI_Pack (inbuf, incount, datatype, outbuf, outsize, &position, comm); // L114
  int insize;
  MPI_Unpack (inbuf, insize, &position, outbuf, outcount, datatype,
	      comm); // L116--117
  MPI_Pack_size (incount, datatype, comm, &size);

  /* === Collectives === */
  MPI_Barrier (comm); // L121
  int root;
  MPI_Bcast (buffer, count, datatype, root, comm); // L123
  MPI_Gather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
	      root, comm); // L124--125
  int* recvcounts;
  int* displs;
  MPI_Gatherv (sendbuf, sendcount, sendtype,
	       recvbuf, recvcounts, displs, recvtype,
	       root, comm); // L128--130
  MPI_Scatter (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
	       root, comm); // L131--132
  int* sendcounts;
  MPI_Scatterv (sendbuf, sendcounts, displs, sendtype,
		recvbuf, recvcount, recvtype, root, comm); // L134--135
  MPI_Allgather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
		 comm); // L136--137
  MPI_Allgatherv (sendbuf, sendcount, sendtype,
		  recvbuf, recvcounts, displs, recvtype,
		  comm); // L138--140
  MPI_Alltoall (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
		comm); // L141--142
  int* sdispls;
  int* rdispls;
  MPI_Alltoallv (sendbuf, sendcounts, sdispls, sendtype,
		 recvbuf, recvcounts, rdispls, recvtype,
		 comm); // L145--147
  MPI_Op op;
  MPI_Reduce (sendbuf, recvbuf, count, datatype, op, root, comm); // L149
#if 0
  /* disabled in the original listing: needs a real user function */
  MPI_User_function function;
  int commute;
  MPI_Op_create (function, commute, &op); // L153
#endif
  MPI_Op_free (&op); // L155
  MPI_Allreduce (sendbuf, recvbuf, count, datatype, op, comm);
  MPI_Reduce_scatter (sendbuf, recvbuf, recvcounts, datatype, op, comm);
  MPI_Scan (sendbuf, recvbuf, count, datatype, op, comm);

  /* === Groups, contexts, and communicators === */
  MPI_Group group;
  MPI_Group_size (group, &size); // L162
  int rank;
  MPI_Group_rank (group, &rank); // L164
  MPI_Group group1;
  int n;
  int* ranks1;
  MPI_Group group2;
  int* ranks2;
  MPI_Group_translate_ranks (group1, n, ranks1, group2, ranks2); // L170
  int result;
  MPI_Group_compare (group1, group2, &result); // L172
  MPI_Group newgroup;
  MPI_Group_union (group1, group2, &newgroup); // L174
  MPI_Group_intersection (group1, group2, &newgroup);
  MPI_Group_difference (group1, group2, &newgroup);
  int* ranks;
  MPI_Group_incl (group, n, ranks, &newgroup); // L178
  MPI_Group_excl (group, n, ranks, &newgroup);
  extern int ranges[][3];
  MPI_Group_range_incl (group, n, ranges, &newgroup); // L181
  MPI_Group_range_excl (group, n, ranges, &newgroup);
  MPI_Group_free (&group);
  MPI_Comm_size (comm, &size);
  MPI_Comm_rank (comm, &rank);
  MPI_Comm comm1;
  MPI_Comm comm2;
  MPI_Comm_compare (comm1, comm2, &result);
  MPI_Comm newcomm;
  MPI_Comm_dup (comm, &newcomm);
  MPI_Comm_create (comm, group, &newcomm);
  int color;
  int key;
  MPI_Comm_split (comm, color, key, &newcomm); // L194
  MPI_Comm_free (&comm);
  MPI_Comm_test_inter (comm, &flag);
  MPI_Comm_remote_size (comm, &size);
  MPI_Comm_remote_group (comm, &group);
  MPI_Comm local_comm;
  int local_leader;
  MPI_Comm peer_comm;
  int remote_leader;
  MPI_Comm newintercomm;
  MPI_Intercomm_create (local_comm, local_leader, peer_comm, remote_leader,
			tag, &newintercomm); // L204--205
  MPI_Comm intercomm;
  MPI_Comm newintracomm;
  int high;
  MPI_Intercomm_merge (intercomm, high, &newintracomm); // L209
  /* attribute caching */
  int keyval;
#if 0
  /* disabled in the original listing: needs real callback functions */
  MPI_Copy_function copy_fn;
  MPI_Delete_function delete_fn;
  void* extra_state;
  MPI_Keyval_create (copy_fn, delete_fn, &keyval, extra_state); // L215
#endif
  MPI_Keyval_free (&keyval); // L217
  void* attribute_val;
  MPI_Attr_put (comm, keyval, attribute_val); // L219
  MPI_Attr_get (comm, keyval, attribute_val, &flag);
  MPI_Attr_delete (comm, keyval);

  /* === Environmental inquiry === */
  char* name;
  int resultlen;
  MPI_Get_processor_name (name, &resultlen); // L226
  MPI_Errhandler errhandler;
#if 0
  /* disabled in the original listing: needs a real handler function */
  MPI_Handler_function function;
  MPI_Errhandler_create (function, &errhandler); // L230
#endif
  MPI_Errhandler_set (comm, errhandler); // L232
  MPI_Errhandler_get (comm, &errhandler);
  MPI_Errhandler_free (&errhandler);
  int errorcode;
  char* string;
  MPI_Error_string (errorcode, string, &resultlen); // L237
  int errorclass;
  MPI_Error_class (errorcode, &errorclass); // L239
  MPI_Wtime ();
  MPI_Wtick ();
  int argc;
  char** argv;
  MPI_Init (&argc, &argv); // L244
  MPI_Finalize ();
  MPI_Initialized (&flag);
  MPI_Abort (comm, errorcode);
}
int test_communicators( void ) { MPI_Comm dup_comm_world, lo_comm, rev_comm, dup_comm, split_comm, world_comm; MPI_Group world_group, lo_group, rev_group; void *vvalue; int ranges[1][3]; int flag, world_rank, world_size, rank, size, n, key_1, key_3; int color, key, result; int errs = 0; MPI_Aint value; MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); MPI_Comm_size( MPI_COMM_WORLD, &world_size ); #ifdef DEBUG if (world_rank == 0) { printf( "*** Communicators ***\n" ); fflush(stdout); } #endif MPI_Comm_dup( MPI_COMM_WORLD, &dup_comm_world ); /* Exercise Comm_create by creating an equivalent to dup_comm_world (sans attributes) and a half-world communicator. */ #ifdef DEBUG if (world_rank == 0) { printf( " Comm_create\n" ); fflush(stdout); } #endif MPI_Comm_group( dup_comm_world, &world_group ); MPI_Comm_create( dup_comm_world, world_group, &world_comm ); MPI_Comm_rank( world_comm, &rank ); if (rank != world_rank) { errs++; printf( "incorrect rank in world comm: %d\n", rank ); MPI_Abort(MPI_COMM_WORLD, 3001 ); } n = world_size / 2; ranges[0][0] = 0; ranges[0][1] = (world_size - n) - 1; ranges[0][2] = 1; #ifdef DEBUG printf( "world rank = %d before range incl\n", world_rank );FFLUSH; #endif MPI_Group_range_incl(world_group, 1, ranges, &lo_group ); #ifdef DEBUG printf( "world rank = %d after range incl\n", world_rank );FFLUSH; #endif MPI_Comm_create(world_comm, lo_group, &lo_comm ); #ifdef DEBUG printf( "world rank = %d before group free\n", world_rank );FFLUSH; #endif MPI_Group_free( &lo_group ); #ifdef DEBUG printf( "world rank = %d after group free\n", world_rank );FFLUSH; #endif if (world_rank < (world_size - n)) { MPI_Comm_rank(lo_comm, &rank ); if (rank == MPI_UNDEFINED) { errs++; printf( "incorrect lo group rank: %d\n", rank ); fflush(stdout); MPI_Abort(MPI_COMM_WORLD, 3002 ); } else { /* printf( "lo in\n" );FFLUSH; */ MPI_Barrier(lo_comm ); /* printf( "lo out\n" );FFLUSH; */ } } else { if (lo_comm != MPI_COMM_NULL) { errs++; printf( "incorrect lo comm:\n" ); 
fflush(stdout); MPI_Abort(MPI_COMM_WORLD, 3003 ); } } #ifdef DEBUG printf( "worldrank = %d\n", world_rank );FFLUSH; #endif MPI_Barrier(world_comm); #ifdef DEBUG printf( "bar!\n" );FFLUSH; #endif /* Check Comm_dup by adding attributes to lo_comm & duplicating */ #ifdef DEBUG if (world_rank == 0) { printf( " Comm_dup\n" ); fflush(stdout); } #endif if (lo_comm != MPI_COMM_NULL) { value = 9; MPI_Keyval_create(copy_fn, delete_fn, &key_1, &value ); value = 8; value = 7; MPI_Keyval_create(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN, &key_3, &value ); /* This may generate a compilation warning; it is, however, an easy way to cache a value instead of a pointer */ /* printf( "key1 = %x key3 = %x\n", key_1, key_3 ); */ MPI_Attr_put(lo_comm, key_1, (void *) (MPI_Aint) world_rank ); MPI_Attr_put(lo_comm, key_3, (void *)0 ); MPI_Comm_dup(lo_comm, &dup_comm ); /* Note that if sizeof(int) < sizeof(void *), we can't use (void **)&value to get the value we passed into Attr_put. To avoid problems (e.g., alignment errors), we recover the value into a (void *) and cast to int. Note that this may generate warning messages from the compiler. */ MPI_Attr_get(dup_comm, key_1, (void **)&vvalue, &flag ); value = (MPI_Aint)vvalue; if (! flag) { errs++; printf( "dup_comm key_1 not found on %d\n", world_rank ); fflush( stdout ); MPI_Abort(MPI_COMM_WORLD, 3004 ); } if (value != world_rank) { errs++; printf( "dup_comm key_1 value incorrect: %ld, expected %d\n", (long)value, world_rank ); fflush( stdout ); MPI_Abort(MPI_COMM_WORLD, 3005 ); } MPI_Attr_get(dup_comm, key_3, (void **)&vvalue, &flag ); value = (MPI_Aint)vvalue; if (flag) { errs++; printf( "dup_comm key_3 found!\n" ); fflush( stdout ); MPI_Abort(MPI_COMM_WORLD, 3008 ); } MPI_Keyval_free(&key_1 ); MPI_Keyval_free(&key_3 ); } /* Split the world into even & odd communicators with reversed ranks. 
*/ #ifdef DEBUG if (world_rank == 0) { printf( " Comm_split\n" ); fflush(stdout); } #endif color = world_rank % 2; key = world_size - world_rank; MPI_Comm_split(dup_comm_world, color, key, &split_comm ); MPI_Comm_size(split_comm, &size ); MPI_Comm_rank(split_comm, &rank ); if (rank != ((size - world_rank/2) - 1)) { errs++; printf( "incorrect split rank: %d\n", rank ); fflush(stdout); MPI_Abort(MPI_COMM_WORLD, 3009 ); } MPI_Barrier(split_comm ); /* Test each possible Comm_compare result */ #ifdef DEBUG if (world_rank == 0) { printf( " Comm_compare\n" ); fflush(stdout); } #endif MPI_Comm_compare(world_comm, world_comm, &result ); if (result != MPI_IDENT) { errs++; printf( "incorrect ident result: %d\n", result ); MPI_Abort(MPI_COMM_WORLD, 3010 ); } if (lo_comm != MPI_COMM_NULL) { MPI_Comm_compare(lo_comm, dup_comm, &result ); if (result != MPI_CONGRUENT) { errs++; printf( "incorrect congruent result: %d\n", result ); MPI_Abort(MPI_COMM_WORLD, 3011 ); } } ranges[0][0] = world_size - 1; ranges[0][1] = 0; ranges[0][2] = -1; MPI_Group_range_incl(world_group, 1, ranges, &rev_group ); MPI_Comm_create(world_comm, rev_group, &rev_comm ); MPI_Comm_compare(world_comm, rev_comm, &result ); if (result != MPI_SIMILAR && world_size != 1) { errs++; printf( "incorrect similar result: %d\n", result ); MPI_Abort(MPI_COMM_WORLD, 3012 ); } if (lo_comm != MPI_COMM_NULL) { MPI_Comm_compare(world_comm, lo_comm, &result ); if (result != MPI_UNEQUAL && world_size != 1) { errs++; printf( "incorrect unequal result: %d\n", result ); MPI_Abort(MPI_COMM_WORLD, 3013 ); } } /* Free all communicators created */ #ifdef DEBUG if (world_rank == 0) printf( " Comm_free\n" ); #endif MPI_Comm_free( &world_comm ); MPI_Comm_free( &dup_comm_world ); MPI_Comm_free( &rev_comm ); MPI_Comm_free( &split_comm ); MPI_Group_free( &world_group ); MPI_Group_free( &rev_group ); if (lo_comm != MPI_COMM_NULL) { MPI_Comm_free( &lo_comm ); MPI_Comm_free( &dup_comm ); } return errs; }
int test_communicators( void ) { MPI_Comm dup_comm, comm; void *vvalue; int flag, world_rank, world_size, key_1, key_3; int errs = 0; MPI_Aint value; int isLeft; MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); MPI_Comm_size( MPI_COMM_WORLD, &world_size ); #ifdef DEBUG if (world_rank == 0) { printf( "*** Communicators ***\n" ); fflush(stdout); } #endif while (MTestGetIntercomm( &comm, &isLeft, 2 )) { MTestPrintfMsg(1, "start while loop, isLeft=%s\n", (isLeft ? "TRUE" : "FALSE")); if (comm == MPI_COMM_NULL) { MTestPrintfMsg(1, "got COMM_NULL, skipping\n"); continue; } /* Check Comm_dup by adding attributes to comm & duplicating */ value = 9; MPI_Keyval_create(copy_fn, delete_fn, &key_1, &value ); MTestPrintfMsg(1, "Keyval_create key=%#x value=%d\n", key_1, value); value = 7; MPI_Keyval_create(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN, &key_3, &value ); MTestPrintfMsg(1, "Keyval_create key=%#x value=%d\n", key_3, value); /* This may generate a compilation warning; it is, however, an easy way to cache a value instead of a pointer */ /* printf( "key1 = %x key3 = %x\n", key_1, key_3 ); */ MPI_Attr_put(comm, key_1, (void *) (MPI_Aint) world_rank ); MPI_Attr_put(comm, key_3, (void *)0 ); MTestPrintfMsg(1, "Comm_dup\n" ); MPI_Comm_dup(comm, &dup_comm ); /* Note that if sizeof(int) < sizeof(void *), we can't use (void **)&value to get the value we passed into Attr_put. To avoid problems (e.g., alignment errors), we recover the value into a (void *) and cast to int. Note that this may generate warning messages from the compiler. */ MPI_Attr_get(dup_comm, key_1, (void **)&vvalue, &flag ); value = (MPI_Aint)vvalue; if (! 
flag) { errs++; printf( "dup_comm key_1 not found on %d\n", world_rank ); fflush( stdout ); MPI_Abort(MPI_COMM_WORLD, 3004 ); } if (value != world_rank) { errs++; printf( "dup_comm key_1 value incorrect: %ld\n", (long)value ); fflush( stdout ); MPI_Abort(MPI_COMM_WORLD, 3005 ); } MPI_Attr_get(dup_comm, key_3, (void **)&vvalue, &flag ); value = (MPI_Aint)vvalue; if (flag) { errs++; printf( "dup_comm key_3 found!\n" ); fflush( stdout ); MPI_Abort(MPI_COMM_WORLD, 3008 ); } MTestPrintfMsg(1, "Keyval_free key=%#x\n", key_1); MPI_Keyval_free(&key_1 ); MTestPrintfMsg(1, "Keyval_free key=%#x\n", key_3); MPI_Keyval_free(&key_3 ); /* Free all communicators created */ MTestPrintfMsg(1, "Comm_free comm\n"); MPI_Comm_free( &comm ); MTestPrintfMsg(1, "Comm_free dup_comm\n"); MPI_Comm_free( &dup_comm ); } return errs; }
/*@C PetscInitialize - Initializes the PETSc database and MPI. PetscInitialize() calls MPI_Init() if that has yet to be called, so this routine should always be called near the beginning of your program -- usually the very first line! Collective on MPI_COMM_WORLD or PETSC_COMM_WORLD if it has been set Input Parameters: + argc - count of number of command line arguments . args - the command line arguments . file - [optional] PETSc database file, also checks ~username/.petscrc and .petscrc use NULL to not check for code specific file. Use -skip_petscrc in the code specific file to skip the .petscrc files - help - [optional] Help message to print, use NULL for no message If you wish PETSc code to run ONLY on a subcommunicator of MPI_COMM_WORLD, create that communicator first and assign it to PETSC_COMM_WORLD BEFORE calling PetscInitialize(). Thus if you are running a four process job and two processes will run PETSc and have PetscInitialize() and PetscFinalize() and two process will not, then do this. If ALL processes in the job are using PetscInitialize() and PetscFinalize() then you don't need to do this, even if different subcommunicators of the job are doing different things with PETSc. Options Database Keys: + -start_in_debugger [noxterm,dbx,xdb,gdb,...] - Starts program in debugger . -on_error_attach_debugger [noxterm,dbx,xdb,gdb,...] - Starts debugger when error detected . -on_error_emacs <machinename> causes emacsclient to jump to error file . -on_error_abort calls abort() when error detected (no traceback) . -on_error_mpiabort calls MPI_abort() when error detected . -error_output_stderr prints error messages to stderr instead of the default stdout . -error_output_none does not print the error messages (but handles errors in the same way as if this was not called) . -debugger_nodes [node1,node2,...] - Indicates nodes to start in debugger . -debugger_pause [sleeptime] (in seconds) - Pauses debugger . 
-stop_for_debugger - Print message on how to attach debugger manually to process and wait (-debugger_pause) seconds for attachment . -malloc - Indicates use of PETSc error-checking malloc (on by default for debug version of libraries) . -malloc no - Indicates not to use error-checking malloc . -malloc_debug - check for memory corruption at EVERY malloc or free . -malloc_test - like -malloc_dump -malloc_debug, but only active for debugging builds . -fp_trap - Stops on floating point exceptions (Note that on the IBM RS6000 this slows code by at least a factor of 10.) . -no_signal_handler - Indicates not to trap error signals . -shared_tmp - indicates /tmp directory is shared by all processors . -not_shared_tmp - each processor has own /tmp . -tmp - alternative name of /tmp directory . -get_total_flops - returns total flops done by all processors . -memory_info - Print memory usage at end of run - -server <port> - start PETSc webserver (default port is 8080) Options Database Keys for Profiling: See the <a href="../../docs/manual.pdf#nameddest=ch_profiling">profiling chapter of the users manual</a> for details. + -info <optional filename> - Prints verbose information to the screen . -info_exclude <null,vec,mat,pc,ksp,snes,ts> - Excludes some of the verbose messages . -log_sync - Log the synchronization in scatters, inner products and norms . -log_trace [filename] - Print traces of all PETSc calls to the screen (useful to determine where a program hangs without running in the debugger). See PetscLogTraceBegin(). . -log_summary [filename] - Prints summary of flop and timing information to screen. If the filename is specified the summary is written to the file. See PetscLogView(). . -log_summary_python [filename] - Prints data on of flop and timing usage to a file or screen. See PetscLogPrintSViewPython(). . -log_all [filename] - Logs extensive profiling information See PetscLogDump(). . -log [filename] - Logs basic profiline information See PetscLogDump(). 
- -log_mpe [filename] - Creates a logfile viewable by the utility Jumpshot (in MPICH distribution) Only one of -log_trace, -log_summary, -log_all, -log, or -log_mpe may be used at a time Environmental Variables: + PETSC_TMP - alternative tmp directory . PETSC_SHARED_TMP - tmp is shared by all processes . PETSC_NOT_SHARED_TMP - each process has its own private tmp . PETSC_VIEWER_SOCKET_PORT - socket number to use for socket viewer - PETSC_VIEWER_SOCKET_MACHINE - machine to use for socket viewer to connect to Level: beginner Notes: If for some reason you must call MPI_Init() separately, call it before PetscInitialize(). Fortran Version: In Fortran this routine has the format $ call PetscInitialize(file,ierr) + ierr - error return code - file - [optional] PETSc database file, also checks ~username/.petscrc and .petscrc use NULL_CHARACTER to not check for code specific file. Use -skip_petscrc in the code specific file to skip the .petscrc files Important Fortran Note: In Fortran, you MUST use NULL_CHARACTER to indicate a null character string; you CANNOT just use NULL as in the C version. See the <a href="../../docs/manual.pdf">users manual</a> for details. If your main program is C but you call Fortran code that also uses PETSc you need to call PetscInitializeFortran() soon after calling PetscInitialize(). 
Concepts: initializing PETSc .seealso: PetscFinalize(), PetscInitializeFortran(), PetscGetArgs(), PetscInitializeNoArguments() @*/ PetscErrorCode PetscInitialize(int *argc,char ***args,const char file[],const char help[]) { PetscErrorCode ierr; PetscMPIInt flag, size; PetscInt nodesize; PetscBool flg; char hostname[256]; PetscFunctionBegin; if (PetscInitializeCalled) PetscFunctionReturn(0); /* these must be initialized in a routine, not as a constant declaration*/ PETSC_STDOUT = stdout; PETSC_STDERR = stderr; ierr = PetscOptionsCreate();CHKERRQ(ierr); /* We initialize the program name here (before MPI_Init()) because MPICH has a bug in it that it sets args[0] on all processors to be args[0] on the first processor. */ if (argc && *argc) { ierr = PetscSetProgramName(**args);CHKERRQ(ierr); } else { ierr = PetscSetProgramName("Unknown Name");CHKERRQ(ierr); } ierr = MPI_Initialized(&flag);CHKERRQ(ierr); if (!flag) { if (PETSC_COMM_WORLD != MPI_COMM_NULL) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"You cannot set PETSC_COMM_WORLD if you have not initialized MPI first"); #if defined(PETSC_HAVE_MPI_INIT_THREAD) { PetscMPIInt provided; ierr = MPI_Init_thread(argc,args,MPI_THREAD_FUNNELED,&provided);CHKERRQ(ierr); } #else ierr = MPI_Init(argc,args);CHKERRQ(ierr); #endif PetscBeganMPI = PETSC_TRUE; } if (argc && args) { PetscGlobalArgc = *argc; PetscGlobalArgs = *args; } PetscFinalizeCalled = PETSC_FALSE; if (PETSC_COMM_WORLD == MPI_COMM_NULL) PETSC_COMM_WORLD = MPI_COMM_WORLD; ierr = MPI_Comm_set_errhandler(PETSC_COMM_WORLD,MPI_ERRORS_RETURN);CHKERRQ(ierr); /* Done after init due to a bug in MPICH-GM? 
*/ ierr = PetscErrorPrintfInitialize();CHKERRQ(ierr); ierr = MPI_Comm_rank(MPI_COMM_WORLD,&PetscGlobalRank);CHKERRQ(ierr); ierr = MPI_Comm_size(MPI_COMM_WORLD,&PetscGlobalSize);CHKERRQ(ierr); MPIU_BOOL = MPI_INT; MPIU_ENUM = MPI_INT; /* Initialized the global complex variable; this is because with shared libraries the constructors for global variables are not called; at least on IRIX. */ #if defined(PETSC_HAVE_COMPLEX) { #if defined(PETSC_CLANGUAGE_CXX) PetscComplex ic(0.0,1.0); PETSC_i = ic; #elif defined(PETSC_CLANGUAGE_C) PETSC_i = _Complex_I; #endif } #if !defined(PETSC_HAVE_MPI_C_DOUBLE_COMPLEX) ierr = MPI_Type_contiguous(2,MPI_DOUBLE,&MPIU_C_DOUBLE_COMPLEX);CHKERRQ(ierr); ierr = MPI_Type_commit(&MPIU_C_DOUBLE_COMPLEX);CHKERRQ(ierr); ierr = MPI_Type_contiguous(2,MPI_FLOAT,&MPIU_C_COMPLEX);CHKERRQ(ierr); ierr = MPI_Type_commit(&MPIU_C_COMPLEX);CHKERRQ(ierr); #endif #endif /* PETSC_HAVE_COMPLEX */ /* Create the PETSc MPI reduction operator that sums of the first half of the entries and maxes the second half. 
*/ ierr = MPI_Op_create(PetscMaxSum_Local,1,&PetscMaxSum_Op);CHKERRQ(ierr); #if defined(PETSC_USE_REAL___FLOAT128) ierr = MPI_Type_contiguous(2,MPI_DOUBLE,&MPIU___FLOAT128);CHKERRQ(ierr); ierr = MPI_Type_commit(&MPIU___FLOAT128);CHKERRQ(ierr); #if defined(PETSC_HAVE_COMPLEX) ierr = MPI_Type_contiguous(4,MPI_DOUBLE,&MPIU___COMPLEX128);CHKERRQ(ierr); ierr = MPI_Type_commit(&MPIU___COMPLEX128);CHKERRQ(ierr); #endif ierr = MPI_Op_create(PetscMax_Local,1,&MPIU_MAX);CHKERRQ(ierr); ierr = MPI_Op_create(PetscMin_Local,1,&MPIU_MIN);CHKERRQ(ierr); #endif #if (defined(PETSC_HAVE_COMPLEX) && !defined(PETSC_HAVE_MPI_C_DOUBLE_COMPLEX)) || defined(PETSC_USE_REAL___FLOAT128) ierr = MPI_Op_create(PetscSum_Local,1,&MPIU_SUM);CHKERRQ(ierr); #endif ierr = MPI_Type_contiguous(2,MPIU_SCALAR,&MPIU_2SCALAR);CHKERRQ(ierr); ierr = MPI_Type_commit(&MPIU_2SCALAR);CHKERRQ(ierr); ierr = MPI_Op_create(PetscADMax_Local,1,&PetscADMax_Op);CHKERRQ(ierr); ierr = MPI_Op_create(PetscADMin_Local,1,&PetscADMin_Op);CHKERRQ(ierr); #if defined(PETSC_USE_64BIT_INDICES) || !defined(MPI_2INT) ierr = MPI_Type_contiguous(2,MPIU_INT,&MPIU_2INT);CHKERRQ(ierr); ierr = MPI_Type_commit(&MPIU_2INT);CHKERRQ(ierr); #endif /* Attributes to be set on PETSc communicators */ ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelCounter,&Petsc_Counter_keyval,(void*)0);CHKERRQ(ierr); ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelComm_Outer,&Petsc_InnerComm_keyval,(void*)0);CHKERRQ(ierr); ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelComm_Inner,&Petsc_OuterComm_keyval,(void*)0);CHKERRQ(ierr); /* Build the options database */ ierr = PetscOptionsInsert(argc,args,file);CHKERRQ(ierr); /* Print main application help message */ ierr = PetscOptionsHasName(NULL,"-help",&flg);CHKERRQ(ierr); if (help && flg) { ierr = PetscPrintf(PETSC_COMM_WORLD,help);CHKERRQ(ierr); } ierr = PetscOptionsCheckInitial_Private();CHKERRQ(ierr); /* SHOULD PUT IN GUARDS: Make sure logging is initialized, even if we do not print it out */ #if 
defined(PETSC_USE_LOG) ierr = PetscLogBegin_Private();CHKERRQ(ierr); #endif /* Load the dynamic libraries (on machines that support them), this registers all the solvers etc. (On non-dynamic machines this initializes the PetscDraw and PetscViewer classes) */ ierr = PetscInitialize_DynamicLibraries();CHKERRQ(ierr); ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr); ierr = PetscInfo1(0,"PETSc successfully started: number of processors = %d\n",size);CHKERRQ(ierr); ierr = PetscGetHostName(hostname,256);CHKERRQ(ierr); ierr = PetscInfo1(0,"Running on machine: %s\n",hostname);CHKERRQ(ierr); ierr = PetscOptionsCheckInitial_Components();CHKERRQ(ierr); /* Check the options database for options related to the options database itself */ ierr = PetscOptionsSetFromOptions();CHKERRQ(ierr); #if defined(PETSC_USE_PETSC_MPI_EXTERNAL32) /* Tell MPI about our own data representation converter, this would/should be used if extern32 is not supported by the MPI Currently not used because it is not supported by MPICH. 
*/ #if !defined(PETSC_WORDS_BIGENDIAN) ierr = MPI_Register_datarep((char*)"petsc",PetscDataRep_read_conv_fn,PetscDataRep_write_conv_fn,PetscDataRep_extent_fn,NULL);CHKERRQ(ierr); #endif #endif ierr = PetscOptionsGetInt(NULL,"-hmpi_spawn_size",&nodesize,&flg);CHKERRQ(ierr); if (flg) { #if defined(PETSC_HAVE_MPI_COMM_SPAWN) ierr = PetscHMPISpawn((PetscMPIInt) nodesize);CHKERRQ(ierr); /* worker nodes never return from here; they go directly to PetscEnd() */ #else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"PETSc built without MPI 2 (MPI_Comm_spawn) support, use -hmpi_merge_size instead"); #endif } else { ierr = PetscOptionsGetInt(NULL,"-hmpi_merge_size",&nodesize,&flg);CHKERRQ(ierr); if (flg) { ierr = PetscHMPIMerge((PetscMPIInt) nodesize,NULL,NULL);CHKERRQ(ierr); if (PetscHMPIWorker) { /* if worker then never enter user code */ PetscInitializeCalled = PETSC_TRUE; PetscEnd(); } } } #if defined(PETSC_HAVE_CUDA) { PetscMPIInt p; for (p = 0; p < PetscGlobalSize; ++p) { if (p == PetscGlobalRank) cublasInit(); ierr = MPI_Barrier(PETSC_COMM_WORLD);CHKERRQ(ierr); } } #endif ierr = PetscOptionsHasName(NULL,"-python",&flg);CHKERRQ(ierr); if (flg) { PetscInitializeCalled = PETSC_TRUE; ierr = PetscPythonInitialize(NULL,NULL);CHKERRQ(ierr); } ierr = PetscThreadCommInitializePackage();CHKERRQ(ierr); /* Setup building of stack frames for all function calls */ #if defined(PETSC_USE_DEBUG) PetscThreadLocalRegister((PetscThreadKey*)&petscstack); /* Creates petscstack_key if needed */ ierr = PetscStackCreate();CHKERRQ(ierr); #endif #if defined(PETSC_SERIALIZE_FUNCTIONS) ierr = PetscFPTCreate(10000);CHKERRQ(ierr); #endif /* Once we are completedly initialized then we can set this variables */ PetscInitializeCalled = PETSC_TRUE; PetscFunctionReturn(0); }
/*@C
   PetscSharedTmp - Determines if all processors in a communicator share a
         /tmp or have different ones.

   Collective on MPI_Comm

   Input Parameters:
.  comm - MPI_Communicator that may share /tmp

   Output Parameters:
.  shared - PETSC_TRUE or PETSC_FALSE

   Options Database Keys:
+    -shared_tmp
.    -not_shared_tmp
-    -tmp tmpdir

   Environmental Variables:
+     PETSC_SHARED_TMP
.     PETSC_NOT_SHARED_TMP
-     PETSC_TMP

   Level: developer

   Notes:
   Stores the status as a MPI attribute so it does not have
    to be redetermined each time.

      Assumes that all processors in a communicator either
       1) have a common /tmp or
       2) each has a separate /tmp
      eventually we can write a fancier one that determines which processors
      share a common /tmp.

   This will be very slow on runs with a large number of processors since
   it requires O(p*p) file opens.

   If the environmental variable PETSC_TMP is set it will use this directory
  as the "/tmp" directory.

@*/
PetscErrorCode PETSC_DLLEXPORT PetscSharedTmp(MPI_Comm comm,PetscTruth *shared)
{
  PetscErrorCode     ierr;
  PetscMPIInt        size,rank,*tagvalp,sum,cnt,i;
  PetscTruth         flg,iflg;
  FILE               *fd;
  /* keyval is lazily created once per process and reused for every
     communicator queried thereafter */
  static PetscMPIInt Petsc_Tmp_keyval = MPI_KEYVAL_INVALID;
  int                err;

  PetscFunctionBegin;
  /* A single process trivially "shares" its own /tmp */
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size == 1) {
    *shared = PETSC_TRUE;
    PetscFunctionReturn(0);
  }

  /* Environment overrides: the mere presence of either variable decides
     the answer without probing the filesystem */
  ierr = PetscOptionsGetenv(comm,"PETSC_SHARED_TMP",PETSC_NULL,0,&flg);CHKERRQ(ierr);
  if (flg) {
    *shared = PETSC_TRUE;
    PetscFunctionReturn(0);
  }

  ierr = PetscOptionsGetenv(comm,"PETSC_NOT_SHARED_TMP",PETSC_NULL,0,&flg);CHKERRQ(ierr);
  if (flg) {
    *shared = PETSC_FALSE;
    PetscFunctionReturn(0);
  }

  if (Petsc_Tmp_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelTmpShared,&Petsc_Tmp_keyval,0);CHKERRQ(ierr);
  }

  /* Check whether the answer was already cached on this communicator */
  ierr = MPI_Attr_get(comm,Petsc_Tmp_keyval,(void**)&tagvalp,(int*)&iflg);CHKERRQ(ierr);
  if (!iflg) {
    char filename[PETSC_MAX_PATH_LEN],tmpname[PETSC_MAX_PATH_LEN];

    /* This communicator does not yet have a shared tmp attribute */
    ierr = PetscMalloc(sizeof(PetscMPIInt),&tagvalp);CHKERRQ(ierr);
    ierr = MPI_Attr_put(comm,Petsc_Tmp_keyval,tagvalp);CHKERRQ(ierr);

    /* NOTE(review): the limit 238 is less than PETSC_MAX_PATH_LEN,
       presumably to leave room for the "/petsctestshared" suffix appended
       below — confirm before changing it */
    ierr = PetscOptionsGetenv(comm,"PETSC_TMP",tmpname,238,&iflg);CHKERRQ(ierr);
    if (!iflg) {
      ierr = PetscStrcpy(filename,"/tmp");CHKERRQ(ierr);
    } else {
      ierr = PetscStrcpy(filename,tmpname);CHKERRQ(ierr);
    }

    ierr = PetscStrcat(filename,"/petsctestshared");CHKERRQ(ierr);
    ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

    /* each processor creates a /tmp file and all the later ones check */
    /* this makes sure no subset of processors is shared */
    *shared = PETSC_FALSE;
    for (i=0; i<size-1; i++) {
      /* Round i: rank i writes the probe file ... */
      if (rank == i) {
        fd = fopen(filename,"w");
        if (!fd) {
          SETERRQ1(PETSC_ERR_FILE_OPEN,"Unable to open test file %s",filename);
        }
        err = fclose(fd);
        if (err) SETERRQ(PETSC_ERR_SYS,"fclose() failed on file");
      }
      ierr = MPI_Barrier(comm);CHKERRQ(ierr);
      /* ... and every rank >= i reports whether it can see it */
      if (rank >= i) {
        fd = fopen(filename,"r");
        if (fd) cnt = 1;
        else cnt = 0;
        if (fd) {
          err = fclose(fd);
          if (err) SETERRQ(PETSC_ERR_SYS,"fclose() failed on file");
        }
      } else {
        cnt = 0;
      }
      /* sum == size: everyone saw the file -> fully shared (done);
         sum == 1: only the writer saw it -> keep probing;
         anything else: a proper subset shares /tmp -> unsupported */
      ierr = MPI_Allreduce(&cnt,&sum,1,MPI_INT,MPI_SUM,comm);CHKERRQ(ierr);
      if (rank == i) {
        unlink(filename);
      }

      if (sum == size) {
        *shared = PETSC_TRUE;
        break;
      } else if (sum != 1) {
        SETERRQ(PETSC_ERR_SUP_SYS,"Subset of processes share /tmp ");
      }
    }
    /* Cache the result on the communicator for subsequent calls */
    *tagvalp = (int)*shared;
    ierr = PetscInfo2(0,"processors %s %s\n",(*shared) ? "share":"do NOT share",(iflg ? tmpname:"/tmp"));CHKERRQ(ierr);
  } else {
    *shared = (PetscTruth) *tagvalp;
  }
  PetscFunctionReturn(0);
}