/*@
   PetscCommGetNewTag - Gets a unique new tag from a PETSc communicator. All
   processors that share the communicator MUST call this routine EXACTLY the
   same number of times. This tag should only be used with the current objects
   communicator; do NOT use it with any other MPI communicator.

   Collective on comm

   Input Parameter:
.  comm - the MPI communicator

   Output Parameter:
.  tag - the new tag

   Level: developer

   Concepts: tag^getting
   Concepts: message tag^getting
   Concepts: MPI message tag^getting

.seealso: PetscObjectGetNewTag(), PetscCommDuplicate()
@*/
PetscErrorCode PetscCommGetNewTag(MPI_Comm comm,PetscMPIInt *tag)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      *maxval,flg;

  PetscFunctionBegin;
  PetscValidIntPointer(tag,2);

  /* The tag counter is stored in an attribute attached by PetscCommDuplicate();
     its absence means this is not a PETSc communicator. */
  ierr = MPI_Attr_get(comm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Bad MPI communicator supplied; must be a PETSc communicator");

  if (counter->tag < 1) {
    /* Counter exhausted: restart just below MPI_TAG_UB and recycle. */
    ierr = PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
    ierr = MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
    counter->tag = *maxval - 128;  /* hope that any still active tags were issued right at the beginning of the run */
  }

  /* Tags are handed out in decreasing order. */
  *tag = counter->tag--;
#if defined(PETSC_USE_DEBUG)
  /*
     Hanging here means that some processes have called PetscCommGetNewTag() and others have not.
  */
  ierr = MPI_Barrier(comm);CHKERRQ(ierr);
#endif
  PetscFunctionReturn(0);
}
/*
   MPIR_InitFortran - Creates the Fortran-visible predefined attribute keyvals
   (MPIR_TAG_UB, MPIR_HOST, MPIR_IO, MPIR_WTIME_IS_GLOBAL), copies the C
   attribute values onto them, marks them permanent, and initializes the
   Fortran logical constants and MPI_BOTTOM.  Returns 0.
 */
int MPIR_InitFortran( void )
{
    int      *attr_ptr, flag, i;
    MPI_Aint attr_val;

    /* Create the attribute values */
    /* Do the Fortran versions - Pass the actual value.  Note that these use
       MPIR_Keyval_create with the "is_fortran" flag set.  If you change
       these; change the removal in finalize.c. */
#define NULL_COPY (MPI_Copy_function *)0
#define NULL_DEL (MPI_Delete_function*)0
    i = MPIR_TAG_UB;
    MPIR_Keyval_create( NULL_COPY, NULL_DEL, &i, (void *)0, 1 );
    i = MPIR_HOST;
    MPIR_Keyval_create( NULL_COPY, NULL_DEL, &i, (void *)0, 1 );
    i = MPIR_IO;
    MPIR_Keyval_create( NULL_COPY, NULL_DEL, &i, (void *)0, 1 );
    i = MPIR_WTIME_IS_GLOBAL;
    MPIR_Keyval_create( NULL_COPY, NULL_DEL, &i, (void *)0, 1 );

    /* We need to switch this to the MPI-2 version to handle different word
       lengths */
    /* Attr_get needs to be referenced from MPI_Init so that we can use it
       here */
    /* For each predefined C attribute, fetch the int value and store it as a
       Fortran-style (value-in-pointer, MPI_Aint-sized) attribute. */
    MPI_Attr_get( MPI_COMM_WORLD, MPI_TAG_UB, &attr_ptr, &flag );
    attr_val = (MPI_Aint) *attr_ptr;
    MPI_Attr_put( MPI_COMM_WORLD, MPIR_TAG_UB, (void*)attr_val );

    MPI_Attr_get( MPI_COMM_WORLD, MPI_HOST, &attr_ptr, &flag );
    attr_val = (MPI_Aint) *attr_ptr;
    MPI_Attr_put( MPI_COMM_WORLD, MPIR_HOST, (void*)attr_val );

    MPI_Attr_get( MPI_COMM_WORLD, MPI_IO, &attr_ptr, &flag );
    attr_val = (MPI_Aint) *attr_ptr;
    MPI_Attr_put( MPI_COMM_WORLD, MPIR_IO, (void*)attr_val );

    MPI_Attr_get( MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL, &attr_ptr, &flag );
    attr_val = (MPI_Aint) *attr_ptr;
    MPI_Attr_put( MPI_COMM_WORLD, MPIR_WTIME_IS_GLOBAL, (void*)attr_val );

    /* Predefined attributes may not be deleted or overwritten by users. */
    MPIR_Attr_make_perm( MPIR_TAG_UB );
    MPIR_Attr_make_perm( MPIR_HOST );
    MPIR_Attr_make_perm( MPIR_IO );
    MPIR_Attr_make_perm( MPIR_WTIME_IS_GLOBAL );

#ifndef F77_TRUE_VALUE_SET
    /* Discover the Fortran compiler's representation of .TRUE./.FALSE. */
    mpir_init_flog_( &MPIR_F_TRUE, &MPIR_F_FALSE );
#endif
    /* fcm sets MPI_BOTTOM */
    mpir_init_fcm_( );
    return 0;
}
/*@C
   PETSC_VIEWER_BINARY_ - Creates a binary PetscViewer shared by all processors
   in a communicator.

   Collective on MPI_Comm

   Input Parameter:
.  comm - the MPI communicator to share the binary PetscViewer

   Level: intermediate

   Options Database Keys:
+  -viewer_binary_filename <name>
.  -viewer_binary_skip_info
-  -viewer_binary_skip_options

   Environmental variables:
-  PETSC_VIEWER_BINARY_FILENAME

   Notes:
   Unlike almost all other PETSc routines, PETSC_VIEWER_BINARY_ does not return
   an error code.  The binary PetscViewer is usually used in the form
$      XXXView(XXX object,PETSC_VIEWER_BINARY_(comm));

.seealso: PETSC_VIEWER_BINARY_WORLD, PETSC_VIEWER_BINARY_SELF, PetscViewerBinaryOpen(), PetscViewerCreate(),
          PetscViewerDestroy()
@*/
PetscViewer PETSC_VIEWER_BINARY_(MPI_Comm comm)
{
  PetscErrorCode ierr;
  PetscBool      flg;
  PetscViewer    viewer;
  char           fname[PETSC_MAX_PATH_LEN];
  MPI_Comm       ncomm;

  PetscFunctionBegin;
  /* CHKERRQ cannot be used here because the function returns a PetscViewer,
     not an error code; each failure logs the error and returns NULL. */
  ierr = PetscCommDuplicate(comm,&ncomm,NULL);if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  /* Lazily create the keyval used to cache the viewer on the communicator. */
  if (Petsc_Viewer_Binary_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Viewer_Binary_keyval,0);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  }
  ierr = MPI_Attr_get(ncomm,Petsc_Viewer_Binary_keyval,(void**)&viewer,(int*)&flg);
  if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  if (!flg) { /* PetscViewer not yet created */
    /* Filename comes from the environment, falling back to "binaryoutput". */
    ierr = PetscOptionsGetenv(ncomm,"PETSC_VIEWER_BINARY_FILENAME",fname,PETSC_MAX_PATH_LEN,&flg);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    if (!flg) {
      ierr = PetscStrcpy(fname,"binaryoutput");
      if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    }
    ierr = PetscViewerBinaryOpen(ncomm,fname,FILE_MODE_WRITE,&viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    /* Registered so PetscFinalize() destroys the shared viewer. */
    ierr = PetscObjectRegisterDestroy((PetscObject)viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    /* Cache on the communicator so later calls return the same viewer. */
    ierr = MPI_Attr_put(ncomm,Petsc_Viewer_Binary_keyval,(void*)viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  }
  /* Drop the reference taken by PetscCommDuplicate(); the attribute keeps the viewer alive. */
  ierr = PetscCommDestroy(&ncomm);
  if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_BINARY_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  PetscFunctionReturn(viewer);
}
/*
 * Test that MPI_UNIVERSE_SIZE, if set, is at least the size of
 * MPI_COMM_WORLD.  The attribute is optional, so an unset flag is not an
 * error; a failed MPI_Attr_get call or an undersized universe aborts.
 *
 * Fix: the original compared against a hard-coded 5 while the diagnostic
 * message reported `size`; the check now matches the message and compares
 * against the communicator size.
 */
int main(int argc, char **argv)
{
    int  rc, flag, vval, size;
    void *v;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    rc = MPI_Attr_get(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, &v, &flag);
    if (rc) {
        /* The Attr_get call itself failed. */
        fprintf(stderr, "MPI_UNIVERSE_SIZE missing\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    else {
        /* MPI_UNIVERSE_SIZE need not be set */
        if (flag) {
            /* MPI-1 attribute convention: v points at the int value. */
            vval = *(int *) v;
            /* The universe can never be smaller than MPI_COMM_WORLD. */
            if (vval < size) {
                fprintf(stderr, "MPI_UNIVERSE_SIZE = %d, less than expected (%d)\n", vval, size);
                MPI_Abort(MPI_COMM_WORLD, 1);
            }
        }
    }

    MPI_Finalize();
    printf(" No errors\n");
    return 0;
}
/*
   PetscViewerDestroy_ASCII - Destroys the ASCII-specific data of a viewer and
   removes the viewer from the per-communicator linked list of ASCII viewers.

   Errors if a singleton/subcomm viewer obtained from this viewer has not been
   restored, since that would leave a dangling reference.
 */
PetscErrorCode PetscViewerDestroy_ASCII(PetscViewer viewer)
{
  PetscErrorCode    ierr;
  PetscViewer_ASCII *vascii = (PetscViewer_ASCII *)viewer->data;
  PetscViewerLink   *vlink;
  PetscBool         flg;

  PetscFunctionBegin;
  if (vascii->sviewer) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"ASCII PetscViewer destroyed before restoring singleton or subcomm PetscViewer");
  ierr = PetscViewerFileClose_ASCII(viewer);CHKERRQ(ierr);
  ierr = PetscFree(vascii);CHKERRQ(ierr);

  /* remove the viewer from the list in the MPI Communicator */
  if (Petsc_Viewer_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelViewer,&Petsc_Viewer_keyval,(void*)0);CHKERRQ(ierr);
  }
  ierr = MPI_Attr_get(((PetscObject)viewer)->comm,Petsc_Viewer_keyval,(void**)&vlink,(PetscMPIInt*)&flg);CHKERRQ(ierr);
  if (flg) {
    if (vlink && vlink->viewer == viewer) {
      /* Viewer is at the head of the list: repoint the attribute at the next link. */
      ierr = MPI_Attr_put(((PetscObject)viewer)->comm,Petsc_Viewer_keyval,vlink->next);CHKERRQ(ierr);
      ierr = PetscFree(vlink);CHKERRQ(ierr);
    } else {
      /* Otherwise scan the list and unlink the matching node. */
      while (vlink && vlink->next) {
        if (vlink->next->viewer == viewer) {
          PetscViewerLink *nv = vlink->next;
          vlink->next = vlink->next->next;
          ierr = PetscFree(nv);CHKERRQ(ierr);
        }
        vlink = vlink->next;
      }
    }
  }
  PetscFunctionReturn(0);
}
/*@C
   PetscObjectName - Gives an object a name if it does not have one

   Collective

   Input Parameters:
.  obj - the Petsc variable
         Thus must be cast with a (PetscObject), for example,
         PetscObjectName((PetscObject)mat,name);

   Level: developer

   Concepts: object name^setting default

   Notes: This is used in a small number of places when an object NEEDS a name,
          for example when it is saved to MATLAB with that variable name. Use
          PetscObjectSetName() to set the name of an object to what you want.
          The SAWs viewer requires that no two published objects share the
          same name.

   Developer Note: this needs to generate the exact same string on all ranks
   that share the object. The current algorithm may not always work.

.seealso: PetscObjectGetName(), PetscObjectSetName()
@*/
PetscErrorCode PetscObjectName(PetscObject obj)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      flg;
  char             name[64];

  PetscFunctionBegin;
  PetscValidHeader(obj,1);
  if (!obj->name) {
    /* Union lets a (possibly smaller) MPI_Comm handle be broadcast and then
       printed uniformly as a pointer-sized value. */
    union {MPI_Comm comm; void *ptr; char raw[sizeof(MPI_Comm)];} ucomm;
    /* The name counter lives on PETSc communicators only. */
    ierr = MPI_Attr_get(obj->comm,Petsc_Counter_keyval,(void*)&counter,&flg); CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Bad MPI communicator supplied; must be a PETSc communicator");
    ucomm.ptr = NULL;
    ucomm.comm = obj->comm;
    /* Broadcast rank 0's handle bytes so every rank builds the same string. */
    ierr = MPI_Bcast(ucomm.raw,sizeof(MPI_Comm),MPI_BYTE,0,obj->comm); CHKERRQ(ierr);
    /* If the union has extra bytes, their value is implementation-dependent,
     * but they will normally be what we set last in 'ucomm.ptr = NULL'. This
     * output is always implementation-defined (and varies from run to run) so
     * the union abuse acceptable. */
    ierr = PetscSNPrintf(name,64,"%s_%p_%D",obj->class_name,ucomm.ptr,counter->namecount++); CHKERRQ(ierr);
    ierr = PetscStrallocpy(name,&obj->name); CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/* Set in Fortran (MPI-1), read in C */
/*
   cmpif1read_ - Fortran-callable helper: reads attribute *fkey from the
   Fortran communicator *fcomm and verifies the value equals *expected,
   incrementing *errs and printing a message (labelled by msg/msglen, a
   Fortran character argument with a trailing hidden length) on mismatch.
 */
void cmpif1read_( MPI_Fint *fcomm, MPI_Fint *fkey, MPI_Fint *expected, MPI_Fint *errs, const char *msg, int msglen )
{
    void     *attrval;
    int      flag, result;
    MPI_Comm comm = MPI_Comm_f2c( *fcomm );
    char     lmsg[MAX_ATTRTEST_MSG];

    /* Fortran strings are not NUL-terminated; ensure the copy below fits. */
    if (msglen > sizeof(lmsg)- 1) {
        fprintf( stderr, "Message too long for buffer (%d)\n", msglen );
        MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    MPI_Attr_get( comm, *fkey, &attrval, &flag );
    if (!flag) {
        *errs = *errs + 1;
        strncpy( lmsg, msg, msglen );
        lmsg[msglen] = 0;
        printf( " Error: flag false for Attr_get (set in F1): %s\n", lmsg );
        return;
    }
    /* Must be careful to compare as required in the MPI specification */
    ccompareint2void_( expected, attrval, &result );
    if (!result) {
        *errs = *errs + 1;
        strncpy( lmsg, msg, msglen );
        lmsg[msglen] = 0;
        printf( " Error: (set in F1) expected %d but saw %d: %s\n", *expected, *(MPI_Fint*)attrval, lmsg );
        return;
    }
    return;
}
/*@C
   PETSC_MATLAB_ENGINE_ - Creates a matlab engine shared by all processors in a
   communicator.

   Not Collective

   Input Parameter:
.  comm - the MPI communicator to share the engine

   Level: developer

   Notes:
   Unlike almost all other PETSc routines, this does not return an error code.
   Usually used in the form
$      PetscMatlabEngineYYY(XXX object,PETSC_MATLAB_ENGINE_(comm));

.seealso: PetscMatlabEngineDestroy(), PetscMatlabEnginePut(), PetscMatlabEngineGet(),
          PetscMatlabEngineEvaluate(), PetscMatlabEngineGetOutput(), PetscMatlabEnginePrintOutput(),
          PetscMatlabEngineCreate(), PetscMatlabEnginePutArray(), PetscMatlabEngineGetArray(),
          PetscMatlabEngine, PETSC_MATLAB_ENGINE_WORLD, PETSC_MATLAB_ENGINE_SELF
@*/
PetscMatlabEngine PETSC_MATLAB_ENGINE_(MPI_Comm comm)
{
  PetscErrorCode    ierr;
  PetscBool         flg;
  PetscMatlabEngine mengine;

  PetscFunctionBegin;
  /* CHKERRQ cannot be used: the return type is PetscMatlabEngine.  On any
     failure the error is logged and mengine is zeroed (NOTE(review): execution
     then continues rather than returning immediately — matches this file's
     existing idiom, but the subsequent calls run with mengine == 0). */
  if (Petsc_Matlab_Engine_keyval == MPI_KEYVAL_INVALID) {
    /* Lazily create the keyval used to cache the engine on the communicator. */
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Matlab_Engine_keyval,0);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," "); mengine = 0;}
  }
  ierr = MPI_Attr_get(comm,Petsc_Matlab_Engine_keyval,(void**)&mengine,(int*)&flg);
  if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," "); mengine = 0;}
  if (!flg) { /* viewer not yet created */
    char *machinename = 0,machine[64];
    /* Optional machine name for the engine from the options database. */
    ierr = PetscOptionsGetString(NULL,NULL,"-matlab_engine_machine",machine,64,&flg);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," "); mengine = 0;}
    if (flg) machinename = machine;
    ierr = PetscMatlabEngineCreate(comm,machinename,&mengine);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," "); mengine = 0;}
    /* Registered so PetscFinalize() destroys the shared engine. */
    ierr = PetscObjectRegisterDestroy((PetscObject)mengine);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," "); mengine = 0;}
    /* Cache on the communicator so later calls return the same engine. */
    ierr = MPI_Attr_put(comm,Petsc_Matlab_Engine_keyval,mengine);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_MATLAB_ENGINE_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," "); mengine = 0;}
  }
  PetscFunctionReturn(mengine);
}
/*@C
   PETSC_VIEWER_DRAW_ - Creates a window PetscViewer shared by all processors
   in a communicator.

   Collective on MPI_Comm

   Input Parameter:
.  comm - the MPI communicator to share the window PetscViewer

   Level: intermediate

   Notes:
   Unlike almost all other PETSc routines, PETSC_VIEWER_DRAW_ does not return
   an error code.  The window is usually used in the form
$      XXXView(XXX object,PETSC_VIEWER_DRAW_(comm));

.seealso: PETSC_VIEWER_DRAW_WORLD, PETSC_VIEWER_DRAW_SELF, PetscViewerDrawOpen(),
@*/
PetscViewer PETSC_VIEWER_DRAW_(MPI_Comm comm)
{
  PetscErrorCode ierr;
  PetscMPIInt    flag;
  PetscViewer    viewer;
  MPI_Comm       ncomm;

  PetscFunctionBegin;
  /* CHKERRQ cannot be used here because the function returns a PetscViewer,
     not an error code; each failure logs the error and returns NULL. */
  ierr = PetscCommDuplicate(comm,&ncomm,NULL);if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  /* Lazily create the keyval used to cache the viewer on the communicator. */
  if (Petsc_Viewer_Draw_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Viewer_Draw_keyval,0);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  }
  ierr = MPI_Attr_get(ncomm,Petsc_Viewer_Draw_keyval,(void**)&viewer,&flag);
  if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  if (!flag) { /* PetscViewer not yet created */
    /* 300x300 window at a default position. */
    ierr = PetscViewerDrawOpen(ncomm,0,0,PETSC_DECIDE,PETSC_DECIDE,300,300,&viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    /* Registered so PetscFinalize() destroys the shared viewer. */
    ierr = PetscObjectRegisterDestroy((PetscObject)viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
    /* Cache on the communicator so later calls return the same viewer. */
    ierr = MPI_Attr_put(ncomm,Petsc_Viewer_Draw_keyval,(void*)viewer);
    if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  }
  /* Drop the reference taken by PetscCommDuplicate(); the attribute keeps the viewer alive. */
  ierr = PetscCommDestroy(&ncomm);
  if (ierr) {PetscError(PETSC_COMM_SELF,__LINE__,"PETSC_VIEWER_DRAW_",__FILE__,PETSC_ERR_PLIB,PETSC_ERROR_INITIAL," ");PetscFunctionReturn(0);}
  PetscFunctionReturn(viewer);
}
/*
   MPIattr_get - GAP-level binding for MPI_Attr_get on MPI_COMM_WORLD.
   Returns False if the attribute is not set, otherwise the attribute value
   wrapped as a GAP integer object.

   NOTE(review): for predefined keyvals the MPI-1 C binding stores a POINTER
   to the attribute value into attribute_val, yet INTOBJ_INT is applied to the
   pointer itself rather than to *(int*)attribute_val — confirm against the
   callers/INTOBJ_INT semantics whether this is intentional.
 */
Obj MPIattr_get( Obj self, Obj keyval )
{
    void *attribute_val;
    int flag=1;
    /* hd1 = MPI_IO -> can also return MPI_ANY_SOURCE / MPI_PROC_NULL */
    MPI_Attr_get(MPI_COMM_WORLD, INT_INTOBJ(keyval), &attribute_val, &flag);
    /* if (INT_INTOBJ(keyval) == MPI_HOST) flag = 0; */
    if (!flag) return False;
    return INTOBJ_INT(attribute_val);
}
/*
   Tests MPI_WTIME_IS_GLOBAL on MPI_COMM_WORLD: validates the attribute value
   if present, and when the clocks are claimed global runs CheckTime() (defined
   elsewhere) before and after a 10-second busy wait.  Rank 0 reports the
   verdict; the error count is also the exit status.
 */
int main( int argc, char **argv )
{
    int    err = 0;
    void   *v;
    int    flag=0;
    int    vval;
    int    rank;
    double t1;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Attr_get( MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL, &v, &flag );
#ifdef DEBUG
    /* NOTE(review): v is only defined when flag is true; this debug block
       tests v itself before checking flag — confirm that is intended. */
    if (v) vval = *(int*)v; else vval = 0;
    printf( "WTIME flag = %d; val = %d\n", flag, vval );
#endif
    if (flag) {
        /* Wtime need not be set */
        vval = *(int*)v;
        if (vval < 0 || vval > 1) {
            err++;
            fprintf( stderr, "Invalid value for WTIME_IS_GLOBAL (got %d)\n", vval );
        }
    }
    if (flag && vval) {
        /* Wtime is global is true.  Check it */
#ifdef DEBUG
        printf( "WTIME_IS_GLOBAL\n" );
#endif
        err += CheckTime();
        /* Wait for 10 seconds */
        t1 = MPI_Wtime();
        while (MPI_Wtime() - t1 < 10.0) ;
        err += CheckTime();
    }
    if (rank == 0) {
        if (err > 0) {
            printf( "Errors in MPI_WTIME_IS_GLOBAL\n" );
        }
        else {
            printf( " No Errors\n" );
        }
    }
    /* The SGI implementation of MPI sometimes fails to flush stdout properly.
       This fflush will work around that bug.  */
    /* fflush( stdout ); */
    MPI_Finalize( );
    return err;
}
/*
  This does not actually free anything, it simply marks when a reference count
  to an internal or external MPI_Comm reaches zero and the the external
  MPI_Comm drops its reference to the internal or external MPI_Comm

  This is called by MPI, not by users. This is called when MPI_Comm_free() is
  called on the communicator.

  Note: this is declared extern "C" because it is passed to MPI_Keyval_create()
*/
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_DelComm(MPI_Comm comm,PetscMPIInt keyval,void *attr_val,void *extra_state)
{
  PetscErrorCode ierr;
  PetscMPIInt    flg;
  MPI_Comm       icomm;
  void           *ptr;

  PetscFunctionBegin;
  /* Does this (outer) comm carry a reference to an inner PETSc comm? */
  ierr = MPI_Attr_get(comm,Petsc_InnerComm_keyval,&ptr,&flg);CHKERRQ(ierr);
  if (flg) {
    /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
    ierr = PetscMemcpy(&icomm,&ptr,sizeof(MPI_Comm));CHKERRQ(ierr);
    /* The inner comm must hold the matching back-reference to this outer comm. */
    ierr = MPI_Attr_get(icomm,Petsc_OuterComm_keyval,&ptr,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected reference to outer comm");
    /* Break the inner->outer link since the outer comm is going away. */
    ierr = MPI_Attr_delete(icomm,Petsc_OuterComm_keyval);CHKERRQ(ierr);
    ierr = PetscInfo1(0,"User MPI_Comm m %ld is being freed, removing reference from inner PETSc comm to this outer comm\n",(long)comm);if (ierr) PetscFunctionReturn((PetscMPIInt)ierr);
  } else {
    ierr = PetscInfo1(0,"Removing reference to PETSc communicator imbedded in a user MPI_Comm m %ld\n",(long)comm);if (ierr) PetscFunctionReturn((PetscMPIInt)ierr);
  }
  PetscFunctionReturn(MPI_SUCCESS);
}
/*@
  MPE_GetTags - Returns tags that can be used in communication with a
  communicator

  Input Parameters:
+ comm_in - Input communicator
- ntags   - Number of tags

  Output Parameters:
+ comm_out  - Output communicator.  May be 'comm_in'.
- first_tag - First tag available

  Returns:
  MPI_SUCCESS on success, MPI error class on failure.

  Notes:
  This routine returns the requested number of tags, with the tags being
  'first_tag', 'first_tag+1', ..., 'first_tag+ntags-1'.  These tags are
  guarenteed to be unique within 'comm_out'.

.seealso: MPE_ReturnTags
@*/
int MPE_GetTags( MPI_Comm comm_in, int ntags, MPI_Comm *comm_out, int *first_tag )
{
    int mpe_errno = MPI_SUCCESS;
    int *tagvalp, *maxval, flag;

    /* Lazily create the keyval holding the next-free-tag counter. */
    if (MPE_Tag_keyval == MPI_KEYVAL_INVALID) {
        MPI_Keyval_create( MPI_NULL_COPY_FN, MPE_DelTag, &MPE_Tag_keyval, (void *)0 );
    }

    if ((mpe_errno = MPI_Attr_get( comm_in, MPE_Tag_keyval, &tagvalp, &flag )))
        return mpe_errno;

    if (!flag) {
        /* This communicator is not yet known to this system, so we dup it and
           setup the first value */
        MPI_Comm_dup( comm_in, comm_out );
        comm_in = *comm_out;
        /* NOTE(review): flag from this TAG_UB query is not rechecked. */
        MPI_Attr_get( MPI_COMM_WORLD, MPI_TAG_UB, &maxval, &flag );
        tagvalp = (int *)malloc( 2 * sizeof(int) );
        if (!tagvalp) return MPI_ERR_OTHER;
        /* Tags are handed out downward from MPI_TAG_UB; *tagvalp records the
           lowest tag still available. */
        *tagvalp = *maxval;
        *first_tag = *tagvalp - ntags;
        *tagvalp = *first_tag;
        MPI_Attr_put( comm_in, MPE_Tag_keyval, tagvalp );
        return MPI_SUCCESS;
    }

    *comm_out = comm_in;

    if (*tagvalp < ntags) {
        /* Error, out of tags.  Another solution would be to do an
           MPI_Comm_dup. */
        return MPI_ERR_INTERN;
    }
    *first_tag = *tagvalp - ntags;
    *tagvalp = *first_tag;

    return MPI_SUCCESS;
}
/*@C
   PetscCommDestroy - Frees communicator.  Use in conjunction with
   PetscCommDuplicate().

   Collective on MPI_Comm

   Input Parameter:
.  comm - the communicator to free

   Level: developer

   Concepts: communicator^destroy

.seealso: PetscCommDuplicate()
@*/
PetscErrorCode PetscCommDestroy(MPI_Comm *comm)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      flg;
  MPI_Comm         icomm = *comm,ocomm;
  union {MPI_Comm comm; void *ptr;} ucomm;

  PetscFunctionBegin;
  if (*comm == MPI_COMM_NULL) PetscFunctionReturn(0);
  ierr = PetscSpinlockLock(&PetscCommSpinLock);CHKERRQ(ierr);
  /* Locate the reference counter: either directly (inner/PETSc comm) ... */
  ierr = MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
  if (!flg) { /* not a PETSc comm, check if it has an inner comm */
    /* ... or via the inner comm attached to a user (outer) comm. */
    ierr = MPI_Attr_get(icomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"MPI_Comm does not have tag/name counter nor does it have inner MPI_Comm");
    icomm = ucomm.comm;
    ierr = MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
  }

  counter->refcount--;

  if (!counter->refcount) {
    /* if MPI_Comm has outer comm then remove reference to inner MPI_Comm from outer MPI_Comm */
    ierr = MPI_Attr_get(icomm,Petsc_OuterComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (flg) {
      ocomm = ucomm.comm;
      ierr = MPI_Attr_get(ocomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
      if (flg) {
        /* Deleting this attribute also triggers the keyval's delete callback. */
        ierr = MPI_Attr_delete(ocomm,Petsc_InnerComm_keyval);CHKERRQ(ierr);
      } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Outer MPI_Comm %ld does not have expected reference to inner comm %d, problem with corrupted memory",(long int)ocomm,(long int)icomm);
    }

    ierr = PetscInfo1(0,"Deleting PETSc MPI_Comm %ld\n",(long)icomm);CHKERRQ(ierr);
    ierr = MPI_Comm_free(&icomm);CHKERRQ(ierr);
  }
  *comm = MPI_COMM_NULL;
  ierr = PetscSpinlockUnlock(&PetscCommSpinLock);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*
   channel::mount::mount - Initializes MPI with funneled threading, records the
   world size/rank and the MPI_TAG_UB upper bound (falling back to the MPI
   minimum guarantee of 32767 when the attribute is absent), and builds the
   per-size broadcast trees plus a circular rank list used for scheduling.
 */
inline channel::mount::mount(){
    int *ub, flag, level, zero = 0;
    /* argc=0/argv=NULL form of initialization; require exactly FUNNELED. */
    MPI_Init_thread(&zero, NULL, MPI_THREAD_FUNNELED, &level);
    if(level != MPI_THREAD_FUNNELED) throw std::runtime_error("Error: Wrong threading level");
    MPI_Comm_size(MPI_COMM_WORLD, &np);
    MPI_Comm_rank(MPI_COMM_WORLD, &self);
    MPI_Attr_get(MPI_COMM_WORLD, MPI_TAG_UB, &ub, &flag);
    this->tag_ub = flag ? *ub : 32767; /* 32767 is the smallest TAG_UB MPI permits */
    this->sid = 1;
    trees.resize(2); // 0,1 are empty
    for(int i = 2; i <= np; i++) trees.push_back(new binary_tree<rank_t>(i));
    for(int i = 0; i < 2*np; i++) circle.push_back(i % np);
}
/*
   cmpi1read - Reads attribute `key` from `comm` and checks that the raw
   attribute pointer equals `expected`.  Prints a diagnostic tagged with `msg`
   and returns 1 on any mismatch; returns 0 on success.
 */
int cmpi1read( MPI_Comm comm, int key, void *expected, const char *msg )
{
    void *value;
    int   isSet;

    MPI_Attr_get( comm, key, &value, &isSet );

    /* The attribute must be present at all. */
    if (!isSet) {
        printf( " Error: flag false for Attr_get: %s\n", msg );
        return 1;
    }

    /* MPI-1 attributes are compared as raw pointers. */
    if (value != expected) {
        printf( " Error: expected %p but saw %p: %s\n", expected, value, msg );
        return 1;
    }

    return 0;
}
/*
   checkNoAttrs - Verifies that none of the n keyvals in key[] has an attribute
   set on comm.  Returns the number of keyvals that were unexpectedly present,
   printing one diagnostic (by index) per offender.
 */
int checkNoAttrs(MPI_Comm comm, int n, int key[])
{
    int nerr = 0;
    int idx;

    for (idx = 0; idx < n; idx++) {
        int  present;
        int *attr_ptr;

        MPI_Attr_get(comm, key[idx], &attr_ptr, &present);
        if (present) {
            nerr++;
            fprintf(stderr, "Attribute for key %d set but should be deleted\n", idx);
        }
    }

    return nerr;
}
/*
   Print_Timings - Gathers one timing value per task to rank 0 (of
   MPI_COMM_WORLD; rank_local/rank_count are file-level globals), prints
   min/max/avg statistics labelled with the communicator's name attribute
   (dimid_key), then broadcasts the average back to all tasks and returns it.

   Fixes:
   - printf used "%u" for the size_t `bytes`, which is undefined behavior on
     LP64 platforms; now cast to unsigned long and printed with "%lu".
   - `str` was printed uninitialized when the dimid_key attribute was absent
     (flag == 0); now guarded with a placeholder.
 */
double Print_Timings(double value, char* title, size_t bytes, int iters, MPI_Comm comm)
{
    int i;
    double min, max, avg, dev;
    double* times = NULL;

    /* Only the root needs the receive buffer. */
    if(rank_local == 0) {
        times = (double*) malloc(sizeof(double) * rank_count);
    }

    /* gather single time value from each task to rank 0 */
    MPI_Gather(&value, 1, MPI_DOUBLE, times, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    /* rank 0 computes the min, max, and average over the set */
    if(rank_local == 0) {
        avg = 0;
        dev = 0;
        min = 100000000;
        max = -1;
        for(i = 0; i < rank_count; i++) {
            if(times[i] < min) { min = times[i]; }
            if(times[i] > max) { max = times[i]; }
            avg += times[i];
            dev += times[i] * times[i];
        }
        avg /= (double) rank_count;
        dev = 0; /*sqrt((dev / (double) rank_count - avg * avg)); */

        /* determine who we are in this communicator */
        int nranks, flag;
        char* str = NULL;
        MPI_Attr_get(comm, dimid_key, (void*) &str, &flag);
        if (!flag || str == NULL) {
            /* attribute not set on this communicator; avoid printing an
               uninitialized pointer */
            str = "(unknown)";
        }
        MPI_Comm_size(comm, &nranks);

        printf("%-20.20s\t", title);
        /* cast: size_t has no portable pre-C99 conversion specifier */
        printf("Bytes:\t%8lu\tIters:\t%7d\t", (unsigned long) bytes, iters);
        printf("Avg:\t%8.4f\tMin:\t%8.4f\tMax:\t%8.4f\t", avg, min, max);
        printf("Comm: %s\tRanks: %d\n", str, nranks);
        fflush(stdout);

        free((void*) times);
    }

    /* broadcast the average value back out */
    MPI_Bcast(&avg, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    return avg;
}
/*
   PetscViewer_SAWS_Destroy - Destroys the SAWs viewer cached on the given
   communicator (if any) and removes the caching attribute.  A no-op when the
   keyval was never created or no viewer is attached.
 */
PetscErrorCode PetscViewer_SAWS_Destroy(MPI_Comm comm)
{
  PetscErrorCode ierr;
  PetscMPIInt    found;
  PetscViewer    cached;

  PetscFunctionBegin;
  /* No keyval means no viewer was ever cached on any communicator. */
  if (Petsc_Viewer_SAWs_keyval == MPI_KEYVAL_INVALID) PetscFunctionReturn(0);

  ierr = MPI_Attr_get(comm,Petsc_Viewer_SAWs_keyval,(void**)&cached,&found);CHKERRQ(ierr);
  if (!found) PetscFunctionReturn(0);

  /* Destroy the cached viewer and detach it from the communicator. */
  ierr = PetscViewerDestroy(&cached);CHKERRQ(ierr);
  ierr = MPI_Attr_delete(comm,Petsc_Viewer_SAWs_keyval);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@
MPI_Graph_get - Retrieves graph topology information associated with a
                communicator

Input Parameters:
+ comm - communicator with graph structure (handle)
. maxindex - length of vector 'index' in the calling program  (integer)
- maxedges - length of vector 'edges' in the calling program  (integer)

Output Parameter:
+ index - array of integers containing the graph structure (for details see
  the definition of 'MPI_GRAPH_CREATE')
- edges - array of integers containing the graph structure

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
int MPI_Graph_get ( MPI_Comm comm, int maxindex, int maxedges, int *index, int *edges )
{
  int i, num, flag;
  int *array;
  int mpi_errno = MPI_SUCCESS;
  MPIR_TOPOLOGY *topo;
  struct MPIR_COMMUNICATOR *comm_ptr;
  static char myname[] = "MPI_GRAPH_GET";
  MPIR_ERROR_DECL;

  TR_PUSH(myname);
  comm_ptr = MPIR_GET_COMM_PTR(comm);

#ifndef MPIR_NO_ERROR_CHECKING
  MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
  MPIR_TEST_ARG(index);
  MPIR_TEST_ARG(edges);
  if (mpi_errno)
    return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

  /* Get topology information from the communicator */
  MPIR_ERROR_PUSH(comm_ptr);
  mpi_errno = MPI_Attr_get ( comm, MPIR_TOPOLOGY_KEYVAL, (void **)&topo, &flag );
  MPIR_ERROR_POP(comm_ptr);

  /* The communicator must carry a topology, and it must be a graph (not
     cartesian) topology. */
  if ( ( (flag != 1)             && (mpi_errno = MPI_ERR_TOPOLOGY) ) ||
       ( (topo->type != MPI_GRAPH) && (mpi_errno = MPI_ERR_TOPOLOGY) ) )
    return MPIR_ERROR( comm_ptr, mpi_errno, myname );

  /* Get index: copy at most maxindex entries of the cumulative-degree array. */
  num = topo->graph.nnodes;
  array = topo->graph.index;
  if ( index != (int *)0 )
    for ( i=0; (i<maxindex) && (i<num); i++ )
      (*index++) = (*array++);

  /* Get edges: copy at most maxedges entries of the flattened edge list. */
  num = topo->graph.nedges;
  array = topo->graph.edges;
  if ( edges != (int *)0 )
    for ( i=0; (i<maxedges) && (i<num); i++ )
      (*edges++) = (*array++);

  TR_POP;
  return (mpi_errno);
}
/*@C
   PetscObjectName - Gives an object a name if it does not have one

   Not Collective

   Input Parameters:
.  obj - the Petsc variable
         Thus must be cast with a (PetscObject), for example,
         PetscObjectSetName((PetscObject)mat,name);

   Level: advanced

   Concepts: object name^setting default

.seealso: PetscObjectGetName(), PetscObjectSetName()
@*/
PetscErrorCode PETSC_DLLEXPORT PetscObjectName(PetscObject obj)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      flg;
  char             name[64];

  PetscFunctionBegin;
  PetscValidHeader(obj,1);
  if (!obj->name) {
    /* The per-communicator name counter provides a unique suffix. */
    ierr = MPI_Attr_get(obj->comm,Petsc_Counter_keyval,(void*)&counter,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_ERR_ARG_CORRUPT,"Bad MPI communicator supplied; must be a PETSc communicator");
    /* Default name is "<classname>_<counter>". */
    ierr = PetscSNPrintf(name,64,"%s_%D",obj->class_name,counter->namecount++);CHKERRQ(ierr);
    ierr = PetscStrallocpy(name,&obj->name);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
/*@
MPI_Graphdims_get - Retrieves graph topology information associated with a
                    communicator

Input Parameters:
. comm - communicator for group with graph structure (handle)

Output Parameter:
+ nnodes - number of nodes in graph (integer)
- nedges - number of edges in graph (integer)

.N fortran

.N Errors
.N MPI_SUCCESS
.N MPI_ERR_TOPOLOGY
.N MPI_ERR_COMM
.N MPI_ERR_ARG
@*/
EXPORT_MPI_API int MPI_Graphdims_get ( MPI_Comm comm, int *nnodes, int *nedges )
{
  int mpi_errno = MPI_SUCCESS, flag;
  MPIR_TOPOLOGY *topo;
  struct MPIR_COMMUNICATOR *comm_ptr;
  static char myname[] = "MPI_GRAPHDIMS_GET";
  MPIR_ERROR_DECL;

  TR_PUSH(myname);
  comm_ptr = MPIR_GET_COMM_PTR(comm);

#ifndef MPIR_NO_ERROR_CHECKING
  MPIR_TEST_MPI_COMM(comm,comm_ptr,comm_ptr,myname);
  MPIR_TEST_ARG(nnodes);
  MPIR_TEST_ARG(nedges);
  if (mpi_errno)
    return MPIR_ERROR(comm_ptr, mpi_errno, myname );
#endif

  /* Get topology information from the communicator */
  MPIR_ERROR_PUSH( comm_ptr );
  mpi_errno = MPI_Attr_get ( comm, MPIR_TOPOLOGY_KEYVAL, (void **)&topo, &flag );
  MPIR_ERROR_POP( comm_ptr );
  if (mpi_errno) {
    return MPIR_ERROR( comm_ptr, mpi_errno, myname );
  }

  /* Set nnodes: MPI_UNDEFINED when there is no graph topology attached. */
  if ( nnodes != (int *)0 )
    if ( (flag == 1) && (topo->type == MPI_GRAPH) )
      (*nnodes) = topo->graph.nnodes;
    else
      (*nnodes) = MPI_UNDEFINED;

  /* Set nedges: same convention. */
  if ( nedges != (int *)0 )
    if ( (flag == 1) && (topo->type == MPI_GRAPH) )
      (*nedges) = topo->graph.nedges;
    else
      (*nedges) = MPI_UNDEFINED;

  TR_POP;
  return (MPI_SUCCESS);
}
/*@
  MPE_ReturnTags - Returns tags allocated with MPE_GetTags.

  Input Parameters:
+ comm      - Communicator to return tags to
. first_tag - First of the tags to return
- ntags     - Number of tags to return.

.seealso: MPE_GetTags
@*/
int MPE_ReturnTags(MPI_Comm comm, int first_tag, int ntags)
{
    int *tag_low_p;
    int attr_found;
    int rc;

    rc = MPI_Attr_get( comm, MPE_Tag_keyval, &tag_low_p, &attr_found );
    if (rc) return rc;

    /* Error, attribute does not exist in this communicator */
    if (!attr_found) return MPI_ERR_OTHER;

    /* Tags can only be reclaimed if this block is the most recently
       allocated one (i.e. it sits at the current low-water mark). */
    if (*tag_low_p == first_tag) *tag_low_p = first_tag + ntags;

    return MPI_SUCCESS;
}
/*
  This is invoked on the outer comm as a result of either PetscCommDestroy()
  (via MPI_Attr_delete) or when the user calls MPI_Comm_free().

  This is the only entry point for breaking the links between inner and outer
  comms.

  This is called by MPI, not by users. This is called when MPI_Comm_free() is
  called on the communicator.

  Note: this is declared extern "C" because it is passed to MPI_Keyval_create()
*/
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_DelComm_Outer(MPI_Comm comm,PetscMPIInt keyval,void *attr_val,void *extra_state)
{
  PetscErrorCode ierr;
  PetscMPIInt    flg;
  union {MPI_Comm comm; void *ptr;} icomm,ocomm;

  PetscFunctionBegin;
  /* This callback is registered only for the inner-comm keyval. */
  if (keyval != Petsc_InnerComm_keyval) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Unexpected keyval");
  /* attr_val is the inner comm handle, smuggled through a void*. */
  icomm.ptr = attr_val;

  /* Verify the inner comm's back-reference points to this outer comm. */
  ierr = MPI_Attr_get(icomm.comm,Petsc_OuterComm_keyval,&ocomm,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected reference to outer comm");
  if (ocomm.comm != comm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm has reference to non-matching outer comm");

  ierr = MPI_Attr_delete(icomm.comm,Petsc_OuterComm_keyval);CHKERRQ(ierr); /* Calls Petsc_DelComm_Inner */
  ierr = PetscInfo1(0,"User MPI_Comm %ld is being freed after removing reference from inner PETSc comm to this outer comm\n",(long)comm);if (ierr) PetscFunctionReturn((PetscMPIInt)ierr);
  PetscFunctionReturn(MPI_SUCCESS);
}
/*
   checkAttrs - Verifies that each of the n keyvals in key[] has its attribute
   set on comm and that the stored pointer is exactly &attrval[i] (MPI-1
   attributes compare as raw pointers).  Returns the number of mismatches,
   printing one diagnostic (by index) per failure.

   Fix: corrected the misspelled diagnostic "Atribute" -> "Attribute".
 */
int checkAttrs(MPI_Comm comm, int n, int key[], int attrval[])
{
    int errs = 0;
    int i, flag, *val_p;

    for (i = 0; i < n; i++) {
        MPI_Attr_get(comm, key[i], &val_p, &flag);
        if (!flag) {
            errs++;
            fprintf(stderr, "Attribute for key %d not set\n", i);
        }
        else if (val_p != &attrval[i]) {
            errs++;
            fprintf(stderr, "Attribute value for key %d not correct\n", i);
        }
    }

    return errs;
}
/*@C
   PetscViewerASCIIGetStdout - Creates a ASCII PetscViewer shared by all
   processors in a communicator. Error returning version of
   PETSC_VIEWER_STDOUT_()

   Collective on MPI_Comm

   Input Parameter:
.  comm - the MPI communicator to share the PetscViewer

   Level: beginner

   Notes:
   This should be used in all PETSc source code instead of
   PETSC_VIEWER_STDOUT_()

.seealso: PETSC_VIEWER_DRAW_(), PetscViewerASCIIOpen(), PETSC_VIEWER_STDERR_,
          PETSC_VIEWER_STDOUT_WORLD, PETSC_VIEWER_STDOUT_SELF
@*/
PetscErrorCode PetscViewerASCIIGetStdout(MPI_Comm comm,PetscViewer *viewer)
{
  PetscErrorCode ierr;
  PetscBool      flg;
  MPI_Comm       ncomm;

  PetscFunctionBegin;
  ierr = PetscCommDuplicate(comm,&ncomm,NULL);CHKERRQ(ierr);
  /* Lazily create the keyval used to cache the stdout viewer per communicator. */
  if (Petsc_Viewer_Stdout_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Viewer_Stdout_keyval,0);CHKERRQ(ierr);
  }
  ierr = MPI_Attr_get(ncomm,Petsc_Viewer_Stdout_keyval,(void**)viewer,(PetscMPIInt*)&flg);CHKERRQ(ierr);
  if (!flg) { /* PetscViewer not yet created */
    ierr = PetscViewerASCIIOpen(ncomm,"stdout",viewer);CHKERRQ(ierr);
    /* Registered so PetscFinalize() destroys the shared viewer. */
    ierr = PetscObjectRegisterDestroy((PetscObject)*viewer);CHKERRQ(ierr);
    /* Cache on the communicator so later calls return the same viewer. */
    ierr = MPI_Attr_put(ncomm,Petsc_Viewer_Stdout_keyval,(void*)*viewer);CHKERRQ(ierr);
  }
  /* Drop the reference taken by PetscCommDuplicate(). */
  ierr = PetscCommDestroy(&ncomm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*
   Print_attr - Reports whether the IO_KEY attribute is attached to new_comm,
   printing either the io_rank it carries or a "no attribute" message.  The
   communicator names are used only for labelling the output.
 */
void Print_attr( char* source_comm_name, char* new_comm_name, MPI_Comm new_comm)
{
    int  attr_present;
    int* io_rank_ptr;
    int  world_rank;

    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Attr_get(new_comm, IO_KEY, &io_rank_ptr, &attr_present);

    if (attr_present == 0) {
        printf("Process %d > No attribute associated to IO_KEY in %s from %s\n",
               world_rank, new_comm_name, source_comm_name);
    }
    else {
        printf("Process %d > io_rank = %d in %s (received from %s)\n",
               world_rank, *io_rank_ptr, new_comm_name, source_comm_name);
    }
}  /* Print_attr */
/*@
   PetscSequentialPhaseEnd - Ends a sequential section of code.

   Collective on MPI_Comm

   Input Parameters:
+  comm - Communicator to sequentialize.
-  ng   - Number in processor group.  This many processes are allowed to
          execute at the same time (usually 1)

   Level: intermediate

   Notes:
   See PetscSequentialPhaseBegin() for more details.

.seealso: PetscSequentialPhaseBegin()

   Concepts: sequential stage
@*/
PetscErrorCode PetscSequentialPhaseEnd(MPI_Comm comm,int ng)
{
  PetscErrorCode ierr;
  PetscMPIInt    size,flag;
  MPI_Comm       local_comm,*addr_local_comm;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  /* A single process never needs sequentialization. */
  if (size == 1) PetscFunctionReturn(0);

  /* PetscSequentialPhaseBegin() stashed the duplicated comm as an attribute. */
  ierr = MPI_Attr_get(comm,Petsc_Seq_keyval,(void **)&addr_local_comm,&flag);CHKERRQ(ierr);
  if (!flag) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Wrong MPI communicator; must pass in one used with PetscSequentialPhaseBegin()");
  local_comm = *addr_local_comm;

  ierr = PetscSequentialPhaseEnd_Private(local_comm,ng);CHKERRQ(ierr);

  /* Release the stashed comm and detach the attribute. */
  ierr = PetscFree(addr_local_comm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&local_comm);CHKERRQ(ierr);
  ierr = MPI_Attr_delete(comm,Petsc_Seq_keyval);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*
   init_synchronization_module - Allocates and zeroes the per-rank time-delta
   and ping-pong-minimum arrays, then records whether MPI_Wtime() is globally
   synchronized (MPI_WTIME_IS_GLOBAL) into the module-level flag
   mpi_wtime_is_global (False when the attribute is absent).
 */
void init_synchronization_module(void)
{
  int i, flag;
  int *mpi_wtime_is_global_ptr;

  tds = skampi_malloc_doubles(get_global_size());
  for(i = 0; i < get_global_size(); i++) tds[i] = 0.0;

  /* -1.0 marks "no measurement yet". */
  ping_pong_min_time = skampi_malloc_doubles(get_global_size());
  for( i = 0; i < get_global_size(); i++) ping_pong_min_time[i] = -1.0;

  /* MPI_Attr_get was deprecated in MPI-2 in favor of MPI_Comm_get_attr. */
#if MPI_VERSION < 2
  MPI_Attr_get(MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL, &mpi_wtime_is_global_ptr, &flag);
#else
  MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL, &mpi_wtime_is_global_ptr, &flag);
#endif
  if( flag == 0 )
    mpi_wtime_is_global = False;
  else
    mpi_wtime_is_global = *mpi_wtime_is_global_ptr;

  rdebug(DBG_TIMEDIFF, "mpi_wtime_is_global = %d\n", mpi_wtime_is_global);
}
/*
   SAMRAI_MPI::Attr_get - Thin wrapper over MPI_Attr_get on this object's
   communicator.  Without run-time MPI it is an error (TBOX_ERROR); with MPI
   it forwards the call and returns the MPI return code (MPI_SUCCESS when
   compiled without HAVE_MPI support).
 */
int
SAMRAI_MPI::Attr_get(
   int keyval,
   void* attribute_val,
   int* flag) const
{
#ifndef HAVE_MPI
   /* Silence unused-parameter warnings in the MPI-free build. */
   NULL_USE(keyval);
   NULL_USE(attribute_val);
   NULL_USE(flag);
#endif
   int rval = MPI_SUCCESS;
   if (!s_mpi_is_initialized) {
      TBOX_ERROR("SAMRAI_MPI::Attr_get is a no-op without run-time MPI!");
   }
#ifdef HAVE_MPI
   else {
      rval = MPI_Attr_get(d_comm, keyval, attribute_val, flag);
   }
#endif
   return rval;
}