/**
 * Return a new info object containing the hints currently associated
 * with the file handle.
 *
 * @param fh         Valid file handle.
 * @param info_used  [out] Newly allocated info object; caller frees it.
 * @return MPI_SUCCESS or an MPI error code via the error handler.
 */
int MPI_File_get_info(MPI_File fh, MPI_Info *info_used)
{
    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        /* Validate the handle before using it as an errhandler target,
           and report an invalid file as MPI_ERR_FILE (not MPI_ERR_COMM). */
        if (ompi_file_invalid(fh)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_FILE,
                                          FUNC_NAME);
        }
        if (NULL == info_used) {
            return OMPI_ERRHANDLER_INVOKE(fh, MPI_ERR_INFO, FUNC_NAME);
        }
    }

    if (NULL == fh->super.s_info) {
        /* Set up defaults if MPI_File_set_info was never called.
           Pass the object's base (&fh->super), not the raw MPI_File
           handle, which is what the subscriber API expects. */
        opal_infosubscribe_change_info(&fh->super, &MPI_INFO_NULL->super);
    }

    (*info_used) = OBJ_NEW(ompi_info_t);
    if (NULL == (*info_used)) {
        return OMPI_ERRHANDLER_INVOKE(fh, MPI_ERR_NO_MEM, FUNC_NAME);
    }

    opal_info_dup(fh->super.s_info, &(*info_used)->super);

    return MPI_SUCCESS;
}
int MPI_Add_error_string(int errorcode, const char *string) { int rc; OPAL_CR_NOOP_PROGRESS(); if ( MPI_PARAM_CHECK ) { OMPI_ERR_INIT_FINALIZE(FUNC_NAME); if ( ompi_mpi_errcode_is_invalid(errorcode) ) return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME); if ( ompi_mpi_errcode_is_predefined(errorcode) ) return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME); if ( MPI_MAX_ERROR_STRING < (strlen(string)+1) ) return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME); } rc = ompi_mpi_errnum_add_string (errorcode, string, (int)(strlen(string)+1)); if ( OMPI_SUCCESS != rc ) { return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_INTERN, FUNC_NAME); } return MPI_SUCCESS; }
/**
 * Return a new info object containing the hints currently associated
 * with the communicator.
 *
 * @param comm       Valid communicator.
 * @param info_used  [out] Newly allocated info object; caller frees it.
 * @return MPI_SUCCESS or an MPI error code via the error handler.
 */
int MPI_Comm_get_info(MPI_Comm comm, MPI_Info *info_used)
{
    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (NULL == info_used) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_INFO,
                                          FUNC_NAME);
        }
        if (ompi_comm_invalid(comm)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
                                          FUNC_NAME);
        }
    }

    /* Install the defaults if MPI_Comm_set_info was never called. */
    if (NULL == comm->super.s_info) {
        opal_infosubscribe_change_info(&comm->super, &MPI_INFO_NULL->super);
    }

    *info_used = OBJ_NEW(ompi_info_t);
    if (NULL == *info_used) {
        return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_NO_MEM,
                                      FUNC_NAME);
    }

    /* Duplicate only the MPI-standard-visible keys into the new object. */
    opal_info_t *dup_target = &(*info_used)->super;
    opal_info_dup_mpistandard(comm->super.s_info, &dup_target);

    return MPI_SUCCESS;
}
/**
 * Return a new info object containing the hints currently associated
 * with the window.
 *
 * @param win        Valid window.
 * @param info_used  [out] Newly allocated info object; caller frees it.
 * @return MPI_SUCCESS or an MPI error code via the error handler.
 */
int MPI_Win_get_info(MPI_Win win, MPI_Info *info_used)
{
    int ret;

    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_win_invalid(win)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_WIN,
                                          FUNC_NAME);
        }
        if (NULL == info_used) {
            return OMPI_ERRHANDLER_INVOKE(win, MPI_ERR_ARG, FUNC_NAME);
        }
    }

    if (NULL == win->super.s_info) {
        /* Set up defaults if MPI_Win_set_info was never called.
           Pass the object's base (&win->super), not the raw MPI_Win
           handle, which is what the subscriber API expects. */
        opal_infosubscribe_change_info(&win->super, &MPI_INFO_NULL->super);
    }

    (*info_used) = OBJ_NEW(ompi_info_t);
    if (NULL == (*info_used)) {
        return OMPI_ERRHANDLER_INVOKE(win, MPI_ERR_NO_MEM, FUNC_NAME);
    }

    /* s_info is already an opal_info_t*; passing &win->super.s_info
       (an opal_info_t**) was a type error. */
    ret = opal_info_dup(win->super.s_info, &(*info_used)->super);
    OMPI_ERRHANDLER_RETURN(ret, win, ret, FUNC_NAME);
}
/**
 * Attach an error handler to a window, releasing the previous one.
 * The swap is performed under the window lock.
 *
 * @param win         Valid window.
 * @param errhandler  Window (or predefined) error handler to install.
 * @return MPI_SUCCESS or an MPI error code via the error handler.
 */
int MPI_Win_set_errhandler(MPI_Win win, MPI_Errhandler errhandler)
{
    MPI_Errhandler old_handler;

    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_win_invalid(win)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_WIN,
                                          FUNC_NAME);
        }
        if (NULL == errhandler ||
            MPI_ERRHANDLER_NULL == errhandler ||
            (OMPI_ERRHANDLER_TYPE_WIN != errhandler->eh_mpi_object_type &&
             OMPI_ERRHANDLER_TYPE_PREDEFINED !=
                 errhandler->eh_mpi_object_type)) {
            return OMPI_ERRHANDLER_INVOKE(win, MPI_ERR_ARG, FUNC_NAME);
        }
    }

    /* Take a reference on the incoming handler before publishing it. */
    OBJ_RETAIN(errhandler);

    /* Swap the handler under the window lock and drop the reference
       held on the previous one. */
    OPAL_THREAD_LOCK(&win->w_lock);
    old_handler = win->error_handler;
    win->error_handler = errhandler;
    OBJ_RELEASE(old_handler);
    OPAL_THREAD_UNLOCK(&win->w_lock);

    return MPI_SUCCESS;
}
/*
 * Variant of MPI_Win_set_errhandler that publishes the new error
 * handler with an atomic compare-and-swap instead of taking w_lock.
 * The CAS loop retries until the snapshot `tmp` is still the current
 * value at the moment of the swap, so exactly one old handler is
 * released per call even under concurrent updates.
 */
int MPI_Win_set_errhandler(MPI_Win win, MPI_Errhandler errhandler)
{
    MPI_Errhandler tmp;

    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_win_invalid(win)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_WIN, FUNC_NAME);
        } else if (NULL == errhandler ||
                   MPI_ERRHANDLER_NULL == errhandler ||
                   (OMPI_ERRHANDLER_TYPE_WIN != errhandler->eh_mpi_object_type &&
                    OMPI_ERRHANDLER_TYPE_PREDEFINED != errhandler->eh_mpi_object_type)) {
            return OMPI_ERRHANDLER_INVOKE(win, MPI_ERR_ARG, FUNC_NAME);
        }
    }

    /* Prepare the new error handler: take our reference before it
       becomes visible to other threads. */
    OBJ_RETAIN(errhandler);

    /* Ditch the old errhandler, and decrement its refcount.  On 64
       bits environments we have to make sure the reading of the
       error_handler became atomic. */
    do {
        tmp = win->error_handler;
    } while (!OPAL_ATOMIC_CMPSET(&(win->error_handler), tmp, errhandler));
    OBJ_RELEASE(tmp);

    /* All done */
    return MPI_SUCCESS;
}
/**
 * Return (a retained reference to) the error handler currently
 * attached to the window.
 *
 * @param win         Valid window.
 * @param errhandler  [out] The window's current error handler.
 * @return MPI_SUCCESS or an MPI error code via the error handler.
 */
int MPI_Win_get_errhandler(MPI_Win win, MPI_Errhandler *errhandler)
{
    MPI_Errhandler tmp;

    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_win_invalid(win)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_WIN,
                                          FUNC_NAME);
        } else if (NULL == errhandler) {
            return OMPI_ERRHANDLER_INVOKE(win, MPI_ERR_ARG, FUNC_NAME);
        }
    }

    /* On 64 bits environments we have to make sure the reading of the
       error_handler became atomic. */
    do {
        tmp = win->error_handler;
    } while (!OPAL_ATOMIC_CMPSET_PTR(&(win->error_handler), tmp, tmp));

    /* Retain and return the atomic snapshot `tmp`.  Re-reading
       win->error_handler here (as before) raced with a concurrent
       MPI_Win_set_errhandler: the retain and the returned value could
       refer to different handlers.  The retain corresponds to the
       refcount decrease in errhandler_free.c. */
    OBJ_RETAIN(tmp);
    *errhandler = tmp;

    return MPI_SUCCESS;
}
int MPI_Initialized(int *flag) { MPI_Comm null = NULL; OPAL_CR_NOOP_PROGRESS(); if (MPI_PARAM_CHECK) { if (NULL == flag) { /* If we have an error, the action that we take depends on whether we're currently (after MPI_Init and before MPI_Finalize) or not */ if (ompi_mpi_initialized && !ompi_mpi_finalized) { return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME); } else { return OMPI_ERRHANDLER_INVOKE(null, MPI_ERR_ARG, FUNC_NAME); } } } /* Pretty simple */ *flag = ompi_mpi_initialized; return MPI_SUCCESS; }
/**
 * Report whether a reduction operation is commutative.
 *
 * @param op       Valid (non-NULL, non-MPI_OP_NULL) reduction op.
 * @param commute  [out] Non-zero if the op is commutative.
 * @return MPI_SUCCESS or an MPI error code via the error handler.
 */
int MPI_Op_commutative(MPI_Op op, int *commute)
{
    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (NULL == op || MPI_OP_NULL == op) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_OP,
                                          FUNC_NAME);
        }
        if (NULL == commute) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
                                          FUNC_NAME);
        }
    }

    /* The op is valid; just query its commutativity flag. */
    *commute = ompi_op_is_commute(op);
    return MPI_SUCCESS;
}
/**
 * Replace the hints associated with a file handle.
 *
 * @param fh    Valid file handle.
 * @param info  Valid, non-freed info object (not MPI_INFO_NULL).
 * @return MPI_SUCCESS or an MPI error code via the error handler.
 */
int MPI_File_set_info(MPI_File fh, MPI_Info info)
{
    int ret;

    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_file_invalid(fh)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_FILE,
                                          FUNC_NAME);
        }
        if (NULL == info || MPI_INFO_NULL == info ||
            ompi_info_is_freed(info)) {
            return OMPI_ERRHANDLER_INVOKE(fh, MPI_ERR_INFO, FUNC_NAME);
        }
    }

    OPAL_CR_ENTER_LIBRARY();

    /* Pass the object's base (&fh->super), not the raw MPI_File
       handle, which is what the subscriber API expects. */
    ret = opal_infosubscribe_change_info(&fh->super, &info->super);
    OMPI_ERRHANDLER_RETURN(ret, fh, ret, FUNC_NAME);
}
MPI_Fint MPI_Type_c2f(MPI_Datatype datatype) { OPAL_CR_NOOP_PROGRESS(); MEMCHECKER( memchecker_datatype(datatype); );
/**
 * Return the resolution of MPI_Wtime() in seconds.
 *
 * Both the native-microsecond timer path and the gettimeofday()
 * fallback used by MPI_Wtime have microsecond granularity, so the
 * original #if OPAL_TIMER_USEC_NATIVE / #else branches returned the
 * same constant; they are collapsed into a single return.
 */
double MPI_Wtick(void)
{
    OPAL_CR_NOOP_PROGRESS();

    /* One microsecond, expressed in seconds. */
    return 0.000001;
}
/**
 * Return the resolution of MPI_Wtime() in seconds.
 */
double MPI_Wtick(void)
{
    OPAL_CR_NOOP_PROGRESS();

#if OPAL_TIMER_CYCLE_NATIVE
    /* opal_timer_base_get_freq() returns ticks per second, and
       MPI_Wtime divides cycles by this frequency; the tick length is
       therefore its reciprocal.  Returning the raw frequency (as this
       code previously did) yielded e.g. ~1e9 instead of ~1e-9. */
    return 1.0 / (double) opal_timer_base_get_freq();
#elif OPAL_TIMER_USEC_NATIVE
    return 0.000001;
#else
    /* Otherwise, we already return usec precision. */
    return 0.000001;
#endif
}
/**
 * Convert a C op handle to its Fortran integer handle.
 * An invalid handle maps to -1 (no errhandler is invoked, per
 * MPI-2:4.12.4 handle-conversion semantics).
 */
MPI_Fint MPI_Op_c2f(MPI_Op op)
{
    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (NULL == op) {
            return OMPI_INT_2_FINT(-1);
        }
    }

    return OMPI_INT_2_FINT(op->o_f_to_c_index);
}
/**
 * Store the address of a location in memory.
 *
 * @param location  Location in caller memory; MPI_BOTTOM is permitted.
 * @param address   [out] The address of location.
 * @return MPI_SUCCESS or an MPI error code via the error handler.
 */
int MPI_Get_address(const void *location, MPI_Aint *address)
{
    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        /* Do NOT reject NULL location: the standard allows
           MPI_Get_address(MPI_BOTTOM, ...), and MPI_BOTTOM may be a
           NULL pointer.  Only the output pointer must be non-NULL. */
        if (NULL == address) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
                                          FUNC_NAME);
        }
    }

    *address = (MPI_Aint) location;
    return MPI_SUCCESS;
}
/**
 * Convert a C info handle to its Fortran integer handle.
 * An invalid or freed handle maps to -1 (no errhandler is invoked,
 * per MPI-2:4.12.4 handle-conversion semantics).
 */
MPI_Fint MPI_Info_c2f(MPI_Info info)
{
    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (NULL == info || ompi_info_is_freed(info)) {
            return OMPI_INT_2_FINT(-1);
        }
    }

    return OMPI_INT_2_FINT(info->i_f_to_c_index);
}
int MPI_Add_error_code(int errorclass, int *errorcode) { int code; int rc; OPAL_CR_NOOP_PROGRESS(); if ( MPI_PARAM_CHECK ) { OMPI_ERR_INIT_FINALIZE(FUNC_NAME); if ( ompi_mpi_errcode_is_invalid(errorclass) ) return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME); if ( !ompi_mpi_errnum_is_class ( errorclass) ) return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME); if (NULL == errorcode) { return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME); } } code = ompi_mpi_errcode_add ( errorclass); if ( 0 > code ) { return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_INTERN, FUNC_NAME); } /* ** Update the attribute value. See the comments ** in attribute/attribute.c and attribute/attribute_predefined.c ** why we have to call the fortran attr_set function */ rc = ompi_attr_set_fint (COMM_ATTR, MPI_COMM_WORLD, &MPI_COMM_WORLD->c_keyhash, MPI_LASTUSEDCODE, ompi_mpi_errcode_lastused, true); if ( MPI_SUCCESS != rc ) { return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, rc, FUNC_NAME); } *errorcode = code; return MPI_SUCCESS; }
int MPI_Is_thread_main(int *flag) { OPAL_CR_NOOP_PROGRESS(); if (MPI_PARAM_CHECK) { OMPI_ERR_INIT_FINALIZE(FUNC_NAME); if (NULL == flag) { return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME); } } /* Compare this thread ID to the main thread ID */ *flag = (int) opal_thread_self_compare(ompi_mpi_main_thread); return MPI_SUCCESS; }
/**
 * Return the level of thread support provided by the MPI library.
 *
 * @param provided  [out] One of MPI_THREAD_SINGLE..MPI_THREAD_MULTIPLE.
 * @return MPI_SUCCESS or an MPI error code via the error handler.
 */
int MPI_Query_thread(int *provided)
{
    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (NULL == provided) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
                                          FUNC_NAME);
        }
    }

    /* The provided level was recorded at init time. */
    *provided = ompi_mpi_thread_provided;
    return MPI_SUCCESS;
}
/**
 * Release the network address represented by a port name previously
 * returned by MPI_Open_port.
 *
 * @param port_name  Non-NULL port name string.
 * @return MPI_SUCCESS or an MPI error code via the error handler.
 */
int MPI_Close_port(char *port_name)
{
    int rc;

    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (NULL == port_name) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
                                          FUNC_NAME);
        }
    }

    /* Delegate to the dynamic process management framework. */
    rc = ompi_dpm.close_port(port_name);
    OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
}
int MPI_Error_class(int errorcode, int *errorclass) { OPAL_CR_NOOP_PROGRESS(); if ( MPI_PARAM_CHECK ) { OMPI_ERR_INIT_FINALIZE(FUNC_NAME); if ( ompi_mpi_errcode_is_invalid(errorcode)) { return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME); } } *errorclass = ompi_mpi_errcode_get_class(errorcode); return MPI_SUCCESS; }
int MPI_Get_count(const MPI_Status *status, MPI_Datatype datatype, int *count) { size_t size = 0, internal_count; int rc = MPI_SUCCESS; OPAL_CR_NOOP_PROGRESS(); MEMCHECKER( if (status != MPI_STATUSES_IGNORE) { /* * Before checking the complete status, we need to reset the definedness * of the MPI_ERROR-field (single-completion calls wait/test). */ opal_memchecker_base_mem_defined((void*)&status->MPI_ERROR, sizeof(int)); memchecker_status(status); memchecker_datatype(datatype); } );
/**
 * Profiling control entry point.  This implementation performs no
 * profiling, so the call is a no-op; the varargs are consumed only to
 * keep compilers quiet about the variadic signature.
 *
 * @param level  Profiling level (ignored).
 * @return MPI_SUCCESS always.
 */
int MPI_Pcontrol(const int level, ...)
{
    va_list ap;

    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
    }

    /* Touch the variadic arguments to silence compiler warnings. */
    va_start(ap, level);
    va_end(ap);

    return MPI_SUCCESS;
}
/**
 * Convert a C errhandler handle to its Fortran integer handle.
 * An invalid handle maps to -1 (no errhandler is invoked, per
 * MPI-2:4.12.4 handle-conversion semantics).
 */
MPI_Fint MPI_Errhandler_c2f(MPI_Errhandler errhandler)
{
    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        /* Map an invalid handle onto the null Fortran handle. */
        if (NULL == errhandler) {
            return OMPI_INT_2_FINT(-1);
        }
    }

    return OMPI_INT_2_FINT(errhandler->eh_f_to_c_index);
}
/**
 * Convert a Fortran status (array of MPI_Fint) into a C MPI_Status.
 * Each Fortran integer is copied into the corresponding int slot of
 * the C status (source, tag, error, count, cancelled, ...).
 *
 * @param f_status  Fortran status; must not be NULL or one of the
 *                  Fortran STATUS[ES]_IGNORE constants (MPI-2:4.12.5).
 * @param c_status  [out] C status to fill in.
 * @return MPI_SUCCESS or an MPI error code via the error handler.
 */
int MPI_Status_f2c(MPI_Fint *f_status, MPI_Status *c_status)
{
    int idx;
    int *c_as_ints;

    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (NULL == f_status ||
#if OMPI_WANT_F77_BINDINGS || OMPI_WANT_F90_BINDINGS
            /* These macros compare against constants that exist only
               when the fortran bindings are built. */
            OMPI_IS_FORTRAN_STATUS_IGNORE(f_status) ||
            OMPI_IS_FORTRAN_STATUSES_IGNORE(f_status) ||
#endif
            NULL == c_status) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_IN_STATUS,
                                          FUNC_NAME);
        }
    }

    /* OMPI_FINT_2_INT cannot be used here because of include-file
       complications, so cast each element manually.  The C status is
       copied as a flat array of ints. */
    c_as_ints = (int *) c_status;
    for (idx = 0; idx < (int) (sizeof(MPI_Status) / sizeof(int)); ++idx) {
        c_as_ints[idx] = (int) f_status[idx];
    }

    return MPI_SUCCESS;
}
/**
 * Invoke the error handler attached to a window with the given error
 * code.
 *
 * @param win        Valid window.
 * @param errorcode  Error code to pass to the handler.
 * @return MPI_SUCCESS (required by MPI-2 8.5 even after invoking the
 *         handler).
 */
int MPI_Win_call_errhandler(MPI_Win win, int errorcode)
{
    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_win_invalid(win)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_WIN,
                                          FUNC_NAME);
        }
    }

    /* Fire the window's errhandler; its return is deliberately ignored
       because MPI-2 8.5 mandates returning MPI_SUCCESS. */
    OMPI_ERRHANDLER_INVOKE(win, errorcode, FUNC_NAME);

    return MPI_SUCCESS;
}
/**
 * Return elapsed wall-clock time in seconds from an arbitrary origin.
 * Uses the native cycle counter when available, then the native
 * microsecond timer, and falls back to gettimeofday() otherwise.
 */
double MPI_Wtime(void)
{
    double now;

#if OPAL_TIMER_CYCLE_NATIVE
    now = (double) opal_timer_base_get_cycles() / opal_timer_base_get_freq();
#elif OPAL_TIMER_USEC_NATIVE
    now = (double) opal_timer_base_get_usec() / 1000000.0;
#else
    /* Last resort: gettimeofday() gives usec granularity. */
    struct timeval tv;
    gettimeofday(&tv, NULL);
    now = (double) tv.tv_sec + (double) tv.tv_usec / 1000000.0;
#endif

    OPAL_CR_NOOP_PROGRESS();

    return now;
}
/**
 * Convert a Fortran window handle to its C handle.
 * Per MPI-2:4.12.4, an invalid Fortran handle yields an invalid C
 * handle (NULL) rather than invoking an error handler.
 */
MPI_Win MPI_Win_f2c(MPI_Fint win)
{
    int idx = OMPI_FINT_2_INT(win);

    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
    }

    /* Out-of-range index -> invalid C handle, no errhandler. */
    if (idx < 0 || idx >= opal_pointer_array_get_size(&ompi_mpi_windows)) {
        return NULL;
    }

    return (MPI_Win) opal_pointer_array_get_item(&ompi_mpi_windows, idx);
}
/**
 * Return the rank of the calling process within a group.
 *
 * @param group  Valid (non-NULL, non-MPI_GROUP_NULL) group.
 * @param rank   [out] Rank in the group, or MPI_UNDEFINED if the
 *               caller is not a member.
 * @return MPI_SUCCESS or an MPI error code via the error handler.
 */
int MPI_Group_rank(MPI_Group group, int *rank)
{
    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (NULL == group || MPI_GROUP_NULL == group) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_GROUP,
                                          FUNC_NAME);
        }
        if (NULL == rank) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
                                          FUNC_NAME);
        }
    }

    *rank = ompi_group_rank((ompi_group_t *) group);
    return MPI_SUCCESS;
}
/*
 * Convert a Fortran status (array of MPI_Fint) into a C MPI_Status by
 * copying element-by-element as ints.
 *
 * f_status: Fortran status; must not be NULL or one of the Fortran
 *           STATUS[ES]_IGNORE constants (erroneous per MPI-2:4.12.5).
 * c_status: [out] C status to fill in.
 * Returns MPI_SUCCESS, or invokes the errhandler with
 * MPI_ERR_IN_STATUS on bad arguments.
 */
int MPI_Status_f2c(MPI_Fint *f_status, MPI_Status *c_status)
{
    int i, *c_ints;

    OPAL_CR_NOOP_PROGRESS();

    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        /* MPI-2:4.12.5 says that if you pass in
           MPI_STATUS[ES]_IGNORE, it's erroneous */

        if (NULL == f_status ||
#if OMPI_WANT_F77_BINDINGS || OMPI_WANT_F90_BINDINGS
            /* This section is #if'ed out if we are not building the
               fortran bindings because these macros check values
               against constants that only exist if the fortran
               bindings exist. */
            OMPI_IS_FORTRAN_STATUS_IGNORE(f_status) ||
            OMPI_IS_FORTRAN_STATUSES_IGNORE(f_status) ||
#endif
            NULL == c_status) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_IN_STATUS, FUNC_NAME);
        }
    }

    /* ***NOTE*** See huge comment in status_c2f.c (yes, I know
       there's a size_t member in the C MPI_Status -- go read that
       comment for an explanation why copying everything as a bunch of
       int's is ok).

       We can't use OMPI_FINT_2_INT here because of some complications
       with include files.  :-( So just do the casting manually. */
    c_ints = (int*)c_status;
    for( i = 0; i < (int)(sizeof(MPI_Status) / sizeof(int)); i++ )
        c_ints[i] = (int)f_status[i];

    return MPI_SUCCESS;
}