/*@ MPI_Accumulate - Accumulate data into the target process using remote memory access Input Parameters: + origin_addr - initial address of buffer (choice) . origin_count - number of entries in buffer (nonnegative integer) . origin_datatype - datatype of each buffer entry (handle) . target_rank - rank of target (nonnegative integer) . target_disp - displacement from start of window to beginning of target buffer (nonnegative integer) . target_count - number of entries in target buffer (nonnegative integer) . target_datatype - datatype of each entry in target buffer (handle) . op - predefined reduce operation (handle) - win - window object (handle) Notes: The basic components of both the origin and target datatype must be the same predefined datatype (e.g., all 'MPI_INT' or all 'MPI_DOUBLE_PRECISION'). .N Fortran .N Errors .N MPI_SUCCESS .N MPI_ERR_ARG .N MPI_ERR_COUNT .N MPI_ERR_RANK .N MPI_ERR_TYPE .N MPI_ERR_WIN @*/ int MPI_Accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win) { static const char FCNAME[] = "MPI_Accumulate"; int mpi_errno = MPI_SUCCESS; MPID_Win *win_ptr = NULL; MPID_MPI_STATE_DECL(MPID_STATE_MPI_ACCUMULATE); MPIR_ERRTEST_INITIALIZED_ORDIE(); MPIU_THREAD_CS_ENTER(ALLFUNC,); MPID_MPI_RMA_FUNC_ENTER(MPID_STATE_MPI_ACCUMULATE); /* Validate parameters, especially handles needing to be converted */ # ifdef HAVE_ERROR_CHECKING { MPID_BEGIN_ERROR_CHECKS; { MPIR_ERRTEST_WIN(win, mpi_errno); if (mpi_errno != MPI_SUCCESS) goto fn_fail; } MPID_END_ERROR_CHECKS; } # endif /* Convert MPI object handles to object pointers */ MPID_Win_get_ptr( win, win_ptr ); /* Validate parameters and objects (post conversion) */ # ifdef HAVE_ERROR_CHECKING { MPID_BEGIN_ERROR_CHECKS; { MPID_Comm * comm_ptr; /* Validate win_ptr */ MPID_Win_valid_ptr( win_ptr, mpi_errno ); if (mpi_errno) goto fn_fail; MPIR_ERRTEST_COUNT(origin_count, mpi_errno); 
MPIR_ERRTEST_DATATYPE(origin_datatype, "origin_datatype", mpi_errno); MPIR_ERRTEST_COUNT(target_count, mpi_errno); MPIR_ERRTEST_DATATYPE(target_datatype, "target_datatype", mpi_errno); MPIR_ERRTEST_DISP(target_disp, mpi_errno); if (HANDLE_GET_KIND(origin_datatype) != HANDLE_KIND_BUILTIN) { MPID_Datatype *datatype_ptr = NULL; MPID_Datatype_get_ptr(origin_datatype, datatype_ptr); MPID_Datatype_valid_ptr(datatype_ptr, mpi_errno); MPID_Datatype_committed_ptr(datatype_ptr, mpi_errno); } if (HANDLE_GET_KIND(target_datatype) != HANDLE_KIND_BUILTIN) { MPID_Datatype *datatype_ptr = NULL; MPID_Datatype_get_ptr(target_datatype, datatype_ptr); MPID_Datatype_valid_ptr(datatype_ptr, mpi_errno); MPID_Datatype_committed_ptr(datatype_ptr, mpi_errno); } comm_ptr = win_ptr->comm_ptr; MPIR_ERRTEST_SEND_RANK(comm_ptr, target_rank, mpi_errno); if (mpi_errno != MPI_SUCCESS) goto fn_fail; } MPID_END_ERROR_CHECKS; } # endif /* HAVE_ERROR_CHECKING */ /* ... body of routine ... */ if (target_rank == MPI_PROC_NULL) goto fn_exit; mpi_errno = MPIU_RMA_CALL(win_ptr,Accumulate(origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count, target_datatype, op, win_ptr)); if (mpi_errno != MPI_SUCCESS) goto fn_fail; /* ... end of body of routine ... */ fn_exit: MPID_MPI_RMA_FUNC_EXIT(MPID_STATE_MPI_ACCUMULATE); MPIU_THREAD_CS_EXIT(ALLFUNC,); return mpi_errno; fn_fail: /* --BEGIN ERROR HANDLING-- */ # ifdef HAVE_ERROR_CHECKING { mpi_errno = MPIR_Err_create_code( mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER, "**mpi_accumulate", "**mpi_accumulate %p %d %D %d %d %d %D %O %W", origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count, target_datatype, op, win); } # endif mpi_errno = MPIR_Err_return_win( win_ptr, FCNAME, mpi_errno ); goto fn_exit; /* --END ERROR HANDLING-- */ }
/*@
MPI_Win_free - Free an MPI RMA window

Input Parameters:
. win - window object (handle)

Notes:
If successfully freed, 'win' is set to 'MPI_WIN_NULL'.

.N ThreadSafe
.N Fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_WIN
.N MPI_ERR_OTHER
@*/
int MPI_Win_free(MPI_Win *win)
{
    static const char FCNAME[] = "MPI_Win_free";
    int mpi_errno = MPI_SUCCESS;
    MPID_Win *win_ptr = NULL;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_WIN_FREE);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    MPID_MPI_RMA_FUNC_ENTER(MPID_STATE_MPI_WIN_FREE);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Guard the pointer itself before *win is dereferenced here
               and by MPID_Win_get_ptr below (same guard that
               MPI_Win_allocate_shared applies to its 'win' argument). */
            MPIR_ERRTEST_ARGNULL(win, "win", mpi_errno);
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
            MPIR_ERRTEST_WIN(*win, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif

    /* Convert MPI object handles to object pointers */
    MPID_Win_get_ptr( *win, win_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate win_ptr */
            MPID_Win_valid_ptr( win_ptr, mpi_errno );
            if (mpi_errno) goto fn_fail;
            /* TODO: Check for unterminated passive target epoch */
            /* TODO: check for unterminated active mode epoch */
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    /* Run the user-registered attribute delete callbacks, if any. */
    if (MPIR_Process.attr_free && win_ptr->attributes)
    {
        mpi_errno = MPIR_Process.attr_free( win_ptr->handle,
                                            &win_ptr->attributes );
    }
    /*
     * If the user attribute free function returns an error,
     * then do not free the window
     */
    if (mpi_errno != MPI_SUCCESS) goto fn_fail;

    /* We need to release the error handler */
    if (win_ptr->errhandler &&
        ! (HANDLE_GET_KIND(win_ptr->errhandler->handle) ==
           HANDLE_KIND_BUILTIN) )
    {
        int in_use;
        MPIR_Errhandler_release_ref( win_ptr->errhandler,&in_use);
        if (!in_use)
        {
            MPIU_Handle_obj_free( &MPID_Errhandler_mem, win_ptr->errhandler );
        }
    }

    mpi_errno = MPID_Win_free(&win_ptr);
    if (mpi_errno) goto fn_fail;
    /* Window is gone; invalidate the user's handle per the MPI standard. */
    *win = MPI_WIN_NULL;

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_RMA_FUNC_EXIT(MPID_STATE_MPI_WIN_FREE);
    MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_win_free", "**mpi_win_free %p", win);
    }
#   endif
    mpi_errno = MPIR_Err_return_win( win_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
MPI_Win_complete - Completes an RMA operations begun after an MPI_Win_start.

Input Parameter:
. win - window object (handle)

.N ThreadSafe
.N Fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_WIN
.N MPI_ERR_OTHER
@*/
int MPI_Win_complete(MPI_Win win)
{
    static const char FCNAME[] = "MPI_Win_complete";
    int mpi_errno = MPI_SUCCESS;
    MPID_Win *win_ptr = NULL;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_WIN_COMPLETE);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPIU_THREAD_CS_ENTER(ALLFUNC,);
    MPID_MPI_RMA_FUNC_ENTER(MPID_STATE_MPI_WIN_COMPLETE);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_WIN(win, mpi_errno);
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif

    /* Convert MPI object handles to object pointers */
    MPID_Win_get_ptr( win, win_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate win_ptr */
            MPID_Win_valid_ptr( win_ptr, mpi_errno );
            if (mpi_errno) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ...  */

    /* Dispatch to the device/channel implementation of Win_complete. */
    mpi_errno = MPIU_RMA_CALL(win_ptr,Win_complete(win_ptr));
    if (mpi_errno != MPI_SUCCESS) goto fn_fail;

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_RMA_FUNC_EXIT(MPID_STATE_MPI_WIN_COMPLETE);
    MPIU_THREAD_CS_EXIT(ALLFUNC,);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_win_complete", "**mpi_win_complete %W", win);
    }
#   endif
    mpi_errno = MPIR_Err_return_win( win_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
MPI_Fetch_and_op - Perform one-sided read-modify-write.

Accumulate one element of type datatype from the origin buffer (origin_addr)
to the buffer at offset target_disp, in the target window specified by
target_rank and win, using the operation op and return in the result buffer
result_addr the content of the target buffer before the accumulation.

Input Parameters:
+ origin_addr - initial address of buffer (choice)
. result_addr - initial address of result buffer (choice)
. datatype - datatype of the entry in origin, result, and target buffers
  (handle)
. target_rank - rank of target (nonnegative integer)
. target_disp - displacement from start of window to beginning of target
  buffer (non-negative integer)
. op - reduce operation (handle)
- win - window object (handle)

Notes:
This operations is atomic with respect to other "accumulate" operations.

The generic functionality of 'MPI_Get_accumulate' might limit the performance
of fetch-and-increment or fetch-and-add calls that might be supported by
special hardware operations. 'MPI_Fetch_and_op' thus allows for a fast
implementation of a commonly used subset of the functionality of
'MPI_Get_accumulate'.

The origin and result buffers (origin_addr and result_addr) must be disjoint.
Any of the predefined operations for 'MPI_Reduce', as well as 'MPI_NO_OP' or
'MPI_REPLACE', can be specified as op; user-defined functions cannot be used.
The datatype argument must be a predefined datatype.

.N Fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_ARG
.N MPI_ERR_COUNT
.N MPI_ERR_OP
.N MPI_ERR_RANK
.N MPI_ERR_TYPE
.N MPI_ERR_WIN

.seealso: MPI_Get_accumulate
@*/
int MPI_Fetch_and_op(const void *origin_addr, void *result_addr,
                     MPI_Datatype datatype, int target_rank,
                     MPI_Aint target_disp, MPI_Op op, MPI_Win win)
{
    static const char FCNAME[] = "MPI_Fetch_and_op";
    int mpi_errno = MPI_SUCCESS;
    MPID_Win *win_ptr = NULL;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_FETCH_AND_OP);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPIU_THREAD_CS_ENTER(ALLFUNC,);
    MPID_MPI_RMA_FUNC_ENTER(MPID_STATE_MPI_FETCH_AND_OP);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_WIN(win, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* Convert MPI object handles to object pointers */
    MPID_Win_get_ptr( win, win_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPID_Comm *comm_ptr;

            /* Validate win_ptr */
            MPID_Win_valid_ptr( win_ptr, mpi_errno );
            if (mpi_errno) goto fn_fail;

            /* For MPI_NO_OP no data is read from the origin buffer, so
               origin_addr is only required to be non-NULL for real ops. */
            if (op != MPI_NO_OP) {
                MPIR_ERRTEST_ARGNULL(origin_addr, "origin_addr", mpi_errno);
            }

            MPIR_ERRTEST_ARGNULL(result_addr, "result_addr", mpi_errno);
            MPIR_ERRTEST_DATATYPE(datatype, "datatype", mpi_errno);

            /* The MPI standard restricts this routine to predefined
               datatypes; derived types are rejected outright. */
            if (HANDLE_GET_KIND(datatype) != HANDLE_KIND_BUILTIN) {
                MPIU_ERR_SETANDJUMP(mpi_errno, MPI_ERR_TYPE,
                                    "**typenotpredefined");
            }

            /* Dynamic windows place no constraint on the displacement. */
            if (win_ptr->create_flavor != MPI_WIN_FLAVOR_DYNAMIC)
                MPIR_ERRTEST_DISP(target_disp, mpi_errno);

            comm_ptr = win_ptr->comm_ptr;
            MPIR_ERRTEST_SEND_RANK(comm_ptr, target_rank, mpi_errno);

            MPIR_ERRTEST_OP_GACC(op, mpi_errno);

            /* User-defined reduction operations are not permitted here. */
            if (HANDLE_GET_KIND(op) != HANDLE_KIND_BUILTIN) {
                MPIU_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OP,
                                    "**opnotpredefined");
            }
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ...  */

    /* Dispatch to the device/channel implementation. */
    mpi_errno = MPIU_RMA_CALL(win_ptr,Fetch_and_op(origin_addr,
                                                   result_addr, datatype,
                                                   target_rank, target_disp,
                                                   op, win_ptr));
    if (mpi_errno != MPI_SUCCESS) goto fn_fail;

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_RMA_FUNC_EXIT(MPID_STATE_MPI_FETCH_AND_OP);
    MPIU_THREAD_CS_EXIT(ALLFUNC,);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_fetch_and_op",
            "**mpi_fetch_and_op %p %p %D %d %d %O %W",
            origin_addr, result_addr, datatype,
            target_rank, target_disp, op, win);
    }
#   endif
    mpi_errno = MPIR_Err_return_win( win_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
MPI_Win_allocate_shared - Create an MPI Window object for one-sided
communication and shared memory access, and allocate memory at each process.

This is a collective call executed by all processes in the group of comm. On
each process i, it allocates memory of at least size bytes that is shared
among all processes in comm, and returns a pointer to the locally allocated
segment in baseptr that can be used for load/store accesses on the calling
process. The locally allocated memory can be the target of load/store
accesses by remote processes; the base pointers for other processes can be
queried using the function 'MPI_Win_shared_query'. The call also returns a
window object that can be used by all processes in comm to perform RMA
operations. The size argument may be different at each process and size = 0
is valid. It is the user''s responsibility to ensure that the communicator
comm represents a group of processes that can create a shared memory segment
that can be accessed by all processes in the group. The allocated memory is
contiguous across process ranks unless the info key alloc_shared_noncontig
is specified. Contiguous across process ranks means that the first address
in the memory segment of process i is consecutive with the last address in
the memory segment of process i − 1. This may enable the user to calculate
remote address offsets with local information only.

Input Parameters:
. size - size of window in bytes (nonnegative integer)
. disp_unit - local unit size for displacements, in bytes (positive integer)
. info - info argument (handle)
- comm - communicator (handle)

Output Parameters:
. baseptr - initial address of window (choice)
- win - window object returned by the call (handle)

.N ThreadSafe
.N Fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_ARG
.N MPI_ERR_COMM
.N MPI_ERR_INFO
.N MPI_ERR_OTHER
.N MPI_ERR_SIZE

.seealso: MPI_Win_allocate MPI_Win_create MPI_Win_create_dynamic
MPI_Win_free MPI_Win_shared_query
@*/
int MPI_Win_allocate_shared(MPI_Aint size, int disp_unit, MPI_Info info,
                            MPI_Comm comm, void *baseptr, MPI_Win *win)
{
    /* NOTE(review): no local FCNAME declared here, unlike the sibling
       routines, yet fn_fail uses FCNAME — presumably it is supplied by a
       #define above this chunk; confirm against the full file. */
    int mpi_errno = MPI_SUCCESS;
    MPID_Win *win_ptr = NULL;
    MPID_Comm *comm_ptr = NULL;
    MPID_Info *info_ptr = NULL;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_WIN_ALLOCATE_SHARED);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPIU_THREAD_CS_ENTER(ALLFUNC,);
    MPID_MPI_RMA_FUNC_ENTER(MPID_STATE_MPI_WIN_ALLOCATE_SHARED);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_COMM(comm, mpi_errno);
            MPIR_ERRTEST_INFO_OR_NULL(info, mpi_errno);
            MPIR_ERRTEST_ARGNULL(win, "win", mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* Convert MPI object handles to object pointers */
    MPID_Comm_get_ptr( comm, comm_ptr );
    MPID_Info_get_ptr( info, info_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate pointers */
            MPID_Comm_valid_ptr( comm_ptr, mpi_errno, FALSE );
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;

            MPIU_ERR_CHKANDJUMP1(disp_unit <= 0, mpi_errno, MPI_ERR_ARG,
                                 "**arg", "**arg %s",
                                 "disp_unit must be positive");
            MPIU_ERR_CHKANDJUMP1(size < 0, mpi_errno, MPI_ERR_SIZE,
                                 "**rmasize", "**rmasize %d", size);
            /* A NULL baseptr is only acceptable for a zero-size segment. */
            MPIU_ERR_CHKANDJUMP1(size > 0 && baseptr == NULL, mpi_errno,
                                 MPI_ERR_ARG, "**nullptr", "**nullptr %s",
                                 "NULL base pointer is invalid when size is nonzero");
            if (mpi_errno != MPI_SUCCESS) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ...  */

    mpi_errno = MPID_Win_allocate_shared(size, disp_unit, info_ptr, comm_ptr,
                                         baseptr, &win_ptr);
    if (mpi_errno != MPI_SUCCESS) goto fn_fail;

    /* Initialize a few fields that have specific defaults */
    win_ptr->name[0] = 0;
    win_ptr->errhandler = 0;

    /* return the handle of the window object to the user */
    MPIU_OBJ_PUBLISH_HANDLE(*win, win_ptr->handle);

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_RMA_FUNC_EXIT(MPID_STATE_MPI_WIN_ALLOCATE_SHARED);
    MPIU_THREAD_CS_EXIT(ALLFUNC,);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_win_allocate_shared",
            "**mpi_win_allocate_shared %d %I %C %p %p",
            size, info, comm, baseptr, win);
    }
#   endif
    mpi_errno = MPIR_Err_return_comm( comm_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
MPI_Rput - Put data into a memory window on a remote process and return a
request handle for the operation.

'MPI_Rput' is similar to 'MPI_Put', except that it allocates a communication
request object and associates it with the request handle (the argument
request). The completion of an 'MPI_Rput' operation (i.e., after the
corresponding test or wait) indicates that the sender is now free to update
the locations in the origin buffer. It does not indicate that the data is
available at the target window. If remote completion is required,
'MPI_Win_flush', 'MPI_Win_flush_all', 'MPI_Win_unlock', or
'MPI_Win_unlock_all' can be used.

Input Parameters:
+ origin_addr -initial address of origin buffer (choice)
. origin_count -number of entries in origin buffer (nonnegative integer)
. origin_datatype -datatype of each entry in origin buffer (handle)
. target_rank -rank of target (nonnegative integer)
. target_disp -displacement from start of window to target buffer
  (nonnegative integer)
. target_count -number of entries in target buffer (nonnegative integer)
. target_datatype -datatype of each entry in target buffer (handle)
- win - window object used for communication (handle)

Output Parameters:
. request -RMA request (handle)

.N ThreadSafe
.N Fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_ARG
.N MPI_ERR_COUNT
.N MPI_ERR_RANK
.N MPI_ERR_TYPE
.N MPI_ERR_WIN

.seealso: MPI_Put
@*/
int MPI_Rput(const void *origin_addr, int origin_count,
             MPI_Datatype origin_datatype, int target_rank,
             MPI_Aint target_disp, int target_count,
             MPI_Datatype target_datatype, MPI_Win win,
             MPI_Request *request)
{
    static const char FCNAME[] = "MPI_Rput";
    int mpi_errno = MPI_SUCCESS;
    MPID_Win *win_ptr = NULL;
    MPID_Request *request_ptr = NULL;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_RPUT);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    MPID_MPI_RMA_FUNC_ENTER(MPID_STATE_MPI_RPUT);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_WIN(win, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* Convert MPI object handles to object pointers */
    MPID_Win_get_ptr( win, win_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPID_Comm * comm_ptr;

            /* Validate win_ptr */
            MPID_Win_valid_ptr( win_ptr, mpi_errno );
            if (mpi_errno) goto fn_fail;

            MPIR_ERRTEST_COUNT(origin_count, mpi_errno);
            MPIR_ERRTEST_DATATYPE(origin_datatype, "origin_datatype",
                                  mpi_errno);
            MPIR_ERRTEST_USERBUFFER(origin_addr, origin_count,
                                    origin_datatype, mpi_errno);
            MPIR_ERRTEST_COUNT(target_count, mpi_errno);
            MPIR_ERRTEST_DATATYPE(target_datatype, "target_datatype",
                                  mpi_errno);
            /* Dynamic windows place no constraint on the displacement. */
            if (win_ptr->create_flavor != MPI_WIN_FLAVOR_DYNAMIC)
                MPIR_ERRTEST_DISP(target_disp, mpi_errno);

            if (HANDLE_GET_KIND(origin_datatype) != HANDLE_KIND_BUILTIN)
            {
                MPID_Datatype *datatype_ptr = NULL;

                MPID_Datatype_get_ptr(origin_datatype, datatype_ptr);
                MPID_Datatype_valid_ptr(datatype_ptr, mpi_errno);
                /* Stop before the committed check uses a pointer that
                   failed validation. */
                if (mpi_errno != MPI_SUCCESS) goto fn_fail;
                MPID_Datatype_committed_ptr(datatype_ptr, mpi_errno);
                if (mpi_errno != MPI_SUCCESS) goto fn_fail;
            }

            if (HANDLE_GET_KIND(target_datatype) != HANDLE_KIND_BUILTIN)
            {
                MPID_Datatype *datatype_ptr = NULL;

                MPID_Datatype_get_ptr(target_datatype, datatype_ptr);
                MPID_Datatype_valid_ptr(datatype_ptr, mpi_errno);
                if (mpi_errno != MPI_SUCCESS) goto fn_fail;
                MPID_Datatype_committed_ptr(datatype_ptr, mpi_errno);
                if (mpi_errno != MPI_SUCCESS) goto fn_fail;
            }

            comm_ptr = win_ptr->comm_ptr;
            MPIR_ERRTEST_SEND_RANK(comm_ptr, target_rank, mpi_errno);
            MPIR_ERRTEST_ARGNULL(request,"request",mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ...  */

    mpi_errno = MPID_Rput(origin_addr, origin_count, origin_datatype,
                          target_rank, target_disp, target_count,
                          target_datatype, win_ptr, &request_ptr);
    if (mpi_errno != MPI_SUCCESS) goto fn_fail;

    /* Hand the request handle back to the caller. */
    *request = request_ptr->handle;

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_RMA_FUNC_EXIT(MPID_STATE_MPI_RPUT);
    MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_rput",
            "**mpi_rput %p %d %D %d %d %d %D %W %p",
            origin_addr, origin_count, origin_datatype,
            target_rank, target_disp, target_count, target_datatype, win,
            request);
    }
#   endif
    mpi_errno = MPIR_Err_return_win( win_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
MPI_Win_attach - Attach memory to a dynamic window.

Attaches a local memory region beginning at base for remote access within the
given window. The memory region specified must not contain any part that is
already attached to the window win, that is, attaching overlapping memory
concurrently within the same window is erroneous. The argument win must be a
window that was created with 'MPI_Win_create_dynamic'. Multiple (but
non-overlapping) memory regions may be attached to the same window.

Input Parameters:
+ size - size of memory to be attached in bytes
. base - initial address of memory to be attached
- win - window object used for communication (handle)

.N ThreadSafe
.N Fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_ARG
.N MPI_ERR_COUNT
.N MPI_ERR_RANK
.N MPI_ERR_TYPE
.N MPI_ERR_WIN

.seealso: MPI_Win_create_dynamic MPI_Win_detach
@*/
int MPI_Win_attach(MPI_Win win, void *base, MPI_Aint size)
{
    static const char FCNAME[] = "MPI_Win_attach";
    int mpi_errno = MPI_SUCCESS;
    MPID_Win *win_ptr = NULL;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_WIN_ATTACH);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPIU_THREAD_CS_ENTER(ALLFUNC,);
    MPID_MPI_RMA_FUNC_ENTER(MPID_STATE_MPI_WIN_ATTACH);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_WIN(win, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* Convert MPI object handles to object pointers */
    MPID_Win_get_ptr( win, win_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate win_ptr */
            MPID_Win_valid_ptr( win_ptr, mpi_errno );
            if (mpi_errno) goto fn_fail;

            if (size < 0)
                mpi_errno = MPIR_Err_create_code( MPI_SUCCESS,
                                                  MPIR_ERR_RECOVERABLE,
                                                  FCNAME, __LINE__,
                                                  MPI_ERR_SIZE,
                                                  "**rmasize",
                                                  "**rmasize %d", size);
            /* A NULL base is only acceptable for a zero-size region. */
            if (size > 0 && base == NULL)
                mpi_errno = MPIR_Err_create_code( MPI_SUCCESS,
                                                  MPIR_ERR_RECOVERABLE,
                                                  FCNAME, __LINE__,
                                                  MPI_ERR_ARG,
                                                  "**nullptr",
                                                  "**nullptr %s",
                                                  "NULL base pointer is invalid when size is nonzero");
            if (mpi_errno) goto fn_fail;
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ...  */

    /* Attaching an empty region is a no-op. */
    if (size == 0) goto fn_exit;

    mpi_errno = MPIU_RMA_CALL(win_ptr, Win_attach(win_ptr, base, size));
    if (mpi_errno != MPI_SUCCESS) goto fn_fail;

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_RMA_FUNC_EXIT(MPID_STATE_MPI_WIN_ATTACH);
    MPIU_THREAD_CS_EXIT(ALLFUNC,);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        /* Arguments must follow the specifier order "%W %p %d"
           (window, pointer, integer); the previous code passed
           (size, base, win), feeding an MPI_Aint to %W and a window
           handle to %d. */
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_win_attach", "**mpi_win_attach %W %p %d",
            win, base, size);
    }
#   endif
    mpi_errno = MPIR_Err_return_win( win_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
/*@
MPI_Win_shared_query - Query the size and base pointer for a patch of a
shared memory window.

This function queries the process-local address for remote memory segments
created with 'MPI_Win_allocate_shared'. This function can return different
process-local addresses for the same physical memory on different processes.

The returned memory can be used for load/store accesses subject to the
constraints defined in MPI 3.0, Section 11.7. This function can only be
called with windows of type 'MPI_Win_flavor_shared'. If the passed window is
not of flavor 'MPI_Win_flavor_shared', the error 'MPI_ERR_RMA_FLAVOR' is
raised.

When rank is 'MPI_PROC_NULL', the pointer, disp_unit, and size returned are
the pointer, disp_unit, and size of the memory segment belonging the lowest
rank that specified size > 0. If all processes in the group attached to the
window specified size = 0, then the call returns size = 0 and a baseptr as
if 'MPI_Alloc_mem' was called with size = 0.

Input Parameters:
+ win - window object used for communication (handle)
- rank - target rank

Output Parameters:
+ size - size of the segment at the given rank
. disp_unit - local unit size for displacements, in bytes (positive integer)
- baseptr - base pointer in the calling process'' address space of the
  shared segment belonging to the target rank.

.N ThreadSafe
.N Fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_ARG
.N MPI_ERR_RANK
.N MPI_ERR_WIN

.seealso: MPI_Win_allocate_shared
@*/
int MPI_Win_shared_query(MPI_Win win, int rank, MPI_Aint *size,
                         int *disp_unit, void *baseptr)
{
    static const char FCNAME[] = "MPI_Win_shared_query";
    int mpi_errno = MPI_SUCCESS;
    MPID_Win *win_ptr = NULL;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_WIN_SHARED_QUERY);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPIU_THREAD_CS_ENTER(ALLFUNC,);
    MPID_MPI_RMA_FUNC_ENTER(MPID_STATE_MPI_WIN_SHARED_QUERY);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPIR_ERRTEST_WIN(win, mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* Convert MPI object handles to object pointers */
    MPID_Win_get_ptr( win, win_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            MPID_Comm * comm_ptr;

            /* Validate win_ptr */
            MPID_Win_valid_ptr( win_ptr, mpi_errno );
            if (mpi_errno) goto fn_fail;

            MPIR_ERRTEST_ARGNULL(size, "size", mpi_errno);
            MPIR_ERRTEST_ARGNULL(disp_unit, "disp_unit", mpi_errno);
            MPIR_ERRTEST_ARGNULL(baseptr, "baseptr", mpi_errno);
            comm_ptr = win_ptr->comm_ptr;
            MPIR_ERRTEST_SEND_RANK(comm_ptr, rank, mpi_errno);
            /* Only shared-memory flavor windows support this query. */
            MPIR_ERRTEST_WIN_FLAVOR(win_ptr, MPI_WIN_FLAVOR_SHARED,
                                    mpi_errno);
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ...  */

    /* Dispatch to the device/channel implementation. */
    mpi_errno = MPIU_RMA_CALL(win_ptr, Win_shared_query(win_ptr, rank, size,
                                                        disp_unit, baseptr));
    if (mpi_errno != MPI_SUCCESS) goto fn_fail;

    /* ... end of body of routine ...  */

  fn_exit:
    MPID_MPI_RMA_FUNC_EXIT(MPID_STATE_MPI_WIN_SHARED_QUERY);
    MPIU_THREAD_CS_EXIT(ALLFUNC,);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
        /* NOTE(review): the error message reports win, rank, size, and
           baseptr but omits disp_unit; changing the format requires a
           matching update to the registered message string — verify
           against the error-message catalog before touching it. */
        mpi_errno = MPIR_Err_create_code(
            mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
            "**mpi_win_shared_query",
            "**mpi_win_shared_query %W %d %p %p",
            win, rank, size, baseptr);
    }
#   endif
    mpi_errno = MPIR_Err_return_win( win_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}