Example no. 1
int MPI_File_set_info(MPI_File fh, MPI_Info info)
{
    int rc;

    if (MPI_PARAM_CHECK) {
        rc = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_file_invalid(fh)) {
            fh = MPI_FILE_NULL;
            rc = MPI_ERR_FILE;
        }
        OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
    }

    /* Call the back-end io component function */

    switch (fh->f_io_version) {
    case MCA_IO_BASE_V_2_0_0:
        rc = fh->f_io_selected_module.v2_0_0.
            io_module_file_set_info(fh, info);
        break;

    default:
        rc = MPI_ERR_INTERN;
        break;
    }

    /* All done */

    OMPI_ERRHANDLER_RETURN(rc, fh, rc, FUNC_NAME);
}
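
For orientation, here is a minimal caller sketch for MPI_File_set_info. The file name and the "cb_buffer_size" hint are illustrative assumptions, not anything mandated by the snippet above.

#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_File fh;
    MPI_Info info;

    MPI_Init(&argc, &argv);
    MPI_File_open(MPI_COMM_WORLD, "data.out",
                  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);

    MPI_Info_create(&info);
    /* illustrative hint; the keys actually honored depend on the selected io component */
    MPI_Info_set(info, "cb_buffer_size", "1048576");
    MPI_File_set_info(fh, info);   /* dispatches into the io module shown above */
    MPI_Info_free(&info);

    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}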
Example no. 2
int MPI_File_get_byte_offset(MPI_File fh, MPI_Offset offset,
                             MPI_Offset *disp)
{
    int rc;

    if (MPI_PARAM_CHECK) {
        rc = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_file_invalid(fh)) {
            rc = MPI_ERR_FILE;
            fh = MPI_FILE_NULL;
        } else if (NULL == disp) {
            rc = MPI_ERR_ARG;
        }
        OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
    }

    /* Call the back-end io component function */

    switch (fh->f_io_version) {
    case MCA_IO_BASE_V_2_0_0:
        rc = fh->f_io_selected_module.v2_0_0.
            io_module_file_get_byte_offset(fh, offset, disp);
        break;

    default:
        rc = MPI_ERR_INTERN;
        break;
    }

    /* All done */

    OMPI_ERRHANDLER_RETURN(rc, fh, rc, FUNC_NAME);
}
Example no. 3
int MPI_Compare_and_swap(const void *origin_addr, const void *compare_addr, void *result_addr,
                         MPI_Datatype datatype, int target_rank, MPI_Aint target_disp, MPI_Win win)
{
    int rc;

    if (MPI_PARAM_CHECK) {
        rc = OMPI_SUCCESS;

        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if (ompi_win_invalid(win)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_WIN, FUNC_NAME);
        } else if (ompi_win_peer_invalid(win, target_rank) &&
                   (MPI_PROC_NULL != target_rank)) {
            rc = MPI_ERR_RANK;
        } else if ( MPI_WIN_FLAVOR_DYNAMIC != win->w_flavor && target_disp < 0 ) {
            rc = MPI_ERR_DISP;
        } else {
            OMPI_CHECK_DATATYPE_FOR_ONE_SIDED(rc, datatype, 1);
        }
        OMPI_ERRHANDLER_CHECK(rc, win, rc, FUNC_NAME);
    }

    if (MPI_PROC_NULL == target_rank) return MPI_SUCCESS;

    OPAL_CR_ENTER_LIBRARY();

    rc = win->w_osc_module->osc_compare_and_swap(origin_addr, compare_addr, result_addr,
                                                 datatype, target_rank, target_disp, win);
    OMPI_ERRHANDLER_RETURN(rc, win, rc, FUNC_NAME);
}
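
A usage sketch for MPI_Compare_and_swap (the window setup and the lock/unlock epoch are illustrative choices): every rank attempts to swap a 0 at rank 0 for its own rank number and gets back the value the target held before the attempt.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, winbuf = 0, compare = 0, result = -1;
    MPI_Win win;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Win_create(&winbuf, sizeof(int), sizeof(int),
                   MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    MPI_Win_lock(MPI_LOCK_SHARED, 0, 0, win);
    /* atomically: if (*target == compare) *target = rank; result = old *target */
    MPI_Compare_and_swap(&rank, &compare, &result, MPI_INT, 0, 0, win);
    MPI_Win_unlock(0, win);

    printf("rank %d saw %d at the target\n", rank, result);

    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}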
Example no. 4
int MPI_File_read_ordered_end(MPI_File fh, void *buf, MPI_Status *status)
{
    int rc;

    if (MPI_PARAM_CHECK) {
        rc = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_file_invalid(fh)) {
            fh = MPI_FILE_NULL;
            rc = MPI_ERR_FILE;
        }
        OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
    }

    OPAL_CR_ENTER_LIBRARY();

    /* Call the back-end io component function */

    switch (fh->f_io_version) {
    case MCA_IO_BASE_V_2_0_0:
        rc = fh->f_io_selected_module.v2_0_0.
            io_module_file_read_ordered_end(fh, buf, status);
        break;

    default:
        rc = MPI_ERR_INTERN;
        break;
    }

    /* All done */
    
    OMPI_ERRHANDLER_RETURN(rc, fh, rc, FUNC_NAME);
}
Example no. 5
int MPI_Type_contiguous(int count,
                        MPI_Datatype oldtype,
                        MPI_Datatype *newtype)
{
   int rc;

   if( MPI_PARAM_CHECK ) {
     OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
     if (NULL == oldtype || MPI_DATATYPE_NULL == oldtype ||
         NULL == newtype) {
       return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_TYPE, FUNC_NAME);
     } else if( count < 0 ) {
       return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COUNT, FUNC_NAME);
      }
   }
   
   rc = ompi_ddt_create_contiguous( count, oldtype, newtype );
   OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, FUNC_NAME );

   /* data description */
   {
      int* a_i[1];
      a_i[0] = &count;
      ompi_ddt_set_args( *newtype, 1, a_i, 0, NULL, 1, &oldtype, MPI_COMBINER_CONTIGUOUS );
   }

   OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, FUNC_NAME );
}
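
A usage sketch for MPI_Type_contiguous (buffers, ranks, and tag are illustrative; run with at least two ranks): the committed type describes three consecutive ints.

#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, data[3] = {1, 2, 3};
    MPI_Datatype triple;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Type_contiguous(3, MPI_INT, &triple);
    MPI_Type_commit(&triple);   /* required before use in communication */

    if (0 == rank) {
        MPI_Send(data, 1, triple, 1, 0, MPI_COMM_WORLD);
    } else if (1 == rank) {
        MPI_Recv(data, 1, triple, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }

    MPI_Type_free(&triple);
    MPI_Finalize();
    return 0;
}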
Example no. 6
int MPI_File_get_atomicity(MPI_File fh, int *flag)
{
    int rc;

    if (MPI_PARAM_CHECK) {
        rc = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_file_invalid(fh)) {
            rc = MPI_ERR_FILE;
            fh = MPI_FILE_NULL;
        } else if (NULL == flag) {
            rc = MPI_ERR_ARG;
        }
        OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
    }

    OPAL_CR_ENTER_LIBRARY();

    /* Call the back-end io component function */

    switch (fh->f_io_version) {
    case MCA_IO_BASE_V_2_0_0:
        rc = fh->f_io_selected_module.v2_0_0.
            io_module_file_get_atomicity(fh, flag);
        break;

    default:
        rc = MPI_ERR_INTERN;
        break;
    }

    /* All done */
    
    OMPI_ERRHANDLER_RETURN(rc, fh, rc, FUNC_NAME);
}
Example no. 7
int MPI_Rsend(void *buf, int count, MPI_Datatype type, int dest, int tag, MPI_Comm comm)
{
    int rc = MPI_SUCCESS;

    if ( MPI_PARAM_CHECK ) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_comm_invalid(comm)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
        } else if (count < 0) {
            rc = MPI_ERR_COUNT;
        } else if (type == MPI_DATATYPE_NULL) {
            rc = MPI_ERR_TYPE;
        } else if (tag < 0 || tag > mca_pml.pml_max_tag) {
            rc = MPI_ERR_TAG;
        } else if (ompi_comm_peer_invalid(comm, dest) &&
                   (MPI_PROC_NULL != dest)) {
            rc = MPI_ERR_RANK;
        } else {
            OMPI_CHECK_DATATYPE_FOR_SEND(rc, type, count);
            OMPI_CHECK_USER_BUFFER(rc, buf, type, count);
        }
        OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
    }

    if (MPI_PROC_NULL == dest) {
        return MPI_SUCCESS;
    }

    rc = MCA_PML_CALL(send(buf, count, type, dest, tag,
                           MCA_PML_BASE_SEND_READY, comm));
    OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}
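
Because MCA_PML_BASE_SEND_READY is only correct when the matching receive is already posted, a caller has to enforce that ordering itself. A sketch (the barrier is one simple way to guarantee the posting; run with at least two ranks):

#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, value = 42;
    MPI_Request req;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (1 == rank) {
        MPI_Irecv(&value, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &req);
    }
    MPI_Barrier(MPI_COMM_WORLD);   /* the receive is now posted on rank 1 */
    if (0 == rank) {
        MPI_Rsend(&value, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
    } else if (1 == rank) {
        MPI_Wait(&req, MPI_STATUS_IGNORE);
    }

    MPI_Finalize();
    return 0;
}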
Example no. 8
int MPI_File_delete(char *filename, MPI_Info info) 
{
    int rc;

    if (MPI_PARAM_CHECK) {
        rc = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (NULL == info || ompi_info_is_freed(info)) {
            rc = MPI_ERR_INFO;
        } else if (NULL == filename) {
            rc = MPI_ERR_ARG;
        }
        OMPI_ERRHANDLER_CHECK(rc, MPI_FILE_NULL, rc, FUNC_NAME);
    }

    /* Note that MPI-2:9.7 (p265 in the ps; 261 in the pdf) says that
       errors in MPI_FILE_OPEN (before the file handle is created)
       should invoke the default error handler on MPI_FILE_NULL.
       Hence, if we get a file handle out of ompi_file_open(), invoke
       the error handler on that.  If not, invoke the error handler on
       MPI_FILE_NULL. */

    /* The io framework is only initialized lazily.  If it hasn't
       already been initialized, do so now (note that MPI_FILE_OPEN
       and MPI_FILE_DELETE are the only two places that it will be
       initialized). */

    if (!(mca_io_base_components_opened_valid ||
          mca_io_base_components_available_valid)) {
        if (OMPI_SUCCESS != (rc = mca_io_base_open())) {
            return OMPI_ERRHANDLER_INVOKE(MPI_FILE_NULL, rc, FUNC_NAME);
        }
        if (OMPI_SUCCESS != 
            (rc = mca_io_base_find_available(OPAL_ENABLE_PROGRESS_THREADS,
                                             OMPI_ENABLE_THREAD_MULTIPLE))) {
            return OMPI_ERRHANDLER_INVOKE(MPI_FILE_NULL, rc, FUNC_NAME);
        }
    }

    OPAL_CR_ENTER_LIBRARY();

    /* Since there is no MPI_File handle associated with this
       function, the MCA has to do a selection and perform the
       action */

    rc = mca_io_base_delete(filename, info);
    OMPI_ERRHANDLER_RETURN(rc, MPI_FILE_NULL, rc, FUNC_NAME);
}
Example no. 9
int MPI_Grequest_complete(MPI_Request request) 
{
    int rc = MPI_SUCCESS;
    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (request == MPI_REQUEST_NULL) {
           rc = MPI_ERR_REQUEST;
        } else if (OMPI_REQUEST_GEN != request->req_type) {
            rc = MPI_ERR_REQUEST;
        }
        OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
    }

    rc = ompi_grequest_complete(request);
    OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, MPI_ERR_INTERN, FUNC_NAME);
}
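
A minimal generalized-request sketch to show where ompi_grequest_complete() fits. The callbacks use the standard MPI-2 signatures; completing the request from the same thread is artificial but legal (normally a worker thread would do it).

#include <mpi.h>
#include <stddef.h>

static int query_fn(void *extra_state, MPI_Status *status)
{
    /* fill in the status that MPI_Wait/MPI_Test will hand back */
    MPI_Status_set_elements(status, MPI_BYTE, 0);
    MPI_Status_set_cancelled(status, 0);
    status->MPI_SOURCE = MPI_UNDEFINED;
    status->MPI_TAG = MPI_UNDEFINED;
    return MPI_SUCCESS;
}

static int free_fn(void *extra_state) { return MPI_SUCCESS; }

static int cancel_fn(void *extra_state, int complete) { return MPI_SUCCESS; }

int main(int argc, char **argv)
{
    MPI_Request req;

    MPI_Init(&argc, &argv);
    MPI_Grequest_start(query_fn, free_fn, cancel_fn, NULL, &req);
    /* ... the actual work would normally happen elsewhere ... */
    MPI_Grequest_complete(req);          /* mark the operation done */
    MPI_Wait(&req, MPI_STATUS_IGNORE);   /* runs query_fn, then free_fn */
    MPI_Finalize();
    return 0;
}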
Example no. 10
int MPI_Register_datarep(char *datarep,
                         MPI_Datarep_conversion_function *read_conversion_fn,
                         MPI_Datarep_conversion_function *write_conversion_fn,
                         MPI_Datarep_extent_function *dtype_file_extent_fn,
                         void *extra_state)
{
    int rc;

    if (MPI_PARAM_CHECK) {
        rc = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (NULL == datarep) {
            rc = MPI_ERR_ARG;
        }
        OMPI_ERRHANDLER_CHECK(rc, MPI_FILE_NULL, rc, FUNC_NAME);
    }

    /* The io framework is only initialized lazily.  If it hasn't
       already been initialized, do so now (note that MPI_FILE_OPEN
       and MPI_FILE_DELETE are the only two places that it will be
       initialized). */

    if (!(mca_io_base_components_opened_valid ||
          mca_io_base_components_available_valid)) {
        if (OMPI_SUCCESS != (rc = mca_io_base_open())) {
            return OMPI_ERRHANDLER_INVOKE(MPI_FILE_NULL, rc, FUNC_NAME);
        }
        if (OMPI_SUCCESS != 
            (rc = mca_io_base_find_available(OMPI_ENABLE_PROGRESS_THREADS,
                                             OMPI_ENABLE_MPI_THREADS))) {
            return OMPI_ERRHANDLER_INVOKE(MPI_FILE_NULL, rc, FUNC_NAME);
        }
    }

    OPAL_CR_ENTER_LIBRARY();

    /* Call the back-end io component function */
    rc = mca_io_base_register_datarep(datarep, read_conversion_fn,
                                      write_conversion_fn,
                                      dtype_file_extent_fn,
                                      extra_state);


    /* All done */
    
    OMPI_ERRHANDLER_RETURN(rc, MPI_FILE_NULL, rc, FUNC_NAME);
}
Example no. 11
int MPI_File_iread_at(MPI_File fh, MPI_Offset offset, void *buf,
                      int count, MPI_Datatype datatype, MPI_Request *request)
{
    int rc;
    mca_io_base_request_t *io_request;

    if (MPI_PARAM_CHECK) {
        rc = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_file_invalid(fh)) {
            fh = MPI_FILE_NULL;
            rc = MPI_ERR_FILE;
        } else if (count < 0) {
            rc = MPI_ERR_COUNT;
        } else {
           OMPI_CHECK_DATATYPE_FOR_RECV(rc, datatype, count);
        }
        OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
    }

    /* Get a request */

    if (OMPI_SUCCESS != mca_io_base_request_alloc(fh, &io_request)) {
        return OMPI_ERRHANDLER_INVOKE(fh, MPI_ERR_NO_MEM, FUNC_NAME);
    }
    *request = (ompi_request_t*) io_request;

    /* Call the back-end io component function */

    switch (fh->f_io_version) {
    case MCA_IO_BASE_V_1_0_0:
        rc = fh->f_io_selected_module.v1_0_0.
            io_module_file_iread_at(fh, offset, buf, count, datatype, 
                                    io_request);
        break;

    default:
        rc = MPI_ERR_INTERN;
        break;
    }

    /* All done */
    
    OMPI_ERRHANDLER_RETURN(rc, fh, rc, FUNC_NAME);
}
Example no. 12
File: rput.c Project: bureddy/ompi
int MPI_Rput(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
             int target_rank, MPI_Aint target_disp, int target_count,
             MPI_Datatype target_datatype, MPI_Win win, MPI_Request *request)
{
    int rc;

    if (MPI_PARAM_CHECK) {
        rc = OMPI_SUCCESS;

        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if (ompi_win_invalid(win)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_WIN, FUNC_NAME);
        } else if (origin_count < 0 || target_count < 0) {
            rc = MPI_ERR_COUNT;
        } else if (ompi_win_peer_invalid(win, target_rank) &&
                   (MPI_PROC_NULL != target_rank)) {
            rc = MPI_ERR_RANK;
        } else if (NULL == target_datatype ||
                   MPI_DATATYPE_NULL == target_datatype) {
            rc = MPI_ERR_TYPE;
        } else if ( target_disp < 0 ) {
            rc = MPI_ERR_DISP;
        } else {
            OMPI_CHECK_DATATYPE_FOR_ONE_SIDED(rc, origin_datatype, origin_count);
            if (OMPI_SUCCESS == rc) {
                OMPI_CHECK_DATATYPE_FOR_ONE_SIDED(rc, target_datatype, target_count);
            }
        }
        OMPI_ERRHANDLER_CHECK(rc, win, rc, FUNC_NAME);
    }

    if (MPI_PROC_NULL == target_rank) {
        *request = &ompi_request_empty;
        return MPI_SUCCESS;
    }

    OPAL_CR_ENTER_LIBRARY();

    rc = win->w_osc_module->osc_rput(origin_addr, origin_count, origin_datatype,
                                     target_rank, target_disp, target_count,
                                     target_datatype, win, request);
    OMPI_ERRHANDLER_RETURN(rc, win, rc, FUNC_NAME);
}
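
A usage sketch for MPI_Rput (window setup is illustrative; run with at least two ranks). MPI_Wait on the returned request gives local completion only; remote completion still needs the unlock (or a flush).

#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, src = 7, winbuf = 0;
    MPI_Win win;
    MPI_Request req;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Win_create(&winbuf, sizeof(int), sizeof(int),
                   MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    if (0 == rank) {
        MPI_Win_lock(MPI_LOCK_EXCLUSIVE, 1, 0, win);
        MPI_Rput(&src, 1, MPI_INT, 1, 0, 1, MPI_INT, win, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);   /* src may be reused now */
        MPI_Win_unlock(1, win);              /* data is visible at rank 1 */
    }

    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}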
Example no. 13
int MPI_Ssend_init(void *buf, int count, MPI_Datatype type,
                   int dest, int tag, MPI_Comm comm,
                   MPI_Request *request) 
{
    int rc;

    if ( MPI_PARAM_CHECK ) {
        rc = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_comm_invalid(comm)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
        } else if (count < 0) {
            rc = MPI_ERR_COUNT;
        } else if (type == MPI_DATATYPE_NULL) {
            rc = MPI_ERR_TYPE;
        } else if (tag < 0 || tag > mca_pml.pml_max_tag) {
            rc = MPI_ERR_TAG;
        } else if (ompi_comm_peer_invalid(comm, dest) &&
                   (MPI_PROC_NULL != dest)) {
            rc = MPI_ERR_RANK;
        } else if (request == NULL) {
            rc = MPI_ERR_REQUEST;
        }
        OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
    }

    if (MPI_PROC_NULL == dest) {
        *request = OBJ_NEW(ompi_request_t);
        /* Other fields were initialized by the constructor for
           ompi_request_t */
        (*request)->req_type = OMPI_REQUEST_NOOP;
        (*request)->req_status = ompi_request_empty.req_status;
        (*request)->req_complete = true;
        (*request)->req_state = OMPI_REQUEST_INACTIVE;
        (*request)->req_persistent = true;
        (*request)->req_free = ompi_request_persistent_proc_null_free;
        return MPI_SUCCESS;
    }

    rc = MCA_PML_CALL(isend_init(buf,count,type,dest,tag,
                                 MCA_PML_BASE_SEND_SYNCHRONOUS,comm,request));
    OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}
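
A usage sketch for the persistent request MPI_Ssend_init creates (counts and tag illustrative; run with at least two ranks): the request is started repeatedly with MPI_Start and released with MPI_Request_free.

#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, i, buf = 0;
    MPI_Request req;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (0 == rank) {
        MPI_Ssend_init(&buf, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &req);
        for (i = 0; i < 3; ++i) {
            buf = i;
            MPI_Start(&req);                  /* reactivate the same request */
            MPI_Wait(&req, MPI_STATUS_IGNORE);
        }
        MPI_Request_free(&req);
    } else if (1 == rank) {
        for (i = 0; i < 3; ++i) {
            MPI_Recv(&buf, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
    }

    MPI_Finalize();
    return 0;
}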
Example no. 14
int MPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype, 
		  void *recvbuf, int recvcount, MPI_Datatype recvtype,
		  MPI_Comm comm) 
{
    int err;

    if (MPI_PARAM_CHECK) {

        /* Unrooted operation -- same checks for all ranks on both
           intracommunicators and intercommunicators */

        err = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_comm_invalid(comm)) {
          return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
        } else if (MPI_DATATYPE_NULL == recvtype) {
          err = MPI_ERR_TYPE;
        } else if (recvcount < 0) {
          err = MPI_ERR_COUNT;
        } else if (MPI_IN_PLACE == recvbuf) {
          return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
        } else if (MPI_IN_PLACE != sendbuf) {
            OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
        }
        OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
    }

    /* Do we need to do anything?  Everyone had to give the same send
       signature, which means that everyone must have given a
       sendcount > 0 if there's anything to send.  If we're doing
       IN_PLACE, however, check recvcount, not sendcount. */

    if ((MPI_IN_PLACE != sendbuf && 0 == sendcount) ||
        (0 == recvcount)) {
        return MPI_SUCCESS;
    }

    /* Invoke the coll component to perform the back-end operation */

    err = comm->c_coll.coll_allgather(sendbuf, sendcount, sendtype, 
                                      recvbuf, recvcount, recvtype, comm);
    OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
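
A usage sketch for MPI_Allgather: every rank contributes one int and every rank ends up with the whole vector (the receive buffer must hold comm_size * recvcount elements).

#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, size, *all;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    all = (int*) malloc(size * sizeof(int));
    MPI_Allgather(&rank, 1, MPI_INT, all, 1, MPI_INT, MPI_COMM_WORLD);
    /* on every rank, all[i] == i now */

    free(all);
    MPI_Finalize();
    return 0;
}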
Example no. 15
int MPI_Win_flush_local_all(MPI_Win win)
{
    int ret = MPI_SUCCESS;
    
    /* argument checking */
    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if (ompi_win_invalid(win)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_WIN, FUNC_NAME);
        }
        OMPI_ERRHANDLER_CHECK(ret, win, ret, FUNC_NAME);
    }

    OPAL_CR_ENTER_LIBRARY();

    /* flush all local operations and return */
    ret = win->w_osc_module->osc_flush_local_all(win);
    OMPI_ERRHANDLER_RETURN(ret, win, ret, FUNC_NAME);
}
Example no. 16
int MPI_Group_free(MPI_Group *group)
{
    int ret;

    /* check to make sure we don't free GROUP_EMPTY or GROUP_NULL */
    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if ((MPI_GROUP_NULL == *group) || (NULL == *group) ) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_GROUP,
                                          FUNC_NAME);
        }

    }

    ret = ompi_group_free ( group);
    OMPI_ERRHANDLER_CHECK(ret, MPI_COMM_WORLD, ret, FUNC_NAME);

    return MPI_SUCCESS;
}
Example no. 17
int MPI_Get(void *origin_addr, int origin_count,
            MPI_Datatype origin_datatype, int target_rank,
            MPI_Aint target_disp, int target_count,
            MPI_Datatype target_datatype, MPI_Win win)
{
    int rc;

    if (MPI_PARAM_CHECK) {
        rc = OMPI_SUCCESS;

        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if (ompi_win_invalid(win)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_WIN, FUNC_NAME);
        } else if (origin_count < 0 || target_count < 0) {
            rc = MPI_ERR_COUNT;
        } else if (ompi_win_peer_invalid(win, target_rank) &&
                   (MPI_PROC_NULL != target_rank)) {
            rc = MPI_ERR_RANK;
        } else if (!ompi_win_comm_allowed(win)) {
            rc = MPI_ERR_RMA_SYNC;
        } else if ( target_disp < 0 ) {
            rc = MPI_ERR_DISP;
        } else {
            OMPI_CHECK_DATATYPE_FOR_ONE_SIDED(rc, origin_datatype, origin_count);
            if (OMPI_SUCCESS == rc) {
                OMPI_CHECK_DATATYPE_FOR_ONE_SIDED(rc, target_datatype, target_count);
            }
        }
        OMPI_ERRHANDLER_CHECK(rc, win, rc, FUNC_NAME);
    }

    if (MPI_PROC_NULL == target_rank) return MPI_SUCCESS;

    rc = win->w_osc_module->osc_get(origin_addr, origin_count, origin_datatype,
                                    target_rank, target_disp, target_count,
                                    target_datatype, win);
    OMPI_ERRHANDLER_RETURN(rc, win, rc, FUNC_NAME);
}
Example no. 18
int MPI_Testany(int count, MPI_Request requests[], int *index, int *completed, MPI_Status *status) 
{
    if ( MPI_PARAM_CHECK ) {
        int rc = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if( 0 != count ) {
            if (NULL == requests) {
                rc = MPI_ERR_REQUEST;
            } else if (NULL == index) {
                rc = MPI_ERR_ARG;
            }
        }
        OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
    }

    if (OMPI_SUCCESS == ompi_request_test_any(count, requests, 
                                              index, completed, status)) {
        return MPI_SUCCESS;
    }
    return ompi_errhandler_request_invoke(count, requests, FUNC_NAME);
}
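
A usage sketch for MPI_Testany (sources and tags illustrative; run with at least two ranks): poll two pending receives and overlap work until one completes.

#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, bufs[2], index, flag = 0;
    MPI_Request reqs[2];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (0 == rank) {
        MPI_Irecv(&bufs[0], 1, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &reqs[0]);
        MPI_Irecv(&bufs[1], 1, MPI_INT, MPI_ANY_SOURCE, 1, MPI_COMM_WORLD, &reqs[1]);
        while (!flag) {
            MPI_Testany(2, reqs, &index, &flag, MPI_STATUS_IGNORE);
            /* ... overlap useful computation here ... */
        }
        MPI_Cancel(&reqs[1 - index]);              /* tidy up the other receive */
        MPI_Wait(&reqs[1 - index], MPI_STATUS_IGNORE);
    } else if (1 == rank) {
        int v = 99;
        MPI_Send(&v, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
    }

    MPI_Finalize();
    return 0;
}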
Example no. 19
int MPI_Register_datarep(const char *datarep,
			 MPI_Datarep_conversion_function *read_conversion_fn,
			 MPI_Datarep_conversion_function *write_conversion_fn,
			 MPI_Datarep_extent_function *dtype_file_extent_fn,
			 void *extra_state)
{
    int rc;

    if (MPI_PARAM_CHECK) {
        rc = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (NULL == datarep) {
            rc = MPI_ERR_ARG;
        }
        OMPI_ERRHANDLER_CHECK(rc, MPI_FILE_NULL, rc, FUNC_NAME);
    }

    /* The io framework is only initialized lazily.  If it hasn't
       already been initialized, do so now (note that MPI_FILE_OPEN
       and MPI_FILE_DELETE are the only two places that it will be
       initialized). */

    if (OMPI_SUCCESS != (rc = mca_base_framework_open(&ompi_io_base_framework, 0))) {
        return OMPI_ERRHANDLER_INVOKE(MPI_FILE_NULL, rc, FUNC_NAME);
    }

    OPAL_CR_ENTER_LIBRARY();

    /* Call the back-end io component function */
    /* XXX -- CONST -- do not cast away const -- update mca/io */
    rc = mca_io_base_register_datarep((char *) datarep, read_conversion_fn,
                                      write_conversion_fn,
                                      dtype_file_extent_fn,
                                      extra_state);


    /* All done */
    
    OMPI_ERRHANDLER_RETURN(rc, MPI_FILE_NULL, rc, FUNC_NAME);
}
Example no. 20
int MPI_File_delete(const char *filename, MPI_Info info)
{
    int rc;

    if (MPI_PARAM_CHECK) {
        rc = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (NULL == info || ompi_info_is_freed(info)) {
            rc = MPI_ERR_INFO;
        } else if (NULL == filename) {
            rc = MPI_ERR_ARG;
        }
        OMPI_ERRHANDLER_CHECK(rc, MPI_FILE_NULL, rc, FUNC_NAME);
    }

    /* Note that MPI-2:9.7 (p265 in the ps; 261 in the pdf) says that
       errors in MPI_FILE_OPEN (before the file handle is created)
       should invoke the default error handler on MPI_FILE_NULL.
       Hence, if we get a file handle out of ompi_file_open(), invoke
       the error handler on that.  If not, invoke the error handler on
       MPI_FILE_NULL. */

    /* The io framework is only initialized lazily.  If it hasn't
       already been initialized, do so now (note that MPI_FILE_OPEN
       and MPI_FILE_DELETE are the only two places that it will be
       initialized). We might want to add a check to see if the
       framework is open instead of just incrementing the open count. */

    if (OMPI_SUCCESS != (rc = mca_base_framework_open(&ompi_io_base_framework, 0))) {
        return OMPI_ERRHANDLER_INVOKE(MPI_FILE_NULL, rc, FUNC_NAME);
    }

    OPAL_CR_ENTER_LIBRARY();

    /* Since there is no MPI_File handle associated with this
       function, the MCA has to do a selection and perform the
       action */
    rc = mca_io_base_delete(filename, info);
    OMPI_ERRHANDLER_RETURN(rc, MPI_FILE_NULL, rc, FUNC_NAME);
}
Example no. 21
int MPI_Win_detach(MPI_Win win, void *base)
{
    int ret = MPI_SUCCESS;
    
    /* argument checking */
    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if (ompi_win_invalid(win)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_WIN, FUNC_NAME);
        } else if (NULL == base) {
            ret = MPI_ERR_ARG;
        }
        OMPI_ERRHANDLER_CHECK(ret, win, ret, FUNC_NAME);
    }

    OPAL_CR_ENTER_LIBRARY();

    /* detach the memory region and return */
    ret = win->w_osc_module->osc_win_detach(win, base);
    OMPI_ERRHANDLER_RETURN(ret, win, ret, FUNC_NAME);
}
Example no. 22
int MPI_Group_free(MPI_Group *group)
{
    int ret;

    /* check to make sure we don't free GROUP_NULL.  Note that we *do*
       allow freeing GROUP_EMPTY after much debate in the OMPI core
       group.  The final thread about this, and the decision to
       support freeing GROUP_EMPTY can be found here:

       http://www.open-mpi.org/community/lists/devel/2007/12/2750.php

       The short version: other MPI's allow it (LAM/MPI, CT6, MPICH2)
       probably mainly because the Intel MPI test suite expects it to
       happen and there's now several years worth of expected behavior
       to allow this behavior.  Rather than have to explain every time
       why OMPI is the only one who completely adheres to the standard
       / fails the intel tests, it seemed easier to just let this one
       slide.  It's not really that important, after all! */
    if (MPI_PARAM_CHECK) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if ((NULL == group) ||
            (MPI_GROUP_NULL == *group) || (NULL == *group) ) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_GROUP,
                                          FUNC_NAME);
        }

    }

    OPAL_CR_ENTER_LIBRARY();

    ret = ompi_group_free ( group);
    OMPI_ERRHANDLER_CHECK(ret, MPI_COMM_WORLD, ret, FUNC_NAME);

    OPAL_CR_EXIT_LIBRARY();
    return MPI_SUCCESS;
}
Example no. 23
int MPI_File_read_ordered(MPI_File fh, void *buf, int count,
                          MPI_Datatype datatype, MPI_Status *status)
{
    int rc;

    if (MPI_PARAM_CHECK) {
        rc = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_file_invalid(fh)) {
            fh = MPI_FILE_NULL;
            rc = MPI_ERR_FILE;
        } else if (count < 0) {
            rc = MPI_ERR_COUNT;
        } else {
           OMPI_CHECK_DATATYPE_FOR_RECV(rc, datatype, count);
        }
        OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
    }

    /* Call the back-end io component function */

    switch (fh->f_io_version) {
    case MCA_IO_BASE_V_1_0_0:
        rc = fh->f_io_selected_module.v1_0_0.
            io_module_file_read_ordered(fh, buf, count, datatype, status);
        break;

    default:
        rc = MPI_ERR_INTERN;
        break;
    }

    /* All done */
    
    OMPI_ERRHANDLER_RETURN(rc, fh, rc, FUNC_NAME);
}
Example no. 24
int MPI_File_seek(MPI_File fh, MPI_Offset offset, int whence) 
{
    int rc;

    if (MPI_PARAM_CHECK) {
        rc = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_file_invalid(fh)) {
            rc = MPI_ERR_FILE;
            fh = MPI_FILE_NULL;
        } else if (MPI_SEEK_SET != whence && MPI_SEEK_CUR != whence &&
                   MPI_SEEK_END != whence) {
            rc = MPI_ERR_ARG;
        }
        OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME);
    }

    OPAL_CR_ENTER_LIBRARY();

    /* Call the back-end io component function */

    switch (fh->f_io_version) {
    case MCA_IO_BASE_V_2_0_0:
        rc = fh->f_io_selected_module.v2_0_0.
            io_module_file_seek(fh, offset, whence);
        break;

    default:
        rc = MPI_ERR_INTERN;
        break;
    }

    /* All done */
    
    OMPI_ERRHANDLER_RETURN(rc, fh, rc, FUNC_NAME);
}
Example no. 25
int MPI_Type_vector(int count,
                    int blocklength,
                    int stride,
                    MPI_Datatype oldtype,
                    MPI_Datatype *newtype)
{
   int rc;

   if( MPI_PARAM_CHECK ) {
      OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
      if (NULL == oldtype || MPI_DATATYPE_NULL == oldtype ||
          NULL == newtype) {
        return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_TYPE,
                                      FUNC_NAME );
      } else if( count < 0 ) {
         OMPI_ERRHANDLER_RETURN( MPI_ERR_COUNT, MPI_COMM_WORLD,
                                MPI_ERR_COUNT, FUNC_NAME );
      } else if( blocklength < 0) {
         OMPI_ERRHANDLER_RETURN( MPI_ERR_ARG, MPI_COMM_WORLD,
                                MPI_ERR_ARG, FUNC_NAME );
      }
   }

   rc = ompi_ddt_create_vector ( count, blocklength, stride, oldtype, newtype );
   OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, FUNC_NAME );

   {
      int* a_i[3];
      a_i[0] = &count;
      a_i[1] = &blocklength;
      a_i[2] = &stride;

      ompi_ddt_set_args( *newtype, 3, a_i, 0, NULL, 1, &oldtype, MPI_COMBINER_VECTOR );
   }
   return MPI_SUCCESS;
}
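
A usage sketch for MPI_Type_vector (run with at least two ranks): describe one column of a 4x4 row-major int matrix as 4 blocks of 1 element, 4 elements apart.

#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, m[4][4], col[4], i, j;
    MPI_Datatype column;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    for (i = 0; i < 4; ++i)
        for (j = 0; j < 4; ++j)
            m[i][j] = i * 4 + j;

    MPI_Type_vector(4, 1, 4, MPI_INT, &column);
    MPI_Type_commit(&column);

    if (0 == rank) {
        MPI_Send(&m[0][1], 1, column, 1, 0, MPI_COMM_WORLD);   /* column 1 */
    } else if (1 == rank) {
        MPI_Recv(col, 4, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        /* col now holds 1, 5, 9, 13 */
    }

    MPI_Type_free(&column);
    MPI_Finalize();
    return 0;
}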
Example no. 26
int MPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
                 MPI_Datatype sendtype, void *recvbuf, int recvcount,
                 MPI_Datatype recvtype, int root, MPI_Comm comm) 
{
    int i, size, err;
    
    if (MPI_PARAM_CHECK) {
        err = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_comm_invalid(comm)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, 
                                          FUNC_NAME);
        } else if ((ompi_comm_rank(comm) != root && MPI_IN_PLACE == recvbuf) ||
                   (ompi_comm_rank(comm) == root && MPI_IN_PLACE == sendbuf)) {
            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
        }

        /* Errors for intracommunicators */

        if (OMPI_COMM_IS_INTRA(comm)) {

          /* Errors for all ranks */

          if ((root >= ompi_comm_size(comm)) || (root < 0)) {
            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
          }

          if (MPI_IN_PLACE != recvbuf) {
              if (recvcount < 0) {
                  return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, 
                                                FUNC_NAME);
              }
              
              if (MPI_DATATYPE_NULL == recvtype) {
                  return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, 
                                                FUNC_NAME); 
              }
          }

          /* Errors for the root.  Some of these could have been
             combined into compound if statements above, but since
             this whole section can be compiled out (or turned off at
             run time) for efficiency, it's more clear to separate
             them out into individual tests. */

          if (ompi_comm_rank(comm) == root) {
            if (NULL == displs) {
                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
            }

            if (NULL == sendcounts) {
                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
            }

            size = ompi_comm_size(comm);
            for (i = 0; i < size; ++i) {
              OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
              OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
            }
          }
        }

        /* Errors for intercommunicators */

        else {
          if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
                 MPI_ROOT == root || MPI_PROC_NULL == root)) {
            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
          }

          /* Errors for the receivers */

          if (MPI_ROOT != root && MPI_PROC_NULL != root) {
            if (recvcount < 0) {
              return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
            }

            if (MPI_DATATYPE_NULL == recvtype) {
              return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME); 
            }
          }

          /* Errors for the root.  Ditto on the comment above -- these
             error checks could have been combined above, but let's
             make the code easier to read. */

          else if (MPI_ROOT == root) {
            if (NULL == displs) {
                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
            }

            if (NULL == sendcounts) {
                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
            }

            size = ompi_comm_size(comm);
            for (i = 0; i < size; ++i) {
              OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
              OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
            }
          }
        }
    }

    /* Invoke the coll component to perform the back-end operation */
	
    err = comm->c_coll.coll_scatterv(sendbuf, sendcounts, displs, sendtype, 
                                     recvbuf, recvcount, recvtype, root, comm);
    OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
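
A usage sketch for MPI_Scatterv (sizes illustrative; assumes at most 64 elements per rank): the root sends rank i exactly i+1 ints, with displacements computed as a running sum of the counts.

#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, size, i, total = 0;
    int *sendbuf = NULL, *counts = NULL, *displs = NULL, recvbuf[64];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (0 == rank) {
        counts = (int*) malloc(size * sizeof(int));
        displs = (int*) malloc(size * sizeof(int));
        for (i = 0; i < size; ++i) {
            counts[i] = i + 1;       /* rank i receives i+1 elements */
            displs[i] = total;
            total += counts[i];
        }
        sendbuf = (int*) calloc(total, sizeof(int));
    }

    MPI_Scatterv(sendbuf, counts, displs, MPI_INT,
                 recvbuf, rank + 1, MPI_INT, 0, MPI_COMM_WORLD);

    if (0 == rank) { free(sendbuf); free(counts); free(displs); }
    MPI_Finalize();
    return 0;
}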
Example no. 27
int MPI_Accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
                   int target_rank, MPI_Aint target_disp, int target_count,
                   MPI_Datatype target_datatype, MPI_Op op, MPI_Win win) 
{
    int rc;
    ompi_win_t *ompi_win = (ompi_win_t*) win;

    if (MPI_PARAM_CHECK) {
        rc = OMPI_SUCCESS;

        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if (ompi_win_invalid(win)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_WIN, FUNC_NAME);
        } else if (origin_count < 0 || target_count < 0) {
            rc = MPI_ERR_COUNT;
        } else if (ompi_win_peer_invalid(win, target_rank) &&
                   (MPI_PROC_NULL != target_rank)) {
            rc = MPI_ERR_RANK;
        } else if (MPI_OP_NULL == op) {
            rc = MPI_ERR_OP;
        } else if (!ompi_op_is_intrinsic(op)) {
            rc = MPI_ERR_OP;
        } else if (!ompi_win_comm_allowed(win)) {
            rc = MPI_ERR_RMA_SYNC;
        } else if ( target_disp < 0 ) {
            rc = MPI_ERR_DISP;
        } else {
            OMPI_CHECK_DATATYPE_FOR_ONE_SIDED(rc, origin_datatype, origin_count);
            if (OMPI_SUCCESS == rc) {
                OMPI_CHECK_DATATYPE_FOR_ONE_SIDED(rc, target_datatype, target_count);
            }
            if (OMPI_SUCCESS == rc) {
                /* While technically the standard probably requires that the
                   datatypes used with MPI_REPLACE conform to all the rules
                   for other reduction operators, we don't require such
                   behavior, as checking for it is expensive here and we don't
                   care in the implementation. */
                if (op != &ompi_mpi_op_replace) {
                    ompi_datatype_t *op_check_dt, *origin_check_dt;
                    char *msg;

                    if (ompi_ddt_is_predefined(origin_datatype)) {
                        origin_check_dt = origin_datatype;
                    } else {
                        int i, index = -1, num_found = 0;
                        uint64_t mask = 1;

                        for (i = 0 ; i < DT_MAX_PREDEFINED ; ++i) {
                            if (origin_datatype->bdt_used & mask) {
                                num_found++;
                                index = i;
                            }
                            mask *= 2;
                        }
                        if (index < 0 || num_found > 1) {
                            /* this is an erroneous datatype.  Let
                               ompi_op_is_valid tell the user that */
                            OMPI_ERRHANDLER_RETURN(MPI_ERR_TYPE, win, MPI_ERR_TYPE, FUNC_NAME);
                        } else {
                            origin_check_dt = (ompi_datatype_t*)
                                ompi_ddt_basicDatatypes[index];
                        }
                    }

                    /* ACCUMULATE, unlike REDUCE, can be used with derived
                       datatypes and predefined operations, with some
                       restrictions outlined in MPI-2:6.3.4.  The derived
                       datatype must be composed entirely of one predefined
                       datatype (so you can do all the construction you want,
                       but at the bottom, you can only use one datatype, say,
                       MPI_INT).  If the datatype at the target isn't
                       predefined, then make sure it's composed of only one
                       datatype, and check that datatype against
                       ompi_op_is_valid(). */
                    if (ompi_ddt_is_predefined(target_datatype)) {
                        op_check_dt = target_datatype;
                    } else {
                        int i, index = -1, num_found = 0;
                        uint64_t mask = 1;

                        for (i = 0 ; i < DT_MAX_PREDEFINED ; ++i) {
                            if (target_datatype->bdt_used & mask) {
                                num_found++;
                                index = i;
                            }
                            mask *= 2;
                        }
                        if (index < 0 || num_found > 1) {
                            /* this is an erroneous datatype.  Let
                               ompi_op_is_valid tell the user that */
                            OMPI_ERRHANDLER_RETURN(MPI_ERR_TYPE, win, MPI_ERR_TYPE, FUNC_NAME);
                        } else {
                            /* datatype passes muster as far as restrictions
                               in MPI-2:6.3.4.  Is the primitive ok with the
                               op?  Unfortunately have to cast away
                               constness... */
                            op_check_dt = (ompi_datatype_t*)
                                ompi_ddt_basicDatatypes[index];
                        }
                    }

                    /* check to make sure same primitive type */
                    if (op_check_dt != origin_check_dt) {
                        OMPI_ERRHANDLER_RETURN(MPI_ERR_ARG, win, MPI_ERR_ARG, FUNC_NAME);
                    }

                    /* check to make sure primitive type is valid for
                       reduction.  Should do this on the target, but
                       then can't get the errcode back for this
                       call */
                    if (!ompi_op_is_valid(op, op_check_dt, &msg, FUNC_NAME)) {
                        int ret = OMPI_ERRHANDLER_INVOKE(win, MPI_ERR_OP, msg);
                        free(msg);
                        return ret;
                    }
                }
            }
        }
        OMPI_ERRHANDLER_CHECK(rc, win, rc, FUNC_NAME);
    }

    if (MPI_PROC_NULL == target_rank) return MPI_SUCCESS;

    rc = ompi_win->w_osc_module->osc_accumulate(origin_addr, 
                                                origin_count,
                                                origin_datatype,
                                                target_rank, 
                                                target_disp, 
                                                target_count,
                                                target_datatype, 
                                                op, win);
    OMPI_ERRHANDLER_RETURN(rc, win, rc, FUNC_NAME);
}
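
A usage sketch for MPI_Accumulate: every rank adds its rank number into a counter at rank 0 inside a fence epoch. MPI_SUM is an intrinsic op, which is exactly what the ompi_op_is_intrinsic() check above requires.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, counter = 0;
    MPI_Win win;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Win_create(&counter, sizeof(int), sizeof(int),
                   MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    MPI_Win_fence(0, win);
    MPI_Accumulate(&rank, 1, MPI_INT, 0, 0, 1, MPI_INT, MPI_SUM, win);
    MPI_Win_fence(0, win);

    if (0 == rank) printf("sum of ranks: %d\n", counter);
    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}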
Example no. 28
int MPI_Sendrecv_replace(void * buf, int count, MPI_Datatype datatype,
                         int dest, int sendtag, int source, int recvtag,
                         MPI_Comm comm, MPI_Status *status)

{
    int rc = MPI_SUCCESS;

    if ( MPI_PARAM_CHECK ) {
        rc = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        OMPI_CHECK_DATATYPE_FOR_RECV(rc, datatype, count);

        if (ompi_comm_invalid(comm)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
        } else if (dest != MPI_PROC_NULL && ompi_comm_peer_invalid(comm, dest)) {
            rc = MPI_ERR_RANK;
        } else if (sendtag < 0 || sendtag > mca_pml.pml_max_tag) {
            rc = MPI_ERR_TAG;
        } else if (source != MPI_PROC_NULL && source != MPI_ANY_SOURCE && ompi_comm_peer_invalid(comm, source)) {
            rc = MPI_ERR_RANK;
        } else if (((recvtag < 0) && (recvtag !=  MPI_ANY_TAG)) || (recvtag > mca_pml.pml_max_tag)) {
            rc = MPI_ERR_TAG;
        }
        OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
    }
 
    /* simple case */
    if ( source == MPI_PROC_NULL || dest == MPI_PROC_NULL || count == 0 ) {
        return MPI_Sendrecv(buf,count,datatype,dest,sendtag,buf,count,datatype,source,recvtag,comm,status);
    } else {

        ompi_convertor_t convertor;
        struct iovec iov;
        unsigned char recv_data[2048];
        size_t packed_size, max_data;
        uint32_t iov_count;
        ompi_status_public_t recv_status;
        ompi_proc_t* proc = ompi_comm_peer_lookup(comm,dest);
        if(proc == NULL) {
            rc = MPI_ERR_RANK;
            OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
        }

        /* initialize convertor to unpack recv buffer */
        OBJ_CONSTRUCT(&convertor, ompi_convertor_t);
        ompi_convertor_copy_and_prepare_for_recv( proc->proc_convertor, datatype,
                                                  count, buf, 0, &convertor );

        /* setup a buffer for recv */
        ompi_convertor_get_packed_size( &convertor, &packed_size );
        if( packed_size > sizeof(recv_data) ) {
            rc = MPI_Alloc_mem(packed_size, MPI_INFO_NULL, &iov.iov_base);
            if (MPI_SUCCESS != rc) {
                OMPI_ERRHANDLER_RETURN(OMPI_ERR_OUT_OF_RESOURCE, comm, MPI_ERR_BUFFER, FUNC_NAME);
            }
        } else {
            iov.iov_base = (caddr_t)recv_data;
        }

        /* recv into temporary buffer */
        rc = MPI_Sendrecv( buf, count, datatype, dest, sendtag, iov.iov_base, packed_size, 
                           MPI_BYTE, source, recvtag, comm, &recv_status );
        if (rc != MPI_SUCCESS) {
            if(packed_size > sizeof(recv_data))
                MPI_Free_mem(iov.iov_base);
            OBJ_DESTRUCT(&convertor);
            OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
        }

        /* unpack into users buffer */
        iov.iov_len = recv_status._count;
        iov_count = 1;
        max_data = recv_status._count;
        ompi_convertor_unpack(&convertor, &iov, &iov_count, &max_data );

        /* return status to user */
        if(status != MPI_STATUS_IGNORE) {
            *status = recv_status;
        }

        /* release resources */
        if(packed_size > sizeof(recv_data)) {
            MPI_Free_mem(iov.iov_base);
        }
        OBJ_DESTRUCT(&convertor);
        return MPI_SUCCESS;
    }
}
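
A usage sketch for MPI_Sendrecv_replace: shift a value around a ring using a single buffer. The pack/unpack through a temporary buffer shown above is precisely what makes the single user buffer sufficient.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, size, value, left, right;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    value = rank;
    right = (rank + 1) % size;
    left  = (rank + size - 1) % size;

    /* send my value right, receive my left neighbor's into the same buffer */
    MPI_Sendrecv_replace(&value, 1, MPI_INT, right, 0, left, 0,
                         MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    printf("rank %d now holds %d\n", rank, value);
    MPI_Finalize();
    return 0;
}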