Example #1
int mca_pml_ob1_iprobe(int src,
                       int tag,
                       struct ompi_communicator_t *comm,
                       int *matched, ompi_status_public_t * status)
{
    int rc = OMPI_SUCCESS;
    mca_pml_ob1_recv_request_t recvreq;

    OBJ_CONSTRUCT( &recvreq, mca_pml_ob1_recv_request_t );
    recvreq.req_recv.req_base.req_ompi.req_type = OMPI_REQUEST_PML;
    recvreq.req_recv.req_base.req_type = MCA_PML_REQUEST_IPROBE;

    MCA_PML_OB1_RECV_REQUEST_INIT(&recvreq, NULL, 0, &ompi_mpi_char.dt, src, tag, comm, true);
    MCA_PML_OB1_RECV_REQUEST_START(&recvreq);

    if( recvreq.req_recv.req_base.req_ompi.req_complete == true ) {
        if( NULL != status ) {
            OMPI_STATUS_SET(status, &recvreq.req_recv.req_base.req_ompi.req_status);
        }
        rc = recvreq.req_recv.req_base.req_ompi.req_status.MPI_ERROR;
        *matched = 1;
    } else {
        *matched = 0;
        opal_progress();
    }
    MCA_PML_BASE_RECV_REQUEST_FINI( &recvreq.req_recv );
    return rc;
}
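
Example #1 is the ob1 backend for MPI_Iprobe: a probe-only receive request is built on the stack, started, and the match result is reported without consuming the message. A minimal user-level sketch of the call it implements (standard MPI only; the function name poll_for_message is illustrative and not part of the excerpt):

#include <mpi.h>
#include <stdio.h>

/* Illustrative usage: check for a pending message without receiving it.
 * When ob1 is the selected PML, MPI_Iprobe dispatches to mca_pml_ob1_iprobe. */
static int poll_for_message(MPI_Comm comm)
{
    int flag = 0;
    MPI_Status status;

    MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, comm, &flag, &status);
    if (flag) {
        printf("message pending from rank %d, tag %d\n",
               status.MPI_SOURCE, status.MPI_TAG);
    }
    return flag;  /* mirrors *matched in the PML function above */
}
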
Example #2
int mca_pml_ob1_recv(void *addr,
                     size_t count,
                     ompi_datatype_t * datatype,
                     int src,
                     int tag,
                     struct ompi_communicator_t *comm,
                     ompi_status_public_t * status)
{
    int rc;
    mca_pml_ob1_recv_request_t *recvreq;
    MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq, rc);
    if (NULL == recvreq)
        return rc;

    MCA_PML_OB1_RECV_REQUEST_INIT(recvreq,
                                   addr,
                                   count, datatype, src, tag, comm, false);

    PERUSE_TRACE_COMM_EVENT (PERUSE_COMM_REQ_ACTIVATE,
                             &((recvreq)->req_recv.req_base),
                             PERUSE_RECV);

    MCA_PML_OB1_RECV_REQUEST_START(recvreq);
    ompi_request_wait_completion(&recvreq->req_recv.req_base.req_ompi);

    if (NULL != status) {  /* return status */
        OMPI_STATUS_SET(status, &recvreq->req_recv.req_base.req_ompi.req_status);
    }
    rc = recvreq->req_recv.req_base.req_ompi.req_status.MPI_ERROR;
    ompi_request_free( (ompi_request_t**)&recvreq );
    return rc;
}
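
Example #2 backs blocking MPI_Recv: a receive request is allocated, initialized, started, and waited on before the status and error code are handed back. A hedged user-level counterpart (standard MPI; receive_ints is an illustrative name):

#include <mpi.h>
#include <stdio.h>

/* Illustrative blocking receive; MPI_Recv reaches mca_pml_ob1_recv
 * when ob1 is the selected PML. */
static int receive_ints(int *buf, int count, int src, int tag, MPI_Comm comm)
{
    MPI_Status status;
    int rc = MPI_Recv(buf, count, MPI_INT, src, tag, comm, &status);
    if (rc == MPI_SUCCESS) {
        /* status.MPI_SOURCE/MPI_TAG mirror req_status in the PML request */
        printf("received from rank %d\n", status.MPI_SOURCE);
    }
    return rc;  /* analogous to req_status.MPI_ERROR above */
}
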
Example #3
int
mca_pml_ob1_mprobe(int src,
                   int tag,
                   struct ompi_communicator_t *comm,
                   struct ompi_message_t **message,
                   ompi_status_public_t * status)
{
    int rc = OMPI_SUCCESS;
    mca_pml_ob1_recv_request_t *recvreq;

    MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq, rc);
    if (NULL == recvreq)
        return rc;
    recvreq->req_recv.req_base.req_type = MCA_PML_REQUEST_MPROBE;

    /* initialize the request enough to probe and get the status */
    MCA_PML_OB1_RECV_REQUEST_INIT(recvreq, NULL, 0, &ompi_mpi_char.dt, 
                                  src, tag, comm, false);
    MCA_PML_OB1_RECV_REQUEST_START(recvreq);

    ompi_request_wait_completion(&recvreq->req_recv.req_base.req_ompi);

    if( NULL != status ) {
        *status = recvreq->req_recv.req_base.req_ompi.req_status;
    }

    *message = ompi_message_alloc();
    (*message)->comm = comm;
    (*message)->req_ptr = recvreq;
    (*message)->count = recvreq->req_recv.req_base.req_ompi.req_status._ucount;

    return OMPI_SUCCESS;
}
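
Example #3 implements the matched probe (MPI_Mprobe): it blocks until a message matches, then returns an ompi_message_t handle instead of delivering data, so a later matched receive can consume exactly that message. A sketch of the user-level pairing this enables (standard MPI calls; buffer handling and the function name are illustrative):

#include <mpi.h>
#include <stdlib.h>

/* Probe for a matching message, size a buffer from the status, then
 * receive exactly the probed message with MPI_Mrecv. */
static int mprobe_then_recv(int src, int tag, MPI_Comm comm)
{
    MPI_Message msg;
    MPI_Status status;
    int count = 0;

    MPI_Mprobe(src, tag, comm, &msg, &status);   /* backed by mca_pml_ob1_mprobe */
    MPI_Get_count(&status, MPI_BYTE, &count);

    char *buf = malloc(count > 0 ? count : 1);
    MPI_Mrecv(buf, count, MPI_BYTE, &msg, MPI_STATUS_IGNORE);
    free(buf);
    return count;
}
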
Example #4
int mca_pml_ob1_irecv(void *addr,
                      size_t count,
                      ompi_datatype_t * datatype,
                      int src,
                      int tag,
                      struct ompi_communicator_t *comm,
                      struct ompi_request_t **request)
{
    mca_pml_ob1_recv_request_t *recvreq;
    MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq);
    if (NULL == recvreq)
        return OMPI_ERR_TEMP_OUT_OF_RESOURCE;

    recvreq->req_recv.req_base.req_type = MCA_PML_REQUEST_RECV;
    MCA_PML_OB1_RECV_REQUEST_INIT(recvreq,
                                   addr,
                                   count, datatype, src, tag, comm, false);

    PERUSE_TRACE_COMM_EVENT (PERUSE_COMM_REQ_ACTIVATE,
                             &((recvreq)->req_recv.req_base),
                             PERUSE_RECV);

    MCA_PML_OB1_RECV_REQUEST_START(recvreq);
    *request = (ompi_request_t *) recvreq;
    return OMPI_SUCCESS;
}
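
Example #4 is the non-blocking variant: the request is allocated, initialized, started, and returned to the caller without waiting, which is the contract of MPI_Irecv. A minimal user-level sketch (illustrative name, standard MPI calls):

#include <mpi.h>

/* Post a non-blocking receive, overlap other work, then wait. */
static void irecv_with_overlap(double *buf, int count, int src, int tag,
                               MPI_Comm comm)
{
    MPI_Request req;
    MPI_Status status;

    MPI_Irecv(buf, count, MPI_DOUBLE, src, tag, comm, &req);  /* mca_pml_ob1_irecv under ob1 */

    /* ... unrelated computation can run here while the message arrives ... */

    MPI_Wait(&req, &status);  /* analogous to ompi_request_wait_completion above */
}
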
Example #5
int mca_pml_ob1_recv(void *addr,
                     size_t count,
                     ompi_datatype_t * datatype,
                     int src,
                     int tag,
                     struct ompi_communicator_t *comm,
                     ompi_status_public_t * status)
{
    mca_pml_ob1_recv_request_t recvreq;
    int rc;

    OBJ_CONSTRUCT(&recvreq, mca_pml_ob1_recv_request_t);

    MCA_PML_OB1_RECV_REQUEST_INIT(&recvreq, addr, count, datatype,
                                  src, tag, comm, false);

    PERUSE_TRACE_COMM_EVENT (PERUSE_COMM_REQ_ACTIVATE,
                             &(recvreq.req_recv.req_base),
                             PERUSE_RECV);

    MCA_PML_OB1_RECV_REQUEST_START(&recvreq);
    ompi_request_wait_completion(&recvreq.req_recv.req_base.req_ompi);

    if (NULL != status) {  /* return status */
        *status = recvreq.req_recv.req_base.req_ompi.req_status;
    }

    rc = recvreq.req_recv.req_base.req_ompi.req_status.MPI_ERROR;
    MCA_PML_BASE_RECV_REQUEST_FINI(&recvreq.req_recv);
    OBJ_DESTRUCT(&recvreq);

    return rc;
}
Example #6
File: pml_ob1_irecv.c  Project: AT95/ompi
int mca_pml_ob1_recv(void *addr,
                     size_t count,
                     ompi_datatype_t * datatype,
                     int src,
                     int tag,
                     struct ompi_communicator_t *comm,
                     ompi_status_public_t * status)
{
    mca_pml_ob1_recv_request_t *recvreq = NULL;
    int rc;

#if !OMPI_ENABLE_THREAD_MULTIPLE
    recvreq = mca_pml_ob1_recvreq;
    mca_pml_ob1_recvreq = NULL;
    if( OPAL_UNLIKELY(NULL == recvreq) )
#endif  /* !OMPI_ENABLE_THREAD_MULTIPLE */
        {
            MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq);
            if (NULL == recvreq)
                return OMPI_ERR_TEMP_OUT_OF_RESOURCE;
        }

    MCA_PML_OB1_RECV_REQUEST_INIT(recvreq, addr, count, datatype,
                                  src, tag, comm, false);

    PERUSE_TRACE_COMM_EVENT (PERUSE_COMM_REQ_ACTIVATE,
                             &(recvreq->req_recv.req_base),
                             PERUSE_RECV);

    MCA_PML_OB1_RECV_REQUEST_START(recvreq);
    ompi_request_wait_completion(&recvreq->req_recv.req_base.req_ompi);

    if (NULL != status) {  /* return status */
        *status = recvreq->req_recv.req_base.req_ompi.req_status;
    }

    rc = recvreq->req_recv.req_base.req_ompi.req_status.MPI_ERROR;

#if OMPI_ENABLE_THREAD_MULTIPLE
    MCA_PML_OB1_RECV_REQUEST_RETURN(recvreq);
#else
    if( NULL != mca_pml_ob1_recvreq ) {
        MCA_PML_OB1_RECV_REQUEST_RETURN(recvreq);
    } else {
        mca_pml_ob1_recv_request_fini (recvreq);
        mca_pml_ob1_recvreq = recvreq;
    }
#endif

    return rc;
}
Example #7
int mca_pml_ob1_recv(void *addr,
                     size_t count,
                     ompi_datatype_t * datatype,
                     int src,
                     int tag,
                     struct ompi_communicator_t *comm,
                     ompi_status_public_t * status)
{
    mca_pml_ob1_recv_request_t *recvreq = NULL;
    int rc;

    if (OPAL_LIKELY(!ompi_mpi_thread_multiple)) {
        recvreq = mca_pml_ob1_recvreq;
        mca_pml_ob1_recvreq = NULL;
    }

    if( OPAL_UNLIKELY(NULL == recvreq) ) {
        MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq);
        if (NULL == recvreq)
            return OMPI_ERR_TEMP_OUT_OF_RESOURCE;
    }

    recvreq->req_recv.req_base.req_type = MCA_PML_REQUEST_RECV;
    MCA_PML_OB1_RECV_REQUEST_INIT(recvreq, addr, count, datatype,
                                  src, tag, comm, false);

    PERUSE_TRACE_COMM_EVENT (PERUSE_COMM_REQ_ACTIVATE,
                             &(recvreq->req_recv.req_base),
                             PERUSE_RECV);

    MCA_PML_OB1_RECV_REQUEST_START(recvreq);
    ompi_request_wait_completion(&recvreq->req_recv.req_base.req_ompi);

    if (NULL != status) {  /* return status */
        *status = recvreq->req_recv.req_base.req_ompi.req_status;
    }

    rc = recvreq->req_recv.req_base.req_ompi.req_status.MPI_ERROR;

    if (recvreq->req_recv.req_base.req_pml_complete) {
/* make the buffer defined when the request is completed,
           and before releasing the objects. */
        MEMCHECKER(
            memchecker_call(&opal_memchecker_base_mem_defined,
                            recvreq->req_recv.req_base.req_addr,
                            recvreq->req_recv.req_base.req_count,
                            recvreq->req_recv.req_base.req_datatype);
        );
    }

    if (OPAL_UNLIKELY(ompi_mpi_thread_multiple || NULL != mca_pml_ob1_recvreq)) {
        MCA_PML_OB1_RECV_REQUEST_RETURN(recvreq);
    } else {
        mca_pml_ob1_recv_request_fini (recvreq);
        mca_pml_ob1_recvreq = recvreq;
    }

    return rc;
}
Example #8
int mca_pml_ob1_recv(void *addr,
                     size_t count,
                     ompi_datatype_t * datatype,
                     int src,
                     int tag,
                     struct ompi_communicator_t *comm,
                     ompi_status_public_t * status)
{
    mca_pml_ob1_recv_request_t *recvreq = NULL;
    int rc;

    if (OPAL_LIKELY(!ompi_mpi_thread_multiple)) {
        recvreq = mca_pml_ob1_recvreq;
        mca_pml_ob1_recvreq = NULL;
    }

    if( OPAL_UNLIKELY(NULL == recvreq) ) {
        MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq);
        if (NULL == recvreq)
            return OMPI_ERR_TEMP_OUT_OF_RESOURCE;
    }

    recvreq->req_recv.req_base.req_type = MCA_PML_REQUEST_RECV;
    MCA_PML_OB1_RECV_REQUEST_INIT(recvreq, addr, count, datatype,
                                  src, tag, comm, false);

    PERUSE_TRACE_COMM_EVENT (PERUSE_COMM_REQ_ACTIVATE,
                             &(recvreq->req_recv.req_base),
                             PERUSE_RECV);

    MCA_PML_OB1_RECV_REQUEST_START(recvreq);
    ompi_request_wait_completion(&recvreq->req_recv.req_base.req_ompi);

    if (NULL != status) {  /* return status */
        *status = recvreq->req_recv.req_base.req_ompi.req_status;
    }

    rc = recvreq->req_recv.req_base.req_ompi.req_status.MPI_ERROR;

    if (OPAL_UNLIKELY(ompi_mpi_thread_multiple || NULL != mca_pml_ob1_recvreq)) {
        MCA_PML_OB1_RECV_REQUEST_RETURN(recvreq);
    } else {
        mca_pml_ob1_recv_request_fini (recvreq);
        mca_pml_ob1_recvreq = recvreq;
    }

    return rc;
}
Example #9
int
mca_pml_ob1_improbe(int src,
                    int tag,
                    struct ompi_communicator_t *comm,
                    int *matched, 
                    struct ompi_message_t **message,
                    ompi_status_public_t * status)
{
    int rc = OMPI_SUCCESS;
    mca_pml_ob1_recv_request_t *recvreq;

    MCA_PML_OB1_RECV_REQUEST_ALLOC(recvreq, rc);
    if (NULL == recvreq)
        return rc;
    recvreq->req_recv.req_base.req_type = MCA_PML_REQUEST_IMPROBE;

    /* initialize the request enough to probe and get the status */
    MCA_PML_OB1_RECV_REQUEST_INIT(recvreq, NULL, 0, &ompi_mpi_char.dt, 
                                  src, tag, comm, false);
    MCA_PML_OB1_RECV_REQUEST_START(recvreq);

    if( recvreq->req_recv.req_base.req_ompi.req_complete == true ) {
        if( NULL != status ) {
            *status = recvreq->req_recv.req_base.req_ompi.req_status;
        }
        *matched = 1;

        *message = ompi_message_alloc();
        (*message)->comm = comm;
        (*message)->req_ptr = recvreq;
        (*message)->peer = recvreq->req_recv.req_base.req_ompi.req_status.MPI_SOURCE;
        (*message)->count = recvreq->req_recv.req_base.req_ompi.req_status._ucount;

        rc = OMPI_SUCCESS;
    } else {
        *matched = 0;

        /* we only free if we didn't match, because we're going to
           translate the request into a receive request later on if it
           was matched */
        ompi_request_free((ompi_request_t**)&recvreq);
        
        opal_progress();
    }

    return rc;
}
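
Example #9 combines the two previous ideas into a non-blocking matched probe: on a match it hands back both the status and an ompi_message_t, otherwise it frees the probe request and drives progress. Roughly, it backs this user-level pattern (standard MPI; the function name and buffer handling are illustrative):

#include <mpi.h>

/* Non-blocking matched probe: receive only if a message is already pending. */
static int try_matched_receive(char *buf, int maxlen, int src, int tag,
                               MPI_Comm comm)
{
    int flag = 0;
    MPI_Message msg;
    MPI_Status status;

    MPI_Improbe(src, tag, comm, &flag, &msg, &status);  /* mca_pml_ob1_improbe under ob1 */
    if (!flag) {
        return 0;  /* nothing matched; the PML freed its probe request */
    }
    MPI_Mrecv(buf, maxlen, MPI_CHAR, &msg, MPI_STATUS_IGNORE);
    return 1;
}
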
Example #10
int mca_pml_ob1_probe(int src,
                      int tag,
                      struct ompi_communicator_t *comm,
                      ompi_status_public_t * status)
{
    mca_pml_ob1_recv_request_t recvreq;

    OBJ_CONSTRUCT( &recvreq, mca_pml_ob1_recv_request_t );
    recvreq.req_recv.req_base.req_ompi.req_type = OMPI_REQUEST_PML;
    recvreq.req_recv.req_base.req_type = MCA_PML_REQUEST_PROBE;

    MCA_PML_OB1_RECV_REQUEST_INIT(&recvreq, NULL, 0, &ompi_mpi_char.dt, src, tag, comm, true);
    MCA_PML_OB1_RECV_REQUEST_START(&recvreq);

    ompi_request_wait_completion(&recvreq.req_recv.req_base.req_ompi);

    if (NULL != status) {
        OMPI_STATUS_SET(status, &recvreq.req_recv.req_base.req_ompi.req_status);
    }
    MCA_PML_BASE_RECV_REQUEST_FINI( &recvreq.req_recv );
    return OMPI_SUCCESS;
}
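
Example #10 is the blocking probe: unlike example #1 it waits on the probe request, so it always returns with a filled-in status. A common user-level use is sizing a buffer before the real receive (hedged sketch; the name and element type are illustrative):

#include <mpi.h>
#include <stdlib.h>

/* Block until a message is pending, then receive it into a right-sized buffer. */
static int probe_then_recv(int src, int tag, MPI_Comm comm)
{
    MPI_Status status;
    int count = 0;

    MPI_Probe(src, tag, comm, &status);          /* mca_pml_ob1_probe under ob1 */
    MPI_Get_count(&status, MPI_INT, &count);

    int *buf = malloc((count > 0 ? count : 1) * sizeof(int));
    MPI_Recv(buf, count, MPI_INT, status.MPI_SOURCE, status.MPI_TAG, comm,
             MPI_STATUS_IGNORE);
    free(buf);
    return count;
}
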
Example #11
int mca_pml_ob1_start(size_t count, ompi_request_t** requests)
{
    int rc;

    for (size_t i = 0 ; i < count ; ++i) {
        mca_pml_base_request_t *pml_request = (mca_pml_base_request_t*)requests[i];
        if (NULL == pml_request || OMPI_REQUEST_PML != requests[i]->req_type) {
            continue;
        }

        /* If the persistent request is currently active, verify that it is
         * still incomplete. If the PML layer has not completed the request,
         * mark it as free-called so that it is released once it completes,
         * and create a new request in its place.
         */

#if OPAL_ENABLE_MULTI_THREADS
        opal_atomic_rmb();
#endif

        /* start the request */
        switch(pml_request->req_type) {
            case MCA_PML_REQUEST_SEND:
            {
                mca_pml_ob1_send_request_t* sendreq = (mca_pml_ob1_send_request_t*)pml_request;
                MEMCHECKER(
                    memchecker_call(&opal_memchecker_base_isdefined,
                                    pml_request->req_addr, pml_request->req_count,
                                    pml_request->req_datatype);
                );

                if (!pml_request->req_pml_complete) {
                    ompi_request_t *request;

                    /* buffered sends can be mpi complete and pml incomplete. to support this
                     * case we need to allocate a new request. */
                    rc = mca_pml_ob1_isend_init (pml_request->req_addr,
                                                 pml_request->req_count,
                                                 pml_request->req_datatype,
                                                 pml_request->req_peer,
                                                 pml_request->req_tag,
                                                 sendreq->req_send.req_send_mode,
                                                 pml_request->req_comm,
                                                 &request);
                    if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
                        return rc;
                    }

                    /* copy the callback and callback data to the new requests */
                    request->req_complete_cb = pml_request->req_ompi.req_complete_cb;
                    request->req_complete_cb_data = pml_request->req_ompi.req_complete_cb_data;

                    /* ensure the old request gets released */
                    pml_request->req_free_called = true;

                    sendreq = (mca_pml_ob1_send_request_t *) request;
                    requests[i] = request;
                } else if (sendreq->req_send.req_bytes_packed != 0) {
                    size_t offset = 0;
                    /**
                     * Reset the convertor in case we're dealing with the original
                     * request, which when completed do not reset the convertor.
                     */
                    opal_convertor_set_position (&sendreq->req_send.req_base.req_convertor,
                                                 &offset);
                }

                /* reset the completion flag */
                pml_request->req_pml_complete = false;

                MCA_PML_OB1_SEND_REQUEST_START(sendreq, rc);
                if(rc != OMPI_SUCCESS)
                    return rc;
                break;
            }
            case MCA_PML_REQUEST_RECV:
            {
                mca_pml_ob1_recv_request_t* recvreq = (mca_pml_ob1_recv_request_t*)pml_request;
                MCA_PML_OB1_RECV_REQUEST_START(recvreq);
                break;
            }
            default:
                return OMPI_ERR_REQUEST;
        }
    }

    return OMPI_SUCCESS;
}
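
Example #11 backs MPI_Start/MPI_Startall for persistent requests: each inactive persistent send or receive request is reactivated, with special handling for buffered sends whose previous incarnation is MPI-complete but still held by the PML. The user-level pattern it serves looks roughly like this (standard MPI; the loop and function name are illustrative):

#include <mpi.h>

/* Persistent receive: set up once, then start and wait many times. */
static void persistent_recv_loop(int *buf, int count, int src, int tag,
                                 MPI_Comm comm, int iterations)
{
    MPI_Request req;
    MPI_Recv_init(buf, count, MPI_INT, src, tag, comm, &req);

    for (int i = 0; i < iterations; ++i) {
        MPI_Start(&req);                       /* dispatches to mca_pml_ob1_start under ob1 */
        MPI_Wait(&req, MPI_STATUS_IGNORE);     /* request becomes inactive again */
    }
    MPI_Request_free(&req);
}
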