/*
 * Send connect information to the remote endpoint.
 */
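/*
 * Wire format, as packed below: the message type (uint8), our subnet id
 * (uint64) and our LID (uint16) are always sent; depending on the message
 * type we then append qp_num/psn/MTU, the remote LID, the remote XRC recv
 * QP number, and/or our endpoint index plus one XRC SRQ number per QP.
 */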
static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
        uint8_t message_type)
{
    opal_buffer_t* buffer = OBJ_NEW(opal_buffer_t);
    int rc, srq;

    if (NULL == buffer) {
        OMPI_ERROR_LOG(OMPI_ERR_OUT_OF_RESOURCE);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* Build the standard header that we use in all messages:
     * - message type
     * - our subnet id
     * - our LID
     */
    /* pack the info into the send buffer */
    BTL_VERBOSE(("Send pack Message type = %d", message_type));
    BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT8));
    rc = opal_dss.pack(buffer, &message_type, 1, OPAL_UINT8);
    if (OPAL_SUCCESS != rc) {
        OMPI_ERROR_LOG(rc);
        OBJ_RELEASE(buffer);
        return rc;
    }

    BTL_VERBOSE(("Send pack sid = %" PRIx64 "\n", endpoint->subnet_id));
    BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT64));
    rc = opal_dss.pack(buffer, &endpoint->subnet_id, 1, OPAL_UINT64);
    if (OPAL_SUCCESS != rc) {
        OMPI_ERROR_LOG(rc);
        OBJ_RELEASE(buffer);
        return rc;
    }

    BTL_VERBOSE(("Send pack lid = %d", endpoint->endpoint_btl->lid));
    BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16));
    rc = opal_dss.pack(buffer, &endpoint->endpoint_btl->lid, 1, OPAL_UINT16);
    if (OPAL_SUCCESS != rc) {
        OMPI_ERROR_LOG(rc);
        OBJ_RELEASE(buffer);
        return rc;
    }

    /* Append to the standard header the additional information that is
     * required for a full (open qp, etc.) connect request and response:
     * - qp_num of the first qp
     * - psn of the first qp
     * - MTU
     */
    if (ENDPOINT_XOOB_CONNECT_REQUEST == message_type ||
            ENDPOINT_XOOB_CONNECT_RESPONSE == message_type) {
        uint32_t psn, qp_num;

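        /* On a connect request we advertise our local send QP; on a
         * connect response we advertise the XRC receive QP created for
         * the remote side. */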
        if (ENDPOINT_XOOB_CONNECT_REQUEST == message_type) {
            qp_num = endpoint->qps[0].qp->lcl_qp->qp_num;
            psn = endpoint->qps[0].qp->lcl_psn;
        } else {
            qp_num = endpoint->xrc_recv_qp_num;
            psn = endpoint->xrc_recv_psn;
        }
        /* stuff the QP info into the buffer; we only need to send one QP */
        BTL_VERBOSE(("Send pack qp num = %x", qp_num));
        BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
        rc = opal_dss.pack(buffer, &qp_num, 1, OPAL_UINT32);
        if (OPAL_SUCCESS != rc) {
            OMPI_ERROR_LOG(rc);
            OBJ_RELEASE(buffer);
            return rc;
        }
        BTL_VERBOSE(("Send pack lpsn = %d", psn));
        BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
        rc = opal_dss.pack(buffer, &psn, 1, OPAL_UINT32);
        if (OPAL_SUCCESS != rc) {
            OMPI_ERROR_LOG(rc);
            OBJ_RELEASE(buffer);
            return rc;
        }

        BTL_VERBOSE(("Send pack mtu = %d", endpoint->endpoint_btl->device->mtu));
        BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
        rc = opal_dss.pack(buffer, &endpoint->endpoint_btl->device->mtu, 1,
                OPAL_UINT32);
        if (OPAL_SUCCESS != rc) {
            OMPI_ERROR_LOG(rc);
            OBJ_RELEASE(buffer);
            return rc;
        }
    }

    /* Append to the header above the additional information that is
     * required for full & XRC connect requests:
     * - the LID of the BTL on the remote side that we want to connect to
     */
    if (ENDPOINT_XOOB_CONNECT_REQUEST == message_type ||
            ENDPOINT_XOOB_CONNECT_XRC_REQUEST == message_type) {
        /* when sending a request we add the remote LID that we want to connect to */

        BTL_VERBOSE(("Send pack remote lid = %d", endpoint->ib_addr->lid));
        BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16));
        rc = opal_dss.pack(buffer, &endpoint->ib_addr->lid, 1, OPAL_UINT16);
        if (OPAL_SUCCESS != rc) {
            OMPI_ERROR_LOG(rc);
            OBJ_RELEASE(buffer);
            return rc;
        }
    }

    /* When sending an XRC request we add the remote
     * recv QP number that we want to connect to. */
    if (ENDPOINT_XOOB_CONNECT_XRC_REQUEST == message_type) {
        BTL_VERBOSE(("Send pack remote qp = %x", endpoint->ib_addr->remote_xrc_rcv_qp_num));
        BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
        rc = opal_dss.pack(buffer, &endpoint->ib_addr->remote_xrc_rcv_qp_num,
                1, OPAL_UINT32);
        if (OPAL_SUCCESS != rc) {
            OMPI_ERROR_LOG(rc);
            OBJ_RELEASE(buffer);
            return rc;
        }
    }

    /* Append to the header above the additional information that is
     * required for full & XRC connect responses:
     * - the index of our endpoint
     * - the array of XRC SRQ numbers
     */
    if (ENDPOINT_XOOB_CONNECT_RESPONSE == message_type ||
            ENDPOINT_XOOB_CONNECT_XRC_RESPONSE == message_type) {
        /* we need to send the endpoint index for immediate send */
        BTL_VERBOSE(("Send pack index = %d", endpoint->index));
        BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
        rc = opal_dss.pack(buffer, &endpoint->index, 1, OPAL_UINT32);
        if (OPAL_SUCCESS != rc) {
            OMPI_ERROR_LOG(rc);
            OBJ_RELEASE(buffer);
            return rc;
        }
        /* on response we add all SRQ numbers */
        for (srq = 0; srq < mca_btl_openib_component.num_xrc_qps; srq++) {
            BTL_VERBOSE(("Send pack srq[%d] num  = %d", srq, endpoint->endpoint_btl->qps[srq].u.srq_qp.srq->xrc_srq_num));
            BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
            rc = opal_dss.pack(buffer, &endpoint->endpoint_btl->qps[srq].u.srq_qp.srq->xrc_srq_num,
                    1, OPAL_UINT32);
            if (OPAL_SUCCESS != rc) {
                OMPI_ERROR_LOG(rc);
                OBJ_RELEASE(buffer);
                return rc;
            }
        }
    }

    /* send to remote endpoint */
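    /* On a successful send the RML owns the buffer; xoob_rml_send_cb is
     * expected to release it once the send completes. */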
    rc = ompi_rte_send_buffer_nb(&endpoint->endpoint_proc->proc_ompi->proc_name,
            buffer, OMPI_RML_TAG_XOPENIB,
            xoob_rml_send_cb, NULL);
    if (OMPI_SUCCESS != rc) {
        OMPI_ERROR_LOG(rc);
        OBJ_RELEASE(buffer);
        return rc;
    }

    BTL_VERBOSE(("Send QP Info, LID = %d, SUBNET = %" PRIx64 ", Message type = %d",
                endpoint->endpoint_btl->lid,
                endpoint->subnet_id,
                message_type));

    return OMPI_SUCCESS;
}
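
/*
 * Connect to the event logger identified by el_rank: look up the port it
 * published, route RML traffic to it, send a wake-up message so it enters
 * connect/accept, then exchange the initial handshake (our rank goes out;
 * the logger's max buffer size and max clock come back in connect_info).
 */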
int vprotocol_pessimist_event_logger_connect(int el_rank, ompi_communicator_t **el_comm)
{
    int rc;
    opal_buffer_t *buffer;
    char *port;
    ompi_process_name_t el_proc;
    char *hnp_uri, *rml_uri;
    ompi_rml_tag_t el_tag;
    char name[MPI_MAX_PORT_NAME];
    int rank;
    vprotocol_pessimist_clock_t connect_info[2];

    snprintf(name, MPI_MAX_PORT_NAME, VPROTOCOL_EVENT_LOGGER_NAME_FMT, el_rank);
    port = ompi_pubsub.lookup(name, MPI_INFO_NULL);
    if (NULL == port) {
        return OMPI_ERR_NOT_FOUND;
    }
    V_OUTPUT_VERBOSE(45, "Found port < %s >", port);

    /* separate the string into the HNP URI, the RML URI, and the tag */
    if (OMPI_SUCCESS != (rc = ompi_dpm.parse_port(port, &hnp_uri, &rml_uri, &el_tag))) {
        OMPI_ERROR_LOG(rc);
        return rc;
    }
    /* extract the originating proc's name */
    if (OMPI_SUCCESS != (rc = ompi_rte_parse_uris(rml_uri, &el_proc, NULL))) {
        OMPI_ERROR_LOG(rc);
        free(rml_uri); free(hnp_uri);
        return rc;
    }
    /* make sure we can route rml messages to the destination */
    if (OMPI_SUCCESS != (rc = ompi_dpm.route_to_port(hnp_uri, &el_proc))) {
        OMPI_ERROR_LOG(rc);
        free(rml_uri); free(hnp_uri);
        return rc;
    }
    free(rml_uri); free(hnp_uri);

    /* Send an RML message to tell the remote end to wake up and jump into
     * connect/accept */
    buffer = OBJ_NEW(opal_buffer_t);
    if (NULL == buffer) {
        OMPI_ERROR_LOG(OMPI_ERR_OUT_OF_RESOURCE);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    ompi_rte_send_buffer_nb(&el_proc, buffer, el_tag + 1, NULL, NULL);

    rc = ompi_dpm.connect_accept(MPI_COMM_SELF, 0, port, true, el_comm);
    if (OMPI_SUCCESS != rc) {
        OMPI_ERROR_LOG(rc);
        return rc;
    }

    /* Send our rank, then receive the max buffer size and max_clock back */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    rc = mca_pml_v.host_pml.pml_send(&rank, 1, MPI_INT, 0,
                                     VPROTOCOL_PESSIMIST_EVENTLOG_NEW_CLIENT_CMD,
                                     MCA_PML_BASE_SEND_STANDARD,
                                     mca_vprotocol_pessimist.el_comm);
    if (OPAL_UNLIKELY(MPI_SUCCESS != rc))
        OMPI_ERRHANDLER_INVOKE(mca_vprotocol_pessimist.el_comm, rc,
                               __FILE__ ": failed sending event logger handshake");
    rc = mca_pml_v.host_pml.pml_recv(&connect_info, 2, MPI_UNSIGNED_LONG_LONG,
                                     0, VPROTOCOL_PESSIMIST_EVENTLOG_NEW_CLIENT_CMD,
                                     mca_vprotocol_pessimist.el_comm, MPI_STATUS_IGNORE);
    if (OPAL_UNLIKELY(MPI_SUCCESS != rc))
        OMPI_ERRHANDLER_INVOKE(mca_vprotocol_pessimist.el_comm, rc,
                               __FILE__ ": failed receiving event logger handshake");

    return rc;
}