Example #1
0
static void mca_oob_tcp_msg_matched(mca_oob_tcp_msg_t* msg, mca_oob_tcp_msg_t* match)
{
    int i, rc;

    if (match->msg_rc < 0) {
        /* the queued message already completed with an error; propagate it */
        rc = match->msg_rc;
    } else {

        /* the receiver asked us to allocate the buffer; propagate the flag
         * so the copy below allocates on its behalf */
        if (msg->msg_flags & ORTE_RML_ALLOC) match->msg_flags |= ORTE_RML_ALLOC;
        /* copy the data into the posted iovecs; if we are just doing a peek,
         * return the bytes without dequeuing the message */
        rc = mca_oob_tcp_msg_copy(match, msg->msg_uiov, msg->msg_ucnt);
        if (rc >= 0 && (ORTE_RML_TRUNC & msg->msg_flags)) {
            /* TRUNC: report the full message size by summing the data
             * iovecs, skipping the wire header held in iovec[0] */
            rc = 0;
            for (i = 1; i < match->msg_rwcnt + 1; i++)
                rc += match->msg_rwiov[i].iov_len;
        }
        if (ORTE_RML_PEEK & msg->msg_flags) {
            /* peek only: leave the message queued for the actual recv and
             * drop the lock while the user callback runs */
            OPAL_THREAD_UNLOCK(&mca_oob_tcp_component.tcp_match_lock);
            msg->msg_cbfunc(rc,
                &match->msg_peer,
                msg->msg_uiov,
                msg->msg_ucnt,
                match->msg_hdr.msg_tag,
                msg->msg_cbdata);
            OPAL_THREAD_LOCK(&mca_oob_tcp_component.tcp_match_lock);
            return;
        }
    }

    /* otherwise dequeue the matched message from the unexpected list */
    opal_list_remove_item(&mca_oob_tcp_component.tcp_msg_recv, (opal_list_item_t *) match);

    /* invoke the callback outside the lock, as above */
    OPAL_THREAD_UNLOCK(&mca_oob_tcp_component.tcp_match_lock);
    msg->msg_cbfunc(rc,
        &match->msg_peer,
        msg->msg_uiov,
        msg->msg_ucnt,
        match->msg_hdr.msg_tag,
        msg->msg_cbdata);
    OPAL_THREAD_LOCK(&mca_oob_tcp_component.tcp_match_lock);

    /* return match to free list */
    MCA_OOB_TCP_MSG_RETURN(match);
}
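
/*
 * Illustrative sketch (not from the original source): the ORTE_RML_TRUNC
 * branch above reports the full size of the incoming message rather than the
 * bytes actually copied, by summing the data iovecs and skipping the wire
 * header held in iovec[0]. The standalone helper below mirrors that
 * computation on a hypothetical iovec array with the same layout (header at
 * [0], payload fragments in [1..data_cnt]).
 */
#include <stddef.h>
#include <sys/uio.h>

static size_t example_full_payload_size(const struct iovec *iov, int data_cnt)
{
    size_t total = 0;
    int i;
    for (i = 1; i < data_cnt + 1; i++) {   /* skip the header at iov[0] */
        total += iov[i].iov_len;
    }
    return total;
}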
Example #2
0
/*
 * Progress a completed recv: either
 * (1) signal a posted recv as complete, or
 * (2) queue an unexpected message in the recv list
 */
static void mca_oob_tcp_msg_data(mca_oob_tcp_msg_t* msg, mca_oob_tcp_peer_t* peer)
{
    /* attempt to match unexpected message to a posted recv */
    mca_oob_tcp_msg_t* post;
    int rc;
    OPAL_THREAD_LOCK(&mca_oob_tcp_component.tcp_match_lock);

    /* if I'm not an MPI proc, check whether this message came from
     * another job family - procs don't need to do this because
     * they always route through their daemons anyway
     */
    if (!ORTE_PROC_IS_MPI) {
        if ((ORTE_JOB_FAMILY(msg->msg_hdr.msg_origin.jobid) !=
             ORTE_JOB_FAMILY(ORTE_PROC_MY_NAME->jobid)) &&
            (0 != ORTE_JOB_FAMILY(msg->msg_hdr.msg_origin.jobid))) {
            /* this message came from a different job family that is not
             * a local slave, so we may
             * not know how to route any reply back to the originator. Update
             * our route so we can dynamically build the routing table
             */
            if (ORTE_SUCCESS != (rc = orte_routed.update_route(&(msg->msg_hdr.msg_origin),
                                                               &(msg->msg_hdr.msg_src)))) {
                /* Nothing we can do about errors here as we definitely want
                 * the receive to complete, but at least bark loudly
                 */
                ORTE_ERROR_LOG(rc);
            }
        }
    }
    
    /* match msg against posted receives */
    post = mca_oob_tcp_msg_match_post(&msg->msg_hdr.msg_origin, msg->msg_hdr.msg_tag);
    if(NULL != post) {

        if(NULL == post->msg_uiov || 0 == post->msg_ucnt) {
            opal_output(0, "msg_data returning bad param");
            post->msg_rc = ORTE_ERR_BAD_PARAM;
        } else {
            /* copy msg data into posted recv */
            if (post->msg_flags & ORTE_RML_ALLOC) msg->msg_flags |= ORTE_RML_ALLOC;
            post->msg_rc = mca_oob_tcp_msg_copy(msg, post->msg_uiov, post->msg_ucnt);
            if (post->msg_flags & ORTE_RML_TRUNC) {
                /* TRUNC: report the full message size (data iovecs only,
                 * skipping the header at iovec[0]) */
                int i, size = 0;
                for (i = 1; i < msg->msg_rwcnt + 1; i++)
                    size += msg->msg_rwiov[i].iov_len;
                post->msg_rc = size;
            }
        }

        if (post->msg_flags & ORTE_RML_PEEK) {
            /* peek only: keep the message queued for the actual receive */
            opal_list_append(&mca_oob_tcp_component.tcp_msg_recv, &msg->super.super);
        } else {
            MCA_OOB_TCP_MSG_RETURN(msg);
        }
        mca_oob_tcp_component.tcp_match_count++;
        OPAL_THREAD_UNLOCK(&mca_oob_tcp_component.tcp_match_lock);

        if (post->msg_flags & ORTE_RML_PERSISTENT) {
            /* persistent recv: deliver via the callback but leave it posted */
            post->msg_cbfunc(
                post->msg_rc, 
                &peer->peer_name, 
                post->msg_uiov, 
                post->msg_ucnt, 
                post->msg_hdr.msg_tag, 
                post->msg_cbdata);
        } else {
            mca_oob_tcp_msg_complete(post, &msg->msg_hdr.msg_origin);
        }

        OPAL_THREAD_LOCK(&mca_oob_tcp_component.tcp_match_lock);
        if(--mca_oob_tcp_component.tcp_match_count == 0)
            opal_condition_signal(&mca_oob_tcp_component.tcp_match_cond);
        OPAL_THREAD_UNLOCK(&mca_oob_tcp_component.tcp_match_lock);

    } else {
        /* no posted recv matched: queue this as an unexpected message */
        opal_list_append(&mca_oob_tcp_component.tcp_msg_recv, (opal_list_item_t*)msg);
        OPAL_THREAD_UNLOCK(&mca_oob_tcp_component.tcp_match_lock);
    }
}
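
/*
 * Illustrative sketch (not from the original source): both examples drop
 * tcp_match_lock around the user callback and reacquire it afterwards, so a
 * callback that posts new receives can re-enter the OOB layer without
 * deadlocking on a non-recursive lock. A minimal pthread rendering of that
 * pattern, with hypothetical names:
 */
#include <pthread.h>

static pthread_mutex_t match_lock = PTHREAD_MUTEX_INITIALIZER;

typedef void (*user_cb_fn_t)(int status, void *cbdata);

static void deliver_outside_lock(user_cb_fn_t cbfunc, int status, void *cbdata)
{
    /* caller holds match_lock on entry, as in mca_oob_tcp_msg_data() */
    pthread_mutex_unlock(&match_lock);  /* callback may take the lock itself */
    cbfunc(status, cbdata);
    pthread_mutex_lock(&match_lock);    /* resume protected bookkeeping */
}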