/* Retire the connection's pending event: once the Elan library reports it
 * complete (non-blocking poll), drop our reference so a new event can be
 * posted. A connection with no pending event is left untouched. */
static void pselan_flush_event(pselan_con_info_t *ci)
{
    if (NULL == ci->event) {
        return; /* nothing outstanding on this connection */
    }
    if (elan_poll(ci->event, 0)) {
        ci->event = NULL;
    }
}
/*
 * Elan4 component progress.
 *
 * Polls every Elan BTL module for completed communication and drives the
 * matching upper-layer callbacks. Three sources are checked per module:
 *   1. fast receives on the hardware event queue,
 *   2. slower matched receives on the tport,
 *   3. completion of pending sends and posted RDMA operations.
 * Returns the number of events progressed during this invocation.
 */
int mca_btl_elan_component_progress( void )
{
    int num_progressed = 0, i;
    for( i = 0; i < (int)mca_btl_elan_component.elan_num_btls; i++ ) {
        mca_btl_elan_module_t* elan_btl = mca_btl_elan_component.elan_btls[i];
        /* This is a fast receive over the queue */
        if( elan_queueRxPoll( elan_btl->rx_queue, 0 ) ) {
            mca_btl_active_message_callback_t* reg;
            mca_btl_elan_hdr_t* elan_hdr = NULL;
            mca_btl_elan_frag_t frag;
            /* Poll already reported data, so this wait returns immediately
             * with a pointer to the queue slot holding the header. */
            elan_hdr = (mca_btl_elan_hdr_t*)elan_queueRxWait( elan_btl->rx_queue,
                                                              NULL, 0 );
            /* Build a temporary stack fragment describing the payload,
             * which sits directly after the header in the queue slot. */
            frag.base.des_dst = &frag.segment;
            frag.base.des_dst->seg_addr.pval = (void*)(elan_hdr+1);
            frag.base.des_dst->seg_len = (size_t)elan_hdr->length;
            frag.base.des_dst_cnt = 1;
            frag.tag = (mca_btl_base_tag_t)elan_hdr->tag;
            frag.size = elan_hdr->length;
            /* Dispatch to the active-message callback registered for this tag. */
            reg = mca_btl_base_active_message_trigger + frag.tag;
            reg->cbfunc( &(elan_btl->super), frag.tag, &(frag.base), reg->cbdata );
            /* Hand the queue slot back to the hardware. */
            elan_queueRxComplete( elan_btl->rx_queue );
            num_progressed++;
        }
        /* This is the slower receive over the tport */
        if(elan_btl->expect_tport_recv && !OPAL_THREAD_TRYLOCK(&elan_btl->elan_lock)) {
            mca_btl_elan_frag_t* frag =
                (mca_btl_elan_frag_t*)opal_list_get_first( &(elan_btl->recv_list) );
            /* NOTE(review): frag is dereferenced without a NULL check, so this
             * relies on expect_tport_recv != 0 implying a non-empty recv_list
             * — TODO confirm against the code that posts tport receives. */
            if( elan_done(frag->elan_event, 0) ) {
                int tag;
                size_t length;
                mca_btl_active_message_callback_t* reg;
                void* recv_buf;
                /* Completion already observed; this wait returns immediately
                 * with the received buffer, tag and length. */
                recv_buf = (mca_btl_elan_hdr_t*)elan_tportRxWait( frag->elan_event,
                                                                  NULL, &tag, &length );
                num_progressed++;
                /*elan_btl->expect_tport_recv--;*/
                opal_list_remove_first( &(elan_btl->recv_list) );
                /* Drop the lock before running the upper-layer callback. */
                OPAL_THREAD_UNLOCK(&elan_btl->elan_lock);
                frag->base.des_dst->seg_addr.pval = (void*)recv_buf;
                frag->base.des_dst->seg_len = length;
                frag->tag = (mca_btl_base_tag_t)tag;
                reg = mca_btl_base_active_message_trigger + frag->tag;
                reg->cbfunc( &(elan_btl->super), frag->tag, &(frag->base), reg->cbdata );
                /* If the library delivered the data in one of its own buffers
                 * (not the fragment's inline buffer at frag+1), release that
                 * buffer and point the descriptor back at the inline one. */
                if( recv_buf != (void*)(frag+1) ) {
                    elan_tportBufFree( elan_btl->tport, recv_buf );
                    frag->base.des_dst->seg_addr.pval = (void*)(frag+1);
                }
                /* Re-post the fragment so it can match the next message.
                 * NOTE(review): size comes from the global module template
                 * (mca_btl_elan_module), not elan_btl->super — presumably the
                 * eager limit is identical across modules; verify. */
                frag->elan_event = elan_tportRxStart( elan_btl->tport,
                                                      ELAN_TPORT_RXBUF | ELAN_TPORT_RXANY,
                                                      0, 0, 0, 0,
                                                      frag->base.des_dst->seg_addr.pval,
                                                      mca_btl_elan_module.super.btl_eager_limit );
                /* Reacquire the lock to put the fragment back on the list;
                 * the unlock below closes both paths of this if. */
                OPAL_THREAD_LOCK(&elan_btl->elan_lock);
                opal_list_append( &(elan_btl->recv_list), (opal_list_item_t*)frag );
            }
            OPAL_THREAD_UNLOCK(&elan_btl->elan_lock);
        }
        /* If there are any pending sends check their completion */
      recheck_send_list:
        if( !opal_list_is_empty( &(elan_btl->send_list) ) &&
            !OPAL_THREAD_TRYLOCK(&elan_btl->elan_lock) ) {
            mca_btl_elan_frag_t* frag =
                (mca_btl_elan_frag_t*)opal_list_get_first( &(elan_btl->send_list) );
            if( (NULL != frag) && elan_poll(frag->elan_event, 0) ) {
                int btl_ownership = (frag->base.des_flags & MCA_BTL_DES_FLAGS_BTL_OWNERSHIP );
                opal_list_remove_first( &(elan_btl->send_list) );
                /* Callback runs outside the lock; fragment is recycled only
                 * when the BTL owns it. */
                OPAL_THREAD_UNLOCK(&elan_btl->elan_lock);
                num_progressed++;
                frag->base.des_cbfunc( &(elan_btl->super), frag->endpoint,
                                       &(frag->base), OMPI_SUCCESS );
                if( btl_ownership ) {
                    MCA_BTL_ELAN_FRAG_RETURN(frag);
                }
                /* Drain: more sends may have completed behind this one. */
                goto recheck_send_list;
            } else {
                OPAL_THREAD_UNLOCK(&elan_btl->elan_lock);
            }
        }
      recheck_rdma_list:
        /* If any RDMA have been posted, check their status */
        if( !opal_list_is_empty( &(elan_btl->rdma_list) ) &&
            !OPAL_THREAD_TRYLOCK(&elan_btl->elan_lock) ) {
            mca_btl_elan_frag_t* frag =
                (mca_btl_elan_frag_t*)opal_list_get_first( &(elan_btl->rdma_list) );
            if( (NULL != frag) && elan_poll(frag->elan_event, 0) ) {
                int btl_ownership = (frag->base.des_flags & MCA_BTL_DES_FLAGS_BTL_OWNERSHIP );
                opal_list_remove_first( &(elan_btl->rdma_list) );
                OPAL_THREAD_UNLOCK(&elan_btl->elan_lock);
                num_progressed++;
                frag->base.des_cbfunc( &(elan_btl->super), frag->endpoint,
                                       &(frag->base), OMPI_SUCCESS );
                if( btl_ownership ) {
                    MCA_BTL_ELAN_FRAG_RETURN(frag);
                }
                /* Drain completed RDMA operations in the same way. */
                goto recheck_rdma_list;
            } else {
                OPAL_THREAD_UNLOCK(&elan_btl->elan_lock);
            }
        }
    }
    return num_progressed;
}