Example #1
0
static ucs_status_t progress_local_cq(uct_ugni_smsg_iface_t *iface){
    gni_return_t ugni_rc;
    gni_cq_entry_t event_data;
    uct_ugni_smsg_desc_t message_data;
    uct_ugni_smsg_desc_t *message_pointer;

    ugni_rc = GNI_CqGetEvent(iface->super.local_cq, &event_data);

    if(GNI_RC_NOT_DONE == ugni_rc){
        return UCS_OK;
    }

    if((GNI_RC_SUCCESS != ugni_rc && !event_data) || GNI_CQ_OVERRUN(event_data)){
        /* TODO: handle overruns */
        ucs_error("Error posting data. CQ overrun = %d", (int)GNI_CQ_OVERRUN(event_data));
        return UCS_ERR_NO_RESOURCE;
    }

    message_data.msg_id = GNI_CQ_GET_MSG_ID(event_data);
    message_pointer = sglib_hashed_uct_ugni_smsg_desc_t_find_member(iface->smsg_list,&message_data);
    ucs_assert(NULL != message_pointer);
    message_pointer->ep->outstanding--;
    iface->super.outstanding--;
    uct_ugni_ep_check_flush(message_pointer->ep);
    sglib_hashed_uct_ugni_smsg_desc_t_delete(iface->smsg_list,message_pointer);
    ucs_mpool_put(message_pointer);
    return UCS_INPROGRESS;
}
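
A minimal caller sketch, assuming only the UCS_OK / UCS_INPROGRESS return convention shown above; the helper name drain_local_cq is hypothetical and not part of the snippet.

static void drain_local_cq(uct_ugni_smsg_iface_t *iface)
{
    /* progress_local_cq() consumes at most one completion per call and
     * returns UCS_INPROGRESS when it did; any other status (UCS_OK for an
     * empty CQ, or an error code) ends the loop. */
    while (UCS_INPROGRESS == progress_local_cq(iface)) {
        /* keep polling until no more completions are pending */
    }
}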
Example #2
0
static void progress_local_cq(uct_ugni_smsg_iface_t *iface){
    gni_return_t ugni_rc;
    gni_cq_entry_t event_data;
    uct_ugni_smsg_desc_t message_data;
    uct_ugni_smsg_desc_t *message_pointer;

    if(0 == iface->super.outstanding){
        return;
    }

    ugni_rc = GNI_CqGetEvent(iface->super.local_cq, &event_data);
    if(GNI_RC_NOT_DONE == ugni_rc){
        return;
    }

    if((GNI_RC_SUCCESS != ugni_rc && !event_data) || GNI_CQ_OVERRUN(event_data)){
        /* TODO: handle overruns */
        ucs_error("Error posting data. CQ overrun = %d", (int)GNI_CQ_OVERRUN(event_data));
        return;
    }

    message_data.msg_id = GNI_CQ_GET_MSG_ID(event_data);
    message_pointer = sglib_hashed_uct_ugni_smsg_desc_t_find_member(iface->smsg_list,&message_data);
    message_pointer->ep->outstanding--;
    iface->super.outstanding--;
    sglib_hashed_uct_ugni_smsg_desc_t_delete(iface->smsg_list,message_pointer);
    ucs_mpool_put(message_pointer);
}
Example #3
0
ucs_status_t progress_remote_cq(uct_ugni_smsg_iface_t *iface)
{
    gni_return_t ugni_rc;
    gni_cq_entry_t event_data;
    uct_ugni_ep_t tl_ep;
    uct_ugni_ep_t *ugni_ep;
    uct_ugni_smsg_ep_t *ep;

    ugni_rc = GNI_CqGetEvent(iface->remote_cq, &event_data);

    if(GNI_RC_NOT_DONE == ugni_rc){
        return UCS_OK;
    }

    if (GNI_RC_SUCCESS != ugni_rc || !GNI_CQ_STATUS_OK(event_data) || GNI_CQ_OVERRUN(event_data)) {
        if(GNI_RC_ERROR_RESOURCE == ugni_rc || (GNI_RC_SUCCESS == ugni_rc && GNI_CQ_OVERRUN(event_data))){
            ucs_debug("Detected remote CQ overrun. ungi_rc = %d [%s]", ugni_rc, gni_err_str[ugni_rc]);
            uct_ugni_smsg_handle_remote_overflow(iface);
            return UCS_OK;
        }
        ucs_error("GNI_CqGetEvent falied with unhandled error. Error status %s %d ",
                  gni_err_str[ugni_rc], ugni_rc);
        return UCS_ERR_IO_ERROR;
    }

    tl_ep.hash_key = GNI_CQ_GET_INST_ID(event_data);
    ugni_ep = sglib_hashed_uct_ugni_ep_t_find_member(iface->super.eps, &tl_ep);
    ep = ucs_derived_of(ugni_ep, uct_ugni_smsg_ep_t);

    process_mbox(iface, ep);
    return UCS_INPROGRESS;
}
Example #4
0
static inline int
mca_btl_ugni_handle_remote_smsg_overrun (mca_btl_ugni_module_t *btl)
{
    gni_cq_entry_t event_data;
    unsigned int ep_index;
    int count, rc;

    BTL_VERBOSE(("btl/ugni_component detected SMSG CQ overrun. "
                 "processing message backlog..."));

    /* we don't know which endpoint lost an smsg completion. clear the
       smsg remote cq and check all mailboxes */

    /* clear out remote cq */
    do {
        rc = GNI_CqGetEvent (btl->smsg_remote_cq, &event_data);
    } while (GNI_RC_NOT_DONE != rc);

    for (ep_index = 0, count = 0 ; ep_index < btl->endpoint_count ; ++ep_index) {
        mca_btl_base_endpoint_t *ep = btl->endpoints[ep_index];

        if (NULL == ep || MCA_BTL_UGNI_EP_STATE_CONNECTED != ep->state) {
            continue;
        }

        /* clear out smsg mailbox */
        rc = mca_btl_ugni_smsg_process (ep);
        if (OPAL_LIKELY(rc >= 0)) {
            count += rc;
        }
    }

    return count;
}
Example #5
0
static void __nic_get_completed_txd(struct gnix_nic *nic,
				   gni_cq_handle_t hw_cq,
				   struct gnix_tx_descriptor **txd,
				   gni_return_t *tx_status)
{
	gni_post_descriptor_t *gni_desc;
	struct gnix_tx_descriptor *txd_p = NULL;
	struct gnix_fab_req *req;
	gni_return_t status;
	int msg_id;
	gni_cq_entry_t cqe;
	uint32_t recov = 1;

	if (__gnix_nic_txd_err_get(nic, &txd_p)) {
		*txd = txd_p;
		*tx_status = GNI_RC_TRANSACTION_ERROR;
		return;
	}

	status = GNI_CqGetEvent(hw_cq, &cqe);
	if (status == GNI_RC_NOT_DONE) {
		*txd = NULL;
		*tx_status = GNI_RC_NOT_DONE;
		return;
	}

	assert(status == GNI_RC_SUCCESS ||
	       status == GNI_RC_TRANSACTION_ERROR);

	if (unlikely(status == GNI_RC_TRANSACTION_ERROR)) {
		status = GNI_CqErrorRecoverable(cqe, &recov);
		if (status == GNI_RC_SUCCESS) {
			if (!recov) {
				char ebuf[512];

				GNI_CqErrorStr(cqe, ebuf, sizeof(ebuf));
				GNIX_WARN(FI_LOG_EP_DATA,
					  "CQ error status: %s\n",
					   ebuf);
			}
		} else {
			GNIX_WARN(FI_LOG_EP_DATA,
				  "GNI_CqErrorRecover returned: %s\n",
				   gni_err_str[status]);
			recov = 0;  /* assume something bad has happened */
		}
	}

	if (GNI_CQ_GET_TYPE(cqe) == GNI_CQ_EVENT_TYPE_POST) {
		status = GNI_GetCompleted(hw_cq, cqe, &gni_desc);

		assert(status == GNI_RC_SUCCESS ||
		       status == GNI_RC_TRANSACTION_ERROR);

		txd_p = container_of(gni_desc,
				   struct gnix_tx_descriptor,
				   gni_desc);
	} else if (GNI_CQ_GET_TYPE(cqe) == GNI_CQ_EVENT_TYPE_SMSG) {
Example #6
0
static inline int
mca_btl_ugni_progress_rdma (mca_btl_ugni_module_t *ugni_module)
{
    ompi_common_ugni_post_desc_t *desc;
    mca_btl_ugni_base_frag_t *frag;
    gni_cq_entry_t event_data = 0;
    uint32_t recoverable = 1;
    gni_return_t rc;

    rc = GNI_CqGetEvent (ugni_module->rdma_local_cq, &event_data);
    if (GNI_RC_NOT_DONE == rc) {
        return 0;
    }

    if (OPAL_UNLIKELY((GNI_RC_SUCCESS != rc && !event_data) || GNI_CQ_OVERRUN(event_data))) {
        /* TODO -- need to handle overrun -- how do we do this without an event?
           will the event eventually come back? Ask Cray */
        BTL_ERROR(("unhandled post error! ugni rc = %d", rc));
        assert (0);
        return ompi_common_rc_ugni_to_ompi (rc);
    }

    rc = GNI_GetCompleted (ugni_module->rdma_local_cq, event_data, (gni_post_descriptor_t **) &desc);
    if (OPAL_UNLIKELY(GNI_RC_SUCCESS != rc && GNI_RC_TRANSACTION_ERROR != rc)) {
        BTL_ERROR(("Error in GNI_GetComplete %s", gni_err_str[rc]));
        return ompi_common_rc_ugni_to_ompi (rc);
    }

    frag = MCA_BTL_UGNI_DESC_TO_FRAG(desc);

    if (OPAL_UNLIKELY(GNI_RC_SUCCESS != rc || !GNI_CQ_STATUS_OK(event_data))) {
        (void) GNI_CqErrorRecoverable (event_data, &recoverable);

        if (OPAL_UNLIKELY(++frag->post_desc.tries >= mca_btl_ugni_component.rdma_max_retries ||
                          !recoverable)) {
            /* give up */
            BTL_ERROR(("giving up on frag %p", (void *) frag));
            frag->cbfunc (frag, OMPI_ERROR);

            return OMPI_ERROR;
        }

        /* repost transaction */
        mca_btl_ugni_repost (frag, OMPI_SUCCESS);

        return 0;
    }

    BTL_VERBOSE(("RDMA/FMA complete for frag %p", (void *) frag));

    frag->cbfunc (frag, ompi_common_rc_ugni_to_ompi (rc));

    return 1;
}
Example #7
0
static int __nic_rx_progress(struct gnix_nic *nic)
{
	int ret = FI_SUCCESS;
	gni_return_t status = GNI_RC_NOT_DONE;
	gni_cq_entry_t cqe;

	status = GNI_CqTestEvent(nic->rx_cq);
	if (status == GNI_RC_NOT_DONE)
		return FI_SUCCESS;

	COND_ACQUIRE(nic->requires_lock, &nic->lock);

	do {
		status = GNI_CqGetEvent(nic->rx_cq, &cqe);
		if (unlikely(status == GNI_RC_NOT_DONE)) {
			ret = FI_SUCCESS;
			break;
		}

		if (likely(status == GNI_RC_SUCCESS)) {
			/* Find and schedule the associated VC. */
			ret = __process_rx_cqe(nic, cqe);
			if (ret != FI_SUCCESS) {
				GNIX_WARN(FI_LOG_EP_DATA,
					  "process_rx_cqe() failed: %d\n",
					  ret);
			}
		} else if (status == GNI_RC_ERROR_RESOURCE) {
			/* The remote CQ was overrun.  Events related to any VC
			 * could have been missed.  Schedule each VC to be sure
			 * all messages are processed. */
			assert(GNI_CQ_OVERRUN(cqe));
			__nic_rx_overrun(nic);
		} else {
			GNIX_WARN(FI_LOG_EP_DATA,
				  "GNI_CqGetEvent returned %s\n",
				  gni_err_str[status]);
			ret = gnixu_to_fi_errno(status);
			break;
		}
	} while (1);

	COND_RELEASE(nic->requires_lock, &nic->lock);

	return ret;
}
Example #8
0
void uct_ugni_progress(void *arg)
{
    gni_cq_entry_t  event_data = 0;
    gni_post_descriptor_t *event_post_desc_ptr;
    uct_ugni_base_desc_t *desc;
    uct_ugni_iface_t * iface = (uct_ugni_iface_t *)arg;
    gni_return_t ugni_rc;

    ugni_rc = GNI_CqGetEvent(iface->local_cq, &event_data);
    if (GNI_RC_NOT_DONE == ugni_rc) {
        goto out;
    }

    if ((GNI_RC_SUCCESS != ugni_rc && !event_data) || GNI_CQ_OVERRUN(event_data)) {
        ucs_error("GNI_CqGetEvent falied. Error status %s %d ",
                  gni_err_str[ugni_rc], ugni_rc);
        return;
    }

    ugni_rc = GNI_GetCompleted(iface->local_cq, event_data, &event_post_desc_ptr);
    if (GNI_RC_SUCCESS != ugni_rc && GNI_RC_TRANSACTION_ERROR != ugni_rc) {
        ucs_error("GNI_GetCompleted falied. Error status %s %d %d",
                  gni_err_str[ugni_rc], ugni_rc, GNI_RC_TRANSACTION_ERROR);
        return;
    }

    desc = (uct_ugni_base_desc_t *)event_post_desc_ptr;
    ucs_trace_async("Completion received on %p", desc);

    if (NULL != desc->comp_cb) {
        uct_invoke_completion(desc->comp_cb, UCS_OK);
    }
    --iface->outstanding;
    --desc->ep->outstanding;

    if (ucs_likely(0 == desc->not_ready_to_free)) {
        ucs_mpool_put(desc);
    }

    uct_ugni_ep_check_flush(desc->ep);

out:
    /* have a go a processing the pending queue */
    ucs_arbiter_dispatch(&iface->arbiter, 1, uct_ugni_ep_process_pending, NULL);
    return;
}
Example #9
0
int mca_btl_ugni_progress_remote_smsg (mca_btl_ugni_module_t *btl)
{
    mca_btl_base_endpoint_t *ep;
    gni_cq_entry_t event_data;
    gni_return_t grc;
    uint64_t inst_id;

    grc = GNI_CqGetEvent (btl->smsg_remote_cq, &event_data);
    if (GNI_RC_NOT_DONE == grc) {
        return 0;
    }

    if (OPAL_UNLIKELY(GNI_RC_SUCCESS != grc || !GNI_CQ_STATUS_OK(event_data) ||
                      GNI_CQ_OVERRUN(event_data))) {
        if (GNI_RC_ERROR_RESOURCE == grc ||
            (GNI_RC_SUCCESS == grc && GNI_CQ_OVERRUN(event_data))) {
            /* recover from smsg cq overrun */
            return mca_btl_ugni_handle_remote_smsg_overrun (btl);
        }

        BTL_ERROR(("unhandled error in GNI_CqGetEvent"));

        /* unhandled error: crash */
        assert (0);
        return ompi_common_rc_ugni_to_ompi (grc);
    }

    BTL_VERBOSE(("REMOTE CQ: Got event 0x%" PRIx64 ". msg id = %" PRIu64
                 ". ok = %d, type = %" PRIu64 "\n", (uint64_t) event_data,
                 GNI_CQ_GET_MSG_ID(event_data), GNI_CQ_STATUS_OK(event_data),
                 GNI_CQ_GET_TYPE(event_data)));

    inst_id = GNI_CQ_GET_INST_ID(event_data);

    ep = btl->endpoints[inst_id & 0xffffffff];
    if (OPAL_UNLIKELY(MCA_BTL_UGNI_EP_STATE_CONNECTED != ep->state)) {
        /* due to the nature of datagrams we may get a smsg completion before
           we get mailbox info from the peer */
        BTL_VERBOSE(("event occurred on an unconnected endpoint! ep state = %d", ep->state));
        return 0;
    }

    return mca_btl_ugni_smsg_process (ep);
}
Example #10
0
static void uct_ugni_smsg_handle_remote_overflow(uct_ugni_smsg_iface_t *iface){
    gni_return_t ugni_rc;
    gni_cq_entry_t event_data;
    struct sglib_hashed_uct_ugni_ep_t_iterator ep_iterator;
    uct_ugni_ep_t *current_ep;
    uct_ugni_smsg_ep_t *ep;

    /* We don't know which EP dropped a completion entry, so flush everything */
    do{
        ugni_rc = GNI_CqGetEvent(iface->remote_cq, &event_data);
    } while(GNI_RC_NOT_DONE != ugni_rc);

    current_ep = sglib_hashed_uct_ugni_ep_t_it_init(&ep_iterator, iface->super.eps);
    while(NULL != current_ep){
        ep = ucs_derived_of(current_ep, uct_ugni_smsg_ep_t);
        process_mbox(iface, ep);
        current_ep = sglib_hashed_uct_ugni_ep_t_it_next(&ep_iterator);
    }
}
Example #11
0
static int __nic_rx_overrun(struct gnix_nic *nic)
{
	int i, max_id, ret;
	struct gnix_vc *vc;
	gni_return_t status;
	gni_cq_entry_t cqe;

	GNIX_WARN(FI_LOG_EP_DATA, "\n");

	/* clear out the CQ */
	/*
	 * TODO:  really need to process CQEs better for error reporting,
	 * etc.
	 */
	while ((status = GNI_CqGetEvent(nic->rx_cq, &cqe)) == GNI_RC_SUCCESS);
	assert(status == GNI_RC_NOT_DONE);

	COND_ACQUIRE(nic->requires_lock, &nic->vc_id_lock);
	max_id = nic->vc_id_table_count;
	COND_RELEASE(nic->requires_lock, &nic->vc_id_lock);
	/*
	 * TODO: optimization would
	 * be to keep track of last time
	 * this happened and where smsg msgs.
	 * were found.
	 */
	for (i = 0; i < max_id; i++) {
		ret = _gnix_test_bit(&nic->vc_id_bitmap, i);
		if (ret) {
			vc = __gnix_nic_elem_by_rem_id(nic, i);
			ret = _gnix_vc_dequeue_smsg(vc);
			if (ret != FI_SUCCESS) {
				GNIX_WARN(FI_LOG_EP_DATA,
					  "_gnix_vc_dqueue_smsg returned %d\n",
					  ret);
			}
		}
	}

	return FI_SUCCESS;
}
Example #12
0
void uct_ugni_progress(void *arg)
{
    gni_cq_entry_t  event_data = 0;
    gni_post_descriptor_t *event_post_desc_ptr;
    uct_ugni_base_desc_t *desc;
    uct_ugni_iface_t * iface = (uct_ugni_iface_t *)arg;
    gni_return_t ugni_rc;

    ugni_rc = GNI_CqGetEvent(iface->local_cq, &event_data);
    if (GNI_RC_NOT_DONE == ugni_rc) {
        return;
    }

    if ((GNI_RC_SUCCESS != ugni_rc && !event_data) || GNI_CQ_OVERRUN(event_data)) {
        ucs_error("GNI_CqGetEvent falied. Error status %s %d ",
                  gni_err_str[ugni_rc], ugni_rc);
        return;
    }

    ugni_rc = GNI_GetCompleted(iface->local_cq, event_data, &event_post_desc_ptr);
    if (GNI_RC_SUCCESS != ugni_rc && GNI_RC_TRANSACTION_ERROR != ugni_rc) {
        ucs_error("GNI_GetCompleted falied. Error status %s %d %d",
                  gni_err_str[ugni_rc], ugni_rc, GNI_RC_TRANSACTION_ERROR);
        return;
    }

    desc = (uct_ugni_base_desc_t *)event_post_desc_ptr;
    ucs_trace_async("Completion received on %p", desc);

    if (NULL != desc->comp_cb) {
        uct_invoke_completion(desc->comp_cb);
    }
    --iface->outstanding;
    --desc->ep->outstanding;

    if (ucs_likely(desc->not_ready_to_free == 0)) {
        ucs_mpool_put(desc);
    }
    return;
}
Example #13
0
static inline int mca_btl_ugni_progress_rdma (mca_btl_ugni_module_t *ugni_module, int which_cq)
{
    mca_btl_ugni_post_descriptor_t *post_desc = NULL;
    gni_cq_entry_t event_data = 0;
    gni_post_descriptor_t *desc;
    uint32_t recoverable = 1;
    gni_return_t grc;
    gni_cq_handle_t the_cq;

    the_cq = (which_cq == 0) ? ugni_module->rdma_local_cq : ugni_module->rdma_local_irq_cq;

    OPAL_THREAD_LOCK(&ugni_module->device->dev_lock);
    grc = GNI_CqGetEvent (the_cq, &event_data);
    if (GNI_RC_NOT_DONE == grc) {
        OPAL_THREAD_UNLOCK(&ugni_module->device->dev_lock);
        return 0;
    }

    if (OPAL_UNLIKELY((GNI_RC_SUCCESS != grc && !event_data) || GNI_CQ_OVERRUN(event_data))) {
        /* TODO -- need to handle overrun -- how do we do this without an event?
           will the event eventually come back? Ask Cray */
        BTL_ERROR(("unhandled post error! ugni rc = %d %s", grc, gni_err_str[grc]));
        OPAL_THREAD_UNLOCK(&ugni_module->device->dev_lock);

        return opal_common_rc_ugni_to_opal (grc);
    }

    grc = GNI_GetCompleted (the_cq, event_data, &desc);
    OPAL_THREAD_UNLOCK(&ugni_module->device->dev_lock);
    if (OPAL_UNLIKELY(GNI_RC_SUCCESS != grc && GNI_RC_TRANSACTION_ERROR != grc)) {
        BTL_ERROR(("Error in GNI_GetComplete %s", gni_err_str[grc]));
        return opal_common_rc_ugni_to_opal (grc);
    }

    post_desc = MCA_BTL_UGNI_DESC_TO_PDESC(desc);

    if (OPAL_UNLIKELY(GNI_RC_SUCCESS != grc || !GNI_CQ_STATUS_OK(event_data))) {
        (void) GNI_CqErrorRecoverable (event_data, &recoverable);

        if (OPAL_UNLIKELY(++post_desc->desc.tries >= mca_btl_ugni_component.rdma_max_retries ||
                          !recoverable)) {
            char char_buffer[1024];
            GNI_CqErrorStr (event_data, char_buffer, 1024);
            /* give up */
            BTL_ERROR(("giving up on desciptor %p, recoverable %d: %s", (void *) post_desc,
                       recoverable, char_buffer));
#if OPAL_ENABLE_DEBUG
            btl_ugni_dump_post_desc (post_desc);
#endif
            mca_btl_ugni_post_desc_complete (ugni_module, post_desc, OPAL_ERROR);

            return OPAL_ERROR;
        }

        mca_btl_ugni_repost (ugni_module, post_desc);

        return 0;
    }

    mca_btl_ugni_post_desc_complete (ugni_module, post_desc, opal_common_rc_ugni_to_opal (grc));

    return 1;
}
Example #14
0
/*
 * this function is intended to be invoked as an argument to pthread_create,
 */
static void *__gnix_nic_prog_thread_fn(void *the_arg)
{
	int ret = FI_SUCCESS, prev_state;
	int retry = 0;
	uint32_t which;
	struct gnix_nic *nic = (struct gnix_nic *)the_arg;
	sigset_t  sigmask;
	gni_cq_handle_t cqv[2];
	gni_return_t status;
	gni_cq_entry_t cqe;

	GNIX_TRACE(FI_LOG_EP_CTRL, "\n");

	/*
	 * temporarily disable cancelability while we set up
	 * some stuff
	 */

	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &prev_state);

	/*
	 * help out Cray core-spec, say we're not an app thread
	 * and can be run on core-spec cpus.
	 */

	ret = _gnix_task_is_not_app();
	if (ret)
		GNIX_WARN(FI_LOG_EP_CTRL,
			"_gnix_task_is_not_app call returned %d\n",
			ret);

	/*
	 * block all signals, don't want this thread to catch
	 * signals that may be for app threads
	 */

	memset(&sigmask, 0, sizeof(sigset_t));
	ret = sigfillset(&sigmask);
	if (ret) {
		GNIX_WARN(FI_LOG_EP_CTRL,
		"sigfillset call returned %d\n", ret);
	} else {

		ret = pthread_sigmask(SIG_SETMASK,
					&sigmask, NULL);
		if (ret)
			GNIX_WARN(FI_LOG_EP_CTRL,
			"pthread_sigmask call returned %d\n", ret);
	}

	/*
	 * okay now we're ready to be cancelable.
	 */

	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &prev_state);

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	cqv[0] = nic->tx_cq_blk;
	cqv[1] = nic->rx_cq_blk;

try_again:
	status = GNI_CqVectorMonitor(cqv,
				     2,
				     -1,
				     &which);

	switch (status) {
	case GNI_RC_SUCCESS:

		/*
		 * first dequeue RX CQEs
		 */
		if (which == 1) {
			do {
				status = GNI_CqGetEvent(nic->rx_cq_blk,
							&cqe);
			} while (status == GNI_RC_SUCCESS);
		}
		_gnix_nic_progress(nic);
		retry = 1;
		break;
	case GNI_RC_TIMEOUT:
		retry = 1;
		break;
	case GNI_RC_NOT_DONE:
		retry = 1;
		break;
	case GNI_RC_INVALID_PARAM:
	case GNI_RC_INVALID_STATE:
	case GNI_RC_ERROR_RESOURCE:
	case GNI_RC_ERROR_NOMEM:
		retry = 0;
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "GNI_CqGetEvent returned %s\n",
			  gni_err_str[status]);
		break;
	default:
		retry = 0;
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "GNI_CqGetEvent returned unexpected code %s\n",
			  gni_err_str[status]);
		break;
	}

	if (retry)
		goto try_again;

	return NULL;
}
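
All of the examples share one CQ-polling shape: GNI_CqGetEvent() is called once, GNI_RC_NOT_DONE means nothing is pending, an overrun (GNI_CQ_OVERRUN() set, or GNI_RC_ERROR_RESOURCE) means events were lost and every endpoint must be rescanned, and only a clean event is looked up and dispatched. A condensed sketch of that shared pattern follows; handle_overrun() and dispatch_event() are hypothetical stand-ins for the per-library recovery and completion logic shown above.

#include "gni_pub.h"

/* Condensed sketch of the CQ-polling pattern shared by the examples above.
 * handle_overrun() and dispatch_event() are hypothetical stand-ins for the
 * per-library logic (drain-and-rescan, descriptor lookup, user callbacks). */
static void handle_overrun(gni_cq_handle_t cq);        /* hypothetical */
static void dispatch_event(gni_cq_entry_t event_data); /* hypothetical */

static int poll_cq_once(gni_cq_handle_t cq)
{
    gni_cq_entry_t event_data = 0;
    gni_return_t rc;

    rc = GNI_CqGetEvent(cq, &event_data);
    if (GNI_RC_NOT_DONE == rc) {
        return 0;                       /* nothing has completed yet */
    }

    if (GNI_RC_ERROR_RESOURCE == rc || GNI_CQ_OVERRUN(event_data)) {
        /* events were dropped: drain the CQ and rescan every endpoint */
        handle_overrun(cq);
        return 0;
    }

    if (GNI_RC_SUCCESS != rc || !GNI_CQ_STATUS_OK(event_data)) {
        return -1;                      /* unhandled/unrecoverable CQ error */
    }

    /* dispatch by msg id / inst id, or via GNI_GetCompleted() for posts */
    dispatch_event(event_data);
    return 1;                           /* one event consumed */
}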