/*
 * Common active-message send path over GNI SMSG.
 *
 * Allocates nothing itself: the caller supplies @desc from the iface mpool
 * and this function either hands it off to the in-flight hash (on success)
 * or returns it to the mpool (on any failure).
 *
 * Returns UCS_OK on a successful send, UCS_ERR_NO_RESOURCE when the
 * endpoint cannot send or GNI_SmsgSendWTag does not accept the message.
 */
static UCS_F_ALWAYS_INLINE ucs_status_t
uct_ugni_smsg_ep_am_common_send(uct_ugni_smsg_ep_t *ep,
                                uct_ugni_smsg_iface_t *iface,
                                uint8_t am_id, unsigned header_length,
                                void *header, unsigned payload_length,
                                void *payload, uct_ugni_smsg_desc_t *desc)
{
    gni_return_t send_rc;

    if (ucs_unlikely(!uct_ugni_ep_can_send(&ep->super))) {
        /* endpoint not ready: recycle the descriptor and report back-pressure */
        ucs_trace("Smsg send failed.");
        ucs_mpool_put(desc);
        UCS_STATS_UPDATE_COUNTER(ep->super.super.stats, UCT_EP_STAT_NO_RES, 1);
        return UCS_ERR_NO_RESOURCE;
    }

    /* msg_id doubles as the SMSG message id so completions can find desc */
    desc->msg_id      = iface->smsg_id++;
    desc->flush_group = ep->super.flush_group;

    uct_ugni_cdm_lock(&iface->super.cdm);
    send_rc = GNI_SmsgSendWTag(ep->super.ep, header, header_length,
                               payload, payload_length, desc->msg_id, am_id);
    uct_ugni_cdm_unlock(&iface->super.cdm);

    if (GNI_RC_SUCCESS != send_rc) {
        /* send rejected (e.g. mailbox credits exhausted): treat as no-resource */
        ucs_trace("Smsg send failed.");
        ucs_mpool_put(desc);
        UCS_STATS_UPDATE_COUNTER(ep->super.super.stats, UCT_EP_STAT_NO_RES, 1);
        return UCS_ERR_NO_RESOURCE;
    }

    /* message is in flight: account for it and track the descriptor */
    ++desc->flush_group->flush_comp.count;
    ++iface->super.outstanding;
    sglib_hashed_uct_ugni_smsg_desc_t_add(iface->smsg_list, desc);

    return UCS_OK;
}
/*
 * Tear down the connection state of a uGNI BTL endpoint.
 *
 * When the endpoint is connected and @send_disconnect is set, a best-effort
 * zero-byte DISCONNECT tag is sent to the peer first (failure is only
 * logged).  The SMSG and RDMA GNI endpoint handles are then destroyed, the
 * SMSG mailbox is returned to the free list, and the endpoint is reset to
 * the INIT state.  Always returns OMPI_SUCCESS.
 */
int mca_btl_ugni_ep_disconnect (mca_btl_base_endpoint_t *ep, bool send_disconnect)
{
    gni_return_t rc;

    if (MCA_BTL_UGNI_EP_STATE_INIT == ep->state) {
        /* never connected: nothing to tear down */
        return OMPI_SUCCESS;
    }

    if (MCA_BTL_UGNI_EP_STATE_CONNECTED == ep->state && send_disconnect) {
        rc = GNI_SmsgSendWTag (ep->smsg_ep_handle, NULL, 0, NULL, 0, -1,
                               MCA_BTL_UGNI_TAG_DISCONNECT);
        if (GNI_RC_SUCCESS != rc) {
            BTL_VERBOSE(("btl/ugni could not send close message"));
        }

        /* we might want to wait for local completion here (do we even care) */
    }

    /* endpoint handle teardown is best-effort; errors are ignored */
    (void) ompi_common_ugni_ep_destroy (&ep->smsg_ep_handle);
    (void) ompi_common_ugni_ep_destroy (&ep->rdma_ep_handle);

    OMPI_FREE_LIST_RETURN(&ep->btl->smsg_mboxes,
                          ((ompi_free_list_item_t *) ep->mailbox));
    ep->mailbox = NULL;

    ep->state = MCA_BTL_UGNI_EP_STATE_INIT;

    return OMPI_SUCCESS;
}
static int __gnix_rma_send_data_req(void *arg) { struct gnix_fab_req *req = (struct gnix_fab_req *)arg; struct gnix_fid_ep *ep = req->gnix_ep; struct gnix_nic *nic = ep->nic; struct gnix_tx_descriptor *txd; gni_return_t status; int rc; int inject_err = _gnix_req_inject_err(req); rc = _gnix_nic_tx_alloc(nic, &txd); if (rc) { GNIX_INFO(FI_LOG_EP_DATA, "_gnix_nic_tx_alloc() failed: %d\n", rc); return -FI_ENOSPC; } txd->req = req; txd->completer_fn = __gnix_rma_txd_data_complete; txd->rma_data_hdr.flags = FI_RMA | FI_REMOTE_CQ_DATA; if (req->type == GNIX_FAB_RQ_RDMA_WRITE) { txd->rma_data_hdr.flags |= FI_REMOTE_WRITE; } else { txd->rma_data_hdr.flags |= FI_REMOTE_READ; } txd->rma_data_hdr.data = req->rma.imm; fastlock_acquire(&nic->lock); if (inject_err) { _gnix_nic_txd_err_inject(nic, txd); status = GNI_RC_SUCCESS; } else { status = GNI_SmsgSendWTag(req->vc->gni_ep, &txd->rma_data_hdr, sizeof(txd->rma_data_hdr), NULL, 0, txd->id, GNIX_SMSG_T_RMA_DATA); } fastlock_release(&nic->lock); if (status == GNI_RC_NOT_DONE) { _gnix_nic_tx_free(nic, txd); GNIX_INFO(FI_LOG_EP_DATA, "GNI_SmsgSendWTag returned %s\n", gni_err_str[status]); } else if (status != GNI_RC_SUCCESS) { _gnix_nic_tx_free(nic, txd); GNIX_WARN(FI_LOG_EP_DATA, "GNI_SmsgSendWTag returned %s\n", gni_err_str[status]); } else { GNIX_INFO(FI_LOG_EP_DATA, "Sent RMA CQ data, req: %p\n", req); } return gnixu_to_fi_errno(status); }
static int __gnix_amo_send_cntr_req(void *arg) { struct gnix_fab_req *req = (struct gnix_fab_req *)arg; struct gnix_fid_ep *ep = req->gnix_ep; struct gnix_nic *nic = ep->nic; struct gnix_tx_descriptor *txd; gni_return_t status; int rc; int inject_err = _gnix_req_inject_err(req); rc = _gnix_nic_tx_alloc(nic, &txd); if (rc) { GNIX_INFO(FI_LOG_EP_DATA, "_gnix_nic_tx_alloc() failed: %d\n", rc); return -FI_ENOSPC; } txd->req = req; txd->completer_fn = __gnix_amo_txd_cntr_complete; if (req->type == GNIX_FAB_RQ_AMO) { txd->amo_cntr_hdr.flags = FI_REMOTE_WRITE; } else { txd->amo_cntr_hdr.flags = FI_REMOTE_READ; } COND_ACQUIRE(nic->requires_lock, &nic->lock); if (inject_err) { _gnix_nic_txd_err_inject(nic, txd); status = GNI_RC_SUCCESS; } else { status = GNI_SmsgSendWTag(req->vc->gni_ep, &txd->amo_cntr_hdr, sizeof(txd->amo_cntr_hdr), NULL, 0, txd->id, GNIX_SMSG_T_AMO_CNTR); } COND_RELEASE(nic->requires_lock, &nic->lock); if (status == GNI_RC_NOT_DONE) { _gnix_nic_tx_free(nic, txd); GNIX_INFO(FI_LOG_EP_DATA, "GNI_SmsgSendWTag returned %s\n", gni_err_str[status]); } else if (status != GNI_RC_SUCCESS) { _gnix_nic_tx_free(nic, txd); GNIX_WARN(FI_LOG_EP_DATA, "GNI_SmsgSendWTag returned %s\n", gni_err_str[status]); } else { GNIX_INFO(FI_LOG_EP_DATA, "Sent RMA CQ data, req: %p\n", req); } return gnixu_to_fi_errno(status); }
static int __gnix_rndzv_req_send_fin(void *arg) { struct gnix_fab_req *req = (struct gnix_fab_req *)arg; struct gnix_nic *nic; struct gnix_fid_ep *ep; struct gnix_tx_descriptor *txd; gni_return_t status; int rc; GNIX_TRACE(FI_LOG_EP_DATA, "\n"); ep = req->gnix_ep; assert(ep != NULL); nic = ep->nic; assert(nic != NULL); rc = _gnix_nic_tx_alloc(nic, &txd); if (rc) { GNIX_INFO(FI_LOG_EP_DATA, "_gnix_nic_tx_alloc() failed: %d\n", rc); return -FI_ENOSPC; } txd->rndzv_fin_hdr.req_addr = req->msg.rma_id; txd->req = req; txd->completer_fn = gnix_ep_smsg_completers[GNIX_SMSG_T_RNDZV_FIN]; fastlock_acquire(&nic->lock); status = GNI_SmsgSendWTag(req->vc->gni_ep, &txd->rndzv_fin_hdr, sizeof(txd->rndzv_fin_hdr), NULL, 0, txd->id, GNIX_SMSG_T_RNDZV_FIN); if ((status == GNI_RC_SUCCESS) && (ep->domain->data_progress == FI_PROGRESS_AUTO)) _gnix_rma_post_irq(req->vc); fastlock_release(&nic->lock); if (status == GNI_RC_NOT_DONE) { _gnix_nic_tx_free(nic, txd); GNIX_INFO(FI_LOG_EP_DATA, "GNI_SmsgSendWTag returned %s\n", gni_err_str[status]); } else if (status != GNI_RC_SUCCESS) { _gnix_nic_tx_free(nic, txd); GNIX_WARN(FI_LOG_EP_DATA, "GNI_SmsgSendWTag returned %s\n", gni_err_str[status]); } GNIX_INFO(FI_LOG_EP_DATA, "Initiated RNDZV_FIN, req: %p\n", req); return gnixu_to_fi_errno(status); }
/*
 * Tear down the connection state of a uGNI BTL endpoint (OPAL variant).
 *
 * When the endpoint is connected and @send_disconnect is set, a
 * best-effort zero-byte DISCONNECT tag is sent to the peer under the
 * device lock (failure is only logged).  The SMSG and RDMA GNI endpoint
 * handles are then destroyed, the SMSG mailbox (if any) is returned to
 * the free list, the endpoint is reset to INIT, and the BTL's
 * connected-peer count is decremented.  Always returns OPAL_SUCCESS.
 *
 * Fix: the connected-peer counter was decremented by 11 instead of 1,
 * corrupting the count on every disconnect.
 */
int mca_btl_ugni_ep_disconnect (mca_btl_base_endpoint_t *ep, bool send_disconnect)
{
    gni_return_t rc;

    if (MCA_BTL_UGNI_EP_STATE_INIT == ep->state) {
        /* nothing to do */
        return OPAL_SUCCESS;
    }

    if (MCA_BTL_UGNI_EP_STATE_CONNECTED == ep->state && send_disconnect) {
        OPAL_THREAD_LOCK(&ep->common->dev->dev_lock);
        rc = GNI_SmsgSendWTag (ep->smsg_ep_handle, NULL, 0, NULL, 0, -1,
                               MCA_BTL_UGNI_TAG_DISCONNECT);
        OPAL_THREAD_UNLOCK(&ep->common->dev->dev_lock);
        if (GNI_RC_SUCCESS != rc) {
            BTL_VERBOSE(("btl/ugni could not send close message"));
        }

        /* we might want to wait for local completion here (do we even care),
         * yes we do */
        /* TODO: FIX FIX FIX */
    }

    /* TODO: FIX GROSS */
    OPAL_THREAD_LOCK(&ep->common->dev->dev_lock);
    (void) opal_common_ugni_ep_destroy (&ep->smsg_ep_handle);
    (void) opal_common_ugni_ep_destroy (&ep->rdma_ep_handle);
    OPAL_THREAD_UNLOCK(&ep->common->dev->dev_lock);

    if (ep->mailbox) {
        opal_free_list_return (&ep->btl->smsg_mboxes,
                               ((opal_free_list_item_t *) ep->mailbox));
        ep->mailbox = NULL;
    }

    ep->state = MCA_BTL_UGNI_EP_STATE_INIT;

    /* one endpoint disconnected: decrement the peer count by exactly 1
     * (was -11, which corrupted the counter) */
    (void) opal_atomic_add_64 (&ep->btl->connected_peer_count, -1);

    return OPAL_SUCCESS;
}
/*
 * Common active-message send path over GNI SMSG (no flush-group tracking).
 *
 * The caller supplies @desc from the iface mpool; on success it is added
 * to the in-flight descriptor hash, on failure it is returned to the
 * mpool.  Returns UCS_OK on success, UCS_ERR_NO_RESOURCE otherwise.
 */
static UCS_F_ALWAYS_INLINE ucs_status_t
uct_ugni_smsg_ep_am_common_send(uct_ugni_smsg_ep_t *ep,
                                uct_ugni_smsg_iface_t *iface,
                                uint8_t am_id, unsigned header_length,
                                void *header, unsigned payload_length,
                                void *payload, uct_ugni_smsg_desc_t *desc)
{
    gni_return_t send_rc;

    /* msg_id doubles as the SMSG message id so completions can find desc */
    desc->msg_id = iface->smsg_id++;
    desc->ep     = &ep->super;

    send_rc = GNI_SmsgSendWTag(ep->super.ep, header, header_length,
                               payload, payload_length, desc->msg_id, am_id);

    if (GNI_RC_SUCCESS == send_rc) {
        /* message is in flight: account for it and track the descriptor */
        ++ep->super.outstanding;
        ++iface->super.outstanding;
        sglib_hashed_uct_ugni_smsg_desc_t_add(iface->smsg_list, desc);
        return UCS_OK;
    }

    /* send rejected: recycle the descriptor and report back-pressure */
    ucs_mpool_put(desc);
    return UCS_ERR_NO_RESOURCE;
}