/*
 * Tear down the global SA QP cache list: for each entry, release its
 * verbs resources (QP, AH, CQ, MR, PD) and heap allocations, then reset
 * the list head.  free(NULL) is a no-op, so device_name needs no guard
 * (the original's `if (cache->device_name)` check was redundant).
 */
static void free_sa_qp_cache(void)
{
    struct mca_btl_openib_sa_qp_cache *cache = sa_qp_cache;

    while (NULL != cache) {
        struct mca_btl_openib_sa_qp_cache *next = cache->next;

        /* free cache data */
        free(cache->device_name);
        if (NULL != cache->qp) {
            ibv_destroy_qp(cache->qp);
        }
        if (NULL != cache->ah) {
            ibv_destroy_ah(cache->ah);
        }
        if (NULL != cache->cq) {
            ibv_destroy_cq(cache->cq);
        }
        if (NULL != cache->mr) {
            ibv_dereg_mr(cache->mr);
        }
        if (NULL != cache->pd) {
            ibv_dealloc_pd(cache->pd);
        }
        free(cache);
        cache = next;
    }
    sa_qp_cache = NULL;
}
/*
 * Release every verbs resource attached to a cmatest node, then the
 * RDMA connection ID itself.  A node without a cma_id was never set up,
 * so there is nothing to tear down.
 */
static void destroy_node(struct cmatest_node *node)
{
    if (!node->cma_id) {
        return;
    }

    if (node->ah) {
        ibv_destroy_ah(node->ah);
    }
    if (node->cma_id->qp) {
        rdma_destroy_qp(node->cma_id);
    }
    if (node->cq) {
        ibv_destroy_cq(node->cq);
    }
    if (node->mem) {
        /* the MR was registered over node->mem, so they go together */
        ibv_dereg_mr(node->mr);
        free(node->mem);
    }
    if (node->pd) {
        ibv_dealloc_pd(node->pd);
    }

    /* Destroy the RDMA ID after all device resources */
    rdma_destroy_id(node->cma_id);
}
/*
 * Destructor for a UD OOB peer: drop its address handle if one was
 * ever created.  The return value of ibv_destroy_ah is deliberately
 * ignored -- there is no recovery path inside a destructor.
 */
static void mca_oob_ud_peer_destruct (mca_oob_ud_peer_t *peer)
{
    if (peer->peer_ah != NULL) {
        (void) ibv_destroy_ah (peer->peer_ah);
    }
}
/* Cleanup for a UD verbs endpoint: release its address handle, if any. */
static UCS_CLASS_CLEANUP_FUNC(uct_ud_verbs_ep_t)
{
    ucs_trace_func("");

    if (self->ah != NULL) {
        ibv_destroy_ah(self->ah);
        /* clear the pointer so a stale handle can never be reused */
        self->ah = NULL;
    }
}
/*
 * libibverbs 1.0 ABI compat shim: destroy the wrapped real AH, and free
 * the wrapper only on success so the caller can retry after a failure.
 * Returns 0 on success, or the error from ibv_destroy_ah.
 */
int __ibv_destroy_ah_1_0(struct ibv_ah_1_0 *ah)
{
    int rc = ibv_destroy_ah(ah->real_ah);

    if (rc != 0) {
        return rc;
    }

    free(ah);
    return 0;
}
/*
 * libibverbs 1.0 ABI compat shim: destroy the wrapped real AH, freeing
 * the wrapper only when destruction succeeds.
 *
 * Fix: removed a leftover debug fprintf that traced func/file/line to
 * stderr on every call -- pure development residue with no production
 * value, and it polluted callers' stderr.
 */
int __ibv_destroy_ah_1_0(struct ibv_ah_1_0 *ah)
{
    int ret;

    ret = ibv_destroy_ah(ah->real_ah);
    if (ret)
        return ret;

    free(ah);
    return 0;
}
/*
 * Update a UD OOB peer's addressing (QP number, LID, port) from a URI
 * string.  If the LID or port changed, the old address handle is
 * destroyed and a new one is created against the first local device
 * that accepts it.
 *
 * Returns ORTE_SUCCESS, the parse error from mca_oob_ud_parse_uri, or
 * ORTE_ERROR if no device could create an address handle.
 */
int mca_oob_ud_peer_update_with_uri (mca_oob_ud_peer_t *peer, const char *uri)
{
    opal_list_item_t *item;
    struct ibv_ah_attr ah_attr;
    mca_oob_ud_device_t *device;
    uint32_t qp_num;
    /* NTH: port is 16-bit here because C90 does not support hh in sscanf */
    uint16_t lid, port_num;
    int rc;

    rc = mca_oob_ud_parse_uri (uri, &qp_num, &lid, &port_num);
    if (ORTE_SUCCESS != rc) {
        return rc;
    }

    /* address changed: drop the stale AH so it gets rebuilt below */
    if (peer->peer_lid != lid || peer->peer_port != port_num) {
        if (NULL != peer->peer_ah) {
            (void) ibv_destroy_ah (peer->peer_ah);
            peer->peer_ah = NULL;
        }
    }

    peer->peer_qpn = qp_num;
    peer->peer_qkey = 0; /* NTH: todo -- add qkey support if needed */
    peer->peer_lid = lid;
    peer->peer_port = port_num;

    if (NULL == peer->peer_ah) {
        memset (&ah_attr, 0, sizeof (ah_attr));
        ah_attr.dlid = lid;
        ah_attr.port_num = port_num;

        /* probe each known device; the first one whose PD yields an AH wins */
        for (item = opal_list_get_first (&mca_oob_ud_component.ud_devices);
             item != opal_list_get_end (&mca_oob_ud_component.ud_devices);
             item = opal_list_get_next (item)) {
            device = (mca_oob_ud_device_t *)item;

            /* try to create an address handle using this device */
            peer->peer_ah = ibv_create_ah (device->ib_pd, &ah_attr);
            if (NULL != peer->peer_ah) {
                peer->peer_context = (void *) item;
                break;
            }
        }

        if (NULL == peer->peer_ah) {
            /* NOTE(review): freeing the caller-supplied peer here is
             * suspicious -- any caller still holding the pointer is
             * left dangling.  Confirm that every caller discards peer
             * when this function returns an error. */
            free (peer);
            return ORTE_ERROR;
        }
    }

    return ORTE_SUCCESS;
}
/** ========================================================================= */
/*
 * Tear down a port's SA QP and everything hanging off it.  The teardown
 * order is deliberate: stop the CQ monitor and join the worker thread
 * before destroying any verbs objects they might touch.
 */
static void destroy_sa_qp(struct oib_port *port)
{
    int i;

    // if the user just unregistered trap messages those messages may still
    // be on this list, wait 5 seconds for the thread to handle the response.
    // (5000 iterations x 1 ms sleep = ~5 s upper bound)
    for (i = 0; i < 5000; i++) {
        if (!LIST_EMPTY(&port->pending_reg_msg_head)) {
            usleep(1000);
        } else {
            DBGPRINT("destroy_sa_qp: wait %d ms for LIST_EMPTY\n", i);
            break;
        }
    }

    stop_ud_cq_monitor(port);
    join_port_thread(port);

    /* Free any remaining unregistration messages */
    if (!LIST_EMPTY(&port->pending_reg_msg_head)) {
        OUTPUT_ERROR("Ignoring Pending Notice un-registation requests\n");
        oib_sa_remove_all_pending_reg_msgs(port);
    }

    /* AH and QP first, then the MRs registered on the PD, then the PD */
    if (port->sa_ah)
        ibv_destroy_ah(port->sa_ah);
    if (port->sa_qp)
        ibv_destroy_qp(port->sa_qp);
    for (i = 0; i < port->num_userspace_recv_buf; i++)
        if (port->recv_bufs)
            ibv_dereg_mr(port->recv_bufs[i].mr);
    if (port->sa_qp_pd)
        ibv_dealloc_pd(port->sa_qp_pd);
    if (port->sa_qp_cq)
        ibv_destroy_cq(port->sa_qp_cq);
    if (port->recv_bufs) {
        free(port->recv_bufs);
        port->recv_bufs = NULL;
    }
    if (port->sa_qp_comp_channel)
        ibv_destroy_comp_channel(port->sa_qp_comp_channel);
}
/*
 * Close the test device: shut down the IB connection, then release the
 * address handle and completion queue if they were created.  Always
 * returns 0.  (The CM close path is compiled out via #if 0.)
 */
int rd_close(DEVICE *dev)
{
#if 0
    if (Req.use_cm)
        cm_close(dev);
    else
#endif
        ib_close1(dev);

    if (dev->ah) {
        ibv_destroy_ah(dev->ah);
    }
    if (dev->cq) {
        ibv_destroy_cq(dev->cq);
    }

    return 0;
}
/*
 * Build an mlx5 address vector for a UD endpoint from an IB address.
 *
 * A temporary verbs AH is created only to let the driver fill in a
 * mlx5_wqe_av; its fields are copied out and the AH is destroyed
 * immediately.  On success, base_av is populated, and grh_av is filled
 * when the route is global (*is_global set by uct_ib_iface_create_ah).
 *
 * Returns UCS_OK, or UCS_ERR_INVALID_ADDR if the AH cannot be created
 * (note: the original status code from uct_ib_iface_create_ah is
 * intentionally collapsed into UCS_ERR_INVALID_ADDR).
 */
ucs_status_t uct_ud_mlx5_iface_get_av(uct_ib_iface_t *iface,
                                      uct_ud_mlx5_iface_common_t *ud_common_iface,
                                      const uct_ib_address_t *ib_addr,
                                      uint8_t path_bits,
                                      uct_ib_mlx5_base_av_t *base_av,
                                      struct mlx5_grh_av *grh_av,
                                      int *is_global)
{
    ucs_status_t status;
    struct ibv_ah *ah;
    struct mlx5_wqe_av mlx5_av;

    status = uct_ib_iface_create_ah(iface, ib_addr, path_bits, &ah, is_global);
    if (status != UCS_OK) {
        return UCS_ERR_INVALID_ADDR;
    }

    /* extract the driver-computed AV, then drop the throwaway AH */
    uct_ib_mlx5_get_av(ah, &mlx5_av);
    ibv_destroy_ah(ah);

    base_av->stat_rate_sl = mlx5_av_base(&mlx5_av)->stat_rate_sl;
    base_av->fl_mlid      = mlx5_av_base(&mlx5_av)->fl_mlid;
    base_av->rlid         = mlx5_av_base(&mlx5_av)->rlid;

    /* copy MLX5_EXTENDED_UD_AV from the driver, if the flag is not present then
     * the device supports compact address vector. */
    if (ud_common_iface->config.compact_av) {
        base_av->dqp_dct = mlx5_av_base(&mlx5_av)->dqp_dct & UCT_IB_MLX5_EXTENDED_UD_AV;
    } else {
        base_av->dqp_dct = UCT_IB_MLX5_EXTENDED_UD_AV;
    }

    /* sanity: either the AV layout is non-compact or the device set the
     * extended-AV flag; anything else is unsupported hardware */
    ucs_assertv_always((UCT_IB_MLX5_AV_FULL_SIZE > UCT_IB_MLX5_AV_BASE_SIZE) ||
                       (base_av->dqp_dct & UCT_IB_MLX5_EXTENDED_UD_AV),
                       "compact address vector not supported, and EXTENDED_AV flag is missing");

    if (*is_global) {
        ucs_assert_always(grh_av != NULL);
        memcpy(grh_av, mlx5_av_grh(&mlx5_av), sizeof(*grh_av));
    }
    return UCS_OK;
}
/*
 * usNIC endpoint destructor: detach the endpoint from every module-level
 * list it may be on, tear down its internal containers, release the
 * owning proc, and finally destroy its remote address handle.
 */
static void endpoint_destruct(mca_btl_base_endpoint_t* endpoint)
{
    int rc;
    ompi_btl_usnic_proc_t *proc;

    if (endpoint->endpoint_ack_needed) {
        ompi_btl_usnic_remove_from_endpoints_needing_ack(endpoint);
    }

    /* Remove the endpoint from the all_endpoints list */
    /* (must hold the module lock -- the list is shared across threads) */
    ompi_btl_usnic_module_t *module = endpoint->endpoint_module;
    opal_mutex_lock(&module->all_endpoints_lock);
    if (endpoint->endpoint_on_all_endpoints) {
        opal_list_remove_item(&module->all_endpoints, &endpoint->endpoint_endpoint_li);
        endpoint->endpoint_on_all_endpoints = false;
    }
    opal_mutex_unlock(&module->all_endpoints_lock);
    OBJ_DESTRUCT(&(endpoint->endpoint_endpoint_li));

    /* hotel is only initialized when rooms were allocated */
    if (endpoint->endpoint_hotel.rooms != NULL) {
        OBJ_DESTRUCT(&(endpoint->endpoint_hotel));
    }

    OBJ_DESTRUCT(&endpoint->endpoint_frag_send_queue);

    /* release owning proc */
    proc = endpoint->endpoint_proc;
    if (NULL != proc) {
        /* clear the back-pointer before dropping our reference */
        proc->proc_endpoints[endpoint->endpoint_proc_index] = NULL;
        OBJ_RELEASE(proc);
    }

    free(endpoint->endpoint_rx_frag_info);

    if (NULL != endpoint->endpoint_remote_ah) {
        rc = ibv_destroy_ah(endpoint->endpoint_remote_ah);
        if (rc) {
            BTL_ERROR(("failed to ibv_destroy_ah, err=%d (%s)",
                          rc, strerror(rc)));
        }
    }
}
/*
 * Build the endpoint's mlx5 address vector for a remote UD peer.
 *
 * A verbs AH is created only so the driver can compute the AV; the AV
 * is copied into ep->av, the remote QP number and qkey are filled in,
 * and the temporary AH is destroyed.
 *
 * Returns UCS_OK, or UCS_ERR_INVALID_ADDR if the AH cannot be created.
 */
static ucs_status_t uct_ud_mlx5_ep_create_ah(uct_ud_mlx5_iface_t *iface,
                                             uct_ud_mlx5_ep_t *ep,
                                             const uct_sockaddr_ib_t *if_addr)
{
    struct ibv_ah *ah;

    ah = uct_ib_create_ah(&iface->super.super, if_addr->lid);
    if (ah == NULL) {
        ucs_error("failed to create address handle: %m");
        return UCS_ERR_INVALID_ADDR;
    }

    uct_ib_mlx5_get_av(ah, &ep->av);

    /* NOTE(review): qkey and dqp_dct are byte-swapped with htonl while
     * key.qkey.reserved receives the local QP number without swapping --
     * presumably the hardware expects exactly this mix; confirm against
     * the mlx5 WQE AV layout before touching. */
    mlx5_av_base(&ep->av)->key.qkey.qkey     = htonl(UCT_IB_QKEY);
    mlx5_av_base(&ep->av)->key.qkey.reserved = iface->super.qp->qp_num;
    mlx5_av_base(&ep->av)->dqp_dct           = htonl(if_addr->qp_num |
                                                     UCT_IB_MLX5_EXTENDED_UD_AV);
    /* the AV has been copied; the verbs AH itself is no longer needed */
    ibv_destroy_ah(ah);
    return UCS_OK;
}
/*
 * usNIC endpoint destructor: tear down internal containers, release the
 * owning proc, and destroy the remote address handle.
 *
 * Fix: guard the proc dereference with a NULL check -- the sibling
 * destructor variant in this codebase already checks `NULL != proc`
 * before touching proc_endpoints, and an endpoint whose proc was
 * already detached would otherwise crash here.
 */
static void endpoint_destruct(mca_btl_base_endpoint_t* endpoint)
{
    ompi_btl_usnic_proc_t *proc;

    OBJ_DESTRUCT(&(endpoint->endpoint_ack_li));
    OBJ_DESTRUCT(&(endpoint->endpoint_endpoint_li));

    /* hotel is only initialized when rooms were allocated */
    if (endpoint->endpoint_hotel.rooms != NULL) {
        OBJ_DESTRUCT(&(endpoint->endpoint_hotel));
    }

    OBJ_DESTRUCT(&endpoint->endpoint_frag_send_queue);

    /* release owning proc (may already have been detached) */
    proc = endpoint->endpoint_proc;
    if (NULL != proc) {
        proc->proc_endpoints[endpoint->endpoint_proc_index] = NULL;
        OBJ_RELEASE(proc);
    }

    free(endpoint->endpoint_rx_frag_info);

    if (NULL != endpoint->endpoint_remote_ah) {
        ibv_destroy_ah(endpoint->endpoint_remote_ah);
    }
}
/*
 * GDestroyNotify callback for hash-table values holding ibv address
 * handles: destroy the AH stored in the value pointer.
 */
static void destroy_ah_hast_data(gpointer data)
{
    ibv_destroy_ah((struct ibv_ah *) data);
}
/*
 * Create a UD verbs endpoint connected to a remote interface address,
 * or reuse an existing half-duplex endpoint for the same destination.
 *
 * On the new-endpoint path: create the ep, bind it to the iface, build
 * its address handle, register it in the connection endpoint (CEP)
 * table, and send a CREQ to the peer.  Each failure point unwinds
 * exactly the state established before it (err3/err2/err1 below).
 *
 * Returns UCS_OK with *new_ep_p set, or an error status with
 * *new_ep_p reset to NULL.
 */
ucs_status_t uct_ud_verbs_ep_create_connected(uct_iface_h iface_h, const struct sockaddr *addr, uct_ep_h *new_ep_p)
{
    uct_ud_ep_t *ready_ep;
    uct_ud_verbs_ep_t *ep;
    uct_ud_verbs_iface_t *iface = ucs_derived_of(iface_h, uct_ud_verbs_iface_t);
    uct_sockaddr_ib_t *if_addr = (uct_sockaddr_ib_t *)addr;
    uct_ud_send_skb_t *skb;
    struct ibv_ah *ah;
    ucs_status_t status;

    /* check if we can reuse half duplex ep */
    ready_ep = uct_ud_iface_cep_lookup(&iface->super, if_addr, UCT_UD_EP_CONN_ID_MAX);
    if (ready_ep) {
        *new_ep_p = &ready_ep->super.super;
        return UCS_OK;
    }

    status = iface_h->ops.ep_create(iface_h, new_ep_p);
    if (status != UCS_OK) {
        return status;
    }

    ep = ucs_derived_of(*new_ep_p, uct_ud_verbs_ep_t);
    status = uct_ud_ep_connect_to_iface(&ep->super, addr);
    if (status != UCS_OK) {
        return status;
    }

    /* a freshly created ep must not already own an AH */
    ucs_assert_always(ep->ah == NULL);
    ah = uct_ib_create_ah(&iface->super.super, if_addr->lid);
    if (ah == NULL) {
        ucs_error("failed to create address handle: %m");
        status = UCS_ERR_INVALID_ADDR;
        goto err1;
    }
    ep->ah = ah;

    status = uct_ud_iface_cep_insert(&iface->super, if_addr, &ep->super, UCT_UD_EP_CONN_ID_MAX);
    if (status != UCS_OK) {
        goto err2;
    }

    skb = uct_ud_ep_prepare_creq(&ep->super);
    if (!skb) {
        status = UCS_ERR_NO_RESOURCE;
        goto err3;
    }
    /* post the CREQ directly via the iface's scatter entry */
    iface->tx.sge[0].addr   = (uintptr_t)skb->neth;
    iface->tx.sge[0].length = skb->len;
    uct_ud_verbs_iface_tx_ctl(iface, ep);
    ucs_trace_data("TX: CREQ (qp=%x lid=%d)", if_addr->qp_num, if_addr->lid);
    return UCS_OK;

    /* unwind in strict reverse order of setup */
err3:
    uct_ud_iface_cep_rollback(&iface->super, if_addr, &ep->super);
err2:
    ibv_destroy_ah(ep->ah);
    ep->ah = NULL;
err1:
    uct_ud_ep_disconnect_from_iface(*new_ep_p);
    *new_ep_p = NULL;
    return status;
}