int rxd_ep_init_res(struct rxd_ep *ep, struct fi_info *fi_info)
{
	struct rxd_domain *rxd_domain = rxd_ep_domain(ep);
	struct ofi_bufpool_attr entry_pool_attr = {
		.size		= sizeof(struct rxd_x_entry),
		.alignment	= RXD_BUF_POOL_ALIGNMENT,
		.max_cnt	= 0,
		.flags		= OFI_BUFPOOL_INDEXED,
	};
	int ret;

	/* Packet pools hold MTU-sized wire buffers. When the core provider
	 * requires local memory registration, hook in the region alloc/free
	 * callbacks so each region is registered as it is created. */
	ret = ofi_bufpool_create_ex(&ep->tx_pkt_pool,
				    rxd_domain->max_mtu_sz +
				    sizeof(struct rxd_pkt_entry),
				    RXD_BUF_POOL_ALIGNMENT, 0,
				    RXD_TX_POOL_CHUNK_CNT,
				    ep->do_local_mr ?
				    rxd_buf_region_alloc_fn : NULL,
				    ep->do_local_mr ?
				    rxd_buf_region_free_fn : NULL,
				    rxd_domain);
	if (ret)
		return ret;

	ret = ofi_bufpool_create_ex(&ep->rx_pkt_pool,
				    rxd_domain->max_mtu_sz +
				    sizeof(struct rxd_pkt_entry),
				    RXD_BUF_POOL_ALIGNMENT, 0,
				    RXD_RX_POOL_CHUNK_CNT,
				    ep->do_local_mr ?
				    rxd_buf_region_alloc_fn : NULL,
				    ep->do_local_mr ?
				    rxd_buf_region_free_fn : NULL,
				    rxd_domain);
	if (ret)
		goto err;

	/* The tx/rx entry pools are indexed so an entry can later be looked
	 * up by its index alone; NO_TRACK skips per-region use accounting. */
	entry_pool_attr.flags |= OFI_BUFPOOL_NO_TRACK;
	entry_pool_attr.chunk_cnt = ep->tx_size;
	ret = ofi_bufpool_create_attr(&entry_pool_attr, &ep->tx_entry_pool);
	if (ret)
		goto err;

	entry_pool_attr.chunk_cnt = ep->rx_size;
	ret = ofi_bufpool_create_attr(&entry_pool_attr, &ep->rx_entry_pool);
	if (ret)
		goto err;

	dlist_init(&ep->rx_list);
	dlist_init(&ep->rx_tag_list);
	dlist_init(&ep->active_peers);
	dlist_init(&ep->rts_sent_list);
	dlist_init(&ep->unexp_list);
	dlist_init(&ep->unexp_tag_list);
	dlist_init(&ep->ctrl_pkts);
	slist_init(&ep->rx_pkt_list);

	return 0;
err:
	/* Destroy whichever pools were successfully created; ep is assumed
	 * zero-initialized, so pools that were never created are NULL. */
	if (ep->tx_pkt_pool)
		ofi_bufpool_destroy(ep->tx_pkt_pool);
	if (ep->rx_pkt_pool)
		ofi_bufpool_destroy(ep->rx_pkt_pool);
	if (ep->tx_entry_pool)
		ofi_bufpool_destroy(ep->tx_entry_pool);
	if (ep->rx_entry_pool)
		ofi_bufpool_destroy(ep->rx_entry_pool);
	return ret;
}

/* Reset a peer slot to its initial, inactive state. */
static void rxd_init_peer(struct rxd_ep *ep, uint64_t rxd_addr)
{
	ep->peers[rxd_addr].peer_addr = FI_ADDR_UNSPEC;
	ep->peers[rxd_addr].tx_seq_no = 0;
	ep->peers[rxd_addr].rx_seq_no = 0;
	ep->peers[rxd_addr].last_rx_ack = 0;
	ep->peers[rxd_addr].last_tx_ack = 0;
	ep->peers[rxd_addr].rx_window = rxd_env.max_unacked;
	ep->peers[rxd_addr].tx_window = rxd_env.max_unacked;
	ep->peers[rxd_addr].unacked_cnt = 0;
	ep->peers[rxd_addr].retry_cnt = 0;
	ep->peers[rxd_addr].active = 0;
	dlist_init(&ep->peers[rxd_addr].unacked);
	dlist_init(&ep->peers[rxd_addr].tx_list);
	dlist_init(&ep->peers[rxd_addr].rx_list);
	dlist_init(&ep->peers[rxd_addr].rma_rx_list);
	dlist_init(&ep->peers[rxd_addr].buf_pkts);
}
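/*
 * Illustrative sketch, not part of the original file: how the indexed
 * tx entry pool created above is typically consumed. ofi_buf_alloc(),
 * ofi_buf_free(), and ofi_bufpool_get_ibuf() are the generic ofi_bufpool
 * accessors; the helper below is hypothetical and only demonstrates the
 * OFI_BUFPOOL_INDEXED property.
 */
static struct rxd_x_entry *example_acquire_tx_entry(struct rxd_ep *ep)
{
	struct rxd_x_entry *tx_entry;

	/* Grab a free entry; the pool grows in chunks of ep->tx_size. */
	tx_entry = ofi_buf_alloc(ep->tx_entry_pool);
	if (!tx_entry)
		return NULL;

	/* Because the pool was created with OFI_BUFPOOL_INDEXED, the same
	 * entry can later be recovered from its index alone, e.g. when an
	 * ack carries only an entry id:
	 *   ofi_bufpool_get_ibuf(ep->tx_entry_pool, index);
	 * When the exchange completes, release it with ofi_buf_free(). */
	return tx_entry;
}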
static int tcpx_buf_pools_create(struct tcpx_buf_pool *buf_pools)
{
	int i, ret;
	struct ofi_bufpool_attr attr = {
		.size = sizeof(struct tcpx_xfer_entry),
		.alignment = 16,
		.chunk_cnt = 1024,
		.init_fn = tcpx_buf_pool_init,
	};

	/* One xfer-entry pool per opcode; attr.context hands each pool's
	 * descriptor to init_fn so new entries can be set up for that
	 * pool's op_type. */
	for (i = 0; i < TCPX_OP_CODE_MAX; i++) {
		buf_pools[i].op_type = i;
		attr.context = &buf_pools[i];
		ret = ofi_bufpool_create_attr(&attr, &buf_pools[i].pool);
		if (ret) {
			FI_WARN(&tcpx_prov, FI_LOG_EP_CTRL,
				"Unable to create buf pool\n");
			goto err;
		}
	}
	return 0;

err:
	/* Tear down only the pools created so far. */
	while (i--)
		ofi_bufpool_destroy(buf_pools[i].pool);

	/* ofi_bufpool_create_attr() already returns a negative fi_errno
	 * value; negating it here would hand callers a positive code. */
	return ret;
}

int tcpx_cq_open(struct fid_domain *domain, struct fi_cq_attr *attr,
		 struct fid_cq **cq_fid, void *context)
{
	int ret;
	struct tcpx_cq *tcpx_cq;

	tcpx_cq = calloc(1, sizeof(*tcpx_cq));
	if (!tcpx_cq)
		return -FI_ENOMEM;

	if (!attr->size)
		attr->size = TCPX_DEF_CQ_SIZE;

	ret = tcpx_buf_pools_create(tcpx_cq->buf_pools);
	if (ret)
		goto free_cq;

	ret = ofi_cq_init(&tcpx_prov, domain, attr, &tcpx_cq->util_cq,
			  &ofi_cq_progress, context);
	if (ret)
		goto destroy_pool;

	*cq_fid = &tcpx_cq->util_cq.cq_fid;
	(*cq_fid)->fid.ops = &tcpx_cq_fi_ops;
	return 0;

destroy_pool:
	tcpx_buf_pools_destroy(tcpx_cq->buf_pools);
free_cq:
	free(tcpx_cq);
	return ret;
}
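/*
 * Illustrative sketch, not part of the original file: one plausible way a
 * completion path could draw a transfer entry from the per-opcode pools
 * built above. The helper name is hypothetical; ofi_buf_alloc() and
 * ofi_buf_free() are the generic ofi_bufpool accessors, and the claim that
 * entries come back pre-initialized rests on the init_fn/attr.context
 * wiring shown in tcpx_buf_pools_create().
 */
static struct tcpx_xfer_entry *
example_xfer_entry_alloc(struct tcpx_cq *tcpx_cq, int op_type)
{
	/* Indexing by opcode avoids per-allocation header setup: each
	 * pool's entries were prepared once, at region allocation time, by
	 * the registered init_fn. A real caller would also serialize access
	 * to the pool, since ofi_bufpool is not thread-safe. */
	return ofi_buf_alloc(tcpx_cq->buf_pools[op_type].pool);
}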