/*
 * Resolve the raw address bytes for an rxd-level fi_addr.
 *
 * The rxd AV is layered over a datagram AV: translate the caller's
 * rxd address index to the underlying datagram address, then delegate
 * the actual lookup to the wrapped datagram AV.  Returns whatever
 * fi_av_lookup() returns (0 on success, negative fabric errno on
 * failure); *addrlen is updated per the fi_av_lookup() contract.
 */
int rxd_av_lookup(struct fid_av *av, fi_addr_t fi_addr, void *addr,
		  size_t *addrlen)
{
	struct rxd_av *rxd_av_priv;
	fi_addr_t dg_fi_addr;

	rxd_av_priv = container_of(av, struct rxd_av, util_av.av_fid);
	dg_fi_addr = rxd_av_get_dg_addr(rxd_av_priv, fi_addr);

	return fi_av_lookup(rxd_av_priv->dg_av, dg_fi_addr, addr, addrlen);
}
/*
 * rxd_ep_readmsg() - initiate an RMA read on the rxd endpoint.
 *
 * Translates the destination to the underlying datagram address, makes
 * sure the peer connection is established, records the read request in
 * a tx entry, and posts the first message of the exchange.
 *
 * Returns 0 on success, -FI_EAGAIN when the caller should retry (peer
 * handshake still in flight, or no free tx entry), or a negative fabric
 * errno from a lower-level call.
 */
ssize_t rxd_ep_readmsg(struct fid_ep *ep, const struct fi_msg_rma *msg,
		       uint64_t flags)
{
	ssize_t ret;
	uint64_t peer_addr;
	struct rxd_ep *rxd_ep;
	struct rxd_peer *peer;
	struct rxd_tx_entry *tx_entry;

	rxd_ep = container_of(ep, struct rxd_ep, ep);
	/* Map the rxd-level destination to the underlying datagram address,
	 * then fetch the per-peer state tracked for that address. */
	peer_addr = rxd_av_get_dg_addr(rxd_ep->av, msg->addr);
	peer = rxd_ep_getpeer_info(rxd_ep, peer_addr);

#if ENABLE_DEBUG
	/* Debug-only sanity check: reject iov counts beyond what the
	 * provider's fixed-size tx-entry arrays can hold. */
	if (msg->iov_count > RXD_IOV_LIMIT ||
	    msg->rma_iov_count > RXD_IOV_LIMIT)
		return -FI_EINVAL;
#endif

	rxd_ep_lock_if_required(rxd_ep);
	if (!peer->addr_published) {
		/* Peer does not yet know our address: kick off the
		 * connection message and tell the caller to retry.
		 * A failure posting the conn message takes precedence
		 * over -FI_EAGAIN. */
		ret = rxd_ep_post_conn_msg(rxd_ep, peer, peer_addr);
		ret = (ret) ? ret : -FI_EAGAIN;
		goto out;
	}

	tx_entry = rxd_tx_entry_acquire(rxd_ep, peer);
	if (!tx_entry) {
		/* No free tx entries; caller must retry later. */
		ret = -FI_EAGAIN;
		goto out;
	}

	/* Record the read request in the tx entry.  Note msg is copied by
	 * value; the iov arrays it points at are copied separately below. */
	dlist_init(&tx_entry->pkt_list);
	tx_entry->op_type = RXD_TX_READ_REQ;
	tx_entry->read_req.msg = *msg;
	tx_entry->flags = flags;
	tx_entry->peer = peer_addr;
	/* For a read, the local buffers are the destination and the remote
	 * RMA iovs are the source. */
	rxd_ep_copy_msg_iov(msg->msg_iov, &tx_entry->read_req.dst_iov[0],
			    msg->iov_count);
	rxd_ep_copy_rma_iov(msg->rma_iov, &tx_entry->read_req.src_iov[0],
			    msg->rma_iov_count);

	ret = rxd_ep_post_start_msg(rxd_ep, peer, ofi_op_read_req, tx_entry);
	if (ret)
		goto err;

	/* Only track the entry once the start message has been posted. */
	dlist_insert_tail(&tx_entry->entry, &rxd_ep->tx_entry_list);
out:
	rxd_ep_unlock_if_required(rxd_ep);
	return ret;
err:
	/* Posting failed: give the tx entry back, then fall through to the
	 * common unlock-and-return path. */
	rxd_tx_entry_release(rxd_ep, tx_entry);
	goto out;
}
/*
 * Remove a batch of addresses from the rxd AV.
 *
 * Each rxd-level fi_addr is translated to its underlying datagram
 * address and removed one at a time from the wrapped datagram AV,
 * decrementing the AV usage count on each success.  Stops at the first
 * failure and returns that error; returns 0 if all removals succeed.
 */
static int rxd_av_remove(struct fid_av *av_fid, fi_addr_t *fi_addr,
			 size_t count, uint64_t flags)
{
	struct rxd_av *rxd_av;
	fi_addr_t dg_addr;
	size_t idx;
	int status = 0;

	rxd_av = container_of(av_fid, struct rxd_av, util_av.av_fid);

	for (idx = 0; idx < count && !status; idx++) {
		dg_addr = rxd_av_get_dg_addr(rxd_av, fi_addr[idx]);
		status = fi_av_remove(rxd_av->dg_av, &dg_addr, 1, flags);
		if (!status)
			rxd_av->dg_av_used--;
	}

	return status;
}