/*
 * Resolve a datagram address in the underlying dg AV, inserting it on a
 * lookup miss.
 *
 * If this is the first address ever seen by the AV, the datagram address
 * length is derived from @addr before anything else.
 *
 * @av:         rxd address vector; util_av.lock is taken internally
 * @hint_index: lookup hint forwarded to rxd_av_dg_reverse_lookup()
 * @addr:       raw datagram address to resolve
 * @dg_fiaddr:  [out] fi_addr of the entry in the dg AV; valid only on success
 *
 * Returns 0 on success, a negative fi_errno value on failure.
 */
int rxd_av_insert_dg_addr(struct rxd_av *av, uint64_t hint_index,
			  const void *addr, fi_addr_t *dg_fiaddr)
{
	int ret;

	fastlock_acquire(&av->util_av.lock);
	if (!av->dg_addrlen) {
		ret = rxd_av_set_addrlen(av, addr);
		if (ret)
			goto out;
		/* AV was empty until now, so force the insert path below. */
		ret = -FI_ENODATA;
	} else {
		ret = rxd_av_dg_reverse_lookup(av, hint_index, addr, dg_fiaddr);
	}

	if (ret == -FI_ENODATA) {
		ret = fi_av_insert(av->dg_av, addr, 1, dg_fiaddr, 0, NULL);
		if (ret == 1) {
			av->dg_av_used++;
			ret = 0;
		} else if (ret >= 0) {
			/* BUGFIX: fi_av_insert() returning 0 (nothing
			 * inserted) previously leaked out as success with
			 * *dg_fiaddr left unset. */
			ret = -FI_EINVAL;
		}
	}
out:
	fastlock_release(&av->util_av.lock);
	return ret;
}
int rxd_handle_conn_req(struct rxd_ep *ep, struct ofi_ctrl_hdr *ctrl, struct fi_cq_msg_entry *comp, struct rxd_rx_buf *rx_buf) { int ret; void *addr; size_t addrlen; uint64_t peer; struct rxd_pkt_data *pkt_data; struct rxd_peer *peer_info; rxd_ep_lock_if_required(ep); pkt_data = (struct rxd_pkt_data *) ctrl; addr = pkt_data->data; addrlen = ctrl->seg_size; ret = rxd_av_dg_reverse_lookup(ep->av, ctrl->rx_key, addr, addrlen, &peer); if (ret == -FI_ENODATA) { ret = rxd_av_insert_dg_av(ep->av, addr); assert(ret == 1); ret = rxd_av_dg_reverse_lookup(ep->av, ctrl->rx_key, addr, addrlen, &peer); assert(ret == 0); } peer_info = rxd_ep_getpeer_info(ep, peer); if (!peer_info->addr_published) { peer_info->addr_published = 1; peer_info->conn_initiated = 1; peer_info->conn_data = ctrl->conn_id; peer_info->exp_msg_id++; } rxd_ep_reply_ack(ep, ctrl, ofi_ctrl_connresp, 0, ctrl->conn_id, peer, peer); rxd_ep_repost_buff(rx_buf); rxd_ep_unlock_if_required(ep); return ret; }
/*
 * Insert @count addresses into the AV, consulting the reverse lookup
 * first so existing dg AV entries are reused.
 *
 * Per-address failures are reported through the bound EQ (if any) or by
 * writing FI_ADDR_NOTAVAIL to @fi_addr; processing then continues with
 * the remaining addresses.
 *
 * @av:      rxd address vector
 * @addr:    array of @count raw addresses, each av->addrlen bytes
 * @count:   number of addresses to insert
 * @fi_addr: [out, optional] resulting fi_addrs, FI_ADDR_NOTAVAIL on failure
 * @flags:   insert flags passed through to fi_av_insert()
 * @context: user context for EQ events
 *
 * Returns the number of successful insertions when no EQ is bound, or 0
 * with a completion event written to the EQ.
 */
int rxd_av_insert_check(struct rxd_av *av, const void *addr, size_t count,
			fi_addr_t *fi_addr, uint64_t flags, void *context)
{
	int ret, index, success_cnt = 0;
	/* BUGFIX: the index was a signed int compared against the size_t
	 * count (signed/unsigned mismatch, broken for huge counts). */
	size_t i;
	const void *curr_addr;
	uint64_t dg_av_idx;

	for (i = 0; i < count; i++) {
		/* BUGFIX: const is no longer cast away from @addr. */
		curr_addr = (const char *) addr + av->addrlen * i;
		ret = rxd_av_dg_reverse_lookup(av, i, curr_addr, av->addrlen,
					       &dg_av_idx);
		if (ret == -FI_ENODATA) {
			ret = fi_av_insert(av->dg_av, curr_addr, 1,
					   &dg_av_idx, flags, context);
			if (ret <= 0) {
				if (av->util_av.eq)
					ofi_av_write_event(&av->util_av, i,
							   (ret == 0) ?
							   FI_EINVAL : -ret,
							   context);
				if (fi_addr)
					fi_addr[i] = FI_ADDR_NOTAVAIL;
				continue;
			}
		}

		ret = ofi_av_insert_addr(&av->util_av, &dg_av_idx,
					 dg_av_idx, &index);
		if (ret) {
			if (av->util_av.eq)
				ofi_av_write_event(&av->util_av, i, -ret,
						   context);
		} else {
			success_cnt++;
		}
		if (fi_addr)
			fi_addr[i] = (ret == 0) ? index : FI_ADDR_NOTAVAIL;
	}

	av->dg_av_used += success_cnt;
	if (av->util_av.eq) {
		/* Async completion: report the tally via the EQ. */
		ofi_av_write_event(&av->util_av, success_cnt, 0, context);
		ret = 0;
	} else {
		ret = success_cnt;
	}
	return ret;
}
/*
 * fi_av_insert() entry point for the rxd AV.
 *
 * Addresses are reverse-looked-up first (except on the very first insert
 * call, when the AV is known to be empty) and inserted into the dg AV on
 * a miss.  On the first failure the loop stops: the failing index gets an
 * error event / FI_ADDR_NOTAVAIL, and every remaining index is reported
 * as FI_ECANCELED / FI_ADDR_NOTAVAIL.
 *
 * Returns the number of successful insertions when no EQ is bound, or 0
 * with a completion event written to the EQ.
 */
static int rxd_av_insert(struct fid_av *av_fid, const void *addr, size_t count,
			 fi_addr_t *fi_addr, uint64_t flags, void *context)
{
	struct rxd_av *av;
	/* BUGFIX: loop index was a signed int compared against the size_t
	 * count (signed/unsigned mismatch). */
	size_t i = 0;
	int index, ret = 0, success_cnt = 0, lookup = 1;
	uint64_t dg_fiaddr;

	av = container_of(av_fid, struct rxd_av, util_av.av_fid);
	fastlock_acquire(&av->util_av.lock);
	if (!av->dg_addrlen) {
		ret = rxd_av_set_addrlen(av, addr);
		if (ret)
			goto out;
		/* Skip lookups if this is the first insertion call. */
		lookup = 0;
	}

	/* BUGFIX: no longer cast const away when advancing @addr. */
	for (; i < count; i++, addr = (const uint8_t *) addr + av->dg_addrlen) {
		ret = lookup ?
		      rxd_av_dg_reverse_lookup(av, i, addr, &dg_fiaddr) :
		      -FI_ENODATA;
		if (ret) {
			ret = fi_av_insert(av->dg_av, addr, 1, &dg_fiaddr,
					   flags, context);
			if (ret != 1)
				break;
		}
		ret = ofi_av_insert_addr(&av->util_av, &dg_fiaddr,
					 dg_fiaddr, &index);
		if (ret)
			break;

		success_cnt++;
		if (fi_addr)
			fi_addr[i] = index;
	}

	if (ret) {
		FI_WARN(&rxd_prov, FI_LOG_AV,
			"failed to insert address %zu: %d (%s)\n",
			i, -ret, fi_strerror(-ret));
		if (av->util_av.eq)
			ofi_av_write_event(&av->util_av, i, -ret, context);
		if (fi_addr)
			fi_addr[i] = FI_ADDR_NOTAVAIL;
		i++;
	}
out:
	av->dg_av_used += success_cnt;
	fastlock_release(&av->util_av.lock);

	/* Cancel everything after the failing entry. */
	for (; i < count; i++) {
		if (av->util_av.eq)
			ofi_av_write_event(&av->util_av, i, FI_ECANCELED,
					   context);
		if (fi_addr)
			fi_addr[i] = FI_ADDR_NOTAVAIL;
	}

	if (av->util_av.eq) {
		ofi_av_write_event(&av->util_av, success_cnt, 0, context);
		return 0;
	}
	return success_cnt;
}