Example #1
0
/*
 * Common receive path for untagged (ofi_op_msg) and tagged (ofi_op_tagged)
 * receives.  Pops a recv entry from the queue's freestack, records the iov,
 * checks the unexpected-message list for an already-arrived match, and
 * otherwise queues the entry on the posted-receive list.
 *
 * Returns 0 on success, -FI_EINVAL for an unknown op, -FI_EAGAIN when no
 * recv entries are available, or the error from unexpected-list processing.
 */
int rxm_ep_recv_common(struct fid_ep *ep_fid, const struct iovec *iov, void **desc,
		size_t count, fi_addr_t src_addr, uint64_t tag, uint64_t ignore,
		void *context, uint64_t flags, int op)
{
	struct rxm_recv_entry *recv_entry;
	struct rxm_ep *rxm_ep;
	struct rxm_recv_queue *recv_queue;
	dlist_func_t *match;
	size_t i;
	int ret;

	rxm_ep = container_of(ep_fid, struct rxm_ep, util_ep.ep_fid.fid);

	/* Select the queue and unexpected-message matcher per operation. */
	// TODO pass recv_queue as arg
	if (op == ofi_op_msg) {
		recv_queue = &rxm_ep->recv_queue;
		match = ofi_match_unexp_msg;
	} else if (op == ofi_op_tagged) {
		recv_queue = &rxm_ep->trecv_queue;
		match = ofi_match_unexp_msg_tagged;
	} else {
		FI_WARN(&rxm_prov, FI_LOG_EP_DATA, "Unknown op!\n");
		return -FI_EINVAL;
	}

	if (freestack_isempty(recv_queue->recv_fs)) {
		FI_DBG(&rxm_prov, FI_LOG_CQ, "Exhausted recv_entry freestack\n");
		return -FI_EAGAIN;
	}

	recv_entry = freestack_pop(recv_queue->recv_fs);

	for (i = 0; i < count; i++) {
		recv_entry->iov[i].iov_base = iov[i].iov_base;
		recv_entry->iov[i].iov_len = iov[i].iov_len;
		recv_entry->desc[i] = desc[i];
		/* %zu: iov_len is a size_t */
		FI_DBG(&rxm_prov, FI_LOG_EP_CTRL, "post recv: %zu\n",
			iov[i].iov_len);
	}
	recv_entry->count = count;
	/* Ignore src_addr unless the endpoint supports directed receives. */
	recv_entry->addr = (rxm_ep->rxm_info->caps & FI_DIRECTED_RECV) ?
		src_addr : FI_ADDR_UNSPEC;
	recv_entry->flags = flags;
	if (op == ofi_op_tagged) {
		recv_entry->tag = tag;
		recv_entry->ignore = ignore;
	}

	if (!dlist_empty(&recv_queue->unexp_msg_list)) {
		ret = rxm_check_unexp_msg_list(rxm_ep->util_ep.rx_cq, recv_queue,
				recv_entry, match);
		if (ret) {
			FI_WARN(&rxm_prov, FI_LOG_EP_DATA,
					"Unable to check unexp msg list\n");
			/* Entry was never queued; return it so it isn't leaked. */
			freestack_push(recv_queue->recv_fs, recv_entry);
			return ret;
		}
	}

	dlist_insert_tail(&recv_entry->entry, &recv_queue->recv_list);
	return 0;
}
Example #2
0
/*
 * Grab a free rx entry from the endpoint's freestack, stamp it with its
 * index within the freestack buffer as its key, and track it on the
 * endpoint's active rx-entry list.  Returns NULL when none are free.
 */
static struct rxd_rx_entry *rxd_rx_entry_alloc(struct rxd_ep *ep)
{
	struct rxd_rx_entry *entry;

	if (freestack_isempty(ep->rx_entry_fs))
		return NULL;

	entry = freestack_pop(ep->rx_entry_fs);
	/* Key is the entry's offset from the start of the freestack buffer. */
	entry->key = entry - ep->rx_entry_fs->buf;
	dlist_insert_tail(&entry->entry, &ep->rx_entry_list);
	return entry;
}
Example #3
0
// TODO handle all flags
/*
 * Common transmit path for untagged and tagged sends.  Allocates a tx
 * entry and a packet buffer, then either inlines the payload (eager path,
 * size <= RXM_TX_DATA_SIZE) or sends an rma-iov descriptor for the peer to
 * read (large-message/LMT path).  On any failure the packet buffer and tx
 * entry are released before returning.
 *
 * Returns 0 on success or a negative fi_errno value.
 */
static ssize_t rxm_ep_send_common(struct fid_ep *ep_fid, const struct iovec *iov,
		void **desc, size_t count, fi_addr_t dest_addr, void *context,
		uint64_t data, uint64_t tag, uint64_t flags, int op)
{
	struct rxm_ep *rxm_ep;
	struct rxm_conn *rxm_conn;
	struct rxm_tx_entry *tx_entry;
	struct rxm_pkt *pkt;
	struct fid_mr *mr;
	void *desc_tx_buf = NULL;
	struct rxm_rma_iov *rma_iov;
	int pkt_size = 0;
	size_t i;
	int ret;

	rxm_ep = container_of(ep_fid, struct rxm_ep, util_ep.ep_fid.fid);

	ret = rxm_get_conn(rxm_ep, dest_addr, &rxm_conn);
	if (ret)
		return ret;

	if (freestack_isempty(rxm_ep->txe_fs)) {
		FI_DBG(&rxm_prov, FI_LOG_CQ, "Exhaused tx_entry freestack\n");
		return -FI_ENOMEM;
	}

	tx_entry = freestack_pop(rxm_ep->txe_fs);

	tx_entry->ctx_type = RXM_TX_ENTRY;
	tx_entry->ep = rxm_ep;
	tx_entry->context = context;
	tx_entry->flags = flags;

	/* FI_LOCAL_MR providers need a registered buffer plus its descriptor. */
	if (rxm_ep->msg_info->mode & FI_LOCAL_MR) {
		pkt = util_buf_get_ex(rxm_ep->tx_pool, (void **)&mr);
		desc_tx_buf = fi_mr_desc(mr);
	} else {
		pkt = util_buf_get(rxm_ep->tx_pool);
	}
	assert(pkt);

	tx_entry->pkt = pkt;

	rxm_pkt_init(pkt);
	pkt->ctrl_hdr.conn_id = rxm_conn->handle.remote_key;
	pkt->hdr.op = op;
	pkt->hdr.size = ofi_get_iov_len(iov, count);
	rxm_op_hdr_process_flags(&pkt->hdr, flags, data);

	if (op == ofi_op_tagged)
		pkt->hdr.tag = tag;

	if (pkt->hdr.size > RXM_TX_DATA_SIZE) {
		/* Large message: cannot be inlined, so FI_INJECT must fail. */
		if (flags & FI_INJECT) {
			/* Cast args to int to match the %d specifiers. */
			FI_WARN(&rxm_prov, FI_LOG_EP_DATA,
					"inject size supported: %d, msg size: %d\n",
					(int)rxm_tx_attr.inject_size,
					(int)pkt->hdr.size);
			ret = -FI_EMSGSIZE;
			goto err;
		}
		tx_entry->msg_id = ofi_idx2key(&rxm_ep->tx_key_idx,
				rxm_txe_fs_index(rxm_ep->txe_fs, tx_entry));
		pkt->ctrl_hdr.msg_id = tx_entry->msg_id;
		pkt->ctrl_hdr.type = ofi_ctrl_large_data;
		rma_iov = (struct rxm_rma_iov *)pkt->data;
		rma_iov->count = count;
		for (i = 0; i < count; i++) {
			/* Fix: index iov[i]; the old code always read iov[0]. */
			rma_iov->iov[i].addr = rxm_ep->msg_info->domain_attr->mr_mode == FI_MR_SCALABLE ?
				0 : (uintptr_t)iov[i].iov_base;
			rma_iov->iov[i].len = (uint64_t)iov[i].iov_len;
			rma_iov->iov[i].key = fi_mr_key(desc[i]);
		}
		pkt_size = sizeof(*pkt) + sizeof(*rma_iov) + sizeof(*rma_iov->iov) * count;
		FI_DBG(&rxm_prov, FI_LOG_CQ,
				"Sending large msg. msg_id: 0x%" PRIx64 "\n",
				tx_entry->msg_id);
		FI_DBG(&rxm_prov, FI_LOG_CQ, "tx_entry->state -> RXM_LMT_START\n");
		tx_entry->state = RXM_LMT_START;
	} else {
		/* Eager path: copy the payload directly into the packet. */
		pkt->ctrl_hdr.type = ofi_ctrl_data;
		ofi_copy_iov_buf(iov, count, pkt->data, pkt->hdr.size, 0,
				OFI_COPY_IOV_TO_BUF);
		pkt_size = sizeof(*pkt) + pkt->hdr.size;
	}

	ret = fi_send(rxm_conn->msg_ep, pkt, pkt_size, desc_tx_buf, 0, tx_entry);
	if (ret) {
		FI_WARN(&rxm_prov, FI_LOG_EP_DATA, "fi_send for MSG provider failed\n");
		goto err;
	}
	return 0;
err:
	util_buf_release(rxm_ep->tx_pool, pkt);
	freestack_push(rxm_ep->txe_fs, tx_entry);
	return ret;
}