Example #1
ssize_t rxm_ep_tsendmsg(struct fid_ep *ep_fid, const struct fi_msg_tagged *msg,
			 uint64_t flags)
{
	return rxm_ep_send_common(ep_fid, msg->msg_iov, msg->desc, msg->iov_count,
			msg->addr, msg->context, msg->data, msg->tag,
			flags | (rxm_ep_tx_flags(ep_fid) & FI_COMPLETION),
			ofi_op_tagged);
}
Example #2
ssize_t rxm_ep_tsend(struct fid_ep *ep_fid, const void *buf, size_t len, void *desc,
		      fi_addr_t dest_addr, uint64_t tag, void *context)
{
	struct iovec iov;
	iov.iov_base = (void *) buf;
	iov.iov_len = len;

	return rxm_ep_send_common(ep_fid, &iov, &desc, 1, dest_addr, context, 0,
			tag, rxm_ep_tx_flags(ep_fid), ofi_op_tagged);
}
Example #3
static ssize_t rxm_ep_senddata(struct fid_ep *ep_fid, const void *buf, size_t len, void *desc,
				uint64_t data, fi_addr_t dest_addr, void *context)
{
	struct iovec iov;

	iov.iov_base = (void *) buf;
	iov.iov_len = len;

	return rxm_ep_send_common(ep_fid, &iov, &desc, 1, dest_addr, context, data,
			0, rxm_ep_tx_flags(ep_fid) | FI_REMOTE_CQ_DATA, ofi_op_msg);
}
Example #4
ssize_t	rxm_ep_tinjectdata(struct fid_ep *ep_fid, const void *buf, size_t len,
			    uint64_t data, fi_addr_t dest_addr, uint64_t tag)
{
	struct iovec iov;

	iov.iov_base = (void *) buf;
	iov.iov_len = len;

	return rxm_ep_send_common(ep_fid, &iov, NULL, 1, dest_addr, NULL, data,
			tag, (rxm_ep_tx_flags(ep_fid) & ~FI_COMPLETION) |
			FI_INJECT | FI_REMOTE_CQ_DATA, ofi_op_tagged);
}
Example #5
static ssize_t	rxm_ep_inject(struct fid_ep *ep_fid, const void *buf, size_t len,
			       fi_addr_t dest_addr)
{
	struct iovec iov;

	iov.iov_base = (void *) buf;
	iov.iov_len = len;

	return rxm_ep_send_common(ep_fid, &iov, NULL, 1, dest_addr, NULL, 0, 0,
			(rxm_ep_tx_flags(ep_fid) & ~FI_COMPLETION) | FI_INJECT,
			ofi_op_msg);
}
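
/*
 * A minimal caller-side sketch (not taken from the rxm source) showing how
 * the entry points above are reached through the public libfabric calls.
 * It assumes an already-configured endpoint `ep` bound to the rxm provider,
 * a registered local descriptor `desc`, and a resolved peer address `peer`;
 * those names and the tag value are illustrative only.
 */
#include <rdma/fabric.h>
#include <rdma/fi_endpoint.h>
#include <rdma/fi_tagged.h>

static ssize_t rxm_send_usage_sketch(struct fid_ep *ep, fi_addr_t peer,
				     void *buf, size_t len, void *desc,
				     void *context)
{
	ssize_t ret;

	/* Tagged send with a completion; would typically land in
	 * rxm_ep_tsend() through the endpoint's tagged ops. */
	ret = fi_tsend(ep, buf, len, desc, peer, 0xABCDULL, context);
	if (ret)
		return ret;

	/* Inject: the buffer may be reused as soon as the call returns and
	 * no completion is generated; would map to rxm_ep_inject(). */
	return fi_inject(ep, buf, len, peer);
}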
Example #6
static ssize_t rxm_ep_readv(struct fid_ep *ep_fid, const struct iovec *iov,
			    void **desc, size_t count, fi_addr_t src_addr,
			    uint64_t addr, uint64_t key, void *context)
{
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);
	struct fi_rma_iov rma_iov = {
		.addr = addr,
		.len = ofi_total_iov_len(iov, count),
		.key = key,
	};
	struct fi_msg_rma msg = {
		.msg_iov = iov,
		.desc = desc,
		.iov_count = count,
		.addr = src_addr,
		.rma_iov = &rma_iov,
		.rma_iov_count = 1,
		.context = context,
		.data = 0,
	};

	return rxm_ep_rma_common(rxm_ep, &msg, rxm_ep_tx_flags(rxm_ep), fi_readmsg, FI_READ);
}

static ssize_t rxm_ep_read(struct fid_ep *ep_fid, void *buf, size_t len,
			   void *desc, fi_addr_t src_addr, uint64_t addr,
			   uint64_t key, void *context)
{
	struct iovec iov = {
		.iov_base = (void*)buf,
		.iov_len = len,
	};
	struct fi_rma_iov rma_iov = {
		.addr = addr,
		.len = len,
		.key = key,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov,
		.desc = &desc,
		.iov_count = 1,
		.addr = src_addr,
		.rma_iov = &rma_iov,
		.rma_iov_count = 1,
		.context = context,
		.data = 0,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_rma_common(rxm_ep, &msg, rxm_ep_tx_flags(rxm_ep), fi_readmsg, FI_READ);
}

static inline void
rxm_ep_format_rma_msg(struct rxm_rma_buf *rma_buf, const struct fi_msg_rma *orig_msg,
		      struct iovec *rxm_iov, struct fi_msg_rma *rxm_msg)
{
	rxm_msg->context = rma_buf;
	rxm_msg->addr = orig_msg->addr;
	rxm_msg->data = orig_msg->data;

	ofi_copy_from_iov(rma_buf->pkt.data, rma_buf->pkt.hdr.size,
			  orig_msg->msg_iov, orig_msg->iov_count, 0);
	rxm_iov->iov_base = &rma_buf->pkt.data;
	rxm_iov->iov_len = rma_buf->pkt.hdr.size;
	rxm_msg->msg_iov = rxm_iov;
	rxm_msg->desc = &rma_buf->hdr.desc;
	rxm_msg->iov_count = 1;

	rxm_msg->rma_iov = orig_msg->rma_iov;
	rxm_msg->rma_iov_count = orig_msg->rma_iov_count;
}

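/*
 * Emulates FI_INJECT semantics for RMA writes that cannot be passed to the
 * MSG provider's native inject path: the user's data is copied into a
 * buffer from the RMA buffer pool so the application buffer may be reused
 * as soon as this call returns.
 */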
static inline ssize_t
rxm_ep_rma_emulate_inject_msg(struct rxm_ep *rxm_ep, struct rxm_conn *rxm_conn, size_t total_size,
			      const struct fi_msg_rma *msg, uint64_t flags)
{
	struct rxm_rma_buf *rma_buf;
	ssize_t ret;
	struct iovec rxm_msg_iov = { 0 };
	struct fi_msg_rma rxm_rma_msg = { 0 };

	assert(msg->rma_iov_count <= rxm_ep->rxm_info->tx_attr->rma_iov_limit);

	ofi_ep_lock_acquire(&rxm_ep->util_ep);
	rma_buf = rxm_rma_buf_alloc(rxm_ep);
	if (OFI_UNLIKELY(!rma_buf)) {
		FI_WARN(&rxm_prov, FI_LOG_EP_DATA,
			"Ran out of buffers from RMA buffer pool\n");
		ret = -FI_ENOMEM;
		goto unlock;
	}

	rma_buf->pkt.hdr.size = total_size;
	rma_buf->app_context = msg->context;
	rma_buf->flags = flags;
	rxm_ep_format_rma_msg(rma_buf, msg, &rxm_msg_iov, &rxm_rma_msg);

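	/* The data now lives in the provider-owned rma_buf, so drop FI_INJECT
	 * and request a completion from the MSG provider so that rma_buf can
	 * be released once the underlying write finishes. */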
	flags = (flags & ~FI_INJECT) | FI_COMPLETION;

	ret = fi_writemsg(rxm_conn->msg_ep, &rxm_rma_msg, flags);
	if (OFI_UNLIKELY(ret)) {
		if (ret == -FI_EAGAIN)
			rxm_ep_do_progress(&rxm_ep->util_ep);
		ofi_buf_free(rma_buf);
	}
unlock:
	ofi_ep_lock_release(&rxm_ep->util_ep);
	return ret;
}

static inline ssize_t
rxm_ep_rma_emulate_inject(struct rxm_ep *rxm_ep, struct rxm_conn *rxm_conn,
			  const void *buf, size_t len, uint64_t data,
			  fi_addr_t dest_addr, uint64_t addr, uint64_t key,
			  uint64_t flags)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr,
		.len = len,
		.key = key,
	};
	struct iovec iov = {
		.iov_base = (void*)buf,
		.iov_len = len,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov,
		.desc = NULL,
		.iov_count = 1,
		.addr = dest_addr,
		.rma_iov = &rma_iov,
		.rma_iov_count = 1,
		.context = NULL,
		.data = data,
	};

	return rxm_ep_rma_emulate_inject_msg(rxm_ep, rxm_conn, len, &msg, flags);
}

static inline ssize_t
rxm_ep_rma_inject_common(struct rxm_ep *rxm_ep, const struct fi_msg_rma *msg, uint64_t flags)
{
	struct rxm_conn *rxm_conn;
	size_t total_size = ofi_total_iov_len(msg->msg_iov, msg->iov_count);
	ssize_t ret;

	assert(total_size <= rxm_ep->rxm_info->tx_attr->inject_size);

	ret = rxm_ep_prepare_tx(rxm_ep, msg->addr, &rxm_conn);
	if (OFI_UNLIKELY(ret))
		return ret;

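	/* Use the MSG provider's native inject when the payload fits within
	 * its inject size and no completion was requested (fi_writemsg would
	 * generate one by default); otherwise fall back to the emulated
	 * inject path. */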
	if ((total_size <= rxm_ep->msg_info->tx_attr->inject_size) &&
	    !(flags & FI_COMPLETION) &&
	    (msg->iov_count == 1) && (msg->rma_iov_count == 1)) {
		if (flags & FI_REMOTE_CQ_DATA) {
			ret = fi_inject_writedata(rxm_conn->msg_ep,
						  msg->msg_iov->iov_base,
						  msg->msg_iov->iov_len, msg->data,
						  msg->addr, msg->rma_iov->addr,
						  msg->rma_iov->key);
		} else {
			ret = fi_inject_write(rxm_conn->msg_ep,
					      msg->msg_iov->iov_base,
					      msg->msg_iov->iov_len, msg->addr,
					      msg->rma_iov->addr,
					      msg->rma_iov->key);
		}
		if (OFI_LIKELY(!ret)) {
			ofi_ep_wr_cntr_inc(&rxm_ep->util_ep);
		} else {
			FI_DBG(&rxm_prov, FI_LOG_EP_DATA,
			       "fi_inject_write* for MSG provider failed with ret - %"
			       PRId64"\n", ret);
			if (OFI_LIKELY(ret == -FI_EAGAIN))
				rxm_ep_progress(&rxm_ep->util_ep);
		}
		return ret;
	} else {
		return rxm_ep_rma_emulate_inject_msg(rxm_ep, rxm_conn, total_size, msg, flags);
	}
}

static inline ssize_t
rxm_ep_generic_writemsg(struct fid_ep *ep_fid, const struct fi_msg_rma *msg,
			uint64_t flags)
{
	struct rxm_ep *rxm_ep =
		container_of(ep_fid, struct rxm_ep, util_ep.ep_fid.fid);

	if (flags & FI_INJECT)
		return rxm_ep_rma_inject_common(rxm_ep, msg, flags);
	else
		return rxm_ep_rma_common(rxm_ep, msg, flags,
					 fi_writemsg, FI_WRITE);
}

static inline ssize_t
rxm_ep_writemsg(struct fid_ep *ep_fid, const struct fi_msg_rma *msg, uint64_t flags)
{
	struct rxm_ep *rxm_ep =
		container_of(ep_fid, struct rxm_ep, util_ep.ep_fid.fid);

	return rxm_ep_generic_writemsg(ep_fid, msg, flags | rxm_ep->util_ep.tx_msg_flags);
}

static ssize_t rxm_ep_writev(struct fid_ep *ep_fid, const struct iovec *iov,
			     void **desc, size_t count, fi_addr_t dest_addr,
			     uint64_t addr, uint64_t key, void *context)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr,
		.len = ofi_total_iov_len(iov, count),
		.key = key,
	};
	struct fi_msg_rma msg = {
		.msg_iov = iov,
		.desc = desc,
		.iov_count = count,
		.addr = dest_addr,
		.rma_iov = &rma_iov,
		.rma_iov_count = 1,
		.context = context,
		.data = 0,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_generic_writemsg(ep_fid, &msg, rxm_ep_tx_flags(rxm_ep));
}

static ssize_t rxm_ep_writedata(struct fid_ep *ep_fid, const void *buf,
				size_t len, void *desc, uint64_t data,
				fi_addr_t dest_addr, uint64_t addr,
				uint64_t key, void *context)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr,
		.len = len,
		.key = key,
	};
	struct iovec iov = {
		.iov_base = (void*)buf,
		.iov_len = len,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov,
		.desc = &desc,
		.iov_count = 1,
		.addr = dest_addr,
		.rma_iov = &rma_iov,
		.rma_iov_count = 1,
		.context = context,
		.data = data,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_generic_writemsg(ep_fid, &msg, rxm_ep_tx_flags(rxm_ep) |
				       FI_REMOTE_CQ_DATA);
}

static ssize_t rxm_ep_write(struct fid_ep *ep_fid, const void *buf,
			    size_t len, void *desc, fi_addr_t dest_addr,
			    uint64_t addr, uint64_t key, void *context)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr,
		.len = len,
		.key = key,
	};
	struct iovec iov = {
		.iov_base = (void*)buf,
		.iov_len = len,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov,
		.desc = &desc,
		.iov_count = 1,
		.addr = dest_addr,
		.rma_iov = &rma_iov,
		.rma_iov_count = 1,
		.context = context,
		.data = 0,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_generic_writemsg(ep_fid, &msg, rxm_ep_tx_flags(rxm_ep));
}

static ssize_t rxm_ep_inject_write(struct fid_ep *ep_fid, const void *buf,
				   size_t len, fi_addr_t dest_addr,
				   uint64_t addr, uint64_t key)
{
	ssize_t ret;
	struct rxm_conn *rxm_conn;
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	ret = rxm_ep_prepare_tx(rxm_ep, dest_addr, &rxm_conn);
	if (OFI_UNLIKELY(ret))
		return ret;

	if (len <= rxm_ep->msg_info->tx_attr->inject_size) {
		ret = fi_inject_write(rxm_conn->msg_ep, buf, len,
				      dest_addr, addr, key);
		if (OFI_LIKELY(!ret)) {
			ofi_ep_wr_cntr_inc(&rxm_ep->util_ep);
		} else {
			FI_DBG(&rxm_prov, FI_LOG_EP_DATA,
			       "fi_inject_write for MSG provider failed with ret - %"
			       PRId64"\n", ret);
			if (OFI_LIKELY(ret == -FI_EAGAIN))
				rxm_ep_progress(&rxm_ep->util_ep);
		}
		return ret;
	} else {
		return rxm_ep_rma_emulate_inject(rxm_ep, rxm_conn, buf, len,
						 0, dest_addr, addr, key, FI_INJECT);
	}
}

static ssize_t rxm_ep_inject_writedata(struct fid_ep *ep_fid, const void *buf,
				       size_t len, uint64_t data,
				       fi_addr_t dest_addr, uint64_t addr,
				       uint64_t key)
{
	ssize_t ret;
	struct rxm_conn *rxm_conn;
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);
	ret = rxm_ep_prepare_tx(rxm_ep, dest_addr, &rxm_conn);
	if (OFI_UNLIKELY(ret))
		return ret;

	if (len <= rxm_ep->msg_info->tx_attr->inject_size) {
		ret = fi_inject_writedata(rxm_conn->msg_ep, buf, len,
					  data, dest_addr, addr, key);
		if (OFI_LIKELY(!ret)) {
			ofi_ep_wr_cntr_inc(&rxm_ep->util_ep);
		} else {
			FI_DBG(&rxm_prov, FI_LOG_EP_DATA,
			       "fi_inject_writedata for MSG provider failed with ret - %"
			       PRId64"\n", ret);
			if (OFI_LIKELY(ret == -FI_EAGAIN))
				rxm_ep_progress(&rxm_ep->util_ep);
		}
		return ret;
	} else {
		return rxm_ep_rma_emulate_inject(rxm_ep, rxm_conn, buf, len,
						 data, dest_addr, addr, key,
						 FI_REMOTE_CQ_DATA | FI_INJECT);
	}
}

struct fi_ops_rma rxm_ops_rma = {
	.size = sizeof (struct fi_ops_rma),
	.read = rxm_ep_read,
	.readv = rxm_ep_readv,
	.readmsg = rxm_ep_readmsg,
	.write = rxm_ep_write,
	.writev = rxm_ep_writev,
	.writemsg = rxm_ep_writemsg,
	.inject = rxm_ep_inject_write,
	.writedata = rxm_ep_writedata,
	.injectdata = rxm_ep_inject_writedata,
};
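
/*
 * A minimal caller-side sketch (not taken from the rxm source) of the RMA
 * paths wired up in rxm_ops_rma above. It assumes an already-configured
 * endpoint `ep`, a registered local descriptor `desc`, and a remote virtual
 * address `raddr` with protection key `rkey` exchanged out of band; the
 * names are illustrative only.
 */
#include <rdma/fabric.h>
#include <rdma/fi_rma.h>

static ssize_t rxm_rma_usage_sketch(struct fid_ep *ep, fi_addr_t peer,
				    void *buf, size_t len, void *desc,
				    uint64_t raddr, uint64_t rkey,
				    void *context)
{
	ssize_t ret;

	/* Plain RMA write with a completion; dispatches to rxm_ep_write(),
	 * which builds an fi_msg_rma and calls rxm_ep_generic_writemsg(). */
	ret = fi_write(ep, buf, len, desc, peer, raddr, rkey, context);
	if (ret)
		return ret;

	/* Inject write: dispatches to rxm_ep_inject_write(). Small payloads
	 * go straight to the MSG provider's fi_inject_write(); larger ones
	 * take the emulated inject path shown earlier. */
	return fi_inject_write(ep, buf, len, peer, raddr, rkey);
}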
Example #7
static ssize_t rxm_ep_readv(struct fid_ep *ep_fid, const struct iovec *iov,
			    void **desc, size_t count, fi_addr_t src_addr,
			    uint64_t addr, uint64_t key, void *context)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr,
		.len = ofi_total_iov_len(iov, count),
		.key = key,
	};
	struct fi_msg_rma msg = {
		.msg_iov = iov,
		.desc = desc,
		.iov_count = count,
		.addr = src_addr,
		.rma_iov = &rma_iov,
		.rma_iov_count = 1,
		.context = context,
		.data = 0,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_readmsg(ep_fid, &msg, rxm_ep_tx_flags(rxm_ep));
}

static ssize_t rxm_ep_read(struct fid_ep *ep_fid, void *buf, size_t len,
			   void *desc, fi_addr_t src_addr, uint64_t addr,
			   uint64_t key, void *context)
{
	struct iovec iov = {
		.iov_base = (void*)buf,
		.iov_len = len,
	};
	struct fi_rma_iov rma_iov = {
		.addr = addr,
		.len = len,
		.key = key,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov,
		.desc = &desc,
		.iov_count = 1,
		.addr = src_addr,
		.rma_iov = &rma_iov,
		.rma_iov_count = 1,
		.context = context,
		.data = 0,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_readmsg(ep_fid, &msg, rxm_ep_tx_flags(rxm_ep));
}

static ssize_t rxm_ep_rma_inject(struct fid_ep *msg_ep, struct rxm_ep *rxm_ep,
				 const struct fi_msg_rma *msg, uint64_t flags)
{
	struct rxm_tx_entry *tx_entry;
	struct rxm_tx_buf *tx_buf;
	struct fi_msg_rma msg_rma;
	struct iovec iov;
	size_t size;
	ssize_t ret;

	size = ofi_total_iov_len(msg->msg_iov, msg->iov_count);

	if (size > rxm_ep->rxm_info->tx_attr->inject_size)
		return -FI_EMSGSIZE;

	/* Use fi_inject_write instead of fi_writemsg since the latter generates
	 * completion by default */
	if (size <= rxm_ep->msg_info->tx_attr->inject_size &&
	    !(flags & FI_COMPLETION)) {
		if (flags & FI_REMOTE_CQ_DATA)
			return fi_inject_writedata(msg_ep, msg->msg_iov->iov_base,
					       msg->msg_iov->iov_len, msg->data,
					       msg->addr, msg->rma_iov->addr,
					       msg->rma_iov->key);
		else
			return fi_inject_write(msg_ep, msg->msg_iov->iov_base,
					       msg->msg_iov->iov_len, msg->addr,
					       msg->rma_iov->addr,
					       msg->rma_iov->key);
	}

	tx_buf = rxm_tx_buf_get(rxm_ep, RXM_BUF_POOL_TX_MSG);
	if (!tx_buf) {
		FI_WARN(&rxm_prov, FI_LOG_CQ, "TX queue full!\n");
		rxm_ep_progress_multi(&rxm_ep->util_ep);
		return -FI_EAGAIN;
	}

	tx_entry = rxm_tx_entry_get(&rxm_ep->send_queue);
	if (!tx_entry) {
		rxm_ep_progress_multi(&rxm_ep->util_ep);
		ret = -FI_EAGAIN;
		goto err1;
	}

	tx_entry->state = RXM_TX;
	tx_entry->flags = flags;
	tx_entry->comp_flags = FI_RMA | FI_WRITE;
	tx_entry->tx_buf = tx_buf;

	ofi_copy_from_iov(tx_buf->pkt.data, size, msg->msg_iov, msg->iov_count, 0);

	iov.iov_base = &tx_buf->pkt.data;
	iov.iov_len = size;

	msg_rma.msg_iov = &iov;
	msg_rma.desc = &tx_buf->hdr.desc;
	msg_rma.iov_count = 1;
	msg_rma.addr = msg->addr;
	msg_rma.rma_iov = msg->rma_iov;
	msg_rma.rma_iov_count = msg->rma_iov_count;
	msg_rma.context = tx_entry;
	msg_rma.data = msg->data;
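	/* The data has been copied into tx_buf, so drop FI_INJECT and request
	 * a completion so that tx_entry and tx_buf can be released once the
	 * underlying write finishes. */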
	flags = (flags & ~FI_INJECT) | FI_COMPLETION;

	ret = fi_writemsg(msg_ep, &msg_rma, flags);
	if (ret) {
		if (ret == -FI_EAGAIN)
			rxm_ep_progress_multi(&rxm_ep->util_ep);
		goto err2;
	}
	return 0;
err2:
	rxm_tx_entry_release(&rxm_ep->send_queue, tx_entry);
err1:
	rxm_tx_buf_release(rxm_ep, tx_buf);
	return ret;
}

static ssize_t rxm_ep_writemsg(struct fid_ep *ep_fid, const struct fi_msg_rma *msg,
			       uint64_t flags)
{
	struct util_cmap_handle *handle;
	struct rxm_conn *rxm_conn;
	struct rxm_ep *rxm_ep;
	int ret;

	rxm_ep = container_of(ep_fid, struct rxm_ep, util_ep.ep_fid.fid);

	ret = ofi_cmap_get_handle(rxm_ep->util_ep.cmap, msg->addr, &handle);
	if (OFI_UNLIKELY(ret))
		return ret;
	rxm_conn = container_of(handle, struct rxm_conn, handle);

	if (flags & FI_INJECT)
		return rxm_ep_rma_inject(rxm_conn->msg_ep, rxm_ep, msg, flags);
	else
		return rxm_ep_rma_common(rxm_conn->msg_ep, rxm_ep, msg, flags,
					 fi_writemsg, FI_WRITE);
}

static ssize_t rxm_ep_writev(struct fid_ep *ep_fid, const struct iovec *iov,
			     void **desc, size_t count, fi_addr_t dest_addr,
			     uint64_t addr, uint64_t key, void *context)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr,
		.len = ofi_total_iov_len(iov, count),
		.key = key,
	};
	struct fi_msg_rma msg = {
		.msg_iov = iov,
		.desc = desc,
		.iov_count = count,
		.addr = dest_addr,
		.rma_iov = &rma_iov,
		.rma_iov_count = 1,
		.context = context,
		.data = 0,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_writemsg(ep_fid, &msg, rxm_ep_tx_flags(rxm_ep));
}

static ssize_t rxm_ep_writedata(struct fid_ep *ep_fid, const void *buf,
				size_t len, void *desc, uint64_t data,
				fi_addr_t dest_addr, uint64_t addr,
				uint64_t key, void *context)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr,
		.len = len,
		.key = key,
	};
	struct iovec iov = {
		.iov_base = (void*)buf,
		.iov_len = len,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov,
		.desc = &desc,
		.iov_count = 1,
		.addr = dest_addr,
		.rma_iov = &rma_iov,
		.rma_iov_count = 1,
		.context = context,
		.data = data,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_writemsg(ep_fid, &msg, rxm_ep_tx_flags(rxm_ep) |
			       FI_REMOTE_CQ_DATA);
}

static ssize_t rxm_ep_write(struct fid_ep *ep_fid, const void *buf,
			    size_t len, void *desc, fi_addr_t dest_addr,
			    uint64_t addr, uint64_t key, void *context)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr,
		.len = len,
		.key = key,
	};
	struct iovec iov = {
		.iov_base = (void*)buf,
		.iov_len = len,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov,
		.desc = &desc,
		.iov_count = 1,
		.addr = dest_addr,
		.rma_iov = &rma_iov,
		.rma_iov_count = 1,
		.context = context,
		.data = 0,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_writemsg(ep_fid, &msg, rxm_ep_tx_flags(rxm_ep));
}

static ssize_t rxm_ep_inject_write(struct fid_ep *ep_fid, const void *buf,
			     size_t len, fi_addr_t dest_addr, uint64_t addr,
			     uint64_t key)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr,
		.len = len,
		.key = key,
	};
	struct iovec iov = {
		.iov_base = (void*)buf,
		.iov_len = len,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov,
		.desc = NULL,
		.iov_count = 1,
		.addr = dest_addr,
		.rma_iov = &rma_iov,
		.rma_iov_count = 1,
		.context = NULL,
		.data = 0,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_writemsg(ep_fid, &msg,
			       (rxm_ep_tx_flags(rxm_ep) & ~FI_COMPLETION) |
			       FI_INJECT);
}

static ssize_t rxm_ep_inject_writedata(struct fid_ep *ep_fid, const void *buf,
				       size_t len, uint64_t data,
				       fi_addr_t dest_addr, uint64_t addr,
				       uint64_t key)
{
	struct fi_rma_iov rma_iov = {
		.addr = addr,
		.len = len,
		.key = key,
	};
	struct iovec iov = {
		.iov_base = (void*)buf,
		.iov_len = len,
	};
	struct fi_msg_rma msg = {
		.msg_iov = &iov,
		.desc = NULL,
		.iov_count = 1,
		.addr = dest_addr,
		.rma_iov = &rma_iov,
		.rma_iov_count = 1,
		.context = NULL,
		.data = data,
	};
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_writemsg(ep_fid, &msg,
			       (rxm_ep_tx_flags(rxm_ep) & ~FI_COMPLETION) |
			       FI_INJECT | FI_REMOTE_CQ_DATA);
}

struct fi_ops_rma rxm_ops_rma = {
	.size = sizeof (struct fi_ops_rma),
	.read = rxm_ep_read,
	.readv = rxm_ep_readv,
	.readmsg = rxm_ep_readmsg,
	.write = rxm_ep_write,
	.writev = rxm_ep_writev,
	.writemsg = rxm_ep_writemsg,
	.inject = rxm_ep_inject_write,
	.writedata = rxm_ep_writedata,
	.injectdata = rxm_ep_inject_writedata,
};
Example #8
ssize_t rxm_ep_tsendv(struct fid_ep *ep_fid, const struct iovec *iov, void **desc,
		       size_t count, fi_addr_t dest_addr, uint64_t tag, void *context)
{
	return rxm_ep_send_common(ep_fid, iov, desc, count, dest_addr, context,
			0, tag, rxm_ep_tx_flags(ep_fid), ofi_op_tagged);
}
Example #9
static ssize_t rxm_ep_sendv(struct fid_ep *ep_fid, const struct iovec *iov,
		void **desc, size_t count, fi_addr_t dest_addr, void *context)
{
	return rxm_ep_send_common(ep_fid, iov, desc, count, dest_addr, context,
			0, 0, rxm_ep_tx_flags(ep_fid), ofi_op_msg);
}
Example #10
static ssize_t
rxm_ep_atomic_writev(struct fid_ep *ep_fid, const struct fi_ioc *iov,
		     void **desc, size_t count, fi_addr_t dest_addr,
		     uint64_t addr, uint64_t key, enum fi_datatype datatype,
		     enum fi_op op, void *context)
{
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);
	struct fi_rma_ioc rma_iov = {
		.addr = addr,
		.count = ofi_total_ioc_cnt(iov, count),
		.key = key,
	};
	struct fi_msg_atomic msg = {
		.msg_iov = iov,
		.desc = desc,
		.iov_count = count,
		.addr = dest_addr,
		.rma_iov = &rma_iov,
		.rma_iov_count = 1,
		.datatype = datatype,
		.op = op,
		.context = context,
		.data = 0,
	};

	return rxm_ep_generic_atomic_writemsg(rxm_ep, &msg, rxm_ep_tx_flags(rxm_ep));
}

static ssize_t
rxm_ep_atomic_write(struct fid_ep *ep_fid, const void *buf, size_t count,
		    void *desc, fi_addr_t dest_addr, uint64_t addr,
		    uint64_t key, enum fi_datatype datatype, enum fi_op op,
		    void *context)
{
	const struct fi_ioc iov = {
		.addr = (void *) buf,
		.count = count,
	};

	return rxm_ep_atomic_writev(ep_fid, &iov, &desc, 1, dest_addr, addr,
				    key, datatype, op, context);
}

static ssize_t
rxm_ep_atomic_inject(struct fid_ep *ep_fid, const void *buf, size_t count,
		     fi_addr_t dest_addr, uint64_t addr, uint64_t key,
		     enum fi_datatype datatype, enum fi_op op)
{
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);
	struct fi_ioc msg_iov = {
		.addr = (void *) buf,
		.count = count,
	};
	struct fi_rma_ioc rma_iov = {
		.addr = addr,
		.count = count,
		.key = key,
	};
	struct fi_msg_atomic msg = {
		.msg_iov = &msg_iov,
		.desc = NULL,
		.iov_count = 1,
		.addr = dest_addr,
		.rma_iov = &rma_iov,
		.rma_iov_count = 1,
		.datatype = datatype,
		.op = op,
		.context = NULL,
		.data = 0,
	};


	return rxm_ep_generic_atomic_writemsg(rxm_ep, &msg, FI_INJECT);
}

static ssize_t
rxm_ep_generic_atomic_readwritemsg(struct rxm_ep *rxm_ep,
				   const struct fi_msg_atomic *msg,
				   struct fi_ioc *resultv, void **result_desc,
				   size_t result_count, uint64_t flags)
{
	int ret;
	struct rxm_conn *rxm_conn;

	ret = rxm_ep_prepare_tx(rxm_ep, msg->addr, &rxm_conn);
	if (OFI_UNLIKELY(ret))
		return ret;

	return rxm_ep_atomic_common(rxm_ep, rxm_conn, msg, NULL, NULL, 0,
				    resultv, result_desc, result_count,
				    ofi_op_atomic_fetch, flags);
}

static ssize_t
rxm_ep_atomic_readwritemsg(struct fid_ep *ep_fid,
			   const struct fi_msg_atomic *msg,
			   struct fi_ioc *resultv, void **result_desc,
			   size_t result_count, uint64_t flags)
{

	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_generic_atomic_readwritemsg(rxm_ep, msg,
			resultv, result_desc, result_count,
			flags | rxm_ep->util_ep.tx_msg_flags);
}

static ssize_t
rxm_ep_atomic_readwritev(struct fid_ep *ep_fid, const struct fi_ioc *iov,
		 void **desc, size_t count, struct fi_ioc *resultv,
		 void **result_desc, size_t result_count, fi_addr_t dest_addr,
		 uint64_t addr, uint64_t key, enum fi_datatype datatype,
		 enum fi_op op, void *context)
{
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);
	struct fi_rma_ioc rma_iov = {
		.addr = addr,
		.count = ofi_total_ioc_cnt(iov, count),
		.key = key,
	};
	struct fi_msg_atomic msg = {
		.msg_iov = iov,
		.desc = desc,
		.iov_count = count,
		.addr = dest_addr,
		.rma_iov = &rma_iov,
		.rma_iov_count = 1,
		.datatype = datatype,
		.op = op,
		.context = context,
		.data = 0,
	};

	return rxm_ep_generic_atomic_readwritemsg(rxm_ep, &msg, resultv,
			result_desc, result_count, rxm_ep_tx_flags(rxm_ep));
}

static ssize_t
rxm_ep_atomic_readwrite(struct fid_ep *ep_fid, const void *buf, size_t count,
			void *desc, void *result, void *result_desc,
			fi_addr_t dest_addr, uint64_t addr, uint64_t key,
			enum fi_datatype datatype, enum fi_op op, void *context)
{
	struct fi_ioc iov = {
		.addr = (op == FI_ATOMIC_READ) ? NULL : (void *) buf,
		.count = count,
	};
	struct fi_ioc result_iov = {
		.addr = result,
		.count = count,
	};

	if (!buf && op != FI_ATOMIC_READ)
		return -FI_EINVAL;

	return rxm_ep_atomic_readwritev(ep_fid, &iov, &desc, 1, &result_iov,
					&result_desc, 1, dest_addr, addr, key,
					datatype, op, context);
}

static ssize_t
rxm_ep_generic_atomic_compwritemsg(struct rxm_ep *rxm_ep,
				   const struct fi_msg_atomic *msg,
				   const struct fi_ioc *comparev, void **compare_desc,
				   size_t compare_count, struct fi_ioc *resultv,
				   void **result_desc, size_t result_count,
				   uint64_t flags)
{
	int ret;
	struct rxm_conn *rxm_conn;

	ret = rxm_ep_prepare_tx(rxm_ep, msg->addr, &rxm_conn);
	if (OFI_UNLIKELY(ret))
		return ret;

	return rxm_ep_atomic_common(rxm_ep, rxm_conn, msg, comparev,
				    compare_desc, compare_count, resultv,
				    result_desc, result_count,
				    ofi_op_atomic_compare, flags);
}

static ssize_t
rxm_ep_atomic_compwritemsg(struct fid_ep *ep_fid,
			   const struct fi_msg_atomic *msg,
			   const struct fi_ioc *comparev, void **compare_desc,
			   size_t compare_count, struct fi_ioc *resultv,
			   void **result_desc, size_t result_count,
			   uint64_t flags)
{
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);

	return rxm_ep_generic_atomic_compwritemsg(rxm_ep, msg, comparev,
				    compare_desc, compare_count, resultv,
				    result_desc, result_count,
				    flags | rxm_ep->util_ep.tx_msg_flags);
}

static ssize_t
rxm_ep_atomic_compwritev(struct fid_ep *ep_fid, const struct fi_ioc *iov,
		 void **desc, size_t count, const struct fi_ioc *comparev,
		 void **compare_desc, size_t compare_count,
		 struct fi_ioc *resultv, void **result_desc,
		 size_t result_count, fi_addr_t dest_addr, uint64_t addr,
		 uint64_t key, enum fi_datatype datatype, enum fi_op op,
		 void *context)
{
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid.fid);
	struct fi_rma_ioc rma_iov = {
		.addr = addr,
		.count = ofi_total_ioc_cnt(iov, count),
		.key = key,
	};
	struct fi_msg_atomic msg = {
		.msg_iov = iov,
		.desc = desc,
		.iov_count = count,
		.addr = dest_addr,
		.rma_iov = &rma_iov,
		.rma_iov_count = 1,
		.datatype = datatype,
		.op = op,
		.context = context,
		.data = 0,
	};

	return rxm_ep_generic_atomic_compwritemsg(rxm_ep, &msg, comparev,
			compare_desc, compare_count, resultv, result_desc,
			result_count, rxm_ep_tx_flags(rxm_ep));
}

static ssize_t
rxm_ep_atomic_compwrite(struct fid_ep *ep_fid, const void *buf, size_t count,
			void *desc, const void *compare, void *compare_desc,
			void *result, void *result_desc, fi_addr_t dest_addr,
			uint64_t addr, uint64_t key, enum fi_datatype datatype,
			enum fi_op op, void *context)
{
	struct fi_ioc iov = {
		.addr = (void *) buf,
		.count = count,
	};
	struct fi_ioc resultv = {
		.addr = result,
		.count = count,
	};
	struct fi_ioc comparev = {
		.addr = (void *) compare,
		.count = count,
	};

	return rxm_ep_atomic_compwritev(ep_fid, &iov, &desc, 1,
					&comparev, &compare_desc, 1,
					&resultv, &result_desc, 1,
					dest_addr, addr, key,
					datatype, op, context);
}

int rxm_ep_query_atomic(struct fid_domain *domain, enum fi_datatype datatype,
			enum fi_op op, struct fi_atomic_attr *attr,
			uint64_t flags)
{
	struct rxm_domain *rxm_domain = container_of(domain,
						     struct rxm_domain,
						     util_domain.domain_fid);
	size_t tot_size;
	int ret;

	if (flags & FI_TAGGED) {
		FI_WARN(&rxm_prov, FI_LOG_EP_DATA,
			"tagged atomic op not supported\n");
		return -FI_EINVAL;
	}

	ret = ofi_atomic_valid(&rxm_prov, datatype, op, flags);
	if (ret || !attr)
		return ret;

	tot_size = flags & FI_COMPARE_ATOMIC ?
		rxm_domain->max_atomic_size / 2 : rxm_domain->max_atomic_size;
	attr->size = ofi_datatype_size(datatype);
	attr->count = tot_size / attr->size;

	return FI_SUCCESS;
}

static int rxm_ep_atomic_valid(struct fid_ep *ep_fid, enum fi_datatype datatype,
			       enum fi_op op, size_t *count)
{
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid);
	struct fi_atomic_attr attr;
	int ret;

	ret = rxm_ep_query_atomic(&rxm_ep->util_ep.domain->domain_fid,
				  datatype, op, &attr, 0);
	if (!ret)
		*count = attr.count;

	return ret;
}

static int rxm_ep_atomic_fetch_valid(struct fid_ep *ep_fid,
				     enum fi_datatype datatype, enum fi_op op,
				     size_t *count)
{
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid);
	struct fi_atomic_attr attr;
	int ret;

	ret = rxm_ep_query_atomic(&rxm_ep->util_ep.domain->domain_fid,
				  datatype, op, &attr, FI_FETCH_ATOMIC);
	if (!ret)
		*count = attr.count;

	return ret;
}

static int rxm_ep_atomic_cswap_valid(struct fid_ep *ep_fid,
				     enum fi_datatype datatype, enum fi_op op,
				     size_t *count)
{
	struct rxm_ep *rxm_ep = container_of(ep_fid, struct rxm_ep,
					     util_ep.ep_fid);
	struct fi_atomic_attr attr;
	int ret;

	ret = rxm_ep_query_atomic(&rxm_ep->util_ep.domain->domain_fid,
				  datatype, op, &attr, FI_COMPARE_ATOMIC);
	if (!ret)
		*count = attr.count;

	return ret;
}

struct fi_ops_atomic rxm_ops_atomic = {
	.size = sizeof(struct fi_ops_atomic),
	.write = rxm_ep_atomic_write,
	.writev = rxm_ep_atomic_writev,
	.writemsg = rxm_ep_atomic_writemsg,
	.inject = rxm_ep_atomic_inject,
	.readwrite = rxm_ep_atomic_readwrite,
	.readwritev = rxm_ep_atomic_readwritev,
	.readwritemsg = rxm_ep_atomic_readwritemsg,
	.compwrite = rxm_ep_atomic_compwrite,
	.compwritev = rxm_ep_atomic_compwritev,
	.compwritemsg = rxm_ep_atomic_compwritemsg,
	.writevalid = rxm_ep_atomic_valid,
	.readwritevalid = rxm_ep_atomic_fetch_valid,
	.compwritevalid = rxm_ep_atomic_cswap_valid,
};
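
/*
 * A minimal caller-side sketch (not taken from the rxm source) of the atomic
 * paths wired up in rxm_ops_atomic above. It assumes an already-configured
 * endpoint `ep`, a registered local descriptor `desc`, and a remote 64-bit
 * counter exposed at `raddr`/`rkey`; the names are illustrative only.
 */
#include <rdma/fabric.h>
#include <rdma/fi_atomic.h>

static ssize_t rxm_atomic_usage_sketch(struct fid_ep *ep, fi_addr_t peer,
				       uint64_t *value, void *desc,
				       uint64_t raddr, uint64_t rkey,
				       void *context)
{
	size_t count = 0;
	int ret;

	/* Query support first; lands in rxm_ep_atomic_valid(), which calls
	 * rxm_ep_query_atomic() to derive the per-operation element limit. */
	ret = fi_atomicvalid(ep, FI_UINT64, FI_SUM, &count);
	if (ret)
		return ret;
	if (!count)
		return -FI_ENOSYS;

	/* Single-element atomic add; dispatches to rxm_ep_atomic_write() and
	 * from there to rxm_ep_atomic_writev(). */
	return fi_atomic(ep, value, 1, desc, peer, raddr, rkey,
			 FI_UINT64, FI_SUM, context);
}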