Example #1
static inline int rxm_finish_sar_segment_send(struct rxm_ep *rxm_ep, struct rxm_tx_sar_buf *tx_buf)
{
	int ret = FI_SUCCESS;
	struct rxm_tx_sar_buf *first_tx_buf;

	switch (rxm_sar_get_seg_type(&tx_buf->pkt.ctrl_hdr)) {
	case RXM_SAR_SEG_FIRST:
		/* The first segment's buffer must outlive the transfer: its
		 * pool index doubles as the msg_id carried by every segment,
		 * so it is freed only when the last segment completes. */
		break;
	case RXM_SAR_SEG_MIDDLE:
		ofi_buf_free(tx_buf);
		break;
	case RXM_SAR_SEG_LAST:
		/* Whole message done: report the completion, then release
		 * both the last and the deferred first segment buffers. */
		ret = rxm_cq_tx_comp_write(rxm_ep, ofi_tx_cq_flags(tx_buf->pkt.hdr.op),
					   tx_buf->app_context, tx_buf->flags);

		assert(ofi_tx_cq_flags(tx_buf->pkt.hdr.op) & FI_SEND);
		ofi_ep_tx_cntr_inc(&rxm_ep->util_ep);
		first_tx_buf = ofi_bufpool_get_ibuf(
				rxm_ep->buf_pools[RXM_BUF_POOL_TX_SAR].pool,
				tx_buf->pkt.ctrl_hdr.msg_id);
		ofi_buf_free(first_tx_buf);
		ofi_buf_free(tx_buf);
		break;
	}

	return ret;
}
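The subtlety above is that the first segment's buffer outlives its own send: its pool index travels in ctrl_hdr.msg_id with every segment, so the handler for the last segment can look it up and release it. Below is a minimal, self-contained sketch of that bookkeeping, using hypothetical seg_buf/buf_pool types in place of the rxm ones; it is illustrative only, not the libfabric API.

#include <assert.h>
#include <stddef.h>

enum seg_type { SEG_FIRST, SEG_MIDDLE, SEG_LAST };

struct seg_buf {
	enum seg_type type;
	size_t first_index;	/* pool index of the first segment's buffer */
};

struct buf_pool {
	struct seg_buf *bufs;
	int *in_use;
	size_t count;
};

static struct seg_buf *pool_get(struct buf_pool *pool, size_t index)
{
	assert(index < pool->count && pool->in_use[index]);
	return &pool->bufs[index];
}

static void pool_free(struct buf_pool *pool, struct seg_buf *buf)
{
	pool->in_use[buf - pool->bufs] = 0;
}

/* Mirrors the switch in rxm_finish_sar_segment_send: middle segments are
 * freed as they complete, the first is deferred until the last arrives. */
static void finish_segment(struct buf_pool *pool, struct seg_buf *buf)
{
	switch (buf->type) {
	case SEG_FIRST:
		break;		/* freed later, via its recorded index */
	case SEG_MIDDLE:
		pool_free(pool, buf);
		break;
	case SEG_LAST:
		pool_free(pool, pool_get(pool, buf->first_index));
		pool_free(pool, buf);
		break;
	}
}

int main(void)
{
	struct seg_buf bufs[3] = {
		{ SEG_FIRST, 0 }, { SEG_MIDDLE, 0 }, { SEG_LAST, 0 }
	};
	int in_use[3] = { 1, 1, 1 };
	struct buf_pool pool = { bufs, in_use, 3 };

	finish_segment(&pool, &bufs[0]);	/* deferred */
	finish_segment(&pool, &bufs[1]);	/* freed immediately */
	finish_segment(&pool, &bufs[2]);	/* frees itself and the first */
	assert(!in_use[0] && !in_use[1] && !in_use[2]);
	return 0;
}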
Example #2
static ssize_t
mrail_tsend_common(struct fid_ep *ep_fid, const struct iovec *iov, void **desc,
		   size_t count, size_t len, fi_addr_t dest_addr, uint64_t tag,
		   uint64_t data, void *context, uint64_t flags)
{
	struct mrail_ep *mrail_ep = container_of(ep_fid, struct mrail_ep,
						 util_ep.ep_fid.fid);
	struct mrail_peer_info *peer_info;
	/* One extra iov slot for the mrail header prepended to the payload */
	struct iovec *iov_dest = alloca(sizeof(*iov_dest) * (count + 1));
	struct mrail_tx_buf *tx_buf;
	uint32_t i = mrail_get_tx_rail(mrail_ep);
	struct fi_msg msg;
	ssize_t ret;

	peer_info = ofi_av_get_addr(mrail_ep->util_ep.av, (int) dest_addr);

	ofi_ep_lock_acquire(&mrail_ep->util_ep);

	/* Claim the next sequence number; undone at err1 if the send fails. */
	tx_buf = mrail_get_tx_buf(mrail_ep, context, peer_info->seq_no++,
				  ofi_op_tagged, flags | FI_TAGGED);
	if (OFI_UNLIKELY(!tx_buf)) {
		ret = -FI_ENOMEM;
		goto err1;
	}
	tx_buf->hdr.tag = tag;
	mrail_copy_iov_hdr(&tx_buf->hdr, iov_dest, iov, count);

	msg.msg_iov 	= iov_dest;
	msg.desc    	= desc;
	msg.iov_count	= count + 1;
	msg.addr	= dest_addr;
	msg.context	= tx_buf;
	msg.data	= data;

	/* Payload fits the rail's inject limit: ask the provider to copy it
	 * inline so the source buffers may be reused as soon as we return. */
	if (len < mrail_ep->rails[i].info->tx_attr->inject_size)
		flags |= FI_INJECT;

	FI_DBG(&mrail_prov, FI_LOG_EP_DATA, "Posting tsend of length: %zu"
	       " dest_addr: 0x%" PRIx64 " tag: 0x%" PRIx64 " seq: %d"
	       " on rail: %" PRIu32 "\n", len, dest_addr, tag,
	       peer_info->seq_no - 1, i);

	ret = fi_sendmsg(mrail_ep->rails[i].ep, &msg, flags);
	if (ret) {
		FI_WARN(&mrail_prov, FI_LOG_EP_DATA,
			"Unable to fi_sendmsg on rail: %" PRIu32 "\n", i);
		goto err2;
	} else if (!(flags & FI_COMPLETION)) {
		/* No CQ entry was requested, so account for the send on the
		 * counter right away. */
		ofi_ep_tx_cntr_inc(&mrail_ep->util_ep);
	}
	ofi_ep_lock_release(&mrail_ep->util_ep);
	return ret;
err2:
	util_buf_release(mrail_ep->tx_buf_pool, tx_buf);
err1:
	peer_info->seq_no--;	/* give back the sequence number claimed above */
	ofi_ep_lock_release(&mrail_ep->util_ep);
	return ret;
}
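Two details here are easy to miss: the sequence number is claimed (post-incremented) before a step that can still fail, so the error path has to unwind in reverse order, under the same endpoint lock, before returning. The sketch below isolates that lock/claim/allocate/post shape; alloc_buf, post_send, and the bare seq_no counter are hypothetical stand-ins, not the mrail API.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ep_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned seq_no;
static char pool_slot;

static void *alloc_buf(void) { return &pool_slot; }	/* stub: succeeds */
static void free_buf(void *buf) { (void) buf; }
static int post_send(void *buf) { (void) buf; return 0; }	/* stub */

static int send_common(void)
{
	void *buf;
	int ret;

	pthread_mutex_lock(&ep_lock);
	seq_no++;			/* step 1: claim a sequence number */

	buf = alloc_buf();		/* step 2: get a tx buffer */
	if (!buf) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = post_send(buf);		/* step 3: hand off to a rail */
	if (ret)
		goto err2;

	pthread_mutex_unlock(&ep_lock);
	return 0;
err2:
	free_buf(buf);			/* undo step 2 */
err1:
	seq_no--;			/* undo step 1 */
	pthread_mutex_unlock(&ep_lock);
	return ret;
}

int main(void)
{
	printf("send_common: %d\n", send_common());
	return 0;
}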
Example #3
static inline int rxm_finish_eager_send(struct rxm_ep *rxm_ep, struct rxm_tx_eager_buf *tx_buf)
{
	int ret = rxm_cq_tx_comp_write(rxm_ep, ofi_tx_cq_flags(tx_buf->pkt.hdr.op),
				       tx_buf->app_context, tx_buf->flags);

	assert(ofi_tx_cq_flags(tx_buf->pkt.hdr.op) & FI_SEND);
	ofi_ep_tx_cntr_inc(&rxm_ep->util_ep);

	return ret;
}
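The assert in these finish paths leans on ofi_tx_cq_flags() deriving the completion flags purely from the packet's op code, so every send-class op must map to a flag set containing FI_SEND. Below is a hypothetical mini-version of such an op-to-flags mapping; the enum and flag values are illustrative, not the real libfabric definitions.

#include <assert.h>
#include <stdint.h>

#define FLAG_MSG	(1ull << 0)
#define FLAG_TAGGED	(1ull << 1)
#define FLAG_RMA	(1ull << 2)
#define FLAG_SEND	(1ull << 3)
#define FLAG_WRITE	(1ull << 4)

enum op { OP_MSG, OP_TAGGED, OP_WRITE };

/* Illustrative op -> completion-flags mapping: the flags depend only on
 * the op code, which is what makes the FI_SEND-style assert safe. */
static uint64_t tx_cq_flags(enum op op)
{
	switch (op) {
	case OP_MSG:
		return FLAG_SEND | FLAG_MSG;
	case OP_TAGGED:
		return FLAG_SEND | FLAG_TAGGED;
	case OP_WRITE:
		return FLAG_RMA | FLAG_WRITE;
	}
	return 0;
}

int main(void)
{
	assert(tx_cq_flags(OP_MSG) & FLAG_SEND);
	assert(tx_cq_flags(OP_TAGGED) & FLAG_SEND);
	assert(!(tx_cq_flags(OP_WRITE) & FLAG_SEND));
	return 0;
}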
Example #4
static int rxm_rndv_tx_finish(struct rxm_ep *rxm_ep, struct rxm_tx_rndv_buf *tx_buf)
{
	int ret;

	RXM_LOG_STATE_TX(FI_LOG_CQ, tx_buf, RXM_RNDV_FINISH);
	tx_buf->hdr.state = RXM_RNDV_FINISH;
	/* Return the rendezvous credit consumed when this transfer started. */
	tx_buf->conn->rndv_tx_credits++;

	/* These MRs were registered internally (the app supplied no local
	 * descriptors), so rxm closes them now that the transfer is done. */
	if (!rxm_ep->rxm_mr_local)
		rxm_ep_msg_mr_closev(tx_buf->mr, tx_buf->count);

	ret = rxm_cq_tx_comp_write(rxm_ep, ofi_tx_cq_flags(tx_buf->pkt.hdr.op),
				   tx_buf->app_context, tx_buf->flags);

	assert(ofi_tx_cq_flags(tx_buf->pkt.hdr.op) & FI_SEND);
	ofi_ep_tx_cntr_inc(&rxm_ep->util_ep);

	ofi_buf_free(tx_buf);

	return ret;
}
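The rndv_tx_credits++ above is the release half of a credit scheme: starting a rendezvous send consumes a credit on the connection, and finishing returns it, which bounds the number of rendezvous transfers in flight per connection. A minimal sketch of that accounting follows, with a hypothetical conn type; the names are illustrative, not the rxm API.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct conn {
	unsigned rndv_tx_credits;
};

static bool rndv_start(struct conn *c)
{
	if (!c->rndv_tx_credits)
		return false;		/* no credit: caller must defer */
	c->rndv_tx_credits--;
	return true;
}

static void rndv_finish(struct conn *c)
{
	c->rndv_tx_credits++;	/* mirror of tx_buf->conn->rndv_tx_credits++ */
}

int main(void)
{
	struct conn c = { .rndv_tx_credits = 1 };

	assert(rndv_start(&c));		/* consumes the only credit */
	assert(!rndv_start(&c));	/* second send must wait */
	rndv_finish(&c);		/* completion returns the credit */
	assert(rndv_start(&c));
	puts("credit accounting ok");
	return 0;
}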