Example #1
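/*
 * Single-buffer send on a UDP endpoint: resolves the destination through the
 * address vector, issues sendto(), and reports a TX completion on success.
 * Returns -FI_EAGAIN when the TX completion ring is full, or -errno on a
 * socket error.
 */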
ssize_t udpx_send(struct fid_ep *ep_fid, const void *buf, size_t len, void *desc,
		fi_addr_t dest_addr, void *context)
{
	struct udpx_ep *ep;
	ssize_t ret;

	ep = container_of(ep_fid, struct udpx_ep, util_ep.ep_fid.fid);
	fastlock_acquire(&ep->util_ep.tx_cq->cq_lock);
	if (ofi_cirque_isfull(ep->util_ep.tx_cq->cirq)) {
		ret = -FI_EAGAIN;
		goto out;
	}

	ret = sendto(ep->sock, buf, len, 0,
		     ip_av_get_addr(ep->util_ep.av, dest_addr),
		     ep->util_ep.av->addrlen);
	if (ret == len) {
		ep->tx_comp(ep, context);
		ret = 0;
	} else {
		ret = -errno;
	}
out:
	fastlock_release(&ep->util_ep.tx_cq->cq_lock);
	return ret;
}
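A minimal caller sketch (not part of the provider source), assuming the application has already opened the endpoint and resolved the destination address; it shows how this path is reached through the generic fi_send() call and how the -FI_EAGAIN return is commonly handled:

ssize_t ret;

/* Retry while the provider reports a full TX completion queue. */
do {
	ret = fi_send(ep, buf, len, NULL, dest_addr, NULL);
} while (ret == -FI_EAGAIN);

if (ret)
	fprintf(stderr, "fi_send failed: %s\n", fi_strerror((int) -ret));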
Example #2
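/*
 * Scatter-gather send on a UDP endpoint: builds a struct msghdr from the
 * fi_msg iovec array and the destination's AV entry, sends it with sendmsg(),
 * and reports a TX completion on success. Returns -FI_EAGAIN when the TX
 * completion ring is full.
 */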
ssize_t udpx_sendmsg(struct fid_ep *ep_fid, const struct fi_msg *msg,
		uint64_t flags)
{
	struct udpx_ep *ep;
	struct msghdr hdr;
	ssize_t ret;

	ep = container_of(ep_fid, struct udpx_ep, util_ep.ep_fid.fid);
	hdr.msg_name = ip_av_get_addr(ep->util_ep.av, msg->addr);
	hdr.msg_namelen = ep->util_ep.av->addrlen;
	hdr.msg_iov = (struct iovec *) msg->msg_iov;
	hdr.msg_iovlen = msg->iov_count;
	hdr.msg_control = NULL;
	hdr.msg_controllen = 0;
	hdr.msg_flags = 0;

	fastlock_acquire(&ep->util_ep.tx_cq->cq_lock);
	if (ofi_cirque_isfull(ep->util_ep.tx_cq->cirq)) {
		ret = -FI_EAGAIN;
		goto out;
	}

	ret = sendmsg(ep->sock, &hdr, 0);
	if (ret >= 0) {
		ep->tx_comp(ep, msg->context);
		ret = 0;
	} else {
		ret = -errno;
	}
out:
	fastlock_release(&ep->util_ep.tx_cq->cq_lock);
	return ret;
}
Example #3
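/*
 * Posts a scatter-gather receive: copies the caller's iovec array and context
 * into the next free slot of the endpoint's receive ring (rxq). Returns
 * -FI_EAGAIN if the ring is full; the data itself is delivered later when the
 * endpoint is progressed.
 */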
ssize_t udpx_recvmsg(struct fid_ep *ep_fid, const struct fi_msg *msg,
		uint64_t flags)
{
	struct udpx_ep *ep;
	struct udpx_ep_entry *entry;
	ssize_t ret;

	ep = container_of(ep_fid, struct udpx_ep, util_ep.ep_fid.fid);
	fastlock_acquire(&ep->util_ep.rx_cq->cq_lock);
	if (ofi_cirque_isfull(ep->rxq)) {
		ret = -FI_EAGAIN;
		goto out;
	}

	entry = ofi_cirque_tail(ep->rxq);
	entry->context = msg->context;
	for (entry->iov_count = 0; entry->iov_count < msg->iov_count;
	     entry->iov_count++) {
		entry->iov[entry->iov_count] = msg->msg_iov[entry->iov_count];
	}
	entry->flags = 0;

	ofi_cirque_commit(ep->rxq);
	ret = 0;
out:
	fastlock_release(&ep->util_ep.rx_cq->cq_lock);
	return ret;
}
Example #4
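/*
 * Single-buffer variant of the receive above: records one iovec (buf/len)
 * plus the caller's context in the next free rxq slot, or returns -FI_EAGAIN
 * if the ring is full.
 */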
ssize_t udpx_recv(struct fid_ep *ep_fid, void *buf, size_t len, void *desc,
		fi_addr_t src_addr, void *context)
{
	struct udpx_ep *ep;
	struct udpx_ep_entry *entry;
	ssize_t ret;

	ep = container_of(ep_fid, struct udpx_ep, util_ep.ep_fid.fid);
	fastlock_acquire(&ep->util_ep.rx_cq->cq_lock);
	if (ofi_cirque_isfull(ep->rxq)) {
		ret = -FI_EAGAIN;
		goto out;
	}

	entry = ofi_cirque_tail(ep->rxq);
	entry->context = context;
	entry->iov_count = 1;
	entry->iov[0].iov_base = buf;
	entry->iov[0].iov_len = len;
	entry->flags = 0;

	ofi_cirque_commit(ep->rxq);
	ret = 0;
out:
	fastlock_release(&ep->util_ep.rx_cq->cq_lock);
	return ret;
}
Example #5
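/*
 * Checks the unexpected-message list for a message matching a newly posted
 * receive (by source address, tag, and ignore mask). On a match, the buffered
 * rx_buf is attached to the receive entry and delivered through
 * rxm_cq_handle_data. Returns -FI_EAGAIN if the CQ ring is full and 0 if
 * nothing matched.
 */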
static int rxm_check_unexp_msg_list(struct util_cq *util_cq, struct rxm_recv_queue *recv_queue,
		struct rxm_recv_entry *recv_entry, dlist_func_t *match)
{
	struct dlist_entry *entry;
	struct rxm_unexp_msg *unexp_msg;
	struct rxm_recv_match_attr match_attr;
	struct rxm_rx_buf *rx_buf;
	int ret = 0;

	fastlock_acquire(&util_cq->cq_lock);
	if (ofi_cirque_isfull(util_cq->cirq)) {
		ret = -FI_EAGAIN;
		goto out;
	}

	match_attr.addr = recv_entry->addr;
	match_attr.tag = recv_entry->tag;
	match_attr.ignore = recv_entry->ignore;

	entry = dlist_remove_first_match(&recv_queue->unexp_msg_list, match, &match_attr);
	if (!entry)
		goto out;
	FI_DBG(&rxm_prov, FI_LOG_EP_DATA, "Match for posted recv found in unexp msg list\n");

	unexp_msg = container_of(entry, struct rxm_unexp_msg, entry);
	rx_buf = container_of(unexp_msg, struct rxm_rx_buf, unexp_msg);
	rx_buf->recv_entry = recv_entry;

	ret = rxm_cq_handle_data(rx_buf);
	free(unexp_msg);
out:
	fastlock_release(&util_cq->cq_lock);
	return ret;
}
Example #6
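/*
 * Writes a context-only completion into the next CQ ring slot; only
 * op_context is filled in. Returns -FI_ENOSPC when the ring is full.
 */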
static int rxd_cq_write_ctx(struct rxd_cq *cq,
			     struct fi_cq_tagged_entry *cq_entry)
{
	struct fi_cq_tagged_entry *comp;
	if (ofi_cirque_isfull(cq->util_cq.cirq))
		return -FI_ENOSPC;

	comp = ofi_cirque_tail(cq->util_cq.cirq);
	comp->op_context = cq_entry->op_context;
	ofi_cirque_commit(cq->util_cq.cirq);
	return 0;
}
Example #7
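/*
 * Copies the full tagged completion entry into the next CQ ring slot.
 * Returns -FI_ENOSPC when the ring is full.
 */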
static int rxd_cq_write_tagged(struct rxd_cq *cq,
				struct fi_cq_tagged_entry *cq_entry)
{
	struct fi_cq_tagged_entry *comp;
	if (ofi_cirque_isfull(cq->util_cq.cirq))
		return -FI_ENOSPC;

	FI_DBG(&rxd_prov, FI_LOG_EP_CTRL,
		"report completion: 0x%" PRIx64 "\n", cq_entry->tag);

	comp = ofi_cirque_tail(cq->util_cq.cirq);
	*comp = *cq_entry;
	ofi_cirque_commit(cq->util_cq.cirq);
	return 0;
}
Example #8
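/*
 * Writes a completion carrying context, flags, length, buffer, and remote CQ
 * data (everything except the tag). Returns -FI_ENOSPC when the ring is full.
 */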
static int rxd_cq_write_data(struct rxd_cq *cq,
			      struct fi_cq_tagged_entry *cq_entry)
{
	struct fi_cq_tagged_entry *comp;
	if (ofi_cirque_isfull(cq->util_cq.cirq))
		return -FI_ENOSPC;

	comp = ofi_cirque_tail(cq->util_cq.cirq);
	comp->op_context = cq_entry->op_context;
	comp->flags = cq_entry->flags;
	comp->len = cq_entry->len;
	comp->buf = cq_entry->buf;
	comp->data = cq_entry->data;
	ofi_cirque_commit(cq->util_cq.cirq);
	return 0;
}
Example #9
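/*
 * Inject path for RMA operations whose payload fits the provider's inline
 * limit: under the endpoint and TX CQ locks, resolves the peer address, sends
 * an RTS handshake if one is needed, builds a TX entry, and transmits it.
 * Returns -FI_EAGAIN if the TX completion ring is full.
 */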
static ssize_t rxd_generic_write_inject(struct rxd_ep *rxd_ep,
		const struct iovec *iov, size_t iov_count,
		const struct fi_rma_iov *rma_iov, size_t rma_count,
		fi_addr_t addr, void *context, uint32_t op, uint64_t data,
		uint32_t rxd_flags)
{
	struct rxd_x_entry *tx_entry;
	fi_addr_t rxd_addr;
	ssize_t ret = -FI_EAGAIN;

	assert(iov_count <= RXD_IOV_LIMIT && rma_count <= RXD_IOV_LIMIT);
	assert(ofi_total_iov_len(iov, iov_count) <= rxd_ep_domain(rxd_ep)->max_inline_rma);

	fastlock_acquire(&rxd_ep->util_ep.lock);
	fastlock_acquire(&rxd_ep->util_ep.tx_cq->cq_lock);

	if (ofi_cirque_isfull(rxd_ep->util_ep.tx_cq->cirq))
		goto out;

	rxd_addr = rxd_ep_av(rxd_ep)->fi_addr_table[addr];
	ret = rxd_send_rts_if_needed(rxd_ep, rxd_addr);
	if (ret)
		goto out;

	tx_entry = rxd_tx_entry_init(rxd_ep, iov, iov_count, NULL, 0, rma_count, data,
				     0, context, rxd_addr, op, rxd_flags);
	if (!tx_entry)
		goto out;

	ret = rxd_ep_send_op(rxd_ep, tx_entry, rma_iov, rma_count, NULL, 0, 0, 0);
	if (ret) {
		rxd_tx_entry_free(rxd_ep, tx_entry);
		goto out;
	}

	if (tx_entry->op == RXD_READ_REQ)
		goto out;

	ret = 0;

out:
	fastlock_release(&rxd_ep->util_ep.tx_cq->cq_lock);
	fastlock_release(&rxd_ep->util_ep.lock);
	return ret;
}
Example #10
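/*
 * Queues a response slot for a fetching atomic: records the caller's result
 * iovec in a pending entry, stamps the command header with the response's
 * offset within the region so the peer can locate it, and commits the
 * response descriptor. The caller must have verified the response queue is
 * not full.
 */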
static void smr_post_fetch_resp(struct smr_ep *ep, struct smr_cmd *cmd,
				const struct iovec *result_iov, size_t count)
{
	struct smr_cmd *pend;
	struct smr_resp *resp;

	assert(!ofi_cirque_isfull(smr_resp_queue(ep->region)));
	resp = ofi_cirque_tail(smr_resp_queue(ep->region));

	cmd->msg.hdr.data = (uint64_t) ((char **) resp -
			    (char **) ep->region);

	pend = freestack_pop(ep->pend_fs);
	smr_post_pend_resp(cmd, pend, resp);
	memcpy(pend->msg.data.iov, result_iov,
	       sizeof(*result_iov) * count);
	pend->msg.data.iov_count = count;

	ofi_cirque_commit(smr_resp_queue(ep->region));
}
Example #11
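/*
 * Common entry point for RMA reads and writes: delegates to the inject path
 * when RXD_INJECT is set, otherwise builds and sends a TX entry under the
 * endpoint and TX CQ locks. Returns -FI_EAGAIN if the TX completion ring is
 * full.
 */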
ssize_t rxd_generic_rma(struct rxd_ep *rxd_ep, const struct iovec *iov,
	size_t iov_count, const struct fi_rma_iov *rma_iov, size_t rma_count,
	void **desc, fi_addr_t addr, void *context, uint32_t op, uint64_t data,
	uint32_t rxd_flags)
{
	struct rxd_x_entry *tx_entry;
	fi_addr_t rxd_addr;
	ssize_t ret = -FI_EAGAIN;

	if (rxd_flags & RXD_INJECT)
		return rxd_generic_write_inject(rxd_ep, iov, iov_count, rma_iov,
						rma_count, addr, context, op,
						data, rxd_flags);

	assert(iov_count <= RXD_IOV_LIMIT && rma_count <= RXD_IOV_LIMIT);

	fastlock_acquire(&rxd_ep->util_ep.lock);
	fastlock_acquire(&rxd_ep->util_ep.tx_cq->cq_lock);

	if (ofi_cirque_isfull(rxd_ep->util_ep.tx_cq->cirq))
		goto out;

	rxd_addr = rxd_ep_av(rxd_ep)->fi_addr_table[addr];
	ret = rxd_send_rts_if_needed(rxd_ep, rxd_addr);
	if (ret)
		goto out;

	tx_entry = rxd_tx_entry_init(rxd_ep, iov, iov_count, NULL, 0, rma_count,
				     data, 0, context, rxd_addr, op, rxd_flags);
	if (!tx_entry)
		goto out;

	ret = rxd_ep_send_op(rxd_ep, tx_entry, rma_iov, rma_count, NULL, 0, 0, 0);
	if (ret)
		rxd_tx_entry_free(rxd_ep, tx_entry);

out:
	fastlock_release(&rxd_ep->util_ep.tx_cq->cq_lock);
	fastlock_release(&rxd_ep->util_ep.lock);
	return ret;
}
Example #12
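/*
 * Allocates a transfer entry from the per-operation buffer pool. Returns NULL
 * either when the CQ ring is already full (so a later completion would have
 * nowhere to go) or when the buffer pool is exhausted.
 */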
struct tcpx_xfer_entry *tcpx_xfer_entry_alloc(struct tcpx_cq *tcpx_cq,
					      enum tcpx_xfer_op_codes type)
{
	struct tcpx_xfer_entry *xfer_entry;

	tcpx_cq->util_cq.cq_fastlock_acquire(&tcpx_cq->util_cq.cq_lock);

	/* optimization: don't allocate queue_entry when cq is full */
	if (ofi_cirque_isfull(tcpx_cq->util_cq.cirq)) {
		tcpx_cq->util_cq.cq_fastlock_release(&tcpx_cq->util_cq.cq_lock);
		return NULL;
	}

	xfer_entry = ofi_buf_alloc(tcpx_cq->buf_pools[type].pool);
	if (!xfer_entry) {
		tcpx_cq->util_cq.cq_fastlock_release(&tcpx_cq->util_cq.cq_lock);
		FI_INFO(&tcpx_prov, FI_LOG_DOMAIN, "failed to get buffer\n");
		return NULL;
	}
	tcpx_cq->util_cq.cq_fastlock_release(&tcpx_cq->util_cq.cq_lock);
	return xfer_entry;
}
Example #13
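/*
 * Common path for shared-memory atomic operations: verifies the peer,
 * reserves two slots in the peer's command queue (the atomic command plus a
 * trailing RMA ioc descriptor), and formats the payload inline or through an
 * inject buffer depending on its size. Fetching atomics either read the
 * result back directly (fast RMA) or post a response for the peer to fill.
 */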
static ssize_t smr_generic_atomic(struct fid_ep *ep_fid,
			const struct fi_ioc *ioc, void **desc, size_t count,
			const struct fi_ioc *compare_ioc, void **compare_desc,
			size_t compare_count, struct fi_ioc *result_ioc,
			void **result_desc, size_t result_count,
			fi_addr_t addr, const struct fi_rma_ioc *rma_ioc,
			size_t rma_count, enum fi_datatype datatype,
			enum fi_op atomic_op, void *context, uint32_t op)
{
	struct smr_ep *ep;
	struct smr_domain *domain;
	struct smr_region *peer_smr;
	struct smr_inject_buf *tx_buf;
	struct smr_cmd *cmd;
	struct iovec iov[SMR_IOV_LIMIT];
	struct iovec compare_iov[SMR_IOV_LIMIT];
	struct iovec result_iov[SMR_IOV_LIMIT];
	int peer_id, err = 0;
	uint16_t flags = 0;
	ssize_t ret = 0;
	size_t msg_len, total_len;

	assert(count <= SMR_IOV_LIMIT);
	assert(result_count <= SMR_IOV_LIMIT);
	assert(compare_count <= SMR_IOV_LIMIT);
	assert(rma_count <= SMR_IOV_LIMIT);

	ep = container_of(ep_fid, struct smr_ep, util_ep.ep_fid.fid);
	domain = container_of(ep->util_ep.domain, struct smr_domain, util_domain);

	peer_id = (int) addr;
	ret = smr_verify_peer(ep, peer_id);
	if (ret)
		return ret;

	peer_smr = smr_peer_region(ep->region, peer_id);
	fastlock_acquire(&peer_smr->lock);
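	/* Two command slots are required: one for the atomic command itself
	 * and one for the RMA ioc descriptor committed at format_rma. */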
	if (peer_smr->cmd_cnt < 2) {
		ret = -FI_EAGAIN;
		goto unlock_region;
	}

	fastlock_acquire(&ep->util_ep.tx_cq->cq_lock);
	if (ofi_cirque_isfull(ep->util_ep.tx_cq->cirq)) {
		ret = -FI_EAGAIN;
		goto unlock_cq;
	}

	cmd = ofi_cirque_tail(smr_cmd_queue(peer_smr));
	msg_len = total_len = ofi_datatype_size(datatype) *
			      ofi_total_ioc_cnt(ioc, count);
	
	switch (op) {
	case ofi_op_atomic_compare:
		assert(compare_ioc);
		ofi_ioc_to_iov(compare_ioc, compare_iov, compare_count,
			       ofi_datatype_size(datatype));
		total_len *= 2;
		/* fall through */
	case ofi_op_atomic_fetch:
		assert(result_ioc);
		ofi_ioc_to_iov(result_ioc, result_iov, result_count,
			       ofi_datatype_size(datatype));
		if (!domain->fast_rma)
			flags |= SMR_RMA_REQ;
		/* fall through */
	case ofi_op_atomic:
		if (atomic_op != FI_ATOMIC_READ) {
			assert(ioc);
			ofi_ioc_to_iov(ioc, iov, count, ofi_datatype_size(datatype));
		} else {
			count = 0;
		}
		break;
	default:
		break;
	}

	if (total_len <= SMR_MSG_DATA_LEN && !(flags & SMR_RMA_REQ)) {
		smr_format_inline_atomic(cmd, smr_peer_addr(ep->region)[peer_id].addr,
					 iov, count, compare_iov, compare_count,
					 op, datatype, atomic_op);
	} else if (total_len <= SMR_INJECT_SIZE) {
		tx_buf = smr_freestack_pop(smr_inject_pool(peer_smr));
		smr_format_inject_atomic(cmd, smr_peer_addr(ep->region)[peer_id].addr,
					 iov, count, result_iov, result_count,
					 compare_iov, compare_count, op, datatype,
					 atomic_op, peer_smr, tx_buf);
	} else {
		FI_WARN(&smr_prov, FI_LOG_EP_CTRL,
			"message too large\n");
		ret = -FI_EINVAL;
		goto unlock_cq;
	}
	cmd->msg.hdr.op_flags |= flags;

	ofi_cirque_commit(smr_cmd_queue(peer_smr));
	peer_smr->cmd_cnt--;

	if (op != ofi_op_atomic) {
		if (flags & SMR_RMA_REQ) {
			smr_post_fetch_resp(ep, cmd,
				(const struct iovec *) result_iov,
				result_count);
			goto format_rma;
		}
		err = smr_fetch_result(ep, peer_smr, result_iov, result_count,
				       rma_ioc, rma_count, datatype, msg_len);
		if (err)
			FI_WARN(&smr_prov, FI_LOG_EP_CTRL,
				"unable to fetch results");
	}

	ret = ep->tx_comp(ep, context, ofi_tx_cq_flags(op), err);
	if (ret) {
		FI_WARN(&smr_prov, FI_LOG_EP_CTRL,
			"unable to process tx completion\n");
	}

format_rma:
	cmd = ofi_cirque_tail(smr_cmd_queue(peer_smr));
	smr_format_rma_ioc(cmd, rma_ioc, rma_count);
	ofi_cirque_commit(smr_cmd_queue(peer_smr));
	peer_smr->cmd_cnt--;
unlock_cq:
	fastlock_release(&ep->util_ep.tx_cq->cq_lock);
unlock_region:
	fastlock_release(&peer_smr->lock);
	return ret;
}