Пример #1
0
/*
 * Handle a receive on a queue servicing a message endpoint
 */
/*
 * Handle a receive on a queue servicing a message endpoint
 *
 * Called once per receive completion.  Validates the sending peer,
 * dispatches protocol control packets (ACK/NAK), and for data segments
 * (FIRST/LAST) copies the payload into the scatter/gather list of the
 * receive request currently being filled.  The hardware buffer is
 * always reposted (the "dropit" path) before returning, whether or not
 * the packet was consumed.
 */
static inline void
usdf_msg_handle_recv(struct usdf_domain *udp, struct usd_completion *comp)
{
	struct rudp_pkt *pkt;
	struct usdf_msg_qe *rqe;	/* receive request being filled */
	struct usdf_ep *ep;
	struct usd_qp *qp;
	struct usdf_rx *rx;
	uint32_t peer_id;
	uint32_t opcode;
	uint8_t *rx_ptr;		/* read cursor into packet payload */
	uint8_t *rqe_ptr;		/* write cursor into current iov */
	size_t cur_iov;
	size_t iov_resid;		/* bytes left in current iov */
	size_t ms_resid;		/* bytes left in whole message */
	size_t rxlen;			/* payload bytes still to copy */
	size_t copylen;
	int ret;

	pkt = comp->uc_context;
	opcode = ntohs(pkt->msg.opcode);
	peer_id = ntohs(pkt->msg.src_peer_id);
	/*
	 * Unknown peer - we cannot find the endpoint, so recover the RX
	 * context from the QP solely so the buffer can be reposted.
	 * NOTE(review): the test uses '>' so peer_id == USDF_MAX_PEERS is
	 * accepted; confirm dom_peer_tab has USDF_MAX_PEERS + 1 entries.
	 */
	if (peer_id > USDF_MAX_PEERS) {
		qp = comp->uc_qp;
		rx = qp->uq_context;
		goto dropit;
	}
	ep = udp->dom_peer_tab[peer_id];
	if (ep == NULL) {
		qp = comp->uc_qp;
		rx = qp->uq_context;
		goto dropit;
	}
	rx = ep->ep_rx;

	/* errored completion: drop (rx is set, so the buffer is reposted) */
	if (comp->uc_status != USD_COMPSTAT_SUCCESS)
		goto dropit;

	switch (opcode) {
	case RUDP_OP_ACK:
		usdf_msg_rx_ack(ep, pkt);
		goto dropit;
	case RUDP_OP_NAK:
		usdf_msg_rx_nak(ep, pkt);
		goto dropit;
	case RUDP_OP_FIRST:
	case RUDP_OP_LAST:
		/* data segment - fall out of the switch and copy payload */
		break;
	default:
		USDF_DBG_SYS(EP_DATA,
				"encountered unexpected opcode %" PRIu32 "\n",
				opcode);
		goto dropit;
	}

	/* out-of-sequence packets are dropped; sender will retransmit */
	ret = usdf_msg_check_seq(ep, pkt);
	if (ret == -1) {
		goto dropit;
	}

	/*
	 * No receive in progress for this endpoint - start filling the
	 * next posted receive request, if any.
	 */
	rqe = ep->e.msg.ep_cur_recv;
	if (rqe == NULL) {
		if (TAILQ_EMPTY(&rx->r.msg.rx_posted_rqe)) {
			goto dropit;
		}
		rqe = TAILQ_FIRST(&rx->r.msg.rx_posted_rqe);
		TAILQ_REMOVE(&rx->r.msg.rx_posted_rqe, rqe, ms_link);
		ep->e.msg.ep_cur_recv = rqe;
	}

	/* payload begins immediately after the packet header */
	rx_ptr = (uint8_t *)(pkt + 1);
	rxlen = ntohs(pkt->msg.m.rc_data.length);
	rqe->ms_length += rxlen;
	/* load the copy cursor saved by the previous segment (if any) */
	rqe_ptr = (uint8_t *)rqe->ms_cur_ptr;
	iov_resid = rqe->ms_iov_resid;
	cur_iov = rqe->ms_cur_iov;
	ms_resid = rqe->ms_resid;
	/* scatter the payload across the iov list */
	while (rxlen > 0) {
		copylen = MIN(rxlen, iov_resid);
		memcpy(rqe_ptr, rx_ptr, copylen);
		rx_ptr += copylen;
		rxlen -= copylen;
		iov_resid -= copylen;
		ms_resid -= copylen;
		if (iov_resid == 0) {
			/* out of iovs with payload left: rxlen > 0 below
			 * marks the message as truncated */
			if (cur_iov == rqe->ms_last_iov) {
				break;
			}
			++cur_iov;
			rqe_ptr = rqe->ms_iov[cur_iov].iov_base;
			iov_resid = rqe->ms_iov[cur_iov].iov_len;
		} else {
			rqe_ptr += copylen;
		}
	}

	/*
	 * Bitwise test: opcode appears to be a flag field, so a
	 * single-segment message presumably carries both FIRST and LAST
	 * bits - TODO confirm against the RUDP opcode definitions.
	 */
	if (opcode & RUDP_OP_LAST) {
		/*
		* Normally we need to store back the updated values of
		* ms_resid, ms_cur_iov, ms_cur_ptr and ms_iov_resid. But
		* being the last step of the process, updating these
		* values are not necessary
		*/
		if (rxlen > 0) {
			/* leftover bytes didn't fit: report truncation and
			 * don't count them in the delivered length */
			USDF_DBG_SYS(EP_DATA, "message truncated by %zu bytes",
					rxlen);
			rqe->ms_length -= rxlen;
			usdf_msg_recv_complete(ep, rqe, FI_ETRUNC);
		} else {
			usdf_msg_recv_complete(ep, rqe, FI_SUCCESS);
		}

		ep->e.msg.ep_cur_recv = NULL;
	} else {
		/* mid-message: save the cursor for the next segment */
		rqe->ms_cur_ptr = rqe_ptr;
		rqe->ms_iov_resid = iov_resid;
		rqe->ms_cur_iov = cur_iov;
		rqe->ms_resid = ms_resid;
	}

dropit:
	/* repost buffer */
	_usdf_msg_post_recv(rx, pkt,
			rx->rx_domain->dom_fabric->fab_dev_attrs->uda_mtu);
}
Пример #2
0
/*
 * Handle a receive on a queue servicing a message endpoint
 */
/*
 * Handle a receive on a queue servicing a message endpoint
 *
 * Called once per receive completion.  Validates the sending peer,
 * dispatches protocol control packets (ACK/NAK), and for data segments
 * (FIRST/LAST) copies the payload into the scatter/gather list of the
 * receive request currently being filled.  The hardware buffer is
 * always reposted (the "dropit" path) before returning.
 */
static inline void
usdf_msg_handle_recv(struct usdf_domain *udp, struct usd_completion *comp)
{
	struct rudp_pkt *pkt;
	struct usdf_msg_qe *rqe;	/* receive request being filled */
	struct usdf_ep *ep;
	struct usd_qp *qp;
	struct usdf_rx *rx;
	uint32_t peer_id;
	uint32_t opcode;
	uint8_t *rx_ptr;		/* read cursor into packet payload */
	uint8_t *rqe_ptr;		/* write cursor into current iov */
	size_t cur_iov;
	size_t iov_resid;		/* bytes left in current iov */
	size_t rxlen;			/* payload bytes still to copy */
	size_t copylen;
	int ret;

	pkt = comp->uc_context;
	opcode = ntohs(pkt->msg.opcode);
	peer_id = ntohs(pkt->msg.src_peer_id);
	/*
	 * Unknown peer - we cannot find the endpoint, so recover the RX
	 * context from the QP solely so the buffer can be reposted.
	 * NOTE(review): the test uses '>' so peer_id == USDF_MAX_PEERS is
	 * accepted; confirm dom_peer_tab has USDF_MAX_PEERS + 1 entries.
	 */
	if (peer_id > USDF_MAX_PEERS) {
		qp = comp->uc_qp;
		rx = qp->uq_context;
		goto dropit;
	}
	ep = udp->dom_peer_tab[peer_id];
	if (ep == NULL) {
		qp = comp->uc_qp;
		rx = qp->uq_context;
		goto dropit;
	}
	rx = ep->ep_rx;

	switch (opcode) {
	case RUDP_OP_ACK:
		usdf_msg_rx_ack(ep, pkt);
		break;

	case RUDP_OP_NAK:
		usdf_msg_rx_nak(ep, pkt);
		break;

	case RUDP_OP_FIRST:
		/* out-of-sequence: drop, sender will retransmit */
		ret = usdf_msg_check_seq(ep, pkt);
		if (ret == -1) {
			goto dropit;
		}

		/*
		 * No receive in progress - start filling the next posted
		 * receive request, if any.
		 */
		rqe = ep->e.msg.ep_cur_recv;
		if (rqe == NULL) {
			if (TAILQ_EMPTY(&rx->r.msg.rx_posted_rqe)) {
				goto dropit;
			}
			rqe = TAILQ_FIRST(&rx->r.msg.rx_posted_rqe);
			TAILQ_REMOVE(&rx->r.msg.rx_posted_rqe, rqe, ms_link);
			ep->e.msg.ep_cur_recv = rqe;
		}

		/* payload begins immediately after the packet header */
		rx_ptr = (uint8_t *)(pkt + 1);
		rxlen = ntohs(pkt->msg.m.rc_data.length);
		rqe->ms_length += rxlen;
		rqe_ptr = (uint8_t *)rqe->ms_cur_ptr;
		iov_resid = rqe->ms_iov_resid;
		cur_iov = rqe->ms_cur_iov;
		/* scatter the payload across the iov list */
		while (rxlen > 0) {
			copylen = MIN(rxlen, iov_resid);
			memcpy(rqe_ptr, rx_ptr, copylen);
			rx_ptr += copylen;
			rxlen -= copylen;
			iov_resid -= copylen;
			if (iov_resid == 0) {
				if (cur_iov == rqe->ms_last_iov) {
					break;
				}
				++cur_iov;
				rqe_ptr = rqe->ms_iov[cur_iov].iov_base;
				iov_resid = rqe->ms_iov[cur_iov].iov_len;
			} else {
				rqe_ptr += copylen;
			}
		}

		/*
		 * BUGFIX: store the copy cursor back into the RQE so the
		 * next segment of this message resumes where this one
		 * stopped.  Previously nothing was written back, so every
		 * subsequent packet re-read the stale cursor and
		 * overwrote the start of the receive buffer.
		 */
		rqe->ms_cur_ptr = rqe_ptr;
		rqe->ms_iov_resid = iov_resid;
		rqe->ms_cur_iov = cur_iov;
		break;

	case RUDP_OP_LAST:
		ret = usdf_msg_check_seq(ep, pkt);
		if (ret == -1) {
			goto dropit;
		}

		rqe = ep->e.msg.ep_cur_recv;
		if (rqe == NULL) {
			rqe = TAILQ_FIRST(&rx->r.msg.rx_posted_rqe);
			if (rqe == NULL) {
				goto dropit;
			}
			TAILQ_REMOVE(&rx->r.msg.rx_posted_rqe, rqe, ms_link);
			ep->e.msg.ep_cur_recv = rqe;
		}

		rx_ptr = (uint8_t *)(pkt + 1);
		rxlen = ntohs(pkt->msg.m.rc_data.length);
		rqe->ms_length += rxlen;
		rqe_ptr = (uint8_t *)rqe->ms_cur_ptr;
		iov_resid = rqe->ms_iov_resid;
		cur_iov = rqe->ms_cur_iov;
		/*
		 * Final segment: the cursor need not be stored back
		 * afterwards because the message ends here.
		 */
		while (rxlen > 0) {
			copylen = MIN(rxlen, iov_resid);
			memcpy(rqe_ptr, rx_ptr, copylen);
			rx_ptr += copylen;
			rxlen -= copylen;
			iov_resid -= copylen;
			if (iov_resid == 0) {
				if (cur_iov == rqe->ms_last_iov) {
					break;
				}
				++cur_iov;
				rqe_ptr = rqe->ms_iov[cur_iov].iov_base;
				iov_resid = rqe->ms_iov[cur_iov].iov_len;
			} else {
				rqe_ptr += copylen;
			}
		}
		if (rxlen > 0) {
			/*
			 * Truncated: don't count bytes that didn't fit.
			 * NOTE(review): the RQE stays as ep_cur_recv and is
			 * never completed on this path, so a truncated
			 * message is not reported to the application -
			 * confirm whether this is intentional.
			 */
			rqe->ms_length -= rxlen;
		} else {
			usdf_msg_recv_complete(ep, rqe);
		}
		break;
	default:
		/* unexpected opcode - just repost the buffer */
		break;
	}

dropit:
	/* repost buffer */
	_usdf_msg_post_recv(rx, pkt,
			rx->rx_domain->dom_fabric->fab_dev_attrs->uda_mtu);
}