Example 1
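/*
 * Handle a failed RMA TX descriptor.  The transfer is retried while the
 * request remains under the domain's retransmit limit; once the limit is
 * reached, an error completion is generated and the request is freed.
 */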
static int __gnix_rma_post_err(struct gnix_tx_descriptor *txd)
{
	struct gnix_fab_req *req = txd->req;
	int rc;

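	/* Count the failure and retry while under the domain's retransmit
	 * limit. */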
	req->tx_failures++;
	if (req->tx_failures < req->gnix_ep->domain->params.max_retransmits) {
		_gnix_nic_tx_free(req->gnix_ep->nic, txd);

		GNIX_INFO(FI_LOG_EP_DATA,
			  "Requeueing failed request: %p\n", req);
		return _gnix_vc_queue_work_req(req);
	}

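	/* Out of retries: generate an error completion and release the
	 * request. */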
	GNIX_INFO(FI_LOG_EP_DATA, "Failed %d transmits: %p\n",
		  req->tx_failures, req);
	rc = __gnix_rma_send_err(req->vc->ep, req);
	if (rc != FI_SUCCESS)
		GNIX_WARN(FI_LOG_EP_DATA,
			  "__gnix_rma_send_err() failed: %d\n",
			  rc);

	__gnix_rma_fr_complete(req, txd);
	return FI_SUCCESS;
}
Example 2
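/*
 * TX completion handler for AMO requests.  A failed transaction cancels
 * the operation.  On success, a control message is queued when the peer
 * needs a remote counter event (FI_RMA_EVENT); otherwise the local
 * completion is generated and the request is freed.
 */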
static int __gnix_amo_txd_complete(void *arg, gni_return_t tx_status)
{
	struct gnix_tx_descriptor *txd = (struct gnix_tx_descriptor *)arg;
	struct gnix_fab_req *req = txd->req;
	int rc = FI_SUCCESS;

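	/* The TXD is no longer needed regardless of the transaction's
	 * outcome. */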
	_gnix_nic_tx_free(req->vc->ep->nic, txd);

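	/* A failed transaction cancels the AMO operation. */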
	if (tx_status != GNI_RC_SUCCESS) {
		return __gnix_amo_post_err(req, FI_ECANCELED);
	}

	if (req->vc->peer_caps & FI_RMA_EVENT) {
		/* control message needed for a counter event. */
		req->work_fn = __gnix_amo_send_cntr_req;
		_gnix_vc_queue_work_req(req);
	} else {
		/* complete request */
		rc = __gnix_amo_send_completion(req->vc->ep, req);
		if (rc != FI_SUCCESS)
			GNIX_WARN(FI_LOG_EP_DATA,
				  "__gnix_amo_send_completion() failed: %d\n",
				  rc);

		__gnix_amo_fr_complete(req);
	}

	return FI_SUCCESS;
}
Example 3
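/*
 * Completion handler for the GET side of a rendezvous receive.  Chained
 * requests use two TXDs (an RDMA transfer for the middle block and an FMA
 * transfer for the unaligned tail) and are processed only once both have
 * completed.  Failed transfers are retried up to the domain's retransmit
 * limit.  On success, unaligned data is copied out, any auto-registered
 * memory is released, and a fin message is queued to the sender.
 */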
static int __gnix_rndzv_req_complete(void *arg, gni_return_t tx_status)
{
	struct gnix_tx_descriptor *txd = (struct gnix_tx_descriptor *)arg;
	struct gnix_fab_req *req = txd->req;
	int ret;

	if (req->msg.recv_flags & GNIX_MSG_GET_TAIL) {
		/* There are two TXDs involved with this request, an RDMA
		 * transfer to move the middle block and an FMA transfer to
		 * move unaligned tail data.  If this is the FMA TXD, store the
		 * unaligned bytes.  Bytes are copied from the request to the
		 * user buffer once both TXDs arrive. */
		if (txd->gni_desc.type == GNI_POST_FMA_GET)
			req->msg.rndzv_tail = *(uint32_t *)txd->int_buf;

		/* Remember any failure.  Retransmit both TXDs once both are
		 * complete. */
		req->msg.status |= tx_status;

		atomic_dec(&req->msg.outstanding_txds);
		if (atomic_get(&req->msg.outstanding_txds)) {
			_gnix_nic_tx_free(req->gnix_ep->nic, txd);
			GNIX_INFO(FI_LOG_EP_DATA,
				  "Received first RDMA chain TXD, req: %p\n",
				  req);
			return FI_SUCCESS;
		}

		tx_status = req->msg.status;
	}

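	/* Single TXD, or the last TXD of a chained request; release it. */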
	_gnix_nic_tx_free(req->gnix_ep->nic, txd);

	if (tx_status != GNI_RC_SUCCESS) {
		req->tx_failures++;
		if (req->tx_failures <
		    req->gnix_ep->domain->params.max_retransmits) {

			GNIX_INFO(FI_LOG_EP_DATA,
				  "Requeueing failed request: %p\n", req);
			return _gnix_vc_queue_work_req(req);
		}

		/* TODO should this be fatal? A request will sit waiting at the
		 * peer. */
		return __gnix_msg_recv_err(req->gnix_ep, req);
	}

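	/* Copy any unaligned bytes staged in the request into the user
	 * buffer. */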
	__gnix_msg_copy_unaligned_get_data(req);

	GNIX_INFO(FI_LOG_EP_DATA, "Completed RNDZV GET, req: %p\n", req);

	if (req->msg.recv_flags & FI_LOCAL_MR) {
		GNIX_INFO(FI_LOG_EP_DATA, "freeing auto-reg MR: %p\n",
			  req->msg.recv_md);
		fi_close(&req->msg.recv_md->mr_fid.fid);
	}

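	/* Queue a fin message to tell the sender the GET has completed. */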
	req->work_fn = __gnix_rndzv_req_send_fin;
	ret = _gnix_vc_queue_work_req(req);

	return ret;
}
Example 4
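/*
 * TX completion handler for RMA requests.  Chained RDMA requests wait for
 * both TXDs before acting on the combined status.  Failures are handed to
 * __gnix_rma_post_err().  On success, a request carrying FI_REMOTE_CQ_DATA
 * first queues an immediate-data transfer; otherwise any staged GET data
 * is copied out, the completion is generated, and the request is freed.
 */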
static int __gnix_rma_txd_complete(void *arg, gni_return_t tx_status)
{
	struct gnix_tx_descriptor *txd = (struct gnix_tx_descriptor *)arg;
	struct gnix_fab_req *req = txd->req;
	int rc = FI_SUCCESS;

	/* Wait for both TXDs before processing RDMA chained requests. */
	if (req->flags & GNIX_RMA_CHAINED && req->flags & GNIX_RMA_RDMA) {
		/* There are two TXDs involved with this request, an RDMA
		 * transfer to move the middle block and an FMA transfer to
		 * move unaligned head and/or tail.  If this is the FMA TXD,
		 * copy the unaligned data to the user buffer. */
		if (txd->gni_desc.type == GNI_POST_FMA_GET)
			__gnix_rma_copy_chained_get_data(txd);

		/* Remember any failure.  Retransmit both TXDs once both are
		 * complete. */
		req->rma.status |= tx_status;

		atomic_dec(&req->rma.outstanding_txds);
		if (atomic_get(&req->rma.outstanding_txds)) {
			_gnix_nic_tx_free(req->gnix_ep->nic, txd);
			GNIX_INFO(FI_LOG_EP_DATA,
				  "Received first RDMA chain TXD, req: %p\n",
				  req);
			return FI_SUCCESS;
		}

		tx_status = req->rma.status;
	}

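	/* Any failure, combined across chained TXDs, goes to the error
	 * path. */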
	if (tx_status != GNI_RC_SUCCESS) {
		return __gnix_rma_post_err(txd);
	}

	/* Successful delivery.  Progress request. */
	if (req->flags & FI_REMOTE_CQ_DATA) {
		/* initiate immediate data transfer */
		req->tx_failures = 0;
		req->work_fn = __gnix_rma_send_data_req;
		_gnix_vc_queue_work_req(req);
	} else {
		if (req->flags & GNIX_RMA_INDIRECT) {
			__gnix_rma_copy_indirect_get_data(txd);
		} else if (req->flags & GNIX_RMA_CHAINED &&
			   !(req->flags & GNIX_RMA_RDMA)) {
			__gnix_rma_copy_chained_get_data(txd);
		}

		/* complete request */
		rc = __gnix_rma_send_completion(req->vc->ep, req);
		if (rc != FI_SUCCESS)
			GNIX_WARN(FI_LOG_EP_DATA,
				  "__gnix_rma_send_completion() failed: %d\n",
				  rc);

		__gnix_rma_fr_complete(req, txd);
	}

	return FI_SUCCESS;
}