Example #1
File: rxe_comp.c Project: asmalldev/linux
static inline enum comp_state check_psn(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	s32 diff;

	/* check to see if response is past the oldest WQE. if it is, complete
	 * send/write or error read/atomic
	 */
	diff = psn_compare(pkt->psn, wqe->last_psn);
	if (diff > 0) {
		if (wqe->state == wqe_state_pending) {
			if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
				return COMPST_ERROR_RETRY;

			reset_retry_counters(qp);
			return COMPST_COMP_WQE;
		} else {
			return COMPST_DONE;
		}
	}

	/* compare response packet to expected response */
	diff = psn_compare(pkt->psn, qp->comp.psn);
	if (diff < 0) {
		/* the response is most likely a retried packet: if it matches
		 * an uncompleted WQE, complete it; otherwise ignore it
		 */
		if (pkt->psn == wqe->last_psn)
			return COMPST_COMP_ACK;
		else
			return COMPST_DONE;
	} else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
		return COMPST_DONE;
	} else {
		return COMPST_CHECK_ACK;
	}
}
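
The comparisons above all go through psn_compare(), which orders two 24-bit packet sequence numbers with wrap-around, so "diff > 0" means the packet's PSN is logically ahead of the reference PSN. A minimal sketch of such a helper, assuming the usual 24-bit PSN encoding (the driver's own definition is authoritative):

/* Sketch: wrap-aware comparison of two 24-bit PSNs. Shifting the
 * difference into the top 24 bits of a signed 32-bit value sign-extends
 * it, so the result is >0, ==0 or <0 in the same sense used above.
 */
static inline int psn_compare_sketch(u32 psn_a, u32 psn_b)
{
	s32 diff = (psn_a - psn_b) << 8;

	return diff;
}
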
Example #2
File: rxe_comp.c Project: asmalldev/linux
static inline enum comp_state complete_ack(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	unsigned long flags;

	if (wqe->has_rd_atomic) {
		wqe->has_rd_atomic = 0;
		atomic_inc(&qp->req.rd_atomic);
		if (qp->req.need_rd_atomic) {
			qp->comp.timeout_retry = 0;
			qp->req.need_rd_atomic = 0;
			rxe_run_task(&qp->req.task, 1);
		}
	}

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* state_lock used by requester & completer */
		spin_lock_irqsave(&qp->state_lock, flags);
		if ((qp->req.state == QP_STATE_DRAIN) &&
		    (qp->comp.psn == qp->req.psn)) {
			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} else {
			spin_unlock_irqrestore(&qp->state_lock, flags);
		}
	}

	do_complete(qp, wqe);

	if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
		return COMPST_UPDATE_COMP;
	else
		return COMPST_DONE;
}
Example #3
File: rxe_resp.c Project: Anjali05/linux
static enum resp_states check_psn(struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt)
{
	int diff = psn_compare(pkt->psn, qp->resp.psn);
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (diff > 0) {
			if (qp->resp.sent_psn_nak)
				return RESPST_CLEANUP;

			qp->resp.sent_psn_nak = 1;
			rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
			return RESPST_ERR_PSN_OUT_OF_SEQ;

		} else if (diff < 0) {
			rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
			return RESPST_DUPLICATE_REQUEST;
		}

		if (qp->resp.sent_psn_nak)
			qp->resp.sent_psn_nak = 0;

		break;

	case IB_QPT_UC:
		if (qp->resp.drop_msg || diff != 0) {
			if (pkt->mask & RXE_START_MASK) {
				qp->resp.drop_msg = 0;
				return RESPST_CHK_OP_SEQ;
			}

			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}
		break;
	default:
		break;
	}

	return RESPST_CHK_OP_SEQ;
}
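
For an RC queue pair the switch above boils down to three outcomes: the expected PSN is processed, a future PSN is NAKed once, and an old PSN is treated as a duplicate. A small illustrative classifier under the same assumptions (wrap-aware 24-bit comparison; the names here are hypothetical):

/* Sketch: classify an incoming PSN against the responder's expected PSN. */
enum psn_class_sketch { PSN_EXPECTED, PSN_FUTURE, PSN_DUPLICATE };

static enum psn_class_sketch classify_psn_sketch(u32 pkt_psn, u32 expected_psn)
{
	s32 diff = (pkt_psn - expected_psn) << 8;	/* wrap-aware compare */

	if (diff > 0)
		return PSN_FUTURE;	/* out of sequence: NAK once, then drop */
	if (diff < 0)
		return PSN_DUPLICATE;	/* already handled: duplicate path */
	return PSN_EXPECTED;
}
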
Example #4
File: rxe_comp.c Project: asmalldev/linux
static inline enum comp_state complete_wqe(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	qp->comp.opcode = -1;

	if (pkt) {
		if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
			qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

		if (qp->req.wait_psn) {
			qp->req.wait_psn = 0;
			rxe_run_task(&qp->req.task, 1);
		}
	}

	do_complete(qp, wqe);

	return COMPST_GET_WQE;
}
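
Whenever the completer advances qp->comp.psn it masks the result with BTH_PSN_MASK so the value stays inside the 24-bit PSN field of the base transport header. A tiny illustration of that wrap, assuming the conventional 0x00ffffff mask:

/* Illustration only: advancing a PSN past the 24-bit limit wraps to 0. */
#define PSN_MASK_SKETCH	0x00ffffff	/* assumed value of BTH_PSN_MASK */

static u32 next_psn_sketch(u32 psn)
{
	return (psn + 1) & PSN_MASK_SKETCH;	/* 0xffffff + 1 -> 0x000000 */
}
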
Example #5
File: rxe_resp.c Project: Anjali05/linux
/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	int mtu = qp->mtu;
	enum resp_states state;
	int payload;
	int opcode;
	int err;
	struct resp_res *res = qp->resp.res;
	u32 icrc;
	u32 *p;

	if (!res) {
		/* This is the first time we process that request. Get a
		 * resource
		 */
		res = &qp->resp.resources[qp->resp.res_head];

		free_rd_atomic_resource(qp, res);
		rxe_advance_resp_resource(qp);

		res->type		= RXE_READ_MASK;
		res->replay		= 0;

		res->read.va		= qp->resp.va;
		res->read.va_org	= qp->resp.va;

		res->first_psn		= req_pkt->psn;

		if (reth_len(req_pkt)) {
			res->last_psn	= (req_pkt->psn +
					   (reth_len(req_pkt) + mtu - 1) /
					   mtu - 1) & BTH_PSN_MASK;
		} else {
			res->last_psn	= res->first_psn;
		}
		res->cur_psn		= req_pkt->psn;

		res->read.resid		= qp->resp.resid;
		res->read.length	= qp->resp.resid;
		res->read.rkey		= qp->resp.rkey;

		/* note res inherits the reference to mr from qp */
		res->read.mr		= qp->resp.mr;
		qp->resp.mr		= NULL;

		qp->resp.res		= res;
		res->state		= rdatm_res_state_new;
	}

	if (res->state == rdatm_res_state_new) {
		if (res->read.resid <= mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
	} else {
		if (res->read.resid > mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
	}

	res->state = rdatm_res_state_next;

	payload = min_t(int, res->read.resid, mtu);

	skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
	if (!skb)
		return RESPST_ERR_RNR;

	err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
			   payload, from_mem_obj, &icrc);
	if (err)
		pr_err("Failed copying memory\n");

	p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
	*p = ~icrc;

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err) {
		pr_err("Failed sending RDMA reply.\n");
		return RESPST_ERR_RNR;
	}

	res->read.va += payload;
	res->read.resid -= payload;
	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

	if (res->read.resid > 0) {
		state = RESPST_DONE;
	} else {
		qp->resp.res = NULL;
		if (!res->replay)
			qp->resp.opcode = -1;
		if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
			qp->resp.psn = res->cur_psn;
		state = RESPST_CLEANUP;
	}

	return state;
}
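
The last_psn computation above divides the requested read length into MTU-sized response packets, rounding up: with reth_len = 10000 and mtu = 4096, (10000 + 4096 - 1) / 4096 = 3 packets, so last_psn = first_psn + 2 (masked to 24 bits). The same arithmetic as a standalone sketch (for a non-zero length, with an assumed 24-bit mask):

/* Sketch: PSN of the last read-response packet for a 'len'-byte read. */
static u32 last_read_psn_sketch(u32 first_psn, u32 len, u32 mtu)
{
	u32 npkts = (len + mtu - 1) / mtu;	/* round up to whole packets */

	return (first_psn + npkts - 1) & 0x00ffffff;
}
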
Example #6
File: rxe_comp.c Project: asmalldev/linux
int rxe_completer(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_send_wqe *wqe = NULL;
	struct sk_buff *skb = NULL;
	struct rxe_pkt_info *pkt = NULL;
	enum comp_state state;

	rxe_add_ref(qp);

	if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
	    qp->req.state == QP_STATE_RESET) {
		rxe_drain_resp_pkts(qp, qp->valid &&
				    qp->req.state == QP_STATE_ERROR);
		goto exit;
	}

	if (qp->comp.timeout) {
		qp->comp.timeout_retry = 1;
		qp->comp.timeout = 0;
	} else {
		qp->comp.timeout_retry = 0;
	}

	if (qp->req.need_retry)
		goto exit;

	state = COMPST_GET_ACK;

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 comp_state_name[state]);
		switch (state) {
		case COMPST_GET_ACK:
			skb = skb_dequeue(&qp->resp_pkts);
			if (skb) {
				pkt = SKB_TO_PKT(skb);
				qp->comp.timeout_retry = 0;
			}
			state = COMPST_GET_WQE;
			break;

		case COMPST_GET_WQE:
			state = get_wqe(qp, pkt, &wqe);
			break;

		case COMPST_CHECK_PSN:
			state = check_psn(qp, pkt, wqe);
			break;

		case COMPST_CHECK_ACK:
			state = check_ack(qp, pkt, wqe);
			break;

		case COMPST_READ:
			state = do_read(qp, pkt, wqe);
			break;

		case COMPST_ATOMIC:
			state = do_atomic(qp, pkt, wqe);
			break;

		case COMPST_WRITE_SEND:
			if (wqe->state == wqe_state_pending &&
			    wqe->last_psn == pkt->psn)
				state = COMPST_COMP_ACK;
			else
				state = COMPST_UPDATE_COMP;
			break;

		case COMPST_COMP_ACK:
			state = complete_ack(qp, pkt, wqe);
			break;

		case COMPST_COMP_WQE:
			state = complete_wqe(qp, pkt, wqe);
			break;

		case COMPST_UPDATE_COMP:
			if (pkt->mask & RXE_END_MASK)
				qp->comp.opcode = -1;
			else
				qp->comp.opcode = pkt->opcode;

			if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
				qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

			if (qp->req.wait_psn) {
				qp->req.wait_psn = 0;
				rxe_run_task(&qp->req.task, 1);
			}

			state = COMPST_DONE;
			break;

		case COMPST_DONE:
			if (pkt) {
				rxe_drop_ref(pkt->qp);
				kfree_skb(skb);
				skb = NULL;
			}
			goto done;

		case COMPST_EXIT:
			if (qp->comp.timeout_retry && wqe) {
				state = COMPST_ERROR_RETRY;
				break;
			}

			/* reset the retransmit timer if
			 * (1) QP is type RC
			 * (2) the QP is alive
			 * (3) there is a packet sent by the requester that
			 *     might be acked (we still might get spurious
			 *     timeouts but try to keep them as few as possible)
			 * (4) the timeout parameter is set
			 */
			if ((qp_type(qp) == IB_QPT_RC) &&
			    (qp->req.state == QP_STATE_READY) &&
			    (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
			    qp->qp_timeout_jiffies)
				mod_timer(&qp->retrans_timer,
					  jiffies + qp->qp_timeout_jiffies);
			WARN_ON_ONCE(skb);
			goto exit;

		case COMPST_ERROR_RETRY:
			/* we come here if the retry timer fired and we did
			 * not receive a response packet. try to retry the send
			 * queue if that makes sense and the limits have not
			 * been exceeded. remember that some timeouts are
			 * spurious since we do not reset the timer but kick
			 * it down the road or let it expire
			 */

			/* there is nothing to retry in this case */
			if (!wqe || (wqe->state == wqe_state_posted)) {
				WARN_ON_ONCE(skb);
				goto exit;
			}

			if (qp->comp.retry_cnt > 0) {
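				/* a count of 7 is treated as "unlimited" and
				 * never decremented, mirroring the RNR retry
				 * convention where 7 means infinite retries
				 */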
				if (qp->comp.retry_cnt != 7)
					qp->comp.retry_cnt--;

				/* no point in retrying if we have already
				 * seen the last ack that the requester could
				 * have caused
				 */
				if (psn_compare(qp->req.psn,
						qp->comp.psn) > 0) {
					/* tell the requester to retry the
					 * send queue next time around
					 */
					rxe_counter_inc(rxe,
							RXE_CNT_COMP_RETRY);
					qp->req.need_retry = 1;
					rxe_run_task(&qp->req.task, 1);
				}

				if (pkt) {
					rxe_drop_ref(pkt->qp);
					kfree_skb(skb);
					skb = NULL;
				}

				WARN_ON_ONCE(skb);
				goto exit;

			} else {
				rxe_counter_inc(rxe, RXE_CNT_RETRY_EXCEEDED);
				wqe->status = IB_WC_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_RNR_RETRY:
			if (qp->comp.rnr_retry > 0) {
				if (qp->comp.rnr_retry != 7)
					qp->comp.rnr_retry--;

				qp->req.need_retry = 1;
				pr_debug("qp#%d set rnr nak timer\n",
					 qp_num(qp));
				mod_timer(&qp->rnr_nak_timer,
					  jiffies + rnrnak_jiffies(aeth_syn(pkt)
						& ~AETH_TYPE_MASK));
				rxe_drop_ref(pkt->qp);
				kfree_skb(skb);
				skb = NULL;
				goto exit;
			} else {
				rxe_counter_inc(rxe,
						RXE_CNT_RNR_RETRY_EXCEEDED);
				wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_ERROR:
			WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
			do_complete(qp, wqe);
			rxe_qp_error(qp);

			if (pkt) {
				rxe_drop_ref(pkt->qp);
				kfree_skb(skb);
				skb = NULL;
			}

			WARN_ON_ONCE(skb);
			goto exit;
		}
	}

exit:
	/* we come here if we are done with processing and want the task to
	 * exit from the loop calling us
	 */
	WARN_ON_ONCE(skb);
	rxe_drop_ref(qp);
	return -EAGAIN;

done:
	/* we come here if we have processed a packet and want the task to
	 * call us again to see if there is anything else to do
	 */
	WARN_ON_ONCE(skb);
	rxe_drop_ref(qp);
	return 0;
}
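
As the comments at the exit and done labels spell out, the return value is the contract with the task loop that drives this function: 0 means "a packet was consumed, call me again", while a nonzero value (-EAGAIN here) tells the loop to stop. A hypothetical driver loop illustrating that contract (not the in-tree rxe_task code):

/* Sketch: keep running the completer while it reports progress. */
static void run_completer_sketch(struct rxe_qp *qp)
{
	while (rxe_completer(qp) == 0)
		;	/* 0: packet processed, poll again; -EAGAIN: stop */
}
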
Example #7
File: rxe_comp.c Project: asmalldev/linux
static inline enum comp_state check_ack(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	unsigned int mask = pkt->mask;
	u8 syn;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	/* Check the sequence only */
	switch (qp->comp.opcode) {
	case -1:
		/* Will catch all *_ONLY cases. */
		if (!(mask & RXE_START_MASK))
			return COMPST_ERROR;

		break;

	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
		    pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
			return COMPST_ERROR;
		}
		break;
	default:
		WARN_ON_ONCE(1);
	}

	/* Check operation validity. */
	switch (pkt->opcode) {
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		/* Fall through (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE
		 * doesn't have an AETH)
		 */
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		if (wqe->wr.opcode != IB_WR_RDMA_READ &&
		    wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
			return COMPST_ERROR;
		}
		reset_retry_counters(qp);
		return COMPST_READ;

	case IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
		    wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
			return COMPST_ERROR;
		reset_retry_counters(qp);
		return COMPST_ATOMIC;

	case IB_OPCODE_RC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);
		switch (syn & AETH_TYPE_MASK) {
		case AETH_ACK:
			reset_retry_counters(qp);
			return COMPST_WRITE_SEND;

		case AETH_RNR_NAK:
			rxe_counter_inc(rxe, RXE_CNT_RCV_RNR);
			return COMPST_RNR_RETRY;

		case AETH_NAK:
			switch (syn) {
			case AETH_NAK_PSN_SEQ_ERROR:
				/* a nak implicitly acks all packets with psns
				 * before
				 */
				if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
					rxe_counter_inc(rxe,
							RXE_CNT_RCV_SEQ_ERR);
					qp->comp.psn = pkt->psn;
					if (qp->req.wait_psn) {
						qp->req.wait_psn = 0;
						rxe_run_task(&qp->req.task, 1);
					}
				}
				return COMPST_ERROR_RETRY;

			case AETH_NAK_INVALID_REQ:
				wqe->status = IB_WC_REM_INV_REQ_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_ACC_ERR:
				wqe->status = IB_WC_REM_ACCESS_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_OP_ERR:
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;

			default:
				pr_warn("unexpected nak %x\n", syn);
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;
			}

		default:
			return COMPST_ERROR;
		}
		break;

	default:
		pr_warn("unexpected opcode\n");
	}

	return COMPST_ERROR;
}
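
check_ack() repeatedly masks the AETH syndrome byte with AETH_TYPE_MASK to decide between ACK, RNR NAK and NAK, and then uses the full byte to pick the specific NAK code. A small sketch of that split, assuming the conventional layout (top three bits select the class, low five bits carry credits or the NAK code; the driver's AETH_* macros are authoritative):

/* Sketch: decompose an AETH syndrome byte into class and value fields. */
#define AETH_TYPE_MASK_SKETCH	0xe0	/* assumed: top 3 bits = ack/nak class */
#define AETH_VALUE_MASK_SKETCH	0x1f	/* assumed: low 5 bits = credits/code */

static void decode_aeth_sketch(u8 syn, u8 *class, u8 *value)
{
	*class = syn & AETH_TYPE_MASK_SKETCH;
	*value = syn & AETH_VALUE_MASK_SKETCH;
}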