Example No. 1
File: qp.c Project: 020gzh/linux
/**
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: Whether to clear the send side or not
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&ibah_to_rvtah(
						wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			smp_wmb(); /* see qp_set_savail */
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}
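The loop above drains a circular send queue: it advances s_last toward s_head, dropping the MR reference held by each SGE and wrapping the index at s_size. A minimal standalone sketch of that ring-walk pattern, using hypothetical toy_wqe/put_mr stand-ins rather than the real rvt types:

struct toy_wqe { void *mr; };              /* stand-in for struct rvt_swqe */

static void put_mr(void *mr) { (void)mr; } /* stub: drop one MR reference */

/* Release every entry between 'last' and 'head', wrapping at 'size',
 * mirroring the s_last/s_head/s_size loop in rvt_clear_mr_refs(). */
static void drain_ring(struct toy_wqe *ring, unsigned size,
		       unsigned *last, unsigned head)
{
	while (*last != head) {
		put_mr(ring[*last].mr);
		if (++*last >= size)   /* circular index wrap */
			*last = 0;
	}
}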
Example No. 2
/*
 * Validate an RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int qib_init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				  NULL, &wqe->sg_list[i],
				  IB_ACCESS_LOCAL_WRITE);
		if (unlikely(ret <= 0))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}
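Note the addressing convention in the bad_lkey unwind: the first validated SGE is stored inline in ss->sge while the second and later ones live in ss->sg_list[], so logical index 0 maps to &ss->sge and index k > 0 maps to &ss->sg_list[k - 1]. A hedged standalone sketch of that split-storage rollback, with hypothetical toy types in place of the rvt ones:

struct toy_sge { void *mr; };

struct toy_ss {
	struct toy_sge sge;       /* first SGE kept inline */
	struct toy_sge *sg_list;  /* SGEs 2..n */
};

static void put_mr_ref(void *mr) { (void)mr; } /* stub: drop a reference */

/* Release the j references taken so far, newest first, using the same
 * "--j ? &sg_list[j - 1] : &sge" addressing as the bad_lkey path above. */
static void unwind_sges(struct toy_ss *ss, int j)
{
	while (j) {
		struct toy_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
		put_mr_ref(sge->mr);
	}
}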
Example No. 3
/* Release a verbs tx request: drop its MR, clean up the sdma request,
 * return it to the slab cache, and wake the next QP waiting for one. */
void hfi1_put_txreq(struct verbs_txreq *tx)
{
	struct hfi1_ibdev *dev;
	struct rvt_qp *qp;
	unsigned long flags;
	unsigned int seq;
	struct hfi1_qp_priv *priv;

	qp = tx->qp;
	dev = to_idev(qp->ibqp.device);

	if (tx->mr)
		rvt_put_mr(tx->mr);

	sdma_txclean(dd_from_dev(dev), &tx->txreq);

	/* Free verbs_txreq and return to slab cache */
	kmem_cache_free(dev->verbs_txreq_cache, tx);

	do {
		seq = read_seqbegin(&dev->iowait_lock);
		if (!list_empty(&dev->txwait)) {
			struct iowait *wait;

			write_seqlock_irqsave(&dev->iowait_lock, flags);
			wait = list_first_entry(&dev->txwait, struct iowait,
						list);
			qp = iowait_to_qp(wait);
			priv = qp->priv;
			list_del_init(&priv->s_iowait.list);
			/* refcount held until actual wake up */
			write_sequnlock_irqrestore(&dev->iowait_lock, flags);
			hfi1_qp_wakeup(qp, RVT_S_WAIT_TX);
			break;
		}
	} while (read_seqretry(&dev->iowait_lock, seq));
}
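The txwait check uses the classic seqlock idiom: an optimistic lockless read of dev->txwait, an upgrade to the write lock only when there is a waiter to dequeue, and a retry if a writer raced with the read section. A toy userspace model of that counter protocol (assumed, simplified semantics; the kernel's seqlock_t adds memory-barrier and irqsave handling this sketch omits):

#include <pthread.h>
#include <stdatomic.h>

/* Toy seqlock: the counter is odd while a writer is inside. */
struct toy_seqlock {
	atomic_uint seq;
	pthread_mutex_t lock;  /* serializes writers */
};

static unsigned toy_read_begin(struct toy_seqlock *sl)
{
	unsigned s;

	while ((s = atomic_load(&sl->seq)) & 1)
		;  /* writer active: wait for an even count */
	return s;
}

static int toy_read_retry(struct toy_seqlock *sl, unsigned start)
{
	return atomic_load(&sl->seq) != start;  /* changed => redo the read */
}

static void toy_write_lock(struct toy_seqlock *sl)
{
	pthread_mutex_lock(&sl->lock);
	atomic_fetch_add(&sl->seq, 1);  /* odd: readers stall or retry */
}

static void toy_write_unlock(struct toy_seqlock *sl)
{
	atomic_fetch_add(&sl->seq, 1);  /* even again: reads are stable */
	pthread_mutex_unlock(&sl->lock);
}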
Example No. 4
File: qp.c Project: 020gzh/linux
/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int rvt_post_one_wr(struct rvt_qp *qp,
			   struct ib_send_wr *wr,
			   int *call_send)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	u8 log_pmtu;
	int ret;

	/* IB spec says that num_sge == 0 is OK. */
	if (unlikely(wr->num_sge > qp->s_max_sge))
		return -EINVAL;

	/*
	 * Don't allow RDMA reads or atomic operations on UC QPs, and
	 * reject undefined opcodes.
	 * Make sure the buffer is large enough to hold the result for
	 * atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
			return -EINVAL;
	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
		if (wr->opcode != IB_WR_SEND &&
		    wr->opcode != IB_WR_SEND_WITH_IMM)
			return -EINVAL;
		/* Check UD destination address PD */
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			return -EINVAL;
	} else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
		return -EINVAL;
	}
	/* check for avail */
	if (unlikely(!qp->s_avail)) {
		qp->s_avail = qp_get_savail(qp);
		if (WARN_ON(qp->s_avail > (qp->s_size - 1)))
			rvt_pr_err(rdi,
				   "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
				   qp->ibqp.qp_num, qp->s_size, qp->s_avail,
				   qp->s_head, qp->s_tail, qp->s_cur,
				   qp->s_acked, qp->s_last);
		if (!qp->s_avail)
			return -ENOMEM;
	}
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = rvt_get_swqe_ptr(qp, qp->s_head);

	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC)
		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
	else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		 wr->opcode == IB_WR_RDMA_WRITE ||
		 wr->opcode == IB_WR_RDMA_READ)
		memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
	else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
	else
		memcpy(&wqe->wr, wr, sizeof(wqe->wr));

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
					 &wr->sg_list[i], acc);
			if (!ok) {
				ret = -EINVAL;
				goto bail_inval_free;
			}
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}

	/* general part of wqe valid - allow for driver checks */
	if (rdi->driver_f.check_send_wqe) {
		ret = rdi->driver_f.check_send_wqe(qp, wqe);
		if (ret < 0)
			goto bail_inval_free;
		if (ret)
			*call_send = ret;
	}

	log_pmtu = qp->log_pmtu;
	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC) {
		struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);

		log_pmtu = ah->log_pmtu;
		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
	}

	wqe->ssn = qp->s_ssn++;
	wqe->psn = qp->s_next_psn;
	wqe->lpsn = wqe->psn +
			(wqe->length ? ((wqe->length - 1) >> log_pmtu) : 0);
	qp->s_next_psn = wqe->lpsn + 1;
	trace_rvt_post_one_wr(qp, wqe);
	smp_wmb(); /* see request builders */
	qp->s_avail--;
	qp->s_head = next;

	return 0;

bail_inval_free:
	/* release mr holds */
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
	return ret;
}
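The PSN bookkeeping at the end assigns one packet sequence number per PMTU-sized fragment: lpsn = psn + ((length - 1) >> log_pmtu), where the path MTU is 2^log_pmtu bytes. A small self-contained check of that arithmetic (psn_span is a hypothetical helper, not part of the rdmavt API):

#include <stdint.h>
#include <stdio.h>

/* Number of packets (PSNs) a message of 'len' bytes occupies when split
 * into 2^log_pmtu-byte fragments; a zero-length send still takes one
 * PSN, matching the (length ? ((length - 1) >> log_pmtu) : 0) span. */
static uint32_t psn_span(uint32_t len, uint8_t log_pmtu)
{
	return (len ? ((len - 1) >> log_pmtu) : 0) + 1;
}

int main(void)
{
	printf("%u\n", psn_span(9000, 12)); /* 9000 B over 4096 B PMTU -> 3 */
	printf("%u\n", psn_span(0, 12));    /* zero-length send -> 1 */
	return 0;
}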