Example #1
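The deferred QP cleanup handler: once the last reference to the QP is dropped, it releases the send and receive queues via rxe_queue_cleanup(), drops its references on the SRQ, CQs, PD and responder MR, resets the cached route for RC QPs, frees the RD-atomic resources, and finally shuts down and releases the QP's socket.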
/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}
Example #2
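An older variant of the same teardown, invoked directly with a void * argument rather than as deferred work; it performs the same queue cleanup and reference drops but stops at kernel_sock_shutdown(), without the sk_dst_reset() and sock_release() steps.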
/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(void *arg)
{
	struct rxe_qp *qp = arg;

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
}
Example #3
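The SRQ destroy verb: it frees the receive queue, if one was ever allocated, then drops the references on the PD and on the SRQ itself.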
static void rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_ref(srq);
}
Example #4
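The modify-SRQ path, judging by the attr/mask parameters: for IB_SRQ_MAX_WR it first validates the user-space mminfo destination, then resizes the receive queue with rxe_queue_resize(); if the resize fails, the queue is cleaned up and detached from the SRQ.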
int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct ib_udata *udata)
{
	int err;
	struct rxe_queue *q = srq->rq.queue;
	struct mminfo mi = { .offset = 1, .size = 0};

	if (mask & IB_SRQ_MAX_WR) {
		/* Check that we can write the mminfo struct to user space */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 mi_addr;

			/* Get address of user space mminfo struct */
			err = ib_copy_from_udata(&mi_addr, udata,
						 sizeof(mi_addr));
			if (err)
				goto err1;

			udata->outbuf = (void __user *)(unsigned long)mi_addr;
			udata->outlen = sizeof(mi);

			if (!access_ok(VERIFY_WRITE,
				       (void __user *)udata->outbuf,
					udata->outlen)) {
				err = -EFAULT;
				goto err1;
			}
		}

		err = rxe_queue_resize(q, &attr->max_wr,
				       rcv_wqe_size(srq->rq.max_sge),
				       srq->rq.queue->ip ?
						srq->rq.queue->ip->context :
						NULL,
				       udata, &srq->rq.producer_lock,
				       &srq->rq.consumer_lock);
		if (err)
			goto err2;
	}

	if (mask & IB_SRQ_LIMIT)
		srq->limit = attr->srq_limit;

	return 0;

err2:
	rxe_queue_cleanup(q);
	srq->rq.queue = NULL;
err1:
	return err;
}
Example #5
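An older version of the destroy verb, which still returns a status code and additionally drops the SRQ's index before the final reference.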
static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}
Example #6
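The create-QP path: it takes references on the PD, both CQs and the optional SRQ, initializes the request and response sides, and on a response-side failure unwinds by cleaning up the already-created send queue before dropping those references again.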
/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init, struct ib_udata *udata,
		     struct ib_pd *ibpd)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd			= pd;
	qp->rcq			= rcq;
	qp->scq			= scq;
	qp->srq			= srq;
	qp->udata		= udata;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, context, udata);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, context, udata);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}
Example #7
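The resize helper itself: it allocates a replacement queue, sets up its mmap info, then swaps over to the new queue in resize_finish() (not shown here) under the consumer lock and, when one is supplied, the producer lock; whichever queue is then unused goes to rxe_queue_cleanup(), the new one on failure or the displaced old one on success.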
int rxe_queue_resize(struct rxe_queue *q,
		     unsigned int *num_elem_p,
		     unsigned int elem_size,
		     struct ib_ucontext *context,
		     struct ib_udata *udata,
		     spinlock_t *producer_lock,
		     spinlock_t *consumer_lock)
{
	struct rxe_queue *new_q;
	unsigned int num_elem = *num_elem_p;
	int err;
	unsigned long flags = 0, flags1;

	new_q = rxe_queue_init(q->rxe, &num_elem, elem_size);
	if (!new_q)
		return -ENOMEM;

	err = do_mmap_info(new_q->rxe, udata, false, context, new_q->buf,
			   new_q->buf_size, &new_q->ip);
	if (err) {
		vfree(new_q->buf);
		kfree(new_q);
		goto err1;
	}

	spin_lock_irqsave(consumer_lock, flags1);

	if (producer_lock) {
		spin_lock_irqsave(producer_lock, flags);
		err = resize_finish(q, new_q, num_elem);
		spin_unlock_irqrestore(producer_lock, flags);
	} else {
		err = resize_finish(q, new_q, num_elem);
	}

	spin_unlock_irqrestore(consumer_lock, flags1);

	rxe_queue_cleanup(new_q);	/* frees new_q on error, the displaced old queue on success */
	if (err)
		goto err1;

	*num_elem_p = num_elem;
	return 0;

err1:
	return err;
}