Example #1
0
int siw_destroy_qp(struct ib_qp *ofa_qp)
{
	struct siw_qp		*qp = siw_qp_ofa2siw(ofa_qp);
	struct siw_qp_attrs	qp_attrs;

	dprint(DBG_CM, "(QP%d): SIW QP state=%d, cep=0x%p\n",
		QP_ID(qp), qp->attrs.state, qp->cep);

	/*
	 * Mark QP as being destroyed to prevent eventual async
	 * callbacks to the OFA core.
	 */
	qp->attrs.flags |= SIW_QP_IN_DESTROY;
	qp->rx_ctx.rx_suspend = 1;

	down_write(&qp->state_lock);

	qp_attrs.state = SIW_QP_STATE_ERROR;
	(void)siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE);

	if (qp->cep) {
		siw_cep_put(qp->cep);
		qp->cep = NULL;
	}

	up_write(&qp->state_lock);

	if (qp->rx_ctx.crc_enabled)
		crypto_free_hash(qp->rx_ctx.mpa_crc_hd.tfm);
	if (qp->tx_ctx.crc_enabled)
		crypto_free_hash(qp->tx_ctx.mpa_crc_hd.tfm);

	/* Drop references */
	siw_cq_put(qp->scq);
	siw_cq_put(qp->rcq);
	siw_pd_put(qp->pd);
	qp->scq = qp->rcq = NULL;

	siw_qp_put(qp);

	return 0;
}
Example #2
0
int siw_destroy_cq(struct ib_cq *ofa_cq)
{
	struct siw_cq		*cq  = siw_cq_ofa2siw(ofa_cq);
	struct ib_device	*ofa_dev = ofa_cq->device;
	struct siw_dev		*sdev = siw_dev_ofa2siw(ofa_dev);

	siw_remove_obj(&sdev->idr_lock, &sdev->cq_idr, &cq->hdr);
	siw_cq_put(cq);

	return 0;
}
Example #3
0
static ssize_t siw_event_file_write(struct file *filp, const char __user *buf,
		size_t count, loff_t *pos)
{
	struct siw_event_file *file;
	struct urdma_event_storage event;
	struct urdma_cq_event *cq_event;
	struct siw_cq *cq;
	ssize_t rv;

	if (count > sizeof(event)) {
		return -EINVAL;
	}

	if (copy_from_user(&event, buf, count)) {
		return -EFAULT;
	}

	file = filp->private_data;
	spin_lock_irq(&file->lock);
	if (!file->ctx) {
		rv = -EINVAL;
		goto out;
	}

	switch (event.event_type) {
	case SIW_EVENT_COMP_POSTED:
		if (count != sizeof(*cq_event)) {
			rv = -EINVAL;
			goto out;
		}
		cq_event = (struct urdma_cq_event *)&event;
		cq = siw_cq_id2obj(file->ctx->sdev, cq_event->cq_id);
		if (WARN_ON_ONCE(!cq)) {
			rv = -EINVAL;
			goto out;
		}
		cq->ofa_cq.comp_handler(&cq->ofa_cq, cq->ofa_cq.cq_context);
		siw_cq_put(cq);
		break;
	default:
		pr_debug(" got invalid event type %u\n", event.event_type);
		rv = -EINVAL;
		goto out;
	}

	rv = count;

out:
	spin_unlock_irq(&file->lock);
	return rv;
}
Example #4
0
int siw_destroy_qp(struct ib_qp *ofa_qp)
{
	struct siw_qp		*qp = siw_qp_ofa2siw(ofa_qp);
	struct siw_qp_attrs	qp_attrs;

	pr_debug(DBG_CM "(QP%d): SIW QP state=%d, cep=0x%p\n",
		QP_ID(qp), qp->attrs.state, qp->cep);

	/*
	 * Mark QP as being destroyed to prevent eventual async
	 * callbacks to the OFA core.
	 */
	qp->attrs.flags |= SIW_QP_IN_DESTROY;

	down_write(&qp->state_lock);

	qp_attrs.state = SIW_QP_STATE_ERROR;
	(void)siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE);

	if (qp->cep) {
		siw_cep_put(qp->cep);
		qp->cep = NULL;
	}

	up_write(&qp->state_lock);

	/* Drop references */
	siw_cq_put(qp->scq);
	siw_cq_put(qp->rcq);
	siw_pd_put(qp->pd);
	qp->scq = qp->rcq = NULL;

	siw_qp_put(qp);

	return 0;
}
Example #5
0
struct ib_qp *siw_create_qp(struct ib_pd *ofa_pd,
			    struct ib_qp_init_attr *attrs,
			    struct ib_udata *udata)
{
	struct siw_qp			*qp = NULL;
	struct siw_pd			*pd = siw_pd_ofa2siw(ofa_pd);
	struct ib_device		*ofa_dev = ofa_pd->device;
	struct siw_dev			*sdev = siw_dev_ofa2siw(ofa_dev);
	struct siw_cq			*scq = NULL, *rcq = NULL;
	struct siw_uresp_create_qp	uresp;

	unsigned long flags;
	int kernel_verbs = ofa_pd->uobject ? 0 : 1;
	int rv = 0;

	dprint(DBG_OBJ|DBG_CM, ": new QP on device %s\n",
		ofa_dev->name);

	if (attrs->qp_type != IB_QPT_RC) {
		dprint(DBG_ON, ": Only RC QP's supported\n");
		return ERR_PTR(-ENOSYS);
	}
	if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_recv_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_send_sge > SIW_MAX_SGE)  ||
	    (attrs->cap.max_recv_sge > SIW_MAX_SGE)) {
		dprint(DBG_ON, ": QP Size!\n");
		return ERR_PTR(-EINVAL);
	}
	if (attrs->cap.max_inline_data > SIW_MAX_INLINE) {
		dprint(DBG_ON, ": Max Inline Send %d > %d!\n",
		       attrs->cap.max_inline_data, (int)SIW_MAX_INLINE);
		return ERR_PTR(-EINVAL);
	}
	/*
	 * NOTE: zero-element SGLs are allowed for SQ and RQ WQEs,
	 * but a QP must be able to hold at least one WQE (SQ + RQ)
	 */
	if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0)
		return ERR_PTR(-EINVAL);

	if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
		dprint(DBG_ON, ": Out of QP's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	scq = siw_cq_id2obj(sdev, ((struct siw_cq *)attrs->send_cq)->hdr.id);
	rcq = siw_cq_id2obj(sdev, ((struct siw_cq *)attrs->recv_cq)->hdr.id);

	if (!scq || !rcq) {
		dprint(DBG_OBJ, ": Fail: SCQ: 0x%p, RCQ: 0x%p\n",
			scq, rcq);
		rv = -EINVAL;
		goto err_out;
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		dprint(DBG_ON, ": kzalloc\n");
		rv = -ENOMEM;
		goto err_out;
	}

	INIT_LIST_HEAD(&qp->freeq);
	INIT_LIST_HEAD(&qp->sq);
	INIT_LIST_HEAD(&qp->rq);
	INIT_LIST_HEAD(&qp->orq);
	INIT_LIST_HEAD(&qp->irq);

	init_rwsem(&qp->state_lock);
	spin_lock_init(&qp->freeq_lock);
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);
	spin_lock_init(&qp->orq_lock);

	init_waitqueue_head(&qp->tx_ctx.waitq);

	rv = siw_qp_add(sdev, qp);
	if (rv)
		goto err_out;

	if (kernel_verbs) {
		int num_wqe = attrs->cap.max_send_wr + attrs->cap.max_recv_wr;
		while (num_wqe--) {
			struct siw_wqe *wqe = kzalloc(sizeof *wqe, GFP_KERNEL);
			if (!wqe) {
				rv = -ENOMEM;
				goto err_out_idr;
			}
			SIW_INC_STAT_WQE;
			INIT_LIST_HEAD(&wqe->list);
			list_add(&wqe->list, &qp->freeq);
		}
		qp->attrs.flags |= SIW_KERNEL_VERBS;
	}
	if (attrs->sq_sig_type != IB_SIGNAL_REQ_WR) {
		if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
			qp->attrs.flags |= SIW_SIGNAL_ALL_WR;
		else {
			rv = -EINVAL;
			goto err_out;
		}
	}
	qp->pd  = pd;
	qp->scq = scq;
	qp->rcq = rcq;

	if (attrs->srq) {
		/*
		 * SRQ support.
		 * Verbs 6.3.7: ignore RQ size, if SRQ present
		 * Verbs 6.3.5: do not check PD of SRQ against PD of QP
		 */
		qp->srq = siw_srq_ofa2siw(attrs->srq);
		qp->attrs.rq_size = 0;
		atomic_set(&qp->rq_space, 0);
		dprint(DBG_OBJ, " QP(%d): SRQ(%p) attached\n",
			QP_ID(qp), qp->srq);
	} else {
		qp->srq = NULL;
		qp->attrs.rq_size = attrs->cap.max_recv_wr;
		atomic_set(&qp->rq_space, qp->attrs.rq_size);
	}
	qp->attrs.sq_size = attrs->cap.max_send_wr;
	atomic_set(&qp->sq_space, qp->attrs.sq_size);
	qp->attrs.sq_max_sges = attrs->cap.max_send_sge;
	/*
	 * ofed has no max_send_sge_rdmawrite
	 */
	qp->attrs.sq_max_sges_rdmaw = attrs->cap.max_send_sge;
	qp->attrs.rq_max_sges = attrs->cap.max_recv_sge;

	qp->attrs.state = SIW_QP_STATE_IDLE;

	if (udata) {
		uresp.sq_size = qp->attrs.sq_size;
		uresp.rq_size = qp->attrs.rq_size;
		uresp.qp_id = QP_ID(qp);

		rv = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (rv)
			goto err_out_idr;
	}

	atomic_set(&qp->tx_ctx.in_use, 0);

	qp->ofa_qp.qp_num = QP_ID(qp);

	siw_pd_get(pd);

	INIT_LIST_HEAD(&qp->devq);
	spin_lock_irqsave(&sdev->idr_lock, flags);
	list_add_tail(&qp->devq, &sdev->qp_list);
	spin_unlock_irqrestore(&sdev->idr_lock, flags);

	return &qp->ofa_qp;

err_out_idr:
	siw_remove_obj(&sdev->idr_lock, &sdev->qp_idr, &qp->hdr);
err_out:
	if (scq)
		siw_cq_put(scq);
	if (rcq)
		siw_cq_put(rcq);

	if (qp)
		siw_drain_wq(&qp->freeq);

	kfree(qp);
	atomic_dec(&sdev->num_qp);

	return ERR_PTR(rv);
}
Example #6
0
struct ib_qp *siw_create_qp(struct ib_pd *ofa_pd,
			    struct ib_qp_init_attr *attrs,
			    struct ib_udata *udata)
{
	struct siw_qp			*qp = NULL;
	struct siw_pd			*pd = siw_pd_ofa2siw(ofa_pd);
	struct ib_device		*ofa_dev = ofa_pd->device;
	struct siw_dev			*sdev = siw_dev_ofa2siw(ofa_dev);
	struct siw_cq			*scq = NULL, *rcq = NULL;

	int rv = 0;

	pr_debug(DBG_OBJ DBG_CM ": new QP on device %s\n",
		ofa_dev->name);

	if (!ofa_pd->uobject) {
		pr_debug(": This driver does not support kernel clients\n");
		return ERR_PTR(-EINVAL);
	}

	if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
		pr_debug(": Out of QP's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (attrs->qp_type != IB_QPT_RC) {
		pr_debug(": Only RC QP's supported\n");
		rv = -EINVAL;
		goto err_out;
	}
	if (attrs->srq) {
		pr_debug(": SRQ is not supported\n");
		rv = -EINVAL;
		goto err_out;
	}

	scq = siw_cq_id2obj(sdev, ((struct siw_cq *)attrs->send_cq)->hdr.id);
	rcq = siw_cq_id2obj(sdev, ((struct siw_cq *)attrs->recv_cq)->hdr.id);

	if (!scq || !rcq) {
		pr_debug(DBG_OBJ ": Fail: SCQ: 0x%p, RCQ: 0x%p\n",
			scq, rcq);
		rv = -EINVAL;
		goto err_out;
	}
	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp) {
		pr_debug(": kzalloc\n");
		rv = -ENOMEM;
		goto err_out;
	}

	init_rwsem(&qp->state_lock);

	rv = siw_qp_add(sdev, qp);
	if (rv)
		goto err_out;

	qp->pd  = pd;
	qp->scq = scq;
	qp->rcq = rcq;
	qp->attrs.state = SIW_QP_STATE_IDLE;

	if (udata) {
		struct urdma_udata_create_qp ureq;
		struct urdma_uresp_create_qp uresp;

		rv = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
		if (rv)
			goto err_out_idr;
		qp->attrs.irq_size = ureq.ird_max;
		qp->attrs.orq_size = ureq.ord_max;
		qp->attrs.urdma_devid = ureq.urdmad_dev_id;
		qp->attrs.urdma_qp_id = ureq.urdmad_qp_id;
		qp->attrs.urdma_rxq = ureq.rxq;
		qp->attrs.urdma_txq = ureq.txq;

		memset(&uresp, 0, sizeof uresp);
		uresp.kmod_qp_id = QP_ID(qp);

		rv = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (rv)
			goto err_out_idr;
	}

	qp->ofa_qp.qp_num = QP_ID(qp);

	siw_pd_get(pd);

	return &qp->ofa_qp;

err_out_idr:
	siw_remove_obj(&sdev->idr_lock, &sdev->qp_idr, &qp->hdr);
err_out:
	if (scq)
		siw_cq_put(scq);
	if (rcq)
		siw_cq_put(rcq);

	if (qp) {
		kfree(qp);
	}
	atomic_dec(&sdev->num_qp);

	return ERR_PTR(rv);
}