Example #1
struct ib_ucontext *siw_alloc_ucontext(struct ib_device *ofa_dev,
				       struct ib_udata *udata)
{
	struct siw_ucontext *ctx = NULL;
	struct siw_dev *sdev = siw_dev_ofa2siw(ofa_dev);
	int rv;

	dprint(DBG_CM, "(device=%s)\n", ofa_dev->name);

	if (atomic_inc_return(&sdev->num_ctx) > SIW_MAX_CONTEXT) {
		dprint(DBG_ON, ": Out of CONTEXT's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		rv = -ENOMEM;
		goto err_out;
	}
	ctx->sdev = sdev;
	return &ctx->ib_ucontext;

err_out:
	kfree(ctx);	/* kfree(NULL) is a no-op */

	atomic_dec(&sdev->num_ctx);
	return ERR_PTR(rv);
}
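For symmetry, a minimal sketch of the matching release path, assuming the siw_ctx_ofa2siw() container helper used in Example #15; it undoes the num_ctx accounting taken above:

int siw_dealloc_ucontext(struct ib_ucontext *ofa_ctx)
{
	struct siw_ucontext *ctx = siw_ctx_ofa2siw(ofa_ctx);

	/* drop the per-device context count taken in siw_alloc_ucontext() */
	atomic_dec(&ctx->sdev->num_ctx);
	kfree(ctx);

	return 0;
}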
Example #2
int siw_query_port(struct ib_device *ofa_dev, u8 port,
		     struct ib_port_attr *attr)
{
	struct siw_dev *sdev = siw_dev_ofa2siw(ofa_dev);

	memset(attr, 0, sizeof *attr);

	attr->state = sdev->state;
	attr->max_mtu = siw_mtu_net2ofa(sdev->netdev->mtu);
	attr->active_mtu = attr->max_mtu;
	attr->gid_tbl_len = 1;
	attr->port_cap_flags = IB_PORT_CM_SUP;	/* ?? */
	attr->port_cap_flags |= IB_PORT_DEVICE_MGMT_SUP;
	attr->max_msg_sz = -1;		/* no limit */
	attr->pkey_tbl_len = 1;
	attr->active_width = 2;		/* IB_WIDTH_4X */
	attr->active_speed = 2;		/* IB_SPEED_DDR */
	attr->phys_state = sdev->state == IB_PORT_ACTIVE ? 5 : 3; /* LinkUp : Disabled */

	/*
	 * All zero
	 *
	 * attr->lid = 0;
	 * attr->bad_pkey_cntr = 0;
	 * attr->qkey_viol_cntr = 0;
	 * attr->sm_lid = 0;
	 * attr->lmc = 0;
	 * attr->max_vl_num = 0;
	 * attr->sm_sl = 0;
	 * attr->subnet_timeout = 0;
	 * attr->init_type_reply = 0;
	 */
	return 0;
}
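A hypothetical in-kernel consumer reaches this verb through the core's ib_query_port(); a minimal sketch (iWARP devices expose a single port numbered 1):

static void dump_port_state(struct ib_device *ibdev)
{
	struct ib_port_attr attr;

	if (ib_query_port(ibdev, 1, &attr))
		return;

	pr_info("%s: state %d, max_mtu %d\n",
		ibdev->name, attr.state, attr.max_mtu);
}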
Example #3
/*
 * Minimum siw_query_qp() verb interface.
 *
 * @qp_attr_mask is ignored; all available information is provided.
 */
int siw_query_qp(struct ib_qp *ofa_qp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct siw_qp *qp;
	struct siw_dev *sdev;

	if (!ofa_qp || !qp_attr || !qp_init_attr)
		return -EINVAL;

	qp = siw_qp_ofa2siw(ofa_qp);
	sdev = siw_dev_ofa2siw(ofa_qp->device);

	qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
	qp_init_attr->cap.max_inline_data = SIW_MAX_INLINE;

	qp_attr->cap.max_send_wr = qp->attrs.sq_size;
	qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
	qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
	qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;

	qp_attr->path_mtu = siw_mtu_net2ofa(sdev->netdev->mtu);
	qp_attr->max_rd_atomic = qp->attrs.ird;
	qp_attr->max_dest_rd_atomic = qp->attrs.ord;

	qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
			IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;

	qp_init_attr->cap = qp_attr->cap;

	return 0;
}
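A hypothetical caller sketch against the core ib_query_qp() entry point; the mask argument is ignored by this driver, so 0 is passed:

static void dump_qp_caps(struct ib_qp *qp)
{
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	if (ib_query_qp(qp, &attr, 0, &init_attr))
		return;

	pr_info("QP %u: sq %u wr, rq %u wr, max inline %u\n",
		qp->qp_num, attr.cap.max_send_wr,
		attr.cap.max_recv_wr, attr.cap.max_inline_data);
}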
Example #4
struct ib_cq *siw_create_cq(struct ib_device *ofa_dev,
			    const struct ib_cq_init_attr *attr,
			    struct ib_ucontext *ib_context,
			    struct ib_udata *udata)
{
	struct siw_cq			*cq = NULL;
	struct siw_dev			*sdev = siw_dev_ofa2siw(ofa_dev);
	struct siw_uresp_create_cq	uresp;
	int rv;

	if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) {
		dprint(DBG_ON, ": Out of CQ's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (attr->cqe < 1 || attr->cqe > SIW_MAX_CQE) {
		dprint(DBG_ON, ": CQE: %d\n", attr->cqe);
		rv = -EINVAL;
		goto err_out;
	}
	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq) {
		dprint(DBG_ON, ":  kmalloc\n");
		rv = -ENOMEM;
		goto err_out;
	}
	cq->ofa_cq.cqe = attr->cqe - 1;

	rv = siw_cq_add(sdev, cq);
	if (rv)
		goto err_out;	/* CQ was never added to the idr */

	INIT_LIST_HEAD(&cq->queue);
	spin_lock_init(&cq->lock);
	atomic_set(&cq->qlen, 0);

	if (ib_context) {
		uresp.cq_id = OBJ_ID(cq);

		rv = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (rv)
			goto err_out_idr;
	}
	return &cq->ofa_cq;

err_out_idr:
	siw_remove_obj(&sdev->idr_lock, &sdev->cq_idr, &cq->hdr);
err_out:
	dprint(DBG_OBJ, ": CQ creation failed\n");

	kfree(cq);
	atomic_dec(&sdev->num_cq);

	return ERR_PTR(rv);
}
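A hypothetical kernel-side caller, sketched against the ib_create_cq() core entry point of the same API generation (no completion or event handlers wired up):

static struct ib_cq *make_cq(struct ib_device *ibdev, int ncqe)
{
	struct ib_cq_init_attr cqia = { .cqe = ncqe };

	return ib_create_cq(ibdev, NULL, NULL, NULL, &cqia);	/* ERR_PTR() on failure */
}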
Example #5
int siw_dealloc_pd(struct ib_pd *ofa_pd)
{
	struct siw_pd	*pd = siw_pd_ofa2siw(ofa_pd);
	struct siw_dev	*sdev = siw_dev_ofa2siw(ofa_pd->device);

	siw_remove_obj(&sdev->idr_lock, &sdev->pd_idr, &pd->hdr);
	siw_pd_put(pd);

	return 0;
}
Example #6
int siw_query_gid(struct ib_device *ofa_dev, u8 port, int idx,
		   union ib_gid *gid)
{
	struct siw_dev *sdev = siw_dev_ofa2siw(ofa_dev);

	/* subnet_prefix == interface_id == 0; */
	memset(gid, 0, sizeof *gid);
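	/* the leading 6 GID bytes carry the netdev's MAC address */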
	memcpy(&gid->raw[0], sdev->netdev->dev_addr, 6);

	return 0;
}
Example #7
int siw_destroy_cq(struct ib_cq *ofa_cq)
{
	struct siw_cq		*cq  = siw_cq_ofa2siw(ofa_cq);
	struct ib_device	*ofa_dev = ofa_cq->device;
	struct siw_dev		*sdev = siw_dev_ofa2siw(ofa_dev);

	siw_remove_obj(&sdev->idr_lock, &sdev->cq_idr, &cq->hdr);
	siw_cq_put(cq);

	return 0;
}
Example #8
struct ib_ucontext *siw_alloc_ucontext(struct ib_device *ofa_dev,
				       struct ib_udata *udata)
{
	struct siw_ucontext *ctx = NULL;
	struct siw_dev *sdev = siw_dev_ofa2siw(ofa_dev);
	int rv;

	pr_debug(DBG_CM "(device=%s)\n", ofa_dev->name);

	if (atomic_inc_return(&sdev->num_ctx) > SIW_MAX_CONTEXT) {
		pr_debug(": Out of CONTEXT's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rv = -ENOMEM;
		goto err_out;
	}

	ctx->sdev = sdev;
	if (udata) {
		struct urdma_uresp_alloc_ctx uresp;
		struct file *filp;

		memset(&uresp, 0, sizeof uresp);
		uresp.dev_id = sdev->attrs.vendor_part_id;
		filp = siw_event_file_new(ctx, &uresp.event_fd);
		if (IS_ERR(filp)) {
			rv = PTR_ERR(filp);
			goto err_out;
		}

		rv = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (rv) {
			fput(filp);
			kfree(ctx->event_file);
			put_unused_fd(uresp.event_fd);
			goto err_out;
		}

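		/* publish the fd only after the copy succeeded: fd_install() cannot be undone */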
		fd_install(uresp.event_fd, filp);
	}
	return &ctx->ib_ucontext;

err_out:
	kfree(ctx);	/* kfree(NULL) is a no-op */

	atomic_dec(&sdev->num_ctx);
	return ERR_PTR(rv);
}
Example #9
int siw_query_gid(struct ib_device *ofa_dev, u8 port, int idx,
		   union ib_gid *gid)
{
	struct siw_dev *sdev = siw_dev_ofa2siw(ofa_dev);

	/* subnet_prefix == interface_id == 0; */
	memset(gid, 0, sizeof *gid);
	if (!WARN(!sdev->netdev, "No netdev associated with device %s",
				ofa_dev->name)) {
		memcpy(&gid->raw[0], sdev->netdev->dev_addr, 6);
	}

	return 0;
}
Example #10
/*
 * siw_dereg_mr()
 *
 * Release Memory Region.
 *
 * TODO: Update function if Memory Windows are supported by siw:
 *       Does the OFED core check for MW dependencies on the
 *       current MR before calling MR deregistration?
 *
 * @ofa_mr:     OFA MR contained in siw MR.
 */
int siw_dereg_mr(struct ib_mr *ofa_mr)
{
	struct siw_mr	*mr;
	struct siw_dev	*sdev = siw_dev_ofa2siw(ofa_mr->device);

	mr = siw_mr_ofa2siw(ofa_mr);

	dprint(DBG_OBJ|DBG_MM, "(MEM%d): Release UMem %p, #ref's: %d\n",
		mr->mem.hdr.id, mr->umem,
		atomic_read(&mr->mem.hdr.ref.refcount));

	mr->mem.stag_state = STAG_INVALID;

	siw_pd_put(mr->pd);
	siw_remove_obj(&sdev->idr_lock, &sdev->mem_idr, &mr->mem.hdr);
	siw_mem_put(&mr->mem);

	return 0;
}
Example #11
int siw_query_device(struct ib_device *ofa_dev, struct ib_device_attr *attr,
		struct ib_udata *udata)
{
	struct siw_dev *sdev = siw_dev_ofa2siw(ofa_dev);

	memset(attr, 0, sizeof *attr);

	attr->max_mr_size = -1ULL;
	attr->sys_image_guid = sdev->ofa_dev.node_guid;
	attr->vendor_id = sdev->attrs.vendor_id;
	attr->vendor_part_id = sdev->attrs.vendor_part_id;
	attr->max_qp = sdev->attrs.max_qp;

	attr->device_cap_flags = sdev->attrs.cap_flags;
	attr->max_cq = sdev->attrs.max_cq;
	attr->max_pd = sdev->attrs.max_pd;

	return 0;
}
Example #12
struct ib_pd *siw_alloc_pd(struct ib_device *ofa_dev,
			   struct ib_ucontext *context, struct ib_udata *udata)
{
	struct siw_pd	*pd = NULL;
	struct siw_dev	*sdev  = siw_dev_ofa2siw(ofa_dev);
	int rv;

	if (atomic_inc_return(&sdev->num_pd) > SIW_MAX_PD) {
		dprint(DBG_ON, ": Out of PD's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd) {
		dprint(DBG_ON, ": malloc\n");
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_pd_add(sdev, pd);
	if (rv) {
		dprint(DBG_ON, ": siw_pd_add\n");
		goto err_out;	/* propagate the error from siw_pd_add() */
	}
	if (context) {
		if (ib_copy_to_udata(udata, &pd->hdr.id, sizeof pd->hdr.id)) {
			rv = -EFAULT;
			goto err_out_idr;
		}
	}
	return &pd->ofa_pd;

err_out_idr:
	siw_remove_obj(&sdev->idr_lock, &sdev->pd_idr, &pd->hdr);
err_out:
	kfree(pd);
	atomic_dec(&sdev->num_pd);

	return ERR_PTR(rv);
}
Example #13
struct ib_qp *siw_create_qp(struct ib_pd *ofa_pd,
			    struct ib_qp_init_attr *attrs,
			    struct ib_udata *udata)
{
	struct siw_qp			*qp = NULL;
	struct siw_pd			*pd = siw_pd_ofa2siw(ofa_pd);
	struct ib_device		*ofa_dev = ofa_pd->device;
	struct siw_dev			*sdev = siw_dev_ofa2siw(ofa_dev);
	struct siw_cq			*scq = NULL, *rcq = NULL;
	struct siw_uresp_create_qp	uresp;

	unsigned long flags;
	bool kernel_verbs = !ofa_pd->uobject;
	int rv = 0;

	dprint(DBG_OBJ|DBG_CM, ": new QP on device %s\n",
		ofa_dev->name);

	if (attrs->qp_type != IB_QPT_RC) {
		dprint(DBG_ON, ": Only RC QP's supported\n");
		return ERR_PTR(-ENOSYS);
	}
	if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_recv_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_send_sge > SIW_MAX_SGE)  ||
	    (attrs->cap.max_recv_sge > SIW_MAX_SGE)) {
		dprint(DBG_ON, ": QP Size!\n");
		return ERR_PTR(-EINVAL);
	}
	if (attrs->cap.max_inline_data > SIW_MAX_INLINE) {
		dprint(DBG_ON, ": Max Inline Send %d > %d!\n",
		       attrs->cap.max_inline_data, (int)SIW_MAX_INLINE);
		return ERR_PTR(-EINVAL);
	}
	/*
	 * NOTE: zero-length SQ and RQ SGLs are allowed, but the QP must
	 * be able to hold at least one WQE (SQ + RQ combined)
	 */
	if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0)
		return ERR_PTR(-EINVAL);

	if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
		dprint(DBG_ON, ": Out of QP's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	scq = siw_cq_id2obj(sdev, ((struct siw_cq *)attrs->send_cq)->hdr.id);
	rcq = siw_cq_id2obj(sdev, ((struct siw_cq *)attrs->recv_cq)->hdr.id);

	if (!scq || !rcq) {
		dprint(DBG_OBJ, ": Fail: SCQ: 0x%p, RCQ: 0x%p\n",
			scq, rcq);
		rv = -EINVAL;
		goto err_out;
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		dprint(DBG_ON, ": kzalloc\n");
		rv = -ENOMEM;
		goto err_out;
	}

	INIT_LIST_HEAD(&qp->freeq);
	INIT_LIST_HEAD(&qp->sq);
	INIT_LIST_HEAD(&qp->rq);
	INIT_LIST_HEAD(&qp->orq);
	INIT_LIST_HEAD(&qp->irq);

	init_rwsem(&qp->state_lock);
	spin_lock_init(&qp->freeq_lock);
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);
	spin_lock_init(&qp->orq_lock);

	init_waitqueue_head(&qp->tx_ctx.waitq);

	rv = siw_qp_add(sdev, qp);
	if (rv)
		goto err_out;

	if (kernel_verbs) {
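		/* pre-allocate the WQE pool for kernel clients and park it on the free list */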
		int num_wqe = attrs->cap.max_send_wr + attrs->cap.max_recv_wr;
		while (num_wqe--) {
			struct siw_wqe *wqe = kzalloc(sizeof *wqe, GFP_KERNEL);
			if (!wqe) {
				rv = -ENOMEM;
				goto err_out_idr;
			}
			SIW_INC_STAT_WQE;
			INIT_LIST_HEAD(&wqe->list);
			list_add(&wqe->list, &qp->freeq);
		}
		qp->attrs.flags |= SIW_KERNEL_VERBS;
	}
	if (attrs->sq_sig_type != IB_SIGNAL_REQ_WR) {
		if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) {
			qp->attrs.flags |= SIW_SIGNAL_ALL_WR;
		} else {
			rv = -EINVAL;
			goto err_out_idr;	/* QP is already in the idr */
		}
	}
	qp->pd  = pd;
	qp->scq = scq;
	qp->rcq = rcq;

	if (attrs->srq) {
		/*
		 * SRQ support.
		 * Verbs 6.3.7: ignore RQ size, if SRQ present
		 * Verbs 6.3.5: do not check PD of SRQ against PD of QP
		 */
		qp->srq = siw_srq_ofa2siw(attrs->srq);
		qp->attrs.rq_size = 0;
		atomic_set(&qp->rq_space, 0);
		dprint(DBG_OBJ, " QP(%d): SRQ(%p) attached\n",
			QP_ID(qp), qp->srq);
	} else {
		qp->srq = NULL;
		qp->attrs.rq_size = attrs->cap.max_recv_wr;
		atomic_set(&qp->rq_space, qp->attrs.rq_size);
	}
	qp->attrs.sq_size = attrs->cap.max_send_wr;
	atomic_set(&qp->sq_space, qp->attrs.sq_size);
	qp->attrs.sq_max_sges = attrs->cap.max_send_sge;
	/*
	 * ofed has no max_send_sge_rdmawrite
	 */
	qp->attrs.sq_max_sges_rdmaw = attrs->cap.max_send_sge;
	qp->attrs.rq_max_sges = attrs->cap.max_recv_sge;

	qp->attrs.state = SIW_QP_STATE_IDLE;

	if (udata) {
		uresp.sq_size = qp->attrs.sq_size;
		uresp.rq_size = qp->attrs.rq_size;
		uresp.qp_id = QP_ID(qp);

		rv = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (rv)
			goto err_out_idr;
	}

	atomic_set(&qp->tx_ctx.in_use, 0);

	qp->ofa_qp.qp_num = QP_ID(qp);

	siw_pd_get(pd);

	INIT_LIST_HEAD(&qp->devq);
	spin_lock_irqsave(&sdev->idr_lock, flags);
	list_add_tail(&qp->devq, &sdev->qp_list);
	spin_unlock_irqrestore(&sdev->idr_lock, flags);

	return &qp->ofa_qp;

err_out_idr:
	siw_remove_obj(&sdev->idr_lock, &sdev->qp_idr, &qp->hdr);
err_out:
	if (scq)
		siw_cq_put(scq);
	if (rcq)
		siw_cq_put(rcq);

	if (qp)
		siw_drain_wq(&qp->freeq);

	kfree(qp);
	atomic_dec(&sdev->num_qp);

	return ERR_PTR(rv);
}
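A hypothetical in-kernel consumer, sketched with the core ib_create_qp() entry point and small illustrative queue sizes:

static struct ib_qp *make_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr qpia = {
		.qp_type	= IB_QPT_RC,
		.send_cq	= cq,
		.recv_cq	= cq,
		.sq_sig_type	= IB_SIGNAL_REQ_WR,
		.cap = {
			.max_send_wr	= 16,
			.max_recv_wr	= 16,
			.max_send_sge	= 1,
			.max_recv_sge	= 1,
		},
	};

	return ib_create_qp(pd, &qpia);	/* ERR_PTR() on failure */
}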
Example #14
int siw_query_device(struct ib_device *ofa_dev, struct ib_device_attr *attr,
		     struct ib_udata *udata)
{
	struct siw_dev *sdev = siw_dev_ofa2siw(ofa_dev);
	/*
	 * A process context is needed to report avail memory resources.
	 */
	if (in_interrupt())
		return -EINVAL;

	memset(attr, 0, sizeof *attr);

	attr->max_mr_size = rlimit(RLIMIT_MEMLOCK); /* per process */
	attr->vendor_id = sdev->attrs.vendor_id;
	attr->vendor_part_id = sdev->attrs.vendor_part_id;
	attr->max_qp = sdev->attrs.max_qp;
	attr->max_qp_wr = sdev->attrs.max_qp_wr;

	/*
	 * RDMA Read parameters:
	 * Max. ORD (Outbound Read queue Depth), a.k.a. max_initiator_depth
	 * Max. IRD (Inbound Read queue Depth), a.k.a. max_responder_resources
	 */
	attr->max_qp_rd_atom = sdev->attrs.max_ord;
	attr->max_qp_init_rd_atom = sdev->attrs.max_ird;
	attr->max_res_rd_atom = sdev->attrs.max_qp * sdev->attrs.max_ird;
	attr->device_cap_flags = sdev->attrs.cap_flags;
	attr->max_sge = sdev->attrs.max_sge;
	attr->max_sge_rd = sdev->attrs.max_sge_rd;
	attr->max_cq = sdev->attrs.max_cq;
	attr->max_cqe = sdev->attrs.max_cqe;
	attr->max_mr = sdev->attrs.max_mr;
	attr->max_pd = sdev->attrs.max_pd;
	attr->max_mw = sdev->attrs.max_mw;
	attr->max_fmr = sdev->attrs.max_fmr;
	attr->max_srq = sdev->attrs.max_srq;
	attr->max_srq_wr = sdev->attrs.max_srq_wr;
	attr->max_srq_sge = sdev->attrs.max_srq_sge;

	attr->max_ah = 0;
	attr->max_mcast_grp = 0;
	attr->max_mcast_qp_attach = 0;
	attr->max_total_mcast_qp_attach = 0;
	attr->max_ee = 0;
	attr->max_rdd = 0;
	attr->max_ee_rd_atom = 0;
	attr->max_ee_init_rd_atom = 0;
	attr->atomic_cap = IB_ATOMIC_NONE;

	memcpy(&attr->sys_image_guid, sdev->netdev->dev_addr, 6);

	/*
	 * TODO: work out which of the following should
	 * carry useful information
	 *
	 * attr->fw_ver;
	 * attr->max_map_per_fmr
	 * attr->max_raw_ipv6_qp
	 * attr->max_raw_ethy_qp
	 * attr->max_pkeys
	 * attr->page_size_cap;
	 * attr->hw_ver;
	 * attr->local_ca_ack_delay;
	 */

	udata->outlen = 0;
	return 0;
}
Example #15
/*
 * siw_create_cq()
 *
 * Create CQ of requested size on given device.
 *
 * @ofa_dev:	OFA device contained in siw device
 * @init_attr:	CQ init attributes; ->cqe holds the maximum number of CQE's allowed.
 * @ib_context: user context.
 * @udata:	used to provide CQ ID back to user.
 */
static struct ib_cq *do_siw_create_cq(struct ib_device *ofa_dev,
				      const struct ib_cq_init_attr *init_attr,
				      struct ib_ucontext *ib_context,
				      struct ib_udata *udata)
{
	struct siw_ucontext		*ctx;
	struct siw_cq			*cq = NULL;
	struct siw_dev			*sdev;
	struct urdma_uresp_create_cq	uresp;
	int rv;

	if (!ofa_dev) {
		pr_warn("NO OFA device\n");
		/* num_cq has not been taken yet; do not unwind via err_out */
		return ERR_PTR(-ENODEV);
	}
	sdev = siw_dev_ofa2siw(ofa_dev);
	if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) {
		pr_debug(": Out of CQ's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (init_attr->cqe < 1) {
		pr_debug(": CQE: %d\n", init_attr->cqe);
		rv = -EINVAL;
		goto err_out;
	}
	cq = kzalloc(sizeof *cq, GFP_KERNEL);
	if (!cq) {
		pr_debug(":  kmalloc\n");
		rv = -ENOMEM;
		goto err_out;
	}
	cq->ofa_cq.cqe = init_attr->cqe;

	if (!ib_context) {
		rv = -EINVAL;
		goto err_out;
	}
	ctx = siw_ctx_ofa2siw(ib_context);

	rv = siw_cq_add(sdev, cq);
	if (rv)
		goto err_out;

	uresp.cq_id = OBJ_ID(cq);

	rv = ib_copy_to_udata(udata, &uresp, sizeof uresp);
	if (rv)
		goto err_out_idr;

	return &cq->ofa_cq;

err_out_idr:
	siw_remove_obj(&sdev->idr_lock, &sdev->cq_idr, &cq->hdr);
err_out:
	pr_debug(DBG_OBJ ": CQ creation failed %d", rv);

	kfree(cq);
	atomic_dec(&sdev->num_cq);

	return ERR_PTR(rv);
}
Example #16
struct ib_qp *siw_create_qp(struct ib_pd *ofa_pd,
			    struct ib_qp_init_attr *attrs,
			    struct ib_udata *udata)
{
	struct siw_qp			*qp = NULL;
	struct siw_pd			*pd = siw_pd_ofa2siw(ofa_pd);
	struct ib_device		*ofa_dev = ofa_pd->device;
	struct siw_dev			*sdev = siw_dev_ofa2siw(ofa_dev);
	struct siw_cq			*scq = NULL, *rcq = NULL;

	int rv = 0;

	pr_debug(DBG_OBJ DBG_CM ": new QP on device %s\n",
		ofa_dev->name);

	if (!ofa_pd->uobject) {
		pr_debug(": This driver does not support kernel clients\n");
		return ERR_PTR(-EINVAL);
	}

	if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
		pr_debug(": Out of QP's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (attrs->qp_type != IB_QPT_RC) {
		pr_debug(": Only RC QP's supported\n");
		rv = -EINVAL;
		goto err_out;
	}
	if (attrs->srq) {
		pr_debug(": SRQ is not supported\n");
		rv = -EINVAL;
		goto err_out;
	}

	scq = siw_cq_id2obj(sdev, ((struct siw_cq *)attrs->send_cq)->hdr.id);
	rcq = siw_cq_id2obj(sdev, ((struct siw_cq *)attrs->recv_cq)->hdr.id);

	if (!scq || !rcq) {
		pr_debug(DBG_OBJ ": Fail: SCQ: 0x%p, RCQ: 0x%p\n",
			scq, rcq);
		rv = -EINVAL;
		goto err_out;
	}
	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp) {
		pr_debug(": kzalloc\n");
		rv = -ENOMEM;
		goto err_out;
	}

	init_rwsem(&qp->state_lock);

	rv = siw_qp_add(sdev, qp);
	if (rv)
		goto err_out;

	qp->pd  = pd;
	qp->scq = scq;
	qp->rcq = rcq;
	qp->attrs.state = SIW_QP_STATE_IDLE;

	if (udata) {
		struct urdma_udata_create_qp ureq;
		struct urdma_uresp_create_qp uresp;

		rv = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
		if (rv)
			goto err_out_idr;
		qp->attrs.irq_size = ureq.ird_max;
		qp->attrs.orq_size = ureq.ord_max;
		qp->attrs.urdma_devid = ureq.urdmad_dev_id;
		qp->attrs.urdma_qp_id = ureq.urdmad_qp_id;
		qp->attrs.urdma_rxq = ureq.rxq;
		qp->attrs.urdma_txq = ureq.txq;

		memset(&uresp, 0, sizeof uresp);
		uresp.kmod_qp_id = QP_ID(qp);

		rv = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (rv)
			goto err_out_idr;
	}

	qp->ofa_qp.qp_num = QP_ID(qp);

	siw_pd_get(pd);

	return &qp->ofa_qp;

err_out_idr:
	siw_remove_obj(&sdev->idr_lock, &sdev->qp_idr, &qp->hdr);
err_out:
	if (scq)
		siw_cq_put(scq);
	if (rcq)
		siw_cq_put(rcq);

	kfree(qp);	/* kfree(NULL) is a no-op */
	atomic_dec(&sdev->num_qp);

	return ERR_PTR(rv);
}