Example #1
0
int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
	struct ehca_shca *shca = container_of(ibqp->pd->device,
					      struct ehca_shca, ib_device);
	union ib_gid my_gid;
	u64 subnet_prefix, interface_id, h_ret;

	if (ibqp->qp_type != IB_QPT_UD) {
		ehca_err(ibqp->device, "invalid qp_type %x", ibqp->qp_type);
		return -EINVAL;
	}

	if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
		ehca_err(ibqp->device, "invalid mulitcast gid");
		return -EINVAL;
	} else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
		ehca_err(ibqp->device, "invalid mulitcast lid=%x", lid);
		return -EINVAL;
	}

	memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid));

	/*
	 * split the GID into the subnet prefix and interface id expected by
	 * the hcall, converting from the GID's big-endian layout
	 */
	subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
	interface_id = be64_to_cpu(my_gid.global.interface_id);
	h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle,
				   my_qp->ipz_qp_handle,
				   my_qp->galpas.kernel,
				   lid, subnet_prefix, interface_id);
	if (h_ret != H_SUCCESS)
		ehca_err(ibqp->device,
			 "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
			 "h_ret=%li", my_qp, ibqp->qp_num, h_ret);

	return ehca2ib_return_code(h_ret);
}
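These excerpts show only the provider side; as a minimal usage sketch, assuming the standard kernel verbs API from <rdma/ib_verbs.h>, a kernel consumer would reach this detach_mcast hook through ib_detach_mcast():

/*
 * Hedged usage sketch, not driver code: leave a multicast group on a UD QP
 * via the IB core, which dispatches to the provider's detach_mcast verb
 * (ehca_detach_mcast above).  The GID and MLID must be the ones used when
 * the group was joined; leave_mcast_group() is a hypothetical helper.
 */
static int leave_mcast_group(struct ib_qp *qp, union ib_gid *mgid, u16 mlid)
{
	int err = ib_detach_mcast(qp, mgid, mlid);

	if (err)
		pr_err("ib_detach_mcast failed: %d\n", err);
	return err;
}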
Example #2
0
struct ib_qp *ehca_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *init_attr,
			     struct ib_udata *udata)
{
	static int da_rc_msg_size[] = { 128, 256, 512, 1024, 2048, 4096 };
	static int da_ud_sq_msg_size[] = { 128, 384, 896, 1920, 3968 };
	struct ehca_qp *my_qp;
	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct ib_ucontext *context = NULL;
	u64 h_ret;
	int max_send_sge, max_recv_sge, ret;

	/* h_call's out parameters */
	struct ehca_alloc_qp_parms parms;
	u32 swqe_size = 0, rwqe_size = 0;
	u8 daqp_completion, isdaqp;
	unsigned long flags;

	if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
	    init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
		ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed",
			 init_attr->sq_sig_type);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * eHCA overloads qp_type for its daqp (low-latency QP) extension:
	 * bits 5-6 select the daqp completion mode, bit 7 marks a daqp and
	 * bits 0-4 carry the plain IB qp_type.
	 */
	daqp_completion = init_attr->qp_type & 0x60;
	isdaqp = (init_attr->qp_type & 0x80) ? 1 : 0;
	init_attr->qp_type = init_attr->qp_type & 0x1F;

	if (init_attr->qp_type != IB_QPT_UD &&
	    init_attr->qp_type != IB_QPT_SMI &&
	    init_attr->qp_type != IB_QPT_GSI &&
	    init_attr->qp_type != IB_QPT_UC &&
	    init_attr->qp_type != IB_QPT_RC) {
		ehca_err(pd->device, "wrong QP Type=%x", init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}
	if ((init_attr->qp_type != IB_QPT_RC && init_attr->qp_type != IB_QPT_UD)
	    && isdaqp) {
		ehca_err(pd->device, "unsupported LL QP Type=%x",
			 init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	} else if (init_attr->qp_type == IB_QPT_RC && isdaqp &&
		   (init_attr->cap.max_send_wr > 255 ||
		    init_attr->cap.max_recv_wr > 255)) {
		ehca_err(pd->device, "Invalid Number of max_sq_wr=%x "
			 "or max_rq_wr=%x for QP Type=%x",
			 init_attr->cap.max_send_wr,
			 init_attr->cap.max_recv_wr, init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	} else if (init_attr->qp_type == IB_QPT_UD && isdaqp &&
		   init_attr->cap.max_send_wr > 255) {
		ehca_err(pd->device,
			 "Invalid Number of max_send_wr=%x for UD QP_TYPE=%x",
			 init_attr->cap.max_send_wr, init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	if (pd->uobject && udata)
		context = pd->uobject->context;

	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
	if (!my_qp) {
		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
		return ERR_PTR(-ENOMEM);
	}

	memset (&parms, 0, sizeof(struct ehca_alloc_qp_parms));
	spin_lock_init(&my_qp->spinlock_s);
	spin_lock_init(&my_qp->spinlock_r);

	my_qp->recv_cq =
		container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
	my_qp->send_cq =
		container_of(init_attr->send_cq, struct ehca_cq, ib_cq);

	my_qp->init_attr = *init_attr;

	/*
	 * idr_get_new() fails with -EAGAIN when the layer preallocated by
	 * idr_pre_get() has been consumed by a concurrent allocation, so
	 * preallocate and retry until a token is assigned.
	 */
	do {
		if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
			ret = -ENOMEM;
			ehca_err(pd->device, "Can't reserve idr resources.");
			goto create_qp_exit0;
		}

		spin_lock_irqsave(&ehca_qp_idr_lock, flags);
		ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
		spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

	} while (ret == -EAGAIN);

	if (ret) {
		ret = -ENOMEM;
		ehca_err(pd->device, "Can't allocate new idr entry.");
		goto create_qp_exit0;
	}

	parms.servicetype = ibqptype2servicetype(init_attr->qp_type);
	if (parms.servicetype < 0) {
		ret = -EINVAL;
		ehca_err(pd->device, "Invalid qp_type=%x", init_attr->qp_type);
		goto create_qp_exit0;
	}

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		parms.sigtype = HCALL_SIGT_EVERY;
	else
		parms.sigtype = HCALL_SIGT_BY_WQE;

	/*
	 * UD_AV CIRCUMVENTION: UD-type QPs apparently need two extra SGEs per
	 * WQE for the firmware's address-vector handling, so reserve them on
	 * top of what the consumer requested.
	 */
	max_send_sge = init_attr->cap.max_send_sge;
	max_recv_sge = init_attr->cap.max_recv_sge;
	if (IB_QPT_UD == init_attr->qp_type ||
	    IB_QPT_GSI == init_attr->qp_type ||
	    IB_QPT_SMI == init_attr->qp_type) {
		max_send_sge += 2;
		max_recv_sge += 2;
	}

	parms.ipz_eq_handle = shca->eq.ipz_eq_handle;
	parms.daqp_ctrl = isdaqp | daqp_completion;
	parms.pd = my_pd->fw_pd;
	parms.max_recv_sge = max_recv_sge;
	parms.max_send_sge = max_send_sge;

	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, my_qp, &parms);

	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lx",
			 h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto create_qp_exit1;
	}

	my_qp->ib_qp.qp_num = my_qp->real_qp_num;

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
		if (isdaqp == 0) {
			swqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
					     (parms.act_nr_send_sges)]);
			rwqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
					     (parms.act_nr_recv_sges)]);
		} else { /* for daqp we need to use msg size, not wqe size */
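Example #2 breaks off inside the daqp branch; purely as an illustration of what the comment implies (hypothetical code, not the driver's actual continuation), the fixed message-size tables declared at the top of the function would be indexed by SGE count instead of computing sizes with offsetof() on struct ehca_wqe:

			/*
			 * Illustration only (assumed, not taken from the
			 * driver): a daqp sizes its WQEs from the
			 * da_rc_msg_size[] table declared above rather than
			 * from the scatter/gather layout of struct ehca_wqe.
			 */
			swqe_size = da_rc_msg_size[max_send_sge];
			rwqe_size = da_rc_msg_size[max_recv_sge];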
Example #3
0
/*
 * init_qp_queues initializes the send and receive queues (squeue/rqueue)
 * and registers their pages with the hardware.
 */
static inline int init_qp_queues(struct ehca_shca *shca,
				 struct ehca_qp *my_qp,
				 int nr_sq_pages,
				 int nr_rq_pages,
				 int swqe_size,
				 int rwqe_size,
				 int nr_send_sges, int nr_receive_sges)
{
	int ret, cnt, ipz_rc;
	void *vpage;
	u64 rpage, h_ret;
	struct ib_device *ib_dev = &shca->ib_device;
	struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;

	ipz_rc = ipz_queue_ctor(&my_qp->ipz_squeue,
				nr_sq_pages,
				EHCA_PAGESIZE, swqe_size, nr_send_sges);
	if (!ipz_rc) {
		ehca_err(ib_dev,"Cannot allocate page for squeue. ipz_rc=%x",
			 ipz_rc);
		return -EBUSY;
	}

	ipz_rc = ipz_queue_ctor(&my_qp->ipz_rqueue,
				nr_rq_pages,
				EHCA_PAGESIZE, rwqe_size, nr_receive_sges);
	if (!ipz_rc) {
		ehca_err(ib_dev, "Cannot allocate page for rqueue. ipz_rc=%x",
			 ipz_rc);
		ret = -EBUSY;
		goto init_qp_queues0;
	}
	/* register SQ pages */
	for (cnt = 0; cnt < nr_sq_pages; cnt++) {
		vpage = ipz_qpageit_get_inc(&my_qp->ipz_squeue);
		if (!vpage) {
			ehca_err(ib_dev, "SQ ipz_qpageit_get_inc() "
				 "failed p_vpage= %p", vpage);
			ret = -EINVAL;
			goto init_qp_queues1;
		}
		rpage = virt_to_abs(vpage);

		h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
						 my_qp->ipz_qp_handle,
						 &my_qp->pf, 0, 0,
						 rpage, 1,
						 my_qp->galpas.kernel);
		if (h_ret < H_SUCCESS) {
			ehca_err(ib_dev, "SQ hipz_qp_register_rpage()"
				 " failed rc=%lx", h_ret);
			ret = ehca2ib_return_code(h_ret);
			goto init_qp_queues1;
		}
	}

	ipz_qeit_reset(&my_qp->ipz_squeue);

	/* register RQ pages */
	for (cnt = 0; cnt < nr_rq_pages; cnt++) {
		vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
		if (!vpage) {
			ehca_err(ib_dev, "RQ ipz_qpageit_get_inc() "
				 "failed p_vpage = %p", vpage);
			ret = -EINVAL;
			goto init_qp_queues1;
		}

		rpage = virt_to_abs(vpage);

		h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
						 my_qp->ipz_qp_handle,
						 &my_qp->pf, 0, 1,
						 rpage, 1, my_qp->galpas.kernel);
		if (h_ret < H_SUCCESS) {
			ehca_err(ib_dev, "RQ hipz_qp_register_rpage() failed "
				 "rc=%lx", h_ret);
			ret = ehca2ib_return_code(h_ret);
			goto init_qp_queues1;
		}
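		/*
		 * The firmware reports H_PAGE_REGISTERED for every page but
		 * the last one and H_SUCCESS once the final page of the queue
		 * has been registered; the extra ipz_qpageit_get_inc() probe
		 * below checks that the page iterator really is exhausted.
		 */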
		if (cnt == (nr_rq_pages - 1)) {	/* last page! */
			if (h_ret != H_SUCCESS) {
				ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
					 "h_ret= %lx ", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queues1;
			}
			vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
			if (vpage) {
				ehca_err(ib_dev, "ipz_qpageit_get_inc() "
					 "should not succeed vpage=%p", vpage);
				ret = -EINVAL;
				goto init_qp_queues1;
			}
		} else {
			if (h_ret != H_PAGE_REGISTERED) {
				ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
					 "h_ret= %lx ", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queues1;
			}
		}
	}

	ipz_qeit_reset(&my_qp->ipz_rqueue);

	return 0;

init_qp_queues1:
	ipz_queue_dtor(&my_qp->ipz_rqueue);
init_qp_queues0:
	ipz_queue_dtor(&my_qp->ipz_squeue);
	return ret;
}
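The call site of init_qp_queues() is not part of these excerpts; a minimal sketch, assuming it is invoked from ehca_create_qp() once hipz_h_alloc_resource_qp() has returned the actual queue geometry (the page counts, WQE sizes and the create_qp_exit2 label below are illustrative assumptions, not names confirmed by the excerpts):

	ret = init_qp_queues(shca, my_qp, nr_sq_pages, nr_rq_pages,
			     swqe_size, rwqe_size,
			     parms.act_nr_send_sges, parms.act_nr_recv_sges);
	if (ret) {
		ehca_err(pd->device, "Couldn't initialize r/squeue and pages"
			 " ret=%x", ret);
		goto create_qp_exit2;	/* hypothetical cleanup label */
	}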