Example #1
File: srq.c Project: 710leo/LVS
struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_srq *srq;
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scatter;
	int desc_size;
	int buf_size;
	int err;
	int i;

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr  >= dev->dev->caps.max_srq_wqes ||
	    init_attr->attr.max_sge >  dev->dev->caps.max_srq_sge)
		return ERR_PTR(-EINVAL);

	srq = kmalloc(sizeof *srq, GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	desc_size = max(32UL,
			roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) +
					   srq->msrq.max_gs *
					   sizeof (struct mlx4_wqe_data_seg)));
	srq->msrq.wqe_shift = ilog2(desc_size);

	buf_size = srq->msrq.max * desc_size;

	if (pd->uobject) {
		struct mlx4_ib_create_srq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_srq;
		}

		srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
					buf_size, 0, 0);
		if (IS_ERR(srq->umem)) {
			err = PTR_ERR(srq->umem);
			goto err_srq;
		}

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
				    ilog2(srq->umem->page_size), &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
		if (err)
			goto err_mtt;

		err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
					  ucmd.db_addr, &srq->db);
		if (err)
			goto err_mtt;
	} else {
		err = mlx4_db_alloc(dev->dev, &srq->db, 0);
		if (err)
			goto err_srq;

		*srq->db.db = 0;

		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		srq->head    = 0;
		srq->tail    = srq->msrq.max - 1;
		srq->wqe_ctr = 0;

		for (i = 0; i < srq->msrq.max; ++i) {
			next = get_wqe(srq, i);
			next->next_wqe_index =
				cpu_to_be16((i + 1) & (srq->msrq.max - 1));

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + desc_size;
			     ++scatter)
				scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY);
		}

		err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
				    &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
		if (err)
			goto err_mtt;

		srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
		if (!srq->wrid) {
			err = -ENOMEM;
			goto err_mtt;
		}
	}

	err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, &srq->mtt,
			     srq->db.dma, &srq->msrq);
	if (err)
		goto err_wrid;

	srq->msrq.event = mlx4_ib_srq_event;

	if (pd->uobject)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_wrid;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;

err_wrid:
	if (pd->uobject)
		mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
	else
		kfree(srq->wrid);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &srq->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(srq->umem);
	else
		mlx4_buf_free(dev->dev, buf_size, &srq->buf);

err_db:
	if (!pd->uobject)
		mlx4_db_free(dev->dev, &srq->db);

err_srq:
	kfree(srq);

	return ERR_PTR(err);
}
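A note on the sizing arithmetic in the example above: desc_size is the per-WQE descriptor size rounded up to a power of two with a 32-byte floor, wqe_shift is its base-2 log, and buf_size is max descriptors times desc_size. Below is a minimal userspace sketch of the same computation, with local stand-ins for the kernel's roundup_pow_of_two() and ilog2() and assumed 16-byte next/data segment sizes:

#include <stdio.h>

/* Local stand-ins for the kernel's roundup_pow_of_two() and ilog2(). */
static unsigned long roundup_pow_of_two(unsigned long x)
{
	unsigned long r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

static unsigned int ilog2(unsigned long x)
{
	unsigned int n = 0;

	while (x >>= 1)
		n++;
	return n;
}

int main(void)
{
	unsigned int max_wr = 100, max_gs = 4;		/* hypothetical SRQ request */
	unsigned long next_seg = 16, data_seg = 16;	/* assumed segment sizes */

	unsigned long max = roundup_pow_of_two(max_wr + 1);		/* 128 */
	unsigned long desc_size = roundup_pow_of_two(next_seg + max_gs * data_seg);

	if (desc_size < 32)	/* the driver enforces a 32-byte floor */
		desc_size = 32;

	printf("max=%lu desc_size=%lu wqe_shift=%u buf_size=%lu\n",
	       max, desc_size, ilog2(desc_size), max * desc_size);
	return 0;
}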
Example #2
struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = hr_dev->dev;
	struct hns_roce_ib_create_cq ucmd;
	struct hns_roce_ib_create_cq_resp resp = {};
	struct hns_roce_cq *hr_cq = NULL;
	struct hns_roce_uar *uar = NULL;
	int vector = attr->comp_vector;
	int cq_entries = attr->cqe;
	int ret;

	if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
		dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
			cq_entries, hr_dev->caps.max_cqes);
		return ERR_PTR(-EINVAL);
	}

	hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
	if (!hr_cq)
		return ERR_PTR(-ENOMEM);

	if (hr_dev->caps.min_cqes)
		cq_entries = max(cq_entries, hr_dev->caps.min_cqes);

	cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
	hr_cq->ib_cq.cqe = cq_entries - 1;
	spin_lock_init(&hr_cq->lock);

	if (context) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "Failed to copy_from_udata.\n");
			ret = -EFAULT;
			goto err_cq;
		}

		/* Get user space address, write it into mtt table */
		ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
					      &hr_cq->umem, ucmd.buf_addr,
					      cq_entries);
		if (ret) {
			dev_err(dev, "Failed to get_cq_umem.\n");
			goto err_cq;
		}

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    (udata->outlen >= sizeof(resp))) {
			ret = hns_roce_db_map_user(to_hr_ucontext(context),
						   udata, ucmd.db_addr,
						   &hr_cq->db);
			if (ret) {
				dev_err(dev, "cq record doorbell map failed!\n");
				goto err_mtt;
			}
			hr_cq->db_en = 1;
			resp.cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
		}

		/* Get user space parameters */
		uar = &to_hr_ucontext(context)->uar;
	} else {
		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
			ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
			if (ret)
				goto err_cq;

			hr_cq->set_ci_db = hr_cq->db.db_record;
			*hr_cq->set_ci_db = 0;
			hr_cq->db_en = 1;
		}

		/* Init mtt table and write buf address to mtt table */
		ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
					       cq_entries);
		if (ret) {
			dev_err(dev, "Failed to alloc_cq_buf.\n");
			goto err_db;
		}

		uar = &hr_dev->priv_uar;
		hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
				DB_REG_OFFSET * uar->index;
	}

	/* Allocate cq index, fill cq_context */
	ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar,
				hr_cq, vector);
	if (ret) {
		dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
		goto err_dbmap;
	}

	/*
	 * For a CQ created by kernel space, the tptr value should be
	 * initialized to zero. For a CQ created by user space, setting tptr
	 * to zero here would cause synchronization problems, so it is
	 * initialized in user space instead.
	 */
	if (!context && hr_cq->tptr_addr)
		*hr_cq->tptr_addr = 0;

	/* Get created cq handler and carry out event */
	hr_cq->comp = hns_roce_ib_cq_comp;
	hr_cq->event = hns_roce_ib_cq_event;
	hr_cq->cq_depth = cq_entries;

	if (context) {
		resp.cqn = hr_cq->cqn;
		ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (ret)
			goto err_cqc;
	}

	return &hr_cq->ib_cq;

err_cqc:
	hns_roce_free_cq(hr_dev, hr_cq);

err_dbmap:
	if (context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
	    (udata->outlen >= sizeof(resp)))
		hns_roce_db_unmap_user(to_hr_ucontext(context),
				       &hr_cq->db);

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
	if (context)
		ib_umem_release(hr_cq->umem);
	else
		hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
					hr_cq->ib_cq.cqe);

err_db:
	if (!context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
		hns_roce_free_db(hr_dev, &hr_cq->db);

err_cq:
	kfree(hr_cq);
	return ERR_PTR(ret);
}
Example #3
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof (struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}
Example #4
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_alloc_ucontext_req_v2 req;
	struct mlx5_ib_alloc_ucontext_resp resp;
	struct mlx5_ib_ucontext *context;
	struct mlx5_uuar_info *uuari;
	struct mlx5_uar *uars;
	int gross_uuars;
	int num_uars;
	int ver;
	int uuarn;
	int err;
	int i;
	size_t reqlen;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	memset(&req, 0, sizeof(req));
	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
		ver = 0;
	else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
		ver = 2;
	else
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&req, udata, reqlen);
	if (err)
		return ERR_PTR(err);

	if (req.flags || req.reserved)
		return ERR_PTR(-EINVAL);

	if (req.total_num_uuars > MLX5_MAX_UUARS)
		return ERR_PTR(-ENOMEM);

	if (req.total_num_uuars == 0)
		return ERR_PTR(-EINVAL);

	req.total_num_uuars = ALIGN(req.total_num_uuars,
				    MLX5_NON_FP_BF_REGS_PER_PAGE);
	if (req.num_low_latency_uuars > req.total_num_uuars - 1)
		return ERR_PTR(-EINVAL);

	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
	resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
	resp.cache_line_size = L1_CACHE_BYTES;
	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	uuari = &context->uuari;
	mutex_init(&uuari->lock);
	uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
	if (!uars) {
		err = -ENOMEM;
		goto out_ctx;
	}

	uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
				sizeof(*uuari->bitmap),
				GFP_KERNEL);
	if (!uuari->bitmap) {
		err = -ENOMEM;
		goto out_uar_ctx;
	}
	/*
	 * clear all fast path uuars
	 */
	for (i = 0; i < gross_uuars; i++) {
		uuarn = i & 3;
		if (uuarn == 2 || uuarn == 3)
			set_bit(i, uuari->bitmap);
	}

	uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
	if (!uuari->count) {
		err = -ENOMEM;
		goto out_bitmap;
	}

	for (i = 0; i < num_uars; i++) {
		err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
		if (err)
			goto out_count;
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	resp.tot_uuars = req.total_num_uuars;
	resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
	err = ib_copy_to_udata(udata, &resp,
			       sizeof(resp) - sizeof(resp.reserved));
	if (err)
		goto out_uars;

	uuari->ver = ver;
	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
	uuari->uars = uars;
	uuari->num_uars = num_uars;
	return &context->ibucontext;

out_uars:
	for (i--; i >= 0; i--)
		mlx5_cmd_free_uar(dev->mdev, uars[i].index);
out_count:
	kfree(uuari->count);

out_bitmap:
	kfree(uuari->bitmap);

out_uar_ctx:
	kfree(uars);

out_ctx:
	kfree(context);
	return ERR_PTR(err);
}
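A side note on the bitmap loop in the example above: it walks the bf registers in groups of four and sets the bits for positions 2 and 3 of each group, which presumably keeps those "fast path" slots away from the generic bitmap allocator. A tiny standalone sketch of the same marking, using a plain unsigned long as the bitmap and a made-up gross_uuars:

#include <stdio.h>

int main(void)
{
	unsigned int gross_uuars = 16;	/* example value: 4 groups of 4 registers */
	unsigned long bitmap = 0;	/* set bit = slot unavailable to the allocator */
	unsigned int i;

	for (i = 0; i < gross_uuars; i++) {
		unsigned int uuarn = i & 3;	/* position within its group of 4 */

		if (uuarn == 2 || uuarn == 3)	/* the two fast-path slots */
			bitmap |= 1UL << i;
	}

	/* Prints 0xcccc: bits 2 and 3 of every 4-bit group are set. */
	printf("bitmap = 0x%lx\n", bitmap);
	return 0;
}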
Example #5
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata, unsigned long sqpn,
				     struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_ib_create_qp ucmd;
	unsigned long qpn = 0;
	int ret = 0;
	u32 page_shift;
	u32 npages;
	int i;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;

	hr_qp->ibqp.qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR);
	else
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);

	ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
				   !!init_attr->srq, hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_set_rq_size failed\n");
		goto err_out;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
		/* allocate recv inline buf */
		hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
					       sizeof(struct hns_roce_rinl_wqe),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list) {
			ret = -ENOMEM;
			goto err_out;
		}

		hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;

		/* First, allocate one contiguous sg_list buffer for the whole queue */
		hr_qp->rq_inl_buf.wqe_list[0].sg_list =
					kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
					       init_attr->cap.max_recv_sge *
					       sizeof(struct hns_roce_rinl_sge),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
			ret = -ENOMEM;
			goto err_wqe_list;
		}

		for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
			/* Then point each WQE's sg_list at its slice of that buffer */
			hr_qp->rq_inl_buf.wqe_list[i].sg_list =
				&hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
				init_attr->cap.max_recv_sge];
	}

	if (ib_pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "ib_copy_from_udata error for create qp\n");
			ret = -EFAULT;
			goto err_rq_sge_list;
		}

		ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
						&ucmd);
		if (ret) {
			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
			goto err_rq_sge_list;
		}

		hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
					  ucmd.buf_addr, hr_qp->buff_size, 0,
					  0);
		if (IS_ERR(hr_qp->umem)) {
			dev_err(dev, "ib_umem_get error for create qp\n");
			ret = PTR_ERR(hr_qp->umem);
			goto err_rq_sge_list;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		if (hr_dev->caps.mtt_buf_pg_sz) {
			npages = (ib_umem_page_count(hr_qp->umem) +
				  (1 << hr_dev->caps.mtt_buf_pg_sz) - 1) /
				  (1 << hr_dev->caps.mtt_buf_pg_sz);
			page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
			ret = hns_roce_mtt_init(hr_dev, npages,
				    page_shift,
				    &hr_qp->mtt);
		} else {
			ret = hns_roce_mtt_init(hr_dev,
				    ib_umem_page_count(hr_qp->umem),
				    hr_qp->umem->page_shift,
				    &hr_qp->mtt);
		}
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for create qp\n");
			goto err_buf;
		}

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
						 hr_qp->umem);
		if (ret) {
			dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
			goto err_mtt;
		}
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		/* Set SQ size */
		ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
						  hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
			goto err_rq_sge_list;
		}

		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		/* Allocate QP buf */
		page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
		if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
				       (1 << page_shift) * 2,
				       &hr_qp->hr_buf, page_shift)) {
			dev_err(dev, "hns_roce_buf_alloc error!\n");
			ret = -ENOMEM;
			goto err_rq_sge_list;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		/* Write MTT */
		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
					hr_qp->hr_buf.page_shift, &hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
			goto err_buf;
		}

		ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
					     &hr_qp->hr_buf);
		if (ret) {
			dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
			goto err_mtt;
		}

		hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
			ret = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		/* Get QPN */
		ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
		if (ret) {
			dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
			goto err_wrid;
		}
	}

	if (init_attr->qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
		/* In v1 engine, GSI QP context in RoCE engine's register */
		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	} else {
		ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	}

	if (sqpn)
		hr_qp->doorbell_qpn = 1;
	else
		hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

	hr_qp->event = hns_roce_ib_qp_event;

	return 0;

err_qpn:
	if (!sqpn)
		hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
	kfree(hr_qp->sq.wrid);
	kfree(hr_qp->rq.wrid);

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
	if (ib_pd->uobject)
		ib_umem_release(hr_qp->umem);
	else
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_rq_sge_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);

err_wqe_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list);

err_out:
	return ret;
}
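The RQ-inline setup in the example above allocates sg_list storage once for the whole queue and then points each wqe_list[i].sg_list at its slice of that single buffer. A minimal userspace sketch of that slicing, with hypothetical stand-in types instead of the hns_roce ones:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for hns_roce_rinl_sge / hns_roce_rinl_wqe. */
struct rinl_sge { void *addr; unsigned int len; };
struct rinl_wqe { struct rinl_sge *sg_list; unsigned int sge_cnt; };

int main(void)
{
	unsigned int wqe_cnt = 8, max_recv_sge = 4;
	unsigned int i;

	struct rinl_wqe *wqe_list = calloc(wqe_cnt, sizeof(*wqe_list));
	if (!wqe_list)
		return 1;

	/* One backing array with wqe_cnt * max_recv_sge SGEs in total... */
	wqe_list[0].sg_list = calloc((size_t)wqe_cnt * max_recv_sge,
				     sizeof(*wqe_list[0].sg_list));
	if (!wqe_list[0].sg_list) {
		free(wqe_list);
		return 1;
	}

	/* ...and every other WQE points into its own slice of it. */
	for (i = 1; i < wqe_cnt; i++)
		wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];

	printf("wqe[3].sg_list starts %td SGEs into the shared buffer\n",
	       wqe_list[3].sg_list - wqe_list[0].sg_list);

	free(wqe_list[0].sg_list);
	free(wqe_list);
	return 0;
}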
Example #6
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __FUNCTION__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof (struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}
Example #7
File: srq.c Project: Cai900205/test
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			   struct mlx5_create_srq_mbox_in **in,
			   struct ib_udata *udata, int buf_size, int *inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_srq ucmd;
	int err;
	int npages;
	int page_shift;
	int ncont;
	u32 offset;

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
		mlx5_ib_dbg(dev, "failed copy udata\n");
		return -EFAULT;
	}
	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
				0, 0);
	if (IS_ERR(srq->umem)) {
		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
		err = PTR_ERR(srq->umem);
		return err;
	}

	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
			   &page_shift, &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
				     &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);
	if (!(*in)) {
		err = -ENOMEM;
		goto err_umem;
	}

	mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);

	err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
				  ucmd.db_addr, &srq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map doorbell failed\n");
		goto err_in;
	}

	(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);

	return 0;

err_in:
	mlx5_vfree(*in);

err_umem:
	ib_umem_release(srq->umem);

	return err;
}
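The *inlen computation above sizes a single allocation holding a fixed mailbox header plus a trailing array of ncont page addresses. A minimal userspace sketch of that trailing-array sizing, with an invented struct standing in for mlx5_create_srq_mbox_in and calloc() standing in for mlx5_vzalloc():

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Invented mailbox layout: fixed header plus trailing page-address array. */
struct mbox_in {
	uint32_t ctx[4];	/* placeholder for the fixed context part */
	uint64_t pas[];		/* flexible array of page addresses */
};

int main(void)
{
	int ncont = 6;		/* number of page chunks (example value) */
	size_t inlen = sizeof(struct mbox_in) + sizeof(uint64_t) * ncont;
	int i;

	struct mbox_in *in = calloc(1, inlen);	/* plays the role of mlx5_vzalloc() */
	if (!in)
		return 1;

	for (i = 0; i < ncont; i++)
		in->pas[i] = 0x1000ull * (i + 1);	/* fake page addresses */

	printf("inlen=%zu, pas[5]=0x%llx\n", inlen,
	       (unsigned long long)in->pas[5]);
	free(in);
	return 0;
}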
Example #8
File: srq.c Project: acton393/linux
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			   struct mlx5_srq_attr *in,
			   struct ib_udata *udata, int buf_size)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_srq ucmd = {};
	size_t ucmdlen;
	int err;
	int npages;
	int page_shift;
	int ncont;
	u32 offset;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;

	ucmdlen = min(udata->inlen, sizeof(ucmd));

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
		mlx5_ib_dbg(dev, "failed copy udata\n");
		return -EFAULT;
	}

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EINVAL;

	if (in->type == IB_SRQT_XRC) {
		err = get_srq_user_index(to_mucontext(pd->uobject->context),
					 &ucmd, udata->inlen, &uidx);
		if (err)
			return err;
	}

	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
				0, 0);
	if (IS_ERR(srq->umem)) {
		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
		err = PTR_ERR(srq->umem);
		return err;
	}

	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
			   &page_shift, &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
				     &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_umem;
	}

	mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);

	err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
				  ucmd.db_addr, &srq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map doorbell failed\n");
		goto err_in;
	}

	in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	in->page_offset = offset;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type == IB_SRQT_XRC)
		in->user_index = uidx;

	return 0;

err_in:
	kvfree(in->pas);

err_umem:
	ib_umem_release(srq->umem);

	return err;
}
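The copy at the top of this version takes min(udata->inlen, sizeof(ucmd)) and then, via ib_is_udata_cleared(), requires any trailing bytes beyond sizeof(ucmd) to be zero, so a newer userspace passing a larger request struct is accepted only if the extra fields are unset. A minimal userspace sketch of that compatibility check, with an invented request struct:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Invented "v1" request struct; a newer userspace may pass a larger one. */
struct create_req_v1 {
	unsigned long long buf_addr;
	unsigned int flags;
	unsigned int reserved;
};

/* Returns 0 if the request is acceptable, -1 otherwise. */
static int parse_req(const void *inbuf, size_t inlen, struct create_req_v1 *req)
{
	size_t copylen = inlen < sizeof(*req) ? inlen : sizeof(*req);

	memset(req, 0, sizeof(*req));
	memcpy(req, inbuf, copylen);	/* plays the role of ib_copy_from_udata() */

	/* Any bytes beyond the part we understand must be zero. */
	if (inlen > sizeof(*req)) {
		const unsigned char *tail = (const unsigned char *)inbuf + sizeof(*req);
		size_t i;

		for (i = 0; i < inlen - sizeof(*req); i++)
			if (tail[i])
				return -1;	/* unknown, non-zero extension */
	}
	return 0;
}

int main(void)
{
	unsigned char bigreq[sizeof(struct create_req_v1) + 8] = { 0 };
	struct create_req_v1 req;

	printf("clean extension: %d\n", parse_req(bigreq, sizeof(bigreq), &req));	/* 0 */
	bigreq[sizeof(struct create_req_v1)] = 1;
	printf("dirty extension: %d\n", parse_req(bigreq, sizeof(bigreq), &req));	/* -1 */
	return 0;
}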
Example #9
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
                                    const struct ib_cq_init_attr *attr,
                                    struct ib_ucontext *ib_context,
                                    struct ib_udata *udata)
{
    int entries = attr->cqe;
    struct iwch_dev *rhp;
    struct iwch_cq *chp;
    struct iwch_create_cq_resp uresp;
    struct iwch_create_cq_req ureq;
    struct iwch_ucontext *ucontext = NULL;
    static int warned;
    size_t resplen;

    PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
    if (attr->flags)
        return ERR_PTR(-EINVAL);

    rhp = to_iwch_dev(ibdev);
    chp = kzalloc(sizeof(*chp), GFP_KERNEL);
    if (!chp)
        return ERR_PTR(-ENOMEM);

    if (ib_context) {
        ucontext = to_iwch_ucontext(ib_context);
        if (!t3a_device(rhp)) {
            if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
                kfree(chp);
                return ERR_PTR(-EFAULT);
            }
            chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
        }
    }

    if (t3a_device(rhp)) {

        /*
         * T3A: Add some fluff to handle extra CQEs inserted
         * for various errors.
         * Additional CQE possibilities:
         *      TERMINATE,
         *      incoming RDMA WRITE Failures
         *      incoming RDMA READ REQUEST FAILUREs
         * NOTE: We cannot ensure the CQ won't overflow.
         */
        entries += 16;
    }
    entries = roundup_pow_of_two(entries);
    chp->cq.size_log2 = ilog2(entries);

    if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
        kfree(chp);
        return ERR_PTR(-ENOMEM);
    }
    chp->rhp = rhp;
    chp->ibcq.cqe = 1 << chp->cq.size_log2;
    spin_lock_init(&chp->lock);
    spin_lock_init(&chp->comp_handler_lock);
    atomic_set(&chp->refcnt, 1);
    init_waitqueue_head(&chp->wait);
    if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
        cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
        kfree(chp);
        return ERR_PTR(-ENOMEM);
    }

    if (ucontext) {
        struct iwch_mm_entry *mm;

        mm = kmalloc(sizeof *mm, GFP_KERNEL);
        if (!mm) {
            iwch_destroy_cq(&chp->ibcq);
            return ERR_PTR(-ENOMEM);
        }
        uresp.cqid = chp->cq.cqid;
        uresp.size_log2 = chp->cq.size_log2;
        spin_lock(&ucontext->mmap_lock);
        uresp.key = ucontext->key;
        ucontext->key += PAGE_SIZE;
        spin_unlock(&ucontext->mmap_lock);
        mm->key = uresp.key;
        mm->addr = virt_to_phys(chp->cq.queue);
        if (udata->outlen < sizeof uresp) {
            if (!warned++)
                printk(KERN_WARNING MOD "Warning - "
                       "downlevel libcxgb3 (non-fatal).\n");
            mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
                                 sizeof(struct t3_cqe));
            resplen = sizeof(struct iwch_create_cq_resp_v0);
        } else {
            mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
                                 sizeof(struct t3_cqe));
            uresp.memsize = mm->len;
            uresp.reserved = 0;
            resplen = sizeof uresp;
        }
        if (ib_copy_to_udata(udata, &uresp, resplen)) {
            kfree(mm);
            iwch_destroy_cq(&chp->ibcq);
            return ERR_PTR(-EFAULT);
        }
        insert_mmap(ucontext, mm);
    }
    PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
         chp->cq.cqid, chp, (1 << chp->cq.size_log2),
         (unsigned long long) chp->cq.dma_addr);
    return &chp->ibcq;
}
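One detail worth noting in the user path above: uresp.key is taken from ucontext->key under mmap_lock, the counter then advances by PAGE_SIZE, and the same key is stored in the mm entry handed to insert_mmap(), so the driver's mmap path can presumably match a user-supplied offset back to this CQ. A tiny userspace sketch of that key handout (simplified types, fixed 4 KiB stride):

#include <stdio.h>
#include <pthread.h>

#define KEY_STRIDE 4096UL	/* stands in for PAGE_SIZE */

/* Simplified stand-in for the ucontext key bookkeeping. */
struct user_ctx {
	pthread_mutex_t mmap_lock;
	unsigned long key;
};

static unsigned long get_mmap_key(struct user_ctx *uctx)
{
	unsigned long key;

	pthread_mutex_lock(&uctx->mmap_lock);
	key = uctx->key;
	uctx->key += KEY_STRIDE;	/* the next exported object gets the next key */
	pthread_mutex_unlock(&uctx->mmap_lock);
	return key;
}

int main(void)
{
	struct user_ctx uctx = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("cq key   = 0x%lx\n", get_mmap_key(&uctx));	/* 0x0 */
	printf("next key = 0x%lx\n", get_mmap_key(&uctx));	/* 0x1000 */
	return 0;
}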
Example #10
/**
 * ipath_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for ipathverbs.so
 */
int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask,
		     struct ib_udata *udata)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct ipath_rwq *owq;
		struct ipath_rwq *wq;
		struct ipath_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr) {
			ret = -EINVAL;
			goto bail;
		}

		sz = sizeof(struct ipath_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz);
		if (!wq) {
			ret = -ENOMEM;
			goto bail;
		}

		/*
		 * Return the address of the RWQ as the offset to mmap.
		 * See ipath_mmap() for details.
		 */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = (__u64) wq;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret) {
				vfree(wq);
				goto bail;
			}
			udata->outbuf = (void __user *) offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret) {
				vfree(wq);
				goto bail;
			}
		}

		spin_lock_irq(&srq->rq.lock);
		/*
		 * validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		owq = srq->rq.wq;
		head = owq->head;
		if (head >= srq->rq.size)
			head = 0;
		tail = owq->tail;
		if (tail >= srq->rq.size)
			tail = 0;
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		if (size <= n) {
			spin_unlock_irq(&srq->rq.lock);
			vfree(wq);
			ret = -EINVAL;
			goto bail;
		}
		n = 0;
		p = wq->wq;
		while (tail != head) {
			struct ipath_rwqe *wqe;
			int i;

			wqe = get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct ipath_rwqe *)((char *) p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		srq->rq.wq = wq;
		srq->rq.size = size;
		wq->head = n;
		wq->tail = 0;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);

		vfree(owq);

		if (srq->ip) {
			struct ipath_mmap_info *ip = srq->ip;
			struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
			u32 s = sizeof(struct ipath_rwq) + size * sz;

			ipath_update_mmap_info(dev, ip, s, wq);
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}

bail:
	return ret;
}
Example #11
File: srq.c Project: Anjali05/linux
/**
 * rvt_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success
 */
int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
	struct rvt_rwq *wq;
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct rvt_rwq *owq;
		struct rvt_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > dev->dparms.props.max_srq_wr) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr)
			return -EINVAL;

		sz = sizeof(struct rvt_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		wq = udata ?
			vmalloc_user(sizeof(struct rvt_rwq) + size * sz) :
			vzalloc_node(sizeof(struct rvt_rwq) + size * sz,
				     dev->dparms.node);
		if (!wq)
			return -ENOMEM;

		/* Check that we can write the offset to mmap. */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf = (void __user *)
					(unsigned long)offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}

		spin_lock_irq(&srq->rq.lock);
		/*
		 * validate head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		owq = srq->rq.wq;
		head = owq->head;
		tail = owq->tail;
		if (head >= srq->rq.size || tail >= srq->rq.size) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = 0;
		p = wq->wq;
		while (tail != head) {
			struct rvt_rwqe *wqe;
			int i;

			wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct rvt_rwqe *)((char *)p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		srq->rq.wq = wq;
		srq->rq.size = size;
		wq->head = n;
		wq->tail = 0;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);

		vfree(owq);

		if (srq->ip) {
			struct rvt_mmap_info *ip = srq->ip;
			struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
			u32 s = sizeof(struct rvt_rwq) + size * sz;

			rvt_update_mmap_info(dev, ip, s, wq);

			/*
			 * Return the offset to mmap.
			 * See rvt_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					return ret;
			}

			/*
			 * Put user mapping info onto the pending list
			 * unless it already is on the list.
			 */
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}
	return ret;

bail_unlock:
	spin_unlock_irq(&srq->rq.lock);
bail_free:
	vfree(wq);
	return ret;
}
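Both SRQ-resize examples (ipath above and rvt here) count the occupied entries between tail and head, copy them to the front of the new ring, and then set the new head to that count and the new tail to zero. A small standalone sketch of the wraparound arithmetic on a plain integer ring, with made-up sizes and indices:

#include <stdio.h>

#define OLD_SIZE 8
#define NEW_SIZE 16

int main(void)
{
	int old_ring[OLD_SIZE], new_ring[NEW_SIZE];
	unsigned int head = 2, tail = 6;	/* example producer/consumer indices */
	unsigned int n, i;

	for (i = 0; i < OLD_SIZE; i++)
		old_ring[i] = (int)i;

	/* Count posted-but-unconsumed entries, accounting for wraparound. */
	n = head;
	if (n < tail)
		n += OLD_SIZE - tail;
	else
		n -= tail;
	printf("entries in flight: %u\n", n);	/* 4: old indices 6, 7, 0, 1 */

	/* Copy them to the front of the new ring, as the SRQ resize does. */
	i = 0;
	while (tail != head) {
		new_ring[i++] = old_ring[tail];
		if (++tail >= OLD_SIZE)
			tail = 0;
	}

	/* The resized queue restarts with head = n and tail = 0. */
	printf("new head=%u tail=0, first entry=%d\n", i, new_ring[0]);
	return 0;
}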
Example #12
struct ib_qp *siw_create_qp(struct ib_pd *ofa_pd,
			    struct ib_qp_init_attr *attrs,
			    struct ib_udata *udata)
{
	struct siw_qp			*qp = NULL;
	struct siw_pd			*pd = siw_pd_ofa2siw(ofa_pd);
	struct ib_device		*ofa_dev = ofa_pd->device;
	struct siw_dev			*sdev = siw_dev_ofa2siw(ofa_dev);
	struct siw_cq			*scq = NULL, *rcq = NULL;

	int rv = 0;

	pr_debug(DBG_OBJ DBG_CM ": new QP on device %s\n",
		ofa_dev->name);

	if (!ofa_pd->uobject) {
		pr_debug(": This driver does not support kernel clients\n");
		return ERR_PTR(-EINVAL);
	}

	if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
		pr_debug(": Out of QP's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (attrs->qp_type != IB_QPT_RC) {
		pr_debug(": Only RC QP's supported\n");
		rv = -EINVAL;
		goto err_out;
	}
	if (attrs->srq) {
		pr_debug(": SRQ is not supported\n");
		rv = -EINVAL;
		goto err_out;
	}

	scq = siw_cq_id2obj(sdev, ((struct siw_cq *)attrs->send_cq)->hdr.id);
	rcq = siw_cq_id2obj(sdev, ((struct siw_cq *)attrs->recv_cq)->hdr.id);

	if (!scq || !rcq) {
		pr_debug(DBG_OBJ ": Fail: SCQ: 0x%p, RCQ: 0x%p\n",
			scq, rcq);
		rv = -EINVAL;
		goto err_out;
	}
	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp) {
		pr_debug(": kzalloc\n");
		rv = -ENOMEM;
		goto err_out;
	}

	init_rwsem(&qp->state_lock);

	rv = siw_qp_add(sdev, qp);
	if (rv)
		goto err_out;

	qp->pd  = pd;
	qp->scq = scq;
	qp->rcq = rcq;
	qp->attrs.state = SIW_QP_STATE_IDLE;

	if (udata) {
		struct urdma_udata_create_qp ureq;
		struct urdma_uresp_create_qp uresp;

		rv = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
		if (rv)
			goto err_out_idr;
		qp->attrs.irq_size = ureq.ird_max;
		qp->attrs.orq_size = ureq.ord_max;
		qp->attrs.urdma_devid = ureq.urdmad_dev_id;
		qp->attrs.urdma_qp_id = ureq.urdmad_qp_id;
		qp->attrs.urdma_rxq = ureq.rxq;
		qp->attrs.urdma_txq = ureq.txq;

		memset(&uresp, 0, sizeof uresp);
		uresp.kmod_qp_id = QP_ID(qp);

		rv = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (rv)
			goto err_out_idr;
	}

	qp->ofa_qp.qp_num = QP_ID(qp);

	siw_pd_get(pd);

	return &qp->ofa_qp;

err_out_idr:
	siw_remove_obj(&sdev->idr_lock, &sdev->qp_idr, &qp->hdr);
err_out:
	if (scq)
		siw_cq_put(scq);
	if (rcq)
		siw_cq_put(rcq);

	if (qp) {
		kfree(qp);
	}
	atomic_dec(&sdev->num_qp);

	return ERR_PTR(rv);
}