Example #1
0
/*
 * mlx4_ib_create_srq() - create a shared receive queue on an mlx4 HCA.
 * @pd:        protection domain to create the SRQ on
 * @init_attr: requested SRQ attributes (attr.max_wr, attr.max_sge)
 * @udata:     user verbs data; consulted only for userspace SRQs
 *
 * For a userspace SRQ (pd->uobject set) the WQE buffer and doorbell record
 * live in user memory: they are pinned (ib_umem_get) and mapped here.  For
 * a kernel SRQ the buffer and doorbell are allocated from kernel memory and
 * the free-WQE list is linked before handing the queue to hardware.
 *
 * Return: pointer to the embedded struct ib_srq on success,
 * ERR_PTR(-errno) on failure.
 */
struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_srq *srq;
	struct mlx4_wqe_srq_next_seg *next;
	int desc_size;
	int buf_size;
	int err;
	int i;

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr  >= dev->dev->caps.max_srq_wqes ||
	    init_attr->attr.max_sge >  dev->dev->caps.max_srq_sge)
		return ERR_PTR(-EINVAL);

	srq = kmalloc(sizeof *srq, GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	/* +1 spare WQE so a completely full queue is distinguishable. */
	srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	/* WQE = next-segment header + scatter entries, rounded up to a
	 * power of two and at least 32 bytes so wqe_shift is well defined. */
	desc_size = max(32UL,
			roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) +
					   srq->msrq.max_gs *
					   sizeof (struct mlx4_wqe_data_seg)));
	srq->msrq.wqe_shift = ilog2(desc_size);

	buf_size = srq->msrq.max * desc_size;

	if (pd->uobject) {
		/* Userspace SRQ: pin the user buffer, write its page list
		 * into the MTT, and map the user doorbell record. */
		struct mlx4_ib_create_srq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_srq;
		}

		srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
					buf_size, 0, 0);
		if (IS_ERR(srq->umem)) {
			err = PTR_ERR(srq->umem);
			goto err_srq;
		}

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
				    ilog2(srq->umem->page_size), &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
		if (err)
			goto err_mtt;

		err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
					  ucmd.db_addr, &srq->db);
		if (err)
			goto err_mtt;
	} else {
		/* Kernel SRQ: allocate doorbell and WQE buffer ourselves. */
		err = mlx4_db_alloc(dev->dev, &srq->db, 0);
		if (err)
			goto err_srq;

		*srq->db.db = 0;

		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		srq->head    = 0;
		srq->tail    = srq->msrq.max - 1;
		srq->wqe_ctr = 0;

		/* Link each WQE to its successor to form the free list. */
		for (i = 0; i < srq->msrq.max; ++i) {
			next = get_wqe(srq, i);
			next->next_wqe_index =
				cpu_to_be16((i + 1) & (srq->msrq.max - 1));
		}

		err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
				    &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
		if (err)
			goto err_mtt;

		srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
		if (!srq->wrid) {
			err = -ENOMEM;
			goto err_mtt;
		}
	}

	err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, &srq->mtt,
			     srq->db.dma, &srq->msrq);
	if (err)
		goto err_wrid;

	srq->msrq.event = mlx4_ib_srq_event;

	if (pd->uobject)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
			err = -EFAULT;
			/* The HW SRQ was allocated above; err_wrid only
			 * unwinds doorbell/buffer state, so free it here
			 * or it is leaked. */
			mlx4_srq_free(dev->dev, &srq->msrq);
			goto err_wrid;
		}

	/* Report the actual (rounded-up) capacity back to the caller. */
	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;

err_wrid:
	if (pd->uobject)
		mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
	else
		kfree(srq->wrid);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &srq->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(srq->umem);
	else
		mlx4_buf_free(dev->dev, buf_size, &srq->buf);

err_db:
	if (!pd->uobject)
		mlx4_db_free(dev->dev, &srq->db);

err_srq:
	kfree(srq);

	return ERR_PTR(err);
}
Example #2
0
/*
 * create_srq_user() - set up the user-memory-backed parts of an mlx5 SRQ.
 * @pd:       protection domain the SRQ is being created on
 * @srq:      driver SRQ object to initialize (fills umem, db, wq_sig)
 * @in:       SRQ creation attributes for firmware; this function fills
 *            pas, log_page_size, page_offset and (for XRC on CQE
 *            version 1) user_index
 * @udata:    user verbs data carrying struct mlx5_ib_create_srq
 * @buf_size: size in bytes of the user WQE buffer to pin
 *
 * Return: 0 on success or a negative errno.  On failure everything this
 * function pinned or allocated has been released again.
 */
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			   struct mlx5_srq_attr *in,
			   struct ib_udata *udata, int buf_size)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	/* Zero-initialized so a short copy from old userspace leaves the
	 * tail of the struct cleanly zeroed. */
	struct mlx5_ib_create_srq ucmd = {};
	size_t ucmdlen;
	int err;
	int npages;
	int page_shift;
	int ncont;
	u32 offset;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;

	/* Accept older (shorter) userspace ABI structs. */
	ucmdlen = min(udata->inlen, sizeof(ucmd));

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
		mlx5_ib_dbg(dev, "failed copy udata\n");
		return -EFAULT;
	}

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	/* A newer userspace may pass a larger struct; any bytes beyond
	 * what this kernel understands must be zero, otherwise we would
	 * silently ignore requested functionality. */
	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EINVAL;

	if (in->type == IB_SRQT_XRC) {
		err = get_srq_user_index(to_mucontext(pd->uobject->context),
					 &ucmd, udata->inlen, &uidx);
		if (err)
			return err;
	}

	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

	/* Pin the user WQE buffer. */
	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
				0, 0);
	if (IS_ERR(srq->umem)) {
		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
		err = PTR_ERR(srq->umem);
		return err;
	}

	/* Compute page count/shift for the pinned region and the buffer's
	 * offset within the first page of that size. */
	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
			   &page_shift, &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
				     &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	/* One 64-bit physical address per compound page. */
	in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_umem;
	}

	mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);

	/* Map the user-supplied doorbell record. */
	err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
				  ucmd.db_addr, &srq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map doorbell failed\n");
		goto err_in;
	}

	in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	in->page_offset = offset;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type == IB_SRQT_XRC)
		in->user_index = uidx;

	return 0;

err_in:
	/* NOTE(review): in->pas is freed but not cleared here — confirm
	 * the caller does not free it again on the error path. */
	kvfree(in->pas);

err_umem:
	ib_umem_release(srq->umem);

	return err;
}
Example #3
0
/*
 * create_srq_user() - set up the user-memory-backed parts of an mlx5 SRQ
 * (legacy mailbox variant).
 * @pd:       protection domain the SRQ is being created on
 * @srq:      driver SRQ object to initialize (fills umem, db, wq_sig)
 * @in:       on success, points at a freshly allocated firmware mailbox
 *            with pas, ctx.log_pg_sz and ctx.pgoff_cqn filled in; the
 *            caller owns (and must free) it
 * @udata:    user verbs data carrying struct mlx5_ib_create_srq
 * @buf_size: size in bytes of the user WQE buffer to pin
 * @inlen:    on success, total size in bytes of the mailbox at *in
 *
 * Return: 0 on success or a negative errno.  On failure nothing remains
 * pinned or allocated.
 */
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			   struct mlx5_create_srq_mbox_in **in,
			   struct ib_udata *udata, int buf_size, int *inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_srq ucmd;
	size_t ucmdlen;
	int err;
	int npages;
	int page_shift;
	int ncont;
	u32 offset;

	/* Old userspace does not pass the trailing 'reserved' field; copy
	 * the shorter legacy layout in that case.  udata->inlen includes
	 * the uverbs command header, which is not part of ucmd. */
	ucmdlen =
		(udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
		 sizeof(ucmd)) ? (sizeof(ucmd) -
				  sizeof(ucmd.reserved)) : sizeof(ucmd);

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
		mlx5_ib_dbg(dev, "failed copy udata\n");
		return -EFAULT;
	}

	/* ucmd.reserved is only checked when the full struct was copied;
	 * after a short copy it stays uninitialized but is never read. */
	if (ucmdlen == sizeof(ucmd) &&
	    ucmd.reserved != 0)
		return -EINVAL;

	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

	/* Pin the user WQE buffer. */
	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
				0, 0);
	if (IS_ERR(srq->umem)) {
		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
		err = PTR_ERR(srq->umem);
		return err;
	}

	/* Compute page count/shift for the pinned region and the buffer's
	 * offset within the first page of that size. */
	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
			   &page_shift, &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
				     &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	/* Mailbox = fixed header + one 64-bit PA per compound page. */
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);
	if (!(*in)) {
		err = -ENOMEM;
		goto err_umem;
	}

	mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);

	/* Map the user-supplied doorbell record. */
	err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
				  ucmd.db_addr, &srq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map doorbell failed\n");
		goto err_in;
	}

	(*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT;
	/* The page offset occupies the high bits of the pgoff_cqn word. */
	(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);

	return 0;

err_in:
	mlx5_vfree(*in);

err_umem:
	ib_umem_release(srq->umem);

	return err;
}
Example #4
0
/*
 * destroy_srq_user() - tear down what create_srq_user() set up.
 *
 * Unmaps the user doorbell record first, then releases the pinned user
 * memory backing the WQE buffer.
 */
static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
{
	struct mlx5_ib_ucontext *ucontext = to_mucontext(pd->uobject->context);

	mlx5_ib_db_unmap_user(ucontext, &srq->db);
	ib_umem_release(srq->umem);
}
Example #5
0
/*
 * mlx4_ib_create_xrc_srq() - create an XRC-capable shared receive queue.
 * @pd:        protection domain to create the SRQ on
 * @xrc_cq:    optional XRC completion queue (NULL for a plain SRQ)
 * @xrcd:      optional XRC domain (NULL falls back to the reserved xrcd)
 * @init_attr: requested SRQ attributes (attr.max_wr, attr.max_sge)
 * @udata:     user verbs data; consulted only for userspace SRQs
 *
 * Like mlx4_ib_create_srq(), but additionally scatters-invalidates the
 * kernel WQE buffer (lkey = MLX4_INVALID_LKEY) and passes the CQ number
 * and XRC domain to the firmware.  For kernel consumers the SRQ number is
 * exported through ibsrq.xrc_srq_num instead of udata.
 *
 * Return: pointer to the embedded struct ib_srq on success,
 * ERR_PTR(-errno) on failure.
 */
struct ib_srq *mlx4_ib_create_xrc_srq(struct ib_pd *pd,
                                      struct ib_cq *xrc_cq,
                                      struct ib_xrcd *xrcd,
                                      struct ib_srq_init_attr *init_attr,
                                      struct ib_udata *udata)
{
    struct mlx4_ib_dev *dev = to_mdev(pd->device);
    struct mlx4_ib_srq *srq;
    struct mlx4_wqe_srq_next_seg *next;
    u32	cqn;
    u16	xrcdn;
    int desc_size;
    int buf_size;
    int err;
    int i;

    /* Sanity check SRQ size before proceeding */
    if (init_attr->attr.max_wr  >= dev->dev->caps.max_srq_wqes ||
            init_attr->attr.max_sge >  dev->dev->caps.max_srq_sge) {
        mlx4_ib_dbg("a size param is out of range. "
                    "max_wr = 0x%x, max_sge = 0x%x",
                    init_attr->attr.max_wr, init_attr->attr.max_sge);
        return ERR_PTR(-EINVAL);
    }

    srq = kzalloc(sizeof *srq, GFP_KERNEL);
    if (!srq)
        return ERR_PTR(-ENOMEM);

    mutex_init(&srq->mutex);
    spin_lock_init(&srq->lock);
    /* +1 spare WQE so a completely full queue is distinguishable. */
    srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
    srq->msrq.max_gs = init_attr->attr.max_sge;

    /* WQE = next-segment header + scatter entries, rounded up to a power
     * of two and at least 32 bytes so wqe_shift is well defined. */
    desc_size = max(32UL,
                    roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) +
                                       srq->msrq.max_gs *
                                       sizeof (struct mlx4_wqe_data_seg)));
    srq->msrq.wqe_shift = ilog2(desc_size);

    buf_size = srq->msrq.max * desc_size;

    if (pd->uobject) {
        /* Userspace SRQ: pin the user buffer, write its page list into
         * the MTT, and map the user doorbell record. */
        struct mlx4_ib_create_srq ucmd;

        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
            err = -EFAULT;
            goto err_srq;
        }

        srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
                                buf_size, 0, 0);
        if (IS_ERR(srq->umem)) {
            err = PTR_ERR(srq->umem);
            goto err_srq;
        }

        err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
                            ilog2(srq->umem->page_size), &srq->mtt);
        if (err)
            goto err_buf;

        err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
        if (err)
            goto err_mtt;

        err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
                                  ucmd.db_addr, &srq->db);
        if (err)
            goto err_mtt;
    } else {
        /* Kernel SRQ: allocate doorbell and WQE buffer ourselves. */
        struct mlx4_wqe_data_seg *scatter;

        err = mlx4_db_alloc(dev->dev, &srq->db, 0);
        if (err)
            goto err_srq;

        *srq->db.db = 0;

        if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
            err = -ENOMEM;
            goto err_db;
        }

        srq->head    = 0;
        srq->tail    = srq->msrq.max - 1;
        srq->wqe_ctr = 0;

        /* Link each WQE to its successor and mark all of its scatter
         * entries invalid until a receive is actually posted. */
        for (i = 0; i < srq->msrq.max; ++i) {
            next = get_wqe(srq, i);
            next->next_wqe_index =
                cpu_to_be16((i + 1) & (srq->msrq.max - 1));

            for (scatter = (void *) (next + 1);
                    (void *) scatter < (void *) next + desc_size;
                    ++scatter)
                scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY);
        }

        err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
                            &srq->mtt);
        if (err)
            goto err_buf;

        err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
        if (err)
            goto err_mtt;

        srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
        if (!srq->wrid) {
            err = -ENOMEM;
            goto err_mtt;
        }
    }

    /* Fall back to 0 / the device's reserved xrcd when not XRC-attached. */
    cqn = xrc_cq ? (u32) (to_mcq(xrc_cq)->mcq.cqn) : 0;
    xrcdn = xrcd ? (u16) (to_mxrcd(xrcd)->xrcdn) :
            (u16) dev->dev->caps.reserved_xrcds;

    err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt,
                         srq->db.dma, &srq->msrq);
    if (err)
        goto err_wrid;

    srq->msrq.event = mlx4_ib_srq_event;

    if (pd->uobject) {
        if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
            err = -EFAULT;
            /* The HW SRQ was allocated above; err_wrid only unwinds
             * doorbell/buffer state, so free it here or it is leaked. */
            mlx4_srq_free(dev->dev, &srq->msrq);
            goto err_wrid;
        }
    } else
        srq->ibsrq.xrc_srq_num = srq->msrq.srqn;

    /* Report the actual (rounded-up) capacity back to the caller. */
    init_attr->attr.max_wr = srq->msrq.max - 1;

    return &srq->ibsrq;

err_wrid:
    if (pd->uobject)
        mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
    else
        kfree(srq->wrid);

err_mtt:
    mlx4_mtt_cleanup(dev->dev, &srq->mtt);

err_buf:
    if (pd->uobject)
        ib_umem_release(srq->umem);
    else
        mlx4_buf_free(dev->dev, buf_size, &srq->buf);

err_db:
    if (!pd->uobject)
        mlx4_db_free(dev->dev, &srq->db);

err_srq:
    kfree(srq);

    return ERR_PTR(err);
}