int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR) {
		mlx4_ib_dbg("resize not yet supported");
		return -EINVAL;
	}

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max) {
			mlx4_ib_dbg("limit (0x%x) too high", attr->srq_limit);
			return -EINVAL;
		}

		mutex_lock(&srq->mutex);
		ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
	}

	return 0;
}
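/*
 * Hedged example (not part of the driver): a minimal sketch of how a
 * kernel verbs consumer might arm the SRQ limit event through the core
 * ib_modify_srq() entry point, which dispatches to the function above.
 * The helper name arm_srq_limit_example() is hypothetical.
 */
static int arm_srq_limit_example(struct ib_srq *srq, u32 limit)
{
	struct ib_srq_attr attr = {
		.srq_limit = limit,	/* must be < the SRQ's WQE count */
	};

	/* Set only IB_SRQ_LIMIT; IB_SRQ_MAX_WR would return -EINVAL here. */
	return ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
}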
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		mlx4_ib_dbg("invalid num of entries: %d", entries);
		return ERR_PTR(-EINVAL);
	}

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	/* Hardware wants a power-of-two CQ; report one fewer CQE back. */
	entries	     = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf	= NULL;
	cq->resize_umem = NULL;

	if (context) {
		/* Userspace CQ: buffer and doorbell record come from the user. */
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
	} else {
		/* Kernel CQ: allocate the doorbell record and buffer here. */
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db	   = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db	   = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
	}

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq,
			    vector == IB_CQ_VECTOR_LEAST_ATTACHED ?
			    MLX4_LEAST_ATTACHED_VECTOR : vector, 0);
	if (err)
		goto err_dbmap;

	cq->mcq.comp  = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_dbmap;
		}

	return &cq->ibcq;

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}
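/*
 * Hedged example (not part of the driver): a sketch of creating a CQ from
 * a kernel consumer through the core ib_create_cq() wrapper, assuming the
 * pre-cq_init_attr kernel API that matches this driver's create_cq
 * signature. Note how the requested size is rounded up internally: asking
 * for 100 entries yields a 128-entry hardware CQ, reported back to the
 * consumer as ibcq->cqe == 127. create_cq_example() is hypothetical.
 */
static struct ib_cq *create_cq_example(struct ib_device *ibdev,
				       ib_comp_handler comp, void *ctx)
{
	/* Kernel consumer: no ib_ucontext/udata; completion vector 0. */
	return ib_create_cq(ibdev, comp, NULL, ctx, 100, 0);
}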
struct ib_srq *mlx4_ib_create_xrc_srq(struct ib_pd *pd,
				      struct ib_cq *xrc_cq,
				      struct ib_xrcd *xrcd,
				      struct ib_srq_init_attr *init_attr,
				      struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_srq *srq;
	struct mlx4_wqe_srq_next_seg *next;
	u32 cqn;
	u16 xrcdn;
	int desc_size;
	int buf_size;
	int err;
	int i;

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes ||
	    init_attr->attr.max_sge > dev->dev->caps.max_srq_sge) {
		mlx4_ib_dbg("a size param is out of range. "
			    "max_wr = 0x%x, max_sge = 0x%x",
			    init_attr->attr.max_wr, init_attr->attr.max_sge);
		return ERR_PTR(-EINVAL);
	}

	srq = kzalloc(sizeof *srq, GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max	 = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	/*
	 * Each WQE is a next-segment header followed by max_gs scatter
	 * entries, rounded up to a power of two of at least 32 bytes.
	 */
	desc_size = max(32UL,
			roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) +
					   srq->msrq.max_gs *
					   sizeof (struct mlx4_wqe_data_seg)));
	srq->msrq.wqe_shift = ilog2(desc_size);

	buf_size = srq->msrq.max * desc_size;

	if (pd->uobject) {
		struct mlx4_ib_create_srq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_srq;
		}

		srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
					buf_size, 0, 0);
		if (IS_ERR(srq->umem)) {
			err = PTR_ERR(srq->umem);
			goto err_srq;
		}

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
				    ilog2(srq->umem->page_size), &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
		if (err)
			goto err_mtt;

		err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
					  ucmd.db_addr, &srq->db);
		if (err)
			goto err_mtt;
	} else {
		struct mlx4_wqe_data_seg *scatter;

		err = mlx4_db_alloc(dev->dev, &srq->db, 0);
		if (err)
			goto err_srq;

		*srq->db.db = 0;

		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
				   &srq->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		srq->head    = 0;
		srq->tail    = srq->msrq.max - 1;
		srq->wqe_ctr = 0;

		/* Link all WQEs into a free list and invalidate the
		 * scatter entries so the HCA ignores unused ones. */
		for (i = 0; i < srq->msrq.max; ++i) {
			next = get_wqe(srq, i);
			next->next_wqe_index =
				cpu_to_be16((i + 1) & (srq->msrq.max - 1));

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + desc_size;
			     ++scatter)
				scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY);
		}

		err = mlx4_mtt_init(dev->dev, srq->buf.npages,
				    srq->buf.page_shift, &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
		if (err)
			goto err_mtt;

		srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
		if (!srq->wrid) {
			err = -ENOMEM;
			goto err_mtt;
		}
	}

	/* Without an XRC domain, fall back to the reserved xrcdn. */
	cqn = xrc_cq ? (u32) (to_mcq(xrc_cq)->mcq.cqn) : 0;
	xrcdn = xrcd ? (u16) (to_mxrcd(xrcd)->xrcdn) :
		(u16) dev->dev->caps.reserved_xrcds;

	err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt,
			     srq->db.dma, &srq->msrq);
	if (err)
		goto err_wrid;

	srq->msrq.event = mlx4_ib_srq_event;

	if (pd->uobject) {
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_wrid;
		}
	} else
		srq->ibsrq.xrc_srq_num = srq->msrq.srqn;

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;

err_wrid:
	if (pd->uobject)
		mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context),
				      &srq->db);
	else
		kfree(srq->wrid);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &srq->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(srq->umem);
	else
		mlx4_buf_free(dev->dev, buf_size, &srq->buf);

err_db:
	if (!pd->uobject)
		mlx4_db_free(dev->dev, &srq->db);

err_srq:
	kfree(srq);

	return ERR_PTR(err);
}
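/*
 * Hedged example (not part of the driver): the WQE descriptor sizing rule
 * above, pulled out as a standalone helper for illustration.
 * srq_desc_size_example() is hypothetical; it mirrors the max(32, ...)
 * round-up in mlx4_ib_create_xrc_srq(). With 16-byte next and data
 * segments, max_gs == 4 yields roundup_pow_of_two(16 + 4 * 16) == 128,
 * i.e. wqe_shift == 7.
 */
static int srq_desc_size_example(int max_gs)
{
	return max(32UL,
		   roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) +
				      max_gs *
				      sizeof (struct mlx4_wqe_data_seg)));
}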
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			mlx4_ib_dbg("srq num 0x%x: num s/g entries too large (%d)",
				    srq->msrq.srqn, wr->num_sge);
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		/* The free list is empty when head catches up with tail. */
		if (unlikely(srq->head == srq->tail)) {
			mlx4_ib_dbg("srq num 0x%x: No entries available to post.",
				    srq->msrq.srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next	  = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat	  = (struct mlx4_wqe_data_seg *) (next + 1);

		for (i = 0; i < wr->num_sge; ++i) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey	   = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr	   = cpu_to_be64(wr->sg_list[i].addr);
		}

		/* Terminate a short scatter list with a sentinel entry. */
		if (i < srq->msrq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey	   = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr	   = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}

	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}
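/*
 * Hedged example (not part of the driver): a minimal sketch of posting a
 * single-SGE receive buffer to an SRQ through the core ib_post_srq_recv()
 * wrapper, which lands in the function above. post_one_recv_example() and
 * its dma_addr/len/lkey parameters are hypothetical; the buffer is assumed
 * to be DMA-mapped already and covered by lkey.
 */
static int post_one_recv_example(struct ib_srq *srq, u64 dma_addr,
				 u32 len, u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length = len,
		.lkey	= lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id	 = wr_id,
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_srq_recv(srq, &wr, &bad_wr);
}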