static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				   int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}
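/*
 * Illustrative sketch, not part of the original source: roughly how the
 * user-mapped branch of a CQ resize could drive mlx4_alloc_resize_umem().
 * The helper name example_resize_user_cq() is hypothetical, locking via
 * cq->resize_mutex is omitted, and mlx4_cq_resize() is assumed to be the
 * mlx4_core firmware call that switches the CQ to the new buffer.
 */
static int example_resize_user_cq(struct mlx4_ib_dev *dev,
				  struct mlx4_ib_cq *cq, int entries,
				  struct ib_udata *udata)
{
	int err;

	/* pin the new user buffer and build its MTT */
	err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
	if (err)
		return err;

	/* ask firmware to move the CQ onto the new buffer */
	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries,
			     &cq->resize_buf->buf.mtt);
	if (err) {
		/* roll back: drop the new MTT, umem and bookkeeping */
		mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	/* commit: adopt the new buffer/umem and release the old ones */
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);
	cq->buf      = cq->resize_buf->buf;
	cq->ibcq.cqe = cq->resize_buf->cqe;
	ib_umem_release(cq->umem);
	cq->umem     = cq->resize_umem;

	kfree(cq->resize_buf);
	cq->resize_buf  = NULL;
	cq->resize_umem = NULL;

	return 0;
}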
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
	}

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, vector, 0);
	if (err)
		goto err_dbmap;

	cq->mcq.comp  = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_dbmap;
		}

	return &cq->ibcq;

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	void *buf_addr;
	int err;
	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->send_qp_list);
	INIT_LIST_HEAD(&cq->recv_qp_list);

	if (udata) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		buf_addr = (void *)(unsigned long)ucmd.buf_addr;
		err = mlx4_ib_get_cq_umem(dev, udata, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db);
		if (err)
			goto err_mtt;

		uar = &context->uar;
		cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		buf_addr = &cq->buf.buf;

		uar = &dev->priv_uar;
		cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
	}

	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma,
			    &cq->mcq, vector, 0,
			    !!(cq->create_flags &
			       IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION),
			    buf_addr, !!udata);
	if (err)
		goto err_dbmap;

	if (udata)
		cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
	else
		cq->mcq.comp = mlx4_ib_cq_comp;

	cq->mcq.event = mlx4_ib_cq_event;

	if (udata)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_cq_free;
		}

	return &cq->ibcq;

err_cq_free:
	mlx4_cq_free(dev->dev, &cq->mcq);

err_dbmap:
	if (udata)
		mlx4_ib_db_unmap_user(context, &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (udata)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!udata)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}
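/*
 * Illustrative kernel-consumer sketch, not part of the original source:
 * creating a CQ through the verbs layer, which on an mlx4 device ends up in
 * mlx4_ib_create_cq() above via the device ops table.  The helper name,
 * CQ size and private-context argument are assumptions for the example;
 * ib_create_cq() is the core verbs entry point declared in <rdma/ib_verbs.h>.
 */
static struct ib_cq *example_create_kernel_cq(struct ib_device *ibdev,
					      ib_comp_handler comp_handler,
					      void *priv)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe         = 256,	/* requested entries; driver rounds up */
		.comp_vector = 0,
		.flags       = 0,	/* e.g. no timestamp completions */
	};

	/*
	 * No udata here, so the driver takes the kernel branch: it allocates
	 * the CQ buffer and doorbell itself and sets MLX4_RES_USAGE_DRIVER.
	 */
	return ib_create_cq(ibdev, comp_handler, NULL, priv, &cq_attr);
}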