/** * pvrdma_dealloc_pd - deallocate protection domain * @pd: the protection domain to be released * * @return: 0 on success, otherwise errno. */ int pvrdma_dealloc_pd(struct ib_pd *pd) { struct pvrdma_dev *dev = to_vdev(pd->device); union pvrdma_cmd_req req; struct pvrdma_cmd_destroy_pd *cmd = &req.destroy_pd; int ret; memset(cmd, 0, sizeof(*cmd)); cmd->hdr.cmd = PVRDMA_CMD_DESTROY_PD; cmd->pd_handle = to_vpd(pd)->pd_handle; ret = pvrdma_cmd_post(dev, &req, NULL, 0); if (ret) dev_warn(&dev->pdev->dev, "could not dealloc protection domain, error: %d\n", ret); kfree(to_vpd(pd)); atomic_dec(&dev->num_pds); return 0; }
/** * pvrdma_create_ah - create an address handle * @pd: the protection domain * @ah_attr: the attributes of the AH * @udata: user data blob * * @return: the ib_ah pointer on success, otherwise errno. */ struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, struct ib_udata *udata) { struct pvrdma_dev *dev = to_vdev(pd->device); struct pvrdma_ah *ah; const struct ib_global_route *grh; u8 port_num = rdma_ah_get_port_num(ah_attr); if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) return ERR_PTR(-EINVAL); grh = rdma_ah_read_grh(ah_attr); if ((ah_attr->type != RDMA_AH_ATTR_TYPE_ROCE) || rdma_is_multicast_addr((struct in6_addr *)grh->dgid.raw)) return ERR_PTR(-EINVAL); if (!atomic_add_unless(&dev->num_ahs, 1, dev->dsr->caps.max_ah)) return ERR_PTR(-ENOMEM); ah = kzalloc(sizeof(*ah), GFP_KERNEL); if (!ah) { atomic_dec(&dev->num_ahs); return ERR_PTR(-ENOMEM); } ah->av.port_pd = to_vpd(pd)->pd_handle | (port_num << 24); ah->av.src_path_bits = rdma_ah_get_path_bits(ah_attr); ah->av.src_path_bits |= 0x80; ah->av.gid_index = grh->sgid_index; ah->av.hop_limit = grh->hop_limit; ah->av.sl_tclass_flowlabel = (grh->traffic_class << 20) | grh->flow_label; memcpy(ah->av.dgid, grh->dgid.raw, 16); memcpy(ah->av.dmac, ah_attr->roce.dmac, ETH_ALEN); ah->ibah.device = pd->device; ah->ibah.pd = pd; ah->ibah.uobject = NULL; return &ah->ibah; }
/**
 * pvrdma_create_srq - create shared receive queue
 * @pd: protection domain
 * @init_attr: shared receive queue attributes
 * @udata: user data
 *
 * @return: the ib_srq pointer on success, otherwise returns an errno.
 */
struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
				 struct ib_srq_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct pvrdma_srq *srq = NULL;
	struct pvrdma_dev *dev = to_vdev(pd->device);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
	struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
	struct pvrdma_create_srq_resp srq_resp = {0};
	struct pvrdma_create_srq ucmd;
	unsigned long flags;
	int ret;

	if (!udata) {
		/* No support for kernel clients. */
		dev_warn(&dev->pdev->dev,
			 "no shared receive queue support for kernel client\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Only the basic SRQ type is implemented by this driver. */
	if (init_attr->srq_type != IB_SRQT_BASIC) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue type %d not supported\n",
			 init_attr->srq_type);
		return ERR_PTR(-EINVAL);
	}

	/* Reject requests exceeding the device's advertised SRQ limits. */
	if (init_attr->attr.max_wr > dev->dsr->caps.max_srq_wr ||
	    init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue size invalid\n");
		return ERR_PTR(-EINVAL);
	}

	/* Claim an SRQ slot, bounded by the device capability. */
	if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
		return ERR_PTR(-ENOMEM);

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = -ENOMEM;
		goto err_srq;
	}

	spin_lock_init(&srq->lock);
	refcount_set(&srq->refcnt, 1);
	init_completion(&srq->free);

	dev_dbg(&dev->pdev->dev,
		"create shared receive queue from user space\n");

	/* Fetch the userspace buffer description for the SRQ ring. */
	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
		ret = -EFAULT;
		goto err_srq;
	}

	/* Pin the user memory backing the queue. */
	srq->umem = ib_umem_get(pd->uobject->context,
				ucmd.buf_addr,
				ucmd.buf_size, 0, 0);
	if (IS_ERR(srq->umem)) {
		ret = PTR_ERR(srq->umem);
		goto err_srq;
	}

	srq->npages = ib_umem_page_count(srq->umem);

	/* Bound check before sizing the page directory. */
	if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev,
			 "overflow pages in shared receive queue\n");
		ret = -EINVAL;
		goto err_umem;
	}

	ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0);

	/* Build and post the CREATE_SRQ command to the device. */
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
	cmd->srq_type = init_attr->srq_type;
	cmd->nchunks = srq->npages;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->attrs.max_wr = init_attr->attr.max_wr;
	cmd->attrs.max_sge = init_attr->attr.max_sge;
	cmd->attrs.srq_limit = init_attr->attr.srq_limit;
	cmd->pdir_dma = srq->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create shared receive queue, error: %d\n",
			 ret);
		goto err_page_dir;
	}

	/* Publish the SRQ in the handle table for interrupt-time lookup. */
	srq->srq_handle = resp->srqn;
	srq_resp.srqn = resp->srqn;
	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	/* Copy udata back. */
	if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
		dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
		/* Full teardown path: destroys the device object too. */
		pvrdma_destroy_srq(&srq->ibsrq);
		return ERR_PTR(-EINVAL);
	}

	return &srq->ibsrq;

	/* Unwind in strict reverse order of acquisition. */
err_page_dir:
	pvrdma_page_dir_cleanup(dev, &srq->pdir);
err_umem:
	ib_umem_release(srq->umem);
err_srq:
	kfree(srq);
	atomic_dec(&dev->num_srqs);

	return ERR_PTR(ret);
}