/**
 * pvrdma_modify_srq - modify shared receive queue attributes
 * @ibsrq: the shared receive queue to modify
 * @attr: the shared receive queue's new attributes
 * @attr_mask: attributes mask
 * @udata: user data
 *
 * @returns 0 on success, otherwise returns an errno.
 */
int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		      enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct pvrdma_srq *vsrq = to_vsrq(ibsrq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_modify_srq *cmd = &req.modify_srq;
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	int ret;

	/* Only support SRQ limit. */
	if (!(attr_mask & IB_SRQ_LIMIT))
		return -EINVAL;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_MODIFY_SRQ;
	cmd->srq_handle = vsrq->srq_handle;
	cmd->attrs.srq_limit = attr->srq_limit;
	cmd->attr_mask = attr_mask;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not modify shared receive queue, error: %d\n",
			 ret);
		return -EINVAL;
	}

	return ret;
}
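/*
 * Example (not part of the driver): a consumer arms the SRQ limit through
 * the core verbs wrapper ib_modify_srq(), which dispatches to
 * pvrdma_modify_srq() above. A minimal sketch, assuming "srq" was created
 * earlier; this driver only honors IB_SRQ_LIMIT, so no other mask bits
 * are set. The caller name is illustrative.
 */
static int pvrdma_example_arm_srq_limit(struct ib_srq *srq, u32 limit)
{
	struct ib_srq_attr attr = {};

	/* Request an IB_EVENT_SRQ_LIMIT_REACHED event once the number of
	 * outstanding receives on the SRQ drops below "limit".
	 */
	attr.srq_limit = limit;

	return ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
}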
/**
 * pvrdma_query_srq - query shared receive queue
 * @ibsrq: the shared receive queue to query
 * @srq_attr: attributes to query and return to client
 *
 * @return: 0 for success, otherwise returns an errno.
 */
int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	struct pvrdma_srq *srq = to_vsrq(ibsrq);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_srq *cmd = &req.query_srq;
	struct pvrdma_cmd_query_srq_resp *resp = &rsp.query_srq_resp;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_QUERY_SRQ;
	cmd->srq_handle = srq->srq_handle;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not query shared receive queue, error: %d\n",
			 ret);
		return -EINVAL;
	}

	srq_attr->srq_limit = resp->attrs.srq_limit;
	srq_attr->max_wr = resp->attrs.max_wr;
	srq_attr->max_sge = resp->attrs.max_sge;

	return 0;
}
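/*
 * Example (not part of the driver): reading the attributes back through
 * the core wrapper ib_query_srq(), which lands in pvrdma_query_srq()
 * above. A minimal sketch; the caller name is illustrative.
 */
static int pvrdma_example_read_srq_limit(struct ib_srq *srq, u32 *limit)
{
	struct ib_srq_attr attr;
	int ret;

	ret = ib_query_srq(srq, &attr);
	if (ret)
		return ret;

	*limit = attr.srq_limit;	/* attr.max_wr and attr.max_sge also valid */
	return 0;
}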
/**
 * pvrdma_query_pkey - query device port's P_Key table
 * @ibdev: the device to query
 * @port: the port number
 * @index: the index
 * @pkey: the device P_Key value
 *
 * @return: 0 on success, otherwise negative errno
 */
int pvrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	int err = 0;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_pkey *cmd = &req.query_pkey;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_QUERY_PKEY;
	cmd->port_num = port;
	cmd->index = index;

	err = pvrdma_cmd_post(to_vdev(ibdev), &req, &rsp,
			      PVRDMA_CMD_QUERY_PKEY_RESP);
	if (err < 0) {
		dev_warn(&to_vdev(ibdev)->pdev->dev,
			 "could not query pkey, error: %d\n", err);
		return err;
	}

	*pkey = rsp.query_pkey_resp.pkey;

	return 0;
}
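/*
 * Example (not part of the driver): a kernel consumer fetches an entry
 * from the P_Key table via the core helper ib_query_pkey(), which calls
 * pvrdma_query_pkey() above. A minimal sketch reading index 0; the
 * caller name is illustrative.
 */
static int pvrdma_example_read_default_pkey(struct ib_device *ibdev, u8 port)
{
	u16 pkey;
	int err;

	err = ib_query_pkey(ibdev, port, 0, &pkey);
	if (err)
		return err;

	dev_info(&ibdev->dev, "port %u pkey[0] = 0x%04x\n", port, pkey);
	return 0;
}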
/**
 * pvrdma_alloc_pd - allocate protection domain
 * @ibdev: the IB device
 * @context: user context
 * @udata: user data
 *
 * @return: the ib_pd protection domain pointer on success, otherwise errno.
 */
struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct pvrdma_pd *pd;
	struct pvrdma_dev *dev = to_vdev(ibdev);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
	struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
	struct pvrdma_alloc_pd_resp pd_resp = {0};
	int ret;
	void *ptr;

	/* Check allowed max pds */
	if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd))
		return ERR_PTR(-ENOMEM);

	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd) {
		ptr = ERR_PTR(-ENOMEM);
		goto err;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD;
	cmd->ctx_handle = (context) ? to_vucontext(context)->ctx_handle : 0;
	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "failed to allocate protection domain, error: %d\n",
			 ret);
		ptr = ERR_PTR(ret);
		goto freepd;
	}

	pd->privileged = !context;
	pd->pd_handle = resp->pd_handle;
	pd->pdn = resp->pd_handle;
	pd_resp.pdn = resp->pd_handle;

	if (context) {
		if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
			dev_warn(&dev->pdev->dev,
				 "failed to copy back protection domain\n");
			pvrdma_dealloc_pd(&pd->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}

	/* u32 pd handle */
	return &pd->ibpd;

freepd:
	kfree(pd);
err:
	atomic_dec(&dev->num_pds);
	return ptr;
}
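/*
 * Example (not part of the driver): a privileged (kernel, context == NULL)
 * consumer allocates and releases a PD through the core API, which reaches
 * pvrdma_alloc_pd()/pvrdma_dealloc_pd(). A minimal sketch; the caller name
 * is illustrative and the flags argument is left at zero.
 */
static int pvrdma_example_pd_lifecycle(struct ib_device *ibdev)
{
	struct ib_pd *pd;

	pd = ib_alloc_pd(ibdev, 0);	/* no IB_PD_UNSAFE_GLOBAL_RKEY */
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	/* ... register MRs, create QPs/SRQs against "pd" here ... */

	ib_dealloc_pd(pd);
	return 0;
}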
/**
 * pvrdma_alloc_ucontext - allocate ucontext
 * @ibdev: the IB device
 * @udata: user data
 *
 * @return: the ib_ucontext pointer on success, otherwise errno.
 */
struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
					  struct ib_udata *udata)
{
	struct pvrdma_dev *vdev = to_vdev(ibdev);
	struct pvrdma_ucontext *context;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
	struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
	struct pvrdma_alloc_ucontext_resp uresp = {0};
	int ret;
	void *ptr;

	if (!vdev->ib_active)
		return ERR_PTR(-EAGAIN);

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	context->dev = vdev;
	ret = pvrdma_uar_alloc(vdev, &context->uar);
	if (ret) {
		kfree(context);
		return ERR_PTR(-ENOMEM);
	}

	/* get ctx_handle from host */
	memset(cmd, 0, sizeof(*cmd));
	cmd->pfn = context->uar.pfn;
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_UC;
	ret = pvrdma_cmd_post(vdev, &req, &rsp, PVRDMA_CMD_CREATE_UC_RESP);
	if (ret < 0) {
		dev_warn(&vdev->pdev->dev,
			 "could not create ucontext, error: %d\n", ret);
		ptr = ERR_PTR(ret);
		goto err;
	}

	context->ctx_handle = resp->ctx_handle;

	/* copy back to user */
	uresp.qp_tab_size = vdev->dsr->caps.max_qp;
	ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (ret) {
		pvrdma_uar_free(vdev, &context->uar);
		context->ibucontext.device = ibdev;
		pvrdma_dealloc_ucontext(&context->ibucontext);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;

err:
	pvrdma_uar_free(vdev, &context->uar);
	kfree(context);
	return ptr;
}
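/*
 * Example (not part of the driver): pvrdma_alloc_ucontext() runs when a
 * userspace process opens the device through the uverbs layer. A minimal
 * userspace sketch with libibverbs, kept as a comment since it compiles
 * separately against -libverbs:
 *
 *	#include <infiniband/verbs.h>
 *
 *	struct ibv_device **list = ibv_get_device_list(NULL);
 *	struct ibv_context *ctx = list ? ibv_open_device(list[0]) : NULL;
 *	// ... use ctx ...
 *	if (ctx)
 *		ibv_close_device(ctx);	// tears the ucontext down again
 *	ibv_free_device_list(list);
 */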
/**
 * pvrdma_query_port - query device port attributes
 * @ibdev: the device to query
 * @port: the port number
 * @props: the device properties
 *
 * @return: 0 on success, otherwise negative errno
 */
int pvrdma_query_port(struct ib_device *ibdev, u8 port,
		      struct ib_port_attr *props)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_port *cmd = &req.query_port;
	struct pvrdma_cmd_query_port_resp *resp = &rsp.query_port_resp;
	int err;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_QUERY_PORT;
	cmd->port_num = port;

	err = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_PORT_RESP);
	if (err < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not query port, error: %d\n", err);
		return err;
	}

	/* props being zeroed by the caller, avoid zeroing it here */

	props->state = pvrdma_port_state_to_ib(resp->attrs.state);
	props->max_mtu = pvrdma_mtu_to_ib(resp->attrs.max_mtu);
	props->active_mtu = pvrdma_mtu_to_ib(resp->attrs.active_mtu);
	props->gid_tbl_len = resp->attrs.gid_tbl_len;
	props->port_cap_flags =
		pvrdma_port_cap_flags_to_ib(resp->attrs.port_cap_flags);
	props->port_cap_flags |= IB_PORT_CM_SUP;
	props->ip_gids = true;
	props->max_msg_sz = resp->attrs.max_msg_sz;
	props->bad_pkey_cntr = resp->attrs.bad_pkey_cntr;
	props->qkey_viol_cntr = resp->attrs.qkey_viol_cntr;
	props->pkey_tbl_len = resp->attrs.pkey_tbl_len;
	props->lid = resp->attrs.lid;
	props->sm_lid = resp->attrs.sm_lid;
	props->lmc = resp->attrs.lmc;
	props->max_vl_num = resp->attrs.max_vl_num;
	props->sm_sl = resp->attrs.sm_sl;
	props->subnet_timeout = resp->attrs.subnet_timeout;
	props->init_type_reply = resp->attrs.init_type_reply;
	props->active_width = pvrdma_port_width_to_ib(resp->attrs.active_width);
	props->active_speed = pvrdma_port_speed_to_ib(resp->attrs.active_speed);
	props->phys_state = resp->attrs.phys_state;

	return 0;
}
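/*
 * Example (not part of the driver): consumers call the core wrapper
 * ib_query_port(), which zeroes *props before dispatching here (hence
 * the "zeroed by the caller" comment above). A minimal sketch checking
 * link state; the caller name is illustrative.
 */
static bool pvrdma_example_port_is_active(struct ib_device *ibdev, u8 port)
{
	struct ib_port_attr props;

	if (ib_query_port(ibdev, port, &props))
		return false;

	return props.state == IB_PORT_ACTIVE;
}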
/**
 * pvrdma_destroy_srq - destroy shared receive queue
 * @srq: the shared receive queue to destroy
 *
 * @return: 0 for success.
 */
int pvrdma_destroy_srq(struct ib_srq *srq)
{
	struct pvrdma_srq *vsrq = to_vsrq(srq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_srq *cmd = &req.destroy_srq;
	struct pvrdma_dev *dev = to_vdev(srq->device);
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_SRQ;
	cmd->srq_handle = vsrq->srq_handle;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "destroy shared receive queue failed, error: %d\n",
			 ret);

	pvrdma_free_srq(dev, vsrq);

	return 0;
}
/**
 * pvrdma_dealloc_pd - deallocate protection domain
 * @pd: the protection domain to be released
 *
 * @return: 0 on success, otherwise errno.
 */
int pvrdma_dealloc_pd(struct ib_pd *pd)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_pd *cmd = &req.destroy_pd;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_PD;
	cmd->pd_handle = to_vpd(pd)->pd_handle;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret)
		dev_warn(&dev->pdev->dev,
			 "could not dealloc protection domain, error: %d\n",
			 ret);

	kfree(to_vpd(pd));
	atomic_dec(&dev->num_pds);

	return 0;
}
/**
 * pvrdma_dealloc_ucontext - deallocate ucontext
 * @ibcontext: the ucontext
 *
 * @return: 0 on success, otherwise errno.
 */
int pvrdma_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct pvrdma_ucontext *context = to_vucontext(ibcontext);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_uc *cmd = &req.destroy_uc;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_UC;
	cmd->ctx_handle = context->ctx_handle;

	ret = pvrdma_cmd_post(context->dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&context->dev->pdev->dev,
			 "destroy ucontext failed, error: %d\n", ret);

	/* Free the UAR even if the device command failed */
	pvrdma_uar_free(to_vdev(ibcontext->device), &context->uar);
	kfree(context);

	return ret;
}
/**
 * pvrdma_create_srq - create shared receive queue
 * @pd: protection domain
 * @init_attr: shared receive queue attributes
 * @udata: user data
 *
 * @return: the ib_srq pointer on success, otherwise returns an errno.
 */
struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
				 struct ib_srq_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct pvrdma_srq *srq = NULL;
	struct pvrdma_dev *dev = to_vdev(pd->device);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
	struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
	struct pvrdma_create_srq_resp srq_resp = {0};
	struct pvrdma_create_srq ucmd;
	unsigned long flags;
	int ret;

	if (!udata) {
		/* No support for kernel clients. */
		dev_warn(&dev->pdev->dev,
			 "no shared receive queue support for kernel client\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (init_attr->srq_type != IB_SRQT_BASIC) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue type %d not supported\n",
			 init_attr->srq_type);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->attr.max_wr > dev->dsr->caps.max_srq_wr ||
	    init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue size invalid\n");
		return ERR_PTR(-EINVAL);
	}

	if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
		return ERR_PTR(-ENOMEM);

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = -ENOMEM;
		goto err_srq;
	}

	spin_lock_init(&srq->lock);
	refcount_set(&srq->refcnt, 1);
	init_completion(&srq->free);

	dev_dbg(&dev->pdev->dev,
		"create shared receive queue from user space\n");

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
		ret = -EFAULT;
		goto err_srq;
	}

	srq->umem = ib_umem_get(pd->uobject->context,
				ucmd.buf_addr,
				ucmd.buf_size, 0, 0);
	if (IS_ERR(srq->umem)) {
		ret = PTR_ERR(srq->umem);
		goto err_srq;
	}

	srq->npages = ib_umem_page_count(srq->umem);

	if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev,
			 "overflow pages in shared receive queue\n");
		ret = -EINVAL;
		goto err_umem;
	}

	ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
	cmd->srq_type = init_attr->srq_type;
	cmd->nchunks = srq->npages;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->attrs.max_wr = init_attr->attr.max_wr;
	cmd->attrs.max_sge = init_attr->attr.max_sge;
	cmd->attrs.srq_limit = init_attr->attr.srq_limit;
	cmd->pdir_dma = srq->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create shared receive queue, error: %d\n",
			 ret);
		goto err_page_dir;
	}

	srq->srq_handle = resp->srqn;
	srq_resp.srqn = resp->srqn;
	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	/* Copy udata back. */
	if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
		dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
		pvrdma_destroy_srq(&srq->ibsrq);
		return ERR_PTR(-EINVAL);
	}

	return &srq->ibsrq;

err_page_dir:
	pvrdma_page_dir_cleanup(dev, &srq->pdir);
err_umem:
	ib_umem_release(srq->umem);
err_srq:
	kfree(srq);
	atomic_dec(&dev->num_srqs);

	return ERR_PTR(ret);
}
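/*
 * Example (not part of the driver): because pvrdma_create_srq() rejects
 * kernel clients, SRQs are created from userspace via libibverbs. A
 * minimal sketch, kept as a comment since it compiles separately against
 * -libverbs; the sizes are illustrative and must stay within the device's
 * max_srq_wr/max_srq_sge caps checked above:
 *
 *	#include <infiniband/verbs.h>
 *
 *	struct ibv_srq_init_attr attr = {
 *		.attr = {
 *			.max_wr    = 64,
 *			.max_sge   = 1,
 *			.srq_limit = 0,		// no limit event armed yet
 *		},
 *	};
 *	struct ibv_srq *srq = ibv_create_srq(pd, &attr);
 *
 *	if (srq)
 *		ibv_destroy_srq(srq);	// dispatches to pvrdma_destroy_srq()
 */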