static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
			     struct hns_roce_mtt *hr_mtt,
			     struct hns_roce_uar *hr_uar,
			     struct hns_roce_cq *hr_cq, int vector)
{
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_hem_table *mtt_table;
	struct hns_roce_cq_table *cq_table;
	struct device *dev = hr_dev->dev;
	dma_addr_t dma_handle;
	u64 *mtts;
	int ret;

	cq_table = &hr_dev->cq_table;

	/* Get the physical address of the CQ buffer */
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		mtt_table = &hr_dev->mr_table.mtt_cqe_table;
	else
		mtt_table = &hr_dev->mr_table.mtt_table;

	mtts = hns_roce_table_find(hr_dev, mtt_table, hr_mtt->first_seg,
				   &dma_handle);
	if (!mtts) {
		dev_err(dev, "CQ alloc: failed to find CQ buf addr.\n");
		return -EINVAL;
	}

	if (vector >= hr_dev->caps.num_comp_vectors) {
		dev_err(dev, "CQ alloc: invalid vector.\n");
		return -EINVAL;
	}
	hr_cq->vector = vector;

	ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
	if (ret == -1) {
		dev_err(dev, "CQ alloc: failed to alloc index.\n");
		return -ENOMEM;
	}

	/* Get CQC memory from the HEM (Hardware Entry Memory) table */
	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
	if (ret) {
		dev_err(dev, "CQ alloc: failed to get context mem.\n");
		goto err_out;
	}

	/* Insert the CQ into the radix tree, keyed by its CQN */
	spin_lock_irq(&cq_table->lock);
	ret = radix_tree_insert(&cq_table->tree, hr_cq->cqn, hr_cq);
	spin_unlock_irq(&cq_table->lock);
	if (ret) {
		dev_err(dev, "CQ alloc: radix_tree_insert failed.\n");
		goto err_put;
	}

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_radix;
	}

	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
			      nent, vector);

	/* Send mailbox to hw */
	ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret) {
		dev_err(dev, "CQ alloc: SW2HW_CQ mailbox command failed.\n");
		goto err_radix;
	}

	hr_cq->cons_index = 0;
	hr_cq->arm_sn = 1;
	hr_cq->uar = hr_uar;
	atomic_set(&hr_cq->refcount, 1);
	init_completion(&hr_cq->free);

	return 0;

err_radix:
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, hr_cq->cqn);
	spin_unlock_irq(&cq_table->lock);

err_put:
	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

err_out:
	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
	return ret;
}
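
/*
 * A minimal sketch (not the driver's exact code) of why the radix-tree
 * insert, refcount and completion above matter: an event handler can
 * resolve a CQN reported by hardware back to the hr_cq, and pin it
 * while it dispatches. The function name hns_roce_cq_event_sketch and
 * the locking around the lookup are assumptions for illustration only;
 * radix_tree_lookup() is the standard kernel API.
 */
static void hns_roce_cq_event_sketch(struct hns_roce_dev *hr_dev, u32 cqn)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct hns_roce_cq *hr_cq;

	spin_lock(&cq_table->lock);
	hr_cq = radix_tree_lookup(&cq_table->tree, cqn);
	if (hr_cq)
		atomic_inc(&hr_cq->refcount);	/* pin across the event */
	spin_unlock(&cq_table->lock);

	if (!hr_cq)
		return;

	/* ... dispatch the event to the consumer here ... */

	/* Last reference dropped: unblock the teardown path. */
	if (atomic_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
}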
static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
			     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "QPC table get failed\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "IRRL table get failed\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "TRRL table get failed\n");
			goto err_put_irrl;
		}
	}

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1),
				hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(dev, "QPC radix_tree_insert failed\n");
		goto err_put_trrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}
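
/*
 * A minimal teardown sketch (an assumption, not the driver's actual
 * free path) mirroring hns_roce_qp_alloc() in reverse: remove the
 * radix-tree entry first so no new lookups can find the QP, drop our
 * own reference and wait on the completion until any in-flight event
 * handlers drop theirs, then release the HEM table entries in the
 * opposite order of acquisition. The name hns_roce_qp_free_sketch is
 * hypothetical.
 */
static void hns_roce_qp_free_sketch(struct hns_roce_dev *hr_dev,
				    struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	spin_lock_irq(&qp_table->lock);
	radix_tree_delete(&hr_dev->qp_table_tree,
			  hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	spin_unlock_irq(&qp_table->lock);

	/* Drop the initial reference; wait for all users to finish. */
	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
}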