static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __FUNCTION__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)
						ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *	TERMINATE,
		 *	incoming RDMA WRITE Failures
		 *	incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
				     sizeof (struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}
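/*
 * Illustrative sketch, not part of the driver: iwch_create_cq() returns
 * either a valid struct ib_cq * or an ERR_PTR()-encoded errno, so any caller
 * is expected to test the result with IS_ERR()/PTR_ERR() before touching it.
 * The wrapper name below is hypothetical and exists only to show the pattern.
 */
static struct ib_cq *example_create_cq_checked(struct ib_device *ibdev,
					       int entries, int vector,
					       struct ib_ucontext *ctx,
					       struct ib_udata *udata)
{
	struct ib_cq *cq;

	cq = iwch_create_cq(ibdev, entries, vector, ctx, udata);
	if (IS_ERR(cq)) {
		/* e.g. -ENOMEM or -EFAULT; propagate the encoded errno */
		printk(KERN_ERR "CQ creation failed: %ld\n", PTR_ERR(cq));
		return cq;
	}
	/* cq->cqe now holds the actual (rounded-up) number of entries */
	return cq;
}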
/*
 * Get one cq entry from cxio and map it to openib.
 *
 * Returns:
 *	0			EMPTY
 *	1			cqe returned
 *	-EAGAIN			caller must try again
 *	any other neg errno	fatal error
 */
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
			    struct ib_wc *wc)
{
	struct iwch_qp *qhp = NULL;
	struct t3_cqe cqe, *rd_cqe;
	struct t3_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie;
	int ret = 1;

	rd_cqe = cxio_next_cqe(&chp->cq);

	if (!rd_cqe)
		return 0;

	qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		mtx_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
			   &credit);
	if (t3a_device(chp->rhp) && credit) {
		CTR3(KTR_IW_CXGB, "%s updating %d cq credits on id %d",
		     __FUNCTION__, credit, chp->cq.cqid);
		cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
	}

	if (ret) {
		ret = -EAGAIN;
		goto out;
	}
	ret = 1;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(cqe);

	CTR4(KTR_IW_CXGB, "iwch_poll_cq_one qpid 0x%x type %d opcode %d status 0x%x",
	     CQE_QPID(cqe), CQE_TYPE(cqe), CQE_OPCODE(cqe), CQE_STATUS(cqe));
	CTR3(KTR_IW_CXGB, "wrid hi 0x%x lo 0x%x cookie 0x%llx",
	     CQE_WRID_HI(cqe), CQE_WRID_LOW(cqe), (unsigned long long) cookie);

	if (CQE_TYPE(cqe) == 0) {
		if (!CQE_STATUS(cqe))
			wc->byte_len = CQE_LEN(cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
	} else {
		switch (CQE_OPCODE(cqe)) {
		case T3_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case T3_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(cqe);
			break;
		case T3_SEND:
		case T3_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case T3_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;

		/* these aren't supported yet */
		case T3_SEND_WITH_INV:
		case T3_SEND_WITH_SE_INV:
		case T3_LOCAL_INV:
		case T3_FAST_REGISTER:
		default:
			log(LOG_ERR, "Unexpected opcode %d "
			    "in the CQE received for QPID=0x%0x\n",
			    CQE_OPCODE(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(cqe)) {
		case TPT_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case TPT_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case TPT_ERR_QPID:
		case TPT_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case TPT_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case TPT_ERR_INVALIDATE_SHARED_MR:
		case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case TPT_ERR_CRC:
		case TPT_ERR_MARKER:
		case TPT_ERR_PDU_LEN_ERR:
		case TPT_ERR_OUT_OF_RQE:
		case TPT_ERR_DDP_VERSION:
		case TPT_ERR_RDMA_VERSION:
		case TPT_ERR_DDP_QUEUE_NUM:
		case TPT_ERR_MSN:
		case TPT_ERR_TBIT:
		case TPT_ERR_MO:
		case TPT_ERR_MSN_RANGE:
		case TPT_ERR_IRD_OVERFLOW:
		case TPT_ERR_OPCODE:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case TPT_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			log(LOG_ERR, "Unexpected cqe_status 0x%x for "
			    "QPID=0x%0x\n", CQE_STATUS(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
		}
	}
out:
	if (wq)
		mtx_unlock(&qhp->lock);
	return ret;
}
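/*
 * Sketch of the surrounding poll loop, for context: the verbs poll_cq entry
 * point is expected to drive iwch_poll_cq_one() once per requested entry,
 * retrying on -EAGAIN (T3 can post CQEs that carry no work completion).
 * This is an assumption-laden outline, not the driver's verbatim code: the
 * to_iwch_cq() helper and the mutex-typed chp->lock are taken on faith from
 * the FreeBSD flavor of this file.
 */
static int example_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct iwch_cq *chp = to_iwch_cq(ibcq);	/* assumed container-of helper */
	struct iwch_dev *rhp = chp->rhp;
	int npolled;
	int err = 0;

	mtx_lock(&chp->lock);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		/* skip CQEs that map to no completion until one is returned */
		do {
			err = iwch_poll_cq_one(rhp, chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err <= 0)
			break;
	}
	mtx_unlock(&chp->lock);

	/* negative errno is fatal; otherwise report how many entries we filled */
	return (err < 0) ? err : npolled;
}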
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)
						ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *	TERMINATE,
		 *	incoming RDMA WRITE Failures
		 *	incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		if (udata->outlen < sizeof uresp) {
			if (!warned++)
				printk(KERN_WARNING MOD "Warning - "
				       "downlevel libcxgb3 (non-fatal).\n");
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
					     sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			uresp.reserved = 0;
			resplen = sizeof uresp;
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}
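/*
 * Sketch of the two response layouts the resplen logic above distinguishes.
 * These are reconstructed from the fields the function fills in, not copied
 * from the cxgb3 ABI header, so the exact field order is an assumption: a
 * downlevel libcxgb3 only understands the "v0" response (key, cqid, size),
 * while a current library also receives the precise length it must mmap.
 */
struct example_create_cq_resp_v0 {	/* hypothetical stand-in for the v0 ABI */
	__u64 key;			/* mmap offset handed back to userspace */
	__u32 cqid;
	__u32 size_log2;
};

struct example_create_cq_resp {		/* hypothetical stand-in for the current ABI */
	__u64 key;
	__u32 cqid;
	__u32 size_log2;
	__u32 memsize;			/* PAGE_ALIGN'ed length userspace must mmap */
	__u32 reserved;
};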
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)
						ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
				     sizeof (struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}
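/*
 * Sketch of how the key/addr/len triple stashed by insert_mmap() above is
 * consumed: a mmap handler on the ucontext looks the entry up by the key
 * encoded in vm_pgoff and remaps the CQ queue pages to userspace. This is an
 * illustration only; lookup_mmap() is a hypothetical counterpart to
 * insert_mmap(), not necessarily the driver's actual helper.
 */
static int example_mmap_cq(struct ib_ucontext *context,
			   struct vm_area_struct *vma)
{
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long len = vma->vm_end - vma->vm_start;
	struct iwch_mm_entry *mm;

	mm = lookup_mmap(ucontext, key, len);	/* hypothetical helper */
	if (!mm)
		return -EINVAL;

	/* hand the CQ queue pages (mm->addr came from virt_to_phys()) to user */
	return remap_pfn_range(vma, vma->vm_start, mm->addr >> PAGE_SHIFT,
			       len, vma->vm_page_prot);
}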