static void core_mmu_mmap_init(struct tee_mmap_region *mm, size_t max_elem,
			       struct map_area *map)
{
	struct tee_mmap_region mme;
	size_t n;

	memset(mm, 0, max_elem * sizeof(struct tee_mmap_region));

	for (n = 0; map[n].type != MEM_AREA_NOTYPE; n++) {
		mme.pa = map[n].pa;
		mme.va = map[n].va;
		mme.size = map[n].size;

		mme.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PR |
			   TEE_MATTR_GLOBAL;

		if (map[n].device || !map[n].cached)
			mme.attr |= TEE_MATTR_CACHE_NONCACHE <<
				    TEE_MATTR_CACHE_SHIFT;
		else
			mme.attr |= TEE_MATTR_CACHE_CACHED <<
				    TEE_MATTR_CACHE_SHIFT;

		if (map[n].rw)
			mme.attr |= TEE_MATTR_PW;
		if (map[n].exec)
			mme.attr |= TEE_MATTR_PX;
		if (map[n].secure)
			mme.attr |= TEE_MATTR_SECURE;

		insert_mmap(mm, max_elem, &mme);
	}
}
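/*
 * A minimal sketch of what the insert_mmap() helper used above could
 * look like: keep the fixed-size table sorted by virtual address,
 * relying on the memset() above having zeroed the unused tail. This is
 * an illustrative assumption, not the actual OP-TEE implementation.
 */
static void insert_mmap(struct tee_mmap_region *mm, size_t max_elem,
			const struct tee_mmap_region *mme)
{
	size_t n;

	/* Find the first occupied slot that sorts after the new entry */
	for (n = 0; n < max_elem - 1 && mm[n].size; n++)
		if (mm[n].va > mme->va)
			break;

	/* Shift the tail up one slot and place the new entry */
	memmove(mm + n + 1, mm + n, (max_elem - n - 1) * sizeof(*mm));
	mm[n] = *mme;
}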
static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
					       struct ib_udata *udata)
{
	struct c4iw_ucontext *context;
	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
	static int warned;
	struct c4iw_alloc_ucontext_resp uresp;
	int ret = 0;
	struct c4iw_mm_entry *mm = NULL;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context) {
		ret = -ENOMEM;
		goto err;
	}

	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);

	if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
		if (!warned++)
			pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.\n");
		rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
	} else {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm) {
			ret = -ENOMEM;
			goto err_free;
		}

		uresp.status_page_size = PAGE_SIZE;

		spin_lock(&context->mmap_lock);
		uresp.status_page_key = context->key;
		context->key += PAGE_SIZE;
		spin_unlock(&context->mmap_lock);

		ret = ib_copy_to_udata(udata, &uresp,
				       sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err_mm;

		mm->key = uresp.status_page_key;
		mm->addr = virt_to_phys(rhp->rdev.status_page);
		mm->len = PAGE_SIZE;
		insert_mmap(context, mm);
	}
	return &context->ibucontext;
err_mm:
	kfree(mm);
err_free:
	kfree(context);
err:
	return ERR_PTR(ret);
}
static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
			       struct ib_udata *udata)
{
	struct ib_device *ibdev = ucontext->device;
	struct c4iw_ucontext *context = to_c4iw_ucontext(ucontext);
	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
	struct c4iw_alloc_ucontext_resp uresp;
	int ret = 0;
	struct c4iw_mm_entry *mm = NULL;

	pr_debug("ibdev %p\n", ibdev);
	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);

	if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
		pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
		rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
	} else {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm) {
			ret = -ENOMEM;
			goto err;
		}

		uresp.status_page_size = PAGE_SIZE;

		spin_lock(&context->mmap_lock);
		uresp.status_page_key = context->key;
		context->key += PAGE_SIZE;
		spin_unlock(&context->mmap_lock);

		ret = ib_copy_to_udata(udata, &uresp,
				       sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err_mm;

		mm->key = uresp.status_page_key;
		mm->addr = virt_to_phys(rhp->rdev.status_page);
		mm->len = PAGE_SIZE;
		insert_mmap(context, mm);
	}
	return 0;
err_mm:
	kfree(mm);
err:
	return ret;
}
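/*
 * The status_page_key handed back in uresp is meant to be used by the
 * user library as the mmap() offset on the uverbs command fd; the
 * driver's mmap handler then matches that offset against the entries
 * queued on context->mmaps. A hedged userspace-side sketch follows;
 * the map_status_page() name and cmd_fd parameter are illustrative
 * stand-ins, not the real libcxgb4 code.
 */
#include <sys/mman.h>

static void *map_status_page(int cmd_fd,
			     const struct c4iw_alloc_ucontext_resp *resp)
{
	/* The key doubles as the page-aligned mmap offset */
	return mmap(NULL, resp->status_page_size, PROT_READ, MAP_SHARED,
		    cmd_fd, resp->status_page_key);
}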
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);

	rhp = to_c4iw_dev(ibdev);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be a multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make the actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = entries * 2;

	/*
	 * Make the HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof(*chp->cq.queue);

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	if (ucontext) {
		memsize = roundup(memsize, PAGE_SIZE);
		hwentries = memsize / sizeof(*chp->cq.queue);
	}
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err2;

	if (ucontext) {
		/* Set ret here so the bare gotos below don't return 0 */
		ret = -ENOMEM;
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm)
			goto err3;
		mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
		if (!mm2)
			goto err4;

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (ret)
			goto err5;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.ugts;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
	     __func__, chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
	     (unsigned long long)chp->cq.dma_addr);
	return &chp->ibcq;
err5:
	kfree(mm2);
err4:
	kfree(mm);
err3:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries,
				    int vector,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)
				ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {
		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *	TERMINATE,
		 *	incoming RDMA WRITE Failures
		 *	incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
				     sizeof(struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long)chp->cq.dma_addr);
	return &chp->ibcq;
}
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)
				ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {
		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *	TERMINATE,
		 *	incoming RDMA WRITE Failures
		 *	incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		if (udata->outlen < sizeof(uresp)) {
			if (!warned++)
				printk(KERN_WARNING MOD
				       "Warning - downlevel libcxgb3 (non-fatal).\n");
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
					     sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			uresp.reserved = 0;
			resplen = sizeof(uresp);
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long)chp->cq.dma_addr);
	return &chp->ibcq;
}
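/*
 * The udata->outlen check above is how these drivers stay compatible
 * with older user libraries: a short outlen means a downlevel
 * libcxgb3, so only the legacy v0 response layout is copied out and
 * mm->len is sized for the old queue layout. The same negotiation
 * pattern in isolation (my_resp and my_resp_v0 are hypothetical types
 * for illustration, not part of this driver):
 */
struct my_resp_v0 {
	__u32 qid;
};

struct my_resp {
	__u32 qid;		/* must remain the v0 prefix */
	__u32 memsize;		/* newer field, appended at the end */
};

static int copy_resp(struct ib_udata *udata, struct my_resp *resp)
{
	/* Copy only as much as the user library knows how to read */
	size_t len = udata->outlen < sizeof(*resp) ?
		sizeof(struct my_resp_v0) : sizeof(*resp);

	return ib_copy_to_udata(udata, resp, len);
}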
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq ucmd;
	struct c4iw_create_cq_resp uresp;
	int ret, wr_len;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;
	struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct c4iw_ucontext, ibucontext);

	pr_debug("ib_dev %p entries %d\n", ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_c4iw_dev(ibdev);

	if (vector >= rhp->rdev.lldi.nciq)
		return ERR_PTR(-EINVAL);

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			ucontext->is_32b_cqe = 1;
	}

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!chp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_chp;
	}
	c4iw_init_wr_wait(chp->wr_waitp);

	wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
	chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!chp->destroy_skb) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be a multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make the actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

	/*
	 * Make the HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * ((ucontext && ucontext->is_32b_cqe) ?
			       (sizeof(*chp->cq.queue) / 2) :
			       sizeof(*chp->cq.queue));

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	if (udata)
		memsize = roundup(memsize, PAGE_SIZE);

	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;
	chp->cq.vector = vector;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
			chp->wr_waitp);
	if (ret)
		goto err_free_skb;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
	if (ret)
		goto err_destroy_cq;

	if (ucontext) {
		ret = -ENOMEM;
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm)
			goto err_remove_handle;
		mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
		if (!mm2)
			goto err_free_mm;

		memset(&uresp, 0, sizeof(uresp));
		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		/* communicate to the userspace that
		 * kernel driver supports 64B CQE
		 */
		uresp.flags |= C4IW_64B_CQE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp,
				       ucontext->is_32b_cqe ?
				       sizeof(uresp) - sizeof(uresp.flags) :
				       sizeof(uresp));
		if (ret)
			goto err_free_mm2;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.bar2_pa;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
		 chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
		 (unsigned long long)chp->cq.dma_addr);
	return &chp->ibcq;
err_free_mm2:
	kfree(mm2);
err_free_mm:
	kfree(mm);
err_remove_handle:
	xa_erase_irq(&rhp->cqs, chp->cq.cqid);
err_destroy_cq:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
		   chp->destroy_skb, chp->wr_waitp);
err_free_skb:
	kfree_skb(chp->destroy_skb);
err_free_wr_wait:
	c4iw_put_wr_wait(chp->wr_waitp);
err_free_chp:
	kfree(chp);
	return ERR_PTR(ret);
}
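/*
 * Both CQ mappings above funnel through insert_mmap(), which in the
 * cxgb4 driver amounts to queueing the (key, addr, len) triple on the
 * per-context list under mmap_lock so the driver's mmap handler can
 * match the offset later. A minimal sketch consistent with the call
 * sites above; the real helper lives in the driver header, so treat
 * this as illustrative:
 */
static void insert_mmap(struct c4iw_ucontext *ucontext,
			struct c4iw_mm_entry *mm)
{
	spin_lock(&ucontext->mmap_lock);
	list_add_tail(&mm->entry, &ucontext->mmaps);
	spin_unlock(&ucontext->mmap_lock);
}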
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries,
				    int vector,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)
				ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {
		/*
		 * T3A: add some fluff to handle extra CQEs inserted for
		 * various errors; we cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
				     sizeof(struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long)chp->cq.dma_addr);
	return &chp->ibcq;
}
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);

	rhp = to_c4iw_dev(ibdev);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/* entries must be a multiple of 16 for HW. */
	entries = roundup(entries, 16);

	/* Make the actual HW queue 2x to avoid cidx_inc overflows. */
	hwentries = entries * 2;

	/*
	 * Make the HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof(*chp->cq.queue);

	/* memsize must be a multiple of the page size if it's a user cq. */
	if (ucontext) {
		memsize = roundup(memsize, PAGE_SIZE);
		hwentries = memsize / sizeof(*chp->cq.queue);
		while (hwentries > T4_MAX_IQ_SIZE) {
			memsize -= PAGE_SIZE;
			hwentries = memsize / sizeof(*chp->cq.queue);
		}
	}
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err2;

	if (ucontext) {
		/* Set ret here so the bare gotos below don't return 0 */
		ret = -ENOMEM;
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm)
			goto err3;
		mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
		if (!mm2)
			goto err4;

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (ret)
			goto err5;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.ugts;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
	     __func__, chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
	     (unsigned long long)chp->cq.dma_addr);
	return &chp->ibcq;
err5:
	kfree(mm2);
err4:
	kfree(mm);
err3:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}