static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
	struct c4iw_dev *rhp = to_c4iw_dev(context->device);
	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
	struct c4iw_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __func__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}
static void c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
	struct c4iw_dev *rhp;
	struct c4iw_mm_entry *mm, *tmp;

	pr_debug("context %p\n", context);
	rhp = to_c4iw_dev(ucontext->ibucontext.device);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
}
static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
			       struct ib_udata *udata)
{
	struct ib_device *ibdev = ucontext->device;
	struct c4iw_ucontext *context = to_c4iw_ucontext(ucontext);
	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
	struct c4iw_alloc_ucontext_resp uresp;
	int ret = 0;
	struct c4iw_mm_entry *mm = NULL;

	pr_debug("ibdev %p\n", ibdev);
	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);

	if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
		pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
		rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
	} else {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm) {
			ret = -ENOMEM;
			goto err;
		}

		uresp.status_page_size = PAGE_SIZE;

		spin_lock(&context->mmap_lock);
		uresp.status_page_key = context->key;
		context->key += PAGE_SIZE;
		spin_unlock(&context->mmap_lock);

		ret = ib_copy_to_udata(udata, &uresp,
				       sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err_mm;

		mm->key = uresp.status_page_key;
		mm->addr = virt_to_phys(rhp->rdev.status_page);
		mm->len = PAGE_SIZE;
		insert_mmap(context, mm);
	}
	return 0;
err_mm:
	kfree(mm);
err:
	return ret;
}
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_c4iw_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
				  : NULL;
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
	kfree(chp);
	return 0;
}
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	pr_debug("ib_cq %p\n", ib_cq);
	chp = to_c4iw_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
				  : NULL;
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
		   chp->destroy_skb, chp->wr_waitp);
	c4iw_put_wr_wait(chp->wr_waitp);
	kfree(chp);
	return 0;
}
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct c4iw_rdev *rdev;
	int ret = 0;
	struct c4iw_mm_entry *mm;
	struct c4iw_ucontext *ucontext;
	u64 addr;

	pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff, key, len);

	if (vma->vm_start & (PAGE_SIZE-1))
		return -EINVAL;

	rdev = &(to_c4iw_dev(context->device)->rdev);
	ucontext = to_c4iw_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
	    (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
		     pci_resource_len(rdev->lldi.pdev, 0)))) {

		/*
		 * MA_SYNC register...
		 */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
			     int vector, struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);

	rhp = to_c4iw_dev(ibdev);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cdix_inc overflows.
	 */
	hwentries = entries * 2;

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if its a user cq.
	 */
	if (ucontext) {
		memsize = roundup(memsize, PAGE_SIZE);
		hwentries = memsize / sizeof *chp->cq.queue;
	}
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err2;

	if (ucontext) {
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm)
			goto err3;
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2)
			goto err4;

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err5;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.ugts;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
	     __func__, chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
err5:
	kfree(mm2);
err4:
	kfree(mm);
err3:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, int vector, struct ib_ucontext *ib_context, struct ib_udata *udata) { struct c4iw_dev *rhp; struct c4iw_cq *chp; struct c4iw_create_cq_resp uresp; struct c4iw_ucontext *ucontext = NULL; int ret; size_t memsize, hwentries; struct c4iw_mm_entry *mm, *mm2; PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries); rhp = to_c4iw_dev(ibdev); chp = kzalloc(sizeof(*chp), GFP_KERNEL); if (!chp) return ERR_PTR(-ENOMEM); if (ib_context) ucontext = to_c4iw_ucontext(ib_context); /* */ entries++; /* */ entries++; /* */ entries = roundup(entries, 16); /* */ hwentries = entries * 2; /* */ if (hwentries < 64) hwentries = 64; memsize = hwentries * sizeof *chp->cq.queue; /* */ if (ucontext) { memsize = roundup(memsize, PAGE_SIZE); hwentries = memsize / sizeof *chp->cq.queue; while (hwentries > T4_MAX_IQ_SIZE) { memsize -= PAGE_SIZE; hwentries = memsize / sizeof *chp->cq.queue; } } chp->cq.size = hwentries; chp->cq.memsize = memsize; ret = create_cq(&rhp->rdev, &chp->cq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx); if (ret) goto err1; chp->rhp = rhp; chp->cq.size--; /* */ chp->ibcq.cqe = entries - 2; spin_lock_init(&chp->lock); spin_lock_init(&chp->comp_handler_lock); atomic_set(&chp->refcnt, 1); init_waitqueue_head(&chp->wait); ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid); if (ret) goto err2; if (ucontext) { mm = kmalloc(sizeof *mm, GFP_KERNEL); if (!mm) goto err3; mm2 = kmalloc(sizeof *mm2, GFP_KERNEL); if (!mm2) goto err4; uresp.qid_mask = rhp->rdev.cqmask; uresp.cqid = chp->cq.cqid; uresp.size = chp->cq.size; uresp.memsize = chp->cq.memsize; spin_lock(&ucontext->mmap_lock); uresp.key = ucontext->key; ucontext->key += PAGE_SIZE; uresp.gts_key = ucontext->key; ucontext->key += PAGE_SIZE; spin_unlock(&ucontext->mmap_lock); ret = ib_copy_to_udata(udata, &uresp, sizeof uresp); if (ret) goto err5; mm->key = uresp.key; mm->addr = virt_to_phys(chp->cq.queue); mm->len = chp->cq.memsize; insert_mmap(ucontext, mm); mm2->key = uresp.gts_key; mm2->addr = chp->cq.ugts; mm2->len = PAGE_SIZE; insert_mmap(ucontext, mm2); } PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n", __func__, chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize, (unsigned long long) chp->cq.dma_addr); return &chp->ibcq; err5: kfree(mm2); err4: kfree(mm); err3: remove_handle(rhp, &rhp->cqidr, chp->cq.cqid); err2: destroy_cq(&chp->rhp->rdev, &chp->cq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx); err1: kfree(chp); return ERR_PTR(ret); }
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret, wr_len;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	pr_debug("ib_dev %p entries %d\n", ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_c4iw_dev(ibdev);

	if (vector >= rhp->rdev.lldi.nciq)
		return ERR_PTR(-EINVAL);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!chp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_chp;
	}
	c4iw_init_wr_wait(chp->wr_waitp);

	wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
	chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!chp->destroy_skb) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cdix_inc overflows.
	 */
	hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if its a user cq.
	 */
	if (ucontext)
		memsize = roundup(memsize, PAGE_SIZE);

	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;
	chp->cq.vector = vector;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
			chp->wr_waitp);
	if (ret)
		goto err_free_skb;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err_destroy_cq;

	if (ucontext) {
		ret = -ENOMEM;
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm)
			goto err_remove_handle;
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2)
			goto err_free_mm;

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp,
				       sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err_free_mm2;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.bar2_pa;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
		 chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
		 (unsigned long long)chp->cq.dma_addr);
	return &chp->ibcq;
err_free_mm2:
	kfree(mm2);
err_free_mm:
	kfree(mm);
err_remove_handle:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err_destroy_cq:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
		   chp->destroy_skb, chp->wr_waitp);
err_free_skb:
	kfree_skb(chp->destroy_skb);
err_free_wr_wait:
	c4iw_put_wr_wait(chp->wr_waitp);
err_free_chp:
	kfree(chp);
	return ERR_PTR(ret);
}