Example 1
0
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
                                    struct ib_ucontext *ib_context,
                                    struct ib_udata *udata)
{
    struct iwch_dev *rhp;
    struct iwch_cq *chp;
    struct iwch_create_cq_resp uresp;
    struct iwch_create_cq_req ureq;
    struct iwch_ucontext *ucontext = NULL;
    static int warned;
    size_t resplen;

    PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
    rhp = to_iwch_dev(ibdev);
    chp = kzalloc(sizeof(*chp), GFP_KERNEL);
    if (!chp)
        return ERR_PTR(-ENOMEM);

    if (ib_context) {
        ucontext = to_iwch_ucontext(ib_context);
        if (!t3a_device(rhp)) {
            if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
                kfree(chp);
                return ERR_PTR(-EFAULT);
            }
            chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
        }
    }

    if (t3a_device(rhp)) {

        /*
         * T3A: Add some fluff to handle extra CQEs inserted
         * for various errors.
         * Additional CQE possibilities:
         *      TERMINATE,
         *      incoming RDMA WRITE Failures
         *      incoming RDMA READ REQUEST FAILUREs
         * NOTE: We cannot ensure the CQ won't overflow.
         */
        entries += 16;
    }
    entries = roundup_pow_of_two(entries);
    chp->cq.size_log2 = ilog2(entries);

    if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
        kfree(chp);
        return ERR_PTR(-ENOMEM);
    }
    chp->rhp = rhp;
    chp->ibcq.cqe = 1 << chp->cq.size_log2;
    spin_lock_init(&chp->lock);
    spin_lock_init(&chp->comp_handler_lock);
    atomic_set(&chp->refcnt, 1);
    init_waitqueue_head(&chp->wait);
    if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
        cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
        kfree(chp);
        return ERR_PTR(-ENOMEM);
    }

    if (ucontext) {
        struct iwch_mm_entry *mm;

        mm = kmalloc(sizeof *mm, GFP_KERNEL);
        if (!mm) {
            iwch_destroy_cq(&chp->ibcq);
            return ERR_PTR(-ENOMEM);
        }
        uresp.cqid = chp->cq.cqid;
        uresp.size_log2 = chp->cq.size_log2;
        spin_lock(&ucontext->mmap_lock);
        uresp.key = ucontext->key;
        ucontext->key += PAGE_SIZE;
        spin_unlock(&ucontext->mmap_lock);
        mm->key = uresp.key;
        mm->addr = virt_to_phys(chp->cq.queue);
        if (udata->outlen < sizeof uresp) {
            if (!warned++)
                printk(KERN_WARNING MOD "Warning - "
                       "downlevel libcxgb3 (non-fatal).\n");
            mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
                                 sizeof(struct t3_cqe));
            resplen = sizeof(struct iwch_create_cq_resp_v0);
        } else {
            mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
                                 sizeof(struct t3_cqe));
            uresp.memsize = mm->len;
            resplen = sizeof uresp;
        }
        if (ib_copy_to_udata(udata, &uresp, resplen)) {
            kfree(mm);
            iwch_destroy_cq(&chp->ibcq);
            return ERR_PTR(-EFAULT);
        }
        insert_mmap(ucontext, mm);
    }
    PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
         chp->cq.cqid, chp, (1 << chp->cq.size_log2),
         (unsigned long long) chp->cq.dma_addr);
    return &chp->ibcq;
}
Example 2
0
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof (struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}