Example #1
static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else {
		spin_lock_irqsave(&chp->lock, flag);
	}
	PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err < 0)
		printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}
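
For context, a hedged sketch (not from the driver) of how a kernel consumer reaches this handler: the generic verbs call ib_req_notify_cq() dispatches to iwch_arm_cq() through the provider's req_notify_cq hook, and the flag choice selects CQ_ARM_SE versus CQ_ARM_AN. rearm_cq_example is a hypothetical name.

#include <rdma/ib_verbs.h>

static void rearm_cq_example(struct ib_cq *cq)
{
	/* Ask for an event only on solicited completions (maps to CQ_ARM_SE). */
	ib_req_notify_cq(cq, IB_CQ_SOLICITED);

	/* Ask for an event on the next completion of any kind (CQ_ARM_AN);
	 * a positive return with IB_CQ_REPORT_MISSED_EVENTS means CQEs are
	 * already pending, so the caller should poll before sleeping. */
	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
			     IB_CQ_REPORT_MISSED_EVENTS) > 0)
		; /* poll the CQ again here */
}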
Example #2
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
#ifdef DEBUG
		int i = 0;
#endif

		/*
		 * Because T3 can post CQEs that are _not_ associated
		 * with a WR, we might have to poll again after removing
		 * one of these.
		 */
		do {
			err = iwch_poll_cq_one(rhp, chp, wc + npolled);
#ifdef DEBUG
			BUG_ON(++i > 1000);
#endif
		} while (err == -EAGAIN);
		if (err <= 0)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);

	if (err < 0)
		return err;
	else
		return npolled;
}
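
A hedged usage sketch: a kernel ULP would drain this poll routine from its completion handler, then rearm and re-poll to close the race between the final poll and the rearm. drain_cq_example and the batch size of 16 are illustrative, not taken from the source.

#include <rdma/ib_verbs.h>

static void drain_cq_example(struct ib_cq *cq)
{
	struct ib_wc wc[16];
	int n, i;

	do {
		while ((n = ib_poll_cq(cq, 16, wc)) > 0) {
			for (i = 0; i < n; i++) {
				/* complete the work request identified by
				 * wc[i].wr_id, checking wc[i].status */
			}
		}
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}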
Example #3
static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	PDBG("%s ib_cq %p\n", __FUNCTION__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}
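
The atomic_dec() plus wait_event() pair above implies that other paths hold references on the CQ. A hedged sketch of the counterpart protocol, assuming (as in this driver's event path) that handlers pin the CQ with refcnt and wake chp->wait on the final put; event_handler_sketch is a hypothetical name.

static void event_handler_sketch(struct iwch_cq *chp)
{
	atomic_inc(&chp->refcnt);	/* pin the CQ against destroy */

	/* ... deliver the completion or async event ... */

	if (atomic_dec_and_test(&chp->refcnt))
		wake_up(&chp->wait);	/* releases iwch_destroy_cq() */
}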
Example #4
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	int npolled;
	int err = 0;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;

	mtx_lock(&chp->lock);
	for (npolled = 0; npolled < num_entries; ++npolled) {
#ifdef DEBUG
		int i = 0;
#endif

		/*
		 * Because T3 can post CQEs that are _not_ associated
		 * with a WR, we might have to poll again after removing
		 * one of these.
		 */
		do {
			err = iwch_poll_cq_one(rhp, chp, wc + npolled);
#ifdef DEBUG
			PANIC_IF(++i > 1000);
#endif
		} while (err == -EAGAIN);
		if (err <= 0)
			break;
	}
	mtx_unlock(&chp->lock);

	if (err < 0) {
		return err;
	} else {
		return npolled;
	}
}
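
This is the FreeBSD port of Example #2: struct mtx replaces the Linux spinlock and PANIC_IF replaces BUG_ON. A hedged sketch of how the create path would initialize that mutex; the lock name string and function name are illustrative, not taken from the source.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static void init_cq_lock_sketch(struct iwch_cq *chp)
{
	mtx_init(&chp->lock, "iwch cq lock", NULL, MTX_DEF | MTX_DUPOK);
}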
Example #5
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __FUNCTION__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe + 1);
	newcq.size_log2 = ilog2(cqe);

	/* Don't allow resize to less than the current WCE count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret) {
		return ret;
	}

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret) {
		return ret;
	}

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
	       sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret) {
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
			__FUNCTION__, ret);
	}

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}
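
A hedged worked example of the sizing math above, using a hypothetical request of 100 CQEs; resize_math_example is an illustrative name.

#include <linux/log2.h>

static void resize_math_example(void)
{
	int cqe = roundup_pow_of_two(100 + 1);	/* rounds 101 up to 128 */
	int size_log2 = ilog2(cqe);		/* 7 */
	int reported = (1 << size_log2) - 1;	/* 127, stored in ibcq.cqe */

	(void)size_log2;
	(void)reported;
}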
Example #6
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

	
	if (cqe <= cq->cqe)
		return 0;

	
	cqe = roundup_pow_of_two(cqe+1);
	newcq.size_log2 = ilog2(cqe);

	
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	
	ret = iwch_quiesce_qps(chp);
	if (ret) {
		return ret;
	}

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret) {
		return ret;
	}

	
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
				        sizeof(struct t3_cqe));

	
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;

	
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret) {
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
			__func__, ret);
	}

	

	
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}