/**
 * qib_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	struct qib_srq *srq = to_isrq(ibsrq);
	struct qib_rwq *wq;
	unsigned long flags;
	int ret;

	for (; wr; wr = wr->next) {
		struct qib_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned) wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}
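
For reference, the get_rwqe_ptr() helper used throughout these examples computes the address of a variable-size ring entry (a work-request header followed by up to max_sge scatter/gather entries). A minimal sketch of how such a helper is typically defined for the qib structures, not taken from this listing:

static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
{
	/*
	 * Sketch only: entry n lives at wq->wq + n * (header + max_sge SGEs),
	 * the same "sz" stride the SRQ resize path computes.
	 */
	return (struct qib_rwqe *)
		((char *) rq->wq->wq +
		 (sizeof(struct qib_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
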
Example #2
int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR)
		goto bail;

	qp->state = IB_QPS_ERR;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		ipath_schedule_send(qp);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct ipath_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}
/**
 * qib_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 */
int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata)
{
	struct qib_srq *srq = to_isrq(ibsrq);
	struct qib_rwq *wq;
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct qib_rwq *owq;
		struct qib_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > ib_qib_max_srq_wrs) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr) {
			ret = -EINVAL;
			goto bail;
		}

		sz = sizeof(struct qib_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz);
		if (!wq) {
			ret = -ENOMEM;
			goto bail;
		}

		/* Check that we can write the offset to mmap. */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf =
				(void __user *) (unsigned long) offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}

		spin_lock_irq(&srq->rq.lock);
		/*
		 * validate head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		owq = srq->rq.wq;
		head = owq->head;
		tail = owq->tail;
		if (head >= srq->rq.size || tail >= srq->rq.size) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = 0;
		p = wq->wq;
		while (tail != head) {
			struct qib_rwqe *wqe;
			int i;

			wqe = get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct qib_rwqe *)((char *) p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		srq->rq.wq = wq;
		srq->rq.size = size;
		wq->head = n;
		wq->tail = 0;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);

		vfree(owq);

		if (srq->ip) {
			struct qib_mmap_info *ip = srq->ip;
			struct qib_ibdev *dev = to_idev(srq->ibsrq.device);
			u32 s = sizeof(struct qib_rwq) + size * sz;

			qib_update_mmap_info(dev, ip, s, wq);

			/*
			 * Return the offset to mmap.
			 * See qib_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					goto bail;
			}

			/*
			 * Put user mapping info onto the pending list
			 * unless it already is on the list.
			 */
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}
	goto bail;

bail_unlock:
	spin_unlock_irq(&srq->rq.lock);
bail_free:
	vfree(wq);
bail:
	return ret;
}
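
The resize path above needs the number of WQEs still queued in the old ring before it copies them; a hedged helper (hypothetical name) spelling out that circular-buffer occupancy arithmetic:

/* Hypothetical helper: posted-but-unconsumed WQEs in a ring of "size"
 * slots, given validated head and tail indices (same math as above). */
static u32 srq_wqe_count(u32 head, u32 tail, u32 size)
{
	return (head >= tail) ? head - tail : head + size - tail;
}
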
Example #4
/**
 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update wr_id only, not SGEs
 *
 * Return 0 if no RWQE is available, otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct ipath_rq *rq;
	struct ipath_rwq *wq;
	struct ipath_srq *srq;
	struct ipath_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	qp->r_sge.sg_list = qp->r_sg_list;

	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	do {
		if (unlikely(tail == wq->head)) {
			ret = 0;
			goto unlock;
		}
		/* Make sure entry is read after head index is read. */
		smp_rmb();
		wqe = get_rwqe_ptr(rq, tail);
		if (++tail >= rq->size)
			tail = 0;
	} while (!wr_id_only && !ipath_init_sge(qp, wqe, &qp->r_len,
						&qp->r_sge));
	qp->r_wr_id = wqe->wr_id;
	wq->tail = tail;

	ret = 1;
	set_bit(IPATH_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}
Example #5
/**
 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
{
    unsigned long flags;
    struct qib_rq *rq;
    struct qib_rwq *wq;
    struct qib_srq *srq;
    struct qib_rwqe *wqe;
    void (*handler)(struct ib_event *, void *);
    u32 tail;
    int ret;

    if (qp->ibqp.srq) {
        srq = to_isrq(qp->ibqp.srq);
        handler = srq->ibsrq.event_handler;
        rq = &srq->rq;
    } else {
        srq = NULL;
        handler = NULL;
        rq = &qp->r_rq;
    }

    spin_lock_irqsave(&rq->lock, flags);
    if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
        ret = 0;
        goto unlock;
    }

    wq = rq->wq;
    tail = wq->tail;
    /* Validate tail before using it since it is user writable. */
    if (tail >= rq->size)
        tail = 0;
    if (unlikely(tail == wq->head)) {
        ret = 0;
        goto unlock;
    }
    /* Make sure entry is read after head index is read. */
    smp_rmb();
    wqe = get_rwqe_ptr(rq, tail);
    /*
     * Even though we update the tail index in memory, the verbs
     * consumer is not supposed to post more entries until a
     * completion is generated.
     */
    if (++tail >= rq->size)
        tail = 0;
    wq->tail = tail;
    if (!wr_id_only && !qib_init_sge(qp, wqe)) {
        ret = -1;
        goto unlock;
    }
    qp->r_wr_id = wqe->wr_id;

    ret = 1;
    set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
    if (handler) {
        u32 n;

        /*
         * Validate head pointer value and compute
         * the number of remaining WQEs.
         */
        n = wq->head;
        if (n >= rq->size)
            n = 0;
        if (n < tail)
            n += rq->size - tail;
        else
            n -= tail;
        if (n < srq->limit) {
            struct ib_event ev;

            srq->limit = 0;
            spin_unlock_irqrestore(&rq->lock, flags);
            ev.device = qp->ibqp.device;
            ev.element.srq = qp->ibqp.srq;
            ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
            handler(&ev, srq->ibsrq.srq_context);
            goto bail;
        }
    }
unlock:
    spin_unlock_irqrestore(&rq->lock, flags);
bail:
    return ret;
}
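
The limit check at the end of qib_get_rwqe() only fires once a consumer has armed the low-watermark; a hedged sketch of arming it through the standard verbs call (the watermark value is illustrative):

#include <rdma/ib_verbs.h>

static int arm_srq_limit(struct ib_srq *srq)
{
	struct ib_srq_attr attr = {
		.srq_limit = 16,	/* illustrative watermark */
	};

	/* Request IB_EVENT_SRQ_LIMIT_REACHED once fewer than 16 RWQEs remain. */
	return ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
}
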
Example #6
/**
 * ipath_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 */
int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	unsigned long flags;
	int ret;

	if (attr_mask & IB_SRQ_MAX_WR)
		if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
		    (attr->max_sge > srq->rq.max_sge)) {
			ret = -EINVAL;
			goto bail;
		}

	if (attr_mask & IB_SRQ_LIMIT)
		if (attr->srq_limit >= srq->rq.size) {
			ret = -EINVAL;
			goto bail;
		}

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct ipath_rwqe *wq, *p;
		u32 sz, size, n;

		sz = sizeof(struct ipath_rwqe) +
			attr->max_sge * sizeof(struct ipath_sge);
		size = attr->max_wr + 1;
		wq = vmalloc(size * sz);
		if (!wq) {
			ret = -ENOMEM;
			goto bail;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		if (srq->rq.head < srq->rq.tail)
			n = srq->rq.size + srq->rq.head - srq->rq.tail;
		else
			n = srq->rq.head - srq->rq.tail;
		if (size <= n || size <= srq->limit) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			vfree(wq);
			ret = -EINVAL;
			goto bail;
		}
		n = 0;
		p = wq;
		while (srq->rq.tail != srq->rq.head) {
			struct ipath_rwqe *wqe;
			int i;

			wqe = get_rwqe_ptr(&srq->rq, srq->rq.tail);
			p->wr_id = wqe->wr_id;
			p->length = wqe->length;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct ipath_rwqe *)((char *) p + sz);
			if (++srq->rq.tail >= srq->rq.size)
				srq->rq.tail = 0;
		}
		vfree(srq->rq.wq);
		srq->rq.wq = wq;
		srq->rq.size = size;
		srq->rq.head = n;
		srq->rq.tail = 0;
		srq->rq.max_sge = attr->max_sge;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}

	if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irqsave(&srq->rq.lock, flags);
		srq->limit = attr->srq_limit;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}
Example #7
/**
 * ipath_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			   struct ib_recv_wr **bad_wr)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	struct ipath_ibdev *dev = to_idev(ibsrq->device);
	unsigned long flags;
	int ret;

	for (; wr; wr = wr->next) {
		struct ipath_rwqe *wqe;
		u32 next;
		int i, j;

		if (wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		next = srq->rq.head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == srq->rq.tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&srq->rq, srq->rq.head);
		wqe->wr_id = wr->wr_id;
		wqe->sg_list[0].mr = NULL;
		wqe->sg_list[0].vaddr = NULL;
		wqe->sg_list[0].length = 0;
		wqe->sg_list[0].sge_length = 0;
		wqe->length = 0;
		for (i = 0, j = 0; i < wr->num_sge; i++) {
			/* Check LKEY */
			if (to_ipd(srq->ibsrq.pd)->user &&
			    wr->sg_list[i].lkey == 0) {
				spin_unlock_irqrestore(&srq->rq.lock,
						       flags);
				*bad_wr = wr;
				ret = -EINVAL;
				goto bail;
			}
			if (wr->sg_list[i].length == 0)
				continue;
			if (!ipath_lkey_ok(&dev->lk_table,
					   &wqe->sg_list[j],
					   &wr->sg_list[i],
					   IB_ACCESS_LOCAL_WRITE)) {
				spin_unlock_irqrestore(&srq->rq.lock,
						       flags);
				*bad_wr = wr;
				ret = -EINVAL;
				goto bail;
			}
			wqe->length += wr->sg_list[i].length;
			j++;
		}
		wqe->num_sge = j;
		srq->rq.head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}
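
For context, a hedged sketch of how a kernel verbs consumer typically posts a single-SGE receive that ends up in a handler like the one above (buffer, length, and lkey sources are illustrative):

#include <rdma/ib_verbs.h>

static int post_one_srq_recv(struct ib_srq *srq, u64 dma_addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = dma_addr,	/* DMA-mapped receive buffer */
		.length = len,
		.lkey   = lkey,		/* e.g. from a registered MR */
	};
	struct ib_recv_wr wr = {
		.wr_id   = dma_addr,	/* cookie returned in the completion */
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_srq_recv(srq, &wr, &bad_wr);
}
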
Example #8
/**
 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update wr_id only, not SGEs
 *
 * Return 0 if no RWQE is available, otherwise return 1.
 *
 * Called at interrupt level with the QP r_rq.lock held.
 */
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
	struct ipath_rq *rq;
	struct ipath_srq *srq;
	struct ipath_rwqe *wqe;
	int ret;

	if (!qp->ibqp.srq) {
		rq = &qp->r_rq;
		if (unlikely(rq->tail == rq->head)) {
			ret = 0;
			goto bail;
		}
		wqe = get_rwqe_ptr(rq, rq->tail);
		qp->r_wr_id = wqe->wr_id;
		if (!wr_id_only) {
			qp->r_sge.sge = wqe->sg_list[0];
			qp->r_sge.sg_list = wqe->sg_list + 1;
			qp->r_sge.num_sge = wqe->num_sge;
			qp->r_len = wqe->length;
		}
		if (++rq->tail >= rq->size)
			rq->tail = 0;
		ret = 1;
		goto bail;
	}

	srq = to_isrq(qp->ibqp.srq);
	rq = &srq->rq;
	spin_lock(&rq->lock);
	if (unlikely(rq->tail == rq->head)) {
		spin_unlock(&rq->lock);
		ret = 0;
		goto bail;
	}
	wqe = get_rwqe_ptr(rq, rq->tail);
	qp->r_wr_id = wqe->wr_id;
	if (!wr_id_only) {
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->num_sge;
		qp->r_len = wqe->length;
	}
	if (++rq->tail >= rq->size)
		rq->tail = 0;
	if (srq->ibsrq.event_handler) {
		struct ib_event ev;
		u32 n;

		if (rq->head < rq->tail)
			n = rq->size + rq->head - rq->tail;
		else
			n = rq->head - rq->tail;
		if (n < srq->limit) {
			srq->limit = 0;
			spin_unlock(&rq->lock);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			srq->ibsrq.event_handler(&ev,
						 srq->ibsrq.srq_context);
		} else
			spin_unlock(&rq->lock);
	} else
		spin_unlock(&rq->lock);
	ret = 1;

bail:
	return ret;
}
Example #9
int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct qib_rq *rq;
	struct qib_rwq *wq;
	struct qib_srq *srq;
	struct qib_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = get_rwqe_ptr(rq, tail);
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !qib_init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}
Example #10
/**
 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update wr_id only, not SGEs
 *
 * Return 0 if no RWQE is available, otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct ipath_rq *rq;
	struct ipath_rwq *wq;
	struct ipath_srq *srq;
	struct ipath_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	do {
		if (unlikely(tail == wq->head)) {
			spin_unlock_irqrestore(&rq->lock, flags);
			ret = 0;
			goto bail;
		}
		wqe = get_rwqe_ptr(rq, tail);
		if (++tail >= rq->size)
			tail = 0;
	} while (!wr_id_only && !init_sge(qp, wqe));
	qp->r_wr_id = wqe->wr_id;
	wq->tail = tail;

	ret = 1;
	qp->r_wrid_valid = 1;
	if (handler) {
		u32 n;

		/*
		 * validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
	spin_unlock_irqrestore(&rq->lock, flags);

bail:
	return ret;
}
Example #11
File: qp.c  Project: DenisLug/mptcp
/**
 * hfi1_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err)
{
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
		qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & HFI1_S_ANY_WAIT_SEND)
		qp->s_flags &= ~HFI1_S_ANY_WAIT_SEND;

	write_seqlock(&dev->iowait_lock);
	if (!list_empty(&qp->s_iowait.list) && !(qp->s_flags & HFI1_S_BUSY)) {
		qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
		list_del_init(&qp->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	write_sequnlock(&dev->iowait_lock);

	if (!(qp->s_flags & HFI1_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			hfi1_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		hfi1_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct hfi1_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}
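
hfi1_error_qp() returns nonzero when the caller should raise the last-WQE-reached event after dropping the locks; a hedged sketch of that caller pattern (hypothetical wrapper name, not copied from this listing):

static void flush_qp_and_notify(struct hfi1_qp *qp)
{
	int lastwqe;

	/* The doc comment requires r_lock and s_lock held with IRQs disabled. */
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);
	lastwqe = hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (lastwqe && qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
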
Example #12
int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	ipath_dbg("QP%d/%d in error state (%d)\n",
		  qp->ibqp.qp_num, qp->remote_qpn, err);

	spin_lock(&dev->pending_lock);
	/* XXX What if it's already removed by the timeout code? */
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = 0;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = 0;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	if (qp->r_wrid_valid) {
		qp->r_wrid_valid = 0;
		wc.wr_id = qp->r_wr_id;
		wc.opcode = IB_WC_RECV;
		wc.status = err;
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	while (qp->s_last != qp->s_head) {
		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

		wc.wr_id = wqe->wr.wr_id;
		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->s_hdrwords = 0;
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;

	if (qp->r_rq.wq) {
		struct ipath_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		wc.opcode = IB_WC_RECV;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

	return ret;
}