Example #1
int ipath_destroy_qp(struct ib_qp *ibqp)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&qp->timerwait))
			list_del_init(&qp->timerwait);
		if (!list_empty(&qp->piowait))
			list_del_init(&qp->piowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~IPATH_S_ANY_WAIT;
		spin_unlock_irq(&qp->s_lock);
		/* Stop the sending tasklet. */
		tasklet_kill(&qp->s_task);
		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
	} else
		spin_unlock_irq(&qp->s_lock);

	ipath_free_qp(&dev->qp_table, qp);

	if (qp->s_tx) {
		atomic_dec(&qp->refcount);
		if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
			kfree(qp->s_tx->txreq.map_addr);
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
		spin_unlock_irq(&dev->pending_lock);
		qp->s_tx = NULL;
	}

	wait_event(qp->wait, !atomic_read(&qp->refcount));

	/* All users are cleaned up; mark the QPN available for reuse. */
	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	kfree(qp->r_ud_sg_list);
	vfree(qp->s_wq);
	kfree(qp);
	return 0;
}
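
The final wait_event() above only completes because every path that drops a QP reference wakes the waiter. A minimal sketch of that counterpart idiom, assuming a hypothetical helper name (the driver open-codes this at each reference drop):

#include <linux/atomic.h>
#include <linux/wait.h>

/* Hypothetical helper showing the idiom ipath_destroy_qp() waits on. */
static inline void example_put_qp(struct ipath_qp *qp)
{
	/* Drop one reference; the last dropper wakes the destroy path. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}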
Example #2
/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	qp->state = IB_QPS_ERR;
	spin_unlock_irqrestore(&qp->s_lock, flags);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	/* Stop the sending tasklet. */
	tasklet_kill(&qp->s_task);

	/* Make sure the QP isn't on the timeout list. */
	spin_lock_irqsave(&dev->pending_lock, flags);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	/*
	 * Make sure that the QP is not in the QPN table so receive
	 * interrupts will discard packets for this QP.  XXX Also remove QP
	 * from multicast table.
	 */
	if (atomic_read(&qp->refcount) != 0)
		ipath_free_qp(&dev->qp_table, qp);

	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp);
	return 0;
}
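
Note that consumers never call ipath_destroy_qp() directly in either version: the IB core dispatches ib_destroy_qp() to the driver's destroy_qp method. A minimal caller-side sketch (the function name is illustrative; as the kernel-doc above warns, the QP may still be active, so real callers typically move it to the error state and drain it first):

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

static void example_teardown(struct ib_qp *qp)
{
	int ret;

	/* Dispatches to ipath_destroy_qp() on an ipath device. */
	ret = ib_destroy_qp(qp);
	if (ret)
		pr_err("ib_destroy_qp failed: %d\n", ret);
}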
Example #3
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ipath_qp *qp;
	int err;
	struct ipath_swqe *swq = NULL;
	struct ipath_ibdev *dev;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;

	if (init_attr->create_flags) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
		    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		sz = sizeof(struct ipath_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct ipath_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct ipath_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kmalloc(sz + sg_list_sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD ||
		    init_attr->qp_type == IB_QPT_SMI ||
		    init_attr->qp_type == IB_QPT_GSI)) {
			qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
			if (!qp->r_ud_sg_list) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		} else
			qp->r_ud_sg_list = NULL;
		if (init_attr->srq) {
			sz = 0;
			qp->r_rq.size = 0;
			qp->r_rq.max_sge = 0;
			qp->r_rq.wq = NULL;
			init_attr->cap.max_recv_wr = 0;
			init_attr->cap.max_recv_sge = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct ipath_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
					      qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_sg_list;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
		INIT_LIST_HEAD(&qp->piowait);
		INIT_LIST_HEAD(&qp->timerwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
		else
			qp->s_flags = 0;
		dev = to_idev(ibpd->device);
		err = ipath_alloc_qpn(&dev->qp_table, qp,
				      init_attr->qp_type);
		if (err) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_sg_list;
		}
		qp->ip = NULL;
		qp->s_tx = NULL;
		ipath_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs. */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct ipath_rwq) +
				qp->r_rq.size * sz;

			qp->ip =
			    ipath_create_mmap_info(dev, s,
						   ibpd->uobject->context,
						   qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_ipath_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	ipath_free_qp(&dev->qp_table, qp);
	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
bail_sg_list:
	kfree(qp->r_ud_sg_list);
bail_qp:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}
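
For reference, the capabilities validated at the top of ipath_create_qp() arrive from the consumer through ib_create_qp(). A minimal caller-side sketch for an RC QP, assuming the PD and CQs already exist (the function name and queue sizes are illustrative only):

#include <rdma/ib_verbs.h>

static struct ib_qp *example_make_rc_qp(struct ib_pd *pd,
					struct ib_cq *send_cq,
					struct ib_cq *recv_cq)
{
	struct ib_qp_init_attr attr = {
		.send_cq     = send_cq,
		.recv_cq     = recv_cq,
		.cap = {
			/* Must stay within ib_ipath_max_qp_wrs and ib_ipath_max_sges. */
			.max_send_wr  = 64,
			.max_recv_wr  = 64,
			.max_send_sge = 2,
			.max_recv_sge = 2,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type     = IB_QPT_RC,
	};

	/* Dispatches to ipath_create_qp(); returns an ERR_PTR() on failure. */
	return ib_create_qp(pd, &attr);
}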